Measure lower-range performance for infeasible trials
This commit is contained in:
@@ -2062,6 +2062,90 @@ class CoreFlowTests(unittest.TestCase):
|
||||
self.assertEqual(rows[0]["outcomes"][0]["request_id"], "r1")
|
||||
self.assertEqual(rows[0]["outcomes"][0]["sampling_u"], 0.1)
|
||||
|
||||
def test_run_trial_falls_back_below_inherited_search_floor(self) -> None:
    """When the primary search — whose lower bound is inherited from the
    incumbent's sampling_u — finds no feasible point, run_trial should
    sweep the lower range [0.0, incumbent_u] and report its best result
    under ``best_source == "lower_range_fallback"``.
    """
    with tempfile.TemporaryDirectory() as tmp_dir:
        root = Path(tmp_dir)
        spec_file = _write_study_assets(root)

        # Cap the search at two probes so both phases stay short.
        spec_payload = json.loads(spec_file.read_text(encoding="utf-8"))
        spec_payload["search"]["max_probes"] = 2
        spec_file.write_text(json.dumps(spec_payload), encoding="utf-8")

        study = load_study_spec(spec_file)
        store = StudyStore(root / ".aituner" / "studies")
        store.init_study(spec_path=spec_file, study=study)

        # Incumbent record at parallel_size 1; its sampling_u of 0.5
        # becomes the inherited floor of the new trial's search range.
        incumbent = {
            "trial_id": "trial-0001",
            "parallel_size": 1,
            "best_sampling_u": 0.5,
            "best_request_rate": 2.0,
            "best_request_rate_per_gpu": 2.0,
        }
        state = StudyState(
            study_id=study.study_id,
            best_trial_id="trial-0001",
            best_parallel_size=1,
            best_sampling_u=0.5,
            best_request_rate=2.0,
            best_request_rate_per_gpu=2.0,
            next_trial_index=2,
            best_by_parallel_size={"1": incumbent},
            trials=[],
        )
        proposal = Proposal.from_dict(
            {
                "observation": "runtime patch",
                "diagnosis": "measure even if worse than incumbent",
                "config_patch": {"env_patch": {}, "flag_patch": {"max-num-seqs": 2}},
                "expected_effects": ["measure"],
            }
        )

        trial, _ = store.materialize_trial(study=study, state=state, proposal=proposal)
        # The materialized trial starts its search at the inherited floor.
        self.assertEqual(trial.search.low, 0.5)

        def fake_replay(requests, **kwargs):
            # Report fast latencies only when a single request replays;
            # any larger batch reports latencies three orders of
            # magnitude higher, so no probe at or above the inherited
            # floor can pass.
            within_budget = len(requests) <= 1
            outcomes = [
                RequestOutcome(
                    request_id=req.row_id,
                    success=True,
                    ttft_ms=10.0 if within_budget else 10000.0,
                    tpot_ms=5.0 if within_budget else 1000.0,
                    prompt_tokens=req.prompt_tokens_hint,
                    completion_tokens=req.completion_tokens_hint,
                )
                for req in requests
            ]
            return outcomes, False, ""

        server_proc = mock.Mock()
        server_proc.poll.return_value = 0
        with mock.patch("aituner.worker.subprocess.Popen", return_value=server_proc):
            with mock.patch("aituner.worker._wait_for_server_or_exit", return_value=None):
                with mock.patch("aituner.worker._terminate_process_tree", return_value=None):
                    with mock.patch("aituner.worker._replay_requests", side_effect=fake_replay):
                        result = run_trial(Path(trial.artifact_dir) / "trial_spec.json")

        # Primary search (low=0.5) fails at every probe, so the winning
        # measurement comes from the lower-range fallback over [0.0, 0.5].
        self.assertEqual(result["status"], "completed")
        self.assertEqual(result["best_source"], "lower_range_fallback")
        self.assertEqual(result["best_sampling_u"], 0.375)
        self.assertEqual(result["best_request_rate"], 0.1)
        self.assertEqual(result["primary_search"]["low"], 0.5)
        self.assertIsNone(result["primary_search"]["best_request_rate"])
        self.assertEqual(result["lower_range_fallback"]["low"], 0.0)
        self.assertEqual(result["lower_range_fallback"]["high"], 0.5)
        self.assertEqual(result["lower_range_fallback"]["best_request_rate"], 0.1)
        primary_thresholds = [
            probe["threshold"] for probe in result["primary_search"]["probes"]
        ]
        self.assertEqual(primary_thresholds, [0.75, 0.625])
        fallback_thresholds = [
            probe["threshold"] for probe in result["lower_range_fallback"]["probes"]
        ]
        self.assertEqual(fallback_thresholds, [0.25, 0.375])
|
||||
def test_materialize_trial_does_not_mutate_input_state_trials(self) -> None:
|
||||
with tempfile.TemporaryDirectory() as tmp:
|
||||
tmp_path = Path(tmp)
|
||||
|
||||
Reference in New Issue
Block a user