Inherit incumbent topology for runtime validation
This commit is contained in:
@@ -1614,6 +1614,182 @@ class CoreFlowTests(unittest.TestCase):
|
||||
trial, _ = store.materialize_trial(study=study, state=state, proposal=proposal)
|
||||
self.assertEqual(trial.search.low, study.search.low)
|
||||
|
||||
def test_materialize_trial_inherits_incumbent_topology_for_runtime_patch(self) -> None:
    """A runtime-only proposal (no topology flags) inherits the incumbent's topology.

    The proposal patches only ``max-num-seqs``; materialization is expected to
    carry over the best trial's ``tensor-parallel-size``/``data-parallel-size``
    and seed the search floor from the incumbent's best sampling value.
    """
    with tempfile.TemporaryDirectory() as tmp:
        tmp_path = Path(tmp)

        # Engine spec: EP enabled, topology constrained to power-of-two sizes.
        base_flags = {
            "host": "127.0.0.1",
            "port": 8000,
            "enable-expert-parallel": True,
            "tensor-parallel-size": 4,
            "data-parallel-size": 2,
            "expert-parallel-size": 8,
        }
        tunable_flags = [
            "tensor-parallel-size",
            "data-parallel-size",
            "expert-parallel-size",
            "max-num-seqs",
        ]
        topology_constraints = {
            "require_tp_dp_product_equals_gpu_count": True,
            "require_ep_size_leq_tp_dp_product": True,
            "require_ep_size_divides_tp_dp_product": True,
            "allowed_tensor_parallel_sizes": [1, 2, 4, 8],
            "allowed_data_parallel_sizes": [1, 2, 4, 8],
            "allowed_expert_parallel_sizes": [1, 2, 4, 8],
        }
        study_path = _write_study_assets(
            tmp_path,
            engine_overrides={
                "base_flags": base_flags,
                "tunable_flags": tunable_flags,
                "topology_constraints": topology_constraints,
            },
        )
        study = load_study_spec(study_path)
        store = StudyStore(tmp_path / ".aituner" / "studies")
        store.init_study(spec_path=study_path, study=study)

        # Incumbent best trial ran on a TP=2 / DP=4 / EP=8 topology.
        incumbent = TrialSummary(
            trial_id="trial-0002",
            status="completed",
            parallel_size=8,
            best_sampling_u=0.125,
            best_request_rate=3.0,
            best_request_rate_per_gpu=0.375,
            config_patch={
                "env_patch": {},
                "flag_patch": {
                    "tensor-parallel-size": 2,
                    "data-parallel-size": 4,
                    "expert-parallel-size": 8,
                },
            },
        )
        state = StudyState(
            study_id=study.study_id,
            best_trial_id="trial-0002",
            best_parallel_size=8,
            best_sampling_u=0.125,
            best_request_rate=3.0,
            best_request_rate_per_gpu=0.375,
            next_trial_index=3,
            best_by_parallel_size={
                "8": {
                    "trial_id": "trial-0002",
                    "parallel_size": 8,
                    "best_sampling_u": 0.125,
                    "best_request_rate": 3.0,
                    "best_request_rate_per_gpu": 0.375,
                }
            },
            trials=[incumbent],
        )

        # The proposal names no topology flags at all — runtime knob only.
        proposal = Proposal.from_dict(
            {
                "observation": "Validate runtime headroom around the incumbent.",
                "diagnosis": "Try lower concurrency on the current best topology.",
                "config_patch": {"env_patch": {}, "flag_patch": {"max-num-seqs": 160}},
                "expected_effects": ["validate incumbent runtime headroom"],
            }
        )

        trial, next_state = store.materialize_trial(study=study, state=state, proposal=proposal)

        # TP/DP are inherited from the incumbent; the runtime knob is layered on.
        expected_flag_patch = {
            "tensor-parallel-size": 2,
            "data-parallel-size": 4,
            "max-num-seqs": 160,
        }
        self.assertEqual(trial.config_patch.flag_patch, expected_flag_patch)
        # Search floor seeds from the incumbent's best sampling value.
        self.assertEqual(trial.search.low, 0.125)
        # The persisted state records the same merged patch.
        self.assertEqual(
            next_state.trials[-1].config_patch["flag_patch"],
            expected_flag_patch,
        )
def test_materialize_trial_keeps_explicit_topology_runtime_patch(self) -> None:
    """A proposal that names topology flags keeps them verbatim — no inheritance.

    The incumbent ran on TP=2 / DP=4, but the proposal explicitly asks for the
    base TP=4 / DP=2 topology; materialization must honor the proposal's flags
    rather than substituting the incumbent's.
    """
    with tempfile.TemporaryDirectory() as tmp:
        tmp_path = Path(tmp)
        study_path = _write_study_assets(
            tmp_path,
            engine_overrides={
                "base_flags": {
                    "host": "127.0.0.1",
                    "port": 8000,
                    "enable-expert-parallel": True,
                    "tensor-parallel-size": 4,
                    "data-parallel-size": 2,
                    "expert-parallel-size": 8,
                },
                "tunable_flags": [
                    "tensor-parallel-size",
                    "data-parallel-size",
                    "expert-parallel-size",
                    "max-num-seqs",
                ],
                "topology_constraints": {
                    "require_tp_dp_product_equals_gpu_count": True,
                    "require_ep_size_leq_tp_dp_product": True,
                    "require_ep_size_divides_tp_dp_product": True,
                    "allowed_tensor_parallel_sizes": [1, 2, 4, 8],
                    "allowed_data_parallel_sizes": [1, 2, 4, 8],
                    "allowed_expert_parallel_sizes": [1, 2, 4, 8],
                },
            },
        )
        study = load_study_spec(study_path)
        store = StudyStore(tmp_path / ".aituner" / "studies")
        store.init_study(spec_path=study_path, study=study)

        # Minimal state: one completed incumbent on a TP=2 / DP=4 topology.
        state = StudyState(
            study_id=study.study_id,
            best_trial_id="trial-0002",
            next_trial_index=3,
            trials=[
                TrialSummary(
                    trial_id="trial-0002",
                    status="completed",
                    config_patch={
                        "env_patch": {},
                        "flag_patch": {
                            "tensor-parallel-size": 2,
                            "data-parallel-size": 4,
                        },
                    },
                )
            ],
        )

        # Proposal pins TP=4 / DP=2 explicitly alongside the runtime knob.
        explicit_patch = {
            "env_patch": {},
            "flag_patch": {
                "tensor-parallel-size": 4,
                "data-parallel-size": 2,
                "max-num-seqs": 160,
            },
        }
        proposal = Proposal.from_dict(
            {
                "observation": "Validate base topology runtime.",
                "diagnosis": "Explicitly keep base topology and adjust concurrency.",
                "config_patch": explicit_patch,
                "expected_effects": ["test base topology runtime headroom"],
            }
        )

        trial, _ = store.materialize_trial(study=study, state=state, proposal=proposal)

        # The explicit flags survive untouched; the incumbent's TP=2/DP=4 do not leak in.
        self.assertEqual(
            trial.config_patch.flag_patch,
            {
                "tensor-parallel-size": 4,
                "data-parallel-size": 2,
                "max-num-seqs": 160,
            },
        )
def test_ingest_trial_results_records_failure_reason(self) -> None:
|
||||
with tempfile.TemporaryDirectory() as tmp:
|
||||
tmp_path = Path(tmp)
|
||||
|
||||
Reference in New Issue
Block a user