Guide harness runtime refinement after TP

2026-05-06 02:46:07 +08:00
parent 50067c926d
commit 0622e23817
2 changed files with 205 additions and 9 deletions


@@ -669,6 +669,94 @@ class CoreFlowTests(unittest.TestCase):
        self.assertEqual(proposal.config_patch.flag_patch, {"tensor-parallel-size": 2})
        self.assertFalse(proposal.should_stop)

    def test_harness_guided_runtime_seed_preserves_tp_incumbent(self) -> None:
        with tempfile.TemporaryDirectory() as tmp:
            tmp_path = Path(tmp)
            study_path = _write_study_assets(
                tmp_path,
                engine_overrides={
                    "tunable_flags": [
                        "tensor-parallel-size",
                        "gpu-memory-utilization",
                        "enable-chunked-prefill",
                        "max-num-batched-tokens",
                    ],
                    "topology_constraints": {
                        "allowed_tensor_parallel_sizes": [1, 2, 4],
                        "allowed_tp_dp_products": [1, 2, 4],
                    },
                },
            )
            study = load_study_spec(study_path)
            result_path = tmp_path / "trial-0002.json"
            result_path.write_text(
                json.dumps(
                    {
                        "status": "completed",
                        "best_sampling_u": 0.75,
                        "best_request_rate": 6.0,
                        "best_pass_rate": 1.0,
                        "probes": [
                            {
                                "threshold": 0.75,
                                "feasible": True,
                                "payload": {
                                    "request_count": 100,
                                    "pass_rate": 1.0,
                                    "request_rate": 6.0,
                                    "early_stopped": False,
                                    "early_stop_reason": "",
                                    "latency_summary": {"failed_reason_counts": {}},
                                },
                            }
                        ],
                    }
                ),
                encoding="utf-8",
            )
            state = StudyState(
                study_id=study.study_id,
                best_trial_id="trial-0002",
                best_request_rate=6.0,
                best_request_rate_per_gpu=3.0,
                trials=[
                    TrialSummary(
                        trial_id="trial-0001",
                        status="completed",
                        best_request_rate=2.0,
                        best_request_rate_per_gpu=2.0,
                        config_patch={"env_patch": {}, "flag_patch": {}},
                    ),
                    TrialSummary(
                        trial_id="trial-0002",
                        status="completed",
                        best_request_rate=6.0,
                        best_request_rate_per_gpu=3.0,
                        result_path=str(result_path),
                        config_patch={
                            "env_patch": {},
                            "flag_patch": {"tensor-parallel-size": 2},
                        },
                    ),
                ],
            )
            context = build_harness_context(
                study=study,
                window_summary={"prompt_tokens_p99": 8100},
                state=state,
            )
            proposal = build_harness_guided_proposal(context)
            self.assertIsNotNone(proposal)
            self.assertEqual(
                proposal.config_patch.flag_patch,
                {
                    "tensor-parallel-size": 2,
                    "gpu-memory-utilization": 0.95,
                    "enable-chunked-prefill": True,
                    "max-num-batched-tokens": 16384,
                },
            )

    def test_trace_input_length_filter_keeps_only_matching_rows(self) -> None:
        with tempfile.TemporaryDirectory() as tmp:
            tmp_path = Path(tmp)
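The new test pins down a merge contract: when the incumbent trial already carries a tensor-parallel-size flag, the harness-guided proposal keeps it and layers the runtime-refinement seed on top rather than replacing the flag patch wholesale. Below is a minimal sketch of that contract, not the repository's implementation: seed_runtime_refinement, RUNTIME_SEED, and TOPOLOGY_FLAGS are illustrative names, and the seed values are taken directly from the test's assertions.

# Hypothetical sketch of the flag-merge contract asserted above.
RUNTIME_SEED = {
    "gpu-memory-utilization": 0.95,
    "enable-chunked-prefill": True,
    "max-num-batched-tokens": 16384,
}

# Topology flags chosen in an earlier phase are never overwritten here.
TOPOLOGY_FLAGS = {"tensor-parallel-size"}


def seed_runtime_refinement(incumbent_flag_patch: dict) -> dict:
    """Preserve the incumbent's topology flags, then add runtime seeds."""
    merged = dict(RUNTIME_SEED)
    for flag, value in incumbent_flag_patch.items():
        # The incumbent wins for topology flags and any flag the seed
        # does not already set.
        if flag in TOPOLOGY_FLAGS or flag not in merged:
            merged[flag] = value
    return merged


# Mirrors the test's expectation for the trial-0002 incumbent.
assert seed_runtime_refinement({"tensor-parallel-size": 2}) == {
    "tensor-parallel-size": 2,
    "gpu-memory-utilization": 0.95,
    "enable-chunked-prefill": True,
    "max-num-batched-tokens": 16384,
}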