Repair truncated LLM proposal JSON

This commit is contained in:
2026-04-07 11:38:08 +08:00
parent 94c89e1103
commit 79ba8a50c8
2 changed files with 63 additions and 3 deletions

View File

@@ -317,6 +317,31 @@ class CoreFlowTests(unittest.TestCase):
self.assertIn('"failure_reason": "engine_process_exited_before_ready exit_code=1"', prompt)
self.assertIn('"VLLM_ATTENTION_BACKEND": "FLASHINFER"', prompt)
def test_parse_proposal_text_repairs_truncated_json(self) -> None:
    """A proposal whose JSON was cut off before its closing braces is repaired and parsed."""
    with tempfile.TemporaryDirectory() as tmp:
        root = Path(tmp)
        study = load_study_spec(_write_study_assets(root))
        # Deliberately truncated: the closing "}" braces of the top-level
        # object are missing, mimicking an LLM response that stopped
        # mid-generation. The parser's repair path must supply them.
        truncated_payload = """
{
"observation": "obs",
"diagnosis": "diag",
"config_patch": {
"env_patch": {},
"flag_patch": {
"max-num-seqs": 24
}
},
"expected_effects": [
"faster batching"
],
"why_not_previous_failures": "none"
"""
        proposal = parse_proposal_text(truncated_payload, study)
        # The repaired document must yield a fully-populated proposal.
        self.assertEqual(proposal.diagnosis, "diag")
        self.assertEqual(proposal.config_patch.flag_patch["max-num-seqs"], 24)
def test_length_only_trace_rows_are_synthesized(self) -> None:
with tempfile.TemporaryDirectory() as tmp:
tmp_path = Path(tmp)