diff --git a/configs/examples/dash0_qwen27b_tight_slo_baseline_proposal.json b/configs/examples/dash0_qwen27b_tight_slo_baseline_proposal.json
index 8c40200..9a0e7dd 100644
--- a/configs/examples/dash0_qwen27b_tight_slo_baseline_proposal.json
+++ b/configs/examples/dash0_qwen27b_tight_slo_baseline_proposal.json
@@ -1,26 +1,14 @@
 {
-  "observation": "The incumbent should start from the known launch-safe qwen3.5-27b serving recipe on dash0 before asking the LLM to optimize throughput above that baseline.",
-  "diagnosis": "This model uses a long-context hybrid stack and fp8 quantization. The safest first measurement is to preserve the existing warmup, hybrid-model, chunked-prefill, and prefix-caching behavior from run_qwen27b.sh, while keeping a conservative sequence cap.",
+  "observation": "The incumbent should start from the exact known launch-safe qwen3.5-27b serving recipe on dash0 before asking the LLM to optimize throughput per GPU above that baseline.",
+  "diagnosis": "This model uses a long-context hybrid stack and fp8 quantization. The correct first measurement is to preserve the TP1 run_qwen27b.sh baseline exactly, then let later trials explore TP/DP and runtime knobs from that anchor.",
   "config_patch": {
-    "env_patch": {
-      "VLLM_ENABLE_TORCH_COMPILE": "1"
-    },
-    "flag_patch": {
-      "tensor-parallel-size": 4,
-      "data-parallel-size": 1,
-      "expert-parallel-size": 1,
-      "gpu-memory-utilization": 0.9,
-      "block-size": 64,
-      "max-num-batched-tokens": 8192,
-      "max-num-seqs": 16,
-      "enable-prefix-caching": true,
-      "enable-chunked-prefill": true
-    }
+    "env_patch": {},
+    "flag_patch": {}
   },
   "expected_effects": [
-    "Launch-safe baseline aligned with the current hand-tuned qwen27b recipe while using all 4 visible H20 GPUs",
-    "Reliable first incumbent under the tighter TTFT and TPOT SLO",
-    "Clear trial history for the LLM to propose a higher-throughput follow-up patch"
+    "Launch-safe TP1 baseline exactly aligned with run_qwen27b.sh",
+    "Reliable first incumbent for per-GPU comparison under the tighter TTFT and TPOT SLO",
+    "Clean anchor before topology exploration across TP*DP groups"
   ],
-  "why_not_previous_failures": "This baseline intentionally avoids speculative new kernels or batching spikes before we have an incumbent under the new SLO."
+  "why_not_previous_failures": "This baseline intentionally introduces no patch at all, so it cannot repeat previous tuning-only failures."
 }