{
  "observation": "The first incumbent should start from the known launch-safe qwen3.5-27b serving recipe on dash0 before the LLM is asked to optimize throughput above that baseline.",
  "diagnosis": "This model uses a long-context hybrid stack and fp8 quantization. The safest first measurement preserves the existing warmup, hybrid-model, chunked-prefill, and prefix-caching behavior from run_qwen27b.sh while keeping a conservative sequence cap.",
  "config_patch": {
    "env_patch": {
      "VLLM_ATTENTION_BACKEND": "FLASH_ATTN",
      "VLLM_ENABLE_TORCH_COMPILE": "1",
      "VLLM_USE_FLASHINFER_SAMPLER": "0",
      "VLLM_ENABLE_MODEL_RUNNER_WARMUP": "1"
    },
    "flag_patch": {
      "tensor-parallel-size": 4,
      "gpu-memory-utilization": 0.9,
      "block-size": 64,
      "max-num-batched-tokens": 8192,
      "max-num-seqs": 16,
      "enable-prefix-caching": true,
      "enable-chunked-prefill": true,
      "disable-cascade-attn": true
    }
  },
  "expected_effects": [
    "Launch-safe baseline aligned with the current hand-tuned qwen27b recipe while using all 4 visible H20 GPUs",
    "Reliable first incumbent under the tighter TTFT and TPOT SLOs",
    "Clear trial history from which the LLM can propose a higher-throughput follow-up patch"
  ],
  "why_not_previous_failures": "This baseline intentionally avoids speculative new kernels or batching spikes before we have an incumbent under the new SLO."
}
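For context, a minimal sketch of how a patch like this could be expanded into a launch command. It assumes the JSON above is saved as config_patch.json and uses a hypothetical placeholder for the model path; the tuning harness's actual apply step is not part of this artifact.

```python
import json

# Sketch only: expand env_patch/flag_patch into a `vllm serve` command line.
# Assumptions: the JSON artifact is saved as config_patch.json, and MODEL is a
# placeholder for the actual qwen3.5-27b checkpoint used by run_qwen27b.sh.
MODEL = "/path/to/qwen3.5-27b"  # hypothetical placeholder

with open("config_patch.json") as f:
    patch = json.load(f)["config_patch"]

# Environment variables become KEY=VALUE prefixes on the launch line.
env = " ".join(f"{k}={v}" for k, v in patch["env_patch"].items())

# Boolean flags become bare switches when true; other values are passed inline.
flags = []
for key, value in patch["flag_patch"].items():
    if isinstance(value, bool):
        if value:
            flags.append(f"--{key}")
    else:
        flags.append(f"--{key} {value}")

print(f"{env} vllm serve {MODEL} {' '.join(flags)}")
```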