Add deeper infeasible probe diagnostics
This commit is contained in:
22
configs/examples/dash0_manual_trial2_proposal.json
Normal file
22
configs/examples/dash0_manual_trial2_proposal.json
Normal file
@@ -0,0 +1,22 @@
{
  "observation": "Long-context chat traffic is dominated by large prefills, so TTFT under the stepped SLO is the binding constraint. A launch-safe baseline should spread compute across all 4 GPUs while limiting concurrent long-prefill contention.",
  "diagnosis": "The FLASHINFER attempt failed at startup, but the safer FLASH_ATTN family launched successfully. A conservative seq cap plus a moderate batched-token cap is a better baseline for diagnosing whether the SLO itself is too aggressive under this trace.",
  "config_patch": {
    "env_patch": {
      "VLLM_ATTENTION_BACKEND": "FLASH_ATTN"
    },
    "flag_patch": {
      "tensor-parallel-size": 4,
      "max-num-seqs": 16,
      "max-num-batched-tokens": 24576,
      "gpu-memory-utilization": 0.94,
      "block-size": 32
    }
  },
  "expected_effects": [
    "Stable 4-GPU launch without FLASHINFER warmup failure",
    "Lower head-of-line blocking than larger sequence caps",
    "More interpretable lower-bound throughput/SLO measurement"
  ],
  "why_not_previous_failures": "This proposal keeps the launch-safe FLASH_ATTN backend and the conservative batching limits that already avoided the earlier FLASHINFER startup failure."
}
Reference in New Issue
Block a user