From 06d4c380b36685cc2f516b4c1098828f389a961a Mon Sep 17 00:00:00 2001
From: Gahow Wang
Date: Fri, 10 Apr 2026 17:43:02 +0800
Subject: [PATCH] Align qwen27b baseline proposal with topology study

---
 .../dash0_qwen27b_tight_slo_baseline_proposal.json | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/configs/examples/dash0_qwen27b_tight_slo_baseline_proposal.json b/configs/examples/dash0_qwen27b_tight_slo_baseline_proposal.json
index 662a30d..8c40200 100644
--- a/configs/examples/dash0_qwen27b_tight_slo_baseline_proposal.json
+++ b/configs/examples/dash0_qwen27b_tight_slo_baseline_proposal.json
@@ -3,20 +3,18 @@
   "diagnosis": "This model uses a long-context hybrid stack and fp8 quantization. The safest first measurement is to preserve the existing warmup, hybrid-model, chunked-prefill, and prefix-caching behavior from run_qwen27b.sh, while keeping a conservative sequence cap.",
   "config_patch": {
     "env_patch": {
-      "VLLM_ATTENTION_BACKEND": "FLASH_ATTN",
-      "VLLM_ENABLE_TORCH_COMPILE": "1",
-      "VLLM_USE_FLASHINFER_SAMPLER": "0",
-      "VLLM_ENABLE_MODEL_RUNNER_WARMUP": "1"
+      "VLLM_ENABLE_TORCH_COMPILE": "1"
     },
     "flag_patch": {
       "tensor-parallel-size": 4,
+      "data-parallel-size": 1,
+      "expert-parallel-size": 1,
       "gpu-memory-utilization": 0.9,
       "block-size": 64,
       "max-num-batched-tokens": 8192,
       "max-num-seqs": 16,
       "enable-prefix-caching": true,
-      "enable-chunked-prefill": true,
-      "disable-cascade-attn": true
+      "enable-chunked-prefill": true
     }
   },
   "expected_effects": [
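
For illustration, a minimal Python sketch of how the patched env_patch/flag_patch could be rendered into a vllm serve command line. The build_command helper and the model id are assumptions for this sketch, not part of the patch; the flag names are taken verbatim from the JSON keys above.

    # Illustrative sketch only (not part of the patch): render the patched
    # env_patch/flag_patch into a `vllm serve` command line. The model id
    # used below is a hypothetical placeholder.
    import os
    import shlex

    ENV_PATCH = {"VLLM_ENABLE_TORCH_COMPILE": "1"}
    FLAG_PATCH = {
        "tensor-parallel-size": 4,
        "data-parallel-size": 1,
        "expert-parallel-size": 1,
        "gpu-memory-utilization": 0.9,
        "block-size": 64,
        "max-num-batched-tokens": 8192,
        "max-num-seqs": 16,
        "enable-prefix-caching": True,
        "enable-chunked-prefill": True,
    }

    def build_command(model: str) -> str:
        # Flag names are copied verbatim from the JSON keys above;
        # booleans become bare switches, everything else --key=value.
        parts = ["vllm", "serve", model]
        for key, value in FLAG_PATCH.items():
            if isinstance(value, bool):
                if value:
                    parts.append(f"--{key}")
            else:
                parts.append(f"--{key}={value}")
        return " ".join(shlex.quote(p) for p in parts)

    os.environ.update(ENV_PATCH)  # apply env_patch to the launch environment
    print(build_command("Qwen/Qwen2-7B"))  # hypothetical model id

Under this rendering, a key removed by the patch (e.g. disable-cascade-attn) is simply no longer emitted, so the server falls back to its built-in default for that option.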