Add decode-only study mode support

This commit is contained in:
2026-04-09 11:23:17 +08:00
parent 96140b79bb
commit c158807fac
6 changed files with 282 additions and 1 deletion

View File

@@ -0,0 +1,13 @@
{
"observation": "This is the decode-only baseline directly aligned with run_qwen235b_decode.sh, including DecodeBenchConnector and the internal qwen3-235b launch shape.",
"diagnosis": "A baseline measurement is required before proposing decode-only throughput changes. Preserve all current envs and flags to establish the first feasible sampling_u/request_rate point under the TPOT-only SLO.",
"config_patch": {
"env_patch": {},
"flag_patch": {}
},
"expected_effects": [
"Establish a launch-safe baseline for decode-only TPOT tuning",
"Seed later trials from the first feasible sampling_u if one exists"
],
"why_not_previous_failures": "No previous failures in this study."
}

View File

@@ -0,0 +1,187 @@
{
"study_id": "dash0-qwen235b-decode-thinking-run1",
"hardware": {
"gpu_count": 8,
"gpu_model": "H20",
"host_candidates": [
"dash0"
]
},
"model": {
"model_id": "qwen3-235b-a22b-256k-0717-internal",
"served_model_name": "qwen3-235b-decode-aituner"
},
"engine": {
"engine_name": "vllm",
"engine_version": "internal-on-dash0",
"exec_path": "/usr/local/bin/vllm",
"cwd": "/home/admin/cpfs/wjh/aituner/aituner",
"host": "127.0.0.1",
"port": 18120,
"healthcheck_path": "/v1/models",
"ready_timeout_s": 1800,
"request_timeout_s": 1800,
"launch_args": [
"serve",
"/home/admin/resource/model/464482ce.qwen3-235b-a22b/256k-0717"
],
"base_envs": {
"CUDA_VISIBLE_DEVICES": "0,1,2,3,4,5,6,7",
"DS_LLM_MULTI_ENGINE_NUM": "2",
"DS_GPU_NUM": "8",
"DS_LLM_GRACEFUL_SHUTDOWN_WAIT_SECONDS": "600",
"DASHGEN_DEPLOYMENT_ROLE": "decode",
"DS_LLM_IGNORE_WARMUP": "1",
"DS_MODEL_PRELOAD_TO_SHM": "1",
"DS_LLM_PD_DECODE_FIRST_TIMEOUT_TIME": "120",
"DS_LLM_SERVER_MAX_CONCURRENCY": "256",
"AQUILA_RPC_ENABLE_WSPP": "1",
"AQUILA_HEALTHY_PROCESS_TIME_AVG_THRESHOLD": "7200000",
"VLLM_FUSE_QKNORM_AND_ROPE": "1",
"VLLM_FUSE_QKNORM_ROPE_AND_KVCACHE_WRITE": "1",
"VLLM_FP8_USE_BLADNN": "0",
"VLLM_MOE_USE_BLADNN": "0",
"VLLM_USE_V1": "1",
"VLLM_ENABLE_TORCH_COMPILE": "1",
"VLLM_ATTENTION_BACKEND": "FLASH_ATTN",
"VLLM_QUANTIZATION_LAYER_WISE": "1",
"VLLM_MOE_USE_DEEPEP": "1",
"VLLM_ENABLE_TBO_OPT": "0",
"VLLM_MOE_BALANCED_GATING": "0",
"VLLM_MOE_RANDOM_GATING": "0",
"VLLM_FUSED_MOE_CHUNK_SIZE": "4096",
"VLLM_DP_META_USE_CPU_GROUP": "0",
"VLLM_MLA_FP8_ATTENTION": "0",
"VLLM_MOE_EXPERTS_OVERLAP": "1",
"VLLM_USE_FLASHINFER_SAMPLER": "0",
"VLLM_DP_MASTER_PORT": "9524",
"VLLM_RESPONSE_TIMEOUT": "120",
"VLLM_PD_TRY_CONNECT_TIMEOUT_SECONDS": "150",
"VLLM_KVT_MAX_DELAY_MS": "2000",
"VLLM_DEEP_GEMM_WARMUP": "skip",
"TORCH_CUDA_ARCH_LIST": "9.0+PTX",
"CUDA_DEVICE_MAX_CONNECTIONS": "1",
"ENABLE_SWAPAB": "1",
"ACCL_WRITEBATCH_OPT": "2",
"ACCL_IBV_MTU": "9000",
"ACCL_TX_DEPTH": "1024",
"ACCL_RETRANSMIT_TIMEOUT": "17",
"ACCL_C4_STATS_MODE": "CONN",
"ACCL_IB_SPLIT_DATA_NUM": "4",
"ACCL_IB_QPS_LOAD_BALANCE": "1",
"ACCL_IB_GID_INDEX_FIX": "1",
"ACCL_LOG_TIME": "1",
"NCCL_NVLS_ENABLE": "0",
"NCCL_CUMEM_ENABLE": "0",
"ACCL_NORMAL_MODE": "ibrc",
"ACCL_LOAD_BALANCE": "1",
"ACCL_TOPO_FIX": "1",
"ACCL_LOW_LATENCY_COMBINE_USE_FP8": "1",
"ACCL_LOW_LATENCY_BUFFER_FP8_OPT": "1",
"ACCL_LOW_LATENCY_BUFFER_USE_SINGLE": "1",
"ACCL_DISPATCH_NUM_WARP_GROUPS": "4",
"ACCL_COMBINE_NUM_WARP_GROUPS": "4",
"ACCL_LOW_LATENCY_OPTIMIZE": "3",
"BLLM_KVTRANS_RDMA_SP": "2",
"NCCL_IB_TC": "136",
"NCCL_IB_SL": "5",
"NCCL_IB_GID_INDEX": "3",
"NCCL_SOCKET_IFNAME": "eth1",
"NCCL_SOCKET_FAMILY": "AF_INET",
"NCCL_DEBUG": "WARN",
"NCCL_DEBUG_SUBSYS": "TUNING",
"NCCL_IB_HCA": "mlx5",
"NCCL_IB_TIMEOUT": "22",
"NCCL_IB_QPS_PER_CONNECTION": "4",
"NCCL_MIN_NCHANNELS": "2",
"NCCL_NET_PLUGIN": "none",
"NVSHMEM_ENABLE_NIC_PE_MAPPING": "1",
"NVSHMEM_HCA_PE_MAPPING": "mlx5_0:1:2,mlx5_1:1:2,mlx5_2:1:2,mlx5_3:1:2",
"NVSHMEM_IBRC_ROCE_LAG_PORT_SELECTION": "3",
"NVSHMEM_IB_GID_INDEX": "3",
"NVSHMEM_IB_ENABLE_IBGDA": "1",
"NVSHMEM_IB_TRAFFIC_CLASS": "16",
"NVSHMEM_BOOTSTRAP_UID_SOCK_IFNAME": "eth1",
"NVSHMEM_IBGDA_NUM_RC_PER_PE": "4"
},
"base_flags": {
"host": "127.0.0.1",
"port": 18120,
"served-model-name": "qwen3-235b-decode-aituner",
"gpu-memory-utilization": 0.75,
"max-model-len": 262144,
"enable-chunked-prefill": true,
"speculative-config": "{\"method\":\"eagle3\",\"num_speculative_tokens\":2,\"hf_overrides\":{\"rope_scaling\":{\"type\":\"yarn\",\"factor\":128,\"original_max_position_embeddings\":2048,\"semi_dynamic\":false,\"dynamic\":true},\"num_experts\":0},\"model\":\"/home/admin/resource/model/464482ce.qwen3-235b-a22b/0717-eagle-0820\"}",
"enable-prefix-caching": true,
"max-num-batched-tokens": 1024,
"enable-expert-parallel": true,
"block-size": 64,
"max-num-seqs": 192,
"disable-custom-all-reduce": true,
"quantization": "fp8",
"expert-parallel-size": 8,
"data-parallel-size": 2,
"tensor-parallel-size": 4,
"cuda-graph-sizes": [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 40, 48,
64, 128, 192
],
"compilation-config": "{\"cudagraph_mode\":\"FULL_DECODE_ONLY\",\"use_inductor\":true}",
"hf-overrides": "{\"architectures\":[\"Qwen3MoeForCausalLM\"],\"model_type\":\"qwen3_moe\"}",
"kv-cache-dtype": "fp8",
"disable-hybrid-kv-cache-manager": true,
"kv-transfer-config": "{\"kv_connector\":\"DecodeBenchConnector\",\"kv_role\":\"kv_both\"}",
"disable-log-requests": true
},
"tunable_envs": [
"VLLM_ENABLE_TORCH_COMPILE",
"VLLM_ENABLE_TBO_OPT",
"VLLM_USE_FLASHINFER_SAMPLER",
"CUDA_DEVICE_MAX_CONNECTIONS"
],
"tunable_flags": [
"gpu-memory-utilization",
"max-num-batched-tokens",
"max-num-seqs",
"block-size"
],
"python_executable": "python3"
},
"trace": {
"windows_path": "/home/admin/cpfs/wjh/aituner/aituner/trace_windows/windows.json",
"window_id": "thinking_w20260327_1000",
"request_mode": "decode_only",
"u_field": "sampling_u",
"timestamp_field": "timestamp",
"max_concurrency": 128,
"replay_time_scale": 1.0,
"early_stop_max_lag_s": 180.0,
"early_stop_max_elapsed_s": 1200.0
},
"slo": {
"target_pass_rate": 0.95,
"tpot_rule": {
"kind": "fixed_ms",
"threshold_ms": 20
}
},
"search": {
"low": 0.0,
"high": 0.125,
"tolerance": 0.001,
"max_probes": 6,
"sample_seed": 20260325
},
"llm": {
"system_prompt": "You are tuning a decode-only vLLM serving stack. TTFT is not an enforced objective unless the study says so. Propose one launch-safe config patch that increases the maximum feasible sampling_u while keeping p95-style TPOT pass rate at or above target.",
"max_history_trials": 8,
"endpoint": {
"provider": "codex",
"model": "gpt-5.4",
"stream": true,
"api_key_env": "OPENAI_API_KEY",
"timeout_s": 240
}
}
}

View File

@@ -15,6 +15,15 @@ def build_prompt(
state: StudyState,
capability_profile: dict[str, Any] | None,
) -> str:
objective_notes: list[str] = []
if study.trace.request_mode == "decode_only":
objective_notes.append(
"This study is decode-only. The engine uses a KV decode benchmark connector, so TTFT is informational only unless an explicit TTFT rule is configured."
)
if study.slo.ttft_rule is None:
objective_notes.append("There is no TTFT SLO for this study.")
if study.slo.tpot_rule is None:
objective_notes.append("There is no TPOT SLO for this study.")
history = []
for trial in state.trials[-study.llm.max_history_trials :]:
history.append(
@@ -57,6 +66,7 @@ def build_prompt(
},
"trace": {
"window_id": study.trace.window_id,
"request_mode": study.trace.request_mode,
"input_length_filter": (
{
"min_input_tokens": study.trace.input_length_filter.min_input_tokens,
@@ -88,6 +98,7 @@ def build_prompt(
"target_pass_rate": study.slo.target_pass_rate,
"ttft_rule": study.slo.ttft_rule,
"tpot_rule": study.slo.tpot_rule,
"objective_notes": objective_notes,
},
default=lambda value: value.__dict__,
ensure_ascii=False,

View File

@@ -237,6 +237,7 @@ class TraceSpec:
windows_path: str
window_id: str
trace_file_override: str | None
request_mode: str
u_field: str
timestamp_field: str
max_concurrency: int
@@ -251,12 +252,16 @@ class TraceSpec:
def from_dict(cls, data: Mapping[str, Any]) -> "TraceSpec":
max_requests = data.get("max_requests_per_probe")
synthetic_prompt_cap = data.get("synthetic_prompt_cap_tokens")
request_mode = str(data.get("request_mode") or "chat").strip().lower()
if request_mode not in {"chat", "decode_only"}:
raise SpecError("trace.request_mode must be one of: chat, decode_only.")
return cls(
windows_path=_require_str(data.get("windows_path"), context="trace.windows_path"),
window_id=_require_str(data.get("window_id"), context="trace.window_id"),
trace_file_override=str(data["trace_file_override"]).strip()
if data.get("trace_file_override")
else None,
request_mode=request_mode,
u_field=str(data.get("u_field") or "sampling_u").strip(),
timestamp_field=str(data.get("timestamp_field") or "timestamp").strip(),
max_concurrency=_require_int(

View File

@@ -71,6 +71,7 @@ def _latency_summary(
tpot_values = [float(item.tpot_ms) for item in outcomes if item.tpot_ms is not None]
return {
"observed_request_count": len(outcomes),
"request_mode": study.trace.request_mode,
"ttft_ms": _metric_summary(ttft_values),
"tpot_ms": _metric_summary(tpot_values),
"failed_reason_counts": _reason_counts(evaluations),

View File

@@ -34,7 +34,10 @@ from aituner.trace import TraceRequest
def _write_study_assets(
tmp_path: Path, *, trace_overrides: dict[str, object] | None = None
tmp_path: Path,
*,
trace_overrides: dict[str, object] | None = None,
slo_overrides: dict[str, object] | None = None,
) -> Path:
trace_dir = tmp_path / "trace_windows" / "traces"
trace_dir.mkdir(parents=True)
@@ -148,6 +151,8 @@ def _write_study_assets(
"llm": {"system_prompt": "Tune it.", "max_history_trials": 8},
"capability_profile_path": str(capability_path)
}
if slo_overrides:
study_payload["slo"].update(slo_overrides)
study_path.write_text(json.dumps(study_payload), encoding="utf-8")
return study_path
@@ -222,6 +227,30 @@ class CoreFlowTests(unittest.TestCase):
with self.assertRaisesRegex(SpecError, "min_input_tokens must be <="):
load_study_spec(study_path)
def test_decode_only_mode_is_loaded_and_prompt_mentions_it(self) -> None:
    """A decode_only study loads, and the built LLM prompt surfaces that mode."""
    with tempfile.TemporaryDirectory() as tmp:
        workdir = Path(tmp)
        # Author a study whose trace runs decode-only under a TPOT-only SLO.
        spec_path = _write_study_assets(
            workdir,
            trace_overrides={"request_mode": "decode_only"},
            slo_overrides={
                "ttft_rule": None,
                "tpot_rule": {"kind": "fixed_ms", "threshold_ms": 20},
            },
        )
        study = load_study_spec(spec_path)
        self.assertEqual(study.trace.request_mode, "decode_only")
        window, requests = load_trace_requests(study, study_spec_path=spec_path)
        window_summary = summarize_window(requests, window)
        prompt = build_prompt(
            study=study,
            window_summary=window_summary,
            state=StudyState(study_id=study.study_id),
            capability_profile=None,
        )
        # The prompt must carry the request mode and the objective notes.
        for expected in (
            '"request_mode": "decode_only"',
            "There is no TTFT SLO for this study.",
            "decode-only",
        ):
            self.assertIn(expected, prompt)
def test_bailian_endpoint_defaults(self) -> None:
endpoint = LLMEndpointSpec.from_dict({"provider": "bailian", "model": "qwen-plus"})
self.assertEqual(endpoint.provider, "bailian")
@@ -481,6 +510,40 @@ class CoreFlowTests(unittest.TestCase):
self.assertFalse(evaluations[1].passed)
self.assertEqual(summary["slo_pass_rate"], 0.5)
def test_slo_evaluation_supports_tpot_only_95_percent_target(self) -> None:
    """With ttft_rule absent, only TPOT decides pass/fail per request."""
    with tempfile.TemporaryDirectory() as tmp:
        study = load_study_spec(
            _write_study_assets(
                Path(tmp),
                slo_overrides={
                    "ttft_rule": None,
                    "tpot_rule": {"kind": "fixed_ms", "threshold_ms": 20},
                },
            )
        )
        # TTFT values are deliberately huge: they must be ignored when no
        # TTFT rule is configured. Only r2's tpot_ms (21 > 20) should fail.
        cases = [
            ("r1", 3000, 10, 1000),
            ("r2", 9000, 21, 5000),
        ]
        outcomes = [
            RequestOutcome(
                request_id=request_id,
                success=True,
                ttft_ms=ttft_ms,
                tpot_ms=tpot_ms,
                prompt_tokens=prompt_tokens,
                completion_tokens=16,
            )
            for request_id, ttft_ms, tpot_ms, prompt_tokens in cases
        ]
        evaluations, summary = summarize_evaluations(outcomes, study.slo)
        self.assertEqual([item.passed for item in evaluations], [True, False])
        self.assertEqual(summary["slo_pass_rate"], 0.5)
        self.assertFalse(summary["feasible"])
def test_prepare_trace_windows_materializes_repo_local_assets(self) -> None:
with tempfile.TemporaryDirectory() as tmp:
tmp_path = Path(tmp)
@@ -1241,6 +1304,7 @@ class CoreFlowTests(unittest.TestCase):
evaluations = [evaluate_request(item, study.slo) for item in outcomes]
summary = _latency_summary(outcomes=outcomes, evaluations=evaluations, study=study)
self.assertEqual(summary["observed_request_count"], 2)
self.assertEqual(summary["request_mode"], "chat")
self.assertEqual(summary["ttft_ms"]["mean"], 150.0)
self.assertEqual(summary["ttft_ms"]["p50"], 100.0)
self.assertEqual(summary["ttft_ms"]["p99"], 200.0)