Add study tune loop and smoke configs

2026-04-04 22:29:59 +08:00
parent 7b7eaafd78
commit f192c741ed
8 changed files with 387 additions and 1 deletion

View File

@@ -0,0 +1,19 @@
{
"observation": "Push batching further after validating the balanced layout.",
"diagnosis": "If TTFT remains under control, a larger admission window should maximize achieved request rate.",
"config_patch": {
"env_patch": {},
"flag_patch": {
"tensor-parallel-size": 4,
"max-num-seqs": 24,
"max-num-batched-tokens": 98304,
"gpu-memory-utilization": 0.92,
"block-size": 64
}
},
"expected_effects": [
"Highest throughput among the smoke candidates if memory is sufficient",
"More pressure on TTFT, so the binary search should locate the safe threshold"
],
"why_not_previous_failures": "Keeps tp=4 and block-size stable while only expanding batching and memory utilization."
}
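
For context, a minimal sketch of how a proposal's `flag_patch` might be overlaid on the study's `base_flags` to form the vLLM launch command. The helper name, merge order (patch wins), and boolean-flag convention are assumptions for illustration, not code from this commit:

```python
# Hypothetical helper, not from this commit: overlay a proposal's flag_patch
# on the study's base_flags and render the vLLM CLI invocation.
def build_launch_cmd(exec_path, launch_args, base_flags, flag_patch):
    flags = {**base_flags, **flag_patch}      # patch overrides the baseline
    cmd = [exec_path, *launch_args]
    for key, value in flags.items():
        if value is True:                     # boolean flags become bare switches
            cmd.append(f"--{key}")
        else:
            cmd.extend([f"--{key}", str(value)])
    return cmd

# Example with the aggressive smoke candidate above (MODEL_PATH is a placeholder):
flag_patch = {
    "tensor-parallel-size": 4,
    "max-num-seqs": 24,
    "max-num-batched-tokens": 98304,
    "gpu-memory-utilization": 0.92,
    "block-size": 64,
}
base_flags = {"host": "127.0.0.1", "port": 18080, "trust-remote-code": True}
print(build_launch_cmd("/usr/local/bin/vllm", ["serve", "MODEL_PATH"],
                       base_flags, flag_patch))
```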

View File

@@ -0,0 +1,19 @@
{
"observation": "Increase batching once tp=4 is fixed.",
"diagnosis": "Throughput should improve if the engine can admit more concurrent prefills without violating TTFT.",
"config_patch": {
"env_patch": {},
"flag_patch": {
"tensor-parallel-size": 4,
"max-num-seqs": 16,
"max-num-batched-tokens": 65536,
"gpu-memory-utilization": 0.9,
"block-size": 64
}
},
"expected_effects": [
"Higher feasible sampling_u than the conservative baseline",
"Better token throughput if memory headroom is sufficient"
],
"why_not_previous_failures": "Raises batching in a controlled step instead of jumping directly to the most aggressive setting."
}

View File

@@ -0,0 +1,19 @@
{
"observation": "Start from a safe tp=4 layout and conservative batching.",
"diagnosis": "The first pass should verify multi-GPU launch and avoid queueing collapse from over-batching.",
"config_patch": {
"env_patch": {},
"flag_patch": {
"tensor-parallel-size": 4,
"max-num-seqs": 8,
"max-num-batched-tokens": 32768,
"gpu-memory-utilization": 0.85,
"block-size": 64
}
},
"expected_effects": [
"Stable startup on 4x H20",
"Low risk of OOM during the first binary-search probes"
],
"why_not_previous_failures": "This is the initial baseline proposal."
}
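
Taken together, the three proposals form an escalation ladder from conservative to aggressive batching. A sketch of driver logic that walks that ladder, assumed rather than taken from this commit (`feasible_u` is a hypothetical callback wrapping the binary search):

```python
# Assumed driver logic, not from this commit: try the smoke candidates from
# conservative to aggressive and keep the patch with the best feasible
# sampling_u, stopping if a candidate fails to launch or hits OOM.
CANDIDATES = [  # ordered baseline -> moderate -> aggressive
    {"max-num-seqs": 8,  "max-num-batched-tokens": 32768, "gpu-memory-utilization": 0.85},
    {"max-num-seqs": 16, "max-num-batched-tokens": 65536, "gpu-memory-utilization": 0.90},
    {"max-num-seqs": 24, "max-num-batched-tokens": 98304, "gpu-memory-utilization": 0.92},
]

def pick_best(candidates, feasible_u):
    """feasible_u(patch) -> max sampling_u meeting the SLO, or None on failure."""
    best_patch, best_u = None, -1.0
    for patch in candidates:
        u = feasible_u(patch)
        if u is None:          # launch failure or OOM: stop escalating
            break
        if u > best_u:
            best_patch, best_u = patch, u
    return best_patch, best_u
```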

View File

@@ -0,0 +1,83 @@
{
"study_id": "dash0-qwen30b-chat-smoke",
"hardware": {
"gpu_count": 4,
"gpu_model": "H20",
"host_candidates": ["dash0"]
},
"model": {
"model_id": "qwen3-30b-a3b",
"served_model_name": "qwen3-30b-smoke"
},
"engine": {
"engine_name": "vllm",
"engine_version": "0.13.0rc2.dev2111+gb44b43f43.d20260309",
"exec_path": "/usr/local/bin/vllm",
"cwd": "/home/admin/cpfs/wjh/aituner/aituner",
"host": "127.0.0.1",
"port": 18080,
"healthcheck_path": "/v1/models",
"ready_timeout_s": 900,
"request_timeout_s": 900,
"launch_args": [
"serve",
"/home/admin/resource/model/464482ce.qwen3-30b-a3b/1m-instruct-0726-fp4"
],
"base_envs": {
"CUDA_VISIBLE_DEVICES": "0,1,2,3"
},
"base_flags": {
"host": "127.0.0.1",
"port": 18080,
"served-model-name": "qwen3-30b-smoke",
"max-model-len": 65536,
"disable-log-requests": true,
"trust-remote-code": true
},
"tunable_envs": [
"VLLM_ATTENTION_BACKEND"
],
"tunable_flags": [
"tensor-parallel-size",
"max-num-seqs",
"max-num-batched-tokens",
"gpu-memory-utilization",
"block-size"
],
"python_executable": "python3"
},
"trace": {
"windows_path": "/home/admin/cpfs/wjh/aituner/aituner/trace_windows/windows.json",
"window_id": "chat_w20260311_1000",
"u_field": "sampling_u",
"timestamp_field": "timestamp",
"max_concurrency": 2,
"max_requests_per_probe": 24
},
"slo": {
"target_pass_rate": 0.95,
"ttft_rule": {
"kind": "step_ms",
"buckets": [
{"max_input_tokens": 4096, "threshold_ms": 15000},
{"max_input_tokens": 16384, "threshold_ms": 30000},
{"threshold_ms": 45000}
]
},
"tpot_rule": {
"kind": "fixed_ms",
"threshold_ms": 1500
}
},
"search": {
"low": 0.0,
"high": 1.0,
"tolerance": 0.1,
"max_probes": 4,
"sample_seed": 20260325
},
"llm": {
"system_prompt": "Propose a single engine config patch that increases the maximum feasible sampling_u under the SLO target.",
"max_history_trials": 8
}
}
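
For reference, a minimal sketch of how the `slo` and `search` blocks above might drive the binary search over `sampling_u`. `run_probe` is a hypothetical stand-in for replaying the trace window against the running engine; the function names are illustrative, not part of this commit:

```python
# Illustrative sketch, not part of this commit, of the bisection over
# sampling_u implied by the "search" block and the step_ms/fixed_ms SLO rules.

def ttft_threshold_ms(input_tokens, buckets):
    """Resolve the step_ms TTFT rule: the first bucket whose cap fits wins;
    a bucket without max_input_tokens is the catch-all."""
    for bucket in buckets:
        cap = bucket.get("max_input_tokens")
        if cap is None or input_tokens <= cap:
            return bucket["threshold_ms"]
    return buckets[-1]["threshold_ms"]

def probe_passes(u, run_probe, slo):
    """Replay the window at utilization u; check the TTFT/TPOT pass rate."""
    results = run_probe(u)  # -> list of (input_tokens, ttft_ms, tpot_ms)
    ok = sum(
        1 for input_tokens, ttft, tpot in results
        if ttft <= ttft_threshold_ms(input_tokens, slo["ttft_rule"]["buckets"])
        and tpot <= slo["tpot_rule"]["threshold_ms"]
    )
    return ok / max(len(results), 1) >= slo["target_pass_rate"]

def bisect_u(run_probe, slo, low=0.0, high=1.0, tolerance=0.1, max_probes=4):
    """Highest sampling_u that still meets the SLO, within tolerance."""
    best = low
    for _ in range(max_probes):
        if high - low <= tolerance:
            break
        mid = (low + high) / 2
        if probe_passes(mid, run_probe, slo):
            best, low = mid, mid
        else:
            high = mid
    return best
```

With `low=0.0`, `high=1.0`, `tolerance=0.1`, and `max_probes=4` as configured above, the search halves the interval at most four times before reporting the last passing `sampling_u`.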