Discrete-event simulator for evaluating KV cache-aware routing policies in prefill-disaggregated LLM serving clusters. Models a two-tier KV cache hierarchy (L0 GPU HBM + L1 CPU DRAM) with RDMA/PCIe link contention, architecture-derived roofline compute (MoE, MLA, DSA), and a cluster-wide meta-store for prefix-aware routing decisions. Includes 11 routing policies (random, round_robin, least_loaded, least_tokens, ttl_aware, precise, min_pd, cache_load, cache_score, estimated_ttft, prefix_affinity), HuggingFace config.json auto-parsing, built-in GPU hardware presets (H100/H800/H20/A100/B200), and ablation tooling for systematic policy comparison across real Alibaba serving traces.
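
The routing policies differ mainly in how they score candidate instances. As a rough illustration (hypothetical names and types, not the simulator's actual API), a cache- and load-aware policy in the spirit of cache_load might combine queue pressure, scaled by the router's load_alpha knob, with the prefix blocks already cached on each instance:

from dataclasses import dataclass

# Minimal sketch of a cache/load-aware routing score. InstanceState and
# route() are illustrative stand-ins, not the simulator's real classes.
@dataclass
class InstanceState:
    queued_tokens: int         # tokens waiting in this instance's prefill queue
    cached_prefix_blocks: int  # blocks of the request prefix already in L0/L1

def route(instances: list[InstanceState], prompt_blocks: int,
          block_size_tokens: int = 16, load_alpha: float = 1.0) -> int:
    """Pick the instance minimizing (load cost - cache benefit)."""
    def score(i: int) -> float:
        inst = instances[i]
        # Cached prefix blocks skip prefill compute; queued tokens add delay.
        hit_tokens = min(inst.cached_prefix_blocks, prompt_blocks) * block_size_tokens
        return load_alpha * inst.queued_tokens - hit_tokens
    return min(range(len(instances)), key=score)

Policies like precise and estimated_ttft presumably refine the load term (e.g., probing top-k candidates, per the precise_probe_* knobs below), while prefix_affinity weights the cache term more heavily.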
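The example config below wires these pieces together: model architecture from a local config.json, an H20 hardware preset, a 32-instance cluster with the min_pd router, and a trace-driven simulation run.
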
# Qwen3-Coder-480B-A35B (MoE, GQA) on 8 x H20 (96GB each).
# Architecture auto-loaded from HuggingFace config.json.

model:
  config_json: ../models/Qwen3-Coder-480B-A35B-Instruct-FP8/config.json
  name: qwen3-coder-480b
  dtype_bytes: 1            # FP8 inference
  block_size_tokens: 16

hardware:
  type: 8xh20
  hbm_bytes: 400.0e9        # KV budget after FP8 weights on 8x96GB

cluster:
  num_instances: 32
  meta_store:
    ttl_seconds: 120.0
  router:
    mode: min_pd
    precise_probe_latency_us: 50.0
    precise_probe_topk: 4
    load_alpha: 1.0

sim:
  trace_path: traces/qwen_coder_blksz_16.jsonl
  max_requests: null
  output_dir: runs/qwen3_coder_8xh20
  sample_interval_s: 1.0
  seed: 42
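
For intuition on the hbm_bytes budget: per-instance KV capacity in blocks follows from the per-token KV footprint, which the config.json auto-parser can derive from standard HuggingFace fields. A back-of-the-envelope sketch for GQA models like this one (field fallbacks are assumptions; MLA/DSA models compress the KV cache and need a different formula):

import json

def kv_bytes_per_block(config_path: str, dtype_bytes: int = 1,
                       block_size_tokens: int = 16) -> int:
    """Per-block KV bytes for a GQA model, from HuggingFace config fields."""
    with open(config_path) as f:
        cfg = json.load(f)
    layers = cfg["num_hidden_layers"]
    kv_heads = cfg.get("num_key_value_heads", cfg["num_attention_heads"])
    head_dim = cfg.get("head_dim") or cfg["hidden_size"] // cfg["num_attention_heads"]
    per_token = 2 * layers * kv_heads * head_dim * dtype_bytes  # K + V, all layers
    return per_token * block_size_tokens

# Blocks that fit in the configured budget:
# int(400.0e9 // kv_bytes_per_block(
#     "../models/Qwen3-Coder-480B-A35B-Instruct-FP8/config.json"))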