KVCache simulator for LLM serving cluster routing research
Discrete-event simulator for evaluating KV cache-aware routing policies in prefill-disaggregated LLM serving clusters. Models a two-tier KV cache hierarchy (L0 GPU HBM + L1 CPU DRAM) with RDMA/PCIe link contention, architecture-derived roofline compute (MoE, MLA, DSA), and a cluster-wide meta-store for prefix-aware routing decisions.

Includes 11 routing policies (random, round_robin, least_loaded, least_tokens, ttl_aware, precise, min_pd, cache_load, cache_score, estimated_ttft, prefix_affinity), HuggingFace config.json auto-parsing, built-in GPU hardware presets (H100/H800/H20/A100/B200), and ablation tooling for systematic policy comparison across real Alibaba serving traces.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
configs/glm5-8xb200.yaml
@@ -0,0 +1,67 @@
# GLM-5 (zai-org/GLM-5) served as a single tensor-parallel instance on
# 8 x NVIDIA B200 SXM (192 GB each, 1.5 TB aggregate HBM).
#
# GLM-5 is a 744B-total / 40B-active Mixture-of-Experts model (BF16),
# using DeepSeek Sparse Attention (DSA). The HF card does not publish
# layer/head shapes, so the values below are reasonable estimates based
# on the GLM-4.5 lineage; adjust once the official config.json is public.
#
# Hardware values below represent the *aggregate* of the 8-GPU TP group
# (one simulated "instance" == one 8xB200 serving replica). This is how
# the roofline in src/instance/compute.rs wants to see it: gpu_flops and
# gpu_mem_bw are the effective peaks seen by the TP'd model.
#
# Calibrate `flops_per_token_prefill` and `attn_quadratic_coeff` against
# measured prefill latency before trusting absolute TTFT numbers.
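#
# Rough calibration sketch (illustrative; assumes the roofline is purely
# compute-bound here, since bytes_per_token_prefill is 0): prefill time for a
# P-token prompt ≈ (P * flops_per_token_prefill + attn_quadratic_coeff * P^2)
# / gpu_flops. For short prompts the quadratic term is negligible, so
#   flops_per_token_prefill ≈ measured_prefill_seconds * gpu_flops / P
# e.g. a measured 20 ms prefill at P = 4096 against the 1.8e16 FLOPS aggregate
# implies ~8.8e10 FLOPs/token; fit attn_quadratic_coeff from the residual on
# long prompts.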

model:
  name: glm-5
  # --- estimates; refine from official config.json when available ---
  num_layers: 92
  num_kv_heads: 8          # GQA
  head_dim: 128
  dtype_bytes: 2           # BF16
  block_size_tokens: 16    # trace convention
  # Active-params-driven roofline: MoE activates ~40B params per token,
  # so non-attention prefill FLOPs/token ≈ 2 * 40e9 = 8e10.
  flops_per_token_prefill: 8.0e10
  # Quadratic attention term ≈ 2 * num_heads * head_dim. GLM-5 uses
  # DeepSeek Sparse Attention which is sub-quadratic in practice, so
  # this coefficient is an upper bound — lower it if your measurements
  # show DSA kicking in for long prompts.
  attn_quadratic_coeff: 2048.0
  bytes_per_token_prefill: 0.0
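  # Scale check (illustrative; assumes the standard
  # 2 * num_layers * num_kv_heads * head_dim * dtype_bytes KV layout):
  # 2 * 92 * 8 * 128 * 2 ≈ 0.38 MB of KV cache per token, i.e. ~6.0 MB per
  # 16-token block.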

hardware:
  # Aggregate of 8 x B200 in one tensor-parallel group.
  gpu_flops: 1.80e16       # 8 * 2.25 PFLOPS BF16 dense
  gpu_mem_bw: 6.40e13      # 8 * 8 TB/s HBM3e
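  # Worked example (illustrative): with the model block above, a 4096-token
  # prompt costs ~4096 * 8.0e10 ≈ 3.3e14 FLOPs (the 2048 * 4096^2 ≈ 3.4e10
  # attention term is negligible), giving a compute-bound prefill floor of
  # ~3.3e14 / 1.8e16 ≈ 18 ms before queueing, chunking, and KV-transfer costs.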
  # KV-cache budget after weights + activations. GLM-5 @ BF16 is ~1.49 TB,
  # which barely fits in 1.5 TB HBM; realistic serving uses FP8 weights
  # (~744 GB), leaving ~500 GB for activations + KV cache. Adjust if your
  # deployment uses a different weight dtype.
  hbm_bytes: 500.0e9
  dram_bytes: 1.5e12       # ~1.5 TB usable CPU DRAM / v6d per node
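  # Back-of-envelope capacity (illustrative, ignoring allocator/block
  # overheads): at ~0.38 MB of KV per token, 500 GB of HBM holds roughly
  # 1.3M cached tokens in L0 and the 1.5 TB DRAM tier roughly 4M in L1.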
  pcie_bw: 128.0e9         # PCIe Gen6 x16 ~ 128 GB/s per direction
  pcie_latency_us: 4.0
  rdma_bw: 50.0e9          # ConnectX-7 400 Gbps ≈ 50 GB/s
  rdma_latency_us: 6.0
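  # Illustrative link costs: a 2048-token prefix is ~0.77 GB of KV, i.e.
  # ~6 ms to promote from L1 over PCIe or ~15 ms to pull from a remote
  # instance over RDMA (the microsecond latencies are negligible); for
  # comparison, recomputing those 2048 tokens costs ~9 ms at the roofline
  # above.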
  max_batch_slots: 256
  prefill_chunk_tokens: 2048

cluster:
  num_instances: 8         # 8 TP replicas -> 64 B200s cluster-wide
  meta_store:
    ttl_seconds: 120.0
  router:
    mode: ttl_aware
    precise_probe_latency_us: 50.0
    precise_probe_topk: 4
    load_alpha: 1.0

sim:
  trace_path: qwen-bailian-usagetraces-anon/qwen_coder_blksz_16.jsonl
  max_requests: null
  output_dir: runs/glm5_8xb200
  sample_interval_s: 1.0
  seed: 42