chore: update ablation and clean configs

2026-04-15 14:48:59 +08:00
parent eaf574cd4e
commit 365ceac3be
15 changed files with 879 additions and 324 deletions


@@ -1,66 +1,39 @@
-# GLM-5 (zai-org/GLM-5) served as a single tensor-parallel instance on
-# 8 x NVIDIA B200 SXM (192GB each, 1.5 TB aggregate HBM).
+# GLM-5 using HuggingFace config.json + hardware preset.
 #
-# GLM-5 is a 744B-total / 40B-active Mixture-of-Experts model (BF16),
-# using DeepSeek Sparse Attention (DSA). The HF card does not publish
-# layer/head shapes, so the values below are reasonable estimates based
-# on the GLM-4.5 lineage; adjust once the official config.json is public.
+# This config demonstrates the simplified format:
+#   model.config_json -> loads architecture from HF config.json
+#   hardware.type     -> loads GPU specs from a built-in preset
 #
-# Hardware values below represent the *aggregate* of the 8-GPU TP group
-# (one simulated "instance" == one 8xB200 serving replica). This is the
-# form the roofline in src/instance/compute.rs expects: gpu_flops and
-# gpu_mem_bw are the effective peaks seen by the TP'd model.
-#
-# Calibrate `flops_per_token_prefill` and `attn_quadratic_coeff` against
-# measured prefill latency before trusting absolute TTFT numbers.
+# Only deployment-specific fields need to be set explicitly.
+# Any field from config_json or the preset can be overridden in YAML.
 model:
-  name: glm-5
-  # --- estimates; refine from the official config.json when available ---
-  num_layers: 92
-  num_kv_heads: 8                  # GQA
-  head_dim: 128
-  dtype_bytes: 2                   # BF16
-  block_size_tokens: 16            # trace convention
-  # Active-params-driven roofline: MoE activates ~40B params per token,
-  # so non-attention prefill FLOPs/token ≈ 2 * 40e9 = 8e10.
-  flops_per_token_prefill: 8.0e10
-  # Quadratic attention term ≈ 2 * num_heads * head_dim. GLM-5 uses
-  # DeepSeek Sparse Attention, which is sub-quadratic in practice, so
-  # this coefficient is an upper bound; lower it if your measurements
-  # show DSA kicking in for long prompts.
-  attn_quadratic_coeff: 2048.0
-  bytes_per_token_prefill: 0.0
+  # Auto-detect architecture: MoE, MLA, DSA, head dims, etc.
+  config_json: ../models/GLM-5/config.json
+  name: glm-5                      # override HF model_type
+  dtype_bytes: 1                   # FP8 (not in HF config.json)
+  block_size_tokens: 512           # matches bailian-traces blksz_512
 hardware:
-  # Aggregate of 8 x B200 in one tensor-parallel group.
-  gpu_flops: 1.80e16               # 8 * 2.25 PFLOPS BF16 dense
-  gpu_mem_bw: 6.40e13              # 8 * 8 TB/s HBM3e
-  # KV-cache budget after weights + activations. GLM-5 @ BF16 is ~1.49TB,
-  # which barely fits in 1.5TB HBM; realistic serving uses FP8 weights
-  # (~744GB), leaving ~500GB for activations + KV cache. Adjust if your
-  # deployment uses a different weight dtype.
-  hbm_bytes: 500.0e9
-  dram_bytes: 1.5e12               # ~1.5 TB usable CPU DRAM / v6d per node
-  pcie_bw: 128.0e9                 # PCIe Gen6 x16 ~ 128 GB/s per direction
-  pcie_latency_us: 4.0
-  rdma_bw: 50.0e9                  # ConnectX-7 400 Gbps ≈ 50 GB/s
-  rdma_latency_us: 6.0
-  max_batch_slots: 256
-  prefill_chunk_tokens: 2048
+  type: 8xb200                     # 8 x B200 SXM (192GB each)
+  # Override preset values for this specific deployment:
+  hbm_bytes: 500.0e9               # KV budget after FP8 weights + activations
+  dram_bytes: 1.5e12               # ~1.5 TB usable CPU DRAM per node
+  max_batch_slots: 256
 cluster:
-  num_instances: 8                 # 8 TP replicas -> 64 B200s cluster-wide
+  num_instances: 32
 meta_store:
-  ttl_seconds: 120.0
+  ttl_seconds: 300.0
 router:
-  mode: ttl_aware
+  mode: min_pd
   precise_probe_latency_us: 50.0
   precise_probe_topk: 4
   load_alpha: 1.0
   prefix_k: 8
 sim:
-  trace_path: qwen-bailian-usagetraces-anon/qwen_coder_blksz_16.jsonl
+  trace_path: bailian-traces/glm_coder_blksz_512_040915-040917.jsonl
   max_requests: null
   output_dir: runs/glm5_8xb200
   sample_interval_s: 1.0
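
Two pieces of arithmetic behind the numbers above are worth making explicit: the roofline prefill estimate that src/instance/compute.rs applies to the aggregate gpu_flops / gpu_mem_bw, and the KV-cache budget behind hbm_bytes. Below is a minimal sketch of both, assuming the estimated GLM-5 shapes from the removed config (92 layers, 8 KV heads, head_dim 128); the function and parameter names are illustrative, not the actual compute.rs API.

```rust
// Hypothetical sketch; names are illustrative, not the real compute.rs API.
// Shapes assume the estimates from the removed config above.

/// Roofline prefill estimate over the aggregate tensor-parallel group.
fn prefill_seconds(
    n_tokens: f64,
    flops_per_token: f64,      // ~2 * active params = 8.0e10 for 40B active
    attn_quadratic_coeff: f64, // 2048.0; an upper bound under DSA
    bytes_per_token: f64,      // 0.0 in this config
    gpu_flops: f64,            // 1.80e16 FLOP/s (8 x 2.25 PFLOPS BF16 dense)
    gpu_mem_bw: f64,           // 6.40e13 B/s   (8 x 8 TB/s HBM3e)
) -> f64 {
    let flops = flops_per_token * n_tokens + attn_quadratic_coeff * n_tokens * n_tokens;
    let bytes = bytes_per_token * n_tokens;
    // Roofline: bound by whichever resource saturates first.
    (flops / gpu_flops).max(bytes / gpu_mem_bw)
}

fn main() {
    // An 8k-token prompt costs ~0.66 PFLOP -> ~36 ms, compute-bound.
    let t = prefill_seconds(8192.0, 8.0e10, 2048.0, 0.0, 1.80e16, 6.40e13);
    println!("estimated prefill: {:.1} ms", t * 1e3);

    // KV budget: bytes/token = 2 (K+V) * layers * kv_heads * head_dim * dtype.
    // At BF16 (2 B): 2 * 92 * 8 * 128 * 2 = 376,832 B/token, so the 500 GB
    // hbm_bytes holds ~1.33M tokens; FP8 KV (dtype_bytes: 1) roughly doubles it.
    let kv_per_token = 2.0 * 92.0 * 8.0 * 128.0 * 2.0;
    println!("KV capacity: ~{:.2}M tokens", 500.0e9 / kv_per_token / 1e6);
}
```

As the removed comments warned, the 2048.0 attention coefficient is an upper bound for a sub-quadratic DSA model, so both terms should be recalibrated against measured prefill latency before absolute TTFT numbers are trusted.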