chore: update ablation and clean configs

2026-04-15 14:48:59 +08:00
parent eaf574cd4e
commit 365ceac3be
15 changed files with 879 additions and 324 deletions

View File

@@ -1,68 +0,0 @@
# GLM-5 (zai-org/GLM-5) on 8 x B200 SXM (192GB each).
# Architecture from HuggingFace config.json — all roofline coefficients
# are derived automatically.
model:
  name: glm-5
  # Core architecture (from HF config.json)
  num_layers: 78
  hidden_size: 6144
  num_attention_heads: 64
  num_kv_heads: 64 # nominal only; MLA overrides KV cache sizing
  head_dim: 64
  intermediate_size: 12288 # shared expert FFN width
  dtype_bytes: 2 # BF16
  block_size_tokens: 512 # matches bailian-traces blksz_512
  # MoE: 256 routed + 1 shared, 8 active per token
  moe:
    num_experts: 256
    num_active_experts: 8
    num_shared_experts: 1
    expert_intermediate_size: 2048 # moe_intermediate_size
  # MLA (Multi-head Latent Attention): compressed KV cache
  mla:
    kv_lora_rank: 512
    q_lora_rank: 2048
    qk_nope_head_dim: 192
    qk_rope_head_dim: 64
    v_head_dim: 256
  # DSA (DeepSeek Sparse Attention): sub-quadratic past dense_window
  attention:
    type: dsa
    dense_window: 4096
    sparse_stride: 8
    first_dense_layers: 3
hardware:
  # Aggregate of 8 x B200 in one tensor-parallel group.
  gpu_flops: 1.80e16 # 8 * 2.25 PFLOPS BF16 dense
  gpu_mem_bw: 6.40e13 # 8 * 8 TB/s HBM3e
  # KV budget after FP8 weights + activations. GLM-5 FP8 ~744GB of 1536GB.
  hbm_bytes: 500.0e9
  dram_bytes: 1.5e12 # ~1.5 TB usable CPU DRAM / v6d per node
  pcie_bw: 128.0e9 # PCIe Gen6 x16
  pcie_latency_us: 4.0
  rdma_bw: 50.0e9 # ConnectX-7 400 Gbps
  rdma_latency_us: 6.0
  max_batch_slots: 256
  prefill_chunk_tokens: 4096
cluster:
  num_instances: 64
  meta_store:
    ttl_seconds: 300.0
  router:
    mode: min_pd
    precise_probe_latency_us: 50.0
    precise_probe_topk: 4
    load_alpha: 1.0
sim:
  trace_path: bailian-traces/glm_coder_blksz_512_040915-040917.jsonl
  max_requests: null
  output_dir: runs/glm5_8xb200_blk512
  sample_interval_s: 1.0
  seed: 42
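Since all roofline coefficients in this config are derived from the architecture block, the MLA KV-cache footprint can be sanity-checked by hand. The sketch below is illustrative only (it is not the code in src/instance/compute.rs) and assumes a DeepSeek-style MLA cache in which only the compressed latent (kv_lora_rank) and the decoupled RoPE key (qk_rope_head_dim) are stored per layer, per token.

// Hypothetical back-of-the-envelope check, not the simulator's implementation.
fn main() {
    let num_layers: u64 = 78;
    let kv_lora_rank: u64 = 512;     // compressed KV latent cached per token per layer
    let qk_rope_head_dim: u64 = 64;  // decoupled RoPE key cached alongside the latent
    let dtype_bytes: u64 = 2;        // BF16 KV cache
    let hbm_budget: f64 = 500.0e9;   // hbm_bytes from the config above

    // Assumed MLA cache layout: (latent + RoPE key) per layer, per token.
    let kv_bytes_per_token = num_layers * (kv_lora_rank + qk_rope_head_dim) * dtype_bytes;
    println!("MLA KV bytes/token: {kv_bytes_per_token}");   // 89,856 (~88 KiB)
    println!("tokens in budget  : {:.1}M", hbm_budget / kv_bytes_per_token as f64 / 1e6); // ~5.6M
}

At roughly 88 KiB per token, the 500 GB budget holds on the order of 5.6 million cached tokens.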

View File

@@ -1,40 +0,0 @@
# GLM-5 using HuggingFace config.json + hardware preset.
#
# This config demonstrates the simplified format:
# model.config_json — loads architecture from HF config.json
# hardware.type — loads GPU specs from built-in preset
#
# Only deployment-specific fields need to be set explicitly.
# Any field from config_json or the preset can be overridden in YAML.
model:
  # Auto-detect architecture: MoE, MLA, DSA, head dims, etc.
  config_json: ../models/GLM-5/config.json
  name: glm-5 # override HF model_type
  dtype_bytes: 1 # FP8 (not in HF config.json)
  block_size_tokens: 512 # matches bailian-traces blksz_512
hardware:
  type: 8xb200 # 8 x B200 SXM (192GB each)
  # Override preset values for this specific deployment:
  hbm_bytes: 500.0e9 # KV budget after FP8 weights + activations
  dram_bytes: 1.5e12 # ~1.5 TB usable CPU DRAM per node
  max_batch_slots: 256
cluster:
  num_instances: 32
  meta_store:
    ttl_seconds: 300.0
  router:
    mode: min_pd
    precise_probe_latency_us: 50.0
    precise_probe_topk: 4
    load_alpha: 1.0
    prefix_k: 8
sim:
  trace_path: bailian-traces/glm_coder_blksz_512_040915-040917.jsonl
  max_requests: null
  output_dir: runs/glm5_8xb200_hf
  sample_interval_s: 1.0
  seed: 42
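A minimal sketch of the override precedence this simplified format implies on the hardware side: start from the built-in preset named by hardware.type, then apply the fields set explicitly in the YAML. The struct, function name, and preset values below are illustrative placeholders, not the simulator's actual types or its real 8xb200 preset.

// Illustrative only: resolution order for the simplified config format.
// Preset values below are placeholders, not the simulator's real "8xb200" preset.
#[derive(Debug)]
struct Hardware {
    hbm_bytes: f64,
    dram_bytes: f64,
    max_batch_slots: u32,
}

fn resolve(preset: Hardware, apply_yaml_overrides: impl Fn(&mut Hardware)) -> Hardware {
    let mut hw = preset;           // 1. start from the built-in preset (hardware.type)
    apply_yaml_overrides(&mut hw); // 2. explicit YAML fields win over the preset
    hw
}

fn main() {
    let preset_8xb200 = Hardware { hbm_bytes: 1.536e12, dram_bytes: 2.0e12, max_batch_slots: 512 };
    let hw = resolve(preset_8xb200, |hw| {
        hw.hbm_bytes = 500.0e9; // overrides from the YAML above
        hw.dram_bytes = 1.5e12;
        hw.max_batch_slots = 256;
    });
    println!("{hw:?}"); // the deployment-specific overrides survive
}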

View File

@@ -1,66 +1,39 @@
# GLM-5 (zai-org/GLM-5) served as a single tensor-parallel instance on
# 8 x NVIDIA B200 SXM (192GB each, 1.5 TB aggregate HBM).
# GLM-5 using HuggingFace config.json + hardware preset.
#
# GLM-5 is a 744B-total / 40B-active Mixture-of-Experts model (BF16),
# using DeepSeek Sparse Attention (DSA). The HF card does not publish
# layer/head shapes, so the values below are reasonable estimates based
# on the GLM-4.5 lineage; adjust once the official config.json is public.
# This config demonstrates the simplified format:
# model.config_json — loads architecture from HF config.json
# hardware.type — loads GPU specs from built-in preset
#
# Hardware values below represent the *aggregate* of the 8-GPU TP group
# (one simulated "instance" == one 8xB200 serving replica). This is how
# the roofline in src/instance/compute.rs wants to see it: gpu_flops and
# gpu_mem_bw are the effective peaks seen by the TP'd model.
#
# Calibrate `flops_per_token_prefill` and `attn_quadratic_coeff` against
# measured prefill latency before trusting absolute TTFT numbers.
# Only deployment-specific fields need to be set explicitly.
# Any field from config_json or the preset can be overridden in YAML.
model:
  name: glm-5
  # --- estimates; refine from official config.json when available ---
  num_layers: 92
  num_kv_heads: 8 # GQA
  head_dim: 128
  dtype_bytes: 2 # BF16
  block_size_tokens: 16 # trace convention
  # Active-params-driven roofline: MoE activates ~40B params per token,
  # so non-attention prefill FLOPs/token ≈ 2 * 40e9 = 8e10.
  flops_per_token_prefill: 8.0e10
  # Quadratic attention term ≈ 2 * num_heads * head_dim. GLM-5 uses
  # DeepSeek Sparse Attention which is sub-quadratic in practice, so
  # this coefficient is an upper bound — lower it if your measurements
  # show DSA kicking in for long prompts.
  attn_quadratic_coeff: 2048.0
  bytes_per_token_prefill: 0.0
  # Auto-detect architecture: MoE, MLA, DSA, head dims, etc.
  config_json: ../models/GLM-5/config.json
  name: glm-5 # override HF model_type
  dtype_bytes: 1 # FP8 (not in HF config.json)
  block_size_tokens: 512 # matches bailian-traces blksz_512
hardware:
  # Aggregate of 8 x B200 in one tensor-parallel group.
  gpu_flops: 1.80e16 # 8 * 2.25 PFLOPS BF16 dense
  gpu_mem_bw: 6.40e13 # 8 * 8 TB/s HBM3e
  # KV-cache budget after weights + activations. GLM-5 @ BF16 is ~1.49TB,
  # which barely fits in 1.5TB HBM; realistic serving uses FP8 weights
  # (~744GB), leaving ~500GB for activations + KV cache. Adjust if your
  # deployment uses a different weight dtype.
  hbm_bytes: 500.0e9
  dram_bytes: 1.5e12 # ~1.5 TB usable CPU DRAM / v6d per node
  pcie_bw: 128.0e9 # PCIe Gen6 x16 ~ 128 GB/s per direction
  pcie_latency_us: 4.0
  rdma_bw: 50.0e9 # ConnectX-7 400 Gbps ≈ 50 GB/s
  rdma_latency_us: 6.0
  max_batch_slots: 256
  prefill_chunk_tokens: 2048
  type: 8xb200 # 8 x B200 SXM (192GB each)
  # Override preset values for this specific deployment:
  hbm_bytes: 500.0e9 # KV budget after FP8 weights + activations
  dram_bytes: 1.5e12 # ~1.5 TB usable CPU DRAM per node
  max_batch_slots: 256
cluster:
  num_instances: 8 # 8 TP replicas -> 64 B200s cluster-wide
  num_instances: 32
  meta_store:
    ttl_seconds: 120.0
    ttl_seconds: 300.0
  router:
    mode: ttl_aware
    mode: min_pd
    precise_probe_latency_us: 50.0
    precise_probe_topk: 4
    load_alpha: 1.0
    prefix_k: 8
sim:
  trace_path: qwen-bailian-usagetraces-anon/qwen_coder_blksz_16.jsonl
  trace_path: bailian-traces/glm_coder_blksz_512_040915-040917.jsonl
  max_requests: null
  output_dir: runs/glm5_8xb200
  sample_interval_s: 1.0
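The removed coefficients spell out a simple prefill roofline: a linear term from the ~40B active parameters and a quadratic attention term, capped by the slower of compute and memory bandwidth. A hedged sketch of that model follows; it is one plausible reading of the comments above, not the actual code in src/instance/compute.rs.

// Illustrative prefill roofline, reconstructed from the config comments above.
fn prefill_time_s(prompt_tokens: f64) -> f64 {
    let flops_per_token_prefill = 8.0e10; // ≈ 2 * 40e9 active MoE params
    let attn_quadratic_coeff = 2048.0;    // ≈ 2 * num_heads * head_dim; upper bound under DSA
    let bytes_per_token_prefill = 0.0;    // compute-bound assumption from the old config
    let gpu_flops = 1.80e16;              // aggregate 8 x B200, BF16 dense
    let gpu_mem_bw = 6.40e13;             // aggregate HBM3e bandwidth

    let flops = flops_per_token_prefill * prompt_tokens
        + attn_quadratic_coeff * prompt_tokens * prompt_tokens;
    let bytes = bytes_per_token_prefill * prompt_tokens;
    (flops / gpu_flops).max(bytes / gpu_mem_bw) // roofline: slower of compute vs. bandwidth
}

fn main() {
    println!("{:.3} s", prefill_time_s(8192.0)); // ~0.036 s of pure roofline prefill for an 8K prompt
}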

View File

@@ -1,42 +0,0 @@
# Qwen2.5-Coder-32B (dense, GQA) on H800 SXM (80GB).
# Architecture from HuggingFace config.json — roofline auto-derived.
model:
  name: qwen2.5-coder-32b
  num_layers: 64
  hidden_size: 5120
  num_attention_heads: 40
  num_kv_heads: 8 # GQA
  head_dim: 128
  intermediate_size: 27648 # SwiGLU FFN
  dtype_bytes: 2 # BF16
  block_size_tokens: 16
hardware:
  gpu_flops: 9.89e14
  gpu_mem_bw: 3.35e12
  hbm_bytes: 20.0e9 # smaller budget: 32B weights are large
  dram_bytes: 512.0e9
  pcie_bw: 64.0e9
  pcie_latency_us: 5.0
  rdma_bw: 25.0e9
  rdma_latency_us: 8.0
  max_batch_slots: 128
  prefill_chunk_tokens: 1024
cluster:
  num_instances: 16
  meta_store:
    ttl_seconds: 60.0
  router:
    mode: ttl_aware
    precise_probe_latency_us: 50.0
    precise_probe_topk: 4
    load_alpha: 1.0
sim:
  trace_path: traces/qwen_coder_blksz_16.jsonl
  max_requests: null
  output_dir: runs/qwen32b
  sample_interval_s: 1.0
  seed: 42
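The small hbm_bytes here follows from the weight footprint: at BF16 a ~32B-parameter model occupies most of an 80 GB H800, leaving only a thin slice for KV cache. Rough illustrative arithmetic only (parameter count approximated from the model name; not the simulator's code):

// Illustrative check of why the 32B config gets a small KV budget.
fn main() {
    let params: f64 = 32.0e9;    // approximate, from the model name
    let dtype_bytes: f64 = 2.0;  // BF16
    let kv_budget: f64 = 20.0e9; // hbm_bytes from the config above

    // Weights claim most of the 80 GB card, hence the small KV budget.
    println!("weights: ~{:.0} GB of 80 GB HBM", params * dtype_bytes / 1e9);

    // GQA KV cache per token: (K and V) * num_layers * num_kv_heads * head_dim * dtype.
    let kv_per_token = 2.0 * 64.0 * 8.0 * 128.0 * dtype_bytes; // 262,144 B = 256 KiB
    println!("~{:.0}k tokens of KV fit in the 20 GB budget", kv_budget / kv_per_token / 1e3); // ~76k
}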

View File

@@ -1,42 +0,0 @@
# Qwen2.5-Coder-7B (dense, GQA) on a single H800 SXM (80GB).
# Architecture from HuggingFace config.json — roofline auto-derived.
model:
  name: qwen2.5-coder-7b
  num_layers: 28
  hidden_size: 3584
  num_attention_heads: 28
  num_kv_heads: 4 # GQA: 28 query heads, 4 KV heads
  head_dim: 128
  intermediate_size: 18944 # SwiGLU FFN
  dtype_bytes: 2 # BF16
  block_size_tokens: 16 # matches qwen_coder_blksz_16 trace
hardware:
  gpu_flops: 9.89e14 # H800 bf16 dense
  gpu_mem_bw: 3.35e12 # 3.35 TB/s HBM3
  hbm_bytes: 60.0e9 # leave headroom for weights/activations
  dram_bytes: 512.0e9
  pcie_bw: 64.0e9 # PCIe Gen5 x16
  pcie_latency_us: 5.0
  rdma_bw: 25.0e9 # ~200 Gbps NIC
  rdma_latency_us: 8.0
  max_batch_slots: 256
  prefill_chunk_tokens: 2048
cluster:
  num_instances: 16
  meta_store:
    ttl_seconds: 60.0
  router:
    mode: ttl_aware
    precise_probe_latency_us: 50.0
    precise_probe_topk: 4
    load_alpha: 1.0
sim:
  trace_path: qwen-bailian-usagetraces-anon/qwen_coder_blksz_16.jsonl
  max_requests: null
  output_dir: runs/qwen7b
  sample_interval_s: 1.0
  seed: 42
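For this config the GQA KV-cache cost per token and per 16-token block works out as follows. Illustrative arithmetic only, using the fields above; it is not the simulator's sizing code.

// Illustrative GQA KV-cache sizing for the qwen2.5-coder-7b config above.
fn main() {
    let num_layers: u64 = 28;
    let num_kv_heads: u64 = 4;
    let head_dim: u64 = 128;
    let dtype_bytes: u64 = 2;        // BF16
    let block_size_tokens: u64 = 16; // matches the blksz_16 trace
    let hbm_budget: f64 = 60.0e9;    // hbm_bytes from the config above

    let kv_per_token = 2 * num_layers * num_kv_heads * head_dim * dtype_bytes; // K and V
    let kv_per_block = kv_per_token * block_size_tokens;
    println!("KV bytes/token: {kv_per_token}"); // 57,344 (~56 KiB)
    println!("KV bytes/block: {kv_per_block}"); // 917,504 (~896 KiB)
    println!("blocks in budget: ~{:.0}k", hbm_budget / kv_per_block as f64 / 1e3); // ~65k blocks, ~1M tokens
}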

View File

@@ -1,36 +0,0 @@
# Qwen2.5-Coder-7B using hardware preset.
#
# Model architecture is specified inline (no config.json needed for simple
# models). Hardware uses preset "h800" with a single override for hbm_bytes.
model:
  name: qwen2.5-coder-7b
  num_layers: 28
  hidden_size: 3584
  num_attention_heads: 28
  num_kv_heads: 4
  head_dim: 128
  intermediate_size: 18944
  dtype_bytes: 2
  block_size_tokens: 16
hardware:
  type: h800 # single H800 SXM (80GB)
  hbm_bytes: 60.0e9 # KV budget after 7B model weights
cluster:
  num_instances: 16
  meta_store:
    ttl_seconds: 60.0
  router:
    mode: ttl_aware
    precise_probe_latency_us: 50.0
    precise_probe_topk: 4
    load_alpha: 1.0
sim:
  trace_path: qwen-bailian-usagetraces-anon/qwen_coder_blksz_16.jsonl
  max_requests: null
  output_dir: runs/qwen7b_preset
  sample_interval_s: 1.0
  seed: 42
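A hedged sketch of what the built-in presets behind hardware.type could look like, using only numbers that appear in the explicit configs in this commit. The struct shape and lookup function are assumptions, and the 8xh20 entry is left out because its raw values are not shown here.

// Illustrative preset table; not the simulator's actual preset definitions.
struct GpuPreset {
    gpu_flops: f64,  // peak dense BF16 FLOP/s of the (possibly aggregated) TP group
    gpu_mem_bw: f64, // peak HBM bandwidth in bytes/s
    hbm_bytes: f64,  // total HBM before subtracting weights/activations
}

fn preset(name: &str) -> Option<GpuPreset> {
    match name {
        // Values taken from the explicit configs in this commit.
        "h800"   => Some(GpuPreset { gpu_flops: 9.89e14, gpu_mem_bw: 3.35e12, hbm_bytes: 80.0e9 }),
        "8xb200" => Some(GpuPreset { gpu_flops: 1.80e16, gpu_mem_bw: 6.40e13, hbm_bytes: 1.536e12 }),
        _ => None, // e.g. "8xh20": raw numbers are not shown in this diff
    }
}

fn main() {
    let hw = preset("h800").expect("known preset");
    // The YAML override above (hbm_bytes: 60.0e9) would then replace hw.hbm_bytes.
    println!("h800 peak: {:.2e} FLOP/s, {:.2e} B/s, {:.0} GB HBM",
             hw.gpu_flops, hw.gpu_mem_bw, hw.hbm_bytes / 1e9);
}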

View File

@@ -5,16 +5,17 @@ model:
  config_json: ../models/Qwen3-Coder-480B-A35B-Instruct-FP8/config.json
  name: qwen3-coder-480b
  dtype_bytes: 1 # FP8 inference
  block_size_tokens: 16
  block_size_tokens: 512
hardware:
  type: 8xh20
  hbm_bytes: 400.0e9 # KV budget after FP8 weights on 8x96GB
  dram_bytes: 1.0e12 # ~1.0 TB usable CPU DRAM per node
cluster:
  num_instances: 32
  num_instances: 128
  meta_store:
    ttl_seconds: 120.0
    ttl_seconds: 300.0
  router:
    mode: min_pd
    precise_probe_latency_us: 50.0
@@ -22,7 +23,7 @@ cluster:
    load_alpha: 1.0
sim:
  trace_path: traces/qwen_coder_blksz_16.jsonl
  trace_path: bailian-traces/qwen3_coder_blksz_512_040915-040917.jsonl
  max_requests: null
  output_dir: runs/qwen3_coder_8xh20
  sample_interval_s: 1.0
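block_size_tokens moves from 16 to 512 to match the new trace's block granularity; assuming it is the KV paging and prefix-matching unit, as the blksz naming suggests, each request now spans roughly 32x fewer blocks. A small illustrative calculation (the prompt length is an arbitrary example, not from the trace):

// Illustrative only: KV/prefix block counts at the old vs. new block size.
fn blocks(prompt_tokens: u64, block_size_tokens: u64) -> u64 {
    // ceil(prompt_tokens / block_size_tokens)
    (prompt_tokens + block_size_tokens - 1) / block_size_tokens
}

fn main() {
    let prompt_tokens = 6_000; // hypothetical prompt length
    println!("blksz 16 : {} blocks", blocks(prompt_tokens, 16));  // 375
    println!("blksz 512: {} blocks", blocks(prompt_tokens, 512)); // 12
}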