---
# kvcache-simulator/configs/glm5-nvfp4-8xb200.yaml
# GLM-5-NVFP4 (nvidia/GLM-5-NVFP4) on 8 x B200 (192GB each).
# Architecture auto-loaded from HuggingFace config.json.
#
# FP4 weights: ~744B params * 0.5 bytes = ~372 GB across 8 GPUs.
# Total HBM: 8 * 192 GB = 1536 GB. Keep the KV budget below the raw
# remainder to leave room for runtime activations and allocator slack.

model:
  config_json: ../models/GLM-5-NVFP4/config.json
  name: glm-5-nvfp4
  compute_dtype: fp8
  weight_dtype: fp4
  # NOTE(review): 1 byte matches fp8 (the compute dtype), not the fp4
  # weight dtype — presumably the KV-cache element size; confirm with
  # the simulator's memory model.
  dtype_bytes: 1
  block_size_tokens: 512

hardware:
  type: 8xb200
  # Exponents are written signed so YAML 1.1 resolvers (e.g. PyYAML's
  # default loader) parse these as floats; unsigned forms like
  # "1150.0e9" would silently load as strings there.
  # 1150 GB KV budget < 1536 GB total - ~372 GB weights (see header).
  hbm_bytes: 1150.0e+9
  dram_bytes: 1.5e+12
  max_batch_slots: 256

cluster:
  num_instances: 8

meta_store:
  ttl_seconds: 300.0

router:
  mode: prefix_affinity
  prefix_k: 8
  load_alpha: 1.0

sim:
  trace_path: bailian-traces/glm_coder_blksz_512_040915-040917.jsonl
  max_requests: null  # no cap; replay the full trace
  output_dir: runs/glm5_nvfp4_8xb200
  sample_interval_s: 1.0
  seed: 42