---
# GLM-5 using HuggingFace config.json + hardware preset.
#
# This config demonstrates the simplified format:
#   model.config_json — loads architecture from HF config.json
#   hardware.type     — loads GPU specs from built-in preset
#
# Only deployment-specific fields need to be set explicitly.
# Any field from config_json or the preset can be overridden in YAML.

model:
  # Auto-detect architecture: MoE, MLA, DSA, head dims, etc.
  config_json: ../models/GLM-5/config.json
  name: glm-5  # override HF model_type
  # NOTE(review): 1 byte/elem implies FP8; original comment said "BF16",
  # which is 2 bytes — confirm intended KV-cache dtype.
  dtype_bytes: 1  # not in HF config.json
  block_size_tokens: 512  # matches bailian-traces blksz_512

hardware:
  type: 8xb200  # 8 x B200 SXM (192GB each)
  # Override preset values for this specific deployment.
  # Exponents carry an explicit '+' sign so YAML 1.1 parsers (PyYAML)
  # resolve these as floats rather than strings.
  hbm_bytes: 500.0e+9  # KV budget after FP8 weights + activations
  dram_bytes: 1.5e+12  # ~1.5 TB usable CPU DRAM per node
  max_batch_slots: 256

cluster:
  num_instances: 32
  meta_store:
    ttl_seconds: 300.0

router:
  mode: min_pd
  precise_probe_latency_us: 50.0
  precise_probe_topk: 4
  load_alpha: 1.0
  prefix_k: 8

sim:
  trace_path: bailian-traces/glm_coder_blksz_512_040915-040917.jsonl
  max_requests: null  # no cap — replay the full trace
  output_dir: runs/glm5_8xb200_hf
  sample_interval_s: 1.0
  seed: 42