# GLM-5-NVFP4 (nvidia/GLM-5-NVFP4) on 8 x B300 (Blackwell Ultra, 288GB each).
# Architecture auto-loaded from HuggingFace config.json.
#
# FP4 weights: ~744B params * 0.5 bytes = ~372 GB across 8 GPUs.
# Total HBM: 8 * 288 GB = 2304 GB. KV budget: ~1900 GB after weights.

model:
  config_json: ../models/GLM-5-NVFP4/config.json
  name: glm-5-nvfp4
  compute_dtype: fp4      # FP4 weights → selects FP4 tensor core FLOPS
  dtype_bytes: 1          # FP8 KV cache
  block_size_tokens: 512

hardware:
  type: 8xb300
  hbm_bytes: 1900.0e9     # KV budget after FP4 weights (~372 GB)
  dram_bytes: 1.5e12      # ~1.5 TB usable CPU DRAM per node

cluster:
  num_instances: 8
  meta_store:
    ttl_seconds: 300.0
  router:
    mode: prefix_affinity
    prefix_k: 8
    load_alpha: 1.0

sim:
  trace_path: bailian-traces/glm_coder_blksz_512_040915-040917.jsonl
  max_requests: null
  output_dir: runs/glm5_nvfp4_8xb300
  sample_interval_s: 1.0
  seed: 42
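
# Sanity check for the memory figures above, as a comment-only sketch (not
# read by the simulator). The 744B parameter count is taken from the header
# comment, and the headroom interpretation is an assumption, not something
# this file specifies:
#   weights   = 744e9 params * 0.5 bytes/param ≈  372 GB  (FP4)
#   total_hbm = 8 GPUs * 288 GB                = 2304 GB
#   kv_budget = 2304 GB - 372 GB               ≈ 1932 GB,
#               presumably rounded down to 1900e9 (hbm_bytes) to leave
#               ~32 GB of runtime headroom, i.e. ~237.5 GB of KV cache
#               per GPU.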