---
# GLM-5-FP8 (ZhipuAI/GLM-5-FP8) on 8 x H20-141G (N3E).
# Tuned for the 0-32768 input-length slice of
# bailian-traces/glm_coder_blksz_512_040915-040917.jsonl.

model:
  config_json: ../models/GLM-5-FP8/config.json
  name: glm-5-fp8
  compute_dtype: fp8
  dtype_bytes: 1
  block_size_tokens: 512

hardware:
  type: 8xh20-141g
  # NOTE: exponent signs are written explicitly (e.g. 300.0e+9, not 300.0e9)
  # because PyYAML's 1.1 float resolver otherwise loads these as strings.
  hbm_bytes: 300.0e+9
  dram_bytes: 1.5e+12
  max_batch_slots: 256

cluster:
  num_instances: 64

meta_store:
  ttl_seconds: 300.0

router:
  mode: cache_affinity
  precise_probe_latency_us: 50.0
  precise_probe_topk: 4
  # Tuned on this filtered GLM coder workload: stronger queue penalty than
  # the default 1.0 keeps cache_affinity's locality gains while reducing TTFT.
  load_alpha: 1.5
  prefix_k: 8

sim:
  trace_path: bailian-traces/glm_coder_blksz_512_040915-040917.jsonl
  max_requests: null
  output_dir: runs/glm5_fp8_8xh20_141g_ca_tuned
  sample_interval_s: 1.0
  seed: 42
  input_length_min: 0
  input_length_max: 32768