---
# GLM-5-NVFP4 on 8 x B300 with FP8 tensor-core compute.
# Weights remain stored in NVFP4, so HBM budget follows FP4 storage.

model:
  config_json: ../models/GLM-5-NVFP4/config.json
  name: glm-5-nvfp4
  compute_dtype: fp8
  weight_dtype: fp4
  # Bytes per element for the stored weights (FP4 packs two values per byte,
  # but accounting here is per logical element — TODO confirm with consumer).
  dtype_bytes: 1
  block_size_tokens: 512

hardware:
  type: 8xb300
  # NOTE: exponents carry an explicit '+' sign so YAML 1.1 parsers (PyYAML)
  # resolve these as floats; '1900.0e9' without the sign loads as a string.
  hbm_bytes: 1900.0e+9     # 1.9 TB aggregate HBM across 8 x B300
  dram_bytes: 1.5e+12      # 1.5 TB host DRAM
  max_batch_slots: 256

cluster:
  num_instances: 8
  meta_store:
    ttl_seconds: 300.0
  router:
    mode: prefix_affinity
    prefix_k: 8
    load_alpha: 1.0

sim:
  trace_path: bailian-traces/glm_coder_blksz_512_040915-040917.jsonl
  max_requests: null       # null = replay the full trace
  output_dir: runs/glm5_nvfp4_fp8compute_8xb300
  sample_interval_s: 1.0
  seed: 42