Use uv auto torch backend for vllm 0.20

This commit is contained in:
2026-05-02 08:21:27 +08:00
parent a7c9518ef6
commit e215827503
3 changed files with 7 additions and 5 deletions

View File

@@ -14,7 +14,7 @@
"engine": { "engine": {
"engine_name": "vllm", "engine_name": "vllm",
"engine_version": "0.20.0", "engine_version": "0.20.0",
"exec_path": "/tmp/wjh/venvs/vllm-0.20.0/bin/vllm", "exec_path": "/tmp/wjh/venvs/vllm-0.20.0-auto/bin/vllm",
"cwd": "/home/admin/cpfs/wjh/aituner/aituner", "cwd": "/home/admin/cpfs/wjh/aituner/aituner",
"host": "127.0.0.1", "host": "127.0.0.1",
"port": 18230, "port": 18230,
@@ -57,7 +57,7 @@
"allowed_data_parallel_sizes": [1, 2, 4, 8], "allowed_data_parallel_sizes": [1, 2, 4, 8],
"allowed_expert_parallel_sizes": [1, 2, 4, 8] "allowed_expert_parallel_sizes": [1, 2, 4, 8]
}, },
"python_executable": "/tmp/wjh/venvs/vllm-0.20.0/bin/python" "python_executable": "/tmp/wjh/venvs/vllm-0.20.0-auto/bin/python"
}, },
"trace": { "trace": {
"windows_path": "/home/admin/cpfs/wjh/aituner/aituner/trace_windows/windows.json", "windows_path": "/home/admin/cpfs/wjh/aituner/aituner/trace_windows/windows.json",

View File

@@ -14,7 +14,7 @@
"engine": { "engine": {
"engine_name": "vllm", "engine_name": "vllm",
"engine_version": "0.20.0", "engine_version": "0.20.0",
"exec_path": "/tmp/wjh/venvs/vllm-0.20.0/bin/vllm", "exec_path": "/tmp/wjh/venvs/vllm-0.20.0-auto/bin/vllm",
"cwd": "/home/admin/cpfs/wjh/aituner/aituner", "cwd": "/home/admin/cpfs/wjh/aituner/aituner",
"host": "127.0.0.1", "host": "127.0.0.1",
"port": 18231, "port": 18231,
@@ -57,7 +57,7 @@
"allowed_data_parallel_sizes": [1, 2, 4, 8], "allowed_data_parallel_sizes": [1, 2, 4, 8],
"allowed_expert_parallel_sizes": [1, 2, 4, 8] "allowed_expert_parallel_sizes": [1, 2, 4, 8]
}, },
"python_executable": "/tmp/wjh/venvs/vllm-0.20.0/bin/python" "python_executable": "/tmp/wjh/venvs/vllm-0.20.0-auto/bin/python"
}, },
"trace": { "trace": {
"windows_path": "/home/admin/cpfs/wjh/aituner/aituner/trace_windows/windows.json", "windows_path": "/home/admin/cpfs/wjh/aituner/aituner/trace_windows/windows.json",

View File

@@ -19,7 +19,9 @@ Both specs start from the same base vLLM configuration. The base contains only s
PyPI reports `vllm==0.20.0` as the current community release checked on 2026-05-02. The dash0 runtime venv is on local rootfs rather than CPFS, because installing torch/CUDA wheels into CPFS was I/O-bound: PyPI reports `vllm==0.20.0` as the current community release checked on 2026-05-02. The dash0 runtime venv is on local rootfs rather than CPFS, because installing torch/CUDA wheels into CPFS was I/O-bound:
`/tmp/wjh/venvs/vllm-0.20.0` `/tmp/wjh/venvs/vllm-0.20.0-auto`
The first plain `pip install vllm==0.20.0` smoke test pulled `torch 2.11.0+cu130` and failed on dash0's driver (`570.133.20`, CUDA 12.9). The active install uses the vLLM-documented `uv pip install vllm==0.20.0 --torch-backend=auto` path so uv selects a CUDA backend compatible with the installed driver.
Install log: Install log: