Files
kernel-lab/tasks/04_online_softmax/bench.py
2026-04-10 13:15:06 +00:00

50 lines
1.4 KiB
Python

# Benchmark harness for the online-softmax task: times the PyTorch
# reference implementation and, when CUDA is available, the Triton kernel.
from __future__ import annotations
import statistics
import sys
import time
from pathlib import Path

# Make the repository root importable so the `kernels` and `reference`
# packages resolve when this file is run directly as a script.
ROOT = Path(__file__).resolve().parents[2]
if str(ROOT) not in sys.path:
    sys.path.insert(0, str(ROOT))

import torch
from kernels.triton.online_softmax import triton_online_softmax
from reference.torch_online_softmax import torch_online_softmax
def benchmark(fn, *args, warmup: int = 5, reps: int = 25) -> float:
    """Return the median wall-clock latency of ``fn(*args)`` in milliseconds.

    Runs ``warmup`` untimed calls first (to absorb JIT/cache effects), then
    times ``reps`` calls and reports the median, which is robust to outliers
    from scheduler noise.

    Args:
        fn: Callable to benchmark.
        *args: Positional arguments forwarded to ``fn`` on every call.
        warmup: Number of untimed calls issued before measurement.
        reps: Number of timed repetitions.

    Returns:
        Median latency in milliseconds across the ``reps`` timed runs.
    """
    # Hoist the device check out of the loops. ``getattr`` with a default
    # also makes the harness safe for zero-arg callables and non-tensor
    # first arguments (the original raised IndexError/AttributeError there).
    use_cuda = bool(args) and getattr(args[0], "is_cuda", False)
    for _ in range(warmup):
        fn(*args)
    if use_cuda:
        torch.cuda.synchronize()
    times_ms = []
    for _ in range(reps):
        if use_cuda:
            # Drain any pending GPU work before starting the clock.
            torch.cuda.synchronize()
        start = time.perf_counter()
        fn(*args)
        if use_cuda:
            # CUDA launches are async; wait for completion before stopping.
            torch.cuda.synchronize()
        times_ms.append((time.perf_counter() - start) * 1e3)
    return statistics.median(times_ms)
def main() -> None:
    """Benchmark the PyTorch reference softmax and, on GPU hosts, the Triton kernel."""
    on_gpu = torch.cuda.is_available()
    device = "cuda" if on_gpu else "cpu"
    x = torch.randn(2048, 2048, device=device)

    ref_ms = benchmark(torch_online_softmax, x)
    print(f"torch_online_softmax: {ref_ms:.3f} ms")

    # The Triton kernel only runs on CUDA; skip it entirely on CPU hosts.
    if not on_gpu:
        return
    try:
        triton_ms = benchmark(triton_online_softmax, x)
    except (NotImplementedError, RuntimeError) as exc:
        # Kernel may be unimplemented or fail at launch; report and move on.
        print(f"triton_online_softmax: skipped ({exc})")
    else:
        print(f"triton_online_softmax: {triton_ms:.3f} ms")
# Script entry point: only run the benchmark when executed directly.
if __name__ == "__main__":
    main()