# ali-trace-tools/trace_analyzer/reporting.py
"""Aggregate per-request trace features into summary and report artifacts."""
from __future__ import annotations

import csv
import json
from collections import Counter
from pathlib import Path

from trace_analyzer.helpers import safe_float, safe_int, series_stats
from trace_analyzer.layout import resolve_details_summary_path
from trace_analyzer.report import build_markdown_report

# Columns coerced from the features CSV. Everything else on a row is left as
# the raw string that csv.DictReader produced.
_INT_FIELDS = (
    "message_count",
    "conversation_depth",
    "declared_tool_count",
    "assistant_msg_count",
    "tool_msg_count",
    "user_msg_count",
    "system_msg_count",
    "assistant_to_tool_count",
    "tool_to_assistant_count",
    "tool_to_tool_count",
    "assistant_to_user_count",
    "user_to_assistant_count",
    "max_consecutive_tool_msgs",
    "has_tool_loop",
    "input_tokens",
    "output_tokens",
    "total_tokens",
    "reasoning_tokens",
    "cached_tokens",
    "uncached_prompt_tokens",
    "latency_ms",
    "long_context",
    "high_cache",
    "tool_burst_alert",
    "tool_loop_alert",
    "slow_request",
)
_FLOAT_FIELDS = (
    "avg_tool_burst_len",
    "cache_hit_ratio",
    "output_input_ratio",
    "ms_per_input_token",
    "ms_per_output_token",
)


def _iter_feature_rows(features_path: str | Path):
    """Yield feature rows from the CSV with numeric fields coerced in place."""
    with Path(features_path).open("r", encoding="utf-8") as handle:
        for row in csv.DictReader(handle):
            for field in _INT_FIELDS:
                row[field] = safe_int(row.get(field))
            for field in _FLOAT_FIELDS:
                row[field] = safe_float(row.get(field))
            # "pattern_labels" is stored as a semicolon-separated string.
            row["pattern_labels"] = [
                label for label in str(row.get("pattern_labels", "")).split(";") if label
            ]
            yield row
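
# For reference, a yielded row has roughly this shape (values are illustrative
# placeholders, not taken from real trace data):
#
#     {"request_id": "req-001", "session_id": "sess-a", "model": "model-a",
#      "status_code": "200", "latency_ms": 1240, "cache_hit_ratio": 0.75,
#      "pattern_labels": ["slow-despite-cache"], ...}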

def build_summary_from_features(features_path: str | Path) -> dict:
    """Aggregate the per-request feature rows into a single summary dict."""
    model_counts = Counter()
    status_code_counts = Counter()
    role_transition_counts = Counter()
    session_ids: set[str] = set()
    latencies: list[int] = []
    cache_ratios: list[float] = []
    cached_tokens_list: list[int] = []
    declared_tool_counts: list[int] = []
    burst_values: list[int] = []
    record_count = 0
    success_count = 0
    high_burst_requests: list[dict] = []
    slow_despite_cache: list[dict] = []
    long_context_no_cache: list[dict] = []
    tool_burst_alert_count = 0
    tool_loop_alert_count = 0
    cache_bucket_input = {
        "lt_0_2": {"latencies": [], "ratios": [], "count": 0},
        "0_2_to_0_8": {"latencies": [], "ratios": [], "count": 0},
        "ge_0_8": {"latencies": [], "ratios": [], "count": 0},
    }
    for row in _iter_feature_rows(features_path):
        record_count += 1
        model_counts[row.get("model") or "unknown"] += 1
        status_code_counts[row.get("status_code") or "unknown"] += 1
        if row.get("session_id"):
            session_ids.add(row["session_id"])
        if row.get("status_code") in {"1000", "200"}:
            success_count += 1
        role_transition_counts["assistant->tool"] += row["assistant_to_tool_count"]
        role_transition_counts["tool->assistant"] += row["tool_to_assistant_count"]
        role_transition_counts["tool->tool"] += row["tool_to_tool_count"]
        role_transition_counts["assistant->user"] += row["assistant_to_user_count"]
        role_transition_counts["user->assistant"] += row["user_to_assistant_count"]
        latencies.append(row["latency_ms"])
        cache_ratios.append(row["cache_hit_ratio"])
        cached_tokens_list.append(row["cached_tokens"])
        declared_tool_counts.append(row["declared_tool_count"])
        burst_values.append(row["max_consecutive_tool_msgs"])
        tool_burst_alert_count += row["tool_burst_alert"]
        tool_loop_alert_count += row["tool_loop_alert"]
        if row["tool_burst_alert"]:
            high_burst_requests.append(
                {
                    "request_id": row["request_id"],
                    "session_id": row["session_id"],
                    "max_consecutive_tool_msgs": row["max_consecutive_tool_msgs"],
                    "tool_to_tool_count": row["tool_to_tool_count"],
                }
            )
            # Re-sort and keep only the ten worst offenders so memory stays
            # bounded on large feature files.
            high_burst_requests.sort(
                key=lambda item: (item["max_consecutive_tool_msgs"], item["tool_to_tool_count"]),
                reverse=True,
            )
            del high_burst_requests[10:]
        if "slow-despite-cache" in row["pattern_labels"]:
            slow_despite_cache.append(
                {
                    "request_id": row["request_id"],
                    "session_id": row["session_id"],
                    "latency_ms": row["latency_ms"],
                    "cache_hit_ratio": row["cache_hit_ratio"],
                }
            )
            slow_despite_cache.sort(key=lambda item: item["latency_ms"], reverse=True)
            del slow_despite_cache[10:]
        if "long-context-no-cache" in row["pattern_labels"]:
            long_context_no_cache.append(
                {
                    "request_id": row["request_id"],
                    "session_id": row["session_id"],
                    "input_tokens": row["input_tokens"],
                    "cache_hit_ratio": row["cache_hit_ratio"],
                }
            )
            long_context_no_cache.sort(key=lambda item: item["input_tokens"], reverse=True)
            del long_context_no_cache[10:]
        # Bucket each request by cache-hit ratio for the latency comparison below.
        ratio = row["cache_hit_ratio"]
        if ratio < 0.2:
            bucket_name = "lt_0_2"
        elif ratio < 0.8:
            bucket_name = "0_2_to_0_8"
        else:
            bucket_name = "ge_0_8"
        cache_bucket_input[bucket_name]["count"] += 1
        cache_bucket_input[bucket_name]["latencies"].append(row["latency_ms"])
        cache_bucket_input[bucket_name]["ratios"].append(row["cache_hit_ratio"])
    latency_stats = series_stats(latencies)
    cache_ratio_stats = series_stats(cache_ratios)
    cached_token_stats = series_stats(cached_tokens_list)
    declared_tool_stats = series_stats(declared_tool_counts)
    burst_stats = series_stats(burst_values)
    cache_buckets = []
    for label in ["lt_0_2", "0_2_to_0_8", "ge_0_8"]:
        bucket = cache_bucket_input[label]
        cache_buckets.append(
            {
                "bucket": label,
                "count": bucket["count"],
                "avg_latency_ms": series_stats(bucket["latencies"])["mean"],
                "avg_cache_hit_ratio": series_stats(bucket["ratios"])["mean"],
            }
        )
    return {
        "record_count": record_count,
        "success_count": success_count,
        "session_count": len(session_ids),
        "model_counts": dict(model_counts),
        "status_code_counts": dict(status_code_counts),
        "thresholds": {
            "long_context": 32000,
            "high_cache": 0.8,
            "tool_burst_alert": 4,
            "tool_loop_alert": 3,
            "slow_request_p90_latency_ms": latency_stats["p90"],
        },
        "tool_patterns": {
            "role_transitions": dict(role_transition_counts),
            "declared_tool_count": declared_tool_stats,
            "max_consecutive_tool_msgs": burst_stats,
            "tool_burst_alert_count": tool_burst_alert_count,
            "tool_loop_alert_count": tool_loop_alert_count,
            "high_burst_requests": high_burst_requests,
        },
        "cache_patterns": {
            "cached_tokens": cached_token_stats,
            "cache_hit_ratio": cache_ratio_stats,
            "latency_ms": latency_stats,
            "cache_buckets": cache_buckets,
        },
        "anomalies": {
            "slow_despite_cache": slow_despite_cache,
            "long_context_no_cache": long_context_no_cache,
        },
    }
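
# Minimal usage sketch: the summary dict can be consumed on its own, without
# going through write_reports (the path below is illustrative only):
#
#     summary = build_summary_from_features("out/features.csv")
#     print(summary["record_count"], summary["cache_patterns"]["cache_buckets"])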

def write_reports(
    *,
    features_path: str | Path,
    output_dir: str | Path,
    pipeline_summary: dict | None = None,
) -> dict:
    """Write summary.json, report.md, and analysis_snapshot.json under output_dir."""
    output_root = Path(output_dir)
    output_root.mkdir(parents=True, exist_ok=True)
    summary = build_summary_from_features(features_path)
    summary_path = output_root / "summary.json"
    summary_path.write_text(json.dumps(summary, ensure_ascii=False, indent=2), encoding="utf-8")
    report_path = output_root / "report.md"
    report_path.write_text(build_markdown_report(summary), encoding="utf-8")
    combined = {
        "summary": summary,
        "pipeline": pipeline_summary or {},
    }
    # Fold in the per-detail summary when the layout helper can locate one.
    details_summary_path = resolve_details_summary_path(output_root)
    if details_summary_path is not None:
        combined["details_summary"] = json.loads(details_summary_path.read_text(encoding="utf-8"))
    combined_path = output_root / "analysis_snapshot.json"
    combined_path.write_text(json.dumps(combined, ensure_ascii=False, indent=2), encoding="utf-8")
    return {
        "summary_path": str(summary_path),
        "report_path": str(report_path),
        "analysis_snapshot_path": str(combined_path),
    }
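

# Hedged CLI sketch: this module defines no entry point of its own, so the
# argument names below are assumptions for local experimentation, not part of
# the real tool's interface.
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="Build trace reports from a features CSV.")
    parser.add_argument("features_csv", help="Path to the per-request features CSV.")
    parser.add_argument("--output-dir", default="reports", help="Directory for generated reports.")
    args = parser.parse_args()

    # write_reports returns the paths of the files it produced.
    paths = write_reports(features_path=args.features_csv, output_dir=args.output_dir)
    print(json.dumps(paths, indent=2))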