New FactorComboStrategy class (strategies/factor_combo.py) implements
8 champion factor signals (4 US, 4 CN) discovered through iterative
factor research, each at 4 rebalancing frequencies (daily/weekly/
biweekly/monthly). Registered in trader.py as fc_{signal}_{freq}.
Existing strategies and state files are untouched — safe to git pull
and restart monitor on server.
Also includes factor research scripts (factor_loop.py, factor_research.py,
etc.) used to discover and validate these factors.
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
151 lines
4.8 KiB
Python
"""Final robustness check on champions from the discovery loop."""
|
||
|
||
from __future__ import annotations
|
||
import warnings
|
||
import numpy as np
|
||
import pandas as pd
|
||
import data_manager
|
||
from universe import UNIVERSES
|
||
from factor_loop import (
|
||
strat, bt, stats, combo, yearly,
|
||
f_rec_mom, f_rec_126, f_rec_63,
|
||
f_mom_12_1, f_mom_6_1, f_mom_intermediate,
|
||
f_above_ma200, f_golden_cross,
|
||
f_up_volume_proxy, f_gap_up_freq,
|
||
f_rec_mom_filtered, f_down_resilience,
|
||
f_up_capture, f_52w_high, f_str_10d,
|
||
f_earnings_drift, f_reversal_vol,
|
||
)
|
||
|
||
# Silence all warnings globally — noisy pandas/numpy deprecation and
# divide-by-zero chatter would otherwise drown the printed sweep tables.
warnings.filterwarnings("ignore")
def f_quality_mom(p):
    """Quality-momentum composite: blended cross-sectional percentile ranks.

    Weights 40% 12-1 momentum, 30% consistency (fraction of up days over
    the trailing 252 sessions, at least 126 required), and 30% up-volume
    proxy.  Each component is ranked across columns (pct=True, NaNs kept)
    before blending.
    """
    daily = p.pct_change()
    # Share of positive-return days over the trailing year.
    hit_rate = (daily > 0).astype(float).rolling(252, min_periods=126).mean()

    rank_kw = dict(axis=1, pct=True, na_option="keep")
    momentum_rank = f_mom_12_1(p).rank(**rank_kw)
    consistency_rank = hit_rate.rank(**rank_kw)
    upvol_rank = f_up_volume_proxy(p).rank(**rank_kw)
    return 0.4 * momentum_rank + 0.3 * consistency_rank + 0.3 * upvol_rank
def f_mom_x_gap(p):
    """Interaction factor: elementwise product of two percentile ranks.

    Multiplies the cross-sectional rank of 12-1 momentum by the rank of
    gap-up frequency, rewarding names that score high on both.
    """
    rank_kw = dict(axis=1, pct=True, na_option="keep")
    momentum_rank = f_mom_12_1(p).rank(**rank_kw)
    gap_rank = f_gap_up_freq(p).rank(**rank_kw)
    return momentum_rank * gap_rank
def rolling_2yr(eq):
    """Summarize rolling 2-year (504-session) windows of an equity curve.

    Steps forward 63 sessions (~one quarter) at a time.  For each full
    window, records the window-end date, annualized return, and annualized
    Sharpe ratio (0 when the window has no volatility).  Returns an empty
    DataFrame when the curve is shorter than one full window.
    """
    daily = eq.pct_change().dropna()

    def _window_row(stop):
        # One 504-session window ending just before `stop`.
        window = daily.iloc[stop - 504:stop]
        total = (1 + window).prod() - 1
        annualized = (1 + total) ** (252 / len(window)) - 1
        vol = window.std()
        sharpe = window.mean() / vol * np.sqrt(252) if vol > 0 else 0
        return {"end": window.index[-1].date(), "ann": annualized, "sh": sharpe}

    rows = [_window_row(stop) for stop in range(504, len(daily), 63)]
    return pd.DataFrame(rows)
def run_robustness(name, func, prices, label_prefix):
    """Print sensitivity diagnostics for one factor strategy.

    Sweeps portfolio breadth (top-N), rebalance interval, and per-trade
    cost, then prints a rolling 2-year return/Sharpe summary.  Output is
    print-only; nothing is returned.

    Args:
        name: Display label for the strategy.
        func: Factor function mapping a price DataFrame to scores.
        prices: Wide price DataFrame (dates x tickers).
        label_prefix: Unused; kept for call-site compatibility.
    """
    print(f"\n {name}:")

    # Top-N sensitivity: how dependent is performance on breadth?
    # (Removed needless f-prefix on placeholder-free literals below.)
    print(" Top-N: ", end="")
    for n in [5, 10, 15, 20]:
        s = stats(bt(strat(prices, func, top_n=n), prices))
        print(f"N={n}: {s['cagr']:+.1%}/{s['sharpe']:.2f} ", end="")
    print()

    # Rebalance-interval sensitivity (trading days between rebalances).
    print(" Rebal: ", end="")
    for r in [5, 10, 21, 42]:
        s = stats(bt(strat(prices, func, top_n=10, rebal=r), prices))
        print(f"{r}d: {s['cagr']:+.1%}/{s['sharpe']:.2f} ", end="")
    print()

    # Cost sensitivity: weights don't depend on cost, so compute them once
    # instead of re-running strat() every iteration (was loop-invariant).
    w = strat(prices, func, top_n=10)
    print(" Cost: ", end="")
    for c in [0, 0.001, 0.002, 0.005]:
        s = stats(bt(w, prices, cost=c))
        print(f"{c*1e4:.0f}bp: {s['cagr']:+.1%} ", end="")
    print()

    # Rolling 2-year consistency at the default configuration (reuses w).
    eq = bt(w, prices)
    roll = rolling_2yr(eq)
    if not roll.empty:
        pct_pos = (roll["ann"] > 0).mean()
        print(f" 2yr rolling: mean={roll['ann'].mean():+.1%} min={roll['ann'].min():+.1%} "
              f"max={roll['ann'].max():+.1%} %pos={pct_pos:.0%} mean_sharpe={roll['sh'].mean():.2f}")
def main():
    """Run the final robustness battery for US and CN champion strategies.

    Loads each market's prices, drops the benchmark column, and runs
    run_robustness() for the baseline plus each champion combo.
    """
    # ============= US =============
    prices_us = data_manager.load("us")
    # SPY is the benchmark, not a tradable candidate.
    stocks_us = prices_us.drop(columns=["SPY"], errors="ignore")

    print("=" * 95)
    print(" US FINAL ROBUSTNESS — Champions vs Baseline")
    print("=" * 95)

    us_champs = [
        ("BASELINE: rec+mom", f_rec_mom),
        # Fixed: this entry was previously built with a broken placeholder
        # weight (lambda x: x, 0.0) and patched post-hoc; now defined
        # correctly inline as a proper 50/50 two-factor combo whose second
        # leg is itself a nested deep-recovery x up-volume composite.
        ("rec_mom_filt + rec_deep×upvol",
         combo([
             (f_rec_mom_filtered, 0.5),
             (combo([(f_rec_126, 0.5), (f_up_volume_proxy, 0.5)]), 0.5),
         ])),
        ("above_ma200+mom_7m+rec_126d",
         combo([(f_above_ma200, 0.33), (f_mom_intermediate, 0.33), (f_rec_126, 0.34)])),
        ("rec_mom_filtered+above_ma200",
         combo([(f_rec_mom_filtered, 0.5), (f_above_ma200, 0.5)])),
        ("mom_7m+rec_126d",
         combo([(f_mom_intermediate, 0.5), (f_rec_126, 0.5)])),
    ]

    for name, func in us_champs:
        run_robustness(name, func, stocks_us, "US")

    # ============= CN =============
    prices_cn = data_manager.load("cn")
    # 000300.SS (CSI 300) is the benchmark, not a tradable candidate.
    stocks_cn = prices_cn.drop(columns=["000300.SS"], errors="ignore")

    print(f"\n{'='*95}")
    print(" CN FINAL ROBUSTNESS — Champions vs Baseline")
    print("=" * 95)

    cn_champs = [
        ("BASELINE: rec+mom", f_rec_mom),
        ("up_capture+quality_mom",
         combo([(f_up_capture, 0.5), (f_quality_mom, 0.5)])),
        ("recovery_63d+mom×gap",
         combo([(f_rec_63, 0.5), (f_mom_x_gap, 0.5)])),
        ("down_resilience+quality_mom",
         combo([(f_down_resilience, 0.5), (f_quality_mom, 0.5)])),
        ("up_capture+mom×gap",
         combo([(f_up_capture, 0.5), (f_mom_x_gap, 0.5)])),
    ]

    for name, func in cn_champs:
        run_robustness(name, func, stocks_cn, "CN")


if __name__ == "__main__":
    main()
|