Initial commit: quant backtesting framework with daily trading simulator
Backtesting engine supporting 11 strategies across US (S&P 500) and CN (CSI 300) markets with open-to-close execution, proportional + fixed per-trade fees. Daily trader (trader.py) with auto/morning/evening/simulate/status commands and cron-friendly `auto` mode for unattended daily runs on a server. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
0
strategies/__init__.py
Normal file
0
strategies/__init__.py
Normal file
60
strategies/adaptive_momentum.py
Normal file
60
strategies/adaptive_momentum.py
Normal file
@@ -0,0 +1,60 @@
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
from strategies.base import Strategy
|
||||
|
||||
|
||||
class AdaptiveMomentumStrategy(Strategy):
    """
    Momentum with dynamic position sizing via inverse-volatility weighting.

    Combines three proven ideas:
    1. Momentum selection: pick top_n stocks by 12-1 month return
    2. Inverse-vol weighting: instead of equal-weight, allocate more
       to lower-vol winners (smoother ride, better risk-adjusted returns)
    3. Regime scaling: scale total exposure by inverse of market vol
       (reduce exposure in high-vol periods, increase in calm periods)

    This addresses momentum's main weakness: concentration in high-vol
    names that crash hard in risk-off episodes.
    """

    def __init__(self, lookback: int = 252, skip: int = 21,
                 vol_window: int = 60, top_n: int = 20):
        self.lookback = lookback      # total momentum lookback (~12 months)
        self.skip = skip              # most recent days excluded (~1 month)
        self.vol_window = vol_window  # realized-vol estimation window
        self.top_n = top_n            # number of winners held

    def generate_signals(self, data: pd.DataFrame) -> pd.DataFrame:
        """Return a DataFrame of daily portfolio weights (rows sum to <= 1)."""
        # Momentum ranking: return from T-lookback to T-skip.
        momentum = data.shift(self.skip).pct_change(self.lookback - self.skip)

        # Only allocate on days where at least top_n stocks have a score.
        n_valid = momentum.notna().sum(axis=1)
        enough = n_valid >= self.top_n

        score_rank = momentum.rank(axis=1, ascending=False, na_option="bottom")
        top_mask = (score_rank <= self.top_n) & enough.values.reshape(-1, 1)

        # Inverse-vol weighting among selected stocks; zero vol -> NaN so it
        # never produces an infinite weight.
        returns = data.pct_change()
        vol = returns.rolling(self.vol_window).std().replace(0, np.nan)
        inv_vol = (1.0 / vol).where(top_mask, 0.0)

        row_sums = inv_vol.sum(axis=1).replace(0, np.nan)
        signals = inv_vol.div(row_sums, axis=0).fillna(0.0)

        # Regime scaling: compare current market vol to its 1-year median.
        market_vol = vol.mean(axis=1)
        vol_median = market_vol.rolling(252).median()
        vol_scale = (vol_median / market_vol).clip(0.3, 1.5)
        signals = signals.mul(vol_scale, axis=0)

        # Re-normalize so the total weight never exceeds 1.0 (no leverage).
        row_totals = signals.sum(axis=1)
        overflow = row_totals > 1.0
        signals[overflow] = signals[overflow].div(row_totals[overflow], axis=0)

        # Zero out the warm-up period where momentum or the regime filter
        # (vol_window + 252 days of history) is still undefined.
        warmup = max(self.lookback, self.vol_window + 252)
        signals.iloc[:warmup] = 0.0

        # Signal computed at close of day t trades on day t+1.
        return signals.shift(1).fillna(0.0)
|
||||
17
strategies/base.py
Normal file
17
strategies/base.py
Normal file
@@ -0,0 +1,17 @@
|
||||
from abc import ABC, abstractmethod
|
||||
|
||||
class Strategy(ABC):
    """
    Abstract base class for all strategies.

    Concrete strategies implement ``generate_signals`` and are otherwise
    free to define their own constructor parameters.
    """

    @abstractmethod
    def generate_signals(self, data):
        """
        Generate target portfolio weights for each stock.

        :param data: A pandas DataFrame of historical price data
            (rows = dates, columns = tickers).
        :return: A pandas DataFrame with the same index and columns as
            ``data``, containing the target weight of each asset per day
            (0.0 means no position).
        """
|
||||
15
strategies/buy_and_hold.py
Normal file
15
strategies/buy_and_hold.py
Normal file
@@ -0,0 +1,15 @@
|
||||
import pandas as pd
|
||||
from strategies.base import Strategy
|
||||
|
||||
class BuyAndHoldStrategy(Strategy):
    """
    Simplest possible baseline: hold every asset at equal weight forever.
    """

    def generate_signals(self, data):
        """
        Return a weights DataFrame assigning 1/N to every asset on every day.
        """
        n_assets = len(data.columns)
        return pd.DataFrame(1 / n_assets, index=data.index, columns=data.columns)
|
||||
55
strategies/dual_momentum.py
Normal file
55
strategies/dual_momentum.py
Normal file
@@ -0,0 +1,55 @@
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
from strategies.base import Strategy
|
||||
|
||||
|
||||
class DualMomentumStrategy(Strategy):
    """
    Dual momentum requiring agreement across three horizons.

    A stock qualifies only if its ~3-month, ~6-month, and ~12-month returns
    (each skipping the most recent 5 days) are ALL positive. Qualifiers are
    scored by the average of their percentile ranks across the three
    horizons; the top_n (or fewer, when fewer qualify) are equal-weighted.
    When few names pass the filter the portfolio naturally holds cash.
    """

    def __init__(self, top_n: int = 20):
        self.top_n = top_n  # maximum number of simultaneous positions

    def generate_signals(self, data: pd.DataFrame) -> pd.DataFrame:
        """Return daily target weights; rows sum to 1 or less (cash)."""
        skip = 5
        shifted = data.shift(skip)

        # Returns over ~3m / ~6m / ~12m, all ending `skip` days ago.
        mom_short = shifted.pct_change(63 - skip)
        mom_med = shifted.pct_change(126 - skip)
        mom_long = shifted.pct_change(252 - skip)

        # Absolute-momentum gate: every horizon must be positive.
        qualifies = (mom_short > 0) & (mom_med > 0) & (mom_long > 0)

        # Composite score = mean percentile rank across the horizons.
        rank_short = mom_short.rank(axis=1, pct=True, na_option="bottom")
        rank_med = mom_med.rank(axis=1, pct=True, na_option="bottom")
        rank_long = mom_long.rank(axis=1, pct=True, na_option="bottom")
        composite = (rank_short + rank_med + rank_long) / 3

        # Score only the names that passed the gate.
        eligible = composite.where(qualifies, np.nan)

        n_valid = eligible.notna().sum(axis=1)
        has_any = (n_valid >= 1).values.reshape(-1, 1)

        order = eligible.rank(axis=1, ascending=False, na_option="bottom")
        cutoff = n_valid.clip(upper=self.top_n).values.reshape(-1, 1)
        chosen = (order <= cutoff) & has_any & qualifies

        # Equal-weight the selections.
        weights = chosen.astype(float)
        totals = weights.sum(axis=1).replace(0, np.nan)
        weights = weights.div(totals, axis=0).fillna(0.0)

        # No positions until a full year of history exists.
        weights.iloc[:252] = 0.0

        # Trade the day after the signal forms.
        return weights.shift(1).fillna(0.0)
|
||||
32
strategies/inverse_vol.py
Normal file
32
strategies/inverse_vol.py
Normal file
@@ -0,0 +1,32 @@
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
from strategies.base import Strategy
|
||||
|
||||
|
||||
class InverseVolatilityStrategy(Strategy):
    """
    Risk-parity-style allocation: weight each asset by 1 / realized vol.

    Volatility is the rolling standard deviation of daily returns over
    `vol_window` days; calmer assets get proportionally larger weights so
    every position contributes roughly equal risk. A robust baseline for
    risk-based portfolio construction.
    """

    def __init__(self, vol_window: int = 20):
        self.vol_window = vol_window  # rolling window for realized vol

    def generate_signals(self, data: pd.DataFrame) -> pd.DataFrame:
        """Return daily weights proportional to inverse realized volatility."""
        daily_ret = data.pct_change()

        # Zero vol would blow up the reciprocal, so map it to NaN first.
        realized_vol = daily_ret.rolling(self.vol_window).std().replace(0, np.nan)
        raw_weights = 1.0 / realized_vol

        # Normalize each day so weights sum to 1 (NaN rows become all-zero).
        totals = raw_weights.sum(axis=1).replace(0, np.nan)
        weights = raw_weights.div(totals, axis=0).fillna(0.0)
        weights.iloc[:self.vol_window] = 0.0

        # Execute at the next day's open.
        return weights.shift(1).fillna(0.0)
|
||||
56
strategies/mean_reversion.py
Normal file
56
strategies/mean_reversion.py
Normal file
@@ -0,0 +1,56 @@
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
from strategies.base import Strategy
|
||||
|
||||
|
||||
class MeanReversionStrategy(Strategy):
    """
    Monthly "buy the dip" filtered by long-term momentum.

    Universe: stocks with a positive trailing 12-month return. Within that
    set, the names with the most negative trailing 1-month return (deepest
    dips) are bought equal-weight. Positions are refreshed only every
    `rebal_freq` trading days to keep turnover low.
    """

    def __init__(self, mom_lookback: int = 252, dip_window: int = 21,
                 rebal_freq: int = 21, top_n: int = 20):
        self.mom_lookback = mom_lookback  # trend filter horizon
        self.dip_window = dip_window      # dip measurement horizon
        self.rebal_freq = rebal_freq      # days between rebalances
        self.top_n = top_n                # max positions

    def generate_signals(self, data: pd.DataFrame) -> pd.DataFrame:
        """Return daily target weights, held constant between rebalances."""
        trend = data.pct_change(self.mom_lookback)
        in_uptrend = trend > 0

        recent = data.pct_change(self.dip_window)

        # Score only uptrending names; lowest recent return ranks first.
        dip_score = recent.where(in_uptrend, np.nan)
        dip_rank = dip_score.rank(axis=1, ascending=True, na_option="bottom")

        n_valid = dip_score.notna().sum(axis=1)
        n_take = n_valid.clip(upper=self.top_n)

        # Guard with in_uptrend again so bottom-ranked NaNs are never picked.
        selected = (dip_rank <= n_take.values.reshape(-1, 1)) & in_uptrend

        weights = selected.astype(float)
        totals = weights.sum(axis=1).replace(0, np.nan)
        weights = weights.div(totals, axis=0).fillna(0.0)

        warmup = self.mom_lookback + self.dip_window

        # Keep only the weights computed on rebalance days...
        rebal_mask = pd.Series(False, index=data.index)
        rebal_mask.iloc[warmup::self.rebal_freq] = True

        # ...and carry them forward until the next rebalance.
        weights[~rebal_mask] = np.nan
        weights = weights.ffill().fillna(0.0)

        weights.iloc[:warmup] = 0.0
        return weights.shift(1).fillna(0.0)
|
||||
37
strategies/momentum.py
Normal file
37
strategies/momentum.py
Normal file
@@ -0,0 +1,37 @@
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
from strategies.base import Strategy
|
||||
|
||||
|
||||
class MomentumStrategy(Strategy):
    """
    Classic cross-sectional momentum (Jegadeesh & Titman, 1993).

    Each day, assets are ranked by their return over the window ending
    `skip` days ago and starting `lookback` days ago; the top_n are held
    equal-weight. Skipping the most recent month sidesteps short-term
    reversal. Defaults approximate the academic 12-1 month factor.
    """

    def __init__(self, lookback: int = 252, skip: int = 21, top_n: int = 5):
        self.lookback = lookback  # ~12 months of history
        self.skip = skip          # ~1 month skipped to dodge reversal
        self.top_n = top_n        # number of winners held

    def generate_signals(self, data: pd.DataFrame) -> pd.DataFrame:
        """Return daily equal weights on the top_n momentum winners."""
        # Return measured from T-lookback to T-skip.
        past = data.shift(self.skip)
        mom = past.pct_change(self.lookback - self.skip)

        # Trade only when at least top_n assets have a defined score.
        valid_counts = mom.notna().sum(axis=1)
        tradable = (valid_counts >= self.top_n).values.reshape(-1, 1)

        order = mom.rank(axis=1, ascending=False, na_option="bottom")
        winners = (order <= self.top_n) & tradable

        weights = winners.astype(float)
        totals = weights.sum(axis=1).replace(0, np.nan)
        weights = weights.div(totals, axis=0).fillna(0.0)
        weights.iloc[:self.lookback] = 0.0

        # Next-day execution.
        return weights.shift(1).fillna(0.0)
|
||||
69
strategies/momentum_quality.py
Normal file
69
strategies/momentum_quality.py
Normal file
@@ -0,0 +1,69 @@
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
from strategies.base import Strategy
|
||||
|
||||
|
||||
class MomentumQualityStrategy(Strategy):
    """
    Momentum blended with price-derived "quality" signals.

    Quality proxies (no fundamentals needed):
      - Consistency: fraction of positive 21-day returns over the past year
        (steady grinders over volatile jumpers).
      - Shallow drawdown: smaller rolling worst peak-to-trough loss.

    The composite (50% momentum, 25% consistency, 25% drawdown) favors
    strong AND stable uptrends, screening out lottery-ticket spikes.
    """

    def __init__(self, momentum_period: int = 252, skip: int = 21,
                 quality_window: int = 252, top_n: int = 20):
        self.momentum_period = momentum_period  # momentum horizon
        self.skip = skip                        # recent days skipped
        self.quality_window = quality_window    # quality measurement window
        self.top_n = top_n                      # positions held

    def generate_signals(self, data: pd.DataFrame) -> pd.DataFrame:
        """Return daily equal weights on the top_n composite-score names."""
        # Momentum: return from T-momentum_period to T-skip.
        mom = data.shift(self.skip).pct_change(self.momentum_period - self.skip)

        # Quality 1: share of positive monthly (21d) returns in the window.
        monthly = data.pct_change(21)
        consistency = monthly.rolling(self.quality_window).apply(
            lambda w: (w > 0).sum() / len(w), raw=True
        )

        # Quality 2: negated rolling worst drawdown, so higher = shallower.
        peak = data.rolling(self.quality_window).max()
        drawdown = data / peak - 1
        inv_dd = -drawdown.rolling(self.quality_window).min()

        # Cross-sectional percentile ranks, blended 50/25/25.
        blended = (
            0.50 * mom.rank(axis=1, pct=True, na_option="bottom")
            + 0.25 * consistency.rank(axis=1, pct=True, na_option="bottom")
            + 0.25 * inv_dd.rank(axis=1, pct=True, na_option="bottom")
        )

        # Require a full top_n of scored names before allocating.
        n_valid = blended.notna().sum(axis=1)
        tradable = (n_valid >= self.top_n).values.reshape(-1, 1)
        order = blended.rank(axis=1, ascending=False, na_option="bottom")
        chosen = (order <= self.top_n) & tradable

        weights = chosen.astype(float)
        totals = weights.sum(axis=1).replace(0, np.nan)
        weights = weights.div(totals, axis=0).fillna(0.0)

        # +21 covers the monthly-return lag feeding the consistency factor.
        warmup = max(self.momentum_period, self.quality_window) + 21
        weights.iloc[:warmup] = 0.0

        return weights.shift(1).fillna(0.0)
|
||||
77
strategies/multi_factor.py
Normal file
77
strategies/multi_factor.py
Normal file
@@ -0,0 +1,77 @@
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
from strategies.base import Strategy
|
||||
|
||||
|
||||
class MultiFactorStrategy(Strategy):
    """
    Momentum + value stock selection gated by a benchmark-trend filter.

    Factors:
      - Momentum: return from (momentum_period + momentum_skip) days ago to
        momentum_skip days ago (skips the reversal-prone last month).
      - Value: rolling minimum price / current price — closer to the recent
        low scores higher.
      - Timing: hold positions only while the benchmark (default SPY) trades
        above its `window`-day moving average; otherwise go to cash.

    Fully vectorized; no Python loops over time.
    """

    def __init__(self, tickers, benchmark: str = "SPY", window: int = 200,
                 momentum_period: int = 230, momentum_skip: int = 20,
                 value_period: int = 250, top_n: int = 5):
        self.tickers = list(tickers)          # investable universe
        self.benchmark = benchmark            # timing index column in `data`
        self.window = window                  # benchmark MA length
        self.momentum_period = momentum_period
        self.momentum_skip = momentum_skip
        self.value_period = value_period      # lookback for the rolling low
        self.top_n = top_n                    # positions when risk-on

    def generate_signals(self, data: pd.DataFrame) -> pd.DataFrame:
        """Return daily target weights over self.tickers (0 when risk-off)."""
        prices = data[self.tickers]

        # Risk-on when the benchmark is above its moving average.
        bench = data[self.benchmark]
        risk_on = (bench > bench.rolling(self.window).mean()).values

        # Momentum: T-(momentum_period+skip) .. T-skip return.
        mom = prices.shift(self.momentum_skip).pct_change(self.momentum_period)

        # Value: recent low relative to today's price (higher = cheaper).
        val = prices.rolling(self.value_period).min() / prices

        # Combined cross-sectional score: sum of percentile ranks.
        combined = (
            mom.rank(axis=1, pct=True, na_option="bottom")
            + val.rank(axis=1, pct=True, na_option="bottom")
        )

        # Allocate only on days with at least top_n scored assets.
        n_valid = combined.notna().sum(axis=1)
        tradable = (n_valid >= self.top_n).values.reshape(-1, 1)
        order = combined.rank(axis=1, ascending=False, na_option="bottom")
        chosen = (order <= self.top_n) & tradable

        # Equal weight among the selected names.
        weights = chosen.astype(float)
        totals = weights.sum(axis=1).replace(0, np.nan)
        weights = weights.div(totals, axis=0).fillna(0.0)

        # Flatten to cash whenever the timing filter is off.
        weights[~risk_on] = 0.0

        # Suppress the warm-up period for every rolling input.
        warmup = max(self.window, self.momentum_period + self.momentum_skip,
                     self.value_period)
        weights.iloc[:warmup] = 0.0

        # Close-of-day-t signal trades at the open of day t+1.
        return weights.shift(1).fillna(0.0)
|
||||
66
strategies/recovery_momentum.py
Normal file
66
strategies/recovery_momentum.py
Normal file
@@ -0,0 +1,66 @@
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
from strategies.base import Strategy
|
||||
|
||||
|
||||
class RecoveryMomentumStrategy(Strategy):
    """
    Composite of recovery and long-term momentum, rebalanced monthly.

    Factor 1 — Recovery: price relative to its `recovery_window`-day low;
    a large bounce off the trough signals a V-shaped rebound in progress.
    Factor 2 — Momentum: the classic 12-1 month return (skips the most
    recent month to avoid short-term reversal noise).

    The two percentile ranks are averaged 50/50; the top_n names are
    equal-weighted and held between rebalance days to limit turnover.
    """

    def __init__(self, recovery_window: int = 63, mom_lookback: int = 252,
                 mom_skip: int = 21, rebal_freq: int = 21, top_n: int = 10):
        self.recovery_window = recovery_window  # trough lookback
        self.mom_lookback = mom_lookback        # momentum horizon
        self.mom_skip = mom_skip                # recent days skipped
        self.rebal_freq = rebal_freq            # days between rebalances
        self.top_n = top_n                      # positions held

    def generate_signals(self, data: pd.DataFrame) -> pd.DataFrame:
        """Return daily target weights, held constant between rebalances."""
        # Recovery: bounce off the rolling low.
        trough = data.rolling(self.recovery_window).min()
        bounce = data / trough - 1

        # 12-1 month momentum.
        mom = data.shift(self.mom_skip).pct_change(self.mom_lookback - self.mom_skip)

        # 50/50 blend of cross-sectional percentile ranks.
        blended = (
            0.5 * bounce.rank(axis=1, pct=True, na_option="bottom")
            + 0.5 * mom.rank(axis=1, pct=True, na_option="bottom")
        )

        # Pick top_n, but only on days with at least top_n scored names.
        order = blended.rank(axis=1, ascending=False, na_option="bottom")
        n_valid = blended.notna().sum(axis=1)
        tradable = (n_valid >= self.top_n).values.reshape(-1, 1)
        chosen = (order <= self.top_n) & tradable

        weights = chosen.astype(float)
        totals = weights.sum(axis=1).replace(0, np.nan)
        weights = weights.div(totals, axis=0).fillna(0.0)

        # Refresh weights only on rebalance days; hold in between.
        warmup = max(self.mom_lookback, self.recovery_window)
        rebal_mask = pd.Series(False, index=data.index)
        rebal_mask.iloc[warmup::self.rebal_freq] = True

        weights[~rebal_mask] = np.nan
        weights = weights.ffill().fillna(0.0)
        weights.iloc[:warmup] = 0.0

        return weights.shift(1).fillna(0.0)
|
||||
51
strategies/trend_following.py
Normal file
51
strategies/trend_following.py
Normal file
@@ -0,0 +1,51 @@
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
from strategies.base import Strategy
|
||||
|
||||
|
||||
class TrendFollowingStrategy(Strategy):
    """
    Per-stock trend filter combined with momentum ranking.

    Two-stage selection:
      1. Trend gate: a stock is eligible only while it trades above its own
         `ma_window`-day moving average (stock-level, not market-level).
      2. Momentum pick: among eligible names, the top_n by
         `momentum_period`-day return are held equal-weight.

    Because the gate is per stock, exposure scales down gradually in weak
    markets instead of flipping all-or-nothing like index-level timing.
    """

    def __init__(self, ma_window: int = 150, momentum_period: int = 126, top_n: int = 20):
        self.ma_window = ma_window              # per-stock trend MA length
        self.momentum_period = momentum_period  # ~6-month ranking horizon
        self.top_n = top_n                      # max positions

    def generate_signals(self, data: pd.DataFrame) -> pd.DataFrame:
        """Return daily equal weights on trending momentum leaders."""
        # Stage 1: price above its own moving average.
        above_ma = data > data.rolling(self.ma_window).mean()

        # Stage 2: momentum, defined only for names passing the gate.
        mom = data.pct_change(self.momentum_period).where(above_ma, np.nan)

        n_valid = mom.notna().sum(axis=1)
        any_valid = (n_valid >= 1).values.reshape(-1, 1)

        order = mom.rank(axis=1, ascending=False, na_option="bottom")
        n_take = n_valid.clip(upper=self.top_n).values.reshape(-1, 1)
        # Re-apply the gate so bottom-ranked NaNs can never slip through.
        chosen = (order <= n_take) & any_valid & above_ma

        weights = chosen.astype(float)
        totals = weights.sum(axis=1).replace(0, np.nan)
        weights = weights.div(totals, axis=0).fillna(0.0)

        warmup = max(self.ma_window, self.momentum_period)
        weights.iloc[:warmup] = 0.0

        return weights.shift(1).fillna(0.0)
|
||||
Reference in New Issue
Block a user