feat: 15 nuovi indicatori quant (common + deribit + bybit + macro + sentiment)
Common (mcp_common): - indicators.py: vol_cone, hurst_exponent, half_life_mean_reversion, garch11_forecast, autocorrelation, rolling_sharpe, var_cvar - options.py (nuovo): oi_weighted_skew, smile_asymmetry, atm_vs_wings_vol, dealer_gamma_profile, vanna_charm_aggregate - microstructure.py (nuovo): orderbook_imbalance (ratio + microprice + slope) - stats.py (nuovo): cointegration_test Engle-Granger + ADF helper Deribit (+6 tool MCP): - get_dealer_gamma_profile (net dealer gamma + flip level) - get_vanna_charm (vanna/charm aggregati pesati OI) - get_oi_weighted_skew, get_smile_asymmetry, get_atm_vs_wings_vol - get_orderbook_imbalance Bybit (+2 tool MCP): - get_orderbook_imbalance, get_basis_term_structure (futures dated curve) Macro (+2 tool MCP): - get_yield_curve_slope (2y10y/5y30y + butterfly + regime) - get_breakeven_inflation (FRED T5YIE/T10YIE/T5YIFR) Sentiment (+3 tool MCP): - get_funding_arb_spread (opportunità arb compatte annualizzate) - get_liquidation_heatmap (heuristic da OI delta + funding extreme, no feed paid Coinglass) - get_cointegration_pairs (Engle-Granger su coppie crypto Binance hourly) Tutto in TDD pure-Python (no numpy/scipy in mcp_common). README aggiornato con elenco completo. 442 test totali verdi. Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -1,5 +1,20 @@
|
||||
|
||||
from mcp_common.indicators import adx, atr, macd, rsi, sma
|
||||
import math
|
||||
|
||||
from mcp_common.indicators import (
|
||||
adx,
|
||||
atr,
|
||||
autocorrelation,
|
||||
garch11_forecast,
|
||||
half_life_mean_reversion,
|
||||
hurst_exponent,
|
||||
macd,
|
||||
rolling_sharpe,
|
||||
rsi,
|
||||
sma,
|
||||
var_cvar,
|
||||
vol_cone,
|
||||
)
|
||||
|
||||
|
||||
def test_rsi_simple():
|
||||
@@ -78,3 +93,168 @@ def test_adx_flat_market():
|
||||
# no directional movement → ADX near 0
|
||||
assert a["adx"] is not None
|
||||
assert a["adx"] < 5.0
|
||||
|
||||
|
||||
# ---------- vol_cone ----------
|
||||
|
||||
def _gbm_series(mu: float, sigma: float, n: int, seed: int = 42) -> list[float]:
|
||||
"""Mock GBM closes: deterministic for tests."""
|
||||
import random
|
||||
r = random.Random(seed)
|
||||
p = [100.0]
|
||||
for _ in range(n):
|
||||
z = r.gauss(0.0, 1.0)
|
||||
p.append(p[-1] * math.exp(mu / 252 + sigma / math.sqrt(252) * z))
|
||||
return p
|
||||
|
||||
|
||||
def test_vol_cone_returns_percentiles_per_window():
    """Each requested window maps to current vol plus p10/p50/p90 stats."""
    closes = _gbm_series(mu=0.0, sigma=0.5, n=400)
    out = vol_cone(closes, windows=[10, 30, 60])
    assert set(out.keys()) == {10, 30, 60}
    # Keys were checked above, so iterate the per-window stats directly
    # (the original bound an unused `w` from .items()).
    for stats in out.values():
        assert "current" in stats
        assert "p10" in stats and "p50" in stats and "p90" in stats
        # Percentiles must be monotone by construction.
        assert stats["p10"] <= stats["p50"] <= stats["p90"]
        # annualized — sensible range for sigma=0.5
        assert 0.1 < stats["p50"] < 1.5
|
||||
|
||||
|
||||
def test_vol_cone_insufficient_data():
    """With only two closes, every window must report current=None."""
    result = vol_cone([100.0, 101.0], windows=[10, 30])
    for window in (10, 30):
        assert result[window]["current"] is None
|
||||
|
||||
|
||||
# ---------- hurst_exponent ----------
|
||||
|
||||
def test_hurst_random_walk_near_half():
    """GBM is a random walk, so the Hurst estimate should sit near 0.5."""
    series = _gbm_series(mu=0.0, sigma=0.3, n=500, seed=7)
    estimate = hurst_exponent(series)
    assert estimate is not None
    # R/S estimation carries a well-known positive bias on finite samples.
    # Keep the band wide while still ruling out strong trending (>0.85).
    assert 0.35 < estimate < 0.85
|
||||
|
||||
|
||||
def test_hurst_persistent_trend():
    """A near-monotonic ramp is strongly persistent → H well above 0.5."""
    series = [100.0 + i * 0.5 + math.sin(i / 10) * 0.1 for i in range(400)]
    estimate = hurst_exponent(series)
    assert estimate is not None
    assert estimate > 0.85
|
||||
|
||||
|
||||
def test_hurst_insufficient_data():
    """Fewer points than the estimator needs → None."""
    result = hurst_exponent([1.0, 2.0, 3.0])
    assert result is None
|
||||
|
||||
|
||||
# ---------- half_life_mean_reversion ----------
|
||||
|
||||
def test_half_life_mean_reverting_series():
    """OU process with theta=0.1 → half-life ≈ ln(2)/0.1 ≈ 6.93."""
    import random

    rng = random.Random(123)
    theta = 0.1
    mu = 100.0
    sigma = 0.5
    path = [mu]
    for _ in range(500):
        prev = path[-1]
        path.append(prev + theta * (mu - prev) + sigma * rng.gauss(0, 1))
    hl = half_life_mean_reversion(path)
    assert hl is not None
    # Finite-sample noise → accept a broad band around the theoretical value.
    assert 3.0 < hl < 20.0
|
||||
|
||||
|
||||
def test_half_life_trending_returns_none():
    """A pure linear trend never mean-reverts → None or an absurd half-life."""
    trending = [100.0 + i for i in range(200)]
    hl = half_life_mean_reversion(trending)
    assert hl is None or hl > 1000
|
||||
|
||||
|
||||
# ---------- garch11_forecast ----------
|
||||
|
||||
def test_garch11_forecast_returns_positive_sigma():
    """A fitted GARCH(1,1) must yield positive sigma and a stationary fit."""
    closes = _gbm_series(mu=0.0, sigma=0.4, n=500, seed=11)
    out = garch11_forecast(closes)
    assert out is not None
    assert out["sigma_next"] > 0
    for param in ("alpha", "beta"):
        assert 0 < out[param] < 1
    # alpha + beta < 1 is the GARCH(1,1) covariance-stationarity condition.
    assert out["alpha"] + out["beta"] < 1.0
|
||||
|
||||
|
||||
def test_garch11_insufficient_data():
    """Two closes are not enough to fit a GARCH model → None."""
    result = garch11_forecast([100.0, 101.0])
    assert result is None
|
||||
|
||||
|
||||
# ---------- autocorrelation ----------
|
||||
|
||||
def test_autocorrelation_white_noise_low():
    """White noise: every autocorrelation ≈ 0 (within ~±2/sqrt(N))."""
    import random

    r = random.Random(1)
    rets = [r.gauss(0, 0.01) for _ in range(500)]
    out = autocorrelation(rets, max_lag=5)
    assert len(out) == 5
    bound = 2.0 / math.sqrt(len(rets))
    # Only the magnitudes matter here; the lag keys are not needed
    # (the original bound an unused `lag` from .items()).
    for val in out.values():
        assert abs(val) < bound * 2  # generous
|
||||
|
||||
|
||||
def test_autocorrelation_lag1_strong_for_ar1():
    """AR(1) with phi=0.7 → lag-1 autocorr ≈ 0.7, decaying geometrically."""
    import random

    rng = random.Random(2)
    series = [0.0]
    for _ in range(500):
        series.append(0.7 * series[-1] + rng.gauss(0, 0.1))
    acf = autocorrelation(series, max_lag=3)
    assert acf[1] > 0.5
    # Geometric decay: lag-2 should still be clearly positive (~phi**2).
    assert acf[2] > 0.2
|
||||
|
||||
|
||||
def test_autocorrelation_insufficient_data():
    """A single observation yields an empty mapping."""
    result = autocorrelation([1.0], max_lag=5)
    assert result == {}
|
||||
|
||||
|
||||
# ---------- rolling_sharpe ----------
|
||||
|
||||
def test_rolling_sharpe_positive_for_uptrend():
    """Steadily rising closes → positive Sharpe; Sortino stays comparable."""
    uptrend = [100.0 * (1 + 0.001 * i) for i in range(252)]
    result = rolling_sharpe(uptrend, window=60)
    assert result is not None
    assert result["sharpe"] > 0
    # Sortino can greatly exceed Sharpe when downside moves are rare.
    assert result["sortino"] >= result["sharpe"] / 2
|
||||
|
||||
|
||||
def test_rolling_sharpe_zero_volatility():
    """Constant price → zero variance → Sharpe is 0.0 by convention."""
    flat = [100.0] * 100
    result = rolling_sharpe(flat, window=60)
    assert result is not None
    assert result["sharpe"] == 0.0
|
||||
|
||||
|
||||
def test_rolling_sharpe_insufficient_data():
    """Fewer closes than the window → None."""
    result = rolling_sharpe([100.0, 101.0], window=60)
    assert result is None
|
||||
|
||||
|
||||
# ---------- var_cvar ----------
|
||||
|
||||
def test_var_cvar_basic():
    """VaR/CVaR keys present; CVaR dominates VaR and 99% dominates 95%."""
    import random

    rng = random.Random(3)
    returns = [rng.gauss(0.0005, 0.02) for _ in range(1000)]
    out = var_cvar(returns, confidences=[0.95, 0.99])
    for key in ("var_95", "cvar_95", "var_99", "cvar_99"):
        assert key in out
    # VaR is reported as a positive loss magnitude.
    assert out["var_95"] > 0
    # Expected shortfall is at least as bad as the VaR threshold.
    assert out["cvar_95"] >= out["var_95"]
    # Higher confidence → deeper tail → larger loss.
    assert out["var_99"] >= out["var_95"]
|
||||
|
||||
|
||||
def test_var_cvar_insufficient_data():
    """A single return is not enough → empty dict."""
    result = var_cvar([0.01], confidences=[0.95])
    assert result == {}
|
||||
|
||||
Reference in New Issue
Block a user