feat: 15 nuovi indicatori quant (common + deribit + bybit + macro + sentiment)
Common (mcp_common): - indicators.py: vol_cone, hurst_exponent, half_life_mean_reversion, garch11_forecast, autocorrelation, rolling_sharpe, var_cvar - options.py (nuovo): oi_weighted_skew, smile_asymmetry, atm_vs_wings_vol, dealer_gamma_profile, vanna_charm_aggregate - microstructure.py (nuovo): orderbook_imbalance (ratio + microprice + slope) - stats.py (nuovo): cointegration_test Engle-Granger + ADF helper Deribit (+6 tool MCP): - get_dealer_gamma_profile (net dealer gamma + flip level) - get_vanna_charm (vanna/charm aggregati pesati OI) - get_oi_weighted_skew, get_smile_asymmetry, get_atm_vs_wings_vol - get_orderbook_imbalance Bybit (+2 tool MCP): - get_orderbook_imbalance, get_basis_term_structure (futures dated curve) Macro (+2 tool MCP): - get_yield_curve_slope (2y10y/5y30y + butterfly + regime) - get_breakeven_inflation (FRED T5YIE/T10YIE/T5YIFR) Sentiment (+3 tool MCP): - get_funding_arb_spread (opportunità arb compatte annualizzate) - get_liquidation_heatmap (heuristic da OI delta + funding extreme, no feed paid Coinglass) - get_cointegration_pairs (Engle-Granger su coppie crypto Binance hourly) Tutto in TDD pure-Python (no numpy/scipy in mcp_common). README aggiornato con elenco completo. 442 test totali verdi. Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -1,5 +1,20 @@
|
||||
|
||||
from mcp_common.indicators import adx, atr, macd, rsi, sma
|
||||
import math
|
||||
|
||||
from mcp_common.indicators import (
|
||||
adx,
|
||||
atr,
|
||||
autocorrelation,
|
||||
garch11_forecast,
|
||||
half_life_mean_reversion,
|
||||
hurst_exponent,
|
||||
macd,
|
||||
rolling_sharpe,
|
||||
rsi,
|
||||
sma,
|
||||
var_cvar,
|
||||
vol_cone,
|
||||
)
|
||||
|
||||
|
||||
def test_rsi_simple():
|
||||
@@ -78,3 +93,168 @@ def test_adx_flat_market():
|
||||
# no directional movement → ADX near 0
|
||||
assert a["adx"] is not None
|
||||
assert a["adx"] < 5.0
|
||||
|
||||
|
||||
# ---------- vol_cone ----------
|
||||
|
||||
def _gbm_series(mu: float, sigma: float, n: int, seed: int = 42) -> list[float]:
|
||||
"""Mock GBM closes: deterministic for tests."""
|
||||
import random
|
||||
r = random.Random(seed)
|
||||
p = [100.0]
|
||||
for _ in range(n):
|
||||
z = r.gauss(0.0, 1.0)
|
||||
p.append(p[-1] * math.exp(mu / 252 + sigma / math.sqrt(252) * z))
|
||||
return p
|
||||
|
||||
|
||||
def test_vol_cone_returns_percentiles_per_window():
    """Every requested window yields percentile stats with ordered quantiles."""
    prices = _gbm_series(mu=0.0, sigma=0.5, n=400)
    cone = vol_cone(prices, windows=[10, 30, 60])
    assert set(cone) == {10, 30, 60}
    for window_stats in cone.values():
        assert "current" in window_stats
        for key in ("p10", "p50", "p90"):
            assert key in window_stats
        assert window_stats["p10"] <= window_stats["p50"] <= window_stats["p90"]
        # Annualized vol: plausible band for a sigma=0.5 GBM sample.
        assert 0.1 < window_stats["p50"] < 1.5
|
||||
|
||||
|
||||
def test_vol_cone_insufficient_data():
    """Too few closes for any window → current vol is None for each."""
    result = vol_cone([100.0, 101.0], windows=[10, 30])
    for window in (10, 30):
        assert result[window]["current"] is None
|
||||
|
||||
|
||||
# ---------- hurst_exponent ----------
|
||||
|
||||
def test_hurst_random_walk_near_half():
    """Random walk → Hurst ≈ 0.5; R/S has a well-known positive finite-sample bias."""
    series = _gbm_series(mu=0.0, sigma=0.3, n=500, seed=7)
    h = hurst_exponent(series)
    assert h is not None
    # Loose bound: still separates a random walk from strong trending (>0.85).
    assert 0.35 < h < 0.85
|
||||
|
||||
|
||||
def test_hurst_persistent_trend():
    """A strong monotonic trend should push H well above 0.5."""
    trending = [100.0 + i * 0.5 + math.sin(i / 10) * 0.1 for i in range(400)]
    h = hurst_exponent(trending)
    assert h is not None
    assert h > 0.85
|
||||
|
||||
|
||||
def test_hurst_insufficient_data():
    """Three points are not enough to estimate a Hurst exponent."""
    assert hurst_exponent([1.0, 2.0, 3.0]) is None
|
||||
|
||||
|
||||
# ---------- half_life_mean_reversion ----------
|
||||
|
||||
def test_half_life_mean_reverting_series():
    """OU process with theta=0.1 → half-life ≈ ln(2)/0.1 ≈ 6.93."""
    import random

    rng = random.Random(123)
    theta = 0.1
    mean_level = 100.0
    noise_vol = 0.5
    path = [mean_level]
    for _ in range(500):
        path.append(path[-1] + theta * (mean_level - path[-1]) + noise_vol * rng.gauss(0, 1))
    hl = half_life_mean_reversion(path)
    assert hl is not None
    # Broad tolerance around the theoretical value — finite-sample noise.
    assert 3.0 < hl < 20.0
|
||||
|
||||
|
||||
def test_half_life_trending_returns_none():
    """A pure linear trend never mean-reverts → None or effectively infinite."""
    trending = [100.0 + i for i in range(200)]
    hl = half_life_mean_reversion(trending)
    assert hl is None or hl > 1000
|
||||
|
||||
|
||||
# ---------- garch11_forecast ----------
|
||||
|
||||
def test_garch11_forecast_returns_positive_sigma():
    """A GARCH(1,1) fit yields a positive forecast and stationary parameters."""
    closes = _gbm_series(mu=0.0, sigma=0.4, n=500, seed=11)
    fit = garch11_forecast(closes)
    assert fit is not None
    assert fit["sigma_next"] > 0
    for param in ("alpha", "beta"):
        assert 0 < fit[param] < 1
    # Stationarity requires alpha + beta < 1.
    assert fit["alpha"] + fit["beta"] < 1.0
|
||||
|
||||
|
||||
def test_garch11_insufficient_data():
    """Two closes cannot support a GARCH fit → None."""
    assert garch11_forecast([100.0, 101.0]) is None
|
||||
|
||||
|
||||
# ---------- autocorrelation ----------
|
||||
|
||||
def test_autocorrelation_white_noise_low():
    """White noise → every autocorrelation ≈ 0 (generous ±4/sqrt(N) band)."""
    import random

    rng = random.Random(1)
    noise = [rng.gauss(0, 0.01) for _ in range(500)]
    acf = autocorrelation(noise, max_lag=5)
    assert len(acf) == 5
    bound = 2.0 / math.sqrt(len(noise))
    assert all(abs(value) < bound * 2 for value in acf.values())
|
||||
|
||||
|
||||
def test_autocorrelation_lag1_strong_for_ar1():
    """AR(1) with phi=0.7 → lag-1 autocorrelation ≈ 0.7, decaying geometrically."""
    import random

    rng = random.Random(2)
    path = [0.0]
    for _ in range(500):
        path.append(0.7 * path[-1] + rng.gauss(0, 0.1))
    acf = autocorrelation(path, max_lag=3)
    assert acf[1] > 0.5
    assert acf[2] > 0.2  # geometric decay keeps lag-2 well above noise
|
||||
|
||||
|
||||
def test_autocorrelation_insufficient_data():
    """A single observation yields no lags at all."""
    assert autocorrelation([1.0], max_lag=5) == {}
|
||||
|
||||
|
||||
# ---------- rolling_sharpe ----------
|
||||
|
||||
def test_rolling_sharpe_positive_for_uptrend():
    """A steady uptrend → positive Sharpe, with Sortino not far below it."""
    closes = [100.0 * (1 + 0.001 * i) for i in range(252)]
    metrics = rolling_sharpe(closes, window=60)
    assert metrics is not None
    assert metrics["sharpe"] > 0
    # Sortino can be much higher than Sharpe when there is little downside.
    assert metrics["sortino"] >= metrics["sharpe"] / 2
|
||||
|
||||
|
||||
def test_rolling_sharpe_zero_volatility():
    """Flat prices → zero variance → Sharpe is 0.0 by convention."""
    flat = [100.0] * 100
    metrics = rolling_sharpe(flat, window=60)
    assert metrics is not None
    assert metrics["sharpe"] == 0.0
|
||||
|
||||
|
||||
def test_rolling_sharpe_insufficient_data():
    """Fewer closes than the window → None."""
    assert rolling_sharpe([100.0, 101.0], window=60) is None
|
||||
|
||||
|
||||
# ---------- var_cvar ----------
|
||||
|
||||
def test_var_cvar_basic():
    """Historical VaR/CVaR: positive loss figures, CVaR ≥ VaR, 99% ≥ 95%."""
    import random

    rng = random.Random(3)
    returns = [rng.gauss(0.0005, 0.02) for _ in range(1000)]
    risk = var_cvar(returns, confidences=[0.95, 0.99])
    for key in ("var_95", "cvar_95", "var_99", "cvar_99"):
        assert key in risk
    # VaR is reported as a positive number representing the percentile loss.
    assert risk["var_95"] > 0
    assert risk["cvar_95"] >= risk["var_95"]  # tail mean is at least the quantile
    assert risk["var_99"] >= risk["var_95"]
|
||||
|
||||
|
||||
def test_var_cvar_insufficient_data():
    """One return is not enough to estimate any quantile → empty dict."""
    assert var_cvar([0.01], confidences=[0.95]) == {}
|
||||
|
||||
@@ -0,0 +1,59 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from mcp_common.microstructure import orderbook_imbalance
|
||||
|
||||
|
||||
def test_orderbook_imbalance_balanced():
    """A symmetric book → near-zero imbalance and per-side volume sums."""
    bids = [[100.0, 1.0], [99.5, 1.0], [99.0, 1.0]]
    asks = [[100.5, 1.0], [101.0, 1.0], [101.5, 1.0]]
    result = orderbook_imbalance(bids, asks, depth=3)
    assert abs(result["imbalance_ratio"]) < 0.01  # balanced book
    assert result["bid_volume"] == 3.0
    assert result["ask_volume"] == 3.0
    assert result["microprice"] is not None
|
||||
|
||||
|
||||
def test_orderbook_imbalance_bid_heavy():
    """Much larger bid size → strongly positive imbalance ratio."""
    bids = [[100.0, 5.0], [99.5, 5.0]]
    asks = [[100.5, 1.0], [101.0, 1.0]]
    result = orderbook_imbalance(bids, asks, depth=2)
    assert result["imbalance_ratio"] > 0.5  # strong bid pressure
    assert result["bid_volume"] == 10.0
    assert result["ask_volume"] == 2.0
|
||||
|
||||
|
||||
def test_orderbook_imbalance_ask_heavy():
    """Much larger ask size → strongly negative imbalance ratio."""
    bids = [[100.0, 1.0], [99.5, 1.0]]
    asks = [[100.5, 5.0], [101.0, 5.0]]
    result = orderbook_imbalance(bids, asks, depth=2)
    assert result["imbalance_ratio"] < -0.5
|
||||
|
||||
|
||||
def test_orderbook_imbalance_microprice_skew():
    """Microprice is a size-weighted mid, weighted by the *opposite* side's depth."""
    bids = [[100.0, 9.0]]
    asks = [[101.0, 1.0]]
    result = orderbook_imbalance(bids, asks, depth=1)
    # Large bid size pulls the microprice toward the ask (opposite-size weighting).
    assert result["microprice"] > 100.5
|
||||
|
||||
|
||||
def test_orderbook_imbalance_empty():
    """Empty book on both sides → no ratio and no microprice."""
    result = orderbook_imbalance([], [], depth=5)
    assert result["imbalance_ratio"] is None
    assert result["microprice"] is None
|
||||
|
||||
|
||||
def test_orderbook_imbalance_one_sided():
    """All liquidity on the bid side → ratio saturates at 1.0."""
    result = orderbook_imbalance([[100.0, 1.0]], [], depth=1)
    assert result["imbalance_ratio"] == 1.0
|
||||
|
||||
|
||||
def test_orderbook_imbalance_slope():
    """Slope measures how fast liquidity drops off: steep → thin depth."""
    steep_bids = [[100.0, 10.0], [99.0, 1.0]]  # depth collapses → high slope
    steep_asks = [[101.0, 10.0], [102.0, 1.0]]
    result = orderbook_imbalance(steep_bids, steep_asks, depth=2)
    assert result["bid_slope"] is not None
    # Bid liquidity drops by 9 over 1 price unit → slope around 9.
    assert result["bid_slope"] > 5.0
|
||||
@@ -0,0 +1,146 @@
|
||||
"""Pure tests for mcp_common.options (option-flow logic independent
of any exchange).
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import pytest
|
||||
|
||||
from mcp_common.options import (
|
||||
atm_vs_wings_vol,
|
||||
dealer_gamma_profile,
|
||||
oi_weighted_skew,
|
||||
smile_asymmetry,
|
||||
vanna_charm_aggregate,
|
||||
)
|
||||
|
||||
|
||||
# ---------- oi_weighted_skew ----------
|
||||
|
||||
def test_oi_weighted_skew_balanced():
    """OI split 50/50 between calls and puts at equal IV → skew close to 0."""
    legs = [
        {"iv": 0.5, "delta": 0.5, "oi": 100, "option_type": "call"},
        {"iv": 0.5, "delta": -0.5, "oi": 100, "option_type": "put"},
    ]
    result = oi_weighted_skew(legs)
    assert abs(result["skew"]) < 0.01
|
||||
|
||||
|
||||
def test_oi_weighted_skew_put_heavy():
    """Put-heavy OI at higher IV → positive skew (put IV above call IV)."""
    legs = [
        {"iv": 0.4, "delta": 0.5, "oi": 50, "option_type": "call"},
        {"iv": 0.7, "delta": -0.5, "oi": 500, "option_type": "put"},
    ]
    result = oi_weighted_skew(legs)
    assert result["skew"] > 0
    assert result["call_iv_weighted"] > 0
    assert result["put_iv_weighted"] > result["call_iv_weighted"]
|
||||
|
||||
|
||||
def test_oi_weighted_skew_empty():
    """No legs → all-None payload with zero total OI."""
    expected = {"skew": None, "call_iv_weighted": None, "put_iv_weighted": None, "total_oi": 0}
    assert oi_weighted_skew([]) == expected
|
||||
|
||||
|
||||
# ---------- smile_asymmetry ----------
|
||||
|
||||
def test_smile_asymmetry_symmetric():
    """A smile symmetric around ATM → asymmetry ≈ 0."""
    legs = [
        {"strike": 80, "iv": 0.55, "option_type": "put"},
        {"strike": 90, "iv": 0.50, "option_type": "put"},
        {"strike": 100, "iv": 0.45, "option_type": "call"},
        {"strike": 110, "iv": 0.50, "option_type": "call"},
        {"strike": 120, "iv": 0.55, "option_type": "call"},
    ]
    result = smile_asymmetry(legs, spot=100.0)
    assert result["atm_iv"] is not None
    assert abs(result["asymmetry"]) < 0.05
|
||||
|
||||
|
||||
def test_smile_asymmetry_put_skew():
    """OTM puts (low strike) IV well above OTM calls (high strike) → asymmetry > 0."""
    legs = [
        {"strike": 80, "iv": 0.80, "option_type": "put"},
        {"strike": 100, "iv": 0.50, "option_type": "call"},
        {"strike": 120, "iv": 0.45, "option_type": "call"},
    ]
    result = smile_asymmetry(legs, spot=100.0)
    assert result["asymmetry"] > 0.1
|
||||
|
||||
|
||||
def test_smile_asymmetry_no_atm():
    """No strike anywhere near spot → ATM IV cannot be determined."""
    far_leg = [{"strike": 200, "iv": 0.5, "option_type": "call"}]
    result = smile_asymmetry(far_leg, spot=100.0)
    assert result["atm_iv"] is None
|
||||
|
||||
|
||||
# ---------- atm_vs_wings_vol ----------
|
||||
|
||||
def test_atm_vs_wings_vol_basic():
    """ATM and 25-delta wing IVs are picked out; wings above ATM → positive richness."""
    legs = [
        {"strike": 90, "iv": 0.55, "delta": -0.25, "option_type": "put"},
        {"strike": 100, "iv": 0.45, "delta": 0.5, "option_type": "call"},
        {"strike": 110, "iv": 0.50, "delta": 0.25, "option_type": "call"},
    ]
    result = atm_vs_wings_vol(legs, spot=100.0)
    assert result["atm_iv"] == pytest.approx(0.45, rel=1e-3)
    assert result["wing_25d_call_iv"] == pytest.approx(0.50, rel=1e-3)
    assert result["wing_25d_put_iv"] == pytest.approx(0.55, rel=1e-3)
    # ATM below the wings → wing richness is positive.
    assert result["wing_richness"] > 0
|
||||
|
||||
|
||||
def test_atm_vs_wings_vol_no_data():
    """No legs → ATM IV is None."""
    result = atm_vs_wings_vol([], spot=100.0)
    assert result["atm_iv"] is None
|
||||
|
||||
|
||||
# ---------- dealer_gamma_profile ----------
|
||||
|
||||
def test_dealer_gamma_profile_assumes_dealer_short_calls():
    """Convention: dealer SHORT calls (sells calls to retail), LONG puts.

    Call OI contributes negative dealer gamma, put OI positive dealer gamma.
    """
    legs = [
        {"strike": 100, "gamma": 0.01, "oi": 1000, "option_type": "call"},
        {"strike": 100, "gamma": 0.01, "oi": 500, "option_type": "put"},
    ]
    result = dealer_gamma_profile(legs, spot=100.0)
    # Call gamma exceeds put gamma at the same strike → net dealer short gamma.
    assert len(result["by_strike"]) == 1
    strike_row = result["by_strike"][0]
    assert strike_row["call_dealer_gamma"] < 0
    assert strike_row["put_dealer_gamma"] > 0
    assert strike_row["net_dealer_gamma"] < 0  # calls dominate
    assert result["total_net_dealer_gamma"] < 0
|
||||
|
||||
|
||||
def test_dealer_gamma_profile_empty():
    """No legs → empty per-strike table and zero total gamma."""
    result = dealer_gamma_profile([], spot=100.0)
    assert result["by_strike"] == []
    assert result["total_net_dealer_gamma"] == 0.0
|
||||
|
||||
|
||||
# ---------- vanna_charm_aggregate ----------
|
||||
|
||||
def test_vanna_charm_aggregate_basic():
    """Mixed call/put legs with unequal OI → nonzero net vanna, both legs counted."""
    legs = [
        {"strike": 100, "vanna": 0.05, "charm": -0.001, "oi": 1000, "option_type": "call"},
        {"strike": 100, "vanna": -0.05, "charm": 0.001, "oi": 500, "option_type": "put"},
    ]
    result = vanna_charm_aggregate(legs, spot=100.0)
    assert result["total_vanna"] != 0  # some net exposure remains
    assert "total_charm" in result
    assert result["legs_analyzed"] == 2
|
||||
|
||||
|
||||
def test_vanna_charm_aggregate_skip_missing_greeks():
    """Legs missing either greek must be skipped entirely."""
    legs = [
        {"strike": 100, "vanna": None, "charm": -0.001, "oi": 1000, "option_type": "call"},
        {"strike": 100, "vanna": 0.05, "charm": None, "oi": 500, "option_type": "put"},
    ]
    result = vanna_charm_aggregate(legs, spot=100.0)
    # Both legs carry at least one None greek → both skipped.
    assert result["legs_analyzed"] == 0
|
||||
@@ -0,0 +1,52 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import math
|
||||
import random
|
||||
|
||||
from mcp_common.stats import cointegration_test
|
||||
|
||||
|
||||
def test_cointegrated_synthetic_pair():
    """Synthetic cointegrated pair: B is a random walk, A = 2*B + stationary noise."""
    rng = random.Random(1)
    walk = [100.0]
    for _ in range(300):
        walk.append(walk[-1] + rng.gauss(0, 1))
    linked = [2 * level + rng.gauss(0, 0.5) for level in walk]
    result = cointegration_test(linked, walk)
    assert result["cointegrated"] is True
    assert result["beta"] == pytest_approx(2.0, rel=0.05)
    assert result["adf_t_stat"] is not None
    assert result["adf_t_stat"] < -2.86
|
||||
|
||||
|
||||
def test_not_cointegrated_independent_walks():
    """Two independent random walks → non-stationary spread → no cointegration."""
    rng = random.Random(2)
    walk_a = [100.0]
    walk_b = [100.0]
    for _ in range(300):
        walk_a.append(walk_a[-1] + rng.gauss(0, 1))
        walk_b.append(walk_b[-1] + rng.gauss(0, 1))
    result = cointegration_test(walk_a, walk_b)
    # For two independent RWs the ADF t-stat is usually > -2.86 → not cointegrated.
    assert result["cointegrated"] is False or result["adf_t_stat"] > -3.0
|
||||
|
||||
|
||||
def test_cointegration_short_series():
    """Series far too short for regression → both outputs None."""
    result = cointegration_test([1.0, 2.0], [3.0, 4.0])
    assert result["cointegrated"] is None
    assert result["beta"] is None
|
||||
|
||||
|
||||
def test_cointegration_mismatched_length():
    """Length mismatch between the two series → None result."""
    result = cointegration_test([1.0, 2.0, 3.0], [1.0, 2.0])
    assert result["cointegrated"] is None
|
||||
|
||||
|
||||
def pytest_approx(value, rel):
    """Tiny stand-in for ``pytest.approx`` with relative tolerance only.

    Returns an object whose ``==`` succeeds when the other operand lies
    within ``abs(value) * rel`` of ``value``. Kept local so this module
    does not have to import pytest just for approximate comparison.

    Fixes over the naive version: a ``__repr__`` so failed asserts print
    something readable, and non-numeric operands compare unequal instead
    of raising TypeError inside the assertion.
    """

    class _Approx:
        def __eq__(self, other):
            try:
                return abs(other - value) <= abs(value) * rel
            except TypeError:
                # Not a number: fall back to Python's default (→ unequal).
                return NotImplemented

        def __repr__(self):
            # Shown in pytest failure output instead of an opaque object.
            return f"approx({value!r}, rel={rel!r})"

    return _Approx()
|
||||
Reference in New Issue
Block a user