feat(V2): migrate common/ (indicators, options, microstructure, stats, http, audit, logging, mcp_bridge + auth)

AdrianoDev
2026-04-30 18:12:11 +02:00
parent 04a34fc179
commit 3868ba60ce
9 changed files with 1411 additions and 0 deletions
cerbero_mcp/common/audit.py (+121)
@@ -0,0 +1,121 @@
"""Audit log strutturato per write endpoint MCP (place_order, cancel,
set_*, close_*, transfer_*). Usa un logger dedicato `mcp.audit` su stream
JSON.
Sink:
- stdout/stderr (sempre): tramite root JSON logger configurato da
`mcp_common.logging.configure_root_logging`.
- File JSONL persistente (opzionale): se env var `AUDIT_LOG_FILE` è
settata, aggiunge un `TimedRotatingFileHandler` che ruota a mezzanotte
con `AUDIT_LOG_BACKUP_DAYS` di retention (default 30). Una riga JSON
per record (formato `.jsonl`).
Per VPS produzione: setta `AUDIT_LOG_FILE=/var/log/cerbero-mcp/<service>.audit.jsonl`
con bind mount del volume `/var/log/cerbero-mcp` nel docker-compose.
Payload sensibile (api_key, secret) già filtrato dal SecretsFilter
globale; qui non si include creds.
"""
from __future__ import annotations
import logging
import os
from logging.handlers import TimedRotatingFileHandler
from typing import Any
from cerbero_mcp.common.auth import Principal
from cerbero_mcp.common.logging import SecretsFilter, get_json_logger
try:
from pythonjsonlogger.json import JsonFormatter as _JsonFormatter # noqa: N813
except ImportError:
from pythonjsonlogger.jsonlogger import JsonFormatter as _JsonFormatter # noqa: N813
_logger = get_json_logger("mcp.audit", level=logging.INFO)
_file_handler_attached = False
def _configure_audit_sink() -> None:
"""Aggiunge FileHandler al logger mcp.audit se AUDIT_LOG_FILE è settato.
Idempotente: chiamato la prima volta da audit_write_op, poi no-op.
"""
global _file_handler_attached
if _file_handler_attached:
return
file_path = os.environ.get("AUDIT_LOG_FILE", "").strip()
if not file_path:
_file_handler_attached = True
return
backup_days = int(os.environ.get("AUDIT_LOG_BACKUP_DAYS", "30"))
os.makedirs(os.path.dirname(file_path) or ".", exist_ok=True)
handler = TimedRotatingFileHandler(
file_path,
when="midnight",
interval=1,
backupCount=backup_days,
encoding="utf-8",
utc=True,
)
handler.setFormatter(_JsonFormatter("%(asctime)s %(name)s %(levelname)s %(message)s"))
handler.addFilter(SecretsFilter())
_logger.addHandler(handler)
_file_handler_attached = True
def audit_write_op(
*,
principal: Principal | None,
action: str,
exchange: str,
target: str | None = None,
payload: dict[str, Any] | None = None,
result: dict[str, Any] | None = None,
error: str | None = None,
) -> None:
"""Emit a structured audit log record per write operation.
principal: chi ha invocato (None se anonimo, ma normalmente _check
impedisce di arrivare qui senza principal).
action: nome del tool (es. "place_order", "cancel_order").
exchange: identificatore servizio (deribit, bybit, alpaca, hyperliquid).
target: instrument/symbol/order_id su cui si agisce.
payload: input non-sensibile (qty, side, leverage, ecc.).
result: output del client (order_id, status, ecc.).
error: stringa errore se l'operazione ha fallito.
"""
_configure_audit_sink()
record: dict[str, Any] = {
"audit_event": "write_op",
"action": action,
"exchange": exchange,
"principal": principal.name if principal else None,
"target": target,
"payload": payload or {},
}
if result is not None:
record["result"] = _summarize_result(result)
if error is not None:
record["error"] = error
_logger.error("audit", extra=record)
else:
_logger.info("audit", extra=record)
def _summarize_result(result: dict[str, Any]) -> dict[str, Any]:
"""Estrae i campi rilevanti dal result (order_id, state, error code)
per evitare di loggare payload enormi.
"""
keys = (
"order_id", "order_link_id", "combo_instrument", "state", "status",
"code", "error", "stop_price", "tp_price", "transfer_id",
)
out: dict[str, Any] = {}
for k in keys:
if k in result:
out[k] = result[k]
if "orders" in result:
out["orders_count"] = len(result["orders"])
return out
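A minimal usage sketch, assuming the module is importable as cerbero_mcp.common.audit (path and values are illustrative):
    import os
    os.environ["AUDIT_LOG_FILE"] = "/tmp/demo.audit.jsonl"  # illustrative path
    from cerbero_mcp.common.audit import audit_write_op
    from cerbero_mcp.common.auth import Principal
    audit_write_op(
        principal=Principal(name="core", capabilities={"core"}),
        action="place_order",
        exchange="deribit",
        target="BTC-PERPETUAL",
        payload={"side": "buy", "qty": 10},
        result={"order_id": "abc-123", "status": "open"},  # illustrative result
    )
    # One JSON line goes to stderr; the same record is appended to the JSONL file.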
cerbero_mcp/common/auth.py (+98)
@@ -0,0 +1,98 @@
from __future__ import annotations
from collections.abc import Callable
from dataclasses import dataclass, field
from functools import wraps
from fastapi import HTTPException, Request, status
@dataclass
class Principal:
name: str
capabilities: set[str] = field(default_factory=set)
@dataclass
class TokenStore:
tokens: dict[str, Principal]
def get(self, token: str) -> Principal | None:
return self.tokens.get(token)
def require_principal(request: Request) -> Principal:
auth = request.headers.get("Authorization", "")
if not auth.startswith("Bearer "):
raise HTTPException(status.HTTP_401_UNAUTHORIZED, "missing bearer token")
token = auth[len("Bearer "):].strip()
store: TokenStore = request.app.state.token_store
principal = store.get(token)
if principal is None:
raise HTTPException(status.HTTP_403_FORBIDDEN, "invalid token")
return principal
def acl_requires(*, core: bool = False, observer: bool = False) -> Callable:
"""Decorator: require at least one matching capability."""
allowed: set[str] = set()
if core:
allowed.add("core")
if observer:
allowed.add("observer")
def decorator(func: Callable) -> Callable:
@wraps(func)
async def async_wrapper(*args, **kwargs):
principal = kwargs.get("principal")
if principal is None:
for a in args:
if isinstance(a, Principal):
principal = a
break
if principal is None or not (principal.capabilities & allowed):
raise HTTPException(
status.HTTP_403_FORBIDDEN,
f"capability required: {allowed}",
)
            return await func(*args, **kwargs)  # async_wrapper is only selected for coroutines
@wraps(func)
def sync_wrapper(*args, **kwargs):
principal = kwargs.get("principal")
if principal is None:
for a in args:
if isinstance(a, Principal):
principal = a
break
if principal is None or not (principal.capabilities & allowed):
raise HTTPException(
status.HTTP_403_FORBIDDEN,
f"capability required: {allowed}",
)
return func(*args, **kwargs)
return async_wrapper if _is_coro(func) else sync_wrapper
return decorator
def _is_coro(func: Callable) -> bool:
import asyncio
return asyncio.iscoroutinefunction(func)
def load_token_store_from_files(
core_token_file: str | None,
observer_token_file: str | None,
) -> TokenStore:
tokens: dict[str, Principal] = {}
if core_token_file:
with open(core_token_file) as f:
tokens[f.read().strip()] = Principal(name="core", capabilities={"core"})
if observer_token_file:
with open(observer_token_file) as f:
tokens[f.read().strip()] = Principal(
name="observer", capabilities={"observer"}
)
return TokenStore(tokens=tokens)
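A minimal wiring sketch, assuming the module is importable as cerbero_mcp.common.auth (token value and route are illustrative):
    from fastapi import Depends, FastAPI
    from cerbero_mcp.common.auth import Principal, TokenStore, acl_requires, require_principal
    app = FastAPI()
    app.state.token_store = TokenStore(
        tokens={"dev-core-token": Principal(name="core", capabilities={"core"})}
    )
    @app.get("/tools/ping")
    @acl_requires(core=True, observer=True)
    async def ping(principal: Principal = Depends(require_principal)):
        return {"ok": True, "principal": principal.name}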
cerbero_mcp/common/http.py (+85)
@@ -0,0 +1,85 @@
"""HTTP client factory con retry/backoff su errori transient.
Wrap leggero attorno a httpx.AsyncClient: aggiunge AsyncHTTPTransport
con retries=N per gestire connection errors / DNS / refused. Per retry
su 5xx HTTP response usa `request_with_retry()` (decoratore separato).
Usage standard:
async with async_client(timeout=15) as http:
resp = await http.get(url)
Equivalente a httpx.AsyncClient(timeout=15) ma con retry transport su
errori di livello connessione.
"""
from __future__ import annotations
import asyncio
import logging
from collections.abc import Awaitable, Callable
from typing import Any, TypeVar
import httpx
logger = logging.getLogger(__name__)
T = TypeVar("T")
DEFAULT_RETRIES = 3
DEFAULT_TIMEOUT = 15.0
def async_client(
*,
timeout: float = DEFAULT_TIMEOUT,
retries: int = DEFAULT_RETRIES,
follow_redirects: bool = False,
**kwargs: Any,
) -> httpx.AsyncClient:
"""httpx.AsyncClient con AsyncHTTPTransport(retries=N) di default.
retries gestisce connection errors / refused / DNS — non 5xx HTTP.
"""
transport = httpx.AsyncHTTPTransport(retries=retries)
return httpx.AsyncClient(
timeout=timeout,
transport=transport,
follow_redirects=follow_redirects,
**kwargs,
)
async def call_with_retry(
fn: Callable[[], Awaitable[T]],
*,
max_attempts: int = 3,
base_delay: float = 0.5,
max_delay: float = 8.0,
retry_on: tuple[type[BaseException], ...] = (httpx.TransportError, httpx.TimeoutException),
) -> T:
"""Retry generico async con exponential backoff.
Ritenta `fn()` se solleva una delle exception in `retry_on`. Backoff
raddoppia (0.5, 1, 2, 4, ...) clipped a max_delay. Solleva l'ultima
exception se max_attempts raggiunto.
Usabile su SDK sincroni avvolti in asyncio.to_thread (pybit, alpaca):
result = await call_with_retry(lambda: client._run(self._http.get_tickers, ...))
"""
delay = base_delay
last_exc: BaseException | None = None
for attempt in range(1, max_attempts + 1):
try:
return await fn()
except retry_on as e:
last_exc = e
if attempt == max_attempts:
break
logger.warning(
"transient error, retrying (%d/%d) in %.1fs: %s",
attempt, max_attempts, delay, type(e).__name__,
)
await asyncio.sleep(delay)
delay = min(delay * 2, max_delay)
assert last_exc is not None
raise last_exc
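A sketch combining the two helpers, assuming the module is importable as cerbero_mcp.common.http (URL is illustrative):
    import asyncio
    import httpx
    from cerbero_mcp.common.http import async_client, call_with_retry
    async def fetch_ticker() -> dict:
        async with async_client(timeout=10) as http:
            async def _get() -> dict:
                resp = await http.get("https://example.com/api/ticker")  # illustrative URL
                resp.raise_for_status()  # turn 5xx into HTTPStatusError so it gets retried
                return resp.json()
            return await call_with_retry(
                _get,
                max_attempts=4,
                retry_on=(httpx.TransportError, httpx.HTTPStatusError),
            )
    asyncio.run(fetch_ticker())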
cerbero_mcp/common/indicators.py (+416)
@@ -0,0 +1,416 @@
from __future__ import annotations
import math
def sma(values: list[float], period: int) -> float | None:
if len(values) < period:
return None
return sum(values[-period:]) / period
def rsi(closes: list[float], period: int = 14) -> float | None:
if len(closes) < period + 1:
return None
gains: list[float] = []
losses: list[float] = []
for i in range(1, len(closes)):
delta = closes[i] - closes[i - 1]
gains.append(max(delta, 0.0))
losses.append(-min(delta, 0.0))
avg_gain = sum(gains[:period]) / period
avg_loss = sum(losses[:period]) / period
for i in range(period, len(gains)):
avg_gain = (avg_gain * (period - 1) + gains[i]) / period
avg_loss = (avg_loss * (period - 1) + losses[i]) / period
if avg_loss == 0:
return 100.0
rs = avg_gain / avg_loss
return 100.0 - (100.0 / (1.0 + rs))
def _ema_series(values: list[float], period: int) -> list[float]:
if len(values) < period:
return []
k = 2.0 / (period + 1)
seed = sum(values[:period]) / period
out = [seed]
for v in values[period:]:
out.append(out[-1] + k * (v - out[-1]))
return out
def macd(
closes: list[float],
fast: int = 12,
slow: int = 26,
signal: int = 9,
) -> dict[str, float | None]:
nothing: dict[str, float | None] = {"macd": None, "signal": None, "hist": None}
if len(closes) < slow + signal:
return nothing
ema_fast = _ema_series(closes, fast)
ema_slow = _ema_series(closes, slow)
offset = slow - fast
aligned_fast = ema_fast[offset:]
macd_line = [f - s for f, s in zip(aligned_fast, ema_slow, strict=False)]
if len(macd_line) < signal:
return nothing
signal_line = _ema_series(macd_line, signal)
if not signal_line:
return nothing
last_macd = macd_line[-1]
last_sig = signal_line[-1]
return {
"macd": last_macd,
"signal": last_sig,
"hist": last_macd - last_sig,
}
def atr(
highs: list[float],
lows: list[float],
closes: list[float],
period: int = 14,
) -> float | None:
if len(closes) < period + 1:
return None
trs: list[float] = []
for i in range(1, len(closes)):
tr = max(
highs[i] - lows[i],
abs(highs[i] - closes[i - 1]),
abs(lows[i] - closes[i - 1]),
)
trs.append(tr)
if len(trs) < period:
return None
avg = sum(trs[:period]) / period
for i in range(period, len(trs)):
avg = (avg * (period - 1) + trs[i]) / period
return avg
def adx(
highs: list[float],
lows: list[float],
closes: list[float],
period: int = 14,
) -> dict[str, float | None]:
nothing: dict[str, float | None] = {"adx": None, "+di": None, "-di": None}
if len(closes) < 2 * period + 1:
return nothing
trs: list[float] = []
plus_dms: list[float] = []
minus_dms: list[float] = []
for i in range(1, len(closes)):
tr = max(
highs[i] - lows[i],
abs(highs[i] - closes[i - 1]),
abs(lows[i] - closes[i - 1]),
)
up = highs[i] - highs[i - 1]
dn = lows[i - 1] - lows[i]
plus_dm = up if (up > dn and up > 0) else 0.0
minus_dm = dn if (dn > up and dn > 0) else 0.0
trs.append(tr)
plus_dms.append(plus_dm)
minus_dms.append(minus_dm)
atr_s = sum(trs[:period])
pdm_s = sum(plus_dms[:period])
mdm_s = sum(minus_dms[:period])
dxs: list[float] = []
pdi = mdi = 0.0
for i in range(period, len(trs)):
atr_s = atr_s - atr_s / period + trs[i]
pdm_s = pdm_s - pdm_s / period + plus_dms[i]
mdm_s = mdm_s - mdm_s / period + minus_dms[i]
pdi = 100.0 * pdm_s / atr_s if atr_s else 0.0
mdi = 100.0 * mdm_s / atr_s if atr_s else 0.0
s = pdi + mdi
dx = 100.0 * abs(pdi - mdi) / s if s else 0.0
dxs.append(dx)
if len(dxs) < period:
return nothing
adx_val = sum(dxs[:period]) / period
for i in range(period, len(dxs)):
adx_val = (adx_val * (period - 1) + dxs[i]) / period
return {"adx": adx_val, "+di": pdi, "-di": mdi}
# ───── Returns helper ─────
def _log_returns(closes: list[float]) -> list[float]:
out: list[float] = []
for i in range(1, len(closes)):
prev = closes[i - 1]
curr = closes[i]
if prev > 0 and curr > 0:
out.append(math.log(curr / prev))
return out
def _percentile(sorted_values: list[float], q: float) -> float:
if not sorted_values:
return 0.0
if len(sorted_values) == 1:
return sorted_values[0]
pos = q * (len(sorted_values) - 1)
lo = int(pos)
hi = min(lo + 1, len(sorted_values) - 1)
frac = pos - lo
return sorted_values[lo] + frac * (sorted_values[hi] - sorted_values[lo])
def _stddev(xs: list[float]) -> float:
if len(xs) < 2:
return 0.0
m = sum(xs) / len(xs)
var = sum((x - m) ** 2 for x in xs) / (len(xs) - 1)
return math.sqrt(var)
# ───── vol_cone ─────
def vol_cone(
closes: list[float],
windows: list[int] | None = None,
annualization: int = 252,
) -> dict[int, dict[str, float | None]]:
"""Realized vol cone: per ogni window restituisce vol corrente e percentili
storici (p10/p50/p90) di tutte le rolling windows del campione.
Annualizzata (default 252 trading days).
"""
windows = windows or [10, 20, 30, 60]
rets = _log_returns(closes)
out: dict[int, dict[str, float | None]] = {}
factor = math.sqrt(annualization)
for w in windows:
if len(rets) < w:
out[w] = {"current": None, "p10": None, "p50": None, "p90": None}
continue
rolling: list[float] = []
for i in range(w, len(rets) + 1):
window_rets = rets[i - w:i]
rolling.append(_stddev(window_rets) * factor)
rolling_sorted = sorted(rolling)
out[w] = {
"current": rolling[-1],
"p10": _percentile(rolling_sorted, 0.10),
"p50": _percentile(rolling_sorted, 0.50),
"p90": _percentile(rolling_sorted, 0.90),
}
return out
# ───── hurst_exponent ─────
def hurst_exponent(closes: list[float], min_lag: int = 2, max_lag: int = 100) -> float | None:
"""Hurst via R/S analysis su log-prices. H≈0.5 random walk, >0.5 trending,
<0.5 mean-reverting.
"""
if len(closes) < max(20, max_lag):
return None
log_p = [math.log(c) for c in closes if c > 0]
if len(log_p) < max(20, max_lag):
return None
upper = min(max_lag, len(log_p) // 2)
if upper < min_lag + 1:
return None
lags = list(range(min_lag, upper))
log_lags: list[float] = []
log_rs: list[float] = []
for lag in lags:
# Build N/lag non-overlapping segments; for each compute R/S
rs_vals: list[float] = []
n_segs = len(log_p) // lag
if n_segs < 1:
continue
for seg in range(n_segs):
chunk = log_p[seg * lag:(seg + 1) * lag]
diffs = [chunk[i] - chunk[i - 1] for i in range(1, len(chunk))]
if len(diffs) < 2:
continue
mean = sum(diffs) / len(diffs)
dev = [d - mean for d in diffs]
cum = []
acc = 0.0
for d in dev:
acc += d
cum.append(acc)
r = max(cum) - min(cum)
s = _stddev(diffs)
if s > 0:
rs_vals.append(r / s)
if rs_vals:
avg_rs = sum(rs_vals) / len(rs_vals)
if avg_rs > 0:
log_lags.append(math.log(lag))
log_rs.append(math.log(avg_rs))
if len(log_lags) < 4:
return None
# Linear regression slope = Hurst
n = len(log_lags)
mx = sum(log_lags) / n
my = sum(log_rs) / n
num = sum((log_lags[i] - mx) * (log_rs[i] - my) for i in range(n))
den = sum((log_lags[i] - mx) ** 2 for i in range(n))
if den == 0:
return None
return num / den
# ───── half_life_mean_reversion ─────
def half_life_mean_reversion(closes: list[float]) -> float | None:
"""Half-life via OU AR(1) fit: y_t - y_{t-1} = a + b*y_{t-1} + eps.
Half-life = -ln(2)/ln(1+b). Se b>=0 → no mean reversion → None.
"""
if len(closes) < 30:
return None
y_lag = closes[:-1]
delta = [closes[i] - closes[i - 1] for i in range(1, len(closes))]
n = len(y_lag)
mx = sum(y_lag) / n
my = sum(delta) / n
num = sum((y_lag[i] - mx) * (delta[i] - my) for i in range(n))
den = sum((y_lag[i] - mx) ** 2 for i in range(n))
if den == 0:
return None
b = num / den
if b >= 0:
return None
one_plus_b = 1.0 + b
if one_plus_b <= 0:
return None
return -math.log(2.0) / math.log(one_plus_b)
# ───── garch11_forecast ─────
def garch11_forecast(
closes: list[float],
max_iter: int = 50,
) -> dict[str, float] | None:
"""Forecast GARCH(1,1) one-step-ahead sigma via metodo dei momenti
semplificato (no MLE). Pure-Python: stima omega, alpha, beta tramite
iterazione di punto fisso minimizzando MSE sul squared-return tracking.
Sufficiente per ranking volatility regimes; non production-grade.
"""
rets = _log_returns(closes)
if len(rets) < 50:
return None
mean = sum(rets) / len(rets)
centered = [r - mean for r in rets]
sq = [r * r for r in centered]
# Sample variance as long-run mean
var_lr = sum(sq) / len(sq)
if var_lr <= 0:
return None
# Simple grid for (alpha, beta) minimizing MSE of sigma2 vs realized sq
best = (1e18, 0.05, 0.90)
for a in [0.02, 0.05, 0.08, 0.10, 0.15]:
for b in [0.80, 0.85, 0.88, 0.90, 0.93]:
if a + b >= 0.999:
continue
omega = var_lr * (1 - a - b)
if omega <= 0:
continue
sigma2 = var_lr
mse = 0.0
for s in sq[:-1]:
sigma2 = omega + a * s + b * sigma2
mse += (sigma2 - s) ** 2
if mse < best[0]:
best = (mse, a, b)
_, alpha, beta = best
omega = var_lr * (1 - alpha - beta)
sigma2 = var_lr
    for s in sq[:-1]:  # recurse up to sigma2_T; the last squared return feeds the forecast below
sigma2 = omega + alpha * s + beta * sigma2
sigma2_next = omega + alpha * sq[-1] + beta * sigma2
return {
"sigma_next": math.sqrt(max(sigma2_next, 0.0)),
"alpha": alpha,
"beta": beta,
"omega": omega,
"long_run_sigma": math.sqrt(var_lr),
}
# ───── autocorrelation ─────
def autocorrelation(values: list[float], max_lag: int = 10) -> dict[int, float]:
"""Autocorrelation function (ACF) lag 1..max_lag. White noise → ≈ 0.
AR(1) phi → lag1 ≈ phi, lag-k ≈ phi^k.
"""
if len(values) < max_lag + 2:
return {}
n = len(values)
mean = sum(values) / n
dev = [v - mean for v in values]
var = sum(d * d for d in dev) / n
if var == 0:
return {lag: 0.0 for lag in range(1, max_lag + 1)}
out: dict[int, float] = {}
for lag in range(1, max_lag + 1):
cov = sum(dev[i] * dev[i + lag] for i in range(n - lag)) / n
out[lag] = cov / var
return out
# ───── rolling_sharpe ─────
def rolling_sharpe(
closes: list[float],
window: int = 60,
annualization: int = 252,
risk_free: float = 0.0,
) -> dict[str, float] | None:
"""Sharpe e Sortino rolling sull'ultimo `window` di log-returns.
Annualizzati. risk_free in tasso annualizzato.
"""
rets = _log_returns(closes)
if len(rets) < window:
return None
sample = rets[-window:]
daily_rf = risk_free / annualization
excess = [r - daily_rf for r in sample]
mean = sum(excess) / len(excess)
sd = _stddev(excess)
sharpe = (mean / sd) * math.sqrt(annualization) if sd > 0 else 0.0
downside = [e for e in excess if e < 0]
if downside:
ds_var = sum(d * d for d in downside) / len(excess)
ds_sd = math.sqrt(ds_var)
sortino = (mean / ds_sd) * math.sqrt(annualization) if ds_sd > 0 else 0.0
else:
        sortino = sharpe * 2  # no downside returns → report a deliberately strong Sortino
return {"sharpe": sharpe, "sortino": sortino, "mean_excess": mean, "stddev": sd}
# ───── var_cvar ─────
def var_cvar(returns: list[float], confidences: list[float] | None = None) -> dict[str, float]:
"""Historical VaR e CVaR (Expected Shortfall) ai livelli di confidenza.
returns: serie di rendimenti (qualsiasi periodicità). VaR/CVaR restituiti
come perdite positive (es. var_95=0.03 → -3% al 95%).
"""
confidences = confidences or [0.95, 0.99]
if len(returns) < 30:
return {}
sorted_rets = sorted(returns)
out: dict[str, float] = {}
for c in confidences:
tag = int(round(c * 100))
q = 1.0 - c
var = -_percentile(sorted_rets, q)
cutoff = -var
tail = [r for r in sorted_rets if r <= cutoff]
cvar = -(sum(tail) / len(tail)) if tail else var
out[f"var_{tag}"] = var
out[f"cvar_{tag}"] = cvar
return out
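A quick self-contained check of a few helpers on synthetic data, assuming the module is importable as cerbero_mcp.common.indicators:
    import math
    import random
    from cerbero_mcp.common.indicators import rsi, sma, var_cvar, vol_cone
    random.seed(42)
    closes = [100.0]
    for _ in range(300):
        closes.append(closes[-1] * math.exp(random.gauss(0.0002, 0.01)))
    print(sma(closes, 20))       # 20-period simple moving average
    print(rsi(closes, 14))       # Wilder RSI in [0, 100]
    print(vol_cone(closes)[20])  # current vs p10/p50/p90 annualized vol, 20-bar window
    rets = [math.log(closes[i] / closes[i - 1]) for i in range(1, len(closes))]
    print(var_cvar(rets))        # positive-loss VaR/CVaR at 95% and 99%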
cerbero_mcp/common/logging.py (+81)
@@ -0,0 +1,81 @@
from __future__ import annotations
import logging
import os
import re
import sys
# pythonjsonlogger moved JsonFormatter to the .json module; keep the fallback for compatibility
try:
from pythonjsonlogger.json import JsonFormatter as _JsonFormatter # noqa: N813
except ImportError:
from pythonjsonlogger.jsonlogger import JsonFormatter as _JsonFormatter # noqa: N813
SECRET_PATTERNS = [
(re.compile(r"Bearer\s+[\w\-\._]+", re.IGNORECASE), "Bearer ***"),
(re.compile(r'("api_key"\s*:\s*")[^"]+(")'), r'\1***\2'),
(re.compile(r'("password"\s*:\s*")[^"]+(")'), r'\1***\2'),
(re.compile(r'("private_key"\s*:\s*")[^"]+(")'), r'\1***\2'),
(re.compile(r'("client_secret"\s*:\s*")[^"]+(")'), r'\1***\2'),
(re.compile(r"sk-[\w]{20,}"), "sk-***"),
]
class SecretsFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
msg = record.getMessage()
for pattern, replacement in SECRET_PATTERNS:
msg = pattern.sub(replacement, msg)
record.msg = msg
record.args = () # already formatted into msg
return True
def get_json_logger(name: str, level: int = logging.INFO) -> logging.Logger:
logger = logging.getLogger(name)
if logger.handlers:
return logger # already configured
logger.setLevel(level)
handler = logging.StreamHandler(sys.stderr)
formatter = _JsonFormatter("%(asctime)s %(name)s %(levelname)s %(message)s")
handler.setFormatter(formatter)
handler.addFilter(SecretsFilter())
logger.addHandler(handler)
logger.propagate = False
return logger
def configure_root_logging(
*,
level: str | int | None = None,
format_type: str | None = None,
) -> None:
"""CER-P5-009: configura il root logger con JSON o text formatter.
Env overrides:
- LOG_LEVEL (default INFO)
- LOG_FORMAT=json|text (default json — production-ready structured log)
Applica SecretsFilter su entrambi i format.
"""
lvl_raw = level if level is not None else os.environ.get("LOG_LEVEL", "INFO")
lvl = logging.getLevelName(lvl_raw.upper()) if isinstance(lvl_raw, str) else lvl_raw
fmt = (format_type or os.environ.get("LOG_FORMAT") or "json").lower()
root = logging.getLogger()
    # Remove existing handlers (basicConfig would otherwise leave duplicates)
for h in list(root.handlers):
root.removeHandler(h)
handler = logging.StreamHandler(sys.stderr)
if fmt == "json":
handler.setFormatter(
_JsonFormatter("%(asctime)s %(name)s %(levelname)s %(message)s")
)
else:
handler.setFormatter(
logging.Formatter("%(asctime)s %(levelname)s %(name)s %(message)s")
)
handler.addFilter(SecretsFilter())
root.addHandler(handler)
root.setLevel(lvl)
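A configuration sketch, assuming the module is importable as cerbero_mcp.common.logging:
    import logging
    from cerbero_mcp.common.logging import configure_root_logging
    configure_root_logging(level="INFO", format_type="json")
    log = logging.getLogger("demo")
    # SecretsFilter scrubs the token below to "Bearer ***" before it is emitted.
    log.info("calling API with Authorization: Bearer abc123secret")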
cerbero_mcp/common/mcp_bridge.py (+239)
@@ -0,0 +1,239 @@
"""Bridge MCP → endpoint REST esistenti.
Implementa manualmente JSON-RPC 2.0 MCP su `POST /mcp` (no SSE, risposta
diretta in body JSON). Supporta:
- initialize
- notifications/initialized
- tools/list
- tools/call
Claude Code config esempio:
{
"mcpServers": {
"cerbero-memory": {
"type": "http",
"url": "http://localhost:8080/mcp-memory/mcp",
"headers": {"Authorization": "Bearer <observer-token>"}
}
}
}
"""
from __future__ import annotations
import contextlib
from typing import Any
import httpx
from fastapi import FastAPI, Request, Response
from fastapi.responses import JSONResponse
from cerbero_mcp.common.auth import TokenStore
MCP_PROTOCOL_VERSION = "2024-11-05"
def _derive_input_schemas(app: FastAPI, tool_names: list[str]) -> dict[str, dict]:
"""Estrae JSON schema del body Pydantic per ogni route POST /tools/<name>.
Risolve annotazioni lazy (PEP 563) via `typing.get_type_hints`.
Ritorna mapping {tool_name: json_schema}. Route senza body Pydantic o non
risolvibili vengono saltate: il chiamante userà un fallback.
"""
import typing
from pydantic import BaseModel
names_set = set(tool_names)
out: dict[str, dict] = {}
for route in app.routes:
path = getattr(route, "path", "")
if not path.startswith("/tools/"):
continue
name = path[len("/tools/"):]
if name not in names_set:
continue
endpoint = getattr(route, "endpoint", None)
if endpoint is None:
continue
try:
hints = typing.get_type_hints(endpoint)
except Exception:
continue
for pname, ann in hints.items():
if pname == "return":
continue
if isinstance(ann, type) and issubclass(ann, BaseModel):
with contextlib.suppress(Exception):
out[name] = ann.model_json_schema()
break
return out
def _make_proxy_handler(internal_base_url: str, tool_name: str, token: str):
async def handler(args: dict | None) -> Any:
async with httpx.AsyncClient(timeout=30.0) as c:
r = await c.post(
f"{internal_base_url}/tools/{tool_name}",
headers={"Authorization": f"Bearer {token}"} if token else {},
json=args or {},
)
if r.status_code >= 400:
raise RuntimeError(
f"tool {tool_name} failed: HTTP {r.status_code}{r.text[:500]}"
)
try:
return r.json()
except Exception:
return {"raw": r.text}
return handler
def mount_mcp_endpoint(
app: FastAPI,
*,
name: str,
version: str,
token_store: TokenStore,
internal_base_url: str,
tools: list[dict],
) -> None:
"""Registra un endpoint MCP JSON-RPC 2.0 su POST /mcp.
Ogni tool è proxato verso POST {internal_base_url}/tools/<name> con il
Bearer token del client MCP (preservando le ACL REST esistenti).
Args:
app: istanza FastAPI del service
name: nome server MCP
version: versione del service
token_store: lo stesso usato dai tool REST
internal_base_url: URL base interno (es. "http://localhost:9015")
tools: lista di {"name": str, "description": str, "input_schema"?: dict}
"""
tools_by_name = {t["name"]: t for t in tools}
    # Auto-derive input schemas from FastAPI routes (Pydantic body models).
    # This lets the LLM see the required parameter names instead of guessing
    # them. An explicit `input_schema` on a tool wins over the auto-derived one.
derived_schemas = _derive_input_schemas(app, [t["name"] for t in tools])
def _tool_defs() -> list[dict]:
defs = []
for t in tools:
schema = t.get("input_schema") or derived_schemas.get(t["name"]) or {
"type": "object",
"additionalProperties": True,
}
defs.append({
"name": t["name"],
"description": t.get("description", t["name"]),
"inputSchema": schema,
})
return defs
async def _handle_rpc(body: dict, token: str) -> dict | None:
rpc_id = body.get("id")
method = body.get("method")
params = body.get("params") or {}
# Notification (no id) → no response
if method == "notifications/initialized":
return None
if method == "initialize":
return {
"jsonrpc": "2.0",
"id": rpc_id,
"result": {
"protocolVersion": MCP_PROTOCOL_VERSION,
"capabilities": {"tools": {"listChanged": False}},
"serverInfo": {"name": name, "version": version},
},
}
if method == "tools/list":
return {
"jsonrpc": "2.0",
"id": rpc_id,
"result": {"tools": _tool_defs()},
}
if method == "tools/call":
tool_name = params.get("name", "")
args = params.get("arguments") or {}
if tool_name not in tools_by_name:
return {
"jsonrpc": "2.0",
"id": rpc_id,
"error": {"code": -32601, "message": f"tool non trovato: {tool_name}"},
}
handler = _make_proxy_handler(internal_base_url, tool_name, token)
try:
result = await handler(args)
return {
"jsonrpc": "2.0",
"id": rpc_id,
"result": {
"content": [
{
"type": "text",
"text": _to_text(result),
}
],
"isError": False,
},
}
except Exception as e:
return {
"jsonrpc": "2.0",
"id": rpc_id,
"result": {
"content": [{"type": "text", "text": str(e)}],
"isError": True,
},
}
return {
"jsonrpc": "2.0",
"id": rpc_id,
"error": {"code": -32601, "message": f"metodo non supportato: {method}"},
}
@app.post("/mcp")
async def mcp_entry(request: Request):
auth = request.headers.get("Authorization", "")
if not auth.startswith("Bearer "):
return JSONResponse({"error": "missing bearer token"}, status_code=401)
token = auth[len("Bearer "):].strip()
principal = token_store.get(token)
if principal is None:
return JSONResponse({"error": "invalid token"}, status_code=403)
body = await request.json()
# Batch support
if isinstance(body, list):
results = []
for item in body:
resp = await _handle_rpc(item, token)
if resp is not None:
results.append(resp)
return JSONResponse(results)
resp = await _handle_rpc(body, token)
if resp is None:
            # Notification (no id) → 204 No Content (a 204 response must not carry a body)
            return Response(status_code=204)
return JSONResponse(resp)
def _to_text(value: Any) -> str:
import json
if isinstance(value, str):
return value
try:
return json.dumps(value, ensure_ascii=False, indent=2)
except Exception:
return str(value)
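A client-side sketch of the JSON-RPC round trip (URL and token are the illustrative ones from the module docstring above):
    import asyncio
    import httpx
    async def list_tools() -> None:
        headers = {"Authorization": "Bearer <observer-token>"}
        async with httpx.AsyncClient(base_url="http://localhost:8080/mcp-memory") as c:
            init = await c.post("/mcp", headers=headers, json={
                "jsonrpc": "2.0", "id": 1, "method": "initialize", "params": {},
            })
            print(init.json()["result"]["serverInfo"])
            tools = await c.post("/mcp", headers=headers, json={
                "jsonrpc": "2.0", "id": 2, "method": "tools/list",
            })
            print([t["name"] for t in tools.json()["result"]["tools"]])
    asyncio.run(list_tools())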
cerbero_mcp/common/microstructure.py (+74)
@@ -0,0 +1,74 @@
"""Microstructure indicators: orderbook imbalance, slope, microprice.
Tutte le funzioni accettano bids/asks come list[list[price, qty]] (formato
standard dei ticker exchange) e ritornano metriche aggregate exchange-agnostic.
"""
from __future__ import annotations
def orderbook_imbalance(
bids: list[list[float]],
asks: list[list[float]],
depth: int = 10,
) -> dict[str, float | None]:
"""Imbalance ratio = (bid_vol - ask_vol) / (bid_vol + ask_vol) sui top-`depth`
livelli. Range [-1, +1]. Positivo = bid pressure, negativo = ask pressure.
Microprice (Stoll-Bertsimas): mid pesato dalla size opposta
→ P_micro = (P_bid * Q_ask + P_ask * Q_bid) / (Q_bid + Q_ask). Best level only.
Slope: variazione cumulata di volume per unità di prezzo (proxy per
liquidità in profondità).
"""
if not bids and not asks:
return {
"imbalance_ratio": None,
"bid_volume": 0.0,
"ask_volume": 0.0,
"microprice": None,
"bid_slope": None,
"ask_slope": None,
}
top_bids = bids[:depth]
top_asks = asks[:depth]
bid_vol = sum(q for _, q in top_bids)
ask_vol = sum(q for _, q in top_asks)
total = bid_vol + ask_vol
ratio = None if total == 0 else (bid_vol - ask_vol) / total
# Microprice: best bid, best ask. Weighted by opposite-side size.
microprice = None
if top_bids and top_asks:
bp, bq = top_bids[0]
ap, aq = top_asks[0]
denom = bq + aq
if denom > 0:
microprice = (bp * aq + ap * bq) / denom
bid_slope = _depth_slope(top_bids, ascending_price=False)
ask_slope = _depth_slope(top_asks, ascending_price=True)
return {
"imbalance_ratio": ratio,
"bid_volume": bid_vol,
"ask_volume": ask_vol,
"microprice": microprice,
"bid_slope": bid_slope,
"ask_slope": ask_slope,
}
def _depth_slope(levels: list[list[float]], ascending_price: bool) -> float | None:
"""Calcola |Δq / Δp| sul primo vs penultimo livello.
Slope alto = liquidità che crolla rapidamente in profondità (book sottile).
"""
if len(levels) < 2:
return None
p_first, q_first = levels[0]
p_last, q_last = levels[-1]
dp = abs(p_last - p_first)
if dp == 0:
return None
return abs(q_first - q_last) / dp
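A worked example on a tiny illustrative book, assuming the module is importable as cerbero_mcp.common.microstructure:
    from cerbero_mcp.common.microstructure import orderbook_imbalance
    bids = [[99.5, 4.0], [99.0, 6.0], [98.5, 8.0]]
    asks = [[100.5, 2.0], [101.0, 3.0], [101.5, 5.0]]
    m = orderbook_imbalance(bids, asks, depth=10)
    print(m["imbalance_ratio"])  # (18 - 10) / 28 ≈ 0.286 → bid pressure
    print(m["microprice"])       # (99.5*2 + 100.5*4) / 6 ≈ 100.17, leaning toward the ask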
cerbero_mcp/common/options.py (+201)
@@ -0,0 +1,201 @@
"""Logiche option-flow exchange-agnostiche.
Ogni funzione accetta una lista di "legs" (dizionari) con i campi rilevanti
e ritorna metriche aggregate. La normalizzazione exchange-specific dei dati
spetta al chiamante (es. mcp-deribit costruisce le legs da chain + ticker).
"""
from __future__ import annotations
from typing import Any
# Dealer gamma convention: dealers are SHORT calls (sold to retail) and
# LONG puts. Short calls are short gamma: as spot rises, dealers must buy
# the underlying to stay hedged (positive feedback → amplified vol). Long
# puts are long gamma and hedge in the stabilizing direction. Negative net
# dealer gamma → unstable market (squeezes in both directions).
def oi_weighted_skew(legs: list[dict[str, Any]]) -> dict[str, float | int | None]:
"""Skew aggregato pesato per OI: IV media puts - IV media calls.
Positivo = puts richer (paura), negativo = calls richer (greed).
"""
call_num = call_den = 0.0
put_num = put_den = 0.0
for leg in legs:
iv = leg.get("iv")
oi = leg.get("oi") or 0
if iv is None or oi <= 0:
continue
if leg.get("option_type") == "call":
call_num += iv * oi
call_den += oi
elif leg.get("option_type") == "put":
put_num += iv * oi
put_den += oi
call_iv = call_num / call_den if call_den > 0 else None
put_iv = put_num / put_den if put_den > 0 else None
skew = (put_iv - call_iv) if (call_iv is not None and put_iv is not None) else None
return {
"skew": skew,
"call_iv_weighted": call_iv,
"put_iv_weighted": put_iv,
"total_oi": int(call_den + put_den),
}
def smile_asymmetry(legs: list[dict[str, Any]], spot: float) -> dict[str, float | None]:
"""Smile asymmetry: differenza media IV otm puts vs otm calls a parità
di moneyness. Positivo = put-side richer (skew negativo classico equity).
"""
if spot <= 0 or not legs:
return {"atm_iv": None, "asymmetry": None, "otm_put_iv": None, "otm_call_iv": None}
    # ATM IV: mean IV of strikes within ±1% of spot
    atm_ivs = [
        leg["iv"] for leg in legs
        if leg.get("iv") is not None and abs(leg.get("strike", 0) - spot) / spot < 0.01
    ]
atm_iv = sum(atm_ivs) / len(atm_ivs) if atm_ivs else None
otm_put_ivs = [
leg["iv"] for leg in legs
if leg.get("iv") is not None and leg.get("option_type") == "put" and leg.get("strike", 0) < spot * 0.95
]
otm_call_ivs = [
leg["iv"] for leg in legs
if leg.get("iv") is not None and leg.get("option_type") == "call" and leg.get("strike", 0) > spot * 1.05
]
otm_put = sum(otm_put_ivs) / len(otm_put_ivs) if otm_put_ivs else None
otm_call = sum(otm_call_ivs) / len(otm_call_ivs) if otm_call_ivs else None
asym = (otm_put - otm_call) if (otm_put is not None and otm_call is not None) else None
return {
"atm_iv": atm_iv,
"asymmetry": asym,
"otm_put_iv": otm_put,
"otm_call_iv": otm_call,
}
def atm_vs_wings_vol(legs: list[dict[str, Any]], spot: float) -> dict[str, float | None]:
"""IV ATM vs IV alle ali 25-delta. Wing richness > 0 → smile (kurtosis vol).
"""
if not legs:
return {"atm_iv": None, "wing_25d_call_iv": None, "wing_25d_put_iv": None, "wing_richness": None}
def _closest(target_delta: float, opt_type: str, tol: float = 0.1) -> float | None:
best = None
best_dist = float("inf")
for leg in legs:
d = leg.get("delta")
iv = leg.get("iv")
if d is None or iv is None or leg.get("option_type") != opt_type:
continue
dist = abs(abs(d) - abs(target_delta))
if dist < best_dist:
best_dist = dist
best = iv
return best if best_dist <= tol else None
    # ATM IV: leg with delta closest to 0.5 (call) or -0.5 (put)
atm_call_iv = _closest(0.5, "call")
atm_put_iv = _closest(-0.5, "put")
atm_ivs = [v for v in (atm_call_iv, atm_put_iv) if v is not None]
atm_iv = sum(atm_ivs) / len(atm_ivs) if atm_ivs else None
wing_call = _closest(0.25, "call")
wing_put = _closest(-0.25, "put")
wing_avg = None
if wing_call is not None and wing_put is not None:
wing_avg = (wing_call + wing_put) / 2
richness = (wing_avg - atm_iv) if (wing_avg is not None and atm_iv is not None) else None
return {
"atm_iv": atm_iv,
"wing_25d_call_iv": wing_call,
"wing_25d_put_iv": wing_put,
"wing_richness": richness,
}
def dealer_gamma_profile(
legs: list[dict[str, Any]],
spot: float,
) -> dict[str, Any]:
"""Net dealer gamma per strike (assume dealer short calls + long puts).
Restituisce per strike: call_dealer_gamma (negativo), put_dealer_gamma
(positivo), net. Aggregato totale + zero-cross strike (gamma flip).
"""
by_strike: dict[float, dict[str, float]] = {}
for leg in legs:
strike = leg.get("strike")
gamma = leg.get("gamma")
oi = leg.get("oi") or 0
if strike is None or gamma is None or oi <= 0 or spot <= 0:
continue
contrib = float(gamma) * oi * (spot ** 2) * 0.01
entry = by_strike.setdefault(
float(strike),
{"strike": float(strike), "call_dealer_gamma": 0.0, "put_dealer_gamma": 0.0},
)
if leg.get("option_type") == "call":
entry["call_dealer_gamma"] -= contrib # dealer short calls
elif leg.get("option_type") == "put":
entry["put_dealer_gamma"] += contrib # dealer long puts
rows: list[dict[str, float]] = []
for s in sorted(by_strike.keys()):
e = by_strike[s]
e["net_dealer_gamma"] = e["call_dealer_gamma"] + e["put_dealer_gamma"]
rows.append(e)
flip_level = None
for a, b in zip(rows, rows[1:], strict=False):
if (a["net_dealer_gamma"] < 0 <= b["net_dealer_gamma"]) or (
a["net_dealer_gamma"] > 0 >= b["net_dealer_gamma"]
):
denom = b["net_dealer_gamma"] - a["net_dealer_gamma"]
if denom != 0:
frac = -a["net_dealer_gamma"] / denom
flip_level = round(a["strike"] + frac * (b["strike"] - a["strike"]), 2)
break
total = sum(r["net_dealer_gamma"] for r in rows)
return {
"by_strike": [
{
"strike": r["strike"],
"call_dealer_gamma": round(r["call_dealer_gamma"], 2),
"put_dealer_gamma": round(r["put_dealer_gamma"], 2),
"net_dealer_gamma": round(r["net_dealer_gamma"], 2),
}
for r in rows
],
"total_net_dealer_gamma": round(total, 2),
"gamma_flip_level": flip_level,
}
def vanna_charm_aggregate(
legs: list[dict[str, Any]],
spot: float,
) -> dict[str, Any]:
"""Vanna (∂delta/∂IV) e Charm (∂delta/∂t) aggregati pesati per OI.
Vanna positiva → IV up, calls hedge buys; charm negativa → time decay
pushes delta down (calls only).
"""
total_vanna = 0.0
total_charm = 0.0
legs_used = 0
for leg in legs:
vanna = leg.get("vanna")
charm = leg.get("charm")
oi = leg.get("oi") or 0
if vanna is None or charm is None or oi <= 0:
continue
sign = 1 if leg.get("option_type") == "call" else -1
total_vanna += float(vanna) * oi * sign
total_charm += float(charm) * oi * sign
legs_used += 1
return {
"total_vanna": total_vanna,
"total_charm": total_charm,
"legs_analyzed": legs_used,
"spot": spot,
}
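A sketch on an illustrative four-leg chain snapshot, assuming the module is importable as cerbero_mcp.common.options:
    from cerbero_mcp.common.options import dealer_gamma_profile, oi_weighted_skew
    legs = [
        {"option_type": "call", "strike": 105.0, "iv": 0.52, "oi": 800, "gamma": 0.020},
        {"option_type": "call", "strike": 110.0, "iv": 0.55, "oi": 500, "gamma": 0.015},
        {"option_type": "put", "strike": 95.0, "iv": 0.60, "oi": 900, "gamma": 0.018},
        {"option_type": "put", "strike": 90.0, "iv": 0.66, "oi": 400, "gamma": 0.012},
    ]
    print(oi_weighted_skew(legs))  # put IV richer than call IV → positive skew
    print(dealer_gamma_profile(legs, spot=100.0)["gamma_flip_level"])  # ≈ 100.0 for this book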
cerbero_mcp/common/stats.py (+96)
@@ -0,0 +1,96 @@
"""Test statistici puri (cointegration, ADF, half-life già in indicators).
Nessuna dipendenza esterna: pure-Python.
"""
from __future__ import annotations
import math
def _ols_slope_intercept(xs: list[float], ys: list[float]) -> tuple[float, float] | None:
if len(xs) != len(ys) or len(xs) < 3:
return None
n = len(xs)
mx = sum(xs) / n
my = sum(ys) / n
num = sum((xs[i] - mx) * (ys[i] - my) for i in range(n))
den = sum((xs[i] - mx) ** 2 for i in range(n))
if den == 0:
return None
slope = num / den
intercept = my - slope * mx
return slope, intercept
def _adf_t_stat(series: list[float]) -> float | None:
"""Augmented Dickey-Fuller test stat semplificato (lag=0 → DF):
Δy_t = a + b*y_{t-1} + eps. t-stat di b vs zero.
Più negativo = più stazionario. Approssimazione: critical value ~ -2.86 al 5%.
"""
if len(series) < 30:
return None
y_lag = series[:-1]
delta = [series[i] - series[i - 1] for i in range(1, len(series))]
res = _ols_slope_intercept(y_lag, delta)
if res is None:
return None
b, a = res
n = len(y_lag)
mx = sum(y_lag) / n
den = sum((x - mx) ** 2 for x in y_lag)
if den == 0:
return None
fitted = [a + b * y_lag[i] for i in range(n)]
resid = [delta[i] - fitted[i] for i in range(n)]
rss = sum(r * r for r in resid)
if n - 2 <= 0:
return None
sigma2 = rss / (n - 2)
se_b = math.sqrt(sigma2 / den)
if se_b == 0:
return None
return b / se_b
def cointegration_test(
series_a: list[float],
series_b: list[float],
significance_t: float = -2.86,
) -> dict[str, float | bool | None]:
"""Engle-Granger cointegration:
1. OLS: y_t = alpha + beta * x_t + eps
2. ADF su residui: se t-stat < critical (-2.86 @ 5%) → cointegrate.
"""
if len(series_a) != len(series_b) or len(series_a) < 50:
return {
"cointegrated": None,
"beta": None,
"alpha": None,
"adf_t_stat": None,
"spread_mean": None,
"spread_std": None,
}
res = _ols_slope_intercept(series_b, series_a)
if res is None:
return {
"cointegrated": None,
"beta": None,
"alpha": None,
"adf_t_stat": None,
"spread_mean": None,
"spread_std": None,
}
beta, alpha = res
spread = [series_a[i] - alpha - beta * series_b[i] for i in range(len(series_a))]
t_stat = _adf_t_stat(spread)
cointegrated = (t_stat is not None and t_stat < significance_t)
n = len(spread)
mean = sum(spread) / n
var = sum((s - mean) ** 2 for s in spread) / (n - 1) if n > 1 else 0.0
return {
"cointegrated": cointegrated,
"beta": beta,
"alpha": alpha,
"adf_t_stat": t_stat,
"spread_mean": mean,
"spread_std": math.sqrt(var),
}
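A synthetic sanity check, assuming the module is importable as cerbero_mcp.common.stats:
    import random
    from cerbero_mcp.common.stats import cointegration_test
    random.seed(7)
    x = [100.0]
    for _ in range(300):
        x.append(x[-1] + random.gauss(0, 1))  # random walk
    y = [2.0 * xi + 5.0 + random.gauss(0, 1) for xi in x]  # cointegrated with x by construction
    res = cointegration_test(y, x)
    print(res["cointegrated"], res["beta"])  # expected: True, beta close to 2.0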