feat(state+runtime+gui): market_snapshots — calibrazione soglie da dati
Sistema dedicato di raccolta dati per scegliere le soglie dei filtri sui percentili reali invece di valori a istinto. Nuovi componenti: * state/migrations/0003_market_snapshots.sql — tabella + index, PK composta (timestamp, asset). Ogni colonna numerica è NULL-able per preservare la continuità della serie quando un singolo MCP fallisce. * state/models.py — MarketSnapshotRecord Pydantic. * state/repository.py — record_market_snapshot, list_market_snapshots, _row_to_market_snapshot. * runtime/market_snapshot_cycle.py — collettore best-effort che chiama spot/dvol/realized_vol/dealer_gamma/funding_perp/funding_cross/ liquidation_heatmap/macro per ogni asset; raccoglie gli errori in fetch_errors_json e segna fetch_ok=false ma persiste comunque la riga. * clients/deribit.py — generalizzati dealer_gamma_profile(currency), realized_vol(currency), spot_perp_price(asset). dealer_gamma_profile_eth resta come alias per la chiamata dell'entry cycle. * runtime/orchestrator.py — nuovo job APScheduler `market_snapshot` cron */15 con assets configurabili (default ETH+BTC); il consumer manual_actions ora dispatcha anche kind=run_cycle cycle=market_snapshot per la GUI. * gui/data_layer.py — load_market_snapshots, enqueue_run_cycle accetta market_snapshot; tipo MarketSnapshotRecord esposto. * gui/pages/6_📐_Calibrazione.py — selezione asset+finestra, conteggio fetch_ok, per ogni metrica: istogramma, soglia da strategy.yaml come vline rossa, percentili P5/P10/P25/P50/P75/P90/P95, % di tick che la soglia avrebbe filtrato. * gui/pages/1_📊_Status.py — bottone "📐 Forza snapshot" (4° del pannello Forza ciclo) per popolare la tabella senza aspettare il cron. 5 nuovi test sul collector (happy, fault tolerance, asset switch, macro fail, empty assets); test_orchestrator job set aggiornato. 368/368 tests pass; ruff clean; mypy strict src clean. Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -0,0 +1,192 @@
|
||||
"""Periodic market-snapshot collector.
|
||||
|
||||
Drives the ``market_snapshots`` table populated by the scheduler job
|
||||
``market_snapshot`` (cron */15 by default). For every traded asset the
|
||||
collector calls the same MCP feeds the entry/monitor cycles consume,
|
||||
but in **best-effort mode**: a single failure leaves the corresponding
|
||||
column NULL and the row is still persisted, with an error map in
|
||||
``fetch_errors_json`` for debugging. This keeps the time series
|
||||
continuous even when one of the feeds is briefly down — the
|
||||
distributions are what matters for threshold calibration, not the
|
||||
real-time correctness of any single tick.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
from collections.abc import Awaitable, Callable
|
||||
from datetime import UTC, datetime
|
||||
from decimal import Decimal
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from cerbero_bite.clients._exceptions import McpError
|
||||
from cerbero_bite.state import connect, transaction
|
||||
from cerbero_bite.state.models import MarketSnapshotRecord
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from cerbero_bite.runtime.dependencies import RuntimeContext
|
||||
|
||||
# Public surface of the module: the scheduler entry point plus the
# default asset tuple the orchestrator imports and re-uses.
__all__ = ["DEFAULT_ASSETS", "collect_market_snapshot"]


_log = logging.getLogger("cerbero_bite.runtime.market_snapshot")


# Assets snapshotted when the orchestrator does not override them.
DEFAULT_ASSETS: tuple[str, ...] = ("ETH", "BTC")
|
||||
|
||||
|
||||
async def _safe_call(
|
||||
label: str,
|
||||
factory: Callable[[], Awaitable[Any]],
|
||||
errors: dict[str, str],
|
||||
) -> Any:
|
||||
try:
|
||||
return await factory()
|
||||
except (McpError, Exception) as exc: # pragma: no branch — best-effort
|
||||
errors[label] = f"{type(exc).__name__}: {exc}"
|
||||
return None
|
||||
|
||||
|
||||
def _decimal_or_none(value: Any) -> Decimal | None:
|
||||
if value is None:
|
||||
return None
|
||||
if isinstance(value, Decimal):
|
||||
return value
|
||||
try:
|
||||
return Decimal(str(value))
|
||||
except (ValueError, ArithmeticError):
|
||||
return None
|
||||
|
||||
|
||||
async def _collect_one(
    ctx: RuntimeContext, asset: str, *, when: datetime
) -> MarketSnapshotRecord:
    """Fetch every configured feed for *asset* and build one snapshot row.

    Each feed goes through :func:`_safe_call`, so an individual MCP
    failure leaves its column(s) ``None`` and lands in the error map
    (serialized into ``fetch_errors_json``) instead of aborting the
    snapshot. ``fetch_ok`` is true only when every feed succeeded.
    """
    errors: dict[str, str] = {}
    asset_upper = asset.upper()

    spot = await _safe_call(
        "spot",
        lambda: ctx.deribit.spot_perp_price(asset_upper),
        errors,
    )
    dvol_value = await _safe_call(
        "dvol",
        lambda: ctx.deribit.latest_dvol(currency=asset_upper, now=when),
        errors,
    )
    rv = await _safe_call(
        "realized_vol",
        lambda: ctx.deribit.realized_vol(asset_upper),
        errors,
    )
    gamma = await _safe_call(
        "dealer_gamma",
        lambda: ctx.deribit.dealer_gamma_profile(asset_upper),
        errors,
    )
    funding_perp = await _safe_call(
        "funding_perp",
        lambda: ctx.hyperliquid.funding_rate_annualized(asset_upper),
        errors,
    )
    funding_cross = await _safe_call(
        "funding_cross",
        lambda: ctx.sentiment.funding_cross_median_annualized(asset_upper),
        errors,
    )
    heatmap = await _safe_call(
        "liquidation",
        lambda: ctx.sentiment.liquidation_heatmap(asset_upper),
        errors,
    )
    macro_days = await _safe_call(
        "macro",
        lambda: ctx.macro.next_high_severity_within(
            days=ctx.cfg.structure.dte_target,
            countries=list(ctx.cfg.entry.exclude_macro_countries),
            now=when,
        ),
        errors,
    )

    # realized_vol is consumed as a mapping; any other shape from a
    # misbehaving feed is treated as missing. (The previous
    # ``(rv or {}).get(...)`` fallback was dead code — the isinstance
    # guard already excludes every non-dict value.)
    if isinstance(rv, dict):
        rv_30 = rv.get("rv_30d")
        iv_minus_rv_30 = rv.get("iv_minus_rv_30d")
    else:
        rv_30 = None
        iv_minus_rv_30 = None

    return MarketSnapshotRecord(
        timestamp=when,
        asset=asset_upper,
        spot=_decimal_or_none(spot),
        dvol=_decimal_or_none(dvol_value),
        realized_vol_30d=_decimal_or_none(rv_30),
        iv_minus_rv=_decimal_or_none(iv_minus_rv_30),
        funding_perp_annualized=_decimal_or_none(funding_perp),
        funding_cross_annualized=_decimal_or_none(funding_cross),
        dealer_net_gamma=(
            _decimal_or_none(gamma.total_net_dealer_gamma)
            if gamma is not None
            else None
        ),
        gamma_flip_level=(
            _decimal_or_none(gamma.gamma_flip_level)
            if gamma is not None
            else None
        ),
        oi_delta_pct_4h=(
            _decimal_or_none(heatmap.oi_delta_pct_4h)
            if heatmap is not None
            else None
        ),
        liquidation_long_risk=(
            heatmap.long_squeeze_risk if heatmap is not None else None
        ),
        liquidation_short_risk=(
            heatmap.short_squeeze_risk if heatmap is not None else None
        ),
        macro_days_to_event=(
            # bool is an int subclass; int() normalises True/False to 1/0.
            int(macro_days) if isinstance(macro_days, int) else None
        ),
        fetch_ok=not errors,
        fetch_errors_json=(json.dumps(errors) if errors else None),
    )
|
||||
|
||||
|
||||
async def collect_market_snapshot(
    ctx: RuntimeContext,
    *,
    assets: tuple[str, ...] = DEFAULT_ASSETS,
    now: datetime | None = None,
) -> int:
    """Snapshot each asset in *assets* and persist one row per asset.

    Assets are processed sequentially (deliberately, to keep the MCP
    load light); the coroutine form exists so APScheduler can schedule
    it directly. Collection and persistence failures are logged per
    asset and never propagate — one broken asset does not stop the
    others. Returns the number of rows actually persisted.
    """
    snapshot_ts = (now or datetime.now(UTC)).astimezone(UTC)
    saved = 0

    for asset in assets:
        # Phase 1: collect. _collect_one is already best-effort per feed;
        # this catch only guards against a truly unexpected blow-up.
        try:
            record = await _collect_one(ctx, asset, when=snapshot_ts)
        except Exception:  # pragma: no cover — defensive
            _log.exception("snapshot for %s failed catastrophically", asset)
            continue

        # Phase 2: persist, one short-lived connection per row.
        try:
            conn = connect(ctx.db_path)
            try:
                with transaction(conn):
                    ctx.repository.record_market_snapshot(conn, record)
            finally:
                conn.close()
        except Exception:  # pragma: no cover — defensive
            _log.exception("persist snapshot for %s failed", asset)
        else:
            saved += 1

    if saved:
        _log.info("market_snapshot persisted %d row(s)", saved)
    return saved
||||
@@ -29,6 +29,10 @@ from cerbero_bite.runtime.entry_cycle import EntryCycleResult, run_entry_cycle
|
||||
from cerbero_bite.runtime.health_check import HealthCheck, HealthCheckResult
|
||||
from cerbero_bite.runtime.lockfile import EngineLock
|
||||
from cerbero_bite.runtime.manual_actions_consumer import consume_manual_actions
|
||||
from cerbero_bite.runtime.market_snapshot_cycle import (
|
||||
DEFAULT_ASSETS,
|
||||
collect_market_snapshot,
|
||||
)
|
||||
from cerbero_bite.runtime.monitor_cycle import MonitorCycleResult, run_monitor_cycle
|
||||
from cerbero_bite.runtime.recovery import recover_state
|
||||
from cerbero_bite.runtime.scheduler import JobSpec, build_scheduler
|
||||
@@ -47,6 +51,7 @@ _CRON_MONITOR = "0 2,14 * * *"
|
||||
_CRON_HEALTH = "*/5 * * * *"
|
||||
_CRON_BACKUP = "0 * * * *"
|
||||
_CRON_MANUAL_ACTIONS = "*/1 * * * *"
|
||||
_CRON_MARKET_SNAPSHOT = "*/15 * * * *"
|
||||
_BACKUP_RETENTION_DAYS = 30
|
||||
|
||||
|
||||
@@ -194,6 +199,8 @@ class Orchestrator:
|
||||
health_cron: str = _CRON_HEALTH,
|
||||
backup_cron: str = _CRON_BACKUP,
|
||||
manual_actions_cron: str = _CRON_MANUAL_ACTIONS,
|
||||
market_snapshot_cron: str = _CRON_MARKET_SNAPSHOT,
|
||||
market_snapshot_assets: tuple[str, ...] = DEFAULT_ASSETS,
|
||||
backup_dir: Path | None = None,
|
||||
backup_retention_days: int = _BACKUP_RETENTION_DAYS,
|
||||
) -> AsyncIOScheduler:
|
||||
@@ -232,6 +239,11 @@ class Orchestrator:
|
||||
|
||||
await _safe("backup", _do)
|
||||
|
||||
async def _run_market_snapshot_via_action() -> None:
|
||||
await collect_market_snapshot(
|
||||
self._ctx, assets=market_snapshot_assets
|
||||
)
|
||||
|
||||
async def _manual_actions() -> None:
|
||||
async def _do() -> None:
|
||||
await consume_manual_actions(
|
||||
@@ -240,11 +252,20 @@ class Orchestrator:
|
||||
"entry": self.run_entry,
|
||||
"monitor": self.run_monitor,
|
||||
"health": self.run_health,
|
||||
"market_snapshot": _run_market_snapshot_via_action,
|
||||
},
|
||||
)
|
||||
|
||||
await _safe("manual_actions", _do)
|
||||
|
||||
async def _market_snapshot() -> None:
|
||||
async def _do() -> None:
|
||||
await collect_market_snapshot(
|
||||
self._ctx, assets=market_snapshot_assets
|
||||
)
|
||||
|
||||
await _safe("market_snapshot", _do)
|
||||
|
||||
self._scheduler = build_scheduler(
|
||||
[
|
||||
JobSpec(name="entry", cron=entry_cron, coro_factory=_entry),
|
||||
@@ -256,6 +277,11 @@ class Orchestrator:
|
||||
cron=manual_actions_cron,
|
||||
coro_factory=_manual_actions,
|
||||
),
|
||||
JobSpec(
|
||||
name="market_snapshot",
|
||||
cron=market_snapshot_cron,
|
||||
coro_factory=_market_snapshot,
|
||||
),
|
||||
]
|
||||
)
|
||||
return self._scheduler
|
||||
|
||||
Reference in New Issue
Block a user