Phase 4: orchestrator + cycles auto-execute

Componente runtime/ che cabla core+clients+state+safety in un engine
autonomo notify-only: nessuna conferma manuale, ordini combo
piazzati direttamente quando le regole passano. 311 test pass,
copertura totale 94%, runtime/ 90%, mypy strict pulito, ruff clean.

Moduli:
- runtime/alert_manager.py: escalation tree
  LOW/MEDIUM/HIGH/CRITICAL → audit + Telegram + kill switch.
- runtime/dependencies.py: build_runtime() costruisce
  RuntimeContext con tutti i client MCP, repository, audit log,
  kill switch, alert manager.
- runtime/entry_cycle.py: flusso settimanale (snapshot parallelo
  spot/dvol/funding/macro/holdings/equity → validate_entry →
  compute_bias → options_chain → select_strikes →
  liquidity_gate → sizing_engine → combo_builder.build →
  place_combo_order → notify_position_opened).
- runtime/monitor_cycle.py: loop 12h con dvol_history per il
  return_4h, exit_decision.evaluate, close auto-execute.
- runtime/health_check.py: probe parallelo MCP + SQLite +
  environment match; 3 strikes consecutivi → kill switch HIGH.
- runtime/recovery.py: riconciliazione SQLite vs broker
  all'avvio; mismatch → kill switch CRITICAL.
- runtime/scheduler.py: AsyncIOScheduler builder con cron entry
  (lun 14:00), monitor (02/14), health (5min).
- runtime/orchestrator.py: façade boot() + run_entry/monitor/health
  + install_scheduler + run_forever, con env check vs strategy.

CLI:
- start: avvia engine bloccante (asyncio.run + scheduler).
- dry-run --cycle entry|monitor|health: esegue un singolo ciclo
  per debug/test in produzione.
- stop: documenta lo shutdown via SIGTERM al container.

Documentazione:
- docs/06-operational-flow.md riscritto per il modello
  notify-only auto-execute (no conferma manuale, no memory,
  no brain-bridge).

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
2026-04-28 00:03:45 +02:00
parent 466e63dc19
commit 42b0fbe1ab
20 changed files with 3715 additions and 131 deletions
+139
View File
@@ -0,0 +1,139 @@
"""Tests for AlertManager."""
from __future__ import annotations
import json
from datetime import UTC, datetime
from pathlib import Path
import pytest
from pytest_httpx import HTTPXMock
from cerbero_bite.clients._base import HttpToolClient
from cerbero_bite.clients.telegram import TelegramClient
from cerbero_bite.runtime.alert_manager import AlertManager, Severity
from cerbero_bite.safety import AuditLog, iter_entries
from cerbero_bite.safety.kill_switch import KillSwitch
from cerbero_bite.state import Repository, connect, run_migrations, transaction
def _make_alert_manager(tmp_path: Path) -> tuple[AlertManager, Path, Path, KillSwitch]:
    """Build an AlertManager wired to real on-disk collaborators under *tmp_path*.

    Creates a migrated SQLite database with an initialised system_state row,
    a fresh audit log, a KillSwitch with a deterministic advancing clock, and
    a TelegramClient pointed at the MCP telegram endpoint (intercepted in
    tests via pytest-httpx).

    Returns:
        (alert_manager, audit_log_path, db_path, kill_switch) so tests can
        inspect every side effect directly.
    """
    db_path = tmp_path / "state.sqlite"
    audit_path = tmp_path / "audit.log"
    conn = connect(db_path)
    run_migrations(conn)
    repo = Repository()
    with transaction(conn):
        repo.init_system_state(
            conn, config_version="1.0.0", now=datetime(2026, 4, 27, 14, 0, tzinfo=UTC)
        )
    conn.close()
    audit = AuditLog(audit_path)
    # Deterministic, strictly increasing timestamps for the kill-switch clock;
    # 50 one-minute ticks is far more than any single test consumes.
    times = iter(
        datetime(2026, 4, 27, 14, m, tzinfo=UTC) for m in range(0, 50)
    )
    ks = KillSwitch(
        # Fresh connection per arm/disarm so the switch never shares the
        # (already closed) setup connection above.
        connection_factory=lambda: connect(db_path),
        repository=Repository(),
        audit_log=audit,
        clock=lambda: next(times),
    )
    telegram = TelegramClient(
        HttpToolClient(
            service="telegram",
            base_url="http://mcp-telegram:9017",
            token="t",
            retry_max=1,  # fail fast in tests — no retry back-off
        )
    )
    return AlertManager(telegram=telegram, audit_log=audit, kill_switch=ks), audit_path, db_path, ks
@pytest.mark.asyncio
async def test_low_emits_audit_only(tmp_path: Path, httpx_mock: HTTPXMock) -> None:
    """A LOW alert is recorded in the audit log only: no Telegram, no kill switch."""
    manager, audit_path, _db, kill_switch = _make_alert_manager(tmp_path)
    await manager.low(source="test", message="just info")
    recorded = list(iter_entries(audit_path))
    assert len(recorded) == 1
    entry = recorded[0]
    assert entry.event == "ALERT"
    assert entry.payload["severity"] == "low"
    assert kill_switch.is_armed() is False
    # LOW severity must never reach Telegram.
    assert httpx_mock.get_requests() == []
@pytest.mark.asyncio
async def test_medium_calls_telegram_notify(tmp_path: Path, httpx_mock: HTTPXMock) -> None:
    """A MEDIUM alert sends one Telegram notify but leaves the kill switch disarmed."""
    httpx_mock.add_response(
        url="http://mcp-telegram:9017/tools/notify", json={"ok": True}
    )
    manager, audit_path, _db, kill_switch = _make_alert_manager(tmp_path)
    await manager.medium(source="entry_cycle", message="snapshot delayed")
    sent = httpx_mock.get_requests()
    assert len(sent) == 1
    payload = json.loads(sent[0].read())
    assert payload["message"] == "[entry_cycle] snapshot delayed"
    assert payload["priority"] == "high"
    assert payload["tag"] == "entry_cycle"
    assert kill_switch.is_armed() is False
    # The alert must also land in the audit trail with its severity.
    severities = [e.payload["severity"] for e in iter_entries(audit_path)]
    assert "medium" in severities
@pytest.mark.asyncio
async def test_high_arms_kill_switch_and_calls_notify_alert(
    tmp_path: Path, httpx_mock: HTTPXMock
) -> None:
    """A HIGH alert escalates: Telegram notify_alert call plus kill-switch arming."""
    httpx_mock.add_response(
        url="http://mcp-telegram:9017/tools/notify_alert", json={"ok": True}
    )
    manager, _audit, _db, kill_switch = _make_alert_manager(tmp_path)
    await manager.high(source="health", message="3 consecutive MCP failures")
    expected_body = {
        "source": "health",
        "message": "3 consecutive MCP failures",
        "priority": "high",
    }
    assert json.loads(httpx_mock.get_request().read()) == expected_body
    assert kill_switch.is_armed() is True
@pytest.mark.asyncio
async def test_critical_arms_kill_switch_and_calls_notify_system_error(
    tmp_path: Path, httpx_mock: HTTPXMock
) -> None:
    """A CRITICAL alert hits notify_system_error with the component and arms the switch."""
    httpx_mock.add_response(
        url="http://mcp-telegram:9017/tools/notify_system_error", json={"ok": True}
    )
    manager, _audit, _db, kill_switch = _make_alert_manager(tmp_path)
    await manager.critical(
        source="audit_chain",
        message="hash chain mismatch on line 142",
        component="safety.audit_log",
    )
    payload = json.loads(httpx_mock.get_request().read())
    assert payload["component"] == "safety.audit_log"
    assert payload["priority"] == "critical"
    assert kill_switch.is_armed() is True
@pytest.mark.asyncio
async def test_critical_when_already_armed_is_idempotent(
    tmp_path: Path, httpx_mock: HTTPXMock
) -> None:
    """A CRITICAL alert on an already-armed kill switch neither errors nor disarms it."""
    httpx_mock.add_response(
        url="http://mcp-telegram:9017/tools/notify_system_error", json={"ok": True}
    )
    manager, _audit, _db, kill_switch = _make_alert_manager(tmp_path)
    kill_switch.arm(reason="prior", source="manual")
    assert kill_switch.is_armed() is True
    await manager.critical(source="x", message="anomaly")
    # Still armed after the second arming attempt — idempotent, no toggle.
    assert kill_switch.is_armed() is True
@pytest.mark.asyncio
async def test_emit_with_severity_enum(tmp_path: Path, httpx_mock: HTTPXMock) -> None:
    """emit() accepts a Severity enum directly and audits its lowercase value."""
    manager, audit_path, _db, _kill_switch = _make_alert_manager(tmp_path)
    await manager.emit(Severity.LOW, source="t", message="m")
    recorded = list(iter_entries(audit_path))
    assert recorded[0].payload["severity"] == "low"
+55
View File
@@ -0,0 +1,55 @@
"""Tests for the runtime dependency container."""
from __future__ import annotations
from datetime import UTC, datetime
from pathlib import Path
from cerbero_bite.config import golden_config
from cerbero_bite.config.mcp_endpoints import load_endpoints
from cerbero_bite.runtime import build_runtime
from cerbero_bite.state import connect
def test_build_runtime_creates_state_and_audit_files(tmp_path: Path) -> None:
    """build_runtime() materialises the SQLite DB, audit path, and system_state row."""
    database = tmp_path / "state.sqlite"
    audit_file = tmp_path / "audit.log"
    frozen_now = datetime(2026, 4, 27, 14, 0, tzinfo=UTC)
    ctx = build_runtime(
        cfg=golden_config(),
        endpoints=load_endpoints(env={}),
        token="t",
        db_path=database,
        audit_path=audit_file,
        clock=lambda: frozen_now,
    )
    assert database.exists()
    assert ctx.audit_log.path == audit_file
    # The system_state singleton must be seeded on disk during build_runtime().
    connection = connect(database)
    try:
        state = ctx.repository.get_system_state(connection)
    finally:
        connection.close()
    assert state is not None
    assert state.config_version == ctx.cfg.config_version
def test_build_runtime_clients_pinned_to_endpoints(tmp_path: Path) -> None:
    """Each context attribute holds the concrete client for its MCP service."""
    ctx = build_runtime(
        cfg=golden_config(),
        endpoints=load_endpoints(
            env={"CERBERO_BITE_MCP_DERIBIT_URL": "http://localhost:9911"}
        ),
        token="t",
        db_path=tmp_path / "state.sqlite",
        audit_path=tmp_path / "audit.log",
    )
    # type checks: every client is the right concrete type
    clients_by_service = {
        "deribit": ctx.deribit,
        "macro": ctx.macro,
        "sentiment": ctx.sentiment,
        "hyperliquid": ctx.hyperliquid,
        "portfolio": ctx.portfolio,
        "telegram": ctx.telegram,
    }
    for service, client in clients_by_service.items():
        assert client.SERVICE == service
+27
View File
@@ -0,0 +1,27 @@
"""Tests for the APScheduler bootstrap helper."""
from __future__ import annotations
import pytest
from cerbero_bite.runtime.scheduler import JobSpec, build_scheduler
async def _noop() -> None:
    """Do nothing; stands in for a real cycle coroutine in scheduler tests."""
def test_build_scheduler_registers_all_jobs() -> None:
    """Every JobSpec passed in appears as a registered job keyed by its name."""
    cron_by_name = {
        "entry": "0 14 * * MON",
        "monitor": "0 2,14 * * *",
        "health": "*/5 * * * *",
    }
    specs = [
        JobSpec(name=name, cron=cron, coro_factory=_noop)
        for name, cron in cron_by_name.items()
    ]
    scheduler = build_scheduler(specs)
    registered_ids = {job.id for job in scheduler.get_jobs()}
    assert registered_ids == {"entry", "monitor", "health"}
def test_build_scheduler_rejects_malformed_cron() -> None:
    """A 4-field cron expression must be rejected with an explanatory ValueError."""
    # The JobSpec is built inside the context so that a ValueError raised by
    # either the spec constructor or build_scheduler satisfies the check.
    with pytest.raises(ValueError, match="cron must have 5 fields"):
        build_scheduler(
            [JobSpec(name="x", cron="0 14 * *", coro_factory=_noop)]
        )