feat(gui): traduzione italiana, logo Cerbero, saldi live e Forza ciclo

* Localizzazione italiana di tutte le pagine (Stato, Audit, Equity,
  Storico, Posizione) e della home; date relative ("5s fa", "12m fa").
* Logo Cerbero (cane a tre teste) in src/cerbero_bite/gui/assets/
  cerbero_logo.png — sostituisce l'emoji 🐺 (lupo, semanticamente
  errata) sia come favicon (`page_icon`) sia in sidebar e header.
* Caricamento automatico di `.env` dal CWD all'avvio della CLI (skip
  sotto pytest tramite PYTEST_CURRENT_TEST), evitando di dover
  esportare manualmente le 4 URL MCP. Aggiunto python-dotenv come
  dipendenza, `.env.example` committato come template, `.env` resta
  ignorato da git.
* Pagina Stato: nuovo pannello "Saldi exchange" che fa fetch live
  via gateway MCP (Deribit USDC + USDT, Hyperliquid USDC + opzionale
  USDT spot) con cache TTL 60s e bottone refresh; tile riassuntivi
  totale USD / EUR / cambio.
* Pagina Stato: nuovo pannello "Forza ciclo" con tre bottoni
  (entry/monitor/health) che accodano azioni `run_cycle` nella tabella
  manual_actions; il consumer dell'engine — quando in esecuzione —
  dispatcha all'`Orchestrator.run_*` corrispondente.
* manual_actions: nuovo `kind="run_cycle"` nello schema
  ManualAction; consumer accetta dict di cycle_runners che
  l'orchestrator popola in install_scheduler. 3 nuovi test (dispatch
  entry, ciclo sconosciuto, fallback senza runner).
* gui/live_data.py — modulo dedicato al fetch MCP dalla GUI
  (relax controllato della regola "no MCP from GUI" solo per i saldi,
  non per i dati di trading).

363/363 tests pass; ruff clean; mypy strict src clean.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
2026-04-30 14:11:40 +02:00
parent da88e7f746
commit 63d1aa4262
17 changed files with 730 additions and 222 deletions
+8
View File
@@ -74,6 +74,14 @@ def _phase0_notice(action: str) -> None:
@click.pass_context
def main(ctx: click.Context, log_dir: Path, log_level: str) -> None:
"""Cerbero Bite — rule-based ETH credit spread engine."""
# Load `.env` once at CLI entry, unless we are running under
# pytest (which sets ``PYTEST_CURRENT_TEST`` for the duration of
# the test). Existing env vars win over the file (override=False).
if "PYTEST_CURRENT_TEST" not in os.environ:
from dotenv import load_dotenv # noqa: PLC0415
load_dotenv(Path.cwd() / ".env", override=False)
configure_logging(log_dir=log_dir, level=log_level.upper())
ctx.ensure_object(dict)
ctx.obj["log_dir"] = log_dir
Binary file not shown.

After

Width:  |  Height:  |  Size: 668 KiB

+31 -9
View File
@@ -55,6 +55,7 @@ __all__ = [
"compute_payoff_curve",
"enqueue_arm_kill",
"enqueue_disarm_kill",
"enqueue_run_cycle",
"load_audit_chain_status",
"load_audit_tail",
"load_closed_positions",
@@ -91,11 +92,11 @@ class EngineSnapshot:
@property
def health_label(self) -> str:
return {
"running": "RUNNING",
"degraded": "DEGRADED",
"killed": "KILL SWITCH ARMED",
"stopped": "STOPPED",
"unknown": "UNKNOWN",
"running": "ATTIVO",
"degraded": "DEGRADATO",
"killed": "KILL SWITCH ARMATO",
"stopped": "FERMO",
"unknown": "SCONOSCIUTO",
}[self.health]
@@ -623,6 +624,27 @@ def enqueue_disarm_kill(
)
def enqueue_run_cycle(
    *, cycle: str, db_path: Path | str = DEFAULT_DB_PATH
) -> int:
    """Queue a ``run_cycle`` action — engine must be running.

    ``cycle`` must be one of ``entry``, ``monitor``, ``health``. The
    engine consumer dispatches the corresponding ``Orchestrator.run_*``
    method on the next minute tick.

    Returns the id of the newly queued manual action.
    """
    normalized = cycle.strip().lower()
    if normalized not in {"entry", "monitor", "health"}:
        raise ValueError(
            f"cycle must be entry|monitor|health, got '{cycle}'"
        )
    return _enqueue_action(
        db_path=db_path, kind="run_cycle", payload={"cycle": normalized}
    )
def load_pending_manual_actions(
*, db_path: Path | str = DEFAULT_DB_PATH
) -> list[ManualAction]:
@@ -683,12 +705,12 @@ def humanize_age(seconds: float | None) -> str:
if seconds is None:
return ""
if seconds < 60:
return f"{int(seconds)}s ago"
return f"{int(seconds)}s fa"
if seconds < 3600:
return f"{int(seconds / 60)}m ago"
return f"{int(seconds / 60)}m fa"
if seconds < 86400:
return f"{seconds / 3600:.1f}h ago"
return f"{seconds / 86400:.1f}d ago"
return f"{seconds / 3600:.1f}h fa"
return f"{seconds / 86400:.1f}g fa"
def humanize_dt(value: datetime | None) -> str:
+220
View File
@@ -0,0 +1,220 @@
"""Live MCP fetch for the GUI (saldi exchange, FX rate).
The original architecture forbade the GUI from calling MCP services
(`docs/11-gui-streamlit.md`). For the "Saldi exchange" panel that
constraint is relaxed: the dashboard fetches balances on demand,
caches the result with Streamlit's TTL cache, and never holds the
async client open between renders. Every fetch is a one-shot:
* read endpoints + token from env / file (same path used by the CLI),
* spin up a short-lived ``httpx.AsyncClient``,
* query Deribit `get_account_summary` for both ``USDC`` and ``USDT``,
* query Hyperliquid `get_account_summary` (returns ``spot_usdc``,
``perps_equity`` etc.),
* query Macro `get_asset_price("EURUSD")` for FX,
* close the client and return a frozen dataclass to the page.
If a single exchange call fails the row is filled with ``error=...``
and the others are still rendered.
"""
from __future__ import annotations
import asyncio
import os
from dataclasses import dataclass
from datetime import UTC, datetime
from decimal import Decimal
from pathlib import Path
from typing import Any
import httpx
from cerbero_bite.clients._base import HttpToolClient
from cerbero_bite.clients.deribit import DeribitClient
from cerbero_bite.clients.hyperliquid import HyperliquidClient
from cerbero_bite.clients.macro import MacroClient
from cerbero_bite.config.mcp_endpoints import load_endpoints, load_token
__all__ = [
"BalanceRow",
"BalancesSnapshot",
"fetch_balances_sync",
]
_DERIBIT_CURRENCIES = ("USDC", "USDT")
@dataclass(frozen=True)
class BalanceRow:
    """One row of the balances table."""

    # Source exchange: "deribit" or "hyperliquid".
    exchange: str
    # Settlement currency of the row: "USDC" or "USDT".
    currency: str
    # Equity as reported by the exchange summary; None when missing.
    equity: Decimal | None
    # Available funds from the exchange summary; None when missing.
    available: Decimal | None
    # Unrealized P&L from the exchange summary; None when missing.
    unrealized_pnl: Decimal | None
    # "ExcType: message" when the fetch for this row failed; None on success.
    error: str | None = None
@dataclass(frozen=True)
class BalancesSnapshot:
    """Result of one fetch_balances call (rows + meta)."""

    rows: list[BalanceRow]
    eur_usd_rate: Decimal | None
    fetched_at: datetime
    fx_error: str | None = None

    def total_usd(self) -> Decimal:
        """Sum every known equity value (unknowns are skipped)."""
        known = (row.equity for row in self.rows if row.equity is not None)
        return sum(known, Decimal(0))

    def total_eur(self) -> Decimal | None:
        """Convert the USD total via EUR/USD; None without a usable rate."""
        rate = self.eur_usd_rate
        if rate is None or rate <= 0:
            return None
        return self.total_usd() / rate
def _decimal_or_none(value: Any) -> Decimal | None:
if value is None:
return None
try:
return Decimal(str(value))
except (ValueError, ArithmeticError):
return None
def _resolve_token() -> str:
    """Read the bearer token from disk, mirroring the CLI default chain."""
    override = os.environ.get("CERBERO_BITE_CORE_TOKEN_FILE")
    if override:
        return load_token(path=Path(override))
    # Fallback: project-relative `secrets/core.token` (typical local dev).
    candidate = Path("secrets") / "core.token"
    if candidate.is_file():
        return load_token(path=candidate)
    return load_token()
async def _fetch_deribit_currency(
    deribit: DeribitClient, currency: str
) -> BalanceRow:
    """Query Deribit ``get_account_summary`` for one currency.

    Never raises: a failed call is reported as a row whose ``error``
    field carries the exception, so the other rows still render.
    """
    identity = {"exchange": "deribit", "currency": currency}
    try:
        summary = await deribit.get_account_summary(currency=currency)
    except Exception as exc:
        return BalanceRow(
            equity=None,
            available=None,
            unrealized_pnl=None,
            error=f"{type(exc).__name__}: {exc}",
            **identity,
        )
    return BalanceRow(
        equity=_decimal_or_none(summary.get("equity")),
        available=_decimal_or_none(summary.get("available_funds")),
        unrealized_pnl=_decimal_or_none(summary.get("unrealized_pnl")),
        **identity,
    )
async def _fetch_hyperliquid(hl: HyperliquidClient) -> list[BalanceRow]:
    """Query the Hyperliquid account summary.

    Always returns a USDC row, plus a USDT row only when the summary
    exposes ``spot_usdt``. A failed call yields a single USDC row whose
    ``error`` field carries the exception instead of raising.
    """
    try:
        summary = await hl.get_account_summary()
    except Exception as exc:
        failed = BalanceRow(
            exchange="hyperliquid",
            currency="USDC",
            equity=None,
            available=None,
            unrealized_pnl=None,
            error=f"{type(exc).__name__}: {exc}",
        )
        return [failed]
    rows = [
        BalanceRow(
            exchange="hyperliquid",
            currency="USDC",
            equity=_decimal_or_none(summary.get("equity")),
            available=_decimal_or_none(summary.get("available_balance")),
            unrealized_pnl=_decimal_or_none(summary.get("unrealized_pnl")),
        )
    ]
    # Hyperliquid spot may also hold USDT; the MCP server exposes it
    # under `spot_usdt` when present. Add a row only if the field is
    # there so we don't render a confusing "0.00" against an asset the
    # account never held.
    spot_usdt = summary.get("spot_usdt")
    if spot_usdt is not None:
        rows.append(
            BalanceRow(
                exchange="hyperliquid",
                currency="USDT",
                equity=_decimal_or_none(spot_usdt),
                available=_decimal_or_none(spot_usdt),
                unrealized_pnl=Decimal(0),
            )
        )
    return rows
async def _fetch_balances_async(*, timeout_s: float = 8.0) -> BalancesSnapshot:
    """One-shot concurrent fetch of all exchange balances plus EUR/USD.

    Opens a short-lived ``httpx.AsyncClient`` shared by all MCP tool
    clients, fans out the queries with ``asyncio.gather``, and closes
    the client before returning — nothing async survives the call.
    """
    endpoints = load_endpoints()
    token = _resolve_token()
    async with httpx.AsyncClient(timeout=timeout_s) as http_client:
        def _client(service: str) -> HttpToolClient:
            # All tool clients share one underlying HTTP client so a
            # single connection pool serves the whole fetch.
            return HttpToolClient(
                service=service,
                base_url=endpoints.for_service(service),
                token=token,
                timeout_s=timeout_s,
                retry_max=1,
                client=http_client,
            )

        deribit = DeribitClient(_client("deribit"))
        hl = HyperliquidClient(_client("hyperliquid"))
        macro = MacroClient(_client("macro"))
        # Outer gather runs the three services concurrently; the inner
        # gather fans out over the Deribit currencies (USDC, USDT).
        deribit_results, hl_rows, (fx_value, fx_error) = await asyncio.gather(
            asyncio.gather(
                *(
                    _fetch_deribit_currency(deribit, cur)
                    for cur in _DERIBIT_CURRENCIES
                )
            ),
            _fetch_hyperliquid(hl),
            _fetch_eur_usd(macro),
        )
        deribit_rows = list(deribit_results)
        return BalancesSnapshot(
            rows=[*deribit_rows, *hl_rows],
            eur_usd_rate=fx_value,
            fetched_at=datetime.now(UTC),
            fx_error=fx_error,
        )
async def _fetch_eur_usd(
macro: MacroClient,
) -> tuple[Decimal | None, str | None]:
try:
rate = await macro.eur_usd_rate()
except Exception as exc:
return None, f"{type(exc).__name__}: {exc}"
return rate, None
def fetch_balances_sync(*, timeout_s: float = 8.0) -> BalancesSnapshot:
    """Sync wrapper for Streamlit pages (which run in a sync context).

    Spins up a fresh event loop per call via ``asyncio.run``, so the
    GUI never holds an async client open between renders.
    """
    return asyncio.run(_fetch_balances_async(timeout_s=timeout_s))
+35 -25
View File
@@ -31,8 +31,8 @@ from cerbero_bite.gui.data_layer import (
load_engine_snapshot,
)
PAGE_TITLE = "Cerbero Bite — Dashboard"
PAGE_ICON = "🐺"
PAGE_TITLE = "Cerbero Bite — Cruscotto"
PAGE_ICON = str(Path(__file__).parent / "assets" / "cerbero_logo.png")
# ---------------------------------------------------------------------------
@@ -53,35 +53,38 @@ def _resolve_paths() -> tuple[Path, Path]:
_HEALTH_BADGES: dict[str, tuple[str, str]] = {
"running": ("🟢", "RUNNING"),
"degraded": ("🟡", "DEGRADED"),
"running": ("🟢", "ATTIVO"),
"degraded": ("🟡", "DEGRADATO"),
"killed": ("🔴", "KILL SWITCH"),
"stopped": ("", "STOPPED"),
"unknown": ("", "UNKNOWN"),
"stopped": ("", "FERMO"),
"unknown": ("", "SCONOSCIUTO"),
}
def _render_sidebar(db_path: Path, audit_path: Path) -> None:
snap = load_engine_snapshot(db_path=db_path)
icon, label = _HEALTH_BADGES.get(snap.health, ("", "UNKNOWN"))
icon, label = _HEALTH_BADGES.get(snap.health, ("", "SCONOSCIUTO"))
logo_path = Path(__file__).parent / "assets" / "cerbero_logo.png"
if logo_path.is_file():
st.sidebar.image(str(logo_path), use_container_width=True)
st.sidebar.markdown(f"### {icon} {label}")
if snap.kill_switch_armed:
st.sidebar.error(
f"**Kill switch armed**\n\n"
f"reason: {snap.kill_reason or ''}\n\n"
f"since: {humanize_dt(snap.kill_at)}"
f"**Kill switch armato**\n\n"
f"motivo: {snap.kill_reason or ''}\n\n"
f"da: {humanize_dt(snap.kill_at)}"
)
st.sidebar.metric(
"Last health check",
"Ultimo health check",
humanize_age(snap.last_health_check_age_s),
)
st.sidebar.metric("Open positions", snap.open_positions)
st.sidebar.metric("Posizioni aperte", snap.open_positions)
st.sidebar.caption(f"config: `{snap.config_version or ''}`")
st.sidebar.divider()
st.sidebar.caption("Read-only • localhost only")
st.sidebar.caption("Sola lettura • solo localhost")
st.sidebar.caption(f"db: `{db_path}`")
st.sidebar.caption(f"audit: `{audit_path}`")
@@ -102,34 +105,41 @@ def main() -> None:
db_path, audit_path = _resolve_paths()
_render_sidebar(db_path, audit_path)
st.title(f"{PAGE_ICON} Cerbero Bite")
logo_path = Path(__file__).parent / "assets" / "cerbero_logo.png"
header_cols = st.columns([1, 6])
if logo_path.is_file():
header_cols[0].image(str(logo_path), use_container_width=True)
header_cols[1].title("Cerbero Bite")
st.caption(
"Rule-based ETH credit-spread engine — read-only dashboard"
"Motore rule-based per credit spread su ETH — cruscotto in sola lettura"
)
st.markdown(
"""
Use the sidebar to navigate:
Usa la barra laterale per navigare:
- **Status** — engine health, kill switch, open positions, audit anchor
- **Audit** — live audit log stream + chain integrity verification
- **Stato** — salute del motore, kill switch, posizioni aperte, ancora audit
- **Audit** — streaming del registro audit + verifica integrità della catena
- **Equity** — P&L cumulato, drawdown, distribuzione per chiusura, statistiche mensili
- **Storico** — trade chiusi con filtri, KPI, esportazione CSV
- **Posizione** — drilldown sulla singola posizione con grafico payoff
The dashboard reads `data/state.sqlite` and `data/audit.log` directly;
it never calls MCP services or the broker. All write actions remain
on the CLI for now.
Il cruscotto legge `data/state.sqlite` e `data/audit.log` direttamente;
non interroga mai i servizi MCP né il broker. L'unico canale di
scrittura è la coda `manual_actions` per arm/disarm del kill switch.
"""
)
snap = load_engine_snapshot(db_path=db_path)
cols = st.columns(4)
cols[0].metric("Health", _HEALTH_BADGES[snap.health][1])
cols[0].metric("Salute motore", _HEALTH_BADGES[snap.health][1])
cols[1].metric(
"Kill switch",
"ARMED" if snap.kill_switch_armed else "DISARMED",
"ARMATO" if snap.kill_switch_armed else "DISARMATO",
)
cols[2].metric("Open positions", snap.open_positions)
cols[2].metric("Posizioni aperte", snap.open_positions)
cols[3].metric(
"Last health check",
"Ultimo health check",
humanize_age(snap.last_health_check_age_s),
)
+183 -55
View File
@@ -13,12 +13,14 @@ from cerbero_bite.gui.data_layer import (
EngineSnapshot,
enqueue_arm_kill,
enqueue_disarm_kill,
enqueue_run_cycle,
humanize_age,
humanize_dt,
load_engine_snapshot,
load_open_positions,
load_pending_manual_actions,
)
from cerbero_bite.gui.live_data import BalancesSnapshot, fetch_balances_sync
def _resolve_paths() -> tuple[Path, Path]:
@@ -35,78 +37,193 @@ _HEALTH_COLORS = {
"unknown": ("", "info"),
}
_TYPED_PHRASE = "yes I am sure"
_TYPED_PHRASE = "confermo"
def _render_force_cycle_panel(db_path: Path) -> None:
    """Render the "Forza ciclo" panel.

    Three buttons (entry / monitor / health) that enqueue ``run_cycle``
    manual actions; the engine drains the queue every minute.
    """
    st.subheader("Forza ciclo")
    st.caption(
        "Accoda una richiesta di esecuzione immediata di un ciclo. Funziona "
        "solo se il motore è in esecuzione (`cerbero-bite start`); il job "
        "`manual_actions` consuma la coda ogni minuto."
    )
    specs = (
        ("entry", "▶ Forza entry",
         "Esegue subito una valutazione del ciclo entry."),
        ("monitor", "🔍 Forza monitor",
         "Esegue subito un giro del monitor sulle posizioni aperte."),
        ("health", "💓 Forza health",
         "Esegue subito un health check completo."),
    )
    for col, (cycle, label, help_text) in zip(
        st.columns(3), specs, strict=True
    ):
        if not col.button(label, use_container_width=True, help=help_text):
            continue
        aid = enqueue_run_cycle(cycle=cycle, db_path=db_path)
        if cycle == "entry":
            st.success(
                f"✅ ciclo entry accodato (id #{aid}). "
                "Il motore lo eseguirà entro ~1 minuto."
            )
        else:
            st.success(f"✅ ciclo {cycle} accodato (id #{aid}).")
@st.cache_data(ttl=60, show_spinner=False)
def _cached_balances() -> BalancesSnapshot:
    """Fetch balances at most once per minute per Streamlit session.

    Repeated page reruns within the TTL reuse the last snapshot; the
    refresh button clears this cache to force a new MCP fetch.
    """
    return fetch_balances_sync(timeout_s=10.0)
def _render_balances_panel() -> None:
    """Render the "Saldi exchange" panel (live MCP fetch, TTL 60s).

    Data comes from ``_cached_balances``; the refresh button invalidates
    that cache so the next call hits the gateway again. A total fetch
    failure is shown as an error and the panel bails out early.
    """
    st.subheader("Saldi exchange")
    if st.button("🔄 Aggiorna saldi", help="Forza un nuovo fetch dagli MCP."):
        _cached_balances.clear()
    try:
        snap = _cached_balances()
    except Exception as exc:
        st.error(
            f"Impossibile leggere i saldi: {type(exc).__name__}: {exc}"
        )
        return

    def _fmt(value, spec: str) -> str:
        # Empty cell instead of a misleading "0.00" when the value is unknown.
        return format(float(value), spec) if value is not None else ""

    table = [
        {
            "exchange": row.exchange,
            "valuta": row.currency,
            "equity": _fmt(row.equity, ",.2f"),
            "disponibile": _fmt(row.available, ",.2f"),
            "P&L non realizzato": _fmt(row.unrealized_pnl, "+.2f"),
            "errore": row.error or "",
        }
        for row in snap.rows
    ]
    st.dataframe(table, use_container_width=True, hide_index=True)
    totals = st.columns(3)
    totals[0].metric("Totale USD", f"${float(snap.total_usd()):,.2f}")
    eur = snap.total_eur()
    totals[1].metric(
        "Totale EUR",
        f"{float(eur):,.2f}" if eur is not None else "",
    )
    rate = snap.eur_usd_rate
    totals[2].metric(
        "Cambio EUR/USD",
        f"{float(rate):.4f}" if rate is not None else "",
    )
    if snap.fx_error:
        st.warning(f"FX non disponibile: {snap.fx_error}")
    age = (
        f" · letti {humanize_dt(snap.fetched_at)}"
        if snap.fetched_at is not None
        else ""
    )
    st.caption(
        f"Cache TTL 60s · saldi letti dal gateway MCP{age}"
    )
def _render_kill_switch_panel(db_path: Path, snap: EngineSnapshot) -> None:
st.subheader("Kill switch controls")
st.subheader("Comandi kill switch")
if snap.kill_switch_armed:
st.warning(
"Kill switch is **armed**. Disarming queues a `disarm_kill` "
"action; the engine consumer applies it on the next minute "
"tick and the transition is recorded in the audit chain."
"Kill switch **armato**. Disarmandolo viene accodata una "
"azione `disarm_kill`; il consumer del motore la applica al "
"prossimo tick di un minuto e la transizione viene registrata "
"nella catena audit."
)
with st.form("kill_disarm_form", clear_on_submit=True):
reason = st.text_input(
"Reason (required)",
placeholder="e.g. macro window passed",
"Motivo (obbligatorio)",
placeholder="es. finestra macro superata",
)
confirm = st.text_input(
f"Type `{_TYPED_PHRASE}` to confirm",
f"Scrivi `{_TYPED_PHRASE}` per confermare",
placeholder=_TYPED_PHRASE,
)
submitted = st.form_submit_button(
"🟢 Queue disarm",
"🟢 Accoda disarmo",
type="primary",
use_container_width=True,
)
if submitted:
if confirm.strip() != _TYPED_PHRASE:
st.error(f"Type exactly `{_TYPED_PHRASE}` to confirm.")
st.error(
f"Scrivi esattamente `{_TYPED_PHRASE}` per confermare."
)
elif not reason.strip():
st.error("Reason is required.")
st.error("Il motivo è obbligatorio.")
else:
aid = enqueue_disarm_kill(reason=reason, db_path=db_path)
st.success(
f"✅ disarm queued (id #{aid}). "
"The engine will pick it up within ~1 minute."
f"✅ disarmo accodato (id #{aid}). "
"Il motore lo applicherà entro ~1 minuto."
)
else:
st.info(
"Kill switch is **disarmed**. Arming queues an `arm_kill` "
"action; the engine consumer applies it on the next minute tick."
"Kill switch **disarmato**. Armandolo viene accodata una "
"azione `arm_kill`; il consumer del motore la applica al "
"prossimo tick di un minuto."
)
with st.form("kill_arm_form", clear_on_submit=True):
reason = st.text_input(
"Reason (required)",
placeholder="e.g. macro shock — pause trading",
"Motivo (obbligatorio)",
placeholder="es. shock macro — sospendi trading",
)
confirm = st.text_input(
f"Type `{_TYPED_PHRASE}` to confirm",
f"Scrivi `{_TYPED_PHRASE}` per confermare",
placeholder=_TYPED_PHRASE,
)
submitted = st.form_submit_button(
"🔴 Queue arm",
"🔴 Accoda armamento",
type="secondary",
use_container_width=True,
)
if submitted:
if confirm.strip() != _TYPED_PHRASE:
st.error(f"Type exactly `{_TYPED_PHRASE}` to confirm.")
st.error(
f"Scrivi esattamente `{_TYPED_PHRASE}` per confermare."
)
elif not reason.strip():
st.error("Reason is required.")
st.error("Il motivo è obbligatorio.")
else:
aid = enqueue_arm_kill(reason=reason, db_path=db_path)
st.success(
f"✅ arm queued (id #{aid}). "
"The engine will pick it up within ~1 minute."
f"✅ armamento accodato (id #{aid}). "
"Il motore lo applicherà entro ~1 minuto."
)
def render() -> None:
st.title("📊 Status")
st.caption("Engine health, kill switch, open positions and audit anchor.")
st.title("📊 Stato")
st.caption(
"Salute del motore, kill switch, posizioni aperte e ancora audit."
)
db_path, _ = _resolve_paths()
snap = load_engine_snapshot(db_path=db_path)
@@ -124,19 +241,29 @@ def render() -> None:
if snap.kill_switch_armed:
st.error(
f"**Kill switch armed** — engine will refuse new entries.\n\n"
f"- reason: `{snap.kill_reason or ''}`\n"
f"- since: `{humanize_dt(snap.kill_at)}`"
f"**Kill switch armato** — il motore rifiuterà nuove entrate.\n\n"
f"- motivo: `{snap.kill_reason or ''}`\n"
f"- da: `{humanize_dt(snap.kill_at)}`"
)
# Top metrics
cols = st.columns(4)
cols[0].metric("Open positions", snap.open_positions)
cols[0].metric("Posizioni aperte", snap.open_positions)
cols[1].metric(
"Last health check", humanize_age(snap.last_health_check_age_s)
"Ultimo health check", humanize_age(snap.last_health_check_age_s)
)
cols[2].metric("Started at", humanize_dt(snap.started_at))
cols[3].metric("Config version", snap.config_version or "")
cols[2].metric("Avviato il", humanize_dt(snap.started_at))
cols[3].metric("Versione config", snap.config_version or "")
st.divider()
# Saldi exchange (live MCP fetch, TTL 60s)
_render_balances_panel()
st.divider()
# Forza ciclo
_render_force_cycle_panel(db_path)
st.divider()
@@ -145,30 +272,30 @@ def render() -> None:
st.divider()
# Pending manual actions
# Azioni manuali pendenti
pending = load_pending_manual_actions(db_path=db_path)
if pending:
st.subheader("Pending manual actions")
st.subheader("Azioni manuali pendenti")
st.caption(
"Queued from this dashboard, not yet consumed. The engine "
"drains the queue every minute via the `manual_actions` job."
"Accodate da questo cruscotto, non ancora consumate. Il motore "
"drena la coda ogni minuto tramite il job `manual_actions`."
)
rows_pending = [
{
"id": a.id,
"kind": a.kind,
"tipo": a.kind,
"payload": a.payload_json or "",
"created_at": humanize_dt(a.created_at),
"creata il": humanize_dt(a.created_at),
}
for a in pending
]
st.dataframe(rows_pending, use_container_width=True, hide_index=True)
st.divider()
# Audit anchor
st.subheader("Audit anchor")
# Ancora audit
st.subheader("Ancora audit")
if snap.last_audit_hash is None:
st.info("No anchor recorded yet.")
st.info("Nessuna ancora registrata.")
else:
short = (
f"{snap.last_audit_hash[:12]}{snap.last_audit_hash[-12:]}"
@@ -177,32 +304,33 @@ def render() -> None:
)
st.code(short, language="text")
st.caption(
"Last hash chain head persisted in `system_state.last_audit_hash`. "
"On boot the orchestrator compares this with the audit-log file tail; "
"a mismatch arms the kill switch (CRITICAL)."
"Ultima testa della catena hash persistita in "
"`system_state.last_audit_hash`. All'avvio l'orchestrator la "
"confronta con la coda del file audit; un mismatch arma il "
"kill switch (CRITICAL)."
)
st.divider()
# Open positions table
st.subheader("Open positions")
# Tabella posizioni aperte
st.subheader("Posizioni aperte")
positions = load_open_positions(db_path=db_path)
if not positions:
st.info("No open positions.")
st.info("Nessuna posizione aperta.")
else:
rows = [
{
"proposal_id": str(p.proposal_id)[:8],
"spread": p.spread_type,
"asset": p.asset,
"n_contracts": p.n_contracts,
"credit_usd": f"{p.credit_usd:.2f}",
"max_loss_usd": f"{p.max_loss_usd:.2f}",
"short_strike": f"{p.short_strike}",
"long_strike": f"{p.long_strike}",
"status": p.status,
"opened_at": humanize_dt(p.opened_at),
"expiry": humanize_dt(p.expiry),
"n. contratti": p.n_contracts,
"credito (USD)": f"{p.credit_usd:.2f}",
"max perdita (USD)": f"{p.max_loss_usd:.2f}",
"strike short": f"{p.short_strike}",
"strike long": f"{p.long_strike}",
"stato": p.status,
"aperta il": humanize_dt(p.opened_at),
"scadenza": humanize_dt(p.expiry),
}
for p in positions
]
+18 -17
View File
@@ -27,8 +27,8 @@ def _resolve_paths() -> tuple[Path, Path]:
def render() -> None:
st.title("🔍 Audit")
st.caption(
"Append-only hash-chained audit log "
"(`data/audit.log`). Reading is non-mutating."
"Registro audit append-only con hash chain "
"(`data/audit.log`). La lettura non modifica nulla."
)
_, audit_path = _resolve_paths()
@@ -36,13 +36,13 @@ def render() -> None:
col_l, col_r = st.columns([1, 2])
with col_l:
st.subheader("Chain integrity")
if st.button("Verify chain", type="primary"):
with st.spinner("Walking the chain"):
st.subheader("Integrità catena")
if st.button("Verifica catena", type="primary"):
with st.spinner("Sto percorrendo la catena"):
status = load_audit_chain_status(audit_path=audit_path)
if status.ok:
st.success(
f"✅ chain integra fino a {status.entries_verified} eventi"
f"✅ catena integra fino a {status.entries_verified} eventi"
)
else:
st.error(
@@ -50,14 +50,15 @@ def render() -> None:
)
else:
st.caption(
"Click to recompute every line's hash and verify the prev-hash "
"linkage. Mismatch → CRITICAL alert in production."
"Premi per ricalcolare l'hash di ogni riga e verificare il "
"collegamento prev-hash. Mismatch → alert CRITICAL in "
"produzione."
)
with col_r:
st.subheader("Filters")
st.subheader("Filtri")
limit = st.slider(
"Last N entries",
"Ultimi N eventi",
min_value=10,
max_value=500,
value=100,
@@ -67,8 +68,8 @@ def render() -> None:
all_recent = load_audit_tail(audit_path=audit_path, limit=limit)
events_present = sorted({e.event for e in all_recent})
event_filter = st.selectbox(
"Event filter",
options=["(all)", *events_present],
"Filtro per evento",
options=["(tutti)", *events_present],
index=0,
)
@@ -83,16 +84,16 @@ def render() -> None:
st.divider()
# Filtered tail
# Tail filtrata
filtered = (
all_recent
if event_filter == "(all)"
if event_filter == "(tutti)"
else [e for e in all_recent if e.event == event_filter]
)
st.subheader(f"Last entries ({len(filtered)} shown)")
st.subheader(f"Ultimi eventi ({len(filtered)} mostrati)")
if not filtered:
st.info("No matching audit entries.")
st.info("Nessun evento corrisponde ai filtri.")
return
rows = []
@@ -106,7 +107,7 @@ def render() -> None:
rows.append(
{
"timestamp": humanize_dt(entry.timestamp),
"event": entry.event,
"evento": entry.event,
"payload": payload_pretty,
"hash": (
f"{entry.hash[:8]}{entry.hash[-8:]}"
+34 -28
View File
@@ -25,12 +25,15 @@ def _resolve_db() -> Path:
def _date_window(label: str) -> tuple[datetime | None, datetime | None]:
"""UI control for picking the analytics window."""
"""Selettore della finestra temporale per l'analitica."""
options = {
"All time": (None, None),
"Last 30 days": (datetime.now(UTC) - timedelta(days=30), None),
"Last 90 days": (datetime.now(UTC) - timedelta(days=90), None),
"Year to date": (datetime(datetime.now(UTC).year, 1, 1, tzinfo=UTC), None),
"Tutto lo storico": (None, None),
"Ultimi 30 giorni": (datetime.now(UTC) - timedelta(days=30), None),
"Ultimi 90 giorni": (datetime.now(UTC) - timedelta(days=90), None),
"Da inizio anno": (
datetime(datetime.now(UTC).year, 1, 1, tzinfo=UTC),
None,
),
}
pick = st.selectbox(label, list(options.keys()), index=0)
return options[pick]
@@ -39,28 +42,29 @@ def _date_window(label: str) -> tuple[datetime | None, datetime | None]:
def render() -> None:
st.title("📈 Equity")
st.caption(
"Cumulative realised P&L, drawdown, and per-trade distribution. "
"Computed from closed positions in `data/state.sqlite`."
"P&L realizzato cumulato, drawdown e distribuzione per trade. "
"Calcolato dalle posizioni chiuse in `data/state.sqlite`."
)
start, end = _date_window("Window")
start, end = _date_window("Finestra")
db_path = _resolve_db()
positions = load_closed_positions(db_path=db_path, start=start, end=end)
if not positions:
st.info(
"No closed positions in the selected window yet. "
"The equity curve will populate as soon as the engine closes its first trade."
"Nessuna posizione chiusa nella finestra selezionata. "
"La curva equity si popolerà non appena il motore chiuderà "
"il primo trade."
)
return
# KPI strip
# Striscia KPI
kpis = compute_kpis(positions)
cols = st.columns(5)
cols[0].metric("Closed trades", kpis.n_trades)
cols[0].metric("Trade chiusi", kpis.n_trades)
cols[1].metric("Win rate", f"{kpis.win_rate:.0%}")
cols[2].metric("Total P&L", f"${float(kpis.total_pnl_usd):+.2f}")
cols[2].metric("P&L totale", f"${float(kpis.total_pnl_usd):+.2f}")
cols[3].metric("Edge / trade", f"${float(kpis.edge_per_trade_usd):+.2f}")
cols[4].metric(
"Max drawdown",
@@ -82,14 +86,14 @@ def render() -> None:
}
)
st.subheader("Cumulative P&L (USD)")
st.subheader("P&L cumulato (USD)")
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=df["timestamp"],
y=df["cumulative_pnl_usd"],
mode="lines+markers",
name="cumulative P&L",
name="P&L cumulato",
line={"color": "#2ecc71", "width": 2},
)
)
@@ -122,18 +126,18 @@ def render() -> None:
)
st.plotly_chart(dd_fig, use_container_width=True)
# PnL distribution
st.subheader("P&L distribution by close reason")
# Distribuzione P&L
st.subheader("Distribuzione P&L per motivo di chiusura")
by_reason: dict[str, list[float]] = {}
for pos in positions:
if pos.pnl_usd is None:
continue
by_reason.setdefault(pos.close_reason or "(unknown)", []).append(
by_reason.setdefault(pos.close_reason or "(sconosciuto)", []).append(
float(pos.pnl_usd)
)
counts = Counter(
(pos.close_reason or "(unknown)") for pos in positions
(pos.close_reason or "(sconosciuto)") for pos in positions
)
cols = st.columns(min(len(counts), 6) or 1)
for col, (reason, count) in zip(cols, counts.most_common(6), strict=False):
@@ -141,28 +145,30 @@ def render() -> None:
hist_fig = go.Figure()
for reason, pnls in by_reason.items():
hist_fig.add_trace(go.Histogram(x=pnls, name=reason, opacity=0.6, nbinsx=30))
hist_fig.add_trace(
go.Histogram(x=pnls, name=reason, opacity=0.6, nbinsx=30)
)
hist_fig.update_layout(
barmode="overlay",
height=320,
margin={"l": 10, "r": 10, "t": 30, "b": 10},
xaxis_title="P&L (USD)",
yaxis_title="trades",
yaxis_title="numero trade",
legend={"orientation": "h", "y": 1.1},
)
st.plotly_chart(hist_fig, use_container_width=True)
# Monthly table
st.subheader("Per-month stats")
# Tabella mensile
st.subheader("Statistiche mensili")
months = compute_monthly_stats(positions)
rows = [
{
"month": m.year_month,
"trades": m.n_trades,
"wins": m.n_wins,
"win_rate": f"{m.win_rate:.0%}",
"mese": m.year_month,
"trade": m.n_trades,
"vittorie": m.n_wins,
"win rate": f"{m.win_rate:.0%}",
"P&L (USD)": f"{float(m.pnl_usd):+.2f}",
"avg / trade": f"{float(m.avg_pnl_usd):+.2f}",
"media / trade": f"{float(m.avg_pnl_usd):+.2f}",
}
for m in months
]
+49 -38
View File
@@ -24,64 +24,73 @@ def _resolve_db() -> Path:
def _date_window() -> tuple[datetime | None, datetime | None]:
presets = {
"All time": (None, None),
"Last 7 days": (datetime.now(UTC) - timedelta(days=7), None),
"Last 30 days": (datetime.now(UTC) - timedelta(days=30), None),
"Last 90 days": (datetime.now(UTC) - timedelta(days=90), None),
"Year to date": (datetime(datetime.now(UTC).year, 1, 1, tzinfo=UTC), None),
"Tutto lo storico": (None, None),
"Ultimi 7 giorni": (datetime.now(UTC) - timedelta(days=7), None),
"Ultimi 30 giorni": (datetime.now(UTC) - timedelta(days=30), None),
"Ultimi 90 giorni": (datetime.now(UTC) - timedelta(days=90), None),
"Da inizio anno": (
datetime(datetime.now(UTC).year, 1, 1, tzinfo=UTC),
None,
),
}
pick = st.selectbox("Window", list(presets.keys()), index=0)
pick = st.selectbox("Finestra", list(presets.keys()), index=0)
return presets[pick]
def render() -> None:
st.title("📜 History")
st.caption("Closed trades with filters, KPI strip, and CSV export.")
st.title("📜 Storico")
st.caption(
"Trade chiusi con filtri, striscia KPI ed esportazione CSV."
)
db_path = _resolve_db()
start, end = _date_window()
positions = load_closed_positions(db_path=db_path, start=start, end=end)
# Sub-filter by close reason and PnL sign.
reason_options = sorted({p.close_reason or "(unknown)" for p in positions})
# Sotto-filtri per motivo di chiusura e segno P&L
reason_options = sorted(
{p.close_reason or "(sconosciuto)" for p in positions}
)
chosen_reasons = st.multiselect(
"Close reasons", options=reason_options, default=reason_options
"Motivi di chiusura",
options=reason_options,
default=reason_options,
)
pnl_filter = st.radio(
"P&L filter",
options=["all", "winners", "losers"],
"Filtro P&L",
options=["tutti", "vincenti", "perdenti"],
horizontal=True,
index=0,
)
filtered = []
for p in positions:
reason = p.close_reason or "(unknown)"
reason = p.close_reason or "(sconosciuto)"
if reason not in chosen_reasons:
continue
if pnl_filter == "winners" and (p.pnl_usd is None or p.pnl_usd <= 0):
if pnl_filter == "vincenti" and (p.pnl_usd is None or p.pnl_usd <= 0):
continue
if pnl_filter == "losers" and (p.pnl_usd is None or p.pnl_usd >= 0):
if pnl_filter == "perdenti" and (p.pnl_usd is None or p.pnl_usd >= 0):
continue
filtered.append(p)
# KPI strip
# Striscia KPI
kpis = compute_kpis(filtered)
cols = st.columns(6)
cols[0].metric("Trades", kpis.n_trades)
cols[0].metric("Trade", kpis.n_trades)
cols[1].metric("Win rate", f"{kpis.win_rate:.0%}")
cols[2].metric("Total P&L", f"${float(kpis.total_pnl_usd):+.2f}")
cols[3].metric("Avg win", f"${float(kpis.avg_win_usd):+.2f}")
cols[4].metric("Avg loss", f"${float(kpis.avg_loss_usd):+.2f}")
cols[2].metric("P&L totale", f"${float(kpis.total_pnl_usd):+.2f}")
cols[3].metric("Vittoria media", f"${float(kpis.avg_win_usd):+.2f}")
cols[4].metric("Perdita media", f"${float(kpis.avg_loss_usd):+.2f}")
cols[5].metric("Edge / trade", f"${float(kpis.edge_per_trade_usd):+.2f}")
st.divider()
if not filtered:
st.info("No trades match the current filters.")
st.info("Nessun trade corrisponde ai filtri correnti.")
return
# Build DataFrame for display + export
# DataFrame per visualizzazione + esportazione
rows = []
for p in filtered:
days_held = (
@@ -92,31 +101,33 @@ def render() -> None:
rows.append(
{
"proposal_id": str(p.proposal_id)[:8],
"spread_type": p.spread_type,
"spread": p.spread_type,
"asset": p.asset,
"n_contracts": p.n_contracts,
"short_strike": float(p.short_strike),
"long_strike": float(p.long_strike),
"credit_usd": float(p.credit_usd),
"max_loss_usd": float(p.max_loss_usd),
"pnl_usd": float(p.pnl_usd) if p.pnl_usd is not None else None,
"close_reason": p.close_reason or "(unknown)",
"days_held": days_held,
"opened_at": humanize_dt(p.opened_at),
"closed_at": humanize_dt(p.closed_at),
"expiry": humanize_dt(p.expiry),
"n. contratti": p.n_contracts,
"strike short": float(p.short_strike),
"strike long": float(p.long_strike),
"credito (USD)": float(p.credit_usd),
"max perdita (USD)": float(p.max_loss_usd),
"P&L (USD)": (
float(p.pnl_usd) if p.pnl_usd is not None else None
),
"motivo chiusura": p.close_reason or "(sconosciuto)",
"giorni tenuta": days_held,
"aperta il": humanize_dt(p.opened_at),
"chiusa il": humanize_dt(p.closed_at),
"scadenza": humanize_dt(p.expiry),
}
)
df = pd.DataFrame(rows)
st.dataframe(df, use_container_width=True, hide_index=True)
# CSV export
# Esportazione CSV
buf = io.StringIO()
df.to_csv(buf, index=False)
st.download_button(
"Download CSV",
"Scarica CSV",
data=buf.getvalue(),
file_name=f"cerbero_bite_history_{datetime.now(UTC).date()}.csv",
file_name=f"cerbero_bite_storico_{datetime.now(UTC).date()}.csv",
mime="text/csv",
)
+43 -44
View File
@@ -38,40 +38,41 @@ def _position_label(p: PositionRecord) -> str:
def _render_header(position: PositionRecord) -> None:
cols = st.columns(4)
cols[0].metric("status", position.status)
cols[0].metric("stato", position.status)
cols[1].metric("spread", position.spread_type)
cols[2].metric("contracts", position.n_contracts)
cols[3].metric("credit (USD)", f"${float(position.credit_usd):+.2f}")
cols[2].metric("contratti", position.n_contracts)
cols[3].metric("credito (USD)", f"${float(position.credit_usd):+.2f}")
st.caption(
f"`{position.proposal_id}` · opened {humanize_dt(position.opened_at)} · "
f"expiry {humanize_dt(position.expiry)}"
f"`{position.proposal_id}` · aperta il "
f"{humanize_dt(position.opened_at)} · scadenza "
f"{humanize_dt(position.expiry)}"
)
def _render_legs(position: PositionRecord) -> None:
st.subheader("Legs (entry snapshot)")
st.subheader("Gambe (snapshot all'entrata)")
rows = [
{
"leg": "short",
"instrument": position.short_instrument,
"gamba": "short",
"strumento": position.short_instrument,
"strike": float(position.short_strike),
"side": "SELL",
"lato": "VENDI",
"size": position.n_contracts,
"delta_at_entry": float(position.delta_at_entry),
"delta all'entrata": float(position.delta_at_entry),
},
{
"leg": "long",
"instrument": position.long_instrument,
"gamba": "long",
"strumento": position.long_instrument,
"strike": float(position.long_strike),
"side": "BUY",
"lato": "COMPRA",
"size": position.n_contracts,
"delta_at_entry": "", # only short delta is persisted
"delta all'entrata": "",
},
]
st.dataframe(rows, use_container_width=True, hide_index=True)
st.caption(
"Live mid/greeks are not pulled from MCP by the GUI. "
"Refresh shown by the engine via the Audit page."
"Mid e greche live non vengono richiesti agli MCP dal cruscotto. "
"Il refresh è demandato al motore: visibile nella pagina Audit."
)
@@ -79,25 +80,25 @@ def _render_distance(position: PositionRecord) -> None:
metrics = compute_distance_metrics(position)
cols = st.columns(5)
cols[0].metric(
"Short strike OTM",
"Short OTM %",
f"{metrics.short_strike_otm_pct:.1%}"
if metrics.short_strike_otm_pct is not None
else "",
)
cols[1].metric(
"Days to expiry",
"Giorni a scadenza",
metrics.days_to_expiry if metrics.days_to_expiry is not None else "",
)
cols[2].metric(
"Days held",
"Giorni in tenuta",
metrics.days_held if metrics.days_held is not None else "",
)
cols[3].metric("Δ at entry", f"{metrics.delta_at_entry:+.3f}")
cols[4].metric("Width % of spot", f"{metrics.width_pct_of_spot:.1%}")
cols[3].metric("Δ all'entrata", f"{metrics.delta_at_entry:+.3f}")
cols[4].metric("Larghezza % spot", f"{metrics.width_pct_of_spot:.1%}")
def _render_payoff(position: PositionRecord) -> None:
st.subheader("Payoff at expiry")
st.subheader("Payoff a scadenza")
curve = compute_payoff_curve(position)
fig = go.Figure()
@@ -107,7 +108,7 @@ def _render_payoff(position: PositionRecord) -> None:
y=curve.pnl_grid_usd,
mode="lines",
line={"color": "#3498db", "width": 2.5},
name="P&L at expiry",
name="P&L a scadenza",
fill="tozeroy",
fillcolor="rgba(52,152,219,0.10)",
)
@@ -143,21 +144,21 @@ def _render_payoff(position: PositionRecord) -> None:
line_dash="solid",
line_color="#7f8c8d",
opacity=0.4,
annotation_text=f"entry spot {curve.spot_at_entry:.0f}",
annotation_text=f"spot all'entrata {curve.spot_at_entry:.0f}",
annotation_position="bottom",
)
fig.update_layout(
height=380,
margin={"l": 10, "r": 10, "t": 30, "b": 10},
xaxis_title="ETH spot at expiry (USD)",
xaxis_title="ETH spot a scadenza (USD)",
yaxis_title="P&L (USD)",
legend={"orientation": "h", "y": 1.1},
)
st.plotly_chart(fig, use_container_width=True)
cols = st.columns(3)
cols[0].metric("Max profit", f"${curve.max_profit_usd:+.2f}")
cols[1].metric("Max loss", f"${curve.max_loss_usd:+.2f}")
cols[0].metric("Profitto massimo", f"${curve.max_profit_usd:+.2f}")
cols[1].metric("Perdita massima", f"${curve.max_loss_usd:+.2f}")
cols[2].metric(
"Breakeven",
f"{curve.breakeven:.2f}" if curve.breakeven is not None else "",
@@ -165,10 +166,10 @@ def _render_payoff(position: PositionRecord) -> None:
def _render_decisions(position: PositionRecord) -> None:
st.subheader("Decision history")
st.subheader("Storico decisioni")
decisions = load_decisions_for_position(position.proposal_id)
if not decisions:
st.info("No decisions recorded for this position yet.")
st.info("Nessuna decisione registrata per questa posizione.")
return
rows = []
@@ -180,46 +181,44 @@ def _render_decisions(position: PositionRecord) -> None:
rows.append(
{
"timestamp": humanize_dt(d.timestamp),
"decision_type": d.decision_type,
"action": d.action_taken or "",
"notes": d.notes or "",
"outputs": json.dumps(outputs, sort_keys=True)
if outputs
else "",
"tipo decisione": d.decision_type,
"azione": d.action_taken or "",
"note": d.notes or "",
"output": json.dumps(outputs, sort_keys=True) if outputs else "",
}
)
st.dataframe(rows, use_container_width=True, hide_index=True)
def render() -> None:
st.title("💼 Position")
st.title("💼 Posizione")
st.caption(
"Drilldown on the trade: legs, payoff at expiry, decision history. "
"All data is read from SQLite — no live MCP calls."
"Drilldown sul trade: gambe, payoff a scadenza, storico decisioni. "
"Tutti i dati arrivano da SQLite — nessuna chiamata MCP live."
)
db_path = _resolve_db()
open_pos = load_open_positions(db_path=db_path)
closed_recent = load_closed_positions(db_path=db_path)[-10:] # last 10
closed_recent = load_closed_positions(db_path=db_path)[-10:]
candidates: list[PositionRecord] = list(open_pos) + list(reversed(closed_recent))
if not candidates:
st.info(
"No positions to display. The page will populate once the "
"engine opens its first trade."
"Nessuna posizione da mostrare. La pagina si popolerà non "
"appena il motore aprirà il primo trade."
)
return
labels = {_position_label(p): p for p in candidates}
pick = st.selectbox(
"Position",
"Posizione",
options=list(labels.keys()),
index=0,
)
position = labels[pick]
# Allow deep-linking via ?proposal_id=...
# Deep-link via ?proposal_id=
qp = st.query_params.get("proposal_id")
if qp:
try:
@@ -228,7 +227,7 @@ def render() -> None:
if override is not None:
position = override
except ValueError:
st.warning(f"Invalid proposal_id query parameter: {qp}")
st.warning(f"Parametro proposal_id non valido: {qp}")
st.divider()
_render_header(position)
@@ -3,13 +3,17 @@
The GUI (and other out-of-band tooling) records operator intent in the
SQLite ``manual_actions`` table; this consumer pulls those rows and
dispatches them through the same primitives the engine uses internally
(``KillSwitch.arm`` / ``disarm``) so the audit chain remains the single
source of truth for state transitions.
(``KillSwitch.arm`` / ``disarm``, ``Orchestrator.run_*``) so the audit
chain remains the single source of truth for state transitions.
Currently supported kinds:
Supported kinds:
* ``arm_kill`` — payload ``{"reason": str}``; arms the kill switch.
* ``disarm_kill`` — payload ``{"reason": str}``; disarms it.
* ``run_cycle`` — payload ``{"cycle": "entry"|"monitor"|"health"}``;
forces an immediate run of the named cycle. Only available when the
consumer is invoked with a ``cycle_runners`` mapping (the orchestrator
populates it at scheduler-install time).
Future kinds (``force_close``, ``approve_proposal``,
``reject_proposal``) are recognised by the ``ManualAction`` schema but
@@ -21,6 +25,7 @@ from __future__ import annotations
import json
import logging
from collections.abc import Awaitable, Callable
from datetime import UTC, datetime
from typing import TYPE_CHECKING
@@ -30,7 +35,10 @@ from cerbero_bite.state import connect, transaction
if TYPE_CHECKING:
from cerbero_bite.runtime.dependencies import RuntimeContext
__all__ = ["consume_manual_actions"]
__all__ = ["CycleRunner", "consume_manual_actions"]
CycleRunner = Callable[[], Awaitable[object]]
_log = logging.getLogger("cerbero_bite.runtime.manual_actions")
@@ -48,7 +56,10 @@ def _parse_payload(raw: str | None) -> dict[str, object]:
async def consume_manual_actions(
ctx: RuntimeContext, *, now: datetime | None = None
ctx: RuntimeContext,
*,
cycle_runners: dict[str, CycleRunner] | None = None,
now: datetime | None = None,
) -> int:
"""Drain the queue. Return the number of actions processed.
@@ -83,6 +94,19 @@ async def consume_manual_actions(
elif action.kind == "disarm_kill":
reason = str(payload.get("reason", "manual via GUI"))
ctx.kill_switch.disarm(reason=reason, source="manual_gui")
elif action.kind == "run_cycle":
cycle = str(payload.get("cycle", "")).strip().lower()
if cycle_runners is None:
result = "not_supported"
_log.warning(
"run_cycle dispatched without cycle_runners; "
"falling back to not_supported"
)
elif cycle not in cycle_runners:
result = f"error: unknown cycle '{cycle}'"
else:
await cycle_runners[cycle]()
result = f"ok: ran {cycle}"
else:
result = "not_supported"
_log.warning(
+8 -1
View File
@@ -234,7 +234,14 @@ class Orchestrator:
async def _manual_actions() -> None:
async def _do() -> None:
await consume_manual_actions(self._ctx)
await consume_manual_actions(
self._ctx,
cycle_runners={
"entry": self.run_entry,
"monitor": self.run_monitor,
"health": self.run_health,
},
)
await _safe("manual_actions", _do)
+1
View File
@@ -130,6 +130,7 @@ class ManualAction(BaseModel):
"force_close",
"arm_kill",
"disarm_kill",
"run_cycle",
]
proposal_id: UUID | None = None
payload_json: str | None = None