feat: import 6 MCP services + common workspace
This commit is contained in:
@@ -0,0 +1,19 @@
|
||||
from option_mcp_common.models import (
|
||||
Event,
|
||||
EventPriority,
|
||||
EventType,
|
||||
L1State,
|
||||
L2Entry,
|
||||
L3Entry,
|
||||
UserInstruction,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"L1State",
|
||||
"L2Entry",
|
||||
"L3Entry",
|
||||
"Event",
|
||||
"EventPriority",
|
||||
"EventType",
|
||||
"UserInstruction",
|
||||
]
|
||||
@@ -0,0 +1,98 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from collections.abc import Callable
|
||||
from dataclasses import dataclass, field
|
||||
from functools import wraps
|
||||
|
||||
from fastapi import HTTPException, Request, status
|
||||
|
||||
|
||||
@dataclass
class Principal:
    """An authenticated caller identity resolved from a bearer token."""

    # Principal name (e.g. "core", "observer").
    name: str
    # Capability strings granted to this principal; checked by acl_requires().
    capabilities: set[str] = field(default_factory=set)
|
||||
|
||||
|
||||
@dataclass
class TokenStore:
    """In-memory mapping from raw bearer-token strings to Principal objects."""

    # token string -> Principal
    tokens: dict[str, Principal]

    def get(self, token: str) -> Principal | None:
        """Return the Principal for *token*, or None when the token is unknown."""
        return self.tokens.get(token)
|
||||
|
||||
|
||||
def require_principal(request: Request) -> Principal:
    """Resolve the request's bearer token into a Principal.

    Raises:
        HTTPException: 401 when the Authorization header is absent or not a
            Bearer scheme; 403 when the token is not in the app's TokenStore.
    """
    header = request.headers.get("Authorization", "")
    prefix = "Bearer "
    if not header.startswith(prefix):
        raise HTTPException(status.HTTP_401_UNAUTHORIZED, "missing bearer token")
    store: TokenStore = request.app.state.token_store
    who = store.get(header[len(prefix):].strip())
    if who is None:
        raise HTTPException(status.HTTP_403_FORBIDDEN, "invalid token")
    return who
|
||||
|
||||
|
||||
def acl_requires(*, core: bool = False, observer: bool = False) -> Callable:
    """Decorator: require at least one matching capability.

    Works on both sync and async callables. The wrapped function must
    receive the Principal either as the ``principal`` keyword argument or
    as any positional argument; otherwise a 403 is raised.

    Fixes: the capability check was duplicated verbatim in the sync and
    async wrappers (now shared via ``_check``), and the async wrapper
    carried a dead non-await branch (it is only ever used for coroutine
    functions).
    """
    allowed: set[str] = set()
    if core:
        allowed.add("core")
    if observer:
        allowed.add("observer")

    def _check(args: tuple, kwargs: dict) -> None:
        # Locate the Principal (kwarg first, then positionals) and verify it
        # holds at least one allowed capability.
        principal = kwargs.get("principal")
        if principal is None:
            principal = next((a for a in args if isinstance(a, Principal)), None)
        if principal is None or not (principal.capabilities & allowed):
            raise HTTPException(
                status.HTTP_403_FORBIDDEN,
                f"capability required: {allowed}",
            )

    def decorator(func: Callable) -> Callable:
        @wraps(func)
        async def async_wrapper(*args, **kwargs):
            _check(args, kwargs)
            return await func(*args, **kwargs)

        @wraps(func)
        def sync_wrapper(*args, **kwargs):
            _check(args, kwargs)
            return func(*args, **kwargs)

        return async_wrapper if _is_coro(func) else sync_wrapper

    return decorator
|
||||
|
||||
|
||||
def _is_coro(func: Callable) -> bool:
|
||||
import asyncio
|
||||
return asyncio.iscoroutinefunction(func)
|
||||
|
||||
|
||||
def load_token_store_from_files(
    core_token_file: str | None,
    observer_token_file: str | None,
) -> TokenStore:
    """Build a TokenStore by reading one token per file.

    Each file's entire content (stripped) is the raw token; a falsy path
    skips that principal entirely.
    """
    mapping: dict[str, Principal] = {}
    for path, role in ((core_token_file, "core"), (observer_token_file, "observer")):
        if not path:
            continue
        with open(path) as fh:
            raw_token = fh.read().strip()
        mapping[raw_token] = Principal(name=role, capabilities={role})
    return TokenStore(tokens=mapping)
|
||||
@@ -0,0 +1,80 @@
|
||||
"""CER-P5-010: env validation policy — fail-fast per mandatory, soft per optional.
|
||||
|
||||
Usage al boot di ogni mcp `__main__.py`:
|
||||
|
||||
from option_mcp_common.env_validation import require_env, optional_env, summarize
|
||||
|
||||
creds_file = require_env("CREDENTIALS_FILE", "deribit credentials JSON path")
|
||||
host = optional_env("HOST", default="0.0.0.0")
|
||||
summarize(["CREDENTIALS_FILE", "HOST", "PORT"])
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class MissingEnvError(RuntimeError):
    """Raised by require_env when a mandatory env var is absent or empty."""
||||
|
||||
|
||||
def require_env(name: str, description: str = "") -> str:
    """Fail fast: raise MissingEnvError when *name* is unset or blank.

    The error message (also logged) includes *description* when given, so
    the boot log states clearly which variable is missing before exit.
    """
    value = (os.environ.get(name) or "").strip()
    if value:
        return value
    suffix = f" ({description})" if description else ""
    msg = f"missing mandatory env var: {name}{suffix}"
    logger.error(msg)
    raise MissingEnvError(msg)
|
||||
|
||||
|
||||
def optional_env(name: str, *, default: str = "") -> str:
    """Soft lookup: return the env value, or *default* when unset/blank.

    Logs at INFO when a non-empty default is substituted.
    """
    value = (os.environ.get(name) or "").strip()
    if value:
        return value
    if default:
        logger.info("env %s not set, using default=%r", name, default)
    return default
|
||||
|
||||
|
||||
def summarize(names: list[str]) -> None:
    """Log one INFO line per env var, masking likely-sensitive values.

    Variables whose name contains a sensitive marker (SECRET/KEY/TOKEN/
    PASSWORD/CREDENTIAL/WALLET) are reported only by length, never by value.
    """
    markers = ("SECRET", "KEY", "TOKEN", "PASSWORD", "CREDENTIAL", "WALLET")
    for name in names:
        value = os.environ.get(name)
        if value is None:
            logger.info("env[%s]: <unset>", name)
        elif any(m in name.upper() for m in markers):
            logger.info("env[%s]: <set, %d chars>", name, len(value))
        else:
            logger.info("env[%s]: %s", name, value)
|
||||
|
||||
|
||||
def fail_fast_if_missing(names: list[str]) -> None:
    """Abort boot (exit code 2) unless every listed env var is set and non-blank.

    Preferred usage: call early in main() so an incomplete configuration
    stops the service before it starts serving.
    """
    missing = [n for n in names if not (os.environ.get(n) or "").strip()]
    if not missing:
        return
    logger.error("boot aborted: missing mandatory env vars: %s", missing)
    print(f"FATAL: missing mandatory env vars: {missing}", file=sys.stderr)
    sys.exit(2)
|
||||
@@ -0,0 +1,139 @@
|
||||
from __future__ import annotations
|
||||
|
||||
|
||||
def sma(values: list[float], period: int) -> float | None:
|
||||
if len(values) < period:
|
||||
return None
|
||||
return sum(values[-period:]) / period
|
||||
|
||||
|
||||
def rsi(closes: list[float], period: int = 14) -> float | None:
|
||||
if len(closes) < period + 1:
|
||||
return None
|
||||
gains: list[float] = []
|
||||
losses: list[float] = []
|
||||
for i in range(1, len(closes)):
|
||||
delta = closes[i] - closes[i - 1]
|
||||
gains.append(max(delta, 0.0))
|
||||
losses.append(-min(delta, 0.0))
|
||||
avg_gain = sum(gains[:period]) / period
|
||||
avg_loss = sum(losses[:period]) / period
|
||||
for i in range(period, len(gains)):
|
||||
avg_gain = (avg_gain * (period - 1) + gains[i]) / period
|
||||
avg_loss = (avg_loss * (period - 1) + losses[i]) / period
|
||||
if avg_loss == 0:
|
||||
return 100.0
|
||||
rs = avg_gain / avg_loss
|
||||
return 100.0 - (100.0 / (1.0 + rs))
|
||||
|
||||
|
||||
def _ema_series(values: list[float], period: int) -> list[float]:
|
||||
if len(values) < period:
|
||||
return []
|
||||
k = 2.0 / (period + 1)
|
||||
seed = sum(values[:period]) / period
|
||||
out = [seed]
|
||||
for v in values[period:]:
|
||||
out.append(out[-1] + k * (v - out[-1]))
|
||||
return out
|
||||
|
||||
|
||||
def macd(
    closes: list[float],
    fast: int = 12,
    slow: int = 26,
    signal: int = 9,
) -> dict[str, float | None]:
    """MACD line / signal line / histogram for the most recent close.

    Returns all-None values when fewer than slow+signal closes are
    available or the EMA series cannot be formed.
    """
    empty: dict[str, float | None] = {"macd": None, "signal": None, "hist": None}
    if len(closes) < slow + signal:
        return empty
    fast_ema = _ema_series(closes, fast)
    slow_ema = _ema_series(closes, slow)
    # The slow EMA starts (slow - fast) samples later; trim the fast one so
    # both series describe the same close indices.
    aligned_fast = fast_ema[slow - fast:]
    macd_line = [a - b for a, b in zip(aligned_fast, slow_ema, strict=False)]
    if len(macd_line) < signal:
        return empty
    signal_series = _ema_series(macd_line, signal)
    if not signal_series:
        return empty
    macd_last, signal_last = macd_line[-1], signal_series[-1]
    return {
        "macd": macd_last,
        "signal": signal_last,
        "hist": macd_last - signal_last,
    }
|
||||
|
||||
|
||||
def atr(
|
||||
highs: list[float],
|
||||
lows: list[float],
|
||||
closes: list[float],
|
||||
period: int = 14,
|
||||
) -> float | None:
|
||||
if len(closes) < period + 1:
|
||||
return None
|
||||
trs: list[float] = []
|
||||
for i in range(1, len(closes)):
|
||||
tr = max(
|
||||
highs[i] - lows[i],
|
||||
abs(highs[i] - closes[i - 1]),
|
||||
abs(lows[i] - closes[i - 1]),
|
||||
)
|
||||
trs.append(tr)
|
||||
if len(trs) < period:
|
||||
return None
|
||||
avg = sum(trs[:period]) / period
|
||||
for i in range(period, len(trs)):
|
||||
avg = (avg * (period - 1) + trs[i]) / period
|
||||
return avg
|
||||
|
||||
|
||||
def adx(
    highs: list[float],
    lows: list[float],
    closes: list[float],
    period: int = 14,
) -> dict[str, float | None]:
    """Wilder ADX together with +DI / -DI.

    Needs at least 2*period + 1 closes (one period to seed the smoothed
    sums, another for the ADX average of DX); returns all-None otherwise.
    """
    nothing: dict[str, float | None] = {"adx": None, "+di": None, "-di": None}
    if len(closes) < 2 * period + 1:
        return nothing
    trs: list[float] = []
    plus_dms: list[float] = []
    minus_dms: list[float] = []
    for i in range(1, len(closes)):
        # True range: widest of today's range and the gaps vs. prior close.
        tr = max(
            highs[i] - lows[i],
            abs(highs[i] - closes[i - 1]),
            abs(lows[i] - closes[i - 1]),
        )
        up = highs[i] - highs[i - 1]
        dn = lows[i - 1] - lows[i]
        # Directional movement: only the dominant positive move counts.
        plus_dm = up if (up > dn and up > 0) else 0.0
        minus_dm = dn if (dn > up and dn > 0) else 0.0
        trs.append(tr)
        plus_dms.append(plus_dm)
        minus_dms.append(minus_dm)

    # Wilder smoothed sums, seeded with plain sums of the first period.
    atr_s = sum(trs[:period])
    pdm_s = sum(plus_dms[:period])
    mdm_s = sum(minus_dms[:period])
    dxs: list[float] = []
    pdi = mdi = 0.0
    for i in range(period, len(trs)):
        atr_s = atr_s - atr_s / period + trs[i]
        pdm_s = pdm_s - pdm_s / period + plus_dms[i]
        mdm_s = mdm_s - mdm_s / period + minus_dms[i]
        # Guard the zero-TR case (flat market) instead of dividing by zero.
        pdi = 100.0 * pdm_s / atr_s if atr_s else 0.0
        mdi = 100.0 * mdm_s / atr_s if atr_s else 0.0
        s = pdi + mdi
        dx = 100.0 * abs(pdi - mdi) / s if s else 0.0
        dxs.append(dx)

    if len(dxs) < period:
        return nothing
    # ADX: Wilder-smoothed average of the DX series; the returned DIs are
    # the most recent ones from the loop above.
    adx_val = sum(dxs[:period]) / period
    for i in range(period, len(dxs)):
        adx_val = (adx_val * (period - 1) + dxs[i]) / period
    return {"adx": adx_val, "+di": pdi, "-di": mdi}
|
||||
@@ -0,0 +1,81 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
|
||||
# pythonjsonlogger rinominato in .json; keep fallback per compat
|
||||
try:
|
||||
from pythonjsonlogger.json import JsonFormatter as _JsonFormatter # noqa: N813
|
||||
except ImportError:
|
||||
from pythonjsonlogger.jsonlogger import JsonFormatter as _JsonFormatter # noqa: N813
|
||||
|
||||
# (pattern, replacement) pairs applied in order by SecretsFilter to every
# formatted log message; each masks one class of credential-looking text.
SECRET_PATTERNS = [
    (re.compile(r"Bearer\s+[\w\-\._]+", re.IGNORECASE), "Bearer ***"),
    (re.compile(r'("api_key"\s*:\s*")[^"]+(")'), r'\1***\2'),
    (re.compile(r'("password"\s*:\s*")[^"]+(")'), r'\1***\2'),
    (re.compile(r'("private_key"\s*:\s*")[^"]+(")'), r'\1***\2'),
    (re.compile(r'("client_secret"\s*:\s*")[^"]+(")'), r'\1***\2'),
    (re.compile(r"sk-[\w]{20,}"), "sk-***"),
]
|
||||
|
||||
|
||||
class SecretsFilter(logging.Filter):
    """Logging filter that redacts credential-looking substrings.

    Rewrites the record in place: the fully formatted message (args already
    interpolated) is scrubbed with SECRET_PATTERNS and stored back on
    ``record.msg``, with ``record.args`` cleared.
    """

    def filter(self, record: logging.LogRecord) -> bool:
        scrubbed = record.getMessage()
        for pattern, replacement in SECRET_PATTERNS:
            scrubbed = pattern.sub(replacement, scrubbed)
        record.msg = scrubbed
        # Args were folded into msg by getMessage(); drop them so handlers
        # don't try to re-interpolate.
        record.args = ()
        return True
|
||||
|
||||
|
||||
def get_json_logger(name: str, level: int = logging.INFO) -> logging.Logger:
    """Return a named logger that emits JSON lines on stderr.

    Idempotent: a logger that already has handlers is returned untouched.
    The attached handler masks secrets via SecretsFilter and does not
    propagate to the root logger.
    """
    log = logging.getLogger(name)
    if log.handlers:
        # Already configured by a previous call.
        return log
    log.setLevel(level)
    stream = logging.StreamHandler(sys.stderr)
    stream.setFormatter(
        _JsonFormatter("%(asctime)s %(name)s %(levelname)s %(message)s")
    )
    stream.addFilter(SecretsFilter())
    log.addHandler(stream)
    log.propagate = False
    return log
|
||||
|
||||
|
||||
def configure_root_logging(
    *,
    level: str | int | None = None,
    format_type: str | None = None,
) -> None:
    """CER-P5-009: configure the root logger with a JSON or text formatter.

    Env overrides:
    - LOG_LEVEL (default INFO)
    - LOG_FORMAT=json|text (default json — production-ready structured log)

    SecretsFilter is applied in both formats.

    Fixes: an unrecognized LOG_LEVEL name made ``logging.getLevelName``
    return the string "Level <NAME>", which ``root.setLevel`` rejects with
    ValueError at boot — it now degrades gracefully to INFO.
    """
    lvl_raw = level if level is not None else os.environ.get("LOG_LEVEL", "INFO")
    if isinstance(lvl_raw, str):
        lvl = logging.getLevelName(lvl_raw.upper())
        # getLevelName returns a str for unknown names; fall back to INFO
        # instead of crashing the service at startup.
        if not isinstance(lvl, int):
            lvl = logging.INFO
    else:
        lvl = lvl_raw
    fmt = (format_type or os.environ.get("LOG_FORMAT") or "json").lower()

    root = logging.getLogger()
    # Remove existing handlers (basicConfig would have left duplicates).
    for h in list(root.handlers):
        root.removeHandler(h)

    handler = logging.StreamHandler(sys.stderr)
    if fmt == "json":
        handler.setFormatter(
            _JsonFormatter("%(asctime)s %(name)s %(levelname)s %(message)s")
        )
    else:
        handler.setFormatter(
            logging.Formatter("%(asctime)s %(levelname)s %(name)s %(message)s")
        )
    handler.addFilter(SecretsFilter())
    root.addHandler(handler)
    root.setLevel(lvl)
|
||||
@@ -0,0 +1,239 @@
|
||||
"""Bridge MCP → endpoint REST esistenti.
|
||||
|
||||
Implementa manualmente JSON-RPC 2.0 MCP su `POST /mcp` (no SSE, risposta
|
||||
diretta in body JSON). Supporta:
|
||||
- initialize
|
||||
- notifications/initialized
|
||||
- tools/list
|
||||
- tools/call
|
||||
|
||||
Claude Code config esempio:
|
||||
|
||||
{
|
||||
"mcpServers": {
|
||||
"cerbero-memory": {
|
||||
"type": "http",
|
||||
"url": "http://localhost:8080/mcp-memory/mcp",
|
||||
"headers": {"Authorization": "Bearer <observer-token>"}
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
import httpx
|
||||
from fastapi import FastAPI, Request
|
||||
from fastapi.responses import JSONResponse
|
||||
|
||||
from option_mcp_common.auth import TokenStore
|
||||
|
||||
MCP_PROTOCOL_VERSION = "2024-11-05"
|
||||
|
||||
|
||||
def _derive_input_schemas(app: FastAPI, tool_names: list[str]) -> dict[str, dict]:
    """Extract the Pydantic body JSON schema for each POST /tools/<name> route.

    Resolves lazy annotations (PEP 563) via ``typing.get_type_hints``.
    Returns a {tool_name: json_schema} mapping. Routes without a resolvable
    Pydantic body model are skipped: the caller uses a fallback schema.
    """
    import typing

    from pydantic import BaseModel

    names_set = set(tool_names)
    out: dict[str, dict] = {}
    for route in app.routes:
        path = getattr(route, "path", "")
        if not path.startswith("/tools/"):
            continue
        name = path[len("/tools/"):]
        if name not in names_set:
            continue
        endpoint = getattr(route, "endpoint", None)
        if endpoint is None:
            continue
        try:
            hints = typing.get_type_hints(endpoint)
        except Exception:
            # Unresolvable forward references — skip, caller falls back.
            continue
        for pname, ann in hints.items():
            if pname == "return":
                continue
            # First Pydantic-model parameter is assumed to be the body.
            if isinstance(ann, type) and issubclass(ann, BaseModel):
                try:
                    out[name] = ann.model_json_schema()
                except Exception:
                    # Schema generation failure is non-fatal.
                    pass
                break
    return out
|
||||
|
||||
|
||||
def _make_proxy_handler(internal_base_url: str, tool_name: str, token: str):
    """Build an async handler proxying one tool call to the internal REST API.

    The returned coroutine POSTs its arguments to
    ``{internal_base_url}/tools/{tool_name}`` with the caller's bearer token
    and returns the parsed JSON body (``{"raw": text}`` when not JSON).
    Raises RuntimeError on any HTTP error status.
    """

    async def handler(args: dict | None) -> Any:
        url = f"{internal_base_url}/tools/{tool_name}"
        headers = {"Authorization": f"Bearer {token}"} if token else {}
        async with httpx.AsyncClient(timeout=30.0) as client:
            resp = await client.post(url, headers=headers, json=args or {})
            if resp.status_code >= 400:
                raise RuntimeError(
                    f"tool {tool_name} failed: HTTP {resp.status_code} — {resp.text[:500]}"
                )
            try:
                return resp.json()
            except Exception:
                return {"raw": resp.text}

    return handler
|
||||
|
||||
|
||||
def mount_mcp_endpoint(
    app: FastAPI,
    *,
    name: str,
    version: str,
    token_store: TokenStore,
    internal_base_url: str,
    tools: list[dict],
) -> None:
    """Register an MCP JSON-RPC 2.0 endpoint on POST /mcp.

    Each tool is proxied to POST {internal_base_url}/tools/<name> with the
    MCP client's bearer token (preserving the existing REST ACLs).

    Args:
        app: the service's FastAPI instance
        name: MCP server name
        version: service version
        token_store: the same store used by the REST tools
        internal_base_url: internal base URL (e.g. "http://localhost:9015")
        tools: list of {"name": str, "description": str, "input_schema"?: dict}
    """
    tools_by_name = {t["name"]: t for t in tools}

    # Auto-derive input schemas from FastAPI routes (Pydantic body models).
    # Lets the LLM know the mandatory parameter names instead of guessing
    # them. An explicit `input_schema` on a tool wins over the auto-derive.
    derived_schemas = _derive_input_schemas(app, [t["name"] for t in tools])

    def _tool_defs() -> list[dict]:
        # Build the tools/list payload; permissive object schema as fallback.
        defs = []
        for t in tools:
            schema = t.get("input_schema") or derived_schemas.get(t["name"]) or {
                "type": "object",
                "additionalProperties": True,
            }
            defs.append({
                "name": t["name"],
                "description": t.get("description", t["name"]),
                "inputSchema": schema,
            })
        return defs

    async def _handle_rpc(body: dict, token: str) -> dict | None:
        # Dispatch a single JSON-RPC request; None means "notification, no reply".
        rpc_id = body.get("id")
        method = body.get("method")
        params = body.get("params") or {}

        # Notification (no id) → no response
        if method == "notifications/initialized":
            return None

        if method == "initialize":
            return {
                "jsonrpc": "2.0",
                "id": rpc_id,
                "result": {
                    "protocolVersion": MCP_PROTOCOL_VERSION,
                    "capabilities": {"tools": {"listChanged": False}},
                    "serverInfo": {"name": name, "version": version},
                },
            }

        if method == "tools/list":
            return {
                "jsonrpc": "2.0",
                "id": rpc_id,
                "result": {"tools": _tool_defs()},
            }

        if method == "tools/call":
            tool_name = params.get("name", "")
            args = params.get("arguments") or {}
            if tool_name not in tools_by_name:
                return {
                    "jsonrpc": "2.0",
                    "id": rpc_id,
                    "error": {"code": -32601, "message": f"tool non trovato: {tool_name}"},
                }
            handler = _make_proxy_handler(internal_base_url, tool_name, token)
            try:
                result = await handler(args)
                return {
                    "jsonrpc": "2.0",
                    "id": rpc_id,
                    "result": {
                        "content": [
                            {
                                "type": "text",
                                "text": _to_text(result),
                            }
                        ],
                        "isError": False,
                    },
                }
            except Exception as e:
                # Tool failures surface as MCP results with isError=True,
                # not as JSON-RPC protocol errors.
                return {
                    "jsonrpc": "2.0",
                    "id": rpc_id,
                    "result": {
                        "content": [{"type": "text", "text": str(e)}],
                        "isError": True,
                    },
                }

        return {
            "jsonrpc": "2.0",
            "id": rpc_id,
            "error": {"code": -32601, "message": f"metodo non supportato: {method}"},
        }

    @app.post("/mcp")
    async def mcp_entry(request: Request):
        # Bearer auth mirrors the REST endpoints: 401 missing, 403 unknown.
        auth = request.headers.get("Authorization", "")
        if not auth.startswith("Bearer "):
            return JSONResponse({"error": "missing bearer token"}, status_code=401)
        token = auth[len("Bearer "):].strip()
        principal = token_store.get(token)
        if principal is None:
            return JSONResponse({"error": "invalid token"}, status_code=403)

        body = await request.json()

        # Batch support
        if isinstance(body, list):
            results = []
            for item in body:
                resp = await _handle_rpc(item, token)
                if resp is not None:
                    results.append(resp)
            return JSONResponse(results)

        resp = await _handle_rpc(body, token)
        if resp is None:
            # Notification (no id) → 204 no content
            return JSONResponse(None, status_code=204)
        return JSONResponse(resp)
|
||||
|
||||
|
||||
def _to_text(value: Any) -> str:
|
||||
import json
|
||||
if isinstance(value, str):
|
||||
return value
|
||||
try:
|
||||
return json.dumps(value, ensure_ascii=False, indent=2)
|
||||
except Exception:
|
||||
return str(value)
|
||||
@@ -0,0 +1,98 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from enum import StrEnum
|
||||
from functools import total_ordering
|
||||
from typing import Any
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
@total_ordering
|
||||
class EventPriority(StrEnum):
|
||||
LOW = "low"
|
||||
NORMAL = "normal"
|
||||
HIGH = "high"
|
||||
CRITICAL = "critical"
|
||||
|
||||
def _rank(self) -> int:
|
||||
return ["low", "normal", "high", "critical"].index(self.value)
|
||||
|
||||
def __lt__(self, other: EventPriority) -> bool:
|
||||
return self._rank() < other._rank()
|
||||
|
||||
|
||||
class EventType(StrEnum):
    """Kind of event stored in the events queue."""

    ALERT = "alert"
    USER_INSTRUCTION = "user_instruction"
    SYSTEM = "system"
|
||||
|
||||
|
||||
class L1State(BaseModel):
    """Singleton row with current operational state."""

    # ISO timestamp of the last state refresh.
    updated_at: str
    equity_total: float | None = None
    # Per-exchange equity breakdown, keyed by exchange name.
    equity_by_exchange: dict[str, float] = Field(default_factory=dict)
    bias: str | None = None
    pnl_day: float | None = None
    pnl_total: float | None = None
    capital: float | None = None
    open_positions_count: int = 0
    # Aggregate greeks keyed by name — presumably delta/gamma/etc.; confirm with writer.
    greeks_aggregate: dict[str, float] = Field(default_factory=dict)
    notes: str | None = None
|
||||
|
||||
|
||||
class L2Entry(BaseModel):
    """Reasoning entry — schema matches system_prompt v2.

    ``authored_by_model`` identifies the LLM that generated the entry (e.g.
    "google/gemini-3-flash-preview" for core, "anthropic/claude-haiku-4-5"
    for worker). None when written by a human operator via the UI.
    """

    timestamp: str
    setup: str
    tesi: str | None = None
    tesi_check: str | None = None
    invalidation: str | None = None
    esito: str
    scostamento: str | None = None
    scostamento_sigma: float | None = None
    lezione: str | None = None
    sizing_note: str | None = None
    # Correlation handles back to the run and originating instruction.
    run_id: str | None = None
    user_instruction_id: int | None = None
    authored_by_model: str | None = None
|
||||
|
||||
|
||||
class L3Entry(BaseModel):
    """Compacted pattern from L2 batch."""

    created_at: str
    # One of: "pattern_errore" | "pattern_vincente" | "correlazione"
    category: str
    summary: str
    # IDs of the L2 entries this pattern was derived from.
    source_l2_ids: list[int]
|
||||
|
||||
|
||||
class Event(BaseModel):
    """Queued event with an expiry and optional acknowledgement fields."""

    # None until the storage layer assigns an id.
    id: int | None = None
    created_at: str
    expires_at: str
    type: EventType
    source: str
    priority: EventPriority
    payload: dict[str, Any]
    # Acknowledgement fields; all None until the event is acked.
    acked_at: str | None = None
    ack_outcome: str | None = None
    ack_notes: str | None = None
|
||||
|
||||
|
||||
class UserInstruction(BaseModel):
    """Operator instruction delivered to the core loop."""

    # None until the storage layer assigns an id.
    id: int | None = None
    created_at: str
    text: str
    priority: EventPriority
    # When True the core must explicitly acknowledge the instruction.
    require_ack: bool = True
    source: str = "observer"
    acked_at: str | None = None
    ack_outcome: str | None = None
|
||||
@@ -0,0 +1,92 @@
|
||||
"""CER-016 hard guard server-side su place_order.
|
||||
|
||||
Caps configurabili via env (default safety-first, mirati a ~200 EUR single,
|
||||
1000 EUR aggregato, 3x max leverage).
|
||||
|
||||
Thresholds sono numerici semplici — l'operatore stabilisce unità (EUR/USD)
|
||||
via env; il server compara su un unico campo `notional` in valore monetario.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
|
||||
from fastapi import HTTPException
|
||||
|
||||
|
||||
def _env_float(name: str, default: float) -> float:
|
||||
raw = os.environ.get(name)
|
||||
if not raw:
|
||||
return default
|
||||
try:
|
||||
return float(raw)
|
||||
except (TypeError, ValueError):
|
||||
return default
|
||||
|
||||
|
||||
def _env_int(name: str, default: int) -> int:
|
||||
raw = os.environ.get(name)
|
||||
if not raw:
|
||||
return default
|
||||
try:
|
||||
return int(raw)
|
||||
except (TypeError, ValueError):
|
||||
return default
|
||||
|
||||
|
||||
def max_notional() -> float:
    """Hard cap for one trade's notional (CERBERO_MAX_NOTIONAL, default 200)."""
    return _env_float("CERBERO_MAX_NOTIONAL", 200.0)


def max_aggregate() -> float:
    """Hard cap for total open notional (CERBERO_MAX_AGGREGATE, default 1000)."""
    return _env_float("CERBERO_MAX_AGGREGATE", 1000.0)


def max_leverage() -> int:
    """Hard leverage cap (CERBERO_MAX_LEVERAGE, default 3)."""
    return _env_int("CERBERO_MAX_LEVERAGE", 3)
|
||||
|
||||
|
||||
def _hard_reject(reason: str) -> None:
    """Raise the HARD_PROHIBITION 403 with the current caps attached for context."""
    caps = {
        "max_notional": max_notional(),
        "max_aggregate": max_aggregate(),
        "max_leverage": max_leverage(),
    }
    raise HTTPException(
        status_code=403,
        detail={
            "error": "HARD_PROHIBITION",
            "message": reason,
            "caps": caps,
        },
    )
|
||||
|
||||
|
||||
def enforce_leverage(leverage: int | float | None) -> int:
    """Return the leverage to apply; default to the hard cap when None.

    Rejects (HTTP 403 via _hard_reject) leverage below 1 or above the cap.

    Fixes: the value was truncated with ``int()`` BEFORE the cap check, so a
    fractional request such as 3.9x slipped under a 3x hard cap; the raw
    value is now validated first and only then truncated for the return.
    """
    cap = max_leverage()
    if leverage is None:
        return cap
    # Validate the raw (possibly fractional) value, not the truncated one.
    if leverage < 1:
        _hard_reject(f"leverage must be >= 1 (got {leverage})")
    if leverage > cap:
        _hard_reject(f"leverage {leverage}x exceeds hard cap {cap}x")
    return int(leverage)
|
||||
|
||||
|
||||
def enforce_single_notional(notional: float, *, exchange: str, instrument: str) -> None:
    """Reject (403) when a single trade's notional exceeds the per-trade cap."""
    cap = max_notional()
    if notional > cap:
        reason = (
            f"{exchange}.{instrument} notional {notional:.2f} exceeds single trade cap {cap:.2f}"
        )
        _hard_reject(reason)
|
||||
|
||||
|
||||
def enforce_aggregate(current_total: float, new_notional: float) -> None:
    """Reject (403) when current + new notional would exceed the aggregate cap."""
    cap = max_aggregate()
    projected = current_total + new_notional
    if projected > cap:
        reason = (
            f"aggregate notional {projected:.2f} (current {current_total:.2f} + new "
            f"{new_notional:.2f}) exceeds cap {cap:.2f}"
        )
        _hard_reject(reason)
|
||||
@@ -0,0 +1,220 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import os
|
||||
import time
|
||||
import uuid
|
||||
from datetime import UTC, datetime
|
||||
|
||||
from collections.abc import Callable
|
||||
from contextlib import AbstractAsyncContextManager
|
||||
|
||||
from fastapi import FastAPI, HTTPException, Request
|
||||
from fastapi.exceptions import RequestValidationError
|
||||
from fastapi.responses import JSONResponse, Response
|
||||
from starlette.middleware.base import BaseHTTPMiddleware
|
||||
|
||||
from option_mcp_common.auth import TokenStore
|
||||
|
||||
Lifespan = Callable[[FastAPI], AbstractAsyncContextManager[None]]
|
||||
|
||||
|
||||
def _error_envelope(
|
||||
*,
|
||||
type_: str,
|
||||
code: str,
|
||||
message: str,
|
||||
retryable: bool,
|
||||
suggested_fix: str | None = None,
|
||||
details: dict | None = None,
|
||||
request_id: str | None = None,
|
||||
) -> dict:
|
||||
env = {
|
||||
"error": {
|
||||
"type": type_,
|
||||
"code": code,
|
||||
"message": message,
|
||||
"retryable": retryable,
|
||||
},
|
||||
"request_id": request_id or uuid.uuid4().hex,
|
||||
"data_timestamp": datetime.now(UTC).isoformat(),
|
||||
}
|
||||
if suggested_fix:
|
||||
env["error"]["suggested_fix"] = suggested_fix
|
||||
if details:
|
||||
env["error"]["details"] = details
|
||||
return env
|
||||
|
||||
|
||||
class _TimestampInjectorMiddleware(BaseHTTPMiddleware):
    """CER-P5-001: inject data_timestamp into tool responses.

    - Dict response: body gains `data_timestamp` if missing.
    - List of dicts: each item gains `data_timestamp` if missing.
    - Header `X-Data-Timestamp` always present (universal for primitive lists).
    Skips /health (already populated), /mcp (JSON-RPC bridge) and non-JSON
    responses.
    """

    async def dispatch(self, request: Request, call_next):
        response = await call_next(request)
        path = request.url.path
        # Only /tools/* endpoints are decorated; everything else passes through.
        if not path.startswith("/tools/"):
            return response
        ctype = response.headers.get("content-type", "")
        if "application/json" not in ctype:
            return response
        # Drain the streaming body so it can be inspected and re-emitted.
        body = b""
        async for chunk in response.body_iterator:
            body += chunk
        ts = datetime.now(UTC).isoformat()
        try:
            data = json.loads(body) if body else None
        except Exception:
            # Unparsable JSON: re-emit unchanged, add the header only.
            headers = dict(response.headers)
            headers["X-Data-Timestamp"] = ts
            return Response(
                content=body,
                status_code=response.status_code,
                headers=headers,
                media_type=response.media_type,
            )

        modified = False
        if isinstance(data, dict) and "data_timestamp" not in data:
            data["data_timestamp"] = ts
            modified = True
        elif isinstance(data, list):
            for item in data:
                if isinstance(item, dict) and "data_timestamp" not in item:
                    item["data_timestamp"] = ts
                    modified = True

        headers = dict(response.headers)
        headers["X-Data-Timestamp"] = ts
        if modified:
            new_body = json.dumps(data, default=str).encode()
            # Body length changed; let Response recompute Content-Length.
            headers.pop("content-length", None)
            return Response(
                content=new_body,
                status_code=response.status_code,
                headers=headers,
                media_type="application/json",
            )
        return Response(
            content=body,
            status_code=response.status_code,
            headers=headers,
            media_type=response.media_type,
        )
|
||||
|
||||
|
||||
def build_app(
    *,
    name: str,
    version: str,
    token_store: TokenStore,
    lifespan: Lifespan | None = None,
) -> FastAPI:
    """Build the shared FastAPI app: auth state, middleware, error envelopes.

    Args:
        name: service name (FastAPI title and /health payload).
        version: service version string.
        token_store: bearer-token store exposed on app.state for auth deps.
        lifespan: optional startup/shutdown async context manager.

    Returns:
        A FastAPI app with timestamp/latency middleware, CER-P5-002 error
        envelope handlers and a /health endpoint.
    """
    root_path = os.getenv("ROOT_PATH", "")
    app = FastAPI(title=name, version=version, root_path=root_path, lifespan=lifespan)
    app.state.token_store = token_store
    app.state.boot_at = time.time()

    app.add_middleware(_TimestampInjectorMiddleware)

    @app.middleware("http")
    async def _latency_header(request: Request, call_next):
        # Measure wall-clock handling time and expose it to clients.
        t0 = time.perf_counter()
        response = await call_next(request)
        dur_ms = (time.perf_counter() - t0) * 1000
        response.headers["X-Duration-Ms"] = f"{dur_ms:.2f}"
        return response

    # CER-P5-002 error envelope: global exception handlers
    @app.exception_handler(HTTPException)
    async def _http_exc(request: Request, exc: HTTPException):
        # Only transient statuses are flagged retryable.
        retryable = exc.status_code in (408, 429, 502, 503, 504)
        code_map = {
            400: "BAD_REQUEST", 401: "UNAUTHORIZED", 403: "FORBIDDEN",
            404: "NOT_FOUND", 408: "TIMEOUT", 409: "CONFLICT",
            422: "VALIDATION_ERROR", 429: "RATE_LIMIT",
            500: "INTERNAL_ERROR", 502: "UPSTREAM_ERROR",
            503: "UNAVAILABLE", 504: "GATEWAY_TIMEOUT",
        }
        code = code_map.get(exc.status_code, f"HTTP_{exc.status_code}")
        message = "HTTP error"
        details: dict | None = None
        detail = exc.detail
        # Preserve rail-style detail {"error": "..", "message": ".."} as code
        if isinstance(detail, dict):
            if isinstance(detail.get("error"), str):
                code = detail["error"].upper()
            message = str(detail.get("message") or detail.get("error") or message)
            details = detail
        elif isinstance(detail, str):
            message = detail
        return JSONResponse(
            status_code=exc.status_code,
            content=_error_envelope(
                type_="http_error",
                code=code,
                message=message,
                retryable=retryable,
                details=details,
            ),
        )

    @app.exception_handler(RequestValidationError)
    async def _validation_exc(request: Request, exc: RequestValidationError):
        errs = exc.errors()
        first_loc = ".".join(str(x) for x in errs[0]["loc"]) if errs else "body"
        suggestion = (
            f"check field '{first_loc}': "
            + (errs[0]["msg"] if errs else "invalid input")
        )
        # Sanitize ctx values: pydantic v2 may put a ValueError in ctx['error'],
        # which is not JSON-serializable. Reduce them to strings.
        safe_errs: list[dict] = []
        for e in errs[:5]:
            ne: dict = {}
            for k, v in e.items():
                if k == "ctx" and isinstance(v, dict):
                    ne[k] = {ck: str(cv) for ck, cv in v.items()}
                else:
                    ne[k] = v
            safe_errs.append(ne)
        return JSONResponse(
            status_code=422,
            content=_error_envelope(
                type_="validation_error",
                code="INVALID_INPUT",
                message=f"request body validation failed on {first_loc}",
                retryable=False,
                suggested_fix=suggestion,
                details={"errors": safe_errs},
            ),
        )

    @app.exception_handler(Exception)
    async def _unhandled(request: Request, exc: Exception):
        # Last-resort handler: exception type plus a truncated message,
        # marked retryable.
        return JSONResponse(
            status_code=500,
            content=_error_envelope(
                type_="internal_error",
                code="UNHANDLED_EXCEPTION",
                message=f"{type(exc).__name__}: {str(exc)[:300]}",
                retryable=True,
            ),
        )

    @app.get("/health")
    def health():
        """Liveness probe with uptime and a current timestamp."""
        return {
            "status": "healthy",
            "name": name,
            "version": version,
            "uptime_seconds": int(time.time() - app.state.boot_at),
            "data_timestamp": datetime.now(UTC).isoformat(),
        }

    return app
|
||||
@@ -0,0 +1,43 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import sqlite3
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
@dataclass
|
||||
class Database:
|
||||
path: Path
|
||||
conn: sqlite3.Connection | None = None
|
||||
|
||||
def connect(self) -> sqlite3.Connection:
|
||||
self.path.parent.mkdir(parents=True, exist_ok=True)
|
||||
self.conn = sqlite3.connect(
|
||||
str(self.path),
|
||||
isolation_level=None,
|
||||
check_same_thread=False,
|
||||
)
|
||||
self.conn.row_factory = sqlite3.Row
|
||||
self.conn.execute("PRAGMA journal_mode=WAL")
|
||||
self.conn.execute("PRAGMA synchronous=NORMAL")
|
||||
self.conn.execute("PRAGMA foreign_keys=ON")
|
||||
return self.conn
|
||||
|
||||
def close(self) -> None:
|
||||
if self.conn is not None:
|
||||
self.conn.close()
|
||||
self.conn = None
|
||||
|
||||
|
||||
def run_migrations(conn: sqlite3.Connection, migrations: dict[int, str]) -> None:
    """Apply pending migration scripts in ascending version order.

    Idempotent: versions at or below the recorded maximum in
    _schema_version are skipped, and each applied version is recorded
    right after its script runs.
    """
    conn.execute(
        "CREATE TABLE IF NOT EXISTS _schema_version (version INTEGER PRIMARY KEY)"
    )
    row = conn.execute(
        "SELECT COALESCE(MAX(version), 0) FROM _schema_version"
    ).fetchone()
    applied_up_to = row[0]
    for version in sorted(v for v in migrations if v > applied_up_to):
        conn.executescript(migrations[version])
        conn.execute("INSERT INTO _schema_version (version) VALUES (?)", (version,))
|
||||
Reference in New Issue
Block a user