From 18259325a1c989a6f931c400336e367389125313 Mon Sep 17 00:00:00 2001
From: AdrianoDev
Date: Sat, 9 May 2026 20:45:06 +0200
Subject: [PATCH] chore(dashboard): mypy ignores + parse-success metric in
 Overview

Co-Authored-By: Claude Opus 4.7 (1M context)
---
 scripts/smoke_run.py                          |  2 +-
 .../dashboard/pages/01_overview.py            | 19 ++++++++++++++++++-
 .../dashboard/pages/02_ga_convergence.py      |  2 +-
 3 files changed, 20 insertions(+), 3 deletions(-)

diff --git a/scripts/smoke_run.py b/scripts/smoke_run.py
index c0ed0e8..2c98a2d 100644
--- a/scripts/smoke_run.py
+++ b/scripts/smoke_run.py
@@ -3,7 +3,7 @@ from __future__ import annotations
 from pathlib import Path
 
 import numpy as np
-import pandas as pd
+import pandas as pd  # type: ignore[import-untyped]
 
 from multi_swarm.genome.hypothesis import HypothesisAgentGenome, ModelTier
 from multi_swarm.llm.client import CompletionResult
diff --git a/src/multi_swarm/dashboard/pages/01_overview.py b/src/multi_swarm/dashboard/pages/01_overview.py
index a753eb0..42296f5 100644
--- a/src/multi_swarm/dashboard/pages/01_overview.py
+++ b/src/multi_swarm/dashboard/pages/01_overview.py
@@ -2,7 +2,12 @@ from __future__ import annotations
 
 import streamlit as st
 
-from multi_swarm.dashboard.data import get_repo, get_run_overview, list_runs_df
+from multi_swarm.dashboard.data import (
+    evaluations_df,
+    get_repo,
+    get_run_overview,
+    list_runs_df,
+)
 
 st.title("Overview")
 
@@ -26,5 +31,17 @@ col2.metric("Cost (USD)", f"{overview['total_cost_usd']:.4f}")
 col3.metric("Started", overview["started_at"])
 col4.metric("Completed", overview["completed_at"] or "—")
 
+st.subheader("Statistiche evaluations")
+evals = evaluations_df(repo, selected)
+col5, col6, col7, col8 = st.columns(4)
+if not evals.empty:
+    parse_success = 100 * (evals["parse_error"].isna().sum() / len(evals))
+    col5.metric("Evaluations totali", len(evals))
+    col6.metric("Parse success %", f"{parse_success:.1f}%")
+    col7.metric("Top fitness", f"{evals['fitness'].max():.3f}")
+    col8.metric("Median fitness", f"{evals['fitness'].median():.3f}")
+else:
+    col5.metric("Evaluations totali", 0)
+
 st.subheader("Config")
 st.json(overview["config"])
diff --git a/src/multi_swarm/dashboard/pages/02_ga_convergence.py b/src/multi_swarm/dashboard/pages/02_ga_convergence.py
index 18a2f3e..f01cc80 100644
--- a/src/multi_swarm/dashboard/pages/02_ga_convergence.py
+++ b/src/multi_swarm/dashboard/pages/02_ga_convergence.py
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-import plotly.graph_objects as go
+import plotly.graph_objects as go  # type: ignore[import-untyped]
 import streamlit as st
 
 from multi_swarm.dashboard.data import generations_df, get_repo, list_runs_df