"""End-to-end smoke test: a minimal phase-1 run completes and persists results."""

from pathlib import Path

import numpy as np
import pandas as pd
import pytest

from multi_swarm.genome.hypothesis import ModelTier
from multi_swarm.llm.client import CompletionResult
from multi_swarm.orchestrator.run import RunConfig, run_phase1
from multi_swarm.persistence.repository import Repository


@pytest.fixture
def synthetic_ohlcv():
    """Return a deterministic 500-bar hourly OHLCV frame (random walk near 100).

    Uses a fixed RandomState(0) seed so the generated prices — and hence the
    run under test — are reproducible across invocations.
    """
    idx = pd.date_range("2024-01-01", periods=500, freq="1h", tz="UTC")
    close = 100 + np.cumsum(np.random.RandomState(0).normal(0.01, 1.0, 500))
    return pd.DataFrame(
        {
            "open": close,
            "high": close + 0.5,
            "low": close - 0.5,
            "close": close,
            "volume": 1.0,
        },
        index=idx,
    )


@pytest.fixture
def fake_llm(mocker):
    """LLM mock that always returns a valid strategy."""
    fake = mocker.MagicMock()
    fake.complete.return_value = CompletionResult(
        text=(
            "```lisp\n(strategy "
            "(when (gt (indicator rsi 14) 70.0) (entry-short)) "
            "(when (lt (indicator rsi 14) 30.0) (entry-long)))\n```"
        ),
        input_tokens=200,
        output_tokens=80,
        tier=ModelTier.C,
        model="qwen",
    )
    return fake


def test_e2e_minimal_run_completes(
    tmp_path: Path,
    synthetic_ohlcv,
    fake_llm,
):
    """Run a tiny 2-generation evolution and verify persisted artifacts.

    Asserts that the run is marked completed, both generations are recorded,
    and at least one full population's worth of evaluations was stored.
    """
    # NOTE(review): the former unused `mocker` parameter was dropped — the
    # mock is fully constructed inside the `fake_llm` fixture.
    cfg = RunConfig(
        run_name="e2e-test",
        population_size=5,
        n_generations=2,
        elite_k=1,
        tournament_k=2,
        p_crossover=0.5,
        seed=42,
        model_tier=ModelTier.C,
        symbol="BTC/USDT",
        timeframe="1h",
        fees_bp=5.0,
        n_trials_dsr=10,
        db_path=tmp_path / "runs.db",
    )

    run_id = run_phase1(cfg, ohlcv=synthetic_ohlcv, llm=fake_llm)

    # Re-open the repository from disk to verify persistence, not in-memory state.
    repo = Repository(db_path=tmp_path / "runs.db")
    run = repo.get_run(run_id)
    assert run["status"] == "completed"

    gens = repo.list_generations(run_id)
    assert len(gens) == 2

    evals = repo.list_evaluations(run_id)
    assert len(evals) >= 5  # at least one population