33d8e275e7
Estende ModelTier a 5 livelli (S/A/B/C/D) con routing automatico: S/A/B via Anthropic SDK, C/D via OpenRouter (OpenAI SDK). Aggiunge prezzi per tier S (Opus), A (Sonnet placeholder) e D (Llama). Refactor LLMClient.complete con dispatch tramite tier_models map e helper _call_anthropic / _call_openrouter. Settings esposte per tutti e 5 i modelli env-configurabili. Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
237 lines
9.1 KiB
Python
237 lines
9.1 KiB
Python
import pytest
|
|
|
|
from multi_swarm.genome.hypothesis import HypothesisAgentGenome, ModelTier
|
|
from multi_swarm.llm.client import CompletionResult, LLMClient
|
|
|
|
|
|
def make_genome(tier: ModelTier) -> HypothesisAgentGenome:
    """Build a minimal genome fixture pinned to the requested model tier."""
    fields = {
        "system_prompt": "x",
        "feature_access": ["close"],
        "temperature": 0.9,
        "top_p": 0.95,
        "model_tier": tier,
        "lookback_window": 200,
        "cognitive_style": "physicist",
    }
    return HypothesisAgentGenome(**fields)
|
|
|
|
|
|
def test_completion_tier_c_uses_openrouter(mocker):
    """Tier C genomes must be routed through the OpenRouter (OpenAI SDK) client."""
    response = mocker.MagicMock(
        choices=[mocker.MagicMock(message=mocker.MagicMock(content="(strategy ...)"))],
        usage=mocker.MagicMock(prompt_tokens=100, completion_tokens=200),
    )
    openai_stub = mocker.MagicMock()
    openai_stub.chat.completions.create.return_value = response

    mocker.patch("multi_swarm.llm.client.OpenAI", return_value=openai_stub)

    client = LLMClient(openrouter_api_key="or-x", anthropic_api_key=None)
    result = client.complete(make_genome(ModelTier.C), system="sys", user="usr")

    assert isinstance(result, CompletionResult)
    assert result.text == "(strategy ...)"
    assert result.input_tokens == 100
    assert result.output_tokens == 200
    assert result.tier == ModelTier.C
    openai_stub.chat.completions.create.assert_called_once()
|
|
|
|
|
|
def test_completion_tier_b_uses_anthropic(mocker):
    """Tier B genomes must be routed through the Anthropic SDK client."""
    reply = mocker.MagicMock(
        content=[mocker.MagicMock(text="(strategy ...)")],
        usage=mocker.MagicMock(input_tokens=80, output_tokens=150),
    )
    anthropic_stub = mocker.MagicMock()
    anthropic_stub.messages.create.return_value = reply
    mocker.patch("multi_swarm.llm.client.Anthropic", return_value=anthropic_stub)

    client = LLMClient(openrouter_api_key="or-x", anthropic_api_key="an-x")
    result = client.complete(make_genome(ModelTier.B), system="sys", user="usr")

    assert result.text == "(strategy ...)"
    assert result.input_tokens == 80
    assert result.output_tokens == 150
    assert result.tier == ModelTier.B
|
|
|
|
|
|
@pytest.mark.slow
def test_completion_retries_on_connection_error(mocker):
    """The retry policy makes 3 attempts on APIConnectionError, then re-raises."""
    import openai

    openai_stub = mocker.MagicMock()
    # Every attempt fails with the same transient connection error.
    openai_stub.chat.completions.create.side_effect = openai.APIConnectionError(
        request=mocker.MagicMock()
    )
    mocker.patch("multi_swarm.llm.client.OpenAI", return_value=openai_stub)

    client = LLMClient(openrouter_api_key="or-x", anthropic_api_key=None)
    genome = make_genome(ModelTier.C)

    with pytest.raises(openai.APIConnectionError):
        client.complete(genome, system="sys", user="usr")

    assert openai_stub.chat.completions.create.call_count == 3
|
|
|
|
|
|
def test_completion_uses_custom_model_tier_c(mocker):
    """A tier-C model override must be forwarded to the OpenRouter call and result."""
    response = mocker.MagicMock(
        choices=[mocker.MagicMock(message=mocker.MagicMock(content="(strategy ...)"))],
        usage=mocker.MagicMock(prompt_tokens=10, completion_tokens=20),
    )
    openai_stub = mocker.MagicMock()
    openai_stub.chat.completions.create.return_value = response
    mocker.patch("multi_swarm.llm.client.OpenAI", return_value=openai_stub)

    client = LLMClient(
        openrouter_api_key="or-x",
        anthropic_api_key=None,
        model_tier_c="deepseek/deepseek-chat",
    )
    result = client.complete(make_genome(ModelTier.C), system="sys", user="usr")

    create = openai_stub.chat.completions.create
    create.assert_called_once()
    assert create.call_args.kwargs["model"] == "deepseek/deepseek-chat"
    assert result.model == "deepseek/deepseek-chat"
|
|
|
|
|
|
def test_completion_uses_custom_model_tier_b(mocker):
    """A tier-B model override must be forwarded to the Anthropic call and result."""
    reply = mocker.MagicMock(
        content=[mocker.MagicMock(text="(strategy ...)")],
        usage=mocker.MagicMock(input_tokens=10, output_tokens=20),
    )
    anthropic_stub = mocker.MagicMock()
    anthropic_stub.messages.create.return_value = reply
    mocker.patch("multi_swarm.llm.client.Anthropic", return_value=anthropic_stub)

    client = LLMClient(
        openrouter_api_key="or-x",
        anthropic_api_key="an-x",
        model_tier_b="claude-opus-4-7",
    )
    result = client.complete(make_genome(ModelTier.B), system="sys", user="usr")

    create = anthropic_stub.messages.create
    create.assert_called_once()
    assert create.call_args.kwargs["model"] == "claude-opus-4-7"
    assert result.model == "claude-opus-4-7"
|
|
|
|
|
|
def test_completion_tier_s_uses_anthropic_with_opus(mocker):
    """Tier S must default to the Opus model on the Anthropic client."""
    reply = mocker.MagicMock(
        content=[mocker.MagicMock(text="(strategy s)")],
        usage=mocker.MagicMock(input_tokens=50, output_tokens=100),
    )
    anthropic_stub = mocker.MagicMock()
    anthropic_stub.messages.create.return_value = reply
    mocker.patch("multi_swarm.llm.client.Anthropic", return_value=anthropic_stub)

    client = LLMClient(openrouter_api_key="or-x", anthropic_api_key="an-x")
    result = client.complete(make_genome(ModelTier.S), system="sys", user="usr")

    create = anthropic_stub.messages.create
    create.assert_called_once()
    assert create.call_args.kwargs["model"] == "claude-opus-4-7"
    assert result.tier == ModelTier.S
    assert result.model == "claude-opus-4-7"
|
|
|
|
|
|
def test_completion_tier_a_uses_anthropic_with_sonnet(mocker):
    """Tier A must default to the Sonnet model on the Anthropic client."""
    reply = mocker.MagicMock(
        content=[mocker.MagicMock(text="(strategy a)")],
        usage=mocker.MagicMock(input_tokens=40, output_tokens=80),
    )
    anthropic_stub = mocker.MagicMock()
    anthropic_stub.messages.create.return_value = reply
    mocker.patch("multi_swarm.llm.client.Anthropic", return_value=anthropic_stub)

    client = LLMClient(openrouter_api_key="or-x", anthropic_api_key="an-x")
    result = client.complete(make_genome(ModelTier.A), system="sys", user="usr")

    create = anthropic_stub.messages.create
    create.assert_called_once()
    assert create.call_args.kwargs["model"] == "claude-sonnet-4-6"
    assert result.tier == ModelTier.A
    assert result.model == "claude-sonnet-4-6"
|
|
|
|
|
|
def test_completion_tier_d_uses_openrouter_with_llama(mocker):
    """Tier D must default to the Llama model on the OpenRouter client."""
    response = mocker.MagicMock(
        choices=[mocker.MagicMock(message=mocker.MagicMock(content="(strategy d)"))],
        usage=mocker.MagicMock(prompt_tokens=30, completion_tokens=70),
    )
    openai_stub = mocker.MagicMock()
    openai_stub.chat.completions.create.return_value = response
    mocker.patch("multi_swarm.llm.client.OpenAI", return_value=openai_stub)

    client = LLMClient(openrouter_api_key="or-x", anthropic_api_key=None)
    result = client.complete(make_genome(ModelTier.D), system="sys", user="usr")

    create = openai_stub.chat.completions.create
    create.assert_called_once()
    assert create.call_args.kwargs["model"] == "meta-llama/llama-3.3-70b-instruct"
    assert result.tier == ModelTier.D
    assert result.model == "meta-llama/llama-3.3-70b-instruct"
|
|
|
|
|
|
def test_completion_uses_custom_model_tier_s(mocker):
    """A tier-S model override must be forwarded to the Anthropic call and result."""
    reply = mocker.MagicMock(
        content=[mocker.MagicMock(text="(strategy custom-s)")],
        usage=mocker.MagicMock(input_tokens=10, output_tokens=20),
    )
    anthropic_stub = mocker.MagicMock()
    anthropic_stub.messages.create.return_value = reply
    mocker.patch("multi_swarm.llm.client.Anthropic", return_value=anthropic_stub)

    client = LLMClient(
        openrouter_api_key="or-x",
        anthropic_api_key="an-x",
        model_tier_s="claude-future-mega",
    )
    result = client.complete(make_genome(ModelTier.S), system="sys", user="usr")

    assert anthropic_stub.messages.create.call_args.kwargs["model"] == "claude-future-mega"
    assert result.model == "claude-future-mega"
|
|
|
|
|
|
def test_completion_tier_s_without_anthropic_key_raises(mocker):
    """Requesting tier S with no Anthropic key configured must fail loudly."""
    mocker.patch("multi_swarm.llm.client.OpenAI", return_value=mocker.MagicMock())

    client = LLMClient(openrouter_api_key="or-x", anthropic_api_key=None)
    genome = make_genome(ModelTier.S)

    with pytest.raises(RuntimeError, match="tier S"):
        client.complete(genome, system="sys", user="usr")
|
|
|
|
|
|
@pytest.mark.slow
def test_completion_succeeds_after_one_retry(mocker):
    """After one transient failure, the retry succeeds on the second attempt."""
    import openai

    response = mocker.MagicMock(
        choices=[mocker.MagicMock(message=mocker.MagicMock(content="(strategy ...)"))],
        usage=mocker.MagicMock(prompt_tokens=100, completion_tokens=200),
    )
    openai_stub = mocker.MagicMock()
    # First call raises a transient timeout; second call yields a valid response.
    openai_stub.chat.completions.create.side_effect = [
        openai.APITimeoutError(request=mocker.MagicMock()),
        response,
    ]
    mocker.patch("multi_swarm.llm.client.OpenAI", return_value=openai_stub)

    client = LLMClient(openrouter_api_key="or-x", anthropic_api_key=None)
    result = client.complete(make_genome(ModelTier.C), system="sys", user="usr")

    assert isinstance(result, CompletionResult)
    assert result.text == "(strategy ...)"
    assert openai_stub.chat.completions.create.call_count == 2
|