refactor(llm): route all tiers via OpenRouter, drop Anthropic SDK
Tutti i tier (S/A/B/C/D) ora passano per OpenRouter via OpenAI SDK.
Modelli Anthropic raggiungibili via prefisso `anthropic/...`.
- pyproject: rimosso `anthropic>=0.39` da deps + uv.lock
- config: rimosso `anthropic_api_key` field
- LLMClient: dispatch unico, single client OpenAI con base_url OpenRouter
- defaults: S → `anthropic/claude-opus-4-7`; A/B → `anthropic/claude-sonnet-4-6`
- retry exceptions: solo openai.* (drop anthropic.*)
- test rinominati e adattati: tier S/A/B mockano OpenAI con prefisso `anthropic/`
- rimosso test `tier_S_without_anthropic_key_raises` (non più rilevante)
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -25,7 +25,7 @@ def test_completion_tier_c_uses_openrouter(mocker):
|
||||
|
||||
mocker.patch("multi_swarm.llm.client.OpenAI", return_value=fake_openai)
|
||||
|
||||
client = LLMClient(openrouter_api_key="or-x", anthropic_api_key=None)
|
||||
client = LLMClient(openrouter_api_key="or-x")
|
||||
g = make_genome(ModelTier.C)
|
||||
out = client.complete(g, system="sys", user="usr")
|
||||
|
||||
@@ -37,15 +37,15 @@ def test_completion_tier_c_uses_openrouter(mocker):
|
||||
fake_openai.chat.completions.create.assert_called_once()
|
||||
|
||||
|
||||
def test_completion_tier_b_uses_anthropic(mocker):
|
||||
fake_anthropic = mocker.MagicMock()
|
||||
fake_msg = mocker.MagicMock()
|
||||
fake_msg.content = [mocker.MagicMock(text="(strategy ...)")]
|
||||
fake_msg.usage = mocker.MagicMock(input_tokens=80, output_tokens=150)
|
||||
fake_anthropic.messages.create.return_value = fake_msg
|
||||
mocker.patch("multi_swarm.llm.client.Anthropic", return_value=fake_anthropic)
|
||||
def test_completion_tier_b_uses_openrouter_with_anthropic_model(mocker):
|
||||
fake_openai = mocker.MagicMock()
|
||||
fake_response = mocker.MagicMock()
|
||||
fake_response.choices = [mocker.MagicMock(message=mocker.MagicMock(content="(strategy ...)"))]
|
||||
fake_response.usage = mocker.MagicMock(prompt_tokens=80, completion_tokens=150)
|
||||
fake_openai.chat.completions.create.return_value = fake_response
|
||||
mocker.patch("multi_swarm.llm.client.OpenAI", return_value=fake_openai)
|
||||
|
||||
client = LLMClient(openrouter_api_key="or-x", anthropic_api_key="an-x")
|
||||
client = LLMClient(openrouter_api_key="or-x")
|
||||
g = make_genome(ModelTier.B)
|
||||
out = client.complete(g, system="sys", user="usr")
|
||||
|
||||
@@ -53,6 +53,9 @@ def test_completion_tier_b_uses_anthropic(mocker):
|
||||
assert out.input_tokens == 80
|
||||
assert out.output_tokens == 150
|
||||
assert out.tier == ModelTier.B
|
||||
call_kwargs = fake_openai.chat.completions.create.call_args.kwargs
|
||||
assert call_kwargs["model"] == "anthropic/claude-sonnet-4-6"
|
||||
assert out.model == "anthropic/claude-sonnet-4-6"
|
||||
|
||||
|
||||
@pytest.mark.slow
|
||||
@@ -66,7 +69,7 @@ def test_completion_retries_on_connection_error(mocker):
|
||||
)
|
||||
mocker.patch("multi_swarm.llm.client.OpenAI", return_value=fake_openai)
|
||||
|
||||
client = LLMClient(openrouter_api_key="or-x", anthropic_api_key=None)
|
||||
client = LLMClient(openrouter_api_key="or-x")
|
||||
g = make_genome(ModelTier.C)
|
||||
|
||||
with pytest.raises(openai.APIConnectionError):
|
||||
@@ -87,7 +90,6 @@ def test_completion_uses_custom_model_tier_c(mocker):
|
||||
|
||||
client = LLMClient(
|
||||
openrouter_api_key="or-x",
|
||||
anthropic_api_key=None,
|
||||
model_tier_c="deepseek/deepseek-chat",
|
||||
)
|
||||
g = make_genome(ModelTier.C)
|
||||
@@ -100,63 +102,64 @@ def test_completion_uses_custom_model_tier_c(mocker):
|
||||
|
||||
|
||||
def test_completion_uses_custom_model_tier_b(mocker):
    """A custom tier-B model override is passed verbatim to the OpenRouter call.

    After the OpenRouter-only refactor, Anthropic models are addressed via the
    ``anthropic/`` prefix on the single OpenAI-SDK client; the override must
    reach ``chat.completions.create`` unchanged and be echoed in the result.
    """
    # Fake OpenAI SDK client wired to return a canned chat completion.
    fake_openai = mocker.MagicMock()
    fake_response = mocker.MagicMock()
    fake_response.choices = [
        mocker.MagicMock(message=mocker.MagicMock(content="(strategy ...)"))
    ]
    fake_response.usage = mocker.MagicMock(prompt_tokens=10, completion_tokens=20)
    fake_openai.chat.completions.create.return_value = fake_response
    mocker.patch("multi_swarm.llm.client.OpenAI", return_value=fake_openai)

    client = LLMClient(
        openrouter_api_key="or-x",
        model_tier_b="anthropic/claude-opus-4-7",
    )
    g = make_genome(ModelTier.B)
    out = client.complete(g, system="sys", user="usr")

    # The override must flow through to the API call and the completion result.
    fake_openai.chat.completions.create.assert_called_once()
    call_kwargs = fake_openai.chat.completions.create.call_args.kwargs
    assert call_kwargs["model"] == "anthropic/claude-opus-4-7"
    assert out.model == "anthropic/claude-opus-4-7"
|
||||
|
||||
|
||||
def test_completion_tier_s_uses_openrouter_with_anthropic_model(mocker):
    """Tier S routes through OpenRouter using the default ``anthropic/`` Opus model.

    Verifies the single OpenAI-SDK dispatch path: usage tokens come from the
    OpenAI-style ``prompt_tokens``/``completion_tokens`` fields and the default
    tier-S model is ``anthropic/claude-opus-4-7``.
    """
    fake_openai = mocker.MagicMock()
    fake_response = mocker.MagicMock()
    fake_response.choices = [mocker.MagicMock(message=mocker.MagicMock(content="(strategy s)"))]
    fake_response.usage = mocker.MagicMock(prompt_tokens=50, completion_tokens=100)
    fake_openai.chat.completions.create.return_value = fake_response
    mocker.patch("multi_swarm.llm.client.OpenAI", return_value=fake_openai)

    # No Anthropic key anymore: OpenRouter credentials alone suffice for tier S.
    client = LLMClient(openrouter_api_key="or-x")
    g = make_genome(ModelTier.S)
    out = client.complete(g, system="sys", user="usr")

    fake_openai.chat.completions.create.assert_called_once()
    call_kwargs = fake_openai.chat.completions.create.call_args.kwargs
    assert call_kwargs["model"] == "anthropic/claude-opus-4-7"
    assert out.tier == ModelTier.S
    assert out.model == "anthropic/claude-opus-4-7"
|
||||
|
||||
|
||||
def test_completion_tier_a_uses_openrouter_with_anthropic_model(mocker):
    """Tier A routes through OpenRouter using the default ``anthropic/`` Sonnet model.

    Mirrors the tier-S test: single OpenAI-SDK client, OpenAI-style usage
    fields, default tier-A model ``anthropic/claude-sonnet-4-6``.
    """
    fake_openai = mocker.MagicMock()
    fake_response = mocker.MagicMock()
    fake_response.choices = [mocker.MagicMock(message=mocker.MagicMock(content="(strategy a)"))]
    fake_response.usage = mocker.MagicMock(prompt_tokens=40, completion_tokens=80)
    fake_openai.chat.completions.create.return_value = fake_response
    mocker.patch("multi_swarm.llm.client.OpenAI", return_value=fake_openai)

    client = LLMClient(openrouter_api_key="or-x")
    g = make_genome(ModelTier.A)
    out = client.complete(g, system="sys", user="usr")

    fake_openai.chat.completions.create.assert_called_once()
    call_kwargs = fake_openai.chat.completions.create.call_args.kwargs
    assert call_kwargs["model"] == "anthropic/claude-sonnet-4-6"
    assert out.tier == ModelTier.A
    assert out.model == "anthropic/claude-sonnet-4-6"
||||
|
||||
|
||||
def test_completion_tier_d_uses_openrouter_with_llama(mocker):
|
||||
@@ -169,7 +172,7 @@ def test_completion_tier_d_uses_openrouter_with_llama(mocker):
|
||||
fake_openai.chat.completions.create.return_value = fake_response
|
||||
mocker.patch("multi_swarm.llm.client.OpenAI", return_value=fake_openai)
|
||||
|
||||
client = LLMClient(openrouter_api_key="or-x", anthropic_api_key=None)
|
||||
client = LLMClient(openrouter_api_key="or-x")
|
||||
g = make_genome(ModelTier.D)
|
||||
out = client.complete(g, system="sys", user="usr")
|
||||
|
||||
@@ -181,32 +184,25 @@ def test_completion_tier_d_uses_openrouter_with_llama(mocker):
|
||||
|
||||
|
||||
def test_completion_uses_custom_model_tier_s(mocker):
    """A custom tier-S model override is passed verbatim to the OpenRouter call.

    The override keeps the ``anthropic/`` prefix convention; the previously
    co-located ``tier_S_without_anthropic_key_raises`` test was removed by this
    refactor, since tier S no longer needs an Anthropic API key.
    """
    fake_openai = mocker.MagicMock()
    fake_response = mocker.MagicMock()
    fake_response.choices = [
        mocker.MagicMock(message=mocker.MagicMock(content="(strategy custom-s)"))
    ]
    fake_response.usage = mocker.MagicMock(prompt_tokens=10, completion_tokens=20)
    fake_openai.chat.completions.create.return_value = fake_response
    mocker.patch("multi_swarm.llm.client.OpenAI", return_value=fake_openai)

    client = LLMClient(
        openrouter_api_key="or-x",
        model_tier_s="anthropic/claude-future-mega",
    )
    g = make_genome(ModelTier.S)
    out = client.complete(g, system="sys", user="usr")

    # The custom model string must survive dispatch untouched.
    call_kwargs = fake_openai.chat.completions.create.call_args.kwargs
    assert call_kwargs["model"] == "anthropic/claude-future-mega"
    assert out.model == "anthropic/claude-future-mega"
|
||||
|
||||
|
||||
@pytest.mark.slow
|
||||
@@ -227,7 +223,7 @@ def test_completion_succeeds_after_one_retry(mocker):
|
||||
]
|
||||
mocker.patch("multi_swarm.llm.client.OpenAI", return_value=fake_openai)
|
||||
|
||||
client = LLMClient(openrouter_api_key="or-x", anthropic_api_key=None)
|
||||
client = LLMClient(openrouter_api_key="or-x")
|
||||
g = make_genome(ModelTier.C)
|
||||
out = client.complete(g, system="sys", user="usr")
|
||||
|
||||
|
||||
Reference in New Issue
Block a user