feat(llm): unified client for OpenRouter (Qwen) + Anthropic (Sonnet)
LLMClient routes requests based on the genome's ModelTier: - Tier C -> Qwen 2.5 72B via OpenRouter (chat completions) - Tier B -> Sonnet 4.6 via Anthropic (messages API). Frozen CompletionResult dataclass with text, tokens, tier, model. Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -0,0 +1,53 @@
|
||||
from multi_swarm.genome.hypothesis import HypothesisAgentGenome, ModelTier
|
||||
from multi_swarm.llm.client import CompletionResult, LLMClient
|
||||
|
||||
|
||||
def make_genome(tier: ModelTier) -> HypothesisAgentGenome:
    """Build a minimal HypothesisAgentGenome fixture pinned to *tier*."""
    params = {
        "system_prompt": "x",
        "feature_access": ["close"],
        "temperature": 0.9,
        "top_p": 0.95,
        "model_tier": tier,
        "lookback_window": 200,
        "cognitive_style": "physicist",
    }
    return HypothesisAgentGenome(**params)
|
||||
|
||||
|
||||
def test_completion_tier_c_uses_openrouter(mocker):
    """A Tier-C genome must be served by the OpenRouter (OpenAI-compatible) backend."""
    # Stub the OpenAI-compatible client and a canned chat-completions response.
    stub_client = mocker.MagicMock()
    stub_message = mocker.MagicMock(content="(strategy ...)")
    stub_response = mocker.MagicMock()
    stub_response.choices = [mocker.MagicMock(message=stub_message)]
    stub_response.usage = mocker.MagicMock(prompt_tokens=100, completion_tokens=200)
    stub_client.chat.completions.create.return_value = stub_response

    mocker.patch("multi_swarm.llm.client.OpenAI", return_value=stub_client)

    client = LLMClient(openrouter_api_key="or-x", anthropic_api_key=None)
    result = client.complete(make_genome(ModelTier.C), system="sys", user="usr")

    assert isinstance(result, CompletionResult)
    assert result.text == "(strategy ...)"
    assert result.input_tokens == 100
    assert result.output_tokens == 200
    assert result.tier == ModelTier.C
    # Exactly one round-trip through the OpenRouter chat-completions endpoint.
    stub_client.chat.completions.create.assert_called_once()
|
||||
|
||||
|
||||
def test_completion_tier_b_uses_anthropic(mocker):
    """A Tier-B genome must be served by the Anthropic messages backend.

    Mirrors the tier-C test: stub the SDK client, call ``complete``, and
    verify both the returned CompletionResult fields and that the stubbed
    backend actually received exactly one request.
    """
    fake_anthropic = mocker.MagicMock()
    fake_msg = mocker.MagicMock()
    # Anthropic responses carry a list of content blocks with a .text field.
    fake_msg.content = [mocker.MagicMock(text="(strategy ...)")]
    fake_msg.usage = mocker.MagicMock(input_tokens=80, output_tokens=150)
    fake_anthropic.messages.create.return_value = fake_msg
    mocker.patch("multi_swarm.llm.client.Anthropic", return_value=fake_anthropic)

    client = LLMClient(openrouter_api_key="or-x", anthropic_api_key="an-x")
    g = make_genome(ModelTier.B)
    out = client.complete(g, system="sys", user="usr")

    # Consistency with the tier-C test: assert the result type as well.
    assert isinstance(out, CompletionResult)
    assert out.text == "(strategy ...)"
    assert out.input_tokens == 80
    assert out.output_tokens == 150
    assert out.tier == ModelTier.B
    # Previously missing: prove the Anthropic backend handled the call.
    fake_anthropic.messages.create.assert_called_once()
|
||||
Reference in New Issue
Block a user