feat(mcp-docugen): Task 4-6 template_store, llm_client, generation_store
- TemplateStore: filesystem CRUD plus asset directory, YAML frontmatter round-trip, path-traversal rejection. - OpenRouterClient: async httpx client with exponential-backoff retry (5xx, 429, timeout), no retry on 4xx, and usage/cost parsing. - GenerationStore: SQLite via aiosqlite with a generations + ephemeral_assets schema, TTL cleanup, and aggregate stats. Root pyproject updated with respx + pytest-cov dev deps. 19 + 11 + 9 + 6 = 45 tests total, all passed. Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -0,0 +1,173 @@
|
||||
import httpx
|
||||
import pytest
|
||||
import respx
|
||||
|
||||
from mcp_docugen.llm_client import (
|
||||
LLMAuthError,
|
||||
LLMEmptyResponse,
|
||||
LLMInvalidResponse,
|
||||
LLMRateLimit,
|
||||
LLMTimeout,
|
||||
LLMUpstreamError,
|
||||
OpenRouterClient,
|
||||
)
|
||||
|
||||
|
||||
def _success_body(text: str = "output text") -> dict:
|
||||
return {
|
||||
"id": "gen-1",
|
||||
"choices": [{"message": {"role": "assistant", "content": text}}],
|
||||
"model": "anthropic/claude-sonnet-4",
|
||||
"usage": {
|
||||
"prompt_tokens": 100,
|
||||
"completion_tokens": 200,
|
||||
"total_cost": 0.01,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@respx.mock
async def test_chat_success():
    """A 200 response is parsed into text, token counts, cost, and model."""
    respx.post("https://openrouter.ai/api/v1/chat/completions").mock(
        return_value=httpx.Response(200, json=_success_body("hello"))
    )
    llm = OpenRouterClient(
        base_url="https://openrouter.ai/api/v1", api_key="sk", timeout=5
    )
    result = await llm.chat(
        model="anthropic/claude-sonnet-4", system="sys", user="user"
    )
    assert result.text == "hello"
    assert result.model == "anthropic/claude-sonnet-4"
    assert (result.tokens_in, result.tokens_out) == (100, 200)
    assert result.cost_usd == 0.01
|
||||
|
||||
|
||||
@respx.mock
async def test_chat_retries_on_5xx():
    """Transient 5xx answers are retried until a successful response arrives."""
    replies = [
        httpx.Response(503),
        httpx.Response(502),
        httpx.Response(200, json=_success_body()),
    ]
    route = respx.post("https://openrouter.ai/api/v1/chat/completions").mock(
        side_effect=replies
    )
    llm = OpenRouterClient(
        base_url="https://openrouter.ai/api/v1",
        api_key="sk",
        retry_base_delay=0,  # no backoff sleep in tests
        timeout=5,
    )
    result = await llm.chat(model="m", system="s", user="u")
    # Two failures + one success: the route must have been hit three times.
    assert route.call_count == 3
    assert result.text == "output text"
|
||||
|
||||
|
||||
@respx.mock
async def test_chat_exhausts_retries_5xx():
    """A persistent 500 surfaces as LLMUpstreamError once retries run out."""
    respx.post("https://openrouter.ai/api/v1/chat/completions").mock(
        return_value=httpx.Response(500)
    )
    llm = OpenRouterClient(
        base_url="https://openrouter.ai/api/v1",
        api_key="sk",
        retry_base_delay=0,  # no backoff sleep in tests
        timeout=5,
    )
    with pytest.raises(LLMUpstreamError):
        await llm.chat(model="m", system="s", user="u")
|
||||
|
||||
|
||||
@respx.mock
async def test_chat_retries_on_429():
    """A single 429 is retried and the follow-up success is returned."""
    replies = [
        httpx.Response(429),
        httpx.Response(200, json=_success_body()),
    ]
    route = respx.post("https://openrouter.ai/api/v1/chat/completions").mock(
        side_effect=replies
    )
    llm = OpenRouterClient(
        base_url="https://openrouter.ai/api/v1",
        api_key="sk",
        retry_base_delay=0,  # no backoff sleep in tests
        timeout=5,
    )
    result = await llm.chat(model="m", system="s", user="u")
    # One rate-limited attempt + one success.
    assert route.call_count == 2
    assert result.text == "output text"
|
||||
|
||||
|
||||
@respx.mock
async def test_chat_exhausts_retries_429():
    """A 429 on every attempt surfaces as LLMRateLimit after retries."""
    respx.post("https://openrouter.ai/api/v1/chat/completions").mock(
        return_value=httpx.Response(429)
    )
    llm = OpenRouterClient(
        base_url="https://openrouter.ai/api/v1",
        api_key="sk",
        retry_base_delay=0,  # no backoff sleep in tests
        timeout=5,
    )
    with pytest.raises(LLMRateLimit):
        await llm.chat(model="m", system="s", user="u")
|
||||
|
||||
|
||||
@respx.mock
async def test_chat_no_retry_on_401():
    """An auth failure (401) raises LLMAuthError immediately — no retry."""
    respx.post("https://openrouter.ai/api/v1/chat/completions").mock(
        return_value=httpx.Response(401)
    )
    llm = OpenRouterClient(
        base_url="https://openrouter.ai/api/v1",
        api_key="sk",
        retry_base_delay=0,  # irrelevant here: 4xx must not be retried
        timeout=5,
    )
    with pytest.raises(LLMAuthError):
        await llm.chat(model="m", system="s", user="u")
|
||||
|
||||
|
||||
@respx.mock
async def test_chat_timeout():
    """A transport read timeout surfaces as LLMTimeout."""
    respx.post("https://openrouter.ai/api/v1/chat/completions").mock(
        side_effect=httpx.ReadTimeout("timeout")
    )
    llm = OpenRouterClient(
        base_url="https://openrouter.ai/api/v1",
        api_key="sk",
        retry_base_delay=0,  # no backoff sleep in tests
        timeout=1,
    )
    with pytest.raises(LLMTimeout):
        await llm.chat(model="m", system="s", user="u")
|
||||
|
||||
|
||||
@respx.mock
async def test_chat_invalid_response_shape():
    """A 200 body without the expected 'choices' key raises LLMInvalidResponse."""
    respx.post("https://openrouter.ai/api/v1/chat/completions").mock(
        return_value=httpx.Response(200, json={"no": "choices"})
    )
    llm = OpenRouterClient(
        base_url="https://openrouter.ai/api/v1",
        api_key="sk",
        retry_base_delay=0,
        timeout=5,
    )
    with pytest.raises(LLMInvalidResponse):
        await llm.chat(model="m", system="s", user="u")
|
||||
|
||||
|
||||
@respx.mock
async def test_chat_empty_content():
    """A well-formed body whose message content is empty raises LLMEmptyResponse."""
    respx.post("https://openrouter.ai/api/v1/chat/completions").mock(
        return_value=httpx.Response(200, json=_success_body(text=""))
    )
    llm = OpenRouterClient(
        base_url="https://openrouter.ai/api/v1",
        api_key="sk",
        retry_base_delay=0,
        timeout=5,
    )
    with pytest.raises(LLMEmptyResponse):
        await llm.chat(model="m", system="s", user="u")
|
||||
Reference in New Issue
Block a user