e8705dcd0b
- Renderer: orchestratore generate() — validazione strict variabili,
materializzazione image vars come asset effimeri su disco + URL rewrite,
asset paths template da ./assets/X -> {PUBLIC_BASE_URL}/assets/<t>/X,
integrazione LLM error -> record success=0
- FastAPI sub-app: GET /health (no auth), /assets/{t}/{f} (auth+traversal check),
/generated/{gen_id}/{f} (410 su scaduto o mancante)
- FastMCP server con 6 tool: template_create/update/delete/list/get,
document_generate. Tools esposti anche via mcp.tools dict per test.
- main.build_app() compone http_app + FastMCP mount su /mcp + auth middleware
+ lifespan cleanup task TTL (24h). run() entry point per script console.
68 test passed. Build Docker arca-mcp-docugen:dev verificata,
/health endpoint risponde correttamente nel container.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
275 lines
8.4 KiB
Python
import base64
|
|
from unittest.mock import AsyncMock
|
|
|
|
import pytest
|
|
|
|
from mcp_docugen.generation_store import GenerationStore
|
|
from mcp_docugen.llm_client import LLMResponse, OpenRouterClient
|
|
from mcp_docugen.models import TemplateFrontmatter, TemplateVariable
|
|
from mcp_docugen.renderer import (
|
|
ImageTooLarge,
|
|
InvalidImageEncoding,
|
|
InvalidVariableType,
|
|
MissingVariables,
|
|
Renderer,
|
|
)
|
|
from mcp_docugen.template_store import TemplateStore
|
|
|
|
|
|
@pytest.fixture
async def env(tmp_path):
    """Build a Renderer wired to temp-dir stores and a mocked LLM client.

    Returns a ``(renderer, llm_mock)`` pair. A "fattura" template with a
    single required string variable "cliente" is pre-created so tests can
    generate immediately.
    """
    templates = TemplateStore(base_dir=tmp_path / "templates")
    generations = GenerationStore(
        db_path=tmp_path / "gen.db",
        generated_dir=tmp_path / "generated",
    )
    await generations.init()

    # The LLM is fully mocked: every chat() call resolves to this response.
    llm_mock = AsyncMock(spec=OpenRouterClient)
    llm_mock.chat.return_value = LLMResponse(
        text="# Output",
        model="anthropic/claude-sonnet-4",
        tokens_in=10,
        tokens_out=20,
        cost_usd=0.001,
        latency_ms=100,
    )

    renderer = Renderer(
        template_store=templates,
        generation_store=generations,
        llm=llm_mock,
        public_base_url="https://mcp.example.com",
        default_model="anthropic/claude-sonnet-4",
        asset_ttl_days=30,
        max_image_size_mb=10,
    )

    frontmatter = TemplateFrontmatter(
        name="fattura",
        description="x",
        required_variables=[TemplateVariable(name="cliente", type="string")],
    )
    await templates.create(
        name="fattura", frontmatter=frontmatter, body="Cliente: {{cliente}}"
    )
    return renderer, llm_mock
|
|
|
|
|
|
async def test_generate_happy_path_string_var(env):
    """A valid string variable renders through the template and reaches the LLM."""
    renderer, llm = env

    result = await renderer.generate(
        template_name="fattura",
        content_md="# Ordine",
        variables={"cliente": "ACME"},
        instructions=None,
    )

    # The mocked LLM response is surfaced unchanged on the result object.
    assert result.markdown == "# Output"
    assert result.model == "anthropic/claude-sonnet-4"
    assert result.tokens.input == 10

    # The substituted variable must show up in both halves of the prompt:
    # raw value in the user message, rendered template in the system prompt.
    kwargs = llm.chat.await_args.kwargs
    assert "ACME" in kwargs["user"]
    assert "Cliente: ACME" in kwargs["system"]
|
|
|
|
|
|
async def test_generate_missing_required_variable_raises(env):
    """Omitting a required variable aborts generation with MissingVariables."""
    renderer, _ = env
    with pytest.raises(MissingVariables) as excinfo:
        await renderer.generate(
            template_name="fattura",
            content_md="x",
            variables={},  # "cliente" is required but absent
            instructions=None,
        )
    # The error message names the missing variable for the caller.
    assert "cliente" in str(excinfo.value)
|
|
|
|
|
|
async def test_generate_wrong_type_raises(env):
    """Supplying an image payload for a string-typed variable is rejected."""
    renderer, _ = env
    # "cliente" is declared type="string" in the fixture's template.
    image_payload = {
        "kind": "image",
        "data_b64": "x",
        "mime": "image/png",
    }
    with pytest.raises(InvalidVariableType):
        await renderer.generate(
            template_name="fattura",
            content_md="x",
            variables={"cliente": image_payload},
            instructions=None,
        )
|
|
|
|
|
|
async def test_generate_image_variable_is_saved_and_rewritten(tmp_path):
    """An image variable is persisted as an ephemeral asset with a public URL."""
    store = TemplateStore(base_dir=tmp_path / "templates")
    gen_store = GenerationStore(
        db_path=tmp_path / "gen.db",
        generated_dir=tmp_path / "generated",
    )
    await gen_store.init()

    llm = AsyncMock(spec=OpenRouterClient)
    llm.chat.return_value = LLMResponse(
        text="# OK", model="m", tokens_in=1, tokens_out=1, cost_usd=0, latency_ms=10
    )
    renderer = Renderer(
        template_store=store,
        generation_store=gen_store,
        llm=llm,
        public_base_url="https://mcp.example.com",
        default_model="m",
        asset_ttl_days=30,
        max_image_size_mb=10,
    )

    # Template with a single image-typed required variable.
    await store.create(
        name="report",
        frontmatter=TemplateFrontmatter(
            name="report",
            description="x",
            required_variables=[TemplateVariable(name="foto", type="image")],
        ),
        body="",
    )

    # Minimal PNG-looking payload: magic bytes plus zero padding.
    png_bytes = b"\x89PNG\r\n\x1a\n" + b"\x00" * 100
    result = await renderer.generate(
        template_name="report",
        content_md="content",
        variables={
            "foto": {
                "kind": "image",
                "data_b64": base64.b64encode(png_bytes).decode(),
                "mime": "image/png",
            }
        },
        instructions=None,
    )

    # The image must be exposed under a public /generated/ URL...
    assert result.ephemeral_assets_urls
    first_url = result.ephemeral_assets_urls[0]
    assert first_url.startswith("https://mcp.example.com/generated/")
    assert first_url.endswith(".png")

    # ...and materialized on disk inside this generation's directory.
    out_dir = tmp_path / "generated" / result.generation_id
    assert out_dir.exists()
    assert any(out_dir.iterdir())

    # The rewritten URL is what the LLM actually sees in the system prompt.
    assert "https://mcp.example.com/generated/" in llm.chat.await_args.kwargs["system"]
|
|
|
|
|
|
async def test_image_too_large_raises(env):
    """An image payload above the 10 MB cap (max_image_size_mb) is rejected."""
    renderer, _ = env

    fm = TemplateFrontmatter(
        name="big",
        description="x",
        required_variables=[TemplateVariable(name="foto", type="image")],
    )
    await renderer.template_store.create(name="big", frontmatter=fm, body="{{foto}}")

    # 11 MiB of raw bytes — one MiB over the fixture's configured limit.
    oversized = b"x" * (11 * 1024 * 1024)
    too_big_payload = {
        "kind": "image",
        "data_b64": base64.b64encode(oversized).decode(),
        "mime": "image/png",
    }
    with pytest.raises(ImageTooLarge):
        await renderer.generate(
            template_name="big",
            content_md="x",
            variables={"foto": too_big_payload},
            instructions=None,
        )
|
|
|
|
|
|
async def test_invalid_base64_raises(env):
    """A non-base64 image payload fails with InvalidImageEncoding."""
    renderer, _ = env

    frontmatter = TemplateFrontmatter(
        name="bad",
        description="x",
        required_variables=[TemplateVariable(name="foto", type="image")],
    )
    await renderer.template_store.create(
        name="bad", frontmatter=frontmatter, body="{{foto}}"
    )

    # data_b64 is deliberately not valid base64.
    bad_image = {
        "kind": "image",
        "data_b64": "!!!not-base64!!!",
        "mime": "image/png",
    }
    with pytest.raises(InvalidImageEncoding):
        await renderer.generate(
            template_name="bad",
            content_md="x",
            variables={"foto": bad_image},
            instructions=None,
        )
|
|
|
|
|
|
async def test_template_asset_paths_are_rewritten(tmp_path):
    """Relative ./assets/<file> references in a template body are rewritten.

    The system prompt sent to the LLM must contain the absolute
    {public_base_url}/assets/<template>/<file> URL and must no longer
    contain the relative path.
    """
    template_store = TemplateStore(base_dir=tmp_path / "templates")
    generation_store = GenerationStore(
        db_path=tmp_path / "gen.db",
        generated_dir=tmp_path / "generated",
    )
    await generation_store.init()
    llm = AsyncMock(spec=OpenRouterClient)
    llm.chat.return_value = LLMResponse(
        text="out", model="m", tokens_in=1, tokens_out=1, cost_usd=0, latency_ms=10
    )
    renderer = Renderer(
        template_store=template_store,
        generation_store=generation_store,
        llm=llm,
        public_base_url="https://mcp.example.com",
        default_model="m",
        asset_ttl_days=30,
        max_image_size_mb=10,
    )
    fm = TemplateFrontmatter(name="brand", description="x")
    assets = [
        {
            "filename": "logo.png",
            "data_b64": base64.b64encode(b"\x89PNG").decode(),
            "mime": "image/png",
        }
    ]
    await template_store.create(
        name="brand",
        frontmatter=fm,
        # FIX: the body must actually reference ./assets/logo.png, otherwise
        # the rewrite assertions below are vacuous/failing — the original
        # body ("Header  footer") contained no asset reference at all.
        body="Header ![logo](./assets/logo.png) footer",
        assets=assets,
    )
    await renderer.generate(
        template_name="brand",
        content_md="x",
        variables={},
        instructions=None,
    )
    system_prompt = llm.chat.await_args.kwargs["system"]
    # Relative path rewritten to the public per-template asset URL.
    assert "https://mcp.example.com/assets/brand/logo.png" in system_prompt
    assert "./assets/logo.png" not in system_prompt
|
|
|
|
|
|
async def test_generate_uses_frontmatter_model_override(tmp_path):
    """A model set in template frontmatter wins over the renderer default."""
    store = TemplateStore(base_dir=tmp_path / "templates")
    gen_store = GenerationStore(
        db_path=tmp_path / "gen.db",
        generated_dir=tmp_path / "generated",
    )
    await gen_store.init()

    llm = AsyncMock(spec=OpenRouterClient)
    llm.chat.return_value = LLMResponse(
        text="out",
        model="openai/gpt-4o",
        tokens_in=1,
        tokens_out=1,
        cost_usd=0,
        latency_ms=10,
    )
    # Renderer default is a different model than the template will request.
    renderer = Renderer(
        template_store=store,
        generation_store=gen_store,
        llm=llm,
        public_base_url="https://mcp.example.com",
        default_model="anthropic/claude-sonnet-4",
        asset_ttl_days=30,
        max_image_size_mb=10,
    )

    await store.create(
        name="x",
        frontmatter=TemplateFrontmatter(
            name="x", description="y", model="openai/gpt-4o"
        ),
        body="b",
    )
    await renderer.generate(
        template_name="x", content_md="c", variables={}, instructions=None
    )

    # The frontmatter's model must be the one passed to the LLM client.
    assert llm.chat.await_args.kwargs["model"] == "openai/gpt-4o"
|