b32669caa7
Problema: i template puntavano a un path host hardcoded
(stylesheet: /home/adriano/.../themes/tielogic.css), quindi il file .md
generato non era portabile — su un'altra macchina md-to-pdf non trovava
il CSS e produceva PDF senza stile.
Soluzione: il Renderer legge il CSS da Settings.inline_stylesheet_path
(default /app/themes/tielogic.css nel container) e lo inietta come
blocco <style>...</style> subito dopo il frontmatter YAML del Markdown
restituito dall'LLM. Il file .md risultante è autocontenuto e portabile.
- renderer.py: nuovo arg inline_stylesheet_path + funzione
_inject_inline_stylesheet (idempotente, gestisce Markdown senza
frontmatter, no-op se CSS vuoto)
- config.py: Settings.inline_stylesheet_path: Path | None
- main.py: passa il path al Renderer
- mcp-docugen.Dockerfile: COPY themes ./themes nello stage builder per
trasportare /app/themes/tielogic.css nell'immagine runtime
- templates_seed/{offerta,report-analisi}/template.md: rimossa la riga
`stylesheet:` dal frontmatter di output + regola tassativa che vieta
all'LLM di emettere blocchi <style> di sua iniziativa (evita
conflitti di cascade visti in test)
- 4 nuovi test unit (76 totali): iniezione dopo frontmatter, prepend
quando frontmatter assente, no-op CSS vuoto, integrazione full E2E
via Renderer.generate
- scripts/bundle-css.py: utility per fixare file .md legacy che
  referenziavano stylesheet: come path host (sostituisce la riga con
  <style> inline pescando il CSS dal repo)
- README aggiornato con rationale e workflow.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
343 lines · 11 KiB · Python
import base64
|
|
from unittest.mock import AsyncMock
|
|
|
|
import pytest
|
|
|
|
from mcp_docugen.generation_store import GenerationStore
|
|
from mcp_docugen.llm_client import LLMResponse, OpenRouterClient
|
|
from mcp_docugen.models import TemplateFrontmatter, TemplateVariable
|
|
from mcp_docugen.renderer import (
|
|
ImageTooLarge,
|
|
InvalidImageEncoding,
|
|
InvalidVariableType,
|
|
MissingVariables,
|
|
Renderer,
|
|
)
|
|
from mcp_docugen.template_store import TemplateStore
|
|
|
|
|
|
@pytest.fixture
async def env(tmp_path):
    """Build a Renderer wired to tmp-path stores and a mocked LLM client.

    Seeds one template ("fattura") with a single required string variable
    so tests can exercise the happy path and validation errors.
    Returns a (renderer, llm_mock) pair; everything lives under tmp_path,
    so pytest handles cleanup.
    """
    templates = TemplateStore(base_dir=tmp_path / "templates")
    generations = GenerationStore(
        db_path=tmp_path / "gen.db",
        generated_dir=tmp_path / "generated",
    )
    await generations.init()

    mock_llm = AsyncMock(spec=OpenRouterClient)
    mock_llm.chat.return_value = LLMResponse(
        text="# Output",
        model="anthropic/claude-sonnet-4",
        tokens_in=10,
        tokens_out=20,
        cost_usd=0.001,
        latency_ms=100,
    )

    renderer = Renderer(
        template_store=templates,
        generation_store=generations,
        llm=mock_llm,
        public_base_url="https://mcp.example.com",
        default_model="anthropic/claude-sonnet-4",
        asset_ttl_days=30,
        max_image_size_mb=10,
    )

    frontmatter = TemplateFrontmatter(
        name="fattura",
        description="x",
        required_variables=[TemplateVariable(name="cliente", type="string")],
    )
    await templates.create(
        name="fattura", frontmatter=frontmatter, body="Cliente: {{cliente}}"
    )
    return renderer, mock_llm
|
|
|
|
|
|
async def test_generate_happy_path_string_var(env):
    """A valid string variable flows into both prompts and the result."""
    renderer, llm = env

    result = await renderer.generate(
        template_name="fattura",
        content_md="# Ordine",
        variables={"cliente": "ACME"},
        instructions=None,
    )

    # The mocked LLM response is surfaced untouched.
    assert result.markdown == "# Output"
    assert result.model == "anthropic/claude-sonnet-4"
    assert result.tokens.input == 10

    # The raw value reaches the user prompt; the substituted template
    # body ("Cliente: ACME") reaches the system prompt.
    call_kwargs = llm.chat.await_args.kwargs
    assert "ACME" in call_kwargs["user"]
    assert "Cliente: ACME" in call_kwargs["system"]
|
|
|
|
|
|
async def test_generate_missing_required_variable_raises(env):
    """Omitting a required variable raises MissingVariables naming it."""
    renderer, _ = env

    with pytest.raises(MissingVariables) as excinfo:
        await renderer.generate(
            template_name="fattura",
            content_md="x",
            variables={},
            instructions=None,
        )

    # The error message must point at the missing variable by name.
    assert "cliente" in str(excinfo.value)
|
|
|
|
|
|
async def test_generate_wrong_type_raises(env):
    """Supplying an image payload for a string variable is rejected."""
    renderer, _ = env

    image_payload = {
        "kind": "image",
        "data_b64": "x",
        "mime": "image/png",
    }
    with pytest.raises(InvalidVariableType):
        await renderer.generate(
            template_name="fattura",
            content_md="x",
            variables={"cliente": image_payload},
            instructions=None,
        )
|
|
|
|
|
|
async def test_generate_image_variable_is_saved_and_rewritten(tmp_path):
    """An image variable is persisted under generated/<id>/ and exposed
    through a public URL that also reaches the system prompt."""
    templates = TemplateStore(base_dir=tmp_path / "templates")
    generations = GenerationStore(
        db_path=tmp_path / "gen.db",
        generated_dir=tmp_path / "generated",
    )
    await generations.init()

    llm = AsyncMock(spec=OpenRouterClient)
    llm.chat.return_value = LLMResponse(
        text="# OK", model="m", tokens_in=1, tokens_out=1, cost_usd=0, latency_ms=10
    )
    renderer = Renderer(
        template_store=templates,
        generation_store=generations,
        llm=llm,
        public_base_url="https://mcp.example.com",
        default_model="m",
        asset_ttl_days=30,
        max_image_size_mb=10,
    )

    frontmatter = TemplateFrontmatter(
        name="report",
        description="x",
        required_variables=[TemplateVariable(name="foto", type="image")],
    )
    await templates.create(name="report", frontmatter=frontmatter, body="")

    # PNG magic bytes plus padding: valid enough to be stored as .png.
    png = b"\x89PNG\r\n\x1a\n" + b"\x00" * 100
    result = await renderer.generate(
        template_name="report",
        content_md="content",
        variables={
            "foto": {
                "kind": "image",
                "data_b64": base64.b64encode(png).decode(),
                "mime": "image/png",
            }
        },
        instructions=None,
    )

    # The asset is published under the public base URL with a .png suffix.
    assert result.ephemeral_assets_urls
    url = result.ephemeral_assets_urls[0]
    assert url.startswith("https://mcp.example.com/generated/")
    assert url.endswith(".png")

    # The bytes actually landed on disk under this generation's directory.
    gen_dir = tmp_path / "generated" / result.generation_id
    assert gen_dir.exists()
    assert any(gen_dir.iterdir())

    # The LLM saw the rewritten public URL, not raw base64.
    assert "https://mcp.example.com/generated/" in llm.chat.await_args.kwargs["system"]
|
|
|
|
|
|
async def test_image_too_large_raises(env):
    """An image above max_image_size_mb (10 MB) raises ImageTooLarge."""
    renderer, _ = env

    frontmatter = TemplateFrontmatter(
        name="big",
        description="x",
        required_variables=[TemplateVariable(name="foto", type="image")],
    )
    await renderer.template_store.create(
        name="big", frontmatter=frontmatter, body="{{foto}}"
    )

    oversized = b"x" * (11 * 1024 * 1024)  # 11 MB, just over the 10 MB cap
    with pytest.raises(ImageTooLarge):
        await renderer.generate(
            template_name="big",
            content_md="x",
            variables={
                "foto": {
                    "kind": "image",
                    "data_b64": base64.b64encode(oversized).decode(),
                    "mime": "image/png",
                }
            },
            instructions=None,
        )
|
|
|
|
|
|
async def test_invalid_base64_raises(env):
    """Non-decodable data_b64 raises InvalidImageEncoding."""
    renderer, _ = env

    frontmatter = TemplateFrontmatter(
        name="bad",
        description="x",
        required_variables=[TemplateVariable(name="foto", type="image")],
    )
    await renderer.template_store.create(
        name="bad", frontmatter=frontmatter, body="{{foto}}"
    )

    with pytest.raises(InvalidImageEncoding):
        await renderer.generate(
            template_name="bad",
            content_md="x",
            variables={
                "foto": {
                    "kind": "image",
                    "data_b64": "!!!not-base64!!!",
                    "mime": "image/png",
                }
            },
            instructions=None,
        )
|
|
|
|
|
|
async def test_template_asset_paths_are_rewritten(tmp_path):
    """Relative ./assets/ references in the template body are rewritten to
    public URLs before the body reaches the system prompt.

    Fix: the template body had lost its image reference (it read
    "Header  footer" with a tell-tale double space), which made the
    positive assertion unsatisfiable and the negative one vacuous.
    Restored the ./assets/logo.png markdown image so the rewrite is
    actually exercised.
    """
    template_store = TemplateStore(base_dir=tmp_path / "templates")
    generation_store = GenerationStore(
        db_path=tmp_path / "gen.db",
        generated_dir=tmp_path / "generated",
    )
    await generation_store.init()

    llm = AsyncMock(spec=OpenRouterClient)
    llm.chat.return_value = LLMResponse(
        text="out", model="m", tokens_in=1, tokens_out=1, cost_usd=0, latency_ms=10
    )
    renderer = Renderer(
        template_store=template_store,
        generation_store=generation_store,
        llm=llm,
        public_base_url="https://mcp.example.com",
        default_model="m",
        asset_ttl_days=30,
        max_image_size_mb=10,
    )

    fm = TemplateFrontmatter(name="brand", description="x")
    assets = [
        {
            "filename": "logo.png",
            "data_b64": base64.b64encode(b"\x89PNG").decode(),
            "mime": "image/png",
        }
    ]
    await template_store.create(
        name="brand",
        frontmatter=fm,
        # Body references the bundled asset via a relative path that the
        # renderer must rewrite to the public /assets/ URL.
        body="Header ![logo](./assets/logo.png) footer",
        assets=assets,
    )

    await renderer.generate(
        template_name="brand",
        content_md="x",
        variables={},
        instructions=None,
    )

    system_prompt = llm.chat.await_args.kwargs["system"]
    assert "https://mcp.example.com/assets/brand/logo.png" in system_prompt
    assert "./assets/logo.png" not in system_prompt
|
|
|
|
|
|
async def test_generate_uses_frontmatter_model_override(tmp_path):
    """A template-level `model` in frontmatter overrides the renderer's
    default model when calling the LLM."""
    templates = TemplateStore(base_dir=tmp_path / "templates")
    generations = GenerationStore(
        db_path=tmp_path / "gen.db",
        generated_dir=tmp_path / "generated",
    )
    await generations.init()

    llm = AsyncMock(spec=OpenRouterClient)
    llm.chat.return_value = LLMResponse(
        text="out",
        model="openai/gpt-4o",
        tokens_in=1,
        tokens_out=1,
        cost_usd=0,
        latency_ms=10,
    )
    # Renderer default is claude-sonnet; the template asks for gpt-4o.
    renderer = Renderer(
        template_store=templates,
        generation_store=generations,
        llm=llm,
        public_base_url="https://mcp.example.com",
        default_model="anthropic/claude-sonnet-4",
        asset_ttl_days=30,
        max_image_size_mb=10,
    )

    frontmatter = TemplateFrontmatter(name="x", description="y", model="openai/gpt-4o")
    await templates.create(name="x", frontmatter=frontmatter, body="b")

    await renderer.generate(
        template_name="x", content_md="c", variables={}, instructions=None
    )

    assert llm.chat.await_args.kwargs["model"] == "openai/gpt-4o"
|
|
|
|
|
|
def test_inject_inline_stylesheet_after_frontmatter():
    """CSS is injected as a <style> block between frontmatter and body."""
    from mcp_docugen.renderer import _inject_inline_stylesheet

    md = "---\ntitle: T\n---\n\n# Body\n"
    out = _inject_inline_stylesheet(md, "body { color: red; }")

    # Frontmatter stays first and untouched.
    assert out.startswith("---\ntitle: T\n---\n")
    assert "<style>\nbody { color: red; }\n</style>" in out

    # The style block sits strictly after the frontmatter and before the body.
    style_idx = out.find("<style>")
    assert 0 < style_idx < out.find("# Body")
|
|
|
|
|
|
def test_inject_inline_stylesheet_no_frontmatter_prepends():
    """Without YAML frontmatter the <style> block is simply prepended."""
    from mcp_docugen.renderer import _inject_inline_stylesheet

    result = _inject_inline_stylesheet("# Body only", "h1 { color: blue; }")
    assert result.startswith("<style>\nh1 { color: blue; }\n</style>")
|
|
|
|
|
|
def test_inject_inline_stylesheet_empty_css_is_noop():
    """Empty or whitespace-only CSS leaves the Markdown unchanged."""
    from mcp_docugen.renderer import _inject_inline_stylesheet

    md = "---\nx: 1\n---\nbody"
    for css in ("", " \n"):
        assert _inject_inline_stylesheet(md, css) == md
|
|
|
|
|
|
async def test_generate_injects_inline_stylesheet_when_configured(tmp_path):
    """End to end: when inline_stylesheet_path is configured, the CSS file's
    contents are injected into the generated Markdown after the frontmatter."""
    css_path = tmp_path / "tielogic.css"
    css_path.write_text("body { font-family: Inter; }", encoding="utf-8")

    templates = TemplateStore(base_dir=tmp_path / "templates")
    generations = GenerationStore(
        db_path=tmp_path / "gen.db",
        generated_dir=tmp_path / "generated",
    )
    await generations.init()

    # The mocked LLM returns Markdown WITH frontmatter so we can check
    # that injection lands after it, not before.
    llm = AsyncMock(spec=OpenRouterClient)
    llm.chat.return_value = LLMResponse(
        text="---\ntitle: T\n---\n\n# Body\n",
        model="anthropic/claude-sonnet-4",
        tokens_in=1,
        tokens_out=1,
        cost_usd=0.0,
        latency_ms=10,
    )
    renderer = Renderer(
        template_store=templates,
        generation_store=generations,
        llm=llm,
        public_base_url="https://mcp.example.com",
        default_model="anthropic/claude-sonnet-4",
        asset_ttl_days=30,
        max_image_size_mb=10,
        inline_stylesheet_path=css_path,
    )

    await templates.create(
        name="t", frontmatter=TemplateFrontmatter(name="t", description="d"), body="b"
    )

    result = await renderer.generate(
        template_name="t", content_md="c", variables={}, instructions=None
    )

    # Frontmatter still leads the document; the CSS is inlined after it.
    assert result.markdown.startswith("---\n")
    assert "<style>" in result.markdown
    assert "font-family: Inter" in result.markdown
|