chore(v2): restructure monorepo to src/ layout with uv

Aligns the repo with the python-project-spec-design.md template chosen
for V2.0.0. Big move, no logic changes. The 4 pre-existing test
failures (3 backend: test_recipes::test_update_recipe, test_recipes::
test_recipe_versioning, test_tasks::test_reorder_tasks, plus the
client test_save_measurement_proxy) survive unchanged.

Layout changes
- server/        -> src/backend/
- server/middleware/ -> src/backend/api/middleware/
- server/routers/    -> src/backend/api/routers/
- server/models/     -> src/backend/models/orm/
- server/schemas/    -> src/backend/models/api/
- server/uploads/    -> uploads/ (project root, mounted volume)
- server/tests/      -> src/backend/tests/
- client/            -> src/frontend/flask_app/ (Flask kept; the React
  exception — a deliberate deviation from the template — is documented in
  CLAUDE.md, justified by tablet UX, USB caliper/barcode workflow and
  Fabric.js integration)

Tooling
- pyproject.toml: monorepo with [project] core deps and
  optional-dependencies server / client / dev. Replaces both
  server/requirements.txt and client/requirements.txt.
- uv.lock + .python-version (3.11) committed for reproducible builds.
- Dockerfile (root, backend) and Dockerfile.frontend rewritten to use
  uv sync --frozen --no-dev --extra server|client; legacy Dockerfiles
  preserved as Dockerfile.legacy for reference but excluded from build
  context via .dockerignore.
- docker-compose.dev.yml + docker-compose.yml: build context now ".",
  dockerfile pointing to the root files.

Code adjustments forced by the move
- Every "from config|database|models|schemas|services|routers|middleware
  import ..." rewritten to its src.backend.* equivalent (50+ files
  including indented inline imports inside test bodies).
- src/backend/migrations/env.py: insert project root into sys.path so
  alembic can resolve src.backend.* imports regardless of cwd.
- src/backend/config.py: env_file ../../.env (was ../.env), upload_path
  resolves project root via parents[2].
- src/backend/tests/conftest.py + tests: import ... from src.backend.*
  instead of bare names; old per-directory pytest.ini files removed in
  favor of root pyproject.toml [tool.pytest.ini_options].
- .gitignore: uploads/ at root, src/frontend/flask_app/static/css/
  tailwind.css path; .dockerignore tightened.
- CLAUDE.md: rewrote sections "Layout del repository", "Comandi di
  Sviluppo", "Database & Migrations", "Test", "i18n", and all path
  references throughout the architecture sections.

Verified
- uv lock resolves 77 packages; uv sync --extra server --extra client
  --extra dev installs cleanly.
- uv run pytest: 171 passed, 4 pre-existing failures.
- uv run alembic -c src/backend/migrations/alembic.ini check loads
  config and metadata (errors only on the absent local MySQL).

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
2026-04-25 12:26:47 +02:00
parent 86df67f2e5
commit 1a0431366f
174 changed files with 2568 additions and 308 deletions
View File
+97
View File
@@ -0,0 +1,97 @@
"""Authentication service - password hashing, API key management."""
import secrets
from datetime import datetime
import bcrypt
from sqlalchemy import select, update
from sqlalchemy.ext.asyncio import AsyncSession
from src.backend.models.orm.user import User
def hash_password(password: str) -> str:
    """Return a salted bcrypt hash of *password* (UTF-8 encoded)."""
    salt = bcrypt.gensalt()
    digest = bcrypt.hashpw(password.encode("utf-8"), salt)
    return digest.decode("utf-8")
def verify_password(plain_password: str, hashed_password: str) -> bool:
    """Check *plain_password* against a stored bcrypt *hashed_password*."""
    candidate = plain_password.encode("utf-8")
    stored = hashed_password.encode("utf-8")
    return bcrypt.checkpw(candidate, stored)
def generate_api_key() -> str:
    """Return a cryptographically secure, URL-safe API key.

    48 random bytes encode to exactly 64 url-safe base64 characters.
    """
    return secrets.token_urlsafe(48)
async def authenticate_user(
    db: AsyncSession, username: str, password: str
) -> User | None:
    """Look up an active user by *username* and verify *password*.

    Returns the User on success; None for an unknown user, an inactive
    user, or a wrong password.
    """
    stmt = select(User).where(User.username == username, User.active == True)  # noqa: E712
    candidate = (await db.execute(stmt)).scalar_one_or_none()
    if candidate is not None and verify_password(password, candidate.password_hash):
        return candidate
    return None
async def login_user(db: AsyncSession, user: User) -> str:
    """Issue a fresh API key for *user*, stamp last_login, and return the key.

    NOTE(review): datetime.utcnow() is deprecated since Python 3.12 —
    consider timezone-aware datetime.now(timezone.utc) if the DB column
    accepts aware datetimes; confirm before changing.
    """
    fresh_key = generate_api_key()
    stmt = (
        update(User)
        .where(User.id == user.id)
        .values(api_key=fresh_key, last_login=datetime.utcnow())
    )
    await db.execute(stmt)
    await db.flush()
    return fresh_key
async def logout_user(db: AsyncSession, user: User) -> None:
    """Invalidate *user*'s API key so subsequent authenticated requests fail."""
    stmt = update(User).where(User.id == user.id).values(api_key=None)
    await db.execute(stmt)
    await db.flush()
async def create_user(
    db: AsyncSession,
    username: str,
    password: str,
    display_name: str,
    email: str | None = None,
    roles: list[str] | None = None,
    is_admin: bool = False,
    language_pref: str = "it",
    theme_pref: str = "light",
) -> User:
    """Create and persist a new user; *password* is stored bcrypt-hashed.

    Returns the refreshed User with DB-generated fields populated.
    """
    new_user = User(
        username=username,
        password_hash=hash_password(password),
        display_name=display_name,
        email=email,
        roles=roles or [],
        is_admin=is_admin,
        language_pref=language_pref,
        theme_pref=theme_pref,
    )
    db.add(new_user)
    await db.flush()
    await db.refresh(new_user)
    return new_user
async def regenerate_api_key(db: AsyncSession, user_id: int) -> str:
    """Replace the API key of user *user_id* with a freshly generated one."""
    replacement = generate_api_key()
    stmt = update(User).where(User.id == user_id).values(api_key=replacement)
    await db.execute(stmt)
    await db.flush()
    return replacement
@@ -0,0 +1,77 @@
"""Measurement service - pass/fail calculation, data storage."""
from decimal import Decimal
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from src.backend.models.orm.measurement import Measurement
from src.backend.models.orm.task import RecipeSubtask
def calculate_pass_fail(
    value: float, subtask: RecipeSubtask
) -> tuple[str, float | None]:
    """Classify *value* against the subtask's tolerance bands.

    Returns (status, deviation):
      * 'fail'    — outside the UTL/LTL tolerance limits
      * 'warning' — inside tolerance but outside the UWL/LWL warning band
      * 'pass'    — inside all limits
    Deviation is value - nominal, computed via Decimal to avoid float
    artefacts; None when the subtask defines no nominal.
    """
    deviation = None
    if subtask.nominal is not None:
        deviation = float(Decimal(str(value)) - Decimal(str(subtask.nominal)))

    def _above(limit) -> bool:  # value exceeds an upper limit
        return limit is not None and value > float(limit)

    def _below(limit) -> bool:  # value undercuts a lower limit
        return limit is not None and value < float(limit)

    # Hard tolerance limits take priority over the warning band.
    if _above(subtask.utl) or _below(subtask.ltl):
        return "fail", deviation
    if _above(subtask.uwl) or _below(subtask.lwl):
        return "warning", deviation
    return "pass", deviation
async def save_measurement(
    db: AsyncSession,
    subtask_id: int,
    version_id: int,
    measured_by: int,
    value: float,
    lot_number: str | None = None,
    serial_number: str | None = None,
    input_method: str = "manual",
) -> Measurement:
    """Persist one measurement, deriving pass/fail from the subtask's limits.

    Raises ValueError when *subtask_id* does not exist.
    Returns the refreshed Measurement row.
    """
    lookup = await db.execute(
        select(RecipeSubtask).where(RecipeSubtask.id == subtask_id)
    )
    subtask = lookup.scalar_one_or_none()
    if subtask is None:
        raise ValueError(f"Subtask {subtask_id} not found")

    verdict, deviation = calculate_pass_fail(value, subtask)
    record = Measurement(
        subtask_id=subtask_id,
        version_id=version_id,
        measured_by=measured_by,
        value=value,
        pass_fail=verdict,
        deviation=deviation,
        lot_number=lot_number,
        serial_number=serial_number,
        input_method=input_method,
    )
    db.add(record)
    await db.flush()
    await db.refresh(record)
    return record
+585
View File
@@ -0,0 +1,585 @@
"""Recipe service - business logic for copy-on-write versioning."""
import math
from typing import Optional
from fastapi import HTTPException, status
from sqlalchemy import func, select, update
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import selectinload
from src.backend.models.orm.measurement import Measurement
from src.backend.models.orm.recipe import Recipe, RecipeVersion
from src.backend.models.orm.setting import RecipeVersionAudit
from src.backend.models.orm.task import RecipeSubtask, RecipeTask
from src.backend.models.orm.user import User
from src.backend.models.api.recipe import RecipeCreate, RecipeUpdate
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
async def _get_recipe_or_404(db: AsyncSession, recipe_id: int) -> Recipe:
    """Fetch a recipe by id, raising HTTP 404 when it does not exist."""
    found = (
        await db.execute(select(Recipe).where(Recipe.id == recipe_id))
    ).scalar_one_or_none()
    if found is None:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Recipe not found")
    return found
async def _get_current_version(
    db: AsyncSession, recipe_id: int
) -> Optional[RecipeVersion]:
    """Fetch the is_current version of a recipe, tasks/subtasks eager-loaded.

    Returns None when the recipe has no current version.
    """
    stmt = (
        select(RecipeVersion)
        .where(
            RecipeVersion.recipe_id == recipe_id,
            RecipeVersion.is_current == True,  # noqa: E712
        )
        .options(
            selectinload(RecipeVersion.tasks).selectinload(RecipeTask.subtasks)
        )
    )
    return (await db.execute(stmt)).scalar_one_or_none()
async def _copy_tasks_to_version(
    db: AsyncSession,
    source_version: RecipeVersion,
    target_version: RecipeVersion,
) -> None:
    """Deep-copy every task (and its subtasks) from *source_version* to *target_version*.

    Flushes after each task insert so the new task id is available for the
    subtask foreign keys, and once more at the end for the last subtasks.
    """
    for original_task in source_version.tasks:
        clone = RecipeTask(
            version_id=target_version.id,
            order_index=original_task.order_index,
            title=original_task.title,
            directive=original_task.directive,
            description=original_task.description,
            file_path=original_task.file_path,
            file_type=original_task.file_type,
            annotations_json=original_task.annotations_json,
        )
        db.add(clone)
        await db.flush()  # assigns clone.id for the subtask FK below
        for original_sub in original_task.subtasks:
            db.add(
                RecipeSubtask(
                    task_id=clone.id,
                    marker_number=original_sub.marker_number,
                    description=original_sub.description,
                    measurement_type=original_sub.measurement_type,
                    nominal=original_sub.nominal,
                    utl=original_sub.utl,
                    uwl=original_sub.uwl,
                    lwl=original_sub.lwl,
                    ltl=original_sub.ltl,
                    unit=original_sub.unit,
                    image_path=original_sub.image_path,
                )
            )
    await db.flush()
async def _write_audit(
    db: AsyncSession,
    recipe_id: int,
    old_version_id: Optional[int],
    new_version_id: int,
    changed_by: int,
    change_type: str,
    change_reason: Optional[str] = None,
) -> None:
    """Append a RecipeVersionAudit row recording one version transition."""
    db.add(
        RecipeVersionAudit(
            recipe_id=recipe_id,
            old_version_id=old_version_id,
            new_version_id=new_version_id,
            changed_by=changed_by,
            change_type=change_type,
            change_reason=change_reason,
        )
    )
    await db.flush()
# ---------------------------------------------------------------------------
# Public API
# ---------------------------------------------------------------------------
async def create_recipe(
    db: AsyncSession,
    data: RecipeCreate,
    user: User,
) -> Recipe:
    """Create a recipe together with its first version (v1) and audit entry.

    Raises HTTP 409 when *data.code* is already taken.
    Returns the recipe reloaded with versions, tasks and subtasks.
    """
    # Enforce unique recipe code up-front for a friendly 409.
    duplicate = (
        await db.execute(select(Recipe).where(Recipe.code == data.code))
    ).scalar_one_or_none()
    if duplicate is not None:
        raise HTTPException(
            status_code=status.HTTP_409_CONFLICT,
            detail=f"Recipe code '{data.code}' already exists",
        )

    new_recipe = Recipe(
        code=data.code,
        name=data.name,
        description=data.description,
        image_path=data.image_path,
        created_by=user.id,
    )
    db.add(new_recipe)
    await db.flush()  # assigns new_recipe.id

    first_version = RecipeVersion(
        recipe_id=new_recipe.id,
        version_number=1,
        is_current=True,
        created_by=user.id,
        change_notes="Initial version",
    )
    db.add(first_version)
    await db.flush()

    await _write_audit(
        db,
        recipe_id=new_recipe.id,
        old_version_id=None,
        new_version_id=first_version.id,
        changed_by=user.id,
        change_type="CREATE",
        change_reason="Recipe created",
    )

    # Reload eagerly so the response can serialize versions/tasks/subtasks.
    reloaded = await db.execute(
        select(Recipe)
        .where(Recipe.id == new_recipe.id)
        .options(
            selectinload(Recipe.versions)
            .selectinload(RecipeVersion.tasks)
            .selectinload(RecipeTask.subtasks)
        )
    )
    return reloaded.scalar_one()
async def create_new_version(
    db: AsyncSession,
    recipe_id: int,
    data: RecipeUpdate,
    user: User,
) -> RecipeVersion:
    """Copy-on-write: create a new version by cloning current tasks/subtasks.

    1. Fetch current version (with tasks + subtasks)
    2. Create a new RecipeVersion with incremented version_number
    3. Deep-copy all tasks + subtasks into the new version
    4. Apply updates from *data* to the recipe header
    5. Flip is_current: old -> False, new -> True
    6. Write audit trail

    Raises HTTP 404 for an unknown recipe or a recipe without a current
    version, and HTTP 409 when the recipe is inactive.
    """
    recipe = await _get_recipe_or_404(db, recipe_id)
    if not recipe.active:
        raise HTTPException(
            status_code=status.HTTP_409_CONFLICT,
            detail="Cannot update an inactive recipe",
        )
    current = await _get_current_version(db, recipe_id)
    if current is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="No current version found for this recipe",
        )
    old_version_id = current.id
    new_version_number = current.version_number + 1
    # Mark old version as non-current
    # (flipped before inserting the new one so at most one row is current)
    await db.execute(
        update(RecipeVersion)
        .where(RecipeVersion.id == current.id)
        .values(is_current=False)
    )
    # Create new version
    new_version = RecipeVersion(
        recipe_id=recipe_id,
        version_number=new_version_number,
        is_current=True,
        created_by=user.id,
        change_notes=data.change_notes,
    )
    db.add(new_version)
    await db.flush()  # assigns new_version.id, required by the task copies below
    # Deep-copy tasks + subtasks
    await _copy_tasks_to_version(db, source_version=current, target_version=new_version)
    # Apply task-level updates (file_path, annotations_json) to the first task
    if data.file_path is not None or data.annotations_json is not None:
        # Reload new version's tasks
        result_tasks = await db.execute(
            select(RecipeTask)
            .where(RecipeTask.version_id == new_version.id)
            .order_by(RecipeTask.order_index)
        )
        tasks = list(result_tasks.scalars().all())
        if tasks:
            first_task = tasks[0]
            if data.file_path is not None:
                first_task.file_path = data.file_path
                # Auto-detect file_type from extension
                if data.file_type:
                    first_task.file_type = data.file_type
                elif data.file_path.lower().endswith(".pdf"):
                    first_task.file_type = "pdf"
                else:
                    first_task.file_type = "image"
            if data.annotations_json is not None:
                first_task.annotations_json = data.annotations_json
        else:
            # No tasks yet — create a default task to hold the drawing
            default_task = RecipeTask(
                version_id=new_version.id,
                order_index=0,
                title="Technical Drawing",
                file_path=data.file_path,
                file_type=data.file_type or (
                    "pdf" if data.file_path and data.file_path.lower().endswith(".pdf")
                    else "image"
                ),
                annotations_json=data.annotations_json,
            )
            db.add(default_task)
            await db.flush()
    # Apply header updates
    update_fields: dict = {}
    if data.name is not None:
        update_fields["name"] = data.name
    if data.description is not None:
        update_fields["description"] = data.description
    if data.image_path is not None:
        update_fields["image_path"] = data.image_path
    if update_fields:
        await db.execute(
            update(Recipe).where(Recipe.id == recipe_id).values(**update_fields)
        )
    # Audit
    await _write_audit(
        db,
        recipe_id=recipe_id,
        old_version_id=old_version_id,
        new_version_id=new_version.id,
        changed_by=user.id,
        change_type="UPDATE",
        change_reason=data.change_notes,
    )
    await db.flush()
    # Reload the version with tasks for the response
    result = await db.execute(
        select(RecipeVersion)
        .where(RecipeVersion.id == new_version.id)
        .options(
            selectinload(RecipeVersion.tasks).selectinload(RecipeTask.subtasks)
        )
    )
    return result.scalar_one()
async def get_current_version(
    db: AsyncSession, recipe_id: int
) -> RecipeVersion:
    """Return the current version (tasks + subtasks loaded) of a recipe.

    Raises HTTP 404 when the recipe is missing or has no current version.
    """
    await _get_recipe_or_404(db, recipe_id)
    current = await _get_current_version(db, recipe_id)
    if current is not None:
        return current
    raise HTTPException(
        status_code=status.HTTP_404_NOT_FOUND,
        detail="No current version found for this recipe",
    )
async def get_measurement_count(
    db: AsyncSession, recipe_id: int, version_number: int
) -> int:
    """Count measurements recorded against one version of a recipe.

    Raises HTTP 404 when (recipe_id, version_number) resolves to nothing.
    """
    # Resolve the version id from (recipe_id, version_number).
    version_id = (
        await db.execute(
            select(RecipeVersion.id).where(
                RecipeVersion.recipe_id == recipe_id,
                RecipeVersion.version_number == version_number,
            )
        )
    ).scalar_one_or_none()
    if version_id is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Version {version_number} not found for recipe {recipe_id}",
        )
    counted = await db.execute(
        select(func.count()).select_from(Measurement).where(
            Measurement.version_id == version_id
        )
    )
    return counted.scalar_one()
async def version_has_measurements(db: AsyncSession, version_id: int) -> bool:
    """Return True when at least one measurement references *version_id*."""
    total = (
        await db.execute(
            select(func.count()).select_from(Measurement).where(
                Measurement.version_id == version_id
            )
        )
    ).scalar_one()
    return total > 0
async def update_current_version(
    db: AsyncSession,
    recipe_id: int,
    data: RecipeUpdate,
) -> RecipeVersion:
    """Update recipe header/drawing in-place on the current version (no copy-on-write).

    Header fields (name/description/image_path) are applied to the recipe
    row; file/annotation updates go to the first task of the current
    version (created on the fly when the version has no tasks yet).

    Raises HTTP 404 when the recipe has no current version. This also
    covers an unknown recipe_id, which previously crashed with an
    AttributeError on ``current.id`` instead of returning 404.
    """
    # Resolve the current version first so a bad recipe_id fails fast,
    # before any header mutation is issued.
    current = await _get_current_version(db, recipe_id)
    if current is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="No current version found for this recipe",
        )

    # Apply header updates (name, description, image_path)
    update_fields: dict = {}
    if data.name is not None:
        update_fields["name"] = data.name
    if data.description is not None:
        update_fields["description"] = data.description
    if data.image_path is not None:
        update_fields["image_path"] = data.image_path
    if update_fields:
        await db.execute(
            update(Recipe).where(Recipe.id == recipe_id).values(**update_fields)
        )

    # Apply task-level file updates to first task (same logic as create_new_version)
    if data.file_path is not None or data.annotations_json is not None:
        result_tasks = await db.execute(
            select(RecipeTask)
            .where(RecipeTask.version_id == current.id)
            .order_by(RecipeTask.order_index)
        )
        tasks = list(result_tasks.scalars().all())
        if tasks:
            first_task = tasks[0]
            if data.file_path is not None:
                first_task.file_path = data.file_path
                # Explicit file_type wins; otherwise infer from the extension.
                if data.file_type:
                    first_task.file_type = data.file_type
                elif data.file_path.lower().endswith(".pdf"):
                    first_task.file_type = "pdf"
                else:
                    first_task.file_type = "image"
            if data.annotations_json is not None:
                first_task.annotations_json = data.annotations_json
        else:
            # No tasks yet — create a default task to hold the drawing
            default_task = RecipeTask(
                version_id=current.id,
                order_index=0,
                title="Technical Drawing",
                file_path=data.file_path,
                file_type=data.file_type or (
                    "pdf" if data.file_path and data.file_path.lower().endswith(".pdf")
                    else "image"
                ),
                annotations_json=data.annotations_json,
            )
            db.add(default_task)
    await db.flush()

    # Reload version with tasks for response
    result = await db.execute(
        select(RecipeVersion)
        .where(RecipeVersion.id == current.id)
        .options(
            selectinload(RecipeVersion.tasks).selectinload(RecipeTask.subtasks)
        )
    )
    return result.scalar_one()
async def list_recipes(
    db: AsyncSession,
    page: int = 1,
    per_page: int = 20,
    search: Optional[str] = None,
) -> dict:
    """Return a paginated listing of active recipes.

    *search* matches name OR code case-insensitively (ILIKE). Each recipe
    carries its versions (with tasks) for thumbnail display.

    Returns {"items", "total", "page", "per_page", "pages"}. Pathological
    paging inputs are clamped instead of raising: per_page=0 previously hit
    ZeroDivisionError in math.ceil, and page<1 produced a negative OFFSET.
    """
    page = max(1, page)
    per_page = max(1, per_page)

    base_filter = [Recipe.active == True]  # noqa: E712
    if search:
        like_pattern = f"%{search}%"
        base_filter.append(
            (Recipe.name.ilike(like_pattern)) | (Recipe.code.ilike(like_pattern))
        )

    # Total count
    count_stmt = select(func.count()).select_from(Recipe).where(*base_filter)
    total = (await db.execute(count_stmt)).scalar_one()
    pages = max(1, math.ceil(total / per_page))
    offset = (page - 1) * per_page

    # Fetch recipes with versions and tasks for thumbnail display
    stmt = (
        select(Recipe)
        .where(*base_filter)
        .options(
            selectinload(Recipe.versions)
            .selectinload(RecipeVersion.tasks)
        )
        .order_by(Recipe.name.asc())
        .offset(offset)
        .limit(per_page)
    )
    result = await db.execute(stmt)
    recipes = result.scalars().all()
    return {
        "items": recipes,
        "total": total,
        "page": page,
        "per_page": per_page,
        "pages": pages,
    }
async def get_recipe_detail(db: AsyncSession, recipe_id: int) -> Recipe:
    """Fetch one recipe with versions, tasks and subtasks eager-loaded.

    Raises HTTP 404 for an unknown recipe_id.
    """
    stmt = (
        select(Recipe)
        .where(Recipe.id == recipe_id)
        .options(
            selectinload(Recipe.versions)
            .selectinload(RecipeVersion.tasks)
            .selectinload(RecipeTask.subtasks)
        )
    )
    found = (await db.execute(stmt)).scalar_one_or_none()
    if found is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND, detail="Recipe not found"
        )
    return found
async def get_recipe_by_code(db: AsyncSession, code: str) -> Recipe:
    """Look up an active recipe by its barcode/code, fully eager-loaded.

    Raises HTTP 404 when no active recipe matches *code*.
    """
    stmt = (
        select(Recipe)
        .where(Recipe.code == code, Recipe.active == True)  # noqa: E712
        .options(
            selectinload(Recipe.versions)
            .selectinload(RecipeVersion.tasks)
            .selectinload(RecipeTask.subtasks)
        )
    )
    match = (await db.execute(stmt)).scalar_one_or_none()
    if match is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"No active recipe found with code '{code}'",
        )
    return match
async def deactivate_recipe(
    db: AsyncSession, recipe_id: int, user: User
) -> Recipe:
    """Soft-delete a recipe (active=False) and audit the retirement.

    Raises HTTP 404 for an unknown recipe, HTTP 409 when already inactive.
    """
    recipe = await _get_recipe_or_404(db, recipe_id)
    if not recipe.active:
        raise HTTPException(
            status_code=status.HTTP_409_CONFLICT,
            detail="Recipe is already inactive",
        )

    await db.execute(
        update(Recipe).where(Recipe.id == recipe_id).values(active=False)
    )

    # Audit the retirement against the current version, when one exists
    # (old and new version ids are identical — the version did not change).
    current_version = await _get_current_version(db, recipe_id)
    if current_version is not None:
        await _write_audit(
            db,
            recipe_id=recipe_id,
            old_version_id=current_version.id,
            new_version_id=current_version.id,
            changed_by=user.id,
            change_type="RETIRE",
            change_reason="Recipe deactivated",
        )

    await db.flush()
    await db.refresh(recipe)
    return recipe
async def list_versions(
    db: AsyncSession, recipe_id: int
) -> list[RecipeVersion]:
    """All versions of a recipe, newest first, with tasks/subtasks loaded.

    Raises HTTP 404 for an unknown recipe.
    """
    await _get_recipe_or_404(db, recipe_id)
    stmt = (
        select(RecipeVersion)
        .where(RecipeVersion.recipe_id == recipe_id)
        .order_by(RecipeVersion.version_number.desc())
        .options(
            selectinload(RecipeVersion.tasks).selectinload(RecipeTask.subtasks)
        )
    )
    return list((await db.execute(stmt)).scalars().all())
async def get_version_detail(
    db: AsyncSession, recipe_id: int, version_number: int
) -> RecipeVersion:
    """Fetch one specific version of a recipe with its tasks/subtasks.

    Raises HTTP 404 for an unknown recipe or version number.
    """
    await _get_recipe_or_404(db, recipe_id)
    stmt = (
        select(RecipeVersion)
        .where(
            RecipeVersion.recipe_id == recipe_id,
            RecipeVersion.version_number == version_number,
        )
        .options(
            selectinload(RecipeVersion.tasks).selectinload(RecipeTask.subtasks)
        )
    )
    match = (await db.execute(stmt)).scalar_one_or_none()
    if match is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Version {version_number} not found for recipe {recipe_id}",
        )
    return match
+429
View File
@@ -0,0 +1,429 @@
"""Report generation service — PDF reports via Jinja2 + Plotly/Kaleido + WeasyPrint."""
import base64
from datetime import datetime
from pathlib import Path
import plotly.graph_objects as go
import plotly.io as pio
from jinja2 import Environment, FileSystemLoader
from sqlalchemy import and_, select
from sqlalchemy.ext.asyncio import AsyncSession
from weasyprint import HTML
from src.backend.config import settings
from src.backend.models.orm.measurement import Measurement
from src.backend.models.orm.recipe import Recipe, RecipeVersion
from src.backend.models.orm.setting import SystemSetting
from src.backend.models.orm.task import RecipeSubtask, RecipeTask
from src.backend.models.orm.user import User
from src.backend.services.spc_service import (
compute_capability,
compute_control_chart,
compute_histogram,
compute_summary,
)
# Jinja2 environment for report templates.
# Resolves to <package root>/templates/reports relative to this module.
# Module-global on purpose: the report generators below render from it,
# so these names must stay stable. autoescape=True protects the HTML
# templates from injection via measurement/recipe text fields.
_templates_dir = Path(__file__).parent.parent / "templates" / "reports"
_jinja_env = Environment(
    loader=FileSystemLoader(str(_templates_dir)),
    autoescape=True,
)
async def _query_measurements(
    db: AsyncSession,
    recipe_id: int,
    version_id: int | None = None,
    subtask_id: int | None = None,
    date_from: datetime | None = None,
    date_to: datetime | None = None,
    operator_id: int | None = None,
    lot_number: str | None = None,
    serial_number: str | None = None,
) -> list[Measurement]:
    """Query measurements with common filters (duplicated from statistics router).

    When *version_id* is omitted, measurements from every version of
    *recipe_id* are included via an IN subquery. The remaining filters are
    optional and ANDed together. Results are ordered by measured_at asc.
    """
    conditions = []
    if version_id is not None:
        conditions.append(Measurement.version_id == version_id)
    else:
        all_version_ids = select(RecipeVersion.id).where(
            RecipeVersion.recipe_id == recipe_id
        )
        conditions.append(Measurement.version_id.in_(all_version_ids))

    if subtask_id is not None:
        conditions.append(Measurement.subtask_id == subtask_id)
    if date_from is not None:
        conditions.append(Measurement.measured_at >= date_from)
    if date_to is not None:
        conditions.append(Measurement.measured_at <= date_to)
    if operator_id is not None:
        conditions.append(Measurement.measured_by == operator_id)
    if lot_number is not None:
        conditions.append(Measurement.lot_number == lot_number)
    if serial_number is not None:
        conditions.append(Measurement.serial_number == serial_number)

    # conditions is never empty (the version filter is always appended),
    # but the original defensive fallback is kept.
    stmt = (
        select(Measurement)
        .where(and_(*conditions) if conditions else True)
        .order_by(Measurement.measured_at.asc())
    )
    rows = await db.execute(stmt)
    return list(rows.scalars().all())
async def _get_company_info(db: AsyncSession) -> dict:
    """Read company name and logo from system_settings.

    Returns {"company_name": str, "logo_base64": data-URI str | None}.
    The logo is inlined as a base64 data URI so the PDF renderer needs
    no filesystem access at render time.
    """
    async def _setting(key: str):
        # Fetch a single SystemSetting row by key (None when absent).
        row = await db.execute(
            select(SystemSetting).where(SystemSetting.setting_key == key)
        )
        return row.scalar_one_or_none()

    info = {"logo_base64": None, "company_name": "TieMeasureFlow"}

    name_setting = await _setting("company_name")
    if name_setting:
        info["company_name"] = name_setting.setting_value

    logo_setting = await _setting("company_logo_path")
    if logo_setting and logo_setting.setting_value:
        logo_path = settings.upload_path / logo_setting.setting_value
        if logo_path.exists():
            logo_bytes = logo_path.read_bytes()
            # Pick a MIME type from the extension; default to PNG.
            mime = {
                ".png": "image/png",
                ".jpg": "image/jpeg",
                ".jpeg": "image/jpeg",
                ".svg": "image/svg+xml",
            }.get(logo_path.suffix.lower(), "image/png")
            encoded = base64.b64encode(logo_bytes).decode()
            info["logo_base64"] = f"data:{mime};base64,{encoded}"
    return info
async def _get_recipe_info(db: AsyncSession, recipe_id: int) -> dict:
    """Return {"code", "name", "id"} for a recipe; placeholders when missing."""
    row = await db.execute(select(Recipe).where(Recipe.id == recipe_id))
    recipe = row.scalar_one_or_none()
    if recipe is None:
        return {"code": "???", "name": "Unknown", "id": recipe_id}
    return {"code": recipe.code, "name": recipe.name, "id": recipe.id}
async def _get_subtask_info(db: AsyncSession, subtask_id: int) -> dict | None:
    """Fetch a subtask's identity and tolerance values as plain floats.

    Returns None when the subtask does not exist. Numeric columns are
    converted to float (or None) for templating and plotting.
    """
    row = await db.execute(
        select(RecipeSubtask).where(RecipeSubtask.id == subtask_id)
    )
    subtask = row.scalar_one_or_none()
    if subtask is None:
        return None

    def _as_float(raw):  # numeric/None -> float/None
        return float(raw) if raw is not None else None

    return {
        "id": subtask.id,
        "marker_number": subtask.marker_number,
        "description": subtask.description,
        "unit": subtask.unit,
        "nominal": _as_float(subtask.nominal),
        "utl": _as_float(subtask.utl),
        "uwl": _as_float(subtask.uwl),
        "lwl": _as_float(subtask.lwl),
        "ltl": _as_float(subtask.ltl),
    }
async def _get_operator_map(db: AsyncSession, user_ids: set[int]) -> dict[int, str]:
    """Map each id in *user_ids* to a display name (username as fallback)."""
    if not user_ids:
        return {}
    rows = await db.execute(select(User).where(User.id.in_(user_ids)))
    return {
        row.id: row.display_name or row.username
        for row in rows.scalars().all()
    }
def _generate_control_chart_svg(chart_data, subtask_info: dict | None) -> str:
    """Render an SPC control chart as an SVG string via Plotly + Kaleido.

    Draws the measurement series, mean/UCL/LCL reference lines, optional
    UTL/LTL tolerance lines, and highlights out-of-control points.
    Returns "" when there are no values to plot.
    """
    if not chart_data.values:
        return ""

    figure = go.Figure()
    positions = list(range(1, len(chart_data.values) + 1))

    # Measurement series
    figure.add_trace(go.Scatter(
        x=positions, y=chart_data.values,
        mode="lines+markers",
        name="Values",
        line=dict(color="#2563EB", width=2),
        marker=dict(size=5),
    ))

    # Statistical reference lines: mean plus upper/lower control limits
    figure.add_hline(
        y=chart_data.mean,
        line=dict(color="#64748B", dash="dash", width=1.5),
        annotation_text=f"Mean: {chart_data.mean:.4f}",
    )
    figure.add_hline(
        y=chart_data.ucl,
        line=dict(color="#EF4444", dash="dot", width=1),
        annotation_text=f"UCL: {chart_data.ucl:.4f}",
    )
    figure.add_hline(
        y=chart_data.lcl,
        line=dict(color="#EF4444", dash="dot", width=1),
        annotation_text=f"LCL: {chart_data.lcl:.4f}",
    )

    # Engineering tolerance limits, when defined on the chart data
    if chart_data.utl is not None:
        figure.add_hline(
            y=chart_data.utl,
            line=dict(color="#DC2626", width=1.5),
            annotation_text=f"UTL: {chart_data.utl}",
        )
    if chart_data.ltl is not None:
        figure.add_hline(
            y=chart_data.ltl,
            line=dict(color="#DC2626", width=1.5),
            annotation_text=f"LTL: {chart_data.ltl}",
        )

    # Overlay out-of-control points as red crosses
    if chart_data.out_of_control:
        figure.add_trace(go.Scatter(
            x=[positions[i] for i in chart_data.out_of_control],
            y=[chart_data.values[i] for i in chart_data.out_of_control],
            mode="markers",
            name="Out of control",
            marker=dict(color="#EF4444", size=10, symbol="x"),
        ))

    figure.update_layout(
        title="Control Chart",
        xaxis_title="Measurement #",
        yaxis_title="Value",
        template="plotly_white",
        width=700, height=350,
        margin=dict(l=60, r=30, t=40, b=40),
        showlegend=False,
    )
    return pio.to_image(figure, format="svg", engine="kaleido").decode("utf-8")
def _generate_histogram_svg(hist_data, subtask_info: dict | None) -> str:
    """Render a measurement histogram as an SVG string via Plotly + Kaleido.

    Bars come from hist_data's bins/counts; a normal curve and the
    subtask's UTL/LTL/nominal vertical markers are overlaid when
    available. Returns "" when there are no bins.
    """
    if not hist_data.bins:
        return ""

    figure = go.Figure()

    # Bars at the bin centers, slightly narrowed to leave visible gaps
    edges = hist_data.bins
    centers = [(edges[i] + edges[i + 1]) / 2 for i in range(len(hist_data.counts))]
    bar_width = edges[1] - edges[0] if len(edges) > 1 else 1
    figure.add_trace(go.Bar(
        x=centers, y=hist_data.counts,
        width=bar_width * 0.9,
        marker_color="#2563EB",
        opacity=0.7,
        name="Frequency",
    ))

    # Fitted normal curve overlay
    if hist_data.normal_x and hist_data.normal_y:
        figure.add_trace(go.Scatter(
            x=hist_data.normal_x, y=hist_data.normal_y,
            mode="lines",
            name="Normal curve",
            line=dict(color="#EF4444", width=2),
        ))

    # Vertical tolerance / nominal markers from the subtask, when known
    if subtask_info:
        if subtask_info.get("utl") is not None:
            figure.add_vline(
                x=subtask_info["utl"],
                line=dict(color="#DC2626", width=1.5, dash="dash"),
                annotation_text="UTL",
            )
        if subtask_info.get("ltl") is not None:
            figure.add_vline(
                x=subtask_info["ltl"],
                line=dict(color="#DC2626", width=1.5, dash="dash"),
                annotation_text="LTL",
            )
        if subtask_info.get("nominal") is not None:
            figure.add_vline(
                x=subtask_info["nominal"],
                line=dict(color="#22C55E", width=1.5),
                annotation_text="Nom",
            )

    figure.update_layout(
        title="Histogram",
        xaxis_title="Value",
        yaxis_title="Frequency",
        template="plotly_white",
        width=700, height=350,
        margin=dict(l=60, r=30, t=40, b=40),
        bargap=0.05,
        showlegend=False,
    )
    return pio.to_image(figure, format="svg", engine="kaleido").decode("utf-8")
async def generate_spc_report(
    db: AsyncSession,
    recipe_id: int,
    subtask_id: int,
    version_id: int | None = None,
    date_from: datetime | None = None,
    date_to: datetime | None = None,
    operator_id: int | None = None,
    lot_number: str | None = None,
    serial_number: str | None = None,
) -> bytes:
    """Generate the SPC PDF report for one subtask and return its bytes.

    Pipeline: query measurements -> compute summary / capability /
    control chart / histogram -> render charts to SVG -> render the
    Jinja2 HTML template -> convert to PDF with WeasyPrint.
    """
    # 1. Measurements matching the requested filters
    measurements = await _query_measurements(
        db, recipe_id, version_id, subtask_id,
        date_from, date_to, operator_id, lot_number, serial_number,
    )

    # 2. SPC statistics over the filtered series
    raw_values = [float(m.value) for m in measurements]
    measured_times = [m.measured_at for m in measurements]
    summary = compute_summary([m.pass_fail for m in measurements])

    subtask_info = await _get_subtask_info(db, subtask_id)
    tol = subtask_info or {}  # all tolerance keys are None when subtask missing
    capability = compute_capability(
        raw_values, tol.get("utl"), tol.get("ltl"), tol.get("nominal")
    )
    control_chart = compute_control_chart(
        raw_values, measured_times,
        tol.get("utl"), tol.get("uwl"), tol.get("lwl"), tol.get("ltl"),
        tol.get("nominal"),
    )
    histogram = compute_histogram(raw_values)

    # 3. Chart SVGs
    control_chart_svg = _generate_control_chart_svg(control_chart, subtask_info)
    histogram_svg = _generate_histogram_svg(histogram, subtask_info)

    # 4. Header data (company branding + recipe identity)
    company = await _get_company_info(db)
    recipe_info = await _get_recipe_info(db, recipe_id)

    # 5. Human-readable description of the active filters
    filters_desc = []
    if version_id:
        filters_desc.append(f"Version: {version_id}")
    if date_from:
        filters_desc.append(f"From: {date_from.strftime('%Y-%m-%d')}")
    if date_to:
        filters_desc.append(f"To: {date_to.strftime('%Y-%m-%d')}")
    if lot_number:
        filters_desc.append(f"Lot: {lot_number}")
    if serial_number:
        filters_desc.append(f"Serial: {serial_number}")

    # 6. HTML via Jinja2, then 7. PDF via WeasyPrint
    html_content = _jinja_env.get_template("spc_report.html").render(
        company=company,
        recipe=recipe_info,
        subtask=subtask_info,
        summary=summary,
        capability=capability,
        control_chart_svg=control_chart_svg,
        histogram_svg=histogram_svg,
        filters_desc=filters_desc,
        generated_at=datetime.now(),
        n_measurements=len(measurements),
    )
    return HTML(string=html_content).write_pdf()
async def generate_measurement_report(
    db: AsyncSession,
    recipe_id: int,
    subtask_id: int | None = None,
    version_id: int | None = None,
    date_from: datetime | None = None,
    date_to: datetime | None = None,
    operator_id: int | None = None,
    lot_number: str | None = None,
    serial_number: str | None = None,
) -> bytes:
    """Render the measurement-table PDF report and return it as raw bytes.

    Queries the measurements matching the given filters, resolves subtask
    and operator display info, builds the numbered table rows plus a
    pass/fail summary, and renders the Jinja template to PDF.
    """
    # Fetch the filtered measurement set.
    measurements = await _query_measurements(
        db, recipe_id, version_id, subtask_id,
        date_from, date_to, operator_id, lot_number, serial_number,
    )

    # Report header info.
    recipe_info = await _get_recipe_info(db, recipe_id)
    company = await _get_company_info(db)

    # Resolve display info for every distinct subtask and operator seen.
    subtask_map: dict = {}
    for st_id in {m.subtask_id for m in measurements}:
        st_info = await _get_subtask_info(db, st_id)
        if st_info:
            subtask_map[st_id] = st_info
    operator_map = await _get_operator_map(
        db, {m.measured_by for m in measurements}
    )

    # One table row per measurement, numbered from 1.
    rows = []
    for num, m in enumerate(measurements, 1):
        info = subtask_map.get(m.subtask_id, {})
        rows.append({
            "num": num,
            "subtask": f"#{info.get('marker_number', '?')}{info.get('description', '?')}",
            "value": f"{float(m.value):.4f}",
            "unit": info.get("unit", "mm"),
            "pass_fail": m.pass_fail,
            "measured_at": m.measured_at.strftime("%Y-%m-%d %H:%M"),
            "operator": operator_map.get(m.measured_by, f"ID {m.measured_by}"),
            "lot_number": m.lot_number or "",
            "serial_number": m.serial_number or "",
        })

    # Aggregate pass/warning/fail statistics.
    summary = compute_summary([m.pass_fail for m in measurements])

    # Human-readable description of the active filters, in fixed order.
    filters_desc = []
    if subtask_id:
        point = subtask_map.get(subtask_id)
        if point:
            filters_desc.append(f"Point: #{point['marker_number']}{point['description']}")
    if version_id:
        filters_desc.append(f"Version: {version_id}")
    if date_from:
        filters_desc.append(f"From: {date_from.strftime('%Y-%m-%d')}")
    if date_to:
        filters_desc.append(f"To: {date_to.strftime('%Y-%m-%d')}")
    if lot_number:
        filters_desc.append(f"Lot: {lot_number}")
    if serial_number:
        filters_desc.append(f"Serial: {serial_number}")

    # Render HTML and convert it to PDF.
    html_content = _jinja_env.get_template("measurement_report.html").render(
        company=company,
        recipe=recipe_info,
        rows=rows,
        summary=summary,
        filters_desc=filters_desc,
        generated_at=datetime.now(),
        n_measurements=len(measurements),
    )
    return HTML(string=html_content).write_pdf()
+261
View File
@@ -0,0 +1,261 @@
"""SPC (Statistical Process Control) computation service.
Pure functions — no DB dependencies. Receives lists of floats and tolerance limits,
returns structured data matching the Pydantic schemas in src/backend/models/api/statistics.py.
Uses only Python stdlib (math, statistics). No numpy/scipy needed.
"""
import math
import statistics as stats
from datetime import datetime
from src.backend.models.api.statistics import (
CapabilityData,
ControlChartData,
HistogramData,
SummaryData,
)
def compute_summary(
    pass_fail_values: list[str],
) -> SummaryData:
    """Build a pass/warning/fail summary from per-measurement verdict strings.

    Args:
        pass_fail_values: Verdicts, each one of "pass", "warning", "fail".

    Returns:
        SummaryData with absolute counts and percentage rates rounded to
        two decimals; everything is zero for empty input.
    """
    total = len(pass_fail_values)
    counts = {
        verdict: pass_fail_values.count(verdict)
        for verdict in ("pass", "warning", "fail")
    }

    def rate(count: int) -> float:
        # Percentage of total, or 0.0 when there is no data at all.
        return round(count / total * 100, 2) if total else 0.0

    return SummaryData(
        total=total,
        pass_count=counts["pass"],
        warning_count=counts["warning"],
        fail_count=counts["fail"],
        pass_rate=rate(counts["pass"]),
        warning_rate=rate(counts["warning"]),
        fail_rate=rate(counts["fail"]),
    )
def compute_capability(
    values: list[float],
    utl: float | None,
    ltl: float | None,
    nominal: float | None,
) -> CapabilityData:
    """Compute the capability indices Cp, Cpk, Pp and Ppk.

    Cp/Cpk use the sample standard deviation, Pp/Ppk the population
    standard deviation (same data, no subgrouping):
    - Cp/Pp = (UTL - LTL) / (6 * sigma)
    - Cpk/Ppk = min((UTL - mean) / (3 * sigma), (mean - LTL) / (3 * sigma))

    Args:
        values: Measured values.
        utl: Upper tolerance limit, or None when undefined.
        ltl: Lower tolerance limit, or None when undefined.
        nominal: Nominal value, or None when undefined.

    Returns:
        CapabilityData; an index is None whenever the limits it needs are
        missing or the spread is zero. With fewer than two values no index
        can be computed at all.
    """
    n = len(values)
    if n < 2:
        # Not enough data for any dispersion estimate.
        return CapabilityData(
            cp=None, cpk=None, pp=None, ppk=None,
            mean=values[0] if n == 1 else 0.0,
            std_dev=0.0, n=n,
            utl=utl, ltl=ltl, nominal=nominal,
        )

    mean = stats.mean(values)
    sigma_sample = stats.stdev(values)  # basis for Cp / Cpk
    sigma_pop = stats.pstdev(values)    # basis for Pp / Ppk

    def spread_index(sigma: float) -> float | None:
        # (UTL - LTL) / 6*sigma; requires both limits and positive sigma.
        if utl is None or ltl is None or sigma <= 0:
            return None
        return round((utl - ltl) / (6 * sigma), 4)

    def centered_index(sigma: float) -> float | None:
        # Minimum of the defined one-sided distances, in 3*sigma units.
        if sigma <= 0:
            return None
        sides = []
        if utl is not None:
            sides.append((utl - mean) / (3 * sigma))
        if ltl is not None:
            sides.append((mean - ltl) / (3 * sigma))
        return round(min(sides), 4) if sides else None

    return CapabilityData(
        cp=spread_index(sigma_sample),
        cpk=centered_index(sigma_sample),
        pp=spread_index(sigma_pop),
        ppk=centered_index(sigma_pop),
        mean=round(mean, 6),
        std_dev=round(sigma_sample, 6),
        n=n,
        utl=utl, ltl=ltl, nominal=nominal,
    )
def compute_control_chart(
    values: list[float],
    timestamps: list[datetime],
    utl: float | None,
    uwl: float | None,
    lwl: float | None,
    ltl: float | None,
    nominal: float | None,
) -> ControlChartData:
    """Build individuals control-chart data with 3-sigma control limits.

    UCL/LCL are mean +/- 3 * sample standard deviation; every point that
    falls outside those limits is flagged as out of control.

    Args:
        values: Measured values in chronological order.
        timestamps: Timestamps matching ``values`` one-to-one.
        utl: Upper tolerance limit (passed through for plotting).
        uwl: Upper warning limit (passed through).
        lwl: Lower warning limit (passed through).
        ltl: Lower tolerance limit (passed through).
        nominal: Nominal value (passed through).

    Returns:
        ControlChartData with the series, the computed limits, and the
        indices of out-of-control points.
    """
    if not values:
        # Empty series: zeroed statistics, nothing to flag.
        return ControlChartData(
            values=[], timestamps=[], mean=0.0, ucl=0.0, lcl=0.0,
            utl=utl, uwl=uwl, lwl=lwl, ltl=ltl, nominal=nominal,
            out_of_control=[],
        )

    mean = stats.mean(values)
    sigma = stats.stdev(values) if len(values) >= 2 else 0.0
    ucl = mean + 3 * sigma
    lcl = mean - 3 * sigma

    # Indices of points lying beyond either control limit.
    out_of_control = [i for i, v in enumerate(values) if v > ucl or v < lcl]

    return ControlChartData(
        values=values,
        timestamps=timestamps,
        mean=round(mean, 6),
        ucl=round(ucl, 6),
        lcl=round(lcl, 6),
        utl=utl,
        uwl=uwl,
        lwl=lwl,
        ltl=ltl,
        nominal=nominal,
        out_of_control=out_of_control,
    )
def compute_histogram(
    values: list[float],
    n_bins: int = 20,
) -> HistogramData:
    """Bin measurements into a histogram and overlay a scaled normal curve.

    Args:
        values: Measured values.
        n_bins: Number of equal-width bins (default 20).

    Returns:
        HistogramData with bin edges (n_bins + 1 of them), per-bin counts,
        and normal-curve overlay points (empty when the data has no spread
        or there is no data at all).
    """
    n = len(values)
    if n == 0:
        return HistogramData(
            bins=[], counts=[], normal_x=[], normal_y=[],
            mean=0.0, std_dev=0.0, n=0,
        )

    mean = stats.mean(values)
    std_dev = stats.pstdev(values) if n >= 2 else 0.0
    lo = min(values)
    hi = max(values)
    if hi == lo:
        # Degenerate range: widen it so the bins keep a non-zero width.
        hi = lo + 1.0
    bin_width = (hi - lo) / n_bins

    # Bin edges and per-bin counts; the top edge is folded into the last bin.
    bins = [round(lo + i * bin_width, 6) for i in range(n_bins + 1)]
    counts = [0] * n_bins
    for v in values:
        counts[min(int((v - lo) / bin_width), n_bins - 1)] += 1

    # Normal-curve overlay (100 points over mean +/- 4 sigma), scaled by
    # n * bin_width so its area matches the histogram's.
    normal_x: list[float] = []
    normal_y: list[float] = []
    if std_dev > 0:
        points = 100
        x_lo = mean - 4 * std_dev
        x_hi = mean + 4 * std_dev
        step = (x_hi - x_lo) / (points - 1)
        for i in range(points):
            x = x_lo + i * step
            # Normal PDF at x.
            density = (1.0 / (std_dev * math.sqrt(2 * math.pi))) * math.exp(
                -0.5 * ((x - mean) / std_dev) ** 2
            )
            normal_x.append(round(x, 6))
            normal_y.append(round(density * n * bin_width, 4))

    return HistogramData(
        bins=bins,
        counts=counts,
        normal_x=normal_x,
        normal_y=normal_y,
        mean=round(mean, 6),
        std_dev=round(std_dev, 6),
        n=n,
    )
+155
View File
@@ -0,0 +1,155 @@
"""Business logic for stations and recipe assignments.
Routers must call into these functions rather than manipulating models directly.
All functions are async and accept an AsyncSession; they flush but do NOT commit
(commit is handled by the FastAPI get_db dependency).
"""
from typing import Optional
from fastapi import HTTPException, status
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from src.backend.models.orm.recipe import Recipe
from src.backend.models.orm.station import Station, StationRecipeAssignment
from src.backend.models.orm.user import User
from src.backend.models.api.station import StationCreate, StationUpdate
async def create_station(
    db: AsyncSession, data: StationCreate, creator: User,
) -> Station:
    """Create a station, enforcing a unique station code.

    Raises:
        HTTPException 409: when another station already uses ``data.code``.
    """
    clash = (
        await db.execute(select(Station).where(Station.code == data.code))
    ).scalar_one_or_none()
    if clash is not None:
        raise HTTPException(
            status_code=status.HTTP_409_CONFLICT,
            detail=f"Station code '{data.code}' already exists",
        )

    station = Station(
        code=data.code,
        name=data.name,
        location=data.location,
        notes=data.notes,
        active=data.active,
        created_by=creator.id,
    )
    db.add(station)
    # Flush to get the PK; committing is the get_db dependency's job.
    await db.flush()
    await db.refresh(station)
    return station
async def get_station(db: AsyncSession, station_id: int) -> Station:
    """Fetch a station by primary key.

    Raises:
        HTTPException 404: when no station has that id.
    """
    station = (
        await db.execute(select(Station).where(Station.id == station_id))
    ).scalar_one_or_none()
    if station is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND, detail="Station not found",
        )
    return station
async def get_station_by_code(db: AsyncSession, code: str) -> Optional[Station]:
    """Return the station with the given code, or None when absent."""
    return (
        await db.execute(select(Station).where(Station.code == code))
    ).scalar_one_or_none()
async def list_stations(db: AsyncSession, active_only: bool = False) -> list[Station]:
    """List stations ordered by code.

    Args:
        db: Active database session.
        active_only: When True, restrict the result to active stations.

    Returns:
        Stations sorted by their code.
    """
    query = select(Station).order_by(Station.code)
    if active_only:
        # .is_(True) is the idiomatic SQLAlchemy boolean test; it renders
        # the same filter as `== True` for a non-null boolean column and
        # removes the need for the E712 lint suppression.
        query = query.where(Station.active.is_(True))
    result = await db.execute(query)
    return list(result.scalars().all())
async def update_station(
    db: AsyncSession, station_id: int, data: StationUpdate,
) -> Station:
    """Apply the fields explicitly set on ``data`` to an existing station.

    Raises:
        HTTPException 404: when the station does not exist.
    """
    station = await get_station(db, station_id)
    # Only fields the caller actually sent; unset fields are untouched.
    changes = data.model_dump(exclude_unset=True)
    for name, value in changes.items():
        setattr(station, name, value)
    await db.flush()
    await db.refresh(station)
    return station
async def delete_station(db: AsyncSession, station_id: int) -> None:
    """Delete a station together with its recipe assignments.

    The assignments are removed through the ORM on purpose: the SQLite
    test database does not enforce FK CASCADE unless PRAGMA foreign_keys
    is ON, and while production MySQL cascades at the DB level, deleting
    them explicitly in the session behaves the same on every engine.

    Raises:
        HTTPException 404: when the station does not exist.
    """
    station = await get_station(db, station_id)

    linked = await db.execute(
        select(StationRecipeAssignment).where(
            StationRecipeAssignment.station_id == station_id
        )
    )
    for row in linked.scalars().all():
        await db.delete(row)

    await db.delete(station)
    await db.flush()
async def assign_recipe(
    db: AsyncSession, station_id: int, recipe_id: int, assigner: User,
) -> StationRecipeAssignment:
    """Assign a recipe to a station.

    Raises:
        HTTPException 404: when the station or the recipe does not exist.
        HTTPException 409: when the recipe is already assigned.
    """
    await get_station(db, station_id)  # raises 404 if the station is missing

    recipe = (
        await db.execute(select(Recipe).where(Recipe.id == recipe_id))
    ).scalar_one_or_none()
    if recipe is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND, detail="Recipe not found",
        )

    duplicate = (
        await db.execute(
            select(StationRecipeAssignment).where(
                StationRecipeAssignment.station_id == station_id,
                StationRecipeAssignment.recipe_id == recipe_id,
            )
        )
    ).scalar_one_or_none()
    if duplicate is not None:
        raise HTTPException(
            status_code=status.HTTP_409_CONFLICT,
            detail="Recipe already assigned to this station",
        )

    assignment = StationRecipeAssignment(
        station_id=station_id, recipe_id=recipe_id, assigned_by=assigner.id,
    )
    db.add(assignment)
    await db.flush()
    await db.refresh(assignment)
    return assignment
async def unassign_recipe(
    db: AsyncSession, station_id: int, recipe_id: int,
) -> None:
    """Remove a recipe assignment from a station.

    Raises:
        HTTPException 404: when no such assignment exists.
    """
    assignment = (
        await db.execute(
            select(StationRecipeAssignment).where(
                StationRecipeAssignment.station_id == station_id,
                StationRecipeAssignment.recipe_id == recipe_id,
            )
        )
    ).scalar_one_or_none()
    if assignment is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Assignment not found",
        )
    await db.delete(assignment)
    await db.flush()
async def list_station_recipes(
    db: AsyncSession, station_id: int,
) -> list[Recipe]:
    """Return active recipes assigned to this station, ordered by code.

    Raises:
        HTTPException 404: when the station does not exist.
    """
    await get_station(db, station_id)  # raises 404 if the station is missing
    result = await db.execute(
        select(Recipe)
        .join(StationRecipeAssignment, StationRecipeAssignment.recipe_id == Recipe.id)
        .where(
            StationRecipeAssignment.station_id == station_id,
            # Idiomatic SQLAlchemy boolean test; equivalent filter to
            # `== True` and removes the need for the E712 noqa.
            Recipe.active.is_(True),
        )
        .order_by(Recipe.code)
    )
    return list(result.scalars().all())