Compare commits
1 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 39208aadab |
@@ -226,6 +226,120 @@ class LineShapeMatcher:
|
|||||||
np.array(picked_y, np.int32),
|
np.array(picked_y, np.int32),
|
||||||
np.array(picked_b, np.int8))
|
np.array(picked_b, np.int8))
|
||||||
|
|
||||||
|
# --- Save / Load (Halcon-style write_shape_model / read_shape_model)
|
||||||
|
|
||||||
|
def save_model(self, path: str) -> None:
    """Save the trained matcher to disk (.npz format).

    Persists: parameters, template_gray, mask, and all pre-computed
    variants (with pyramid). Halcon-equivalent of write_shape_model.

    Use case: offline training on a workstation, deployment on a
    production-line machine without re-training (zero-second matching
    startup).

    Parameters
    ----------
    path : str
        Destination file; numpy appends ``.npz`` if it is missing.

    Raises
    ------
    RuntimeError
        If the matcher has not been trained (no variants present).
    """
    if not self.variants:
        raise RuntimeError("Modello non addestrato: chiamare train() prima.")

    # One metadata row per variant: ang, scale, kh, kw, cxl, cyl.
    n_vars = len(self.variants)
    n_levels = len(self.variants[0].levels)
    var_meta = np.zeros((n_vars, 6), dtype=np.float32)

    # Flatten variants into flat per-level arrays (npz does not handle
    # nested dataclasses well).  offsets_l{li} delimits each variant's
    # slice within the flat arrays.
    all_offsets_per_level = [[] for _ in range(n_levels)]
    all_dx_per_level = [[] for _ in range(n_levels)]
    all_dy_per_level = [[] for _ in range(n_levels)]
    all_bin_per_level = [[] for _ in range(n_levels)]
    for vi, var in enumerate(self.variants):
        var_meta[vi] = (
            var.angle_deg, var.scale, var.kh, var.kw,
            var.cx_local, var.cy_local,
        )
        for li, lvl in enumerate(var.levels):
            all_offsets_per_level[li].append(len(all_dx_per_level[li]))
            all_dx_per_level[li].extend(lvl.dx.tolist())
            all_dy_per_level[li].extend(lvl.dy.tolist())
            all_bin_per_level[li].extend(lvl.bin.tolist())
    for li in range(n_levels):
        # Closing sentinel so [offsets[vi], offsets[vi + 1]) also works
        # for the last variant.
        all_offsets_per_level[li].append(len(all_dx_per_level[li]))

    # BUG FIX: np.savez on a None value stores a pickled object array,
    # which load_model (allow_pickle=False) then refuses to read.  Store
    # empty arrays instead; load_model already treats size == 0 as
    # "absent" (see its ndim/size > 0 checks).
    template = (self.template_gray
                if self.template_gray is not None
                else np.zeros((0, 0), dtype=np.uint8))
    mask = (self._train_mask
            if self._train_mask is not None
            else np.zeros((0,), dtype=np.uint8))

    out = {
        "_format_version": np.array([1], dtype=np.int32),
        "params": np.array([
            self.num_features, self.weak_grad, self.strong_grad,
            self.angle_range_deg[0], self.angle_range_deg[1],
            self.angle_step_deg,
            self.scale_range[0], self.scale_range[1], self.scale_step,
            self.spread_radius, self.min_feature_spacing,
            self.pyramid_levels, self.top_score_factor,
            int(self.use_polarity),
        ], dtype=np.float64),
        "template_gray": template,
        "train_mask": mask,
        "var_meta": var_meta,
        "n_levels": np.array([n_levels], dtype=np.int32),
    }
    for li in range(n_levels):
        out[f"dx_l{li}"] = np.asarray(all_dx_per_level[li], dtype=np.int32)
        out[f"dy_l{li}"] = np.asarray(all_dy_per_level[li], dtype=np.int32)
        out[f"bin_l{li}"] = np.asarray(all_bin_per_level[li], dtype=np.int8)
        out[f"offsets_l{li}"] = np.asarray(all_offsets_per_level[li], dtype=np.int32)
    np.savez_compressed(path, **out)
@classmethod
def load_model(cls, path: str) -> "LineShapeMatcher":
    """Restore a trained matcher from an .npz archive written by save_model.

    Halcon-equivalent of read_shape_model. Bypasses train() entirely:
    production deployment is instantaneous.
    """
    blob = np.load(path, allow_pickle=False)

    # Rebuild the matcher with the exact training-time parameters.
    p = blob["params"]
    matcher = cls(
        num_features=int(p[0]),
        weak_grad=float(p[1]),
        strong_grad=float(p[2]),
        angle_range_deg=(float(p[3]), float(p[4])),
        angle_step_deg=float(p[5]),
        scale_range=(float(p[6]), float(p[7])),
        scale_step=float(p[8]),
        spread_radius=int(p[9]),
        min_feature_spacing=int(p[10]),
        pyramid_levels=int(p[11]),
        top_score_factor=float(p[12]),
        use_polarity=bool(int(p[13])),
    )

    # Empty arrays encode "absent" template/mask.
    tpl = blob["template_gray"]
    if tpl.ndim > 0 and tpl.size > 0:
        matcher.template_gray = tpl
        matcher.template_size = (tpl.shape[1], tpl.shape[0])  # (w, h)
    mask = blob["train_mask"]
    matcher._train_mask = mask if mask.size > 0 else None

    meta = blob["var_meta"]
    level_count = int(blob["n_levels"][0])
    # Per pyramid level: the offset table delimiting each variant's slice,
    # plus the flat dx / dy / bin feature arrays.
    per_level = [
        (blob[f"offsets_l{li}"], blob[f"dx_l{li}"],
         blob[f"dy_l{li}"], blob[f"bin_l{li}"])
        for li in range(level_count)
    ]

    matcher.variants = []
    for vi in range(meta.shape[0]):
        ang, scale, kh, kw, cxl, cyl = meta[vi]
        levels = []
        for offsets, dxs, dys, bins in per_level:
            lo = int(offsets[vi])
            hi = int(offsets[vi + 1])
            levels.append(_LevelFeatures(
                dx=dxs[lo:hi].copy(),
                dy=dys[lo:hi].copy(),
                bin=bins[lo:hi].copy(),
                n=hi - lo,
            ))
        matcher.variants.append(_Variant(
            angle_deg=float(ang), scale=float(scale),
            levels=levels, kh=int(kh), kw=int(kw),
            cx_local=float(cxl), cy_local=float(cyl),
        ))
    return matcher
||||||
def set_angle_range_around(
|
def set_angle_range_around(
|
||||||
self, center_deg: float, tolerance_deg: float,
|
self, center_deg: float, tolerance_deg: float,
|
||||||
) -> None:
|
) -> None:
|
||||||
|
|||||||
Reference in New Issue
Block a user