ecu-tests/deprecated/gen_lin_api.py
Hosam-Eldin Mostafa 90be834102 refactor: retire LIN API generator (move to deprecated/)
With AlmTester now the single contributor-facing API, the generator at
``scripts/gen_lin_api.py`` and its output at
``tests/hardware/_generated/`` have no live consumers — the previous
commit inlined the enum classes they used to provide into
``tests/hardware/alm_helpers.py``.

Moves both to ``deprecated/`` rather than deleting outright. The
deprecated layout is self-describing:

    deprecated/
      README.md          — retirement rationale + revival instructions
      gen_lin_api.py     — was scripts/gen_lin_api.py
      _generated/
        __init__.py
        lin_api.py       — last-emitted typed frame classes + IntEnums

A note in deprecated/README.md spells out the conditions that would
make reviving the generator worthwhile (a second ECU joins, the LDF
churns fast enough to make hand-syncing miss changes, mypy-in-CI gets
adopted) and the exact command to regenerate.

Docs:

- 22_generated_lin_api.md now leads with a retired-layer banner. The
  body is preserved as the design-of-record for the historical layer.
- 05_architecture_overview.md gets a refreshed "Test-side layering"
  Mermaid (AlmTester → FrameIO → LinInterface) plus a "retired layer"
  bullet pointing at deprecated/. The "Three independent entry points"
  section is annotated rather than removed — the gen_lin_api path
  there is now historical reference.

Verified: pytest --collect-only collects 87 tests; 40 unit + mock
tests still pass. The retirement is invisible to the live framework.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-05-15 01:24:12 +02:00

275 lines
8.5 KiB
Python

#!/usr/bin/env python3
"""Generate tests/hardware/_generated/lin_api.py from an LDF.

Reads an LDF via ldfparser, emits a single Python file containing:

- One ``IntEnum`` per ``Signal_encoding_types`` block that has logical values
- One class per pure-physical encoding type with PHY_MIN / PHY_MAX / SCALE /
  OFFSET / UNIT
- One class per frame with NAME / FRAME_ID / LENGTH / PUBLISHER / SIGNALS /
  SIGNAL_LAYOUT and classmethods ``send`` / ``receive`` / ``read_signal``
  that delegate to a ``FrameIO`` passed in by the caller
- A ``SIGNAL_ENCODINGS`` dict mapping signal name → encoding class

Generation rules and the rationale for this layer live in
``docs/22_generated_lin_api.md``.

Usage:
    python scripts/gen_lin_api.py vendor/4SEVEN_color_lib_test.ldf
    python scripts/gen_lin_api.py <ldf> --out path/to/out.py
"""
from __future__ import annotations
import argparse
import hashlib
import re
from pathlib import Path
from ldfparser import parse_ldf
GENERATOR_VERSION = 1
# --- name normalisation ----------------------------------------------------
def _pascal(name: str) -> str:
"""``ALM_Req_A`` -> ``AlmReqA``; ``LED_State`` -> ``LedState``.
Names without underscores pass through unchanged so already-PascalCase
identifiers like ``ColorConfigFrameRed`` survive intact.
"""
if "_" not in name:
return name
return "".join(p[:1].upper() + p[1:].lower() for p in name.split("_") if p)
def _enum_member(info: str) -> str:
"""LDF info text -> enum member name.
Steps: drop anything after the first ``(`` (parenthetical clarifications
that bloat the name), uppercase, collapse non-identifier runs to ``_``,
strip leading/trailing ``_``. Empty results fall back to ``VALUE``; names
starting with a digit get a ``V_`` prefix.
"""
head = info.split("(", 1)[0]
s = re.sub(r"[^A-Za-z0-9]+", "_", head).strip("_").upper()
if not s:
return "VALUE"
if s[0].isdigit():
return f"V_{s}"
return s
def _suffix_collisions(pairs):
"""If two entries share a member name, suffix all colliding entries with ``_0X<hex>``."""
counts = {}
for name, _ in pairs:
counts[name] = counts.get(name, 0) + 1
out = []
for name, value in pairs:
if counts[name] > 1:
out.append((f"{name}_0X{value:02X}", value))
else:
out.append((name, value))
return out
# --- ldfparser duck-typing -------------------------------------------------
# Avoid importing internal ldfparser.encoding classes so generator-side
# imports don't break across ldfparser revisions.
def _is_logical(converter) -> bool:
return hasattr(converter, "info") and hasattr(converter, "phy_value")
def _is_physical(converter) -> bool:
return hasattr(converter, "scale") and hasattr(converter, "offset")
def _encoding_kind(enc) -> str:
    """Classify an encoding type as ``"logical"``, ``"physical"``, or ``"mixed"``.

    An encoding with no logical converters falls through to ``"physical"``,
    even when it also has no physical converters.
    """
    converters = enc.get_converters()
    logical = any(_is_logical(c) for c in converters)
    physical = any(_is_physical(c) for c in converters)
    if logical:
        return "mixed" if physical else "logical"
    return "physical"
# --- emitters --------------------------------------------------------------
def emit_enum(enc) -> str:
    """Render one ``IntEnum`` class for an encoding with logical values.

    Members are sorted by raw value; colliding member names get a hex
    suffix via ``_suffix_collisions``. Physical ranges in a mixed encoding
    are preserved as trailing comments so the information is not dropped.
    """
    converters = enc.get_converters()
    members = _suffix_collisions(
        sorted(
            ((_enum_member(c.info), int(c.phy_value)) for c in converters if _is_logical(c)),
            key=lambda kv: kv[1],
        )
    )
    phys_notes = [
        f"    # physical_value {p.phy_min}..{p.phy_max} scale={p.scale} "
        f"offset={p.offset} unit={p.unit!r} — pass int directly"
        for p in converters
        if _is_physical(p)
    ]
    mixed_tag = " (logical + physical)" if phys_notes else ""
    out = [
        f"class {_pascal(enc.name)}(IntEnum):",
        f'    """Signal_encoding_types.{enc.name}{mixed_tag}"""',
    ]
    out += [f"    {member} = 0x{value:02X}" for member, value in members]
    out += phys_notes
    return "\n".join(out)
def emit_physical_class(enc) -> str:
    """Render a constants-only class for a pure-physical encoding type.

    Emits PHY_MIN / PHY_MAX / SCALE / OFFSET / UNIT from the first physical
    converter; multiple physical ranges in one encoding are rare, so only
    the first is emitted.

    Raises:
        ValueError: if the encoding has no physical converter at all (e.g.
            an LDF ``bcd_value``/``ascii_value``-only encoding, which
            ``_encoding_kind`` also classifies as "physical") — previously
            this surfaced as an unexplained ``IndexError``.
    """
    phys = [c for c in enc.get_converters() if _is_physical(c)]
    if not phys:
        raise ValueError(
            f"encoding type {enc.name!r} has no physical converter; "
            "cannot emit a physical constants class for it"
        )
    p = phys[0]  # multiple physical ranges in one encoding are rare
    return "\n".join([
        f"class {_pascal(enc.name)}:",
        f'    """Signal_encoding_types.{enc.name} (physical)."""',
        f"    PHY_MIN = {p.phy_min}",
        f"    PHY_MAX = {p.phy_max}",
        f"    SCALE = {p.scale}",
        f"    OFFSET = {p.offset}",
        f"    UNIT = {p.unit!r}",
    ])
def emit_frame(frame) -> str:
    """Render one typed frame class: identity constants, the signal layout,
    and classmethods that delegate to a caller-supplied ``FrameIO``.
    """
    # signal_map entries are (bit offset, signal); emit in bit order.
    ordered = sorted(frame.signal_map, key=lambda entry: entry[0])
    publisher = frame.publisher.name

    out = [
        f"class {_pascal(frame.name)}:",
        f'    """LDF frame {frame.name} — published by {publisher}."""',
        f'    NAME = "{frame.name}"',
        f"    FRAME_ID = 0x{frame.frame_id:02X}",
        f"    LENGTH = {frame.length}",
        f'    PUBLISHER = "{publisher}"',
        "    SIGNALS: tuple[str, ...] = (",
    ]
    out += [f'        "{sig.name}",' for _offset, sig in ordered]
    out.append("    )")
    out.append("    SIGNAL_LAYOUT: tuple[tuple[int, str, int], ...] = (")
    out += [f'        ({offset}, "{sig.name}", {sig.width}),' for offset, sig in ordered]
    out.append("    )")
    out += [
        "",
        "    @classmethod",
        '    def send(cls, fio: "FrameIO", **signals) -> None:',
        "        fio.send(cls.NAME, **signals)",
        "",
        "    @classmethod",
        '    def receive(cls, fio: "FrameIO", timeout: float = 1.0):',
        "        return fio.receive(cls.NAME, timeout=timeout)",
        "",
        "    @classmethod",
        "    def read_signal(",
        '        cls, fio: "FrameIO", signal: str, *,',
        "        timeout: float = 1.0, default=None,",
        "    ):",
        "        return fio.read_signal(cls.NAME, signal, timeout=timeout, default=default)",
    ]
    return "\n".join(out)
def emit_signal_encodings_map(ldf) -> str:
    """Render the ``SIGNAL_ENCODINGS`` dict mapping signal name to the
    generated encoding class; signals without an encoding are skipped.
    """
    entries = sorted(
        (sig.name, _pascal(sig.encoding_type.name))
        for sig in ldf.get_signals()
        if sig.encoding_type is not None
    )
    body = [f'    "{sig_name}": {enc_name},' for sig_name, enc_name in entries]
    return "\n".join(["SIGNAL_ENCODINGS: dict[str, type] = {", *body, "}"])
# --- main ------------------------------------------------------------------
def render(ldf_path: Path) -> str:
    """Parse *ldf_path* and return the complete generated module source.

    The output is: provenance header (source name, content hash, regen
    command, generator version), imports, then the encoding-type, frame,
    and signal-map sections separated by banner comments.
    """
    ldf = parse_ldf(str(ldf_path))
    # Short content hash lets a reader detect a stale generated file.
    src_hash = hashlib.sha256(ldf_path.read_bytes()).hexdigest()[:12]

    header = (
        f'"""AUTO-GENERATED from {ldf_path.name}\n'
        f"SHA256: {src_hash}\n"
        f"DO NOT EDIT — re-run: python scripts/gen_lin_api.py {ldf_path}\n"
        f"Generator version: {GENERATOR_VERSION}\n"
        '"""'
    )
    imports = "\n".join([
        "from __future__ import annotations",
        "",
        "from enum import IntEnum",
        "from typing import TYPE_CHECKING",
        "",
        "if TYPE_CHECKING:",
        "    from frame_io import FrameIO",
    ])
    # Logical or mixed encodings become IntEnums; pure-physical ones
    # become constants-only classes.
    encoding_sections = [
        emit_enum(enc)
        if _encoding_kind(enc) in ("logical", "mixed")
        else emit_physical_class(enc)
        for enc in ldf.get_signal_encoding_types()
    ]
    sections = [
        header,
        imports,
        "# === Encoding types ========================================================",
        *encoding_sections,
        "# === Frames ================================================================",
        *[emit_frame(frame) for frame in ldf.frames],
        "# === Signal → encoding map =================================================",
        emit_signal_encodings_map(ldf),
    ]
    return "\n\n\n".join(sections) + "\n"
def main() -> int:
    """CLI entry point: parse args, render the module, write it out.

    Returns 0 on success; exits via ``SystemExit`` with a message when the
    LDF path does not exist.
    """
    parser = argparse.ArgumentParser(description=__doc__.splitlines()[0])
    parser.add_argument("ldf", type=Path, help="Path to the LDF file")
    parser.add_argument(
        "--out",
        type=Path,
        default=Path("tests/hardware/_generated/lin_api.py"),
        help="Output path (default: %(default)s)",
    )
    args = parser.parse_args()

    if not args.ldf.is_file():
        raise SystemExit(f"LDF not found: {args.ldf}")

    rendered = render(args.ldf)
    args.out.parent.mkdir(parents=True, exist_ok=True)
    # The generated source contains non-ASCII ("—", "→"); force UTF-8 so the
    # write never depends on the platform locale encoding (cp1252 on Windows
    # would raise UnicodeEncodeError).
    args.out.write_text(rendered, encoding="utf-8")

    # Re-parse purely for the summary line; this is cheap relative to
    # generation and keeps render()'s signature a simple path -> str.
    ldf = parse_ldf(str(args.ldf))
    print(
        f"wrote {args.out} "
        f"({len(ldf.frames)} frames, "
        f"{len(list(ldf.get_signal_encoding_types()))} encoding types)"
    )
    return 0


if __name__ == "__main__":
    raise SystemExit(main())