From 5a5b0c956387ca5c70565ebb644de4efbcdf91d6 Mon Sep 17 00:00:00 2001 From: Hosam-Eldin Mostafa Date: Tue, 12 May 2026 01:07:00 +0200 Subject: [PATCH] Add docker file for test framework with documentation --- .dockerignore | 49 ++ docker/Dockerfile | 106 ++++ docker/README.md | 496 +++++++++++++++ docker/compose.hw.yml | 59 ++ docs/20_docker_image.md | 538 ++++++++++++++++ docs/21_yocto_image_for_raspberry_pi.md | 800 ++++++++++++++++++++++++ docs/README.md | 2 + 7 files changed, 2050 insertions(+) create mode 100644 .dockerignore create mode 100644 docker/Dockerfile create mode 100644 docker/README.md create mode 100644 docker/compose.hw.yml create mode 100644 docs/20_docker_image.md create mode 100644 docs/21_yocto_image_for_raspberry_pi.md diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..5234a53 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,49 @@ +# Build-context excludes for docker/Dockerfile. +# Keeps the image small and prevents proprietary / generated content +# from sneaking in. 
+ +# Local venv (we build a fresh one inside the image) +.venv/ +venv/ + +# Generated test artifacts — produced inside the container, not from outside +reports/* +!reports/.gitkeep +!reports/README.keep +htmlcov/ +.coverage +.coverage.* + +# Python caches +__pycache__/ +*.py[cod] +*.egg-info/ +.pytest_cache/ +.mypy_cache/ +.ruff_cache/ + +# IDE / OS +.git/ +.gitignore +.vscode/ +.idea/ +.DS_Store +Thumbs.db +*.swp + +# Documentation builds (not docs source — keep that) +docs/_build/ + +# Deprecated BabyLIN SDK + native libs (would balloon image + leak proprietary code) +vendor/BabyLIN library/ +vendor/BabyLIN_library.py +vendor/BLCInterfaceExample.py +vendor/mock_babylin_wrapper.py +vendor/*.sdf +vendor/Example.sdf + +# Other artifacts you don't want round-tripping into the image +melexis-pkgs.tar.gz + +# Docker itself doesn't need to copy its own files into the image +docker/ diff --git a/docker/Dockerfile b/docker/Dockerfile new file mode 100644 index 0000000..fbb3500 --- /dev/null +++ b/docker/Dockerfile @@ -0,0 +1,106 @@ +# syntax=docker/dockerfile:1.6 +# +# ecu-tests image — mock-only by default, hardware variant via: +# --build-arg INCLUDE_MELEXIS=1 +# +# Build context = repository root. Always invoke from there: +# +# docker build -f docker/Dockerfile -t ecu-tests:mock . +# +# DOCKER_BUILDKIT=1 docker build \ +# -f docker/Dockerfile -t ecu-tests:hw \ +# --build-arg INCLUDE_MELEXIS=1 \ +# --secret id=melexis_tarball,src=./melexis-pkgs.tar.gz \ +# . +# +# See docs/20_docker_image.md for the full reference, including how +# to produce melexis-pkgs.tar.gz from a licensed Melexis IDE install. 
+ +ARG PYTHON_VERSION=3.11 + +# ────────────────────────────────────────────────────────────────────── +# Stage 1: builder — install deps into a venv under /opt/venv +# ────────────────────────────────────────────────────────────────────── +FROM python:${PYTHON_VERSION}-slim AS builder + +ARG INCLUDE_MELEXIS=0 + +# Build-time OS deps: +# build-essential, libffi-dev — for any wheel that needs to compile +# libusb-1.0-0 — pyserial uses it on some adapters +# git — VCS deps in requirements.txt (if any) +RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + build-essential \ + libffi-dev \ + libusb-1.0-0 \ + git \ + && rm -rf /var/lib/apt/lists/* + +ENV PYTHONDONTWRITEBYTECODE=1 \ + PIP_NO_CACHE_DIR=1 \ + PIP_DISABLE_PIP_VERSION_CHECK=1 + +RUN python -m venv /opt/venv +ENV PATH="/opt/venv/bin:${PATH}" + +WORKDIR /build +COPY requirements.txt ./ +RUN pip install --upgrade pip wheel \ + && pip install -r requirements.txt + +# Melexis packages — passed in via BuildKit secret so the proprietary +# tarball never lands in an image layer. Skipped entirely when +# INCLUDE_MELEXIS=0 (the mock-only path). +RUN --mount=type=secret,id=melexis_tarball,required=false \ + if [ "$INCLUDE_MELEXIS" = "1" ]; then \ + set -e; \ + test -s /run/secrets/melexis_tarball \ + || { echo 'INCLUDE_MELEXIS=1 but no melexis_tarball secret bound'; exit 2; }; \ + SITE_PACKAGES=$(python -c "import site; print(site.getsitepackages()[0])"); \ + tar -xzf /run/secrets/melexis_tarball -C "$SITE_PACKAGES"; \ + python -c "import pylin, pymumclient; print('melexis pkgs OK')"; \ + fi + + +# ────────────────────────────────────────────────────────────────────── +# Stage 2: runtime — slim image with the venv + repo +# ────────────────────────────────────────────────────────────────────── +FROM python:${PYTHON_VERSION}-slim AS runtime + +# Runtime-only OS deps. tini handles signal forwarding so Ctrl-C tears +# pytest down cleanly. 
+RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + libusb-1.0-0 \ + ca-certificates \ + tini \ + && rm -rf /var/lib/apt/lists/* + +# Pull the prebuilt venv (with Melexis pkgs if requested) from builder. +COPY --from=builder /opt/venv /opt/venv + +ENV PYTHONDONTWRITEBYTECODE=1 \ + PYTHONUNBUFFERED=1 \ + PATH="/opt/venv/bin:${PATH}" + +# The repo. .dockerignore at the build-context root excludes .venv, +# reports/, vendor/BabyLIN*, __pycache__, etc. +WORKDIR /workspace +COPY . /workspace + +# Reports volume so artifacts survive the container's lifetime. +RUN mkdir -p /reports +VOLUME ["/reports"] + +# Drop root. Inherit the host's serial group at runtime via +# `--group-add dialout` when you bind-mount /dev/ttyUSB*. +RUN useradd -m -u 1000 -s /bin/bash tester \ + && chown -R tester:tester /workspace /reports +USER tester + +ENTRYPOINT ["/usr/bin/tini", "--"] + +# Safe default: collect-only of the non-hardware suite. An accidental +# `docker run ecu-tests:hw` will list tests, not fire bench actions. +CMD ["pytest", "-m", "not hardware", "--collect-only", "-q"] diff --git a/docker/README.md b/docker/README.md new file mode 100644 index 0000000..89c9444 --- /dev/null +++ b/docker/README.md @@ -0,0 +1,496 @@ +# Docker — quick reference + +Full reference: [`docs/20_docker_image.md`](../docs/20_docker_image.md). +This file is just the copy-paste commands. + +| File | What it is | +|---|---| +| `Dockerfile` | Multi-stage image. Mock-only by default; hardware variant via `--build-arg INCLUDE_MELEXIS=1` + a BuildKit secret carrying the Melexis Python packages. | +| `compose.hw.yml` | docker-compose service for the hardware variant — host networking, USB device passthrough, reports volume, bench-config bind mount. | +| `../.dockerignore` | Excludes `.venv/`, `reports/*`, the deprecated BabyLIN SDK, generated caches, etc. 
| 

All commands below assume you're running them **from the repo root**
inside a WSL2 distro (Ubuntu / Debian / …) with `docker` already on
the `$PATH`. If `docker --version` doesn't work yet, install it first
— see [§ Prerequisites](#prerequisites--install-docker-on-wsl).

---

## Prerequisites — install Docker on WSL

If `docker --version` already prints a version, skip this section.

Two install paths. **Option A** (Docker Desktop) is the easy one
that most teams use. **Option B** (Docker Engine directly inside
WSL2) is for environments where Docker Desktop's licensing or
policies are blocked.

### Option A — Docker Desktop on Windows (WSL2 backend, recommended)

The Windows host runs Docker Desktop; your WSL2 distro talks to it.
You install the daemon in exactly one place (Windows) and it appears
seamlessly inside every enabled WSL distro.

1. **Make sure WSL2 is current** (Windows PowerShell, admin):
   ```powershell
   wsl --install # no-op if WSL is already there
   wsl --set-default-version 2
   wsl --update
   ```
   Reboot if Windows prompts.

2. **Verify your distro is on WSL 2** (not WSL 1):
   ```powershell
   wsl -l -v
   ```
   The `VERSION` column should read `2` for your distro. If it shows
   `1`, convert it: `wsl --set-version <distro-name> 2` (e.g.
   `wsl --set-version Ubuntu 2`).

3. **Install Docker Desktop**:
   - Download Docker Desktop for Windows from
     https://www.docker.com/products/docker-desktop/.
   - During install, leave **"Use WSL 2 instead of Hyper-V"** ticked.
   - Launch Docker Desktop after install completes.

4. **Enable WSL integration** (one-time):
   - Docker Desktop → **Settings** → **Resources** → **WSL Integration**.
   - Toggle on integration for every WSL distro you'll run `docker`
     from (Ubuntu, Debian, …).
   - Click **Apply & Restart**.

5. **Verify from inside WSL**:
   ```bash
   docker --version
   docker run --rm hello-world
   ```
   `hello-world` should print "Hello from Docker!" and exit 0. 
+ +### Option B — Docker Engine inside WSL2 (no Docker Desktop) + +Use this when Docker Desktop isn't allowed (corporate / license +policy) or when you want a single isolated Linux install. + +1. **Enable `systemd` in WSL2** (Docker's daemon expects it). In + your WSL distro edit `/etc/wsl.conf`: + ```ini + [boot] + systemd=true + ``` + Then from Windows PowerShell: + ```powershell + wsl --shutdown + ``` + Reopen the WSL terminal; check `systemctl --version` runs. + +2. **Install Docker Engine** (Ubuntu / Debian example — Docker's + official apt repo): + ```bash + # Remove anything old that might shadow the new install + sudo apt-get remove -y docker docker-engine docker.io containerd runc 2>/dev/null || true + + # Add Docker's apt key + repo + sudo apt-get update + sudo apt-get install -y ca-certificates curl gnupg lsb-release + sudo install -m 0755 -d /etc/apt/keyrings + curl -fsSL https://download.docker.com/linux/ubuntu/gpg \ + | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg + echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] \ + https://download.docker.com/linux/ubuntu $(. /etc/os-release && echo $VERSION_CODENAME) stable" \ + | sudo tee /etc/apt/sources.list.d/docker.list >/dev/null + + # Install + sudo apt-get update + sudo apt-get install -y docker-ce docker-ce-cli containerd.io \ + docker-buildx-plugin docker-compose-plugin + ``` + +3. **Run without `sudo`**: + ```bash + sudo usermod -aG docker $USER + ``` + Log out and back into WSL (or `exec su -l $USER`) so the new group + membership takes effect. + +4. **Start the daemon**: + ```bash + sudo systemctl enable --now docker + ``` + +5. **Verify**: + ```bash + docker --version + docker run --rm hello-world + ``` + +### Hardware-only — pass the Owon PSU USB device into WSL with `usbipd-win` + +The mock image needs nothing beyond Docker itself. The hardware +image needs the Owon PSU's USB-serial adapter exposed inside WSL2. 
+Windows doesn't share USB devices with WSL2 out of the box; the +de-facto bridge is [`usbipd-win`](https://github.com/dorssel/usbipd-win). + +1. **Install `usbipd-win` on Windows** (PowerShell, admin): + ```powershell + winget install --interactive --exact dorssel.usbipd-win + ``` + Reboot. + +2. **List USB devices** to find the BUSID of the serial adapter: + ```powershell + usbipd list + ``` + Look for a row that describes your adapter — "USB Serial", + "CH340", "FT232", "Owon" — and note its `BUSID` (e.g. `2-3`). + +3. **Bind the device** (one-time per device, admin): + ```powershell + usbipd bind --busid 2-3 + ``` + +4. **Attach the device to WSL** (every time you plug it in, normal + user): + ```powershell + usbipd attach --wsl --busid 2-3 + ``` + +5. **Confirm it appeared inside WSL**: + ```bash + ls /dev/ttyUSB* + ``` + You should see `/dev/ttyUSB0` (or similar). That's the path you + pass to `docker run --device /dev/ttyUSB0:/dev/ttyUSB0`. + +If you want the device to re-attach automatically every time you +plug it in, use `usbipd attach --auto-attach --wsl --busid 2-3` +(consult `usbipd --help` for the full set of options). + +### MUM network access (192.168.7.2) + +The MUM presents itself as a **USB-RNDIS Ethernet adapter** on +Windows. With Docker Desktop's WSL2 backend, `--network host` in +the container reaches the MUM automatically — no extra setup beyond +plugging the MUM in and seeing it appear in `ipconfig` (it should +add an interface with a 192.168.7.x address on the Windows side). + +If you went with Option B (Engine in WSL2), the MUM still works +because the WSL2 distro shares the Windows network stack for +host-mode containers. + +### Sanity check before the hardware run + +```bash +# Docker reachable from WSL? +docker version + +# USB-serial visible in WSL? +ls -la /dev/ttyUSB* + +# MUM reachable? +ping -c 2 192.168.7.2 +``` + +If all three succeed you're ready for the hardware run below. 
+ +--- + +## Mock-only image (CI-ready, no hardware needed) + +### Build + +```bash +docker build -f docker/Dockerfile -t ecu-tests:mock . +``` + +### Run the mock suite + +```bash +mkdir -p reports + +docker run --rm \ + -v "$PWD/reports:/reports" \ + ecu-tests:mock \ + pytest -m "not hardware" -v \ + --junitxml=/reports/junit.xml \ + --html=/reports/report.html --self-contained-html +``` + +When the container exits, `reports/report.html` and +`reports/junit.xml` are on the host. Open the HTML report: + +```bash +xdg-open reports/report.html # Linux +open reports/report.html # macOS +start reports\report.html # Windows +``` + +### Interactive shell + +```bash +docker run --rm -it -v "$PWD:/workspace" ecu-tests:mock bash +``` + +Edit files on the host, run `pytest` inside the container — code +changes show up immediately. + +--- + +## Hardware image (real bench) + +### One-time setup — Melexis packages + +`pylin` / `pymumclient` / `pylinframe` ship inside the Melexis IDE, +not on PyPI. Bundle them into a tarball that you'll pass as a +BuildKit secret: + +```bash +# Adjust the path to where Melexis IDE is installed +MELEXIS_SITE="/mnt/c/Program Files/Melexis/Melexis IDE/plugins/com.melexis.mlxide.python_1.2.0.202408130945/python/Lib/site-packages" + +tar -czf melexis-pkgs.tar.gz \ + -C "$MELEXIS_SITE" \ + pylin pymumclient pylinframe +``` + +The tarball is gitignored (see `.dockerignore`) and never enters +any image layer — BuildKit's `--mount=type=secret` only exposes it +to the single `RUN` step that copies the packages into +`/opt/venv/lib/python3.x/site-packages/`. + +> **License**: the resulting image contains proprietary Melexis +> code. Treat it like the Melexis IDE itself — keep it on a private +> registry, not Docker Hub. + +### Build + +```bash +DOCKER_BUILDKIT=1 docker build \ + -f docker/Dockerfile -t ecu-tests:hw \ + --build-arg INCLUDE_MELEXIS=1 \ + --secret id=melexis_tarball,src=./melexis-pkgs.tar.gz \ + . 
+``` + +Verify the Melexis packages landed inside the image: + +```bash +docker run --rm ecu-tests:hw \ + python -c "import pylin, pymumclient, pylinframe; print('OK')" +``` + +### Run the hardware suite — direct `docker run` + +```bash +docker run --rm \ + --network host \ + --device /dev/ttyUSB0:/dev/ttyUSB0 \ + --group-add dialout \ + -v "$PWD/reports:/reports" \ + -v "$PWD/config/test_config.yaml:/workspace/config/test_config.yaml:ro" \ + -e ECU_TESTS_CONFIG=/workspace/config/test_config.yaml \ + ecu-tests:hw \ + pytest -m "hardware and mum and not slow" -v \ + --junitxml=/reports/junit.xml \ + --html=/reports/report.html --self-contained-html +``` + +The flags, in plain English: + +| Flag | Reason | +|---|---| +| `--network host` | MUM is at `192.168.7.2` via USB-RNDIS on the host; bridge networking would hide it. | +| `--device /dev/ttyUSB0:/dev/ttyUSB0` | Pass the Owon PSU's USB-serial device into the container. Adjust to whatever `ls /dev/ttyUSB*` shows on the host. | +| `--group-add dialout` | Without it, the `tester` user can't open the serial device. | +| `-v config/test_config.yaml:…:ro` | Tweak bench config without rebuilding the image. | + +### Run via docker-compose + +```bash +docker compose -f docker/compose.hw.yml build +docker compose -f docker/compose.hw.yml up --abort-on-container-exit +``` + +Same effect as the `docker run` above, but the parameters are +checked into `compose.hw.yml` so all you remember is the file path. 
+ +### Iteration — edit-on-host, run-in-container + +```bash +docker run --rm -it \ + --network host \ + --device /dev/ttyUSB0:/dev/ttyUSB0 \ + --group-add dialout \ + -v "$PWD:/workspace" \ + -v "$PWD/reports:/reports" \ + ecu-tests:hw \ + bash +``` + +Inside the container: + +```bash +# Run a specific file +pytest tests/hardware/test_overvolt.py -v -s + +# Or one parametrized case +pytest "tests/hardware/test_overvolt.py::test_template_voltage_status_parametrized[overvoltage]" -v -s + +# Or the settle characterization +pytest -m psu_settling -v -s +``` + +--- + +## Where everything lives + +After `docker build` and `docker run`, three different stores hold +three different things. Knowing the difference saves time when you +want to find a report, free disk space, or confirm "did my build +actually succeed?" + +| Thing | Lives where | How you access it | +|---|---|---| +| The **image** | Docker daemon's content-addressed layer store. Not a single file. | `docker images`, `docker inspect`, `docker history` | +| A **running / stopped container** | Daemon's runtime state. Ephemeral when `--rm` is used. | `docker ps`, `docker ps -a`, `docker logs`, `docker exec` | +| The **test reports** | Host filesystem at `./reports/`, via the `-v` bind-mount in every run command. Survives container deletion. | `ls reports/`, open `reports/report.html` | + +### The image + +You **don't** navigate to it as files — query it through `docker`: + +```bash +docker images # all images on this daemon +docker images ecu-tests # just the ones tagged ecu-tests +docker inspect ecu-tests:mock # full metadata (JSON) +docker history ecu-tests:mock # layer-by-layer breakdown +``` + +The on-disk location is daemon-internal: + +| Host setup | Backing store | +|---|---| +| Native Docker Engine on Linux (Option B in the install section) | `/var/lib/docker/overlay2/…` | +| Docker Desktop + WSL2 (Option A) | Inside a hidden WSL2 distro `docker-desktop-data`. 
Windows side: `%LOCALAPPDATA%\Docker\wsl\disk\docker_data.vhdx`. **Don't poke directly** — always use the `docker` CLI. |

Images persist across reboots until you delete them:

```bash
docker rmi ecu-tests:mock # one image
docker system prune -a # everything unused (careful)
docker system df # what's eating disk
```

### A running container

`docker run …` creates a container from the image. The container has
its own writable filesystem layer on top of the image's read-only
layers. The image is unchanged when the container exits.

```bash
docker ps # running right now
docker ps -a # all, including exited
docker logs <container> # captured stdout / stderr
docker exec -it <container> bash # shell into a still-running container
```

Every run command in this README uses `--rm`, so the container is
deleted the moment it exits. The **image** stays. The **reports**
(see below) stay too because they're on the host filesystem, not
inside the container.

### Inside the container — what the Dockerfile lays out

```
/ (container root)
├── opt/
│ └── venv/ ← Python venv with all pip-installed deps
├── workspace/ ← the repo, copied in at build time
│ ├── ecu_framework/
│ ├── tests/
│ ├── config/
│ └── …
├── reports/ ← mount point for the host's ./reports/
└── home/tester/ ← unprivileged user home (uid 1000)
```

Peek at the layout from a throwaway container:

```bash
docker run --rm -it ecu-tests:mock bash
# inside:
ls /workspace
ls /opt/venv/bin
which pytest
```

`/workspace` is a **frozen snapshot of the repo from the moment you
ran `docker build`**. Edits to files on the host afterwards do NOT
show up inside the image — unless you bind-mount the repo at run
time:

```bash
docker run --rm -it -v "$PWD:/workspace" ecu-tests:mock bash
```

(That's exactly what the "Iteration" example does.) 
+ +### Reports on the host — what you actually look at + +Every `docker run` command in this README includes a bind-mount: + +``` +-v "$PWD/reports:/reports" +``` + +The container writes its outputs to `/reports/`; the daemon's +bind-mount makes those writes show up on the host at `./reports/` +in your repo. After the container exits, the files are still there: + +``` +/ +└── reports/ + ├── report.html ← open this in a browser + ├── junit.xml ← machine-readable for CI + ├── summary.md + └── requirements_coverage.json +``` + +`--rm` deletes the container; it does **not** touch the bind-mounted +host directory. + +### Three commands cover 95% of "where is it?" + +```bash +docker images ecu-tests # is the image there? +docker run --rm -v "$PWD/reports:/reports" \ + ecu-tests:mock pytest -m "not hardware" -q +ls reports/ # outputs landed where? +``` + +--- + +## Platform notes + +- **Linux**: works as shown above. +- **WSL2 (Windows)**: USB devices need `usbipd-win` to bind them + into the WSL2 distro; from there they appear as `/dev/ttyUSB0` + exactly like on native Linux. Docker Desktop bridges WSL2 to the + host network, so `--network host` reaches the MUM normally. +- **macOS Docker Desktop**: USB passthrough is **not** supported. + Workaround is to run a TCP-to-serial bridge on the host + (`socat`) and have the container connect to that — fiddly, + documented in `docs/20_docker_image.md` §4.3 as a non-default + path. 
+ +--- + +## Troubleshooting + +| Symptom | Likely cause | Fix | +|---|---|---| +| `ModuleNotFoundError: No module named 'pylin'` | Image built without `INCLUDE_MELEXIS=1` | Rebuild with the build-arg + secret | +| `Permission denied: '/dev/ttyUSB0'` | Missing `--group-add dialout` | Add it (or the group that owns the device on the host) | +| MUM unreachable at 192.168.7.2 | Bridge network instead of host network | Add `--network host` (Linux); on macOS see §4.3 | +| Empty `reports/` after run | `/reports` not bind-mounted | Add `-v "$PWD/reports:/reports"` | +| HTML report missing styling | Forgot `--self-contained-html` | Pytest renders the report without inlined CSS otherwise | + +See [`docs/20_docker_image.md`](../docs/20_docker_image.md) §8 for +the full table. diff --git a/docker/compose.hw.yml b/docker/compose.hw.yml new file mode 100644 index 0000000..c9adb10 --- /dev/null +++ b/docker/compose.hw.yml @@ -0,0 +1,59 @@ +# docker-compose configuration for the hardware variant. +# +# Usage (from repository root): +# +# # One-time: stage the Melexis packages alongside this file. +# # See docs/20_docker_image.md §5. +# tar -czf melexis-pkgs.tar.gz \ +# -C "/path/to/Melexis/site-packages" \ +# pylin pymumclient pylinframe +# +# # Build +# docker compose -f docker/compose.hw.yml build +# +# # Run hardware suite once and exit +# docker compose -f docker/compose.hw.yml up --abort-on-container-exit +# +# Adjust /dev/ttyUSB0 to whatever the Owon PSU enumerates as on the host. + +services: + ecu-tests: + image: ecu-tests:hw + build: + context: .. # build context = repo root + dockerfile: docker/Dockerfile + args: + INCLUDE_MELEXIS: "1" + secrets: + - melexis_tarball + + # MUM at 192.168.7.2 is exposed by the host's USB-RNDIS interface. + # Bridge networking would hide it; host mode shares the namespace. + network_mode: host + + # Owon PSU passthrough. List every USB-serial adapter the bench + # uses here; the framework's resolver will pick the right one. 
+ devices: + - "/dev/ttyUSB0:/dev/ttyUSB0" + + # The container user (uid 1000, 'tester') must be in the host + # group that owns the serial device — typically 'dialout' on + # Debian-style systems. + group_add: + - dialout + + volumes: + - ../reports:/reports # report outputs + - ../config/test_config.yaml:/workspace/config/test_config.yaml:ro # bench config (read-only) + + environment: + ECU_TESTS_CONFIG: /workspace/config/test_config.yaml + + command: > + pytest -m "hardware and mum and not slow" -v + --junitxml=/reports/junit.xml + --html=/reports/report.html --self-contained-html + +secrets: + melexis_tarball: + file: ../melexis-pkgs.tar.gz diff --git a/docs/20_docker_image.md b/docs/20_docker_image.md new file mode 100644 index 0000000..488b634 --- /dev/null +++ b/docs/20_docker_image.md @@ -0,0 +1,538 @@ +# Docker Image for the ECU Test Framework + +This guide covers packaging the framework into a Docker image so it +can run as a reproducible unit on developer laptops, CI runners, and +host machines that talk to a real bench. + +There are **two distinct images** to keep separate in your head: + +| Image | Purpose | Hardware? | Where it runs | +|---|---|---|---| +| **`ecu-tests:mock`** | Unit tests, mock-LIN smoke tests, plugin self-tests, doc/coverage generation | None | Any developer laptop, CI runner | +| **`ecu-tests:hw`** | Real-bench tests against a MUM and/or an Owon PSU | Yes (USB serial, network reachable MUM) | Lab machine attached to the bench | + +The two share the same Dockerfile and a build-arg switch — the +hardware variant adds device-passthrough config and the Melexis +packages. + +--- + +## 1. Why dockerize? 
+ +| Pain | What the image fixes | +|---|---| +| "Works on my machine" — different pyserial, ldfparser, pytest versions | Pinned `requirements.txt`, frozen base image, deterministic build | +| Onboarding a new developer takes a day | `docker run …` and you're testing | +| CI flake from a system Python upgrade | Image is the unit, CI doesn't care about the runner's Python | +| Auditors / security ask "what software runs on the bench?" | A single OCI artifact with a known digest | + +What dockerization **does not** fix: + +- It does not get you the Melexis `pylin` / `pymumclient` / + `pylinframe` packages. Those are **not on PyPI**; they ship inside + the Melexis IDE installer. You have to provide them at build time + (see §5). +- It does not magically pass USB devices through. Hardware tests + need explicit `--device` flags (see §4). +- It does not paper over OS-level requirements (host network mode + on Linux, USB/IP on Windows/WSL, etc.). + +--- + +## 2. Architecture + +``` + ┌──────────────────────┐ + │ Owon PSU │ + ┌─── --device /dev/ttyUSB0 ─┤ /dev/ttyUSB0 etc. │ + │ └──────────────────────┘ +┌───────────────┴───────────────┐ +│ ecu-tests:hw container │ +│ │ +│ /workspace │ ┌──────────────────────┐ +│ ├── ecu_framework/ │ │ MUM │ +│ ├── tests/ │ │ 192.168.7.2 (RNDIS) │ +│ ├── config/ ◄───┤ │ +│ └── vendor/melexis/ │ └──────────────────────┘ +│ ├── pylin/ │ (--network host) +│ ├── pymumclient/ │ +│ └── pylinframe/ │ +│ │ +│ /reports ◄─── -v $PWD/reports:/reports +└───────────────────────────────┘ +``` + +Key choices: + +- **`/workspace`** is the repo. Either baked into the image (default + for CI) or bind-mounted from the host (for iteration). +- **`/reports`** is a volume so report HTML/XML lands on the host + filesystem and survives the container. +- **The Melexis packages** live under `vendor/melexis/` inside the + image (or bind-mounted; see §5). 
The framework imports them via + `pylin` and `pymumclient` because the `vendor/automated_lin_test/ + install_packages.sh` script copies them into `site-packages` of + the venv during image build. +- **MUM access**: the MUM appears as a network device at + `192.168.7.2`. On Linux you use `--network host` so the container + shares the host's USB-RNDIS interface; on Windows/macOS Desktop + the picture is more nuanced (§4.3). +- **PSU access**: the Owon is a USB-serial device. Pass it through + with `--device /dev/ttyUSB0:/dev/ttyUSB0` and inside the container + configure `config.power_supply.port: /dev/ttyUSB0`. + +--- + +## 3. Dockerfile + +A multi-stage Dockerfile keeps the runtime image lean. The `builder` +stage compiles wheels (and runs `pip install` against a writable +filesystem); the `runtime` stage only contains what's needed to +execute tests. + +Save as `docker/Dockerfile`: + +```dockerfile +# syntax=docker/dockerfile:1.6 +# +# ecu-tests image — mock-only by default, hardware variant via +# --build-arg INCLUDE_MELEXIS=1 +# +# Build: +# docker build -f docker/Dockerfile -t ecu-tests:mock . +# docker build -f docker/Dockerfile -t ecu-tests:hw \ +# --build-arg INCLUDE_MELEXIS=1 \ +# --secret id=melexis_tarball,src=./melexis-pkgs.tar.gz \ +# . +# +# The hardware build needs the Melexis Python packages bundled into +# a tarball (pylin/, pymumclient/, pylinframe/ — three directories). +# See docs/20_docker_image.md §5. 
+ +ARG PYTHON_VERSION=3.11 + +# ────────────────────────────────────────────────────────────────────── +# Stage 1: builder — pip-install deps into a venv under /opt/venv +# ────────────────────────────────────────────────────────────────────── +FROM python:${PYTHON_VERSION}-slim AS builder + +ARG INCLUDE_MELEXIS=0 + +# OS deps: +# build-essential, libffi-dev — for any wheel that needs a compiler +# libusb-1.0-0 — pyserial uses it at runtime; keep both +# builder and runtime parity +# git — only if requirements.txt references VCS deps +RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + build-essential \ + libffi-dev \ + libusb-1.0-0 \ + git \ + && rm -rf /var/lib/apt/lists/* + +ENV PYTHONDONTWRITEBYTECODE=1 \ + PIP_NO_CACHE_DIR=1 \ + PIP_DISABLE_PIP_VERSION_CHECK=1 + +RUN python -m venv /opt/venv +ENV PATH="/opt/venv/bin:${PATH}" + +WORKDIR /build +COPY requirements.txt ./ +RUN pip install --upgrade pip wheel \ + && pip install -r requirements.txt + +# Melexis packages — bundled in via Docker BuildKit secret so the +# proprietary tarball never ends up in an image layer. +RUN --mount=type=secret,id=melexis_tarball,required=false \ + if [ "$INCLUDE_MELEXIS" = "1" ]; then \ + set -e; \ + test -s /run/secrets/melexis_tarball \ + || { echo 'INCLUDE_MELEXIS=1 but no melexis_tarball secret bound'; exit 2; }; \ + SITE_PACKAGES=$(python -c "import site; print(site.getsitepackages()[0])"); \ + tar -xzf /run/secrets/melexis_tarball -C "$SITE_PACKAGES"; \ + python -c "import pylin, pymumclient; print('melexis pkgs OK')"; \ + fi + +# ────────────────────────────────────────────────────────────────────── +# Stage 2: runtime — slim image with just the venv + repo +# ────────────────────────────────────────────────────────────────────── +FROM python:${PYTHON_VERSION}-slim AS runtime + +# Runtime-only OS deps. pyserial needs libusb at runtime for some +# USB-serial chips; ldfparser is pure Python. 
+RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + libusb-1.0-0 \ + ca-certificates \ + tini \ + && rm -rf /var/lib/apt/lists/* + +# Pull the prebuilt venv (with Melexis pkgs if requested) from builder. +COPY --from=builder /opt/venv /opt/venv + +ENV PYTHONDONTWRITEBYTECODE=1 \ + PYTHONUNBUFFERED=1 \ + PATH="/opt/venv/bin:${PATH}" + +# Repo. .dockerignore should exclude .venv, reports, vendor/BabyLIN*, +# __pycache__, .pytest_cache. +WORKDIR /workspace +COPY . /workspace + +# Reports live on a mounted volume so they survive the container. +RUN mkdir -p /reports +VOLUME ["/reports"] + +# Drop privileges. Inherit any host-side serial group via runtime +# `--group-add` (USB-serial devices on Linux are typically owned by +# the dialout group). +RUN useradd -m -u 1000 -s /bin/bash tester +USER tester + +# tini handles signal forwarding so Ctrl-C cleanly tears down pytest. +ENTRYPOINT ["/usr/bin/tini", "--"] + +# Default: collect-only so an accidental `docker run` doesn't fire +# hardware tests on a misconfigured bench. +CMD ["pytest", "-m", "not hardware", "--collect-only", "-q"] +``` + +A matching `.dockerignore` (place at repo root): + +``` +.git +.venv +__pycache__ +.pytest_cache +.coverage* +reports/* +!reports/.gitkeep +htmlcov +*.egg-info +vendor/BabyLIN library +vendor/BabyLIN_library.py +docs/_build +``` + +--- + +## 4. Building & running + +> **Don't have Docker yet?** Install steps for WSL (both Docker +> Desktop and Docker-Engine-in-WSL paths, plus `usbipd-win` for USB +> passthrough) live in +> [`docker/README.md`](../docker/README.md#prerequisites--install-docker-on-wsl). + +### 4.1 Mock-only image (the CI image) + +```bash +# Build +docker build -f docker/Dockerfile -t ecu-tests:mock . 
+ +# Run the mock suite, write reports to ./reports +docker run --rm \ + -v "$PWD/reports:/reports" \ + -e ECU_TESTS_CONFIG=config/test_config.yaml \ + ecu-tests:mock \ + pytest -m "not hardware" -v --junitxml=/reports/junit.xml \ + --html=/reports/report.html --self-contained-html +``` + +Works on any Linux/macOS/Windows host that runs Docker. No hardware +involved. Suitable for GitHub Actions, GitLab CI, Jenkins, etc. + +### 4.2 Hardware image — local Linux bench + +```bash +# Bundle the Melexis packages (one-time, on a machine that has Melexis IDE) +tar -czf melexis-pkgs.tar.gz \ + -C "/path/to/Melexis/site-packages" \ + pylin pymumclient pylinframe + +# Build hardware image +DOCKER_BUILDKIT=1 docker build \ + -f docker/Dockerfile \ + -t ecu-tests:hw \ + --build-arg INCLUDE_MELEXIS=1 \ + --secret id=melexis_tarball,src=./melexis-pkgs.tar.gz \ + . + +# Run hardware tests +docker run --rm \ + --network host \ + --device /dev/ttyUSB0:/dev/ttyUSB0 \ + --group-add dialout \ + -v "$PWD/reports:/reports" \ + -v "$PWD/config/test_config.yaml:/workspace/config/test_config.yaml:ro" \ + -e ECU_TESTS_CONFIG=/workspace/config/test_config.yaml \ + ecu-tests:hw \ + pytest -m "hardware and mum" -v \ + --junitxml=/reports/junit.xml \ + --html=/reports/report.html --self-contained-html +``` + +The flags: + +| Flag | Why | +|---|---| +| `--network host` | The MUM is reachable at `192.168.7.2` via USB-RNDIS on the host. Bridged networking would hide that interface. | +| `--device /dev/ttyUSB0:/dev/ttyUSB0` | Owon PSU passthrough. Adjust to whatever `ls /dev/ttyUSB*` reports on the host. | +| `--group-add dialout` | Without it the container's `tester` user can't open the serial port. | +| `-v config/test_config.yaml:…:ro` | Lets you tweak bench config without rebuilding. 
| + +### 4.3 Hardware image — Windows / WSL2 / macOS + +| Host | What works | Caveat | +|---|---|---| +| Windows Docker Desktop | Use `usbipd-win` to forward the USB-serial adapter into the WSL2 backend, then `--device /dev/ttyUSB0`. MUM access via `--network host` works because Docker Desktop bridges WSL2 to the host network. | The COM-port name in the host shell is irrelevant; the container sees a Linux device file. | +| WSL2 (no Docker Desktop) | Same usbipd-win flow. | The WSL2 distro must be the active integration target for Docker. | +| macOS Docker Desktop | **USB passthrough is not supported.** | Workaround: run a thin TCP-to-serial bridge on the host (e.g. `socat`) and have the container connect to that. Documented but fiddly. | + +For Windows-native (no WSL), Docker Desktop's "Windows container" +mode can pass through COM ports but isn't tested with this framework. + +### 4.4 Interactive / iteration mode + +When you're developing tests, bind-mount the repo so edits show up +without rebuilding: + +```bash +docker run --rm -it \ + --network host \ + --device /dev/ttyUSB0:/dev/ttyUSB0 \ + --group-add dialout \ + -v "$PWD:/workspace" \ + -v "$PWD/reports:/reports" \ + ecu-tests:hw \ + bash +``` + +Inside the container: + +```bash +pytest tests/hardware/test_mum_alm_animation.py -v +``` + +--- + +## 5. The Melexis-package obstacle + +`pylin`, `pymumclient`, and `pylinframe` ship inside the **Melexis +IDE** installation, not on PyPI: + +``` +C:\Program Files\Melexis\Melexis IDE\plugins\com.melexis.mlxide.python_\python\Lib\site-packages\ + ├── pylin/ + ├── pymumclient/ + └── pylinframe/ +``` + +`vendor/automated_lin_test/install_packages.sh` copies them into a +host venv. For Docker, the equivalent is a tarball passed as a build +secret: + +```bash +# Once per machine that has Melexis IDE installed, or once on a +# build server that has a snapshot. Adjust the path to your install. 
+MELEXIS_SITE="/mnt/c/Program Files/Melexis/Melexis IDE/plugins/com.melexis.mlxide.python_1.2.0.202408130945/python/Lib/site-packages" + +tar -czf melexis-pkgs.tar.gz \ + -C "$MELEXIS_SITE" \ + pylin pymumclient pylinframe +``` + +Pass to `docker build` via BuildKit secret as shown above. The +secret content is **not** baked into any image layer; it's mounted +only for the `RUN` statement that consumes it. + +### License hygiene + +- Don't push `ecu-tests:hw` to a public registry — the layer that + copied the Melexis files into `site-packages` carries proprietary + code. +- Use a private registry (internal Harbor, GitHub Container Registry + with a private repo, AWS ECR, …) gated by the same access controls + as the Melexis IDE itself. +- For a public mock-only image, build with `--build-arg + INCLUDE_MELEXIS=0` (the default) and the proprietary bits never + enter the image. + +--- + +## 6. docker-compose example + +`docker/compose.hw.yml`: + +```yaml +services: + ecu-tests: + image: ecu-tests:hw + build: + context: .. + dockerfile: docker/Dockerfile + args: + INCLUDE_MELEXIS: "1" + secrets: + - melexis_tarball + network_mode: host # MUM reachable at 192.168.7.2 + devices: + - "/dev/ttyUSB0:/dev/ttyUSB0" + group_add: + - dialout + volumes: + - ../reports:/reports + - ../config/test_config.yaml:/workspace/config/test_config.yaml:ro + environment: + ECU_TESTS_CONFIG: /workspace/config/test_config.yaml + command: > + pytest -m "hardware and mum" -v + --junitxml=/reports/junit.xml + --html=/reports/report.html --self-contained-html + +secrets: + melexis_tarball: + file: ../melexis-pkgs.tar.gz +``` + +Build & run: + +```bash +docker compose -f docker/compose.hw.yml build +docker compose -f docker/compose.hw.yml up --abort-on-container-exit +``` + +--- + +## 7. 
CI/CD integration + +### GitHub Actions — mock-only + +```yaml +# .github/workflows/test-mock.yml +name: tests (mock) +on: [push, pull_request] +jobs: + mock: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: docker/setup-buildx-action@v3 + - name: Build mock image + run: docker build -f docker/Dockerfile -t ecu-tests:mock . + - name: Run mock suite + run: | + mkdir -p reports + docker run --rm -v "$PWD/reports:/reports" ecu-tests:mock \ + pytest -m "not hardware" -v \ + --junitxml=/reports/junit.xml \ + --html=/reports/report.html --self-contained-html + - uses: actions/upload-artifact@v4 + if: always() + with: + name: reports + path: reports/ +``` + +### Self-hosted runner for hardware + +A self-hosted runner on the lab machine, labelled `bench`, runs the +hardware job. The runner has `melexis-pkgs.tar.gz` cached locally +and the USB-serial port at a known path: + +```yaml +# .github/workflows/test-hw.yml +name: tests (hardware) +on: + workflow_dispatch: + schedule: + - cron: '0 4 * * *' # nightly at 04:00 lab time +jobs: + hardware: + runs-on: [self-hosted, bench] + steps: + - uses: actions/checkout@v4 + - name: Build hardware image + run: | + DOCKER_BUILDKIT=1 docker build \ + -f docker/Dockerfile -t ecu-tests:hw \ + --build-arg INCLUDE_MELEXIS=1 \ + --secret id=melexis_tarball,src=/var/lib/bench/melexis-pkgs.tar.gz \ + . + - name: Run hardware suite + run: | + mkdir -p reports + docker run --rm \ + --network host \ + --device /dev/ttyUSB0:/dev/ttyUSB0 \ + --group-add dialout \ + -v "$PWD/reports:/reports" \ + ecu-tests:hw \ + pytest -m "hardware and not slow" -v \ + --junitxml=/reports/junit.xml \ + --html=/reports/report.html --self-contained-html + - uses: actions/upload-artifact@v4 + if: always() + with: + name: hw-reports + path: reports/ +``` + +--- + +## 8. 
Troubleshooting + +| Symptom | Likely cause | Fix | +|---|---|---| +| `ModuleNotFoundError: No module named 'pylin'` | Image built without `INCLUDE_MELEXIS=1`, or the tarball was empty | Verify with `docker run --rm ecu-tests:hw python -c "import pylin; print(pylin.__file__)"` | +| `serial.SerialException: could not open port` | USB device not passed through, or wrong path | `--device /dev/ttyUSB0:/dev/ttyUSB0` on the host; check with `ls /dev/ttyUSB*` on the host first | +| `Permission denied: '/dev/ttyUSB0'` | Container user not in the host's serial-group | Add `--group-add dialout` (or whatever group owns the device on the host) | +| MUM unreachable at 192.168.7.2 | Container on bridge network instead of host network | Add `--network host`. On Docker Desktop, Windows/macOS, see §4.3 | +| Reports empty / not on host | `/reports` not bind-mounted | `-v "$PWD/reports:/reports"` | +| Build fails on Apple Silicon | Multi-arch wheels missing for some dep | Add `--platform linux/amd64` to `docker build` and use Rosetta emulation, or rebuild from source | +| Tests run as root accidentally | Custom `USER` override at runtime | Don't pass `--user 0`; the image runs as `tester` (uid 1000) on purpose | +| pytest-html missing CSS in report | Forgot `--self-contained-html` | Add it to the pytest command line so the HTML stands alone | + +--- + +## 9. Limitations and intentional non-goals + +- **No GUI** — `docker run` doesn't render LED color, smoothness of + fade, or any other optical property. Hardware tests still assert + only what's on the LIN bus, just like running the framework + natively. +- **No firmware flashing yet** — the `HexFlasher` is a scaffold; + baking a working UDS flasher into the image is future work. When + it lands, the flashing path will need access to the same serial + device and the same network as the tests. +- **No live monitoring** — the image runs a pytest invocation and + exits. 
If you want a long-lived "test agent" container, wrap + pytest in a daemon (e.g. a small Flask app that triggers runs on + webhook); not provided here. +- **No multi-bench orchestration** — one container, one bench. For + N benches, run N containers with distinct `--device` / + `--network` configs, ideally orchestrated by Compose or + Kubernetes. +- **Deprecated BabyLIN path** — the image deliberately does **not** + package the BabyLIN SDK. If you genuinely need it on a legacy rig, + see `docs/08_babylin_internals.md` and add the SDK directly to + the host venv; don't try to dockerize the deprecated path. + +--- + +## 10. Related docs + +- [`docs/02_configuration_resolution.md`](02_configuration_resolution.md) + — how `ECU_TESTS_CONFIG` and `OWON_PSU_CONFIG` envs feed the test + fixtures (used by the container). +- [`docs/12_using_the_framework.md`](12_using_the_framework.md) — + the non-container reference flow. +- [`docs/14_power_supply.md`](14_power_supply.md) — PSU port + resolution (cross-platform). The container sees Linux device + paths. +- [`docs/21_yocto_image_for_raspberry_pi.md`](21_yocto_image_for_raspberry_pi.md) + — if you'd rather have the framework run *on* an embedded board + rather than from a container on a host PC. +- `vendor/automated_lin_test/install_packages.sh` — the native-venv + equivalent of the Docker Melexis-bundle step. diff --git a/docs/21_yocto_image_for_raspberry_pi.md b/docs/21_yocto_image_for_raspberry_pi.md new file mode 100644 index 0000000..7d881ab --- /dev/null +++ b/docs/21_yocto_image_for_raspberry_pi.md @@ -0,0 +1,800 @@ +# Yocto Image for Raspberry Pi — ECU Test Framework as a Bench Appliance + +This guide explains how to build a custom Linux distribution with +the Yocto Project so a Raspberry Pi *is* the test bench: power it +on, it boots, the test framework runs the configured suites against +a connected MUM and ECU, and reports land in a known location (or +get pushed to a server). No PC in the loop. 
+ +If you only want to run the framework on a stock Raspberry Pi OS +install, you're looking at +[`docs/09_raspberry_pi_deployment.md`](09_raspberry_pi_deployment.md). +If you want a pre-baked Pi OS image (still Debian, just snapshotted +with the framework already installed), see +[`docs/10_build_custom_image.md`](10_build_custom_image.md). This +document is about Yocto specifically — building a minimal, hardened, +reproducible OS from sources around the framework. + +--- + +## 1. Why Yocto vs. Raspberry Pi OS? + +| Concern | Raspberry Pi OS (Debian) | Yocto | +|---|---|---| +| First image up | Hours | First build: a day. Subsequent builds: hours. | +| Image size | ~2–4 GB minimum (Lite) | ~150–500 MB realistic | +| Reproducibility | Snapshot of `apt` state at image time | Full source pinning via layer revisions | +| Auditing "what's installed" | `dpkg -l` of a moving target | Single manifest, version-pinned | +| Hardening / removing surface | Have to disable / uninstall | Just don't include the recipe | +| Boot time to test | 30–60 s | 5–15 s with a tuned image | +| Building a fleet | Re-snapshot per change | Rebuild image, push artifact | +| Build host requirements | Pi + SD card | Linux build host with ~100 GB free and ~16 GB RAM ideally | + +Pick Yocto when **the Pi is a deployed appliance**, not a +workstation — a permanent bench, a HIL rack, a customer-shipped test +fixture. For day-to-day developer work the Pi OS path is fine. + +--- + +## 2. 
Architecture + +``` + ┌────────────────────────────────┐ + │ Raspberry Pi (Yocto image) │ + │ │ + ┌── Ethernet ───┤ 192.168.7.1 (host on RNDIS) │ + │ │ │ +┌───────────────┴──────────┐ │ ecu-test-framework systemd │ +│ MUM @ 192.168.7.2 │ │ service: │ +│ USB-RNDIS (or wired) │ │ pytest -m "hardware and │ +└──────────────────────────┘ │ mum and not slow" │ + │ │ + │ /opt/ecu-tests/ (the repo) │ + │ /opt/ecu-tests/.venv/ │ + │ │ + ┌── USB-serial ─┤ /dev/ttyUSB0 (Owon PSU) │ + │ │ │ +┌───────────────┴──────────┐ │ /var/log/ecu-tests/ │ +│ Owon PSU │ │ report.html, junit.xml, │ +└──────────────────────────┘ │ summary.md │ + │ │ + │ rsync/scp/HTTP push of /var/ │ + │ log/ecu-tests/ to a server │ + └────────────────────────────────┘ +``` + +Key choices made by this document: + +- **`meta-raspberrypi`** as the BSP for `raspberrypi4-64` (or `-3`, + `-cm4`, depending on your hardware). +- **`meta-openembedded`** for Python + general userspace. +- **A new layer `meta-ecu-tests`** holds: the framework recipe, + recipes for the non-PyPI Python deps, the image recipe, and the + systemd unit. +- **`systemd`** init system (Yocto's `core-image-minimal` defaults + to sysvinit; we override to `systemd`). +- **Pinned Yocto release**: `scarthgap` (LTS, May 2024). Pick a + current LTS at build time; this doc shows scarthgap. + +--- + +## 3. Build-host prerequisites + +A Linux machine (Ubuntu 22.04 LTS or Debian 12 are the smoothest; +WSL2 works but is slower and consumes a lot of disk). 
+ +**Resources:** + +- 100 GB free disk (the first `bitbake` run downloads sources and + builds toolchains) +- 16 GB RAM ideal, 8 GB workable +- Multi-core CPU; expect 1–4 h for the first image build + +**Packages (Ubuntu 22.04):** + +```bash +sudo apt update +sudo apt install -y \ + gawk wget git diffstat unzip texinfo gcc build-essential chrpath socat \ + cpio python3 python3-pip python3-pexpect xz-utils debianutils iputils-ping \ + python3-git python3-jinja2 python3-subunit zstd liblz4-tool file locales \ + libacl1 +sudo locale-gen en_US.UTF-8 +``` + +Make sure your user can run docker if you plan to use the +`kas-container` shortcut; not required for the manual `bitbake` +flow shown here. + +--- + +## 4. Layer layout + +``` +~/yocto/ +├── poky/ # Yocto core, ~3 GB +├── meta-openembedded/ # community python/network/etc. layers +├── meta-raspberrypi/ # Raspberry Pi BSP +├── meta-ecu-tests/ # ← we create this +│ ├── conf/ +│ │ └── layer.conf +│ ├── recipes-ecu-tests/ +│ │ ├── ecu-test-framework_git.bb +│ │ ├── ecu-test-framework/ +│ │ │ ├── ecu-test-framework.service +│ │ │ ├── ecu-test-runner.sh +│ │ │ └── push-reports.sh +│ │ └── python3-melexis/ +│ │ ├── python3-pylin_1.2.0.bb +│ │ ├── python3-pymumclient_1.2.0.bb +│ │ └── python3-pylinframe_1.2.0.bb +│ ├── recipes-python/ +│ │ └── python3-ldfparser_.bb +│ └── recipes-images/ +│ └── ecu-tests-image.bb +└── build/ # bitbake's TMPDIR (huge) +``` + +--- + +## 5. 
Setting up the build environment + +### 5.1 Clone Yocto + BSP + needed layers + +```bash +mkdir -p ~/yocto && cd ~/yocto + +BRANCH=scarthgap + +git clone -b $BRANCH https://git.yoctoproject.org/git/poky +git clone -b $BRANCH https://git.openembedded.org/meta-openembedded +git clone -b $BRANCH https://git.yoctoproject.org/git/meta-raspberrypi +``` + +### 5.2 Bootstrap the build directory + +```bash +source poky/oe-init-build-env build +# you are now in ~/yocto/build/ +``` + +### 5.3 Tell bitbake which layers exist + +`conf/bblayers.conf` should look like: + +```bitbake +BBLAYERS ?= " \ + ${TOPDIR}/../poky/meta \ + ${TOPDIR}/../poky/meta-poky \ + ${TOPDIR}/../poky/meta-yocto-bsp \ + ${TOPDIR}/../meta-openembedded/meta-oe \ + ${TOPDIR}/../meta-openembedded/meta-python \ + ${TOPDIR}/../meta-openembedded/meta-networking \ + ${TOPDIR}/../meta-raspberrypi \ + ${TOPDIR}/../meta-ecu-tests \ +" +``` + +(`meta-ecu-tests` will be created in §6 — bitbake will warn until +it exists, that's fine.) + +### 5.4 Configure the build target + +`conf/local.conf` — append/edit: + +```bitbake +MACHINE = "raspberrypi4-64" + +DISTRO = "poky" + +# Init manager +DISTRO_FEATURES:append = " systemd" +VIRTUAL-RUNTIME:init_manager = "systemd" +VIRTUAL-RUNTIME:initscripts = "" +DISTRO_FEATURES_BACKFILL_CONSIDERED += "sysvinit" + +# We want SSH for first-boot diagnosis +EXTRA_IMAGE_FEATURES ?= "debug-tweaks ssh-server-openssh" + +# Make sure Python 3 ends up in the image +IMAGE_INSTALL:append = " python3 python3-modules" + +# Speed up downloads and rebuilds by sharing caches +DL_DIR ?= "${TOPDIR}/downloads" +SSTATE_DIR ?= "${TOPDIR}/sstate-cache" +BB_NUMBER_THREADS = "${@oe.utils.cpu_count()}" +PARALLEL_MAKE = "-j${@oe.utils.cpu_count()}" + +# Raspberry Pi specifics +ENABLE_UART = "1" # serial console on UART +RPI_USE_U_BOOT = "0" +DISABLE_RPI_BOOT_LOGO = "1" +``` + +For a Raspberry Pi 3, change `MACHINE = "raspberrypi3-64"`. For +Compute Module 4: `MACHINE = "raspberrypi-cm4"`. 
The full list of supported machines lives in
`meta-raspberrypi/conf/machine/`.
${WORKDIR}/ecu-test-runner.sh ${D}/opt/ecu-tests/ + install -m 0755 ${WORKDIR}/push-reports.sh ${D}/opt/ecu-tests/ + install -d ${D}${systemd_system_unitdir} + install -m 0644 ${WORKDIR}/ecu-test-framework.service \ + ${D}${systemd_system_unitdir}/ +} + +FILES:${PN} = " \ + /opt/ecu-tests \ + /var/log/ecu-tests \ + /etc/ecu-tests \ + ${systemd_system_unitdir}/ecu-test-framework.service \ +" +``` + +Replace `git://your-git-host/ecu-tests.git;branch=main;protocol=https` +with your actual remote. For an air-gapped build, ship the repo as +a tarball: `SRC_URI = "file://ecu-tests.tar.gz"` and place it next +to the recipe. + +### 6.3 Recipe — runner script + +`meta-ecu-tests/recipes-ecu-tests/ecu-test-framework/ecu-test-runner.sh`: + +```bash +#!/bin/sh +set -eu + +REPO=/opt/ecu-tests +LOG=/var/log/ecu-tests +RUN_TS=$(date -u +%Y%m%dT%H%M%SZ) +OUT="$LOG/$RUN_TS" +mkdir -p "$OUT" + +cd "$REPO" + +# Marker selection lives in /etc/ecu-tests/marker (a single line, e.g.: +# hardware and mum and not slow +# Defaults to a safe non-slow MUM run. +MARKER=$(cat /etc/ecu-tests/marker 2>/dev/null || echo "hardware and mum and not slow") + +ECU_TESTS_CONFIG=/etc/ecu-tests/test_config.yaml \ +python3 -m pytest -m "$MARKER" -v \ + --junitxml="$OUT/junit.xml" \ + --html="$OUT/report.html" --self-contained-html \ + --tb=short 2>&1 | tee "$OUT/run.log" || true + +# Symlink "latest" for convenience +ln -sfn "$RUN_TS" "$LOG/latest" + +# Optional: rsync to a server. Reads RSYNC_DEST from +# /etc/ecu-tests/push.env. Silently no-ops if unset. +. /etc/ecu-tests/push.env 2>/dev/null || true +[ -n "${RSYNC_DEST:-}" ] && /opt/ecu-tests/push-reports.sh "$OUT" "$RSYNC_DEST" || true +``` + +`push-reports.sh` is a thin `rsync` wrapper — left as an exercise +for your network setup (or replace with `curl` to an HTTP collector, +or `mosquitto_pub` to MQTT — whatever your infra prefers). 
+ +### 6.4 Recipe — systemd unit + +`meta-ecu-tests/recipes-ecu-tests/ecu-test-framework/ecu-test-framework.service`: + +```ini +[Unit] +Description=ECU Test Framework one-shot run +After=network-online.target dev-ttyUSB0.device +Wants=network-online.target + +[Service] +Type=oneshot +ExecStart=/opt/ecu-tests/ecu-test-runner.sh +StandardOutput=journal +StandardError=journal + +[Install] +WantedBy=multi-user.target +``` + +Pair this with a `.timer` if you want periodic runs, or leave as a +one-shot triggered by reboot or `systemctl start +ecu-test-framework.service` over SSH. + +For continuous runs (every N minutes), add +`meta-ecu-tests/recipes-ecu-tests/ecu-test-framework/ecu-test-framework.timer`: + +```ini +[Unit] +Description=Run ECU tests every 30 minutes + +[Timer] +OnBootSec=2min +OnUnitActiveSec=30min +Unit=ecu-test-framework.service + +[Install] +WantedBy=timers.target +``` + +…and add it to `SYSTEMD_SERVICE:${PN}` in the recipe. + +### 6.5 Recipe — `python3-ldfparser` + +ldfparser **is** on PyPI but isn't in stock OpenEmbedded. 
Add a +minimal recipe at +`meta-ecu-tests/recipes-python/python3-ldfparser_0.27.0.bb` +(update the version): + +```bitbake +SUMMARY = "Pure-Python LDF parser (LIN Description File)" +HOMEPAGE = "https://github.com/c4deszes/ldfparser" +LICENSE = "MIT" +LIC_FILES_CHKSUM = "file://LICENSE;md5=" + +SRC_URI = "https://files.pythonhosted.org/packages/source/l/ldfparser/ldfparser-${PV}.tar.gz" +SRC_URI[sha256sum] = "" + +S = "${WORKDIR}/ldfparser-${PV}" + +inherit setuptools3 pypi + +RDEPENDS:${PN} = "python3-lark python3-bitstruct" +``` + +Run `bitbake -c devshell python3-ldfparser` and use +`devtool` to compute the hashes if you don't already have them, or +fetch them with: + +```bash +pip download ldfparser==0.27.0 --no-deps -d /tmp/ldfparser +sha256sum /tmp/ldfparser/ldfparser-0.27.0.tar.gz +``` + +### 6.6 Recipes — Melexis non-PyPI packages + +`pylin`, `pymumclient`, and `pylinframe` ship inside the Melexis IDE +installer; they're **not** on PyPI and you must source them from a +legally-licensed Melexis install. Each gets a recipe that consumes a +pre-staged tarball under `${BSPDIR}/downloads/`. + +Stage once on the build host: + +```bash +# On a machine that has Melexis IDE installed +MELEXIS_SITE="/mnt/c/Program Files/Melexis/Melexis IDE/plugins/com.melexis.mlxide.python_1.2.0.202408130945/python/Lib/site-packages" +mkdir -p ~/yocto/downloads +for pkg in pylin pymumclient pylinframe; do + tar -czf ~/yocto/downloads/${pkg}-1.2.0.tar.gz -C "$MELEXIS_SITE" $pkg +done +``` + +`meta-ecu-tests/recipes-ecu-tests/python3-melexis/python3-pylin_1.2.0.bb`: + +```bitbake +SUMMARY = "Melexis pylin — proprietary, not redistributable" +DESCRIPTION = "Vendored copy of the pylin package shipped with Melexis IDE." +LICENSE = "Proprietary" +LIC_FILES_CHKSUM = "" + +# License Mode: +# This recipe ships proprietary code. Yocto will refuse to build unless +# you whitelist it. 
In your conf/local.conf: +# LICENSE_FLAGS_ACCEPTED += "commercial_pylin commercial_pymumclient commercial_pylinframe" +LICENSE_FLAGS = "commercial_pylin" + +# The tarball must be pre-staged at DL_DIR/pylin-${PV}.tar.gz +SRC_URI = "file://pylin-${PV}.tar.gz" +SRC_URI[sha256sum] = "" + +S = "${WORKDIR}" + +RDEPENDS:${PN} = "python3 python3-modules" + +do_install() { + install -d ${D}${PYTHON_SITEPACKAGES_DIR} + cp -a ${S}/pylin ${D}${PYTHON_SITEPACKAGES_DIR}/ +} + +FILES:${PN} = "${PYTHON_SITEPACKAGES_DIR}/pylin" +``` + +`python3-pymumclient_1.2.0.bb` and `python3-pylinframe_1.2.0.bb` +are the same shape with the package name and `LICENSE_FLAGS` +swapped. + +Add to `conf/local.conf`: + +```bitbake +LICENSE_FLAGS_ACCEPTED += "commercial_pylin commercial_pymumclient commercial_pylinframe" +``` + +> **License hygiene**: the resulting image embeds proprietary +> packages. Treat the image artifact as proprietary — same access +> controls as the Melexis IDE installer. + +### 6.7 Image recipe + +`meta-ecu-tests/recipes-images/ecu-tests-image.bb`: + +```bitbake +SUMMARY = "ECU bench image — Raspberry Pi as a test runner" +DESCRIPTION = "Minimal Linux image that boots, configures network, \ +and runs the ECU test framework on a schedule." 
+ +LICENSE = "MIT" + +IMAGE_FEATURES += "ssh-server-openssh" + +IMAGE_INSTALL = " \ + packagegroup-core-boot \ + packagegroup-core-ssh-openssh \ + ${CORE_IMAGE_EXTRA_INSTALL} \ + \ + python3 \ + python3-pip \ + python3-pytest \ + python3-pytest-html \ + python3-pytest-cov \ + python3-pytest-xdist \ + python3-pyserial \ + python3-pyyaml \ + \ + python3-ldfparser \ + python3-pylin \ + python3-pymumclient \ + python3-pylinframe \ + \ + ecu-test-framework \ + \ + rsync openssh-sftp-server curl \ + htop nano vim-tiny \ + kernel-modules \ + chrony \ +" + +# Be explicit about init system in the image +DISTRO_FEATURES:append = " systemd" +VIRTUAL-RUNTIME:init_manager = "systemd" + +inherit core-image + +# Size constraint (raise if you add a lot of debug tools) +IMAGE_OVERHEAD_FACTOR = "1.3" +IMAGE_ROOTFS_EXTRA_SPACE = "524288" +``` + +--- + +## 7. Network configuration + +The bench MUM exposes itself as a USB-RNDIS Ethernet device at +`192.168.7.2/24` with the host expected at `192.168.7.1`. Bake the +host-side address into the image so the Pi takes it automatically. + +`meta-ecu-tests/recipes-ecu-tests/ecu-test-framework/files/20-mum.network` +(append to the recipe's `SRC_URI` and `do_install`): + +```ini +[Match] +# usbX is what the Pi's kernel names the USB-RNDIS device. Verify +# with `ip link` on a running image and adjust if needed (it may be +# enxXXXXXXXXXXXX based on MAC address). +Name=usb0 enx* + +[Network] +Address=192.168.7.1/24 +LinkLocalAddressing=no +IPMasquerade=no +ConfigureWithoutCarrier=yes +``` + +The recipe installs this to `/etc/systemd/network/20-mum.network`. +`systemd-networkd` is already enabled when `systemd` is the init +manager. + +For a wired connection to the lab network as well, add a second +profile: + +```ini +[Match] +Name=eth0 + +[Network] +DHCP=yes +``` + +--- + +## 8. USB / serial configuration + +The Owon PSU is a USB-serial device, typically `/dev/ttyUSB0`. 
To +keep the path stable across reboots when the host has other USB +adapters, add a udev rule. + +`meta-ecu-tests/recipes-ecu-tests/ecu-test-framework/files/99-owon-psu.rules`: + +``` +# Adjust idVendor/idProduct for your specific adapter (check `lsusb` on +# a booted image). The symlink lets config use /dev/owon_psu instead of +# /dev/ttyUSBn, which can shift if multiple adapters are present. +SUBSYSTEM=="tty", ATTRS{idVendor}=="1a86", ATTRS{idProduct}=="7523", \ + MODE="0660", GROUP="dialout", SYMLINK+="owon_psu" +``` + +Install to `/etc/udev/rules.d/99-owon-psu.rules`. The framework's +`config/test_config.yaml` then carries `port: /dev/owon_psu` +regardless of enumeration order. + +--- + +## 9. The configuration file shipped in the image + +`/etc/ecu-tests/test_config.yaml` (installed by the recipe): + +```yaml +interface: + type: mum + host: 192.168.7.2 + lin_device: lin0 + power_device: power_out0 + bitrate: 19200 + boot_settle_seconds: 0.5 + ldf_path: /opt/ecu-tests/vendor/4SEVEN_color_lib_test.ldf + +flash: + enabled: false + +power_supply: + enabled: true + port: /dev/owon_psu # from the udev rule + baudrate: 115200 + timeout: 2.0 + parity: N + stopbits: 1 + idn_substr: OWON + do_set: true + set_voltage: 13.0 + set_current: 1.0 +``` + +And `/etc/ecu-tests/marker` (single line): + +``` +hardware and mum and not slow +``` + +Operators can edit either over SSH without rebuilding the image. + +--- + +## 10. Build, flash, boot + +### 10.1 Build + +From `~/yocto/build/`: + +```bash +bitbake ecu-tests-image +``` + +First run: 1–4 h depending on your machine. Subsequent rebuilds +(with `sstate-cache` intact): minutes. + +Output ends up at +`~/yocto/build/tmp/deploy/images/raspberrypi4-64/ecu-tests-image-raspberrypi4-64.wic.bz2`. + +### 10.2 Flash + +```bash +# Find the SD card +lsblk +# Assume /dev/sdX is the SD card; double-check before running! 
+ +bzcat ~/yocto/build/tmp/deploy/images/raspberrypi4-64/ecu-tests-image-raspberrypi4-64.wic.bz2 \ + | sudo dd of=/dev/sdX bs=4M conv=fsync status=progress +sync +``` + +Or use `bmaptool` from Yocto for faster flashing of sparse images. + +### 10.3 First boot + +- Insert the SD card into the Pi. +- Connect: power, USB-Ethernet (MUM), USB-serial (Owon PSU), and + either Ethernet or HDMI+keyboard for diagnosis. +- Boot. +- SSH in: `ssh root@` (no password by default thanks to + `debug-tweaks` — disable that for production builds, see §13). + +```bash +journalctl -u ecu-test-framework.service -e +ls /var/log/ecu-tests/latest +cat /var/log/ecu-tests/latest/junit.xml | head +``` + +--- + +## 11. Updating the image + +There are three ways to push updates without a full re-flash: + +| Approach | When | How | +|---|---|---| +| Re-flash | Major changes, package adds | `bitbake ecu-tests-image` → flash | +| In-place git pull | Test-code-only changes | `git -C /opt/ecu-tests pull && systemctl restart ecu-test-framework` | +| RAUC / Mender A/B | Production fleets | Adds an A/B partition layout and an update agent; out of scope for this doc | + +For developer iteration, the git-pull path is fastest. The image +should ship with the framework's git remote so `git pull` works +out of the box. + +--- + +## 12. Air-gapped or no-network builds + +Yocto can fetch everything locally if you stage: + +1. `downloads/` populated by a one-time `bitbake -c fetchall + ecu-tests-image` on a connected machine. +2. `sstate-cache/` similarly. + +Then on the air-gapped builder set: + +```bitbake +BB_NO_NETWORK = "1" +BB_FETCH_PREMIRRORONLY = "1" +``` + +And copy `downloads/` and `sstate-cache/` from the staging machine. + +--- + +## 13. Hardening for production + +Before shipping the image to a customer or a permanent installation: + +- **Disable `debug-tweaks`** in `EXTRA_IMAGE_FEATURES`. This + reinstates root password requirement, removes the empty-password + bypass, and hardens the SSH config. 
- **Set a real root password** (e.g. via the `extrausers` class and
  `EXTRA_USERS_PARAMS` in the image recipe), OR provision SSH keys at
  first boot, OR enforce key-only logins (disable SSH password
  authentication in `sshd_config`).
+| First build runs forever | All-from-source compile | This is normal; subsequent builds use the populated `sstate-cache` | +| Image too big for the SD card | Too many extras in `IMAGE_INSTALL` | Trim `htop nano vim-tiny chrony` etc. if you don't need them | + +--- + +## 15. What this gives you vs. running on the Pi directly + +| | Pi OS + `pi_install.sh` | Yocto image | +|---|---|---| +| Reproducible | ad-hoc | yes | +| Image footprint | ~2 GB | ~400 MB realistic | +| Boot to first test | ~45 s | ~12 s with a tuned image | +| Updates over the air | manual | feasible with RAUC/Mender | +| Day-to-day dev | comfortable | painful — every change rebuilds | +| Auditing the OS | dpkg snapshot | full source manifest | + +Use the Yocto path when the Pi is part of a **deliverable**, when +you need to ship N benches identical, or when an auditor needs a +list of every byte on the device. + +--- + +## 16. Related docs + +- [`docs/09_raspberry_pi_deployment.md`](09_raspberry_pi_deployment.md) + — run the framework on stock Raspberry Pi OS (the lighter path). +- [`docs/10_build_custom_image.md`](10_build_custom_image.md) — a + preseeded Pi OS image, the middle ground between vanilla Pi OS + and a Yocto build. +- [`docs/20_docker_image.md`](20_docker_image.md) — if you'd rather + the framework run from a container on a regular Linux host rather + than as the host itself. +- [`docs/14_power_supply.md`](14_power_supply.md) — PSU port + resolution; on the image the udev rule makes `/dev/owon_psu` + stable, so the resolver's job is trivial. +- [`docs/02_configuration_resolution.md`](02_configuration_resolution.md) + — how `ECU_TESTS_CONFIG` selects `/etc/ecu-tests/test_config.yaml` + at runtime. +- `vendor/automated_lin_test/install_packages.sh` — the equivalent + of the Melexis-recipe step for a developer venv on a workstation. 
diff --git a/docs/README.md b/docs/README.md index 871797f..90e92e5 100644 --- a/docs/README.md +++ b/docs/README.md @@ -22,6 +22,8 @@ A guided tour of the ECU testing framework. Start here: 18. `14_power_supply.md` — Owon PSU control, configuration, tests, and quick demo script 19. `15_report_properties_cheatsheet.md` — Standardized keys for record_property/rp across suites 20. `19_frame_io_and_alm_helpers.md` — Hardware-test helpers: `FrameIO` (generic LDF I/O) and `AlmTester` (ALM_Node domain), plus the `tests/hardware/_test_case_template.py` starting point +21. `20_docker_image.md` — Containerizing the framework: mock-only CI image, hardware-passthrough image, the Melexis-package obstacle, compose & CI examples +22. `21_yocto_image_for_raspberry_pi.md` — Building a Yocto image that turns a Raspberry Pi into a self-contained test bench (BSP layout, recipes, network/USB config, deploy & maintenance) Related references: