# syntax=docker/dockerfile:1.6
# ──────────────────────────────────────────────────────────────────────────
# ecu-tests Dockerfile — multi-stage build for the ECU testing framework.
#
# Produces two flavours of the same image, switched by a build-arg:
#
#   docker build -f docker/Dockerfile -t ecu-tests:mock .
#       → "mock" flavour: just enough to run mock + unit tests in CI.
#         No proprietary code inside the image.
#
#   DOCKER_BUILDKIT=1 docker build \
#       -f docker/Dockerfile -t ecu-tests:hw \
#       --build-arg INCLUDE_MELEXIS=1 \
#       --build-context melexis-bundle=./melexis-bundle \
#       .
#       → "hw" flavour: also bundles the full Melexis set (mlx, pylin,
#         pylinframe, pymumclient, pymlxabc, pymlxchip, pymlxexceptions,
#         pymlxgdb, pymlxhex, pymlxloader) so hardware tests can drive a
#         real MUM. The tarball is passed via a named build context
#         (`--build-context`) bind-mounted at /melexis-bundle for one
#         RUN step — see docs/20_docker_image.md §5.
#
# A matching ../.dockerignore at the repo root excludes .venv/, reports/*,
# the deprecated BabyLIN SDK, Python caches, etc. so the build context
# stays small and proprietary content doesn't leak into image layers.
# ──────────────────────────────────────────────────────────────────────────

# `# syntax=` (line 1) opts in to the BuildKit Dockerfile frontend, which
# is required for the `RUN --mount=type=bind,from=…` syntax used below
# (and for the `--build-context` stage override). Without it, the classic
# builder doesn't understand the `--mount` flag and the build fails.

# Build-time argument: which Python interpreter version to base both stages
# on. An ARG declared *before* the first FROM is visible only in FROM
# lines; any stage that needs the value elsewhere must redeclare it.
ARG PYTHON_VERSION=3.11


# ╔══════════════════════════════════════════════════════════════════════╗
# ║ Stub stage — "melexis-bundle"                                        ║
# ║                                                                      ║
# ║ A no-op `scratch` stage that the builder bind-mounts from when       ║
# ║ extracting the Melexis tarball. For hw builds the caller overrides   ║
# ║ this stage with `--build-context melexis-bundle=<dir>` so the dir    ║
# ║ that contains `melexis-pkgs.tar.gz` shows up under /melexis-bundle.  ║
# ║                                                                      ║
# ║ Why this dance: BuildKit's `--mount=type=secret` is capped at 500    ║
# ║ KiB (secrets are meant for keys, not blobs). `--mount=type=bind`     ║
# ║ has no size limit and never lands in an image layer either, but it   ║
# ║ needs a source to mount from. A named build context overriding a     ║
# ║ stub stage gives us "optional, file-of-any-size, never-in-image"     ║
# ║ semantics without polluting the default build context.               ║
# ╚══════════════════════════════════════════════════════════════════════╝
# (For mock builds nothing overrides this, and the bind mount below sees
# an empty directory — the guarded `if` in the builder never reads it.)
FROM scratch AS melexis-bundle


# ╔══════════════════════════════════════════════════════════════════════╗
# ║ Stage 1 — "builder"                                                  ║
# ║                                                                      ║
# ║ Installs Python dependencies into a clean venv at /opt/venv. We do   ║
# ║ this in a separate stage so the final runtime image doesn't carry    ║
# ║ compilers, headers, pip caches, or the build-time apt index.         ║
# ╚══════════════════════════════════════════════════════════════════════╝

# Base on the official python slim image ("slim" = Debian-based, ~150 MB,
# no compilers); the exact interpreter version comes from PYTHON_VERSION.
# We add what we need explicitly below.
# `AS builder` names this stage so the runtime stage can pull from it.
FROM python:${PYTHON_VERSION}-slim AS builder

# Build-arg redeclared inside the stage (Docker scoping rule: an ARG
# declared before the first FROM is only in scope for FROM lines; each
# stage that wants the value must redeclare it). Default 0 = mock-only
# build; `--build-arg INCLUDE_MELEXIS=1` selects the hw flavour.
ARG INCLUDE_MELEXIS=0

# Build-time OS packages (alphabetical, one per line, for diffable diffs):
#   build-essential, libffi-dev — toolchain for any pip wheel that ships
#                                 no prebuilt binary (rare but possible).
#   git                         — only needed if requirements.txt ever
#                                 references a VCS dep (the current file
#                                 doesn't; kept for forward compat).
#   libusb-1.0-0                — runtime lib pyserial pulls in on some
#                                 USB-serial adapters. Kept in parity
#                                 with the runtime stage so behaviour
#                                 matches.
# `--no-install-recommends` skips Debian's "suggested" extras → smaller.
# The apt index is removed in the *same* layer that created it, so it
# never bloats the image (the runtime stage installs its own anyway).
RUN apt-get update \
 && apt-get install -y --no-install-recommends \
        build-essential \
        git \
        libffi-dev \
        libusb-1.0-0 \
 && rm -rf /var/lib/apt/lists/*

# Environment knobs for the rest of this stage (ENV persists for all
# later instructions; the runtime stage sets its own, separate ENV):
#   PYTHONDONTWRITEBYTECODE=1   — don't create __pycache__/*.pyc files
#                                  during pip install (saves layer space).
#   PIP_NO_CACHE_DIR=1          — pip won't keep its download cache, so
#                                  this layer is smaller (same effect as
#                                  passing --no-cache-dir every time).
#   PIP_DISABLE_PIP_VERSION_CHECK=1 — silence the "pip is outdated"
#                                  network call on every invocation.
ENV PYTHONDONTWRITEBYTECODE=1 \
    PIP_NO_CACHE_DIR=1 \
    PIP_DISABLE_PIP_VERSION_CHECK=1

# Create a clean virtual environment at /opt/venv. Doing this instead of
# installing into the system Python lets us COPY the whole venv to the
# runtime stage as one self-contained tree (same absolute path in both
# stages, so the venv's shebangs stay valid).
RUN python -m venv /opt/venv

# Prepend the venv's bin/ to PATH so subsequent `pip` and `python` calls
# in this stage resolve to the venv interpreter — no need to spell out
# /opt/venv/bin/pip everywhere.
ENV PATH="/opt/venv/bin:${PATH}"

# Working directory used only for the build steps (WORKDIR creates it if
# missing). The repo itself lands at /workspace in the runtime stage;
# /build is throwaway — it is never copied forward.
WORKDIR /build

# Copy *only* requirements.txt first. Docker caches each layer by the
# hash of its inputs, so as long as requirements.txt doesn't change,
# the slow `pip install` below is reused from cache — even if every
# .py in the repo has changed. This is the classic "layer caching"
# trick for dependency installs.
COPY requirements.txt ./

# Resolve project dependencies into the venv. Upgrading pip + wheel
# first guarantees a modern resolver that understands current wheel
# formats before the project deps are pulled. `python -m pip` is the
# venv's pip, because /opt/venv/bin sits at the front of PATH.
RUN python -m pip install --upgrade pip wheel \
 && python -m pip install -r requirements.txt

# Melexis packages step — only runs when INCLUDE_MELEXIS=1.
#
# `RUN --mount=type=bind,from=melexis-bundle,…` mounts the named context
# (or its scratch stub, for mock builds) read-only at /melexis-bundle for
# the duration of this RUN only. No image layer ever contains the
# tarball — the bind mount is torn down before the layer is committed.
#
# Hw build supplies the real bundle:
#     --build-context melexis-bundle=<dir holding melexis-pkgs.tar.gz>
# Mock build omits it and the stub `scratch` stage applies, yielding an
# empty /melexis-bundle that the `if` below never reads.
#
# NOTE: the full-line `#` comments *inside* this RUN are stripped by the
# BuildKit frontend before the continuation lines are joined — another
# thing that depends on the `# syntax=` directive on line 1.
RUN --mount=type=bind,from=melexis-bundle,target=/melexis-bundle,readonly \
    if [ "$INCLUDE_MELEXIS" = "1" ]; then \
        set -e; \
        # Sanity-check: hw build was requested but the bundle wasn't bound.
        # Fail loudly here rather than producing a "looks-fine" image that
        # then crashes on `import pylin` at runtime. (`test -s` also
        # rejects a present-but-empty file.)
        test -s /melexis-bundle/melexis-pkgs.tar.gz \
          || { echo 'INCLUDE_MELEXIS=1 but melexis-pkgs.tar.gz missing — pass --build-context melexis-bundle=<dir>'; exit 2; }; \
        # Discover the venv's site-packages dir (path varies per Python
        # version) and extract the tarball directly into it. The tarball
        # contains the full Melexis set (mlx, pylin, pylinframe,
        # pymumclient, pymlxabc, pymlxchip, pymlxexceptions, pymlxgdb,
        # pymlxhex, pymlxloader, pyldfparser, pymbdfparser, pymelibu,
        # pymelibuframe) — they slot in as proper packages.
        SITE_PACKAGES=$(python -c "import site; print(site.getsitepackages()[0])"); \
        tar -xzf /melexis-bundle/melexis-pkgs.tar.gz -C "$SITE_PACKAGES"; \
        # Smoke-test the imports inside the builder so a corrupt or
        # incomplete tarball fails the build instead of producing a
        # broken runtime image. `import pylin` transitively pulls in
        # pymlxabc, so checking it here catches missing transitive deps.
        python -c "import pylin, pymumclient, pymlxabc; print('melexis pkgs OK')"; \
    fi


# ╔══════════════════════════════════════════════════════════════════════╗
# ║ Stage 2 — "runtime"                                                  ║
# ║                                                                      ║
# ║ Slim final image. Pulls the pre-built /opt/venv from the builder     ║
# ║ stage but doesn't carry compilers, headers, or pip caches.           ║
# ╚══════════════════════════════════════════════════════════════════════╝

# Fresh base image (same PYTHON_VERSION as the builder, so the venv's
# interpreter matches) — we don't inherit any of the builder stage's apt
# history or temp files.
FROM python:${PYTHON_VERSION}-slim AS runtime

# Runtime-only OS deps (alphabetical). The list is deliberately short:
#   ca-certificates — HTTPS trust store, so pip / requests / curl can
#                     verify TLS certificates if a test ever reaches
#                     out to a network resource.
#   libusb-1.0-0    — pyserial runtime dependency for some USB-serial
#                     adapters (the Owon PSU's adapter included).
#   tini            — the ~100 KB init wrapper we use as PID 1; see the
#                     ENTRYPOINT block below for why.
# apt index removed in the same layer so it never reaches the image.
RUN apt-get update \
 && apt-get install -y --no-install-recommends \
        ca-certificates \
        libusb-1.0-0 \
        tini \
 && rm -rf /var/lib/apt/lists/*

# Copy the prebuilt venv (with Melexis pkgs already inside, if requested)
# from the builder stage, to the same absolute path so the venv's script
# shebangs keep working. This is the *one* layer that carries all the
# Python deps — no `pip install` runs in the runtime stage.
# `--from=builder` references the stage we named with `AS builder`.
COPY --from=builder /opt/venv /opt/venv

# Runtime env (persists into every container started from this image):
#   PYTHONDONTWRITEBYTECODE=1   — don't litter the image with .pyc files
#                                  at first import.
#   PYTHONUNBUFFERED=1          — disable stdio buffering so pytest output
#                                  streams to `docker logs` in real time
#                                  instead of in 4 KB chunks.
#   PATH                        — venv's bin/ takes precedence over the
#                                  system Python, so plain `pytest` finds
#                                  the right one.
ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1 \
    PATH="/opt/venv/bin:${PATH}"

# /workspace is where the framework lives at runtime. WORKDIR creates it
# if missing and becomes the cwd for any `RUN`, `CMD`, or `docker exec`
# from here on.
WORKDIR /workspace

# Create the unprivileged user (uid 1000, the typical first-user uid on
# Linux) *before* the repo copy so `COPY --chown` can reference it.
# Running pytest as non-root is the secure default — even if a test does
# something unexpected, it can't trash /etc or escape into host paths it
# shouldn't see.
RUN useradd -m -u 1000 -s /bin/bash tester

# Copy the whole repo (filtered by ../.dockerignore which excludes
# .venv/, reports/*, vendor/BabyLIN library/, __pycache__, etc.).
# Ownership is set during the copy itself: a follow-up `RUN chown -R`
# would rewrite every file into a *second* layer and roughly double the
# workspace's contribution to image size.
COPY --chown=tester:tester . /workspace

# Create /reports — owned by tester, in the same layer — and only *then*
# declare it as a volume mount point. Order matters: filesystem changes
# made to a path *after* its VOLUME directive are discarded at build
# time, so a late chown would leave /reports root-owned and the tester
# user unable to write test output. Users supply
# `-v $PWD/reports:/reports` at run time and pytest's output lands on
# the host filesystem instead of disappearing with the container.
RUN mkdir -p /reports \
 && chown tester:tester /reports
VOLUME ["/reports"]

# Switch to the unprivileged user for everything below this line.
USER tester

# ── ENTRYPOINT — see explanation in docs/20_docker_image.md §3 ────────
#
# Exec (JSON-array) form, so tini really is PID 1 — no intermediate
# `/bin/sh -c`. Why tini and not pytest directly:
#
#   1. Signals: `docker stop` sends SIGTERM to PID 1. If pytest is PID 1
#      it doesn't always forward signals to xdist workers and may take
#      the full 10 s grace period before Docker SIGKILLs. tini forwards
#      signals correctly.
#
#   2. Zombie reaping: when a child exits in Linux it becomes a zombie
#      until its parent calls wait(). PID 1 *inherits* every orphaned
#      process — and pytest doesn't reap them. tini does. Long
#      parametrized runs with subprocesses would otherwise leak.
#
#   3. Exit code propagation: tini exits with its child's exit code, so
#      `docker run … && echo ok` works the way you'd expect.
#
# The `--` is the POSIX "end of options" marker. It tells tini to stop
# looking for tini-specific flags and exec everything after it as the
# command. Belt-and-suspenders in case the CMD starts with a `-flag`.
#
# At runtime the daemon assembles: `/usr/bin/tini -- <CMD tokens>` and
# tini exec()s the CMD as its child. (/usr/bin/tini is where Debian's
# tini package installs the binary.)
ENTRYPOINT ["/usr/bin/tini", "--"]

# Safe default command (CMD supplies the default arguments appended to
# the tini ENTRYPOINT; anything after the image name at `docker run`
# replaces it): collect-only of the *non-hardware* suite. An accidental
# `docker run ecu-tests:hw` will list tests, not start firing bench
# actions.
CMD ["pytest", "-m", "not hardware", "--collect-only", "-q"]
