"""
|
|
Custom pytest plugin to enhance test reports with detailed metadata.
|
|
|
|
Why we need this plugin:
|
|
- Surface business-facing info (Title, Description, Requirements, Steps, Expected Result) in the HTML report for quick review.
|
|
- Map tests to requirement IDs and produce a requirements coverage JSON artifact for traceability.
|
|
- Emit a compact CI summary (summary.md) for dashboards and PR comments.
|
|
|
|
How it works (high level):
|
|
- During collection, we track all test nodeids for later "unmapped" reporting.
|
|
- During test execution, we parse the test function's docstring and markers to extract metadata and requirement IDs; we attach these as user_properties on the report.
|
|
- We add custom columns (Title, Requirements) to the HTML table.
|
|
- At the end of the run, we write two artifacts into reports/: requirements_coverage.json and summary.md.
|
|
"""

import os
import re
import json
import datetime as _dt

import pytest

# -----------------------------
# Session-scoped state for reports
# -----------------------------
# Track all collected tests (nodeids) so we can later highlight tests that had no requirement mapping.
_ALL_COLLECTED_TESTS: set[str] = set()
# Map requirement ID (e.g., REQ-001) -> set of nodeids that cover it.
_REQ_TO_TESTS: dict[str, set[str]] = {}
# Nodeids that did map to at least one requirement.
_MAPPED_TESTS: set[str] = set()
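
# For illustration only (the requirement ID and nodeid are assumed examples), by the
# end of a run _REQ_TO_TESTS might look like:
#     {"REQ-001": {"tests/test_login.py::test_valid_login"}}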


def _normalize_req_id(token: str) -> str | None:
    """Normalize requirement token to REQ-XXX form.

    Accepts markers like 'req_001' or strings like 'REQ-001'.
    Returns None if not a recognizable requirement. This provides a single
    canonical format for coverage mapping and reporting.
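
    Illustrative doctest-style examples (the tokens below are assumed inputs):

    >>> _normalize_req_id("req_001")
    'REQ-001'
    >>> _normalize_req_id("REQ 7")
    'REQ-007'
    >>> _normalize_req_id("not_a_requirement") is None
    True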
    """
    token = token.strip()
    m1 = re.fullmatch(r"req_(\d{1,3})", token, re.IGNORECASE)
    if m1:
        return f"REQ-{int(m1.group(1)):03d}"
    m2 = re.fullmatch(r"REQ[-_ ]?(\d{1,3})", token, re.IGNORECASE)
    if m2:
        return f"REQ-{int(m2.group(1)):03d}"
    return None


def _extract_req_ids_from_docstring(docstring: str) -> list[str]:
    """Parse the 'Requirements:' line in the docstring and return REQ-XXX tokens.

    Supports comma- or whitespace-separated tokens and normalizes them.
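
    Illustrative doctest-style example (the requirement IDs are assumed inputs):

    >>> _extract_req_ids_from_docstring("Requirements: REQ-001, req_002")
    ['REQ-001', 'REQ-002']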
    """
    reqs: list[str] = []
    req_match = re.search(r"Requirements:\s*(.+)", docstring)
    if req_match:
        raw = req_match.group(1)
        # Split by comma or whitespace.
        parts = re.split(r"[\s,]+", raw)
        for p in parts:
            rid = _normalize_req_id(p)
            if rid:
                reqs.append(rid)
    return list(dict.fromkeys(reqs))  # dedupe, preserve order


def pytest_configure(config):
    # Ensure reports directory exists early so downstream hooks can write artifacts safely.
    os.makedirs("reports", exist_ok=True)


def pytest_collection_modifyitems(session, config, items):
    # Track all collected tests for unmapped detection (for the final coverage JSON).
    for item in items:
        _ALL_COLLECTED_TESTS.add(item.nodeid)


# (Legacy makereport implementation removed in favor of the hookwrapper below.)


def pytest_html_results_table_header(cells):
    """Add custom columns to the HTML report table.

    Why: make the most important context (Title and Requirements) visible at a glance
    in the HTML report table without opening each test's details section.
    """
    cells.insert(2, '<th class="sortable" data-column-type="text">Title</th>')
    cells.insert(3, '<th class="sortable" data-column-type="text">Requirements</th>')


def pytest_html_results_table_row(report, cells):
    """Add custom data to HTML report table rows.

    We pull the user_properties attached during makereport and render the
    Title and Requirements columns for each test row.
    """
    # Get title and requirements from user properties.
    title = ""
    requirements = ""

    for prop in getattr(report, "user_properties", []):
        if prop[0] == "title":
            title = prop[1]
        elif prop[0] == "requirements":
            requirements = prop[1]

    cells.insert(2, f'<td class="col-title">{title}</td>')
    cells.insert(3, f'<td class="col-requirements">{requirements}</td>')
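

# For illustration (the title and requirement values are assumed examples): a report's
# user_properties list populated by the makereport hook below looks roughly like
#     [("title", "Valid login succeeds"), ("requirements", "REQ-001, REQ-002")]
# and is rendered into the Title and Requirements columns by the row hook above.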


@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
    """Active hook: attach metadata to reports and build requirement coverage.

    Why hook at makereport:
    - We want to attach metadata to the test report object so it shows up in
      the HTML and JUnit outputs via user_properties.
    - We also build the requirements mapping here because we have both markers
      and docstrings available on the test item.
    """
    outcome = yield
    report = outcome.get_result()

    if call.when == "call" and hasattr(item, "function"):
        # Add test metadata from the docstring: parse Title, Description, Requirements,
        # Test Steps, and Expected Result. Each is optional and extracted if present.
        docstring = ""  # Defined up front so the coverage mapping below also works for tests without docstrings.
        if item.function.__doc__:
            docstring = item.function.__doc__.strip()

            # Extract and add all metadata
            metadata: dict[str, str] = {}

            # Title
            title_match = re.search(r"Title:\s*(.+)", docstring)
            if title_match:
                metadata["title"] = title_match.group(1).strip()

            # Description (runs up to the next section heading)
            desc_match = re.search(
                r"Description:\s*(.+?)(?=\n\s*(?:Requirements|Test Steps|Expected Result))",
                docstring,
                re.DOTALL,
            )
            if desc_match:
                metadata["description"] = " ".join(desc_match.group(1).strip().split())

            # Requirements
            req_match = re.search(r"Requirements:\s*(.+)", docstring)
            if req_match:
                metadata["requirements"] = req_match.group(1).strip()

            # Test steps: collapse numbered lines into a single " | "-separated string.
            steps_match = re.search(r"Test Steps:\s*(.+?)(?=\n\s*Expected Result)", docstring, re.DOTALL)
            if steps_match:
                steps = steps_match.group(1).strip()
                steps_clean = re.sub(r"\n\s*\d+\.\s*", " | ", steps)
                metadata["test_steps"] = steps_clean.strip(" |")

            # Expected result (bullet dashes become "•" markers)
            result_match = re.search(r"Expected Result:\s*(.+?)(?=\n\s*\"\"\"|\Z)", docstring, re.DOTALL)
            if result_match:
                expected = " ".join(result_match.group(1).strip().split())
                metadata["expected_result"] = expected.replace("- ", "• ")

            # Add all metadata as user properties (the HTML plugin reads these).
            if metadata:
                if not hasattr(report, "user_properties"):
                    report.user_properties = []
                for key, value in metadata.items():
                    report.user_properties.append((key, value))

        # Build requirement coverage mapping
        nodeid = item.nodeid
        req_ids: set[str] = set()

        # From markers: allow @pytest.mark.req_001 style to count toward coverage
        for mark in item.iter_markers():
            rid = _normalize_req_id(mark.name)
            if rid:
                req_ids.add(rid)

        # From docstring line 'Requirements:'
        for rid in _extract_req_ids_from_docstring(docstring):
            req_ids.add(rid)

        # Update global maps for coverage JSON
        if req_ids:
            _MAPPED_TESTS.add(nodeid)
            for rid in req_ids:
                bucket = _REQ_TO_TESTS.setdefault(rid, set())
                bucket.add(nodeid)


def pytest_terminal_summary(terminalreporter, exitstatus):
    """Write CI-friendly summary and requirements coverage JSON.

    Why we write these artifacts:
    - requirements_coverage.json → machine-readable traceability matrix for CI dashboards.
    - summary.md → quick textual summary that can be surfaced in PR checks or CI job logs.
    """
    # Compute result counts from the terminal reporter's stats buckets.
    stats = terminalreporter.stats

    def _count(key):
        return len(stats.get(key, []))

    results = {
        "passed": _count("passed"),
        "failed": _count("failed"),
        "skipped": _count("skipped"),
        "error": _count("error"),
        "xfailed": _count("xfailed"),
        "xpassed": _count("xpassed"),
        "rerun": _count("rerun"),
        "total": sum(len(v) for v in stats.values()),
        "collected": getattr(terminalreporter, "_numcollected", None),
    }

    # Prepare JSON payload for requirements coverage and quick links to artifacts
    coverage = {
        "generated_at": _dt.datetime.now().astimezone().isoformat(),
        "results": results,
        "requirements": {rid: sorted(nodes) for rid, nodes in sorted(_REQ_TO_TESTS.items())},
        "unmapped_tests": sorted(_ALL_COLLECTED_TESTS - _MAPPED_TESTS),
        "files": {
            "html": "reports/report.html",
            "junit": "reports/junit.xml",
            "summary_md": "reports/summary.md",
        },
    }
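
    # For illustration only (the requirement ID, nodeids, and counts below are assumed
    # examples), the resulting requirements_coverage.json looks roughly like:
    #     {
    #       "generated_at": "2024-01-01T12:00:00+00:00",
    #       "results": {"passed": 10, "failed": 1, ...},
    #       "requirements": {"REQ-001": ["tests/test_login.py::test_valid_login"]},
    #       "unmapped_tests": ["tests/test_misc.py::test_helper"],
    #       "files": {"html": "reports/report.html", ...}
    #     }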

    # Write JSON coverage file
    json_path = os.path.join("reports", "requirements_coverage.json")
    try:
        with open(json_path, "w", encoding="utf-8") as f:
            json.dump(coverage, f, indent=2)
    except Exception as e:
        terminalreporter.write_line(f"[conftest_plugin] Failed to write {json_path}: {e}")

    # Write Markdown summary for CI consumption
    md_path = os.path.join("reports", "summary.md")
    try:
        lines = [
            "# Test Run Summary",
            "",
            f"Generated: {coverage['generated_at']}",
            "",
            f"- Collected: {results.get('collected')}",
            f"- Passed: {results['passed']}",
            f"- Failed: {results['failed']}",
            f"- Skipped: {results['skipped']}",
            f"- Errors: {results['error']}",
            f"- XFailed: {results['xfailed']}",
            f"- XPassed: {results['xpassed']}",
            f"- Rerun: {results['rerun']}",
            "",
            "## Artifacts",
            "- HTML Report: ./report.html",
            "- JUnit XML: ./junit.xml",
            "- Requirements Coverage (JSON): ./requirements_coverage.json",
        ]
        with open(md_path, "w", encoding="utf-8") as f:
            f.write("\n".join(lines) + "\n")
    except Exception as e:
        terminalreporter.write_line(f"[conftest_plugin] Failed to write {md_path}: {e}")