Issue #5/#7 refactor metrics plugin from conftest to own module
soxofaan committed Jul 22, 2024
1 parent 10d431a commit ed6e218
Showing 4 changed files with 135 additions and 89 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/benchmarks.yaml
@@ -22,7 +22,7 @@ jobs:
      run: |
        cd qa/benchmarks
        mkdir report
-       pytest -k dummy --html report/report.html --self-contained-html --junit-xml=report/report.xml
+       pytest -k dummy --html report/report.html --self-contained-html --test-metrics=report/metrics.json
      env:
        OPENEO_AUTH_METHOD: client_credentials
        OPENEO_AUTH_CLIENT_CREDENTIALS_CDSEFED: ${{ secrets.OPENEO_AUTH_CLIENT_CREDENTIALS_CDSEFED }}
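For local experimentation, the CI step above can be reproduced outside the workflow. A minimal sketch (not part of the commit), assuming it is run from `qa/benchmarks` with the benchmark dependencies installed (pytest-html and the `apex_algorithm_qa_tools` package from `qa/tools`):

```python
# Hypothetical local equivalent of the CI step above; not part of the commit.
import pathlib

import pytest

pathlib.Path("report").mkdir(exist_ok=True)
exit_code = pytest.main(
    [
        "-k", "dummy",
        "--html", "report/report.html",
        "--self-contained-html",
        "--test-metrics=report/metrics.json",
    ]
)
print("pytest exit code:", exit_code)
```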
90 changes: 5 additions & 85 deletions qa/benchmarks/tests/conftest.py
@@ -1,9 +1,7 @@
-import json
import logging
import os
import random
-from pathlib import Path
-from typing import Any, Callable, List, Tuple, Union
+from typing import Callable

import openeo
import pytest
@@ -12,6 +10,10 @@
# TODO: how to make sure the logging/printing from this plugin is actually visible by default?
_log = logging.getLogger(__name__)

+pytest_plugins = [
+    "apex_algorithm_qa_tools.test_metrics",
+]


def pytest_addoption(parser):
    parser.addoption(
@@ -22,33 +24,6 @@ def pytest_addoption(parser):
        type=int,
        help="Only run random selected subset benchmarks.",
    )
-    parser.addoption(
-        "--openeo-metrics",
-        metavar="path",
-        action="store",
-        dest="openeo_metrics_path",
-        default=None,
-        help="File to store openEO metrics.",
-    )
-
-
-def pytest_configure(config):
-    openeo_metrics_path = config.getoption("openeo_metrics_path")
-    if (
-        openeo_metrics_path
-        # Don't register on xdist worker nodes
-        and not hasattr(config, "workerinput")
-    ):
-        config.pluginmanager.register(
-            # TODO: create config for this path
-            OpeneoMetricReporter(openeo_metrics_path),
-            name="openeo_metrics_reporter",
-        )
-
-
-def pytest_unconfigure(config):
-    if config.pluginmanager.hasplugin("openeo_metrics_report"):
-        config.pluginmanager.unregister(name="openeo_metrics_reporter")


def pytest_collection_modifyitems(session, config, items):
@@ -66,61 +41,6 @@ def pytest_collection_modifyitems(session, config, items):
        items[:] = random.sample(items, k=subset_size)


-@pytest.fixture
-def openeo_metric(request: pytest.FixtureRequest) -> Callable[[str, Any], None]:
-    """
-    Fixture to record openEO metrics during openEO tests/benchmarks,
-    which will be stored in the pytest node's "user_properties".
-    Collect and export these metrics with OpeneoMetricReporter.
-    """
-
-    def append(name: str, value: Any):
-        _get_openeo_metrics(request.node.user_properties).append((name, value))
-
-    return append
-
-
-def _get_openeo_metrics(user_properties: List[Tuple[str, Any]]) -> List:
-    for name, value in user_properties:
-        if name == OpeneoMetricReporter.USER_PROPERTY_KEY:
-            return value
-    # Not found: create it
-    metrics = []
-    user_properties.append((OpeneoMetricReporter.USER_PROPERTY_KEY, metrics))
-    return metrics
-
-
-class OpeneoMetricReporter:
-    # TODO: isolate all this openeo_metrics stuff to proper plugin
-    USER_PROPERTY_KEY = "openeo_metrics"
-
-    def __init__(self, path: Union[str, Path]):
-        self.path = Path(path)
-        self.metrics = []
-
-    def pytest_runtest_logreport(self, report: pytest.TestReport):
-        if report.when == "call":
-            self.metrics.append(
-                {
-                    "nodeid": report.nodeid,
-                    "outcome": report.outcome,
-                    "openeo_metrics": _get_openeo_metrics(report.user_properties),
-                    "duration": report.duration,
-                    "start": report.start,
-                    "stop": report.stop,
-                    "longrepr": repr(report.longrepr),
-                }
-            )
-
-    def pytest_sessionfinish(self, session):
-        with self.path.open("w") as f:
-            json.dump(self.metrics, f, indent=2)
-
-    def pytest_terminal_summary(self, terminalreporter):
-        terminalreporter.write_sep("-", f"Generated openEO metrics report: {self.path}")


def _get_client_credentials_env_var(url: str) -> str:
    """
    Get client credentials env var name for a given backend URL.
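After this refactor, the project-level `conftest.py` only needs to declare the plugin; a minimal sketch of such a `conftest.py` (assuming the `apex_algorithm_qa_tools` package from `qa/tools` is importable):

```python
# Minimal conftest.py sketch: the reporter class no longer lives here,
# it is pulled in as a pytest plugin from its own module.
pytest_plugins = [
    "apex_algorithm_qa_tools.test_metrics",
]
```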
6 changes: 3 additions & 3 deletions qa/benchmarks/tests/test_dummy.py
@@ -3,11 +3,11 @@
"""


-def test_dummy(openeo_metric):
+def test_dummy(test_metric):
    x = 3
    y = 5
-    openeo_metric("x", x)
-    openeo_metric("y", y)
+    test_metric("x squared", x * x)
+    test_metric("y", y)
    assert x + y == 7


126 changes: 126 additions & 0 deletions qa/tools/apex_algorithm_qa_tools/test_metrics.py
@@ -0,0 +1,126 @@
"""
Pytest plugin to record test/benchmark metrics to a JSON file.
Usage:
- Enable the plugin in `conftest.py`:
```python
pytest_plugins = [
"apex_algorithm_qa_tools.test_metrics",
]
```
- Use the `test_metric` fixture to record test metrics:
```python
def test_dummy(test_metric):
x = 3
test_metric("x squared", x*x)
...
- Run the tests with `--test-metrics=path/to/metrics.json`
to store metrics in a JSON file
"""

import json
from pathlib import Path
from typing import Any, Callable, List, Tuple, Union

import pytest

_TEST_METRICS_PATH = "test_metrics_path"
_TEST_METRICS_REPORTER = "test_metrics_reporter"


def pytest_addoption(parser):
    parser.addoption(
        "--test-metrics",
        metavar="PATH",
        action="store",
        dest=_TEST_METRICS_PATH,
        default=None,
        help="Path to JSON file to store test/benchmark metrics.",
    )


def pytest_configure(config):
    test_metrics_path = config.getoption(_TEST_METRICS_PATH)
    if (
        test_metrics_path
        # Don't register on xdist worker nodes
        and not hasattr(config, "workerinput")
    ):
        config.pluginmanager.register(
            MetricsReporter(path=test_metrics_path),
            name=_TEST_METRICS_REPORTER,
        )


def pytest_unconfigure(config):
    if config.pluginmanager.hasplugin(_TEST_METRICS_REPORTER):
        config.pluginmanager.unregister(name=_TEST_METRICS_REPORTER)


class MetricsReporter:
    def __init__(
        self, path: Union[str, Path], user_properties_key: str = "test_metrics"
    ):
        self.path = Path(path)
        self.metrics: List[dict] = []
        self.user_properties_key = user_properties_key

    def pytest_runtest_logreport(self, report: pytest.TestReport):
        if report.when == "call":
            self.metrics.append(
                {
                    "nodeid": report.nodeid,
                    "report": {
                        "outcome": report.outcome,
                        "duration": report.duration,
                        "start": report.start,
                        "stop": report.stop,
                    },
                    "metrics": self.get_test_metrics(report.user_properties),
                }
            )

    def pytest_sessionfinish(self, session):
        with self.path.open("w", encoding="utf8") as f:
            json.dump(self.metrics, f, indent=2)

    def pytest_terminal_summary(self, terminalreporter):
        terminalreporter.write_sep("-", f"Generated test metrics report: {self.path}")

    def get_test_metrics(
        self, user_properties: List[Tuple[str, Any]]
    ) -> List[Tuple[str, Any]]:
        """
        Extract existing test metrics items from user properties,
        or create a new one.
        """
        for name, value in user_properties:
            if name == self.user_properties_key:
                return value
        # Not found: create it
        metrics = []
        user_properties.append((self.user_properties_key, metrics))
        return metrics


@pytest.fixture
def test_metric(
    pytestconfig: pytest.Config, request: pytest.FixtureRequest
) -> Callable[[str, Any], None]:
    """
    Fixture to record test metrics during openEO tests/benchmarks,
    which will be stored in the pytest node's "user_properties".
    """

    reporter = pytestconfig.pluginmanager.get_plugin(_TEST_METRICS_REPORTER)

    def append(name: str, value: Any):
        reporter.get_test_metrics(request.node.user_properties).append((name, value))

    return append
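For illustration, a rough sketch (not from the commit) of the JSON document that `MetricsReporter` writes on session finish, derived from the fields collected in `pytest_runtest_logreport`; the node id and values below are made up, and the `(name, value)` metric tuples end up serialized as two-element JSON arrays:

```python
# Illustrative only: approximate shape of report/metrics.json.
example_metrics = [
    {
        "nodeid": "tests/test_dummy.py::test_dummy",
        "report": {
            "outcome": "passed",
            "duration": 0.01,        # seconds
            "start": 1721638800.00,  # epoch timestamps from pytest
            "stop": 1721638800.01,
        },
        "metrics": [["x squared", 9], ["y", 5]],
    },
]
```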
