From 3bd04fd3b42e527edf531cc151148c48100cdf56 Mon Sep 17 00:00:00 2001 From: Frank Niessink Date: Thu, 13 Jun 2024 21:06:20 +0200 Subject: [PATCH] Update quality tools. - Add pyproject-fmt to quality tools. - Remove safety from quality tools. - Remove duplication between unittest.sh scripts. - Remove duplication between quality.sh scripts. - Remove duplication between pip-compile.sh scripts. - Remove duplication between pip-install.sh scripts. - Add type checking to Python tests. Closes #8928. --- .circleci/config.yml | 19 +- .../workflows/application-tests-quality.yml | 23 +++ .github/workflows/feature-tests-quality.yml | 4 - .github/workflows/release-quality.yml | 23 +++ ci/base.sh | 24 +-- ci/pip-base.sh | 17 ++ ci/pipx-base.sh | 21 ++ ci/python_files_and_folders.py | 13 ++ ci/quality-base.sh | 63 ++++++ ci/requirements_files.py | 27 +++ ci/spec.py | 8 +- ci/unittest-base.sh | 11 +- components/api_server/ci/pip-compile.sh | 7 +- components/api_server/ci/pip-install.sh | 8 +- components/api_server/ci/quality.sh | 31 +-- components/api_server/ci/unittest.sh | 9 +- components/api_server/pyproject.toml | 149 ++++++++------- .../requirements/requirements-dev.txt | 4 +- .../api_server/requirements/requirements.txt | 4 +- components/api_server/src/utils/functions.py | 2 +- components/api_server/tests/base.py | 5 +- .../tests/initialization/test_migrations.py | 179 +++++++++--------- .../tests/model/test_issue_tracker.py | 2 +- .../tests/model/test_transformations.py | 4 +- .../routes/plugins/test_route_auth_plugin.py | 2 +- .../api_server/tests/routes/test_auth.py | 2 +- .../tests/routes/test_measurement.py | 3 +- .../api_server/tests/routes/test_report.py | 35 ++-- .../api_server/tests/routes/test_settings.py | 2 +- .../api_server/tests/routes/test_source.py | 4 +- components/collector/.vulture_ignore_list.py | 137 +++++++++++++- components/collector/ci/pip-compile.sh | 7 +- components/collector/ci/pip-install.sh | 8 +- components/collector/ci/quality.sh | 33 +--- 
components/collector/ci/unittest.sh | 9 +- components/collector/pyproject.toml | 134 +++++++------ .../test_security_warnings.py | 2 +- components/frontend/ci/quality.sh | 5 +- components/frontend/ci/unittest.sh | 5 +- components/notifier/ci/pip-compile.sh | 7 +- components/notifier/ci/pip-install.sh | 8 +- components/notifier/ci/quality.sh | 32 +--- components/notifier/ci/unittest.sh | 9 +- components/notifier/pyproject.toml | 111 ++++++----- .../tests/database/test_measurements.py | 6 +- .../notifier/tests/database/test_reports.py | 6 +- .../notifier/tests/notifier/test_notifier.py | 2 +- components/shared_code/ci/pip-compile.sh | 6 +- components/shared_code/ci/pip-install.sh | 8 +- components/shared_code/ci/quality.sh | 34 +--- components/shared_code/ci/unittest.sh | 9 +- components/shared_code/pyproject.toml | 128 +++++++------ components/shared_code/tests/shared/base.py | 3 + .../shared/database/test_connection_params.py | 2 +- .../shared/database/test_measurements.py | 8 +- .../tests/shared/model/test_measurement.py | 3 +- .../tests/shared/model/test_report.py | 8 +- .../tests/shared_data_model/meta/base.py | 10 +- .../tests/shared_data_model/meta/test_base.py | 13 +- .../shared_data_model/meta/test_data_model.py | 99 ++++------ .../shared_data_model/meta/test_entity.py | 27 ++- .../shared_data_model/meta/test_metric.py | 19 +- .../shared_data_model/meta/test_parameter.py | 98 ++++------ .../shared_data_model/meta/test_source.py | 23 +-- .../shared_data_model/meta/test_subject.py | 2 - .../shared_data_model/test_parameters.py | 11 +- docs/ci/pip-compile.sh | 7 +- docs/ci/pip-install.sh | 8 +- docs/ci/quality.sh | 34 +--- docs/ci/unittest.sh | 9 +- docs/pyproject.toml | 124 ++++++------ release/.vulture_ignore_list.py | 0 release/ci/pip-compile.sh | 7 +- release/ci/pip-install.sh | 6 +- release/ci/quality.sh | 6 + release/pyproject.toml | 55 +++++- release/release.py | 40 +++- release/requirements/requirements-dev.txt | 21 +- 
.../application_tests/.vulture_ignore_list.py | 0 tests/application_tests/ci/pip-compile.sh | 7 +- tests/application_tests/ci/pip-install.sh | 6 +- tests/application_tests/ci/quality.sh | 6 + tests/application_tests/ci/unittest.sh | 3 + tests/application_tests/pyproject.toml | 72 ++++++- .../requirements/requirements-dev.txt | 26 ++- tests/application_tests/src/test_api.py | 4 +- tests/application_tests/src/test_report.py | 33 ++-- tests/feature_tests/ci/pip-compile.sh | 7 +- tests/feature_tests/ci/pip-install.sh | 6 +- tests/feature_tests/ci/quality.sh | 31 +-- tests/feature_tests/pyproject.toml | 72 ++++--- 91 files changed, 1358 insertions(+), 969 deletions(-) create mode 100644 .github/workflows/application-tests-quality.yml create mode 100644 .github/workflows/release-quality.yml create mode 100644 ci/pip-base.sh create mode 100644 ci/pipx-base.sh create mode 100644 ci/python_files_and_folders.py create mode 100644 ci/quality-base.sh create mode 100644 ci/requirements_files.py create mode 100644 release/.vulture_ignore_list.py create mode 100755 release/ci/quality.sh create mode 100644 tests/application_tests/.vulture_ignore_list.py create mode 100755 tests/application_tests/ci/quality.sh create mode 100755 tests/application_tests/ci/unittest.sh diff --git a/.circleci/config.yml b/.circleci/config.yml index 44bffeafb2..2e4a5bb0bc 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -10,7 +10,7 @@ jobs: auth: username: $DOCKERHUB_USERNAME password: $DOCKERHUB_PASSWORD - parallelism: 5 + parallelism: 6 steps: - checkout - run: | @@ -19,7 +19,8 @@ jobs: 1) component=components/notifier;; 2) component=components/api_server;; 3) component=components/shared_code;; - 4) component=tests/feature_tests;; + 4) component=tests/application_tests;; + 5) component=tests/feature_tests;; esac cd $component mkdir -p build @@ -36,6 +37,10 @@ jobs: path: components/api_server/build - store_artifacts: path: components/shared_code/build + - store_artifacts: + path: 
components/application_tests/build + - store_artifacts: + path: components/feature_tests/build unittest_frontend: docker: @@ -51,7 +56,7 @@ jobs: ci/unittest.sh ci/quality.sh - unittest_docs: + unittest_other: machine: image: default steps: @@ -64,6 +69,12 @@ jobs: ci/pip-install.sh ci/unittest.sh ci/quality.sh + - run: | + cd release + python3 -m venv venv + . venv/bin/activate + ci/pip-install.sh + ci/quality.sh application_tests: machine: @@ -117,7 +128,7 @@ workflows: context: QualityTime - unittest_frontend: context: QualityTime - - unittest_docs: + - unittest_other: context: QualityTime - docker/hadolint: context: QualityTime diff --git a/.github/workflows/application-tests-quality.yml b/.github/workflows/application-tests-quality.yml new file mode 100644 index 0000000000..aeae88f33f --- /dev/null +++ b/.github/workflows/application-tests-quality.yml @@ -0,0 +1,23 @@ +name: Application tests quality + +on: [push] + +jobs: + build: + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4.1.7 + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.12' + - name: Install dependencies + run: | + cd tests/application_tests + ci/pip-install.sh + - name: Quality + run: | + cd tests/application_tests + ci/quality.sh diff --git a/.github/workflows/feature-tests-quality.yml b/.github/workflows/feature-tests-quality.yml index d6b30d326b..4275036cb3 100644 --- a/.github/workflows/feature-tests-quality.yml +++ b/.github/workflows/feature-tests-quality.yml @@ -17,10 +17,6 @@ jobs: run: | cd tests/feature_tests ci/pip-install.sh - - name: Test - run: | - cd tests/feature_tests - ci/unittest.sh - name: Quality run: | cd tests/feature_tests diff --git a/.github/workflows/release-quality.yml b/.github/workflows/release-quality.yml new file mode 100644 index 0000000000..fd93ec8fb2 --- /dev/null +++ b/.github/workflows/release-quality.yml @@ -0,0 +1,23 @@ +name: Release script quality + +on: [push] + +jobs: + build: + + runs-on: 
ubuntu-latest + + steps: + - uses: actions/checkout@v4.1.7 + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.12' + - name: Install dependencies + run: | + cd release + ci/pip-install.sh + - name: Quality + run: | + cd release + ci/quality.sh diff --git a/ci/base.sh b/ci/base.sh index 993528b88c..b97de2937b 100644 --- a/ci/base.sh +++ b/ci/base.sh @@ -4,29 +4,19 @@ set -e -run () { - # Show the invoked command using a subdued text color so it's clear which tool is running. +run() { + # Show the invoked command using a subdued text color so it is clear which tool is running. header='\033[95m' endstyle='\033[0m' echo -e "${header}$*${endstyle}" eval "$*" } -spec () { - # The versions of tools are specified in pyproject.toml. This function calls the spec.py script which in turn - # reads the version numbers from the pyproject.toml file. - - # Get the dir of this script so the spec.py script that is in the same dir as this script can be invoked: - SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) - python $SCRIPT_DIR/spec.py $* +script_dir() { + # Get the dir of this script so that scripts that are in the same dir as this script can be invoked. + # See https://stackoverflow.com/questions/39340169/dir-cd-dirname-bash-source0-pwd-how-does-that-work. 
+ echo $( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) } -# Don't install tools in the global pipx home folder, but locally for each component: -export PIPX_HOME=.pipx -export PIPX_BIN_DIR=$PIPX_HOME/bin - # For Windows compatibility; prevent path from ending with a ':' -export PYTHONPATH=`python -c 'import sys;print(":".join(sys.argv[1:]))' src $PYTHONPATH` - -# Insert a custom compile command in generated requirements file, so it's clear how they are generated: -export CUSTOM_COMPILE_COMMAND="ci/pip-compile.sh" +export PYTHONPATH=$(python -c 'import sys;print(":".join(sys.argv[1:]))' src $PYTHONPATH) diff --git a/ci/pip-base.sh b/ci/pip-base.sh new file mode 100644 index 0000000000..968d4098bd --- /dev/null +++ b/ci/pip-base.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +source base.sh + +# Insert a custom compile command in generated requirements file so it is clear how they are generated: +export CUSTOM_COMPILE_COMMAND="ci/pip-compile.sh" + +run_pip_compile() { + for requirements_file in $(python $(script_dir)/requirements_files.py); do + extra=$([[ "$requirements_file" == *"-dev"* ]] && echo "--extra dev" || echo "") + run pip-compile $extra --output-file $requirements_file pyproject.toml + done +} + +run_pip_install() { + run pip install --ignore-installed --quiet --use-pep517 $@ +} diff --git a/ci/pipx-base.sh b/ci/pipx-base.sh new file mode 100644 index 0000000000..5faf896053 --- /dev/null +++ b/ci/pipx-base.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +source base.sh + +spec() { + # The versions of tools are specified in pyproject.toml. This function calls the spec.py script which in turn + # reads the version numbers from the pyproject.toml file. The function takes one argument: the package to return + # the spec for. + python $(script_dir)/spec.py $1 +} + +run_pipx() { + # Look up the version of the command using the spec function and run the command using pipx. 
+ command_spec=$(spec $1) + shift 1 + run pipx run $command_spec $@ +} + +# Don't install tools in the global pipx home folder, but locally for each component: +export PIPX_HOME=.pipx +export PIPX_BIN_DIR=$PIPX_HOME/bin diff --git a/ci/python_files_and_folders.py b/ci/python_files_and_folders.py new file mode 100644 index 0000000000..85bfadaba2 --- /dev/null +++ b/ci/python_files_and_folders.py @@ -0,0 +1,13 @@ +"""Determine the Python files and folders in the current directory.""" + +from pathlib import Path + +def python_files_and_folders() -> list[str]: + """Return the Python files and folders in the current directory.""" + python_files = [python_file.name for python_file in Path(".").glob('*.py') if not python_file.name.startswith(".")] + python_folders = [folder_name for folder_name in ("src", "tests") if Path(folder_name).exists()] + return python_files + python_folders + + +if __name__ == "__main__": + print(" ".join(python_files_and_folders())) diff --git a/ci/quality-base.sh b/ci/quality-base.sh new file mode 100644 index 0000000000..60adfe2488 --- /dev/null +++ b/ci/quality-base.sh @@ -0,0 +1,63 @@ +#!/bin/bash + +source pipx-base.sh + +PYTHON_FILES_AND_FOLDERS=$(python $(script_dir)/python_files_and_folders.py) + +run_ruff() { + run_pipx ruff check $PYTHON_FILES_AND_FOLDERS + run_pipx ruff format --check $PYTHON_FILES_AND_FOLDERS +} + +run_fixit() { + run_pipx fixit lint $PYTHON_FILES_AND_FOLDERS +} + +run_mypy() { + # Run mypy with or without pydantic plugin depending on whether pydantic is listed as dependency in the tools + # section of the optional dependencies in the pyproject.toml file. 
+ pydantic_spec=$(spec pydantic) + if [[ "$pydantic_spec" == "" ]]; then + run_pipx mypy --python-executable=$(which python) $PYTHON_FILES_AND_FOLDERS + else + # To use the pydantic plugin, we need to first install mypy and then inject pydantic + run pipx install --force $(spec mypy) # --force works around this bug: https://github.com/pypa/pipx/issues/795 + run pipx inject mypy $pydantic_spec + run $PIPX_BIN_DIR/mypy --python-executable=$(which python) $PYTHON_FILES_AND_FOLDERS + fi +} + +run_pyproject_fmt() { + run_pipx pyproject-fmt --check pyproject.toml +} + +run_bandit() { + run_pipx bandit --configfile pyproject.toml --quiet --recursive $PYTHON_FILES_AND_FOLDERS +} + +run_pip_audit() { + run_pipx pip-audit --strict --progress-spinner=off $(python $(script_dir)/requirements_files.py "-r %s") +} + +run_vulture() { + run_pipx vulture --min-confidence 0 $PYTHON_FILES_AND_FOLDERS .vulture_ignore_list.py $@ +} + +run_vale() { + run_pipx vale sync + run_pipx vale --no-wrap --glob "*.md" src +} + +run_markdownlint() { + run ./node_modules/markdownlint-cli/markdownlint.js src/**/*.md +} + +check_python_quality() { + run_ruff + run_fixit + run_mypy + run_pyproject_fmt + run_pip_audit + run_bandit + run_vulture +} diff --git a/ci/requirements_files.py b/ci/requirements_files.py new file mode 100644 index 0000000000..d2d3d027d8 --- /dev/null +++ b/ci/requirements_files.py @@ -0,0 +1,27 @@ +"""Determine the Python requirements files. + +The script returns the requirements files as space separated string: + +$ requirements_files.py +requirements/requirements.txt requirements/requirements-dev.txt + +The script takes an optional template argument that is used to wrap each requirements filename. 
For example: + +$ requirements_files.py "-r %s" +-r requirements/requirements.txt -r requirements/requirements-dev.txt +""" + +import sys +from pathlib import Path + + +def requirements_files() -> list[str]: + """Return the Python requirements files in the requirements directory.""" + requirements_files = Path(".").glob("requirements/requirements*.txt") + # We never return the internal requirements file, because it does not need to be checked nor compiled + return [str(filename) for filename in requirements_files if "requirements-internal" not in filename.name] + + +if __name__ == "__main__": + template = sys.argv[1] if len(sys.argv) > 1 else "%s" + print(" ".join([template % filename for filename in requirements_files()])) diff --git a/ci/spec.py b/ci/spec.py index d6ef4a9822..bd3f3e9023 100644 --- a/ci/spec.py +++ b/ci/spec.py @@ -6,11 +6,15 @@ def spec(package: str, pyproject_toml_path: Path) -> str: - """Return the spec for the package from the pyproject.toml file.""" + """Return the spec for the package from the tools section in the pyproject.toml file. + + Returns an empty string if no spec can be found for the specified package. 
+ """ with pyproject_toml_path.open("rb") as pyproject_toml_file: pyproject_toml = tomllib.load(pyproject_toml_file) tools = pyproject_toml["project"]["optional-dependencies"]["tools"] - return [spec for spec in tools if spec.split("==")[0] == package][0] + package_specs = [spec for spec in tools if spec.split("==")[0] == package] + return package_specs[0] if package_specs else "" if __name__ == "__main__": diff --git a/ci/unittest-base.sh b/ci/unittest-base.sh index e8bd57ca43..9beef787e8 100644 --- a/ci/unittest-base.sh +++ b/ci/unittest-base.sh @@ -1,8 +1,13 @@ #!/bin/bash -# Get the dir of this script so the vbase.sh script that is in the same dir as this script can be sourced: -SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) -source $SCRIPT_DIR/base.sh +source base.sh # Turn on development mode, see https://docs.python.org/3/library/devmode.html export PYTHONDEVMODE=1 + +run_coverage() { + run coverage run -m unittest --quiet + run coverage report --fail-under=0 + run coverage html --fail-under=0 + run coverage xml # Fail if coverage is too low, but only after the text and HTML reports have been generated +} diff --git a/components/api_server/ci/pip-compile.sh b/components/api_server/ci/pip-compile.sh index 815df6440a..06fa99c058 100755 --- a/components/api_server/ci/pip-compile.sh +++ b/components/api_server/ci/pip-compile.sh @@ -1,7 +1,6 @@ #!/bin/bash -source ../../ci/base.sh +PATH="$PATH:../../ci" +source pip-base.sh -# Update the compiled requirements files -run pip-compile --output-file requirements/requirements.txt pyproject.toml -run pip-compile --extra dev --output-file requirements/requirements-dev.txt pyproject.toml +run_pip_compile diff --git a/components/api_server/ci/pip-install.sh b/components/api_server/ci/pip-install.sh index 25a3052b7f..40b457d94c 100755 --- a/components/api_server/ci/pip-install.sh +++ b/components/api_server/ci/pip-install.sh @@ -1,7 +1,7 @@ #!/bin/bash -source ../../ci/base.sh 
+PATH="$PATH:../../ci" +source pip-base.sh -# Install the requirements -run pip install --ignore-installed --quiet --use-pep517 -r requirements/requirements-dev.txt -run pip install --ignore-installed --quiet --use-pep517 -r requirements/requirements-internal.txt +run_pip_install -r requirements/requirements-dev.txt +run_pip_install -r requirements/requirements-internal.txt diff --git a/components/api_server/ci/quality.sh b/components/api_server/ci/quality.sh index 12a07f1a48..2f19fa446e 100755 --- a/components/api_server/ci/quality.sh +++ b/components/api_server/ci/quality.sh @@ -1,31 +1,6 @@ #!/bin/bash -source ../../ci/base.sh +PATH="$PATH:../../ci" +source quality-base.sh -# Ruff -run pipx run `spec ruff` check . -run pipx run `spec ruff` format --check . - -# Fixit -run pipx run `spec fixit` lint src tests - -# Mypy -run pipx run `spec mypy` --python-executable=$(which python) src - -# pip-audit -run pipx run `spec pip-audit` --strict --progress-spinner=off -r requirements/requirements.txt -r requirements/requirements-dev.txt - -# Safety -# Vulnerability ID: 67599 -# ADVISORY: ** DISPUTED ** An issue was discovered in pip (all versions) because it installs the version with the -# highest version number, even if the user had intended to obtain a private package from a private index. This only -# affects use of the --extra-index-url option, and exploitation requires that the... 
-# CVE-2018-20225 -# For more information about this vulnerability, visit https://data.safetycli.com/v/67599/97c -run pipx run `spec safety` check --bare --ignore 67599 -r requirements/requirements.txt -r requirements/requirements-dev.txt - -# Bandit -run pipx run `spec bandit` --quiet --recursive src/ - -# Vulture -run pipx run `spec vulture` --min-confidence 0 src/ tests/ .vulture_ignore_list.py +check_python_quality diff --git a/components/api_server/ci/unittest.sh b/components/api_server/ci/unittest.sh index 87a5ea5b65..be9b0fa620 100755 --- a/components/api_server/ci/unittest.sh +++ b/components/api_server/ci/unittest.sh @@ -1,10 +1,7 @@ #!/bin/bash -source ../../ci/unittest-base.sh +PATH="$PATH:../../ci" +source unittest-base.sh export COVERAGE_RCFILE=../../.coveragerc - -coverage run -m unittest --quiet -coverage report --fail-under=0 -coverage html --fail-under=0 -coverage xml # Fail if coverage is too low, but only after the text and HTML reports have been generated +run_coverage diff --git a/components/api_server/pyproject.toml b/components/api_server/pyproject.toml index 128847b092..3594fbe8db 100644 --- a/components/api_server/pyproject.toml +++ b/components/api_server/pyproject.toml @@ -1,38 +1,108 @@ [project] name = "api-server" version = "5.13.0" +requires-python = ">=3.12" +classifiers = [ + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.12", +] dependencies = [ "bottle==0.12.25", "cryptography==42.0.8", "gevent==24.2.1", "ldap3==2.9.1", - "lxml[html_clean]==5.2.2", + "lxml==5.2.2", + "lxml-html-clean==0.1.1", "pymongo==4.7.3", - "requests==2.32.3" + "requests==2.32.3", ] - -[project.optional-dependencies] -dev = [ +optional-dependencies.dev = [ "coverage==7.5.3", "pip==24.0", + "pip-tools==7.4.1", # To add hashes to requirements "pipx==1.6.0", - "pip-tools==7.4.1", # To add hashes to requirements - "pydantic==2.7.4", # Needed for importing the data model in the tests from the shared code component + 
"pydantic==2.7.4", # Needed for importing the data model in the tests from the shared code component "types-cryptography==3.3.23.2", "types-ldap3==2.9.13.20240205", "types-requests==2.32.0.20240602", - "unittest-xml-reporting==3.2.0", # Needed to generate JUnit XML output for Sonarcloud.io + "unittest-xml-reporting==3.2.0", # Needed to generate JUnit XML output for Sonarcloud.io ] -tools = [ +optional-dependencies.tools = [ "bandit==1.7.9", "fixit==2.1.0", "mypy==1.10.0", "pip-audit==2.7.3", + "pyproject-fmt==2.1.3", "ruff==0.4.9", - "safety==3.2.3", - "vulture==2.11" + "vulture==2.11", ] +[tool.ruff] +target-version = "py312" +line-length = 120 +src = [ + "src", +] +lint.select = [ + "ALL", +] +lint.ignore = [ + "ANN001", # https://docs.astral.sh/ruff/rules/missing-type-function-argument/ - too many untyped arguments atm to turn this rule on + "ANN002", # https://docs.astral.sh/ruff/rules/missing-type-args/ - leads to false positives for super().__init__(*args, **kwargs) + "ANN003", # https://docs.astral.sh/ruff/rules/missing-type-kwargs/ - leads to false positives for super().__init__(*args, **kwargs) + "ANN101", # https://docs.astral.sh/ruff/rules/missing-type-self/ - type checkers can infer the type of `self`, so annotating it is superfluous + "ANN102", # https://docs.astral.sh/ruff/rules/missing-type-cls/ - type checkers can infer the type of `cls`, so annotating it is superfluous + "ANN201", # https://docs.astral.sh/ruff/rules/missing-return-type-undocumented-public-function/ - too many untyped return values atm to turn this rule on + "COM812", # https://docs.astral.sh/ruff/rules/missing-trailing-comma/ - this rule may cause conflicts when used with the ruff formatter + "D107", # https://docs.astral.sh/ruff/rules/undocumented-public-init/ - requiring __init__() methods to have docstrings seems a bit much + "D203", # https://docs.astral.sh/ruff/rules/one-blank-line-before-class/ - prevent warning: `one-blank-line-before-class` (D203) and 
`no-blank-line-before-class` (D211) are incompatible. Ignoring `one-blank-line-before-class` + "D213", # https://docs.astral.sh/ruff/rules/multi-line-summary-second-line/ - prevent warning: `multi-line-summary-first-line` (D212) and `multi-line-summary-second-line` (D213) are incompatible. Ignoring `multi-line-summary-second-line` + "FBT", # https://docs.astral.sh/ruff/rules/#flake8-boolean-trap-fbt - not sure of the value of preventing "boolean traps" + "I001", # https://docs.astral.sh/ruff/rules/unsorted-imports/ - (probably) because ruff is run with pipx it can't differentiate between dependencies and modules + "ISC001", # https://docs.astral.sh/ruff/rules/single-line-implicit-string-concatenation/ - this rule may cause conflicts when used with the ruff formatter + "PD", # https://docs.astral.sh/ruff/rules/#pandas-vet-pd - pandas isn't used + "PT", # https://docs.astral.sh/ruff/rules/#flake8-pytest-style-pt - pytest isn't used +] +lint.per-file-ignores.".vulture_ignore_list.py" = [ + "ALL", +] +lint.per-file-ignores."__init__.py" = [ + "D104", # https://docs.astral.sh/ruff/rules/undocumented-public-package/ - don't require doc strings in __init__.py files + "F401", # https://docs.astral.sh/ruff/rules/unused-import/ - routes are imported in __init__.py files to flatten the module hierarchy +] +lint.per-file-ignores."src/model/issue_tracker.py" = [ + "BLE001", # https://docs.astral.sh/ruff/rules/blind-except/ - allow for catching blind exception `Exception` +] +lint.per-file-ignores."src/quality_time_server.py" = [ + "E402", + "INP001", # https://docs.astral.sh/ruff/rules/implicit-namespace-package/ - false positive because this is the main script +] +lint.per-file-ignores."tests/**/*.py" = [ + "ANN201", # https://docs.astral.sh/ruff/rules/missing-return-type-undocumented-public-function/ - don't require test functions to have return types + "S105", # https://docs.astral.sh/ruff/rules/hardcoded-password-string/ - hardcoded passwords in test code are test data + 
"S106", # https://docs.astral.sh/ruff/rules/hardcoded-password-func-arg/ - hardcoded passwords in test code are test data +] +lint.isort.section-order = [ + "future", + "standard-library", + "third-party", + "second-party", + "first-party", + "tests", + "local-folder", +] +lint.isort.sections."second-party" = [ + "shared", + "shared_data_model", +] +lint.isort.sections.tests = [ + "tests", +] + +[tool.pyproject-fmt] +indent = 4 +keep_full_version = true # Don't remove trailing zero's from version specifiers + [tool.mypy] ignore_missing_imports = false incremental = false @@ -40,14 +110,14 @@ warn_redundant_casts = true warn_return_any = true warn_unreachable = true warn_unused_ignores = true -disable_error_code = "valid-type" # mypy does not yet support PEP 695, Type Parameter Syntax. See https://github.com/python/mypy/issues/15238 +disable_error_code = "valid-type" # mypy does not yet support PEP 695, Type Parameter Syntax. See https://github.com/python/mypy/issues/15238 [[tool.mypy.overrides]] module = [ "bottle", "gevent", "lxml.html", - "lxml.html.clean" + "lxml_html_clean", ] ignore_missing_imports = true @@ -55,56 +125,5 @@ ignore_missing_imports = true allow_unsafe = true generate_hashes = true quiet = true -strip_extras = false # Needed for lxml[html-clean] +strip_extras = false # Needed for lxml[html-clean] upgrade = true - -[tool.ruff] -target-version = "py312" -line-length = 120 -src = ["src"] - -[tool.ruff.lint] -select = ["ALL"] -ignore = [ - "ANN001", # https://docs.astral.sh/ruff/rules/missing-type-function-argument/ - too many untyped arguments atm to turn this rule on - "ANN002", # https://docs.astral.sh/ruff/rules/missing-type-args/ - leads to false positives for super().__init__(*args, **kwargs) - "ANN003", # https://docs.astral.sh/ruff/rules/missing-type-kwargs/ - leads to false positives for super().__init__(*args, **kwargs) - "ANN101", # https://docs.astral.sh/ruff/rules/missing-type-self/ - type checkers can infer the type of `self`, so 
annotating it is superfluous - "ANN102", # https://docs.astral.sh/ruff/rules/missing-type-cls/ - type checkers can infer the type of `cls`, so annotating it is superfluous - "ANN201", # https://docs.astral.sh/ruff/rules/missing-return-type-undocumented-public-function/ - too many untyped return values atm to turn this rule on - "COM812", # https://docs.astral.sh/ruff/rules/missing-trailing-comma/ - this rule may cause conflicts when used with the ruff formatter - "D107", # https://docs.astral.sh/ruff/rules/undocumented-public-init/ - requiring __init__() methods to have docstrings seems a bit much - "D203", # https://docs.astral.sh/ruff/rules/one-blank-line-before-class/ - prevent warning: `one-blank-line-before-class` (D203) and `no-blank-line-before-class` (D211) are incompatible. Ignoring `one-blank-line-before-class` - "D213", # https://docs.astral.sh/ruff/rules/multi-line-summary-second-line/ - prevent warning: `multi-line-summary-first-line` (D212) and `multi-line-summary-second-line` (D213) are incompatible. 
Ignoring `multi-line-summary-second-line` - "FBT", # https://docs.astral.sh/ruff/rules/#flake8-boolean-trap-fbt - not sure of the value of preventing "boolean traps" - "I001", # https://docs.astral.sh/ruff/rules/unsorted-imports/ - (probably) because ruff is run with pipx it can't differentiate between dependencies and modules - "ISC001", # https://docs.astral.sh/ruff/rules/single-line-implicit-string-concatenation/ - this rule may cause conflicts when used with the ruff formatter - "PD", # https://docs.astral.sh/ruff/rules/#pandas-vet-pd - pandas isn't used - "PT", # https://docs.astral.sh/ruff/rules/#flake8-pytest-style-pt - pytest isn't used -] - -[tool.ruff.lint.isort] -section-order = ["future", "standard-library", "third-party", "second-party", "first-party", "tests", "local-folder"] - -[tool.ruff.lint.isort.sections] -"second-party" = ["shared", "shared_data_model"] -"tests" = ["tests"] - -[tool.ruff.lint.per-file-ignores] -".vulture_ignore_list.py" = ["ALL"] -"__init__.py" = [ - "D104", # https://docs.astral.sh/ruff/rules/undocumented-public-package/ - don't require doc strings in __init__.py files - "F401", # https://docs.astral.sh/ruff/rules/unused-import/ - routes are imported in __init__.py files to flatten the module hierarchy -] -"src/quality_time_server.py" = [ - "E402", - "INP001", # https://docs.astral.sh/ruff/rules/implicit-namespace-package/ - false positive because this is the main script -] -"src/model/issue_tracker.py" = [ - "BLE001" # https://docs.astral.sh/ruff/rules/blind-except/ - allow for catching blind exception `Exception` -] -"tests/**/*.py" = [ - "ANN201", # https://docs.astral.sh/ruff/rules/missing-return-type-undocumented-public-function/ - don't require test functions to have return types - "S105", # https://docs.astral.sh/ruff/rules/hardcoded-password-string/ - hardcoded passwords in test code are test data - "S106", # https://docs.astral.sh/ruff/rules/hardcoded-password-func-arg/ - hardcoded passwords in test code are test data 
-] diff --git a/components/api_server/requirements/requirements-dev.txt b/components/api_server/requirements/requirements-dev.txt index 9dc4f44a2d..789ff4760f 100644 --- a/components/api_server/requirements/requirements-dev.txt +++ b/components/api_server/requirements/requirements-dev.txt @@ -379,7 +379,7 @@ ldap3==2.9.1 \ --hash=sha256:5869596fc4948797020d3f03b7939da938778a0f9e2009f7a072ccf92b8e8d70 \ --hash=sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f # via api-server (pyproject.toml) -lxml[html-clean]==5.2.2 \ +lxml==5.2.2 \ --hash=sha256:02437fb7308386867c8b7b0e5bc4cd4b04548b1c5d089ffb8e7b31009b961dc3 \ --hash=sha256:02f6a8eb6512fdc2fd4ca10a49c341c4e109aa6e9448cc4859af5b949622715a \ --hash=sha256:05f8757b03208c3f50097761be2dea0aba02e94f0dc7023ed73a7bb14ff11eb0 \ @@ -529,7 +529,7 @@ lxml[html-clean]==5.2.2 \ lxml-html-clean==0.1.1 \ --hash=sha256:58c04176593c9caf72ec92e033d2f38859e918b3eff0cc0f8051ad27dc2ab8ef \ --hash=sha256:8a644ed01dbbe132fabddb9467f077f6dad12a1d4f3a6a553e280f3815fa46df - # via lxml + # via api-server (pyproject.toml) packaging==24.1 \ --hash=sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002 \ --hash=sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124 diff --git a/components/api_server/requirements/requirements.txt b/components/api_server/requirements/requirements.txt index d54d0a00f0..febfa4e0cd 100644 --- a/components/api_server/requirements/requirements.txt +++ b/components/api_server/requirements/requirements.txt @@ -307,7 +307,7 @@ ldap3==2.9.1 \ --hash=sha256:5869596fc4948797020d3f03b7939da938778a0f9e2009f7a072ccf92b8e8d70 \ --hash=sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f # via api-server (pyproject.toml) -lxml[html-clean]==5.2.2 \ +lxml==5.2.2 \ --hash=sha256:02437fb7308386867c8b7b0e5bc4cd4b04548b1c5d089ffb8e7b31009b961dc3 \ --hash=sha256:02f6a8eb6512fdc2fd4ca10a49c341c4e109aa6e9448cc4859af5b949622715a \ 
--hash=sha256:05f8757b03208c3f50097761be2dea0aba02e94f0dc7023ed73a7bb14ff11eb0 \ @@ -456,7 +456,7 @@ lxml[html-clean]==5.2.2 \ lxml-html-clean==0.1.1 \ --hash=sha256:58c04176593c9caf72ec92e033d2f38859e918b3eff0cc0f8051ad27dc2ab8ef \ --hash=sha256:8a644ed01dbbe132fabddb9467f077f6dad12a1d4f3a6a553e280f3815fa46df - # via lxml + # via api-server (pyproject.toml) pyasn1==0.6.0 \ --hash=sha256:3a35ab2c4b5ef98e17dfdec8ab074046fbda76e281c5a706ccd82328cfc8f64c \ --hash=sha256:cca4bb0f2df5504f02f6f8a775b6e416ff9b0b3b16f7ee80b5a3153d9b804473 diff --git a/components/api_server/src/utils/functions.py b/components/api_server/src/utils/functions.py index eb347ec61c..e4f83e6817 100644 --- a/components/api_server/src/utils/functions.py +++ b/components/api_server/src/utils/functions.py @@ -15,7 +15,7 @@ # Bandit complains that "Using autolink_html to parse untrusted XML data is known to be vulnerable to XML attacks", # and Dlint complains 'insecure use of XML modules, prefer "defusedxml"' # but we give autolink_html clean html, so ignore the warning: -from lxml.html.clean import autolink_html, clean_html # nosec +from lxml_html_clean import autolink_html, clean_html # nosec from lxml.html import fromstring, tostring # nosec from shared.utils.type import ItemId diff --git a/components/api_server/tests/base.py b/components/api_server/tests/base.py index 2f778b1125..d35acb217c 100644 --- a/components/api_server/tests/base.py +++ b/components/api_server/tests/base.py @@ -6,6 +6,7 @@ import unittest from collections.abc import Callable from unittest.mock import Mock +from typing import ClassVar, cast from shared_data_model import DATA_MODEL_JSON @@ -21,6 +22,8 @@ def setUp(self): class DataModelTestCase(DatabaseTestCase): """Base class for unit tests that use the data model.""" + DATA_MODEL: ClassVar[dict] = {} + @classmethod def setUpClass(cls) -> None: """Override to set up the data model.""" @@ -34,7 +37,7 @@ def setUp(self): @staticmethod def load_data_model() -> dict: """Load the 
data model from the JSON dump.""" - data_model = json.loads(DATA_MODEL_JSON) + data_model = cast(dict, json.loads(DATA_MODEL_JSON)) data_model["_id"] = "id" data_model["timestamp"] = "now" return data_model diff --git a/components/api_server/tests/initialization/test_migrations.py b/components/api_server/tests/initialization/test_migrations.py index ff032882a2..d7657848bc 100644 --- a/components/api_server/tests/initialization/test_migrations.py +++ b/components/api_server/tests/initialization/test_migrations.py @@ -9,13 +9,27 @@ class MigrationTestCase(DataModelTestCase): """Base class for migration unit tests.""" - def existing_report(self, metric_type: str): + def existing_report( + self, + *, + metric_type: str, + metric_name: str = "", + metric_unit: str = "", + sources: dict[SourceId, dict[str, str | dict[str, str]]] | None = None, + ): """Return a report fixture. To be extended in subclasses.""" - return { + report: dict = { "_id": "id", "report_uuid": REPORT_ID, "subjects": {SUBJECT_ID: {"type": "software", "metrics": {METRIC_ID: {"type": metric_type}}}}, } + if metric_name: + report["subjects"][SUBJECT_ID]["metrics"][METRIC_ID]["name"] = metric_name + if metric_unit: + report["subjects"][SUBJECT_ID]["metrics"][METRIC_ID]["unit"] = metric_unit + if sources: + report["subjects"][SUBJECT_ID]["metrics"][METRIC_ID]["sources"] = sources + return report def inserted_report(self, **kwargs): """Return a report as it is expected to have been inserted into the reports collection. 
@@ -38,7 +52,7 @@ def test_no_reports(self): def test_empty_reports(self): """Test that the migration succeeds when the report does not have anything to migrate.""" - self.database.reports.find.return_value = [self.existing_report("issues")] + self.database.reports.find.return_value = [self.existing_report(metric_type="issues")] perform_migrations(self.database) self.database.reports.replace_one.assert_not_called() @@ -46,23 +60,6 @@ def test_empty_reports(self): class ChangeAccessibilityViolationsTest(MigrationTestCase): """Unit tests for the accessibility violations database migration.""" - def existing_report( - self, - *, - metric_type: str = "accessibility", - metric_name: str = "", - metric_unit: str = "", - extra_metrics: bool = False, - ): - """Extend to add name and unit to the metric and optional extra metrics.""" - report = super().existing_report(metric_type=metric_type) - report["subjects"][SUBJECT_ID]["metrics"][METRIC_ID]["name"] = metric_name - report["subjects"][SUBJECT_ID]["metrics"][METRIC_ID]["unit"] = metric_unit - if extra_metrics: - report["subjects"][SUBJECT_ID]["metrics"][METRIC_ID2] = {"type": "violations"} - report["subjects"][SUBJECT_ID]["metrics"][METRIC_ID3] = {"type": "security_warnings"} - return report - def inserted_report( self, metric_name: str = "Accessibility violations", metric_unit: str = "accessibility violations", **kwargs ): @@ -79,13 +76,15 @@ def test_report_without_accessibility_metrics(self): def test_report_with_accessibility_metric(self): """Test that the migration succeeds with an accessibility metric.""" - self.database.reports.find.return_value = [self.existing_report()] + self.database.reports.find.return_value = [self.existing_report(metric_type="accessibility")] perform_migrations(self.database) self.database.reports.replace_one.assert_called_once_with({"_id": "id"}, self.inserted_report()) def test_accessibility_metric_with_name_and_unit(self): """Test that the migration succeeds with an accessibility metric, 
and existing name and unit are kept.""" - self.database.reports.find.return_value = [self.existing_report(metric_name="name", metric_unit="unit")] + self.database.reports.find.return_value = [ + self.existing_report(metric_type="accessibility", metric_name="name", metric_unit="unit"), + ] perform_migrations(self.database) self.database.reports.replace_one.assert_called_once_with( {"_id": "id"}, @@ -94,36 +93,33 @@ def test_accessibility_metric_with_name_and_unit(self): def test_report_with_accessibility_metric_and_other_types(self): """Test that the migration succeeds with an accessibility metric and other metric types.""" - self.database.reports.find.return_value = [self.existing_report(extra_metrics=True)] + report = self.existing_report(metric_type="accessibility") + report["subjects"][SUBJECT_ID]["metrics"][METRIC_ID2] = {"type": "violations"} + report["subjects"][SUBJECT_ID]["metrics"][METRIC_ID3] = {"type": "security_warnings"} + self.database.reports.find.return_value = [report] perform_migrations(self.database) - self.database.reports.replace_one.assert_called_once_with( - {"_id": "id"}, - self.inserted_report(extra_metrics=True), - ) + inserted_report = self.inserted_report() + inserted_report["subjects"][SUBJECT_ID]["metrics"][METRIC_ID2] = {"type": "violations"} + inserted_report["subjects"][SUBJECT_ID]["metrics"][METRIC_ID3] = {"type": "security_warnings"} + self.database.reports.replace_one.assert_called_once_with({"_id": "id"}, inserted_report) class BranchParameterTest(MigrationTestCase): """Unit tests for the branch parameter database migration.""" - def existing_report( - self, metric_type: str = "loc", sources: dict[SourceId, dict[str, str | dict[str, str]]] | None = None - ): - """Extend to add sources and an extra metric without sources.""" - report = super().existing_report(metric_type=metric_type) - report["subjects"][SUBJECT_ID]["metrics"][METRIC_ID2] = {"type": "issues"} - report["subjects"][SUBJECT_ID]["metrics"][METRIC_ID]["sources"] = 
sources or {} - return report - def test_report_without_branch_parameter(self): """Test that the migration succeeds with reports, but without metrics with a branch parameter.""" - self.database.reports.find.return_value = [self.existing_report()] + self.database.reports.find.return_value = [self.existing_report(metric_type="loc")] perform_migrations(self.database) self.database.reports.replace_one.assert_not_called() def test_report_with_non_empty_branch_parameter(self): """Test that the migration succeeds when the branch parameter is not empty.""" self.database.reports.find.return_value = [ - self.existing_report(sources={SOURCE_ID: {"type": "sonarqube", "parameters": {"branch": "main"}}}) + self.existing_report( + metric_type="loc", + sources={SOURCE_ID: {"type": "sonarqube", "parameters": {"branch": "main"}}}, + ) ] perform_migrations(self.database) self.database.reports.replace_one.assert_not_called() @@ -148,32 +144,28 @@ def test_report_with_branch_parameter_without_value(self): class SourceParameterHashMigrationTest(MigrationTestCase): """Unit tests for the source parameter hash database migration.""" - def existing_report(self, sources: dict[SourceId, dict[str, str | dict[str, str]]] | None = None): - """Extend to add sources and an extra metric without sources.""" - report = super().existing_report(metric_type="loc") - report["subjects"][SUBJECT_ID]["metrics"][METRIC_ID2] = {"type": "issues"} - if sources: - report["subjects"][SUBJECT_ID]["metrics"][METRIC_ID]["sources"] = sources - return report - def test_report_with_sources_without_source_parameter_hash(self): """Test a report with sources and measurements.""" self.database.measurements.find_one.return_value = {"_id": "id", "metric_uuid": METRIC_ID} - self.database.reports.find.return_value = [self.existing_report(sources={SOURCE_ID: {"type": "cloc"}})] + self.database.reports.find.return_value = [ + self.existing_report(metric_type="loc", sources={SOURCE_ID: {"type": "cloc"}}) + ] 
perform_migrations(self.database) inserted_measurement = {"metric_uuid": METRIC_ID, "source_parameter_hash": "8c3b464958e9ad0f20fb2e3b74c80519"} self.database.measurements.replace_one.assert_called_once_with({"_id": "id"}, inserted_measurement) def test_report_without_sources(self): """Test a report without sources.""" - self.database.reports.find.return_value = [self.existing_report()] + self.database.reports.find.return_value = [self.existing_report(metric_type="loc")] perform_migrations(self.database) self.database.measurements.replace_one.assert_not_called() def test_metric_without_measurement(self): """Test a metric without measurements.""" self.database.measurements.find_one.return_value = None - self.database.reports.find.return_value = [self.existing_report(sources={SOURCE_ID: {"type": "cloc"}})] + self.database.reports.find.return_value = [ + self.existing_report(metric_type="loc", sources={SOURCE_ID: {"type": "cloc"}}) + ] perform_migrations(self.database) self.database.measurements.replace_one.assert_not_called() @@ -181,17 +173,6 @@ def test_metric_without_measurement(self): class CIEnvironmentTest(MigrationTestCase): """Unit tests for the CI-environment subject type database migration.""" - def existing_report(self, subject_type: str = "", subject_name: str = "", subject_description: str = ""): - """Extend to set the subject type to CI-environment.""" - report = super().existing_report(metric_type="issues") - if subject_type: - report["subjects"][SUBJECT_ID]["type"] = subject_type - if subject_name: - report["subjects"][SUBJECT_ID]["name"] = subject_name - if subject_description: - report["subjects"][SUBJECT_ID]["description"] = subject_description - return report - def inserted_report(self, **kwargs): """Extend to set the subject type to development environment.""" report = super().inserted_report(**kwargs) @@ -200,72 +181,79 @@ def inserted_report(self, **kwargs): def test_report_without_ci_environment(self): """Test that the migration succeeds 
without CI-environment subject.""" - self.database.reports.find.return_value = [self.existing_report()] + self.database.reports.find.return_value = [self.existing_report(metric_type="failed_jobs")] perform_migrations(self.database) self.database.reports.replace_one.assert_not_called() def test_report_with_ci_environment(self): """Test that the migration succeeds with CI-environment subject.""" - self.database.reports.find.return_value = [self.existing_report(subject_type="ci")] + report = self.existing_report(metric_type="failed_jobs") + report["subjects"][SUBJECT_ID]["type"] = "ci" + self.database.reports.find.return_value = [report] perform_migrations(self.database) - inserted_report = self.inserted_report( - subject_name="CI-environment", - subject_description="A continuous integration environment.", - ) + inserted_report = self.inserted_report(metric_type="failed_jobs") + inserted_report["subjects"][SUBJECT_ID]["name"] = "CI-environment" + inserted_report["subjects"][SUBJECT_ID]["description"] = "A continuous integration environment." 
self.database.reports.replace_one.assert_called_once_with({"_id": "id"}, inserted_report) def test_ci_environment_with_title_and_subtitle(self): """Test that the migration succeeds with an CI-environment subject, and existing title and subtitle are kept.""" - self.database.reports.find.return_value = [ - self.existing_report(subject_type="ci", subject_name="CI", subject_description="My CI") - ] + report = self.existing_report(metric_type="failed_jobs") + report["subjects"][SUBJECT_ID]["type"] = "ci" + report["subjects"][SUBJECT_ID]["name"] = "CI" + report["subjects"][SUBJECT_ID]["description"] = "My CI" + self.database.reports.find.return_value = [report] perform_migrations(self.database) - inserted_report = self.inserted_report(subject_name="CI", subject_description="My CI") + inserted_report = self.inserted_report(metric_type="failed_jobs") + inserted_report["subjects"][SUBJECT_ID]["name"] = "CI" + inserted_report["subjects"][SUBJECT_ID]["description"] = "My CI" self.database.reports.replace_one.assert_called_once_with({"_id": "id"}, inserted_report) class SonarQubeParameterTest(MigrationTestCase): """Unit tests for the SonarQube parameter database migration.""" - def existing_report( - self, - metric_type: str = "violations", - sources: dict[SourceId, dict[str, str | dict[str, str | list[str]]]] | None = None, - ): - """Extend to add sources.""" - report = super().existing_report(metric_type=metric_type) - report["subjects"][SUBJECT_ID]["metrics"][METRIC_ID]["sources"] = sources - return report - def sources(self, source_type: str = "sonarqube", **parameters): """Create the sources fixture.""" return {SOURCE_ID: {"type": source_type, "parameters": {"branch": "main", **parameters}}} def test_report_without_severity_or_types_parameter(self): """Test that the migration succeeds when the SonarQube source has no severity or types parameter.""" - self.database.reports.find.return_value = [self.existing_report(sources=self.sources())] + 
self.database.reports.find.return_value = [ + self.existing_report(metric_type="violations", sources=self.sources()), + ] perform_migrations(self.database) self.database.reports.replace_one.assert_not_called() def test_report_with_violation_metric_but_no_sonarqube(self): """Test that the migration succeeds when a violations metric has no SonarQube sources.""" - self.database.reports.find.return_value = [self.existing_report(sources=self.sources("sarif"))] + self.database.reports.find.return_value = [ + self.existing_report(metric_type="violations", sources=self.sources("sarif")), + ] perform_migrations(self.database) self.database.reports.replace_one.assert_not_called() def test_report_with_severity_parameter(self): """Test that the migration succeeds when the SonarQube source has a severity parameter.""" - self.database.reports.find.return_value = [self.existing_report(sources=self.sources(severities=["info"]))] + self.database.reports.find.return_value = [ + self.existing_report(metric_type="violations", sources=self.sources(severities=["info"])), + ] perform_migrations(self.database) - inserted_report = self.inserted_report(sources=self.sources(impact_severities=["low"])) + inserted_report = self.inserted_report( + metric_type="violations", + sources=self.sources(impact_severities=["low"]), + ) self.database.reports.replace_one.assert_called_once_with({"_id": "id"}, inserted_report) def test_report_with_multiple_old_severity_values_that_map_to_the_same_new_value(self): """Test a severity parameter with multiple old values that map to the same new value.""" - reports = [self.existing_report(sources=self.sources(severities=["info", "minor"]))] + reports = [self.existing_report(metric_type="violations", sources=self.sources(severities=["info", "minor"]))] self.database.reports.find.return_value = reports perform_migrations(self.database) - inserted_report = self.inserted_report(sources=self.sources(impact_severities=["low"])) + inserted_report = 
self.inserted_report( + metric_type="violations", + sources=self.sources(impact_severities=["low"]), + ) self.database.reports.replace_one.assert_called_once_with({"_id": "id"}, inserted_report) @disable_logging @@ -273,25 +261,32 @@ def test_report_with_unknown_old_severity_values(self): """Test that unknown severity parameter values are ignored.""" sources = self.sources(severities=["info", ""]) sources[SOURCE_ID2] = {"type": "sonarqube", "parameters": {"branch": "main", "severities": ["foo"]}} - self.database.reports.find.return_value = [self.existing_report(sources=sources)] + self.database.reports.find.return_value = [self.existing_report(metric_type="violations", sources=sources)] perform_migrations(self.database) inserted_sources = self.sources(impact_severities=["low"]) inserted_sources[SOURCE_ID2] = {"type": "sonarqube", "parameters": {"branch": "main"}} - inserted_report = self.inserted_report(sources=inserted_sources) + inserted_report = self.inserted_report(metric_type="violations", sources=inserted_sources) self.database.reports.replace_one.assert_called_once_with({"_id": "id"}, inserted_report) def test_report_with_types_parameter(self): """Test that the migration succeeds when the SonarQube source has a types parameter.""" - self.database.reports.find.return_value = [self.existing_report(sources=self.sources(types=["bug"]))] + self.database.reports.find.return_value = [ + self.existing_report(metric_type="violations", sources=self.sources(types=["bug"])), + ] perform_migrations(self.database) - inserted_report = self.inserted_report(sources=self.sources(impacted_software_qualities=["reliability"])) + inserted_report = self.inserted_report( + metric_type="violations", + sources=self.sources(impacted_software_qualities=["reliability"]), + ) self.database.reports.replace_one.assert_called_once_with({"_id": "id"}, inserted_report) def test_report_with_types_parameter_without_values(self): """Test that the migration succeeds when the SonarQube source has 
a types parameter without values.""" - self.database.reports.find.return_value = [self.existing_report(sources=self.sources(types=[]))] + self.database.reports.find.return_value = [ + self.existing_report(metric_type="violations", sources=self.sources(types=[])), + ] perform_migrations(self.database) - inserted_report = self.inserted_report(sources=self.sources()) + inserted_report = self.inserted_report(metric_type="violations", sources=self.sources()) self.database.reports.replace_one.assert_called_once_with({"_id": "id"}, inserted_report) def test_report_with_security_types_parameter(self): diff --git a/components/api_server/tests/model/test_issue_tracker.py b/components/api_server/tests/model/test_issue_tracker.py index 82310e433a..d84570c750 100644 --- a/components/api_server/tests/model/test_issue_tracker.py +++ b/components/api_server/tests/model/test_issue_tracker.py @@ -34,7 +34,7 @@ def test_username_and_password(self): def test_private_token(self): """Test the issue tracker credentials.""" - credentials = IssueTrackerCredentials(private_token="token") + credentials = IssueTrackerCredentials(private_token="token") # nosec issue_tracker = IssueTracker(self.ISSUE_TRACKER_URL, self.issue_parameters, credentials) self.assertEqual("token", issue_tracker.credentials.private_token) diff --git a/components/api_server/tests/model/test_transformations.py b/components/api_server/tests/model/test_transformations.py index 5c4f0b4a34..57595ed41e 100644 --- a/components/api_server/tests/model/test_transformations.py +++ b/components/api_server/tests/model/test_transformations.py @@ -34,7 +34,7 @@ def test_do_not_hide_empty_source_credentials(self): credential in the UI. If we mask empty credentials the users won't be able to tell that they did successfully clear a credential (because it looks the same as an existing credential) and complain there is a bug. 
""" - self.source_parameters["password"] = "" + self.source_parameters["password"] = "" # nosec hide_credentials(self.DATA_MODEL, self.report) self.assertEqual("", self.source_parameters["password"]) @@ -45,6 +45,6 @@ def test_do_not_hide_empty_issue_tracker_credentials(self): credential in the UI. If we mask empty credentials the users won't be able to tell that they did successfully clear a credential (because it looks the same as an existing credential) and complain there is a bug. """ - self.issue_tracker_parameters["private_token"] = "" + self.issue_tracker_parameters["private_token"] = "" # nosec hide_credentials(self.DATA_MODEL, self.report) self.assertEqual("", self.issue_tracker_parameters["private_token"]) diff --git a/components/api_server/tests/routes/plugins/test_route_auth_plugin.py b/components/api_server/tests/routes/plugins/test_route_auth_plugin.py index 2f140cf3a8..39e5d3574a 100644 --- a/components/api_server/tests/routes/plugins/test_route_auth_plugin.py +++ b/components/api_server/tests/routes/plugins/test_route_auth_plugin.py @@ -37,7 +37,7 @@ def tearDown(self): logging.disable(logging.NOTSET) @staticmethod - def route(database: Mock, user: User | None = None) -> tuple[Mock, User]: + def route(database: Mock, user: User | None = None) -> tuple[Mock, User | None]: """Route handler with injected parameters. 
Returns the parameters for test purposes.""" return database, user diff --git a/components/api_server/tests/routes/test_auth.py b/components/api_server/tests/routes/test_auth.py index 88540b974b..dab59c6f47 100644 --- a/components/api_server/tests/routes/test_auth.py +++ b/components/api_server/tests/routes/test_auth.py @@ -14,7 +14,7 @@ from tests.base import DatabaseTestCase USERNAME = "john-doe" -PASSWORD = "secret" +PASSWORD = "secret" # nosec class AuthTestCase(DatabaseTestCase): diff --git a/components/api_server/tests/routes/test_measurement.py b/components/api_server/tests/routes/test_measurement.py index 1e7a6d3cf5..275582a9d5 100644 --- a/components/api_server/tests/routes/test_measurement.py +++ b/components/api_server/tests/routes/test_measurement.py @@ -1,6 +1,7 @@ """Unit tests for the measurement routes.""" from datetime import timedelta +from typing import cast from unittest.mock import Mock, patch from shared.model.measurement import Measurement @@ -117,7 +118,7 @@ def insert_one(new_measurement) -> None: def set_entity_attribute(self, attribute: str = "attribute", value: str = "value") -> Measurement: """Set the entity attribute and return the new measurement.""" with patch("bottle.request", Mock(json={attribute: value})): - return set_entity_attribute(METRIC_ID, SOURCE_ID, "entity_key", attribute, self.database) + return cast(Measurement, set_entity_attribute(METRIC_ID, SOURCE_ID, "entity_key", attribute, self.database)) def test_set_attribute(self): """Test that setting an attribute inserts a new measurement.""" diff --git a/components/api_server/tests/routes/test_report.py b/components/api_server/tests/routes/test_report.py index ce1de44a55..3ff7adc95a 100644 --- a/components/api_server/tests/routes/test_report.py +++ b/components/api_server/tests/routes/test_report.py @@ -570,8 +570,8 @@ def test_get_json_report(self): exported_report["issue_tracker"]["parameters"].pop("password") self.assertDictEqual(exported_report, expected_report) - 
self.assertTrue(isinstance(exported_password, tuple)) - self.assertTrue(len(exported_password) == 2) # noqa: PLR2004 + self.assertIsInstance(exported_password, tuple) + self.assertEqual(2, len(exported_password)) def test_get_nonexisting_json_report(self): """Test that None is returned if report doesn't exist.""" @@ -598,10 +598,8 @@ def test_get_json_report_with_public_key(self, request): request.query = {"public_key": self.public_key} mocked_report = copy.deepcopy(self.report) - mocked_report["subjects"][SUBJECT_ID]["metrics"][METRIC_ID]["sources"][SOURCE_ID]["parameters"]["password"] = [ - "0", - "1", - ] # Use a list as password for coverage of the last line + parameters = mocked_report["subjects"][SUBJECT_ID]["metrics"][METRIC_ID]["sources"][SOURCE_ID]["parameters"] + parameters["password"] = ["0", "1"] # nosec # Use a list as password for coverage of the last line self.database.reports.find_one.return_value = mocked_report exported_report = export_report_as_json(self.database, REPORT_ID) exported_password = exported_report["subjects"][SUBJECT_ID]["metrics"][METRIC_ID]["sources"][SOURCE_ID][ @@ -610,16 +608,15 @@ def test_get_json_report_with_public_key(self, request): exported_report["issue_tracker"]["parameters"].pop("password") self.assertDictEqual(exported_report, expected_report) - self.assertTrue(isinstance(exported_password, tuple)) - self.assertTrue(len(exported_password) == 2) # noqa: PLR2004 + self.assertIsInstance(exported_password, tuple) + self.assertEqual(2, len(exported_password)) @patch("bottle.request") def test_post_report_import(self, request): """Test that a report is imported correctly.""" mocked_report = copy.deepcopy(self.report) - mocked_report["subjects"][SUBJECT_ID]["metrics"][METRIC_ID]["sources"][SOURCE_ID]["parameters"]["password"] = ( - asymmetric_encrypt(self.public_key, "test_message") - ) + parameters = mocked_report["subjects"][SUBJECT_ID]["metrics"][METRIC_ID]["sources"][SOURCE_ID]["parameters"] + parameters["password"] = 
asymmetric_encrypt(self.public_key, "test_message") # nosec request.json = mocked_report post_report_import(self.database) inserted = self.database.reports.insert_one.call_args_list[0][0][0] @@ -631,9 +628,8 @@ def test_post_report_import(self, request): def test_post_report_import_without_encrypted_credentials(self, request): """Test that a report is imported correctly.""" mocked_report = copy.deepcopy(self.report) - mocked_report["subjects"][SUBJECT_ID]["metrics"][METRIC_ID]["sources"][SOURCE_ID]["parameters"]["password"] = ( - "unencrypted_password" - ) + parameters = mocked_report["subjects"][SUBJECT_ID]["metrics"][METRIC_ID]["sources"][SOURCE_ID]["parameters"] + parameters["password"] = "unencrypted_password" # nosec request.json = mocked_report post_report_import(self.database) inserted_report = self.database.reports.insert_one.call_args_list[0][0][0] @@ -646,10 +642,8 @@ def test_post_report_import_without_encrypted_credentials(self, request): def test_post_report_import_with_failed_decryption(self, request): """Test that a report is imported correctly.""" mocked_report = copy.deepcopy(self.report) - mocked_report["subjects"][SUBJECT_ID]["metrics"][METRIC_ID]["sources"][SOURCE_ID]["parameters"]["password"] = ( - "not_properly_encrypted==", - "test_message", - ) + parameters = mocked_report["subjects"][SUBJECT_ID]["metrics"][METRIC_ID]["sources"][SOURCE_ID]["parameters"] + parameters["password"] = ("not_properly_encrypted==", "test_message") # nosec request.json = mocked_report response = post_report_import(self.database) self.assertIn("error", response) @@ -659,9 +653,8 @@ def test_post_report_import_without_private_key(self, request): """Test that a report cannot be imported if the Quality-time instance has no private key.""" self.database.secrets.find_one.return_value = None mocked_report = copy.deepcopy(self.report) - mocked_report["subjects"][SUBJECT_ID]["metrics"][METRIC_ID]["sources"][SOURCE_ID]["parameters"]["password"] = ( - "unencrypted_password" - 
) + parameters = mocked_report["subjects"][SUBJECT_ID]["metrics"][METRIC_ID]["sources"][SOURCE_ID]["parameters"] + parameters["password"] = "unencrypted_password" # nosec request.json = mocked_report response = post_report_import(self.database) self.assertIn("error", response) diff --git a/components/api_server/tests/routes/test_settings.py b/components/api_server/tests/routes/test_settings.py index 89d4f34464..1004d95f73 100644 --- a/components/api_server/tests/routes/test_settings.py +++ b/components/api_server/tests/routes/test_settings.py @@ -6,7 +6,7 @@ from tests.base import DatabaseTestCase USERNAME = "john-doe" -PASSWORD = "secret" +PASSWORD = "secret" # nosec class SettingsTest(DatabaseTestCase): diff --git a/components/api_server/tests/routes/test_source.py b/components/api_server/tests/routes/test_source.py index bac584e075..8848f1aa96 100644 --- a/components/api_server/tests/routes/test_source.py +++ b/components/api_server/tests/routes/test_source.py @@ -231,7 +231,7 @@ def test_url_socket_error_negative_errno(self, mock_get, request): def test_url_with_user(self, mock_get, request): """Test that the source url can be changed and that the availability is checked.""" self.sources[SOURCE_ID]["parameters"]["username"] = "un" - self.sources[SOURCE_ID]["parameters"]["password"] = "pwd" + self.sources[SOURCE_ID]["parameters"]["password"] = "pwd" # nosec mock_get.return_value = self.url_check_get_response request.json = {"url": self.url} response = post_source_parameter(SOURCE_ID, "url", self.database) @@ -261,7 +261,7 @@ def test_url_with_token(self, mock_get, request): """Test that the source url can be changed and that the availability is checked.""" mock_get.return_value = self.url_check_get_response request.json = {"url": self.url} - self.sources[SOURCE_ID]["parameters"]["private_token"] = "xxx" + self.sources[SOURCE_ID]["parameters"]["private_token"] = "xxx" # nosec response = post_source_parameter(SOURCE_ID, "url", self.database) 
self.assert_url_check(response) mock_get.assert_called_once_with( diff --git a/components/collector/.vulture_ignore_list.py b/components/collector/.vulture_ignore_list.py index 0a1368bc9b..ec63f78847 100644 --- a/components/collector/.vulture_ignore_list.py +++ b/components/collector/.vulture_ignore_list.py @@ -1,14 +1,143 @@ ChangeFailureRate # unused class (src/metric_collectors/change_failure_rate.py:13) +AnchoreSecurityWarnings # unused class (src/source_collectors/anchore/security_warnings.py:10) +AnchoreSourceUpToDateness # unused class (src/source_collectors/anchore/source_up_to_dateness.py:13) +AnchoreJenkinsPluginSecurityWarnings # unused class (src/source_collectors/anchore_jenkins_plugin/security_warnings.py:10) +AnchoreJenkinsPluginSourceUpToDateness # unused class (src/source_collectors/anchore_jenkins_plugin/source_up_to_dateness.py:6) +AxeCoreSourceUpToDateness # unused class (src/source_collectors/axe_core/source_up_to_dateness.py:10) +AxeCoreSourceVersion # unused class (src/source_collectors/axe_core/source_version.py:9) +AxeCoreViolations # unused class (src/source_collectors/axe_core/violations.py:55) +AxeCSVViolations # unused class (src/source_collectors/axe_csv/violations.py:14) +AxeHTMLReporterViolations # unused class (src/source_collectors/axe_html_reporter/violations.py:15) +AzureDevopsAverageIssueLeadTime # unused class (src/source_collectors/azure_devops/average_issue_lead_time.py:13) +AzureDevopsChangeFailureRate # unused class (src/source_collectors/azure_devops/change_failure_rate.py:15) +AzureDevopsFailedJobs # unused class (src/source_collectors/azure_devops/failed_jobs.py:8) +AzureDevopsJobRunsWithinTimePeriod # unused class (src/source_collectors/azure_devops/job_runs_within_time_period.py:11) +AzureDevopsSourceUpToDateness # unused class (src/source_collectors/azure_devops/source_up_to_dateness.py:53) __new__ # unused function (src/source_collectors/azure_devops/source_up_to_dateness.py:56) +AzureDevopsUnmergedBranches # unused 
class (src/source_collectors/azure_devops/unmerged_branches.py:15) +AzureDevopsUnusedJobs # unused class (src/source_collectors/azure_devops/unused_jobs.py:11) +AzureDevopsUserStoryPoints # unused class (src/source_collectors/azure_devops/user_story_points.py:12) +BanditSecurityWarnings # unused class (src/source_collectors/bandit/security_warnings.py:10) +BanditSourceUpToDateness # unused class (src/source_collectors/bandit/source_up_to_dateness.py:10) +CalendarSourceUpToDateness # unused class (src/source_collectors/calendar/source_up_to_dateness.py:13) +CalendarTimeRemaining # unused class (src/source_collectors/calendar/time_remaining.py:13) kind # unused variable (src/source_collectors/cargo_audit/security_warnings.py:42) warnings # unused variable (src/source_collectors/cargo_audit/security_warnings.py:49) -latestVersion # unused variable (src/source_collectors/dependency_track/dependencies.py:21) -repositoryMeta # unused variable (src/source_collectors/dependency_track/dependencies.py:29) +CargoAuditSecurityWarnings # unused class (src/source_collectors/cargo_audit/security_warnings.py:52) +ClocLOC # unused class (src/source_collectors/cloc/loc.py:10) +ClocSourceVersion # unused class (src/source_collectors/cloc/source_version.py:9) +CoberturaSourceUpToDateness # unused class (src/source_collectors/cobertura/source_up_to_dateness.py:11) +CoberturaSourceVersion # unused class (src/source_collectors/cobertura/source_version.py:10) +CoberturaUncoveredBranches # unused class (src/source_collectors/cobertura/uncovered_branches.py:6) +CoberturaUncoveredLines # unused class (src/source_collectors/cobertura/uncovered_lines.py:6) +CoberturaJenkinsPluginSourceUpToDateness # unused class (src/source_collectors/cobertura_jenkins_plugin/source_up_to_dateness.py:8) +CoberturaJenkinsPluginUncoveredBranches # unused class (src/source_collectors/cobertura_jenkins_plugin/uncovered_branches.py:6) +CoberturaJenkinsPluginUncoveredLines # unused class 
(src/source_collectors/cobertura_jenkins_plugin/uncovered_lines.py:6) +ComposerDependencies # unused class (src/source_collectors/composer/dependencies.py:7) +CxSASTSecurityWarnings # unused class (src/source_collectors/cxsast/security_warnings.py:10) +CxSASTSourceUpToDateness # unused class (src/source_collectors/cxsast/source_up_to_dateness.py:10) +CxSASTSourceVersion # unused class (src/source_collectors/cxsast/source_version.py:12) +lastBomImport # unused variable (src/source_collectors/dependency_track/base.py:17) +latestVersion # unused variable (src/source_collectors/dependency_track/dependencies.py:14) +repositoryMeta # unused variable (src/source_collectors/dependency_track/dependencies.py:22) +DependencyTrackDependencies # unused class (src/source_collectors/dependency_track/dependencies.py:27) vulnId # unused variable (src/source_collectors/dependency_track/security_warnings.py:23) matrix # unused variable (src/source_collectors/dependency_track/security_warnings.py:34) -lastBomImport # unused variable (src/source_collectors/dependency_track/source_up_to_dateness.py:17) +DependencyTrackSecurityWarnings # unused class (src/source_collectors/dependency_track/security_warnings.py:38) +DependencyTrackSourceUpToDateness # unused class (src/source_collectors/dependency_track/source_up_to_dateness.py:14) +DependencyTrackSourceVersion # unused class (src/source_collectors/dependency_track/source_version.py:9) +GatlingPerformanceTestDuration # unused class (src/source_collectors/gatling/performancetest_duration.py:9) +GatlingSlowTransactions # unused class (src/source_collectors/gatling/slow_transactions.py:12) +GatlingSourceUpToDateness # unused class (src/source_collectors/gatling/source_up_to_dateness.py:15) +GatlingSourceVersion # unused class (src/source_collectors/gatling/source_version.py:11) +GenericJSONSecurityWarnings # unused class (src/source_collectors/generic_json/security_warnings.py:12) +GitLabChangeFailureRate # unused class 
(src/source_collectors/gitlab/change_failure_rate.py:6) +GitLabFailedJobs # unused class (src/source_collectors/gitlab/failed_jobs.py:9) +GitLabMergeRequests # unused class (src/source_collectors/gitlab/merge_requests.py:70) +GitLabPipelineDuration # unused class (src/source_collectors/gitlab/pipeline_duration.py:12) +GitLabSourceUpToDateness # unused class (src/source_collectors/gitlab/source_up_to_dateness.py:93) __new__ # unused function (src/source_collectors/gitlab/source_up_to_dateness.py:96) +GitLabSourceVersion # unused class (src/source_collectors/gitlab/source_version.py:11) +GitLabUnmergedBranches # unused class (src/source_collectors/gitlab/unmerged_branches.py:15) +GitLabUnusedJobs # unused class (src/source_collectors/gitlab/unused_jobs.py:11) scan_status # unused variable (src/source_collectors/harbor/security_warnings.py:58) +HarborSecurityWarnings # unused class (src/source_collectors/harbor/security_warnings.py:62) +HarborJSONSecurityWarnings # unused class (src/source_collectors/harbor_json/security_warnings.py:36) +JacocoSourceUpToDateness # unused class (src/source_collectors/jacoco/source_up_to_dateness.py:11) +JacocoUncoveredBranches # unused class (src/source_collectors/jacoco/uncovered_branches.py:6) +JacocoUncoveredLines # unused class (src/source_collectors/jacoco/uncovered_lines.py:6) +JacocoJenkinsPluginSourceUpToDateness # unused class (src/source_collectors/jacoco_jenkins_plugin/source_up_to_dateness.py:8) +JacocoJenkinsPluginUncoveredBranches # unused class (src/source_collectors/jacoco_jenkins_plugin/uncovered_branches.py:6) +JacocoJenkinsPluginUncoveredLines # unused class (src/source_collectors/jacoco_jenkins_plugin/uncovered_lines.py:6) +JenkinsChangeFailureRate # unused class (src/source_collectors/jenkins/change_failure_rate.py:13) +JenkinsFailedJobs # unused class (src/source_collectors/jenkins/failed_jobs.py:8) +JenkinsJobRunsWithinTimePeriod # unused class (src/source_collectors/jenkins/job_runs_within_time_period.py:12) 
+JenkinsSourceUpToDateness # unused class (src/source_collectors/jenkins/source_up_to_dateness.py:9) +JenkinsSourceVersion # unused class (src/source_collectors/jenkins/source_version.py:9) +JenkinsUnusedJobs # unused class (src/source_collectors/jenkins/unused_jobs.py:11) +JenkinsTestReportSourceUpToDateness # unused class (src/source_collectors/jenkins_test_report/source_up_to_dateness.py:9) +JiraAverageIssueLeadTime # unused class (src/source_collectors/jira/average_issue_lead_time.py:13) +JiraIssueStatus # unused class (src/source_collectors/jira/issue_status.py:11) +JiraManualTestDuration # unused class (src/source_collectors/jira/manual_test_duration.py:6) +JiraManualTestExecution # unused class (src/source_collectors/jira/manual_test_execution.py:14) +JiraSourceVersion # unused class (src/source_collectors/jira/source_version.py:11) +JiraUserStoryPoints # unused class (src/source_collectors/jira/user_story_points.py:6) +JiraVelocity # unused class (src/source_collectors/jira/velocity.py:38) +JMeterCSVPerformanceTestDuration # unused class (src/source_collectors/jmeter_csv/performancetest_duration.py:9) +JMeterCSVSlowTransactions # unused class (src/source_collectors/jmeter_csv/slow_transactions.py:11) +JMeterCSVSourceUpToDateness # unused class (src/source_collectors/jmeter_csv/source_up_to_dateness.py:15) +JMeterJSONSlowTransactions # unused class (src/source_collectors/jmeter_json/slow_transactions.py:10) +JUnitSourceUpToDateness # unused class (src/source_collectors/junit/source_up_to_dateness.py:13) +ManualNumber # unused class (src/source_collectors/manual_number/all_metrics.py:8) +NCoverSourceUpToDateness # unused class (src/source_collectors/ncover/source_up_to_dateness.py:13) +NCoverUncoveredBranches # unused class (src/source_collectors/ncover/uncovered_branches.py:6) +NCoverUncoveredLines # unused class (src/source_collectors/ncover/uncovered_lines.py:6) +NpmDependencies # unused class (src/source_collectors/npm/dependencies.py:10) 
+OJAuditViolations # unused class (src/source_collectors/ojaudit/violations.py:22) +OpenVASSecurityWarnings # unused class (src/source_collectors/openvas/security_warnings.py:11) +OpenVASSourceUpToDateness # unused class (src/source_collectors/openvas/source_up_to_dateness.py:11) +OpenVASSourceVersion # unused class (src/source_collectors/openvas/source_version.py:10) +OWASPDependencyCheckSecurityWarnings # unused class (src/source_collectors/owasp_dependency_check/security_warnings.py:11) +OWASPDependencyCheckSourceUpToDateness # unused class (src/source_collectors/owasp_dependency_check/source_up_to_dateness.py:13) +OWASPDependencyCheckSourceVersion # unused class (src/source_collectors/owasp_dependency_check/source_version.py:12) +OWASPZAPSecurityWarnings # unused class (src/source_collectors/owasp_zap/security_warnings.py:15) +OWASPZAPSourceUpToDateness # unused class (src/source_collectors/owasp_zap/source_up_to_dateness.py:11) +OWASPZAPSourceVersion # unused class (src/source_collectors/owasp_zap/source_version.py:10) +PerformanceTestRunnerPerformanceTestDuration # unused class (src/source_collectors/performancetest_runner/performancetest_duration.py:13) +PerformanceTestRunnerScalability # unused class (src/source_collectors/performancetest_runner/performancetest_scalability.py:13) +PerformanceTestRunnerPerformanceTestStability # unused class (src/source_collectors/performancetest_runner/performancetest_stability.py:13) +PerformanceTestRunnerSlowTransactions # unused class (src/source_collectors/performancetest_runner/slow_transactions.py:11) +PerformanceTestRunnerSoftwareVersion # unused class (src/source_collectors/performancetest_runner/software_version.py:14) +PerformanceTestRunnerSourceUpToDateness # unused class (src/source_collectors/performancetest_runner/source_up_to_dateness.py:15) +PipDependencies # unused class (src/source_collectors/pip/dependencies.py:10) +PyupioSafetySecurityWarnings # unused class 
(src/source_collectors/pyupio_safety/security_warnings.py:12) +QualityTimeMetrics # unused class (src/source_collectors/quality_time/metrics.py:18) +QualityTimeMissingMetrics # unused class (src/source_collectors/quality_time/missing_metrics.py:11) +QualityTimeSourceUpToDateness # unused class (src/source_collectors/quality_time/source_up_to_dateness.py:12) +QualityTimeSourceVersion # unused class (src/source_collectors/quality_time/source_version.py:9) +RobotFrameworkSourceUpToDateness # unused class (src/source_collectors/robot_framework/source_up_to_dateness.py:13) +RobotFrameworkSourceVersion # unused class (src/source_collectors/robot_framework/source_version.py:12) +RobotFrameworkJenkinsPluginSourceUpToDateness # unused class (src/source_collectors/robot_framework_jenkins_plugin/source_up_to_dateness.py:8) +SARIFJSONSecurityWarnings # unused class (src/source_collectors/sarif/security_warnings.py:6) +SARIFJSONViolations # unused class (src/source_collectors/sarif/violations.py:6) +SnykSecurityWarnings # unused class (src/source_collectors/snyk/security_warnings.py:13) +SonarQubeCommentedOutCode # unused class (src/source_collectors/sonarqube/commented_out_code.py:6) +SonarQubeComplexUnits # unused class (src/source_collectors/sonarqube/complex_units.py:6) +SonarQubeDuplicatedLines # unused class (src/source_collectors/sonarqube/duplicated_lines.py:6) +SonarQubeLOC # unused class (src/source_collectors/sonarqube/loc.py:11) +SonarQubeLongUnits # unused class (src/source_collectors/sonarqube/long_units.py:6) +SonarQubeManyParameters # unused class (src/source_collectors/sonarqube/many_parameters.py:6) +SonarQubeRemediationEffort # unused class (src/source_collectors/sonarqube/remediation_effort.py:12) +SonarQubeSecurityWarnings # unused class (src/source_collectors/sonarqube/security_warnings.py:11) +SonarQubeSoftwareVersion # unused class (src/source_collectors/sonarqube/software_version.py:11) +SonarQubeSourceUpToDateness # unused class 
(src/source_collectors/sonarqube/source_up_to_dateness.py:12) +SonarQubeSourceVersion # unused class (src/source_collectors/sonarqube/source_version.py:9) +SonarQubeSuppressedViolations # unused class (src/source_collectors/sonarqube/suppressed_violations.py:10) +SonarQubeTodoAndFixmeComments # unused class (src/source_collectors/sonarqube/todo_and_fixme_comments.py:6) +SonarQubeUncoveredBranches # unused class (src/source_collectors/sonarqube/uncovered_branches.py:6) +SonarQubeUncoveredLines # unused class (src/source_collectors/sonarqube/uncovered_lines.py:6) +TrelloIssues # unused class (src/source_collectors/trello/issues.py:13) +TrelloSourceUpToDateness # unused class (src/source_collectors/trello/source_up_to_dateness.py:12) VulnerabilityID # unused variable (src/source_collectors/trivy/security_warnings.py:19) Title # unused variable (src/source_collectors/trivy/security_warnings.py:20) Description # unused variable (src/source_collectors/trivy/security_warnings.py:21) @@ -18,3 +147,5 @@ References # unused variable (src/source_collectors/trivy/security_warnings.py:26) Target # unused variable (src/source_collectors/trivy/security_warnings.py:32) Vulnerabilities # unused variable (src/source_collectors/trivy/security_warnings.py:33) +TrivyJSONSecurityWarnings # unused class (src/source_collectors/trivy/security_warnings.py:39) +RobotFrameworkSourceVersion # unused class (tests/source_collectors/robot_framework/test_source_version.py:6) diff --git a/components/collector/ci/pip-compile.sh b/components/collector/ci/pip-compile.sh index 815df6440a..3ddc132f40 100755 --- a/components/collector/ci/pip-compile.sh +++ b/components/collector/ci/pip-compile.sh @@ -1,7 +1,6 @@ #!/bin/bash -source ../../ci/base.sh +PATH="$PATH:../../ci" +source base.sh -# Update the compiled requirements files -run pip-compile --output-file requirements/requirements.txt pyproject.toml -run pip-compile --extra dev --output-file requirements/requirements-dev.txt pyproject.toml 
+run_pip_compile diff --git a/components/collector/ci/pip-install.sh b/components/collector/ci/pip-install.sh index c10334fc08..40b457d94c 100755 --- a/components/collector/ci/pip-install.sh +++ b/components/collector/ci/pip-install.sh @@ -1,7 +1,7 @@ #!/bin/bash -source ../../ci/base.sh +PATH="$PATH:../../ci" +source pip-base.sh -# Install the requirements -run pip install --ignore-installed --quiet -r requirements/requirements-dev.txt -run pip install --ignore-installed --quiet -r requirements/requirements-internal.txt +run_pip_install -r requirements/requirements-dev.txt +run_pip_install -r requirements/requirements-internal.txt diff --git a/components/collector/ci/quality.sh b/components/collector/ci/quality.sh index 8933d67612..2f19fa446e 100755 --- a/components/collector/ci/quality.sh +++ b/components/collector/ci/quality.sh @@ -1,33 +1,6 @@ #!/bin/bash -source ../../ci/base.sh +PATH="$PATH:../../ci" +source quality-base.sh -# Ruff -run pipx run `spec ruff` check . -run pipx run `spec ruff` format --check . - -# Fixit -run pipx run `spec fixit` lint src tests - -# Mypy -run pipx run `spec mypy` --python-executable=$(which python) src tests - -# pip-audit -# See https://github.com/aio-libs/aiohttp/issues/6772 for why we ignore the CVE -run pipx run `spec pip-audit` --strict --progress-spinner=off -r requirements/requirements.txt -r requirements/requirements-dev.txt - -# Safety -# Vulnerability ID: 67599 -# ADVISORY: ** DISPUTED ** An issue was discovered in pip (all versions) because it installs the version with the -# highest version number, even if the user had intended to obtain a private package from a private index. This only -# affects use of the --extra-index-url option, and exploitation requires that the... 
-# CVE-2018-20225 -# For more information about this vulnerability, visit https://data.safetycli.com/v/67599/97c -run pipx run `spec safety` check --bare --ignore 67599 -r requirements/requirements.txt -r requirements/requirements-dev.txt - -# Bandit -run pipx run `spec bandit` --quiet --recursive src/ - -# Vulture -NAMES_TO_IGNORE='Anchore*,Axe*,AzureDevops*,Bandit*,Calendar*,CargoAudit*,Cloc*,Cobertura*,Composer*,CxSAST*,DependencyTrack*,Gatling*,Generic*,GitLab*,Harbor*,Jacoco*,Jenkins*,Jira*,JMeter*,JUnit*,ManualNumber*,NCover*,Npm*,OJAudit*,OpenVAS*,OWASPDependencyCheck*,OWASPZAP*,PerformanceTestRunner*,Pip*,PyupioSafety*,QualityTime*,RobotFramework*,SARIF*,Snyk*,SonarQube*,Trello*,TrivyJSON*' -run pipx run `spec vulture` --min-confidence 0 --ignore-names $NAMES_TO_IGNORE src/ tests/ .vulture_ignore_list.py +check_python_quality diff --git a/components/collector/ci/unittest.sh b/components/collector/ci/unittest.sh index 87a5ea5b65..be9b0fa620 100755 --- a/components/collector/ci/unittest.sh +++ b/components/collector/ci/unittest.sh @@ -1,10 +1,7 @@ #!/bin/bash -source ../../ci/unittest-base.sh +PATH="$PATH:../../ci" +source unittest-base.sh export COVERAGE_RCFILE=../../.coveragerc - -coverage run -m unittest --quiet -coverage report --fail-under=0 -coverage html --fail-under=0 -coverage xml # Fail if coverage is too low, but only after the text and HTML reports have been generated +run_coverage diff --git a/components/collector/pyproject.toml b/components/collector/pyproject.toml index 99467663c7..8d6ad951a3 100644 --- a/components/collector/pyproject.toml +++ b/components/collector/pyproject.toml @@ -1,6 +1,11 @@ [project] name = "collector" version = "5.13.0" +requires-python = ">=3.12" +classifiers = [ + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.12", +] dependencies = [ "aiogqlc==5.1.0", "aiohttp==3.9.5", @@ -11,28 +16,91 @@ dependencies = [ "python-dateutil==2.9.0.post0", ] -[project.optional-dependencies] -dev = 
[ +optional-dependencies.dev = [ "coverage==7.5.3", "mongomock==4.1.2", "pip==24.0", + "pip-tools==7.4.1", # To add hashes to requirements "pipx==1.6.0", - "pip-tools==7.4.1", # To add hashes to requirements - "pydantic==2.7.4", # Needed for importing the data model in the tests from the shared code component + "pydantic==2.7.4", # Needed for importing the data model in the tests from the shared code component "types-beautifulsoup4==4.12.0.20240511", "types-python-dateutil==2.9.0.20240316", - "unittest-xml-reporting==3.2.0", # Needed to generate JUnit XML output for Sonarcloud.io + "unittest-xml-reporting==3.2.0", # Needed to generate JUnit XML output for Sonarcloud.io ] -tools = [ +optional-dependencies.tools = [ "bandit==1.7.9", "fixit==2.1.0", "mypy==1.10.0", "pip-audit==2.7.3", + "pyproject-fmt==2.1.3", "ruff==0.4.9", - "safety==3.2.3", - "vulture==2.11" + "vulture==2.11", +] + +[tool.ruff] +target-version = "py312" +line-length = 120 +src = [ + "src", ] +lint.select = [ + "ALL", +] +lint.ignore = [ + "ANN001", # https://docs.astral.sh/ruff/rules/missing-type-function-argument/ - too many untyped arguments atm to turn this rule on + "ANN002", # https://docs.astral.sh/ruff/rules/missing-type-args/ - leads to false positives for super().__init__(*args, **kwargs) + "ANN003", # https://docs.astral.sh/ruff/rules/missing-type-kwargs/ - leads to false positives for super().__init__(*args, **kwargs) + "ANN101", # https://docs.astral.sh/ruff/rules/missing-type-self/ - type checkers can infer the type of `self`, so annotating it is superfluous + "ANN102", # https://docs.astral.sh/ruff/rules/missing-type-cls/ - type checkers can infer the type of `cls`, so annotating it is superfluous + "ANN204", # https://docs.astral.sh/ruff/rules/missing-return-type-special-method/ - typing classes that inherit from set and list correctly is surprisingly hard + "ARG002", # https://docs.astral.sh/ruff/rules/unused-method-argument/ - this rule doesn't take inheritance into account + 
"COM812", # https://docs.astral.sh/ruff/rules/missing-trailing-comma/ - this rule may cause conflicts when used with the ruff formatter + "D107", # https://docs.astral.sh/ruff/rules/undocumented-public-init/ - requiring __init__() methods to have docstrings seems a bit much + "D203", # https://docs.astral.sh/ruff/rules/one-blank-line-before-class/ - prevent warning: `one-blank-line-before-class` (D203) and `no-blank-line-before-class` (D211) are incompatible. Ignoring `one-blank-line-before-class` + "D213", # https://docs.astral.sh/ruff/rules/multi-line-summary-second-line/ - prevent warning: `multi-line-summary-first-line` (D212) and `multi-line-summary-second-line` (D213) are incompatible. Ignoring `multi-line-summary-second-line` + "FBT", # https://docs.astral.sh/ruff/rules/#flake8-boolean-trap-fbt - not sure of the value of preventing "boolean traps" + "ISC001", # https://docs.astral.sh/ruff/rules/single-line-implicit-string-concatenation/ - this rule may cause conflicts when used with the ruff formatter + "PD", # https://docs.astral.sh/ruff/rules/#pandas-vet-pd - pandas isn't used + "PT", # https://docs.astral.sh/ruff/rules/#flake8-pytest-style-pt - pytest isn't used +] +lint.per-file-ignores.".vulture_ignore_list.py" = [ + "ALL", +] +lint.per-file-ignores."__init__.py" = [ + "D104", # https://docs.astral.sh/ruff/rules/undocumented-public-package/ - don't require doc strings in __init__.py files + "F401", # https://docs.astral.sh/ruff/rules/unused-import/ - collectors are imported in __init__.py files to flatten the module hierarchy +] +lint.per-file-ignores."src/base_collectors/source_collector.py" = [ + "BLE001", # https://docs.astral.sh/ruff/rules/blind-except/ - allow for catching blind exception `Exception` +] +lint.per-file-ignores."src/quality_time_collector.py" = [ + "INP001", # https://docs.astral.sh/ruff/rules/implicit-namespace-package/ - false positive because this is the main script +] +lint.per-file-ignores."tests/**/*.py" = [ + "ANN201", # 
https://docs.astral.sh/ruff/rules/missing-return-type-undocumented-public-function/ - don't require test functions to have return types +] +lint.isort.section-order = [ + "future", + "standard-library", + "third-party", + "second-party", + "first-party", + "tests", + "local-folder", +] +lint.isort.sections."second-party" = [ + "shared", + "shared_data_model", +] +lint.isort.sections."tests" = [ + "tests", +] + +[tool.pyproject-fmt] +indent = 4 +keep_full_version = true # Don't remove trailing zero's from version specifiers + [tool.mypy] ignore_missing_imports = false incremental = false @@ -43,7 +111,7 @@ warn_unused_ignores = true [[tool.mypy.overrides]] module = [ - "defusedxml" + "defusedxml", ] ignore_missing_imports = true @@ -53,51 +121,3 @@ generate_hashes = true quiet = true strip_extras = true upgrade = true - -[tool.ruff] -target-version = "py312" -line-length = 120 -src = ["src"] - -[tool.ruff.lint] -select = ["ALL"] -ignore = [ - "ANN001", # https://docs.astral.sh/ruff/rules/missing-type-function-argument/ - too many untyped arguments atm to turn this rule on - "ANN002", # https://docs.astral.sh/ruff/rules/missing-type-args/ - leads to false positives for super().__init__(*args, **kwargs) - "ANN003", # https://docs.astral.sh/ruff/rules/missing-type-kwargs/ - leads to false positives for super().__init__(*args, **kwargs) - "ANN101", # https://docs.astral.sh/ruff/rules/missing-type-self/ - type checkers can infer the type of `self`, so annotating it is superfluous - "ANN102", # https://docs.astral.sh/ruff/rules/missing-type-cls/ - type checkers can infer the type of `cls`, so annotating it is superfluous - "ANN204", # https://docs.astral.sh/ruff/rules/missing-return-type-special-method/ - typing classes that inherit from set and list correctly is surprisingly hard - "ARG002", # https://docs.astral.sh/ruff/rules/unused-method-argument/ - this rule doesn't take inheritance into account - "COM812", # https://docs.astral.sh/ruff/rules/missing-trailing-comma/ 
- this rule may cause conflicts when used with the ruff formatter - "D107", # https://docs.astral.sh/ruff/rules/undocumented-public-init/ - requiring __init__() methods to have docstrings seems a bit much - "D203", # https://docs.astral.sh/ruff/rules/one-blank-line-before-class/ - prevent warning: `one-blank-line-before-class` (D203) and `no-blank-line-before-class` (D211) are incompatible. Ignoring `one-blank-line-before-class` - "D213", # https://docs.astral.sh/ruff/rules/multi-line-summary-second-line/ - prevent warning: `multi-line-summary-first-line` (D212) and `multi-line-summary-second-line` (D213) are incompatible. Ignoring `multi-line-summary-second-line` - "FBT", # https://docs.astral.sh/ruff/rules/#flake8-boolean-trap-fbt - not sure of the value of preventing "boolean traps" - "ISC001", # https://docs.astral.sh/ruff/rules/single-line-implicit-string-concatenation/ - this rule may cause conflicts when used with the ruff formatter - "PD", # https://docs.astral.sh/ruff/rules/#pandas-vet-pd - pandas isn't used - "PT", # https://docs.astral.sh/ruff/rules/#flake8-pytest-style-pt - pytest isn't used -] - -[tool.ruff.lint.isort] -section-order = ["future", "standard-library", "third-party", "second-party", "first-party", "tests", "local-folder"] - -[tool.ruff.lint.isort.sections] -"second-party" = ["shared", "shared_data_model"] -"tests" = ["tests"] - -[tool.ruff.lint.per-file-ignores] -".vulture_ignore_list.py" = ["ALL"] -"__init__.py" = [ - "D104", # https://docs.astral.sh/ruff/rules/undocumented-public-package/ - don't require doc strings in __init__.py files - "F401", # https://docs.astral.sh/ruff/rules/unused-import/ - collectors are imported in __init__.py files to flatten the module hierarchy -] -"src/base_collectors/source_collector.py" = [ - "BLE001" # https://docs.astral.sh/ruff/rules/blind-except/ - allow for catching blind exception `Exception` -] -"src/quality_time_collector.py" = [ - "INP001", # 
https://docs.astral.sh/ruff/rules/implicit-namespace-package/ - false positive because this is the main script -] -"tests/**/*.py" = [ - "ANN201", # https://docs.astral.sh/ruff/rules/missing-return-type-undocumented-public-function/ - don't require test functions to have return types -] diff --git a/components/collector/tests/source_collectors/dependency_track/test_security_warnings.py b/components/collector/tests/source_collectors/dependency_track/test_security_warnings.py index 4feed254e1..887e75e84e 100644 --- a/components/collector/tests/source_collectors/dependency_track/test_security_warnings.py +++ b/components/collector/tests/source_collectors/dependency_track/test_security_warnings.py @@ -106,6 +106,6 @@ async def test_api_key(self): get.assert_called_once_with( "https://dependency_track/api/v1/project?pageSize=25&pageNumber=1", allow_redirects=True, - auth=BasicAuth(login="API key", password="", encoding="latin1"), + auth=BasicAuth(login="API key", password="", encoding="latin1"), # nosec headers={"X-Api-Key": "API key"}, ) diff --git a/components/frontend/ci/quality.sh b/components/frontend/ci/quality.sh index db07ac2c9e..856c6d2301 100755 --- a/components/frontend/ci/quality.sh +++ b/components/frontend/ci/quality.sh @@ -1,4 +1,7 @@ #!/bin/bash +PATH="$PATH:../../ci" +source quality-base.sh + # Eslint -npx eslint *.js *.mjs src +run npx eslint *.js *.mjs src diff --git a/components/frontend/ci/unittest.sh b/components/frontend/ci/unittest.sh index a42bc09aec..729224589f 100755 --- a/components/frontend/ci/unittest.sh +++ b/components/frontend/ci/unittest.sh @@ -1,3 +1,6 @@ #!/bin/bash -npm test +PATH="$PATH:../../ci" +source unittest-base.sh + +run npm test diff --git a/components/notifier/ci/pip-compile.sh b/components/notifier/ci/pip-compile.sh index 815df6440a..3ddc132f40 100755 --- a/components/notifier/ci/pip-compile.sh +++ b/components/notifier/ci/pip-compile.sh @@ -1,7 +1,6 @@ #!/bin/bash -source ../../ci/base.sh +PATH="$PATH:../../ci" +source 
base.sh -# Update the compiled requirements files -run pip-compile --output-file requirements/requirements.txt pyproject.toml -run pip-compile --extra dev --output-file requirements/requirements-dev.txt pyproject.toml +run_pip_compile diff --git a/components/notifier/ci/pip-install.sh b/components/notifier/ci/pip-install.sh index 25a3052b7f..40b457d94c 100755 --- a/components/notifier/ci/pip-install.sh +++ b/components/notifier/ci/pip-install.sh @@ -1,7 +1,7 @@ #!/bin/bash -source ../../ci/base.sh +PATH="$PATH:../../ci" +source pip-base.sh -# Install the requirements -run pip install --ignore-installed --quiet --use-pep517 -r requirements/requirements-dev.txt -run pip install --ignore-installed --quiet --use-pep517 -r requirements/requirements-internal.txt +run_pip_install -r requirements/requirements-dev.txt +run_pip_install -r requirements/requirements-internal.txt diff --git a/components/notifier/ci/quality.sh b/components/notifier/ci/quality.sh index e252158282..2f19fa446e 100755 --- a/components/notifier/ci/quality.sh +++ b/components/notifier/ci/quality.sh @@ -1,32 +1,6 @@ #!/bin/bash -source ../../ci/base.sh +PATH="$PATH:../../ci" +source quality-base.sh -# Ruff -run pipx run `spec ruff` check . -run pipx run `spec ruff` format --check . - -# Fixit -run pipx run `spec fixit` lint src tests - -# Mypy -run pipx run `spec mypy` --python-executable=$(which python) src - -# pip-audit -# See https://github.com/aio-libs/aiohttp/issues/6772 for why we ignore the CVE -run pipx run `spec pip-audit` --strict --progress-spinner=off -r requirements/requirements.txt -r requirements/requirements-dev.txt - -# Safety -# Vulnerability ID: 67599 -# ADVISORY: ** DISPUTED ** An issue was discovered in pip (all versions) because it installs the version with the -# highest version number, even if the user had intended to obtain a private package from a private index. This only -# affects use of the --extra-index-url option, and exploitation requires that the... 
-# CVE-2018-20225 -# For more information about this vulnerability, visit https://data.safetycli.com/v/67599/97c -run pipx run `spec safety` check --bare --ignore 67599 -r requirements/requirements.txt -r requirements/requirements-dev.txt - -# Bandit -run pipx run `spec bandit` --quiet --recursive src/ - -# Vulture -run pipx run `spec vulture` --min-confidence 0 src/ tests/ .vulture_ignore_list.py +check_python_quality diff --git a/components/notifier/ci/unittest.sh b/components/notifier/ci/unittest.sh index 87a5ea5b65..be9b0fa620 100755 --- a/components/notifier/ci/unittest.sh +++ b/components/notifier/ci/unittest.sh @@ -1,10 +1,7 @@ #!/bin/bash -source ../../ci/unittest-base.sh +PATH="$PATH:../../ci" +source unittest-base.sh export COVERAGE_RCFILE=../../.coveragerc - -coverage run -m unittest --quiet -coverage report --fail-under=0 -coverage html --fail-under=0 -coverage xml # Fail if coverage is too low, but only after the text and HTML reports have been generated +run_coverage diff --git a/components/notifier/pyproject.toml b/components/notifier/pyproject.toml index f9b8dcf6d5..040957cbf7 100644 --- a/components/notifier/pyproject.toml +++ b/components/notifier/pyproject.toml @@ -1,30 +1,85 @@ [project] name = "notifier" version = "5.13.0" +requires-python = ">=3.12" +classifiers = [ + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.12", +] dependencies = [ "aiohttp==3.9.5", - "pymsteams==0.2.2" + "pymsteams==0.2.2", ] -[project.optional-dependencies] -dev = [ +optional-dependencies.dev = [ "coverage==7.5.3", "mongomock==4.1.2", "pip==24.0", + "pip-tools==7.4.1", # To add hashes to requirements "pipx==1.6.0", - "pip-tools==7.4.1", # To add hashes to requirements - "pydantic==2.7.4", # Needed for importing the data model in the tests from the shared code component - "unittest-xml-reporting==3.2.0", # Needed to generate JUnit XML output for Sonarcloud.io + "pydantic==2.7.4", # Needed for importing the data model in the tests 
from the shared code component + "unittest-xml-reporting==3.2.0", # Needed to generate JUnit XML output for Sonarcloud.io ] -tools = [ +optional-dependencies.tools = [ "bandit==1.7.9", "fixit==2.1.0", "mypy==1.10.0", "pip-audit==2.7.3", + "pyproject-fmt==2.1.3", "ruff==0.4.9", - "safety==3.2.3", - "vulture==2.11" + "vulture==2.11", +] + +[tool.ruff] +target-version = "py312" +line-length = 120 +src = [ + "src", +] +lint.select = [ + "ALL", +] +lint.ignore = [ + "ANN101", # https://docs.astral.sh/ruff/rules/missing-type-self/ - type checkers can infer the type of `self`, so annotating it is superfluous + "COM812", # https://docs.astral.sh/ruff/rules/missing-trailing-comma/ - this rule may cause conflicts when used with the ruff formatter + "D203", # https://docs.astral.sh/ruff/rules/one-blank-line-before-class/ - prevent warning: `one-blank-line-before-class` (D203) and `no-blank-line-before-class` (D211) are incompatible. Ignoring `one-blank-line-before-class` + "D213", # https://docs.astral.sh/ruff/rules/multi-line-summary-second-line/ - prevent warning: `multi-line-summary-first-line` (D212) and `multi-line-summary-second-line` (D213) are incompatible. 
Ignoring `multi-line-summary-second-line` + "FBT", # https://docs.astral.sh/ruff/rules/#flake8-boolean-trap-fbt - not sure of the value of preventing "boolean traps" + "ISC001", # https://docs.astral.sh/ruff/rules/single-line-implicit-string-concatenation/ - this rule may cause conflicts when used with the ruff formatter + "PT", # https://docs.astral.sh/ruff/rules/#flake8-pytest-style-pt - pytest isn't used +] +lint.per-file-ignores.".vulture_ignore_list.py" = [ + "ALL", +] +lint.per-file-ignores."__init__.py" = [ + "D104", # https://docs.astral.sh/ruff/rules/undocumented-public-package/ - don't require doc strings in __init__.py files +] +lint.per-file-ignores."src/quality_time_notifier.py" = [ + "INP001", # https://docs.astral.sh/ruff/rules/implicit-namespace-package/ - false positive because this is the main script +] +lint.per-file-ignores."tests/**/*.py" = [ + "ANN201", # https://docs.astral.sh/ruff/rules/missing-return-type-undocumented-public-function/ - don't require test functions to have return types +] +lint.isort.section-order = [ + "future", + "standard-library", + "third-party", + "second-party", + "first-party", + "tests", + "local-folder", +] +lint.isort.sections."second-party" = [ + "shared", + "shared_data_model", ] +lint.isort.sections."tests" = [ + "tests", +] + +[tool.pyproject-fmt] +indent = 4 +keep_full_version = true # Don't remove trailing zero's from version specifiers [tool.mypy] ignore_missing_imports = false @@ -36,7 +91,7 @@ warn_unused_ignores = true [[tool.mypy.overrides]] module = [ - "pymsteams" + "pymsteams", ] ignore_missing_imports = true @@ -46,39 +101,3 @@ generate_hashes = true quiet = true strip_extras = true upgrade = true - -[tool.ruff] -target-version = "py312" -line-length = 120 -src = ["src"] - -[tool.ruff.lint] -select = ["ALL"] -ignore = [ - "ANN101", # https://docs.astral.sh/ruff/rules/missing-type-self/ - type checkers can infer the type of `self`, so annotating it is superfluous - "COM812", # 
https://docs.astral.sh/ruff/rules/missing-trailing-comma/ - this rule may cause conflicts when used with the ruff formatter - "D203", # https://docs.astral.sh/ruff/rules/one-blank-line-before-class/ - prevent warning: `one-blank-line-before-class` (D203) and `no-blank-line-before-class` (D211) are incompatible. Ignoring `one-blank-line-before-class` - "D213", # https://docs.astral.sh/ruff/rules/multi-line-summary-second-line/ - prevent warning: `multi-line-summary-first-line` (D212) and `multi-line-summary-second-line` (D213) are incompatible. Ignoring `multi-line-summary-second-line` - "FBT", # https://docs.astral.sh/ruff/rules/#flake8-boolean-trap-fbt - not sure of the value of preventing "boolean traps" - "ISC001", # https://docs.astral.sh/ruff/rules/single-line-implicit-string-concatenation/ - this rule may cause conflicts when used with the ruff formatter - "PT", # https://docs.astral.sh/ruff/rules/#flake8-pytest-style-pt - pytest isn't used -] - -[tool.ruff.lint.isort] -section-order = ["future", "standard-library", "third-party", "second-party", "first-party", "tests", "local-folder"] - -[tool.ruff.lint.isort.sections] -"second-party" = ["shared", "shared_data_model"] -"tests" = ["tests"] - -[tool.ruff.lint.per-file-ignores] -".vulture_ignore_list.py" = ["ALL"] -"__init__.py" = [ - "D104", # https://docs.astral.sh/ruff/rules/undocumented-public-package/ - don't require doc strings in __init__.py files -] -"src/quality_time_notifier.py" = [ - "INP001", # https://docs.astral.sh/ruff/rules/implicit-namespace-package/ - false positive because this is the main script -] -"tests/**/*.py" = [ - "ANN201", # https://docs.astral.sh/ruff/rules/missing-return-type-undocumented-public-function/ - don't require test functions to have return types -] diff --git a/components/notifier/tests/database/test_measurements.py b/components/notifier/tests/database/test_measurements.py index 5f27efb86e..1c3c4a0ed1 100644 --- a/components/notifier/tests/database/test_measurements.py 
+++ b/components/notifier/tests/database/test_measurements.py @@ -2,6 +2,7 @@ import unittest from datetime import UTC, datetime, timedelta +from typing import TYPE_CHECKING import mongomock @@ -11,6 +12,9 @@ from tests.fixtures import METRIC_ID, create_report +if TYPE_CHECKING: + from pymongo.database import Database + class MeasurementsTest(unittest.TestCase): """Unit tests for getting measurements.""" @@ -22,7 +26,7 @@ def setUp(self) -> None: {"_id": 2, "start": "3", "end": "4", "sources": [], "metric_uuid": METRIC_ID}, {"_id": 3, "start": "6", "end": "7", "sources": [], "metric_uuid": METRIC_ID}, ] - self.database = mongomock.MongoClient()["quality_time_db"] + self.database: Database = mongomock.MongoClient()["quality_time_db"] def test_get_recent_measurements(self): """Test that the recent measurements are returned.""" diff --git a/components/notifier/tests/database/test_reports.py b/components/notifier/tests/database/test_reports.py index 3b237b2526..b9018caf7a 100644 --- a/components/notifier/tests/database/test_reports.py +++ b/components/notifier/tests/database/test_reports.py @@ -1,6 +1,7 @@ """Unit tests for getting reports from the database.""" import unittest +from typing import TYPE_CHECKING import mongomock @@ -8,6 +9,9 @@ from tests.fixtures import METRIC_ID, create_report +if TYPE_CHECKING: + from pymongo.database import Database + class ReportsTest(unittest.TestCase): """Unit tests for getting information from the database.""" @@ -19,7 +23,7 @@ def setUp(self) -> None: {"_id": 2, "start": "3", "end": "4", "sources": [], "metric_uuid": METRIC_ID}, {"_id": 3, "start": "6", "end": "7", "sources": [], "metric_uuid": METRIC_ID}, ] - self.database = mongomock.MongoClient()["quality_time_db"] + self.database: Database = mongomock.MongoClient()["quality_time_db"] def test_get_reports_and_measurements(self): """Test that the reports and latest two measurements are returned.""" diff --git a/components/notifier/tests/notifier/test_notifier.py 
b/components/notifier/tests/notifier/test_notifier.py index 7d1a159374..7d62f8ea2b 100644 --- a/components/notifier/tests/notifier/test_notifier.py +++ b/components/notifier/tests/notifier/test_notifier.py @@ -217,7 +217,7 @@ async def test_no_webhook_in_notification_destination( """Test that the notifier continues if a destination does not have a webhook configured.""" report1 = create_report() report2 = deepcopy(report1) - measurements = [] + measurements: list[dict] = [] mocked_get.side_effect = [([report1], measurements), ([report2], measurements)] mocked_sleep.side_effect = [None, RuntimeError] diff --git a/components/shared_code/ci/pip-compile.sh b/components/shared_code/ci/pip-compile.sh index 16082fa439..3ddc132f40 100755 --- a/components/shared_code/ci/pip-compile.sh +++ b/components/shared_code/ci/pip-compile.sh @@ -1,6 +1,6 @@ #!/bin/bash -source ../../ci/base.sh +PATH="$PATH:../../ci" +source base.sh -# Update the compiled requirements files -run pip-compile --extra dev --output-file requirements/requirements-dev.txt pyproject.toml +run_pip_compile diff --git a/components/shared_code/ci/pip-install.sh b/components/shared_code/ci/pip-install.sh index cd44853124..f76f775ba5 100755 --- a/components/shared_code/ci/pip-install.sh +++ b/components/shared_code/ci/pip-install.sh @@ -1,7 +1,7 @@ #!/bin/bash -source ../../ci/base.sh +PATH="$PATH:../../ci" +source pip-base.sh -# Install the requirements -run pip install --ignore-installed --quiet -r requirements/requirements-dev.txt -run pip install --ignore-installed --quiet . +run_pip_install -r requirements/requirements-dev.txt +run_pip_install . diff --git a/components/shared_code/ci/quality.sh b/components/shared_code/ci/quality.sh index 599ef9a839..2f19fa446e 100755 --- a/components/shared_code/ci/quality.sh +++ b/components/shared_code/ci/quality.sh @@ -1,34 +1,6 @@ #!/bin/bash -source ../../ci/base.sh +PATH="$PATH:../../ci" +source quality-base.sh -# Ruff -run pipx run `spec ruff` check . 
-run pipx run `spec ruff` format --check . - -# Fixit -run pipx run `spec fixit` lint src tests - -# Mypy -# pipx run can't be used because mypy needs the pydantic plugin to be installed in the same venv (using pipx inject) -run pipx install --force `spec mypy` # --force works around this bug: https://github.com/pypa/pipx/issues/795 -run pipx inject mypy `spec pydantic` -run $PIPX_BIN_DIR/mypy src --python-executable=$(which python) - -# pip-audit -run pipx run `spec pip-audit` --strict --progress-spinner=off -r requirements/requirements-dev.txt - -# Safety -# Vulnerability ID: 67599 -# ADVISORY: ** DISPUTED ** An issue was discovered in pip (all versions) because it installs the version with the -# highest version number, even if the user had intended to obtain a private package from a private index. This only -# affects use of the --extra-index-url option, and exploitation requires that the... -# CVE-2018-20225 -# For more information about this vulnerability, visit https://data.safetycli.com/v/67599/97c -run pipx run `spec safety` check --bare --ignore 67599 -r requirements/requirements-dev.txt - -# Bandit -run pipx run `spec bandit` --quiet --recursive src/ - -# Vulture -run pipx run `spec vulture` --min-confidence 0 src/ tests/ .vulture_ignore_list.py +check_python_quality diff --git a/components/shared_code/ci/unittest.sh b/components/shared_code/ci/unittest.sh index 87a5ea5b65..be9b0fa620 100755 --- a/components/shared_code/ci/unittest.sh +++ b/components/shared_code/ci/unittest.sh @@ -1,10 +1,7 @@ #!/bin/bash -source ../../ci/unittest-base.sh +PATH="$PATH:../../ci" +source unittest-base.sh export COVERAGE_RCFILE=../../.coveragerc - -coverage run -m unittest --quiet -coverage report --fail-under=0 -coverage html --fail-under=0 -coverage xml # Fail if coverage is too low, but only after the text and HTML reports have been generated +run_coverage diff --git a/components/shared_code/pyproject.toml b/components/shared_code/pyproject.toml index 
f0f38e524a..8fe984512f 100644 --- a/components/shared_code/pyproject.toml +++ b/components/shared_code/pyproject.toml @@ -1,6 +1,11 @@ [project] name = "shared-code" version = "5.13.0" +requires-python = ">=3.12" +classifiers = [ + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.12", +] dependencies = [ "bottle==0.12.25", "packaging==24.1", @@ -9,27 +14,85 @@ dependencies = [ "python-dateutil==2.9.0.post0", ] -[project.optional-dependencies] -dev = [ +optional-dependencies.dev = [ "coverage==7.5.3", "mongomock==4.1.2", "pip==24.0", + "pip-tools==7.4.1", # To add hashes to requirements "pipx==1.6.0", - "pip-tools==7.4.1", # To add hashes to requirements "types-python-dateutil==2.9.0.20240316", - "unittest-xml-reporting==3.2.0", # Needed to generate JUnit XML output for Sonarcloud.io + "unittest-xml-reporting==3.2.0", # Needed to generate JUnit XML output for Sonarcloud.io ] -tools = [ +optional-dependencies.tools = [ "bandit==1.7.9", "fixit==2.1.0", "mypy==1.10.0", "pip-audit==2.7.3", - "pydantic==2.7.4", # Needed because pipx needs to inject Pydantic into the mpyp venv, see ci/quality.sh + "pydantic==2.7.4", # Needed because pipx needs to inject Pydantic into the mpyp venv, see ci/quality.sh + "pyproject-fmt==2.1.3", "ruff==0.4.9", - "safety==3.2.3", - "vulture==2.11" + "vulture==2.11", +] + +[tool.setuptools.packages.find] +where = [ + "src", +] + +[tool.ruff] +target-version = "py312" +line-length = 120 +src = [ + "src", ] +lint.select = [ + "ALL", +] +lint.ignore = [ + "ANN001", # https://docs.astral.sh/ruff/rules/missing-type-function-argument/ - too many untyped arguments atm to turn this rule on + "ANN002", # https://docs.astral.sh/ruff/rules/missing-type-args/ - leads to false positives for super().__init__(*args, **kwargs) + "ANN003", # https://docs.astral.sh/ruff/rules/missing-type-kwargs/ - leads to false positives for super().__init__(*args, **kwargs) + "ANN101", # https://docs.astral.sh/ruff/rules/missing-type-self/ 
- type checkers can infer the type of `self`, so annotating it is superfluous + "ANN102", # https://docs.astral.sh/ruff/rules/missing-type-cls/ - type checkers can infer the type of `cls`, so annotating it is superfluous + "COM812", # https://docs.astral.sh/ruff/rules/missing-trailing-comma/ - this rule may cause conflicts when used with the ruff formatter + "D107", # https://docs.astral.sh/ruff/rules/undocumented-public-init/ - requiring __init__() methods to have docstrings seems a bit much + "D203", # https://docs.astral.sh/ruff/rules/one-blank-line-before-class/ - prevent warning: `one-blank-line-before-class` (D203) and `no-blank-line-before-class` (D211) are incompatible. Ignoring `one-blank-line-before-class` + "D213", # https://docs.astral.sh/ruff/rules/multi-line-summary-second-line/ - prevent warning: `multi-line-summary-first-line` (D212) and `multi-line-summary-second-line` (D213) are incompatible. Ignoring `multi-line-summary-second-line` + "FBT", # https://docs.astral.sh/ruff/rules/#flake8-boolean-trap-fbt - not sure of the value of preventing "boolean traps" + "ISC001", # https://docs.astral.sh/ruff/rules/single-line-implicit-string-concatenation/ - this rule may cause conflicts when used with the ruff formatter + "PD", # https://docs.astral.sh/ruff/rules/#pandas-vet-pd - pandas isn't used + "PT", # https://docs.astral.sh/ruff/rules/#flake8-pytest-style-pt - pytest isn't used +] +lint.per-file-ignores.".vulture_ignore_list.py" = [ + "ALL", +] +lint.per-file-ignores."__init__.py" = [ + "D104", # https://docs.astral.sh/ruff/rules/undocumented-public-package/ - don't require doc strings in __init__.py files + "F401", # https://docs.astral.sh/ruff/rules/unused-import/ - imports in __init__.py files are used to flatten the module hierarchy +] +lint.per-file-ignores."src/shared_data_model/**/*.py" = [ + "RUF012", # https://docs.astral.sh/ruff/rules/mutable-class-default/ - Pydantic models' class attributes are used to specify instance attributes +] 
+lint.per-file-ignores."tests/**/*.py" = [ + "ANN201", # https://docs.astral.sh/ruff/rules/missing-return-type-undocumented-public-function/ - don't require test functions to have return types +] +lint.isort.section-order = [ + "future", + "standard-library", + "third-party", + "first-party", + "tests", + "local-folder", +] +lint.isort.sections."tests" = [ + "tests", +] + +[tool.pyproject-fmt] +indent = 4 +keep_full_version = true # Don't remove trailing zero's from version specifiers + [tool.mypy] plugins = "pydantic.mypy" ignore_missing_imports = false @@ -38,12 +101,12 @@ warn_redundant_casts = true warn_return_any = true warn_unreachable = true warn_unused_ignores = true -disable_error_code = "valid-type" # mypy does not yet support PEP 695, Type Parameter Syntax. See https://github.com/python/mypy/issues/15238 +disable_error_code = "valid-type" # mypy does not yet support PEP 695, Type Parameter Syntax. See https://github.com/python/mypy/issues/15238 [[tool.mypy.overrides]] module = [ "pymongo", - "bottle" + "bottle", ] ignore_missing_imports = true @@ -53,48 +116,3 @@ generate_hashes = true quiet = true strip_extras = true upgrade = true - -[tool.ruff] -target-version = "py312" -line-length = 120 -src = ["src"] - -[tool.ruff.lint] -select = ["ALL"] -ignore = [ - "ANN001", # https://docs.astral.sh/ruff/rules/missing-type-function-argument/ - too many untyped arguments atm to turn this rule on - "ANN002", # https://docs.astral.sh/ruff/rules/missing-type-args/ - leads to false positives for super().__init__(*args, **kwargs) - "ANN003", # https://docs.astral.sh/ruff/rules/missing-type-kwargs/ - leads to false positives for super().__init__(*args, **kwargs) - "ANN101", # https://docs.astral.sh/ruff/rules/missing-type-self/ - type checkers can infer the type of `self`, so annotating it is superfluous - "ANN102", # https://docs.astral.sh/ruff/rules/missing-type-cls/ - type checkers can infer the type of `cls`, so annotating it is superfluous - "COM812", # 
https://docs.astral.sh/ruff/rules/missing-trailing-comma/ - this rule may cause conflicts when used with the ruff formatter - "D107", # https://docs.astral.sh/ruff/rules/undocumented-public-init/ - requiring __init__() methods to have docstrings seems a bit much - "D203", # https://docs.astral.sh/ruff/rules/one-blank-line-before-class/ - prevent warning: `one-blank-line-before-class` (D203) and `no-blank-line-before-class` (D211) are incompatible. Ignoring `one-blank-line-before-class` - "D213", # https://docs.astral.sh/ruff/rules/multi-line-summary-second-line/ - prevent warning: `multi-line-summary-first-line` (D212) and `multi-line-summary-second-line` (D213) are incompatible. Ignoring `multi-line-summary-second-line` - "FBT", # https://docs.astral.sh/ruff/rules/#flake8-boolean-trap-fbt - not sure of the value of preventing "boolean traps" - "ISC001", # https://docs.astral.sh/ruff/rules/single-line-implicit-string-concatenation/ - this rule may cause conflicts when used with the ruff formatter - "PD", # https://docs.astral.sh/ruff/rules/#pandas-vet-pd - pandas isn't used - "PT", # https://docs.astral.sh/ruff/rules/#flake8-pytest-style-pt - pytest isn't used -] - -[tool.ruff.lint.isort] -section-order = ["future", "standard-library", "third-party", "first-party", "tests", "local-folder"] - -[tool.ruff.lint.isort.sections] -"tests" = ["tests"] - -[tool.ruff.lint.per-file-ignores] -".vulture_ignore_list.py" = ["ALL"] -"__init__.py" = [ - "D104", # https://docs.astral.sh/ruff/rules/undocumented-public-package/ - don't require doc strings in __init__.py files - "F401", # https://docs.astral.sh/ruff/rules/unused-import/ - imports in __init__.py files are used to flatten the module hierarchy -] -"src/shared_data_model/**/*.py" = [ - "RUF012", # https://docs.astral.sh/ruff/rules/mutable-class-default/ - Pydantic models' class attributes are used to specify instance attributes -] -"tests/**/*.py" = [ - "ANN201", # 
https://docs.astral.sh/ruff/rules/missing-return-type-undocumented-public-function/ - don't require test functions to have return types -] - -[tool.setuptools.packages.find] -where = ["src"] diff --git a/components/shared_code/tests/shared/base.py b/components/shared_code/tests/shared/base.py index df53dacefc..e57b595d64 100644 --- a/components/shared_code/tests/shared/base.py +++ b/components/shared_code/tests/shared/base.py @@ -2,6 +2,7 @@ import json import unittest +from typing import ClassVar from shared_data_model import DATA_MODEL_JSON @@ -9,6 +10,8 @@ class DataModelTestCase(unittest.TestCase): """Base class for unit tests that use the data model from the database.""" + DATA_MODEL: ClassVar[dict] = {} + @classmethod def setUpClass(cls) -> None: """Override to set up the data model.""" diff --git a/components/shared_code/tests/shared/database/test_connection_params.py b/components/shared_code/tests/shared/database/test_connection_params.py index 3235aafb91..54fb0cdf97 100644 --- a/components/shared_code/tests/shared/database/test_connection_params.py +++ b/components/shared_code/tests/shared/database/test_connection_params.py @@ -16,7 +16,7 @@ def _assert_dbclient_host_url(self, dbclient, expected_url) -> None: def test_default(self): """Test the default url.""" db = client() - _default_user_pass = "root:root" # bypassing Sonar security check flagging plaintext password # noqa: S105 + _default_user_pass = "root:root" # nosec # noqa: S105 self._assert_dbclient_host_url(db, f"mongodb://{_default_user_pass}@localhost:27017") def test_full_url_override(self): diff --git a/components/shared_code/tests/shared/database/test_measurements.py b/components/shared_code/tests/shared/database/test_measurements.py index 7f7338f296..fd9f518b07 100644 --- a/components/shared_code/tests/shared/database/test_measurements.py +++ b/components/shared_code/tests/shared/database/test_measurements.py @@ -1,5 +1,6 @@ """Unit tests for the measurements collection.""" +from typing 
import TYPE_CHECKING from unittest.mock import Mock, patch import mongomock @@ -14,6 +15,9 @@ from tests.fixtures import METRIC_ID, METRIC_ID2, create_report from tests.shared.base import DataModelTestCase +if TYPE_CHECKING: + from pymongo.database import Database + @patch("shared.model.measurement.iso_timestamp", Mock(return_value=iso_timestamp())) class MeasurementsTest(DataModelTestCase): @@ -25,7 +29,7 @@ def setUp(self) -> None: self.database_mock = Mock() self.database_mock.measurements.find_one.return_value = None self.database_mock.measurements.insert_one = self.insert_one_measurement - self.metric = Metric(self.DATA_MODEL, {"type": "violations"}, "metric_uuid") + self.metric = Metric(self.DATA_MODEL, {"type": "violations"}, METRIC_ID) self.measurements = [ {"_id": 1, "start": "0", "end": "1", "sources": [], "metric_uuid": METRIC_ID}, {"_id": 2, "start": "3", "end": "4", "sources": [], "metric_uuid": METRIC_ID}, @@ -34,7 +38,7 @@ def setUp(self) -> None: {"_id": 5, "start": "4", "end": "5", "sources": [], "metric_uuid": METRIC_ID2}, {"_id": 6, "start": "7", "end": "8", "sources": [], "metric_uuid": METRIC_ID2}, ] - self.database = mongomock.MongoClient()["quality_time_db"] + self.database: Database = mongomock.MongoClient()["quality_time_db"] @staticmethod def insert_one_measurement(measurement: Measurement) -> None: diff --git a/components/shared_code/tests/shared/model/test_measurement.py b/components/shared_code/tests/shared/model/test_measurement.py index 13c1db536f..f4594d838c 100644 --- a/components/shared_code/tests/shared/model/test_measurement.py +++ b/components/shared_code/tests/shared/model/test_measurement.py @@ -1,6 +1,7 @@ """Test the measurements model.""" from datetime import UTC, datetime, timedelta +from typing import cast from unittest.mock import patch from packaging.version import Version @@ -504,7 +505,7 @@ def source( self.source_count += 1 source_number = "" if self.source_count == 1 else str(self.source_count) return Source( - 
f"source{source_number}", + cast(SourceId, f"source{source_number}"), metric, { "source_uuid": f"source{source_number}", diff --git a/components/shared_code/tests/shared/model/test_report.py b/components/shared_code/tests/shared/model/test_report.py index e37a7f5f85..0796eb43fe 100644 --- a/components/shared_code/tests/shared/model/test_report.py +++ b/components/shared_code/tests/shared/model/test_report.py @@ -1,6 +1,7 @@ """Unit tests for the report class.""" import unittest +from typing import TYPE_CHECKING import mongomock @@ -13,13 +14,16 @@ from tests.fixtures import METRIC_ID, REPORT_ID, SOURCE_ID, SUBJECT_ID, create_report from tests.shared.base import DataModelTestCase +if TYPE_CHECKING: + from pymongo.database import Database + class ReportTest(DataModelTestCase): """Report unit tests.""" def setUp(self) -> None: """Override to create a database fixture.""" - self.source_data = {} + self.source_data: dict = {} self.metric_data = {"type": "violations", "sources": {SOURCE_ID: self.source_data}, "tags": ["tag"]} self.subject_data = {"metrics": {METRIC_ID: self.metric_data}} report_data = { @@ -122,7 +126,7 @@ class TestMetrics(unittest.TestCase): def setUp(self) -> None: """Define info that is used in multiple tests.""" - self.database = mongomock.MongoClient()["quality_time_db"] + self.database: Database = mongomock.MongoClient()["quality_time_db"] def test_get_metrics_from_reports(self): """Test that the metrics are returned.""" diff --git a/components/shared_code/tests/shared_data_model/meta/base.py b/components/shared_code/tests/shared_data_model/meta/base.py index f4047d32c1..ca940c26f7 100644 --- a/components/shared_code/tests/shared_data_model/meta/base.py +++ b/components/shared_code/tests/shared_data_model/meta/base.py @@ -2,16 +2,14 @@ import unittest -from pydantic import BaseModel, ValidationError +from pydantic import ValidationError class MetaModelTestCase(unittest.TestCase): """Meta model test case.""" - MODEL = BaseModel # Should be 
overridden by subclasses - - def check_validation_error(self, message, **model_kwargs): + def check_validation_error(self, expected_message: str, model, **model_kwargs) -> None: """Check that parsing the object with the model raises a validation error with the specified message.""" with self.assertRaises(ValidationError) as context: - self.MODEL(**model_kwargs) - self.assertIn(message, str(context.exception)) + model(**model_kwargs) + self.assertIn(expected_message, str(context.exception)) diff --git a/components/shared_code/tests/shared_data_model/meta/test_base.py b/components/shared_code/tests/shared_data_model/meta/test_base.py index be6e85794f..a6c7732d2a 100644 --- a/components/shared_code/tests/shared_data_model/meta/test_base.py +++ b/components/shared_code/tests/shared_data_model/meta/test_base.py @@ -10,15 +10,10 @@ class DescribedModelTest(MetaModelTestCase): """Unit tests for described models.""" - MODEL = DescribedModel - def test_description_without_punctuation(self): """Test that a description without punctuation fails.""" - self.check_validation_error( - "The description of Name does not end with punctuation", - name="Name", - description="Description", - ) + expected_message = "The description of Name does not end with punctuation" + self.check_validation_error(expected_message, DescribedModel, name="Name", description="Description") def test_description_with_punctuation(self): """Test that a description with punctuation passes the check.""" @@ -26,11 +21,11 @@ def test_description_with_punctuation(self): def test_missing_description(self): """Test that the description is mandatory.""" - self.check_validation_error("description\n Field required", name="Name") + self.check_validation_error("description\n Field required", DescribedModel, name="Name") def test_empty_description(self): """Test that the description has a non-zero length.""" - self.check_validation_error("String should match pattern '.+'", name="Name", description="") + 
self.check_validation_error("String should match pattern '.+'", DescribedModel, name="Name", description="") class StrEnumTest(unittest.TestCase): diff --git a/components/shared_code/tests/shared_data_model/meta/test_data_model.py b/components/shared_code/tests/shared_data_model/meta/test_data_model.py index d6d86a18ad..dda053e0b4 100644 --- a/components/shared_code/tests/shared_data_model/meta/test_data_model.py +++ b/components/shared_code/tests/shared_data_model/meta/test_data_model.py @@ -1,6 +1,6 @@ """Data model unit tests.""" -from typing import ClassVar +from typing import ClassVar, cast from unittest.mock import Mock, patch from pydantic import HttpUrl @@ -13,32 +13,27 @@ class DataModelTest(MetaModelTestCase): """Data model unit tests.""" - MODEL = DataModel PARAMETERS: ClassVar[dict] = {"parameter": {"name": "parameter", "type": "string", "metrics": ["metric"]}} CONFIGURATION: ClassVar[dict] = {"config": {"name": "Config", "value": ["value"], "metrics": ["metric"]}} - def setUp(self) -> None: - """Set up the default model kwargs.""" - self.model_kwargs = { - "scales": {}, - "metrics": {}, - "sources": {}, - "subjects": {}, - } + def check_data_model_validation_error(self, expected_message: str, **extra_model_kwargs) -> None: + """Check the validation error when instantiation the DataModel model.""" + model_kwargs: dict[str, dict] = {"scales": {}, "metrics": {}, "sources": {}, "subjects": {}} + self.check_validation_error(expected_message, DataModel, **(model_kwargs | extra_model_kwargs)) @classmethod - def scale(cls, **kwargs) -> dict[str, str | dict]: + def scale(cls, **kwargs) -> dict: """Create a scale fixture.""" return {"name": "Count", "description": "The count scale."} | kwargs @classmethod - def source(cls, **kwargs) -> dict[str, str | dict]: + def source(cls, **kwargs) -> dict: """Create a source fixture.""" url = HttpUrl("https://example.org") return {"name": "Source", "description": "Source.", "url": url, "parameters": {}} | kwargs 
@classmethod - def metric(cls, **kwargs) -> dict[str, str | list[str]]: + def metric(cls, **kwargs) -> dict: """Create a metric fixture.""" return {"name": "Metric", "description": "Description.", "scales": ["count"], "sources": ["source"]} | kwargs @@ -73,9 +68,9 @@ def quality_time(cls, **kwargs) -> dict[str, str | dict]: ) @staticmethod - def mock_path(path_class, exists: bool = True) -> Mock: + def mock_path(path_class: Mock, exists: bool = True) -> Mock: """Return a mock path that does or does not exist.""" - path = path_class.return_value + path = cast(Mock, path_class.return_value) path.parent = path path.__truediv__.return_value = path path.exists.return_value = exists @@ -83,37 +78,35 @@ def mock_path(path_class, exists: bool = True) -> Mock: def test_invalid_scales(self): """Test that invalid scales throw an error.""" - model_kwargs = self.model_kwargs | { + extra_model_kwargs = { "scales": {"count": self.scale()}, "metrics": {"metric": self.metric(scales=["invalid"])}, } - self.check_validation_error("Metric 'Metric' has invalid scales {'invalid'}", **model_kwargs) + self.check_data_model_validation_error("Metric 'Metric' has invalid scales {'invalid'}", **extra_model_kwargs) def test_unused_scales(self): """Test that unused scales throw an error.""" - model_kwargs = self.model_kwargs | {"scales": {"count": self.scale()}} - self.check_validation_error("Unused scales {'count'}", **model_kwargs) + self.check_data_model_validation_error("Unused scales {'count'}", scales={"count": self.scale()}) def test_source_urls(self): """Test that sources have a URL.""" - model_kwargs = self.model_kwargs | {"sources": {"source": self.source(url=None)}} - self.check_validation_error("Source source has no URL", **model_kwargs) + self.check_data_model_validation_error("Source source has no URL", sources={"source": self.source(url=None)}) @patch("pathlib.Path") - def test_missing_logo(self, path_class): + def test_missing_logo(self, path_class: Mock): """Test that a 
validation error occurs when a logo is missing.""" - model_kwargs = self.model_kwargs | { + extra_model_kwargs = { "scales": {"count": self.scale()}, "metrics": {"metric": self.metric()}, "sources": {"source": self.source()}, } self.mock_path(path_class, exists=False) - self.check_validation_error("No logo exists for source", **model_kwargs) + self.check_data_model_validation_error("No logo exists for source", **extra_model_kwargs) @patch("pathlib.Path") - def test_missing_source(self, path_class): + def test_missing_source(self, path_class: Mock): """Test that a validation error occurs when a logo exists, but the source is missing.""" - model_kwargs = self.model_kwargs | { + extra_model_kwargs = { "scales": {"count": self.scale()}, "metrics": {"metric": self.metric()}, "sources": {"source": self.source()}, @@ -121,23 +114,23 @@ def test_missing_source(self, path_class): logo_path = self.mock_path(path_class) logo_path.glob.return_value = [logo_path] logo_path.stem = "non_existing_source" - self.check_validation_error("No source exists for ", **model_kwargs) + self.check_data_model_validation_error("No source exists for ", **extra_model_kwargs) @patch("pathlib.Path") - def test_source_parameters(self, path_class): + def test_source_parameters(self, path_class: Mock): """Test that the sources have at least one parameter for each metric supported by the source.""" self.mock_path(path_class, exists=True) - model_kwargs = self.model_kwargs | { + extra_model_kwargs = { "scales": {"count": self.scale()}, "metrics": {"metric": self.metric()}, "sources": {"source": self.source()}, } - self.check_validation_error("No parameters for metric metric in source source", **model_kwargs) + self.check_data_model_validation_error("No parameters for metric metric in source source", **extra_model_kwargs) @patch("pathlib.Path") def test_source_parameters_list_valid_metric(self, path_class): """Test that the metrics listed by the source parameters are metrics that list the source.""" - 
model_kwargs = self.model_kwargs | { + extra_model_kwargs = { "scales": {"count": self.scale()}, "metrics": {"metric": self.metric()}, "sources": { @@ -145,29 +138,25 @@ def test_source_parameters_list_valid_metric(self, path_class): "other_source": self.source(parameters=self.PARAMETERS), }, } - self.check_validation_error( + expected_message = ( "Parameter parameter of source other_source lists metric metric as metric needing this parameter, " - "but that metric doesn't list other_source as allowed source", - **model_kwargs, + "but that metric doesn't list other_source as allowed source" ) + self.check_data_model_validation_error(expected_message, **extra_model_kwargs) path_class.assert_called_once() @patch("pathlib.Path") def test_configuration_refers_to_existing_metric(self, path_class): """Test that source configurations refer to metrics that exist.""" - model_kwargs = self.model_kwargs | { - "sources": {"source": self.source(configuration=self.CONFIGURATION)}, - } - self.check_validation_error( - "Configuration config of source source refers to non-existing metric metric", - **model_kwargs, - ) + extra_model_kwargs = {"sources": {"source": self.source(configuration=self.CONFIGURATION)}} + expected_message = "Configuration config of source source refers to non-existing metric metric" + self.check_data_model_validation_error(expected_message, **extra_model_kwargs) path_class.assert_called_once() @patch("pathlib.Path") def test_configuration_refers_to_metric_that_refers_to_source(self, path_class): """Test that source configurations refer to metrics that list the source as supported source.""" - model_kwargs = self.model_kwargs | { + extra_model_kwargs = { "scales": {"count": self.scale()}, "metrics": {"metric": self.metric()}, "sources": { @@ -175,17 +164,17 @@ def test_configuration_refers_to_metric_that_refers_to_source(self, path_class): "invalid_source": self.source(configuration=self.CONFIGURATION), }, } - self.check_validation_error( + expected_message = 
( "Configuration config of source invalid_source refers to metric metric, " - "but metric doesn't list invalid_source as source", - **model_kwargs, + "but metric doesn't list invalid_source as source" ) + self.check_data_model_validation_error(expected_message, **extra_model_kwargs) path_class.assert_called_once() @patch("pathlib.Path") def test_metrics_belong_to_at_least_one_subject(self, path_class): """Test that metrics belong to at least one subject.""" - model_kwargs = self.model_kwargs | { + extra_model_kwargs = { "scales": {"count": self.scale()}, "metrics": { "metric": self.metric(sources=["quality_time", "source"]), @@ -196,13 +185,13 @@ def test_metrics_belong_to_at_least_one_subject(self, path_class): "quality_time": self.quality_time(), }, } - self.check_validation_error("Metric metric is not listed in any subject", **model_kwargs) + self.check_data_model_validation_error("Metric metric is not listed in any subject", **extra_model_kwargs) path_class.assert_called_once() @patch("pathlib.Path") def test_quality_time_lists_all_metric_types(self, path_class): """Test that Quality-time lists all metric types as possible values for its metric_type parameter.""" - model_kwargs = self.model_kwargs | { + extra_model_kwargs = { "scales": {"count": self.scale()}, "metrics": {"metric": self.metric(sources=["quality_time", "source"])}, "sources": { @@ -210,16 +199,14 @@ def test_quality_time_lists_all_metric_types(self, path_class): "quality_time": self.quality_time(metric_type_metrics=["metric"]), }, } - self.check_validation_error( - "Parameter metric_type of source quality_time doesn't list all metric types", - **model_kwargs, - ) + expected_message = "Parameter metric_type of source quality_time doesn't list all metric types" + self.check_data_model_validation_error(expected_message, **extra_model_kwargs) path_class.assert_called_once() @patch("pathlib.Path") def test_quality_time_lists_all_source_types(self, path_class): """Test that the Quality-time source 
lists all sources as possible values for its source type parameter.""" - model_kwargs = self.model_kwargs | { + extra_model_kwargs = { "scales": {"count": self.scale()}, "metrics": { "metric": self.metric(sources=["quality_time", "source"]), @@ -230,8 +217,6 @@ def test_quality_time_lists_all_source_types(self, path_class): "quality_time": self.quality_time(source_type_values=["Other source", "Source"]), }, } - self.check_validation_error( - "Parameter source_type of source quality_time doesn't list source types: Quality-time", - **model_kwargs, - ) + expected_message = "Parameter source_type of source quality_time doesn't list source types: Quality-time" + self.check_data_model_validation_error(expected_message, **extra_model_kwargs) path_class.assert_called_once() diff --git a/components/shared_code/tests/shared_data_model/meta/test_entity.py b/components/shared_code/tests/shared_data_model/meta/test_entity.py index e7f2d27b25..48cc5ba570 100644 --- a/components/shared_code/tests/shared_data_model/meta/test_entity.py +++ b/components/shared_code/tests/shared_data_model/meta/test_entity.py @@ -8,7 +8,9 @@ class EntityTest(MetaModelTestCase): """Entity unit tests.""" - MODEL = Entity + def check_entity_validation_error(self, message: str, **model_kwargs) -> None: + """Check the validation error when instantiation the Entity model.""" + self.check_validation_error(message, Entity, **model_kwargs) def test_check_name_correct(self): """Test that a correct name passes the check.""" @@ -17,22 +19,19 @@ def test_check_name_correct(self): def test_check_name_incorrect(self): """Test that an incorrect name does not pass the check.""" - self.check_validation_error("String should match pattern '^[^A-Z]+$'", name="Upper case", attributes=[]) + model_kwargs = {"name": "Upper case", "attributes": []} + expected_message = "String should match pattern '^[^A-Z]+$'" + self.check_entity_validation_error(expected_message, **model_kwargs) def test_measured_attribute_exists(self): 
"""Test that the measured attribute is an existing attribute.""" - self.check_validation_error( - "Measured attribute attribute is not an attribute of entity entity", - name="entity", - attributes=[], - measured_attribute="attribute", - ) + model_kwargs = {"name": "entity", "attributes": [], "measured_attribute": "attribute"} + expected_message = "Measured attribute attribute is not an attribute of entity entity" + self.check_entity_validation_error(expected_message, **model_kwargs) def test_measured_attribute_has_number_type(self): """Test that the measured attribute is an existing attribute.""" - self.check_validation_error( - "Measured attribute attribute does not have a number type", - name="entity", - attributes=[{"name": "Attribute", "description": "Attribute."}], - measured_attribute="attribute", - ) + attributes = [{"name": "Attribute", "description": "Attribute."}] + model_kwargs = {"name": "entity", "attributes": attributes, "measured_attribute": "attribute"} + expected_message = "Measured attribute attribute does not have a number type" + self.check_entity_validation_error(expected_message, **model_kwargs) diff --git a/components/shared_code/tests/shared_data_model/meta/test_metric.py b/components/shared_code/tests/shared_data_model/meta/test_metric.py index be8b0afd0a..aeff5c8a21 100644 --- a/components/shared_code/tests/shared_data_model/meta/test_metric.py +++ b/components/shared_code/tests/shared_data_model/meta/test_metric.py @@ -8,29 +8,26 @@ class MetricTest(MetaModelTestCase): """Metric unit tests.""" - MODEL = Metric + def check_metric_validation_error(self, expected_message: str, **model_kwargs) -> None: + """Check the validation error when instantiation the Metric model.""" + self.check_validation_error(expected_message, Metric, **model_kwargs) def test_addition(self): """Test that an invalid addition value throws a validation error.""" - self.check_validation_error("Input should be 'max', 'min' or 'sum'", addition="invalid") + 
self.check_metric_validation_error("Input should be 'max', 'min' or 'sum'", addition="invalid") def test_direction(self): """Test that an invalid direction value throws a validation error.""" - self.check_validation_error("Input should be '<' or '>'", direction="<>") + self.check_metric_validation_error("Input should be '<' or '>'", direction="<>") def test_scales(self): """Test that a metric without scales throws a validation error.""" - self.check_validation_error( - "List should have at least 1 item after validation, not 0", - name="Metric", - description="Description", - sources=["source"], - scales=[], - ) + model_kwargs = {"name": "Metric", "description": "Description", "sources": ["source"], "scales": []} + self.check_metric_validation_error("List should have at least 1 item after validation, not 0", **model_kwargs) def test_default_scale(self): """Test that the default scale is set correctly.""" - metric = self.MODEL( + metric = Metric( default_scale="", description="Description.", name="Metric", diff --git a/components/shared_code/tests/shared_data_model/meta/test_parameter.py b/components/shared_code/tests/shared_data_model/meta/test_parameter.py index 619a3d95d5..2ffc11ff3e 100644 --- a/components/shared_code/tests/shared_data_model/meta/test_parameter.py +++ b/components/shared_code/tests/shared_data_model/meta/test_parameter.py @@ -11,121 +11,89 @@ class ParameterTest(MetaModelTestCase): """Parameter unit tests.""" - MODEL = Parameter - TOO_FEW_VALUES = "Parameter Parameter is multiple choice but has fewer than two values" + def check_parameter_validation_error(self, expected_message: str, **extra_model_kwargs) -> None: + """Check the validation error when instantiation the Parameter model.""" + model_kwargs = {"name": "Parameter", "type": ParameterType.STRING, "metrics": ["loc"]} + self.check_validation_error(expected_message, Parameter, **(model_kwargs | extra_model_kwargs)) + def test_check_help_and_help_url(self): """Test that a parameter can only 
have one of help and help URL.""" - self.check_validation_error( - "Parameter Parameter has both help and help_url", - name="Parameter", - type=ParameterType.STRING, - help="Help.", - help_url=HttpUrl("https://help.example.org"), - metrics=["loc"], - ) - self.check_validation_error( - "The help of Parameter does not end with punctuation", - name="Parameter", - type=ParameterType.STRING, - help="Help", - metrics=["loc"], - ) + help_url = HttpUrl("https://help.example.org") + expected_message = "Parameter Parameter has both help and help_url" + self.check_parameter_validation_error(expected_message, help="Help.", help_url=help_url) + + def test_check_help_punctuation(self): + """Test that parameter help ends with puncutation.""" + expected_message = "The help of Parameter does not end with punctuation" + self.check_parameter_validation_error(expected_message, help="Help") def test_check_api_values_imply_values(self): """Test that if a parameter has API values it also has values.""" - self.check_validation_error( - "Parameter Parameter has api_values but no values", - name="Parameter", - type=ParameterType.STRING, - api_values={"value": "api_value"}, - metrics=["loc"], - ) + expected_message = "Parameter Parameter has api_values but no values" + self.check_parameter_validation_error(expected_message, api_values={"value": "api_value"}) def test_check_api_values_subset_of_values(self): """Test that the parameter API values are a subset of the parameter values.""" - expected_error = "Parameter Parameter has api_values keys that are not listed in values" - self.check_validation_error( - expected_error, - name="Parameter", - type=ParameterType.STRING, - values=["value"], - api_values={"other_value": "api_value"}, - metrics=["loc"], - ) + extra_model_kwargs = {"values": ["value"], "api_values": {"other_value": "api_value"}} + expected_message = "Parameter Parameter has api_values keys that are not listed in values" + self.check_parameter_validation_error(expected_message, 
**extra_model_kwargs) def test_check_placeholder(self): """Test that multiple choice parameters need a placeholder.""" - expected_error = "Parameter Parameter is multiple choice but has no placeholder" - self.check_validation_error(expected_error, name="Parameter", type="multiple_choice", metrics=["loc"]) + expected_message = "Parameter Parameter is multiple choice but has no placeholder" + self.check_parameter_validation_error(expected_message, type="multiple_choice") def test_check_default_value(self): """Test that multiple choice parameters have a list as default value.""" - self.check_validation_error( - "Parameter Parameter is multiple choice but default_value is not a list", - name="Parameter", - type="multiple_choice", - metrics=["loc"], - placeholder="placeholder", - ) + expected_message = "Parameter Parameter is multiple choice but default_value is not a list" + self.check_parameter_validation_error(expected_message, type="multiple_choice", placeholder="placeholder") def test_check_default_value_empty(self): """Test that multiple choice parameters with addition have an empty list as default value.""" - expected_error = "Parameter Parameter is multiple choice with addition but default_value is not empty" - self.check_validation_error( - expected_error, - name="Parameter", + self.check_parameter_validation_error( + "Parameter Parameter is multiple choice with addition but default_value is not empty", type="multiple_choice_with_addition", default_value=["value"], - metrics=["loc"], placeholder="placeholder", ) def test_check_values_too_few(self): """Test that multiple choice parameters have at least two values.""" - self.check_validation_error( + self.check_parameter_validation_error( self.TOO_FEW_VALUES, - name="Parameter", type="multiple_choice", values=["value"], - metrics=["loc"], - placeholder="placeholder", default_value=[], + placeholder="placeholder", ) def test_check_values_zero(self): """Test that multiple choice parameters have at least two 
values.""" - self.check_validation_error( + self.check_parameter_validation_error( self.TOO_FEW_VALUES, - name="Parameter", type="multiple_choice", values=[], - metrics=["loc"], - placeholder="placeholder", default_value=[], + placeholder="placeholder", ) def test_check_values_missing(self): """Test that multiple choice parameters have at least two values.""" - self.check_validation_error( + self.check_parameter_validation_error( self.TOO_FEW_VALUES, - name="Parameter", type="multiple_choice", - metrics=["loc"], - placeholder="placeholder", default_value=[], + placeholder="placeholder", ) def test_check_values_not_empty(self): """Test that multiple choice parameters with addition have no values.""" - expected_error = "Parameter Parameter is multiple choice with addition but has values" - self.check_validation_error( - expected_error, - name="Parameter", + self.check_parameter_validation_error( + "Parameter Parameter is multiple choice with addition but has values", type="multiple_choice_with_addition", values=["value"], - metrics=["loc"], - placeholder="placeholder", default_value=[], + placeholder="placeholder", ) diff --git a/components/shared_code/tests/shared_data_model/meta/test_source.py b/components/shared_code/tests/shared_data_model/meta/test_source.py index c408242d21..c6a7c813d2 100644 --- a/components/shared_code/tests/shared_data_model/meta/test_source.py +++ b/components/shared_code/tests/shared_data_model/meta/test_source.py @@ -8,33 +8,30 @@ class SourceTest(MetaModelTestCase): """Unit tests for the source model.""" - MODEL = Source - DESCRIPTION = "Source." 
- URL = "https://example.org" + def check_source_validation_error(self, expected_message: str, **extra_model_kwargs) -> None: + """Extend to add the model.""" + model_kwargs = {"name": "Source", "description": "Source."} + self.check_validation_error(expected_message, Source, **(model_kwargs | extra_model_kwargs)) def test_missing_parameter_to_validate_on(self): """Test that a source with a parameter listing another parameter to validate on actually has that parameter.""" - model_kwargs = { - "name": "Source", - "description": self.DESCRIPTION, - "url": self.URL, + extra_model_kwargs = { + "url": "https://example.org", "parameters": { "url": {"name": "URL", "type": "url", "metrics": ["metric"], "validate_on": ["password"]}, }, } - self.check_validation_error( + self.check_source_validation_error( "Source Source should validate parameter url when parameter password changes, " "but source Source has no parameter password", - **model_kwargs, + **extra_model_kwargs, ) def test_missing_url_when_landing_url(self): """Test that a source that has a landing url also has a url parameter.""" - model_kwargs = { - "name": "Source", - "description": self.DESCRIPTION, + extra_model_kwargs = { "parameters": { "landing_url": {"name": "URL", "type": "url", "metrics": ["metric"]}, }, } - self.check_validation_error("Source Source has a landing URL but no URL", **model_kwargs) + self.check_source_validation_error("Source Source has a landing URL but no URL", **extra_model_kwargs) diff --git a/components/shared_code/tests/shared_data_model/meta/test_subject.py b/components/shared_code/tests/shared_data_model/meta/test_subject.py index 580b3049f1..d56f18c26a 100644 --- a/components/shared_code/tests/shared_data_model/meta/test_subject.py +++ b/components/shared_code/tests/shared_data_model/meta/test_subject.py @@ -8,8 +8,6 @@ class SubjectTest(MetaModelTestCase): """Unit tests for the subject model.""" - MODEL = Subject - def test_all_metrics(self): """Test that the all_metrics property 
of a subject returns all metrics recursively and deduplicated.""" subject = Subject( diff --git a/components/shared_code/tests/shared_data_model/test_parameters.py b/components/shared_code/tests/shared_data_model/test_parameters.py index 2e0b0fc935..58f5959549 100644 --- a/components/shared_code/tests/shared_data_model/test_parameters.py +++ b/components/shared_code/tests/shared_data_model/test_parameters.py @@ -8,13 +8,8 @@ class IntegerParameterTest(MetaModelTestCase): """Integer parameter unit tests.""" - MODEL = IntegerParameter - def test_check_unit(self): """Test that a parameter with the integer type also has a unit.""" - self.check_validation_error( - "Parameter Parameter has no unit", - name="Parameter", - type="integer", - metrics=["loc"], - ) + model_kwargs = {"name": "Parameter", "type": "integer", "metrics": ["loc"]} + expected_message = "Parameter Parameter has no unit" + self.check_validation_error(expected_message, IntegerParameter, **model_kwargs) diff --git a/docs/ci/pip-compile.sh b/docs/ci/pip-compile.sh index c2578028c2..42518222db 100755 --- a/docs/ci/pip-compile.sh +++ b/docs/ci/pip-compile.sh @@ -1,7 +1,6 @@ #!/bin/bash -source ../ci/base.sh +PATH="$PATH:../ci" +source pip-base.sh -# Update the compiled requirements files -run pip-compile --output-file requirements/requirements.txt pyproject.toml -run pip-compile --extra dev --output-file requirements/requirements-dev.txt pyproject.toml +run_pip_compile diff --git a/docs/ci/pip-install.sh b/docs/ci/pip-install.sh index da044f1e5f..c35ab3f1e7 100755 --- a/docs/ci/pip-install.sh +++ b/docs/ci/pip-install.sh @@ -1,7 +1,7 @@ #!/bin/bash -source ../ci/base.sh +PATH="$PATH:../ci" +source pip-base.sh -# Install the requirements -run pip install --ignore-installed --quiet --use-pep517 -r requirements/requirements.txt -r requirements/requirements-dev.txt -run pip install --ignore-installed --quiet -r requirements/requirements-internal.txt +run_pip_install -r requirements/requirements.txt -r 
requirements/requirements-dev.txt +run_pip_install -r requirements/requirements-internal.txt diff --git a/docs/ci/quality.sh b/docs/ci/quality.sh index 1745cbb158..a79101e81e 100755 --- a/docs/ci/quality.sh +++ b/docs/ci/quality.sh @@ -1,32 +1,8 @@ #!/bin/bash -source ../ci/base.sh +PATH="$PATH:../ci" +source quality-base.sh -# Markdownlint -run ./node_modules/markdownlint-cli/markdownlint.js src/*.md - -# Ruff -run pipx run `spec ruff` check . -run pipx run `spec ruff` format --check . - -# Fixit -run pipx run `spec fixit` lint src tests - -# Mypy -# pipx run can't be used because mypy needs the pydantic plugin to be installed in the same venv (using pipx inject) -run pipx install --force `spec mypy` # --force works around this bug: https://github.com/pypa/pipx/issues/795 -run pipx inject mypy `spec pydantic` -run $PIPX_BIN_DIR/mypy src --python-executable=$(which python) - -# Vale -run pipx run `spec vale` sync -run pipx run `spec vale` --no-wrap src/*.md - -# pip-audit -run pipx run `spec pip-audit` --strict --progress-spinner=off -r requirements/requirements.txt -r requirements/requirements-dev.txt - -# Safety -run pipx run `spec bandit` --quiet --recursive src/ - -# Vulture -run pipx run `spec vulture` --min-confidence 0 src/ tests/ .vulture_ignore_list.py +check_python_quality +run_markdownlint +run_vale diff --git a/docs/ci/unittest.sh b/docs/ci/unittest.sh index e88ff1a696..ea8e5f6936 100755 --- a/docs/ci/unittest.sh +++ b/docs/ci/unittest.sh @@ -1,10 +1,7 @@ #!/bin/bash -source ../ci/unittest-base.sh +PATH="$PATH:../ci" +source unittest-base.sh export COVERAGE_RCFILE=../.coveragerc - -coverage run -m unittest --quiet -coverage report --fail-under=0 -coverage html --fail-under=0 -coverage xml # Fail if coverage is too low, but only after the text and HTML reports have been generated +run_coverage diff --git a/docs/pyproject.toml b/docs/pyproject.toml index 59d9978d81..c507ef471c 100644 --- a/docs/pyproject.toml +++ b/docs/pyproject.toml @@ -1,36 +1,94 @@ 
[project] name = "docs" version = "5.13.0" +requires-python = ">=3.12" +classifiers = [ + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.12", +] dependencies = [ "furo==2023.9.10", "gitpython==3.1.43", "myst-parser==2.0.0", - "pydantic==2.7.4", # Needed for generating the reference docs from the data model - "Sphinx==7.2.6", + "pydantic==2.7.4", # Needed for generating the reference docs from the data model + "sphinx==7.2.6", "sphinx-copybutton==0.5.2", - "sphinx_design==0.5.0" + "sphinx-design==0.5.0", ] - -[project.optional-dependencies] -dev = [ +optional-dependencies.dev = [ "coverage==7.3.4", "pip==24.0", + "pip-tools==7.4.1", # To add hashes to requirements "pipx==1.6.0", - "pip-tools==7.4.1", # To add hashes to requirements - "unittest-xml-reporting==3.2.0", # Needed to generate JUnit XML output for Sonarcloud.io + "unittest-xml-reporting==3.2.0", # Needed to generate JUnit XML output for Sonarcloud.io ] -tools = [ +optional-dependencies.tools = [ "bandit==1.7.9", "fixit==2.1.0", "mypy==1.10.0", "pip-audit==2.7.3", - "pydantic==2.7.4", # Needed because pipx needs to inject Pydantic into the mpyp venv, see ci/quality.sh + "pydantic==2.7.4", # Needed because pipx needs to inject Pydantic into the mpyp venv, see ci/quality.sh + "pyproject-fmt==2.1.3", "ruff==0.4.9", - "safety==3.2.3", - "vale==3.0.3.0", # Documentation grammar and style checker - "vulture==2.11" + "vale==3.0.3.0", # Documentation grammar and style checker + "vulture==2.11", ] +[tool.ruff] +target-version = "py312" +line-length = 120 +src = [ + "src", +] +lint.select = [ + "ALL", +] +lint.ignore = [ + "ANN101", # https://docs.astral.sh/ruff/rules/missing-type-function-argument/ - type checkers can infer the type of `self`, so annotating it is superfluous + "COM812", # https://docs.astral.sh/ruff/rules/missing-trailing-comma/ - this rule may cause conflicts when used with the ruff formatter + "D203", # 
https://docs.astral.sh/ruff/rules/one-blank-line-before-class/ - prevent warning: `one-blank-line-before-class` (D203) and `no-blank-line-before-class` (D211) are incompatible. Ignoring `one-blank-line-before-class` + "D213", # https://docs.astral.sh/ruff/rules/multi-line-summary-second-line/ - prevent warning: `multi-line-summary-first-line` (D212) and `multi-line-summary-second-line` (D213) are incompatible. Ignoring `multi-line-summary-second-line` + "FBT", # https://docs.astral.sh/ruff/rules/#flake8-boolean-trap-fbt - not sure of the value of preventing "boolean traps" + "ISC001", # https://docs.astral.sh/ruff/rules/single-line-implicit-string-concatenation/ - this rule may cause conflicts when used with the ruff formatter + "PD", # https://docs.astral.sh/ruff/rules/#pandas-vet-pd - pandas isn't used + "PT", # https://docs.astral.sh/ruff/rules/#flake8-pytest-style-pt - pytest isn't used +] +lint.per-file-ignores.".vulture_ignore_list.py" = [ + "ALL", +] +lint.per-file-ignores."__init__.py" = [ + "D104", # https://docs.astral.sh/ruff/rules/undocumented-public-package/ - don't require doc strings in __init__.py files +] +lint.per-file-ignores."src/conf.py" = [ + "INP001", # https://docs.astral.sh/ruff/rules/implicit-namespace-package/ - false positive because this is a configuration file +] +lint.per-file-ignores."src/create_reference_md.py" = [ + "INP001", # https://docs.astral.sh/ruff/rules/implicit-namespace-package/ - false positive because this is a script +] +lint.per-file-ignores."tests/**/*.py" = [ + "ANN201", # https://docs.astral.sh/ruff/rules/missing-return-type-undocumented-public-function/ - don't require test functions to have return types +] +lint.isort.section-order = [ + "future", + "standard-library", + "third-party", + "second-party", + "first-party", + "tests", + "local-folder", +] +lint.isort.sections.second-party = [ + "shared", + "shared_data_model", +] +lint.isort.sections.tests = [ + "tests", +] + +[tool.pyproject-fmt] +indent = 4 
+keep_full_version = true # Don't remove trailing zero's from version specifiers + [tool.mypy] plugins = "pydantic.mypy" ignore_missing_imports = false @@ -46,43 +104,3 @@ generate_hashes = true quiet = true strip_extras = true upgrade = true - -[tool.ruff] -target-version = "py312" -line-length = 120 -src = ["src"] - -[tool.ruff.lint] -select = ["ALL"] -ignore = [ - "ANN101", # https://docs.astral.sh/ruff/rules/missing-type-function-argument/ - type checkers can infer the type of `self`, so annotating it is superfluous - "COM812", # https://docs.astral.sh/ruff/rules/missing-trailing-comma/ - this rule may cause conflicts when used with the ruff formatter - "D203", # https://docs.astral.sh/ruff/rules/one-blank-line-before-class/ - prevent warning: `one-blank-line-before-class` (D203) and `no-blank-line-before-class` (D211) are incompatible. Ignoring `one-blank-line-before-class` - "D213", # https://docs.astral.sh/ruff/rules/multi-line-summary-second-line/ - prevent warning: `multi-line-summary-first-line` (D212) and `multi-line-summary-second-line` (D213) are incompatible. 
Ignoring `multi-line-summary-second-line` - "FBT", # https://docs.astral.sh/ruff/rules/#flake8-boolean-trap-fbt - not sure of the value of preventing "boolean traps" - "ISC001", # https://docs.astral.sh/ruff/rules/single-line-implicit-string-concatenation/ - this rule may cause conflicts when used with the ruff formatter - "PD", # https://docs.astral.sh/ruff/rules/#pandas-vet-pd - pandas isn't used - "PT", # https://docs.astral.sh/ruff/rules/#flake8-pytest-style-pt - pytest isn't used -] - -[tool.ruff.lint.isort] -section-order = ["future", "standard-library", "third-party", "second-party", "first-party", "tests", "local-folder"] - -[tool.ruff.lint.isort.sections] -"second-party" = ["shared", "shared_data_model"] -"tests" = ["tests"] - -[tool.ruff.lint.per-file-ignores] -".vulture_ignore_list.py" = ["ALL"] -"__init__.py" = [ - "D104", # https://docs.astral.sh/ruff/rules/undocumented-public-package/ - don't require doc strings in __init__.py files -] -"src/conf.py" = [ - "INP001", # https://docs.astral.sh/ruff/rules/implicit-namespace-package/ - false positive because this is a configuration file -] -"src/create_reference_md.py" = [ - "INP001", # https://docs.astral.sh/ruff/rules/implicit-namespace-package/ - false positive because this is a script -] -"tests/**/*.py" = [ - "ANN201", # https://docs.astral.sh/ruff/rules/missing-return-type-undocumented-public-function/ - don't require test functions to have return types -] diff --git a/release/.vulture_ignore_list.py b/release/.vulture_ignore_list.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/release/ci/pip-compile.sh b/release/ci/pip-compile.sh index c2578028c2..42518222db 100755 --- a/release/ci/pip-compile.sh +++ b/release/ci/pip-compile.sh @@ -1,7 +1,6 @@ #!/bin/bash -source ../ci/base.sh +PATH="$PATH:../ci" +source pip-base.sh -# Update the compiled requirements files -run pip-compile --output-file requirements/requirements.txt pyproject.toml -run pip-compile --extra dev --output-file 
requirements/requirements-dev.txt pyproject.toml +run_pip_compile diff --git a/release/ci/pip-install.sh b/release/ci/pip-install.sh index 601cf93308..53bafc4734 100755 --- a/release/ci/pip-install.sh +++ b/release/ci/pip-install.sh @@ -1,6 +1,6 @@ #!/bin/bash -source ../ci/base.sh +PATH="$PATH:../ci" +source pip-base.sh -# Install the requirements -run pip install --ignore-installed --quiet -r requirements/requirements-dev.txt +run_pip_install -r requirements/requirements-dev.txt diff --git a/release/ci/quality.sh b/release/ci/quality.sh new file mode 100755 index 0000000000..c99a142071 --- /dev/null +++ b/release/ci/quality.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +PATH="$PATH:../ci" +source quality-base.sh + +check_python_quality diff --git a/release/pyproject.toml b/release/pyproject.toml index 2dd4753a5e..3dcc03d98c 100644 --- a/release/pyproject.toml +++ b/release/pyproject.toml @@ -1,16 +1,52 @@ [project] name = "release" version = "5.13.0" +requires-python = ">=3.12" +classifiers = [ + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.12", +] dependencies = [ "bump-my-version==0.23.0", "gitpython==3.1.43", ] - -[project.optional-dependencies] -dev = [ +optional-dependencies.dev = [ "pip==24.0", - "pip-tools==7.4.1" # To add hashes to requirements + "pip-tools==7.4.1", # To add hashes to requirements + "pipx==1.6.0", +] +optional-dependencies.tools = [ + "bandit==1.7.9", + "fixit==2.1.0", + "mypy==1.10.0", + "pip-audit==2.7.3", + "pyproject-fmt==2.1.3", + "ruff==0.4.8", + "vulture==2.11", +] + +[tool.ruff] +target-version = "py312" +line-length = 120 +src = [ + "src", +] +lint.select = [ + "ALL", ] +lint.ignore = [ + "COM812", # https://docs.astral.sh/ruff/rules/missing-trailing-comma/ - this rule may cause conflicts when used with the ruff formatter + "D203", # https://docs.astral.sh/ruff/rules/one-blank-line-before-class/ - prevent warning: `one-blank-line-before-class` (D203) and `no-blank-line-before-class` (D211) are 
incompatible. Ignoring `one-blank-line-before-class` + "D213", # https://docs.astral.sh/ruff/rules/multi-line-summary-second-line/ - prevent warning: `multi-line-summary-first-line` (D212) and `multi-line-summary-second-line` (D213) are incompatible. Ignoring `multi-line-summary-second-line` + "ISC001", # https://docs.astral.sh/ruff/rules/single-line-implicit-string-concatenation/ - this rule may cause conflicts when used with the ruff formatter +] +lint.per-file-ignores.".vulture_ignore_list.py" = [ + "ALL", +] + +[tool.pyproject-fmt] +indent = 4 +keep_full_version = true # Don't remove trailing zero's from version specifiers [tool.bumpversion] current_version = "5.13.0" @@ -33,7 +69,10 @@ commit = true tag = true [tool.bumpversion.parts.pre_release_label] -values = ["rc", "final"] +values = [ + "rc", + "final", +] optional_value = "final" [[tool.bumpversion.files]] @@ -76,6 +115,12 @@ glob = "../**/pyproject.toml" search = 'version = "{current_version}"' replace = 'version = "{new_version}"' +[tool.bandit] +skips = [ + "B404", # Consider possible security implications associated with the subprocess module. + "B603", # subprocess call - check for execution of untrusted input. 
+] + [tool.pip-tools] allow_unsafe = true generate_hashes = true diff --git a/release/release.py b/release/release.py index 2faddc037c..e3c8cefda1 100755 --- a/release/release.py +++ b/release/release.py @@ -10,6 +10,7 @@ import sys import tomllib from argparse import ArgumentParser, RawDescriptionHelpFormatter +from typing import cast import git @@ -26,9 +27,10 @@ def get_version() -> str: with pathlib.Path(release_folder / "pyproject.toml").open(mode="rb") as py_project_toml_fp: py_project_toml = tomllib.load(py_project_toml_fp) version_re = py_project_toml["tool"]["bumpversion"]["parse"] - version_tags = [tag for tag in repo.tags if re.match(version_re, tag.tag.tag.strip("v"), re.MULTILINE)] + version_tags = [tag for tag in repo.tags if tag.tag and re.match(version_re, tag.tag.tag.strip("v"), re.MULTILINE)] latest_tag = sorted(version_tags, key=lambda tag: tag.commit.committed_datetime)[-1] - return latest_tag.tag.tag.strip("v") + # We cast latest_tag.tag to TagObject because we know it cannot be None, given how version_tags is constructed + return cast(git.TagObject, latest_tag.tag).tag.strip("v") def parse_arguments() -> tuple[str, str, bool]: @@ -44,13 +46,26 @@ def parse_arguments() -> tuple[str, str, bool]: - the changelog has an '[Unreleased]' header - the changelog contains no release candidates - the new release has been added to the version overview""" - parser = ArgumentParser(description=description, epilog=epilog, formatter_class=RawDescriptionHelpFormatter) - allowed_bumps_in_rc_mode = ["rc", "rc-major", "rc-minor", "rc-patch", "drop-rc"] # rc = release candidate + parser = ArgumentParser( + description=description, + epilog=epilog, + formatter_class=RawDescriptionHelpFormatter, + ) + allowed_bumps_in_rc_mode = [ + "rc", + "rc-major", + "rc-minor", + "rc-patch", + "drop-rc", + ] # rc = release candidate allowed_bumps = ["rc-patch", "rc-minor", "rc-major", "patch", "minor", "major"] bumps = allowed_bumps_in_rc_mode if "rc" in current_version else 
allowed_bumps parser.add_argument("bump", choices=bumps) parser.add_argument( - "-c", "--check-preconditions-only", action="store_true", help="only check the preconditions and then exit" + "-c", + "--check-preconditions-only", + action="store_true", + help="only check the preconditions and then exit", ) arguments = parser.parse_args() return arguments.bump, current_version, arguments.check_preconditions_only @@ -117,7 +132,7 @@ def failed_preconditions_version_overview(current_version: str, root: pathlib.Pa for line in version_overview_lines: if line.startswith(f"| v{current_version} "): if previous_line.startswith("| v"): - today = datetime.date.today().isoformat() + today = utc_today().isoformat() release_date = previous_line.split(" | ")[1].strip() if release_date != today: # Second column is the release date column return [f"{missing} the release date. Expected today: '{today}', found: '{release_date}'."] @@ -127,13 +142,18 @@ def failed_preconditions_version_overview(current_version: str, root: pathlib.Pa return [f"{missing} the current version ({current_version})."] +def utc_today() -> datetime.date: + """Return today in UTC.""" + return datetime.datetime.now(tz=datetime.UTC).date() + + def main() -> None: """Create the release.""" - os.environ["RELEASE_DATE"] = datetime.date.today().isoformat() # Used by bump-my-version to update CHANGELOG.md + os.environ["RELEASE_DATE"] = utc_today().isoformat() # Used by bump-my-version to update CHANGELOG.md bump, current_version, check_preconditions_only = parse_arguments() check_preconditions(bump, current_version) if check_preconditions_only: - return + return # See https://github.com/callowayproject/bump-my-version?tab=readme-ov-file#add-support-for-pre-release-versions # for how bump-my-version deals with pre-release versions if bump.startswith("rc-"): @@ -142,8 +162,8 @@ def main() -> None: bump = "pre_release_label" # Bump the pre-release label from "rc" to "final" (which is optional and omitted) elif bump == "rc": 
bump = "pre_release_number" # Bump the release candidate number - subprocess.run(("bump-my-version", "bump", bump), check=True) - subprocess.run(("git", "push", "--follow-tags"), check=True) + subprocess.run(("bump-my-version", "bump", bump), check=True) # noqa: S603 + subprocess.run(("git", "push", "--follow-tags"), check=True) # noqa: S603 if __name__ == "__main__": diff --git a/release/requirements/requirements-dev.txt b/release/requirements/requirements-dev.txt index 350ffc4bde..ed334dbb7d 100644 --- a/release/requirements/requirements-dev.txt +++ b/release/requirements/requirements-dev.txt @@ -8,6 +8,10 @@ annotated-types==0.7.0 \ --hash=sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53 \ --hash=sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89 # via pydantic +argcomplete==3.4.0 \ + --hash=sha256:69a79e083a716173e5532e0fa3bef45f793f4e61096cf52b5a42c0211c8b8aa5 \ + --hash=sha256:c2abcdfe1be8ace47ba777d4fce319eb13bf8ad9dace8d085dcad6eded88057f + # via pipx bracex==2.4 \ --hash=sha256:a27eaf1df42cf561fed58b7a8f3fdf129d1ea16a81e1fadd1d17989bc6384beb \ --hash=sha256:efdc71eff95eaff5e0f8cfebe7d01adf2c8637c8c92edaf63ef348c241a82418 @@ -27,6 +31,7 @@ click==8.1.7 \ # bump-my-version # pip-tools # rich-click + # userpath gitdb==4.0.11 \ --hash=sha256:81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4 \ --hash=sha256:bf5421126136d6d0af55bc1e7c1af1c397a34f5b7bd79e776cd3e89785c2b04b @@ -46,11 +51,21 @@ mdurl==0.1.2 \ packaging==24.1 \ --hash=sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002 \ --hash=sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124 - # via build + # via + # build + # pipx pip-tools==7.4.1 \ --hash=sha256:4c690e5fbae2f21e87843e89c26191f0d9454f362d8acdbd695716493ec8b3a9 \ --hash=sha256:864826f5073864450e24dbeeb85ce3920cdfb09848a3d69ebf537b521f14bcc9 # via release (pyproject.toml) +pipx==1.6.0 \ + 
--hash=sha256:760889dc3aeed7bf4024973bf22ca0c2a891003f52389159ab5cb0c57d9ebff4 \ + --hash=sha256:840610e00103e3d49ae24b6b51804b60988851a5dd65468adb71e5a97e2699b2 + # via release (pyproject.toml) +platformdirs==4.2.2 \ + --hash=sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee \ + --hash=sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3 + # via pipx prompt-toolkit==3.0.36 \ --hash=sha256:3e163f254bef5a03b146397d7c1963bd3e2812f0964bb9a24e6ec761fd28db63 \ --hash=sha256:aa64ad242a462c5ff0363a7b9cfe696c20d55d9fc60c11fd8e632d064804d305 @@ -189,6 +204,10 @@ typing-extensions==4.12.2 \ # pydantic # pydantic-core # rich-click +userpath==1.9.2 \ + --hash=sha256:2cbf01a23d655a1ff8fc166dfb78da1b641d1ceabf0fe5f970767d380b14e89d \ + --hash=sha256:6c52288dab069257cc831846d15d48133522455d4677ee69a9781f11dbefd815 + # via pipx wcmatch==8.5.2 \ --hash=sha256:17d3ad3758f9d0b5b4dedc770b65420d4dac62e680229c287bf24c9db856a478 \ --hash=sha256:a70222b86dea82fb382dd87b73278c10756c138bd6f8f714e2183128887b9eb2 diff --git a/tests/application_tests/.vulture_ignore_list.py b/tests/application_tests/.vulture_ignore_list.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/application_tests/ci/pip-compile.sh b/tests/application_tests/ci/pip-compile.sh index 815df6440a..06fa99c058 100755 --- a/tests/application_tests/ci/pip-compile.sh +++ b/tests/application_tests/ci/pip-compile.sh @@ -1,7 +1,6 @@ #!/bin/bash -source ../../ci/base.sh +PATH="$PATH:../../ci" +source pip-base.sh -# Update the compiled requirements files -run pip-compile --output-file requirements/requirements.txt pyproject.toml -run pip-compile --extra dev --output-file requirements/requirements-dev.txt pyproject.toml +run_pip_compile diff --git a/tests/application_tests/ci/pip-install.sh b/tests/application_tests/ci/pip-install.sh index 3fed8d3a71..dfb6e8e120 100755 --- a/tests/application_tests/ci/pip-install.sh +++ b/tests/application_tests/ci/pip-install.sh @@ -1,6 
+1,6 @@ #!/bin/bash -source ../../ci/base.sh +PATH="$PATH:../../ci" +source pip-base.sh -# Install the requirements -run pip install --ignore-installed --quiet -r requirements/requirements.txt -r requirements/requirements-dev.txt +run_pip_install -r requirements/requirements.txt -r requirements/requirements-dev.txt diff --git a/tests/application_tests/ci/quality.sh b/tests/application_tests/ci/quality.sh new file mode 100755 index 0000000000..2f19fa446e --- /dev/null +++ b/tests/application_tests/ci/quality.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +PATH="$PATH:../../ci" +source quality-base.sh + +check_python_quality diff --git a/tests/application_tests/ci/unittest.sh b/tests/application_tests/ci/unittest.sh new file mode 100755 index 0000000000..9a4802fb92 --- /dev/null +++ b/tests/application_tests/ci/unittest.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +# Dummy unittest.sh so we can run the same steps for different components. diff --git a/tests/application_tests/pyproject.toml b/tests/application_tests/pyproject.toml index 4cb8ec257a..3441713c74 100644 --- a/tests/application_tests/pyproject.toml +++ b/tests/application_tests/pyproject.toml @@ -1,17 +1,79 @@ [project] name = "application-tests" version = "5.13.0" +requires-python = ">=3.12" +classifiers = [ + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.12", +] dependencies = [ "axe-selenium-python==2.1.6", "requests==2.32.3", - "selenium==4.21.0" + "selenium==4.21.0", ] - -[project.optional-dependencies] -dev = [ +optional-dependencies.dev = [ "pip==24.0", - "pip-tools==7.4.1" # To add hashes to requirements + "pip-tools==7.4.1", # To add hashes to requirements + "pipx==1.6.0", + "types-requests==2.32.0.20240602", +] +optional-dependencies.tools = [ + "bandit==1.7.9", + "fixit==2.1.0", + "mypy==1.10.0", + "pip-audit==2.7.3", + "pyproject-fmt==2.1.3", + "ruff==0.4.8", + "vulture==2.11", +] + +[tool.ruff] +target-version = "py312" +line-length = 120 +src = [ + "src", +] +lint.select = [ + 
"ALL", +] +lint.ignore = [ + "ANN001", # https://docs.astral.sh/ruff/rules/missing-type-function-argument/ - too many untyped arguments atm to turn this rule on + "ANN101", # https://docs.astral.sh/ruff/rules/missing-type-function-argument/ - type checkers can infer the type of `self`, so annotating it is superfluous + "ANN201", # https://docs.astral.sh/ruff/rules/missing-return-type-undocumented-public-function/ - too many untyped return values atm to turn this rule on + "ANN204", # https://docs.astral.sh/ruff/rules/missing-return-type-special-method/ - typing classes that inherit from set and list correctly is surprisingly hard + "COM812", # https://docs.astral.sh/ruff/rules/missing-trailing-comma/ - this rule may cause conflicts when used with the ruff formatter + "D107", # https://docs.astral.sh/ruff/rules/undocumented-public-init/ - requiring __init__() methods to have docstrings seems a bit much + "D203", # https://docs.astral.sh/ruff/rules/one-blank-line-before-class/ - prevent warning: `one-blank-line-before-class` (D203) and `no-blank-line-before-class` (D211) are incompatible. Ignoring `one-blank-line-before-class` + "D213", # https://docs.astral.sh/ruff/rules/multi-line-summary-second-line/ - prevent warning: `multi-line-summary-first-line` (D212) and `multi-line-summary-second-line` (D213) are incompatible. 
Ignoring `multi-line-summary-second-line` + "FBT", # https://docs.astral.sh/ruff/rules/#flake8-boolean-trap-fbt - not sure of the value of preventing "boolean traps" + "ISC001", # https://docs.astral.sh/ruff/rules/single-line-implicit-string-concatenation/ - this rule may cause conflicts when used with the ruff formatter + "PD", # https://docs.astral.sh/ruff/rules/#pandas-vet-pd - pandas isn't used + "PT", # https://docs.astral.sh/ruff/rules/#flake8-pytest-style-pt - pytest isn't used +] +lint.per-file-ignores.".vulture_ignore_list.py" = [ + "ALL", +] +lint.per-file-ignores."__init__.py" = [ + "D104", # https://docs.astral.sh/ruff/rules/undocumented-public-package/ - don't require doc strings in __init__.py files +] + +[tool.pyproject-fmt] +indent = 4 +keep_full_version = true # Don't remove trailing zero's from version specifiers + +[tool.mypy] +ignore_missing_imports = false +incremental = false +warn_redundant_casts = true +warn_return_any = true +warn_unreachable = true +warn_unused_ignores = true + +[[tool.mypy.overrides]] +module = [ + "axe_selenium_python", ] +ignore_missing_imports = true [tool.pip-tools] allow_unsafe = true diff --git a/tests/application_tests/requirements/requirements-dev.txt b/tests/application_tests/requirements/requirements-dev.txt index 9b49c6e5ed..0d6c811876 100644 --- a/tests/application_tests/requirements/requirements-dev.txt +++ b/tests/application_tests/requirements/requirements-dev.txt @@ -4,6 +4,10 @@ # # ci/pip-compile.sh # +argcomplete==3.4.0 \ + --hash=sha256:69a79e083a716173e5532e0fa3bef45f793f4e61096cf52b5a42c0211c8b8aa5 \ + --hash=sha256:c2abcdfe1be8ace47ba777d4fce319eb13bf8ad9dace8d085dcad6eded88057f + # via pipx attrs==23.2.0 \ --hash=sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30 \ --hash=sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1 @@ -119,7 +123,9 @@ charset-normalizer==3.3.2 \ click==8.1.7 \ 
--hash=sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28 \ --hash=sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de - # via pip-tools + # via + # pip-tools + # userpath h11==0.14.0 \ --hash=sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d \ --hash=sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761 @@ -143,11 +149,20 @@ packaging==24.1 \ --hash=sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124 # via # build + # pipx # pytest pip-tools==7.4.1 \ --hash=sha256:4c690e5fbae2f21e87843e89c26191f0d9454f362d8acdbd695716493ec8b3a9 \ --hash=sha256:864826f5073864450e24dbeeb85ce3920cdfb09848a3d69ebf537b521f14bcc9 # via application-tests (pyproject.toml) +pipx==1.6.0 \ + --hash=sha256:760889dc3aeed7bf4024973bf22ca0c2a891003f52389159ab5cb0c57d9ebff4 \ + --hash=sha256:840610e00103e3d49ae24b6b51804b60988851a5dd65468adb71e5a97e2699b2 + # via application-tests (pyproject.toml) +platformdirs==4.2.2 \ + --hash=sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee \ + --hash=sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3 + # via pipx pluggy==1.5.0 \ --hash=sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1 \ --hash=sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669 @@ -195,6 +210,10 @@ trio-websocket==0.11.1 \ --hash=sha256:18c11793647703c158b1f6e62de638acada927344d534e3c7628eedcb746839f \ --hash=sha256:520d046b0d030cf970b8b2b2e00c4c2245b3807853ecd44214acd33d74581638 # via selenium +types-requests==2.32.0.20240602 \ + --hash=sha256:3f98d7bbd0dd94ebd10ff43a7fbe20c3b8528acace6d8efafef0b6a184793f06 \ + --hash=sha256:ed3946063ea9fbc6b5fc0c44fa279188bae42d582cb63760be6cb4b9d06c3de8 + # via application-tests (pyproject.toml) typing-extensions==4.12.2 \ --hash=sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d \ 
--hash=sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8 @@ -205,6 +224,11 @@ urllib3==2.2.2 \ # via # requests # selenium + # types-requests +userpath==1.9.2 \ + --hash=sha256:2cbf01a23d655a1ff8fc166dfb78da1b641d1ceabf0fe5f970767d380b14e89d \ + --hash=sha256:6c52288dab069257cc831846d15d48133522455d4677ee69a9781f11dbefd815 + # via pipx wheel==0.43.0 \ --hash=sha256:465ef92c69fa5c5da2d1cf8ac40559a8c940886afcef87dcf14b9470862f1d85 \ --hash=sha256:55c570405f142630c6b9f72fe09d9b67cf1477fcf543ae5b8dcb1f5b7377da81 diff --git a/tests/application_tests/src/test_api.py b/tests/application_tests/src/test_api.py index 9ac89832f1..0897e53d95 100644 --- a/tests/application_tests/src/test_api.py +++ b/tests/application_tests/src/test_api.py @@ -11,5 +11,5 @@ class ApiTest(unittest.TestCase): def test_documentation(self): """Test that the documentation API is available.""" apis = requests.get("http://www:8080/api", timeout=10).json().keys() - self.assertTrue("/api/internal/login" in apis) - self.assertTrue("/api/v3/login" in apis) + self.assertIn("/api/internal/login", apis) + self.assertIn("/api/v3/login", apis) diff --git a/tests/application_tests/src/test_report.py b/tests/application_tests/src/test_report.py index 1d318fc9fd..8ed3e00a6a 100644 --- a/tests/application_tests/src/test_report.py +++ b/tests/application_tests/src/test_report.py @@ -10,29 +10,31 @@ from selenium.webdriver.support.ui import WebDriverWait -class element_has_no_css_class: - """An expectation for checking that an element has no css class. +class ElementHasNoCSSClass: + """An expectation for checking that an element has no CSS class. 
 locator - used to find the element - returns the WebElement once it has the particular css class + returns the WebElement once it has no particular CSS class """ - def __init__(self, locator): + def __init__(self, locator) -> None: self.locator = locator def __call__(self, driver): + """Return the element if it no longer has a CSS class, otherwise False.""" element = driver.find_element(*self.locator) return element if len(element.get_attribute("class")) == 0 else False -class nr_elements: +class NrElements: """An expectation for the number of matching elements.""" - def __init__(self, locator, expected_nr: int): + def __init__(self, locator, expected_nr: int) -> None: self.locator = locator self.expected_nr = expected_nr def __call__(self, driver): + """Return the element if it has the expected number of elements, otherwise False.""" elements = driver.find_elements(*self.locator) return elements if len(elements) == self.expected_nr else False @@ -60,7 +62,7 @@ def login(self): login_form.find_element(By.NAME, "username").send_keys("jadoe") login_form.find_element(By.NAME, "password").send_keys("secret") login_form.find_element(By.CLASS_NAME, "button").click() - self.wait.until(element_has_no_css_class((By.TAG_NAME, "body"))) # Wait for body dimmer to disappear + self.wait.until(ElementHasNoCSSClass((By.TAG_NAME, "body"))) # Wait for body dimmer to disappear def test_title(self): """Test the title.""" @@ -90,7 +92,7 @@ def test_add_report(self): self.login() nr_reports = len(self.driver.find_elements(By.CLASS_NAME, "card")) self.driver.find_element(By.CLASS_NAME, "button.primary").click() - self.wait.until(nr_elements((By.CLASS_NAME, "card"), nr_reports + 1)) + self.wait.until(NrElements((By.CLASS_NAME, "card"), nr_reports + 1)) def test_report_axe_accessibility(self): """Run axe accessibility check on a report.""" @@ -104,15 +106,16 @@ def test_report_axe_accessibility(self): # Process axe results violation_results = results1["violations"] - 
axe.write_results(results1, '../../build/a11y.json') + axe.write_results(results1, "../../build/a11y.json") readable_report = axe.report(violation_results) - filename = pathlib.Path('../../build/a11y_violations.txt') + filename = pathlib.Path("../../build/a11y_violations.txt") try: - with open(filename, "w", encoding="utf8") as report_file: + with filename.open("w", encoding="utf8") as report_file: report_file.write(readable_report) except OSError: - print("Could not write axe violations report") + self.fail("Could not write axe violations report") - # If there are violations, output the readable report data - # TODO - assertEqual 0 in https://github.com/ICTU/quality-time/issues/6354 - self.assertTrue(6 >= len(violation_results), readable_report) + # If there are more violations than expected, output the readable report data + # Fixing the axe violations is on the backlog: https://github.com/ICTU/quality-time/issues/6354 + current_number_of_axe_violations = 6 + self.assertLessEqual(len(violation_results), current_number_of_axe_violations, readable_report) diff --git a/tests/feature_tests/ci/pip-compile.sh b/tests/feature_tests/ci/pip-compile.sh index 815df6440a..3ddc132f40 100755 --- a/tests/feature_tests/ci/pip-compile.sh +++ b/tests/feature_tests/ci/pip-compile.sh @@ -1,7 +1,6 @@ #!/bin/bash -source ../../ci/base.sh +PATH="$PATH:../../ci" +source pip-base.sh -# Update the compiled requirements files -run pip-compile --output-file requirements/requirements.txt pyproject.toml -run pip-compile --extra dev --output-file requirements/requirements-dev.txt pyproject.toml +run_pip_compile diff --git a/tests/feature_tests/ci/pip-install.sh b/tests/feature_tests/ci/pip-install.sh index 3fed8d3a71..dfb6e8e120 100755 --- a/tests/feature_tests/ci/pip-install.sh +++ b/tests/feature_tests/ci/pip-install.sh @@ -1,6 +1,6 @@ #!/bin/bash -source ../../ci/base.sh +PATH="$PATH:../../ci" +source pip-base.sh -# Install the requirements -run pip install --ignore-installed --quiet -r 
requirements/requirements.txt -r requirements/requirements-dev.txt +run_pip_install -r requirements/requirements.txt -r requirements/requirements-dev.txt diff --git a/tests/feature_tests/ci/quality.sh b/tests/feature_tests/ci/quality.sh index 686861329f..2f19fa446e 100755 --- a/tests/feature_tests/ci/quality.sh +++ b/tests/feature_tests/ci/quality.sh @@ -1,31 +1,6 @@ #!/bin/bash -source ../../ci/base.sh +PATH="$PATH:../../ci" +source quality-base.sh -# Ruff -run pipx run `spec ruff` check . -run pipx run `spec ruff` format --check . - -# Fixit -run pipx run `spec fixit` lint src - -# Mypy -run pipx run `spec mypy` --python-executable=$(which python) src - -# pip-audit -run pipx run `spec pip-audit` --strict --progress-spinner=off -r requirements/requirements-dev.txt - -# Safety -# Vulnerability ID: 67599 -# ADVISORY: ** DISPUTED ** An issue was discovered in pip (all versions) because it installs the version with the -# highest version number, even if the user had intended to obtain a private package from a private index. This only -# affects use of the --extra-index-url option, and exploitation requires that the... 
-# CVE-2018-20225 -# For more information about this vulnerability, visit https://data.safetycli.com/v/67599/97c -run pipx run `spec safety` check --bare --ignore 67599 -r requirements/requirements-dev.txt - -# Bandit -run pipx run `spec bandit` --quiet --recursive src/ - -# Vulture -run pipx run `spec vulture` --min-confidence 0 src/ .vulture_ignore_list.py +check_python_quality diff --git a/tests/feature_tests/pyproject.toml b/tests/feature_tests/pyproject.toml index 4bed5a0940..657573502b 100644 --- a/tests/feature_tests/pyproject.toml +++ b/tests/feature_tests/pyproject.toml @@ -1,33 +1,64 @@ [project] name = "feature-tests" version = "5.13.0" +requires-python = ">=3.12" +classifiers = [ + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.12", +] dependencies = [ "asserts==0.13.1", "behave==1.2.6", "gevent==24.2.1", "pymongo==4.7.3", "requests==2.32.3", - "sseclient==0.0.27" + "sseclient==0.0.27", ] - -[project.optional-dependencies] -dev = [ +optional-dependencies.dev = [ "coverage==7.5.3", "pip==24.0", + "pip-tools==7.4.1", # To add hashes to requirements "pipx==1.6.0", - "pip-tools==7.4.1", # To add hashes to requirements "types-requests==2.32.0.20240602", - "unittest-xml-reporting==3.2.0", # Needed to generate JUnit XML output for Sonarcloud.io + "unittest-xml-reporting==3.2.0", # Needed to generate JUnit XML output for Sonarcloud.io ] -tools = [ +optional-dependencies.tools = [ "bandit==1.7.9", "fixit==2.1.0", "mypy==1.10.0", "pip-audit==2.7.3", + "pyproject-fmt==2.1.3", "ruff==0.4.9", - "safety==3.2.3", - "vulture==2.11" + "vulture==2.11", +] + +[tool.ruff] +target-version = "py312" +line-length = 120 +src = [ + "src", + "src/steps", +] +lint.select = [ + "ALL", +] +lint.ignore = [ + "COM812", # https://docs.astral.sh/ruff/rules/missing-trailing-comma/ - this rule may cause conflicts when used with the ruff formatter + "D203", # https://docs.astral.sh/ruff/rules/one-blank-line-before-class/ - prevent warning: 
`one-blank-line-before-class` (D203) and `no-blank-line-before-class` (D211) are incompatible. Ignoring `one-blank-line-before-class` + "D213", # https://docs.astral.sh/ruff/rules/multi-line-summary-second-line/ - prevent warning: `multi-line-summary-first-line` (D212) and `multi-line-summary-second-line` (D213) are incompatible. Ignoring `multi-line-summary-second-line` + "FBT", # https://docs.astral.sh/ruff/rules/#flake8-boolean-trap-fbt - not sure of the value of preventing "boolean traps" + "ISC001", # https://docs.astral.sh/ruff/rules/single-line-implicit-string-concatenation/ - this rule may cause conflicts when used with the ruff formatter +] +lint.per-file-ignores.".vulture_ignore_list.py" = [ + "ALL", ] +lint.per-file-ignores."__init__.py" = [ + "D104", # https://docs.astral.sh/ruff/rules/undocumented-public-package/ - don't require doc strings in __init__.py files +] + +[tool.pyproject-fmt] +indent = 4 +keep_full_version = true # Don't remove trailing zero's from version specifiers [tool.mypy] ignore_missing_imports = false @@ -43,7 +74,7 @@ module = [ "behave", "behave.model", "behave.runner", - "sseclient" + "sseclient", ] ignore_missing_imports = true @@ -53,24 +84,3 @@ generate_hashes = true quiet = true strip_extras = true upgrade = true - -[tool.ruff] -target-version = "py312" -line-length = 120 -src = ["src", "src/steps"] - -[tool.ruff.lint] -select = ["ALL"] -ignore = [ - "COM812", # https://docs.astral.sh/ruff/rules/missing-trailing-comma/ - this rule may cause conflicts when used with the ruff formatter - "D203", # https://docs.astral.sh/ruff/rules/one-blank-line-before-class/ - prevent warning: `one-blank-line-before-class` (D203) and `no-blank-line-before-class` (D211) are incompatible. Ignoring `one-blank-line-before-class` - "D213", # https://docs.astral.sh/ruff/rules/multi-line-summary-second-line/ - prevent warning: `multi-line-summary-first-line` (D212) and `multi-line-summary-second-line` (D213) are incompatible. 
Ignoring `multi-line-summary-second-line` - "FBT", # https://docs.astral.sh/ruff/rules/#flake8-boolean-trap-fbt - not sure of the value of preventing "boolean traps" - "ISC001", # https://docs.astral.sh/ruff/rules/single-line-implicit-string-concatenation/ - this rule may cause conflicts when used with the ruff formatter -] - -[tool.ruff.lint.per-file-ignores] -".vulture_ignore_list.py" = ["ALL"] -"__init__.py" = [ - "D104", # https://docs.astral.sh/ruff/rules/undocumented-public-package/ - don't require doc strings in __init__.py files -]