From 2a49b0444e3a53b674aacf7ba25c1c59fcf6cf03 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9r=C3=A9my=20Morosi?= Date: Tue, 30 Jul 2024 15:45:41 +0200 Subject: [PATCH 1/2] Fix various typing errors --- src/e3/aws/__init__.py | 21 ++++++----- src/e3/aws/pricing/__init__.py | 4 +-- .../tests_e3_aws/assume_profile_main_test.py | 6 +++- tests/tests_e3_aws/dynamodb/main_test.py | 4 +-- tests/tests_e3_aws/pricing/main_test.py | 4 +-- .../troposphere/apigateway/apigateway_test.py | 5 ++- .../troposphere/awslambda/awslambda_test.py | 35 ++++++++++++------- .../troposphere/cfn_project_test.py | 7 ++-- .../troposphere/cloudwatch/cloudwatch_test.py | 6 ++-- .../troposphere/dynamodb/dynamodb_test.py | 5 ++- .../tests_e3_aws/troposphere/iam/iam_test.py | 2 +- 11 files changed, 63 insertions(+), 36 deletions(-) diff --git a/src/e3/aws/__init__.py b/src/e3/aws/__init__.py index 71785fc6..e9bbc3eb 100644 --- a/src/e3/aws/__init__.py +++ b/src/e3/aws/__init__.py @@ -27,6 +27,7 @@ from typing import Any, TypedDict, Callable import botocore.client import botocore.stub + from datetime import datetime class AWSCredentials(TypedDict, total=False): """Annotate a dict containing AWS credentials. @@ -44,7 +45,7 @@ class AWSCredentials(TypedDict, total=False): AccessKeyId: str SecretAccessKey: str SessionToken: str - Expiration: str + Expiration: datetime class AWSSessionRunError(E3Error): @@ -372,7 +373,7 @@ def wrapper(*args, **kwargs): return decorator -def assume_profile_main(): +def assume_profile_main() -> None: """Generate shell commands to set credentials for a profile.""" argument_parser = argparse.ArgumentParser() argument_parser.add_argument( @@ -416,7 +417,7 @@ def assume_profile_main(): print(f"export {k}={v}") -def assume_role_main(): +def assume_role_main() -> None: """Generate shell commands to set credentials for a role.""" argument_parser = argparse.ArgumentParser() argument_parser.add_argument( @@ -449,14 +450,18 @@ def assume_role_main(): credentials = s.assume_role_get_credentials( args.role_arn, role_session_name, session_duration=session_duration ) - credentials["Expiration"] = credentials["Expiration"].timestamp() + credentials_float = credentials | { + "Expiration": credentials["Expiration"].timestamp() + } if args.json: - print(json.dumps(credentials)) + print(json.dumps(credentials_float)) else: - credentials = { - key_to_envvar[k]: v for k, v in credentials.items() if k in key_to_envvar + credentials_float = { + key_to_envvar[k]: v + for k, v in credentials_float.items() + if k in key_to_envvar } - for k, v in credentials.items(): + for k, v in credentials_float.items(): print(f"export {k}={v}") diff --git a/src/e3/aws/pricing/__init__.py b/src/e3/aws/pricing/__init__.py index 3c618fcf..6729fba1 100644 --- a/src/e3/aws/pricing/__init__.py +++ b/src/e3/aws/pricing/__init__.py @@ -5,10 +5,10 @@ from e3.aws.util import get_region_name if TYPE_CHECKING: - from typing import Any + from typing import Any, Union import botocore - _CacheKey = tuple[str | None, str | None, str | None] + _CacheKey = tuple[Union[str, None], Union[str, None], Union[str, None]] # This is only to avoid repeating the type everywhere PriceInformation = dict[str, Any] diff --git a/tests/tests_e3_aws/assume_profile_main_test.py b/tests/tests_e3_aws/assume_profile_main_test.py index 58965577..48acaf3f 100644 --- a/tests/tests_e3_aws/assume_profile_main_test.py +++ b/tests/tests_e3_aws/assume_profile_main_test.py @@ -45,7 +45,11 @@ def get_frozen_credentials(self) -> ReadOnlyCredentials: "json,expected_output", [(False, 
EXPECTED_DEFAULT_OUTPUT), (True, EXPECTED_JSON_OUTPUT)], ) -def test_assume_profile_main_json(json: bool, expected_output: str, capfd): +def test_assume_profile_main_json( + json: bool, + expected_output: str, + capfd: pytest.CaptureFixture[str], +) -> None: """Test the credentials returned by assume_profile_main.""" with ( mock.patch( diff --git a/tests/tests_e3_aws/dynamodb/main_test.py b/tests/tests_e3_aws/dynamodb/main_test.py index a9eec52d..56c4df4b 100644 --- a/tests/tests_e3_aws/dynamodb/main_test.py +++ b/tests/tests_e3_aws/dynamodb/main_test.py @@ -122,7 +122,7 @@ def test_update_item(client: DynamoDB) -> None: client.update_item( item=customers[0], table_name=TABLE_NAME, - keys=PRIMARY_KEYS, + keys=("name", "S"), data={"age": 33}, ) @@ -138,7 +138,7 @@ def test_update_item_condition(client: DynamoDB) -> None: client.update_item( item=customers[0], table_name=TABLE_NAME, - keys=PRIMARY_KEYS, + keys=("name", "S"), data={"age": 33}, condition_expression="attribute_exists(#n) AND #a = :a", expression_attribute_names={"#n": "name", "#a": "age"}, diff --git a/tests/tests_e3_aws/pricing/main_test.py b/tests/tests_e3_aws/pricing/main_test.py index c3541a08..e05d0ca5 100644 --- a/tests/tests_e3_aws/pricing/main_test.py +++ b/tests/tests_e3_aws/pricing/main_test.py @@ -66,8 +66,8 @@ {"Field": "capacitystatus", "Type": "TERM_MATCH", "Value": "Used"}, {"Field": "preInstalledSw", "Type": "TERM_MATCH", "Value": "NA"}, {"Field": "tenancy", "Type": "TERM_MATCH", "Value": "shared"}, - ] - + GET_PRODUCTS_PARAMS["Filters"], + *GET_PRODUCTS_PARAMS["Filters"], + ], } diff --git a/tests/tests_e3_aws/troposphere/apigateway/apigateway_test.py b/tests/tests_e3_aws/troposphere/apigateway/apigateway_test.py index 499dd234..a888cb48 100644 --- a/tests/tests_e3_aws/troposphere/apigateway/apigateway_test.py +++ b/tests/tests_e3_aws/troposphere/apigateway/apigateway_test.py @@ -1,4 +1,5 @@ from __future__ import annotations +from typing import Any, cast import json import os import pytest @@ -283,7 +284,9 @@ }, "TestapiIntegration": { "Properties": { - **EXPECTED_TEMPLATE["TestapiIntegration"]["Properties"], + **cast( + dict[str, Any], EXPECTED_TEMPLATE["TestapiIntegration"]["Properties"] + ), "IntegrationUri": "arn:aws:lambda:eu-west-1:123456789012:function:" "mypylambda:${stageVariables.lambdaAlias}", }, diff --git a/tests/tests_e3_aws/troposphere/awslambda/awslambda_test.py b/tests/tests_e3_aws/troposphere/awslambda/awslambda_test.py index e18ff2c8..c75e5811 100644 --- a/tests/tests_e3_aws/troposphere/awslambda/awslambda_test.py +++ b/tests/tests_e3_aws/troposphere/awslambda/awslambda_test.py @@ -34,8 +34,9 @@ from e3.pytest import require_tool if TYPE_CHECKING: - from typing import Iterable + from typing import Iterable, Callable from flask import Application, Response + from pathlib import Path SOURCE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "source_dir") @@ -414,7 +415,7 @@ def test_pyfunction(stack: Stack) -> None: assert stack.export()["Resources"] == EXPECTED_PYFUNCTION_TEMPLATE -def test_pyfunction_with_requirements(tmp_path, stack: Stack) -> None: +def test_pyfunction_with_requirements(tmp_path: Path, stack: Stack) -> None: """Test PyFunction creation.""" stack.s3_bucket = "cfn_bucket" stack.s3_key = "templates/" @@ -472,7 +473,7 @@ def test_pyfunction_policy_document(stack: Stack) -> None: @pytest.mark.skip( reason="This test does not work in GitLab CI jobs. 
Disable it for now.", ) -def test_docker_function(stack: Stack, has_docker: pytest.Fixture) -> None: +def test_docker_function(stack: Stack, has_docker: Callable) -> None: """Test adding docker function to stack.""" aws_env = AWSEnv(regions=["us-east-1"], stub=True) stubber_ecr = aws_env.stub("ecr") @@ -607,10 +608,14 @@ def test_autoversion_default(stack: Stack, simple_lambda_function: PyFunction) - stack.add(auto_version) print(stack.export()["Resources"]) assert stack.export()["Resources"] == EXPECTED_AUTOVERSION_DEFAULT_TEMPLATE - assert auto_version.get_version(1).name == "mypylambdaVersion1" - assert auto_version.get_version(2).name == "mypylambdaVersion2" - assert auto_version.previous.name == "mypylambdaVersion1" - assert auto_version.latest.name == "mypylambdaVersion2" + assert ( + version := auto_version.get_version(1) + ) and version.name == "mypylambdaVersion1" + assert ( + version := auto_version.get_version(2) + ) and version.name == "mypylambdaVersion2" + assert (version := auto_version.previous) and version.name == "mypylambdaVersion1" + assert (version := auto_version.latest) and version.name == "mypylambdaVersion2" def test_autoversion_single(stack: Stack, simple_lambda_function: PyFunction) -> None: @@ -641,10 +646,14 @@ def test_autoversion(stack: Stack, simple_lambda_function: PyFunction) -> None: stack.add(auto_version) print(stack.export()["Resources"]) assert stack.export()["Resources"] == EXPECTED_AUTOVERSION_TEMPLATE - assert auto_version.get_version(2).name == "mypylambdaVersion2" - assert auto_version.get_version(3).name == "mypylambdaVersion3" - assert auto_version.previous.name == "mypylambdaVersion2" - assert auto_version.latest.name == "mypylambdaVersion3" + assert ( + version := auto_version.get_version(2) + ) and version.name == "mypylambdaVersion2" + assert ( + version := auto_version.get_version(3) + ) and version.name == "mypylambdaVersion3" + assert (version := auto_version.previous) and version.name == "mypylambdaVersion2" + assert (version := auto_version.latest) and version.name == "mypylambdaVersion3" def test_bluegreenaliases_default( @@ -798,7 +807,7 @@ def get_base64_response() -> Response: yield app -def test_text_response(base64_response_server: Application): +def test_text_response(base64_response_server: Application) -> None: """Query a route sending back a plain text response.""" with open( os.path.join( @@ -815,7 +824,7 @@ def test_text_response(base64_response_server: Application): assert response["body"] == b"world" -def test_base64_response(base64_response_server: Application): +def test_base64_response(base64_response_server: Application) -> None: """Query a route sending back a base64 encoded response.""" with open( os.path.join( diff --git a/tests/tests_e3_aws/troposphere/cfn_project_test.py b/tests/tests_e3_aws/troposphere/cfn_project_test.py index aa8f393d..f57cf80a 100644 --- a/tests/tests_e3_aws/troposphere/cfn_project_test.py +++ b/tests/tests_e3_aws/troposphere/cfn_project_test.py @@ -11,7 +11,8 @@ if TYPE_CHECKING: - from e3.aws.troposphere import Stack + import pytest + from e3.aws.cfn import Stack TEST_DIR = os.path.dirname(os.path.abspath(__file__)) @@ -20,7 +21,7 @@ class MyCFNProject(CFNProjectMain): """Provide CLI to manage MyCFNProject.""" - def create_stack(self) -> list[Stack]: + def create_stack(self) -> Stack | list[Stack]: """Return MyCFNProject stack.""" self.add( ( @@ -34,7 +35,7 @@ def create_stack(self) -> list[Stack]: return self.stack -def test_cfn_project_main(capfd) -> None: +def 
test_cfn_project_main(capfd: pytest.CaptureFixture[str]) -> None: """Test CFNProjectMain.""" aws_env = AWSEnv(regions=["eu-west-1"], stub=True) test = MyCFNProject( diff --git a/tests/tests_e3_aws/troposphere/cloudwatch/cloudwatch_test.py b/tests/tests_e3_aws/troposphere/cloudwatch/cloudwatch_test.py index 009041a1..cc150d8e 100644 --- a/tests/tests_e3_aws/troposphere/cloudwatch/cloudwatch_test.py +++ b/tests/tests_e3_aws/troposphere/cloudwatch/cloudwatch_test.py @@ -1,5 +1,5 @@ from __future__ import annotations - +from typing import Any, cast from troposphere import Ref from e3.aws.troposphere import Stack from e3.aws.troposphere.cloudwatch import Alarm @@ -22,7 +22,9 @@ EXPECTED_ALARM_TEMPLATE = { "Myalarm": { "Properties": { - **EXPECTED_ALARM_DEFAULT_TEMPLATE["Myalarm"]["Properties"], + **cast( + dict[str, Any], EXPECTED_ALARM_DEFAULT_TEMPLATE["Myalarm"]["Properties"] + ), **{ "AlarmActions": ["StrAction", {"Ref": "RefAction"}], "Dimensions": [ diff --git a/tests/tests_e3_aws/troposphere/dynamodb/dynamodb_test.py b/tests/tests_e3_aws/troposphere/dynamodb/dynamodb_test.py index d276c9d2..9fc237d4 100644 --- a/tests/tests_e3_aws/troposphere/dynamodb/dynamodb_test.py +++ b/tests/tests_e3_aws/troposphere/dynamodb/dynamodb_test.py @@ -1,4 +1,5 @@ from __future__ import annotations +from typing import Any, cast import os import json from troposphere import Ref @@ -35,7 +36,9 @@ EXPECTED_TABLE_TEMPLATE = { "Mytable": { "Properties": { - **EXPECTED_TABLE_DEFAULT_TEMPLATE["Mytable"]["Properties"], + **cast( + dict[str, Any], EXPECTED_TABLE_DEFAULT_TEMPLATE["Mytable"]["Properties"] + ), **{ "Tags": [{"Key": "tagkey", "Value": "tagvalue"}], "TimeToLiveSpecification": { diff --git a/tests/tests_e3_aws/troposphere/iam/iam_test.py b/tests/tests_e3_aws/troposphere/iam/iam_test.py index 3a9d68a2..de381220 100644 --- a/tests/tests_e3_aws/troposphere/iam/iam_test.py +++ b/tests/tests_e3_aws/troposphere/iam/iam_test.py @@ -95,7 +95,7 @@ def test_trust_roles(stack: Stack) -> None: name="TestRole", description="TestRole description", trust=Trust( - roles=[(123456789012, "OtherRole")], actions=["sts:SetSourceIdentity"] + roles=[("123456789012", "OtherRole")], actions=["sts:SetSourceIdentity"] ), ) ) From 3103a3a9d249a59dfdc44a7c6f113bedc83ef9be Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9r=C3=A9my=20Morosi?= Date: Mon, 7 Oct 2024 15:56:48 +0200 Subject: [PATCH 2/2] Use the cookiecutter template for e3-aws Ref it/org/operation_support/iaas/projects#84 --- .gitlab-ci.yml | 196 ++++++++++++++++++++++++++++++++++++++-------- pyproject.toml | 4 + tests/__init__.py | 0 tox.ini | 36 +++++---- 4 files changed, 188 insertions(+), 48 deletions(-) create mode 100644 tests/__init__.py diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index c30aa42c..4b6114ed 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,39 +1,88 @@ variables: - GITLAB_REMOTE: "https://gitlab-ci-token:${CI_JOB_TOKEN}@${CI_SERVER_HOST}:${CI_SERVER_PORT}/" + GITLAB_REMOTE: + description: "The remote gitlab URL used." + value: "https://gitlab-ci-token:${CI_JOB_TOKEN}@${CI_SERVER_HOST}:${CI_SERVER_PORT}/" + LATEST_PYTHON: + description: "The latest python version used to test this project." 
+    options:
+      - "3.9"
+      - "3.10"
+      - "3.11"
+      - "3.12"
+    value: "3.12"
 
 stages:
-  - check
-  - test
+  - checks
+  - tests linux
+  - tests windows
 
 default:
+  services:
+    - run_as_root:false
   interruptible: true
+
+# Common
+
+.tox-common:
+  before_script:
+    - python -m pip install --force tox
+  script:
+    # Should be quoted using \' to deal with ':' in the command
+    - 'echo "Tox run environment: ${CI_TOX_ENV:=py${PYTHON_VERSION:0:1}${PYTHON_VERSION:2:2}-cov-xdist}"'
+    - python -m tox --colored yes -e ${CI_TOX_ENV}
+
+### Linux jobs ###
+
+.linux-image:
   services:
     - image:all-pythons
   before_script:
-    - git config --global --add
-      url."${GITLAB_REMOTE}/it/black.git".insteadOf
-      https://github.com/ambv/black
-    - git config --global --add
-      url."${GITLAB_REMOTE}/it/flake8.git".insteadOf
-      https://github.com/pycqa/flake8
-    - source /it/activate-${PYTHON_VERSION}
-    - python -m pip install --force tox
+    - source /it/activate-py${PYTHON_VERSION:0:1}${PYTHON_VERSION:2:2}
+    - python -m pip install -U pip
+
+.linux-common:
+  extends:
+    - .linux-image
+    - .tox-common
+  before_script:
+    - !reference [.linux-image, before_script]
+    - !reference [.tox-common, before_script]
+
+# Stage: Checks
 
 check:
-  stage: check
-  script:
+  stage: checks
+  extends: .linux-common
+  needs: []
+  before_script:
+    - !reference [.linux-common, before_script]
+    - git config --global --add
+      url."${GITLAB_REMOTE}/it/black.git".insteadOf
+      https://github.com/ambv/black
+    - git config --global --add
+      url."${GITLAB_REMOTE}/it/flake8.git".insteadOf
+      https://github.com/pycqa/flake8
     - python -m pip install pre-commit
     - pre-commit install
+  script:
     - pre-commit run -a --show-diff-on-failure
-    - python -m tox --colored yes -e check
+    - !reference [.linux-common, script]
   variables:
-    PYTHON_VERSION: py311
+    PYTHON_VERSION: ${LATEST_PYTHON}
+    CI_TOX_ENV: check
+  rules:
+    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
 
-.test-py-common:
-  stage: test
-  script:
-    - python -m tox --colored yes -e ${PYTHON_VERSION}-cov
+.test-linux:
+  stage: tests linux
+  extends: .linux-common
+  services:
+    - !reference [.linux-common, services]
+    - cpu:4
   coverage: '/(?i)total.*? (100(?:\.0+)?\%|[1-9]?\d(?:\.\d+)?\%)$/'
+  parallel:
+    matrix:
+      - PYTHON_VERSION: ["3.9", "3.10", "3.11", "3.12"]
   artifacts:
     when: always
     paths:
@@ -44,23 +93,102 @@ check:
         path: coverage.xml
       junit: pytest-report.xml
 
-test-py39:
-  extends: .test-py-common
-  variables:
-    PYTHON_VERSION: py39
+linux python:
+  extends: .test-linux
+  needs: ["check"]
+  rules:
+    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
 
-test-py310:
-  extends: .test-py-common
-  variables:
-    PYTHON_VERSION: py310
+# A job triggered by 'run linux tests'. This job will run without waiting for any other
+# jobs.
+linux python (always):
+  extends: .test-linux
+  needs: []
+  rules:
+    - if: $CI_PIPELINE_SOURCE == "parent_pipeline" && $CI_MERGE_REQUEST_ID && $CI_PROJECT_NAME == "e3-core" && $ALWAYS_LINUX_TESTS == "y"
 
-test-py311:
-  extends: .test-py-common
+# A manual job to run Linux tests even if "check" job has failed
+run linux tests:
+  stage: tests linux
+  needs: []
+  trigger:
+    include: .gitlab-ci.yml
+    strategy: depend
   variables:
-    PYTHON_VERSION: py311
+    ALWAYS_LINUX_TESTS: "y"
+    ALWAYS_WINDOWS_TESTS: "n"
+  rules:
+    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
+      when: manual
+      allow_failure: true
+      # Contrary to what the documentation might suggest, manual_confirmation
+      # is not currently usable with our gitlab.
+      # However, when it is, adding a manual confirmation to warn the user that
+      # this job should only be used when previous steps have failed seems
+      # useful. Something like:
+      #
+      # manual_confirmation: |-
+      #   Are you sure you want to run Linux tests?
+      #
+      #   This is only useful if the previous stages have failed and you still want to run the tests.
 
+### Windows jobs ###
+.windows-image:
+  services:
+    - image:e3-windows-core-2022
+    - platform:x86_64-windows-2022
+    - cpu:2
+    - mem:4
+  before_script:
+    - source /it/activate-python ${PYTHON_VERSION}
+    - mkdir -p "C:/tmp/Trash"
+    - python -m pip install -U pip
+
+.windows-common:
+  extends:
+    - .windows-image
+    - .tox-common
+  before_script:
+    - !reference [.windows-image, before_script]
+    - !reference [.tox-common, before_script]
+
+.test-windows:
+  stage: tests windows
+  extends: .windows-common
+  parallel:
+    matrix:
+      - PYTHON_VERSION: ["3.9", "3.10", "3.11", "3.12"]
+
+# A job triggered by 'run windows tests'. This job will run without waiting for any other
+# jobs.
+windows python (always):
+  extends: .test-windows
+  needs: []
+  rules:
+    - if: $CI_PIPELINE_SOURCE == "parent_pipeline" && $CI_MERGE_REQUEST_ID && $CI_PROJECT_NAME == "e3-aws" && $ALWAYS_WINDOWS_TESTS == "y"
+
+# A manual job to run Windows tests even if previous jobs have failed
+run windows tests:
+  stage: tests windows
+  needs: []
+  trigger:
+    include: .gitlab-ci.yml
+    strategy: depend
+  variables:
+    ALWAYS_LINUX_TESTS: "n"
+    ALWAYS_WINDOWS_TESTS: "y"
+  rules:
+    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
+      when: manual
+      allow_failure: true
+      # Contrary to what the documentation might suggest, manual_confirmation
+      # is not currently usable with our gitlab.
+      # However, when it is, adding a manual confirmation to warn the user that
+      # this job should only be used when previous steps have failed seems
+      # useful. Something like:
+      #
+      # manual_confirmation: |-
+      #   Are you sure you want to run Windows tests?
+      #
+      #   This is only useful if the previous stages have failed and you still want to run the tests.
diff --git a/pyproject.toml b/pyproject.toml index 653233ae..0d8fe0de 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -29,6 +29,7 @@ s3-boto3 = "e3.aws.handler.s3:S3Handler" test = [ "awscli", "pytest", + "pytest-html", "mock", "requests_mock", "httpretty", @@ -38,6 +39,9 @@ test = [ check = [ "mypy==1.8.0", + "pytest", + "flask", + "moto[sts, dynamodb]", "bandit", "pip-audit", "types-colorama", diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tox.ini b/tox.ini index 26266b69..5ef7fe0a 100644 --- a/tox.ini +++ b/tox.ini @@ -1,36 +1,44 @@ [tox] -envlist = py311-cov,check -isolated_build = True +envlist = + py311-cov-xdist, + check +isolated_build=True [testenv] +passenv = CI,GITHUB_*,CODECOV_* + deps = + xdist: pytest-xdist[psutil] cov: pytest-cov codecov: codecov -extras = - test - -passenv = CI,GITHUB_*,CODECOV_* +extras = test -# Run testsuite with coverage when '-cov' is in the env name +# Run testsuite with coverage when '-cov' and with multi-threading when '-xdist' +# is in the env name commands= - pytest --ignore=build -vv \ + pytest -vv --ignore=build -v --html=pytest-report.html \ + --junit-xml=pytest-report.xml --self-contained-html \ + xdist: -n auto \ cov: --e3-cov-rewrite {envsitepackagesdir} src \ cov: --cov {envsitepackagesdir}/e3/aws --cov-report= --cov-fail-under=0 \ [] codecov: codecov [testenv:check] -# Run mypy, pip audit, and bandit extras = - config - check + config + check commands = - bandit -r src/e3 -ll -ii -s B102,B108,B202,B301,B303,B506 - mypy -- {toxinidir}/src +# Run bandit checks. Accept yaml.load(), pickle, and exec since this +# is needed by e3. There is also e3.env.tmp_dir that returns the TMPDIR +# environment variable. Don't check for that. +# Ignore B324 that is no longer similar to B303 since Python3.9. + bandit -r {toxinidir}/src -ll -ii -s B102,B108,B301,B506,B303,B324,B202 + mypy {toxinidir}/src {toxinidir}/tests [flake8] exclude = .git,__pycache__,build,dist,.tox -ignore = B905, C901, E203, E266, E501, W503,D100,D101,D102,D102,D103,D104,D105,D106,D107,D203,D403,D213 +ignore = B301,C901,E203,E266,E501,W503,D100,D101,D102,D102,D103,D104,D105,D106,D107,D203,D403,D213,E704,B905 # line length is intentionally set to 80 here because black uses Bugbear # See https://github.com/psf/black/blob/master/README.md#line-length for more details max-line-length = 80