diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f26d76066..5eebe5a31 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,28 +1,11 @@ --- repos: - # Normalise all Python code. (Black + isort + pyupgrade + autoflake) - - repo: https://github.com/Zac-HD/shed - rev: 2024.3.1 + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.3.5 hooks: - - id: shed - # Python Linting - - repo: https://github.com/pycqa/flake8 - rev: 7.0.0 - hooks: - - id: flake8 - exclude: ^docs/ - additional_dependencies: - - flake8-bugbear # Lint-checks too opinionated for flake8 proper - - flake8-builtins # Don't allow built-in names like list - - flake8-coding # Only UTF-8 - - flake8-debugger # Don't commit debugger calls - - flake8-executable # Check shebangs and executable permissions - - flake8-logging-format # Use log arguments, not string format - - flake8-pep3101 # Don't use old string % formatting - - flake8-pytest-style # Avoid common pytest mistakes - - flake8-pytest # Use plain assert, not unittest assertions - - flake8-rst-docstrings # docstring should be valid ReST - - pep8-naming # Follow pep8 naming rules (eg. function names lowercase) + - id: ruff + args: [--fix, --show-fixes, --output-format, grouped] + - id: ruff-format # Lint Python snippets embedded in Markdown (using flake8) - repo: https://github.com/johnfraney/flake8-markdown rev: v0.5.0 @@ -41,7 +24,7 @@ repos: args: ['-c', '.yamllint'] # Common pre-commit checks - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.5.0 + rev: v4.6.0 hooks: - id: check-added-large-files # We don't want huge files. (Cut down test data!) args: ['--maxkb=2000'] diff --git a/cubedash/_audit.py b/cubedash/_audit.py index 4e590d6f6..f1f2f6c77 100644 --- a/cubedash/_audit.py +++ b/cubedash/_audit.py @@ -8,7 +8,8 @@ from datacube.model import Range from flask import Blueprint, Response, redirect, url_for -from . import _model, _utils as utils +from . import _model +from . import _utils as utils _LOG = logging.getLogger(__name__) bp = Blueprint( diff --git a/cubedash/_dataset.py b/cubedash/_dataset.py index 831dc3147..da45a32dc 100644 --- a/cubedash/_dataset.py +++ b/cubedash/_dataset.py @@ -4,7 +4,8 @@ import flask from flask import Blueprint, abort, url_for -from . import _model, _utils as utils +from . import _model +from . import _utils as utils _LOG = logging.getLogger(__name__) bp = Blueprint( diff --git a/cubedash/_filters.py b/cubedash/_filters.py index e4209312b..f9182a834 100644 --- a/cubedash/_filters.py +++ b/cubedash/_filters.py @@ -18,7 +18,8 @@ from orjson import orjson from shapely.geometry import MultiPolygon -from . import _model, _utils, _utils as utils +from . import _model, _utils +from . import _utils as utils # How far to step the number when the user hits up/down. NUMERIC_STEP_SIZE = { @@ -173,9 +174,7 @@ def _format_albers_area(shape: MultiPolygon): @bp.app_template_filter("query_value") def _format_query_value(val): if isinstance(val, Range): - return "{} to {}".format( - _format_query_value(val.begin), _format_query_value(val.end) - ) + return f"{_format_query_value(val.begin)} to {_format_query_value(val.end)}" if isinstance(val, datetime): return _format_datetime(val) if val is None: diff --git a/cubedash/_pages.py b/cubedash/_pages.py index 83dfbc252..87ea3594e 100644 --- a/cubedash/_pages.py +++ b/cubedash/_pages.py @@ -28,6 +28,8 @@ _product, _stac, _stac_legacy, +) +from . 
import ( _utils as utils, ) from ._utils import as_rich_json, get_sorted_product_summaries @@ -160,7 +162,7 @@ def legacy_search_page( @app.route("/products//datasets/") @app.route("/products//datasets//") @app.route("/products//datasets///") -def search_page( # noqa: C901 +def search_page( product_name: str = None, year: int = None, month: int = None, day: int = None ): ( @@ -427,7 +429,9 @@ def timeline_page(product_name: str): return redirect(url_for("product_page", product_name=product_name)) -def _load_product(product_name, year, month, day) -> Tuple[ +def _load_product( + product_name, year, month, day +) -> Tuple[ DatasetType, ProductSummary, TimePeriodOverview, diff --git a/cubedash/_product.py b/cubedash/_product.py index 13e90500b..28f815298 100644 --- a/cubedash/_product.py +++ b/cubedash/_product.py @@ -3,7 +3,8 @@ from flask import Blueprint, Response, abort, redirect, url_for -from cubedash import _model, _utils, _utils as utils +from cubedash import _model, _utils +from cubedash import _utils as utils _LOG = logging.getLogger(__name__) bp = Blueprint("product", __name__) diff --git a/cubedash/_stac.py b/cubedash/_stac.py index 737a7f108..0c94c84bf 100644 --- a/cubedash/_stac.py +++ b/cubedash/_stac.py @@ -1,7 +1,8 @@ import json import logging import uuid -from datetime import datetime, time as dt_time, timedelta +from datetime import datetime, timedelta +from datetime import time as dt_time from functools import partial from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union @@ -10,7 +11,8 @@ from datacube.model import Dataset, Range from datacube.utils import DocReader, parse_time from dateutil.tz import tz -from eodatasets3 import serialise, stac as eo3stac +from eodatasets3 import serialise +from eodatasets3 import stac as eo3stac from eodatasets3.model import AccessoryDoc, DatasetDoc, MeasurementDoc, ProductDoc from eodatasets3.properties import Eo3Dict from eodatasets3.utils import is_doc_eo3 @@ -288,7 +290,7 @@ def field_path_row(key, value): elif key == "sat_row": kind = "landsat:wrs_row" else: - raise ValueError(f"Path/row kind {repr(key)}") + raise ValueError(f"Path/row kind {key!r}") # If there's only one value in the range, return it. 
if isinstance(value, Range): @@ -623,7 +625,7 @@ def _handle_fields_extension( keys=inc.split("."), # get corresponding field from item # disallow default to avoid None values being inserted - func=lambda _: _get_property(inc, item, no_default=True), # noqa: B023 + func=lambda _: _get_property(inc, item, no_default=True), ) for exc in fields.get("exclude") or []: diff --git a/cubedash/_utils.py b/cubedash/_utils.py index 249c7c366..5efe333dd 100644 --- a/cubedash/_utils.py +++ b/cubedash/_utils.py @@ -18,7 +18,7 @@ import datacube.drivers.postgres._schema import eodatasets3.serialise import flask -import numpy +import numpy as np import shapely.geometry import shapely.validation import structlog @@ -578,7 +578,7 @@ def as_yaml(*o, content_type="text/yaml", downloadable_filename_prefix: str = No # TODO: remove the two functions once eo-datasets fix is released def _represent_float(self, value): - text = numpy.format_float_scientific(value) + text = np.format_float_scientific(value) return self.represent_scalar("tag:yaml.org,2002:float", text) def dumps_yaml(yml, stream, *docs) -> None: diff --git a/cubedash/generate.py b/cubedash/generate.py index e7161581b..aa0305a7c 100755 --- a/cubedash/generate.py +++ b/cubedash/generate.py @@ -52,6 +52,7 @@ """ + import collections import multiprocessing import re @@ -64,7 +65,8 @@ import click import structlog -from click import secho as click_secho, style +from click import secho as click_secho +from click import style from datacube.config import LocalConfig from datacube.index import Index, index_connect from datacube.model import DatasetType @@ -75,7 +77,7 @@ GenerateResult, SummaryStore, TimePeriodOverview, - UnsupportedWKTProductCRS, + UnsupportedWKTProductCRSError, ) from cubedash.summary._stores import DEFAULT_EPSG from cubedash.summary._summarise import DEFAULT_TIMEZONE @@ -132,7 +134,7 @@ def print_status(product_name=None, year=None, month=None, day=None, summary=Non minimum_change_scan_window=settings.minimum_change_scan_window, ) return product_name, result, updated_summary - except UnsupportedWKTProductCRS as e: + except UnsupportedWKTProductCRSError as e: log.warning("product.unsupported", reason=e.reason) return product_name, GenerateResult.UNSUPPORTED, None except Exception: @@ -230,7 +232,7 @@ def _load_products(index: Index, product_names) -> List[DatasetType]: p.name for p in index.products.get_all() ) raise click.BadParameter( - f"Unknown product {repr(product_name)}.\n\n" + f"Unknown product {product_name!r}.\n\n" f"Possibilities:\n\t{possible_product_names}", param_hint="product_names", ) diff --git a/cubedash/gunicorn_config.py b/cubedash/gunicorn_config.py index 249a78ad9..3768637b3 100644 --- a/cubedash/gunicorn_config.py +++ b/cubedash/gunicorn_config.py @@ -1,5 +1,4 @@ -"""Gunicorn config for Prometheus internal metrics -""" +"""Gunicorn config for Prometheus internal metrics""" import os diff --git a/cubedash/summary/__init__.py b/cubedash/summary/__init__.py index 9cb2201c0..c931eda04 100644 --- a/cubedash/summary/__init__.py +++ b/cubedash/summary/__init__.py @@ -1,4 +1,4 @@ -from ._extents import RegionInfo, UnsupportedWKTProductCRS +from ._extents import RegionInfo, UnsupportedWKTProductCRSError from ._model import TimePeriodOverview from ._stores import ( DatasetItem, @@ -18,5 +18,5 @@ "RegionInfo", "SummaryStore", "TimePeriodOverview", - "UnsupportedWKTProductCRS", + "UnsupportedWKTProductCRSError", ) diff --git a/cubedash/summary/_extents.py b/cubedash/summary/_extents.py index cdb9ace9b..b9d709de6 100644 --- 
a/cubedash/summary/_extents.py +++ b/cubedash/summary/_extents.py @@ -40,6 +40,8 @@ from cubedash._utils import ( ODC_DATASET as DATASET, +) +from cubedash._utils import ( alchemy_engine, expects_eo3_metadata_type, infer_crs, @@ -54,7 +56,7 @@ ] -class UnsupportedWKTProductCRS(NotImplementedError): +class UnsupportedWKTProductCRSError(NotImplementedError): """We can't, within Postgis, support arbitrary WKT CRSes at the moment.""" def __init__(self, reason: str) -> None: @@ -83,7 +85,7 @@ def get_dataset_extent_alchemy_expression(md: MetadataType, default_crs: str = N [ # If we have geometry, use it as the polygon. ( - doc[["geometry"]] != None, + doc[["geometry"]].is_not(None), func.ST_GeomFromGeoJSON(doc[["geometry"]], type_=Geometry), ) ], @@ -99,7 +101,7 @@ def get_dataset_extent_alchemy_expression(md: MetadataType, default_crs: str = N [ # If we have valid_data offset, use it as the polygon. ( - doc[valid_data_offset] != None, + doc[valid_data_offset].is_not(None), func.ST_GeomFromGeoJSON(doc[valid_data_offset], type_=Geometry), ) ], @@ -168,7 +170,7 @@ def get_dataset_srid_alchemy_expression(md: MetadataType, default_crs: str = Non # HACK: Change default CRS with inference inferred_crs = infer_crs(default_crs) if inferred_crs is None: - raise UnsupportedWKTProductCRS( + raise UnsupportedWKTProductCRSError( f"WKT Product CRSes are not currently well supported, and " f"we can't infer this product's one. " f"(Ideally use an auth-name format for CRS, such as 'EPSG:1234') " @@ -319,7 +321,8 @@ def refresh_spatial_extents( "spatial_deletion_full_scan", ) changed += engine.execute( - DATASET_SPATIAL.delete().where( + DATASET_SPATIAL.delete() + .where( DATASET_SPATIAL.c.dataset_type_ref == product.id, ) # Where it doesn't exist in the ODC dataset table. @@ -926,7 +929,7 @@ def get_sample_dataset(*product_names: str, index: Index = None) -> Iterable[Dic DATASET.c.dataset_type_ref == bindparam("product_ref", product.id, type_=SmallInteger) ) - .where(DATASET.c.archived == None) + .where(DATASET.c.archived.is_(None)) .limit(1) ) .fetchone() @@ -968,7 +971,7 @@ def get_mapped_crses(*product_names: str, index: Index = None) -> Iterable[Dict] ] ) .where(DATASET.c.dataset_type_ref == product.id) - .where(DATASET.c.archived == None) + .where(DATASET.c.archived.is_(None)) .limit(1) ) .fetchone() diff --git a/cubedash/summary/_schema.py b/cubedash/summary/_schema.py index 81890ae2f..c700fa971 100644 --- a/cubedash/summary/_schema.py +++ b/cubedash/summary/_schema.py @@ -12,7 +12,6 @@ Column, Date, DateTime, - Enum as SqlEnum, ForeignKey, Index, Integer, @@ -26,6 +25,9 @@ func, select, ) +from sqlalchemy import ( + Enum as SqlEnum, +) from sqlalchemy.dialects import postgresql as postgres from sqlalchemy.engine import Engine from sqlalchemy.exc import ProgrammingError @@ -278,7 +280,7 @@ def is_compatible_generate_schema(engine: Engine) -> bool: return is_latest and pg_column_exists(engine, ODC_DATASET.fullname, "updated") -class SchemaNotRefreshable(Exception): +class SchemaNotRefreshableError(Exception): """The schema is not set-up for running product refreshes""" ... @@ -368,7 +370,7 @@ def check_or_update_odc_schema(engine: Engine): _utils.install_timestamp_trigger(engine) except ProgrammingError as e: # We don't have permission. - raise SchemaNotRefreshable( + raise SchemaNotRefreshableError( dedent( """ Missing update triggers. 
diff --git a/cubedash/summary/_stores.py b/cubedash/summary/_stores.py index c44e47c96..0315191c6 100644 --- a/cubedash/summary/_stores.py +++ b/cubedash/summary/_stores.py @@ -25,7 +25,8 @@ import structlog from cachetools.func import lru_cache, ttl_cache from dateutil import tz -from geoalchemy2 import WKBElement, shape as geo_shape +from geoalchemy2 import WKBElement +from geoalchemy2 import shape as geo_shape from geoalchemy2.shape import from_shape, to_shape from shapely.geometry.base import BaseGeometry from sqlalchemy import DDL, String, and_, exists, func, literal, or_, select, union_all @@ -713,7 +714,7 @@ def _find_product_fixed_metadata( ] ) .select_from(ODC_DATASET) - .where(ODC_DATASET.c.id.in_([r for r, in dataset_samples])) + .where(ODC_DATASET.c.id.in_([r for (r,) in dataset_samples])) ).fetchall() assert len(result) == 1 @@ -1633,7 +1634,7 @@ def _mark_product_refresh_completed( ) self._product.cache_clear() - @lru_cache() # noqa: B019 + @lru_cache() def _get_srid_name(self, srid: int): """ Convert an internal postgres srid key to a string auth code: eg: 'EPSG:1234' diff --git a/cubedash/summary/_summarise.py b/cubedash/summary/_summarise.py index 9f3549718..354530d55 100644 --- a/cubedash/summary/_summarise.py +++ b/cubedash/summary/_summarise.py @@ -9,7 +9,8 @@ from cachetools.func import lru_cache from datacube.model import Range from dateutil import tz -from geoalchemy2 import Geometry, shape as geo_shape +from geoalchemy2 import Geometry +from geoalchemy2 import shape as geo_shape from sqlalchemy import and_, func, or_, select from sqlalchemy.dialects.postgresql import TSTZRANGE from sqlalchemy.sql import ColumnElement @@ -238,7 +239,7 @@ def _where( ) return begin_time, end_time, where_clause - @lru_cache() # noqa: B019 + @lru_cache() def _get_srid_name(self, srid: int): """ Convert an internal postgres srid key to a string auth code: eg: 'EPSG:1234' diff --git a/cubedash/warmup.py b/cubedash/warmup.py index feb1c7627..379fb87cc 100644 --- a/cubedash/warmup.py +++ b/cubedash/warmup.py @@ -153,11 +153,9 @@ def cli( def handle_failure(): nonlocal consecutive_failures consecutive_failures += 1 - failures.append(url) # noqa: B023 + failures.append(url) # Back off slightly for network hiccups. 
- time.sleep( - max(throttle_seconds, 1) * (consecutive_failures + 1) - ) # noqa: B023 + time.sleep(max(throttle_seconds, 1) * (consecutive_failures + 1)) try: start_time = time.time() diff --git a/integration_tests/asserts.py b/integration_tests/asserts.py index 340b81006..863061b22 100644 --- a/integration_tests/asserts.py +++ b/integration_tests/asserts.py @@ -231,10 +231,10 @@ def expect_values( dataset_count {s.dataset_count} footprint_count {s.footprint_count} time range: - - {repr(s.time_range.begin.astimezone(tzutc()))} - - {repr(s.time_range.end.astimezone(tzutc()))} - newest: {repr(s.newest_dataset_creation_time.astimezone(tzutc()))} - crses: {repr(s.crses)} + - {s.time_range.begin.astimezone(tzutc())!r} + - {s.time_range.end.astimezone(tzutc())!r} + newest: {s.newest_dataset_creation_time.astimezone(tzutc())!r} + crses: {s.crses!r} size_bytes: {s.size_bytes} timeline period: {s.timeline_period} @@ -244,12 +244,12 @@ def expect_values( if was_timeline_error: print("timeline keys:") for day, count in s.timeline_dataset_counts.items(): - print(f"\t{repr(day)}: {count}") + print(f"\t{day!r}: {count}") if was_regions_error: print("region keys:") for region, count in s.region_dataset_counts.items(): - print(f"\t{repr(region)}: {count}") + print(f"\t{region!r}: {count}") raise diff --git a/integration_tests/test_center_datetime_logic.py b/integration_tests/test_center_datetime_logic.py index 0357cc358..afc67c7fb 100644 --- a/integration_tests/test_center_datetime_logic.py +++ b/integration_tests/test_center_datetime_logic.py @@ -43,17 +43,21 @@ def test_datestring_on_dataset_page(client: FlaskClient): def test_datestring_on_datasets_search_page(client: FlaskClient): html = get_html(client, "/products/rainfall_chirps_daily/datasets") - assert "Time UTC: 2019-05-15 00:00:00" in [ - a.find("td", first=True).attrs["title"] for a in html.find(".search-result") - ], "datestring does not match expected center_time recorded in dataset_spatial table" + assert ( + "Time UTC: 2019-05-15 00:00:00" + in [ + a.find("td", first=True).attrs["title"] for a in html.find(".search-result") + ] + ), "datestring does not match expected center_time recorded in dataset_spatial table" def test_datestring_on_regions_page(client: FlaskClient): html = get_html(client, "/product/rainfall_chirps_daily/regions/x210y106") - assert "2019-05-15 00:00:00" in [ - a.find("td", first=True).text.strip() for a in html.find(".search-result") - ], "datestring does not match expected center_time recorded in dataset_spatial table" + assert ( + "2019-05-15 00:00:00" + in [a.find("td", first=True).text.strip() for a in html.find(".search-result")] + ), "datestring does not match expected center_time recorded in dataset_spatial table" def test_summary_center_datetime(client: FlaskClient): diff --git a/integration_tests/test_dataset_listing.py b/integration_tests/test_dataset_listing.py index d271b1911..96ce2a842 100644 --- a/integration_tests/test_dataset_listing.py +++ b/integration_tests/test_dataset_listing.py @@ -70,7 +70,10 @@ def test_default_args(dea_index: Index): res = query_to_search(MultiDict(()), product) # The last month of LANDSAT_5 for this product - assert res == dict( - # time=Range(datetime(2011, 10, 30), datetime(2011, 11, 30)), - # product=product.name + assert ( + res + == dict( + # time=Range(datetime(2011, 10, 30), datetime(2011, 11, 30)), + # product=product.name + ) ) diff --git a/integration_tests/test_filter_geom.py b/integration_tests/test_filter_geom.py index 5c38555f7..f2e3b9ccf 100644 --- 
a/integration_tests/test_filter_geom.py +++ b/integration_tests/test_filter_geom.py @@ -61,7 +61,7 @@ def test_nested_exception(testing_polygon): ) polygonlist = _polygon_chain(testing_polygon) - assert type(polygonlist) is list + assert isinstance(polygonlist, list) assert len(polygonlist) == 262 filtered_geom = _filter_geom(polygonlist) assert len(filtered_geom) == 199 diff --git a/integration_tests/test_stac.py b/integration_tests/test_stac.py index f8a922e57..2a96fdc54 100644 --- a/integration_tests/test_stac.py +++ b/integration_tests/test_stac.py @@ -227,7 +227,7 @@ def get_extension(url: str) -> jsonschema.Draft7Validator: def get_collection(client: FlaskClient, url: str, validate=True) -> Dict: """ Get a URL, expecting a valid stac collection document to be there""" - with DebugContext(f"Requested {repr(url)}"): + with DebugContext(f"Requested {url!r}"): data = get_json(client, url) if validate: assert_collection(data) @@ -237,7 +237,7 @@ def get_collection(client: FlaskClient, url: str, validate=True) -> Dict: def get_items(client: FlaskClient, url: str) -> Dict: """ Get a URL, expecting a valid stac item collection document to be there""" - with DebugContext(f"Requested {repr(url)}"): + with DebugContext(f"Requested {url!r}"): data = get_geojson(client, url) assert_item_collection(data) return data @@ -247,7 +247,7 @@ def get_item(client: FlaskClient, url: str) -> Dict: """ Get a URL, expecting a single valid Stac Item to be there """ - with DebugContext(f"Requested {repr(url)}"): + with DebugContext(f"Requested {url!r}"): data = get_json(client, url) validate_item(data) return data @@ -342,7 +342,7 @@ def validate_items( product_counts = Counter() for item in items: id_ = item["id"] - with DebugContext(f"Invalid item {i}, id {repr(str(id_))}"): + with DebugContext(f"Invalid item {i}, id {str(id_)!r}"): validate_item(item) product_counts[item["properties"].get("odc:product", item["collection"])] += 1 @@ -601,7 +601,7 @@ def test_stac_links(stac_client: FlaskClient): href: str = child_link["href"] # ignore child links corresponding to catalogs if "catalogs" not in href: - print(f"Loading collection page for {product_name}: {repr(href)}") + print(f"Loading collection page for {product_name}: {href!r}") collection_data = get_collection(stac_client, href, validate=True) assert collection_data["id"] == product_name @@ -1255,7 +1255,7 @@ def test_stac_search_by_post(stac_client: FlaskClient): # TODO: These are the same file in a NetCDF. They should probably be one asset? 
assert len(feature["assets"]) == len( bands - ), f"Expected an asset per band, got {repr(feature['assets'])}" + ), f"Expected an asset per band, got {feature['assets']!r}" assert set(feature["assets"].keys()) == set(bands) while bands: band = bands.pop() diff --git a/integration_tests/test_utc_tst.py b/integration_tests/test_utc_tst.py index 7de1da6f3..0d3b08a8f 100644 --- a/integration_tests/test_utc_tst.py +++ b/integration_tests/test_utc_tst.py @@ -62,19 +62,24 @@ def test_yearly_dataset_count(client: FlaskClient): def test_dataset_search_page_localised_time(client: FlaskClient): html = get_html(client, "/products/ls5_fc_albers/datasets/2011") - assert "2011-01-01 09:03:13" in [ - a.find("td", first=True).text.strip() for a in html.find(".search-result") - ], "datestring does not match expected center_time recorded in dataset_spatial table" - - assert "Time UTC: 2010-12-31 23:33:13" in [ - a.find("td", first=True).attrs["title"] for a in html.find(".search-result") - ], "datestring does not match expected center_time recorded in dataset_spatial table" + assert ( + "2011-01-01 09:03:13" + in [a.find("td", first=True).text.strip() for a in html.find(".search-result")] + ), "datestring does not match expected center_time recorded in dataset_spatial table" + + assert ( + "Time UTC: 2010-12-31 23:33:13" + in [ + a.find("td", first=True).attrs["title"] for a in html.find(".search-result") + ] + ), "datestring does not match expected center_time recorded in dataset_spatial table" html = get_html(client, "/products/ls5_fc_albers/datasets/2010") - assert "2010-12-31 09:56:02" in [ - a.find("td", first=True).text.strip() for a in html.find(".search-result") - ], "datestring does not match expected center_time recorded in dataset_spatial table" + assert ( + "2010-12-31 09:56:02" + in [a.find("td", first=True).text.strip() for a in html.find(".search-result")] + ), "datestring does not match expected center_time recorded in dataset_spatial table" def test_clirunner_generate_grouping_timezone(odc_test_db, run_generate): diff --git a/pyproject.toml b/pyproject.toml index 5228ddd9d..1cfdaeba6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -23,3 +23,31 @@ exclude = ''' | cubedash/_version\.py )/ ''' + +[tool.ruff] +target-version = "py38" + +[tool.ruff.lint] +# Which checkers to enable? +select = [ + "A", # Don't shadow built-ins + "E", # pycodestyle + "EXE", # Shebangs+Executable permisssions should match + "F", # pyflakes + "G", # Use logging formatter, not manual string concat + "I", # Auto-sort imports + "ICN", # Use standard import names, like np for numpy + "N", # pep8-naming + "NPY", # Numpy + # "RUF", # Ruf-specific python rules? + # "S", # Bandit (security) -- explore warnings and enable in future? +] + +[tool.ruff.lint.per-file-ignores] +# The file deliberately doesn't put the import at the top, and we can't avoid global overrides +"docs/conf.py" = ["E402", "A001", "EXE001"] + +# Matching old behaviour: We auto-format with the smaller line default +# ... but only enforce line length to be under this larger 120 limit. +[tool.ruff.lint.pycodestyle] +max-line-length = 120
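A few notes on the recurring patterns in this change set.

Most of the string-formatting churn is the same rewrite applied many times: `repr(x)` inside an f-string becomes the `!r` conversion flag. The two spellings are equivalent; ruff can apply this rewrite automatically (RUF010), although the `RUF` group is left commented out in the config above. A minimal sketch, using a made-up product name:

```python
product_name = "ls5_fc_albers"

# repr() inside the braces and the !r conversion flag produce identical output.
old_style = f"Unknown product {repr(product_name)}."
new_style = f"Unknown product {product_name!r}."

assert old_style == new_style == "Unknown product 'ls5_fc_albers'."
```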
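In `cubedash/summary/_extents.py`, SQL-expression comparisons like `doc[["geometry"]] != None` become `.is_not(None)`, and `DATASET.c.archived == None` becomes `.is_(None)`. Both spellings compile to the same `IS NULL` / `IS NOT NULL` SQL, because SQLAlchemy overloads `==` and `!=` on column expressions; the explicit methods simply avoid the E711 comparison-to-None lint that the enabled `E` rule group enforces. A self-contained sketch against a hypothetical table, assuming SQLAlchemy 1.4 or newer:

```python
from sqlalchemy import Column, Integer, MetaData, Table, select

metadata = MetaData()
# Hypothetical stand-in for the ODC dataset table queried in _extents.py.
dataset = Table(
    "dataset",
    metadata,
    Column("id", Integer),
    Column("archived", Integer),
)

# Legacy spelling: still renders "archived IS NULL", but trips the E711 lint.
legacy = select(dataset.c.id).where(dataset.c.archived == None)  # noqa: E711

# Explicit spelling used in this diff: identical generated SQL, no lint warning.
explicit = select(dataset.c.id).where(dataset.c.archived.is_(None))

assert str(legacy) == str(explicit)
```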
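Several `# noqa: B019` and `# noqa: B023` comments are dropped (the `lru_cache` methods in `_stores.py` and `_summarise.py`, the closure in `warmup.py`, the lambda in `_stac.py`). Those codes belong to flake8-bugbear, which the new ruff `select` list does not enable, so the suppressions were no longer suppressing anything. The concern behind B019 is still worth remembering: `functools.lru_cache` on an instance method keys the cache on `self`, so every instance that calls the method is kept alive by the cache. A small illustration of the flagged pattern (not project code):

```python
import functools


class SridNames:
    """Shows what flake8-bugbear's B019 warns about; bugbear is not enabled in the new ruff config."""

    @functools.lru_cache()
    def get_srid_name(self, srid: int) -> str:
        # The cache key includes `self`, so this instance stays referenced by the
        # method's cache (and is never garbage-collected) for the life of the process.
        return f"EPSG:{srid}"
```

The diff keeps the runtime behaviour unchanged; only the now-inert comments go.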
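Two exception classes gain an `Error` suffix (`UnsupportedWKTProductCRSError`, `SchemaNotRefreshableError`), matching pep8-naming's N818 rule from the enabled `N` group. `UnsupportedWKTProductCRSError` is re-exported from `cubedash.summary`, so anything outside this repo that imports or catches the old name will break; if that is a concern, a module-level alias is a cheap bridge. A sketch with a simplified class body (the alias is not part of this diff):

```python
class UnsupportedWKTProductCRSError(NotImplementedError):
    """Simplified stand-in for the renamed exception in cubedash/summary/_extents.py."""

    def __init__(self, reason: str) -> None:
        super().__init__(reason)
        self.reason = reason


# Hypothetical backwards-compatibility alias, only needed if downstream code
# still imports the old name from cubedash.summary.
UnsupportedWKTProductCRS = UnsupportedWKTProductCRSError
```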