diff --git a/eodatasets3/__init__.py b/eodatasets3/__init__.py
index 4209cbb6..fa2ffdc8 100644
--- a/eodatasets3/__init__.py
+++ b/eodatasets3/__init__.py
@@ -12,6 +12,7 @@ del get_versions
 
 
 __all__ = (
+    "REPO_URL",
     "DatasetAssembler",
     "DatasetDoc",
     "DatasetPrepare",
@@ -20,10 +21,9 @@
     "IfExists",
     "IncompleteDatasetError",
     "NamingConventions",
-    "namer",
-    "REPO_URL",
     "ValidDataMethod",
     "__version__",
+    "namer",
 )
 
 __version__ = _version.get_versions()["version"]
diff --git a/eodatasets3/assemble.py b/eodatasets3/assemble.py
index 082e4d2d..79bb3646 100644
--- a/eodatasets3/assemble.py
+++ b/eodatasets3/assemble.py
@@ -1018,7 +1018,7 @@ def format_list(items: list, max_len=60):
 
         return dedent(
             f"""
-            Assembling {product_name or ''} ({status})
+            Assembling {product_name or ""} ({status})
             - {len(measurements)} measurements: {format_list(measurements)}
             - {len(properties)} properties: {format_list(properties)}
             Writing to location: {output_location}
@@ -1420,7 +1420,7 @@ def _write_measurement(
         if file_format != self.properties["odc:file_format"]:
             raise RuntimeError(
                 f"Inconsistent file formats between bands. "
-                f"Was {self.properties['odc:file_format']!r}, now {file_format !r}"
+                f"Was {self.properties['odc:file_format']!r}, now {file_format!r}"
             )
 
         self._measurements.record_image(
diff --git a/eodatasets3/properties.py b/eodatasets3/properties.py
index d8d754df..65ba9ef6 100644
--- a/eodatasets3/properties.py
+++ b/eodatasets3/properties.py
@@ -457,9 +457,7 @@ def normalise_and_set(self, key, value, allow_override=True, expect_override=Fal
                 self.normalise_and_set(k, v, allow_override=allow_override)
 
         if key in self._props and value != self[key] and (not expect_override):
-            message = (
-                f"Overriding property {key!r} " f"(from {self[key]!r} to {value!r})"
-            )
+            message = f"Overriding property {key!r} (from {self[key]!r} to {value!r})"
             if allow_override:
                 warnings.warn(message, category=PropertyOverrideWarning)
             else:
diff --git a/eodatasets3/scripts/packagewagl.py b/eodatasets3/scripts/packagewagl.py
index 713079d8..a315a19e 100644
--- a/eodatasets3/scripts/packagewagl.py
+++ b/eodatasets3/scripts/packagewagl.py
@@ -78,7 +78,7 @@
 )
 @click.option(
     "--contiguity-resolution",
-    help="Resolution choice for contiguity " "(default: automatic based on sensor)",
+    help="Resolution choice for contiguity (default: automatic based on sensor)",
     type=float,
     default=None,
 )
diff --git a/eodatasets3/scripts/recompress.py b/eodatasets3/scripts/recompress.py
index 5dc83f1a..f7f75e37 100755
--- a/eodatasets3/scripts/recompress.py
+++ b/eodatasets3/scripts/recompress.py
@@ -346,8 +346,7 @@ def _recompress_image(
 @click.option(
     "--output-base",
     type=PathPath(file_okay=False, writable=True),
-    help="The base output directory "
-    "(default to same dir as input if --clean-inputs).",
+    help="The base output directory (default to same dir as input if --clean-inputs).",
 )
 @click.option(
     "--zlevel", type=click.IntRange(0, 9), default=5, help="Deflate compression level."
 )
@@ -417,7 +416,7 @@ def main(
             )
         else:
             raise ValueError(
-                f"Expected either tar.gz or a dataset folder. " f"Got: {path!r}"
+                f"Expected either tar.gz or a dataset folder. Got: {path!r}"
             )
 
         if not success:
diff --git a/eodatasets3/scripts/tostac.py b/eodatasets3/scripts/tostac.py
index 2a4484a3..aeefd746 100644
--- a/eodatasets3/scripts/tostac.py
+++ b/eodatasets3/scripts/tostac.py
@@ -63,7 +63,7 @@ def run(
         json.dump(jsonify_document(item_doc), f, indent=4, default=json_fallback)
 
     if verbose:
-        echo(f'Wrote {style(output_path.as_posix(), "green")}')
+        echo(f"Wrote {style(output_path.as_posix(), 'green')}")
 
 
 def dc_to_stac(
@@ -118,9 +118,7 @@ def json_fallback(o):
         return str(o)
 
     raise TypeError(
-        f"Unhandled type for json conversion: "
-        f"{o.__class__.__name__!r} "
-        f"(object {o!r})"
+        f"Unhandled type for json conversion: {o.__class__.__name__!r} (object {o!r})"
     )
 
 
diff --git a/eodatasets3/validate.py b/eodatasets3/validate.py
index 96321562..9a9c4f91 100644
--- a/eodatasets3/validate.py
+++ b/eodatasets3/validate.py
@@ -466,7 +466,7 @@ def _error(code: str, reason: str, hint: str = None):
             yield _error(
                 "different_nodata",
                 f"{name} nodata: "
-                f"product {expected_nodata !r} != dataset {ds_nodata !r}",
+                f"product {expected_nodata!r} != dataset {ds_nodata!r}",
             )
 
 
diff --git a/eodatasets3/wagl.py b/eodatasets3/wagl.py
index eca12466..ba2e31e6 100644
--- a/eodatasets3/wagl.py
+++ b/eodatasets3/wagl.py
@@ -427,7 +427,7 @@ def get_oa_resolution_group(
         oa_resolution = (20.0, 20.0)
     else:
         raise NotImplementedError(
-            f"Don't know how to choose a default OA resolution for platform {platform !r}"
+            f"Don't know how to choose a default OA resolution for platform {platform!r}"
        )
 
     res_grp = resolution_groups.get(oa_resolution)
diff --git a/tests/__init__.py b/tests/__init__.py
index cfdacd61..73469c0a 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -56,9 +56,9 @@ def assert_file_structure(folder, expected_structure, root=""):
     optional_filenames = {
         name for name, option in expected_structure.items() if option == "optional"
     }
-    assert (
-        folder.exists()
-    ), f"Expected base folder doesn't even exist! {folder.as_posix()!r}"
+    assert folder.exists(), (
+        f"Expected base folder doesn't even exist! {folder.as_posix()!r}"
+    )
 
     actual_filenames = {f.name for f in folder.iterdir()}
 
diff --git a/tests/common.py b/tests/common.py
index db945233..2362a544 100644
--- a/tests/common.py
+++ b/tests/common.py
@@ -29,7 +29,7 @@ def check_prepare_outputs(
     try:
         assert_expected_eo3_path(expected_doc, expected_metadata_path, ignore_fields)
     except AssertionError:
-        print(f'Output:\n{indent(res.output, " ")}')
+        print(f"Output:\n{indent(res.output, ' ')}")
         raise
 
 
@@ -44,9 +44,9 @@ def assert_expected_eo3_path(
     This is slightly smarter about doing geometry equality etc within the document.
     """
     __tracebackhide__ = operator.methodcaller("errisinstance", AssertionError)
-    assert (
-        expected_path.exists()
-    ), f"Expected output EO3 path doesn't exist: {expected_path}"
+    assert expected_path.exists(), (
+        f"Expected output EO3 path doesn't exist: {expected_path}"
+    )
     assert_same_as_file(
         expected_doc,
         expected_path,
@@ -112,9 +112,9 @@ def assert_shapes_mostly_equal(
     shape2 = shape(shape2)
 
     # Check area first, as it's a nicer error message when they're wildly different.
-    assert shape1.area == pytest.approx(
-        shape2.area, abs=threshold
-    ), f"Shapes have different areas: {shape1.area} != {shape2.area}"
+    assert shape1.area == pytest.approx(shape2.area, abs=threshold), (
+        f"Shapes have different areas: {shape1.area} != {shape2.area}"
+    )
 
     s1 = shape1.simplify(tolerance=threshold)
     s2 = shape2.simplify(tolerance=threshold)
@@ -194,8 +194,8 @@ def clean_offset(offset: str):
             out.extend(
                 (
                     f" {clean_offset(offset)}: ",
-                    f' {change["old_value"]!r}',
-                    f' != {change["new_value"]!r}',
+                    f" {change['old_value']!r}",
+                    f" != {change['new_value']!r}",
                 )
             )
     if "dictionary_item_added" in doc_diffs:
diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py
index 78b2ff99..d8de32b2 100644
--- a/tests/integration/__init__.py
+++ b/tests/integration/__init__.py
@@ -28,18 +28,18 @@ def assert_image(
     assert d.count == bands, f"Expected {bands} band{'s' if bands > 1 else ''}"
 
     if overviews is not allow_anything:
-        assert (
-            d.overviews(1) == overviews
-        ), f"Unexpected overview: {d.overviews(1)!r} != {overviews!r}"
+        assert d.overviews(1) == overviews, (
+            f"Unexpected overview: {d.overviews(1)!r} != {overviews!r}"
+        )
 
     if nodata is not allow_anything:
         assert d.nodata == nodata, f"Unexpected nodata: {d.nodata!r} != {nodata!r}"
 
     if unique_pixel_counts is not allow_anything:
         array = d.read(1)
         value_counts = dict(zip(*numpy.unique(array, return_counts=True)))
-        assert (
-            value_counts == unique_pixel_counts
-        ), f"Unexpected pixel counts: {value_counts!r} != {unique_pixel_counts!r}"
+        assert value_counts == unique_pixel_counts, (
+            f"Unexpected pixel counts: {value_counts!r} != {unique_pixel_counts!r}"
+        )
     if shape:
         assert shape == d.shape, f"Unexpected shape: {shape!r} != {d.shape!r}"
diff --git a/tests/integration/prepare/test_prepare_landsat_l1.py b/tests/integration/prepare/test_prepare_landsat_l1.py
index 3e9b479c..fd5a5a2e 100644
--- a/tests/integration/prepare/test_prepare_landsat_l1.py
+++ b/tests/integration/prepare/test_prepare_landsat_l1.py
@@ -596,9 +596,9 @@ def test_skips_old_datasets(l1_ls7_tarball):
         datetime.now().isoformat(),
         str(l1_ls7_tarball),
     )
-    assert (
-        not expected_metadata_path.exists()
-    ), "Dataset should have been skipped due to age"
+    assert not expected_metadata_path.exists(), (
+        "Dataset should have been skipped due to age"
+    )
 
     # It should work with an old date.
     run_prepare_cli(
         "--newer-than",
         "2014-05-04",
         str(l1_ls7_tarball),
     )
-    assert (
-        expected_metadata_path.exists()
-    ), "Dataset should have been packaged when using an ancient date cutoff"
+    assert expected_metadata_path.exists(), (
+        "Dataset should have been packaged when using an ancient date cutoff"
+    )
 
 
 def expected_lc08_l2_c2_post_20210507_folder(
diff --git a/tests/integration/prepare/test_prepare_sentinel_l1.py b/tests/integration/prepare/test_prepare_sentinel_l1.py
index 835e03a6..66b11ca8 100644
--- a/tests/integration/prepare/test_prepare_sentinel_l1.py
+++ b/tests/integration/prepare/test_prepare_sentinel_l1.py
@@ -477,9 +477,9 @@ def test_filter_folder_structure_info(
         output_folder,
         input_dataset_path,
     )
-    assert (
-        expected_metadata_path.exists()
-    ), f"Expected dataset to be processed (it's within the region file)! {res.output}"
+    assert expected_metadata_path.exists(), (
+        f"Expected dataset to be processed (it's within the region file)! {res.output}"
+    )
     expected_metadata_path.unlink()
 
     # Run with a region list that doesn't include our dataset region.
@@ -493,9 +493,9 @@ def test_filter_folder_structure_info(
         output_folder,
         input_dataset_path,
     )
-    assert (
-        not expected_metadata_path.exists()
-    ), f"Expected dataset to be filtered out! {res.output}"
+    assert not expected_metadata_path.exists(), (
+        f"Expected dataset to be filtered out! {res.output}"
+    )
 
     # Filter the time period
     res = run_prepare_cli(
@@ -508,9 +508,9 @@ def test_filter_folder_structure_info(
         output_folder,
         input_dataset_path,
     )
-    assert (
-        not expected_metadata_path.exists()
-    ), f"Expected dataset to be filtered out! {res.output}"
+    assert not expected_metadata_path.exists(), (
+        f"Expected dataset to be filtered out! {res.output}"
+    )
 
     # Filter the time period
     res = run_prepare_cli(
@@ -523,9 +523,9 @@ def test_filter_folder_structure_info(
         output_folder,
         input_dataset_path,
     )
-    assert (
-        not expected_metadata_path.exists()
-    ), f"Expected dataset to be filtered out! {res.output}"
+    assert not expected_metadata_path.exists(), (
+        f"Expected dataset to be filtered out! {res.output}"
+    )
 
     # Now run for real, expect an output.
     check_prepare_outputs(
diff --git a/tests/integration/test_image.py b/tests/integration/test_image.py
index b804d651..97a1942e 100644
--- a/tests/integration/test_image.py
+++ b/tests/integration/test_image.py
@@ -27,9 +27,9 @@ def test_rescale_intensity():
     )
 
     unmodified = original_image.copy()
-    assert np.array_equal(
-        original_image, unmodified
-    ), "rescale_intensity modified the input image"
+    assert np.array_equal(original_image, unmodified), (
+        "rescale_intensity modified the input image"
+    )
 
     staticly_rescaled = images.rescale_intensity(
         original_image, in_range=(4000, 6000), out_range=(100, 255), image_nodata=-999
diff --git a/tests/integration/test_packagewagl.py b/tests/integration/test_packagewagl.py
index 216cf837..5c7b2f0b 100644
--- a/tests/integration/test_packagewagl.py
+++ b/tests/integration/test_packagewagl.py
@@ -111,9 +111,9 @@ def test_whole_landsat_wagl_package(
     )
 
     [output_metadata] = expected_folder.rglob("*.odc-metadata.yaml")
-    assert reported_metadata == str(
-        output_metadata
-    ), "Cli didn't report the expected output path"
+    assert reported_metadata == str(output_metadata), (
+        "Cli didn't report the expected output path"
+    )
 
     # Checksum should include all files other than itself.
     [checksum_file] = expected_folder.rglob("*.sha1")
@@ -496,9 +496,9 @@ def _run_wagl(args):
 
     # The last line of output ends with the dataset path.
     words, reported_metadata = res.output.splitlines()[-1].rsplit(" ", 1)
-    assert (
-        res.exit_code == 0
-    ), f"WAGL returned error code. Output:\n{indent(res.output, ' ' * 4)}"
+    assert res.exit_code == 0, (
+        f"WAGL returned error code. Output:\n{indent(res.output, ' ' * 4)}"
+    )
     return reported_metadata
 
 
diff --git a/tests/integration/test_recompress.py b/tests/integration/test_recompress.py
index 56ca8a2a..fc17a8b8 100644
--- a/tests/integration/test_recompress.py
+++ b/tests/integration/test_recompress.py
@@ -67,9 +67,9 @@ def test_recompress_dataset(base_in_path: Path, in_offset: str, tmp_path: Path):
     )
 
     assert all_output_files == {str(expected_output.relative_to(output_base))}
-    assert (
-        expected_output.exists()
-    ), f"No output produced in expected location {expected_output}."
+    assert expected_output.exists(), (
+        f"No output produced in expected location {expected_output}."
+    )
 
     # It should contain all of our files
     checksums, members = _get_checksums_members(expected_output)
@@ -139,9 +139,9 @@ def test_recompress_gap_mask_dataset(tmp_path: Path):
     )
 
     assert all_output_files == [str(expected_output)]
-    assert (
-        expected_output.exists()
-    ), f"No output produced in expected location {expected_output}."
+    assert expected_output.exists(), (
+        f"No output produced in expected location {expected_output}."
+    )
 
     # It should contain all of our files
     checksums, members = _get_checksums_members(expected_output)
@@ -222,9 +222,9 @@ def test_recompress_dirty_dataset(tmp_path: Path):
     )
 
     assert all_output_files == [str(expected_output)]
-    assert (
-        expected_output.exists()
-    ), f"No output produced in expected location {expected_output}."
+    assert expected_output.exists(), (
+        f"No output produced in expected location {expected_output}."
+    )
 
     checksums, members = _get_checksums_members(expected_output)
 
@@ -359,8 +359,7 @@ def test_calculate_out_path(tmp_path: Path):
     mtl.write_text("fake mtl")
     assert_path_eq(
         out_base.joinpath(
-            "L1/092_091/LT50920911991126/"
-            "LT05_L1GS_092091_19910506_20170126_01_T2.tar"
+            "L1/092_091/LT50920911991126/LT05_L1GS_092091_19910506_20170126_01_T2.tar"
         ),
         recompress._output_tar_path_from_directory(out_base, path),
     )
diff --git a/tests/integration/test_validate.py b/tests/integration/test_validate.py
index c10b41d2..f2762cff 100644
--- a/tests/integration/test_validate.py
+++ b/tests/integration/test_validate.py
@@ -205,9 +205,9 @@ def assert_valid(self, *docs: Doc, expect_no_messages=True, suffix=None):
         __tracebackhide__ = operator.methodcaller("errisinstance", AssertionError)
         self.run_validate(docs, suffix=suffix or ".yaml")
         was_successful = self.result.exit_code == 0
-        assert (
-            was_successful
-        ), f"Expected validation to succeed. Output:\n{self.result.output}"
+        assert was_successful, (
+            f"Expected validation to succeed. Output:\n{self.result.output}"
+        )
 
         if expect_no_messages and self.messages:
             raise AssertionError(
@@ -218,18 +218,18 @@ def assert_invalid(self, *docs: Doc, codes: Sequence[str] = None, suffix=".yaml"):
         __tracebackhide__ = operator.methodcaller("errisinstance", AssertionError)
         self.run_validate(docs, suffix=suffix)
-        assert (
-            self.result.exit_code != 0
-        ), f"Expected validation to fail.\n{self.result.output}"
+        assert self.result.exit_code != 0, (
+            f"Expected validation to fail.\n{self.result.output}"
+        )
 
         if codes is not None:
-            assert (
-                sorted(codes) == sorted(self.messages.keys())
-            ), f"{sorted(codes)} != {sorted(self.messages.keys())}. Messages: {self.messages}"
+            assert sorted(codes) == sorted(self.messages.keys()), (
+                f"{sorted(codes)} != {sorted(self.messages.keys())}. Messages: {self.messages}"
+            )
         else:
-            assert (
-                self.result.exit_code == 1
-            ), f"Expected error code 1 for 1 invalid path. Got {sorted(self.messages.items())}"
+            assert self.result.exit_code == 1, (
+                f"Expected error code 1 for 1 invalid path. Got {sorted(self.messages.items())}"
+            )
 
     def run_validate(self, docs: Sequence[Doc], suffix=".yaml"):
         __tracebackhide__ = operator.methodcaller("errisinstance", AssertionError)
diff --git a/versioneer.py b/versioneer.py
index d4059b0e..cf89396b 100644
--- a/versioneer.py
+++ b/versioneer.py
@@ -1838,9 +1838,9 @@ def get_versions(verbose: bool = False) -> dict[str, Any]:
     handlers = HANDLERS.get(cfg.VCS)
     assert handlers, "unrecognized VCS '%s'" % cfg.VCS
     verbose = verbose or bool(cfg.verbose)  # `bool()` used to avoid `None`
-    assert (
-        cfg.versionfile_source is not None
-    ), "please set versioneer.versionfile_source"
+    assert cfg.versionfile_source is not None, (
+        "please set versioneer.versionfile_source"
+    )
     assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
 
     versionfile_abs = os.path.join(root, cfg.versionfile_source)