diff --git a/darwin/cli.py b/darwin/cli.py
index f5fbe7325..97dacbb86 100644
--- a/darwin/cli.py
+++ b/darwin/cli.py
@@ -100,7 +100,7 @@ def _run(args: Namespace, parser: ArgumentParser) -> None:
         print(__version__)
     elif args.command == "convert":
-        f.convert(args.format, args.files, args.output_dir, legacy=args.legacy)
+        f.convert(args.format, args.files, args.output_dir)
     elif args.command == "dataset":
         if args.action == "remote":
             f.list_remote_datasets(args.all, args.team)
@@ -173,12 +173,13 @@ def _run(args: Namespace, parser: ArgumentParser) -> None:
                 args.import_annotators,
                 args.import_reviewers,
                 args.overwrite,
-                legacy=args.legacy,
                 cpu_limit=args.cpu_limit,
             )
         elif args.action == "convert":
             f.dataset_convert(
-                args.dataset, args.format, args.output_dir, legacy=args.legacy
+                args.dataset,
+                args.format,
+                args.output_dir,
             )
         elif args.action == "set-file-status":
             f.set_file_status(args.dataset, args.status, args.files)
diff --git a/darwin/cli_functions.py b/darwin/cli_functions.py
index d2a614658..bd8ce1f91 100644
--- a/darwin/cli_functions.py
+++ b/darwin/cli_functions.py
@@ -5,7 +5,6 @@
 import os
 import sys
 import traceback
-from functools import partial
 from glob import glob
 from itertools import tee
 from pathlib import Path
@@ -888,7 +887,6 @@ def dataset_import(
     import_annotators: bool = False,
     import_reviewers: bool = False,
     overwrite: bool = False,
-    legacy: bool = False,
     use_multi_cpu: bool = False,
     cpu_limit: Optional[int] = None,
 ) -> None:
@@ -922,9 +920,6 @@ def dataset_import(
     overwrite : bool, default: False
         If ``True`` it will bypass a warning that the import will overwrite the current annotations if any are present.
         If ``False`` this warning will be skipped and the import will overwrite the current annotations without warning.
-    legacy : bool, default: False
-        If ``True`` it will resize the annotations to be isotropic.
-        If ``False`` it will not resize the annotations to be isotropic.
     use_multi_cpu : bool, default: False
         If ``True`` it will use all multiple CPUs to speed up the import process.
     cpu_limit : Optional[int], default: Core count - 2
@@ -955,7 +950,6 @@ def dataset_import(
             overwrite,
             use_multi_cpu,
             cpu_limit,
-            legacy,
         )
 
     except ImporterNotFoundError:
@@ -1211,7 +1205,6 @@ def dataset_convert(
     dataset_identifier: str,
     format: str,
     output_dir: Optional[PathLike] = None,
-    legacy: bool = False,
 ) -> None:
     """
     Converts the annotations from the given dataset to the given format.
@@ -1227,10 +1220,6 @@ def dataset_convert(
     output_dir : Optional[PathLike], default: None
         The folder where the exported annotation files will be. If None it will be the inside the annotations folder of
         the dataset under 'other_formats/{format}'.
-    legacy : bool, default: False
-        This flag is only for the nifti format.
-        If True, it will resize the annotations by dividing by pixdims.
-        If False, it will not export the annotations using legacy calculations
     """
     identifier: DatasetIdentifier = DatasetIdentifier.parse(dataset_identifier)
     client: Client = _load_client(team_slug=identifier.team_slug)
@@ -1269,7 +1258,9 @@ def dataset_convert(
 
 
 def convert(
-    format: str, files: List[PathLike], output_dir: Path, legacy: bool = False
+    format: str,
+    files: List[PathLike],
+    output_dir: Path,
 ) -> None:
     """
     Converts the given files to the specified format.
@@ -1282,15 +1273,9 @@ def convert(
         List of files to be converted.
     output_dir: Path
         Folder where the exported annotations will be placed.
-    legacy: bool, default: False
-        This flag is only for the nifti format.
-        If True, it will resize the annotations by dividing by pixdims
-        If False, it will not export the annotations using legacy calculations.
     """
     try:
         parser: ExportParser = get_exporter(format)
-        if format == "nifti" and legacy:
-            parser = partial(parser, legacy=True)
     except ExporterNotFoundError:
         _error(f"Unsupported export format, currently supported: {export_formats}")
     except AttributeError:
diff --git a/darwin/exporter/formats/nifti.py b/darwin/exporter/formats/nifti.py
index 011b1e4e8..ce32ee78c 100644
--- a/darwin/exporter/formats/nifti.py
+++ b/darwin/exporter/formats/nifti.py
@@ -59,7 +59,6 @@ class Volume:
 def export(
     annotation_files: Iterable[dt.AnnotationFile],
     output_dir: Path,
-    legacy: Optional[bool] = None,
 ) -> None:
     """
     Exports the given ``AnnotationFile``\\s into nifti format inside of the given
@@ -71,23 +70,12 @@ def export(
         The ``AnnotationFile``\\s to be exported.
     output_dir : Path
         The folder where the new instance mask files will be.
-    legacy : bool, default=None
-        If ``True``, the exporter will use the legacy calculation.
-        If ``False``, the exporter will use the new calculation by dividing with pixdims.
-
     Returns
     -------
     sends output volumes, image_id and output_dir to the write_output_volume_to_disk function
     """
-
-    if legacy is not None:
-        console.print(
-            "The `legacy` flag is now non-functional and will be deprecated soon. The annotation conversion process now automatically detects if legacy annotation scaling is required.",
-            style="warning",
-        )
-
     video_annotations = list(annotation_files)
     for video_annotation in video_annotations:
         try:
diff --git a/darwin/importer/importer.py b/darwin/importer/importer.py
index f329a44f9..0d4764b84 100644
--- a/darwin/importer/importer.py
+++ b/darwin/importer/importer.py
@@ -3,7 +3,6 @@
 import json
 import copy
 from collections import defaultdict
-from functools import partial
 from logging import getLogger
 from multiprocessing import cpu_count
 from pathlib import Path
@@ -1158,7 +1157,6 @@ def import_annotations( # noqa: C901
     overwrite: bool = False,
     use_multi_cpu: bool = False,
     cpu_limit: Optional[int] = None,
-    legacy: Optional[bool] = None,
 ) -> None:
     """
     Imports the given given Annotations into the given Dataset.
@@ -1200,9 +1198,6 @@ def import_annotations( # noqa: C901
         If ``cpu_limit`` is greater than the number of available CPU cores, it will be set to the number of available cores.
         If ``cpu_limit`` is less than 1, it will be set to CPU count - 2.
         If ``cpu_limit`` is omitted, it will be set to CPU count - 2.
-    legacy : bool, default: False
-        If ``True`` will use the legacy isotropic transformation to resize annotations
-        If ``False`` will not use the legacy isotropic transformation to resize annotations
     Raises
     -------
     ValueError
@@ -1215,12 +1210,6 @@ def import_annotations( # noqa: C901
 
     """
     console = Console(theme=_console_theme())
-    if legacy is not None:
-        console.print(
-            "The `legacy` flag is now non-functional and will be deprecated soon. The annotation import process now automatically detects if legacy annotation scaling is required.",
-            style="warning",
-        )
-
     if append and delete_for_empty:
         raise IncompatibleOptions(
             "The options 'append' and 'delete_for_empty' cannot be used together. Use only one of them."
@@ -2072,9 +2061,6 @@
     annotation_format : str
         The annotation format of the importer used to parse local files
     """
-    # This `if` block is temporary, but necessary while we migrate NifTI imports between the legacy method & the new method
-    if isinstance(importer, partial):
-        return importer.func.__module__.split(".")[3]
     return importer.__module__.split(".")[3]
 
 
diff --git a/darwin/options.py b/darwin/options.py
index 9b550aa79..f793ce74a 100644
--- a/darwin/options.py
+++ b/darwin/options.py
@@ -60,14 +60,6 @@ def __init__(self) -> None:
             nargs="+",
             help="Annotation files (or folders) to convert.",
         )
-        parser_convert.add_argument(
-            "--legacy",
-            type=lambda x: (str(x).lower() == "true") if x is not None else None,
-            nargs="?",
-            const=True,
-            default=None,
-            help="The legacy flag is now non-functional and will be deprecated soon. The annotation conversion process now automatically detects if legacy annotation scaling is required.",
-        )
         parser_convert.add_argument(
             "output_dir", type=str, help="Where to store output files."
         )
@@ -381,14 +373,6 @@ def __init__(self) -> None:
             action="store_true",
             help="Bypass warnings about overwiting existing annotations.",
         )
-        parser_import.add_argument(
-            "--legacy",
-            type=lambda x: (str(x).lower() == "true") if x is not None else None,
-            nargs="?",
-            const=True,
-            default=None,
-            help="The legacy flag is now non-functional and will be deprecated soon. The annotation import process now automatically detects if legacy annotation scaling is required.",
-        )
 
         # Cpu limit for multiprocessing tasks
         def cpu_default_types(input: Any) -> Optional[int]: # type: ignore
@@ -418,14 +402,6 @@ def cpu_default_types(input: Any) -> Optional[int]: # type: ignore
         parser_convert.add_argument(
             "format", type=str, help="Annotation format to convert to."
         )
-        parser_convert.add_argument(
-            "--legacy",
-            type=lambda x: (str(x).lower() == "true") if x is not None else None,
-            nargs="?",
-            const=True,
-            default=None,
-            help="The legacy flag is now non-functional and will be deprecated soon. The annotation conversion process now automatically detects if legacy annotation scaling is required.",
-        )
         parser_convert.add_argument(
             "-o", "--output_dir", type=str, help="Where to store output files."
         )
diff --git a/tests/darwin/importer/importer_test.py b/tests/darwin/importer/importer_test.py
index 28a345847..9b55d9cc5 100644
--- a/tests/darwin/importer/importer_test.py
+++ b/tests/darwin/importer/importer_test.py
@@ -1,6 +1,5 @@
 import json
 import tempfile
-from functools import partial
 from pathlib import Path
 from typing import List, Tuple
 from unittest.mock import MagicMock, Mock, _patch, patch
@@ -2145,12 +2144,6 @@ def test__get_annotation_format():
     assert _get_annotation_format(get_importer("superannotate")) == "superannotate"
 
 
-def test__get_annotation_format_with_partial():
-    nifti_importer = get_importer("nifti")
-    legacy_nifti_importer = partial(nifti_importer, legacy=True)
-    assert _get_annotation_format(legacy_nifti_importer) == "nifti"
-
-
 def test_no_verify_warning_for_single_slotted_items():
     bounding_box_class = dt.AnnotationClass(
         name="class1", annotation_type="bounding_box"
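
For reference, a minimal sketch of how the trimmed-down entry points are called once this patch is applied. The team/dataset identifier and the local paths below are placeholders, and running dataset_convert still requires a locally configured, authenticated Darwin client; NIfTI legacy scaling is now detected automatically inside the exporter rather than toggled by a flag.

# Sketch of post-change usage; identifiers and paths are hypothetical.
from pathlib import Path

import darwin.cli_functions as f

# Local conversion: convert() now takes only format, files and output_dir,
# so passing legacy=True would raise a TypeError.
f.convert("nifti", [Path("annotations")], Path("out/nifti"))

# Remote dataset conversion: identifier, target format and optional output_dir
# (requires an authenticated Darwin client configured on this machine).
f.dataset_convert("my-team/my-dataset", "nifti", output_dir=Path("out/nifti"))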