From 5f7eca9db8236b44515cbd6a8625e653378cfee7 Mon Sep 17 00:00:00 2001 From: robert Date: Thu, 25 Jul 2024 15:26:51 +0200 Subject: [PATCH 01/10] remove old scale back --- spineps/entrypoint.py | 2 +- spineps/seg_model.py | 8 ++++---- spineps/seg_pipeline.py | 3 +-- spineps/seg_run.py | 26 ++++++++------------------ spineps/seg_utils.py | 40 ---------------------------------------- 5 files changed, 14 insertions(+), 65 deletions(-) diff --git a/spineps/entrypoint.py b/spineps/entrypoint.py index 83476e4..0362ed4 100755 --- a/spineps/entrypoint.py +++ b/spineps/entrypoint.py @@ -85,7 +85,7 @@ def entry_point(): "-ms", # type=str.lower, default=None, - # required=True, + required=True, # choices=modelids_semantic, metavar="", help=f"The model used for the semantic segmentation. Choices are {modelids_semantic} or a string absolute path the model folder", diff --git a/spineps/seg_model.py b/spineps/seg_model.py index 4abd866..28dd382 100755 --- a/spineps/seg_model.py +++ b/spineps/seg_model.py @@ -240,7 +240,7 @@ def modelid(self, include_log_name: bool = False): return name return self.inference_config.log_name - def dict_representation(self, input_zms: ZOOMS | None): + def dict_representation(self): info = { "name": self.modelid(), # self.inference_config.__repr__() "model_path": str(self.model_folder), @@ -248,9 +248,9 @@ def dict_representation(self, input_zms: ZOOMS | None): "aquisition": str(self.acquisition()), "resolution_range": str(self.inference_config.resolution_range), } - if input_zms is not None: - proc_zms = self.calc_recommended_resampling_zoom(input_zms) - info["resolution_processed"] = str(proc_zms) + # if input_zms is not None: + # proc_zms = self.calc_recommended_resampling_zoom(input_zms) + # info["resolution_processed"] = str(proc_zms) return info def __str__(self): diff --git a/spineps/seg_pipeline.py b/spineps/seg_pipeline.py index 33c17bc..7b84add 100755 --- a/spineps/seg_pipeline.py +++ b/spineps/seg_pipeline.py @@ -38,7 +38,6 @@ def predict_centroids_from_both( seg_nii: NII, models: list[Segmentation_Model], parameter: dict[str, Any], - input_zms_pir: ZOOMS | None = None, ): """Calculates the centroids of each vertebra corpus by using both semantic and instance mask @@ -65,7 +64,7 @@ def predict_centroids_from_both( models_repr = {} for idx, m in enumerate(models): - models_repr[idx] = m.dict_representation(input_zms_pir) + models_repr[idx] = m.dict_representation() ctd.info["source"] = "MRI Segmentation Pipeline" ctd.info["version"] = pipeline_version() ctd.info["models"] = models_repr diff --git a/spineps/seg_run.py b/spineps/seg_run.py index 277f72c..d2ab14f 100755 --- a/spineps/seg_run.py +++ b/spineps/seg_run.py @@ -17,13 +17,7 @@ from spineps.seg_enums import Acquisition, ErrCode, Modality from spineps.seg_model import Segmentation_Model from spineps.seg_pipeline import logger, predict_centroids_from_both -from spineps.seg_utils import ( - InputPackage, - Modality_Pair, - check_input_model_compatibility, - check_model_modality_acquisition, - find_best_matching_model, -) +from spineps.seg_utils import Modality_Pair, check_input_model_compatibility, check_model_modality_acquisition, find_best_matching_model from spineps.utils.citation_reminder import citation_reminder @@ -372,17 +366,15 @@ def process_img_nii( # noqa: C901 if verbose: model_semantic.logger.default_verbose = True input_nii = img_ref.open_nii() - input_package = InputPackage( - input_nii, - pad_size=proc_pad_size, - ) + input_nii.seg = False + input_nii_ = input_nii.copy() logger.print("Input 
image", input_nii.zoom, input_nii.orientation, input_nii.shape) # First stage if not out_spine_raw.exists() or override_semantic: input_preprocessed, errcode = preprocess_input( input_nii, - pad_size=input_package.pad_size, + pad_size=proc_pad_size, debug_data=debug_data_run, proc_crop_input=proc_sem_crop_input, proc_do_n4_bias_correction=proc_sem_n4_bias_correction, @@ -456,13 +448,12 @@ def process_img_nii( # noqa: C901 # back to input space # if not save_modelres_mask: - seg_nii_back = input_package.sample_to_this(seg_nii_modelres) - whole_vert_nii = input_package.sample_to_this(whole_vert_nii, intermediate_nii=seg_nii_modelres) + seg_nii_back = seg_nii_modelres.resample_from_to(input_nii_) + whole_vert_nii = whole_vert_nii.resample_from_to(input_nii_) else: seg_nii_back = seg_nii_modelres seg_nii_back.assert_affine(other=input_nii) - # use both seg_raw and vert_raw to clean each other, add ivd_ep ... seg_nii_clean, vert_nii_clean = phase_postprocess_combined( seg_nii=seg_nii_back, @@ -494,9 +485,8 @@ def process_img_nii( # noqa: C901 seg_nii_clean, models=[model_semantic, model_instance], parameter={l: v for l, v in arguments.items() if "proc_" in l}, - input_zms_pir=input_package.zms_pir, ) - ctd.rescale(input_package._zms, verbose=logger).reorient(input_package._orientation).save(out_ctd, verbose=logger) + ctd.resample_from_to(input_nii_).save(out_ctd, verbose=logger) done_something = True else: logger.print("Centroids already exists, will load instead. Set -override_ctd = True to create it anew") @@ -509,7 +499,7 @@ def process_img_nii( # noqa: C901 else: out_debug.parent.mkdir(parents=True, exist_ok=True) for k, v in debug_data_run.items(): - v.reorient_(input_package._orientation).save( + v.reorient_(input_nii_.orientation).save( out_debug.joinpath(k + f"_{input_format}.nii.gz"), make_parents=True, verbose=False ) logger.print(f"Saved debug data into {out_debug}/*", Log_Type.OK) diff --git a/spineps/seg_utils.py b/spineps/seg_utils.py index e6a37a7..5eb7c04 100755 --- a/spineps/seg_utils.py +++ b/spineps/seg_utils.py @@ -9,46 +9,6 @@ Modality_Pair = tuple[list[Modality] | Modality, Acquisition] -class InputPackage: - def __init__(self, mri_nii: NII, pad_size: int = 4) -> None: - self._zms = mri_nii.zoom - self._affine = mri_nii.affine - self._header = mri_nii.header - self._orientation = mri_nii.orientation - self._shape = mri_nii.shape - self.zms_pir = mri_nii.reorient().zoom - self.pad_size = pad_size - - def sample_to_this(self, other_nii: NII, intermediate_nii: NII | None = None) -> NII: - other_nii = other_nii.copy() - other_nii.assert_affine(orientation=("P", "I", "R")) - - if intermediate_nii is not None: - intermediate_nii.assert_affine(orientation=("P", "I", "R")) - other_nii.rescale_(voxel_spacing=intermediate_nii.zoom, verbose=logger).reorient_(intermediate_nii.orientation, verbose=logger) - other_nii.rescale_(voxel_spacing=self.zms_pir, verbose=logger).reorient_(self._orientation, verbose=logger) - # other_nii.reorient_(self._orientation, verbose=logger).rescale_(voxel_spacing=self._zms, verbose=logger) - if self.pad_size > 0: - other_nii = other_nii.pad_to(tuple(other_nii.shape[i] - (2 * self.pad_size) for i in range(3))) - # arr = other_nii.get_array() - # arr = arr[self.pad_size : -self.pad_size, self.pad_size : -self.pad_size, self.pad_size : -self.pad_size] - # other_nii.set_array_(arr) - other_nii.pad_to(self._shape, inplace=True) - assert_true = other_nii.assert_affine( - zoom=self._zms, orientation=self._orientation, shape=self._shape, raise_error=False, 
verbose=logger - ) - assert assert_true, "sampled back to input did not meet affine criteria" - return other_nii - - def make_nii_from_this(self, other_nii: NII) -> NII: - other_nii.assert_affine(shape=self._shape, orientation=self._orientation, zoom=self._zms) - other_nii.nii = nib.nifti1.Nifti1Image(other_nii.get_seg_array(), affine=self._affine, header=self._header) - return other_nii - - def __str__(self) -> str: - return f"Input image, {self._zms}, {self._orientation}, {self._shape}" - - def find_best_matching_model( modality_pair: Modality_Pair, expected_resolution: ZOOMS | None, # actual resolution here? From 9e038f5ef7af1af672f1018da6749a5a62834c93 Mon Sep 17 00:00:00 2001 From: robert Date: Thu, 25 Jul 2024 15:33:27 +0200 Subject: [PATCH 02/10] add Vibe semantic segmentation from TotalVibeSegmentor to autodownload --- spineps/auto_download.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/spineps/auto_download.py b/spineps/auto_download.py index fdae818..82390b4 100644 --- a/spineps/auto_download.py +++ b/spineps/auto_download.py @@ -15,6 +15,7 @@ semantic: dict[str, Path | str] = { "t2w": link + current_highest_version + "/t2w.zip", "t1w": link + current_highest_version + "/t1w.zip", + "vibe": link + current_highest_version + "/vibe.zip", } @@ -22,6 +23,7 @@ "instance": "instance_sagittal", "t2w": "T2w_semantic", "t1w": "T1w_semantic", + "vibe": "Vibe_semantic", } From 302f4a5b272a68540ca56f8efcdc0365daef4aba Mon Sep 17 00:00:00 2001 From: robert Date: Thu, 25 Jul 2024 15:43:50 +0200 Subject: [PATCH 03/10] Add citation where the segmentation came from to readme --- README.md | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/README.md b/README.md index 800a325..2bc607b 100644 --- a/README.md +++ b/README.md @@ -13,16 +13,27 @@ This is a segmentation pipeline to automatically, and robustly, segment the whol If you are using SPINEPS, please cite the following: ``` +SPINEPS: + Hendrik Möller, Robert Graf, Joachim Schmitt, Benjamin Keinert, Matan Atad, Anjany Sekuboyina, Felix Streckenbach, Hanna Schon, Florian Kofler, Thomas Kroencke, Ste- fanie Bette, Stefan Willich, Thomas Keil, Thoralf Niendorf, Tobias Pischon, Beate Ende- mann, Bjoern Menze, Daniel Rueckert, and Jan S. Kirschke. Spineps – automatic whole spine segmentation of t2-weighted mr images using a two-phase approach to multi-class semantic and instance segmentation. arXiv preprint arXiv:2402.16368, 2024. + +Source of the T2w/T1w Segmentation: + +Robert Graf, Joachim Schmitt, Sarah Schlaeger, Hendrik Kristian Möller, Vasiliki Sideri-Lampretsa, Anjany Sekuboyina, Sandro Manuel Krieg, Benedikt Wiestler, Bjoern Menze, Daniel Rueckert, Jan Stefan Kirschke. Denoising diffusion-based MRI to CT image translation enables automated spinal segmentation. Eur Radiol Exp 7, 70 (2023). 
https://doi.org/10.1186/s41747-023-00385-2 ``` +SPINEPS: ArXiv link: https://arxiv.org/abs/2402.16368 +Source of the T2w/T1w Segmentation + +Open Access link: https://doi.org/10.1186/s41747-023-00385-2 + BibTeX citation: ``` @article{moeller2024, @@ -34,6 +45,17 @@ BibTeX citation: archivePrefix={arXiv}, primaryClass={eess.IV}, } + +@article{graf2023denoising, + title={Denoising diffusion-based MRI to CT image translation enables automated spinal segmentation}, + author={Graf, Robert and Schmitt, Joachim and Schlaeger, Sarah and M{\"o}ller, Hendrik Kristian and Sideri-Lampretsa, Vasiliki and Sekuboyina, Anjany and Krieg, Sandro Manuel and Wiestler, Benedikt and Menze, Bjoern and Rueckert, Daniel and others}, + journal={European Radiology Experimental}, + volume={7}, + number={1}, + pages={70}, + year={2023}, + publisher={Springer} +} ``` ## Installation (Ubuntu) From b9c5608a4d50439b4a5b05579efe73888cf3484f Mon Sep 17 00:00:00 2001 From: robert Date: Thu, 25 Jul 2024 15:45:23 +0200 Subject: [PATCH 04/10] Add citation where the segmentation came from to readme --- README.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 2bc607b..1499575 100644 --- a/README.md +++ b/README.md @@ -24,13 +24,17 @@ semantic and instance segmentation. arXiv preprint arXiv:2402.16368, 2024. Source of the T2w/T1w Segmentation: -Robert Graf, Joachim Schmitt, Sarah Schlaeger, Hendrik Kristian Möller, Vasiliki Sideri-Lampretsa, Anjany Sekuboyina, Sandro Manuel Krieg, Benedikt Wiestler, Bjoern Menze, Daniel Rueckert, Jan Stefan Kirschke. Denoising diffusion-based MRI to CT image translation enables automated spinal segmentation. Eur Radiol Exp 7, 70 (2023). https://doi.org/10.1186/s41747-023-00385-2 +Robert Graf, Joachim Schmitt, Sarah Schlaeger, Hendrik Kristian Möller, Vasiliki +Sideri-Lampretsa, Anjany Sekuboyina, Sandro Manuel Krieg, Benedikt Wiestler, Bjoern +Menze, Daniel Rueckert, Jan Stefan Kirschke. Denoising diffusion-based MRI to CT image +translation enables automated spinal segmentation. Eur Radiol Exp 7, 70 (2023). 
+https://doi.org/10.1186/s41747-023-00385-2 ``` SPINEPS: ArXiv link: https://arxiv.org/abs/2402.16368 -Source of the T2w/T1w Segmentation +Source of the T2w/T1w Segmentation: Open Access link: https://doi.org/10.1186/s41747-023-00385-2 From ce6cad81682d124c424bcbcffbbaea15f0f39176 Mon Sep 17 00:00:00 2001 From: ga84mun Date: Fri, 26 Jul 2024 08:42:17 +0000 Subject: [PATCH 05/10] update checks for new mevibe --- pyproject.toml | 2 +- spineps/example/get_gpu.py | 43 ++++++++++++++++++++++++++++ spineps/example/helper_parallel.py | 25 ++++++++++------ spineps/models.py | 5 ---- spineps/seg_enums.py | 6 ++-- spineps/seg_run.py | 46 +++++++++++++++++++++++------- spineps/utils/citation_reminder.py | 2 +- 7 files changed, 99 insertions(+), 30 deletions(-) create mode 100644 spineps/example/get_gpu.py diff --git a/pyproject.toml b/pyproject.toml index 4bd7c04..ececa69 100755 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,7 +28,7 @@ SciPy = "^1.11.2" torchmetrics = "^1.1.2" tqdm = "^4.66.1" einops= "^0.6.1" -nnunetv2 = "2.2" +nnunetv2 = "2.4.2" tptbox = "^0.1.4" antspyx = "*" rich = "^13.6.0" diff --git a/spineps/example/get_gpu.py b/spineps/example/get_gpu.py new file mode 100644 index 0000000..658fac6 --- /dev/null +++ b/spineps/example/get_gpu.py @@ -0,0 +1,43 @@ +import time + +import GPUtil +from TPTBox import Log_Type, No_Logger + +logger = No_Logger() + + +def get_gpu(verbose: bool = False, maxLoad: float = 0.3, maxMemory: float = 0.4): + GPUtil.showUtilization() if verbose else None + deviceIDs = GPUtil.getAvailable( + order="load", + limit=4, + maxLoad=maxLoad, + maxMemory=maxMemory, + includeNan=False, + excludeID=[], + excludeUUID=[], + ) + return deviceIDs + + +def intersection(lst1, lst2): + return set(lst1).intersection(lst2) + + +def get_free_gpus(blocked_gpus=None, maxLoad: float = 0.3, maxMemory: float = 0.4): + # print("get_free_gpus") + if blocked_gpus is None: + blocked_gpus = {0: False, 1: False, 2: False, 3: False} + cached_list = get_gpu(maxLoad=maxLoad, maxMemory=maxMemory) + for i in range(15): + time.sleep(0.25) + cached_list = intersection(cached_list, get_gpu()) + # print("result:", list(cached_list)) + gpulist = [i for i in list(cached_list) if i not in blocked_gpus or blocked_gpus[i] == False] + # print("result:", gpulist) + return gpulist + + +def thread_print(fold, *text): + global logger + logger.print(f"Fold [{fold}]: ", *text) diff --git a/spineps/example/helper_parallel.py b/spineps/example/helper_parallel.py index 589d2d5..05d5b4f 100755 --- a/spineps/example/helper_parallel.py +++ b/spineps/example/helper_parallel.py @@ -9,26 +9,29 @@ from TPTBox import BIDS_FILE # noqa: E402 -from spineps.models import get_segmentation_model # noqa: E402 +from spineps.models import get_instance_model, get_semantic_model # noqa: E402 from spineps.seg_run import process_img_nii # noqa: E402 -from spineps.utils.filepaths import filepath_model # noqa: E402 + +# Example +# python /spineps/example/helper_parallel.py -i PATH/TO/IMG.nii.gz -ds DATASET-PATH -der derivatives -ms [t1w,t2w,vibe] -mv instance if __name__ == "__main__": main_parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) main_parser.add_argument("-i", type=str) main_parser.add_argument("-ds", type=str) - main_parser.add_argument("-der", type=str) - main_parser.add_argument("-ms", type=str) - main_parser.add_argument("-mv", type=str) + main_parser.add_argument("-der", default="derivatives", type=str) + main_parser.add_argument("-ms", default="t2w", type=str) + 
main_parser.add_argument("-mv", default="instance", type=str) + main_parser.add_argument("-snap", default=None, type=str) opt = main_parser.parse_args() input_bids_file = BIDS_FILE(file=opt.i, dataset=opt.ds) - model_dir = "/DATA/NAS/ongoing_projects/hendrik/nako-segmentation/nnUNet/" - ms = get_segmentation_model(in_config=filepath_model(opt.ms, model_dir=model_dir)) - mv = get_segmentation_model(in_config=filepath_model(opt.mv, model_dir=model_dir)) - + ms = get_semantic_model(opt.ms) + mv = get_instance_model(opt.mv) + if opt.snap is not None: + Path(opt.snap).mkdir(exist_ok=True, parents=True) process_img_nii( img_ref=input_bids_file, derivative_name=opt.der, @@ -38,4 +41,8 @@ override_instance=False, save_debug_data=False, verbose=False, + ignore_compatibility_issues=False, # If true, we do not check if the file ending match like _T2w.nii.gz for T2w images + ignore_bids_filter=False, # If true, we do not check if BIDS compliant + save_raw=False, # Save output as they are produced by the model + snapshot_copy_folder=opt.snap, ) diff --git a/spineps/models.py b/spineps/models.py index 6a76869..6d9fee0 100755 --- a/spineps/models.py +++ b/spineps/models.py @@ -130,11 +130,6 @@ def check_available_models(models_folder: str | Path, verbose: bool = False) -> except Exception as e: logger.print(f"Modelfolder '{model_folder_name}' ignored, caused by '{e}'", Log_Type.STRANGE, verbose=verbose) # raise e # - if len(config_paths) == 0 or len(_modelid2folder_instance.keys()) == 0 or len(_modelid2folder_semantic.keys()) == 0: - logger.print( - "Automatic search for models did not find anything. Did you set the environment variable correctly? Did you download model weights and put them into the specified folder? Ignore this if you specified your model using an absolute path.", - Log_Type.FAIL, - ) return _modelid2folder_semantic, _modelid2folder_instance diff --git a/spineps/seg_enums.py b/spineps/seg_enums.py index 8ec9515..e4972e7 100755 --- a/spineps/seg_enums.py +++ b/spineps/seg_enums.py @@ -62,11 +62,11 @@ def format_keys(cls, modalities: Self | list[Self]) -> list[str]: elif modality == Modality.SEG: result += ["msk", "seg"] elif modality == Modality.T1w: - result += ["T1w", "t1", "T1"] + result += ["T1w", "t1", "T1", "T1c"] elif modality == Modality.T2w: - result += ["T2w", "dixon", "mr", "t2", "T2"] + result += ["T2w", "dixon", "mr", "t2", "T2", "T2c"] elif modality == Modality.Vibe: - result += ["t1dixon", "vibe"] + result += ["t1dixon", "vibe", "mevibe", "GRE"] elif modality == Modality.MPR: result += ["mpr", "MPR", "Mpr"] else: diff --git a/spineps/seg_run.py b/spineps/seg_run.py index d2ab14f..301c5c3 100755 --- a/spineps/seg_run.py +++ b/spineps/seg_run.py @@ -244,6 +244,7 @@ def process_img_nii( # noqa: C901 save_modelres_mask: bool = False, save_softmax_logits: bool = False, save_debug_data: bool = False, + save_raw: bool = True, override_semantic: bool = False, override_instance: bool = False, override_postpair: bool = False, @@ -347,7 +348,6 @@ def process_img_nii( # noqa: C901 logger.print(f"{out_spine.name}: Outputs are all already created and no override set, will skip") return output_paths, ErrCode.ALL_DONE - out_raw.mkdir(parents=True, exist_ok=True) done_something = False debug_data_run: dict[str, NII] = {} @@ -408,8 +408,8 @@ def process_img_nii( # noqa: C901 # Lambda Injection if lambda_semantic is not None: seg_nii_modelres = lambda_semantic(seg_nii_modelres) - - seg_nii_modelres.save(out_spine_raw, verbose=logger) + if save_raw: + seg_nii_modelres.save(out_spine_raw, 
verbose=logger) if save_softmax_logits and isinstance(softmax_logits, np.ndarray): save_nparray(softmax_logits, out_logits) done_something = True @@ -437,7 +437,8 @@ def process_img_nii( # noqa: C901 assert whole_vert_nii is not None, "whole_vert_nii is None" whole_vert_nii = whole_vert_nii.copy() # .reorient(orientation, verbose=True).rescale(zms, verbose=True) logger.print("vert_out", whole_vert_nii.zoom, whole_vert_nii.orientation, whole_vert_nii.shape, verbose=verbose) - whole_vert_nii.save(out_vert_raw, verbose=logger) + if save_raw: + whole_vert_nii.save(out_vert_raw, verbose=logger) done_something = True else: logger.print("Vert Mask already exists. Set -override_vert to create it anew") @@ -524,15 +525,23 @@ def process_img_nii( # noqa: C901 def output_paths_from_input( img_ref: BIDS_FILE, derivative_name: str, - snapshot_copy_folder: Path | None, + snapshot_copy_folder: Path | str | None, input_format: str, non_strict_mode: bool = False, ): out_spine = img_ref.get_changed_path( - bids_format="msk", parent=derivative_name, info={"seg": "spine", "mod": img_ref.format}, non_strict_mode=non_strict_mode + bids_format="msk", + parent=derivative_name, + info={"seg": "spine", "mod": img_ref.format}, + non_strict_mode=non_strict_mode, + make_parent=False, ) out_vert = img_ref.get_changed_path( - bids_format="msk", parent=derivative_name, info={"seg": "vert", "mod": img_ref.format}, non_strict_mode=non_strict_mode + bids_format="msk", + parent=derivative_name, + info={"seg": "vert", "mod": img_ref.format}, + non_strict_mode=non_strict_mode, + make_parent=False, ) out_snap = img_ref.get_changed_path( bids_format="snp", @@ -540,6 +549,7 @@ def output_paths_from_input( parent=derivative_name, info={"seg": "spine", "mod": img_ref.format}, non_strict_mode=non_strict_mode, + make_parent=False, ) out_ctd = img_ref.get_changed_path( bids_format="ctd", @@ -547,20 +557,33 @@ def output_paths_from_input( parent=derivative_name, info={"seg": "spine", "mod": img_ref.format}, non_strict_mode=non_strict_mode, + make_parent=False, ) - out_snap2 = snapshot_copy_folder.joinpath(out_snap.name) if snapshot_copy_folder is not None else out_snap + out_snap2 = Path(snapshot_copy_folder).joinpath(out_snap.name) if snapshot_copy_folder is not None else out_snap out_debug = out_vert.parent.joinpath(f"debug_{input_format}") out_raw = out_vert.parent.joinpath(f"output_raw_{input_format}") out_spine_raw = img_ref.get_changed_path( - bids_format="msk", parent=derivative_name, info={"seg": "spine-raw", "mod": img_ref.format}, non_strict_mode=non_strict_mode + bids_format="msk", + parent=derivative_name, + info={"seg": "spine-raw", "mod": img_ref.format}, + non_strict_mode=non_strict_mode, + make_parent=False, ) out_spine_raw = out_raw.joinpath(out_spine_raw.name) out_vert_raw = img_ref.get_changed_path( - bids_format="msk", parent=derivative_name, info={"seg": "vert-raw", "mod": img_ref.format}, non_strict_mode=non_strict_mode + bids_format="msk", + parent=derivative_name, + info={"seg": "vert-raw", "mod": img_ref.format}, + non_strict_mode=non_strict_mode, + make_parent=False, ) out_vert_raw = out_raw.joinpath(out_vert_raw.name) out_unc = img_ref.get_changed_path( - bids_format="uncertainty", parent=derivative_name, info={"seg": "spine", "mod": img_ref.format}, non_strict_mode=non_strict_mode + bids_format="uncertainty", + parent=derivative_name, + info={"seg": "spine", "mod": img_ref.format}, + non_strict_mode=non_strict_mode, + make_parent=False, ) out_unc = out_raw.joinpath(out_unc.name) out_logits = 
img_ref.get_changed_path( @@ -569,6 +592,7 @@ def output_paths_from_input( parent=derivative_name, info={"seg": "spine", "mod": img_ref.format}, non_strict_mode=non_strict_mode, + make_parent=False, ) out_logits = out_raw.joinpath(out_logits.name) return { diff --git a/spineps/utils/citation_reminder.py b/spineps/utils/citation_reminder.py index c88c81b..95bc953 100644 --- a/spineps/utils/citation_reminder.py +++ b/spineps/utils/citation_reminder.py @@ -15,7 +15,7 @@ def citation_reminder(func): def wrapper(*args, **kwargs): global has_reminded_citation # noqa: PLW0603 - if not has_reminded_citation: + if not has_reminded_citation and os.environ["SPINEPS_TURN_OF_CITATION_REMINDER"] != "TRUE": print_citation_reminder() has_reminded_citation = True func_result = func(*args, **kwargs) From 7f3a86e756c5c69bd238b9dd34a3e4abd919e90d Mon Sep 17 00:00:00 2001 From: ga84mun Date: Fri, 26 Jul 2024 08:45:06 +0000 Subject: [PATCH 06/10] fix ruff issues --- spineps/example/get_gpu.py | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/spineps/example/get_gpu.py b/spineps/example/get_gpu.py index 658fac6..be2f76e 100644 --- a/spineps/example/get_gpu.py +++ b/spineps/example/get_gpu.py @@ -1,4 +1,4 @@ -import time +import time # noqa: INP001 import GPUtil from TPTBox import Log_Type, No_Logger @@ -6,38 +6,37 @@ logger = No_Logger() -def get_gpu(verbose: bool = False, maxLoad: float = 0.3, maxMemory: float = 0.4): +def get_gpu(verbose: bool = False, max_load: float = 0.3, max_memory: float = 0.4): GPUtil.showUtilization() if verbose else None - deviceIDs = GPUtil.getAvailable( + device_ids = GPUtil.getAvailable( order="load", limit=4, - maxLoad=maxLoad, - maxMemory=maxMemory, + maxLoad=max_load, + maxMemory=max_memory, includeNan=False, excludeID=[], excludeUUID=[], ) - return deviceIDs + return device_ids def intersection(lst1, lst2): return set(lst1).intersection(lst2) -def get_free_gpus(blocked_gpus=None, maxLoad: float = 0.3, maxMemory: float = 0.4): +def get_free_gpus(blocked_gpus=None, max_load: float = 0.3, max_memory: float = 0.4): # print("get_free_gpus") if blocked_gpus is None: blocked_gpus = {0: False, 1: False, 2: False, 3: False} - cached_list = get_gpu(maxLoad=maxLoad, maxMemory=maxMemory) - for i in range(15): + cached_list = get_gpu(max_load=max_load, max_memory=max_memory) + for _ in range(15): time.sleep(0.25) cached_list = intersection(cached_list, get_gpu()) # print("result:", list(cached_list)) - gpulist = [i for i in list(cached_list) if i not in blocked_gpus or blocked_gpus[i] == False] + gpulist = [i for i in list(cached_list) if i not in blocked_gpus or blocked_gpus[i] is False] # print("result:", gpulist) return gpulist def thread_print(fold, *text): - global logger logger.print(f"Fold [{fold}]: ", *text) From 5608507997cbd1f6dd05659295021b0f21913375 Mon Sep 17 00:00:00 2001 From: ga84mun Date: Fri, 26 Jul 2024 08:59:40 +0000 Subject: [PATCH 07/10] fix #issue-2362692788 --- spineps/entrypoint.py | 2 +- spineps/utils/citation_reminder.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/spineps/entrypoint.py b/spineps/entrypoint.py index 0362ed4..653d83b 100755 --- a/spineps/entrypoint.py +++ b/spineps/entrypoint.py @@ -165,7 +165,7 @@ def entry_point(): @citation_reminder def run_sample(opt: Namespace): - input_path = Path(opt.input) + input_path = Path(opt.input).absolute() dataset = str(input_path.parent) assert os.path.exists(dataset), f"-input parent does not exist, got {dataset}" # noqa: PTH110 assert dataset not in 
("", "."), f"-input you only gave a filename, not a direction to the file, got {input_path}" diff --git a/spineps/utils/citation_reminder.py b/spineps/utils/citation_reminder.py index 95bc953..02480b8 100644 --- a/spineps/utils/citation_reminder.py +++ b/spineps/utils/citation_reminder.py @@ -15,7 +15,7 @@ def citation_reminder(func): def wrapper(*args, **kwargs): global has_reminded_citation # noqa: PLW0603 - if not has_reminded_citation and os.environ["SPINEPS_TURN_OF_CITATION_REMINDER"] != "TRUE": + if not has_reminded_citation and os.environ.get("SPINEPS_TURN_OF_CITATION_REMINDER", "FALSE") != "TRUE": print_citation_reminder() has_reminded_citation = True func_result = func(*args, **kwargs) From 1ffe7e4d5434f41140e893452dc0a1e9f1db809f Mon Sep 17 00:00:00 2001 From: ga84mun Date: Tue, 13 Aug 2024 11:31:12 +0000 Subject: [PATCH 08/10] small bug fixes --- spineps/phase_instance.py | 13 +++++-------- spineps/seg_run.py | 3 ++- spineps/utils/filepaths.py | 1 + spineps/utils/inference_api.py | 5 ++++- 4 files changed, 12 insertions(+), 10 deletions(-) diff --git a/spineps/phase_instance.py b/spineps/phase_instance.py index 80f904d..4a1f383 100755 --- a/spineps/phase_instance.py +++ b/spineps/phase_instance.py @@ -175,7 +175,7 @@ def get_corpus_coms( seg_nii.assert_affine(orientation=["P", "I", "R"]) # # Extract Corpus region and try to find all coms naively (some skips shouldnt matter) - corpus_nii = seg_nii.extract_label(Location.Vertebra_Corpus_border.value) + corpus_nii = seg_nii.extract_label([Location.Vertebra_Corpus_border, Location.Vertebra_Corpus]) corpus_nii.erode_msk_(mm=2, connectivity=2, verbose=False) if 1 in corpus_nii.unique() and corpus_size_cleaning > 0: corpus_nii.set_array_( @@ -192,7 +192,7 @@ def get_corpus_coms( ) if 1 not in corpus_nii.unique(): - logger.print("No 1 in corpus nifty, cannot make vertebra mask", Log_Type.FAIL) + logger.print(f"No corpus found after get_corpus_coms post process, cannot make vertebra mask. 
{corpus_nii.unique()}", Log_Type.FAIL) return None if not process_detect_and_solve_merged_corpi: @@ -256,19 +256,15 @@ def get_corpus_coms( stats_by_height.pop(vl) stats_by_height = dict(sorted(stats_by_height.items(), key=lambda x: x[1][0])) stats_by_height_keys = list(stats_by_height.keys()) - print(stats_by_height_keys) continue logger.print("Merged corpi, try to fix it", verbose=verbose) neighbor_verts = { stats_by_height_keys[idx + i]: stats_by_height[stats_by_height_keys[idx + i]] for i in [-5, -4, -3, -2, -1, 1, 2, 3, 4, 5] - if (idx + i) in stats_by_height_keys and stats_by_height_keys[idx + i] < 99 + if (idx + i) < len(stats_by_height_keys) and (idx + i) >= 0 and stats_by_height_keys[idx + i] < 99 } - # stats_by_height_keys[idx + i] - # for i in [-5, -4, -3, -2, -1, 1, 2, 3, 4, 5] - # if (idx + i) in stats_by_height_keys and stats_by_height_keys[idx + i] < 99 - # ] # (+-3) + logger.print("neighbor_vert_labels", neighbor_verts, verbose=verbose) if len(neighbor_verts) == 0: logger.print("Got no neighbor vert labels to fix", Log_Type.FAIL) @@ -505,6 +501,7 @@ def collect_vertebra_predictions( 47: 7, 48: 8, 49: 9, + 50: 9, Location.Spinal_Cord.value: 0, Location.Spinal_Canal.value: 0, Location.Vertebra_Disc.value: 0, diff --git a/spineps/seg_run.py b/spineps/seg_run.py index 301c5c3..d842c8d 100755 --- a/spineps/seg_run.py +++ b/spineps/seg_run.py @@ -262,6 +262,7 @@ def process_img_nii( # noqa: C901 proc_inst_clean_small_cc_artifacts: bool = True, proc_inst_largest_k_cc: int = 0, proc_inst_detect_and_solve_merged_corpi: bool = True, + vertebra_instance_labeling_offset=2, # Both proc_fill_3d_holes: bool = True, proc_assign_missing_cc: bool = True, @@ -460,7 +461,7 @@ def process_img_nii( # noqa: C901 seg_nii=seg_nii_back, vert_nii=whole_vert_nii, debug_data=debug_data_run, - labeling_offset=1, + labeling_offset=vertebra_instance_labeling_offset - 1, proc_clean_inst_by_sem=proc_clean_inst_by_sem, proc_assign_missing_cc=proc_assign_missing_cc, proc_vertebra_inconsistency=proc_vertebra_inconsistency, diff --git a/spineps/utils/filepaths.py b/spineps/utils/filepaths.py index fedd428..189f29b 100755 --- a/spineps/utils/filepaths.py +++ b/spineps/utils/filepaths.py @@ -9,6 +9,7 @@ # "/DATA/NAS/ongoing_projects/hendrik/mri_usage/models/" # ) # None # You can put an absolute path to the model weights here instead of using environment variable spineps_environment_path_backup = Path(__file__).parent.parent.joinpath("models") # EDIT this to use this instead of environment variable +spineps_environment_path_backup.mkdir(exist_ok=True) def get_mri_segmentor_models_dir() -> Path: diff --git a/spineps/utils/inference_api.py b/spineps/utils/inference_api.py index 95e0d80..8c18db2 100755 --- a/spineps/utils/inference_api.py +++ b/spineps/utils/inference_api.py @@ -49,7 +49,10 @@ def load_inf_model( elif ddevice == "cuda": # multithreading in torch doesn't help nnU-Net if run on GPU torch.set_num_threads(1) if init_threads else None - torch.set_num_interop_threads(1) if init_threads else None + try: + torch.set_num_interop_threads(1) if init_threads else None + except RuntimeError: + pass device = torch.device("cuda") else: device = torch.device("mps") From 65c2249c73d2511a08d9781d2cc7bc1432522a06 Mon Sep 17 00:00:00 2001 From: robert Date: Thu, 29 Aug 2024 08:49:20 +0200 Subject: [PATCH 09/10] bugfix pyants --- pyproject.toml | 4 +++- spineps/seg_enums.py | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index ececa69..a9f9b8c 100755 --- 
a/pyproject.toml +++ b/pyproject.toml @@ -16,6 +16,8 @@ exclude = ["models", "examples"] [tool.poetry.scripts] spineps = 'spineps.entrypoint:entry_point' +spineps_ = 'spineps.entrypoint:entrypoint_no_checks' + [tool.poetry.dependencies] python = "^3.10 || ^3.11" connected-components-3d = "^3.12.3" @@ -30,7 +32,7 @@ tqdm = "^4.66.1" einops= "^0.6.1" nnunetv2 = "2.4.2" tptbox = "^0.1.4" -antspyx = "*" +antspyx = "0.4.2" rich = "^13.6.0" diff --git a/spineps/seg_enums.py b/spineps/seg_enums.py index e4972e7..24d89e1 100755 --- a/spineps/seg_enums.py +++ b/spineps/seg_enums.py @@ -4,7 +4,7 @@ class MetaEnum(EnumMeta): - def __contains__(cls, item): # noqa: N805 + def __contains__(cls, item): try: cls[item] except ValueError: From f1d98a686af8916b7080742417929d9c81244cba Mon Sep 17 00:00:00 2001 From: robert Date: Thu, 29 Aug 2024 12:28:09 +0200 Subject: [PATCH 10/10] update test --- unit_tests/test_semantic.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/unit_tests/test_semantic.py b/unit_tests/test_semantic.py index e7b3176..206986b 100644 --- a/unit_tests/test_semantic.py +++ b/unit_tests/test_semantic.py @@ -33,7 +33,7 @@ def __init__( acquisition="sag", log_name="DummySegModel", modeltype="unet", - model_expected_orientation=["P", "I", "R"], + model_expected_orientation=("P", "I", "R"), available_folds=1, inference_augmentation=False, resolution_range=[0.75, 0.75, 1.65], @@ -65,10 +65,7 @@ def test_phase_preprocess(self): preprossed_input, errcode = preprocess_input(mri, debug_data={}, pad_size=pad_size, verbose=True) print(mri) print(preprossed_input) - - # backchanged_origin = tuple(preprossed_input.origin[idx] + origin_diff[idx] for idx in range(3)) - self.assertTrue(preprossed_input.assert_affine(origin=mri.origin, error_tolerance=origin_diff)) - # affine=mri.affine, + self.assertTrue(preprossed_input.assert_affine(origin=mri.origin, origin_tolerance=origin_diff)) self.assertTrue(preprossed_input.assert_affine(rotation=mri.rotation, orientation=mri.orientation, zoom=mri.zoom)) self.assertEqual(errcode, ErrCode.OK) for idx, s in enumerate(mri.shape):
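
Below is a minimal usage sketch mirroring the updated `spineps/example/helper_parallel.py` from patch 05. It shows the pieces these patches touch: the `get_semantic_model`/`get_instance_model` getters, the new `save_raw` and `snapshot_copy_folder` arguments of `process_img_nii`, and the `SPINEPS_TURN_OF_CITATION_REMINDER` environment variable honored since patch 07. The image path, dataset root, and snapshot folder are placeholders, not values taken from the patches.

```python
# Sketch only: mirrors spineps/example/helper_parallel.py after patch 05.
# The image path, dataset root, and snapshot folder below are placeholders.
import os
from pathlib import Path

from TPTBox import BIDS_FILE

from spineps.models import get_instance_model, get_semantic_model
from spineps.seg_run import process_img_nii

# Optional: silence the citation banner (checked via os.environ.get since patch 07).
os.environ["SPINEPS_TURN_OF_CITATION_REMINDER"] = "TRUE"

img = BIDS_FILE(file="PATH/TO/sub-01_T2w.nii.gz", dataset="PATH/TO/dataset")
snap_dir = Path("PATH/TO/snapshots")
snap_dir.mkdir(exist_ok=True, parents=True)

process_img_nii(
    img_ref=img,
    derivative_name="derivatives",
    model_semantic=get_semantic_model("t2w"),     # "t1w" or "vibe" also auto-download (patch 02)
    model_instance=get_instance_model("instance"),
    override_semantic=False,
    override_instance=False,
    save_debug_data=False,
    save_raw=False,                 # new flag (patch 05): skip writing the output_raw_* masks
    snapshot_copy_folder=snap_dir,  # optional extra folder for the snapshot copy
    verbose=False,
)
```

The packaged script does the same from the command line, as noted in its header comment: `python spineps/example/helper_parallel.py -i PATH/TO/IMG.nii.gz -ds DATASET-PATH -der derivatives -ms t2w -mv instance`.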