From f5ae024d99aff155ef2841310ff2d1449f81f4e6 Mon Sep 17 00:00:00 2001 From: iback Date: Tue, 26 Nov 2024 09:55:21 +0000 Subject: [PATCH] first version of a labeling step done. ALPHA --- pyproject.toml | 1 + spineps/architectures/pl_densenet.py | 2 - spineps/architectures/pl_unet.py | 2 - spineps/architectures/read_labels.py | 27 ----- spineps/entrypoint.py | 54 ++++++++-- spineps/get_models.py | 94 ++++++++++++++--- spineps/lab_model.py | 73 ++++++++----- spineps/phase_instance.py | 30 ------ spineps/phase_labeling.py | 151 ++++++++++++++++++++------- spineps/seg_enums.py | 9 +- spineps/seg_model.py | 20 ---- spineps/seg_pipeline.py | 10 +- spineps/seg_run.py | 16 ++- spineps/utils/auto_download.py | 14 ++- spineps/utils/find_min_cost_path.py | 64 ++++-------- spineps/utils/predictor.py | 1 + unit_tests/test_filepaths.py | 2 +- unit_tests/test_path.py | 92 ++++++++++++++++ unit_tests/test_proc_functions.py | 2 +- 19 files changed, 446 insertions(+), 218 deletions(-) create mode 100644 unit_tests/test_path.py diff --git a/pyproject.toml b/pyproject.toml index a7aedb1..eba8069 100755 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,6 +34,7 @@ nnunetv2 = "2.4.2" TPTBox = "^0.2.1" antspyx = "0.4.2" rich = "^13.6.0" +monai="^1.3.0" [tool.poetry.dev-dependencies] diff --git a/spineps/architectures/pl_densenet.py b/spineps/architectures/pl_densenet.py index f816c20..cd9a5f0 100644 --- a/spineps/architectures/pl_densenet.py +++ b/spineps/architectures/pl_densenet.py @@ -9,8 +9,6 @@ from torch import nn from TypeSaveArgParse import Class_to_ArgParse -from spineps.architectures.read_labels import Objectives - @dataclass class ARGS_MODEL(Class_to_ArgParse): diff --git a/spineps/architectures/pl_unet.py b/spineps/architectures/pl_unet.py index b65389d..7081e54 100755 --- a/spineps/architectures/pl_unet.py +++ b/spineps/architectures/pl_unet.py @@ -124,8 +124,6 @@ def _shared_metric_append(self, metrics, outputs): def _shared_cat_metrics(self, outputs): results = {} for m, v in outputs.items(): - # v = np.asarray(v) - # print(m, v.shape) stacked = torch.stack(v) results[m] = torch.mean(stacked) if m != "dice_p_cls" else torch.mean(stacked, dim=0) return results diff --git a/spineps/architectures/read_labels.py b/spineps/architectures/read_labels.py index 6f38940..87ed92e 100644 --- a/spineps/architectures/read_labels.py +++ b/spineps/architectures/read_labels.py @@ -195,30 +195,3 @@ def flatten(a: list[str | int | list[str] | list[int]]): else: for b in a: yield from flatten(b) - - -### - -# Eval-pipeline zuerst -# sensitivity, recall, AUC, ROC, F1, MCC -# dann MONAI baseline bauen mit Resnet, Densenet, ViT -if __name__ == "__main__": - objectives = Objectives( - [ - Target.FULLYVISIBLE, - Target.REGION, - Target.VERTREL, - Target.VERT, - ], - as_group=True, - ) - - entry_dict = { - "vert_exact": VertExact.L1, - "vert_region": VertRegion.LWS, - "vert_rel": VertRel.FIRST_LWK, - "vert_cut": True, - } - - label = objectives(entry_dict) - print(label) diff --git a/spineps/entrypoint.py b/spineps/entrypoint.py index fa1f805..accef20 100755 --- a/spineps/entrypoint.py +++ b/spineps/entrypoint.py @@ -8,10 +8,12 @@ from TPTBox import BIDS_FILE, Log_Type, No_Logger from spineps.get_models import ( + get_actual_model, get_instance_model, - get_segmentation_model, + get_labeling_model, get_semantic_model, modelid2folder_instance, + modelid2folder_labeling, modelid2folder_semantic, ) from spineps.seg_run import process_dataset, process_img_nii @@ -74,6 +76,7 @@ def parser_arguments(parser: 
argparse.ArgumentParser): def entry_point(): modelids_semantic = list(modelid2folder_semantic().keys()) modelids_instance = list(modelid2folder_instance().keys()) + modelids_labeling = [*list(modelid2folder_labeling().keys()), "none"] ########################### ########################### main_parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) @@ -107,6 +110,16 @@ def entry_point(): metavar="", help=f"The model used for the vertebra instance segmentation. Choices are {modelids_instance} or a string absolute path the model folder", ) + parser_sample.add_argument( + "-model_labeling", + "-ml", + # type=str.lower, + default="labeling", + # required=True, + # choices=modelids_labeling, + metavar="", + help=f"The model used for the vertebra labeling classification. Choices are {modelids_labeling} or a string absolute path to the model folder", + ) parser_sample = parser_arguments(parser_sample) ########################### @@ -135,6 +148,16 @@ def entry_point(): metavar="", help=f"The model used for the vertebra segmentation. Choices are {model_vert_choices} or a string absolute path the model folder", ) + parser_dataset.add_argument( + "-model_labeling", + "-ml", + # type=str.lower, + default="labeling", + # required=True, + # choices=modelids_labeling, + metavar="", + help=f"The model used for the vertebra labeling classification. Choices are {modelids_labeling} or a string absolute path to the model folder", + ) parser_dataset.add_argument( "-ignore_bids_filter", "-ibf", @@ -180,16 +203,23 @@ def run_sample(opt: Namespace): if not input_path.endswith(".nii.gz"): input_path += ".nii.gz" assert os.path.isfile(input_path), f"-input does not exist or is not a file, got {input_path}" # noqa: PTH113 - + # model semantic if "/" in str(opt.model_semantic): - # given path - model_semantic = get_segmentation_model(opt.model_semantic, use_cpu=opt.cpu).load() + model_semantic = get_actual_model(opt.model_semantic, use_cpu=opt.cpu).load() else: model_semantic = get_semantic_model(opt.model_semantic, use_cpu=opt.cpu).load() + # model instance if "/" in str(opt.model_instance): - model_instance = get_segmentation_model(opt.model_instance, use_cpu=opt.cpu).load() + model_instance = get_actual_model(opt.model_instance, use_cpu=opt.cpu).load() else: model_instance = get_instance_model(opt.model_instance, use_cpu=opt.cpu).load() + # model labeling + if opt.model_labeling == "none": + model_labeling = None + elif "/" in str(opt.model_labeling): + model_labeling = get_actual_model(opt.model_labeling, use_cpu=opt.cpu).load() + else: + model_labeling = get_labeling_model(opt.model_labeling, use_cpu=opt.cpu).load() bids_sample = BIDS_FILE(input_path, dataset=dataset, verbose=True) @@ -197,6 +227,7 @@ def run_sample(opt: Namespace): "img_ref": bids_sample, "model_semantic": model_semantic, "model_instance": model_instance, + "model_labeling": model_labeling, "derivative_name": opt.der_name, # # "save_uncertainty_image": opt.save_unc_img,
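The new `-model_labeling` flag defaults to "labeling" and accepts "none" to skip the labeling phase entirely. A minimal sketch of the resolution logic it feeds into, mirroring the branches in run_sample above (hypothetical standalone helper; "t2w_labeling" is the id registered by this patch's auto-download table):

```python
# How a -model_labeling value is resolved: "none" skips the phase, a path-like
# string is dispatched via its inference_config.json, anything else is a
# registry id lookup.
from spineps.get_models import get_actual_model, get_labeling_model


def resolve_labeling_model(opt_value: str, use_cpu: bool = False):
    if opt_value == "none":
        return None  # downstream code then skips perform_labeling_step
    if "/" in opt_value:
        return get_actual_model(opt_value, use_cpu=use_cpu).load()
    return get_labeling_model(opt_value, use_cpu=use_cpu).load()  # e.g. "t2w_labeling"
```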
@@ -245,7 +276,7 @@ def run_dataset(opt: Namespace): if opt.model_semantic == "auto": model_semantic = None elif "/" in str(opt.model_semantic): - model_semantic = get_segmentation_model(opt.model_semantic, use_cpu=opt.cpu).load() + model_semantic = get_actual_model(opt.model_semantic, use_cpu=opt.cpu).load() else: model_semantic = get_semantic_model(opt.model_semantic, use_cpu=opt.cpu).load() # Model Instance if opt.model_instance == "auto": model_instance = None elif "/" in str(opt.model_instance): - model_instance = get_segmentation_model(opt.model_instance, use_cpu=opt.cpu).load() + model_instance = get_actual_model(opt.model_instance, use_cpu=opt.cpu).load() else: model_instance = get_instance_model(opt.model_instance, use_cpu=opt.cpu).load() + # Model Labeling + if opt.model_labeling == "none": + model_labeling = None + elif "/" in str(opt.model_labeling): + model_labeling = get_actual_model(opt.model_labeling, use_cpu=opt.cpu).load() + else: + model_labeling = get_labeling_model(opt.model_labeling, use_cpu=opt.cpu).load() + assert model_instance is not None, "-model_vert was None" kwargs = { "dataset_path": input_dir, "model_semantic": model_semantic, "model_instance": model_instance, + "model_labeling": model_labeling, "rawdata_name": opt.raw_name, "derivative_name": opt.der_name, # diff --git a/spineps/get_models.py b/spineps/get_models.py index 91c4d91..8f2b2f5 100755 --- a/spineps/get_models.py +++ b/spineps/get_models.py @@ -3,9 +3,10 @@ from TPTBox import Log_Type, No_Logger -from spineps.seg_enums import Modality -from spineps.seg_model import Segmentation_Model, modeltype2class -from spineps.utils.auto_download import download_if_missing, instances, semantic +from spineps.lab_model import VertLabelingClassifier +from spineps.seg_enums import Modality, ModelType, SpinepsPhase +from spineps.seg_model import Segmentation_Model, Segmentation_Model_NNunet, Segmentation_Model_Unet3D +from spineps.utils.auto_download import download_if_missing, instances, labeling, semantic from spineps.utils.filepaths import get_mri_segmentor_models_dir, search_path from spineps.utils.seg_modelconfig import load_inference_config @@ -38,8 +39,8 @@ def get_semantic_model(model_name: str, **kwargs) -> Segmentation_Model: config_path = _modelid2folder_subreg[model_name] if str(config_path).startswith("http"): # Resolve HTTP - config_path = download_if_missing(model_name, config_path, is_instance=False) - return get_segmentation_model(config_path, **kwargs) + config_path = download_if_missing(model_name, config_path, phase=SpinepsPhase.SEMANTIC) + return get_actual_model(config_path, **kwargs) def get_instance_model(model_name: str, **kwargs) -> Segmentation_Model: @@ -66,13 +67,43 @@ def get_instance_model(model_name: str, **kwargs) -> Segmentation_Model: config_path = _modelid2folder_vert[model_name] if str(config_path).startswith("http"): # Resolve HTTP - config_path = download_if_missing(model_name, config_path, is_instance=True) + config_path = download_if_missing(model_name, config_path, phase=SpinepsPhase.INSTANCE) - return get_segmentation_model(config_path, **kwargs) + return get_actual_model(config_path, **kwargs) + + +def get_labeling_model(model_name: str, **kwargs) -> VertLabelingClassifier: + """Finds and returns a labeling model by name + + Args: + model_name (str): _description_ + + Returns: + VertLabelingClassifier: _description_ + """ + model_name = model_name.lower() + _modelid2folder_labeling = modelid2folder_labeling() + possible_keys = list(_modelid2folder_labeling.keys()) + if len(possible_keys) == 0: + logger.print( + "Found no available labeling models. Did you download the model weights and place them in the folder set by the environment variable, or did you mean to pass an absolute path to a model folder instead?", Log_Type.FAIL, ) raise KeyError(model_name) if model_name not in possible_keys: logger.print(f"Model with name {model_name} does not exist, options are {possible_keys}", Log_Type.FAIL) raise KeyError(model_name) config_path = _modelid2folder_labeling[model_name] if str(config_path).startswith("http"): + # Resolve HTTP + config_path = download_if_missing(model_name, config_path, phase=SpinepsPhase.LABELING) + + return get_actual_model(config_path, **kwargs) _modelid2folder_semantic: dict[str, Path | str] | None = None _modelid2folder_instance: dict[str, Path | str] | None = None +_modelid2folder_labeling: dict[str, Path | str] | None = None def modelid2folder_semantic() -> dict[str, Path | str]: @@ -99,6 +130,18 @@ def modelid2folder_instance() -> dict[str, Path | str]: return check_available_models(get_mri_segmentor_models_dir())[1] +def modelid2folder_labeling() -> dict[str, Path | str]: + """Returns the dictionary mapping labeling model ids to their corresponding path + + Returns: + _type_: _description_ + """ + if _modelid2folder_labeling is not None: + return _modelid2folder_labeling + else: + return check_available_models(get_mri_segmentor_models_dir())[2] + + def check_available_models(models_folder: str | Path, verbose: bool = False) -> tuple[dict[str, Path | int], dict[str, Path | int]]: """Searches through the specified directories and finds models, sorting them into the dictionaries mapping to instance or semantic models @@ -115,15 +158,18 @@ def check_available_models(models_folder: str | Path, verbose: bool = False) -> assert models_folder.exists(), f"models_folder {models_folder} does not exist" config_paths = search_path(models_folder, query="**/inference_config.json", suppress=True) - global _modelid2folder_semantic, _modelid2folder_instance # noqa: PLW0603 + global _modelid2folder_semantic, _modelid2folder_instance, _modelid2folder_labeling # noqa: PLW0603 _modelid2folder_semantic = semantic # id to model_folder _modelid2folder_instance = instances # id to model_folder + _modelid2folder_labeling = labeling for cp in config_paths: model_folder = cp.parent model_folder_name = model_folder.name.lower() try: inference_config = load_inference_config(str(cp)) - if Modality.SEG in inference_config.modalities: + if inference_config.modeltype == ModelType.classifier: + _modelid2folder_labeling[model_folder_name] = model_folder + elif Modality.SEG in inference_config.modalities: _modelid2folder_instance[model_folder_name] = model_folder else: _modelid2folder_semantic[model_folder_name] = model_folder @@ -131,10 +177,32 @@ def check_available_models(models_folder: str | Path, verbose: bool = False) -> logger.print(f"Modelfolder '{model_folder_name}' ignored, caused by '{e}'", Log_Type.STRANGE, verbose=verbose) # raise e # - return _modelid2folder_semantic, _modelid2folder_instance + return _modelid2folder_semantic, _modelid2folder_instance, _modelid2folder_labeling + + +def modeltype2class(modeltype: ModelType): + """Maps ModelType to actual Segmentation_Model Subclass + + Args: + type (ModelType): _description_ + + Raises: + NotImplementedError: _description_ + + Returns: + _type_: _description_ + """ + if modeltype == ModelType.nnunet: + return Segmentation_Model_NNunet + elif modeltype == ModelType.unet: + return Segmentation_Model_Unet3D + elif modeltype == ModelType.classifier: + return VertLabelingClassifier + else: + raise NotImplementedError(modeltype)
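check_available_models now returns three registries instead of two, and modeltype2class (moved here from seg_model.py) dispatches the new ModelType.classifier to VertLabelingClassifier. A hedged sketch of inspecting what is installed locally; the output depends entirely on the configured models directory:

```python
# List every model id SPINEPS can resolve, grouped by phase; ids come from the
# inference_config.json files discovered under the models directory.
from spineps.get_models import (
    modelid2folder_instance,
    modelid2folder_labeling,
    modelid2folder_semantic,
)

for phase, registry in {
    "semantic": modelid2folder_semantic(),
    "instance": modelid2folder_instance(),
    "labeling": modelid2folder_labeling(),
}.items():
    print(phase, "->", sorted(registry.keys()))
```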
-def get_segmentation_model(in_config: str | Path, **kwargs) -> Segmentation_Model: +def get_actual_model(in_config: str | Path, **kwargs) -> Segmentation_Model | VertLabelingClassifier: """Creates the Model class from given path Args: path_search = search_path(in_dir, "**/*inference_config.json", suppress=True) if len(path_search) == 0: logger.print( - f"get_segmentation_model: did not find a singular inference_config.json in {in_dir}/**/*inference_config.json. Is this the correct folder?", + f"get_actual_model: did not find a singular inference_config.json in {in_dir}/**/*inference_config.json. Is this the correct folder?", Log_Type.FAIL, ) raise FileNotFoundError(f"{in_dir}/**/*inference_config.json") assert ( len(path_search) == 1 ), f"get_actual_model: found more than one inference_config.json in {in_dir}/**/*inference_config.json. Ambiguous behavior, please manually correct this by removing one of these.\nFound {path_search}" in_dir = path_search[0] # else: # base = filepath_model(in_config, model_dir=None) diff --git a/spineps/lab_model.py b/spineps/lab_model.py index 2fad92d..3d80b9c 100755 --- a/spineps/lab_model.py +++ b/spineps/lab_model.py @@ -1,22 +1,33 @@ +import os from pathlib import Path import numpy as np import torch from monai.transforms import CenterSpatialCropd, Compose, NormalizeIntensityd, ToTensor from TPTBox import NII, Log_Type, No_Logger, np_utils +from typing_extensions import Self from spineps.architectures.pl_densenet import PLClassifier +from spineps.seg_enums import OutputType +from spineps.seg_model import Segmentation_Inference_Config, Segmentation_Model +from spineps.utils.filepaths import search_path logger = No_Logger(prefix="VertLabelingClassifier") -class VertLabelingClassifier: - def __init__(self, model: PLClassifier): - self.model: PLClassifier = model +class VertLabelingClassifier(Segmentation_Model): + def __init__( + self, + model_folder: str | Path, + inference_config: Segmentation_Inference_Config | None = None, # type:ignore + use_cpu: bool = False, + default_verbose: bool = False, + default_allow_tqdm: bool = True, + ): + super().__init__(model_folder, inference_config, use_cpu, default_verbose, default_allow_tqdm) + assert len(self.inference_config.expected_inputs) == 1, "VertLabelingClassifier expects exactly one input" + # self.model: PLClassifier = model self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - model.to(self.device) - self.model.eval() - self.model.net.eval() final_size: tuple[int, int, int] = (152, 168, 32) self.totensor = ToTensor() self.transform = Compose( [ ] ) + def load(self, folds: tuple[str, ...] | None = None) -> Self: # noqa: ARG002 + assert os.path.exists(self.model_folder) # noqa: PTH110 + + chktpath = search_path(self.model_folder, "**/*val_f1=*valf1-weights.ckpt") + assert len(chktpath) >= 1, chktpath + model = PLClassifier.load_from_checkpoint(checkpoint_path=chktpath[-1]) + model.eval() + model.net.eval() + self.device = torch.device("cuda:0" if torch.cuda.is_available() and not self.use_cpu else "cpu") + model.to(self.device) + self.predictor = model + self.print("Model loaded from", self.model_folder, verbose=True) + return self + + def run( + self, + input_nii: list[NII], + verbose: bool = False, + ) -> dict[OutputType, NII | None]: + raise NotImplementedError("Does not apply to a classifier model") + + def segment_scan(*args, **kwargs): + raise NotImplementedError("Does not apply to a classifier model") + @classmethod def from_modelfolder(cls, model_folder: str | Path): raise NotImplementedError() - # pass # find checkpoint yourself, then load from checkpoitn path + # find checkpoint yourself, then load from checkpoint path @classmethod def from_checkpoint_path(cls, checkpoint_path: str | Path): if isinstance(checkpoint_path, str): checkpoint_path = Path(checkpoint_path) assert checkpoint_path.exists(), f"Checkpoint path does not exist: {checkpoint_path}" - model = PLClassifier.load_from_checkpoint( - str(checkpoint_path), - # opt=ARGS_MODEL(), - # objectives=Objectives( - # [ - # Target.VERT, - # Target.REGION, - # Target.VERTREL, - # Target.FULLYVISIBLE, - # ] - # ), - ) - # print("weight", model.classification_heads["REGION"].weight[:5]) - d = cls(model) + # model = PLClassifier.load_from_checkpoint( + # str(checkpoint_path), + # ) + d = cls(checkpoint_path.parent.parent) logger.print("Model loaded from", checkpoint_path, verbose=True) return d @@ -117,8 +142,8 @@ def _run_array(self, img_arr: np.ndarray): # , seg_arr: np.ndarray): model_input = model_input.to(torch.float32) model_input = model_input.to(self.device) - self.model.eval() - logits_dict = self.model.forward(model_input) - logits_soft = {k: self.model.softmax(v)[0].detach().cpu().numpy() for k, v in logits_dict.items()} + self.predictor.eval() + logits_dict = self.predictor.forward(model_input) + logits_soft = {k: self.predictor.softmax(v)[0].detach().cpu().numpy() for k, v in logits_dict.items()} pred_cls = {k: np.argmax(v, 0) for k, v in logits_soft.items()} return logits_soft, pred_cls diff --git a/spineps/phase_instance.py b/spineps/phase_instance.py index 9c90f3f..c2d1a20 100755 --- a/spineps/phase_instance.py +++ b/spineps/phase_instance.py @@ -643,18 +643,6 @@ def create_prediction_couples( n_predictions = hierarchical_predictions.shape[0] coupled_predictions = {} - - # TODO LANGSAMER!!! multiprocessing Pool verwenden?
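VertLabelingClassifier now loads through the common Segmentation_Model machinery but only supports the classification path, so run() and segment_scan() raise deliberately. A sketch of the per-crop output contract of _run_array, with head names and class counts taken from this patch (the probability values are illustrative only):

```python
# The classifier returns two dicts keyed by prediction head:
#   logits_soft - softmaxed scores per head
#   pred_cls    - argmax class per head
# Head sizes in this patch: FULLYVISIBLE=2, REGION=3, VERT=24, VERTREL=6.
import numpy as np

logits_soft = {
    "FULLYVISIBLE": np.array([0.08, 0.92]),  # [cut, fully visible]
    "REGION": np.array([0.05, 0.15, 0.80]),  # [cervical, thoracic, lumbar]
    "VERT": np.full(24, 1 / 24),             # one score per vertebra class
    "VERTREL": np.array([0.70, 0.05, 0.05, 0.05, 0.10, 0.05]),
}
pred_cls = {k: int(np.argmax(v, 0)) for k, v in logits_soft.items()}
assert pred_cls["REGION"] == 2  # lumbar
```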
- # task = [] - # for idx in range(0, n_predictions): - # for pred in range(3): - # task.append( - # delayed(find_prediction_couple)( - # idx, pred, hierarchical_predictions, hierarchical_existing_predictions, n_predictions, verbose - # ) - # ) - # result = Parallel(n_jobs=5)(task) - # TODO try to calculate list of candidates here, take the predictions and then parallelize the find_prediction_couple for idx in range(n_predictions): @@ -668,24 +656,6 @@ def create_prediction_couples( coupled_predictions[couple] = [agreement] else: coupled_predictions[couple].append(agreement) - # with get_context("spawn").Pool() as pool: - # instance_pairs = [ - # (idx, pred, hierarchical_predictions.copy(), hierarchical_existing_predictions, n_predictions) - # for idx in range(0, n_predictions) - # for pred in range(3) - # ] - - # result = pool.starmap(find_prediction_couple, instance_pairs) - - # for r in result: - # couple = r[0] - # agreement = r[1] - # if couple is None: - # continue - # if couple not in coupled_predictions: - # coupled_predictions[couple] = [agreement] - # else: - # coupled_predictions[couple].append(agreement) coupled_predictions = {i: sum(v) / len(v) for i, v in coupled_predictions.items()} coupled_predictions = dict( sorted( diff --git a/spineps/phase_labeling.py b/spineps/phase_labeling.py index ca6c466..44edba8 100644 --- a/spineps/phase_labeling.py +++ b/spineps/phase_labeling.py @@ -7,11 +7,13 @@ from TPTBox import NII, Log_Type, No_Logger from spineps.architectures.read_labels import VertExact +from spineps.get_models import get_actual_model from spineps.lab_model import VertLabelingClassifier from spineps.utils.find_min_cost_path import find_most_probably_sequence check_dir = "/DATA/NAS/ongoing_projects/hendrik/nako-segmentation/code/classifier/lightning_logs/" -model_p = "densenet169_v2_multilabel_img_a14_tr100-101-102_val103_ad3_withFCN/version_0/checkpoints/epoch=1-step=1542-val_f1=0.9791_valf1-weights.ckpt" +model_p = "densenet169_v2_multilabel_img_a17_tr100-101-102-103-104_valtr_ad3_withFCN/version_0" +# "densenet169_v2_multilabel_img_a17_tr100-101-102-103-104_valtr_ad3_withFCN/version_0/" logger = No_Logger(prefix="LabelingPhase") @@ -20,26 +22,49 @@ THOR = slice(7, 19) # 7 to 18 LUMB = slice(19, None) # 19 to end (23) +DIVIDE_BY_ZERO_OFFSET = 1e-6 -def perform_labeling_step(img_nii: NII, vert_nii: NII): - model = VertLabelingClassifier.from_checkpoint_path(check_dir + model_p) - img = img_nii.reorient() - vert = vert_nii.reorient() - img.assert_affine(other=vert) - img.assert_affine(zoom=(0.8571, 0.8571, 3.3)) +def perform_labeling_step(model: VertLabelingClassifier, img_nii: NII, vert_nii: NII): + if model is None: + model = get_actual_model( + in_config=Path(check_dir + model_p), + ) + model.load() + # run model + labelmap = run_model_for_vert_labeling(model, img_nii, vert_nii)[0] + # relabel according to labelmap + return vert_nii.map_labels_(labelmap) + + +def run_model_for_vert_labeling( + model: VertLabelingClassifier, + img_nii: NII, + vert_nii: NII, + verbose: bool = False, +): + # reorient + img = img_nii.reorient(model.inference_config.model_expected_orientation, verbose=logger) + vert = vert_nii.reorient(model.inference_config.model_expected_orientation, verbose=logger) + zms_pir = img.zoom + # rescale + img.rescale_(model.calc_recommended_resampling_zoom(zms_pir), verbose=logger) + vert.rescale_(model.calc_recommended_resampling_zoom(zms_pir), verbose=logger) + # + img.assert_affine(other=vert) # extract vertebrae vert.extract_label_(list(range(1, 26)), 
keep_label=True) # counted label orig_label = vert.unique() # run model predictions = model.run_all_seg_instances(img, vert) - fcost, fpath, fpath_post, costlist = find_vert_path_from_predictions(predictions) # TODO arguments - # offset because C1 is 0, not 1 as in mask + fcost, fpath, fpath_post, costlist, min_costs_path, args = find_vert_path_from_predictions( + predictions=predictions, + verbose=verbose, + ) assert len(orig_label) == len(fpath_post), f"{len(orig_label)} != {len(fpath_post)}" labelmap = {orig_label[idx]: fpath_post[idx] for idx in range(len(orig_label))} - # relabel according to fpath_post - return vert_nii.map_labels_(labelmap) + return labelmap, fcost, fpath, fpath_post, costlist, min_costs_path, predictions def region_to_vert(region_softmax_values: np.ndarray): # shape(1,3) @@ -50,18 +75,28 @@ def region_to_vert(region_softmax_values: np.ndarray): # shape(1,3) return vert_prediction_values -def prepare_vert(vert_softmax_values: np.ndarray, gaussian_sigma: float = 0.85, gaussian_radius: int = 2): - # region-wise gaussian? - softmax_values2 = vert_softmax_values.copy() - for s in [CERV, THOR, LUMB]: - softmax_values2[s] = gaussian_filter1d(softmax_values2[s], sigma=gaussian_sigma, mode="nearest", radius=gaussian_radius) - softmax_values2 /= np.sum(softmax_values2) - return softmax_values2 +def prepare_vert( + vert_softmax_values: np.ndarray, + gaussian_sigma: float = 0.85, + gaussian_radius: int = 2, + gaussian_regionwise: bool = True, +): + # gaussian region-wise + softmax_values = vert_softmax_values.copy() + if gaussian_sigma > 0.0: + if gaussian_regionwise: + for s in [CERV, THOR, LUMB]: + softmax_values[s] = gaussian_filter1d(softmax_values[s], sigma=gaussian_sigma, mode="nearest", radius=gaussian_radius) + else: + softmax_values = gaussian_filter1d(softmax_values, sigma=gaussian_sigma, mode="nearest", radius=gaussian_radius) + softmax_values /= np.sum(softmax_values) + DIVIDE_BY_ZERO_OFFSET + return softmax_values def prepare_visible(predictions: dict, visible_w: float = 1.0, gaussian_sigma: float = 0.8, gaussian_radius: int = 2): visible_chain = np.asarray([k["soft"]["FULLYVISIBLE"][1] for v, k in predictions.items()]) - visible_chain = gaussian_filter1d(visible_chain, sigma=gaussian_sigma, mode="constant", radius=gaussian_radius) + if gaussian_sigma > 0.0: + visible_chain = gaussian_filter1d(visible_chain, sigma=gaussian_sigma, mode="constant", radius=gaussian_radius) visible_chain = np.round(visible_chain, 3) # weighting visible_chain = 1 - visible_chain @@ -74,56 +109,102 @@ def prepare_visible(predictions: dict, visible_w: float = 1.0, gaussian_sigma: f def prepare_region(region_softmax_values: np.ndarray, gaussian_sigma: float = 0.75, gaussian_radius: int = 1): softmax_values = region_to_vert(region_softmax_values) - softmax_values2 = gaussian_filter1d(softmax_values, sigma=gaussian_sigma, mode="nearest", radius=gaussian_radius) - softmax_values2 /= np.sum(softmax_values2) - return softmax_values2 + if gaussian_sigma > 0.0: + softmax_values = gaussian_filter1d(softmax_values, sigma=gaussian_sigma, mode="nearest", radius=gaussian_radius) + softmax_values /= np.sum(softmax_values) + DIVIDE_BY_ZERO_OFFSET + return softmax_values + + +def prepare_vertrel(vertrel_softmax_values: np.ndarray, gaussian_sigma: float = 0.75, gaussian_radius: int = 1): + softmax_values = vertrel_softmax_values.copy() + if gaussian_sigma > 0.0: + softmax_values = gaussian_filter1d(softmax_values, sigma=gaussian_sigma, mode="nearest", radius=gaussian_radius) + softmax_values /= 
np.sum(softmax_values) + DIVIDE_BY_ZERO_OFFSET + return softmax_values def find_vert_path_from_predictions( predictions, visible_w: float = 1.0, - vert_w: float = 1.0, - region_w: float = 1.0, - vertrel_w: float = 1.0, + vert_w: float = 0.75, + region_w: float = 0.75, + vertrel_w: float = 0.5, disable_c1: bool = True, - boost_c2: bool = True, + boost_c2: float = 3.0, allow_cervical_skip: bool = True, # punish_multiple_sequence: float = 0.0, punish_skip_sequence: float = 0.0, - # TODO all the parameters (gaussian sigmas and radius? region-wise vert or not) + # + region_gaussian_sigma: float = 0.0, # 0 means no gaussian + vert_gaussian_sigma: float = 1.0, # 0 means no gaussian + vert_gaussian_regionwise: bool = True, + vertrel_gaussian_sigma: float = 0.75, # 0 means no gaussian # TODO preprocess vertrel so that if gap between 3/4 is linear interpolated + # + verbose: bool = False, ): + args = locals() assert 0 <= visible_w, visible_w # noqa: SIM300 assert 0 <= vert_w, vert_w # noqa: SIM300 assert 0 <= region_w, region_w # noqa: SIM300 assert 0 <= vertrel_w, vertrel_w # noqa: SIM300 + assert 0 <= boost_c2, boost_c2 # noqa: SIM300 cost_matrix = np.zeros((len(predictions), 24)) # TODO 24 fix? relative_cost_matrix = np.zeros((len(predictions), 6)) # TODO 6 fix? visible_chain = prepare_visible(predictions, visible_w) # print(visible_chain) + if verbose: + print("visible_w", visible_w) + print("vert_w", vert_w) + print("region_w", region_w) + print("vertrel_w", vertrel_w) + print("disable_c1", disable_c1) + print("boost_c2", boost_c2) + print("allow_cervical_skip", allow_cervical_skip) + print("region_gaussian_sigma", region_gaussian_sigma) + print("vert_gaussian_sigma", vert_gaussian_sigma) + print("vert_gaussian_regionwise", vert_gaussian_regionwise) + print("vertrel_gaussian_sigma", vertrel_gaussian_sigma) + # for idx, (_, k) in enumerate(predictions.items()): - region_values = np.multiply(prepare_region(k["soft"]["REGION"]), region_w) - vert_values = np.multiply(prepare_vert(k["soft"]["VERT"]), vert_w) + region_values = np.multiply( + prepare_region( + k["soft"]["REGION"], + gaussian_sigma=region_gaussian_sigma, + ), + region_w, + ) + vert_values = np.multiply( + prepare_vert( + k["soft"]["VERT"], + gaussian_sigma=vert_gaussian_sigma, + gaussian_regionwise=vert_gaussian_regionwise, + ), + vert_w, + ) # # add region and vert final_vert_pred = np.add(region_values, vert_values) # normalize - final_vert_pred /= np.sum(final_vert_pred) + final_vert_pred /= np.sum(final_vert_pred) + DIVIDE_BY_ZERO_OFFSET # boost c2 if enabled - if boost_c2 and np.argmax(final_vert_pred) == 1: - final_vert_pred = np.multiply(final_vert_pred, 3.0) + if boost_c2 > 0.0 and np.argmax(final_vert_pred) == 1: + final_vert_pred = np.multiply(final_vert_pred, boost_c2) # then multiply with visible factor final_vert_pred = np.multiply(final_vert_pred, visible_chain[idx]) cost_matrix[idx] = final_vert_pred # relative gets own matrix - relative_cost_matrix[idx] = k["soft"]["VERTREL"] + relative_cost_matrix[idx] = prepare_vertrel( + k["soft"]["VERTREL"], + gaussian_sigma=vertrel_gaussian_sigma, + ) cost_matrix = np.asarray(cost_matrix) # invert rel cost relative_cost_matrix = np.multiply(-relative_cost_matrix, vertrel_w) # - fcost, fpath = find_most_probably_sequence( + fcost, fpath, min_costs_path = find_most_probably_sequence( # input cost_matrix, min_start_class=0 if not disable_c1 else 1, @@ -142,7 +223,7 @@ def find_vert_path_from_predictions( ) # post processing fpath_post = fpath_post_processing(fpath) - return fcost, 
fpath, fpath_post, cost_matrix.tolist() + return fcost, fpath, fpath_post, cost_matrix.tolist(), min_costs_path, args def fpath_post_processing(fpath): diff --git a/spineps/seg_enums.py b/spineps/seg_enums.py index a4ec94a..a397b76 100755 --- a/spineps/seg_enums.py +++ b/spineps/seg_enums.py @@ -4,7 +4,7 @@ class MetaEnum(EnumMeta): - def __contains__(cls, item): + def __contains__(cls, item): # noqa: N805 try: cls[item] except ValueError: @@ -107,9 +107,16 @@ def format_keys(cls, acquisition: Self) -> list[str]: raise NotImplementedError(acquisition) +class SpinepsPhase(Enum_Compare): + SEMANTIC = auto() + INSTANCE = auto() + LABELING = auto() + + class ModelType(Enum_Compare): nnunet = auto() unet = auto() + classifier = auto() class InputType(Enum_Compare): diff --git a/spineps/seg_model.py b/spineps/seg_model.py index 3149e5a..833fddf 100755 --- a/spineps/seg_model.py +++ b/spineps/seg_model.py @@ -374,23 +374,3 @@ def run( seg_nii: NII = input_nii.set_array(pred_cls) self.print("out", seg_nii.zoom, seg_nii.orientation, seg_nii.shape) if verbose else None return {OutputType.seg: seg_nii} - - -def modeltype2class(modeltype: ModelType): - """Maps ModelType to actual Segmentation_Model Subclass - - Args: - type (ModelType): _description_ - - Raises: - NotImplementedError: _description_ - - Returns: - _type_: _description_ - """ - if modeltype == ModelType.nnunet: - return Segmentation_Model_NNunet - elif modeltype == ModelType.unet: - return Segmentation_Model_Unet3D - else: - raise NotImplementedError(modeltype) diff --git a/spineps/seg_pipeline.py b/spineps/seg_pipeline.py index 7b84add..6901b0b 100755 --- a/spineps/seg_pipeline.py +++ b/spineps/seg_pipeline.py @@ -36,7 +36,7 @@ def predict_centroids_from_both( vert_nii_cleaned: NII, seg_nii: NII, - models: list[Segmentation_Model], + models: list[Segmentation_Model | None], parameter: dict[str, Any], ): """Calculates the centroids of each vertebra corpus by using both semantic and instance mask @@ -64,7 +64,10 @@ def predict_centroids_from_both( models_repr = {} for idx, m in enumerate(models): - models_repr[idx] = m.dict_representation() + if m is not None: + models_repr[idx] = m.dict_representation() + else: + models_repr[idx] = {"name": "No Model"} ctd.info["source"] = "MRI Segmentation Pipeline" ctd.info["version"] = pipeline_version() ctd.info["models"] = models_repr @@ -75,9 +78,6 @@ def predict_centroids_from_both( return ctd -# TODO make automatic version of this repo (below is the repo the code is called from... 
-.-) - - def pipeline_version(): try: label = subprocess.check_output(["git", "rev-list", "--count", "main"]).strip() diff --git a/spineps/seg_run.py b/spineps/seg_run.py index 5e82ffd..97b4213 100755 --- a/spineps/seg_run.py +++ b/spineps/seg_run.py @@ -11,7 +11,7 @@ from TPTBox.spine.snapshot2D.snapshot_templates import mri_snapshot from spineps.phase_instance import predict_instance_mask -from spineps.phase_labeling import perform_labeling_step +from spineps.phase_labeling import VertLabelingClassifier, perform_labeling_step from spineps.phase_post import phase_postprocess_combined from spineps.phase_pre import preprocess_input from spineps.phase_semantic import predict_semantic_mask @@ -27,6 +27,8 @@ def process_dataset( dataset_path: Path, model_instance: Segmentation_Model, model_semantic: list[Segmentation_Model] | Segmentation_Model | None = None, + model_labeling: VertLabelingClassifier | None = None, + # rawdata_name: str = "rawdata", derivative_name: str = "derivatives_seg", modalities: list[Modality_Pair] | Modality_Pair = [(Modality.T2w, Acquisition.sag)], # noqa: B006 @@ -126,6 +128,7 @@ def process_dataset( compatible = True for idx, mp in enumerate(modalities): compatible = False if not check_model_modality_acquisition(model_semantic[idx], mp) else compatible + compatible = False if model_labeling is not None and not check_model_modality_acquisition(model_labeling, mp) else compatible del idx, mp if not compatible and not ignore_model_compatibility: @@ -174,6 +177,8 @@ def process_dataset( img_ref=s, model_semantic=model, model_instance=model_instance, + model_labeling=model_labeling, + # derivative_name=derivative_name, # # save_uncertainty_image=save_uncertainty_image, @@ -238,6 +243,7 @@ def process_img_nii( # noqa: C901 img_ref: BIDS_FILE, model_semantic: Segmentation_Model, model_instance: Segmentation_Model, + model_labeling: VertLabelingClassifier | None = None, derivative_name: str = "derivatives_seg", # # save_uncertainty_image: bool = False, @@ -357,7 +363,8 @@ def process_img_nii( # noqa: C901 proc_normalize_input = False # Never normalize input if it is an CT compatible = check_input_model_compatibility(img_ref, model=model_semantic) - if not compatible: + compatible_labeling = check_input_model_compatibility(img_ref, model=model_labeling) if model_labeling is not None else True + if not (compatible and compatible_labeling): if not ignore_compatibility_issues: return output_paths, ErrCode.COMPATIBILITY else: @@ -478,7 +485,8 @@ def process_img_nii( # noqa: C901 # input_package.make_nii_from_this(seg_nii_clean) # input_package.make_nii_from_this(vert_nii_clean) - vert_nii_clean = perform_labeling_step(input_nii, vert_nii_clean) + if model_labeling is not None: + vert_nii_clean = perform_labeling_step(model=model_labeling, img_nii=input_nii, vert_nii=vert_nii_clean) seg_nii_clean.save(out_spine, verbose=logger) vert_nii_clean.save(out_vert, verbose=logger) @@ -492,7 +500,7 @@ def process_img_nii( # noqa: C901 ctd = predict_centroids_from_both( vert_nii_clean, seg_nii_clean, - models=[model_semantic, model_instance], + models=[model_semantic, model_instance, model_labeling], # TODO add labeling info and parameters parameter={l: v for l, v in arguments.items() if "proc_" in l}, ) ctd.resample_from_to(input_nii_).save(out_ctd, verbose=logger) diff --git a/spineps/utils/auto_download.py b/spineps/utils/auto_download.py index 0b16ded..4d37999 100644 --- a/spineps/utils/auto_download.py +++ b/spineps/utils/auto_download.py @@ -6,11 +6,19 @@ from TPTBox import 
Print_Logger from tqdm import tqdm +from spineps.seg_enums import SpinepsPhase from spineps.utils.filepaths import get_mri_segmentor_models_dir link = "https://github.com/Hendrik-code/spineps/releases/download/" current_highest_version = "v1.0.9" current_instance_highest_version = "v1.2.0" +current_labeling_highest_version = "v1.3.0" + +phase_to_version: dict[SpinepsPhase, str] = { + SpinepsPhase.SEMANTIC: current_highest_version, + SpinepsPhase.INSTANCE: current_instance_highest_version, + SpinepsPhase.LABELING: current_labeling_highest_version, +} instances: dict[str, Path | str] = {"instance": link + current_instance_highest_version + "/instance.zip"} semantic: dict[str, Path | str] = { @@ -18,6 +26,7 @@ "t1w": link + current_highest_version + "/t1w.zip", "vibe": link + current_highest_version + "/vibe.zip", } +labeling: dict[str, Path | str] = {"t2w_labeling": link + current_instance_highest_version + "/t2w_labeling.zip"} download_names = { @@ -25,11 +34,12 @@ "t2w": "T2w_semantic", "t1w": "T1w_semantic", "vibe": "Vibe_semantic", + "t2w_labeling": "T2w_labeling", } -def download_if_missing(key, url, is_instance: bool): - version = current_highest_version if not is_instance else current_instance_highest_version +def download_if_missing(key, url, phase: SpinepsPhase): + version = phase_to_version[phase] out_path = Path(get_mri_segmentor_models_dir(), download_names[key] + "_" + version) if not out_path.exists(): download_weights(url, out_path) diff --git a/spineps/utils/find_min_cost_path.py b/spineps/utils/find_min_cost_path.py index e77f58e..fdffce8 100644 --- a/spineps/utils/find_min_cost_path.py +++ b/spineps/utils/find_min_cost_path.py @@ -22,6 +22,12 @@ def c_to_region_idx(c: int, regions: list[int]): return len(regions) - 1 +def internal_to_real_path(p): + pat = sorted(p, key=lambda x: x[0]) + pat = [x[1] for x in pat] + return pat + + def find_most_probably_sequence( # noqa: C901 cost: np.ndarray | list[int], # @@ -43,7 +49,7 @@ def find_most_probably_sequence( # noqa: C901 allow_skip_at_region: list[int] | None = None, punish_skip_at_region_sequence: float = 0.2, ) -> tuple[float, list[int]]: - # convert to np arrays + # default mutable arguments if allow_skip_at_region is None: allow_skip_at_region = [0] if allow_skip_at_class is None: @@ -52,6 +58,7 @@ def find_most_probably_sequence( # noqa: C901 allow_multiple_at_class = [18, 23] if regions is None: regions = [0, 7, 19] + # convert to np arrays if isinstance(cost, list): cost = np.asarray(cost) if region_rel_cost is not None and isinstance(region_rel_cost, list): @@ -66,7 +73,7 @@ def find_most_probably_sequence( # noqa: C901 regions_ranges = None if region_rel_cost is not None: if n_classes < regions[-1]: - warn(f"n_classes < defined regions, got {n_classes} and {regions}", stacklevel=2) + warn(f"n_classes < defined regions, got {n_classes} and {regions}", stacklevel=3) regions.append(n_classes) regions_ranges = [(regions[i], regions[i + 1] - 1) for i in range(len(regions) - 1)] region_rel_shape = region_rel_cost.shape @@ -93,7 +100,9 @@ def minCostAlgo(r, c): # start point if c == -1 and r == -1: # go over each possible start column - options = [minCostAlgo(r=0, c=cc) for cc in range(min_start_class, n_classes)] + options = [] + for cc in range(min_start_class, n_classes): + options.append(minCostAlgo(r=0, c=cc)) # noqa: PERF401 minidx, minval = argmin([o[0] for o in options]) return minval, options[minidx][1] # stepped over the line @@ -157,12 +166,12 @@ def minCostAlgo(r, c): # print(f"Setting {r}, {c} to {cost_value}, 
{p}") return min_costs_path[r][c] - def rel_cost(r, c, pnext, _, region_cur): + def rel_cost(r, c, pnext, p, region_cur): # noqa: ARG001 # transition cost of vertrel # first is just equal to that specific vertebra # last is dependant on next in path # classes are always first, last in order of regions - cost_value = 0 + cost_add = 0 if region_rel_cost is not None: # for ridx in range(len(regions) - 1): for last in [0, 1]: @@ -173,48 +182,15 @@ def rel_cost(r, c, pnext, _, region_cur): if rel_cost == 0: continue if last == 0 and c == regions_ranges[region_cur][0]: - # print(f"Added F {rel_cost} to {r}, {c}, {p}") - cost_value += rel_cost + # print(f"Added F {rel_cost} to {r}, {c}, {internal_to_real_path(p)}") + cost_add += rel_cost # break elif last == 1 and c_to_region_idx(pnext[-1][1], regions) >= region_cur + 1: - # print(f"Added L {rel_cost} to {r}, {c}, {p}") - cost_value += rel_cost - return cost_value + # print(f"Added L {rel_cost} to {r}, {c}, {internal_to_real_path(p)}") + cost_add += rel_cost + return cost_add fcost, fpath = minCostAlgo(-1, -1) fpath.reverse() fpath = [f[1] for f in fpath] - return fcost / len(fpath), fpath - - -if __name__ == "__main__": - cost = np.array( - [ - # colum lables - # rows predictions - [0, 10, 0, 0, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 0, 0], - ], - dtype=int, - ) - rel_cost = np.array( - [ - # nothing, last0, first1, last2 - [0, 0, 0, 0], - [0, 10, 0, 0], - [0, 0, 0, 0], - [0, 0, 11, 0], - ], - dtype=int, - ) - rel_cost = -rel_cost - fcost, fpath, _ = find_most_probably_sequence( - cost, - region_rel_cost=rel_cost, - regions=[0, 3], - ) - print() - print("Path cost", round(fcost, 3)) - print("Path", fpath) + return fcost / len(fpath), fpath, min_costs_path diff --git a/spineps/utils/predictor.py b/spineps/utils/predictor.py index e9f201c..2a6f4f1 100755 --- a/spineps/utils/predictor.py +++ b/spineps/utils/predictor.py @@ -85,6 +85,7 @@ def initialize_from_trained_model_folder( checkpoint = torch.load( join(model_training_output_dir, f"fold_{f}", checkpoint_name), map_location=torch.device("cpu"), + weights_only=False, ) if i == 0: trainer_name = checkpoint["trainer_name"] diff --git a/unit_tests/test_filepaths.py b/unit_tests/test_filepaths.py index b0ed2ed..00a9958 100644 --- a/unit_tests/test_filepaths.py +++ b/unit_tests/test_filepaths.py @@ -7,7 +7,7 @@ from pathlib import Path import spineps -from spineps.get_models import Segmentation_Model, get_segmentation_model +from spineps.get_models import Segmentation_Model, get_actual_model from spineps.utils.filepaths import ( filepath_model, get_mri_segmentor_models_dir, diff --git a/unit_tests/test_path.py b/unit_tests/test_path.py new file mode 100644 index 0000000..34843a6 --- /dev/null +++ b/unit_tests/test_path.py @@ -0,0 +1,92 @@ +# Call 'python -m unittest' on this folder # noqa: INP001 +# coverage run -m unittest +# coverage report +# coverage html +import unittest +from unittest.mock import MagicMock + +import numpy as np +from TPTBox.tests.test_utils import get_test_mri + +from spineps.architectures.read_labels import Objectives, Target, VertExact, VertRegion, VertRel +from spineps.phase_labeling import VertLabelingClassifier, perform_labeling_step +from spineps.utils.find_min_cost_path import find_most_probably_sequence + + +class VertLabelingClassifierDummy(VertLabelingClassifier): + def __init__(self): + pass + + +class Test_PathLabeling(unittest.TestCase): + def test_search_path_simple(self): + cost = np.array( + [ + # colum lables + # rows predictions + [0, 10, 0, 0, 
0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + ], + dtype=int, + ) + rel_cost = np.array( + [ + # nothing, last0, first1, last2 + [0, 0, 0, 0], + [0, 10, 0, 0], + [0, 0, 0, 0], + [0, 0, 11, 0], + ], + dtype=int, + ) + rel_cost = -rel_cost + fcost, fpath, _ = find_most_probably_sequence( + cost, + region_rel_cost=rel_cost, + regions=[0, 3], + ) + print() + print("Path cost", round(fcost, 3)) + print("Path", fpath) + self.assertEqual(round(fcost, 3), -5.0) + self.assertEqual(fpath, [1, 2, 3, 4]) + + def test_search_path_relativeonly(self): + self.skipTest("Not implemented yet") + + def test_search_path_complex(self): + self.skipTest("Not implemented yet") + + def test_objective(self): + objectives = Objectives( + [ + Target.FULLYVISIBLE, + Target.REGION, + Target.VERTREL, + Target.VERT, + ], + as_group=True, + ) + + entry_dict = { + "vert_exact": VertExact.L1, + "vert_region": VertRegion.LWS, + "vert_rel": VertRel.FIRST_LWK, + "vert_cut": True, + } + + label = objectives(entry_dict) + print(label) + self.assertEqual(label["FULLYVISIBLE"], [1, 0]) + self.assertEqual(label["REGION"], [0, 0, 1]) + self.assertEqual(label["VERTREL"], [0, 0, 0, 0, 1, 0]) + + def test_labeling_easy(self): + self.skipTest("Not implemented yet") + # mri, subreg, vert, label = get_test_mri() + # model = VertLabelingClassifierDummy() + # l = vert.unique() + # model.run_all_seg_instances = MagicMock(return_value=l) + # perform_labeling_step(model, mri, vert) diff --git a/unit_tests/test_proc_functions.py b/unit_tests/test_proc_functions.py index 3875e91..04e589d 100644 --- a/unit_tests/test_proc_functions.py +++ b/unit_tests/test_proc_functions.py @@ -10,7 +10,7 @@ from TPTBox.tests.test_utils import get_test_mri import spineps -from spineps.get_models import Segmentation_Model, get_segmentation_model +from spineps.get_models import Segmentation_Model, get_actual_model from spineps.utils.proc_functions import clean_cc_artifacts, connected_components_3d, n4_bias logger = No_Logger()
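Since find_most_probably_sequence now returns three values (average path cost, the label path, and the min_costs_path table), every caller has to unpack all three; the test above was adjusted accordingly. A runnable smoke test mirroring test_search_path_simple:

```python
# Rebuild the unit test's inputs and exercise the new 3-tuple return.
import numpy as np
from spineps.utils.find_min_cost_path import find_most_probably_sequence

cost = np.zeros((4, 5), dtype=int)
cost[0, 1] = 10  # prediction 0 strongly prefers label 1
rel_cost = np.zeros((4, 4), dtype=int)  # columns: nothing, last0, first1, last2
rel_cost[1, 1] = 10  # relative-position reward at prediction 1
rel_cost[3, 2] = 11  # relative-position reward at prediction 3
fcost, fpath, min_costs_path = find_most_probably_sequence(
    cost,
    region_rel_cost=-rel_cost,  # rewards are negated into costs
    regions=[0, 3],
)
print("Path cost", round(fcost, 3))  # the unit test asserts -5.0
print("Path", fpath)                 # the unit test asserts [1, 2, 3, 4]
```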