diff --git a/models/bamf_nnunet_pet_ct_breast/config/default.yml b/models/bamf_nnunet_pet_ct_breast/config/default.yml
new file mode 100644
index 00000000..e810f999
--- /dev/null
+++ b/models/bamf_nnunet_pet_ct_breast/config/default.yml
@@ -0,0 +1,49 @@
+general:
+  data_base_dir: /app/data
+  version: 1.0
+  description: default configuration for Bamf NNUnet breast FDG-avid lesion segmentation (dicom to dicom)
+
+execute:
+- FileStructureImporter
+- NiftiConverter
+- Registration
+- NNUnetPETCTRunner
+- TotalSegmentatorMLRunner
+- BreastPostProcessor
+- DsegConverter
+- DataOrganizer
+
+modules:
+  FileStructureImporter:
+    input_dir: 'input_data'
+    structures:
+    - $patientID@instance/CT@dicom:mod=ct
+    - $patientID/PT@dicom:mod=pt
+    import_id: patientID
+
+  NiftiConverter:
+    in_datas: dicom:mod=pt|ct
+    allow_multi_input: true
+
+  NNUnetPETCTRunner:
+    in_ct_data: nifti:mod=ct:registered=true
+    nnunet_task: Task762_PET_CT_Breast
+    nnunet_model: 3d_fullres
+    roi: LIVER,KIDNEY,URINARY_BLADDER,SPLEEN,LUNG,BRAIN,HEART,STOMACH,BREAST+FDG_AVID_TUMOR
+
+  TotalSegmentatorMLRunner:
+    in_data: nifti:mod=ct:registered=true
+    use_fast_mode: true
+
+  BreastPostProcessor:
+    in_ct_data: nifti:mod=ct:registered=true
+
+  DsegConverter:
+    source_segs: nifti:mod=seg:processor=bamf
+    model_name: BAMF Breast FDG PET CT
+    target_dicom: dicom:mod=ct
+    skip_empty_slices: True
+
+  DataOrganizer:
+    targets:
+    - dicomseg-->[i:patientID]/bamf_nnunet_pet_ct_breast.seg.dcm
\ No newline at end of file
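Note: the FileStructureImporter patterns and the DataOrganizer target above imply an input layout along the following lines (the patient folder name is illustrative; the importer only requires one folder per patient under input_data containing a CT and a PT DICOM series folder):

    /app/data/input_data/
        Patient-001/
            CT/    (DICOM series of the CT scan, imported as dicom:mod=ct)
            PT/    (DICOM series of the FDG PET scan, imported as dicom:mod=pt)

After the workflow has run, the DataOrganizer places the DICOM SEG result as Patient-001/bamf_nnunet_pet_ct_breast.seg.dcm under the configured output directory.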
diff --git a/models/bamf_nnunet_pet_ct_breast/dockerfiles/Dockerfile b/models/bamf_nnunet_pet_ct_breast/dockerfiles/Dockerfile
new file mode 100644
index 00000000..eabe7a82
--- /dev/null
+++ b/models/bamf_nnunet_pet_ct_breast/dockerfiles/Dockerfile
@@ -0,0 +1,36 @@
+FROM mhubai/base:latest
+
+# FIXME: set this environment variable as a shortcut to avoid nnunet crashing the build
+# by pulling sklearn instead of scikit-learn
+# N.B. this is a known issue:
+# https://github.com/MIC-DKFZ/nnUNet/issues/1281
+# https://github.com/MIC-DKFZ/nnUNet/pull/1209
+ENV SKLEARN_ALLOW_DEPRECATED_SKLEARN_PACKAGE_INSTALL=True
+
+# Install nnunet
+RUN pip3 install --no-cache-dir \
+  nnunet
+
+# Install TotalSegmentator
+RUN pip3 install --no-cache-dir totalsegmentator==1.5.6
+
+# Clone the main branch of MHubAI/models
+ARG MHUB_MODELS_REPO
+RUN buildutils/import_mhub_model.sh bamf_nnunet_pet_ct_breast ${MHUB_MODELS_REPO}
+
+# Pull nnUNet model weights into the container for Task762_PET_CT_Breast
+ENV WEIGHTS_DIR=/root/.nnunet/nnUNet_models/nnUNet/
+RUN mkdir -p $WEIGHTS_DIR
+ENV TASK_NAME=Task762_PET_CT_Breast
+ENV WEIGHTS_FN=$TASK_NAME.zip
+ENV WEIGHTS_URL=https://zenodo.org/record/8290055/files/$WEIGHTS_FN
+RUN wget --directory-prefix ${WEIGHTS_DIR} ${WEIGHTS_URL} --no-check-certificate
+RUN unzip ${WEIGHTS_DIR}${WEIGHTS_FN} -d ${WEIGHTS_DIR}
+RUN rm ${WEIGHTS_DIR}${WEIGHTS_FN}
+
+# specify nnunet specific environment variables
+ENV WEIGHTS_FOLDER=$WEIGHTS_DIR
+
+# Default run script
+ENTRYPOINT ["mhub.run"]
+CMD ["--config", "/app/models/bamf_nnunet_pet_ct_breast/config/default.yml"]
diff --git a/models/bamf_nnunet_pet_ct_breast/meta.json b/models/bamf_nnunet_pet_ct_breast/meta.json
new file mode 100644
index 00000000..9739742f
--- /dev/null
+++ b/models/bamf_nnunet_pet_ct_breast/meta.json
@@ -0,0 +1,148 @@
+{
+  "id": "",
+  "name": "bamf_nnunet_pet_ct_breast",
+  "title": "Bamf NNunet PET CT Breast",
+  "summary": {
+    "description": "This model is used to detect FDG-avid lesions in the breast from FDG PET/CT scans",
+    "inputs": [
+      {
+        "label": "Input Image",
+        "description": "The CT scan of a patient.",
+        "format": "DICOM",
+        "modality": "CT",
+        "bodypartexamined": "Breast",
+        "slicethickness": "5mm",
+        "non-contrast": true,
+        "contrast": false
+      },
+      {
+        "label": "Input Image",
+        "description": "The PET scan of a patient.",
+        "format": "DICOM",
+        "modality": "PT",
+        "bodypartexamined": "Breast",
+        "slicethickness": "3.38mm",
+        "non-contrast": false,
+        "contrast": false
+      }
+    ],
+    "outputs": [
+      {
+        "label": "Segmentation",
+        "type": "Segmentation",
+        "description": "FDG-avid Breast tumor",
+        "classes": [
+          "FDG_AVID_TUMOR"
+        ]
+      }
+    ],
+    "model": {
+      "architecture": "U-net",
+      "training": "supervised",
+      "cmpapproach": "3D"
+    },
+    "data": {
+      "training": {
+        "vol_samples": 1014
+      },
+      "evaluation": {
+        "vol_samples": 11
+      },
+      "public": true,
+      "external": true
+    }
+  },
+  "details": {
+    "name": "AIMI PET CT BREAST",
+    "version": "1.0.0",
+    "devteam": "BAMF Health",
+    "type": "nnU-Net (U-Net structure, optimized by data-driven heuristics)",
+    "date": {
+      "code": "17.10.2023",
+      "weights": "28.08.2023",
+      "pub": "23.10.2023"
+    },
+    "cite": "Murugesan, Gowtham Krishnan, Diana McCrumb, Mariam Aboian, Tej Verma, Rahul Soni, Fatima Memon, and Jeff Van Oss. The AIMI Initiative: AI-Generated Annotations for Imaging Data Commons Collections. arXiv preprint arXiv:2310.14897 (2023).",
+    "license": {
+      "code": "MIT",
+      "weights": "CC BY-NC 4.0"
+    },
+    "publications": [
+      {
+        "title": "The AIMI Initiative: AI-Generated Annotations in IDC Collections",
+        "uri": "https://arxiv.org/abs/2310.14897"
+      }
+    ],
+    "github": "https://github.com/bamf-health/aimi-breast-pet-ct"
+  },
+  "info": {
+    "use": {
+      "title": "Intended Use",
+      "text": "This model is intended to perform segmentation of the FDG-avid breast tumor region in FDG PET/CT scans. The model has been trained and tested on scans acquired during clinical care of patients, so it might not be suited for a healthy population. The generalization capabilities of the model on a range of ages, genders, and ethnicities are unknown."
+    },
+    "analyses": {
+      "title": "Quantitative Analyses",
+      "text": "Label-wise metrics (mean (standard deviation)) between AI derived and expert corrected FDG PET/CT breast lesion annotations.",
+      "tables": [
+        {
+          "label": "Segmentation Metric Expert",
+          "entries": {
+            "Tumor DSC": "0.80 (0.33)",
+            "Tumor 95% Hausdorff (mm)": "29.70 (33.43)"
+          }
+        },
+        {
+          "label": "Segmentation Metric Tumor - Non-Expert",
+          "entries": {
+            "Tumor DSC": "0.94 (0.10)",
+            "Tumor 95% Hausdorff (mm)": "13.53 (20.00)"
+          }
+        },
+        {
+          "label": "Detection Accuracy",
+          "entries": {
+            "Sensitivity": "0.43",
+            "False negative rate": "0.57",
+            "F1 score": "0.52"
+          }
+        }
+      ],
+      "references": [
+        {
+          "label": "The AIMI Initiative: AI-Generated Annotations for Imaging Data Commons Collections",
+          "uri": "https://arxiv.org/abs/2310.14897"
+        }
+      ]
+    },
+    "evaluation": {
+      "title": "Evaluation Data",
+      "text": "77 validation cases were rated by a radiologist and a non-expert.",
+      "references": [
+        {
+          "label": "Imaging Data Collections (IDC)",
+          "uri": "https://datacommons.cancer.gov/repository/imaging-data-commons"
+        },
+        {
+          "label": "Image segmentations produced by the AIMI Annotations initiative",
+          "uri": "https://zenodo.org/records/10009368"
+        }
+      ]
+    },
+    "training": {
+      "title": "Training Data",
+      "text": "The AutoPET Challenge 2023 dataset comprises whole-body FDG-PET/CT data from 900 patients, encompassing 1014 studies with tumor annotations. This dataset was augmented by adding labels for the brain, bladder, kidneys, liver, stomach, spleen, lungs, and heart generated by the TotalSegmentator model. A multi-task AI model was trained using the augmented dataset.",
+      "references": [
+        {
+          "label": "AutoPET Challenge 2023 dataset",
+          "uri": "https://doi.org/10.7937/gkr0-xv29"
+        },
+        {
+          "label": "Total Segmentator",
+          "uri": "https://doi.org/10.48550/arXiv.2208.05868"
+        }
+      ]
+    }
+  }
+}
+
+
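The tables above report Dice (DSC) and 95% Hausdorff distance between AI-derived and reader-corrected annotations. As a point of reference, a minimal sketch of how such a Dice score can be computed from two binary masks (file names are hypothetical; both masks are assumed to be binary and defined on the same grid; the 95% Hausdorff distance additionally requires surface distances and is not shown):

    import SimpleITK as sitk
    import numpy as np

    # load the AI-generated and the reference segmentation as boolean arrays
    ai = sitk.GetArrayFromImage(sitk.ReadImage("ai_mask.nii.gz")) > 0
    ref = sitk.GetArrayFromImage(sitk.ReadImage("reference_mask.nii.gz")) > 0

    # Dice = 2 * |A intersection B| / (|A| + |B|)
    intersection = np.logical_and(ai, ref).sum()
    dice = 2.0 * intersection / (ai.sum() + ref.sum())
    print(f"Tumor DSC: {dice:.2f}")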
+ """ + img_filtered = np.zeros(img_data.shape) + blobs_labels = measure.label(img_data, background=0) + lbl, counts = np.unique(blobs_labels, return_counts=True) + lbl_dict = {} + for i, j in zip(lbl, counts): + lbl_dict[i] = j + sorted_dict = dict(sorted(lbl_dict.items(), key=lambda x: x[1], reverse=True)) + count = 0 + + for key, value in sorted_dict.items(): + if count >= 1 and count <= 2 and value > 20: + print(key, value) + img_filtered[blobs_labels == key] = 1 + count += 1 + + img_data[img_filtered != 1] = 0 + return img_data + + def arr_2_sitk_img(self, arr, ref): + """ + Convert numpy array to SimpleITK image. + + Args: + arr (np.ndarray): Input image data as a numpy array. + ref: Reference image for copying information. + + Returns: + sitk.Image: Converted SimpleITK image. + """ + op_img = sitk.GetImageFromArray(arr) + op_img.CopyInformation(ref) + return op_img + + @IO.Instance() + @IO.Input('in_ct_data', 'nifti:mod=ct:registered=true', the='input ct data') + @IO.Input('in_tumor_data', 'nifti:mod=seg:model=nnunet', the='input tumor segmentation') + @IO.Input('in_total_seg_data', 'nifti:mod=seg:model=TotalSegmentator', the='input total segmentation') + @IO.Output('out_data', 'bamf_processed.nii.gz', 'nifti:mod=seg:processor=bamf:roi=BREAST+FDG_AVID_TUMOR', data='in_tumor_data', + the="FDG-avid lesions in breast") + def task(self, instance: Instance, in_ct_data: InstanceData, in_tumor_data: InstanceData, + in_total_seg_data: InstanceData, out_data: InstanceData): + """ + Perform postprocessing and writes simpleITK Image + """ + tumor_seg_path = in_tumor_data.abspath + total_seg_path = in_total_seg_data.abspath + + ts_data = sitk.GetArrayFromImage(sitk.ReadImage(total_seg_path)) + ts_abdominal = sitk.GetArrayFromImage(sitk.ReadImage(total_seg_path)) + ts_data[ts_data > 1] = 1 + lesions = sitk.GetArrayFromImage(sitk.ReadImage(tumor_seg_path)) + tumor_label = 9 + lesions[lesions != tumor_label] = 0 + lesions[lesions == tumor_label] = 1 + + op_data = np.zeros(ts_data.shape) + ref = sitk.ReadImage(in_ct_data.abspath) + ct_data = sitk.GetArrayFromImage(ref) + + op_data[lesions == 1] = 1 + th = np.min(ct_data) + op_data[ct_data == th] = 0 # removing predicitons where CT not available + # Use the coordinates of the bounding box to crop the 3D numpy array. 
+ ts_abdominal[ts_abdominal > 4] = 0 + ts_abdominal[ts_abdominal > 1] = 1 + if ts_abdominal.max() > 0: + x1, x2, y1, y2, z1, z2 = self.bbox2_3D(ts_abdominal) + # Create a structuring element with ones in the middle and zeros around it + structuring_element = np.ones((3, 3)) + + # Dilate the array with the structuring element + op_temp = cv2.dilate(ts_data, structuring_element, iterations=5) + op_temp = cv2.erode(op_temp, structuring_element, iterations=5) + op_data[op_temp == 1] = 0 + if ts_abdominal.max() > 0: + op_data[x1:x2, y1:, :] = 0 + op_data[0:3, :, :] = 0 + op_data = self.n_connected(op_data) + op_img = sitk.GetImageFromArray(op_data) + op_img.CopyInformation(ref) + tmp_dir = self.config.data.requestTempDir(label="breast-post-processor") + tmp_file = os.path.join(tmp_dir, f'final.nii.gz') + sitk.WriteImage(op_img, tmp_file) + + shutil.copyfile(tmp_file, out_data.abspath) diff --git a/models/bamf_nnunet_pet_ct_breast/utils/NNUnetPETCTRunner.py b/models/bamf_nnunet_pet_ct_breast/utils/NNUnetPETCTRunner.py new file mode 100644 index 00000000..082929c2 --- /dev/null +++ b/models/bamf_nnunet_pet_ct_breast/utils/NNUnetPETCTRunner.py @@ -0,0 +1,215 @@ +""" +------------------------------------------------- +MHub - NNU-Net Runner + This is a base runner for pre-trained + nnunet models using PET-CT Modalities +------------------------------------------------- + +------------------------------------------------- +Author: Jithendra Kumar +Email: jithendra.kumar@bamfhealth.com +------------------------------------------------- +""" +# TODO: support multi-i/o and batch processing on multiple instances + +from typing import List, Optional +import os, subprocess, shutil +import SimpleITK as sitk, numpy as np +from mhubio.core import Module, Instance, InstanceData, DataType, FileType, IO, InstanceDataCollection + +# TODO: add an optional evaluation pattern (regex) to IO.Config +nnunet_task_name_regex = r"Task[0-9]{3}_[a-zA-Z0-9_]+" + +@IO.ConfigInput('in_ct_data', 'nifti:mod=ct', the="input ct data to run nnunet on") +@IO.ConfigInput('in_pt_data', 'nifti:mod=pt', the="input pt resampled data to run nnunet on") +@IO.Config('nnunet_task', str, None, the='nnunet task name') +@IO.Config('nnunet_model', str, None, the='nnunet model name (2d, 3d_lowres, 3d_fullres, 3d_cascade_fullres)') +#@IO.Config('input_data_type', DataType, 'nifti:mod=ct', factory=DataType.fromString, the='input data type') +@IO.Config('folds', int, None, the='number of folds to run nnunet on') +@IO.Config('use_tta', bool, True, the='flag to enable test time augmentation') +@IO.Config('export_prob_maps', bool, False, the='flag to export probability maps') +@IO.Config('prob_map_segments', list, [], the='segment labels for probability maps') +@IO.Config('roi', str, None, the='roi or comma separated list of roi the nnunet segments') +class NNUnetPETCTRunner(Module): + + nnunet_task: str + nnunet_model: str + input_data_type: DataType + folds: int # TODO: support optional config attributes + use_tta: bool + export_prob_maps: bool + prob_map_segments: list + roi: str + + def export_prob_mask(self, nnunet_out_dir: str, ref_file: InstanceData, output_dtype: str = 'float32', structure_list: Optional[List[str]] = None): + """ + Convert softmax probability maps to NRRD. For simplicity, the probability maps + are converted by default to UInt8 + Arguments: + model_output_folder : required - path to the folder where the inferred segmentation masks should be stored. 
diff --git a/models/bamf_nnunet_pet_ct_breast/utils/NNUnetPETCTRunner.py b/models/bamf_nnunet_pet_ct_breast/utils/NNUnetPETCTRunner.py
new file mode 100644
index 00000000..082929c2
--- /dev/null
+++ b/models/bamf_nnunet_pet_ct_breast/utils/NNUnetPETCTRunner.py
@@ -0,0 +1,215 @@
+"""
+-------------------------------------------------
+MHub - NNU-Net Runner
+       This is a base runner for pre-trained
+       nnunet models using PET-CT Modalities
+-------------------------------------------------
+
+-------------------------------------------------
+Author: Jithendra Kumar
+Email:  jithendra.kumar@bamfhealth.com
+-------------------------------------------------
+"""
+# TODO: support multi-i/o and batch processing on multiple instances
+
+from typing import List, Optional
+import os, subprocess, shutil
+import SimpleITK as sitk, numpy as np
+from mhubio.core import Module, Instance, InstanceData, DataType, FileType, IO, InstanceDataCollection
+
+# TODO: add an optional evaluation pattern (regex) to IO.Config
+nnunet_task_name_regex = r"Task[0-9]{3}_[a-zA-Z0-9_]+"
+
+@IO.ConfigInput('in_ct_data', 'nifti:mod=ct', the="input ct data to run nnunet on")
+@IO.ConfigInput('in_pt_data', 'nifti:mod=pt', the="input pt resampled data to run nnunet on")
+@IO.Config('nnunet_task', str, None, the='nnunet task name')
+@IO.Config('nnunet_model', str, None, the='nnunet model name (2d, 3d_lowres, 3d_fullres, 3d_cascade_fullres)')
+#@IO.Config('input_data_type', DataType, 'nifti:mod=ct', factory=DataType.fromString, the='input data type')
+@IO.Config('folds', int, None, the='number of folds to run nnunet on')
+@IO.Config('use_tta', bool, True, the='flag to enable test time augmentation')
+@IO.Config('export_prob_maps', bool, False, the='flag to export probability maps')
+@IO.Config('prob_map_segments', list, [], the='segment labels for probability maps')
+@IO.Config('roi', str, None, the='roi or comma separated list of roi the nnunet segments')
+class NNUnetPETCTRunner(Module):
+
+    nnunet_task: str
+    nnunet_model: str
+    input_data_type: DataType
+    folds: int  # TODO: support optional config attributes
+    use_tta: bool
+    export_prob_maps: bool
+    prob_map_segments: list
+    roi: str
+
+    def export_prob_mask(self, nnunet_out_dir: str, ref_file: InstanceData, output_dtype: str = 'float32', structure_list: Optional[List[str]] = None):
+        """
+        Convert softmax probability maps to NRRD. For simplicity, the probability maps
+        are converted by default to float32.
+        Arguments:
+            nnunet_out_dir  : required - path to the folder containing the nnU-Net output (inferred segmentation mask and `.npz` probability file).
+            ref_file        : required - InstanceData object of the generated segmentation mask used as reference file.
+            output_dtype    : optional - output data type. Data type float16 is not supported by the NRRD standard,
+                              so the choice should be between uint8, uint16 or float32.
+            structure_list  : optional - list of the structures whose probability maps are stored in the
+                              first channel of the `.npz` file (output from the nnU-Net pipeline
+                              when `export_prob_maps` is set to True).
+        Outputs:
+            This function [...]
+        """
+
+        # initialize structure list
+        if structure_list is None:
+            if self.roi is not None:
+                structure_list = self.roi.split(',')
+            else:
+                structure_list = []
+
+        # sanity check user inputs
+        assert(output_dtype in ["uint8", "uint16", "float32"])
+
+        # input file containing the raw information
+        pred_softmax_fn = 'VOLUME_001.npz'
+        pred_softmax_path = os.path.join(nnunet_out_dir, pred_softmax_fn)
+
+        # parse the reference image - we will make use of it to populate the header of the
+        # NRRD mask we are going to get from the inferred segmentation mask
+        sitk_ct = sitk.ReadImage(ref_file.abspath)
+
+        # generate bundle for prob masks
+        # TODO: we really have to create folders (or add this as an option that defaults to true) automatically
+        prob_masks_bundle = ref_file.getDataBundle('prob_masks')
+        if not os.path.isdir(prob_masks_bundle.abspath):
+            os.mkdir(prob_masks_bundle.abspath)
+
+        # load softmax probability maps
+        pred_softmax_all = np.load(pred_softmax_path)["softmax"]
+
+        # iterate all channels
+        for channel in range(0, len(pred_softmax_all)):
+
+            structure = structure_list[channel] if channel < len(structure_list) else f"structure_{channel}"
+            pred_softmax_segmask = pred_softmax_all[channel].astype(dtype = np.float32)
+
+            if output_dtype == "float32":
+                # no rescale needed - the values will be between 0 and 1
+                # set SITK image dtype to Float32
+                sitk_dtype = sitk.sitkFloat32
+
+            elif output_dtype == "uint8":
+                # rescale between 0 and 255, quantize
+                pred_softmax_segmask = (255*pred_softmax_segmask).astype(np.int32)
+                # set SITK image dtype to UInt8
+                sitk_dtype = sitk.sitkUInt8
+
+            elif output_dtype == "uint16":
+                # rescale between 0 and 65536
+                pred_softmax_segmask = (65536*pred_softmax_segmask).astype(np.int32)
+                # set SITK image dtype to UInt16
+                sitk_dtype = sitk.sitkUInt16
+            else:
+                raise ValueError("Invalid output data type. Please choose between uint8, uint16 or float32.")
+
+            pred_softmax_segmask_sitk = sitk.GetImageFromArray(pred_softmax_segmask)
+            pred_softmax_segmask_sitk.CopyInformation(sitk_ct)
+            pred_softmax_segmask_sitk = sitk.Cast(pred_softmax_segmask_sitk, sitk_dtype)
+
+            # generate data
+            prob_mask = InstanceData(f'{structure}.nrrd', DataType(FileType.NRRD, {'mod': 'prob_mask', 'structure': structure}), bundle=prob_masks_bundle)
+
+            # export file
+            writer = sitk.ImageFileWriter()
+            writer.UseCompressionOn()
+            writer.SetFileName(prob_mask.abspath)
+            writer.Execute(pred_softmax_segmask_sitk)
+
+            # check if the file was written
+            if os.path.isfile(prob_mask.abspath):
+                self.v(f" > prob mask for {structure} saved to {prob_mask.abspath}")
+                prob_mask.confirm()
+
+    @IO.Instance()
+    @IO.Input('in_ct_data', the="input ct data to run nnunet on")
+    @IO.Input('in_pt_data', the="input pt resampled data to run nnunet on")
+    @IO.Output("out_data", 'VOLUME_001.nii.gz', 'nifti:mod=seg:model=nnunet', the="output data from nnunet")
+    def task(self, instance: Instance, in_ct_data: InstanceData, in_pt_data: InstanceData, out_data: InstanceData) -> None:
+
+        # get the nnunet model to run
+        self.v("Running nnUNet_predict.")
+        self.v(f" > task: {self.nnunet_task}")
+        self.v(f" > model: {self.nnunet_model}")
+        self.v(f" > output data: {out_data.abspath}")
+
+        # download weights if not found
+        # NOTE: only for testing / debugging. For production, always provide the weights in the Docker container.
+        if not os.path.isdir(os.path.join(os.environ["WEIGHTS_FOLDER"], '')):
+            print("Downloading nnUNet model weights...")
+            bash_command = ["nnUNet_download_pretrained_model", self.nnunet_task]
+            self.subprocess(bash_command, text=True)
+
+        # bring input data into nnunet-specific format
+        # NOTE: only for nifti data as we hardcode the nnunet-formatted-filename (and extension) for now.
+        # This model expects 2 input modalities for each image
+        inp_dir = self.config.data.requestTempDir(label="nnunet-model-inp")
+        inp_file = f'VOLUME_001_0000.nii.gz'
+        shutil.copyfile(in_ct_data.abspath, os.path.join(inp_dir, inp_file))
+        inp_file = f'VOLUME_001_0001.nii.gz'
+        shutil.copyfile(in_pt_data.abspath, os.path.join(inp_dir, inp_file))
+
+        # define output folder (temp dir) and also override environment variable for nnunet
+        out_dir = self.config.data.requestTempDir(label="nnunet-model-out")
+        os.environ['RESULTS_FOLDER'] = out_dir
+
+        # symlink the model weights into the temporary results folder
+        # create symlink in python
+        # NOTE: this is a workaround for the nnunet CLI, which expects the trained model weights to be located
+        # under RESULTS_FOLDER/nnUNet. Since RESULTS_FOLDER is overridden with a temporary directory above,
+        # we symlink the weights folder into it.
+        os.symlink(os.environ['WEIGHTS_FOLDER'], os.path.join(out_dir, 'nnUNet'))
+
+        # NOTE: instead of running from commandline this could also be done in a pythonic way:
+        #       `nnUNet/nnunet/inference/predict.py` - but it would require
+        #       to set manually all the arguments that the user is not intended
+        #       to fiddle with; so stick with the bash executable
+
+        # construct nnunet inference command
+        bash_command = ["nnUNet_predict"]
+        bash_command += ["--input_folder", str(inp_dir)]
+        bash_command += ["--output_folder", str(out_dir)]
+        bash_command += ["--task_name", self.nnunet_task]
+        bash_command += ["--model", self.nnunet_model]
+
+        # add optional arguments
+        if self.folds is not None:
+            bash_command += ["--folds", str(self.folds)]
+
+        if not self.use_tta:
+            bash_command += ["--disable_tta"]
+
+        if self.export_prob_maps:
+            bash_command += ["--save_npz"]
+
+        self.v(f" > command: {bash_command}")
+        # run command
+        self.subprocess(bash_command, text=True)
+
+        # output meta
+        meta = {
+            "model": "nnunet",
+            "nnunet_task": self.nnunet_task,
+            "nnunet_model": self.nnunet_model,
+            "roi": self.roi
+        }
+
+        # get output data
+        out_file = f'VOLUME_001.nii.gz'
+        out_path = os.path.join(out_dir, out_file)
+
+        # copy output data to instance
+        shutil.copyfile(out_path, out_data.abspath)
+
+        # export probability maps if requested, as dynamic data
+        if self.export_prob_maps:
+            self.export_prob_mask(str(out_dir), out_data, 'float32', self.prob_map_segments)
+
+        # update meta dynamically
+        out_data.type.meta += meta
diff --git a/models/bamf_nnunet_pet_ct_breast/utils/Registration.py b/models/bamf_nnunet_pet_ct_breast/utils/Registration.py
new file mode 100644
index 00000000..e017a74c
--- /dev/null
+++ b/models/bamf_nnunet_pet_ct_breast/utils/Registration.py
@@ -0,0 +1,47 @@
+import os
+import shutil
+import SimpleITK as sitk
+import numpy as np
+from mhubio.core import IO
+from mhubio.core import Module, Instance, InstanceData
+
+
+class Registration(Module):
+
+    @IO.Instance()
+    @IO.Input('in_fixed_data', 'nifti:mod=pt', the='input pt data')
+    @IO.Input('in_moving_data', 'nifti:mod=ct', the='input ct data')
+    @IO.Output('out_data', 'VOL000_registered.nii.gz', 'nifti:mod=ct:registered=true', the="registered ct data")
+    def task(self, instance: Instance, in_moving_data: InstanceData, in_fixed_data: InstanceData, out_data: InstanceData):
+        """
+        Register the moving CT image to the fixed PET image (translation transform) and resample it onto the PET grid.
+        """
+        fixed = sitk.ReadImage(in_fixed_data.abspath, sitk.sitkFloat32)
+        moving = sitk.ReadImage(in_moving_data.abspath, sitk.sitkFloat32)
+        numberOfBins = 24
+        samplingPercentage = 0.10
+        R = sitk.ImageRegistrationMethod()
+        R.SetMetricAsMattesMutualInformation(numberOfBins)
+        R.SetMetricSamplingPercentage(samplingPercentage, sitk.sitkWallClock)
+        R.SetMetricSamplingStrategy(R.RANDOM)
+        R.SetOptimizerAsRegularStepGradientDescent(1.0, 0.001, 200)
+        R.SetInitialTransform(sitk.TranslationTransform(fixed.GetDimension()))
+        R.SetInterpolator(sitk.sitkLinear)
+
+        def command_iteration(method):
+            print(f"{method.GetOptimizerIteration():3} = {method.GetMetricValue():10.5f}")
+
+        R.AddCommand(sitk.sitkIterationEvent, lambda: command_iteration(R))
+
+        outTx = R.Execute(fixed, moving)
+        resampler = sitk.ResampleImageFilter()
+        resampler.SetReferenceImage(fixed)
+        resampler.SetInterpolator(sitk.sitkLinear)
+        resampler.SetDefaultPixelValue(int(np.min(sitk.GetArrayFromImage(moving))))
+        resampler.SetTransform(outTx)
+        out = resampler.Execute(moving)
+        tmp_dir = self.config.data.requestTempDir(label="registration-processor")
+        output_path = os.path.join(tmp_dir, f'registered.nii.gz')
+        out.CopyInformation(fixed)
+        sitk.WriteImage(out, output_path)
+        shutil.copyfile(output_path, out_data.abspath)
\ No newline at end of file
diff --git a/models/bamf_nnunet_pet_ct_breast/utils/TotalSegmentatorMLRunner.py b/models/bamf_nnunet_pet_ct_breast/utils/TotalSegmentatorMLRunner.py
new file mode 100644
index 00000000..d77bd485
--- /dev/null
+++ b/models/bamf_nnunet_pet_ct_breast/utils/TotalSegmentatorMLRunner.py
@@ -0,0 +1,157 @@
+"""
+-------------------------------------------------
+MHub - Run Module for TotalSegmentator.
+-------------------------------------------------
+
+-------------------------------------------------
+Author: Leonard Nürnberg
+Email:  leonard.nuernberg@maastrichtuniversity.nl
+Comments: Reused the file from model/totalsegmentator
+-------------------------------------------------
+"""
+
+from mhubio.core import Module, Instance, InstanceData, DataType, FileType, CT, SEG, IO, DataTypeQuery
+import os, subprocess
+
+mapping = {
+    'spleen': 'SPLEEN',
+    'kidney_right': 'RIGHT_KIDNEY',
+    'kidney_left': 'LEFT_KIDNEY',
+    'gallbladder': 'GALLBLADDER',
+    'liver': 'LIVER',
+    'stomach': 'STOMACH',
+    'aorta': 'AORTA',
+    'inferior_vena_cava': 'INFERIOR_VENA_CAVA',
+    'portal_vein_and_splenic_vein': 'PORTAL_AND_SPLENIC_VEIN',
+    'pancreas': 'PANCREAS',
+    'adrenal_gland_right': 'RIGHT_ADRENAL_GLAND',
+    'adrenal_gland_left': 'LEFT_ADRENAL_GLAND',
+    'lung_upper_lobe_left': 'LEFT_UPPER_LUNG_LOBE',
+    'lung_lower_lobe_left': 'LEFT_LOWER_LUNG_LOBE',
+    'lung_upper_lobe_right': 'RIGHT_UPPER_LUNG_LOBE',
+    'lung_middle_lobe_right': 'RIGHT_MIDDLE_LUNG_LOBE',
+    'lung_lower_lobe_right': 'RIGHT_LOWER_LUNG_LOBE',
+    'vertebrae_L5': 'VERTEBRAE_L5',
+    'vertebrae_L4': 'VERTEBRAE_L4',
+    'vertebrae_L3': 'VERTEBRAE_L3',
+    'vertebrae_L2': 'VERTEBRAE_L2',
+    'vertebrae_L1': 'VERTEBRAE_L1',
+    'vertebrae_T12': 'VERTEBRAE_T12',
+    'vertebrae_T11': 'VERTEBRAE_T11',
+    'vertebrae_T10': 'VERTEBRAE_T10',
+    'vertebrae_T9': 'VERTEBRAE_T9',
+    'vertebrae_T8': 'VERTEBRAE_T8',
+    'vertebrae_T7': 'VERTEBRAE_T7',
+    'vertebrae_T6': 'VERTEBRAE_T6',
+    'vertebrae_T5': 'VERTEBRAE_T5',
+    'vertebrae_T4': 'VERTEBRAE_T4',
+    'vertebrae_T3': 'VERTEBRAE_T3',
+    'vertebrae_T2': 'VERTEBRAE_T2',
+    'vertebrae_T1': 'VERTEBRAE_T1',
+    'vertebrae_C7': 'VERTEBRAE_C7',
+    'vertebrae_C6': 'VERTEBRAE_C6',
+    'vertebrae_C5': 'VERTEBRAE_C5',
+    'vertebrae_C4': 'VERTEBRAE_C4',
+    'vertebrae_C3': 'VERTEBRAE_C3',
+    'vertebrae_C2': 'VERTEBRAE_C2',
+    'vertebrae_C1': 'VERTEBRAE_C1',
+    'esophagus': 'ESOPHAGUS',
+    'trachea': 'TRACHEA',
+    'heart_myocardium': 'MYOCARDIUM',
+    'heart_atrium_left': 'LEFT_ATRIUM',
+    'heart_ventricle_left': 'LEFT_VENTRICLE',
+    'heart_atrium_right': 'RIGHT_ATRIUM',
+    'heart_ventricle_right': 'RIGHT_VENTRICLE',
+    'pulmonary_artery': 'PULMONARY_ARTERY',
+    'brain': 'BRAIN',
+    'iliac_artery_left': 'LEFT_ILIAC_ARTERY',
+    'iliac_artery_right': 'RIGHT_ILIAC_ARTERY',
+    'iliac_vena_left': 'LEFT_ILIAC_VEIN',
+    'iliac_vena_right': 'RIGHT_ILIAC_VEIN',
+    'small_bowel': 'SMALL_INTESTINE',
+    'duodenum': 'DUODENUM',
+    'colon': 'COLON',
+    'rib_left_1': 'LEFT_RIB_1',
+    'rib_left_2': 'LEFT_RIB_2',
+    'rib_left_3': 'LEFT_RIB_3',
+    'rib_left_4': 'LEFT_RIB_4',
+    'rib_left_5': 'LEFT_RIB_5',
+    'rib_left_6': 'LEFT_RIB_6',
+    'rib_left_7': 'LEFT_RIB_7',
+    'rib_left_8': 'LEFT_RIB_8',
+    'rib_left_9': 'LEFT_RIB_9',
+    'rib_left_10': 'LEFT_RIB_10',
+    'rib_left_11': 'LEFT_RIB_11',
+    'rib_left_12': 'LEFT_RIB_12',
+    'rib_right_1': 'RIGHT_RIB_1',
+    'rib_right_2': 'RIGHT_RIB_2',
+    'rib_right_3': 'RIGHT_RIB_3',
+    'rib_right_4': 'RIGHT_RIB_4',
+    'rib_right_5': 'RIGHT_RIB_5',
+    'rib_right_6': 'RIGHT_RIB_6',
+    'rib_right_7': 'RIGHT_RIB_7',
+    'rib_right_8': 'RIGHT_RIB_8',
+    'rib_right_9': 'RIGHT_RIB_9',
+    'rib_right_10': 'RIGHT_RIB_10',
+    'rib_right_11': 'RIGHT_RIB_11',
+    'rib_right_12': 'RIGHT_RIB_12',
+    'humerus_left': 'LEFT_HUMERUS',
+    'humerus_right': 'RIGHT_HUMERUS',
+    'scapula_left': 'LEFT_SCAPULA',
+    'scapula_right': 'RIGHT_SCAPULA',
+    'clavicula_left': 'LEFT_CLAVICLE',
+    'clavicula_right': 'RIGHT_CLAVICLE',
+    'femur_left': 'LEFT_FEMUR',
+    'femur_right': 'RIGHT_FEMUR',
+    'hip_left': 'LEFT_HIP',
+    'hip_right': 'RIGHT_HIP',
+    'sacrum': 'SACRUM',
+    'face': 'FACE',
+    'gluteus_maximus_left': 'LEFT_GLUTEUS_MAXIMUS',
+    'gluteus_maximus_right': 'RIGHT_GLUTEUS_MAXIMUS',
+    'gluteus_medius_left': 'LEFT_GLUTEUS_MEDIUS',
+    'gluteus_medius_right': 'RIGHT_GLUTEUS_MEDIUS',
+    'gluteus_minimus_left': 'LEFT_GLUTEUS_MINIMUS',
+    'gluteus_minimus_right': 'RIGHT_GLUTEUS_MINIMUS',
+    'autochthon_left': 'LEFT_AUTOCHTHONOUS_BACK_MUSCLE',
+    'autochthon_right': 'RIGHT_AUTOCHTHONOUS_BACK_MUSCLE',
+    'iliopsoas_left': 'LEFT_ILIOPSOAS',
+    'iliopsoas_right': 'RIGHT_ILIOPSOAS',
+    'urinary_bladder': 'URINARY_BLADDER'
+}
+
+# from totalsegmentator.map_to_binary import class_map
+# ROI = ','.join(mapping[class_map['total'][ci]] for ci in range(1, 105))
+ROI = 'SPLEEN,RIGHT_KIDNEY,LEFT_KIDNEY,GALLBLADDER,LIVER,STOMACH,AORTA,INFERIOR_VENA_CAVA,PORTAL_AND_SPLENIC_VEIN,PANCREAS,RIGHT_ADRENAL_GLAND,LEFT_ADRENAL_GLAND,LEFT_UPPER_LUNG_LOBE,LEFT_LOWER_LUNG_LOBE,RIGHT_UPPER_LUNG_LOBE,RIGHT_MIDDLE_LUNG_LOBE,RIGHT_LOWER_LUNG_LOBE,VERTEBRAE_L5,VERTEBRAE_L4,VERTEBRAE_L3,VERTEBRAE_L2,VERTEBRAE_L1,VERTEBRAE_T12,VERTEBRAE_T11,VERTEBRAE_T10,VERTEBRAE_T9,VERTEBRAE_T8,VERTEBRAE_T7,VERTEBRAE_T6,VERTEBRAE_T5,VERTEBRAE_T4,VERTEBRAE_T3,VERTEBRAE_T2,VERTEBRAE_T1,VERTEBRAE_C7,VERTEBRAE_C6,VERTEBRAE_C5,VERTEBRAE_C4,VERTEBRAE_C3,VERTEBRAE_C2,VERTEBRAE_C1,ESOPHAGUS,TRACHEA,MYOCARDIUM,LEFT_ATRIUM,LEFT_VENTRICLE,RIGHT_ATRIUM,RIGHT_VENTRICLE,PULMONARY_ARTERY,BRAIN,LEFT_ILIAC_ARTERY,RIGHT_ILIAC_ARTERY,LEFT_ILIAC_VEIN,RIGHT_ILIAC_VEIN,SMALL_INTESTINE,DUODENUM,COLON,LEFT_RIB_1,LEFT_RIB_2,LEFT_RIB_3,LEFT_RIB_4,LEFT_RIB_5,LEFT_RIB_6,LEFT_RIB_7,LEFT_RIB_8,LEFT_RIB_9,LEFT_RIB_10,LEFT_RIB_11,LEFT_RIB_12,RIGHT_RIB_1,RIGHT_RIB_2,RIGHT_RIB_3,RIGHT_RIB_4,RIGHT_RIB_5,RIGHT_RIB_6,RIGHT_RIB_7,RIGHT_RIB_8,RIGHT_RIB_9,RIGHT_RIB_10,RIGHT_RIB_11,RIGHT_RIB_12,LEFT_HUMERUS,RIGHT_HUMERUS,LEFT_SCAPULA,RIGHT_SCAPULA,LEFT_CLAVICLE,RIGHT_CLAVICLE,LEFT_FEMUR,RIGHT_FEMUR,LEFT_HIP,RIGHT_HIP,SACRUM,FACE,LEFT_GLUTEUS_MAXIMUS,RIGHT_GLUTEUS_MAXIMUS,LEFT_GLUTEUS_MEDIUS,RIGHT_GLUTEUS_MEDIUS,LEFT_GLUTEUS_MINIMUS,RIGHT_GLUTEUS_MINIMUS,LEFT_AUTOCHTHONOUS_BACK_MUSCLE,RIGHT_AUTOCHTHONOUS_BACK_MUSCLE,LEFT_ILIOPSOAS,RIGHT_ILIOPSOAS,URINARY_BLADDER'
+
+@IO.Config('use_fast_mode', bool, True, the="flag to set to run TotalSegmentator in fast mode")
+class TotalSegmentatorMLRunner(Module):
+
+    use_fast_mode: bool
+
+    @IO.Instance()
+    @IO.Input('in_data', 'nifti:mod=ct:registered=true', the="input whole body ct scan")
+    @IO.Output('out_data', 'segmentations.nii.gz', 'nifti:mod=seg:model=TotalSegmentator:roi=SPLEEN,RIGHT_KIDNEY,LEFT_KIDNEY,GALLBLADDER,LIVER,STOMACH,AORTA,INFERIOR_VENA_CAVA,PORTAL_AND_SPLENIC_VEIN,PANCREAS,RIGHT_ADRENAL_GLAND,LEFT_ADRENAL_GLAND,LEFT_UPPER_LUNG_LOBE,LEFT_LOWER_LUNG_LOBE,RIGHT_UPPER_LUNG_LOBE,RIGHT_MIDDLE_LUNG_LOBE,RIGHT_LOWER_LUNG_LOBE,VERTEBRAE_L5,VERTEBRAE_L4,VERTEBRAE_L3,VERTEBRAE_L2,VERTEBRAE_L1,VERTEBRAE_T12,VERTEBRAE_T11,VERTEBRAE_T10,VERTEBRAE_T9,VERTEBRAE_T8,VERTEBRAE_T7,VERTEBRAE_T6,VERTEBRAE_T5,VERTEBRAE_T4,VERTEBRAE_T3,VERTEBRAE_T2,VERTEBRAE_T1,VERTEBRAE_C7,VERTEBRAE_C6,VERTEBRAE_C5,VERTEBRAE_C4,VERTEBRAE_C3,VERTEBRAE_C2,VERTEBRAE_C1,ESOPHAGUS,TRACHEA,MYOCARDIUM,LEFT_ATRIUM,LEFT_VENTRICLE,RIGHT_ATRIUM,RIGHT_VENTRICLE,PULMONARY_ARTERY,BRAIN,LEFT_ILIAC_ARTERY,RIGHT_ILIAC_ARTERY,LEFT_ILIAC_VEIN,RIGHT_ILIAC_VEIN,SMALL_INTESTINE,DUODENUM,COLON,LEFT_RIB_1,LEFT_RIB_2,LEFT_RIB_3,LEFT_RIB_4,LEFT_RIB_5,LEFT_RIB_6,LEFT_RIB_7,LEFT_RIB_8,LEFT_RIB_9,LEFT_RIB_10,LEFT_RIB_11,LEFT_RIB_12,RIGHT_RIB_1,RIGHT_RIB_2,RIGHT_RIB_3,RIGHT_RIB_4,RIGHT_RIB_5,RIGHT_RIB_6,RIGHT_RIB_7,RIGHT_RIB_8,RIGHT_RIB_9,RIGHT_RIB_10,RIGHT_RIB_11,RIGHT_RIB_12,LEFT_HUMERUS,RIGHT_HUMERUS,LEFT_SCAPULA,RIGHT_SCAPULA,LEFT_CLAVICLE,RIGHT_CLAVICLE,LEFT_FEMUR,RIGHT_FEMUR,LEFT_HIP,RIGHT_HIP,SACRUM,FACE,LEFT_GLUTEUS_MAXIMUS,RIGHT_GLUTEUS_MAXIMUS,LEFT_GLUTEUS_MEDIUS,RIGHT_GLUTEUS_MEDIUS,LEFT_GLUTEUS_MINIMUS,RIGHT_GLUTEUS_MINIMUS,LEFT_AUTOCHTHONOUS_BACK_MUSCLE,RIGHT_AUTOCHTHONOUS_BACK_MUSCLE,LEFT_ILIOPSOAS,RIGHT_ILIOPSOAS,URINARY_BLADDER', data='in_data', the="output segmentation mask containing all labels")
+    def task(self, instance: Instance, in_data: InstanceData, out_data: InstanceData) -> None:
+
+        # build command
+        bash_command = ["TotalSegmentator"]
+        bash_command += ["-i", in_data.abspath]
+
+        # multi-label output (one nifti file containing all labels instead of one nifti file per label)
+        self.v("Generating multi-label output ('--ml')")
+        bash_command += ["-o", out_data.abspath]
+        bash_command += ["--ml"]
+
+        # fast mode
+        if self.use_fast_mode:
+            self.v("Running TotalSegmentator in fast mode ('--fast', 3mm)")
+            bash_command += ["--fast"]
+        else:
+            self.v("Running TotalSegmentator in default mode (1.5mm)")
+
+        # TODO: remove
+        self.v(">> run: ", " ".join(bash_command))
+
+        # run the model
+        self.subprocess(bash_command, text=True)
\ No newline at end of file
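A closing note on the hard-coded ROI constant and the roi= output meta string: they mirror TotalSegmentator's label order, as the commented-out generator lines above indicate. A small sketch to verify that the constant stays in sync (assuming the TotalSegmentator 1.x API pinned in the Dockerfile, where class_map['total'] maps label indices 1-104 to structure names; the module import path is hypothetical and depends on how the container's PYTHONPATH is configured):

    from totalsegmentator.map_to_binary import class_map
    # hypothetical import path for the module defined above
    from models.bamf_nnunet_pet_ct_breast.utils.TotalSegmentatorMLRunner import ROI, mapping

    generated = ','.join(mapping[class_map['total'][ci]] for ci in range(1, 105))
    assert generated == ROI, "ROI constant is out of sync with TotalSegmentator's class_map"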