From 1cf2989b39c8650cc6dd2cb31b79dbcd92eb5b3f Mon Sep 17 00:00:00 2001
From: rdemets
Date: Wed, 18 Jan 2023 14:00:25 +0100
Subject: [PATCH] initial commit

---
 Untitled.ipynb                                |   89 +
 configs/._default_multiclass_unet.yml         |  Bin 0 -> 4096 bytes
 .../default_multiclass_unet-checkpoint.yml    |  145 ++
 .../default_singleclass_unet-checkpoint.yml   |  146 ++
 configs/default_multiclass_unet.yml           |  145 ++
 configs/default_singleclass_unet.yml          |  146 ++
 configs/default_unet.yml                      |  138 ++
 html/models/CNN_Base.html                     | 1604 +++++++++++++++++
 html/models/Unet.html                         |  417 +++++
 html/models/Unet_Resnet.html                  | 1095 +++++++++++
 html/models/index.html                        |   86 +
 html/models/internals/dataset.html            |  958 ++++++++++
 html/models/internals/image_functions.html    | 1340 ++++++++++++++
 html/models/internals/index.html              |   86 +
 html/models/internals/losses.html             |  705 ++++++++
 html/models/internals/network_config.html     |  908 ++++++++++
 html/models/layers/index.html                 |   71 +
 html/models/layers/layers.html                |  222 +++
 models/.DS_Store                              |  Bin 0 -> 6148 bytes
 models/._.DS_Store                            |  Bin 0 -> 4096 bytes
 models/._CNN_Base.py                          |  Bin 0 -> 4096 bytes
 .../.ipynb_checkpoints/CNN_Base-checkpoint.py |  570 ++++++
 models/.ipynb_checkpoints/Unet-checkpoint.py  |  109 ++
 .../Unet_ResAttnet-checkpoint.py              |  501 +++++
 .../Unet_Resnet-checkpoint.py                 |  260 +++
 .../.ipynb_checkpoints/__init__-checkpoint.py |    1 +
 models/CNN_Base.py                            |  570 ++++++
 models/Unet.py                                |  109 ++
 models/Unet_ResAttnet.py                      |  501 +++++
 models/Unet_Resnet.py                         |  260 +++
 models/__init__.py                            |    1 +
 models/__pycache__/CNN_Base.cpython-36.pyc    |  Bin 0 -> 15946 bytes
 models/__pycache__/CNN_Base.cpython-37.pyc    |  Bin 0 -> 16027 bytes
 models/__pycache__/Unet.cpython-36.pyc        |  Bin 0 -> 2864 bytes
 models/__pycache__/Unet.cpython-37.pyc        |  Bin 0 -> 2846 bytes
 .../__pycache__/Unet_ResAttnet.cpython-36.pyc |  Bin 0 -> 9974 bytes
 .../__pycache__/Unet_ResAttnet.cpython-37.pyc |  Bin 0 -> 9748 bytes
 models/__pycache__/Unet_Resnet.cpython-36.pyc |  Bin 0 -> 7049 bytes
 models/__pycache__/Unet_Resnet.cpython-37.pyc |  Bin 0 -> 6969 bytes
 models/__pycache__/__init__.cpython-36.pyc    |  Bin 0 -> 206 bytes
 models/__pycache__/__init__.cpython-37.pyc    |  Bin 0 -> 224 bytes
 models/internals/._losses.py                  |  Bin 0 -> 4096 bytes
 .../.ipynb_checkpoints/__init__-checkpoint.py |    1 +
 .../.ipynb_checkpoints/dataset-checkpoint.py  |  304 ++++
 .../image_functions-checkpoint.py             |  366 ++++
 .../.ipynb_checkpoints/losses-checkpoint.py   |  328 ++++
 .../.ipynb_checkpoints/metrics-checkpoint.py  |   23 +
 .../network_config-checkpoint.py              |  237 +++
 models/internals/__init__.py                  |    1 +
 .../__pycache__/__init__.cpython-36.pyc       |  Bin 0 -> 216 bytes
 .../__pycache__/__init__.cpython-37.pyc       |  Bin 0 -> 234 bytes
 .../__pycache__/dataset.cpython-36.pyc        |  Bin 0 -> 7665 bytes
 .../__pycache__/dataset.cpython-37.pyc        |  Bin 0 -> 7668 bytes
 .../image_functions.cpython-36.pyc            |  Bin 0 -> 9734 bytes
 .../image_functions.cpython-37.pyc            |  Bin 0 -> 9709 bytes
 .../__pycache__/losses.cpython-36.pyc         |  Bin 0 -> 9786 bytes
 .../__pycache__/losses.cpython-37.pyc         |  Bin 0 -> 9707 bytes
 .../__pycache__/metrics.cpython-36.pyc        |  Bin 0 -> 989 bytes
 .../__pycache__/metrics.cpython-37.pyc        |  Bin 0 -> 997 bytes
 .../__pycache__/network_config.cpython-36.pyc |  Bin 0 -> 6930 bytes
 .../__pycache__/network_config.cpython-37.pyc |  Bin 0 -> 6948 bytes
 models/internals/dataset.py                   |  304 ++++
 models/internals/image_functions.py           |  366 ++++
 models/internals/losses.py                    |  328 ++++
 models/internals/metrics.py                   |   23 +
 models/internals/network_config.py            |  237 +++
 .../.ipynb_checkpoints/layers-checkpoint.py   |   59 +
 models/layers/__init__.py                     |    1 +
 .../__pycache__/__init__.cpython-36.pyc       |  Bin 0 -> 213 bytes
 .../__pycache__/__init__.cpython-37.pyc       |  Bin 0 -> 231 bytes
 .../layers/__pycache__/layers.cpython-36.pyc  |  Bin 0 -> 2291 bytes
 .../layers/__pycache__/layers.cpython-37.pyc  |  Bin 0 -> 2304 bytes
 models/layers/layers.py                       |   59 +
 requirements.txt                              |   20 +
 unets.ipynb                                   |  549 ++++++
 75 files changed, 14389 insertions(+)
 create mode 100644 Untitled.ipynb
 create mode 100644 configs/._default_multiclass_unet.yml
 create mode 100644 configs/.ipynb_checkpoints/default_multiclass_unet-checkpoint.yml
 create mode 100644 configs/.ipynb_checkpoints/default_singleclass_unet-checkpoint.yml
 create mode 100644 configs/default_multiclass_unet.yml
 create mode 100644 configs/default_singleclass_unet.yml
 create mode 100644 configs/default_unet.yml
 create mode 100644 html/models/CNN_Base.html
 create mode 100644 html/models/Unet.html
 create mode 100644 html/models/Unet_Resnet.html
 create mode 100644 html/models/index.html
 create mode 100644 html/models/internals/dataset.html
 create mode 100644 html/models/internals/image_functions.html
 create mode 100644 html/models/internals/index.html
 create mode 100644 html/models/internals/losses.html
 create mode 100644 html/models/internals/network_config.html
 create mode 100644 html/models/layers/index.html
 create mode 100644 html/models/layers/layers.html
 create mode 100644 models/.DS_Store
 create mode 100644 models/._.DS_Store
 create mode 100644 models/._CNN_Base.py
 create mode 100644 models/.ipynb_checkpoints/CNN_Base-checkpoint.py
 create mode 100644 models/.ipynb_checkpoints/Unet-checkpoint.py
 create mode 100644 models/.ipynb_checkpoints/Unet_ResAttnet-checkpoint.py
 create mode 100644 models/.ipynb_checkpoints/Unet_Resnet-checkpoint.py
 create mode 100644 models/.ipynb_checkpoints/__init__-checkpoint.py
 create mode 100644 models/CNN_Base.py
 create mode 100644 models/Unet.py
 create mode 100644 models/Unet_ResAttnet.py
 create mode 100644 models/Unet_Resnet.py
 create mode 100644 models/__init__.py
 create mode 100644 models/__pycache__/CNN_Base.cpython-36.pyc
 create mode 100644 models/__pycache__/CNN_Base.cpython-37.pyc
 create mode 100644 models/__pycache__/Unet.cpython-36.pyc
 create mode 100644 models/__pycache__/Unet.cpython-37.pyc
 create mode 100644 models/__pycache__/Unet_ResAttnet.cpython-36.pyc
 create mode 100644 models/__pycache__/Unet_ResAttnet.cpython-37.pyc
 create mode 100644 models/__pycache__/Unet_Resnet.cpython-36.pyc
 create mode 100644 models/__pycache__/Unet_Resnet.cpython-37.pyc
 create mode 100644 models/__pycache__/__init__.cpython-36.pyc
 create mode 100644 models/__pycache__/__init__.cpython-37.pyc
 create mode 100644 models/internals/._losses.py
 create mode 100644 models/internals/.ipynb_checkpoints/__init__-checkpoint.py
 create mode 100644 models/internals/.ipynb_checkpoints/dataset-checkpoint.py
 create mode 100644 models/internals/.ipynb_checkpoints/image_functions-checkpoint.py
 create mode 100644 models/internals/.ipynb_checkpoints/losses-checkpoint.py
 create mode 100644 models/internals/.ipynb_checkpoints/metrics-checkpoint.py
 create mode 100644 models/internals/.ipynb_checkpoints/network_config-checkpoint.py
 create mode 100644 models/internals/__init__.py
 create mode 100644 models/internals/__pycache__/__init__.cpython-36.pyc
 create mode 100644 models/internals/__pycache__/__init__.cpython-37.pyc
 create mode 100644 models/internals/__pycache__/dataset.cpython-36.pyc
 create mode 100644 models/internals/__pycache__/dataset.cpython-37.pyc
 create mode 100644 models/internals/__pycache__/image_functions.cpython-36.pyc
 create mode 100644 models/internals/__pycache__/image_functions.cpython-37.pyc
 create mode 100644 models/internals/__pycache__/losses.cpython-36.pyc
 create mode 100644 models/internals/__pycache__/losses.cpython-37.pyc
 create mode 100644 models/internals/__pycache__/metrics.cpython-36.pyc
 create mode 100644 models/internals/__pycache__/metrics.cpython-37.pyc
 create mode 100644 models/internals/__pycache__/network_config.cpython-36.pyc
 create mode 100644 models/internals/__pycache__/network_config.cpython-37.pyc
 create mode 100644 models/internals/dataset.py
 create mode 100644 models/internals/image_functions.py
 create mode 100644 models/internals/losses.py
 create mode 100644 models/internals/metrics.py
 create mode 100644 models/internals/network_config.py
 create mode 100644 models/layers/.ipynb_checkpoints/layers-checkpoint.py
 create mode 100644 models/layers/__init__.py
 create mode 100644 models/layers/__pycache__/__init__.cpython-36.pyc
 create mode 100644 models/layers/__pycache__/__init__.cpython-37.pyc
 create mode 100644 models/layers/__pycache__/layers.cpython-36.pyc
 create mode 100644 models/layers/__pycache__/layers.cpython-37.pyc
 create mode 100644 models/layers/layers.py
 create mode 100644 requirements.txt
 create mode 100644 unets.ipynb

diff --git a/Untitled.ipynb b/Untitled.ipynb
new file mode 100644
index 0000000..9632afc
--- /dev/null
+++ b/Untitled.ipynb
@@ -0,0 +1,89 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "id": "676fb056",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:516: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
+      "  _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n",
+      "C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:517: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
+      "  _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n",
+      "C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:518: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
+      "  _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n",
+      "C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:519: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
+      "  _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n",
+      "C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:520: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
+      "  _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n",
+      "C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
+      "  np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n",
+      "C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\tensorboard\\compat\\tensorflow_stub\\dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
+      "  _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n",
+      "C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\tensorboard\\compat\\tensorflow_stub\\dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
+      "  _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n",
+      "C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\tensorboard\\compat\\tensorflow_stub\\dtypes.py:543: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
+      "  _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n",
+      "C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\tensorboard\\compat\\tensorflow_stub\\dtypes.py:544: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
+      "  _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n",
+      "C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\tensorboard\\compat\\tensorflow_stub\\dtypes.py:545: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
+      "  _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n",
+      "C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\tensorboard\\compat\\tensorflow_stub\\dtypes.py:550: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
+      "  np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Default GPU Device:/device:GPU:0\n"
+     ]
+    }
+   ],
+   "source": [
+    "import tensorflow as tf \n",
+    "\n",
+    "if tf.test.gpu_device_name(): \n",
+    "\n",
+    "    print('Default GPU Device:{}'.format(tf.test.gpu_device_name()))\n",
+    "\n",
+    "else:\n",
+    "\n",
+    "    print(\"Please install GPU version of TF\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "fb1acd79",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.13"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/configs/._default_multiclass_unet.yml b/configs/._default_multiclass_unet.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e7d5edc1a7122d07acfbb8bad5a394fc5c54fee8
GIT binary patch
literal 4096
zcmZQz6=P>$Vqox1Ojhs@R)|o50+1L3ClDJkFz{^v(m+1nBL)UWIUt(=a103v0xDsI
z=wMg?WDB5a0m{L|rIPb=^%4sTa#Hnj5{pYpi&Ill5=&B*1A;+%gqkwqKy)EOnfZB%IXRUIIjLzS3Q0MMdD+0eFjUu&rcnJ4_lgXI-2eXo
Dd^IS6

literal 0
HcmV?d00001

diff --git a/configs/.ipynb_checkpoints/default_multiclass_unet-checkpoint.yml b/configs/.ipynb_checkpoints/default_multiclass_unet-checkpoint.yml
new file mode 100644
index 0000000..f5be855
--- /dev/null
+++ b/configs/.ipynb_checkpoints/default_multiclass_unet-checkpoint.yml
@@ -0,0 +1,145 @@
+general:
+#### General settings ####
+    dataset_dir: '/mnt/mbi/home/mbirdm/AI/data_ai/Richard/MultiClass/RI/Train'
+    model_dir: '/mnt/mbi/home/mbirdm/AI/data_ai/Richard/Networks_edge/RI/'
+    image_subfolder: 'Images'
+    ground_truth_subfolder: 'Masks'
+
+    # CPU/GPU settings
+    visible_gpu: None
+    use_cpu: False
+    for_prediction: False
+
+    #callbacks
+    reduce_LR_on_plateau: True
+    use_tensorboard: True
+    early_stopping: False
+
+    # File Saving
+    save_as_uint16: True
+
+model:
+#### Model parameters ####
+    filters: 16 # convolution filters
+    levels: 4 # for unet
+    num_epochs: 100
+    val_split: 0.1
+    batch_size_per_GPU: 10
+
+    optimizer:
+        optimizer_function: 'adam' #'sgd','rmsprop', 'adam'
+        learning_rate: 0.0001
+        decay: 0
+        momentum: 0.9
+        nesterov: True
+
+    loss: 'bce_dice_loss' #'bce_dice_loss', 'binary_crossentropy', 'categorical_crossentropy','jaccard_distance_loss','lovasz_hinge','dice_loss','sparse_categorical_crossentropy'
+    edge_enhance: False
+
+    metrics:
+        - 'categorical_accuracy' # 'binary_accuracy','categorical_accuracy'
+
+    dropout_value: 0.5
+    weight_regularizer: 0
+
+    initializer: 'he_normal'
+    strides: [1, 1]
+
+    activation:
+        activation_function: 'relu' #'relu', 'sigmoid','softmax', 'tanh'
+        final_activation: 'softmax' #'relu', 'sigmoid','softmax', 'tanh'
+
+    padding: 'same'
+
+images:
+#### Image/Ground truth settings ####
+    tile_size: [512,512] # h,w
+    tile_overlap_size: [0,0]
+    image_channel: 1
+    nb_classes: 3
+    invert_ground_truth: False
+    use_binary_erosion: False
+    use_binary_dilation: False
+    use_binary_dilation_after_augmentation: False
+    disk_size: 1
+
+    # image normalization during dataset loading
+    percentile_normalization: True
+    percentile: [3, 99.8]
+
+    # patch normalization during test time
+    scale_input: False
+    mean_std_normalization: False
+    mean: 0
+    std: 0
+
+augmentation:
+#### Image augmentations settings ####
+    augmentation_library: 'albumentations'
+    num_augmented_images: 10
+    augmentations_p: 0.9
+
+    random_rotate: True
+    random_rotate_p: 0.9
+
+    flip: True
+    transpose: True
+
+    blur_group: False
+    blur_group_p: 0.3
+
+    motion_blur: False
+    motion_blur_p: 0.1
+    median_blur: False
+    median_blur_limit: 3
+    median_blur_p: 0.3
+    blur: False
+    blur_limit: 3
+    blur_p: 0.3
+
+    shift_scale_rotate: True
+    shift_scale_rotate_p: 0.3
+    shift_limit: 0.0625
+    scale_limit: 0.5
+    rotate_limit: 45
+
+    distortion_group: False
+    distortion_group_p: 0.2
+    optical_distortion: False
+    optical_distortion_p: 0.3
+    elastic_transform: False
+    elastic_transform_p: 0.3
+    grid_distortion: False
+    grid_distortion_p: 0.3
+
+    brightness_contrast_group: False
+    brightness_contrast_group_p: 0.3
+    clahe: False
+    sharpen: False
+    random_brightness_contrast: False
+
+callbacks:
+#### Callback settings ####
+    # Tensorboard settings
+    tensorboard:
+        write_graph: False
+        write_images: False
+        write_grads: False
+        histogram_freq: 0
+
+    reduceLR:
+        # Reduce LR on plateau settings
+        reduce_LR_monitor: 'val_loss'
+        reduce_LR_patience: 10
+        reduce_LR_factor: 0.5
+        reduce_LR_min_lr: 0.000001
+
+    earlystopping:
+        # Early stopping settings
+        early_stopping_monitor: 'val_loss'
+        early_stopping_patience: 20
+        early_stopping_min_delta: 0
+
+    modelcheckpoint:
+        # Model checkpoint settings
+        save_best_weights: True
\ No newline at end of file
diff --git a/configs/.ipynb_checkpoints/default_singleclass_unet-checkpoint.yml b/configs/.ipynb_checkpoints/default_singleclass_unet-checkpoint.yml
new file mode 100644
index 0000000..6b58bf7
--- /dev/null
+++ b/configs/.ipynb_checkpoints/default_singleclass_unet-checkpoint.yml
@@ -0,0 +1,146 @@
+general:
+#### General settings ####
+    dataset_dir: '/Users/cjt678/Desktop/Unets/Data/'
+    #dataset_dir: '/mnt/mbi/home/mbirdm/AI/data_ai/Anne/Unet_Noyau/train/'
+    model_dir: '/Users/cjt678/Desktop/Unets/Networks/'
+    image_subfolder: 'Images'
+    ground_truth_subfolder: 'Masks'
+
+    # CPU/GPU settings
+    visible_gpu: None
+    use_cpu: False
+    for_prediction: False
+
+    #callbacks
+    reduce_LR_on_plateau: True
+    use_tensorboard: True
+    early_stopping: False
+
+    # File Saving
+    save_as_uint16: True
+
+model:
+#### Model parameters ####
+    filters: 16 # convolution filters
+    levels: 4 # for unet
+    num_epochs: 120
+    val_split: 0.1
+    batch_size_per_GPU: 6
+
+    optimizer:
+        optimizer_function: 'adam' #'sgd','rmsprop', 'adam'
+        learning_rate: 0.0001
+        decay: 0
+        momentum: 0.9
+        nesterov: True
+
+    loss: 'dice_loss' #'bce_dice_loss', 'binary_crossentropy' 'categorical_crossentropy','jaccard_distance_loss','lovasz_hinge','dice_loss','sparse_categorical_crossentropy'
+    edge_enhance: True
+
+    metrics:
+        - 'IoU' # 'binary_accuracy','categorical_accuracy', 'IoU'
+
+    dropout_value: 0.5
+    weight_regularizer: 0
+
+    initializer: 'he_normal'
+    strides: [1, 1]
+
+    activation:
+        activation_function: 'relu' #'relu', 'sigmoid','softmax', 'tanh'
+        final_activation: 'sigmoid' #'relu', 'sigmoid','softmax', 'tanh'
+
+    padding: 'same'
+
+images:
+#### Image/Ground truth settings ####
+    tile_size: [512,512] # h,w,z
+    tile_overlap_size: [0,0]
+    image_channel: 1
+    nb_classes: 1
+    invert_ground_truth: False
+    use_binary_erosion: False
+    use_binary_dilation: False
+    use_binary_dilation_after_augmentation: False
+    disk_size: 1
+
+    # image normalization during dataset loading
+    percentile_normalization: True
+    percentile: [3, 99.8]
+
+    # patch normalization during test time
+    scale_input: False
+    mean_std_normalization: False
+    mean: 0
+    std: 0
+
+augmentation:
+#### Image augmentations settings ####
+    augmentation_library: 'albumentations'
+    num_augmented_images: 10
+    augmentations_p: 0.9
+
+    random_rotate: True
+    random_rotate_p: 0.9
+
+    flip: True
+    transpose: True
+
+    blur_group: False
+    blur_group_p: 0.3
+
+    motion_blur: False
+    motion_blur_p: 0.1
+    median_blur: False
+    median_blur_limit: 3
+    median_blur_p: 0.3
+    blur: False
+    blur_limit: 3
+    blur_p: 0.3
+
+    shift_scale_rotate: True
+    shift_scale_rotate_p: 0.3
+    shift_limit: 0.0625
+    scale_limit: 0.5
+    rotate_limit: 45
+
+    distortion_group: False
+    distortion_group_p: 0.2
+    optical_distortion: False
+    optical_distortion_p: 0.3
+    elastic_transform: False
+    elastic_transform_p: 0.3
+    grid_distortion: False
+    grid_distortion_p: 0.3
+
+    brightness_contrast_group: False
+    brightness_contrast_group_p: 0.3
+    clahe: False
+    sharpen: False
+    random_brightness_contrast: False
+
+callbacks:
+#### Callback settings ####
+    # Tensorboard settings
+    tensorboard:
+        write_graph: False
+        write_images: False
+        write_grads: False
+        histogram_freq: 0
+
+    reduceLR:
+        # Reduce LR on plateau settings
+        reduce_LR_monitor: 'val_loss'
+        reduce_LR_patience: 10
+        reduce_LR_factor: 0.5
+        reduce_LR_min_lr: 0.000001
+
+    earlystopping:
+        # Early stopping settings
+        early_stopping_monitor: 'val_loss'
+        early_stopping_patience: 20
+        early_stopping_min_delta: 0
+
+    modelcheckpoint:
+        # Model checkpoint settings
+        save_best_weights: True
\ No newline at end of file
diff --git a/configs/default_multiclass_unet.yml b/configs/default_multiclass_unet.yml
new file mode 100644
index 0000000..f5be855
--- /dev/null
+++ b/configs/default_multiclass_unet.yml
@@ -0,0 +1,145 @@
+general:
+#### General settings ####
+    dataset_dir: '/mnt/mbi/home/mbirdm/AI/data_ai/Richard/MultiClass/RI/Train'
+    model_dir: '/mnt/mbi/home/mbirdm/AI/data_ai/Richard/Networks_edge/RI/'
+    image_subfolder: 'Images'
+    ground_truth_subfolder: 'Masks'
+
+    # CPU/GPU settings
+    visible_gpu: None
+    use_cpu: False
+    for_prediction: False
+
+    #callbacks
+    reduce_LR_on_plateau: True
+    use_tensorboard: True
+    early_stopping: False
+
+    # File Saving
+    save_as_uint16: True
+
+model:
+#### Model parameters ####
+    filters: 16 # convolution filters
+    levels: 4 # for unet
+    num_epochs: 100
+    val_split: 0.1
+    batch_size_per_GPU: 10
+
+    optimizer:
+        optimizer_function: 'adam' #'sgd','rmsprop', 'adam'
+        learning_rate: 0.0001
+        decay: 0
+        momentum: 0.9
+        nesterov: True
+
+    loss: 'bce_dice_loss' #'bce_dice_loss', 'binary_crossentropy', 'categorical_crossentropy','jaccard_distance_loss','lovasz_hinge','dice_loss','sparse_categorical_crossentropy'
+    edge_enhance: False
+
+    metrics:
+        - 'categorical_accuracy' # 'binary_accuracy','categorical_accuracy'
+
+    dropout_value: 0.5
+    weight_regularizer: 0
+
+    initializer: 'he_normal'
+    strides: [1, 1]
+
+    activation:
+        activation_function: 'relu' #'relu', 'sigmoid','softmax', 'tanh'
+        final_activation: 'softmax' #'relu', 'sigmoid','softmax', 'tanh'
+
+    padding: 'same'
+
+images:
+#### Image/Ground truth settings ####
+    tile_size: [512,512] # h,w
+    tile_overlap_size: [0,0]
+    image_channel: 1
+    nb_classes: 3
+    invert_ground_truth: False
+    use_binary_erosion: False
+    use_binary_dilation: False
+    use_binary_dilation_after_augmentation: False
+    disk_size: 1
+
+    # image normalization during dataset loading
+    percentile_normalization: True
+    percentile: [3, 99.8]
+
+    # patch normalization during test time
+    scale_input: False
+    mean_std_normalization: False
+    mean: 0
+    std: 0
+
+augmentation:
+#### Image augmentations settings ####
+    augmentation_library: 'albumentations'
+    num_augmented_images: 10
+    augmentations_p: 0.9
+
+    random_rotate: True
+    random_rotate_p: 0.9
+
+    flip: True
+    transpose: True
+
+    blur_group: False
+    blur_group_p: 0.3
+
+    motion_blur: False
+    motion_blur_p: 0.1
+    median_blur: False
+    median_blur_limit: 3
+    median_blur_p: 0.3
+    blur: False
+    blur_limit: 3
+    blur_p: 0.3
+
+    shift_scale_rotate: True
+    shift_scale_rotate_p: 0.3
+    shift_limit: 0.0625
+    scale_limit: 0.5
+    rotate_limit: 45
+
+    distortion_group: False
+    distortion_group_p: 0.2
+    optical_distortion: False
+    optical_distortion_p: 0.3
+    elastic_transform: False
+    elastic_transform_p: 0.3
+    grid_distortion: False
+    grid_distortion_p: 0.3
+
+    brightness_contrast_group: False
+    brightness_contrast_group_p: 0.3
+    clahe: False
+    sharpen: False
+    random_brightness_contrast: False
+
+callbacks:
+#### Callback settings ####
+    # Tensorboard settings
+    tensorboard:
+        write_graph: False
+        write_images: False
+        write_grads: False
+        histogram_freq: 0
+
+    reduceLR:
+        # Reduce LR on plateau settings
+        reduce_LR_monitor: 'val_loss'
+        reduce_LR_patience: 10
+        reduce_LR_factor: 0.5
+        reduce_LR_min_lr: 0.000001
+
+    earlystopping:
+        # Early stopping settings
+        early_stopping_monitor: 'val_loss'
+        early_stopping_patience: 20
+        early_stopping_min_delta: 0
+
+    modelcheckpoint:
+        # Model checkpoint settings
+        save_best_weights: True
\ No newline at end of file
diff --git a/configs/default_singleclass_unet.yml b/configs/default_singleclass_unet.yml
new file mode 100644
index 0000000..6b58bf7
--- /dev/null
+++ b/configs/default_singleclass_unet.yml
@@ -0,0 +1,146 @@
+general:
+#### General settings ####
+    dataset_dir: '/Users/cjt678/Desktop/Unets/Data/'
+    #dataset_dir: '/mnt/mbi/home/mbirdm/AI/data_ai/Anne/Unet_Noyau/train/'
+    model_dir: '/Users/cjt678/Desktop/Unets/Networks/'
+    image_subfolder: 'Images'
+    ground_truth_subfolder: 'Masks'
+
+    # CPU/GPU settings
+    visible_gpu: None
+    use_cpu: False
+    for_prediction: False
+
+    #callbacks
+    reduce_LR_on_plateau: True
+    use_tensorboard: True
+    early_stopping: False
+
+    # File Saving
+    save_as_uint16: True
+
+model:
+#### Model parameters ####
+    filters: 16 # convolution filters
+    levels: 4 # for unet
+    num_epochs: 120
+    val_split: 0.1
+    batch_size_per_GPU: 6
+
+    optimizer:
+        optimizer_function: 'adam' #'sgd','rmsprop', 'adam'
+        learning_rate: 0.0001
+        decay: 0
+        momentum: 0.9
+        nesterov: True
+
+    loss: 'dice_loss' #'bce_dice_loss', 'binary_crossentropy' 'categorical_crossentropy','jaccard_distance_loss','lovasz_hinge','dice_loss','sparse_categorical_crossentropy'
+    edge_enhance: True
+
+    metrics:
+        - 'IoU' # 'binary_accuracy','categorical_accuracy', 'IoU'
+
+    dropout_value: 0.5
+    weight_regularizer: 0
+
+    initializer: 'he_normal'
+    strides: [1, 1]
+
+    activation:
+        activation_function: 'relu' #'relu', 'sigmoid','softmax', 'tanh'
+        final_activation: 'sigmoid' #'relu', 'sigmoid','softmax', 'tanh'
+
+    padding: 'same'
+
+images:
+#### Image/Ground truth settings ####
+    tile_size: [512,512] # h,w,z
+    tile_overlap_size: [0,0]
+    image_channel: 1
+    nb_classes: 1
+    invert_ground_truth: False
+    use_binary_erosion: False
+    use_binary_dilation: False
+    use_binary_dilation_after_augmentation: False
+    disk_size: 1
+
+    # image normalization during dataset loading
+    percentile_normalization: True
+    percentile: [3, 99.8]
+
+    # patch normalization during test time
+    scale_input: False
+    mean_std_normalization: False
+    mean: 0
+    std: 0
+
+augmentation:
+#### Image augmentations settings ####
+    augmentation_library: 'albumentations'
+    num_augmented_images: 10
+    augmentations_p: 0.9
+
+    random_rotate: True
+    random_rotate_p: 0.9
+
+    flip: True
+    transpose: True
+
+    blur_group: False
+    blur_group_p: 0.3
+
+    motion_blur: False
+    motion_blur_p: 0.1
+    median_blur: False
+    median_blur_limit: 3
+    median_blur_p: 0.3
+    blur: False
+    blur_limit: 3
+    blur_p: 0.3
+
+    shift_scale_rotate: True
+    shift_scale_rotate_p: 0.3
+    shift_limit: 0.0625
+    scale_limit: 0.5
+    rotate_limit: 45
+
+    distortion_group: False
+    distortion_group_p: 0.2
+    optical_distortion: False
+    optical_distortion_p: 0.3
+    elastic_transform: False
+    elastic_transform_p: 0.3
+    grid_distortion: False
+    grid_distortion_p: 0.3
+
+    brightness_contrast_group: False
+    brightness_contrast_group_p: 0.3
+    clahe: False
+    sharpen: False
+    random_brightness_contrast: False
+
+callbacks:
+#### Callback settings ####
+    # Tensorboard settings
+    tensorboard:
+        write_graph: False
+        write_images: False
+        write_grads: False
+        histogram_freq: 0
+
+    reduceLR:
+        # Reduce LR on plateau settings
+        reduce_LR_monitor: 'val_loss'
+        reduce_LR_patience: 10
+        reduce_LR_factor: 0.5
+        reduce_LR_min_lr: 0.000001
+
+    earlystopping:
+        # Early stopping settings
+        early_stopping_monitor: 'val_loss'
+        early_stopping_patience: 20
+        early_stopping_min_delta: 0
+
+    modelcheckpoint:
+        # Model checkpoint settings
+        save_best_weights: True
\ No newline at end of file
diff --git a/configs/default_unet.yml b/configs/default_unet.yml
new file mode 100644
index 0000000..4db72fd
--- /dev/null
+++ b/configs/default_unet.yml
@@ -0,0 +1,138 @@
+general:
+#### General settings ####
+    dataset_dir: '/tf/Documents/Unet/Training_sets/'
+    model_dir: '/tf/Documents/Unet/Networks/'
+    image_subfolder: 'Images'
+    ground_truth_subfolder: 'Masks'
+
+    # CPU/GPU settings
+    visible_gpu: 0
+    use_cpu: False
+    for_prediction: False
+
+    #callbacks
+    reduce_LR_on_plateau: True
+    use_tensorboard: True
+    early_stopping: False
+
+model:
+#### Model parameters ####
+    filters: 32 # convolution filters
+    levels: 4 # for unet
+    num_epochs: 100
+    val_split: 0.1
+    batch_size_per_GPU: 32
+
+    optimizer:
+        optimizer_function: 'rmsprop'
+        learning_rate: 0.0001
+        decay: 0
+        momentum: 0.9
+        nesterov: True
+
+    loss: 'binary_crossentropy' #'bce_dice_loss'
+    metrics:
+        - 'binary_accuracy'
+
+    dropout_value: 0.5
+    weight_regularizer: 0
+
+    initializer: 'he_normal'
+
+    activation:
+        activation_function: 'relu'
+        final_activation: 'sigmoid' #'relu'
+
+    padding: 'same'
+
+images:
+#### Image/Ground truth settings ####
+    tile_size: [128,128] # h,w
+    tile_overlap_size: [0,0]
+    image_channel: 1
+    invert_ground_truth: False
+    use_binary_erosion: False
+    use_binary_dilation: False
+    use_binary_dilation_after_augmentation: False
+    disk_size: 1
+
+    # image normalization during dataset loading
+    percentile_normalization: True
+    percentile: [3, 99.8]
+
+    # patch normalization during test time
+    scale_input: False
+    mean_std_normalization: False
+    mean: 0
+    std: 0
+
+augmentation:
+#### Image augmentations settings ####
+    augmentation_library: 'albumentations'
+    num_augmented_images: 10
+    augmentations_p: 0.9
+
+    random_rotate: True
+    random_rotate_p: 0.9
+
+    flip: True
+    transpose: True
+
+    blur_group: False
+    blur_group_p: 0.3
+
+    motion_blur: False
+    motion_blur_p: 0.1
+    median_blur: False
+    median_blur_limit: 3
+    median_blur_p: 0.3
+    blur: False
+    blur_limit: 3
+    blur_p: 0.3
+
+    shift_scale_rotate: True
+    shift_scale_rotate_p: 0.3
+    shift_limit: 0.0625
+    scale_limit: 0.5
+    rotate_limit: 45
+
+    distortion_group: False
+    distortion_group_p: 0.2
+    optical_distortion: False
+    optical_distortion_p: 0.3
+    elastic_transform: False
+    elastic_transform_p: 0.3
+    grid_distortion: False
+    grid_distortion_p: 0.3
+
+    brightness_contrast_group: False
+    brightness_contrast_group_p: 0.3
+    clahe: False
+    sharpen: False
+    random_brightness_contrast: False
+
+callbacks:
+#### Callback settings ####
+    # Tensorboard settings
+    tensorboard:
+        write_graph: False
+        write_images: False
+        write_grads: False
+        histogram_freq: 0
+
+    reduceLR:
+        # Reduce LR on plateau settings
+        reduce_LR_monitor: 'val_loss'
+        reduce_LR_patience: 10
+        reduce_LR_factor: 0.5
+        reduce_LR_min_lr: 0.000001
+
+    earlystopping:
+        # Early stopping settings
+        early_stopping_monitor: 'val_loss'
+        early_stopping_patience: 10
+        early_stopping_min_delta: 0
+
+    modelcheckpoint:
+        # Model checkpoint settings
+        save_best_weights: True
\ No newline at end of file
diff --git a/html/models/CNN_Base.html b/html/models/CNN_Base.html
new file mode 100644
index 0000000..37dfaaf
--- /dev/null
+++ b/html/models/CNN_Base.html
@@ -0,0 +1,1604 @@
+
+
+
+
+
+
+models.CNN_Base API documentation
+
+
+
+
+
+
+
+
+
+
+
+

Module models.CNN_Base

+
+
+
+ +Expand source code + +
import os
+
+import glob
+import datetime
+
+import skimage.io
+import numpy as np
+
+import tensorflow as tf
+
+import keras
+from keras import backend as K
+from keras.models import Model, load_model
+from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint, TensorBoard, ProgbarLogger
+
+from .internals.image_functions import Image_Functions
+from .internals.network_config import Network_Config
+from .internals.dataset import Dataset
+
+class CNN_Base(Dataset, Image_Functions):
+    def __init__(self, model_dir = None, config_filepath = None, **kwargs):
+        """Creates the base neural network class with basic functions
+    
+        Parameters
+        ----------
+        model_dir : `str`, optional
+            [Default: None] Folder where the model is stored
+        config_filepath : `str`, optional
+            [Default: None] Filepath to the config file
+        **kwargs
+            Parameters that are passed to :class:`network_config.Network_Config`
+
+        Attributes
+        ----------
+        config : :class:`network_config.Network_Config`
+            Network_config object containing the config and necessary functions
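+
+        Examples
+        ----------
+        A minimal sketch (editor's illustration: the paths are placeholders,
+        and ``CNN_Base`` is normally instantiated through a subclass such as
+        ``Unet`` that implements ``build_model``)::
+
+            net = CNN_Base(model_dir = "/path/to/networks",
+                           config_filepath = "configs/default_unet.yml")
+            net.initialize_model()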
+        """
+        
+        super().__init__()
+        
+        self.config = Network_Config(model_dir = model_dir, config_filepath = config_filepath, **kwargs)
+        
+        self.config.update_parameter(["general", "now"], datetime.datetime.now())
+        
+        if self.config.get_parameter("use_cpu") is True:
+            self.initialize_cpu()
+        else:
+            self.initialize_gpu()
+    
+    #######################
+    # Logging functions
+    #######################
+    def init_logs(self):
+        """Initiates the parameters required for the log file
+        """
+        # Directory for training logs
+        print(self.config.get_parameter("name"), self.config.get_parameter("now"))
+        self.log_dir = os.path.join(self.config.get_parameter("model_dir"), "{}-{:%Y%m%dT%H%M}".format(self.config.get_parameter("name"), self.config.get_parameter("now")))
+        
+        # Path to save after each epoch. Include placeholders that get filled by Keras.
+        self.checkpoint_path = os.path.join(self.log_dir, "{}-{:%Y%m%dT%H%M}_*epoch*.h5".format(self.config.get_parameter("name"), self.config.get_parameter("now")))
+        self.checkpoint_path = self.checkpoint_path.replace("*epoch*", "{epoch:04d}")
+        
+    def write_logs(self):
+        """Writes the log file
+        """
+        # Create log_dir if it does not exist
+        if os.path.exists(self.log_dir) is False:
+            os.makedirs(self.log_dir)
+            
+        # save the parameters used in current run to logs dir
+        self.config.write_config(os.path.join(self.log_dir, "{}-{:%Y%m%dT%H%M}-config.yml".format(self.config.get_parameter("name"), self.config.get_parameter("now"))))
+        
+    #######################
+    # Initialization functions
+    #######################
+    def summary(self):
+        """Summary of the layers in the model
+        """
+        self.model.summary()
+        
+    def compile_model(self, optimizer, loss):
+        """Compiles model
+        
+        Parameters
+        ----------
+        optimizer
+            Gradient optimizer used during the training of the network
+        loss
+            Loss function of the network
+        """
+        self.model.compile(optimizer, loss = loss, metrics = self.config.get_parameter("metrics"))
+
+    def initialize_model(self):
+        """Initializes the logs, builds the model, and chooses the correct initialization function
+        """
+        # write parameters to yaml file
+        self.init_logs()
+        if self.config.get_parameter("for_prediction") is False:
+            self.write_logs()
+            
+        # build model
+        self.model = self.build_model(self.config.get_parameter("input_size"))
+        
+        # save model to yaml file
+        if self.config.get_parameter("for_prediction") is False:
+            self.config.write_model(self.model, os.path.join(self.log_dir, "{}-{:%Y%m%dT%H%M}-model.yml".format(self.config.get_parameter("name"), self.config.get_parameter("now"))))
+
+        print("{} using single GPU or CPU..".format("Predicting" if self.config.get_parameter("for_prediction") else "Training"))
+        self.initialize_model_normal()
+            
+    def initialize_cpu(self):
+        """Sets the session to only use the CPU
+        """
+        config = tf.ConfigProto(
+                        device_count = {'CPU' : 1,
+                                        'GPU' : 0}
+                       )
+        session = tf.Session(config=config)
+        K.set_session(session)   
+        
+    def initialize_gpu(self):
+        """Sets the seesion to use the gpu specified in config file
+        """
+        os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"   # see issue #152
+        os.environ['CUDA_VISIBLE_DEVICES'] = str(self.config.get_parameter("visible_gpu")) # needs to be a string
+        
+        config = tf.ConfigProto()
+        config.gpu_options.allow_growth = True
+        sess = tf.Session(config=config)
+        K.tensorflow_backend.set_session(sess)
+    
+    def initialize_model_normal(self):
+        """Initializes the optimizer and any specified callback functions
+        """
+        opt = self.optimizer_function()
+        self.compile_model(optimizer = opt, loss = self.loss_function(self.config.get_parameter("loss")))
+        
+        if self.config.get_parameter("for_prediction") == False:
+            self.callbacks = [self.model_checkpoint_call(verbose = True)]
+
+            if self.config.get_parameter("use_tensorboard") is True:
+                self.callbacks.append(self.tensorboard_call())
+                
+            if self.config.get_parameter("reduce_LR_on_plateau") is True:
+                self.callbacks.append(ReduceLROnPlateau(monitor=self.config.get_parameter("reduce_LR_monitor"),
+                                                        factor = self.config.get_parameter("reduce_LR_factor"),
+                                                        patience = self.config.get_parameter("reduce_LR_patience"),
+                                                        min_lr = self.config.get_parameter("reduce_LR_min_lr"),
+                                                        verbose = True))
+            
+            if self.config.get_parameter("early_stopping") is True:
+                self.callbacks.append(EarlyStopping(monitor=self.config.get_parameter("early_stopping_monitor"),
+                                                    patience = self.config.get_parameter("early_stopping_patience"),
+                                                    min_delta = self.config.get_parameter("early_stopping_min_delta"),
+                                                    verbose = True))
+                
+    #######################
+    # Optimizer/Loss functions
+    #######################         
+    def optimizer_function(self, learning_rate = None):
+        """Initialize optimizer function
+        
+        Parameters
+        ----------
+        learning_rate : `float`
+            Learning rate of the gradient descent algorithm
+            
+        Returns
+        ----------
+        optimizer
+            Function to call the optimizer
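+
+        Examples
+        ----------
+        A hedged sketch (editor's illustration): the optimizer class is
+        selected by the ``optimizer_function`` entry in the config file::
+
+            # with optimizer_function: 'adam' in the config, this returns
+            # keras.optimizers.Adam(lr = 0.0001, decay = 0)
+            opt = self.optimizer_function(learning_rate = 0.0001)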
+        """
+        if learning_rate is None:
+            learning_rate = self.config.get_parameter("learning_rate")
+        if self.config.get_parameter("optimizer_function") == 'sgd':
+            return keras.optimizers.SGD(lr = learning_rate, 
+                                        decay = self.config.get_parameter("decay"), 
+                                        momentum = self.config.get_parameter("momentum"), 
+                                        nesterov = self.config.get_parameter("nesterov"))
+        elif self.config.get_parameter("optimizer_function") == 'rmsprop':
+            return keras.optimizers.RMSprop(lr = learning_rate, 
+                                            decay = self.config.get_parameter("decay"))
+        elif self.config.get_parameter("optimizer_function") == 'adam':
+            return keras.optimizers.Adam(lr = learning_rate, 
+                                         decay = self.config.get_parameter("decay"))
+        
+    def loss_function(self, loss):
+        """Initialize loss function
+        
+        Parameters
+        ----------
+        loss : `str`
+            Name of the loss function
+            
+        Returns
+        ----------
+        loss
+            Function to call loss function
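+
+        Examples
+        ----------
+        A hedged sketch (editor's illustration): built-in Keras losses are
+        returned by name, custom losses as functions imported from
+        ``internals.losses``::
+
+            loss_fn = self.loss_function("bce_dice_loss")
+            self.compile_model(optimizer = self.optimizer_function(), loss = loss_fn)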
+        """
+        if loss == "binary_crossentropy":
+            print("Using binary crossentropy")
+            return loss
+        elif loss == "jaccard_distance_loss":
+            print("Using jaccard distance loss")
+            from .internals.losses import jaccard_distance_loss
+            return jaccard_distance_loss
+        elif loss == "lovasz_hinge":
+            print("Using Lovasz-hinge loss")
+            from .internals.losses import lovasz_loss
+            return lovasz_loss
+        elif loss == "dice_loss":
+            print("Using Dice loss")
+            from .internals.losses import dice_coef_loss
+            return dice_coef_loss
+        elif loss == "bce_dice_loss":
+            print("Using 1 - Dice + BCE loss")
+            from .internals.losses import bce_dice_loss
+            return bce_dice_loss
+        elif loss == "ssim_loss":
+            print("Using DSSIM loss")
+            from .internals.losses import DSSIM_loss
+            return DSSIM_loss
+        elif loss == "bce_ssim_loss":
+            print("Using BCE + DSSIM loss")
+            from .internals.losses import bce_ssim_loss
+            return bce_ssim_loss
+        elif loss == "mean_squared_error":
+            return keras.losses.mean_squared_error
+        elif loss == "mean_absolute_error":
+            return keras.losses.mean_absolute_error
+        elif loss == "ssim_mae_loss":
+            print("Using DSSIM + MAE loss")
+            from .internals.losses import dssim_mae_loss
+            return dssim_mae_loss
+        else:
+            print("Using {}".format(loss))
+            return loss
+        
+    #######################
+    # Callbacks
+    #######################     
+    def tensorboard_call(self):
+        """Initialize tensorboard call
+        """
+        return TensorBoard(log_dir=self.log_dir, 
+                           batch_size = self.config.get_parameter("batch_size_per_GPU"), 
+                           write_graph=self.config.get_parameter("write_graph"),
+                           write_images=self.config.get_parameter("write_images"), 
+                           write_grads=self.config.get_parameter("write_grads"), 
+                           update_freq='epoch', 
+                           histogram_freq=self.config.get_parameter("histogram_freq"))
+    
+    def model_checkpoint_call(self, verbose = 0):
+        """Initialize model checkpoint call
+        """
+        return ModelCheckpoint(self.checkpoint_path, save_weights_only=True, verbose=verbose)
+    
+    #######################
+    # Clear memory once training is done
+    #######################
+    def end_training(self):
+        """Deletes model and releases gpu memory held by tensorflow
+        """
+        # del reference to model
+        del self.model
+        
+        # clear memory
+        tf.reset_default_graph()
+        K.clear_session()
+        
+        # take hold of cuda device to shut it down
+        from numba import cuda
+        cuda.select_device(0)
+        cuda.close()
+    
+    #######################
+    # Train Model
+    #######################
+    def train_model(self, verbose = True):
+        """Trains model
+        
+        Parameters
+        ----------
+        verbose : `bool`, optional
+            [Default: True] Verbose output
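+
+        Examples
+        ----------
+        A minimal sketch (editor's illustration: assumes ``self.aug_images``
+        and ``self.aug_ground_truth`` were prepared beforehand by the
+        ``Dataset`` mixin)::
+
+            net.train_model(verbose = True)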
+        """      
+        history = self.model.fit(self.aug_images, self.aug_ground_truth, validation_split = self.config.get_parameter("val_split"),
+                                 batch_size = self.config.get_parameter("batch_size"), epochs = self.config.get_parameter("num_epochs"), shuffle = True,
+                                 callbacks=self.callbacks, verbose=verbose)
+        
+        self.end_training()
+        
+    #######################
+    # Predict using loaded model weights
+    ####################### 
+    # TODO: change to load model from yaml file
+    def load_model(self, model_dir = None):
+        """Loads model from h5 file
+        
+        Parameters
+        ----------
+        model_dir : `str`, optional
+            [Default: None] Directory containing the model file
+        """
+        # TODO: rewrite to load model from yaml file
+        if model_dir is None:
+            model_dir = self.config.get_parameter("model_dir")
+            
+        if os.path.isdir(model_dir) is True:
+            list_weights_files = glob.glob(os.path.join(model_dir,'*.h5'))
+            list_weights_files.sort() # To ensure that [-1] gives the last file
+            
+            model_dir = os.path.join(model_dir,list_weights_files[-1])
+
+        self.model = load_model(model_dir) # assign via keras.models.load_model; a keras Model has no load_model method
+        print("Loaded model from: " + model_dir)
+        
+    def load_weights(self, model_dir = None, weights_index = -1):
+        """Loads weights from h5 file
+        
+        Parameters
+        ----------
+        model_dir : `str`, optional
+            [Default: None] Directory containing the weights file
+        weights_index : `int`, optional
+            [Default: -1] Index into the sorted list of .h5 weight files; -1 loads the most recently saved weights
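+
+        Examples
+        ----------
+        A hedged sketch (editor's illustration: the directory is a
+        placeholder)::
+
+            # load the most recent checkpoint found in the run folder
+            net.load_weights(model_dir = "/path/to/networks/run", weights_index = -1)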
+        """
+        if model_dir is None:
+            model_dir = self.config.get_parameter("model_dir")
+        
+        if os.path.isdir(model_dir) is True:
+            list_weights_files = glob.glob(os.path.join(model_dir,'*.h5'))
+            list_weights_files.sort() # To ensure that [-1] gives the last file
+            self.weights_path = list_weights_files[weights_index]
+            model_dir = os.path.join(model_dir, self.weights_path)
+        else:
+            self.weights_path = model_dir
+        
+        self.model.load_weights(model_dir)
+        print("Loaded weights from: " + model_dir)
+       
+    def predict_images(self, image_dir):
+        """Perform prediction on images found in ``image_dir``
+        
+        Parameters
+        ----------
+        image_dir : `str`
+            Directory containing the images to perform prediction on
+            
+        Returns
+        ----------
+        image : `array_like`
+            Last image that prediction was performed on
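+
+        Examples
+        ----------
+        A minimal sketch (editor's illustration: the directory is a
+        placeholder)::
+
+            net.load_weights()
+            last_output = net.predict_images("/path/to/test/Images")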
+        """
+        # load image list
+        image_list = self.list_images(image_dir)
+        
+        for image_path in image_list:
+            image = self.load_image(image_path = image_path)
+            
+            # percentile normalization
+            if self.config.get_parameter("percentile_normalization"):
+                image, _, _ = self.percentile_normalization(image, in_bound = self.config.get_parameter("percentile"))
+            
+            if self.config.get_parameter("tile_overlap_size") == [0,0]:
+                padding = None
+                if image.shape[0] < self.config.get_parameter("tile_size")[0] or image.shape[1] < self.config.get_parameter("tile_size")[1]:
+                    image, padding = self.pad_image(image, image_size = self.config.get_parameter("tile_size"))
+                input_image = image[np.newaxis,:,:,np.newaxis]
+                
+                output_image = self.model.predict(input_image, verbose=1)
+                
+                if padding is not None: 
+                    h, w = output_image.shape[1:3]
+                    output_image = np.reshape(output_image, (h, w))
+                    output_image = self.remove_pad_image(output_image, padding = padding)
+            else:
+                tile_image_list, num_rows, num_cols, padding = self.tile_image(image, self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size"))
+                
+                pred_train_list = []
+                for tile in tile_image_list:
+
+                    # reshape image to correct dimensions for unet
+                    h, w = tile.shape[:2]
+                    
+                    tile = np.reshape(tile, (1, h, w, 1))
+
+                    pred_train_list.extend(self.model.predict(tile, verbose=1))
+
+                output_image = self.untile_image(pred_train_list, self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size"),
+                                                 num_rows, num_cols, padding = padding)
+            
+            self.save_image(output_image, image_path)
+            
+        return output_image
+    
+    def save_image(self, image, image_path, subfolder = 'Masks', suffix = '-preds'):
+        """Saves image to image_path
+        
+        Final location of image is as follows:
+          - image_path
+              - subfolder
+                 - model/weights file name
+        
+        Parameters
+        ----------
+        image : `array_like`
+            Image to be saved
+        image_path : `str`
+            Location to save the image in
+        subfolder : `str`
+            [Default: 'Masks'] Subfolder in which the image is to be saved in
+        suffix : `str`
+            [Default: '-preds'] Suffix to append to the filename of the predicted image
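+
+        Examples
+        ----------
+        A worked example (editor's illustration with placeholder names): a
+        prediction for ``/data/Images/cell_01.tif`` made with weights file
+        ``unet-0050.h5`` is saved to ``/data/Images/Masks/unet-0050/cell_01-preds.tif``.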
+        """
+        image_dir = os.path.dirname(image_path)
+        
+        output_dir = os.path.join(image_dir, subfolder)
+        if not os.path.exists(output_dir):
+            os.makedirs(output_dir)
+            
+        basename, _ = os.path.splitext(os.path.basename(self.weights_path))
+        
+        output_dir = os.path.join(output_dir, basename)
+        if not os.path.exists(output_dir):
+            os.makedirs(output_dir)
+            
+        filename, _ = os.path.splitext(os.path.basename(image_path))
+        output_path = os.path.join(output_dir, "{}{}.tif".format(filename, suffix))
+        
+        skimage.io.imsave(output_path, image)
+
+
+
+
+
+
+
+
+
+

Classes

+
+
+class CNN_Base
+(model_dir=None, config_filepath=None, **kwargs)
+
+
+

Creates the base neural network class with basic functions

+

Parameters

+
+
model_dir : str, optional
+
[Default: None] Folder where the model is stored
+
config_filepath : str, optional
+
[Default: None] Filepath to the config file
+
**kwargs
+
Parameters that are passed to :class:network_config.Network_Config
+
+

Attributes

+
+
config : :class:network_config.Network_Config
+
Network_config object containing the config and necessary functions
+
+
+ +Expand source code + +
class CNN_Base(Dataset, Image_Functions):
+    def __init__(self, model_dir = None, config_filepath = None, **kwargs):
+        """Creates the base neural network class with basic functions
+    
+        Parameters
+        ----------
+        model_dir : `str`, optional
+            [Default: None] Folder where the model is stored
+        config_filepath : `str`, optional
+            [Default: None] Filepath to the config file
+        **kwargs
+            Parameters that are passed to :class:`network_config.Network_Config`
+
+        Attributes
+        ----------
+        config : :class:`network_config.Network_Config`
+            Network_config object containing the config and necessary functions
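+
+        Examples
+        ----------
+        A minimal sketch (editor's illustration: the paths are placeholders,
+        and ``CNN_Base`` is normally instantiated through a subclass such as
+        ``Unet`` that implements ``build_model``)::
+
+            net = CNN_Base(model_dir = "/path/to/networks",
+                           config_filepath = "configs/default_unet.yml")
+            net.initialize_model()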
+        """
+        
+        super().__init__()
+        
+        self.config = Network_Config(model_dir = model_dir, config_filepath = config_filepath, **kwargs)
+        
+        self.config.update_parameter(["general", "now"], datetime.datetime.now())
+        
+        if self.config.get_parameter("use_cpu") is True:
+            self.initialize_cpu()
+        else:
+            self.initialize_gpu()
+    
+    #######################
+    # Logging functions
+    #######################
+    def init_logs(self):
+        """Initiates the parameters required for the log file
+        """
+        # Directory for training logs
+        print(self.config.get_parameter("name"), self.config.get_parameter("now"))
+        self.log_dir = os.path.join(self.config.get_parameter("model_dir"), "{}-{:%Y%m%dT%H%M}".format(self.config.get_parameter("name"), self.config.get_parameter("now")))
+        
+        # Path to save after each epoch. Include placeholders that get filled by Keras.
+        self.checkpoint_path = os.path.join(self.log_dir, "{}-{:%Y%m%dT%H%M}_*epoch*.h5".format(self.config.get_parameter("name"), self.config.get_parameter("now")))
+        self.checkpoint_path = self.checkpoint_path.replace("*epoch*", "{epoch:04d}")
+        
+    def write_logs(self):
+        """Writes the log file
+        """
+        # Create log_dir if it does not exist
+        if os.path.exists(self.log_dir) is False:
+            os.makedirs(self.log_dir)
+            
+        # save the parameters used in current run to logs dir
+        self.config.write_config(os.path.join(self.log_dir, "{}-{:%Y%m%dT%H%M}-config.yml".format(self.config.get_parameter("name"), self.config.get_parameter("now"))))
+        
+    #######################
+    # Initialization functions
+    #######################
+    def summary(self):
+        """Summary of the layers in the model
+        """
+        self.model.summary()
+        
+    def compile_model(self, optimizer, loss):
+        """Compiles model
+        
+        Parameters
+        ----------
+        optimizer
+            Gradient optimizer used during the training of the network
+        loss
+            Loss function of the network
+        """
+        self.model.compile(optimizer, loss = loss, metrics = self.config.get_parameter("metrics"))
+
+    def initialize_model(self):
+        """Initializes the logs, builds the model, and chooses the correct initialization function
+        """
+        # write parameters to yaml file
+        self.init_logs()
+        if self.config.get_parameter("for_prediction") is False:
+            self.write_logs()
+            
+        # build model
+        self.model = self.build_model(self.config.get_parameter("input_size"))
+        
+        # save model to yaml file
+        if self.config.get_parameter("for_prediction") is False:
+            self.config.write_model(self.model, os.path.join(self.log_dir, "{}-{:%Y%m%dT%H%M}-model.yml".format(self.config.get_parameter("name"), self.config.get_parameter("now"))))
+
+        print("{} using single GPU or CPU..".format("Predicting" if self.config.get_parameter("for_prediction") else "Training"))
+        self.initialize_model_normal()
+            
+    def initialize_cpu(self):
+        """Sets the session to only use the CPU
+        """
+        config = tf.ConfigProto(
+                        device_count = {'CPU' : 1,
+                                        'GPU' : 0}
+                       )
+        session = tf.Session(config=config)
+        K.set_session(session)   
+        
+    def initialize_gpu(self):
+        """Sets the seesion to use the gpu specified in config file
+        """
+        os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"   # see issue #152
+        os.environ['CUDA_VISIBLE_DEVICES'] = str(self.config.get_parameter("visible_gpu")) # needs to be a string
+        
+        config = tf.ConfigProto()
+        config.gpu_options.allow_growth = True
+        sess = tf.Session(config=config)
+        K.tensorflow_backend.set_session(sess)
+    
+    def initialize_model_normal(self):
+        """Initializes the optimizer and any specified callback functions
+        """
+        opt = self.optimizer_function()
+        self.compile_model(optimizer = opt, loss = self.loss_function(self.config.get_parameter("loss")))
+        
+        if self.config.get_parameter("for_prediction") == False:
+            self.callbacks = [self.model_checkpoint_call(verbose = True)]
+
+            if self.config.get_parameter("use_tensorboard") is True:
+                self.callbacks.append(self.tensorboard_call())
+                
+            if self.config.get_parameter("reduce_LR_on_plateau") is True:
+                self.callbacks.append(ReduceLROnPlateau(monitor=self.config.get_parameter("reduce_LR_monitor"),
+                                                        factor = self.config.get_parameter("reduce_LR_factor"),
+                                                        patience = self.config.get_parameter("reduce_LR_patience"),
+                                                        min_lr = self.config.get_parameter("reduce_LR_min_lr"),
+                                                        verbose = True))
+            
+            if self.config.get_parameter("early_stopping") is True:
+                self.callbacks.append(EarlyStopping(monitor=self.config.get_parameter("early_stopping_monitor"),
+                                                    patience = self.config.get_parameter("early_stopping_patience"),
+                                                    min_delta = self.config.get_parameter("early_stopping_min_delta"),
+                                                    verbose = True))
+                
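+    # Config keys read above, with illustrative values (assumed, not the
+    # repo's defaults): use_tensorboard: True, reduce_LR_on_plateau: True,
+    # reduce_LR_monitor: "val_loss", reduce_LR_factor: 0.1,
+    # reduce_LR_patience: 10, reduce_LR_min_lr: 1e-7, early_stopping: True,
+    # early_stopping_monitor: "val_loss", early_stopping_patience: 20,
+    # early_stopping_min_delta: 0.001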
+    #######################
+    # Optimizer/Loss functions
+    #######################         
+    def optimizer_function(self, learning_rate = None):
+        """Initialize optimizer function
+        
+        Parameters
+        ----------
+        learning_rate : `float`
+            Learning rate of the gradient descent algorithm
+            
+        Returns
+        ----------
+        optimizer
+            Function to call the optimizer
+        """
+        if learning_rate is None:
+            learning_rate = self.config.get_parameter("learning_rate")
+        if self.config.get_parameter("optimizer_function") == 'sgd':
+            return keras.optimizers.SGD(lr = learning_rate, 
+                                        decay = self.config.get_parameter("decay"), 
+                                        momentum = self.config.get_parameter("momentum"), 
+                                        nesterov = self.config.get_parameter("nesterov"))
+        elif self.config.get_parameter("optimizer_function") == 'rmsprop':
+            return keras.optimizers.RMSprop(lr = learning_rate, 
+                                            decay = self.config.get_parameter("decay"))
+        elif self.config.get_parameter("optimizer_function") == 'adam':
+            return keras.optimizers.Adam(lr = learning_rate, 
+                                         decay = self.config.get_parameter("decay"))
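+    # Usage sketch (illustrative config values): with optimizer_function: "adam",
+    # learning_rate: 1e-4 and decay: 0.0, this returns
+    # keras.optimizers.Adam(lr=1e-4, decay=0.0). Note that any other optimizer
+    # name falls through and the method returns None.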
+        
+    def loss_function(self, loss):
+        """Initialize loss function
+        
+        Parameters
+        ----------
+        loss : `str`
+            Name of the loss function
+            
+        Returns
+        ----------
+        loss
+            Function to call loss function
+        """
+        if loss == "binary_crossentropy":
+            print("Using binary crossentropy")
+            return loss
+        elif loss == "jaccard_distance_loss":
+            print("Using jaccard distance loss")
+            from .internals.losses import jaccard_distance_loss
+            return jaccard_distance_loss
+        elif loss == "lovasz_hinge":
+            print("Using Lovasz-hinge loss")
+            from .internals.losses import lovasz_loss
+            return lovasz_loss
+        elif loss == "dice_loss":
+            print("Using Dice loss")
+            from .internals.losses import dice_coef_loss
+            return dice_coef_loss
+        elif loss == "bce_dice_loss":
+            print("Using 1 - Dice + BCE loss")
+            from .internals.losses import bce_dice_loss
+            return bce_dice_loss
+        elif loss == "ssim_loss":
+            print("Using DSSIM loss")
+            from .internals.losses import DSSIM_loss
+            return DSSIM_loss
+        elif loss == "bce_ssim_loss":
+            print("Using BCE + DSSIM loss")
+            from .internals.losses import bce_ssim_loss
+            return bce_ssim_loss
+        elif loss == "mean_squared_error":
+            return keras.losses.mean_squared_error
+        elif loss == "mean_absolute_error":
+            return keras.losses.mean_absolute_error
+        elif loss == "ssim_mae_loss":
+            print("Using DSSIM + MAE loss")
+            from .internals.losses import dssim_mae_loss
+            return dssim_mae_loss
+        else:
+            print("Using {}".format(loss))
+            return loss
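+    # Usage sketch (illustrative value): loss_function("bce_dice_loss") prints
+    # "Using 1 - Dice + BCE loss" and returns the bce_dice_loss callable from
+    # .internals.losses, which Keras invokes as loss(y_true, y_pred).
+    # Unrecognised names are passed straight through to Keras as strings.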
+        
+    #######################
+    # Callbacks
+    #######################     
+    def tensorboard_call(self):
+        """Initialize tensorboard call
+        """
+        return TensorBoard(log_dir=self.log_dir, 
+                           batch_size = self.config.get_parameter("batch_size_per_GPU"), 
+                           write_graph=self.config.get_parameter("write_graph"),
+                           write_images=self.config.get_parameter("write_images"), 
+                           write_grads=self.config.get_parameter("write_grads"), 
+                           update_freq='epoch', 
+                           histogram_freq=self.config.get_parameter("histogram_freq"))
+    
+    def model_checkpoint_call(self, verbose = 0):
+        """Initialize model checkpoint call
+        """
+        return ModelCheckpoint(self.checkpoint_path, save_weights_only=True, verbose=verbose)
+    
+    #######################
+    # Clear memory once training is done
+    #######################
+    def end_training(self):
+        """Deletes model and releases gpu memory held by tensorflow
+        """
+        # del reference to model
+        del self.model
+        
+        # clear memory
+        tf.reset_default_graph()
+        K.clear_session()
+        
+        # take hold of cuda device to shut it down
+        from numba import cuda
+        cuda.select_device(0)
+        cuda.close()
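+    # Caveat (editorial assumption): cuda.close() tears down the CUDA context of
+    # this process, so in practice the GPU usually cannot be re-initialised
+    # without restarting Python; also note that only device 0 is released here.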
+    
+    #######################
+    # Train Model
+    #######################
+    def train_model(self, verbose = True):
+        """Trains model
+        
+        Parameters
+        ----------
+        verbose : `bool`, optional
+            [Default: True] Whether to print progress during training
+        """      
+        history = self.model.fit(self.aug_images, self.aug_ground_truth, validation_split = self.config.get_parameter("val_split"),
+                                 batch_size = self.config.get_parameter("batch_size"), epochs = self.config.get_parameter("num_epochs"), shuffle = True,
+                                 callbacks=self.callbacks, verbose=verbose)
+        
+        self.end_training()
+        
+    #######################
+    # Predict using loaded model weights
+    ####################### 
+    # TODO: change to load model from yaml file
+    def load_model(self, model_dir = None):
+        """Loads model from h5 file
+        
+        Parameters
+        ----------
+        model_dir : `str`, optional
+            [Default: None] Directory containing the model file
+        """
+        # TODO: rewrite to load model from yaml file
+        if model_dir is None:
+            model_dir = self.config.get_parameter("model_dir")
+            
+        if os.path.isdir(model_dir) is True:
+            list_weights_files = glob.glob(os.path.join(model_dir,'*.h5'))
+            list_weights_files.sort() # To ensure that [-1] gives the last file
+            
+            model_dir = list_weights_files[-1] # glob already returns paths prefixed with model_dir
+
+        self.model = keras.models.load_model(model_dir)
+        print("Loaded model from: " + model_dir)
+        
+    def load_weights(self, model_dir = None, weights_index = -1):
+        """Loads weights from h5 file
+        
+        Parameters
+        ----------
+        model_dir : `str`, optional
+            [Default: None] Directory containing the weights file
+        weights_index : `int`, optional
+            [Default: -1] Index of the weights file to load from the name-sorted list of .h5 files
+        """
+        if model_dir is None:
+            model_dir = self.config.get_parameter("model_dir")
+        
+        if os.path.isdir(model_dir) is True:
+            list_weights_files = glob.glob(os.path.join(model_dir,'*.h5'))
+            list_weights_files.sort() # To ensure that [-1] gives the last file
+            self.weights_path = list_weights_files[weights_index]
+            model_dir = self.weights_path # glob already returns paths prefixed with model_dir
+        else:
+            self.weights_path = model_dir
+        
+        self.model.load_weights(model_dir)
+        print("Loaded weights from: " + model_dir)
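+    # Usage sketch (hypothetical paths): load_weights() picks the last *.h5 in
+    # the configured model_dir (the highest epoch, given the zero-padded
+    # checkpoint names), while load_weights("logs/run1", weights_index=-2)
+    # would load the second-newest checkpoint.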
+       
+    def predict_images(self, image_dir):
+        """Perform prediction on images found in ``image_dir``
+        
+        Parameters
+        ----------
+        image_dir : `str`
+            Directory containing the images to perform prediction on
+            
+        Returns
+        ----------
+        image : `array_like`
+            Last image that prediction was performed on
+        """
+        # load image list
+        image_list = self.list_images(image_dir)
+        
+        for image_path in image_list:
+            image = self.load_image(image_path = image_path)
+            
+            # percentile normalization
+            if self.config.get_parameter("percentile_normalization"):
+                image, _, _ = self.percentile_normalization(image, in_bound = self.config.get_parameter("percentile"))
+            
+            if self.config.get_parameter("tile_overlap_size") == [0,0]:
+                padding = None
+                if image.shape[0] < self.config.get_parameter("tile_size")[0] or image.shape[1] < self.config.get_parameter("tile_size")[1]:
+                    image, padding = self.pad_image(image, image_size = self.config.get_parameter("tile_size"))
+                input_image = image[np.newaxis,:,:,np.newaxis]
+                
+                output_image = self.model.predict(input_image, verbose=1)
+                
+                if padding is not None: 
+                    h, w = output_image.shape[1:3]
+                    output_image = np.reshape(output_image, (h, w))
+                    output_image = self.remove_pad_image(output_image, padding = padding)
+            else:
+                tile_image_list, num_rows, num_cols, padding = self.tile_image(image, self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size"))
+                
+                pred_train_list = []
+                for tile in tile_image_list:
+
+                    # reshape image to correct dimensions for unet
+                    h, w = tile.shape[:2]
+                    
+                    tile = np.reshape(tile, (1, h, w, 1))
+
+                    pred_train_list.extend(self.model.predict(tile, verbose=1))
+
+                output_image = self.untile_image(pred_train_list, self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size"),
+                                                 num_rows, num_cols, padding = padding)
+            
+            self.save_image(output_image, image_path)
+            
+        return output_image
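+    # Flow note: with tile_overlap_size == [0, 0] the whole image (padded up to
+    # tile_size if needed) is predicted in one pass; otherwise it is split into
+    # overlapping tiles, predicted tile by tile and reassembled by
+    # untile_image(). Only the last image's prediction is returned.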
+    
+    def save_image(self, image, image_path, subfolder = 'Masks', suffix = '-preds'):
+        """Saves image to image_path
+        
+        Final location of image is as follows:
+          - image_path
+              - subfolder
+                 - model/weights file name
+        
+        Parameters
+        ----------
+        image : `array_like`
+            Image to be saved
+        image_path : `str`
+            Path of the image that was predicted on, used to derive the output location
+        subfolder : `str`
+            [Default: 'Masks'] Subfolder in which the image is saved
+        suffix : `str`
+            [Default: '-preds'] Suffix to append to the filename of the predicted image
+        """
+        image_dir = os.path.dirname(image_path)
+        
+        output_dir = os.path.join(image_dir, subfolder)
+        if not os.path.exists(output_dir):
+            os.makedirs(output_dir)
+            
+        basename, _ = os.path.splitext(os.path.basename(self.weights_path))
+        
+        output_dir = os.path.join(output_dir, basename)
+        if not os.path.exists(output_dir):
+            os.makedirs(output_dir)
+            
+        filename, _ = os.path.splitext(os.path.basename(image_path))
+        output_path = os.path.join(output_dir, "{}{}.tif".format(filename, suffix))
+        
+        skimage.io.imsave(output_path, image)
+
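+    # Worked example (hypothetical names): for image_path "data/im1.tif" and
+    # weights file "Unet-20230118T1400_0005.h5", the prediction is written to
+    # "data/Masks/Unet-20230118T1400_0005/im1-preds.tif".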
+
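A minimal end-to-end sketch of how this class is meant to be driven (paths are
hypothetical; config_filepath is forwarded to Network_Config as documented for
the constructor):

    from models.Unet import Unet

    net = Unet(config_filepath="configs/default_unet.yml")
    net.initialize_model()     # init logs, build and compile the network
    net.summary()

    # train_model() assumes the augmentation pipeline has already populated
    # net.aug_images and net.aug_ground_truth
    net.train_model(verbose=True)

    # prediction run (the config's for_prediction flag is assumed True)
    pred = Unet(config_filepath="configs/default_unet.yml")
    pred.initialize_model()
    pred.load_weights()                        # newest *.h5 in model_dir
    pred.predict_images("data/test_images")    # hypothetical folder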

\ No newline at end of file
diff --git a/html/models/Unet.html b/html/models/Unet.html
new file mode 100644
index 0000000..b67adc9
--- /dev/null
+++ b/html/models/Unet.html
@@ -0,0 +1,417 @@
Module models.Unet

import math
+
+import keras
+from keras.models import Model, load_model
+from keras.layers import Input, BatchNormalization, Activation
+from keras.layers.core import Lambda, Dropout
+from keras.layers.convolutional import Conv2D, Conv2DTranspose, UpSampling2D
+from keras.layers.convolutional_recurrent import ConvLSTM2D
+from keras.layers.pooling import MaxPooling2D
+from keras.layers.merge import Concatenate, Add
+from keras import regularizers
+from keras import backend as K
+
+import tensorflow as tf
+
+from .CNN_Base import CNN_Base
+from .layers.layers import normalize_input, activation_function, regularizer_function, bn_relu_conv2d
+    
+######
+# Unet
+######
+class Unet(CNN_Base):
+    """
+    Unet functions
+    see https://www.nature.com/articles/s41592-018-0261-2
+    """
+    
+    def __init__(self, model_dir = None, name = 'Unet', **kwargs):
+        super().__init__(model_dir = model_dir, **kwargs)
+        
+        self.config.update_parameter(["model","name"], name)
+        
+    def build_model(self, input_size, mean_std_normalization = None, 
+                    dropout_value = None, acti = None, padding = None, 
+                    kernel_initializer = None, weight_regularizer = None):
+        
+        ### get parameters from config file ###
+        filters = self.config.get_parameter("filters")
+        
+        if dropout_value is None:
+            dropout_value = self.config.get_parameter("dropout_value")
+        if acti is None:
+            acti = self.config.get_parameter("activation_function")
+        if padding is None:
+            padding = self.config.get_parameter("padding")
+        if kernel_initializer is None:
+            kernel_initializer = self.config.get_parameter("initializer")
+        if weight_regularizer is None:
+            weight_regularizer = self.config.get_parameter("weight_regularizer")
+        if mean_std_normalization is None:
+            if self.config.get_parameter("mean_std_normalization") == True:
+                mean = self.config.get_parameter("mean")
+                std = self.config.get_parameter("std")
+            else:
+                mean = None
+                std = None
+        
+        ### Actual network###
+        inputs = Input(input_size)
+        
+        # normalize images
+        layer = normalize_input(inputs, 
+                                scale_input = self.config.get_parameter("scale_input"),
+                                mean_std_normalization = self.config.get_parameter("mean_std_normalization"),
+                                mean = mean, std = std)
+        
+        layer_store = []
+        
+        # encoding arm
+        for _ in range(self.config.get_parameter("levels")):
+            layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=1, 
+                                   kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+            
+            layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=1, 
+                                   kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+            
+            layer_store.append(layer)
+            layer = MaxPooling2D((2, 2))(layer)
+            
+            filters = filters * 2
+            
+        
+        layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=1, 
+                               kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+            
+        layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=1, 
+                               kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+            
+        # decoding arm 
+        for i in range(self.config.get_parameter("levels")):
+            layer = Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding='same')(layer)
+            
+            layer = Concatenate(axis=3)([layer, layer_store[-i -1]])
+            filters = filters // 2
+            
+            layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=1, 
+                                   kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+            
+            layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=1, 
+                                   kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+            
+        outputs = Conv2D(1, (1, 1), activation='sigmoid')(layer)
+        
+        return Model(inputs=[inputs], outputs=[outputs], name='Unet')
+
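As a worked example of the filter schedule above (assumed config values): with
levels = 4 and filters = 32, the encoding arm runs its paired convolutions at
32, 64, 128 and 256 filters, the two bridge convolutions run at 512, and the
decoding arm halves the count at each level back down to 32 before the final
1x1 sigmoid convolution emits the single-channel mask.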
\ No newline at end of file
diff --git a/html/models/Unet_Resnet.html b/html/models/Unet_Resnet.html
new file mode 100644
index 0000000..97475e5
--- /dev/null
+++ b/html/models/Unet_Resnet.html
@@ -0,0 +1,1095 @@
Module models.Unet_Resnet

import math
+
+import keras
+from keras.models import Model, load_model
+from keras.layers import Input, BatchNormalization, Activation
+from keras.layers.core import Lambda, Dropout
+from keras.layers.convolutional import Conv2D, Conv2DTranspose, UpSampling2D
+from keras.layers.convolutional_recurrent import ConvLSTM2D
+from keras.layers.pooling import MaxPooling2D
+from keras.layers.merge import Concatenate, Add
+from keras import regularizers
+from keras import backend as K
+
+import tensorflow as tf
+
+from .CNN_Base import CNN_Base
+from .layers.layers import normalize_input, activation_function, regularizer_function, bn_relu_conv2d, bn_relu_conv2dtranspose
+        
+################################################
+# Unet + Resnet
+################################################
+
+class Unet_Resnet(CNN_Base):
+    """
+    Unet + resnet functions
+    see https://link.springer.com/chapter/10.1007/978-3-319-46976-8_19
+    """
+    
+    def __init__(self, model_dir = None, **kwargs):       
+        super().__init__(model_dir = model_dir, **kwargs)
+        
+    def bottleneck_block(self, inputs, 
+                         upsample = False,
+                         filters = 8,
+                         strides = 1, dropout_value = None, acti = None, padding = None, 
+                         kernel_initializer = None, weight_regularizer = None, name = None):            
+        # Bottleneck_block
+        with tf.name_scope("Bottleneck_block" + name):
+            output = bn_relu_conv2d(inputs, filters, 1,  acti=acti, padding=padding, strides=strides, 
+                                    kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+            
+            output = bn_relu_conv2d(output, filters, 3,  acti=acti, padding=padding, 
+                                    kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+            
+            if upsample == True:
+                output = bn_relu_conv2dtranspose(output, filters, (2,2), strides = (2,2), acti=acti, padding=padding, 
+                                                kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+                output = Conv2D(filters * 4, (1,1), padding=padding, 
+                                kernel_initializer=kernel_initializer, 
+                                kernel_regularizer=regularizer_function(weight_regularizer))(output)
+            else:
+                output = bn_relu_conv2d(output, filters*4, 1,  acti=acti, padding=padding, 
+                                        kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+
+            output = Dropout(dropout_value)(output)
+            
+            # reshape input to the same size as output
+            if upsample == True:
+                inputs = UpSampling2D()(inputs)
+            if strides == 2:
+                inputs = Conv2D(output.shape[3].value, 1, padding=padding, strides=strides, kernel_initializer=kernel_initializer)(inputs)
+            
+            # ensure number of filters are correct between input and output
+            if output.shape[3] != inputs.shape[3]:
+                inputs = Conv2D(output.shape[3].value, 1, padding=padding, kernel_initializer=kernel_initializer)(inputs)
+
+            return Add()([output, inputs])
+        
+    def simple_block(self, inputs, filters,
+                     strides = 1, dropout_value = None, acti = None, padding = None, 
+                     kernel_initializer = None, weight_regularizer = None, name = None):
+            
+        with tf.name_scope("Simple_block" + name):
+            output = BatchNormalization()(inputs)
+            output = activation_function(output, acti)
+            output = MaxPooling2D()(output)
+            output = Conv2D(filters, 3, padding=padding, strides=strides,
+                            kernel_initializer=kernel_initializer, 
+                            kernel_regularizer=regularizer_function(weight_regularizer))(output)
+
+            output = Dropout(dropout_value)(output)
+
+            inputs = Conv2D(output.shape[3].value, 1, padding=padding, strides=2, kernel_initializer=kernel_initializer)(inputs)
+            
+            return Add()([output, inputs])
+        
+    def simple_block_up(self, inputs, filters,
+                        strides = 1, dropout_value = None, acti = None, padding = None, 
+                        kernel_initializer = None, weight_regularizer = None, name = None):
+        
+        with tf.name_scope("Simple_block_up" + name):
+            output = bn_relu_conv2d(inputs, filters, 3,  acti=acti, padding=padding, strides=strides, 
+                                    kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+
+            output = Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding=padding, kernel_initializer=kernel_initializer)(output)
+
+            output = Dropout(dropout_value)(output)
+            
+            inputs = UpSampling2D()(inputs)
+            inputs = Conv2D(output.shape[3].value, 1, padding=padding, kernel_initializer=kernel_initializer)(inputs)
+
+            return Add()([output, inputs])
+    
+
+    def build_model(self, unet_input, mean_std_normalization = None, 
+                    dropout_value = None, acti = None, padding = None, 
+                    kernel_initializer = None, weight_regularizer = None):
+        
+        ### get parameters from config file ###
+        filters = self.config.get_parameter("filters")
+        
+        if dropout_value is None:
+            dropout_value = self.config.get_parameter("dropout_value")
+        if acti is None:
+            acti = self.config.get_parameter("activation_function")
+        if padding is None:
+            padding = self.config.get_parameter("padding")
+        if kernel_initializer is None:
+            kernel_initializer = self.config.get_parameter("initializer")
+        if weight_regularizer is None:
+            weight_regularizer = self.config.get_parameter("weight_regularizer")
+        if mean_std_normalization is None:
+            if self.config.get_parameter("mean_std_normalization") == True:
+                mean = self.config.get_parameter("mean")
+                std = self.config.get_parameter("std")
+            else:
+                mean = None
+                std = None
+            
+        
+        ### Actual network###
+        inputs = Input(unet_input)
+        
+        # normalize images
+        layer = normalize_input(inputs, 
+                                scale_input = self.config.get_parameter("scale_input"),
+                                mean_std_normalization = self.config.get_parameter("mean_std_normalization"),
+                                mean = mean, std = std)
+
+        # encoder arm
+        layer_1 = Conv2D(filters, (3, 3), padding = padding, 
+                         kernel_initializer = kernel_initializer, 
+                         kernel_regularizer = regularizer_function(weight_regularizer), name="Conv_layer_1")(layer)
+        
+        layer_2 = self.simple_block(layer_1, filters, 
+                                    dropout_value = dropout_value, acti = acti, padding = padding, 
+                                    kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                    name = "_layer_2")
+        
+        layer = layer_2
+        layer_store = [layer]
+        
+        for i, conv_layer_i in enumerate(self.config.get_parameter("bottleneck_block"), 1):
+            strides = 2
+            
+            # the last level of the encoding arm doubles as the bridge across to the decoder
+            if i == len(self.config.get_parameter("bottleneck_block")):
+                layer = self.bottleneck_block(layer, filters = filters, 
+                                              strides = strides, dropout_value = dropout_value, acti = acti, padding = padding, 
+                                              kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                              name = "_layer_{}".format(2 + i))
+
+                for count in range(conv_layer_i-2):
+                    layer = self.bottleneck_block(layer, filters = filters, 
+                                                  dropout_value = dropout_value, acti = acti, padding = padding, 
+                                                  kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                                  name="_layer_{}-{}".format(2 + i, count))
+                    
+                layer = self.bottleneck_block(layer, upsample = True,
+                                              filters = filters, strides = 1,
+                                              dropout_value = dropout_value, acti = acti, padding = padding, 
+                                              kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                              name = "_up_layer_{}".format(2 + i))
+            else:       
+                layer = self.bottleneck_block(layer, filters = filters, 
+                                              strides = strides, dropout_value = dropout_value, acti = acti, padding = padding, 
+                                              kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                              name = "_layer_{}".format(2 + i))
+
+                for count in range(conv_layer_i - 1):
+                    layer = self.bottleneck_block(layer, filters = filters, 
+                                                  dropout_value = dropout_value, acti = acti, padding = padding, 
+                                                  kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                                  name="_layer_{}-{}".format(2 + i, count))
+                filters = filters*2
+                layer_store.append(layer)
+
+        # decoder arm
+        for i, conv_layer_i in enumerate(self.config.get_parameter("bottleneck_block")[-2::-1], 1):
+            filters = filters//2  
+
+            # note: i must remain positive, possibly due to the way the keras/tf model compile works
+            layer = Concatenate(axis=3, name="Concatenate_layer_{}".format(i+6))([layer_store[-i], layer])
+            
+            for count in range(conv_layer_i - 1):
+                layer = self.bottleneck_block(layer, filters = filters, 
+                                              dropout_value = dropout_value, acti = acti, padding = padding, 
+                                              kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                              name="_layer_{}-{}".format(i+6, count))
+
+            layer = self.bottleneck_block(layer, upsample = True,
+                                          filters = filters, strides = 1,
+                                          dropout_value = dropout_value, acti = acti, padding = padding, 
+                                          kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                          name = "_layer_{}".format(i+6))
+        
+        layer_13 = Concatenate(axis=3, name="Concatenate_layer_13")([layer, layer_2])
+        layer_14 = self.simple_block_up(layer_13, filters,
+                                        dropout_value = dropout_value, acti = acti, padding = padding, 
+                                        kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                        name = "_layer_14")
+
+        layer_15 = Concatenate(axis=3, name="Concatenate_layer_15")([layer_14, layer_1])
+        
+        layer_16 = Conv2D(filters, (3, 3), padding = padding, 
+                          kernel_initializer = kernel_initializer, kernel_regularizer = regularizer_function(weight_regularizer), 
+                          name="Conv_layer_16")(layer_15)
+        
+        layer_17 = BatchNormalization()(layer_16)
+        layer_18 = activation_function(layer_17, acti)
+
+        outputs = Conv2D(1, (1, 1), activation = self.config.get_parameter("final_activation"))(layer_18)
+        
+        return Model(inputs=[inputs], outputs=[outputs], name = self.config.get_parameter('name'))
+    
+class Unet_Resnet101(Unet_Resnet):
+    def __init__(self, model_dir = None, name = 'Unet_Resnet101', **kwargs):
+        super().__init__(model_dir = model_dir, **kwargs)
+        
+        self.config.update_parameter(["model","name"], name)
+        self.config.update_parameter(["model","bottleneck_block"], (3, 4, 23, 3))
+
+        # store parameters for ease of use (may need to remove in the future)
+        self.conv_layer = self.config.get_parameter("bottleneck_block")
+
+class Unet_Resnet50(Unet_Resnet):
+    def __init__(self, model_dir = None, name = 'Unet_Resnet50', **kwargs):
+        super().__init__(model_dir = model_dir, **kwargs)
+        
+        self.config.update_parameter(["model","name"], name)
+        self.config.update_parameter(["model","bottleneck_block"], (3, 4, 6, 3))
+        
+        # store parameters for ease of use (may need to remove in the future)
+        self.conv_layer = self.config.get_parameter("bottleneck_block")
+        
+class Unet_Resnet_paper(Unet_Resnet):
+    def __init__(self, model_dir = None, name = 'Unet_Resnet_paper', **kwargs):
+        """
+        see https://arxiv.org/pdf/1608.04117.pdf
+        """
+        super().__init__(model_dir = model_dir, **kwargs)
+        
+        self.config.update_parameter(["model","name"], name)
+        self.config.update_parameter(["model","bottleneck_block"], (3, 8, 10, 3))
+
+        # store parameters for ease of use (may need to remove in the future)
+        self.conv_layer = self.config.get_parameter("bottleneck_block")
+
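+As a quick orientation, here is a minimal usage sketch. It is not part of the
+generated documentation: the config file path and input shape are illustrative
+assumptions, and only the model_dir/config_filepath parameters are documented below.
+
+    # hypothetical usage sketch
+    from models.Unet_Resnet import Unet_Resnet50
+
+    model = Unet_Resnet50(model_dir="outputs/", config_filepath="configs/default_unet.yml")
+    network = model.build_model((512, 512, 1))  # (height, width, channels) of the input tiles
+    network.summary()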

Classes

+
+
+class Unet_Resnet(model_dir=None, **kwargs)
+
+

Unet + resnet functions; see https://link.springer.com/chapter/10.1007/978-3-319-46976-8_19

+

Creates the base neural network class with basic functions

+

Parameters

+
+
model_dir : str, optional
+
[Default: None] Folder where the model is stored
+
config_filepath : str, optional
+
[Default: None] Filepath to the config file
+
**kwargs
+
Parameters that are passed to network_config.Network_Config
+
+

Attributes

+
+
config : network_config.Network_Config
+
Network_Config object containing the config and necessary functions
+
+
+Source code:
class Unet_Resnet(CNN_Base):
+    """
+    Unet + resnet functions
+    see https://link.springer.com/chapter/10.1007/978-3-319-46976-8_19
+    """
+    
+    def __init__(self, model_dir = None, **kwargs):       
+        super().__init__(model_dir = model_dir, **kwargs)
+        
+    def bottleneck_block(self, inputs, 
+                         upsample = False,
+                         filters = 8,
+                         strides = 1, dropout_value = None, acti = None, padding = None, 
+                         kernel_initializer = None, weight_regularizer = None, name = None):            
+        # Bottleneck_block
+        with tf.name_scope("Bottleneck_block" + name):
+            output = bn_relu_conv2d(inputs, filters, 1,  acti=acti, padding=padding, strides=strides, 
+                                    kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+            
+            output = bn_relu_conv2d(output, filters, 3,  acti=acti, padding=padding, 
+                                    kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+            
+            if upsample:
+                output = bn_relu_conv2dtranspose(output, filters, (2,2), strides = (2,2), acti=acti, padding=padding, 
+                                                kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+                output = Conv2D(filters * 4, (1,1), padding=padding, 
+                                kernel_initializer=kernel_initializer, 
+                                kernel_regularizer=regularizer_function(weight_regularizer))(output)
+            else:
+                output = bn_relu_conv2d(output, filters*4, 1,  acti=acti, padding=padding, 
+                                        kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+
+            output = Dropout(dropout_value)(output)
+            
+            # reshape input to the same size as output
+            if upsample:
+                inputs = UpSampling2D()(inputs)
+            if strides == 2:
+                inputs = Conv2D(output.shape[3].value, 1, padding=padding, strides=strides, kernel_initializer=kernel_initializer)(inputs)
+            
+            # ensure the number of filters matches between input and output
+            if output.shape[3] != inputs.shape[3]:
+                inputs = Conv2D(output.shape[3].value, 1, padding=padding, kernel_initializer=kernel_initializer)(inputs)
+
+            return Add()([output, inputs])
+        
+    def simple_block(self, inputs, filters,
+                     strides = 1, dropout_value = None, acti = None, padding = None, 
+                     kernel_initializer = None, weight_regularizer = None, name = None):
+            
+        with tf.name_scope("Simple_block" + name):
+            output = BatchNormalization()(inputs)
+            output = activation_function(output, acti)
+            output = MaxPooling2D()(output)
+            output = Conv2D(filters, 3, padding=padding, strides=strides,
+                            kernel_initializer=kernel_initializer, 
+                            kernel_regularizer=regularizer_function(weight_regularizer))(output)
+
+            output = Dropout(dropout_value)(output)
+
+            inputs = Conv2D(output.shape[3].value, 1, padding=padding, strides=2, kernel_initializer=kernel_initializer)(inputs)
+            
+            return Add()([output, inputs])
+        
+    def simple_block_up(self, inputs, filters,
+                        strides = 1, dropout_value = None, acti = None, padding = None, 
+                        kernel_initializer = None, weight_regularizer = None, name = None):
+        
+        with tf.name_scope("Simple_block_up" + name):
+            output = bn_relu_conv2d(inputs, filters, 3,  acti=acti, padding=padding, strides=strides, 
+                                    kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+
+            output = Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding=padding, kernel_initializer=kernel_initializer)(output)
+
+            output = Dropout(dropout_value)(output)
+            
+            inputs = UpSampling2D()(inputs)
+            inputs = Conv2D(output.shape[3].value, 1, padding=padding, kernel_initializer=kernel_initializer)(inputs)
+
+            return Add()([output, inputs])
+    
+
+    def build_model(self, unet_input, mean_std_normalization = None, 
+                    dropout_value = None, acti = None, padding = None, 
+                    kernel_initializer = None, weight_regularizer = None):
+        
+        ### get parameters from config file ###
+        filters = self.config.get_parameter("filters")
+        
+        if dropout_value is None:
+            dropout_value = self.config.get_parameter("dropout_value")
+        if acti is None:
+            acti = self.config.get_parameter("activation_function")
+        if padding is None:
+            padding = self.config.get_parameter("padding")
+        if kernel_initializer is None:
+            kernel_initializer = self.config.get_parameter("initializer")
+        if weight_regularizer is None:
+            weight_regularizer = self.config.get_parameter("weight_regularizer")
+        if mean_std_normalization is None:
+            mean_std_normalization = self.config.get_parameter("mean_std_normalization")
+        # mean and std must always be defined, otherwise normalize_input() below raises a NameError
+        if mean_std_normalization is True:
+            mean = self.config.get_parameter("mean")
+            std = self.config.get_parameter("std")
+        else:
+            mean = None
+            std = None
+            
+        
+        ### Actual network ###
+        inputs = Input(unet_input)
+        
+        # normalize images
+        layer = normalize_input(inputs, 
+                                scale_input = self.config.get_parameter("scale_input"),
+                                mean_std_normalization = mean_std_normalization,
+                                mean = mean, std = std)
+
+        # encoder arm
+        layer_1 = Conv2D(filters, (3, 3), padding = padding, 
+                         kernel_initializer = kernel_initializer, 
+                         kernel_regularizer = regularizer_function(weight_regularizer), name="Conv_layer_1")(layer)
+        
+        layer_2 = self.simple_block(layer_1, filters, 
+                                    dropout_value = dropout_value, acti = acti, padding = padding, 
+                                    kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                    name = "_layer_2")
+        
+        layer = layer_2
+        layer_store = [layer]
+        
+        for i, conv_layer_i in enumerate(self.config.get_parameter("bottleneck_block"), 1):
+            strides = 2
+            
+            # the last block of the encoding arm doubles as the bridge across to the decoder
+            if i == len(self.config.get_parameter("bottleneck_block")):
+                layer = self.bottleneck_block(layer, filters = filters, 
+                                              strides = strides, dropout_value = dropout_value, acti = acti, padding = padding, 
+                                              kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                              name = "_layer_{}".format(2 + i))
+
+                for count in range(conv_layer_i-2):
+                    layer = self.bottleneck_block(layer, filters = filters, 
+                                                  dropout_value = dropout_value, acti = acti, padding = padding, 
+                                                  kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                                  name="_layer_{}-{}".format(2 + i, count))
+                    
+                layer = self.bottleneck_block(layer, upsample = True,
+                                              filters = filters, strides = 1,
+                                              dropout_value = dropout_value, acti = acti, padding = padding, 
+                                              kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                              name = "_up_layer_{}".format(2 + i))
+            else:       
+                layer = self.bottleneck_block(layer, filters = filters, 
+                                              strides = strides, dropout_value = dropout_value, acti = acti, padding = padding, 
+                                              kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                              name = "_layer_{}".format(2 + i))
+
+                for count in range(conv_layer_i - 1):
+                    layer = self.bottleneck_block(layer, filters = filters, 
+                                                  dropout_value = dropout_value, acti = acti, padding = padding, 
+                                                  kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                                  name="_layer_{}-{}".format(2 + i, count))
+                filters = filters*2
+                layer_store.append(layer)
+
+        # decoder arm
+        for i, conv_layer_i in enumerate(self.config.get_parameter("bottleneck_block")[-2::-1], 1):
+            filters = filters//2  
+
+            # note: i starts at 1 so that layer_store[-i] walks the stored encoder outputs in reverse
+            layer = Concatenate(axis=3, name="Concatenate_layer_{}".format(i+6))([layer_store[-i], layer])
+            
+            for count in range(conv_layer_i - 1):
+                layer = self.bottleneck_block(layer, filters = filters, 
+                                              dropout_value = dropout_value, acti = acti, padding = padding, 
+                                              kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                              name="_layer_{}-{}".format(i+6, count))
+
+            layer = self.bottleneck_block(layer, upsample = True,
+                                          filters = filters, strides = 1,
+                                          dropout_value = dropout_value, acti = acti, padding = padding, 
+                                          kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                          name = "_layer_{}".format(i+6))
+        
+        layer_13 = Concatenate(axis=3, name="Concatenate_layer_13")([layer, layer_2])
+        layer_14 = self.simple_block_up(layer_13, filters,
+                                        dropout_value = dropout_value, acti = acti, padding = padding, 
+                                        kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                        name = "_layer_14")
+
+        layer_15 = Concatenate(axis=3, name="Concatenate_layer_15")([layer_14, layer_1])
+        
+        layer_16 = Conv2D(filters, (3, 3), padding = padding, 
+                          kernel_initializer = kernel_initializer, kernel_regularizer = regularizer_function(weight_regularizer), 
+                          name="Conv_layer_16")(layer_15)
+        
+        layer_17 = BatchNormalization()(layer_16)
+        layer_18 = activation_function(layer_17, acti)
+
+        outputs = Conv2D(1, (1, 1), activation = self.config.get_parameter("final_activation"))(layer_18)
+        
+        return Model(inputs=[inputs], outputs=[outputs], name = self.config.get_parameter('name'))
+
+

Ancestors

+CNN_Base

Subclasses

+Unet_Resnet101, Unet_Resnet50, Unet_Resnet_paper

Methods

+
+
+def bottleneck_block(self, inputs, upsample=False, filters=8, strides=1, dropout_value=None, acti=None, padding=None, kernel_initializer=None, weight_regularizer=None, name=None)
+
+
+
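+Pre-activation ResNet bottleneck block: 1x1, 3x3 and 1x1 (4x filters) convolutions with dropout and an additive shortcut. With upsample=True, a 2x2 transposed convolution is inserted before the final 1x1 convolution and the shortcut is upsampled to match.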
+
+
+
+def build_model(self, unet_input, mean_std_normalization=None, dropout_value=None, acti=None, padding=None, kernel_initializer=None, weight_regularizer=None)
+
+
+
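+Assembles the full network: an encoder arm of strided bottleneck blocks, a bridge block, and a decoder arm of upsampling bottleneck blocks with skip connections, ending in a single-channel 1x1 convolution with the configured final activation.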
+
+
+
+def simple_block(self, inputs, filters, strides=1, dropout_value=None, acti=None, padding=None, kernel_initializer=None, weight_regularizer=None, name=None)
+
+
+
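+Downsampling residual block: batch normalization, activation and max pooling followed by a 3x3 convolution and dropout, with a strided 1x1 convolution on the shortcut path.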
+
+
+
+def simple_block_up(self, inputs, filters, strides=1, dropout_value=None, acti=None, padding=None, kernel_initializer=None, weight_regularizer=None, name=None)
+
+
+
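+Upsampling residual block: a 3x3 convolution followed by a 2x2 transposed convolution and dropout, with upsampling and a 1x1 convolution on the shortcut path.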
+
+
+
+

Inherited members

+ +
+
+class Unet_Resnet101(model_dir=None, name='Unet_Resnet101', **kwargs)
+
+

Unet + resnet functions; see https://link.springer.com/chapter/10.1007/978-3-319-46976-8_19

+

Creates the base neural network class with basic functions

+

Parameters

+
+
model_dir : str, optional
+
[Default: None] Folder where the model is stored
+
config_filepath : str, optional
+
[Default: None] Filepath to the config file
+
**kwargs
+
Parameters that are passed to network_config.Network_Config
+
+

Attributes

+
+
config : network_config.Network_Config
+
Network_Config object containing the config and necessary functions
+
+
+
+
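+The (3, 4, 23, 3) block counts correspond to the stage depths of ResNet-101.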

Ancestors

+Unet_Resnet, CNN_Base

Inherited members

+ +
+
+class Unet_Resnet50(model_dir=None, name='Unet_Resnet50', **kwargs)
+
+

Unet + resnet functions; see https://link.springer.com/chapter/10.1007/978-3-319-46976-8_19

+

Creates the base neural network class with basic functions

+

Parameters

+
+
model_dir : str, optional
+
[Default: None] Folder where the model is stored
+
config_filepath : str, optional
+
[Default: None] Filepath to the config file
+
**kwargs
+
Parameters that are passed to network_config.Network_Config
+
+

Attributes

+
+
config : network_config.Network_Config
+
Network_Config object containing the config and necessary functions
+
+
+
+
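+The (3, 4, 6, 3) block counts correspond to the stage depths of ResNet-50.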

Ancestors

+Unet_Resnet, CNN_Base

Inherited members

+ +
+
+class Unet_Resnet_paper(model_dir=None, name='Unet_Resnet_paper', **kwargs)
+
+

Unet + resnet functions; see https://link.springer.com/chapter/10.1007/978-3-319-46976-8_19

+

see https://arxiv.org/pdf/1608.04117.pdf

+
+
+

Ancestors

+Unet_Resnet, CNN_Base

Inherited members

+ +
+
+
+
+ +
+
\ No newline at end of file
diff --git a/html/models/index.html b/html/models/index.html
new file mode 100644
index 0000000..b530452
--- /dev/null
+++ b/html/models/index.html
@@ -0,0 +1,86 @@
+models API documentation
+ + +
+
\ No newline at end of file
diff --git a/html/models/internals/dataset.html b/html/models/internals/dataset.html
new file mode 100644
index 0000000..d653901
--- /dev/null
+++ b/html/models/internals/dataset.html
@@ -0,0 +1,958 @@
+models.internals.dataset API documentation
+
+
+

Module models.internals.dataset

+
+
+
+Source code:
import os, sys
+import numpy as np
+
+import matplotlib.pyplot as plt
+
+from tqdm import tqdm
+
+from .image_functions import Image_Functions      
+
+class Dataset(Image_Functions):
+    def __init__(self):
+        """Creates Dataset object that is used to manipulate the training data.
+    
+        Attributes
+        ----------
+        classes : list
+            List of dictionaries containing the class name and id
+            
+        train_images : list
+            List of images that are used as the input for the network
+            
+        train_ground_truth : list
+            List of images that are used as the ground truth for the network
+        """
+            
+        self.classes = []
+        self.train_images = []
+        self.train_ground_truth = []
+        
+        super().__init__()
+    
+    #######################
+    # Class id functions
+    #######################
+    def get_class_id(self, class_name):
+        """Returns the class id and adds class to list if not in list of classes.
+    
+        Parameters
+        ----------
+        class_name : str
+            Identity of class that will be associated with the class id
+            
+        Returns
+        ----------
+        int
+            Class id
+        """
+        
+        if len(self.classes) == 0:
+            self.classes.append({"class": class_name, "id": 0})
+            return 0
+        
+        for class_info in self.classes:
+            # if the class exists, return its id
+            if class_info["class"] == class_name:
+                return class_info["id"]
+   
+        # the new class id equals the current number of classes (ids are 0-based)
+        self.classes.append({"class": class_name, "id": len(self.classes)})
+        return len(self.classes)-1
+    
+    #######################
+    # Class id functions
+    #######################
+    def sanity_check(self, image_index):
+        """Plots the augmented image and ground_truth to check if everything is ok.
+    
+        Parameters
+        ----------
+        image_index : int
+            Index of the image and its corresponding ground_truth
+        """
+        
+        image = self.aug_images[image_index][:,:,0]
+        ground_truth = self.aug_ground_truth[image_index][:,:,0]
+
+        plt.figure(figsize=(14, 14))
+        plt.axis('off')
+        plt.imshow(image, cmap='gray', 
+                   norm=None, interpolation=None)
+        plt.show()
+
+        plt.figure(figsize=(14, 14))
+        plt.axis('off')
+        plt.imshow(ground_truth, cmap='gray', 
+                   norm=None, interpolation=None)
+        plt.show()
+    
+    def load_dataset(self, dataset_dir = None, tiled = False):
+        """Loads dataset from ``dataset_dir``
+    
+        Parameters
+        ----------
+        dataset_dir : str or None, optional
+            Folder to load the dataset from. If None, ``dataset_dir`` is obtained from the config file
+            
+        tiled : bool, optional
+            Whether the loaded images are split into tiles using the tiling function
+        """
+        
+        # update dataset_dir if specified. If not, load dataset_dir from config file
+        if dataset_dir is None:
+            dataset_dir = self.config.get_parameter("dataset_dir")
+        else:
+            self.config.update_parameter(self.config.find_key("dataset_dir"), dataset_dir)
+        
+        image_dirs = next(os.walk(dataset_dir))[1]
+        image_dirs = [f for f in image_dirs if not f[0] == '.']
+        
+        for img_dir in image_dirs:
+            # images
+            image = self.load_image(os.path.join(dataset_dir, img_dir), subfolder = self.config.get_parameter("image_subfolder"))
+            
+            # percentile normalization
+            if self.config.get_parameter("percentile_normalization"):
+                image, _, _ = self.percentile_normalization(image, in_bound = self.config.get_parameter("percentile"))
+            
+            if tiled is True:
+                tile_image_list, num_rows, num_cols, padding = self.tile_image(image, self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size"))
+                self.config.update_parameter(["images","num_rows"], num_rows)
+                self.config.update_parameter(["images","num_cols"], num_cols)
+                self.config.update_parameter(["images","padding"], padding)
+                self.train_images.extend(tile_image_list)
+            else:
+                self.train_images.extend([image,])
+            
+            # ground truth
+            ground_truth, class_id = self.load_ground_truth(os.path.join(dataset_dir, img_dir), subfolder = self.config.get_parameter("ground_truth_subfolder"))
+            if tiled is True:
+                tile_ground_truth_list, _, _, _ = self.tile_image(ground_truth[0], self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size"))
+                self.train_ground_truth.extend(tile_ground_truth_list)
+            else:
+                self.train_ground_truth.extend(ground_truth)
+                
+    #######################
+    # Image augmentation
+    #######################
+    def augment_images(self):
+        """Augments images using the parameters in the config file"""
+        
+        # TODO: To allow for augmentation of multi-class images
+        
+        augmentor = self.augmentations(p=self.config.get_parameter("augmentations_p"))
+        
+        # increase number of images
+        self.aug_images = self.train_images*self.config.get_parameter("num_augmented_images")
+        self.aug_ground_truth = self.train_ground_truth*self.config.get_parameter("num_augmented_images")
+        
+        print("Performing augmentations on {} images".format(len(self.aug_images)))
+        sys.stdout.flush()
+        
+        for i in tqdm(range(len(self.aug_images)),desc="Augmentation of images"):
+            
+            # target must be image and mask in order for albumentations to work
+            data = {"image": self.aug_images[i], 
+                    "mask": self.aug_ground_truth[i]}
+            augmented = augmentor(**data)
+
+            self.aug_images[i] = self.reshape_image(np.asarray(augmented["image"]))
+            
+            # optionally thicken the augmented ground truth with binary dilation
+            if self.config.get_parameter("use_binary_dilation_after_augmentation") is True:
+                from skimage.morphology import binary_dilation, disk
+                self.aug_ground_truth[i] = self.reshape_image(binary_dilation(augmented["mask"].astype(bool), disk(self.config.get_parameter("disk_size"))))
+            else:
+                self.aug_ground_truth[i] = self.reshape_image(augmented["mask"].astype(bool))
+
+        self.aug_images = np.stack(self.aug_images, axis = 0)
+        self.aug_ground_truth = np.stack(self.aug_ground_truth, axis = 0)
+        
+        mean = self.aug_images.mean()
+        std = self.aug_images.std()
+        
+        self.config.update_parameter(["images","mean"], float(mean))
+        self.config.update_parameter(["images","std"], float(std))
+        
+        print("Augmentations complete!")
+
+    def augmentations(self, p = None):
+        """Generates list of augmentations using parameters obtained from config file
+        
+        Parameters
+        ----------
+        p : float, optional
+            Probability of applying the augmentation pipeline to an image
+        
+        Returns
+        ----------
+        function
+            function used to augment images
+        """
+        from albumentations import (
+            RandomCrop, HorizontalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90,
+            Transpose, Blur, OpticalDistortion, GridDistortion, ElasticTransform,
+            IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur,
+            IAASharpen, RandomBrightnessContrast, Flip, OneOf, Compose
+        )
+        
+        augmentation_list = []
+        
+        if self.config.get_parameter("random_rotate") is True:
+            augmentation_list.append(RandomRotate90(p = self.config.get_parameter("random_rotate_p"))) # 0.9
+        
+        if self.config.get_parameter("flip") is True:
+            augmentation_list.append(Flip())
+            
+        if self.config.get_parameter("transpose") is True:
+            augmentation_list.append(Transpose())
+            
+        if self.config.get_parameter("blur_group") is True:
+            blur_augmentation = []
+            if self.config.get_parameter("motion_blur") is True:
+                blur_augmentation.append(MotionBlur(p = self.config.get_parameter("motion_blur_p")))
+            if self.config.get_parameter("median_blur") is True:
+                blur_augmentation.append(MedianBlur(blur_limit = self.config.get_parameter("median_blur_limit"), p = self.config.get_parameter("median_blur_p")))
+            if self.config.get_parameter("blur") is True:
+                blur_augmentation.append(Blur(blur_limit = self.config.get_parameter("blur_limit"), p = self.config.get_parameter("blur_p")))
+            augmentation_list.append(OneOf(blur_augmentation, p = self.config.get_parameter("blur_group_p"))) 
+            
+        if self.config.get_parameter("shift_scale_rotate") is True:
+            augmentation_list.append(ShiftScaleRotate(shift_limit = self.config.get_parameter("shift_limit"),
+                                                      scale_limit = self.config.get_parameter("scale_limit"),
+                                                      rotate_limit = self.config.get_parameter("rotate_limit"),
+                                                      p = self.config.get_parameter("shift_scale_rotate_p")))
+        if self.config.get_parameter("distortion_group") is True:
+            distortion_augmentation = []
+            if self.config.get_parameter("optical_distortion") is True:
+                distortion_augmentation.append(OpticalDistortion(p = self.config.get_parameter("optical_distortion_p")))
+            if self.config.get_parameter("elastic_transform") is True:
+                distortion_augmentation.append(ElasticTransform(p = self.config.get_parameter("elastic_transform_p")))
+            if self.config.get_parameter("grid_distortion") is True:
+                distortion_augmentation.append(GridDistortion(p = self.config.get_parameter("grid_distortion_p")))
+            
+            augmentation_list.append(OneOf(distortion_augmentation, p = self.config.get_parameter("distortion_group_p")))
+        
+        if self.config.get_parameter("brightness_contrast_group") is True:
+            contrast_augmentation = []
+            if self.config.get_parameter("clahe") is True:
+                contrast_augmentation.append(CLAHE())
+            if self.config.get_parameter("sharpen") is True:
+                contrast_augmentation.append(IAASharpen())
+            if self.config.get_parameter("random_brightness_contrast") is True:
+                contrast_augmentation.append(RandomBrightnessContrast())
+           
+            augmentation_list.append(OneOf(contrast_augmentation, p = self.config.get_parameter("brightness_contrast_group_p")))
+            
+        augmentation_list.append(RandomCrop(self.config.get_parameter("tile_size")[0], self.config.get_parameter("tile_size")[1], always_apply=True))
+        
+        return Compose(augmentation_list, p = p)
+
+############################### TODO ###############################
+#     def prepare_data(self):
+#         """        
+#         Performs augmentation if needed
+#         """
+        
+            
+#     # Create data generator
+#     # Return augmented images/ground_truth arrays of batch size
+#     def generator(features, labels, batch_size, seq_det):
+#         # create empty arrays to contain batch of features and labels
+#         batch_features = np.zeros((batch_size, features.shape[1], features.shape[2], features.shape[3]))
+#         batch_labels = np.zeros((batch_size, labels.shape[1], labels.shape[2], labels.shape[3]))
+
+#         while True:
+#             # Fill arrays of batch size with augmented data taken randomly from full passed arrays
+#             indexes = random.sample(range(len(features)), batch_size)
+#             # Perform the exactly the same augmentation for X and y
+#             random_augmented_images, random_augmented_labels = do_augmentation(seq_det, features[indexes], labels[indexes])
+#             batch_features[:,:,:,:] = random_augmented_images[:,:,:,:]
+#             batch_labels[:,:,:,:] = random_augmented_labels[:,:,:,:]
+
+#             yield batch_features, batch_labels
+            
+    # Train augmentation
+#     def do_augmentation(seq_det, X_train, y_train):
+#         # Use seq_det to build augmentation.
+#         # ....
+#         return np.array(X_train_aug), np.array(y_train_aug)
+
+#     seq = iaa.Sequential([
+#         iaa.Fliplr(0.5), # horizontally flip
+#         iaa.OneOf([
+#             iaa.Noop(),
+#             iaa.GaussianBlur(sigma=(0.0, 1.0)),
+#             iaa.Noop(),
+#             iaa.Affine(rotate=(-10, 10), translate_percent={"x": (-0.25, 0.25)}, mode='symmetric', cval=(0)),
+#             iaa.Noop(),
+#             iaa.PerspectiveTransform(scale=(0.04, 0.08)),
+#             iaa.Noop(),
+#             iaa.PiecewiseAffine(scale=(0.05, 0.1), mode='edge', cval=(0)),
+#         ]),
+#         # More as you want ...
+#     ])
+#     seq_det = seq.to_deterministic()
+    
+#     history = model.fit_generator(generator(X_train, y_train, BATCH_SIZE, seq_det),
+#                               epochs=EPOCHS,
+#                               steps_per_epoch=steps_per_epoch,
+#                               validation_data=(X_valid, y_valid),
+#                               verbose = 1, 
+#                               callbacks = [check_point]
+#                              ) 
+    
+    # Image augmentations
+            
+############################### END of TODO ###############################
+
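+A minimal usage sketch follows. Note that Dataset reads every setting through
+self.config, which is supplied by the class that mixes Dataset in, so the
+model class and paths below are illustrative assumptions:
+
+    # hypothetical usage sketch
+    model = SomeModelClass(config_filepath="configs/default_unet.yml")  # hypothetical class that provides self.config
+    model.load_dataset(dataset_dir="data/train/")  # omit dataset_dir to use the value from the config file
+    model.augment_images()  # populates aug_images/aug_ground_truth and stores their mean/std in the config
+    model.sanity_check(0)   # visually inspect the first augmented image/ground-truth pair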

Classes

+
+
+class Dataset
+
+

Creates a Dataset object that is used to manipulate the training data.

+

Attributes

+
+
classes : list
+
List of dictionaries containing the class name and id
+
train_images : list
+
List of images that are used as the input for the network
+
train_ground_truth : list
+
List of images that are used as the ground truth for the network
+
+
+
+

Ancestors

+Image_Functions

Subclasses

+ +

Methods

+
+
+def augment_images(self)
+
+

Augments images using the parameters in the config file

+
+
+
+
+def augmentations(self, p=None)
+
+

Generates a list of augmentations using parameters obtained from the config file

+

Parameters

+
+
p : float, optional
+
Probability of applying the augmentation pipeline to an image
+
+

Returns

+
+
function
+
function used to augment images
+
+
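+The returned albumentations Compose object is applied jointly to an image and
+its mask, as augment_images() does internally (the dataset, image and
+ground_truth names below are illustrative):
+
+    # hypothetical usage sketch of the returned pipeline
+    augmentor = dataset.augmentations(p=0.9)
+    augmented = augmentor(image=image, mask=ground_truth)
+    aug_image, aug_mask = augmented["image"], augmented["mask"]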
+Source code:
def augmentations(self, p = None):
+    """Generates list of augmentations using parameters obtained from config file
+    
+    Parameters
+    ----------
+    p : float, optional
+        probability of applying any of the augmentations to the image
+    
+    Returns
+    ----------
+    function
+        function used to augment images
+    """
+    from albumentations import (
+        RandomCrop, HorizontalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90,
+        Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, ElasticTransform,
+        IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur,
+        IAASharpen, RandomBrightnessContrast, Flip, OneOf, Compose
+    )
+    
+    augmentation_list = []
+    
+    if self.config.get_parameter("random_rotate") is True:
+        augmentation_list.append(RandomRotate90(p = self.config.get_parameter("random_rotate_p"))) # 0.9
+    
+    if self.config.get_parameter("flip") is True:
+        augmentation_list.append(Flip())
+        
+    if self.config.get_parameter("transpose") is True:
+        augmentation_list.append(Transpose())
+        
+    if self.config.get_parameter("blur_group") is True:
+        blur_augmentation = []
+        if self.config.get_parameter("motion_blur") is True:
+            blur_augmentation.append(MotionBlur(p = self.config.get_parameter("motion_blur_p")))
+        if self.config.get_parameter("median_blur") is True:
+            blur_augmentation.append(MedianBlur(blur_limit = self.config.get_parameter("median_blur_limit"), p = self.config.get_parameter("median_blur_p")))
+        if self.config.get_parameter("blur") is True:
+            blur_augmentation.append(Blur(blur_limit = self.config.get_parameter("blur_limit"), p = self.config.get_parameter("blur_p")))
+        augmentation_list.append(OneOf(blur_augmentation, p = self.config.get_parameter("blur_group_p"))) 
+        
+    if self.config.get_parameter("shift_scale_rotate") is True:
+        augmentation_list.append(ShiftScaleRotate(shift_limit = self.config.get_parameter("shift_limit"),
+                                                  scale_limit = self.config.get_parameter("scale_limit"),
+                                                  rotate_limit = self.config.get_parameter("rotate_limit"),
+                                                  p = self.config.get_parameter("shift_scale_rotate_p")))
+    if self.config.get_parameter("distortion_group") is True:
+        distortion_augmentation = []
+        if self.config.get_parameter("optical_distortion") is True:
+            distortion_augmentation.append(OpticalDistortion(p = self.config.get_parameter("optical_distortion_p")))
+        if self.config.get_parameter("elastic_transform") is True:
+            distortion_augmentation.append(ElasticTransform(p = self.config.get_parameter("elastic_transform_p")))
+        if self.config.get_parameter("grid_distortion") is True:
+            distortion_augmentation.append(GridDistortion(p = self.config.get_parameter("grid_distortion_p")))
+        
+        augmentation_list.append(OneOf(distortion_augmentation, p = self.config.get_parameter("distortion_group_p")))
+    
+    if self.config.get_parameter("brightness_contrast_group") is True:
+        contrast_augmentation = []
+        if self.config.get_parameter("clahe") is True:
+            contrast_augmentation.append(CLAHE())
+        if self.config.get_parameter("sharpen") is True:
+            contrast_augmentation.append(IAASharpen())
+        if self.config.get_parameter("random_brightness_contrast") is True:
+            contrast_augmentation.append(RandomBrightnessContrast())
+       
+        augmentation_list.append(OneOf(contrast_augmentation, p = self.config.get_parameter("brightness_contrast_group_p")))
+        
+    augmentation_list.append(RandomCrop(self.config.get_parameter("tile_size")[0], self.config.get_parameter("tile_size")[1], always_apply=True))
+    
+    return Compose(augmentation_list, p = p)
+
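For reference, a minimal standalone sketch of the same Compose/OneOf pattern, assuming the same era of albumentations as the import list above; the probabilities and crop size here are illustrative, not the config defaults:
+import numpy as np
+from albumentations import Blur, Compose, Flip, MedianBlur, OneOf, RandomCrop
+
+image = np.random.rand(512, 512).astype(np.float32)
+mask = np.random.randint(0, 2, (512, 512)).astype(np.uint8)
+
+augmentor = Compose([
+    Flip(),
+    OneOf([Blur(blur_limit=3), MedianBlur(blur_limit=3)], p=0.5),  # pick one blur
+    RandomCrop(256, 256, always_apply=True),
+], p=0.9)
+
+augmented = augmentor(image=image, mask=mask)  # dict with "image" and "mask" keys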
+
+
+def get_class_id(self, class_name) +
+
+

Returns the class id, adding the class to the list of known classes if it is not already present.

+

Parameters

+
+
class_name : str
+
Identity of class that will be associated with the class id
+
+

Returns

+
+
int
+
Class id
+
+
+ +Expand source code + +
def get_class_id(self, class_name):
+    """Returns the class id and adds class to list if not in list of classes.
+
+    Parameters
+    ----------
+    class_name : str
+        Identity of class that will be associated with the class id
+        
+    Returns
+    ----------
+    int
+        Class id
+    """
+    
+    if len(self.classes) == 0:
+        self.classes.append({"class": class_name, "id": 0})
+        return 0
+    
+    for class_info in self.classes:
+        # if the class already exists, return its id
+        if class_info["class"] == class_name:
+            return class_info["id"]
+
+    # new class: the next free id equals the current length of the list
+    new_id = len(self.classes)
+    self.classes.append({"class": class_name, "id": new_id})
+    return new_id
+
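A quick trace of the id assignment (class names are hypothetical):
+# self.classes starts empty
+# get_class_id("nuclei")   -> 0   classes: [{"class": "nuclei", "id": 0}]
+# get_class_id("membrane") -> 1   classes: [{"class": "nuclei", "id": 0}, {"class": "membrane", "id": 1}]
+# get_class_id("nuclei")   -> 0   (already present, list unchanged)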
+
+
+def load_dataset(self, dataset_dir=None, tiled=False) +
+
+

Loads dataset from dataset_dir

+

Parameters

+
+
dataset_dir : str or None, optional
+
Folder to load the dataset from. If None, dataset_dir is obtained from the config file
+
tiled : bool, optional
+
Set to True to split each loaded image into tiles
+
+
+ +Expand source code + +
def load_dataset(self, dataset_dir = None, tiled = False):
+    """Loads dataset from ``dataset_dir``
+
+    Parameters
+    ----------
+    dataset_dir : str or None, optional
+        Folder to load the dataset from. If None, ``dataset_dir`` is obtained from the config file
+        
+    tiled : bool, optional
+        Set to True to split each loaded image into tiles
+    """
+    
+    # update dataset_dir if specified. If not, load dataset_dir from config file
+    if dataset_dir is None:
+        dataset_dir = self.config.get_parameter("dataset_dir")
+    else:
+        self.config.update_parameter(self.config.find_key("dataset_dir"), dataset_dir)
+    
+    image_dirs = next(os.walk(dataset_dir))[1]
+    image_dirs = [f for f in image_dirs if not f[0] == '.']
+    
+    for img_dir in image_dirs:
+        # images
+        image = self.load_image(os.path.join(dataset_dir, img_dir), subfolder = self.config.get_parameter("image_subfolder"))
+        
+        # percentile normalization
+        if self.config.get_parameter("percentile_normalization"):
+            image, _, _ = self.percentile_normalization(image, in_bound = self.config.get_parameter("percentile"))
+        
+        if tiled is True:
+            tile_image_list, num_rows, num_cols, padding = self.tile_image(image, self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size"))
+            self.config.update_parameter(["images","num_rows"], num_rows)
+            self.config.update_parameter(["images","num_cols"], num_cols)
+            self.config.update_parameter(["images","padding"], padding)
+            self.train_images.extend(tile_image_list)
+        else:
+            self.train_images.extend([image,])
+        
+        #ground_truth
+        ground_truth, class_id = self.load_ground_truth(os.path.join(dataset_dir, img_dir), subfolder = self.config.get_parameter("ground_truth_subfolder"))
+        if tiled is True:
+            tile_ground_truth_list, _, _, _ = self.tile_image(ground_truth[0], self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size"))
+            self.train_ground_truth.extend(tile_ground_truth_list)
+        else:
+            self.train_ground_truth.extend(ground_truth)
+
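The layout this loader expects looks roughly as follows. The subfolder names shown are the defaults; in practice they come from the image_subfolder and ground_truth_subfolder entries of the config file:
+dataset_dir/
+    img_001/
+        Images/    # typically one .tif image per folder (only one is loaded)
+        Masks/     # ground truth .tif files, named <class>_<anything>.tif
+    img_002/
+        Images/
+        Masks/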
+
+
+def sanity_check(self, image_index) +
+
+

Plots the augmented image and ground_truth to check that they look correct.

+

Parameters

+
+
image_index : int
+
Index of the image and its corresponding ground_truth
+
+
+ +Expand source code + +
def sanity_check(self, image_index):
+    """Plots the augmented image and ground_truth to check if everything is ok.
+
+    Parameters
+    ----------
+    image_index : int
+        Index of the image and its corresponding ground_truth
+    """
+    
+    image = self.aug_images[image_index][:,:,0]
+    ground_truth = self.aug_ground_truth[image_index][:,:,0]
+
+    plt.figure(figsize=(14, 14))
+    plt.axis('off')
+    plt.imshow(image, cmap='gray', 
+               norm=None, interpolation=None)
+    plt.show()
+
+    plt.figure(figsize=(14, 14))
+    plt.axis('off')
+    plt.imshow(ground_truth, cmap='gray', 
+               norm=None, interpolation=None)
+    plt.show()
+
+
+
+

Inherited members

+ +
+
+
+
+ +
+ + + + + \ No newline at end of file diff --git a/html/models/internals/image_functions.html b/html/models/internals/image_functions.html new file mode 100644 index 0000000..5cd254f --- /dev/null +++ b/html/models/internals/image_functions.html @@ -0,0 +1,1340 @@ + + + + + + +models.internals.image_functions API documentation + + + + + + + + + +
+
+
+

Module models.internals.image_functions

+
+
+
+ +Expand source code + +
import os
+import glob
+import sys
+import warnings
+
+import math
+import numpy as np
+
+#TODO: change to cv2?
+import skimage
+import skimage.io as skio
+
+class Image_Functions():
+    def list_images(self, image_dir, image_ext = '*.tif'):
+        """List images in the directory with the given file extension
+
+        Parameters
+        ----------
+        image_dir : `str`
+            Directory to look for image files
+        image_ext : `str`, optional
+            [Default: '*.tif'] File extension of the image file
+            
+        Returns
+        ----------
+        image_list : `list`
+            List of images found in the directory with the given file extension
+            
+        Notes
+        ----------
+        For Linux-based systems, ensure that the file extensions are either all lowercase or all uppercase, as mixed-case extensions will not be matched.
+        """
+        # to bypass case sensitivity of file extensions in linux and possibly other systems
+        if sys.platform in ["win32",]:
+            image_extension = [image_ext]
+        else:
+            image_extension = [image_ext.lower(),image_ext.upper()]
+        
+        image_list = []
+        for ext in image_extension:
+            image_list.extend(glob.glob(os.path.join(image_dir,ext)))
+            
+        return image_list
+    
+    #######################
+    # Image IO functions
+    #######################
+    def load_image(self, image_path, subfolder = 'Images', image_index = 0, image_ext = '*.tif'):
+        """Loads images found in ``image_path``
+
+        Parameters
+        ----------
+        image_path : `str`
+            Path to look for image files
+        subfolder : `str`, optional
+            [Default: 'Images'] Subfolder in which to look for the image files
+        image_index : `int`, optional
+            [Default: 0] Index of image to load
+        image_ext : `str`, optional
+            [Default: '*.tif'] File extension of the image file
+            
+        Returns
+        ----------
+        image : `array_like`
+            Loaded image
+            
+        Notes
+        ----------
+        Only one image from each directory is loaded.
+        """
+        if os.path.isdir(image_path) is True:
+            image_list = self.list_images(os.path.join(image_path, subfolder), image_ext = image_ext)
+            if len(image_list) > 1:
+                warnings.warn("More than 1 image found in directory. Loading {}".format(image_list[image_index]))
+            # Load image
+            image = skio.imread(image_list[image_index])
+        else:
+            image = skio.imread(image_path)
+            
+        return image
+        
+    def load_ground_truth(self, image_path, subfolder = 'Masks', image_ext = '*.tif'):
+        """Loads ground truth images found in ``image_path`` and performs erosion/dilation/inversion if needed
+
+        Parameters
+        ----------
+        image_path : `str`
+            Path to look for ground truth images
+        subfolder : `str`, optional
+            [Default: 'Masks'] Subfolder in which to look for the ground truth images
+        image_ext : `str`, optional
+            [Default: '*.tif'] File extension of ground truth image file
+
+        Returns
+        ----------
+        output_ground_truth : `list`
+            List of ground truth images found in the directory with the given file extension
+            
+        class_ids : `list`
+            List of class ids of the ground truth images
+        """
+        image_list = self.list_images(os.path.join(image_path, subfolder), image_ext = image_ext)
+        
+        output_ground_truth = []
+        class_ids = []
+        
+        for ground_truth_path in image_list:
+            # add class if not in list
+            # use os.path.basename so the file name is extracted correctly on both Windows and Linux
+            ground_truth_name = os.path.basename(ground_truth_path)
+            class_name = ground_truth_name.split('_')[0]
+            
+            # obtain class_id
+            class_ids.append(self.get_class_id(class_name))
+            
+            # Load image
+            ground_truth_img = skio.imread(ground_truth_path)
+            
+            # perform erosion so that the borders will still be there after augmentation
+            if self.config.get_parameter("use_binary_erosion") is True:
+                from skimage.morphology import binary_erosion, disk
+                # sets dtype back to unsigned integer in order for some augmentations to work
+                ground_truth_dtype = ground_truth_img.dtype
+                ground_truth_img = binary_erosion(ground_truth_img, disk(self.config.get_parameter("disk_size")))
+                ground_truth_img = ground_truth_img.astype(ground_truth_dtype)
+            
+            if self.config.get_parameter("use_binary_dilation") is True:
+                from skimage.morphology import binary_dilation, disk
+                ground_truth_dtype = ground_truth_img.dtype
+                ground_truth_img = binary_dilation(ground_truth_img, disk(self.config.get_parameter("disk_size")))
+                ground_truth_img = ground_truth_img.astype(ground_truth_dtype)
+            
+            # perform inversion of ground_truth if needed
+            if self.config.get_parameter("invert_ground_truth") is True:
+                ground_truth_img = skimage.util.invert(ground_truth_img)
+                
+            output_ground_truth.append(ground_truth_img)
+            
+        return output_ground_truth, class_ids
+    
+    def reshape_image(self, image):
+        """Reshapes the image to the correct dimenstions for Unet
+
+        Parameters
+        ----------
+        image : `array_like`
+            Image to be reshaped
+
+        Returns
+        ----------
+        image : `array_like`
+            Reshaped image 
+        """
+        h, w = image.shape[:2]
+        image = np.reshape(image, (h, w, -1))
+        return image
+    
+    #######################
+    # Image padding
+    #######################
+    def pad_image(self, image, image_size, mode = 'constant'):
+        """Pad image to specified image_size
+
+        Parameters
+        ----------
+        image : `array_like`
+            Image to be padded
+        image_size : `list`
+            Final size of padded image
+        mode : `str`, optional
+            [Default: 'constant'] Mode to pad the image
+
+        Returns
+        ----------
+        image : `array_like`
+            Padded image
+            
+        padding : `list`
+            List containing the number of pixels padded to each direction
+        """
+        h, w = image.shape[:2]
+        
+        top_pad = (image_size[0] - h) // 2
+        bottom_pad = image_size[0] - h - top_pad
+            
+        left_pad = (image_size[1] - w) // 2
+        right_pad = image_size[1] - w - left_pad
+
+        padding = ((top_pad, bottom_pad), (left_pad, right_pad))
+        image = np.pad(image, padding, mode = mode, constant_values=0)
+        
+        return image, padding
+    
+    def remove_pad_image(self, image, padding):
+        """Removes pad from image
+
+        Parameters
+        ----------
+        image : `array_like`
+            Padded image
+        padding : `list`
+            List containing the number of padded pixels in each direction
+
+        Returns
+        ----------
+        image : `array_like`
+            Image without padding
+        """
+        
+        h, w = image.shape[:2]
+        
+        return image[padding[0][0]:h-padding[0][1], padding[1][0]:w-padding[1][1]]
+    
+    #######################
+    # Tiling functions
+    #######################
+    def tile_image(self, image, tile_size, tile_overlap_size):
+        """Converts an image into a list of tiled images
+
+        Parameters
+        ----------
+        image : `array_like`
+            Image to be tiled
+        tile_size : `list`
+            Size of each individual tile
+        tile_overlap_size : `list`
+            Amount of overlap (in pixels) between each tile
+
+        Returns
+        ----------
+        tile_image_list : `list`
+            List of tiled images
+        num_rows : `int`
+            Number of rows of tiles
+        num_cols : `int`
+            Number of columns of tiles
+        padding : `list`
+            Padding applied to the image before tiling
+        """
+        image_height, image_width = image.shape[:2]
+        tile_height = tile_size[0] - tile_overlap_size[0] * 2
+        tile_width = tile_size[1] - tile_overlap_size[1] * 2
+        
+        if image_height <= tile_height and image_width <= tile_width:
+            # image fits in a single tile; keep the return signature consistent
+            return [self.reshape_image(image)], 1, 1, None
+        
+        num_rows = math.ceil(image_height/tile_height)
+        num_cols = math.ceil(image_width/tile_width)
+        num_tiles = num_rows*num_cols
+        
+        
+        # pad image to fit tile size
+        image, padding = self.pad_image(image, (tile_height*num_rows + tile_overlap_size[0] * 2, tile_width*num_cols + tile_overlap_size[1]*2))
+        
+        tile_image_list = []
+        
+        for tile_no in range(num_tiles):
+            tile_x_start = (tile_no // num_rows) * tile_width
+            tile_x_end = tile_x_start + tile_size[1]
+            
+            tile_y_start = (tile_no % num_rows) * tile_height
+            tile_y_end = tile_y_start + tile_size[0]
+            
+            tile_image = image[tile_y_start: tile_y_end, tile_x_start:tile_x_end]
+            
+            # ensure input into unet is of correct shape
+            tile_image = self.reshape_image(tile_image)
+            
+            tile_image_list.append(tile_image)
+            
+        return tile_image_list, num_rows, num_cols, padding
+    
+    def untile_image(self, tile_list, tile_size, tile_overlap_size, num_rows, num_cols, padding): 
+        """Stitches a list of tiled images back into a single image
+
+        Parameters
+        ----------
+        tile_list : `list`
+            List of tiled images
+        tile_size : `list`
+            Size of each individual tile
+        tile_overlap_size : `list`
+            Amount of overlap (in pixels) between each tile
+        num_rows : `int`
+            Number of rows of tiles
+        num_cols : `int`
+            Number of cols of tiles
+        padding : `list`
+            Amount of padding used during tiling
+
+        Returns
+        ----------
+        image : `array_like`
+            Image without padding
+        """
+        if num_rows == 1 and num_cols == 1:
+            image = tile_list[0]
+            
+            # padding may be None when the image fit in a single tile
+            if padding is not None:
+                image = self.remove_pad_image(image, padding = padding)
+                
+            return image
+              
+        tile_height = tile_size[0] - tile_overlap_size[0] * 2
+        tile_width = tile_size[1] - tile_overlap_size[1] * 2
+        
+        num_tiles = num_rows*num_cols
+        
+        for col in range(num_cols):
+            for row in range(num_rows):
+                tile_image = tile_list[num_rows*col + row][:,:,0]
+                tile_image = tile_image[tile_overlap_size[0]:min(-tile_overlap_size[0],-1),tile_overlap_size[1]:min(-tile_overlap_size[1],-1)]
+                if row == 0:
+                    image_col = np.array(tile_image)
+                else:
+                    image_col = np.vstack((image_col, tile_image))
+            
+            if col == 0:
+                image = image_col
+            else:
+                image = np.hstack((image, image_col))
+        
+        image, _ = self.pad_image(image, image_size = (tile_height * num_rows + tile_overlap_size[0] * 2, tile_width * num_cols + tile_overlap_size[1]*2))
+        
+        if padding is not None:
+            image = self.remove_pad_image(image, padding = padding)
+            
+        return image
+    
+    
+    #######################
+    # Image normalization
+    #######################
+    def percentile_normalization(self, image, in_bound=[3, 99.8]):
+        """Performs percentile normalization on the image
+
+        Parameters
+        ----------
+        image : `array_like`
+            Image to be normalized
+        in_bound : `list`
+            Upper and lower percentile used to normalize image
+
+        Returns
+        ----------
+        image : `array_like`
+            Normalized image
+            
+        image_min : `float`
+            Value of the lower percentile of ``image``
+            
+        image_max : `float`
+            Value of the upper percentile of ``image``
+        """
+        image_min = np.percentile(image, in_bound[0])
+        image_max = np.percentile(image, in_bound[1])
+        image = (image - image_min)/(image_max - image_min)
+
+        return image, image_min, image_max
+
+
+
+
+
+
+
+
+
+

Classes

+
+
+class Image_Functions +(*args, **kwargs) +
+
+
+
+ +Expand source code + +
class Image_Functions():
+    def list_images(self, image_dir, image_ext = '*.tif'):
+        """List images in the directory with the given file extension
+
+        Parameters
+        ----------
+        image_dir : `str`
+            Directory to look for image files
+        image_ext : `str`, optional
+            [Default: '*.tif'] File extension of the image file
+            
+        Returns
+        ----------
+        image_list : `list`
+            List of images found in the directory with the given file extension
+            
+        Notes
+        ----------
+        For Linux-based systems, ensure that the file extensions are either all lowercase or all uppercase, as mixed-case extensions will not be matched.
+        """
+        # to bypass case sensitivity of file extensions in linux and possibly other systems
+        if sys.platform in ["win32",]:
+            image_extension = [image_ext]
+        else:
+            image_extension = [image_ext.lower(),image_ext.upper()]
+        
+        image_list = []
+        for ext in image_extension:
+            image_list.extend(glob.glob(os.path.join(image_dir,ext)))
+            
+        return image_list
+    
+    #######################
+    # Image IO functions
+    #######################
+    def load_image(self, image_path, subfolder = 'Images', image_index = 0, image_ext = '*.tif'):
+        """Loads images found in ``image_path``
+
+        Parameters
+        ----------
+        image_path : `str`
+            Path to look for image files
+        subfolder : `str`, optional
+            [Default: 'Images'] Subfolder in which to look for the image files
+        image_index : `int`, optional
+            [Default: 0] Index of image to load
+        image_ext : `str`, optional
+            [Default: '*.tif'] File extension of the image file
+            
+        Returns
+        ----------
+        image : `array_like`
+            Loaded image
+            
+        Notes
+        ----------
+        Only one image from each directory is loaded.
+        """
+        if os.path.isdir(image_path) is True:
+            image_list = self.list_images(os.path.join(image_path, subfolder), image_ext = image_ext)
+            if len(image_list) > 1:
+                warnings.warn("More than 1 image found in directory. Loading {}".format(image_list[image_index]))
+            # Load image
+            image = skio.imread(image_list[image_index])
+        else:
+            image = skio.imread(image_path)
+            
+        return image
+        
+    def load_ground_truth(self, image_path, subfolder = 'Masks', image_ext = '*.tif'):
+        """Loads ground truth images found in ``image_path`` and performs erosion/dilation/inversion if needed
+
+        Parameters
+        ----------
+        image_path : `str`
+            Path to look for ground truth images
+        subfolder : `str`, optional
+            [Default: 'Masks'] Subfolder in which to look for the ground truth images
+        image_ext : `str`, optional
+            [Default: '*.tif'] File extension of ground truth image file
+
+        Returns
+        ----------
+        output_ground_truth : `list`
+            List of ground truth images found in the directory with the given file extension
+            
+        class_ids : `list`
+            List of class ids of the ground truth images
+        """
+        image_list = self.list_images(os.path.join(image_path, subfolder), image_ext = image_ext)
+        
+        output_ground_truth = []
+        class_ids = []
+        
+        for ground_truth_path in image_list:
+            # add class if not in list
+            # use os.path.basename so the file name is extracted correctly on both Windows and Linux
+            ground_truth_name = os.path.basename(ground_truth_path)
+            class_name = ground_truth_name.split('_')[0]
+            
+            # obtain class_id
+            class_ids.append(self.get_class_id(class_name))
+            
+            # Load image
+            ground_truth_img = skio.imread(ground_truth_path)
+            
+            # perform erosion so that the borders will still be there after augmentation
+            if self.config.get_parameter("use_binary_erosion") is True:
+                from skimage.morphology import binary_erosion, disk
+                # sets dtype back to unsigned integer in order for some augmentations to work
+                ground_truth_dtype = ground_truth_img.dtype
+                ground_truth_img = binary_erosion(ground_truth_img, disk(self.config.get_parameter("disk_size")))
+                ground_truth_img = ground_truth_img.astype(ground_truth_dtype)
+            
+            if self.config.get_parameter("use_binary_dilation") is True:
+                from skimage.morphology import binary_dilation, disk
+                ground_truth_dtype = ground_truth_img.dtype
+                ground_truth_img = binary_dilation(ground_truth_img, disk(self.config.get_parameter("disk_size")))
+                ground_truth_img = ground_truth_img.astype(ground_truth_dtype)
+            
+            # perform inversion of ground_truth if needed
+            if self.config.get_parameter("invert_ground_truth") is True:
+                ground_truth_img = skimage.util.invert(ground_truth_img)
+                
+            output_ground_truth.append(ground_truth_img)
+            
+        return output_ground_truth, class_ids
+    
+    def reshape_image(self, image):
+        """Reshapes the image to the correct dimenstions for Unet
+
+        Parameters
+        ----------
+        image : `array_like`
+            Image to be reshaped
+
+        Returns
+        ----------
+        image : `array_like`
+            Reshaped image 
+        """
+        h, w = image.shape[:2]
+        image = np.reshape(image, (h, w, -1))
+        return image
+    
+    #######################
+    # Image padding
+    #######################
+    def pad_image(self, image, image_size, mode = 'constant'):
+        """Pad image to specified image_size
+
+        Parameters
+        ----------
+        image : `array_like`
+            Image to be padded
+        image_size : `list`
+            Final size of padded image
+        mode : `str`, optional
+            [Default: 'constant'] Mode to pad the image
+
+        Returns
+        ----------
+        image : `array_like`
+            Padded image
+            
+        padding : `list`
+            List containing the number of pixels padded to each direction
+        """
+        h, w = image.shape[:2]
+        
+        top_pad = (image_size[0] - h) // 2
+        bottom_pad = image_size[0] - h - top_pad
+            
+        left_pad = (image_size[1] - w) // 2
+        right_pad = image_size[1] - w - left_pad
+
+        padding = ((top_pad, bottom_pad), (left_pad, right_pad))
+        image = np.pad(image, padding, mode = mode, constant_values=0)
+        
+        return image, padding
+    
+    def remove_pad_image(self, image, padding):
+        """Removes pad from image
+
+        Parameters
+        ----------
+        image : `array_like`
+            Padded image
+        padding : `list`
+            List containing the number of padded pixels in each direction
+
+        Returns
+        ----------
+        image : `array_like`
+            Image without padding
+        """
+        
+        h, w = image.shape[:2]
+        
+        return image[padding[0][0]:h-padding[0][1], padding[1][0]:w-padding[1][1]]
+    
+    #######################
+    # Tiling functions
+    #######################
+    def tile_image(self, image, tile_size, tile_overlap_size):
+        """Converts an image into a list of tiled images
+
+        Parameters
+        ----------
+        image : `array_like`
+            Image to be tiled
+        tile_size : `list`
+            Size of each individual tile
+        tile_overlap_size : `list`
+            Amount of overlap (in pixels) between each tile
+
+        Returns
+        ----------
+        tile_image_list : `list`
+            List of tiled images
+        num_rows : `int`
+            Number of rows of tiles
+        num_cols : `int`
+            Number of columns of tiles
+        padding : `list`
+            Padding applied to the image before tiling
+        """
+        image_height, image_width = image.shape[:2]
+        tile_height = tile_size[0] - tile_overlap_size[0] * 2
+        tile_width = tile_size[1] - tile_overlap_size[1] * 2
+        
+        if image_height <= tile_height and image_width <= tile_width:
+            # image fits in a single tile; keep the return signature consistent
+            return [self.reshape_image(image)], 1, 1, None
+        
+        num_rows = math.ceil(image_height/tile_height)
+        num_cols = math.ceil(image_width/tile_width)
+        num_tiles = num_rows*num_cols
+        
+        
+        # pad image to fit tile size
+        image, padding = self.pad_image(image, (tile_height*num_rows + tile_overlap_size[0] * 2, tile_width*num_cols + tile_overlap_size[1]*2))
+        
+        tile_image_list = []
+        
+        for tile_no in range(num_tiles):
+            tile_x_start = (tile_no // num_rows) * tile_width
+            tile_x_end = tile_x_start + tile_size[1]
+            
+            tile_y_start = (tile_no % num_rows) * tile_height
+            tile_y_end = tile_y_start + tile_size[0]
+            
+            tile_image = image[tile_y_start: tile_y_end, tile_x_start:tile_x_end]
+            
+            # ensure input into unet is of correct shape
+            tile_image = self.reshape_image(tile_image)
+            
+            tile_image_list.append(tile_image)
+            
+        return tile_image_list, num_rows, num_cols, padding
+    
+    def untile_image(self, tile_list, tile_size, tile_overlap_size, num_rows, num_cols, padding): 
+        """Stitches a list of tiled images back into a single image
+
+        Parameters
+        ----------
+        tile_list : `list`
+            List of tiled images
+        tile_size : `list`
+            Size of each individual tile
+        tile_overlap_size : `list`
+            Amount of overlap (in pixels) between each tile
+        num_rows : `int`
+            Number of rows of tiles
+        num_cols : `int`
+            Number of cols of tiles
+        padding : `list`
+            Amount of padding used during tiling
+
+        Returns
+        ----------
+        image : `array_like`
+            Image without padding
+        """
+        if num_rows == 1 and num_cols == 1:
+            image = tile_list[0]
+            
+            # padding may be None when the image fit in a single tile
+            if padding is not None:
+                image = self.remove_pad_image(image, padding = padding)
+                
+            return image
+              
+        tile_height = tile_size[0] - tile_overlap_size[0] * 2
+        tile_width = tile_size[1] - tile_overlap_size[1] * 2
+        
+        num_tiles = num_rows*num_cols
+        
+        for col in range(num_cols):
+            for row in range(num_rows):
+                tile_image = tile_list[num_rows*col + row][:,:,0]
+                tile_image = tile_image[tile_overlap_size[0]:min(-tile_overlap_size[0],-1),tile_overlap_size[1]:min(-tile_overlap_size[1],-1)]
+                if row == 0:
+                    image_col = np.array(tile_image)
+                else:
+                    image_col = np.vstack((image_col, tile_image))
+            
+            if col == 0:
+                image = image_col
+            else:
+                image = np.hstack((image, image_col))
+        
+        image, _ = self.pad_image(image, image_size = (tile_height * num_rows + tile_overlap_size[0] * 2, tile_width * num_cols + tile_overlap_size[1]*2))
+        
+        if padding is not None:
+            image = self.remove_pad_image(image, padding = padding)
+            
+        return image
+    
+    
+    #######################
+    # Image normalization
+    #######################
+    def percentile_normalization(self, image, in_bound=[3, 99.8]):
+        """Performs percentile normalization on the image
+
+        Parameters
+        ----------
+        image : `array_like`
+            Image to be normalized
+        in_bound : `list`
+            Upper and lower percentile used to normalize image
+
+        Returns
+        ----------
+        image : `array_like`
+            Normalized image
+            
+        image_min : `float`
+            Value of the lower percentile of ``image``
+            
+        image_max : `float`
+            Value of the upper percentile of ``image``
+        """
+        image_min = np.percentile(image, in_bound[0])
+        image_max = np.percentile(image, in_bound[1])
+        image = (image - image_min)/(image_max - image_min)
+
+        return image, image_min, image_max
+
+

Subclasses

+ +

Methods

+
+
+def list_images(self, image_dir, image_ext='*.tif') +
+
+

List images in the directory with the given file extension

+

Parameters

+
+
image_dir : str
+
Directory to look for image files
+
image_ext : str, optional
+
[Default: '*.tif'] File extension of the image file
+
+

Returns

+
+
image_list : list
+
List of images found in the directory with the given file extension
+
+

Notes

+

For Linux-based systems, ensure that the file extensions are either all lowercase or all uppercase, as mixed-case extensions will not be matched.

+
+ +Expand source code + +
def list_images(self, image_dir, image_ext = '*.tif'):
+    """List images in the directory with the given file extension
+
+    Parameters
+    ----------
+    image_dir : `str`
+        Directory to look for image files
+    image_ext : `str`, optional
+        [Default: '*.tif'] File extension of the image file
+        
+    Returns
+    ----------
+    image_list : `list`
+        List of images found in the directory with the given file extension
+        
+    Notes
+    ----------
+    For Linux-based systems, ensure that the file extensions are either all lowercase or all uppercase, as mixed-case extensions will not be matched.
+    """
+    # to bypass case sensitivity of file extensions in linux and possibly other systems
+    if sys.platform in ["win32",]:
+        image_extension = [image_ext]
+    else:
+        image_extension = [image_ext.lower(),image_ext.upper()]
+    
+    image_list = []
+    for ext in image_extension:
+        image_list.extend(glob.glob(os.path.join(image_dir,ext)))
+        
+    return image_list
+
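A hedged usage sketch: Image_Functions is a mixin, so it is hosted in a bare class here, the directory is illustrative, and the repository root is assumed to be on the import path:
+from models.internals.image_functions import Image_Functions
+
+class _Host(Image_Functions):
+    pass
+
+# on Linux this globs both "*.tif" and "*.TIF"; on win32 only "*.tif"
+tifs = _Host().list_images("data/img_001/Images", image_ext="*.tif")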
+
+
+def load_ground_truth(self, image_path, subfolder='Masks', image_ext='*.tif') +
+
+

Loads ground truth images found in image_path and performs erosion/dilation/inversion if needed

+

Parameters

+
+
image_path : str
+
Path to look for ground truth images
+
subfolder : str, optional
+
[Default: 'Masks'] Subfolder in which to look for the ground truth images
+
image_ext : str, optional
+
[Default: '*.tif'] File extension of ground truth image file
+
+

Returns

+
+
output_ground_truth : list
+
List of ground truth images found in the directory with the given file extension
+
class_ids : list
+
List of class ids of the ground truth images
+
+
+ +Expand source code + +
def load_ground_truth(self, image_path, subfolder = 'Masks', image_ext = '*.tif'):
+    """Loads ground truth images found in ``image_path`` and performs erosion/dilation/inversion if needed
+
+    Parameters
+    ----------
+    image_path : `str`
+        Path to look for ground truth images
+    subfolder : `str`, optional
+        [Default: 'Masks'] Subfolder in which to look for the ground truth images
+    image_ext : `str`, optional
+        [Default: '*.tif'] File extension of ground truth image file
+
+    Returns
+    ----------
+    output_ground_truth : `list`
+        List of ground truth images found in the directory with the given file extension
+        
+    class_ids : `list`
+        List of class ids of the ground truth images
+    """
+    image_list = self.list_images(os.path.join(image_path, subfolder), image_ext = image_ext)
+    
+    output_ground_truth = []
+    class_ids = []
+    
+    for ground_truth_path in image_list:
+        # add class if not in list
+        # use os.path.basename so the file name is extracted correctly on both Windows and Linux
+        ground_truth_name = os.path.basename(ground_truth_path)
+        class_name = ground_truth_name.split('_')[0]
+        
+        # obtain class_id
+        class_ids.append(self.get_class_id(class_name))
+        
+        # Load image
+        ground_truth_img = skio.imread(ground_truth_path)
+        
+        # perform erosion so that the borders will still be there after augmentation
+        if self.config.get_parameter("use_binary_erosion") is True:
+            from skimage.morphology import binary_erosion, disk
+            # sets dtype back to unsigned integer in order for some augmentations to work
+            ground_truth_dtype = ground_truth_img.dtype
+            ground_truth_img = binary_erosion(ground_truth_img, disk(self.config.get_parameter("disk_size")))
+            ground_truth_img = ground_truth_img.astype(ground_truth_dtype)
+        
+        if self.config.get_parameter("use_binary_dilation") is True:
+            from skimage.morphology import binary_dilation, disk
+            ground_truth_dtype = ground_truth_img.dtype
+            ground_truth_img = binary_dilation(ground_truth_img, disk(self.config.get_parameter("disk_size")))
+            ground_truth_img = ground_truth_img.astype(ground_truth_dtype)
+        
+        # perform inversion of ground_truth if needed
+        if self.config.get_parameter("invert_ground_truth") is True:
+            ground_truth_img = skimage.util.invert(ground_truth_img)
+            
+        output_ground_truth.append(ground_truth_img)
+        
+    return output_ground_truth, class_ids
+
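The class name is parsed from the file name, so the naming convention matters; a few illustrative file names:
+# "Masks/nuclei_001.tif" -> class_name "nuclei"
+# "Masks/border_001.tif" -> class_name "border"
+# a file without an underscore uses its entire file name (extension included) as the class name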
+
+
+def load_image(self, image_path, subfolder='Images', image_index=0, image_ext='*.tif') +
+
+

Loads images found in image_path

+

Parameters

+
+
image_path : str
+
Path to look for image files
+
subfolder : str, optional
+
[Default: 'Images'] Subfolder in which to look for the image files
+
image_index : int, optional
+
[Default: 0] Index of image to load
+
image_ext : str, optional
+
[Default: '*.tif'] File extension of the image file
+
+

Returns

+
+
image : array_like
+
Loaded image
+
+

Notes

+

Only one image from each directory is loaded.

+
+ +Expand source code + +
def load_image(self, image_path, subfolder = 'Images', image_index = 0, image_ext = '*.tif'):
+    """Loads images found in ``image_path``
+
+    Parameters
+    ----------
+    image_path : `str`
+        Path to look for image files
+    subfolder : `str`, optional
+        [Default: 'Images'] Subfolder in which to look for the image files
+    image_index : `int`, optional
+        [Default: 0] Index of image to load
+    image_ext : `str`, optional
+        [Default: '*.tif'] File extension of the image file
+        
+    Returns
+    ----------
+    image : `array_like`
+        Loaded image
+        
+    Notes
+    ----------
+    Only one image from each directory is loaded.
+    """
+    if os.path.isdir(image_path) is True:
+        image_list = self.list_images(os.path.join(image_path, subfolder), image_ext = image_ext)
+        if len(image_list) > 1:
+            warnings.warn("More than 1 image found in directory. Loading {}".format(image_list[image_index]))
+        # Load image
+        image = skio.imread(image_list[image_index])
+    else:
+        image = skio.imread(image_path)
+        
+    return image
+
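The two call modes, summarized (paths are illustrative):
+# load_image("data/img_001")                 -> loads one image from "data/img_001/Images"
+# load_image("data/img_001/Images/cell.tif") -> loads that file directly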
+
+
+def pad_image(self, image, image_size, mode='constant') +
+
+

Pad image to specified image_size

+

Parameters

+
+
image : array_like
+
Image to be padded
+
image_size : list
+
Final size of padded image
+
mode : str, optional
+
[Default: 'constant'] Mode to pad the image
+
+

Returns

+
+
image : array_like
+
Padded image
+
padding : list
+
List containing the number of pixels padded to each direction
+
+
+ +Expand source code + +
def pad_image(self, image, image_size, mode = 'constant'):
+    """Pad image to specified image_size
+
+    Parameters
+    ----------
+    image : `array_like`
+        Image to be padded
+    image_size : `list`
+        Final size of padded image
+    mode : `str`, optional
+        [Default: 'constant'] Mode to pad the image
+
+    Returns
+    ----------
+    image : `array_like`
+        Padded image
+        
+    padding : `list`
+        List containing the number of pixels padded to each direction
+    """
+    h, w = image.shape[:2]
+    
+    top_pad = (image_size[0] - h) // 2
+    bottom_pad = image_size[0] - h - top_pad
+        
+    left_pad = (image_size[1] - w) // 2
+    right_pad = image_size[1] - w - left_pad
+
+    padding = ((top_pad, bottom_pad), (left_pad, right_pad))
+    image = np.pad(image, padding, mode = mode, constant_values=0)
+    
+    return image, padding
+
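A worked example of the padding arithmetic:
+# pad a 500x500 image to image_size = (512, 512):
+#   top_pad  = (512 - 500) // 2 = 6,  bottom_pad = 512 - 500 - 6 = 6
+#   left_pad = 6,                     right_pad  = 6
+#   -> padding == ((6, 6), (6, 6)) and the padded image is 512x512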
+
+
+def percentile_normalization(self, image, in_bound=[3, 99.8]) +
+
+

Performs percentile normalization on the image

+

Parameters

+
+
image : array_like
+
Image to be normalized
+
in_bound : list
+
Upper and lower percentile used to normalize image
+
+

Returns

+
+
image : array_like
+
Normalized image
+
image_min : float
+
Value of the lower percentile of image
+
image_max : float
+
Value of the upper percentile of image
+
+
+ +Expand source code + +
def percentile_normalization(self, image, in_bound=[3, 99.8]):
+    """Performs percentile normalization on the image
+
+    Parameters
+    ----------
+    image : `array_like`
+        Image to be normalized
+    in_bound : `list`
+        Upper and lower percentile used to normalize image
+
+    Returns
+    ----------
+    image : `array_like`
+        Normalized image
+        
+    image_min : `float`
+        Value of the lower percentile of ``image``
+        
+    image_max : `float`
+        Value of the upper percentile of ``image``
+    """
+    image_min = np.percentile(image, in_bound[0])
+    image_max = np.percentile(image, in_bound[1])
+    image = (image - image_min)/(image_max - image_min)
+
+    return image, image_min, image_max
+
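A standalone sketch of the same normalization with plain NumPy; note that values outside the percentile bounds are not clipped:
+import numpy as np
+
+image = np.random.rand(256, 256) * 1000.0
+lo, hi = np.percentile(image, 3), np.percentile(image, 99.8)
+normalized = (image - lo) / (hi - lo)   # roughly in [0, 1]; outliers fall outside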
+
+
+def remove_pad_image(self, image, padding) +
+
+

Removes pad from image

+

Parameters

+
+
image : array_like
+
Padded image
+
padding : list
+
List containing the number of padded pixels in each direction
+
+

Returns

+
+
image : array_like
+
Image without padding
+
+
+ +Expand source code + +
def remove_pad_image(self, image, padding):
+    """Removes pad from image
+
+    Parameters
+    ----------
+    image : `array_like`
+        Padded image
+    padding : `list`
+        List containing the number of padded pixels in each direction
+
+    Returns
+    ----------
+    image : `array_like`
+        Image without padding
+    """
+    
+    h, w = image.shape[:2]
+    
+    return image[padding[0][0]:h-padding[0][1], padding[1][0]:w-padding[1][1]]
+
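Continuing the pad_image example above:
+# padding == ((6, 6), (6, 6)) on a 512x512 padded image:
+#   image[6:512-6, 6:512-6] -> image[6:506, 6:506] -> 500x500, the original size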
+
+
+def reshape_image(self, image) +
+
+

Reshapes the image to the correct dimensions for Unet

+

Parameters

+
+
image : array_like
+
Image to be reshaped
+
+

Returns

+
+
image : array_like
+
Reshaped image
+
+
+ +Expand source code + +
def reshape_image(self, image):
+    """Reshapes the image to the correct dimenstions for Unet
+
+    Parameters
+    ----------
+    image : `array_like`
+        Image to be reshaped
+
+    Returns
+    ----------
+    image : `array_like`
+        Reshaped image 
+    """
+    h, w = image.shape[:2]
+    image = np.reshape(image, (h, w, -1))
+    return image
+
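Shape behavior, for reference:
+# (H, W)    -> (H, W, 1)   grayscale gains an explicit channel axis
+# (H, W, C) -> (H, W, C)   multichannel images are left unchanged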
+
+
+def tile_image(self, image, tile_size, tile_overlap_size) +
+
+

Converts an image into a list of tiled images

+

Parameters

+
+
image : array_like
+
Image to be tiled
+
tile_size : list
+
Size of each individual tile
+
tile_overlap_size : list
+
Amount of overlap (in pixels) between each tile
+
+

Returns

+
+
tile_image_list : list
+
List of tiled images
+
num_rows : int
+
Number of rows of tiles
+
num_cols : int
+
Number of columns of tiles
+
padding : list
+
Padding applied to the image before tiling
+
+
+ +Expand source code + +
def tile_image(self, image, tile_size, tile_overlap_size):
+    """Converts an image into a list of tiled images
+
+    Parameters
+    ----------
+    image : `array_like`
+        Image to be tiled
+    tile_size : `list`
+        Size of each individual tile
+    tile_overlap_size : `list`
+        Amount of overlap (in pixels) between each tile
+
+    Returns
+    ----------
+    tile_image_list : `list`
+        List of tiled images
+    num_rows : `int`
+        Number of rows of tiles
+    num_cols : `int`
+        Number of columns of tiles
+    padding : `list`
+        Padding applied to the image before tiling
+    """
+    image_height, image_width = image.shape[:2]
+    tile_height = tile_size[0] - tile_overlap_size[0] * 2
+    tile_width = tile_size[1] - tile_overlap_size[1] * 2
+    
+    if image_height <= tile_height and image_width <= tile_width:
+        # image fits in a single tile; keep the return signature consistent
+        return [self.reshape_image(image)], 1, 1, None
+    
+    num_rows = math.ceil(image_height/tile_height)
+    num_cols = math.ceil(image_width/tile_width)
+    num_tiles = num_rows*num_cols
+    
+    
+    # pad image to fit tile size
+    image, padding = self.pad_image(image, (tile_height*num_rows + tile_overlap_size[0] * 2, tile_width*num_cols + tile_overlap_size[1]*2))
+    
+    tile_image_list = []
+    
+    for tile_no in range(num_tiles):
+        tile_x_start = (tile_no // num_rows) * tile_width
+        tile_x_end = tile_x_start + tile_size[1]
+        
+        tile_y_start = (tile_no % num_rows) * tile_height
+        tile_y_end = tile_y_start + tile_size[0]
+        
+        tile_image = image[tile_y_start: tile_y_end, tile_x_start:tile_x_end]
+        
+        # ensure input into unet is of correct shape
+        tile_image = self.reshape_image(tile_image)
+        
+        tile_image_list.append(tile_image)
+        
+    return tile_image_list, num_rows, num_cols, padding
+
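A worked example of the tiling arithmetic:
+# tile_size = (512, 512), tile_overlap_size = (6, 6), image 1000x1500:
+#   effective tile = 512 - 2*6 = 500 per side
+#   num_rows = ceil(1000/500) = 2, num_cols = ceil(1500/500) = 3 -> 6 tiles
+#   the image is first padded to (2*500 + 12) x (3*500 + 12) = 1012x1512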
+
+
+def untile_image(self, tile_list, tile_size, tile_overlap_size, num_rows, num_cols, padding) +
+
+

Stitches a list of tiled images back into a single image

+

Parameters

+
+
tile_list : list
+
List of tiled images
+
tile_size : list
+
Size of each individual tile
+
tile_overlap_size : list
+
Amount of overlap (in pixels) between each tile
+
num_rows : int
+
Number of rows of tiles
+
num_cols : int
+
Number of cols of tiles
+
padding : list
+
Amount of padding used during tiling
+
+

Returns

+
+
image : array_like
+
Image without padding
+
+
+ +Expand source code + +
def untile_image(self, tile_list, tile_size, tile_overlap_size, num_rows, num_cols, padding): 
+    """Stitches a list of tiled images back into a single image
+
+    Parameters
+    ----------
+    tile_list : `list`
+        List of tiled images
+    tile_size : `list`
+        Size of each individual tile
+    tile_overlap_size : `list`
+        Amount of overlap (in pixels) between each tile
+    num_rows : `int`
+        Number of rows of tiles
+    num_cols : `int`
+        Number of cols of tiles
+    padding : `list`
+        Amount of padding used during tiling
+
+    Returns
+    ----------
+    image : `array_like`
+        Image without padding
+    """
+    if num_rows == 1 and num_cols == 1:
+        image = tile_list[0]
+        
+        # padding may be None when the image fit in a single tile
+        if padding is not None:
+            image = self.remove_pad_image(image, padding = padding)
+            
+        return image
+          
+    tile_height = tile_size[0] - tile_overlap_size[0] * 2
+    tile_width = tile_size[1] - tile_overlap_size[1] * 2
+    
+    num_tiles = num_rows*num_cols
+    
+    for col in range(num_cols):
+        for row in range(num_rows):
+            tile_image = tile_list[num_rows*col + row][:,:,0]
+            tile_image = tile_image[tile_overlap_size[0]:min(-tile_overlap_size[0],-1),tile_overlap_size[1]:min(-tile_overlap_size[1],-1)]
+            if row == 0:
+                image_col = np.array(tile_image)
+            else:
+                image_col = np.vstack((image_col, tile_image))
+        
+        if col == 0:
+            image = image_col
+        else:
+            image = np.hstack((image, image_col))
+    
+    image, _ = self.pad_image(image, image_size = (tile_height * num_rows + tile_overlap_size[0] * 2, tile_width * num_cols + tile_overlap_size[1]*2))
+    
+    if padding is not None:
+        image = self.remove_pad_image(image, padding = padding)
+        
+    return image
+
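A hedged roundtrip sketch, reusing the host-class pattern from the earlier examples and assuming the repository root is on the import path; the random array stands in for a real image:
+import numpy as np
+from models.internals.image_functions import Image_Functions
+
+class _Host(Image_Functions):
+    pass
+
+funcs = _Host()
+img = np.random.rand(1000, 1500).astype(np.float32)
+tiles, num_rows, num_cols, padding = funcs.tile_image(img, (512, 512), (6, 6))
+restored = funcs.untile_image(tiles, (512, 512), (6, 6), num_rows, num_cols, padding)
+# restored has the original 1000x1500 shape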
+
+
+
+
+
+
+ +
+ + + + + \ No newline at end of file diff --git a/html/models/internals/index.html b/html/models/internals/index.html new file mode 100644 index 0000000..e61d3fa --- /dev/null +++ b/html/models/internals/index.html @@ -0,0 +1,86 @@ + + + + + + +models.internals API documentation + + + + + + + + + +
+ + +
+ + + + + \ No newline at end of file diff --git a/html/models/internals/losses.html b/html/models/internals/losses.html new file mode 100644 index 0000000..0500ea2 --- /dev/null +++ b/html/models/internals/losses.html @@ -0,0 +1,705 @@ + + + + + + +models.internals.losses API documentation + + + + + + + + + +
+
+
+

Module models.internals.losses

+
+
+
+ +Expand source code + +
from keras import backend as K
+from keras.losses import binary_crossentropy, mean_absolute_error
+import tensorflow as tf
+
+def jaccard_distance_loss(y_true, y_pred, smooth=100):
+    """
+    Jaccard = (|X & Y|)/ (|X|+ |Y| - |X & Y|)
+            = sum(|A*B|)/(sum(|A|)+sum(|B|)-sum(|A*B|))
+    
+    The Jaccard distance loss is useful for unbalanced datasets. This has been
+    shifted so it converges on 0 and is smoothed to avoid exploding or vanishing
+    gradients.
+    
+    Ref: https://en.wikipedia.org/wiki/Jaccard_index
+    
+    @url: https://gist.github.com/wassname/f1452b748efcbeb4cb9b1d059dce6f96
+    @author: wassname
+    """
+    intersection = K.sum(y_true * y_pred, axis=-1)
+    sum_ = K.sum(y_true + y_pred, axis=-1)
+    jac = (intersection + smooth) / (sum_ - intersection + smooth)
+    return (1 - jac) * smooth
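+# worked check (illustration): identical masks give zero loss
+#   y_true = y_pred = [1, 0, 1]  ->  intersection = 2, sum_ = 4
+#   jac  = (2 + 100) / (4 - 2 + 100) = 1
+#   loss = (1 - 1) * 100 = 0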
+
+def dice_coef(y_true, y_pred, smooth=1.):
+    """
+    Dice = (2*|X & Y|)/ (|X|+ |Y|)
+         =  2*sum(|A*B|)/(sum(A^2)+sum(B^2))
+    ref: https://arxiv.org/pdf/1606.04797v1.pdf
+    
+    from wassname as well
+    """
+    intersection = K.sum(y_true * y_pred, axis=-1)
+    return (2. * intersection + smooth) / (K.sum(K.square(y_true),-1) + K.sum(K.square(y_pred),-1) + smooth)
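+# worked check (illustration): y_true = y_pred = [1, 0, 1]
+#   intersection = 2  ->  dice = (2*2 + 1) / (2 + 2 + 1) = 1.0, a perfect overlap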
+
+def dice_coef_loss(y_true, y_pred):
+    return 1. - dice_coef(y_true, y_pred)
+
+def bce_dice_loss(y_true, y_pred):
+    return 1. - dice_coef(y_true, y_pred) + binary_crossentropy(y_true, y_pred)
+
+def bce_ssim_loss(y_true, y_pred):
+    return DSSIM_loss(y_true, y_pred) + binary_crossentropy(y_true, y_pred)
+
+# code download from: https://github.com/bermanmaxim/LovaszSoftmax
+def lovasz_grad(gt_sorted):
+    """
+    Computes gradient of the Lovasz extension w.r.t sorted errors
+    See Alg. 1 in paper
+    """
+    gts = tf.reduce_sum(gt_sorted)
+    intersection = gts - tf.cumsum(gt_sorted)
+    union = gts + tf.cumsum(1. - gt_sorted)
+    jaccard = 1. - intersection / union
+    jaccard = tf.concat((jaccard[0:1], jaccard[1:] - jaccard[:-1]), 0)
+    return jaccard
+
+
+# --------------------------- BINARY LOSSES ---------------------------
+
+def lovasz_hinge(logits, labels, per_image=True, ignore=None):
+    """
+    Binary Lovasz hinge loss
+      logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty)
+      labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
+      per_image: compute the loss per image instead of per batch
+      ignore: void class id
+    """
+    if per_image:
+        def treat_image(log_lab):
+            log, lab = log_lab
+            log, lab = tf.expand_dims(log, 0), tf.expand_dims(lab, 0)
+            log, lab = flatten_binary_scores(log, lab, ignore)
+            return lovasz_hinge_flat(log, lab)
+        losses = tf.map_fn(treat_image, (logits, labels), dtype=tf.float32)
+        loss = tf.reduce_mean(losses)
+    else:
+        loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore))
+    return loss
+
+
+def lovasz_hinge_flat(logits, labels):
+    """
+    Binary Lovasz hinge loss
+      logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
+      labels: [P] Tensor, binary ground truth labels (0 or 1)
+      ignore: label to ignore
+    """
+
+    def compute_loss():
+        labelsf = tf.cast(labels, logits.dtype)
+        signs = 2. * labelsf - 1.
+        errors = 1. - logits * tf.stop_gradient(signs)
+        errors_sorted, perm = tf.nn.top_k(errors, k=tf.shape(errors)[0], name="descending_sort")
+        gt_sorted = tf.gather(labelsf, perm)
+        grad = lovasz_grad(gt_sorted)
+        loss = tf.tensordot(tf.nn.relu(errors_sorted), tf.stop_gradient(grad), 1, name="loss_non_void")
+        return loss
+
+    # deal with the void prediction case (only void pixels)
+    loss = tf.cond(tf.equal(tf.shape(logits)[0], 0),
+                   lambda: tf.reduce_sum(logits) * 0.,
+                   compute_loss,
+                   strict=True,
+                   name="loss"
+                   )
+    return loss
+
+
+def flatten_binary_scores(scores, labels, ignore=None):
+    """
+    Flattens predictions in the batch (binary case)
+    Remove labels equal to 'ignore'
+    """
+    scores = tf.reshape(scores, (-1,))
+    labels = tf.reshape(labels, (-1,))
+    if ignore is None:
+        return scores, labels
+    valid = tf.not_equal(labels, ignore)
+    vscores = tf.boolean_mask(scores, valid, name='valid_scores')
+    vlabels = tf.boolean_mask(labels, valid, name='valid_labels')
+    return vscores, vlabels
+
+def lovasz_loss(y_true, y_pred):
+    y_true, y_pred = K.cast(K.squeeze(y_true, -1), 'int32'), K.cast(K.squeeze(y_pred, -1), 'float32')
+    #logits = K.log(y_pred / (1. - y_pred))
+    logits = y_pred #Jiaxin
+    loss = lovasz_hinge(logits, y_true, per_image = True, ignore = None)
+    return loss
+
+# Difference of Structural Similarity
+
+def DSSIM_loss(y_true, y_pred, k1=0.01, k2=0.03, kernel_size=3, max_value=1.0):
+    # There are additional parameters for this function
+    # Note: some of the 'modes' for edge behavior do not yet have a
+    # gradient definition in the Theano tree
+    #   and cannot be used for learning
+    
+    c1 = (k1 * max_value) ** 2
+    c2 = (k2 * max_value) ** 2
+
+    kernel = [kernel_size, kernel_size]
+    y_true = K.reshape(y_true, [-1] + list(K.int_shape(y_pred)[1:]))
+    y_pred = K.reshape(y_pred, [-1] + list(K.int_shape(y_pred)[1:]))
+
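+    # note: the patch size below is hardcoded to 5x5 with stride 2;
+    # `kernel` above is computed from kernel_size but never used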
+    patches_pred = tf.extract_image_patches(y_pred, [1, 5, 5, 1], [1, 2, 2, 1], [1, 1, 1, 1], "SAME")
+    patches_true = tf.extract_image_patches(y_true, [1, 5, 5, 1], [1, 2, 2, 1], [1, 1, 1, 1], "SAME")
+
+    # Reshape to get the var in the cells
+    bs, w, h, c = K.int_shape(patches_pred)
+    patches_pred = K.reshape(patches_pred, [-1, w, h, c])
+    patches_true = K.reshape(patches_true, [-1, w, h, c])
+    # Get mean
+    u_true = K.mean(patches_true, axis=-1)
+    u_pred = K.mean(patches_pred, axis=-1)
+    # Get variance
+    var_true = K.var(patches_true, axis=-1)
+    var_pred = K.var(patches_pred, axis=-1)
+    # Get std dev
+    covar_true_pred = K.mean(patches_true * patches_pred, axis=-1) - u_true * u_pred
+
+    ssim = (2 * u_true * u_pred + c1) * (2 * covar_true_pred + c2)
+    denom = ((K.square(u_true)
+              + K.square(u_pred)
+              + c1) * (var_pred + var_true + c2))
+    ssim /= denom  # no need for clipping, c1 and c2 make the denom non-zero
+    return K.mean((1.0 - ssim) / 2.0)
+
+def dssim_mae_loss(y_true, y_pred):
+    return DSSIM_loss(y_true, y_pred) + mean_absolute_error(y_true, y_pred)
+
+#MSSim
+#https://stackoverflow.com/questions/48744945/keras-ms-ssim-as-loss-function
+def keras_SSIM_cs(y_true, y_pred):
+    axis=None
+    gaussian = make_kernel(1.5)
+    x = tf.nn.conv2d(y_true, gaussian, strides=[1, 1, 1, 1], padding='SAME')
+    y = tf.nn.conv2d(y_pred, gaussian, strides=[1, 1, 1, 1], padding='SAME')
+
+    u_x=K.mean(x, axis=axis)
+    u_y=K.mean(y, axis=axis)
+
+    var_x=K.var(x, axis=axis)
+    var_y=K.var(y, axis=axis)
+
+    cov_xy=cov_keras(x, y, axis)
+
+    K1=0.01
+    K2=0.03
+    L=1  # depth of image (255 in case the image has a different scale)
+
+    C1=(K1*L)**2
+    C2=(K2*L)**2
+    C3=C2/2
+
+    l = ((2*u_x*u_y)+C1) / (K.pow(u_x,2) + K.pow(u_y,2) + C1)
+    c = ((2*K.sqrt(var_x)*K.sqrt(var_y))+C2) / (var_x + var_y + C2)
+    s = (cov_xy+C3) / (K.sqrt(var_x)*K.sqrt(var_y) + C3)
+
+    return [c,s,l]
+
+def keras_MS_SSIM(y_true, y_pred):
+    iterations = 5
+    x=y_true
+    y=y_pred
+    weight = [0.0448, 0.2856, 0.3001, 0.2363, 0.1333]
+    c=[]
+    s=[]
+    for i in range(iterations):
+        cs=keras_SSIM_cs(x, y)
+        c.append(cs[0])
+        s.append(cs[1])
+        l=cs[2]
+        if(i!=4):
+            x=tf.image.resize_images(x, (x.get_shape().as_list()[1]//(2**(i+1)), x.get_shape().as_list()[2]//(2**(i+1))))
+            y=tf.image.resize_images(y, (y.get_shape().as_list()[1]//(2**(i+1)), y.get_shape().as_list()[2]//(2**(i+1))))
+    c = tf.stack(c)
+    s = tf.stack(s)
+    cs = c*s
+
+    #Normalize: suggestion from https://github.com/jorge-pessoa/pytorch-msssim/issues/2 last comment to avoid NaN values
+    l=(l+1)/2
+    cs=(cs+1)/2
+
+    cs=cs**weight
+    cs = tf.reduce_prod(cs)
+    l=l**weight[-1]
+
+    ms_ssim = l*cs
+    ms_ssim = tf.where(tf.is_nan(ms_ssim), K.zeros_like(ms_ssim), ms_ssim)
+
+    return K.mean(ms_ssim)
+
+def mssim_mae_loss(y_true, y_pred):
+    return keras_MS_SSIM(y_true, y_pred) + mean_absolute_error(y_true, y_pred)
+
+
+
+
+
+
+
+

Functions

+
+
+def DSSIM_loss(y_true, y_pred, k1=0.01, k2=0.03, kernel_size=3, max_value=1.0)
+
def DSSIM_loss(y_true, y_pred, k1=0.01, k2=0.03, kernel_size=3, max_value=1.0):
+    # There are additional parameters for this function
+    # Note: some of the 'modes' for edge behavior do not yet have a
+    # gradient definition in the Theano tree
+    #   and cannot be used for learning
+    
+    c1 = (k1 * max_value) ** 2
+    c2 = (k2 * max_value) ** 2
+
+    # NOTE: kernel_size is currently unused; the patch extraction below is hard-coded to 5x5 windows
+    kernel = [kernel_size, kernel_size]
+    y_true = K.reshape(y_true, [-1] + list(K.int_shape(y_pred)[1:]))
+    y_pred = K.reshape(y_pred, [-1] + list(K.int_shape(y_pred)[1:]))
+
+    patches_pred = tf.extract_image_patches(y_pred, [1, 5, 5, 1], [1, 2, 2, 1], [1, 1, 1, 1], "SAME")
+    patches_true = tf.extract_image_patches(y_true, [1, 5, 5, 1], [1, 2, 2, 1], [1, 1, 1, 1], "SAME")
+
+    # Reshape to get the var in the cells
+    bs, w, h, c = K.int_shape(patches_pred)
+    patches_pred = K.reshape(patches_pred, [-1, w, h, c])
+    patches_true = K.reshape(patches_true, [-1, w, h, c])
+    # Get mean
+    u_true = K.mean(patches_true, axis=-1)
+    u_pred = K.mean(patches_pred, axis=-1)
+    # Get variance
+    var_true = K.var(patches_true, axis=-1)
+    var_pred = K.var(patches_pred, axis=-1)
+    # Get covariance
+    covar_true_pred = K.mean(patches_true * patches_pred, axis=-1) - u_true * u_pred
+
+    ssim = (2 * u_true * u_pred + c1) * (2 * covar_true_pred + c2)
+    denom = ((K.square(u_true)
+              + K.square(u_pred)
+              + c1) * (var_pred + var_true + c2))
+    ssim /= denom  # no need for clipping, c1 and c2 make the denom non-zero
+    return K.mean((1.0 - ssim) / 2.0)
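A minimal usage sketch (editorial addition, not part of the generated docs; the model variable is hypothetical): DSSIM_loss follows the standard Keras loss signature, so it can be passed directly at compile time.

model.compile(optimizer='adam', loss=DSSIM_loss)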
+
+
+
+def bce_dice_loss(y_true, y_pred)
+
def bce_dice_loss(y_true, y_pred):
+    return 1. - dice_coef(y_true, y_pred) + binary_crossentropy(y_true, y_pred)
+
+
+
+def bce_ssim_loss(y_true, y_pred)
+
def bce_ssim_loss(y_true, y_pred):
+    return DSSIM_loss(y_true, y_pred) + binary_crossentropy(y_true, y_pred)
+
+
+
+def dice_coef(y_true, y_pred, smooth=1.0)

Dice = (2*|X & Y|)/ (|X|+ |Y|)
     =  2*sum(|A*B|)/(sum(A^2)+sum(B^2))
ref: https://arxiv.org/pdf/1606.04797v1.pdf

from wassname as well
+
def dice_coef(y_true, y_pred, smooth=1.):
+    """
+    Dice = (2*|X & Y|)/ (|X|+ |Y|)
+         =  2*sum(|A*B|)/(sum(A^2)+sum(B^2))
+    ref: https://arxiv.org/pdf/1606.04797v1.pdf
+    
+    from wassname as well
+    """
+    intersection = K.sum(y_true * y_pred, axis=-1)
+    return (2. * intersection + smooth) / (K.sum(K.square(y_true),-1) + K.sum(K.square(y_pred),-1) + smooth)
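A worked example (editorial addition): with a one-pixel disagreement between two four-pixel masks, the intersection is 1, so the coefficient is (2*1 + 1)/(2 + 1 + 1) = 0.75 at the default smooth = 1.

import numpy as np
from keras import backend as K
y_true = K.constant(np.array([[0., 1., 1., 0.]]))
y_pred = K.constant(np.array([[0., 1., 0., 0.]]))
print(K.eval(dice_coef(y_true, y_pred)))  # [0.75]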
+
+
+
+def dice_coef_loss(y_true, y_pred)
+
def dice_coef_loss(y_true, y_pred):
+    return 1. - dice_coef(y_true, y_pred)
+
+
+
+def dssim_mae_loss(y_true, y_pred)
+
def dssim_mae_loss(y_true, y_pred):
+    return DSSIM_loss(y_true, y_pred) + mean_absolute_error(y_true, y_pred)
+
+
+
+def flatten_binary_scores(scores, labels, ignore=None)

Flattens predictions in the batch (binary case)
Remove labels equal to 'ignore'
+
def flatten_binary_scores(scores, labels, ignore=None):
+    """
+    Flattens predictions in the batch (binary case)
+    Remove labels equal to 'ignore'
+    """
+    scores = tf.reshape(scores, (-1,))
+    labels = tf.reshape(labels, (-1,))
+    if ignore is None:
+        return scores, labels
+    valid = tf.not_equal(labels, ignore)
+    vscores = tf.boolean_mask(scores, valid, name='valid_scores')
+    vlabels = tf.boolean_mask(labels, valid, name='valid_labels')
+    return vscores, vlabels
+
+
+
+def jaccard_distance_loss(y_true, y_pred, smooth=100)

Jaccard = (|X & Y|)/ (|X|+ |Y| - |X & Y|)
        = sum(|A*B|)/(sum(|A|)+sum(|B|)-sum(|A*B|))

The Jaccard distance loss is useful for unbalanced datasets. This has been
shifted so it converges on 0 and is smoothed to avoid exploding or vanishing
gradients.

Ref: https://en.wikipedia.org/wiki/Jaccard_index

@url: https://gist.github.com/wassname/f1452b748efcbeb4cb9b1d059dce6f96
@author: wassname
+
def jaccard_distance_loss(y_true, y_pred, smooth=100):
+    """
+    Jaccard = (|X & Y|)/ (|X|+ |Y| - |X & Y|)
+            = sum(|A*B|)/(sum(|A|)+sum(|B|)-sum(|A*B|))
+    
+    The Jaccard distance loss is useful for unbalanced datasets. This has been
+    shifted so it converges on 0 and is smoothed to avoid exploding or vanishing
+    gradients.
+    
+    Ref: https://en.wikipedia.org/wiki/Jaccard_index
+    
+    @url: https://gist.github.com/wassname/f1452b748efcbeb4cb9b1d059dce6f96
+    @author: wassname
+    """
+    intersection = K.sum(y_true * y_pred, axis=-1)
+    sum_ = K.sum(y_true + y_pred, axis=-1)
+    jac = (intersection + smooth) / (sum_ - intersection + smooth)
+    return (1 - jac) * smooth
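A worked example (editorial addition): for the same toy masks used under dice_coef, intersection = 1 and sum_ = 3, so jac = (1 + 100)/(3 - 1 + 100) and the returned loss is (1 - jac) * 100 ≈ 0.98.

import numpy as np
from keras import backend as K
y_true = K.constant(np.array([[0., 1., 1., 0.]]))
y_pred = K.constant(np.array([[0., 1., 0., 0.]]))
print(K.eval(jaccard_distance_loss(y_true, y_pred)))  # ~[0.98]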
+
+
+
+def keras_MS_SSIM(y_true, y_pred)
+
def keras_MS_SSIM(y_true, y_pred):
+    iterations = 5
+    x=y_true
+    y=y_pred
+    weight = [0.0448, 0.2856, 0.3001, 0.2363, 0.1333]
+    c=[]
+    s=[]
+    for i in range(iterations):
+        cs=keras_SSIM_cs(x, y)
+        c.append(cs[0])
+        s.append(cs[1])
+        l=cs[2]
+        if(i!=4):
+            x=tf.image.resize_images(x, (x.get_shape().as_list()[1]//(2**(i+1)), x.get_shape().as_list()[2]//(2**(i+1))))
+            y=tf.image.resize_images(y, (y.get_shape().as_list()[1]//(2**(i+1)), y.get_shape().as_list()[2]//(2**(i+1))))
+    c = tf.stack(c)
+    s = tf.stack(s)
+    cs = c*s
+
+    #Normalize: suggestion from https://github.com/jorge-pessoa/pytorch-msssim/issues/2 last comment to avoid NaN values
+    l=(l+1)/2
+    cs=(cs+1)/2
+
+    cs=cs**weight
+    cs = tf.reduce_prod(cs)
+    l=l**weight[-1]
+
+    ms_ssim = l*cs
+    ms_ssim = tf.where(tf.is_nan(ms_ssim), K.zeros_like(ms_ssim), ms_ssim)
+
+    return K.mean(ms_ssim)
+
+
+
+def keras_SSIM_cs(y_true, y_pred)
+
def keras_SSIM_cs(y_true, y_pred):
+    axis=None
+    gaussian = make_kernel(1.5)
+    x = tf.nn.conv2d(y_true, gaussian, strides=[1, 1, 1, 1], padding='SAME')
+    y = tf.nn.conv2d(y_pred, gaussian, strides=[1, 1, 1, 1], padding='SAME')
+
+    u_x=K.mean(x, axis=axis)
+    u_y=K.mean(y, axis=axis)
+
+    var_x=K.var(x, axis=axis)
+    var_y=K.var(y, axis=axis)
+
+    cov_xy=cov_keras(x, y, axis)
+
+    K1=0.01
+    K2=0.03
+    L=1  # depth of image (255 if the image has a different scale)
+
+    C1=(K1*L)**2
+    C2=(K2*L)**2
+    C3=C2/2
+
+    l = ((2*u_x*u_y)+C1) / (K.pow(u_x,2) + K.pow(u_y,2) + C1)
+    c = ((2*K.sqrt(var_x)*K.sqrt(var_y))+C2) / (var_x + var_y + C2)
+    s = (cov_xy+C3) / (K.sqrt(var_x)*K.sqrt(var_y) + C3)
+
+    return [c,s,l]
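For reference (editorial note, reconstructed from the code above): the three components returned are the standard SSIM terms

l = (2*u_x*u_y + C1) / (u_x^2 + u_y^2 + C1)
c = (2*sqrt(var_x)*sqrt(var_y) + C2) / (var_x + var_y + C2)
s = (cov_xy + C3) / (sqrt(var_x)*sqrt(var_y) + C3)

with C1 = (0.01*L)^2, C2 = (0.03*L)^2, C3 = C2/2, and L the assumed dynamic range of the image.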
+
+
+
+def lovasz_grad(gt_sorted)

Computes gradient of the Lovasz extension w.r.t sorted errors
See Alg. 1 in paper
+
def lovasz_grad(gt_sorted):
+    """
+    Computes gradient of the Lovasz extension w.r.t sorted errors
+    See Alg. 1 in paper
+    """
+    gts = tf.reduce_sum(gt_sorted)
+    intersection = gts - tf.cumsum(gt_sorted)
+    union = gts + tf.cumsum(1. - gt_sorted)
+    jaccard = 1. - intersection / union
+    jaccard = tf.concat((jaccard[0:1], jaccard[1:] - jaccard[:-1]), 0)
+    return jaccard
+
+
+
+def lovasz_hinge(logits, labels, per_image=True, ignore=None)

Binary Lovasz hinge loss
  logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty)
  labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
  per_image: compute the loss per image instead of per batch
  ignore: void class id
+
def lovasz_hinge(logits, labels, per_image=True, ignore=None):
+    """
+    Binary Lovasz hinge loss
+      logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty)
+      labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
+      per_image: compute the loss per image instead of per batch
+      ignore: void class id
+    """
+    if per_image:
+        def treat_image(log_lab):
+            log, lab = log_lab
+            log, lab = tf.expand_dims(log, 0), tf.expand_dims(lab, 0)
+            log, lab = flatten_binary_scores(log, lab, ignore)
+            return lovasz_hinge_flat(log, lab)
+        losses = tf.map_fn(treat_image, (logits, labels), dtype=tf.float32)
+        loss = tf.reduce_mean(losses)
+    else:
+        loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore))
+    return loss
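A hedged usage sketch (editorial addition; variable names are illustrative): for a batch of logit maps and binary masks shaped [B, H, W],

loss = lovasz_hinge(logits, labels, per_image=True)

averages the per-image Lovasz hinge over the batch; lovasz_loss below wraps this for [B, H, W, 1] Keras tensors.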
+
+
+
+def lovasz_hinge_flat(logits, labels)

Binary Lovasz hinge loss
  logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
  labels: [P] Tensor, binary ground truth labels (0 or 1)
+
def lovasz_hinge_flat(logits, labels):
+    """
+    Binary Lovasz hinge loss
+      logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
+      labels: [P] Tensor, binary ground truth labels (0 or 1)
+    """
+
+    def compute_loss():
+        labelsf = tf.cast(labels, logits.dtype)
+        signs = 2. * labelsf - 1.
+        errors = 1. - logits * tf.stop_gradient(signs)
+        errors_sorted, perm = tf.nn.top_k(errors, k=tf.shape(errors)[0], name="descending_sort")
+        gt_sorted = tf.gather(labelsf, perm)
+        grad = lovasz_grad(gt_sorted)
+        loss = tf.tensordot(tf.nn.relu(errors_sorted), tf.stop_gradient(grad), 1, name="loss_non_void")
+        return loss
+
+    # deal with the void prediction case (only void pixels)
+    loss = tf.cond(tf.equal(tf.shape(logits)[0], 0),
+                   lambda: tf.reduce_sum(logits) * 0.,
+                   compute_loss,
+                   strict=True,
+                   name="loss"
+                   )
+    return loss
+
+
+
+def lovasz_loss(y_true, y_pred)
+
def lovasz_loss(y_true, y_pred):
+    y_true, y_pred = K.cast(K.squeeze(y_true, -1), 'int32'), K.cast(K.squeeze(y_pred, -1), 'float32')
+    #logits = K.log(y_pred / (1. - y_pred))
+    logits = y_pred #Jiaxin
+    loss = lovasz_hinge(logits, y_true, per_image = True, ignore = None)
+    return loss
+
+
+
+def mssim_mae_loss(y_true, y_pred)
+
def mssim_mae_loss(y_true, y_pred):
+    return keras_MS_SSIM(y_true, y_pred) + mean_absolute_error(y_true, y_pred)
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/html/models/internals/network_config.html b/html/models/internals/network_config.html
new file mode 100644
index 0000000..b11b990
--- /dev/null
+++ b/html/models/internals/network_config.html
@@ -0,0 +1,908 @@
+models.internals.network_config API documentation
+
+
+

Module models.internals.network_config

+
+
+
import glob
+import os
+from ruamel.yaml import YAML
+
+class Network_Config(object):
+    def __init__(self, model_dir = None, config_filepath = None, **kwargs):
+        """Creates Network_Config object that contains the network parameters and functions needed to manipulate these parameters.
+    
+        Parameters
+        ----------
+        model_dir : `str`, optional
+            [Default: None] Folder where the model is to be saved/read from
+        config_filepath : `str`, optional
+            [Default: None] Filepath to the config file that will be loaded
+        **kwargs
+            For network parameters that are to be changed from the loaded config file
+
+        Attributes
+        ----------
+        yaml : :class:`ruamel.yaml.YAML`
+            YAML class with function needed to read/write YAML files 
+        config : `dict`
+            Dictionary containing the config parameters
+        """
+        self.yaml=YAML()
+        
+        # load config file from model_dir
+        if config_filepath is not None:
+            
+            self.config = self.load_config_from_file(config_filepath)
+            print("Loaded config file from {}".format(config_filepath))
+        elif model_dir is not None:
+            try:
+                self.config = self.load_config_from_model_dir(model_dir)
+                print("Loaded config file from {}".format(model_dir))
+            except:
+                print("Please ensure that config_filepath is set or there is a config file in model_dir")
+                raise
+            
+        if model_dir is not None:
+            # update model_dir in config
+            print("Updating model_dir to {}".format(model_dir))
+            self.update_parameter(["general", "model_dir"], model_dir)
+        
+        # overwrite network parameters with parameters given during initialization
+        for key, value in kwargs.items():
+            self.update_parameter(self.find_key(key), value)
+            
+        # perform calculations
+        self.update_parameter(["model", "input_size"], self.get_parameter("tile_size") + [self.get_parameter("image_channel"),])
+        self.update_parameter(["model", "batch_size"], self.get_parameter("batch_size_per_GPU")) # * self.gpu_count
+                  
+    ######################
+    # Accessors/Mutators
+    ######################
+    def get_parameter(self, parameter, config = []):
+        """Output the value from the config file using the given key
+
+        Parameters
+        ----------
+        parameter : `list` or `str`
+            Key or list of keys used to find the value in the config file
+        
+        config : `list`, optional
+            Used to iterate through nested dictionaries. Required to recursively iterate through the nested dictionary
+            
+        Returns
+        ----------
+        value : `str` or `int` or `list`
+            Value obtained from the specified key
+            
+        See Also
+        ----------
+        find_key : Function to identify the list of keys to address the correct item in a nested dictionary
+        """
+        assert isinstance(parameter, (list, str))
+        
+        # find for key in nested dictionary
+        if isinstance(parameter, str):
+            parameter = self.find_key(parameter)
+        
+        if config == []:
+            config = self.config
+        if config is None:
+            return None
+        
+        if not parameter:
+            return config
+        
+        return self.get_parameter(parameter[1:], config = config.get(parameter[0]))
+
+    def update_parameter(self, parameter, value, config = None):
+        """Updates the parameter in the config file using a full addressed list
+
+        Parameters
+        ----------
+        parameter : `list`
+            List of keys that point to the correct item in the nested dictionary
+            
+        value : `str` or `int` or `list`
+            Value that is updated in the nested dictionary
+            
+        config : `list` or `none`, optional
+            Used to iterate through nested dictionaries
+            
+        Returns
+        ----------
+        config : `dict`
+            The updated (sub)dictionary containing the new value
+        """
+        
+        assert type(parameter) is list
+                
+        if config is None:
+            config = self.config
+        
+        if len(parameter) == 1:
+            config.update({parameter[0]: value})
+            return config
+        # recurse into the current sub-dictionary (not the root config)
+        return self.update_parameter(parameter[1:], value, config = config.get(parameter[0]))
+
+    def find_key(self, key, config = None):
+        """Find the list of keys to address the correct item in a nested dictionary
+
+        Parameters
+        ----------
+        key : `str`
+            Key that needs to be correctly addressed in a nested dictionary
+            
+        config : `list` or `none`, optional
+            Used to iterate through nested dictionaries
+            
+        Returns
+        ----------
+        key : `list`
+            Address of the key in the nested dictionary
+        """
+        
+        if config is None:
+            config = self.config
+            
+        for k, v in config.items():
+            if k == key:
+                return [k]
+            elif isinstance(v, dict):
+                found_key = self.find_key(key, config = v)
+                if found_key is not None:
+                    return [k] + found_key
+    
+    ######################
+    # Config IO options
+    ######################
+    def load_config_from_file(self, file_path):
+        """Load parameters from yaml file
+
+        Parameters
+        ----------
+        file_path : `str`
+            Path of config file to load
+            
+        Returns
+        ----------
+        config : `dict`
+            Dictionary containing the config parameters
+        """
+
+        with open(file_path, 'r') as input_file: 
+            config = self.yaml.load(input_file)
+
+        return config
+    
+    def load_config_from_model_dir(self, model_dir):
+        """Finds a config file in the model directory and loads it
+    
+        Parameters
+        ----------
+        model_dir : `str`
+            Folder to search for and load the config file
+
+        Returns
+        ----------
+        config : `dict`
+            Dictionary containing the config parameters
+            
+        Raises
+        ------
+        IndexError
+            If there is no config file in model_dir
+        """
+        
+        # check if yaml file exists in model_dir
+        try:
+            list_config_files = glob.glob(os.path.join(model_dir,'*config.yml'))
+            if len(list_config_files) > 1:
+                print("Multiple config files found. Loading {}".format(list_config_files[0]))
+            else:
+                print("Config file exists in model directory. Loading {}".format(list_config_files[0]))
+            return self.load_config_from_file(list_config_files[0])
+        except IndexError:
+            print("No config file found in model_dir.")
+            raise
+
+    def write_config(self, file_path):
+        """Writes parameters to yaml file
+
+        Parameters
+        ----------
+        file_path : `str`
+            Path of config file to write to
+        """
+        
+        with open(file_path, 'w') as output_file:  
+            self.yaml.dump(self.config, output_file)
+
+        
+        print("Config file written to: {}".format(file_path))
+    
+    def write_model(self, model, file_path):
+        """Writes the model architecture to a yaml file
+
+        Parameters
+        ----------
+        model : :class:`Keras.model`
+            Keras model that will be parsed and written to a yaml file
+        
+        file_path : `str`
+            Path of model file to write to
+        """
+        
+        with open(file_path, 'w') as output_file:  
+            output_file.write(model.to_yaml())
+
+        
+        print("Model file written to: {}".format(file_path))
+
+
+
+
+
+
+
+
+
+

Classes

+
+
+class Network_Config(model_dir=None, config_filepath=None, **kwargs)
+
+

Creates Network_Config object that contains the network parameters and functions needed to manipulate these parameters.

+

Parameters

+
+
model_dir : str, optional
+
[Default: None] Folder where the model is to be saved/read from
+
config_filepath : str, optional
+
[Default: None] Filepath to the config file that will be loaded
+
**kwargs
+
For network parameters that are to be changed from the loaded config file
+
+

Attributes

+
+
yaml : :class:ruamel.yaml.YAML
+
YAML class with function needed to read/write YAML files
+
config : dict
+
Dictionary containing the config parameters
+
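A usage sketch (editorial addition; the model_dir path is hypothetical, the config file is one of those shipped in this commit):

config = Network_Config(model_dir="models/my_unet",
                        config_filepath="configs/default_unet.yml",
                        batch_size_per_GPU=8)
print(config.get_parameter("tile_size"))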
+
class Network_Config(object):
+    def __init__(self, model_dir = None, config_filepath = None, **kwargs):
+        """Creates Network_Config object that contains the network parameters and functions needed to manipulate these parameters.
+    
+        Parameters
+        ----------
+        model_dir : `str`, optional
+            [Default: None] Folder where the model is to be saved/read from
+        config_filepath : `str`, optional
+            [Default: None] Filepath to the config file that will be loaded
+        **kwargs
+            For network parameters that are to be changed from the loaded config file
+
+        Attributes
+        ----------
+        yaml : :class:`ruamel.yaml.YAML`
+            YAML class with function needed to read/write YAML files 
+        config : `dict`
+            Dictionary containing the config parameters
+        """
+        self.yaml=YAML()
+        
+        # load config file from model_dir
+        if config_filepath is not None:
+            
+            self.config = self.load_config_from_file(config_filepath)
+            print("Loaded config file from {}".format(config_filepath))
+        elif model_dir is not None:
+            try:
+                self.config = self.load_config_from_model_dir(model_dir)
+                print("Loaded config file from {}".format(model_dir))
+            except:
+                print("Please ensure that config_filepath is set or there is a config file in model_dir")
+                raise
+            
+        if model_dir is not None:
+            # update model_dir in config
+            print("Updating model_dir to {}".format(model_dir))
+            self.update_parameter(["general", "model_dir"], model_dir)
+        
+        # overwrite network parameters with parameters given during initialization
+        for key, value in kwargs.items():
+            self.update_parameter(self.find_key(key), value)
+            
+        # perform calculations
+        self.update_parameter(["model", "input_size"], self.get_parameter("tile_size") + [self.get_parameter("image_channel"),])
+        self.update_parameter(["model", "batch_size"], self.get_parameter("batch_size_per_GPU")) # * self.gpu_count
+                  
+    ######################
+    # Accessors/Mutators
+    ######################
+    def get_parameter(self, parameter, config = []):
+        """Output the value from the config file using the given key
+
+        Parameters
+        ----------
+        parameter : `list` or `str`
+            Key or list of keys used to find the value in the config file
+        
+        config : `list`, optional
+            Used to iterate through nested dictionaries. Required to recursively iterate through the nested dictionary
+            
+        Returns
+        ----------
+        value : `str` or `int` or `list`
+            Value obtained from the specified key
+            
+        See Also
+        ----------
+        find_key : Function to identify the list of keys to address the correct item in a nested dictionary
+        """
+        assert isinstance(parameter, (list, str))
+        
+        # find for key in nested dictionary
+        if isinstance(parameter, str):
+            parameter = self.find_key(parameter)
+        
+        if config == []:
+            config = self.config
+        if config is None:
+            return None
+        
+        if not parameter:
+            return config
+        
+        return self.get_parameter(parameter[1:], config = config.get(parameter[0]))
+
+    def update_parameter(self, parameter, value, config = None):
+        """Updates the parameter in the config file using a full addressed list
+
+        Parameters
+        ----------
+        parameter : `list`
+            List of keys that point to the correct item in the nested dictionary
+            
+        value : `str` or `int` or `list`
+            Value that is updated in the nested dictionary
+            
+        config : `list` or `none`, optional
+            Used to iterate through nested dictionaries
+            
+        Returns
+        ----------
+        config : `dict`
+            The updated (sub)dictionary containing the new value
+        """
+        
+        assert type(parameter) is list
+                
+        if config is None:
+            config = self.config
+        
+        if len(parameter) == 1:
+            config.update({parameter[0]: value})
+            return config
+        # recurse into the current sub-dictionary (not the root config)
+        return self.update_parameter(parameter[1:], value, config = config.get(parameter[0]))
+
+    def find_key(self, key, config = None):
+        """Find the list of keys to address the correct item in a nested dictionary
+
+        Parameters
+        ----------
+        key : `str`
+            Key that needs to be correctly addressed in a nested dictionary
+            
+        config : `list` or `none`, optional
+            Used to iterate through nested dictionaries
+            
+        Returns
+        ----------
+        key : `list`
+            Address of the key in the nested dictionary
+        """
+        
+        if config is None:
+            config = self.config
+            
+        for k, v in config.items():
+            if k == key:
+                return [k]
+            elif isinstance(v, dict):
+                found_key = self.find_key(key, config = v)
+                if found_key is not None:
+                    return [k] + found_key
+    
+    ######################
+    # Config IO options
+    ######################
+    def load_config_from_file(self, file_path):
+        """Load parameters from yaml file
+
+        Parameters
+        ----------
+        file_path : `str`
+            Path of config file to load
+            
+        Returns
+        ----------
+        config : `dict`
+            Dictionary containing the config parameters
+        """
+
+        with open(file_path, 'r') as input_file: 
+            config = self.yaml.load(input_file)
+
+        return config
+    
+    def load_config_from_model_dir(self, model_dir):
+        """Finds a config file in the model directory and loads it
+    
+        Parameters
+        ----------
+        model_dir : `str`
+            Folder to search for and load the config file
+
+        Returns
+        ----------
+        config : `dict`
+            Dictionary containing the config parameters
+            
+        Raises
+        ------
+        IndexError
+            If there is no config file in model_dir
+        """
+        
+        # check if yaml file exists in model_dir
+        try:
+            list_config_files = glob.glob(os.path.join(model_dir,'*config.yml'))
+            if len(list_config_files) > 1:
+                print("Multiple config files found. Loading {}".format(list_config_files[0]))
+            else:
+                print("Config file exists in model directory. Loading {}".format(list_config_files[0]))
+            return self.load_config_from_file(list_config_files[0])
+        except IndexError:
+            print("No config file found in model_dir.")
+            raise
+
+    def write_config(self, file_path):
+        """Writes parameters to yaml file
+
+        Parameters
+        ----------
+        file_path : `str`
+            Path of config file to write to
+        """
+        
+        with open(file_path, 'w') as output_file:  
+            self.yaml.dump(self.config, output_file)
+
+        
+        print("Config file written to: {}".format(file_path))
+    
+    def write_model(self, model, file_path):
+        """Writes the model architecture to a yaml file
+
+        Parameters
+        ----------
+        model : :class:`Keras.model`
+            Keras model that will be parsed and written to a yaml file
+        
+        file_path : `str`
+            Path of model file to write to
+        """
+        
+        with open(file_path, 'w') as output_file:  
+            output_file.write(model.to_yaml())
+
+        
+        print("Model file written to: {}".format(file_path))
+
+

Methods

+
+
+def find_key(self, key, config=None)
+
+

Find the list of keys to address the correct item in a nested dictionary

+

Parameters

+
+
key : str
+
Key that needs to be correctly addressed in a nested dictionary
+
config : list or none, optional
+
Used to iterate through nested dictionaries
+
+

Returns

+
+
key : list
+
Address of the key in the nested dictionary
+
+
def find_key(self, key, config = None):
+    """Find the list of keys to address the correct item in a nested dictionary
+
+    Parameters
+    ----------
+    key : `str`
+        Key that needs to be correctly addressed in a nested dictionary
+        
+    config : `list` or `none`, optional
+        Used to iterate through nested dictionaries
+        
+    Returns
+    ----------
+    key : `list`
+        Address of the key in the nested dictionary
+    """
+    
+    if config is None:
+        config = self.config
+        
+    for k, v in config.items():
+        if k == key:
+            return [k]
+        elif isinstance(v, dict):
+            found_key = self.find_key(key, config = v)
+            if found_key is not None:
+                return [k] + found_key
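For example (editorial addition, based on the constructor above, which stores model_dir under the "general" section):

config.find_key("model_dir")  # -> ["general", "model_dir"]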
+
+
+
+def get_parameter(self, parameter, config=[])
+
+

Output the value from the config file using the given key

+

Parameters

+
+
parameter : list or str
+
Key or list of keys used to find the value in the config file
+
config : list, optional
+
Used to iterate through nested dictionaries. Required to recursively iterate through the nested dictionary
+
+

Returns

+
+
value : str or int or list
+
Value obtained from the specified key
+
+

See Also

+
+
find_key
+
Function to identify the list of keys to address the correct item in a nested dictionary
+
+
def get_parameter(self, parameter, config = []):
+    """Output the value from the config file using the given key
+
+    Parameters
+    ----------
+    parameter : `list` or `str`
+        Key or list of keys used to find the value in the config file
+    
+    config : `list`, optional
+        Used to iterate through nested dictionaries. Required to recursively iterate through the nested dictionary
+        
+    Returns
+    ----------
+    value : `str` or `int` or `list`
+        Value obtained from the specified key
+        
+    See Also
+    ----------
+    find_key : Function to identify the list of keys to address the correct item in a nested dictionary
+    """
+    assert isinstance(parameter, (list, str))
+    
+    # find for key in nested dictionary
+    if isinstance(parameter, str):
+        parameter = self.find_key(parameter)
+    
+    if config == []:
+        config = self.config
+    if config is None:
+        return None
+    
+    if not parameter:
+        return config
+    
+    return self.get_parameter(parameter[1:], config = config.get(parameter[0]))
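Both access forms are equivalent (editorial sketch, assuming the config object from the class example above):

config.get_parameter("model_dir")               # short key, resolved via find_key
config.get_parameter(["general", "model_dir"])  # fully addressed key list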
+
+
+
+def load_config_from_file(self, file_path)
+
+

Load parameters from yaml file

+

Parameters

+
+
file_path : str
+
Path of config file to load
+
+

Returns

+
+
config : dict
+
Dictionary containing the config parameters
+
+
def load_config_from_file(self, file_path):
+    """Load parameters from yaml file
+
+    Parameters
+    ----------
+    file_path : `str`
+        Path of config file to load
+        
+    Returns
+    ----------
+    config : `dict`
+        Dictionary containing the config parameters
+    """
+
+    with open(file_path, 'r') as input_file: 
+        config = self.yaml.load(input_file)
+
+    return config
+
+
+
+def load_config_from_model_dir(self, model_dir)
+
+

Finds a config file in the model directory and loads it

+

Parameters

+
+
model_dir : str
+
Folder to search for and load the config file
+
+

Returns

+
+
config : dict
+
Dictionary containing the config parameters
+
+

Raises

+
+
IndexError
+
If there is no config file in model_dir
+
+
def load_config_from_model_dir(self, model_dir):
+    """Finds a config file in the model directory and loads it
+
+    Parameters
+    ----------
+    model_dir : `str`
+        Folder to search for and load the config file
+
+    Returns
+    ----------
+    config : `dict`
+        Dictionary containing the config parameters
+        
+    Raises
+    ------
+    IndexError
+        If there is no config file in model_dir
+    """
+    
+    # check if yaml file exists in model_dir
+    try:
+        list_config_files = glob.glob(os.path.join(model_dir,'*config.yml'))
+        if len(list_config_files) > 1:
+            print("Multiple config files found. Loading {}".format(list_config_files[0]))
+        else:
+            print("Config file exists in model directory. Loading {}".format(list_config_files[0]))
+        return self.load_config_from_file(list_config_files[0])
+    except IndexError:
+        print("No config file found in model_dir.")
+        raise
+
+
+
+def update_parameter(self, parameter, value, config=None)
+
+

Updates the parameter in the config file using a full addressed list

+

Parameters

+
+
parameter : list
+
List of keys that point to the correct item in the nested dictionary
+
value : str or int or list
+
Value that is updated in the nested dictionary
+
config : list or none, optional
+
Used to iterate through nested dictionaries
+
+

Returns

+
+
config : dict
+
The updated (sub)dictionary containing the new value
+
+
def update_parameter(self, parameter, value, config = None):
+    """Updates the parameter in the config file using a full addressed list
+
+    Parameters
+    ----------
+    parameter : `list`
+        List of keys that point to the correct item in the nested dictionary
+        
+    value : `str` or `int` or `list`
+        Value that is updated in the nested dictionary
+        
+    config : `list` or `none`, optional
+        Used to iterate through nested dictionaries
+        
+    Returns
+    ----------
+    config : `dict`
+        The updated (sub)dictionary containing the new value
+    """
+    
+    assert type(parameter) is list
+            
+    if config is None:
+        config = self.config
+    
+    if len(parameter) == 1:
+        config.update({parameter[0]: value})
+        return config
+    # recurse into the current sub-dictionary (not the root config)
+    return self.update_parameter(parameter[1:], value, config = config.get(parameter[0]))
+
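A usage sketch (editorial addition; the new path is hypothetical) showing the fully addressed key list this method expects:

config.update_parameter(["general", "model_dir"], "models/new_run")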
+
+
+def write_config(self, file_path)
+
+

Writes parameters to yaml file

+

Parameters

+
+
file_path : str
+
Path of config file to write to
+
+
def write_config(self, file_path):
+    """Writes parameters to yaml file
+
+    Parameters
+    ----------
+    file_path : `str`
+        Path of config file to write to
+    """
+    
+    with open(file_path, 'w') as output_file:  
+        self.yaml.dump(self.config, output_file)
+
+    
+    print("Config file written to: {}".format(file_path))
+
+
+
+def write_model(self, model, file_path)
+
+

Writes the model architecture to a yaml file

+

Parameters

+
+
model : :class:Keras.model
+
Keras model that will be parsed and written to a yaml file
+
file_path : str
+
Path of model file to write to
+
+
def write_model(self, model, file_path):
+    """Writes the model architecture to a yaml file
+
+    Parameters
+    ----------
+    model : :class:`Keras.model`
+        Keras model that will be parsed and written to a yaml file
+    
+    file_path : `str`
+        Path of model file to write to
+    """
+    
+    with open(file_path, 'w') as output_file:  
+        output_file.write(model.to_yaml())
+
+    
+    print("Model file written to: {}".format(file_path))
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/html/models/layers/index.html b/html/models/layers/index.html
new file mode 100644
index 0000000..cc15a41
--- /dev/null
+++ b/html/models/layers/index.html
@@ -0,0 +1,71 @@
+models.layers API documentation
+
+
+

Module models.layers

+
+
+
from __future__ import absolute_import, print_function
+
+
+
+

Sub-modules

+
+
models.layers.layers
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/html/models/layers/layers.html b/html/models/layers/layers.html
new file mode 100644
index 0000000..9eca4b5
--- /dev/null
+++ b/html/models/layers/layers.html
@@ -0,0 +1,222 @@
+models.layers.layers API documentation
+
+
+

Module models.layers.layers

+
+
+
import math
+
+import keras
+from keras.models import Model, load_model
+from keras.layers import Input, BatchNormalization, Activation
+from keras.layers.core import Lambda, Dropout
+from keras.layers.convolutional import Conv2D, Conv2DTranspose, UpSampling2D
+from keras.layers.convolutional_recurrent import ConvLSTM2D
+from keras.layers.pooling import MaxPooling2D
+from keras.layers.merge import Concatenate, Add
+from keras import regularizers
+from keras import backend as K
+
+import tensorflow as tf
+
+def activation_function(inputs, acti):
+    if isinstance(acti, str):
+        return Activation(acti)(inputs)
+    else:
+        return acti(inputs)
+
+def regularizer_function(weight_regularizer):
+    if weight_regularizer == 0 or weight_regularizer is None:
+        return None
+    else:
+        return regularizers.l2(weight_regularizer)
+    
+def bn_relu_conv2d(inputs, filters, filter_size, 
+                    strides = 1, acti = None, padding = None, 
+                    kernel_initializer = None, weight_regularizer = None, name = ""):
+    output = BatchNormalization()(inputs)
+    output = activation_function(output, acti)
+    output = Conv2D(filters, (filter_size, filter_size), padding=padding, strides = strides,
+                    kernel_initializer=kernel_initializer, 
+                    kernel_regularizer=regularizer_function(weight_regularizer))(output)
+            
+    return output
+
+def bn_relu_conv2dtranspose(inputs, filters, filter_size, 
+                            strides = 2, acti = None, padding = None, 
+                            kernel_initializer = None, weight_regularizer = None, name = ""):
+    output = BatchNormalization()(inputs)
+    output = activation_function(output, acti)
+    # NOTE: filter_size is unused here; the transposed-convolution kernel is hard-coded to (2, 2)
+    output = Conv2DTranspose(filters, (2, 2), strides=strides, padding=padding, 
+                             kernel_initializer=kernel_initializer, 
+                             kernel_regularizer=regularizer_function(weight_regularizer))(output)
+    return output
+
+def normalize_input(inputs, scale_input = False, mean_std_normalization = False, mean = None, std = None):
+    if mean_std_normalization is True:
+        print("Using normalization")
+        return Lambda(lambda x: (x - mean)/std)(inputs)
+    elif scale_input is True:
+        print("Scaling input by 1/255")
+        return Lambda(lambda x: x / 255)(inputs)
+    else:
+        return inputs
+            
+    
+
+
+
+
+
+
+
+

Functions

+
+
+def activation_function(inputs, acti)
+
def activation_function(inputs, acti):
+    if isinstance(acti, str):
+        return Activation(acti)(inputs)
+    else:
+        return acti(inputs)
+
+
+
+def bn_relu_conv2d(inputs, filters, filter_size, strides=1, acti=None, padding=None, kernel_initializer=None, weight_regularizer=None, name='')
+
def bn_relu_conv2d(inputs, filters, filter_size, 
+                    strides = 1, acti = None, padding = None, 
+                    kernel_initializer = None, weight_regularizer = None, name = ""):
+    output = BatchNormalization()(inputs)
+    output = activation_function(output, acti)
+    output = Conv2D(filters, (filter_size, filter_size), padding=padding, strides = strides,
+                    kernel_initializer=kernel_initializer, 
+                    kernel_regularizer=regularizer_function(weight_regularizer))(output)
+            
+    return output
+
+
+
+def bn_relu_conv2dtranspose(inputs, filters, filter_size, strides=2, acti=None, padding=None, kernel_initializer=None, weight_regularizer=None, name='')
+
def bn_relu_conv2dtranspose(inputs, filters, filter_size, 
+                            strides = 2, acti = None, padding = None, 
+                            kernel_initializer = None, weight_regularizer = None, name = ""):
+    output = BatchNormalization()(inputs)
+    output = activation_function(output, acti)
+    # NOTE: filter_size is unused here; the transposed-convolution kernel is hard-coded to (2, 2)
+    output = Conv2DTranspose(filters, (2, 2), strides=strides, padding=padding, 
+                             kernel_initializer=kernel_initializer, 
+                             kernel_regularizer=regularizer_function(weight_regularizer))(output)
+    return output
+
+
+
+def normalize_input(inputs, scale_input=False, mean_std_normalization=False, mean=None, std=None)
+
def normalize_input(inputs, scale_input = False, mean_std_normalization = False, mean = None, std = None):
+    if mean_std_normalization is True:
+        print("Using normalization")
+        return Lambda(lambda x: (x - mean)/std)(inputs)
+    elif scale_input is True:
+        print("Scaling input by 1/255")
+        return Lambda(lambda x: x / 255)(inputs)
+    else:
+        return inputs
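A minimal sketch (editorial addition; the input shape is illustrative) of wiring this into a model definition:

from keras.layers import Input
inputs = Input(shape=(512, 512, 1))
x = normalize_input(inputs, scale_input=True)  # rescale raw 8-bit intensities to [0, 1]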
+
+
+
+def regularizer_function(weight_regularizer)
+
def regularizer_function(weight_regularizer):
+    if weight_regularizer == 0 or weight_regularizer is None:
+        return None
+    else:
+        return regularizers.l2(weight_regularizer)
+
+
+
+
+
+
+
+ +
+ + + + + \ No newline at end of file diff --git a/models/.DS_Store b/models/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..23ef0c07f563399a297ab4aef4a937c1b08d174b GIT binary patch literal 6148 zcmeHKJ5EC}5S)bw1<|CW^cA>)6@?RU0Ro{wdLj}b1>LJS7e{0EBOrQ#jt0$2>#^56 zwmik#w*YLp-&_F;0CTz{-h7yv@4HXzq#}+MXFTHxYYf8c94$%S-XAFmN9=!yyd4Sjp$3#YGmQ-R=ty&CAI^(VKdf}LubXeTXbLwWR4#nbj z##^MrdZI=tAO#K;IL+n4>;E*R4>Ya9KR?m6#t rH_n5?A<8i^$}tySjxQrA^P11O-wVgYpfetHqJ9Qk7nv0JYXv?4i%J`s literal 0 HcmV?d00001 diff --git a/models/._.DS_Store b/models/._.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..8e82ed96c0d694f6a640da6c163a3ef7e4194513 GIT binary patch literal 4096 zcmZQz6=P>$Vqox1Ojhs@R)|o50+1L3ClDJkFz{^v(m+1nBL)UWIhYCu0iY;W;207T z1e9Qe=wPV8s%TVjGz3ONU^E0qLtr!nMnhmU1V%$(Gz3ONU^E0qLtr!ns2l>ojwq-z z3ITE<8JWcjMXAO4rA5i93TgR8*$SC?C8>EOnfZB%IXRUIIjLzS3Q0MMdD+0OE==1{ PrcwP5_lXRH-2eXo=JOoa literal 0 HcmV?d00001 diff --git a/models/._CNN_Base.py b/models/._CNN_Base.py new file mode 100644 index 0000000000000000000000000000000000000000..88177346c137f3a38e4624af15d0123500ab066f GIT binary patch literal 4096 zcmZQz6=P>$Vqox1Ojhs@R)|o50+1L3ClDJkFz{^v(m+1nBL)UWIUt(=a103v0xDsI z=wMg?WDB5a0m{L|rIPb=^%4sTa#Hnj5{pYpi&Ill5=&B*1A;+%Jeo4%Ky+`f3c48} z;ZZOe0;3@?8UmvsFd71*Aut*OqaiRF0;3@?8UmvsFd72z5CC;HK^P3=LNYRo6^c@e z^Gl18Qx($ki?S6m^GZ_lN;32F5_57Y6>?J3N)(cE67#ZwePO7sAx)wBAMOj-T(jq literal 0 HcmV?d00001 diff --git a/models/.ipynb_checkpoints/CNN_Base-checkpoint.py b/models/.ipynb_checkpoints/CNN_Base-checkpoint.py new file mode 100644 index 0000000..ab13adc --- /dev/null +++ b/models/.ipynb_checkpoints/CNN_Base-checkpoint.py @@ -0,0 +1,570 @@ +import os + +import glob +import datetime + +import skimage.io +import numpy as np + +import tensorflow as tf + +import keras +from keras import backend as K +from keras.models import Model, load_model +from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint, TensorBoard, ProgbarLogger + +from .internals.image_functions import Image_Functions +from .internals.network_config import Network_Config +from .internals.dataset import Dataset + +class CNN_Base(Dataset, Image_Functions): + def __init__(self, model_dir = None, config_filepath = None, **kwargs): + """Creates the base neural network class with basic functions + + Parameters + ---------- + model_dir : `str`, optional + [Default: None] Folder where the model is stored + config_filepath : `str`, optional + [Default: None] Filepath to the config file + **kwargs + Parameters that are passed to :class:`network_config.Network_Config` + + Attributes + ---------- + config : :class:`network_config.Network_Config` + Network_config object containing the config and necessary functions + """ + + super().__init__() + + self.config = Network_Config(model_dir = model_dir, config_filepath = config_filepath, **kwargs) + + self.config.update_parameter(["general", "now"], datetime.datetime.now()) + + if self.config.get_parameter("use_cpu") is True: + self.initialize_cpu() + else: + self.initialize_gpu() + + ####################### + # Logging functions + ####################### + def init_logs(self): + """Initiates the parameters required for the log file + """ + # Directory for training logs + print(self.config.get_parameter("name"), self.config.get_parameter("now")) + self.log_dir = os.path.join(self.config.get_parameter("model_dir"), "{}-{:%Y%m%dT%H%M}".format(self.config.get_parameter("name"), self.config.get_parameter("now"))) + + if 
self.config.get_parameter("save_best_weights") is False: + # Path to save after each epoch. Include placeholders that get filled by Keras. + self.checkpoint_path = os.path.join(self.log_dir, "{}-{:%Y%m%dT%H%M}_*epoch*.h5".format(self.config.get_parameter("name"), self.config.get_parameter("now"))) + self.checkpoint_path = self.checkpoint_path.replace("*epoch*", "{epoch:04d}") + else: + self.checkpoint_best = os.path.join(self.log_dir, "weights_best.h5") + self.checkpoint_now = os.path.join(self.log_dir, "weights_now.h5") + + def write_logs(self): + """Writes the log file + """ + # Create log_dir if it does not exist + if os.path.exists(self.log_dir) is False: + os.makedirs(self.log_dir) + + # save the parameters used in current run to logs dir + self.config.write_config(os.path.join(self.log_dir, "{}-{:%Y%m%dT%H%M}-config.yml".format(self.config.get_parameter("name"), self.config.get_parameter("now")))) + + ####################### + # Initialization functions + ####################### + def summary(self): + """Summary of the layers in the model + """ + self.model.summary() + + def compile_model(self, optimizer, loss): + """Compiles model + Parameters + ---------- + optimizer + Gradient optimizer used in during the training of the network + loss + Loss function of the network + + metrics + To try : + + Class tf.compat.v1.keras.metrics.MeanIoU + Class tf.compat.v2.keras.metrics.MeanIoU + Class tf.compat.v2.metrics.MeanIoU + + """ + if self.config.get_parameter("metrics") == ['IoU']: + print("Metrics : IoU") + from .internals.metrics import mean_iou + self.model.compile(optimizer, loss = loss, metrics = [mean_iou]) + + #self.model.compile(optimizer, loss = loss, metrics = [tf.keras.metrics.MeanIoU(num_classes=1+self.config.get_parameter("nb_classes"))]) + else: + print("Metrics : {}".format(self.config.get_parameter("metrics"))) + self.model.compile(optimizer, loss = loss, metrics = self.config.get_parameter("metrics")) + + def initialize_model(self): + """Initializes the logs, builds the model, and chooses the correct initialization function + """ + # write parameters to yaml file + self.init_logs() + if self.config.get_parameter("for_prediction") is False: + self.write_logs() + + # build model + self.model = self.build_model(self.config.get_parameter("input_size")) + + # save model to yaml file + if self.config.get_parameter("for_prediction") is False: + self.config.write_model(self.model, os.path.join(self.log_dir, "{}-{:%Y%m%dT%H%M}-model.yml".format(self.config.get_parameter("name"), self.config.get_parameter("now")))) + + print("{} using single GPU or CPU..".format("Predicting" if self.config.get_parameter("for_prediction") else "Training")) + self.initialize_model_normal() + + def initialize_cpu(self): + """Sets the session to only use the CPU + """ + config = tf.ConfigProto( + device_count = {'CPU' : 1, + 'GPU' : 0} + ) + session = tf.Session(config=config) + K.set_session(session) + + def get_free_gpu(self): + """Selects the gpu with the most free memory + """ + import subprocess + import os + import sys + from io import StringIO + import numpy as np + + output = subprocess.Popen('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free', stdout=subprocess.PIPE, shell=True).communicate()[0] + output = output.decode("ascii") + # assumes that it is on the popiah server and the last gpu is not used + memory_available = [int(x.split()[2]) for x in output.split("\n")[:-1]] + print("Setting GPU to use to PID {}".format(np.argmax(memory_available))) + return np.argmax(memory_available) + + def 
initialize_gpu(self): + """Sets the seesion to use the gpu specified in config file + """ + #if self.config.get_parameter("visible_gpu") == "None": + # gpu = self.get_free_gpu() + #else: + # gpu = self.config.get_parameter("visible_gpu") + + os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152 + #os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu) # needs to be a string + os.environ['CUDA_VISIBLE_DEVICES'] = str(0) # needs to be a string + + config = tf.ConfigProto() + config.gpu_options.allow_growth = True + sess = tf.Session(config=config) + K.tensorflow_backend.set_session(sess) + + def initialize_model_normal(self): + """Initializes the optimizer and any specified callback functions + """ + opt = self.optimizer_function() + self.compile_model(optimizer = opt, loss = self.loss_function(self.config.get_parameter("loss"))) + + if self.config.get_parameter("for_prediction") == False: + self.callbacks = self.model_checkpoint_call(verbose = True) + + if self.config.get_parameter("use_tensorboard") is True: + self.callbacks.append(self.tensorboard_call()) + + if self.config.get_parameter("reduce_LR_on_plateau") is True: + self.callbacks.append(ReduceLROnPlateau(monitor=self.config.get_parameter("reduce_LR_monitor"), + factor = self.config.get_parameter("reduce_LR_factor"), + patience = self.config.get_parameter("reduce_LR_patience"), + min_lr = self.config.get_parameter("reduce_LR_min_lr"), + verbose = True)) + + if self.config.get_parameter("early_stopping") is True: + self.callbacks.append(EarlyStopping(monitor=self.config.get_parameter("early_stopping_monitor"), + patience = self.config.get_parameter("early_stopping_patience"), + min_delta = self.config.get_parameter("early_stopping_min_delta"), + verbose = True)) + + ####################### + # Optimizer/Loss functions + ####################### + def optimizer_function(self, learning_rate = None): + """Initialize optimizer function + + Parameters + ---------- + learning_rate : `int` + Learning rate of the descent algorithm + + Returns + ---------- + optimizer + Function to call the optimizer + """ + if learning_rate is None: + learning_rate = self.config.get_parameter("learning_rate") + if self.config.get_parameter("optimizer_function") == 'sgd': + return keras.optimizers.SGD(lr = learning_rate, + decay = self.config.get_parameter("decay"), + momentum = self.config.get_parameter("momentum"), + nesterov = self.config.get_parameter("nesterov")) + elif self.config.get_parameter("optimizer_function") == 'rmsprop': + return keras.optimizers.RMSprop(lr = learning_rate, + decay = self.config.get_parameter("decay")) + elif self.config.get_parameter("optimizer_function") == 'adam': + return keras.optimizers.Adam(lr = learning_rate, + decay = self.config.get_parameter("decay")) + + def loss_function(self, loss): + """Initialize loss function + + Parameters + ---------- + loss : `str` + Name of the loss function + + Returns + ---------- + loss + Function to call loss function + """ + if loss == "binary_crossentropy": + if self.config.get_parameter("edge_enhance"): + from .internals.losses import EE_binary_crossentropy as loss + print("Loss : edge-enhanced binary crossentropy") + else: + print("Loss : binary crossentropy") + return loss + elif loss == "categorical_crossentropy": + if self.config.get_parameter("edge_enhance"): + from .internals.losses import EE_categorical_crossentropy as loss + print("Loss : Edge Enhanced categorical_crossentropy") + else: + print("ULoss : categorical_crossentropy") + return loss + elif loss == 
"jaccard_distance_loss": + if self.config.get_parameter("edge_enhance"): + from .internals.losses import EE_jaccard_distance_loss as jaccard_distance_loss + print("Loss : edge-enhanced jaccard_distance_loss") + else: + print("Loss : jaccard distance loss") + from .internals.losses import jaccard_distance_loss + return jaccard_distance_loss + elif loss == "dice_loss": + if self.config.get_parameter("edge_enhance"): + from .internals.losses import EE_dice_coef_loss as dice_coef_loss + print("Loss : edge-enhanced Dice loss") + else: + print("Loss : Dice loss") + from .internals.losses import dice_coef_loss + return dice_coef_loss + elif loss == "bce_dice_loss": + if self.config.get_parameter("edge_enhance"): + from .internals.losses import EE_bce_dice_loss as bce_dice_loss + print("Loss : Edge Enhanced 1 - Dice + BCE loss") + else: + print("Loss : 1 - Dice + BCE loss") + from .internals.losses import bce_dice_loss + return bce_dice_loss + elif loss == "ssim_loss": + if self.config.get_parameter("edge_enhance"): + from .internals.losses import EE_DSSIM_loss as DSSIM_loss + print("Loss : Edge Enhanced DSSIM loss") + else: + print("Loss : DSSIM loss") + from .internals.losses import DSSIM_loss + return DSSIM_loss + elif loss == "bce_ssim_loss": + if self.config.get_parameter("edge_enhance"): + from .internals.losses import EE_bce_ssim_loss as bce_ssim_loss + print("Loss : Edge Enhanced BCE + DSSIM loss") + else: + print("Loss : BCE + DSSIM loss") + from .internals.losses import bce_ssim_loss + return bce_ssim_loss + + + elif loss == "mean_squared_error": + return keras.losses.mean_squared_error + elif loss == "mean_absolute_error": + return keras.losses.mean_absolute_error + + elif loss == "lovasz_hinge": + print("Loss : Lovasz-hinge loss") + from .internals.losses import lovasz_loss + return lovasz_loss + elif loss == "ssim_mae_loss": + print("Loss : DSSIM + MAE loss") + from .internals.losses import dssim_mae_loss + return dssim_mae_loss + else: + print("Loss : {}".format(loss)) + return loss + + + ####################### + # Callbacks + ####################### + def tensorboard_call(self): + """Initialize tensorboard call + """ + return TensorBoard(log_dir=self.log_dir, + batch_size = self.config.get_parameter("batch_size_per_GPU"), + write_graph=self.config.get_parameter("write_graph"), + write_images=self.config.get_parameter("write_images"), + write_grads=self.config.get_parameter("write_grads"), + update_freq='epoch', + histogram_freq=self.config.get_parameter("histogram_freq")) + + def model_checkpoint_call(self, verbose = 0): + """Initialize model checkpoint call + """ + if self.config.get_parameter("save_best_weights") is False: + return [ModelCheckpoint(self.checkpoint_path, save_weights_only=True, verbose=verbose)] + else: + return [ModelCheckpoint(self.checkpoint_best, save_best_only=True, save_weights_only=True, verbose=verbose), + ModelCheckpoint(self.checkpoint_now, save_weights_only=True, verbose=verbose)] + + ####################### + # Clear memory once training is done + ####################### + def end_training(self): + """Deletes model and releases gpu memory held by tensorflow + """ + # del reference to model + del self.model + + # clear memory + tf.reset_default_graph() + K.clear_session() + + # take hold of cuda device to shut it down + from numba import cuda + cuda.select_device(0) + cuda.close() + + ####################### + # Train Model + ####################### + def train_model(self, verbose = True): + """Trains model + + Parameters + ---------- + verbose : `int`, 
optional + [Default: True] Verbose output + """ + history = self.model.fit(self.aug_images, self.aug_ground_truth, validation_split = self.config.get_parameter("val_split"), + batch_size = self.config.get_parameter("batch_size"), epochs = self.config.get_parameter("num_epochs"), shuffle = True, + callbacks=self.callbacks, verbose=verbose) + + self.end_training() + + ####################### + # Predict using loaded model weights + ####################### + # TODO: change to load model from yaml file + def load_model(self, model_dir = None): # redo + """Loads model from h5 file + + Parameters + ---------- + model_dir : `str`, optional + [Default: None] Directory containing the model file + """ + # TODO: rewrite to load model from yaml file + if model_dir is None: + model_dir = self.config.get_parameter("model_dir") + + if os.path.isdir(model_dir) is True: + list_weights_files = glob.glob(os.path.join(model_dir,'*.h5')) + list_weights_files.sort() # To ensure that [-1] gives the last file + + model_dir = os.path.join(model_dir,list_weights_files[-1]) + + self.model.load_model(model_dir) + print("Loaded model from: " + model_dir) + + def load_weights(self, weights_path = None, weights_index = -1): + """Loads weights from h5 file + + Parameters + ---------- + weights_path : `str`, optional + [Default: None] Path containing the weights file or the directory to the weights file + weights_index : `int`, optional + [Default: -1] + """ + if weights_path is None: + weights_path = self.config.get_parameter("model_dir") + + if os.path.isdir(weights_path) is True: + if self.config.get_parameter("save_best_weights") is True: + weights_path = os.path.join(weights_path, "weights_best.h5") + else: + list_weights_files = glob.glob(os.path.join(weights_path,'*.h5')) + list_weights_files.sort() # To ensure that [-1] gives the last file + self.weights_path = list_weights_files[weights_index] + weights_path = os.path.join(weights_path, self.weights_path) + else: + self.weights_path = weights_path + + self.model.load_weights(weights_path) + print("Loaded weights from: " + weights_path) + + + def predict_images(self, image_dir): + """Perform prediction on images found in ``image_dir`` + + Parameters + ---------- + image_dir : `str` + Directory containing the images to perform prediction on + + Returns + ---------- + image : `array_like` + Last image that prediction was perfromed on + """ + + # load image list + from tqdm.notebook import tqdm + image_list = self.list_images(image_dir) + for image_path in tqdm(image_list): + #for image_path in image_list: + image = self.load_image(image_path = image_path) + #print(image.shape) + + # percentile normalization + if self.config.get_parameter("percentile_normalization"): + image, _, _ = self.percentile_normalization(image, in_bound = self.config.get_parameter("percentile")) + + if self.config.get_parameter("tile_overlap_size") == [0,0]: + padding = None + if len(image.shape)==2: + image = np.expand_dims(image, axis = -1) + + # If length =3 : X Y C + elif len(image.shape)==3: + if image.shape[0] != self.config.get_parameter("tile_size")[0]: + if image.shape[1] != self.config.get_parameter("tile_size")[1]: + image = np.transpose(image,(1,2,0)) + + image = np.expand_dims(image, axis = 0) + if image.shape[1] < self.config.get_parameter("tile_size")[0] or image.shape[2] < self.config.get_parameter("tile_size")[1]: + image, padding = self.pad_image(image, image_size = self.config.get_parameter("tile_size")) + + # Else, length : N X Y Z / N X Y T + elif len(image.shape)==4: + 
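+                    # Editor's note (assumption, not from the original patch): a 4D
+                    # array here is taken to be a stack ordered N x X x Y x (Z or T);
+                    # the transpose below swaps the last two axes when the first
+                    # spatial axis does not match tile_size, i.e. when the stack
+                    # arrived as N x X x T x Y. This heuristic fails for images whose
+                    # true X dimension simply differs from the configured tile_size.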
if image.shape[1] != self.config.get_parameter("tile_size")[0]: # Means N X T Y + image = np.transpose(image,(0,1,3,2)) + if image.shape[1] < self.config.get_parameter("tile_size")[0] or image.shape[2] < self.config.get_parameter("tile_size")[1]: + image, padding = self.pad_image(image, image_size = self.config.get_parameter("tile_size")) + #if image.shape[0] != 1: + # image = np.transpose(image,(3,1,2,0)) + + + # Single slice image vs Stack of images (no need of new axis) + if len(image.shape)==3: + input_image = image[np.newaxis,:,:] + #output_image = self.model.predict(input_image, verbose=1) + output_image = self.model.predict(input_image) + + elif len(image.shape)==4: + output_image = [] + for i in tqdm(range(image.shape[0])): + input_image = image[i,:,:,:] + input_image = np.expand_dims(input_image, axis = 0) + if i == 0: + #output_image = self.model.predict(input_image, verbose=1) + output_image = self.model.predict(input_image) + + else: + #output_image = np.append(output_image,self.model.predict(input_image, verbose=1), axis = 0) + output_image = np.append(output_image,self.model.predict(input_image), axis = 0) + + else: + output_image = image + for i in tqdm(range(image.shape[0])): + for j in range(image.shape[1]): + input_image = image[i,j,:,:,:] + input_image = np.expand_dims(input_image, axis = 0) + #output_image[i,j,:,:,:] = self.model.predict(input_image, verbose=1) + output_image[i,j,:,:,:] = self.model.predict(input_image) + + if padding is not None: + h, w = output_image.shape[1:3] + output_image = np.reshape(output_image, (h, w)) + output_image = self.remove_pad_image(output_image, padding = padding) + else: + tile_image_list, num_rows, num_cols, padding = self.tile_image(image, self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size")) + + pred_train_list = [] + for tile in tile_image_list: + + # reshape image to correct dimensions for unet + h, w = tile.shape[:2] + + tile = np.reshape(tile, (1, h, w, 1)) + + pred_train_list.extend(self.model.predict(tile, verbose=1)) + + output_image = self.untile_image(pred_train_list, self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size"), + num_rows, num_cols, padding = padding) + + self.save_image(output_image, image_path) + #print(output_image.shape) + + return output_image + + def save_image(self, image, image_path, subfolder = 'Masks', suffix = '-preds'): + """Saves image to image_path + + Final location of image is as follows: + - image_path + - subfolder + - model/weights file name + + Parameters + ---------- + image : `array_like` + Image to be saved + image_path : `str` + Location to save the image in + subfolder : `str` + [Default: 'Masks'] Subfolder in which the image is to be saved in + suffix : `str` + [Default: '-preds'] Suffix to append to the filename of the predicted image + """ + image_dir = os.path.dirname(image_path) + + output_dir = os.path.join(image_dir, subfolder) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + + if self.config.get_parameter("save_best_weights") is True: + basename = os.path.basename(self.config.get_parameter("model_dir")) + else: + basename, _ = os.path.splitext(os.path.basename(self.weights_path)) + + output_dir = os.path.join(output_dir, basename) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + + filename, _ = os.path.splitext(os.path.basename(image_path)) + output_path = os.path.join(output_dir, "{}{}.tif".format(filename, suffix)) + + if self.config.get_parameter("save_as_uint16") is True: + 
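+            # Editor's sketch of what this branch assumes (not stated in the
+            # original docstring): skimage.util.img_as_uint rescales a float
+            # image in [0, 1] to the full uint16 range, e.g. 0.5 -> ~32768,
+            # so it presumes the final activation (sigmoid/softmax) keeps the
+            # predictions inside [0, 1]; values outside [-1, 1] would raise.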
image = skimage.util.img_as_uint(image) + skimage.io.imsave(output_path, image) diff --git a/models/.ipynb_checkpoints/Unet-checkpoint.py b/models/.ipynb_checkpoints/Unet-checkpoint.py new file mode 100644 index 0000000..750186a --- /dev/null +++ b/models/.ipynb_checkpoints/Unet-checkpoint.py @@ -0,0 +1,109 @@ +import math + +import keras +from keras.models import Model, load_model +from keras.layers import Input, BatchNormalization, Activation +from keras.layers.core import Lambda, Dropout +from keras.layers.convolutional import Conv2D, Conv2DTranspose, UpSampling2D +from keras.layers.convolutional_recurrent import ConvLSTM2D +from keras.layers.pooling import MaxPooling2D +from keras.layers.merge import Concatenate, Add +from keras import regularizers +from keras import backend as K + +import tensorflow as tf + +from .CNN_Base import CNN_Base +from .layers.layers import normalize_input, activation_function, regularizer_function, bn_relu_conv2d + +###### +# Unet +###### +class Unet(CNN_Base): + """ + Unet functions + see https://www.nature.com/articles/s41592-018-0261-2 + """ + + def __init__(self, model_dir = None, name = 'Unet', **kwargs): + super().__init__(model_dir = model_dir, **kwargs) + + self.config.update_parameter(["model","name"], name) + + def build_model(self, input_size, mean_std_normalization = None, + dropout_value = None, acti = None, padding = None, + kernel_initializer = None, weight_regularizer = None, strides = None): + + ### get parameters from config file ### + filters = self.config.get_parameter("filters") + + if dropout_value is None: + dropout_value = self.config.get_parameter("dropout_value") + if acti is None: + acti = self.config.get_parameter("activation_function") + if padding is None: + padding = self.config.get_parameter("padding") + if kernel_initializer is None: + kernel_initializer = self.config.get_parameter("initializer") + if weight_regularizer is None: + weight_regularizer = self.config.get_parameter("weight_regularizer") + if strides is None: + strides = self.config.get_parameter("strides") + if mean_std_normalization is None: + if self.config.get_parameter("mean_std_normalization") == True: + mean = self.config.get_parameter("mean") + std = self.config.get_parameter("std") + else: + mean = None + std = None + + ### Actual network### + inputs = Input(input_size) + + # normalize images + layer = normalize_input(inputs, + scale_input = self.config.get_parameter("scale_input"), + mean_std_normalization = self.config.get_parameter("mean_std_normalization"), + mean = mean, std = std) + + layer_store = [] + + # encoding arm + for _ in range(self.config.get_parameter("levels")): + layer = bn_relu_conv2d(layer, filters, 3, acti=acti, padding=padding, strides=strides, + kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer) + + layer = bn_relu_conv2d(layer, filters, 3, acti=acti, padding=padding, strides=strides, + kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer) + + layer_store.append(layer) + layer = MaxPooling2D((2, 2))(layer) + + filters = filters * 2 + + + layer = bn_relu_conv2d(layer, filters, 3, acti=acti, padding=padding, strides=strides, + kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer) + + layer = bn_relu_conv2d(layer, filters, 3, acti=acti, padding=padding, strides=strides, + kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer) + + # decoding arm + for i in range(self.config.get_parameter("levels")): + layer = Conv2DTranspose(filters, (2, 
2), strides=(2, 2), padding='same')(layer) + + layer = Concatenate(axis=3)([layer, layer_store[-i -1]]) + filters = filters // 2 + + layer = bn_relu_conv2d(layer, filters, 3, acti=acti, padding=padding, strides=strides, + kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer) + + layer = bn_relu_conv2d(layer, filters, 3, acti=acti, padding=padding, strides=strides, + kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer) + if self.config.get_parameter("nb_classes") == 1: + outputs = Conv2D(1, (1, 1), activation=self.config.get_parameter("final_activation"))(layer) + else: + outputs = Conv2D(self.config.get_parameter("nb_classes")+1, (1, 1), activation=self.config.get_parameter("final_activation"))(layer) + + + return Model(inputs=[inputs], outputs=[outputs], name='Unet') diff --git a/models/.ipynb_checkpoints/Unet_ResAttnet-checkpoint.py b/models/.ipynb_checkpoints/Unet_ResAttnet-checkpoint.py new file mode 100644 index 0000000..d9ed1a9 --- /dev/null +++ b/models/.ipynb_checkpoints/Unet_ResAttnet-checkpoint.py @@ -0,0 +1,501 @@ +from keras.layers import Input, concatenate, add, \ + Multiply, Lambda +from keras.layers.convolutional import Conv3D, MaxPooling3D, MaxPooling2D, UpSampling2D, \ + UpSampling3D, Conv2D +from keras.layers.core import Activation +from keras.layers.normalization import BatchNormalization +from keras.models import Model +from .CNN_Base import CNN_Base + + +# Get neural network +class RA_Unet(CNN_Base): + + def __init__(self, model_dir = None, **kwargs): + super().__init__(model_dir = model_dir, **kwargs) + + def build_model(self, inp_shape): + name = self.config.get_parameter("name") + if name == 'Res_att_unet_2d': + model = self.build_res_atten_unet_2d(inp_shape) + return model + elif name == 'Res_att_unet_3d': + model = self.build_res_atten_unet_3d(inp_shape) + return model + + + # ============================================================ + # ======================Attention ResUnet 3D================================# + # ============================================================ + + + def attention_block(self,input, input_channels=None, output_channels=None, encoder_depth=1, name='out'): + """ + attention block + https://arxiv.org/abs/1704.06904 + """ + p = 1 + t = 2 + r = 1 + + if input_channels is None: + input_channels = input.get_shape()[-1].value + if output_channels is None: + output_channels = input_channels + + # First Residual Block + for i in range(p): + input = self.residual_block(input) + + # Trunc Branch + output_trunk = input + for i in range(t): + output_trunk = self.residual_block(output_trunk, output_channels=output_channels) + + # Soft Mask Branch + + ## encoder + ### first down sampling + output_soft_mask = MaxPooling3D(padding='same')(input) # 32x32 + for i in range(r): + output_soft_mask = self.residual_block(output_soft_mask) + + skip_connections = [] + for i in range(encoder_depth - 1): + + ## skip connections + output_skip_connection = self.residual_block(output_soft_mask) + skip_connections.append(output_skip_connection) + # print ('skip shape:', output_skip_connection.get_shape()) + + ## down sampling + output_soft_mask = MaxPooling3D(padding='same')(output_soft_mask) + for _ in range(r): + output_soft_mask = self.residual_block(output_soft_mask) + + ## decoder + skip_connections = list(reversed(skip_connections)) + for i in range(encoder_depth - 1): + ## upsampling + for _ in range(r): + output_soft_mask = self.residual_block(output_soft_mask) + output_soft_mask = 
UpSampling3D()(output_soft_mask) + ## skip connections + output_soft_mask = add([output_soft_mask, skip_connections[i]]) + + ### last upsampling + for i in range(r): + output_soft_mask = self.residual_block(output_soft_mask) + output_soft_mask = UpSampling3D()(output_soft_mask) + + ## Output + output_soft_mask = Conv3D(input_channels, (1, 1, 1))(output_soft_mask) + output_soft_mask = Conv3D(input_channels, (1, 1, 1))(output_soft_mask) + output_soft_mask = Activation('sigmoid')(output_soft_mask) + + # Attention: (1 + output_soft_mask) * output_trunk + output = Lambda(lambda x: x + 1)(output_soft_mask) + output = Multiply()([output, output_trunk]) # + + # Last Residual Block + for i in range(p): + output = self.residual_block(output, name=name) + + return output + + + def residual_block(self,input, input_channels=None, output_channels=None, kernel_size=(3, 3, 3), stride=1, name='out'): + """ + full pre-activation residual block + https://arxiv.org/pdf/1603.05027.pdf + """ + if output_channels is None: + output_channels = input.get_shape()[-1].value + if input_channels is None: + input_channels = output_channels // 4 + + strides = (stride, stride, stride) + + x = BatchNormalization()(input) + x = Activation('relu')(x) + x = Conv3D(input_channels, (1, 1, 1))(x) + + x = BatchNormalization()(x) + x = Activation('relu')(x) + x = Conv3D(input_channels, kernel_size, padding='same', strides=stride)(x) + + x = BatchNormalization()(x) + x = Activation('relu')(x) + x = Conv3D(output_channels, (1, 1, 1), padding='same')(x) + + if input_channels != output_channels or stride != 1: + input = Conv3D(output_channels, (1, 1, 1), padding='same', strides=strides)(input) + if name == 'out': + x = add([x, input]) + else: + x = add([x, input], name=name) + return x + + + def res_atten_unet_3d(input_shape, filter_num=8, merge_axis=-1): + data = Input(shape=input_shape) + pool_size = (2, 2, 2) + up_size = (2, 2, 2) + conv1 = Conv3D(filter_num * 4, 3, padding='same')(data) + conv1 = BatchNormalization()(conv1) + conv1 = Activation('relu')(conv1) + # conv1 = Dropout(0.5)(conv1) + + pool = MaxPooling3D(pool_size=pool_size)(conv1) + + res1 = residual_block(pool, output_channels=filter_num * 8) + # res1 = Dropout(0.5)(res1) + + pool1 = MaxPooling3D(pool_size=pool_size)(res1) + + res2 = residual_block(pool1, output_channels=filter_num * 16) + # res2 = Dropout(0.5)(res2) + + pool2 = MaxPooling3D(pool_size=pool_size)(res2) + + res3 = residual_block(pool2, output_channels=filter_num * 32) + # res3 = Dropout(0.5)(res3) + + pool3 = MaxPooling3D(pool_size=pool_size)(res3) + + res4 = residual_block(pool3, output_channels=filter_num * 64) + # res4 = Dropout(0.5)(res4) + + pool4 = MaxPooling3D(pool_size=pool_size)(res4) + + res5 = residual_block(pool4, output_channels=filter_num * 64) + res5 = residual_block(res5, output_channels=filter_num * 64) + + atb5 = attention_block(res4, encoder_depth=1, name='atten1') + up1 = UpSampling3D(size=up_size)(res5) + merged1 = concatenate([up1, atb5], axis=merge_axis) + + res5 = residual_block(merged1, output_channels=filter_num * 64) + # res5 = Dropout(0.5)(res5) + + atb6 = attention_block(res3, encoder_depth=2, name='atten2') + up2 = UpSampling3D(size=up_size)(res5) + merged2 = concatenate([up2, atb6], axis=merge_axis) + + res6 = residual_block(merged2, output_channels=filter_num * 32) + # res6 = Dropout(0.5)(res6) + + atb7 = attention_block(res2, encoder_depth=3, name='atten3') + up3 = UpSampling3D(size=up_size)(res6) + merged3 = concatenate([up3, atb7], axis=merge_axis) + + res7 = 
residual_block(merged3, output_channels=filter_num * 16) + # res7 = Dropout(0.5)(res7) + + atb8 = attention_block(res1, encoder_depth=4, name='atten4') + up4 = UpSampling3D(size=up_size)(res7) + merged4 = concatenate([up4, atb8], axis=merge_axis) + + res8 = residual_block(merged4, output_channels=filter_num * 8) + # res8 = Dropout(0.5)(res8) + + up = UpSampling3D(size=up_size)(res8) + merged = concatenate([up, conv1], axis=merge_axis) + conv9 = Conv3D(filter_num * 4, 3, padding='same')(merged) + conv9 = BatchNormalization()(conv9) + conv9 = Activation('relu')(conv9) + # conv9 = Dropout(0.5)(conv9) + + output = Conv3D(1, 3, padding='same', activation='sigmoid')(conv9) + model = Model(data, output) + return model + + + # liver network do not modify + def build_res_atten_unet_3d(self, input_shape, merge_axis=-1, pool_size=(2, 2, 2) + , up_size=(2, 2, 2)): + data = Input(shape=input_shape) + filter_num = round(self.config.get_parameter("filters")/4) + conv1 = Conv3D(filter_num * 4, 3, padding='same')(data) + conv1 = BatchNormalization()(conv1) + conv1 = Activation('relu')(conv1) + + pool = MaxPooling3D(pool_size=pool_size)(conv1) + + res1 = self.residual_block(pool, output_channels=filter_num * 4) + + pool1 = MaxPooling3D(pool_size=pool_size)(res1) + + res2 = self.residual_block(pool1, output_channels=filter_num * 8) + + pool2 = MaxPooling3D(pool_size=pool_size)(res2) + + res3 = self.residual_block(pool2, output_channels=filter_num * 16) + pool3 = MaxPooling3D(pool_size=pool_size)(res3) + + res4 = self.residual_block(pool3, output_channels=filter_num * 32) + + pool4 = MaxPooling3D(pool_size=pool_size)(res4) + + res5 = self.residual_block(pool4, output_channels=filter_num * 64) + res5 = self.residual_block(res5, output_channels=filter_num * 64) + + atb5 = self.attention_block(res4, encoder_depth=1, name='atten1') + up1 = UpSampling3D(size=up_size)(res5) + merged1 = concatenate([up1, atb5], axis=merge_axis) + + res5 = self.residual_block(merged1, output_channels=filter_num * 32) + + atb6 = self.attention_block(res3, encoder_depth=2, name='atten2') + up2 = UpSampling3D(size=up_size)(res5) + merged2 = concatenate([up2, atb6], axis=merge_axis) + + res6 = self.residual_block(merged2, output_channels=filter_num * 16) + atb7 = self.attention_block(res2, encoder_depth=3, name='atten3') + up3 = UpSampling3D(size=up_size)(res6) + merged3 = concatenate([up3, atb7], axis=merge_axis) + + res7 = self.residual_block(merged3, output_channels=filter_num * 8) + atb8 = self.attention_block(res1, encoder_depth=4, name='atten4') + up4 = UpSampling3D(size=up_size)(res7) + merged4 = concatenate([up4, atb8], axis=merge_axis) + + res8 = self.residual_block(merged4, output_channels=filter_num * 4) + up = UpSampling3D(size=up_size)(res8) + merged = concatenate([up, conv1], axis=merge_axis) + conv9 = Conv3D(filter_num * 4, 3, padding='same')(merged) + conv9 = BatchNormalization()(conv9) + conv9 = Activation('relu')(conv9) + + + if self.config.get_parameter("nb_classes") == 1: + output = Conv3D(1, 3, padding='same', activation=self.config.get_parameter("final_activation"))(conv9) + else: + output = Conv3D(self.config.get_parameter("nb_classes")+1, 3, padding='same', activation=self.config.get_parameter("final_activation"))(conv9) + + model = Model(data, output) + return model + + + # ============================================================ + # ======================Attention ResUnet 2D================================# + # ============================================================ + + + def 
attention_block_2d(self,input, input_channels=None, output_channels=None, encoder_depth=1, name='at'): + """ + attention block + https://arxiv.org/abs/1704.06904 + """ + p = 1 + t = 2 + r = 1 + + if input_channels is None: + input_channels = input.get_shape()[-1].value + if output_channels is None: + output_channels = input_channels + + # First Residual Block + for i in range(p): + input = self.residual_block_2d(input) + + # Trunc Branch + output_trunk = input + for i in range(t): + output_trunk = self.residual_block_2d(output_trunk) + + # Soft Mask Branch + + ## encoder + ### first down sampling + output_soft_mask = MaxPooling2D(padding='same')(input) # 32x32 + for i in range(r): + output_soft_mask = self.residual_block_2d(output_soft_mask) + + skip_connections = [] + for i in range(encoder_depth - 1): + + ## skip connections + output_skip_connection = self.residual_block_2d(output_soft_mask) + skip_connections.append(output_skip_connection) + + ## down sampling + output_soft_mask = MaxPooling2D(padding='same')(output_soft_mask) + for _ in range(r): + output_soft_mask = self.residual_block_2d(output_soft_mask) + + ## decoder + skip_connections = list(reversed(skip_connections)) + for i in range(encoder_depth - 1): + ## upsampling + for _ in range(r): + output_soft_mask = self.residual_block_2d(output_soft_mask) + output_soft_mask = UpSampling2D()(output_soft_mask) + ## skip connections + output_soft_mask = add([output_soft_mask, skip_connections[i]]) + + ### last upsampling + for i in range(r): + output_soft_mask = self.residual_block_2d(output_soft_mask) + output_soft_mask = UpSampling2D()(output_soft_mask) + + ## Output + output_soft_mask = Conv2D(input_channels, (1, 1))(output_soft_mask) + output_soft_mask = Conv2D(input_channels, (1, 1))(output_soft_mask) + output_soft_mask = Activation('sigmoid')(output_soft_mask) + + # Attention: (1 + output_soft_mask) * output_trunk + output = Lambda(lambda x: x + 1)(output_soft_mask) + output = Multiply()([output, output_trunk]) # + + # Last Residual Block + for i in range(p): + output = self.residual_block_2d(output, name=name) + + return output + + + def residual_block_2d(self, input, input_channels=None, output_channels=None, kernel_size=(3, 3), stride=1, name='out'): + """ + full pre-activation residual block + https://arxiv.org/pdf/1603.05027.pdf + """ + acti = self.config.get_parameter("activation_function") + if output_channels is None: + output_channels = input.get_shape()[-1].value + if input_channels is None: + input_channels = output_channels // 4 + strides = (stride, stride) + x = BatchNormalization()(input) + x = Activation(acti)(x) + x = Conv2D(input_channels, (1, 1))(x) + + x = BatchNormalization()(x) + x = Activation(acti)(x) + x = Conv2D(input_channels, kernel_size, padding='same', strides=stride)(x) + + x = BatchNormalization()(x) + x = Activation(acti)(x) + x = Conv2D(output_channels, (1, 1), padding='same')(x) + + if input_channels != output_channels or stride != 1: + input = Conv2D(output_channels, (1, 1), padding='same', strides=strides)(input) + if name == 'out': + x = add([x, input]) + else: + x = add([x, input], name=name) + return x + + + def build_res_atten_unet_2d(self, input_shape): + merge_axis = -1 # Feature maps are concatenated along last axis (for tf backend) + data = Input(shape=input_shape) + filter_num = round(self.config.get_parameter("filters")/4) + acti = self.config.get_parameter("activation_function") + + conv1 = Conv2D(filter_num * 4, 3, padding='same')(data) + conv1 = BatchNormalization()(conv1) + conv1 
= Activation(acti)(conv1) + + # res0 = residual_block_2d(data, output_channels=filter_num * 2) + + pool = MaxPooling2D(pool_size=(2, 2))(conv1) + res1 = self.residual_block_2d(pool, output_channels=filter_num * 4) + + # res1 = residual_block_2d(atb1, output_channels=filter_num * 4) + + pool1 = MaxPooling2D(pool_size=(2, 2))(res1) + # pool1 = MaxPooling2D(pool_size=(2, 2))(atb1) + + res2 = self.residual_block_2d(pool1, output_channels=filter_num * 8) + + # res2 = residual_block_2d(atb2, output_channels=filter_num * 8) + pool2 = MaxPooling2D(pool_size=(2, 2))(res2) + # pool2 = MaxPooling2D(pool_size=(2, 2))(atb2) + + res3 = self.residual_block_2d(pool2, output_channels=filter_num * 16) + # res3 = residual_block_2d(atb3, output_channels=filter_num * 16) + pool3 = MaxPooling2D(pool_size=(2, 2))(res3) + # pool3 = MaxPooling2D(pool_size=(2, 2))(atb3) + + res4 = self.residual_block_2d(pool3, output_channels=filter_num * 32) + + # res4 = residual_block_2d(atb4, output_channels=filter_num * 32) + pool4 = MaxPooling2D(pool_size=(2, 2))(res4) + # pool4 = MaxPooling2D(pool_size=(2, 2))(atb4) + + res5 = self.residual_block_2d(pool4, output_channels=filter_num * 64) + # res5 = residual_block_2d(res5, output_channels=filter_num * 64) + res5 = self.residual_block_2d(res5, output_channels=filter_num * 64) + + atb5 = self.attention_block_2d(res4, encoder_depth=1, name='atten1') + up1 = UpSampling2D(size=(2, 2))(res5) + merged1 = concatenate([up1, atb5], axis=merge_axis) + # merged1 = concatenate([up1, atb4], axis=merge_axis) + + res5 = self.residual_block_2d(merged1, output_channels=filter_num * 32) + # atb5 = attention_block_2d(res5, encoder_depth=1) + + atb6 = self.attention_block_2d(res3, encoder_depth=2, name='atten2') + up2 = UpSampling2D(size=(2, 2))(res5) + # up2 = UpSampling2D(size=(2, 2))(atb5) + merged2 = concatenate([up2, atb6], axis=merge_axis) + # merged2 = concatenate([up2, atb3], axis=merge_axis) + + res6 = self.residual_block_2d(merged2, output_channels=filter_num * 16) + # atb6 = attention_block_2d(res6, encoder_depth=2) + + # atb6 = attention_block_2d(res6, encoder_depth=2) + atb7 = self.attention_block_2d(res2, encoder_depth=3, name='atten3') + up3 = UpSampling2D(size=(2, 2))(res6) + # up3 = UpSampling2D(size=(2, 2))(atb6) + merged3 = concatenate([up3, atb7], axis=merge_axis) + # merged3 = concatenate([up3, atb2], axis=merge_axis) + + res7 = self.residual_block_2d(merged3, output_channels=filter_num * 8) + # atb7 = attention_block_2d(res7, encoder_depth=3) + + # atb7 = attention_block_2d(res7, encoder_depth=3) + atb8 = self.attention_block_2d(res1, encoder_depth=4, name='atten4') + up4 = UpSampling2D(size=(2, 2))(res7) + # up4 = UpSampling2D(size=(2, 2))(atb7) + merged4 = concatenate([up4, atb8], axis=merge_axis) + # merged4 = concatenate([up4, atb1], axis=merge_axis) + + res8 = self.residual_block_2d(merged4, output_channels=filter_num * 4) + # atb8 = attention_block_2d(res8, encoder_depth=4) + + # atb8 = attention_block_2d(res8, encoder_depth=4) + up = UpSampling2D(size=(2, 2))(res8) + # up = UpSampling2D(size=(2, 2))(atb8) + merged = concatenate([up, conv1], axis=merge_axis) + # res9 = residual_block_2d(merged, output_channels=filter_num * 2) + + conv9 = Conv2D(filter_num * 4, 3, padding='same')(merged) + conv9 = BatchNormalization()(conv9) + conv9 = Activation(acti)(conv9) + + if self.config.get_parameter("nb_classes") == 1: + output = Conv2D(1, 3, padding='same', activation=self.config.get_parameter("final_activation"))(conv9) + else: + output = 
Conv2D(self.config.get_parameter("nb_classes")+1, 3, padding='same', activation=self.config.get_parameter("final_activation"))(conv9) + + model = Model(data, output) + return model + + + +class Res_att_unet_2d(RA_Unet): + def __init__(self, model_dir = None, name = 'Res_att_unet_2d', **kwargs): + super().__init__(model_dir = model_dir, **kwargs) + + self.config.update_parameter(["model","name"], name) + + + +class Res_att_unet_3d(RA_Unet): + def __init__(self, model_dir = None, name = 'Res_att_unet_3d', **kwargs): + super().__init__(model_dir = model_dir, **kwargs) + + self.config.update_parameter(["model","name"], name) diff --git a/models/.ipynb_checkpoints/Unet_Resnet-checkpoint.py b/models/.ipynb_checkpoints/Unet_Resnet-checkpoint.py new file mode 100644 index 0000000..90b4724 --- /dev/null +++ b/models/.ipynb_checkpoints/Unet_Resnet-checkpoint.py @@ -0,0 +1,260 @@ +import math + +import keras +from keras.models import Model, load_model +from keras.layers import Input, BatchNormalization, Activation +from keras.layers.core import Lambda, Dropout +from keras.layers.convolutional import Conv2D, Conv2DTranspose, UpSampling2D +from keras.layers.convolutional_recurrent import ConvLSTM2D +from keras.layers.pooling import MaxPooling2D +from keras.layers.merge import Concatenate, Add +from keras import regularizers +from keras import backend as K + +import tensorflow as tf + +from .CNN_Base import CNN_Base +from .layers.layers import normalize_input, activation_function, regularizer_function, bn_relu_conv2d, bn_relu_conv2dtranspose + +################################################ +# Unet + Resnet +################################################ + +class Unet_Resnet(CNN_Base): + """ + Unet + resnet functions + see https://link.springer.com/chapter/10.1007/978-3-319-46976-8_19 + """ + + def __init__(self, model_dir = None, **kwargs): + super().__init__(model_dir = model_dir, **kwargs) + + def bottleneck_block(self, inputs, + upsample = False, + filters = 8, + strides = 1, dropout_value = None, acti = None, padding = None, + kernel_initializer = None, weight_regularizer = None, name = None): + # Bottleneck_block + with tf.name_scope("Bottleneck_block" + name): + output = bn_relu_conv2d(inputs, filters, 1, acti=acti, padding=padding, strides=strides, + kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer) + + output = bn_relu_conv2d(output, filters, 3, acti=acti, padding=padding, + kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer) + + if upsample == True: + output = bn_relu_conv2dtranspose(output, filters, (2,2), strides = (2,2), acti=acti, padding=padding, + kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer) + output = Conv2D(filters * 4, (1,1), padding=padding, + kernel_initializer=kernel_initializer, + kernel_regularizer=regularizer_function(weight_regularizer))(output) + else: + output = bn_relu_conv2d(output, filters*4, 1, acti=acti, padding=padding, + kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer) + + output = Dropout(dropout_value)(output) + + # reshape input to the same size as output + if upsample == True: + inputs = UpSampling2D()(inputs) + if strides == 2: + inputs = Conv2D(output.shape[3].value, 1, padding=padding, strides=strides, kernel_initializer=kernel_initializer)(inputs) + + # ensure number of filters are correct between input and output + if output.shape[3] != inputs.shape[3]: + inputs = Conv2D(output.shape[3].value, 1, padding=padding, 
kernel_initializer=kernel_initializer)(inputs) + + return Add()([output, inputs]) + + def simple_block(self, inputs, filters, + strides = 1, dropout_value = None, acti = None, padding = None, + kernel_initializer = None, weight_regularizer = None, name = None): + + with tf.name_scope("Simple_block" + name): + output = BatchNormalization()(inputs) + output = activation_function(output, acti) + output = MaxPooling2D()(output) + output = Conv2D(filters, 3, padding=padding, strides=strides, + kernel_initializer=kernel_initializer, + kernel_regularizer=regularizer_function(weight_regularizer))(output) + + output = Dropout(dropout_value)(output) + + inputs = Conv2D(output.shape[3].value, 1, padding=padding, strides=2, kernel_initializer=kernel_initializer)(inputs) + + return Add()([output, inputs]) + + def simple_block_up(self, inputs, filters, + strides = 1, dropout_value = None, acti = None, padding = None, + kernel_initializer = None, weight_regularizer = None, name = None): + + with tf.name_scope("Simple_block_up" + name): + output = bn_relu_conv2d(inputs, filters, 3, acti=acti, padding=padding, strides=strides, + kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer) + + output = Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding=padding, kernel_initializer=kernel_initializer)(output) + + output = Dropout(dropout_value)(output) + + inputs = UpSampling2D()(inputs) + inputs = Conv2D(output.shape[3].value, 1, padding=padding, kernel_initializer=kernel_initializer)(inputs) + + return Add()([output, inputs]) + + + def build_model(self, unet_input, mean_std_normalization = None, + dropout_value = None, acti = None, padding = None, + kernel_initializer = None, weight_regularizer = None): + + ### get parameters from config file ### + filters = self.config.get_parameter("filters") + + if dropout_value is None: + dropout_value = self.config.get_parameter("dropout_value") + if acti is None: + acti = self.config.get_parameter("activation_function") + if padding is None: + padding = self.config.get_parameter("padding") + if kernel_initializer is None: + kernel_initializer = self.config.get_parameter("initializer") + if weight_regularizer is None: + weight_regularizer = self.config.get_parameter("weight_regularizer") + if mean_std_normalization is None: + if self.config.get_parameter("mean_std_normalization") == True: + mean = self.config.get_parameter("mean") + std = self.config.get_parameter("std") + else: + mean = None + std = None + + + ### Actual network### + inputs = Input(unet_input) + + # normalize images + layer = normalize_input(inputs, + scale_input = self.config.get_parameter("scale_input"), + mean_std_normalization = self.config.get_parameter("mean_std_normalization"), + mean = mean, std = std) + + # encoder arm + layer_1 = Conv2D(filters, (3, 3), padding = padding, + kernel_initializer = kernel_initializer, + kernel_regularizer = regularizer_function(weight_regularizer), name="Conv_layer_1")(layer) + + layer_2 = self.simple_block(layer_1, filters, + dropout_value = dropout_value, acti = acti, padding = padding, + kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, + name = "_layer_2") + + layer = layer_2 + layer_store = [layer] + + for i, conv_layer_i in enumerate(self.config.get_parameter("bottleneck_block"), 1): + strides = 2 + + # last layer of encoding arm is treated as across + if i == len(self.config.get_parameter("bottleneck_block")): + layer = self.bottleneck_block(layer, filters = filters, + strides = strides, dropout_value 
= dropout_value, acti = acti, padding = padding, + kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, + name = "_layer_{}".format(2 + i)) + + for count in range(conv_layer_i-2): + layer = self.bottleneck_block(layer, filters = filters, + dropout_value = dropout_value, acti = acti, padding = padding, + kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, + name="_layer_{}-{}".format(2 + i, count)) + + layer = self.bottleneck_block(layer, upsample = True, + filters = filters, strides = 1, + dropout_value = dropout_value, acti = acti, padding = padding, + kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, + name = "_up_layer_{}".format(2 + i)) + else: + layer = self.bottleneck_block(layer, filters = filters, + strides = strides, dropout_value = dropout_value, acti = acti, padding = padding, + kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, + name = "_layer_{}".format(2 + i)) + + for count in range(conv_layer_i - 1): + layer = self.bottleneck_block(layer, filters = filters, + dropout_value = dropout_value, acti = acti, padding = padding, + kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, + name="_layer_{}-{}".format(2 + i, count)) + filters = filters*2 + layer_store.append(layer) + + # decoder arm + for i, conv_layer_i in enumerate(self.config.get_parameter("bottleneck_block")[-2::-1], 1): + filters = filters//2 + + # note that i should be positive possibly due to the way keras/tf model compile works + layer = Concatenate(axis=3, name="Concatenate_layer_{}".format(i+6))([layer_store[-i], layer]) + + for count in range(conv_layer_i - 1): + layer = self.bottleneck_block(layer, filters = filters, + dropout_value = dropout_value, acti = acti, padding = padding, + kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, + name="_layer_{}-{}".format(i+6, count)) + + layer = self.bottleneck_block(layer, upsample = True, + filters = filters, strides = 1, + dropout_value = dropout_value, acti = acti, padding = padding, + kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, + name = "_layer_{}".format(i+6)) + + layer_13 = Concatenate(axis=3, name="Concatenate_layer_13")([layer, layer_2]) + layer_14 = self.simple_block_up(layer_13, filters, + dropout_value = dropout_value, acti = acti, padding = padding, + kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, + name = "_layer_14") + + layer_15 = Concatenate(axis=3, name="Concatenate_layer_15")([layer_14, layer_1]) + + layer_16 = Conv2D(filters, (3, 3), padding = padding, + kernel_initializer = kernel_initializer, kernel_regularizer = regularizer_function(weight_regularizer), + name="Conv_layer_16")(layer_15) + + layer_17 = BatchNormalization()(layer_16) + layer_18 = activation_function(layer_17, acti) + if self.config.get_parameter("nb_classes") == 1: + outputs = Conv2D(1, (1, 1), activation=self.config.get_parameter("final_activation"))(layer_18) + else: + outputs = Conv2D(self.config.get_parameter("nb_classes")+1, (1, 1), activation=self.config.get_parameter("final_activation"))(layer_18) + #outputs = Conv2D(1, (1, 1), activation = self.config.get_parameter("final_activation"))(layer_18) + + return Model(inputs=[inputs], outputs=[outputs], name = self.config.get_parameter('name')) + +class Unet_Resnet101(Unet_Resnet): + def __init__(self, model_dir = None, name = 'Unet_Resnet101', **kwargs): + 
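+        # Usage sketch (editor's illustration, not part of the original API
+        # docs; assumes a YAML config compatible with Network_Config):
+        #
+        #     model = Unet_Resnet101(model_dir = "logs/",
+        #                            config_filepath = "configs/default_unet.yml")
+        #     model.initialize_model()
+        #     model.train_model()
+        #
+        # The (3, 4, 23, 3) bottleneck layout set below mirrors the stage
+        # depths of a standard ResNet101 encoder.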
super().__init__(model_dir = model_dir, **kwargs) + + self.config.update_parameter(["model","name"], name) + self.config.update_parameter(["model","bottleneck_block"], (3, 4, 23, 3)) + + # store parameters for ease of use (may need to remove in the future) + self.conv_layer = self.config.get_parameter("bottleneck_block") + +class Unet_Resnet50(Unet_Resnet): + def __init__(self, model_dir = None, name = 'Unet_Resnet50', **kwargs): + super().__init__(model_dir = model_dir, **kwargs) + + self.config.update_parameter(["model","name"], name) + self.config.update_parameter(["model","bottleneck_block"], (3, 4, 6, 3)) + + # store parameters for ease of use (may need to remove in the future) + self.conv_layer = self.config.get_parameter("bottleneck_block") + +class Unet_Resnet_paper(Unet_Resnet): + def __init__(self, model_dir = None, name = 'Unet_Resnet101', **kwargs): + """ + see https://arxiv.org/pdf/1608.04117.pdf + """ + super().__init__(model_dir = model_dir, **kwargs) + + self.config.update_parameter(["model","name"], name) + self.config.update_parameter(["model","bottleneck_block"], (3, 8, 10, 3)) + + # store parameters for ease of use (may need to remove in the future) + self.conv_layer = self.config.get_parameter("bottleneck_block") \ No newline at end of file diff --git a/models/.ipynb_checkpoints/__init__-checkpoint.py b/models/.ipynb_checkpoints/__init__-checkpoint.py new file mode 100644 index 0000000..61006f3 --- /dev/null +++ b/models/.ipynb_checkpoints/__init__-checkpoint.py @@ -0,0 +1 @@ +from __future__ import absolute_import, print_function \ No newline at end of file diff --git a/models/CNN_Base.py b/models/CNN_Base.py new file mode 100644 index 0000000..ab13adc --- /dev/null +++ b/models/CNN_Base.py @@ -0,0 +1,570 @@ +import os + +import glob +import datetime + +import skimage.io +import numpy as np + +import tensorflow as tf + +import keras +from keras import backend as K +from keras.models import Model, load_model +from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint, TensorBoard, ProgbarLogger + +from .internals.image_functions import Image_Functions +from .internals.network_config import Network_Config +from .internals.dataset import Dataset + +class CNN_Base(Dataset, Image_Functions): + def __init__(self, model_dir = None, config_filepath = None, **kwargs): + """Creates the base neural network class with basic functions + + Parameters + ---------- + model_dir : `str`, optional + [Default: None] Folder where the model is stored + config_filepath : `str`, optional + [Default: None] Filepath to the config file + **kwargs + Parameters that are passed to :class:`network_config.Network_Config` + + Attributes + ---------- + config : :class:`network_config.Network_Config` + Network_config object containing the config and necessary functions + """ + + super().__init__() + + self.config = Network_Config(model_dir = model_dir, config_filepath = config_filepath, **kwargs) + + self.config.update_parameter(["general", "now"], datetime.datetime.now()) + + if self.config.get_parameter("use_cpu") is True: + self.initialize_cpu() + else: + self.initialize_gpu() + + ####################### + # Logging functions + ####################### + def init_logs(self): + """Initiates the parameters required for the log file + """ + # Directory for training logs + print(self.config.get_parameter("name"), self.config.get_parameter("now")) + self.log_dir = os.path.join(self.config.get_parameter("model_dir"), "{}-{:%Y%m%dT%H%M}".format(self.config.get_parameter("name"), 
self.config.get_parameter("now"))) + + if self.config.get_parameter("save_best_weights") is False: + # Path to save after each epoch. Include placeholders that get filled by Keras. + self.checkpoint_path = os.path.join(self.log_dir, "{}-{:%Y%m%dT%H%M}_*epoch*.h5".format(self.config.get_parameter("name"), self.config.get_parameter("now"))) + self.checkpoint_path = self.checkpoint_path.replace("*epoch*", "{epoch:04d}") + else: + self.checkpoint_best = os.path.join(self.log_dir, "weights_best.h5") + self.checkpoint_now = os.path.join(self.log_dir, "weights_now.h5") + + def write_logs(self): + """Writes the log file + """ + # Create log_dir if it does not exist + if os.path.exists(self.log_dir) is False: + os.makedirs(self.log_dir) + + # save the parameters used in current run to logs dir + self.config.write_config(os.path.join(self.log_dir, "{}-{:%Y%m%dT%H%M}-config.yml".format(self.config.get_parameter("name"), self.config.get_parameter("now")))) + + ####################### + # Initialization functions + ####################### + def summary(self): + """Summary of the layers in the model + """ + self.model.summary() + + def compile_model(self, optimizer, loss): + """Compiles model + Parameters + ---------- + optimizer + Gradient optimizer used in during the training of the network + loss + Loss function of the network + + metrics + To try : + + Class tf.compat.v1.keras.metrics.MeanIoU + Class tf.compat.v2.keras.metrics.MeanIoU + Class tf.compat.v2.metrics.MeanIoU + + """ + if self.config.get_parameter("metrics") == ['IoU']: + print("Metrics : IoU") + from .internals.metrics import mean_iou + self.model.compile(optimizer, loss = loss, metrics = [mean_iou]) + + #self.model.compile(optimizer, loss = loss, metrics = [tf.keras.metrics.MeanIoU(num_classes=1+self.config.get_parameter("nb_classes"))]) + else: + print("Metrics : {}".format(self.config.get_parameter("metrics"))) + self.model.compile(optimizer, loss = loss, metrics = self.config.get_parameter("metrics")) + + def initialize_model(self): + """Initializes the logs, builds the model, and chooses the correct initialization function + """ + # write parameters to yaml file + self.init_logs() + if self.config.get_parameter("for_prediction") is False: + self.write_logs() + + # build model + self.model = self.build_model(self.config.get_parameter("input_size")) + + # save model to yaml file + if self.config.get_parameter("for_prediction") is False: + self.config.write_model(self.model, os.path.join(self.log_dir, "{}-{:%Y%m%dT%H%M}-model.yml".format(self.config.get_parameter("name"), self.config.get_parameter("now")))) + + print("{} using single GPU or CPU..".format("Predicting" if self.config.get_parameter("for_prediction") else "Training")) + self.initialize_model_normal() + + def initialize_cpu(self): + """Sets the session to only use the CPU + """ + config = tf.ConfigProto( + device_count = {'CPU' : 1, + 'GPU' : 0} + ) + session = tf.Session(config=config) + K.set_session(session) + + def get_free_gpu(self): + """Selects the gpu with the most free memory + """ + import subprocess + import os + import sys + from io import StringIO + import numpy as np + + output = subprocess.Popen('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free', stdout=subprocess.PIPE, shell=True).communicate()[0] + output = output.decode("ascii") + # assumes that it is on the popiah server and the last gpu is not used + memory_available = [int(x.split()[2]) for x in output.split("\n")[:-1]] + print("Setting GPU to use to PID {}".format(np.argmax(memory_available))) + return 
np.argmax(memory_available)
+
+    def initialize_gpu(self):
+        """Sets the session to use the gpu specified in config file
+        """
+        #if self.config.get_parameter("visible_gpu") == "None":
+        #    gpu = self.get_free_gpu()
+        #else:
+        #    gpu = self.config.get_parameter("visible_gpu")
+
+        os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
+        #os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu) # needs to be a string
+        os.environ['CUDA_VISIBLE_DEVICES'] = str(0) # needs to be a string
+
+        config = tf.ConfigProto()
+        config.gpu_options.allow_growth = True
+        sess = tf.Session(config=config)
+        K.tensorflow_backend.set_session(sess)
+
+    def initialize_model_normal(self):
+        """Initializes the optimizer and any specified callback functions
+        """
+        opt = self.optimizer_function()
+        self.compile_model(optimizer = opt, loss = self.loss_function(self.config.get_parameter("loss")))
+
+        if self.config.get_parameter("for_prediction") == False:
+            self.callbacks = self.model_checkpoint_call(verbose = True)
+
+            if self.config.get_parameter("use_tensorboard") is True:
+                self.callbacks.append(self.tensorboard_call())
+
+            if self.config.get_parameter("reduce_LR_on_plateau") is True:
+                self.callbacks.append(ReduceLROnPlateau(monitor=self.config.get_parameter("reduce_LR_monitor"),
+                                                        factor = self.config.get_parameter("reduce_LR_factor"),
+                                                        patience = self.config.get_parameter("reduce_LR_patience"),
+                                                        min_lr = self.config.get_parameter("reduce_LR_min_lr"),
+                                                        verbose = True))
+
+            if self.config.get_parameter("early_stopping") is True:
+                self.callbacks.append(EarlyStopping(monitor=self.config.get_parameter("early_stopping_monitor"),
+                                                    patience = self.config.get_parameter("early_stopping_patience"),
+                                                    min_delta = self.config.get_parameter("early_stopping_min_delta"),
+                                                    verbose = True))
+
+    #######################
+    # Optimizer/Loss functions
+    #######################
+    def optimizer_function(self, learning_rate = None):
+        """Initialize optimizer function
+
+        Parameters
+        ----------
+        learning_rate : `float`
+            Learning rate of the descent algorithm
+
+        Returns
+        ----------
+        optimizer
+            Function to call the optimizer
+        """
+        if learning_rate is None:
+            learning_rate = self.config.get_parameter("learning_rate")
+        if self.config.get_parameter("optimizer_function") == 'sgd':
+            return keras.optimizers.SGD(lr = learning_rate,
+                                        decay = self.config.get_parameter("decay"),
+                                        momentum = self.config.get_parameter("momentum"),
+                                        nesterov = self.config.get_parameter("nesterov"))
+        elif self.config.get_parameter("optimizer_function") == 'rmsprop':
+            return keras.optimizers.RMSprop(lr = learning_rate,
+                                            decay = self.config.get_parameter("decay"))
+        elif self.config.get_parameter("optimizer_function") == 'adam':
+            return keras.optimizers.Adam(lr = learning_rate,
+                                         decay = self.config.get_parameter("decay"))
+
+    def loss_function(self, loss):
+        """Initialize loss function
+
+        Parameters
+        ----------
+        loss : `str`
+            Name of the loss function
+
+        Returns
+        ----------
+        loss
+            Function to call loss function
+        """
+        if loss == "binary_crossentropy":
+            if self.config.get_parameter("edge_enhance"):
+                from .internals.losses import EE_binary_crossentropy as loss
+                print("Loss : edge-enhanced binary crossentropy")
+            else:
+                print("Loss : binary crossentropy")
+            return loss
+        elif loss == "categorical_crossentropy":
+            if self.config.get_parameter("edge_enhance"):
+                from .internals.losses import EE_categorical_crossentropy as loss
+                print("Loss : edge-enhanced categorical crossentropy")
+            else:
+                print("Loss : categorical crossentropy")
+            return
loss + elif loss == "jaccard_distance_loss": + if self.config.get_parameter("edge_enhance"): + from .internals.losses import EE_jaccard_distance_loss as jaccard_distance_loss + print("Loss : edge-enhanced jaccard_distance_loss") + else: + print("Loss : jaccard distance loss") + from .internals.losses import jaccard_distance_loss + return jaccard_distance_loss + elif loss == "dice_loss": + if self.config.get_parameter("edge_enhance"): + from .internals.losses import EE_dice_coef_loss as dice_coef_loss + print("Loss : edge-enhanced Dice loss") + else: + print("Loss : Dice loss") + from .internals.losses import dice_coef_loss + return dice_coef_loss + elif loss == "bce_dice_loss": + if self.config.get_parameter("edge_enhance"): + from .internals.losses import EE_bce_dice_loss as bce_dice_loss + print("Loss : Edge Enhanced 1 - Dice + BCE loss") + else: + print("Loss : 1 - Dice + BCE loss") + from .internals.losses import bce_dice_loss + return bce_dice_loss + elif loss == "ssim_loss": + if self.config.get_parameter("edge_enhance"): + from .internals.losses import EE_DSSIM_loss as DSSIM_loss + print("Loss : Edge Enhanced DSSIM loss") + else: + print("Loss : DSSIM loss") + from .internals.losses import DSSIM_loss + return DSSIM_loss + elif loss == "bce_ssim_loss": + if self.config.get_parameter("edge_enhance"): + from .internals.losses import EE_bce_ssim_loss as bce_ssim_loss + print("Loss : Edge Enhanced BCE + DSSIM loss") + else: + print("Loss : BCE + DSSIM loss") + from .internals.losses import bce_ssim_loss + return bce_ssim_loss + + + elif loss == "mean_squared_error": + return keras.losses.mean_squared_error + elif loss == "mean_absolute_error": + return keras.losses.mean_absolute_error + + elif loss == "lovasz_hinge": + print("Loss : Lovasz-hinge loss") + from .internals.losses import lovasz_loss + return lovasz_loss + elif loss == "ssim_mae_loss": + print("Loss : DSSIM + MAE loss") + from .internals.losses import dssim_mae_loss + return dssim_mae_loss + else: + print("Loss : {}".format(loss)) + return loss + + + ####################### + # Callbacks + ####################### + def tensorboard_call(self): + """Initialize tensorboard call + """ + return TensorBoard(log_dir=self.log_dir, + batch_size = self.config.get_parameter("batch_size_per_GPU"), + write_graph=self.config.get_parameter("write_graph"), + write_images=self.config.get_parameter("write_images"), + write_grads=self.config.get_parameter("write_grads"), + update_freq='epoch', + histogram_freq=self.config.get_parameter("histogram_freq")) + + def model_checkpoint_call(self, verbose = 0): + """Initialize model checkpoint call + """ + if self.config.get_parameter("save_best_weights") is False: + return [ModelCheckpoint(self.checkpoint_path, save_weights_only=True, verbose=verbose)] + else: + return [ModelCheckpoint(self.checkpoint_best, save_best_only=True, save_weights_only=True, verbose=verbose), + ModelCheckpoint(self.checkpoint_now, save_weights_only=True, verbose=verbose)] + + ####################### + # Clear memory once training is done + ####################### + def end_training(self): + """Deletes model and releases gpu memory held by tensorflow + """ + # del reference to model + del self.model + + # clear memory + tf.reset_default_graph() + K.clear_session() + + # take hold of cuda device to shut it down + from numba import cuda + cuda.select_device(0) + cuda.close() + + ####################### + # Train Model + ####################### + def train_model(self, verbose = True): + """Trains model + + Parameters + 
----------
+        verbose : `bool`, optional
+            [Default: True] Verbose output
+        """
+        history = self.model.fit(self.aug_images, self.aug_ground_truth, validation_split = self.config.get_parameter("val_split"),
+                                 batch_size = self.config.get_parameter("batch_size"), epochs = self.config.get_parameter("num_epochs"), shuffle = True,
+                                 callbacks=self.callbacks, verbose=verbose)
+
+        self.end_training()
+
+    #######################
+    # Predict using loaded model weights
+    #######################
+    # TODO: change to load model from yaml file
+    def load_model(self, model_dir = None): # redo
+        """Loads model from h5 file
+
+        Parameters
+        ----------
+        model_dir : `str`, optional
+            [Default: None] Directory containing the model file
+        """
+        # TODO: rewrite to load model from yaml file
+        if model_dir is None:
+            model_dir = self.config.get_parameter("model_dir")
+
+        if os.path.isdir(model_dir) is True:
+            list_weights_files = glob.glob(os.path.join(model_dir,'*.h5'))
+            list_weights_files.sort() # To ensure that [-1] gives the last file
+
+            model_dir = os.path.join(model_dir,list_weights_files[-1])
+
+        self.model.load_model(model_dir)
+        print("Loaded model from: " + model_dir)
+
+    def load_weights(self, weights_path = None, weights_index = -1):
+        """Loads weights from h5 file
+
+        Parameters
+        ----------
+        weights_path : `str`, optional
+            [Default: None] Path containing the weights file or the directory to the weights file
+        weights_index : `int`, optional
+            [Default: -1] Index of the weights file to load when a directory is given
+        """
+        if weights_path is None:
+            weights_path = self.config.get_parameter("model_dir")
+
+        if os.path.isdir(weights_path) is True:
+            if self.config.get_parameter("save_best_weights") is True:
+                weights_path = os.path.join(weights_path, "weights_best.h5")
+            else:
+                list_weights_files = glob.glob(os.path.join(weights_path,'*.h5'))
+                list_weights_files.sort() # To ensure that [-1] gives the last file
+                self.weights_path = list_weights_files[weights_index]
+                weights_path = os.path.join(weights_path, self.weights_path)
+        else:
+            self.weights_path = weights_path
+
+        self.model.load_weights(weights_path)
+        print("Loaded weights from: " + weights_path)
+
+
+    def predict_images(self, image_dir):
+        """Perform prediction on images found in ``image_dir``
+
+        Parameters
+        ----------
+        image_dir : `str`
+            Directory containing the images to perform prediction on
+
+        Returns
+        ----------
+        image : `array_like`
+            Last image that prediction was performed on
+        """
+
+        # load image list
+        from tqdm.notebook import tqdm
+        image_list = self.list_images(image_dir)
+        for image_path in tqdm(image_list):
+            #for image_path in image_list:
+            image = self.load_image(image_path = image_path)
+            #print(image.shape)
+
+            # percentile normalization
+            if self.config.get_parameter("percentile_normalization"):
+                image, _, _ = self.percentile_normalization(image, in_bound = self.config.get_parameter("percentile"))
+
+            if self.config.get_parameter("tile_overlap_size") == [0,0]:
+                padding = None
+                if len(image.shape)==2:
+                    image = np.expand_dims(image, axis = -1)
+
+                # If length = 3 : X Y C
+                elif len(image.shape)==3:
+                    if image.shape[0] != self.config.get_parameter("tile_size")[0]:
+                        if image.shape[1] != self.config.get_parameter("tile_size")[1]:
+                            image = np.transpose(image,(1,2,0))
+
+                    image = np.expand_dims(image, axis = 0)
+                    if image.shape[1] < self.config.get_parameter("tile_size")[0] or image.shape[2] < self.config.get_parameter("tile_size")[1]:
+                        image, padding = self.pad_image(image, image_size = self.config.get_parameter("tile_size"))
+
+                # Else, length : N X Y Z / N X Y T
+
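+                # Shape dispatch summary (editor's note; the axis names follow
+                # the comments above and are assumptions about intended layouts):
+                #   2D (X, Y)          -> channel axis appended
+                #   3D (X, Y, C)       -> channels moved last if needed, batch axis added
+                #   4D (N, X, Y, Z/T)  -> handled below, slice by slice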
elif len(image.shape)==4: + if image.shape[1] != self.config.get_parameter("tile_size")[0]: # Means N X T Y + image = np.transpose(image,(0,1,3,2)) + if image.shape[1] < self.config.get_parameter("tile_size")[0] or image.shape[2] < self.config.get_parameter("tile_size")[1]: + image, padding = self.pad_image(image, image_size = self.config.get_parameter("tile_size")) + #if image.shape[0] != 1: + # image = np.transpose(image,(3,1,2,0)) + + + # Single slice image vs Stack of images (no need of new axis) + if len(image.shape)==3: + input_image = image[np.newaxis,:,:] + #output_image = self.model.predict(input_image, verbose=1) + output_image = self.model.predict(input_image) + + elif len(image.shape)==4: + output_image = [] + for i in tqdm(range(image.shape[0])): + input_image = image[i,:,:,:] + input_image = np.expand_dims(input_image, axis = 0) + if i == 0: + #output_image = self.model.predict(input_image, verbose=1) + output_image = self.model.predict(input_image) + + else: + #output_image = np.append(output_image,self.model.predict(input_image, verbose=1), axis = 0) + output_image = np.append(output_image,self.model.predict(input_image), axis = 0) + + else: + output_image = image + for i in tqdm(range(image.shape[0])): + for j in range(image.shape[1]): + input_image = image[i,j,:,:,:] + input_image = np.expand_dims(input_image, axis = 0) + #output_image[i,j,:,:,:] = self.model.predict(input_image, verbose=1) + output_image[i,j,:,:,:] = self.model.predict(input_image) + + if padding is not None: + h, w = output_image.shape[1:3] + output_image = np.reshape(output_image, (h, w)) + output_image = self.remove_pad_image(output_image, padding = padding) + else: + tile_image_list, num_rows, num_cols, padding = self.tile_image(image, self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size")) + + pred_train_list = [] + for tile in tile_image_list: + + # reshape image to correct dimensions for unet + h, w = tile.shape[:2] + + tile = np.reshape(tile, (1, h, w, 1)) + + pred_train_list.extend(self.model.predict(tile, verbose=1)) + + output_image = self.untile_image(pred_train_list, self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size"), + num_rows, num_cols, padding = padding) + + self.save_image(output_image, image_path) + #print(output_image.shape) + + return output_image + + def save_image(self, image, image_path, subfolder = 'Masks', suffix = '-preds'): + """Saves image to image_path + + Final location of image is as follows: + - image_path + - subfolder + - model/weights file name + + Parameters + ---------- + image : `array_like` + Image to be saved + image_path : `str` + Location to save the image in + subfolder : `str` + [Default: 'Masks'] Subfolder in which the image is to be saved in + suffix : `str` + [Default: '-preds'] Suffix to append to the filename of the predicted image + """ + image_dir = os.path.dirname(image_path) + + output_dir = os.path.join(image_dir, subfolder) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + + if self.config.get_parameter("save_best_weights") is True: + basename = os.path.basename(self.config.get_parameter("model_dir")) + else: + basename, _ = os.path.splitext(os.path.basename(self.weights_path)) + + output_dir = os.path.join(output_dir, basename) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + + filename, _ = os.path.splitext(os.path.basename(image_path)) + output_path = os.path.join(output_dir, "{}{}.tif".format(filename, suffix)) + + if 
self.config.get_parameter("save_as_uint16") is True: + image = skimage.util.img_as_uint(image) + skimage.io.imsave(output_path, image) diff --git a/models/Unet.py b/models/Unet.py new file mode 100644 index 0000000..750186a --- /dev/null +++ b/models/Unet.py @@ -0,0 +1,109 @@ +import math + +import keras +from keras.models import Model, load_model +from keras.layers import Input, BatchNormalization, Activation +from keras.layers.core import Lambda, Dropout +from keras.layers.convolutional import Conv2D, Conv2DTranspose, UpSampling2D +from keras.layers.convolutional_recurrent import ConvLSTM2D +from keras.layers.pooling import MaxPooling2D +from keras.layers.merge import Concatenate, Add +from keras import regularizers +from keras import backend as K + +import tensorflow as tf + +from .CNN_Base import CNN_Base +from .layers.layers import normalize_input, activation_function, regularizer_function, bn_relu_conv2d + +###### +# Unet +###### +class Unet(CNN_Base): + """ + Unet functions + see https://www.nature.com/articles/s41592-018-0261-2 + """ + + def __init__(self, model_dir = None, name = 'Unet', **kwargs): + super().__init__(model_dir = model_dir, **kwargs) + + self.config.update_parameter(["model","name"], name) + + def build_model(self, input_size, mean_std_normalization = None, + dropout_value = None, acti = None, padding = None, + kernel_initializer = None, weight_regularizer = None, strides = None): + + ### get parameters from config file ### + filters = self.config.get_parameter("filters") + + if dropout_value is None: + dropout_value = self.config.get_parameter("dropout_value") + if acti is None: + acti = self.config.get_parameter("activation_function") + if padding is None: + padding = self.config.get_parameter("padding") + if kernel_initializer is None: + kernel_initializer = self.config.get_parameter("initializer") + if weight_regularizer is None: + weight_regularizer = self.config.get_parameter("weight_regularizer") + if strides is None: + strides = self.config.get_parameter("strides") + if mean_std_normalization is None: + if self.config.get_parameter("mean_std_normalization") == True: + mean = self.config.get_parameter("mean") + std = self.config.get_parameter("std") + else: + mean = None + std = None + + ### Actual network### + inputs = Input(input_size) + + # normalize images + layer = normalize_input(inputs, + scale_input = self.config.get_parameter("scale_input"), + mean_std_normalization = self.config.get_parameter("mean_std_normalization"), + mean = mean, std = std) + + layer_store = [] + + # encoding arm + for _ in range(self.config.get_parameter("levels")): + layer = bn_relu_conv2d(layer, filters, 3, acti=acti, padding=padding, strides=strides, + kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer) + + layer = bn_relu_conv2d(layer, filters, 3, acti=acti, padding=padding, strides=strides, + kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer) + + layer_store.append(layer) + layer = MaxPooling2D((2, 2))(layer) + + filters = filters * 2 + + + layer = bn_relu_conv2d(layer, filters, 3, acti=acti, padding=padding, strides=strides, + kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer) + + layer = bn_relu_conv2d(layer, filters, 3, acti=acti, padding=padding, strides=strides, + kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer) + + # decoding arm + for i in range(self.config.get_parameter("levels")): + layer = Conv2DTranspose(filters, (2, 2), strides=(2, 2), 
+            layer = Concatenate(axis=3)([layer, layer_store[-i - 1]])
+            filters = filters // 2
+
+            layer = bn_relu_conv2d(layer, filters, 3, acti=acti, padding=padding, strides=strides,
+                                   kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+            layer = bn_relu_conv2d(layer, filters, 3, acti=acti, padding=padding, strides=strides,
+                                   kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+
+        if self.config.get_parameter("nb_classes") == 1:
+            outputs = Conv2D(1, (1, 1), activation=self.config.get_parameter("final_activation"))(layer)
+        else:
+            outputs = Conv2D(self.config.get_parameter("nb_classes") + 1, (1, 1), activation=self.config.get_parameter("final_activation"))(layer)
+
+        return Model(inputs=[inputs], outputs=[outputs], name='Unet')
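A quick check of the geometry the loops above imply: every level doubles `filters` and halves the spatial size, so the configured tile size should be divisible by `2**levels` or the decoder's `Concatenate` will see mismatched shapes. A sketch with assumed example values (tile 256, 32 base filters, 4 levels):

    def unet_geometry(tile, filters, levels):
        shapes = []
        for _ in range(levels):
            shapes.append((tile, filters))
            assert tile % 2 == 0, "tile size must be divisible by 2**levels"
            tile, filters = tile // 2, filters * 2
        shapes.append((tile, filters))  # bridge block at the bottom of the U
        return shapes

    print(unet_geometry(256, 32, 4))
    # [(256, 32), (128, 64), (64, 128), (32, 256), (16, 512)]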
diff --git a/models/Unet_ResAttnet.py b/models/Unet_ResAttnet.py
new file mode 100644
index 0000000..d9ed1a9
--- /dev/null
+++ b/models/Unet_ResAttnet.py
@@ -0,0 +1,501 @@
+from keras.layers import Input, concatenate, add, \
+    Multiply, Lambda
+from keras.layers.convolutional import Conv3D, MaxPooling3D, MaxPooling2D, UpSampling2D, \
+    UpSampling3D, Conv2D
+from keras.layers.core import Activation
+from keras.layers.normalization import BatchNormalization
+from keras.models import Model
+from .CNN_Base import CNN_Base
+
+
+# Get neural network
+class RA_Unet(CNN_Base):
+
+    def __init__(self, model_dir = None, **kwargs):
+        super().__init__(model_dir = model_dir, **kwargs)
+
+    def build_model(self, inp_shape):
+        name = self.config.get_parameter("name")
+        if name == 'Res_att_unet_2d':
+            model = self.build_res_atten_unet_2d(inp_shape)
+            return model
+        elif name == 'Res_att_unet_3d':
+            model = self.build_res_atten_unet_3d(inp_shape)
+            return model
+
+    # ==========================================================
+    # ================= Attention ResUnet 3D ==================
+    # ==========================================================
+
+    def attention_block(self, input, input_channels=None, output_channels=None, encoder_depth=1, name='out'):
+        """
+        attention block
+        https://arxiv.org/abs/1704.06904
+        """
+        p = 1
+        t = 2
+        r = 1
+
+        if input_channels is None:
+            input_channels = input.get_shape()[-1].value
+        if output_channels is None:
+            output_channels = input_channels
+
+        # First Residual Block
+        for i in range(p):
+            input = self.residual_block(input)
+
+        # Trunk Branch
+        output_trunk = input
+        for i in range(t):
+            output_trunk = self.residual_block(output_trunk, output_channels=output_channels)
+
+        # Soft Mask Branch
+
+        ## encoder
+        ### first down sampling
+        output_soft_mask = MaxPooling3D(padding='same')(input)  # 32x32
+        for i in range(r):
+            output_soft_mask = self.residual_block(output_soft_mask)
+
+        skip_connections = []
+        for i in range(encoder_depth - 1):
+            ## skip connections
+            output_skip_connection = self.residual_block(output_soft_mask)
+            skip_connections.append(output_skip_connection)
+            # print('skip shape:', output_skip_connection.get_shape())
+
+            ## down sampling
+            output_soft_mask = MaxPooling3D(padding='same')(output_soft_mask)
+            for _ in range(r):
+                output_soft_mask = self.residual_block(output_soft_mask)
+
+        ## decoder
+        skip_connections = list(reversed(skip_connections))
+        for i in range(encoder_depth - 1):
+            ## upsampling
+            for _ in range(r):
+                output_soft_mask = self.residual_block(output_soft_mask)
+            output_soft_mask = UpSampling3D()(output_soft_mask)
+            ## skip connections
+            output_soft_mask = add([output_soft_mask, skip_connections[i]])
+
+        ### last upsampling
+        for i in range(r):
+            output_soft_mask = self.residual_block(output_soft_mask)
+        output_soft_mask = UpSampling3D()(output_soft_mask)
+
+        ## Output
+        output_soft_mask = Conv3D(input_channels, (1, 1, 1))(output_soft_mask)
+        output_soft_mask = Conv3D(input_channels, (1, 1, 1))(output_soft_mask)
+        output_soft_mask = Activation('sigmoid')(output_soft_mask)
+
+        # Attention: (1 + output_soft_mask) * output_trunk
+        output = Lambda(lambda x: x + 1)(output_soft_mask)
+        output = Multiply()([output, output_trunk])
+
+        # Last Residual Block
+        for i in range(p):
+            output = self.residual_block(output, name=name)
+
+        return output
+
+    def residual_block(self, input, input_channels=None, output_channels=None, kernel_size=(3, 3, 3), stride=1, name='out'):
+        """
+        full pre-activation residual block
+        https://arxiv.org/pdf/1603.05027.pdf
+        """
+        if output_channels is None:
+            output_channels = input.get_shape()[-1].value
+        if input_channels is None:
+            input_channels = output_channels // 4
+
+        strides = (stride, stride, stride)
+
+        x = BatchNormalization()(input)
+        x = Activation('relu')(x)
+        x = Conv3D(input_channels, (1, 1, 1))(x)
+
+        x = BatchNormalization()(x)
+        x = Activation('relu')(x)
+        x = Conv3D(input_channels, kernel_size, padding='same', strides=stride)(x)
+
+        x = BatchNormalization()(x)
+        x = Activation('relu')(x)
+        x = Conv3D(output_channels, (1, 1, 1), padding='same')(x)
+
+        if input_channels != output_channels or stride != 1:
+            input = Conv3D(output_channels, (1, 1, 1), padding='same', strides=strides)(input)
+        if name == 'out':
+            x = add([x, input])
+        else:
+            x = add([x, input], name=name)
+        return x
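The bottleneck layout above (1x1x1 down to `C // 4`, one 3x3x3 conv at the reduced width, then 1x1x1 back up to `C`) is what keeps these blocks affordable in 3D. A back-of-envelope parameter count, with an assumed channel width of 256:

    # Weight + bias count of a Conv3D layer with kernel size k and C_in -> C_out channels.
    def conv3d_params(c_in, c_out, k):
        return c_in * c_out * k ** 3 + c_out

    C = 256  # assumed example width
    bottleneck = (conv3d_params(C, C // 4, 1)
                  + conv3d_params(C // 4, C // 4, 3)
                  + conv3d_params(C // 4, C, 1))
    plain = conv3d_params(C, C, 3)
    print(bottleneck, plain)  # 143744 vs 1769728: roughly 12x fewer parameters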
+    def res_atten_unet_3d(self, input_shape, filter_num=8, merge_axis=-1):
+        # unused sibling of build_res_atten_unet_3d below; made a proper method (self added) so it is at least callable
+        data = Input(shape=input_shape)
+        pool_size = (2, 2, 2)
+        up_size = (2, 2, 2)
+        conv1 = Conv3D(filter_num * 4, 3, padding='same')(data)
+        conv1 = BatchNormalization()(conv1)
+        conv1 = Activation('relu')(conv1)
+        # conv1 = Dropout(0.5)(conv1)
+
+        pool = MaxPooling3D(pool_size=pool_size)(conv1)
+
+        res1 = self.residual_block(pool, output_channels=filter_num * 8)
+        # res1 = Dropout(0.5)(res1)
+
+        pool1 = MaxPooling3D(pool_size=pool_size)(res1)
+
+        res2 = self.residual_block(pool1, output_channels=filter_num * 16)
+        # res2 = Dropout(0.5)(res2)
+
+        pool2 = MaxPooling3D(pool_size=pool_size)(res2)
+
+        res3 = self.residual_block(pool2, output_channels=filter_num * 32)
+        # res3 = Dropout(0.5)(res3)
+
+        pool3 = MaxPooling3D(pool_size=pool_size)(res3)
+
+        res4 = self.residual_block(pool3, output_channels=filter_num * 64)
+        # res4 = Dropout(0.5)(res4)
+
+        pool4 = MaxPooling3D(pool_size=pool_size)(res4)
+
+        res5 = self.residual_block(pool4, output_channels=filter_num * 64)
+        res5 = self.residual_block(res5, output_channels=filter_num * 64)
+
+        atb5 = self.attention_block(res4, encoder_depth=1, name='atten1')
+        up1 = UpSampling3D(size=up_size)(res5)
+        merged1 = concatenate([up1, atb5], axis=merge_axis)
+
+        res5 = self.residual_block(merged1, output_channels=filter_num * 64)
+        # res5 = Dropout(0.5)(res5)
+
+        atb6 = self.attention_block(res3, encoder_depth=2, name='atten2')
+        up2 = UpSampling3D(size=up_size)(res5)
+        merged2 = concatenate([up2, atb6], axis=merge_axis)
+
+        res6 = self.residual_block(merged2, output_channels=filter_num * 32)
+        # res6 = Dropout(0.5)(res6)
+
+        atb7 = self.attention_block(res2, encoder_depth=3, name='atten3')
+        up3 = UpSampling3D(size=up_size)(res6)
+        merged3 = concatenate([up3, atb7], axis=merge_axis)
+
+        res7 = self.residual_block(merged3, output_channels=filter_num * 16)
+        # res7 = Dropout(0.5)(res7)
+
+        atb8 = self.attention_block(res1, encoder_depth=4, name='atten4')
+        up4 = UpSampling3D(size=up_size)(res7)
+        merged4 = concatenate([up4, atb8], axis=merge_axis)
+
+        res8 = self.residual_block(merged4, output_channels=filter_num * 8)
+        # res8 = Dropout(0.5)(res8)
+
+        up = UpSampling3D(size=up_size)(res8)
+        merged = concatenate([up, conv1], axis=merge_axis)
+        conv9 = Conv3D(filter_num * 4, 3, padding='same')(merged)
+        conv9 = BatchNormalization()(conv9)
+        conv9 = Activation('relu')(conv9)
+        # conv9 = Dropout(0.5)(conv9)
+
+        output = Conv3D(1, 3, padding='same', activation='sigmoid')(conv9)
+        model = Model(data, output)
+        return model
+
+    # liver network, do not modify
+    def build_res_atten_unet_3d(self, input_shape, merge_axis=-1, pool_size=(2, 2, 2), up_size=(2, 2, 2)):
+        data = Input(shape=input_shape)
+        filter_num = round(self.config.get_parameter("filters") / 4)
+        conv1 = Conv3D(filter_num * 4, 3, padding='same')(data)
+        conv1 = BatchNormalization()(conv1)
+        conv1 = Activation('relu')(conv1)
+
+        pool = MaxPooling3D(pool_size=pool_size)(conv1)
+
+        res1 = self.residual_block(pool, output_channels=filter_num * 4)
+
+        pool1 = MaxPooling3D(pool_size=pool_size)(res1)
+
+        res2 = self.residual_block(pool1, output_channels=filter_num * 8)
+
+        pool2 = MaxPooling3D(pool_size=pool_size)(res2)
+
+        res3 = self.residual_block(pool2, output_channels=filter_num * 16)
+        pool3 = MaxPooling3D(pool_size=pool_size)(res3)
+
+        res4 = self.residual_block(pool3, output_channels=filter_num * 32)
+
+        pool4 = MaxPooling3D(pool_size=pool_size)(res4)
+
+        res5 = self.residual_block(pool4, output_channels=filter_num * 64)
+        res5 = self.residual_block(res5, output_channels=filter_num * 64)
+
+        atb5 = self.attention_block(res4, encoder_depth=1, name='atten1')
+        up1 = UpSampling3D(size=up_size)(res5)
+        merged1 = concatenate([up1, atb5], axis=merge_axis)
+
+        res5 = self.residual_block(merged1, output_channels=filter_num * 32)
+
+        atb6 = self.attention_block(res3, encoder_depth=2, name='atten2')
+        up2 = UpSampling3D(size=up_size)(res5)
+        merged2 = concatenate([up2, atb6], axis=merge_axis)
+
+        res6 = self.residual_block(merged2, output_channels=filter_num * 16)
+        atb7 = self.attention_block(res2, encoder_depth=3, name='atten3')
+        up3 = UpSampling3D(size=up_size)(res6)
+        merged3 = concatenate([up3, atb7], axis=merge_axis)
+
+        res7 = self.residual_block(merged3, output_channels=filter_num * 8)
+        atb8 = self.attention_block(res1, encoder_depth=4, name='atten4')
+        up4 = UpSampling3D(size=up_size)(res7)
+        merged4 = concatenate([up4, atb8], axis=merge_axis)
+
+        res8 = self.residual_block(merged4, output_channels=filter_num * 4)
+        up = UpSampling3D(size=up_size)(res8)
+        merged = concatenate([up, conv1], axis=merge_axis)
+        conv9 = Conv3D(filter_num * 4, 3, padding='same')(merged)
+        conv9 = BatchNormalization()(conv9)
+        conv9 = Activation('relu')(conv9)
+
+        if self.config.get_parameter("nb_classes") == 1:
+            output = Conv3D(1, 3, padding='same', activation=self.config.get_parameter("final_activation"))(conv9)
+        else:
+            output = Conv3D(self.config.get_parameter("nb_classes") + 1, 3, padding='same', activation=self.config.get_parameter("final_activation"))(conv9)
+
+        model = Model(data, output)
+        return model
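Both 3D builders downsample five times with `pool_size=(2, 2, 2)`, so every spatial dimension of `inp_shape` must be divisible by 2**5 = 32 for the `UpSampling3D`/`concatenate` pairs to line up again. A sketch of the constraint, with an assumed example shape:

    def check_inp_shape(shape, n_pools=5):
        # shape is (depth, height, width, channels), as passed to build_res_atten_unet_3d
        for dim in shape[:3]:
            if dim % 2 ** n_pools != 0:
                raise ValueError("%d is not divisible by %d" % (dim, 2 ** n_pools))
        return True

    print(check_inp_shape((64, 128, 128, 1)))  # True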
""" + p = 1 + t = 2 + r = 1 + + if input_channels is None: + input_channels = input.get_shape()[-1].value + if output_channels is None: + output_channels = input_channels + + # First Residual Block + for i in range(p): + input = self.residual_block_2d(input) + + # Trunc Branch + output_trunk = input + for i in range(t): + output_trunk = self.residual_block_2d(output_trunk) + + # Soft Mask Branch + + ## encoder + ### first down sampling + output_soft_mask = MaxPooling2D(padding='same')(input) # 32x32 + for i in range(r): + output_soft_mask = self.residual_block_2d(output_soft_mask) + + skip_connections = [] + for i in range(encoder_depth - 1): + + ## skip connections + output_skip_connection = self.residual_block_2d(output_soft_mask) + skip_connections.append(output_skip_connection) + + ## down sampling + output_soft_mask = MaxPooling2D(padding='same')(output_soft_mask) + for _ in range(r): + output_soft_mask = self.residual_block_2d(output_soft_mask) + + ## decoder + skip_connections = list(reversed(skip_connections)) + for i in range(encoder_depth - 1): + ## upsampling + for _ in range(r): + output_soft_mask = self.residual_block_2d(output_soft_mask) + output_soft_mask = UpSampling2D()(output_soft_mask) + ## skip connections + output_soft_mask = add([output_soft_mask, skip_connections[i]]) + + ### last upsampling + for i in range(r): + output_soft_mask = self.residual_block_2d(output_soft_mask) + output_soft_mask = UpSampling2D()(output_soft_mask) + + ## Output + output_soft_mask = Conv2D(input_channels, (1, 1))(output_soft_mask) + output_soft_mask = Conv2D(input_channels, (1, 1))(output_soft_mask) + output_soft_mask = Activation('sigmoid')(output_soft_mask) + + # Attention: (1 + output_soft_mask) * output_trunk + output = Lambda(lambda x: x + 1)(output_soft_mask) + output = Multiply()([output, output_trunk]) # + + # Last Residual Block + for i in range(p): + output = self.residual_block_2d(output, name=name) + + return output + + + def residual_block_2d(self, input, input_channels=None, output_channels=None, kernel_size=(3, 3), stride=1, name='out'): + """ + full pre-activation residual block + https://arxiv.org/pdf/1603.05027.pdf + """ + acti = self.config.get_parameter("activation_function") + if output_channels is None: + output_channels = input.get_shape()[-1].value + if input_channels is None: + input_channels = output_channels // 4 + strides = (stride, stride) + x = BatchNormalization()(input) + x = Activation(acti)(x) + x = Conv2D(input_channels, (1, 1))(x) + + x = BatchNormalization()(x) + x = Activation(acti)(x) + x = Conv2D(input_channels, kernel_size, padding='same', strides=stride)(x) + + x = BatchNormalization()(x) + x = Activation(acti)(x) + x = Conv2D(output_channels, (1, 1), padding='same')(x) + + if input_channels != output_channels or stride != 1: + input = Conv2D(output_channels, (1, 1), padding='same', strides=strides)(input) + if name == 'out': + x = add([x, input]) + else: + x = add([x, input], name=name) + return x + + + def build_res_atten_unet_2d(self, input_shape): + merge_axis = -1 # Feature maps are concatenated along last axis (for tf backend) + data = Input(shape=input_shape) + filter_num = round(self.config.get_parameter("filters")/4) + acti = self.config.get_parameter("activation_function") + + conv1 = Conv2D(filter_num * 4, 3, padding='same')(data) + conv1 = BatchNormalization()(conv1) + conv1 = Activation(acti)(conv1) + + # res0 = residual_block_2d(data, output_channels=filter_num * 2) + + pool = MaxPooling2D(pool_size=(2, 2))(conv1) + res1 = 
+    def build_res_atten_unet_2d(self, input_shape):
+        merge_axis = -1  # feature maps are concatenated along the last axis (for tf backend)
+        data = Input(shape=input_shape)
+        filter_num = round(self.config.get_parameter("filters") / 4)
+        acti = self.config.get_parameter("activation_function")
+
+        conv1 = Conv2D(filter_num * 4, 3, padding='same')(data)
+        conv1 = BatchNormalization()(conv1)
+        conv1 = Activation(acti)(conv1)
+
+        # res0 = residual_block_2d(data, output_channels=filter_num * 2)
+
+        pool = MaxPooling2D(pool_size=(2, 2))(conv1)
+        res1 = self.residual_block_2d(pool, output_channels=filter_num * 4)
+
+        pool1 = MaxPooling2D(pool_size=(2, 2))(res1)
+
+        res2 = self.residual_block_2d(pool1, output_channels=filter_num * 8)
+
+        pool2 = MaxPooling2D(pool_size=(2, 2))(res2)
+
+        res3 = self.residual_block_2d(pool2, output_channels=filter_num * 16)
+        pool3 = MaxPooling2D(pool_size=(2, 2))(res3)
+
+        res4 = self.residual_block_2d(pool3, output_channels=filter_num * 32)
+        pool4 = MaxPooling2D(pool_size=(2, 2))(res4)
+
+        res5 = self.residual_block_2d(pool4, output_channels=filter_num * 64)
+        res5 = self.residual_block_2d(res5, output_channels=filter_num * 64)
+
+        atb5 = self.attention_block_2d(res4, encoder_depth=1, name='atten1')
+        up1 = UpSampling2D(size=(2, 2))(res5)
+        merged1 = concatenate([up1, atb5], axis=merge_axis)
+
+        res5 = self.residual_block_2d(merged1, output_channels=filter_num * 32)
+
+        atb6 = self.attention_block_2d(res3, encoder_depth=2, name='atten2')
+        up2 = UpSampling2D(size=(2, 2))(res5)
+        merged2 = concatenate([up2, atb6], axis=merge_axis)
+
+        res6 = self.residual_block_2d(merged2, output_channels=filter_num * 16)
+
+        atb7 = self.attention_block_2d(res2, encoder_depth=3, name='atten3')
+        up3 = UpSampling2D(size=(2, 2))(res6)
+        merged3 = concatenate([up3, atb7], axis=merge_axis)
+
+        res7 = self.residual_block_2d(merged3, output_channels=filter_num * 8)
+
+        atb8 = self.attention_block_2d(res1, encoder_depth=4, name='atten4')
+        up4 = UpSampling2D(size=(2, 2))(res7)
+        merged4 = concatenate([up4, atb8], axis=merge_axis)
+
+        res8 = self.residual_block_2d(merged4, output_channels=filter_num * 4)
+
+        up = UpSampling2D(size=(2, 2))(res8)
+        merged = concatenate([up, conv1], axis=merge_axis)
+        # res9 = residual_block_2d(merged, output_channels=filter_num * 2)
+
+        conv9 = Conv2D(filter_num * 4, 3, padding='same')(merged)
+        conv9 = BatchNormalization()(conv9)
+        conv9 = Activation(acti)(conv9)
+
+        if self.config.get_parameter("nb_classes") == 1:
+            output = Conv2D(1, 3, padding='same', activation=self.config.get_parameter("final_activation"))(conv9)
+        else:
+            output = Conv2D(self.config.get_parameter("nb_classes") + 1, 3, padding='same', activation=self.config.get_parameter("final_activation"))(conv9)
+
+        model = Model(data, output)
+        return model
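The gating used in both attention blocks is `(1 + sigmoid(mask)) * trunk`, so attention can only rescale trunk features into the range [1x, 2x]; it never zeroes them out, which preserves the residual signal. Numerically, with assumed example activations:

    import numpy as np

    trunk = np.array([0.5, -1.0, 2.0])
    mask_logits = np.array([-4.0, 0.0, 4.0])   # assumed example mask pre-activations
    mask = 1.0 / (1.0 + np.exp(-mask_logits))  # sigmoid, values in (0, 1)
    gated = (1.0 + mask) * trunk               # Lambda(x: x + 1) followed by Multiply
    print(gated)                               # approx [0.509, -1.5, 3.964]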
+
+class Res_att_unet_2d(RA_Unet):
+    def __init__(self, model_dir = None, name = 'Res_att_unet_2d', **kwargs):
+        super().__init__(model_dir = model_dir, **kwargs)
+
+        self.config.update_parameter(["model", "name"], name)
+
+
+class Res_att_unet_3d(RA_Unet):
+    def __init__(self, model_dir = None, name = 'Res_att_unet_3d', **kwargs):
+        super().__init__(model_dir = model_dir, **kwargs)
+
+        self.config.update_parameter(["model", "name"], name)
diff --git a/models/Unet_Resnet.py b/models/Unet_Resnet.py
new file mode 100644
index 0000000..90b4724
--- /dev/null
+++ b/models/Unet_Resnet.py
@@ -0,0 +1,260 @@
+import math
+
+import keras
+from keras.models import Model, load_model
+from keras.layers import Input, BatchNormalization, Activation
+from keras.layers.core import Lambda, Dropout
+from keras.layers.convolutional import Conv2D, Conv2DTranspose, UpSampling2D
+from keras.layers.convolutional_recurrent import ConvLSTM2D
+from keras.layers.pooling import MaxPooling2D
+from keras.layers.merge import Concatenate, Add
+from keras import regularizers
+from keras import backend as K
+
+import tensorflow as tf
+
+from .CNN_Base import CNN_Base
+from .layers.layers import normalize_input, activation_function, regularizer_function, bn_relu_conv2d, bn_relu_conv2dtranspose
+
+################################################
+# Unet + Resnet
+################################################
+
+class Unet_Resnet(CNN_Base):
+    """
+    Unet + resnet functions
+    see https://link.springer.com/chapter/10.1007/978-3-319-46976-8_19
+    """
+
+    def __init__(self, model_dir = None, **kwargs):
+        super().__init__(model_dir = model_dir, **kwargs)
+
+    def bottleneck_block(self, inputs,
+                         upsample = False,
+                         filters = 8,
+                         strides = 1, dropout_value = None, acti = None, padding = None,
+                         kernel_initializer = None, weight_regularizer = None, name = None):
+        # Bottleneck_block
+        with tf.name_scope("Bottleneck_block" + name):
+            output = bn_relu_conv2d(inputs, filters, 1, acti=acti, padding=padding, strides=strides,
+                                    kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+            output = bn_relu_conv2d(output, filters, 3, acti=acti, padding=padding,
+                                    kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+
+            if upsample == True:
+                output = bn_relu_conv2dtranspose(output, filters, (2, 2), strides = (2, 2), acti=acti, padding=padding,
+                                                 kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+                output = Conv2D(filters * 4, (1, 1), padding=padding,
+                                kernel_initializer=kernel_initializer,
+                                kernel_regularizer=regularizer_function(weight_regularizer))(output)
+            else:
+                output = bn_relu_conv2d(output, filters * 4, 1, acti=acti, padding=padding,
+                                        kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+
+            output = Dropout(dropout_value)(output)
+
+            # reshape input to the same size as output
+            if upsample == True:
+                inputs = UpSampling2D()(inputs)
+            if strides == 2:
+                inputs = Conv2D(output.shape[3].value, 1, padding=padding, strides=strides, kernel_initializer=kernel_initializer)(inputs)
+
+            # ensure the number of filters matches between input and output
+            if output.shape[3] != inputs.shape[3]:
+                inputs = Conv2D(output.shape[3].value, 1, padding=padding, kernel_initializer=kernel_initializer)(inputs)
+
+            return Add()([output, inputs])
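The residual path above expands to `filters * 4` channels and may also change resolution, so the shortcut is projected with a 1x1 conv whenever either happens; otherwise the identity is added directly. The same rule as plain Python bookkeeping:

    # Mirrors the shortcut logic of bottleneck_block above (bookkeeping only).
    def shortcut_needs_projection(in_channels, filters, strides=1, upsample=False):
        out_channels = filters * 4
        return upsample or strides == 2 or in_channels != out_channels

    print(shortcut_needs_projection(64, 16))                  # False: identity shortcut
    print(shortcut_needs_projection(64, 32, strides=2))       # True: downsampling block
    print(shortcut_needs_projection(256, 32, upsample=True))  # True: decoder block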
+    def simple_block(self, inputs, filters,
+                     strides = 1, dropout_value = None, acti = None, padding = None,
+                     kernel_initializer = None, weight_regularizer = None, name = None):
+        with tf.name_scope("Simple_block" + name):
+            output = BatchNormalization()(inputs)
+            output = activation_function(output, acti)
+            output = MaxPooling2D()(output)
+            output = Conv2D(filters, 3, padding=padding, strides=strides,
+                            kernel_initializer=kernel_initializer,
+                            kernel_regularizer=regularizer_function(weight_regularizer))(output)
+
+            output = Dropout(dropout_value)(output)
+
+            inputs = Conv2D(output.shape[3].value, 1, padding=padding, strides=2, kernel_initializer=kernel_initializer)(inputs)
+
+            return Add()([output, inputs])
+
+    def simple_block_up(self, inputs, filters,
+                        strides = 1, dropout_value = None, acti = None, padding = None,
+                        kernel_initializer = None, weight_regularizer = None, name = None):
+        with tf.name_scope("Simple_block_up" + name):
+            output = bn_relu_conv2d(inputs, filters, 3, acti=acti, padding=padding, strides=strides,
+                                    kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+
+            output = Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding=padding, kernel_initializer=kernel_initializer)(output)
+
+            output = Dropout(dropout_value)(output)
+
+            inputs = UpSampling2D()(inputs)
+            inputs = Conv2D(output.shape[3].value, 1, padding=padding, kernel_initializer=kernel_initializer)(inputs)
+
+            return Add()([output, inputs])
+
+    def build_model(self, unet_input, mean_std_normalization = None,
+                    dropout_value = None, acti = None, padding = None,
+                    kernel_initializer = None, weight_regularizer = None):
+
+        ### get parameters from config file ###
+        filters = self.config.get_parameter("filters")
+
+        if dropout_value is None:
+            dropout_value = self.config.get_parameter("dropout_value")
+        if acti is None:
+            acti = self.config.get_parameter("activation_function")
+        if padding is None:
+            padding = self.config.get_parameter("padding")
+        if kernel_initializer is None:
+            kernel_initializer = self.config.get_parameter("initializer")
+        if weight_regularizer is None:
+            weight_regularizer = self.config.get_parameter("weight_regularizer")
+        if mean_std_normalization is None:
+            if self.config.get_parameter("mean_std_normalization") == True:
+                mean = self.config.get_parameter("mean")
+                std = self.config.get_parameter("std")
+            else:
+                mean = None
+                std = None
+
+        ### actual network ###
+        inputs = Input(unet_input)
+
+        # normalize images
+        layer = normalize_input(inputs,
+                                scale_input = self.config.get_parameter("scale_input"),
+                                mean_std_normalization = self.config.get_parameter("mean_std_normalization"),
+                                mean = mean, std = std)
+
+        # encoder arm
+        layer_1 = Conv2D(filters, (3, 3), padding = padding,
+                         kernel_initializer = kernel_initializer,
+                         kernel_regularizer = regularizer_function(weight_regularizer), name="Conv_layer_1")(layer)
+
+        layer_2 = self.simple_block(layer_1, filters,
+                                    dropout_value = dropout_value, acti = acti, padding = padding,
+                                    kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer,
+                                    name = "_layer_2")
+
+        layer = layer_2
+        layer_store = [layer]
+
+        for i, conv_layer_i in enumerate(self.config.get_parameter("bottleneck_block"), 1):
+            strides = 2
+
+            # last layer of the encoding arm is treated as the across block
+            if i == len(self.config.get_parameter("bottleneck_block")):
+                layer = self.bottleneck_block(layer, filters = filters,
+                                              strides = strides, dropout_value = dropout_value, acti = acti, padding = padding,
+                                              kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer,
+                                              name = "_layer_{}".format(2 + i))
+
+                for count in range(conv_layer_i - 2):
+                    layer = self.bottleneck_block(layer, filters = filters,
+                                                  dropout_value = dropout_value, acti = acti, padding = padding,
+                                                  kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer,
+                                                  name = "_layer_{}-{}".format(2 + i, count))
+                layer = self.bottleneck_block(layer, upsample = True,
+                                              filters = filters, strides = 1,
+                                              dropout_value = dropout_value, acti = acti, padding = padding,
+                                              kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer,
+                                              name = "_up_layer_{}".format(2 + i))
+            else:
+                layer = self.bottleneck_block(layer, filters = filters,
+                                              strides = strides, dropout_value = dropout_value, acti = acti, padding = padding,
+                                              kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer,
+                                              name = "_layer_{}".format(2 + i))
+
+                for count in range(conv_layer_i - 1):
+                    layer = self.bottleneck_block(layer, filters = filters,
+                                                  dropout_value = dropout_value, acti = acti, padding = padding,
+                                                  kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer,
+                                                  name = "_layer_{}-{}".format(2 + i, count))
+                filters = filters * 2
+                layer_store.append(layer)
+
+        # decoder arm
+        for i, conv_layer_i in enumerate(self.config.get_parameter("bottleneck_block")[-2::-1], 1):
+            filters = filters // 2
+
+            # i must stay positive here, possibly due to the way the keras/tf model is compiled
+            layer = Concatenate(axis=3, name="Concatenate_layer_{}".format(i + 6))([layer_store[-i], layer])
+
+            for count in range(conv_layer_i - 1):
+                layer = self.bottleneck_block(layer, filters = filters,
+                                              dropout_value = dropout_value, acti = acti, padding = padding,
+                                              kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer,
+                                              name = "_layer_{}-{}".format(i + 6, count))
+
+            layer = self.bottleneck_block(layer, upsample = True,
+                                          filters = filters, strides = 1,
+                                          dropout_value = dropout_value, acti = acti, padding = padding,
+                                          kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer,
+                                          name = "_layer_{}".format(i + 6))
+
+        layer_13 = Concatenate(axis=3, name="Concatenate_layer_13")([layer, layer_2])
+        layer_14 = self.simple_block_up(layer_13, filters,
+                                        dropout_value = dropout_value, acti = acti, padding = padding,
+                                        kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer,
+                                        name = "_layer_14")
+
+        layer_15 = Concatenate(axis=3, name="Concatenate_layer_15")([layer_14, layer_1])
+
+        layer_16 = Conv2D(filters, (3, 3), padding = padding,
+                          kernel_initializer = kernel_initializer, kernel_regularizer = regularizer_function(weight_regularizer),
+                          name="Conv_layer_16")(layer_15)
+
+        layer_17 = BatchNormalization()(layer_16)
+        layer_18 = activation_function(layer_17, acti)
+
+        if self.config.get_parameter("nb_classes") == 1:
+            outputs = Conv2D(1, (1, 1), activation=self.config.get_parameter("final_activation"))(layer_18)
+        else:
+            outputs = Conv2D(self.config.get_parameter("nb_classes") + 1, (1, 1), activation=self.config.get_parameter("final_activation"))(layer_18)
+        #outputs = Conv2D(1, (1, 1), activation = self.config.get_parameter("final_activation"))(layer_18)
+
+        return Model(inputs=[inputs], outputs=[outputs], name = self.config.get_parameter('name'))
+
+class Unet_Resnet101(Unet_Resnet):
+    def __init__(self, model_dir = None, name = 'Unet_Resnet101', **kwargs):
+        super().__init__(model_dir = model_dir, **kwargs)
+
+        self.config.update_parameter(["model", "name"], name)
+        self.config.update_parameter(["model", "bottleneck_block"], (3, 4, 23, 3))
+
+        # store parameters for ease of use (may need to remove in the future)
+        self.conv_layer = self.config.get_parameter("bottleneck_block")
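The `(3, 4, 23, 3)` tuple is the standard ResNet-101 stage configuration: with three convolutions per bottleneck block, plus the stem and classifier layers of the original classification ResNet, the usual count is 3 * (3 + 4 + 23 + 3) + 2 = 101:

    for name, blocks in {"Unet_Resnet50": (3, 4, 6, 3),
                         "Unet_Resnet101": (3, 4, 23, 3)}.items():
        print(name, 3 * sum(blocks) + 2)  # -> 50 and 101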
+
+class Unet_Resnet50(Unet_Resnet):
+    def __init__(self, model_dir = None, name = 'Unet_Resnet50', **kwargs):
+        super().__init__(model_dir = model_dir, **kwargs)
+
+        self.config.update_parameter(["model", "name"], name)
+        self.config.update_parameter(["model", "bottleneck_block"], (3, 4, 6, 3))
+
+        # store parameters for ease of use (may need to remove in the future)
+        self.conv_layer = self.config.get_parameter("bottleneck_block")
+
+class Unet_Resnet_paper(Unet_Resnet):
+    def __init__(self, model_dir = None, name = 'Unet_Resnet_paper', **kwargs):
+        """
+        see https://arxiv.org/pdf/1608.04117.pdf
+        """
+        super().__init__(model_dir = model_dir, **kwargs)
+
+        self.config.update_parameter(["model", "name"], name)
+        self.config.update_parameter(["model", "bottleneck_block"], (3, 8, 10, 3))
+
+        # store parameters for ease of use (may need to remove in the future)
+        self.conv_layer = self.config.get_parameter("bottleneck_block")
\ No newline at end of file
diff --git a/models/__init__.py b/models/__init__.py
new file mode 100644
index 0000000..61006f3
--- /dev/null
+++ b/models/__init__.py
@@ -0,0 +1 @@
+from __future__ import absolute_import, print_function
\ No newline at end of file
diff --git a/models/__pycache__/CNN_Base.cpython-36.pyc b/models/__pycache__/CNN_Base.cpython-36.pyc
new file mode 100644
GIT binary patch
[base85 binary patch data omitted: the remaining hunks in this run add only compiled bytecode caches (models/__pycache__/CNN_Base, Unet, Unet_ResAttnet and Unet_Resnet, for cpython-36 and cpython-37) and the macOS AppleDouble file models/internals/._losses.py, none of which contains reviewable source.]
diff --git a/models/internals/.ipynb_checkpoints/__init__-checkpoint.py
new file mode 100644
index 0000000..61006f3
--- /dev/null
+++ b/models/internals/.ipynb_checkpoints/__init__-checkpoint.py
@@ -0,0 +1 @@
+from __future__ import absolute_import, print_function
\ No newline at end of file
diff --git a/models/internals/.ipynb_checkpoints/dataset-checkpoint.py b/models/internals/.ipynb_checkpoints/dataset-checkpoint.py
new file mode 100644
index 0000000..f0dfe1c
--- /dev/null
+++ b/models/internals/.ipynb_checkpoints/dataset-checkpoint.py
@@ -0,0 +1,304 @@
+import os, sys
+import numpy as np
+
+import matplotlib.pyplot as plt
+
+from tqdm import tqdm
+
+from .image_functions import Image_Functions
+
+class Dataset(Image_Functions):
+    def __init__(self):
+        """Creates Dataset object that is used to manipulate the training data.
+
+        Attributes
+        ----------
+        classes : list
+            List of dictionaries containing the class name and id
+
+        train_images : list
+            List of images that are used as the input for the network
+
+        train_ground_truth : list
+            List of images that are used as the ground truth for the network
+        """
+
+        self.classes = []
+        self.train_images = []
+        self.train_ground_truth = []
+
+        super().__init__()
+
+    #######################
+    # Class id functions
+    #######################
+    def get_class_id(self, class_name):
+        """Returns the class id and adds class to list if not in list of classes.
+
+        Parameters
+        ----------
+        class_name : str
+            Identity of class that will be associated with the class id
+
+        Returns
+        ----------
+        int
+            Class id
+        """
+
+        if len(self.classes) == 0:
+            self.classes.append({"class": class_name, "id": 0})
+            return 0
+
+        for class_info in self.classes:
+            # if class exists, return its id
+            if class_info["class"] == class_name:
+                return class_info["id"]
+
+        # new class: the next free id equals the current number of classes
+        new_id = len(self.classes)
+        self.classes.append({"class": class_name, "id": new_id})
+        return new_id
+
+    #######################
+    # Sanity check
+    #######################
+    def sanity_check(self, image_index):
+        """Plots the augmented image and ground_truth to check that everything is correct.
+
+        Parameters
+        ----------
+        image_index : int
+            Index of the image and its corresponding ground_truth
+        """
+
+        image = self.aug_images[image_index][:,:,0]
+        ground_truth = self.aug_ground_truth[image_index][:,:,0]
+
+        plt.figure(figsize=(14, 14))
+        plt.axis('off')
+        plt.imshow(image, cmap='gray',
+                   norm=None, interpolation=None)
+        plt.show()
+
+        plt.figure(figsize=(14, 14))
+        plt.axis('off')
+        plt.imshow(ground_truth, cmap='gray',
+                   norm=None, interpolation=None)
+        plt.show()
+
+    def load_dataset(self, dataset_dir = None, tiled = False):
+        """Loads dataset from ``dataset_dir``
+
+        Parameters
+        ----------
+        dataset_dir : str or None, optional
+            Folder to load the dataset from. If None, ``dataset_dir`` is obtained from the config file
+
+        tiled : bool, optional
+            Whether the tiling function is used
+        """
+
+        # update dataset_dir if specified.
If not, load dataset_dir from config file + if dataset_dir is None: + dataset_dir = self.config.get_parameter("dataset_dir") + else: + self.config.update_parameter(self.config.find_key("dataset_dir"), dataset_dir) + image_dirs = next(os.walk(dataset_dir))[1] + image_dirs = [f for f in image_dirs if not f[0] == '.'] + + for img_dir in image_dirs: + # images + image = self.load_image(os.path.join(dataset_dir, img_dir), subfolder = self.config.get_parameter("image_subfolder")) + # percentile normalization + if self.config.get_parameter("percentile_normalization"): + image, _, _ = self.percentile_normalization(image, in_bound = self.config.get_parameter("percentile")) + + if tiled is True: + tile_image_list, num_rows, num_cols, padding = self.tile_image(image, self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size")) + self.config.update_parameter(["images","num_rows"], num_rows) + self.config.update_parameter(["images","num_cols"], num_cols) + self.config.update_parameter(["images","padding"], padding) + self.train_images.extend(tile_image_list) + else: + self.train_images.extend([image,]) + + #ground_truth + ground_truth, class_id = self.load_ground_truth(os.path.join(dataset_dir, img_dir), subfolder = self.config.get_parameter("ground_truth_subfolder")) + if tiled is True: + tile_ground_truth_list, _, _, _ = self.tile_image(ground_truth[0], self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size")) + self.train_ground_truth.extend(tile_ground_truth_list) + else: + self.train_ground_truth.extend(ground_truth) + + ####################### + # Image augmentation + ####################### + def augment_images(self): + """Augments images using the parameters in the config file""" + + # TODO: To allow for augmentation of multi-class images + + augmentor = self.augmentations(p=self.config.get_parameter("augmentations_p")) + + # increase number of images + self.aug_images = self.train_images*self.config.get_parameter("num_augmented_images") + self.aug_ground_truth = self.train_ground_truth*self.config.get_parameter("num_augmented_images") + + print("Performing augmentations on {} images".format(len(self.aug_images))) + sys.stdout.flush() + + for i in tqdm(range(len(self.aug_images)),desc="Augmentation of images"): + + # target must be image and mask in order for albumentations to work + data = {"image": self.aug_images[i], + "mask": self.aug_ground_truth[i]} + augmented = augmentor(**data) + + self.aug_images[i] = self.reshape_image(np.asarray(augmented["image"])) + + # add + if self.config.get_parameter("use_binary_dilation_after_augmentation") is True: + from skimage.morphology import binary_dilation, disk + self.aug_ground_truth[i] = self.reshape_image(binary_dilation(np.ndarray.astype(augmented["mask"], np.bool), disk(self.config.get_parameter("disk_size")))) + else: + self.aug_ground_truth[i] = self.reshape_image(np.ndarray.astype(augmented["mask"], np.bool)) + + self.aug_images = np.stack(self.aug_images, axis = 0) + self.aug_ground_truth = np.stack(self.aug_ground_truth, axis = 0) + + mean = self.aug_images.mean() + std = self.aug_images.std() + + self.config.update_parameter(["images","mean"], float(mean)) + self.config.update_parameter(["images","std"], float(std)) + + print("Augmentations complete!") + + def augmentations(self, p = None): + """Generates list of augmentations using parameters obtained from config file + + Parameters + ---------- + p : int, optional + probability to apply any augmentations to image + + Returns + 
---------- + function + function used to augment images + """ + from albumentations import ( + RandomCrop, HorizontalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90, + Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, ElasticTransform, + IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur, + IAASharpen, RandomBrightnessContrast, Flip, OneOf, Compose + ) + + augmentation_list = [] + + if self.config.get_parameter("random_rotate") is True: + augmentation_list.append(RandomRotate90(p = self.config.get_parameter("random_rotate_p"))) # 0.9 + + if self.config.get_parameter("flip") is True: + augmentation_list.append(Flip()) + + if self.config.get_parameter("transpose") is True: + augmentation_list.append(Transpose()) + + if self.config.get_parameter("blur_group") is True: + blur_augmentation = [] + if self.config.get_parameter("motion_blur") is True: + blur_augmentation.append(MotionBlur(p = self.config.get_parameter("motion_blur_p"))) + if self.config.get_parameter("median_blur") is True: + blur_augmentation.append(MedianBlur(blur_limit = self.config.get_parameter("median_blur_limit"), p = self.config.get_parameter("median_blur_p"))) + if self.config.get_parameter("blur") is True: + blur_augmentation.append(Blur(blur_limit = self.config.get_parameter("blur_limit"), p = self.config.get_parameter("blur_p"))) + augmentation_list.append(OneOf(blur_augmentation, p = self.config.get_parameter("blur_group_p"))) + + if self.config.get_parameter("shift_scale_rotate") is True: + augmentation_list.append(ShiftScaleRotate(shift_limit = self.config.get_parameter("shift_limit"), + scale_limit = self.config.get_parameter("scale_limit"), + rotate_limit = self.config.get_parameter("rotate_limit"), + p = self.config.get_parameter("shift_scale_rotate_p"))) + if self.config.get_parameter("distortion_group") is True: + distortion_augmentation = [] + if self.config.get_parameter("optical_distortion") is True: + distortion_augmentation.append(OpticalDistortion(p = self.config.get_parameter("optical_distortion_p"))) + if self.config.get_parameter("elastic_transform") is True: + distortion_augmentation.append(ElasticTransform(p = self.config.get_parameter("elastic_transform_p"))) + if self.config.get_parameter("grid_distortion") is True: + distortion_augmentation.append(GridDistortion(p = self.config.get_parameter("grid_distortion_p"))) + + augmentation_list.append(OneOf(distortion_augmentation, p = self.config.get_parameter("distortion_group_p"))) + + if self.config.get_parameter("brightness_contrast_group") is True: + contrast_augmentation = [] + if self.config.get_parameter("clahe") is True: + contrast_augmentation.append(CLAHE()) + if self.config.get_parameter("sharpen") is True: + contrast_augmentation.append(IAASharpen()) + if self.config.get_parameter("random_brightness_contrast") is True: + contrast_augmentation.append(RandomBrightnessContrast()) + + augmentation_list.append(OneOf(contrast_augmentation, p = self.config.get_parameter("brightness_contrast_group_p"))) + + augmentation_list.append(RandomCrop(self.config.get_parameter("tile_size")[0], self.config.get_parameter("tile_size")[1], always_apply=True)) + + return Compose(augmentation_list, p = p) + +############################### TODO ############################### +# def preapare_data(self): +# """ +# Performs augmentation if needed +# """ + + +# # Create data generator +# # Return augmented images/ground_truth arrays of batch size +# def generator(features, labels, batch_size, seq_det): +# # create empty 
arrays to contain batch of features and labels +# batch_features = np.zeros((batch_size, features.shape[1], features.shape[2], features.shape[3])) +# batch_labels = np.zeros((batch_size, labels.shape[1], labels.shape[2], labels.shape[3])) + +# while True: +# # Fill arrays of batch size with augmented data taken randomly from full passed arrays +# indexes = random.sample(range(len(features)), batch_size) +# # Perform the exactly the same augmentation for X and y +# random_augmented_images, random_augmented_labels = do_augmentation(seq_det, features[indexes], labels[indexes]) +# batch_features[:,:,:,:] = random_augmented_images[:,:,:,:] +# batch_labels[:,:,:,:] = random_augmented_labels[:,:,:,:] + +# yield batch_features, batch_labels + + # Train augmentation +# def do_augmentation(seq_det, X_train, y_train): +# # Use seq_det to build augmentation. +# # .... +# return np.array(X_train_aug), np.array(y_train_aug) + +# seq = iaa.Sequential([ +# iaa.Fliplr(0.5), # horizontally flip +# iaa.OneOf([ +# iaa.Noop(), +# iaa.GaussianBlur(sigma=(0.0, 1.0)), +# iaa.Noop(), +# iaa.Affine(rotate=(-10, 10), translate_percent={"x": (-0.25, 0.25)}, mode='symmetric', cval=(0)), +# iaa.Noop(), +# iaa.PerspectiveTransform(scale=(0.04, 0.08)), +# iaa.Noop(), +# iaa.PiecewiseAffine(scale=(0.05, 0.1), mode='edge', cval=(0)), +# ]), +# # More as you want ... +# ]) +# seq_det = seq.to_deterministic() + +# history = model.fit_generator(generator(X_train, y_train, BATCH_SIZE, seq_det), +# epochs=EPOCHS, +# steps_per_epoch=steps_per_epoch, +# validation_data=(X_valid, y_valid), +# verbose = 1, +# callbacks = [check_point] +# ) + + # Image augmentations + +############################### END of TODO ############################### \ No newline at end of file diff --git a/models/internals/.ipynb_checkpoints/image_functions-checkpoint.py b/models/internals/.ipynb_checkpoints/image_functions-checkpoint.py new file mode 100644 index 0000000..ebc9dba --- /dev/null +++ b/models/internals/.ipynb_checkpoints/image_functions-checkpoint.py @@ -0,0 +1,366 @@ +import os +import glob +import sys + +import math +import numpy as np + +import skimage +import skimage.io as skio + +class Image_Functions(): + def list_images(self, image_dir, image_ext = '*.tif'): + """List images in the directory with the given file extension + + Parameters + ---------- + image_dir : `str` + Directory to look for image files + image_ext : `str`, optional + [Default: '*.tif'] File extension of the image file + + Returns + ---------- + image_list : `list` + List of images found in the directory with the given file extension + + Notes + ---------- + For linux based systems, please ensure that the file extensions are either in all lowercase or all uppercase. 
+ """ + # to bypass case sensitivity of file extensions in linux and possibly other systems + if sys.platform in ["win32",]: + image_extension = [image_ext] + else: + image_extension = [image_ext.lower(),image_ext.upper()] + + image_list = [] + for ext in image_extension: + image_list.extend(glob.glob(os.path.join(image_dir,ext))) + + return image_list + + ####################### + # Image IO functions + ####################### + def load_image(self, image_path, subfolder = 'Images', image_index = 0, image_ext = '*.tif'): + """Loads images found in ``image_path`` + + Parameters + ---------- + image_path : `str` + Path to look for image files + subfolder : `str`, optional + [Default: 'Images'] Subfolder in which to look for the image files + image_index : `int`, optional + [Default: 0] Index of image to load + image_ext : `str`, optional + [Default: '*.tif'] File extension of the image file + + Returns + ---------- + image : `array_like` + Loaded image + + Notes + ---------- + Only one image from in each directory is loaded. + """ + if os.path.isdir(image_path) is True: + image_list = self.list_images(os.path.join(image_path, subfolder), image_ext = image_ext) + if len(image_list) > 1: + warnings.warn("More that 1 image found in directory. Loading {}".format(image_list[image_index])) + # Load image + image = skio.imread(image_list[image_index]) + else: + image = skio.imread(image_path) + + image = np.expand_dims(image, axis=-1) + return image + + def load_ground_truth(self, image_path, subfolder = 'Masks', image_ext = '*.tif'): + """Loads ground truth images found in ``image_path`` and performs erosion/dilation/inversion if needed + + Parameters + ---------- + image_path : `str` + Path to look for ground truth images + subfolder : `str`, optional + [Default: 'Masks'] Subfolder in which to look for the ground truth images + image_ext : `str`, optional + [Default: '*.tif'] File extension of ground truth image file + + Returns + ---------- + output_ground_truth : `list` + List of ground truth images found in the directory with the given file extension + + class_ids : `list` + List of class ids of the ground truth images + """ + image_list = self.list_images(os.path.join(image_path, subfolder), image_ext = image_ext) + output_ground_truth = [] + class_ids = [] + for ground_truth_path in image_list: + # add class if not in list + ground_truth_name = ground_truth_path.split('\\')[-1] + class_name = ground_truth_name.split('_')[0] + # obtain class_id + class_ids.append(self.get_class_id(class_name)) + + # Load image + ground_truth_img = skio.imread(ground_truth_path) + + # If one mask in 2D, add one extra dimension for the class + if len(ground_truth_img.shape) == 2: + ground_truth_img = np.expand_dims(ground_truth_img, axis=-1) + else: + # Transpore dimension to get class at the end + if ground_truth_img.shape[-1] != self.config.get_parameter("nb_classes"): + ground_truth_img = np.transpose(ground_truth_img,(1,2,0)) + + # perform erosion so that the borders will still be there after augmentation + if self.config.get_parameter("use_binary_erosion") is True: + from skimage.morphology import binary_erosion, disk + # sets dtype back to unsigned integer in order for some augmentations to work + ground_truth_dtype = ground_truth_img.dtype + ground_truth_img = binary_erosion(ground_truth_img, disk(self.config.get_parameter("disk_size"))) + ground_truth_img = ground_truth_img.astype(ground_truth_dtype) + + if self.config.get_parameter("use_binary_dilation") is True: + from skimage.morphology import 
+            if self.config.get_parameter("use_binary_dilation") is True:
+                from skimage.morphology import binary_dilation, disk
+                ground_truth_dtype = ground_truth_img.dtype
+                ground_truth_img = binary_dilation(ground_truth_img, disk(self.config.get_parameter("disk_size")))
+                ground_truth_img = ground_truth_img.astype(ground_truth_dtype)
+
+            # perform inversion of ground_truth if needed
+            if self.config.get_parameter("invert_ground_truth") is True:
+                ground_truth_img = skimage.util.invert(ground_truth_img)
+
+            # Concatenate masks from different files together
+            if len(output_ground_truth) == 0:
+                output_ground_truth.append(ground_truth_img)
+            else:
+                output_ground_truth = np.concatenate((output_ground_truth,ground_truth_img[None,:,:]), axis=-1)
+
+        # If multiclass segmentation, add one mask for non-assigned pixels
+        if self.config.get_parameter("nb_classes") > 1:
+            last_mask = np.ones([ground_truth_img.shape[0],ground_truth_img.shape[1]])*np.amax(ground_truth_img)
+            last_mask = ((last_mask - np.sum(output_ground_truth, axis =-1))>0)*np.amax(ground_truth_img) # To get rid of overlap and negative values
+            last_mask = np.expand_dims(last_mask, axis=-1)
+            output_ground_truth = np.concatenate((output_ground_truth,last_mask), axis=-1)
+
+        return output_ground_truth, class_ids
+
+    def reshape_image(self, image):
+        """Reshapes the image to the correct dimensions for Unet
+
+        Parameters
+        ----------
+        image : `array_like`
+            Image to be reshaped
+
+        Returns
+        ----------
+        image : `array_like`
+            Reshaped image
+        """
+        h, w = image.shape[:2]
+        image = np.reshape(image, (h, w, -1))
+        return image
+
+    #######################
+    # Image padding
+    #######################
+    def pad_image(self, image, image_size, mode = 'constant'):
+        """Pad image to specified image_size
+
+        Parameters
+        ----------
+        image : `array_like`
+            Image to be padded
+        image_size : `list`
+            Final size of padded image
+        mode : `str`, optional
+            [Default: 'constant'] Mode to pad the image
+
+        Returns
+        ----------
+        image : `array_like`
+            Padded image
+
+        padding : `list`
+            List containing the number of pixels padded in each direction
+        """
+        h, w = image.shape[:2]
+
+        top_pad = (image_size[0] - h) // 2
+        bottom_pad = image_size[0] - h - top_pad
+
+        left_pad = (image_size[1] - w) // 2
+        right_pad = image_size[1] - w - left_pad
+
+        padding = ((top_pad, bottom_pad), (left_pad, right_pad))
+        image = np.pad(image, padding, mode = mode, constant_values=0)
+
+        return image, padding
+
+    def remove_pad_image(self, image, padding):
+        """Removes pad from image
+
+        Parameters
+        ----------
+        image : `array_like`
+            Padded image
+        padding : `list`
+            List containing the number of padded pixels in each direction
+
+        Returns
+        ----------
+        image : `array_like`
+            Image without padding
+        """
+
+        h, w = image.shape[:2]
+
+        return image[padding[0][0]:h-padding[0][1], padding[1][0]:w-padding[1][1]]
+
+    #######################
+    # Tiling functions
+    #######################
+    def tile_image(self, image, tile_size, tile_overlap_size):
+        """Converts an image into a list of tiled images
+
+        Parameters
+        ----------
+        image : `array_like`
+            Image to be tiled
+        tile_size : `list`
+            Size of each individual tile
+        tile_overlap_size : `list`
+            Amount of overlap (in pixels) between each tile
+
+        Returns
+        ----------
+        tile_image_list : `list`
+            List of tiled images
+        num_rows : `int`
+            Number of rows of tiles
+        num_cols : `int`
+            Number of cols of tiles
+        padding : `list`
+            Amount of padding used during tiling
+        """
+        image_height, image_width = image.shape[:2]
+        tile_height = tile_size[0] - tile_overlap_size[0] * 2
+        tile_width = tile_size[1] - tile_overlap_size[1] * 2
+
+        if image_height <= tile_height and image_width <= tile_width:
+            # return a single reshaped tile with the same 4-tuple signature as below
+            return [self.reshape_image(image)], 1, 1, ((0, 0), (0, 0))
+
+        num_rows = math.ceil(image_height/tile_height)
+        num_cols =
math.ceil(image_width/tile_width) + num_tiles = num_rows*num_cols + + + # pad image to fit tile size + image, padding = self.pad_image(image, (tile_height*num_rows + tile_overlap_size[0] * 2, tile_width*num_cols + tile_overlap_size[1]*2)) + + tile_image_list = [] + + for tile_no in range(num_tiles): + tile_x_start = (tile_no // num_rows) * tile_width + tile_x_end = tile_x_start + tile_size[1] + + tile_y_start = (tile_no % num_rows) * tile_height + tile_y_end = tile_y_start + tile_size[0] + + tile_image = image[tile_y_start: tile_y_end, tile_x_start:tile_x_end] + + # ensure input into unet is of correct shape + tile_image = self.reshape_image(tile_image) + + tile_image_list.append(tile_image) + + return tile_image_list, num_rows, num_cols, padding + + def untile_image(self, tile_list, tile_size, tile_overlap_size, num_rows, num_cols, padding): + """Stitches a list of tiled images back into a single image + + Parameters + ---------- + tile_list : `list` + List of tiled images + tile_size : `list` + Size of each individual tile + tile_overlap_size : `list` + Amount of overlap (in pixels) between each tile + num_rows : `int` + Number of rows of tiles + num_cols : `int` + Number of cols of tiles + padding : `list` + Amount of padding used during tiling + + Returns + ---------- + image : `array_like` + Image without padding + """ + if num_rows == 1 and num_cols == 1: + image = tile_list[0] + + image = self.remove_pad_image(image, padding = padding) + + return image + + tile_height = tile_size[0] - tile_overlap_size[0] * 2 + tile_width = tile_size[1] - tile_overlap_size[1] * 2 + + num_tiles = num_rows*num_cols + + for col in range(num_cols): + for row in range(num_rows): + tile_image = tile_list[num_rows*col + row][:,:,0] + tile_image = tile_image[tile_overlap_size[0]:min(-tile_overlap_size[0],-1),tile_overlap_size[1]:min(-tile_overlap_size[1],-1)] + if row == 0: + image_col = np.array(tile_image) + else: + image_col = np.vstack((image_col, tile_image)) + + if col == 0: + image = image_col + else: + image = np.hstack((image, image_col)) + + image, _ = self.pad_image(image, image_size = (tile_height * num_rows + tile_overlap_size[0] * 2, tile_width * num_cols + tile_overlap_size[1]*2)) + + if padding is not None: + image = self.remove_pad_image(image, padding = padding) + + return image + + + ####################### + # Image normalization + ####################### + def percentile_normalization(self, image, in_bound=[3, 99.8]): + """Performs percentile normalization on the image + + Parameters + ---------- + image : `array_like` + Image to be normalized + in_bound : `list` + Upper and lower percentile used to normalize image + + Returns + ---------- + image : `array_like` + Normalized image + + image_min : `int` + Min value of ``image`` + + image_max : `int` + Max value of ``image`` + """ + image_min = np.percentile(image, in_bound[0]) + image_max = np.percentile(image, in_bound[1]) + image = (image - image_min)/(image_max - image_min) + + return image, image_min, image_max diff --git a/models/internals/.ipynb_checkpoints/losses-checkpoint.py b/models/internals/.ipynb_checkpoints/losses-checkpoint.py new file mode 100644 index 0000000..7e8ec84 --- /dev/null +++ b/models/internals/.ipynb_checkpoints/losses-checkpoint.py @@ -0,0 +1,328 @@ +from keras import backend as K +from keras.losses import binary_crossentropy, mean_absolute_error, categorical_crossentropy +import keras +import tensorflow as tf +import numpy as np +from scipy import ndimage + +def jaccard_distance_loss(y_true, y_pred, 
smooth=100): + """ + Jaccard = (|X & Y|)/ (|X|+ |Y| - |X & Y|) + = sum(|A*B|)/(sum(|A|)+sum(|B|)-sum(|A*B|)) + + The jaccard distance loss is usefull for unbalanced datasets. This has been + shifted so it converges on 0 and is smoothed to avoid exploding or disapearing + gradient. + + Ref: https://en.wikipedia.org/wiki/Jaccard_index + + @url: https://gist.github.com/wassname/f1452b748efcbeb4cb9b1d059dce6f96 + @author: wassname + """ + intersection = K.sum(y_true * y_pred, axis=-1) + sum_ = K.sum(y_true + y_pred, axis=-1) + jac = (intersection + smooth) / (sum_ - intersection + smooth) + return (1 - jac) * smooth + + + +def dice_coef(y_true, y_pred, smooth=1.): + """ + Dice = (2*|X & Y|)/ (|X|+ |Y|) + = 2*sum(|A*B|)/(sum(A^2)+sum(B^2)) + ref: https://arxiv.org/pdf/1606.04797v1.pdf + + from wassname as well + """ + intersection = K.sum(y_true * y_pred, axis=-1) + return (2. * intersection + smooth) / (K.sum(K.square(y_true),-1) + K.sum(K.square(y_pred),-1) + smooth) + +def dice_coef_loss(y_true, y_pred): + return 1. - dice_coef(y_true, y_pred) + +def bce_dice_loss(y_true, y_pred): + return 1. - dice_coef(y_true, y_pred) + binary_crossentropy(y_true, y_pred) + +def bce_ssim_loss(y_true, y_pred): + return DSSIM_loss(y_true, y_pred) + binary_crossentropy(y_true, y_pred) + +# code download from: https://github.com/bermanmaxim/LovaszSoftmax +def lovasz_grad(gt_sorted): + """ + Computes gradient of the Lovasz extension w.r.t sorted errors + See Alg. 1 in paper + """ + gts = tf.reduce_sum(gt_sorted) + intersection = gts - tf.cumsum(gt_sorted) + union = gts + tf.cumsum(1. - gt_sorted) + jaccard = 1. - intersection / union + jaccard = tf.concat((jaccard[0:1], jaccard[1:] - jaccard[:-1]), 0) + return jaccard + + +# --------------------------- EDGE DETECTION --------------------------- + +def edge_detection(y_true, y_pred): + size = 5 + in_channel = y_pred.shape[-1] # Number of class + + fil = np.ones([size, size]) + fil[int(size/2), int(size/2)] = 1.0 - size**2 + fil = tf.convert_to_tensor(fil, tf.float32) + fil = tf.stack([fil]*in_channel, axis=2) + fil = tf.expand_dims(fil, 3) + + GT_edge_enhanced = tf.nn.depthwise_conv2d(y_true, fil, strides=[1, 1, 1, 1], padding="SAME") + GT_edge_enhanced = K.cast(GT_edge_enhanced, "float32") + + # Define threshold values on Laplacian filter + Index_1 = tf.where(K.greater(GT_edge_enhanced, 0.1)) + Index_2 = tf.where(K.less(GT_edge_enhanced, -0.1)) + + GT_edge1 = tf.gather_nd(y_true, Index_1) + GT_edge2 = tf.gather_nd(y_true, Index_2) + + Pred_edge1 = tf.gather_nd(y_pred, Index_1) + Pred_edge2 = tf.gather_nd(y_pred, Index_2) + + + y_true = tf.concat([K.flatten(y_true), K.flatten(GT_edge1), K.flatten(GT_edge2)],0) + y_pred = tf.concat([K.flatten(y_pred), K.flatten(Pred_edge1), K.flatten(Pred_edge2)],0) + return y_true, y_pred + + +def edge_detection_sobel(y_true, y_pred): + y_true = K.cast(y_true, "float32") + y_pred = K.cast(y_pred, "float32") + GT_edge_enhanced = tf.image.sobel_edges(y_true) + #y_true = K.flatten(y_true) + #y_pred = K.flatten(y_pred) + #GT_edge_enhanced = K.flatten(GT_edge_enhanced) +#converting the datatypes of y_true, y_pred to make sure they are of same dtypes + + GT_edge_enhanced = K.cast(GT_edge_enhanced, "float32") + GT_edge_enhanced = tf.keras.backend.sum(GT_edge_enhanced, axis = -1) # Sum X and Y Sobel + + y_true = K.flatten(y_true) + y_pred = K.flatten(y_pred) + GT_edge_enhanced = K.flatten(GT_edge_enhanced) + + # Define threshold values on sobel filter + Index_1 = tf.where(K.greater(GT_edge_enhanced, 0.001)) + Index_2 = 
tf.where(K.less(GT_edge_enhanced, -0.001)) + + GT_edge1 = tf.gather(y_true, Index_1) + GT_edge2 = tf.gather(y_true, Index_2) + + Pred_edge1 = tf.gather(y_pred, Index_1) + Pred_edge2 = tf.gather(y_pred, Index_2) + + + y_true = tf.concat([y_true, K.flatten(GT_edge1), K.flatten(GT_edge2)],0) + y_pred = tf.concat([y_pred, K.flatten(Pred_edge1), K.flatten(Pred_edge2)],0) + return y_true, y_pred + + +def EE_bce_dice_loss(y_true, y_pred): + y_true, y_pred = edge_detection(y_true, y_pred) + return bce_dice_loss(y_true, y_pred) + + +def EE_jaccard_distance_loss(y_true, y_pred): + y_true, y_pred = edge_detection(y_true, y_pred) + return jaccard_distance_loss(y_true, y_pred) + +def EE_dice_coef_loss(y_true, y_pred): + y_true, y_pred = edge_detection(y_true, y_pred) + return dice_coef_loss(y_true, y_pred) + +def EE_bce_ssim_loss(y_true, y_pred): + y_true, y_pred = edge_detection(y_true, y_pred) + return bce_ssim_loss(y_true, y_pred) + +def EE_binary_crossentropy(y_true, y_pred): + y_true, y_pred = edge_detection(y_true, y_pred) + return binary_crossentropy(y_true, y_pred) + +def EE_categorical_crossentropy(y_true, y_pred): + y_true, y_pred = edge_detection(y_true, y_pred) + return categorical_crossentropy(y_true, y_pred) + + +# --------------------------- BINARY LOSSES --------------------------- + +def lovasz_hinge(logits, labels, per_image=True, ignore=None): + """ + Binary Lovasz hinge loss + logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty) + labels: [B, H, W] Tensor, binary ground truth masks (0 or 1) + per_image: compute the loss per image instead of per batch + ignore: void class id + """ + if per_image: + def treat_image(log_lab): + log, lab = log_lab + log, lab = tf.expand_dims(log, 0), tf.expand_dims(lab, 0) + log, lab = flatten_binary_scores(log, lab, ignore) + return lovasz_hinge_flat(log, lab) + losses = tf.map_fn(treat_image, (logits, labels), dtype=tf.float32) + loss = tf.reduce_mean(losses) + else: + loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore)) + return loss + + +def lovasz_hinge_flat(logits, labels): + """ + Binary Lovasz hinge loss + logits: [P] Variable, logits at each prediction (between -\infty and +\infty) + labels: [P] Tensor, binary ground truth labels (0 or 1) + ignore: label to ignore + """ + + def compute_loss(): + labelsf = tf.cast(labels, logits.dtype) + signs = 2. * labelsf - 1. + errors = 1. 
- logits * tf.stop_gradient(signs) + errors_sorted, perm = tf.nn.top_k(errors, k=tf.shape(errors)[0], name="descending_sort") + gt_sorted = tf.gather(labelsf, perm) + grad = lovasz_grad(gt_sorted) + loss = tf.tensordot(tf.nn.relu(errors_sorted), tf.stop_gradient(grad), 1, name="loss_non_void") + return loss + + # deal with the void prediction case (only void pixels) + loss = tf.cond(tf.equal(tf.shape(logits)[0], 0), + lambda: tf.reduce_sum(logits) * 0., + compute_loss, + strict=True, + name="loss" + ) + return loss + + +def flatten_binary_scores(scores, labels, ignore=None): + """ + Flattens predictions in the batch (binary case) + Remove labels equal to 'ignore' + """ + scores = tf.reshape(scores, (-1,)) + labels = tf.reshape(labels, (-1,)) + if ignore is None: + return scores, labels + valid = tf.not_equal(labels, ignore) + vscores = tf.boolean_mask(scores, valid, name='valid_scores') + vlabels = tf.boolean_mask(labels, valid, name='valid_labels') + return vscores, vlabels + +def lovasz_loss(y_true, y_pred): + y_true, y_pred = K.cast(K.squeeze(y_true, -1), 'int32'), K.cast(K.squeeze(y_pred, -1), 'float32') + #logits = K.log(y_pred / (1. - y_pred)) + logits = y_pred #Jiaxin + loss = lovasz_hinge(logits, y_true, per_image = True, ignore = None) + return loss + +# Difference of Structural Similarity + +def DSSIM_loss(y_true, y_pred, k1=0.01, k2=0.03, kernel_size=3, max_value=1.0): + # There are additional parameters for this function + # Note: some of the 'modes' for edge behavior do not yet have a + # gradient definition in the Theano tree + # and cannot be used for learning + + c1 = (k1 * max_value) ** 2 + c2 = (k2 * max_value) ** 2 + + kernel = [kernel_size, kernel_size] + y_true = K.reshape(y_true, [-1] + list(K.int_shape(y_pred)[1:])) + y_pred = K.reshape(y_pred, [-1] + list(K.int_shape(y_pred)[1:])) + + patches_pred = tf.extract_image_patches(y_pred, [1, 5, 5, 1], [1, 2, 2, 1], [1, 1, 1, 1], "SAME") + patches_true = tf.extract_image_patches(y_true, [1, 5, 5, 1], [1, 2, 2, 1], [1, 1, 1, 1], "SAME") + + # Reshape to get the var in the cells + bs, w, h, c = K.int_shape(patches_pred) + patches_pred = K.reshape(patches_pred, [-1, w, h, c]) + patches_true = K.reshape(patches_true, [-1, w, h, c]) + # Get mean + u_true = K.mean(patches_true, axis=-1) + u_pred = K.mean(patches_pred, axis=-1) + # Get variance + var_true = K.var(patches_true, axis=-1) + var_pred = K.var(patches_pred, axis=-1) + # Get std dev + covar_true_pred = K.mean(patches_true * patches_pred, axis=-1) - u_true * u_pred + + ssim = (2 * u_true * u_pred + c1) * (2 * covar_true_pred + c2) + denom = ((K.square(u_true) + + K.square(u_pred) + + c1) * (var_pred + var_true + c2)) + ssim /= denom # no need for clipping, c1 and c2 make the denom non-zero + return K.mean((1.0 - ssim) / 2.0) + +def dssim_mae_loss(y_true, y_pred): + return DSSIM_loss(y_true, y_pred) + mean_absolute_error(y_true, y_pred) + +#MSSim +#https://stackoverflow.com/questions/48744945/keras-ms-ssim-as-loss-function +def keras_SSIM_cs(y_true, y_pred): + axis=None + gaussian = make_kernel(1.5) + x = tf.nn.conv2d(y_true, gaussian, strides=[1, 1, 1, 1], padding='SAME') + y = tf.nn.conv2d(y_pred, gaussian, strides=[1, 1, 1, 1], padding='SAME') + + u_x=K.mean(x, axis=axis) + u_y=K.mean(y, axis=axis) + + var_x=K.var(x, axis=axis) + var_y=K.var(y, axis=axis) + + cov_xy=cov_keras(x, y, axis) + + K1=0.01 + K2=0.03 + L=1 # depth of image (255 in case the image has a differnt scale) + + C1=(K1*L)**2 + C2=(K2*L)**2 + C3=C2/2 + + l = ((2*u_x*u_y)+C1) / (K.pow(u_x,2) + 
K.pow(u_y,2) + C1)  # SSIM luminance: (2*u_x*u_y + C1) / (u_x^2 + u_y^2 + C1)
+    c = ((2*K.sqrt(var_x)*K.sqrt(var_y))+C2) / (var_x + var_y + C2)
+    s = (cov_xy+C3) / (K.sqrt(var_x)*K.sqrt(var_y) + C3)
+
+    return [c,s,l]
+
+def keras_MS_SSIM(y_true, y_pred):
+    iterations = 5
+    x=y_true
+    y=y_pred
+    weight = [0.0448, 0.2856, 0.3001, 0.2363, 0.1333]
+    c=[]
+    s=[]
+    for i in range(iterations):
+        cs=keras_SSIM_cs(x, y)
+        c.append(cs[0])
+        s.append(cs[1])
+        l=cs[2]
+        if(i!=4):
+            x=tf.image.resize_images(x, (x.get_shape().as_list()[1]//(2**(i+1)), x.get_shape().as_list()[2]//(2**(i+1))))
+            y=tf.image.resize_images(y, (y.get_shape().as_list()[1]//(2**(i+1)), y.get_shape().as_list()[2]//(2**(i+1))))
+    c = tf.stack(c)
+    s = tf.stack(s)
+    cs = c*s
+
+    #Normalize: suggestion from https://github.com/jorge-pessoa/pytorch-msssim/issues/2 last comment to avoid NaN values
+    l=(l+1)/2
+    cs=(cs+1)/2
+
+    cs=cs**weight
+    cs = tf.reduce_prod(cs)
+    l=l**weight[-1]
+
+    ms_ssim = l*cs
+    ms_ssim = tf.where(tf.is_nan(ms_ssim), K.zeros_like(ms_ssim), ms_ssim)
+
+    return K.mean(ms_ssim)
+
+def mssim_mae_loss(y_true, y_pred):
+    return keras_MS_SSIM(y_true, y_pred) + mean_absolute_error(y_true, y_pred)
diff --git a/models/internals/.ipynb_checkpoints/metrics-checkpoint.py b/models/internals/.ipynb_checkpoints/metrics-checkpoint.py
new file mode 100644
index 0000000..a426549
--- /dev/null
+++ b/models/internals/.ipynb_checkpoints/metrics-checkpoint.py
@@ -0,0 +1,23 @@
+"""Metrics for measuring machine learning algorithm performances
+adapted from https://github.com/deaspo/Unet_MedicalImagingSegmentation
+"""
+
+from keras import backend as K
+import tensorflow as tf
+import numpy as np
+
+def mean_iou(y_true, y_pred):
+    prec = []
+    for t in np.arange(0.5, 1.0, 0.05):
+        #y_pred_ = tf.to_int32(y_pred > t)
+        y_pred_ = tf.cast(y_pred > t, tf.int32)
+        if K.int_shape(y_pred)[-1] > 1:
+            num_class = K.int_shape(y_pred)[-1]
+        else:
+            num_class = K.int_shape(y_pred)[-1]+1
+        score, up_opt = tf.compat.v1.metrics.mean_iou(y_true, y_pred_, num_class)
+        K.get_session().run(tf.compat.v1.local_variables_initializer())
+        with tf.control_dependencies([up_opt]):
+            score = tf.identity(score)
+        prec.append(score)
+    return K.mean(K.stack(prec), axis=0)
\ No newline at end of file
diff --git a/models/internals/.ipynb_checkpoints/network_config-checkpoint.py b/models/internals/.ipynb_checkpoints/network_config-checkpoint.py
new file mode 100644
index 0000000..9455a2c
--- /dev/null
+++ b/models/internals/.ipynb_checkpoints/network_config-checkpoint.py
@@ -0,0 +1,237 @@
+import glob
+import os
+from ruamel.yaml import YAML
+
+class Network_Config(object):
+    def __init__(self, model_dir = None, config_filepath = None, **kwargs):
+        """Creates Network_Config object that contains the network parameters and functions needed to manipulate these parameters.
+ + Parameters + ---------- + model_dir : `str`, optional + [Default: None] Folder where the model is to be saved/read from + config_filepath : `str`, optional + [Default: None] Filepath to the config file that will be loaded + **kwargs + For network parameters that are to be changed from the loaded config file + + Attributes + ---------- + yaml : :class:`ruamel.yaml.YAML` + YAML class with function needed to read/write YAML files + config : `dict` + Dictionary containing the config parameters + """ + self.yaml=YAML() + + # load config file from model_dir + if config_filepath is not None: + + self.config = self.load_config_from_file(config_filepath) + print("Loaded config file from {}".format(config_filepath)) + elif model_dir is not None: + try: + self.config = self.load_config_from_model_dir(model_dir) + print("Loaded config file from {}".format(model_dir)) + except: + print("Please ensure that config_filepath is set or there is a config file in model_dir") + raise + + if model_dir is not None: + # update model_dir in config + print("Updating model_dir to {}".format(model_dir)) + self.update_parameter(["general", "model_dir"], model_dir) + + # overwrite network parameters with parameters given during initialization + for key, value in kwargs.items(): + self.update_parameter(self.find_key(key), value) + + # perform calculations + self.update_parameter(["model", "input_size"], self.get_parameter("tile_size") + [self.get_parameter("image_channel"),]) + self.update_parameter(["model", "batch_size"], self.get_parameter("batch_size_per_GPU")) # * self.gpu_count + + ###################### + # Accessors/Mutators + ###################### + def get_parameter(self, parameter, config = []): + """Output the value from the config file using the given key + + Parameters + ---------- + parameter : `list` or `str` + Key or list of keys used to find for the value in the config file + + config : `list`, optional + Used to iterate through nested dictionaries. 
Required to recursively iterate through a nested dictionary
+
+        Returns
+        ----------
+        value : `str` or `int` or `list`
+            Value obtained from the specified key
+
+        See Also
+        ----------
+        find_key : Function to identify the list of keys to address the correct item in a nested dictionary
+        """
+        assert isinstance(parameter, (list, str))
+
+        # find the key in the nested dictionary
+        if isinstance(parameter, str):
+            parameter = self.find_key(parameter)
+
+        if config == []:
+            config = self.config
+        if config is None:
+            return None
+
+        if not parameter:
+            return config
+
+        return self.get_parameter(parameter[1:], config = config.get(parameter[0]))
+
+    def update_parameter(self, parameter, value, config = None):
+        """Updates the parameter in the config file using a full addressed list
+
+        Parameters
+        ----------
+        parameter : `list`
+            List of keys that point to the correct item in the nested dictionary
+
+        value : `str` or `int` or `list`
+            Value that is updated in the nested dictionary
+
+        config : `list` or `none`, optional
+            Used to iterate through nested dictionaries
+
+        Returns
+        ----------
+        config : `dict`
+            The (sub)dictionary in which the value was updated
+        """
+
+        assert type(parameter) is list
+
+        if config is None:
+            config = self.config
+
+        if len(parameter) == 1:
+            config.update({parameter[0]: value})
+            return config
+        # recurse into the current sub-dictionary, not back into self.config
+        return self.update_parameter(parameter[1:], value, config = config.get(parameter[0]))
+
+    def find_key(self, key, config = None):
+        """Find the list of keys to address the correct item in a nested dictionary
+
+        Parameters
+        ----------
+        key : `str`
+            Key that needs to be correctly addressed in a nested dictionary
+
+        config : `list` or `none`, optional
+            Used to iterate through nested dictionaries
+
+        Returns
+        ----------
+        key : `list`
+            Address of the key in the nested dictionary
+        """
+
+        if config is None:
+            config = self.config
+
+        for k, v in config.items():
+            if k == key:
+                return [k]
+            elif isinstance(v, dict):
+                found_key = self.find_key(key, config = v)
+                if found_key is not None:
+                    return [k] + found_key
+        return None
+
+    ######################
+    # Config IO options
+    ######################
+    def load_config_from_file(self, file_path):
+        """Load parameters from yaml file
+
+        Parameters
+        ----------
+        file_path : `str`
+            Path of config file to load
+
+        Returns
+        ----------
+        config : `dict`
+            Dictionary containing the config parameters
+        """
+
+        with open(file_path, 'r') as input_file:
+            config = self.yaml.load(input_file)
+
+        return config
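+    # --- Editor's illustration (not part of the original file) ---------------
+    # How the accessors above cooperate, assuming a loaded config shaped like
+    # {"images": {"mean": 0.0, "std": 1.0}}:
+    #
+    #   cfg.find_key("mean")                          # -> ["images", "mean"]
+    #   cfg.get_parameter("mean")                     # -> 0.0, via find_key
+    #   cfg.update_parameter(["images", "std"], 0.5)  # walk path, then dict.update
+    # --------------------------------------------------------------------------
+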
Loading {}".format(list_config_files[0])) + return self.load_config_from_file(list_config_files[0]) + except IndexError: + print("No config file found in model_dir.") + raise + + def write_config(self, file_path): + """Writes parameters to yaml file + + Parameters + ---------- + file_path : `str` + Path of config file to write to + """ + + with open(file_path, 'w') as output_file: + self.yaml.dump(self.config, output_file) + + output_file.close() + + print("Config file written to: {}".format(file_path)) + + def write_model(self, model, file_path): + """Writes parameters to yaml file + + Parameters + ---------- + model : :class:`Keras.model` + Keras model that will be parsed and written to a yaml file + + file_path : `str` + Path of model file to write to + """ + + with open(file_path, 'w') as output_file: + output_file.write(model.to_yaml()) + + output_file.close() + + print("Model file written to: {}".format(file_path)) \ No newline at end of file diff --git a/models/internals/__init__.py b/models/internals/__init__.py new file mode 100644 index 0000000..61006f3 --- /dev/null +++ b/models/internals/__init__.py @@ -0,0 +1 @@ +from __future__ import absolute_import, print_function \ No newline at end of file diff --git a/models/internals/__pycache__/__init__.cpython-36.pyc b/models/internals/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa628a317f6b03109b966d447d0c7b673debb956 GIT binary patch literal 216 zcmX|(F$%&!5JhJ?|Q#h=b(L1*E^-ll{)C~AlIm~)f80i fN?irTyV~1=6QZ$(5a-Ra8*R95E4>xG6d3pc)3G_^ literal 0 HcmV?d00001 diff --git a/models/internals/__pycache__/__init__.cpython-37.pyc b/models/internals/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10f34d8848832b17581096d17b6e568233f7e8d6 GIT binary patch literal 234 zcmXwyu?oU45QdW!MGDo$m*`?PClPh3i;K8)=uw-9fnHM5bnuaUrLIoSZcbhX5AMJ3 zzaQ=nS(Y-YY+6-ony+a7B?xxZ{gOm6#ao8Fo#_ZGv(3lqX{fWW(S!fRCqrLh@vO5;@AZkZ6O%tC4qyI)Yay_ea?{rDZ`o mtO+hwhh8G>+FsHZ?QKm2V5|uMtL`z*HWUx7-g3J8p=LiDt3IOu literal 0 HcmV?d00001 diff --git a/models/internals/__pycache__/dataset.cpython-36.pyc b/models/internals/__pycache__/dataset.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4298ab7d07c19275263b806d2412e8b262eee20a GIT binary patch literal 7665 zcmcgx&2QYs6(_meFKR#Kl`J_<%B0T6296sfX}%hyaqQS}>nK6&AO)d>p?0_uSN!sj z8(YdQP(UtH6i|^~dMJV(ieB1N4?XqNV^2L7b1hIH|3VK%e{aa8DA`Ve9=gJgX5PGc z^JeC~-+MFre7kM5Yaf67>+dVdKb6u)NBs?alMNJ1VXCiK*?-kiWm~f<^1nW*S~b)( zzaG@Bx~i}W)BQ%!w3@1NRbf?DyQ{F8+dQaP22*b+!}?c{scZ~Yp{KW4@Rf=d8r^Gw zv+defrr{{{qA(c|*Wy#jE6LlaxC-fDX`d@&m8nd-+kh^Z^{!&+*?d-&by<@ccU9=N z#9HX7vmR@+4k(S!6vaY3-17)Hg zfK@I>bN+%sm$>VsZer&0%xG)kj#4w-aZ=Mu%xU5>GmXr^3B7pggB4x)a>on3aNA^1 z8?kiJDD zn{As%(~#LIpQbxsx4f^JGZTSbCJ_&sdSh4>^?VyetGrUNe6J>=noMJti-v9EsBmU3+S56Zt^x%BGh2Z_s*&Cw)%@uinHFT2TZ8pWF*VC~6f5HZ(JHoY)) zdFY@<763!M5btpVX17H4*Fd3kYbyS<{a$Wb7Zx**$WG})wn`3r3EzaHNMW-R^#IPX ztMPMGpJ+4bDqm^`YG${09wYi?<)gK^Hq%&TU6~R0dDQiF<%V+W{D)EsjV$C<*Wl=x zj4}(y$!HV0&b%-&Y2}8{f;e;Q&=Wv)O zw&|Pwz?O#ms+pv`Feb9BYs?K(FWn<=$dw?!yzTkExdqEll4#_?CmBevlq1J@kU!+> zEG8-Z@|l8!lP&6`!zH=+l0Z&SGt@*?Y9us|@h&L5M+F_}up%nH8}b_JqUOZ08?s@A zlVu1)4wCD&^12to!rL@h79C=1ZM$i1lOFpnC<#edIvsUc)wPz^Qd`aJML(gtkM0{ z<$LK4!7&g!+WnvGP|gErVD24&)=49{Ch4$3GO;6-Jg7U0ICqmc3K=O-RSJ#^@%CL&iN<50Z*yl47Nd^>Cl-1b@j$dCy2KH}4Fc*=<0}{`i(~b=FtATK zx{Dsw%hQWW?5CoJ*{0kTx^u@%M9mA5o#-}@59LtLY;vizc2SiL7+?v-@3GDV(JmPis=yjSo2dsHY)wzAlN#y-n`FtzCs_5aGOJuxFs|~2dgn(}|3dxfjk%7|b?`SP 
z&6zsW4>D;gt6^;YP@7lt85;*G{~hYaY(Z&)!#LD7EA#5SHm||%mv$Rps8YwJf~y6s zGM(Gdr?RQf>+{CEIX5I#V;$Dr)@BXTyP)(yX&$5WK{0YlYto+8$=kvXMo_D|jg`vr zo%^h0Ia|r5HeZ^z3N3$uu}iZhb_&5F)9+qQ+q5pMOqp~s3-ni&TQ~F8Qr7w~OPmJI z4{=D>BL|@&kACKuM}hgtCwbFm9{=PMY5#>2e7kT^5&$0>4@N(L>Hl6kZy{=q+NJcsN)k(`)}hlir%9rh+m(OT$-e&>_2rm&tGW& z>|CV_uldmk$<6DiECyNK3?`E4)>v*jB~C>{tRcA?QMC45n?TI*y?qH^!Z_-|kcJo9 zTa+N)6ivx2p|CHj5rEuxVq1~`Rg|@gMmP;@9^KAsqsUK0Jw{%F?X_0RFl86oWFfjI zYT22JHpRzyk&lSpG{!>R5~VQ)^4q)a9uU~QlL{?LgnrxccZDJMB?Co0cG4Z8Pa-em z&tUt6A=Q?hyiSA~1|!;wz8qCnj#ExJ>`27rPZDpH1~bdZwiOpfcuLgs8 zO`>miR>kO*$tuRT*A>)HBc2d`lKvg3D@7><(FrndanxwO6C=tTACiE($X!@1Si8j!o@Jub_Wll{lvb2+ly zfFvIqW=5g;**u?me>K-diln4J!vILunVXCNb<#2Pz)5yx)p`OKbbHGS9li(0$r2{p z8ROQmP(sSOxk%c(CwL*)gRCFHTt$9wRA^x=>Q3Tt^vZFalQci$P zCzp_$!snPHgPZE0GEfKFKsB_p6+jREpQ$XXJ@p0coCe4N_!xkrRc(KD5u=WaDlR%_ zAhQ6RmyuTDLZbOfpw6`bE+16pdZ5E~k-7!7Ii9=b4cSI&79cqbjQLW4bSY@f+d+HY z2|Dv`(4F^!-n<|5=gYzJ{1jY&0M}np5sT-Pio%Tvq?nFG_2;wI$>|xnBT;cV0tiW< z$|-P1$@hE7cj^FOBO!#i;gquPud=Uktd9T)@{wl#6tkYrr5X8%|IAKf)^1KkigsdF zqJH>(CY#<Gs&h9|(a*njGw%MIyfRxw zUWt(i2TwZ}5wEYhq08l~sC@7N4j;p|JVKPBL-dV_bdmRdyK6w4-N7G_Wyd_)a<-6Q z<7Eh$KVDe;Jv8hqES`n)3G+Br@Mh6wMHuhn zaW)Dr@hBFZwH5XD z-x8fnNJf0+S<$?Scz_27yitw`u=O|nDHr{BaXrG|%g7QVPB$gdxyn6OYWLp4Qx=%y zc#1q?4LV#zCQS-lb*4$;IpId+C9W`JGq1lBkvut#@s7*Dl+`i%#*V}Bz9rT&CEw)U z_D&kQNpcA*<(N+Ba<$cWL-*aWs9!?JgyvQoYrxJRv8Scpw7SRIluU5?#X#s|+N&l+ zWlOe#RDcFztF`Xtd!Ln0MZRsSI??DTz zCMS)p*3smcVmYA`vlBY9d?8{fqe(Gn$gZr>%9ff>Ke%@+w9xrJrsJPcF{6U^!0I6{ z$rqQ@HCOM{MmH#>7Y$72WS(5+MVkigg7hOLyv^-mCJy+^ZUdVsdR zRh+m@KT*(;=StH1=#FdEk+hJBSZDGhzRw8jq5BXQbMR+rC+M+gs9G(@zwPWLw)FYo zn$>aqttmYfWjhT2+dJlt3_zSkJap5YSz0ddhwjHu#jgI{^* z!Yuko+haUe``#9Q`JtMqW&imjG(aZq`HRUSY9yQ5! M9-dJRdhXZ$3ylLa{Qv*} literal 0 HcmV?d00001 diff --git a/models/internals/__pycache__/dataset.cpython-37.pyc b/models/internals/__pycache__/dataset.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0164c2f29240afc8957a8501321971f6fe615085 GIT binary patch literal 7668 zcmcgx&2QYs6(_mePwl6?k|oD!*~Ce@fnx<}`fa3f?AmdgC`Rle1>uCDcDNEHetF1^ zEp-VLu!|H0T%?yCilA5(y|kwudg`gio_a3kTA)Dwg&d0h-jGXCa-0S|lnc(zyqS6L z&CHwMdoz5k(`hMqI<@!S`}Nlq<)2hoJUTM3;*D=0VG2_{#mxRyQNKzJ*UdWe6{dR)ziBpA<(k5(toA@*HK%#MVz!uiQyJAi0as;nqzXN`!~9QFlu+nh z_w8-Rx;hQUi5mv-m|88KN}h>dN5)Y|3sd`0nW#)<+JgqPfz$_zsb})cD(kT(YduiS z8e3v*)YMs@byyda#)pcom`!Lh5cMl|V#iK0CMD>he+@4Qj145)%1p7cRF!!xsoYoQ z`XQRi;b`_>(CIRF?8J$UoSzYH?KUE)*qBluWk52vf*#J8#|%zP=+&qhhkmXkKUd9`9Bnd2d zSG|v|vZwKLl%H!exp6+$4%N(x?>$BIE6TfTsW#JCWnGz3>+{I#>&i{#&iQvF7wTE? ztFB?&XFSR@Y^<7n9(&?#Cznr4A*ccMiAmJ$cixAd`V91 ziW@eEv0|G3`>wF0S%1ff6JD4W+1YjG1c{sM)85FHpj~>`^*mz>`yh_Pv5OtbK!V*I zZIj3Cj(nVjkYbm

`^)MV@rHEC*j=$O(8xny5;Rgyu5d1BLe~p(PzvM8$IgUPE5g z>?m>qHmY#44AGLEm?2H^7|OsH$2-n= zkF3%8z~TGJ4uLc9JKX!9>`=-9P%!5nAnc?OT$glMA(_~bN-oqLhnzcc6b6hGC@Wc* z(PnNkpU~R!6(oS5`o!Ii-2+Ed!pTJF+uYuV;pk)Ej)WeB+!q}QGEoR01TQ&dq_umW`awlp^B<3z}`v_{)jQgaD~af zB`5qdq1H3N9wz;raF4p zlj?3`-khm3{V?OEvf3dTtd>@DiH$>*{|@=qY(Z(FMeCtuqbZ2I*Z;`k*wAQ3jy2a!PyNnbm1?1zq%@ zQgs_+mHoS)F_PtMB)7G6DQy>8{z7WG#7@CCWcod-X@|yzktw@9NP+&Ua_3fFTFOfA zWKq-5{vH4I^coD+3Fp`1j9EsCdrDN=icSJ*4Y#PytGB8ozS z8V0Sl!q9t+I=4atuAeXnjcNN&@{o8DNEXaC#Zqak(oGbBEQJGv)i?3RXOU3w`tC4= z$uot}dQXe>nffd3jt+;dNiY%W1-VV(hqMbX3UzW~(|+2zNX~j3`q3-9#FvEBl!LEc z$-@LnYh&4o3V{*=(V-Z-{o_io+O0{xpC;lq8=d(!StG|Wtg%G zZ8CW6i(0m(qC@U5T7(~>KaDU@r$lK?0QA{+l{z!Cl}#&lOl;O&f1CCW zKSvEzOZ1KeShN!SYpCEaQ-Xt^!k?jbjVy$4nYk*Pm&IniP6iX@wf-?!#Lpv9It{g} ztsa!@*546fT;QI5(9U*#|QDMGK@S<1Z`ELl%fjM{)yE0%G(;foP zQLf~)%Aq<$y+!CucuVaTv|3VU`n-`eLD%m~ln3Uwc9-UD3M}sZJ?SLfMJ+r4j;s$g z#KM1;0gOJ=B;o)N`PUvxQp{vAviSqIw z>Mi@H(yBj9acHK#vue`cU74?Sc4td5z=d`w1NmefW@ z#f+Z14|HckfXB+gH!o$ORh)$}blxT4qPV8mF{lf&VEPCn&0Zdp$z2{>5jM9PnM2yH zd5y#J;N;+$8xBWM>l5c=-Hb3Wev;;*4_0$sBuk3%GYkM{ojLIsP$xHr?%VO6%$m>Q zbZ%|Afz9`^ak9Y3vL`rXER>MEZY^T;-ifwg?1I-1V6LJ`SqVNPl^dY&mx!$Ujva_f zoUnt_W&J49^CJ%jny+k*o)n#9VhBgXeac- z?S1}368Q!(s0S=-R%hsYz5GR|1*_kwXeRcoznn00G}4% zXjMB{UHGWutcr8Z8SpFs=UWIXasJRe#aC0!$CmdisqX98x(MC;T8g`_v?0p~&3puB zek)z_5ia@dwBvWuuHQ|2elP9&{dC|D(q(@+J%z15#MWO?;fqrWMX~V-q?nFC_2;wI z`RN&LN21~^1Q1#TX+%K)C)@pAwmWqnU?T%KszazL>;5Y18prAgfWRMN=1(E@bk4t( z_xShhG^F-&DnhgqQW4_iy_ zTj+HPH2~idw_(BUZ*p`5?!;}S;I>-iP8Yc~^jd$Ym-w8?`DpwD%mV==ae1PkHfC$k zgo13Mp9TH+{6AT=d`Y%!=PkeVNXsu5EpN+~OL%kc!W}$sUxL5B<^&FxSEBNM2RM8T+j0+4@($sD4Wx^>_lq3^ zqU^~1m?%5q;g-FH02>!Ui2QM(;_c%`0B`2XGLJ-65)=#BMdl0K`YZzXqK|=RK`ics zbAh>^D%xh=W<|8##?5T#U*=&Xy03@aJ)kRG@2cmbeErfTc*QuvU9F|9WphDKQ) zy>IT=9M@T5EmQI}?r!fSffL7aVem%Cv#2qLkJ6yyz{%Deck!4(T6FW&BP6uzZNpZ| z>;@+YG~Od=nI536Zxt(U(f0{j@?1%}4&8CgI)WB55%WyG#Glc_eBv{R3mN=HnhClr z8mVU6_U_vIu_gEXXwB@}-qw_E2D3fS$JCZl;!r{xgl|)_Ly1cXoqqWqB_1U{B>^QN zB@rceD53jz9#fJ~GNt4LN{&LiWfVtevNIhm)&E1hIVhu?Jl~lEX_sZXdAhV%$0}Be zWh#?T+!pW~7|oyX##J*c+w#SwGXk|WoC}|l?->MaeVh(k@~o&Mr8@a) zR7aHd;sxcrcCd0HsLK{`q-|aushgt~eWjs~0G1_MmW7WjQ*1Izv@Pq-)b_F(`F({C zp&);jl53P~Q9|mAPX0}2?0BAK9gPs!(xyi8@|8ZfwD876YN+kC#*>Y;)<$E*sb;jq zzl%RkH!-4$qYXal(1BSD5Vl9So%Y-2FSs^%?r z7n`i2ZWYy!6gO!b$YucV17wv=fB@NKG28hE$+lJjf*??~*?E(E=hTA_wOg_wVXDDf zx9;QI*E!#L+O1HxA>d@-e{!9^?&_7p%ex`M8q25KHKK>Zr`RYCt1 zQ4!V8)vh646*XZaXNtOLe6C=u*F;mSqogWYVgn_XXp2jD*Tkl{jJGYWh^>8Xpo(p= zgLZXs6|?wGx_*1??t8sk)1V*wVGw;~zI7w^2Ys3gJ(c|T5#G_qNMdEE3hl5W+ljWX z&h(&_XhZExL8_lA``S$3S7yfX_Y?i5^4V)MGckrnY!1!1GQ?gaYGR&gXX=@%Dv#d& zm68~kh3@`TQxOfiv(&p5qn*+;#@>aqxrN;t`N8zq*>fXLIMGQId*kSB zXEO3o;NZ-r(!+Y)m>0c#mXYJ4(!+6k*z~}0Mw!@mc&6Ji8m%EX`K$U#}!e7B(RA@*|b$lYiWjRtFLLN z*KRLM@QsB&X@x5zsiIroq|SJ%_|Y!0OvlKOOb@jLQYq9!OMaRtL!HYE$g6gyC5F&` ztIf>Vg!HOHKQ!c@lROIp5^W~>j*>}qTpQZO_pB1w{0*wJN?ac{k_zz41f5D!fz(&s zQ`Tsk_>fC_bSMuW&V}Kd$T#&Z1hZrl67YLe@hsVnrh9{MB*26@Mm$@# zv#o}3-!J=ufsYRS{_=2yY`na4;lvMwcT9tVF&;j1(04y`ZgYo1yt9S5qFiydkzmW` zU&zY?DVl>TrF+sF`G?*LN`Vsq7d+qji9Zu=ei@8ToG>W2REA^14bR1{&-I5N0Y!-Z z95FrtR{bPAj-$=q;=UHxF+`I}isO%3 zO-(;S4|$mi)~G-qc|mF&xiUakKn>;63c->a18NTaFs=Aw>A51+f=OEUjwfy)prFQ4 zrz%P03yfiqs+GLit!JmpJj08T8`%sOwH#Q;bf^MI04a$*w9&3|sL8)& zSg~R1R56Ynyg_y1(hkjJZP*mnnR=+pe?+VG#1@-pXp`4~uCM`h|07^RjauW4S$*7| zHB&m@pZ!uiFY{U@X=n-$>TQ7LP-1X)fy+ zc{>{!UzLFdh~II4Uqbx8WO0h+6zI!ryI_edoHL;>b9qm*W2?0SDy58?){{rrOoA{)DV7j2nP*mFY-^l?xlr(1-GN$ zdfu-nlJ8)^?j;t3<;}h;cThD?Iyz5SzD1>^=H^xNT<6tHRh0@XQ(9&;8ToNqabar* zqTAT_;%tG~T=^ZMZ^15xP zJ<>Ec#-W@Xgrji(M7~Y^X>qA3;**Jo!6SN0Qdem;?~xkQ*dIY-5)8y?9Ygh9c!Kz) 
z1`GixSa<9m!vUC%J1th9^1DR&dz6sUVkOzVbPl(swUU3j8;fkrmgq`R09P}$vksK+ zww8MMMYv@f(BWmlWQ=O?vKN`=KNc zABNf(ZXpLC*xM!#>04p?MFu7>nC#;{&ym@b78J+x`q>wakT0|_V#(Q{=WZr|gdHq) z5X&D>p5!I1=2Phy8LR#g6{-iRdbG+9YkAAeg!_B6izqsythLp)div_>c`mh1Er>}J zyFpx%w095BvGhh6R zU{>t)IsO(cgB+Z%*PfVV?E2)&@XUhgcn@AO?SOxbfJU(qnEZkd25YU{lw2He+zP8F zs5pZ*DqSx&p!e7vO}%JIPISzyQ(usgJnTaIFK-bo0!B&PoK_KK0b+~P-V5V69CO|p zc>_{LBCSb({~*g&^YyOES`Foyoc#+3X0Fm^TO)MMen8NjRf#M6l+jZ{%~Hnh;$ua| z^chE3NXl5!^i0A?sc;D`1*6k*Kzn5QB#cX4A}oA%{u3nZ(tK& zgl_j18Kw{tjQOqkC8L+K&9cu<{8BQ^i{SPQ{3(nKwa+{X1};Z=#Dw(H%Z4g z=do3WX-SU`f%q%qn|~v|VOO~YUU61=Wwm(;%M>~+S;oQXuCUA;mI0z*8*LM;!q7}m z4E0-fLxI5vIn0!h94R?Dp+>m|40-x(>WJ5A zPsi)#w<^SqguEhjgwqh9yN_Evs)5KO6$V2M zS$Qg#AevQ(qc z6+;G(6q6r;5$lSmp25q&7sldJyisaVDMQ3s(!gz`CjTSWhXx1dnvW#%CSYU@@TAW0 z4Zx^VD}>PiFSF_+ffle$lP0&oH;2lwV!#SkIbSb8uSP4&W`MQ51^jUha%jP^$!cIilCHmv&p>nO)-iOp261H5Ij3z%J3J)mxl#*Xl!ef#XEB}g;Us2LUlIqyqRLA)+&UN$4 zRTRn&wIRDFOZc*)0Zm3=&!Vt{^+dD|x~{MQdxl2Ph#Y|N4fThr1wd?Twpu~@BfJ5O zuiRd_$XF!!LnoE=CK<@{!0 z@e(=j?UuK!OHgtL<>&M+U?>ZtQYfdRWg=tG_hr#vNG(6#-Qu4IFyAuk9j?++^!h4me_dA+d*XCB(pT`x=FSD0e3J?|ugX8AHjUKk^(c!o(D2akLT`Rk>PUhfes(yS&66_b4;Kcs~1G&0;WG$9K{_9^ixnaeciNU$XZ6)E%- zwUJn={f7Nr`!B3b`*mx}de8a^*Tc`}XtYfo$cL5``Aki~_~axDGyj+h>>`Ma8&u5? fv#>M`aNfwN^^=09F~^-Z0-L6*=+n# literal 0 HcmV?d00001 diff --git a/models/internals/__pycache__/image_functions.cpython-37.pyc b/models/internals/__pycache__/image_functions.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5dd3576922101140bc59fe51c43cc8094f9bc12e GIT binary patch literal 9709 zcmc&)+ix7#d7t~v&MwL2iWVu!cE(N`i-s*aPLKdb5IByiMny%dmW?_aE34t2S#rqT znbnzDiDYMADjVoa0KXR~NFXl-3iPE9ea(N6zRpvDASl$=zWJs7ecxR6BASv72^nI~ zoXec=+`jMkT@F83T5=RTOV)3G^Q(6i<&RV-A03(Z@r$>S2t}v^rIWwaj>=`Nqw!ns z==?T1CcmwY#qZkC?$lM~uN0vP{c}a=kF<^>)H}+i@fb9f?H;A&Q;jE3<0~`<=wIQ! 
zDn@u&Sfcj1+A+i{qAncdOwka{&lQaJs#q2)sHusTSVfI3+TsGo-eDhio^m{ZHdMy0v`27OE_#-5VvabsD zz>@7$+f^re*h;k%?NmXkpDMfBMBh~=#^Lu<{kn4Z)rpxJCq`nPn2EJtJF!zWHBYru z^;A`r`)~c7k{XzW=KV}ll)nk$#0_XRu^WVLvgf-ZkbW+1CRXj)+#NGGYdvPM~RV_s3dNpVgxq~P=aQl(W zC&uGWN#}Ghjgrn=ZZx7@^agVje|_EWd*ea!p8E!G?i+XA8*>YAqdqT=3GyJ*C4J;4 zV;N2dt(Nn4HAX&<0qq|aMDO_(@y=oh{w~IH#-aGuTf7}5{<*Wcf!!Je;rP(q@nT=N z@ll-k!}u+CH1JX3;>^a<$9lbl7d?NLvFoAH$8r1E^w9MN102FbU-qakNK?glG|DTk zReP|hX6C~n{NcNs^-RZ*nLQeK2~KX9ncP1!xmRW}+lb8A9Yi~s7R8w{@{+yG*pGs6 z)5?t4AM~?&zNg@kGK+4VmAtlMyv&(yeWs)OUle%(GV;}jTf;Ef8tw#Jd(qIRj1PL5@YTUy9_l(NWf*Q|)p?+$2$4Mxv}a zs;#v&Lv_?wwc{%{=SBJ2OrOl+##pN8)-$Oyo*I56;4It;G9=t5S_<)mh}-hhRN2?L z&Vb-*r&?+V?YG**Ow81Lqze7OkiSfQgYEw>L*TFo>-y7Wl)`1Nn^j6 zTEY|-=vb)*@vnKuB>s^nAo#ibS1fw(9@li~i0<8+O2s!3bn075XvHri>9?ujX+j>4 zclyylfEx?2c)F11TMa3{Q}qQWKims?^TQFu@$$~Zl^_)UAq@(~xcAgS-?{7Fhwc>mu!KBZ=kQawmGzU*g@2EQn4*UhU0w(}Bc)rsUe=6>L7!HoyD6F#IzHZ1Wed#zm}KMlom)|fPCqD5rv%K3%r%$TH3)azzn?t*KRkdR z!#fP(%_Z3Y6-hFhnL&&m@+GQRq6&TBhnfA*lOehScqo@y1X5lC;5i7Q%nF9m_e7?J zqpaZ{j=WGnQw`(Ink11g5r-kGUh!t9k)JN}3@=8m<})C9Vob`DHKsv3kb}uJlw(@F zvfffR)TX)yJaM#T_4xA5MKq~8W#%m}K8Sk;^}|E`ZTuKUT99SHhLWiJum}p+(1i+d zH((i<_*q}7hhb`5SH4v5ib*Z4od8wR~}$#kwy6XdSUie_v>M+Q-WK zusC@)%==eiXI1+k+6Ts$nzXSOmwA`4&-`v*g7tQ)qW%g|UsEP4=?dt-oSHeM4FxkW zX4`q!SBi1yTbj$tbYzsQz6OZD;QhXW_+823l*lpAm)Um4B$>y17u@ec)suw8t}i39 z_O?U-%}$vhd;m)iIXCFLq3=Vgr+D#VG{hN`uK>1^#%BjumZN7=6z7ec3;%P1_9qAW z<`!u*PDbOTo0ID1M}4ko`sAQ9O!2K;f!@H2<8A<(;@pAxGnYOqZ16K?Qk`(2D85oZ zlN-RhOzpBg^%%UZpzRXXIpI#1iTd%TF4s{d-=u_;OQ$`K{q9Z>dh)1Su=B^8TDCl2 zl^G(458%2X?Z&~e-?=c;uVjDpTPgaLRq|~b6J%t6@^sYN8m$fUuc(<&$J2L`-DA&R7-WC_jRaJHRXVYr+~#j@|i#48Fv;2HVJSq zlH|<86dS?=3-ErYz5uUCzjW#R?or|`_e@o)z&FzrBah-gK*5t#gpg9THp+LS^PNbTji zN6QS_sBER&fbIitFlH?|l@lE^+ic9pND*P7{g*e07Ga_!Pa~@((FkBIGG`}Bl4!_z zd*Js;4~eWUgWbJ6TPxPPC~I}p=W_OMAej3>Yi*5yHU9xTFD*)3)u)P)QaGiOv27F< zWK5rM_=Kd4B~8yIjHGHq`9jO(DVM<_U$pcAy0D&2?*oR`Ve|m8QH<~Q*|{a9I^~vc zVDO#?Y`13_rW6v4`K|dSr&qAeyhBd>QZdZ);PxE%DOe1R&pZkSu10u*B~?b~O>@d` zl8$f6V~Y&al0F>*@mJ0_e;~eLKez>6aTj@I@$(9nDN0DuH+Gp_obCe4Oko)y3bxVK zz$y&Q1jRbBPEl7&jnq0&u@DcHRyhI(tz{cJlso+6plB-`=7x2l7#>*PA3{-Tmv zsBfjXA|U#xUxg}YrEpa7?aCKW7jW|1n2DLTG1GS1gsYDnT-$$#)7qrDrj0&b5}4jU zW1D^vu_=fsF`Pc)e}s5|K^`K`h<=j^kHC7NUE(QkJtx`7*$d;rtl)tv)}qomO5{$V zJ{T(zqzN7bVhpuH-R5b4yX3$dokQ#AL-^0U*5X_DDx@lxuT9J>dFbPYghyiX75zVh zfYn&Q0Q?+b@A<(%eh9A5OgIOKOI=4^zJp}*!c+v|I+UB4t#WpDPdZIL_U@if=$;jE zuZMv^2*1H!7cCAK@&+5~t1F|2xD}vWFB-&IopSU|%rc*?@b@`!Vx~@wE5fMLKX;zRDlQLYIa zo_=dOf^piD!MG{;9>EAbb5XcHL5hJOz9!#9{3d_HjEA_@5DOw9w}g&Z8KQ4@aCb*7 z5P77+V5lK0PvttKMO__GV-j>6{zrm)JcfXOoiq;r5ha65>7)q(H_`?Gq`9u3rzuQC zrSepxxD-PMkCc!Lfe{;uupYs|KnY`UE#74fb%}r{sk!JoWoJ= z#$fhxxajRwx2JO`atrlm^ez#I1GR<6nv0Ax-IrNcA+v*Gcguefzg7d2UOZujn+1)Nh zFT35$>2~3ak0F2Etl90}hcTMB}TuiNk1Kjn6~_#A_Fr~?V4qMW|d2snBRN<7NW892t gL6irhX@Ijv&g`G&Yx)@hBmakl4dAe~{MXd}ALn=qApigX literal 0 HcmV?d00001 diff --git a/models/internals/__pycache__/losses.cpython-36.pyc b/models/internals/__pycache__/losses.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d31d721ccbbe995d576e5a0293bcfe1e92123744 GIT binary patch literal 9786 zcmbtaS!^6fdhTR2*2}EnA-d|XqpPZ`zW*#!X~pg4qlRvZ>{$PbDm;wbV%;+Qy&{IGaR zoIpM&z9d}aN5s?O8RSRB&xn)aE12n+cvhT3&2e#EoEBBoJSEPE=TLJ(yeQ6z^Qiff zSP+Y-am5935&6^N1#t=aGvczig8a+Eyr)%PdQ6*C)zkcn*I4%hQ8m+c>fTBecKWej_oWPFI@j=GzZJ@M!|UwnzDy$qqTTgc{$qg|8Yazy zUk(o)#w|S2Z6vX_rYBmW&tkKM@u4v=hc$<>-CScOW?~%H66A9lQOC4_E%d~u(yWHh zu)V*)xfWggzUDO=o)qqj?&;*`+-Kdllj=Dlljq#z?ZkcF9oBMhdV_v1x>3J-I(hlb 
zO>{r4=%o4_6R3KAx3$XeadLmvch|D%L_3PTpy9h6Y?|AS+4`SZaDI2y-|(B)+|@YlMc2-q^Mm=# z_IkVLi?%l(%GNn5oy+#H-VTJnwfp9czU=J2+QJvlx7zV)e`UTAcF%2kQ51Mx|6Ft7 z;tPu_moHxNn~fEJdJyR|H4(#@Gmv5Ug807c>Q=al-JzhYh3#nr$}hDysdWh z_$AE%h?-CE;P>g$2=Rn=8xJG?6%a#w1Mv#qrZ5w8mXS?uc#K5b z(S^NZjLJI7xgFvH#y>Eh-~Jumv)gT;4rpIIGeq_n-W6Iih`Wntc5$pw{_?wv3gtIJ zsLpa6AD-N5Z!j==qIquN()mmC=PzEqdU<1E9u>RLXv(lVI!YIy-t;>iZkDu9Rb3X4 zweZmI2A}F~QZ;1>)v}BP=lNdWlYZ5b6I55BWRj98O6Vw6G7Bm#2z-6L5&F%iP>g7= zHD_9vbI2P8mGGl{By(syfjuUfvt!JW^e~pQg?+k&h0?24V{Gy#rmT#nWEh-A_lRfG z)- z+@Tb7!=?-2>AoIrc+oZ_XY2eMa&ryuV{8ruR!*c^|QJ>iSh`1)SAb`TYn9U6gqA5nkLFeu)VPk z5re;$fy2OtCWf$Pu#JfoGkpxluuCsoBB*=V)aAcSxv)>=NXS~`g>2r(OKo9Y}K z&al0(Rs@UDvGHHa5_#c#XbtSRv{p`RQNTDh-kCtTf^u<(){@I=CQ&oNHF?xbVI&&Y z$>==j0@FIz7c-jbOP~*6%xMy#oI6=w*wI_2D4`F1jp_rj%6*DiUK;gb25lOEQ)T%v zA(>BkbnuUf6Gw$@#Gqegyok`Q>6TtJD*8zcB2CRd@Ak@b|y}gzL_zjr< zCSXsk4)>$?9q1HDH>L>A)W3jIkr<5OY-Tkj)BvvNW1*%DfSVZK&S>-nD^NXH40JxD zsonuSk*L{hz?NpsF>GNk;47$A)#kHSFaUs0t-GLhmKi~NwDbS`;6FZoW97|pcxF{U ztX7nUegS)?AZfF4pO&I<#qaPLMyb8-OD~ed8m!CZHKHz2a+#7VNYb1F8l$XQ*aeq- zk-EG@$r2?uD50~KHz^@W_9QT7_W}b#6>E7H9iwNEXpoEe%as9%qK>~k;5eb5(B-S> zH3A1Mn>d+eCMoQpvV4GT#i3>@8xnyOCId^7w%t!pPu;mw-=nI30WT;s1)!B*L+K06 zPbTMn^Uq=a&$M25_N?QzFS-u0eD+(%0jy)M-SqyR1-$-67eI!`ehU~L#K>y-r9Jb% z@kQq+;Osa5Ap*jW`_;d)XBEp|bQL7q_FKgLy7<*lHh|kY;4LK}HR7!BlL;Hno4C?U=u;4RT2i zjAVI-VVkf?b(1$|Xqv4;Zz^A!8(p~7a5bZA?zeBAb-#Mn{nk6~H{s=aD;@uA*5-P# z>wAq=x7XhCJMQTfKi-6A?LH5mt{FdM2l_ch551y}2YIn)(EF@h&${ZUU^s?-_}%a~ z<5jooMeC7!`aJpB3&SB`5!P7}UW3EV<|bR190fp|%WW_oquBQZ*`HLo;>C^C?8Dlv zAe8|PNK8-r{shYJsj-rm8OHB|+ycIa? zIrSN1o>?g-^Fr;-Z_~ob57cx=KdMjZ+fR>=Jb%3tB7zjXG(YwZ2DL@}u;*8EO5x@kNT?I}_ON`A^O|AFaWgxW z(GTs>Z@Wc%tkRNXzL9A=X zKp&lq=SL1(6J#FS14IIFMuqK9l#m5OejwZm_~rvhmk-F60eW}zS@H&HH^~*DJO@Op z-6R);kZ!Uh%0teOrDVsBl0N85R+t4l%w(I$B#Vgxxa^YTlRWMDFbbgE7+LwuDG2EL zQ3J72$RzgLC>*t(S`_SJ`>8fTII0I>P$w=1mVO=|RV^t7W0UEds7NOwgqzrdqmZ1u zKy@}%t;@5Z=Nwo!KLA@dHO>w!1$G&Bd<5RC z28}TMkwwLaOhFeM011DHD-PL?f-bx~7DXq}YsigHEY-Q*@wzL*d+9IHC&Hs`e|eu} z4SV0uJ|96PIAx7k9gLEE@P@)bq_&T6Q-==#B9W)%y=v29X!F#H#&?i5kA==3f{5tb zG^bLUgNbS2&oQ`d6#pZnk9>{?D9}bE8d?7%pM#uXlHbO5wn|;IY zh8xhj3^bSbg;@BM;^9*~u8MWk${Sv%E$T{7WPl%5s`phmL>~+CDLT_TX&wrO{oO(k z#&w=7Ew6-Ohwd3j)l#TejjzB<@;SA6-dcV`wSdTGV^oiz&D!8^@Pb0LO^$$rzcO?W zq&Mk`J<^-b=oWP+SA))n_gRtO!u*Z2KxpViBr04U>fgvK!*9>#@80{tTf z&D87}!=exEqvn(E%>KdMf7*Vu^vOpjzWZ-q|L8ZCjtD`q_ zs4H_YYm!R^T;x$`YYL@2+A3T+z@-8%_o!`Jy_XcQOH)*nOeRyTsb-+ZCK1&~Daqk3 zOHXD%zYuEExQR9Mobm8!ecy%-{O_j&P^Dd&7U~>sh zfvzIHnF?XnVm2dXVnLa}@FF&bI(f2Rv0p|{CX>R!weFNaFpJm(PvtPhX;BsqMDU0L zgP;X{4+xsGkRZG@OFat-7$5IVqKA_dkb*+L>7LK{DjAp-^p&get+MTCA12-nw5`C{_mG1vKCmU^fmczeA%XNNNQj zlRA5d_V&U}1X$mbadmc=aroXWby{8@AOLsiTdDrAX7=k_lsqJbC_!`fkY?GaZ#_(n z*T7A$EvEYGsc{SBtwlT+QoSSJr^V>X=^0Za6TF5xUja3u5e_hII8X(kC5)A+%(IO9yC6zQrM*N90=ez-~dq;K^_OHCP0; z{B7->KZAY&5ZQh&Bso|>1(7Gcq5~9Mal^ z8T>39xo{4{3Ox$zY1D!@$6K1@cbM4-&~l@GPmZwL@>wHT@16aYhEX^ki})>3h`|o> zNsa(LC_sl`EusL>74cm|8o*6#Fhl`wFo>xqE*}4m^1mkkJd5|znXy)ROae(6CzGiyJw#&ogwveY>%k3DsJ??v=>&{82y*4+ z(;b4J>4`M&MRn30a)J5{-SwUf1;>LCFo44Z-473^gQJH0J-ju7{O6GP^xpq3@wkAJsWhPk+blH8=^6eZkvQr;z+-sZiom6_A$RRn}@dU-jLT$Hq^{7&3g zZ7NM~2mNmEp~Sya(PdMk4LV0dQQTSGR39PgC?ynvW%pHu#woVW0dDykQ54~o6lRqa z<&+d(WHWa+dc)Q#t5QjNjWNgj%pW~=lp|f8I@j@U8~u*|5_U+7=z5_H1x=PdY<;76 juvl_T{8`SV^OWN{HYoL%FCKJ`7b{MocoMb89rynLRL{tt*vjyIcRNMb`aLz3R=KGi)v z(_Pg$RXsB{)kw>D6-y}au-XS!0(oFT+7}S;KtKq@?gJ7?JOl|uy>XEuiwA;uU_nUm z{r{=%xomdWDATHQ{&PQd&i7wyu8)iqHT<=j8}Cg#scHXAg~6W%;wqk~sB414OsDH!>^DQ%s(bBS-4|)bK(snu(|;(i zLfxcw@R!3wm+=Cg=tU&4wxTCmqEF#cgz=%#HwQI`2Q^k=CdOecK|ZY!bxiBq!c1%` 
zO=)-y%liTDwdmsSYhJzXN#Q={o=(2*KHGpz} z*QQqC3NQ8|KaOVH+so*+>_u+P_XD0UT5dIB^oc^Z6}$B?So3AmkK8bD&%0hAXxgX~ zhVe2w#i8r1g)QOw8{Kv&T0zsrO4x?i^*xCqPuY~7XyFcLws-xe-?;29$8k5hJUiS1Sg-HW2Y>-e*c zxeHIv*PglXtly~D{MvMgv{9pV5JC?W6 ziXP%hwThAIucRi{*bsZ1H&jz@d*$(R3(JW6Hc%bM;{b&YE za4@^=>fpAk)aDSBDbYqJ^a*`Rmj^+ALKT|!>B12D1p7Q5hW<++20-72K0s|wF~no- zfsV&Wv@KoeTgI@gt1{Su8G*om_j`PJFSY z(PaTy6A%4et(dZeT$YjGI^XYk(yv%@gz82q8KY#J61qv1Oo2)Z0U*gLyp4@ZCE9_YXZ@4ah%eG&Xs zeDT(;mtW&o`sr4WqE?5y&+J(JIIT_`OMf<=A=Dcv5PS*BeLWe%dRprnswB`hjn>0j z-vn(W<}sA}7DkzgB@AJH%Qyo0FHB)&lqGDUrnQD4j4h*YB^J&zcelz|dm-#}p{62t zAphL3;X-V>uZC-0bQdx+_Jat*!d;({GchEtB*DvCE#et&`M!It-JEgf+*aUrAyg&T z5Ah=DT_sr@iwfCRYQ&AS2)We54S~PXT)o#(c^E)bR_wIUjH_y`)NIBP13R^Q0g0`A zCS3Lcz0~GCSLs#ev0OyFqS-}#6vC#gpVQ^zC=bC$t;3_gj7Y z*EDb$IMBop)+CNGv0|o=X`A{s)iADTvGu@4tF@A&V@94&tfo$KO5hTpuu=EG!Q~9v z3oAvim?iS+*R+Hq3Ljd1J1#vaC$=bJ9vfpuP##6Ov_*T#Wi?}{DRWI8HRG6x=5;bU z54ym#&h^ENruq`-1DJDyfX2C#<%KQ1X^Ii_p|??eAV;}RG0RKCKFpvE18^E$d`L*< z`yF2VLt^<%jy)u9We}v+t!uB{05|2MShj>8rTMNWNG+%NMmzN4r{YuH|N5{af;deIowPl5@i^s;eWFPqT4Mj137 z^CVg%sn{V~kk%Qp#g9->5{XnRKta-p)ofpnjTQ5O#j=W(E*OOR7^t^basWT%jW7Xw zYIV3Djdv0N3Zsc4IHUiA0!l?)#kHSFaUs0t=pi^FEWC5spJ3o{=a|xTJ80PW=3Sl997Es zB92R;(PoQ2Ek$9?Z}asT;D5 zx?Fjk5;BnFi zPr%u4{X+zVA9tpIb=NLl|DwAf6}8_kj$s!+$x;9IuC2U*((O6}6%>g}nw#D-B4L^s za{dFY0YNE@dk&draNQv)6TJapxI$vlgdiiM4dSVv3&vwBwyFKTwrT!~*3Tt5Fp}jR zhFQYa)O9|bfmOB)ZK)h+Zgk;G!@rC!yFY*Zocq;t?pyD=KL^*=tF`@eS)1#{uJ6^C z-EM2cZ@Z^!e!LEM+I{k!R?vv=u-AN6(E|^t?Ll7bn)Ej7)pM@8D%gx+56(6`&3M`E zc+qO)o<2{`_1s_z7=u-ogqPu^v!%(#CAf`#|M|aLVbEO?3Y!p2|vttYsNA0;p94Ax7rAl%~Q&3 zYYIilTbN$HiKLQGa|$TP`EJv~@Pg!9)a~m?s1y0|FnW;lo?*msGy9X}otyhE1}Hn* zvGlTi2xi5&Ejz&H?tc8anBw*Q*a1h?;&904;|a{UA2+D;-OCOGG;=Ejrc{N(Gfsy8 z2njhnjOTkG2)*~U`}zvGZ}3tQ4ITyD@;;=?eL6d!cT1llN03g_TCo$tevZs5K27pI z2V8%&mSt$4?e)zN7`bcjTiq ziA8Rr{02y2S=Wug!06TMrm(pTuRO8YPtDzZmznJ)<+w#IMMo9;L$L&`oLK!p7$}VkchFbb z>`lKDu0iWE&|E$jV&PMYhfndmxEf;L#k9QUwOgX9G(`saL8Y1j$H{(c9;$?$+CmV< zRh~92*TS$(pAkr@QaD%5qhLysIJJ5HG{2VVqWqegRpECCVwU@VO-IYvwaMgn@GC>h zKu(iZ_%S)nH$mH|1O*)ly=278F$~Hj{IiB*GmT>ACfTtfc8v{ zo-r(Xy>FuClV6zn-JAb-_uj%MAD#HtzrOL&Z!E~usIC+4=Cudtvio5DCawSJ~ zA_s?kj7u2BrE!$A}cC2*E}f? 
z(+Y>So$}OJox?Mqmf+eW)KlfyPod*&RKcJ4{$kY8UY}2Msy!`dld7*W!xDe1F=|n& zuc!KQs@Kz8FZ+PgQ(YY_uWFd$k2+&@#5+~3>SU4o2D{J?!pdQEBk{ z%Z7v4&q?Eqeg>ZZls<(h(I`JhjpOD?2nilD6c6P8lPSlHLNb2f{|`KWj?=U6Pisl) zyPvZ)D%jcVcs?swd`%vHBlvX;9j;4Dk@?8$&ZC!LGe_t!T@a*QK@^HbKSi7HV@9$d zfUK;I)WF_01XW=qSrlIa+!0Vk2}To_5KILt4B8M*MQPAhh)E2fv}yK>BG)g8e7`J= z{zy_vM!*6NgvKbbT1Jm0V+7j%xIp}g;E!kYFojl877hgJkOJ`0g1!d?g>VZAB2rV- zvygzHF>VY!oTPvh6#7jh#bli4;=Vh2q89CwV5iCCK$8vTf${dE1;sDSDuz6RbCqY2 z4A^EMESp1NNsLx0rlpRz>Q@ztmEm>)4G7$Chfd`OG~0cuE&!R-*+YP~8?GZJ`o4@S zQ`?NgOqx1PuLlr-yY!7zzmuB1>INluNWDqWoZX>S)~XwKQsWhH(<}3-{%UHx0P=-- zJWr*1TYiUD)s?fNkWa8lM|cla{%TW?N(3>6KZ2r;eq293gnSta13wCh!a_NxpVnPN z{voCq!jQfe4E02tUDXr9H@R0c7xxaOJ`| z4D0bIte#;DM(%8BkiTGNGeCO{`|X`!yXCWHu--rSmKK{TgbHRM3Sm?D1vvsV-ls@= z5e0y*i1!-M0B&M~Aqp75Af}%9W>GqVeWNG(Vsr_uMLeYh-xcCA!nVlqm}n{Es|QMu z_~k_w@y);dlaD^kefm2K&A-3!Z_nPi`SC*YKmWzs`nwB%yU;ZL)Sj4H_?w01r5iWp ze~tZx@=g@o3B(UcAgNgHSZYfT5fr}RH0O1@a9JJnWZ|wR^P;BA5f=8d>Bl$fv%HO6T zxplwQT#gyf@(-v+Z$W`X^7o0(ccLl@%{{Vx0Q1*wG15;F%xQlV`={S%mePl?Q8xds z>n~BnS9eUM2a2o>jvDd@7&V0a=aBd`?*Etgom~?DChe2f&~*7jq>w>*m61(WF>d33 zV-{fs{t`-ZlM*uABsZRvFB46p`K*`9%;_^Ka={;Gc{`F^i?pcxMSNXaSDM}qdY$ea zNnd5*FxFe3$sXi?M^GP^DPqgvYk8a~aywN>`xT-nFfA!6EGaT8DLg7EIw{|v#G~XE zC1hW-70Rkql3ruX@j3HL@3_istJb-Kf34`X{pWE)T14Ll%23c`x5IKbiU*4&$HdQa c#+*kS*ResVU%q(IIbIxf3dNJCJ?^;w2boR!6aWAK literal 0 HcmV?d00001 diff --git a/models/internals/__pycache__/metrics.cpython-36.pyc b/models/internals/__pycache__/metrics.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4394623b74752c236326079e8323949e1213a370 GIT binary patch literal 989 zcmYjQy>1jS5VrUKE+mBG1Vl)1?MWyRMB*nx1PK}hL4pW^P6sPz?QFcaYcIAhB+*`V zP$Fumq2L900G@!_6%tiCY8n`aD9T9l?RY#hp4rb{o}F!fS^xg(xsT8TOR8SQh#o60@?J%< zZ7x`^WJC$)AZ0EUH``UOW(r~}A~L4igp%4Y+S^jH>TMfS>y@RY9OA~CiSRnCBR8V|Y(Y4qL$4%&}%7z^a*>XGq#HUIR{$=B5b3>(S; z{GEX8a((a@M1{`npnukSqP7uHMC8nRX3GW{(Z+^cnB{90KeR2dQoT)TW+ULFCdPU@ zS8ZJVXWk}2yGXel+nF3_(oAdM#QJJ1?8Q<7W$6x4oNSg%r(AHuNy+z_vfWGyqhy&< zRs(5FWSnW6aIg$Fd(c$v{5Eh0*iaiFx1ZQ(FEwfm&FrPMVz3RES!@RJe@8lKiE))? 
zCD9tPGO5@g8rP|;%~zx@!O(wh-CG$x(@g1MR+#HIZVvA=Ju zLBKj1b+O)4^N`;H)fWH|4sh4&;0~S#ynxT)Iot(3jOw>+oTgM}X==j}Q$#CwE7c5u yZ5t-ER9mHdqud>}A@mI8BNyS$IBfI;Ty{apZD?&=vODmEk#>23cM2ys@%{lklN}TQ literal 0 HcmV?d00001 diff --git a/models/internals/__pycache__/metrics.cpython-37.pyc b/models/internals/__pycache__/metrics.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21817de825b95cabb01b8b947e1547d9508703ce GIT binary patch literal 997 zcmYjQOK%i85VqaVnIsUh(ZY($z-^>eTEGpV&Auc+KqQEGXaw7G+D?y?w%u!cvPsl) zV~;D&aGH_W1Al-Yz%StJ6B4J~ICFt=0#ZtQPdDO*;=c# zEEr^Lb@5PfSKi}HRJ6G*X$=`Sc`dne)CsNnN6}KbUcY)7 zLHTp_naG1ig?!!TL=cDtxFT6{eH(nOJuu6Vtp{o^03aOWRkDIt@G{^9{3AYtS3!@XhCQE^CDT<|`ec_I zYE5{VW&yzW9amPH?MDA&!aMZ|OaSTA5D{t|w|xpOhoJlhv^Ht@FYp^98_vaK5$8B3 FUjZ62CN2N~ literal 0 HcmV?d00001 diff --git a/models/internals/__pycache__/network_config.cpython-36.pyc b/models/internals/__pycache__/network_config.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..46e5f998061346a471b3893de314b63eb9646b91 GIT binary patch literal 6930 zcmd5>&2!tv6~_V~MN*U^%Zfh~CqdIR9j39|rkNz8>15=w>ogNbaqT#6SS2*fT~d%B zfZknNCPQ7K^q6T6y|l;Pdgx65ik|uxU^>0@lzUITw7<6immgLdr^&RXjRmmS&-Z@s z_ufAJaA~QwRQ~9rbI&@?-<+wRkLN91>ia0T!&&S!^E+!Yyt}b`;59wwyyVGW>wF3K3SZ_=;9lh`{1omrGyjv>^0v^2soZZ} zNs~_0-EPyg^ef|9#Pv2V^>-B7Id(=)i;Y~)_N(%xk+bKH++&71Py5{6E799AV@~qM zh?zEfgJ;mHJVNbME4)#wJ5F+jIrI;`J*xS{`M^ospE*VEU!mT|s6~5>w7^Te+;vgo zGUwq7BX6&A>|Av|T;Fkz{G$@Jz*Qajn6E@_?*H}<){K4&yn=qtUX532Ex6Z?S(lx4 zs8!Kov1n1p`cP{Gv=si;b(||wgj%RzR#riJZ%?#!pm#$Zw9`a~QKIlBf+Uw|&)8zx=Io_1m8%!i(yx@L#HthJ2p^`U!PWkr6` zRt>^~7#rVg$DvZ2cV!<+k2k1xgRJ_lZAh!6n!vOS4kHL`42mgCkd!YS$_SV+tw|he z0)#?H&@YeLdUo`ys3B#`!5EQI(w!FV1d?`D+r#ziM!eH9$mR3#(eTENScJf(NL1fI zBM1A8(t#2sbO8B7iD-oPaAvS5$?-(M9G<(~e0-EGbVVX$7-v=cB`cZ7 ztQIA`zHX^#D6*=CK+I!vDLM$dqD4wgM4Z*`g}S|)w_2S%w|YXh-n?JW`$|{{G;jMi! 
z$daE?NJs6MHK`peTu7L_+9{~0*ycuWkX2f(D2a5dMGUW=N8y~VconzK zR#=6(tj<qEgavZdsE4b7y3JoIHOuHTT*j20A z`-GVqyvY|pxjqs55!^}F*>~kDs3{%6p}=ueR4fQ0u#W4TKo18;MOUyf7HUDt$FJh;H;iBmpEmG55!#SgU0syEsz% zE+OBLt+gq#^tZ);K2VDw?a)jr=&PLe2^Rs7HMLyW$Q%XQ(dKO#@XD|y% zMT5yDx>Z1IK%~-P(iWLdGi4s!cD8(3!OBTbKb8`6oTi@CMXsU9JkUwQlTXrf)dDha zKs;f{LOwNa|2iHNb#&HNy&9{z6=UMQ>oeaSUYIlO8LDbigiAlV6J;3t=6E1$K8A(w zSJaAjKVc&ew(Cj?`Me{9g!GlWsM$iGhA@t7cH~B=MabnnYS+|n#01=Yw*Ee&EBSPBsX|pylMlS`72|W3tW^qAv^Y%AE z1`TLO{}0T=%nz@d`yoD-%wtYC$127m6GVky6gz$eezBXH+2Lbw#Cz{veb;v0mQMq3 zbB5#V0o*#yV~+e56=WQl7mFk-=VFkjsQNh+4c}Ob{5Dmbr@~m$oS{(j9b9S|g>!m^ zj09eBmHBM=?3{_rJACT}7tqyuL9!C#(OkI+oUhm<}#m#lZ!(0y}wm zdx#irya3?>ec;TO5CD+KQ5YWCZ zVOhaHo)kJJL7{yL74C`6Z8BM-kibC|07rn0g%{d!|}_7r-1t> zy->S?w7%Fhkqt2LY?N}8t^!2z85EgMp4)hW#zGG0bM%d{+^k@1#Fu(EWBVC<(5!aS zeja+}uE1d3_yh7J16;X3I7J}$0dn#PHMWfVBca~dN46<=rXRV2DlX#+Ht;~MfaBEs z%XNfs&K}D%14kZ!?j7TV516_?b#6L1+Y_mK;R@hNhmHxGtK7AlvlK;YUzM(@j}gF$ zP`p8BQ)pp26a-UKOg{rz`|=nRH9_Jxa}LXx+-~@ppY}u|DfNPQ0PD+2?KoAU;IOP} zH`*-DL{xDiq17?QZ*Zv!3TL@ap}jkNevZl>IoHs5R@EMQ(prqFAL3!~o(7X?sCduI zm5wOhg||oY2j1S7uj&%_ko0l6Pcgbl2(6;7G+PIlER5US0-|(fza~NPCLx40&vZhF zF90-^@Q($@0V;@`XHS?mE;J_~#JCidr8ovt;)S-T5TY6FS`|MYx!F_RiMT4#CV-L> zp={$gPb({?ECK;akeHOl1k(Qlbj^_H%`j4raT`e)k>ZouVF6JX6+4It5{E1lOS}is3#!GX8$}gDWP( zl;?!RLr^RytYM$;ld~SPoab{%OiEq@aYD>o`BVjz)LF6lt==SjZJCx(j31Kq7K}6XgA}g7$ z*@8}6v?e)+h~hLrhgnImkgrlPdm6|V%@{4Sq`%;Mj)r=jog*}?pNIz2xZ!Rigfssa zDcP1F8{BGTwHA%lN08FW>aEuOei-Lp&KgyT= P27u@&k{MEitnU5``TZtY literal 0 HcmV?d00001 diff --git a/models/internals/__pycache__/network_config.cpython-37.pyc b/models/internals/__pycache__/network_config.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd3e172a4f0ff14cef5b65bfa6502c8b675f1d00 GIT binary patch literal 6948 zcmd5>&2!tv72gFwilQh*mX*X-oCHnNbeKt`CeyERI~lunon&Gwt{umXnuLbAO9~PQ z(7T{zGUO#rkD2z+OL~%e>!CCKD|+f*fa&zoQ|>+W(*E88Tz*(?oF>y2HkQC*Ki~Vk z-+TM?ljY?q!?j%g@WWHjGWIum8DAbA&f(VIK_Qsn5!=as`3}ds6FD7s$K~vGCS2it z!h|Qi`|eIj@awEz`W(I3cAaP5d*`oQZqnQMa`A8uw|*CeWHj22Bi4i?+)wz9+h#2; zJW;~4Cr*m8Sin_{+mgzyF z_8S+HxD~dyn=~z5W!ww6-@>i`jv{6E*^o8(&=GupQN1)|htAMB;;3^|PdJAqdb`g# zi(eaZ(`K*#3|f_ke5MuNsMSptKfxLOLvNRAK4$N+*!ejtdjA^r9!4$NW26O966Lmo z8i%ucFAUwo$`QNBKG=BQ8F`};wZOeN^e|tE+T8i=O{^LH7DNU8*kM&H(pvCaJ>qTt zB%@YEi>0DP?Z}L81hgFd)nV*{l0ho9KPxLgxxFWwsh{o!sozZEGzepjH|fW@O#N=4 zf{sk3(tZ#NztxMIX_%lUrHh&m4C1+m05{z2J%B} zDDiJgUk7)k*n%9e4V854SlBmU(`bc}>;~!Xmz{Oo4U>@I^3i-6Ef?!S7)3OFlmrl% zo%O{R_YVTq*0zGSbSY8CN#3X@pk0}bG`gHD8vpKV5g)@N_29+Vz!Qtc*L^)1_wR!KF!Y3Uz?5ZD+LQeWaFz@?0J z&p;yw`;5|o5-n37@`n=92=BqnU}2o&iGVpcb)zeSl-4x?D|GYGC|hXDSgIh(7VVd; zWFE6>7grtZ=gul-JQ&r4`1a zu`m1Ca$Ba8@AY!#X&JRtiI(%&cnyU$WaS*)nM*xOcY~-Wf6vqg+WupEs}rYNo!jBo zZqkwTphRcu{F_@%82;89sO!K|wr+re^p;tn-oiGd0s-i)f|!a;Z+81xrO^oEFl{u5 z@AdO2*vX1pacX>(SGdD#e2tg!RBg44vF?N12n~4wx86mO zg4R=>IxXkEqt{aRV{U5jrd|L6dqndiIF~ltchq->Y_Bwedx0CNsB=WcL$2Q#I{0OK za47JC{FlyYHgrJwT)3y%5xn82_+Bpszk`zB?xnzsLB5gS#NG?@F&xv=WAkW-cV+BD z-V@`0G{UtyC+dqro!%llFw|=;jO_j`*{2WG!cST>llFT$cLIbGA2^#@E-Yq_5bbF8 zq1efI-iI*#jbaiYQsr(&CB618_(rFgo;buXMkw{Be@))$g(_c_lFgpdkW19}kLe7* zHr2atS4Eq(FV|$+Q}MrL&s;r)w=%K=UUMNDaoVwH;CIOu;CRW`VtPyBx+|MuD?}Bk zkXnu}`*K}M|9qqqTX77NkIfajyi_=AavPzLaT>PzMkO;E#K%AgC3Wujn~8$dQW96k zhYEuSq&!*N_U9aCp{S^HwM;)1&>Rq{(;#lj%%hny7fw7|Ij^-;q^F-Mg*i@APwJx9 zQDiR2rS7W7>3PutGH*bvVW>krHE#byJZS33*4NxBuR0ZD;-2Gi&l#MVGwm7TYEryQ z7yVLrXzW|yfvovHEPTJBS5xO>K6GKbjzR#q=Z+8{(pTZ2<{ZK{1a)MyLnnmQBLH(B zv}@`&W7}v0+x{oyW&8&`A{mI?Es1;1Vqjln-T{0pqKHCIfC$O;tDCvS5h3mJv{{>o 
zqniN61fG0Rv$&wTdHb6ng9fyt{|Dw_=7$f?y^9-?p8%)ziS+oZ)zBA5I>pGp4>l1sO-?Ml#OIxfs+5s(ubd-7}V=zDX6Q zsW6r_XDHPCecXBl1v|M)Mgp(6#yviGcFsiR-M{sMWFh3@t(6#$=BO2*n~mV>M((}K z@L}8>W6l-F7}yHz!lo}Ye18F21VwP=Qf_8zK{d@6n!Hs z?^G~0qRn(SMcfADn|L5sAozXfR~rc7*dEXG1S1zfcaL!12TYxxv1<$`eIj)?SOr|^2r^-F zox65(vZ6@s%hEOVF#Ul1@Qb zoBn0!iek8rM#kR{Ub$fMO?ie$-UG!#lSOi2qUPy-gXgwq7iUJcohT!*xm{n(ympk_ z&YVPNo*_7-Xnpm8z zJA55RxbCchXbsiYzsMM^L}x70lJ7Z;YRUHrOTLJtk(x!KbBcJKLyxGqu+AdID(L*| z4SsWza!-1ijREQZE*IzNuXJ)+%lH8jRnzLHCMgc>X>u0$fD?QP+b^h6^g7*~1<4^z zG&ok+7HYGl#3Tt!+GkExSj(#$KE+#8nvZkl#wtV>>%oUkm&Z{^cBi@ z=Y%ks-youF5RsKk*K8q88nh-ghlt`dK*=gauuwmsV)itUEtxSIW=Vg+_Z$ti8b3v7 z*f z4@F0J^&=`K3!DUV 1: + warnings.warn("More that 1 image found in directory. Loading {}".format(image_list[image_index])) + # Load image + image = skio.imread(image_list[image_index]) + else: + image = skio.imread(image_path) + + image = np.expand_dims(image, axis=-1) + return image + + def load_ground_truth(self, image_path, subfolder = 'Masks', image_ext = '*.tif'): + """Loads ground truth images found in ``image_path`` and performs erosion/dilation/inversion if needed + + Parameters + ---------- + image_path : `str` + Path to look for ground truth images + subfolder : `str`, optional + [Default: 'Masks'] Subfolder in which to look for the ground truth images + image_ext : `str`, optional + [Default: '*.tif'] File extension of ground truth image file + + Returns + ---------- + output_ground_truth : `list` + List of ground truth images found in the directory with the given file extension + + class_ids : `list` + List of class ids of the ground truth images + """ + image_list = self.list_images(os.path.join(image_path, subfolder), image_ext = image_ext) + output_ground_truth = [] + class_ids = [] + for ground_truth_path in image_list: + # add class if not in list + ground_truth_name = ground_truth_path.split('\\')[-1] + class_name = ground_truth_name.split('_')[0] + # obtain class_id + class_ids.append(self.get_class_id(class_name)) + + # Load image + ground_truth_img = skio.imread(ground_truth_path) + + # If one mask in 2D, add one extra dimension for the class + if len(ground_truth_img.shape) == 2: + ground_truth_img = np.expand_dims(ground_truth_img, axis=-1) + else: + # Transpore dimension to get class at the end + if ground_truth_img.shape[-1] != self.config.get_parameter("nb_classes"): + ground_truth_img = np.transpose(ground_truth_img,(1,2,0)) + + # perform erosion so that the borders will still be there after augmentation + if self.config.get_parameter("use_binary_erosion") is True: + from skimage.morphology import binary_erosion, disk + # sets dtype back to unsigned integer in order for some augmentations to work + ground_truth_dtype = ground_truth_img.dtype + ground_truth_img = binary_erosion(ground_truth_img, disk(self.config.get_parameter("disk_size"))) + ground_truth_img = ground_truth_img.astype(ground_truth_dtype) + + if self.config.get_parameter("use_binary_dilation") is True: + from skimage.morphology import binary_dilation, disk + ground_truth_dtype = ground_truth_img.dtype + ground_truth_img = binary_dilation(ground_truth_img, disk(self.config.get_parameter("disk_size"))) + ground_truth_img = ground_truth_img.astype(ground_truth_dtype) + + # perform inversion of ground_truth if needed + if self.config.get_parameter("invert_ground_truth") is True: + ground_truth_img = skimage.util.invert(ground_truth_img) + + # Concatenate masks from different files together + if len(output_ground_truth) == 0: + 
+    def reshape_image(self, image):
+        """Reshapes the image to the correct dimensions for Unet
+
+        Parameters
+        ----------
+        image : `array_like`
+            Image to be reshaped
+
+        Returns
+        ----------
+        image : `array_like`
+            Reshaped image
+        """
+        h, w = image.shape[:2]
+        image = np.reshape(image, (h, w, -1))
+        return image
+
+    #######################
+    # Image padding
+    #######################
+    def pad_image(self, image, image_size, mode = 'constant'):
+        """Pad image to specified image_size
+
+        Parameters
+        ----------
+        image : `array_like`
+            Image to be padded
+        image_size : `list`
+            Final size of padded image
+        mode : `str`, optional
+            [Default: 'constant'] Mode to pad the image
+
+        Returns
+        ----------
+        image : `array_like`
+            Padded image
+
+        padding : `list`
+            List containing the number of pixels padded to each direction
+        """
+        h, w = image.shape[:2]
+
+        top_pad = (image_size[0] - h) // 2
+        bottom_pad = image_size[0] - h - top_pad
+
+        left_pad = (image_size[1] - w) // 2
+        right_pad = image_size[1] - w - left_pad
+
+        padding = ((top_pad, bottom_pad), (left_pad, right_pad))
+        image = np.pad(image, padding, mode = mode, constant_values=0)
+
+        return image, padding
+
+    def remove_pad_image(self, image, padding):
+        """Removes pad from image
+
+        Parameters
+        ----------
+        image : `array_like`
+            Padded image
+        padding : `list`
+            List containing the number of padded pixels in each direction
+
+        Returns
+        ----------
+        image : `array_like`
+            Image without padding
+        """
+
+        h, w = image.shape[:2]
+
+        return image[padding[0][0]:h-padding[0][1], padding[1][0]:w-padding[1][1]]
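pad_image and remove_pad_image are exact inverses when the same padding tuple is reused. A standalone sketch of that round trip, with an assumed 30x40 crop padded to 64x64:

import numpy as np

image = np.arange(30 * 40, dtype=float).reshape(30, 40)
h, w = image.shape[:2]

top = (64 - h) // 2; bottom = 64 - h - top        # same split as pad_image
left = (64 - w) // 2; right = 64 - w - left
padding = ((top, bottom), (left, right))

padded = np.pad(image, padding, mode='constant', constant_values=0)
restored = padded[top:64 - bottom, left:64 - right]  # remove_pad_image's slice

assert padded.shape == (64, 64)
assert np.array_equal(restored, image)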
+    #######################
+    # Tiling functions
+    #######################
+    def tile_image(self, image, tile_size, tile_overlap_size):
+        """Converts an image into a list of tiled images
+
+        Parameters
+        ----------
+        image : `array_like`
+            Image to be tiled
+        tile_size : `list`
+            Size of each individual tile
+        tile_overlap_size : `list`
+            Amount of overlap (in pixels) between each tile
+
+        Returns
+        ----------
+        tile_image_list : `list`
+            List of tiled images
+        num_rows : `int`
+            Number of rows of tiles
+        num_cols : `int`
+            Number of cols of tiles
+        padding : `list`
+            Amount of padding added during tiling
+        """
+        image_height, image_width = image.shape[:2]
+        tile_height = tile_size[0] - tile_overlap_size[0] * 2
+        tile_width = tile_size[1] - tile_overlap_size[1] * 2
+
+        if image_height <= tile_height and image_width <= tile_width:
+            # single-tile case: keep the same return signature as below
+            return [self.reshape_image(image)], 1, 1, None
+
+        num_rows = math.ceil(image_height/tile_height)
+        num_cols = math.ceil(image_width/tile_width)
+        num_tiles = num_rows*num_cols
+
+        # pad image to fit tile size
+        image, padding = self.pad_image(image, (tile_height*num_rows + tile_overlap_size[0] * 2, tile_width*num_cols + tile_overlap_size[1]*2))
+
+        tile_image_list = []
+
+        for tile_no in range(num_tiles):
+            tile_x_start = (tile_no // num_rows) * tile_width
+            tile_x_end = tile_x_start + tile_size[1]
+
+            tile_y_start = (tile_no % num_rows) * tile_height
+            tile_y_end = tile_y_start + tile_size[0]
+
+            tile_image = image[tile_y_start: tile_y_end, tile_x_start:tile_x_end]
+
+            # ensure input into unet is of correct shape
+            tile_image = self.reshape_image(tile_image)
+
+            tile_image_list.append(tile_image)
+
+        return tile_image_list, num_rows, num_cols, padding
+
+    def untile_image(self, tile_list, tile_size, tile_overlap_size, num_rows, num_cols, padding):
+        """Stitches a list of tiled images back into a single image
+
+        Parameters
+        ----------
+        tile_list : `list`
+            List of tiled images
+        tile_size : `list`
+            Size of each individual tile
+        tile_overlap_size : `list`
+            Amount of overlap (in pixels) between each tile
+        num_rows : `int`
+            Number of rows of tiles
+        num_cols : `int`
+            Number of cols of tiles
+        padding : `list`
+            Amount of padding used during tiling
+
+        Returns
+        ----------
+        image : `array_like`
+            Stitched image with padding removed
+        """
+        if num_rows == 1 and num_cols == 1:
+            image = tile_list[0]
+
+            if padding is not None:
+                image = self.remove_pad_image(image, padding = padding)
+
+            return image
+
+        tile_height = tile_size[0] - tile_overlap_size[0] * 2
+        tile_width = tile_size[1] - tile_overlap_size[1] * 2
+
+        num_tiles = num_rows*num_cols
+
+        for col in range(num_cols):
+            for row in range(num_rows):
+                tile_image = tile_list[num_rows*col + row][:,:,0]
+                tile_image = tile_image[tile_overlap_size[0]:min(-tile_overlap_size[0],-1),tile_overlap_size[1]:min(-tile_overlap_size[1],-1)]
+                if row == 0:
+                    image_col = np.array(tile_image)
+                else:
+                    image_col = np.vstack((image_col, tile_image))
+
+            if col == 0:
+                image = image_col
+            else:
+                image = np.hstack((image, image_col))
+
+        image, _ = self.pad_image(image, image_size = (tile_height * num_rows + tile_overlap_size[0] * 2, tile_width * num_cols + tile_overlap_size[1]*2))
+
+        if padding is not None:
+            image = self.remove_pad_image(image, padding = padding)
+
+        return image
+
+
+    #######################
+    # Image normalization
+    #######################
+    def percentile_normalization(self, image, in_bound=[3, 99.8]):
+        """Performs percentile normalization on the image
+
+        Parameters
+        ----------
+        image : `array_like`
+            Image to be normalized
+        in_bound : `list`
+            Upper and lower percentile used to normalize image
+
+        Returns
+        ----------
+        image : `array_like`
+            Normalized image
+
+        image_min : `float`
+            Value of the lower percentile of ``image``
+
+        image_max : `float`
+            Value of the upper percentile of ``image``
+        """
+        image_min = np.percentile(image, in_bound[0])
+        image_max = np.percentile(image, in_bound[1])
+        image = (image - image_min)/(image_max - image_min)
+
+        return image, image_min, image_max
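The tile grid above is sized from the usable (non-overlapping) part of each tile. A quick standalone check of that arithmetic with assumed numbers:

import math

tile_size, tile_overlap_size = (64, 64), (8, 8)
image_height, image_width = 200, 300

tile_height = tile_size[0] - tile_overlap_size[0] * 2   # 48 usable rows per tile
tile_width = tile_size[1] - tile_overlap_size[1] * 2    # 48 usable cols per tile

num_rows = math.ceil(image_height / tile_height)        # 5
num_cols = math.ceil(image_width / tile_width)          # 7

# padded canvas: the tile grid plus one overlap border on each side
padded = (tile_height * num_rows + tile_overlap_size[0] * 2,
          tile_width * num_cols + tile_overlap_size[1] * 2)
print(num_rows, num_cols, padded)                       # 5 7 (256, 352)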
diff --git a/models/internals/losses.py b/models/internals/losses.py
new file mode 100644
index 0000000..7e8ec84
--- /dev/null
+++ b/models/internals/losses.py
@@ -0,0 +1,328 @@
+from keras import backend as K
+from keras.losses import binary_crossentropy, mean_absolute_error, categorical_crossentropy
+import keras
+import tensorflow as tf
+import numpy as np
+from scipy import ndimage
+
+def jaccard_distance_loss(y_true, y_pred, smooth=100):
+    """
+    Jaccard = (|X & Y|)/ (|X|+ |Y| - |X & Y|)
+            = sum(|A*B|)/(sum(|A|)+sum(|B|)-sum(|A*B|))
+
+    The Jaccard distance loss is useful for unbalanced datasets. This has been
+    shifted so it converges on 0 and is smoothed to avoid exploding or
+    disappearing gradients.
+
+    Ref: https://en.wikipedia.org/wiki/Jaccard_index
+
+    @url: https://gist.github.com/wassname/f1452b748efcbeb4cb9b1d059dce6f96
+    @author: wassname
+    """
+    intersection = K.sum(y_true * y_pred, axis=-1)
+    sum_ = K.sum(y_true + y_pred, axis=-1)
+    jac = (intersection + smooth) / (sum_ - intersection + smooth)
+    return (1 - jac) * smooth
+
+
+def dice_coef(y_true, y_pred, smooth=1.):
+    """
+    Dice = (2*|X & Y|)/ (|X|+ |Y|)
+         = 2*sum(|A*B|)/(sum(A^2)+sum(B^2))
+    ref: https://arxiv.org/pdf/1606.04797v1.pdf
+
+    from wassname as well
+    """
+    intersection = K.sum(y_true * y_pred, axis=-1)
+    return (2. * intersection + smooth) / (K.sum(K.square(y_true),-1) + K.sum(K.square(y_pred),-1) + smooth)
+
+def dice_coef_loss(y_true, y_pred):
+    return 1. - dice_coef(y_true, y_pred)
+
+def bce_dice_loss(y_true, y_pred):
+    return 1. - dice_coef(y_true, y_pred) + binary_crossentropy(y_true, y_pred)
+
+def bce_ssim_loss(y_true, y_pred):
+    return DSSIM_loss(y_true, y_pred) + binary_crossentropy(y_true, y_pred)
+
+# code download from: https://github.com/bermanmaxim/LovaszSoftmax
+def lovasz_grad(gt_sorted):
+    """
+    Computes gradient of the Lovasz extension w.r.t sorted errors
+    See Alg. 1 in paper
+    """
+    gts = tf.reduce_sum(gt_sorted)
+    intersection = gts - tf.cumsum(gt_sorted)
+    union = gts + tf.cumsum(1. - gt_sorted)
+    jaccard = 1. - intersection / union
+    jaccard = tf.concat((jaccard[0:1], jaccard[1:] - jaccard[:-1]), 0)
+    return jaccard
+
+
+# --------------------------- EDGE DETECTION ---------------------------
+
+def edge_detection(y_true, y_pred):
+    size = 5
+    in_channel = y_pred.shape[-1]  # Number of classes
+
+    # all-ones kernel with centre 1 - size**2: a Laplacian-style edge detector
+    fil = np.ones([size, size])
+    fil[int(size/2), int(size/2)] = 1.0 - size**2
+    fil = tf.convert_to_tensor(fil, tf.float32)
+    fil = tf.stack([fil]*in_channel, axis=2)
+    fil = tf.expand_dims(fil, 3)
+
+    GT_edge_enhanced = tf.nn.depthwise_conv2d(y_true, fil, strides=[1, 1, 1, 1], padding="SAME")
+    GT_edge_enhanced = K.cast(GT_edge_enhanced, "float32")
+
+    # Define threshold values on Laplacian filter
+    Index_1 = tf.where(K.greater(GT_edge_enhanced, 0.1))
+    Index_2 = tf.where(K.less(GT_edge_enhanced, -0.1))
+
+    GT_edge1 = tf.gather_nd(y_true, Index_1)
+    GT_edge2 = tf.gather_nd(y_true, Index_2)
+
+    Pred_edge1 = tf.gather_nd(y_pred, Index_1)
+    Pred_edge2 = tf.gather_nd(y_pred, Index_2)
+
+    # edge pixels are appended to the flattened tensors, so the base loss counts them twice
+    y_true = tf.concat([K.flatten(y_true), K.flatten(GT_edge1), K.flatten(GT_edge2)],0)
+    y_pred = tf.concat([K.flatten(y_pred), K.flatten(Pred_edge1), K.flatten(Pred_edge2)],0)
+    return y_true, y_pred
+
+
+def edge_detection_sobel(y_true, y_pred):
+    # convert the datatypes of y_true, y_pred to make sure they are the same
+    y_true = K.cast(y_true, "float32")
+    y_pred = K.cast(y_pred, "float32")
+    GT_edge_enhanced = tf.image.sobel_edges(y_true)
+
+    GT_edge_enhanced = K.cast(GT_edge_enhanced, "float32")
+    GT_edge_enhanced = tf.keras.backend.sum(GT_edge_enhanced, axis = -1) # Sum X and Y Sobel
+
+    y_true = K.flatten(y_true)
+    y_pred = K.flatten(y_pred)
+    GT_edge_enhanced = K.flatten(GT_edge_enhanced)
+
+    # Define threshold values on sobel filter
+    Index_1 = tf.where(K.greater(GT_edge_enhanced, 0.001))
+    Index_2 = tf.where(K.less(GT_edge_enhanced, -0.001))
+
+    GT_edge1 = tf.gather(y_true, Index_1)
+    GT_edge2 = tf.gather(y_true, Index_2)
+
+    Pred_edge1 = tf.gather(y_pred, Index_1)
+    Pred_edge2 = tf.gather(y_pred, Index_2)
+
+    y_true = tf.concat([y_true, K.flatten(GT_edge1), K.flatten(GT_edge2)],0)
+    y_pred = tf.concat([y_pred, K.flatten(Pred_edge1), K.flatten(Pred_edge2)],0)
+    return y_true, y_pred
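The 5x5 filter built in edge_detection sums to zero, so it responds only where the mask value changes inside the window. A NumPy sketch on a toy binary mask, using the same +/-0.1 thresholds as above:

import numpy as np
from scipy import ndimage

size = 5
fil = np.ones([size, size])
fil[size // 2, size // 2] = 1.0 - size ** 2   # kernel sums to zero

mask = np.zeros((8, 8)); mask[:, 4:] = 1.0    # vertical edge at column 4
response = ndimage.convolve(mask, fil, mode='nearest')

edge_pixels = (response > 0.1) | (response < -0.1)
print(sorted(set(np.argwhere(edge_pixels)[:, 1])))   # [2, 3, 4, 5]: the columns flanking the edge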
+
+def EE_bce_dice_loss(y_true, y_pred):
+    y_true, y_pred = edge_detection(y_true, y_pred)
+    return bce_dice_loss(y_true, y_pred)
+
+
+def EE_jaccard_distance_loss(y_true, y_pred):
+    y_true, y_pred = edge_detection(y_true, y_pred)
+    return jaccard_distance_loss(y_true, y_pred)
+
+def EE_dice_coef_loss(y_true, y_pred):
+    y_true, y_pred = edge_detection(y_true, y_pred)
+    return dice_coef_loss(y_true, y_pred)
+
+def EE_bce_ssim_loss(y_true, y_pred):
+    y_true, y_pred = edge_detection(y_true, y_pred)
+    return bce_ssim_loss(y_true, y_pred)
+
+def EE_binary_crossentropy(y_true, y_pred):
+    y_true, y_pred = edge_detection(y_true, y_pred)
+    return binary_crossentropy(y_true, y_pred)
+
+def EE_categorical_crossentropy(y_true, y_pred):
+    y_true, y_pred = edge_detection(y_true, y_pred)
+    return categorical_crossentropy(y_true, y_pred)
+
+
+# --------------------------- BINARY LOSSES ---------------------------
+
+def lovasz_hinge(logits, labels, per_image=True, ignore=None):
+    """
+    Binary Lovasz hinge loss
+      logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty)
+      labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
+      per_image: compute the loss per image instead of per batch
+      ignore: void class id
+    """
+    if per_image:
+        def treat_image(log_lab):
+            log, lab = log_lab
+            log, lab = tf.expand_dims(log, 0), tf.expand_dims(lab, 0)
+            log, lab = flatten_binary_scores(log, lab, ignore)
+            return lovasz_hinge_flat(log, lab)
+        losses = tf.map_fn(treat_image, (logits, labels), dtype=tf.float32)
+        loss = tf.reduce_mean(losses)
+    else:
+        loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore))
+    return loss
+
+
+def lovasz_hinge_flat(logits, labels):
+    """
+    Binary Lovasz hinge loss
+      logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
+      labels: [P] Tensor, binary ground truth labels (0 or 1)
+      ignore: label to ignore
+    """
+
+    def compute_loss():
+        labelsf = tf.cast(labels, logits.dtype)
+        signs = 2. * labelsf - 1.
+        errors = 1. - logits * tf.stop_gradient(signs)
+        errors_sorted, perm = tf.nn.top_k(errors, k=tf.shape(errors)[0], name="descending_sort")
+        gt_sorted = tf.gather(labelsf, perm)
+        grad = lovasz_grad(gt_sorted)
+        loss = tf.tensordot(tf.nn.relu(errors_sorted), tf.stop_gradient(grad), 1, name="loss_non_void")
+        return loss
+
+    # deal with the void prediction case (only void pixels)
+    loss = tf.cond(tf.equal(tf.shape(logits)[0], 0),
+                   lambda: tf.reduce_sum(logits) * 0.,
+                   compute_loss,
+                   strict=True,
+                   name="loss"
+                   )
+    return loss
+
+
+def flatten_binary_scores(scores, labels, ignore=None):
+    """
+    Flattens predictions in the batch (binary case)
+    Remove labels equal to 'ignore'
+    """
+    scores = tf.reshape(scores, (-1,))
+    labels = tf.reshape(labels, (-1,))
+    if ignore is None:
+        return scores, labels
+    valid = tf.not_equal(labels, ignore)
+    vscores = tf.boolean_mask(scores, valid, name='valid_scores')
+    vlabels = tf.boolean_mask(labels, valid, name='valid_labels')
+    return vscores, vlabels
+
+def lovasz_loss(y_true, y_pred):
+    y_true, y_pred = K.cast(K.squeeze(y_true, -1), 'int32'), K.cast(K.squeeze(y_pred, -1), 'float32')
+    #logits = K.log(y_pred / (1. - y_pred))
+    logits = y_pred #Jiaxin
+    loss = lovasz_hinge(logits, y_true, per_image = True, ignore = None)
+    return loss
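lovasz_grad returns the differenced Jaccard losses of the error-sorted ground truth, so its entries telescope back to the final Jaccard error. A NumPy transcription for a quick sanity check (assuming the input is already sorted by descending error, as arranged in lovasz_hinge_flat):

import numpy as np

def lovasz_grad_np(gt_sorted):
    gts = gt_sorted.sum()
    intersection = gts - np.cumsum(gt_sorted)
    union = gts + np.cumsum(1.0 - gt_sorted)
    jaccard = 1.0 - intersection / union
    jaccard[1:] = jaccard[1:] - jaccard[:-1]   # same differencing as the tf.concat above
    return jaccard

g = lovasz_grad_np(np.array([1.0, 1.0, 0.0, 1.0]))
print(g)         # [0.333... 0.333... 0.083... 0.25]: non-negative increments
print(g.sum())   # 1.0, the Jaccard error once all four errors are counted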
+
+# Difference of Structural Similarity
+
+def DSSIM_loss(y_true, y_pred, k1=0.01, k2=0.03, kernel_size=3, max_value=1.0):
+    # There are additional parameters for this function
+    # Note: some of the 'modes' for edge behavior do not yet have a
+    # gradient definition in the Theano tree
+    # and cannot be used for learning
+
+    c1 = (k1 * max_value) ** 2
+    c2 = (k2 * max_value) ** 2
+
+    kernel = [kernel_size, kernel_size]
+    y_true = K.reshape(y_true, [-1] + list(K.int_shape(y_pred)[1:]))
+    y_pred = K.reshape(y_pred, [-1] + list(K.int_shape(y_pred)[1:]))
+
+    # note: the patch extraction below uses a fixed 5x5 window
+    patches_pred = tf.extract_image_patches(y_pred, [1, 5, 5, 1], [1, 2, 2, 1], [1, 1, 1, 1], "SAME")
+    patches_true = tf.extract_image_patches(y_true, [1, 5, 5, 1], [1, 2, 2, 1], [1, 1, 1, 1], "SAME")
+
+    # Reshape to get the var in the cells
+    bs, w, h, c = K.int_shape(patches_pred)
+    patches_pred = K.reshape(patches_pred, [-1, w, h, c])
+    patches_true = K.reshape(patches_true, [-1, w, h, c])
+    # Get mean
+    u_true = K.mean(patches_true, axis=-1)
+    u_pred = K.mean(patches_pred, axis=-1)
+    # Get variance
+    var_true = K.var(patches_true, axis=-1)
+    var_pred = K.var(patches_pred, axis=-1)
+    # Get covariance
+    covar_true_pred = K.mean(patches_true * patches_pred, axis=-1) - u_true * u_pred
+
+    ssim = (2 * u_true * u_pred + c1) * (2 * covar_true_pred + c2)
+    denom = ((K.square(u_true)
+              + K.square(u_pred)
+              + c1) * (var_pred + var_true + c2))
+    ssim /= denom  # no need for clipping, c1 and c2 make the denom non-zero
+    return K.mean((1.0 - ssim) / 2.0)
+
+def dssim_mae_loss(y_true, y_pred):
+    return DSSIM_loss(y_true, y_pred) + mean_absolute_error(y_true, y_pred)
+
+#MSSim
+#https://stackoverflow.com/questions/48744945/keras-ms-ssim-as-loss-function
+def keras_SSIM_cs(y_true, y_pred):
+    axis=None
+    gaussian = make_kernel(1.5)
+    x = tf.nn.conv2d(y_true, gaussian, strides=[1, 1, 1, 1], padding='SAME')
+    y = tf.nn.conv2d(y_pred, gaussian, strides=[1, 1, 1, 1], padding='SAME')
+
+    u_x=K.mean(x, axis=axis)
+    u_y=K.mean(y, axis=axis)
+
+    var_x=K.var(x, axis=axis)
+    var_y=K.var(y, axis=axis)
+
+    cov_xy=cov_keras(x, y, axis)
+
+    K1=0.01
+    K2=0.03
+    L=1  # depth of image (255 in case the image has a different scale)
+
+    C1=(K1*L)**2
+    C2=(K2*L)**2
+    C3=C2/2
+
+    l = ((2*u_x*u_y)+C1) / (K.pow(u_x,2) + K.pow(u_y,2) + C1)
+    c = ((2*K.sqrt(var_x)*K.sqrt(var_y))+C2) / (var_x + var_y + C2)
+    s = (cov_xy+C3) / (K.sqrt(var_x)*K.sqrt(var_y) + C3)
+
+    return [c,s,l]
+
+def keras_MS_SSIM(y_true, y_pred):
+    iterations = 5
+    x=y_true
+    y=y_pred
+    weight = [0.0448, 0.2856, 0.3001, 0.2363, 0.1333]
+    c=[]
+    s=[]
+    for i in range(iterations):
+        cs=keras_SSIM_cs(x, y)
+        c.append(cs[0])
+        s.append(cs[1])
+        l=cs[2]
+        if(i!=4):
+            x=tf.image.resize_images(x, (x.get_shape().as_list()[1]//(2**(i+1)), x.get_shape().as_list()[2]//(2**(i+1))))
+            y=tf.image.resize_images(y, (y.get_shape().as_list()[1]//(2**(i+1)), y.get_shape().as_list()[2]//(2**(i+1))))
+    c = tf.stack(c)
+    s = tf.stack(s)
+    cs = c*s
+
+    #Normalize: suggestion from https://github.com/jorge-pessoa/pytorch-msssim/issues/2 last comment to avoid NaN values
+    l=(l+1)/2
+    cs=(cs+1)/2
+
+    cs=cs**weight
+    cs = tf.reduce_prod(cs)
+    l=l**weight[-1]
+
+    ms_ssim = l*cs
+    ms_ssim = tf.where(tf.is_nan(ms_ssim), K.zeros_like(ms_ssim), ms_ssim)
+
+    return K.mean(ms_ssim)
+
+def mssim_mae_loss(y_true, y_pred):
+    return keras_MS_SSIM(y_true, y_pred) + mean_absolute_error(y_true, y_pred)
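keras_SSIM_cs calls make_kernel and cov_keras, neither of which is defined anywhere in this patch (they come from the referenced StackOverflow thread). A minimal sketch of compatible helpers, assuming a normalized Gaussian blur kernel shaped for single-channel tf.nn.conv2d and a standard covariance estimate:

import numpy as np
import tensorflow as tf
from keras import backend as K

def make_kernel(sigma, size=5):
    # [size, size, 1, 1] normalized Gaussian for tf.nn.conv2d
    ax = np.arange(size, dtype=np.float32) - (size - 1) / 2.0
    xx, yy = np.meshgrid(ax, ax)
    kernel = np.exp(-(xx ** 2 + yy ** 2) / (2.0 * sigma ** 2))
    kernel /= kernel.sum()
    return tf.constant(kernel[:, :, None, None], dtype=tf.float32)

def cov_keras(x, y, axis=None):
    # E[xy] - E[x]E[y], matching the patch-wise covariance used in DSSIM_loss
    return K.mean(x * y, axis=axis) - K.mean(x, axis=axis) * K.mean(y, axis=axis)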
diff --git a/models/internals/metrics.py b/models/internals/metrics.py
new file mode 100644
index 0000000..a426549
--- /dev/null
+++ b/models/internals/metrics.py
@@ -0,0 +1,23 @@
+"""Metrics for measuring machine learning algorithm performances
+adapted from https://github.com/deaspo/Unet_MedicalImagingSegmentation
+"""
+
+from keras import backend as K
+import tensorflow as tf
+import numpy as np
+
+def mean_iou(y_true, y_pred):
+    prec = []
+    for t in np.arange(0.5, 1.0, 0.05):
+        #y_pred_ = tf.to_int32(y_pred > t)
+        y_pred_ = tf.cast(y_pred > t, tf.int32)
+        if K.int_shape(y_pred)[-1] > 1:
+            num_class = K.int_shape(y_pred)[-1]
+        else:
+            num_class = K.int_shape(y_pred)[-1] + 1
+        score, up_opt = tf.compat.v1.metrics.mean_iou(y_true, y_pred_, num_class)
+        K.get_session().run(tf.compat.v1.local_variables_initializer())
+        with tf.control_dependencies([up_opt]):
+            score = tf.identity(score)
+        prec.append(score)
+    return K.mean(K.stack(prec), axis=0)
\ No newline at end of file
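mean_iou averages an intersection-over-union score over the prediction thresholds 0.5, 0.55, ..., 0.95. For a single-channel binary mask this is roughly the following NumPy computation (toy values; the TF metric accumulates a confusion matrix rather than computing IoU directly):

import numpy as np

y_true = np.array([0, 0, 1, 1, 1], dtype=bool)
y_prob = np.array([0.1, 0.6, 0.7, 0.9, 0.4])

ious = []
for t in np.arange(0.5, 1.0, 0.05):
    y_pred = y_prob > t
    intersection = np.logical_and(y_true, y_pred).sum()
    union = np.logical_or(y_true, y_pred).sum()
    ious.append(intersection / union)

print(np.mean(ious))   # mean IoU across the 10 thresholds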
diff --git a/models/internals/network_config.py b/models/internals/network_config.py
new file mode 100644
index 0000000..9455a2c
--- /dev/null
+++ b/models/internals/network_config.py
@@ -0,0 +1,237 @@
+import glob
+import os
+from ruamel.yaml import YAML
+
+class Network_Config(object):
+    def __init__(self, model_dir = None, config_filepath = None, **kwargs):
+        """Creates Network_Config object that contains the network parameters and functions needed to manipulate these parameters.
+
+        Parameters
+        ----------
+        model_dir : `str`, optional
+            [Default: None] Folder where the model is to be saved/read from
+        config_filepath : `str`, optional
+            [Default: None] Filepath to the config file that will be loaded
+        **kwargs
+            For network parameters that are to be changed from the loaded config file
+
+        Attributes
+        ----------
+        yaml : :class:`ruamel.yaml.YAML`
+            YAML class with the functions needed to read/write YAML files
+        config : `dict`
+            Dictionary containing the config parameters
+        """
+        self.yaml = YAML()
+
+        # load config file from model_dir
+        if config_filepath is not None:
+            self.config = self.load_config_from_file(config_filepath)
+            print("Loaded config file from {}".format(config_filepath))
+        elif model_dir is not None:
+            try:
+                self.config = self.load_config_from_model_dir(model_dir)
+                print("Loaded config file from {}".format(model_dir))
+            except:
+                print("Please ensure that config_filepath is set or there is a config file in model_dir")
+                raise
+
+        if model_dir is not None:
+            # update model_dir in config
+            print("Updating model_dir to {}".format(model_dir))
+            self.update_parameter(["general", "model_dir"], model_dir)
+
+        # overwrite network parameters with parameters given during initialization
+        for key, value in kwargs.items():
+            self.update_parameter(self.find_key(key), value)
+
+        # perform calculations
+        self.update_parameter(["model", "input_size"], self.get_parameter("tile_size") + [self.get_parameter("image_channel"),])
+        self.update_parameter(["model", "batch_size"], self.get_parameter("batch_size_per_GPU")) # * self.gpu_count
+
+    ######################
+    # Accessors/Mutators
+    ######################
+    def get_parameter(self, parameter, config = []):
+        """Output the value from the config file using the given key
+
+        Parameters
+        ----------
+        parameter : `list` or `str`
+            Key or list of keys used to find the value in the config file
+
+        config : `list`, optional
+            Used to iterate through nested dictionaries; required to recursively
+            address items in a nested dictionary
+
+        Returns
+        ----------
+        value : `str` or `int` or `list`
+            Value obtained from the specified key
+
+        See Also
+        ----------
+        find_key : Function to identify the list of keys to address the correct item in a nested dictionary
+        """
+        assert isinstance(parameter, (list, str))
+
+        # search for the key in the nested dictionary
+        if isinstance(parameter, str):
+            parameter = self.find_key(parameter)
+
+        if config == []:
+            config = self.config
+        if config is None:
+            return None
+
+        if not parameter:
+            return config
+
+        return self.get_parameter(parameter[1:], config = config.get(parameter[0]))
+
+    def update_parameter(self, parameter, value, config = None):
+        """Updates the parameter in the config file using a full addressed list
+
+        Parameters
+        ----------
+        parameter : `list`
+            List of keys that point to the correct item in the nested dictionary
+
+        value : `str` or `int` or `list`
+            Value that is updated in the nested dictionary
+
+        config : `list` or `none`, optional
+            Used to iterate through nested dictionaries
+
+        Returns
+        ----------
+        config : `dict`
+            Config dictionary (at the current nesting level) with the value updated
+        """
+
+        assert type(parameter) is list
+
+        if config is None:
+            config = self.config
+
+        if len(parameter) == 1:
+            config.update({parameter[0]: value})
+            return config
+        # recurse from the current level, not from the root config
+        return self.update_parameter(parameter[1:], value, config = config.get(parameter[0]))
+
+    def find_key(self, key, config = None):
+        """Find the list of keys to address the correct item in a nested dictionary
+
+        Parameters
+        ----------
+        key : `str`
+            Key that needs to be correctly addressed in a nested dictionary
+
+        config : `list` or `none`, optional
+            Used to iterate through nested dictionaries
+
+        Returns
+        ----------
+        key : `list`
+            Address of the key in the nested dictionary
+        """
+
+        if config is None:
+            config = self.config
+
+        for k, v in config.items():
+            if k == key:
+                return [k]
+            elif isinstance(v, dict):
+                found_key = self.find_key(key, config = v)
+                if found_key is not None:
+                    return [k] + found_key
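get_parameter, update_parameter and find_key cooperate on the nested config dict. A standalone sketch of the same traversal on a toy dict, using two keys that appear in __init__ above:

config = {"general": {"model_dir": "/tmp/run"},
          "model": {"batch_size_per_GPU": 4}}

def find_key(cfg, key):                       # depth-first search, as above
    for k, v in cfg.items():
        if k == key:
            return [k]
        if isinstance(v, dict):
            path = find_key(v, key)
            if path is not None:
                return [k] + path

path = find_key(config, "batch_size_per_GPU")

value = config
for k in path:                                # get_parameter's recursion, unrolled
    value = value.get(k)
print(path, value)                            # ['model', 'batch_size_per_GPU'] 4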
+    ######################
+    # Config IO options
+    ######################
+    def load_config_from_file(self, file_path):
+        """Load parameters from yaml file
+
+        Parameters
+        ----------
+        file_path : `str`
+            Path of config file to load
+
+        Returns
+        ----------
+        config : `dict`
+            Dictionary containing the config parameters
+        """
+
+        with open(file_path, 'r') as input_file:
+            config = self.yaml.load(input_file)
+
+        return config
+
+    def load_config_from_model_dir(self, model_dir):
+        """Searches the model directory for a config file and loads it
+
+        Parameters
+        ----------
+        model_dir : `str`
+            Folder to search for and load the config file
+
+        Returns
+        ----------
+        config : `dict`
+            Dictionary containing the config parameters
+
+        Raises
+        ------
+        IndexError
+            If there is no config file in the model_dir
+        """
+
+        # check if yaml file exists in model_dir
+        try:
+            list_config_files = glob.glob(os.path.join(model_dir,'*config.yml'))
+            if len(list_config_files) > 1:
+                print("Multiple config files found. Loading {}".format(list_config_files[0]))
+            else:
+                print("Config file exists in model directory. Loading {}".format(list_config_files[0]))
+            return self.load_config_from_file(list_config_files[0])
+        except IndexError:
+            print("No config file found in model_dir.")
+            raise
+
+    def write_config(self, file_path):
+        """Writes parameters to yaml file
+
+        Parameters
+        ----------
+        file_path : `str`
+            Path of config file to write to
+        """
+
+        with open(file_path, 'w') as output_file:
+            self.yaml.dump(self.config, output_file)
+
+        print("Config file written to: {}".format(file_path))
+
+    def write_model(self, model, file_path):
+        """Writes the model architecture to a yaml file
+
+        Parameters
+        ----------
+        model : :class:`Keras.model`
+            Keras model that will be parsed and written to a yaml file
+
+        file_path : `str`
+            Path of model file to write to
+        """
+
+        with open(file_path, 'w') as output_file:
+            output_file.write(model.to_yaml())
+
+        print("Model file written to: {}".format(file_path))
\ No newline at end of file
diff --git a/models/layers/.ipynb_checkpoints/layers-checkpoint.py b/models/layers/.ipynb_checkpoints/layers-checkpoint.py
new file mode 100644
index 0000000..507d984
--- /dev/null
+++ b/models/layers/.ipynb_checkpoints/layers-checkpoint.py
@@ -0,0 +1,59 @@
[Jupyter checkpoint copy; its 59 lines are identical to models/layers/layers.py below]
diff --git a/models/layers/__init__.py b/models/layers/__init__.py
new file mode 100644
index 0000000..61006f3
--- /dev/null
+++ b/models/layers/__init__.py
@@ -0,0 +1 @@
+from __future__ import absolute_import, print_function
\ No newline at end of file
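A minimal sketch of how the helpers in layers.py (below, duplicated in the checkpoint above) compose into a small encoder/decoder stack; the input size and filter counts are assumptions, not the project's actual Unet builder:

from keras.models import Model
from keras.layers import Input
from keras.layers.pooling import MaxPooling2D
from models.layers.layers import bn_relu_conv2d, bn_relu_conv2dtranspose, normalize_input

inputs = Input((256, 256, 1))
x = normalize_input(inputs, scale_input=True)            # scale raw 8-bit input
x = bn_relu_conv2d(x, 32, 3, acti='relu', padding='same',
                   kernel_initializer='he_normal', weight_regularizer=1e-4)
x = MaxPooling2D()(x)
x = bn_relu_conv2d(x, 64, 3, acti='relu', padding='same',
                   kernel_initializer='he_normal', weight_regularizer=1e-4)
x = bn_relu_conv2dtranspose(x, 32, 2, acti='relu', padding='same',
                            kernel_initializer='he_normal', weight_regularizer=1e-4)
model = Model(inputs, x)                                 # 256x256x1 -> 256x256x32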
diff --git a/models/layers/__pycache__/__init__.cpython-36.pyc b/models/layers/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..95b5193d169d8c1821fec60a344119e357691f89
GIT binary patch
literal 213
[binary patch data omitted]

diff --git a/models/layers/__pycache__/__init__.cpython-37.pyc b/models/layers/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5d403101cf876092a08c60a660a3c460bf377b75
GIT binary patch
literal 231
[binary patch data omitted]

diff --git a/models/layers/__pycache__/layers.cpython-36.pyc b/models/layers/__pycache__/layers.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..80df40c2fbfe9862496fa7f358bcde8be709deb5
GIT binary patch
literal 2291
[binary patch data omitted]

diff --git a/models/layers/__pycache__/layers.cpython-37.pyc b/models/layers/__pycache__/layers.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..33e0960abda9475b721bc822b4ee3bec09bde428
GIT binary patch
literal 2304
[binary patch data omitted]
z%1MmvNd}Tj|3iE1-|3Opo_y}Dmv)9L#d6R9rNH3~IpofK^U$N28K1ytxi4NE)(QC& zg~@P%`5tEe2M|s;6(lHE8c>j#V6qZaq!Tz4_bZ|*-N2Q#peDV*vwM!HOF!^sBWPH; zDw=X8n31iZC1-VOl`FxD<$d5+gIkuT zVl7zb4c@#Y!EHXnTd>~YxA;6?xTL{dzQ)^p3AlTFov-j!;5InjC!O0@;1}t2n5jO` zcr1)BvXJ*B@|}vQK1~NhZ5AGdIy&uTN`@l22z8RB#@~%}a&CFJ?LG_T5f4r6iOL4q zP{UpKah9HMJu$Q8dZ5BIA7puKnumk^P!2?ro@_noP~)S*v;BkTR%$*EU;UJ2<5OlD za2ka=PGOqLF6VHgsp6BN2vu?ss~qIoQ5c=YDUTR>0|VCz%*QbEkAP@WP)$cnEo)ZL z5iQ6F+%Lu}CjbTQfzf`FCuy$3G=h)iS}9Bb^e;gma^r;H_3uP2!}i~gyE4^Xd6aZd zGZ`bJxa{se?M47!_XpUMAuxw&tn;pgH1CS=HP}2}HV3cGBEDGS*gqboAY|zrnvGv3 z)Td3RR)LEs$}n)f;adS|dB*ZNSjibvZS+QyU%|KEz(?yF>nXg}VG5r%Ol$#a3oqm3 zkz_fmqWCa!5Q)ln2L*K{N%~6M`b&#*{c|#(oA3Lz3JeW2B-vOh^h#=@c*EKEb zg)?HnK`07VuwzzKi%Q|@N>Lk8PJeNWI%*bm2n*|V98(*FoI}2t8pI^wac&mQVwJ|C zpQMRSFe_vAZg0xyj-#sZb&H@vlU`UXxu1Qe$L zQS}{2>;^~`s9^Oe+_nUqwmw{&e;q%& zlS)T}rZDgk1FAlQ-Mt8-f&*6vCT4Wr15z-^W=}oT&|F895>4NbH}shC3U@BO>&mrS zd!SVcMZ>Qb_zQyzMHiiXIGgr<5^6 zu4QL$uh;9`2b4p7YN}A-VQ!js*(}?q!bmC1mC1hDPZg$6S?Sm|&3t*OJlu>j6kZ7 zNvsefHH!p0vYJOd`lIpTk1o&DvB+K;rjN_Mg$>rW4qH1G%QDpO)uFcyC=ErtgUvX{ ba`l)8CD*pINt>_&Ww7gdHTZAwVS4`p?MoWP literal 0 HcmV?d00001 diff --git a/models/layers/layers.py b/models/layers/layers.py new file mode 100644 index 0000000..507d984 --- /dev/null +++ b/models/layers/layers.py @@ -0,0 +1,59 @@ +import math + +import keras +from keras.models import Model, load_model +from keras.layers import Input, BatchNormalization, Activation +from keras.layers.core import Lambda, Dropout +from keras.layers.convolutional import Conv2D, Conv2DTranspose, UpSampling2D +from keras.layers.convolutional_recurrent import ConvLSTM2D +from keras.layers.pooling import MaxPooling2D +from keras.layers.merge import Concatenate, Add +from keras import regularizers +from keras import backend as K + +import tensorflow as tf + +def activation_function(inputs, acti): + if isinstance(acti, str): + return Activation(acti)(inputs) + else: + return acti(inputs) + +def regularizer_function(weight_regularizer): + if weight_regularizer == 0 or weight_regularizer == None: + return None + else: + return regularizers.l2(weight_regularizer) + +def bn_relu_conv2d(inputs, filters, filter_size, + strides = 1, acti = None, padding = None, + kernel_initializer = None, weight_regularizer = None, name = ""): + output = BatchNormalization()(inputs) + output = activation_function(output, acti) + output = Conv2D(filters, (filter_size, filter_size), padding=padding, strides = strides, + kernel_initializer=kernel_initializer, + kernel_regularizer=regularizer_function(weight_regularizer))(output) + + return output + +def bn_relu_conv2dtranspose(inputs, filters, filter_size, + strides = 2, acti = None, padding = None, + kernel_initializer = None, weight_regularizer = None, name = ""): + output = BatchNormalization()(inputs) + output = activation_function(output, acti) + output = Conv2DTranspose(filters, (2, 2), strides=strides, padding=padding, + kernel_initializer=kernel_initializer, + kernel_regularizer=regularizer_function(weight_regularizer))(output) + return output + +def normalize_input(inputs, scale_input = False, mean_std_normalization = False, mean = None, std = None): + if mean_std_normalization is True: + print("Using normalization") + return Lambda(lambda x: (x - mean)/std)(inputs) + elif scale_input is True: + print("Not using normalization") + return Lambda(lambda x: x / 255)(inputs) + else: + return inputs + + \ No newline at end of file diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..b6f70bf --- /dev/null +++ 
b/requirements.txt @@ -0,0 +1,20 @@ +matplotlib==3.1.2 +scikit-image==0.17.2 +scikit-learn==0.22.1 +ruamel.yaml==0.16.12 +tqdm==4.53.0 +tensorflow-gpu==1.14.0 +protobuf==3.11.2 +PyYAML==5.3 +albumentations==0.5.1 +Keras==2.3.0 +jupyterlab==1.2.5 +jupyter-tensorboard==0.1.10 +tensorboard==1.14.0 +numba==0.48.0 +ipywidgets==7.5.1 +npm +h5py==2.10.0 + + +#conda install -c conda-forge nodejs==16.13.2 diff --git a/unets.ipynb b/unets.ipynb new file mode 100644 index 0000000..08253ce --- /dev/null +++ b/unets.ipynb @@ -0,0 +1,549 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Import required modules" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Using TensorFlow backend.\n" + ] + } + ], + "source": [ + "import os\n", + "import warnings\n", + "warnings.simplefilter(action='ignore', category=FutureWarning)\n", + "import matplotlib.pyplot as plt\n", + "\n", + "# import user classes\n", + "from models.Unet import Unet\n", + "from models.Unet_Resnet import Unet_Resnet101, Unet_Resnet50, Unet_Resnet_paper\n", + "from models.Unet_ResAttnet import Res_att_unet_2d, Res_att_unet_3d\n", + "\n", + "%load_ext autoreload\n", + "%autoreload 2\n", + "\n", + "# functions for visualization\n", + "def display_images(image, cmap='gray', norm=None, interpolation='bilinear'):\n", + "\n", + " plt.figure(figsize=(14, 14))\n", + " plt.axis('off')\n", + " plt.imshow(image, cmap=cmap,\n", + " norm=norm, interpolation=interpolation)\n", + " plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Initialize model" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Loaded config file from configs/default_singleclass_unet.yml\n", + "WARNING:tensorflow:From C:\\Users\\cjt678\\Desktop\\Unets\\models\\CNN_Base.py:169: The name tf.ConfigProto is deprecated. Please use tf.compat.v1.ConfigProto instead.\n", + "\n", + "WARNING:tensorflow:From C:\\Users\\cjt678\\Desktop\\Unets\\models\\CNN_Base.py:171: The name tf.Session is deprecated. Please use tf.compat.v1.Session instead.\n", + "\n" + ] + } + ], + "source": [ + "model = Unet(config_filepath=\"configs/default_singleclass_unet.yml\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 1. Load / augment dataset" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Performing augmentations on 200 images\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Augmentation of images: 100%|███████████████| 200/200 [00:00<00:00, 413.20it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Augmentations complete!\n" + ] + } + ], + "source": [ + "model.load_dataset()\n", + "model.augment_images()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2. 
Training" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Unet 2022-02-15 13:41:48.599567\n", + "Config file written to: /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\Unet-20220215T1341-config.yml\n", + "WARNING:tensorflow:From C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:4070: The name tf.nn.max_pool is deprecated. Please use tf.nn.max_pool2d instead.\n", + "\n", + "Model file written to: /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\Unet-20220215T1341-model.yml\n", + "Training using single GPU or CPU..\n", + "Loss : edge-enhanced Dice loss\n", + "Metrics : IoU\n", + "WARNING:tensorflow:From C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\tensorflow\\python\\ops\\metrics_impl.py:1178: add_dispatch_support..wrapper (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use tf.where in 2.0, which has the same broadcast rule as np.where\n", + "WARNING:tensorflow:From C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\tensorflow\\python\\ops\\metrics_impl.py:1179: div (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Deprecated in favor of operator or tf.math.divide.\n" + ] + } + ], + "source": [ + "model.initialize_model()\n", + "# If pre-trained model, please indicate the path \n", + "#model.load_weights('/mnt/mbi/home/mbirdm/AI/data_ai/Vidhya/Networks/OneClass/Best/Res_att_unet_2d-20200504T0646_BCE/weights_now.h5')" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "#model.summary()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:422: The name tf.global_variables is deprecated. Please use tf.compat.v1.global_variables instead.\n", + "\n", + "Train on 180 samples, validate on 20 samples\n", + "WARNING:tensorflow:From C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\keras\\callbacks\\tensorboard_v1.py:200: The name tf.summary.merge_all is deprecated. Please use tf.compat.v1.summary.merge_all instead.\n", + "\n", + "WARNING:tensorflow:From C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\keras\\callbacks\\tensorboard_v1.py:206: The name tf.summary.FileWriter is deprecated. Please use tf.compat.v1.summary.FileWriter instead.\n", + "\n", + "Epoch 1/120\n", + "180/180 [==============================] - 19s 107ms/step - loss: 0.7970 - mean_iou: 0.4572 - val_loss: 0.8440 - val_mean_iou: 0.5045\n", + "\n", + "Epoch 00001: val_loss improved from inf to 0.84399, saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_best.h5\n", + "\n", + "Epoch 00001: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "WARNING:tensorflow:From C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\keras\\callbacks\\tensorboard_v1.py:343: The name tf.Summary is deprecated. 
Please use tf.compat.v1.Summary instead.\n", + "\n", + "Epoch 2/120\n", + "180/180 [==============================] - 11s 64ms/step - loss: 0.5830 - mean_iou: 0.5309 - val_loss: 0.8166 - val_mean_iou: 0.5575\n", + "\n", + "Epoch 00002: val_loss improved from 0.84399 to 0.81663, saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_best.h5\n", + "\n", + "Epoch 00002: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 3/120\n", + "180/180 [==============================] - 12s 64ms/step - loss: 0.4035 - mean_iou: 0.5728 - val_loss: 0.7998 - val_mean_iou: 0.5881\n", + "\n", + "Epoch 00003: val_loss improved from 0.81663 to 0.79979, saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_best.h5\n", + "\n", + "Epoch 00003: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 4/120\n", + "180/180 [==============================] - 12s 64ms/step - loss: 0.3243 - mean_iou: 0.5984 - val_loss: 0.8206 - val_mean_iou: 0.6103\n", + "\n", + "Epoch 00004: val_loss did not improve from 0.79979\n", + "\n", + "Epoch 00004: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 5/120\n", + "180/180 [==============================] - 11s 63ms/step - loss: 0.2794 - mean_iou: 0.6194 - val_loss: 0.8618 - val_mean_iou: 0.6287\n", + "\n", + "Epoch 00005: val_loss did not improve from 0.79979\n", + "\n", + "Epoch 00005: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 6/120\n", + "180/180 [==============================] - 12s 64ms/step - loss: 0.2433 - mean_iou: 0.6364 - val_loss: 0.8946 - val_mean_iou: 0.6449\n", + "\n", + "Epoch 00006: val_loss did not improve from 0.79979\n", + "\n", + "Epoch 00006: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 7/120\n", + "180/180 [==============================] - 11s 64ms/step - loss: 0.2270 - mean_iou: 0.6518 - val_loss: 0.8884 - val_mean_iou: 0.6584\n", + "\n", + "Epoch 00007: val_loss did not improve from 0.79979\n", + "\n", + "Epoch 00007: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 8/120\n", + "180/180 [==============================] - 11s 63ms/step - loss: 0.2077 - mean_iou: 0.6640 - val_loss: 0.7788 - val_mean_iou: 0.6703\n", + "\n", + "Epoch 00008: val_loss improved from 0.79979 to 0.77882, saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_best.h5\n", + "\n", + "Epoch 00008: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 9/120\n", + "180/180 [==============================] - 11s 63ms/step - loss: 0.1966 - mean_iou: 0.6751 - val_loss: 0.6477 - val_mean_iou: 0.6810\n", + "\n", + "Epoch 00009: val_loss improved from 0.77882 to 0.64773, saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_best.h5\n", + "\n", + "Epoch 00009: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 10/120\n", + "180/180 [==============================] - 12s 64ms/step - loss: 0.1959 - mean_iou: 0.6857 - val_loss: 0.6879 - val_mean_iou: 0.6904\n", + "\n", + "Epoch 00010: val_loss did not improve from 0.64773\n", + "\n", + "Epoch 00010: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 11/120\n", + "180/180 
[==============================] - 11s 63ms/step - loss: 0.1838 - mean_iou: 0.6945 - val_loss: 0.6889 - val_mean_iou: 0.6987\n", + "\n", + "Epoch 00011: val_loss did not improve from 0.64773\n", + "\n", + "Epoch 00011: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 12/120\n", + "180/180 [==============================] - 11s 63ms/step - loss: 0.1772 - mean_iou: 0.7020 - val_loss: 0.4161 - val_mean_iou: 0.7060\n", + "\n", + "Epoch 00012: val_loss improved from 0.64773 to 0.41609, saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_best.h5\n", + "\n", + "Epoch 00012: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 13/120\n", + "180/180 [==============================] - 12s 64ms/step - loss: 0.1653 - mean_iou: 0.7099 - val_loss: 0.4036 - val_mean_iou: 0.7138\n", + "\n", + "Epoch 00013: val_loss improved from 0.41609 to 0.40355, saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_best.h5\n", + "\n", + "Epoch 00013: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 14/120\n", + "180/180 [==============================] - 12s 64ms/step - loss: 0.1576 - mean_iou: 0.7174 - val_loss: 0.3715 - val_mean_iou: 0.7209\n", + "\n", + "Epoch 00014: val_loss improved from 0.40355 to 0.37149, saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_best.h5\n", + "\n", + "Epoch 00014: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 15/120\n", + "180/180 [==============================] - 11s 63ms/step - loss: 0.1485 - mean_iou: 0.7243 - val_loss: 0.2858 - val_mean_iou: 0.7280\n", + "\n", + "Epoch 00015: val_loss improved from 0.37149 to 0.28581, saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_best.h5\n", + "\n", + "Epoch 00015: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 16/120\n", + "180/180 [==============================] - 12s 64ms/step - loss: 0.1411 - mean_iou: 0.7313 - val_loss: 0.3070 - val_mean_iou: 0.7345\n", + "\n", + "Epoch 00016: val_loss did not improve from 0.28581\n", + "\n", + "Epoch 00016: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 17/120\n", + "180/180 [==============================] - 12s 65ms/step - loss: 0.1451 - mean_iou: 0.7372 - val_loss: 0.2580 - val_mean_iou: 0.7402\n", + "\n", + "Epoch 00017: val_loss improved from 0.28581 to 0.25796, saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_best.h5\n", + "\n", + "Epoch 00017: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 18/120\n", + "180/180 [==============================] - 11s 63ms/step - loss: 0.1419 - mean_iou: 0.7428 - val_loss: 0.2959 - val_mean_iou: 0.7455\n", + "\n", + "Epoch 00018: val_loss did not improve from 0.25796\n", + "\n", + "Epoch 00018: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 19/120\n", + "180/180 [==============================] - 12s 64ms/step - loss: 0.1355 - mean_iou: 0.7479 - val_loss: 0.2934 - val_mean_iou: 0.7505\n", + "\n", + "Epoch 00019: val_loss did not improve from 0.25796\n", + "\n", + "Epoch 00019: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 20/120\n", + "180/180 
[==============================] - 11s 64ms/step - loss: 0.1255 - mean_iou: 0.7529 - val_loss: 0.3173 - val_mean_iou: 0.7553\n", + "\n", + "Epoch 00020: val_loss did not improve from 0.25796\n", + "\n", + "Epoch 00020: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 21/120\n", + "180/180 [==============================] - 11s 64ms/step - loss: 0.1196 - mean_iou: 0.7576 - val_loss: 0.2384 - val_mean_iou: 0.7599\n", + "\n", + "Epoch 00021: val_loss improved from 0.25796 to 0.23842, saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_best.h5\n", + "\n", + "Epoch 00021: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 22/120\n", + "180/180 [==============================] - 11s 64ms/step - loss: 0.1284 - mean_iou: 0.7620 - val_loss: 0.2996 - val_mean_iou: 0.7642\n", + "\n", + "Epoch 00022: val_loss did not improve from 0.23842\n", + "\n", + "Epoch 00022: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 23/120\n", + "180/180 [==============================] - 12s 64ms/step - loss: 0.1166 - mean_iou: 0.7661 - val_loss: 0.2548 - val_mean_iou: 0.7682\n", + "\n", + "Epoch 00023: val_loss did not improve from 0.23842\n", + "\n", + "Epoch 00023: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 24/120\n", + "180/180 [==============================] - 12s 64ms/step - loss: 0.1101 - mean_iou: 0.7702 - val_loss: 0.2796 - val_mean_iou: 0.7721\n", + "\n", + "Epoch 00024: val_loss did not improve from 0.23842\n", + "\n", + "Epoch 00024: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 25/120\n", + "180/180 [==============================] - 12s 64ms/step - loss: 0.1057 - mean_iou: 0.7739 - val_loss: 0.3131 - val_mean_iou: 0.7758\n", + "\n", + "Epoch 00025: val_loss did not improve from 0.23842\n", + "\n", + "Epoch 00025: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 26/120\n", + "180/180 [==============================] - 11s 64ms/step - loss: 0.1040 - mean_iou: 0.7773 - val_loss: 0.2209 - val_mean_iou: 0.7792\n", + "\n", + "Epoch 00026: val_loss improved from 0.23842 to 0.22088, saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_best.h5\n", + "\n", + "Epoch 00026: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 27/120\n", + "180/180 [==============================] - 12s 64ms/step - loss: 0.1021 - mean_iou: 0.7811 - val_loss: 0.2432 - val_mean_iou: 0.7828\n", + "\n", + "Epoch 00027: val_loss did not improve from 0.22088\n", + "\n", + "Epoch 00027: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 28/120\n", + "180/180 [==============================] - 11s 64ms/step - loss: 0.0953 - mean_iou: 0.7844 - val_loss: 0.2320 - val_mean_iou: 0.7862\n", + "\n", + "Epoch 00028: val_loss did not improve from 0.22088\n", + "\n", + "Epoch 00028: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 29/120\n", + "180/180 [==============================] - 12s 64ms/step - loss: 0.0952 - mean_iou: 0.7878 - val_loss: 0.2883 - val_mean_iou: 0.7893\n", + "\n", + "Epoch 00029: val_loss did not improve from 0.22088\n", + "\n", + "Epoch 00029: saving model to 
/Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 30/120\n", + "180/180 [==============================] - 11s 64ms/step - loss: 0.0942 - mean_iou: 0.7907 - val_loss: 0.2919 - val_mean_iou: 0.7922\n", + "\n", + "Epoch 00030: val_loss did not improve from 0.22088\n", + "\n", + "Epoch 00030: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 31/120\n", + " 84/180 [=============>................] - ETA: 5s - loss: 0.0872 - mean_iou: 0.7928" + ] + } + ], + "source": [ + "model.train_model()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3. Prediction" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Config file exists in model directory. Loading /mnt/mbi/home/mbirdm/AI/data_ai/Vidhya/Networks/Unet-20191024T0603/Unet-20191024T0603-config.yml\n", + "Loaded config file from /mnt/mbi/home/mbirdm/AI/data_ai/Vidhya/Networks/Unet-20191024T0603/\n", + "Updating model_dir to /mnt/mbi/home/mbirdm/AI/data_ai/Vidhya/Networks/Unet-20191024T0603/\n", + "Unet 2021-05-19 07:03:55.120803\n", + "Predicting using single GPU or CPU..\n", + "Loss : Edge Enhanced categorical_crossentropy\n", + "Metrics : ['categorical_accuracy']\n", + "Loaded weights from: /mnt/mbi/home/mbirdm/AI/data_ai/Vidhya/Networks/Unet-20191024T0603/weights_best.h5\n" + ] + } + ], + "source": [ + "# Please remember to change to the correct folder containing the network weights\n", + "model_dir = '/mnt/mbi/home/mbirdm/AI/data_ai/Vidhya/Networks/Unet-20191024T0603/'\n", + "model = Unet(model_dir = model_dir,\n", + " for_prediction=True,\n", + " tile_size = [512,512],\n", + " tile_overlap_size = [0,0])\n", + "#model = Unet(model_dir = model_dir,\n", + "# use_cpu = True,\n", + "# config_filepath=None,\n", + "# for_prediction=True,\n", + "# save_as_uint16=True,\n", + "# tile_size = [512,512],\n", + "# tile_overlap_size = [0,0])\n", + "model.initialize_model()\n", + "model.load_weights(model_dir+'weights_best.h5') # leave blank to load last h5 file in folder" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "d921fb6051344743bb061dee2f4dbf25", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(HTML(value=''), FloatProgress(value=0.0, max=2.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TiffPage 0: TypeError: read_bytes() missing 3 required positional arguments: 'dtype', 'count', and 'offsetsize'\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "1bbb46d4142244019af1b5369a11fa20", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(HTML(value=''), FloatProgress(value=0.0, max=2.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TiffPage 0: TypeError: read_bytes() missing 3 required positional arguments: 'dtype', 'count', and 'offsetsize'\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "940c529fb73542a0a9912b667e67a328", + "version_major": 2, + "version_minor": 
0 + }, + "text/plain": [ + "HBox(children=(HTML(value=''), FloatProgress(value=0.0, max=68.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n" + ] + } + ], + "source": [ + "# run prediction on every image in the given folder\n", + "_ = model.predict_images('/mnt/mbi/home/mbirdm/AI/data_ai/Vidhya/prediction/')" + ] + }, + { + "cell_type": "code", + "execution_count": 51, + "metadata": {}, + "outputs": [], + "source": [ + "model.end_training()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Please remember to restart or stop the notebook once you are done. Thank you.\n", + "\n", + "Alternatively, run the `model.end_training()` cell above." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.13" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +}
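
For quick reference, the training workflow that unets.ipynb walks through condenses to the short script below. This is a minimal sketch that reuses only the calls shown in the notebook cells; the config path is the file shipped in configs/, and the commented pre-trained-weights path is a placeholder, not a file in this repository.

# Sketch of the training workflow from unets.ipynb.
from models.Unet import Unet

# Build the network from a YAML config, as in the notebook.
model = Unet(config_filepath="configs/default_singleclass_unet.yml")

# Load the training data referenced by the config, then augment it.
model.load_dataset()
model.augment_images()

# Compile the model; optionally warm-start from existing weights
# (placeholder path, substitute your own).
model.initialize_model()
# model.load_weights("/path/to/pretrained/weights_now.h5")

# Train. Per the notebook output, each run folder receives
# weights_best.h5 (best val_loss so far) and weights_now.h5 (latest epoch).
model.train_model()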
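
Likewise, the prediction cells reduce to the sketch below. The run directory and prediction folder are placeholders to replace with your own paths; tile_size and tile_overlap_size mirror the values used in the notebook, and per the notebook's comment, calling load_weights() with no argument loads the last .h5 file found in the model folder.

# Sketch of the prediction workflow from unets.ipynb.
from models.Unet import Unet

model_dir = "/path/to/Networks/Unet-YYYYMMDDTHHMM/"  # folder written during training

# Rebuild the model for inference; tile settings match the notebook
# (512x512 tiles, no overlap).
model = Unet(model_dir=model_dir,
             for_prediction=True,
             tile_size=[512, 512],
             tile_overlap_size=[0, 0])
model.initialize_model()
model.load_weights(model_dir + "weights_best.h5")

# Predict every image in the folder.
_ = model.predict_images("/path/to/prediction/")

# The notebook runs this when done, before stopping the kernel.
model.end_training()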