diff --git a/Untitled.ipynb b/Untitled.ipynb new file mode 100644 index 0000000..9632afc --- /dev/null +++ b/Untitled.ipynb @@ -0,0 +1,89 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 3, + "id": "676fb056", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:516: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n", + "C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:517: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n", + "C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:518: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n", + "C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:519: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n", + "C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:520: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n", + "C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n", + "C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\tensorboard\\compat\\tensorflow_stub\\dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n", + "C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\tensorboard\\compat\\tensorflow_stub\\dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n", + "C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\tensorboard\\compat\\tensorflow_stub\\dtypes.py:543: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n", + "C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\tensorboard\\compat\\tensorflow_stub\\dtypes.py:544: FutureWarning: Passing (type, 1) or 
'1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n", + "C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\tensorboard\\compat\\tensorflow_stub\\dtypes.py:545: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n", + "C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\tensorboard\\compat\\tensorflow_stub\\dtypes.py:550: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Default GPU Device:/device:GPU:0\n" + ] + } + ], + "source": [ + "import tensorflow as tf \n", + "\n", + "if tf.test.gpu_device_name(): \n", + "\n", + " print('Default GPU Device:{}'.format(tf.test.gpu_device_name()))\n", + "\n", + "else:\n", + "\n", + " print(\"Please install GPU version of TF\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fb1acd79", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/configs/._default_multiclass_unet.yml b/configs/._default_multiclass_unet.yml new file mode 100644 index 0000000..e7d5edc Binary files /dev/null and b/configs/._default_multiclass_unet.yml differ diff --git a/configs/.ipynb_checkpoints/default_multiclass_unet-checkpoint.yml b/configs/.ipynb_checkpoints/default_multiclass_unet-checkpoint.yml new file mode 100644 index 0000000..f5be855 --- /dev/null +++ b/configs/.ipynb_checkpoints/default_multiclass_unet-checkpoint.yml @@ -0,0 +1,145 @@ +general: +#### General settings #### + dataset_dir: '/mnt/mbi/home/mbirdm/AI/data_ai/Richard/MultiClass/RI/Train' + model_dir: '/mnt/mbi/home/mbirdm/AI/data_ai/Richard/Networks_edge/RI/' + image_subfolder: 'Images' + ground_truth_subfolder: 'Masks' + + # CPU/GPU settings + visible_gpu: None + use_cpu: False + for_prediction: False + + #callbacks + reduce_LR_on_plateau: True + use_tensorboard: True + early_stopping: False + + # File Saving + save_as_uint16: True + +model: +#### Model parameters #### + filters: 16 # convolution filters + levels: 4 # for unet + num_epochs: 100 + val_split: 0.1 + batch_size_per_GPU: 10 + + optimizer: + optimizer_function: 'adam' #'sgd','rmsprop', 'adam' + learning_rate: 0.0001 + decay: 0 + momentum: 0.9 + nesterov: True + + loss: 'bce_dice_loss' #'bce_dice_loss', 'binary_crossentropy', 'categorical_crossentropy','jaccard_distance_loss','lovasz_hinge','dice_loss','sparse_categorical_crossentropy' + edge_enhance: False + + metrics: + - 'categorical_accuracy' # 'binary_accuracy','categorical_accuracy' + + dropout_value: 0.5 + weight_regularizer: 0 + + initializer: 'he_normal' + strides: [1, 1] + + activation: + activation_function: 'relu' #'relu', 'sigmoid','softmax', 'tanh' + 
final_activation: 'softmax' #'relu', 'sigmoid','softmax', 'tanh' + + padding: 'same' + +images: +#### Image/Ground truth settings #### + tile_size: [512,512] # h,w + tile_overlap_size: [0,0] + image_channel: 1 + nb_classes: 3 + invert_ground_truth: False + use_binary_erosion: False + use_binary_dilation: False + use_binary_dilation_after_augmentation: False + disk_size: 1 + + # image normalization during dataset loading + percentile_normalization: True + percentile: [3, 99.8] + + # patch normalization during test time + scale_input: False + mean_std_normalization: False + mean: 0 + std: 0 + +augmentation: +#### Image augmentations settings #### + augmentation_library: 'albumentations' + num_augmented_images: 10 + augmentations_p: 0.9 + + random_rotate: True + random_rotate_p: 0.9 + + flip: True + transpose: True + + blur_group: False + blur_group_p: 0.3 + + motion_blur: False + motion_blur_p: 0.1 + median_blur: False + median_blur_limit: 3 + median_blur_p: 0.3 + blur: False + blur_limit: 3 + blur_p: 0.3 + + shift_scale_rotate: True + shift_scale_rotate_p: 0.3 + shift_limit: 0.0625 + scale_limit: 0.5 + rotate_limit: 45 + + distortion_group: False + distortion_group_p: 0.2 + optical_distortion: False + optical_distortion_p: 0.3 + elastic_transform: False + elastic_transform_p: 0.3 + grid_distortion: False + grid_distortion_p: 0.3 + + brightness_contrast_group: False + brightness_contrast_group_p: 0.3 + clahe: False + sharpen: False + random_brightness_contrast: False + +callbacks: +#### Callback settings #### + # Tensorboard settings + tensorboard: + write_graph: False + write_images: False + write_grads: False + histogram_freq: 0 + + reduceLR: + # Reduce LR on plateau settings + reduce_LR_monitor: 'val_loss' + reduce_LR_patience: 10 + reduce_LR_factor: 0.5 + reduce_LR_min_lr: 0.000001 + + earlystopping: + # Early stopping settings + early_stopping_monitor: 'val_loss' + early_stopping_patience: 20 + early_stopping_min_delta: 0 + + modelcheckpoint: + # Model checkpoint settings + save_best_weights: True \ No newline at end of file diff --git a/configs/.ipynb_checkpoints/default_singleclass_unet-checkpoint.yml b/configs/.ipynb_checkpoints/default_singleclass_unet-checkpoint.yml new file mode 100644 index 0000000..6b58bf7 --- /dev/null +++ b/configs/.ipynb_checkpoints/default_singleclass_unet-checkpoint.yml @@ -0,0 +1,146 @@ +general: +#### General settings #### + dataset_dir: '/Users/cjt678/Desktop/Unets/Data/' + #dataset_dir: '/mnt/mbi/home/mbirdm/AI/data_ai/Anne/Unet_Noyau/train/' + model_dir: '/Users/cjt678/Desktop/Unets/Networks/' + image_subfolder: 'Images' + ground_truth_subfolder: 'Masks' + + # CPU/GPU settings + visible_gpu: None + use_cpu: False + for_prediction: False + + #callbacks + reduce_LR_on_plateau: True + use_tensorboard: True + early_stopping: False + + # File Saving + save_as_uint16: True + +model: +#### Model parameters #### + filters: 16 # convolution filters + levels: 4 # for unet + num_epochs: 120 + val_split: 0.1 + batch_size_per_GPU: 6 + + optimizer: + optimizer_function: 'adam' #'sgd','rmsprop', 'adam' + learning_rate: 0.0001 + decay: 0 + momentum: 0.9 + nesterov: True + + loss: 'dice_loss' #'bce_dice_loss', 'binary_crossentropy' 'categorical_crossentropy','jaccard_distance_loss','lovasz_hinge','dice_loss','sparse_categorical_crossentropy' + edge_enhance: True + + metrics: + - 'IoU' # 'binary_accuracy','categorical_accuracy', 'IoU' + + dropout_value: 0.5 + weight_regularizer: 0 + + initializer: 'he_normal' + strides: [1, 1] + + activation: + activation_function: 
'relu' #'relu', 'sigmoid','softmax', 'tanh' + final_activation: 'sigmoid' #'relu', 'sigmoid','softmax', 'tanh' + + padding: 'same' + +images: +#### Image/Ground truth settings #### + tile_size: [512,512] # h,w,z + tile_overlap_size: [0,0] + image_channel: 1 + nb_classes: 1 + invert_ground_truth: False + use_binary_erosion: False + use_binary_dilation: False + use_binary_dilation_after_augmentation: False + disk_size: 1 + + # image normalization during dataset loading + percentile_normalization: True + percentile: [3, 99.8] + + # patch normalization during test time + scale_input: False + mean_std_normalization: False + mean: 0 + std: 0 + +augmentation: +#### Image augmentations settings #### + augmentation_library: 'albumentations' + num_augmented_images: 10 + augmentations_p: 0.9 + + random_rotate: True + random_rotate_p: 0.9 + + flip: True + transpose: True + + blur_group: False + blur_group_p: 0.3 + + motion_blur: False + motion_blur_p: 0.1 + median_blur: False + median_blur_limit: 3 + median_blur_p: 0.3 + blur: False + blur_limit: 3 + blur_p: 0.3 + + shift_scale_rotate: True + shift_scale_rotate_p: 0.3 + shift_limit: 0.0625 + scale_limit: 0.5 + rotate_limit: 45 + + distortion_group: False + distortion_group_p: 0.2 + optical_distortion: False + optical_distortion_p: 0.3 + elastic_transform: False + elastic_transform_p: 0.3 + grid_distortion: False + grid_distortion_p: 0.3 + + brightness_contrast_group: False + brightness_contrast_group_p: 0.3 + clahe: False + sharpen: False + random_brightness_contrast: False + +callbacks: +#### Callback settings #### + # Tensorboard settings + tensorboard: + write_graph: False + write_images: False + write_grads: False + histogram_freq: 0 + + reduceLR: + # Reduce LR on plateau settings + reduce_LR_monitor: 'val_loss' + reduce_LR_patience: 10 + reduce_LR_factor: 0.5 + reduce_LR_min_lr: 0.000001 + + earlystopping: + # Early stopping settings + early_stopping_monitor: 'val_loss' + early_stopping_patience: 20 + early_stopping_min_delta: 0 + + modelcheckpoint: + # Model checkpoint settings + save_best_weights: True \ No newline at end of file diff --git a/configs/default_multiclass_unet.yml b/configs/default_multiclass_unet.yml new file mode 100644 index 0000000..f5be855 --- /dev/null +++ b/configs/default_multiclass_unet.yml @@ -0,0 +1,145 @@ +general: +#### General settings #### + dataset_dir: '/mnt/mbi/home/mbirdm/AI/data_ai/Richard/MultiClass/RI/Train' + model_dir: '/mnt/mbi/home/mbirdm/AI/data_ai/Richard/Networks_edge/RI/' + image_subfolder: 'Images' + ground_truth_subfolder: 'Masks' + + # CPU/GPU settings + visible_gpu: None + use_cpu: False + for_prediction: False + + #callbacks + reduce_LR_on_plateau: True + use_tensorboard: True + early_stopping: False + + # File Saving + save_as_uint16: True + +model: +#### Model parameters #### + filters: 16 # convolution filters + levels: 4 # for unet + num_epochs: 100 + val_split: 0.1 + batch_size_per_GPU: 10 + + optimizer: + optimizer_function: 'adam' #'sgd','rmsprop', 'adam' + learning_rate: 0.0001 + decay: 0 + momentum: 0.9 + nesterov: True + + loss: 'bce_dice_loss' #'bce_dice_loss', 'binary_crossentropy', 'categorical_crossentropy','jaccard_distance_loss','lovasz_hinge','dice_loss','sparse_categorical_crossentropy' + edge_enhance: False + + metrics: + - 'categorical_accuracy' # 'binary_accuracy','categorical_accuracy' + + dropout_value: 0.5 + weight_regularizer: 0 + + initializer: 'he_normal' + strides: [1, 1] + + activation: + activation_function: 'relu' #'relu', 'sigmoid','softmax', 'tanh' + 
final_activation: 'softmax' #'relu', 'sigmoid','softmax', 'tanh' + + padding: 'same' + +images: +#### Image/Ground truth settings #### + tile_size: [512,512] # h,w + tile_overlap_size: [0,0] + image_channel: 1 + nb_classes: 3 + invert_ground_truth: False + use_binary_erosion: False + use_binary_dilation: False + use_binary_dilation_after_augmentation: False + disk_size: 1 + + # image normalization during dataset loading + percentile_normalization: True + percentile: [3, 99.8] + + # patch normalization during test time + scale_input: False + mean_std_normalization: False + mean: 0 + std: 0 + +augmentation: +#### Image augmentations settings #### + augmentation_library: 'albumentations' + num_augmented_images: 10 + augmentations_p: 0.9 + + random_rotate: True + random_rotate_p: 0.9 + + flip: True + transpose: True + + blur_group: False + blur_group_p: 0.3 + + motion_blur: False + motion_blur_p: 0.1 + median_blur: False + median_blur_limit: 3 + median_blur_p: 0.3 + blur: False + blur_limit: 3 + blur_p: 0.3 + + shift_scale_rotate: True + shift_scale_rotate_p: 0.3 + shift_limit: 0.0625 + scale_limit: 0.5 + rotate_limit: 45 + + distortion_group: False + distortion_group_p: 0.2 + optical_distortion: False + optical_distortion_p: 0.3 + elastic_transform: False + elastic_transform_p: 0.3 + grid_distortion: False + grid_distortion_p: 0.3 + + brightness_contrast_group: False + brightness_contrast_group_p: 0.3 + clahe: False + sharpen: False + random_brightness_contrast: False + +callbacks: +#### Callback settings #### + # Tensorboard settings + tensorboard: + write_graph: False + write_images: False + write_grads: False + histogram_freq: 0 + + reduceLR: + # Reduce LR on plateau settings + reduce_LR_monitor: 'val_loss' + reduce_LR_patience: 10 + reduce_LR_factor: 0.5 + reduce_LR_min_lr: 0.000001 + + earlystopping: + # Early stopping settings + early_stopping_monitor: 'val_loss' + early_stopping_patience: 20 + early_stopping_min_delta: 0 + + modelcheckpoint: + # Model checkpoint settings + save_best_weights: True \ No newline at end of file diff --git a/configs/default_singleclass_unet.yml b/configs/default_singleclass_unet.yml new file mode 100644 index 0000000..6b58bf7 --- /dev/null +++ b/configs/default_singleclass_unet.yml @@ -0,0 +1,146 @@ +general: +#### General settings #### + dataset_dir: '/Users/cjt678/Desktop/Unets/Data/' + #dataset_dir: '/mnt/mbi/home/mbirdm/AI/data_ai/Anne/Unet_Noyau/train/' + model_dir: '/Users/cjt678/Desktop/Unets/Networks/' + image_subfolder: 'Images' + ground_truth_subfolder: 'Masks' + + # CPU/GPU settings + visible_gpu: None + use_cpu: False + for_prediction: False + + #callbacks + reduce_LR_on_plateau: True + use_tensorboard: True + early_stopping: False + + # File Saving + save_as_uint16: True + +model: +#### Model parameters #### + filters: 16 # convolution filters + levels: 4 # for unet + num_epochs: 120 + val_split: 0.1 + batch_size_per_GPU: 6 + + optimizer: + optimizer_function: 'adam' #'sgd','rmsprop', 'adam' + learning_rate: 0.0001 + decay: 0 + momentum: 0.9 + nesterov: True + + loss: 'dice_loss' #'bce_dice_loss', 'binary_crossentropy' 'categorical_crossentropy','jaccard_distance_loss','lovasz_hinge','dice_loss','sparse_categorical_crossentropy' + edge_enhance: True + + metrics: + - 'IoU' # 'binary_accuracy','categorical_accuracy', 'IoU' + + dropout_value: 0.5 + weight_regularizer: 0 + + initializer: 'he_normal' + strides: [1, 1] + + activation: + activation_function: 'relu' #'relu', 'sigmoid','softmax', 'tanh' + final_activation: 'sigmoid' #'relu', 
'sigmoid','softmax', 'tanh' + + padding: 'same' + +images: +#### Image/Ground truth settings #### + tile_size: [512,512] # h,w,z + tile_overlap_size: [0,0] + image_channel: 1 + nb_classes: 1 + invert_ground_truth: False + use_binary_erosion: False + use_binary_dilation: False + use_binary_dilation_after_augmentation: False + disk_size: 1 + + # image normalization during dataset loading + percentile_normalization: True + percentile: [3, 99.8] + + # patch normalization during test time + scale_input: False + mean_std_normalization: False + mean: 0 + std: 0 + +augmentation: +#### Image augmentations settings #### + augmentation_library: 'albumentations' + num_augmented_images: 10 + augmentations_p: 0.9 + + random_rotate: True + random_rotate_p: 0.9 + + flip: True + transpose: True + + blur_group: False + blur_group_p: 0.3 + + motion_blur: False + motion_blur_p: 0.1 + median_blur: False + median_blur_limit: 3 + median_blur_p: 0.3 + blur: False + blur_limit: 3 + blur_p: 0.3 + + shift_scale_rotate: True + shift_scale_rotate_p: 0.3 + shift_limit: 0.0625 + scale_limit: 0.5 + rotate_limit: 45 + + distortion_group: False + distortion_group_p: 0.2 + optical_distortion: False + optical_distortion_p: 0.3 + elastic_transform: False + elastic_transform_p: 0.3 + grid_distortion: False + grid_distortion_p: 0.3 + + brightness_contrast_group: False + brightness_contrast_group_p: 0.3 + clahe: False + sharpen: False + random_brightness_contrast: False + +callbacks: +#### Callback settings #### + # Tensorboard settings + tensorboard: + write_graph: False + write_images: False + write_grads: False + histogram_freq: 0 + + reduceLR: + # Reduce LR on plateau settings + reduce_LR_monitor: 'val_loss' + reduce_LR_patience: 10 + reduce_LR_factor: 0.5 + reduce_LR_min_lr: 0.000001 + + earlystopping: + # Early stopping settings + early_stopping_monitor: 'val_loss' + early_stopping_patience: 20 + early_stopping_min_delta: 0 + + modelcheckpoint: + # Model checkpoint settings + save_best_weights: True \ No newline at end of file diff --git a/configs/default_unet.yml b/configs/default_unet.yml new file mode 100644 index 0000000..4db72fd --- /dev/null +++ b/configs/default_unet.yml @@ -0,0 +1,138 @@ +general: +#### General settings #### + dataset_dir: '/tf/Documents/Unet/Training_sets/' + model_dir: '/tf/Documents/Unet/Networks/' + image_subfolder: 'Images' + ground_truth_subfolder: 'Masks' + + # CPU/GPU settings + visible_gpu: 0 + use_cpu: False + for_prediction: False + + #callbacks + reduce_LR_on_plateau: True + use_tensorboard: True + early_stopping: False + +model: +#### Model parameters #### + filters: 32 # convolution filters + levels: 4 # for unet + num_epochs: 100 + val_split: 0.1 + batch_size_per_GPU: 32 + + optimizer: + optimizer_function: 'rmsprop' + learning_rate: 0.0001 + decay: 0 + momentum: 0.9 + nesterov: True + + loss: 'binary_crossentropy' #'bce_dice_loss' + metrics: + - 'binary_accuracy' + + dropout_value: 0.5 + weight_regularizer: 0 + + initializer: 'he_normal' + + activation: + activation_function: 'relu' + final_activation: 'sigmoid' #'relu' + + padding: 'same' + +images: +#### Image/Ground truth settings #### + tile_size: [128,128] # h,w + tile_overlap_size: [0,0] + image_channel: 1 + invert_ground_truth: False + use_binary_erosion: False + use_binary_dilation: False + use_binary_dilation_after_augmentation: False + disk_size: 1 + + # image normalization during dataset loading + percentile_normalization: True + percentile: [3, 99.8] + + # patch normalization during test time + scale_input: False + 
mean_std_normalization: False + mean: 0 + std: 0 + +augmentation: +#### Image augmentations settings #### + augmentation_library: 'albumentations' + num_augmented_images: 10 + augmentations_p: 0.9 + + random_rotate: True + random_rotate_p: 0.9 + + flip: True + transpose: True + + blur_group: False + blur_group_p: 0.3 + + motion_blur: False + motion_blur_p: 0.1 + median_blur: False + median_blur_limit: 3 + median_blur_p: 0.3 + blur: False + blur_limit: 3 + blur_p: 0.3 + + shift_scale_rotate: True + shift_scale_rotate_p: 0.3 + shift_limit: 0.0625 + scale_limit: 0.5 + rotate_limit: 45 + + distortion_group: False + distortion_group_p: 0.2 + optical_distortion: False + optical_distortion_p: 0.3 + elastic_transform: False + elastic_transform_p: 0.3 + grid_distortion: False + grid_distortion_p: 0.3 + + brightness_contrast_group: False + brightness_contrast_group_p: 0.3 + clahe: False + sharpen: False + random_brightness_contrast: False + +callbacks: +#### Callback settings #### + # Tensorboard settings + tensorboard: + write_graph: False + write_images: False + write_grads: False + histogram_freq: 0 + + reduceLR: + # Reduce LR on plateau settings + reduce_LR_monitor: 'val_loss' + reduce_LR_patience: 10 + reduce_LR_factor: 0.5 + reduce_LR_min_lr: 0.000001 + + earlystopping: + # Early stopping settings + early_stopping_monitor: 'val_loss' + early_stopping_patience: 10 + early_stopping_min_delta: 0 + + modelcheckpoint: + # Model checkpoint settings + save_best_weights: True \ No newline at end of file diff --git a/html/models/CNN_Base.html b/html/models/CNN_Base.html new file mode 100644 index 0000000..37dfaaf --- /dev/null +++ b/html/models/CNN_Base.html @@ -0,0 +1,1604 @@ + + + + + + +models.CNN_Base API documentation + + + + + + + + + +
Module models.CNN_Base

import os
+
+import glob
+import datetime
+
+import skimage.io
+import numpy as np
+
+import tensorflow as tf
+
+import keras
+from keras import backend as K
+from keras.models import Model, load_model
+from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint, TensorBoard, ProgbarLogger
+
+from .internals.image_functions import Image_Functions
+from .internals.network_config import Network_Config
+from .internals.dataset import Dataset
+
+class CNN_Base(Dataset, Image_Functions):
+    def __init__(self, model_dir = None, config_filepath = None, **kwargs):
+        """Creates the base neural network class with basic functions
+    
+        Parameters
+        ----------
+        model_dir : `str`, optional
+            [Default: None] Folder where the model is stored
+        config_filepath : `str`, optional
+            [Default: None] Filepath to the config file
+        **kwargs
+            Parameters that are passed to :class:`network_config.Network_Config`
+
+        Attributes
+        ----------
+        config : :class:`network_config.Network_Config`
+            Network_config object containing the config and necessary functions
+        """
+        
+        super().__init__()
+        
+        self.config = Network_Config(model_dir = model_dir, config_filepath = config_filepath, **kwargs)
+        
+        self.config.update_parameter(["general", "now"], datetime.datetime.now())
+        
+        if self.config.get_parameter("use_cpu") is True:
+            self.initialize_cpu()
+        else:
+            self.initialize_gpu()
+    
+    #######################
+    # Logging functions
+    #######################
+    def init_logs(self):
+        """Initiates the parameters required for the log file
+        """
+        # Directory for training logs
+        print(self.config.get_parameter("name"), self.config.get_parameter("now"))
+        self.log_dir = os.path.join(self.config.get_parameter("model_dir"), "{}-{:%Y%m%dT%H%M}".format(self.config.get_parameter("name"), self.config.get_parameter("now")))
+        
+        # Path to save after each epoch. Include placeholders that get filled by Keras.
+        self.checkpoint_path = os.path.join(self.log_dir, "{}-{:%Y%m%dT%H%M}_*epoch*.h5".format(self.config.get_parameter("name"), self.config.get_parameter("now")))
+        self.checkpoint_path = self.checkpoint_path.replace("*epoch*", "{epoch:04d}")
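+        # e.g. "<model_dir>/Unet-20190101T1200/Unet-20190101T1200_{epoch:04d}.h5" (name and timestamp are illustrative)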
+        
+    def write_logs(self):
+        """Writes the log file
+        """
+        # Create log_dir if it does not exist
+        if os.path.exists(self.log_dir) is False:
+            os.makedirs(self.log_dir)
+            
+        # save the parameters used in current run to logs dir
+        self.config.write_config(os.path.join(self.log_dir, "{}-{:%Y%m%dT%H%M}-config.yml".format(self.config.get_parameter("name"), self.config.get_parameter("now"))))
+        
+    #######################
+    # Initialization functions
+    #######################
+    def summary(self):
+        """Summary of the layers in the model
+        """
+        self.model.summary()
+        
+    def compile_model(self, optimizer, loss):
+        """Compiles model
+        
+        Parameters
+        ----------
+        optimizer
+            Gradient optimizer used during the training of the network
+        loss
+            Loss function of the network
+        """
+        self.model.compile(optimizer, loss = loss, metrics = self.config.get_parameter("metrics"))
+
+    def initialize_model(self):
+        """Initializes the logs, builds the model, and chooses the correct initialization function
+        """
+        # write parameters to yaml file
+        self.init_logs()
+        if self.config.get_parameter("for_prediction") is False:
+            self.write_logs()
+            
+        # build model
+        self.model = self.build_model(self.config.get_parameter("input_size"))
+        
+        # save model to yaml file
+        if self.config.get_parameter("for_prediction") is False:
+            self.config.write_model(self.model, os.path.join(self.log_dir, "{}-{:%Y%m%dT%H%M}-model.yml".format(self.config.get_parameter("name"), self.config.get_parameter("now"))))
+
+        print("{} using single GPU or CPU..".format("Predicting" if self.config.get_parameter("for_prediction") else "Training"))
+        self.initialize_model_normal()
+            
+    def initialize_cpu(self):
+        """Sets the session to only use the CPU
+        """
+        config = tf.ConfigProto(
+                        device_count = {'CPU' : 1,
+                                        'GPU' : 0}
+                       )
+        session = tf.Session(config=config)
+        K.set_session(session)   
+        
+    def initialize_gpu(self):
+        """Sets the seesion to use the gpu specified in config file
+        """
+        os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"   # see issue #152
+        os.environ['CUDA_VISIBLE_DEVICES'] = str(self.config.get_parameter("visible_gpu")) # needs to be a string
+        
+        config = tf.ConfigProto()
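+        # allow_growth prevents TensorFlow from reserving all of the GPU memory up front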
+        config.gpu_options.allow_growth = True
+        sess = tf.Session(config=config)
+        K.tensorflow_backend.set_session(sess)
+    
+    def initialize_model_normal(self):
+        """Initializes the optimizer and any specified callback functions
+        """
+        opt = self.optimizer_function()
+        self.compile_model(optimizer = opt, loss = self.loss_function(self.config.get_parameter("loss")))
+        
+        if self.config.get_parameter("for_prediction") == False:
+            self.callbacks = [self.model_checkpoint_call(verbose = True)]
+
+            if self.config.get_parameter("use_tensorboard") is True:
+                self.callbacks.append(self.tensorboard_call())
+                
+            if self.config.get_parameter("reduce_LR_on_plateau") is True:
+                self.callbacks.append(ReduceLROnPlateau(monitor=self.config.get_parameter("reduce_LR_monitor"),
+                                                        factor = self.config.get_parameter("reduce_LR_factor"),
+                                                        patience = self.config.get_parameter("reduce_LR_patience"),
+                                                        min_lr = self.config.get_parameter("reduce_LR_min_lr"),
+                                                        verbose = True))
+            
+            if self.config.get_parameter("early_stopping") is True:
+                self.callbacks.append(EarlyStopping(monitor=self.config.get_parameter("early_stopping_monitor"),
+                                                    patience = self.config.get_parameter("early_stopping_patience"),
+                                                    min_delta = self.config.get_parameter("early_stopping_min_delta"),
+                                                    verbose = True))
+                
+    #######################
+    # Optimizer/Loss functions
+    #######################         
+    def optimizer_function(self, learning_rate = None):
+        """Initialize optimizer function
+        
+        Parameters
+        ----------
+        learning_rate : `float`, optional
+            [Default: None] Learning rate of the optimizer; if None, the value in the config file is used
+            
+        Returns
+        ----------
+        optimizer
+            The configured Keras optimizer instance
+        """
+        if learning_rate is None:
+            learning_rate = self.config.get_parameter("learning_rate")
+        if self.config.get_parameter("optimizer_function") == 'sgd':
+            return keras.optimizers.SGD(lr = learning_rate, 
+                                        decay = self.config.get_parameter("decay"), 
+                                        momentum = self.config.get_parameter("momentum"), 
+                                        nesterov = self.config.get_parameter("nesterov"))
+        elif self.config.get_parameter("optimizer_function") == 'rmsprop':
+            return keras.optimizers.RMSprop(lr = learning_rate, 
+                                            decay = self.config.get_parameter("decay"))
+        elif self.config.get_parameter("optimizer_function") == 'adam':
+            return keras.optimizers.Adam(lr = learning_rate, 
+                                         decay = self.config.get_parameter("decay"))
+        
+    def loss_function(self, loss):
+        """Initialize loss function
+        
+        Parameters
+        ----------
+        loss : `str`
+            Name of the loss function
+            
+        Returns
+        ----------
+        loss
+            The loss function, either as a callable or as a Keras-recognized name
+        """
+        if loss == "binary_crossentropy":
+            print("Using binary crossentropy")
+            return loss
+        elif loss == "jaccard_distance_loss":
+            print("Using jaccard distance loss")
+            from .internals.losses import jaccard_distance_loss
+            return jaccard_distance_loss
+        elif loss == "lovasz_hinge":
+            print("Using Lovasz-hinge loss")
+            from .internals.losses import lovasz_loss
+            return lovasz_loss
+        elif loss == "dice_loss":
+            print("Using Dice loss")
+            from .internals.losses import dice_coef_loss
+            return dice_coef_loss
+        elif loss == "bce_dice_loss":
+            print("Using 1 - Dice + BCE loss")
+            from .internals.losses import bce_dice_loss
+            return bce_dice_loss
+        elif loss == "ssim_loss":
+            print("Using DSSIM loss")
+            from .internals.losses import DSSIM_loss
+            return DSSIM_loss
+        elif loss == "bce_ssim_loss":
+            print("Using BCE + DSSIM loss")
+            from .internals.losses import bce_ssim_loss
+            return bce_ssim_loss
+        elif loss == "mean_squared_error":
+            return keras.losses.mean_squared_error
+        elif loss == "mean_absolute_error":
+            return keras.losses.mean_absolute_error
+        elif loss == "ssim_mae_loss":
+            print("Using DSSIM + MAE loss")
+            from .internals.losses import dssim_mae_loss
+            return dssim_mae_loss
+        else:
+            print("Using {}".format(loss))
+            return loss
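+        # e.g. loss_function("bce_dice_loss") imports and returns
+        # models.internals.losses.bce_dice_loss, matching the loss names used in the configs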
+        
+    #######################
+    # Callbacks
+    #######################     
+    def tensorboard_call(self):
+        """Initialize tensorboard call
+        """
+        return TensorBoard(log_dir=self.log_dir, 
+                           batch_size = self.config.get_parameter("batch_size_per_GPU"), 
+                           write_graph=self.config.get_parameter("write_graph"),
+                           write_images=self.config.get_parameter("write_images"), 
+                           write_grads=self.config.get_parameter("write_grads"), 
+                           update_freq='epoch', 
+                           histogram_freq=self.config.get_parameter("histogram_freq"))
+    
+    def model_checkpoint_call(self, verbose = 0):
+        """Initialize model checkpoint call
+        """
+        return ModelCheckpoint(self.checkpoint_path, save_weights_only=True, verbose=verbose)
+    
+    #######################
+    # Clear memory once training is done
+    #######################
+    def end_training(self):
+        """Deletes model and releases gpu memory held by tensorflow
+        """
+        # del reference to model
+        del self.model
+        
+        # clear memory
+        tf.reset_default_graph()
+        K.clear_session()
+        
+        # take hold of cuda device to shut it down
+        from numba import cuda
+        cuda.select_device(0)
+        cuda.close()
+    
+    #######################
+    # Train Model
+    #######################
+    def train_model(self, verbose = True):
+        """Trains model
+        
+        Parameters
+        ----------
+        verbose : `bool`, optional
+            [Default: True] Whether to print progress output during training
+        """      
+        history = self.model.fit(self.aug_images, self.aug_ground_truth, validation_split = self.config.get_parameter("val_split"),
+                                 batch_size = self.config.get_parameter("batch_size"), epochs = self.config.get_parameter("num_epochs"), shuffle = True,
+                                 callbacks=self.callbacks, verbose=verbose)
+        
+        self.end_training()
+        
+    #######################
+    # Predict using loaded model weights
+    ####################### 
+    # TODO: change to load model from yaml file
+    def load_model(self, model_dir = None):
+        """Loads model from h5 file
+        
+        Parameters
+        ----------
+        model_dir : `str`, optional
+            [Default: None] Directory containing the model file
+        """
+        # TODO: rewrite to load model from yaml file
+        if model_dir is None:
+            model_dir = self.config.get_parameter("model_dir")
+            
+        if os.path.isdir(model_dir) is True:
+            list_weights_files = glob.glob(os.path.join(model_dir,'*.h5'))
+            list_weights_files.sort() # To ensure that [-1] gives the last file
+            
+            model_dir = os.path.join(model_dir,list_weights_files[-1])
+
+        self.model = load_model(model_dir)  # keras.models.load_model (imported above) returns the full model
+        print("Loaded model from: " + model_dir)
+        
+    def load_weights(self, model_dir = None, weights_index = -1):
+        """Loads weights from h5 file
+        
+        Parameters
+        ----------
+        model_dir : `str`, optional
+            [Default: None] Directory containing the weights file
+        weights_index : `int`, optional
+            [Default: -1] Index into the name-sorted list of .h5 weights files; -1 loads the most recent one
+        """
+        if model_dir is None:
+            model_dir = self.config.get_parameter("model_dir")
+        
+        if os.path.isdir(model_dir) is True:
+            list_weights_files = glob.glob(os.path.join(model_dir,'*.h5'))
+            list_weights_files.sort() # To ensure that [-1] gives the last file
+            self.weights_path = list_weights_files[weights_index]
+            model_dir = os.path.join(model_dir, self.weights_path)
+        else:
+            self.weights_path = model_dir
+        
+        self.model.load_weights(model_dir)
+        print("Loaded weights from: " + model_dir)
+       
+    def predict_images(self, image_dir):
+        """Perform prediction on images found in ``image_dir``
+        
+        Parameters
+        ----------
+        image_dir : `str`
+            Directory containing the images to perform prediction on
+            
+        Returns
+        ----------
+        image : `array_like`
+            Last image that prediction was performed on
+        """
+        # load image list
+        image_list = self.list_images(image_dir)
+        
+        for image_path in image_list:
+            image = self.load_image(image_path = image_path)
+            
+            # percentile normalization
+            if self.config.get_parameter("percentile_normalization"):
+                image, _, _ = self.percentile_normalization(image, in_bound = self.config.get_parameter("percentile"))
+            
+            if self.config.get_parameter("tile_overlap_size") == [0,0]:
+                padding = None
+                if image.shape[0] < self.config.get_parameter("tile_size")[0] or image.shape[1] < self.config.get_parameter("tile_size")[1]:
+                    image, padding = self.pad_image(image, image_size = self.config.get_parameter("tile_size"))
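+                # add batch and channel axes so the input shape becomes (1, h, w, 1)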
+                input_image = image[np.newaxis,:,:,np.newaxis]
+                
+                output_image = self.model.predict(input_image, verbose=1)
+                
+                if padding is not None: 
+                    h, w = output_image.shape[1:3]
+                    output_image = np.reshape(output_image, (h, w))
+                    output_image = self.remove_pad_image(output_image, padding = padding)
+            else:
+                tile_image_list, num_rows, num_cols, padding = self.tile_image(image, self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size"))
+                
+                pred_train_list = []
+                for tile in tile_image_list:
+
+                    # reshape image to correct dimensions for unet
+                    h, w = tile.shape[:2]
+                    
+                    tile = np.reshape(tile, (1, h, w, 1))
+
+                    pred_train_list.extend(self.model.predict(tile, verbose=1))
+
+                output_image = self.untile_image(pred_train_list, self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size"),
+                                                 num_rows, num_cols, padding = padding)
+            
+            self.save_image(output_image, image_path)
+            
+        return output_image
+    
+    def save_image(self, image, image_path, subfolder = 'Masks', suffix = '-preds'):
+        """Saves image to image_path
+        
+        Final location of image is as follows:
+          - image_path
+              - subfolder
+                 - model/weights file name
+        
+        Parameters
+        ----------
+        image : `array_like`
+            Image to be saved
+        image_path : `str`
+            Location to save the image in
+        subfolder : `str`
+            [Default: 'Masks'] Subfolder in which the image is to be saved in
+        suffix : `str`
+            [Default: '-preds'] Suffix to append to the filename of the predicted image
+        """
+        image_dir = os.path.dirname(image_path)
+        
+        output_dir = os.path.join(image_dir, subfolder)
+        if not os.path.exists(output_dir):
+            os.makedirs(output_dir)
+            
+        basename, _ = os.path.splitext(os.path.basename(self.weights_path))
+        
+        output_dir = os.path.join(output_dir, basename)
+        if not os.path.exists(output_dir):
+            os.makedirs(output_dir)
+            
+        filename, _ = os.path.splitext(os.path.basename(image_path))
+        output_path = os.path.join(output_dir, "{}{}.tif".format(filename, suffix))
+        
+        skimage.io.imsave(output_path, image)

Classes

class CNN_Base(model_dir=None, config_filepath=None, **kwargs)

Creates the base neural network class with basic functions

Parameters

model_dir : `str`, optional
    [Default: None] Folder where the model is stored
config_filepath : `str`, optional
    [Default: None] Filepath to the config file
**kwargs
    Parameters that are passed to :class:`network_config.Network_Config`

Attributes

config : :class:`network_config.Network_Config`
    Network_config object containing the config and necessary functions
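A minimal usage sketch, assuming a config like configs/default_unet.yml above. CNN_Base is normally subclassed by a network that implements build_model(), so this shows the intended call sequence rather than a runnable base-class session, and the paths are placeholders:

    from models.CNN_Base import CNN_Base

    net = CNN_Base(model_dir='/path/to/Networks/',
                   config_filepath='configs/default_unet.yml')
    net.initialize_model()   # init logs, build and compile the model, register callbacks
    net.load_weights()       # load the last name-sorted *.h5 checkpoint in model_dir
    net.predict_images('/path/to/Images')   # predictions are saved under Masks/<weights name>/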
+ +Expand source code + +
class CNN_Base(Dataset, Image_Functions):
+    def __init__(self, model_dir = None, config_filepath = None, **kwargs):
+        """Creates the base neural network class with basic functions
+    
+        Parameters
+        ----------
+        model_dir : `str`, optional
+            [Default: None] Folder where the model is stored
+        config_filepath : `str`, optional
+            [Default: None] Filepath to the config file
+        **kwargs
+            Parameters that are passed to :class:`network_config.Network_Config`
+
+        Attributes
+        ----------
+        config : :class:`network_config.Network_Config`
+            Network_config object containing the config and necessary functions
+        """
+        
+        super().__init__()
+        
+        self.config = Network_Config(model_dir = model_dir, config_filepath = config_filepath, **kwargs)
+        
+        self.config.update_parameter(["general", "now"], datetime.datetime.now())
+        
+        if self.config.get_parameter("use_cpu") is True:
+            self.initialize_cpu()
+        else:
+            self.initialize_gpu()
+    
+    #######################
+    # Logging functions
+    #######################
+    def init_logs(self):
+        """Initiates the parameters required for the log file
+        """
+        # Directory for training logs
+        print(self.config.get_parameter("name"), self.config.get_parameter("now"))
+        self.log_dir = os.path.join(self.config.get_parameter("model_dir"), "{}-{:%Y%m%dT%H%M}".format(self.config.get_parameter("name"), self.config.get_parameter("now")))
+        
+        # Path to save after each epoch. Include placeholders that get filled by Keras.
+        self.checkpoint_path = os.path.join(self.log_dir, "{}-{:%Y%m%dT%H%M}_*epoch*.h5".format(self.config.get_parameter("name"), self.config.get_parameter("now")))
+        self.checkpoint_path = self.checkpoint_path.replace("*epoch*", "{epoch:04d}")
+        
+    def write_logs(self):
+        """Writes the log file
+        """
+        # Create log_dir if it does not exist
+        if os.path.exists(self.log_dir) is False:
+            os.makedirs(self.log_dir)
+            
+        # save the parameters used in current run to logs dir
+        self.config.write_config(os.path.join(self.log_dir, "{}-{:%Y%m%dT%H%M}-config.yml".format(self.config.get_parameter("name"), self.config.get_parameter("now"))))
+        
+    #######################
+    # Initialization functions
+    #######################
+    def summary(self):
+        """Summary of the layers in the model
+        """
+        self.model.summary()
+        
+    def compile_model(self, optimizer, loss):
+        """Compiles model
+        
+        Parameters
+        ----------
+        optimizer
+            Gradient optimizer used in during the training of the network
+        loss
+            Loss function of the network
+        """
+        self.model.compile(optimizer, loss = loss, metrics = self.config.get_parameter("metrics"))
+
+    def initialize_model(self):
+        """Initializes the logs, builds the model, and chooses the correct initialization function
+        """
+        # write parameters to yaml file
+        self.init_logs()
+        if self.config.get_parameter("for_prediction") is False:
+            self.write_logs()
+            
+        # build model
+        self.model = self.build_model(self.config.get_parameter("input_size"))
+        
+        # save model to yaml file
+        if self.config.get_parameter("for_prediction") is False:
+            self.config.write_model(self.model, os.path.join(self.log_dir, "{}-{:%Y%m%dT%H%M}-model.yml".format(self.config.get_parameter("name"), self.config.get_parameter("now"))))
+
+        print("{} using single GPU or CPU..".format("Predicting" if self.config.get_parameter("for_prediction") else "Training"))
+        self.initialize_model_normal()
+            
+    def initialize_cpu(self):
+        """Sets the session to only use the CPU
+        """
+        config = tf.ConfigProto(
+                        device_count = {'CPU' : 1,
+                                        'GPU' : 0}
+                       )
+        session = tf.Session(config=config)
+        K.set_session(session)   
+        
+    def initialize_gpu(self):
+        """Sets the seesion to use the gpu specified in config file
+        """
+        os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"   # see issue #152
+        os.environ['CUDA_VISIBLE_DEVICES'] = str(self.config.get_parameter("visible_gpu")) # needs to be a string
+        
+        config = tf.ConfigProto()
+        config.gpu_options.allow_growth = True
+        sess = tf.Session(config=config)
+        K.tensorflow_backend.set_session(sess)
+    
+    def initialize_model_normal(self):
+        """Initializes the optimizer and any specified callback functions
+        """
+        opt = self.optimizer_function()
+        self.compile_model(optimizer = opt, loss = self.loss_function(self.config.get_parameter("loss")))
+        
+        if self.config.get_parameter("for_prediction") == False:
+            self.callbacks = [self.model_checkpoint_call(verbose = True)]
+
+            if self.config.get_parameter("use_tensorboard") is True:
+                self.callbacks.append(self.tensorboard_call())
+                
+            if self.config.get_parameter("reduce_LR_on_plateau") is True:
+                self.callbacks.append(ReduceLROnPlateau(monitor=self.config.get_parameter("reduce_LR_monitor"),
+                                                        factor = self.config.get_parameter("reduce_LR_factor"),
+                                                        patience = self.config.get_parameter("reduce_LR_patience"),
+                                                        min_lr = self.config.get_parameter("reduce_LR_min_lr"),
+                                                        verbose = True))
+            
+            if self.config.get_parameter("early_stopping") is True:
+                self.callbacks.append(EarlyStopping(monitor=self.config.get_parameter("early_stopping_monitor"),
+                                                    patience = self.config.get_parameter("early_stopping_patience"),
+                                                    min_delta = self.config.get_parameter("early_stopping_min_delta"),
+                                                    verbose = True))
+                
+    #######################
+    # Optimizer/Loss functions
+    #######################         
+    def optimizer_function(self, learning_rate = None):
+        """Initialize optimizer function
+        
+        Parameters
+        ----------
+        learning_rate : `int`
+            Learning rate of the descent algorithm
+            
+        Returns
+        ----------
+        optimizer
+            Function to call the optimizer
+        """
+        if learning_rate is None:
+            learning_rate = self.config.get_parameter("learning_rate")
+        if self.config.get_parameter("optimizer_function") == 'sgd':
+            return keras.optimizers.SGD(lr = learning_rate, 
+                                        decay = self.config.get_parameter("decay"), 
+                                        momentum = self.config.get_parameter("momentum"), 
+                                        nesterov = self.config.get_parameter("nesterov"))
+        elif self.config.get_parameter("optimizer_function") == 'rmsprop':
+            return keras.optimizers.RMSprop(lr = learning_rate, 
+                                            decay = self.config.get_parameter("decay"))
+        elif self.config.get_parameter("optimizer_function") == 'adam':
+            return keras.optimizers.Adam(lr = learning_rate, 
+                                         decay = self.config.get_parameter("decay"))
+        
+    def loss_function(self, loss):
+        """Initialize loss function
+        
+        Parameters
+        ----------
+        loss : `str`
+            Name of the loss function
+            
+        Returns
+        ----------
+        loss
+            Function to call loss function
+        """
+        if loss == "binary_crossentropy":
+            print("Using binary crossentropy")
+            return loss
+        elif loss == "jaccard_distance_loss":
+            print("Using jaccard distance loss")
+            from .internals.losses import jaccard_distance_loss
+            return jaccard_distance_loss
+        elif loss == "lovasz_hinge":
+            print("Using Lovasz-hinge loss")
+            from .internals.losses import lovasz_loss
+            return lovasz_loss
+        elif loss == "dice_loss":
+            print("Using Dice loss")
+            from .internals.losses import dice_coef_loss
+            return dice_coef_loss
+        elif loss == "bce_dice_loss":
+            print("Using 1 - Dice + BCE loss")
+            from .internals.losses import bce_dice_loss
+            return bce_dice_loss
+        elif loss == "ssim_loss":
+            print("Using DSSIM loss")
+            from .internals.losses import DSSIM_loss
+            return DSSIM_loss
+        elif loss == "bce_ssim_loss":
+            print("Using BCE + DSSIM loss")
+            from .internals.losses import bce_ssim_loss
+            return bce_ssim_loss
+        elif loss == "mean_squared_error":
+            return keras.losses.mean_squared_error
+        elif loss == "mean_absolute_error":
+            return keras.losses.mean_absolute_error
+        elif loss == "ssim_mae_loss":
+            print("Using DSSIM + MAE loss")
+            from .internals.losses import dssim_mae_loss
+            return dssim_mae_loss
+        else:
+            print("Using {}".format(loss))
+            return loss
+        
+    #######################
+    # Callbacks
+    #######################     
+    def tensorboard_call(self):
+        """Initialize tensorboard call
+        """
+        return TensorBoard(log_dir=self.log_dir, 
+                           batch_size = self.config.get_parameter("batch_size_per_GPU"), 
+                           write_graph=self.config.get_parameter("write_graph"),
+                           write_images=self.config.get_parameter("write_images"), 
+                           write_grads=self.config.get_parameter("write_grads"), 
+                           update_freq='epoch', 
+                           histogram_freq=self.config.get_parameter("histogram_freq"))
+    
+    def model_checkpoint_call(self, verbose = 0):
+        """Initialize model checkpoint call
+        """
+        return ModelCheckpoint(self.checkpoint_path, save_weights_only=True, verbose=verbose)
+    
+    #######################
+    # Clear memory once training is done
+    #######################
+    def end_training(self):
+        """Deletes model and releases gpu memory held by tensorflow
+        """
+        # del reference to model
+        del self.model
+        
+        # clear memory
+        tf.reset_default_graph()
+        K.clear_session()
+        
+        # take hold of cuda device to shut it down
+        from numba import cuda
+        cuda.select_device(0)
+        cuda.close()
+    
+    #######################
+    # Train Model
+    #######################
+    def train_model(self, verbose = True):
+        """Trains model
+        
+        Parameters
+        ----------
+        verbose : `int`, optional
+            [Default: True] Verbose output
+        """      
+        history = self.model.fit(self.aug_images, self.aug_ground_truth, validation_split = self.config.get_parameter("val_split"),
+                                 batch_size = self.config.get_parameter("batch_size"), epochs = self.config.get_parameter("num_epochs"), shuffle = True,
+                                 callbacks=self.callbacks, verbose=verbose)
+        
+        self.end_training()
+        
+    #######################
+    # Predict using loaded model weights
+    ####################### 
+    # TODO: change to load model from yaml file
+    def load_model(self, model_dir = None):
+        """Loads model from h5 file
+        
+        Parameters
+        ----------
+        model_dir : `str`, optional
+            [Default: None] Directory containing the model file
+        """
+        # TODO: rewrite to load model from yaml file
+        if model_dir is None:
+            model_dir = self.config.get_parameter("model_dir")
+            
+        if os.path.isdir(model_dir) is True:
+            list_weights_files = glob.glob(os.path.join(model_dir,'*.h5'))
+            list_weights_files.sort() # To ensure that [-1] gives the last file
+            
+            model_dir = os.path.join(model_dir,list_weights_files[-1])
+
+        self.model.load_model(model_dir)
+        print("Loaded model from: " + model_dir)
+        
+    def load_weights(self, model_dir = None, weights_index = -1):
+        """Loads weights from h5 file
+        
+        Parameters
+        ----------
+        model_dir : `str`, optional
+            [Default: None] Directory containing the weights file
+        weights_index : `int`, optional
+            [Default: -1] 
+        """
+        if model_dir is None:
+            model_dir = self.config.get_parameter("model_dir")
+        
+        if os.path.isdir(model_dir) is True:
+            list_weights_files = glob.glob(os.path.join(model_dir,'*.h5'))
+            list_weights_files.sort() # To ensure that [-1] gives the last file
+            self.weights_path = list_weights_files[weights_index]
+            model_dir = os.path.join(model_dir, self.weights_path)
+        else:
+            self.weights_path = model_dir
+        
+        self.model.load_weights(model_dir)
+        print("Loaded weights from: " + model_dir)
+       
+    def predict_images(self, image_dir):
+        """Perform prediction on images found in ``image_dir``
+        
+        Parameters
+        ----------
+        image_dir : `str`
+            Directory containing the images to perform prediction on
+            
+        Returns
+        ----------
+        image : `array_like`
+            Last image that prediction was perfromed on
+        """
+        # load image list
+        image_list = self.list_images(image_dir)
+        
+        for image_path in image_list:
+            image = self.load_image(image_path = image_path)
+            
+            # percentile normalization
+            if self.config.get_parameter("percentile_normalization"):
+                image, _, _ = self.percentile_normalization(image, in_bound = self.config.get_parameter("percentile"))
+            
+            if self.config.get_parameter("tile_overlap_size") == [0,0]:
+                padding = None
+                if image.shape[0] < self.config.get_parameter("tile_size")[0] or image.shape[1] < self.config.get_parameter("tile_size")[1]:
+                    image, padding = self.pad_image(image, image_size = self.config.get_parameter("tile_size"))
+                input_image = image[np.newaxis,:,:,np.newaxis]
+                
+                output_image = self.model.predict(input_image, verbose=1)
+                
+                if padding is not None: 
+                    h, w = output_image.shape[1:3]
+                    output_image = np.reshape(output_image, (h, w))
+                    output_image = self.remove_pad_image(output_image, padding = padding)
+            else:
+                tile_image_list, num_rows, num_cols, padding = self.tile_image(image, self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size"))
+                
+                pred_train_list = []
+                for tile in tile_image_list:
+
+                    # reshape image to correct dimensions for unet
+                    h, w = tile.shape[:2]
+                    
+                    tile = np.reshape(tile, (1, h, w, 1))
+
+                    pred_train_list.extend(self.model.predict(tile, verbose=1))
+
+                output_image = self.untile_image(pred_train_list, self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size"),
+                                                 num_rows, num_cols, padding = padding)
+            
+            self.save_image(output_image, image_path)
+            
+        return output_image
+    
+    def save_image(self, image, image_path, subfolder = 'Masks', suffix = '-preds'):
+        """Saves image to image_path
+        
+        Final location of image is as follows:
+          - image_path
+              - subfolder
+                 - model/weights file name
+        
+        Parameters
+        ----------
+        image : `array_like`
+            Image to be saved
+        image_path : `str`
+            Location to save the image in
+        subfolder : `str`
+            [Default: 'Masks'] Subfolder in which the image is saved
+        suffix : `str`
+            [Default: '-preds'] Suffix to append to the filename of the predicted image
+        """
+        image_dir = os.path.dirname(image_path)
+        
+        output_dir = os.path.join(image_dir, subfolder)
+        if not os.path.exists(output_dir):
+            os.makedirs(output_dir)
+            
+        basename, _ = os.path.splitext(os.path.basename(self.weights_path))
+        
+        output_dir = os.path.join(output_dir, basename)
+        if not os.path.exists(output_dir):
+            os.makedirs(output_dir)
+            
+        filename, _ = os.path.splitext(os.path.basename(image_path))
+        output_path = os.path.join(output_dir, "{}{}.tif".format(filename, suffix))
+        
+        skimage.io.imsave(output_path, image)
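+
+# Example usage -- a minimal sketch, assuming a subclass such as Unet built on
+# this base class; the config path and image directory below are hypothetical:
+#
+#   from models.Unet import Unet
+#
+#   net = Unet(config_filepath="configs/default_multiclass_unet.yml")
+#   net.initialize_model()                  # builds the model, optimizer and callbacks
+#   net.load_weights()                      # loads the newest *.h5 from model_dir
+#   net.predict_images("/path/to/Images")   # saves predictions via save_image()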
+
+

Ancestors


Subclasses


Methods

+
+
+def compile_model(self, optimizer, loss) +
+
+

Compiles model

+

Parameters

+
+
optimizer
+
Gradient optimizer used during the training of the network
+
loss
+
Loss function of the network
+
+
def compile_model(self, optimizer, loss):
+    """Compiles model
+    
+    Parameters
+    ----------
+    optimizer
+        Gradient optimizer used during the training of the network
+    loss
+        Loss function of the network
+    """
+    self.model.compile(optimizer, loss = loss, metrics = self.config.get_parameter("metrics"))
+
+
+
+def end_training(self) +
+
+

Deletes the model and releases GPU memory held by TensorFlow

+
def end_training(self):
+    """Deletes model and releases gpu memory held by tensorflow
+    """
+    # del reference to model
+    del self.model
+    
+    # clear memory
+    tf.reset_default_graph()
+    K.clear_session()
+    
+    # take hold of cuda device to shut it down
+    from numba import cuda
+    cuda.select_device(0)
+    cuda.close()
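
Note that cuda.close() frees the GPU memory immediately, but it also destroys the CUDA context of the current process; in practice a fresh Python process (or notebook kernel restart) is needed before TensorFlow can use the GPU again.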
+
+
+
+def init_logs(self) +
+
+

Initiates the parameters required for the log file

+
def init_logs(self):
+    """Initiates the parameters required for the log file
+    """
+    # Directory for training logs
+    print(self.config.get_parameter("name"), self.config.get_parameter("now"))
+    self.log_dir = os.path.join(self.config.get_parameter("model_dir"), "{}-{:%Y%m%dT%H%M}".format(self.config.get_parameter("name"), self.config.get_parameter("now")))
+    
+    # Path to save after each epoch. Include placeholders that get filled by Keras.
+    self.checkpoint_path = os.path.join(self.log_dir, "{}-{:%Y%m%dT%H%M}_*epoch*.h5".format(self.config.get_parameter("name"), self.config.get_parameter("now")))
+    self.checkpoint_path = self.checkpoint_path.replace("*epoch*", "{epoch:04d}")
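
For example, a run with name 'Unet' started on 1 July 2019 at 09:30 would log to &lt;model_dir&gt;/Unet-20190701T0930/ and write one checkpoint per epoch, e.g. Unet-20190701T0930_0001.h5.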
+
+
+
+def initialize_cpu(self) +
+
+

Sets the session to only use the CPU

+
def initialize_cpu(self):
+    """Sets the session to only use the CPU
+    """
+    config = tf.ConfigProto(
+                    device_count = {'CPU' : 1,
+                                    'GPU' : 0}
+                   )
+    session = tf.Session(config=config)
+    K.set_session(session)   
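
Note that tf.ConfigProto and tf.Session are TensorFlow 1.x APIs; under TensorFlow 2.x the equivalent would be along the lines of tf.config.set_visible_devices([], 'GPU').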
+
+
+
+def initialize_gpu(self) +
+
+

Sets the session to use the GPU specified in the config file

+
def initialize_gpu(self):
+    """Sets the seesion to use the gpu specified in config file
+    """
+    os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"   # see issue #152
+    os.environ['CUDA_VISIBLE_DEVICES'] = str(self.config.get_parameter("visible_gpu")) # needs to be a string
+    
+    config = tf.ConfigProto()
+    config.gpu_options.allow_growth = True
+    sess = tf.Session(config=config)
+    K.tensorflow_backend.set_session(sess)
+
+
+
+def initialize_model(self) +
+
+

Initializes the logs, builds the model, and chooses the correct initialization function

+
def initialize_model(self):
+    """Initializes the logs, builds the model, and chooses the correct initialization function
+    """
+    # write parameters to yaml file
+    self.init_logs()
+    if self.config.get_parameter("for_prediction") is False:
+        self.write_logs()
+        
+    # build model
+    self.model = self.build_model(self.config.get_parameter("input_size"))
+    
+    # save model to yaml file
+    if self.config.get_parameter("for_prediction") is False:
+        self.config.write_model(self.model, os.path.join(self.log_dir, "{}-{:%Y%m%dT%H%M}-model.yml".format(self.config.get_parameter("name"), self.config.get_parameter("now"))))
+
+    print("{} using single GPU or CPU..".format("Predicting" if self.config.get_parameter("for_prediction") else "Training"))
+    self.initialize_model_normal()
+
+
+
+def initialize_model_normal(self) +
+
+

Initializes the optimizer and any specified callback functions

+
def initialize_model_normal(self):
+    """Initializes the optimizer and any specified callback functions
+    """
+    opt = self.optimizer_function()
+    self.compile_model(optimizer = opt, loss = self.loss_function(self.config.get_parameter("loss")))
+    
+    if self.config.get_parameter("for_prediction") == False:
+        self.callbacks = [self.model_checkpoint_call(verbose = True)]
+
+        if self.config.get_parameter("use_tensorboard") is True:
+            self.callbacks.append(self.tensorboard_call())
+            
+        if self.config.get_parameter("reduce_LR_on_plateau") is True:
+            self.callbacks.append(ReduceLROnPlateau(monitor=self.config.get_parameter("reduce_LR_monitor"),
+                                                    factor = self.config.get_parameter("reduce_LR_factor"),
+                                                    patience = self.config.get_parameter("reduce_LR_patience"),
+                                                    min_lr = self.config.get_parameter("reduce_LR_min_lr"),
+                                                    verbose = True))
+        
+        if self.config.get_parameter("early_stopping") is True:
+            self.callbacks.append(EarlyStopping(monitor=self.config.get_parameter("early_stopping_monitor"),
+                                                patience = self.config.get_parameter("early_stopping_patience"),
+                                                min_delta = self.config.get_parameter("early_stopping_min_delta"),
+                                                verbose = True))
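
The callback settings above are read from the config file. A sketch of the corresponding YAML keys (the values shown here are illustrative, not defaults):

reduce_LR_on_plateau: True
reduce_LR_monitor: 'val_loss'
reduce_LR_factor: 0.1
reduce_LR_patience: 10
reduce_LR_min_lr: 0.0000001

early_stopping: False
early_stopping_monitor: 'val_loss'
early_stopping_patience: 20
early_stopping_min_delta: 0.001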
+
+
+
+def load_model(self, model_dir=None) +
+
+

Loads model from h5 file

+

Parameters

+
+
model_dir : str, optional
+
[Default: None] Directory containing the model file
+
+
def load_model(self, model_dir = None):
+    """Loads model from h5 file
+    
+    Parameters
+    ----------
+    model_dir : `str`, optional
+        [Default: None] Directory containing the model file
+    """
+    # TODO: rewrite to load model from yaml file
+    if model_dir is None:
+        model_dir = self.config.get_parameter("model_dir")
+        
+    if os.path.isdir(model_dir) is True:
+        list_weights_files = glob.glob(os.path.join(model_dir,'*.h5'))
+        list_weights_files.sort() # To ensure that [-1] gives the last file
+        
+        model_dir = os.path.join(model_dir,list_weights_files[-1])
+
+    self.model = keras.models.load_model(model_dir) # Model instances have no load_model method
+    print("Loaded model from: " + model_dir)
+
+
+
+def load_weights(self, model_dir=None, weights_index=-1) +
+
+

Loads weights from h5 file

+

Parameters

+
+
model_dir : str, optional
+
[Default: None] Directory containing the weights file
+
weights_index : int, optional
+
[Default: -1] Index into the sorted list of .h5 files; -1 loads the last file
+
+
def load_weights(self, model_dir = None, weights_index = -1):
+    """Loads weights from h5 file
+    
+    Parameters
+    ----------
+    model_dir : `str`, optional
+        [Default: None] Directory containing the weights file
+    weights_index : `int`, optional
+        [Default: -1] Index into the sorted list of .h5 files; -1 loads the last file
+    """
+    if model_dir is None:
+        model_dir = self.config.get_parameter("model_dir")
+    
+    if os.path.isdir(model_dir) is True:
+        list_weights_files = glob.glob(os.path.join(model_dir,'*.h5'))
+        list_weights_files.sort() # To ensure that [-1] gives the last file
+        self.weights_path = list_weights_files[weights_index]
+        model_dir = self.weights_path # glob already returns paths that include model_dir
+    else:
+        self.weights_path = model_dir
+    
+    self.model.load_weights(model_dir)
+    print("Loaded weights from: " + model_dir)
+
+
+
+def loss_function(self, loss) +
+
+

Initialize loss function

+

Parameters

+
+
loss : str
+
Name of the loss function
+
+

Returns

+
+
loss
+
Callable (or Keras loss name) implementing the loss function
+
+
def loss_function(self, loss):
+    """Initialize loss function
+    
+    Parameters
+    ----------
+    loss : `str`
+        Name of the loss function
+        
+    Returns
+    ----------
+    loss
+        Callable (or Keras loss name) implementing the loss function
+    """
+    if loss == "binary_crossentropy":
+        print("Using binary crossentropy")
+        return loss
+    elif loss == "jaccard_distance_loss":
+        print("Using jaccard distance loss")
+        from .internals.losses import jaccard_distance_loss
+        return jaccard_distance_loss
+    elif loss == "lovasz_hinge":
+        print("Using Lovasz-hinge loss")
+        from .internals.losses import lovasz_loss
+        return lovasz_loss
+    elif loss == "dice_loss":
+        print("Using Dice loss")
+        from .internals.losses import dice_coef_loss
+        return dice_coef_loss
+    elif loss == "bce_dice_loss":
+        print("Using 1 - Dice + BCE loss")
+        from .internals.losses import bce_dice_loss
+        return bce_dice_loss
+    elif loss == "ssim_loss":
+        print("Using DSSIM loss")
+        from .internals.losses import DSSIM_loss
+        return DSSIM_loss
+    elif loss == "bce_ssim_loss":
+        print("Using BCE + DSSIM loss")
+        from .internals.losses import bce_ssim_loss
+        return bce_ssim_loss
+    elif loss == "mean_squared_error":
+        return keras.losses.mean_squared_error
+    elif loss == "mean_absolute_error":
+        return keras.losses.mean_absolute_error
+    elif loss == "ssim_mae_loss":
+        print("Using DSSIM + MAE loss")
+        from .internals.losses import dssim_mae_loss
+        return dssim_mae_loss
+    else:
+        print("Using {}".format(loss))
+        return loss
+
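
Any loss string not matched above (e.g. 'categorical_crossentropy') falls through to the final branch and is returned unchanged, letting Keras resolve it by name as a built-in loss.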
+
+
+def model_checkpoint_call(self, verbose=0) +
+
+

Initialize model checkpoint call

+
def model_checkpoint_call(self, verbose = 0):
+    """Initialize model checkpoint call
+    """
+    return ModelCheckpoint(self.checkpoint_path, save_weights_only=True, verbose=verbose)
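
Because save_weights_only=True, the checkpoint .h5 files contain weights only; they must be restored with load_weights() on a freshly built model rather than with load_model().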
+
+
+
+def optimizer_function(self, learning_rate=None) +
+
+

Initialize optimizer function

+

Parameters

+
+
learning_rate : float
+
Learning rate of the descent algorithm
+
+

Returns

+
+
optimizer
+
The configured Keras optimizer
+
+
def optimizer_function(self, learning_rate = None):
+    """Initialize optimizer function
+    
+    Parameters
+    ----------
+    learning_rate : `float`
+        Learning rate of the descent algorithm
+        
+    Returns
+    ----------
+    optimizer
+        The configured Keras optimizer
+    """
+    if learning_rate is None:
+        learning_rate = self.config.get_parameter("learning_rate")
+    if self.config.get_parameter("optimizer_function") == 'sgd':
+        return keras.optimizers.SGD(lr = learning_rate, 
+                                    decay = self.config.get_parameter("decay"), 
+                                    momentum = self.config.get_parameter("momentum"), 
+                                    nesterov = self.config.get_parameter("nesterov"))
+    elif self.config.get_parameter("optimizer_function") == 'rmsprop':
+        return keras.optimizers.RMSprop(lr = learning_rate, 
+                                        decay = self.config.get_parameter("decay"))
+    elif self.config.get_parameter("optimizer_function") == 'adam':
+        return keras.optimizers.Adam(lr = learning_rate, 
+                                     decay = self.config.get_parameter("decay"))
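
The lr and decay keyword arguments follow the standalone Keras 2.x API; newer tf.keras releases renamed lr to learning_rate and replaced decay with learning-rate schedules.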
+
+
+
+def predict_images(self, image_dir) +
+
+

Perform prediction on images found in image_dir

+

Parameters

+
+
image_dir : str
+
Directory containing the images to perform prediction on
+
+

Returns

+
+
image : array_like
+
Last image that prediction was performed on
+
+
def predict_images(self, image_dir):
+    """Perform prediction on images found in ``image_dir``
+    
+    Parameters
+    ----------
+    image_dir : `str`
+        Directory containing the images to perform prediction on
+        
+    Returns
+    ----------
+    image : `array_like`
+        Last image that prediction was performed on
+    """
+    # load image list
+    image_list = self.list_images(image_dir)
+    
+    for image_path in image_list:
+        image = self.load_image(image_path = image_path)
+        
+        # percentile normalization
+        if self.config.get_parameter("percentile_normalization"):
+            image, _, _ = self.percentile_normalization(image, in_bound = self.config.get_parameter("percentile"))
+        
+        if self.config.get_parameter("tile_overlap_size") == [0,0]:
+            padding = None
+            if image.shape[0] < self.config.get_parameter("tile_size")[0] or image.shape[1] < self.config.get_parameter("tile_size")[1]:
+                image, padding = self.pad_image(image, image_size = self.config.get_parameter("tile_size"))
+            input_image = image[np.newaxis,:,:,np.newaxis]
+            
+            output_image = self.model.predict(input_image, verbose=1)
+            
+            if padding is not None: 
+                h, w = output_image.shape[1:3]
+                output_image = np.reshape(output_image, (h, w))
+                output_image = self.remove_pad_image(output_image, padding = padding)
+        else:
+            tile_image_list, num_rows, num_cols, padding = self.tile_image(image, self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size"))
+            
+            pred_train_list = []
+            for tile in tile_image_list:
+
+                # reshape image to correct dimensions for unet
+                h, w = tile.shape[:2]
+                
+                tile = np.reshape(tile, (1, h, w, 1))
+
+                pred_train_list.extend(self.model.predict(tile, verbose=1))
+
+            output_image = self.untile_image(pred_train_list, self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size"),
+                                             num_rows, num_cols, padding = padding)
+        
+        self.save_image(output_image, image_path)
+        
+    return output_image
+
+
+
+def save_image(self, image, image_path, subfolder='Masks', suffix='-preds') +
+
+

Saves image to image_path

+

Final location of image is as follows:
- image_path
  - subfolder
    - model/weights file name

+

Parameters

+
+
image : array_like
+
Image to be saved
+
image_path : str
+
Location to save the image in
+
subfolder : str
+
[Default: 'Masks'] Subfolder in which the image is saved
+
suffix : str
+
[Default: '-preds'] Suffix to append to the filename of the predicted image
+
+
def save_image(self, image, image_path, subfolder = 'Masks', suffix = '-preds'):
+    """Saves image to image_path
+    
+    Final location of image is as follows:
+      - image_path
+          - subfolder
+             - model/weights file name
+    
+    Parameters
+    ----------
+    image : `array_like`
+        Image to be saved
+    image_path : `str`
+        Location to save the image in
+    subfolder : `str`
+        [Default: 'Masks'] Subfolder in which the image is saved
+    suffix : `str`
+        [Default: '-preds'] Suffix to append to the filename of the predicted image
+    """
+    image_dir = os.path.dirname(image_path)
+    
+    output_dir = os.path.join(image_dir, subfolder)
+    if not os.path.exists(output_dir):
+        os.makedirs(output_dir)
+        
+    basename, _ = os.path.splitext(os.path.basename(self.weights_path))
+    
+    output_dir = os.path.join(output_dir, basename)
+    if not os.path.exists(output_dir):
+        os.makedirs(output_dir)
+        
+    filename, _ = os.path.splitext(os.path.basename(image_path))
+    output_path = os.path.join(output_dir, "{}{}.tif".format(filename, suffix))
+    
+    skimage.io.imsave(output_path, image)
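
For example (file names here are illustrative), predicting .../Images/cell_01.tif with the weights file Unet-20190701T0930_0042.h5 loaded would write the result to .../Images/Masks/Unet-20190701T0930_0042/cell_01-preds.tif.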
+
+
+
+def summary(self) +
+
+

Summary of the layers in the model

+
def summary(self):
+    """Summary of the layers in the model
+    """
+    self.model.summary()
+
+
+
+def tensorboard_call(self) +
+
+

Initialize tensorboard call

+
def tensorboard_call(self):
+    """Initialize tensorboard call
+    """
+    return TensorBoard(log_dir=self.log_dir, 
+                       batch_size = self.config.get_parameter("batch_size_per_GPU"), 
+                       write_graph=self.config.get_parameter("write_graph"),
+                       write_images=self.config.get_parameter("write_images"), 
+                       write_grads=self.config.get_parameter("write_grads"), 
+                       update_freq='epoch', 
+                       histogram_freq=self.config.get_parameter("histogram_freq"))
+
+
+
+def train_model(self, verbose=True) +
+
+

Trains model

+

Parameters

+
+
verbose : bool, optional
+
[Default: True] Verbose output
+
+
def train_model(self, verbose = True):
+    """Trains model
+    
+    Parameters
+    ----------
+    verbose : `bool`, optional
+        [Default: True] Verbose output
+    """      
+    history = self.model.fit(self.aug_images, self.aug_ground_truth, validation_split = self.config.get_parameter("val_split"),
+                             batch_size = self.config.get_parameter("batch_size"), epochs = self.config.get_parameter("num_epochs"), shuffle = True,
+                             callbacks=self.callbacks, verbose=verbose)
+    
+    self.end_training()
+
+
+
+def write_logs(self) +
+
+

Writes the log file

+
def write_logs(self):
+    """Writes the log file
+    """
+    # Create log_dir if it does not exist
+    if os.path.exists(self.log_dir) is False:
+        os.makedirs(self.log_dir)
+        
+    # save the parameters used in current run to logs dir
+    self.config.write_config(os.path.join(self.log_dir, "{}-{:%Y%m%dT%H%M}-config.yml".format(self.config.get_parameter("name"), self.config.get_parameter("now"))))
+
+
+
+

Inherited members

+
+
+
\ No newline at end of file
diff --git a/html/models/Unet.html b/html/models/Unet.html
new file mode 100644
index 0000000..b67adc9
--- /dev/null
+++ b/html/models/Unet.html
@@ -0,0 +1,417 @@
+models.Unet API documentation
+
+
+

Module models.Unet

+
+
+
import math
+
+import keras
+from keras.models import Model, load_model
+from keras.layers import Input, BatchNormalization, Activation
+from keras.layers.core import Lambda, Dropout
+from keras.layers.convolutional import Conv2D, Conv2DTranspose, UpSampling2D
+from keras.layers.convolutional_recurrent import ConvLSTM2D
+from keras.layers.pooling import MaxPooling2D
+from keras.layers.merge import Concatenate, Add
+from keras import regularizers
+from keras import backend as K
+
+import tensorflow as tf
+
+from .CNN_Base import CNN_Base
+from .layers.layers import normalize_input, activation_function, regularizer_function, bn_relu_conv2d
+    
+######
+# Unet
+######
+class Unet(CNN_Base):
+    """
+    Unet functions
+    see https://www.nature.com/articles/s41592-018-0261-2
+    """
+    
+    def __init__(self, model_dir = None, name = 'Unet', **kwargs):
+        super().__init__(model_dir = model_dir, **kwargs)
+        
+        self.config.update_parameter(["model","name"], name)
+        
+    def build_model(self, input_size, mean_std_normalization = None, 
+                    dropout_value = None, acti = None, padding = None, 
+                    kernel_initializer = None, weight_regularizer = None):
+        
+        ### get parameters from config file ###
+        filters = self.config.get_parameter("filters")
+        strides = self.config.get_parameter("strides") # convolution strides, e.g. [1, 1]; used by bn_relu_conv2d below
+        
+        if dropout_value is None:
+            dropout_value = self.config.get_parameter("dropout_value")
+        if acti is None:
+            acti = self.config.get_parameter("activation_function")
+        if padding is None:
+            padding = self.config.get_parameter("padding")
+        if kernel_initializer is None:
+            kernel_initializer = self.config.get_parameter("initializer")
+        if weight_regularizer is None:
+            weight_regularizer = self.config.get_parameter("weight_regularizer")
+        if mean_std_normalization is None:
+            if self.config.get_parameter("mean_std_normalization") == True:
+                mean = self.config.get_parameter("mean")
+                std = self.config.get_parameter("std")
+            else:
+                mean = None
+                std = None
+        
+        ### Actual network###
+        inputs = Input(input_size)
+        
+        # normalize images
+        layer = normalize_input(inputs, 
+                                scale_input = self.config.get_parameter("scale_input"),
+                                mean_std_normalization = self.config.get_parameter("mean_std_normalization"),
+                                mean = mean, std = std)
+        
+        layer_store = []
+        
+        # encoding arm
+        for _ in range(self.config.get_parameter("levels")):
+            layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
+                                   kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+            
+            layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
+                                   kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+            
+            layer_store.append(layer)
+            layer = MaxPooling2D((2, 2))(layer)
+            
+            filters = filters * 2
+            
+        
+        layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
+                               kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+            
+        layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
+                               kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+            
+        # decoding arm 
+        for i in range(self.config.get_parameter("levels")):
+            layer = Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding='same')(layer)
+            
+            layer = Concatenate(axis=3)([layer, layer_store[-i -1]])
+            filters = filters // 2
+            
+            layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
+                                   kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+            
+            layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
+                                   kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+            
+        outputs = Conv2D(1, (1, 1), activation='sigmoid')(layer)
+        
+        return Model(inputs=[inputs], outputs=[outputs], name='Unet')
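
As a worked example of the loops above: with filters = 16 and levels = 4, the encoder levels use 16, 32, 64 and 128 filters, the bridge uses 256, and the decoder mirrors the sequence back down through 128, 64 and 32 to 16 before the final 1x1 sigmoid convolution.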
+
+
+
+
+
+
+
+
+
+

Classes

+
+
+class Unet +(model_dir=None, name='Unet', **kwargs) +
+
+

Unet functions; see https://www.nature.com/articles/s41592-018-0261-2

+

Creates the base neural network class with basic functions

+

Parameters

+
+
model_dir : str, optional
+
[Default: None] Folder where the model is stored
+
config_filepath : str, optional
+
[Default: None] Filepath to the config file
+
**kwargs
+
Parameters that are passed to :class:network_config.Network_Config
+
+

Attributes

+
+
config : :class:network_config.Network_Config
+
Network_config object containing the config and necessary functions
+
+
class Unet(CNN_Base):
+    """
+    Unet functions
+    see https://www.nature.com/articles/s41592-018-0261-2
+    """
+    
+    def __init__(self, model_dir = None, name = 'Unet', **kwargs):
+        super().__init__(model_dir = model_dir, **kwargs)
+        
+        self.config.update_parameter(["model","name"], name)
+        
+    def build_model(self, input_size, mean_std_normalization = None, 
+                    dropout_value = None, acti = None, padding = None, 
+                    kernel_initializer = None, weight_regularizer = None):
+        
+        ### get parameters from config file ###
+        filters = self.config.get_parameter("filters")
+        strides = self.config.get_parameter("strides") # convolution strides, e.g. [1, 1]; used by bn_relu_conv2d below
+        
+        if dropout_value is None:
+            dropout_value = self.config.get_parameter("dropout_value")
+        if acti is None:
+            acti = self.config.get_parameter("activation_function")
+        if padding is None:
+            padding = self.config.get_parameter("padding")
+        if kernel_initializer is None:
+            kernel_initializer = self.config.get_parameter("initializer")
+        if weight_regularizer is None:
+            weight_regularizer = self.config.get_parameter("weight_regularizer")
+        if mean_std_normalization is None:
+            if self.config.get_parameter("mean_std_normalization") == True:
+                mean = self.config.get_parameter("mean")
+                std = self.config.get_parameter("std")
+            else:
+                mean = None
+                std = None
+        
+        ### Actual network###
+        inputs = Input(input_size)
+        
+        # normalize images
+        layer = normalize_input(inputs, 
+                                scale_input = self.config.get_parameter("scale_input"),
+                                mean_std_normalization = self.config.get_parameter("mean_std_normalization"),
+                                mean = mean, std = std)
+        
+        layer_store = []
+        
+        # encoding arm
+        for _ in range(self.config.get_parameter("levels")):
+            layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
+                                   kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+            
+            layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
+                                   kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+            
+            layer_store.append(layer)
+            layer = MaxPooling2D((2, 2))(layer)
+            
+            filters = filters * 2
+            
+        
+        layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
+                               kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+            
+        layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
+                               kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+            
+        # decoding arm 
+        for i in range(self.config.get_parameter("levels")):
+            layer = Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding='same')(layer)
+            
+            layer = Concatenate(axis=3)([layer, layer_store[-i -1]])
+            filters = filters // 2
+            
+            layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
+                                   kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+            
+            layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
+                                   kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+            
+        outputs = Conv2D(1, (1, 1), activation='sigmoid')(layer)
+        
+        return Model(inputs=[inputs], outputs=[outputs], name='Unet')
+
+

Ancestors

CNN_Base

Methods

+
+
+def build_model(self, input_size, mean_std_normalization=None, dropout_value=None, acti=None, padding=None, kernel_initializer=None, weight_regularizer=None) +
+
+
+
def build_model(self, input_size, mean_std_normalization = None, 
+                dropout_value = None, acti = None, padding = None, 
+                kernel_initializer = None, weight_regularizer = None):
+    
+    ### get parameters from config file ###
+    filters = self.config.get_parameter("filters")
+    strides = self.config.get_parameter("strides") # convolution strides, e.g. [1, 1]; used by bn_relu_conv2d below
+    
+    if dropout_value is None:
+        dropout_value = self.config.get_parameter("dropout_value")
+    if acti is None:
+        acti = self.config.get_parameter("activation_function")
+    if padding is None:
+        padding = self.config.get_parameter("padding")
+    if kernel_initializer is None:
+        kernel_initializer = self.config.get_parameter("initializer")
+    if weight_regularizer is None:
+        weight_regularizer = self.config.get_parameter("weight_regularizer")
+    if mean_std_normalization is None:
+        if self.config.get_parameter("mean_std_normalization") == True:
+            mean = self.config.get_parameter("mean")
+            std = self.config.get_parameter("std")
+        else:
+            mean = None
+            std = None
+    
+    ### Actual network###
+    inputs = Input(input_size)
+    
+    # normalize images
+    layer = normalize_input(inputs, 
+                            scale_input = self.config.get_parameter("scale_input"),
+                            mean_std_normalization = self.config.get_parameter("mean_std_normalization"),
+                            mean = mean, std = std)
+    
+    layer_store = []
+    
+    # encoding arm
+    for _ in range(self.config.get_parameter("levels")):
+        layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
+                               kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+        
+        layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
+                               kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+        
+        layer_store.append(layer)
+        layer = MaxPooling2D((2, 2))(layer)
+        
+        filters = filters * 2
+        
+    
+    layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
+                           kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+        
+    layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
+                           kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+        
+    # decoding arm 
+    for i in range(self.config.get_parameter("levels")):
+        layer = Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding='same')(layer)
+        
+        layer = Concatenate(axis=3)([layer, layer_store[-i -1]])
+        filters = filters // 2
+        
+        layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
+                               kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+        
+        layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
+                               kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+        
+    outputs = Conv2D(1, (1, 1), activation='sigmoid')(layer)
+    
+    return Model(inputs=[inputs], outputs=[outputs], name='Unet')
+
+
+
+

Inherited members

+
+
+
\ No newline at end of file
diff --git a/html/models/Unet_Resnet.html b/html/models/Unet_Resnet.html
new file mode 100644
index 0000000..97475e5
--- /dev/null
+++ b/html/models/Unet_Resnet.html
@@ -0,0 +1,1095 @@
+models.Unet_Resnet API documentation
+
+
+

Module models.Unet_Resnet

+
+
+
import math
+
+import keras
+from keras.models import Model, load_model
+from keras.layers import Input, BatchNormalization, Activation
+from keras.layers.core import Lambda, Dropout
+from keras.layers.convolutional import Conv2D, Conv2DTranspose, UpSampling2D
+from keras.layers.convolutional_recurrent import ConvLSTM2D
+from keras.layers.pooling import MaxPooling2D
+from keras.layers.merge import Concatenate, Add
+from keras import regularizers
+from keras import backend as K
+
+import tensorflow as tf
+
+from .CNN_Base import CNN_Base
+from .layers.layers import normalize_input, activation_function, regularizer_function, bn_relu_conv2d, bn_relu_conv2dtranspose
+        
+################################################
+# Unet + Resnet
+################################################
+
+class Unet_Resnet(CNN_Base):
+    """
+    Unet + resnet functions
+    see https://link.springer.com/chapter/10.1007/978-3-319-46976-8_19
+    """
+    
+    def __init__(self, model_dir = None, **kwargs):       
+        super().__init__(model_dir = model_dir, **kwargs)
+        
+    def bottleneck_block(self, inputs, 
+                         upsample = False,
+                         filters = 8,
+                         strides = 1, dropout_value = None, acti = None, padding = None, 
+                         kernel_initializer = None, weight_regularizer = None, name = None):            
+        # Bottleneck_block
+        with tf.name_scope("Bottleneck_block" + name):
+            output = bn_relu_conv2d(inputs, filters, 1,  acti=acti, padding=padding, strides=strides, 
+                                    kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+            
+            output = bn_relu_conv2d(output, filters, 3,  acti=acti, padding=padding, 
+                                    kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+            
+            if upsample == True:
+                output = bn_relu_conv2dtranspose(output, filters, (2,2), strides = (2,2), acti=acti, padding=padding, 
+                                                kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+                output = Conv2D(filters * 4, (1,1), padding=padding, 
+                                kernel_initializer=kernel_initializer, 
+                                kernel_regularizer=regularizer_function(weight_regularizer))(output)
+            else:
+                output = bn_relu_conv2d(output, filters*4, 1,  acti=acti, padding=padding, 
+                                        kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+
+            output = Dropout(dropout_value)(output)
+            
+            # reshape input to the same size as output
+            if upsample == True:
+                inputs = UpSampling2D()(inputs)
+            if strides == 2:
+                inputs = Conv2D(output.shape[3].value, 1, padding=padding, strides=strides, kernel_initializer=kernel_initializer)(inputs)
+            
+            # ensure number of filters are correct between input and output
+            if output.shape[3] != inputs.shape[3]:
+                inputs = Conv2D(output.shape[3].value, 1, padding=padding, kernel_initializer=kernel_initializer)(inputs)
+
+            return Add()([output, inputs])
+        
+    def simple_block(self, inputs, filters,
+                     strides = 1, dropout_value = None, acti = None, padding = None, 
+                     kernel_initializer = None, weight_regularizer = None, name = None):
+            
+        with tf.name_scope("Simple_block" + name):
+            output = BatchNormalization()(inputs)
+            output = activation_function(output, acti)
+            output = MaxPooling2D()(output)
+            output = Conv2D(filters, 3, padding=padding, strides=strides,
+                            kernel_initializer=kernel_initializer, 
+                            kernel_regularizer=regularizer_function(weight_regularizer))(output)
+
+            output = Dropout(dropout_value)(output)
+
+            inputs = Conv2D(output.shape[3].value, 1, padding=padding, strides=2, kernel_initializer=kernel_initializer)(inputs)
+            
+            return Add()([output, inputs])
+        
+    def simple_block_up(self, inputs, filters,
+                        strides = 1, dropout_value = None, acti = None, padding = None, 
+                        kernel_initializer = None, weight_regularizer = None, name = None):
+        
+        with tf.name_scope("Simple_block_up" + name):
+            output = bn_relu_conv2d(inputs, filters, 3,  acti=acti, padding=padding, strides=strides, 
+                                    kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+
+            output = Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding=padding, kernel_initializer=kernel_initializer)(output)
+
+            output = Dropout(dropout_value)(output)
+            
+            inputs = UpSampling2D()(inputs)
+            inputs = Conv2D(output.shape[3].value, 1, padding=padding, kernel_initializer=kernel_initializer)(inputs)
+
+            return Add()([output, inputs])
+    
+
+    def build_model(self, unet_input, mean_std_normalization = None, 
+                    dropout_value = None, acti = None, padding = None, 
+                    kernel_initializer = None, weight_regularizer = None):
+        
+        ### get parameters from config file ###
+        filters = self.config.get_parameter("filters")
+        
+        if dropout_value is None:
+            dropout_value = self.config.get_parameter("dropout_value")
+        if acti is None:
+            acti = self.config.get_parameter("activation_function")
+        if padding is None:
+            padding = self.config.get_parameter("padding")
+        if kernel_initializer is None:
+            kernel_initializer = self.config.get_parameter("initializer")
+        if weight_regularizer is None:
+            weight_regularizer = self.config.get_parameter("weight_regularizer")
+        if mean_std_normalization is None:
+            if self.config.get_parameter("mean_std_normalization") == True:
+                mean = self.config.get_parameter("mean")
+                std = self.config.get_parameter("std")
+            else:
+                mean = None
+                std = None
+            
+        
+        ### Actual network###
+        inputs = Input(unet_input)
+        
+        # normalize images
+        layer = normalize_input(inputs, 
+                                scale_input = self.config.get_parameter("scale_input"),
+                                mean_std_normalization = self.config.get_parameter("mean_std_normalization"),
+                                mean = mean, std = std)
+
+        # encoder arm
+        layer_1 = Conv2D(filters, (3, 3), padding = padding, 
+                         kernel_initializer = kernel_initializer, 
+                         kernel_regularizer = regularizer_function(weight_regularizer), name="Conv_layer_1")(layer)
+        
+        layer_2 = self.simple_block(layer_1, filters, 
+                                    dropout_value = dropout_value, acti = acti, padding = padding, 
+                                    kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                    name = "_layer_2")
+        
+        layer = layer_2
+        layer_store = [layer]
+        
+        for i, conv_layer_i in enumerate(self.config.get_parameter("bottleneck_block"), 1):
+            strides = 2
+            
+            # the last level of the encoding arm acts as the bridge across to the decoder
+            if i == len(self.config.get_parameter("bottleneck_block")):
+                layer = self.bottleneck_block(layer, filters = filters, 
+                                              strides = strides, dropout_value = dropout_value, acti = acti, padding = padding, 
+                                              kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                              name = "_layer_{}".format(2 + i))
+
+                for count in range(conv_layer_i-2):
+                    layer = self.bottleneck_block(layer, filters = filters, 
+                                                  dropout_value = dropout_value, acti = acti, padding = padding, 
+                                                  kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                                  name="_layer_{}-{}".format(2 + i, count))
+                    
+                layer = self.bottleneck_block(layer, upsample = True,
+                                              filters = filters, strides = 1,
+                                              dropout_value = dropout_value, acti = acti, padding = padding, 
+                                              kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                              name = "_up_layer_{}".format(2 + i))
+            else:       
+                layer = self.bottleneck_block(layer, filters = filters, 
+                                              strides = strides, dropout_value = dropout_value, acti = acti, padding = padding, 
+                                              kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                              name = "_layer_{}".format(2 + i))
+
+                for count in range(conv_layer_i - 1):
+                    layer = self.bottleneck_block(layer, filters = filters, 
+                                                  dropout_value = dropout_value, acti = acti, padding = padding, 
+                                                  kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                                  name="_layer_{}-{}".format(2 + i, count))
+                filters = filters*2
+                layer_store.append(layer)
+
+        # decoder arm
+        for i, conv_layer_i in enumerate(self.config.get_parameter("bottleneck_block")[-2::-1], 1):
+            filters = filters//2  
+
+            # note: i must remain positive, possibly due to the way keras/tf compiles the model
+            layer = Concatenate(axis=3, name="Concatenate_layer_{}".format(i+6))([layer_store[-i], layer])
+            
+            for count in range(conv_layer_i - 1):
+                layer = self.bottleneck_block(layer, filters = filters, 
+                                              dropout_value = dropout_value, acti = acti, padding = padding, 
+                                              kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                              name="_layer_{}-{}".format(i+6, count))
+
+            layer = self.bottleneck_block(layer, upsample = True,
+                                          filters = filters, strides = 1,
+                                          dropout_value = dropout_value, acti = acti, padding = padding, 
+                                          kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                          name = "_layer_{}".format(i+6))
+        
+        layer_13 = Concatenate(axis=3, name="Concatenate_layer_13")([layer, layer_2])
+        layer_14 = self.simple_block_up(layer_13, filters,
+                                        dropout_value = dropout_value, acti = acti, padding = padding, 
+                                        kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                        name = "_layer_14")
+
+        layer_15 = Concatenate(axis=3, name="Concatenate_layer_15")([layer_14, layer_1])
+        
+        layer_16 = Conv2D(filters, (3, 3), padding = padding, 
+                          kernel_initializer = kernel_initializer, kernel_regularizer = regularizer_function(weight_regularizer), 
+                          name="Conv_layer_16")(layer_15)
+        
+        layer_17 = BatchNormalization()(layer_16)
+        layer_18 = activation_function(layer_17, acti)
+
+        outputs = Conv2D(1, (1, 1), activation = self.config.get_parameter("final_activation"))(layer_18)
+        
+        return Model(inputs=[inputs], outputs=[outputs], name = self.config.get_parameter('name'))
+    
+class Unet_Resnet101(Unet_Resnet):
+    def __init__(self, model_dir = None, name = 'Unet_Resnet101', **kwargs):
+        super().__init__(model_dir = model_dir, **kwargs)
+        
+        self.config.update_parameter(["model","name"], name)
+        self.config.update_parameter(["model","bottleneck_block"], (3, 4, 23, 3))
+
+        # store parameters for ease of use (may need to remove in the future)
+        self.conv_layer = self.config.get_parameter("bottleneck_block")
+
+class Unet_Resnet50(Unet_Resnet):
+    def __init__(self, model_dir = None, name = 'Unet_Resnet50', **kwargs):
+        super().__init__(model_dir = model_dir, **kwargs)
+        
+        self.config.update_parameter(["model","name"], name)
+        self.config.update_parameter(["model","bottleneck_block"], (3, 4, 6, 3))
+        
+        # store parameters for ease of use (may need to remove in the future)
+        self.conv_layer = self.config.get_parameter("bottleneck_block")
+        
+class Unet_Resnet_paper(Unet_Resnet):
+    def __init__(self, model_dir = None, name = 'Unet_Resnet101', **kwargs):
+        """
+        see https://arxiv.org/pdf/1608.04117.pdf
+        """
+        super().__init__(model_dir = model_dir, **kwargs)
+        
+        self.config.update_parameter(["model","name"], name)
+        self.config.update_parameter(["model","bottleneck_block"], (3, 8, 10, 3))
+
+        # store parameters for ease of use (may need to remove in the future)
+        self.conv_layer = self.config.get_parameter("bottleneck_block")
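
The bottleneck_block tuples mirror the stage depths of the corresponding ResNet backbones: (3, 4, 6, 3) for ResNet-50 and (3, 4, 23, 3) for ResNet-101.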
+
+
+
+
+
+
+
+
+
+

Classes

+
+
+class Unet_Resnet +(model_dir=None, **kwargs) +
+
+

Unet + resnet functions; see https://link.springer.com/chapter/10.1007/978-3-319-46976-8_19

+

Creates the base neural network class with basic functions

+

Parameters

+
+
model_dir : str, optional
+
[Default: None] Folder where the model is stored
+
config_filepath : str, optional
+
[Default: None] Filepath to the config file
+
**kwargs
+
Parameters that are passed to :class:network_config.Network_Config
+
+

Attributes

+
+
config : :class:network_config.Network_Config
+
Network_config object containing the config and necessary functions
+
+
class Unet_Resnet(CNN_Base):
+    """
+    Unet + resnet functions
+    see https://link.springer.com/chapter/10.1007/978-3-319-46976-8_19
+    """
+    
+    def __init__(self, model_dir = None, **kwargs):       
+        super().__init__(model_dir = model_dir, **kwargs)
+        
+    def bottleneck_block(self, inputs, 
+                         upsample = False,
+                         filters = 8,
+                         strides = 1, dropout_value = None, acti = None, padding = None, 
+                         kernel_initializer = None, weight_regularizer = None, name = None):            
+        # Bottleneck_block
+        with tf.name_scope("Bottleneck_block" + name):
+            output = bn_relu_conv2d(inputs, filters, 1,  acti=acti, padding=padding, strides=strides, 
+                                    kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+            
+            output = bn_relu_conv2d(output, filters, 3,  acti=acti, padding=padding, 
+                                    kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+            
+            if upsample == True:
+                output = bn_relu_conv2dtranspose(output, filters, (2,2), strides = (2,2), acti=acti, padding=padding, 
+                                                kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+                output = Conv2D(filters * 4, (1,1), padding=padding, 
+                                kernel_initializer=kernel_initializer, 
+                                kernel_regularizer=regularizer_function(weight_regularizer))(output)
+            else:
+                output = bn_relu_conv2d(output, filters*4, 1,  acti=acti, padding=padding, 
+                                        kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+
+            output = Dropout(dropout_value)(output)
+            
+            # reshape input to the same size as output
+            if upsample == True:
+                inputs = UpSampling2D()(inputs)
+            if strides == 2:
+                inputs = Conv2D(output.shape[3].value, 1, padding=padding, strides=strides, kernel_initializer=kernel_initializer)(inputs)
+            
+            # ensure number of filters are correct between input and output
+            if output.shape[3] != inputs.shape[3]:
+                inputs = Conv2D(output.shape[3].value, 1, padding=padding, kernel_initializer=kernel_initializer)(inputs)
+
+            return Add()([output, inputs])
+        
+    def simple_block(self, inputs, filters,
+                     strides = 1, dropout_value = None, acti = None, padding = None, 
+                     kernel_initializer = None, weight_regularizer = None, name = None):
+            
+        with tf.name_scope("Simple_block" + name):
+            output = BatchNormalization()(inputs)
+            output = activation_function(output, acti)
+            output = MaxPooling2D()(output)
+            output = Conv2D(filters, 3, padding=padding, strides=strides,
+                            kernel_initializer=kernel_initializer, 
+                            kernel_regularizer=regularizer_function(weight_regularizer))(output)
+
+            output = Dropout(dropout_value)(output)
+
+            inputs = Conv2D(output.shape[3].value, 1, padding=padding, strides=2, kernel_initializer=kernel_initializer)(inputs)
+            
+            return Add()([output, inputs])
+        
+    def simple_block_up(self, inputs, filters,
+                        strides = 1, dropout_value = None, acti = None, padding = None, 
+                        kernel_initializer = None, weight_regularizer = None, name = None):
+        
+        with tf.name_scope("Simple_block_up" + name):
+            output = bn_relu_conv2d(inputs, filters, 3,  acti=acti, padding=padding, strides=strides, 
+                                    kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+
+            output = Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding=padding, kernel_initializer=kernel_initializer)(output)
+
+            output = Dropout(dropout_value)(output)
+            
+            inputs = UpSampling2D()(inputs)
+            inputs = Conv2D(output.shape[3].value, 1, padding=padding, kernel_initializer=kernel_initializer)(inputs)
+
+            return Add()([output, inputs])
+    
+
+    def build_model(self, unet_input, mean_std_normalization = None, 
+                    dropout_value = None, acti = None, padding = None, 
+                    kernel_initializer = None, weight_regularizer = None):
+        
+        ### get parameters from config file ###
+        filters = self.config.get_parameter("filters")
+        
+        if dropout_value is None:
+            dropout_value = self.config.get_parameter("dropout_value")
+        if acti is None:
+            acti = self.config.get_parameter("activation_function")
+        if padding is None:
+            padding = self.config.get_parameter("padding")
+        if kernel_initializer is None:
+            kernel_initializer = self.config.get_parameter("initializer")
+        if weight_regularizer is None:
+            weight_regularizer = self.config.get_parameter("weight_regularizer")
+        if mean_std_normalization is None:
+            if self.config.get_parameter("mean_std_normalization") == True:
+                mean = self.config.get_parameter("mean")
+                std = self.config.get_parameter("std")
+            else:
+                mean = None
+                std = None
+            
+        
+        ### Actual network###
+        inputs = Input(unet_input)
+        
+        # normalize images
+        layer = normalize_input(inputs, 
+                                scale_input = self.config.get_parameter("scale_input"),
+                                mean_std_normalization = self.config.get_parameter("mean_std_normalization"),
+                                mean = mean, std = std)
+
+        # encoder arm
+        layer_1 = Conv2D(filters, (3, 3), padding = padding, 
+                         kernel_initializer = kernel_initializer, 
+                         kernel_regularizer = regularizer_function(weight_regularizer), name="Conv_layer_1")(layer)
+        
+        layer_2 = self.simple_block(layer_1, filters, 
+                                    dropout_value = dropout_value, acti = acti, padding = padding, 
+                                    kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                    name = "_layer_2")
+        
+        layer = layer_2
+        layer_store = [layer]
+        
+        for i, conv_layer_i in enumerate(self.config.get_parameter("bottleneck_block"), 1):
+            strides = 2
+            
+            # the last level of the encoding arm acts as the bridge across to the decoder
+            if i == len(self.config.get_parameter("bottleneck_block")):
+                layer = self.bottleneck_block(layer, filters = filters, 
+                                              strides = strides, dropout_value = dropout_value, acti = acti, padding = padding, 
+                                              kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                              name = "_layer_{}".format(2 + i))
+
+                for count in range(conv_layer_i-2):
+                    layer = self.bottleneck_block(layer, filters = filters, 
+                                                  dropout_value = dropout_value, acti = acti, padding = padding, 
+                                                  kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                                  name="_layer_{}-{}".format(2 + i, count))
+                    
+                layer = self.bottleneck_block(layer, upsample = True,
+                                              filters = filters, strides = 1,
+                                              dropout_value = dropout_value, acti = acti, padding = padding, 
+                                              kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                              name = "_up_layer_{}".format(2 + i))
+            else:       
+                layer = self.bottleneck_block(layer, filters = filters, 
+                                              strides = strides, dropout_value = dropout_value, acti = acti, padding = padding, 
+                                              kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                              name = "_layer_{}".format(2 + i))
+
+                for count in range(conv_layer_i - 1):
+                    layer = self.bottleneck_block(layer, filters = filters, 
+                                                  dropout_value = dropout_value, acti = acti, padding = padding, 
+                                                  kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                                  name="_layer_{}-{}".format(2 + i, count))
+                filters = filters*2
+                layer_store.append(layer)
+
+        # decoder arm
+        for i, conv_layer_i in enumerate(self.config.get_parameter("bottleneck_block")[-2::-1], 1):
+            filters = filters//2  
+
+            # note: i is kept positive (enumerate starts at 1), possibly due to the way keras/tf model compilation works
+            layer = Concatenate(axis=3, name="Concatenate_layer_{}".format(i+6))([layer_store[-i], layer])
+            
+            for count in range(conv_layer_i - 1):
+                layer = self.bottleneck_block(layer, filters = filters, 
+                                              dropout_value = dropout_value, acti = acti, padding = padding, 
+                                              kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                              name="_layer_{}-{}".format(i+6, count))
+
+            layer = self.bottleneck_block(layer, upsample = True,
+                                          filters = filters, strides = 1,
+                                          dropout_value = dropout_value, acti = acti, padding = padding, 
+                                          kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                          name = "_layer_{}".format(i+6))
+        
+        layer_13 = Concatenate(axis=3, name="Concatenate_layer_13")([layer, layer_2])
+        layer_14 = self.simple_block_up(layer_13, filters,
+                                        dropout_value = dropout_value, acti = acti, padding = padding, 
+                                        kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                        name = "_layer_14")
+
+        layer_15 = Concatenate(axis=3, name="Concatenate_layer_15")([layer_14, layer_1])
+        
+        layer_16 = Conv2D(filters, (3, 3), padding = padding, 
+                          kernel_initializer = kernel_initializer, kernel_regularizer = regularizer_function(weight_regularizer), 
+                          name="Conv_layer_16")(layer_15)
+        
+        layer_17 = BatchNormalization()(layer_16)
+        layer_18 = activation_function(layer_17, acti)
+
+        outputs = Conv2D(1, (1, 1), activation = self.config.get_parameter("final_activation"))(layer_18)
+        
+        return Model(inputs=[inputs], outputs=[outputs], name = self.config.get_parameter('name'))
+
+

Ancestors


Subclasses


Methods

+
+
+def bottleneck_block(self, inputs, upsample=False, filters=8, strides=1, dropout_value=None, acti=None, padding=None, kernel_initializer=None, weight_regularizer=None, name=None)
+
+
+
def bottleneck_block(self, inputs, 
+                     upsample = False,
+                     filters = 8,
+                     strides = 1, dropout_value = None, acti = None, padding = None, 
+                     kernel_initializer = None, weight_regularizer = None, name = None):            
+    # Bottleneck_block
+    with tf.name_scope("Bottleneck_block" + name):
+        output = bn_relu_conv2d(inputs, filters, 1,  acti=acti, padding=padding, strides=strides, 
+                                kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+        
+        output = bn_relu_conv2d(output, filters, 3,  acti=acti, padding=padding, 
+                                kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+        
+        if upsample == True:
+            output = bn_relu_conv2dtranspose(output, filters, (2,2), strides = (2,2), acti=acti, padding=padding, 
+                                            kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+            output = Conv2D(filters * 4, (1,1), padding=padding, 
+                            kernel_initializer=kernel_initializer, 
+                            kernel_regularizer=regularizer_function(weight_regularizer))(output)
+        else:
+            output = bn_relu_conv2d(output, filters*4, 1,  acti=acti, padding=padding, 
+                                    kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+
+        output = Dropout(dropout_value)(output)
+        
+        # reshape input to the same size as output
+        if upsample == True:
+            inputs = UpSampling2D()(inputs)
+        if strides == 2:
+            inputs = Conv2D(output.shape[3].value, 1, padding=padding, strides=strides, kernel_initializer=kernel_initializer)(inputs)
+        
+        # ensure number of filters are correct between input and output
+        if output.shape[3] != inputs.shape[3]:
+            inputs = Conv2D(output.shape[3].value, 1, padding=padding, kernel_initializer=kernel_initializer)(inputs)
+
+        return Add()([output, inputs])
+
+
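To make the shape bookkeeping above concrete, here is a small arithmetic sketch (an illustrative helper written for this documentation, not code from the repository; it assumes 'same' padding so spatial sizes divide cleanly):

def bottleneck_output_shape(h, w, filters, strides=1, upsample=False):
    """Mirror the shape arithmetic of bottleneck_block (illustration only)."""
    if upsample:
        h, w = h * 2, w * 2                    # Conv2DTranspose with strides (2, 2)
    else:
        h, w = h // strides, w // strides      # strided convolution on the main path
    return (h, w, filters * 4)                 # both branches end at filters * 4 channels

print(bottleneck_output_shape(64, 64, filters=8, strides=2))  # (32, 32, 32)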
+
+def build_model(self, unet_input, mean_std_normalization=None, dropout_value=None, acti=None, padding=None, kernel_initializer=None, weight_regularizer=None)
+
+
+
def build_model(self, unet_input, mean_std_normalization = None, 
+                dropout_value = None, acti = None, padding = None, 
+                kernel_initializer = None, weight_regularizer = None):
+    
+    ### get parameters from config file ###
+    filters = self.config.get_parameter("filters")
+    
+    if dropout_value is None:
+        dropout_value = self.config.get_parameter("dropout_value")
+    if acti is None:
+        acti = self.config.get_parameter("activation_function")
+    if padding is None:
+        padding = self.config.get_parameter("padding")
+    if kernel_initializer is None:
+        kernel_initializer = self.config.get_parameter("initializer")
+    if weight_regularizer is None:
+        weight_regularizer = self.config.get_parameter("weight_regularizer")
+    if mean_std_normalization is None:
+        mean_std_normalization = self.config.get_parameter("mean_std_normalization")
+    if mean_std_normalization is True:
+        mean = self.config.get_parameter("mean")
+        std = self.config.get_parameter("std")
+    else:
+        mean = None
+        std = None
+        
+    
+    ### Actual network ###
+    inputs = Input(unet_input)
+    
+    # normalize images
+    layer = normalize_input(inputs, 
+                            scale_input = self.config.get_parameter("scale_input"),
+                            mean_std_normalization = mean_std_normalization,
+                            mean = mean, std = std)
+
+    # encoder arm
+    layer_1 = Conv2D(filters, (3, 3), padding = padding, 
+                     kernel_initializer = kernel_initializer, 
+                     kernel_regularizer = regularizer_function(weight_regularizer), name="Conv_layer_1")(layer)
+    
+    layer_2 = self.simple_block(layer_1, filters, 
+                                dropout_value = dropout_value, acti = acti, padding = padding, 
+                                kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                name = "_layer_2")
+    
+    layer = layer_2
+    layer_store = [layer]
+    
+    for i, conv_layer_i in enumerate(self.config.get_parameter("bottleneck_block"), 1):
+        strides = 2
+        
+        # the last level of the encoding arm is treated as the bridge (across) block
+        if i == len(self.config.get_parameter("bottleneck_block")):
+            layer = self.bottleneck_block(layer, filters = filters, 
+                                          strides = strides, dropout_value = dropout_value, acti = acti, padding = padding, 
+                                          kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                          name = "_layer_{}".format(2 + i))
+
+            for count in range(conv_layer_i-2):
+                layer = self.bottleneck_block(layer, filters = filters, 
+                                              dropout_value = dropout_value, acti = acti, padding = padding, 
+                                              kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                              name="_layer_{}-{}".format(2 + i, count))
+                
+            layer = self.bottleneck_block(layer, upsample = True,
+                                          filters = filters, strides = 1,
+                                          dropout_value = dropout_value, acti = acti, padding = padding, 
+                                          kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                          name = "_up_layer_{}".format(2 + i))
+        else:       
+            layer = self.bottleneck_block(layer, filters = filters, 
+                                          strides = strides, dropout_value = dropout_value, acti = acti, padding = padding, 
+                                          kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                          name = "_layer_{}".format(2 + i))
+
+            for count in range(conv_layer_i - 1):
+                layer = self.bottleneck_block(layer, filters = filters, 
+                                              dropout_value = dropout_value, acti = acti, padding = padding, 
+                                              kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                              name="_layer_{}-{}".format(2 + i, count))
+            filters = filters*2
+            layer_store.append(layer)
+
+    # decoder arm
+    for i, conv_layer_i in enumerate(self.config.get_parameter("bottleneck_block")[-2::-1], 1):
+        filters = filters//2  
+
+        # note: i is kept positive (enumerate starts at 1), possibly due to the way keras/tf model compilation works
+        layer = Concatenate(axis=3, name="Concatenate_layer_{}".format(i+6))([layer_store[-i], layer])
+        
+        for count in range(conv_layer_i - 1):
+            layer = self.bottleneck_block(layer, filters = filters, 
+                                          dropout_value = dropout_value, acti = acti, padding = padding, 
+                                          kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                          name="_layer_{}-{}".format(i+6, count))
+
+        layer = self.bottleneck_block(layer, upsample = True,
+                                      filters = filters, strides = 1,
+                                      dropout_value = dropout_value, acti = acti, padding = padding, 
+                                      kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                      name = "_layer_{}".format(i+6))
+    
+    layer_13 = Concatenate(axis=3, name="Concatenate_layer_13")([layer, layer_2])
+    layer_14 = self.simple_block_up(layer_13, filters,
+                                    dropout_value = dropout_value, acti = acti, padding = padding, 
+                                    kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
+                                    name = "_layer_14")
+
+    layer_15 = Concatenate(axis=3, name="Concatenate_layer_15")([layer_14, layer_1])
+    
+    layer_16 = Conv2D(filters, (3, 3), padding = padding, 
+                      kernel_initializer = kernel_initializer, kernel_regularizer = regularizer_function(weight_regularizer), 
+                      name="Conv_layer_16")(layer_15)
+    
+    layer_17 = BatchNormalization()(layer_16)
+    layer_18 = activation_function(layer_17, acti)
+
+    outputs = Conv2D(1, (1, 1), activation = self.config.get_parameter("final_activation"))(layer_18)
+    
+    return Model(inputs=[inputs], outputs=[outputs], name = self.config.get_parameter('name'))
+
+
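As a reading of the two loops above (illustrative arithmetic, not repository code): each entry of the bottleneck_block tuple yields that many bottleneck blocks on its encoder level, the last entry forms the bridge, and the decoder mirrors every level except the bridge:

bottleneck_block = (3, 4, 6, 3)              # e.g. the Unet_Resnet50 setting
encoder_and_bridge = sum(bottleneck_block)   # strided/upsampling block plus plain blocks per level
decoder = sum(bottleneck_block[:-1])         # decoder arm iterates bottleneck_block[-2::-1]
print(encoder_and_bridge, decoder)           # 16 13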
+
+def simple_block(self, inputs, filters, strides=1, dropout_value=None, acti=None, padding=None, kernel_initializer=None, weight_regularizer=None, name=None)
+
+
+
def simple_block(self, inputs, filters,
+                 strides = 1, dropout_value = None, acti = None, padding = None, 
+                 kernel_initializer = None, weight_regularizer = None, name = None):
+        
+    with tf.name_scope("Simple_block" + name):
+        output = BatchNormalization()(inputs)
+        output = activation_function(output, acti)
+        output = MaxPooling2D()(output)
+        output = Conv2D(filters, 3, padding=padding, strides=strides,
+                        kernel_initializer=kernel_initializer, 
+                        kernel_regularizer=regularizer_function(weight_regularizer))(output)
+
+        output = Dropout(dropout_value)(output)
+
+        inputs = Conv2D(output.shape[3].value, 1, padding=padding, strides=2, kernel_initializer=kernel_initializer)(inputs)
+        
+        return Add()([output, inputs])
+
+
+
+def simple_block_up(self, inputs, filters, strides=1, dropout_value=None, acti=None, padding=None, kernel_initializer=None, weight_regularizer=None, name=None)
+
+
+
def simple_block_up(self, inputs, filters,
+                    strides = 1, dropout_value = None, acti = None, padding = None, 
+                    kernel_initializer = None, weight_regularizer = None, name = None):
+    
+    with tf.name_scope("Simple_block_up" + name):
+        output = bn_relu_conv2d(inputs, filters, 3,  acti=acti, padding=padding, strides=strides, 
+                                kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
+
+        output = Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding=padding, kernel_initializer=kernel_initializer)(output)
+
+        output = Dropout(dropout_value)(output)
+        
+        inputs = UpSampling2D()(inputs)
+        inputs = Conv2D(output.shape[3].value, 1, padding=padding, kernel_initializer=kernel_initializer)(inputs)
+
+        return Add()([output, inputs])
+
+
+
+

Inherited members

+
+class Unet_Resnet101(model_dir=None, name='Unet_Resnet101', **kwargs)
+
+

U-Net + ResNet functions; see https://link.springer.com/chapter/10.1007/978-3-319-46976-8_19

+

Creates the base neural network class with basic functions

+

Parameters

+
+
model_dir : str, optional
+
[Default: None] Folder where the model is stored
+
config_filepath : str, optional
+
[Default: None] Filepath to the config file
+
**kwargs
+
Parameters that are passed to network_config.Network_Config
+
+

Attributes

+
+
config : network_config.Network_Config
+
Network_config object containing the config and necessary functions
+
+
class Unet_Resnet101(Unet_Resnet):
+    def __init__(self, model_dir = None, name = 'Unet_Resnet101', **kwargs):
+        super().__init__(model_dir = model_dir, **kwargs)
+        
+        self.config.update_parameter(["model","name"], name)
+        self.config.update_parameter(["model","bottleneck_block"], (3, 4, 23, 3))
+
+        # store parameters for ease of use (may need to remove in the future)
+        self.conv_layer = self.config.get_parameter("bottleneck_block")
+
+
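A minimal usage sketch (the paths and the single-channel input shape are assumptions for illustration, not values from the repository):

from models.Unet_Resnet import Unet_Resnet101

builder = Unet_Resnet101(model_dir="/path/to/model_dir",
                         config_filepath="configs/default_multiclass_unet.yml")
network = builder.build_model((512, 512, 1))   # unet_input is the input image shape
network.summary()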

Ancestors


Inherited members

+
+class Unet_Resnet50(model_dir=None, name='Unet_Resnet50', **kwargs)
+
+

U-Net + ResNet functions; see https://link.springer.com/chapter/10.1007/978-3-319-46976-8_19

+

Creates the base neural network class with basic functions

+

Parameters

+
+
model_dir : str, optional
+
[Default: None] Folder where the model is stored
+
config_filepath : str, optional
+
[Default: None] Filepath to the config file
+
**kwargs
+
Parameters that are passed to network_config.Network_Config
+
+

Attributes

+
+
config : network_config.Network_Config
+
Network_config object containing the config and necessary functions
+
+
class Unet_Resnet50(Unet_Resnet):
+    def __init__(self, model_dir = None, name = 'Unet_Resnet50', **kwargs):
+        super().__init__(model_dir = model_dir, **kwargs)
+        
+        self.config.update_parameter(["model","name"], name)
+        self.config.update_parameter(["model","bottleneck_block"], (3, 4, 6, 3))
+        
+        # store parameters for ease of use (may need to remove in the future)
+        self.conv_layer = self.config.get_parameter("bottleneck_block")
+
+

Ancestors


Inherited members

+
+class Unet_Resnet_paper(model_dir=None, name='Unet_Resnet101', **kwargs)
+
+

U-Net + ResNet functions; see https://link.springer.com/chapter/10.1007/978-3-319-46976-8_19

+

see https://arxiv.org/pdf/1608.04117.pdf

+
class Unet_Resnet_paper(Unet_Resnet):
+    def __init__(self, model_dir = None, name = 'Unet_Resnet101', **kwargs):
+        """
+        see https://arxiv.org/pdf/1608.04117.pdf
+        """
+        super().__init__(model_dir = model_dir, **kwargs)
+        
+        self.config.update_parameter(["model","name"], name)
+        self.config.update_parameter(["model","bottleneck_block"], (3, 8, 10, 3))
+
+        # store parameters for ease of use (may need to remove in the future)
+        self.conv_layer = self.config.get_parameter("bottleneck_block")
+
+

Ancestors


Inherited members

+
+
+
\ No newline at end of file
diff --git a/html/models/index.html b/html/models/index.html
new file mode 100644
index 0000000..b530452
--- /dev/null
+++ b/html/models/index.html
@@ -0,0 +1,86 @@
+models API documentation
+
+
+

Module models

+
+
+
from __future__ import absolute_import, print_function
+
+
+
+

Sub-modules

+
+
models.CNN_Base
+
+
+
+
models.Unet
+
+
+
+
models.Unet_Resnet
+
+
+
+
models.internals
+
+
+
+
models.layers
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/html/models/internals/dataset.html b/html/models/internals/dataset.html
new file mode 100644
index 0000000..d653901
--- /dev/null
+++ b/html/models/internals/dataset.html
@@ -0,0 +1,958 @@
+models.internals.dataset API documentation
+
+
+

Module models.internals.dataset

+
+
+
import os, sys
+import numpy as np
+
+import matplotlib.pyplot as plt
+
+from tqdm import tqdm
+
+from .image_functions import Image_Functions      
+
+class Dataset(Image_Functions):
+    def __init__(self):
+        """Creates Dataset object that is used to manipulate the training data.
+    
+        Attributes
+        ----------
+        classes : list
+            List of dictionaries containing the class name and id
+            
+        train_images : list
+            List of images that is used as the input for the network
+            
+        train_ground_truth : list
+            List of images that is used as the ground truth for the network
+        """
+            
+        self.classes = []
+        self.train_images = []
+        self.train_ground_truth = []
+        
+        super().__init__()
+    
+    #######################
+    # Class id functions
+    #######################
+    def get_class_id(self, class_name):
+        """Returns the class id and adds class to list if not in list of classes.
+    
+        Parameters
+        ----------
+        class_name : str
+            Identity of class that will be associated with the class id
+            
+        Returns
+        ----------
+        int
+            Class id
+        """
+        
+        if len(self.classes) == 0:
+            self.classes.append({"class": class_name, "id": 0})
+            return 0
+        
+        for class_info in self.classes:
+            # if class exist, return class id
+            if class_info["class"] == class_name:
+                return class_info["id"]
+   
+        self.classes.append({"class": class_name, "id": len(self.classes)-1})
+        return len(self.classes)-1
+    
+    #######################
+    # Dataset checking and loading functions
+    #######################
+    def sanity_check(self, image_index):
+        """Plots the augmented image and ground_truth to check if everything is ok.
+    
+        Parameters
+        ----------
+        image_index : int
+            Index of the image and its corresponding ground_truth
+        """
+        
+        image = self.aug_images[image_index][:,:,0]
+        ground_truth = self.aug_ground_truth[image_index][:,:,0]
+
+        plt.figure(figsize=(14, 14))
+        plt.axis('off')
+        plt.imshow(image, cmap='gray', 
+                   norm=None, interpolation=None)
+        plt.show()
+
+        plt.figure(figsize=(14, 14))
+        plt.axis('off')
+        plt.imshow(ground_truth, cmap='gray', 
+                   norm=None, interpolation=None)
+        plt.show()
+    
+    def load_dataset(self, dataset_dir = None, tiled = False):
+        """Loads dataset from ``dataset_dir``
+    
+        Parameters
+        ----------
+        dataset_dir : str or None, optional
+            Folder to load the dataset from. If None, ``dataset_dir`` is obtained from the config file
+            
+        tiled : bool, optional
+            To set if tiling function is used
+        """
+        
+        # update dataset_dir if specified. If not, load dataset_dir from config file
+        if dataset_dir is None:
+            dataset_dir = self.config.get_parameter("dataset_dir")
+        else:
+            self.config.update_parameter(self.config.find_key("dataset_dir"), dataset_dir)
+        
+        image_dirs = next(os.walk(dataset_dir))[1]
+        image_dirs = [f for f in image_dirs if not f[0] == '.']
+        
+        for img_dir in image_dirs:
+            # images
+            image = self.load_image(os.path.join(dataset_dir, img_dir), subfolder = self.config.get_parameter("image_subfolder"))
+            
+            # percentile normalization
+            if self.config.get_parameter("percentile_normalization"):
+                image, _, _ = self.percentile_normalization(image, in_bound = self.config.get_parameter("percentile"))
+            
+            if tiled is True:
+                tile_image_list, num_rows, num_cols, padding = self.tile_image(image, self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size"))
+                self.config.update_parameter(["images","num_rows"], num_rows)
+                self.config.update_parameter(["images","num_cols"], num_cols)
+                self.config.update_parameter(["images","padding"], padding)
+                self.train_images.extend(tile_image_list)
+            else:
+                self.train_images.extend([image,])
+            
+            #ground_truth
+            ground_truth, class_id = self.load_ground_truth(os.path.join(dataset_dir, img_dir), subfolder = self.config.get_parameter("ground_truth_subfolder"))
+            if tiled is True:
+                tile_ground_truth_list, _, _, _ = self.tile_image(ground_truth[0], self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size"))
+                self.train_ground_truth.extend(tile_ground_truth_list)
+            else:
+                self.train_ground_truth.extend(ground_truth)
+                
+    #######################
+    # Image augmentation
+    #######################
+    def augment_images(self):
+        """Augments images using the parameters in the config file"""
+        
+        # TODO: To allow for augmentation of multi-class images
+        
+        augmentor = self.augmentations(p=self.config.get_parameter("augmentations_p"))
+        
+        # increase number of images
+        self.aug_images = self.train_images*self.config.get_parameter("num_augmented_images")
+        self.aug_ground_truth = self.train_ground_truth*self.config.get_parameter("num_augmented_images")
+        
+        print("Performing augmentations on {} images".format(len(self.aug_images)))
+        sys.stdout.flush()
+        
+        for i in tqdm(range(len(self.aug_images)),desc="Augmentation of images"):
+            
+            # target must be image and mask in order for albumentations to work
+            data = {"image": self.aug_images[i], 
+                    "mask": self.aug_ground_truth[i]}
+            augmented = augmentor(**data)
+
+            self.aug_images[i] = self.reshape_image(np.asarray(augmented["image"]))
+            
+            # optionally thicken the ground truth with binary dilation
+            if self.config.get_parameter("use_binary_dilation_after_augmentation") is True:
+                from skimage.morphology import binary_dilation, disk
+                self.aug_ground_truth[i] = self.reshape_image(binary_dilation(augmented["mask"].astype(bool), disk(self.config.get_parameter("disk_size"))))
+            else:
+                self.aug_ground_truth[i] = self.reshape_image(augmented["mask"].astype(bool))
+
+        self.aug_images = np.stack(self.aug_images, axis = 0)
+        self.aug_ground_truth = np.stack(self.aug_ground_truth, axis = 0)
+        
+        mean = self.aug_images.mean()
+        std = self.aug_images.std()
+        
+        self.config.update_parameter(["images","mean"], float(mean))
+        self.config.update_parameter(["images","std"], float(std))
+        
+        print("Augmentations complete!")
+
+    def augmentations(self, p = None):
+        """Generates list of augmentations using parameters obtained from config file
+        
+        Parameters
+        ----------
+        p : float, optional
+            probability to apply any augmentations to image
+        
+        Returns
+        ----------
+        function
+            function used to augment images
+        """
+        from albumentations import (
+            RandomCrop, HorizontalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90,
+            Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, ElasticTransform,
+            IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur,
+            IAASharpen, RandomBrightnessContrast, Flip, OneOf, Compose
+        )
+        
+        augmentation_list = []
+        
+        if self.config.get_parameter("random_rotate") is True:
+            augmentation_list.append(RandomRotate90(p = self.config.get_parameter("random_rotate_p"))) # 0.9
+        
+        if self.config.get_parameter("flip") is True:
+            augmentation_list.append(Flip())
+            
+        if self.config.get_parameter("transpose") is True:
+            augmentation_list.append(Transpose())
+            
+        if self.config.get_parameter("blur_group") is True:
+            blur_augmentation = []
+            if self.config.get_parameter("motion_blur") is True:
+                blur_augmentation.append(MotionBlur(p = self.config.get_parameter("motion_blur_p")))
+            if self.config.get_parameter("median_blur") is True:
+                blur_augmentation.append(MedianBlur(blur_limit = self.config.get_parameter("median_blur_limit"), p = self.config.get_parameter("median_blur_p")))
+            if self.config.get_parameter("blur") is True:
+                blur_augmentation.append(Blur(blur_limit = self.config.get_parameter("blur_limit"), p = self.config.get_parameter("blur_p")))
+            augmentation_list.append(OneOf(blur_augmentation, p = self.config.get_parameter("blur_group_p"))) 
+            
+        if self.config.get_parameter("shift_scale_rotate") is True:
+            augmentation_list.append(ShiftScaleRotate(shift_limit = self.config.get_parameter("shift_limit"),
+                                                      scale_limit = self.config.get_parameter("scale_limit"),
+                                                      rotate_limit = self.config.get_parameter("rotate_limit"),
+                                                      p = self.config.get_parameter("shift_scale_rotate_p")))
+        if self.config.get_parameter("distortion_group") is True:
+            distortion_augmentation = []
+            if self.config.get_parameter("optical_distortion") is True:
+                distortion_augmentation.append(OpticalDistortion(p = self.config.get_parameter("optical_distortion_p")))
+            if self.config.get_parameter("elastic_transform") is True:
+                distortion_augmentation.append(ElasticTransform(p = self.config.get_parameter("elastic_transform_p")))
+            if self.config.get_parameter("grid_distortion") is True:
+                distortion_augmentation.append(GridDistortion(p = self.config.get_parameter("grid_distortion_p")))
+            
+            augmentation_list.append(OneOf(distortion_augmentation, p = self.config.get_parameter("distortion_group_p")))
+        
+        if self.config.get_parameter("brightness_contrast_group") is True:
+            contrast_augmentation = []
+            if self.config.get_parameter("clahe") is True:
+                contrast_augmentation.append(CLAHE())
+            if self.config.get_parameter("sharpen") is True:
+                contrast_augmentation.append(IAASharpen())
+            if self.config.get_parameter("random_brightness_contrast") is True:
+                contrast_augmentation.append(RandomBrightnessContrast())
+           
+            augmentation_list.append(OneOf(contrast_augmentation, p = self.config.get_parameter("brightness_contrast_group_p")))
+            
+        augmentation_list.append(RandomCrop(self.config.get_parameter("tile_size")[0], self.config.get_parameter("tile_size")[1], always_apply=True))
+        
+        return Compose(augmentation_list, p = p)
+
+############################### TODO ###############################
+#     def prepare_data(self):
+#         """        
+#         Performs augmentation if needed
+#         """
+        
+            
+#     # Create data generator
+#     # Return augmented images/ground_truth arrays of batch size
+#     def generator(features, labels, batch_size, seq_det):
+#         # create empty arrays to contain batch of features and labels
+#         batch_features = np.zeros((batch_size, features.shape[1], features.shape[2], features.shape[3]))
+#         batch_labels = np.zeros((batch_size, labels.shape[1], labels.shape[2], labels.shape[3]))
+
+#         while True:
+#             # Fill arrays of batch size with augmented data taken randomly from full passed arrays
+#             indexes = random.sample(range(len(features)), batch_size)
+#             # Perform exactly the same augmentation for X and y
+#             random_augmented_images, random_augmented_labels = do_augmentation(seq_det, features[indexes], labels[indexes])
+#             batch_features[:,:,:,:] = random_augmented_images[:,:,:,:]
+#             batch_labels[:,:,:,:] = random_augmented_labels[:,:,:,:]
+
+#             yield batch_features, batch_labels
+            
+    # Train augmentation
+#     def do_augmentation(seq_det, X_train, y_train):
+#         # Use seq_det to build augmentation.
+#         # ....
+#         return np.array(X_train_aug), np.array(y_train_aug)
+
+#     seq = iaa.Sequential([
+#         iaa.Fliplr(0.5), # horizontally flip
+#         iaa.OneOf([
+#             iaa.Noop(),
+#             iaa.GaussianBlur(sigma=(0.0, 1.0)),
+#             iaa.Noop(),
+#             iaa.Affine(rotate=(-10, 10), translate_percent={"x": (-0.25, 0.25)}, mode='symmetric', cval=(0)),
+#             iaa.Noop(),
+#             iaa.PerspectiveTransform(scale=(0.04, 0.08)),
+#             iaa.Noop(),
+#             iaa.PiecewiseAffine(scale=(0.05, 0.1), mode='edge', cval=(0)),
+#         ]),
+#         # More as you want ...
+#     ])
+#     seq_det = seq.to_deterministic()
+    
+#     history = model.fit_generator(generator(X_train, y_train, BATCH_SIZE, seq_det),
+#                               epochs=EPOCHS,
+#                               steps_per_epoch=steps_per_epoch,
+#                               validation_data=(X_valid, y_valid),
+#                               verbose = 1, 
+#                               callbacks = [check_point]
+#                              ) 
+    
+    # Image augmentations
+            
+############################### END of TODO ###############################
+
+
+
+
+
+
+
+
+
+

Classes

+
+
+class Dataset
+
+

Creates Dataset object that is used to manipulate the training data.

+

Attributes

+
+
classes : list
+
List of dictionaries containing the class name and id
+
train_images : list
+
List of images that is used as the input for the network
+
train_ground_truth : list
+
List of images that is used as the ground truth for the network
+
+
class Dataset(Image_Functions):
+    def __init__(self):
+        """Creates Dataset object that is used to manipulate the training data.
+    
+        Attributes
+        ----------
+        classes : list
+            List of dictionaries containing the class name and id
+            
+        train_images : list
+            List of images that is used as the input for the network
+            
+        train_ground_truth : list
+            List of images that is used as the ground truth for the network
+        """
+            
+        self.classes = []
+        self.train_images = []
+        self.train_ground_truth = []
+        
+        super().__init__()
+    
+    #######################
+    # Class id functions
+    #######################
+    def get_class_id(self, class_name):
+        """Returns the class id and adds class to list if not in list of classes.
+    
+        Parameters
+        ----------
+        class_name : str
+            Identity of class that will be associated with the class id
+            
+        Returns
+        ----------
+        int
+            Class id
+        """
+        
+        if len(self.classes) == 0:
+            self.classes.append({"class": class_name, "id": 0})
+            return 0
+        
+        for class_info in self.classes:
+            # if class exist, return class id
+            if class_info["class"] == class_name:
+                return class_info["id"]
+   
+        self.classes.append({"class": class_name, "id": len(self.classes)-1})
+        return len(self.classes)-1
+    
+    #######################
+    # Dataset checking and loading functions
+    #######################
+    def sanity_check(self, image_index):
+        """Plots the augmented image and ground_truth to check if everything is ok.
+    
+        Parameters
+        ----------
+        image_index : int
+            Index of the image and its corresponding ground_truth
+        """
+        
+        image = self.aug_images[image_index][:,:,0]
+        ground_truth = self.aug_ground_truth[image_index][:,:,0]
+
+        plt.figure(figsize=(14, 14))
+        plt.axis('off')
+        plt.imshow(image, cmap='gray', 
+                   norm=None, interpolation=None)
+        plt.show()
+
+        plt.figure(figsize=(14, 14))
+        plt.axis('off')
+        plt.imshow(ground_truth, cmap='gray', 
+                   norm=None, interpolation=None)
+        plt.show()
+    
+    def load_dataset(self, dataset_dir = None, tiled = False):
+        """Loads dataset from ``dataset_dir``
+    
+        Parameters
+        ----------
+        dataset_dir : str or None, optional
+            Folder to load the dataset from. If None, ``dataset_dir`` is obtained from the config file
+            
+        tiled : bool, optional
+            To set if tiling function is used
+        """
+        
+        # update dataset_dir if specified. If not, load dataset_dir from config file
+        if dataset_dir is None:
+            dataset_dir = self.config.get_parameter("dataset_dir")
+        else:
+            self.config.update_parameter(self.config.find_key("dataset_dir"), dataset_dir)
+        
+        image_dirs = next(os.walk(dataset_dir))[1]
+        image_dirs = [f for f in image_dirs if not f[0] == '.']
+        
+        for img_dir in image_dirs:
+            # images
+            image = self.load_image(os.path.join(dataset_dir, img_dir), subfolder = self.config.get_parameter("image_subfolder"))
+            
+            # percentile normalization
+            if self.config.get_parameter("percentile_normalization"):
+                image, _, _ = self.percentile_normalization(image, in_bound = self.config.get_parameter("percentile"))
+            
+            if tiled is True:
+                tile_image_list, num_rows, num_cols, padding = self.tile_image(image, self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size"))
+                self.config.update_parameter(["images","num_rows"], num_rows)
+                self.config.update_parameter(["images","num_cols"], num_cols)
+                self.config.update_parameter(["images","padding"], padding)
+                self.train_images.extend(tile_image_list)
+            else:
+                self.train_images.extend([image,])
+            
+            #ground_truth
+            ground_truth, class_id = self.load_ground_truth(os.path.join(dataset_dir, img_dir), subfolder = self.config.get_parameter("ground_truth_subfolder"))
+            if tiled is True:
+                tile_ground_truth_list, _, _, _ = self.tile_image(ground_truth[0], self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size"))
+                self.train_ground_truth.extend(tile_ground_truth_list)
+            else:
+                self.train_ground_truth.extend(ground_truth)
+                
+    #######################
+    # Image augmentation
+    #######################
+    def augment_images(self):
+        """Augments images using the parameters in the config file"""
+        
+        # TODO: To allow for augmentation of multi-class images
+        
+        augmentor = self.augmentations(p=self.config.get_parameter("augmentations_p"))
+        
+        # increase number of images
+        self.aug_images = self.train_images*self.config.get_parameter("num_augmented_images")
+        self.aug_ground_truth = self.train_ground_truth*self.config.get_parameter("num_augmented_images")
+        
+        print("Performing augmentations on {} images".format(len(self.aug_images)))
+        sys.stdout.flush()
+        
+        for i in tqdm(range(len(self.aug_images)),desc="Augmentation of images"):
+            
+            # target must be image and mask in order for albumentations to work
+            data = {"image": self.aug_images[i], 
+                    "mask": self.aug_ground_truth[i]}
+            augmented = augmentor(**data)
+
+            self.aug_images[i] = self.reshape_image(np.asarray(augmented["image"]))
+            
+            # optionally thicken the ground truth with binary dilation
+            if self.config.get_parameter("use_binary_dilation_after_augmentation") is True:
+                from skimage.morphology import binary_dilation, disk
+                self.aug_ground_truth[i] = self.reshape_image(binary_dilation(augmented["mask"].astype(bool), disk(self.config.get_parameter("disk_size"))))
+            else:
+                self.aug_ground_truth[i] = self.reshape_image(augmented["mask"].astype(bool))
+
+        self.aug_images = np.stack(self.aug_images, axis = 0)
+        self.aug_ground_truth = np.stack(self.aug_ground_truth, axis = 0)
+        
+        mean = self.aug_images.mean()
+        std = self.aug_images.std()
+        
+        self.config.update_parameter(["images","mean"], float(mean))
+        self.config.update_parameter(["images","std"], float(std))
+        
+        print("Augmentations complete!")
+
+    def augmentations(self, p = None):
+        """Generates list of augmentations using parameters obtained from config file
+        
+        Parameters
+        ----------
+        p : float, optional
+            probability to apply any augmentations to image
+        
+        Returns
+        ----------
+        function
+            function used to augment images
+        """
+        from albumentations import (
+            RandomCrop, HorizontalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90,
+            Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, ElasticTransform,
+            IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur,
+            IAASharpen, RandomBrightnessContrast, Flip, OneOf, Compose
+        )
+        
+        augmentation_list = []
+        
+        if self.config.get_parameter("random_rotate") is True:
+            augmentation_list.append(RandomRotate90(p = self.config.get_parameter("random_rotate_p"))) # 0.9
+        
+        if self.config.get_parameter("flip") is True:
+            augmentation_list.append(Flip())
+            
+        if self.config.get_parameter("transpose") is True:
+            augmentation_list.append(Transpose())
+            
+        if self.config.get_parameter("blur_group") is True:
+            blur_augmentation = []
+            if self.config.get_parameter("motion_blur") is True:
+                blur_augmentation.append(MotionBlur(p = self.config.get_parameter("motion_blur_p")))
+            if self.config.get_parameter("median_blur") is True:
+                blur_augmentation.append(MedianBlur(blur_limit = self.config.get_parameter("median_blur_limit"), p = self.config.get_parameter("median_blur_p")))
+            if self.config.get_parameter("blur") is True:
+                blur_augmentation.append(Blur(blur_limit = self.config.get_parameter("blur_limit"), p = self.config.get_parameter("blur_p")))
+            augmentation_list.append(OneOf(blur_augmentation, p = self.config.get_parameter("blur_group_p"))) 
+            
+        if self.config.get_parameter("shift_scale_rotate") is True:
+            augmentation_list.append(ShiftScaleRotate(shift_limit = self.config.get_parameter("shift_limit"),
+                                                      scale_limit = self.config.get_parameter("scale_limit"),
+                                                      rotate_limit = self.config.get_parameter("rotate_limit"),
+                                                      p = self.config.get_parameter("shift_scale_rotate_p")))
+        if self.config.get_parameter("distortion_group") is True:
+            distortion_augmentation = []
+            if self.config.get_parameter("optical_distortion") is True:
+                distortion_augmentation.append(OpticalDistortion(p = self.config.get_parameter("optical_distortion_p")))
+            if self.config.get_parameter("elastic_transform") is True:
+                distortion_augmentation.append(ElasticTransform(p = self.config.get_parameter("elastic_transform_p")))
+            if self.config.get_parameter("grid_distortion") is True:
+                distortion_augmentation.append(GridDistortion(p = self.config.get_parameter("grid_distortion_p")))
+            
+            augmentation_list.append(OneOf(distortion_augmentation, p = self.config.get_parameter("distortion_group_p")))
+        
+        if self.config.get_parameter("brightness_contrast_group") is True:
+            contrast_augmentation = []
+            if self.config.get_parameter("clahe") is True:
+                contrast_augmentation.append(CLAHE())
+            if self.config.get_parameter("sharpen") is True:
+                contrast_augmentation.append(IAASharpen())
+            if self.config.get_parameter("random_brightness_contrast") is True:
+                contrast_augmentation.append(RandomBrightnessContrast())
+           
+            augmentation_list.append(OneOf(contrast_augmentation, p = self.config.get_parameter("brightness_contrast_group_p")))
+            
+        augmentation_list.append(RandomCrop(self.config.get_parameter("tile_size")[0], self.config.get_parameter("tile_size")[1], always_apply=True))
+        
+        return Compose(augmentation_list, p = p)
+
+

Ancestors


Subclasses


Methods

+
+
+def augment_images(self)
+
+

Augments images using the parameters in the config file

+
def augment_images(self):
+    """Augments images using the parameters in the config file"""
+    
+    # TODO: To allow for augmentation of multi-class images
+    
+    augmentor = self.augmentations(p=self.config.get_parameter("augmentations_p"))
+    
+    # increase number of images
+    self.aug_images = self.train_images*self.config.get_parameter("num_augmented_images")
+    self.aug_ground_truth = self.train_ground_truth*self.config.get_parameter("num_augmented_images")
+    
+    print("Performing augmentations on {} images".format(len(self.aug_images)))
+    sys.stdout.flush()
+    
+    for i in tqdm(range(len(self.aug_images)),desc="Augmentation of images"):
+        
+        # target must be image and mask in order for albumentations to work
+        data = {"image": self.aug_images[i], 
+                "mask": self.aug_ground_truth[i]}
+        augmented = augmentor(**data)
+
+        self.aug_images[i] = self.reshape_image(np.asarray(augmented["image"]))
+        
+        # optionally thicken the ground truth with binary dilation
+        if self.config.get_parameter("use_binary_dilation_after_augmentation") is True:
+            from skimage.morphology import binary_dilation, disk
+            self.aug_ground_truth[i] = self.reshape_image(binary_dilation(augmented["mask"].astype(bool), disk(self.config.get_parameter("disk_size"))))
+        else:
+            self.aug_ground_truth[i] = self.reshape_image(augmented["mask"].astype(bool))
+
+    self.aug_images = np.stack(self.aug_images, axis = 0)
+    self.aug_ground_truth = np.stack(self.aug_ground_truth, axis = 0)
+    
+    mean = self.aug_images.mean()
+    std = self.aug_images.std()
+    
+    self.config.update_parameter(["images","mean"], float(mean))
+    self.config.update_parameter(["images","std"], float(std))
+    
+    print("Augmentations complete!")
+
+
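Illustration of the size bookkeeping (hypothetical numbers; ds stands for a configured Dataset instance): with 20 loaded tiles and num_augmented_images: 5 in the config, the lists are replicated to 100 entries before augmentation:

ds.augment_images()
print(ds.aug_images.shape[0])          # 100 == 20 * 5 (hypothetical counts)
print(ds.aug_ground_truth.shape[0])    # 100, one mask per image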
+
+def augmentations(self, p=None)
+
+

Generates list of augmentations using parameters obtained from config file

+

Parameters

+
+
p : float, optional
+
probability to apply any augmentations to image
+
+

Returns

+
+
function
+
function used to augment images
+
+
def augmentations(self, p = None):
+    """Generates list of augmentations using parameters obtained from config file
+    
+    Parameters
+    ----------
+    p : float, optional
+        probability to apply any augmentations to image
+    
+    Returns
+    ----------
+    function
+        function used to augment images
+    """
+    from albumentations import (
+        RandomCrop, HorizontalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90,
+        Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, ElasticTransform,
+        IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur,
+        IAASharpen, RandomBrightnessContrast, Flip, OneOf, Compose
+    )
+    
+    augmentation_list = []
+    
+    if self.config.get_parameter("random_rotate") is True:
+        augmentation_list.append(RandomRotate90(p = self.config.get_parameter("random_rotate_p"))) # 0.9
+    
+    if self.config.get_parameter("flip") is True:
+        augmentation_list.append(Flip())
+        
+    if self.config.get_parameter("transpose") is True:
+        augmentation_list.append(Transpose())
+        
+    if self.config.get_parameter("blur_group") is True:
+        blur_augmentation = []
+        if self.config.get_parameter("motion_blur") is True:
+            blur_augmentation.append(MotionBlur(p = self.config.get_parameter("motion_blur_p")))
+        if self.config.get_parameter("median_blur") is True:
+            blur_augmentation.append(MedianBlur(blur_limit = self.config.get_parameter("median_blur_limit"), p = self.config.get_parameter("median_blur_p")))
+        if self.config.get_parameter("blur") is True:
+            blur_augmentation.append(Blur(blur_limit = self.config.get_parameter("blur_limit"), p = self.config.get_parameter("blur_p")))
+        augmentation_list.append(OneOf(blur_augmentation, p = self.config.get_parameter("blur_group_p"))) 
+        
+    if self.config.get_parameter("shift_scale_rotate") is True:
+        augmentation_list.append(ShiftScaleRotate(shift_limit = self.config.get_parameter("shift_limit"),
+                                                  scale_limit = self.config.get_parameter("scale_limit"),
+                                                  rotate_limit = self.config.get_parameter("rotate_limit"),
+                                                  p = self.config.get_parameter("shift_scale_rotate_p")))
+    if self.config.get_parameter("distortion_group") is True:
+        distortion_augmentation = []
+        if self.config.get_parameter("optical_distortion") is True:
+            distortion_augmentation.append(OpticalDistortion(p = self.config.get_parameter("optical_distortion_p")))
+        if self.config.get_parameter("elastic_transform") is True:
+            distortion_augmentation.append(ElasticTransform(p = self.config.get_parameter("elastic_transform_p")))
+        if self.config.get_parameter("grid_distortion") is True:
+            distortion_augmentation.append(GridDistortion(p = self.config.get_parameter("grid_distortion_p")))
+        
+        augmentation_list.append(OneOf(distortion_augmentation, p = self.config.get_parameter("distortion_group_p")))
+    
+    if self.config.get_parameter("brightness_contrast_group") is True:
+        contrast_augmentation = []
+        if self.config.get_parameter("clahe") is True:
+            contrast_augmentation.append(CLAHE())
+        if self.config.get_parameter("sharpen") is True:
+            contrast_augmentation.append(IAASharpen())
+        if self.config.get_parameter("random_brightness_contrast") is True:
+            contrast_augmentation.append(RandomBrightnessContrast())
+       
+        augmentation_list.append(OneOf(contrast_augmentation, p = self.config.get_parameter("brightness_contrast_group_p")))
+        
+    augmentation_list.append(RandomCrop(self.config.get_parameter("tile_size")[0], self.config.get_parameter("tile_size")[1], always_apply=True))
+    
+    return Compose(augmentation_list, p = p)
+
+
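A short sketch of applying the returned pipeline (assuming a configured Dataset instance ds; the arrays are placeholders, not repository data):

import numpy as np

image = np.zeros((256, 256), dtype=np.float32)   # placeholder image
mask = np.zeros((256, 256), dtype=np.uint8)      # placeholder ground truth

augmentor = ds.augmentations(p=0.9)
augmented = augmentor(image=image, mask=mask)    # same keyword convention as augment_images
aug_image, aug_mask = augmented["image"], augmented["mask"]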
+
+def get_class_id(self, class_name)
+
+

Returns the class id and adds class to list if not in list of classes.

+

Parameters

+
+
class_name : str
+
Identity of class that will be associated with the class id
+
+

Returns

+
+
int
+
Class id
+
+
def get_class_id(self, class_name):
+    """Returns the class id and adds class to list if not in list of classes.
+
+    Parameters
+    ----------
+    class_name : str
+        Identity of class that will be associated with the class id
+        
+    Returns
+    ----------
+    int
+        Class id
+    """
+    
+    if len(self.classes) == 0:
+        self.classes.append({"class": class_name, "id": 0})
+        return 0
+    
+    for class_info in self.classes:
+        # if class exist, return class id
+        if class_info["class"] == class_name:
+            return class_info["id"]
+
+    self.classes.append({"class": class_name, "id": len(self.classes)-1})
+    return len(self.classes)-1
+
+
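A quick behavioural sketch (assuming a constructed Dataset subclass instance ds; class names are hypothetical):

ds.classes = []               # start from an empty class list
ds.get_class_id("nucleus")    # -> 0, first class registered
ds.get_class_id("membrane")   # -> 1, new class appended
ds.get_class_id("nucleus")    # -> 0, existing class looked up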
+
+def load_dataset(self, dataset_dir=None, tiled=False)
+
+

Loads dataset from dataset_dir

+

Parameters

+
+
dataset_dir : str or None, optional
+
Folder to load the dataset from. If None, dataset_dir is obtained from the config file
+
tiled : bool, optional
+
To set if tiling function is used
+
+
def load_dataset(self, dataset_dir = None, tiled = False):
+    """Loads dataset from ``dataset_dir``
+
+    Parameters
+    ----------
+    dataset_dir : str or None, optional
+        Folder to load the dataset from. If None, ``dataset_dir`` is obtained from the config file
+        
+    tiled : bool, optional
+        To set if tiling function is used
+    """
+    
+    # update dataset_dir if specified. If not, load dataset_dir from config file
+    if dataset_dir is None:
+        dataset_dir = self.config.get_parameter("dataset_dir")
+    else:
+        self.config.update_parameter(self.config.find_key("dataset_dir"), dataset_dir)
+    
+    image_dirs = next(os.walk(dataset_dir))[1]
+    image_dirs = [f for f in image_dirs if not f[0] == '.']
+    
+    for img_dir in image_dirs:
+        # images
+        image = self.load_image(os.path.join(dataset_dir, img_dir), subfolder = self.config.get_parameter("image_subfolder"))
+        
+        # percentile normalization
+        if self.config.get_parameter("percentile_normalization"):
+            image, _, _ = self.percentile_normalization(image, in_bound = self.config.get_parameter("percentile"))
+        
+        if tiled is True:
+            tile_image_list, num_rows, num_cols, padding = self.tile_image(image, self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size"))
+            self.config.update_parameter(["images","num_rows"], num_rows)
+            self.config.update_parameter(["images","num_cols"], num_cols)
+            self.config.update_parameter(["images","padding"], padding)
+            self.train_images.extend(tile_image_list)
+        else:
+            self.train_images.extend([image,])
+        
+        #ground_truth
+        ground_truth, class_id = self.load_ground_truth(os.path.join(dataset_dir, img_dir), subfolder = self.config.get_parameter("ground_truth_subfolder"))
+        if tiled is True:
+            tile_ground_truth_list, _, _, _ = self.tile_image(ground_truth[0], self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size"))
+            self.train_ground_truth.extend(tile_ground_truth_list)
+        else:
+            self.train_ground_truth.extend(ground_truth)
+
+
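A hypothetical call (the path is illustrative; the directory is expected to contain one folder per image, each with the configured image and ground-truth subfolders):

ds.load_dataset(dataset_dir="/data/Train", tiled=True)
print(len(ds.train_images), len(ds.train_ground_truth))   # equal counts of tiles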
+
+def sanity_check(self, image_index)
+
+

Plots the augmented image and ground_truth to check if everything is ok.

+

Parameters

+
+
image_index : int
+
Index of the image and its corresponding ground_truth
+
+
def sanity_check(self, image_index):
+    """Plots the augmented image and ground_truth to check if everything is ok.
+
+    Parameters
+    ----------
+    image_index : int
+        Index of the image and its corresponding ground_truth
+    """
+    
+    image = self.aug_images[image_index][:,:,0]
+    ground_truth = self.aug_ground_truth[image_index][:,:,0]
+
+    plt.figure(figsize=(14, 14))
+    plt.axis('off')
+    plt.imshow(image, cmap='gray', 
+               norm=None, interpolation=None)
+    plt.show()
+
+    plt.figure(figsize=(14, 14))
+    plt.axis('off')
+    plt.imshow(ground_truth, cmap='gray', 
+               norm=None, interpolation=None)
+    plt.show()
+
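A quick hypothetical check, assuming augmentation has already populated `aug_images` and `aug_ground_truth`:

dataset.sanity_check(image_index=0)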
+
+
+

Inherited members

+ +
+
+
+
+ +
+ + + + + \ No newline at end of file diff --git a/html/models/internals/image_functions.html b/html/models/internals/image_functions.html new file mode 100644 index 0000000..5cd254f --- /dev/null +++ b/html/models/internals/image_functions.html @@ -0,0 +1,1340 @@ + + + + + + +models.internals.image_functions API documentation + + + + + + + + + +
+
+
+

Module models.internals.image_functions

+
+
+
+ +Expand source code + +
import os
+import glob
+import sys
+import warnings
+
+import math
+import numpy as np
+
+#TODO: change to cv2?
+import skimage
+import skimage.io as skio
+
+class Image_Functions():
+    def list_images(self, image_dir, image_ext = '*.tif'):
+        """List images in the directory with the given file extension
+
+        Parameters
+        ----------
+        image_dir : `str`
+            Directory to look for image files
+        image_ext : `str`, optional
+            [Default: '*.tif'] File extension of the image file
+            
+        Returns
+        ----------
+        image_list : `list`
+            List of images found in the directory with the given file extension
+            
+        Notes
+        ----------
+        For Linux-based systems, please ensure that the file extensions are either all lowercase or all uppercase.
+        """
+        # to bypass case sensitivity of file extensions on Linux and possibly other systems
+        if sys.platform in ["win32",]:
+            image_extension = [image_ext]
+        else:
+            image_extension = [image_ext.lower(),image_ext.upper()]
+        
+        image_list = []
+        for ext in image_extension:
+            image_list.extend(glob.glob(os.path.join(image_dir,ext)))
+            
+        return image_list
+    
+    #######################
+    # Image IO functions
+    #######################
+    def load_image(self, image_path, subfolder = 'Images', image_index = 0, image_ext = '*.tif'):
+        """Loads images found in ``image_path``
+
+        Parameters
+        ----------
+        image_path : `str`
+            Path to look for image files
+        subfolder : `str`, optional
+            [Default: 'Images'] Subfolder in which to look for the image files
+        image_index : `int`, optional
+            [Default: 0] Index of image to load
+        image_ext : `str`, optional
+            [Default: '*.tif'] File extension of the image file
+            
+        Returns
+        ----------
+        image : `array_like`
+            Loaded image
+            
+        Notes
+        ----------
+        Only one image from each directory is loaded.
+        """
+        if os.path.isdir(image_path) is True:
+            image_list = self.list_images(os.path.join(image_path, subfolder), image_ext = image_ext)
+            if len(image_list) > 1:
+                warnings.warn("More than 1 image found in directory. Loading {}".format(image_list[image_index]))
+            # Load image
+            image = skio.imread(image_list[image_index])
+        else:
+            image = skio.imread(image_path)
+            
+        return image
+        
+    def load_ground_truth(self, image_path, subfolder = 'Masks', image_ext = '*.tif'):
+        """Loads ground truth images found in ``image_path`` and performs erosion/dilation/inversion if needed
+
+        Parameters
+        ----------
+        image_path : `str`
+            Path to look for ground truth images
+        subfolder : `str`, optional
+            [Default: 'Masks'] Subfolder in which to look for the ground truth images
+        image_ext : `str`, optional
+            [Default: '*.tif'] File extension of ground truth image file
+
+        Returns
+        ----------
+        output_ground_truth : `list`
+            List of ground truth images found in the directory with the given file extension
+            
+        class_ids : `list`
+            List of class ids of the ground truth images
+        """
+        image_list = self.list_images(os.path.join(image_path, subfolder), image_ext = image_ext)
+        
+        output_ground_truth = []
+        class_ids = []
+        
+        for ground_truth_path in image_list:
+            # add class if not in list
+            ground_truth_name = os.path.basename(ground_truth_path) # works for both Windows and Linux paths
+            class_name = ground_truth_name.split('_')[0]
+            
+            # obtain class_id
+            class_ids.append(self.get_class_id(class_name))
+            
+            # Load image
+            ground_truth_img = skio.imread(ground_truth_path)
+            
+            # perform erosion so that the borders will still be there after augmentation
+            if self.config.get_parameter("use_binary_erosion") is True:
+                from skimage.morphology import binary_erosion, disk
+                # sets dtype back to unsigned integer in order for some augmentations to work
+                ground_truth_dtype = ground_truth_img.dtype
+                ground_truth_img = binary_erosion(ground_truth_img, disk(self.config.get_parameter("disk_size")))
+                ground_truth_img = ground_truth_img.astype(ground_truth_dtype)
+            
+            if self.config.get_parameter("use_binary_dilation") is True:
+                from skimage.morphology import binary_dilation, disk
+                ground_truth_dtype = ground_truth_img.dtype
+                ground_truth_img = binary_dilation(ground_truth_img, disk(self.config.get_parameter("disk_size")))
+                ground_truth_img = ground_truth_img.astype(ground_truth_dtype)
+            
+            # perform inversion of ground_truth if needed
+            if self.config.get_parameter("invert_ground_truth") is True:
+                ground_truth_img = skimage.util.invert(ground_truth_img)
+                
+            output_ground_truth.append(ground_truth_img)
+            
+        return output_ground_truth, class_ids
+    
+    def reshape_image(self, image):
+        """Reshapes the image to the correct dimensions for Unet
+
+        Parameters
+        ----------
+        image : `array_like`
+            Image to be reshaped
+
+        Returns
+        ----------
+        image : `array_like`
+            Reshaped image 
+        """
+        h, w = image.shape[:2]
+        image = np.reshape(image, (h, w, -1))
+        return image
+    
+    #######################
+    # Image padding
+    #######################
+    def pad_image(self, image, image_size, mode = 'constant'):
+        """Pad image to specified image_size
+
+        Parameters
+        ----------
+        image : `array_like`
+            Image to be padded
+        image_size : `list`
+            Final size of padded image
+        mode : `str`, optional
+            [Default: 'constant'] Mode to pad the image
+
+        Returns
+        ----------
+        image : `array_like`
+            Padded image
+            
+        padding : `list`
+            List containing the number of pixels padded to each direction
+        """
+        h, w = image.shape[:2]
+        
+        top_pad = (image_size[0] - h) // 2
+        bottom_pad = image_size[0] - h - top_pad
+            
+        left_pad = (image_size[1] - w) // 2
+        right_pad = image_size[1] - w - left_pad
+
+        padding = ((top_pad, bottom_pad), (left_pad, right_pad))
+        if mode == 'constant':
+            image = np.pad(image, padding, mode = mode, constant_values=0)
+        else:
+            # numpy only accepts constant_values for 'constant' padding
+            image = np.pad(image, padding, mode = mode)
+        
+        return image, padding
+    
+    def remove_pad_image(self, image, padding):
+        """Removes pad from image
+
+        Parameters
+        ----------
+        image : `array_like`
+            Padded image
+        padding : `list`
+            List containing the number of padded pixels in each direction
+
+        Returns
+        ----------
+        image : `array_like`
+            Image without padding
+        """
+        
+        h, w = image.shape[:2]
+        
+        return image[padding[0][0]:h-padding[0][1], padding[1][0]:w-padding[1][1]]
+    
+    #######################
+    # Tiling functions
+    #######################
+    def tile_image(self, image, tile_size, tile_overlap_size):
+        """Converts an image into a list of tiled images
+
+        Parameters
+        ----------
+        image : `array_like`
+            Image to be tiled
+        tile_size : `list`
+            Size of each individual tile
+        tile_overlap_size : `list`
+            Amount of overlap (in pixels) between each tile
+
+        Returns
+        ----------
+        tile_image_list : `list`
+            List of tiled images
+        num_rows : `int`
+            Number of rows of tiles
+        num_cols : `int`
+            Number of columns of tiles
+        padding : `list`
+            Padding applied to the image before tiling
+        """
+        image_height, image_width = image.shape[:2]
+        tile_height = tile_size[0] - tile_overlap_size[0] * 2
+        tile_width = tile_size[1] - tile_overlap_size[1] * 2
+        
+        if image_height <= tile_height and image_width <= tile_width:
+            # pad up to a single tile so the return signature matches the tiled case
+            image, padding = self.pad_image(image, (tile_size[0], tile_size[1]))
+            return [self.reshape_image(image)], 1, 1, padding
+        
+        num_rows = math.ceil(image_height/tile_height)
+        num_cols = math.ceil(image_width/tile_width)
+        num_tiles = num_rows*num_cols
+        
+        
+        # pad image to fit tile size
+        image, padding = self.pad_image(image, (tile_height*num_rows + tile_overlap_size[0] * 2, tile_width*num_cols + tile_overlap_size[1]*2))
+        
+        tile_image_list = []
+        
+        for tile_no in range(num_tiles):
+            tile_x_start = (tile_no // num_rows) * tile_width
+            tile_x_end = tile_x_start + tile_size[1]
+            
+            tile_y_start = (tile_no % num_rows) * tile_height
+            tile_y_end = tile_y_start + tile_size[0]
+            
+            tile_image = image[tile_y_start: tile_y_end, tile_x_start:tile_x_end]
+            
+            # ensure input into unet is of correct shape
+            tile_image = self.reshape_image(tile_image)
+            
+            tile_image_list.append(tile_image)
+            
+        return tile_image_list, num_rows, num_cols, padding
+    
+    def untile_image(self, tile_list, tile_size, tile_overlap_size, num_rows, num_cols, padding): 
+        """Stitches a list of tiled images back into a single image
+
+        Parameters
+        ----------
+        tile_list : `list`
+            List of tiled images
+        tile_size : `list`
+            Size of each individual tile
+        tile_overlap_size : `list`
+            Amount of overlap (in pixels) between each tile
+        num_rows : `int`
+            Number of rows of tiles
+        num_cols : `int`
+            Number of cols of tiles
+        padding : `list`
+            Amount of padding used during tiling
+
+        Returns
+        ----------
+        image : `array_like`
+            Image without padding
+        """
+        if num_rows == 1 and num_cols == 1:
+            image = tile_list[0]
+            
+            if padding is not None:
+                image = self.remove_pad_image(image, padding = padding)
+                
+            return image
+              
+        tile_height = tile_size[0] - tile_overlap_size[0] * 2
+        tile_width = tile_size[1] - tile_overlap_size[1] * 2
+        
+        num_tiles = num_rows*num_cols
+        
+        for col in range(num_cols):
+            for row in range(num_rows):
+                tile_image = tile_list[num_rows*col + row][:,:,0]
+                tile_image = tile_image[tile_overlap_size[0]:min(-tile_overlap_size[0],-1),tile_overlap_size[1]:min(-tile_overlap_size[1],-1)]
+                if row == 0:
+                    image_col = np.array(tile_image)
+                else:
+                    image_col = np.vstack((image_col, tile_image))
+            
+            if col == 0:
+                image = image_col
+            else:
+                image = np.hstack((image, image_col))
+        
+        image, _ = self.pad_image(image, image_size = (tile_height * num_rows + tile_overlap_size[0] * 2, tile_width * num_cols + tile_overlap_size[1]*2))
+        
+        if padding is not None:
+            image = self.remove_pad_image(image, padding = padding)
+            
+        return image
+    
+    
+    #######################
+    # Image normalization
+    #######################
+    def percentile_normalization(self, image, in_bound=[3, 99.8]):
+        """Performs percentile normalization on the image
+
+        Parameters
+        ----------
+        image : `array_like`
+            Image to be normalized
+        in_bound : `list`
+            [Default: [3, 99.8]] Lower and upper percentiles used to normalize the image
+
+        Returns
+        ----------
+        image : `array_like`
+            Normalized image
+            
+        image_min : `float`
+            Value of ``image`` at the lower percentile
+            
+        image_max : `float`
+            Value of ``image`` at the upper percentile
+        """
+        image_min = np.percentile(image, in_bound[0])
+        image_max = np.percentile(image, in_bound[1])
+        image = (image - image_min)/(image_max - image_min)
+
+        return image, image_min, image_max
+
+
+
+
+
+
+
+
+
+

Classes

+
+
+class Image_Functions +(*args, **kwargs) +
+
+
+
+ +Expand source code + +
class Image_Functions():
+    def list_images(self, image_dir, image_ext = '*.tif'):
+        """List images in the directory with the given file extension
+
+        Parameters
+        ----------
+        image_dir : `str`
+            Directory to look for image files
+        image_ext : `str`, optional
+            [Default: '*.tif'] File extension of the image file
+            
+        Returns
+        ----------
+        image_list : `list`
+            List of images found in the directory with the given file extension
+            
+        Notes
+        ----------
+        For Linux-based systems, please ensure that the file extensions are either all lowercase or all uppercase.
+        """
+        # to bypass case sensitivity of file extensions on Linux and possibly other systems
+        if sys.platform in ["win32",]:
+            image_extension = [image_ext]
+        else:
+            image_extension = [image_ext.lower(),image_ext.upper()]
+        
+        image_list = []
+        for ext in image_extension:
+            image_list.extend(glob.glob(os.path.join(image_dir,ext)))
+            
+        return image_list
+    
+    #######################
+    # Image IO functions
+    #######################
+    def load_image(self, image_path, subfolder = 'Images', image_index = 0, image_ext = '*.tif'):
+        """Loads images found in ``image_path``
+
+        Parameters
+        ----------
+        image_path : `str`
+            Path to look for image files
+        subfolder : `str`, optional
+            [Default: 'Images'] Subfolder in which to look for the image files
+        image_index : `int`, optional
+            [Default: 0] Index of image to load
+        image_ext : `str`, optional
+            [Default: '*.tif'] File extension of the image file
+            
+        Returns
+        ----------
+        image : `array_like`
+            Loaded image
+            
+        Notes
+        ----------
+        Only one image from each directory is loaded.
+        """
+        if os.path.isdir(image_path) is True:
+            image_list = self.list_images(os.path.join(image_path, subfolder), image_ext = image_ext)
+            if len(image_list) > 1:
+                warnings.warn("More than 1 image found in directory. Loading {}".format(image_list[image_index]))
+            # Load image
+            image = skio.imread(image_list[image_index])
+        else:
+            image = skio.imread(image_path)
+            
+        return image
+        
+    def load_ground_truth(self, image_path, subfolder = 'Masks', image_ext = '*.tif'):
+        """Loads ground truth images found in ``image_path`` and performs erosion/dilation/inversion if needed
+
+        Parameters
+        ----------
+        image_path : `str`
+            Path to look for ground truth images
+        subfolder : `str`, optional
+            [Default: 'Masks'] Subfolder in which to look for the ground truth images
+        image_ext : `str`, optional
+            [Default: '*.tif'] File extension of ground truth image file
+
+        Returns
+        ----------
+        output_ground_truth : `list`
+            List of ground truth images found in the directory with the given file extension
+            
+        class_ids : `list`
+            List of class ids of the ground truth images
+        """
+        image_list = self.list_images(os.path.join(image_path, subfolder), image_ext = image_ext)
+        
+        output_ground_truth = []
+        class_ids = []
+        
+        for ground_truth_path in image_list:
+            # add class if not in list
+            ground_truth_name = os.path.basename(ground_truth_path) # works for both Windows and Linux paths
+            class_name = ground_truth_name.split('_')[0]
+            
+            # obtain class_id
+            class_ids.append(self.get_class_id(class_name))
+            
+            # Load image
+            ground_truth_img = skio.imread(ground_truth_path)
+            
+            # perform erosion so that the borders will still be there after augmentation
+            if self.config.get_parameter("use_binary_erosion") is True:
+                from skimage.morphology import binary_erosion, disk
+                # sets dtype back to unsigned integer in order for some augmentations to work
+                ground_truth_dtype = ground_truth_img.dtype
+                ground_truth_img = binary_erosion(ground_truth_img, disk(self.config.get_parameter("disk_size")))
+                ground_truth_img = ground_truth_img.astype(ground_truth_dtype)
+            
+            if self.config.get_parameter("use_binary_dilation") is True:
+                from skimage.morphology import binary_dilation, disk
+                ground_truth_dtype = ground_truth_img.dtype
+                ground_truth_img = binary_dilation(ground_truth_img, disk(self.config.get_parameter("disk_size")))
+                ground_truth_img = ground_truth_img.astype(ground_truth_dtype)
+            
+            # perform inversion of ground_truth if needed
+            if self.config.get_parameter("invert_ground_truth") is True:
+                ground_truth_img = skimage.util.invert(ground_truth_img)
+                
+            output_ground_truth.append(ground_truth_img)
+            
+        return output_ground_truth, class_ids
+    
+    def reshape_image(self, image):
+        """Reshapes the image to the correct dimensions for Unet
+
+        Parameters
+        ----------
+        image : `array_like`
+            Image to be reshaped
+
+        Returns
+        ----------
+        image : `array_like`
+            Reshaped image 
+        """
+        h, w = image.shape[:2]
+        image = np.reshape(image, (h, w, -1))
+        return image
+    
+    #######################
+    # Image padding
+    #######################
+    def pad_image(self, image, image_size, mode = 'constant'):
+        """Pad image to specified image_size
+
+        Parameters
+        ----------
+        image : `array_like`
+            Image to be padded
+        image_size : `list`
+            Final size of padded image
+        mode : `str`, optional
+            [Default: 'constant'] Mode to pad the image
+
+        Returns
+        ----------
+        image : `array_like`
+            Padded image
+            
+        padding : `list`
+            List containing the number of pixels padded to each direction
+        """
+        h, w = image.shape[:2]
+        
+        top_pad = (image_size[0] - h) // 2
+        bottom_pad = image_size[0] - h - top_pad
+            
+        left_pad = (image_size[1] - w) // 2
+        right_pad = image_size[1] - w - left_pad
+
+        padding = ((top_pad, bottom_pad), (left_pad, right_pad))
+        if mode == 'constant':
+            image = np.pad(image, padding, mode = mode, constant_values=0)
+        else:
+            # numpy only accepts constant_values for 'constant' padding
+            image = np.pad(image, padding, mode = mode)
+        
+        return image, padding
+    
+    def remove_pad_image(self, image, padding):
+        """Removes pad from image
+
+        Parameters
+        ----------
+        image : `array_like`
+            Padded image
+        padding : `list`
+            List containing the number of padded pixels in each direction
+
+        Returns
+        ----------
+        image : `array_like`
+            Image without padding
+        """
+        
+        h, w = image.shape[:2]
+        
+        return image[padding[0][0]:h-padding[0][1], padding[1][0]:w-padding[1][1]]
+    
+    #######################
+    # Tiling functions
+    #######################
+    def tile_image(self, image, tile_size, tile_overlap_size):
+        """Converts an image into a list of tiled images
+
+        Parameters
+        ----------
+        image : `array_like`
+            Image to be tiled
+        tile_size : `list`
+            Size of each individual tile
+        tile_overlap_size : `list`
+            Amount of overlap (in pixels) between each tile
+
+        Returns
+        ----------
+        tile_image_list : `list`
+            List of tiled images
+        num_rows : `int`
+            Number of rows of tiles
+        num_cols : `int`
+            Number of columns of tiles
+        padding : `list`
+            Padding applied to the image before tiling
+        """
+        image_height, image_width = image.shape[:2]
+        tile_height = tile_size[0] - tile_overlap_size[0] * 2
+        tile_width = tile_size[1] - tile_overlap_size[1] * 2
+        
+        if image_height <= tile_height and image_width <= tile_width:
+            # pad up to a single tile so the return signature matches the tiled case
+            image, padding = self.pad_image(image, (tile_size[0], tile_size[1]))
+            return [self.reshape_image(image)], 1, 1, padding
+        
+        num_rows = math.ceil(image_height/tile_height)
+        num_cols = math.ceil(image_width/tile_width)
+        num_tiles = num_rows*num_cols
+        
+        
+        # pad image to fit tile size
+        image, padding = self.pad_image(image, (tile_height*num_rows + tile_overlap_size[0] * 2, tile_width*num_cols + tile_overlap_size[1]*2))
+        
+        tile_image_list = []
+        
+        for tile_no in range(num_tiles):
+            tile_x_start = (tile_no // num_rows) * tile_width
+            tile_x_end = tile_x_start + tile_size[1]
+            
+            tile_y_start = (tile_no % num_rows) * tile_height
+            tile_y_end = tile_y_start + tile_size[0]
+            
+            tile_image = image[tile_y_start: tile_y_end, tile_x_start:tile_x_end]
+            
+            # ensure input into unet is of correct shape
+            tile_image = self.reshape_image(tile_image)
+            
+            tile_image_list.append(tile_image)
+            
+        return tile_image_list, num_rows, num_cols, padding
+    
+    def untile_image(self, tile_list, tile_size, tile_overlap_size, num_rows, num_cols, padding): 
+        """Stitches a list of tiled images back into a single image
+
+        Parameters
+        ----------
+        tile_list : `list`
+            List of tiled images
+        tile_size : `list`
+            Size of each individual tile
+        tile_overlap_size : `list`
+            Amount of overlap (in pixels) between each tile
+        num_rows : `int`
+            Number of rows of tiles
+        num_cols : `int`
+            Number of cols of tiles
+        padding : `list`
+            Amount of padding used during tiling
+
+        Returns
+        ----------
+        image : `array_like`
+            Image without padding
+        """
+        if num_rows == 1 and num_cols == 1:
+            image = tile_list[0]
+            
+            if padding is not None:
+                image = self.remove_pad_image(image, padding = padding)
+                
+            return image
+              
+        tile_height = tile_size[0] - tile_overlap_size[0] * 2
+        tile_width = tile_size[1] - tile_overlap_size[1] * 2
+        
+        num_tiles = num_rows*num_cols
+        
+        for col in range(num_cols):
+            for row in range(num_rows):
+                tile_image = tile_list[num_rows*col + row][:,:,0]
+                tile_image = tile_image[tile_overlap_size[0]:min(-tile_overlap_size[0],-1),tile_overlap_size[1]:min(-tile_overlap_size[1],-1)]
+                if row == 0:
+                    image_col = np.array(tile_image)
+                else:
+                    image_col = np.vstack((image_col, tile_image))
+            
+            if col == 0:
+                image = image_col
+            else:
+                image = np.hstack((image, image_col))
+        
+        image, _ = self.pad_image(image, image_size = (tile_height * num_rows + tile_overlap_size[0] * 2, tile_width * num_cols + tile_overlap_size[1]*2))
+        
+        if padding is not None:
+            image = self.remove_pad_image(image, padding = padding)
+            
+        return image
+    
+    
+    #######################
+    # Image normalization
+    #######################
+    def percentile_normalization(self, image, in_bound=[3, 99.8]):
+        """Performs percentile normalization on the image
+
+        Parameters
+        ----------
+        image : `array_like`
+            Image to be normalized
+        in_bound : `list`
+            [Default: [3, 99.8]] Lower and upper percentiles used to normalize the image
+
+        Returns
+        ----------
+        image : `array_like`
+            Normalized image
+            
+        image_min : `float`
+            Value of ``image`` at the lower percentile
+            
+        image_max : `float`
+            Value of ``image`` at the upper percentile
+        """
+        image_min = np.percentile(image, in_bound[0])
+        image_max = np.percentile(image, in_bound[1])
+        image = (image - image_min)/(image_max - image_min)
+
+        return image, image_min, image_max
+
+

Subclasses

+ +

Methods

+
+
+def list_images(self, image_dir, image_ext='*.tif') +
+
+

List images in the directory with the given file extension

+

Parameters

+
+
image_dir : str
+
Directory to look for image files
+
image_ext : str, optional
+
[Default: '*.tif'] File extension of the image file
+
+

Returns

+
+
image_list : list
+
List of images found in the directory with the given file extension
+
+

Notes

+

For Linux-based systems, please ensure that the file extensions are either all lowercase or all uppercase.

+
+ +Expand source code + +
def list_images(self, image_dir, image_ext = '*.tif'):
+    """List images in the directory with the given file extension
+
+    Parameters
+    ----------
+    image_dir : `str`
+        Directory to look for image files
+    image_ext : `str`, optional
+        [Default: '*.tif'] File extension of the image file
+        
+    Returns
+    ----------
+    image_list : `list`
+        List of images found in the directory with the given file extension
+        
+    Notes
+    ----------
+    For Linux-based systems, please ensure that the file extensions are either all lowercase or all uppercase.
+    """
+    # to bypass case sensitivity of file extensions on Linux and possibly other systems
+    if sys.platform in ["win32",]:
+        image_extension = [image_ext]
+    else:
+        image_extension = [image_ext.lower(),image_ext.upper()]
+    
+    image_list = []
+    for ext in image_extension:
+        image_list.extend(glob.glob(os.path.join(image_dir,ext)))
+        
+    return image_list
+
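For illustration, a short sketch of calling this helper directly; `Image_Functions` defines no `__init__`, so a bare instance works, and the directory path is a placeholder:

funcs = Image_Functions()
# on non-Windows platforms this matches both '*.tif' and '*.TIF'
image_paths = funcs.list_images('/data/Train/sample01/Images', image_ext='*.tif')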
+
+
+def load_ground_truth(self, image_path, subfolder='Masks', image_ext='*.tif') +
+
+

Loads ground truth images found in image_path and performs erosion/dilation/inversion if needed

+

Parameters

+
+
image_path : str
+
Path to look for ground truth images
+
subfolder : str, optional
+
[Default: 'Masks'] Subfolder in which to look for the ground truth images
+
image_ext : str, optional
+
[Default: '*.tif'] File extension of ground truth image file
+
+

Returns

+
+
output_ground_truth : list
+
List of ground truth images found in the directory with the given file extension
+
class_ids : list
+
List of class ids of the ground truth images
+
+
+ +Expand source code + +
def load_ground_truth(self, image_path, subfolder = 'Masks', image_ext = '*.tif'):
+    """Loads ground truth images found in ``image_path`` and performs erosion/dilation/inversion if needed
+
+    Parameters
+    ----------
+    image_path : `str`
+        Path to look for ground truth images
+    subfolder : `str`, optional
+        [Default: 'Masks'] Subfolder in which to look for the ground truth images
+    image_ext : `str`, optional
+        [Default: '*.tif'] File extension of ground truth image file
+
+    Returns
+    ----------
+    output_ground_truth : `list`
+        List of ground truth images found in the directory with the given file extension
+        
+    class_ids : `list`
+        List of class ids of the ground truth images
+    """
+    image_list = self.list_images(os.path.join(image_path, subfolder), image_ext = image_ext)
+    
+    output_ground_truth = []
+    class_ids = []
+    
+    for ground_truth_path in image_list:
+        # add class if not in list
+        ground_truth_name = os.path.basename(ground_truth_path) # works for both Windows and Linux paths
+        class_name = ground_truth_name.split('_')[0]
+        
+        # obtain class_id
+        class_ids.append(self.get_class_id(class_name))
+        
+        # Load image
+        ground_truth_img = skio.imread(ground_truth_path)
+        
+        # perform erosion so that the borders will still be there after augmentation
+        if self.config.get_parameter("use_binary_erosion") is True:
+            from skimage.morphology import binary_erosion, disk
+            # sets dtype back to unsigned integer in order for some augmentations to work
+            ground_truth_dtype = ground_truth_img.dtype
+            ground_truth_img = binary_erosion(ground_truth_img, disk(self.config.get_parameter("disk_size")))
+            ground_truth_img = ground_truth_img.astype(ground_truth_dtype)
+        
+        if self.config.get_parameter("use_binary_dilation") is True:
+            from skimage.morphology import binary_dilation, disk
+            ground_truth_dtype = ground_truth_img.dtype
+            ground_truth_img = binary_dilation(ground_truth_img, disk(self.config.get_parameter("disk_size")))
+            ground_truth_img = ground_truth_img.astype(ground_truth_dtype)
+        
+        # perform inversion of ground_truth if needed
+        if self.config.get_parameter("invert_ground_truth") is True:
+            ground_truth_img = skimage.util.invert(ground_truth_img)
+            
+        output_ground_truth.append(ground_truth_img)
+        
+    return output_ground_truth, class_ids
+
+
+
+def load_image(self, image_path, subfolder='Images', image_index=0, image_ext='*.tif') +
+
+

Loads images found in image_path

+

Parameters

+
+
image_path : str
+
Path to look for image files
+
subfolder : str, optional
+
[Default: 'Images'] Subfolder in which to look for the image files
+
image_index : int, optional
+
[Default: 0] Index of image to load
+
image_ext : str, optional
+
[Default: '*.tif'] File extension of the image file
+
+

Returns

+
+
image : array_like
+
Loaded image
+
+

Notes

+

Only one image from each directory is loaded.

+
+ +Expand source code + +
def load_image(self, image_path, subfolder = 'Images', image_index = 0, image_ext = '*.tif'):
+    """Loads images found in ``image_path``
+
+    Parameters
+    ----------
+    image_path : `str`
+        Path to look for image files
+    subfolder : `str`, optional
+        [Default: 'Images'] Subfolder in which to look for the image files
+    image_index : `int`, optional
+        [Default: 0] Index of image to load
+    image_ext : `str`, optional
+        [Default: '*.tif'] File extension of the image file
+        
+    Returns
+    ----------
+    image : `array_like`
+        Loaded image
+        
+    Notes
+    ----------
+    Only one image from each directory is loaded.
+    """
+    if os.path.isdir(image_path) is True:
+        image_list = self.list_images(os.path.join(image_path, subfolder), image_ext = image_ext)
+        if len(image_list) > 1:
+            warnings.warn("More than 1 image found in directory. Loading {}".format(image_list[image_index]))
+        # Load image
+        image = skio.imread(image_list[image_index])
+    else:
+        image = skio.imread(image_path)
+        
+    return image
+
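A hypothetical call following the Images-subfolder convention described above (placeholder path):

funcs = Image_Functions()
# loads the first *.tif found under '/data/Train/sample01/Images'; warns if more than one is present
image = funcs.load_image('/data/Train/sample01', subfolder='Images')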
+
+
+def pad_image(self, image, image_size, mode='constant') +
+
+

Pad image to specified image_size

+

Parameters

+
+
image : array_like
+
Image to be padded
+
image_size : list
+
Final size of padded image
+
mode : str, optional
+
[Default: 'constant'] Mode to pad the image
+
+

Returns

+
+
image : array_like
+
Padded image
+
padding : list
+
List containing the number of pixels padded to each direction
+
+
+ +Expand source code + +
def pad_image(self, image, image_size, mode = 'constant'):
+    """Pad image to specified image_size
+
+    Parameters
+    ----------
+    image : `array_like`
+        Image to be padded
+    image_size : `list`
+        Final size of padded image
+    mode : `str`, optional
+        [Default: 'constant'] Mode to pad the image
+
+    Returns
+    ----------
+    image : `array_like`
+        Padded image
+        
+    padding : `list`
+        List containing the number of pixels padded to each direction
+    """
+    h, w = image.shape[:2]
+    
+    top_pad = (image_size[0] - h) // 2
+    bottom_pad = image_size[0] - h - top_pad
+        
+    left_pad = (image_size[1] - w) // 2
+    right_pad = image_size[1] - w - left_pad
+
+    padding = ((top_pad, bottom_pad), (left_pad, right_pad))
+    if mode == 'constant':
+        image = np.pad(image, padding, mode = mode, constant_values=0)
+    else:
+        # numpy only accepts constant_values for 'constant' padding
+        image = np.pad(image, padding, mode = mode)
+    
+    return image, padding
+
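A small self-contained sketch of the padding round trip (remove_pad_image is documented below):

import numpy as np

funcs = Image_Functions()
image = np.zeros((100, 120))
padded, padding = funcs.pad_image(image, image_size=(128, 128))
# padded.shape == (128, 128); padding == ((14, 14), (4, 4))
restored = funcs.remove_pad_image(padded, padding)
# restored.shape == (100, 120)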
+
+
+def percentile_normalization(self, image, in_bound=[3, 99.8]) +
+
+

Performs percentile normalization on the image

+

Parameters

+
+
image : array_like
+
Image to be normalized
+
in_bound : list
+
Lower and upper percentiles used to normalize the image
+
+

Returns

+
+
image : array_like
+
Normalized image
+
image_min : float
+
Value of image at the lower percentile
+
image_max : float
+
Value of image at the upper percentile
+
+
+ +Expand source code + +
def percentile_normalization(self, image, in_bound=[3, 99.8]):
+    """Performs percentile normalization on the image
+
+    Parameters
+    ----------
+    image : `array_like`
+        Image to be normalized
+    in_bound : `list`
+        [Default: [3, 99.8]] Lower and upper percentiles used to normalize the image
+
+    Returns
+    ----------
+    image : `array_like`
+        Normalized image
+        
+    image_min : `float`
+        Value of ``image`` at the lower percentile
+        
+    image_max : `float`
+        Value of ``image`` at the upper percentile
+    """
+    image_min = np.percentile(image, in_bound[0])
+    image_max = np.percentile(image, in_bound[1])
+    image = (image - image_min)/(image_max - image_min)
+
+    return image, image_min, image_max
+
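A small numeric sketch; note that pixels outside the chosen percentiles are not clipped, so normalized values can fall slightly outside [0, 1]:

import numpy as np

funcs = Image_Functions()
image = np.arange(10000, dtype=np.float32).reshape(100, 100)
norm, image_min, image_max = funcs.percentile_normalization(image, in_bound=[3, 99.8])
# image_min and image_max are the 3rd and 99.8th percentile values of `image`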
+
+
+def remove_pad_image(self, image, padding) +
+
+

Removes pad from image

+

Parameters

+
+
image : array_like
+
Padded image
+
padding : list
+
List containing the number of padded pixels in each direction
+
+

Returns

+
+
image : array_like
+
Image without padding
+
+
+ +Expand source code + +
def remove_pad_image(self, image, padding):
+    """Removes pad from image
+
+    Parameters
+    ----------
+    image : `array_like`
+        Padded image
+    padding : `list`
+        List containing the number of padded pixels in each direction
+
+    Returns
+    ----------
+    image : `array_like`
+        Image without padding
+    """
+    
+    h, w = image.shape[:2]
+    
+    return image[padding[0][0]:h-padding[0][1], padding[1][0]:w-padding[1][1]]
+
+
+
+def reshape_image(self, image) +
+
+

Reshapes the image to the correct dimensions for Unet

+

Parameters

+
+
image : array_like
+
Image to be reshaped
+
+

Returns

+
+
image : array_like
+
Reshaped image
+
+
+ +Expand source code + +
def reshape_image(self, image):
+    """Reshapes the image to the correct dimensions for Unet
+
+    Parameters
+    ----------
+    image : `array_like`
+        Image to be reshaped
+
+    Returns
+    ----------
+    image : `array_like`
+        Reshaped image 
+    """
+    h, w = image.shape[:2]
+    image = np.reshape(image, (h, w, -1))
+    return image
+
+
+
+def tile_image(self, image, tile_size, tile_overlap_size) +
+
+

Converts an image into a list of tiled images

+

Parameters

+
+
image : array_like
+
Image to be tiled
+
tile_size : list
+
Size of each individual tile
+
tile_overlap_size : list
+
Amount of overlap (in pixels) between each tile
+
+

Returns

+
+
tile_image_list : list
+
List of tiled images, returned together with num_rows, num_cols, and the padding applied before tiling
+
+
+ +Expand source code + +
def tile_image(self, image, tile_size, tile_overlap_size):
+    """Converts an image into a list of tiled images
+
+    Parameters
+    ----------
+    image : `array_like`
+        Image to be tiled
+    tile_size : `list`
+        Size of each individual tile
+    tile_overlap_size : `list`
+        Amount of overlap (in pixels) between each tile
+
+    Returns
+    ----------
+    tile_image_list : `list`
+        List of tiled images
+    num_rows : `int`
+        Number of rows of tiles
+    num_cols : `int`
+        Number of columns of tiles
+    padding : `list`
+        Padding applied to the image before tiling
+    """
+    image_height, image_width = image.shape[:2]
+    tile_height = tile_size[0] - tile_overlap_size[0] * 2
+    tile_width = tile_size[1] - tile_overlap_size[1] * 2
+    
+    if image_height <= tile_height and image_width <= tile_width:
+        # pad up to a single tile so the return signature matches the tiled case
+        image, padding = self.pad_image(image, (tile_size[0], tile_size[1]))
+        return [self.reshape_image(image)], 1, 1, padding
+    
+    num_rows = math.ceil(image_height/tile_height)
+    num_cols = math.ceil(image_width/tile_width)
+    num_tiles = num_rows*num_cols
+    
+    
+    # pad image to fit tile size
+    image, padding = self.pad_image(image, (tile_height*num_rows + tile_overlap_size[0] * 2, tile_width*num_cols + tile_overlap_size[1]*2))
+    
+    tile_image_list = []
+    
+    for tile_no in range(num_tiles):
+        tile_x_start = (tile_no // num_rows) * tile_width
+        tile_x_end = tile_x_start + tile_size[1]
+        
+        tile_y_start = (tile_no % num_rows) * tile_height
+        tile_y_end = tile_y_start + tile_size[0]
+        
+        tile_image = image[tile_y_start: tile_y_end, tile_x_start:tile_x_end]
+        
+        # ensure input into unet is of correct shape
+        tile_image = self.reshape_image(tile_image)
+        
+        tile_image_list.append(tile_image)
+        
+    return tile_image_list, num_rows, num_cols, padding
+
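A self-contained sketch of the tiling arithmetic (the effective stride per axis is tile_size - 2 * tile_overlap_size):

import numpy as np

funcs = Image_Functions()
image = np.random.rand(300, 400)
tiles, num_rows, num_cols, padding = funcs.tile_image(image, tile_size=[128, 128], tile_overlap_size=[16, 16])
# stride is 128 - 2*16 = 96, so num_rows == ceil(300/96) == 4 and num_cols == ceil(400/96) == 5
# each tile has shape (128, 128, 1) after reshape_image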
+
+
+def untile_image(self, tile_list, tile_size, tile_overlap_size, num_rows, num_cols, padding) +
+
+

Stitches a list of tiled images back into a single image

+

Parameters

+
+
tile_list : list
+
List of tiled images
+
tile_size : list
+
Size of each individual tile
+
tile_overlap_size : list
+
Amount of overlap (in pixels) between each tile
+
num_rows : int
+
Number of rows of tiles
+
num_cols : int
+
Number of cols of tiles
+
padding : list
+
Amount of padding used during tiling
+
+

Returns

+
+
image : array_like
+
Image without padding
+
+
+ +Expand source code + +
def untile_image(self, tile_list, tile_size, tile_overlap_size, num_rows, num_cols, padding): 
+    """Stitches a list of tiled images back into a single image
+
+    Parameters
+    ----------
+    tile_list : `list`
+        List of tiled images
+    tile_size : `list`
+        Size of each individual tile
+    tile_overlap_size : `list`
+        Amount of overlap (in pixels) between each tile
+    num_rows : `int`
+        Number of rows of tiles
+    num_cols : `int`
+        Number of cols of tiles
+    padding : `list`
+        Amount of padding used during tiling
+
+    Returns
+    ----------
+    image : `array_like`
+        Image without padding
+    """
+    if num_rows == 1 and num_cols == 1:
+        image = tile_list[0]
+        
+        if padding is not None:
+            image = self.remove_pad_image(image, padding = padding)
+            
+        return image
+          
+    tile_height = tile_size[0] - tile_overlap_size[0] * 2
+    tile_width = tile_size[1] - tile_overlap_size[1] * 2
+    
+    num_tiles = num_rows*num_cols
+    
+    for col in range(num_cols):
+        for row in range(num_rows):
+            tile_image = tile_list[num_rows*col + row][:,:,0]
+            tile_image = tile_image[tile_overlap_size[0]:min(-tile_overlap_size[0],-1),tile_overlap_size[1]:min(-tile_overlap_size[1],-1)]
+            if row == 0:
+                image_col = np.array(tile_image)
+            else:
+                image_col = np.vstack((image_col, tile_image))
+        
+        if col == 0:
+            image = image_col
+        else:
+            image = np.hstack((image, image_col))
+    
+    image, _ = self.pad_image(image, image_size = (tile_height * num_rows + tile_overlap_size[0] * 2, tile_width * num_cols + tile_overlap_size[1]*2))
+    
+    if padding is not None:
+        image = self.remove_pad_image(image, padding = padding)
+        
+    return image
+
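A self-contained round trip through tile_image and untile_image; for this input size the stitched output reproduces the original exactly:

import numpy as np

funcs = Image_Functions()
image = np.random.rand(300, 400)
tiles, num_rows, num_cols, padding = funcs.tile_image(image, [128, 128], [16, 16])
stitched = funcs.untile_image(tiles, [128, 128], [16, 16], num_rows, num_cols, padding)
assert stitched.shape == image.shape
assert np.allclose(stitched, image)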
+
+
+
+
+
+
+ +
+ + + + + \ No newline at end of file diff --git a/html/models/internals/index.html b/html/models/internals/index.html new file mode 100644 index 0000000..e61d3fa --- /dev/null +++ b/html/models/internals/index.html @@ -0,0 +1,86 @@ + + + + + + +models.internals API documentation + + + + + + + + + +
+
+
+

Module models.internals

+
+
+
+ +Expand source code + +
from __future__ import absolute_import, print_function
+
+
+
+

Sub-modules

+
+
models.internals.dataset
+
+
+
+
models.internals.image_functions
+
+
+
+
models.internals.losses
+
+
+
+
models.internals.network_config
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+ + + + + \ No newline at end of file diff --git a/html/models/internals/losses.html b/html/models/internals/losses.html new file mode 100644 index 0000000..0500ea2 --- /dev/null +++ b/html/models/internals/losses.html @@ -0,0 +1,705 @@ + + + + + + +models.internals.losses API documentation + + + + + + + + + +
+
+
+

Module models.internals.losses

+
+
+
+ +Expand source code + +
from keras import backend as K
+from keras.losses import binary_crossentropy, mean_absolute_error
+import tensorflow as tf
+
+def jaccard_distance_loss(y_true, y_pred, smooth=100):
+    """
+    Jaccard = (|X & Y|)/ (|X|+ |Y| - |X & Y|)
+            = sum(|A*B|)/(sum(|A|)+sum(|B|)-sum(|A*B|))
+    
+    The jaccard distance loss is useful for unbalanced datasets. This has been
+    shifted so it converges on 0 and is smoothed to avoid exploding or disappearing
+    gradient.
+    
+    Ref: https://en.wikipedia.org/wiki/Jaccard_index
+    
+    @url: https://gist.github.com/wassname/f1452b748efcbeb4cb9b1d059dce6f96
+    @author: wassname
+    """
+    intersection = K.sum(y_true * y_pred, axis=-1)
+    sum_ = K.sum(y_true + y_pred, axis=-1)
+    jac = (intersection + smooth) / (sum_ - intersection + smooth)
+    return (1 - jac) * smooth
+
+def dice_coef(y_true, y_pred, smooth=1.):
+    """
+    Dice = (2*|X & Y|)/ (|X|+ |Y|)
+         =  2*sum(|A*B|)/(sum(A^2)+sum(B^2))
+    ref: https://arxiv.org/pdf/1606.04797v1.pdf
+    
+    from wassname as well
+    """
+    intersection = K.sum(y_true * y_pred, axis=-1)
+    return (2. * intersection + smooth) / (K.sum(K.square(y_true),-1) + K.sum(K.square(y_pred),-1) + smooth)
+
+def dice_coef_loss(y_true, y_pred):
+    return 1. - dice_coef(y_true, y_pred)
+
+def bce_dice_loss(y_true, y_pred):
+    return 1. - dice_coef(y_true, y_pred) + binary_crossentropy(y_true, y_pred)
+
+def bce_ssim_loss(y_true, y_pred):
+    return DSSIM_loss(y_true, y_pred) + binary_crossentropy(y_true, y_pred)
+
+# code download from: https://github.com/bermanmaxim/LovaszSoftmax
+def lovasz_grad(gt_sorted):
+    """
+    Computes gradient of the Lovasz extension w.r.t sorted errors
+    See Alg. 1 in paper
+    """
+    gts = tf.reduce_sum(gt_sorted)
+    intersection = gts - tf.cumsum(gt_sorted)
+    union = gts + tf.cumsum(1. - gt_sorted)
+    jaccard = 1. - intersection / union
+    jaccard = tf.concat((jaccard[0:1], jaccard[1:] - jaccard[:-1]), 0)
+    return jaccard
+
+
+# --------------------------- BINARY LOSSES ---------------------------
+
+def lovasz_hinge(logits, labels, per_image=True, ignore=None):
+    """
+    Binary Lovasz hinge loss
+      logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty)
+      labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
+      per_image: compute the loss per image instead of per batch
+      ignore: void class id
+    """
+    if per_image:
+        def treat_image(log_lab):
+            log, lab = log_lab
+            log, lab = tf.expand_dims(log, 0), tf.expand_dims(lab, 0)
+            log, lab = flatten_binary_scores(log, lab, ignore)
+            return lovasz_hinge_flat(log, lab)
+        losses = tf.map_fn(treat_image, (logits, labels), dtype=tf.float32)
+        loss = tf.reduce_mean(losses)
+    else:
+        loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore))
+    return loss
+
+
+def lovasz_hinge_flat(logits, labels):
+    """
+    Binary Lovasz hinge loss
+      logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
+      labels: [P] Tensor, binary ground truth labels (0 or 1)
+      ignore: label to ignore
+    """
+
+    def compute_loss():
+        labelsf = tf.cast(labels, logits.dtype)
+        signs = 2. * labelsf - 1.
+        errors = 1. - logits * tf.stop_gradient(signs)
+        errors_sorted, perm = tf.nn.top_k(errors, k=tf.shape(errors)[0], name="descending_sort")
+        gt_sorted = tf.gather(labelsf, perm)
+        grad = lovasz_grad(gt_sorted)
+        loss = tf.tensordot(tf.nn.relu(errors_sorted), tf.stop_gradient(grad), 1, name="loss_non_void")
+        return loss
+
+    # deal with the void prediction case (only void pixels)
+    loss = tf.cond(tf.equal(tf.shape(logits)[0], 0),
+                   lambda: tf.reduce_sum(logits) * 0.,
+                   compute_loss,
+                   strict=True,
+                   name="loss"
+                   )
+    return loss
+
+
+def flatten_binary_scores(scores, labels, ignore=None):
+    """
+    Flattens predictions in the batch (binary case)
+    Remove labels equal to 'ignore'
+    """
+    scores = tf.reshape(scores, (-1,))
+    labels = tf.reshape(labels, (-1,))
+    if ignore is None:
+        return scores, labels
+    valid = tf.not_equal(labels, ignore)
+    vscores = tf.boolean_mask(scores, valid, name='valid_scores')
+    vlabels = tf.boolean_mask(labels, valid, name='valid_labels')
+    return vscores, vlabels
+
+def lovasz_loss(y_true, y_pred):
+    y_true, y_pred = K.cast(K.squeeze(y_true, -1), 'int32'), K.cast(K.squeeze(y_pred, -1), 'float32')
+    #logits = K.log(y_pred / (1. - y_pred))
+    logits = y_pred #Jiaxin
+    loss = lovasz_hinge(logits, y_true, per_image = True, ignore = None)
+    return loss
+
+# Difference of Structural Similarity
+
+def DSSIM_loss(y_true, y_pred, k1=0.01, k2=0.03, kernel_size=3, max_value=1.0):
+    # There are additional parameters for this function
+    # Note: some of the 'modes' for edge behavior do not yet have a
+    # gradient definition in the Theano tree
+    #   and cannot be used for learning
+    
+    c1 = (k1 * max_value) ** 2
+    c2 = (k2 * max_value) ** 2
+
+    kernel = [kernel_size, kernel_size]  # note: unused below; the extracted patch size is fixed at 5x5
+    y_true = K.reshape(y_true, [-1] + list(K.int_shape(y_pred)[1:]))
+    y_pred = K.reshape(y_pred, [-1] + list(K.int_shape(y_pred)[1:]))
+
+    patches_pred = tf.extract_image_patches(y_pred, [1, 5, 5, 1], [1, 2, 2, 1], [1, 1, 1, 1], "SAME")
+    patches_true = tf.extract_image_patches(y_true, [1, 5, 5, 1], [1, 2, 2, 1], [1, 1, 1, 1], "SAME")
+
+    # Reshape to get the var in the cells
+    bs, w, h, c = K.int_shape(patches_pred)
+    patches_pred = K.reshape(patches_pred, [-1, w, h, c])
+    patches_true = K.reshape(patches_true, [-1, w, h, c])
+    # Get mean
+    u_true = K.mean(patches_true, axis=-1)
+    u_pred = K.mean(patches_pred, axis=-1)
+    # Get variance
+    var_true = K.var(patches_true, axis=-1)
+    var_pred = K.var(patches_pred, axis=-1)
+    # Get std dev
+    covar_true_pred = K.mean(patches_true * patches_pred, axis=-1) - u_true * u_pred
+
+    ssim = (2 * u_true * u_pred + c1) * (2 * covar_true_pred + c2)
+    denom = ((K.square(u_true)
+              + K.square(u_pred)
+              + c1) * (var_pred + var_true + c2))
+    ssim /= denom  # no need for clipping, c1 and c2 make the denom non-zero
+    return K.mean((1.0 - ssim) / 2.0)
+
+def dssim_mae_loss(y_true, y_pred):
+    return DSSIM_loss(y_true, y_pred) + mean_absolute_error(y_true, y_pred)
+
+#MSSim
+#https://stackoverflow.com/questions/48744945/keras-ms-ssim-as-loss-function
+def keras_SSIM_cs(y_true, y_pred):
+    axis=None
+    gaussian = make_kernel(1.5)
+    x = tf.nn.conv2d(y_true, gaussian, strides=[1, 1, 1, 1], padding='SAME')
+    y = tf.nn.conv2d(y_pred, gaussian, strides=[1, 1, 1, 1], padding='SAME')
+
+    u_x=K.mean(x, axis=axis)
+    u_y=K.mean(y, axis=axis)
+
+    var_x=K.var(x, axis=axis)
+    var_y=K.var(y, axis=axis)
+
+    cov_xy=cov_keras(x, y, axis)
+
+    K1=0.01
+    K2=0.03
+    L=1  # depth of image (255 in case the image has a different scale)
+
+    C1=(K1*L)**2
+    C2=(K2*L)**2
+    C3=C2/2
+
+    l = ((2*u_x*u_y)+C1) / (K.pow(u_x,2) + K.pow(u_y,2) + C1)
+    c = ((2*K.sqrt(var_x)*K.sqrt(var_y))+C2) / (var_x + var_y + C2)
+    s = (cov_xy+C3) / (K.sqrt(var_x)*K.sqrt(var_y) + C3)
+
+    return [c,s,l]
+
+def keras_MS_SSIM(y_true, y_pred):
+    iterations = 5
+    x=y_true
+    y=y_pred
+    weight = [0.0448, 0.2856, 0.3001, 0.2363, 0.1333]
+    c=[]
+    s=[]
+    for i in range(iterations):
+        cs=keras_SSIM_cs(x, y)
+        c.append(cs[0])
+        s.append(cs[1])
+        l=cs[2]
+        if(i!=4):
+            x=tf.image.resize_images(x, (x.get_shape().as_list()[1]//(2**(i+1)), x.get_shape().as_list()[2]//(2**(i+1))))
+            y=tf.image.resize_images(y, (y.get_shape().as_list()[1]//(2**(i+1)), y.get_shape().as_list()[2]//(2**(i+1))))
+    c = tf.stack(c)
+    s = tf.stack(s)
+    cs = c*s
+
+    #Normalize: suggestion from https://github.com/jorge-pessoa/pytorch-msssim/issues/2 last comment to avoid NaN values
+    l=(l+1)/2
+    cs=(cs+1)/2
+
+    cs=cs**weight
+    cs = tf.reduce_prod(cs)
+    l=l**weight[-1]
+
+    ms_ssim = l*cs
+    ms_ssim = tf.where(tf.is_nan(ms_ssim), K.zeros_like(ms_ssim), ms_ssim)
+
+    return K.mean(ms_ssim)
+
+def mssim_mae_loss(y_true, y_pred):
+    return keras_MS_SSIM(y_true, y_pred) + mean_absolute_error(y_true, y_pred)
+
+
+
+
+
+
+
+

Functions

+
+
+def DSSIM_loss(y_true, y_pred, k1=0.01, k2=0.03, kernel_size=3, max_value=1.0) +
+
+
+
+ +Expand source code + +
def DSSIM_loss(y_true, y_pred, k1=0.01, k2=0.03, kernel_size=3, max_value=1.0):
+    # There are additional parameters for this function
+    # Note: some of the 'modes' for edge behavior do not yet have a
+    # gradient definition in the Theano tree
+    #   and cannot be used for learning
+    
+    c1 = (k1 * max_value) ** 2
+    c2 = (k2 * max_value) ** 2
+
+    kernel = [kernel_size, kernel_size]  # note: unused below; the extracted patch size is fixed at 5x5
+    y_true = K.reshape(y_true, [-1] + list(K.int_shape(y_pred)[1:]))
+    y_pred = K.reshape(y_pred, [-1] + list(K.int_shape(y_pred)[1:]))
+
+    patches_pred = tf.extract_image_patches(y_pred, [1, 5, 5, 1], [1, 2, 2, 1], [1, 1, 1, 1], "SAME")
+    patches_true = tf.extract_image_patches(y_true, [1, 5, 5, 1], [1, 2, 2, 1], [1, 1, 1, 1], "SAME")
+
+    # Reshape to get the var in the cells
+    bs, w, h, c = K.int_shape(patches_pred)
+    patches_pred = K.reshape(patches_pred, [-1, w, h, c])
+    patches_true = K.reshape(patches_true, [-1, w, h, c])
+    # Get mean
+    u_true = K.mean(patches_true, axis=-1)
+    u_pred = K.mean(patches_pred, axis=-1)
+    # Get variance
+    var_true = K.var(patches_true, axis=-1)
+    var_pred = K.var(patches_pred, axis=-1)
+    # Get std dev
+    covar_true_pred = K.mean(patches_true * patches_pred, axis=-1) - u_true * u_pred
+
+    ssim = (2 * u_true * u_pred + c1) * (2 * covar_true_pred + c2)
+    denom = ((K.square(u_true)
+              + K.square(u_pred)
+              + c1) * (var_pred + var_true + c2))
+    ssim /= denom  # no need for clipping, c1 and c2 make the denom non-zero
+    return K.mean((1.0 - ssim) / 2.0)
+
+
+
+def bce_dice_loss(y_true, y_pred) +
+
+
+
+ +Expand source code + +
def bce_dice_loss(y_true, y_pred):
+    return 1. - dice_coef(y_true, y_pred) + binary_crossentropy(y_true, y_pred)
+
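A hypothetical usage, assuming `model` is a Keras model built elsewhere in this codebase:

model.compile(optimizer='adam', loss=bce_dice_loss, metrics=['binary_accuracy'])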
+
+
+def bce_ssim_loss(y_true, y_pred) +
+
+
+
+ +Expand source code + +
def bce_ssim_loss(y_true, y_pred):
+    return DSSIM_loss(y_true, y_pred) + binary_crossentropy(y_true, y_pred)
+
+
+
+def dice_coef(y_true, y_pred, smooth=1.0) +
+
+

Dice = (2*|X & Y|) / (|X| + |Y|) = 2*sum(|A*B|) / (sum(A^2) + sum(B^2)); ref: https://arxiv.org/pdf/1606.04797v1.pdf

+

from wassname as well

+
+ +Expand source code + +
def dice_coef(y_true, y_pred, smooth=1.):
+    """
+    Dice = (2*|X & Y|)/ (|X|+ |Y|)
+         =  2*sum(|A*B|)/(sum(A^2)+sum(B^2))
+    ref: https://arxiv.org/pdf/1606.04797v1.pdf
+    
+    from wassname as well
+    """
+    intersection = K.sum(y_true * y_pred, axis=-1)
+    return (2. * intersection + smooth) / (K.sum(K.square(y_true),-1) + K.sum(K.square(y_pred),-1) + smooth)
+
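A NumPy re-implementation of the formula above, for illustration only:

import numpy as np

y_true = np.array([[1., 1., 0., 0.]])
y_pred = np.array([[0.9, 0.8, 0.1, 0.2]])
intersection = np.sum(y_true * y_pred, axis=-1)
dice = (2. * intersection + 1.) / (np.sum(np.square(y_true), -1) + np.sum(np.square(y_pred), -1) + 1.)
# dice ~ 0.978, close to 1 for a good prediction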
+
+
+def dice_coef_loss(y_true, y_pred) +
+
+
+
+ +Expand source code + +
def dice_coef_loss(y_true, y_pred):
+    return 1. - dice_coef(y_true, y_pred)
+
+
+
+def dssim_mae_loss(y_true, y_pred) +
+
+
+
+ +Expand source code + +
def dssim_mae_loss(y_true, y_pred):
+    return DSSIM_loss(y_true, y_pred) + mean_absolute_error(y_true, y_pred)
+
+
+
+def flatten_binary_scores(scores, labels, ignore=None) +
+
+

Flattens predictions in the batch (binary case); removes labels equal to 'ignore'

+
+ +Expand source code + +
def flatten_binary_scores(scores, labels, ignore=None):
+    """
+    Flattens predictions in the batch (binary case)
+    Remove labels equal to 'ignore'
+    """
+    scores = tf.reshape(scores, (-1,))
+    labels = tf.reshape(labels, (-1,))
+    if ignore is None:
+        return scores, labels
+    valid = tf.not_equal(labels, ignore)
+    vscores = tf.boolean_mask(scores, valid, name='valid_scores')
+    vlabels = tf.boolean_mask(labels, valid, name='valid_labels')
+    return vscores, vlabels
+
+
+
+def jaccard_distance_loss(y_true, y_pred, smooth=100) +
+
+

Jaccard = (|X & Y|) / (|X| + |Y| - |X & Y|) = sum(|A*B|) / (sum(|A|) + sum(|B|) - sum(|A*B|))

+

The jaccard distance loss is useful for unbalanced datasets. It has been shifted so it converges on 0 and is smoothed to avoid exploding or disappearing gradients.

+

Ref: https://en.wikipedia.org/wiki/Jaccard_index

+

@url: https://gist.github.com/wassname/f1452b748efcbeb4cb9b1d059dce6f96; @author: wassname

+
+ +Expand source code + +
def jaccard_distance_loss(y_true, y_pred, smooth=100):
+    """
+    Jaccard = (|X & Y|)/ (|X|+ |Y| - |X & Y|)
+            = sum(|A*B|)/(sum(|A|)+sum(|B|)-sum(|A*B|))
+    
+    The jaccard distance loss is useful for unbalanced datasets. This has been
+    shifted so it converges on 0 and is smoothed to avoid exploding or disappearing
+    gradient.
+    
+    Ref: https://en.wikipedia.org/wiki/Jaccard_index
+    
+    @url: https://gist.github.com/wassname/f1452b748efcbeb4cb9b1d059dce6f96
+    @author: wassname
+    """
+    intersection = K.sum(y_true * y_pred, axis=-1)
+    sum_ = K.sum(y_true + y_pred, axis=-1)
+    jac = (intersection + smooth) / (sum_ - intersection + smooth)
+    return (1 - jac) * smooth
+
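The same formula in plain NumPy, for illustration only; the loss approaches 0 as the prediction improves:

import numpy as np

y_true = np.array([[1., 1., 0., 0.]])
y_pred = np.array([[0.9, 0.8, 0.1, 0.2]])
intersection = np.sum(y_true * y_pred, axis=-1)
union = np.sum(y_true + y_pred, axis=-1) - intersection
loss = (1 - (intersection + 100) / (union + 100)) * 100
# loss ~ 0.59 here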
+
+
+def keras_MS_SSIM(y_true, y_pred) +
+
+
+
+ +Expand source code + +
def keras_MS_SSIM(y_true, y_pred):
+    iterations = 5
+    x=y_true
+    y=y_pred
+    weight = [0.0448, 0.2856, 0.3001, 0.2363, 0.1333]
+    c=[]
+    s=[]
+    for i in range(iterations):
+        cs=keras_SSIM_cs(x, y)
+        c.append(cs[0])
+        s.append(cs[1])
+        l=cs[2]
+        if(i!=4):
+            x=tf.image.resize_images(x, (x.get_shape().as_list()[1]//(2**(i+1)), x.get_shape().as_list()[2]//(2**(i+1))))
+            y=tf.image.resize_images(y, (y.get_shape().as_list()[1]//(2**(i+1)), y.get_shape().as_list()[2]//(2**(i+1))))
+    c = tf.stack(c)
+    s = tf.stack(s)
+    cs = c*s
+
+    #Normalize: suggestion from https://github.com/jorge-pessoa/pytorch-msssim/issues/2 last comment to avoid NaN values
+    l=(l+1)/2
+    cs=(cs+1)/2
+
+    cs=cs**weight
+    cs = tf.reduce_prod(cs)
+    l=l**weight[-1]
+
+    ms_ssim = l*cs
+    ms_ssim = tf.where(tf.is_nan(ms_ssim), K.zeros_like(ms_ssim), ms_ssim)
+
+    return K.mean(ms_ssim)
+
+
+
+def keras_SSIM_cs(y_true, y_pred) +
+
+
+
+ +Expand source code + +
def keras_SSIM_cs(y_true, y_pred):
+    axis=None
+    gaussian = make_kernel(1.5)
+    x = tf.nn.conv2d(y_true, gaussian, strides=[1, 1, 1, 1], padding='SAME')
+    y = tf.nn.conv2d(y_pred, gaussian, strides=[1, 1, 1, 1], padding='SAME')
+
+    u_x=K.mean(x, axis=axis)
+    u_y=K.mean(y, axis=axis)
+
+    var_x=K.var(x, axis=axis)
+    var_y=K.var(y, axis=axis)
+
+    cov_xy=cov_keras(x, y, axis)
+
+    K1=0.01
+    K2=0.03
+    L=1  # depth of image (255 in case the image has a different scale)
+
+    C1=(K1*L)**2
+    C2=(K2*L)**2
+    C3=C2/2
+
+    l = ((2*u_x*u_y)+C1) / (K.pow(u_x,2) + K.pow(u_y,2) + C1)
+    c = ((2*K.sqrt(var_x)*K.sqrt(var_y))+C2) / (var_x + var_y + C2)
+    s = (cov_xy+C3) / (K.sqrt(var_x)*K.sqrt(var_y) + C3)
+
+    return [c,s,l]
+
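For reference, these are the standard SSIM components (Wang et al., 2004) that this function returns as [c, s, l], written in the same variable names as the code:

    l(x, y) = (2*u_x*u_y + C1) / (u_x^2 + u_y^2 + C1)                    # luminance
    c(x, y) = (2*sqrt(var_x)*sqrt(var_y) + C2) / (var_x + var_y + C2)    # contrast
    s(x, y) = (cov_xy + C3) / (sqrt(var_x)*sqrt(var_y) + C3)             # structure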
+
+
+def lovasz_grad(gt_sorted)
+
+

Computes the gradient of the Lovasz extension w.r.t. sorted errors. See Alg. 1 in the paper.

+
def lovasz_grad(gt_sorted):
+    """
+    Computes gradient of the Lovasz extension w.r.t sorted errors
+    See Alg. 1 in paper
+    """
+    gts = tf.reduce_sum(gt_sorted)
+    intersection = gts - tf.cumsum(gt_sorted)
+    union = gts + tf.cumsum(1. - gt_sorted)
+    jaccard = 1. - intersection / union
+    jaccard = tf.concat((jaccard[0:1], jaccard[1:] - jaccard[:-1]), 0)
+    return jaccard
+
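To make the cumulative-sum construction concrete, here is a NumPy transcription with a small made-up input (the values are ours, purely illustrative):

    import numpy as np

    def lovasz_grad_np(gt_sorted):
        gts = gt_sorted.sum()
        intersection = gts - np.cumsum(gt_sorted)
        union = gts + np.cumsum(1.0 - gt_sorted)
        jaccard = 1.0 - intersection / union
        jaccard[1:] = jaccard[1:] - jaccard[:-1]   # discrete derivative, as in the paper
        return jaccard

    print(lovasz_grad_np(np.array([1.0, 1.0, 0.0])))   # -> [0.5 0.5 0. ]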
+
+
+def lovasz_hinge(logits, labels, per_image=True, ignore=None)
+
+

Binary Lovasz hinge loss.
logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty)
labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
per_image: compute the loss per image instead of per batch
ignore: void class id

+
def lovasz_hinge(logits, labels, per_image=True, ignore=None):
+    """
+    Binary Lovasz hinge loss
+      logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty)
+      labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
+      per_image: compute the loss per image instead of per batch
+      ignore: void class id
+    """
+    if per_image:
+        def treat_image(log_lab):
+            log, lab = log_lab
+            log, lab = tf.expand_dims(log, 0), tf.expand_dims(lab, 0)
+            log, lab = flatten_binary_scores(log, lab, ignore)
+            return lovasz_hinge_flat(log, lab)
+        losses = tf.map_fn(treat_image, (logits, labels), dtype=tf.float32)
+        loss = tf.reduce_mean(losses)
+    else:
+        loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore))
+    return loss
+
+
+
+def lovasz_hinge_flat(logits, labels)
+
+

Binary Lovasz hinge loss.
logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
labels: [P] Tensor, binary ground truth labels (0 or 1)

+
def lovasz_hinge_flat(logits, labels):
+    """
+    Binary Lovasz hinge loss
+      logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
+      labels: [P] Tensor, binary ground truth labels (0 or 1)
+    """
+
+    def compute_loss():
+        labelsf = tf.cast(labels, logits.dtype)
+        signs = 2. * labelsf - 1.
+        errors = 1. - logits * tf.stop_gradient(signs)
+        errors_sorted, perm = tf.nn.top_k(errors, k=tf.shape(errors)[0], name="descending_sort")
+        gt_sorted = tf.gather(labelsf, perm)
+        grad = lovasz_grad(gt_sorted)
+        loss = tf.tensordot(tf.nn.relu(errors_sorted), tf.stop_gradient(grad), 1, name="loss_non_void")
+        return loss
+
+    # deal with the void prediction case (only void pixels)
+    loss = tf.cond(tf.equal(tf.shape(logits)[0], 0),
+                   lambda: tf.reduce_sum(logits) * 0.,
+                   compute_loss,
+                   strict=True,
+                   name="loss"
+                   )
+    return loss
+
+
+
+def lovasz_loss(y_true, y_pred)
+
+
+
+
def lovasz_loss(y_true, y_pred):
+    y_true, y_pred = K.cast(K.squeeze(y_true, -1), 'int32'), K.cast(K.squeeze(y_pred, -1), 'float32')
+    #logits = K.log(y_pred / (1. - y_pred))
+    logits = y_pred #Jiaxin
+    loss = lovasz_hinge(logits, y_true, per_image = True, ignore = None)
+    return loss
+
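The commented-out line shows that lovasz_loss expects y_pred to already be raw logits, i.e. a linear final activation. If the network instead ends in a sigmoid, the probabilities could be mapped back to logits first (a sketch; the helper name and `eps` are ours, not part of this module):

    import keras.backend as K

    def sigmoid_to_logits(p, eps=1e-7):
        # invert the sigmoid: log(p / (1 - p)), clipped to avoid log(0)
        p = K.clip(p, eps, 1.0 - eps)
        return K.log(p / (1.0 - p))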
+
+
+def mssim_mae_loss(y_true, y_pred)
+
+
+
+
def mssim_mae_loss(y_true, y_pred):
+    return keras_MS_SSIM(y_true, y_pred) + mean_absolute_error(y_true, y_pred)
+
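`mean_absolute_error` is not defined in the code shown here; it presumably comes from Keras' losses module (an assumption about imports elsewhere in this file):

    from keras.losses import mean_absolute_error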
+
+
+
+
+
+
+ +
\ No newline at end of file
diff --git a/html/models/internals/network_config.html b/html/models/internals/network_config.html
new file mode 100644
index 0000000..b11b990
--- /dev/null
+++ b/html/models/internals/network_config.html
@@ -0,0 +1,908 @@
+models.internals.network_config API documentation
+
+
+

Module models.internals.network_config

+
+
+
+
import glob
+import os
+from ruamel.yaml import YAML
+
+class Network_Config(object):
+    def __init__(self, model_dir = None, config_filepath = None, **kwargs):
+        """Creates Network_Config object that contains the network parameters and functions needed to manipulate these parameters.
+    
+        Parameters
+        ----------
+        model_dir : `str`, optional
+            [Default: None] Folder where the model is to be saved/read from
+        config_filepath : `str`, optional
+            [Default: None] Filepath to the config file that will be loaded
+        **kwargs
+            For network parameters that are to be changed from the loaded config file
+
+        Attributes
+        ----------
+        yaml : :class:`ruamel.yaml.YAML`
+            YAML class with the functions needed to read/write YAML files
+        config : `dict`
+            Dictionary containing the config parameters
+        """
+        self.yaml=YAML()
+        
+        # load config file from model_dir
+        if config_filepath is not None:
+            
+            self.config = self.load_config_from_file(config_filepath)
+            print("Loaded config file from {}".format(config_filepath))
+        elif model_dir is not None:
+            try:
+                self.config = self.load_config_from_model_dir(model_dir)
+                print("Loaded config file from {}".format(model_dir))
+            except:
+                print("Please ensure that config_filepath is set or there is a config file in model_dir")
+                raise
+            
+        if model_dir is not None:
+            # update model_dir in config
+            print("Updating model_dir to {}".format(model_dir))
+            self.update_parameter(["general", "model_dir"], model_dir)
+        
+        # overwrite network parameters with parameters given during initialization
+        for key, value in kwargs.items():
+            self.update_parameter(self.find_key(key), value)
+            
+        # perform calculations
+        self.update_parameter(["model", "input_size"], self.get_parameter("tile_size") + [self.get_parameter("image_channel"),])
+        self.update_parameter(["model", "batch_size"], self.get_parameter("batch_size_per_GPU")) # * self.gpu_count
+                  
+    ######################
+    # Accessors/Mutators
+    ######################
+    def get_parameter(self, parameter, config = []):
+        """Output the value from the config file using the given key
+
+        Parameters
+        ----------
+        parameter : `list` or `str`
+            Key or list of keys used to find the value in the config file
+        
+        config : `list`, optional
+            Used to iterate through nested dictionaries. Required to recursively iterate through a nested dictionary
+            
+        Returns
+        ----------
+        value : `str` or `int` or `list`
+            Value obtained from the specified key
+            
+        See Also
+        ----------
+        find_key : Function to identify the list of keys to address the correct item in a nested dictionary
+        """
+        assert isinstance(parameter, (list, str))
+        
+        # find the key in the nested dictionary
+        if isinstance(parameter, str):
+            parameter = self.find_key(parameter)
+        
+        if config == []:
+            config = self.config
+        if config is None:
+            return None
+        
+        if not parameter:
+            return config
+        
+        return self.get_parameter(parameter[1:], config = config.get(parameter[0]))
+
+    def update_parameter(self, parameter, value, config = None):
+        """Updates the parameter in the config file using a full addressed list
+
+        Parameters
+        ----------
+        parameter : `list`
+            List of keys that point to the correct item in the nested dictionary
+            
+        value : `str` or `int` or `list`
+            Value that is updated in the nested dictionary
+            
+        config : `list` or `none`, optional
+            Used to iterate through nested dictionaries
+            
+        Returns
+        ----------
+        TODO
+        """
+        
+        assert type(parameter) is list
+                
+        if config is None:
+            config = self.config
+        
+        if len(parameter) == 1:
+            config.update({parameter[0]: value})
+            return config
+        # recurse into the nested dictionary rather than restarting from the top level
+        return self.update_parameter(parameter[1:], value, config = config.get(parameter[0]))
+
+    def find_key(self, key, config = None):
+        """Find the list of keys to address the correct item in a nested dictionary
+
+        Parameters
+        ----------
+        key : `str`
+            Key that needs to be correctly addressed in a nested dictionary
+            
+        config : `list` or `none`, optional
+            Used to iterate through nested dictionaries
+            
+        Returns
+        ----------
+        key : `list`
+            Address of the key in the nested dictionary
+        """
+        
+        if config is None:
+            config = self.config
+            
+        key_path = []
+        for k, v in config.items():
+            if k == key:
+                return [k]
+            elif isinstance(v, dict):
+                found_key = self.find_key(key, config = v)
+                if found_key is not None:
+                    return [k] + found_key
+    
+    ######################
+    # Config IO options
+    ######################
+    def load_config_from_file(self, file_path):
+        """Load parameters from yaml file
+
+        Parameters
+        ----------
+        file_path : `str`
+            Path of config file to load
+            
+        Returns
+        ----------
+        config : `dict`
+            Dictionary containing the config parameters
+        """
+
+        with open(file_path, 'r') as input_file: 
+            config = self.yaml.load(input_file)
+            input_file.close()
+
+        return config
+    
+    def load_config_from_model_dir(self, model_dir):
+        """Finds for a config file from the model directory and loads it
+    
+        Parameters
+        ----------
+        model_dir : `str`
+            Folder to search for and load the config file
+
+        Returns
+        ----------
+        config : `dict`
+            Dictionary containing the config parameters
+            
+        Raises
+        ------
+        IndexError
+            If there is no config file in the model_dir
+        """
+        
+        # check if yaml file exists in model_dir
+        try:
+            list_config_files = glob.glob(os.path.join(model_dir,'*config.yml'))
+            if len(list_config_files) > 1:
+                print("Multiple config files found. Loading {}".format(list_config_files[0]))
+            else:
+                print("Config file exists in model directory. Loading {}".format(list_config_files[0]))
+            return self.load_config_from_file(list_config_files[0])
+        except IndexError:
+            print("No config file found in model_dir.")
+            raise
+
+    def write_config(self, file_path):
+        """Writes parameters to yaml file
+
+        Parameters
+        ----------
+        file_path : `str`
+            Path of config file to write to
+        """
+        
+        with open(file_path, 'w') as output_file:  
+            self.yaml.dump(self.config, output_file)
+
+        output_file.close()
+        
+        print("Config file written to: {}".format(file_path))
+    
+    def write_model(self, model, file_path):
+        """Writes parameters to yaml file
+
+        Parameters
+        ----------
+        model : :class:`Keras.model`
+            Keras model that will be parsed and written to a yaml file
+        
+        file_path : `str`
+            Path of model file to write to
+        """
+        
+        with open(file_path, 'w') as output_file:  
+            output_file.write(model.to_yaml())
+
+        output_file.close()
+        
+        print("Model file written to: {}".format(file_path))
+
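A short usage sketch for this class (the path and values are illustrative; the loaded YAML must define the keys touched in __init__, such as tile_size, image_channel and batch_size_per_GPU):

    config = Network_Config(config_filepath="configs/default_multiclass_unet.yml")

    config.get_parameter("model_dir")                # bare key, resolved via find_key
    config.update_parameter(["general", "model_dir"], "/tmp/run1")
    config.write_config("/tmp/run1/updated-config.yml")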
+
+
+
+
+
+
+
+
+

Classes

+
+
+class Network_Config(model_dir=None, config_filepath=None, **kwargs)
+
+

Creates Network_Config object that contains the network parameters and functions needed to manipulate these parameters.

+

Parameters

+
+
model_dir : str, optional
+
[Default: None] Folder where the model is to be saved/read from
+
config_filepath : str, optional
+
[Default: None] Filepath to the config file that will be loaded
+
**kwargs
+
For network parameters that are to be changed from the loaded config file
+
+

Attributes

+
+
yaml : :class:ruamel.yaml.YAML
+
YAML class with the functions needed to read/write YAML files
+
config : dict
+
Dictionary containing the config parameters
+
+
+
class Network_Config(object):
+    def __init__(self, model_dir = None, config_filepath = None, **kwargs):
+        """Creates Network_Config object that contains the network parameters and functions needed to manipulate these parameters.
+    
+        Parameters
+        ----------
+        model_dir : `str`, optional
+            [Default: None] Folder where the model is to be saved/read from
+        config_filepath : `str`, optional
+            [Default: None] Filepath to the config file that will be loaded
+        **kwargs
+            For network parameters that are to be changed from the loaded config file
+
+        Attributes
+        ----------
+        yaml : :class:`ruamel.yaml.YAML`
+            YAML class with the functions needed to read/write YAML files
+        config : `dict`
+            Dictionary containing the config parameters
+        """
+        self.yaml=YAML()
+        
+        # load config file from model_dir
+        if config_filepath is not None:
+            
+            self.config = self.load_config_from_file(config_filepath)
+            print("Loaded config file from {}".format(config_filepath))
+        elif model_dir is not None:
+            try:
+                self.config = self.load_config_from_model_dir(model_dir)
+                print("Loaded config file from {}".format(model_dir))
+            except:
+                print("Please ensure that config_filepath is set or there is a config file in model_dir")
+                raise
+            
+        if model_dir is not None:
+            # update model_dir in config
+            print("Updating model_dir to {}".format(model_dir))
+            self.update_parameter(["general", "model_dir"], model_dir)
+        
+        # overwrite network parameters with parameters given during initialization
+        for key, value in kwargs.items():
+            self.update_parameter(self.find_key(key), value)
+            
+        # perform calculations
+        self.update_parameter(["model", "input_size"], self.get_parameter("tile_size") + [self.get_parameter("image_channel"),])
+        self.update_parameter(["model", "batch_size"], self.get_parameter("batch_size_per_GPU")) # * self.gpu_count
+                  
+    ######################
+    # Accessors/Mutators
+    ######################
+    def get_parameter(self, parameter, config = []):
+        """Output the value from the config file using the given key
+
+        Parameters
+        ----------
+        parameter : `list` or `str`
+            Key or list of keys used to find the value in the config file
+        
+        config : `list`, optional
+            Used to iterate through nested dictionaries. Required to recursively iterate through a nested dictionary
+            
+        Returns
+        ----------
+        value : `str` or `int` or `list`
+            Value obtained from the specified key
+            
+        See Also
+        ----------
+        find_key : Function to identify the list of keys to address the correct item in a nested dictionary
+        """
+        assert isinstance(parameter, (list, str))
+        
+        # find the key in the nested dictionary
+        if isinstance(parameter, str):
+            parameter = self.find_key(parameter)
+        
+        if config == []:
+            config = self.config
+        if config is None:
+            return None
+        
+        if not parameter:
+            return config
+        
+        return self.get_parameter(parameter[1:], config = config.get(parameter[0]))
+
+    def update_parameter(self, parameter, value, config = None):
+        """Updates the parameter in the config file using a full addressed list
+
+        Parameters
+        ----------
+        parameter : `list`
+            List of keys that point to the correct item in the nested dictionary
+            
+        value : `str` or `int` or `list`
+            Value that is updated in the nested dictionary
+            
+        config : `list` or `none`, optional
+            Used to iterate through nested dictionaries
+            
+        Returns
+        ----------
+        TODO
+        """
+        
+        assert type(parameter) is list
+                
+        if config is None:
+            config = self.config
+        
+        if len(parameter) == 1:
+            config.update({parameter[0]: value})
+            return config
+        # recurse into the nested dictionary rather than restarting from the top level
+        return self.update_parameter(parameter[1:], value, config = config.get(parameter[0]))
+
+    def find_key(self, key, config = None):
+        """Find the list of keys to address the correct item in a nested dictionary
+
+        Parameters
+        ----------
+        key : `str`
+            Key that needs to be correctly addressed in a nested dictionary
+            
+        config : `list` or `none`, optional
+            Used to iterate through nested dictionaries
+            
+        Returns
+        ----------
+        key : `list`
+            Address of the key in the nested dictionary
+        """
+        
+        if config is None:
+            config = self.config
+            
+        key_path = []
+        for k, v in config.items():
+            if k == key:
+                return [k]
+            elif isinstance(v, dict):
+                found_key = self.find_key(key, config = v)
+                if found_key is not None:
+                    return [k] + found_key
+    
+    ######################
+    # Config IO options
+    ######################
+    def load_config_from_file(self, file_path):
+        """Load parameters from yaml file
+
+        Parameters
+        ----------
+        file_path : `str`
+            Path of config file to load
+            
+        Returns
+        ----------
+        config : `dict`
+            Dictionary containing the config parameters
+        """
+
+        with open(file_path, 'r') as input_file: 
+            config = self.yaml.load(input_file)
+            input_file.close()
+
+        return config
+    
+    def load_config_from_model_dir(self, model_dir):
+        """Finds for a config file from the model directory and loads it
+    
+        Parameters
+        ----------
+        model_dir : `str`
+            Folder to search for and load the config file
+
+        Returns
+        ----------
+        config : `dict`
+            Dictionary containing the config parameters
+            
+        Raises
+        ------
+        IndexError
+            If there is no config file in the model_dir
+        """
+        
+        # check if yaml file exists in model_dir
+        try:
+            list_config_files = glob.glob(os.path.join(model_dir,'*config.yml'))
+            if len(list_config_files) > 1:
+                print("Multiple config files found. Loading {}".format(list_config_files[0]))
+            else:
+                print("Config file exists in model directory. Loading {}".format(list_config_files[0]))
+            return self.load_config_from_file(list_config_files[0])
+        except IndexError:
+            print("No config file found in model_dir.")
+            raise
+
+    def write_config(self, file_path):
+        """Writes parameters to yaml file
+
+        Parameters
+        ----------
+        file_path : `str`
+            Path of config file to write to
+        """
+        
+        with open(file_path, 'w') as output_file:  
+            self.yaml.dump(self.config, output_file)
+
+        output_file.close()
+        
+        print("Config file written to: {}".format(file_path))
+    
+    def write_model(self, model, file_path):
+        """Writes parameters to yaml file
+
+        Parameters
+        ----------
+        model : :class:`Keras.model`
+            Keras model that will be parsed and written to a yaml file
+        
+        file_path : `str`
+            Path of model file to write to
+        """
+        
+        with open(file_path, 'w') as output_file:  
+            output_file.write(model.to_yaml())
+
+        output_file.close()
+        
+        print("Model file written to: {}".format(file_path))
+
+

Methods

+
+
+def find_key(self, key, config=None)
+
+

Find the list of keys to address the correct item in a nested dictionary

+

Parameters

+
+
key : str
+
Key that needs to be correctly addressed in a nested dictionary
+
config : list or none, optional
+
Used to iterate through nested dictionaries
+
+

Returns

+
+
key : list
+
Address of the key in the nested dictionary
+
+
+
def find_key(self, key, config = None):
+    """Find the list of keys to address the correct item in a nested dictionary
+
+    Parameters
+    ----------
+    key : `str`
+        Key that needs to be correctly addressed in a nested dictionary
+        
+    config : `list` or `none`, optional
+        Used to iterate through nested dictionaries
+        
+    Returns
+    ----------
+    key : `list`
+        Address of the key in the nested dictionary
+    """
+    
+    if config is None:
+        config = self.config
+        
+    key_path = []
+    for k, v in config.items():
+        if k == key:
+            return [k]
+        elif isinstance(v, dict):
+            found_key = self.find_key(key, config = v)
+            if found_key is not None:
+                return [k] + found_key
+
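For example, given a nested config like the sample one in this repo, a leaf key resolves to its full address (the exact path depends on the loaded YAML):

    config.find_key("model_dir")    # -> ["general", "model_dir"]
    config.find_key("no_such_key")  # -> None (falls through without a match)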
+
+
+def get_parameter(self, parameter, config=[])
+
+

Output the value from the config file using the given key

+

Parameters

+
+
parameter : list or str
+
Key or list of keys used to find the value in the config file
+
config : list, optional
+
Used to iterate through nested dictionaries. Required to recursively iterate through a nested dictionary
+
+

Returns

+
+
value : str or int or list
+
Value obtained from the specified key
+
+

See Also

+
+
find_key
+
Function to identify the list of keys to address the correct item in a nested dictionary
+
+
+
def get_parameter(self, parameter, config = []):
+    """Output the value from the config file using the given key
+
+    Parameters
+    ----------
+    parameter : `list` or `str`
+        Key or list of keys used to find the value in the config file
+    
+    config : `list`, optional
+        Used to iterate through nested dictionaries. Required to recursively iterate through a nested dictionary
+        
+    Returns
+    ----------
+    value : `str` or `int` or `list`
+        Value obtained from the specified key
+        
+    See Also
+    ----------
+    find_key : Function to identify the list of keys to address the correct item in a nested dictionary
+    """
+    assert isinstance(parameter, (list, str))
+    
+    # find the key in the nested dictionary
+    if isinstance(parameter, str):
+        parameter = self.find_key(parameter)
+    
+    if config == []:
+        config = self.config
+    if config is None:
+        return None
+    
+    if not parameter:
+        return config
+    
+    return self.get_parameter(parameter[1:], config = config.get(parameter[0]))
+
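Since a bare string is first resolved to its full key path via find_key, the following two calls are equivalent (the path shown is illustrative):

    config.get_parameter("model_dir")
    config.get_parameter(["general", "model_dir"])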
+
+
+def load_config_from_file(self, file_path)
+
+

Load parameters from yaml file

+

Parameters

+
+
file_path : str
+
Path of config file to load
+
+

Returns

+
+
config : dict
+
Dictionary containing the config parameters
+
+
+
def load_config_from_file(self, file_path):
+    """Load parameters from yaml file
+
+    Parameters
+    ----------
+    file_path : `str`
+        Path of config file to load
+        
+    Returns
+    ----------
+    config : `dict`
+        Dictionary containing the config parameters
+    """
+
+    with open(file_path, 'r') as input_file: 
+        config = self.yaml.load(input_file)
+        input_file.close()
+
+    return config
+
+
+
+def load_config_from_model_dir(self, model_dir)
+
+

Finds a config file in the model directory and loads it

+

Parameters

+
+
model_dir : str
+
Folder to search for and load the config file
+
+

Returns

+
+
config : dict
+
Dictionary containing the config parameters
+
+

Raises

+
+
IndexError
+
If there is no config file in the model_dir
+
+
+
def load_config_from_model_dir(self, model_dir):
+    """Finds for a config file from the model directory and loads it
+
+    Parameters
+    ----------
+    model_dir : `str`
+        Folder to search for and load the config file
+
+    Returns
+    ----------
+    config : `dict`
+        Dictionary containing the config parameters
+        
+    Raises
+    ------
+    IndexError
+        If there is no config file in the model_dir
+    """
+    
+    # check if yaml file exists in model_dir
+    try:
+        list_config_files = glob.glob(os.path.join(model_dir,'*config.yml'))
+        if len(list_config_files) > 1:
+            print("Multiple config files found. Loading {}".format(list_config_files[0]))
+        else:
+            print("Config file exists in model directory. Loading {}".format(list_config_files[0]))
+        return self.load_config_from_file(list_config_files[0])
+    except IndexError:
+        print("No config file found in model_dir.")
+        raise
+
+
+
+def update_parameter(self, parameter, value, config=None)
+
+

Updates the parameter in the config file using a full addressed list

+

Parameters

+
+
parameter : list
+
List of keys that point to the correct item in the nested dictionary
+
value : str or int or list
+
Value that is updated in the nested dictionary
+
config : list or none, optional
+
Used to iterate through nested dictionaries
+
+

Returns

+
+
TODO
+
 
+
+
+
def update_parameter(self, parameter, value, config = None):
+    """Updates the parameter in the config file using a full addressed list
+
+    Parameters
+    ----------
+    parameter : `list`
+        List of keys that point to the correct item in the nested dictionary
+        
+    value : `str` or `int` or `list`
+        Value that is updated in the nested dictionary
+        
+    config : `list` or `none`, optional
+        Used to iterate through nested dictionaries
+        
+    Returns
+    ----------
+    TODO
+    """
+    
+    assert type(parameter) is list
+            
+    if config is None:
+        config = self.config
+    
+    if len(parameter) == 1:
+        config.update({parameter[0]: value})
+        return config
+    # recurse into the nested dictionary rather than restarting from the top level
+    return self.update_parameter(parameter[1:], value, config = config.get(parameter[0]))
+
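Because the recursion descends into `config`, a deeply nested parameter can be updated through its full key path, e.g. (key path illustrative, matching the sample config's layout):

    config.update_parameter(["model", "optimizer", "learning_rate"], 0.001)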
+
+
+def write_config(self, file_path)
+
+

Writes parameters to yaml file

+

Parameters

+
+
file_path : str
+
Path of config file to write to
+
+
+
def write_config(self, file_path):
+    """Writes parameters to yaml file
+
+    Parameters
+    ----------
+    file_path : `str`
+        Path of config file to write to
+    """
+    
+    with open(file_path, 'w') as output_file:  
+        self.yaml.dump(self.config, output_file)
+
+    output_file.close()
+    
+    print("Config file written to: {}".format(file_path))
+
+
+
+def write_model(self, model, file_path)
+
+

Writes the model architecture to a yaml file

+

Parameters

+
+
model : :class:Keras.model
+
Keras model that will be parsed and written to a yaml file
+
file_path : str
+
Path of model file to write to
+
+
+
def write_model(self, model, file_path):
+    """Writes parameters to yaml file
+
+    Parameters
+    ----------
+    model : :class:`Keras.model`
+        Keras model that will be parsed and written to a yaml file
+    
+    file_path : `str`
+        Path of model file to write to
+    """
+    
+    with open(file_path, 'w') as output_file:  
+        output_file.write(model.to_yaml())
+
+    output_file.close()
+    
+    print("Model file written to: {}".format(file_path))
+
+
+
+
+
+
+
+ +
\ No newline at end of file
diff --git a/html/models/layers/index.html b/html/models/layers/index.html
new file mode 100644
index 0000000..cc15a41
--- /dev/null
+++ b/html/models/layers/index.html
@@ -0,0 +1,71 @@
+models.layers API documentation
+
+
+

Module models.layers

+
+
+
+
from __future__ import absolute_import, print_function
+
+
+
+

Sub-modules

+
+
models.layers.layers
+
+
+
+
+
+
+
+
+
+
+
+
+ +
\ No newline at end of file
diff --git a/html/models/layers/layers.html b/html/models/layers/layers.html
new file mode 100644
index 0000000..9eca4b5
--- /dev/null
+++ b/html/models/layers/layers.html
@@ -0,0 +1,222 @@
+models.layers.layers API documentation
+
+
+

Module models.layers.layers

+
+
+
+
import math
+
+import keras
+from keras.models import Model, load_model
+from keras.layers import Input, BatchNormalization, Activation
+from keras.layers.core import Lambda, Dropout
+from keras.layers.convolutional import Conv2D, Conv2DTranspose, UpSampling2D
+from keras.layers.convolutional_recurrent import ConvLSTM2D
+from keras.layers.pooling import MaxPooling2D
+from keras.layers.merge import Concatenate, Add
+from keras import regularizers
+from keras import backend as K
+
+import tensorflow as tf
+
+def activation_function(inputs, acti):
+    if isinstance(acti, str):
+        return Activation(acti)(inputs)
+    else:
+        return acti(inputs)
+
+def regularizer_function(weight_regularizer):
+    if weight_regularizer == 0 or weight_regularizer is None:
+        return None
+    else:
+        return regularizers.l2(weight_regularizer)
+    
+def bn_relu_conv2d(inputs, filters, filter_size, 
+                    strides = 1, acti = None, padding = None, 
+                    kernel_initializer = None, weight_regularizer = None, name = ""):
+    output = BatchNormalization()(inputs)
+    output = activation_function(output, acti)
+    output = Conv2D(filters, (filter_size, filter_size), padding=padding, strides = strides,
+                    kernel_initializer=kernel_initializer, 
+                    kernel_regularizer=regularizer_function(weight_regularizer))(output)
+            
+    return output
+
+def bn_relu_conv2dtranspose(inputs, filters, filter_size, 
+                            strides = 2, acti = None, padding = None, 
+                            kernel_initializer = None, weight_regularizer = None, name = ""):
+    output = BatchNormalization()(inputs)
+    output = activation_function(output, acti)
+    output = Conv2DTranspose(filters, (2, 2), strides=strides, padding=padding, 
+                             kernel_initializer=kernel_initializer, 
+                             kernel_regularizer=regularizer_function(weight_regularizer))(output)
+    return output
+
+def normalize_input(inputs, scale_input = False, mean_std_normalization = False, mean = None, std = None):
+    if mean_std_normalization is True:
+        print("Using normalization")
+        return Lambda(lambda x: (x - mean)/std)(inputs)
+    elif scale_input is True:
+        print("Not using normalization")
+        return Lambda(lambda x: x / 255)(inputs)
+    else:
+        return inputs
+            
+    
+
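A minimal sketch of composing these helpers into a model (shapes and hyperparameters are illustrative, not taken from this module):

    from keras.models import Model
    from keras.layers import Input

    inputs = Input((256, 256, 1))
    x = normalize_input(inputs, scale_input=True)   # scale raw 8-bit intensities to [0, 1]
    x = bn_relu_conv2d(x, filters=16, filter_size=3,
                       acti='relu', padding='same',
                       kernel_initializer='he_normal',
                       weight_regularizer=0)        # BatchNorm -> ReLU -> Conv2D
    model = Model(inputs, x)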
+
+
+
+
+
+
+

Functions

+
+
+def activation_function(inputs, acti)
+
+
+
+
def activation_function(inputs, acti):
+    if isinstance(acti, str):
+        return Activation(acti)(inputs)
+    else:
+        return acti(inputs)
+
+
+
+def bn_relu_conv2d(inputs, filters, filter_size, strides=1, acti=None, padding=None, kernel_initializer=None, weight_regularizer=None, name='')
+
+
+
+
def bn_relu_conv2d(inputs, filters, filter_size, 
+                    strides = 1, acti = None, padding = None, 
+                    kernel_initializer = None, weight_regularizer = None, name = ""):
+    output = BatchNormalization()(inputs)
+    output = activation_function(output, acti)
+    output = Conv2D(filters, (filter_size, filter_size), padding=padding, strides = strides,
+                    kernel_initializer=kernel_initializer, 
+                    kernel_regularizer=regularizer_function(weight_regularizer))(output)
+            
+    return output
+
+
+
+def bn_relu_conv2dtranspose(inputs, filters, filter_size, strides=2, acti=None, padding=None, kernel_initializer=None, weight_regularizer=None, name='')
+
+
+
+
def bn_relu_conv2dtranspose(inputs, filters, filter_size, 
+                            strides = 2, acti = None, padding = None, 
+                            kernel_initializer = None, weight_regularizer = None, name = ""):
+    output = BatchNormalization()(inputs)
+    output = activation_function(output, acti)
+    output = Conv2DTranspose(filters, (2, 2), strides=strides, padding=padding, 
+                             kernel_initializer=kernel_initializer, 
+                             kernel_regularizer=regularizer_function(weight_regularizer))(output)
+    return output
+
+
+
+def normalize_input(inputs, scale_input=False, mean_std_normalization=False, mean=None, std=None)
+
+
+
+
def normalize_input(inputs, scale_input = False, mean_std_normalization = False, mean = None, std = None):
+    if mean_std_normalization is True:
+        print("Using normalization")
+        return Lambda(lambda x: (x - mean)/std)(inputs)
+    elif scale_input is True:
+        print("Not using normalization")
+        return Lambda(lambda x: x / 255)(inputs)
+    else:
+        return inputs
+
+
+
+def regularizer_function(weight_regularizer)
+
+
+
+
def regularizer_function(weight_regularizer):
+    if weight_regularizer == 0 or weight_regularizer is None:
+        return None
+    else:
+        return regularizers.l2(weight_regularizer)
+
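In other words, a weight_regularizer of 0 or None disables weight regularization, and any positive value is interpreted as an L2 penalty coefficient:

    regularizer_function(0)       # -> None, no regularization
    regularizer_function(1e-4)    # -> regularizers.l2(1e-4)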
+
+
+
+
+
+
+ +
+ + + + + \ No newline at end of file diff --git a/models/.DS_Store b/models/.DS_Store new file mode 100644 index 0000000..23ef0c0 Binary files /dev/null and b/models/.DS_Store differ diff --git a/models/._.DS_Store b/models/._.DS_Store new file mode 100644 index 0000000..8e82ed9 Binary files /dev/null and b/models/._.DS_Store differ diff --git a/models/._CNN_Base.py b/models/._CNN_Base.py new file mode 100644 index 0000000..8817734 Binary files /dev/null and b/models/._CNN_Base.py differ diff --git a/models/.ipynb_checkpoints/CNN_Base-checkpoint.py b/models/.ipynb_checkpoints/CNN_Base-checkpoint.py new file mode 100644 index 0000000..ab13adc --- /dev/null +++ b/models/.ipynb_checkpoints/CNN_Base-checkpoint.py @@ -0,0 +1,570 @@ +import os + +import glob +import datetime + +import skimage.io +import numpy as np + +import tensorflow as tf + +import keras +from keras import backend as K +from keras.models import Model, load_model +from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint, TensorBoard, ProgbarLogger + +from .internals.image_functions import Image_Functions +from .internals.network_config import Network_Config +from .internals.dataset import Dataset + +class CNN_Base(Dataset, Image_Functions): + def __init__(self, model_dir = None, config_filepath = None, **kwargs): + """Creates the base neural network class with basic functions + + Parameters + ---------- + model_dir : `str`, optional + [Default: None] Folder where the model is stored + config_filepath : `str`, optional + [Default: None] Filepath to the config file + **kwargs + Parameters that are passed to :class:`network_config.Network_Config` + + Attributes + ---------- + config : :class:`network_config.Network_Config` + Network_config object containing the config and necessary functions + """ + + super().__init__() + + self.config = Network_Config(model_dir = model_dir, config_filepath = config_filepath, **kwargs) + + self.config.update_parameter(["general", "now"], datetime.datetime.now()) + + if self.config.get_parameter("use_cpu") is True: + self.initialize_cpu() + else: + self.initialize_gpu() + + ####################### + # Logging functions + ####################### + def init_logs(self): + """Initiates the parameters required for the log file + """ + # Directory for training logs + print(self.config.get_parameter("name"), self.config.get_parameter("now")) + self.log_dir = os.path.join(self.config.get_parameter("model_dir"), "{}-{:%Y%m%dT%H%M}".format(self.config.get_parameter("name"), self.config.get_parameter("now"))) + + if self.config.get_parameter("save_best_weights") is False: + # Path to save after each epoch. Include placeholders that get filled by Keras. 
+ self.checkpoint_path = os.path.join(self.log_dir, "{}-{:%Y%m%dT%H%M}_*epoch*.h5".format(self.config.get_parameter("name"), self.config.get_parameter("now"))) + self.checkpoint_path = self.checkpoint_path.replace("*epoch*", "{epoch:04d}") + else: + self.checkpoint_best = os.path.join(self.log_dir, "weights_best.h5") + self.checkpoint_now = os.path.join(self.log_dir, "weights_now.h5") + + def write_logs(self): + """Writes the log file + """ + # Create log_dir if it does not exist + if os.path.exists(self.log_dir) is False: + os.makedirs(self.log_dir) + + # save the parameters used in current run to logs dir + self.config.write_config(os.path.join(self.log_dir, "{}-{:%Y%m%dT%H%M}-config.yml".format(self.config.get_parameter("name"), self.config.get_parameter("now")))) + + ####################### + # Initialization functions + ####################### + def summary(self): + """Summary of the layers in the model + """ + self.model.summary() + + def compile_model(self, optimizer, loss): + """Compiles model + Parameters + ---------- + optimizer + Gradient optimizer used in during the training of the network + loss + Loss function of the network + + metrics + To try : + + Class tf.compat.v1.keras.metrics.MeanIoU + Class tf.compat.v2.keras.metrics.MeanIoU + Class tf.compat.v2.metrics.MeanIoU + + """ + if self.config.get_parameter("metrics") == ['IoU']: + print("Metrics : IoU") + from .internals.metrics import mean_iou + self.model.compile(optimizer, loss = loss, metrics = [mean_iou]) + + #self.model.compile(optimizer, loss = loss, metrics = [tf.keras.metrics.MeanIoU(num_classes=1+self.config.get_parameter("nb_classes"))]) + else: + print("Metrics : {}".format(self.config.get_parameter("metrics"))) + self.model.compile(optimizer, loss = loss, metrics = self.config.get_parameter("metrics")) + + def initialize_model(self): + """Initializes the logs, builds the model, and chooses the correct initialization function + """ + # write parameters to yaml file + self.init_logs() + if self.config.get_parameter("for_prediction") is False: + self.write_logs() + + # build model + self.model = self.build_model(self.config.get_parameter("input_size")) + + # save model to yaml file + if self.config.get_parameter("for_prediction") is False: + self.config.write_model(self.model, os.path.join(self.log_dir, "{}-{:%Y%m%dT%H%M}-model.yml".format(self.config.get_parameter("name"), self.config.get_parameter("now")))) + + print("{} using single GPU or CPU..".format("Predicting" if self.config.get_parameter("for_prediction") else "Training")) + self.initialize_model_normal() + + def initialize_cpu(self): + """Sets the session to only use the CPU + """ + config = tf.ConfigProto( + device_count = {'CPU' : 1, + 'GPU' : 0} + ) + session = tf.Session(config=config) + K.set_session(session) + + def get_free_gpu(self): + """Selects the gpu with the most free memory + """ + import subprocess + import os + import sys + from io import StringIO + import numpy as np + + output = subprocess.Popen('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free', stdout=subprocess.PIPE, shell=True).communicate()[0] + output = output.decode("ascii") + # assumes that it is on the popiah server and the last gpu is not used + memory_available = [int(x.split()[2]) for x in output.split("\n")[:-1]] + print("Setting GPU to use to PID {}".format(np.argmax(memory_available))) + return np.argmax(memory_available) + + def initialize_gpu(self): + """Sets the seesion to use the gpu specified in config file + """ + #if self.config.get_parameter("visible_gpu") == 
"None": + # gpu = self.get_free_gpu() + #else: + # gpu = self.config.get_parameter("visible_gpu") + + os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152 + #os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu) # needs to be a string + os.environ['CUDA_VISIBLE_DEVICES'] = str(0) # needs to be a string + + config = tf.ConfigProto() + config.gpu_options.allow_growth = True + sess = tf.Session(config=config) + K.tensorflow_backend.set_session(sess) + + def initialize_model_normal(self): + """Initializes the optimizer and any specified callback functions + """ + opt = self.optimizer_function() + self.compile_model(optimizer = opt, loss = self.loss_function(self.config.get_parameter("loss"))) + + if self.config.get_parameter("for_prediction") == False: + self.callbacks = self.model_checkpoint_call(verbose = True) + + if self.config.get_parameter("use_tensorboard") is True: + self.callbacks.append(self.tensorboard_call()) + + if self.config.get_parameter("reduce_LR_on_plateau") is True: + self.callbacks.append(ReduceLROnPlateau(monitor=self.config.get_parameter("reduce_LR_monitor"), + factor = self.config.get_parameter("reduce_LR_factor"), + patience = self.config.get_parameter("reduce_LR_patience"), + min_lr = self.config.get_parameter("reduce_LR_min_lr"), + verbose = True)) + + if self.config.get_parameter("early_stopping") is True: + self.callbacks.append(EarlyStopping(monitor=self.config.get_parameter("early_stopping_monitor"), + patience = self.config.get_parameter("early_stopping_patience"), + min_delta = self.config.get_parameter("early_stopping_min_delta"), + verbose = True)) + + ####################### + # Optimizer/Loss functions + ####################### + def optimizer_function(self, learning_rate = None): + """Initialize optimizer function + + Parameters + ---------- + learning_rate : `int` + Learning rate of the descent algorithm + + Returns + ---------- + optimizer + Function to call the optimizer + """ + if learning_rate is None: + learning_rate = self.config.get_parameter("learning_rate") + if self.config.get_parameter("optimizer_function") == 'sgd': + return keras.optimizers.SGD(lr = learning_rate, + decay = self.config.get_parameter("decay"), + momentum = self.config.get_parameter("momentum"), + nesterov = self.config.get_parameter("nesterov")) + elif self.config.get_parameter("optimizer_function") == 'rmsprop': + return keras.optimizers.RMSprop(lr = learning_rate, + decay = self.config.get_parameter("decay")) + elif self.config.get_parameter("optimizer_function") == 'adam': + return keras.optimizers.Adam(lr = learning_rate, + decay = self.config.get_parameter("decay")) + + def loss_function(self, loss): + """Initialize loss function + + Parameters + ---------- + loss : `str` + Name of the loss function + + Returns + ---------- + loss + Function to call loss function + """ + if loss == "binary_crossentropy": + if self.config.get_parameter("edge_enhance"): + from .internals.losses import EE_binary_crossentropy as loss + print("Loss : edge-enhanced binary crossentropy") + else: + print("Loss : binary crossentropy") + return loss + elif loss == "categorical_crossentropy": + if self.config.get_parameter("edge_enhance"): + from .internals.losses import EE_categorical_crossentropy as loss + print("Loss : Edge Enhanced categorical_crossentropy") + else: + print("ULoss : categorical_crossentropy") + return loss + elif loss == "jaccard_distance_loss": + if self.config.get_parameter("edge_enhance"): + from .internals.losses import EE_jaccard_distance_loss as jaccard_distance_loss + 
print("Loss : edge-enhanced jaccard_distance_loss") + else: + print("Loss : jaccard distance loss") + from .internals.losses import jaccard_distance_loss + return jaccard_distance_loss + elif loss == "dice_loss": + if self.config.get_parameter("edge_enhance"): + from .internals.losses import EE_dice_coef_loss as dice_coef_loss + print("Loss : edge-enhanced Dice loss") + else: + print("Loss : Dice loss") + from .internals.losses import dice_coef_loss + return dice_coef_loss + elif loss == "bce_dice_loss": + if self.config.get_parameter("edge_enhance"): + from .internals.losses import EE_bce_dice_loss as bce_dice_loss + print("Loss : Edge Enhanced 1 - Dice + BCE loss") + else: + print("Loss : 1 - Dice + BCE loss") + from .internals.losses import bce_dice_loss + return bce_dice_loss + elif loss == "ssim_loss": + if self.config.get_parameter("edge_enhance"): + from .internals.losses import EE_DSSIM_loss as DSSIM_loss + print("Loss : Edge Enhanced DSSIM loss") + else: + print("Loss : DSSIM loss") + from .internals.losses import DSSIM_loss + return DSSIM_loss + elif loss == "bce_ssim_loss": + if self.config.get_parameter("edge_enhance"): + from .internals.losses import EE_bce_ssim_loss as bce_ssim_loss + print("Loss : Edge Enhanced BCE + DSSIM loss") + else: + print("Loss : BCE + DSSIM loss") + from .internals.losses import bce_ssim_loss + return bce_ssim_loss + + + elif loss == "mean_squared_error": + return keras.losses.mean_squared_error + elif loss == "mean_absolute_error": + return keras.losses.mean_absolute_error + + elif loss == "lovasz_hinge": + print("Loss : Lovasz-hinge loss") + from .internals.losses import lovasz_loss + return lovasz_loss + elif loss == "ssim_mae_loss": + print("Loss : DSSIM + MAE loss") + from .internals.losses import dssim_mae_loss + return dssim_mae_loss + else: + print("Loss : {}".format(loss)) + return loss + + + ####################### + # Callbacks + ####################### + def tensorboard_call(self): + """Initialize tensorboard call + """ + return TensorBoard(log_dir=self.log_dir, + batch_size = self.config.get_parameter("batch_size_per_GPU"), + write_graph=self.config.get_parameter("write_graph"), + write_images=self.config.get_parameter("write_images"), + write_grads=self.config.get_parameter("write_grads"), + update_freq='epoch', + histogram_freq=self.config.get_parameter("histogram_freq")) + + def model_checkpoint_call(self, verbose = 0): + """Initialize model checkpoint call + """ + if self.config.get_parameter("save_best_weights") is False: + return [ModelCheckpoint(self.checkpoint_path, save_weights_only=True, verbose=verbose)] + else: + return [ModelCheckpoint(self.checkpoint_best, save_best_only=True, save_weights_only=True, verbose=verbose), + ModelCheckpoint(self.checkpoint_now, save_weights_only=True, verbose=verbose)] + + ####################### + # Clear memory once training is done + ####################### + def end_training(self): + """Deletes model and releases gpu memory held by tensorflow + """ + # del reference to model + del self.model + + # clear memory + tf.reset_default_graph() + K.clear_session() + + # take hold of cuda device to shut it down + from numba import cuda + cuda.select_device(0) + cuda.close() + + ####################### + # Train Model + ####################### + def train_model(self, verbose = True): + """Trains model + + Parameters + ---------- + verbose : `int`, optional + [Default: True] Verbose output + """ + history = self.model.fit(self.aug_images, self.aug_ground_truth, validation_split = 
self.config.get_parameter("val_split"), + batch_size = self.config.get_parameter("batch_size"), epochs = self.config.get_parameter("num_epochs"), shuffle = True, + callbacks=self.callbacks, verbose=verbose) + + self.end_training() + + ####################### + # Predict using loaded model weights + ####################### + # TODO: change to load model from yaml file + def load_model(self, model_dir = None): # redo + """Loads model from h5 file + + Parameters + ---------- + model_dir : `str`, optional + [Default: None] Directory containing the model file + """ + # TODO: rewrite to load model from yaml file + if model_dir is None: + model_dir = self.config.get_parameter("model_dir") + + if os.path.isdir(model_dir) is True: + list_weights_files = glob.glob(os.path.join(model_dir,'*.h5')) + list_weights_files.sort() # To ensure that [-1] gives the last file + + model_dir = os.path.join(model_dir,list_weights_files[-1]) + + self.model.load_model(model_dir) + print("Loaded model from: " + model_dir) + + def load_weights(self, weights_path = None, weights_index = -1): + """Loads weights from h5 file + + Parameters + ---------- + weights_path : `str`, optional + [Default: None] Path containing the weights file or the directory to the weights file + weights_index : `int`, optional + [Default: -1] + """ + if weights_path is None: + weights_path = self.config.get_parameter("model_dir") + + if os.path.isdir(weights_path) is True: + if self.config.get_parameter("save_best_weights") is True: + weights_path = os.path.join(weights_path, "weights_best.h5") + else: + list_weights_files = glob.glob(os.path.join(weights_path,'*.h5')) + list_weights_files.sort() # To ensure that [-1] gives the last file + self.weights_path = list_weights_files[weights_index] + weights_path = os.path.join(weights_path, self.weights_path) + else: + self.weights_path = weights_path + + self.model.load_weights(weights_path) + print("Loaded weights from: " + weights_path) + + + def predict_images(self, image_dir): + """Perform prediction on images found in ``image_dir`` + + Parameters + ---------- + image_dir : `str` + Directory containing the images to perform prediction on + + Returns + ---------- + image : `array_like` + Last image that prediction was perfromed on + """ + + # load image list + from tqdm.notebook import tqdm + image_list = self.list_images(image_dir) + for image_path in tqdm(image_list): + #for image_path in image_list: + image = self.load_image(image_path = image_path) + #print(image.shape) + + # percentile normalization + if self.config.get_parameter("percentile_normalization"): + image, _, _ = self.percentile_normalization(image, in_bound = self.config.get_parameter("percentile")) + + if self.config.get_parameter("tile_overlap_size") == [0,0]: + padding = None + if len(image.shape)==2: + image = np.expand_dims(image, axis = -1) + + # If length =3 : X Y C + elif len(image.shape)==3: + if image.shape[0] != self.config.get_parameter("tile_size")[0]: + if image.shape[1] != self.config.get_parameter("tile_size")[1]: + image = np.transpose(image,(1,2,0)) + + image = np.expand_dims(image, axis = 0) + if image.shape[1] < self.config.get_parameter("tile_size")[0] or image.shape[2] < self.config.get_parameter("tile_size")[1]: + image, padding = self.pad_image(image, image_size = self.config.get_parameter("tile_size")) + + # Else, length : N X Y Z / N X Y T + elif len(image.shape)==4: + if image.shape[1] != self.config.get_parameter("tile_size")[0]: # Means N X T Y + image = np.transpose(image,(0,1,3,2)) + if 
image.shape[1] < self.config.get_parameter("tile_size")[0] or image.shape[2] < self.config.get_parameter("tile_size")[1]: + image, padding = self.pad_image(image, image_size = self.config.get_parameter("tile_size")) + #if image.shape[0] != 1: + # image = np.transpose(image,(3,1,2,0)) + + + # Single slice image vs Stack of images (no need of new axis) + if len(image.shape)==3: + input_image = image[np.newaxis,:,:] + #output_image = self.model.predict(input_image, verbose=1) + output_image = self.model.predict(input_image) + + elif len(image.shape)==4: + output_image = [] + for i in tqdm(range(image.shape[0])): + input_image = image[i,:,:,:] + input_image = np.expand_dims(input_image, axis = 0) + if i == 0: + #output_image = self.model.predict(input_image, verbose=1) + output_image = self.model.predict(input_image) + + else: + #output_image = np.append(output_image,self.model.predict(input_image, verbose=1), axis = 0) + output_image = np.append(output_image,self.model.predict(input_image), axis = 0) + + else: + output_image = image + for i in tqdm(range(image.shape[0])): + for j in range(image.shape[1]): + input_image = image[i,j,:,:,:] + input_image = np.expand_dims(input_image, axis = 0) + #output_image[i,j,:,:,:] = self.model.predict(input_image, verbose=1) + output_image[i,j,:,:,:] = self.model.predict(input_image) + + if padding is not None: + h, w = output_image.shape[1:3] + output_image = np.reshape(output_image, (h, w)) + output_image = self.remove_pad_image(output_image, padding = padding) + else: + tile_image_list, num_rows, num_cols, padding = self.tile_image(image, self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size")) + + pred_train_list = [] + for tile in tile_image_list: + + # reshape image to correct dimensions for unet + h, w = tile.shape[:2] + + tile = np.reshape(tile, (1, h, w, 1)) + + pred_train_list.extend(self.model.predict(tile, verbose=1)) + + output_image = self.untile_image(pred_train_list, self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size"), + num_rows, num_cols, padding = padding) + + self.save_image(output_image, image_path) + #print(output_image.shape) + + return output_image + + def save_image(self, image, image_path, subfolder = 'Masks', suffix = '-preds'): + """Saves image to image_path + + Final location of image is as follows: + - image_path + - subfolder + - model/weights file name + + Parameters + ---------- + image : `array_like` + Image to be saved + image_path : `str` + Location to save the image in + subfolder : `str` + [Default: 'Masks'] Subfolder in which the image is to be saved in + suffix : `str` + [Default: '-preds'] Suffix to append to the filename of the predicted image + """ + image_dir = os.path.dirname(image_path) + + output_dir = os.path.join(image_dir, subfolder) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + + if self.config.get_parameter("save_best_weights") is True: + basename = os.path.basename(self.config.get_parameter("model_dir")) + else: + basename, _ = os.path.splitext(os.path.basename(self.weights_path)) + + output_dir = os.path.join(output_dir, basename) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + + filename, _ = os.path.splitext(os.path.basename(image_path)) + output_path = os.path.join(output_dir, "{}{}.tif".format(filename, suffix)) + + if self.config.get_parameter("save_as_uint16") is True: + image = skimage.util.img_as_uint(image) + skimage.io.imsave(output_path, image) diff --git 
diff --git a/models/CNN_Base.py b/models/CNN_Base.py
new file mode 100644
index 0000000..ab13adc
--- /dev/null
+++ b/models/CNN_Base.py
@@ -0,0 +1,570 @@
+import os
+
+import glob
+import datetime
+
+import skimage.io
+import numpy as np
+
+import tensorflow as tf
+
+import keras
+from keras import backend as K
+from keras.models import Model, load_model
+from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint, TensorBoard, ProgbarLogger
+
+from .internals.image_functions import Image_Functions
+from .internals.network_config import Network_Config
+from .internals.dataset import Dataset
+
+class CNN_Base(Dataset, Image_Functions):
+ def __init__(self, model_dir = None, config_filepath = None, **kwargs):
+ """Creates the base neural network class with basic functions
+
+ Parameters
+ ----------
+ model_dir : `str`, optional
+ [Default: None] Folder where the model is stored
+ config_filepath : `str`, optional
+ [Default: None] Filepath to the config file
+ **kwargs
+ Parameters that are passed to :class:`network_config.Network_Config`
+
+ Attributes
+ ----------
+ config : :class:`network_config.Network_Config`
+ Network_Config object containing the config and necessary functions
+ """
+
+ super().__init__()
+
+ self.config = Network_Config(model_dir = model_dir, config_filepath = config_filepath, **kwargs)
+
+ self.config.update_parameter(["general", "now"], datetime.datetime.now())
+
+ if self.config.get_parameter("use_cpu") is True:
+ self.initialize_cpu()
+ else:
+ self.initialize_gpu()
+
+ #######################
+ # Logging functions
+ #######################
+ def init_logs(self):
+ """Initiates the parameters required for the log file
+ """
+ # Directory for training logs
+ print(self.config.get_parameter("name"), self.config.get_parameter("now"))
+ self.log_dir = os.path.join(self.config.get_parameter("model_dir"), "{}-{:%Y%m%dT%H%M}".format(self.config.get_parameter("name"), self.config.get_parameter("now")))
+
+ if self.config.get_parameter("save_best_weights") is False:
+ # Path to save after each epoch. Include placeholders that get filled by Keras.
+ self.checkpoint_path = os.path.join(self.log_dir, "{}-{:%Y%m%dT%H%M}_*epoch*.h5".format(self.config.get_parameter("name"), self.config.get_parameter("now")))
+ self.checkpoint_path = self.checkpoint_path.replace("*epoch*", "{epoch:04d}")
+ else:
+ self.checkpoint_best = os.path.join(self.log_dir, "weights_best.h5")
+ self.checkpoint_now = os.path.join(self.log_dir, "weights_now.h5")
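The "*epoch*" placeholder in init_logs() above works because Keras' ModelCheckpoint formats its filepath with the current epoch number at save time. A minimal sketch of the same templating (the paths here are hypothetical):

import os

log_dir = "/tmp/Unet-20200101T0000"  # hypothetical log directory
checkpoint_path = os.path.join(log_dir, "Unet-20200101T0000_*epoch*.h5")
# swap the placeholder for a format field, exactly as init_logs() does
checkpoint_path = checkpoint_path.replace("*epoch*", "{epoch:04d}")
# ModelCheckpoint calls filepath.format(epoch=...) when saving, so epoch 7 becomes:
print(checkpoint_path.format(epoch=7))  # /tmp/Unet-20200101T0000/Unet-20200101T0000_0007.h5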
+
+ def write_logs(self):
+ """Writes the log file
+ """
+ # Create log_dir if it does not exist
+ if os.path.exists(self.log_dir) is False:
+ os.makedirs(self.log_dir)
+
+ # save the parameters used in current run to logs dir
+ self.config.write_config(os.path.join(self.log_dir, "{}-{:%Y%m%dT%H%M}-config.yml".format(self.config.get_parameter("name"), self.config.get_parameter("now"))))
+
+ #######################
+ # Initialization functions
+ #######################
+ def summary(self):
+ """Summary of the layers in the model
+ """
+ self.model.summary()
+
+ def compile_model(self, optimizer, loss):
+ """Compiles model
+
+ Parameters
+ ----------
+ optimizer
+ Gradient optimizer used during the training of the network
+ loss
+ Loss function of the network
+
+ metrics
+ To try :
+ Class tf.compat.v1.keras.metrics.MeanIoU
+ Class tf.compat.v2.keras.metrics.MeanIoU
+ Class tf.compat.v2.metrics.MeanIoU
+ """
+ if self.config.get_parameter("metrics") == ['IoU']:
+ print("Metrics : IoU")
+ from .internals.metrics import mean_iou
+ self.model.compile(optimizer, loss = loss, metrics = [mean_iou])
+ #self.model.compile(optimizer, loss = loss, metrics = [tf.keras.metrics.MeanIoU(num_classes=1+self.config.get_parameter("nb_classes"))])
+ else:
+ print("Metrics : {}".format(self.config.get_parameter("metrics")))
+ self.model.compile(optimizer, loss = loss, metrics = self.config.get_parameter("metrics"))
+
+ def initialize_model(self):
+ """Initializes the logs, builds the model, and chooses the correct initialization function
+ """
+ # write parameters to yaml file
+ self.init_logs()
+ if self.config.get_parameter("for_prediction") is False:
+ self.write_logs()
+
+ # build model
+ self.model = self.build_model(self.config.get_parameter("input_size"))
+
+ # save model to yaml file
+ if self.config.get_parameter("for_prediction") is False:
+ self.config.write_model(self.model, os.path.join(self.log_dir, "{}-{:%Y%m%dT%H%M}-model.yml".format(self.config.get_parameter("name"), self.config.get_parameter("now"))))
+
+ print("{} using single GPU or CPU..".format("Predicting" if self.config.get_parameter("for_prediction") else "Training"))
+ self.initialize_model_normal()
+
+ def initialize_cpu(self):
+ """Sets the session to only use the CPU
+ """
+ config = tf.ConfigProto(
+ device_count = {'CPU' : 1,
+ 'GPU' : 0}
+ )
+ session = tf.Session(config=config)
+ K.set_session(session)
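initialize_cpu() above and initialize_gpu() below both follow the TF1 idiom: device visibility is fixed through environment variables before the first session exists, and memory behaviour through tf.ConfigProto. A standalone sketch of the same pattern, assuming the TF1/Keras 2 stack this repository targets:

import os
# must be set before TensorFlow creates its first session
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"   # expose only the first GPU; "" would force CPU

import tensorflow as tf
from keras import backend as K

config = tf.ConfigProto()
config.gpu_options.allow_growth = True     # allocate GPU memory on demand, not all at once
K.set_session(tf.Session(config=config))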
"None": + # gpu = self.get_free_gpu() + #else: + # gpu = self.config.get_parameter("visible_gpu") + + os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152 + #os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu) # needs to be a string + os.environ['CUDA_VISIBLE_DEVICES'] = str(0) # needs to be a string + + config = tf.ConfigProto() + config.gpu_options.allow_growth = True + sess = tf.Session(config=config) + K.tensorflow_backend.set_session(sess) + + def initialize_model_normal(self): + """Initializes the optimizer and any specified callback functions + """ + opt = self.optimizer_function() + self.compile_model(optimizer = opt, loss = self.loss_function(self.config.get_parameter("loss"))) + + if self.config.get_parameter("for_prediction") == False: + self.callbacks = self.model_checkpoint_call(verbose = True) + + if self.config.get_parameter("use_tensorboard") is True: + self.callbacks.append(self.tensorboard_call()) + + if self.config.get_parameter("reduce_LR_on_plateau") is True: + self.callbacks.append(ReduceLROnPlateau(monitor=self.config.get_parameter("reduce_LR_monitor"), + factor = self.config.get_parameter("reduce_LR_factor"), + patience = self.config.get_parameter("reduce_LR_patience"), + min_lr = self.config.get_parameter("reduce_LR_min_lr"), + verbose = True)) + + if self.config.get_parameter("early_stopping") is True: + self.callbacks.append(EarlyStopping(monitor=self.config.get_parameter("early_stopping_monitor"), + patience = self.config.get_parameter("early_stopping_patience"), + min_delta = self.config.get_parameter("early_stopping_min_delta"), + verbose = True)) + + ####################### + # Optimizer/Loss functions + ####################### + def optimizer_function(self, learning_rate = None): + """Initialize optimizer function + + Parameters + ---------- + learning_rate : `int` + Learning rate of the descent algorithm + + Returns + ---------- + optimizer + Function to call the optimizer + """ + if learning_rate is None: + learning_rate = self.config.get_parameter("learning_rate") + if self.config.get_parameter("optimizer_function") == 'sgd': + return keras.optimizers.SGD(lr = learning_rate, + decay = self.config.get_parameter("decay"), + momentum = self.config.get_parameter("momentum"), + nesterov = self.config.get_parameter("nesterov")) + elif self.config.get_parameter("optimizer_function") == 'rmsprop': + return keras.optimizers.RMSprop(lr = learning_rate, + decay = self.config.get_parameter("decay")) + elif self.config.get_parameter("optimizer_function") == 'adam': + return keras.optimizers.Adam(lr = learning_rate, + decay = self.config.get_parameter("decay")) + + def loss_function(self, loss): + """Initialize loss function + + Parameters + ---------- + loss : `str` + Name of the loss function + + Returns + ---------- + loss + Function to call loss function + """ + if loss == "binary_crossentropy": + if self.config.get_parameter("edge_enhance"): + from .internals.losses import EE_binary_crossentropy as loss + print("Loss : edge-enhanced binary crossentropy") + else: + print("Loss : binary crossentropy") + return loss + elif loss == "categorical_crossentropy": + if self.config.get_parameter("edge_enhance"): + from .internals.losses import EE_categorical_crossentropy as loss + print("Loss : Edge Enhanced categorical_crossentropy") + else: + print("ULoss : categorical_crossentropy") + return loss + elif loss == "jaccard_distance_loss": + if self.config.get_parameter("edge_enhance"): + from .internals.losses import EE_jaccard_distance_loss as jaccard_distance_loss + 
print("Loss : edge-enhanced jaccard_distance_loss") + else: + print("Loss : jaccard distance loss") + from .internals.losses import jaccard_distance_loss + return jaccard_distance_loss + elif loss == "dice_loss": + if self.config.get_parameter("edge_enhance"): + from .internals.losses import EE_dice_coef_loss as dice_coef_loss + print("Loss : edge-enhanced Dice loss") + else: + print("Loss : Dice loss") + from .internals.losses import dice_coef_loss + return dice_coef_loss + elif loss == "bce_dice_loss": + if self.config.get_parameter("edge_enhance"): + from .internals.losses import EE_bce_dice_loss as bce_dice_loss + print("Loss : Edge Enhanced 1 - Dice + BCE loss") + else: + print("Loss : 1 - Dice + BCE loss") + from .internals.losses import bce_dice_loss + return bce_dice_loss + elif loss == "ssim_loss": + if self.config.get_parameter("edge_enhance"): + from .internals.losses import EE_DSSIM_loss as DSSIM_loss + print("Loss : Edge Enhanced DSSIM loss") + else: + print("Loss : DSSIM loss") + from .internals.losses import DSSIM_loss + return DSSIM_loss + elif loss == "bce_ssim_loss": + if self.config.get_parameter("edge_enhance"): + from .internals.losses import EE_bce_ssim_loss as bce_ssim_loss + print("Loss : Edge Enhanced BCE + DSSIM loss") + else: + print("Loss : BCE + DSSIM loss") + from .internals.losses import bce_ssim_loss + return bce_ssim_loss + + + elif loss == "mean_squared_error": + return keras.losses.mean_squared_error + elif loss == "mean_absolute_error": + return keras.losses.mean_absolute_error + + elif loss == "lovasz_hinge": + print("Loss : Lovasz-hinge loss") + from .internals.losses import lovasz_loss + return lovasz_loss + elif loss == "ssim_mae_loss": + print("Loss : DSSIM + MAE loss") + from .internals.losses import dssim_mae_loss + return dssim_mae_loss + else: + print("Loss : {}".format(loss)) + return loss + + + ####################### + # Callbacks + ####################### + def tensorboard_call(self): + """Initialize tensorboard call + """ + return TensorBoard(log_dir=self.log_dir, + batch_size = self.config.get_parameter("batch_size_per_GPU"), + write_graph=self.config.get_parameter("write_graph"), + write_images=self.config.get_parameter("write_images"), + write_grads=self.config.get_parameter("write_grads"), + update_freq='epoch', + histogram_freq=self.config.get_parameter("histogram_freq")) + + def model_checkpoint_call(self, verbose = 0): + """Initialize model checkpoint call + """ + if self.config.get_parameter("save_best_weights") is False: + return [ModelCheckpoint(self.checkpoint_path, save_weights_only=True, verbose=verbose)] + else: + return [ModelCheckpoint(self.checkpoint_best, save_best_only=True, save_weights_only=True, verbose=verbose), + ModelCheckpoint(self.checkpoint_now, save_weights_only=True, verbose=verbose)] + + ####################### + # Clear memory once training is done + ####################### + def end_training(self): + """Deletes model and releases gpu memory held by tensorflow + """ + # del reference to model + del self.model + + # clear memory + tf.reset_default_graph() + K.clear_session() + + # take hold of cuda device to shut it down + from numba import cuda + cuda.select_device(0) + cuda.close() + + ####################### + # Train Model + ####################### + def train_model(self, verbose = True): + """Trains model + + Parameters + ---------- + verbose : `int`, optional + [Default: True] Verbose output + """ + history = self.model.fit(self.aug_images, self.aug_ground_truth, validation_split = 
self.config.get_parameter("val_split"), + batch_size = self.config.get_parameter("batch_size"), epochs = self.config.get_parameter("num_epochs"), shuffle = True, + callbacks=self.callbacks, verbose=verbose) + + self.end_training() + + ####################### + # Predict using loaded model weights + ####################### + # TODO: change to load model from yaml file + def load_model(self, model_dir = None): # redo + """Loads model from h5 file + + Parameters + ---------- + model_dir : `str`, optional + [Default: None] Directory containing the model file + """ + # TODO: rewrite to load model from yaml file + if model_dir is None: + model_dir = self.config.get_parameter("model_dir") + + if os.path.isdir(model_dir) is True: + list_weights_files = glob.glob(os.path.join(model_dir,'*.h5')) + list_weights_files.sort() # To ensure that [-1] gives the last file + + model_dir = os.path.join(model_dir,list_weights_files[-1]) + + self.model.load_model(model_dir) + print("Loaded model from: " + model_dir) + + def load_weights(self, weights_path = None, weights_index = -1): + """Loads weights from h5 file + + Parameters + ---------- + weights_path : `str`, optional + [Default: None] Path containing the weights file or the directory to the weights file + weights_index : `int`, optional + [Default: -1] + """ + if weights_path is None: + weights_path = self.config.get_parameter("model_dir") + + if os.path.isdir(weights_path) is True: + if self.config.get_parameter("save_best_weights") is True: + weights_path = os.path.join(weights_path, "weights_best.h5") + else: + list_weights_files = glob.glob(os.path.join(weights_path,'*.h5')) + list_weights_files.sort() # To ensure that [-1] gives the last file + self.weights_path = list_weights_files[weights_index] + weights_path = os.path.join(weights_path, self.weights_path) + else: + self.weights_path = weights_path + + self.model.load_weights(weights_path) + print("Loaded weights from: " + weights_path) + + + def predict_images(self, image_dir): + """Perform prediction on images found in ``image_dir`` + + Parameters + ---------- + image_dir : `str` + Directory containing the images to perform prediction on + + Returns + ---------- + image : `array_like` + Last image that prediction was perfromed on + """ + + # load image list + from tqdm.notebook import tqdm + image_list = self.list_images(image_dir) + for image_path in tqdm(image_list): + #for image_path in image_list: + image = self.load_image(image_path = image_path) + #print(image.shape) + + # percentile normalization + if self.config.get_parameter("percentile_normalization"): + image, _, _ = self.percentile_normalization(image, in_bound = self.config.get_parameter("percentile")) + + if self.config.get_parameter("tile_overlap_size") == [0,0]: + padding = None + if len(image.shape)==2: + image = np.expand_dims(image, axis = -1) + + # If length =3 : X Y C + elif len(image.shape)==3: + if image.shape[0] != self.config.get_parameter("tile_size")[0]: + if image.shape[1] != self.config.get_parameter("tile_size")[1]: + image = np.transpose(image,(1,2,0)) + + image = np.expand_dims(image, axis = 0) + if image.shape[1] < self.config.get_parameter("tile_size")[0] or image.shape[2] < self.config.get_parameter("tile_size")[1]: + image, padding = self.pad_image(image, image_size = self.config.get_parameter("tile_size")) + + # Else, length : N X Y Z / N X Y T + elif len(image.shape)==4: + if image.shape[1] != self.config.get_parameter("tile_size")[0]: # Means N X T Y + image = np.transpose(image,(0,1,3,2)) + if 
+
+ def predict_images(self, image_dir):
+ """Perform prediction on images found in ``image_dir``
+
+ Parameters
+ ----------
+ image_dir : `str`
+ Directory containing the images to perform prediction on
+
+ Returns
+ ----------
+ image : `array_like`
+ Last image that prediction was performed on
+ """
+
+ # load image list
+ from tqdm.notebook import tqdm
+ image_list = self.list_images(image_dir)
+ for image_path in tqdm(image_list):
+ image = self.load_image(image_path = image_path)
+
+ # percentile normalization
+ if self.config.get_parameter("percentile_normalization"):
+ image, _, _ = self.percentile_normalization(image, in_bound = self.config.get_parameter("percentile"))
+
+ if self.config.get_parameter("tile_overlap_size") == [0,0]:
+ padding = None
+ if len(image.shape)==2:
+ image = np.expand_dims(image, axis = -1)
+ # If length = 3 : X Y C
+ elif len(image.shape)==3:
+ if image.shape[0] != self.config.get_parameter("tile_size")[0]:
+ if image.shape[1] != self.config.get_parameter("tile_size")[1]:
+ image = np.transpose(image,(1,2,0))
+ image = np.expand_dims(image, axis = 0)
+ if image.shape[1] < self.config.get_parameter("tile_size")[0] or image.shape[2] < self.config.get_parameter("tile_size")[1]:
+ image, padding = self.pad_image(image, image_size = self.config.get_parameter("tile_size"))
+ # Else, length : N X Y Z / N X Y T
+ elif len(image.shape)==4:
+ if image.shape[1] != self.config.get_parameter("tile_size")[0]: # Means N X T Y
+ image = np.transpose(image,(0,1,3,2))
+ if image.shape[1] < self.config.get_parameter("tile_size")[0] or image.shape[2] < self.config.get_parameter("tile_size")[1]:
+ image, padding = self.pad_image(image, image_size = self.config.get_parameter("tile_size"))
+
+ # Single slice image vs Stack of images (no need of new axis)
+ if len(image.shape)==3:
+ input_image = image[np.newaxis,:,:]
+ output_image = self.model.predict(input_image)
+
+ elif len(image.shape)==4:
+ output_image = []
+ for i in tqdm(range(image.shape[0])):
+ input_image = image[i,:,:,:]
+ input_image = np.expand_dims(input_image, axis = 0)
+ if i == 0:
+ output_image = self.model.predict(input_image)
+ else:
+ output_image = np.append(output_image, self.model.predict(input_image), axis = 0)
+
+ else:
+ output_image = image
+ for i in tqdm(range(image.shape[0])):
+ for j in range(image.shape[1]):
+ input_image = image[i,j,:,:,:]
+ input_image = np.expand_dims(input_image, axis = 0)
+ output_image[i,j,:,:,:] = self.model.predict(input_image)
+
+ if padding is not None:
+ h, w = output_image.shape[1:3]
+ output_image = np.reshape(output_image, (h, w))
+ output_image = self.remove_pad_image(output_image, padding = padding)
+ else:
+ tile_image_list, num_rows, num_cols, padding = self.tile_image(image, self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size"))
+
+ pred_train_list = []
+ for tile in tile_image_list:
+ # reshape image to correct dimensions for unet
+ h, w = tile.shape[:2]
+ tile = np.reshape(tile, (1, h, w, 1))
+ pred_train_list.extend(self.model.predict(tile, verbose=1))
+
+ output_image = self.untile_image(pred_train_list, self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size"),
+ num_rows, num_cols, padding = padding)
+
+ self.save_image(output_image, image_path)
+
+ return output_image
+
+ def save_image(self, image, image_path, subfolder = 'Masks', suffix = '-preds'):
+ """Saves the predicted image to a location derived from ``image_path``
+
+ Final location of the image is as follows:
+ - image_path
+ - subfolder
+ - model/weights file name
+
+ Parameters
+ ----------
+ image : `array_like`
+ Image to be saved
+ image_path : `str`
+ Path of the input image; the output location is derived from it
+ subfolder : `str`
+ [Default: 'Masks'] Subfolder in which the image is saved
+ suffix : `str`
+ [Default: '-preds'] Suffix to append to the filename of the predicted image
+ """
+ image_dir = os.path.dirname(image_path)
+
+ output_dir = os.path.join(image_dir, subfolder)
+ if not os.path.exists(output_dir):
+ os.makedirs(output_dir)
+
+ if self.config.get_parameter("save_best_weights") is True:
+ basename = os.path.basename(self.config.get_parameter("model_dir"))
+ else:
+ basename, _ = os.path.splitext(os.path.basename(self.weights_path))
+
+ output_dir = os.path.join(output_dir, basename)
+ if not os.path.exists(output_dir):
+ os.makedirs(output_dir)
+
+ filename, _ = os.path.splitext(os.path.basename(image_path))
+ output_path = os.path.join(output_dir, "{}{}.tif".format(filename, suffix))
+
+ if self.config.get_parameter("save_as_uint16") is True:
+ image = skimage.util.img_as_uint(image)
+ skimage.io.imsave(output_path, image)
diff --git a/models/Unet.py b/models/Unet.py new
file mode 100644 index 0000000..750186a --- /dev/null +++ b/models/Unet.py @@ -0,0 +1,109 @@ +import math + +import keras +from keras.models import Model, load_model +from keras.layers import Input, BatchNormalization, Activation +from keras.layers.core import Lambda, Dropout +from keras.layers.convolutional import Conv2D, Conv2DTranspose, UpSampling2D +from keras.layers.convolutional_recurrent import ConvLSTM2D +from keras.layers.pooling import MaxPooling2D +from keras.layers.merge import Concatenate, Add +from keras import regularizers +from keras import backend as K + +import tensorflow as tf + +from .CNN_Base import CNN_Base +from .layers.layers import normalize_input, activation_function, regularizer_function, bn_relu_conv2d + +###### +# Unet +###### +class Unet(CNN_Base): + """ + Unet functions + see https://www.nature.com/articles/s41592-018-0261-2 + """ + + def __init__(self, model_dir = None, name = 'Unet', **kwargs): + super().__init__(model_dir = model_dir, **kwargs) + + self.config.update_parameter(["model","name"], name) + + def build_model(self, input_size, mean_std_normalization = None, + dropout_value = None, acti = None, padding = None, + kernel_initializer = None, weight_regularizer = None, strides = None): + + ### get parameters from config file ### + filters = self.config.get_parameter("filters") + + if dropout_value is None: + dropout_value = self.config.get_parameter("dropout_value") + if acti is None: + acti = self.config.get_parameter("activation_function") + if padding is None: + padding = self.config.get_parameter("padding") + if kernel_initializer is None: + kernel_initializer = self.config.get_parameter("initializer") + if weight_regularizer is None: + weight_regularizer = self.config.get_parameter("weight_regularizer") + if strides is None: + strides = self.config.get_parameter("strides") + if mean_std_normalization is None: + if self.config.get_parameter("mean_std_normalization") == True: + mean = self.config.get_parameter("mean") + std = self.config.get_parameter("std") + else: + mean = None + std = None + + ### Actual network### + inputs = Input(input_size) + + # normalize images + layer = normalize_input(inputs, + scale_input = self.config.get_parameter("scale_input"), + mean_std_normalization = self.config.get_parameter("mean_std_normalization"), + mean = mean, std = std) + + layer_store = [] + + # encoding arm + for _ in range(self.config.get_parameter("levels")): + layer = bn_relu_conv2d(layer, filters, 3, acti=acti, padding=padding, strides=strides, + kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer) + + layer = bn_relu_conv2d(layer, filters, 3, acti=acti, padding=padding, strides=strides, + kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer) + + layer_store.append(layer) + layer = MaxPooling2D((2, 2))(layer) + + filters = filters * 2 + + + layer = bn_relu_conv2d(layer, filters, 3, acti=acti, padding=padding, strides=strides, + kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer) + + layer = bn_relu_conv2d(layer, filters, 3, acti=acti, padding=padding, strides=strides, + kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer) + + # decoding arm + for i in range(self.config.get_parameter("levels")): + layer = Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding='same')(layer) + + layer = Concatenate(axis=3)([layer, layer_store[-i -1]]) + filters = filters // 2 + + layer = bn_relu_conv2d(layer, filters, 3, acti=acti, padding=padding, strides=strides, + 
kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer) + + layer = bn_relu_conv2d(layer, filters, 3, acti=acti, padding=padding, strides=strides, + kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer) + if self.config.get_parameter("nb_classes") == 1: + outputs = Conv2D(1, (1, 1), activation=self.config.get_parameter("final_activation"))(layer) + else: + outputs = Conv2D(self.config.get_parameter("nb_classes")+1, (1, 1), activation=self.config.get_parameter("final_activation"))(layer) + + + return Model(inputs=[inputs], outputs=[outputs], name='Unet') diff --git a/models/Unet_ResAttnet.py b/models/Unet_ResAttnet.py new file mode 100644 index 0000000..d9ed1a9 --- /dev/null +++ b/models/Unet_ResAttnet.py @@ -0,0 +1,501 @@ +from keras.layers import Input, concatenate, add, \ + Multiply, Lambda +from keras.layers.convolutional import Conv3D, MaxPooling3D, MaxPooling2D, UpSampling2D, \ + UpSampling3D, Conv2D +from keras.layers.core import Activation +from keras.layers.normalization import BatchNormalization +from keras.models import Model +from .CNN_Base import CNN_Base + + +# Get neural network +class RA_Unet(CNN_Base): + + def __init__(self, model_dir = None, **kwargs): + super().__init__(model_dir = model_dir, **kwargs) + + def build_model(self, inp_shape): + name = self.config.get_parameter("name") + if name == 'Res_att_unet_2d': + model = self.build_res_atten_unet_2d(inp_shape) + return model + elif name == 'Res_att_unet_3d': + model = self.build_res_atten_unet_3d(inp_shape) + return model + + + # ============================================================ + # ======================Attention ResUnet 3D================================# + # ============================================================ + + + def attention_block(self,input, input_channels=None, output_channels=None, encoder_depth=1, name='out'): + """ + attention block + https://arxiv.org/abs/1704.06904 + """ + p = 1 + t = 2 + r = 1 + + if input_channels is None: + input_channels = input.get_shape()[-1].value + if output_channels is None: + output_channels = input_channels + + # First Residual Block + for i in range(p): + input = self.residual_block(input) + + # Trunc Branch + output_trunk = input + for i in range(t): + output_trunk = self.residual_block(output_trunk, output_channels=output_channels) + + # Soft Mask Branch + + ## encoder + ### first down sampling + output_soft_mask = MaxPooling3D(padding='same')(input) # 32x32 + for i in range(r): + output_soft_mask = self.residual_block(output_soft_mask) + + skip_connections = [] + for i in range(encoder_depth - 1): + + ## skip connections + output_skip_connection = self.residual_block(output_soft_mask) + skip_connections.append(output_skip_connection) + # print ('skip shape:', output_skip_connection.get_shape()) + + ## down sampling + output_soft_mask = MaxPooling3D(padding='same')(output_soft_mask) + for _ in range(r): + output_soft_mask = self.residual_block(output_soft_mask) + + ## decoder + skip_connections = list(reversed(skip_connections)) + for i in range(encoder_depth - 1): + ## upsampling + for _ in range(r): + output_soft_mask = self.residual_block(output_soft_mask) + output_soft_mask = UpSampling3D()(output_soft_mask) + ## skip connections + output_soft_mask = add([output_soft_mask, skip_connections[i]]) + + ### last upsampling + for i in range(r): + output_soft_mask = self.residual_block(output_soft_mask) + output_soft_mask = UpSampling3D()(output_soft_mask) + + ## Output + output_soft_mask = Conv3D(input_channels, 
(1, 1, 1))(output_soft_mask)
+ output_soft_mask = Conv3D(input_channels, (1, 1, 1))(output_soft_mask)
+ output_soft_mask = Activation('sigmoid')(output_soft_mask)
+
+ # Attention: (1 + output_soft_mask) * output_trunk
+ output = Lambda(lambda x: x + 1)(output_soft_mask)
+ output = Multiply()([output, output_trunk])
+
+ # Last Residual Block
+ for i in range(p):
+ output = self.residual_block(output, name=name)
+
+ return output
+
+ def residual_block(self, input, input_channels=None, output_channels=None, kernel_size=(3, 3, 3), stride=1, name='out'):
+ """
+ full pre-activation residual block
+ https://arxiv.org/pdf/1603.05027.pdf
+ """
+ if output_channels is None:
+ output_channels = input.get_shape()[-1].value
+ if input_channels is None:
+ input_channels = output_channels // 4
+
+ strides = (stride, stride, stride)
+
+ x = BatchNormalization()(input)
+ x = Activation('relu')(x)
+ x = Conv3D(input_channels, (1, 1, 1))(x)
+
+ x = BatchNormalization()(x)
+ x = Activation('relu')(x)
+ x = Conv3D(input_channels, kernel_size, padding='same', strides=stride)(x)
+
+ x = BatchNormalization()(x)
+ x = Activation('relu')(x)
+ x = Conv3D(output_channels, (1, 1, 1), padding='same')(x)
+
+ if input_channels != output_channels or stride != 1:
+ input = Conv3D(output_channels, (1, 1, 1), padding='same', strides=strides)(input)
+ if name == 'out':
+ x = add([x, input])
+ else:
+ x = add([x, input], name=name)
+ return x
+
+ # legacy builder: originally defined without `self` and calling residual_block/attention_block
+ # as bare names, which raises NameError at call time; fixed here to be a proper method
+ def res_atten_unet_3d(self, input_shape, filter_num=8, merge_axis=-1):
+ data = Input(shape=input_shape)
+ pool_size = (2, 2, 2)
+ up_size = (2, 2, 2)
+ conv1 = Conv3D(filter_num * 4, 3, padding='same')(data)
+ conv1 = BatchNormalization()(conv1)
+ conv1 = Activation('relu')(conv1)
+ # conv1 = Dropout(0.5)(conv1)
+
+ pool = MaxPooling3D(pool_size=pool_size)(conv1)
+
+ res1 = self.residual_block(pool, output_channels=filter_num * 8)
+ # res1 = Dropout(0.5)(res1)
+
+ pool1 = MaxPooling3D(pool_size=pool_size)(res1)
+
+ res2 = self.residual_block(pool1, output_channels=filter_num * 16)
+ # res2 = Dropout(0.5)(res2)
+
+ pool2 = MaxPooling3D(pool_size=pool_size)(res2)
+
+ res3 = self.residual_block(pool2, output_channels=filter_num * 32)
+ # res3 = Dropout(0.5)(res3)
+
+ pool3 = MaxPooling3D(pool_size=pool_size)(res3)
+
+ res4 = self.residual_block(pool3, output_channels=filter_num * 64)
+ # res4 = Dropout(0.5)(res4)
+
+ pool4 = MaxPooling3D(pool_size=pool_size)(res4)
+
+ res5 = self.residual_block(pool4, output_channels=filter_num * 64)
+ res5 = self.residual_block(res5, output_channels=filter_num * 64)
+
+ atb5 = self.attention_block(res4, encoder_depth=1, name='atten1')
+ up1 = UpSampling3D(size=up_size)(res5)
+ merged1 = concatenate([up1, atb5], axis=merge_axis)
+
+ res5 = self.residual_block(merged1, output_channels=filter_num * 64)
+ # res5 = Dropout(0.5)(res5)
+
+ atb6 = self.attention_block(res3, encoder_depth=2, name='atten2')
+ up2 = UpSampling3D(size=up_size)(res5)
+ merged2 = concatenate([up2, atb6], axis=merge_axis)
+
+ res6 = self.residual_block(merged2, output_channels=filter_num * 32)
+ # res6 = Dropout(0.5)(res6)
+
+ atb7 = self.attention_block(res2, encoder_depth=3, name='atten3')
+ up3 = UpSampling3D(size=up_size)(res6)
+ merged3 = concatenate([up3, atb7], axis=merge_axis)
+
+ res7 = self.residual_block(merged3, output_channels=filter_num * 16)
+ # res7 = Dropout(0.5)(res7)
+
+ atb8 = self.attention_block(res1, encoder_depth=4, name='atten4')
+ up4 = UpSampling3D(size=up_size)(res7)
+ merged4 = concatenate([up4, atb8], axis=merge_axis)
+
+ res8 = self.residual_block(merged4, output_channels=filter_num * 8)
+ # res8 = Dropout(0.5)(res8)
+
+
up = UpSampling3D(size=up_size)(res8) + merged = concatenate([up, conv1], axis=merge_axis) + conv9 = Conv3D(filter_num * 4, 3, padding='same')(merged) + conv9 = BatchNormalization()(conv9) + conv9 = Activation('relu')(conv9) + # conv9 = Dropout(0.5)(conv9) + + output = Conv3D(1, 3, padding='same', activation='sigmoid')(conv9) + model = Model(data, output) + return model + + + # liver network do not modify + def build_res_atten_unet_3d(self, input_shape, merge_axis=-1, pool_size=(2, 2, 2) + , up_size=(2, 2, 2)): + data = Input(shape=input_shape) + filter_num = round(self.config.get_parameter("filters")/4) + conv1 = Conv3D(filter_num * 4, 3, padding='same')(data) + conv1 = BatchNormalization()(conv1) + conv1 = Activation('relu')(conv1) + + pool = MaxPooling3D(pool_size=pool_size)(conv1) + + res1 = self.residual_block(pool, output_channels=filter_num * 4) + + pool1 = MaxPooling3D(pool_size=pool_size)(res1) + + res2 = self.residual_block(pool1, output_channels=filter_num * 8) + + pool2 = MaxPooling3D(pool_size=pool_size)(res2) + + res3 = self.residual_block(pool2, output_channels=filter_num * 16) + pool3 = MaxPooling3D(pool_size=pool_size)(res3) + + res4 = self.residual_block(pool3, output_channels=filter_num * 32) + + pool4 = MaxPooling3D(pool_size=pool_size)(res4) + + res5 = self.residual_block(pool4, output_channels=filter_num * 64) + res5 = self.residual_block(res5, output_channels=filter_num * 64) + + atb5 = self.attention_block(res4, encoder_depth=1, name='atten1') + up1 = UpSampling3D(size=up_size)(res5) + merged1 = concatenate([up1, atb5], axis=merge_axis) + + res5 = self.residual_block(merged1, output_channels=filter_num * 32) + + atb6 = self.attention_block(res3, encoder_depth=2, name='atten2') + up2 = UpSampling3D(size=up_size)(res5) + merged2 = concatenate([up2, atb6], axis=merge_axis) + + res6 = self.residual_block(merged2, output_channels=filter_num * 16) + atb7 = self.attention_block(res2, encoder_depth=3, name='atten3') + up3 = UpSampling3D(size=up_size)(res6) + merged3 = concatenate([up3, atb7], axis=merge_axis) + + res7 = self.residual_block(merged3, output_channels=filter_num * 8) + atb8 = self.attention_block(res1, encoder_depth=4, name='atten4') + up4 = UpSampling3D(size=up_size)(res7) + merged4 = concatenate([up4, atb8], axis=merge_axis) + + res8 = self.residual_block(merged4, output_channels=filter_num * 4) + up = UpSampling3D(size=up_size)(res8) + merged = concatenate([up, conv1], axis=merge_axis) + conv9 = Conv3D(filter_num * 4, 3, padding='same')(merged) + conv9 = BatchNormalization()(conv9) + conv9 = Activation('relu')(conv9) + + + if self.config.get_parameter("nb_classes") == 1: + output = Conv3D(1, 3, padding='same', activation=self.config.get_parameter("final_activation"))(conv9) + else: + output = Conv3D(self.config.get_parameter("nb_classes")+1, 3, padding='same', activation=self.config.get_parameter("final_activation"))(conv9) + + model = Model(data, output) + return model + + + # ============================================================ + # ======================Attention ResUnet 2D================================# + # ============================================================ + + + def attention_block_2d(self,input, input_channels=None, output_channels=None, encoder_depth=1, name='at'): + """ + attention block + https://arxiv.org/abs/1704.06904 + """ + p = 1 + t = 2 + r = 1 + + if input_channels is None: + input_channels = input.get_shape()[-1].value + if output_channels is None: + output_channels = input_channels + + # First Residual Block + for i in 
range(p): + input = self.residual_block_2d(input) + + # Trunk Branch + output_trunk = input + for i in range(t): + output_trunk = self.residual_block_2d(output_trunk) + + # Soft Mask Branch + + ## encoder + ### first down sampling + output_soft_mask = MaxPooling2D(padding='same')(input) # 32x32 + for i in range(r): + output_soft_mask = self.residual_block_2d(output_soft_mask) + + skip_connections = [] + for i in range(encoder_depth - 1): + + ## skip connections + output_skip_connection = self.residual_block_2d(output_soft_mask) + skip_connections.append(output_skip_connection) + + ## down sampling + output_soft_mask = MaxPooling2D(padding='same')(output_soft_mask) + for _ in range(r): + output_soft_mask = self.residual_block_2d(output_soft_mask) + + ## decoder + skip_connections = list(reversed(skip_connections)) + for i in range(encoder_depth - 1): + ## upsampling + for _ in range(r): + output_soft_mask = self.residual_block_2d(output_soft_mask) + output_soft_mask = UpSampling2D()(output_soft_mask) + ## skip connections + output_soft_mask = add([output_soft_mask, skip_connections[i]]) + + ### last upsampling + for i in range(r): + output_soft_mask = self.residual_block_2d(output_soft_mask) + output_soft_mask = UpSampling2D()(output_soft_mask) + + ## Output + output_soft_mask = Conv2D(input_channels, (1, 1))(output_soft_mask) + output_soft_mask = Conv2D(input_channels, (1, 1))(output_soft_mask) + output_soft_mask = Activation('sigmoid')(output_soft_mask) + + # Attention: (1 + output_soft_mask) * output_trunk + output = Lambda(lambda x: x + 1)(output_soft_mask) + output = Multiply()([output, output_trunk]) + + # Last Residual Block + for i in range(p): + output = self.residual_block_2d(output, name=name) + + return output + + + def residual_block_2d(self, input, input_channels=None, output_channels=None, kernel_size=(3, 3), stride=1, name='out'): + """ + full pre-activation residual block + https://arxiv.org/pdf/1603.05027.pdf + """ + acti = self.config.get_parameter("activation_function") + if output_channels is None: + output_channels = input.get_shape()[-1].value + if input_channels is None: + input_channels = output_channels // 4 + strides = (stride, stride) + x = BatchNormalization()(input) + x = Activation(acti)(x) + x = Conv2D(input_channels, (1, 1))(x) + + x = BatchNormalization()(x) + x = Activation(acti)(x) + x = Conv2D(input_channels, kernel_size, padding='same', strides=strides)(x) + + x = BatchNormalization()(x) + x = Activation(acti)(x) + x = Conv2D(output_channels, (1, 1), padding='same')(x) + + if input_channels != output_channels or stride != 1: + input = Conv2D(output_channels, (1, 1), padding='same', strides=strides)(input) + if name == 'out': + x = add([x, input]) + else: + x = add([x, input], name=name) + return x + + + def build_res_atten_unet_2d(self, input_shape): + merge_axis = -1 # Feature maps are concatenated along last axis (for tf backend) + data = Input(shape=input_shape) + filter_num = round(self.config.get_parameter("filters")/4) + acti = self.config.get_parameter("activation_function") + + conv1 = Conv2D(filter_num * 4, 3, padding='same')(data) + conv1 = BatchNormalization()(conv1) + conv1 = Activation(acti)(conv1) + + # res0 = residual_block_2d(data, output_channels=filter_num * 2) + + pool = MaxPooling2D(pool_size=(2, 2))(conv1) + res1 = self.residual_block_2d(pool, output_channels=filter_num * 4) + + # res1 = residual_block_2d(atb1, output_channels=filter_num * 4) + + pool1 = MaxPooling2D(pool_size=(2, 2))(res1) + # pool1 = MaxPooling2D(pool_size=(2, 
2))(atb1) + + res2 = self.residual_block_2d(pool1, output_channels=filter_num * 8) + + # res2 = residual_block_2d(atb2, output_channels=filter_num * 8) + pool2 = MaxPooling2D(pool_size=(2, 2))(res2) + # pool2 = MaxPooling2D(pool_size=(2, 2))(atb2) + + res3 = self.residual_block_2d(pool2, output_channels=filter_num * 16) + # res3 = residual_block_2d(atb3, output_channels=filter_num * 16) + pool3 = MaxPooling2D(pool_size=(2, 2))(res3) + # pool3 = MaxPooling2D(pool_size=(2, 2))(atb3) + + res4 = self.residual_block_2d(pool3, output_channels=filter_num * 32) + + # res4 = residual_block_2d(atb4, output_channels=filter_num * 32) + pool4 = MaxPooling2D(pool_size=(2, 2))(res4) + # pool4 = MaxPooling2D(pool_size=(2, 2))(atb4) + + res5 = self.residual_block_2d(pool4, output_channels=filter_num * 64) + # res5 = residual_block_2d(res5, output_channels=filter_num * 64) + res5 = self.residual_block_2d(res5, output_channels=filter_num * 64) + + atb5 = self.attention_block_2d(res4, encoder_depth=1, name='atten1') + up1 = UpSampling2D(size=(2, 2))(res5) + merged1 = concatenate([up1, atb5], axis=merge_axis) + # merged1 = concatenate([up1, atb4], axis=merge_axis) + + res5 = self.residual_block_2d(merged1, output_channels=filter_num * 32) + # atb5 = attention_block_2d(res5, encoder_depth=1) + + atb6 = self.attention_block_2d(res3, encoder_depth=2, name='atten2') + up2 = UpSampling2D(size=(2, 2))(res5) + # up2 = UpSampling2D(size=(2, 2))(atb5) + merged2 = concatenate([up2, atb6], axis=merge_axis) + # merged2 = concatenate([up2, atb3], axis=merge_axis) + + res6 = self.residual_block_2d(merged2, output_channels=filter_num * 16) + # atb6 = attention_block_2d(res6, encoder_depth=2) + + # atb6 = attention_block_2d(res6, encoder_depth=2) + atb7 = self.attention_block_2d(res2, encoder_depth=3, name='atten3') + up3 = UpSampling2D(size=(2, 2))(res6) + # up3 = UpSampling2D(size=(2, 2))(atb6) + merged3 = concatenate([up3, atb7], axis=merge_axis) + # merged3 = concatenate([up3, atb2], axis=merge_axis) + + res7 = self.residual_block_2d(merged3, output_channels=filter_num * 8) + # atb7 = attention_block_2d(res7, encoder_depth=3) + + # atb7 = attention_block_2d(res7, encoder_depth=3) + atb8 = self.attention_block_2d(res1, encoder_depth=4, name='atten4') + up4 = UpSampling2D(size=(2, 2))(res7) + # up4 = UpSampling2D(size=(2, 2))(atb7) + merged4 = concatenate([up4, atb8], axis=merge_axis) + # merged4 = concatenate([up4, atb1], axis=merge_axis) + + res8 = self.residual_block_2d(merged4, output_channels=filter_num * 4) + # atb8 = attention_block_2d(res8, encoder_depth=4) + + # atb8 = attention_block_2d(res8, encoder_depth=4) + up = UpSampling2D(size=(2, 2))(res8) + # up = UpSampling2D(size=(2, 2))(atb8) + merged = concatenate([up, conv1], axis=merge_axis) + # res9 = residual_block_2d(merged, output_channels=filter_num * 2) + + conv9 = Conv2D(filter_num * 4, 3, padding='same')(merged) + conv9 = BatchNormalization()(conv9) + conv9 = Activation(acti)(conv9) + + if self.config.get_parameter("nb_classes") == 1: + output = Conv2D(1, 3, padding='same', activation=self.config.get_parameter("final_activation"))(conv9) + else: + output = Conv2D(self.config.get_parameter("nb_classes")+1, 3, padding='same', activation=self.config.get_parameter("final_activation"))(conv9) + + model = Model(data, output) + return model + + + +class Res_att_unet_2d(RA_Unet): + def __init__(self, model_dir = None, name = 'Res_att_unet_2d', **kwargs): + super().__init__(model_dir = model_dir, **kwargs) + + self.config.update_parameter(["model","name"], name) + + 
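+# Illustrative usage sketch (editor's addition, not part of the original source):
+# the wrapper classes only record the model name in the config; the Keras graph
+# itself comes from the build_res_atten_unet_* methods. `/path/to/model_dir` is a
+# hypothetical folder that must contain a config file defining `filters`,
+# `nb_classes`, `final_activation` and `activation_function`.
+def _example_build_res_att_unet_2d(model_dir='/path/to/model_dir'):
+    # instantiate the wrapper, which loads the config and sets the model name
+    net = Res_att_unet_2d(model_dir=model_dir)
+    # build the graph for single-channel 256x256 inputs
+    model = net.build_res_atten_unet_2d(input_shape=(256, 256, 1))
+    # the attention blocks compute (1 + sigmoid(soft_mask)) * trunk, so the mask
+    # can only amplify trunk features (factor between 1 and 2), never zero them out
+    return model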
+ +class Res_att_unet_3d(RA_Unet): + def __init__(self, model_dir = None, name = 'Res_att_unet_3d', **kwargs): + super().__init__(model_dir = model_dir, **kwargs) + + self.config.update_parameter(["model","name"], name) diff --git a/models/Unet_Resnet.py b/models/Unet_Resnet.py new file mode 100644 index 0000000..90b4724 --- /dev/null +++ b/models/Unet_Resnet.py @@ -0,0 +1,260 @@ +import math + +import keras +from keras.models import Model, load_model +from keras.layers import Input, BatchNormalization, Activation +from keras.layers.core import Lambda, Dropout +from keras.layers.convolutional import Conv2D, Conv2DTranspose, UpSampling2D +from keras.layers.convolutional_recurrent import ConvLSTM2D +from keras.layers.pooling import MaxPooling2D +from keras.layers.merge import Concatenate, Add +from keras import regularizers +from keras import backend as K + +import tensorflow as tf + +from .CNN_Base import CNN_Base +from .layers.layers import normalize_input, activation_function, regularizer_function, bn_relu_conv2d, bn_relu_conv2dtranspose + +################################################ +# Unet + Resnet +################################################ + +class Unet_Resnet(CNN_Base): + """ + Unet + resnet functions + see https://link.springer.com/chapter/10.1007/978-3-319-46976-8_19 + """ + + def __init__(self, model_dir = None, **kwargs): + super().__init__(model_dir = model_dir, **kwargs) + + def bottleneck_block(self, inputs, + upsample = False, + filters = 8, + strides = 1, dropout_value = None, acti = None, padding = None, + kernel_initializer = None, weight_regularizer = None, name = None): + # Bottleneck_block + with tf.name_scope("Bottleneck_block" + name): + output = bn_relu_conv2d(inputs, filters, 1, acti=acti, padding=padding, strides=strides, + kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer) + + output = bn_relu_conv2d(output, filters, 3, acti=acti, padding=padding, + kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer) + + if upsample == True: + output = bn_relu_conv2dtranspose(output, filters, (2,2), strides = (2,2), acti=acti, padding=padding, + kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer) + output = Conv2D(filters * 4, (1,1), padding=padding, + kernel_initializer=kernel_initializer, + kernel_regularizer=regularizer_function(weight_regularizer))(output) + else: + output = bn_relu_conv2d(output, filters*4, 1, acti=acti, padding=padding, + kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer) + + output = Dropout(dropout_value)(output) + + # reshape input to the same size as output + if upsample == True: + inputs = UpSampling2D()(inputs) + if strides == 2: + inputs = Conv2D(output.shape[3].value, 1, padding=padding, strides=strides, kernel_initializer=kernel_initializer)(inputs) + + # ensure number of filters are correct between input and output + if output.shape[3] != inputs.shape[3]: + inputs = Conv2D(output.shape[3].value, 1, padding=padding, kernel_initializer=kernel_initializer)(inputs) + + return Add()([output, inputs]) + + def simple_block(self, inputs, filters, + strides = 1, dropout_value = None, acti = None, padding = None, + kernel_initializer = None, weight_regularizer = None, name = None): + + with tf.name_scope("Simple_block" + name): + output = BatchNormalization()(inputs) + output = activation_function(output, acti) + output = MaxPooling2D()(output) + output = Conv2D(filters, 3, padding=padding, strides=strides, + 
kernel_initializer=kernel_initializer, + kernel_regularizer=regularizer_function(weight_regularizer))(output) + + output = Dropout(dropout_value)(output) + + inputs = Conv2D(output.shape[3].value, 1, padding=padding, strides=2, kernel_initializer=kernel_initializer)(inputs) + + return Add()([output, inputs]) + + def simple_block_up(self, inputs, filters, + strides = 1, dropout_value = None, acti = None, padding = None, + kernel_initializer = None, weight_regularizer = None, name = None): + + with tf.name_scope("Simple_block_up" + name): + output = bn_relu_conv2d(inputs, filters, 3, acti=acti, padding=padding, strides=strides, + kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer) + + output = Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding=padding, kernel_initializer=kernel_initializer)(output) + + output = Dropout(dropout_value)(output) + + inputs = UpSampling2D()(inputs) + inputs = Conv2D(output.shape[3].value, 1, padding=padding, kernel_initializer=kernel_initializer)(inputs) + + return Add()([output, inputs]) + + + def build_model(self, unet_input, mean_std_normalization = None, + dropout_value = None, acti = None, padding = None, + kernel_initializer = None, weight_regularizer = None): + + ### get parameters from config file ### + filters = self.config.get_parameter("filters") + + if dropout_value is None: + dropout_value = self.config.get_parameter("dropout_value") + if acti is None: + acti = self.config.get_parameter("activation_function") + if padding is None: + padding = self.config.get_parameter("padding") + if kernel_initializer is None: + kernel_initializer = self.config.get_parameter("initializer") + if weight_regularizer is None: + weight_regularizer = self.config.get_parameter("weight_regularizer") + if mean_std_normalization is None: + mean_std_normalization = self.config.get_parameter("mean_std_normalization") + # always define mean/std so the call to normalize_input below cannot hit a NameError + if mean_std_normalization == True: + mean = self.config.get_parameter("mean") + std = self.config.get_parameter("std") + else: + mean = None + std = None + + + ### Actual network ### + inputs = Input(unet_input) + + # normalize images + layer = normalize_input(inputs, + scale_input = self.config.get_parameter("scale_input"), + mean_std_normalization = mean_std_normalization, + mean = mean, std = std) + + # encoder arm + layer_1 = Conv2D(filters, (3, 3), padding = padding, + kernel_initializer = kernel_initializer, + kernel_regularizer = regularizer_function(weight_regularizer), name="Conv_layer_1")(layer) + + layer_2 = self.simple_block(layer_1, filters, + dropout_value = dropout_value, acti = acti, padding = padding, + kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, + name = "_layer_2") + + layer = layer_2 + layer_store = [layer] + + for i, conv_layer_i in enumerate(self.config.get_parameter("bottleneck_block"), 1): + strides = 2 + + # the last level of the encoding arm acts as the bridge across to the decoder + if i == len(self.config.get_parameter("bottleneck_block")): + layer = self.bottleneck_block(layer, filters = filters, + strides = strides, dropout_value = dropout_value, acti = acti, padding = padding, + kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, + name = "_layer_{}".format(2 + i)) + + for count in range(conv_layer_i-2): + layer = self.bottleneck_block(layer, filters = filters, + dropout_value = dropout_value, acti = acti, padding = padding, + kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, + name="_layer_{}-{}".format(2 + i, count)) + + layer = 
self.bottleneck_block(layer, upsample = True, + filters = filters, strides = 1, + dropout_value = dropout_value, acti = acti, padding = padding, + kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, + name = "_up_layer_{}".format(2 + i)) + else: + layer = self.bottleneck_block(layer, filters = filters, + strides = strides, dropout_value = dropout_value, acti = acti, padding = padding, + kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, + name = "_layer_{}".format(2 + i)) + + for count in range(conv_layer_i - 1): + layer = self.bottleneck_block(layer, filters = filters, + dropout_value = dropout_value, acti = acti, padding = padding, + kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, + name="_layer_{}-{}".format(2 + i, count)) + filters = filters*2 + layer_store.append(layer) + + # decoder arm + for i, conv_layer_i in enumerate(self.config.get_parameter("bottleneck_block")[-2::-1], 1): + filters = filters//2 + + # note that i should be positive possibly due to the way keras/tf model compile works + layer = Concatenate(axis=3, name="Concatenate_layer_{}".format(i+6))([layer_store[-i], layer]) + + for count in range(conv_layer_i - 1): + layer = self.bottleneck_block(layer, filters = filters, + dropout_value = dropout_value, acti = acti, padding = padding, + kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, + name="_layer_{}-{}".format(i+6, count)) + + layer = self.bottleneck_block(layer, upsample = True, + filters = filters, strides = 1, + dropout_value = dropout_value, acti = acti, padding = padding, + kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, + name = "_layer_{}".format(i+6)) + + layer_13 = Concatenate(axis=3, name="Concatenate_layer_13")([layer, layer_2]) + layer_14 = self.simple_block_up(layer_13, filters, + dropout_value = dropout_value, acti = acti, padding = padding, + kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, + name = "_layer_14") + + layer_15 = Concatenate(axis=3, name="Concatenate_layer_15")([layer_14, layer_1]) + + layer_16 = Conv2D(filters, (3, 3), padding = padding, + kernel_initializer = kernel_initializer, kernel_regularizer = regularizer_function(weight_regularizer), + name="Conv_layer_16")(layer_15) + + layer_17 = BatchNormalization()(layer_16) + layer_18 = activation_function(layer_17, acti) + if self.config.get_parameter("nb_classes") == 1: + outputs = Conv2D(1, (1, 1), activation=self.config.get_parameter("final_activation"))(layer_18) + else: + outputs = Conv2D(self.config.get_parameter("nb_classes")+1, (1, 1), activation=self.config.get_parameter("final_activation"))(layer_18) + #outputs = Conv2D(1, (1, 1), activation = self.config.get_parameter("final_activation"))(layer_18) + + return Model(inputs=[inputs], outputs=[outputs], name = self.config.get_parameter('name')) + +class Unet_Resnet101(Unet_Resnet): + def __init__(self, model_dir = None, name = 'Unet_Resnet101', **kwargs): + super().__init__(model_dir = model_dir, **kwargs) + + self.config.update_parameter(["model","name"], name) + self.config.update_parameter(["model","bottleneck_block"], (3, 4, 23, 3)) + + # store parameters for ease of use (may need to remove in the future) + self.conv_layer = self.config.get_parameter("bottleneck_block") + +class Unet_Resnet50(Unet_Resnet): + def __init__(self, model_dir = None, name = 'Unet_Resnet50', **kwargs): + super().__init__(model_dir = model_dir, **kwargs) + + 
self.config.update_parameter(["model","name"], name) + self.config.update_parameter(["model","bottleneck_block"], (3, 4, 6, 3)) + + # store parameters for ease of use (may need to remove in the future) + self.conv_layer = self.config.get_parameter("bottleneck_block") + +class Unet_Resnet_paper(Unet_Resnet): + def __init__(self, model_dir = None, name = 'Unet_Resnet_paper', **kwargs): + """ + see https://arxiv.org/pdf/1608.04117.pdf + """ + super().__init__(model_dir = model_dir, **kwargs) + + self.config.update_parameter(["model","name"], name) + self.config.update_parameter(["model","bottleneck_block"], (3, 8, 10, 3)) + + # store parameters for ease of use (may need to remove in the future) + self.conv_layer = self.config.get_parameter("bottleneck_block") \ No newline at end of file diff --git a/models/__init__.py b/models/__init__.py new file mode 100644 index 0000000..61006f3 --- /dev/null +++ b/models/__init__.py @@ -0,0 +1 @@ +from __future__ import absolute_import, print_function \ No newline at end of file diff --git a/models/__pycache__/CNN_Base.cpython-36.pyc b/models/__pycache__/CNN_Base.cpython-36.pyc new file mode 100644 index 0000000..969e68a Binary files /dev/null and b/models/__pycache__/CNN_Base.cpython-36.pyc differ diff --git a/models/__pycache__/CNN_Base.cpython-37.pyc b/models/__pycache__/CNN_Base.cpython-37.pyc new file mode 100644 index 0000000..9f4efe2 Binary files /dev/null and b/models/__pycache__/CNN_Base.cpython-37.pyc differ diff --git a/models/__pycache__/Unet.cpython-36.pyc b/models/__pycache__/Unet.cpython-36.pyc new file mode 100644 index 0000000..7dca2bd Binary files /dev/null and b/models/__pycache__/Unet.cpython-36.pyc differ diff --git a/models/__pycache__/Unet.cpython-37.pyc b/models/__pycache__/Unet.cpython-37.pyc new file mode 100644 index 0000000..2ac64a6 Binary files /dev/null and b/models/__pycache__/Unet.cpython-37.pyc differ diff --git a/models/__pycache__/Unet_ResAttnet.cpython-36.pyc b/models/__pycache__/Unet_ResAttnet.cpython-36.pyc new file mode 100644 index 0000000..5cb0a4d Binary files /dev/null and b/models/__pycache__/Unet_ResAttnet.cpython-36.pyc differ diff --git a/models/__pycache__/Unet_ResAttnet.cpython-37.pyc b/models/__pycache__/Unet_ResAttnet.cpython-37.pyc new file mode 100644 index 0000000..90dcc9e Binary files /dev/null and b/models/__pycache__/Unet_ResAttnet.cpython-37.pyc differ diff --git a/models/__pycache__/Unet_Resnet.cpython-36.pyc b/models/__pycache__/Unet_Resnet.cpython-36.pyc new file mode 100644 index 0000000..1ac83f3 Binary files /dev/null and b/models/__pycache__/Unet_Resnet.cpython-36.pyc differ diff --git a/models/__pycache__/Unet_Resnet.cpython-37.pyc b/models/__pycache__/Unet_Resnet.cpython-37.pyc new file mode 100644 index 0000000..df5b98c Binary files /dev/null and b/models/__pycache__/Unet_Resnet.cpython-37.pyc differ diff --git a/models/__pycache__/__init__.cpython-36.pyc b/models/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..ccf9f5f Binary files /dev/null and b/models/__pycache__/__init__.cpython-36.pyc differ diff --git a/models/__pycache__/__init__.cpython-37.pyc b/models/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..c14b5b4 Binary files /dev/null and b/models/__pycache__/__init__.cpython-37.pyc differ diff --git a/models/internals/._losses.py b/models/internals/._losses.py new file mode 100644 index 0000000..7285ea6 Binary files /dev/null and b/models/internals/._losses.py differ
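The `bottleneck_block` tuples above mirror ResNet stage layouts: each entry is the number of bottleneck blocks at one resolution level of the encoder, and `build_model` walks the same tuple in reverse for the decoder, so `(3, 4, 6, 3)` reproduces ResNet-50's stage sizes and `(3, 4, 23, 3)` ResNet-101's. A minimal sketch of how the variants might be compared (editor's illustration; `/path/to/model_dir` is a hypothetical folder that must contain a valid config file):

# editor's sketch: compare the stage layouts of the three Unet_Resnet variants
from models.Unet_Resnet import Unet_Resnet50, Unet_Resnet101, Unet_Resnet_paper

for cls in (Unet_Resnet50, Unet_Resnet101, Unet_Resnet_paper):
    net = cls(model_dir='/path/to/model_dir')  # hypothetical model_dir with a config file
    stages = net.config.get_parameter("bottleneck_block")
    # e.g. Unet_Resnet50 -> (3, 4, 6, 3), i.e. 16 bottleneck blocks in the encoder
    print(cls.__name__, stages, '->', sum(stages), 'bottleneck blocks')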
diff --git a/models/internals/.ipynb_checkpoints/__init__-checkpoint.py b/models/internals/.ipynb_checkpoints/__init__-checkpoint.py new file mode 100644 index 0000000..61006f3 --- /dev/null +++ b/models/internals/.ipynb_checkpoints/__init__-checkpoint.py @@ -0,0 +1 @@ +from __future__ import absolute_import, print_function \ No newline at end of file diff --git a/models/internals/.ipynb_checkpoints/dataset-checkpoint.py b/models/internals/.ipynb_checkpoints/dataset-checkpoint.py new file mode 100644 index 0000000..f0dfe1c --- /dev/null +++ b/models/internals/.ipynb_checkpoints/dataset-checkpoint.py @@ -0,0 +1,304 @@ +import os, sys +import numpy as np + +import matplotlib.pyplot as plt + +from tqdm import tqdm + +from .image_functions import Image_Functions + +class Dataset(Image_Functions): + def __init__(self): + """Creates Dataset object that is used to manipulate the training data. + + Attributes + ---------- + classes : list + List of dictionaries containing the class name and id + + train_images : list + List of images that is used as the input for the network + + train_ground_truth : list + List of images that is used as the ground truth for the network + """ + + self.classes = [] + self.train_images = [] + self.train_ground_truth = [] + + super().__init__() + + ####################### + # Class id functions + ####################### + def get_class_id(self, class_name): + """Returns the class id and adds class to list if not in list of classes. + + Parameters + ---------- + class_name : str + Identity of class that will be associated with the class id + + Returns + ---------- + int + Class id + """ + + if len(self.classes) == 0: + self.classes.append({"class": class_name, "id": 0}) + return 0 + + for class_info in self.classes: + # if class exists, return its id + if class_info["class"] == class_name: + return class_info["id"] + + # compute the next free id before appending so that ids stay unique + self.classes.append({"class": class_name, "id": len(self.classes)}) + return len(self.classes)-1 + + ####################### + # Sanity check functions + ####################### + def sanity_check(self, image_index): + """Plots the augmented image and ground_truth to check if everything is ok. + + Parameters + ---------- + image_index : int + Index of the image and its corresponding ground_truth + """ + + image = self.aug_images[image_index][:,:,0] + ground_truth = self.aug_ground_truth[image_index][:,:,0] + + plt.figure(figsize=(14, 14)) + plt.axis('off') + plt.imshow(image, cmap='gray', + norm=None, interpolation=None) + plt.show() + + plt.figure(figsize=(14, 14)) + plt.axis('off') + plt.imshow(ground_truth, cmap='gray', + norm=None, interpolation=None) + plt.show() + + def load_dataset(self, dataset_dir = None, tiled = False): + """Loads dataset from ``dataset_dir`` + + Parameters + ---------- + dataset_dir : str or none, optional + Folder to load the dataset from. If none, ``dataset_dir`` is obtained from config file + + tiled : bool, optional + To set if tiling function is used + """ + + # update dataset_dir if specified. 
If not, load dataset_dir from config file + if dataset_dir is None: + dataset_dir = self.config.get_parameter("dataset_dir") + else: + self.config.update_parameter(self.config.find_key("dataset_dir"), dataset_dir) + image_dirs = next(os.walk(dataset_dir))[1] + image_dirs = [f for f in image_dirs if not f[0] == '.'] + + for img_dir in image_dirs: + # images + image = self.load_image(os.path.join(dataset_dir, img_dir), subfolder = self.config.get_parameter("image_subfolder")) + # percentile normalization + if self.config.get_parameter("percentile_normalization"): + image, _, _ = self.percentile_normalization(image, in_bound = self.config.get_parameter("percentile")) + + if tiled is True: + tile_image_list, num_rows, num_cols, padding = self.tile_image(image, self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size")) + self.config.update_parameter(["images","num_rows"], num_rows) + self.config.update_parameter(["images","num_cols"], num_cols) + self.config.update_parameter(["images","padding"], padding) + self.train_images.extend(tile_image_list) + else: + self.train_images.extend([image,]) + + #ground_truth + ground_truth, class_id = self.load_ground_truth(os.path.join(dataset_dir, img_dir), subfolder = self.config.get_parameter("ground_truth_subfolder")) + if tiled is True: + tile_ground_truth_list, _, _, _ = self.tile_image(ground_truth[0], self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size")) + self.train_ground_truth.extend(tile_ground_truth_list) + else: + self.train_ground_truth.extend(ground_truth) + + ####################### + # Image augmentation + ####################### + def augment_images(self): + """Augments images using the parameters in the config file""" + + # TODO: To allow for augmentation of multi-class images + + augmentor = self.augmentations(p=self.config.get_parameter("augmentations_p")) + + # increase number of images + self.aug_images = self.train_images*self.config.get_parameter("num_augmented_images") + self.aug_ground_truth = self.train_ground_truth*self.config.get_parameter("num_augmented_images") + + print("Performing augmentations on {} images".format(len(self.aug_images))) + sys.stdout.flush() + + for i in tqdm(range(len(self.aug_images)),desc="Augmentation of images"): + + # target must be image and mask in order for albumentations to work + data = {"image": self.aug_images[i], + "mask": self.aug_ground_truth[i]} + augmented = augmentor(**data) + + self.aug_images[i] = self.reshape_image(np.asarray(augmented["image"])) + + # add + if self.config.get_parameter("use_binary_dilation_after_augmentation") is True: + from skimage.morphology import binary_dilation, disk + self.aug_ground_truth[i] = self.reshape_image(binary_dilation(np.ndarray.astype(augmented["mask"], np.bool), disk(self.config.get_parameter("disk_size")))) + else: + self.aug_ground_truth[i] = self.reshape_image(np.ndarray.astype(augmented["mask"], np.bool)) + + self.aug_images = np.stack(self.aug_images, axis = 0) + self.aug_ground_truth = np.stack(self.aug_ground_truth, axis = 0) + + mean = self.aug_images.mean() + std = self.aug_images.std() + + self.config.update_parameter(["images","mean"], float(mean)) + self.config.update_parameter(["images","std"], float(std)) + + print("Augmentations complete!") + + def augmentations(self, p = None): + """Generates list of augmentations using parameters obtained from config file + + Parameters + ---------- + p : int, optional + probability to apply any augmentations to image + + Returns + 
---------- + function + function used to augment images + """ + from albumentations import ( + RandomCrop, HorizontalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90, + Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, ElasticTransform, + IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur, + IAASharpen, RandomBrightnessContrast, Flip, OneOf, Compose + ) + + augmentation_list = [] + + if self.config.get_parameter("random_rotate") is True: + augmentation_list.append(RandomRotate90(p = self.config.get_parameter("random_rotate_p"))) # 0.9 + + if self.config.get_parameter("flip") is True: + augmentation_list.append(Flip()) + + if self.config.get_parameter("transpose") is True: + augmentation_list.append(Transpose()) + + if self.config.get_parameter("blur_group") is True: + blur_augmentation = [] + if self.config.get_parameter("motion_blur") is True: + blur_augmentation.append(MotionBlur(p = self.config.get_parameter("motion_blur_p"))) + if self.config.get_parameter("median_blur") is True: + blur_augmentation.append(MedianBlur(blur_limit = self.config.get_parameter("median_blur_limit"), p = self.config.get_parameter("median_blur_p"))) + if self.config.get_parameter("blur") is True: + blur_augmentation.append(Blur(blur_limit = self.config.get_parameter("blur_limit"), p = self.config.get_parameter("blur_p"))) + augmentation_list.append(OneOf(blur_augmentation, p = self.config.get_parameter("blur_group_p"))) + + if self.config.get_parameter("shift_scale_rotate") is True: + augmentation_list.append(ShiftScaleRotate(shift_limit = self.config.get_parameter("shift_limit"), + scale_limit = self.config.get_parameter("scale_limit"), + rotate_limit = self.config.get_parameter("rotate_limit"), + p = self.config.get_parameter("shift_scale_rotate_p"))) + if self.config.get_parameter("distortion_group") is True: + distortion_augmentation = [] + if self.config.get_parameter("optical_distortion") is True: + distortion_augmentation.append(OpticalDistortion(p = self.config.get_parameter("optical_distortion_p"))) + if self.config.get_parameter("elastic_transform") is True: + distortion_augmentation.append(ElasticTransform(p = self.config.get_parameter("elastic_transform_p"))) + if self.config.get_parameter("grid_distortion") is True: + distortion_augmentation.append(GridDistortion(p = self.config.get_parameter("grid_distortion_p"))) + + augmentation_list.append(OneOf(distortion_augmentation, p = self.config.get_parameter("distortion_group_p"))) + + if self.config.get_parameter("brightness_contrast_group") is True: + contrast_augmentation = [] + if self.config.get_parameter("clahe") is True: + contrast_augmentation.append(CLAHE()) + if self.config.get_parameter("sharpen") is True: + contrast_augmentation.append(IAASharpen()) + if self.config.get_parameter("random_brightness_contrast") is True: + contrast_augmentation.append(RandomBrightnessContrast()) + + augmentation_list.append(OneOf(contrast_augmentation, p = self.config.get_parameter("brightness_contrast_group_p"))) + + augmentation_list.append(RandomCrop(self.config.get_parameter("tile_size")[0], self.config.get_parameter("tile_size")[1], always_apply=True)) + + return Compose(augmentation_list, p = p) + +############################### TODO ############################### +# def preapare_data(self): +# """ +# Performs augmentation if needed +# """ + + +# # Create data generator +# # Return augmented images/ground_truth arrays of batch size +# def generator(features, labels, batch_size, seq_det): +# # create empty 
arrays to contain batch of features and labels +# batch_features = np.zeros((batch_size, features.shape[1], features.shape[2], features.shape[3])) +# batch_labels = np.zeros((batch_size, labels.shape[1], labels.shape[2], labels.shape[3])) + +# while True: +# # Fill arrays of batch size with augmented data taken randomly from full passed arrays +# indexes = random.sample(range(len(features)), batch_size) +# # Perform exactly the same augmentation for X and y +# random_augmented_images, random_augmented_labels = do_augmentation(seq_det, features[indexes], labels[indexes]) +# batch_features[:,:,:,:] = random_augmented_images[:,:,:,:] +# batch_labels[:,:,:,:] = random_augmented_labels[:,:,:,:] + +# yield batch_features, batch_labels + + # Train augmentation +# def do_augmentation(seq_det, X_train, y_train): +# # Use seq_det to build augmentation. +# # .... +# return np.array(X_train_aug), np.array(y_train_aug) + +# seq = iaa.Sequential([ +# iaa.Fliplr(0.5), # horizontally flip +# iaa.OneOf([ +# iaa.Noop(), +# iaa.GaussianBlur(sigma=(0.0, 1.0)), +# iaa.Noop(), +# iaa.Affine(rotate=(-10, 10), translate_percent={"x": (-0.25, 0.25)}, mode='symmetric', cval=(0)), +# iaa.Noop(), +# iaa.PerspectiveTransform(scale=(0.04, 0.08)), +# iaa.Noop(), +# iaa.PiecewiseAffine(scale=(0.05, 0.1), mode='edge', cval=(0)), +# ]), +# # More as you want ... +# ]) +# seq_det = seq.to_deterministic() + +# history = model.fit_generator(generator(X_train, y_train, BATCH_SIZE, seq_det), +# epochs=EPOCHS, +# steps_per_epoch=steps_per_epoch, +# validation_data=(X_valid, y_valid), +# verbose = 1, +# callbacks = [check_point] +# ) + + # Image augmentations + +############################### END of TODO ############################### \ No newline at end of file diff --git a/models/internals/.ipynb_checkpoints/image_functions-checkpoint.py b/models/internals/.ipynb_checkpoints/image_functions-checkpoint.py new file mode 100644 index 0000000..ebc9dba --- /dev/null +++ b/models/internals/.ipynb_checkpoints/image_functions-checkpoint.py @@ -0,0 +1,366 @@ +import os +import glob +import sys +import warnings + +import math +import numpy as np + +import skimage +import skimage.io as skio + +
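+# Editor's sketch (not in the original file): Image_Functions below is written as
+# a mixin and expects the inheriting class (e.g. Dataset) to provide self.config
+# for the ground-truth helpers, but the basic IO helpers run standalone.
+# `/path/to/Images` is a hypothetical folder of .tif images:
+def _example_load_first_image(image_dir='/path/to/Images'):
+    funcs = Image_Functions()
+    paths = funcs.list_images(image_dir)   # globs *.tif (both cases on linux)
+    image = funcs.load_image(paths[0])     # appends a trailing channel axis
+    return image.shape                     # e.g. (H, W, 1)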
+ """ + # to bypass case sensitivity of file extensions in linux and possibly other systems + if sys.platform in ["win32",]: + image_extension = [image_ext] + else: + image_extension = [image_ext.lower(),image_ext.upper()] + + image_list = [] + for ext in image_extension: + image_list.extend(glob.glob(os.path.join(image_dir,ext))) + + return image_list + + ####################### + # Image IO functions + ####################### + def load_image(self, image_path, subfolder = 'Images', image_index = 0, image_ext = '*.tif'): + """Loads images found in ``image_path`` + + Parameters + ---------- + image_path : `str` + Path to look for image files + subfolder : `str`, optional + [Default: 'Images'] Subfolder in which to look for the image files + image_index : `int`, optional + [Default: 0] Index of image to load + image_ext : `str`, optional + [Default: '*.tif'] File extension of the image file + + Returns + ---------- + image : `array_like` + Loaded image + + Notes + ---------- + Only one image from in each directory is loaded. + """ + if os.path.isdir(image_path) is True: + image_list = self.list_images(os.path.join(image_path, subfolder), image_ext = image_ext) + if len(image_list) > 1: + warnings.warn("More that 1 image found in directory. Loading {}".format(image_list[image_index])) + # Load image + image = skio.imread(image_list[image_index]) + else: + image = skio.imread(image_path) + + image = np.expand_dims(image, axis=-1) + return image + + def load_ground_truth(self, image_path, subfolder = 'Masks', image_ext = '*.tif'): + """Loads ground truth images found in ``image_path`` and performs erosion/dilation/inversion if needed + + Parameters + ---------- + image_path : `str` + Path to look for ground truth images + subfolder : `str`, optional + [Default: 'Masks'] Subfolder in which to look for the ground truth images + image_ext : `str`, optional + [Default: '*.tif'] File extension of ground truth image file + + Returns + ---------- + output_ground_truth : `list` + List of ground truth images found in the directory with the given file extension + + class_ids : `list` + List of class ids of the ground truth images + """ + image_list = self.list_images(os.path.join(image_path, subfolder), image_ext = image_ext) + output_ground_truth = [] + class_ids = [] + for ground_truth_path in image_list: + # add class if not in list + ground_truth_name = ground_truth_path.split('\\')[-1] + class_name = ground_truth_name.split('_')[0] + # obtain class_id + class_ids.append(self.get_class_id(class_name)) + + # Load image + ground_truth_img = skio.imread(ground_truth_path) + + # If one mask in 2D, add one extra dimension for the class + if len(ground_truth_img.shape) == 2: + ground_truth_img = np.expand_dims(ground_truth_img, axis=-1) + else: + # Transpore dimension to get class at the end + if ground_truth_img.shape[-1] != self.config.get_parameter("nb_classes"): + ground_truth_img = np.transpose(ground_truth_img,(1,2,0)) + + # perform erosion so that the borders will still be there after augmentation + if self.config.get_parameter("use_binary_erosion") is True: + from skimage.morphology import binary_erosion, disk + # sets dtype back to unsigned integer in order for some augmentations to work + ground_truth_dtype = ground_truth_img.dtype + ground_truth_img = binary_erosion(ground_truth_img, disk(self.config.get_parameter("disk_size"))) + ground_truth_img = ground_truth_img.astype(ground_truth_dtype) + + if self.config.get_parameter("use_binary_dilation") is True: + from skimage.morphology import 
 def reshape_image(self, image): + """Reshapes the image to the correct dimensions for Unet + + Parameters + ---------- + image : `array_like` + Image to be reshaped + + Returns + ---------- + image : `array_like` + Reshaped image + """ + h, w = image.shape[:2] + image = np.reshape(image, (h, w, -1)) + return image + + ####################### + # Image padding + ####################### + def pad_image(self, image, image_size, mode = 'constant'): + """Pad image to specified image_size + + Parameters + ---------- + image : `array_like` + Image to be padded + image_size : `list` + Final size of padded image + mode : `str`, optional + [Default: 'constant'] Mode to pad the image + + Returns + ---------- + image : `array_like` + Padded image + + padding : `list` + List containing the number of pixels padded to each direction + """ + h, w = image.shape[:2] + + top_pad = (image_size[0] - h) // 2 + bottom_pad = image_size[0] - h - top_pad + + left_pad = (image_size[1] - w) // 2 + right_pad = image_size[1] - w - left_pad + + padding = ((top_pad, bottom_pad), (left_pad, right_pad)) + # constant_values is only accepted by np.pad for 'constant' mode + if mode == 'constant': + image = np.pad(image, padding, mode = mode, constant_values=0) + else: + image = np.pad(image, padding, mode = mode) + + return image, padding + + def remove_pad_image(self, image, padding): + """Removes pad from image + + Parameters + ---------- + image : `array_like` + Padded image + padding : `list` + List containing the number of padded pixels in each direction + + Returns + ---------- + image : `array_like` + Image without padding + """ + + h, w = image.shape[:2] + + return image[padding[0][0]:h-padding[0][1], padding[1][0]:w-padding[1][1]] + +
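+# Editor's sketch (addition): pad_image/remove_pad_image round-trip. The padding
+# is split as evenly as possible between the two sides of each axis, and the
+# tiling helpers below reuse the same padding before cutting tiles:
+def _example_pad_round_trip():
+    import numpy as np
+    funcs = Image_Functions()
+    image = np.ones((30, 50))
+    padded, padding = funcs.pad_image(image, image_size=(64, 64))
+    # 64-30=34 rows -> (17, 17); 64-50=14 cols -> (7, 7)
+    assert padded.shape == (64, 64) and padding == ((17, 17), (7, 7))
+    restored = funcs.remove_pad_image(padded, padding)
+    assert restored.shape == (30, 50)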
 ####################### + # Tiling functions + ####################### + def tile_image(self, image, tile_size, tile_overlap_size): + """Converts an image into a list of tiled images + + Parameters + ---------- + image : `array_like` + Image to be tiled + tile_size : `list` + Size of each individual tile + tile_overlap_size : `list` + Amount of overlap (in pixels) between each tile + + Returns + ---------- + tile_image_list : `list` + List of tiled images + + num_rows : `int` + Number of rows of tiles + + num_cols : `int` + Number of cols of tiles + + padding : `list` + Amount of padding added during tiling + """ + image_height, image_width = image.shape[:2] + tile_height = tile_size[0] - tile_overlap_size[0] * 2 + tile_width = tile_size[1] - tile_overlap_size[1] * 2 + + if image_height <= tile_height and image_width <= tile_width: + # image fits in a single tile; keep the return signature consistent for callers that unpack four values + return [self.reshape_image(image)], 1, 1, None + + num_rows = math.ceil(image_height/tile_height) + num_cols = math.ceil(image_width/tile_width) + num_tiles = num_rows*num_cols + + + # pad image to fit tile size + image, padding = self.pad_image(image, (tile_height*num_rows + tile_overlap_size[0] * 2, tile_width*num_cols + tile_overlap_size[1]*2)) + + tile_image_list = [] + + for tile_no in range(num_tiles): + tile_x_start = (tile_no // num_rows) * tile_width + tile_x_end = tile_x_start + tile_size[1] + + tile_y_start = (tile_no % num_rows) * tile_height + tile_y_end = tile_y_start + tile_size[0] + + tile_image = image[tile_y_start: tile_y_end, tile_x_start:tile_x_end] + + # ensure input into unet is of correct shape + tile_image = self.reshape_image(tile_image) + + tile_image_list.append(tile_image) + + return tile_image_list, num_rows, num_cols, padding + + def untile_image(self, tile_list, tile_size, tile_overlap_size, num_rows, num_cols, padding): + """Stitches a list of tiled images back into a single image + + Parameters + ---------- + tile_list : `list` + List of tiled images + tile_size : `list` + Size of each individual tile + tile_overlap_size : `list` + Amount of overlap (in pixels) between each tile + num_rows : `int` + Number of rows of tiles + num_cols : `int` + Number of cols of tiles + padding : `list` + Amount of padding used during tiling + + Returns + ---------- + image : `array_like` + Image without padding + """ + if num_rows == 1 and num_cols == 1: + image = tile_list[0] + + # padding is None when the image fit in a single tile during tile_image + if padding is not None: + image = self.remove_pad_image(image, padding = padding) + + return image + + tile_height = tile_size[0] - tile_overlap_size[0] * 2 + tile_width = tile_size[1] - tile_overlap_size[1] * 2 + + num_tiles = num_rows*num_cols + + for col in range(num_cols): + for row in range(num_rows): + tile_image = tile_list[num_rows*col + row][:,:,0] + tile_image = tile_image[tile_overlap_size[0]:min(-tile_overlap_size[0],-1),tile_overlap_size[1]:min(-tile_overlap_size[1],-1)] + if row == 0: + image_col = np.array(tile_image) + else: + image_col = np.vstack((image_col, tile_image)) + + if col == 0: + image = image_col + else: + image = np.hstack((image, image_col)) + + image, _ = self.pad_image(image, image_size = (tile_height * num_rows + tile_overlap_size[0] * 2, tile_width * num_cols + tile_overlap_size[1]*2)) + + if padding is not None: + image = self.remove_pad_image(image, padding = padding) + + return image + + + ####################### + # Image normalization + ####################### + def percentile_normalization(self, image, in_bound=[3, 99.8]): + """Performs percentile normalization on the image + + Parameters + ---------- + image : `array_like` + Image to be normalized + in_bound : `list` + Upper and lower percentile used to normalize image + + Returns + ---------- + image : `array_like` + Normalized image + + image_min : `float` + Min value of ``image`` + + image_max : `float` + Max value of ``image`` + """ + image_min = np.percentile(image, in_bound[0]) + image_max = np.percentile(image, in_bound[1]) + image = (image - image_min)/(image_max - image_min) + + return image, image_min, image_max diff --git a/models/internals/.ipynb_checkpoints/losses-checkpoint.py b/models/internals/.ipynb_checkpoints/losses-checkpoint.py new file mode 100644 index 0000000..7e8ec84 --- /dev/null +++ b/models/internals/.ipynb_checkpoints/losses-checkpoint.py @@ -0,0 +1,328 @@ +from keras import backend as K +from keras.losses import binary_crossentropy, mean_absolute_error, categorical_crossentropy +import keras +import tensorflow as tf +import numpy as np +from scipy import ndimage + +
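+# Editor's sketch (addition): the overlap measures implemented below, evaluated
+# with plain numpy to make the arithmetic concrete. For flat target t and
+# prediction p with smoothing s:
+#   dice    = (2*sum(t*p) + s) / (sum(t^2) + sum(p^2) + s)
+#   jaccard = (sum(t*p) + s) / (sum(t) + sum(p) - sum(t*p) + s)
+def _example_overlap_scores():
+    import numpy as np
+    t = np.array([1., 1., 0., 0.])
+    p = np.array([1., 0., 0., 0.])
+    inter = np.sum(t * p)                                             # 1.0
+    dice = (2 * inter + 1.) / (np.sum(t ** 2) + np.sum(p ** 2) + 1.)  # 3/4
+    jac = (inter + 100.) / (np.sum(t + p) - inter + 100.)             # 101/102
+    # jaccard_distance_loss below returns (1 - jac) * smooth
+    return dice, jac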
def jaccard_distance_loss(y_true, y_pred, smooth=100): + """ + Jaccard = (|X & Y|)/ (|X|+ |Y| - |X & Y|) + = sum(|A*B|)/(sum(|A|)+sum(|B|)-sum(|A*B|)) + + The jaccard distance loss is useful for unbalanced datasets. This has been + shifted so it converges on 0 and is smoothed to avoid exploding or disappearing + gradient. + + Ref: https://en.wikipedia.org/wiki/Jaccard_index + + @url: https://gist.github.com/wassname/f1452b748efcbeb4cb9b1d059dce6f96 + @author: wassname + """ + intersection = K.sum(y_true * y_pred, axis=-1) + sum_ = K.sum(y_true + y_pred, axis=-1) + jac = (intersection + smooth) / (sum_ - intersection + smooth) + return (1 - jac) * smooth + + + +def dice_coef(y_true, y_pred, smooth=1.): + """ + Dice = (2*|X & Y|)/ (|X|+ |Y|) + = 2*sum(|A*B|)/(sum(A^2)+sum(B^2)) + ref: https://arxiv.org/pdf/1606.04797v1.pdf + + from wassname as well + """ + intersection = K.sum(y_true * y_pred, axis=-1) + return (2. * intersection + smooth) / (K.sum(K.square(y_true),-1) + K.sum(K.square(y_pred),-1) + smooth) + +def dice_coef_loss(y_true, y_pred): + return 1. - dice_coef(y_true, y_pred) + +def bce_dice_loss(y_true, y_pred): + return 1. - dice_coef(y_true, y_pred) + binary_crossentropy(y_true, y_pred) + +def bce_ssim_loss(y_true, y_pred): + return DSSIM_loss(y_true, y_pred) + binary_crossentropy(y_true, y_pred) + +# code downloaded from: https://github.com/bermanmaxim/LovaszSoftmax +def lovasz_grad(gt_sorted): + """ + Computes gradient of the Lovasz extension w.r.t sorted errors + See Alg. 1 in paper + """ + gts = tf.reduce_sum(gt_sorted) + intersection = gts - tf.cumsum(gt_sorted) + union = gts + tf.cumsum(1. - gt_sorted) + jaccard = 1. - intersection / union + jaccard = tf.concat((jaccard[0:1], jaccard[1:] - jaccard[:-1]), 0) + return jaccard + + +# --------------------------- EDGE DETECTION --------------------------- + +def edge_detection(y_true, y_pred): + size = 5 + in_channel = y_pred.shape[-1] # Number of classes + + fil = np.ones([size, size]) + fil[int(size/2), int(size/2)] = 1.0 - size**2 + fil = tf.convert_to_tensor(fil, tf.float32) + fil = tf.stack([fil]*in_channel, axis=2) + fil = tf.expand_dims(fil, 3) + + GT_edge_enhanced = tf.nn.depthwise_conv2d(y_true, fil, strides=[1, 1, 1, 1], padding="SAME") + GT_edge_enhanced = K.cast(GT_edge_enhanced, "float32") + + # Define threshold values on Laplacian filter + Index_1 = tf.where(K.greater(GT_edge_enhanced, 0.1)) + Index_2 = tf.where(K.less(GT_edge_enhanced, -0.1)) + + GT_edge1 = tf.gather_nd(y_true, Index_1) + GT_edge2 = tf.gather_nd(y_true, Index_2) + + Pred_edge1 = tf.gather_nd(y_pred, Index_1) + Pred_edge2 = tf.gather_nd(y_pred, Index_2) + + + y_true = tf.concat([K.flatten(y_true), K.flatten(GT_edge1), K.flatten(GT_edge2)],0) + y_pred = tf.concat([K.flatten(y_pred), K.flatten(Pred_edge1), K.flatten(Pred_edge2)],0) + return y_true, y_pred + + +def edge_detection_sobel(y_true, y_pred): + # convert the datatypes of y_true and y_pred to make sure they share the same dtype + y_true = K.cast(y_true, "float32") + y_pred = K.cast(y_pred, "float32") + GT_edge_enhanced = tf.image.sobel_edges(y_true) + #y_true = K.flatten(y_true) + #y_pred = K.flatten(y_pred) + #GT_edge_enhanced = K.flatten(GT_edge_enhanced) + + GT_edge_enhanced = K.cast(GT_edge_enhanced, "float32") + GT_edge_enhanced = tf.keras.backend.sum(GT_edge_enhanced, axis = -1) # Sum X and Y Sobel + + y_true = K.flatten(y_true) + y_pred = K.flatten(y_pred) + GT_edge_enhanced = K.flatten(GT_edge_enhanced) + + # Define threshold values on sobel filter + Index_1 = tf.where(K.greater(GT_edge_enhanced, 0.001)) + Index_2 = 
tf.where(K.less(GT_edge_enhanced, -0.001)) + + GT_edge1 = tf.gather(y_true, Index_1) + GT_edge2 = tf.gather(y_true, Index_2) + + Pred_edge1 = tf.gather(y_pred, Index_1) + Pred_edge2 = tf.gather(y_pred, Index_2) + + + y_true = tf.concat([y_true, K.flatten(GT_edge1), K.flatten(GT_edge2)],0) + y_pred = tf.concat([y_pred, K.flatten(Pred_edge1), K.flatten(Pred_edge2)],0) + return y_true, y_pred + + +def EE_bce_dice_loss(y_true, y_pred): + y_true, y_pred = edge_detection(y_true, y_pred) + return bce_dice_loss(y_true, y_pred) + + +def EE_jaccard_distance_loss(y_true, y_pred): + y_true, y_pred = edge_detection(y_true, y_pred) + return jaccard_distance_loss(y_true, y_pred) + +def EE_dice_coef_loss(y_true, y_pred): + y_true, y_pred = edge_detection(y_true, y_pred) + return dice_coef_loss(y_true, y_pred) + +def EE_bce_ssim_loss(y_true, y_pred): + y_true, y_pred = edge_detection(y_true, y_pred) + return bce_ssim_loss(y_true, y_pred) + +def EE_binary_crossentropy(y_true, y_pred): + y_true, y_pred = edge_detection(y_true, y_pred) + return binary_crossentropy(y_true, y_pred) + +def EE_categorical_crossentropy(y_true, y_pred): + y_true, y_pred = edge_detection(y_true, y_pred) + return categorical_crossentropy(y_true, y_pred) + + +# --------------------------- BINARY LOSSES --------------------------- + +def lovasz_hinge(logits, labels, per_image=True, ignore=None): + """ + Binary Lovasz hinge loss + logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty) + labels: [B, H, W] Tensor, binary ground truth masks (0 or 1) + per_image: compute the loss per image instead of per batch + ignore: void class id + """ + if per_image: + def treat_image(log_lab): + log, lab = log_lab + log, lab = tf.expand_dims(log, 0), tf.expand_dims(lab, 0) + log, lab = flatten_binary_scores(log, lab, ignore) + return lovasz_hinge_flat(log, lab) + losses = tf.map_fn(treat_image, (logits, labels), dtype=tf.float32) + loss = tf.reduce_mean(losses) + else: + loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore)) + return loss + + +def lovasz_hinge_flat(logits, labels): + """ + Binary Lovasz hinge loss + logits: [P] Variable, logits at each prediction (between -\infty and +\infty) + labels: [P] Tensor, binary ground truth labels (0 or 1) + ignore: label to ignore + """ + + def compute_loss(): + labelsf = tf.cast(labels, logits.dtype) + signs = 2. * labelsf - 1. + errors = 1. 
- logits * tf.stop_gradient(signs) + errors_sorted, perm = tf.nn.top_k(errors, k=tf.shape(errors)[0], name="descending_sort") + gt_sorted = tf.gather(labelsf, perm) + grad = lovasz_grad(gt_sorted) + loss = tf.tensordot(tf.nn.relu(errors_sorted), tf.stop_gradient(grad), 1, name="loss_non_void") + return loss + + # deal with the void prediction case (only void pixels) + loss = tf.cond(tf.equal(tf.shape(logits)[0], 0), + lambda: tf.reduce_sum(logits) * 0., + compute_loss, + strict=True, + name="loss" + ) + return loss + + +def flatten_binary_scores(scores, labels, ignore=None): + """ + Flattens predictions in the batch (binary case) + Remove labels equal to 'ignore' + """ + scores = tf.reshape(scores, (-1,)) + labels = tf.reshape(labels, (-1,)) + if ignore is None: + return scores, labels + valid = tf.not_equal(labels, ignore) + vscores = tf.boolean_mask(scores, valid, name='valid_scores') + vlabels = tf.boolean_mask(labels, valid, name='valid_labels') + return vscores, vlabels + +def lovasz_loss(y_true, y_pred): + y_true, y_pred = K.cast(K.squeeze(y_true, -1), 'int32'), K.cast(K.squeeze(y_pred, -1), 'float32') + #logits = K.log(y_pred / (1. - y_pred)) + logits = y_pred #Jiaxin + loss = lovasz_hinge(logits, y_true, per_image = True, ignore = None) + return loss + +# Difference of Structural Similarity + +def DSSIM_loss(y_true, y_pred, k1=0.01, k2=0.03, kernel_size=3, max_value=1.0): + # There are additional parameters for this function + # Note: some of the 'modes' for edge behavior do not yet have a + # gradient definition in the Theano tree + # and cannot be used for learning + + c1 = (k1 * max_value) ** 2 + c2 = (k2 * max_value) ** 2 + + kernel = [kernel_size, kernel_size] + # note: the patch extraction below uses a hardcoded 5x5 window, so `kernel`/`kernel_size` are currently unused + y_true = K.reshape(y_true, [-1] + list(K.int_shape(y_pred)[1:])) + y_pred = K.reshape(y_pred, [-1] + list(K.int_shape(y_pred)[1:])) + + patches_pred = tf.extract_image_patches(y_pred, [1, 5, 5, 1], [1, 2, 2, 1], [1, 1, 1, 1], "SAME") + patches_true = tf.extract_image_patches(y_true, [1, 5, 5, 1], [1, 2, 2, 1], [1, 1, 1, 1], "SAME") + + # Reshape to get the var in the cells + bs, w, h, c = K.int_shape(patches_pred) + patches_pred = K.reshape(patches_pred, [-1, w, h, c]) + patches_true = K.reshape(patches_true, [-1, w, h, c]) + # Get mean + u_true = K.mean(patches_true, axis=-1) + u_pred = K.mean(patches_pred, axis=-1) + # Get variance + var_true = K.var(patches_true, axis=-1) + var_pred = K.var(patches_pred, axis=-1) + # Get covariance + covar_true_pred = K.mean(patches_true * patches_pred, axis=-1) - u_true * u_pred + + ssim = (2 * u_true * u_pred + c1) * (2 * covar_true_pred + c2) + denom = ((K.square(u_true) + K.square(u_pred) + c1) * (var_pred + var_true + c2)) + ssim /= denom # no need for clipping, c1 and c2 make the denom non-zero + return K.mean((1.0 - ssim) / 2.0) + +def dssim_mae_loss(y_true, y_pred): + return DSSIM_loss(y_true, y_pred) + mean_absolute_error(y_true, y_pred) + +
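+# Editor's sketch (addition): the three factors keras_SSIM_cs below returns,
+# reduced to scalar statistics (means u, variances v, covariance cov):
+#   l = (2*ux*uy + C1) / (ux^2 + uy^2 + C1)   luminance
+#   c = (2*sx*sy + C2) / (vx + vy + C2)       contrast
+#   s = (cov + C3)     / (sx*sy + C3)         structure
+def _example_ssim_terms(ux=0.5, uy=0.5, vx=0.04, vy=0.04, cov=0.04, L=1.0):
+    C1, C2 = (0.01 * L) ** 2, (0.03 * L) ** 2
+    C3 = C2 / 2
+    l = (2 * ux * uy + C1) / (ux ** 2 + uy ** 2 + C1)
+    c = (2 * (vx ** 0.5) * (vy ** 0.5) + C2) / (vx + vy + C2)
+    s = (cov + C3) / ((vx * vy) ** 0.5 + C3)
+    return l, c, s  # (1.0, 1.0, 1.0) when both inputs have identical statistics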
+    c = ((2*K.sqrt(var_x)*K.sqrt(var_y))+C2) / (var_x + var_y + C2)
+    s = (cov_xy+C3) / (K.sqrt(var_x)*K.sqrt(var_y) + C3)
+
+    return [c,s,l]
+
+def keras_MS_SSIM(y_true, y_pred):
+    iterations = 5
+    x=y_true
+    y=y_pred
+    weight = [0.0448, 0.2856, 0.3001, 0.2363, 0.1333]
+    c=[]
+    s=[]
+    for i in range(iterations):
+        cs=keras_SSIM_cs(x, y)
+        c.append(cs[0])
+        s.append(cs[1])
+        l=cs[2]
+        if(i!=4):
+            x=tf.image.resize_images(x, (x.get_shape().as_list()[1]//(2**(i+1)), x.get_shape().as_list()[2]//(2**(i+1))))
+            y=tf.image.resize_images(y, (y.get_shape().as_list()[1]//(2**(i+1)), y.get_shape().as_list()[2]//(2**(i+1))))
+    c = tf.stack(c)
+    s = tf.stack(s)
+    cs = c*s
+
+    #Normalize: suggestion from https://github.com/jorge-pessoa/pytorch-msssim/issues/2 last comment to avoid NaN values
+    l=(l+1)/2
+    cs=(cs+1)/2
+
+    cs=cs**weight
+    cs = tf.reduce_prod(cs)
+    l=l**weight[-1]
+
+    ms_ssim = l*cs
+    ms_ssim = tf.where(tf.is_nan(ms_ssim), K.zeros_like(ms_ssim), ms_ssim)
+
+    return K.mean(ms_ssim)
+
+def mssim_mae_loss(y_true, y_pred):
+    return keras_MS_SSIM(y_true, y_pred) + mean_absolute_error(y_true, y_pred)
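[Editor's note] keras_SSIM_cs calls make_kernel() and cov_keras(), which are never defined in this commit, so invoking the MS-SSIM losses will fail with a NameError. A minimal sketch of compatible helpers, assuming a normalized 2-D Gaussian blur kernel shaped for tf.nn.conv2d and a plain covariance estimate (names, shapes and defaults are assumptions, not part of this commit):

import numpy as np
import tensorflow as tf
from keras import backend as K

def make_kernel(sigma, size=5, channels=1):
    # normalized 2-D Gaussian, shaped [size, size, in_channels, 1] for tf.nn.conv2d
    ax = np.arange(-(size // 2), size // 2 + 1, dtype=np.float32)
    xx, yy = np.meshgrid(ax, ax)
    kernel = np.exp(-(xx ** 2 + yy ** 2) / (2.0 * sigma ** 2))
    kernel /= kernel.sum()
    return tf.constant(np.tile(kernel[:, :, None, None], (1, 1, channels, 1)))

def cov_keras(x, y, axis=None):
    # covariance as E[xy] - E[x]E[y]
    return K.mean(x * y, axis=axis) - K.mean(x, axis=axis) * K.mean(y, axis=axis)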
diff --git a/models/internals/__init__.py b/models/internals/__init__.py
new file mode 100644
index 0000000..61006f3
--- /dev/null
+++ b/models/internals/__init__.py
@@ -0,0 +1 @@
+from __future__ import absolute_import, print_function
\ No newline at end of file
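[Editor's note] A sketch of the directory layout Dataset.load_dataset below walks, inferred from the image_subfolder/ground_truth_subfolder parameters and the loaders in image_functions.py (folder and file names are illustrative):

dataset_dir/
  sample_001/
    Images/   # exactly one input .tif expected; extra files trigger a warning
    Masks/    # one .tif per class; the class name is the text before the first '_'
  sample_002/
    ...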
diff --git a/models/internals/dataset.py b/models/internals/dataset.py
new file mode 100644
index 0000000..f0dfe1c
--- /dev/null
+++ b/models/internals/dataset.py
@@ -0,0 +1,304 @@
+import os, sys
+import numpy as np
+
+import matplotlib.pyplot as plt
+
+from tqdm import tqdm
+
+from .image_functions import Image_Functions
+
+class Dataset(Image_Functions):
+    def __init__(self):
+        """Creates Dataset object that is used to manipulate the training data.
+
+        Attributes
+        ----------
+        classes : list
+            List of dictionaries containing the class name and id
+
+        train_images : list
+            List of images used as the input for the network
+
+        train_ground_truth : list
+            List of images used as the ground truth for the network
+        """
+
+        self.classes = []
+        self.train_images = []
+        self.train_ground_truth = []
+
+        super().__init__()
+
+    #######################
+    # Class id functions
+    #######################
+    def get_class_id(self, class_name):
+        """Returns the class id, adding the class to the list if it is not already known.
+
+        Parameters
+        ----------
+        class_name : str
+            Identity of class that will be associated with the class id
+
+        Returns
+        ----------
+        int
+            Class id
+        """
+
+        if len(self.classes) == 0:
+            self.classes.append({"class": class_name, "id": 0})
+            return 0
+
+        for class_info in self.classes:
+            # if class exists, return class id
+            if class_info["class"] == class_name:
+                return class_info["id"]
+
+        # len(self.classes) is evaluated before the append, so the new entry
+        # gets the next free id and the stored id matches the value returned
+        self.classes.append({"class": class_name, "id": len(self.classes)})
+        return len(self.classes)-1
+
+    #######################
+    # Sanity check
+    #######################
+    def sanity_check(self, image_index):
+        """Plots the augmented image and ground_truth to check if everything is ok.
+
+        Parameters
+        ----------
+        image_index : int
+            Index of the image and its corresponding ground_truth
+        """
+
+        image = self.aug_images[image_index][:,:,0]
+        ground_truth = self.aug_ground_truth[image_index][:,:,0]
+
+        plt.figure(figsize=(14, 14))
+        plt.axis('off')
+        plt.imshow(image, cmap='gray',
+                   norm=None, interpolation=None)
+        plt.show()
+
+        plt.figure(figsize=(14, 14))
+        plt.axis('off')
+        plt.imshow(ground_truth, cmap='gray',
+                   norm=None, interpolation=None)
+        plt.show()
+
+    def load_dataset(self, dataset_dir = None, tiled = False):
+        """Loads dataset from ``dataset_dir``
+
+        Parameters
+        ----------
+        dataset_dir : str or none, optional
+            Folder to load the dataset from. If none, ``dataset_dir`` is obtained from the config file
+
+        tiled : bool, optional
+            Set to True if the tiling function is to be used
+        """
+
+        # update dataset_dir if specified.
If not, load dataset_dir from config file + if dataset_dir is None: + dataset_dir = self.config.get_parameter("dataset_dir") + else: + self.config.update_parameter(self.config.find_key("dataset_dir"), dataset_dir) + image_dirs = next(os.walk(dataset_dir))[1] + image_dirs = [f for f in image_dirs if not f[0] == '.'] + + for img_dir in image_dirs: + # images + image = self.load_image(os.path.join(dataset_dir, img_dir), subfolder = self.config.get_parameter("image_subfolder")) + # percentile normalization + if self.config.get_parameter("percentile_normalization"): + image, _, _ = self.percentile_normalization(image, in_bound = self.config.get_parameter("percentile")) + + if tiled is True: + tile_image_list, num_rows, num_cols, padding = self.tile_image(image, self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size")) + self.config.update_parameter(["images","num_rows"], num_rows) + self.config.update_parameter(["images","num_cols"], num_cols) + self.config.update_parameter(["images","padding"], padding) + self.train_images.extend(tile_image_list) + else: + self.train_images.extend([image,]) + + #ground_truth + ground_truth, class_id = self.load_ground_truth(os.path.join(dataset_dir, img_dir), subfolder = self.config.get_parameter("ground_truth_subfolder")) + if tiled is True: + tile_ground_truth_list, _, _, _ = self.tile_image(ground_truth[0], self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size")) + self.train_ground_truth.extend(tile_ground_truth_list) + else: + self.train_ground_truth.extend(ground_truth) + + ####################### + # Image augmentation + ####################### + def augment_images(self): + """Augments images using the parameters in the config file""" + + # TODO: To allow for augmentation of multi-class images + + augmentor = self.augmentations(p=self.config.get_parameter("augmentations_p")) + + # increase number of images + self.aug_images = self.train_images*self.config.get_parameter("num_augmented_images") + self.aug_ground_truth = self.train_ground_truth*self.config.get_parameter("num_augmented_images") + + print("Performing augmentations on {} images".format(len(self.aug_images))) + sys.stdout.flush() + + for i in tqdm(range(len(self.aug_images)),desc="Augmentation of images"): + + # target must be image and mask in order for albumentations to work + data = {"image": self.aug_images[i], + "mask": self.aug_ground_truth[i]} + augmented = augmentor(**data) + + self.aug_images[i] = self.reshape_image(np.asarray(augmented["image"])) + + # add + if self.config.get_parameter("use_binary_dilation_after_augmentation") is True: + from skimage.morphology import binary_dilation, disk + self.aug_ground_truth[i] = self.reshape_image(binary_dilation(np.ndarray.astype(augmented["mask"], np.bool), disk(self.config.get_parameter("disk_size")))) + else: + self.aug_ground_truth[i] = self.reshape_image(np.ndarray.astype(augmented["mask"], np.bool)) + + self.aug_images = np.stack(self.aug_images, axis = 0) + self.aug_ground_truth = np.stack(self.aug_ground_truth, axis = 0) + + mean = self.aug_images.mean() + std = self.aug_images.std() + + self.config.update_parameter(["images","mean"], float(mean)) + self.config.update_parameter(["images","std"], float(std)) + + print("Augmentations complete!") + + def augmentations(self, p = None): + """Generates list of augmentations using parameters obtained from config file + + Parameters + ---------- + p : int, optional + probability to apply any augmentations to image + + Returns + 
---------- + function + function used to augment images + """ + from albumentations import ( + RandomCrop, HorizontalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90, + Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, ElasticTransform, + IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur, + IAASharpen, RandomBrightnessContrast, Flip, OneOf, Compose + ) + + augmentation_list = [] + + if self.config.get_parameter("random_rotate") is True: + augmentation_list.append(RandomRotate90(p = self.config.get_parameter("random_rotate_p"))) # 0.9 + + if self.config.get_parameter("flip") is True: + augmentation_list.append(Flip()) + + if self.config.get_parameter("transpose") is True: + augmentation_list.append(Transpose()) + + if self.config.get_parameter("blur_group") is True: + blur_augmentation = [] + if self.config.get_parameter("motion_blur") is True: + blur_augmentation.append(MotionBlur(p = self.config.get_parameter("motion_blur_p"))) + if self.config.get_parameter("median_blur") is True: + blur_augmentation.append(MedianBlur(blur_limit = self.config.get_parameter("median_blur_limit"), p = self.config.get_parameter("median_blur_p"))) + if self.config.get_parameter("blur") is True: + blur_augmentation.append(Blur(blur_limit = self.config.get_parameter("blur_limit"), p = self.config.get_parameter("blur_p"))) + augmentation_list.append(OneOf(blur_augmentation, p = self.config.get_parameter("blur_group_p"))) + + if self.config.get_parameter("shift_scale_rotate") is True: + augmentation_list.append(ShiftScaleRotate(shift_limit = self.config.get_parameter("shift_limit"), + scale_limit = self.config.get_parameter("scale_limit"), + rotate_limit = self.config.get_parameter("rotate_limit"), + p = self.config.get_parameter("shift_scale_rotate_p"))) + if self.config.get_parameter("distortion_group") is True: + distortion_augmentation = [] + if self.config.get_parameter("optical_distortion") is True: + distortion_augmentation.append(OpticalDistortion(p = self.config.get_parameter("optical_distortion_p"))) + if self.config.get_parameter("elastic_transform") is True: + distortion_augmentation.append(ElasticTransform(p = self.config.get_parameter("elastic_transform_p"))) + if self.config.get_parameter("grid_distortion") is True: + distortion_augmentation.append(GridDistortion(p = self.config.get_parameter("grid_distortion_p"))) + + augmentation_list.append(OneOf(distortion_augmentation, p = self.config.get_parameter("distortion_group_p"))) + + if self.config.get_parameter("brightness_contrast_group") is True: + contrast_augmentation = [] + if self.config.get_parameter("clahe") is True: + contrast_augmentation.append(CLAHE()) + if self.config.get_parameter("sharpen") is True: + contrast_augmentation.append(IAASharpen()) + if self.config.get_parameter("random_brightness_contrast") is True: + contrast_augmentation.append(RandomBrightnessContrast()) + + augmentation_list.append(OneOf(contrast_augmentation, p = self.config.get_parameter("brightness_contrast_group_p"))) + + augmentation_list.append(RandomCrop(self.config.get_parameter("tile_size")[0], self.config.get_parameter("tile_size")[1], always_apply=True)) + + return Compose(augmentation_list, p = p) + +############################### TODO ############################### +# def preapare_data(self): +# """ +# Performs augmentation if needed +# """ + + +# # Create data generator +# # Return augmented images/ground_truth arrays of batch size +# def generator(features, labels, batch_size, seq_det): +# # create empty 
arrays to contain batch of features and labels +# batch_features = np.zeros((batch_size, features.shape[1], features.shape[2], features.shape[3])) +# batch_labels = np.zeros((batch_size, labels.shape[1], labels.shape[2], labels.shape[3])) + +# while True: +# # Fill arrays of batch size with augmented data taken randomly from full passed arrays +# indexes = random.sample(range(len(features)), batch_size) +# # Perform the exactly the same augmentation for X and y +# random_augmented_images, random_augmented_labels = do_augmentation(seq_det, features[indexes], labels[indexes]) +# batch_features[:,:,:,:] = random_augmented_images[:,:,:,:] +# batch_labels[:,:,:,:] = random_augmented_labels[:,:,:,:] + +# yield batch_features, batch_labels + + # Train augmentation +# def do_augmentation(seq_det, X_train, y_train): +# # Use seq_det to build augmentation. +# # .... +# return np.array(X_train_aug), np.array(y_train_aug) + +# seq = iaa.Sequential([ +# iaa.Fliplr(0.5), # horizontally flip +# iaa.OneOf([ +# iaa.Noop(), +# iaa.GaussianBlur(sigma=(0.0, 1.0)), +# iaa.Noop(), +# iaa.Affine(rotate=(-10, 10), translate_percent={"x": (-0.25, 0.25)}, mode='symmetric', cval=(0)), +# iaa.Noop(), +# iaa.PerspectiveTransform(scale=(0.04, 0.08)), +# iaa.Noop(), +# iaa.PiecewiseAffine(scale=(0.05, 0.1), mode='edge', cval=(0)), +# ]), +# # More as you want ... +# ]) +# seq_det = seq.to_deterministic() + +# history = model.fit_generator(generator(X_train, y_train, BATCH_SIZE, seq_det), +# epochs=EPOCHS, +# steps_per_epoch=steps_per_epoch, +# validation_data=(X_valid, y_valid), +# verbose = 1, +# callbacks = [check_point] +# ) + + # Image augmentations + +############################### END of TODO ############################### \ No newline at end of file diff --git a/models/internals/image_functions.py b/models/internals/image_functions.py new file mode 100644 index 0000000..ebc9dba --- /dev/null +++ b/models/internals/image_functions.py @@ -0,0 +1,366 @@ +import os +import glob +import sys + +import math +import numpy as np + +import skimage +import skimage.io as skio + +class Image_Functions(): + def list_images(self, image_dir, image_ext = '*.tif'): + """List images in the directory with the given file extension + + Parameters + ---------- + image_dir : `str` + Directory to look for image files + image_ext : `str`, optional + [Default: '*.tif'] File extension of the image file + + Returns + ---------- + image_list : `list` + List of images found in the directory with the given file extension + + Notes + ---------- + For linux based systems, please ensure that the file extensions are either in all lowercase or all uppercase. 
+ """ + # to bypass case sensitivity of file extensions in linux and possibly other systems + if sys.platform in ["win32",]: + image_extension = [image_ext] + else: + image_extension = [image_ext.lower(),image_ext.upper()] + + image_list = [] + for ext in image_extension: + image_list.extend(glob.glob(os.path.join(image_dir,ext))) + + return image_list + + ####################### + # Image IO functions + ####################### + def load_image(self, image_path, subfolder = 'Images', image_index = 0, image_ext = '*.tif'): + """Loads images found in ``image_path`` + + Parameters + ---------- + image_path : `str` + Path to look for image files + subfolder : `str`, optional + [Default: 'Images'] Subfolder in which to look for the image files + image_index : `int`, optional + [Default: 0] Index of image to load + image_ext : `str`, optional + [Default: '*.tif'] File extension of the image file + + Returns + ---------- + image : `array_like` + Loaded image + + Notes + ---------- + Only one image from in each directory is loaded. + """ + if os.path.isdir(image_path) is True: + image_list = self.list_images(os.path.join(image_path, subfolder), image_ext = image_ext) + if len(image_list) > 1: + warnings.warn("More that 1 image found in directory. Loading {}".format(image_list[image_index])) + # Load image + image = skio.imread(image_list[image_index]) + else: + image = skio.imread(image_path) + + image = np.expand_dims(image, axis=-1) + return image + + def load_ground_truth(self, image_path, subfolder = 'Masks', image_ext = '*.tif'): + """Loads ground truth images found in ``image_path`` and performs erosion/dilation/inversion if needed + + Parameters + ---------- + image_path : `str` + Path to look for ground truth images + subfolder : `str`, optional + [Default: 'Masks'] Subfolder in which to look for the ground truth images + image_ext : `str`, optional + [Default: '*.tif'] File extension of ground truth image file + + Returns + ---------- + output_ground_truth : `list` + List of ground truth images found in the directory with the given file extension + + class_ids : `list` + List of class ids of the ground truth images + """ + image_list = self.list_images(os.path.join(image_path, subfolder), image_ext = image_ext) + output_ground_truth = [] + class_ids = [] + for ground_truth_path in image_list: + # add class if not in list + ground_truth_name = ground_truth_path.split('\\')[-1] + class_name = ground_truth_name.split('_')[0] + # obtain class_id + class_ids.append(self.get_class_id(class_name)) + + # Load image + ground_truth_img = skio.imread(ground_truth_path) + + # If one mask in 2D, add one extra dimension for the class + if len(ground_truth_img.shape) == 2: + ground_truth_img = np.expand_dims(ground_truth_img, axis=-1) + else: + # Transpore dimension to get class at the end + if ground_truth_img.shape[-1] != self.config.get_parameter("nb_classes"): + ground_truth_img = np.transpose(ground_truth_img,(1,2,0)) + + # perform erosion so that the borders will still be there after augmentation + if self.config.get_parameter("use_binary_erosion") is True: + from skimage.morphology import binary_erosion, disk + # sets dtype back to unsigned integer in order for some augmentations to work + ground_truth_dtype = ground_truth_img.dtype + ground_truth_img = binary_erosion(ground_truth_img, disk(self.config.get_parameter("disk_size"))) + ground_truth_img = ground_truth_img.astype(ground_truth_dtype) + + if self.config.get_parameter("use_binary_dilation") is True: + from skimage.morphology import 
+    def load_ground_truth(self, image_path, subfolder = 'Masks', image_ext = '*.tif'):
+        """Loads ground truth images found in ``image_path`` and performs erosion/dilation/inversion if needed
+
+        Parameters
+        ----------
+        image_path : `str`
+            Path to look for ground truth images
+        subfolder : `str`, optional
+            [Default: 'Masks'] Subfolder in which to look for the ground truth images
+        image_ext : `str`, optional
+            [Default: '*.tif'] File extension of ground truth image file
+
+        Returns
+        ----------
+        output_ground_truth : `list`
+            List of ground truth images found in the directory with the given file extension
+
+        class_ids : `list`
+            List of class ids of the ground truth images
+        """
+        image_list = self.list_images(os.path.join(image_path, subfolder), image_ext = image_ext)
+        output_ground_truth = []
+        class_ids = []
+        for ground_truth_path in image_list:
+            # add class if not in list
+            # os.path.basename is portable; splitting on '\\' only works on Windows
+            ground_truth_name = os.path.basename(ground_truth_path)
+            class_name = ground_truth_name.split('_')[0]
+            # obtain class_id
+            class_ids.append(self.get_class_id(class_name))
+
+            # Load image
+            ground_truth_img = skio.imread(ground_truth_path)
+
+            # If one mask in 2D, add one extra dimension for the class
+            if len(ground_truth_img.shape) == 2:
+                ground_truth_img = np.expand_dims(ground_truth_img, axis=-1)
+            else:
+                # Transpose dimensions to get the class at the end
+                if ground_truth_img.shape[-1] != self.config.get_parameter("nb_classes"):
+                    ground_truth_img = np.transpose(ground_truth_img,(1,2,0))
+
+            # perform erosion so that the borders will still be there after augmentation
+            if self.config.get_parameter("use_binary_erosion") is True:
+                from skimage.morphology import binary_erosion, disk
+                # sets dtype back to unsigned integer in order for some augmentations to work
+                ground_truth_dtype = ground_truth_img.dtype
+                ground_truth_img = binary_erosion(ground_truth_img, disk(self.config.get_parameter("disk_size")))
+                ground_truth_img = ground_truth_img.astype(ground_truth_dtype)
+
+            if self.config.get_parameter("use_binary_dilation") is True:
+                from skimage.morphology import binary_dilation, disk
+                ground_truth_dtype = ground_truth_img.dtype
+                ground_truth_img = binary_dilation(ground_truth_img, disk(self.config.get_parameter("disk_size")))
+                ground_truth_img = ground_truth_img.astype(ground_truth_dtype)
+
+            # perform inversion of ground_truth if needed
+            if self.config.get_parameter("invert_ground_truth") is True:
+                ground_truth_img = skimage.util.invert(ground_truth_img)
+
+            # Concatenate masks from different files together
+            if len(output_ground_truth) == 0:
+                output_ground_truth.append(ground_truth_img)
+            else:
+                output_ground_truth = np.concatenate((output_ground_truth,ground_truth_img[None,:,:]), axis=-1)
+
+        # If multiclass segmentation, add one mask for non-assigned pixels
+        if self.config.get_parameter("nb_classes")>1:
+            last_mask = np.ones([ground_truth_img.shape[0],ground_truth_img.shape[1]])*np.amax(ground_truth_img)
+            last_mask = ((last_mask - np.sum(output_ground_truth, axis =-1))>0)*np.amax(ground_truth_img) # To get rid of overlap and negative values
+            last_mask = np.expand_dims(last_mask, axis=-1)
+            output_ground_truth = np.concatenate((output_ground_truth,last_mask), axis=-1)
+
+        return output_ground_truth, class_ids
+
+    def reshape_image(self, image):
+        """Reshapes the image to the correct dimensions for Unet
+
+        Parameters
+        ----------
+        image : `array_like`
+            Image to be reshaped
+
+        Returns
+        ----------
+        image : `array_like`
+            Reshaped image
+        """
+        h, w = image.shape[:2]
+        image = np.reshape(image, (h, w, -1))
+        return image
+
+    #######################
+    # Image padding
+    #######################
+    def pad_image(self, image, image_size, mode = 'constant'):
+        """Pads image to the specified image_size
+
+        Parameters
+        ----------
+        image : `array_like`
+            Image to be padded
+        image_size : `list`
+            Final size of padded image
+        mode : `str`, optional
+            [Default: 'constant'] Mode used to pad the image
+
+        Returns
+        ----------
+        image : `array_like`
+            Padded image
+
+        padding : `list`
+            List containing the number of pixels padded in each direction
+        """
+        h, w = image.shape[:2]
+
+        top_pad = (image_size[0] - h) // 2
+        bottom_pad = image_size[0] - h - top_pad
+
+        left_pad = (image_size[1] - w) // 2
+        right_pad = image_size[1] - w - left_pad
+
+        padding = ((top_pad, bottom_pad), (left_pad, right_pad))
+        image = np.pad(image, padding, mode = mode, constant_values=0)
+
+        return image, padding
+
+    def remove_pad_image(self, image, padding):
+        """Removes the padding from an image
+
+        Parameters
+        ----------
+        image : `array_like`
+            Padded image
+        padding : `list`
+            List containing the number of padded pixels in each direction
+
+        Returns
+        ----------
+        image : `array_like`
+            Image without padding
+        """
+
+        h, w = image.shape[:2]
+
+        return image[padding[0][0]:h-padding[0][1], padding[1][0]:w-padding[1][1]]
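+    # (editor's note) round-trip sketch for the two helpers above, with
+    # hypothetical sizes:
+    #   padded, padding = self.pad_image(np.zeros((200, 300)), image_size = (256, 320))
+    #   restored = self.remove_pad_image(padded, padding)   # back to (200, 300)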
+    #######################
+    # Tiling functions
+    #######################
+    def tile_image(self, image, tile_size, tile_overlap_size):
+        """Converts an image into a list of tiled images
+
+        Parameters
+        ----------
+        image : `array_like`
+            Image to be tiled
+        tile_size : `list`
+            Size of each individual tile
+        tile_overlap_size : `list`
+            Amount of overlap (in pixels) between each tile
+
+        Returns
+        ----------
+        tile_image_list : `list`
+            List of tiled images
+        num_rows, num_cols : `int`
+            Number of rows and columns of tiles
+        padding : `list`
+            Padding applied to the image before tiling
+        """
+        image_height, image_width = image.shape[:2]
+        tile_height = tile_size[0] - tile_overlap_size[0] * 2
+        tile_width = tile_size[1] - tile_overlap_size[1] * 2
+
+        if image_height <= tile_height and image_width <= tile_width:
+            # callers unpack a 4-tuple, so return one in the single-tile case as well
+            return [self.reshape_image(image)], 1, 1, None
+
+        num_rows = math.ceil(image_height/tile_height)
+        num_cols = math.ceil(image_width/tile_width)
+        num_tiles = num_rows*num_cols
+
+        # pad image to fit tile size
+        image, padding = self.pad_image(image, (tile_height*num_rows + tile_overlap_size[0] * 2, tile_width*num_cols + tile_overlap_size[1]*2))
+
+        tile_image_list = []
+
+        for tile_no in range(num_tiles):
+            tile_x_start = (tile_no // num_rows) * tile_width
+            tile_x_end = tile_x_start + tile_size[1]
+
+            tile_y_start = (tile_no % num_rows) * tile_height
+            tile_y_end = tile_y_start + tile_size[0]
+
+            tile_image = image[tile_y_start: tile_y_end, tile_x_start:tile_x_end]
+
+            # ensure input into unet is of correct shape
+            tile_image = self.reshape_image(tile_image)
+
+            tile_image_list.append(tile_image)
+
+        return tile_image_list, num_rows, num_cols, padding
+
+    def untile_image(self, tile_list, tile_size, tile_overlap_size, num_rows, num_cols, padding):
+        """Stitches a list of tiled images back into a single image
+
+        Parameters
+        ----------
+        tile_list : `list`
+            List of tiled images
+        tile_size : `list`
+            Size of each individual tile
+        tile_overlap_size : `list`
+            Amount of overlap (in pixels) between each tile
+        num_rows : `int`
+            Number of rows of tiles
+        num_cols : `int`
+            Number of cols of tiles
+        padding : `list`
+            Amount of padding used during tiling
+
+        Returns
+        ----------
+        image : `array_like`
+            Stitched image without padding
+        """
+        if num_rows == 1 and num_cols == 1:
+            image = tile_list[0]
+
+            # padding is None when tile_image took its single-tile early return
+            if padding is not None:
+                image = self.remove_pad_image(image, padding = padding)
+
+            return image
+
+        tile_height = tile_size[0] - tile_overlap_size[0] * 2
+        tile_width = tile_size[1] - tile_overlap_size[1] * 2
+
+        num_tiles = num_rows*num_cols
+
+        for col in range(num_cols):
+            for row in range(num_rows):
+                tile_image = tile_list[num_rows*col + row][:,:,0]
+                tile_image = tile_image[tile_overlap_size[0]:min(-tile_overlap_size[0],-1),tile_overlap_size[1]:min(-tile_overlap_size[1],-1)]
+                if row == 0:
+                    image_col = np.array(tile_image)
+                else:
+                    image_col = np.vstack((image_col, tile_image))
+
+            if col == 0:
+                image = image_col
+            else:
+                image = np.hstack((image, image_col))
+
+        image, _ = self.pad_image(image, image_size = (tile_height * num_rows + tile_overlap_size[0] * 2, tile_width * num_cols + tile_overlap_size[1]*2))
+
+        if padding is not None:
+            image = self.remove_pad_image(image, padding = padding)
+
+        return image
+
+
+    #######################
+    # Image normalization
+    #######################
+    def percentile_normalization(self, image, in_bound=[3, 99.8]):
+        """Performs percentile normalization on the image
+
+        Parameters
+        ----------
+        image : `array_like`
+            Image to be normalized
+        in_bound : `list`
+            Lower and upper percentiles used to normalize the image
+
+        Returns
+        ----------
+        image : `array_like`
+            Normalized image
+
+        image_min : `float`
+            Value of the lower percentile of ``image``
+
+        image_max : `float`
+            Value of the upper percentile of ``image``
+        """
+        image_min = np.percentile(image, in_bound[0])
+        image_max = np.percentile(image, in_bound[1])
+        image = (image - image_min)/(image_max - image_min)
+
+        return image, image_min, image_max
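[Editor's note] The arithmetic of percentile_normalization above, spelled out with illustrative values; note that values outside the chosen percentiles fall outside [0, 1] and are not clipped:

import numpy as np
img = np.random.poisson(50, (64, 64)).astype(np.float32)
lo, hi = np.percentile(img, 3), np.percentile(img, 99.8)
norm = (img - lo) / (hi - lo)  # maps [lo, hi] onto [0, 1]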
diff --git a/models/internals/losses.py b/models/internals/losses.py
new file mode 100644
index 0000000..7e8ec84
--- /dev/null
+++ b/models/internals/losses.py
@@ -0,0 +1,328 @@
+from keras import backend as K
+from keras.losses import binary_crossentropy, mean_absolute_error, categorical_crossentropy
+import keras
+import tensorflow as tf
+import numpy as np
+from scipy import ndimage
+
+def jaccard_distance_loss(y_true, y_pred, smooth=100):
+    """
+    Jaccard = (|X & Y|)/ (|X|+ |Y| - |X & Y|)
+            = sum(|A*B|)/(sum(|A|)+sum(|B|)-sum(|A*B|))
+
+    The Jaccard distance loss is useful for unbalanced datasets. This has been
+    shifted so it converges on 0 and is smoothed to avoid exploding or vanishing
+    gradients.
+
+    Ref: https://en.wikipedia.org/wiki/Jaccard_index
+
+    @url: https://gist.github.com/wassname/f1452b748efcbeb4cb9b1d059dce6f96
+    @author: wassname
+    """
+    intersection = K.sum(y_true * y_pred, axis=-1)
+    sum_ = K.sum(y_true + y_pred, axis=-1)
+    jac = (intersection + smooth) / (sum_ - intersection + smooth)
+    return (1 - jac) * smooth
+
+
+def dice_coef(y_true, y_pred, smooth=1.):
+    """
+    Dice = (2*|X & Y|)/ (|X|+ |Y|)
+         = 2*sum(|A*B|)/(sum(A^2)+sum(B^2))
+    ref: https://arxiv.org/pdf/1606.04797v1.pdf
+
+    from wassname as well
+    """
+    intersection = K.sum(y_true * y_pred, axis=-1)
+    return (2. * intersection + smooth) / (K.sum(K.square(y_true),-1) + K.sum(K.square(y_pred),-1) + smooth)
+
+def dice_coef_loss(y_true, y_pred):
+    return 1. - dice_coef(y_true, y_pred)
+
+def bce_dice_loss(y_true, y_pred):
+    return 1. - dice_coef(y_true, y_pred) + binary_crossentropy(y_true, y_pred)
+
+def bce_ssim_loss(y_true, y_pred):
+    return DSSIM_loss(y_true, y_pred) + binary_crossentropy(y_true, y_pred)
+
+# code downloaded from: https://github.com/bermanmaxim/LovaszSoftmax
+def lovasz_grad(gt_sorted):
+    """
+    Computes gradient of the Lovasz extension w.r.t sorted errors
+    See Alg. 1 in paper
+    """
+    gts = tf.reduce_sum(gt_sorted)
+    intersection = gts - tf.cumsum(gt_sorted)
+    union = gts + tf.cumsum(1. - gt_sorted)
+    jaccard = 1. - intersection / union
+    jaccard = tf.concat((jaccard[0:1], jaccard[1:] - jaccard[:-1]), 0)
+    return jaccard
+
+
+# --------------------------- EDGE DETECTION ---------------------------
+
+def edge_detection(y_true, y_pred):
+    size = 5
+    in_channel = y_pred.shape[-1] # Number of classes
+
+    # Laplacian-style kernel: -(size^2 - 1) at the centre, 1 elsewhere
+    fil = np.ones([size, size])
+    fil[int(size/2), int(size/2)] = 1.0 - size**2
+    fil = tf.convert_to_tensor(fil, tf.float32)
+    fil = tf.stack([fil]*in_channel, axis=2)
+    fil = tf.expand_dims(fil, 3)
+
+    GT_edge_enhanced = tf.nn.depthwise_conv2d(y_true, fil, strides=[1, 1, 1, 1], padding="SAME")
+    GT_edge_enhanced = K.cast(GT_edge_enhanced, "float32")
+
+    # Define threshold values on the Laplacian filter response
+    Index_1 = tf.where(K.greater(GT_edge_enhanced, 0.1))
+    Index_2 = tf.where(K.less(GT_edge_enhanced, -0.1))
+
+    GT_edge1 = tf.gather_nd(y_true, Index_1)
+    GT_edge2 = tf.gather_nd(y_true, Index_2)
+
+    Pred_edge1 = tf.gather_nd(y_pred, Index_1)
+    Pred_edge2 = tf.gather_nd(y_pred, Index_2)
+
+    y_true = tf.concat([K.flatten(y_true), K.flatten(GT_edge1), K.flatten(GT_edge2)],0)
+    y_pred = tf.concat([K.flatten(y_pred), K.flatten(Pred_edge1), K.flatten(Pred_edge2)],0)
+    return y_true, y_pred
+
+
+def edge_detection_sobel(y_true, y_pred):
+    # convert the dtypes of y_true and y_pred to make sure they match
+    y_true = K.cast(y_true, "float32")
+    y_pred = K.cast(y_pred, "float32")
+    GT_edge_enhanced = tf.image.sobel_edges(y_true)
+
+    GT_edge_enhanced = K.cast(GT_edge_enhanced, "float32")
+    GT_edge_enhanced = tf.keras.backend.sum(GT_edge_enhanced, axis = -1) # Sum the X and Y Sobel responses
+
+    y_true = K.flatten(y_true)
+    y_pred = K.flatten(y_pred)
+    GT_edge_enhanced = K.flatten(GT_edge_enhanced)
+
+    # Define threshold values on the Sobel filter response
+    Index_1 = tf.where(K.greater(GT_edge_enhanced, 0.001))
+    Index_2 = tf.where(K.less(GT_edge_enhanced, -0.001))
+
+    GT_edge1 = tf.gather(y_true, Index_1)
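+    # (editor's note) the gathered edge pixels are appended onto the flattened
+    # tensors below, so any loss computed on (y_true, y_pred) afterwards counts
+    # edge pixels twice, i.e. edges are up-weighted rather than used exclusively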
+ GT_edge2 = tf.gather(y_true, Index_2) + + Pred_edge1 = tf.gather(y_pred, Index_1) + Pred_edge2 = tf.gather(y_pred, Index_2) + + + y_true = tf.concat([y_true, K.flatten(GT_edge1), K.flatten(GT_edge2)],0) + y_pred = tf.concat([y_pred, K.flatten(Pred_edge1), K.flatten(Pred_edge2)],0) + return y_true, y_pred + + +def EE_bce_dice_loss(y_true, y_pred): + y_true, y_pred = edge_detection(y_true, y_pred) + return bce_dice_loss(y_true, y_pred) + + +def EE_jaccard_distance_loss(y_true, y_pred): + y_true, y_pred = edge_detection(y_true, y_pred) + return jaccard_distance_loss(y_true, y_pred) + +def EE_dice_coef_loss(y_true, y_pred): + y_true, y_pred = edge_detection(y_true, y_pred) + return dice_coef_loss(y_true, y_pred) + +def EE_bce_ssim_loss(y_true, y_pred): + y_true, y_pred = edge_detection(y_true, y_pred) + return bce_ssim_loss(y_true, y_pred) + +def EE_binary_crossentropy(y_true, y_pred): + y_true, y_pred = edge_detection(y_true, y_pred) + return binary_crossentropy(y_true, y_pred) + +def EE_categorical_crossentropy(y_true, y_pred): + y_true, y_pred = edge_detection(y_true, y_pred) + return categorical_crossentropy(y_true, y_pred) + + +# --------------------------- BINARY LOSSES --------------------------- + +def lovasz_hinge(logits, labels, per_image=True, ignore=None): + """ + Binary Lovasz hinge loss + logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty) + labels: [B, H, W] Tensor, binary ground truth masks (0 or 1) + per_image: compute the loss per image instead of per batch + ignore: void class id + """ + if per_image: + def treat_image(log_lab): + log, lab = log_lab + log, lab = tf.expand_dims(log, 0), tf.expand_dims(lab, 0) + log, lab = flatten_binary_scores(log, lab, ignore) + return lovasz_hinge_flat(log, lab) + losses = tf.map_fn(treat_image, (logits, labels), dtype=tf.float32) + loss = tf.reduce_mean(losses) + else: + loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore)) + return loss + + +def lovasz_hinge_flat(logits, labels): + """ + Binary Lovasz hinge loss + logits: [P] Variable, logits at each prediction (between -\infty and +\infty) + labels: [P] Tensor, binary ground truth labels (0 or 1) + ignore: label to ignore + """ + + def compute_loss(): + labelsf = tf.cast(labels, logits.dtype) + signs = 2. * labelsf - 1. + errors = 1. - logits * tf.stop_gradient(signs) + errors_sorted, perm = tf.nn.top_k(errors, k=tf.shape(errors)[0], name="descending_sort") + gt_sorted = tf.gather(labelsf, perm) + grad = lovasz_grad(gt_sorted) + loss = tf.tensordot(tf.nn.relu(errors_sorted), tf.stop_gradient(grad), 1, name="loss_non_void") + return loss + + # deal with the void prediction case (only void pixels) + loss = tf.cond(tf.equal(tf.shape(logits)[0], 0), + lambda: tf.reduce_sum(logits) * 0., + compute_loss, + strict=True, + name="loss" + ) + return loss + + +def flatten_binary_scores(scores, labels, ignore=None): + """ + Flattens predictions in the batch (binary case) + Remove labels equal to 'ignore' + """ + scores = tf.reshape(scores, (-1,)) + labels = tf.reshape(labels, (-1,)) + if ignore is None: + return scores, labels + valid = tf.not_equal(labels, ignore) + vscores = tf.boolean_mask(scores, valid, name='valid_scores') + vlabels = tf.boolean_mask(labels, valid, name='valid_labels') + return vscores, vlabels + +def lovasz_loss(y_true, y_pred): + y_true, y_pred = K.cast(K.squeeze(y_true, -1), 'int32'), K.cast(K.squeeze(y_pred, -1), 'float32') + #logits = K.log(y_pred / (1. 
- y_pred))
+    logits = y_pred #Jiaxin
+    loss = lovasz_hinge(logits, y_true, per_image = True, ignore = None)
+    return loss
+
+# Difference of Structural Similarity
+
+def DSSIM_loss(y_true, y_pred, k1=0.01, k2=0.03, kernel_size=3, max_value=1.0):
+    # There are additional parameters for this function
+    # Note: some of the 'modes' for edge behavior do not yet have a
+    # gradient definition in the Theano tree
+    # and cannot be used for learning
+
+    c1 = (k1 * max_value) ** 2
+    c2 = (k2 * max_value) ** 2
+
+    kernel = [kernel_size, kernel_size]  # note: unused below, the patch size is hard-coded to 5x5
+    y_true = K.reshape(y_true, [-1] + list(K.int_shape(y_pred)[1:]))
+    y_pred = K.reshape(y_pred, [-1] + list(K.int_shape(y_pred)[1:]))
+
+    patches_pred = tf.extract_image_patches(y_pred, [1, 5, 5, 1], [1, 2, 2, 1], [1, 1, 1, 1], "SAME")
+    patches_true = tf.extract_image_patches(y_true, [1, 5, 5, 1], [1, 2, 2, 1], [1, 1, 1, 1], "SAME")
+
+    # Reshape to get the var in the cells
+    bs, w, h, c = K.int_shape(patches_pred)
+    patches_pred = K.reshape(patches_pred, [-1, w, h, c])
+    patches_true = K.reshape(patches_true, [-1, w, h, c])
+    # Get mean
+    u_true = K.mean(patches_true, axis=-1)
+    u_pred = K.mean(patches_pred, axis=-1)
+    # Get variance
+    var_true = K.var(patches_true, axis=-1)
+    var_pred = K.var(patches_pred, axis=-1)
+    # Get covariance
+    covar_true_pred = K.mean(patches_true * patches_pred, axis=-1) - u_true * u_pred
+
+    ssim = (2 * u_true * u_pred + c1) * (2 * covar_true_pred + c2)
+    denom = ((K.square(u_true)
+              + K.square(u_pred)
+              + c1) * (var_pred + var_true + c2))
+    ssim /= denom  # no need for clipping, c1 and c2 make the denom non-zero
+    return K.mean((1.0 - ssim) / 2.0)
+
+def dssim_mae_loss(y_true, y_pred):
+    return DSSIM_loss(y_true, y_pred) + mean_absolute_error(y_true, y_pred)
+
+#MSSim
+#https://stackoverflow.com/questions/48744945/keras-ms-ssim-as-loss-function
+def keras_SSIM_cs(y_true, y_pred):
+    axis=None
+    gaussian = make_kernel(1.5)
+    x = tf.nn.conv2d(y_true, gaussian, strides=[1, 1, 1, 1], padding='SAME')
+    y = tf.nn.conv2d(y_pred, gaussian, strides=[1, 1, 1, 1], padding='SAME')
+
+    u_x=K.mean(x, axis=axis)
+    u_y=K.mean(y, axis=axis)
+
+    var_x=K.var(x, axis=axis)
+    var_y=K.var(y, axis=axis)
+
+    cov_xy=cov_keras(x, y, axis)
+
+    K1=0.01
+    K2=0.03
+    L=1 # depth of image (255 in case the image has a different scale)
+
+    C1=(K1*L)**2
+    C2=(K2*L)**2
+    C3=C2/2
+
+    l = ((2*u_x*u_y)+C1) / (K.pow(u_x,2) + K.pow(u_y,2) + C1)
+    c = ((2*K.sqrt(var_x)*K.sqrt(var_y))+C2) / (var_x + var_y + C2)
+    s = (cov_xy+C3) / (K.sqrt(var_x)*K.sqrt(var_y) + C3)
+
+    return [c,s,l]
+
+def keras_MS_SSIM(y_true, y_pred):
+    iterations = 5
+    x=y_true
+    y=y_pred
+    weight = [0.0448, 0.2856, 0.3001, 0.2363, 0.1333]
+    c=[]
+    s=[]
+    for i in range(iterations):
+        cs=keras_SSIM_cs(x, y)
+        c.append(cs[0])
+        s.append(cs[1])
+        l=cs[2]
+        if(i!=4):
+            x=tf.image.resize_images(x, (x.get_shape().as_list()[1]//(2**(i+1)), x.get_shape().as_list()[2]//(2**(i+1))))
+            y=tf.image.resize_images(y, (y.get_shape().as_list()[1]//(2**(i+1)), y.get_shape().as_list()[2]//(2**(i+1))))
+    c = tf.stack(c)
+    s = tf.stack(s)
+    cs = c*s
+
+    #Normalize: suggestion from https://github.com/jorge-pessoa/pytorch-msssim/issues/2 last comment to avoid NaN values
+    l=(l+1)/2
+    cs=(cs+1)/2
+
+    cs=cs**weight
+    cs = tf.reduce_prod(cs)
+    l=l**weight[-1]
+
+    ms_ssim = l*cs
+    ms_ssim = tf.where(tf.is_nan(ms_ssim), K.zeros_like(ms_ssim), ms_ssim)
+
+    return K.mean(ms_ssim)
+
+def mssim_mae_loss(y_true, y_pred):
+    return keras_MS_SSIM(y_true, y_pred) + mean_absolute_error(y_true, y_pred)
diff --git
a/models/internals/metrics.py b/models/internals/metrics.py new file mode 100644 index 0000000..a426549 --- /dev/null +++ b/models/internals/metrics.py @@ -0,0 +1,23 @@ +"""Metrics for measuring machine learning algorithm performances +adapted from https://github.com/deaspo/Unet_MedicalImagingSegmentation +""" + +from keras import backend as K +import tensorflow as tf +import numpy as np + +def mean_iou(y_true, y_pred): + prec = [] + for t in np.arange(0.5, 1.0, 0.05): + #y_pred_ = tf.to_int32(y_pred > t) + y_pred_ = tf.cast(y_pred > t, tf.int32) + if K.int_shape(y_pred)[-1] >1: + num_class = K.int_shape(y_pred)[-1] + else: + num_class = K.int_shape(y_pred)[-1]+1 + score, up_opt = tf.compat.v1.metrics.mean_iou(y_true, y_pred_, num_class) + K.get_session().run(tf.compat.v1.local_variables_initializer()) + with tf.control_dependencies([up_opt]): + score = tf.identity(score) + prec.append(score) + return K.mean(K.stack(prec), axis=0) \ No newline at end of file diff --git a/models/internals/network_config.py b/models/internals/network_config.py new file mode 100644 index 0000000..9455a2c --- /dev/null +++ b/models/internals/network_config.py @@ -0,0 +1,237 @@ +import glob +import os +from ruamel.yaml import YAML + +class Network_Config(object): + def __init__(self, model_dir = None, config_filepath = None, **kwargs): + """Creates Network_Config object that contains the network parameters and functions needed to manipulate these parameters. + + Parameters + ---------- + model_dir : `str`, optional + [Default: None] Folder where the model is to be saved/read from + config_filepath : `str`, optional + [Default: None] Filepath to the config file that will be loaded + **kwargs + For network parameters that are to be changed from the loaded config file + + Attributes + ---------- + yaml : :class:`ruamel.yaml.YAML` + YAML class with function needed to read/write YAML files + config : `dict` + Dictionary containing the config parameters + """ + self.yaml=YAML() + + # load config file from model_dir + if config_filepath is not None: + + self.config = self.load_config_from_file(config_filepath) + print("Loaded config file from {}".format(config_filepath)) + elif model_dir is not None: + try: + self.config = self.load_config_from_model_dir(model_dir) + print("Loaded config file from {}".format(model_dir)) + except: + print("Please ensure that config_filepath is set or there is a config file in model_dir") + raise + + if model_dir is not None: + # update model_dir in config + print("Updating model_dir to {}".format(model_dir)) + self.update_parameter(["general", "model_dir"], model_dir) + + # overwrite network parameters with parameters given during initialization + for key, value in kwargs.items(): + self.update_parameter(self.find_key(key), value) + + # perform calculations + self.update_parameter(["model", "input_size"], self.get_parameter("tile_size") + [self.get_parameter("image_channel"),]) + self.update_parameter(["model", "batch_size"], self.get_parameter("batch_size_per_GPU")) # * self.gpu_count + + ###################### + # Accessors/Mutators + ###################### + def get_parameter(self, parameter, config = []): + """Output the value from the config file using the given key + + Parameters + ---------- + parameter : `list` or `str` + Key or list of keys used to find for the value in the config file + + config : `list`, optional + Used to iterate through nested dictionaries. 
+            Required to recursively iterate through a nested dictionary
+
+        Returns
+        ----------
+        value : `str` or `int` or `list`
+            Value obtained from the specified key
+
+        See Also
+        ----------
+        find_key : Function to identify the list of keys to address the correct item in a nested dictionary
+        """
+        assert isinstance(parameter, (list, str))
+
+        # search for the key in the nested dictionary
+        if isinstance(parameter, str):
+            parameter = self.find_key(parameter)
+
+        if config == []:
+            config = self.config
+        if config is None:
+            return None
+
+        if not parameter:
+            return config
+
+        return self.get_parameter(parameter[1:], config = config.get(parameter[0]))
+
+    def update_parameter(self, parameter, value, config = None):
+        """Updates the parameter in the config file using a fully addressed list
+
+        Parameters
+        ----------
+        parameter : `list`
+            List of keys that point to the correct item in the nested dictionary
+
+        value : `str` or `int` or `list`
+            Value that is updated in the nested dictionary
+
+        config : `dict` or `none`, optional
+            Used to iterate through nested dictionaries
+
+        Returns
+        ----------
+        TODO
+        """
+
+        assert type(parameter) is list
+
+        if config is None:
+            config = self.config
+
+        if len(parameter) == 1:
+            config.update({parameter[0]: value})
+            return config
+        # recurse into the current sub-dictionary, not self.config, so that keys
+        # nested more than two levels deep are updated in the right place
+        return self.update_parameter(parameter[1:], value, config = config.get(parameter[0]))
+
+    def find_key(self, key, config = None):
+        """Finds the list of keys needed to address the correct item in a nested dictionary
+
+        Parameters
+        ----------
+        key : `str`
+            Key that needs to be correctly addressed in a nested dictionary
+
+        config : `dict` or `none`, optional
+            Used to iterate through nested dictionaries
+
+        Returns
+        ----------
+        key : `list`
+            Address of the key in the nested dictionary
+        """
+
+        if config is None:
+            config = self.config
+
+        for k, v in config.items():
+            if k == key:
+                return [k]
+            elif isinstance(v, dict):
+                found_key = self.find_key(key, config = v)
+                if found_key is not None:
+                    return [k] + found_key
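+    # (editor's note) usage sketch: a bare string key is resolved with find_key
+    # before the nested lookup, so the two calls below are equivalent, assuming
+    # batch_size_per_GPU sits under the model section of the yaml:
+    #   self.get_parameter("batch_size_per_GPU")
+    #   self.get_parameter(["model", "batch_size_per_GPU"])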
+    ######################
+    # Config IO options
+    ######################
+    def load_config_from_file(self, file_path):
+        """Loads parameters from a yaml file
+
+        Parameters
+        ----------
+        file_path : `str`
+            Path of config file to load
+
+        Returns
+        ----------
+        config : `dict`
+            Dictionary containing the config parameters
+        """
+
+        with open(file_path, 'r') as input_file:
+            config = self.yaml.load(input_file)
+
+        return config
+
+    def load_config_from_model_dir(self, model_dir):
+        """Searches the model directory for a config file and loads it
+
+        Parameters
+        ----------
+        model_dir : `str`
+            Folder to search for and load the config file
+
+        Returns
+        ----------
+        config : `dict`
+            Dictionary containing the config parameters
+
+        Raises
+        ------
+        IndexError
+            If there is no config file in the model_dir
+        """
+
+        # check if a yaml file exists in model_dir
+        try:
+            list_config_files = glob.glob(os.path.join(model_dir,'*config.yml'))
+            if len(list_config_files) > 1:
+                print("Multiple config files found. Loading {}".format(list_config_files[0]))
+            else:
+                print("Config file exists in model directory. Loading {}".format(list_config_files[0]))
+            return self.load_config_from_file(list_config_files[0])
+        except IndexError:
+            print("No config file found in model_dir.")
+            raise
+
+    def write_config(self, file_path):
+        """Writes the config parameters to a yaml file
+
+        Parameters
+        ----------
+        file_path : `str`
+            Path of config file to write to
+        """
+
+        with open(file_path, 'w') as output_file:
+            self.yaml.dump(self.config, output_file)
+
+        print("Config file written to: {}".format(file_path))
+
+    def write_model(self, model, file_path):
+        """Writes the model architecture to a yaml file
+
+        Parameters
+        ----------
+        model : :class:`Keras.model`
+            Keras model that will be parsed and written to a yaml file
+
+        file_path : `str`
+            Path of model file to write to
+        """
+
+        with open(file_path, 'w') as output_file:
+            output_file.write(model.to_yaml())
+
+        print("Model file written to: {}".format(file_path))
\ No newline at end of file
diff --git a/models/layers/__init__.py b/models/layers/__init__.py
new file mode 100644
index 0000000..61006f3
--- /dev/null
+++ b/models/layers/__init__.py
@@ -0,0 +1 @@
+from __future__ import absolute_import, print_function
\ No newline at end of file
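[Editor's note] A minimal round-trip sketch for Network_Config above; the output path is hypothetical, and the call assumes the yaml defines the tile/batch keys that __init__ derives input_size and batch_size from:

from models.internals.network_config import Network_Config

cfg = Network_Config(config_filepath="configs/default_singleclass_unet.yml")
cfg.update_parameter(["general", "model_dir"], "/tmp/run-01")  # same call __init__ makes
cfg.write_config("/tmp/run-01-config.yml")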
diff --git a/models/layers/layers.py b/models/layers/layers.py
new file mode 100644
index 0000000..507d984
--- /dev/null
+++ b/models/layers/layers.py
@@ -0,0 +1,59 @@
+import math
+
+import keras
+from keras.models import Model, load_model
+from keras.layers import Input, BatchNormalization, Activation
+from keras.layers.core import Lambda, Dropout
+from keras.layers.convolutional import Conv2D, Conv2DTranspose, UpSampling2D
+from keras.layers.convolutional_recurrent import ConvLSTM2D
+from keras.layers.pooling import MaxPooling2D
+from keras.layers.merge import Concatenate, Add
+from keras import regularizers
+from keras import backend as K
+
+import tensorflow as tf
+
+def activation_function(inputs, acti):
+    if isinstance(acti, str):
+        return Activation(acti)(inputs)
+    else:
+        return acti(inputs)
+
+def regularizer_function(weight_regularizer):
+    if weight_regularizer == 0 or weight_regularizer is None:
+        return None
+    else:
+        return regularizers.l2(weight_regularizer)
+
+def bn_relu_conv2d(inputs, filters, filter_size,
+                   strides = 1, acti = None, padding = None,
+                   kernel_initializer = None, weight_regularizer = None, name = ""):
+    output = BatchNormalization()(inputs)
+    output = activation_function(output, acti)
+    output = Conv2D(filters, (filter_size, filter_size), padding=padding, strides = strides,
+                    kernel_initializer=kernel_initializer,
+                    kernel_regularizer=regularizer_function(weight_regularizer))(output)
+
+    return output
+
+def bn_relu_conv2dtranspose(inputs, filters, filter_size,
+                            strides = 2, acti = None, padding = None,
+                            kernel_initializer = None, weight_regularizer = None, name = ""):
+    output = BatchNormalization()(inputs)
+    output = activation_function(output, acti)
+    # note: the transpose kernel is hard-coded to (2, 2); filter_size is unused here
+    output = Conv2DTranspose(filters, (2, 2), strides=strides, padding=padding,
+                             kernel_initializer=kernel_initializer,
+                             kernel_regularizer=regularizer_function(weight_regularizer))(output)
+    return output
+
+def normalize_input(inputs, scale_input = False, mean_std_normalization = False, mean = None, std = None):
+    if mean_std_normalization is True:
+        print("Using mean/std normalization")
+        return Lambda(lambda x: (x - mean)/std)(inputs)
+    elif scale_input is True:
+        print("Scaling input by 1/255")
+        return Lambda(lambda x: x / 255)(inputs)
+    else:
+        return inputs
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..b6f70bf
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,20 @@
+matplotlib==3.1.2
+scikit-image==0.17.2
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..b6f70bf
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,20 @@
+matplotlib==3.1.2
+scikit-image==0.17.2
+scikit-learn==0.22.1
+ruamel.yaml==0.16.12
+tqdm==4.53.0
+tensorflow-gpu==1.14.0
+protobuf==3.11.2
+PyYAML==5.3
+albumentations==0.5.1
+Keras==2.3.0
+jupyterlab==1.2.5
+jupyter-tensorboard==0.1.10
+tensorboard==1.14.0
+numba==0.48.0
+ipywidgets==7.5.1
+# npm is not a pip package; install nodejs/npm separately (see note below)
+h5py==2.10.0
+
+
+#conda install -c conda-forge nodejs==16.13.2
diff --git a/unets.ipynb b/unets.ipynb
new file mode 100644
index 0000000..08253ce
--- /dev/null
+++ b/unets.ipynb
@@ -0,0 +1,549 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Import required modules"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Using TensorFlow backend.\n"
+     ]
+    }
+   ],
+   "source": [
+    "import os\n",
+    "import warnings\n",
+    "warnings.simplefilter(action='ignore', category=FutureWarning)\n",
+    "import matplotlib.pyplot as plt\n",
+    "\n",
+    "# import user classes\n",
+    "from models.Unet import Unet\n",
+    "from models.Unet_Resnet import Unet_Resnet101, Unet_Resnet50, Unet_Resnet_paper\n",
+    "from models.Unet_ResAttnet import Res_att_unet_2d, Res_att_unet_3d\n",
+    "\n",
+    "%load_ext autoreload\n",
+    "%autoreload 2\n",
+    "\n",
+    "# helper function for visualization\n",
+    "def display_images(image, cmap='gray', norm=None, interpolation='bilinear'):\n",
+    "\n",
+    "    plt.figure(figsize=(14, 14))\n",
+    "    plt.axis('off')\n",
+    "    plt.imshow(image, cmap=cmap,\n",
+    "               norm=norm, interpolation=interpolation)\n",
+    "    plt.show()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Initialize model"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Loaded config file from configs/default_singleclass_unet.yml\n",
+      "WARNING:tensorflow:From C:\\Users\\cjt678\\Desktop\\Unets\\models\\CNN_Base.py:169: The name tf.ConfigProto is deprecated. Please use tf.compat.v1.ConfigProto instead.\n",
+      "\n",
+      "WARNING:tensorflow:From C:\\Users\\cjt678\\Desktop\\Unets\\models\\CNN_Base.py:171: The name tf.Session is deprecated. Please use tf.compat.v1.Session instead.\n",
+      "\n"
+     ]
+    }
+   ],
+   "source": [
+    "model = Unet(config_filepath=\"configs/default_singleclass_unet.yml\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## 1. Load / augment dataset"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Performing augmentations on 200 images\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Augmentation of images: 100%|███████████████| 200/200 [00:00<00:00, 413.20it/s]\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Augmentations complete!\n"
+     ]
+    }
+   ],
+   "source": [
+    "model.load_dataset()\n",
+    "model.augment_images()\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## 2.
Training" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Unet 2022-02-15 13:41:48.599567\n", + "Config file written to: /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\Unet-20220215T1341-config.yml\n", + "WARNING:tensorflow:From C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:4070: The name tf.nn.max_pool is deprecated. Please use tf.nn.max_pool2d instead.\n", + "\n", + "Model file written to: /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\Unet-20220215T1341-model.yml\n", + "Training using single GPU or CPU..\n", + "Loss : edge-enhanced Dice loss\n", + "Metrics : IoU\n", + "WARNING:tensorflow:From C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\tensorflow\\python\\ops\\metrics_impl.py:1178: add_dispatch_support..wrapper (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use tf.where in 2.0, which has the same broadcast rule as np.where\n", + "WARNING:tensorflow:From C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\tensorflow\\python\\ops\\metrics_impl.py:1179: div (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Deprecated in favor of operator or tf.math.divide.\n" + ] + } + ], + "source": [ + "model.initialize_model()\n", + "# If pre-trained model, please indicate the path \n", + "#model.load_weights('/mnt/mbi/home/mbirdm/AI/data_ai/Vidhya/Networks/OneClass/Best/Res_att_unet_2d-20200504T0646_BCE/weights_now.h5')" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "#model.summary()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:422: The name tf.global_variables is deprecated. Please use tf.compat.v1.global_variables instead.\n", + "\n", + "Train on 180 samples, validate on 20 samples\n", + "WARNING:tensorflow:From C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\keras\\callbacks\\tensorboard_v1.py:200: The name tf.summary.merge_all is deprecated. Please use tf.compat.v1.summary.merge_all instead.\n", + "\n", + "WARNING:tensorflow:From C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\keras\\callbacks\\tensorboard_v1.py:206: The name tf.summary.FileWriter is deprecated. Please use tf.compat.v1.summary.FileWriter instead.\n", + "\n", + "Epoch 1/120\n", + "180/180 [==============================] - 19s 107ms/step - loss: 0.7970 - mean_iou: 0.4572 - val_loss: 0.8440 - val_mean_iou: 0.5045\n", + "\n", + "Epoch 00001: val_loss improved from inf to 0.84399, saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_best.h5\n", + "\n", + "Epoch 00001: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "WARNING:tensorflow:From C:\\ProgramData\\Anaconda3\\envs\\env-unets\\lib\\site-packages\\keras\\callbacks\\tensorboard_v1.py:343: The name tf.Summary is deprecated. 
Please use tf.compat.v1.Summary instead.\n", + "\n", + "Epoch 2/120\n", + "180/180 [==============================] - 11s 64ms/step - loss: 0.5830 - mean_iou: 0.5309 - val_loss: 0.8166 - val_mean_iou: 0.5575\n", + "\n", + "Epoch 00002: val_loss improved from 0.84399 to 0.81663, saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_best.h5\n", + "\n", + "Epoch 00002: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 3/120\n", + "180/180 [==============================] - 12s 64ms/step - loss: 0.4035 - mean_iou: 0.5728 - val_loss: 0.7998 - val_mean_iou: 0.5881\n", + "\n", + "Epoch 00003: val_loss improved from 0.81663 to 0.79979, saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_best.h5\n", + "\n", + "Epoch 00003: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 4/120\n", + "180/180 [==============================] - 12s 64ms/step - loss: 0.3243 - mean_iou: 0.5984 - val_loss: 0.8206 - val_mean_iou: 0.6103\n", + "\n", + "Epoch 00004: val_loss did not improve from 0.79979\n", + "\n", + "Epoch 00004: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 5/120\n", + "180/180 [==============================] - 11s 63ms/step - loss: 0.2794 - mean_iou: 0.6194 - val_loss: 0.8618 - val_mean_iou: 0.6287\n", + "\n", + "Epoch 00005: val_loss did not improve from 0.79979\n", + "\n", + "Epoch 00005: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 6/120\n", + "180/180 [==============================] - 12s 64ms/step - loss: 0.2433 - mean_iou: 0.6364 - val_loss: 0.8946 - val_mean_iou: 0.6449\n", + "\n", + "Epoch 00006: val_loss did not improve from 0.79979\n", + "\n", + "Epoch 00006: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 7/120\n", + "180/180 [==============================] - 11s 64ms/step - loss: 0.2270 - mean_iou: 0.6518 - val_loss: 0.8884 - val_mean_iou: 0.6584\n", + "\n", + "Epoch 00007: val_loss did not improve from 0.79979\n", + "\n", + "Epoch 00007: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 8/120\n", + "180/180 [==============================] - 11s 63ms/step - loss: 0.2077 - mean_iou: 0.6640 - val_loss: 0.7788 - val_mean_iou: 0.6703\n", + "\n", + "Epoch 00008: val_loss improved from 0.79979 to 0.77882, saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_best.h5\n", + "\n", + "Epoch 00008: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 9/120\n", + "180/180 [==============================] - 11s 63ms/step - loss: 0.1966 - mean_iou: 0.6751 - val_loss: 0.6477 - val_mean_iou: 0.6810\n", + "\n", + "Epoch 00009: val_loss improved from 0.77882 to 0.64773, saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_best.h5\n", + "\n", + "Epoch 00009: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 10/120\n", + "180/180 [==============================] - 12s 64ms/step - loss: 0.1959 - mean_iou: 0.6857 - val_loss: 0.6879 - val_mean_iou: 0.6904\n", + "\n", + "Epoch 00010: val_loss did not improve from 0.64773\n", + "\n", + "Epoch 00010: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 11/120\n", + "180/180 
[==============================] - 11s 63ms/step - loss: 0.1838 - mean_iou: 0.6945 - val_loss: 0.6889 - val_mean_iou: 0.6987\n", + "\n", + "Epoch 00011: val_loss did not improve from 0.64773\n", + "\n", + "Epoch 00011: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 12/120\n", + "180/180 [==============================] - 11s 63ms/step - loss: 0.1772 - mean_iou: 0.7020 - val_loss: 0.4161 - val_mean_iou: 0.7060\n", + "\n", + "Epoch 00012: val_loss improved from 0.64773 to 0.41609, saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_best.h5\n", + "\n", + "Epoch 00012: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 13/120\n", + "180/180 [==============================] - 12s 64ms/step - loss: 0.1653 - mean_iou: 0.7099 - val_loss: 0.4036 - val_mean_iou: 0.7138\n", + "\n", + "Epoch 00013: val_loss improved from 0.41609 to 0.40355, saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_best.h5\n", + "\n", + "Epoch 00013: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 14/120\n", + "180/180 [==============================] - 12s 64ms/step - loss: 0.1576 - mean_iou: 0.7174 - val_loss: 0.3715 - val_mean_iou: 0.7209\n", + "\n", + "Epoch 00014: val_loss improved from 0.40355 to 0.37149, saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_best.h5\n", + "\n", + "Epoch 00014: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 15/120\n", + "180/180 [==============================] - 11s 63ms/step - loss: 0.1485 - mean_iou: 0.7243 - val_loss: 0.2858 - val_mean_iou: 0.7280\n", + "\n", + "Epoch 00015: val_loss improved from 0.37149 to 0.28581, saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_best.h5\n", + "\n", + "Epoch 00015: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 16/120\n", + "180/180 [==============================] - 12s 64ms/step - loss: 0.1411 - mean_iou: 0.7313 - val_loss: 0.3070 - val_mean_iou: 0.7345\n", + "\n", + "Epoch 00016: val_loss did not improve from 0.28581\n", + "\n", + "Epoch 00016: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 17/120\n", + "180/180 [==============================] - 12s 65ms/step - loss: 0.1451 - mean_iou: 0.7372 - val_loss: 0.2580 - val_mean_iou: 0.7402\n", + "\n", + "Epoch 00017: val_loss improved from 0.28581 to 0.25796, saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_best.h5\n", + "\n", + "Epoch 00017: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 18/120\n", + "180/180 [==============================] - 11s 63ms/step - loss: 0.1419 - mean_iou: 0.7428 - val_loss: 0.2959 - val_mean_iou: 0.7455\n", + "\n", + "Epoch 00018: val_loss did not improve from 0.25796\n", + "\n", + "Epoch 00018: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 19/120\n", + "180/180 [==============================] - 12s 64ms/step - loss: 0.1355 - mean_iou: 0.7479 - val_loss: 0.2934 - val_mean_iou: 0.7505\n", + "\n", + "Epoch 00019: val_loss did not improve from 0.25796\n", + "\n", + "Epoch 00019: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 20/120\n", + "180/180 
[==============================] - 11s 64ms/step - loss: 0.1255 - mean_iou: 0.7529 - val_loss: 0.3173 - val_mean_iou: 0.7553\n", + "\n", + "Epoch 00020: val_loss did not improve from 0.25796\n", + "\n", + "Epoch 00020: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 21/120\n", + "180/180 [==============================] - 11s 64ms/step - loss: 0.1196 - mean_iou: 0.7576 - val_loss: 0.2384 - val_mean_iou: 0.7599\n", + "\n", + "Epoch 00021: val_loss improved from 0.25796 to 0.23842, saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_best.h5\n", + "\n", + "Epoch 00021: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 22/120\n", + "180/180 [==============================] - 11s 64ms/step - loss: 0.1284 - mean_iou: 0.7620 - val_loss: 0.2996 - val_mean_iou: 0.7642\n", + "\n", + "Epoch 00022: val_loss did not improve from 0.23842\n", + "\n", + "Epoch 00022: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 23/120\n", + "180/180 [==============================] - 12s 64ms/step - loss: 0.1166 - mean_iou: 0.7661 - val_loss: 0.2548 - val_mean_iou: 0.7682\n", + "\n", + "Epoch 00023: val_loss did not improve from 0.23842\n", + "\n", + "Epoch 00023: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 24/120\n", + "180/180 [==============================] - 12s 64ms/step - loss: 0.1101 - mean_iou: 0.7702 - val_loss: 0.2796 - val_mean_iou: 0.7721\n", + "\n", + "Epoch 00024: val_loss did not improve from 0.23842\n", + "\n", + "Epoch 00024: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 25/120\n", + "180/180 [==============================] - 12s 64ms/step - loss: 0.1057 - mean_iou: 0.7739 - val_loss: 0.3131 - val_mean_iou: 0.7758\n", + "\n", + "Epoch 00025: val_loss did not improve from 0.23842\n", + "\n", + "Epoch 00025: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 26/120\n", + "180/180 [==============================] - 11s 64ms/step - loss: 0.1040 - mean_iou: 0.7773 - val_loss: 0.2209 - val_mean_iou: 0.7792\n", + "\n", + "Epoch 00026: val_loss improved from 0.23842 to 0.22088, saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_best.h5\n", + "\n", + "Epoch 00026: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 27/120\n", + "180/180 [==============================] - 12s 64ms/step - loss: 0.1021 - mean_iou: 0.7811 - val_loss: 0.2432 - val_mean_iou: 0.7828\n", + "\n", + "Epoch 00027: val_loss did not improve from 0.22088\n", + "\n", + "Epoch 00027: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 28/120\n", + "180/180 [==============================] - 11s 64ms/step - loss: 0.0953 - mean_iou: 0.7844 - val_loss: 0.2320 - val_mean_iou: 0.7862\n", + "\n", + "Epoch 00028: val_loss did not improve from 0.22088\n", + "\n", + "Epoch 00028: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 29/120\n", + "180/180 [==============================] - 12s 64ms/step - loss: 0.0952 - mean_iou: 0.7878 - val_loss: 0.2883 - val_mean_iou: 0.7893\n", + "\n", + "Epoch 00029: val_loss did not improve from 0.22088\n", + "\n", + "Epoch 00029: saving model to 
/Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 30/120\n", + "180/180 [==============================] - 11s 64ms/step - loss: 0.0942 - mean_iou: 0.7907 - val_loss: 0.2919 - val_mean_iou: 0.7922\n", + "\n", + "Epoch 00030: val_loss did not improve from 0.22088\n", + "\n", + "Epoch 00030: saving model to /Users/cjt678/Desktop/Unets/Networks/Unet-20220215T1341\\weights_now.h5\n", + "Epoch 31/120\n", + " 84/180 [=============>................] - ETA: 5s - loss: 0.0872 - mean_iou: 0.7928" + ] + } + ], + "source": [ + "model.train_model()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3. Prediction" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Config file exists in model directory. Loading /mnt/mbi/home/mbirdm/AI/data_ai/Vidhya/Networks/Unet-20191024T0603/Unet-20191024T0603-config.yml\n", + "Loaded config file from /mnt/mbi/home/mbirdm/AI/data_ai/Vidhya/Networks/Unet-20191024T0603/\n", + "Updating model_dir to /mnt/mbi/home/mbirdm/AI/data_ai/Vidhya/Networks/Unet-20191024T0603/\n", + "Unet 2021-05-19 07:03:55.120803\n", + "Predicting using single GPU or CPU..\n", + "Loss : Edge Enhanced categorical_crossentropy\n", + "Metrics : ['categorical_accuracy']\n", + "Loaded weights from: /mnt/mbi/home/mbirdm/AI/data_ai/Vidhya/Networks/Unet-20191024T0603/weights_best.h5\n" + ] + } + ], + "source": [ + "# Please remember to change to the correct folder containing the network weights\n", + "model_dir = '/mnt/mbi/home/mbirdm/AI/data_ai/Vidhya/Networks/Unet-20191024T0603/'\n", + "model = Unet(model_dir = model_dir,\n", + " for_prediction=True,\n", + " tile_size = [512,512],\n", + " tile_overlap_size = [0,0])\n", + "#model = Unet(model_dir = model_dir,\n", + "# use_cpu = True,\n", + "# config_filepath=None,\n", + "# for_prediction=True,\n", + "# save_as_uint16=True,\n", + "# tile_size = [512,512],\n", + "# tile_overlap_size = [0,0])\n", + "model.initialize_model()\n", + "model.load_weights(model_dir+'weights_best.h5') # leave blank to load last h5 file in folder" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "d921fb6051344743bb061dee2f4dbf25", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(HTML(value=''), FloatProgress(value=0.0, max=2.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TiffPage 0: TypeError: read_bytes() missing 3 required positional arguments: 'dtype', 'count', and 'offsetsize'\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "1bbb46d4142244019af1b5369a11fa20", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(HTML(value=''), FloatProgress(value=0.0, max=2.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TiffPage 0: TypeError: read_bytes() missing 3 required positional arguments: 'dtype', 'count', and 'offsetsize'\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "940c529fb73542a0a9912b667e67a328", + "version_major": 2, + "version_minor": 
0 + }, + "text/plain": [ + "HBox(children=(HTML(value=''), FloatProgress(value=0.0, max=68.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n" + ] + } + ], + "source": [ + "# popiah\n", + "_ = model.predict_images('/mnt/mbi/home/mbirdm/AI/data_ai/Vidhya/prediction/')" + ] + }, + { + "cell_type": "code", + "execution_count": 51, + "metadata": {}, + "outputs": [], + "source": [ + "model.end_training()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Please remember to restart or stop the notebook once you are done. Thank you. \n", + "\n", + "Alternatively, run the line above" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.13" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +}
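Addendum: the prediction cell above constructs the Unet with tile_size=[512,512] and tile_overlap_size=[0,0], so large images are processed as tiles and stitched back together. The sketch below is a rough illustration of that idea, not the repository's actual implementation; predict_fn is a stand-in for the model's forward pass, and the image is assumed to be at least one tile in each dimension:

```python
# Illustrative sketch of tiled prediction with overlap averaging (numpy only).
import numpy as np

def predict_tiled(image, predict_fn, tile_size=(512, 512), overlap=(0, 0)):
    """Run predict_fn over (possibly overlapping) tiles and stitch the results."""
    h, w = image.shape[:2]
    th, tw = tile_size
    sh, sw = th - overlap[0], tw - overlap[1]      # stride between tile origins
    output = np.zeros((h, w), dtype=np.float32)
    counts = np.zeros((h, w), dtype=np.float32)
    for y in range(0, h, sh):
        for x in range(0, w, sw):
            # clamp the last row/column of tiles to the image border
            y0, x0 = min(y, h - th), min(x, w - tw)
            tile = image[y0:y0 + th, x0:x0 + tw]
            output[y0:y0 + th, x0:x0 + tw] += predict_fn(tile)
            counts[y0:y0 + th, x0:x0 + tw] += 1
    return output / counts                          # average where tiles overlap
```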