From 095812260002b14a0e47176b6451b74accb64c0d Mon Sep 17 00:00:00 2001
From: rdemets
Date: Wed, 18 Jan 2023 14:02:17 +0100
Subject: [PATCH] Removed unnecessary html files

---
 html/models/CNN_Base.html                  | 1604 --------------------
 html/models/Unet.html                      |  417 -----
 html/models/Unet_Resnet.html               | 1095 -------------
 html/models/index.html                     |   86 --
 html/models/internals/dataset.html         |  958 ------------
 html/models/internals/image_functions.html | 1340 ----------------
 html/models/internals/index.html           |   86 --
 html/models/internals/losses.html          |  705 ---------
 html/models/internals/network_config.html  |  908 ----------
 html/models/layers/index.html              |   71 -
 html/models/layers/layers.html             |  222 ---
 11 files changed, 7492 deletions(-)
 delete mode 100644 html/models/CNN_Base.html
 delete mode 100644 html/models/Unet.html
 delete mode 100644 html/models/Unet_Resnet.html
 delete mode 100644 html/models/index.html
 delete mode 100644 html/models/internals/dataset.html
 delete mode 100644 html/models/internals/image_functions.html
 delete mode 100644 html/models/internals/index.html
 delete mode 100644 html/models/internals/losses.html
 delete mode 100644 html/models/internals/network_config.html
 delete mode 100644 html/models/layers/index.html
 delete mode 100644 html/models/layers/layers.html

diff --git a/html/models/CNN_Base.html b/html/models/CNN_Base.html
deleted file mode 100644
index 37dfaaf..0000000
--- a/html/models/CNN_Base.html
+++ /dev/null
@@ -1,1604 +0,0 @@
-models.CNN_Base API documentation
Module models.CNN_Base

import os
-
-import glob
-import datetime
-
-import skimage.io
-import numpy as np
-
-import tensorflow as tf
-
-import keras
-from keras import backend as K
-from keras.models import Model, load_model
-from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint, TensorBoard, ProgbarLogger
-
-from .internals.image_functions import Image_Functions
-from .internals.network_config import Network_Config
-from .internals.dataset import Dataset
-
-class CNN_Base(Dataset, Image_Functions):
-    def __init__(self, model_dir = None, config_filepath = None, **kwargs):
-        """Creates the base neural network class with basic functions
-    
-        Parameters
-        ----------
-        model_dir : `str`, optional
-            [Default: None] Folder where the model is stored
-        config_filepath : `str`, optional
-            [Default: None] Filepath to the config file
-        **kwargs
-            Parameters that are passed to :class:`network_config.Network_Config`
-
-        Attributes
-        ----------
-        config : :class:`network_config.Network_Config`
-            Network_Config object containing the config and necessary functions
-        """
-        
-        super().__init__()
-        
-        self.config = Network_Config(model_dir = model_dir, config_filepath = config_filepath, **kwargs)
-        
-        self.config.update_parameter(["general", "now"], datetime.datetime.now())
-        
-        if self.config.get_parameter("use_cpu") is True:
-            self.initialize_cpu()
-        else:
-            self.initialize_gpu()
-    
-    #######################
-    # Logging functions
-    #######################
-    def init_logs(self):
-        """Initiates the parameters required for the log file
-        """
-        # Directory for training logs
-        print(self.config.get_parameter("name"), self.config.get_parameter("now"))
-        self.log_dir = os.path.join(self.config.get_parameter("model_dir"), "{}-{:%Y%m%dT%H%M}".format(self.config.get_parameter("name"), self.config.get_parameter("now")))
-        
-        # Path to save after each epoch. Include placeholders that get filled by Keras.
-        self.checkpoint_path = os.path.join(self.log_dir, "{}-{:%Y%m%dT%H%M}_*epoch*.h5".format(self.config.get_parameter("name"), self.config.get_parameter("now")))
-        self.checkpoint_path = self.checkpoint_path.replace("*epoch*", "{epoch:04d}")
-        
-    def write_logs(self):
-        """Writes the log file
-        """
-        # Create log_dir if it does not exist
-        if os.path.exists(self.log_dir) is False:
-            os.makedirs(self.log_dir)
-            
-        # save the parameters used in current run to logs dir
-        self.config.write_config(os.path.join(self.log_dir, "{}-{:%Y%m%dT%H%M}-config.yml".format(self.config.get_parameter("name"), self.config.get_parameter("now"))))
-        
-    #######################
-    # Initialization functions
-    #######################
-    def summary(self):
-        """Summary of the layers in the model
-        """
-        self.model.summary()
-        
-    def compile_model(self, optimizer, loss):
-        """Compiles model
-        
-        Parameters
-        ----------
-        optimizer
-            Gradient optimizer used during the training of the network
-        loss
-            Loss function of the network
-        """
-        self.model.compile(optimizer, loss = loss, metrics = self.config.get_parameter("metrics"))
-
-    def initialize_model(self):
-        """Initializes the logs, builds the model, and chooses the correct initialization function
-        """
-        # write parameters to yaml file
-        self.init_logs()
-        if self.config.get_parameter("for_prediction") is False:
-            self.write_logs()
-            
-        # build model
-        self.model = self.build_model(self.config.get_parameter("input_size"))
-        
-        # save model to yaml file
-        if self.config.get_parameter("for_prediction") is False:
-            self.config.write_model(self.model, os.path.join(self.log_dir, "{}-{:%Y%m%dT%H%M}-model.yml".format(self.config.get_parameter("name"), self.config.get_parameter("now"))))
-
-        print("{} using single GPU or CPU..".format("Predicting" if self.config.get_parameter("for_prediction") else "Training"))
-        self.initialize_model_normal()
-            
-    def initialize_cpu(self):
-        """Sets the session to only use the CPU
-        """
-        config = tf.ConfigProto(
-                        device_count = {'CPU' : 1,
-                                        'GPU' : 0}
-                       )
-        session = tf.Session(config=config)
-        K.set_session(session)   
-        
-    def initialize_gpu(self):
-        """Sets the seesion to use the gpu specified in config file
-        """
-        os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"   # see issue #152
-        os.environ['CUDA_VISIBLE_DEVICES'] = str(self.config.get_parameter("visible_gpu")) # needs to be a string
-        
-        config = tf.ConfigProto()
-        config.gpu_options.allow_growth = True
-        sess = tf.Session(config=config)
-        K.tensorflow_backend.set_session(sess)
-    
-    def initialize_model_normal(self):
-        """Initializes the optimizer and any specified callback functions
-        """
-        opt = self.optimizer_function()
-        self.compile_model(optimizer = opt, loss = self.loss_function(self.config.get_parameter("loss")))
-        
-        if self.config.get_parameter("for_prediction") == False:
-            self.callbacks = [self.model_checkpoint_call(verbose = True)]
-
-            if self.config.get_parameter("use_tensorboard") is True:
-                self.callbacks.append(self.tensorboard_call())
-                
-            if self.config.get_parameter("reduce_LR_on_plateau") is True:
-                self.callbacks.append(ReduceLROnPlateau(monitor=self.config.get_parameter("reduce_LR_monitor"),
-                                                        factor = self.config.get_parameter("reduce_LR_factor"),
-                                                        patience = self.config.get_parameter("reduce_LR_patience"),
-                                                        min_lr = self.config.get_parameter("reduce_LR_min_lr"),
-                                                        verbose = True))
-            
-            if self.config.get_parameter("early_stopping") is True:
-                self.callbacks.append(EarlyStopping(monitor=self.config.get_parameter("early_stopping_monitor"),
-                                                    patience = self.config.get_parameter("early_stopping_patience"),
-                                                    min_delta = self.config.get_parameter("early_stopping_min_delta"),
-                                                    verbose = True))
-                
-    #######################
-    # Optimizer/Loss functions
-    #######################         
-    def optimizer_function(self, learning_rate = None):
-        """Initialize optimizer function
-        
-        Parameters
-        ----------
-        learning_rate : `float`
-            Learning rate of the descent algorithm
-            
-        Returns
-        ----------
-        optimizer
-            Function to call the optimizer
-        """
-        if learning_rate is None:
-            learning_rate = self.config.get_parameter("learning_rate")
-        if self.config.get_parameter("optimizer_function") == 'sgd':
-            return keras.optimizers.SGD(lr = learning_rate, 
-                                        decay = self.config.get_parameter("decay"), 
-                                        momentum = self.config.get_parameter("momentum"), 
-                                        nesterov = self.config.get_parameter("nesterov"))
-        elif self.config.get_parameter("optimizer_function") == 'rmsprop':
-            return keras.optimizers.RMSprop(lr = learning_rate, 
-                                            decay = self.config.get_parameter("decay"))
-        elif self.config.get_parameter("optimizer_function") == 'adam':
-            return keras.optimizers.Adam(lr = learning_rate, 
-                                         decay = self.config.get_parameter("decay"))
-        
-    def loss_function(self, loss):
-        """Initialize loss function
-        
-        Parameters
-        ----------
-        loss : `str`
-            Name of the loss function
-            
-        Returns
-        ----------
-        loss
-            Function to call loss function
-        """
-        if loss == "binary_crossentropy":
-            print("Using binary crossentropy")
-            return loss
-        elif loss == "jaccard_distance_loss":
-            print("Using jaccard distance loss")
-            from .internals.losses import jaccard_distance_loss
-            return jaccard_distance_loss
-        elif loss == "lovasz_hinge":
-            print("Using Lovasz-hinge loss")
-            from .internals.losses import lovasz_loss
-            return lovasz_loss
-        elif loss == "dice_loss":
-            print("Using Dice loss")
-            from .internals.losses import dice_coef_loss
-            return dice_coef_loss
-        elif loss == "bce_dice_loss":
-            print("Using 1 - Dice + BCE loss")
-            from .internals.losses import bce_dice_loss
-            return bce_dice_loss
-        elif loss == "ssim_loss":
-            print("Using DSSIM loss")
-            from .internals.losses import DSSIM_loss
-            return DSSIM_loss
-        elif loss == "bce_ssim_loss":
-            print("Using BCE + DSSIM loss")
-            from .internals.losses import bce_ssim_loss
-            return bce_ssim_loss
-        elif loss == "mean_squared_error":
-            return keras.losses.mean_squared_error
-        elif loss == "mean_absolute_error":
-            return keras.losses.mean_absolute_error
-        elif loss == "ssim_mae_loss":
-            print("Using DSSIM + MAE loss")
-            from .internals.losses import dssim_mae_loss
-            return dssim_mae_loss
-        else:
-            print("Using {}".format(loss))
-            return loss
-        
-    #######################
-    # Callbacks
-    #######################     
-    def tensorboard_call(self):
-        """Initialize tensorboard call
-        """
-        return TensorBoard(log_dir=self.log_dir, 
-                           batch_size = self.config.get_parameter("batch_size_per_GPU"), 
-                           write_graph=self.config.get_parameter("write_graph"),
-                           write_images=self.config.get_parameter("write_images"), 
-                           write_grads=self.config.get_parameter("write_grads"), 
-                           update_freq='epoch', 
-                           histogram_freq=self.config.get_parameter("histogram_freq"))
-    
-    def model_checkpoint_call(self, verbose = 0):
-        """Initialize model checkpoint call
-        """
-        return ModelCheckpoint(self.checkpoint_path, save_weights_only=True, verbose=verbose)
-    
-    #######################
-    # Clear memory once training is done
-    #######################
-    def end_training(self):
-        """Deletes model and releases gpu memory held by tensorflow
-        """
-        # del reference to model
-        del self.model
-        
-        # clear memory
-        tf.reset_default_graph()
-        K.clear_session()
-        
-        # take hold of cuda device to shut it down
-        from numba import cuda
-        cuda.select_device(0)
-        cuda.close()
-    
-    #######################
-    # Train Model
-    #######################
-    def train_model(self, verbose = True):
-        """Trains model
-        
-        Parameters
-        ----------
-        verbose : `bool`, optional
-            [Default: True] Verbose output
-        """      
-        history = self.model.fit(self.aug_images, self.aug_ground_truth, validation_split = self.config.get_parameter("val_split"),
-                                 batch_size = self.config.get_parameter("batch_size"), epochs = self.config.get_parameter("num_epochs"), shuffle = True,
-                                 callbacks=self.callbacks, verbose=verbose)
-        
-        self.end_training()
-        
-    #######################
-    # Predict using loaded model weights
-    ####################### 
-    # TODO: change to load model from yaml file
-    def load_model(self, model_dir = None):
-        """Loads model from h5 file
-        
-        Parameters
-        ----------
-        model_dir : `str`, optional
-            [Default: None] Directory containing the model file
-        """
-        # TODO: rewrite to load model from yaml file
-        if model_dir is None:
-            model_dir = self.config.get_parameter("model_dir")
-            
-        if os.path.isdir(model_dir) is True:
-            list_weights_files = glob.glob(os.path.join(model_dir,'*.h5'))
-            list_weights_files.sort() # To ensure that [-1] gives the last file
-            
-            model_dir = os.path.join(model_dir,list_weights_files[-1])
-
-        self.model = load_model(model_dir)  # keras.models.load_model returns the loaded model
-        print("Loaded model from: " + model_dir)
-        
-    def load_weights(self, model_dir = None, weights_index = -1):
-        """Loads weights from h5 file
-        
-        Parameters
-        ----------
-        model_dir : `str`, optional
-            [Default: None] Directory containing the weights file
-        weights_index : `int`, optional
-            [Default: -1] Index of the weights file to load from the lexically sorted list of h5 files
-        """
-        if model_dir is None:
-            model_dir = self.config.get_parameter("model_dir")
-        
-        if os.path.isdir(model_dir) is True:
-            list_weights_files = glob.glob(os.path.join(model_dir,'*.h5'))
-            list_weights_files.sort() # To ensure that [-1] gives the last file
-            self.weights_path = list_weights_files[weights_index]
-            model_dir = os.path.join(model_dir, self.weights_path)
-        else:
-            self.weights_path = model_dir
-        
-        self.model.load_weights(model_dir)
-        print("Loaded weights from: " + model_dir)
-       
-    def predict_images(self, image_dir):
-        """Perform prediction on images found in ``image_dir``
-        
-        Parameters
-        ----------
-        image_dir : `str`
-            Directory containing the images to perform prediction on
-            
-        Returns
-        ----------
-        image : `array_like`
-            Last image that prediction was performed on
-        """
-        # load image list
-        image_list = self.list_images(image_dir)
-        
-        for image_path in image_list:
-            image = self.load_image(image_path = image_path)
-            
-            # percentile normalization
-            if self.config.get_parameter("percentile_normalization"):
-                image, _, _ = self.percentile_normalization(image, in_bound = self.config.get_parameter("percentile"))
-            
-            if self.config.get_parameter("tile_overlap_size") == [0,0]:
-                padding = None
-                if image.shape[0] < self.config.get_parameter("tile_size")[0] or image.shape[1] < self.config.get_parameter("tile_size")[1]:
-                    image, padding = self.pad_image(image, image_size = self.config.get_parameter("tile_size"))
-                input_image = image[np.newaxis,:,:,np.newaxis]
-                
-                output_image = self.model.predict(input_image, verbose=1)
-                
-                if padding is not None: 
-                    h, w = output_image.shape[1:3]
-                    output_image = np.reshape(output_image, (h, w))
-                    output_image = self.remove_pad_image(output_image, padding = padding)
-            else:
-                tile_image_list, num_rows, num_cols, padding = self.tile_image(image, self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size"))
-                
-                pred_train_list = []
-                for tile in tile_image_list:
-
-                    # reshape image to correct dimensions for unet
-                    h, w = tile.shape[:2]
-                    
-                    tile = np.reshape(tile, (1, h, w, 1))
-
-                    pred_train_list.extend(self.model.predict(tile, verbose=1))
-
-                output_image = self.untile_image(pred_train_list, self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size"),
-                                                 num_rows, num_cols, padding = padding)
-            
-            self.save_image(output_image, image_path)
-            
-        return output_image
-    
-    def save_image(self, image, image_path, subfolder = 'Masks', suffix = '-preds'):
-        """Saves image to image_path
-        
-        Final location of image is as follows:
-          - image_path
-              - subfolder
-                 - model/weights file name
-        
-        Parameters
-        ----------
-        image : `array_like`
-            Image to be saved
-        image_path : `str`
-            Location to save the image in
-        subfolder : `str`
-            [Default: 'Masks'] Subfolder in which the image is to be saved in
-        suffix : `str`
-            [Default: '-preds'] Suffix to append to the filename of the predicted image
-        """
-        image_dir = os.path.dirname(image_path)
-        
-        output_dir = os.path.join(image_dir, subfolder)
-        if not os.path.exists(output_dir):
-            os.makedirs(output_dir)
-            
-        basename, _ = os.path.splitext(os.path.basename(self.weights_path))
-        
-        output_dir = os.path.join(output_dir, basename)
-        if not os.path.exists(output_dir):
-            os.makedirs(output_dir)
-            
-        filename, _ = os.path.splitext(os.path.basename(image_path))
-        output_path = os.path.join(output_dir, "{}{}.tif".format(filename, suffix))
-        
-        skimage.io.imsave(output_path, image)
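For orientation, a minimal sketch of the intended workflow, assuming a concrete subclass such as Unet (which supplies build_model) and hypothetical paths. Data loading through the Dataset mixin is omitted, and passing for_prediction as a keyword assumes Network_Config maps keyword arguments onto config entries:

from models.Unet import Unet

# Training run (hypothetical paths; the Dataset mixin must load the
# augmented images and ground truth before train_model is called)
net = Unet(model_dir="outputs/", config_filepath="unet_config.yml")
net.initialize_model()          # builds, compiles, and registers callbacks
net.train_model(verbose=True)

# Prediction run: rebuild the network, load the newest weights, process a folder
net = Unet(model_dir="outputs/", config_filepath="unet_config.yml",
           for_prediction=True)
net.initialize_model()
net.load_weights()              # picks the last *.h5 in model_dir after sorting
net.predict_images("data/test_images/")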

Classes

class CNN_Base (model_dir=None, config_filepath=None, **kwargs)

Creates the base neural network class with basic functions

Parameters

model_dir : str, optional
    [Default: None] Folder where the model is stored
config_filepath : str, optional
    [Default: None] Filepath to the config file
**kwargs
    Parameters that are passed to :class:`network_config.Network_Config`

Attributes

config : :class:`network_config.Network_Config`
    Network_Config object containing the config and necessary functions

Ancestors

Dataset
Image_Functions

Subclasses

Unet

Methods

def compile_model(self, optimizer, loss)

Compiles the model

Parameters

optimizer
    Gradient optimizer used during the training of the network
loss
    Loss function of the network

def end_training(self)

Deletes the model and releases the GPU memory held by TensorFlow

def init_logs(self)

Initiates the parameters required for the log file

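To make the naming concrete, a small sketch of what init_logs produces, using the format strings from the source and an arbitrary example timestamp and model name:

import os, datetime

name = "Unet"
now = datetime.datetime(2023, 1, 18, 14, 2)   # arbitrary example timestamp

log_dir = os.path.join("outputs", "{}-{:%Y%m%dT%H%M}".format(name, now))
checkpoint_path = os.path.join(log_dir, "{}-{:%Y%m%dT%H%M}_*epoch*.h5".format(name, now))
checkpoint_path = checkpoint_path.replace("*epoch*", "{epoch:04d}")

print(log_dir)           # outputs/Unet-20230118T1402
print(checkpoint_path)   # outputs/Unet-20230118T1402/Unet-20230118T1402_{epoch:04d}.h5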
def initialize_cpu(self)

Sets the session to only use the CPU

def initialize_gpu(self)

Sets the session to use the GPU specified in the config file

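Both session helpers use TensorFlow 1.x APIs (tf.ConfigProto, tf.Session, K.tensorflow_backend), which are gone from the top-level namespace in TensorFlow 2. Offered only as a porting sketch, a rough TF2 equivalent is:

import os
import tensorflow as tf

# Restrict device visibility before TensorFlow initialises the devices
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"   # or "" to force CPU-only execution

# Grow GPU memory on demand instead of reserving it all up front
for gpu in tf.config.list_physical_devices("GPU"):
    tf.config.experimental.set_memory_growth(gpu, True)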
def initialize_model(self)

Initializes the logs, builds the model, and chooses the correct initialization function

def initialize_model_normal(self)

Initializes the optimizer and any specified callback functions

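The callback block is driven entirely by config flags. As a reference, the parameters it consults, collected into a plain Python dict; the key names come from the get_parameter calls in the source, while the values are illustrative only:

callback_config = {
    "use_tensorboard": True,
    "reduce_LR_on_plateau": True,
    "reduce_LR_monitor": "val_loss",
    "reduce_LR_factor": 0.5,
    "reduce_LR_patience": 10,
    "reduce_LR_min_lr": 1e-6,
    "early_stopping": False,
    "early_stopping_monitor": "val_loss",
    "early_stopping_patience": 20,
    "early_stopping_min_delta": 1e-4,
}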
def load_model(self, model_dir=None)

Loads the model from an h5 file

Parameters

model_dir : str, optional
    [Default: None] Directory containing the model file

def load_weights(self, model_dir=None, weights_index=-1)

Loads weights from an h5 file

Parameters

model_dir : str, optional
    [Default: None] Directory containing the weights file
weights_index : int, optional
    [Default: -1] Index of the weights file to load from the lexically sorted list of h5 files

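A short usage sketch, continuing the workflow example above: because the *.h5 files are sorted lexically and the checkpoint names zero-pad the epoch number, weights_index selects checkpoints in epoch order. The directory name is hypothetical.

# net: an initialized CNN_Base subclass (see the workflow sketch above)
net.load_weights(model_dir="outputs/Unet-20230118T1402/")                    # newest checkpoint
net.load_weights(model_dir="outputs/Unet-20230118T1402/", weights_index=0)  # earliest checkpoint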
def loss_function(self, loss)

Initializes the loss function

Parameters

loss : str
    Name of the loss function

Returns

loss
    Function to call the loss function

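The composite losses are imported from models.internals.losses, which is not part of this page. For context, a typical Keras-style Dice loss of the kind such a module provides; this is the common formulation, not necessarily the exact one used here:

from keras import backend as K

def dice_coef_loss(y_true, y_pred, smooth=1.0):
    # Dice coefficient 2|X n Y| / (|X| + |Y|), smoothed to avoid division by zero
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    dice = (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
    return 1.0 - dice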
def model_checkpoint_call(self, verbose=0)

Initializes the model checkpoint callback

def optimizer_function(self, learning_rate=None)

Initializes the optimizer function

Parameters

learning_rate : float
    Learning rate of the descent algorithm

Returns

optimizer
    Function to call the optimizer

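Note that the lr and decay keywords used in the source were deprecated in later Keras releases in favour of learning_rate and explicit schedules. A minimal sketch of the 'adam' branch against a newer Keras, with a toy model just to show the hookup:

import keras
from keras.layers import Input, Dense
from keras.models import Model

# toy model purely to demonstrate the optimizer wiring
inputs = Input((4,))
model = Model(inputs, Dense(1)(inputs))

opt = keras.optimizers.Adam(learning_rate=1e-4)  # 'lr' was renamed in Keras >= 2.3
model.compile(opt, loss="mean_squared_error")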
def predict_images(self, image_dir)

Perform prediction on the images found in image_dir

Parameters

image_dir : str
    Directory containing the images to perform prediction on

Returns

image : array_like
    Last image that prediction was performed on

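A toy walk-through of the no-overlap branch, assuming a 100x120 grayscale image and a configured tile_size of [128, 128]; the padding mode is an assumption, since pad_image lives in the Image_Functions mixin and is not shown here:

import numpy as np

image = np.zeros((100, 120), dtype=np.float32)       # smaller than the tile size
tile_size = (128, 128)

# pad_image would bring the image up to 128 x 128 (padding mode assumed)
padded = np.pad(image, ((0, 28), (0, 8)), mode="reflect")
input_image = padded[np.newaxis, :, :, np.newaxis]   # shape (1, 128, 128, 1)

# model.predict returns shape (1, 128, 128, 1); squeeze and crop back
output = input_image                                  # stand-in for the prediction
h, w = output.shape[1:3]
output = np.reshape(output, (h, w))                   # shape (128, 128)
output = output[:100, :120]                           # remove_pad_image equivalent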
def save_image(self, image, image_path, subfolder='Masks', suffix='-preds')

Saves the image to image_path

The final location of the image is image_path / subfolder / model-or-weights file name

Parameters

image : array_like
    Image to be saved
image_path : str
    Location to save the image in
subfolder : str
    [Default: 'Masks'] Subfolder in which the image is saved
suffix : str
    [Default: '-preds'] Suffix to append to the filename of the predicted image

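Concretely, for a hypothetical weights file and input image, the path logic above resolves as follows:

import os

weights_path = "outputs/Unet-20230118T1402/Unet-20230118T1402_0010.h5"
image_path = "data/test/cell01.tif"

basename, _ = os.path.splitext(os.path.basename(weights_path))
filename, _ = os.path.splitext(os.path.basename(image_path))
output_path = os.path.join(os.path.dirname(image_path), "Masks", basename,
                           "{}{}.tif".format(filename, "-preds"))
print(output_path)  # data/test/Masks/Unet-20230118T1402_0010/cell01-preds.tif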
def summary(self)

Summary of the layers in the model

def tensorboard_call(self)

Initializes the TensorBoard callback

def train_model(self, verbose=True)

Trains the model

Parameters

verbose : bool, optional
    [Default: True] Verbose output

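The fit call reads three config keys. A sketch of the corresponding entries with illustrative values (the real config is the YAML file written by write_config):

training_config = {
    "val_split": 0.2,     # fraction of aug_images held out for validation
    "batch_size": 8,
    "num_epochs": 100,
}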
def write_logs(self)

Writes the log file


Inherited members

From Dataset and Image_Functions (models.internals.dataset and models.internals.image_functions)
\ No newline at end of file

diff --git a/html/models/Unet.html b/html/models/Unet.html
deleted file mode 100644
index b67adc9..0000000
--- a/html/models/Unet.html
+++ /dev/null
@@ -1,417 +0,0 @@
-models.Unet API documentation

Module models.Unet

import math
-
-import keras
-from keras.models import Model, load_model
-from keras.layers import Input, BatchNormalization, Activation
-from keras.layers.core import Lambda, Dropout
-from keras.layers.convolutional import Conv2D, Conv2DTranspose, UpSampling2D
-from keras.layers.convolutional_recurrent import ConvLSTM2D
-from keras.layers.pooling import MaxPooling2D
-from keras.layers.merge import Concatenate, Add
-from keras import regularizers
-from keras import backend as K
-
-import tensorflow as tf
-
-from .CNN_Base import CNN_Base
-from .layers.layers import normalize_input, activation_function, regularizer_function, bn_relu_conv2d
-    
-######
-# Unet
-######
-class Unet(CNN_Base):
-    """
-    Unet functions
-    see https://www.nature.com/articles/s41592-018-0261-2
-    """
-    
-    def __init__(self, model_dir = None, name = 'Unet', **kwargs):
-        super().__init__(model_dir = model_dir, **kwargs)
-        
-        self.config.update_parameter(["model","name"], name)
-        
-    def build_model(self, input_size, mean_std_normalization = None, 
-                    dropout_value = None, acti = None, padding = None, 
-                    kernel_initializer = None, weight_regularizer = None):
-        
-        ### get parameters from config file ###
-        filters = self.config.get_parameter("filters")
-        strides = 1  # stride used by the bn_relu_conv2d blocks below
-        
-        if dropout_value is None:
-            dropout_value = self.config.get_parameter("dropout_value")
-        if acti is None:
-            acti = self.config.get_parameter("activation_function")
-        if padding is None:
-            padding = self.config.get_parameter("padding")
-        if kernel_initializer is None:
-            kernel_initializer = self.config.get_parameter("initializer")
-        if weight_regularizer is None:
-            weight_regularizer = self.config.get_parameter("weight_regularizer")
-        if mean_std_normalization is None:
-            if self.config.get_parameter("mean_std_normalization") == True:
-                mean = self.config.get_parameter("mean")
-                std = self.config.get_parameter("std")
-            else:
-                mean = None
-                std = None
-        
-        ### Actual network ###
-        inputs = Input(input_size)
-        
-        # normalize images
-        layer = normalize_input(inputs, 
-                                scale_input = self.config.get_parameter("scale_input"),
-                                mean_std_normalization = self.config.get_parameter("mean_std_normalization"),
-                                mean = mean, std = std)
-        
-        layer_store = []
-        
-        # encoding arm
-        for _ in range(self.config.get_parameter("levels")):
-            layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
-                                   kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
-            
-            layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
-                                   kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
-            
-            layer_store.append(layer)
-            layer = MaxPooling2D((2, 2))(layer)
-            
-            filters = filters * 2
-            
-        
-        layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
-                               kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
-            
-        layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
-                               kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
-            
-        # decoding arm 
-        for i in range(self.config.get_parameter("levels")):
-            layer = Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding='same')(layer)
-            
-            layer = Concatenate(axis=3)([layer, layer_store[-i -1]])
-            filters = filters // 2
-            
-            layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
-                                   kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
-            
-            layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
-                                   kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
-            
-        outputs = Conv2D(1, (1, 1), activation='sigmoid')(layer)
-        
-        return Model(inputs=[inputs], outputs=[outputs], name='Unet')
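Note the channel arithmetic: the encoder doubles filters after every level and the decoder halves it again, so with filters = 32 and levels = 4 the progression is 32, 64, 128, 256 down the encoding arm, 512 across the bridge, and back down to 32 before the final 1x1 sigmoid convolution.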
-
-
-
-
-
-
-
-
-
-

Classes

-
-
-class Unet -(model_dir=None, name='Unet', **kwargs) -
-
-

Unet functions -see https://www.nature.com/articles/s41592-018-0261-2

-

Creates the base neural network class with basic functions

-

Parameters

-
-
model_dir : str, optional
-
[Default: None] Folder where the model is stored
-
config_filepath : str, optional
-
[Default: None] Filepath to the config file
-
**kwargs
-
Parameters that are passed to :class:network_config.Network_Config
-
-

Attributes

-
-
config : :class:network_config.Network_Config
-
Network_config object containing the config and necessary functions
-
-
- -Expand source code - -
class Unet(CNN_Base):
-    """
-    Unet functions
-    see https://www.nature.com/articles/s41592-018-0261-2
-    """
-    
-    def __init__(self, model_dir = None, name = 'Unet', **kwargs):
-        super().__init__(model_dir = model_dir, **kwargs)
-        
-        self.config.update_parameter(["model","name"], name)
-        
-    def build_model(self, input_size, mean_std_normalization = None, 
-                    dropout_value = None, acti = None, padding = None, 
-                    kernel_initializer = None, weight_regularizer = None):
-        
-        ### get parameters from config file ###
-        filters = self.config.get_parameter("filters")
-        strides = 1  # stride used by the bn_relu_conv2d blocks below
-        
-        if dropout_value is None:
-            dropout_value = self.config.get_parameter("dropout_value")
-        if acti is None:
-            acti = self.config.get_parameter("activation_function")
-        if padding is None:
-            padding = self.config.get_parameter("padding")
-        if kernel_initializer is None:
-            kernel_initializer = self.config.get_parameter("initializer")
-        if weight_regularizer is None:
-            weight_regularizer = self.config.get_parameter("weight_regularizer")
-        if mean_std_normalization is None:
-            if self.config.get_parameter("mean_std_normalization") == True:
-                mean = self.config.get_parameter("mean")
-                std = self.config.get_parameter("std")
-            else:
-                mean = None
-                std = None
-        
-        ### Actual network ###
-        inputs = Input(input_size)
-        
-        # normalize images
-        layer = normalize_input(inputs, 
-                                scale_input = self.config.get_parameter("scale_input"),
-                                mean_std_normalization = self.config.get_parameter("mean_std_normalization"),
-                                mean = mean, std = std)
-        
-        layer_store = []
-        
-        # encoding arm
-        for _ in range(self.config.get_parameter("levels")):
-            layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
-                                   kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
-            
-            layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
-                                   kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
-            
-            layer_store.append(layer)
-            layer = MaxPooling2D((2, 2))(layer)
-            
-            filters = filters * 2
-            
-        
-        layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
-                               kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
-            
-        layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
-                               kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
-            
-        # decoding arm 
-        for i in range(self.config.get_parameter("levels")):
-            layer = Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding='same')(layer)
-            
-            layer = Concatenate(axis=3)([layer, layer_store[-i -1]])
-            filters = filters // 2
-            
-            layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
-                                   kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
-            
-            layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
-                                   kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
-            
-        outputs = Conv2D(1, (1, 1), activation='sigmoid')(layer)
-        
-        return Model(inputs=[inputs], outputs=[outputs], name='Unet')
-
-

Ancestors

- -

Methods

-
-
-def build_model(self, input_size, mean_std_normalization=None, dropout_value=None, acti=None, padding=None, kernel_initializer=None, weight_regularizer=None) -
-
-
-
- -Expand source code - -
def build_model(self, input_size, mean_std_normalization = None, 
-                dropout_value = None, acti = None, padding = None, 
-                kernel_initializer = None, weight_regularizer = None):
-    
-    ### get parameters from config file ###
-    filters = self.config.get_parameter("filters")
-    strides = 1  # stride used by the bn_relu_conv2d blocks below
-    
-    if dropout_value is None:
-        dropout_value = self.config.get_parameter("dropout_value")
-    if acti is None:
-        acti = self.config.get_parameter("activation_function")
-    if padding is None:
-        padding = self.config.get_parameter("padding")
-    if kernel_initializer is None:
-        kernel_initializer = self.config.get_parameter("initializer")
-    if weight_regularizer is None:
-        weight_regularizer = self.config.get_parameter("weight_regularizer")
-    if mean_std_normalization is None:
-        if self.config.get_parameter("mean_std_normalization") == True:
-            mean = self.config.get_parameter("mean")
-            std = self.config.get_parameter("std")
-        else:
-            mean = None
-            std = None
-    
-    ### Actual network ###
-    inputs = Input(input_size)
-    
-    # normalize images
-    layer = normalize_input(inputs, 
-                            scale_input = self.config.get_parameter("scale_input"),
-                            mean_std_normalization = self.config.get_parameter("mean_std_normalization"),
-                            mean = mean, std = std)
-    
-    layer_store = []
-    
-    # encoding arm
-    for _ in range(self.config.get_parameter("levels")):
-        layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
-                               kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
-        
-        layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
-                               kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
-        
-        layer_store.append(layer)
-        layer = MaxPooling2D((2, 2))(layer)
-        
-        filters = filters * 2
-        
-    
-    layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
-                           kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
-        
-    layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
-                           kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
-        
-    # decoding arm 
-    for i in range(self.config.get_parameter("levels")):
-        layer = Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding='same')(layer)
-        
-        layer = Concatenate(axis=3)([layer, layer_store[-i -1]])
-        filters = filters // 2
-        
-        layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
-                               kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
-        
-        layer = bn_relu_conv2d(layer, filters, 3,  acti=acti, padding=padding, strides=strides, 
-                               kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
-        
-    outputs = Conv2D(1, (1, 1), activation='sigmoid')(layer)
-    
-    return Model(inputs=[inputs], outputs=[outputs], name='Unet')
-
-
-
-

Inherited members

- -
-
-
-
- -
\ No newline at end of file
diff --git a/html/models/Unet_Resnet.html b/html/models/Unet_Resnet.html
deleted file mode 100644
index 97475e5..0000000
--- a/html/models/Unet_Resnet.html
+++ /dev/null
@@ -1,1095 +0,0 @@
-models.Unet_Resnet API documentation
-
-
-

Module models.Unet_Resnet

-
-
-
- -Expand source code - -
import math
-
-import keras
-from keras.models import Model, load_model
-from keras.layers import Input, BatchNormalization, Activation
-from keras.layers.core import Lambda, Dropout
-from keras.layers.convolutional import Conv2D, Conv2DTranspose, UpSampling2D
-from keras.layers.convolutional_recurrent import ConvLSTM2D
-from keras.layers.pooling import MaxPooling2D
-from keras.layers.merge import Concatenate, Add
-from keras import regularizers
-from keras import backend as K
-
-import tensorflow as tf
-
-from .CNN_Base import CNN_Base
-from .layers.layers import normalize_input, activation_function, regularizer_function, bn_relu_conv2d, bn_relu_conv2dtranspose
-        
-################################################
-# Unet + Resnet
-################################################
-
-class Unet_Resnet(CNN_Base):
-    """
-    Unet + resnet functions
-    see https://link.springer.com/chapter/10.1007/978-3-319-46976-8_19
-    """
-    
-    def __init__(self, model_dir = None, **kwargs):       
-        super().__init__(model_dir = model_dir, **kwargs)
-        
-    def bottleneck_block(self, inputs, 
-                         upsample = False,
-                         filters = 8,
-                         strides = 1, dropout_value = None, acti = None, padding = None, 
-                         kernel_initializer = None, weight_regularizer = None, name = None):            
-        # Bottleneck_block
-        with tf.name_scope("Bottleneck_block" + name):
-            output = bn_relu_conv2d(inputs, filters, 1,  acti=acti, padding=padding, strides=strides, 
-                                    kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
-            
-            output = bn_relu_conv2d(output, filters, 3,  acti=acti, padding=padding, 
-                                    kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
-            
-            if upsample == True:
-                output = bn_relu_conv2dtranspose(output, filters, (2,2), strides = (2,2), acti=acti, padding=padding, 
-                                                kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
-                output = Conv2D(filters * 4, (1,1), padding=padding, 
-                                kernel_initializer=kernel_initializer, 
-                                kernel_regularizer=regularizer_function(weight_regularizer))(output)
-            else:
-                output = bn_relu_conv2d(output, filters*4, 1,  acti=acti, padding=padding, 
-                                        kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
-
-            output = Dropout(dropout_value)(output)
-            
-            # reshape input to the same size as output
-            if upsample == True:
-                inputs = UpSampling2D()(inputs)
-            if strides == 2:
-                inputs = Conv2D(output.shape[3].value, 1, padding=padding, strides=strides, kernel_initializer=kernel_initializer)(inputs)
-            
-            # ensure number of filters are correct between input and output
-            if output.shape[3] != inputs.shape[3]:
-                inputs = Conv2D(output.shape[3].value, 1, padding=padding, kernel_initializer=kernel_initializer)(inputs)
-
-            return Add()([output, inputs])
-        
-    def simple_block(self, inputs, filters,
-                     strides = 1, dropout_value = None, acti = None, padding = None, 
-                     kernel_initializer = None, weight_regularizer = None, name = None):
-            
-        with tf.name_scope("Simple_block" + name):
-            output = BatchNormalization()(inputs)
-            output = activation_function(output, acti)
-            output = MaxPooling2D()(output)
-            output = Conv2D(filters, 3, padding=padding, strides=strides,
-                            kernel_initializer=kernel_initializer, 
-                            kernel_regularizer=regularizer_function(weight_regularizer))(output)
-
-            output = Dropout(dropout_value)(output)
-
-            inputs = Conv2D(output.shape[3].value, 1, padding=padding, strides=2, kernel_initializer=kernel_initializer)(inputs)
-            
-            return Add()([output, inputs])
-        
-    def simple_block_up(self, inputs, filters,
-                        strides = 1, dropout_value = None, acti = None, padding = None, 
-                        kernel_initializer = None, weight_regularizer = None, name = None):
-        
-        with tf.name_scope("Simple_block_up" + name):
-            output = bn_relu_conv2d(inputs, filters, 3,  acti=acti, padding=padding, strides=strides, 
-                                    kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
-
-            output = Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding=padding, kernel_initializer=kernel_initializer)(output)
-
-            output = Dropout(dropout_value)(output)
-            
-            inputs = UpSampling2D()(inputs)
-            inputs = Conv2D(output.shape[3].value, 1, padding=padding, kernel_initializer=kernel_initializer)(inputs)
-
-            return Add()([output, inputs])
-    
-
-    def build_model(self, unet_input, mean_std_normalization = None, 
-                    dropout_value = None, acti = None, padding = None, 
-                    kernel_initializer = None, weight_regularizer = None):
-        
-        ### get parameters from config file ###
-        filters = self.config.get_parameter("filters")
-        
-        if dropout_value is None:
-            dropout_value = self.config.get_parameter("dropout_value")
-        if acti is None:
-            acti = self.config.get_parameter("activation_function")
-        if padding is None:
-            padding = self.config.get_parameter("padding")
-        if kernel_initializer is None:
-            kernel_initializer = self.config.get_parameter("initializer")
-        if weight_regularizer is None:
-            weight_regularizer = self.config.get_parameter("weight_regularizer")
-        if mean_std_normalization is None:
-            if self.config.get_parameter("mean_std_normalization") == True:
-                mean = self.config.get_parameter("mean")
-                std = self.config.get_parameter("std")
-            else:
-                mean = None
-                std = None
-            
-        
-        ### Actual network ###
-        inputs = Input(unet_input)
-        
-        # normalize images
-        layer = normalize_input(inputs, 
-                                scale_input = self.config.get_parameter("scale_input"),
-                                mean_std_normalization = self.config.get_parameter("mean_std_normalization"),
-                                mean = mean, std = std)
-
-        # encoder arm
-        layer_1 = Conv2D(filters, (3, 3), padding = padding, 
-                         kernel_initializer = kernel_initializer, 
-                         kernel_regularizer = regularizer_function(weight_regularizer), name="Conv_layer_1")(layer)
-        
-        layer_2 = self.simple_block(layer_1, filters, 
-                                    dropout_value = dropout_value, acti = acti, padding = padding, 
-                                    kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
-                                    name = "_layer_2")
-        
-        layer = layer_2
-        layer_store = [layer]
-        
-        for i, conv_layer_i in enumerate(self.config.get_parameter("bottleneck_block"), 1):
-            strides = 2
-            
-            # the last level of the encoding arm is treated as the across (bridge) section
-            if i == len(self.config.get_parameter("bottleneck_block")):
-                layer = self.bottleneck_block(layer, filters = filters, 
-                                              strides = strides, dropout_value = dropout_value, acti = acti, padding = padding, 
-                                              kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
-                                              name = "_layer_{}".format(2 + i))
-
-                for count in range(conv_layer_i-2):
-                    layer = self.bottleneck_block(layer, filters = filters, 
-                                                  dropout_value = dropout_value, acti = acti, padding = padding, 
-                                                  kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
-                                                  name="_layer_{}-{}".format(2 + i, count))
-                    
-                layer = self.bottleneck_block(layer, upsample = True,
-                                              filters = filters, strides = 1,
-                                              dropout_value = dropout_value, acti = acti, padding = padding, 
-                                              kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
-                                              name = "_up_layer_{}".format(2 + i))
-            else:       
-                layer = self.bottleneck_block(layer, filters = filters, 
-                                              strides = strides, dropout_value = dropout_value, acti = acti, padding = padding, 
-                                              kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
-                                              name = "_layer_{}".format(2 + i))
-
-                for count in range(conv_layer_i - 1):
-                    layer = self.bottleneck_block(layer, filters = filters, 
-                                                  dropout_value = dropout_value, acti = acti, padding = padding, 
-                                                  kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
-                                                  name="_layer_{}-{}".format(2 + i, count))
-                filters = filters*2
-                layer_store.append(layer)
-
-        # decoder arm
-        for i, conv_layer_i in enumerate(self.config.get_parameter("bottleneck_block")[-2::-1], 1):
-            filters = filters//2  
-
-            # note that i should stay positive, possibly due to the way keras/tf model compilation works
-            layer = Concatenate(axis=3, name="Concatenate_layer_{}".format(i+6))([layer_store[-i], layer])
-            
-            for count in range(conv_layer_i - 1):
-                layer = self.bottleneck_block(layer, filters = filters, 
-                                              dropout_value = dropout_value, acti = acti, padding = padding, 
-                                              kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
-                                              name="_layer_{}-{}".format(i+6, count))
-
-            layer = self.bottleneck_block(layer, upsample = True,
-                                          filters = filters, strides = 1,
-                                          dropout_value = dropout_value, acti = acti, padding = padding, 
-                                          kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
-                                          name = "_layer_{}".format(i+6))
-        
-        layer_13 = Concatenate(axis=3, name="Concatenate_layer_13")([layer, layer_2])
-        layer_14 = self.simple_block_up(layer_13, filters,
-                                        dropout_value = dropout_value, acti = acti, padding = padding, 
-                                        kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
-                                        name = "_layer_14")
-
-        layer_15 = Concatenate(axis=3, name="Concatenate_layer_15")([layer_14, layer_1])
-        
-        layer_16 = Conv2D(filters, (3, 3), padding = padding, 
-                          kernel_initializer = kernel_initializer, kernel_regularizer = regularizer_function(weight_regularizer), 
-                          name="Conv_layer_16")(layer_15)
-        
-        layer_17 = BatchNormalization()(layer_16)
-        layer_18 = activation_function(layer_17, acti)
-
-        outputs = Conv2D(1, (1, 1), activation = self.config.get_parameter("final_activation"))(layer_18)
-        
-        return Model(inputs=[inputs], outputs=[outputs], name = self.config.get_parameter('name'))
-    
-class Unet_Resnet101(Unet_Resnet):
-    def __init__(self, model_dir = None, name = 'Unet_Resnet101', **kwargs):
-        super().__init__(model_dir = model_dir, **kwargs)
-        
-        self.config.update_parameter(["model","name"], name)
-        self.config.update_parameter(["model","bottleneck_block"], (3, 4, 23, 3))
-
-        # store parameters for ease of use (may need to remove in the future)
-        self.conv_layer = self.config.get_parameter("bottleneck_block")
-
-class Unet_Resnet50(Unet_Resnet):
-    def __init__(self, model_dir = None, name = 'Unet_Resnet50', **kwargs):
-        super().__init__(model_dir = model_dir, **kwargs)
-        
-        self.config.update_parameter(["model","name"], name)
-        self.config.update_parameter(["model","bottleneck_block"], (3, 4, 6, 3))
-        
-        # store parameters for ease of use (may need to remove in the future)
-        self.conv_layer = self.config.get_parameter("bottleneck_block")
-        
-class Unet_Resnet_paper(Unet_Resnet):
-    def __init__(self, model_dir = None, name = 'Unet_Resnet_paper', **kwargs):
-        """
-        see https://arxiv.org/pdf/1608.04117.pdf
-        """
-        super().__init__(model_dir = model_dir, **kwargs)
-        
-        self.config.update_parameter(["model","name"], name)
-        self.config.update_parameter(["model","bottleneck_block"], (3, 8, 10, 3))
-
-        # store parameters for ease of use (may need to remove in the future)
-        self.conv_layer = self.config.get_parameter("bottleneck_block")
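The bottleneck_block tuple sets how many residual bottleneck blocks are stacked at each level, mirroring the stage depths of the corresponding ResNet backbone: (3, 4, 6, 3) for Unet_Resnet50, (3, 4, 23, 3) for Unet_Resnet101, and (3, 8, 10, 3) for the paper variant. A minimal sketch of building one of these variants (config file name assumed):

    net = Unet_Resnet50(model_dir="models/", config_filepath="config.yml")
    model = net.build_model((512, 512, 1))
    model.summary()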
-
-
-
-
-
-
-
-
-
-

Classes

-
-
-class Unet_Resnet -(model_dir=None, **kwargs) -
-
-

Unet + resnet functions -see https://link.springer.com/chapter/10.1007/978-3-319-46976-8_19

-

Creates the base neural network class with basic functions

-

Parameters

-
-
model_dir : str, optional
-
[Default: None] Folder where the model is stored
-
config_filepath : str, optional
-
[Default: None] Filepath to the config file
-
**kwargs
-
Parameters that are passed to :class:network_config.Network_Config
-
-

Attributes

-
-
config : :class:network_config.Network_Config
-
Network_config object containing the config and necessary functions
-
-
- -Expand source code - -
class Unet_Resnet(CNN_Base):
-    """
-    Unet + resnet functions
-    see https://link.springer.com/chapter/10.1007/978-3-319-46976-8_19
-    """
-    
-    def __init__(self, model_dir = None, **kwargs):       
-        super().__init__(model_dir = model_dir, **kwargs)
-        
-    def bottleneck_block(self, inputs, 
-                         upsample = False,
-                         filters = 8,
-                         strides = 1, dropout_value = None, acti = None, padding = None, 
-                         kernel_initializer = None, weight_regularizer = None, name = None):            
-        # Bottleneck_block
-        with tf.name_scope("Bottleneck_block" + name):
-            output = bn_relu_conv2d(inputs, filters, 1,  acti=acti, padding=padding, strides=strides, 
-                                    kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
-            
-            output = bn_relu_conv2d(output, filters, 3,  acti=acti, padding=padding, 
-                                    kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
-            
-            if upsample == True:
-                output = bn_relu_conv2dtranspose(output, filters, (2,2), strides = (2,2), acti=acti, padding=padding, 
-                                                kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
-                output = Conv2D(filters * 4, (1,1), padding=padding, 
-                                kernel_initializer=kernel_initializer, 
-                                kernel_regularizer=regularizer_function(weight_regularizer))(output)
-            else:
-                output = bn_relu_conv2d(output, filters*4, 1,  acti=acti, padding=padding, 
-                                        kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
-
-            output = Dropout(dropout_value)(output)
-            
-            # reshape input to the same size as output
-            if upsample == True:
-                inputs = UpSampling2D()(inputs)
-            if strides == 2:
-                inputs = Conv2D(output.shape[3].value, 1, padding=padding, strides=strides, kernel_initializer=kernel_initializer)(inputs)
-            
-            # ensure number of filters are correct between input and output
-            if output.shape[3] != inputs.shape[3]:
-                inputs = Conv2D(output.shape[3].value, 1, padding=padding, kernel_initializer=kernel_initializer)(inputs)
-
-            return Add()([output, inputs])
-        
-    def simple_block(self, inputs, filters,
-                     strides = 1, dropout_value = None, acti = None, padding = None, 
-                     kernel_initializer = None, weight_regularizer = None, name = None):
-            
-        with tf.name_scope("Simple_block" + name):
-            output = BatchNormalization()(inputs)
-            output = activation_function(output, acti)
-            output = MaxPooling2D()(output)
-            output = Conv2D(filters, 3, padding=padding, strides=strides,
-                            kernel_initializer=kernel_initializer, 
-                            kernel_regularizer=regularizer_function(weight_regularizer))(output)
-
-            output = Dropout(dropout_value)(output)
-
-            inputs = Conv2D(output.shape[3].value, 1, padding=padding, strides=2, kernel_initializer=kernel_initializer)(inputs)
-            
-            return Add()([output, inputs])
-        
-    def simple_block_up(self, inputs, filters,
-                        strides = 1, dropout_value = None, acti = None, padding = None, 
-                        kernel_initializer = None, weight_regularizer = None, name = None):
-        
-        with tf.name_scope("Simple_block_up" + name):
-            output = bn_relu_conv2d(inputs, filters, 3,  acti=acti, padding=padding, strides=strides, 
-                                    kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
-
-            output = Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding=padding, kernel_initializer=kernel_initializer)(output)
-
-            output = Dropout(dropout_value)(output)
-            
-            inputs = UpSampling2D()(inputs)
-            inputs = Conv2D(output.shape[3].value, 1, padding=padding, kernel_initializer=kernel_initializer)(inputs)
-
-            return Add()([output, inputs])
-    
-
-    def build_model(self, unet_input, mean_std_normalization = None, 
-                    dropout_value = None, acti = None, padding = None, 
-                    kernel_initializer = None, weight_regularizer = None):
-        
-        ### get parameters from config file ###
-        filters = self.config.get_parameter("filters")
-        
-        if dropout_value is None:
-            dropout_value = self.config.get_parameter("dropout_value")
-        if acti is None:
-            acti = self.config.get_parameter("activation_function")
-        if padding is None:
-            padding = self.config.get_parameter("padding")
-        if kernel_initializer is None:
-            kernel_initializer = self.config.get_parameter("initializer")
-        if weight_regularizer is None:
-            weight_regularizer = self.config.get_parameter("weight_regularizer")
-        if mean_std_normalization is None:
-            if self.config.get_parameter("mean_std_normalization") == True:
-                mean = self.config.get_parameter("mean")
-                std = self.config.get_parameter("std")
-            else:
-                mean = None
-                std = None
-            
-        
-        ### Actual network ###
-        inputs = Input(unet_input)
-        
-        # normalize images
-        layer = normalize_input(inputs, 
-                                scale_input = self.config.get_parameter("scale_input"),
-                                mean_std_normalization = self.config.get_parameter("mean_std_normalization"),
-                                mean = mean, std = std)
-
-        # encoder arm
-        layer_1 = Conv2D(filters, (3, 3), padding = padding, 
-                         kernel_initializer = kernel_initializer, 
-                         kernel_regularizer = regularizer_function(weight_regularizer), name="Conv_layer_1")(layer)
-        
-        layer_2 = self.simple_block(layer_1, filters, 
-                                    dropout_value = dropout_value, acti = acti, padding = padding, 
-                                    kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
-                                    name = "_layer_2")
-        
-        layer = layer_2
-        layer_store = [layer]
-        
-        for i, conv_layer_i in enumerate(self.config.get_parameter("bottleneck_block"), 1):
-            strides = 2
-            
-            # the last level of the encoding arm is treated as the across (bridge) section
-            if i == len(self.config.get_parameter("bottleneck_block")):
-                layer = self.bottleneck_block(layer, filters = filters, 
-                                              strides = strides, dropout_value = dropout_value, acti = acti, padding = padding, 
-                                              kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
-                                              name = "_layer_{}".format(2 + i))
-
-                for count in range(conv_layer_i-2):
-                    layer = self.bottleneck_block(layer, filters = filters, 
-                                                  dropout_value = dropout_value, acti = acti, padding = padding, 
-                                                  kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
-                                                  name="_layer_{}-{}".format(2 + i, count))
-                    
-                layer = self.bottleneck_block(layer, upsample = True,
-                                              filters = filters, strides = 1,
-                                              dropout_value = dropout_value, acti = acti, padding = padding, 
-                                              kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
-                                              name = "_up_layer_{}".format(2 + i))
-            else:       
-                layer = self.bottleneck_block(layer, filters = filters, 
-                                              strides = strides, dropout_value = dropout_value, acti = acti, padding = padding, 
-                                              kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
-                                              name = "_layer_{}".format(2 + i))
-
-                for count in range(conv_layer_i - 1):
-                    layer = self.bottleneck_block(layer, filters = filters, 
-                                                  dropout_value = dropout_value, acti = acti, padding = padding, 
-                                                  kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
-                                                  name="_layer_{}-{}".format(2 + i, count))
-                filters = filters*2
-                layer_store.append(layer)
-
-        # decoder arm
-        for i, conv_layer_i in enumerate(self.config.get_parameter("bottleneck_block")[-2::-1], 1):
-            filters = filters//2  
-
-            # note that i should stay positive, possibly due to the way keras/tf model compilation works
-            layer = Concatenate(axis=3, name="Concatenate_layer_{}".format(i+6))([layer_store[-i], layer])
-            
-            for count in range(conv_layer_i - 1):
-                layer = self.bottleneck_block(layer, filters = filters, 
-                                              dropout_value = dropout_value, acti = acti, padding = padding, 
-                                              kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
-                                              name="_layer_{}-{}".format(i+6, count))
-
-            layer = self.bottleneck_block(layer, upsample = True,
-                                          filters = filters, strides = 1,
-                                          dropout_value = dropout_value, acti = acti, padding = padding, 
-                                          kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
-                                          name = "_layer_{}".format(i+6))
-        
-        layer_13 = Concatenate(axis=3, name="Concatenate_layer_13")([layer, layer_2])
-        layer_14 = self.simple_block_up(layer_13, filters,
-                                        dropout_value = dropout_value, acti = acti, padding = padding, 
-                                        kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
-                                        name = "_layer_14")
-
-        layer_15 = Concatenate(axis=3, name="Concatenate_layer_15")([layer_14, layer_1])
-        
-        layer_16 = Conv2D(filters, (3, 3), padding = padding, 
-                          kernel_initializer = kernel_initializer, kernel_regularizer = regularizer_function(weight_regularizer), 
-                          name="Conv_layer_16")(layer_15)
-        
-        layer_17 = BatchNormalization()(layer_16)
-        layer_18 = activation_function(layer_17, acti)
-
-        outputs = Conv2D(1, (1, 1), activation = self.config.get_parameter("final_activation"))(layer_18)
-        
-        return Model(inputs=[inputs], outputs=[outputs], name = self.config.get_parameter('name'))
-
-

Ancestors

- -

Subclasses

- -

Methods

-
-
-def bottleneck_block(self, inputs, upsample=False, filters=8, strides=1, dropout_value=None, acti=None, padding=None, kernel_initializer=None, weight_regularizer=None, name=None) -
-
-
-
- -Expand source code - -
def bottleneck_block(self, inputs, 
-                     upsample = False,
-                     filters = 8,
-                     strides = 1, dropout_value = None, acti = None, padding = None, 
-                     kernel_initializer = None, weight_regularizer = None, name = None):            
-    # Bottleneck_block
-    with tf.name_scope("Bottleneck_block" + name):
-        output = bn_relu_conv2d(inputs, filters, 1,  acti=acti, padding=padding, strides=strides, 
-                                kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
-        
-        output = bn_relu_conv2d(output, filters, 3,  acti=acti, padding=padding, 
-                                kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
-        
-        if upsample == True:
-            output = bn_relu_conv2dtranspose(output, filters, (2,2), strides = (2,2), acti=acti, padding=padding, 
-                                            kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
-            output = Conv2D(filters * 4, (1,1), padding=padding, 
-                            kernel_initializer=kernel_initializer, 
-                            kernel_regularizer=regularizer_function(weight_regularizer))(output)
-        else:
-            output = bn_relu_conv2d(output, filters*4, 1,  acti=acti, padding=padding, 
-                                    kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
-
-        output = Dropout(dropout_value)(output)
-        
-        # reshape input to the same size as output
-        if upsample == True:
-            inputs = UpSampling2D()(inputs)
-        if strides == 2:
-            inputs = Conv2D(output.shape[3].value, 1, padding=padding, strides=strides, kernel_initializer=kernel_initializer)(inputs)
-        
-        # ensure number of filters are correct between input and output
-        if output.shape[3] != inputs.shape[3]:
-            inputs = Conv2D(output.shape[3].value, 1, padding=padding, kernel_initializer=kernel_initializer)(inputs)
-
-        return Add()([output, inputs])
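In channel terms this follows the standard ResNet bottleneck pattern: a 1x1 convolution projecting to filters channels, a 3x3 convolution at filters channels, and a final expansion to filters * 4 channels (via a 1x1 convolution, with an extra 2x2 transposed convolution beforehand in the upsampling case), so the default filters = 8 yields a 32-channel output; the shortcut branch is resized and projected with 1x1 convolutions so the residual addition is well-defined.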
-
-
-
-def build_model(self, unet_input, mean_std_normalization=None, dropout_value=None, acti=None, padding=None, kernel_initializer=None, weight_regularizer=None) -
-
-
-
- -Expand source code - -
def build_model(self, unet_input, mean_std_normalization = None, 
-                dropout_value = None, acti = None, padding = None, 
-                kernel_initializer = None, weight_regularizer = None):
-    
-    ### get parameters from config file ###
-    filters = self.config.get_parameter("filters")
-    
-    if dropout_value is None:
-        dropout_value = self.config.get_parameter("dropout_value")
-    if acti is None:
-        acti = self.config.get_parameter("activation_function")
-    if padding is None:
-        padding = self.config.get_parameter("padding")
-    if kernel_initializer is None:
-        kernel_initializer = self.config.get_parameter("initializer")
-    if weight_regularizer is None:
-        weight_regularizer = self.config.get_parameter("weight_regularizer")
-    if mean_std_normalization is None:
-        if self.config.get_parameter("mean_std_normalization") == True:
-            mean = self.config.get_parameter("mean")
-            std = self.config.get_parameter("std")
-        else:
-            mean = None
-            std = None
-        
-    
-    ### Actual network ###
-    inputs = Input(unet_input)
-    
-    # normalize images
-    layer = normalize_input(inputs, 
-                            scale_input = self.config.get_parameter("scale_input"),
-                            mean_std_normalization = self.config.get_parameter("mean_std_normalization"),
-                            mean = mean, std = std)
-
-    # encoder arm
-    layer_1 = Conv2D(filters, (3, 3), padding = padding, 
-                     kernel_initializer = kernel_initializer, 
-                     kernel_regularizer = regularizer_function(weight_regularizer), name="Conv_layer_1")(layer)
-    
-    layer_2 = self.simple_block(layer_1, filters, 
-                                dropout_value = dropout_value, acti = acti, padding = padding, 
-                                kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
-                                name = "_layer_2")
-    
-    layer = layer_2
-    layer_store = [layer]
-    
-    for i, conv_layer_i in enumerate(self.config.get_parameter("bottleneck_block"), 1):
-        strides = 2
-        
-        # the last level of the encoding arm is treated as the across (bridge) section
-        if i == len(self.config.get_parameter("bottleneck_block")):
-            layer = self.bottleneck_block(layer, filters = filters, 
-                                          strides = strides, dropout_value = dropout_value, acti = acti, padding = padding, 
-                                          kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
-                                          name = "_layer_{}".format(2 + i))
-
-            for count in range(conv_layer_i-2):
-                layer = self.bottleneck_block(layer, filters = filters, 
-                                              dropout_value = dropout_value, acti = acti, padding = padding, 
-                                              kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
-                                              name="_layer_{}-{}".format(2 + i, count))
-                
-            layer = self.bottleneck_block(layer, upsample = True,
-                                          filters = filters, strides = 1,
-                                          dropout_value = dropout_value, acti = acti, padding = padding, 
-                                          kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
-                                          name = "_up_layer_{}".format(2 + i))
-        else:       
-            layer = self.bottleneck_block(layer, filters = filters, 
-                                          strides = strides, dropout_value = dropout_value, acti = acti, padding = padding, 
-                                          kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
-                                          name = "_layer_{}".format(2 + i))
-
-            for count in range(conv_layer_i - 1):
-                layer = self.bottleneck_block(layer, filters = filters, 
-                                              dropout_value = dropout_value, acti = acti, padding = padding, 
-                                              kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
-                                              name="_layer_{}-{}".format(2 + i, count))
-            filters = filters*2
-            layer_store.append(layer)
-
-    # decoder arm
-    for i, conv_layer_i in enumerate(self.config.get_parameter("bottleneck_block")[-2::-1], 1):
-        filters = filters//2  
-
-        # note that i should stay positive, possibly due to the way keras/tf model compilation works
-        layer = Concatenate(axis=3, name="Concatenate_layer_{}".format(i+6))([layer_store[-i], layer])
-        
-        for count in range(conv_layer_i - 1):
-            layer = self.bottleneck_block(layer, filters = filters, 
-                                          dropout_value = dropout_value, acti = acti, padding = padding, 
-                                          kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
-                                          name="_layer_{}-{}".format(i+6, count))
-
-        layer = self.bottleneck_block(layer, upsample = True,
-                                      filters = filters, strides = 1,
-                                      dropout_value = dropout_value, acti = acti, padding = padding, 
-                                      kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
-                                      name = "_layer_{}".format(i+6))
-    
-    layer_13 = Concatenate(axis=3, name="Concatenate_layer_13")([layer, layer_2])
-    layer_14 = self.simple_block_up(layer_13, filters,
-                                    dropout_value = dropout_value, acti = acti, padding = padding, 
-                                    kernel_initializer = kernel_initializer, weight_regularizer = weight_regularizer, 
-                                    name = "_layer_14")
-
-    layer_15 = Concatenate(axis=3, name="Concatenate_layer_15")([layer_14, layer_1])
-    
-    layer_16 = Conv2D(filters, (3, 3), padding = padding, 
-                      kernel_initializer = kernel_initializer, kernel_regularizer = regularizer_function(weight_regularizer), 
-                      name="Conv_layer_16")(layer_15)
-    
-    layer_17 = BatchNormalization()(layer_16)
-    layer_18 = activation_function(layer_17, acti)
-
-    outputs = Conv2D(1, (1, 1), activation = self.config.get_parameter("final_activation"))(layer_18)
-    
-    return Model(inputs=[inputs], outputs=[outputs], name = self.config.get_parameter('name'))
-
-
-
-def simple_block(self, inputs, filters, strides=1, dropout_value=None, acti=None, padding=None, kernel_initializer=None, weight_regularizer=None, name=None) -
-
-
-
- -Expand source code - -
def simple_block(self, inputs, filters,
-                 strides = 1, dropout_value = None, acti = None, padding = None, 
-                 kernel_initializer = None, weight_regularizer = None, name = None):
-        
-    with tf.name_scope("Simple_block" + name):
-        output = BatchNormalization()(inputs)
-        output = activation_function(output, acti)
-        output = MaxPooling2D()(output)
-        output = Conv2D(filters, 3, padding=padding, strides=strides,
-                        kernel_initializer=kernel_initializer, 
-                        kernel_regularizer=regularizer_function(weight_regularizer))(output)
-
-        output = Dropout(dropout_value)(output)
-
-        inputs = Conv2D(output.shape[3].value, 1, padding=padding, strides=2, kernel_initializer=kernel_initializer)(inputs)
-        
-        return Add()([output, inputs])
-
-
-
-def simple_block_up(self, inputs, filters, strides=1, dropout_value=None, acti=None, padding=None, kernel_initializer=None, weight_regularizer=None, name=None) -
-
-
-
- -Expand source code - -
def simple_block_up(self, inputs, filters,
-                    strides = 1, dropout_value = None, acti = None, padding = None, 
-                    kernel_initializer = None, weight_regularizer = None, name = None):
-    
-    with tf.name_scope("Simple_block_up" + name):
-        output = bn_relu_conv2d(inputs, filters, 3,  acti=acti, padding=padding, strides=strides, 
-                                kernel_initializer=kernel_initializer, weight_regularizer=weight_regularizer)
-
-        output = Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding=padding, kernel_initializer=kernel_initializer)(output)
-
-        output = Dropout(dropout_value)(output)
-        
-        inputs = UpSampling2D()(inputs)
-        inputs = Conv2D(output.shape[3].value, 1, padding=padding, kernel_initializer=kernel_initializer)(inputs)
-
-        return Add()([output, inputs])
-
-
-
-

Inherited members

- -
-
-class Unet_Resnet101 -(model_dir=None, name='Unet_Resnet101', **kwargs) -
-
-

Unet + resnet functions -see https://link.springer.com/chapter/10.1007/978-3-319-46976-8_19

-

Creates the base neural network class with basic functions

-

Parameters

-
-
model_dir : str, optional
-
[Default: None] Folder where the model is stored
-
config_filepath : str, optional
-
[Default: None] Filepath to the config file
-
**kwargs
-
Parameters that are passed to :class:network_config.Network_Config
-
-

Attributes

-
-
config : :class:network_config.Network_Config
-
Network_config object containing the config and necessary functions
-
-
- -Expand source code - -
class Unet_Resnet101(Unet_Resnet):
-    def __init__(self, model_dir = None, name = 'Unet_Resnet101', **kwargs):
-        super().__init__(model_dir = model_dir, **kwargs)
-        
-        self.config.update_parameter(["model","name"], name)
-        self.config.update_parameter(["model","bottleneck_block"], (3, 4, 23, 3))
-
-        # store parameters for ease of use (may need to remove in the future)
-        self.conv_layer = self.config.get_parameter("bottleneck_block")
-
-

Ancestors

- -

Inherited members

- -
-
-class Unet_Resnet50 -(model_dir=None, name='Unet_Resnet50', **kwargs) -
-
-

Unet + resnet functions -see https://link.springer.com/chapter/10.1007/978-3-319-46976-8_19

-

Creates the base neural network class with basic functions

-

Parameters

-
-
model_dir : str, optional
-
[Default: None] Folder where the model is stored
-
config_filepath : str, optional
-
[Default: None] Filepath to the config file
-
**kwargs
-
Parameters that are passed to :class:network_config.Network_Config
-
-

Attributes

-
-
config : :class:network_config.Network_Config
-
Network_config object containing the config and necessary functions
-
-
- -Expand source code - -
class Unet_Resnet50(Unet_Resnet):
-    def __init__(self, model_dir = None, name = 'Unet_Resnet50', **kwargs):
-        super().__init__(model_dir = model_dir, **kwargs)
-        
-        self.config.update_parameter(["model","name"], name)
-        self.config.update_parameter(["model","bottleneck_block"], (3, 4, 6, 3))
-        
-        # store parameters for ease of use (may need to remove in the future)
-        self.conv_layer = self.config.get_parameter("bottleneck_block")
-
-

Ancestors

- -

Inherited members

- -
-
-class Unet_Resnet_paper -(model_dir=None, name='Unet_Resnet_paper', **kwargs) -
-
-

Unet + resnet functions -see https://link.springer.com/chapter/10.1007/978-3-319-46976-8_19

-

see https://arxiv.org/pdf/1608.04117.pdf

-
- -Expand source code - -
class Unet_Resnet_paper(Unet_Resnet):
-    def __init__(self, model_dir = None, name = 'Unet_Resnet_paper', **kwargs):
-        """
-        see https://arxiv.org/pdf/1608.04117.pdf
-        """
-        super().__init__(model_dir = model_dir, **kwargs)
-        
-        self.config.update_parameter(["model","name"], name)
-        self.config.update_parameter(["model","bottleneck_block"], (3, 8, 10, 3))
-
-        # store parameters for ease of use (may need to remove in the future)
-        self.conv_layer = self.config.get_parameter("bottleneck_block")
-
-

Ancestors

- -

Inherited members

- -
-
-
-
- -
\ No newline at end of file
diff --git a/html/models/index.html b/html/models/index.html
deleted file mode 100644
index b530452..0000000
--- a/html/models/index.html
+++ /dev/null
@@ -1,86 +0,0 @@
-models API documentation
\ No newline at end of file
diff --git a/html/models/internals/dataset.html b/html/models/internals/dataset.html
deleted file mode 100644
index d653901..0000000
--- a/html/models/internals/dataset.html
+++ /dev/null
@@ -1,958 +0,0 @@
-models.internals.dataset API documentation
-
-
-

Module models.internals.dataset

-
-
-
- -Expand source code - -
import os, sys
-import numpy as np
-
-import matplotlib.pyplot as plt
-
-from tqdm import tqdm
-
-from .image_functions import Image_Functions      
-
-class Dataset(Image_Functions):
-    def __init__(self):
-        """Creates Dataset object that is used to manipulate the training data.
-    
-        Attributes
-        ----------
-        classes : list
-            List of dictionaries containing the class name and id
-            
-        train_images : list
-            List of images used as the input for the network
-            
-        train_ground_truth : list
-            List of images used as the ground truth for the network
-        """
-            
-        self.classes = []
-        self.train_images = []
-        self.train_ground_truth = []
-        
-        super().__init__()
-    
-    #######################
-    # Class id functions
-    #######################
-    def get_class_id(self, class_name):
-        """Returns the class id and adds class to list if not in list of classes.
-    
-        Parameters
-        ----------
-        class_name : str
-            Identity of class that will be associated with the class id
-            
-        Returns
-        ----------
-        int
-            Class id
-        """
-        
-        if len(self.classes) == 0:
-            self.classes.append({"class": class_name, "id": 0})
-            return 0
-        
-        for class_info in self.classes:
-            # if the class already exists, return its id
-            if class_info["class"] == class_name:
-                return class_info["id"]
-
-        new_id = len(self.classes)
-        self.classes.append({"class": class_name, "id": new_id})
-        return new_id
-    
-    #######################
-    # Class id functions
-    #######################
-    def sanity_check(self, image_index):
-        """Plots the augmented image and ground_truth to check if everything is ok.
-    
-        Parameters
-        ----------
-        image_index : int
-            Index of the image and its corresponding ground_truth
-        """
-        
-        image = self.aug_images[image_index][:,:,0]
-        ground_truth = self.aug_ground_truth[image_index][:,:,0]
-
-        plt.figure(figsize=(14, 14))
-        plt.axis('off')
-        plt.imshow(image, cmap='gray', 
-                   norm=None, interpolation=None)
-        plt.show()
-
-        plt.figure(figsize=(14, 14))
-        plt.axis('off')
-        plt.imshow(ground_truth, cmap='gray', 
-                   norm=None, interpolation=None)
-        plt.show()
-    
-    def load_dataset(self, dataset_dir = None, tiled = False):
-        """Loads dataset from ``dataset_dir``
-    
-        Parameters
-        ----------
-        dataset_dir : str or None, optional
-            Folder to load the dataset from. If None, ``dataset_dir`` is obtained from the config file
-            
-        tiled : bool, optional
-            To set if tiling function is used
-        """
-        
-        # update dataset_dir if specified. If not, load dataset_dir from config file
-        if dataset_dir is None:
-            dataset_dir = self.config.get_parameter("dataset_dir")
-        else:
-            self.config.update_parameter(self.config.find_key("dataset_dir"), dataset_dir)
-        
-        image_dirs = next(os.walk(dataset_dir))[1]
-        image_dirs = [f for f in image_dirs if not f[0] == '.']
-        
-        for img_dir in image_dirs:
-            # images
-            image = self.load_image(os.path.join(dataset_dir, img_dir), subfolder = self.config.get_parameter("image_subfolder"))
-            
-            # percentile normalization
-            if self.config.get_parameter("percentile_normalization"):
-                image, _, _ = self.percentile_normalization(image, in_bound = self.config.get_parameter("percentile"))
-            
-            if tiled is True:
-                tile_image_list, num_rows, num_cols, padding = self.tile_image(image, self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size"))
-                self.config.update_parameter(["images","num_rows"], num_rows)
-                self.config.update_parameter(["images","num_cols"], num_cols)
-                self.config.update_parameter(["images","padding"], padding)
-                self.train_images.extend(tile_image_list)
-            else:
-                self.train_images.extend([image,])
-            
-            #ground_truth
-            ground_truth, class_id = self.load_ground_truth(os.path.join(dataset_dir, img_dir), subfolder = self.config.get_parameter("ground_truth_subfolder"))
-            if tiled is True:
-                tile_ground_truth_list, _, _, _ = self.tile_image(ground_truth[0], self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size"))
-                self.train_ground_truth.extend(tile_ground_truth_list)
-            else:
-                self.train_ground_truth.extend(ground_truth)
-                
-    #######################
-    # Image augmentation
-    #######################
-    def augment_images(self):
-        """Augments images using the parameters in the config file"""
-        
-        # TODO: To allow for augmentation of multi-class images
-        
-        augmentor = self.augmentations(p=self.config.get_parameter("augmentations_p"))
-        
-        # increase number of images
-        self.aug_images = self.train_images*self.config.get_parameter("num_augmented_images")
-        self.aug_ground_truth = self.train_ground_truth*self.config.get_parameter("num_augmented_images")
-        
-        print("Performing augmentations on {} images".format(len(self.aug_images)))
-        sys.stdout.flush()
-        
-        for i in tqdm(range(len(self.aug_images)),desc="Augmentation of images"):
-            
-            # target must be image and mask in order for albumentations to work
-            data = {"image": self.aug_images[i], 
-                    "mask": self.aug_ground_truth[i]}
-            augmented = augmentor(**data)
-
-            self.aug_images[i] = self.reshape_image(np.asarray(augmented["image"]))
-            
-            # optionally dilate the mask after augmentation to restore thin borders
-            if self.config.get_parameter("use_binary_dilation_after_augmentation") is True:
-                from skimage.morphology import binary_dilation, disk
-                self.aug_ground_truth[i] = self.reshape_image(binary_dilation(augmented["mask"].astype(bool), disk(self.config.get_parameter("disk_size"))))
-            else:
-                self.aug_ground_truth[i] = self.reshape_image(augmented["mask"].astype(bool))
-
-        self.aug_images = np.stack(self.aug_images, axis = 0)
-        self.aug_ground_truth = np.stack(self.aug_ground_truth, axis = 0)
-        
-        mean = self.aug_images.mean()
-        std = self.aug_images.std()
-        
-        self.config.update_parameter(["images","mean"], float(mean))
-        self.config.update_parameter(["images","std"], float(std))
-        
-        print("Augmentations complete!")
-
-    def augmentations(self, p = None):
-        """Generates list of augmentations using parameters obtained from config file
-        
-        Parameters
-        ----------
-        p : float, optional
-            Probability of applying the assembled augmentations to an image
-        
-        Returns
-        ----------
-        function
-            function used to augment images
-        """
-        from albumentations import (
-            RandomCrop, HorizontalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90,
-            Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, ElasticTransform,
-            IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur,
-            IAASharpen, RandomBrightnessContrast, Flip, OneOf, Compose
-        )
-        
-        augmentation_list = []
-        
-        if self.config.get_parameter("random_rotate") is True:
-            augmentation_list.append(RandomRotate90(p = self.config.get_parameter("random_rotate_p"))) # 0.9
-        
-        if self.config.get_parameter("flip") is True:
-            augmentation_list.append(Flip())
-            
-        if self.config.get_parameter("transpose") is True:
-            augmentation_list.append(Transpose())
-            
-        if self.config.get_parameter("blur_group") is True:
-            blur_augmentation = []
-            if self.config.get_parameter("motion_blur") is True:
-                blur_augmentation.append(MotionBlur(p = self.config.get_parameter("motion_blur_p")))
-            if self.config.get_parameter("median_blur") is True:
-                blur_augmentation.append(MedianBlur(blur_limit = self.config.get_parameter("median_blur_limit"), p = self.config.get_parameter("median_blur_p")))
-            if self.config.get_parameter("blur") is True:
-                blur_augmentation.append(Blur(blur_limit = self.config.get_parameter("blur_limit"), p = self.config.get_parameter("blur_p")))
-            augmentation_list.append(OneOf(blur_augmentation, p = self.config.get_parameter("blur_group_p"))) 
-            
-        if self.config.get_parameter("shift_scale_rotate") is True:
-            augmentation_list.append(ShiftScaleRotate(shift_limit = self.config.get_parameter("shift_limit"),
-                                                      scale_limit = self.config.get_parameter("scale_limit"),
-                                                      rotate_limit = self.config.get_parameter("rotate_limit"),
-                                                      p = self.config.get_parameter("shift_scale_rotate_p")))
-        if self.config.get_parameter("distortion_group") is True:
-            distortion_augmentation = []
-            if self.config.get_parameter("optical_distortion") is True:
-                distortion_augmentation.append(OpticalDistortion(p = self.config.get_parameter("optical_distortion_p")))
-            if self.config.get_parameter("elastic_transform") is True:
-                distortion_augmentation.append(ElasticTransform(p = self.config.get_parameter("elastic_transform_p")))
-            if self.config.get_parameter("grid_distortion") is True:
-                distortion_augmentation.append(GridDistortion(p = self.config.get_parameter("grid_distortion_p")))
-            
-            augmentation_list.append(OneOf(distortion_augmentation, p = self.config.get_parameter("distortion_group_p")))
-        
-        if self.config.get_parameter("brightness_contrast_group") is True:
-            contrast_augmentation = []
-            if self.config.get_parameter("clahe") is True:
-                contrast_augmentation.append(CLAHE())
-            if self.config.get_parameter("sharpen") is True:
-                contrast_augmentation.append(IAASharpen())
-            if self.config.get_parameter("random_brightness_contrast") is True:
-                contrast_augmentation.append(RandomBrightnessContrast())
-           
-            augmentation_list.append(OneOf(contrast_augmentation, p = self.config.get_parameter("brightness_contrast_group_p")))
-            
-        augmentation_list.append(RandomCrop(self.config.get_parameter("tile_size")[0], self.config.get_parameter("tile_size")[1], always_apply=True))
-        
-        return Compose(augmentation_list, p = p)
-
-############################### TODO ###############################
-#     def prepare_data(self):
-#         """        
-#         Performs augmentation if needed
-#         """
-        
-            
-#     # Create data generator
-#     # Return augmented images/ground_truth arrays of batch size
-#     def generator(features, labels, batch_size, seq_det):
-#         # create empty arrays to contain batch of features and labels
-#         batch_features = np.zeros((batch_size, features.shape[1], features.shape[2], features.shape[3]))
-#         batch_labels = np.zeros((batch_size, labels.shape[1], labels.shape[2], labels.shape[3]))
-
-#         while True:
-#             # Fill arrays of batch size with augmented data taken randomly from full passed arrays
-#             indexes = random.sample(range(len(features)), batch_size)
-#             # Perform the exactly the same augmentation for X and y
-#             random_augmented_images, random_augmented_labels = do_augmentation(seq_det, features[indexes], labels[indexes])
-#             batch_features[:,:,:,:] = random_augmented_images[:,:,:,:]
-#             batch_labels[:,:,:,:] = random_augmented_labels[:,:,:,:]
-
-#             yield batch_features, batch_labels
-            
-    # Train augmentation
-#     def do_augmentation(seq_det, X_train, y_train):
-#         # Use seq_det to build augmentation.
-#         # ....
-#         return np.array(X_train_aug), np.array(y_train_aug)
-
-#     seq = iaa.Sequential([
-#         iaa.Fliplr(0.5), # horizontally flip
-#         iaa.OneOf([
-#             iaa.Noop(),
-#             iaa.GaussianBlur(sigma=(0.0, 1.0)),
-#             iaa.Noop(),
-#             iaa.Affine(rotate=(-10, 10), translate_percent={"x": (-0.25, 0.25)}, mode='symmetric', cval=(0)),
-#             iaa.Noop(),
-#             iaa.PerspectiveTransform(scale=(0.04, 0.08)),
-#             iaa.Noop(),
-#             iaa.PiecewiseAffine(scale=(0.05, 0.1), mode='edge', cval=(0)),
-#         ]),
-#         # More as you want ...
-#     ])
-#     seq_det = seq.to_deterministic()
-    
-#     history = model.fit_generator(generator(X_train, y_train, BATCH_SIZE, seq_det),
-#                               epochs=EPOCHS,
-#                               steps_per_epoch=steps_per_epoch,
-#                               validation_data=(X_valid, y_valid),
-#                               verbose = 1, 
-#                               callbacks = [check_point]
-#                              ) 
-    
-    # Image augmentations
-            
-############################### END of TODO ###############################
-
-
-
-
-
-
-
-
-
-

Classes

-
-
-class Dataset
-
-

Creates Dataset object that is used to manipulate the training data.

-

Attributes

-
-
classes : list
-
List of dictionaries containing the class name and id
-
train_images : list
-
List of images used as the input for the network
-
train_ground_truth : list
-
List of images used as the ground truth for the network
-
-
-Expand source code
-class Dataset(Image_Functions):
-    def __init__(self):
-        """Creates Dataset object that is used to manipulate the training data.
-    
-        Attributes
-        ----------
-        classes : list
-            List of dictionaries containing the class name and id
-            
-        train_images : list
-            List of images used as the input for the network
-            
-        train_ground_truth : list
-            List of images used as the ground truth for the network
-        """
-            
-        self.classes = []
-        self.train_images = []
-        self.train_ground_truth = []
-        
-        super().__init__()
-    
-    #######################
-    # Class id functions
-    #######################
-    def get_class_id(self, class_name):
-        """Returns the class id and adds class to list if not in list of classes.
-    
-        Parameters
-        ----------
-        class_name : str
-            Identity of class that will be associated with the class id
-            
-        Returns
-        ----------
-        int
-            Class id
-        """
-        
-        if len(self.classes) == 0:
-            self.classes.append({"class": class_name, "id": 0})
-            return 0
-        
-        for class_info in self.classes:
-            # if the class already exists, return its id
-            if class_info["class"] == class_name:
-                return class_info["id"]
-
-        new_id = len(self.classes)
-        self.classes.append({"class": class_name, "id": new_id})
-        return new_id
-    
-    #######################
-    # Class id functions
-    #######################
-    def sanity_check(self, image_index):
-        """Plots the augmented image and ground_truth to check if everything is ok.
-    
-        Parameters
-        ----------
-        image_index : int
-            Index of the image and its corresponding ground_truth
-        """
-        
-        image = self.aug_images[image_index][:,:,0]
-        ground_truth = self.aug_ground_truth[image_index][:,:,0]
-
-        plt.figure(figsize=(14, 14))
-        plt.axis('off')
-        plt.imshow(image, cmap='gray', 
-                   norm=None, interpolation=None)
-        plt.show()
-
-        plt.figure(figsize=(14, 14))
-        plt.axis('off')
-        plt.imshow(ground_truth, cmap='gray', 
-                   norm=None, interpolation=None)
-        plt.show()
-    
-    def load_dataset(self, dataset_dir = None, tiled = False):
-        """Loads dataset from ``dataset_dir``
-    
-        Parameters
-        ----------
-        dataset_dir : str or None, optional
-            Folder to load the dataset from. If None, ``dataset_dir`` is obtained from the config file
-            
-        tiled : bool, optional
-            To set if tiling function is used
-        """
-        
-        # update dataset_dir if specified. If not, load dataset_dir from config file
-        if dataset_dir is None:
-            dataset_dir = self.config.get_parameter("dataset_dir")
-        else:
-            self.config.update_parameter(self.config.find_key("dataset_dir"), dataset_dir)
-        
-        image_dirs = next(os.walk(dataset_dir))[1]
-        image_dirs = [f for f in image_dirs if not f[0] == '.']
-        
-        for img_dir in image_dirs:
-            # images
-            image = self.load_image(os.path.join(dataset_dir, img_dir), subfolder = self.config.get_parameter("image_subfolder"))
-            
-            # percentile normalization
-            if self.config.get_parameter("percentile_normalization"):
-                image, _, _ = self.percentile_normalization(image, in_bound = self.config.get_parameter("percentile"))
-            
-            if tiled is True:
-                tile_image_list, num_rows, num_cols, padding = self.tile_image(image, self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size"))
-                self.config.update_parameter(["images","num_rows"], num_rows)
-                self.config.update_parameter(["images","num_cols"], num_cols)
-                self.config.update_parameter(["images","padding"], padding)
-                self.train_images.extend(tile_image_list)
-            else:
-                self.train_images.extend([image,])
-            
-            #ground_truth
-            ground_truth, class_id = self.load_ground_truth(os.path.join(dataset_dir, img_dir), subfolder = self.config.get_parameter("ground_truth_subfolder"))
-            if tiled is True:
-                tile_ground_truth_list, _, _, _ = self.tile_image(ground_truth[0], self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size"))
-                self.train_ground_truth.extend(tile_ground_truth_list)
-            else:
-                self.train_ground_truth.extend(ground_truth)
-                
-    #######################
-    # Image augmentation
-    #######################
-    def augment_images(self):
-        """Augments images using the parameters in the config file"""
-        
-        # TODO: To allow for augmentation of multi-class images
-        
-        augmentor = self.augmentations(p=self.config.get_parameter("augmentations_p"))
-        
-        # increase number of images
-        self.aug_images = self.train_images*self.config.get_parameter("num_augmented_images")
-        self.aug_ground_truth = self.train_ground_truth*self.config.get_parameter("num_augmented_images")
-        
-        print("Performing augmentations on {} images".format(len(self.aug_images)))
-        sys.stdout.flush()
-        
-        for i in tqdm(range(len(self.aug_images)),desc="Augmentation of images"):
-            
-            # target must be image and mask in order for albumentations to work
-            data = {"image": self.aug_images[i], 
-                    "mask": self.aug_ground_truth[i]}
-            augmented = augmentor(**data)
-
-            self.aug_images[i] = self.reshape_image(np.asarray(augmented["image"]))
-            
-            # optionally dilate the mask after augmentation to restore thin borders
-            if self.config.get_parameter("use_binary_dilation_after_augmentation") is True:
-                from skimage.morphology import binary_dilation, disk
-                self.aug_ground_truth[i] = self.reshape_image(binary_dilation(augmented["mask"].astype(bool), disk(self.config.get_parameter("disk_size"))))
-            else:
-                self.aug_ground_truth[i] = self.reshape_image(augmented["mask"].astype(bool))
-
-        self.aug_images = np.stack(self.aug_images, axis = 0)
-        self.aug_ground_truth = np.stack(self.aug_ground_truth, axis = 0)
-        
-        mean = self.aug_images.mean()
-        std = self.aug_images.std()
-        
-        self.config.update_parameter(["images","mean"], float(mean))
-        self.config.update_parameter(["images","std"], float(std))
-        
-        print("Augmentations complete!")
-
-    def augmentations(self, p = None):
-        """Generates list of augmentations using parameters obtained from config file
-        
-        Parameters
-        ----------
-        p : float, optional
-            Probability of applying the assembled augmentations to an image
-        
-        Returns
-        ----------
-        function
-            function used to augment images
-        """
-        from albumentations import (
-            RandomCrop, HorizontalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90,
-            Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, ElasticTransform,
-            IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur,
-            IAASharpen, RandomBrightnessContrast, Flip, OneOf, Compose
-        )
-        
-        augmentation_list = []
-        
-        if self.config.get_parameter("random_rotate") is True:
-            augmentation_list.append(RandomRotate90(p = self.config.get_parameter("random_rotate_p"))) # 0.9
-        
-        if self.config.get_parameter("flip") is True:
-            augmentation_list.append(Flip())
-            
-        if self.config.get_parameter("transpose") is True:
-            augmentation_list.append(Transpose())
-            
-        if self.config.get_parameter("blur_group") is True:
-            blur_augmentation = []
-            if self.config.get_parameter("motion_blur") is True:
-                blur_augmentation.append(MotionBlur(p = self.config.get_parameter("motion_blur_p")))
-            if self.config.get_parameter("median_blur") is True:
-                blur_augmentation.append(MedianBlur(blur_limit = self.config.get_parameter("median_blur_limit"), p = self.config.get_parameter("median_blur_p")))
-            if self.config.get_parameter("blur") is True:
-                blur_augmentation.append(Blur(blur_limit = self.config.get_parameter("blur_limit"), p = self.config.get_parameter("blur_p")))
-            augmentation_list.append(OneOf(blur_augmentation, p = self.config.get_parameter("blur_group_p"))) 
-            
-        if self.config.get_parameter("shift_scale_rotate") is True:
-            augmentation_list.append(ShiftScaleRotate(shift_limit = self.config.get_parameter("shift_limit"),
-                                                      scale_limit = self.config.get_parameter("scale_limit"),
-                                                      rotate_limit = self.config.get_parameter("rotate_limit"),
-                                                      p = self.config.get_parameter("shift_scale_rotate_p")))
-        if self.config.get_parameter("distortion_group") is True:
-            distortion_augmentation = []
-            if self.config.get_parameter("optical_distortion") is True:
-                distortion_augmentation.append(OpticalDistortion(p = self.config.get_parameter("optical_distortion_p")))
-            if self.config.get_parameter("elastic_transform") is True:
-                distortion_augmentation.append(ElasticTransform(p = self.config.get_parameter("elastic_transform_p")))
-            if self.config.get_parameter("grid_distortion") is True:
-                distortion_augmentation.append(GridDistortion(p = self.config.get_parameter("grid_distortion_p")))
-            
-            augmentation_list.append(OneOf(distortion_augmentation, p = self.config.get_parameter("distortion_group_p")))
-        
-        if self.config.get_parameter("brightness_contrast_group") is True:
-            contrast_augmentation = []
-            if self.config.get_parameter("clahe") is True:
-                contrast_augmentation.append(CLAHE())
-            if self.config.get_parameter("sharpen") is True:
-                contrast_augmentation.append(IAASharpen())
-            if self.config.get_parameter("random_brightness_contrast") is True:
-                contrast_augmentation.append(RandomBrightnessContrast())
-           
-            augmentation_list.append(OneOf(contrast_augmentation, p = self.config.get_parameter("brightness_contrast_group_p")))
-            
-        augmentation_list.append(RandomCrop(self.config.get_parameter("tile_size")[0], self.config.get_parameter("tile_size")[1], always_apply=True))
-        
-        return Compose(augmentation_list, p = p)
-
-

Ancestors

- -

Subclasses

- -

Methods

-
-
-def augment_images(self)
-
-

Augments images using the parameters in the config file

-
-Expand source code
-def augment_images(self):
-    """Augments images using the parameters in the config file"""
-    
-    # TODO: To allow for augmentation of multi-class images
-    
-    augmentor = self.augmentations(p=self.config.get_parameter("augmentations_p"))
-    
-    # increase number of images
-    self.aug_images = self.train_images*self.config.get_parameter("num_augmented_images")
-    self.aug_ground_truth = self.train_ground_truth*self.config.get_parameter("num_augmented_images")
-    
-    print("Performing augmentations on {} images".format(len(self.aug_images)))
-    sys.stdout.flush()
-    
-    for i in tqdm(range(len(self.aug_images)),desc="Augmentation of images"):
-        
-        # target must be image and mask in order for albumentations to work
-        data = {"image": self.aug_images[i], 
-                "mask": self.aug_ground_truth[i]}
-        augmented = augmentor(**data)
-
-        self.aug_images[i] = self.reshape_image(np.asarray(augmented["image"]))
-        
-        # optionally dilate the mask after augmentation to restore thin borders
-        if self.config.get_parameter("use_binary_dilation_after_augmentation") is True:
-            from skimage.morphology import binary_dilation, disk
-            self.aug_ground_truth[i] = self.reshape_image(binary_dilation(augmented["mask"].astype(bool), disk(self.config.get_parameter("disk_size"))))
-        else:
-            self.aug_ground_truth[i] = self.reshape_image(augmented["mask"].astype(bool))
-
-    self.aug_images = np.stack(self.aug_images, axis = 0)
-    self.aug_ground_truth = np.stack(self.aug_ground_truth, axis = 0)
-    
-    mean = self.aug_images.mean()
-    std = self.aug_images.std()
-    
-    self.config.update_parameter(["images","mean"], float(mean))
-    self.config.update_parameter(["images","std"], float(std))
-    
-    print("Augmentations complete!")
-
-
-
-def augmentations(self, p=None)
-
-

Generates list of augmentations using parameters obtained from config file

-

Parameters

-
-
p : float, optional
-
Probability of applying the assembled augmentations to an image
-
-

Returns

-
-
function
-
function used to augment images
-
-
-Expand source code
-def augmentations(self, p = None):
-    """Generates list of augmentations using parameters obtained from config file
-    
-    Parameters
-    ----------
-    p : float, optional
-        Probability of applying the assembled augmentations to an image
-    
-    Returns
-    ----------
-    function
-        function used to augment images
-    """
-    from albumentations import (
-        RandomCrop, HorizontalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90,
-        Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, ElasticTransform,
-        IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur,
-        IAASharpen, RandomBrightnessContrast, Flip, OneOf, Compose
-    )
-    
-    augmentation_list = []
-    
-    if self.config.get_parameter("random_rotate") is True:
-        augmentation_list.append(RandomRotate90(p = self.config.get_parameter("random_rotate_p"))) # 0.9
-    
-    if self.config.get_parameter("flip") is True:
-        augmentation_list.append(Flip())
-        
-    if self.config.get_parameter("transpose") is True:
-        augmentation_list.append(Transpose())
-        
-    if self.config.get_parameter("blur_group") is True:
-        blur_augmentation = []
-        if self.config.get_parameter("motion_blur") is True:
-            blur_augmentation.append(MotionBlur(p = self.config.get_parameter("motion_blur_p")))
-        if self.config.get_parameter("median_blur") is True:
-            blur_augmentation.append(MedianBlur(blur_limit = self.config.get_parameter("median_blur_limit"), p = self.config.get_parameter("median_blur_p")))
-        if self.config.get_parameter("blur") is True:
-            blur_augmentation.append(Blur(blur_limit = self.config.get_parameter("blur_limit"), p = self.config.get_parameter("blur_p")))
-        augmentation_list.append(OneOf(blur_augmentation, p = self.config.get_parameter("blur_group_p"))) 
-        
-    if self.config.get_parameter("shift_scale_rotate") is True:
-        augmentation_list.append(ShiftScaleRotate(shift_limit = self.config.get_parameter("shift_limit"),
-                                                  scale_limit = self.config.get_parameter("scale_limit"),
-                                                  rotate_limit = self.config.get_parameter("rotate_limit"),
-                                                  p = self.config.get_parameter("shift_scale_rotate_p")))
-    if self.config.get_parameter("distortion_group") is True:
-        distortion_augmentation = []
-        if self.config.get_parameter("optical_distortion") is True:
-            distortion_augmentation.append(OpticalDistortion(p = self.config.get_parameter("optical_distortion_p")))
-        if self.config.get_parameter("elastic_transform") is True:
-            distortion_augmentation.append(ElasticTransform(p = self.config.get_parameter("elastic_transform_p")))
-        if self.config.get_parameter("grid_distortion") is True:
-            distortion_augmentation.append(GridDistortion(p = self.config.get_parameter("grid_distortion_p")))
-        
-        augmentation_list.append(OneOf(distortion_augmentation, p = self.config.get_parameter("distortion_group_p")))
-    
-    if self.config.get_parameter("brightness_contrast_group") is True:
-        contrast_augmentation = []
-        if self.config.get_parameter("clahe") is True:
-            contrast_augmentation.append(CLAHE())
-        if self.config.get_parameter("sharpen") is True:
-            contrast_augmentation.append(IAASharpen())
-        if self.config.get_parameter("random_brightness_contrast") is True:
-            contrast_augmentation.append(RandomBrightnessContrast())
-       
-        augmentation_list.append(OneOf(contrast_augmentation, p = self.config.get_parameter("brightness_contrast_group_p")))
-        
-    augmentation_list.append(RandomCrop(self.config.get_parameter("tile_size")[0], self.config.get_parameter("tile_size")[1], always_apply=True))
-    
-    return Compose(augmentation_list, p = p)
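For reference, a hand-written pipeline of the same shape as the one this method assembles from the config (the probabilities and crop size here are illustrative, not the config defaults):

    from albumentations import (Blur, Compose, Flip, MedianBlur, MotionBlur,
                                OneOf, RandomCrop, RandomRotate90)

    augmentor = Compose([
        RandomRotate90(p=0.9),
        Flip(),
        OneOf([MotionBlur(), MedianBlur(blur_limit=3), Blur(blur_limit=3)], p=0.3),
        RandomCrop(512, 512, always_apply=True),
    ], p=0.9)

    # targets must be named "image" and "mask", as in augment_images above
    augmented = augmentor(image=image, mask=mask)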
-
-
-
-def get_class_id(self, class_name)
-
-

Returns the class id and adds class to list if not in list of classes.

-

Parameters

-
-
class_name : str
-
Identity of class that will be associated with the class id
-
-

Returns

-
-
int
-
Class id
-
-
-Expand source code
-def get_class_id(self, class_name):
-    """Returns the class id and adds class to list if not in list of classes.
-
-    Parameters
-    ----------
-    class_name : str
-        Identity of class that will be associated with the class id
-        
-    Returns
-    ----------
-    int
-        Class id
-    """
-    
-    if len(self.classes) == 0:
-        self.classes.append({"class": class_name, "id": 0})
-        return 0
-    
-    for class_info in self.classes:
-        # if the class already exists, return its id
-        if class_info["class"] == class_name:
-            return class_info["id"]
-
-    new_id = len(self.classes)
-    self.classes.append({"class": class_name, "id": new_id})
-    return new_id
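Each new class receives the next free id, so an id is simply the position of its class in self.classes. Assuming ds is any object mixing in Dataset:

    ds.get_class_id("nucleus")     # 0: first class registered
    ds.get_class_id("membrane")    # 1: appended with the next id
    ds.get_class_id("nucleus")     # 0: already registered, id is looked up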
-
-
-
-def load_dataset(self, dataset_dir=None, tiled=False)
-
-

Loads dataset from dataset_dir

-

Parameters

-
-
dataset_dir : str or None, optional
-
Folder to load the dataset from. If None, dataset_dir is obtained from the config file
-
tiled : bool, optional
-
To set if tiling function is used
-
-
-Expand source code
-def load_dataset(self, dataset_dir = None, tiled = False):
-    """Loads dataset from ``dataset_dir``
-
-    Parameters
-    ----------
-    dataset_dir : str or None, optional
-        Folder to load the dataset from. If None, ``dataset_dir`` is obtained from the config file
-        
-    tiled : bool, optional
-        To set if tiling function is used
-    """
-    
-    # update dataset_dir if specified. If not, load dataset_dir from config file
-    if dataset_dir is None:
-        dataset_dir = self.config.get_parameter("dataset_dir")
-    else:
-        self.config.update_parameter(self.config.find_key("dataset_dir"), dataset_dir)
-    
-    image_dirs = next(os.walk(dataset_dir))[1]
-    image_dirs = [f for f in image_dirs if not f[0] == '.']
-    
-    for img_dir in image_dirs:
-        # images
-        image = self.load_image(os.path.join(dataset_dir, img_dir), subfolder = self.config.get_parameter("image_subfolder"))
-        
-        # percentile normalization
-        if self.config.get_parameter("percentile_normalization"):
-            image, _, _ = self.percentile_normalization(image, in_bound = self.config.get_parameter("percentile"))
-        
-        if tiled is True:
-            tile_image_list, num_rows, num_cols, padding = self.tile_image(image, self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size"))
-            self.config.update_parameter(["images","num_rows"], num_rows)
-            self.config.update_parameter(["images","num_cols"], num_cols)
-            self.config.update_parameter(["images","padding"], padding)
-            self.train_images.extend(tile_image_list)
-        else:
-            self.train_images.extend([image,])
-        
-        #ground_truth
-        ground_truth, class_id = self.load_ground_truth(os.path.join(dataset_dir, img_dir), subfolder = self.config.get_parameter("ground_truth_subfolder"))
-        if tiled is True:
-            tile_ground_truth_list, _, _, _ = self.tile_image(ground_truth[0], self.config.get_parameter("tile_size"), self.config.get_parameter("tile_overlap_size"))
-            self.train_ground_truth.extend(tile_ground_truth_list)
-        else:
-            self.train_ground_truth.extend(ground_truth)
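The expected on-disk layout has one subdirectory per image, each containing the subfolders named by image_subfolder and ground_truth_subfolder in the config ('Images' and 'Masks' by default). The mask filename prefix before the first underscore is used as the class name; the folder and file names below are illustrative:

    data/train/
        cell_001/
            Images/cell_001.tif
            Masks/nucleus_cell_001.tif
        cell_002/
            Images/cell_002.tif
            Masks/nucleus_cell_002.tif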
-
-
-
-def sanity_check(self, image_index)
-
-

Plots the augmented image and ground_truth to check if everything is ok.

-

Parameters

-
-
image_index : int
-
Index of the image and its corresponding ground_truth
-
-
-Expand source code
-def sanity_check(self, image_index):
-    """Plots the augmented image and ground_truth to check if everything is ok.
-
-    Parameters
-    ----------
-    image_index : int
-        Index of the image and its corresponding ground_truth
-    """
-    
-    image = self.aug_images[image_index][:,:,0]
-    ground_truth = self.aug_ground_truth[image_index][:,:,0]
-
-    plt.figure(figsize=(14, 14))
-    plt.axis('off')
-    plt.imshow(image, cmap='gray', 
-               norm=None, interpolation=None)
-    plt.show()
-
-    plt.figure(figsize=(14, 14))
-    plt.axis('off')
-    plt.imshow(ground_truth, cmap='gray', 
-               norm=None, interpolation=None)
-    plt.show()
-
-
-
-

Inherited members

- -
-
-
-
- -
\ No newline at end of file
diff --git a/html/models/internals/image_functions.html b/html/models/internals/image_functions.html
deleted file mode 100644
index 5cd254f..0000000
--- a/html/models/internals/image_functions.html
+++ /dev/null
@@ -1,1340 +0,0 @@
-
-
-

Module models.internals.image_functions

-
-
-
-Expand source code
-import os
-import glob
-import sys
-import warnings
-
-import math
-import numpy as np
-
-#TODO: change to cv2?
-import skimage
-import skimage.io as skio
-
-class Image_Functions():
-    def list_images(self, image_dir, image_ext = '*.tif'):
-        """List images in the directory with the given file extension
-
-        Parameters
-        ----------
-        image_dir : `str`
-            Directory to look for image files
-        image_ext : `str`, optional
-            [Default: '*.tif'] File extension of the image file
-            
-        Returns
-        ----------
-        image_list : `list`
-            List of images found in the directory with the given file extension
-            
-        Notes
-        ----------
-        For Linux-based systems, please ensure that the file extensions are either all lowercase or all uppercase.
-        """
-        # to bypass case sensitivity of file extensions in linux and possibly other systems
-        if sys.platform in ["win32",]:
-            image_extension = [image_ext]
-        else:
-            image_extension = [image_ext.lower(),image_ext.upper()]
-        
-        image_list = []
-        for ext in image_extension:
-            image_list.extend(glob.glob(os.path.join(image_dir,ext)))
-            
-        return image_list
-    
-    #######################
-    # Image IO functions
-    #######################
-    def load_image(self, image_path, subfolder = 'Images', image_index = 0, image_ext = '*.tif'):
-        """Loads images found in ``image_path``
-
-        Parameters
-        ----------
-        image_path : `str`
-            Path to look for image files
-        subfolder : `str`, optional
-            [Default: 'Images'] Subfolder in which to look for the image files
-        image_index : `int`, optional
-            [Default: 0] Index of image to load
-        image_ext : `str`, optional
-            [Default: '*.tif'] File extension of the image file
-            
-        Returns
-        ----------
-        image : `array_like`
-            Loaded image
-            
-        Notes
-        ----------
-        Only one image from each directory is loaded.
-        """
-        if os.path.isdir(image_path) is True:
-            image_list = self.list_images(os.path.join(image_path, subfolder), image_ext = image_ext)
-            if len(image_list) > 1:
-               warnings.warn("More that 1 image found in directory. Loading {}".format(image_list[image_index]))
-            # Load image
-            image = skio.imread(image_list[image_index])
-        else:
-            image = skio.imread(image_path)
-            
-        return image
-        
-    def load_ground_truth(self, image_path, subfolder = 'Masks', image_ext = '*.tif'):
-        """Loads ground truth images found in ``image_path`` and performs erosion/dilation/inversion if needed
-
-        Parameters
-        ----------
-        image_path : `str`
-            Path to look for ground truth images
-        subfolder : `str`, optional
-            [Default: 'Masks'] Subfolder in which to look for the ground truth images
-        image_ext : `str`, optional
-            [Default: '*.tif'] File extension of ground truth image file
-
-        Returns
-        ----------
-        output_ground_truth : `list`
-            List of ground truth images found in the directory with the given file extension
-            
-        class_ids : `list`
-            List of class ids of the ground truth images
-        """
-        image_list = self.list_images(os.path.join(image_path, subfolder), image_ext = image_ext)
-        
-        output_ground_truth = []
-        class_ids = []
-        
-        for ground_truth_path in image_list:
-            # add class if not in list
-            ground_truth_name = ground_truth_path.split('\\')[-1]
-            class_name = ground_truth_name.split('_')[0]
-            
-            # obtain class_id
-            class_ids.append(self.get_class_id(class_name))
-            
-            # Load image
-            ground_truth_img = skio.imread(ground_truth_path)
-            
-            # perform erosion so that the borders will still be there after augmentation
-            if self.config.get_parameter("use_binary_erosion") is True:
-                from skimage.morphology import binary_erosion, disk
-                # sets dtype back to unsigned integer in order for some augmentations to work
-                ground_truth_dtype = ground_truth_img.dtype
-                ground_truth_img = binary_erosion(ground_truth_img, disk(self.config.get_parameter("disk_size")))
-                ground_truth_img = ground_truth_img.astype(ground_truth_dtype)
-            
-            if self.config.get_parameter("use_binary_dilation") is True:
-                from skimage.morphology import binary_dilation, disk
-                ground_truth_dtype = ground_truth_img.dtype
-                ground_truth_img = binary_dilation(ground_truth_img, disk(self.config.get_parameter("disk_size")))
-                ground_truth_img = ground_truth_img.astype(ground_truth_dtype)
-            
-            # perform inversion of ground_truth if needed
-            if self.config.get_parameter("invert_ground_truth") is True:
-                ground_truth_img = skimage.util.invert(ground_truth_img)
-                
-            output_ground_truth.append(ground_truth_img)
-            
-        return output_ground_truth, class_ids
-    
-    def reshape_image(self, image):
-        """Reshapes the image to the correct dimenstions for Unet
-
-        Parameters
-        ----------
-        image : `array_like`
-            Image to be reshaped
-
-        Returns
-        ----------
-        image : `array_like`
-            Reshaped image 
-        """
-        h, w = image.shape[:2]
-        image = np.reshape(image, (h, w, -1))
-        return image
-    
-    #######################
-    # Image padding
-    #######################
-    def pad_image(self, image, image_size, mode = 'constant'):
-        """Pad image to specified image_size
-
-        Parameters
-        ----------
-        image : `array_like`
-            Image to be padded
-        image_size : `list`
-            Final size of padded image
-        mode : `str`, optional
-            [Default: 'constant'] Mode to pad the image
-
-        Returns
-        ----------
-        image : `array_like`
-            Padded image
-            
-        padding : `list`
-            List containing the number of pixels padded to each direction
-        """
-        h, w = image.shape[:2]
-        
-        top_pad = (image_size[0] - h) // 2
-        bottom_pad = image_size[0] - h - top_pad
-            
-        left_pad = (image_size[1] - w) // 2
-        right_pad = image_size[1] - w - left_pad
-
-        padding = ((top_pad, bottom_pad), (left_pad, right_pad))
-        image = np.pad(image, padding, mode = mode, constant_values=0)
-        
-        return image, padding
-    
-    def remove_pad_image(self, image, padding):
-        """Removes pad from image
-
-        Parameters
-        ----------
-        image : `array_like`
-            Padded image
-        padding : `list`
-            List containing the number of padded pixels in each direction
-
-        Returns
-        ----------
-        image : `array_like`
-            Image without padding
-        """
-        
-        h, w = image.shape[:2]
-        
-        return image[padding[0][0]:h-padding[0][1], padding[1][0]:w-padding[1][1]]
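-    # A minimal round-trip sketch for the two methods above (shapes illustrative):
-    #     padded, padding = self.pad_image(image, image_size=(128, 128))
-    #     restored = self.remove_pad_image(padded, padding)
-    #     # restored has the original height and width again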
-    
-    #######################
-    # Tiling functions
-    #######################
-    def tile_image(self, image, tile_size, tile_overlap_size):
-        """Converts an image into a list of tiled images
-
-        Parameters
-        ----------
-        image : `array_like`
-            Image to be tiled
-        tile_size : `list`
-            Size of each individual tile
-        tile_overlap_size : `list`
-            Amount of overlap (in pixels) between each tile
-
-        Returns
-        ----------
-        tile_image_list : `list`
-            List of tiles as reshaped arrays
-        num_rows : `int`
-            Number of rows of tiles
-        num_cols : `int`
-            Number of cols of tiles
-        padding : `list`
-            Padding applied to fit the tile grid
-        """
-        image_height, image_width = image.shape[:2]
-        tile_height = tile_size[0] - tile_overlap_size[0] * 2
-        tile_width = tile_size[1] - tile_overlap_size[1] * 2
-        
-        if image_height <= tile_height and image_width <= tile_width:
-            return [self.reshape_image(image)], 1, 1, None
-        
-        num_rows = math.ceil(image_height/tile_height)
-        num_cols = math.ceil(image_width/tile_width)
-        num_tiles = num_rows*num_cols
-        
-        
-        # pad image to fit tile size
-        image, padding = self.pad_image(image, (tile_height*num_rows + tile_overlap_size[0] * 2, tile_width*num_cols + tile_overlap_size[1]*2))
-        
-        tile_image_list = []
-        
-        for tile_no in range(num_tiles):
-            tile_x_start = (tile_no // num_rows) * tile_width
-            tile_x_end = tile_x_start + tile_size[1]
-            
-            tile_y_start = (tile_no % num_rows) * tile_height
-            tile_y_end = tile_y_start + tile_size[0]
-            
-            tile_image = image[tile_y_start: tile_y_end, tile_x_start:tile_x_end]
-            
-            # ensure input into unet is of correct shape
-            tile_image = self.reshape_image(tile_image)
-            
-            tile_image_list.append(tile_image)
-            
-        return tile_image_list, num_rows, num_cols, padding
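-    # Worked example: a 1024x1024 image tiled with tile_size (512, 512) and
-    # tile_overlap_size (32, 32) has an effective stride of 512 - 2*32 = 448,
-    # so num_rows = num_cols = ceil(1024/448) = 3, giving 9 overlapping tiles.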
-    
-    def untile_image(self, tile_list, tile_size, tile_overlap_size, num_rows, num_cols, padding): 
-        """Stitches a list of tiled images back into a single image
-
-        Parameters
-        ----------
-        tile_list : `list`
-            List of tiled images
-        tile_size : `list`
-            Size of each individual tile
-        tile_overlap_size : `list`
-            Amount of overlap (in pixels) between each tile
-        num_rows : `int`
-            Number of rows of tiles
-        num_cols : `int`
-            Number of cols of tiles
-        padding : `list`
-            Amount of padding used during tiling
-
-        Returns
-        ----------
-        image : `array_like`
-            Image without padding
-        """
-        if num_rows == 1 and num_cols == 1:
-            image = tile_list[0]
-            
-            if padding is not None:
-                image = self.remove_pad_image(image, padding = padding)
-                
-            return image
-              
-        tile_height = tile_size[0] - tile_overlap_size[0] * 2
-        tile_width = tile_size[1] - tile_overlap_size[1] * 2
-        
-        num_tiles = num_rows*num_cols
-        
-        for col in range(num_cols):
-            for row in range(num_rows):
-                tile_image = tile_list[num_rows*col + row][:,:,0]
-                tile_image = tile_image[tile_overlap_size[0]:min(-tile_overlap_size[0],-1),tile_overlap_size[1]:min(-tile_overlap_size[1],-1)]
-                if row == 0:
-                    image_col = np.array(tile_image)
-                else:
-                    image_col = np.vstack((image_col, tile_image))
-            
-            if col == 0:
-                image = image_col
-            else:
-                image = np.hstack((image, image_col))
-        
-        image, _ = self.pad_image(image, image_size = (tile_height * num_rows + tile_overlap_size[0] * 2, tile_width * num_cols + tile_overlap_size[1]*2))
-        
-        if padding is not None:
-            image = self.remove_pad_image(image, padding = padding)
-            
-        return image
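-    # Note: untiling pads the stitched mosaic back to the tiled size and then
-    # removes the padding recorded by tile_image, recovering the original
-    # height and width of the input image.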
-    
-    
-    #######################
-    # Image normalization
-    #######################
-    def percentile_normalization(self, image, in_bound=[3, 99.8]):
-        """Performs percentile normalization on the image
-
-        Parameters
-        ----------
-        image : `array_like`
-            Image to be normalized
-        in_bound : `list`, optional
-            [Default: [3, 99.8]] Lower and upper percentiles used to normalize the image
-
-        Returns
-        ----------
-        image : `array_like`
-            Normalized image
-            
-        image_min : `float`
-            Value of the lower percentile bound of ``image``
-            
-        image_max : `float`
-            Value of the upper percentile bound of ``image``
-        """
-        image_min = np.percentile(image, in_bound[0])
-        image_max = np.percentile(image, in_bound[1])
-        image = (image - image_min)/(image_max - image_min)
-
-        return image, image_min, image_max
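A quick numeric check of the normalization, with values chosen by hand (the output is deliberately not clipped, so values outside the percentile bounds fall outside [0, 1]):

    import numpy as np

    image = np.array([0., 25., 50., 75., 100.])
    lo = np.percentile(image, 3)            # 3.0
    hi = np.percentile(image, 99.8)         # 99.8
    normalized = (image - lo) / (hi - lo)   # first entry is ~ -0.031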
-
-
-
-
-
-
-
-
-
-

Classes

-
-
-class Image_Functions(*args, **kwargs)
-
-
-
-Expand source code
-class Image_Functions():
-    def list_images(self, image_dir, image_ext = '*.tif'):
-        """List images in the directory with the given file extension
-
-        Parameters
-        ----------
-        image_dir : `str`
-            Directory to look for image files
-        image_ext : `str`, optional
-            [Default: '*.tif'] File extension of the image file
-            
-        Returns
-        ----------
-        image_list : `list`
-            List of images found in the directory with the given file extension
-            
-        Notes
-        ----------
-        For Linux-based systems, please ensure that the file extensions are either all lowercase or all uppercase.
-        """
-        # to bypass case sensitivity of file extensions in linux and possibly other systems
-        if sys.platform in ["win32",]:
-            image_extension = [image_ext]
-        else:
-            image_extension = [image_ext.lower(),image_ext.upper()]
-        
-        image_list = []
-        for ext in image_extension:
-            image_list.extend(glob.glob(os.path.join(image_dir,ext)))
-            
-        return image_list
-    
-    #######################
-    # Image IO functions
-    #######################
-    def load_image(self, image_path, subfolder = 'Images', image_index = 0, image_ext = '*.tif'):
-        """Loads images found in ``image_path``
-
-        Parameters
-        ----------
-        image_path : `str`
-            Path to look for image files
-        subfolder : `str`, optional
-            [Default: 'Images'] Subfolder in which to look for the image files
-        image_index : `int`, optional
-            [Default: 0] Index of image to load
-        image_ext : `str`, optional
-            [Default: '*.tif'] File extension of the image file
-            
-        Returns
-        ----------
-        image : `array_like`
-            Loaded image
-            
-        Notes
-        ----------
-        Only one image from each directory is loaded.
-        """
-        if os.path.isdir(image_path) is True:
-            image_list = self.list_images(os.path.join(image_path, subfolder), image_ext = image_ext)
-            if len(image_list) > 1:
-                warnings.warn("More than 1 image found in directory. Loading {}".format(image_list[image_index]))
-            # Load image
-            image = skio.imread(image_list[image_index])
-        else:
-            image = skio.imread(image_path)
-            
-        return image
-        
-    def load_ground_truth(self, image_path, subfolder = 'Masks', image_ext = '*.tif'):
-        """Loads ground truth images found in ``image_path`` and performs erosion/dilation/inversion if needed
-
-        Parameters
-        ----------
-        image_path : `str`
-            Path to look for ground truth images
-        subfolder : `str`, optional
-            [Default: 'Masks'] Subfolder in which to look for the ground truth images
-        image_ext : `str`, optional
-            [Default: '*.tif'] File extension of ground truth image file
-
-        Returns
-        ----------
-        output_ground_truth : `list`
-            List of ground truth images found in the directory with the given file extension
-            
-        class_ids : `list`
-            List of class ids of the ground truth images
-        """
-        image_list = self.list_images(os.path.join(image_path, subfolder), image_ext = image_ext)
-        
-        output_ground_truth = []
-        class_ids = []
-        
-        for ground_truth_path in image_list:
-            # derive the class name from the mask filename prefix, e.g. 'nucleus_mask.tif' -> 'nucleus'
-            ground_truth_name = os.path.basename(ground_truth_path)
-            class_name = ground_truth_name.split('_')[0]
-            
-            # obtain class_id
-            class_ids.append(self.get_class_id(class_name))
-            
-            # Load image
-            ground_truth_img = skio.imread(ground_truth_path)
-            
-            # perform erosion so that the borders will still be there after augmentation
-            if self.config.get_parameter("use_binary_erosion") is True:
-                from skimage.morphology import binary_erosion, disk
-                # sets dtype back to unsigned integer in order for some augmentations to work
-                ground_truth_dtype = ground_truth_img.dtype
-                ground_truth_img = binary_erosion(ground_truth_img, disk(self.config.get_parameter("disk_size")))
-                ground_truth_img = ground_truth_img.astype(ground_truth_dtype)
-            
-            if self.config.get_parameter("use_binary_dilation") is True:
-                from skimage.morphology import binary_dilation, disk
-                ground_truth_dtype = ground_truth_img.dtype
-                ground_truth_img = binary_dilation(ground_truth_img, disk(self.config.get_parameter("disk_size")))
-                ground_truth_img = ground_truth_img.astype(ground_truth_dtype)
-            
-            # perform inversion of ground_truth if needed
-            if self.config.get_parameter("invert_ground_truth") is True:
-                ground_truth_img = skimage.util.invert(ground_truth_img)
-                
-            output_ground_truth.append(ground_truth_img)
-            
-        return output_ground_truth, class_ids
-    
-    def reshape_image(self, image):
-        """Reshapes the image to the correct dimenstions for Unet
-
-        Parameters
-        ----------
-        image : `array_like`
-            Image to be reshaped
-
-        Returns
-        ----------
-        image : `array_like`
-            Reshaped image 
-        """
-        h, w = image.shape[:2]
-        image = np.reshape(image, (h, w, -1))
-        return image
-    
-    #######################
-    # Image padding
-    #######################
-    def pad_image(self, image, image_size, mode = 'constant'):
-        """Pad image to specified image_size
-
-        Parameters
-        ----------
-        image : `array_like`
-            Image to be padded
-        image_size : `list`
-            Final size of padded image
-        mode : `str`, optional
-            [Default: 'constant'] Mode to pad the image
-
-        Returns
-        ----------
-        image : `array_like`
-            Padded image
-            
-        padding : `list`
-            List containing the number of pixels padded to each direction
-        """
-        h, w = image.shape[:2]
-        
-        top_pad = (image_size[0] - h) // 2
-        bottom_pad = image_size[0] - h - top_pad
-            
-        left_pad = (image_size[1] - w) // 2
-        right_pad = image_size[1] - w - left_pad
-
-        padding = ((top_pad, bottom_pad), (left_pad, right_pad))
-        image = np.pad(image, padding, mode = mode, constant_values=0)
-        
-        return image, padding
-    
-    def remove_pad_image(self, image, padding):
-        """Removes pad from image
-
-        Parameters
-        ----------
-        image : `array_like`
-            Padded image
-        padding : `list`
-            List containing the number of padded pixels in each direction
-
-        Returns
-        ----------
-        image : `array_like`
-            Image without padding
-        """
-        
-        h, w = image.shape[:2]
-        
-        return image[padding[0][0]:h-padding[0][1], padding[1][0]:w-padding[1][1]]
-    
-    #######################
-    # Tiling functions
-    #######################
-    def tile_image(self, image, tile_size, tile_overlap_size):
-        """Converts an image into a list of tiled images
-
-        Parameters
-        ----------
-        image : `array_like`
-            Image to be tiled
-        tile_size : `list`
-            Size of each individual tile
-        tile_overlap_size : `list`
-            Amount of overlap (in pixels) between each tile
-
-        Returns
-        ----------
-        tile_image_list : `list`
-            List of tiles as reshaped arrays
-        num_rows : `int`
-            Number of rows of tiles
-        num_cols : `int`
-            Number of cols of tiles
-        padding : `list`
-            Padding applied to fit the tile grid
-        """
-        image_height, image_width = image.shape[:2]
-        tile_height = tile_size[0] - tile_overlap_size[0] * 2
-        tile_width = tile_size[1] - tile_overlap_size[1] * 2
-        
-        if image_height <= tile_height and image_width <= tile_width:
-            return [self.reshape_image(image)], 1, 1, None
-        
-        num_rows = math.ceil(image_height/tile_height)
-        num_cols = math.ceil(image_width/tile_width)
-        num_tiles = num_rows*num_cols
-        
-        
-        # pad image to fit tile size
-        image, padding = self.pad_image(image, (tile_height*num_rows + tile_overlap_size[0] * 2, tile_width*num_cols + tile_overlap_size[1]*2))
-        
-        tile_image_list = []
-        
-        for tile_no in range(num_tiles):
-            tile_x_start = (tile_no // num_rows) * tile_width
-            tile_x_end = tile_x_start + tile_size[1]
-            
-            tile_y_start = (tile_no % num_rows) * tile_height
-            tile_y_end = tile_y_start + tile_size[0]
-            
-            tile_image = image[tile_y_start: tile_y_end, tile_x_start:tile_x_end]
-            
-            # ensure input into unet is of correct shape
-            tile_image = self.reshape_image(tile_image)
-            
-            tile_image_list.append(tile_image)
-            
-        return tile_image_list, num_rows, num_cols, padding
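The grid arithmetic in isolation (a sketch with hypothetical sizes; note the listing above relies on `math` being imported at module level): each tile contributes tile_size - 2 * overlap pixels of unique content.

    import math

    image_height, image_width = 1024, 1024
    tile_size, tile_overlap_size = [512, 512], [32, 32]
    tile_height = tile_size[0] - tile_overlap_size[0] * 2   # 448 unique rows per tile
    tile_width = tile_size[1] - tile_overlap_size[1] * 2    # 448 unique columns per tile
    num_rows = math.ceil(image_height / tile_height)        # 3
    num_cols = math.ceil(image_width / tile_width)          # 3
    print(num_rows * num_cols)                              # 9 tiles of 512x512 each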
-    
-    def untile_image(self, tile_list, tile_size, tile_overlap_size, num_rows, num_cols, padding): 
-        """Stitches a list of tiled images back into a single image
-
-        Parameters
-        ----------
-        tile_list : `list`
-            List of tiled images
-        tile_size : `list`
-            Size of each individual tile
-        tile_overlap_size : `list`
-            Amount of overlap (in pixels) between each tile
-        num_rows : `int`
-            Number of rows of tiles
-        num_cols : `int`
-            Number of cols of tiles
-        padding : `list`
-            Amount of padding used during tiling
-
-        Returns
-        ----------
-        image : `array_like`
-            Stitched image, with the padding added during tiling removed
-        """
-        if num_rows == 1 and num_cols == 1:
-            image = tile_list[0]
-            
-            image = self.remove_pad_image(image, padding = padding)
-                
-            return image
-              
-        tile_height = tile_size[0] - tile_overlap_size[0] * 2
-        tile_width = tile_size[1] - tile_overlap_size[1] * 2
-        
-        num_tiles = num_rows*num_cols
-        
-        for col in range(num_cols):
-            for row in range(num_rows):
-                tile_image = tile_list[num_rows*col + row][:,:,0]
-                tile_image = tile_image[tile_overlap_size[0]:min(-tile_overlap_size[0],-1),tile_overlap_size[1]:min(-tile_overlap_size[1],-1)]
-                if row == 0:
-                    image_col = np.array(tile_image)
-                else:
-                    image_col = np.vstack((image_col, tile_image))
-            
-            if col == 0:
-                image = image_col
-            else:
-                image = np.hstack((image, image_col))
-        
-        image, _ = self.pad_image(image, image_size = (tile_height * num_rows + tile_overlap_size[0] * 2, tile_width * num_cols + tile_overlap_size[1]*2))
-        
-        if padding is not None:
-            image = self.remove_pad_image(image, padding = padding)
-            
-        return image
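The stitching loop above walks tiles in the same column-major order that tile_image produced them; a minimal check of that indexing convention:

    num_rows, num_cols = 3, 2
    for tile_no in range(num_rows * num_cols):
        col, row = tile_no // num_rows, tile_no % num_rows
        assert tile_no == num_rows * col + row   # ordering shared by both methods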
-    
-    
-    #######################
-    # Image normalization
-    #######################
-    def percentile_normalization(self, image, in_bound=[3, 99.8]):
-        """Performs percentile normalization on the image
-
-        Parameters
-        ----------
-        image : `array_like`
-            Image to be normalized
-        in_bound : `list`
-            Lower and upper percentiles used to normalize the image
-
-        Returns
-        ----------
-        image : `array_like`
-            Normalized image
-            
-        image_min : `float`
-            Value of ``image`` at the lower percentile
-            
-        image_max : `float`
-            Value of ``image`` at the upper percentile
-        """
-        image_min = np.percentile(image, in_bound[0])
-        image_max = np.percentile(image, in_bound[1])
-        image = (image - image_min)/(image_max - image_min)
-
-        return image, image_min, image_max
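An equivalent standalone numpy sketch (the random image is purely illustrative):

    import numpy as np

    image = np.random.randint(0, 65535, (256, 256)).astype(np.float32)
    image_min = np.percentile(image, 3)       # lower bound
    image_max = np.percentile(image, 99.8)    # upper bound
    normalized = (image - image_min) / (image_max - image_min)
    # values between the two percentiles land in [0, 1]; outliers fall
    # slightly outside, and this method does not clip them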
-
-

Subclasses

- -

Methods

-
-
-def list_images(self, image_dir, image_ext='*.tif') -
-
-

List images in the directory with the given file extension

-

Parameters

-
-
image_dir : str
-
Directory to look for image files
-
image_ext : str, optional
-
[Default: '*.tif'] File extension of the image file
-
-

Returns

-
-
image_list : list
-
List of images found in the directory with the given file extension
-
-

Notes

-

For Linux-based systems, please ensure that file extensions are either all lowercase or all uppercase.

-
- -Expand source code - -
def list_images(self, image_dir, image_ext = '*.tif'):
-    """List images in the directory with the given file extension
-
-    Parameters
-    ----------
-    image_dir : `str`
-        Directory to look for image files
-    image_ext : `str`, optional
-        [Default: '*.tif'] File extension of the image file
-        
-    Returns
-    ----------
-    image_list : `list`
-        List of images found in the directory with the given file extension
-        
-    Notes
-    ----------
-    For Linux-based systems, please ensure that file extensions are either all lowercase or all uppercase.
-    """
-    # to bypass case sensitivity of file extensions in linux and possibly other systems
-    if sys.platform in ["win32",]:
-        image_extension = [image_ext]
-    else:
-        image_extension = [image_ext.lower(),image_ext.upper()]
-    
-    image_list = []
-    for ext in image_extension:
-        image_list.extend(glob.glob(os.path.join(image_dir,ext)))
-        
-    return image_list
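A minimal standalone sketch of the same case-insensitive lookup (the directory is hypothetical; the listing above relies on `sys`, `glob`, and `os` being imported at module level):

    import glob, os, sys

    image_dir = "data/Images"   # hypothetical
    exts = ["*.tif"] if sys.platform == "win32" else ["*.tif", "*.TIF"]
    image_list = []
    for ext in exts:
        image_list.extend(glob.glob(os.path.join(image_dir, ext)))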
-
-
-
-def load_ground_truth(self, image_path, subfolder='Masks', image_ext='*.tif') -
-
-

Loads ground truth images found in image_path and performs erosion/dilation/inversion if needed

-

Parameters

-
-
image_path : str
-
Path to look for ground truth images
-
subfolder : str, optional
-
[Default: 'Masks'] Subfolder in which to look for the ground truth images
-
image_ext : str, optional
-
[Default: '*.tif'] File extension of ground truth image file
-
-

Returns

-
-
output_ground_truth : list
-
List of ground truth images found in the directory with the given file extension
-
class_ids : list
-
List of class ids of the ground truth images
-
-
- -Expand source code - -
def load_ground_truth(self, image_path, subfolder = 'Masks', image_ext = '*.tif'):
-    """Loads ground truth images found in ``image_path`` and performs erosion/dilation/inversion if needed
-
-    Parameters
-    ----------
-    image_path : `str`
-        Path to look for ground truth images
-    subfolder : `str`, optional
-        [Default: 'Masks'] Subfolder in which to look for the ground truth images
-    image_ext : `str`, optional
-        [Default: '*.tif'] File extension of ground truth image file
-
-    Returns
-    ----------
-    output_ground_truth : `list`
-        List of ground truth images found in the directory with the given file extension
-        
-    class_ids : `list`
-        List of class ids of the ground truth images
-    """
-    image_list = self.list_images(os.path.join(image_path, subfolder), image_ext = image_ext)
-    
-    output_ground_truth = []
-    class_ids = []
-    
-    for ground_truth_path in image_list:
-        # add class if not in list
-        ground_truth_name = os.path.basename(ground_truth_path)
-        class_name = ground_truth_name.split('_')[0]
-        
-        # obtain class_id
-        class_ids.append(self.get_class_id(class_name))
-        
-        # Load image
-        ground_truth_img = skio.imread(ground_truth_path)
-        
-        # perform erosion so that the borders will still be there after augmentation
-        if self.config.get_parameter("use_binary_erosion") is True:
-            from skimage.morphology import binary_erosion, disk
-            # sets dtype back to unsigned integer in order for some augmentations to work
-            ground_truth_dtype = ground_truth_img.dtype
-            ground_truth_img = binary_erosion(ground_truth_img, disk(self.config.get_parameter("disk_size")))
-            ground_truth_img = ground_truth_img.astype(ground_truth_dtype)
-        
-        if self.config.get_parameter("use_binary_dilation") is True:
-            from skimage.morphology import binary_dilation, disk
-            ground_truth_dtype = ground_truth_img.dtype
-            ground_truth_img = binary_dilation(ground_truth_img, disk(self.config.get_parameter("disk_size")))
-            ground_truth_img = ground_truth_img.astype(ground_truth_dtype)
-        
-        # perform inversion of ground_truth if needed
-        if self.config.get_parameter("invert_ground_truth") is True:
-            ground_truth_img = skimage.util.invert(ground_truth_img)
-            
-        output_ground_truth.append(ground_truth_img)
-        
-    return output_ground_truth, class_ids
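The optional erosion step in isolation, assuming a binary mask and a disk_size of 2 (path and disk size are hypothetical):

    import skimage.io as skio
    from skimage.morphology import binary_erosion, disk

    mask = skio.imread("Masks/class_0001.tif")   # hypothetical path
    mask_dtype = mask.dtype                      # keep dtype so augmentations still work
    mask = binary_erosion(mask, disk(2)).astype(mask_dtype)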
-
-
-
-def load_image(self, image_path, subfolder='Images', image_index=0, image_ext='*.tif') -
-
-

Loads images found in image_path

-

Parameters

-
-
image_path : str
-
Path to look for image files
-
subfolder : str, optional
-
[Default: 'Images'] Subfolder in which to look for the image files
-
image_index : int, optional
-
[Default: 0] Index of image to load
-
image_ext : str, optional
-
[Default: '*.tif'] File extension of the image file
-
-

Returns

-
-
image : array_like
-
Loaded image
-
-

Notes

-

Only one image from each directory is loaded.

-
- -Expand source code - -
def load_image(self, image_path, subfolder = 'Images', image_index = 0, image_ext = '*.tif'):
-    """Loads images found in ``image_path``
-
-    Parameters
-    ----------
-    image_path : `str`
-        Path to look for image files
-    subfolder : `str`, optional
-        [Default: 'Images'] Subfolder in which to look for the image files
-    image_index : `int`, optional
-        [Default: 0] Index of image to load
-    image_ext : `str`, optional
-        [Default: '*.tif'] File extension of the image file
-        
-    Returns
-    ----------
-    image : `array_like`
-        Loaded image
-        
-    Notes
-    ----------
-    Only one image from each directory is loaded.
-    """
-    if os.path.isdir(image_path) is True:
-        image_list = self.list_images(os.path.join(image_path, subfolder), image_ext = image_ext)
-        if len(image_list) > 1:
-           warnings.warn("More that 1 image found in directory. Loading {}".format(image_list[image_index]))
-        # Load image
-        image = skio.imread(image_list[image_index])
-    else:
-        image = skio.imread(image_path)
-        
-    return image
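A hedged usage sketch, assuming `model` is an instance of a class that mixes in these functions and the folder layout described above:

    image = model.load_image("data/train/sample01")             # reads from the Images/ subfolder
    image = model.load_image("data/train/sample01/cell.tif")    # or point directly at a file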
-
-
-
-def pad_image(self, image, image_size, mode='constant') -
-
-

Pad image to specified image_size

-

Parameters

-
-
image : array_like
-
Image to be padded
-
image_size : list
-
Final size of padded image
-
mode : str, optional
-
[Default: 'constant'] Mode to pad the image
-
-

Returns

-
-
image : array_like
-
Padded image
-
padding : list
-
List containing the number of pixels padded to each direction
-
-
- -Expand source code - -
def pad_image(self, image, image_size, mode = 'constant'):
-    """Pad image to specified image_size
-
-    Parameters
-    ----------
-    image : `array_like`
-        Image to be padded
-    image_size : `list`
-        Final size of padded image
-    mode : `str`, optional
-        [Default: 'constant'] Mode to pad the image
-
-    Returns
-    ----------
-    image : `array_like`
-        Padded image
-        
-    padding : `list`
-        List containing the number of pixels padded to each direction
-    """
-    h, w = image.shape[:2]
-    
-    top_pad = (image_size[0] - h) // 2
-    bottom_pad = image_size[0] - h - top_pad
-        
-    left_pad = (image_size[1] - w) // 2
-    right_pad = image_size[1] - w - left_pad
-
-    padding = ((top_pad, bottom_pad), (left_pad, right_pad))
-    image = np.pad(image, padding, mode = mode, constant_values=0)
-    
-    return image, padding
-
-
-
-def percentile_normalization(self, image, in_bound=[3, 99.8]) -
-
-

Performs percentile normalization on the image

-

Parameters

-
-
image : array_like
-
Image to be normalized
-
in_bound : list
-
Lower and upper percentiles used to normalize the image
-
-

Returns

-
-
image : array_like
-
Normalized image
-
image_min : float
-
Value of image at the lower percentile
-
image_max : float
-
Value of image at the upper percentile
-
-
- -Expand source code - -
def percentile_normalization(self, image, in_bound=[3, 99.8]):
-    """Performs percentile normalization on the image
-
-    Parameters
-    ----------
-    image : `array_like`
-        Image to be normalized
-    in_bound : `list`
-        Lower and upper percentiles used to normalize the image
-
-    Returns
-    ----------
-    image : `array_like`
-        Normalized image
-        
-    image_min : `float`
-        Value of ``image`` at the lower percentile
-        
-    image_max : `float`
-        Value of ``image`` at the upper percentile
-    """
-    image_min = np.percentile(image, in_bound[0])
-    image_max = np.percentile(image, in_bound[1])
-    image = (image - image_min)/(image_max - image_min)
-
-    return image, image_min, image_max
-
-
-
-def remove_pad_image(self, image, padding) -
-
-

Removes pad from image

-

Parameters

-
-
image : array_like
-
Padded image
-
padding : list
-
List containing the number of padded pixels in each direction
-
-

Returns

-
-
image : array_like
-
Image without padding
-
-
- -Expand source code - -
def remove_pad_image(self, image, padding):
-    """Removes pad from image
-
-    Parameters
-    ----------
-    image : `array_like`
-        Padded image
-    padding : `list`
-        List containing the number of padded pixels in each direction
-
-    Returns
-    ----------
-    image : `array_like`
-        Image without padding
-    """
-    
-    h, w = image.shape[:2]
-    
-    return image[padding[0][0]:h-padding[0][1], padding[1][0]:w-padding[1][1]]
-
-
-
-def reshape_image(self, image) -
-
-

Reshapes the image to the correct dimensions for Unet

-

Parameters

-
-
image : array_like
-
Image to be reshaped
-
-

Returns

-
-
image : array_like
-
Reshaped image
-
-
- -Expand source code - -
def reshape_image(self, image):
-    """Reshapes the image to the correct dimenstions for Unet
-
-    Parameters
-    ----------
-    image : `array_like`
-        Image to be reshaped
-
-    Returns
-    ----------
-    image : `array_like`
-        Reshaped image 
-    """
-    h, w = image.shape[:2]
-    image = np.reshape(image, (h, w, -1))
-    return image
-
-
-
-def tile_image(self, image, tile_size, tile_overlap_size) -
-
-

Converts an image into a list of tiled images

-

Parameters

-
-
image : array_like
-
Image to be tiled
-
tile_size : list
-
Size of each individual tile
-
tile_overlap_size : list
-
Amount of overlap (in pixels) between each tile
-
-

Returns

-
-
tile_image_list : list
-
List of tiled images; if the image fits in a single tile, the unmodified image is returned instead
-
num_rows, num_cols : int
-
Grid dimensions of the tiling
-
padding : list
-
Padding added to fit the tile grid
-
-
- -Expand source code - -
def tile_image(self, image, tile_size, tile_overlap_size):
-    """Converts an image into a list of tiled images
-
-    Parameters
-    ----------
-    image : `array_like`
-        Image to be tiled
-    tile_size : `list`
-        Size of each individual tile
-    tile_overlap_size : `list`
-        Amount of overlap (in pixels) between each tile
-
-    Returns
-    ----------
-    tile_image_list : `list`
-        List of tiled images; if the image fits in a single tile, the
-        unmodified image is returned instead
-    num_rows : `int`
-        Number of rows of tiles
-    num_cols : `int`
-        Number of columns of tiles
-    padding : `list`
-        Padding added to fit the tile grid, needed later by ``untile_image``
-    """
-    image_height, image_width = image.shape[:2]
-    tile_height = tile_size[0] - tile_overlap_size[0] * 2
-    tile_width = tile_size[1] - tile_overlap_size[1] * 2
-    
-    if image_height <= tile_height and image_width <= tile_width:
-        return image
-    
-    num_rows = math.ceil(image_height/tile_height)
-    num_cols = math.ceil(image_width/tile_width)
-    num_tiles = num_rows*num_cols
-    
-    
-    # pad image to fit tile size
-    image, padding = self.pad_image(image, (tile_height*num_rows + tile_overlap_size[0] * 2, tile_width*num_cols + tile_overlap_size[1]*2))
-    
-    tile_image_list = []
-    
-    for tile_no in range(num_tiles):
-        tile_x_start = (tile_no // num_rows) * tile_width
-        tile_x_end = tile_x_start + tile_size[1]
-        
-        tile_y_start = (tile_no % num_rows) * tile_height
-        tile_y_end = tile_y_start + tile_size[0]
-        
-        tile_image = image[tile_y_start: tile_y_end, tile_x_start:tile_x_end]
-        
-        # ensure input into unet is of correct shape
-        tile_image = self.reshape_image(tile_image)
-        
-        tile_image_list.append(tile_image)
-        
-    return tile_image_list, num_rows, num_cols, padding
-
-
-
-def untile_image(self, tile_list, tile_size, tile_overlap_size, num_rows, num_cols, padding) -
-
-

Stitches a list of tiled images back into a single image

-

Parameters

-
-
tile_list : list
-
List of tiled images
-
tile_size : list
-
Size of each individual tile
-
tile_overlap_size : list
-
Amount of overlap (in pixels) between each tile
-
num_rows : int
-
Number of rows of tiles
-
num_cols : int
-
Number of cols of tiles
-
padding : list
-
Amount of padding used during tiling
-
-

Returns

-
-
image : array_like
-
Stitched image, with the padding added during tiling removed
-
-
- -Expand source code - -
def untile_image(self, tile_list, tile_size, tile_overlap_size, num_rows, num_cols, padding): 
-    """Stitches a list of tiled images back into a single image
-
-    Parameters
-    ----------
-    tile_list : `list`
-        List of tiled images
-    tile_size : `list`
-        Size of each individual tile
-    tile_overlap_size : `list`
-        Amount of overlap (in pixels) between each tile
-    num_rows : `int`
-        Number of rows of tiles
-    num_cols : `int`
-        Number of cols of tiles
-    padding : `list`
-        Amount of padding used during tiling
-
-    Returns
-    ----------
-    image : `array_like`
-        Stitched image, with the padding added during tiling removed
-    """
-    if num_rows == 1 and num_cols == 1:
-        image = tile_list[0]
-        
-        image = self.remove_pad_image(image, padding = padding)
-            
-        return image
-          
-    tile_height = tile_size[0] - tile_overlap_size[0] * 2
-    tile_width = tile_size[1] - tile_overlap_size[1] * 2
-    
-    num_tiles = num_rows*num_cols
-    
-    for col in range(num_cols):
-        for row in range(num_rows):
-            tile_image = tile_list[num_rows*col + row][:,:,0]
-            tile_image = tile_image[tile_overlap_size[0]:min(-tile_overlap_size[0],-1),tile_overlap_size[1]:min(-tile_overlap_size[1],-1)]
-            if row == 0:
-                image_col = np.array(tile_image)
-            else:
-                image_col = np.vstack((image_col, tile_image))
-        
-        if col == 0:
-            image = image_col
-        else:
-            image = np.hstack((image, image_col))
-    
-    image, _ = self.pad_image(image, image_size = (tile_height * num_rows + tile_overlap_size[0] * 2, tile_width * num_cols + tile_overlap_size[1]*2))
-    
-    if padding is not None:
-        image = self.remove_pad_image(image, padding = padding)
-        
-    return image
-
-
-
-
-
-
-
- -
- - - - - \ No newline at end of file diff --git a/html/models/internals/index.html b/html/models/internals/index.html deleted file mode 100644 index e61d3fa..0000000 --- a/html/models/internals/index.html +++ /dev/null @@ -1,86 +0,0 @@ - - - - - - -models.internals API documentation - - - - - - - - - -
- - -
- - - - - \ No newline at end of file diff --git a/html/models/internals/losses.html b/html/models/internals/losses.html deleted file mode 100644 index 0500ea2..0000000 --- a/html/models/internals/losses.html +++ /dev/null @@ -1,705 +0,0 @@ - - - - - - -models.internals.losses API documentation - - - - - - - - - -
-
-
-

Module models.internals.losses

-
-
-
- -Expand source code - -
from keras import backend as K
-from keras.losses import binary_crossentropy, mean_absolute_error
-import tensorflow as tf
-
-def jaccard_distance_loss(y_true, y_pred, smooth=100):
-    """
-    Jaccard = (|X & Y|)/ (|X|+ |Y| - |X & Y|)
-            = sum(|A*B|)/(sum(|A|)+sum(|B|)-sum(|A*B|))
-    
-    The jaccard distance loss is useful for unbalanced datasets. This has been
-    shifted so it converges on 0 and is smoothed to avoid exploding or disappearing
-    gradients.
-    
-    Ref: https://en.wikipedia.org/wiki/Jaccard_index
-    
-    @url: https://gist.github.com/wassname/f1452b748efcbeb4cb9b1d059dce6f96
-    @author: wassname
-    """
-    intersection = K.sum(y_true * y_pred, axis=-1)
-    sum_ = K.sum(y_true + y_pred, axis=-1)
-    jac = (intersection + smooth) / (sum_ - intersection + smooth)
-    return (1 - jac) * smooth
-
-def dice_coef(y_true, y_pred, smooth=1.):
-    """
-    Dice = (2*|X & Y|)/ (|X|+ |Y|)
-         =  2*sum(|A*B|)/(sum(A^2)+sum(B^2))
-    ref: https://arxiv.org/pdf/1606.04797v1.pdf
-    
-    from wassname as well
-    """
-    intersection = K.sum(y_true * y_pred, axis=-1)
-    return (2. * intersection + smooth) / (K.sum(K.square(y_true),-1) + K.sum(K.square(y_pred),-1) + smooth)
-
-def dice_coef_loss(y_true, y_pred):
-    return 1. - dice_coef(y_true, y_pred)
-
-def bce_dice_loss(y_true, y_pred):
-    return 1. - dice_coef(y_true, y_pred) + binary_crossentropy(y_true, y_pred)
-
-def bce_ssim_loss(y_true, y_pred):
-    return DSSIM_loss(y_true, y_pred) + binary_crossentropy(y_true, y_pred)
-
-# code download from: https://github.com/bermanmaxim/LovaszSoftmax
-def lovasz_grad(gt_sorted):
-    """
-    Computes gradient of the Lovasz extension w.r.t sorted errors
-    See Alg. 1 in paper
-    """
-    gts = tf.reduce_sum(gt_sorted)
-    intersection = gts - tf.cumsum(gt_sorted)
-    union = gts + tf.cumsum(1. - gt_sorted)
-    jaccard = 1. - intersection / union
-    jaccard = tf.concat((jaccard[0:1], jaccard[1:] - jaccard[:-1]), 0)
-    return jaccard
-
-
-# --------------------------- BINARY LOSSES ---------------------------
-
-def lovasz_hinge(logits, labels, per_image=True, ignore=None):
-    """
-    Binary Lovasz hinge loss
-      logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty)
-      labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
-      per_image: compute the loss per image instead of per batch
-      ignore: void class id
-    """
-    if per_image:
-        def treat_image(log_lab):
-            log, lab = log_lab
-            log, lab = tf.expand_dims(log, 0), tf.expand_dims(lab, 0)
-            log, lab = flatten_binary_scores(log, lab, ignore)
-            return lovasz_hinge_flat(log, lab)
-        losses = tf.map_fn(treat_image, (logits, labels), dtype=tf.float32)
-        loss = tf.reduce_mean(losses)
-    else:
-        loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore))
-    return loss
-
-
-def lovasz_hinge_flat(logits, labels):
-    """
-    Binary Lovasz hinge loss
-      logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
-      labels: [P] Tensor, binary ground truth labels (0 or 1)
-      ignore: label to ignore
-    """
-
-    def compute_loss():
-        labelsf = tf.cast(labels, logits.dtype)
-        signs = 2. * labelsf - 1.
-        errors = 1. - logits * tf.stop_gradient(signs)
-        errors_sorted, perm = tf.nn.top_k(errors, k=tf.shape(errors)[0], name="descending_sort")
-        gt_sorted = tf.gather(labelsf, perm)
-        grad = lovasz_grad(gt_sorted)
-        loss = tf.tensordot(tf.nn.relu(errors_sorted), tf.stop_gradient(grad), 1, name="loss_non_void")
-        return loss
-
-    # deal with the void prediction case (only void pixels)
-    loss = tf.cond(tf.equal(tf.shape(logits)[0], 0),
-                   lambda: tf.reduce_sum(logits) * 0.,
-                   compute_loss,
-                   strict=True,
-                   name="loss"
-                   )
-    return loss
-
-
-def flatten_binary_scores(scores, labels, ignore=None):
-    """
-    Flattens predictions in the batch (binary case)
-    Remove labels equal to 'ignore'
-    """
-    scores = tf.reshape(scores, (-1,))
-    labels = tf.reshape(labels, (-1,))
-    if ignore is None:
-        return scores, labels
-    valid = tf.not_equal(labels, ignore)
-    vscores = tf.boolean_mask(scores, valid, name='valid_scores')
-    vlabels = tf.boolean_mask(labels, valid, name='valid_labels')
-    return vscores, vlabels
-
-def lovasz_loss(y_true, y_pred):
-    y_true, y_pred = K.cast(K.squeeze(y_true, -1), 'int32'), K.cast(K.squeeze(y_pred, -1), 'float32')
-    #logits = K.log(y_pred / (1. - y_pred))
-    logits = y_pred #Jiaxin
-    loss = lovasz_hinge(logits, y_true, per_image = True, ignore = None)
-    return loss
-
-# Difference of Structural Similarity
-
-def DSSIM_loss(y_true, y_pred, k1=0.01, k2=0.03, kernel_size=3, max_value=1.0):
-    # There are additional parameters for this function
-    # Note: some of the 'modes' for edge behavior do not yet have a
-    # gradient definition in the Theano tree
-    #   and cannot be used for learning
-    
-    c1 = (k1 * max_value) ** 2
-    c2 = (k2 * max_value) ** 2
-
-    kernel = [kernel_size, kernel_size]  # note: unused; patch extraction below uses a fixed 5x5 window
-    y_true = K.reshape(y_true, [-1] + list(K.int_shape(y_pred)[1:]))
-    y_pred = K.reshape(y_pred, [-1] + list(K.int_shape(y_pred)[1:]))
-
-    patches_pred = tf.extract_image_patches(y_pred, [1, 5, 5, 1], [1, 2, 2, 1], [1, 1, 1, 1], "SAME")
-    patches_true = tf.extract_image_patches(y_true, [1, 5, 5, 1], [1, 2, 2, 1], [1, 1, 1, 1], "SAME")
-
-    # Reshape to get the var in the cells
-    bs, w, h, c = K.int_shape(patches_pred)
-    patches_pred = K.reshape(patches_pred, [-1, w, h, c])
-    patches_true = K.reshape(patches_true, [-1, w, h, c])
-    # Get mean
-    u_true = K.mean(patches_true, axis=-1)
-    u_pred = K.mean(patches_pred, axis=-1)
-    # Get variance
-    var_true = K.var(patches_true, axis=-1)
-    var_pred = K.var(patches_pred, axis=-1)
-    # Get std dev
-    covar_true_pred = K.mean(patches_true * patches_pred, axis=-1) - u_true * u_pred
-
-    ssim = (2 * u_true * u_pred + c1) * (2 * covar_true_pred + c2)
-    denom = ((K.square(u_true)
-              + K.square(u_pred)
-              + c1) * (var_pred + var_true + c2))
-    ssim /= denom  # no need for clipping, c1 and c2 make the denom non-zero
-    return K.mean((1.0 - ssim) / 2.0)
-
-def dssim_mae_loss(y_true, y_pred):
-    return DSSIM_loss(y_true, y_pred) + mean_absolute_error(y_true, y_pred)
-
-#MSSim
-#https://stackoverflow.com/questions/48744945/keras-ms-ssim-as-loss-function
-def keras_SSIM_cs(y_true, y_pred):
-    axis=None
-    gaussian = make_kernel(1.5)
-    x = tf.nn.conv2d(y_true, gaussian, strides=[1, 1, 1, 1], padding='SAME')
-    y = tf.nn.conv2d(y_pred, gaussian, strides=[1, 1, 1, 1], padding='SAME')
-
-    u_x=K.mean(x, axis=axis)
-    u_y=K.mean(y, axis=axis)
-
-    var_x=K.var(x, axis=axis)
-    var_y=K.var(y, axis=axis)
-
-    cov_xy=cov_keras(x, y, axis)
-
-    K1=0.01
-    K2=0.03
-    L=1  # depth of image (255 in case the image has a different scale)
-
-    C1=(K1*L)**2
-    C2=(K2*L)**2
-    C3=C2/2
-
-    l = ((2*u_x*u_y)+C1) / (K.pow(u_x,2) + K.pow(u_y,2) + C1)  # SSIM luminance term
-    c = ((2*K.sqrt(var_x)*K.sqrt(var_y))+C2) / (var_x + var_y + C2)
-    s = (cov_xy+C3) / (K.sqrt(var_x)*K.sqrt(var_y) + C3)
-
-    return [c,s,l]
-
-def keras_MS_SSIM(y_true, y_pred):
-    iterations = 5
-    x=y_true
-    y=y_pred
-    weight = [0.0448, 0.2856, 0.3001, 0.2363, 0.1333]
-    c=[]
-    s=[]
-    for i in range(iterations):
-        cs=keras_SSIM_cs(x, y)
-        c.append(cs[0])
-        s.append(cs[1])
-        l=cs[2]
-        if(i!=4):
-            x=tf.image.resize_images(x, (x.get_shape().as_list()[1]//(2**(i+1)), x.get_shape().as_list()[2]//(2**(i+1))))
-            y=tf.image.resize_images(y, (y.get_shape().as_list()[1]//(2**(i+1)), y.get_shape().as_list()[2]//(2**(i+1))))
-    c = tf.stack(c)
-    s = tf.stack(s)
-    cs = c*s
-
-    #Normalize: suggestion from https://github.com/jorge-pessoa/pytorch-msssim/issues/2 last comment to avoid NaN values
-    l=(l+1)/2
-    cs=(cs+1)/2
-
-    cs=cs**weight
-    cs = tf.reduce_prod(cs)
-    l=l**weight[-1]
-
-    ms_ssim = l*cs
-    ms_ssim = tf.where(tf.is_nan(ms_ssim), K.zeros_like(ms_ssim), ms_ssim)
-
-    return K.mean(ms_ssim)
-
-def mssim_mae_loss(y_true, y_pred):
-    return keras_MS_SSIM(y_true, y_pred) + mean_absolute_error(y_true, y_pred)
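A hedged sketch of wiring these losses into training, assuming `model` is an already-built keras.models.Model:

    model.compile(optimizer='adam', loss=bce_dice_loss, metrics=[dice_coef])
    # note: lovasz_loss passes y_pred straight through as logits (see above),
    # so it expects a model whose final layer outputs logits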
-
-
-
-
-
-
-
-

Functions

-
-
-def DSSIM_loss(y_true, y_pred, k1=0.01, k2=0.03, kernel_size=3, max_value=1.0) -
-
-
-
- -Expand source code - -
def DSSIM_loss(y_true, y_pred, k1=0.01, k2=0.03, kernel_size=3, max_value=1.0):
-    # There are additional parameters for this function
-    # Note: some of the 'modes' for edge behavior do not yet have a
-    # gradient definition in the Theano tree
-    #   and cannot be used for learning
-    
-    c1 = (k1 * max_value) ** 2
-    c2 = (k2 * max_value) ** 2
-
-    kernel = [kernel_size, kernel_size]  # note: unused; patch extraction below uses a fixed 5x5 window
-    y_true = K.reshape(y_true, [-1] + list(K.int_shape(y_pred)[1:]))
-    y_pred = K.reshape(y_pred, [-1] + list(K.int_shape(y_pred)[1:]))
-
-    patches_pred = tf.extract_image_patches(y_pred, [1, 5, 5, 1], [1, 2, 2, 1], [1, 1, 1, 1], "SAME")
-    patches_true = tf.extract_image_patches(y_true, [1, 5, 5, 1], [1, 2, 2, 1], [1, 1, 1, 1], "SAME")
-
-    # Reshape to get the var in the cells
-    bs, w, h, c = K.int_shape(patches_pred)
-    patches_pred = K.reshape(patches_pred, [-1, w, h, c])
-    patches_true = K.reshape(patches_true, [-1, w, h, c])
-    # Get mean
-    u_true = K.mean(patches_true, axis=-1)
-    u_pred = K.mean(patches_pred, axis=-1)
-    # Get variance
-    var_true = K.var(patches_true, axis=-1)
-    var_pred = K.var(patches_pred, axis=-1)
-    # Get std dev
-    covar_true_pred = K.mean(patches_true * patches_pred, axis=-1) - u_true * u_pred
-
-    ssim = (2 * u_true * u_pred + c1) * (2 * covar_true_pred + c2)
-    denom = ((K.square(u_true)
-              + K.square(u_pred)
-              + c1) * (var_pred + var_true + c2))
-    ssim /= denom  # no need for clipping, c1 and c2 make the denom non-zero
-    return K.mean((1.0 - ssim) / 2.0)
-
-
-
-def bce_dice_loss(y_true, y_pred) -
-
-
-
- -Expand source code - -
def bce_dice_loss(y_true, y_pred):
-    return 1. - dice_coef(y_true, y_pred) + binary_crossentropy(y_true, y_pred)
-
-
-
-def bce_ssim_loss(y_true, y_pred) -
-
-
-
- -Expand source code - -
def bce_ssim_loss(y_true, y_pred):
-    return DSSIM_loss(y_true, y_pred) + binary_crossentropy(y_true, y_pred)
-
-
-
-def dice_coef(y_true, y_pred, smooth=1.0) -
-
-

Dice = (2|X & Y|)/ (|X|+ |Y|) -= -2sum(|A*B|)/(sum(A^2)+sum(B^2)) -ref: https://arxiv.org/pdf/1606.04797v1.pdf

-

from wassname as well

-
- -Expand source code - -
def dice_coef(y_true, y_pred, smooth=1.):
-    """
-    Dice = (2*|X & Y|)/ (|X|+ |Y|)
-         =  2*sum(|A*B|)/(sum(A^2)+sum(B^2))
-    ref: https://arxiv.org/pdf/1606.04797v1.pdf
-    
-    from wassname as well
-    """
-    intersection = K.sum(y_true * y_pred, axis=-1)
-    return (2. * intersection + smooth) / (K.sum(K.square(y_true),-1) + K.sum(K.square(y_pred),-1) + smooth)
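A tiny numeric check of the smoothing behaviour, assuming the module above is imported:

    import numpy as np
    from keras import backend as K

    y = K.constant(np.array([[1., 0., 1., 1.]]))
    print(K.eval(dice_coef(y, y)))        # 1.0: perfect overlap
    print(K.eval(dice_coef(y, 1. - y)))   # 0.2: no overlap, lifted above 0 by smooth=1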
-
-
-
-def dice_coef_loss(y_true, y_pred) -
-
-
-
- -Expand source code - -
def dice_coef_loss(y_true, y_pred):
-    return 1. - dice_coef(y_true, y_pred)
-
-
-
-def dssim_mae_loss(y_true, y_pred) -
-
-
-
- -Expand source code - -
def dssim_mae_loss(y_true, y_pred):
-    return DSSIM_loss(y_true, y_pred) + mean_absolute_error(y_true, y_pred)
-
-
-
-def flatten_binary_scores(scores, labels, ignore=None) -
-
-

Flattens predictions in the batch (binary case) and removes labels equal to 'ignore'.

-
- -Expand source code - -
def flatten_binary_scores(scores, labels, ignore=None):
-    """
-    Flattens predictions in the batch (binary case)
-    Remove labels equal to 'ignore'
-    """
-    scores = tf.reshape(scores, (-1,))
-    labels = tf.reshape(labels, (-1,))
-    if ignore is None:
-        return scores, labels
-    valid = tf.not_equal(labels, ignore)
-    vscores = tf.boolean_mask(scores, valid, name='valid_scores')
-    vlabels = tf.boolean_mask(labels, valid, name='valid_labels')
-    return vscores, vlabels
-
-
-
-def jaccard_distance_loss(y_true, y_pred, smooth=100) -
-
-

Jaccard = (|X & Y|)/ (|X|+ |Y| - |X & Y|) -= sum(|AB|)/(sum(|A|)+sum(|B|)-sum(|AB|))

-

The jaccard distance loss is useful for unbalanced datasets. This has been shifted so it converges on 0 and is smoothed to avoid exploding or disappearing gradients.

-

Ref: https://en.wikipedia.org/wiki/Jaccard_index

-

@url: https://gist.github.com/wassname/f1452b748efcbeb4cb9b1d059dce6f96 (@author: wassname)

-
- -Expand source code - -
def jaccard_distance_loss(y_true, y_pred, smooth=100):
-    """
-    Jaccard = (|X & Y|)/ (|X|+ |Y| - |X & Y|)
-            = sum(|A*B|)/(sum(|A|)+sum(|B|)-sum(|A*B|))
-    
-    The jaccard distance loss is useful for unbalanced datasets. This has been
-    shifted so it converges on 0 and is smoothed to avoid exploding or disappearing
-    gradients.
-    
-    Ref: https://en.wikipedia.org/wiki/Jaccard_index
-    
-    @url: https://gist.github.com/wassname/f1452b748efcbeb4cb9b1d059dce6f96
-    @author: wassname
-    """
-    intersection = K.sum(y_true * y_pred, axis=-1)
-    sum_ = K.sum(y_true + y_pred, axis=-1)
-    jac = (intersection + smooth) / (sum_ - intersection + smooth)
-    return (1 - jac) * smooth
-
-
-
-def keras_MS_SSIM(y_true, y_pred) -
-
-
-
- -Expand source code - -
def keras_MS_SSIM(y_true, y_pred):
-    iterations = 5
-    x=y_true
-    y=y_pred
-    weight = [0.0448, 0.2856, 0.3001, 0.2363, 0.1333]
-    c=[]
-    s=[]
-    for i in range(iterations):
-        cs=keras_SSIM_cs(x, y)
-        c.append(cs[0])
-        s.append(cs[1])
-        l=cs[2]
-        if(i!=4):
-            x=tf.image.resize_images(x, (x.get_shape().as_list()[1]//(2**(i+1)), x.get_shape().as_list()[2]//(2**(i+1))))
-            y=tf.image.resize_images(y, (y.get_shape().as_list()[1]//(2**(i+1)), y.get_shape().as_list()[2]//(2**(i+1))))
-    c = tf.stack(c)
-    s = tf.stack(s)
-    cs = c*s
-
-    #Normalize: suggestion from https://github.com/jorge-pessoa/pytorch-msssim/issues/2 last comment to avoid NaN values
-    l=(l+1)/2
-    cs=(cs+1)/2
-
-    cs=cs**weight
-    cs = tf.reduce_prod(cs)
-    l=l**weight[-1]
-
-    ms_ssim = l*cs
-    ms_ssim = tf.where(tf.is_nan(ms_ssim), K.zeros_like(ms_ssim), ms_ssim)
-
-    return K.mean(ms_ssim)
-
-
-
-def keras_SSIM_cs(y_true, y_pred) -
-
-
-
- -Expand source code - -
def keras_SSIM_cs(y_true, y_pred):
-    axis=None
-    gaussian = make_kernel(1.5)
-    x = tf.nn.conv2d(y_true, gaussian, strides=[1, 1, 1, 1], padding='SAME')
-    y = tf.nn.conv2d(y_pred, gaussian, strides=[1, 1, 1, 1], padding='SAME')
-
-    u_x=K.mean(x, axis=axis)
-    u_y=K.mean(y, axis=axis)
-
-    var_x=K.var(x, axis=axis)
-    var_y=K.var(y, axis=axis)
-
-    cov_xy=cov_keras(x, y, axis)
-
-    K1=0.01
-    K2=0.03
-    L=1  # depth of image (255 in case the image has a different scale)
-
-    C1=(K1*L)**2
-    C2=(K2*L)**2
-    C3=C2/2
-
-    l = ((2*u_x*u_y)+C1) / (K.pow(u_x,2) + K.pow(u_y,2) + C1)  # SSIM luminance term
-    c = ((2*K.sqrt(var_x)*K.sqrt(var_y))+C2) / (var_x + var_y + C2)
-    s = (cov_xy+C3) / (K.sqrt(var_x)*K.sqrt(var_y) + C3)
-
-    return [c,s,l]
-
-
-
-def lovasz_grad(gt_sorted) -
-
-

Computes the gradient of the Lovasz extension w.r.t. sorted errors; see Alg. 1 in the paper.

-
- -Expand source code - -
def lovasz_grad(gt_sorted):
-    """
-    Computes gradient of the Lovasz extension w.r.t sorted errors
-    See Alg. 1 in paper
-    """
-    gts = tf.reduce_sum(gt_sorted)
-    intersection = gts - tf.cumsum(gt_sorted)
-    union = gts + tf.cumsum(1. - gt_sorted)
-    jaccard = 1. - intersection / union
-    jaccard = tf.concat((jaccard[0:1], jaccard[1:] - jaccard[:-1]), 0)
-    return jaccard
-
-
-
-def lovasz_hinge(logits, labels, per_image=True, ignore=None) -
-
-

Binary Lovasz hinge loss.
logits: [B, H, W] Variable, logits at each pixel (between -infty and +infty)
labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
per_image: compute the loss per image instead of per batch
ignore: void class id

-
- -Expand source code - -
def lovasz_hinge(logits, labels, per_image=True, ignore=None):
-    """
-    Binary Lovasz hinge loss
-      logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty)
-      labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
-      per_image: compute the loss per image instead of per batch
-      ignore: void class id
-    """
-    if per_image:
-        def treat_image(log_lab):
-            log, lab = log_lab
-            log, lab = tf.expand_dims(log, 0), tf.expand_dims(lab, 0)
-            log, lab = flatten_binary_scores(log, lab, ignore)
-            return lovasz_hinge_flat(log, lab)
-        losses = tf.map_fn(treat_image, (logits, labels), dtype=tf.float32)
-        loss = tf.reduce_mean(losses)
-    else:
-        loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore))
-    return loss
-
-
-
-def lovasz_hinge_flat(logits, labels) -
-
-

Binary Lovasz hinge loss.
logits: [P] Variable, logits at each prediction (between -infty and +infty)
labels: [P] Tensor, binary ground truth labels (0 or 1)
ignore: label to ignore

-
- -Expand source code - -
def lovasz_hinge_flat(logits, labels):
-    """
-    Binary Lovasz hinge loss
-      logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
-      labels: [P] Tensor, binary ground truth labels (0 or 1)
-      ignore: label to ignore
-    """
-
-    def compute_loss():
-        labelsf = tf.cast(labels, logits.dtype)
-        signs = 2. * labelsf - 1.
-        errors = 1. - logits * tf.stop_gradient(signs)
-        errors_sorted, perm = tf.nn.top_k(errors, k=tf.shape(errors)[0], name="descending_sort")
-        gt_sorted = tf.gather(labelsf, perm)
-        grad = lovasz_grad(gt_sorted)
-        loss = tf.tensordot(tf.nn.relu(errors_sorted), tf.stop_gradient(grad), 1, name="loss_non_void")
-        return loss
-
-    # deal with the void prediction case (only void pixels)
-    loss = tf.cond(tf.equal(tf.shape(logits)[0], 0),
-                   lambda: tf.reduce_sum(logits) * 0.,
-                   compute_loss,
-                   strict=True,
-                   name="loss"
-                   )
-    return loss
-
-
-
-def lovasz_loss(y_true, y_pred) -
-
-
-
- -Expand source code - -
def lovasz_loss(y_true, y_pred):
-    y_true, y_pred = K.cast(K.squeeze(y_true, -1), 'int32'), K.cast(K.squeeze(y_pred, -1), 'float32')
-    #logits = K.log(y_pred / (1. - y_pred))
-    logits = y_pred #Jiaxin
-    loss = lovasz_hinge(logits, y_true, per_image = True, ignore = None)
-    return loss
-
-
-
-def mssim_mae_loss(y_true, y_pred) -
-
-
-
- -Expand source code - -
def mssim_mae_loss(y_true, y_pred):
-    return keras_MS_SSIM(y_true, y_pred) + mean_absolute_error(y_true, y_pred)
-
-
-
-
-
-
-
- -
- - - - - \ No newline at end of file diff --git a/html/models/internals/network_config.html b/html/models/internals/network_config.html deleted file mode 100644 index b11b990..0000000 --- a/html/models/internals/network_config.html +++ /dev/null @@ -1,908 +0,0 @@ - - - - - - -models.internals.network_config API documentation - - - - - - - - - -
-
-
-

Module models.internals.network_config

-
-
-
- -Expand source code - -
import glob
-import os
-from ruamel.yaml import YAML
-
-class Network_Config(object):
-    def __init__(self, model_dir = None, config_filepath = None, **kwargs):
-        """Creates Network_Config object that contains the network parameters and functions needed to manipulate these parameters.
-    
-        Parameters
-        ----------
-        model_dir : `str`, optional
-            [Default: None] Folder where the model is to be saved/read from
-        config_filepath : `str`, optional
-            [Default: None] Filepath to the config file that will be loaded
-        **kwargs
-            For network parameters that are to be changed from the loaded config file
-
-        Attributes
-        ----------
-        yaml : :class:`ruamel.yaml.YAML`
-            YAML class with function needed to read/write YAML files 
-        config : `dict`
-            Dictionary containing the config parameters
-        """
-        self.yaml=YAML()
-        
-        # load config file from model_dir
-        if config_filepath is not None:
-            
-            self.config = self.load_config_from_file(config_filepath)
-            print("Loaded config file from {}".format(config_filepath))
-        elif model_dir is not None:
-            try:
-                self.config = self.load_config_from_model_dir(model_dir)
-                print("Loaded config file from {}".format(model_dir))
-            except:
-                print("Please ensure that config_filepath is set or there is a config file in model_dir")
-                raise
-            
-        if model_dir is not None:
-            # update model_dir in config
-            print("Updating model_dir to {}".format(model_dir))
-            self.update_parameter(["general", "model_dir"], model_dir)
-        
-        # overwrite network parameters with parameters given during initialization
-        for key, value in kwargs.items():
-            self.update_parameter(self.find_key(key), value)
-            
-        # perform calculations
-        self.update_parameter(["model", "input_size"], self.get_parameter("tile_size") + [self.get_parameter("image_channel"),])
-        self.update_parameter(["model", "batch_size"], self.get_parameter("batch_size_per_GPU")) # * self.gpu_count
-                  
-    ######################
-    # Accessors/Mutators
-    ######################
-    def get_parameter(self, parameter, config = []):
-        """Output the value from the config file using the given key
-
-        Parameters
-        ----------
-        parameter : `list` or `str`
-            Key or list of keys used to find the value in the config file
-        
-        config : `dict`, optional
-            Used to recursively iterate through the nested dictionary
-            
-        Returns
-        ----------
-        value : `str` or `int` or `list`
-            Value obtained from the specified key
-            
-        See Also
-        ----------
-        find_key : Function to identify the list of keys to address the correct item in a nested dictionary
-        """
-        assert isinstance(parameter, (list, str))
-        
-        # resolve a bare key to its full path in the nested dictionary
-        if isinstance(parameter, str):
-            parameter = self.find_key(parameter)
-        
-        if config == []:
-            config = self.config
-        if config is None:
-            return None
-        
-        if not parameter:
-            return config
-        
-        return self.get_parameter(parameter[1:], config = config.get(parameter[0]))
-
-    def update_parameter(self, parameter, value, config = None):
-        """Updates the parameter in the config file using a full addressed list
-
-        Parameters
-        ----------
-        parameter : `list`
-            List of keys that point to the correct item in the nested dictionary
-            
-        value : `str` or `int` or `list`
-            Value that is updated in the nested dictionary
-            
-        config : `list` or `none`, optional
-            Used to iterate through nested dictionaries
-            
-        Returns
-        ----------
-        config : `dict`
-            Config dictionary containing the updated parameter
-        """
-        
-        assert type(parameter) is list
-                
-        if config == None:
-            config = self.config
-        
-        if len(parameter) == 1:
-            config.update({parameter[0]: value})
-            return config
-        return self.update_parameter(parameter[1:], value, config = config.get(parameter[0]))
-
-    def find_key(self, key, config = None):
-        """Find the list of keys to address the correct item in a nested dictionary
-
-        Parameters
-        ----------
-        key : `str`
-            Key that needs to be correctly addressed in a nested dictionary
-            
-        config : `list` or `none`, optional
-            Used to iterate through nested dictionaries
-            
-        Returns
-        ----------
-        key : `list`
-            Address of the key in the nested dictionary
-        """
-        
-        if config == None:
-            config = self.config
-            
-        key_path = []
-        for k, v in config.items():
-            if k == key:
-                return [k]
-            elif isinstance(v, dict):
-                found_key = self.find_key(key, config = v)
-                if found_key is not None:
-                    return [k] + found_key
-    
-    ######################
-    # Config IO options
-    ######################
-    def load_config_from_file(self, file_path):
-        """Load parameters from yaml file
-
-        Parameters
-        ----------
-        file_path : `str`
-            Path of config file to load
-            
-        Returns
-        ----------
-        config : `dict`
-            Dictionary containing the config parameters
-        """
-
-        with open(file_path, 'r') as input_file: 
-            config = self.yaml.load(input_file)
-            input_file.close()
-
-        return config
-    
-    def load_config_from_model_dir(self, model_dir):
-        """Finds for a config file from the model directory and loads it
-    
-        Parameters
-        ----------
-        model_dir : `str`
-            Folder to search for and load the config file
-
-        Returns
-        ----------
-        config : `dict`
-            Dictionary containing the config parameters
-            
-        Raises
-        ------
-        IndexError
-            If there is no config file in the model_dir
-        """
-        
-        # check if yaml file exists in model_dir
-        try:
-            list_config_files = glob.glob(os.path.join(model_dir,'*config.yml'))
-            if len(list_config_files) > 1:
-                print("Multiple config files found. Loading {}".format(list_config_files[0]))
-            else:
-                print("Config file exists in model directory. Loading {}".format(list_config_files[0]))
-            return self.load_config_from_file(list_config_files[0])
-        except IndexError:
-            print("No config file found in model_dir.")
-            raise
-
-    def write_config(self, file_path):
-        """Writes parameters to yaml file
-
-        Parameters
-        ----------
-        file_path : `str`
-            Path of config file to write to
-        """
-        
-        with open(file_path, 'w') as output_file:  
-            self.yaml.dump(self.config, output_file)
-
-        output_file.close()
-        
-        print("Config file written to: {}".format(file_path))
-    
-    def write_model(self, model, file_path):
-        """Writes parameters to yaml file
-
-        Parameters
-        ----------
-        model : :class:`Keras.model`
-            Keras model that will be parsed and written to a yaml file
-        
-        file_path : `str`
-            Path of model file to write to
-        """
-        
-        with open(file_path, 'w') as output_file:  
-            output_file.write(model.to_yaml())
-
-        output_file.close()
-        
-        print("Model file written to: {}".format(file_path))
-
-
-
-
-
-
-
-
-
-

Classes

-
-
-class Network_Config -(model_dir=None, config_filepath=None, **kwargs) -
-
-

Creates Network_Config object that contains the network parameters and functions needed to manipulate these parameters.

-

Parameters

-
-
model_dir : str, optional
-
[Default: None] Folder where the model is to be saved/read from
-
config_filepath : str, optional
-
[Default: None] Filepath to the config file that will be loaded
-
**kwargs
-
For network parameters that are to be changed from the loaded config file
-
-

Attributes

-
-
yaml : :class:ruamel.yaml.YAML
-
YAML class with function needed to read/write YAML files
-
config : dict
-
Dictionary containing the config parameters
-
-
- -Expand source code - -
class Network_Config(object):
-    def __init__(self, model_dir = None, config_filepath = None, **kwargs):
-        """Creates Network_Config object that contains the network parameters and functions needed to manipulate these parameters.
-    
-        Parameters
-        ----------
-        model_dir : `str`, optional
-            [Default: None] Folder where the model is to be saved/read from
-        config_filepath : `str`, optional
-            [Default: None] Filepath to the config file that will be loaded
-        **kwargs
-            For network parameters that are to be changed from the loaded config file
-
-        Attributes
-        ----------
-        yaml : :class:`ruamel.yaml.YAML`
-            YAML class with function needed to read/write YAML files 
-        config : `dict`
-            Dictionary containing the config parameters
-        """
-        self.yaml=YAML()
-        
-        # load config file from model_dir
-        if config_filepath is not None:
-            
-            self.config = self.load_config_from_file(config_filepath)
-            print("Loaded config file from {}".format(config_filepath))
-        elif model_dir is not None:
-            try:
-                self.config = self.load_config_from_model_dir(model_dir)
-                print("Loaded config file from {}".format(model_dir))
-            except:
-                print("Please ensure that config_filepath is set or there is a config file in model_dir")
-                raise
-            
-        if model_dir is not None:
-            # update model_dir in config
-            print("Updating model_dir to {}".format(model_dir))
-            self.update_parameter(["general", "model_dir"], model_dir)
-        
-        # overwrite network parameters with parameters given during initialization
-        for key, value in kwargs.items():
-            self.update_parameter(self.find_key(key), value)
-            
-        # perform calculations
-        self.update_parameter(["model", "input_size"], self.get_parameter("tile_size") + [self.get_parameter("image_channel"),])
-        self.update_parameter(["model", "batch_size"], self.get_parameter("batch_size_per_GPU")) # * self.gpu_count
-                  
-    ######################
-    # Accessors/Mutators
-    ######################
-    def get_parameter(self, parameter, config = []):
-        """Output the value from the config file using the given key
-
-        Parameters
-        ----------
-        parameter : `list` or `str`
-            Key or list of keys used to find the value in the config file
-        
-        config : `dict`, optional
-            Used to recursively iterate through the nested dictionary
-            
-        Returns
-        ----------
-        value : `str` or `int` or `list`
-            Value obtained from the specified key
-            
-        See Also
-        ----------
-        find_key : Function to identify the list of keys to address the correct item in a nested dictionary
-        """
-        assert isinstance(parameter, (list, str))
-        
-        # resolve a bare key to its full path in the nested dictionary
-        if isinstance(parameter, str):
-            parameter = self.find_key(parameter)
-        
-        if config == []:
-            config = self.config
-        if config is None:
-            return None
-        
-        if not parameter:
-            return config
-        
-        return self.get_parameter(parameter[1:], config = config.get(parameter[0]))
-
-    def update_parameter(self, parameter, value, config = None):
-        """Updates the parameter in the config file using a full addressed list
-
-        Parameters
-        ----------
-        parameter : `list`
-            List of keys that point to the correct item in the nested dictionary
-            
-        value : `str` or `int` or `list`
-            Value that is updated in the nested dictionary
-            
-        config : `list` or `none`, optional
-            Used to iterate through nested dictionaries
-            
-        Returns
-        ----------
-        config : `dict`
-            Config dictionary containing the updated parameter
-        """
-        
-        assert type(parameter) is list
-                
-        if config == None:
-            config = self.config
-        
-        if len(parameter) == 1:
-            config.update({parameter[0]: value})
-            return config
-        return self.update_parameter(parameter[1:], value, config = config.get(parameter[0]))
-
-    def find_key(self, key, config = None):
-        """Find the list of keys to address the correct item in a nested dictionary
-
-        Parameters
-        ----------
-        key : `str`
-            Key that needs to be correctly addressed in a nested dictionary
-            
-        config : `list` or `none`, optional
-            Used to iterate through nested dictionaries
-            
-        Returns
-        ----------
-        key : `list`
-            Address of the key in the nested dictionary
-        """
-        
-        if config == None:
-            config = self.config
-            
-        key_path = []
-        for k, v in config.items():
-            if k == key:
-                return [k]
-            elif isinstance(v, dict):
-                found_key = self.find_key(key, config = v)
-                if found_key is not None:
-                    return [k] + found_key
-    
-    ######################
-    # Config IO options
-    ######################
-    def load_config_from_file(self, file_path):
-        """Load parameters from yaml file
-
-        Parameters
-        ----------
-        file_path : `str`
-            Path of config file to load
-            
-        Returns
-        ----------
-        config : `dict`
-            Dictionary containing the config parameters
-        """
-
-        with open(file_path, 'r') as input_file: 
-            config = self.yaml.load(input_file)
-            input_file.close()
-
-        return config
-    
-    def load_config_from_model_dir(self, model_dir):
-        """Finds for a config file from the model directory and loads it
-    
-        Parameters
-        ----------
-        model_dir : `str`
-            Folder to search for and load the config file
-
-        Returns
-        ----------
-        config : `dict`
-            Dictionary containing the config parameters
-            
-        Raises
-        ------
-        IndexError
-            If there is no config file in the model_dir
-        """
-        
-        # check if yaml file exists in model_dir
-        try:
-            list_config_files = glob.glob(os.path.join(model_dir,'*config.yml'))
-            if len(list_config_files) > 1:
-                print("Multiple config files found. Loading {}".format(list_config_files[0]))
-            else:
-                print("Config file exists in model directory. Loading {}".format(list_config_files[0]))
-            return self.load_config_from_file(list_config_files[0])
-        except IndexError:
-            print("No config file found in model_dir.")
-            raise
-
-    def write_config(self, file_path):
-        """Writes parameters to yaml file
-
-        Parameters
-        ----------
-        file_path : `str`
-            Path of config file to write to
-        """
-        
-        with open(file_path, 'w') as output_file:  
-            self.yaml.dump(self.config, output_file)
-
-        output_file.close()
-        
-        print("Config file written to: {}".format(file_path))
-    
-    def write_model(self, model, file_path):
-        """Writes parameters to yaml file
-
-        Parameters
-        ----------
-        model : :class:`Keras.model`
-            Keras model that will be parsed and written to a yaml file
-        
-        file_path : `str`
-            Path of model file to write to
-        """
-        
-        with open(file_path, 'w') as output_file:  
-            output_file.write(model.to_yaml())
-
-        output_file.close()
-        
-        print("Model file written to: {}".format(file_path))
-
-

Methods

-
-
-def find_key(self, key, config=None) -
-
-

Find the list of keys to address the correct item in a nested dictionary

-

Parameters

-
-
key : str
-
Key that needs to be correctly addressed in a nested dictionary
-
config : list or none, optional
-
Used to iterate through nested dictionaries
-
-

Returns

-
-
key : list
-
Address of the key in the nested dictionary
-
-
- -Expand source code - -
def find_key(self, key, config = None):
-    """Find the list of keys to address the correct item in a nested dictionary
-
-    Parameters
-    ----------
-    key : `str`
-        Key that needs to be correctly addressed in a nested dictionary
-        
-    config : `list` or `none`, optional
-        Used to iterate through nested dictionaries
-        
-    Returns
-    ----------
-    key : `list`
-        Address of the key in the nested dictionary
-    """
-    
-    if config is None:
-        config = self.config
-        
-    key_path = []
-    for k, v in config.items():
-        if k == key:
-            return [k]
-        elif isinstance(v, dict):
-            found_key = self.find_key(key, config = v)
-            if found_key is not None:
-                return [k] + found_key
-
-
-
def get_parameter(self, parameter, config=[])

Output the value from the config file using the given key.

Parameters

parameter : list or str
    Key or list of keys used to find the value in the config file
config : dict, optional
    Used to recursively iterate through the nested dictionary

Returns

value : str or int or list
    Value obtained from the specified key

See Also

find_key
    Function to identify the list of keys to address the correct item in a nested dictionary

def get_parameter(self, parameter, config = []):
-    """Output the value from the config file using the given key
-
-    Parameters
-    ----------
-    parameter : `list` or `str`
-        Key or list of keys used to find the value in the config file
-    
-    config : `dict`, optional
-        Used to recursively iterate through the nested dictionary
-        
-    Returns
-    ----------
-    value : `str` or `int` or `list`
-        Value obtained from the specified key
-        
-    See Also
-    ----------
-    find_key : Function to identify the list of keys to address the correct item in a nested dictionary
-    """
-    assert isinstance(parameter, (list, str))
-    
-    # find for key in nested dictionary
-    if isinstance(parameter, str):
-        parameter = self.find_key(parameter)
-    
-    if config == []:
-        config = self.config
-    if config is None:
-        return None
-    
-    if not parameter:
-        return config
-    
-    return self.get_parameter(parameter[1:], config = config.get(parameter[0]))
-
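To make the recursion concrete, here is a self-contained, simplified sketch of the lookup on an invented nested config (names and values are illustrative, not from the repository; get_parameter is shown as an iterative equivalent of the recursive method above):

    # Invented nested config, for illustration only
    config = {"training": {"optimizer": {"learning_rate": 1e-3}}}

    def find_key(key, config):
        # depth-first search for `key`, returning the path of keys leading to it
        for k, v in config.items():
            if k == key:
                return [k]
            if isinstance(v, dict):
                found = find_key(key, v)
                if found is not None:
                    return [k] + found

    def get_parameter(path, config):
        # walk the path produced by find_key down to the stored value
        for k in path:
            config = config[k]
        return config

    path = find_key("learning_rate", config)   # ['training', 'optimizer', 'learning_rate']
    print(get_parameter(path, config))         # 0.001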
-
-
def load_config_from_file(self, file_path)

Load parameters from yaml file.

Parameters

file_path : str
    Path of config file to load

Returns

config : dict
    Dictionary containing the config parameters
-
-
-
def load_config_from_model_dir(self, model_dir)

Searches the model directory for a config file and loads it.

Parameters

model_dir : str
    Folder to search for and load the config file

Returns

config : dict
    Dictionary containing the config parameters

Raises

IndexError
    If there is no config file in model_dir
-
-
-
def update_parameter(self, parameter, value, config=None)

Updates a parameter in the config using the full list of keys that addresses it.

Parameters

parameter : list
    List of keys that point to the correct item in the nested dictionary
value : str or int or list
    Value that is updated in the nested dictionary
config : dict or None, optional
    Used to iterate through nested dictionaries

Returns

config : dict
    The innermost dictionary after the value has been updated

def update_parameter(self, parameter, value, config = None):
-    """Updates the parameter in the config file using a full addressed list
-
-    Parameters
-    ----------
-    parameter : `list`
-        List of keys that point to the correct item in the nested dictionary
-        
-    value : `str` or `int` or `list`
-        Value that is updated in the nested dictionary
-        
-    config : `dict` or `None`, optional
-        Used to iterate through nested dictionaries
-        
-    Returns
-    ----------
-    config : `dict`
-        The innermost dictionary after the value has been updated
-    """
-    
-    assert isinstance(parameter, list)
-            
-    if config is None:
-        config = self.config
-    
-    if len(parameter) == 1:
-        config.update({parameter[0]: value})
-        return config
-    # recurse into the current sub-dictionary, not the top-level config,
-    # so that keys nested more than two levels deep are updated correctly
-    return self.update_parameter(parameter[1:], value, config = config.get(parameter[0]))
-
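Usage then mirrors get_parameter, but with the full key path spelled out. A hedged example, assuming an instance `net` of CNN_Base and a config that nests a key as {"training": {"batch_size": ...}} (both invented for illustration):

    # Hypothetical call: address the nested key with its full path of keys
    net.config.update_parameter(["training", "batch_size"], 8)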
-
-
def write_config(self, file_path)

Writes parameters to yaml file.

Parameters

file_path : str
    Path of config file to write to
-
-
-
def write_model(self, model, file_path)

Writes the Keras model architecture to a yaml file.

Parameters

model : keras.Model
    Keras model that will be serialized and written to a yaml file
file_path : str
    Path of model file to write to
-
\ No newline at end of file
diff --git a/html/models/layers/index.html b/html/models/layers/index.html
deleted file mode 100644
index cc15a41..0000000
--- a/html/models/layers/index.html
+++ /dev/null
@@ -1,71 +0,0 @@
-
-models.layers API documentation
-
-
-
-

Module models.layers

-
-
-
from __future__ import absolute_import, print_function
-
-
-
-

Sub-modules

-
-
models.layers.layers
-
-
-
-
-
-
-
-
-
-
-
-
- -
-
\ No newline at end of file
diff --git a/html/models/layers/layers.html b/html/models/layers/layers.html
deleted file mode 100644
index 9eca4b5..0000000
--- a/html/models/layers/layers.html
+++ /dev/null
@@ -1,222 +0,0 @@
-
-models.layers.layers API documentation
-
-
-
-

Module models.layers.layers

-
-
-
import math
-
-import keras
-from keras.models import Model, load_model
-from keras.layers import Input, BatchNormalization, Activation
-from keras.layers.core import Lambda, Dropout
-from keras.layers.convolutional import Conv2D, Conv2DTranspose, UpSampling2D
-from keras.layers.convolutional_recurrent import ConvLSTM2D
-from keras.layers.pooling import MaxPooling2D
-from keras.layers.merge import Concatenate, Add
-from keras import regularizers
-from keras import backend as K
-
-import tensorflow as tf
-
-def activation_function(inputs, acti):
-    if isinstance(acti, str):
-        return Activation(acti)(inputs)
-    else:
-        return acti(inputs)
-
-def regularizer_function(weight_regularizer):
-    if weight_regularizer == 0 or weight_regularizer is None:
-        return None
-    else:
-        return regularizers.l2(weight_regularizer)
-    
-def bn_relu_conv2d(inputs, filters, filter_size, 
-                    strides = 1, acti = None, padding = None, 
-                    kernel_initializer = None, weight_regularizer = None, name = ""):
-    output = BatchNormalization()(inputs)
-    output = activation_function(output, acti)
-    output = Conv2D(filters, (filter_size, filter_size), padding=padding, strides = strides,
-                    kernel_initializer=kernel_initializer, 
-                    kernel_regularizer=regularizer_function(weight_regularizer))(output)
-            
-    return output
-
-def bn_relu_conv2dtranspose(inputs, filters, filter_size, 
-                            strides = 2, acti = None, padding = None, 
-                            kernel_initializer = None, weight_regularizer = None, name = ""):
-    output = BatchNormalization()(inputs)
-    output = activation_function(output, acti)
-    # note: the transposed convolution kernel is fixed at (2, 2); the
-    # filter_size argument is accepted for API symmetry but not used here
-    output = Conv2DTranspose(filters, (2, 2), strides=strides, padding=padding, 
-                             kernel_initializer=kernel_initializer, 
-                             kernel_regularizer=regularizer_function(weight_regularizer))(output)
-    return output
-
-def normalize_input(inputs, scale_input = False, mean_std_normalization = False, mean = None, std = None):
-    if mean_std_normalization is True:
-        print("Using normalization")
-        return Lambda(lambda x: (x - mean)/std)(inputs)
-    elif scale_input is True:
-        print("Not using normalization")
-        return Lambda(lambda x: x / 255)(inputs)
-    else:
-        return inputs
-            
-    
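To see how these helpers compose, here is a hedged usage sketch assuming the keras 2.x functional API imported above (input shape and hyperparameters are invented for illustration):

    # Minimal pre-activation block built from the helpers above
    inputs = Input(shape=(256, 256, 1))
    x = normalize_input(inputs, scale_input=True)          # scale raw pixels to [0, 1]
    x = Conv2D(32, (3, 3), padding="same")(x)              # stem convolution
    x = bn_relu_conv2d(x, filters=32, filter_size=3,
                       acti="relu", padding="same",
                       kernel_initializer="he_normal",
                       weight_regularizer=1e-4)            # BN -> activation -> Conv2D
    model = Model(inputs, x)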
-
-
-
-
-
-
-
-

Functions

-
-
-def activation_function(inputs, acti)
-
-
-
-
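activation_function accepts either a string (wrapped in an Activation layer) or an already-instantiated layer. A small usage sketch, assuming the keras 2.x API used by this module; LeakyReLU is just an example of a callable layer:

    from keras.layers import Input, LeakyReLU

    x = Input(shape=(64, 64, 1))                  # any Keras tensor
    out = activation_function(x, "relu")          # string -> Activation("relu") layer
    out = activation_function(x, LeakyReLU(0.1))  # an instantiated layer also works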
-
-
-def bn_relu_conv2d(inputs, filters, filter_size, strides=1, acti=None, padding=None, kernel_initializer=None, weight_regularizer=None, name='')
-
-
-
-
-
-
-def bn_relu_conv2dtranspose(inputs, filters, filter_size, strides=2, acti=None, padding=None, kernel_initializer=None, weight_regularizer=None, name='')
-
-
-
-
-
-
-def normalize_input(inputs, scale_input=False, mean_std_normalization=False, mean=None, std=None)
-
-
-
-
-
-
-def regularizer_function(weight_regularizer)
-
-
-
-
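regularizer_function maps 0 or None to "no weight decay" and any other value to an L2 penalty; for example:

    reg = regularizer_function(1e-4)  # keras L2 regularizer: adds 1e-4 * sum(w**2) to the loss
    reg = regularizer_function(0)     # None, i.e. no kernel_regularizer is applied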
-
-
-
-
-
-
- -
-
\ No newline at end of file