From 3b666698be0ffe6fa353b96b07ce0e5b78b35475 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Pedersen?= Date: Mon, 29 Apr 2024 16:04:39 +0200 Subject: [PATCH 01/18] Added linting workflow --- .github/workflows/linting.yml | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 .github/workflows/linting.yml diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml new file mode 100644 index 0000000..93fcc40 --- /dev/null +++ b/.github/workflows/linting.yml @@ -0,0 +1,26 @@ +name: Linting + +on: + push: + branches: + - '*' + pull_request: + branches: + - '*' + workflow_dispatch: + +jobs: + build: + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v1 + - name: Set up Python 3.8 + uses: actions/setup-python@v2 + with: + python-version: "3.8" + + - name: Install lint dependencies + run: pip install wheel setuptools black==22.3.0 isort==5.10.1 flake8==4.0.1 + + - name: Lint the code + run: sh shell/lint.sh From cdf6120d28da13dea309f3caaec1b445d971e45d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Pedersen?= Date: Mon, 29 Apr 2024 16:08:44 +0200 Subject: [PATCH 02/18] Added format shell script --- shell/format.sh | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 shell/format.sh diff --git a/shell/format.sh b/shell/format.sh new file mode 100644 index 0000000..390b9e0 --- /dev/null +++ b/shell/format.sh @@ -0,0 +1,4 @@ +#!/bin/bash +isort --sl . +black --line-length 80 . +flake8 . From 2fdd4847246b5bd0de946a8f46ad4ba7d7a52358 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Pedersen?= Date: Mon, 29 Apr 2024 16:08:51 +0200 Subject: [PATCH 03/18] Added linting shell script --- shell/lint.sh | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 shell/lint.sh diff --git a/shell/lint.sh b/shell/lint.sh new file mode 100644 index 0000000..38755e2 --- /dev/null +++ b/shell/lint.sh @@ -0,0 +1,23 @@ +#!/bin/bash +isort --check --sl -c . +if ! [ $? -eq 0 ] +then + echo "Please run \"sh shell/format.sh\" to format the code." + exit 1 +fi +echo "no issues with isort" +flake8 . +if ! [ $? -eq 0 ] +then + echo "Please fix the code style issue." + exit 1 +fi +echo "no issues with flake8" +black --check --line-length 80 . +if ! [ $? -eq 0 ] +then + echo "Please run \"sh shell/format.sh\" to format the code." + exit 1 +fi +echo "no issues with black" +echo "linting success!" From 5e1eb22e1b000505630d28e72eb600d0453deea7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Pedersen?= Date: Mon, 29 Apr 2024 16:12:09 +0200 Subject: [PATCH 04/18] Added flake8 and isort config; only run on py files and ignore venv/ dir --- setup.cfg | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 setup.cfg diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000..ca629cf --- /dev/null +++ b/setup.cfg @@ -0,0 +1,16 @@ +[metadata] +description-file = README.md + +[isort] +force_single_line=True +known_first_party=. +line_length=80 +profile=black + +[flake8] +# imported but unused in __init__.py, that's ok. 
+per-file-ignores=*__init__.py:F401 +ignore=E203,W503,W605,F632,E266,E731,E712,E741 +max-line-length=80 +filename = *.py +exclude = venv/ From c69bbc100d16ebf8f73fa74f3f068054e5fbb9f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Pedersen?= Date: Mon, 29 Apr 2024 16:12:17 +0200 Subject: [PATCH 05/18] Ignore venv/ dir --- .gitignore | 1 + 1 file changed, 1 insertion(+) create mode 100644 .gitignore diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..b685d73 --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +*venv/ From d765e883d0a512c1278df8f3f78d27d8712b7ba7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Pedersen?= Date: Mon, 29 Apr 2024 16:12:24 +0200 Subject: [PATCH 06/18] Linted code --- ClusteringForTissueBalancing.py | 71 +++++---- ClustringRefinement.py | 74 ++++++---- FewShot.py | 153 +++++++++++++------- GenerateGradients.py | 28 ++-- PostProcess.py | 25 +++- SegmentationTraining.py | 249 ++++++++++++++++++++------------ TissueClustersFromThumbnails.py | 41 ++++-- src/augmentation/MLD.py | 13 +- src/generator/Generator.py | 70 ++++++--- src/models/Build_DR.py | 60 ++++---- src/models/Build_Unet.py | 59 ++++---- src/models/losses.py | 30 ++-- src/utils/utilities.py | 54 ++++--- 13 files changed, 572 insertions(+), 355 deletions(-) diff --git a/ClusteringForTissueBalancing.py b/ClusteringForTissueBalancing.py index 109a925..2cbf366 100644 --- a/ClusteringForTissueBalancing.py +++ b/ClusteringForTissueBalancing.py @@ -1,8 +1,10 @@ -import numpy as np -import cv2 import os -from sklearn.cluster import KMeans + +import cv2 +import numpy as np from matplotlib import pyplot as plt +from sklearn.cluster import KMeans + def fill_holes(binary_img): # Copy the image @@ -16,10 +18,10 @@ def fill_holes(binary_img): # Mask used for flood filling. 
Notice the size needs to be 2 pixels larger than the image h, w = im_th.shape[:2] - mask = np.zeros((h+2, w+2), np.uint8) + mask = np.zeros((h + 2, w + 2), np.uint8) # Flood fill from point (0, 0) - cv2.floodFill(im_floodfill, mask, (0,0), 255) + cv2.floodFill(im_floodfill, mask, (0, 0), 255) # Invert floodfilled image im_floodfill_inv = cv2.bitwise_not(im_floodfill) @@ -29,42 +31,53 @@ def fill_holes(binary_img): return filled_image -def cluster(image_path, weights=[0.6,0.1,0.2], fill_the_holes=True): + +def cluster(image_path, weights=[0.6, 0.1, 0.2], fill_the_holes=True): # Load image and extract each channel image = cv2.imread(image_path) - Rw1, Rw2, Rw3 = [image[..., i]/255 for i in range(3)] + Rw1, Rw2, Rw3 = [image[..., i] / 255 for i in range(3)] images = [Rw1, Rw2, Rw3] - + scale_percent = 30 # percent of the original size width = int(Rw1.shape[1] * scale_percent / 100) height = int(Rw1.shape[0] * scale_percent / 100) dim = (width, height) - + # Resize image - resized_images = [cv2.resize(img, dim, interpolation=cv2.INTER_AREA) for img in images] + resized_images = [ + cv2.resize(img, dim, interpolation=cv2.INTER_AREA) for img in images + ] + + weighted_images = [ + img * weight for img, weight in zip(resized_images, weights) + ] - weighted_images = [img * weight for img, weight in zip(resized_images, weights)] - # Stack all images to create a feature vector for each pixel features = np.stack(weighted_images, axis=-1).reshape(-1, 3) - + # Apply KMeans clustering with a consistent initialization and random seed - kmeans = KMeans(n_clusters=4, init='k-means++', random_state=42) + kmeans = KMeans(n_clusters=4, init="k-means++", random_state=42) labels = kmeans.fit_predict(features) - + # Identify the cluster that is closest to white - white_cluster = np.argmin(np.linalg.norm(kmeans.cluster_centers_ - [1, 1, 1], axis=1)) - + white_cluster = np.argmin( + np.linalg.norm(kmeans.cluster_centers_ - [1, 1, 1], axis=1) + ) + # If the white cluster is not labeled as '0', swap labels if white_cluster != 0: labels[labels == 0] = -1 # Temporary change label '0' to '-1' - labels[labels == white_cluster] = 0 # Assign label '0' to the white cluster - labels[labels == -1] = white_cluster # Assign previous '0' cluster to 'white_cluster' label - + labels[labels == white_cluster] = ( + 0 # Assign label '0' to the white cluster + ) + labels[labels == -1] = ( + white_cluster # Assign previous '0' cluster to 'white_cluster' label + ) + # Reshape the labels to the image's shape labels_2D = labels.reshape(height, width) - + pred = labels_2D.astype(np.uint8) pred = cv2.medianBlur(pred, 11) @@ -73,25 +86,29 @@ def cluster(image_path, weights=[0.6,0.1,0.2], fill_the_holes=True): return pred + def process_images(input_folder, output_folder): for filename in os.listdir(input_folder): if filename.lower().endswith((".png", ".jpg", ".jpeg")): image_path = os.path.join(input_folder, filename) result = cluster(image_path, fill_the_holes=True) - + # Create the output folder if it doesn't exist os.makedirs(output_folder, exist_ok=True) - + # Save the result output_path = os.path.join(output_folder, "processed_" + filename) - cv2.imwrite(output_path, result * 255) # Scale back up to 0-255 range + cv2.imwrite( + output_path, result * 255 + ) # Scale back up to 0-255 range # Optionally display the result plt.imshow(result) - plt.axis('off') + plt.axis("off") plt.show() + # Usage -input_folder = './input_images' -output_folder = './output_images' +input_folder = "./input_images" +output_folder = "./output_images" 
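
# A minimal standalone sketch of the id-swapping invariant that cluster()
# above relies on: KMeans assigns arbitrary cluster ids, so the cluster
# whose center lies closest to pure white (the background glass) is pinned
# to label 0. Toy pixel values, made up purely for illustration; assumes
# only numpy and scikit-learn.
import numpy as np
from sklearn.cluster import KMeans

toy_features = np.array(
    [
        [0.10, 0.12, 0.08],  # dark "tissue" pixels
        [0.22, 0.15, 0.18],
        [0.12, 0.20, 0.11],
        [0.95, 0.96, 0.97],  # near-white "background" pixels
        [0.99, 0.98, 0.99],
        [0.97, 0.99, 0.96],
    ]
)
kmeans = KMeans(n_clusters=2, init="k-means++", n_init=10, random_state=42)
labels = kmeans.fit_predict(toy_features)

# The cluster whose center is nearest to [1, 1, 1] is the background.
white_cluster = np.argmin(
    np.linalg.norm(kmeans.cluster_centers_ - [1, 1, 1], axis=1)
)
if white_cluster != 0:  # same swap as in cluster() above
    labels[labels == 0] = -1
    labels[labels == white_cluster] = 0
    labels[labels == -1] = white_cluster
assert set(labels[3:]) == {0}  # background always ends up as label 0
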
process_images(input_folder, output_folder) diff --git a/ClustringRefinement.py b/ClustringRefinement.py index 497bf00..740861f 100644 --- a/ClustringRefinement.py +++ b/ClustringRefinement.py @@ -1,5 +1,6 @@ import glob import os + import cv2 import numpy as np from sklearn.cluster import KMeans @@ -7,11 +8,12 @@ # explicit function to normalize array def normalize(x): - x_norm = (x-np.min(x))/(np.max(x)-np.min(x)) + x_norm = (x - np.min(x)) / (np.max(x) - np.min(x)) return x_norm -names = glob.glob('/Path/To/Test/Thumbnails/*.png') + +names = glob.glob("/Path/To/Test/Thumbnails/*.png") names = [os.path.split(name)[1] for name in names] # print(names) # folders = glob.glob('/home/soroush47/fastpathology/projects/VibekesAnnotations/results/*') @@ -20,31 +22,38 @@ def normalize(x): for name in names: print("/Path/To/images/" + name) - FM = cv2.imread("/Path/To/Test/PWC/results/" + name)[...,1]/255 - Gr = cv2.imread("/Path/To/Test/Gradients/" + name)[...,1]/255 - Rw1 = cv2.imread("/Path/To/Test/Thumbnails/" + name)[...,0]/255 - Rw2 = cv2.imread("/Path/To/Test/Thumbnails/" + name)[...,1]/255 - Rw3 = cv2.imread("/Path/To/Test/Thumbnails/" + name)[...,2]/255 - SP = cv2.imread("/Path/To/Test/Superpixels/" + name)[...,1]/255 - - scale_percent = 30 # percent of original size + FM = cv2.imread("/Path/To/Test/PWC/results/" + name)[..., 1] / 255 + Gr = cv2.imread("/Path/To/Test/Gradients/" + name)[..., 1] / 255 + Rw1 = cv2.imread("/Path/To/Test/Thumbnails/" + name)[..., 0] / 255 + Rw2 = cv2.imread("/Path/To/Test/Thumbnails/" + name)[..., 1] / 255 + Rw3 = cv2.imread("/Path/To/Test/Thumbnails/" + name)[..., 2] / 255 + SP = cv2.imread("/Path/To/Test/Superpixels/" + name)[..., 1] / 255 + + scale_percent = 30 # percent of original size width = int(Rw1.shape[1] * scale_percent / 100) height = int(Rw1.shape[0] * scale_percent / 100) dim = (width, height) # resize image - Rw1 = cv2.resize(Rw1, dim, interpolation = cv2.INTER_AREA) - Rw2 = cv2.resize(Rw2, dim, interpolation = cv2.INTER_AREA) - Rw3 = cv2.resize(Rw3, dim, interpolation = cv2.INTER_AREA) - FM = cv2.resize(FM, dim, interpolation = cv2.INTER_AREA) - Gr = cv2.resize(Gr, dim, interpolation = cv2.INTER_AREA) - SP = cv2.resize(SP, dim, interpolation = cv2.INTER_AREA) + Rw1 = cv2.resize(Rw1, dim, interpolation=cv2.INTER_AREA) + Rw2 = cv2.resize(Rw2, dim, interpolation=cv2.INTER_AREA) + Rw3 = cv2.resize(Rw3, dim, interpolation=cv2.INTER_AREA) + FM = cv2.resize(FM, dim, interpolation=cv2.INTER_AREA) + Gr = cv2.resize(Gr, dim, interpolation=cv2.INTER_AREA) + SP = cv2.resize(SP, dim, interpolation=cv2.INTER_AREA) FM = normalize(FM) - FM[FM<0.7] = 0 + FM[FM < 0.7] = 0 Ws = np.array([1, 1, 1, 0.8, 0.2, 0.4]) - features_initial = [FM, Rw1, Rw2, Rw3, Gr, SP] # Assuming these are your feature arrays + features_initial = [ + FM, + Rw1, + Rw2, + Rw3, + Gr, + SP, + ] # Assuming these are your feature arrays # Apply the weights to each feature using map weighted_features = list(map(lambda f, w: f * w, features, Ws)) @@ -56,7 +65,7 @@ def normalize(x): features = features_stacked.reshape(-1, 5) # Apply KMeans clustering with a consistent initialization and random seed - kmeans = KMeans(n_clusters=3, init='k-means++', random_state=42) + kmeans = KMeans(n_clusters=3, init="k-means++", random_state=42) labels = kmeans.fit_predict(features) # Reshape the labels to the image's shape @@ -66,11 +75,10 @@ def normalize(x): overlap_scores = [np.sum((labels_2D == i) & (FM == 1)) for i in range(3)] main_cluster = np.argmax(overlap_scores) - # Replace the main cluster with 1 and 
other clusters with 0 pred = np.where(labels_2D == main_cluster, 1, 0) - label=pred.astype(np.uint8) + label = pred.astype(np.uint8) label = cv2.medianBlur(label, 3) def fill_holes(binary_img): @@ -85,10 +93,10 @@ def fill_holes(binary_img): # Mask used for flood filling. Notice the size needs to be 2 pixels larger than the image h, w = im_th.shape[:2] - mask = np.zeros((h+2, w+2), np.uint8) + mask = np.zeros((h + 2, w + 2), np.uint8) # Flood fill from point (0, 0) - cv2.floodFill(im_floodfill, mask, (0,0), 255) + cv2.floodFill(im_floodfill, mask, (0, 0), 255) # Invert floodfilled image im_floodfill_inv = cv2.bitwise_not(im_floodfill) @@ -100,16 +108,18 @@ def fill_holes(binary_img): label = fill_holes(label) - smoothed_image = cv2.blur(label, (79,79)) - smoothed_image = cv2.threshold(smoothed_image,10, 200, cv2.THRESH_BINARY) - Gr = cv2.imread("/Path/To/Test/Gradients/" + name)[...,1] - Gr = cv2.resize(Gr, dim, interpolation = cv2.INTER_AREA) + smoothed_image = cv2.blur(label, (79, 79)) + smoothed_image = cv2.threshold(smoothed_image, 10, 200, cv2.THRESH_BINARY) + Gr = cv2.imread("/Path/To/Test/Gradients/" + name)[..., 1] + Gr = cv2.resize(Gr, dim, interpolation=cv2.INTER_AREA) Gr = cv2.medianBlur(Gr, 11) - ret,thresh = cv2.threshold(Gr,10,51,cv2.THRESH_BINARY) + ret, thresh = cv2.threshold(Gr, 10, 51, cv2.THRESH_BINARY) # print(np.unique(thresh)) - contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + contours, hierarchy = cv2.findContours( + thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE + ) empt = np.zeros(Rw2.shape) - smoothed_image[1][thresh<0.5]=0 - smoothed_image[1][smoothed_image[1]>100]=255 + smoothed_image[1][thresh < 0.5] = 0 + smoothed_image[1][smoothed_image[1] > 100] = 255 Final = cv2.medianBlur(smoothed_image[1], 21) - cv2.imwrite( '/Path/To/Test/ClusteringResults/' + name, Final) \ No newline at end of file + cv2.imwrite("/Path/To/Test/ClusteringResults/" + name, Final) diff --git a/FewShot.py b/FewShot.py index 7046169..2cd3b40 100644 --- a/FewShot.py +++ b/FewShot.py @@ -1,36 +1,40 @@ import os -import numpy as np + import cv2 +import numpy as np +import onnx import tensorflow as tf import tf2onnx -import onnx - -from keras.models import Model, load_model +from keras.models import Model +from keras.models import load_model +from MLD import multi_lens_distortion +from PIL import Image +from PIL import ImageEnhance from tensorflow.keras import backend as K -from tensorflow.keras.layers import * from tensorflow.keras import initializers -from tensorflow.keras import optimizers from tensorflow.keras import layers -from PIL import Image -from PIL import ImageEnhance -from MLD import multi_lens_distortion +from tensorflow.keras import optimizers +from tensorflow.keras.layers import * os.environ["CUDA_VISIBLE_DEVICES"] = "0" # Select GPU with index 0 -filenames_Tumor = next(os.walk('./Path/To/Tumor/'), (None, None, []))[2] # [] if no file -filenames_Normal = next(os.walk('./Path/To/Normal/'), (None, None, []))[2] # [] if no file +filenames_Tumor = next(os.walk("./Path/To/Tumor/"), (None, None, []))[ + 2 +] # [] if no file +filenames_Normal = next(os.walk("./Path/To/Normal/"), (None, None, []))[ + 2 +] # [] if no file data = { - "Tumor":['./Path/To/Tumor/'+i for i in filenames_Tumor], - "Normal":['./Path/To/Normal/'+j for j in filenames_Normal] - + "Tumor": ["./Path/To/Tumor/" + i for i in filenames_Tumor], + "Normal": ["./Path/To/Normal/" + j for j in filenames_Normal], } def load_image(image_path): image = tf.io.read_file(image_path) - image 
= tf.image.decode_png(image)[:,:,0:3] + image = tf.image.decode_png(image)[:, :, 0:3] # print(image) image = tf.image.random_hue(image, 0.08) image = tf.image.random_contrast(image, 0.7, 1.3) @@ -40,27 +44,30 @@ def load_image(image_path): image = tf.image.random_flip_up_down(image) image = tf.image.random_crop(image, (224, 224, 3)) image = tf.cast(image, tf.float32) - image = image/255 + image = image / 255 # image = tf.numpy_function( - # multi_lens_distortion, - # [image, 4, (80, 110), (-0.5, 0.5)], + # multi_lens_distortion, + # [image, 4, (80, 110), (-0.5, 0.5)], # tf.uint8 # ) return image -data_images = { - "Tumor": data["Tumor"], - "Normal": data["Normal"] -} + +data_images = {"Tumor": data["Tumor"], "Normal": data["Normal"]} + def load_images(paths): return np.array([load_image(path) for path in paths]) + IMG_SIZE = (224, 224) IMG_SHAPE = IMG_SIZE + (3,) + def embedding_model(): - prev_model = tf.keras.applications.DenseNet121(input_shape=IMG_SHAPE, include_top=False, weights='imagenet') + prev_model = tf.keras.applications.DenseNet121( + input_shape=IMG_SHAPE, include_top=False, weights="imagenet" + ) z = tf.keras.layers.Flatten()(prev_model.output) z = tf.keras.layers.Dense(32, activation="relu")(z) @@ -71,7 +78,11 @@ def embedding_model(): embedding_net = embedding_model() for layer in embedding_net.layers[:-12]: layer.trainable = False -embedding_net.compile(optimizer=optimizers.Adam(0.1), loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) +embedding_net.compile( + optimizer=optimizers.Adam(0.1), + loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), + metrics=["accuracy"], +) def compute_prototype(embeddings, labels): @@ -84,50 +95,68 @@ def compute_prototype(embeddings, labels): num_epochs = 100 -best_loss = float('inf') # Initialize best loss to infinity -best_model_path = 'best_model.h5' # Define path to save the best model +best_loss = float("inf") # Initialize best loss to infinity +best_model_path = "best_model.h5" # Define path to save the best model -for epoch in range(num_epochs): +for epoch in range(num_epochs): epoch_loss_avg = tf.keras.metrics.Mean() # Randomly sample support set and query set for both classes - support_idx_tumor = np.random.choice(len(data_images["Tumor"]), n_shots, replace=False) - query_idx_tumor = np.random.choice(len(data_images["Tumor"]), n_query, replace=False) - - support_idx_normal = np.random.choice(len(data_images["Normal"]), n_shots, replace=False) - query_idx_normal = np.random.choice(len(data_images["Normal"]), n_query, replace=False) + support_idx_tumor = np.random.choice( + len(data_images["Tumor"]), n_shots, replace=False + ) + query_idx_tumor = np.random.choice( + len(data_images["Tumor"]), n_query, replace=False + ) + + support_idx_normal = np.random.choice( + len(data_images["Normal"]), n_shots, replace=False + ) + query_idx_normal = np.random.choice( + len(data_images["Normal"]), n_query, replace=False + ) # Load images using indices and paths - support_tumor = load_images([data_images["Tumor"][i] for i in support_idx_tumor]) - query_tumor = load_images([data_images["Tumor"][i] for i in query_idx_tumor]) - - support_normal = load_images([data_images["Normal"][i] for i in support_idx_normal]) - query_normal = load_images([data_images["Normal"][i] for i in query_idx_normal]) - - - support_set = tf.concat([support_normal, support_tumor ], axis=0) + support_tumor = load_images( + [data_images["Tumor"][i] for i in support_idx_tumor] + ) + query_tumor = load_images( + 
[data_images["Tumor"][i] for i in query_idx_tumor] + ) + + support_normal = load_images( + [data_images["Normal"][i] for i in support_idx_normal] + ) + query_normal = load_images( + [data_images["Normal"][i] for i in query_idx_normal] + ) + + support_set = tf.concat([support_normal, support_tumor], axis=0) query_set = tf.concat([query_normal, query_tumor], axis=0) - support_labels = [0] * n_shots + [1] * n_shots query_labels = [0] * n_query + [1] * n_query # Ensure labels are one-hot encoded - + query_labels_one_hot = tf.one_hot(query_labels, depth=2) support_embeddings = embedding_net(support_set) query_embeddings = embedding_net(query_set) - - tumor_prototype = compute_prototype(support_embeddings, tf.equal(support_labels, 1)) - normal_prototype = compute_prototype(support_embeddings, tf.equal(support_labels, 0)) + tumor_prototype = compute_prototype( + support_embeddings, tf.equal(support_labels, 1) + ) + normal_prototype = compute_prototype( + support_embeddings, tf.equal(support_labels, 0) + ) # print(tumor_prototype.shape) prototypes = tf.stack([tumor_prototype, normal_prototype]) # Compute Euclidean distance from each query embedding to the prototypes - distances = tf.norm(tf.expand_dims(query_embeddings, 1) - prototypes, axis=-1) - + distances = tf.norm( + tf.expand_dims(query_embeddings, 1) - prototypes, axis=-1 + ) # Optimize optimizer = tf.keras.optimizers.Adam(learning_rate=0.1) @@ -135,19 +164,33 @@ def compute_prototype(embeddings, labels): # Compute the loss and optimize with tf.GradientTape() as tape: - loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=-distances, labels=query_labels_one_hot)) + loss = tf.reduce_mean( + tf.nn.softmax_cross_entropy_with_logits( + logits=-distances, labels=query_labels_one_hot + ) + ) epoch_loss_avg.update_state(loss) # All model-related calculations here support_embeddings = embedding_net(support_set) query_embeddings = embedding_net(query_set) - - tumor_prototype = compute_prototype(support_embeddings, tf.equal(support_labels, 1)) - normal_prototype = compute_prototype(support_embeddings, tf.equal(support_labels, 0)) + + tumor_prototype = compute_prototype( + support_embeddings, tf.equal(support_labels, 1) + ) + normal_prototype = compute_prototype( + support_embeddings, tf.equal(support_labels, 0) + ) prototypes = tf.stack([tumor_prototype, normal_prototype]) - - distances = tf.norm(tf.expand_dims(query_embeddings, 1) - prototypes, axis=-1) - loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=-distances, labels=query_labels_one_hot)) + + distances = tf.norm( + tf.expand_dims(query_embeddings, 1) - prototypes, axis=-1 + ) + loss = tf.reduce_mean( + tf.nn.softmax_cross_entropy_with_logits( + logits=-distances, labels=query_labels_one_hot + ) + ) print(f"Epoch {epoch+1}: Loss: {epoch_loss_avg.result()}") gradients = tape.gradient(loss, embedding_net.trainable_variables) @@ -165,4 +208,4 @@ def compute_prototype(embeddings, labels): best_model = tf.keras.models.load_model(best_model_path) onnx_model, _ = tf2onnx.convert.from_keras(best_model, opset=13) -onnx.save(onnx_model, "./FewShotModel.onnx") \ No newline at end of file +onnx.save(onnx_model, "./FewShotModel.onnx") diff --git a/GenerateGradients.py b/GenerateGradients.py index 529e059..031ce43 100644 --- a/GenerateGradients.py +++ b/GenerateGradients.py @@ -1,14 +1,15 @@ -import cv2 -import os import glob -import tensorflow as tf +import os + +import cv2 import matplotlib.pyplot as plt import numpy as np +import tensorflow as tf dst_dir = 
"Path/To/Gradients/Reults/" os.makedirs(dst_dir, exist_ok=True) -files = glob.glob('Path/To/Thumbnails/*.png') +files = glob.glob("Path/To/Thumbnails/*.png") # files2 = glob.glob('D:/Bergens/resized2/*.jpg') @@ -25,8 +26,12 @@ def generate_gradients(imgPath): resize_factor = 1 # Calculate the new height and width as tensors based on the resize factor - new_height = tf.cast(tf.cast(original_shape[0], tf.float32) * resize_factor, tf.int32) - new_width = tf.cast(tf.cast(original_shape[1], tf.float32) * resize_factor, tf.int32) + new_height = tf.cast( + tf.cast(original_shape[0], tf.float32) * resize_factor, tf.int32 + ) + new_width = tf.cast( + tf.cast(original_shape[1], tf.float32) * resize_factor, tf.int32 + ) # Resize the image to of its original size if necessary resized_img = tf.image.resize(img, [new_height, new_width]) @@ -51,23 +56,20 @@ def generate_gradients(imgPath): # plt.imshow(gy[0,...,1], cmap='gray') # plt.imshow(direction[0,...,2], cmap='gray') - a = magnitude[0,...,1]/tf.math.reduce_max(magnitude[0,...,1]) + a = magnitude[0, ..., 1] / tf.math.reduce_max(magnitude[0, ..., 1]) - plt.axis('off') + plt.axis("off") root, ext = os.path.splitext(f) basename = os.path.basename(root) b = np.array(a) - b *= 255.0/b.max() - + b *= 255.0 / b.max() print(type(b)) # plt.imshow(np.array(b)) - cv2.imwrite(os.path.join(dst_dir, basename + '' + '.png'), np.array(b)) - + cv2.imwrite(os.path.join(dst_dir, basename + "" + ".png"), np.array(b)) for indx, f in enumerate(files): print(indx) generate_gradients(f) - diff --git a/PostProcess.py b/PostProcess.py index 86428f7..79880e1 100644 --- a/PostProcess.py +++ b/PostProcess.py @@ -1,9 +1,11 @@ # post processing - -import cv2 + import os + +import cv2 import numpy as np + def remove_small_fragments(image_path, size_threshold): # Read the image image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE) @@ -17,7 +19,9 @@ def remove_small_fragments(image_path, size_threshold): _, binary_image = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY) # Find all contours - contours, _ = cv2.findContours(binary_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) + contours, _ = cv2.findContours( + binary_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE + ) # Filter out small fragments for cnt in contours: @@ -26,21 +30,25 @@ def remove_small_fragments(image_path, size_threshold): return binary_image + def smooth_edges(binary_image, kernel_size=7, iterations=1): # Define the kernel for morphological operations kernel = np.ones((kernel_size, kernel_size), np.uint8) # Apply morphological opening (erosion followed by dilation) smoothed_image = cv2.medianBlur(binary_image, ksize=11) - smoothed_image = cv2.morphologyEx(smoothed_image, cv2.MORPH_OPEN, kernel, iterations=iterations) + smoothed_image = cv2.morphologyEx( + smoothed_image, cv2.MORPH_OPEN, kernel, iterations=iterations + ) return smoothed_image + def process_images_in_directory(directory, size_threshold): for filename in os.listdir(directory): if filename.endswith(".png"): image_path = os.path.join(directory, filename) - + # Remove small fragments binary_image = remove_small_fragments(image_path, size_threshold) @@ -51,8 +59,11 @@ def process_images_in_directory(directory, size_threshold): cv2.imwrite(image_path, smoothed_image) print(f"Processed {filename}") + # Define the directory and size threshold -directory = '/Path/To/SegmentationResults/' # Update with the path to your images +directory = ( + "/Path/To/SegmentationResults/" # Update with the path to your images +) size_threshold = 10 # Update this 
value based on your requirement -process_images_in_directory(directory, size_threshold) \ No newline at end of file +process_images_in_directory(directory, size_threshold) diff --git a/SegmentationTraining.py b/SegmentationTraining.py index fe38425..3ef9442 100644 --- a/SegmentationTraining.py +++ b/SegmentationTraining.py @@ -1,92 +1,101 @@ import os -import numpy as np + import cv2 +import numpy as np +import onnx import tensorflow as tf import tf2onnx -import onnx - +from keras.models import Model +from keras.models import load_model +from MLD import multi_lens_distortion from PIL import Image -from keras.models import Model, load_model +from tensorflow.keras import Input +from tensorflow.keras import layers +from tensorflow.keras import models from tensorflow.keras.layers import * -from tensorflow.keras import layers, models, Input -from tensorflow.keras.preprocessing.image import load_img, img_to_array +from tensorflow.keras.preprocessing.image import img_to_array +from tensorflow.keras.preprocessing.image import load_img from tensorflow.keras.utils import Sequence -from tensorflow.python.keras.callbacks import ModelCheckpoint, EarlyStopping, CSVLogger -from MLD import multi_lens_distortion +from tensorflow.python.keras.callbacks import CSVLogger +from tensorflow.python.keras.callbacks import EarlyStopping +from tensorflow.python.keras.callbacks import ModelCheckpoint -os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true' +os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true" os.environ["CUDA_VISIBLE_DEVICES"] = "2" # Select GPU def build_network(): - input_image = Input(shape=(1120, 1120, 3), name='input_image') - input_pred = Input(shape=(1120, 1120, 1), name='input_pred') + input_image = Input(shape=(1120, 1120, 3), name="input_image") + input_pred = Input(shape=(1120, 1120, 1), name="input_pred") - conv_pred = layers.Conv2D(3, (3, 3), activation='relu', padding='same')(input_pred) + conv_pred = layers.Conv2D(3, (3, 3), activation="relu", padding="same")( + input_pred + ) combined = layers.Concatenate()([input_image, conv_pred]) - + # Block 1 - c1 = layers.Conv2D(4, (3, 3), activation='relu', padding='same')(combined) - c1 = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(c1) + c1 = layers.Conv2D(4, (3, 3), activation="relu", padding="same")(combined) + c1 = layers.Conv2D(8, (3, 3), activation="relu", padding="same")(c1) c1 = layers.BatchNormalization()(c1) p1 = layers.MaxPooling2D((2, 2))(c1) - + # Block 2 - c2 = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(p1) - c2 = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(c2) + c2 = layers.Conv2D(8, (3, 3), activation="relu", padding="same")(p1) + c2 = layers.Conv2D(16, (3, 3), activation="relu", padding="same")(c2) c2 = layers.BatchNormalization()(c2) p2 = layers.MaxPooling2D((2, 2))(c2) - + # Block 3 - c3 = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(p2) + c3 = layers.Conv2D(16, (3, 3), activation="relu", padding="same")(p2) # c3 = layers.Dropout(0.3)(c3) c3 = layers.SpatialDropout2D(0.3)(c3) - c3 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(c3) + c3 = layers.Conv2D(32, (3, 3), activation="relu", padding="same")(c3) c3 = layers.BatchNormalization()(c3) p3 = layers.MaxPooling2D((2, 2))(c3) - + # Block 4 - c4 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(p3) - c4 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(c4) + c4 = layers.Conv2D(32, (3, 3), activation="relu", padding="same")(p3) + c4 = layers.Conv2D(64, (3, 3), activation="relu", 
padding="same")(c4) c4 = layers.BatchNormalization()(c4) - + # Bottleneck - bn = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(c4) + bn = layers.Conv2D(128, (3, 3), activation="relu", padding="same")(c4) bn = layers.BatchNormalization()(bn) - + # Upsampling (decoder) side # Block 1 - u1 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(bn) + u1 = layers.Conv2D(64, (3, 3), activation="relu", padding="same")(bn) u1 = layers.Concatenate()([u1, c4]) u1 = layers.BatchNormalization()(u1) # Block 2 of the Upsampling (decoder) side u2 = layers.UpSampling2D(size=(2, 2))(u1) - u2 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(u2) + u2 = layers.Conv2D(32, (3, 3), activation="relu", padding="same")(u2) # u2 = layers.ZeroPadding2D(padding=((1, 0), (1, 0)))(u2) # Adjust padding as needed u2 = layers.Concatenate()([u2, c3]) u2 = layers.BatchNormalization()(u2) # Block 3 u3 = layers.UpSampling2D(size=(2, 2))(u2) - u3 = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(u3) + u3 = layers.Conv2D(16, (3, 3), activation="relu", padding="same")(u3) u3 = layers.Concatenate()([u3, c2]) u3 = layers.BatchNormalization()(u3) # Block 4 u4 = layers.UpSampling2D(size=(2, 2))(u3) - u4 = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(u4) + u4 = layers.Conv2D(8, (3, 3), activation="relu", padding="same")(u4) # u4 = layers.Concatenate()([u4, c1_1]) u4 = layers.BatchNormalization()(u4) - + # Final Layer - x = layers.Conv2D(2, (3, 3), activation='softmax', padding='same')(u4) + x = layers.Conv2D(2, (3, 3), activation="softmax", padding="same")(u4) - model = models.Model(inputs=[input_image,input_pred], outputs=x) + model = models.Model(inputs=[input_image, input_pred], outputs=x) return model + unet_model = build_network() unet_model.summary() @@ -96,22 +105,25 @@ def build_network(): def PreProc(img, pred, mask): - img = img/255. - pred = pred /255. - mask = mask/255. + img = img / 255.0 + pred = pred / 255.0 + mask = mask / 255.0 - img = tf.image.resize(img,IMG_SIZE) - pred = tf.image.resize(pred,IMG_SIZE) - mask = tf.image.resize(mask,IMG_SIZE) + img = tf.image.resize(img, IMG_SIZE) + pred = tf.image.resize(pred, IMG_SIZE) + mask = tf.image.resize(mask, IMG_SIZE) mask = tf.cast(mask > 0.5, tf.float32) return img, pred, mask + def Augmentor(img, pred, mask): # Apply transformations to both the image and the mask using a fixed seed for each random operation - - seed = np.random.randint(0, 1e6) # Generate a common seed for this iteration + + seed = np.random.randint( + 0, 1e6 + ) # Generate a common seed for this iteration # Random flips if tf.random.uniform((), seed=seed) > 0.5: @@ -125,45 +137,55 @@ def Augmentor(img, pred, mask): mask = tf.image.flip_up_down(mask) if tf.random.uniform((), seed=seed) > 0.5: - nbr_rot = tf.random.uniform(shape=[], minval=1, maxval=4, dtype=tf.int32) - img =tf.image.rot90(img, k=nbr_rot) - pred =tf.image.rot90(pred, k=nbr_rot) - mask =tf.image.rot90(mask, k=nbr_rot) + nbr_rot = tf.random.uniform( + shape=[], minval=1, maxval=4, dtype=tf.int32 + ) + img = tf.image.rot90(img, k=nbr_rot) + pred = tf.image.rot90(pred, k=nbr_rot) + mask = tf.image.rot90(mask, k=nbr_rot) # Other transformations # print(img.shape) # This should print something like (224, 224, 4) for a 4-channel image. 
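    # Review note: the four photometric ops below write into
    # `augmented_channels`, but that tensor is never assigned back to
    # `img`, so the hue/contrast/brightness/saturation jitter is a no-op
    # here; the lens distortion further down still reads the unjittered
    # `img`.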
augmented_channels = tf.image.random_hue(img, 0.08, seed=seed) - augmented_channels = tf.image.random_contrast(augmented_channels, 0.7, 1.3, seed=seed) - augmented_channels = tf.image.random_brightness(augmented_channels, 0.2, seed=seed) - augmented_channels = tf.image.random_saturation(augmented_channels, 0.7, 1.3, seed=seed) + augmented_channels = tf.image.random_contrast( + augmented_channels, 0.7, 1.3, seed=seed + ) + augmented_channels = tf.image.random_brightness( + augmented_channels, 0.2, seed=seed + ) + augmented_channels = tf.image.random_saturation( + augmented_channels, 0.7, 1.3, seed=seed + ) distortion_seed = np.random.randint(0, 2**32 - 1) # Apply multi_lens_distortion to both the image and the mask img = tf.numpy_function( - multi_lens_distortion, - [img, 6, (300, 500), (-0.3, 0.5), distortion_seed], - tf.float32 + multi_lens_distortion, + [img, 6, (300, 500), (-0.3, 0.5), distortion_seed], + tf.float32, ) pred = tf.numpy_function( - multi_lens_distortion, - [pred, 6, (300, 500), (-0.3, 0.5), distortion_seed], - tf.float32 + multi_lens_distortion, + [pred, 6, (300, 500), (-0.3, 0.5), distortion_seed], + tf.float32, ) mask = tf.numpy_function( multi_lens_distortion, [mask, 6, (300, 500), (-0.3, 0.5), distortion_seed], - tf.float32 + tf.float32, ) return img, pred, mask class TrainDataGenerator(Sequence): - def __init__(self, image_dir, pred_dir, mask_dir, batch_size, augmentation=True): + def __init__( + self, image_dir, pred_dir, mask_dir, batch_size, augmentation=True + ): self.image_dir = image_dir self.pred_dir = pred_dir self.mask_dir = mask_dir @@ -174,38 +196,54 @@ def __init__(self, image_dir, pred_dir, mask_dir, batch_size, augmentation=True) def __len__(self): return int(np.ceil(len(self.image_filenames) / self.batch_size)) - def on_epoch_begin(self): np.random.shuffle(self.image_filenames) - def __getitem__(self, index): # Get batch of filenames - batch_files = self.image_filenames[index*self.batch_size : (index+1)*self.batch_size] - + batch_files = self.image_filenames[ + index * self.batch_size : (index + 1) * self.batch_size + ] + batch_imgs = [] batch_preds = [] batch_masks = [] for filename in batch_files: # Load 3-channel image img = img_to_array(load_img(os.path.join(self.image_dir, filename))) - + # Load the corresponding 1-channel prediction - pred = img_to_array(load_img(os.path.join(self.pred_dir, filename), color_mode='grayscale')) - + pred = img_to_array( + load_img( + os.path.join(self.pred_dir, filename), + color_mode="grayscale", + ) + ) + # Check if prediction has only one channel - assert pred.shape[2] == 1, f"Prediction {filename} has more than one channel!" - + assert ( + pred.shape[2] == 1 + ), f"Prediction {filename} has more than one channel!" 
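            # Review note: cv2.resize expects dsize as (width, height);
            # the call below passes (img.shape[0], img.shape[1]), i.e.
            # (rows, cols). That is safe only because the crops here are
            # square; non-square inputs would come back with the two
            # axes swapped.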
+ # Resize prediction to match the image size # pred = tf.image.resize(pred, (img.shape[0], img.shape[1])) - pred = cv2.resize(pred, (img.shape[0], img.shape[1]), interpolation=cv2.INTER_LINEAR) + pred = cv2.resize( + pred, + (img.shape[0], img.shape[1]), + interpolation=cv2.INTER_LINEAR, + ) pred = np.expand_dims(pred, axis=-1) # # Concatenate to form 4-channel input # combined_img = np.concatenate([img, pred], axis=-1) # this is for a 4-channel input # print(combined_img.shape) # Load the corresponding mask - mask = img_to_array(load_img(os.path.join(self.mask_dir, filename), color_mode='grayscale')) + mask = img_to_array( + load_img( + os.path.join(self.mask_dir, filename), + color_mode="grayscale", + ) + ) # print(np.unique(img.flatten())) # print(np.unique(pred.flatten())) @@ -224,75 +262,98 @@ def __getitem__(self, index): # break - mask = tf.concat([1-mask, mask], axis = -1) + mask = tf.concat([1 - mask, mask], axis=-1) batch_imgs.append(img) batch_preds.append(pred) batch_masks.append(mask) + return [np.array(batch_imgs), np.array(batch_preds)], np.array( + batch_masks + ) - return [np.array(batch_imgs), np.array(batch_preds)], np.array(batch_masks) - batch_size = 2 -train_gen = TrainDataGenerator('./train/images/', './train/pw_predictions/', './train/masks/', batch_size=batch_size, augmentation=True) -val_gen = TrainDataGenerator('./valid/images/', './valid/pw_predictions/', './valid/masks/', batch_size=batch_size, augmentation=False) +train_gen = TrainDataGenerator( + "./train/images/", + "./train/pw_predictions/", + "./train/masks/", + batch_size=batch_size, + augmentation=True, +) +val_gen = TrainDataGenerator( + "./valid/images/", + "./valid/pw_predictions/", + "./valid/masks/", + batch_size=batch_size, + augmentation=False, +) def get_dice_loss(nb_classes=1, use_background=False): def dice_loss(target, output, epsilon=1e-10): - smooth = 1. + smooth = 1.0 dice = 0 for i in range(0 if use_background else 1, nb_classes): output1 = output[..., i] target1 = target[..., i] intersection1 = tf.reduce_sum(output1 * target1) - union1 = tf.reduce_sum(output1 * output1) + tf.reduce_sum(target1 * target1) - dice += (2. * intersection1 + smooth) / (union1 + smooth) + union1 = tf.reduce_sum(output1 * output1) + tf.reduce_sum( + target1 * target1 + ) + dice += (2.0 * intersection1 + smooth) / (union1 + smooth) if use_background: dice /= nb_classes else: - dice /= (nb_classes - 1) - return tf.clip_by_value(1. - dice, 0., 1. - epsilon) + dice /= nb_classes - 1 + return tf.clip_by_value(1.0 - dice, 0.0, 1.0 - epsilon) + return dice_loss + def dsc_thresholded(nb_classes=2, use_background=False): def dice(target, output, epsilon=1e-10): - smooth = 1. + smooth = 1.0 dice = 0 output = tf.cast(output > 0.5, tf.float32) for i in range(0 if use_background else 1, nb_classes): - output1 = output[:,:,:, i] - target1 = target[:,:,:, i] + output1 = output[:, :, :, i] + target1 = target[:, :, :, i] intersection1 = tf.reduce_sum(output1 * target1) - union1 = tf.reduce_sum(output1 * output1) + tf.reduce_sum(target1 * target1) - dice += (2. * intersection1 + smooth) / (union1 + smooth) + union1 = tf.reduce_sum(output1 * output1) + tf.reduce_sum( + target1 * target1 + ) + dice += (2.0 * intersection1 + smooth) / (union1 + smooth) if use_background: dice /= nb_classes else: - dice /= (nb_classes - 1) + dice /= nb_classes - 1 - return tf.clip_by_value(dice, 0., 1. 
- epsilon) + return tf.clip_by_value(dice, 0.0, 1.0 - epsilon) return dice dice_loss_fn = get_dice_loss(nb_classes=2, use_background=False) dice_thresh_fn = dsc_thresholded() -unet_model.compile(optimizer=tf.keras.optimizers.experimental.Adam(1e-4), loss=dice_loss_fn, metrics=[dice_thresh_fn]) +unet_model.compile( + optimizer=tf.keras.optimizers.experimental.Adam(1e-4), + loss=dice_loss_fn, + metrics=[dice_thresh_fn], +) -early = EarlyStopping(monitor='val_loss', patience=20, verbose=1) +early = EarlyStopping(monitor="val_loss", patience=20, verbose=1) save_best = ModelCheckpoint( - './model', - monitor='val_loss', + "./model", + monitor="val_loss", verbose=1, save_best_only=True, save_weights_only=False, - mode='auto', - period=1 + mode="auto", + period=1, ) history = unet_model.fit( @@ -301,11 +362,11 @@ def dice(target, output, epsilon=1e-10): validation_data=val_gen, validation_steps=len(val_gen), epochs=300, - callbacks=[early, save_best] + callbacks=[early, save_best], ) -best_model = load_model('./model', compile=False) +best_model = load_model("./model", compile=False) onnx_model, _ = tf2onnx.convert.from_keras(best_model, opset=13) -onnx.save(onnx_model, "./model.onnx") \ No newline at end of file +onnx.save(onnx_model, "./model.onnx") diff --git a/TissueClustersFromThumbnails.py b/TissueClustersFromThumbnails.py index 8bcb592..63abdca 100644 --- a/TissueClustersFromThumbnails.py +++ b/TissueClustersFromThumbnails.py @@ -1,21 +1,20 @@ ## creating direct clusters of tissues using pretrained models -import numpy as np -import cv2 import glob import os + +import cv2 +import matplotlib.pyplot as plt import numpy as np import tensorflow as tf -import matplotlib.pyplot as plt - +from PIL import Image +from sklearn.cluster import KMeans from tensorflow.keras.applications import VGG19 from tensorflow.keras.preprocessing import image as img_prep -from sklearn.cluster import KMeans -from PIL import Image ## Select GPU -# os.environ["CUDA_VISIBLE_DEVICES"] = "0" +# os.environ["CUDA_VISIBLE_DEVICES"] = "0" -files = glob.glob('/Path/To/Thumbnails/*.png') +files = glob.glob("/Path/To/Thumbnails/*.png") for f in files: ff, _ = os.path.splitext(f) basename = os.path.basename(ff) @@ -24,7 +23,10 @@ image_path = f original_image = Image.open(image_path) original_image = np.array(original_image) - original_image = cv2.resize(original_image, (int(original_image.shape[1]/2), int(original_image.shape[0]/2))) + original_image = cv2.resize( + original_image, + (int(original_image.shape[1] / 2), int(original_image.shape[0] / 2)), + ) # Define the size of the small squares square_size = 32 @@ -35,7 +37,7 @@ num_squares_y = height // square_size # Load pre-trained model + higher level layers - model = VGG19(weights='imagenet', include_top=False) + model = VGG19(weights="imagenet", include_top=False) # Initialize an array to store feature vectors feature_vectors = [] @@ -44,7 +46,10 @@ for i in range(num_squares_x): for j in range(num_squares_y): # Extract small square from original image - square = original_image[i*square_size:(i+1)*square_size, j*square_size:(j+1)*square_size] + square = original_image[ + i * square_size : (i + 1) * square_size, + j * square_size : (j + 1) * square_size, + ] # Preprocess the square square = img_prep.img_to_array(square) square = np.expand_dims(square, axis=0) @@ -57,16 +62,20 @@ # Apply k-means clustering on the feature vectors num_clusters = 7 # Define the number of clusters - kmeans = KMeans(n_clusters=num_clusters, random_state=0).fit(feature_vectors) + kmeans = 
KMeans(n_clusters=num_clusters, random_state=0).fit( + feature_vectors + ) labels = kmeans.labels_ # Create an image with clustering result clustered_image = np.zeros_like(original_image) for i in range(num_squares_x): for j in range(num_squares_y): - label = labels[i*num_squares_y + j] + label = labels[i * num_squares_y + j] color = np.array(plt.cm.rainbow(label / num_clusters)[:3]) * 255 - clustered_image[i*square_size:(i+1)*square_size, j*square_size:(j+1)*square_size] = color - - cv2.imwrite('/Path/To/Outputs/'+ basename + '.png',clustered_image) + clustered_image[ + i * square_size : (i + 1) * square_size, + j * square_size : (j + 1) * square_size, + ] = color + cv2.imwrite("/Path/To/Outputs/" + basename + ".png", clustered_image) diff --git a/src/augmentation/MLD.py b/src/augmentation/MLD.py index 3943717..fc18bfd 100644 --- a/src/augmentation/MLD.py +++ b/src/augmentation/MLD.py @@ -1,5 +1,6 @@ # Multi-lens Distortion + def multi_lens_distortion(image, num_lenses, radius_range, strength_range): """ Apply a smooth lens distortion effect with multiple lenses to an image. @@ -17,12 +18,12 @@ def multi_lens_distortion(image, num_lenses, radius_range, strength_range): # Randomly generate lens centers within the image boundaries. cx = np.random.randint(0, W, size=num_lenses) cy = np.random.randint(0, H, size=num_lenses) - - # Initialize distorted_image to be the original image. + + # Initialize distorted_image to be the original image. # It will be updated as each lens is applied. distorted_image = np.copy(image) yidx, xidx = np.indices((H, W)) - + # Apply each lens. for i in range(num_lenses): # Randomly select radius and strength for the current lens within the provided ranges. @@ -47,10 +48,10 @@ def multi_lens_distortion(image, num_lenses, radius_range, strength_range): distorted_x = dx * (1 - strength * scaling_factor) + cx[i] # Ensure the new indices are not out of bounds. - distorted_y = np.clip(distorted_y, 0, H-1).astype(int) - distorted_x = np.clip(distorted_x, 0, W-1).astype(int) + distorted_y = np.clip(distorted_y, 0, H - 1).astype(int) + distorted_x = np.clip(distorted_x, 0, W - 1).astype(int) # Create the distorted image by mixing original and distorted coordinates. 
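        # `distorted_y`/`distorted_x` are full H x W index maps, so this
        # one fancy-indexing step gathers every output pixel from its
        # source coordinate at once; the channel axis broadcasts along
        # the trailing dimension.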
distorted_image = distorted_image[distorted_y, distorted_x] - + return distorted_image diff --git a/src/generator/Generator.py b/src/generator/Generator.py index c87a76c..9f97f3f 100644 --- a/src/generator/Generator.py +++ b/src/generator/Generator.py @@ -4,30 +4,37 @@ from MLD import multi_lens_distortion - def load_patch(x_start_val_lvl3, y_start_val_lvl3, filename, level, patch_size): print("This is the filename: ", filename) print("This X start point: ", x_start_val_lvl3) print("This Y start point: ", y_start_val_lvl3) - if not isinstance(filename, str): filename = filename.numpy().decode("utf-8") x_start_val_lvl3 = np.asarray(x_start_val_lvl3) - y_start_val_lvl3 = np.asarray(y_start_val_lvl3) + y_start_val_lvl3 = np.asarray(y_start_val_lvl3) importer = fast.WholeSlideImageImporter.create(filename) wsi = importer.runAndGetOutputData() patch_access = wsi.getAccess(fast.ACCESS_READ) - patch = patch_access.getPatchAsImage(level, int(x_start_val_lvl3[0]), int(y_start_val_lvl3[0]), patch_size, patch_size, False) - + patch = patch_access.getPatchAsImage( + level, + int(x_start_val_lvl3[0]), + int(y_start_val_lvl3[0]), + patch_size, + patch_size, + False, + ) + # Direct conversion to numpy array upon creation to avoid redundant calls return np.asarray(patch) class CustomDataGenerator(tf.keras.utils.Sequence): - def __init__(self, starting_positions, gts, batch_size, patch_size=256, level=3): + def __init__( + self, starting_positions, gts, batch_size, patch_size=256, level=3 + ): self.starting_positions = starting_positions self.gts = gts self.batch_size = batch_size @@ -36,22 +43,34 @@ def __init__(self, starting_positions, gts, batch_size, patch_size=256, level=3) # Dictionary to hold indices for balancing based on each combination of GT and clustering label self.combination_indices = { - (0, 1): [], (0, 2): [], (0, 3): [], (0, 4): [], - (1, 1): [], (1, 2): [], (1, 3): [], (1, 4): [] + (0, 1): [], + (0, 2): [], + (0, 3): [], + (0, 4): [], + (1, 1): [], + (1, 2): [], + (1, 3): [], + (1, 4): [], } - + # Populate combination_indices with index values for i, address in enumerate(self.starting_positions): gt = self.gts[i] - cluster_label = address[3] # Extract cluster_label from starting_positions + cluster_label = address[ + 3 + ] # Extract cluster_label from starting_positions self.combination_indices[(gt, cluster_label)].append(i) # Calculate the minimum count across all categories to ensure balance - self.min_samples = min([len(indices) for indices in self.combination_indices.values()]) + self.min_samples = min( + [len(indices) for indices in self.combination_indices.values()] + ) def __len__(self): # Each epoch will have a balanced set of samples across all categories - total_samples = self.min_samples * len(self.combination_indices) # Total samples for all categories + total_samples = self.min_samples * len( + self.combination_indices + ) # Total samples for all categories return total_samples // self.batch_size def __getitem__(self, idx): @@ -64,15 +83,26 @@ def __getitem__(self, idx): current_batch_indices = [] for category, indices in self.combination_indices.items(): - selected_indices = np.random.choice(indices, samples_per_category, replace=False) + selected_indices = np.random.choice( + indices, samples_per_category, replace=False + ) current_batch_indices.extend(selected_indices) - np.random.shuffle(current_batch_indices) # Shuffle to mix the categories within the batch + np.random.shuffle( + current_batch_indices + ) # Shuffle to mix the categories within the batch for i, index in 
enumerate(current_batch_indices): position = self.starting_positions[index] - x_start, y_start, filename, _ = position[1], position[2], position[0], position[3] - image = load_patch(x_start, y_start, filename, self.level, self.patch_size) + x_start, y_start, filename, _ = ( + position[1], + position[2], + position[0], + position[3], + ) + image = load_patch( + x_start, y_start, filename, self.level, self.patch_size + ) # Augmentation image = tf.convert_to_tensor(image, dtype=tf.float32) @@ -84,11 +114,15 @@ def __getitem__(self, idx): image = tf.image.random_flip_up_down(image) # Convert back to numpy for lens distortion augmentation image_np = image.numpy() - image_np = multi_lens_distortion(image_np, num_lenses=4, radius_range=(40, 70), strength_range=(-0.4, 0.4)) + image_np = multi_lens_distortion( + image_np, + num_lenses=4, + radius_range=(40, 70), + strength_range=(-0.4, 0.4), + ) batch_images.append(image_np) batch_labels[i, 0] = 1 - self.gts[index] batch_labels[i, 1] = self.gts[index] return np.array(batch_images), batch_labels - diff --git a/src/models/Build_DR.py b/src/models/Build_DR.py index 54d4197..aa48bdd 100644 --- a/src/models/Build_DR.py +++ b/src/models/Build_DR.py @@ -1,78 +1,84 @@ import tensorflow as tf -from tensorflow.keras import layers, models, Input +from tensorflow.keras import Input +from tensorflow.keras import layers +from tensorflow.keras import models def build_drunet(): - input_image = Input(shape=(1120, 1120, 3), name='input_image') - input_pred = Input(shape=(1120, 1120, 1), name='input_pred') + input_image = Input(shape=(1120, 1120, 3), name="input_image") + input_pred = Input(shape=(1120, 1120, 1), name="input_pred") - conv_pred = layers.Conv2D(3, (3, 3), activation='relu', padding='same')(input_pred) + conv_pred = layers.Conv2D(3, (3, 3), activation="relu", padding="same")( + input_pred + ) combined = layers.Concatenate()([input_image, conv_pred]) - + # Block 1 - c1 = layers.Conv2D(4, (3, 3), activation='relu', padding='same')(combined) - c1 = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(c1) + c1 = layers.Conv2D(4, (3, 3), activation="relu", padding="same")(combined) + c1 = layers.Conv2D(8, (3, 3), activation="relu", padding="same")(c1) c1 = layers.BatchNormalization()(c1) p1 = layers.MaxPooling2D((2, 2))(c1) - + # Block 2 - c2 = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(p1) - c2 = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(c2) + c2 = layers.Conv2D(8, (3, 3), activation="relu", padding="same")(p1) + c2 = layers.Conv2D(16, (3, 3), activation="relu", padding="same")(c2) c2 = layers.BatchNormalization()(c2) p2 = layers.MaxPooling2D((2, 2))(c2) - + # Block 3 - c3 = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(p2) + c3 = layers.Conv2D(16, (3, 3), activation="relu", padding="same")(p2) # c3 = layers.Dropout(0.3)(c3) c3 = layers.SpatialDropout2D(0.3)(c3) - c3 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(c3) + c3 = layers.Conv2D(32, (3, 3), activation="relu", padding="same")(c3) c3 = layers.BatchNormalization()(c3) p3 = layers.MaxPooling2D((2, 2))(c3) - + # Block 4 - c4 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(p3) - c4 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(c4) + c4 = layers.Conv2D(32, (3, 3), activation="relu", padding="same")(p3) + c4 = layers.Conv2D(64, (3, 3), activation="relu", padding="same")(c4) c4 = layers.BatchNormalization()(c4) - + # Bottleneck - bn = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(c4) + 
bn = layers.Conv2D(128, (3, 3), activation="relu", padding="same")(c4) bn = layers.BatchNormalization()(bn) - + # Upsampling (decoder) side # Block 1 - u1 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(bn) + u1 = layers.Conv2D(64, (3, 3), activation="relu", padding="same")(bn) u1 = layers.Concatenate()([u1, c4]) u1 = layers.BatchNormalization()(u1) # Block 2 of the Upsampling (decoder) side u2 = layers.UpSampling2D(size=(2, 2))(u1) - u2 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(u2) + u2 = layers.Conv2D(32, (3, 3), activation="relu", padding="same")(u2) # u2 = layers.ZeroPadding2D(padding=((1, 0), (1, 0)))(u2) # Adjust padding as needed u2 = layers.Concatenate()([u2, c3]) u2 = layers.BatchNormalization()(u2) # Block 3 u3 = layers.UpSampling2D(size=(2, 2))(u2) - u3 = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(u3) + u3 = layers.Conv2D(16, (3, 3), activation="relu", padding="same")(u3) u3 = layers.Concatenate()([u3, c2]) u3 = layers.BatchNormalization()(u3) # Block 4 u4 = layers.UpSampling2D(size=(2, 2))(u3) - u4 = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(u4) + u4 = layers.Conv2D(8, (3, 3), activation="relu", padding="same")(u4) # u4 = layers.Concatenate()([u4, c1_1]) u4 = layers.BatchNormalization()(u4) - + # Final Layer - x = layers.Conv2D(2, (3, 3), activation='softmax', padding='same')(u4) + x = layers.Conv2D(2, (3, 3), activation="softmax", padding="same")(u4) - return models.Model(inputs=[input_image,input_pred], outputs=x) + return models.Model(inputs=[input_image, input_pred], outputs=x) def embedding_model(img_shape=(224, 224, 3)): - prev_model = tf.keras.applications.DenseNet121(input_shape=img_shape, include_top=False, weights='imagenet') + prev_model = tf.keras.applications.DenseNet121( + input_shape=img_shape, include_top=False, weights="imagenet" + ) z = tf.keras.layers.Flatten()(prev_model.output) z = tf.keras.layers.Dense(32, activation="relu")(z) diff --git a/src/models/Build_Unet.py b/src/models/Build_Unet.py index 609bfc1..517b947 100644 --- a/src/models/Build_Unet.py +++ b/src/models/Build_Unet.py @@ -1,74 +1,79 @@ -from keras.models import Model, load_model +from keras.models import Model +from keras.models import load_model +from tensorflow.keras import Input +from tensorflow.keras import layers +from tensorflow.keras import models from tensorflow.keras.layers import * -from tensorflow.keras import layers, models, Input from tensorflow.keras.utils import Sequence def build_unet(): - input_image = Input(shape=(1120, 1120, 3), name='input_image') - input_pred = Input(shape=(1120, 1120, 1), name='input_pred') + input_image = Input(shape=(1120, 1120, 3), name="input_image") + input_pred = Input(shape=(1120, 1120, 1), name="input_pred") - conv_pred = layers.Conv2D(3, (3, 3), activation='relu', padding='same')(input_pred) + conv_pred = layers.Conv2D(3, (3, 3), activation="relu", padding="same")( + input_pred + ) combined = layers.Concatenate()([input_image, conv_pred]) - + # Block 1 - c1 = layers.Conv2D(4, (3, 3), activation='relu', padding='same')(combined) - c1 = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(c1) + c1 = layers.Conv2D(4, (3, 3), activation="relu", padding="same")(combined) + c1 = layers.Conv2D(8, (3, 3), activation="relu", padding="same")(c1) c1 = layers.BatchNormalization()(c1) p1 = layers.MaxPooling2D((2, 2))(c1) - + # Block 2 - c2 = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(p1) - c2 = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(c2) + c2 
= layers.Conv2D(8, (3, 3), activation="relu", padding="same")(p1) + c2 = layers.Conv2D(16, (3, 3), activation="relu", padding="same")(c2) c2 = layers.BatchNormalization()(c2) p2 = layers.MaxPooling2D((2, 2))(c2) - + # Block 3 - c3 = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(p2) + c3 = layers.Conv2D(16, (3, 3), activation="relu", padding="same")(p2) # c3 = layers.Dropout(0.3)(c3) c3 = layers.SpatialDropout2D(0.3)(c3) - c3 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(c3) + c3 = layers.Conv2D(32, (3, 3), activation="relu", padding="same")(c3) c3 = layers.BatchNormalization()(c3) p3 = layers.MaxPooling2D((2, 2))(c3) - + # Block 4 - c4 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(p3) - c4 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(c4) + c4 = layers.Conv2D(32, (3, 3), activation="relu", padding="same")(p3) + c4 = layers.Conv2D(64, (3, 3), activation="relu", padding="same")(c4) c4 = layers.BatchNormalization()(c4) - + # Bottleneck - bn = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(c4) + bn = layers.Conv2D(128, (3, 3), activation="relu", padding="same")(c4) bn = layers.BatchNormalization()(bn) - + # Upsampling (decoder) side # Block 1 - u1 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(bn) + u1 = layers.Conv2D(64, (3, 3), activation="relu", padding="same")(bn) u1 = layers.Concatenate()([u1, c4]) u1 = layers.BatchNormalization()(u1) # Block 2 of the Upsampling (decoder) side u2 = layers.UpSampling2D(size=(2, 2))(u1) - u2 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(u2) + u2 = layers.Conv2D(32, (3, 3), activation="relu", padding="same")(u2) # u2 = layers.ZeroPadding2D(padding=((1, 0), (1, 0)))(u2) # Adjust padding as needed u2 = layers.Concatenate()([u2, c3]) u2 = layers.BatchNormalization()(u2) # Block 3 u3 = layers.UpSampling2D(size=(2, 2))(u2) - u3 = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(u3) + u3 = layers.Conv2D(16, (3, 3), activation="relu", padding="same")(u3) u3 = layers.Concatenate()([u3, c2]) u3 = layers.BatchNormalization()(u3) # Block 4 u4 = layers.UpSampling2D(size=(2, 2))(u3) - u4 = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(u4) + u4 = layers.Conv2D(8, (3, 3), activation="relu", padding="same")(u4) # u4 = layers.Concatenate()([u4, c1_1]) u4 = layers.BatchNormalization()(u4) - + # Final Layer - x = layers.Conv2D(2, (3, 3), activation='softmax', padding='same')(u4) + x = layers.Conv2D(2, (3, 3), activation="softmax", padding="same")(u4) - model = models.Model(inputs=[input_image,input_pred], outputs=x) + model = models.Model(inputs=[input_image, input_pred], outputs=x) return model diff --git a/src/models/losses.py b/src/models/losses.py index 652f783..05d2fde 100644 --- a/src/models/losses.py +++ b/src/models/losses.py @@ -3,36 +3,42 @@ def get_dice_loss(nb_classes=1, use_background=False): def dice_loss(target, output, epsilon=1e-10): - smooth = 1. + smooth = 1.0 dice = 0 for i in range(0 if use_background else 1, nb_classes): output1 = output[..., i] target1 = target[..., i] intersection1 = tf.reduce_sum(output1 * target1) - union1 = tf.reduce_sum(output1 * output1) + tf.reduce_sum(target1 * target1) - dice += (2. * intersection1 + smooth) / (union1 + smooth) + union1 = tf.reduce_sum(output1 * output1) + tf.reduce_sum( + target1 * target1 + ) + dice += (2.0 * intersection1 + smooth) / (union1 + smooth) if use_background: dice /= nb_classes else: - dice /= (nb_classes - 1) - return tf.clip_by_value(1. - dice, 0., 1. 
- epsilon) + dice /= nb_classes - 1 + return tf.clip_by_value(1.0 - dice, 0.0, 1.0 - epsilon) + return dice_loss def dsc_thresholded(nb_classes=2, use_background=False): def dice(target, output, epsilon=1e-10): - smooth = 1. + smooth = 1.0 dice = 0 output = tf.cast(output > 0.5, tf.float32) for i in range(0 if use_background else 1, nb_classes): - output1 = output[:,:,:, i] - target1 = target[:,:,:, i] + output1 = output[:, :, :, i] + target1 = target[:, :, :, i] intersection1 = tf.reduce_sum(output1 * target1) - union1 = tf.reduce_sum(output1 * output1) + tf.reduce_sum(target1 * target1) - dice += (2. * intersection1 + smooth) / (union1 + smooth) + union1 = tf.reduce_sum(output1 * output1) + tf.reduce_sum( + target1 * target1 + ) + dice += (2.0 * intersection1 + smooth) / (union1 + smooth) if use_background: dice /= nb_classes else: - dice /= (nb_classes - 1) - return tf.clip_by_value(dice, 0., 1. - epsilon) + dice /= nb_classes - 1 + return tf.clip_by_value(dice, 0.0, 1.0 - epsilon) + return dice diff --git a/src/utils/utilities.py b/src/utils/utilities.py index 542873c..4a31407 100644 --- a/src/utils/utilities.py +++ b/src/utils/utilities.py @@ -1,24 +1,28 @@ import numpy as np import tensorflow as tf + from ...augmentation.MLD import multi_lens_distortion def PreProc(img, pred, mask): - img = img/255. - pred = pred /255. - mask = mask/255. + img = img / 255.0 + pred = pred / 255.0 + mask = mask / 255.0 - img = tf.image.resize(img,IMG_SIZE) - pred = tf.image.resize(pred,IMG_SIZE) - mask = tf.image.resize(mask,IMG_SIZE) + img = tf.image.resize(img, IMG_SIZE) + pred = tf.image.resize(pred, IMG_SIZE) + mask = tf.image.resize(mask, IMG_SIZE) mask = tf.cast(mask > 0.5, tf.float32) return img, pred, mask + def Augmentor(img, pred, mask): # Apply transformations to both the image and the mask using a fixed seed for each random operation - seed = np.random.randint(0, 1e6) # Generate a common seed for this iteration + seed = np.random.randint( + 0, 1e6 + ) # Generate a common seed for this iteration # Random flips if tf.random.uniform((), seed=seed) > 0.5: @@ -32,38 +36,46 @@ def Augmentor(img, pred, mask): mask = tf.image.flip_up_down(mask) if tf.random.uniform((), seed=seed) > 0.5: - nbr_rot = tf.random.uniform(shape=[], minval=1, maxval=4, dtype=tf.int32) - img =tf.image.rot90(img, k=nbr_rot) - pred =tf.image.rot90(pred, k=nbr_rot) - mask =tf.image.rot90(mask, k=nbr_rot) + nbr_rot = tf.random.uniform( + shape=[], minval=1, maxval=4, dtype=tf.int32 + ) + img = tf.image.rot90(img, k=nbr_rot) + pred = tf.image.rot90(pred, k=nbr_rot) + mask = tf.image.rot90(mask, k=nbr_rot) # Other transformations # print(img.shape) # This should print something like (224, 224, 4) for a 4-channel image. 
augmented_channels = tf.image.random_hue(img, 0.08, seed=seed) - augmented_channels = tf.image.random_contrast(augmented_channels, 0.7, 1.3, seed=seed) - augmented_channels = tf.image.random_brightness(augmented_channels, 0.2, seed=seed) - augmented_channels = tf.image.random_saturation(augmented_channels, 0.7, 1.3, seed=seed) + augmented_channels = tf.image.random_contrast( + augmented_channels, 0.7, 1.3, seed=seed + ) + augmented_channels = tf.image.random_brightness( + augmented_channels, 0.2, seed=seed + ) + augmented_channels = tf.image.random_saturation( + augmented_channels, 0.7, 1.3, seed=seed + ) distortion_seed = np.random.randint(0, 2**32 - 1) # Apply multi_lens_distortion to both the image and the mask img = tf.numpy_function( - multi_lens_distortion, - [img, 6, (300, 500), (-0.3, 0.5), distortion_seed], - tf.float32 + multi_lens_distortion, + [img, 6, (300, 500), (-0.3, 0.5), distortion_seed], + tf.float32, ) pred = tf.numpy_function( - multi_lens_distortion, - [pred, 6, (300, 500), (-0.3, 0.5), distortion_seed], - tf.float32 + multi_lens_distortion, + [pred, 6, (300, 500), (-0.3, 0.5), distortion_seed], + tf.float32, ) mask = tf.numpy_function( multi_lens_distortion, [mask, 6, (300, 500), (-0.3, 0.5), distortion_seed], - tf.float32 + tf.float32, ) return img, pred, mask From b83b3d94d1f0b9b17887c9a6dda4a7a3d1f75bc2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Pedersen?= Date: Mon, 29 Apr 2024 16:14:04 +0200 Subject: [PATCH 07/18] Added missing import --- src/augmentation/MLD.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/augmentation/MLD.py b/src/augmentation/MLD.py index fc18bfd..a9038d9 100644 --- a/src/augmentation/MLD.py +++ b/src/augmentation/MLD.py @@ -1,4 +1,4 @@ -# Multi-lens Distortion +import numpy as np def multi_lens_distortion(image, num_lenses, radius_range, strength_range): From cea8a10c610d834598305f4b1b6764ad57a8917a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Pedersen?= Date: Mon, 29 Apr 2024 16:15:44 +0200 Subject: [PATCH 08/18] Added IMG_SIZE as variable to PreProc method --- src/utils/utilities.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/utils/utilities.py b/src/utils/utilities.py index 4a31407..b1df620 100644 --- a/src/utils/utilities.py +++ b/src/utils/utilities.py @@ -1,17 +1,17 @@ import numpy as np import tensorflow as tf -from ...augmentation.MLD import multi_lens_distortion +from ..augmentation.MLD import multi_lens_distortion -def PreProc(img, pred, mask): +def PreProc(img, pred, mask, img_size): img = img / 255.0 pred = pred / 255.0 mask = mask / 255.0 - img = tf.image.resize(img, IMG_SIZE) - pred = tf.image.resize(pred, IMG_SIZE) - mask = tf.image.resize(mask, IMG_SIZE) + img = tf.image.resize(img, img_size) + pred = tf.image.resize(pred, img_size) + mask = tf.image.resize(mask, img_size) mask = tf.cast(mask > 0.5, tf.float32) From b7afbdded53c1c33e32fb2e1554bbe0692322756 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Pedersen?= Date: Mon, 29 Apr 2024 16:15:57 +0200 Subject: [PATCH 09/18] Added template req file to be updated --- requirements.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 requirements.txt diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..ee18da1 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,3 @@ +numpy +tensorflow +pyFAST \ No newline at end of file From b8fbc5303448d3f828944fc65b9a9230f1be4e96 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Pedersen?= Date: Mon, 29 Apr 2024 
16:16:03 +0200 Subject: [PATCH 10/18] Fixed import in Generator --- src/generator/Generator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/generator/Generator.py b/src/generator/Generator.py index 9f97f3f..7bf6d22 100644 --- a/src/generator/Generator.py +++ b/src/generator/Generator.py @@ -1,7 +1,7 @@ import fast import numpy as np import tensorflow as tf -from MLD import multi_lens_distortion +from ..augmentation.MLD import multi_lens_distortion def load_patch(x_start_val_lvl3, y_start_val_lvl3, filename, level, patch_size): From e77118304a680f41f924ad06ea955a0475d95379 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Pedersen?= Date: Mon, 29 Apr 2024 16:18:26 +0200 Subject: [PATCH 11/18] Removed redundant imports --- ClusteringForTissueBalancing.py | 2 +- FewShot.py | 9 --------- SegmentationTraining.py | 5 +---- 3 files changed, 2 insertions(+), 14 deletions(-) diff --git a/ClusteringForTissueBalancing.py b/ClusteringForTissueBalancing.py index 2cbf366..03fa0d6 100644 --- a/ClusteringForTissueBalancing.py +++ b/ClusteringForTissueBalancing.py @@ -2,7 +2,7 @@ import cv2 import numpy as np -from matplotlib import pyplot as plt +import matplotlib.pyplot as plt from sklearn.cluster import KMeans diff --git a/FewShot.py b/FewShot.py index 2cd3b40..a097f22 100644 --- a/FewShot.py +++ b/FewShot.py @@ -1,18 +1,9 @@ import os -import cv2 import numpy as np import onnx import tensorflow as tf import tf2onnx -from keras.models import Model -from keras.models import load_model -from MLD import multi_lens_distortion -from PIL import Image -from PIL import ImageEnhance -from tensorflow.keras import backend as K -from tensorflow.keras import initializers -from tensorflow.keras import layers from tensorflow.keras import optimizers from tensorflow.keras.layers import * diff --git a/SegmentationTraining.py b/SegmentationTraining.py index 3ef9442..d77e95f 100644 --- a/SegmentationTraining.py +++ b/SegmentationTraining.py @@ -5,10 +5,8 @@ import onnx import tensorflow as tf import tf2onnx -from keras.models import Model -from keras.models import load_model +from tensorflow.keras.models import load_model from MLD import multi_lens_distortion -from PIL import Image from tensorflow.keras import Input from tensorflow.keras import layers from tensorflow.keras import models @@ -16,7 +14,6 @@ from tensorflow.keras.preprocessing.image import img_to_array from tensorflow.keras.preprocessing.image import load_img from tensorflow.keras.utils import Sequence -from tensorflow.python.keras.callbacks import CSVLogger from tensorflow.python.keras.callbacks import EarlyStopping from tensorflow.python.keras.callbacks import ModelCheckpoint From 68c4a6133695b21f18232b4a17cc7650c98ca6f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Pedersen?= Date: Mon, 29 Apr 2024 16:18:35 +0200 Subject: [PATCH 12/18] Updated req file --- requirements.txt | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index ee18da1..a720c73 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,8 @@ numpy tensorflow -pyFAST \ No newline at end of file +pyFAST +opencv-python +matplotlib +scikit-learn +tf2onnx +onnx \ No newline at end of file From a39169e959c032a5c3726a07767f8576564ff09d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Pedersen?= Date: Mon, 29 Apr 2024 16:19:29 +0200 Subject: [PATCH 13/18] Fixed import; added new req item --- SegmentationTraining.py | 2 +- requirements.txt | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) 
diff --git a/SegmentationTraining.py b/SegmentationTraining.py
index d77e95f..f4fdba1 100644
--- a/SegmentationTraining.py
+++ b/SegmentationTraining.py
@@ -6,7 +6,7 @@
 import tensorflow as tf
 import tf2onnx
 from tensorflow.keras.models import load_model
-from MLD import multi_lens_distortion
+from src.augmentation.MLD import multi_lens_distortion
 from tensorflow.keras import Input
 from tensorflow.keras import layers
 from tensorflow.keras import models
diff --git a/requirements.txt b/requirements.txt
index a720c73..0b86b97 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -5,4 +5,5 @@ opencv-python
 matplotlib
 scikit-learn
 tf2onnx
-onnx
\ No newline at end of file
+onnx
+Pillow
\ No newline at end of file

From 655e0cc1ef11613816f505e4e1d897dd06322c06 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Andr=C3=A9=20Pedersen?=
Date: Mon, 29 Apr 2024 16:28:02 +0200
Subject: [PATCH 14/18] Updated README

---
 README.md | 43 ++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 42 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 4f5d2e4..cfab347 100644
--- a/README.md
+++ b/README.md
@@ -1 +1,42 @@
-# DRU-Net
\ No newline at end of file
+# DRU-Net
+
+## Introduction
+
+This project presents DRU-Net: Lung carcinoma segmentation using multi-lens distortion and fusion refinement network.
+
+## Getting started
+
+### Setup
+
+1. Set up a virtual environment and activate it:
+
+```
+python -m venv venv/
+source venv/bin/activate
+```
+
+2. Install dependencies:
+
+```
+pip install -r requirements.txt
+```
+
+### Linting
+
+First install the linting dependencies:
+
+```
+pip install black==22.3.0 isort==5.10.1 flake8==4.0.1
+```
+
+Then run the linting check with:
+
+```
+sh shell/lint.sh
+```
+
+Apply automatic formatting with:
+
+```
+sh shell/format.sh
+```

From 7652e639530021e2ba140789a648297c84862eee Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Andr=C3=A9=20Pedersen?=
Date: Mon, 29 Apr 2024 16:29:58 +0200
Subject: [PATCH 15/18] Use line-length 120 instead of 80

---
 setup.cfg       | 4 ++--
 shell/format.sh | 2 +-
 shell/lint.sh   | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/setup.cfg b/setup.cfg
index ca629cf..5daf692 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -4,13 +4,13 @@ description-file = README.md
 [isort]
 force_single_line=True
 known_first_party=.
-line_length=80
+line_length=120
 profile=black
 
 [flake8]
 # imported but unused in __init__.py, that's ok.
 per-file-ignores=*__init__.py:F401
 ignore=E203,W503,W605,F632,E266,E731,E712,E741
-max-line-length=80
+max-line-length=120
 filename = *.py
 exclude = venv/
diff --git a/shell/format.sh b/shell/format.sh
index 390b9e0..fc9d509 100644
--- a/shell/format.sh
+++ b/shell/format.sh
@@ -1,4 +1,4 @@
 #!/bin/bash
 isort --sl .
-black --line-length 80 .
+black --line-length 120 .
 flake8 .
diff --git a/shell/lint.sh b/shell/lint.sh
index 38755e2..286c6d0 100644
--- a/shell/lint.sh
+++ b/shell/lint.sh
@@ -13,7 +13,7 @@ then
     exit 1
 fi
 echo "no issues with flake8"
-black --check --line-length 80 .
+black --check --line-length 120 .
 if ! [ $? -eq 0 ]
 then
     echo "Please run \"sh shell/format.sh\" to format the code."
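A short aside on the augmentation these patches keep rewiring: below is a minimal, hypothetical sketch of calling `multi_lens_distortion` (now living in `src/augmentation/MLD.py`, imported absolutely as of PATCH 13) directly on a NumPy array. The lens count, radius range, and strength range mirror the `tf.numpy_function` wrappers in the training code; those wrappers also pass a `distortion_seed`, but since the signature visible in PATCH 07 takes only four parameters, the four-argument call here is an assumption, as is the return value being an array of the input's shape.

```
# Hypothetical stand-alone use of the multi-lens distortion augmentation;
# the training scripts instead invoke it through tf.numpy_function.
import numpy as np

from src.augmentation.MLD import multi_lens_distortion

rng = np.random.default_rng(42)
img = rng.integers(0, 256, size=(1120, 1120, 3), dtype=np.uint8)  # dummy RGB patch

# Six randomly placed lenses with 300-500 px radii and distortion strengths
# drawn from [-0.3, 0.5], matching the values used by the training scripts.
distorted = multi_lens_distortion(img, 6, (300, 500), (-0.3, 0.5))
```
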
From 278d21d0198065e5cc54dc8420e4c570ae20538a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Pedersen?= Date: Mon, 29 Apr 2024 16:32:22 +0200 Subject: [PATCH 16/18] Assume it is features_initial and not features --- ClustringRefinement.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/ClustringRefinement.py b/ClustringRefinement.py index 740861f..cd1c26d 100644 --- a/ClustringRefinement.py +++ b/ClustringRefinement.py @@ -6,11 +6,11 @@ from sklearn.cluster import KMeans -# explicit function to normalize array def normalize(x): - x_norm = (x - np.min(x)) / (np.max(x) - np.min(x)) - - return x_norm + """ + Method that normalizes an input array to range [0, 1]. + """ + return (x - np.min(x)) / (np.max(x) - np.min(x)) names = glob.glob("/Path/To/Test/Thumbnails/*.png") @@ -56,7 +56,7 @@ def normalize(x): ] # Assuming these are your feature arrays # Apply the weights to each feature using map - weighted_features = list(map(lambda f, w: f * w, features, Ws)) + weighted_features = list(map(lambda f, w: f * w, features_initial, Ws)) # Stack the weighted features to create a feature vector for each pixel features_stacked = np.stack(weighted_features, axis=-1) @@ -115,9 +115,7 @@ def fill_holes(binary_img): Gr = cv2.medianBlur(Gr, 11) ret, thresh = cv2.threshold(Gr, 10, 51, cv2.THRESH_BINARY) # print(np.unique(thresh)) - contours, hierarchy = cv2.findContours( - thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE - ) + contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) empt = np.zeros(Rw2.shape) smoothed_image[1][thresh < 0.5] = 0 smoothed_image[1][smoothed_image[1] > 100] = 255 From 5df7312da9fa096a3e20776f55cfa3882f83f413 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Pedersen?= Date: Mon, 29 Apr 2024 16:33:43 +0200 Subject: [PATCH 17/18] Linted more code --- ClusteringForTissueBalancing.py | 26 +++------- FewShot.py | 85 ++++++++------------------------- GenerateGradients.py | 10 ++-- PostProcess.py | 12 ++--- SegmentationTraining.py | 54 ++++++--------------- TissueClustersFromThumbnails.py | 4 +- src/__init__.py | 1 - src/augmentation/__init__.py | 1 - src/generator/Generator.py | 29 ++++------- src/generator/__init__.py | 1 - src/models/Build_DR.py | 8 +--- src/models/Build_Unet.py | 8 +--- src/models/__init__.py | 1 - src/models/losses.py | 8 +--- src/utils/__init__.py | 1 - src/utils/utilities.py | 20 ++------ 16 files changed, 68 insertions(+), 201 deletions(-) diff --git a/ClusteringForTissueBalancing.py b/ClusteringForTissueBalancing.py index 03fa0d6..cf44376 100644 --- a/ClusteringForTissueBalancing.py +++ b/ClusteringForTissueBalancing.py @@ -1,8 +1,8 @@ import os import cv2 -import numpy as np import matplotlib.pyplot as plt +import numpy as np from sklearn.cluster import KMeans @@ -45,13 +45,9 @@ def cluster(image_path, weights=[0.6, 0.1, 0.2], fill_the_holes=True): dim = (width, height) # Resize image - resized_images = [ - cv2.resize(img, dim, interpolation=cv2.INTER_AREA) for img in images - ] + resized_images = [cv2.resize(img, dim, interpolation=cv2.INTER_AREA) for img in images] - weighted_images = [ - img * weight for img, weight in zip(resized_images, weights) - ] + weighted_images = [img * weight for img, weight in zip(resized_images, weights)] # Stack all images to create a feature vector for each pixel features = np.stack(weighted_images, axis=-1).reshape(-1, 3) @@ -61,19 +57,13 @@ def cluster(image_path, weights=[0.6, 0.1, 0.2], fill_the_holes=True): labels = kmeans.fit_predict(features) # 
Identify the cluster that is closest to white - white_cluster = np.argmin( - np.linalg.norm(kmeans.cluster_centers_ - [1, 1, 1], axis=1) - ) + white_cluster = np.argmin(np.linalg.norm(kmeans.cluster_centers_ - [1, 1, 1], axis=1)) # If the white cluster is not labeled as '0', swap labels if white_cluster != 0: labels[labels == 0] = -1 # Temporary change label '0' to '-1' - labels[labels == white_cluster] = ( - 0 # Assign label '0' to the white cluster - ) - labels[labels == -1] = ( - white_cluster # Assign previous '0' cluster to 'white_cluster' label - ) + labels[labels == white_cluster] = 0 # Assign label '0' to the white cluster + labels[labels == -1] = white_cluster # Assign previous '0' cluster to 'white_cluster' label # Reshape the labels to the image's shape labels_2D = labels.reshape(height, width) @@ -98,9 +88,7 @@ def process_images(input_folder, output_folder): # Save the result output_path = os.path.join(output_folder, "processed_" + filename) - cv2.imwrite( - output_path, result * 255 - ) # Scale back up to 0-255 range + cv2.imwrite(output_path, result * 255) # Scale back up to 0-255 range # Optionally display the result plt.imshow(result) diff --git a/FewShot.py b/FewShot.py index a097f22..204f914 100644 --- a/FewShot.py +++ b/FewShot.py @@ -5,17 +5,12 @@ import tensorflow as tf import tf2onnx from tensorflow.keras import optimizers -from tensorflow.keras.layers import * os.environ["CUDA_VISIBLE_DEVICES"] = "0" # Select GPU with index 0 -filenames_Tumor = next(os.walk("./Path/To/Tumor/"), (None, None, []))[ - 2 -] # [] if no file -filenames_Normal = next(os.walk("./Path/To/Normal/"), (None, None, []))[ - 2 -] # [] if no file +filenames_Tumor = next(os.walk("./Path/To/Tumor/"), (None, None, []))[2] # [] if no file +filenames_Normal = next(os.walk("./Path/To/Normal/"), (None, None, []))[2] # [] if no file data = { "Tumor": ["./Path/To/Tumor/" + i for i in filenames_Tumor], @@ -56,9 +51,7 @@ def load_images(paths): def embedding_model(): - prev_model = tf.keras.applications.DenseNet121( - input_shape=IMG_SHAPE, include_top=False, weights="imagenet" - ) + prev_model = tf.keras.applications.DenseNet121(input_shape=IMG_SHAPE, include_top=False, weights="imagenet") z = tf.keras.layers.Flatten()(prev_model.output) z = tf.keras.layers.Dense(32, activation="relu")(z) @@ -92,34 +85,18 @@ def compute_prototype(embeddings, labels): for epoch in range(num_epochs): epoch_loss_avg = tf.keras.metrics.Mean() # Randomly sample support set and query set for both classes - support_idx_tumor = np.random.choice( - len(data_images["Tumor"]), n_shots, replace=False - ) - query_idx_tumor = np.random.choice( - len(data_images["Tumor"]), n_query, replace=False - ) - - support_idx_normal = np.random.choice( - len(data_images["Normal"]), n_shots, replace=False - ) - query_idx_normal = np.random.choice( - len(data_images["Normal"]), n_query, replace=False - ) + support_idx_tumor = np.random.choice(len(data_images["Tumor"]), n_shots, replace=False) + query_idx_tumor = np.random.choice(len(data_images["Tumor"]), n_query, replace=False) + + support_idx_normal = np.random.choice(len(data_images["Normal"]), n_shots, replace=False) + query_idx_normal = np.random.choice(len(data_images["Normal"]), n_query, replace=False) # Load images using indices and paths - support_tumor = load_images( - [data_images["Tumor"][i] for i in support_idx_tumor] - ) - query_tumor = load_images( - [data_images["Tumor"][i] for i in query_idx_tumor] - ) - - support_normal = load_images( - [data_images["Normal"][i] for i in 
support_idx_normal] - ) - query_normal = load_images( - [data_images["Normal"][i] for i in query_idx_normal] - ) + support_tumor = load_images([data_images["Tumor"][i] for i in support_idx_tumor]) + query_tumor = load_images([data_images["Tumor"][i] for i in query_idx_tumor]) + + support_normal = load_images([data_images["Normal"][i] for i in support_idx_normal]) + query_normal = load_images([data_images["Normal"][i] for i in query_idx_normal]) support_set = tf.concat([support_normal, support_tumor], axis=0) query_set = tf.concat([query_normal, query_tumor], axis=0) @@ -134,20 +111,14 @@ def compute_prototype(embeddings, labels): support_embeddings = embedding_net(support_set) query_embeddings = embedding_net(query_set) - tumor_prototype = compute_prototype( - support_embeddings, tf.equal(support_labels, 1) - ) - normal_prototype = compute_prototype( - support_embeddings, tf.equal(support_labels, 0) - ) + tumor_prototype = compute_prototype(support_embeddings, tf.equal(support_labels, 1)) + normal_prototype = compute_prototype(support_embeddings, tf.equal(support_labels, 0)) # print(tumor_prototype.shape) prototypes = tf.stack([tumor_prototype, normal_prototype]) # Compute Euclidean distance from each query embedding to the prototypes - distances = tf.norm( - tf.expand_dims(query_embeddings, 1) - prototypes, axis=-1 - ) + distances = tf.norm(tf.expand_dims(query_embeddings, 1) - prototypes, axis=-1) # Optimize optimizer = tf.keras.optimizers.Adam(learning_rate=0.1) @@ -155,33 +126,19 @@ def compute_prototype(embeddings, labels): # Compute the loss and optimize with tf.GradientTape() as tape: - loss = tf.reduce_mean( - tf.nn.softmax_cross_entropy_with_logits( - logits=-distances, labels=query_labels_one_hot - ) - ) + loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=-distances, labels=query_labels_one_hot)) epoch_loss_avg.update_state(loss) # All model-related calculations here support_embeddings = embedding_net(support_set) query_embeddings = embedding_net(query_set) - tumor_prototype = compute_prototype( - support_embeddings, tf.equal(support_labels, 1) - ) - normal_prototype = compute_prototype( - support_embeddings, tf.equal(support_labels, 0) - ) + tumor_prototype = compute_prototype(support_embeddings, tf.equal(support_labels, 1)) + normal_prototype = compute_prototype(support_embeddings, tf.equal(support_labels, 0)) prototypes = tf.stack([tumor_prototype, normal_prototype]) - distances = tf.norm( - tf.expand_dims(query_embeddings, 1) - prototypes, axis=-1 - ) - loss = tf.reduce_mean( - tf.nn.softmax_cross_entropy_with_logits( - logits=-distances, labels=query_labels_one_hot - ) - ) + distances = tf.norm(tf.expand_dims(query_embeddings, 1) - prototypes, axis=-1) + loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=-distances, labels=query_labels_one_hot)) print(f"Epoch {epoch+1}: Loss: {epoch_loss_avg.result()}") gradients = tape.gradient(loss, embedding_net.trainable_variables) diff --git a/GenerateGradients.py b/GenerateGradients.py index 031ce43..c82093f 100644 --- a/GenerateGradients.py +++ b/GenerateGradients.py @@ -26,12 +26,8 @@ def generate_gradients(imgPath): resize_factor = 1 # Calculate the new height and width as tensors based on the resize factor - new_height = tf.cast( - tf.cast(original_shape[0], tf.float32) * resize_factor, tf.int32 - ) - new_width = tf.cast( - tf.cast(original_shape[1], tf.float32) * resize_factor, tf.int32 - ) + new_height = tf.cast(tf.cast(original_shape[0], tf.float32) * resize_factor, tf.int32) + new_width = 
tf.cast(tf.cast(original_shape[1], tf.float32) * resize_factor, tf.int32) # Resize the image to of its original size if necessary resized_img = tf.image.resize(img, [new_height, new_width]) @@ -46,7 +42,7 @@ def generate_gradients(imgPath): gx, gy = gradients[0], gradients[1] # Calculate the magnitude and direction of the gradient magnitude = tf.sqrt(tf.math.square(gx) + tf.math.square(gy)) - direction = tf.math.atan2(gy, gx) + # direction = tf.math.atan2(gy, gx) # print(magnitude) # plt.imshow(magnitude[0,...,0]*255, cmap='gray') diff --git a/PostProcess.py b/PostProcess.py index 79880e1..24b2d36 100644 --- a/PostProcess.py +++ b/PostProcess.py @@ -19,9 +19,7 @@ def remove_small_fragments(image_path, size_threshold): _, binary_image = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY) # Find all contours - contours, _ = cv2.findContours( - binary_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE - ) + contours, _ = cv2.findContours(binary_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # Filter out small fragments for cnt in contours: @@ -37,9 +35,7 @@ def smooth_edges(binary_image, kernel_size=7, iterations=1): # Apply morphological opening (erosion followed by dilation) smoothed_image = cv2.medianBlur(binary_image, ksize=11) - smoothed_image = cv2.morphologyEx( - smoothed_image, cv2.MORPH_OPEN, kernel, iterations=iterations - ) + smoothed_image = cv2.morphologyEx(smoothed_image, cv2.MORPH_OPEN, kernel, iterations=iterations) return smoothed_image @@ -61,9 +57,7 @@ def process_images_in_directory(directory, size_threshold): # Define the directory and size threshold -directory = ( - "/Path/To/SegmentationResults/" # Update with the path to your images -) +directory = "/Path/To/SegmentationResults/" # Update with the path to your images size_threshold = 10 # Update this value based on your requirement process_images_in_directory(directory, size_threshold) diff --git a/SegmentationTraining.py b/SegmentationTraining.py index f4fdba1..2c750fd 100644 --- a/SegmentationTraining.py +++ b/SegmentationTraining.py @@ -5,18 +5,18 @@ import onnx import tensorflow as tf import tf2onnx -from tensorflow.keras.models import load_model -from src.augmentation.MLD import multi_lens_distortion from tensorflow.keras import Input from tensorflow.keras import layers from tensorflow.keras import models -from tensorflow.keras.layers import * +from tensorflow.keras.models import load_model from tensorflow.keras.preprocessing.image import img_to_array from tensorflow.keras.preprocessing.image import load_img from tensorflow.keras.utils import Sequence from tensorflow.python.keras.callbacks import EarlyStopping from tensorflow.python.keras.callbacks import ModelCheckpoint +from src.augmentation.MLD import multi_lens_distortion + os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true" os.environ["CUDA_VISIBLE_DEVICES"] = "2" # Select GPU @@ -25,9 +25,7 @@ def build_network(): input_image = Input(shape=(1120, 1120, 3), name="input_image") input_pred = Input(shape=(1120, 1120, 1), name="input_pred") - conv_pred = layers.Conv2D(3, (3, 3), activation="relu", padding="same")( - input_pred - ) + conv_pred = layers.Conv2D(3, (3, 3), activation="relu", padding="same")(input_pred) combined = layers.Concatenate()([input_image, conv_pred]) @@ -118,9 +116,7 @@ def PreProc(img, pred, mask): def Augmentor(img, pred, mask): # Apply transformations to both the image and the mask using a fixed seed for each random operation - seed = np.random.randint( - 0, 1e6 - ) # Generate a common seed for this iteration + seed = np.random.randint(0, 
1e6) # Generate a common seed for this iteration # Random flips if tf.random.uniform((), seed=seed) > 0.5: @@ -134,9 +130,7 @@ def Augmentor(img, pred, mask): mask = tf.image.flip_up_down(mask) if tf.random.uniform((), seed=seed) > 0.5: - nbr_rot = tf.random.uniform( - shape=[], minval=1, maxval=4, dtype=tf.int32 - ) + nbr_rot = tf.random.uniform(shape=[], minval=1, maxval=4, dtype=tf.int32) img = tf.image.rot90(img, k=nbr_rot) pred = tf.image.rot90(pred, k=nbr_rot) mask = tf.image.rot90(mask, k=nbr_rot) @@ -145,15 +139,9 @@ def Augmentor(img, pred, mask): # print(img.shape) # This should print something like (224, 224, 4) for a 4-channel image. augmented_channels = tf.image.random_hue(img, 0.08, seed=seed) - augmented_channels = tf.image.random_contrast( - augmented_channels, 0.7, 1.3, seed=seed - ) - augmented_channels = tf.image.random_brightness( - augmented_channels, 0.2, seed=seed - ) - augmented_channels = tf.image.random_saturation( - augmented_channels, 0.7, 1.3, seed=seed - ) + augmented_channels = tf.image.random_contrast(augmented_channels, 0.7, 1.3, seed=seed) + augmented_channels = tf.image.random_brightness(augmented_channels, 0.2, seed=seed) + augmented_channels = tf.image.random_saturation(augmented_channels, 0.7, 1.3, seed=seed) distortion_seed = np.random.randint(0, 2**32 - 1) @@ -180,9 +168,7 @@ def Augmentor(img, pred, mask): class TrainDataGenerator(Sequence): - def __init__( - self, image_dir, pred_dir, mask_dir, batch_size, augmentation=True - ): + def __init__(self, image_dir, pred_dir, mask_dir, batch_size, augmentation=True): self.image_dir = image_dir self.pred_dir = pred_dir self.mask_dir = mask_dir @@ -198,9 +184,7 @@ def on_epoch_begin(self): def __getitem__(self, index): # Get batch of filenames - batch_files = self.image_filenames[ - index * self.batch_size : (index + 1) * self.batch_size - ] + batch_files = self.image_filenames[index * self.batch_size : (index + 1) * self.batch_size] batch_imgs = [] batch_preds = [] @@ -218,9 +202,7 @@ def __getitem__(self, index): ) # Check if prediction has only one channel - assert ( - pred.shape[2] == 1 - ), f"Prediction {filename} has more than one channel!" + assert pred.shape[2] == 1, f"Prediction {filename} has more than one channel!" 
# Resize prediction to match the image size # pred = tf.image.resize(pred, (img.shape[0], img.shape[1])) @@ -265,9 +247,7 @@ def __getitem__(self, index): batch_preds.append(pred) batch_masks.append(mask) - return [np.array(batch_imgs), np.array(batch_preds)], np.array( - batch_masks - ) + return [np.array(batch_imgs), np.array(batch_preds)], np.array(batch_masks) batch_size = 2 @@ -295,9 +275,7 @@ def dice_loss(target, output, epsilon=1e-10): output1 = output[..., i] target1 = target[..., i] intersection1 = tf.reduce_sum(output1 * target1) - union1 = tf.reduce_sum(output1 * output1) + tf.reduce_sum( - target1 * target1 - ) + union1 = tf.reduce_sum(output1 * output1) + tf.reduce_sum(target1 * target1) dice += (2.0 * intersection1 + smooth) / (union1 + smooth) if use_background: dice /= nb_classes @@ -318,9 +296,7 @@ def dice(target, output, epsilon=1e-10): target1 = target[:, :, :, i] intersection1 = tf.reduce_sum(output1 * target1) - union1 = tf.reduce_sum(output1 * output1) + tf.reduce_sum( - target1 * target1 - ) + union1 = tf.reduce_sum(output1 * output1) + tf.reduce_sum(target1 * target1) dice += (2.0 * intersection1 + smooth) / (union1 + smooth) if use_background: diff --git a/TissueClustersFromThumbnails.py b/TissueClustersFromThumbnails.py index 63abdca..aafc71a 100644 --- a/TissueClustersFromThumbnails.py +++ b/TissueClustersFromThumbnails.py @@ -62,9 +62,7 @@ # Apply k-means clustering on the feature vectors num_clusters = 7 # Define the number of clusters - kmeans = KMeans(n_clusters=num_clusters, random_state=0).fit( - feature_vectors - ) + kmeans = KMeans(n_clusters=num_clusters, random_state=0).fit(feature_vectors) labels = kmeans.labels_ # Create an image with clustering result diff --git a/src/__init__.py b/src/__init__.py index 8b13789..e69de29 100644 --- a/src/__init__.py +++ b/src/__init__.py @@ -1 +0,0 @@ - diff --git a/src/augmentation/__init__.py b/src/augmentation/__init__.py index 8b13789..e69de29 100644 --- a/src/augmentation/__init__.py +++ b/src/augmentation/__init__.py @@ -1 +0,0 @@ - diff --git a/src/generator/Generator.py b/src/generator/Generator.py index 7bf6d22..4c1c7ef 100644 --- a/src/generator/Generator.py +++ b/src/generator/Generator.py @@ -1,6 +1,7 @@ import fast import numpy as np import tensorflow as tf + from ..augmentation.MLD import multi_lens_distortion @@ -32,9 +33,7 @@ def load_patch(x_start_val_lvl3, y_start_val_lvl3, filename, level, patch_size): class CustomDataGenerator(tf.keras.utils.Sequence): - def __init__( - self, starting_positions, gts, batch_size, patch_size=256, level=3 - ): + def __init__(self, starting_positions, gts, batch_size, patch_size=256, level=3): self.starting_positions = starting_positions self.gts = gts self.batch_size = batch_size @@ -56,21 +55,15 @@ def __init__( # Populate combination_indices with index values for i, address in enumerate(self.starting_positions): gt = self.gts[i] - cluster_label = address[ - 3 - ] # Extract cluster_label from starting_positions + cluster_label = address[3] # Extract cluster_label from starting_positions self.combination_indices[(gt, cluster_label)].append(i) # Calculate the minimum count across all categories to ensure balance - self.min_samples = min( - [len(indices) for indices in self.combination_indices.values()] - ) + self.min_samples = min([len(indices) for indices in self.combination_indices.values()]) def __len__(self): # Each epoch will have a balanced set of samples across all categories - total_samples = self.min_samples * len( - self.combination_indices - ) # Total 
samples for all categories + total_samples = self.min_samples * len(self.combination_indices) # Total samples for all categories return total_samples // self.batch_size def __getitem__(self, idx): @@ -83,14 +76,10 @@ def __getitem__(self, idx): current_batch_indices = [] for category, indices in self.combination_indices.items(): - selected_indices = np.random.choice( - indices, samples_per_category, replace=False - ) + selected_indices = np.random.choice(indices, samples_per_category, replace=False) current_batch_indices.extend(selected_indices) - np.random.shuffle( - current_batch_indices - ) # Shuffle to mix the categories within the batch + np.random.shuffle(current_batch_indices) # Shuffle to mix the categories within the batch for i, index in enumerate(current_batch_indices): position = self.starting_positions[index] @@ -100,9 +89,7 @@ def __getitem__(self, idx): position[0], position[3], ) - image = load_patch( - x_start, y_start, filename, self.level, self.patch_size - ) + image = load_patch(x_start, y_start, filename, self.level, self.patch_size) # Augmentation image = tf.convert_to_tensor(image, dtype=tf.float32) diff --git a/src/generator/__init__.py b/src/generator/__init__.py index 8b13789..e69de29 100644 --- a/src/generator/__init__.py +++ b/src/generator/__init__.py @@ -1 +0,0 @@ - diff --git a/src/models/Build_DR.py b/src/models/Build_DR.py index aa48bdd..62f9381 100644 --- a/src/models/Build_DR.py +++ b/src/models/Build_DR.py @@ -8,9 +8,7 @@ def build_drunet(): input_image = Input(shape=(1120, 1120, 3), name="input_image") input_pred = Input(shape=(1120, 1120, 1), name="input_pred") - conv_pred = layers.Conv2D(3, (3, 3), activation="relu", padding="same")( - input_pred - ) + conv_pred = layers.Conv2D(3, (3, 3), activation="relu", padding="same")(input_pred) combined = layers.Concatenate()([input_image, conv_pred]) @@ -76,9 +74,7 @@ def build_drunet(): def embedding_model(img_shape=(224, 224, 3)): - prev_model = tf.keras.applications.DenseNet121( - input_shape=img_shape, include_top=False, weights="imagenet" - ) + prev_model = tf.keras.applications.DenseNet121(input_shape=img_shape, include_top=False, weights="imagenet") z = tf.keras.layers.Flatten()(prev_model.output) z = tf.keras.layers.Dense(32, activation="relu")(z) diff --git a/src/models/Build_Unet.py b/src/models/Build_Unet.py index 517b947..71d90c7 100644 --- a/src/models/Build_Unet.py +++ b/src/models/Build_Unet.py @@ -1,19 +1,13 @@ -from keras.models import Model -from keras.models import load_model from tensorflow.keras import Input from tensorflow.keras import layers from tensorflow.keras import models -from tensorflow.keras.layers import * -from tensorflow.keras.utils import Sequence def build_unet(): input_image = Input(shape=(1120, 1120, 3), name="input_image") input_pred = Input(shape=(1120, 1120, 1), name="input_pred") - conv_pred = layers.Conv2D(3, (3, 3), activation="relu", padding="same")( - input_pred - ) + conv_pred = layers.Conv2D(3, (3, 3), activation="relu", padding="same")(input_pred) combined = layers.Concatenate()([input_image, conv_pred]) diff --git a/src/models/__init__.py b/src/models/__init__.py index 8b13789..e69de29 100644 --- a/src/models/__init__.py +++ b/src/models/__init__.py @@ -1 +0,0 @@ - diff --git a/src/models/losses.py b/src/models/losses.py index 05d2fde..c4aa297 100644 --- a/src/models/losses.py +++ b/src/models/losses.py @@ -9,9 +9,7 @@ def dice_loss(target, output, epsilon=1e-10): output1 = output[..., i] target1 = target[..., i] intersection1 = tf.reduce_sum(output1 * target1) 
-            union1 = tf.reduce_sum(output1 * output1) + tf.reduce_sum(
-                target1 * target1
-            )
+            union1 = tf.reduce_sum(output1 * output1) + tf.reduce_sum(target1 * target1)
             dice += (2.0 * intersection1 + smooth) / (union1 + smooth)
         if use_background:
             dice /= nb_classes
diff --git a/src/utils/__init__.py b/src/utils/__init__.py
index 8b13789..e69de29 100644
--- a/src/utils/__init__.py
+++ b/src/utils/__init__.py
@@ -1 +0,0 @@
-
diff --git a/src/utils/utilities.py b/src/utils/utilities.py
index b1df620..270497e 100644
--- a/src/utils/utilities.py
+++ b/src/utils/utilities.py
@@ -20,9 +20,7 @@ def PreProc(img, pred, mask, img_size):
 
 def Augmentor(img, pred, mask):
     # Apply transformations to both the image and the mask using a fixed seed for each random operation
-    seed = np.random.randint(
-        0, 1e6
-    )  # Generate a common seed for this iteration
+    seed = np.random.randint(0, 1e6)  # Generate a common seed for this iteration
 
     # Random flips
     if tf.random.uniform((), seed=seed) > 0.5:
@@ -36,9 +34,7 @@ def Augmentor(img, pred, mask):
         mask = tf.image.flip_up_down(mask)
 
     if tf.random.uniform((), seed=seed) > 0.5:
-        nbr_rot = tf.random.uniform(
-            shape=[], minval=1, maxval=4, dtype=tf.int32
-        )
+        nbr_rot = tf.random.uniform(shape=[], minval=1, maxval=4, dtype=tf.int32)
         img = tf.image.rot90(img, k=nbr_rot)
         pred = tf.image.rot90(pred, k=nbr_rot)
         mask = tf.image.rot90(mask, k=nbr_rot)
@@ -47,15 +43,9 @@ def Augmentor(img, pred, mask):
     # print(img.shape)  # This should print something like (224, 224, 4) for a 4-channel image.
 
     augmented_channels = tf.image.random_hue(img, 0.08, seed=seed)
-    augmented_channels = tf.image.random_contrast(
-        augmented_channels, 0.7, 1.3, seed=seed
-    )
-    augmented_channels = tf.image.random_brightness(
-        augmented_channels, 0.2, seed=seed
-    )
-    augmented_channels = tf.image.random_saturation(
-        augmented_channels, 0.7, 1.3, seed=seed
-    )
+    augmented_channels = tf.image.random_contrast(augmented_channels, 0.7, 1.3, seed=seed)
+    augmented_channels = tf.image.random_brightness(augmented_channels, 0.2, seed=seed)
+    augmented_channels = tf.image.random_saturation(augmented_channels, 0.7, 1.3, seed=seed)
 
     distortion_seed = np.random.randint(0, 2**32 - 1)

From fc9961b4ee58a5e436c06728c6fa1bf8d2faffa1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Andr=C3=A9=20Pedersen?=
Date: Mon, 29 Apr 2024 16:37:16 +0200
Subject: [PATCH 18/18] Renamed job to test instead of build in linting yml

---
 .github/workflows/linting.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml
index 93fcc40..fb6dfe6 100644
--- a/.github/workflows/linting.yml
+++ b/.github/workflows/linting.yml
@@ -10,7 +10,7 @@ on:
   workflow_dispatch:
 
 jobs:
-  build:
+  test:
     runs-on: ubuntu-22.04
     steps:
       - uses: actions/checkout@v1
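To round off the series, a minimal sketch of how the refactored `src/` pieces could be wired together after these 18 patches; the actual compile step in `SegmentationTraining.py` is not shown in the patches, so the optimizer, learning rate, and class count below are illustrative assumptions rather than the project's settings.

```
# Illustrative wiring of the src/ modules; not part of the patch series.
import tensorflow as tf

from src.models.Build_Unet import build_unet
from src.models.losses import dsc_thresholded
from src.models.losses import get_dice_loss

# build_unet() expects two inputs, a (1120, 1120, 3) image and a
# (1120, 1120, 1) prediction map, and outputs a two-channel softmax.
model = build_unet()
model.compile(
    optimizer=tf.keras.optimizers.Adam(1e-4),  # assumed optimizer and learning rate
    loss=get_dice_loss(nb_classes=2, use_background=False),
    metrics=[dsc_thresholded(nb_classes=2, use_background=False)],
)
```

Note that the one-import-per-line style above follows the `force_single_line` isort profile that the linting workflow enforces.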