From 175b0da4bc26e818fa12f4d6bf70cf589677c5a3 Mon Sep 17 00:00:00 2001
From: stephen-riggs
Date: Tue, 9 Apr 2024 11:52:50 +0100
Subject: [PATCH 1/5] Update package versions in setup.py

---
 setup.py | 49 ++++++++++++++++++++++++-------------------------
 1 file changed, 24 insertions(+), 25 deletions(-)

diff --git a/setup.py b/setup.py
index 243c9f8..8acb14e 100644
--- a/setup.py
+++ b/setup.py
@@ -2,53 +2,52 @@
 import os
 import re
 import codecs
-import janni
+
 # Create new package with python setup.py sdist
 here = os.path.abspath(os.path.dirname(__file__))
 
+
 def read(*parts):
-    with codecs.open(os.path.join(here, *parts), 'r') as fp:
+    with codecs.open(os.path.join(here, *parts), "r") as fp:
         return fp.read()
 
+
 def find_version(*file_paths):
     version_file = read(*file_paths)
-    version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
-                              version_file, re.M)
+    version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
     if version_match:
         return version_match.group(1)
     raise RuntimeError("Unable to find version string.")
 
+
 setup(
-    name='janni',
+    name="janni",
     version=find_version("janni", "__init__.py"),
-    python_requires='>=3.5.0, <3.9',
-    packages=['janni'],
-    url='https://github.com/MPI-Dortmund/sphire-janni',
-    license='MIT',
-    author='Thorsten Wagner',
+    python_requires=">=3.9, <3.12",
+    packages=["janni"],
+    url="https://github.com/MPI-Dortmund/sphire-janni",
+    license="MIT",
+    author="Thorsten Wagner",
     setup_requires=["Cython"],
     extras_require={
-        'gpu': ['tensorflow-gpu == 1.15.4'],
-        'cpu': ['tensorflow == 1.15.4']
+        "gpu": ["tensorflow[and-cuda] >= 2.0.0, < 2.16.0"],
+        "cpu": ["tensorflow-cpu >= 2.0.0, < 2.16.0"],
     },
     install_requires=[
        "mrcfile >=1.3.0",
-        "Keras == 2.3.1",
-        "numpy >= 1.16.0, < 1.19.0",
-        "h5py >= 2.5.0, < 3.0.0",
+        "Keras",
+        "numpy >= 1.16.0, < 1.26.4",
+        "h5py >= 2.5.0",
        "Pillow >= 6.0.0",
        "Cython",
-        "tifffile==2020.9.3",
-        "GooeyDev >= 1.0.8b3",
-        "wxPython >= 4.1.0",
+        "tifffile == 2020.9.3",
+        "Gooey",
+        "wxPython >= 4.2.1",
        "scikit-image >= 0.15.0",
-        "protobuf < 4"
+        "protobuf == 4.25.3",
     ],
-    author_email='thorsten.wagner@mpi-dortmund.mpg.de',
-    description='noise 2 noise for cryo em data',
-    entry_points={
-        'console_scripts': [
-            'janni_denoise.py = janni.jmain:_main_'
-        ]},
+    author_email="thorsten.wagner@mpi-dortmund.mpg.de",
+    description="noise 2 noise for cryo em data",
+    entry_points={"console_scripts": ["janni_denoise.py = janni.jmain:_main_"]},
 )
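NOTE: [PATCH 1/5] moves janni from the TensorFlow 1.15 / Keras 2.3 era onto the
TF2 line. A quick way to check a resolved environment against the new bounds --
assuming an install from a checkout via `pip install .[cpu]` or
`pip install .[gpu]` (the extras names come from the extras_require block
above) -- is a sketch like:

    # illustrative check, not part of the patch set
    import tensorflow as tf

    major, minor = (int(x) for x in tf.__version__.split(".")[:2])
    assert (2, 0) <= (major, minor) < (2, 16), tf.__version__
    print("TensorFlow", tf.__version__)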
"janni_model.h5", }, ) - config_optional_group = parser.add_argument_group( "Optional arguments", "The arguments are optional to create a config file for JANNI", @@ -108,7 +108,10 @@ def create_config_parser(parser): "--loss", default="mae", help="Loss function that is used during training: Mean squared error (mse) or mean absolute error (mae).", - choices=["mae", "mse",], + choices=[ + "mae", + "mse", + ], ) config_optional_group.add_argument( "--patch_size", @@ -148,9 +151,7 @@ def create_train_parser(parser): "config_path", help="Path to config.json", widget="FileChooser", - gooey_options={ - "wildcard": "*.json" - } + gooey_options={"wildcard": "*.json"}, ) optional_group = parser.add_argument_group( @@ -161,6 +162,7 @@ def create_train_parser(parser): "-g", "--gpu", type=int, default=-1, help="GPU ID to run on" ) + def create_predict_parser(parser): required_group = parser.add_argument_group( "Required arguments", "These options are mandatory to run JANNI" @@ -181,9 +183,7 @@ def create_predict_parser(parser): "model_path", help="File path to trained model.", widget="FileChooser", - gooey_options={ - "wildcard": "*.h5" - } + gooey_options={"wildcard": "*.h5"}, ) optional_group = parser.add_argument_group( @@ -205,21 +205,25 @@ def create_predict_parser(parser): "-g", "--gpu", type=int, default=-1, help="GPU ID to run on" ) + def create_parser(parser): subparsers = parser.add_subparsers(help="sub-command help") - parser_config= subparsers.add_parser("config", help="Create the configuration file for JANNI") + parser_config = subparsers.add_parser( + "config", help="Create the configuration file for JANNI" + ) create_config_parser(parser_config) parser_train = subparsers.add_parser("train", help="Train JANNI for your dataset.") create_train_parser(parser_train) - parser_predict = subparsers.add_parser("denoise", help="Denoise micrographs using a (pre)trained model.") + parser_predict = subparsers.add_parser( + "denoise", help="Denoise micrographs using a (pre)trained model." + ) create_predict_parser(parser_predict) - def get_parser(): parser = GooeyParser(description="Just another noise to noise implementation") create_parser(parser) @@ -238,7 +242,7 @@ def _main_(): Gooey( main, program_name="JANNI " + ini.__version__, - #image_dir=os.path.join(os.path.abspath(os.path.dirname(__file__)), "../icons"), + # image_dir=os.path.join(os.path.abspath(os.path.dirname(__file__)), "../icons"), progress_regex=r"^.* \( Progress:\s+(-?\d+) % \)$", disable_progress_bar_animation=True, tabbed_groups=True, @@ -253,20 +257,20 @@ def main(args=None): parser = get_parser() args = parser.parse_args() - - if "config" in sys.argv[1]: - generate_config_file(config_out_path=args.config_out_path, - architecture="unet", - patch_size=args.patch_size, - movie_dir=args.movie_dir, - even_dir=args.even_dir, - odd_dir=args.odd_dir, - batch_size=args.batch_size, - learning_rate=args.learning_rate, - nb_epoch=args.nb_epoch, - saved_weights_name=args.saved_weights_name, - loss=args.loss,) + generate_config_file( + config_out_path=args.config_out_path, + architecture="unet", + patch_size=args.patch_size, + movie_dir=args.movie_dir, + even_dir=args.even_dir, + odd_dir=args.odd_dir, + batch_size=args.batch_size, + learning_rate=args.learning_rate, + nb_epoch=args.nb_epoch, + saved_weights_name=args.saved_weights_name, + loss=args.loss, + ) else: if isinstance(args.gpu, list): if len(args.gpu) == 1: @@ -281,14 +285,16 @@ def main(args=None): config = read_config(args.config_path) from . 
diff --git a/janni/predict.py b/janni/predict.py
index ecf2f42..5e657f1 100644
--- a/janni/predict.py
+++ b/janni/predict.py
@@ -35,6 +35,7 @@
 import tifffile
 import pathlib
 
+
 def predict(
     input_path,
     output_path,
@@ -43,7 +44,7 @@ def predict(
     patch_size=(1024, 1024),
     padding=15,
     batch_size=4,
-    output_resize_to=None
+    output_resize_to=None,
 ):
 
     if model == "unet" or model == b"unet":
@@ -63,6 +64,7 @@ def predict(
             output_resize_to=output_resize_to,
         )
 
+
 def predict_dir(
     input_path,
     output_path,
@@ -70,7 +72,7 @@ def predict_dir(
     patch_size=(1024, 1024),
     padding=15,
     batch_size=4,
-    output_resize_to=None
+    output_resize_to=None,
 ):
     """
     Denoises images / movies
@@ -87,7 +89,7 @@ def predict_dir(
     if os.path.isfile(input_path):
         list_files = [input_path]
     else:
-        for (dirpath, dirnames, filenames) in os.walk(input_path):
+        for dirpath, dirnames, filenames in os.walk(input_path):
             for filename in filenames:
                 if filename.endswith(utils.SUPPORTED_FILES):
                     path = os.path.join(dirpath, filename)
@@ -100,11 +102,12 @@ def predict_dir(
         patch_size=patch_size,
         padding=padding,
         batch_size=batch_size,
-        output_resize_to=output_resize_to
+        output_resize_to=output_resize_to,
     )
 
     return denoise_image_paths
 
+
 def predict_list(
     image_paths,
     output_path,
@@ -115,7 +118,7 @@ def predict_list(
     output_resize_to=None,
     squarify=False,
     fbinning=utils.fourier_binning,
-    sliceswise=False
+    sliceswise=False,
 ):
     """
     Denoises images / movies
@@ -152,7 +155,7 @@ def predict_list(
         if not os.path.exists(opath):
             if utils.is_movie(path):
                 if not sliceswise:
-                    even, odd = utils.create_image_pair(path,fbinning)
+                    even, odd = utils.create_image_pair(path, fbinning)
                     denoised_even = predict_np(
                         model,
                         even,
@@ -175,7 +178,7 @@ def predict_list(
                     for z in range(img.shape[0]):
                         denoised[z] = predict_np(
                             model,
-                            img[z,:,:],
+                            img[z, :, :],
                             patch_size=patch_size,
                             padding=padding,
                             batch_size=batch_size,
@@ -209,15 +212,25 @@ def predict_list(
                 resize_to = [height, width]
 
             from PIL import Image
-            if len(denoised.shape)==2:
-                denoised = np.array(Image.fromarray(denoised).resize(
-                    (resize_to[1], resize_to[0]), resample=Image.BILINEAR))
-            elif len(denoised.shape)==3:
-                resized_denoised = np.zeros(shape=(denoised.shape[0],resize_to[0],resize_to[1]),dtype=np.float32)
-                for z in range(img.shape[0]):
-                    resized_denoised[z,:,:] = np.array(Image.fromarray(denoised[z,:,:]).resize(
-                        (resize_to[1], resize_to[0]), resample=Image.BILINEAR))
+
+            if len(denoised.shape) == 2:
+                denoised = np.array(
+                    Image.fromarray(denoised).resize(
+                        (resize_to[1], resize_to[0]), resample=Image.BILINEAR
+                    )
+                )
+            elif len(denoised.shape) == 3:
+                resized_denoised = np.zeros(
+                    shape=(denoised.shape[0], resize_to[0], resize_to[1]),
+                    dtype=np.float32,
+                )
+                for z in range(img.shape[0]):
+                    resized_denoised[z, :, :] = np.array(
+                        Image.fromarray(denoised[z, :, :]).resize(
+                            (resize_to[1], resize_to[0]),
+                            resample=Image.BILINEAR,
+                        )
+                    )
 
         print("Write denoised image in", opath)
         if opath.endswith((".mrc", ".mrcs")):
@@ -231,18 +244,23 @@ def predict_list(
 
     return denoise_image_paths
 
+
 def squarify(image, size=None):
     np.random.seed()
 
     if size is not None:
-        target_size=size
+        target_size = size
     else:
         target_size = np.max(image.shape)
 
     mean = np.mean(image)
     rectified_image = np.ones(shape=(target_size, target_size)) * mean
-    rectified_image[(rectified_image.shape[0]-image.shape[0]):rectified_image.shape[0],0:image.shape[1]] = image
+    rectified_image[
+        (rectified_image.shape[0] - image.shape[0]) : rectified_image.shape[0],
+        0 : image.shape[1],
+    ] = image
     return rectified_image
 
+
 def predict_np(model, image, patch_size=(1024, 1024), padding=15, batch_size=4):
     """
     Denoises an image given a keras model.
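NOTE: the predict_dir() signature reformatted above is also the simplest
programmatic entry point for denoising. A minimal call, using the defaults
visible in the signature (all paths here are placeholders), might be:

    from janni.predict import predict_dir

    predict_dir(
        input_path="micrographs/",
        output_path="denoised/",
        model_path="janni_model.h5",
        patch_size=(1024, 1024),
        padding=15,
        batch_size=4,
    )

The same operation is exposed on the command line through the
janni_denoise.py entry point and the "denoise" subcommand registered in
jmain.py.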
diff --git a/janni/train.py b/janni/train.py
index b343722..d103456 100644
--- a/janni/train.py
+++ b/janni/train.py
@@ -36,6 +36,7 @@
 import tifffile
 import numpy as np
 
+
 def train(
     even_path,
     odd_path,
@@ -47,9 +48,9 @@ def train(
     patch_size=(1024, 1024),
     batch_size=4,
     loss="mae",
-    fbinning=utils.fourier_binning
+    fbinning=utils.fourier_binning,
 ):
-    '''
+    """
     Does the complete noise2noise training and writes the model file to disk.
     :param even_path: Path where "even averages" will be written.
     :param odd_path: Path here "odd averages" will be written
@@ -61,7 +62,7 @@ def train(
     :param patch_size: Patch size in pixel. The network is trained on random patches of the images.
     :param batch_size: Mini-batch size used during training.
     :return: trained model
-    '''
+    """
     print("Start training")
 
     # Read training even/odd micrographs
@@ -75,11 +76,12 @@ def train(
         patch_size=patch_size,
         batch_size=batch_size,
         loss=loss,
-        fbinning=fbinning
-        )
+        fbinning=fbinning,
+    )
     trained_model.save_weights(model_out_path)
 
     import h5py
-    with h5py.File(model_out_path, mode='r+') as f:
+
+    with h5py.File(model_out_path, mode="r+") as f:
         f["model_name"] = np.array((model), dtype=h5py.special_dtype(vlen=str))
         f["patch_size"] = patch_size
 
@@ -87,6 +89,7 @@ def train(
 
     return trained_model
 
+
 def train_movie_dir(
     even_path,
     odd_path,
@@ -97,9 +100,9 @@ def train_movie_dir(
     patch_size=(1024, 1024),
     batch_size=4,
     loss="mae",
-    fbinning=utils.fourier_binning
+    fbinning=utils.fourier_binning,
 ):
-    '''
+    """
     Does the complete noise2noise training.
     :param even_path: Path where "even averages" will be written.
     :param odd_path: Path here "odd averages" will be written
@@ -112,11 +115,11 @@ def train_movie_dir(
     :param batch_size: n
     :param loss: mae or mse are possible
     :return: trained model
-    '''
+    """
 
     # Read training even/odd micrographs
     even_files, odd_files = calc_even_odd(
-        movie_path, even_path, odd_path, recursive=True,fbinning=fbinning
+        movie_path, even_path, odd_path, recursive=True, fbinning=fbinning
     )
     trained_model = train_pairs(
         even_files,
@@ -127,17 +130,14 @@ def train_movie_dir(
         batch_size=batch_size,
         epochs=epochs,
         valid_split=0.1,
-        loss=loss
+        loss=loss,
     )
 
     return trained_model
 
-
-def calc_even_odd(movie_path,
-                  even_path,
-                  odd_path,
-                  recursive=True,
-                  fbinning=utils.fourier_binning):
+def calc_even_odd(
+    movie_path, even_path, odd_path, recursive=True, fbinning=utils.fourier_binning
+):
     """
     Calculates averages based on the even/odd frames of the movies in movie_path and save
     the respective averages in even_path or odd_path.
@@ -151,12 +151,12 @@
     # Read training even/odd micrographs
     even_files = []
     odd_files = []
-    for (dirpath, dirnames, filenames) in os.walk(even_path):
+    for dirpath, dirnames, filenames in os.walk(even_path):
         for filename in filenames:
             if filename.endswith(utils.SUPPORTED_FILES):
                 even_files.append(os.path.join(dirpath, filename))
 
-    for (dirpath, dirnames, filenames) in os.walk(odd_path):
+    for dirpath, dirnames, filenames in os.walk(odd_path):
         for filename in filenames:
             if filename.endswith(utils.SUPPORTED_FILES):
                 odd_files.append(os.path.join(dirpath, filename))
@@ -175,20 +175,26 @@ def calc_even_odd(movie_path,
     filenames_even = list(map(os.path.basename, even_files))
     filenames_odd = list(map(os.path.basename, odd_files))
     if movie_path:
-        for (dirpath, dirnames, filenames) in os.walk(movie_path):
+        for dirpath, dirnames, filenames in os.walk(movie_path):
             for filename in filenames:
                 if filename.endswith(utils.SUPPORTED_FILES):
                     if filename not in filenames_even and filename not in filenames_odd:
                         path = os.path.join(dirpath, filename)
-                        movies_to_split.append((path,filename))
+                        movies_to_split.append((path, filename))
 
         if recursive == False:
             break
 
     for tuble_index, movie_tuble in enumerate(movies_to_split):
         path, filename = movie_tuble
-        print("Create even/odd average for:", path, "( Progress: ",int(100*tuble_index/len(movies_to_split)),"% )")
-        even, odd = utils.create_image_pair(path,fbinning)
+        print(
+            "Create even/odd average for:",
+            path,
+            "( Progress: ",
+            int(100 * tuble_index / len(movies_to_split)),
+            "% )",
+        )
+        even, odd = utils.create_image_pair(path, fbinning)
         out_even_path = os.path.join(even_path, filename)
         out_odd_path = os.path.join(odd_path, filename)
         if path.endswith(("mrcs", "mrc")):
@@ -221,7 +227,7 @@ def train_pairs(
     callbacks=[],
     batch_size=4,
     valid_split=0.1,
-    loss="mae"
+    loss="mae",
 ):
     """
     Training noise2noise model.
@@ -236,11 +242,11 @@ def train_pairs(
     :param valid_split: training-validion split.
     :return: Trained keras model
     """
-    #train_valid_split = int(valid_split * len(pair_files_a))
+    # train_valid_split = int(valid_split * len(pair_files_a))
     train_pair_a_files = pair_files_a
-    #valid_pair_a_files = pair_files_a[:train_valid_split]
+    # valid_pair_a_files = pair_files_a[:train_valid_split]
     train_pair_b_files = pari_files_b
-    #valid_pair_b_files = pari_files_b[:train_valid_split]
+    # valid_pair_b_files = pari_files_b[:train_valid_split]
 
     train_gen = gen.patch_pair_batch_generator(
         pair_a_images=train_pair_a_files,
@@ -250,24 +256,24 @@ def train_pairs(
         augment=True,
     )
 
-    '''
+    """
     valid_gen = gen.patch_pair_batch_generator(
         pair_a_images=valid_pair_a_files,
         pair_b_images=valid_pair_b_files,
         patch_size=patch_size,
         batch_size=batch_size,
     )
-    '''
+    """
     if model == "unet":
         model = models.get_model_unet(input_size=patch_size, kernel_size=(3, 3))
-    opt = Adam(lr=learning_rate, epsilon=10 ** -8, amsgrad=True)
+    opt = Adam(lr=learning_rate, epsilon=10**-8, amsgrad=True)
     model.compile(optimizer=opt, loss=loss)
     history = model.fit_generator(
         generator=train_gen,
         epochs=epochs,
         callbacks=callbacks,
         workers=4,
-        use_multiprocessing=False
+        use_multiprocessing=False,
     )
     return model
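NOTE: the even/odd pairing that train() and calc_even_odd() above rely on is
plain frame splitting: the noise2noise training pair is built by summing
alternating movie frames, exactly as create_image_pair() in utils.py does. In
miniature (synthetic data, for illustration only):

    import numpy as np

    movie = np.random.rand(24, 64, 64).astype(np.float32)  # stand-in movie stack
    even = np.sum(movie[::2], axis=0)   # sum of frames 0, 2, 4, ...
    odd = np.sum(movie[1::2], axis=0)   # sum of frames 1, 3, 5, ...
    # two independently noisy views of the same signal -> one training pair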
diff --git a/janni/utils.py b/janni/utils.py
index a211fcd..4009f0d 100644
--- a/janni/utils.py
+++ b/janni/utils.py
@@ -35,13 +35,13 @@
 
 
 def image_to_patches(image, patch_size=(1024, 1024), padding=15):
-    '''
+    """
     Divides an image into patches
     :param image: 2D numpy array
     :param patch_size: Size of patches in pixel
     :param padding: Number of pixel the patches do overlap.
     :return: 3D Numpy array with shape (NUM_PATCHES,PATCH_WIDTH,PATCH_HIGHT) and applied pads
-    '''
+    """
     roi_size = (patch_size[0] - 2 * padding, patch_size[1] - 2 * padding)
 
     pad_before0 = padding
@@ -83,14 +83,14 @@ def image_to_patches(image, patch_size=(1024, 1024), padding=15):
 
 
 def patches_to_image(patches, pads, image_shape=(4096, 4096), padding=15):
-    '''
+    """
     Stitches the image together given the patches.
     :param patches: 3D numpy array with shape (NUM_PATCHES,PATCH_WIDTH,PATCH_HIGHT)
     :param pads: Applied pads
     :param image_shape: Original image size
     :param padding: Specified padding
     :return: Image as 2D numpy array
-    '''
+    """
     patch_size = (patches.shape[1], patches.shape[2])
     roi_size = (patch_size[0] - 2 * padding, patch_size[1] - 2 * padding)
 
@@ -146,44 +146,52 @@ def patches_to_image(patches, pads, image_shape=(4096, 4096), padding=15):
         image = image[pads[0][0] : -pads[0][1], pads[1][0] : -pads[1][1]]
     return image
 
+
 def rescale_binning(image, bin_factor):
     from skimage.transform import rescale
-    image = rescale(image,1.0/bin_factor)
+
+    image = rescale(image, 1.0 / bin_factor)
 
     return image
 
+
 def fourier_binning(image, bin_factor):
     image = np.squeeze(image)
     newx = image.shape[1] // bin_factor
     newy = image.shape[0] // bin_factor
-    assert image.shape[0] % bin_factor == 0 and image.shape[1] % bin_factor == 0, "ERROR! Dimensions are not integer-divisible by downsampling factor"
-    assert newx % bin_factor == 0 and newy % bin_factor == 0, "ERROR! Final dimensions need to be even (for now)"
+    assert (
+        image.shape[0] % bin_factor == 0 and image.shape[1] % bin_factor == 0
+    ), "ERROR! Dimensions are not integer-divisible by downsampling factor"
+    assert (
+        newx % bin_factor == 0 and newy % bin_factor == 0
+    ), "ERROR! Final dimensions need to be even (for now)"
 
     imft = np.fft.fft2(image)
 
     # Shift origin to center (so that I can cut out the middle)
-    shft= np.roll(np.roll(imft, newx//2, axis=0), newy//2, axis=1)
+    shft = np.roll(np.roll(imft, newx // 2, axis=0), newy // 2, axis=1)
 
     # Cut out the middle
-    wift= shft[:newy,:newx]
+    wift = shft[:newy, :newx]
 
     # Shift origin back to (0,0)
-    wishft= np.roll(np.roll(wift, -newx//2, axis=0), -newy//2, axis=1)
+    wishft = np.roll(np.roll(wift, -newx // 2, axis=0), -newy // 2, axis=1)
 
     # Compute invertse FT
     real_array = np.fft.ifft2(wishft).real
-    real_array = real_array-np.mean(real_array)+np.mean(image)
+    real_array = real_array - np.mean(real_array) + np.mean(image)
     real_array = real_array.astype(np.float32)
 
     return real_array
 
-def create_image_pair(movie_path,fbinning=fourier_binning):
-    '''
+
+def create_image_pair(movie_path, fbinning=fourier_binning):
+    """
     Calculates averages based on even / odd frames in a movie
     :param movie_path: Path to movie
     :return: even and odd average
-    '''
+    """
     import os
 
     bin_file = os.path.join(os.path.dirname(movie_path), "bin.txt")
@@ -194,19 +202,19 @@ def create_image_pair(movie_path,fbinning=fourier_binning):
 
     if os.path.exists(bin_file):
         bin_factor = int(np.genfromtxt(bin_file))
-        print("Do",bin_factor,"x binning", movie_path)
-        even = fbinning(even,bin_factor)
-        odd = fbinning(odd,bin_factor)
+        print("Do", bin_factor, "x binning", movie_path)
+        even = fbinning(even, bin_factor)
+        odd = fbinning(odd, bin_factor)
 
     return even, odd
 
 
 def normalize(img):
-    '''
+    """
     Normalize a 2D image. Furthermore it will limit the values to -3 and 3 standard deviations.
     :param img: Image to normalize (2D numpy array)
     :return: Normalized image, mean, standard diviation
-    '''
+    """
     mean = np.mean(img)
     sd = np.std(img)
     img = (img - mean) / sd
@@ -214,18 +222,18 @@ def normalize(img):
 
     return img, mean, sd
 
-def read_image(path,use_mmap=False):
+
+def read_image(path, use_mmap=False):
     if path.endswith((".tif", ".tiff")):
         try:
-            img = tifffile.memmap(path,mode="r")
+            img = tifffile.memmap(path, mode="r")
         except ValueError:
             img = tifffile.imread(path)
         return img
     elif path.endswith(("mrc", "mrcs")):
         if use_mmap == False:
-            mrc_image_data = mrcfile.open(path, permissive=True, mode='r')
+            mrc_image_data = mrcfile.open(path, permissive=True, mode="r")
         else:
-            mrc_image_data = mrcfile.mmap(path, permissive=True, mode='r')
+            mrc_image_data = mrcfile.mmap(path, permissive=True, mode="r")
         return mrc_image_data.data
     else:
         print("Image format not supported. File: ", path)
@@ -233,11 +241,11 @@ def read_image(path,use_mmap=False):
 
 
 def is_movie(path):
-    '''
+    """
     Checks if file is movie or not
     :param path: Path to file
     :return: True if movie.
-    '''
+    """
     if path.endswith((".tif", ".tiff")):
         tif = tifffile.TiffFile(path)
         return len(tif.pages) > 1
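NOTE: everything in [PATCH 2/5] is mechanical reformatting. The "magic
trailing comma" explains most of the exploded calls above: black keeps a
collection one-element-per-line whenever the source already ends it with a
comma. A miniature reproduction (requires the black package; the snippet is
illustrative, not part of the series):

    import black

    src = "f(choices=['mae', 'mse',])"  # trailing comma, as in the old jmain.py
    print(black.format_str(src, mode=black.Mode()))
    # the list comes back exploded onto one line per element, as in the diff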
From bda43c76183ac1c7479a58a1c74b6708f43e22f3 Mon Sep 17 00:00:00 2001
From: stephen-riggs
Date: Tue, 9 Apr 2024 12:00:00 +0100
Subject: [PATCH 3/5] Changes suggested by flake8

---
 janni/jmain.py                | 5 ++---
 janni/patch_pair_generator.py | 1 -
 janni/train.py                | 5 ++---
 janni/utils.py                | 2 +-
 4 files changed, 5 insertions(+), 8 deletions(-)

diff --git a/janni/jmain.py b/janni/jmain.py
index e081fdf..e6c8982 100644
--- a/janni/jmain.py
+++ b/janni/jmain.py
@@ -26,7 +26,6 @@
 SOFTWARE.
 """
 
-import argparse
 import sys
 import json
 import os
@@ -235,7 +234,7 @@ def _main_():
     import sys
 
     if len(sys.argv) >= 2:
-        if not "--ignore-gooey" in sys.argv:
+        if "--ignore-gooey" not in sys.argv:
             sys.argv.append("--ignore-gooey")
 
     kwargs = {"terminal_font_family": "monospace", "richtext_controls": True}
@@ -339,7 +338,7 @@ def main(args=None):
                 try:
                     u = model.tolist()
                     model = u.decode()
-                except:
+                except Exception:
                     pass
                 patch_size = tuple(f["patch_size"])
             except KeyError:
diff --git a/janni/patch_pair_generator.py b/janni/patch_pair_generator.py
index dcc016c..00d17c7 100644
--- a/janni/patch_pair_generator.py
+++ b/janni/patch_pair_generator.py
@@ -29,7 +29,6 @@
 from keras.utils import Sequence
 from random import shuffle
 import numpy as np
-import mrcfile
 from . import utils
 
 
diff --git a/janni/train.py b/janni/train.py
index d103456..25b887c 100644
--- a/janni/train.py
+++ b/janni/train.py
@@ -31,7 +31,6 @@
 from . import utils
 from keras.optimizers import Adam
 import os
-from . import utils
 import mrcfile
 import tifffile
 import numpy as np
@@ -182,7 +181,7 @@ def calc_even_odd(
                         path = os.path.join(dirpath, filename)
                         movies_to_split.append((path, filename))
 
-        if recursive == False:
+        if recursive is False:
             break
 
     for tuble_index, movie_tuble in enumerate(movies_to_split):
@@ -269,7 +268,7 @@ def train_pairs(
         model = models.get_model_unet(input_size=patch_size, kernel_size=(3, 3))
     opt = Adam(lr=learning_rate, epsilon=10**-8, amsgrad=True)
     model.compile(optimizer=opt, loss=loss)
-    history = model.fit_generator(
+    model.fit_generator(
         generator=train_gen,
         epochs=epochs,
         callbacks=callbacks,
diff --git a/janni/utils.py b/janni/utils.py
index 4009f0d..b391ea6 100644
--- a/janni/utils.py
+++ b/janni/utils.py
@@ -230,7 +230,7 @@ def read_image(path, use_mmap=False):
             img = tifffile.imread(path)
         return img
     elif path.endswith(("mrc", "mrcs")):
-        if use_mmap == False:
+        if use_mmap is False:
             mrc_image_data = mrcfile.open(path, permissive=True, mode="r")
         else:
             mrc_image_data = mrcfile.mmap(path, permissive=True, mode="r")
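NOTE: the flake8 findings fixed in [PATCH 3/5] are classics: unused imports
(F401: argparse, mrcfile), a duplicate import (F811: the second
`from . import utils`), comparison to False (E712), `not ... in` (E713), a
bare except (E722), and an unused assignment (F841, the dropped `history =`).
The exact flake8 invocation is not recorded in the series; a typical pairing
with black uses black's 88-column limit, e.g. run from the repository root:

    import subprocess

    # hypothetical invocation -- the flag values are assumptions, not from this PR
    subprocess.run(
        ["flake8", "janni", "setup.py", "--max-line-length=88"],
        check=True,
    )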
From 5022210a34cef27727657cf3738e343516d343ef Mon Sep 17 00:00:00 2001
From: stephen-riggs
Date: Tue, 9 Apr 2024 12:06:16 +0100
Subject: [PATCH 4/5] Update keras imports to tensorflow

---
 janni/models.py               | 17 ++++++++++++-----
 janni/patch_pair_generator.py |  2 +-
 janni/train.py                |  2 +-
 janni/utils.py                |  3 +--
 4 files changed, 15 insertions(+), 9 deletions(-)

diff --git a/janni/models.py b/janni/models.py
index 080d556..9c7def9 100644
--- a/janni/models.py
+++ b/janni/models.py
@@ -26,11 +26,18 @@
 SOFTWARE.
 """
 
-from keras.models import Model
-from keras.layers import Input, Add, Conv2DTranspose, MaxPooling2D, UpSampling2D, ReLU
-from keras.layers.convolutional import Conv2D
-from keras.layers.advanced_activations import LeakyReLU
-from keras.layers.merge import concatenate
+from tensorflow.keras import Model
+from tensorflow.keras.layers import (
+    Add,
+    Conv2D,
+    Conv2DTranspose,
+    Input,
+    LeakyReLU,
+    MaxPooling2D,
+    ReLU,
+    UpSampling2D,
+    concatenate,
+)
 
 
 def get_rednet(
diff --git a/janni/patch_pair_generator.py b/janni/patch_pair_generator.py
index 00d17c7..fc71bfc 100644
--- a/janni/patch_pair_generator.py
+++ b/janni/patch_pair_generator.py
@@ -26,7 +26,7 @@
 SOFTWARE.
 """
 
-from keras.utils import Sequence
+from tensorflow.keras.utils import Sequence
 from random import shuffle
 import numpy as np
 from . import utils
diff --git a/janni/train.py b/janni/train.py
index 25b887c..c2d1289 100644
--- a/janni/train.py
+++ b/janni/train.py
@@ -29,7 +29,7 @@
 from . import models
 from . import patch_pair_generator as gen
 from . import utils
-from keras.optimizers import Adam
+from tensorflow.keras.optimizers import Adam
 import os
 import mrcfile
 import tifffile
diff --git a/janni/utils.py b/janni/utils.py
index b391ea6..d48829e 100644
--- a/janni/utils.py
+++ b/janni/utils.py
@@ -29,7 +29,6 @@
 import mrcfile
 import tifffile
 import numpy as np
-from . import utils
 
 SUPPORTED_FILES = (".mrc", ".mrcs", ".tiff", ".tif")
 
@@ -195,7 +194,7 @@ def create_image_pair(movie_path, fbinning=fourier_binning):
     import os
 
     bin_file = os.path.join(os.path.dirname(movie_path), "bin.txt")
-    data = utils.read_image(movie_path)
+    data = read_image(movie_path)
 
     even = np.sum(data[::2], axis=0).astype(np.float32)
     odd = np.sum(data[1::2], axis=0).astype(np.float32)
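NOTE: [PATCH 4/5] only rewrites the import paths; train_pairs() still calls
Adam(lr=...) and model.fit_generator(...). Both are legacy spellings under
tensorflow.keras -- `lr` was renamed to `learning_rate`, and fit_generator()
was deprecated in favour of fit(), which accepts Sequence generators directly
(fit_generator is gone from recent TF 2.x releases). If those calls ever need
modernising, the TF2-native form would be a sketch along these lines (variable
names taken from train_pairs above; untested against this codebase):

    from tensorflow.keras.optimizers import Adam

    opt = Adam(learning_rate=learning_rate, epsilon=1e-8, amsgrad=True)
    model.compile(optimizer=opt, loss=loss)
    model.fit(train_gen, epochs=epochs, callbacks=callbacks)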
Final dimensions need to be even (for now)" imft = np.fft.fft2(image) # Shift origin to center (so that I can cut out the middle) - shft= np.roll(np.roll(imft, newx//2, axis=0), newy//2, axis=1) + shft = np.roll(np.roll(imft, newx // 2, axis=0), newy // 2, axis=1) # Cut out the middle - wift= shft[:newy,:newx] + wift = shft[:newy, :newx] # Shift origin back to (0,0) - wishft= np.roll(np.roll(wift, -newx//2, axis=0), -newy//2, axis=1) + wishft = np.roll(np.roll(wift, -newx // 2, axis=0), -newy // 2, axis=1) # Compute invertse FT real_array = np.fft.ifft2(wishft).real - real_array = real_array-np.mean(real_array)+np.mean(image) + real_array = real_array - np.mean(real_array) + np.mean(image) real_array = real_array.astype(np.float32) return real_array -def create_image_pair(movie_path,fbinning=fourier_binning): - ''' + +def create_image_pair(movie_path, fbinning=fourier_binning): + """ Calculates averages based on even / odd frames in a movie :param movie_path: Path to movie :return: even and odd average - ''' + """ import os bin_file = os.path.join(os.path.dirname(movie_path), "bin.txt") @@ -194,19 +202,19 @@ def create_image_pair(movie_path,fbinning=fourier_binning): if os.path.exists(bin_file): bin_factor = int(np.genfromtxt(bin_file)) - print("Do",bin_factor,"x binning", movie_path) - even = fbinning(even,bin_factor) - odd = fbinning(odd,bin_factor) + print("Do", bin_factor, "x binning", movie_path) + even = fbinning(even, bin_factor) + odd = fbinning(odd, bin_factor) return even, odd def normalize(img): - ''' + """ Normalize a 2D image. Furthermore it will limit the values to -3 and 3 standard deviations. :param img: Image to normalize (2D numpy array) :return: Normalized image, mean, standard diviation - ''' + """ mean = np.mean(img) sd = np.std(img) img = (img - mean) / sd @@ -214,18 +222,18 @@ def normalize(img): return img, mean, sd -def read_image(path,use_mmap=False): +def read_image(path, use_mmap=False): if path.endswith((".tif", ".tiff")): try: - img = tifffile.memmap(path,mode="r") + img = tifffile.memmap(path, mode="r") except ValueError: img = tifffile.imread(path) return img elif path.endswith(("mrc", "mrcs")): if use_mmap == False: - mrc_image_data = mrcfile.open(path, permissive=True, mode='r') + mrc_image_data = mrcfile.open(path, permissive=True, mode="r") else: - mrc_image_data = mrcfile.mmap(path, permissive=True, mode='r') + mrc_image_data = mrcfile.mmap(path, permissive=True, mode="r") return mrc_image_data.data else: print("Image format not supported. File: ", path) @@ -233,11 +241,11 @@ def read_image(path,use_mmap=False): def is_movie(path): - ''' + """ Checks if file is movie or not :param path: Path to file :return: True if movie. - ''' + """ if path.endswith((".tif", ".tiff")): tif = tifffile.TiffFile(path) return len(tif.pages) > 1 From bda43c76183ac1c7479a58a1c74b6708f43e22f3 Mon Sep 17 00:00:00 2001 From: stephen-riggs Date: Tue, 9 Apr 2024 12:00:00 +0100 Subject: [PATCH 3/5] Changes suggested by flake8 --- janni/jmain.py | 5 ++--- janni/patch_pair_generator.py | 1 - janni/train.py | 5 ++--- janni/utils.py | 2 +- 4 files changed, 5 insertions(+), 8 deletions(-) diff --git a/janni/jmain.py b/janni/jmain.py index e081fdf..e6c8982 100644 --- a/janni/jmain.py +++ b/janni/jmain.py @@ -26,7 +26,6 @@ SOFTWARE. 
""" -import argparse import sys import json import os @@ -235,7 +234,7 @@ def _main_(): import sys if len(sys.argv) >= 2: - if not "--ignore-gooey" in sys.argv: + if "--ignore-gooey" not in sys.argv: sys.argv.append("--ignore-gooey") kwargs = {"terminal_font_family": "monospace", "richtext_controls": True} @@ -339,7 +338,7 @@ def main(args=None): try: u = model.tolist() model = u.decode() - except: + except Exception: pass patch_size = tuple(f["patch_size"]) except KeyError: diff --git a/janni/patch_pair_generator.py b/janni/patch_pair_generator.py index dcc016c..00d17c7 100644 --- a/janni/patch_pair_generator.py +++ b/janni/patch_pair_generator.py @@ -29,7 +29,6 @@ from keras.utils import Sequence from random import shuffle import numpy as np -import mrcfile from . import utils diff --git a/janni/train.py b/janni/train.py index d103456..25b887c 100644 --- a/janni/train.py +++ b/janni/train.py @@ -31,7 +31,6 @@ from . import utils from keras.optimizers import Adam import os -from . import utils import mrcfile import tifffile import numpy as np @@ -182,7 +181,7 @@ def calc_even_odd( path = os.path.join(dirpath, filename) movies_to_split.append((path, filename)) - if recursive == False: + if recursive is False: break for tuble_index, movie_tuble in enumerate(movies_to_split): @@ -269,7 +268,7 @@ def train_pairs( model = models.get_model_unet(input_size=patch_size, kernel_size=(3, 3)) opt = Adam(lr=learning_rate, epsilon=10**-8, amsgrad=True) model.compile(optimizer=opt, loss=loss) - history = model.fit_generator( + model.fit_generator( generator=train_gen, epochs=epochs, callbacks=callbacks, diff --git a/janni/utils.py b/janni/utils.py index 4009f0d..b391ea6 100644 --- a/janni/utils.py +++ b/janni/utils.py @@ -230,7 +230,7 @@ def read_image(path, use_mmap=False): img = tifffile.imread(path) return img elif path.endswith(("mrc", "mrcs")): - if use_mmap == False: + if use_mmap is False: mrc_image_data = mrcfile.open(path, permissive=True, mode="r") else: mrc_image_data = mrcfile.mmap(path, permissive=True, mode="r") From 5022210a34cef27727657cf3738e343516d343ef Mon Sep 17 00:00:00 2001 From: stephen-riggs Date: Tue, 9 Apr 2024 12:06:16 +0100 Subject: [PATCH 4/5] Update keras imports to tensorflow --- janni/models.py | 17 ++++++++++++----- janni/patch_pair_generator.py | 2 +- janni/train.py | 2 +- janni/utils.py | 3 +-- 4 files changed, 15 insertions(+), 9 deletions(-) diff --git a/janni/models.py b/janni/models.py index 080d556..9c7def9 100644 --- a/janni/models.py +++ b/janni/models.py @@ -26,11 +26,18 @@ SOFTWARE. """ -from keras.models import Model -from keras.layers import Input, Add, Conv2DTranspose, MaxPooling2D, UpSampling2D, ReLU -from keras.layers.convolutional import Conv2D -from keras.layers.advanced_activations import LeakyReLU -from keras.layers.merge import concatenate +from tensorflow.keras import Model +from tensorflow.keras.layers import ( + Add, + Conv2D, + Conv2DTranspose, + Input, + LeakyReLU, + MaxPooling2D, + ReLU, + UpSampling2D, + concatenate, +) def get_rednet( diff --git a/janni/patch_pair_generator.py b/janni/patch_pair_generator.py index 00d17c7..fc71bfc 100644 --- a/janni/patch_pair_generator.py +++ b/janni/patch_pair_generator.py @@ -26,7 +26,7 @@ SOFTWARE. """ -from keras.utils import Sequence +from tensorflow.keras.utils import Sequence from random import shuffle import numpy as np from . import utils diff --git a/janni/train.py b/janni/train.py index 25b887c..c2d1289 100644 --- a/janni/train.py +++ b/janni/train.py @@ -29,7 +29,7 @@ from . import models from . 
@@ -37,7 +37,7 @@ def find_version(*file_paths):
     install_requires=[
         "mrcfile >=1.3.0",
         "Keras",
-        "numpy >= 1.16.0, < 1.26.4",
+        "numpy >= 1.16.0, <= 1.26.4",
         "h5py >= 2.5.0",
         "Pillow >= 6.0.0",
         "Cython",
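NOTE: 1.26.4 is the final release of the NumPy 1.x line, so switching the
bound from `<` to `<=` (matching cryolo's pin) admits that last 1.x release
while still excluding NumPy 2.0, which the TensorFlow versions pinned in
[PATCH 1/5] predate. A resolved environment can be checked against the final
pins with, for example (the packaging dependency is an assumption -- any
version-parsing helper works):

    import numpy as np
    from packaging.version import Version

    assert Version("1.16.0") <= Version(np.__version__) <= Version("1.26.4")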