Updated code for simulations (#66)

* Minor PEP8 fixes

* Cropped phantom and changed output default folder

* Added tqdm progress bar

* Fixed generation of cord and GM masks

* Cleanup

* Updated .gitignore

* Refactoring to remove duplication

* Cleanup

* Updated README

* Started refactoring to get rid of old code deps

* Fixed CSV reading code

* Fixed subprocess syntax

* Output results in separate folder

* Also output WM mask

* Introduced WM mask

* Implemented metrics computation
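
The metrics are SNR computed with sct_compute_snr (both the "diff" and "single" methods), WM/GM contrast in percent, and CNR = Contrast * SNR_single; see compute_metrics() in simu_process_data.py below.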

* Cleanup

* Fixed wrong dict usage; updated README

* Cleanup

* Updated TODO

* Fixed xticklabels

* Fixed Matplotlib warning

Warning:
<ipython-input-3-13658aa197ad>:1: UserWarning: FixedFormatter should only be used together with FixedLocator
  ax.set_yticklabels(yticklabels, fontsize=fontsize_axes)

Solution:
https://stackoverflow.com/questions/63723514/userwarning-fixedformatter-should-only-be-used-together-with-fixedlocator
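
A minimal standalone sketch of the fix pattern (pin the tick positions with a FixedLocator before relabelling, as done in simu_make_figures.py below):

```python
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker

fig, ax = plt.subplots()
ax.bar([0, 1, 2], [10, 20, 30])

# set_yticklabels() installs a FixedFormatter; without a matching
# FixedLocator, Matplotlib emits the UserWarning above. Pinning the
# current tick positions first silences it.
yticks = ax.get_yticks()
ax.yaxis.set_major_locator(mticker.FixedLocator(yticks))
ax.set_yticklabels([int(t) for t in yticks], fontsize=14)
```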

* Fixed TypeError: a bytes-like object is required, not 'str'

Solution:
https://stackoverflow.com/questions/34283178/typeerror-a-bytes-like-object-is-required-not-str-in-python-and-csv
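
A minimal sketch of the Python 3 pattern (the committed change swaps the open mode from "wb" to "w"; the newline="" argument is an extra nicety not in the commit):

```python
import csv

# csv.writer produces str in Python 3, so the file must be opened in
# text mode; opening it with "wb" (bytes mode) raises the TypeError above.
with open("results.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerow(["%.2f" % v for v in (23.0, 42.5)])
```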

* Output in simu_results/

* Updated .gitignore

* Need to uncache, otherwise the nibabel object changes

More info:
https://nipy.org/nibabel/images_and_memory.html
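
A minimal sketch of the pattern now used in simu_create_phantom.py (file name hypothetical):

```python
import nibabel as nib

nii = nib.load("PAM50_wm.nii.gz")
for wm_value in (100, 200):
    # get_fdata() caches the returned array inside the image object, so
    # the in-place edit below would otherwise leak into the next
    # iteration. uncache() drops the cached copy first.
    nii.uncache()
    data = nii.get_fdata()
    data *= wm_value
```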

* Removed verbose

* Added progress bar

* Changed output location

* Compute contrast in %
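
A minimal sketch of the formula, as used for the x-tick labels and in compute_metrics() below:

```python
wm, gm = 100.0, 120.0  # simulated WM and GM signal values
# Percent contrast relative to the lower of the two signals:
# 100 * |120 - 100| / min(120, 100) = 20
contrast = 100 * abs(gm - wm) / min(gm, wm)
```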

* Fixed creation of WM mask

It was waaay too big

* Eroded GM and WM masks further

To address the accuracy issue when computing metrics
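
In the committed code the erosion is achieved by thresholding the PAM50 partial-volume maps at thr_mask = 0.9 rather than by a morphological operation; a minimal sketch, with a random array standing in for a partial-volume map:

```python
import numpy as np

pv_map = np.random.rand(36, 24, 10)  # stand-in for a partial-volume map in [0, 1]
thr_mask = 0.9
mask = pv_map.copy()
# Zeroing everything below the threshold keeps only near-pure tissue
# voxels, shrinking the mask away from the WM/GM boundary.
mask[mask < thr_mask] = 0
```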

* Cosmetic changes on the figure

* Cleanup

* Fixed centering of xtick
jcohenadad authored Nov 24, 2021
1 parent 6c5bca4 commit 0fe8d01
Showing 6 changed files with 119 additions and 113 deletions.
3 changes: 3 additions & 0 deletions .gitignore
@@ -4,3 +4,6 @@ results*
.DS_Store
venv/

# output
simu_*

6 changes: 5 additions & 1 deletion README.md
@@ -79,7 +79,11 @@ python generate_figure_spinegeneric.py -ip <PATH_DATA>/participants.tsv -ir cd <
* [simu_create_phantom.py](./simu_create_phantom.py): Generate synthetic phantom
of WM and GM that can be used to validate the proposed evaluation metrics. The phantoms are generated with random noise,
so running the script multiple times will not produce the same output.
This script is meant to be run twice in order to assess the metrics with the following functions.
This script is meant to be run twice in order to assess the metrics with the following functions. Example:
```bash
python simu_create_phantom.py -o simu_phantom1
python simu_create_phantom.py -o simu_phantom2
```
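Running the script twice produces two independent noise realizations of each phantom; [simu_process_data.py](./simu_process_data.py) then pairs the files across the two output folders to compute the metrics.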
* [simu_process_data.py](./simu_process_data.py): Process data by batch within
folders. This script will look for csv files, which are generated by
simu_create_phantom.py, and which contain file names of the nifti phantom data.
3 changes: 2 additions & 1 deletion requirements.txt
@@ -2,4 +2,5 @@ seaborn
pandas
matplotlib
ptitprince
numpy
numpy
tqdm
82 changes: 25 additions & 57 deletions simu_create_phantom.py
@@ -15,17 +15,14 @@
#
# Authors: Stephanie Alley, Julien Cohen-Adad

# TODO: generated cord mask is too large!
# TODO: remove input params and set them as list inside code
# TODO: download PAM50 by default, and have option to set path to atlas
# TODO: param for selecting z

import os, sys
import argparse
import numpy as np
import nibabel as nib
import scipy.ndimage as ndimage
import pandas as pd
import tqdm


def get_parser():
@@ -35,48 +32,13 @@ def get_parser():
parser.add_argument(
'-o',
help='Name of the output folder',
default='./')
default='phantom')
return parser


def get_tracts(folder_atlas, zslice=500, num_slice=10):
"""
Loads tracts in an atlas folder and converts them from .nii.gz format to numpy ndarray
:param tracts_folder:
:param zslice: slice to select for generating the phantom
:return: ndarray nx,ny,nb_tracts
"""
# parameters
file_info_label = 'info_label.txt'
# read info labels
indiv_labels_ids, indiv_labels_names, indiv_labels_files, combined_labels_ids, combined_labels_names, combined_labels_id_groups, ml_clusters = read_label_file(
folder_atlas, file_info_label)

# fname_tracts = glob.glob(folder_atlas + '/*' + '.nii.gz')
nb_tracts = np.size(indiv_labels_files)
# load first file to get dimensions
im = Image(os.path.join(folder_atlas, indiv_labels_files[0]))
nx, ny, nz, nt, px, py, pz, pt = im.dim
# initialize data tracts
data_tracts = np.zeros([nx, ny, num_slice, nb_tracts])
#Load each partial volume of each tract
for i in range(nb_tracts):
sct.no_new_line_log('Load each atlas label: {}/{}'.format(i + 1, nb_tracts))
# TODO: display counter
data_tracts[:, :, :, i] = Image(os.path.join(folder_atlas, indiv_labels_files[i])).data[:, :, zslice-(num_slice/2):zslice+(num_slice/2)]
return data_tracts


def save_nifti(data, fname):
"""
Create a standard header with nibabel and save matrix as NIFTI
:param data:
:param fname:
:return:
"""
affine = np.diag([1, 1, 1, 1])
im_phantom = nib.Nifti1Image(data, affine)
nib.save(im_phantom, fname)
def crop_data(data):
    """Crop around the spinal cord. Voxel ranges are hard-coded in PAM50
    space; z=845:855 spans 10 slices centered on z=850 (mid-C4 level, at
    the cervical enlargement)."""
    return data[53:89, 58:82, 845:855]


def main(argv=None):
@@ -85,9 +47,7 @@ def main():
gm_values = [120, 140, 160, 180]
std_noises = [1, 5, 10]
smoothing = [0, 0.5, 1] # standard deviation values for Gaussian kernel
zslice = 850 # 850: corresponds to mid-C4 level (enlargement)
num_slice = 10 # number of slices in z direction
range_tract = 0 # we do not want heterogeneity within WM and within GM. All tracts should have the same value.
thr_mask = 0.9 # Set threshold for masks

# user params
parser = get_parser()
@@ -105,11 +65,14 @@
nii_atlas_gm = nib.load(os.path.join(folder_template, 'PAM50_gm.nii.gz'))

print("\nGenerate phantom...")
pbar = tqdm.tqdm(total=len(gm_values)*len(std_noises)*len(smoothing))
# loop across gm_value and std_values and generate phantom
for gm_value in gm_values:
for std_noise in std_noises:
for smooth in smoothing:
nii_atlas_wm.uncache()
data_wm = nii_atlas_wm.get_fdata()
nii_atlas_gm.uncache()
data_gm = nii_atlas_gm.get_fdata()
# Add values to each tract
data_wm *= wm_value
@@ -126,7 +89,7 @@
file_out = "phantom_WM" + str(wm_value) + "_GM" + str(gm_value) + "_Noise" + str(std_noise) + \
"_Smooth" + str(smooth)
# save as NIfTI file
nii_phantom = nib.Nifti1Image(data_phantom, nii_atlas_wm.affine)
nii_phantom = nib.Nifti1Image(crop_data(data_phantom), nii_atlas_wm.affine)
nib.save(nii_phantom, os.path.join(folder_out, file_out + ".nii.gz"))
# save metadata
metadata = pd.Series({'WM': wm_value,
@@ -135,18 +98,23 @@
'Smooth': smooth,
'File': file_out + ".nii.gz"})
metadata.to_csv(os.path.join(folder_out, file_out + ".csv"))
pbar.update(1)
pbar.close()


# generate mask of spinal cord
data_cord = np.sum(data_tracts[:, :, :, ind_wm+ind_gm], axis=3)
data_cord[np.where(data_cord >= 0.5)] = 1
data_cord[np.where(data_cord < 0.5)] = 0
save_nifti(data_cord, os.path.join(folder_out, "mask_cord.nii.gz"))
# generate mask of gray matter
data_gm = np.sum(data_tracts[:, :, :, ind_gm], axis=3)
data_gm[np.where(data_gm >= 0.5)] = 1
data_gm[np.where(data_gm < 0.5)] = 0
save_nifti(data_gm, os.path.join(folder_out, "mask_gm.nii.gz"))
nii_atlas_gm.uncache()
data_gm = nii_atlas_gm.get_fdata()
data_gm[np.where(data_gm < thr_mask)] = 0
nii_phantom = nib.Nifti1Image(crop_data(data_gm), nii_atlas_wm.affine)
nib.save(nii_phantom, os.path.join(folder_out, "mask_gm.nii.gz"))

# generate mask of white matter
nii_atlas_wm.uncache()
data_wm = nii_atlas_wm.get_fdata()
data_wm[np.where(data_wm < thr_mask)] = 0
nii_phantom = nib.Nifti1Image(crop_data(data_wm), nii_atlas_wm.affine)
nib.save(nii_phantom, os.path.join(folder_out, "mask_wm.nii.gz"))

# display
print("Done!")

42 changes: 20 additions & 22 deletions simu_make_figures.py
@@ -3,23 +3,22 @@
# Make figures to assess metrics sensitivity to image quality. Run after simu_process_data.py
#
# USAGE:
# The script should be launched using SCT's python:
# ${SCT_DIR}/python/bin/python simu_make_figures.py -i results_all.csv
# python simu_make_figures.py -i simu_results/results_all.csv
#
# OUTPUT:
# Figs
#
# Authors: Julien Cohen-Adad

import os, sys, csv

import os
import csv
import argparse
import numpy as np
# append path to useful SCT scripts
path_sct = os.getenv('SCT_DIR')
sys.path.append(os.path.join(path_sct, 'scripts'))
import sct_utils as sct
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker


def get_parameters():
parser = argparse.ArgumentParser(description='Make figures to assess metrics sensitivity to image quality. Run '
@@ -36,10 +35,7 @@ def get_parameters():
return args

def main():
sct.init_sct() # start logger
# default params
# smooth = 0
# Read CSV
path_output = 'simu_results/'
results_all = pd.read_csv(file_csv)

# build index
@@ -60,31 +56,33 @@
fig, ax = plt.subplots()
ind = np.arange(N) # the x locations for the groups
width = 0.20 # the width of the bars
fontsize = 20
fontsize_axes = 16
p2 = ax.bar(ind - width, data[:, 2], width, color='b')
p1 = ax.bar(ind, data[:, 1], width, color='y')
p3 = ax.bar(ind + width, data[:, 0], width, color='r')
fontsize = 16
fontsize_axes = 14
linewidth = 1 # linewidth of bar contour
p2 = ax.bar(ind - width, data[:, 2], width, color='b', linewidth=linewidth, edgecolor='k')
p1 = ax.bar(ind, data[:, 1], width, color='y', linewidth=linewidth, edgecolor='k', align='center')
p3 = ax.bar(ind + width, data[:, 0], width, color='r', linewidth=linewidth, edgecolor='k')
ax.set_title(metric, fontsize=fontsize)
ax.set_xlabel("Simulated Contrast (in %)", fontsize=fontsize)
ax.set_xticks(ind + width / 2)
xticklabels = (abs(list_gm - wm) / [float(min(i, wm)) for i in list_gm] * 100).astype(int)
ax.set_xticks(ind)
xticklabels = [int(100 * abs(i_gm-wm) / min(i_gm, wm)) for i_gm in list_gm]
ax.set_xticklabels(xticklabels, fontsize=fontsize_axes)
# ax.legend((p1[0], p2[0], p3[0]), (["Noise STD = " + str(i) for i in list_noise]))
ax.set_ylabel("Measured " + metric, fontsize=fontsize)
# ax.set_ylim(0, 80)
yticklabels = ax.get_yticks().astype(int)
ax.yaxis.set_major_locator(mticker.FixedLocator(yticklabels))
ax.set_yticklabels(yticklabels, fontsize=fontsize_axes)
plt.grid(axis='y')
plt.grid(axis='y', linestyle="dashed")
ax.autoscale_view()
# plt.show()
plt.savefig("results_" + metric + "_smooth" + str(smooth) + ".png")
plt.savefig(os.path.join(path_output, "results_" + metric + "_smooth" + str(smooth) + ".png"), dpi=300)

# save csv for importing as table
with open("results_" + metric + "_smooth" + str(smooth) + ".csv", "wb") as f:
with open(os.path.join(path_output, "results_" + metric + "_smooth" + str(smooth) + ".csv"), "w") as f:
writer = csv.writer(f)
for row in data.transpose():
row = ["%.2f" % f for f in row]  # we need to do this otherwise float conversion gives e.g. 23.00000001
writer.writerow(row)


96 changes: 64 additions & 32 deletions simu_process_data.py
@@ -4,24 +4,21 @@
# by simu_create_phantom.py), and will process data pairwise between folder1 and folder2.
#
# USAGE:
# The script should be launched using SCT's python:
# ${SCT_DIR}/python/bin/python simu_process_data.py -i phantom1 phantom2 -s phantom1/mask_cord.nii.gz -g phantom1/mask_gm.nii.gz -r 0
# python simu_process_data.py -i phantom1 phantom2 -s phantom1/mask_cord.nii.gz -g phantom1/mask_gm.nii.gz -r 0
#
# OUTPUT:
# results_folder.csv: quantitative results in CSV format
# results_simu.csv: quantitative results in CSV format
#
# Authors: Julien Cohen-Adad

# TODO: fix wrong SNR_diff value

import sys, os, shutil, argparse, pickle, io, glob
import numpy as np
import pandas as pd
# append path to useful SCT scripts
path_sct = os.getenv('SCT_DIR')
sys.path.append(os.path.join(path_sct, 'scripts'))
import sct_utils as sct
import process_data
import pandas as pd
import os
import argparse
import glob
from subprocess import call
import pandas
import tqdm


def get_parameters():
@@ -53,49 +50,84 @@ def get_parameters():
return args


def run(cmd):
    """Wrapper to run Unix commands, discarding their standard output"""
    call(cmd.split(' '), stdout=open(os.devnull, "w"))


def compute_metrics(file_1, file_2, file_wm, file_gm, path_out):
# Compute SNR using both methods
run(f'sct_image -i {file_1} {file_2} -concat t -o {path_out}data_concat.nii.gz')
run(f'sct_compute_snr -i {path_out}data_concat.nii.gz -method diff -m {file_wm} -o {path_out}snr_diff.txt')
run(f'sct_compute_snr -i {path_out}data_concat.nii.gz -method single -m {file_wm} -m-noise {file_wm} -rayleigh 0 '
f'-o {path_out}snr_single.txt')
snr_single = float(open(f'{path_out}snr_single.txt', 'r').readline())
# Compute average value in WM and GM on a slice-by-slice basis
run(f'sct_extract_metric -i {file_1} -f {file_wm} -method wa -o {path_out}signal_wm.csv')
run(f'sct_extract_metric -i {file_2} -f {file_wm} -method wa -o {path_out}signal_wm.csv -append 1')
run(f'sct_extract_metric -i {file_1} -f {file_gm} -method wa -o {path_out}signal_gm.csv')
run(f'sct_extract_metric -i {file_2} -f {file_gm} -method wa -o {path_out}signal_gm.csv -append 1')
# Compute contrast slicewise and average across slices
pd_gm = pandas.read_csv(f'{path_out}signal_gm.csv')
pd_wm = pandas.read_csv(f'{path_out}signal_wm.csv')
pd_contrast = 100 * abs(pd_gm['WA()'] - pd_wm['WA()']) / pandas.DataFrame([pd_gm['WA()'], pd_wm['WA()']]).min()
contrast = pd_contrast.mean()
# Build dict
dict_out = {
'SNR_single': snr_single,
'SNR_diff': float(open(f'{path_out}snr_diff.txt', 'r').readline()),
'Contrast': contrast,
'CNR': contrast * snr_single,
}
return dict_out


def main():
# output_dir = "./output_wmgm" # TODO: be able to set with argument
file_output = "results_all.csv" # csv output
# fdata2 = "data2.nii.gz"
path_output = 'simu_results/'
file_output = os.path.join(path_output, "results_all.csv")

# Get list of files in folder1
folder1, folder2 = folder_data
fname_csv_list = sorted(glob.glob(os.path.join(folder1, "*.csv")))

# initialize dataframe
results_all = pd.DataFrame(columns={'WM',
'GM',
'Noise',
'Smooth',
'SNR',
'Contrast',
'Sharpness'})
results_all = pandas.DataFrame(columns={'WM',
'GM',
'Noise',
'Smooth',
'SNR',
'Contrast',
'CNR'})

file_wm = os.path.join(folder1, 'mask_wm.nii.gz')
file_gm = os.path.join(folder1, 'mask_gm.nii.gz')

# loop and process
i = 0
os.makedirs(path_output, exist_ok=True)
pbar = tqdm.tqdm(total=len(fname_csv_list))
for fname_csv in fname_csv_list:
# get file name
metadata = pd.Series.from_csv(fname_csv, header=None).to_dict()
metadata = pandas.read_csv(fname_csv, index_col=0).to_dict()['0']
file_data = metadata["File"]
# get fname of each nifti file
fname1 = os.path.join(folder1, file_data)
fname2 = os.path.join(folder2, file_data)
# display
print("\nData #1: " + fname1)
print("Data #2: " + fname2)
# process pair of data
results = process_data.main([fname1, fname2], file_seg, file_gmseg, register=register, verbose=verbose)
results = compute_metrics(fname1, fname2, file_wm, file_gm, path_output)
# append to dataframe
results_all = results_all.append({'WM': metadata['WM'],
'GM': metadata['GM'],
'Noise': metadata['Noise'],
'Smooth': metadata['Smooth'],
'SNR_single': results.loc['SNR_single'][0],
'SNR_diff': results.loc['SNR_diff'][0],
'Contrast': results.loc['Contrast'][0]}, ignore_index=True)
# 'Sharpness': results.loc['Sharpness'][0]}, ignore_index=True)
'SNR_single': results['SNR_single'],
'SNR_diff': results['SNR_diff'],
'Contrast': results['Contrast'],
'CNR': results['CNR']}, ignore_index=True)
pbar.update(1)
pbar.close()
results_all.to_csv(file_output)


if __name__ == "__main__":
args = get_parameters()
folder_data = args.input
