Commit
Merge pull request #54 from Ian-Goodall-Halliwell/main
Added more compatibility options and w-scoring, with report changes
Ian-Goodall-Halliwell authored Jan 23, 2025
2 parents dd6180c + 132c537 commit 1a0aaef
Showing 9 changed files with 425 additions and 53 deletions.
54 changes: 54 additions & 0 deletions run_3T_test.sh
@@ -0,0 +1,54 @@
#!/bin/bash

# Enable error handling
set -e

# Set the path to the dataset, or the folder containing the 'derivatives' folder
pth_dataset_ref="/data/mica3/BIDS_MICs"
pth_dataset="/data/mica3/BIDS_PNI"
# Set the directories for micapipe, hippunfold, and zbrains, which will be looked for in the 'derivatives' folder
zbrains_dir="zbrains_clinical"
zbrains_dir_ref="zbrains_clinical"
micapipe_dir="micapipe_v0.2.0"
hippunfold_dir="hippunfold_v1.3.0"

# Set the paths to the demographic control and patient files
# The demo_controls file is only needed for the analysis step and defines the control sample to compare against.
# A demo_patients file can also be provided; if it is, the 'all' keyword runs every patient listed in it.
# Without it, 'all' runs every patient with the required micapipe and hippunfold outputs or, for the analysis step,
# every patient that has already been through zbrains proc.
# demo_controls="/host/oncilla/local_raid/oualid/zbrains_csvs/participants_mics_hc.csv"
demo_controls="/data/mica1/03_projects/ian/participants_7T_hc.csv"
# Set the subject and session IDs; 'all' uses every patient defined in the participants file.
subject_ids="sub-PNE006"
session_ids="all"

# The command below runs zbrains preserving the old behaviour, with a smooth_ctx of 10, a smooth_hip of 5, and a label_ctx of 'white'.
# The new defaults are a smooth_ctx of 5, a smooth_hip of 2, and a label_ctx of 'midthickness'.
# Much of the new volumetric code depends on the cortical midthickness surface, so 'midthickness' is recommended.
./zbrains --run "proc analysis"\
--sub "${subject_ids}" \
--ses "${session_ids}" \
--dataset ${pth_dataset} \
--zbrains ${zbrains_dir} \
--micapipe ${micapipe_dir} \
--hippunfold ${hippunfold_dir} \
--dataset_ref ${pth_dataset_ref} \
--zbrains_ref ${zbrains_dir_ref} \
--demo_ref ${demo_controls} \
--column_map participant_id=ID session_id=SES \
--smooth_ctx 10 \
--smooth_hip 5 \
--n_jobs 4 \
--n_jobs_wb 4 \
--label_ctx "white" \
--wb_path /usr/bin/ \
--verbose 2 \
--control_prefix "PNC" \
--volumetric 0 \
--dicoms 0 \
--pyinit=/data/mica1/03_projects/ian/anaconda3


# Pause to keep the terminal open (optional, remove if not needed)
read -p "Press any key to continue..."
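
A note on the --column_map flag used above: it tells zbrains which headers in the demographics CSV correspond to the fields it expects internally, so participant_id=ID session_id=SES means the participant and session identifiers are stored under the headers ID and SES. The snippet below is only an illustrative sketch of that remapping, assuming a pandas-style load; the file path is copied from the script above, the AGE and SEX columns are assumptions, and the real logic lives in the repository's load_demo function.

import pandas as pd

# Hypothetical demographics table with headers ID, SES, AGE, SEX
demo = pd.read_csv("/data/mica1/03_projects/ian/participants_7T_hc.csv")

# --column_map participant_id=ID session_id=SES: the expected field name on the
# left is stored under the actual header on the right, so rename actual -> expected
column_map = {"participant_id": "ID", "session_id": "SES"}
demo = demo.rename(columns={actual: expected for expected, actual in column_map.items()})

print(demo[["participant_id", "session_id"]].head())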
52 changes: 52 additions & 0 deletions run_norm.sh
@@ -0,0 +1,52 @@
#!/bin/bash

# Enable error handling
set -e

# Set the path to the dataset, or the folder containing the 'derivatives' folder
pth_dataset="/data/mica3/BIDS_MICs"
# Set the directories for micapipe, hippunfold, and zbrains, which will be looked for in the 'derivatives' folder
zbrains_dir="wbrains"
micapipe_dir="micapipe_v0.2.0"
hippunfold_dir="hippunfold_v1.3.0"

# Set the paths to the demographic control and patient files
# The demo_controls file is only needed for the analysis step and defines the control sample to compare against.
# A demo_patients file can also be provided; if it is, the 'all' keyword runs every patient listed in it.
# Without it, 'all' runs every patient with the required micapipe and hippunfold outputs or, for the analysis step,
# every patient that has already been through zbrains proc.
demo_controls="/host/oncilla/local_raid/oualid/zbrains_csvs/participants_mics_hc.csv"
demo="/host/verges/tank/data/ian/participants_mics_px.csv"
# Set the subject and session IDs; 'all' uses every patient defined in the participants file.
subject_ids="sub-PX010"
session_ids="all"

# The command below runs zbrains preserving the old behaviour, with a smooth_ctx of 10, a smooth_hip of 5, and a label_ctx of 'white'.
# The new defaults are a smooth_ctx of 5, a smooth_hip of 2, and a label_ctx of 'midthickness'.
# Much of the new volumetric code depends on the cortical midthickness surface, so 'midthickness' is recommended.
./zbrains --run "analysis"\
--sub "${subject_ids}" \
--ses "${session_ids}" \
--dataset ${pth_dataset} \
--zbrains ${zbrains_dir} \
--micapipe ${micapipe_dir} \
--hippunfold ${hippunfold_dir} \
--dataset_ref ${pth_dataset} \
--zbrains_ref ${zbrains_dir} \
--demo ${demo} \
--demo_ref ${demo_controls} \
--column_map participant_id=ID session_id=SES \
--smooth_ctx 10 \
--smooth_hip 5 \
--n_jobs 4 \
--n_jobs_wb 4 \
--label_ctx "white" \
--wb_path /usr/bin/ \
--verbose 2 \
--volumetric 0 \
--dicoms 0 \
--pyinit=/data/mica1/03_projects/ian/anaconda3 \
--normative "age sex"

# Pause to keep the terminal open (optional, remove if not needed)
read -p "Press any key to continue..."
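
The --normative "age sex" flag added here drives the w-scoring mentioned in the commit message: feature values are adjusted for the listed covariates using the control sample before deviations are computed. The sketch below is a generic illustration of one common w-score formulation, not the repository's implementation; all names are hypothetical, and the covariates age and sex are taken from the flag above. Each feature is regressed on the covariates in controls, and the patient's residual is scaled by the control residual standard deviation.

import numpy as np

def w_score(ctrl_cov, ctrl_vals, px_cov, px_val):
    # ctrl_cov:  (n_controls, n_covariates) array, e.g. columns [age, sex]
    # ctrl_vals: (n_controls,) control values of one feature
    # px_cov:    (n_covariates,) patient covariates
    # px_val:    patient value of the same feature
    X = np.column_stack([np.ones(len(ctrl_cov)), ctrl_cov])    # add intercept column
    beta, *_ = np.linalg.lstsq(X, ctrl_vals, rcond=None)       # fit controls by least squares
    resid_sd = np.std(ctrl_vals - X @ beta, ddof=X.shape[1])   # control residual SD
    expected = np.concatenate(([1.0], px_cov)) @ beta          # predicted value for the patient
    return (px_val - expected) / resid_sd                      # covariate-adjusted deviation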
2 changes: 1 addition & 1 deletion src/functions/blurring.py
@@ -60,7 +60,7 @@ def compute_blurring(
[midthicknessDataArr, midthicknessSurfaceArr],
[wmBoundaryDataArr, wmBoundarySurfaceArr],
]
for dist in ["1", "2", "3"]:
for dist in ["1", "2"]:
whiteMatterDataArr = load_gifti_data(
f"{input_dir}/{bids_id}_hemi-{hemi}_surf-fsnative_label-swm{dist}.0mm_{feat}.func.gii"
)
6 changes: 3 additions & 3 deletions src/functions/clinical_reports.py
@@ -558,8 +558,8 @@ def _load_data_sctx(
x = pd.read_csv(sctx_file, header=[0], index_col=0).to_numpy().ravel()
if threshold is not None:
x[np.abs(x) < threshold] *= threshold_alpha
if analysis == "asymmetry":
print("e")
# if analysis == "asymmetry":
# print("e")
# Array of data
array_16 = np.full(16, np.nan)
array_16[0:7] = x[0:7]
@@ -1033,7 +1033,7 @@ def generate_clinical_report(
label_hip="midthickness",
color_bar="bottom",
cmap="cmo.balance",
color_range=(-2, 2),
color_range=(-3, 3),
cmap_asymmetry="cmo.balance_r",
tmp_dir: PathType = "/tmp",
subject_dir=None,
2 changes: 1 addition & 1 deletion src/functions/help.py
@@ -132,7 +132,7 @@
unless indicated otherwise (e.g., --column_map site=center)
\t{gcolor}--n_jobs{nc} [number] : Number of jobs to run in parallel. Default is {bcolor}1{nc}.
\t{gcolor}--wb_path{nc} [path] : Path to the Connectome Workbench binaries. Default is {bcolor}/data/mica1/01_programs/workbench-1.4.2/bin_linux64{nc}.
\t{gcolor}--patient_prefix{nc} [prefix] : Prefix to use when determining patients versus controls. Default is {bcolor}PX{nc}.
\t{gcolor}--control_prefix{nc} [prefix] : Prefix to use when determining patients versus controls. Default is {bcolor}PX{nc}.
\t{gcolor}--delete_temps{nc} [bool] : If set to True, will delete any ragged temp files left from crashed analyses, then exit. Default is {bcolor}False{nc}.
\t{gcolor}--verbose{nc} [level] : Verbosity level (default is {bcolor}-1{nc}). Levels:
- 0 : Only errors
30 changes: 19 additions & 11 deletions src/functions/run_analysis.py
@@ -87,9 +87,15 @@ def main(
}

# Rename covariates for normative modeling
cov_normative = normative
cov_normative = (
(normative.split(" ") if " " in normative else normative.split("-"))
if normative is not None
else None
)
if cov_normative is not None:
cov_normative = [actual_to_expected.get(col, col) for col in cov_normative]
cov_normative = [
actual_to_expected.get(col, col).upper() for col in cov_normative
]

# Rename covariates for deconfounding
cov_deconfound = deconfound
@@ -113,10 +119,10 @@ def main(
px_demo = None
if demo is not None:
px_demo = load_demo(demo, rename=actual_to_expected, dtypes=col_dtypes, tmp=tmp)
px_demo = px_demo.loc[(px_demo["participant_id"] == px_id)]
# px_demo = px_demo.loc[
# (px_demo["participant_id"] == px_id) & (px_demo["session_id"] == px_ses)
# ]
# px_demo = px_demo.loc[(px_demo["participant_id"] == px_id)]
px_demo = px_demo.loc[
(px_demo["participant_id"] == px_id) & (px_demo["session_id"] == px_ses)
]

# If no such row exists, create an empty DataFrame with the same columns
if px_demo.empty:
@@ -168,7 +174,7 @@ def main(
struct,
smooth_ctx,
smooth_hip,
zbrains_ref,
zbrains,
px_id,
px_ses,
px_demo,
@@ -197,9 +203,10 @@ def main(
print(lab_ctx, lab_hip)
age = None
sex = None
print(px_demo)
if px_demo is not None:
age = px_demo.iloc[0].get("age", None)
sex = px_demo.iloc[0].get("sex", None)
age = px_demo["AGE"]
sex = px_demo["SEX"]

feat_ctx = available_features["cortex"][res_ctx][lab_ctx]
feat_sctx = available_features["subcortex"]
@@ -242,7 +249,7 @@ def main(
label_ctx=lab_ctx,
label_hip=lab_hip,
tmp_dir=tmp,
feature_means=feature_means
feature_means=feature_means,
)


@@ -393,9 +400,10 @@ def handle_unhandled_exception(exc_type, exc_value, exc_traceback):
args.labels_ctx = eval(args.labels_ctx)
args.struct = args.struct.split("-")
args.feat = args.feat.split("-")
args.demo_ref = args.demo_ref.split("-")
args.demo_ref = args.demo_ref.split("¥")
args.zbrains_ref = args.zbrains_ref.split("-")
args.resolution = args.resolution.split("-")

print(args.labels_ctx)
run(
subject_id=args.subject_id,
12 changes: 6 additions & 6 deletions src/functions/surface_to_volume.py
@@ -380,13 +380,13 @@ def process_cortex(
]

# Run the commands
subprocess.run(command_struct)
subprocess.run(command_struct, shell=True)
# subprocess.run(command_struct_native)

subprocess.run(command1)
subprocess.run(command_struct_2)
subprocess.run(command2)
subprocess.run(command3)
subprocess.run(command1, shell=True)
subprocess.run(command_struct_2, shell=True)
subprocess.run(command2, shell=True)
subprocess.run(command3, shell=True)
os.replace(
f"{tmp}/{feature}_{analysis}_{struct}_{smooth}_{hemi}_temp.nii.gz",
f"{outdir}/{subj}_{ses}_hemi-{hemi}_surf-fsLR-32k_label-midthickness_feature-{feature}_smooth-{smooth}_analysis-{analysis}.nii.gz",
@@ -1015,7 +1015,7 @@ def surface_to_volume(
None
"""
rootfolder = os.path.join(rootfolder, "derivatives")
zbrainsdir = zbrainsdir[0]
# zbrainsdir = zbrainsdir
smooth_ctx = f"{str(smooth_ctx)}mm"
smooth_hipp = f"{str(smooth_hipp)}mm"
