Add FMAP conversion capability for ADNI #1119

Merged: 16 commits, May 7, 2024
Changes from 6 commits
1 change: 1 addition & 0 deletions .gitignore
@@ -27,6 +27,7 @@ share/python-wheels/
.installed.cfg
*.egg
MANIFEST
+Miniconda3-latest-Linux-x86_64.sh

# PyInstaller
# Usually these files are written by a python script from a template
40 changes: 26 additions & 14 deletions clinica/iotools/bids_utils.py
@@ -68,7 +68,6 @@ def create_participants_df(
import numpy as np
import pandas as pd

-from clinica.iotools.converters.adni_to_bids.adni_utils import load_clinical_csv
from clinica.utils.stream import cprint

fields_bids = ["participant_id"]
@@ -92,8 +91,9 @@
participant_df = pd.DataFrame(columns=fields_bids)

for i in range(0, len(participant_fields_db)):
+pfdbi = participant_fields_db[i]
# If a field not empty is found
-if not pd.isnull(participant_fields_db[i]):
+if not pd.isnull(pfdbi):
# Extract the file location of the field and read the value from the file
tmp = field_location[i].split("/")
location = tmp[0]
@@ -112,28 +112,27 @@
if file_ext == ".xlsx":
file_to_read = pd.read_excel(file_to_read_path, sheet_name=sheet)
elif file_ext == ".csv":
-file_to_read = load_clinical_csv(
-clinical_data_dir, location.split(".")[0]
-)
+file_to_read = pd.read_csv(file_to_read_path)
prev_location = location
prev_sheet = sheet

field_col_values = []
# For each field in fields_dataset extract all the column values
for j in range(0, len(file_to_read)):
# Convert the alternative_id_1 to string if is an integer/float
-value_to_read = file_to_read[participant_fields_db[i]]
+import time
+value_to_read = file_to_read[pfdbi]
if participant_fields_bids[i] == "alternative_id_1" and (
value_to_read.dtype == np.float64 or value_to_read.dtype == np.int64
):
-if not pd.isnull(file_to_read.at[j, participant_fields_db[i]]):
+if not pd.isnull(file_to_read.at[j, pfdbi]):
value_to_append = str(
-file_to_read.at[j, participant_fields_db[i]]
+file_to_read.at[j, pfdbi]
).rstrip(".0")
else:
value_to_append = np.NaN
else:
-value_to_append = file_to_read.at[j, participant_fields_db[i]]
+value_to_append = file_to_read.at[j, pfdbi]
field_col_values.append(value_to_append)
# Add the extracted column to the participant_df
participant_df[participant_fields_bids[i]] = pd.Series(field_col_values)
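Note on the `.rstrip(".0")` idiom this hunk keeps: `str.rstrip` strips a *set of characters*, not a suffix, so alternative IDs that legitimately end in 0 get truncated. A minimal sketch of the pitfall and a safer spelling (assuming Python 3.9+ for `removesuffix`):

```python
# str.rstrip treats ".0" as the character set {'.', '0'}:
assert "4567.0".rstrip(".0") == "4567"   # looks correct
assert "4560.0".rstrip(".0") == "456"    # trailing zero silently lost

# Suffix-aware alternative (Python 3.9+):
assert "4560.0".removesuffix(".0") == "4560"
```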
@@ -246,6 +245,15 @@ def create_sessions_dict_OASIS(
file_to_read = pd.read_excel(file_to_read_path, sheet_name=sheet)
elif file_ext == ".csv":
file_to_read = pd.read_csv(file_to_read_path)
+# if file_ext == ".xlsx":
+# file_to_read = pd.read_excel(file_to_read_path, sheet_name=sheet)
+# if file_ext == ".csv":
+# file_to_read = pd.read_csv(file_to_read_path)
+# with open('/home/[email protected]/Desktop/Code/participant/debug_column_names.txt', 'w') as f:
+# f.write(f"Columns in {file_to_read_path}: \n")
+# for col in file_to_read.columns:
+# f.write(f"{col}\n")
+# file_to_read.columns = file_to_read.columns.str.replace('"', '')

for r in range(0, len(file_to_read.values)):
# Extracts the subject ids columns from the dataframe
@@ -533,7 +541,7 @@ def write_modality_agnostic_files(
_write_bidsignore(bids_dir)


-def write_sessions_tsv(bids_dir: Union[str, Path], sessions_dict: dict) -> None:
+def write_sessions_tsv(bids_dir: str, sessions_dict: dict) -> None:
"""Create <participant_id>_sessions.tsv files.

Basically writes the content of the function
@@ -617,7 +625,7 @@ def _get_pet_tracer_from_filename(filename: str) -> str:


def write_scans_tsv(
-bids_dir: Union[str, Path], participant_ids: List[str], scans_dict: dict
+bids_dir: str, participant_ids: List[str], scans_dict: dict
) -> None:
"""Write the scans dict into TSV files.

@@ -883,16 +891,20 @@ def run_dcm2niix(
command = _build_dcm2niix_command(
input_dir, output_dir, output_fmt, compress, bids_sidecar
)
-completed_process = subprocess.run(command, capture_output=True)
+completed_process = subprocess.run(command)

if completed_process.returncode != 0:
+if completed_process.stdout is not None:
+output_message = completed_process.stdout.decode('utf-8')
+else:
+output_message = ""
cprint(
msg=(
"DICOM to BIDS conversion with dcm2niix failed:\n"
f"command: {' '.join(command)}\n"
f"{completed_process.stdout.decode('utf-8')}"
f"{output_message}"
),
lvl="warning",
lvl="warning"
)
return False
cprint(
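For context on the `subprocess.run` change above: once `capture_output=True` is dropped, `CompletedProcess.stdout` is always `None`, so the new `is not None` guard means dcm2niix's output can never actually reach the warning message. A minimal sketch of the captured variant (the `run_converter` wrapper is hypothetical, not Clinica's API):

```python
import subprocess

def run_converter(command: list) -> bool:
    # With capture_output=True, stdout/stderr come back as bytes;
    # without it, both attributes are always None.
    completed = subprocess.run(command, capture_output=True)
    if completed.returncode != 0:
        message = completed.stdout.decode("utf-8") if completed.stdout is not None else ""
        print(f"DICOM to BIDS conversion failed:\n{message}")
        return False
    return True
```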
clinica/iotools/converters/adni_to_bids/adni_modalities/adni_av45_fbb_pet.py
@@ -10,7 +10,6 @@ def convert_adni_av45_fbb_pet(
conversion_dir: PathLike,
subjects: Optional[List[str]] = None,
mod_to_update: bool = False,
-n_procs: int = 1,
):
"""Convert AV-45 and Florbetaben PET images of ADNI into BIDS format.

@@ -34,24 +33,18 @@
mod_to_update : bool
If True, pre-existing images in the BIDS directory
will be erased and extracted again.
-
-n_procs : int, default=1
-The requested number of processes.
-If specified, it should be between 1 and the number of available CPUs.
-Default=1.
"""
from os import path

import pandas as pd

-from clinica.iotools.converters.adni_to_bids.adni_utils import (
-load_clinical_csv,
-paths_to_bids,
-)
+from clinica.iotools.converters.adni_to_bids.adni_utils import paths_to_bids
from clinica.utils.stream import cprint

if not subjects:
-adni_merge = load_clinical_csv(csv_dir, "ADNIMERGE")
+adni_merge_path = path.join(csv_dir, "ADNIMERGE.csv")
+adni_merge = pd.read_csv(adni_merge_path, delimiter='","')
+adni_merge.columns = adni_merge.columns.str.strip('"')
subjects = list(adni_merge.PTID.unique())

cprint(
@@ -61,13 +54,7 @@
cprint(
"Paths of AV45 and Florbetaben PET images found. Exporting images into BIDS ..."
)
-paths_to_bids(
-images,
-destination_dir,
-"av45_fbb",
-mod_to_update=mod_to_update,
-n_procs=n_procs,
-)
+paths_to_bids(images, destination_dir, "av45_fbb", mod_to_update=mod_to_update)
cprint(msg="AV45 and Florbetaben PET conversion done.", lvl="debug")


@@ -91,7 +78,6 @@ def compute_av45_fbb_pet_paths(source_dir, csv_dir, subjs_list, conversion_dir):
from clinica.iotools.converters.adni_to_bids.adni_utils import (
find_image_path,
get_images_pet,
-load_clinical_csv,
)

pet_amyloid_col = [
@@ -111,22 +97,27 @@
pet_amyloid_dfs_list = []

# Loading needed .csv files
-av45qc = load_clinical_csv(csv_dir, "AV45QC")
-amyqc = load_clinical_csv(csv_dir, "AMYQC")
-pet_meta_list = load_clinical_csv(csv_dir, "PET_META_LIST")
+av45qc = pd.read_csv(path.join(csv_dir, "AV45QC.csv"), sep=",", low_memory=False)
+amyqc = pd.read_csv(path.join(csv_dir, "AMYQC.csv"), sep=",", low_memory=False)
+pet_meta_list = pd.read_csv(
+path.join(csv_dir, "PET_META_LIST.csv"), sep=",", low_memory=False
+)

for subj in subjs_list:
+
+ssubj = subj.replace('"','')
+
# PET images metadata for subject
-subject_pet_meta = pet_meta_list[pet_meta_list["Subject"] == subj]
+subject_pet_meta = pet_meta_list[pet_meta_list["Subject"] == ssubj]

if subject_pet_meta.empty:
continue

# QC for AV45 PET images for ADNI 1, GO and 2
-av45_qc_subj = av45qc[(av45qc.PASS == 1) & (av45qc.RID == int(subj[-4:]))]
+av45_qc_subj = av45qc[(av45qc.PASS == 1) & (av45qc.RID == int(ssubj[-4:]))]

# QC for Amyloid PET images for ADNI 3
-amy_qc_subj = amyqc[(amyqc.SCANQLTY == 1) & (amyqc.RID == int(subj[-4:]))]
+amy_qc_subj = amyqc[(amyqc.SCANQLTY == 1) & (amyqc.RID == int(ssubj[-4:]))]
amy_qc_subj.insert(0, "EXAMDATE", amy_qc_subj.SCANDATE.to_list())

# Concatenating visits in both QC files
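A side note on the quoted-CSV workaround used in this file (the path below is a placeholder): splitting on the literal `","` sequence leaves stray quotes on the first and last columns, which is why the extra `strip`/`replace` calls appear throughout. pandas' default quote handling may already cover fully quoted files; a sketch of both readings, assuming every field in the file is double-quoted:

```python
import pandas as pd

path = "ADNIMERGE.csv"  # placeholder path

# Default parsing: quotechar='"' unwraps quoted headers and fields,
# so PTID values come back without surrounding quotes.
adni_merge = pd.read_csv(path)

# The diff's approach: split on the literal  ","  sequence. A multi-character
# separator needs the python engine, and the outermost quotes survive,
# hence the follow-up strip on the column names:
adni_raw = pd.read_csv(path, delimiter='","', engine="python")
adni_raw.columns = adni_raw.columns.str.strip('"')
```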
43 changes: 23 additions & 20 deletions clinica/iotools/converters/adni_to_bids/adni_modalities/adni_dwi.py
@@ -10,7 +10,6 @@ def convert_adni_dwi(
conversion_dir: PathLike,
subjects: Optional[List[str]] = None,
mod_to_update: bool = False,
-n_procs: Optional[int] = 1,
):
"""Convert DW images of ADNI into BIDS format.

@@ -34,24 +33,18 @@
mod_to_update : bool
If True, pre-existing images in the BIDS directory
will be erased and extracted again.
-
-n_procs : int, optional
-The requested number of processes.
-If specified, it should be between 1 and the number of available CPUs.
-Default=1.
"""
from os import path

import pandas as pd

-from clinica.iotools.converters.adni_to_bids.adni_utils import (
-load_clinical_csv,
-paths_to_bids,
-)
+from clinica.iotools.converters.adni_to_bids.adni_utils import paths_to_bids
from clinica.utils.stream import cprint

if not subjects:
-adni_merge = load_clinical_csv(csv_dir, "ADNIMERGE")
+adni_merge_path = path.join(csv_dir, "ADNIMERGE.csv")
+adni_merge = pd.read_csv(adni_merge_path, delimiter='","')
+adni_merge.columns = adni_merge.columns.str.strip('"')
subjects = list(adni_merge.PTID.unique())

cprint(
@@ -60,9 +53,7 @@
images = compute_dwi_paths(source_dir, csv_dir, subjects, conversion_dir)
cprint("Paths of DWI images found. Exporting images into BIDS ...")
# dwi_paths_to_bids(images, dest_dir)
-paths_to_bids(
-images, destination_dir, "dwi", mod_to_update=mod_to_update, n_procs=n_procs
-)
+paths_to_bids(images, destination_dir, "dwi", mod_to_update=mod_to_update)
cprint(msg="DWI conversion done.", lvl="debug")


@@ -84,7 +75,6 @@ def compute_dwi_paths(source_dir, csv_dir, subjs_list, conversion_dir):

from clinica.iotools.converters.adni_to_bids.adni_utils import (
find_image_path,
-load_clinical_csv,
visits_to_timepoints,
)

@@ -103,11 +93,19 @@
dwi_dfs_list = []

# Loading needed .csv files
-adni_merge = load_clinical_csv(csv_dir, "ADNIMERGE")
-mayo_mri_qc = load_clinical_csv(csv_dir, "MAYOADIRL_MRI_IMAGEQC_12_08_15")
+# adni_merge_path = path.join(csv_dir, "ADNIMERGE.csv")
+# adni_merge = pd.read_csv(adni_merge_path, delimiter='","')
+# adni_merge.columns = adni_merge.columns.str.strip('"')
+adni_merge = pd.read_csv(path.join(csv_dir, "ADNIMERGE2.csv"), sep=",", engine='python')
+
+mayo_mri_qc = pd.read_csv(
+path.join(csv_dir, "MAYOADIRL_MRI_IMAGEQC_12_08_15.csv"),
+sep=",",
+low_memory=False,
+)
mayo_mri_qc = mayo_mri_qc[mayo_mri_qc.series_type == "DTI"]

-mri_list = load_clinical_csv(csv_dir, "MRILIST")
+mri_list = pd.read_csv(path.join(csv_dir, "MRILIST.csv"), sep=",", low_memory=False)

# Selecting only DTI images that are not Multiband, processed or enhanced images
mri_list = mri_list[mri_list.SEQUENCE.str.contains("dti", case=False, na=False)]
@@ -119,14 +117,19 @@
]

for subj in subjs_list:
+
+ssubj = subj.replace('"','')
+
# Filter ADNIMERGE, MRI_LIST and QC for only one subject and sort the rows/visits by examination date
adnimerge_subj = adni_merge[adni_merge.PTID == subj]
adnimerge_subj = adnimerge_subj.sort_values("EXAMDATE")

-mri_list_subj = mri_list[mri_list.SUBJECT == subj]
+mri_list_subj = mri_list[mri_list.SUBJECT == ssubj]
mri_list_subj = mri_list_subj.sort_values("SCANDATE")

-mayo_mri_qc_subj = mayo_mri_qc[mayo_mri_qc.RID == int(subj[-4:])]
+ssubj = subj[-4:]
+ssubj = ssubj.replace('"','')
+mayo_mri_qc_subj = mayo_mri_qc[mayo_mri_qc.RID == int(ssubj)]

# Obtain corresponding timepoints for the subject visits
visits = visits_to_timepoints(subj, mri_list_subj, adnimerge_subj, "DWI")
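One more observation on the repeated `int(subj[-4:])` pattern in both converters: ADNI participant IDs follow the `site_S_RID` pattern (e.g. `011_S_0002`), so the trailing four characters are the roster ID (RID) used by the QC tables, but only once stray quotes are removed. A tiny helper sketch (`ptid_to_rid` is hypothetical, not part of this PR):

```python
def ptid_to_rid(ptid: str) -> int:
    # Strip quotes left over from the '","' parsing, then take the
    # trailing digits that make up the roster ID (RID).
    return int(ptid.replace('"', "")[-4:])

assert ptid_to_rid('"011_S_0002"') == 2
assert ptid_to_rid("123_S_4567") == 4567
```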