diff --git a/bidscoin/bids.py b/bidscoin/bids.py
index 483c52b0..326c75fd 100644
--- a/bidscoin/bids.py
+++ b/bidscoin/bids.py
@@ -67,7 +67,7 @@ def __init__(self, sourcefile: Union[str, Path]='', plugins: Plugins=None, dataf
 
         :param sourcefile:  The full filepath of the data source
         :param plugins:     The plugin dictionaries with their options
-        :param dataformat:  The dataformat name in the bidsmap, e.g. DICOM or PAR
+        :param dataformat:  The name of the dataformat (= section in the bidsmap, e.g. DICOM or PAR)
         :param options:     A (bidsmap) dictionary with 'subprefix' and 'sesprefix' fields
         """
@@ -117,11 +117,11 @@ def resesprefix(self) -> str:
 
         return '' if self.sesprefix=='*' else re.escape(self.sesprefix).replace(r'\-','-')
 
-    def has_plugin(self) -> bool:
-        """Test whether the datasource has a plugin that supports the sourcefile. If so, then update self.dataformat accordingly"""
+    def has_support(self) -> str:
+        """Find and return the dataformat supported by the plugins. If a dataformat is found, then update self.dataformat accordingly"""
 
         if not self.path.is_file() or self.path.is_dir():
-            return False
+            return ''
 
         for plugin, options in self.plugins.items():
             module = bcoin.import_plugin(plugin, ('has_support',))
@@ -135,9 +135,9 @@ def has_plugin(self) -> bool:
                 if self.dataformat and self.dataformat != supported:
                     LOGGER.bcdebug(f"Inconsistent dataformat found, updating: {self.dataformat} -> {supported}")
                 self.dataformat: str = supported
-                return True
+                return supported
 
-        return False
+        return ''
 
     def properties(self, tagname: str, runitem=None) -> Union[str, int]:
         """
@@ -223,7 +223,7 @@
             if attributekey in extattr:
                 attributeval = str(extattr[attributekey]) if extattr[attributekey] is not None else ''
-            elif self.dataformat or self.has_plugin():
+            elif self.dataformat or self.has_support():
                 for plugin, options in self.plugins.items():
                     module = bcoin.import_plugin(plugin, ('get_attribute',))
                     if module:
@@ -348,41 +348,38 @@ class RunItem:
     dictionaries and the bids and meta output dictionaries (bidsmap > dataformat > datatype > run-item)
     """
 
-    def __init__(self, dataformat: str='', datatype: str='', data: dict=None, datasource: DataSource=None, options: Options=None, plugins: Plugins=None):
+    def __init__(self, dataformat: str='', datatype: str='', data: dict=None, options: Options=None, plugins: Plugins=None):
         """
         Create a run-item with the proper structure, provenance info and a data source.
         NB: Updates to the attributes traverse to the datasource, but not vice versa
 
-        :param dataformat:  The name of the dataformat
-        :param datatype:    The name of the datatype
+        :param dataformat:  The name of the dataformat (= section in the bidsmap)
+        :param datatype:    The name of the datatype (= section in a dataformat)
         :param data:        The YAML run-item dictionary with the following keys: provenance, properties, attributes, bids, meta
-        :param datasource:  A data source that is deepcopied and added to the object, otherwise a datasource is created from data['provenance']
         :param options:     The dictionary with the BIDScoin options
         :param plugins:     The plugin dictionaries with their options
         """
 
         # Create a YAML data dictionary with all required attribute keys
-        data = {} if data is None else data
+        data = data or {}
         for key, val in {'provenance': '', 'properties': {}, 'attributes': {}, 'bids': {'suffix':''}, 'meta': {}}.items():
             if key not in data:
                 data[key] = val
         super().__setattr__('_data', data)
 
         # Set the regular attributes
-        self.datasource = datasource = copy.deepcopy(datasource) if datasource else DataSource(data['provenance'] if data else '', plugins, dataformat, options)
-        """The DataSource object that is deepcopied or created from the run-item provenance"""
-        datasource.subprefix = options['subprefix'] if options else datasource.subprefix
-        datasource.sesprefix = options['sesprefix'] if options else datasource.sesprefix
-        self.dataformat = dataformat or datasource.dataformat
+        self.datasource = DataSource(data['provenance'], plugins, dataformat, options)
+        """A DataSource object created from the run-item provenance"""
+        self.dataformat = dataformat
         """The name of the dataformat"""
         self.datatype = datatype
         """The name of the datatype"""
         self.options = options
         """The dictionary with the BIDScoin options"""
-        self.plugins = plugins or datasource.plugins
+        self.plugins = plugins
         """The plugin dictionaries with their options"""
 
         # Set the data attributes. TODO: create data classes instead?
-        self.provenance = data['provenance'] or str(datasource.path if datasource.path.name else '')
+        self.provenance = data['provenance']
         """The file path of the data source"""
         self.properties = Properties(data['properties'])
         """The file system properties from the data source that can be matched against other data sources"""
@@ -414,7 +411,11 @@ def __setattr__(self, name, value):
         else:
             _setattr(_name, value)
 
-        # Also update the twin attributes of the datasource (should never happen anyway)
+        # Keep the datasource in sync with the provenance (just in case someone changes this)
+        if name == 'provenance':
+            self.datasource.path = Path(value)
+
+        # Also update the identical twin attributes of the datasource (this should never happen)
         if name == 'dataformat':
             self.datasource.dataformat = value or ''
         if name == 'plugins':
@@ -672,8 +673,8 @@ def __init__(self, dataformat: str, datatype: str, data: list, options: Options,
         """
         Reads from a YAML datatype dictionary
 
-        :param dataformat:  The name of the dataformat
-        :param datatype:    The name of the datatype
+        :param dataformat:  The name of the dataformat (= section in the bidsmap)
+        :param datatype:    The name of the datatype (= section in a dataformat)
         :param data:        The YAML datatype dictionary, i.e. a list of runitems
         :param options:     The dictionary with the BIDScoin options
         :param plugins:     The plugin dictionaries with their options
@@ -714,7 +715,7 @@ def __hash__(self):
     def runitems(self) -> List[RunItem]:
         """Returns a list of the RunItem objects for this datatype"""
 
-        return [RunItem(self.dataformat, self.datatype, rundata, None, self.options, self.plugins) for rundata in self._data]
+        return [RunItem(self.dataformat, self.datatype, rundata, self.options, self.plugins) for rundata in self._data]
 
     def delete_run(self, provenance: str):
         """
@@ -765,7 +766,7 @@ def __init__(self, dataformat: str, data: dict, options: Options, plugins: Plugi
         """
         Reads from a YAML dataformat dictionary
 
-        :param dataformat:  The name of the dataformat
+        :param dataformat:  The name of the dataformat (= section in the bidsmap)
         :param data:        The YAML dataformat dictionary, i.e. subject and session items + a set of datatypes
         :param options:     The dictionary with the BIDScoin options
         :param plugins:     The plugin dictionaries with their options
@@ -968,7 +969,7 @@ def __init__(self, yamlfile: Path, folder: Path=templatefolder, plugins: Iterabl
 
                         # Add missing bids entities
                         suffix = runitem.bids.get('suffix')
-                        if runitem.datasource.has_plugin():
+                        if runitem.datasource.has_support():
                             suffix = runitem.datasource.dynamicvalue(suffix, True, True)
                         for typegroup in filerules.get(datatype.datatype, {}):              # E.g. typegroup = 'nonparametric'
                             if suffix in filerules[datatype.datatype][typegroup].suffixes:  # run_found = True
@@ -1205,7 +1206,7 @@ def dir(self, dataformat: Union[str, DataFormat]) -> List[Path]:
         """
         Make a provenance list of all the runs in the bidsmap[dataformat]
 
-        :param dataformat:  The information source in the bidsmap that is used, e.g. 'DICOM'
+        :param dataformat:  The dataformat section in the bidsmap that is listed, e.g. 'DICOM'
         :return:            List of all provenances
         """
@@ -1262,7 +1263,7 @@ def exist_run(self, runitem: RunItem, datatype: Union[str, DataType]='') -> bool
 
         return False
 
-    def get_matching_run(self, datasource: DataSource, runtime=False) -> Tuple[RunItem, str]:
+    def get_matching_run(self, sourcefile: Union[str, Path], dataformat, runtime=False) -> Tuple[RunItem, str]:
         """
         Find the first run in the bidsmap with properties and attributes that match with the data source. Only non-empty
         properties and attributes are matched, except when runtime is True, then the empty attributes are also matched.
@@ -1270,34 +1271,32 @@ def get_matching_run(self, datasource: DataSource, runtime=False) -> Tuple[RunIt
         ignoredatatypes (e.g. 'exclude') -> normal datatypes (e.g. 'anat') -> unknowndatatypes (e.g. 'extra_data')
 
-        :param datasource:  The data source from which the attributes are read
+        :param sourcefile:  The full filepath of the data source for which to get a run-item
+        :param dataformat:  The dataformat section in the bidsmap in which a matching run is searched for, e.g. 'DICOM'
         :param runtime:     Dynamic <> are expanded if True
         :return:            (run, provenance) A vanilla run that has all its attributes populated with the source file attributes.
                             If there is a match, the provenance of the bidsmap entry is returned, otherwise it will be ''
         """
 
-        if not datasource.dataformat and not datasource.has_plugin():
-            LOGGER.bcdebug(f"No dataformat/plugin support found when getting a matching run for: {datasource}")
-
+        datasource       = DataSource(sourcefile, self.plugins, dataformat, options=self.options)
         unknowndatatypes = self.options.get('unknowntypes') or ['unknown_data']
         ignoredatatypes  = self.options.get('ignoretypes') or []
-        normaldatatypes  = [dtype.datatype for dtype in self.dataformat(datasource.dataformat).datatypes if dtype not in unknowndatatypes + ignoredatatypes]
-        datasource       = copy.deepcopy(datasource)
-        rundata          = {'provenance': str(datasource.path), 'properties': {}, 'attributes': {}, 'bids': {}, 'meta': {}}
+        normaldatatypes  = [dtype.datatype for dtype in self.dataformat(dataformat).datatypes if dtype not in unknowndatatypes + ignoredatatypes]
+        rundata          = {'provenance': str(sourcefile), 'properties': {}, 'attributes': {}, 'bids': {}, 'meta': {}}
         """A run-item data structure. NB: Keep in sync with the RunItem() data attributes"""
 
         # Loop through all datatypes and runs; all info goes cleanly into runitem (to avoid formatting problem of the CommentedMap)
         if 'fmap' in normaldatatypes:
            normaldatatypes.insert(0, normaldatatypes.pop(normaldatatypes.index('fmap')))   # Put fmap at the front (to catch inverted polarity scans first)
         for datatype in ignoredatatypes + normaldatatypes + unknowndatatypes:              # The ordered datatypes in which a matching run is searched for
-            if datatype not in self.dataformat(datasource.dataformat).datatypes: continue
-            for runitem in self.dataformat(datasource.dataformat).datatype(datatype).runitems:
+            if datatype not in self.dataformat(dataformat).datatypes: continue
+            for runitem in self.dataformat(dataformat).datatype(datatype).runitems:
 
                 # Begin with match = True unless all properties and attributes are empty
                 match = any([getattr(runitem, attr)[key] not in (None,'') for attr in ('properties','attributes') for key in getattr(runitem, attr)])
 
                 # Initialize a clean run-item data structure
-                rundata = {'provenance': str(datasource.path), 'properties': {}, 'attributes': {}, 'bids': {}, 'meta': {}}
+                rundata = {'provenance': str(sourcefile), 'properties': {}, 'attributes': {}, 'bids': {}, 'meta': {}}
 
                 # Test if the data source matches all the non-empty run-item properties, but do NOT populate them
                 for propkey, propvalue in runitem.properties.items():
@@ -1344,7 +1343,7 @@ def get_matching_run(self, datasource: DataSource, runtime=False) -> Tuple[RunIt
                 # Stop searching the bidsmap if we have a match
                 if match:
                     LOGGER.bcdebug(f"Found bidsmap match: {runitem}")
-                    runitem = RunItem('', datatype, copy.deepcopy(rundata), datasource, self.options, self.plugins)
+                    runitem = RunItem(dataformat, datatype, copy.deepcopy(rundata), self.options, self.plugins)
 
                     # SeriesDescriptions (and ProtocolName?) may get a suffix like '_SBRef' from the vendor, try to strip it off
                     runitem.strip_suffix()
@@ -1352,11 +1351,11 @@
                     return runitem, runitem.provenance
 
         # We don't have a match (all tests failed, so datatype should be the *last* one, e.g. unknowndatatype)
-        LOGGER.bcdebug(f"Found no bidsmap match for: {datasource.path}")
+        LOGGER.bcdebug(f"Found no bidsmap match for: {sourcefile}")
         if datatype not in unknowndatatypes:
-            LOGGER.warning(f"Datatype was expected to be in {unknowndatatypes}, instead it is '{datatype}' -> {datasource.path}")
+            LOGGER.warning(f"Datatype was expected to be in {unknowndatatypes}, instead it is '{datatype}' -> {sourcefile}")
 
-        runitem = RunItem('', datatype, copy.deepcopy(rundata), datasource, self.options, self.plugins)
+        runitem = RunItem(dataformat, datatype, copy.deepcopy(rundata), self.options, self.plugins)
         runitem.strip_suffix()
 
         return runitem, ''
@@ -1371,7 +1370,7 @@ def get_run(self, datatype: Union[str, DataType], suffix_idx: Union[int, str], d
                             otherwise an empty dict
         """
 
-        if not datasource.dataformat and not datasource.has_plugin():
+        if not datasource.dataformat and not datasource.has_support():
             LOGGER.bcdebug(f"No dataformat/plugin support found when getting a run for: {datasource}")
 
         datatype = str(datatype)
@@ -1704,7 +1703,7 @@ def get_datasource(sourcedir: Path, plugins: Plugins, recurse: int=8) -> DataSou
                 datasource = get_datasource(sourcepath, plugins, recurse-1)
             elif sourcepath.is_file():
                 datasource = DataSource(sourcepath, plugins)
-                if datasource.has_plugin():
+                if datasource.has_support():
                     return datasource
 
     return datasource
@@ -2559,30 +2558,29 @@ def updatemetadata(datasource: DataSource, targetmeta: Path, usermeta: Meta, ext
             metapool = json.load(json_fid)
 
     # Add the source metadata to the metadict or copy it over
-    if sourcemeta.name:
-        for ext in extensions:
-            for sourcefile in sourcemeta.parent.glob(sourcemeta.with_suffix('').with_suffix(ext).name):
-                LOGGER.verbose(f"Copying source data from: '{sourcefile}''")
-
-                # Put the metadata in metadict
-                if ext == '.json':
-                    with sourcefile.open('r') as json_fid:
-                        metadata = json.load(json_fid)
-                    if not isinstance(metadata, dict):
-                        LOGGER.error(f"Skipping unexpectedly formatted meta-data in: {sourcefile}")
-                        continue
-                    for metakey, metaval in metadata.items():
-                        if metapool.get(metakey) and metapool.get(metakey) != metaval:
-                            LOGGER.info(f"Overruling {metakey} sourcefile values in {targetmeta}: {metapool[metakey]} -> {metaval}")
-                        else:
-                            LOGGER.bcdebug(f"Adding '{metakey}: {metaval}' to: {targetmeta}")
-                        metapool[metakey] = metaval or None
+    for ext in extensions:
+        for sourcefile in sourcemeta.parent.glob(sourcemeta.with_suffix('').with_suffix(ext).name):
+            LOGGER.verbose(f"Copying source data from: '{sourcefile}'")
+
+            # Put the metadata in metadict
+            if ext == '.json':
+                with sourcefile.open('r') as json_fid:
+                    metadata = json.load(json_fid)
+                if not isinstance(metadata, dict):
+                    LOGGER.error(f"Skipping unexpectedly formatted meta-data in: {sourcefile}")
+                    continue
+                for metakey, metaval in metadata.items():
+                    if metapool.get(metakey) and metapool.get(metakey) != metaval:
+                        LOGGER.info(f"Overruling {metakey} sourcefile values in {targetmeta}: {metapool[metakey]} -> {metaval}")
+                    else:
+                        LOGGER.bcdebug(f"Adding '{metakey}: {metaval}' to: {targetmeta}")
+                    metapool[metakey] = metaval or None
 
-                # Or just copy over the metadata file
-                else:
-                    targetfile = targetmeta.parent/sourcefile.name
-                    if not targetfile.is_file():
-                        shutil.copyfile(sourcefile, targetfile)
+            # Or just copy over the metadata file
+            else:
+                targetfile = targetmeta.parent/sourcefile.name
+                if not targetfile.is_file():
+                    shutil.copyfile(sourcefile, targetfile)
 
     # Add all the metadata to the metadict. NB: the dynamic `IntendedFor` value is handled separately later
    for metakey, metaval in usermeta.items():
@@ -2696,7 +2694,7 @@ def addparticipant(participants_tsv: Path, subid: str='', sesid: str='', data: d
     return table, meta
 
 
-def bidsprov(bidsfolder: Path, source: Path=Path(), runid: str='', datatype: Union[str, DataType]='unknown', targets: Iterable[Path]=()) -> pd.DataFrame:
+def bidsprov(bidsfolder: Path, source: Path=Path(), runitem: RunItem=None, targets: Iterable[Path]=()) -> pd.DataFrame:
     """
     Save data transformation information in the bids/code/bidscoin folder (in the future this may be done in accordance with BEP028)
@@ -2704,8 +2702,7 @@
 
     :param bidsfolder:  The bids root folder or one of its subdirectories (e.g. a session folder)
     :param source:      The source file or folder that is being converted
-    :param runid:       The bidsmap runid (provenance) that was used to map the source data, e.g. as returned from get_matching_run()
-    :param datatype:    The BIDS datatype/name of the subfolder where the targets are saved (e.g. extra_data)
+    :param runitem:     The runitem that was used to map the source data, e.g. as returned from get_matching_run()
     :param targets:     The set of output files
     :return:            The dataframe with the provenance data (index_col='source', columns=['runid', 'datatype', 'targets'])
     """
@@ -2728,7 +2725,7 @@
     # Write the provenance data
     if source.name:
         LOGGER.bcdebug(f"Writing provenance data to: {provfile}")
-        provdata.loc[str(source)] = [runid, str(datatype), ', '.join([f"{target.parts[1]+':' if target.parts[0]=='derivatives' else ''}{target.name}" for target in targets])]
+        provdata.loc[str(source)] = [runitem.provenance if runitem else '', (runitem.datatype or 'n/a') if runitem else 'n/a', ', '.join([f"{target.parts[1]+':' if target.parts[0]=='derivatives' else ''}{target.name}" for target in targets])]
         provdata.sort_index().to_csv(provfile, sep='\t')
 
     return provdata
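For reference, a minimal usage sketch of the reworked bids.py API (not part of the patch; the bidsmap filename and source path are hypothetical). DataSource.has_support() replaces has_plugin() and returns the name of the supported dataformat rather than a boolean, and BidsMap.get_matching_run() now builds its own DataSource from a plain sourcefile + dataformat:

    from pathlib import Path
    from bidscoin.bids import BidsMap, DataSource

    bidsmap    = BidsMap(Path('bidsmap.yaml'))              # Hypothetical study bidsmap
    sourcefile = Path('sub-01/ses-01/001-anat/00001.dcm')   # Hypothetical source file

    # has_support() returns e.g. 'DICOM', or '' if no plugin supports the file
    datasource = DataSource(sourcefile, bidsmap.plugins, options=bidsmap.options)
    dataformat = datasource.has_support()

    # get_matching_run() takes (sourcefile, dataformat) instead of a pre-built DataSource
    if dataformat:
        runitem, provenance = bidsmap.get_matching_run(sourcefile, dataformat)
        print(runitem.datatype, provenance or 'no bidsmap match')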
diff --git a/bidscoin/bidseditor.py b/bidscoin/bidseditor.py
index 1fedcaec..4df8688b 100755
--- a/bidscoin/bidseditor.py
+++ b/bidscoin/bidseditor.py
@@ -236,7 +236,7 @@ def show_contextmenu(self, pos):
             datasource = bids.DataSource(dataformat=dataformat)
             for filename in filenames:
                 datasource = bids.DataSource(filename, self.output_bidsmap.plugins, dataformat, self.output_bidsmap.options)
-                if datasource.has_plugin():
+                if datasource.has_support():
                     runitem = self.template_bidsmap.get_run(datatype, 0, datasource)
                     runitem.properties['filepath'] = datasource.properties('filepath')      # Make the added run a strict match (i.e. an exception)
                     runitem.properties['filename'] = datasource.properties('filename')      # Make the added run a strict match (i.e. an exception)
@@ -248,7 +248,7 @@
                         self.ordered_file_index[dataformat] = {datasource.path: 0}
                     else:
                         self.ordered_file_index[dataformat][datasource.path] = max(self.ordered_file_index[dataformat][fname] for fname in self.ordered_file_index[dataformat]) + 1
-            if datasource.has_plugin():
+            if datasource.has_support():
                 self.update_subses_samples(dataformat)
 
         elif action == delete:
diff --git a/bidscoin/plugins/dcm2niix2bids.py b/bidscoin/plugins/dcm2niix2bids.py
index 4adb3e7c..26c493b2 100644
--- a/bidscoin/plugins/dcm2niix2bids.py
+++ b/bidscoin/plugins/dcm2niix2bids.py
@@ -132,7 +132,7 @@ def bidsmapper_plugin(session: Path, bidsmap_new: BidsMap, bidsmap_old: BidsMap,
         for sourcedir in lsdirs(session, '**/*'):
             for n in range(1):      # Option: Use range(2) to scan two files and catch e.g. magnitude1/2 fieldmap files that are stored in one Series folder (but bidscoiner sees only the first file anyhow and it makes bidsmapper 2x slower :-()
                 sourcefile = bids.get_dicomfile(sourcedir, n)
-                if sourcefile.name and has_support(sourcefile):
+                if sourcefile.name:
                     sourcefiles.append(sourcefile)
     elif dataformat == 'PAR':
         sourcefiles = bids.get_parfiles(session)
@@ -150,13 +150,12 @@ def bidsmapper_plugin(session: Path, bidsmap_new: BidsMap, bidsmap_old: BidsMap,
                 break
 
         # See if we can find a matching run in the old bidsmap
-        datasource = bids.DataSource(sourcefile, plugins, dataformat, bidsmap_new.options)
-        run, match = bidsmap_old.get_matching_run(datasource)
+        run, match = bidsmap_old.get_matching_run(sourcefile, dataformat)
 
         # If not, see if we can find a matching run in the template
         if not match:
             LOGGER.bcdebug('No match found in the study bidsmap, now trying the template bidsmap')
-            run, _ = template.get_matching_run(datasource)
+            run, _ = template.get_matching_run(sourcefile, dataformat)
 
         # See if we have collected the run somewhere in our new bidsmap
         if not bidsmap_new.exist_run(run):
@@ -164,7 +163,7 @@
 
             # Communicate with the user if the run was not present in bidsmap_old or in template, i.e. that we found a new sample
             if not match:
-                LOGGER.info(f"Discovered sample: {datasource}")
+                LOGGER.info(f"Discovered sample: {run.datasource}")
 
                 # Try to automagically set the {part: phase/imag/real} (should work for Siemens data)
                 if not run.datatype == '' and 'part' in run.bids and not run.bids['part'][-1] and run.attributes.get('ImageType'):    # part[-1]==0 -> part is not specified
@@ -181,13 +180,13 @@
                         LOGGER.verbose(f"Updated {run} entity: 'part' -> '{run.bids['part'][run.bids['part'][-1]]}' ({imagetype})")
 
             else:
-                LOGGER.bcdebug(f"Known sample: {datasource}")
+                LOGGER.bcdebug(f"Known sample: {run.datasource}")
 
             # Copy the filled-in run over to the new bidsmap
             bidsmap_new.insert_run(run)
 
         else:
-            LOGGER.bcdebug(f"Existing/duplicate sample: {datasource}")
+            LOGGER.bcdebug(f"Existing/duplicate sample: {run.datasource}")
 
 
 @due.dcite(Doi('10.1016/j.jneumeth.2016.03.001'), description='dcm2niix: DICOM to NIfTI converter', tags=['reference-implementation'])
@@ -238,38 +237,36 @@ def bidscoiner_plugin(session: Path, bidsmap: BidsMap, bidsses: Path) -> Union[N
         scans_table = pd.DataFrame(columns=['acq_time'], dtype='str')
         scans_table.index.name = 'filename'
 
-    # Process all the source files or run subfolders
-    sourcefile = Path()
+    # Process all the source files / folders
     for source in sources:
 
         # Get a sourcefile
         if dataformat == 'DICOM':
             sourcefile = bids.get_dicomfile(source)
-        elif dataformat == 'PAR':
+        else:
             sourcefile = source
         if not sourcefile.name or not has_support(sourcefile):
             continue
 
         # Get a matching run from the bidsmap
-        datasource = bids.DataSource(sourcefile, {'dcm2niix2bids': options}, dataformat, bidsmap.options)
-        run, runid = bidsmap.get_matching_run(datasource, runtime=True)
+        run, runid = bidsmap.get_matching_run(sourcefile, dataformat, runtime=True)
 
         # Check if we should ignore this run
         if run.datatype in bidsmap.options['ignoretypes']:
-            LOGGER.info(f"--> Leaving out: {source}")
-            bids.bidsprov(bidsses, source, runid, run.datatype)   # Write out empty provenance data
+            LOGGER.info(f"--> Leaving out: {run.datasource}")
+            bids.bidsprov(bidsses, source, run)                   # Write out empty provenance data
             continue
 
         # Check if we already know this run
         if not runid:
-            LOGGER.error(f"--> Skipping unknown '{run.datatype}' run: {sourcefile}\n-> Re-run the bidsmapper and delete {bidsses} to solve this warning")
+            LOGGER.error(f"--> Skipping unknown run: {run.datasource}\n-> Re-run the bidsmapper and delete {bidsses} to solve this warning")
             bids.bidsprov(bidsses, source)                        # Write out empty provenance data
             continue
 
-        LOGGER.info(f"--> Coining: {source}")
+        LOGGER.info(f"--> Coining: {run.datasource}")
 
         # Create the BIDS session/datatype output folder
-        suffix = datasource.dynamicvalue(run.bids['suffix'], True, True)
+        suffix = run.datasource.dynamicvalue(run.bids['suffix'], True, True)
         if suffix in bids.get_derivatives(run.datatype, exceptions):
             outfolder = bidsfolder/'derivatives'/manufacturer.replace(' ','')/subid/sesid/run.datatype
         else:
@@ -411,21 +408,21 @@
                     newbidsname = newbidsname.replace('_phasediff_e1', '_phasediff')    # Case 1
                     newbidsname = newbidsname.replace('_phasediff_e2', '_phasediff')    # Case 1
                     newbidsname = newbidsname.replace('_phasediff_ph', '_phasediff')    # Case 1
-                    newbidsname = newbidsname.replace('_magnitude1_ph', '_phase1')      # Case 2: One or two magnitude and phase images in one folder/datasource
-                    newbidsname = newbidsname.replace('_magnitude2_ph', '_phase2')      # Case 2: Two magnitude + two phase images in one folder/datasource
+                    newbidsname = newbidsname.replace('_magnitude1_ph', '_phase1')      # Case 2: One or two magnitude and phase images in one folder
+                    newbidsname = newbidsname.replace('_magnitude2_ph', '_phase2')      # Case 2: Two magnitude + two phase images in one folder
                     newbidsname = newbidsname.replace('_phase1_e1', '_phase1')          # Case 2
                     newbidsname = newbidsname.replace('_phase1_e2', '_phase2')          # Case 2: This can happen when the e2 image is stored in the same directory as the e1 image, but with the e2 listed first
                     newbidsname = newbidsname.replace('_phase2_e1', '_phase1')          # Case 2: This can happen when the e2 image is stored in the same directory as the e1 image, but with the e2 listed first
                     newbidsname = newbidsname.replace('_phase2_e2', '_phase2')          # Case 2
-                    newbidsname = newbidsname.replace('_phase1_ph', '_phase1')          # Case 2: One or two magnitude and phase images in one folder/datasource
-                    newbidsname = newbidsname.replace('_phase2_ph', '_phase2')          # Case 2: Two magnitude + two phase images in one folder/datasource
+                    newbidsname = newbidsname.replace('_phase1_ph', '_phase1')          # Case 2: One or two magnitude and phase images in one folder
+                    newbidsname = newbidsname.replace('_phase2_ph', '_phase2')          # Case 2: Two magnitude + two phase images in one folder
                     newbidsname = newbidsname.replace('_magnitude_e1', '_magnitude')    # Case 3 = One magnitude + one fieldmap image
                     if len(dcm2niixfiles) == 2:
-                        newbidsname = newbidsname.replace('_fieldmap_e1', '_magnitude') # Case 3: One magnitude + one fieldmap image in one folder/datasource
+                        newbidsname = newbidsname.replace('_fieldmap_e1', '_magnitude') # Case 3: One magnitude + one fieldmap image in one folder
                     newbidsname = newbidsname.replace('_magnitude_fieldmaphz', '_fieldmap')
                     newbidsname = newbidsname.replace('_fieldmap_fieldmaphz', '_fieldmap')
                     newbidsname = newbidsname.replace('_fieldmap_e1', '_fieldmap')      # Case 3
-                    newbidsname = newbidsname.replace('_magnitude_ph', '_fieldmap')     # Case 3: One magnitude + one fieldmap image in one folder/datasource
+                    newbidsname = newbidsname.replace('_magnitude_ph', '_fieldmap')     # Case 3: One magnitude + one fieldmap image in one folder
                     newbidsname = newbidsname.replace('_fieldmap_ph', '_fieldmap')      # Case 3
 
                 # Append the dcm2niix info to the fallback-label, may need to be improved/elaborated for future BIDS standards, supporting multi-coil data
@@ -454,7 +451,7 @@ def bidscoiner_plugin(session: Path, bidsmap: BidsMap, bidsses: Path) -> Union[N
                     oldfile.replace(newbidsfile.with_suffix('').with_suffix(''.join(oldfile.suffixes)))
 
         # Write out provenance data
-        bids.bidsprov(bidsses, source, runid, run.datatype, targets)
+        bids.bidsprov(bidsses, source, run, targets)
 
         # Loop over all non-derivative targets (i.e. the produced output files) and edit the json sidecar data
         for target in sorted(targets):
@@ -467,7 +464,7 @@
             jsonfile = target.with_suffix('').with_suffix('.json')
             if not jsonfile.is_file():
                 LOGGER.warning(f"Unexpected conversion result, could not find: {jsonfile}")
-            metadata = bids.updatemetadata(datasource, jsonfile, run.meta, options.get('meta',[]))
+            metadata = bids.updatemetadata(run.datasource, jsonfile, run.meta, options.get('meta',[]))
 
             # Remove the bval/bvec files of sbref- and inv-images (produced by dcm2niix but not allowed by the BIDS specifications)
             if ((run.datatype == 'dwi' and suffix == 'sbref') or
@@ -492,9 +489,9 @@
             if not ignore:
                 acq_time = ''
                 if dataformat == 'DICOM':
-                    acq_time = f"{datasource.attributes('AcquisitionDate')}T{datasource.attributes('AcquisitionTime')}"
+                    acq_time = f"{run.datasource.attributes('AcquisitionDate')}T{run.datasource.attributes('AcquisitionTime')}"
                 elif dataformat == 'PAR':
-                    acq_time = datasource.attributes('exam_date')
+                    acq_time = run.datasource.attributes('exam_date')
                 if not acq_time or acq_time == 'T':
                     acq_time = f"1925-01-01T{metadata.get('AcquisitionTime','')}"
                 try:
@@ -509,7 +506,7 @@
 
             # Check if the target output aligns with dcm2niix's "BidsGuess" datatype and filename entities
             if not ignore:
-                typeguess, targetguess = metadata.get('BidsGuess') or ['', '']     # BidsGuess: [datatype, filename]
+                typeguess, targetguess = metadata.get('BidsGuess') or ['', '']      # BidsGuess: [datatype, filename]
                 LOGGER.bcdebug(f"BidsGuess: [{typeguess}, {targetguess}]")
                 if typeguess and run.datatype != typeguess:
                     LOGGER.info(f"The datatype of {target.relative_to(bidsses)} does not match with the datatype guessed by dcm2niix: {typeguess}")
@@ -527,7 +524,7 @@
         scans_table.replace('','n/a').to_csv(scans_tsv, sep='\t', encoding='utf-8', na_rep='n/a')
 
     # Collect personal data for the participants.tsv file
-    if dataformat == 'DICOM':                   # PAR does not contain personal info
+    if dataformat == 'DICOM':                   # PAR does not contain personal info?
         age = datasource.attributes('PatientAge')   # A string of characters with one of the following formats: nnnD, nnnW, nnnM, nnnY
         try:
             if age.endswith('D'): age = float(age.rstrip('D')) / 365.2524
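For reference, a minimal sketch of the reworked provenance bookkeeping used by the plugin above (not part of the patch; the bidsmap file and paths are hypothetical). bidsprov() now takes the matched RunItem, from which it reads the runid (the run's provenance) and the datatype itself, while unknown runs are still logged by passing no run-item:

    from pathlib import Path
    from bidscoin import bids

    bidsmap    = bids.BidsMap(Path('bidsmap.yaml'))                 # Hypothetical study bidsmap
    bidsses    = Path('bids/sub-01/ses-01')                         # Hypothetical BIDS session folder
    sourcefile = Path('raw/sub-01/ses-01/001-anat/00001.dcm')       # Hypothetical source file

    run, runid = bidsmap.get_matching_run(sourcefile, 'DICOM', runtime=True)
    if runid:
        bids.bidsprov(bidsses, sourcefile, run, targets=())         # Was: bidsprov(bidsses, source, runid, run.datatype, targets)
    else:
        bids.bidsprov(bidsses, sourcefile)                          # Write out empty provenance data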
diff --git a/bidscoin/plugins/nibabel2bids.py b/bidscoin/plugins/nibabel2bids.py
index 580e2ddc..b6ed488e 100644
--- a/bidscoin/plugins/nibabel2bids.py
+++ b/bidscoin/plugins/nibabel2bids.py
@@ -11,7 +11,7 @@
 from typing import Union
 from pathlib import Path
 from bidscoin import bids
-from bidscoin.bids import BidsMap, DataFormat, Plugin, Plugins
+from bidscoin.bids import BidsMap, DataFormat, Plugin
 
 try:
     from nibabel.testing import data_path
@@ -116,40 +116,34 @@ def bidsmapper_plugin(session: Path, bidsmap_new: BidsMap, bidsmap_old: BidsMap,
     :return:
     """
 
-    # Get started
-    plugins = Plugins({'nibabel2bids': bidsmap_new.plugins['nibabel2bids']})
-    datasource = bids.get_datasource(session, plugins, recurse=2)
-    if not datasource.dataformat:
-        return
-    if datasource.dataformat not in template.dataformats + bidsmap_old.dataformats:
-        LOGGER.error(f"No {datasource} source information found in the bidsmap and template")
-        return
-
     # Collect the different DICOM/PAR source files for all runs in the session
-    for sourcefile in [file for file in session.rglob('*') if has_support(file)]:
+    for sourcefile in session.rglob('*'):
+
+        # Check if the sourcefile is of a supported dataformat
+        if not (dataformat := has_support(sourcefile)):
+            continue
 
         # See if we can find a matching run in the old bidsmap
-        datasource = bids.DataSource(sourcefile, plugins, has_support(sourcefile))
-        run, match = bidsmap_old.get_matching_run(datasource)
+        run, match = bidsmap_old.get_matching_run(sourcefile, dataformat)
 
         # If not, see if we can find a matching run in the template
         if not match:
-            run, _ = template.get_matching_run(datasource)
+            run, _ = template.get_matching_run(sourcefile, dataformat)
 
         # See if we have collected the run somewhere in our new bidsmap
         if not bidsmap_new.exist_run(run):
 
             # Communicate with the user if the run was not present in bidsmap_old or in template, i.e. that we found a new sample
             if not match:
-                LOGGER.info(f"Discovered data sample: {datasource}")
+                LOGGER.info(f"Discovered sample: {run.datasource}")
             else:
-                LOGGER.bcdebug(f"Known data sample: {datasource}")
+                LOGGER.bcdebug(f"Known sample: {run.datasource}")
 
             # Copy the filled-in run over to the new bidsmap
             bidsmap_new.insert_run(run)
 
         else:
-            LOGGER.bcdebug(f"Existing/duplicate sample: {datasource}")
+            LOGGER.bcdebug(f"Existing/duplicate sample: {run.datasource}")
 
 
 def bidscoiner_plugin(session: Path, bidsmap: BidsMap, bidsses: Path) -> None:
@@ -164,15 +158,10 @@
     """
 
     # Get the subject identifiers from the bidsses folder
-    subid = bidsses.name if bidsses.name.startswith('sub-') else bidsses.parent.name
-    sesid = bidsses.name if bidsses.name.startswith('ses-') else ''
-
-    # Get started
-    options = bidsmap.plugins['nibabel2bids']
-    sourcefiles = [file for file in session.rglob('*') if has_support(file)]
-    if not sourcefiles:
-        LOGGER.info(f"--> No {__name__} sourcedata found in: {session}")
-        return
+    subid   = bidsses.name if bidsses.name.startswith('sub-') else bidsses.parent.name
+    sesid   = bidsses.name if bidsses.name.startswith('ses-') else ''
+    options = bidsmap.plugins['nibabel2bids']
+    runid   = ''
 
     # Read or create a scans_table and tsv-file
     scans_tsv = bidsses/f"{subid}{'_'+sesid if sesid else ''}_scans.tsv"
@@ -183,24 +172,28 @@ def bidscoiner_plugin(session: Path, bidsmap: BidsMap, bidsses: Path) -> None:
         scans_table.index.name = 'filename'
 
     # Collect the different Nibabel source files for all files in the session
-    for source in sourcefiles:
+    for sourcefile in session.rglob('*'):
+
+        # Check if the sourcefile is of a supported dataformat
+        if not (dataformat := has_support(sourcefile)):
+            continue
 
-        datasource = bids.DataSource(source, {'nibabel2bids': options}, has_support(source))
-        run, runid = bidsmap.get_matching_run(datasource, runtime=True)
+        # Get a matching run from the bidsmap
+        run, runid = bidsmap.get_matching_run(sourcefile, dataformat, runtime=True)
 
         # Check if we should ignore this run
         if run.datatype in bidsmap.options['ignoretypes']:
-            LOGGER.info(f"--> Leaving out: {datasource}")
-            bids.bidsprov(bidsses, source, runid, run.datatype)   # Write out empty provenance data
+            LOGGER.info(f"--> Leaving out: {run.datasource}")
+            bids.bidsprov(bidsses, sourcefile, run)               # Write out empty provenance data
            continue
 
         # Check if we already know this run
         if not runid:
-            LOGGER.error(f"Skipping unknown '{run.datatype}' run: {source}\n-> Re-run the bidsmapper and delete {bidsses} to solve this warning")
-            bids.bidsprov(bidsses, source)                        # Write out empty provenance data
+            LOGGER.error(f"Skipping unknown run: {run.datasource}\n-> Re-run the bidsmapper and delete {bidsses} to solve this warning")
+            bids.bidsprov(bidsses, sourcefile)                    # Write out empty provenance data
             continue
 
-        LOGGER.info(f"--> Coining: {datasource}")
+        LOGGER.info(f"--> Coining: {run.datasource}")
 
         # Create the BIDS session/datatype output folder
         outfolder = bidsses/run.datatype
@@ -225,8 +218,8 @@ def bidscoiner_plugin(session: Path, bidsmap: BidsMap, bidsses: Path) -> None:
                 target.unlink()
 
         # Save the sourcefile as a BIDS NIfTI file and write out provenance data
-        nib.save(nib.load(source), target)
-        bids.bidsprov(bidsses, source, runid, run.datatype, [target] if target.is_file() else [])
+        nib.save(nib.load(sourcefile), target)
+        bids.bidsprov(bidsses, sourcefile, run, [target] if target.is_file() else [])
 
         # Check the output
         if not target.is_file():
@@ -235,7 +228,7 @@ def bidscoiner_plugin(session: Path, bidsmap: BidsMap, bidsses: Path) -> None:
 
         # Load/copy over the source meta-data
         sidecar  = target.with_suffix('').with_suffix('.json')
-        metadata = bids.updatemetadata(datasource, sidecar, run.meta, options.get('meta', []))
+        metadata = bids.updatemetadata(run.datasource, sidecar, run.meta, options.get('meta', []))
         if metadata:
             with sidecar.open('w') as json_fid:
                 json.dump(metadata, json_fid, indent=4)
@@ -245,6 +238,10 @@ def bidscoiner_plugin(session: Path, bidsmap: BidsMap, bidsses: Path) -> None:
             acq_time = dateutil.parser.parse(f"1925-01-01T{metadata.get('AcquisitionTime', '')}")
             scans_table.loc[target.relative_to(bidsses).as_posix(), 'acq_time'] = acq_time.isoformat()
 
+    if not runid:
+        LOGGER.info(f"--> No {__name__} sourcedata found in: {session}")
+        return
+
     # Write the scans_table to disk
     LOGGER.verbose(f"Writing data to: {scans_tsv}")
     scans_table.replace('','n/a').to_csv(scans_tsv, sep='\t', encoding='utf-8', na_rep='n/a')
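For reference, the per-file filtering idiom that the rewritten plugin loops above share, as a standalone sketch (not part of the patch; it assumes a plugin's has_support() helper is in scope). Instead of building a sourcefiles list up front, the session folder is walked once and each file is tested and classified in a single call:

    from pathlib import Path
    from typing import Iterator, Tuple

    def iter_sourcefiles(session: Path) -> Iterator[Tuple[Path, str]]:
        """Yield (sourcefile, dataformat) pairs for all supported files in a session folder"""
        for sourcefile in session.rglob('*'):
            if dataformat := has_support(sourcefile):   # has_support() returns '' for unsupported files
                yield sourcefile, dataformat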
diff --git a/bidscoin/plugins/spec2nii2bids.py b/bidscoin/plugins/spec2nii2bids.py
index e5a98675..65e54e26 100644
--- a/bidscoin/plugins/spec2nii2bids.py
+++ b/bidscoin/plugins/spec2nii2bids.py
@@ -11,7 +11,7 @@
 from bids_validator import BIDSValidator
 from pathlib import Path
 from bidscoin import bcoin, bids, due, Doi
-from bidscoin.bids import BidsMap, DataFormat, Plugin, Plugins
+from bidscoin.bids import BidsMap, DataFormat, Plugin
 
 LOGGER = logging.getLogger(__name__)
 
@@ -117,40 +117,34 @@ def bidsmapper_plugin(session: Path, bidsmap_new: BidsMap, bidsmap_old: BidsMap,
     :return:
     """
 
-    # Get the plugin settings
-    plugins = Plugins({'spec2nii2bids': bidsmap_new.plugins['spec2nii2bids']})
-
     # Update the bidsmap with the info from the source files
-    for sourcefile in [file for file in session.rglob('*') if has_support(file)]:
-
-        datasource = bids.DataSource(sourcefile, plugins, has_support(sourcefile))
+    for sourcefile in session.rglob('*'):
 
-        # Input checks
-        if datasource.dataformat not in template.dataformats + bidsmap_old.dataformats:
-            LOGGER.error(f"No {datasource} information found in the bidsmap and template for: {sourcefile}")
-            return
+        # Check if the sourcefile is of a supported dataformat
+        if not (dataformat := has_support(sourcefile)):
+            continue
 
         # See if we can find a matching run in the old bidsmap
-        run, match = bidsmap_old.get_matching_run(datasource)
+        run, match = bidsmap_old.get_matching_run(sourcefile, dataformat)
 
         # If not, see if we can find a matching run in the template
         if not match:
-            run, _ = template.get_matching_run(datasource)
+            run, _ = template.get_matching_run(sourcefile, dataformat)
 
         # See if we have collected the run somewhere in our new bidsmap
         if not bidsmap_new.exist_run(run):
 
             # Communicate with the user if the run was not present in bidsmap_old or in template, i.e. that we found a new sample
             if not match:
-                LOGGER.info(f"Discovered sample: {datasource}")
+                LOGGER.info(f"Discovered sample: {run.datasource}")
             else:
-                LOGGER.bcdebug(f"Known sample: {datasource}")
+                LOGGER.bcdebug(f"Known sample: {run.datasource}")
 
             # Copy the filled-in run over to the new bidsmap
             bidsmap_new.insert_run(run)
 
         else:
-            LOGGER.bcdebug(f"Existing/duplicate sample: {datasource}")
+            LOGGER.bcdebug(f"Existing/duplicate sample: {run.datasource}")
 
 
 @due.dcite(Doi('10.1002/mrm.29418'), description='Multi-format in vivo MR spectroscopy conversion to NIFTI', tags=['reference-implementation'])
@@ -167,18 +161,11 @@ def bidscoiner_plugin(session: Path, bidsmap: BidsMap, bidsses: Path) -> Union[N
     :return:        A dictionary with personal data for the participants.tsv file (such as sex or age)
     """
 
-    # Get the subject identifiers from the bidsses folder
-    subid = bidsses.name if bidsses.name.startswith('sub-') else bidsses.parent.name
-    sesid = bidsses.name if bidsses.name.startswith('ses-') else ''
-
-    # Get started and see what dataformat we have
-    options = bidsmap.plugins['spec2nii2bids']
-    datasource = bids.get_datasource(session, {'spec2nii2bids': options})
-    dataformat = datasource.dataformat
-    sourcefiles = [file for file in session.rglob('*') if has_support(file)]
-    if not sourcefiles:
-        LOGGER.info(f"--> No {__name__} sourcedata found in: {session}")
-        return
+    # Get started
+    subid   = bidsses.name if bidsses.name.startswith('sub-') else bidsses.parent.name
+    sesid   = bidsses.name if bidsses.name.startswith('ses-') else ''
+    options = bidsmap.plugins['spec2nii2bids']
+    runid   = ''
 
     # Read or create a scans_table and tsv-file
     scans_tsv = bidsses/f"{subid}{'_'+sesid if sesid else ''}_scans.tsv"
@@ -189,25 +176,28 @@ def bidscoiner_plugin(session: Path, bidsmap: BidsMap, bidsses: Path) -> Union[N
         scans_table.index.name = 'filename'
 
     # Loop over all MRS source data files and convert them to BIDS
-    for source in sourcefiles:
+    for sourcefile in session.rglob('*'):
+
+        # Check if the sourcefile is of a supported dataformat
+        if not (dataformat := has_support(sourcefile)):
+            continue
 
-        # Get a data source, a matching run from the bidsmap
-        datasource = bids.DataSource(source, {'spec2nii2bids': options}, has_support(source))
-        run, runid = bidsmap.get_matching_run(datasource, runtime=True)
+        # Get a matching run from the bidsmap
+        run, runid = bidsmap.get_matching_run(sourcefile, dataformat, runtime=True)
 
         # Check if we should ignore this run
         if run.datatype in bidsmap.options['ignoretypes']:
-            LOGGER.info(f"--> Leaving out: {source}")
-            bids.bidsprov(bidsses, source, runid, run.datatype)   # Write out empty provenance data
+            LOGGER.info(f"--> Leaving out: {run.datasource}")
+            bids.bidsprov(bidsses, sourcefile, run)               # Write out empty provenance data
             continue
 
         # Check that we know this run
         if not runid:
-            LOGGER.error(f"Skipping unknown '{run.datatype}' run: {source}\n-> Re-run the bidsmapper and delete the MRS output data in {bidsses} to solve this warning")
-            bids.bidsprov(bidsses, source)                        # Write out empty provenance data
+            LOGGER.error(f"Skipping unknown run: {run.datasource}\n-> Re-run the bidsmapper and delete the MRS output data in {bidsses} to solve this warning")
+            bids.bidsprov(bidsses, sourcefile)                    # Write out empty provenance data
             continue
 
-        LOGGER.info(f"--> Coining: {source}")
+        LOGGER.info(f"--> Coining: {run.datasource}")
 
         # Create the BIDS session/datatype output folder
         outfolder = bidsses/run.datatype
@@ -237,7 +227,7 @@ def bidscoiner_plugin(session: Path, bidsmap: BidsMap, bidsses: Path) -> Union[N
         args = options.get('args', OPTIONS['args']) or ''
         if dataformat == 'SPAR':
             dformat = 'philips'
-            arg     = f'"{source.with_suffix(".SDAT")}"'
+            arg     = f'"{sourcefile.with_suffix(".SDAT")}"'
         elif dataformat == 'Twix':
             dformat = 'twix'
             arg     = '-e image'
@@ -247,8 +237,8 @@ def bidscoiner_plugin(session: Path, bidsmap: BidsMap, bidsses: Path) -> Union[N
             LOGGER.error(f"Unsupported dataformat: {dataformat}")
             return
         command = options.get('command', 'spec2nii')
-        errcode = bcoin.run_command(f'{command} {dformat} -j -f "{bidsname}" -o "{outfolder}" {args} {arg} "{source}"')
-        bids.bidsprov(bidsses, source, runid, run.datatype, [target] if target.is_file() else [])
+        errcode = bcoin.run_command(f'{command} {dformat} -j -f "{bidsname}" -o "{outfolder}" {args} {arg} "{sourcefile}"')
+        bids.bidsprov(bidsses, sourcefile, run, [target] if target.is_file() else [])
         if not target.is_file():
             if not errcode:
                 LOGGER.error(f"Output file not found: {target}")
@@ -256,20 +246,21 @@ def bidscoiner_plugin(session: Path, bidsmap: BidsMap, bidsses: Path) -> Union[N
 
         # Load/copy over and adapt the newly produced json sidecar-file
         sidecar  = target.with_suffix('').with_suffix('.json')
-        metadata = bids.updatemetadata(datasource, sidecar, run.meta, options.get('meta',[]))
+        metadata = bids.updatemetadata(run.datasource, sidecar, run.meta, options.get('meta',[]))
         if metadata:
             with sidecar.open('w') as json_fid:
                 json.dump(metadata, json_fid, indent=4)
 
         # Parse the acquisition time from the source header or else from the json file (NB: assuming the source file represents the first acquisition)
+        attributes = run.datasource.attributes
         if not bidsignore:
             acq_time = ''
             if dataformat == 'SPAR':
-                acq_time = datasource.attributes('scan_date')
+                acq_time = attributes('scan_date')
             elif dataformat == 'Twix':
-                acq_time = f"{datasource.attributes('AcquisitionDate')}T{datasource.attributes('AcquisitionTime')}"
+                acq_time = f"{attributes('AcquisitionDate')}T{attributes('AcquisitionTime')}"
             elif dataformat == 'Pfile':
-                acq_time = f"{datasource.attributes('rhr_rh_scan_date')}T{datasource.attributes('rhr_rh_scan_time')}"
+                acq_time = f"{attributes('rhr_rh_scan_date')}T{attributes('rhr_rh_scan_time')}"
             if not acq_time or acq_time == 'T':
                 acq_time = f"1925-01-01T{metadata.get('AcquisitionTime','')}"
             try:
@@ -278,30 +269,34 @@ def bidscoiner_plugin(session: Path, bidsmap: BidsMap, bidsses: Path) -> Union[N
                 acq_time = acq_time.replace(year=1925, month=1, day=1)      # Privacy protection (see BIDS specification)
                 acq_time = acq_time.isoformat()
             except Exception as jsonerror:
-                LOGGER.warning(f"Could not parse the acquisition time from: {source}\n{jsonerror}")
+                LOGGER.warning(f"Could not parse the acquisition time from: {sourcefile}\n{jsonerror}")
                 acq_time = 'n/a'
             scans_table.loc[target.relative_to(bidsses).as_posix(), 'acq_time'] = acq_time
 
+    if not runid:
+        LOGGER.info(f"--> No {__name__} sourcedata found in: {session}")
+        return
+
     # Write the scans_table to disk
     LOGGER.verbose(f"Writing acquisition time data to: {scans_tsv}")
     scans_table.sort_values(by=['acq_time', 'filename'], inplace=True)
     scans_table.replace('','n/a').to_csv(scans_tsv, sep='\t', encoding='utf-8', na_rep='n/a')
 
-    # Collect personal data for the participants.tsv file
+    # Collect personal data for the participants.tsv file (assumes the dataformat and personal attributes remain the same in the session)
     personals = {}
     age       = ''
     if dataformat == 'Twix':
-        personals['sex']    = datasource.attributes('PatientSex')
-        personals['size']   = datasource.attributes('PatientSize')
-        personals['weight'] = datasource.attributes('PatientWeight')
-        age = datasource.attributes('PatientAge')   # A string of characters with one of the following formats: nnnD, nnnW, nnnM, nnnY
+        personals['sex']    = attributes('PatientSex')
+        personals['size']   = attributes('PatientSize')
+        personals['weight'] = attributes('PatientWeight')
+        age = attributes('PatientAge')              # A string of characters with one of the following formats: nnnD, nnnW, nnnM, nnnY
    elif dataformat == 'Pfile':
-        sex = datasource.attributes('rhe_patsex')
+        sex = attributes('rhe_patsex')
         if   sex == '0': personals['sex'] = 'O'
         elif sex == '1': personals['sex'] = 'M'
         elif sex == '2': personals['sex'] = 'F'
         try:
-            age = dateutil.parser.parse(datasource.attributes('rhr_rh_scan_date')) - dateutil.parser.parse(datasource.attributes('rhe_dateofbirth'))
+            age = dateutil.parser.parse(attributes('rhr_rh_scan_date')) - dateutil.parser.parse(attributes('rhe_dateofbirth'))
             age = str(age.days) + 'D'
         except dateutil.parser.ParserError as exc:
             pass
@@ -315,6 +310,6 @@ def bidscoiner_plugin(session: Path, bidsmap: BidsMap, bidsses: Path) -> Union[N
             age = int(float(age))
             personals['age'] = str(age)
         except Exception as exc:
-            LOGGER.warning(f"Could not parse age from: {datasource}\n{exc}")
+            LOGGER.warning(f"Could not parse age from: {run.datasource}\n{exc}")
 
     return personals
diff --git a/tests/test_bids.py b/tests/test_bids.py
index a5daf6ef..7a9e0670 100644
--- a/tests/test_bids.py
+++ b/tests/test_bids.py
@@ -56,7 +56,7 @@ def extdatasource(self, dcm_file, tmp_path):
         return DataSource(ext_dcm_file, {'dcm2niix2bids': Plugin({})}, 'DICOM')
 
     def test_is_datasource(self, datasource):
-        assert datasource.has_plugin()
+        assert datasource.has_support()
         assert datasource.dataformat == 'DICOM'
 
     def test_properties(self, datasource):
@@ -138,12 +138,10 @@ def test_check_run(self):
 
     def test_get_bidsname(self, raw_dicomdir):
 
-        dicomfile = raw_dicomdir/'Doe^Archibald'/'01-XR C Spine Comp Min 4 Views'/'001-Cervical LAT'/'6154'
-
         # Get a run-item from a bidsmap
-        datasource = DataSource(dicomfile, {'dcm2niix2bids': Plugin({})}, 'DICOM')
-        runitem = RunItem('DICOM', '', {'bids': {'acq':'py#dicom', 'foo@':'bar#123', 'run':'<>', 'suffix':'T0w'}}, datasource)
-
+        runitem = RunItem('DICOM', '', {'bids': {'acq':'py#dicom', 'foo@':'bar#123', 'run':'<>', 'suffix':'T0w'}}, plugins={'dcm2niix2bids': {}})
+        runitem.provenance = str(raw_dicomdir/'Doe^Archibald'/'01-XR C Spine Comp Min 4 Views'/'001-Cervical LAT'/'6154')
+
         bidsname = runitem.bidsname('sub-001', 'ses-01', validkeys=False, cleanup=False)  # Test default: runtime=False
         assert bidsname == 'sub-001_ses-01_acq-py#dicom_run-<>_foo@-bar#123_T0w'
@@ -512,7 +510,7 @@ def test_get_dicomfile(dcm_file, dicomdir):
 
 def test_get_datasource(dicomdir):
     datasource = bids.get_datasource(dicomdir.parent, {'dcm2niix2bids': {}})
-    assert datasource.has_plugin()
+    assert datasource.has_support()
     assert datasource.dataformat == 'DICOM'
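For reference, a minimal sketch of the RunItem behaviour that the updated test relies on (not part of the patch; the file path is hypothetical). The datasource argument is gone: the DataSource is now derived from the run-item's provenance, and the reworked __setattr__ keeps it in sync when the provenance is reassigned:

    from bidscoin.bids import RunItem

    runitem = RunItem('DICOM', 'anat', {'bids': {'suffix': 'T1w'}}, plugins={'dcm2niix2bids': {}})
    runitem.provenance = '/data/sub-01/001-anat/00001.dcm'      # Hypothetical path

    # __setattr__ mirrors the assignment into the twin DataSource
    assert str(runitem.datasource.path) == runitem.provenance
    assert runitem.datasource.dataformat == runitem.dataformat == 'DICOM'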