diff --git a/bidscoin/bids.py b/bidscoin/bids.py
index defbeeaf..2df14370 100644
--- a/bidscoin/bids.py
+++ b/bidscoin/bids.py
@@ -117,8 +117,8 @@ def eventstable(self) -> pd.DataFrame:
             for column, value in self.time['start'].items():
                 start &= (self.logtable[column].astype(str) == str(value)).values
             if start.any():
-                LOGGER.bcdebug(f"Reseting clock offset of: {df['onset'][start.values].iloc[0]}")
-                df['onset'] = df['onset'] - df['onset'][start.values].iloc[0]      # Take the time of the first occurrence as zero
+                LOGGER.bcdebug(f"Resetting clock offset: {df['onset'][start.values].iloc[0]}")
+                df['onset'] -= df['onset'][start.values].iloc[0]                   # Take the time of the first occurrence as zero
 
         # Loop over the row groups to filter/edit the rows
         rows = pd.Series([len(self.rows) == 0] * len(df)).astype(bool)             # Boolean series with True values if no row expressions were specified
@@ -197,14 +197,14 @@ def is_float(s):
                 valid = False
 
         # Check if the logtable has existing and unique column names
-        df = self.logtable
+        columns = self.logtable.columns
        for name in set([name for item in self.columns for name in item.values()] + [name for item in self.rows for name in item['include'].keys()] +
                         [*self.time.get('start',{}).keys()] + self.time.get('cols',[])):
-            if name and name not in df:
+            if name and name not in columns:
                 LOGGER.warning(f"Column '{name}' not found in the event table of {self}")
                 valid = False
-        if not df.columns[df.columns.duplicated()].empty:
-            LOGGER.warning(f"Duplicate columns found in: {df.columns}\n{self}")
+        if columns.duplicated().any():
+            LOGGER.warning(f"Duplicate columns found: {columns}\n{self}")
             valid = False
 
         return valid
diff --git a/bidscoin/heuristics/bidsmap_dccn.yaml b/bidscoin/heuristics/bidsmap_dccn.yaml
index 1acad4b5..e0795e33 100644
--- a/bidscoin/heuristics/bidsmap_dccn.yaml
+++ b/bidscoin/heuristics/bidsmap_dccn.yaml
@@ -40,6 +40,7 @@ Options:
     fallback: y                 # Appends unhandled dcm2niix suffixes to the `acq` label if 'y' (recommended, else the suffix data is discarded)
   events2bids:
     table: event                # The table that is used to generate the output table (https://www.neurobs.com/pres_docs/html/03_presentation/07_data_reporting/01_logfiles/index.html)
+    skiprows: 3                 # The number of (header) rows that precede the actual table data
     meta: [.json, .tsv]
 
diff --git a/bidscoin/plugins/events2bids.py b/bidscoin/plugins/events2bids.py
index e0f8e0f8..6a00bb5b 100644
--- a/bidscoin/plugins/events2bids.py
+++ b/bidscoin/plugins/events2bids.py
@@ -13,7 +13,7 @@
 LOGGER = logging.getLogger(__name__)
 
 # The default/fallback options that are set when installing/using the plugin
-OPTIONS = Plugin({'table': 'event', 'meta': ['.json', '.tsv']})                 # The file extensions of the equally named metadata sourcefiles that are copied over as BIDS sidecar files
+OPTIONS = Plugin({'table': 'event', 'skiprows': 3, 'meta': ['.json', '.tsv']})  # The file extensions of the equally named metadata sourcefiles that are copied over as BIDS sidecar files
 
 
 def test(options: Plugin=OPTIONS) -> int:
@@ -240,9 +240,9 @@ def __init__(self, sourcefile: Path, _data: dict, options: dict):
         super().__init__(sourcefile, _data, options)
 
         # Read the log-tables from the Presentation logfile
-        self._sourcetable = pd.read_csv(self.sourcefile, sep='\t', skiprows=3, skip_blank_lines=True)
+        self._sourcetable = pd.read_csv(self.sourcefile, sep='\t', skiprows=options.get('skiprows',3), skip_blank_lines=True)
         """The Presentation log-tables (https://www.neurobs.com/pres_docs/html/03_presentation/07_data_reporting/01_logfiles/index.html)"""
-        self._columns     = self._sourcetable.columns
+        self._sourcecols  = self._sourcetable.columns
         """Store the original column names"""
 
     @property
@@ -256,7 +256,7 @@ def logtable(self) -> pd.DataFrame:
         survey_header   = (df.iloc[:, 0] == 'Time').idxmax() or nrows
 
         # Get the row indices to slice the event, stimulus, video or survey table
-        df.columns = self._columns
+        df.columns = self._sourcecols
         if self.options['table'] == 'event':
             begin = 0
             end   = min(stimulus_header, video_header, survey_header)
@@ -277,9 +277,7 @@ def logtable(self) -> pd.DataFrame:
             end   = nrows
             LOGGER.error(f"NOT IMPLEMENTED TABLE: {self.options['table']}")
 
-        LOGGER.bcdebug(f"Slicing '{self.options['table']}{df.shape}' sourcetable[{begin}:{end}]")
-
-        # Ensure unique column names by renaming columns with NaN or empty names and by appending suffixes to duplicate names
+        # Ensure unique column names by renaming columns with NaN or empty names, and by appending suffixes to duplicate names
         cols = []           # The new column names
         dupl = {}           # The duplicate index number
         for i, col in enumerate(df.columns):
@@ -296,4 +294,5 @@ def logtable(self) -> pd.DataFrame:
         df.columns = cols
 
         # Return the sliced the table
+        LOGGER.bcdebug(f"Slicing '{self.options['table']}{df.shape}' sourcetable[{begin}:{end}]")
         return df.iloc[begin:end]
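
Not part of the diff: a minimal pandas sketch of the simplified duplicate-column check in bids.py, using made-up column names, showing that Index.duplicated().any() flags the same condition as the old emptiness test:

    import pandas as pd

    # Hypothetical Presentation-style column names with 'Code' appearing twice
    columns = pd.Index(['Subject', 'Trial', 'Event Type', 'Code', 'Code', 'Time'])

    print(not columns[columns.duplicated()].empty)   # old test -> True (duplicates present)
    print(columns.duplicated().any())                # new test -> True (duplicates present)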
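
Likewise a hedged usage sketch of the new 'skiprows' option; the logfile path and the standalone options dict are hypothetical, only the read_csv call mirrors the plugin code above:

    import pandas as pd

    options = {'table': 'event', 'skiprows': 3, 'meta': ['.json', '.tsv']}

    # Presentation logfiles open with a few header lines before the tab-separated
    # table; 'skiprows' tells read_csv how many of those lines to skip (default 3)
    sourcetable = pd.read_csv('sub-001_task-rest.log', sep='\t',
                              skiprows=options.get('skiprows', 3),
                              skip_blank_lines=True)
    print(sourcetable.columns.tolist())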