
Commit

Merge pull request #1519 from zm711/fix-spikegadgets
Allow spikegadgets to handle another data style
samuelgarcia authored Sep 27, 2024
2 parents 375c973 + 92a3095 commit 499db96
Showing 1 changed file with 26 additions and 7 deletions.
33 changes: 26 additions & 7 deletions neo/rawio/spikegadgetsrawio.py
@@ -81,14 +81,18 @@ def _source_name(self):
return self.filename

def _produce_ephys_channel_ids(self, n_total_channels, n_channels_per_chip):
"""Compute the channel ID labels
"""Compute the channel ID labels for subset of spikegadgets recordings
The ephys channels in the .rec file are stored in the following order:
hwChan ID of channel 0 of first chip, hwChan ID of channel 0 of second chip, ..., hwChan ID of channel 0 of Nth chip,
hwChan ID of channel 1 of first chip, hwChan ID of channel 1 of second chip, ..., hwChan ID of channel 1 of Nth chip,
...
So if there are 32 channels per chip and 128 channels (4 chips), then the channel IDs are:
0, 32, 64, 96, 1, 33, 65, 97, ..., 127
See also: https://github.com/NeuralEnsemble/python-neo/issues/1215
This doesn't work for all types of SpikeGadgets recordings;
see: https://github.com/NeuralEnsemble/python-neo/issues/1517
"""
ephys_channel_ids_list = []
for hw_channel in range(n_channels_per_chip):
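The rest of the helper is collapsed in this view. As a minimal standalone sketch of the interleaving the docstring describes (illustrative names, not the committed implementation):

def interleaved_ids(n_total_channels, n_channels_per_chip):
    # hwChan IDs are laid out chip-major within each per-chip channel index
    n_chips = n_total_channels // n_channels_per_chip
    ids = []
    for hw_channel in range(n_channels_per_chip):
        ids.extend(hw_channel + chip * n_channels_per_chip for chip in range(n_chips))
    return ids

# interleaved_ids(128, 32)[:8] -> [0, 32, 64, 96, 1, 33, 65, 97]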
@@ -129,9 +133,11 @@ def _parse_header(self):
num_ephy_channels = sconf_channels
if sconf_channels > num_ephy_channels:
raise NeoReadWriteError(
"SpikeGadgets: the number of channels in the spike configuration is larger than the number of channels in the hardware configuration"
"SpikeGadgets: the number of channels in the spike configuration is larger "
"than the number of channels in the hardware configuration"
)

# as the SpikeGadgets format changes we should follow it here
try:
num_chan_per_chip = int(sconf.attrib["chanPerChip"])
except KeyError:
@@ -207,9 +213,16 @@ def _parse_header(self):
signal_streams.append((stream_name, stream_id))
self._mask_channels_bytes[stream_id] = []

channel_ids = self._produce_ephys_channel_ids(num_ephy_channels, num_chan_per_chip)
# we can only produce these channel IDs for a subset of SpikeGadgets setups. If this criterion isn't
# met then we should just use the raw_channel_ids and let the end user sort everything out
if num_ephy_channels % num_chan_per_chip == 0:
channel_ids = self._produce_ephys_channel_ids(num_ephy_channels, num_chan_per_chip)
raw_channel_ids = False
else:
raw_channel_ids = True
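# For example (hypothetical numbers): 128 ephys channels with chanPerChip=32
# satisfies the check (128 % 32 == 0) and gets interleaved hwChan IDs, whereas a
# header reporting, say, 100 channels with 32 per chip would fall back to the
# raw trode/hwChan naming used further below.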

chan_ind = 0
self.is_scaleable = "spikeScalingToUv" in sconf[0].attrib
self.is_scaleable = all("spikeScalingToUv" in trode.attrib for trode in sconf)
if not self.is_scaleable:
self.logger.warning(
"Unable to read channel gain scaling (to uV) from .rec header. Data has no physical units!"
@@ -224,8 +237,14 @@
units = ""

for schan in trode:
chan_id = str(channel_ids[chan_ind])
name = "hwChan" + chan_id
# Here we use raw trode/hwChan ids when necessary (for some Neuropixels recordings);
# otherwise we default to the computed hwChan IDs
if raw_channel_ids:
name = "trode" + trode.attrib["id"] + "chan" + schan.attrib["hwChan"]
chan_id = schan.attrib["hwChan"]
else:
chan_id = str(channel_ids[chan_ind])
name = "hwChan" + chan_id

offset = 0.0
signal_channels.append(
@@ -250,7 +269,7 @@
signal_streams = np.array(signal_streams, dtype=_signal_stream_dtype)
signal_channels = np.array(signal_channels, dtype=_signal_channel_dtype)

# remove some stream if no wanted
# remove some streams if not wanted
if self.selected_streams is not None:
if isinstance(self.selected_streams, str):
self.selected_streams = [self.selected_streams]
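For context, this reader is used through the standard neo rawio workflow; a minimal usage sketch (the filename below is a placeholder):

from neo.rawio import SpikeGadgetsRawIO

# Placeholder path; point this at a real SpikeGadgets .rec recording.
reader = SpikeGadgetsRawIO(filename="session.rec")
reader.parse_header()

# Channel IDs are either the computed hwChan labels or, when the channel count
# is not a multiple of chanPerChip, the raw trode/hwChan names from the header.
print(reader.header["signal_streams"])
print(reader.header["signal_channels"][["name", "id"]])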
