This article collects typical usage examples of the Python method neo.core.SpikeTrain.lazy_shape: what SpikeTrain.lazy_shape is for, how to use it, and where it gets set. If that is what you are looking for, the curated code samples below may help. You can also explore further usage examples of the class it belongs to, neo.core.SpikeTrain.
Below are 15 code examples of SpikeTrain.lazy_shape, sorted by popularity by default.
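All 15 examples follow the same convention, so here it is first as a minimal, hypothetical sketch (the loader function and its parameters are placeholders, not part of neo): when asked for a lazy load, an IO returns an empty SpikeTrain and records the size the data would have on the ad-hoc attribute lazy_shape, instead of reading anything from disk.

import numpy as np
import quantities as pq
from neo.core import SpikeTrain

def load_spiketrain(lazy=False, n_spikes=40, t_stop=15.):
    # hypothetical loader illustrating the convention used by all examples below
    if lazy:
        # nothing is read: return an empty train and record the would-be size
        st = SpikeTrain([] * pq.s, t_start=0. * pq.s, t_stop=t_stop * pq.s)
        st.lazy_shape = (n_spikes,)
    else:
        times = np.sort(np.random.rand(n_spikes)) * t_stop
        st = SpikeTrain(times * pq.s, t_start=0. * pq.s, t_stop=t_stop * pq.s)
    return st

st = load_spiketrain(lazy=True)
print(st.size, st.lazy_shape)   # 0 (40,)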
Example 1: read_spiketrain
# Required import: from neo.core import SpikeTrain [as alias]
# Or: from neo.core.SpikeTrain import lazy_shape [as alias]
def read_spiketrain(self,
                    # the first two keyword arguments are imposed by the neo.io API
                    lazy=False,
                    cascade=True,
                    segment_duration=15.,
                    t_start=-1,
                    channel_index=0,
                    ):
    """
    With this IO, a SpikeTrain can be accessed directly by its channel number.
    """
    # There are two possible behaviours for a SpikeTrain:
    # holding many Spike instances, or directly holding the spike times.
    # We choose the first here:
    if not HAVE_SCIPY:
        raise SCIPY_ERR
    num_spike_by_spiketrain = 40
    sr = 10000.
    if lazy:
        times = []
    else:
        times = (np.random.rand(num_spike_by_spiketrain)*segment_duration +
                 t_start)

    # create a spiketrain
    spiketr = SpikeTrain(times, t_start=t_start*pq.s,
                         t_stop=(t_start + segment_duration)*pq.s,
                         units=pq.s,
                         name='it is a spiketrain from exampleio',
                         )

    if lazy:
        # we add the lazy_shape attribute: the size the train would have if loaded
        spiketr.lazy_shape = (num_spike_by_spiketrain,)

    # our spiketrains also hold the waveforms:
    # 1. generate a fake spike shape (2D array if trodeness > 1)
    w1 = -stats.nct.pdf(np.arange(11, 60, 4), 5, 20)[::-1]/3.
    w2 = stats.nct.pdf(np.arange(11, 60, 2), 5, 20)
    w = np.r_[w1, w2]
    w = -w/max(w)

    if not lazy:
        # in the neo API the waveforms attribute is 3D to allow for tetrodes;
        # here we have a single electrode, so dimension 1 has size 1
        waveforms = np.tile(w[np.newaxis, np.newaxis, :],
                            (num_spike_by_spiketrain, 1, 1))
        waveforms *= np.random.randn(*waveforms.shape)/6 + 1
        spiketr.waveforms = waveforms*pq.mV
        spiketr.sampling_rate = sr * pq.Hz
        spiketr.left_sweep = 1.5 * pq.s

    # attributes outside the neo schema go through annotate
    spiketr.annotate(channel_index=channel_index)

    return spiketr
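A usage sketch for this example; it assumes neo's ExampleIO, which the 'exampleio' string in the code suggests this method belongs to (ExampleIO fabricates its data, so the filename is an arbitrary placeholder):

from neo.io import ExampleIO  # assumption: the IO class this method is defined on

io = ExampleIO(filename='anything.fake')
st = io.read_spiketrain(lazy=True, segment_duration=15., t_start=-1, channel_index=0)
print(st.times.size)   # 0 -- no data was loaded
print(st.lazy_shape)   # (40,) -- the shape the train would have if loaded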
Example 2: _read_spiketrain
# Required import: from neo.core import SpikeTrain [as alias]
# Or: from neo.core.SpikeTrain import lazy_shape [as alias]
def _read_spiketrain(self, node, parent):
    attributes = self._get_standard_attributes(node)
    t_start = self._get_quantity(node["t_start"])
    t_stop = self._get_quantity(node["t_stop"])
    # todo: handle sampling_rate, waveforms, left_sweep
    spiketrain = SpikeTrain(self._get_quantity(node["times"]),
                            t_start=t_start, t_stop=t_stop,
                            **attributes)
    spiketrain.segment = parent
    if self._lazy:
        spiketrain.lazy_shape = node["times"].shape
    self.object_refs[node.attrs["object_ref"]] = spiketrain
    return spiketrain
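The lazy branch above works because an HDF5 dataset exposes its shape without reading any values; a minimal sketch of that property, assuming h5py is the underlying library (the file and dataset names are placeholders):

import numpy as np
import h5py

with h5py.File('example.h5', 'w') as f:
    f.create_dataset('times', data=np.arange(1000.))
with h5py.File('example.h5', 'r') as f:
    print(f['times'].shape)   # (1000,) -- known without loading the data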
Example 3: _extract_spikes
# Required import: from neo.core import SpikeTrain [as alias]
# Or: from neo.core.SpikeTrain import lazy_shape [as alias]
def _extract_spikes(self, data, metadata, channel_index, lazy):
    spiketrain = None
    if lazy:
        if channel_index in data[:, 1]:
            spiketrain = SpikeTrain([], units=pq.ms, t_stop=0.0)
            spiketrain.lazy_shape = None
    else:
        spike_times = self._extract_array(data, channel_index)
        if len(spike_times) > 0:
            spiketrain = SpikeTrain(spike_times, units=pq.ms,
                                    t_stop=spike_times.max())
    if spiketrain is not None:
        spiketrain.annotate(label=metadata["label"],
                            channel_index=channel_index,
                            dt=metadata["dt"])
    return spiketrain
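Note the design choice in the lazy branch: lazy_shape is set to None because the spike count is unknown without actually extracting the channel. Since lazy_shape is an ad-hoc attribute that only lazy-loaded objects carry, a caller can test for laziness with hasattr, as in this short sketch:

def is_lazy(obj):
    # lazy-loaded neo objects carry the ad-hoc lazy_shape attribute
    return hasattr(obj, 'lazy_shape')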
Example 4: read_segment
# Required import: from neo.core import SpikeTrain [as alias]
# Or: from neo.core.SpikeTrain import lazy_shape [as alias]
def read_segment(self,
                 lazy=False,
                 cascade=True,
                 delimiter='\t',
                 t_start=0.*pq.s,
                 unit=pq.s,
                 ):
    """
    Arguments:
        delimiter : column delimiter in the file: '\t', one space, two spaces, ',' or ';'
        t_start : start time of all spiketrains (0 by default)
        unit : unit of the spike times; can be a str or a Quantity directly
    """
    unit = pq.Quantity(1, unit)

    seg = Segment(file_origin=os.path.basename(self.filename))
    if not cascade:
        return seg

    f = open(self.filename, 'Ur')
    for i, line in enumerate(f):
        alldata = line[:-1].split(delimiter)
        if alldata[-1] == '':
            alldata = alldata[:-1]
        if alldata[0] == '':
            alldata = alldata[1:]
        if lazy:
            spike_times = []
            t_stop = t_start
        else:
            spike_times = np.array(alldata).astype('f')
            t_stop = spike_times.max()*unit

        sptr = SpikeTrain(spike_times*unit, t_start=t_start, t_stop=t_stop)
        if lazy:
            sptr.lazy_shape = len(alldata)

        sptr.annotate(channel_index=i)
        seg.spiketrains.append(sptr)
    f.close()

    seg.create_many_to_one_relationship()
    return seg
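A usage sketch, assuming this method comes from neo's AsciiSpikeTrainIO (one spike train per row of a text file); the filename is a placeholder:

import quantities as pq
from neo.io import AsciiSpikeTrainIO  # assumption: the IO class this method is defined on

io = AsciiSpikeTrainIO(filename='spiketrains.txt')
seg = io.read_segment(lazy=False, delimiter='\t', t_start=0.*pq.s, unit=pq.s)
for st in seg.spiketrains:
    print(st.annotations['channel_index'], st.size)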
Example 5: __save_segment
# Required import: from neo.core import SpikeTrain [as alias]
# Or: from neo.core.SpikeTrain import lazy_shape [as alias]
def __save_segment(self):
    '''
    Write the segment to the Block if it exists.
    '''
    # If this is the beginning of the first condition, we don't want to save,
    # so exit; but set __seg from None to False so that next time we know to
    # create a segment even if there are no spikes in the condition.
    if self.__seg is None:
        self.__seg = False
        return

    if not self.__seg:
        # create dummy values if there are no SpikeTrains in this condition
        self.__seg = Segment(file_origin=self._filename,
                             **self.__params)
        self.__spiketimes = []

    if self.__lazy:
        train = SpikeTrain(pq.Quantity([], dtype=np.float32,
                                       units=pq.ms),
                           t_start=0*pq.ms, t_stop=self.__t_stop * pq.ms,
                           file_origin=self._filename)
        train.lazy_shape = len(self.__spiketimes)
    else:
        times = pq.Quantity(self.__spiketimes, dtype=np.float32,
                            units=pq.ms)
        train = SpikeTrain(times,
                           t_start=0*pq.ms, t_stop=self.__t_stop * pq.ms,
                           file_origin=self._filename)

    self.__seg.spiketrains = [train]
    self.__unit.spiketrains.append(train)
    self._blk.segments.append(self.__seg)

    # Reset to an empty segment. From now on we set __seg to False rather than
    # None, so that if a condition has no SpikeTrains we still know to create
    # an empty Segment.
    self.__seg = False
Example 6: _handle_processing_group
# Required import: from neo.core import SpikeTrain [as alias]
# Or: from neo.core.SpikeTrain import lazy_shape [as alias]
def _handle_processing_group(self, block):
    # todo: handle other modules than Units
    units_group = self._file.get('processing/Units/UnitTimes')
    segment_map = dict((segment.name, segment) for segment in block.segments)
    for name, group in units_group.items():
        if name == 'unit_list':
            pass  # todo
        else:
            segment_name = group['source'].value
            #desc = group['unit_description'].value  # use this to store Neo Unit id?
            segment = segment_map[segment_name]
            if self._lazy:
                times = np.array(())
                lazy_shape = group['times'].shape
            else:
                times = group['times'].value
            # todo: t_stop is a custom Neo value; general NWB files will not
            # have it - use segment.t_stop instead in that case?
            spiketrain = SpikeTrain(times, units=pq.second,
                                    t_stop=group['t_stop'].value*pq.second)
            if self._lazy:
                spiketrain.lazy_shape = lazy_shape
            spiketrain.segment = segment
            segment.spiketrains.append(spiketrain)
Example 7: read_segment
# Required import: from neo.core import SpikeTrain [as alias]
# Or: from neo.core.SpikeTrain import lazy_shape [as alias]
# ... (earlier part of the code omitted) ...
            unit = dataBlockHeader['Unit']
            pos = pos_spikes[chan, unit]
            stimearrays[chan, unit][pos] = time
            if load_spike_waveform and n1*n2 != 0:
                swfarrays[chan, unit][pos, :, :] = np.fromstring(
                    fid.read(n1*n2*2), dtype='i2').reshape(n1, n2).astype('f4')
            else:
                fid.seek(n1*n2*2, 1)
            pos_spikes[chan, unit] += 1

        elif dataBlockHeader['Type'] == 4:
            # event
            pos = eventpositions[chan]
            evarrays[chan]['times'][pos] = time
            evarrays[chan]['labels'][pos] = dataBlockHeader['Unit']
            eventpositions[chan] += 1

        elif dataBlockHeader['Type'] == 5:
            # signal
            data = np.fromstring(fid.read(n2*2), dtype='i2').astype('f4')
            sigarrays[chan][sample_positions[chan]:sample_positions[chan]+data.size] = data
            sample_positions[chan] += data.size

    ## Step 4: create neo objects
    for chan, h in iteritems(eventHeaders):
        if lazy:
            times = []
            labels = None
        else:
            times = evarrays[chan]['times']
            labels = evarrays[chan]['labels']
        ea = EventArray(
            times*pq.s,
            labels=labels,
            channel_name=eventHeaders[chan]['Name'],
            channel_index=chan
        )
        if lazy:
            ea.lazy_shape = nb_events[chan]
        seg.eventarrays.append(ea)

    for chan, h in iteritems(slowChannelHeaders):
        if lazy:
            signal = []
        else:
            if globalHeader['Version'] == 100 or globalHeader['Version'] == 101:
                gain = 5000./(2048*slowChannelHeaders[chan]['Gain']*1000.)
            elif globalHeader['Version'] == 102:
                gain = 5000./(2048*slowChannelHeaders[chan]['Gain'] *
                              slowChannelHeaders[chan]['PreampGain'])
            elif globalHeader['Version'] >= 103:
                gain = globalHeader['SlowMaxMagnitudeMV']/(
                    .5*(2**globalHeader['BitsPerSpikeSample']) *
                    slowChannelHeaders[chan]['Gain'] *
                    slowChannelHeaders[chan]['PreampGain'])
            signal = sigarrays[chan]*gain
        anasig = AnalogSignal(signal*pq.V,
                              sampling_rate=float(slowChannelHeaders[chan]['ADFreq'])*pq.Hz,
                              t_start=t_starts[chan]*pq.s,
                              channel_index=slowChannelHeaders[chan]['Channel'],
                              channel_name=slowChannelHeaders[chan]['Name'],
                              )
        if lazy:
            anasig.lazy_shape = nb_samples[chan]
        seg.analogsignals.append(anasig)

    for (chan, unit), value in np.ndenumerate(nb_spikes):
        if nb_spikes[chan, unit] == 0:
            continue
        if lazy:
            times = []
            waveforms = None
            t_stop = 0
        else:
            times = stimearrays[chan, unit]
            t_stop = times.max()
            if load_spike_waveform:
                if globalHeader['Version'] < 103:
                    gain = 3000./(2048*dspChannelHeaders[chan]['Gain']*1000.)
                elif globalHeader['Version'] >= 103 and globalHeader['Version'] < 105:
                    gain = globalHeader['SpikeMaxMagnitudeMV']/(
                        .5*2.**(globalHeader['BitsPerSpikeSample'])*1000.)
                elif globalHeader['Version'] > 105:
                    gain = globalHeader['SpikeMaxMagnitudeMV']/(
                        .5*2.**(globalHeader['BitsPerSpikeSample']) *
                        globalHeader['SpikePreAmpGain'])
                waveforms = swfarrays[chan, unit] * gain * pq.V
            else:
                waveforms = None
        sptr = SpikeTrain(
            times,
            units='s',
            t_stop=t_stop*pq.s,
            waveforms=waveforms
        )
        sptr.annotate(unit_name=dspChannelHeaders[chan]['Name'])
        sptr.annotate(channel_index=chan)
        for key, val in dspChannelHeaders[chan].iteritems():
            sptr.annotate(**{key: val})
        if lazy:
            sptr.lazy_shape = nb_spikes[chan, unit]
        seg.spiketrains.append(sptr)

    seg.create_many_to_one_relationship()
    return seg
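A usage sketch for this example, assuming it is neo's PlexonIO (the .plx header fields suggest so; the filename is a placeholder). With lazy=True the large arrays are skipped, but the on-disk counts survive in lazy_shape:

from neo.io import PlexonIO  # assumption: the IO class this read_segment is defined on

io = PlexonIO(filename='data.plx')
seg = io.read_segment(lazy=True, load_spike_waveform=False)
for st in seg.spiketrains:
    # each lazy train is empty but remembers its on-disk spike count
    print(st.annotations['channel_index'], st.lazy_shape)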
Example 8: read_one_channel_event_or_spike
# Required import: from neo.core import SpikeTrain [as alias]
# Or: from neo.core.SpikeTrain import lazy_shape [as alias]
def read_one_channel_event_or_spike(self, fid, channel_num, header,
                                    lazy=True):
    # returns a SpikeTrain or an Event
    channelHeader = header.channelHeaders[channel_num]
    if channelHeader.firstblock < 0:
        return
    if channelHeader.kind not in [2, 3, 4, 5, 6, 7, 8]:
        return

    ## Step 1: type of blocks
    if channelHeader.kind in [2, 3, 4]:
        # Event data
        fmt = [('tick', 'i4')]
    elif channelHeader.kind in [5]:
        # Marker data
        fmt = [('tick', 'i4'), ('marker', 'i4')]
    elif channelHeader.kind in [6]:
        # AdcMark data
        fmt = [('tick', 'i4'), ('marker', 'i4'),
               ('adc', 'S%d' % channelHeader.n_extra)]
    elif channelHeader.kind in [7]:
        # RealMark data
        fmt = [('tick', 'i4'), ('marker', 'i4'),
               ('real', 'S%d' % channelHeader.n_extra)]
    elif channelHeader.kind in [8]:
        # TextMark data
        fmt = [('tick', 'i4'), ('marker', 'i4'),
               ('label', 'S%d' % channelHeader.n_extra)]
    dt = np.dtype(fmt)

    ## Step 2: first pass to allocate memory
    fid.seek(channelHeader.firstblock)
    totalitems = 0
    for _ in range(channelHeader.blocks):
        blockHeader = HeaderReader(fid, np.dtype(blockHeaderDesciption))
        totalitems += blockHeader.items
        if blockHeader.succ_block > 0:
            fid.seek(blockHeader.succ_block)
    #~ print 'totalitems', totalitems

    if lazy:
        if channelHeader.kind in [2, 3, 4, 5, 8]:
            ea = Event()
            ea.annotate(channel_index=channel_num)
            ea.lazy_shape = totalitems
            return ea
        elif channelHeader.kind in [6, 7]:
            # the correct value for t_stop is put in later
            sptr = SpikeTrain([] * pq.s, t_stop=1e99)
            sptr.annotate(channel_index=channel_num, ced_unit=0)
            sptr.lazy_shape = totalitems
            return sptr
    else:
        alltrigs = np.zeros(totalitems, dtype=dt)
        ## Step 3: read
        fid.seek(channelHeader.firstblock)
        pos = 0
        for _ in range(channelHeader.blocks):
            blockHeader = HeaderReader(
                fid, np.dtype(blockHeaderDesciption))
            # read all events in the block
            trigs = np.fromstring(
                fid.read(blockHeader.items * dt.itemsize), dtype=dt)
            alltrigs[pos:pos + trigs.size] = trigs
            pos += trigs.size
            if blockHeader.succ_block > 0:
                fid.seek(blockHeader.succ_block)

    ## Step 4: convert to standard neo classes: events or spiketrains
    alltimes = alltrigs['tick'].astype(
        'f') * header.us_per_time * header.dtime_base * pq.s

    if channelHeader.kind in [2, 3, 4, 5, 8]:
        # events
        ea = Event(alltimes)
        ea.annotate(channel_index=channel_num)
        if channelHeader.kind >= 5:
            # a Spike2 marker is closer to the neo notion of a label
            ea.labels = alltrigs['marker'].astype('S32')
        if channelHeader.kind == 8:
            ea.annotate(extra_labels=alltrigs['label'])
        return ea

    elif channelHeader.kind in [6, 7]:
        # spiketrains
        # waveforms
        if channelHeader.kind == 6:
            waveforms = np.fromstring(alltrigs['adc'].tostring(),
                                      dtype='i2')
            waveforms = waveforms.astype(
                'f4') * channelHeader.scale / 6553.6 + \
                channelHeader.offset
        elif channelHeader.kind == 7:
            waveforms = np.fromstring(alltrigs['real'].tostring(),
                                      dtype='f4')

        if header.system_id >= 6 and channelHeader.interleave > 1:
            # ... (remaining code omitted) ...
Example 9: read_nev
# Required import: from neo.core import SpikeTrain [as alias]
# Or: from neo.core.SpikeTrain import lazy_shape [as alias]
def read_nev(self, filename_nev, seg, lazy, cascade, load_waveforms=False):
    # basic header
    dt = [('header_id', 'S8'),
          ('ver_major', 'uint8'),
          ('ver_minor', 'uint8'),
          ('additionnal_flag', 'uint16'),  # read flags, currently basically unused
          ('header_size', 'uint32'),  # i.e. index of the first data
          ('packet_size', 'uint32'),  # number of packet bytes, i.e. bytes per sample
          ('sampling_rate', 'uint32'),  # time resolution in Hz of the time stamps, i.e. data packets
          ('waveform_sampling_rate', 'uint32'),  # sampling frequency of the waveforms in Hz
          ('window_datetime', 'S16'),
          ('application', 'S32'),
          ('comments', 'S256'),
          ('num_ext_header', 'uint32'),  # number of extended headers
          ]
    nev_header = h = np.fromfile(filename_nev, count=1, dtype=dt)[0]
    version = '{0}.{1}'.format(h['ver_major'], h['ver_minor'])
    assert h['header_id'].decode('ascii') == 'NEURALEV' or version == '2.1', 'Unsupported version {0}'.format(version)
    seg.annotate(blackrock_version=version)
    seg.rec_datetime = get_window_datetime(nev_header['window_datetime'])
    sr = float(h['sampling_rate'])
    wsr = float(h['waveform_sampling_rate'])
    if not cascade:
        return

    # extended header
    # this consists of N blocks with an 8-byte code + 24 data bytes;
    # the data bytes depend on the code and need to be converted case by case
    raw_ext_header = np.memmap(filename_nev, offset=np.dtype(dt).itemsize,
                               dtype=[('code', 'S8'), ('data', 'S24')],
                               shape=h['num_ext_header'])
    # this is for debugging
    ext_header = {}
    for code, dt_ext in ext_nev_header_codes.items():
        sel = raw_ext_header['code'] == code
        ext_header[code] = raw_ext_header[sel].view(dt_ext)

    # channel labels
    neuelbl_header = ext_header['NEUEVLBL']
    # Sometimes there is only one channel, which must be addressed
    # differently when building the channel-label mapping.
    try:
        channel_labels = dict(zip(neuelbl_header['channel_id'],
                                  neuelbl_header['channel_label']))
    except TypeError:
        channel_labels = dict([(neuelbl_header['channel_id'],
                                neuelbl_header['channel_label'])])
    # TODO: ext_header['DIGLABEL']: is there only one label? (there is no id in that case)
    # TODO: ECOMMENT + CCOMMENT for annotations
    # TODO: NEUEVFLT for annotations

    # read data packets and markers
    dt0 = [('samplepos', 'uint32'),
           ('id', 'uint16'),
           ('value', 'S{0}'.format(h['packet_size']-6)),
           ]
    data = np.memmap(filename_nev, offset=h['header_size'], dtype=dt0)
    all_ids = np.unique(data['id'])

    t_start = 0*pq.s
    t_stop = data['samplepos'][-1]/sr*pq.s

    # read events (digital 9+ analog + comment)
    def create_event_array_trig_or_analog(selection, name, labelmode=None):
        if lazy:
            times = []
            labels = np.array([], dtype='S')
        else:
            times = data_trigger['samplepos'][selection].astype(float)/sr
            if labelmode == 'digital_port':
                labels = data_trigger['digital_port'][selection].astype('S2')
            elif labelmode is None:
                labels = None
        ev = EventArray(times=times*pq.s,
                        labels=labels,
                        name=name)
        if lazy:
            ev.lazy_shape = np.sum(selection)
        seg.eventarrays.append(ev)

    mask = (data['id'] == 0)
    dt_trig = [('samplepos', 'uint32'),
               ('id', 'uint16'),
               ('reason', 'uint8'),
               ('reserved0', 'uint8'),
               ('digital_port', 'uint16'),
               ('reserved1', 'S{0}'.format(h['packet_size']-10)),
               ]
    data_trigger = data.view(dt_trig)[mask]
    # Digital Triggers (PacketID 0)
    is_digital = (data_trigger['reason'] & 1) > 0
    create_event_array_trig_or_analog(is_digital, 'Digital trigger',
                                      labelmode='digital_port')
    # Analog Triggers (PacketID 0)
    if version in ['2.1', '2.2']:
        for i in range(5):
            # ... (remaining code omitted) ...
Example 10: read_segment
# Required import: from neo.core import SpikeTrain [as alias]
# Or: from neo.core.SpikeTrain import lazy_shape [as alias]
def read_segment(self, lazy=False, cascade=True):
    fid = open(self.filename, 'rb')
    global_header = HeaderReader(fid, GlobalHeader).read_f(offset=0)
    #~ print global_header
    #~ print 'version', global_header['version']
    seg = Segment()
    seg.file_origin = os.path.basename(self.filename)
    seg.annotate(neuroexplorer_version=global_header['version'])
    seg.annotate(comment=global_header['comment'])

    if not cascade:
        return seg

    offset = 544
    for i in range(global_header['nvar']):
        entity_header = HeaderReader(fid, EntityHeader).read_f(
            offset=offset + i * 208)
        entity_header['name'] = entity_header['name'].replace('\x00', '')
        #print 'i', i, entity_header['type']

        if entity_header['type'] == 0:
            # neuron
            if lazy:
                spike_times = [] * pq.s
            else:
                spike_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                        shape=(entity_header['n']),
                                        offset=entity_header['offset'])
                spike_times = spike_times.astype('f8') / global_header[
                    'freq'] * pq.s
            sptr = SpikeTrain(
                times=spike_times,
                t_start=global_header['tbeg'] /
                global_header['freq'] * pq.s,
                t_stop=global_header['tend'] /
                global_header['freq'] * pq.s,
                name=entity_header['name'])
            if lazy:
                sptr.lazy_shape = entity_header['n']
            sptr.annotate(channel_index=entity_header['WireNumber'])
            seg.spiketrains.append(sptr)

        if entity_header['type'] == 1:
            # event
            if lazy:
                event_times = [] * pq.s
            else:
                event_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                        shape=(entity_header['n']),
                                        offset=entity_header['offset'])
                event_times = event_times.astype('f8') / global_header[
                    'freq'] * pq.s
            labels = np.array([''] * event_times.size, dtype='S')
            evar = Event(times=event_times, labels=labels,
                         channel_name=entity_header['name'])
            if lazy:
                evar.lazy_shape = entity_header['n']
            seg.events.append(evar)

        if entity_header['type'] == 2:
            # interval
            if lazy:
                start_times = [] * pq.s
                stop_times = [] * pq.s
            else:
                start_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                        shape=(entity_header['n']),
                                        offset=entity_header['offset'])
                start_times = start_times.astype('f8') / global_header[
                    'freq'] * pq.s
                stop_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                       shape=(entity_header['n']),
                                       offset=entity_header['offset'] +
                                       entity_header['n'] * 4)
                stop_times = stop_times.astype('f') / global_header[
                    'freq'] * pq.s
            epar = Epoch(times=start_times,
                         durations=stop_times - start_times,
                         labels=np.array([''] * start_times.size,
                                         dtype='S'),
                         channel_name=entity_header['name'])
            if lazy:
                epar.lazy_shape = entity_header['n']
            seg.epochs.append(epar)

        if entity_header['type'] == 3:
            # spiketrain and waveforms
            if lazy:
                spike_times = [] * pq.s
                waveforms = None
            else:
                spike_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                        shape=(entity_header['n']),
                                        offset=entity_header['offset'])
                spike_times = spike_times.astype('f8') / global_header[
                    'freq'] * pq.s
                waveforms = np.memmap(self.filename, np.dtype('i2'), 'r',
                # ... (remaining code omitted) ...
Example 11: read_segment
# Required import: from neo.core import SpikeTrain [as alias]
# Or: from neo.core.SpikeTrain import lazy_shape [as alias]
# ... (earlier part of the code omitted) ...
            time /= globalHeader["ADFrequency"]
            if n2 < 0:
                break
            if dataBlockHeader["Type"] == 1:
                # spike
                unit = dataBlockHeader["Unit"]
                pos = pos_spikes[chan, unit]
                stimearrays[chan, unit][pos] = time
                if load_spike_waveform and n1 * n2 != 0:
                    swfarrays[chan, unit][pos, :, :] = (
                        np.fromstring(fid.read(n1 * n2 * 2), dtype="i2").reshape(n1, n2).astype("f4")
                    )
                else:
                    fid.seek(n1 * n2 * 2, 1)
                pos_spikes[chan, unit] += 1
            elif dataBlockHeader["Type"] == 4:
                # event
                pos = eventpositions[chan]
                evarrays[chan][pos] = time
                eventpositions[chan] += 1
            elif dataBlockHeader["Type"] == 5:
                # signal
                data = np.fromstring(fid.read(n2 * 2), dtype="i2").astype("f4")
                sigarrays[chan][sample_positions[chan]:sample_positions[chan] + data.size] = data
                sample_positions[chan] += data.size

    ## Step 3: create neo objects
    for chan, h in iteritems(eventHeaders):
        if lazy:
            times = []
        else:
            times = evarrays[chan]
        ea = EventArray(times * pq.s, channel_name=eventHeaders[chan]["Name"], channel_index=chan)
        if lazy:
            ea.lazy_shape = nb_events[chan]
        seg.eventarrays.append(ea)

    for chan, h in iteritems(slowChannelHeaders):
        if lazy:
            signal = []
        else:
            if globalHeader["Version"] == 100 or globalHeader["Version"] == 101:
                gain = 5000.0 / (2048 * slowChannelHeaders[chan]["Gain"] * 1000.0)
            elif globalHeader["Version"] == 102:
                gain = 5000.0 / (2048 * slowChannelHeaders[chan]["Gain"] * slowChannelHeaders[chan]["PreampGain"])
            elif globalHeader["Version"] >= 103:
                gain = globalHeader["SlowMaxMagnitudeMV"] / (
                    0.5
                    * (2 ** globalHeader["BitsPerSpikeSample"])
                    * slowChannelHeaders[chan]["Gain"]
                    * slowChannelHeaders[chan]["PreampGain"]
                )
            signal = sigarrays[chan] * gain
        anasig = AnalogSignal(
            signal * pq.V,
            sampling_rate=float(slowChannelHeaders[chan]["ADFreq"]) * pq.Hz,
            t_start=t_starts[chan] * pq.s,
            channel_index=slowChannelHeaders[chan]["Channel"],
            channel_name=slowChannelHeaders[chan]["Name"],
        )
        if lazy:
            anasig.lazy_shape = nb_samples[chan]
        seg.analogsignals.append(anasig)

    for (chan, unit), value in np.ndenumerate(nb_spikes):
        if nb_spikes[chan, unit] == 0:
            continue
        if lazy:
            times = []
            waveforms = None
            t_stop = 0
        else:
            times = stimearrays[chan, unit]
            t_stop = times.max()
            if load_spike_waveform:
                if globalHeader["Version"] < 103:
                    gain = 3000.0 / (2048 * dspChannelHeaders[chan]["Gain"] * 1000.0)
                elif globalHeader["Version"] >= 103 and globalHeader["Version"] < 105:
                    gain = globalHeader["SpikeMaxMagnitudeMV"] / (
                        0.5 * 2.0 ** (globalHeader["BitsPerSpikeSample"]) * 1000.0
                    )
                elif globalHeader["Version"] > 105:
                    gain = globalHeader["SpikeMaxMagnitudeMV"] / (
                        0.5 * 2.0 ** (globalHeader["BitsPerSpikeSample"]) * globalHeader["SpikePreAmpGain"]
                    )
                waveforms = swfarrays[chan, unit] * gain * pq.V
            else:
                waveforms = None
        sptr = SpikeTrain(times, units="s", t_stop=t_stop * pq.s, waveforms=waveforms)
        sptr.annotate(unit_name=dspChannelHeaders[chan]["Name"])
        sptr.annotate(channel_index=chan)
        if lazy:
            sptr.lazy_shape = nb_spikes[chan, unit]
        seg.spiketrains.append(sptr)

    seg.create_many_to_one_relationship()
    return seg
Example 12: read_segment
# Required import: from neo.core import SpikeTrain [as alias]
# Or: from neo.core.SpikeTrain import lazy_shape [as alias]
def read_segment(self, blockname=None, lazy=False, cascade=True, sortname=''):
    """
    Read a single segment from the tank. Note that TDT blocks are Neo
    segments, and TDT tanks are Neo blocks, so here the 'blockname' argument
    refers to the TDT block's name, which will be the Neo segment name.

    sortname is used to specify the external sortcode generated by offline
    spike sorting. If sortname=='PLX', there should be a
    ./sort/PLX/*.SortResult file in the tdt block, which stores the sortcode
    for every spike. It defaults to '', which uses the original online sort.
    """
    if not blockname:
        blockname = os.listdir(self.dirname)[0]

    if blockname == 'TempBlk':
        return None

    if not self.is_tdtblock(blockname):  # if not a tdt block
        return None

    subdir = os.path.join(self.dirname, blockname)
    if not os.path.isdir(subdir):
        return None

    seg = Segment(name=blockname)

    tankname = os.path.basename(self.dirname)

    # TSQ is the global index
    tsq_filename = os.path.join(subdir, tankname+'_'+blockname+'.tsq')
    dt = [('size', 'int32'),
          ('evtype', 'int32'),
          ('code', 'S4'),
          ('channel', 'uint16'),
          ('sortcode', 'uint16'),
          ('timestamp', 'float64'),
          ('eventoffset', 'int64'),
          ('dataformat', 'int32'),
          ('frequency', 'float32'),
          ]
    tsq = np.fromfile(tsq_filename, dtype=dt)

    # 0x8801: 'EVTYPE_MARK' gives the global start
    global_t_start = tsq[tsq['evtype'] == 0x8801]['timestamp'][0]

    # TEV is the old data file
    try:
        tev_filename = os.path.join(subdir, tankname+'_'+blockname+'.tev')
        #tev_array = np.memmap(tev_filename, mode='r', dtype='uint8')  # in case of memory problems use this instead
        tev_array = np.fromfile(tev_filename, dtype='uint8')
    except IOError:
        tev_filename = None

    # check for an external sortcode in ./sort/[sortname]/*.SortResult
    # (generated after offline sorting)
    sortresult_filename = None
    if sortname != '':
        try:
            for file in os.listdir(os.path.join(subdir, 'sort', sortname)):
                if file.endswith(".SortResult"):
                    sortresult_filename = os.path.join(subdir, 'sort',
                                                       sortname, file)
                    # get the new sortcode
                    newsorcode = np.fromfile(sortresult_filename, 'int8')[1024:]  # the first 1024 bytes are the file header
                    # update the sortcode with the info from this file
                    tsq['sortcode'][1:-1] = newsorcode
                    # print('sortcode updated')
                    break
        except OSError:
            sortresult_filename = None
        except IOError:
            sortresult_filename = None

    for type_code, type_label in tdt_event_type:
        mask1 = tsq['evtype'] == type_code
        codes = np.unique(tsq[mask1]['code'])

        for code in codes:
            mask2 = mask1 & (tsq['code'] == code)
            channels = np.unique(tsq[mask2]['channel'])

            for channel in channels:
                mask3 = mask2 & (tsq['channel'] == channel)

                if type_label in ['EVTYPE_STRON', 'EVTYPE_STROFF']:
                    if lazy:
                        times = [] * pq.s
                        labels = np.array([], dtype=str)
                    else:
                        times = (tsq[mask3]['timestamp'] - global_t_start) * pq.s
                        labels = tsq[mask3]['eventoffset'].view('float64').astype('S')
                    ea = Event(times=times,
                               name=code,
                               channel_index=int(channel),
                               labels=labels)
                    if lazy:
                        ea.lazy_shape = np.sum(mask3)
                    seg.events.append(ea)

                elif type_label == 'EVTYPE_SNIP':
                    sortcodes = np.unique(tsq[mask3]['sortcode'])
                    for sortcode in sortcodes:
                        mask4 = mask3 & (tsq['sortcode'] == sortcode)
                        nb_spike = np.sum(mask4)
                        # ... (remaining code omitted) ...
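A usage sketch, assuming this method belongs to neo's TdtIO (consistent with the tank/block naming in the docstring); the tank path and block name are placeholders:

from neo.io import TdtIO  # assumption: the IO class this method is defined on

io = TdtIO(dirname='path/to/my_tank')
seg = io.read_segment(blockname='Block-1', sortname='')  # '' keeps the original online sort
print(len(seg.spiketrains), len(seg.events))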
Example 13: read_block
# Required import: from neo.core import SpikeTrain [as alias]
# Or: from neo.core.SpikeTrain import lazy_shape [as alias]
def read_block(self,
               lazy=False,
               cascade=True,
               ):
    bl = Block()
    tankname = os.path.basename(self.dirname)
    bl.file_origin = tankname
    if not cascade:
        return bl
    for blockname in os.listdir(self.dirname):
        if blockname == 'TempBlk':
            continue
        subdir = os.path.join(self.dirname, blockname)
        if not os.path.isdir(subdir):
            continue

        seg = Segment(name=blockname)
        bl.segments.append(seg)

        # TSQ is the global index
        tsq_filename = os.path.join(subdir, tankname+'_'+blockname+'.tsq')
        dt = [('size', 'int32'),
              ('evtype', 'int32'),
              ('code', 'S4'),
              ('channel', 'uint16'),
              ('sortcode', 'uint16'),
              ('timestamp', 'float64'),
              ('eventoffset', 'int64'),
              ('dataformat', 'int32'),
              ('frequency', 'float32'),
              ]
        tsq = np.fromfile(tsq_filename, dtype=dt)

        # 0x8801: 'EVTYPE_MARK' gives the global start
        global_t_start = tsq[tsq['evtype'] == 0x8801]['timestamp'][0]

        # TEV is the old data file
        if os.path.exists(os.path.join(subdir, tankname+'_'+blockname+'.tev')):
            tev_filename = os.path.join(subdir, tankname+'_'+blockname+'.tev')
            #tev_array = np.memmap(tev_filename, mode='r', dtype='uint8')  # in case of memory problems use this instead
            tev_array = np.fromfile(tev_filename, dtype='uint8')
        else:
            tev_filename = None

        for type_code, type_label in tdt_event_type:
            mask1 = tsq['evtype'] == type_code
            codes = np.unique(tsq[mask1]['code'])

            for code in codes:
                mask2 = mask1 & (tsq['code'] == code)
                channels = np.unique(tsq[mask2]['channel'])

                for channel in channels:
                    mask3 = mask2 & (tsq['channel'] == channel)

                    if type_label in ['EVTYPE_STRON', 'EVTYPE_STROFF']:
                        if lazy:
                            times = [] * pq.s
                            labels = np.array([], dtype=str)
                        else:
                            times = (tsq[mask3]['timestamp'] - global_t_start) * pq.s
                            labels = tsq[mask3]['eventoffset'].view('float64').astype('S')
                        ea = EventArray(times=times, name=code,
                                        channel_index=int(channel),
                                        labels=labels)
                        if lazy:
                            ea.lazy_shape = np.sum(mask3)
                        seg.eventarrays.append(ea)

                    elif type_label == 'EVTYPE_SNIP':
                        sortcodes = np.unique(tsq[mask3]['sortcode'])
                        for sortcode in sortcodes:
                            mask4 = mask3 & (tsq['sortcode'] == sortcode)
                            nb_spike = np.sum(mask4)
                            sr = tsq[mask4]['frequency'][0]
                            waveformsize = tsq[mask4]['size'][0]-10
                            if lazy:
                                times = [] * pq.s
                                waveforms = None
                            else:
                                times = (tsq[mask4]['timestamp'] - global_t_start) * pq.s
                                dt = np.dtype(data_formats[tsq[mask3]['dataformat'][0]])
                                waveforms = get_chunks(tsq[mask4]['size'],
                                                       tsq[mask4]['eventoffset'],
                                                       tev_array).view(dt)
                                waveforms = waveforms.reshape(nb_spike, -1, waveformsize)
                                waveforms = waveforms * pq.mV
                            if nb_spike > 0:
                                # t_start = (tsq['timestamp'][0] - global_t_start) * pq.s  # this should work but does not
                                t_start = 0 * pq.s
                                t_stop = (tsq['timestamp'][-1] - global_t_start) * pq.s
                            else:
                                t_start = 0 * pq.s
                                t_stop = 0 * pq.s
                            st = SpikeTrain(times=times,
                                            name='Chan{} Code{}'.format(channel, sortcode),
                                            t_start=t_start,
                                            t_stop=t_stop,
                                            waveforms=waveforms,
                                            left_sweep=waveformsize/2./sr * pq.s,
                                            sampling_rate=sr * pq.Hz,
                                            )
                            st.annotate(channel_index=channel)
                            # ... (remaining code omitted) ...
Example 14: read_segment
# Required import: from neo.core import SpikeTrain [as alias]
# Or: from neo.core.SpikeTrain import lazy_shape [as alias]
# ... (earlier part of the code omitted) ...
            float_signal = self.rescale_signal_raw_to_float(
                raw_signal,
                dtype='float32',
                channel_indexes=channel_indexes)

        for i, (ind_within, ind_abs) in self._make_signal_channel_subgroups(
                channel_indexes,
                signal_group_mode=signal_group_mode).items():
            units = np.unique(signal_channels[ind_abs]['units'])
            assert len(units) == 1
            units = ensure_signal_units(units[0])

            if signal_group_mode == 'split-all':
                # in that case annotations per channel are OK
                chan_index = ind_abs[0]
                d = self.raw_annotations['blocks'][block_index]['segments'][seg_index][
                    'signals'][chan_index]
                annotations = dict(d)
                if 'name' not in annotations:
                    annotations['name'] = signal_channels['name'][chan_index]
            else:
                # when channels are grouped by the same unit, the annotations
                # carry channel_names and channel_ids arrays
                # (this will be moved into array annotations soon)
                annotations = {}
                annotations['name'] = 'Channel bundle ({}) '.format(
                    ','.join(signal_channels[ind_abs]['name']))
                annotations['channel_names'] = signal_channels[ind_abs]['name']
                annotations['channel_ids'] = signal_channels[ind_abs]['id']
            annotations = check_annotations(annotations)
            if lazy:
                anasig = AnalogSignal(np.array([]), units=units, copy=False,
                                      sampling_rate=sr, t_start=sig_t_start,
                                      **annotations)
                anasig.lazy_shape = (sig_size, len(ind_within))
            else:
                anasig = AnalogSignal(float_signal[:, ind_within], units=units,
                                      copy=False, sampling_rate=sr,
                                      t_start=sig_t_start, **annotations)
            seg.analogsignals.append(anasig)

    # SpikeTrain and waveforms (optional)
    unit_channels = self.header['unit_channels']
    for unit_index in range(len(unit_channels)):
        if not lazy and load_waveforms:
            raw_waveforms = self.get_spike_raw_waveforms(block_index=block_index,
                                                         seg_index=seg_index,
                                                         unit_index=unit_index,
                                                         t_start=t_start_,
                                                         t_stop=t_stop_)
            float_waveforms = self.rescale_waveforms_to_float(raw_waveforms,
                                                              dtype='float32',
                                                              unit_index=unit_index)
            wf_units = ensure_signal_units(unit_channels['wf_units'][unit_index])
            waveforms = pq.Quantity(float_waveforms, units=wf_units,
                                    dtype='float32', copy=False)
            wf_sampling_rate = unit_channels['wf_sampling_rate'][unit_index]
            wf_left_sweep = unit_channels['wf_left_sweep'][unit_index]
            if wf_left_sweep > 0:
                wf_left_sweep = float(wf_left_sweep) / wf_sampling_rate * pq.s
            else:
                wf_left_sweep = None
            wf_sampling_rate = wf_sampling_rate * pq.Hz
        else:
            waveforms = None
            wf_left_sweep = None
            wf_sampling_rate = None
        d = self.raw_annotations['blocks'][block_index]['segments'][seg_index]['units'][
            unit_index]
Example 15: read_block
# Required import: from neo.core import SpikeTrain [as alias]
# Or: from neo.core.SpikeTrain import lazy_shape [as alias]
def read_block(self,
               lazy=False,
               cascade=True,
               ):
    bl = Block()
    tankname = os.path.basename(self.dirname)
    bl.file_origin = tankname
    if not cascade:
        return bl
    for blockname in os.listdir(self.dirname):
        if blockname == 'TempBlk':
            continue
        subdir = os.path.join(self.dirname, blockname)
        if not os.path.isdir(subdir):
            continue

        seg = Segment(name=blockname)
        bl.segments.append(seg)

        global_t_start = None

        # Step 1: first loop for counting - tsq file
        tsq = open(os.path.join(subdir, tankname+'_'+blockname+'.tsq'), 'rb')
        hr = HeaderReader(tsq, TsqDescription)
        allsig = {}
        allspiketr = {}
        allevent = {}
        while 1:
            h = hr.read_f()
            if h is None:
                break

            channel, code, evtype = h['channel'], h['code'], h['evtype']

            if Types[evtype] == 'EVTYPE_UNKNOWN':
                pass

            elif Types[evtype] == 'EVTYPE_MARK':
                if global_t_start is None:
                    global_t_start = h['timestamp']

            elif Types[evtype] == 'EVTYPE_SCALER':
                # TODO
                pass

            elif Types[evtype] == 'EVTYPE_STRON' or \
                    Types[evtype] == 'EVTYPE_STROFF':
                # EVENTS
                if code not in allevent:
                    allevent[code] = {}
                if channel not in allevent[code]:
                    ea = EventArray(name=code, channel_index=channel)
                    # for counting:
                    ea.lazy_shape = 0
                    ea.maxlabelsize = 0
                    allevent[code][channel] = ea
                allevent[code][channel].lazy_shape += 1
                strobe, = struct.unpack('d', struct.pack('q', h['eventoffset']))
                strobe = str(strobe)
                if len(strobe) >= allevent[code][channel].maxlabelsize:
                    allevent[code][channel].maxlabelsize = len(strobe)

                #~ ev = Event()
                #~ ev.time = h['timestamp'] - global_t_start
                #~ ev.name = code
                #~ # the strobe attribute is packed into eventoffset
                #~ strobe, = struct.unpack('d', struct.pack('q', h['eventoffset']))
                #~ ev.label = str(strobe)
                #~ seg._events.append(ev)

            elif Types[evtype] == 'EVTYPE_SNIP':
                if code not in allspiketr:
                    allspiketr[code] = {}
                if channel not in allspiketr[code]:
                    allspiketr[code][channel] = {}
                if h['sortcode'] not in allspiketr[code][channel]:
                    sptr = SpikeTrain([], units='s',
                                      name=str(h['sortcode']),
                                      #t_start=global_t_start,
                                      t_start=0.*pq.s,
                                      t_stop=0.*pq.s,  # temporary
                                      left_sweep=(h['size']-10.)/2./h['frequency'] * pq.s,
                                      sampling_rate=h['frequency'] * pq.Hz,
                                      )
                    #~ sptr.channel = channel
                    #sptr.annotations['channel_index'] = channel
                    sptr.annotate(channel_index=channel)
                    # for counting:
                    sptr.lazy_shape = 0
                    sptr.pos = 0
                    sptr.waveformsize = h['size']-10
                    # ... (remaining code omitted) ...