This article collects typical usage examples of the Python method neo.core.SpikeTrain.annotate. If you are unsure what SpikeTrain.annotate does, how to use it, or want to see it in context, the curated code examples below may help. You can also explore further usage examples of the containing class, neo.core.SpikeTrain.
The following presents 12 code examples of the SpikeTrain.annotate method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
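Before the examples, a minimal self-contained sketch of the method itself may help; annotate() simply stores arbitrary keyword arguments in the object's annotations dictionary (the spike times and annotation keys below are invented for illustration):
import quantities as pq
from neo.core import SpikeTrain
st = SpikeTrain([0.1, 0.5, 1.2] * pq.s, t_start=0.0 * pq.s, t_stop=2.0 * pq.s)
st.annotate(channel_index=3, label='unit A')  # any keyword arguments are accepted
print(st.annotations)  # {'channel_index': 3, 'label': 'unit A'}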
Example 1: read_spiketrain
# Required imports: from neo.core import SpikeTrain [as alias]
# Or: from neo.core.SpikeTrain import annotate [as alias]
def read_spiketrain(self,
# the first two keyword arguments are imposed by the neo.io API
lazy=False,
cascade=True,
segment_duration=15.,
t_start=-1,
channel_index=0,
):
"""
With this IO, a SpikeTrain can be accessed directly by its channel number.
"""
# There are two possible behaviours for a SpikeTrain:
# holding many Spike instances, or directly holding spike times.
# Here we choose the first:
if not HAVE_SCIPY:
raise SCIPY_ERR
num_spike_by_spiketrain = 40
sr = 10000.
if lazy:
times = []
else:
times = (np.random.rand(num_spike_by_spiketrain) * segment_duration +
t_start)
# create a spiketrain
spiketr = SpikeTrain(times,
t_start=t_start * pq.s,
t_stop=(t_start + segment_duration) * pq.s,
units=pq.s,
name='it is a spiketrain from exampleio',
)
if lazy:
# add the lazy_shape attribute: the shape the data would have if loaded
spiketr.lazy_shape = (num_spike_by_spiketrain,)
# our spiketrains also hold the waveforms:
# 1. generate a fake spike shape (a 2D array if trodeness > 1)
w1 = -stats.nct.pdf(np.arange(11, 60, 4), 5, 20)[::-1] / 3.
w2 = stats.nct.pdf(np.arange(11, 60, 2), 5, 20)
w = np.r_[w1, w2]
w = -w / max(w)
if not lazy:
# in the neo API the waveforms attribute is 3D to accommodate tetrodes;
# here it is a mono electrode, so dimension 1 has size 1
waveforms = np.tile(w[np.newaxis, np.newaxis, :],
(num_spike_by_spiketrain, 1, 1))
waveforms *= np.random.randn(*waveforms.shape) / 6 + 1
spiketr.waveforms = waveforms * pq.mV
spiketr.sampling_rate = sr * pq.Hz
spiketr.left_sweep = 1.5 * pq.s
# for attributes outside the neo schema, use annotate
spiketr.annotate(channel_index=channel_index)
return spiketr
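Once annotated, the extra attribute travels with the object and can be read back from its annotations dictionary. A hedged usage sketch, assuming io is an instance of the example IO class above:
# hypothetical usage of the reader above
sptr = io.read_spiketrain(segment_duration=15., t_start=-1, channel_index=4)
print(sptr.annotations['channel_index'])  # -> 4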
Example 2: _extract_spikes
# Required imports: from neo.core import SpikeTrain [as alias]
# Or: from neo.core.SpikeTrain import annotate [as alias]
def _extract_spikes(self, data, metadata, channel_index, lazy):
spiketrain = None
if lazy:
if channel_index in data[:, 1]:
spiketrain = SpikeTrain([], units=pq.ms, t_stop=0.0)
spiketrain.lazy_shape = None
else:
spike_times = self._extract_array(data, channel_index)
if len(spike_times) > 0:
spiketrain = SpikeTrain(spike_times, units=pq.ms, t_stop=spike_times.max())
if spiketrain is not None:
spiketrain.annotate(label=metadata["label"],
channel_index=channel_index,
dt=metadata["dt"])
return spiketrain
Example 3: create_all_annotated
# Required imports: from neo.core import SpikeTrain [as alias]
# Or: from neo.core.SpikeTrain import annotate [as alias]
def create_all_annotated(cls):
times = cls.rquant(1, pq.s)
signal = cls.rquant(1, pq.V)
blk = Block()
blk.annotate(**cls.rdict(3))
seg = Segment()
seg.annotate(**cls.rdict(4))
blk.segments.append(seg)
asig = AnalogSignal(signal=signal, sampling_rate=pq.Hz)
asig.annotate(**cls.rdict(2))
seg.analogsignals.append(asig)
isig = IrregularlySampledSignal(times=times, signal=signal,
time_units=pq.s)
isig.annotate(**cls.rdict(2))
seg.irregularlysampledsignals.append(isig)
epoch = Epoch(times=times, durations=times)
epoch.annotate(**cls.rdict(4))
seg.epochs.append(epoch)
event = Event(times=times)
event.annotate(**cls.rdict(4))
seg.events.append(event)
spiketrain = SpikeTrain(times=times, t_stop=pq.s, units=pq.s)
d = cls.rdict(6)
d["quantity"] = pq.Quantity(10, "mV")
d["qarray"] = pq.Quantity(range(10), "mA")
spiketrain.annotate(**d)
seg.spiketrains.append(spiketrain)
chx = ChannelIndex(name="achx", index=[1, 2], channel_ids=[0, 10])
chx.annotate(**cls.rdict(5))
blk.channel_indexes.append(chx)
unit = Unit()
unit.annotate(**cls.rdict(2))
chx.units.append(unit)
return blk
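Every container created by this helper carries annotations, including quantity-valued ones on the SpikeTrain. A small read-back sketch, assuming blk was returned by the (test-class) method above:
# hypothetical read-back of the two explicit annotation keys
st = blk.segments[0].spiketrains[0]
print(st.annotations['quantity'])  # 10.0 mV
print(st.annotations['qarray'])    # [0 1 ... 9] mA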
Example 4: read_segment
# Required imports: from neo.core import SpikeTrain [as alias]
# Or: from neo.core.SpikeTrain import annotate [as alias]
def read_segment(self,
lazy=False,
cascade=True,
delimiter='\t',
t_start=0. * pq.s,
unit=pq.s,
):
"""
Arguments:
delimiter: column delimiter in the file; '\t', one space, two spaces, ',' or ';'
t_start: start time of all spiketrains (0 by default)
unit: unit of the spike times; can be a str or directly a Quantity
"""
unit = pq.Quantity(1, unit)
seg = Segment(file_origin=os.path.basename(self.filename))
if not cascade:
return seg
f = open(self.filename, 'Ur')
for i, line in enumerate(f):
alldata = line[:-1].split(delimiter)
if alldata[-1] == '': alldata = alldata[:-1]
if alldata[0] == '': alldata = alldata[1:]
if lazy:
spike_times = []
t_stop = t_start
else:
spike_times = np.array(alldata).astype('f')
t_stop = spike_times.max() * unit
sptr = SpikeTrain(spike_times * unit, t_start=t_start, t_stop=t_stop)
if lazy:
sptr.lazy_shape = len(alldata)
sptr.annotate(channel_index=i)
seg.spiketrains.append(sptr)
f.close()
seg.create_many_to_one_relationship()
return seg
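A hedged usage sketch for this reader, assuming it belongs to an IO class constructed with a filename (for example neo's AsciiSpikeTrainIO) and that the file holds one spiketrain per line:
# hypothetical usage: two spiketrains, tab-separated spike times in seconds
f = open('spikes.txt', 'w')
f.write('0.1\t0.5\t1.2\n')
f.write('0.3\t0.9\n')
f.close()
seg = AsciiSpikeTrainIO(filename='spikes.txt').read_segment(delimiter='\t')
print(seg.spiketrains[1].annotations['channel_index'])  # -> 1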
Example 5: read_segment
# Required imports: from neo.core import SpikeTrain [as alias]
# Or: from neo.core.SpikeTrain import annotate [as alias]
def read_segment(self, lazy=False, cascade=True, load_spike_waveform=True):
"""
Read in a segment.
Arguments:
load_spike_waveform: whether to load the spike waveforms (default True)
"""
fid = open(self.filename, 'rb')
globalHeader = HeaderReader(fid, GlobalHeader).read_f(offset=0)
# metadata
seg = Segment()
seg.rec_datetime = datetime.datetime(
globalHeader.pop('Year'),
globalHeader.pop('Month'),
globalHeader.pop('Day'),
globalHeader.pop('Hour'),
globalHeader.pop('Minute'),
globalHeader.pop('Second')
)
seg.file_origin = os.path.basename(self.filename)
for key, val in iteritems(globalHeader):
seg.annotate(**{key: val})
if not cascade:
return seg
## Step 1 : read headers
# dsp channels header = spikes and waveforms
dspChannelHeaders = {}
maxunit = 0
maxchan = 0
for _ in range(globalHeader['NumDSPChannels']):
# channel is 1 based
channelHeader = HeaderReader(fid, ChannelHeader).read_f(offset=None)
channelHeader['Template'] = np.array(channelHeader['Template']).reshape((5, 64))
channelHeader['Boxes'] = np.array(channelHeader['Boxes']).reshape((5, 2, 4))
dspChannelHeaders[channelHeader['Channel']] = channelHeader
maxunit = max(channelHeader['NUnits'], maxunit)
maxchan = max(channelHeader['Channel'], maxchan)
# event channel header
eventHeaders = {}
for _ in range(globalHeader['NumEventChannels']):
eventHeader = HeaderReader(fid, EventHeader).read_f(offset=None)
eventHeaders[eventHeader['Channel']] = eventHeader
# slow channel header = signal
slowChannelHeaders = {}
for _ in range(globalHeader['NumSlowChannels']):
slowChannelHeader = HeaderReader(fid, SlowChannelHeader).read_f(offset=None)
slowChannelHeaders[slowChannelHeader['Channel']] = slowChannelHeader
## Step 2 : a first loop for counting size
# signal
nb_samples = np.zeros(len(slowChannelHeaders))
sample_positions = np.zeros(len(slowChannelHeaders))
t_starts = np.zeros(len(slowChannelHeaders), dtype='f')
# spike times and waveforms
nb_spikes = np.zeros((maxchan + 1, maxunit + 1), dtype='i')
wf_sizes = np.zeros((maxchan + 1, maxunit + 1, 2), dtype='i')
# event arrays
nb_events = {}
#maxstrsizeperchannel = { }
for chan, h in iteritems(eventHeaders):
nb_events[chan] = 0
#maxstrsizeperchannel[chan] = 0
start = fid.tell()
while fid.tell() != -1:
# read block header
dataBlockHeader = HeaderReader(fid, DataBlockHeader).read_f(offset=None)
if dataBlockHeader is None: break
chan = dataBlockHeader['Channel']
unit = dataBlockHeader['Unit']
n1, n2 = dataBlockHeader['NumberOfWaveforms'], dataBlockHeader['NumberOfWordsInWaveform']
time = (dataBlockHeader['UpperByteOf5ByteTimestamp'] * 2. ** 32 +
dataBlockHeader['TimeStamp'])
if dataBlockHeader['Type'] == 1:
nb_spikes[chan, unit] += 1
wf_sizes[chan, unit, :] = [n1, n2]
fid.seek(n1 * n2 * 2, 1)
elif dataBlockHeader['Type'] == 4:
# event
nb_events[chan] += 1
elif dataBlockHeader['Type'] == 5:
# continuous signal
fid.seek(n2 * 2, 1)
if n2 > 0:
nb_samples[chan] += n2
if nb_samples[chan] == 0:
t_starts[chan] = time
#......... part of the code omitted here .........
Example 6: read_one_channel_event_or_spike
# Required imports: from neo.core import SpikeTrain [as alias]
# Or: from neo.core.SpikeTrain import annotate [as alias]
def read_one_channel_event_or_spike(self, fid, channel_num, header,
lazy=True):
# returns a SpikeTrain or an Event
channelHeader = header.channelHeaders[channel_num]
if channelHeader.firstblock < 0:
return
if channelHeader.kind not in [2, 3, 4, 5, 6, 7, 8]:
return
## Step 1: type of blocks
if channelHeader.kind in [2, 3, 4]:
# Event data
fmt = [('tick', 'i4')]
elif channelHeader.kind in [5]:
# Marker data
fmt = [('tick', 'i4'), ('marker', 'i4')]
elif channelHeader.kind in [6]:
# AdcMark data
fmt = [('tick', 'i4'), ('marker', 'i4'),
('adc', 'S%d' % channelHeader.n_extra)]
elif channelHeader.kind in [7]:
# RealMark data
fmt = [('tick', 'i4'), ('marker', 'i4'),
('real', 'S%d' % channelHeader.n_extra)]
elif channelHeader.kind in [8]:
# TextMark data
fmt = [('tick', 'i4'), ('marker', 'i4'),
('label', 'S%d' % channelHeader.n_extra)]
dt = np.dtype(fmt)
## Step 2: a first read, for allocating memory
fid.seek(channelHeader.firstblock)
totalitems = 0
for _ in range(channelHeader.blocks):
blockHeader = HeaderReader(fid, np.dtype(blockHeaderDesciption))
totalitems += blockHeader.items
if blockHeader.succ_block > 0:
fid.seek(blockHeader.succ_block)
#~ print 'totalitems' , totalitems
if lazy:
if channelHeader.kind in [2, 3, 4, 5, 8]:
ea = Event()
ea.annotate(channel_index=channel_num)
ea.lazy_shape = totalitems
return ea
elif channelHeader.kind in [6, 7]:
# correct value for t_stop to be put in later
sptr = SpikeTrain([] * pq.s, t_stop=1e99)
sptr.annotate(channel_index=channel_num, ced_unit=0)
sptr.lazy_shape = totalitems
return sptr
else:
alltrigs = np.zeros(totalitems, dtype=dt)
## Step 3 : read
fid.seek(channelHeader.firstblock)
pos = 0
for _ in range(channelHeader.blocks):
blockHeader = HeaderReader(
fid, np.dtype(blockHeaderDesciption))
# read all events in block
trigs = np.fromstring(
fid.read(blockHeader.items * dt.itemsize), dtype=dt)
alltrigs[pos:pos + trigs.size] = trigs
pos += trigs.size
if blockHeader.succ_block > 0:
fid.seek(blockHeader.succ_block)
## Step 4: convert into standard neo classes: events or spiketrains
alltimes = alltrigs['tick'].astype('f') * header.us_per_time * header.dtime_base * pq.s
if channelHeader.kind in [2, 3, 4, 5, 8]:
# events
ea = Event(alltimes)
ea.annotate(channel_index=channel_num)
if channelHeader.kind >= 5:
# a Spike2 marker is closer to neo's notion of a label
ea.labels = alltrigs['marker'].astype('S32')
if channelHeader.kind == 8:
ea.annotate(extra_labels=alltrigs['label'])
return ea
elif channelHeader.kind in [6, 7]:
# spiketrains
# waveforms
if channelHeader.kind == 6:
waveforms = np.fromstring(alltrigs['adc'].tostring(),
dtype='i2')
waveforms = waveforms.astype(
'f4') * channelHeader.scale / 6553.6 + \
channelHeader.offset
elif channelHeader.kind == 7:
waveforms = np.fromstring(alltrigs['real'].tostring(),
dtype='f4')
if header.system_id >= 6 and channelHeader.interleave > 1:
#......... part of the code omitted here .........
Example 7: read_nev
# Required imports: from neo.core import SpikeTrain [as alias]
# Or: from neo.core.SpikeTrain import annotate [as alias]
def read_nev(self, filename_nev, seg, lazy, cascade, load_waveforms=False):
# basic header
dt = [('header_id', 'S8'),
('ver_major', 'uint8'),
('ver_minor', 'uint8'),
('additionnal_flag', 'uint16'),  # flags, currently basically unused
('header_size', 'uint32'),  # i.e. index of first data
('packet_size', 'uint32'),  # number of packet bytes, i.e. bytes per sample
('sampling_rate', 'uint32'),  # time resolution of time stamps in Hz, i.e. data packets
('waveform_sampling_rate', 'uint32'),  # sampling frequency of waveforms in Hz
('window_datetime', 'S16'),
('application', 'S32'),
('comments', 'S256'),  # comments
('num_ext_header', 'uint32')  # number of extended headers
]
nev_header = h = np.fromfile(filename_nev, count=1, dtype=dt)[0]
version = '{0}.{1}'.format(h['ver_major'], h['ver_minor'])
assert h['header_id'].decode('ascii') == 'NEURALEV' or version == '2.1', 'Unsupported version {0}'.format(version)
seg.annotate(blackrock_version=version)
seg.rec_datetime = get_window_datetime(nev_header['window_datetime'])
sr = float(h['sampling_rate'])
wsr = float(h['waveform_sampling_rate'])
if not cascade:
return
# extended header
# this consists of N blocks with an 8-byte code + 24 data bytes;
# the data bytes depend on the code and need to be converted case by case
raw_ext_header = np.memmap(filename_nev, offset=np.dtype(dt).itemsize,
dtype=[('code', 'S8'), ('data', 'S24')], shape=h['num_ext_header'])
# this is for debugging
ext_header = {}
for code, dt_ext in ext_nev_header_codes.items():
sel = raw_ext_header['code'] == code
ext_header[code] = raw_ext_header[sel].view(dt_ext)
# channel label
neuelbl_header = ext_header['NEUEVLBL']
# Sometimes when making the channel labels we have only one channel and so must address it differently.
try:
channel_labels = dict(zip(neuelbl_header['channel_id'], neuelbl_header['channel_label']))
except TypeError:
channel_labels = dict([(neuelbl_header['channel_id'], neuelbl_header['channel_label'])])
# TODO: ext_header['DIGLABEL'] -- is there only one label (since there is no id in that case)?
# TODO ECOMMENT + CCOMMENT for annotations
# TODO NEUEVFLT for annotations
# read data packet and markers
dt0 = [('samplepos', 'uint32'),
('id', 'uint16'),
('value', 'S{0}'.format(h['packet_size'] - 6)),
]
data = np.memmap(filename_nev, offset=h['header_size'], dtype=dt0)
all_ids = np.unique(data['id'])
t_start = 0 * pq.s
t_stop = data['samplepos'][-1] / sr * pq.s
# read events (digital + analog + comment)
def create_event_array_trig_or_analog(selection, name, labelmode=None):
if lazy:
times = []
labels = np.array([], dtype='S')
else:
times = data_trigger['samplepos'][selection].astype(float) / sr
if labelmode == 'digital_port':
labels = data_trigger['digital_port'][selection].astype('S2')
elif labelmode is None:
labels = None
ev = EventArray(times=times * pq.s,
labels=labels,
name=name)
if lazy:
ev.lazy_shape = np.sum(selection)
seg.eventarrays.append(ev)
mask = (data['id'] == 0)
dt_trig = [('samplepos', 'uint32'),
('id', 'uint16'),
('reason', 'uint8'),
('reserved0', 'uint8'),
('digital_port', 'uint16'),
('reserved1', 'S{0}'.format(h['packet_size'] - 10)),
]
data_trigger = data.view(dt_trig)[mask]
# digital triggers (packet id 0)
is_digital = (data_trigger['reason'] & 1) > 0
create_event_array_trig_or_analog(is_digital, 'Digital trigger', labelmode='digital_port')
# analog triggers (packet id 0)
if version in ['2.1', '2.2']:
for i in range(5):
#......... part of the code omitted here .........
Example 8: read_segment
# Required imports: from neo.core import SpikeTrain [as alias]
# Or: from neo.core.SpikeTrain import annotate [as alias]
def read_segment(self, lazy=False, cascade=True):
fid = open(self.filename, 'rb')
global_header = HeaderReader(fid, GlobalHeader).read_f(offset=0)
# ~ print globalHeader
#~ print 'version' , globalHeader['version']
seg = Segment()
seg.file_origin = os.path.basename(self.filename)
seg.annotate(neuroexplorer_version=global_header['version'])
seg.annotate(comment=global_header['comment'])
if not cascade:
return seg
offset = 544
for i in range(global_header['nvar']):
entity_header = HeaderReader(fid, EntityHeader).read_f(
offset=offset + i * 208)
entity_header['name'] = entity_header['name'].replace('\x00', '')
#print 'i',i, entityHeader['type']
if entity_header['type'] == 0:
# neuron
if lazy:
spike_times = [] * pq.s
else:
spike_times = np.memmap(self.filename, np.dtype('i4'), 'r',
shape=(entity_header['n']),
offset=entity_header['offset'])
spike_times = spike_times.astype('f8') / global_header[
'freq'] * pq.s
sptr = SpikeTrain(
times=spike_times,
t_start=global_header['tbeg'] /
global_header['freq'] * pq.s,
t_stop=global_header['tend'] /
global_header['freq'] * pq.s,
name=entity_header['name'])
if lazy:
sptr.lazy_shape = entity_header['n']
sptr.annotate(channel_index=entity_header['WireNumber'])
seg.spiketrains.append(sptr)
if entity_header['type'] == 1:
# event
if lazy:
event_times = [] * pq.s
else:
event_times = np.memmap(self.filename, np.dtype('i4'), 'r',
shape=(entity_header['n']),
offset=entity_header['offset'])
event_times = event_times.astype('f8') / global_header[
'freq'] * pq.s
labels = np.array([''] * event_times.size, dtype='S')
evar = Event(times=event_times, labels=labels,
channel_name=entity_header['name'])
if lazy:
evar.lazy_shape = entity_header['n']
seg.events.append(evar)
if entity_header['type'] == 2:
# interval
if lazy:
start_times = [] * pq.s
stop_times = [] * pq.s
else:
start_times = np.memmap(self.filename, np.dtype('i4'), 'r',
shape=(entity_header['n']),
offset=entity_header['offset'])
start_times = start_times.astype('f8') / global_header[
'freq'] * pq.s
stop_times = np.memmap(self.filename, np.dtype('i4'), 'r',
shape=(entity_header['n']),
offset=entity_header['offset'] +
entity_header['n'] * 4)
stop_times = stop_times.astype('f') / global_header[
'freq'] * pq.s
epar = Epoch(times=start_times,
durations=stop_times - start_times,
labels=np.array([''] * start_times.size,
dtype='S'),
channel_name=entity_header['name'])
if lazy:
epar.lazy_shape = entity_header['n']
seg.epochs.append(epar)
if entity_header['type'] == 3:
# spiketrains and waveforms
if lazy:
spike_times = [] * pq.s
waveforms = None
else:
spike_times = np.memmap(self.filename, np.dtype('i4'), 'r',
shape=(entity_header['n']),
offset=entity_header['offset'])
spike_times = spike_times.astype('f8') / global_header[
'freq'] * pq.s
waveforms = np.memmap(self.filename, np.dtype('i2'), 'r',
#......... part of the code omitted here .........
Example 9: read_segment
# Required imports: from neo.core import SpikeTrain [as alias]
# Or: from neo.core.SpikeTrain import annotate [as alias]
#......... part of the code omitted here .........
# print('sortcode updated')
break
except OSError:
sortresult_filename = None
except IOError:
sortresult_filename = None
for type_code, type_label in tdt_event_type:
mask1 = tsq['evtype'] == type_code
codes = np.unique(tsq[mask1]['code'])
for code in codes:
mask2 = mask1 & (tsq['code'] == code)
channels = np.unique(tsq[mask2]['channel'])
for channel in channels:
mask3 = mask2 & (tsq['channel'] == channel)
if type_label in ['EVTYPE_STRON', 'EVTYPE_STROFF']:
if lazy:
times = [] * pq.s
labels = np.array([], dtype=str)
else:
times = (tsq[mask3]['timestamp'] - global_t_start) * pq.s
labels = tsq[mask3]['eventoffset'].view('float64').astype('S')
ea = Event(times=times,
name=code,
channel_index=int(channel),
labels=labels)
if lazy:
ea.lazy_shape = np.sum(mask3)
seg.events.append(ea)
elif type_label == 'EVTYPE_SNIP':
sortcodes = np.unique(tsq[mask3]['sortcode'])
for sortcode in sortcodes:
mask4 = mask3 & (tsq['sortcode'] == sortcode)
nb_spike = np.sum(mask4)
sr = tsq[mask4]['frequency'][0]
waveformsize = tsq[mask4]['size'][0] - 10
if lazy:
times = [] * pq.s
waveforms = None
else:
times = (tsq[mask4]['timestamp'] - global_t_start) * pq.s
dt = np.dtype(data_formats[tsq[mask3]['dataformat'][0]])
waveforms = get_chunks(tsq[mask4]['size'], tsq[mask4]['eventoffset'], tev_array).view(dt)
waveforms = waveforms.reshape(nb_spike, -1, waveformsize)
waveforms = waveforms * pq.mV
if nb_spike > 0:
# t_start = (tsq['timestamp'][0] - global_t_start) * pq.s  # this should work, but does not
t_start = 0 * pq.s
t_stop = (tsq['timestamp'][-1] - global_t_start) * pq.s
else:
t_start = 0 * pq.s
t_stop = 0 * pq.s
st = SpikeTrain(times=times,
name='Chan{0} Code{1}'.format(channel, sortcode),
t_start=t_start,
t_stop=t_stop,
waveforms=waveforms,
left_sweep=waveformsize / 2. / sr * pq.s,
sampling_rate=sr * pq.Hz,
)
st.annotate(channel_index=channel)
if lazy:
st.lazy_shape = nb_spike
seg.spiketrains.append(st)
elif type_label == 'EVTYPE_STREAM':
dt = np.dtype(data_formats[tsq[mask3]['dataformat'][0]])
shape = np.sum(tsq[mask3]['size'] - 10)
sr = tsq[mask3]['frequency'][0]
if lazy:
signal = []
else:
if PY3K:
signame = code.decode('ascii')
else:
signame = code
sev_filename = os.path.join(subdir, tankname + '_' + blockname + '_' + signame + '_ch' + str(channel) + '.sev')
try:
#sig_array = np.memmap(sev_filename, mode='r', dtype='uint8')  # if memory is a problem, use this instead
sig_array = np.fromfile(sev_filename, dtype='uint8')
except IOError:
sig_array = tev_array
signal = get_chunks(tsq[mask3]['size'], tsq[mask3]['eventoffset'], sig_array).view(dt)
anasig = AnalogSignal(signal=signal * pq.V,
name='{0} {1}'.format(code, channel),
sampling_rate=sr * pq.Hz,
t_start=(tsq[mask3]['timestamp'][0] - global_t_start) * pq.s,
channel_index=int(channel)
)
if lazy:
anasig.lazy_shape = shape
seg.analogsignals.append(anasig)
return seg
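Because channel_index is stored as a plain annotation, downstream code can select spiketrains without any IO-specific logic; a small sketch, assuming seg was returned by the reader above:
# hypothetical downstream filtering on the annotation written by read_segment
chan5_trains = [st for st in seg.spiketrains if st.annotations.get('channel_index') == 5]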
Example 10: read_block
# Required imports: from neo.core import SpikeTrain [as alias]
# Or: from neo.core.SpikeTrain import annotate [as alias]
def read_block(self,
lazy=False,
cascade=True,
):
bl = Block()
tankname = os.path.basename(self.dirname)
bl.file_origin = tankname
if not cascade: return bl
for blockname in os.listdir(self.dirname):
if blockname == 'TempBlk': continue
subdir = os.path.join(self.dirname, blockname)
if not os.path.isdir(subdir): continue
seg = Segment(name=blockname)
bl.segments.append(seg)
# TSQ is the global index
tsq_filename = os.path.join(subdir, tankname + '_' + blockname + '.tsq')
dt = [('size', 'int32'),
('evtype', 'int32'),
('code', 'S4'),
('channel', 'uint16'),
('sortcode', 'uint16'),
('timestamp', 'float64'),
('eventoffset', 'int64'),
('dataformat', 'int32'),
('frequency', 'float32'),
]
tsq = np.fromfile(tsq_filename, dtype=dt)
# 0x8801 ('EVTYPE_MARK') gives the global t_start
global_t_start = tsq[tsq['evtype'] == 0x8801]['timestamp'][0]
# TEV is the old data file
if os.path.exists(os.path.join(subdir, tankname + '_' + blockname + '.tev')):
tev_filename = os.path.join(subdir, tankname + '_' + blockname + '.tev')
#tev_array = np.memmap(tev_filename, mode='r', dtype='uint8')  # if memory is a problem, use this instead
tev_array = np.fromfile(tev_filename, dtype='uint8')
else:
tev_filename = None
for type_code, type_label in tdt_event_type:
mask1 = tsq['evtype'] == type_code
codes = np.unique(tsq[mask1]['code'])
for code in codes:
mask2 = mask1 & (tsq['code'] == code)
channels = np.unique(tsq[mask2]['channel'])
for channel in channels:
mask3 = mask2 & (tsq['channel'] == channel)
if type_label in ['EVTYPE_STRON', 'EVTYPE_STROFF']:
if lazy:
times = [] * pq.s
labels = np.array([], dtype=str)
else:
times = (tsq[mask3]['timestamp'] - global_t_start) * pq.s
labels = tsq[mask3]['eventoffset'].view('float64').astype('S')
ea = EventArray(times=times, name=code, channel_index=int(channel), labels=labels)
if lazy:
ea.lazy_shape = np.sum(mask3)
seg.eventarrays.append(ea)
elif type_label == 'EVTYPE_SNIP':
sortcodes = np.unique(tsq[mask3]['sortcode'])
for sortcode in sortcodes:
mask4 = mask3 & (tsq['sortcode'] == sortcode)
nb_spike = np.sum(mask4)
sr = tsq[mask4]['frequency'][0]
waveformsize = tsq[mask4]['size'][0] - 10
if lazy:
times = [] * pq.s
waveforms = None
else:
times = (tsq[mask4]['timestamp'] - global_t_start) * pq.s
dt = np.dtype(data_formats[tsq[mask3]['dataformat'][0]])
waveforms = get_chunks(tsq[mask4]['size'], tsq[mask4]['eventoffset'], tev_array).view(dt)
waveforms = waveforms.reshape(nb_spike, -1, waveformsize)
waveforms = waveforms * pq.mV
if nb_spike > 0:
# t_start = (tsq['timestamp'][0] - global_t_start) * pq.s  # this should work, but does not
t_start = 0 * pq.s
t_stop = (tsq['timestamp'][-1] - global_t_start) * pq.s
else:
t_start = 0 * pq.s
t_stop = 0 * pq.s
st = SpikeTrain(times=times,
name='Chan{} Code{}'.format(channel, sortcode),
t_start=t_start,
t_stop=t_stop,
waveforms=waveforms,
left_sweep=waveformsize / 2. / sr * pq.s,
sampling_rate=sr * pq.Hz,
)
st.annotate(channel_index=channel)
#......... part of the code omitted here .........
Example 11: read_block
# Required imports: from neo.core import SpikeTrain [as alias]
# Or: from neo.core.SpikeTrain import annotate [as alias]
def read_block(self,
lazy=False,
cascade=True,
):
bl = Block()
tankname = os.path.basename(self.dirname)
bl.file_origin = tankname
if not cascade: return bl
for blockname in os.listdir(self.dirname):
if blockname == 'TempBlk': continue
subdir = os.path.join(self.dirname, blockname)
if not os.path.isdir(subdir): continue
seg = Segment(name=blockname)
bl.segments.append(seg)
global_t_start = None
# Step 1: first loop for counting - the tsq file
tsq = open(os.path.join(subdir, tankname + '_' + blockname + '.tsq'), 'rb')
hr = HeaderReader(tsq, TsqDescription)
allsig = {}
allspiketr = {}
allevent = {}
while 1:
h = hr.read_f()
if h is None: break
channel, code, evtype = h['channel'], h['code'], h['evtype']
if Types[evtype] == 'EVTYPE_UNKNOWN':
pass
elif Types[evtype] == 'EVTYPE_MARK':
if global_t_start is None:
global_t_start = h['timestamp']
elif Types[evtype] == 'EVTYPE_SCALER':
# TODO
pass
elif Types[evtype] == 'EVTYPE_STRON' or \
Types[evtype] == 'EVTYPE_STROFF':
# EVENTS
if code not in allevent:
allevent[code] = {}
if channel not in allevent[code]:
ea = EventArray(name=code, channel_index=channel)
# for counting:
ea.lazy_shape = 0
ea.maxlabelsize = 0
allevent[code][channel] = ea
allevent[code][channel].lazy_shape += 1
strobe, = struct.unpack('d', struct.pack('q', h['eventoffset']))
strobe = str(strobe)
if len(strobe) >= allevent[code][channel].maxlabelsize:
allevent[code][channel].maxlabelsize = len(strobe)
#~ ev = Event()
#~ ev.time = h['timestamp'] - global_t_start
#~ ev.name = code
#~ # it is the strobe attribute packed into eventoffset
#~ strobe, = struct.unpack('d' , struct.pack('q' , h['eventoffset']))
#~ ev.label = str(strobe)
#~ seg._events.append( ev )
elif Types[evtype] == 'EVTYPE_SNIP':
if code not in allspiketr:
allspiketr[code] = {}
if channel not in allspiketr[code]:
allspiketr[code][channel] = {}
if h['sortcode'] not in allspiketr[code][channel]:
sptr = SpikeTrain([], units='s',
name=str(h['sortcode']),
#t_start = global_t_start,
t_start=0. * pq.s,
t_stop=0. * pq.s,  # temporary
left_sweep=(h['size'] - 10.) / 2. / h['frequency'] * pq.s,
sampling_rate=h['frequency'] * pq.Hz,
)
#~ sptr.channel = channel
#sptr.annotations['channel_index'] = channel
sptr.annotate(channel_index=channel)
# for counting:
sptr.lazy_shape = 0
sptr.pos = 0
sptr.waveformsize = h['size'] - 10
#......... part of the code omitted here .........
Example 12: read_segment
# Required imports: from neo.core import SpikeTrain [as alias]
# Or: from neo.core.SpikeTrain import annotate [as alias]
def read_segment(self, lazy=False, cascade=True, load_spike_waveform=True):
"""
"""
fid = open(self.filename, "rb")
globalHeader = HeaderReader(fid, GlobalHeader).read_f(offset=0)
# metadata
seg = Segment()
seg.rec_datetime = datetime.datetime(
globalHeader["Year"],
globalHeader["Month"],
globalHeader["Day"],
globalHeader["Hour"],
globalHeader["Minute"],
globalHeader["Second"],
)
seg.file_origin = os.path.basename(self.filename)
seg.annotate(plexon_version=globalHeader["Version"])
if not cascade:
return seg
## Step 1 : read headers
# dsp channels header = spikes and waveforms
dspChannelHeaders = {}
maxunit = 0
maxchan = 0
for _ in range(globalHeader["NumDSPChannels"]):
# channel is 1 based
channelHeader = HeaderReader(fid, ChannelHeader).read_f(offset=None)
channelHeader["Template"] = np.array(channelHeader["Template"]).reshape((5, 64))
channelHeader["Boxes"] = np.array(channelHeader["Boxes"]).reshape((5, 2, 4))
dspChannelHeaders[channelHeader["Channel"]] = channelHeader
maxunit = max(channelHeader["NUnits"], maxunit)
maxchan = max(channelHeader["Channel"], maxchan)
# event channel header
eventHeaders = {}
for _ in range(globalHeader["NumEventChannels"]):
eventHeader = HeaderReader(fid, EventHeader).read_f(offset=None)
eventHeaders[eventHeader["Channel"]] = eventHeader
# slow channel header = signal
slowChannelHeaders = {}
for _ in range(globalHeader["NumSlowChannels"]):
slowChannelHeader = HeaderReader(fid, SlowChannelHeader).read_f(offset=None)
slowChannelHeaders[slowChannelHeader["Channel"]] = slowChannelHeader
## Step 2 : a first loop for counting size
# signal
nb_samples = np.zeros(len(slowChannelHeaders))
sample_positions = np.zeros(len(slowChannelHeaders))
t_starts = np.zeros(len(slowChannelHeaders), dtype="f")
# spike times and waveforms
nb_spikes = np.zeros((maxchan + 1, maxunit + 1), dtype="i")
wf_sizes = np.zeros((maxchan + 1, maxunit + 1, 2), dtype="i")
# eventarrays
nb_events = {}
# maxstrsizeperchannel = { }
for chan, h in iteritems(eventHeaders):
nb_events[chan] = 0
# maxstrsizeperchannel[chan] = 0
start = fid.tell()
while fid.tell() != -1:
# read block header
dataBlockHeader = HeaderReader(fid, DataBlockHeader).read_f(offset=None)
if dataBlockHeader is None:
break
chan = dataBlockHeader["Channel"]
unit = dataBlockHeader["Unit"]
n1, n2 = dataBlockHeader["NumberOfWaveforms"], dataBlockHeader["NumberOfWordsInWaveform"]
time = dataBlockHeader["UpperByteOf5ByteTimestamp"] * 2.0 ** 32 + dataBlockHeader["TimeStamp"]
if dataBlockHeader["Type"] == 1:
nb_spikes[chan, unit] += 1
wf_sizes[chan, unit, :] = [n1, n2]
fid.seek(n1 * n2 * 2, 1)
elif dataBlockHeader["Type"] == 4:
# event
nb_events[chan] += 1
elif dataBlockHeader["Type"] == 5:
# continuous signal
fid.seek(n2 * 2, 1)
if n2 > 0:
nb_samples[chan] += n2
if nb_samples[chan] == 0:
t_starts[chan] = time
## Step 3: allocate memory, then a second loop for reading if not lazy
if not lazy:
# allocating mem for signal
sigarrays = {}
for chan, h in iteritems(slowChannelHeaders):
sigarrays[chan] = np.zeros(nb_samples[chan])
#......... part of the code omitted here .........