本文整理汇总了Python中neo.core.Segment.rec_datetime方法的典型用法代码示例。如果您正苦于以下问题:Python Segment.rec_datetime方法的具体用法?Python Segment.rec_datetime怎么用?Python Segment.rec_datetime使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类neo.core.Segment的用法示例。
在下文中一共展示了Segment.rec_datetime方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _group_to_neo
# 需要导入模块: from neo.core import Segment [as 别名]
# 或者: from neo.core.Segment import rec_datetime [as 别名]
def _group_to_neo(self, nix_group):
    """Build a neo ``Segment`` from *nix_group* and register it.

    The segment is constructed from the group's converted attributes,
    stamped with the group's creation time, cached in ``self._neo_map``
    under the group name, and returned.
    """
    attrs = self._nix_attr_to_neo(nix_group)
    segment = Segment(**attrs)
    # The NIX creation timestamp (presumably seconds since the epoch --
    # fromtimestamp requires that) becomes the recording datetime.
    segment.rec_datetime = datetime.fromtimestamp(nix_group.created_at)
    self._neo_map[nix_group.name] = segment
    return segment
示例2: proc_src_comments
# 需要导入模块: from neo.core import Segment [as 别名]
# 或者: from neo.core.Segment import rec_datetime [as 别名]
def proc_src_comments(srcfile, filename):
    '''Get the comments in an src file that has been
    processed by the official matlab function.

    See proc_src for details.
    '''
    # Dedicated Segment that holds only the comment events.
    comm_seg = Segment(name='Comments', file_origin=filename)
    # 'comments' is a MATLAB struct loaded as an object array;
    # flatten()[0] unwraps the scalar struct.
    commentarray = srcfile['comments'].flatten()[0]
    # Each field is an object array whose elements wrap the payload in a
    # 1-element array; index it out.
    senders = [res[0] for res in commentarray['sender'].flatten()]
    texts = [res[0] for res in commentarray['text'].flatten()]
    timeStamps = [res[0, 0] for res in commentarray['timeStamp'].flatten()]
    timeStamps = np.array(timeStamps, dtype=np.float32)
    t_start = timeStamps.min()
    # Raw timestamps are in days (pq.d); make them relative to the
    # earliest one and convert to seconds.
    timeStamps = pq.Quantity(timeStamps - t_start, units=pq.d).rescale(pq.s)
    texts = np.array(texts, dtype='S')
    senders = np.array(senders, dtype='S')
    # Convert the earliest raw timestamp to the segment's recording
    # datetime (presumably a datetime object -- see brainwaresrcio).
    t_start = brainwaresrcio.convert_brainwaresrc_timestamp(t_start.tolist())
    comments = Event(times=timeStamps, labels=texts, senders=senders)
    comm_seg.events = [comments]
    comm_seg.rec_datetime = t_start
    return comm_seg
示例3: read_segment
# 需要导入模块: from neo.core import Segment [as 别名]
# 或者: from neo.core.Segment import rec_datetime [as 别名]
def read_segment(self, lazy=False, cascade=True, load_spike_waveform=True):
    """
    Read in a segment.

    Arguments:
        load_spike_waveform : load or not waveform of spikes (default True)
    """
    fid = open(self.filename, 'rb')
    globalHeader = HeaderReader(fid, GlobalHeader).read_f(offset=0)
    # metadatas: the recording date/time is taken from the global header.
    seg = Segment()
    seg.rec_datetime = datetime.datetime(
        globalHeader.pop('Year'),
        globalHeader.pop('Month'),
        globalHeader.pop('Day'),
        globalHeader.pop('Hour'),
        globalHeader.pop('Minute'),
        globalHeader.pop('Second')
    )
    seg.file_origin = os.path.basename(self.filename)
    # NOTE(review): dict.iteritems() exists only in Python 2; on Python 3
    # this raises AttributeError -- confirm the targeted interpreter.
    for key, val in globalHeader.iteritems():
        seg.annotate(**{key: val})
    if not cascade:
        return seg
    ## Step 1 : read headers
    # dsp channels header = spikes and waveforms
    dspChannelHeaders = {}
    maxunit = 0
    maxchan = 0
    for _ in range(globalHeader['NumDSPChannels']):
        # channel is 1 based
        channelHeader = HeaderReader(fid, ChannelHeader).read_f(offset=None)
        channelHeader['Template'] = np.array(channelHeader['Template']).reshape((5,64))
        channelHeader['Boxes'] = np.array(channelHeader['Boxes']).reshape((5,2,4))
        dspChannelHeaders[channelHeader['Channel']] = channelHeader
        maxunit = max(channelHeader['NUnits'], maxunit)
        maxchan = max(channelHeader['Channel'], maxchan)
    # event channel header
    eventHeaders = { }
    for _ in range(globalHeader['NumEventChannels']):
        eventHeader = HeaderReader(fid, EventHeader).read_f(offset=None)
        eventHeaders[eventHeader['Channel']] = eventHeader
    # slow channel header = signal
    slowChannelHeaders = {}
    for _ in range(globalHeader['NumSlowChannels']):
        slowChannelHeader = HeaderReader(fid, SlowChannelHeader).read_f(offset=None)
        slowChannelHeaders[slowChannelHeader['Channel']] = slowChannelHeader
    ## Step 2 : a first loop for counting size
    # signal
    nb_samples = np.zeros(len(slowChannelHeaders))
    sample_positions = np.zeros(len(slowChannelHeaders))
    t_starts = np.zeros(len(slowChannelHeaders), dtype='f')
    #spiketimes and waveform
    nb_spikes = np.zeros((maxchan+1, maxunit+1) ,dtype='i')
    wf_sizes = np.zeros((maxchan+1, maxunit+1, 2) ,dtype='i')
    # eventarrays
    nb_events = { }
    #maxstrsizeperchannel = { }
    for chan, h in iteritems(eventHeaders):
        nb_events[chan] = 0
        #maxstrsizeperchannel[chan] = 0
    start = fid.tell()
    # The loop condition is effectively always true for a regular file;
    # the loop exits through the `break` below when no header is read.
    while fid.tell() !=-1 :
        # read block header
        dataBlockHeader = HeaderReader(fid , DataBlockHeader ).read_f(offset = None)
        if dataBlockHeader is None : break
        chan = dataBlockHeader['Channel']
        unit = dataBlockHeader['Unit']
        n1,n2 = dataBlockHeader['NumberOfWaveforms'] , dataBlockHeader['NumberOfWordsInWaveform']
        # Timestamps are 5 bytes wide: recombine the upper byte with the
        # lower 32 bits.
        time = (dataBlockHeader['UpperByteOf5ByteTimestamp']*2.**32 +
            dataBlockHeader['TimeStamp'])
        if dataBlockHeader['Type'] == 1:
            # spike: count it and skip the waveform payload
            # (n1 waveforms of n2 2-byte words)
            nb_spikes[chan,unit] +=1
            wf_sizes[chan,unit,:] = [n1,n2]
            fid.seek(n1*n2*2,1)
        elif dataBlockHeader['Type'] ==4:
            #event
            nb_events[chan] += 1
        elif dataBlockHeader['Type'] == 5:
            #continuous signal: skip the n2 2-byte samples, only counting
            fid.seek(n2*2, 1)
            if n2> 0:
                nb_samples[chan] += n2
            # NOTE(review): leading whitespace was lost in this copy; the
            # nesting of this check relative to the `if` above is assumed
            # -- confirm against the original source.
            if nb_samples[chan] ==0:
                t_starts[chan] = time
    # ......... the rest of the code is omitted here .........
示例4: read_block
# 需要导入模块: from neo.core import Segment [as 别名]
# 或者: from neo.core.Segment import rec_datetime [as 别名]
# ......... part of the code is omitted here .........
# NOTE(review): this excerpt is the tail of read_block; the enclosing
# `def` and the earlier steps (which define fid, seg, blck, list_data,
# list_chan, file_blocks, count_samples, lazy, cascade) are not shown.
# step 4: compute the length (number of samples) of the channels
# NOTE(review): np.int was removed in NumPy 1.24; plain `int` would be
# needed on modern NumPy -- confirm the pinned numpy version.
chan_len = np.zeros(len(list_data), dtype = np.int)
for ind_chan, list_blocks in enumerate(list_data):
    for ind_block in list_blocks:
        chan_len[ind_chan] += count_samples(
            file_blocks[ind_block]['m_length'])
# step 5: find channels for which data are available
ind_valid_chan = np.nonzero(chan_len)[0]
# step 6: load the data
# TODO give the possibility to load data as AnalogSignalArrays
for ind_chan in ind_valid_chan:
    list_blocks = list_data[ind_chan]
    ind = 0 # index in the data vector
    # read time stamp for the beginning of the signal
    form = '<l' # reading format
    ind_block = list_blocks[0]
    count = count_samples(file_blocks[ind_block]['m_length'])
    # The start index is stored right after the block's sample payload
    # (6-byte header + count 2-byte samples).
    fid.seek(file_blocks[ind_block]['pos']+6+count*2)
    buf = fid.read(struct.calcsize(form))
    val = struct.unpack(form , buf)
    start_index = val[0]
    # WARNING: in the following blocks are read supposing that they
    # are all contiguous and sorted in time. I don't know if it's
    # always the case. Maybe we should use the time stamp of each
    # data block to choose where to put the read data in the array.
    if not lazy:
        temp_array = np.empty(chan_len[ind_chan], dtype = np.int16)
        # NOTE: we could directly create an empty AnalogSignal and
        # load the data in it, but it is much faster to load data
        # in a temporary numpy array and create the AnalogSignals
        # from this temporary array
        for ind_block in list_blocks:
            count = count_samples(
                file_blocks[ind_block]['m_length'])
            fid.seek(file_blocks[ind_block]['pos']+6)
            temp_array[ind:ind+count] = \
                np.fromfile(fid, dtype = np.int16, count = count)
            ind += count
    sampling_rate = \
        file_blocks[list_chan[ind_chan]]['m_SampleRate'] * pq.kHz
    t_start = (start_index / sampling_rate).simplified
    if lazy:
        # Lazy mode: create an empty signal and only record its shape.
        ana_sig = AnalogSignal([],
                               sampling_rate = sampling_rate,
                               t_start = t_start,
                               name = file_blocks\
                                   [list_chan[ind_chan]]['m_Name'],
                               file_origin = \
                                   os.path.basename(self.filename),
                               units = pq.dimensionless)
        ana_sig.lazy_shape = chan_len[ind_chan]
    else:
        ana_sig = AnalogSignal(temp_array,
                               sampling_rate = sampling_rate,
                               t_start = t_start,
                               name = file_blocks\
                                   [list_chan[ind_chan]]['m_Name'],
                               file_origin = \
                                   os.path.basename(self.filename),
                               units = pq.dimensionless)
    # todo apibreak: create ChannelIndex for each signals
    # ana_sig.channel_index = \
    #     file_blocks[list_chan[ind_chan]]['m_numChannel']
    ana_sig.annotate(channel_name = \
        file_blocks[list_chan[ind_chan]]['m_Name'])
    ana_sig.annotate(channel_type = \
        file_blocks[list_chan[ind_chan]]['type_subblock'])
    seg.analogsignals.append(ana_sig)
fid.close()
if file_blocks[0]['m_TypeBlock'] == 'h': # this should always be true
    blck.rec_datetime = datetime.datetime(\
        file_blocks[0]['m_date_year'],
        file_blocks[0]['m_date_month'],
        file_blocks[0]['m_date_day'],
        file_blocks[0]['m_time_hour'],
        file_blocks[0]['m_time_minute'],
        file_blocks[0]['m_time_second'],
        10000 * file_blocks[0]['m_time_hsecond'])
    # the 10000 is here to convert m_time_hsecond from centisecond
    # to microsecond
    version = file_blocks[0]['m_version']
    blck.annotate(alphamap_version = version)
    if cascade:
        seg.rec_datetime = blck.rec_datetime.replace()
        # I couldn't find a simple copy function for datetime,
        # using replace without arguments is a twisted way to make a
        # copy
        seg.annotate(alphamap_version = version)
if cascade:
    blck.create_many_to_one_relationship()
return blck
示例5: read_segment
# 需要导入模块: from neo.core import Segment [as 别名]
# 或者: from neo.core.Segment import rec_datetime [as 别名]
def read_segment(self, lazy=False, cascade=True, load_spike_waveform=True):
    """
    Read in a segment from the file.

    Arguments:
        load_spike_waveform : load or not waveform of spikes
            (default True)
        cascade : if False, return the bare Segment with metadata only
        lazy : presumably defer loading of data arrays -- the lazy
            branch is not visible in this excerpt
    """
    fid = open(self.filename, "rb")
    globalHeader = HeaderReader(fid, GlobalHeader).read_f(offset=0)
    # metadatas: the recording date/time is taken from the global header.
    seg = Segment()
    seg.rec_datetime = datetime.datetime(
        globalHeader["Year"],
        globalHeader["Month"],
        globalHeader["Day"],
        globalHeader["Hour"],
        globalHeader["Minute"],
        globalHeader["Second"],
    )
    seg.file_origin = os.path.basename(self.filename)
    seg.annotate(plexon_version=globalHeader["Version"])
    if not cascade:
        return seg
    ## Step 1 : read headers
    # dsp channels header = spikes and waveforms
    dspChannelHeaders = {}
    maxunit = 0
    maxchan = 0
    for _ in range(globalHeader["NumDSPChannels"]):
        # channel is 1 based
        channelHeader = HeaderReader(fid, ChannelHeader).read_f(offset=None)
        channelHeader["Template"] = np.array(channelHeader["Template"]).reshape((5, 64))
        channelHeader["Boxes"] = np.array(channelHeader["Boxes"]).reshape((5, 2, 4))
        dspChannelHeaders[channelHeader["Channel"]] = channelHeader
        maxunit = max(channelHeader["NUnits"], maxunit)
        maxchan = max(channelHeader["Channel"], maxchan)
    # event channel header
    eventHeaders = {}
    for _ in range(globalHeader["NumEventChannels"]):
        eventHeader = HeaderReader(fid, EventHeader).read_f(offset=None)
        eventHeaders[eventHeader["Channel"]] = eventHeader
    # slow channel header = signal
    slowChannelHeaders = {}
    for _ in range(globalHeader["NumSlowChannels"]):
        slowChannelHeader = HeaderReader(fid, SlowChannelHeader).read_f(offset=None)
        slowChannelHeaders[slowChannelHeader["Channel"]] = slowChannelHeader
    ## Step 2 : a first loop for counting size
    # signal
    nb_samples = np.zeros(len(slowChannelHeaders))
    sample_positions = np.zeros(len(slowChannelHeaders))
    t_starts = np.zeros(len(slowChannelHeaders), dtype="f")
    # spiketimes and waveform
    nb_spikes = np.zeros((maxchan + 1, maxunit + 1), dtype="i")
    wf_sizes = np.zeros((maxchan + 1, maxunit + 1, 2), dtype="i")
    # eventarrays
    nb_events = {}
    # maxstrsizeperchannel = { }
    for chan, h in iteritems(eventHeaders):
        nb_events[chan] = 0
        # maxstrsizeperchannel[chan] = 0
    start = fid.tell()
    # The loop condition is effectively always true for a regular file;
    # the loop exits through the `break` below when no header is read.
    while fid.tell() != -1:
        # read block header
        dataBlockHeader = HeaderReader(fid, DataBlockHeader).read_f(offset=None)
        if dataBlockHeader is None:
            break
        chan = dataBlockHeader["Channel"]
        unit = dataBlockHeader["Unit"]
        n1, n2 = dataBlockHeader["NumberOfWaveforms"], dataBlockHeader["NumberOfWordsInWaveform"]
        # Timestamps are 5 bytes wide: recombine the upper byte with the
        # lower 32 bits.
        time = dataBlockHeader["UpperByteOf5ByteTimestamp"] * 2.0 ** 32 + dataBlockHeader["TimeStamp"]
        if dataBlockHeader["Type"] == 1:
            # spike: count it and skip the waveform payload
            # (n1 waveforms of n2 2-byte words)
            nb_spikes[chan, unit] += 1
            wf_sizes[chan, unit, :] = [n1, n2]
            fid.seek(n1 * n2 * 2, 1)
        elif dataBlockHeader["Type"] == 4:
            # event
            nb_events[chan] += 1
        elif dataBlockHeader["Type"] == 5:
            # continuous signal: skip the n2 2-byte samples, only counting
            fid.seek(n2 * 2, 1)
            if n2 > 0:
                nb_samples[chan] += n2
            # NOTE(review): leading whitespace was lost in this copy; the
            # nesting of this check relative to the `if` above is assumed
            # -- confirm against the original source.
            if nb_samples[chan] == 0:
                t_starts[chan] = time
    ## Step 3: allocating memory and 2 loop for reading if not lazy
    if not lazy:
        # allocating mem for signal
        sigarrays = {}
        for chan, h in iteritems(slowChannelHeaders):
            sigarrays[chan] = np.zeros(nb_samples[chan])
    # ......... the rest of the code is omitted here .........