本文整理汇总了Python中neo.core.Segment.file_origin方法的典型用法代码示例。如果您正苦于以下问题:Python Segment.file_origin方法的具体用法?Python Segment.file_origin怎么用?Python Segment.file_origin使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类neo.core.Segment
的用法示例。
在下文中一共展示了Segment.file_origin方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: read_segment
# Required module import: from neo.core import Segment [as alias]
# Or: from neo.core.Segment import file_origin [as alias]
def read_segment(self, lazy=False, cascade=True, load_spike_waveform=True):
    """
    Read in a segment.

    Arguments:
        lazy : lazy-loading flag (its effect is not visible in this excerpt).
        cascade : if False, stop after reading the global header and return
            the bare Segment.
        load_spike_waveform : load or not waveform of spikes (default True)
    """
    # NOTE(review): the file handle is never closed in the visible part of
    # this excerpt -- presumably done in the omitted tail; verify upstream.
    fid = open(self.filename, 'rb')
    globalHeader = HeaderReader(fid, GlobalHeader).read_f(offset=0)
    # metadatas
    seg = Segment()
    # Recording start time is stored field by field in the global header;
    # pop() removes the fields so they are not annotated again below.
    seg.rec_datetime = datetime.datetime(
        globalHeader.pop('Year'),
        globalHeader.pop('Month'),
        globalHeader.pop('Day'),
        globalHeader.pop('Hour'),
        globalHeader.pop('Minute'),
        globalHeader.pop('Second')
    )
    seg.file_origin = os.path.basename(self.filename)
    # NOTE(review): dict.iteritems() exists only on Python 2; other examples
    # in this file use the compat helper iteritems(d) -- confirm intended.
    for key, val in globalHeader.iteritems():
        seg.annotate(**{key: val})
    if not cascade:
        # header-only read requested
        return seg

    ## Step 1 : read headers
    # dsp channels header = spikes and waveforms
    dspChannelHeaders = {}
    maxunit = 0
    maxchan = 0
    for _ in range(globalHeader['NumDSPChannels']):
        # channel is 1 based
        channelHeader = HeaderReader(fid, ChannelHeader).read_f(offset=None)
        channelHeader['Template'] = np.array(channelHeader['Template']).reshape((5,64))
        channelHeader['Boxes'] = np.array(channelHeader['Boxes']).reshape((5,2,4))
        dspChannelHeaders[channelHeader['Channel']] = channelHeader
        maxunit = max(channelHeader['NUnits'], maxunit)
        maxchan = max(channelHeader['Channel'], maxchan)

    # event channel header
    eventHeaders = { }
    for _ in range(globalHeader['NumEventChannels']):
        eventHeader = HeaderReader(fid, EventHeader).read_f(offset=None)
        eventHeaders[eventHeader['Channel']] = eventHeader

    # slow channel header = signal
    slowChannelHeaders = {}
    for _ in range(globalHeader['NumSlowChannels']):
        slowChannelHeader = HeaderReader(fid, SlowChannelHeader).read_f(offset=None)
        slowChannelHeaders[slowChannelHeader['Channel']] = slowChannelHeader

    ## Step 2 : a first loop for counting size
    # signal
    nb_samples = np.zeros(len(slowChannelHeaders))
    sample_positions = np.zeros(len(slowChannelHeaders))
    t_starts = np.zeros(len(slowChannelHeaders), dtype='f')
    #spiketimes and waveform
    nb_spikes = np.zeros((maxchan+1, maxunit+1) ,dtype='i')
    wf_sizes = np.zeros((maxchan+1, maxunit+1, 2) ,dtype='i')
    # eventarrays
    nb_events = { }
    #maxstrsizeperchannel = { }
    for chan, h in iteritems(eventHeaders):
        nb_events[chan] = 0
        #maxstrsizeperchannel[chan] = 0
    start = fid.tell()
    # NOTE(review): fid.tell() never returns -1, so this condition is always
    # true; the loop actually terminates via the `break` below (or via code
    # omitted from this excerpt).
    while fid.tell() !=-1 :
        # read block header
        dataBlockHeader = HeaderReader(fid , DataBlockHeader ).read_f(offset = None)
        if dataBlockHeader is None : break
        chan = dataBlockHeader['Channel']
        unit = dataBlockHeader['Unit']
        n1,n2 = dataBlockHeader['NumberOfWaveforms'] , dataBlockHeader['NumberOfWordsInWaveform']
        # 40-bit timestamp: high byte shifted above the 32-bit lower part
        time = (dataBlockHeader['UpperByteOf5ByteTimestamp']*2.**32 +
                dataBlockHeader['TimeStamp'])
        if dataBlockHeader['Type'] == 1:
            # spike block: count it and skip over the waveform samples
            nb_spikes[chan,unit] +=1
            wf_sizes[chan,unit,:] = [n1,n2]
            fid.seek(n1*n2*2,1)
        elif dataBlockHeader['Type'] ==4:
            #event
            nb_events[chan] += 1
        elif dataBlockHeader['Type'] == 5:
            #continuous signal
            fid.seek(n2*2, 1)
            if n2> 0:
                nb_samples[chan] += n2
            # NOTE(review): original indentation was lost in this excerpt; in
            # upstream neo the t_start capture happens *before* the sample
            # count is incremented -- verify nesting against the real source.
            if nb_samples[chan] ==0:
                t_starts[chan] = time
    # ......... remainder of the code omitted in this excerpt .........
示例2: read_block
# Required module import: from neo.core import Segment [as alias]
# Or: from neo.core.Segment import file_origin [as alias]
def read_block(self,
               # the 2 first keyword arguments are imposed by neo.io API
               lazy = False,
               cascade = True):
    """
    Return a Block.
    """

    def count_samples(m_length):
        """
        Count the number of signal samples available in a type 5 data block
        of length m_length
        """
        # for information about type 5 data block, see [1]
        count = int((m_length-6)/2-2)
        # -6 corresponds to the header of block 5, and the -2 take into
        # account the fact that last 2 values are not available as the 4
        # corresponding bytes are coding the time stamp of the beginning
        # of the block
        return count

    # create the neo Block that will be returned at the end
    blck = Block(file_origin = os.path.basename(self.filename))
    # NOTE(review): redundant -- file_origin was already set through the
    # constructor on the previous line.
    blck.file_origin = os.path.basename(self.filename)
    fid = open(self.filename, 'rb')
    # NOTE: in the following, the word "block" is used in the sense used in
    # the alpha-omega specifications (ie a data chunk in the file), rather
    # than in the sense of the usual Block object in neo

    # step 1: read the headers of all the data blocks to load the file
    # structure
    pos_block = 0 # position of the current block in the file
    file_blocks = [] # list of data blocks available in the file
    if not cascade:
        # we read only the main header
        m_length, m_TypeBlock = struct.unpack('Hcx' , fid.read(4))
        # m_TypeBlock should be 'h', as we read the first block
        block = HeaderReader(fid,
                             dict_header_type.get(m_TypeBlock,
                                                  Type_Unknown)).read_f()
        block.update({'m_length': m_length,
                      'm_TypeBlock': m_TypeBlock,
                      'pos': pos_block})
        file_blocks.append(block)
    else: # cascade == True
        seg = Segment(file_origin = os.path.basename(self.filename))
        # NOTE(review): redundant -- file_origin already set in constructor.
        seg.file_origin = os.path.basename(self.filename)
        blck.segments.append(seg)
        while True:
            first_4_bytes = fid.read(4)
            if len(first_4_bytes) < 4:
                # we have reached the end of the file
                break
            else:
                m_length, m_TypeBlock = struct.unpack('Hcx', first_4_bytes)
            block = HeaderReader(fid,
                                 dict_header_type.get(m_TypeBlock,
                                                      Type_Unknown)).read_f()
            block.update({'m_length': m_length,
                          'm_TypeBlock': m_TypeBlock,
                          'pos': pos_block})
            if m_TypeBlock == '2':
                # The beginning of the block of type '2' is identical for
                # all types of channels, but the following part depends on
                # the type of channel. So we need a special case here.
                # WARNING: How to check the type of channel is not
                # described in the documentation. So here I use what is
                # proposed in the C code [2].
                # According to this C code, it seems that the 'm_isAnalog'
                # is used to distinguished analog and digital channels, and
                # 'm_Mode' encodes the type of analog channel:
                # 0 for continuous, 1 for level, 2 for external trigger.
                # But in some files, I found channels that seemed to be
                # continuous channels with 'm_Modes' = 128 or 192. So I
                # decided to consider every channel with 'm_Modes'
                # different from 1 or 2 as continuous. I also couldn't
                # check that values of 1 and 2 are really for level and
                # external trigger as I had no test files containing data
                # of this types.
                type_subblock = 'unknown_channel_type(m_Mode=' \
                    + str(block['m_Mode'])+ ')'
                description = Type2_SubBlockUnknownChannels
                block.update({'m_Name': 'unknown_name'})
                if block['m_isAnalog'] == 0:
                    # digital channel
                    # ......... remainder of the code omitted in this excerpt .........
示例3: read_segment
# Required module import: from neo.core import Segment [as alias]
# Or: from neo.core.Segment import file_origin [as alias]
def read_segment(self, lazy=False, cascade=True):
    """
    Read one Segment from a NeuroExplorer file.

    Arguments:
        lazy : if True, data arrays are replaced by empty placeholders and
            the true size is recorded in ``lazy_shape``.
        cascade : if False, return the bare annotated Segment after reading
            the global header only.
    """
    # NOTE(review): the file handle is never closed in the visible part of
    # this excerpt -- verify against the full source.
    fid = open(self.filename, 'rb')
    global_header = HeaderReader(fid, GlobalHeader).read_f(offset=0)
    # ~ print globalHeader
    #~ print 'version' , globalHeader['version']
    seg = Segment()
    seg.file_origin = os.path.basename(self.filename)
    seg.annotate(neuroexplorer_version=global_header['version'])
    seg.annotate(comment=global_header['comment'])
    if not cascade:
        return seg
    # Entity headers start at byte 544 and are 208 bytes each (per the
    # offsets used in the loop below).
    offset = 544
    for i in range(global_header['nvar']):
        entity_header = HeaderReader(fid, EntityHeader).read_f(
            offset=offset + i * 208)
        # strip padding NUL bytes from the fixed-width name field
        entity_header['name'] = entity_header['name'].replace('\x00', '')
        #print 'i',i, entityHeader['type']
        if entity_header['type'] == 0:
            # neuron
            if lazy:
                # empty Quantity placeholder for lazy mode
                spike_times = [] * pq.s
            else:
                # raw int32 ticks mapped from disk, converted to seconds
                spike_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                        shape=(entity_header['n']),
                                        offset=entity_header['offset'])
                spike_times = spike_times.astype('f8') / global_header[
                    'freq'] * pq.s
            sptr = SpikeTrain(
                times=spike_times,
                t_start=global_header['tbeg'] /
                global_header['freq'] * pq.s,
                t_stop=global_header['tend'] /
                global_header['freq'] * pq.s,
                name=entity_header['name'])
            if lazy:
                sptr.lazy_shape = entity_header['n']
            sptr.annotate(channel_index=entity_header['WireNumber'])
            seg.spiketrains.append(sptr)
        if entity_header['type'] == 1:
            # event
            if lazy:
                event_times = [] * pq.s
            else:
                event_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                        shape=(entity_header['n']),
                                        offset=entity_header['offset'])
                event_times = event_times.astype('f8') / global_header[
                    'freq'] * pq.s
            # labels are left empty; only the channel name is kept
            labels = np.array([''] * event_times.size, dtype='S')
            evar = Event(times=event_times, labels=labels,
                         channel_name=entity_header['name'])
            if lazy:
                evar.lazy_shape = entity_header['n']
            seg.events.append(evar)
        if entity_header['type'] == 2:
            # interval
            if lazy:
                start_times = [] * pq.s
                stop_times = [] * pq.s
            else:
                start_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                        shape=(entity_header['n']),
                                        offset=entity_header['offset'])
                start_times = start_times.astype('f8') / global_header[
                    'freq'] * pq.s
                # stop times are stored right after the n start times
                # (n * 4 bytes further into the file)
                stop_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                       shape=(entity_header['n']),
                                       offset=entity_header['offset'] +
                                       entity_header['n'] * 4)
                # NOTE(review): astype('f') is float32 while start_times use
                # 'f8' (float64) -- precision inconsistency; verify upstream.
                stop_times = stop_times.astype('f') / global_header[
                    'freq'] * pq.s
            epar = Epoch(times=start_times,
                         durations=stop_times - start_times,
                         labels=np.array([''] * start_times.size,
                                         dtype='S'),
                         channel_name=entity_header['name'])
            if lazy:
                epar.lazy_shape = entity_header['n']
            seg.epochs.append(epar)
        if entity_header['type'] == 3:
            # spiketrain and wavefoms
            if lazy:
                spike_times = [] * pq.s
                waveforms = None
            else:
                spike_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                        shape=(entity_header['n']),
                                        offset=entity_header['offset'])
                spike_times = spike_times.astype('f8') / global_header[
                    'freq'] * pq.s
                waveforms = np.memmap(self.filename, np.dtype('i2'), 'r',
                    # ......... remainder of the code omitted in this excerpt .........
示例4: read_segment
# Required module import: from neo.core import Segment [as alias]
# Or: from neo.core.Segment import file_origin [as alias]
def read_segment(self, lazy=False, cascade=True, load_spike_waveform=True):
    """
    Read one Segment from a Plexon file.

    Arguments:
        lazy : if True, skip allocating/reading the data arrays (Step 3).
        cascade : if False, return the bare annotated Segment after reading
            the global header only.
        load_spike_waveform : load or not waveform of spikes (default True)
    """
    # NOTE(review): the file handle is never closed in the visible part of
    # this excerpt -- verify against the full source.
    fid = open(self.filename, "rb")
    globalHeader = HeaderReader(fid, GlobalHeader).read_f(offset=0)
    # metadatas
    seg = Segment()
    # Recording start time is stored field by field in the global header.
    seg.rec_datetime = datetime.datetime(
        globalHeader["Year"],
        globalHeader["Month"],
        globalHeader["Day"],
        globalHeader["Hour"],
        globalHeader["Minute"],
        globalHeader["Second"],
    )
    seg.file_origin = os.path.basename(self.filename)
    seg.annotate(plexon_version=globalHeader["Version"])
    if not cascade:
        # header-only read requested
        return seg

    ## Step 1 : read headers
    # dsp channels header = spikes and waveforms
    dspChannelHeaders = {}
    maxunit = 0
    maxchan = 0
    for _ in range(globalHeader["NumDSPChannels"]):
        # channel is 1 based
        channelHeader = HeaderReader(fid, ChannelHeader).read_f(offset=None)
        channelHeader["Template"] = np.array(channelHeader["Template"]).reshape((5, 64))
        channelHeader["Boxes"] = np.array(channelHeader["Boxes"]).reshape((5, 2, 4))
        dspChannelHeaders[channelHeader["Channel"]] = channelHeader
        maxunit = max(channelHeader["NUnits"], maxunit)
        maxchan = max(channelHeader["Channel"], maxchan)

    # event channel header
    eventHeaders = {}
    for _ in range(globalHeader["NumEventChannels"]):
        eventHeader = HeaderReader(fid, EventHeader).read_f(offset=None)
        eventHeaders[eventHeader["Channel"]] = eventHeader

    # slow channel header = signal
    slowChannelHeaders = {}
    for _ in range(globalHeader["NumSlowChannels"]):
        slowChannelHeader = HeaderReader(fid, SlowChannelHeader).read_f(offset=None)
        slowChannelHeaders[slowChannelHeader["Channel"]] = slowChannelHeader

    ## Step 2 : a first loop for counting size
    # signal
    nb_samples = np.zeros(len(slowChannelHeaders))
    sample_positions = np.zeros(len(slowChannelHeaders))
    t_starts = np.zeros(len(slowChannelHeaders), dtype="f")
    # spiketimes and waveform
    nb_spikes = np.zeros((maxchan + 1, maxunit + 1), dtype="i")
    wf_sizes = np.zeros((maxchan + 1, maxunit + 1, 2), dtype="i")
    # eventarrays
    nb_events = {}
    # maxstrsizeperchannel = { }
    for chan, h in iteritems(eventHeaders):
        nb_events[chan] = 0
        # maxstrsizeperchannel[chan] = 0
    start = fid.tell()
    # NOTE(review): fid.tell() never returns -1, so this condition is always
    # true; the loop actually terminates via the `break` below (or via code
    # omitted from this excerpt).
    while fid.tell() != -1:
        # read block header
        dataBlockHeader = HeaderReader(fid, DataBlockHeader).read_f(offset=None)
        if dataBlockHeader is None:
            break
        chan = dataBlockHeader["Channel"]
        unit = dataBlockHeader["Unit"]
        n1, n2 = dataBlockHeader["NumberOfWaveforms"], dataBlockHeader["NumberOfWordsInWaveform"]
        # 40-bit timestamp: high byte shifted above the 32-bit lower part
        time = dataBlockHeader["UpperByteOf5ByteTimestamp"] * 2.0 ** 32 + dataBlockHeader["TimeStamp"]
        if dataBlockHeader["Type"] == 1:
            # spike block: count it and skip over the waveform samples
            nb_spikes[chan, unit] += 1
            wf_sizes[chan, unit, :] = [n1, n2]
            fid.seek(n1 * n2 * 2, 1)
        elif dataBlockHeader["Type"] == 4:
            # event
            nb_events[chan] += 1
        elif dataBlockHeader["Type"] == 5:
            # continuous signal
            fid.seek(n2 * 2, 1)
            if n2 > 0:
                nb_samples[chan] += n2
            # NOTE(review): original indentation was lost in this excerpt; in
            # upstream neo the t_start capture happens *before* the sample
            # count is incremented -- verify nesting against the real source.
            if nb_samples[chan] == 0:
                t_starts[chan] = time

    ## Step 3: allocating memory and 2 loop for reading if not lazy
    if not lazy:
        # allocating mem for signal
        sigarrays = {}
        for chan, h in iteritems(slowChannelHeaders):
            sigarrays[chan] = np.zeros(nb_samples[chan])
            # ......... remainder of the code omitted in this excerpt .........