本文整理汇总了Python中neo.core.Block类的典型用法代码示例。如果您正苦于以下问题:Python Block类的具体用法?Python Block怎么用?Python Block使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Block类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_block_write
def test_block_write(self):
    """Round-trip a Block through write_and_compare, before and after annotating it."""
    blk = Block(name=self.rword(), description=self.rsentence())
    self.write_and_compare([blk])
    # a second pass after annotation checks that annotations survive the round trip
    blk.annotate(**self.rdict(5))
    self.write_and_compare([blk])
示例2: read_block
def read_block(self, lazy=False, group=None, reader=None):
    """
    Read a Block from the file.

    :param lazy: must be False; lazy reading is not supported
    :param group: HDF5 Group representing the block in the NSDF model tree (optional)
    :param reader: NSDFReader instance (optional)
    :return: the populated Block, or None when no block container is found
    """
    assert not lazy, 'Do not support lazy'
    neo_block = Block()
    group, reader = self._select_first_container(group, reader, 'block')
    if group is None:
        return None
    # children first, then relationships, then container metadata
    self._read_block_children(neo_block, group, reader)
    neo_block.create_many_to_one_relationship()
    self._read_container_metadata(group.attrs, neo_block)
    return neo_block
示例3: read_block
def read_block(self, lazy=False, cascade=True, channel_index=None):
    """
    Read the recording as a single Block with one Segment.

    Arguments:
        channel_index: can be int, iterable or None to select one,
            many or all channel(s)
    """
    blk = Block()
    if cascade:
        seg = Segment(file_origin=self._filename)
        blk.segments += [seg]

        # BUG FIX: the original tested `if channel_index:`, so channel 0
        # (a falsy but valid int) silently fell through to "all channels".
        # Test explicitly against None instead.
        if channel_index is not None:
            if type(channel_index) is int:
                channel_index = [channel_index]
            if type(channel_index) is list:
                channel_index = np.array(channel_index)
        else:
            channel_index = np.arange(0, self._attrs["shape"][1])

        chx = ChannelIndex(name="all channels", index=channel_index)
        blk.channel_indexes.append(chx)

        ana = self.read_analogsignal(channel_index=channel_index,
                                     lazy=lazy, cascade=cascade)
        ana.channel_index = chx
        # duration = number of samples / sampling rate
        seg.duration = (self._attrs["shape"][0]
                        / self._attrs["kwik"]["sample_rate"]) * pq.s

        # neo.tools.populate_RecordingChannel(blk)
    blk.create_many_to_one_relationship()
    return blk
示例4: _block_to_neo
def _block_to_neo(self, nix_block):
    """Convert a NIX block into a neo Block and register it in the name map."""
    attrs = self._nix_attr_to_neo(nix_block)
    blk = Block(**attrs)
    # NIX stores the creation time as a UNIX timestamp
    blk.rec_datetime = datetime.fromtimestamp(nix_block.created_at)
    self._neo_map[nix_block.name] = blk
    return blk
示例5: proc_dam
def proc_dam(filename):
    '''Load a dam file that has already been processed by the official matlab
    file converter.  That matlab data is saved to an m-file, which is then
    converted to a numpy '.npz' file.  This numpy file is the file actually
    loaded.  This function converts it to a neo block and returns the block.
    This block can be compared to the block produced by BrainwareDamIO to
    make sure BrainwareDamIO is working properly.

    block = proc_dam(filename)

    filename: The file name of the numpy file to load.  It should end with
    '*_dam_py?.npz'.  This will be converted to a neo 'file_origin' property
    with the value '*.dam', so the filename to compare should fit that
    pattern.  'py?' should be 'py2' for the python 2 version of the numpy
    file or 'py3' for the python 3 version of the numpy file.

    example: filename = 'file1_dam_py2.npz'
             dam file name = 'file1.dam'
    '''
    with np.load(filename) as damobj:
        # BUG FIX: on Python 3, NpzFile.items() returns a non-subscriptable
        # view, so items()[0] raises TypeError; materialize it first.
        damfile = list(damobj.items())[0][1].flatten()

    filename = os.path.basename(filename[:-12] + '.dam')

    signals = [res.flatten() for res in damfile['signal']]
    stimIndexes = [int(res[0, 0].tolist()) for res in damfile['stimIndex']]
    timestamps = [res[0, 0] for res in damfile['timestamp']]

    block = Block(file_origin=filename)

    # single-channel recording: one ChannelIndex describing channel 1
    chx = ChannelIndex(file_origin=filename,
                       index=np.array([0]),
                       channel_ids=np.array([1]),
                       channel_names=np.array(['Chan1'], dtype='S'))
    block.channel_indexes.append(chx)

    # unpack the matlab struct arrays into per-stimulus parameter dicts
    params = [res['params'][0, 0].flatten() for res in damfile['stim']]
    values = [res['values'][0, 0].flatten() for res in damfile['stim']]
    params = [[res1[0] for res1 in res] for res in params]
    values = [[res1 for res1 in res] for res in values]
    stims = [dict(zip(param, value)) for param, value in zip(params, values)]

    fulldam = zip(stimIndexes, timestamps, signals, stims)
    for stimIndex, timestamp, signal, stim in fulldam:
        # timestamps are stored in days; samples are 1 s apart in mV
        sig = AnalogSignal(signal=signal * pq.mV,
                           t_start=timestamp * pq.d,
                           file_origin=filename,
                           sampling_period=1. * pq.s)
        segment = Segment(file_origin=filename,
                          index=stimIndex,
                          **stim)
        segment.analogsignals = [sig]
        block.segments.append(segment)

    block.create_many_to_one_relationship()

    return block
示例6: read_block
def read_block(self,
               lazy = False,
               cascade = True,
               ):
    """
    Read a neuroscope dataset (an .xml parameter file plus a companion
    .dat raw binary file) as a single Block with one Segment.

    :param lazy: if True, analog signals are loaded lazily and the
        amplification scaling is skipped
    :param cascade: if False, return the bare Block (file_origin only)
    :return: the populated Block
    """
    # acquisition parameters come from the XML sidecar file
    tree = ElementTree.parse(self.filename)
    root = tree.getroot()
    acq = root.find('acquisitionSystem')
    nbits = int(acq.find('nBits').text)
    nbchannel = int(acq.find('nChannels').text)
    sampling_rate = float(acq.find('samplingRate').text)*pq.Hz
    voltage_range = float(acq.find('voltageRange').text)
    #offset = int(acq.find('offset').text)
    amplification = float(acq.find('amplification').text)

    bl = Block(file_origin = os.path.basename(self.filename).replace('.xml', ''))
    if cascade:
        seg = Segment()
        bl.segments.append(seg)

        # RC and RCG: one RecordingChannelGroup per anatomical channel
        # group in the XML, each holding its RecordingChannels
        rc_list = [ ]
        for i, xml_rcg in enumerate(root.find('anatomicalDescription').find('channelGroups').findall('group')):
            rcg = RecordingChannelGroup(name = 'Group {0}'.format(i))
            bl.recordingchannelgroups.append(rcg)
            for xml_rc in xml_rcg:
                rc = RecordingChannel(index = int(xml_rc.text))
                rc_list.append(rc)
                rcg.recordingchannels.append(rc)
                rc.recordingchannelgroups.append(rcg)
            rcg.channel_indexes = np.array([rc.index for rc in rcg.recordingchannels], dtype = int)
            rcg.channel_names = np.array(['Channel{0}'.format(rc.index) for rc in rcg.recordingchannels], dtype = 'S')

        # AnalogSignals: samples live in the companion .dat file; the
        # integer width and voltage scaling come from the XML parameters
        reader = RawBinarySignalIO(filename = self.filename.replace('.xml', '.dat'))
        seg2 = reader.read_segment(cascade = True, lazy = lazy,
                                   sampling_rate = sampling_rate,
                                   t_start = 0.*pq.s,
                                   unit = pq.V, nbchannel = nbchannel,
                                   bytesoffset = 0,
                                   dtype = np.int16 if nbits<=16 else np.int32,
                                   rangemin = -voltage_range/2.,
                                   rangemax = voltage_range/2.,)
        for s, sig in enumerate(seg2.analogsignals):
            if not lazy:
                # undo the hardware gain so signals are in volts
                sig /= amplification
            sig.segment = seg
            seg.analogsignals.append(sig)
            # assumes .dat channel order matches the flattened XML channel
            # order collected in rc_list — TODO confirm
            rc_list[s].analogsignals.append(sig)

    bl.create_many_to_one_relationship()
    return bl
示例7: proc_src
def proc_src(filename):
    '''Load an src file that has already been processed by the official
    matlab file converter.  That matlab data is saved to an m-file, which
    is then converted to a numpy '.npz' file.  This numpy file is the file
    actually loaded.  This function converts it to a neo block and returns
    the block.  This block can be compared to the block produced by
    BrainwareSrcIO to make sure BrainwareSrcIO is working properly.

    block = proc_src(filename)

    filename: The file name of the numpy file to load.  It should end with
    '*_src_py?.npz'.  This will be converted to a neo 'file_origin'
    property with the value '*.src', so the filename to compare should fit
    that pattern.  'py?' should be 'py2' for the python 2 version of the
    numpy file or 'py3' for the python 3 version of the numpy file.

    example: filename = 'file1_src_py2.npz'
             src file name = 'file1.src'
    '''
    with np.load(filename) as srcobj:
        # BUG FIX: on Python 3, NpzFile.items() returns a non-subscriptable
        # view, so items()[0] raises TypeError; materialize it first.
        srcfile = list(srcobj.items())[0][1]

    filename = os.path.basename(filename[:-12] + '.src')

    block = Block(file_origin=filename)

    NChannels = srcfile['NChannels'][0, 0][0, 0]
    side = str(srcfile['side'][0, 0][0])
    ADperiod = srcfile['ADperiod'][0, 0][0, 0]

    comm_seg = proc_src_comments(srcfile, filename)
    block.segments.append(comm_seg)

    rcg = proc_src_units(srcfile, filename)
    chan_nums = np.arange(NChannels, dtype='int')
    chan_names = []
    for i in chan_nums:
        name = 'Chan' + str(i)
        chan_names.append(name)
        # BUG FIX: file_origin was the literal string 'filename' instead
        # of the filename variable used everywhere else in this function.
        chan = RecordingChannel(file_origin=filename,
                                name=name,
                                index=int(i))
        rcg.recordingchannels.append(chan)
    rcg.channel_indexes = chan_nums
    rcg.channel_names = np.array(chan_names, dtype='string_')
    block.recordingchannelgroups.append(rcg)

    for rep in srcfile['sets'][0, 0].flatten():
        proc_src_condition(rep, filename, ADperiod, side, block)

    block.create_many_to_one_relationship()

    return block
示例8: read_block
def read_block(self, lazy=False, cascade=True):
    """Read every TDT block in the tank directory into one neo Block."""
    tank_block = Block()
    tank_block.file_origin = os.path.basename(self.dirname)
    # without cascade, hand back the empty container immediately
    if not cascade:
        return tank_block
    for entry in os.listdir(self.dirname):
        tank_block.segments.append(self.read_segment(entry, lazy, cascade))
    tank_block.create_many_to_one_relationship()
    return tank_block
示例9: test__children
def test__children(self):
    """
    Check the relationship metadata of a RecordingChannelGroup once it is
    attached to a Block: the class-level descriptors, the ordering of the
    merged children list, and the parent link back to the Block.
    """
    blk = Block(name='block1')
    blk.recordingchannelgroups = [self.rcg1]
    blk.create_many_to_one_relationship()

    # class-level relationship descriptors of RecordingChannelGroup
    self.assertEqual(self.rcg1._container_child_objects, ('Unit',))
    self.assertEqual(self.rcg1._data_child_objects, ('AnalogSignalArray',))
    self.assertEqual(self.rcg1._single_parent_objects, ('Block',))
    self.assertEqual(self.rcg1._multi_child_objects, ('RecordingChannel',))
    self.assertEqual(self.rcg1._multi_parent_objects, ())
    self.assertEqual(self.rcg1._child_properties, ())

    self.assertEqual(self.rcg1._single_child_objects,
                     ('Unit', 'AnalogSignalArray',))

    self.assertEqual(self.rcg1._container_child_containers, ('units',))
    self.assertEqual(self.rcg1._data_child_containers,
                     ('analogsignalarrays',))
    self.assertEqual(self.rcg1._single_child_containers,
                     ('units', 'analogsignalarrays'))
    self.assertEqual(self.rcg1._single_parent_containers, ('block',))
    self.assertEqual(self.rcg1._multi_child_containers,
                     ('recordingchannels',))
    self.assertEqual(self.rcg1._multi_parent_containers, ())

    self.assertEqual(self.rcg1._child_objects,
                     ('Unit', 'AnalogSignalArray', 'RecordingChannel'))
    self.assertEqual(self.rcg1._child_containers,
                     ('units', 'analogsignalarrays', 'recordingchannels'))
    self.assertEqual(self.rcg1._parent_objects, ('Block',))
    self.assertEqual(self.rcg1._parent_containers, ('block',))

    # children is the concatenation of units, signal arrays and channels
    # from the test fixture, in that order
    self.assertEqual(len(self.rcg1.children),
                     (len(self.units1) +
                      len(self.rchan1) +
                      len(self.sigarr1)))
    self.assertEqual(self.rcg1.children[0].name, self.unitnames1[0])
    self.assertEqual(self.rcg1.children[1].name, self.unitnames1[1])
    self.assertEqual(self.rcg1.children[2].name, self.sigarrnames1[0])
    self.assertEqual(self.rcg1.children[3].name, self.sigarrnames1[1])
    self.assertEqual(self.rcg1.children[4].name, self.rchannames1[0])
    self.assertEqual(self.rcg1.children[5].name, self.rchannames1[1])

    # exactly one parent: the Block created above
    self.assertEqual(len(self.rcg1.parents), 1)
    self.assertEqual(self.rcg1.parents[0].name, 'block1')

    # rebuilding relationships must leave the object compliant
    self.rcg1.create_many_to_one_relationship()
    self.rcg1.create_many_to_many_relationship()
    self.rcg1.create_relationship()
    assert_neo_object_is_compliant(self.rcg1)
示例10: read
def read(self, lazy=False, cascade=True, **kargs):
    """
    Read all data from the file as a list of Blocks.

    IOs that read Blocks natively are delegated to read_all_blocks (when
    available) or read_block; Segment-only IOs get their single Segment
    wrapped in a container Block.
    """
    if Block in self.readable_objects:
        all_blocks_reader = getattr(self, "read_all_blocks", None)
        if callable(all_blocks_reader):
            return all_blocks_reader(lazy=lazy, cascade=cascade, **kargs)
        return [self.read_block(lazy=lazy, cascade=cascade, **kargs)]
    if Segment in self.readable_objects:
        bl = Block(name="One segment only")
        if not cascade:
            return bl
        bl.segments.append(self.read_segment(lazy=lazy, cascade=cascade, **kargs))
        bl.create_many_to_one_relationship()
        return [bl]
    raise NotImplementedError
示例11: read
def read(self, lazy=False, **kargs):
    """
    Read all data from the file as a list of Blocks.

    :param lazy: enable lazy loading (only allowed when the IO
        declares support via self.support_lazy)
    :return: list of Block objects
    """
    if lazy:
        # BUG FIX: corrected the grammar of the user-facing message
        # ("do not" -> "does not").
        assert self.support_lazy, 'This IO does not support lazy loading'
    if Block in self.readable_objects:
        if (hasattr(self, 'read_all_blocks') and
                callable(getattr(self, 'read_all_blocks'))):
            return self.read_all_blocks(lazy=lazy, **kargs)
        return [self.read_block(lazy=lazy, **kargs)]
    elif Segment in self.readable_objects:
        # wrap the IO's single Segment in a container Block
        bl = Block(name='One segment only')
        seg = self.read_segment(lazy=lazy, **kargs)
        bl.segments.append(seg)
        bl.create_many_to_one_relationship()
        return [bl]
    else:
        raise NotImplementedError
示例12: read_block
def read_block(self,
               lazy=False,
               cascade=True,
               group=0):
    """
    Read one group of the pul tree as a Block.

    :param lazy: present for API compatibility (not used here)
    :param cascade: if False, return the bare Block without segments
        or annotations
    :param group: index of the group to read within the tree
    :return: the populated Block
    """
    blo = Block(name='test')
    if cascade:
        tree = getbyroute(self.pul.tree, [0, group])
        for i, child in enumerate(tree['children']):
            blo.segments.append(self.read_segment(group=group, series=i))
        # BUG FIX: on Python 3, dict.keys() returns a view with no
        # .remove() method; materialize to a list before mutating.
        annotations = list(tree['contents'].__dict__.keys())
        annotations.remove('readlist')
        for a in annotations:
            d = {a: str(tree['contents'].__dict__[a])}
            blo.annotate(**d)
    create_many_to_one_relationship(blo)
    return blo
示例13: test__construct_subsegment_by_unit
def test__construct_subsegment_by_unit(self):
    """
    Build a Block with several Segments, Units and AnalogSignals, then
    check that Segment.construct_subsegment_by_unit returns a compliant
    sub-segment for a subset of the units.
    """
    nb_seg = 3
    nb_unit = 7
    unit_with_sig = np.array([0, 2, 5])
    signal_types = ['Vm', 'Conductances']
    sig_len = 100

    # channelindexes: one per signal type, indexing the units with signals
    chxs = [ChannelIndex(name='Vm',
                         index=unit_with_sig),
            ChannelIndex(name='Conductance',
                         index=unit_with_sig)]

    # Unit
    all_unit = []
    for u in range(nb_unit):
        un = Unit(name='Unit #%d' % u, channel_indexes=np.array([u]))
        assert_neo_object_is_compliant(un)
        all_unit.append(un)

    blk = Block()
    blk.channel_indexes = chxs
    for s in range(nb_seg):
        seg = Segment(name='Simulation %s' % s)
        for j in range(nb_unit):
            st = SpikeTrain([1, 2], units='ms',
                            t_start=0., t_stop=10)
            st.unit = all_unit[j]
            # NOTE(review): st is never appended to seg.spiketrains here —
            # confirm whether that omission is intentional for this test.

        for t in signal_types:
            anasigarr = AnalogSignal(np.zeros((sig_len,
                                               len(unit_with_sig))),
                                     units='nA',
                                     sampling_rate=1000.*pq.Hz,
                                     channel_indexes=unit_with_sig)
            seg.analogsignals.append(anasigarr)

    blk.create_many_to_one_relationship()

    # every constructed object should now be compliant
    for unit in all_unit:
        assert_neo_object_is_compliant(unit)
    for chx in chxs:
        assert_neo_object_is_compliant(chx)
    assert_neo_object_is_compliant(blk)

    # what you want: sub-segment for the first four units, built from
    # the last segment created in the loop above
    newseg = seg.construct_subsegment_by_unit(all_unit[:4])
    assert_neo_object_is_compliant(newseg)
示例14: read_block
def read_block(self, lazy=False, cascade=True, **kargs):
    """
    Reads a block from the raw data file "fname" generated
    with BrainWare
    """
    # No keyword arguments are implemented so far; a caller passing any is
    # either expecting behavior or making a mistake — neither should pass
    # silently.
    if kargs:
        raise NotImplementedError("This method does not have any " "argument implemented yet")
    self._fsrc = None

    block = Block(file_origin=self._filename)
    # with cascade disabled, hand back the empty container immediately
    if not cascade:
        return block

    # bookkeeping containers for the single recording channel
    chan_group = RecordingChannelGroup(file_origin=self._filename)
    chan = RecordingChannel(file_origin=self._filename, index=1, name="Chan1")
    chan_group.recordingchannels.append(chan)
    block.recordingchannelgroups.append(chan_group)
    chan_group.channel_indexes = np.array([1])
    chan_group.channel_names = np.array(["Chan1"], dtype="S")

    with open(self._path, "rb") as fobject:
        # priming read, then loop until the reader yields a falsy segment
        seg = self._read_segment(fobject, lazy)
        while seg:
            block.segments.append(seg)
            chan.analogsignals.append(seg.analogsignals[0])
            seg = self._read_segment(fobject, lazy)

    # drop the stale file-object reference
    self._fsrc = None

    block.create_many_to_one_relationship()
    return block
示例15: read_block
def read_block(self, lazy=False, cascade=True, **kargs):
    '''
    Reads a block from the raw data file "fname" generated
    with BrainWare
    '''
    # No keyword arguments are implemented so far; a caller passing any is
    # either expecting behavior or making a mistake — neither should pass
    # silently.
    if kargs:
        raise NotImplementedError('This method does not have any '
                                  'arguments implemented yet')
    self._fsrc = None

    block = Block(file_origin=self._filename)
    # with cascade disabled, hand back the empty container immediately
    if not cascade:
        return block

    # single ChannelIndex describing the one recorded channel
    chan_idx = ChannelIndex(file_origin=self._filename,
                            channel_ids=np.array([1]),
                            index=np.array([0]),
                            channel_names=np.array(['Chan1'], dtype='S'))
    block.channel_indexes.append(chan_idx)

    with open(self._path, 'rb') as fobject:
        # priming read, then loop until the reader yields a falsy segment
        seg = self._read_segment(fobject, lazy)
        while seg:
            seg.analogsignals[0].channel_index = chan_idx
            block.segments.append(seg)
            seg = self._read_segment(fobject, lazy)

    # drop the stale file-object reference
    self._fsrc = None

    block.create_many_to_one_relationship()
    return block