This article collects typical usage examples of the Segment class from Python's neo.core module. If you have been wondering what neo.core.Segment is for, how to use it, or what real code using it looks like, the hand-picked class examples below should help.
The sections below present 15 code examples of the Segment class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
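Before the examples, here is a minimal stand-alone sketch of building a Segment by hand, assuming neo, numpy and quantities are installed (the names and array values are purely illustrative):

import numpy as np
import quantities as pq
from neo.core import Segment, AnalogSignal, SpikeTrain

# Create an empty Segment and attach free-form metadata
seg = Segment(name='trial-01', description='illustrative segment')
seg.annotate(condition='baseline')

# Attach a 1-second, 1 kHz analog signal and a spike train
sig = AnalogSignal(np.random.randn(1000, 1) * pq.mV, sampling_rate=1 * pq.kHz)
st = SpikeTrain([0.1, 0.4, 0.9] * pq.s, t_stop=1.0 * pq.s)
seg.analogsignals.append(sig)
seg.spiketrains.append(st)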
Example 1: test_read_nse_data
def test_read_nse_data(self):
    t_start, t_stop = None, None  # in samples
    nio = NeuralynxIO(self.sn, use_cache='never')
    seg = Segment('testsegment')

    for el_id, el_dict in nio.parameters_nse.items():
        filepath = el_dict['recording_file_name']
        filename = filepath.split('/')[-1].split('\\')[-1].split('.')[0]
        nio.read_nse(filename, seg, t_start=t_start, t_stop=t_stop,
                     waveforms=True)
        spiketrain = seg.filter({'electrode_id': el_id},
                                objects=SpikeTrain)[0]

        # target_data = np.zeros((500, 32))
        # timestamps = np.zeros(500)
        entries = []
        with open(self.pd + '/%s.txt' % filename) as datafile:
            for i, line in enumerate(datafile):
                line = line.strip('\xef\xbb\xbf')
                entries.append(line.split())
        entries = np.asarray(entries, dtype=float)
        target_data = entries[:-1, 11:]
        timestamps = entries[:-1, 0]
        timestamps = (timestamps * pq.microsecond -
                      nio.parameters_global['t_start'])

        np.testing.assert_array_equal(timestamps.magnitude,
                                      spiketrain.magnitude)
        np.testing.assert_array_equal(target_data,
                                      spiketrain.waveforms)
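Example 1 relies on Segment.filter to pull a child object out by annotation. A minimal illustration of that call on its own (the annotation name and value are made up):

import quantities as pq
from neo.core import Segment, SpikeTrain

seg = Segment()
st = SpikeTrain([0.01, 0.02] * pq.s, t_stop=1.0 * pq.s)
st.annotate(electrode_id=4)
seg.spiketrains.append(st)

# filter() matches children whose annotations contain the given dict
matches = seg.filter({'electrode_id': 4}, objects=SpikeTrain)
assert matches[0] is st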
Example 2: _handle_epochs_group
def _handle_epochs_group(self, block):
    # Note that an NWB Epoch corresponds to a Neo Segment, not to a Neo Epoch.
    epochs = self._file.get('epochs')
    # todo: handle epochs.attrs.get('tags')
    for name, epoch in epochs.items():
        # todo: handle epoch.attrs.get('links')
        timeseries = []
        for key, value in epoch.items():
            if key == 'start_time':
                t_start = value * pq.second
            elif key == 'stop_time':
                t_stop = value * pq.second
            else:
                # todo: handle value['count']
                # todo: handle value['idx_start']
                timeseries.append(self._handle_timeseries(key, value.get('timeseries')))
        segment = Segment(name=name)
        for obj in timeseries:
            obj.segment = segment
            if isinstance(obj, AnalogSignal):
                segment.analogsignals.append(obj)
            elif isinstance(obj, IrregularlySampledSignal):
                segment.irregularlysampledsignals.append(obj)
            elif isinstance(obj, Event):
                segment.events.append(obj)
            elif isinstance(obj, Epoch):
                segment.epochs.append(obj)
        segment.block = block
        block.segments.append(segment)
Example 3: test__children
def test__children(self):
    params = {"test2": "y1", "test3": True}
    evt = Event(
        1.5 * pq.ms,
        label="test epoch",
        name="test",
        description="tester",
        file_origin="test.file",
        test1=1,
        **params
    )
    evt.annotate(test1=1.1, test0=[1, 2])
    assert_neo_object_is_compliant(evt)

    segment = Segment(name="seg1")
    segment.events = [evt]
    segment.create_many_to_one_relationship()

    self.assertEqual(evt._single_parent_objects, ("Segment",))
    self.assertEqual(evt._multi_parent_objects, ())
    self.assertEqual(evt._single_parent_containers, ("segment",))
    self.assertEqual(evt._multi_parent_containers, ())
    self.assertEqual(evt._parent_objects, ("Segment",))
    self.assertEqual(evt._parent_containers, ("segment",))
    self.assertEqual(len(evt.parents), 1)
    self.assertEqual(evt.parents[0].name, "seg1")
    assert_neo_object_is_compliant(evt)
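The pattern above recurs in the next few examples: assign children to a container, then call create_many_to_one_relationship() to wire the reverse links. A minimal sketch of that mechanism on its own (names and values are illustrative):

import numpy as np
import quantities as pq
from neo.core import Segment, Event

seg = Segment(name='seg1')
evt = Event([1.5] * pq.ms, labels=np.array(['go'], dtype='S'))
seg.events = [evt]

# Wire the child -> parent back-references in one call
seg.create_many_to_one_relationship()

assert evt.segment is seg
assert evt.parents[0].name == 'seg1'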
Example 4: test__children
def test__children(self):
    signal = self.signals[0]

    segment = Segment(name='seg1')
    segment.analogsignals = [signal]
    segment.create_many_to_one_relationship()

    rchan = RecordingChannel(name='rchan1')
    rchan.analogsignals = [signal]
    rchan.create_many_to_one_relationship()

    self.assertEqual(signal._single_parent_objects,
                     ('Segment', 'RecordingChannel'))
    self.assertEqual(signal._multi_parent_objects, ())
    self.assertEqual(signal._single_parent_containers,
                     ('segment', 'recordingchannel'))
    self.assertEqual(signal._multi_parent_containers, ())
    self.assertEqual(signal._parent_objects,
                     ('Segment', 'RecordingChannel'))
    self.assertEqual(signal._parent_containers,
                     ('segment', 'recordingchannel'))
    self.assertEqual(len(signal.parents), 2)
    self.assertEqual(signal.parents[0].name, 'seg1')
    self.assertEqual(signal.parents[1].name, 'rchan1')
    assert_neo_object_is_compliant(signal)
Example 5: test__children
def test__children(self):
    segment = Segment(name='seg1')
    segment.spikes = [self.spike1]
    segment.create_many_to_one_relationship()

    unit = Unit(name='unit1')
    unit.spikes = [self.spike1]
    unit.create_many_to_one_relationship()

    self.assertEqual(self.spike1._single_parent_objects,
                     ('Segment', 'Unit'))
    self.assertEqual(self.spike1._multi_parent_objects, ())
    self.assertEqual(self.spike1._single_parent_containers,
                     ('segment', 'unit'))
    self.assertEqual(self.spike1._multi_parent_containers, ())
    self.assertEqual(self.spike1._parent_objects,
                     ('Segment', 'Unit'))
    self.assertEqual(self.spike1._parent_containers,
                     ('segment', 'unit'))
    self.assertEqual(len(self.spike1.parents), 2)
    self.assertEqual(self.spike1.parents[0].name, 'seg1')
    self.assertEqual(self.spike1.parents[1].name, 'unit1')
    assert_neo_object_is_compliant(self.spike1)
Example 6: test__children
def test__children(self):
    signal = self.signals[0]

    segment = Segment(name='seg1')
    segment.analogsignals = [signal]
    segment.create_many_to_one_relationship()

    chx = ChannelIndex(name='chx1', index=np.arange(signal.shape[1]))
    chx.analogsignals = [signal]
    chx.create_many_to_one_relationship()

    self.assertEqual(signal._single_parent_objects, ('Segment', 'ChannelIndex'))
    self.assertEqual(signal._multi_parent_objects, ())
    self.assertEqual(signal._single_parent_containers, ('segment', 'channel_index'))
    self.assertEqual(signal._multi_parent_containers, ())
    self.assertEqual(signal._parent_objects, ('Segment', 'ChannelIndex'))
    self.assertEqual(signal._parent_containers, ('segment', 'channel_index'))
    self.assertEqual(len(signal.parents), 2)
    self.assertEqual(signal.parents[0].name, 'seg1')
    self.assertEqual(signal.parents[1].name, 'chx1')
    assert_neo_object_is_compliant(signal)
Example 7: read_segment
def read_segment(self, lazy=False, cascade=True, group=0, series=0):
    seg = Segment(name='test')
    if cascade:
        tree = getbyroute(self.pul.tree, [0, group, series])
        for sw, sweep in enumerate(tree['children']):
            if sw == 0:
                starttime = pq.Quantity(float(sweep['contents'].swTimer), 's')
            for ch, channel in enumerate(sweep['children']):
                sig = self.read_analogsignal(group=group,
                                             series=series,
                                             sweep=sw,
                                             channel=ch)
                # list() is needed so that remove() works on Python 3,
                # where dict.keys() returns a view
                annotations = list(sweep['contents'].__dict__.keys())
                annotations.remove('readlist')
                for a in annotations:
                    d = {a: str(sweep['contents'].__dict__[a])}
                    sig.annotate(**d)
                sig.t_start = pq.Quantity(
                    float(sig.annotations['swTimer']), 's') - starttime
                seg.analogsignals.append(sig)
        annotations = list(tree['contents'].__dict__.keys())
        annotations.remove('readlist')
        for a in annotations:
            d = {a: str(tree['contents'].__dict__[a])}
            seg.annotate(**d)
    create_many_to_one_relationship(seg)
    return seg
Example 8: read_block
def read_block(self, lazy=False, cascade=True, channel_index=None):
    """
    Arguments:
        channel_index: can be int, iterable or None to select one, many or
        all channel(s)
    """
    blk = Block()
    if cascade:
        seg = Segment(file_origin=self._filename)
        blk.segments += [seg]

        # explicit None check so that channel 0 is not mistaken for
        # "select all channels"
        if channel_index is not None:
            if isinstance(channel_index, int):
                channel_index = [channel_index]
            if isinstance(channel_index, list):
                channel_index = np.array(channel_index)
        else:
            channel_index = np.arange(0, self._attrs["shape"][1])

        chx = ChannelIndex(name="all channels", index=channel_index)
        blk.channel_indexes.append(chx)

        ana = self.read_analogsignal(channel_index=channel_index,
                                     lazy=lazy, cascade=cascade)
        ana.channel_index = chx
        seg.duration = (self._attrs["shape"][0] /
                        self._attrs["kwik"]["sample_rate"]) * pq.s

        # neo.tools.populate_RecordingChannel(blk)
    blk.create_many_to_one_relationship()
    return blk
Example 9: test__children
def test__children(self):
    params = {'test2': 'y1', 'test3': True}
    epc = Epoch([1.1, 1.5, 1.7] * pq.ms, durations=[20, 40, 60] * pq.ns,
                labels=np.array(['test epoch 1',
                                 'test epoch 2',
                                 'test epoch 3'], dtype='S'),
                name='test', description='tester',
                file_origin='test.file',
                test1=1, **params)
    epc.annotate(test1=1.1, test0=[1, 2])
    assert_neo_object_is_compliant(epc)

    segment = Segment(name='seg1')
    segment.epochs = [epc]
    segment.create_many_to_one_relationship()

    self.assertEqual(epc._single_parent_objects, ('Segment',))
    self.assertEqual(epc._multi_parent_objects, ())
    self.assertEqual(epc._single_parent_containers, ('segment',))
    self.assertEqual(epc._multi_parent_containers, ())
    self.assertEqual(epc._parent_objects, ('Segment',))
    self.assertEqual(epc._parent_containers, ('segment',))
    self.assertEqual(len(epc.parents), 1)
    self.assertEqual(epc.parents[0].name, 'seg1')
    assert_neo_object_is_compliant(epc)
Example 10: proc_dam
def proc_dam(filename):
    '''Load a dam file that has already been processed by the official
    matlab file converter.  That matlab data is saved to an m-file, which
    is then converted to a numpy '.npz' file.  This numpy file is the file
    actually loaded.  This function converts it to a neo block and returns
    the block.  This block can be compared to the block produced by
    BrainwareDamIO to make sure BrainwareDamIO is working properly.

    block = proc_dam(filename)

    filename: The file name of the numpy file to load.  It should end with
    '*_dam_py?.npz'.  This will be converted to a neo 'file_origin'
    property with the value '*.dam', so the filename to compare should fit
    that pattern.  'py?' should be 'py2' for the python 2 version of the
    numpy file or 'py3' for the python 3 version of the numpy file.

    example: filename = 'file1_dam_py2.npz'
             dam file name = 'file1.dam'
    '''
    with np.load(filename) as damobj:
        # list() is needed on Python 3, where items() returns a view
        damfile = list(damobj.items())[0][1].flatten()

    filename = os.path.basename(filename[:-12] + '.dam')

    signals = [res.flatten() for res in damfile['signal']]
    stimIndexes = [int(res[0, 0].tolist()) for res in damfile['stimIndex']]
    timestamps = [res[0, 0] for res in damfile['timestamp']]

    block = Block(file_origin=filename)

    rcg = RecordingChannelGroup(file_origin=filename)
    chan = RecordingChannel(file_origin=filename, index=0, name='Chan1')
    rcg.channel_indexes = np.array([1])
    rcg.channel_names = np.array(['Chan1'], dtype='S')

    block.recordingchannelgroups.append(rcg)
    rcg.recordingchannels.append(chan)

    params = [res['params'][0, 0].flatten() for res in damfile['stim']]
    values = [res['values'][0, 0].flatten() for res in damfile['stim']]
    params = [[res1[0] for res1 in res] for res in params]
    values = [[res1 for res1 in res] for res in values]
    stims = [dict(zip(param, value)) for param, value in zip(params, values)]

    fulldam = zip(stimIndexes, timestamps, signals, stims)
    for stimIndex, timestamp, signal, stim in fulldam:
        sig = AnalogSignal(signal=signal * pq.mV,
                           t_start=timestamp * pq.d,
                           file_origin=filename,
                           sampling_period=1. * pq.s)
        segment = Segment(file_origin=filename,
                          index=stimIndex,
                          **stim)
        segment.analogsignals = [sig]
        block.segments.append(segment)

    create_many_to_one_relationship(block)

    return block
Example 11: _group_to_neo
def _group_to_neo(self, nix_group):
    neo_attrs = self._nix_attr_to_neo(nix_group)
    neo_segment = Segment(**neo_attrs)
    neo_segment.rec_datetime = datetime.fromtimestamp(
        nix_group.created_at
    )
    self._neo_map[nix_group.name] = neo_segment
    return neo_segment
Example 12: test_segment_write
def test_segment_write(self):
    block = Block(name=self.rword())
    segment = Segment(name=self.rword(), description=self.rword())
    block.segments.append(segment)
    self.write_and_compare([block])

    segment.annotate(**self.rdict(2))
    self.write_and_compare([block])
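For context, write_and_compare and rword above are helpers from neo's NixIO test suite. A stand-alone round trip through NixIO looks roughly like this sketch, assuming the nixio backend is installed (the file name is illustrative):

from neo.core import Block, Segment
from neo.io import NixIO

block = Block(name='demo-block')
block.segments.append(Segment(name='demo-segment', description='round trip'))

# mode 'ow' overwrites any existing file; 'ro' opens read-only
io = NixIO('demo.nix', mode='ow')
io.write_block(block)
io.close()

io = NixIO('demo.nix', mode='ro')
read_back = io.read_block()
io.close()
assert read_back.segments[0].name == 'demo-segment'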
Example 13: read_segment
def read_segment(self, n_start, n_stop, chlist=None, lazy=False, cascade=True):
    """Reads a Segment from the file and stores in database.

    The Segment will contain one AnalogSignal for each channel
    and will go from n_start to n_stop (in samples).

    Arguments:
        n_start : time in samples that the Segment begins
        n_stop : time in samples that the Segment ends

    Python indexing is used, so n_stop is not inclusive.

    Returns a Segment object containing the data.
    """
    # If no channel numbers provided, get all of them
    if chlist is None:
        chlist = self.loader.get_neural_channel_numbers()

    # Conversion from bits to full_range units
    conversion = self.full_range / 2 ** (8 * self.header.sample_width)

    # Create the Segment
    seg = Segment(file_origin=self.filename)
    t_start = float(n_start) / self.header.f_samp
    t_stop = float(n_stop) / self.header.f_samp
    seg.annotate(t_start=t_start)
    seg.annotate(t_stop=t_stop)

    # Load data from each channel and store
    for ch in chlist:
        if lazy:
            sig = np.array([]) * conversion
        else:
            # Get the data from the loader
            sig = np.array(
                self.loader._get_channel(ch)[n_start:n_stop]) * conversion

        # Create an AnalogSignal with the data in it
        anasig = AnalogSignal(signal=sig,
                              sampling_rate=self.header.f_samp * pq.Hz,
                              t_start=t_start * pq.s,
                              file_origin=self.filename,
                              description='Channel %d from %f to %f' % (
                                  ch, t_start, t_stop),
                              channel_index=int(ch))

        if lazy:
            anasig.lazy_shape = n_stop - n_start

        # Link the signal to the segment
        seg.analogsignals.append(anasig)

        # Link the signal to the recording channel from which it came
        # rc = self.channel_number_to_recording_channel[ch]
        # rc.analogsignals.append(anasig)

    return seg
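The bits-to-units conversion above deserves a worked number: for a 16-bit ADC (sample_width of 2 bytes) spanning a full range of 8192 mV, each integer count is 8192 / 2**16 = 0.125 mV. A quick check (the values are illustrative, not from any particular recording system):

# Illustrative parameters for the conversion formula above
full_range = 8192.0    # mV, total ADC span
sample_width = 2       # bytes per sample -> 16-bit ADC
conversion = full_range / 2 ** (8 * sample_width)
print(conversion)      # 0.125 mV per ADC count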
Example 14: read_segment
def read_segment(self, lazy=False, cascade=True,
                 gdf_id_list=None, time_unit=pq.ms, t_start=None,
                 t_stop=None, id_column=0, time_column=1, **args):
    """
    Read a Segment which contains SpikeTrain(s) with specified neuron IDs
    from the GDF data.

    Parameters
    ----------
    lazy : bool, optional, default: False
    cascade : bool, optional, default: True
    gdf_id_list : list or tuple, default: None
        Can be either a list of GDF IDs for which to return SpikeTrain(s),
        or a tuple specifying the range (boundaries [start, stop]
        included) of GDF IDs. Must be specified if the GDF file contains
        neuron IDs; the default None then raises an error. Specify an
        empty list [] to retrieve the spike trains of all neurons with at
        least one spike.
    time_unit : Quantity (time), optional, default: quantities.ms
        The time unit of the recorded time stamps.
    t_start : Quantity (time), default: None
        Start time of the SpikeTrains. t_start must be specified; the
        default None raises an error.
    t_stop : Quantity (time), default: None
        Stop time of the SpikeTrains. t_stop must be specified; the
        default None raises an error.
    id_column : int, optional, default: 0
        Column index of neuron IDs.
    time_column : int, optional, default: 1
        Column index of time stamps.

    Returns
    -------
    seg : Segment
        The Segment contains one SpikeTrain for each ID in gdf_id_list.
    """
    if isinstance(gdf_id_list, tuple):
        # list() so a concrete list is passed on (range is lazy in Python 3)
        gdf_id_list = list(range(gdf_id_list[0], gdf_id_list[1] + 1))

    # __read_spiketrains() needs a list of IDs
    if gdf_id_list is None:
        gdf_id_list = [None]

    # create an empty Segment and fill in the spike trains
    seg = Segment()
    seg.spiketrains = self.__read_spiketrains(gdf_id_list,
                                              time_unit, t_start,
                                              t_stop,
                                              id_column, time_column,
                                              **args)

    return seg
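A hedged usage sketch of this reader: some neo versions exposed it as neo.io.GdfIO, but treat the class name, file name, and IDs below as assumptions rather than a guaranteed API:

import quantities as pq
from neo.io import GdfIO  # assumption: reader exposed under this name

io = GdfIO('spikes.gdf')  # hypothetical GDF file
seg = io.read_segment(gdf_id_list=(1, 3),  # inclusive range: IDs 1, 2, 3
                      t_start=0 * pq.ms,
                      t_stop=1000 * pq.ms)
print(len(seg.spiketrains))  # one SpikeTrain per requested ID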
Example 15: _read_segment
def _read_segment(self, node, parent):
    attributes = self._get_standard_attributes(node)
    segment = Segment(**attributes)

    signals = []
    for name, child_node in node['analogsignals'].items():
        if "AnalogSignal" in name:
            signals.append(self._read_analogsignal(child_node, parent=segment))
    if signals and self.merge_singles:
        segment.unmerged_analogsignals = signals  # signals will be merged later
        signals = []
    for name, child_node in node['analogsignalarrays'].items():
        if "AnalogSignalArray" in name:
            signals.append(self._read_analogsignalarray(child_node, parent=segment))
    segment.analogsignals = signals

    irr_signals = []
    for name, child_node in node['irregularlysampledsignals'].items():
        if "IrregularlySampledSignal" in name:
            irr_signals.append(self._read_irregularlysampledsignal(child_node,
                                                                   parent=segment))
    if irr_signals and self.merge_singles:
        segment.unmerged_irregularlysampledsignals = irr_signals
        irr_signals = []
    segment.irregularlysampledsignals = irr_signals

    epochs = []
    for name, child_node in node['epochs'].items():
        if "Epoch" in name:
            epochs.append(self._read_epoch(child_node, parent=segment))
    if self.merge_singles:
        epochs = self._merge_data_objects(epochs)
    for name, child_node in node['epocharrays'].items():
        if "EpochArray" in name:
            epochs.append(self._read_epocharray(child_node, parent=segment))
    segment.epochs = epochs

    events = []
    for name, child_node in node['events'].items():
        if "Event" in name:
            events.append(self._read_event(child_node, parent=segment))
    if self.merge_singles:
        events = self._merge_data_objects(events)
    for name, child_node in node['eventarrays'].items():
        if "EventArray" in name:
            events.append(self._read_eventarray(child_node, parent=segment))
    segment.events = events

    spiketrains = []
    for name, child_node in node['spikes'].items():
        raise NotImplementedError('Spike objects not yet handled.')
    for name, child_node in node['spiketrains'].items():
        if "SpikeTrain" in name:
            spiketrains.append(self._read_spiketrain(child_node, parent=segment))
    segment.spiketrains = spiketrains

    segment.block = parent
    return segment