

Python Segment.spiketrains Code Examples

This article collects typical usage examples of neo.core.Segment.spiketrains in Python. If you are wondering what Segment.spiketrains does, how to use it, or what working examples look like, the curated code examples below may help. You can also explore further usage examples of the containing class, neo.core.Segment.


Six code examples of Segment.spiketrains are shown below, sorted by popularity.
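Before diving into the examples, here is a minimal orientation sketch of the pattern they all share: building SpikeTrain objects and attaching them to a Segment through its spiketrains attribute. This snippet is illustrative only and is not taken from any of the projects below.

import quantities as pq
from neo.core import Segment, SpikeTrain

# Create a Segment and attach two spike trains to it.
segment = Segment(name="example")
train_a = SpikeTrain([1.0, 5.0, 9.0] * pq.ms, t_stop=10.0 * pq.ms)
train_b = SpikeTrain([2.5, 7.5] * pq.ms, t_stop=10.0 * pq.ms)
segment.spiketrains = [train_a, train_b]

# Read access: iterate over the trains stored on the segment.
for train in segment.spiketrains:
    print(train.times, train.t_stop)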

Example 1: _read_segment

# Required module import: from neo.core import Segment [as alias]
# Or: from neo.core.Segment import spiketrains [as alias]
    def _read_segment(self, node, parent):
        attributes = self._get_standard_attributes(node)
        segment = Segment(**attributes)

        signals = []
        for name, child_node in node['analogsignals'].items():
            if "AnalogSignal" in name:
                signals.append(self._read_analogsignal(child_node, parent=segment))
        if signals and self.merge_singles:
            segment.unmerged_analogsignals = signals  # signals will be merged later
            signals = []
        for name, child_node in node['analogsignalarrays'].items():
            if "AnalogSignalArray" in name:
                signals.append(self._read_analogsignalarray(child_node, parent=segment))
        segment.analogsignals = signals

        irr_signals = []
        for name, child_node in node['irregularlysampledsignals'].items():
            if "IrregularlySampledSignal" in name:
                irr_signals.append(self._read_irregularlysampledsignal(child_node, parent=segment))
        if irr_signals and self.merge_singles:
            segment.unmerged_irregularlysampledsignals = irr_signals
            irr_signals = []
        segment.irregularlysampledsignals = irr_signals

        epochs = []
        for name, child_node in node['epochs'].items():
            if "Epoch" in name:
                epochs.append(self._read_epoch(child_node, parent=segment))
        if self.merge_singles:
            epochs = self._merge_data_objects(epochs)
        for name, child_node in node['epocharrays'].items():
            if "EpochArray" in name:
                epochs.append(self._read_epocharray(child_node, parent=segment))
        segment.epochs = epochs

        events = []
        for name, child_node in node['events'].items():
            if "Event" in name:
                events.append(self._read_event(child_node, parent=segment))
        if self.merge_singles:
            events = self._merge_data_objects(events)
        for name, child_node in node['eventarrays'].items():
            if "EventArray" in name:
                events.append(self._read_eventarray(child_node, parent=segment))
        segment.events = events

        spiketrains = []
        for name, child_node in node['spikes'].items():
            raise NotImplementedError('Spike objects not yet handled.')
        for name, child_node in node['spiketrains'].items():
            if "SpikeTrain" in name:
                spiketrains.append(self._read_spiketrain(child_node, parent=segment))
        segment.spiketrains = spiketrains

        segment.block = parent
        return segment
Developer: CINPLA, Project: python-neo, Lines: 59, Source: hdf5io.py

Example 2: read_segment

# Required module import: from neo.core import Segment [as alias]
# Or: from neo.core.Segment import spiketrains [as alias]
    def read_segment(self, lazy=False, cascade=True,
                     gdf_id_list=None, time_unit=pq.ms, t_start=None,
                     t_stop=None, id_column=0, time_column=1, **args):
        """
        Read a Segment which contains SpikeTrain(s) with specified neuron IDs
        from the GDF data.

        Parameters
        ----------
        lazy : bool, optional, default: False
        cascade : bool, optional, default: True
        gdf_id_list : list or tuple, default: None
            Can be either list of GDF IDs of which to return SpikeTrain(s) or
            a tuple specifying the range (includes boundaries [start, stop])
            of GDF IDs. Must be specified if the GDF file contains neuron
            IDs, the default None then raises an error. Specify an empty
            list [] to retrieve the spike trains of all neurons with at least
            one spike.
        time_unit : Quantity (time), optional, default: quantities.ms
            The time unit of recorded time stamps.
        t_start : Quantity (time), default: None
            Start time of SpikeTrain. t_start must be specified, the default None
            raises an error.
        t_stop : Quantity (time), default: None
            Stop time of SpikeTrain. t_stop must be specified, the default None
            raises an error.
        id_column : int, optional, default: 0
            Column index of neuron IDs.
        time_column : int, optional, default: 1
            Column index of time stamps.

        Returns
        -------
        seg : Segment
            The Segment contains one SpikeTrain for each ID in gdf_id_list.
        """

        if isinstance(gdf_id_list, tuple):
            gdf_id_list = range(gdf_id_list[0], gdf_id_list[1] + 1)

        # __read_spiketrains() needs a list of IDs
        if gdf_id_list is None:
            gdf_id_list = [None]

        # create an empty Segment and fill in the spike trains
        seg = Segment()
        seg.spiketrains = self.__read_spiketrains(gdf_id_list,
                                                  time_unit, t_start,
                                                  t_stop,
                                                  id_column, time_column,
                                                  **args)

        return seg
Developer: BerndSchuller, Project: UP-Tasks, Lines: 55, Source: gdfio.py
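For readers who want to call the method above, here is a hedged usage sketch. The class name GdfIO, its import path, and its constructor argument are assumptions inferred from the source file gdfio.py; the data file name is a placeholder.

import quantities as pq
from gdfio import GdfIO  # assumed class name and import path; adjust to the actual module

io = GdfIO(filename="spike_data.gdf")            # placeholder file name, assumed constructor
seg = io.read_segment(gdf_id_list=(1, 3),        # inclusive range: IDs 1, 2 and 3
                      t_start=0 * pq.ms,
                      t_stop=1000 * pq.ms)
print(len(seg.spiketrains))                      # one SpikeTrain per requested ID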

Example 3: proc_src_condition

# Required module import: from neo.core import Segment [as alias]
# Or: from neo.core.Segment import spiketrains [as alias]
def proc_src_condition(rep, filename, ADperiod, side, block):
    '''Get the condition in a src file that has been processed by the official
    matlab function.  See proc_src for details'''

    chx = block.channel_indexes[0]

    stim = rep['stim'].flatten()
    params = [str(res[0]) for res in stim['paramName'][0].flatten()]
    values = [res for res in stim['paramVal'][0].flatten()]
    stim = dict(zip(params, values))
    sweepLen = rep['sweepLen'][0, 0]

    if not len(rep):
        return

    unassignedSpikes = rep['unassignedSpikes'].flatten()
    if len(unassignedSpikes):
        damaIndexes = [res[0, 0] for res in unassignedSpikes['damaIndex']]
        timeStamps = [res[0, 0] for res in unassignedSpikes['timeStamp']]
        spikeunit = [res.flatten() for res in unassignedSpikes['spikes']]
        respWin = np.array([], dtype=np.int32)
        trains = proc_src_condition_unit(spikeunit, sweepLen, side, ADperiod,
                                         respWin, damaIndexes, timeStamps,
                                         filename)
        chx.units[0].spiketrains.extend(trains)
        atrains = [trains]
    else:
        damaIndexes = []
        timeStamps = []
        atrains = []

    clusters = rep['clusters'].flatten()
    if len(clusters):
        IdStrings = [res[0] for res in clusters['IdString']]
        sweepLens = [res[0, 0] for res in clusters['sweepLen']]
        respWins = [res.flatten() for res in clusters['respWin']]
        spikeunits = []
        for cluster in clusters['sweeps']:
            if len(cluster):
                spikes = [res.flatten() for res in
                          cluster['spikes'].flatten()]
            else:
                spikes = []
            spikeunits.append(spikes)
    else:
        IdStrings = []
        sweepLens = []
        respWins = []
        spikeunits = []

    for unit, IdString in zip(chx.units[1:], IdStrings):
        unit.name = str(IdString)

    fullunit = zip(spikeunits, chx.units[1:], sweepLens, respWins)
    for spikeunit, unit, sweepLen, respWin in fullunit:
        trains = proc_src_condition_unit(spikeunit, sweepLen, side, ADperiod,
                                         respWin, damaIndexes, timeStamps,
                                         filename)
        atrains.append(trains)
        unit.spiketrains.extend(trains)

    atrains = zip(*atrains)
    for trains in atrains:
        segment = Segment(file_origin=filename, feature_type=-1,
                          go_by_closest_unit_center=False,
                          include_unit_bounds=False, **stim)
        block.segments.append(segment)
        segment.spiketrains = trains
Developer: INM-6, Project: python-neo, Lines: 70, Source: test_brainwaresrcio.py

Example 4: proc_f32

# Required module import: from neo.core import Segment [as alias]
# Or: from neo.core.Segment import spiketrains [as alias]
def proc_f32(filename):
    """Load an f32 file that has already been processed by the official matlab
    file converter.  That matlab data is saved to an m-file, which is then
    converted to a numpy '.npz' file.  This numpy file is the file actually
    loaded.  This function converts it to a neo block and returns the block.
    This block can be compared to the block produced by BrainwareF32IO to
    make sure BrainwareF32IO is working properly

    block = proc_f32(filename)

    filename: The file name of the numpy file to load.  It should end with
    '*_f32_py?.npz'. This will be converted to a neo 'file_origin' property
    with the value '*.f32', so the filename to compare should fit that pattern.
    'py?' should be 'py2' for the python 2 version of the numpy file or 'py3'
    for the python 3 version of the numpy file.

    example: filename = 'file1_f32_py2.npz'
             f32 file name = 'file1.f32'
    """

    filenameorig = os.path.basename(filename[:-12] + ".f32")

    # create the objects to store other objects
    block = Block(file_origin=filenameorig)
    rcg = RecordingChannelGroup(file_origin=filenameorig)
    rcg.channel_indexes = np.array([], dtype=np.int)
    rcg.channel_names = np.array([], dtype="S")
    unit = Unit(file_origin=filenameorig)

    # load objects into their containers
    block.recordingchannelgroups.append(rcg)
    rcg.units.append(unit)

    try:
        with np.load(filename) as f32obj:
            f32file = f32obj.items()[0][1].flatten()
    except IOError as exc:
        if "as a pickle" in exc.message:
            block.create_many_to_one_relationship()
            return block
        else:
            raise

    sweeplengths = [res[0, 0].tolist() for res in f32file["sweeplength"]]
    stims = [res.flatten().tolist() for res in f32file["stim"]]

    sweeps = [res["spikes"].flatten() for res in f32file["sweep"] if res.size]

    fullf32 = zip(sweeplengths, stims, sweeps)
    for sweeplength, stim, sweep in fullf32:
        for trainpts in sweep:
            if trainpts.size:
                trainpts = trainpts.flatten().astype("float32")
            else:
                trainpts = []

            paramnames = ["Param%s" % i for i in range(len(stim))]
            params = dict(zip(paramnames, stim))
            train = SpikeTrain(trainpts, units=pq.ms, t_start=0, t_stop=sweeplength, file_origin=filenameorig)

            segment = Segment(file_origin=filenameorig, **params)
            segment.spiketrains = [train]
            unit.spiketrains.append(train)
            block.segments.append(segment)

    block.create_many_to_one_relationship()

    return block
Developer: bal47, Project: python-neo, Lines: 70, Source: test_brainwaref32io.py
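Since proc_f32 returns a fully populated Block, the spiketrains attribute is reached through the standard neo container hierarchy. A short traversal sketch (the file name is the placeholder used in the docstring above):

block = proc_f32("file1_f32_py2.npz")        # placeholder file name
for segment in block.segments:
    for train in segment.spiketrains:
        print(segment.file_origin, train.t_stop, len(train))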

Example 5: read_segment

# Required module import: from neo.core import Segment [as alias]
# Or: from neo.core.Segment import spiketrains [as alias]
    def read_segment(self, gid_list=None, time_unit=pq.ms, t_start=None,
                     t_stop=None, sampling_period=None, id_column_dat=0,
                     time_column_dat=1, value_columns_dat=2,
                     id_column_gdf=0, time_column_gdf=1, value_types=None,
                     value_units=None, lazy=False):
        """
        Reads a Segment which contains SpikeTrain(s) with specified neuron IDs
        from the GDF data.

        Arguments
        ----------
        gid_list : list, default: None
            A list of GDF IDs of which to return SpikeTrain(s). gid_list must
            be specified if the GDF file contains neuron IDs, the default None
            then raises an error. Specify an empty list [] to retrieve the spike
            trains of all neurons.
        time_unit : Quantity (time), optional, default: quantities.ms
            The time unit of recorded time stamps in DAT as well as GDF files.
        t_start : Quantity (time), optional, default: 0 * pq.ms
            Start time of SpikeTrain.
        t_stop : Quantity (time), default: None
            Stop time of SpikeTrain. t_stop must be specified, the default None
            raises an error.
        sampling_period : Quantity (frequency), optional, default: None
            Sampling period of the recorded data.
        id_column_dat : int, optional, default: 0
            Column index of neuron IDs in the DAT file.
        time_column_dat : int, optional, default: 1
            Column index of time stamps in the DAT file.
        value_columns_dat : int, optional, default: 2
            Column index of the analog values recorded in the DAT file.
        id_column_gdf : int, optional, default: 0
            Column index of neuron IDs in the GDF file.
        time_column_gdf : int, optional, default: 1
            Column index of time stamps in the GDF file.
        value_types : str, optional, default: None
            Nest data type of the analog values recorded, eg.'V_m', 'I', 'g_e'
        value_units : Quantity (amplitude), default: None
            The physical unit of the recorded signal values.
        lazy : bool, optional, default: False

        Returns
        -------
        seg : Segment
            The Segment contains one SpikeTrain and one AnalogSignal for
            each ID in gid_list.
        """
        assert not lazy, 'Do not support lazy'

        if isinstance(gid_list, tuple):
            if gid_list[0] > gid_list[1]:
                raise ValueError('The second entry in gid_list must be '
                                 'greater or equal to the first entry.')
            gid_list = range(gid_list[0], gid_list[1] + 1)

        # __read_xxx() needs a list of IDs
        if gid_list is None:
            gid_list = [None]

        # create an empty Segment
        seg = Segment(file_origin=",".join(self.filenames))
        seg.file_datetime = datetime.fromtimestamp(os.stat(self.filenames[0]).st_mtime)
        # todo: rather than take the first file for the timestamp, we should take the oldest
        #       in practice, there won't be much difference

        # Load analogsignals and attach to Segment
        if 'dat' in self.avail_formats:
            seg.analogsignals = self.__read_analogsignals(
                gid_list,
                time_unit,
                t_start,
                t_stop,
                sampling_period=sampling_period,
                id_column=id_column_dat,
                time_column=time_column_dat,
                value_columns=value_columns_dat,
                value_types=value_types,
                value_units=value_units)
        if 'gdf' in self.avail_formats:
            seg.spiketrains = self.__read_spiketrains(
                gid_list,
                time_unit,
                t_start,
                t_stop,
                id_column=id_column_gdf,
                time_column=time_column_gdf)

        return seg
Developer: INM-6, Project: python-neo, Lines: 90, Source: nestio.py
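A hedged usage sketch for this reader: it assumes the method above belongs to the NestIO class shipped with neo (neo.io.NestIO) and that the constructor accepts a filenames argument, as suggested by the code; the file name itself is a placeholder.

import quantities as pq
from neo.io import NestIO   # assumed to be the class containing read_segment above

io = NestIO(filenames="spike_output-1234-0.gdf")   # placeholder file name
seg = io.read_segment(gid_list=[],                 # [] -> all neurons with at least one spike
                      t_start=0 * pq.ms,
                      t_stop=500 * pq.ms)
for st in seg.spiketrains:
    print(len(st), st.t_stop)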

Example 6: read_block

# Required module import: from neo.core import Segment [as alias]
# Or: from neo.core.Segment import spiketrains [as alias]
    def read_block(self, lazy=False, cascade=True, signal_names=None, signal_units=None):
        block = Block(file_origin=self.filename)
        segment = Segment(name="default")
        block.segments.append(segment)
        segment.block = block

        spike_times = defaultdict(list)
        spike_file = self.filename + ".dat"
        print("SPIKEFILE: {}".format(spike_file))
        if os.path.exists(spike_file):
            print("Loading data from {}".format(spike_file))
            with open(spike_file, 'r') as fp:
                for line in fp:
                    if line[0] != '#':
                        entries = line.strip().split()
                        if len(entries) > 1:
                            time = float(entries[0])
                            for id in entries[1:]:
                                spike_times[id].append(time)
                t_stop = float(entries[0])
            if spike_times:
                min_id = min(map(int, spike_times))
            segment.spiketrains = [SpikeTrain(times, t_stop=t_stop, units="ms",
                                              id=int(id), source_index=int(id) - min_id)
                                   for id, times in spike_times.items()]
        signal_files = glob("{}_state.*.dat".format(self.filename))
        print(signal_files)
        for signal_file in signal_files:
            print("Loading data from {}".format(signal_file))
            population = os.path.basename(signal_file).split(".")[1]
            try:
                data = np.loadtxt(signal_file, delimiter=", ")
            except ValueError:
                print("Couldn't load data from file {}".format(signal_file))
                continue
            t_start = data[0, 1]
            ids = data[:, 0]
            unique_ids = np.unique(ids)
            for column in range(2, data.shape[1]):
                if signal_names is None:
                    signal_name = "signal{}".format(column - 2)
                else:
                    signal_name = signal_names[column - 2]
                if signal_units is None:
                    units = "mV"  # seems like a reasonable default
                else:
                    units = signal_units[column - 2]
                signals_by_id = {}
                for id in unique_ids:
                    times = data[ids==id, 1]
                    unique_times, idx = np.unique(times, return_index=True)  # some time points are represented twice
                    signals_by_id[id] = data[ids==id, column][idx]
                channel_ids = np.array(list(signals_by_id.keys()))
                if len(unique_times) > 1:
                    sampling_period = unique_times[1] - unique_times[0]
                    assert sampling_period != 0.0, sampling_period
                    signal_lengths = np.array([s.size for s in signals_by_id.values()])
                    min_length = signal_lengths.min()
                    if not (signal_lengths == signal_lengths[0]).all():
                        print("Warning: signals have different sizes: min={}, max={}".format(min_length,
                                                                                             signal_lengths.max()))
                        print("Truncating to length {}".format(min_length))
                    signal = AnalogSignal(np.vstack([s[:min_length] for s in signals_by_id.values()]).T,
                                          units=units,
                                          t_start=t_start * pq.ms,
                                          sampling_period=sampling_period*pq.ms,
                                          name=signal_name,
                                          population=population)
                    #signal.channel_index = ChannelIndex(np.arange(signal.shape[1], int),
                    #                                    channel_ids=channel_ids)
                    signal.channel_index = channel_ids
                    segment.analogsignals.append(signal)

        return block
Developer: INCF, Project: NineML_demo_2016, Lines: 76, Source: ninemltoolkitio.py


Note: The neo.core.Segment.spiketrains examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets were selected from open-source projects contributed by their respective developers, and copyright for the source code remains with the original authors. Please refer to each project's License before distributing or using the code, and do not reproduce this article without permission.