

Python Segment.annotate Method Code Examples

This article collects typical usage examples of the Python method neo.core.Segment.annotate. If you are wondering what exactly Segment.annotate does, how to use it, or where to find worked examples, the curated code samples below should help. You can also explore further usage examples of neo.core.Segment, the class this method belongs to.


The following presents 13 code examples of the Segment.annotate method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
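
Before the examples, here is a minimal, self-contained sketch of what Segment.annotate does: it stores arbitrary key/value metadata in the object's annotations dictionary. The segment name and annotation keys below are invented for illustration.

# Minimal sketch of Segment.annotate (illustrative names and keys)
import quantities as pq
from neo.core import Segment

seg = Segment(name='demo')                            # hypothetical name
seg.annotate(subject='rat01', condition='baseline')   # keyword form
seg.annotate(**{'trial': 3, 'offset': 0.5 * pq.mV})   # dict-unpacking form
print(seg.annotations)
# -> {'subject': 'rat01', 'condition': 'baseline', 'trial': 3, 'offset': array(0.5) * mV}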

Example 1: read_segment

# Required import: from neo.core import Segment [as alias]
# Or: from neo.core.Segment import annotate [as alias]
    def read_segment(self, lazy=False, cascade=True, group=0, series=0):
        seg = Segment(name='test')
        if cascade:
            tree = getbyroute(self.pul.tree, [0, group, series])
            for sw, sweep in enumerate(tree['children']):
                # the timer of the first sweep defines the segment start time
                if sw == 0:
                    starttime = pq.Quantity(float(sweep['contents'].swTimer), 's')
                for ch, channel in enumerate(sweep['children']):
                    sig = self.read_analogsignal(group=group, series=series,
                                                 sweep=sw, channel=ch)
                    # copy every sweep attribute (except the bookkeeping
                    # 'readlist') onto the signal as annotations; list() is
                    # needed because dict views cannot be mutated
                    annotations = list(sweep['contents'].__dict__.keys())
                    annotations.remove('readlist')
                    for a in annotations:
                        d = {a: str(sweep['contents'].__dict__[a])}
                        sig.annotate(**d)
                    sig.t_start = pq.Quantity(float(sig.annotations['swTimer']), 's') - starttime
                    seg.analogsignals.append(sig)
            # annotate the segment itself with the series-level attributes
            annotations = list(tree['contents'].__dict__.keys())
            annotations.remove('readlist')
            for a in annotations:
                d = {a: str(tree['contents'].__dict__[a])}
                seg.annotate(**d)
        create_many_to_one_relationship(seg)
        return seg
Developer ID: psilentp, Project: Analysis, Lines: 32, Source: heka_io.py
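
Example 1 annotates one key per call through dict unpacking. Since annotate accepts any number of keyword arguments, each loop can also be collapsed into a single call; a sketch, assuming the same tree structure as above:

# Equivalent single call for the segment-level loop (sketch)
attrs = {a: str(v) for a, v in tree['contents'].__dict__.items()
         if a != 'readlist'}
seg.annotate(**attrs)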

Example 2: test_segment_write

# Required import: from neo.core import Segment [as alias]
# Or: from neo.core.Segment import annotate [as alias]
    def test_segment_write(self):
        block = Block(name=self.rword())
        segment = Segment(name=self.rword(), description=self.rword())
        block.segments.append(segment)
        self.write_and_compare([block])

        segment.annotate(**self.rdict(2))
        self.write_and_compare([block])
Developer ID: theunissenlab, Project: python-neo, Lines: 10, Source: test_nixio.py
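
The test above leans on the write_and_compare helper of its test class. A stand-alone round trip with explicit annotations might look like the following sketch; the file name and annotation keys are assumptions:

from neo.core import Block, Segment
from neo.io import NixIO

block = Block(name='demo')
seg = Segment(name='seg0', description='round-trip test')
block.segments.append(seg)
seg.annotate(subject='rat01', condition='baseline')  # assumed keys

io = NixIO('roundtrip.nix', mode='ow')   # hypothetical path
io.write_block(block)
io.close()

io = NixIO('roundtrip.nix', mode='ro')
loaded = io.read_block()
io.close()
assert loaded.segments[0].annotations['subject'] == 'rat01'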

Example 3: read_segment

# Required import: from neo.core import Segment [as alias]
# Or: from neo.core.Segment import annotate [as alias]
    def read_segment(self, n_start, n_stop, chlist=None, lazy=False, cascade=True):
        """Reads a Segment from the file and stores in database.

        The Segment will contain one AnalogSignal for each channel
        and will go from n_start to n_stop (in samples).

        Arguments:
            n_start : time in samples that the Segment begins
            n_stop : time in samples that the Segment ends

        Python indexing is used, so n_stop is not inclusive.

        Returns a Segment object containing the data.
        """
        # If no channel numbers provided, get all of them
        if chlist is None:
            chlist = self.loader.get_neural_channel_numbers()

        # Conversion from bits to full_range units
        conversion = self.full_range / 2**(8*self.header.sample_width)

        # Create the Segment
        seg = Segment(file_origin=self.filename)
        t_start = float(n_start) / self.header.f_samp
        t_stop = float(n_stop) / self.header.f_samp
        seg.annotate(t_start=t_start)
        seg.annotate(t_stop=t_stop)

        # Load data from each channel and store
        for ch in chlist:
            if lazy:
                sig = np.array([]) * conversion
            else:
                # Get the data from the loader
                sig = np.array(
                    self.loader._get_channel(ch)[n_start:n_stop]) * conversion

            # Create an AnalogSignal with the data in it
            anasig = AnalogSignal(signal=sig,
                sampling_rate=self.header.f_samp*pq.Hz,
                t_start=t_start*pq.s, file_origin=self.filename,
                description='Channel %d from %f to %f' % (ch, t_start, t_stop),
                channel_index=int(ch))

            if lazy:
                anasig.lazy_shape = n_stop - n_start

            # Link the signal to the segment
            seg.analogsignals.append(anasig)

            # Link the signal to the recording channel from which it came
            #rc = self.channel_number_to_recording_channel[ch]
            #rc.analogsignals.append(anasig)

        return seg
Developer ID: ChrisNolan1992, Project: python-neo, Lines: 58, Source: blackrockio.py
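
Because t_start and t_stop are annotated in seconds while the arguments are given in samples, the caller can read the converted values straight back from the annotations; a usage sketch, with the file name invented:

io = BlackrockIO('data.ns5')                    # hypothetical file
seg = io.read_segment(n_start=0, n_stop=30000)  # samples; n_stop exclusive
print(seg.annotations['t_start'], seg.annotations['t_stop'])  # seconds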

Example 4: read_segment

# Required import: from neo.core import Segment [as alias]
# Or: from neo.core.Segment import annotate [as alias]
    def read_segment(self, lazy=False, cascade=True, group=0, series=0):
        seg = Segment(name='test')
        if cascade:
            tree = getbyroute(self.pul.tree, [0, group, series])
            for sw, sweep in enumerate(tree['children']):
                if sw == 0:
                    starttime = pq.Quantity(float(sweep['contents'].swTimer), 's')
                for ch, channel in enumerate(sweep['children']):
                    sig = self.read_analogsignal(group=group, series=series,
                                                 sweep=sw, channel=ch)
                    annotations = list(sweep['contents'].__dict__.keys())
                    annotations.remove('readlist')
                    for a in annotations:
                        d = {a: str(sweep['contents'].__dict__[a])}
                        sig.annotate(**d)
                    sig.t_start = pq.Quantity(float(sig.annotations['swTimer']), 's') - starttime
                    seg.analogsignals.append(sig)
            annotations = list(tree['contents'].__dict__.keys())
            annotations.remove('readlist')
            for a in annotations:
                d = {a: str(tree['contents'].__dict__[a])}
                seg.annotate(**d)
        create_many_to_one_relationship(seg)
        ### add protocols to signals
        for sig_index, sig in enumerate(seg.analogsignals):
            pgf_index = sig.annotations['pgf_index']
            st_rec = self.pgf.tree['children'][pgf_index]['contents']
            chnls = [ch for ch in self.pgf.tree['children'][pgf_index]['children']]
            for ch_index, chnl in enumerate(chnls):
                ep_start = sig.t_start
                for se_epoch_index, se_epoch in enumerate(chnl['children']):
                    se_rec = se_epoch['contents']
                    se_duration = pq.Quantity(float(se_rec.seDuration), 's')
                    # use the epoch voltage unless the channel holding
                    # potential is the voltage source
                    if not int(se_rec.seVoltageSource):
                        se_voltage = pq.Quantity(float(se_rec.seVoltage), 'V')
                    else:
                        se_voltage = pq.Quantity(float(chnl['contents'].chHolding), 'V')
                    epoch = neo.Epoch(ep_start, se_duration, 'protocol_epoch',
                                      value=se_voltage, channel_index=ch_index)
                    fully_annototate(chnl, epoch)
                    epoch.annotations['sig_index'] = sig_index
                    ep_start = ep_start + se_duration
                    seg.epochs.append(epoch)
        return seg
Developer ID: psilentp, Project: syanptic_analysis, Lines: 51, Source: heka_io.py
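
Each protocol epoch is tied back to its source signal through the sig_index annotation; a sketch of regrouping the epochs per signal afterwards, assuming seg is the segment returned above:

# group protocol epochs by the signal they belong to (sketch)
epochs_by_signal = {}
for ep in seg.epochs:
    epochs_by_signal.setdefault(ep.annotations['sig_index'], []).append(ep)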

Example 5: read_segment

# Required import: from neo.core import Segment [as alias]
# Or: from neo.core.Segment import annotate [as alias]
    def read_segment(self, lazy=False, cascade=True):
        data, metadata = self._read_file_contents()
        annotations = dict((k, metadata.get(k, 'unknown'))
                           for k in ("label", "variable", "first_id", "last_id"))
        seg = Segment(**annotations)
        if cascade:
            if metadata['variable'] == 'spikes':
                for i in range(metadata['first_index'], metadata['last_index'] + 1):
                    spiketrain = self._extract_spikes(data, metadata, i, lazy)
                    if spiketrain is not None:
                        seg.spiketrains.append(spiketrain)
                # store dt for SpikeTrains only; for AnalogSignals it can be
                # retrieved from sampling_period
                seg.annotate(dt=metadata['dt'])
            else:
                signal = self._extract_signals(data, metadata, lazy)
                if signal is not None:
                    seg.analogsignals.append(signal)
            seg.create_many_to_one_relationship()
        return seg
Developer ID: CINPLA, Project: python-neo, Lines: 19, Source: pynnio.py
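
SpikeTrain objects carry no sampling period, so the dt annotation is the only place where the time step survives; a sketch of reading it back, with the file name invented:

io = PyNNTextIO(filename='spikes.txt')  # hypothetical file
seg = io.read_segment()
if seg.spiketrains:
    print('recorded with dt =', seg.annotations['dt'])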

Example 6: create_all_annotated

# Required import: from neo.core import Segment [as alias]
# Or: from neo.core.Segment import annotate [as alias]
    def create_all_annotated(cls):
        times = cls.rquant(1, pq.s)
        signal = cls.rquant(1, pq.V)
        blk = Block()
        blk.annotate(**cls.rdict(3))

        seg = Segment()
        seg.annotate(**cls.rdict(4))
        blk.segments.append(seg)

        asig = AnalogSignal(signal=signal, sampling_rate=pq.Hz)
        asig.annotate(**cls.rdict(2))
        seg.analogsignals.append(asig)

        isig = IrregularlySampledSignal(times=times, signal=signal,
                                        time_units=pq.s)
        isig.annotate(**cls.rdict(2))
        seg.irregularlysampledsignals.append(isig)

        epoch = Epoch(times=times, durations=times)
        epoch.annotate(**cls.rdict(4))
        seg.epochs.append(epoch)

        event = Event(times=times)
        event.annotate(**cls.rdict(4))
        seg.events.append(event)

        spiketrain = SpikeTrain(times=times, t_stop=pq.s, units=pq.s)
        d = cls.rdict(6)
        d["quantity"] = pq.Quantity(10, "mV")
        d["qarray"] = pq.Quantity(range(10), "mA")
        spiketrain.annotate(**d)
        seg.spiketrains.append(spiketrain)

        chx = ChannelIndex(name="achx", index=[1, 2], channel_ids=[0, 10])
        chx.annotate(**cls.rdict(5))
        blk.channel_indexes.append(chx)

        unit = Unit()
        unit.annotate(**cls.rdict(2))
        chx.units.append(unit)

        return blk
Developer ID: theunissenlab, Project: python-neo, Lines: 45, Source: test_nixio.py
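
Note that annotate stores quantities and quantity arrays just as readily as plain values, which is what the NIX round-trip tests exercise; a sketch of inspecting the spiketrain annotated above, assuming the classmethod has been called:

blk = TestClass.create_all_annotated()   # hypothetical test-class name
st = blk.segments[0].spiketrains[0]
assert st.annotations['quantity'] == pq.Quantity(10, 'mV')
assert len(st.annotations['qarray']) == 10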

Example 7: read_segment

# Required import: from neo.core import Segment [as alias]
# Or: from neo.core.Segment import annotate [as alias]
    def read_segment(self, import_neuroshare_segment = True,
                     lazy=False, cascade=True):
        """
        Arguments:
            import_neuroshare_segment: import neuroshare segment as SpikeTrain with associated waveforms or not imported at all.

        """
        seg = Segment(file_origin=os.path.basename(self.filename))

        if sys.platform.startswith('win'):
            neuroshare = ctypes.windll.LoadLibrary(self.dllname)
        elif sys.platform.startswith('linux'):
            neuroshare = ctypes.cdll.LoadLibrary(self.dllname)
        # elif sys.platform.startswith('darwin'): not implemented
        neuroshare = DllWithError(neuroshare)

        # API version
        info = ns_LIBRARYINFO()
        neuroshare.ns_GetLibraryInfo(ctypes.byref(info) , ctypes.sizeof(info))
        seg.annotate(neuroshare_version = str(info.dwAPIVersionMaj)+'.'+str(info.dwAPIVersionMin))

        if not cascade:
            return seg


        # open file
        hFile = ctypes.c_uint32(0)
        neuroshare.ns_OpenFile(ctypes.c_char_p(self.filename) ,ctypes.byref(hFile))
        fileinfo = ns_FILEINFO()
        neuroshare.ns_GetFileInfo(hFile, ctypes.byref(fileinfo) , ctypes.sizeof(fileinfo))
        
        # read all entities
        for dwEntityID in range(fileinfo.dwEntityCount):
            entityInfo = ns_ENTITYINFO()
            neuroshare.ns_GetEntityInfo( hFile, dwEntityID, ctypes.byref(entityInfo), ctypes.sizeof(entityInfo))

            # EVENT
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_EVENT':
                pEventInfo = ns_EVENTINFO()
                neuroshare.ns_GetEventInfo ( hFile,  dwEntityID,  ctypes.byref(pEventInfo), ctypes.sizeof(pEventInfo))

                if pEventInfo.dwEventType == 0: #TEXT
                    pData = ctypes.create_string_buffer(pEventInfo.dwMaxDataLength)
                elif pEventInfo.dwEventType == 1:#CVS
                    pData = ctypes.create_string_buffer(pEventInfo.dwMaxDataLength)
                elif pEventInfo.dwEventType == 2:# 8bit
                    pData = ctypes.c_byte(0)
                elif pEventInfo.dwEventType == 3:# 16bit
                    pData = ctypes.c_int16(0)
                elif pEventInfo.dwEventType == 4:# 32bit
                    pData = ctypes.c_int32(0)
                pdTimeStamp  = ctypes.c_double(0.)
                pdwDataRetSize = ctypes.c_uint32(0)

                ea = Event(name = str(entityInfo.szEntityLabel),)
                if not lazy:
                    times = [ ]
                    labels = [ ]
                    for dwIndex in range(entityInfo.dwItemCount ):
                        neuroshare.ns_GetEventData ( hFile, dwEntityID, dwIndex,
                                            ctypes.byref(pdTimeStamp), ctypes.byref(pData),
                                            ctypes.sizeof(pData), ctypes.byref(pdwDataRetSize) )
                        times.append(pdTimeStamp.value)
                        labels.append(str(pData.value))
                    ea.times = times*pq.s
                    ea.labels = np.array(labels, dtype ='S')
                else :
                    ea.lazy_shape = entityInfo.dwItemCount
                seg.eventarrays.append(ea)

            # analog
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_ANALOG':
                pAnalogInfo = ns_ANALOGINFO()

                neuroshare.ns_GetAnalogInfo( hFile, dwEntityID,ctypes.byref(pAnalogInfo),ctypes.sizeof(pAnalogInfo) )
                dwIndexCount = entityInfo.dwItemCount

                if lazy:
                    signal = [ ]*pq.Quantity(1, pAnalogInfo.szUnits)
                else:
                    pdwContCount = ctypes.c_uint32(0)
                    pData = np.zeros( (entityInfo.dwItemCount,), dtype = 'float64')
                    total_read = 0
                    while total_read< entityInfo.dwItemCount:
                        dwStartIndex = ctypes.c_uint32(total_read)
                        dwStopIndex = ctypes.c_uint32(entityInfo.dwItemCount - total_read)
                        
                        neuroshare.ns_GetAnalogData( hFile,  dwEntityID,  dwStartIndex,
                                     dwStopIndex, ctypes.byref( pdwContCount) , pData[total_read:].ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
                        total_read += pdwContCount.value
                            
                    signal =  pq.Quantity(pData, units=pAnalogInfo.szUnits, copy = False)

                #t_start
                dwIndex = 0
                pdTime = ctypes.c_double(0)
                neuroshare.ns_GetTimeByIndex( hFile,  dwEntityID,  dwIndex, ctypes.byref(pdTime))

#......... part of the code omitted here .........
Developer ID: CINPLA, Project: python-neo, Lines: 103, Source: neurosharectypesio.py

Example 8: read_segment

# Required import: from neo.core import Segment [as alias]
# Or: from neo.core.Segment import annotate [as alias]
    def read_segment(self, lazy=False, cascade=True, load_spike_waveform=True):
        """
        Read in a segment.

        Arguments:
            load_spike_waveform : load or not waveform of spikes (default True)

        """

        fid = open(self.filename, 'rb')
        globalHeader = HeaderReader(fid, GlobalHeader).read_f(offset=0)

        # metadata
        seg = Segment()
        seg.rec_datetime = datetime.datetime(
            globalHeader.pop('Year'),
            globalHeader.pop('Month'),
            globalHeader.pop('Day'),
            globalHeader.pop('Hour'),
            globalHeader.pop('Minute'),
            globalHeader.pop('Second')
        )
        seg.file_origin = os.path.basename(self.filename)

        for key, val in iteritems(globalHeader):
            seg.annotate(**{key: val})

        if not cascade:
            return seg

        ## Step 1 : read headers
        # dsp channels header = spikes and waveforms
        dspChannelHeaders = {}
        maxunit = 0
        maxchan = 0
        for _ in range(globalHeader['NumDSPChannels']):
            # channel is 1 based
            channelHeader = HeaderReader(fid, ChannelHeader).read_f(offset=None)
            channelHeader['Template'] = np.array(channelHeader['Template']).reshape((5,64))
            channelHeader['Boxes'] = np.array(channelHeader['Boxes']).reshape((5,2,4))
            dspChannelHeaders[channelHeader['Channel']] = channelHeader
            maxunit = max(channelHeader['NUnits'], maxunit)
            maxchan = max(channelHeader['Channel'], maxchan)

        # event channel header
        eventHeaders = { }
        for _ in range(globalHeader['NumEventChannels']):
            eventHeader = HeaderReader(fid, EventHeader).read_f(offset=None)
            eventHeaders[eventHeader['Channel']] = eventHeader

        # slow channel header = signal
        slowChannelHeaders = {}
        for _ in range(globalHeader['NumSlowChannels']):
            slowChannelHeader = HeaderReader(fid, SlowChannelHeader).read_f(offset=None)
            slowChannelHeaders[slowChannelHeader['Channel']] = slowChannelHeader

        ## Step 2 : a first loop for counting size
        # signal
        nb_samples = np.zeros(len(slowChannelHeaders))
        sample_positions = np.zeros(len(slowChannelHeaders))
        t_starts = np.zeros(len(slowChannelHeaders), dtype='f')

        #spiketimes and waveform
        nb_spikes = np.zeros((maxchan+1, maxunit+1) ,dtype='i')
        wf_sizes = np.zeros((maxchan+1, maxunit+1, 2) ,dtype='i')

        # eventarrays
        nb_events = { }
        #maxstrsizeperchannel = { }
        for chan, h in iteritems(eventHeaders):
            nb_events[chan] = 0
            #maxstrsizeperchannel[chan] = 0

        start = fid.tell()
        while fid.tell() != -1:
            # read block header
            dataBlockHeader = HeaderReader(fid, DataBlockHeader).read_f(offset=None)
            if dataBlockHeader is None:
                break
            chan = dataBlockHeader['Channel']
            unit = dataBlockHeader['Unit']
            n1, n2 = (dataBlockHeader['NumberOfWaveforms'],
                      dataBlockHeader['NumberOfWordsInWaveform'])
            time = (dataBlockHeader['UpperByteOf5ByteTimestamp'] * 2.**32 +
                    dataBlockHeader['TimeStamp'])

            if dataBlockHeader['Type'] == 1:
                # spike with waveform
                nb_spikes[chan, unit] += 1
                wf_sizes[chan, unit, :] = [n1, n2]
                fid.seek(n1 * n2 * 2, 1)
            elif dataBlockHeader['Type'] == 4:
                # event
                nb_events[chan] += 1
            elif dataBlockHeader['Type'] == 5:
                # continuous signal
                fid.seek(n2 * 2, 1)
                if n2 > 0:
                    nb_samples[chan] += n2
                if nb_samples[chan] == 0:
                    t_starts[chan] = time

#......... part of the code omitted here .........
Developer ID: bal47, Project: python-neo, Lines: 103, Source: plexonio.py
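
After the datetime fields are popped, globalHeader is a plain dict, so the per-key annotation loop can also be written as one call:

# Equivalent single call (sketch)
seg.annotate(**globalHeader)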

Example 9: read_segment

# Required import: from neo.core import Segment [as alias]
# Or: from neo.core.Segment import annotate [as alias]
    def read_segment(self, lazy=False, cascade=True):
        fid = open(self.filename, 'rb')
        global_header = HeaderReader(fid, GlobalHeader).read_f(offset=0)
        #~ print(global_header)
        #~ print('version', global_header['version'])
        seg = Segment()
        seg.file_origin = os.path.basename(self.filename)
        seg.annotate(neuroexplorer_version=global_header['version'])
        seg.annotate(comment=global_header['comment'])

        if not cascade:
            return seg

        offset = 544
        for i in range(global_header['nvar']):
            entity_header = HeaderReader(fid, EntityHeader).read_f(
                offset=offset + i * 208)
            entity_header['name'] = entity_header['name'].replace('\x00', '')

            # print('i', i, entity_header['type'])

            if entity_header['type'] == 0:
                # neuron
                if lazy:
                    spike_times = [] * pq.s
                else:
                    spike_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                            shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                    spike_times = spike_times.astype('f8') / global_header[
                        'freq'] * pq.s
                sptr = SpikeTrain(
                    times=spike_times,
                    t_start=global_header['tbeg'] /
                    global_header['freq'] * pq.s,
                    t_stop=global_header['tend'] /
                    global_header['freq'] * pq.s,
                    name=entity_header['name'])
                if lazy:
                    sptr.lazy_shape = entity_header['n']
                sptr.annotate(channel_index=entity_header['WireNumber'])
                seg.spiketrains.append(sptr)

            if entity_header['type'] == 1:
                # event
                if lazy:
                    event_times = [] * pq.s
                else:
                    event_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                            shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                    event_times = event_times.astype('f8') / global_header[
                        'freq'] * pq.s
                labels = np.array([''] * event_times.size, dtype='S')
                evar = Event(times=event_times, labels=labels,
                             channel_name=entity_header['name'])
                if lazy:
                    evar.lazy_shape = entity_header['n']
                seg.events.append(evar)

            if entity_header['type'] == 2:
                # interval
                if lazy:
                    start_times = [] * pq.s
                    stop_times = [] * pq.s
                else:
                    start_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                            shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                    start_times = start_times.astype('f8') / global_header[
                        'freq'] * pq.s
                    stop_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                           shape=(entity_header['n']),
                                           offset=entity_header['offset'] +
                                           entity_header['n'] * 4)
                    stop_times = stop_times.astype('f') / global_header[
                        'freq'] * pq.s
                epar = Epoch(times=start_times,
                             durations=stop_times - start_times,
                             labels=np.array([''] * start_times.size,
                                             dtype='S'),
                             channel_name=entity_header['name'])
                if lazy:
                    epar.lazy_shape = entity_header['n']
                seg.epochs.append(epar)

            if entity_header['type'] == 3:
                # spiketrain and waveforms
                if lazy:
                    spike_times = [] * pq.s
                    waveforms = None
                else:

                    spike_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                            shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                    spike_times = spike_times.astype('f8') / global_header[
                        'freq'] * pq.s

                    waveforms = np.memmap(self.filename, np.dtype('i2'), 'r',
#......... part of the code omitted here .........
Developer ID: Silmathoron, Project: python-neo, Lines: 103, Source: neuroexplorerio.py
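
Annotations such as channel_index make it easy to select objects out of the segment afterwards; a short usage sketch with an invented wire number:

# spike trains recorded on wire 5 (sketch)
wire5 = [st for st in seg.spiketrains
         if st.annotations.get('channel_index') == 5]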

Example 10: read_segment

# Required import: from neo.core import Segment [as alias]
# Or: from neo.core.Segment import annotate [as alias]
    def read_segment(self, cascade=True, lazy=False, ):
        """
        Arguments:
        """
        f = StructFile(open(self.filename, 'rb'))

        # Name (the original character-by-character loop could raise an
        # IndexError on an all-space field; rstrip is equivalent and safe)
        f.seek(64, 0)
        surname = f.read(22).decode('ascii').rstrip(' ')
        firstname = f.read(20).decode('ascii').rstrip(' ')

        # Date
        f.seek(128, 0)
        day, month, year, hour, minute, sec = f.read_f('bbbbbb')
        rec_datetime = datetime.datetime(year + 1900, month, day, hour, minute,
                                         sec)

        f.seek(138, 0)
        Data_Start_Offset, Num_Chan, Multiplexer, Rate_Min, Bytes = f.read_f(
            'IHHHH')
        #~ print(Num_Chan, Bytes)

        # header version
        f.seek(175, 0)
        header_version, = f.read_f('b')
        assert header_version == 4

        seg = Segment(name=str(firstname + ' ' + surname),
                      file_origin=os.path.basename(self.filename))
        seg.annotate(surname=surname)
        seg.annotate(firstname=firstname)
        seg.annotate(rec_datetime=rec_datetime)

        if not cascade:
            f.close()
            return seg

        # area
        f.seek(176, 0)
        zone_names = ['ORDER', 'LABCOD', 'NOTE', 'FLAGS', 'TRONCA', 'IMPED_B',
                      'IMPED_E', 'MONTAGE',
                      'COMPRESS', 'AVERAGE', 'HISTORY', 'DVIDEO', 'EVENT A',
                      'EVENT B', 'TRIGGER']
        zones = {}
        for zname in zone_names:
            zname2, pos, length = f.read_f('8sII')
            zones[zname] = zname2, pos, length
            #~ print(zname2, pos, length)

        # reading raw data
        if not lazy:
            f.seek(Data_Start_Offset, 0)
            # np.frombuffer replaces the deprecated np.fromstring for binary data
            rawdata = np.frombuffer(f.read(), dtype='u' + str(Bytes))
            rawdata = rawdata.reshape((-1, Num_Chan))

        # Reading Code Info
        zname2, pos, length = zones['ORDER']
        f.seek(pos, 0)
        code = np.frombuffer(f.read(Num_Chan * 2), dtype='u2', count=Num_Chan)

        units = {-1: pq.nano * pq.V, 0: pq.uV, 1: pq.mV, 2: 1, 100: pq.percent,
                 101: pq.dimensionless, 102: pq.dimensionless}

        for c in range(Num_Chan):
            zname2, pos, length = zones['LABCOD']
            f.seek(pos + code[c] * 128 + 2, 0)

            label = f.read(6).strip(b"\x00").decode('ascii')
            ground = f.read(6).strip(b"\x00").decode('ascii')
            (logical_min, logical_max, logical_ground, physical_min,
             physical_max) = f.read_f('iiiii')
            k, = f.read_f('h')
            if k in units.keys():
                unit = units[k]
            else:
                unit = pq.uV

            f.seek(8, 1)
            sampling_rate, = f.read_f('H') * pq.Hz
            sampling_rate *= Rate_Min

            if lazy:
                signal = [] * unit
            else:
                factor = float(physical_max - physical_min) / float(
                    logical_max - logical_min + 1)
                signal = (rawdata[:, c].astype(
                    'f') - logical_ground) * factor * unit

            ana_sig = AnalogSignal(signal, sampling_rate=sampling_rate,
                                   name=str(label), channel_index=c)
            if lazy:
#......... part of the code omitted here .........
Developer ID: MartinHeroux, Project: ScientificallySound_files, Lines: 103, Source: micromedio.py
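
The surname, firstname and rec_datetime annotations put the recording metadata within easy reach of downstream code; a usage sketch with an invented file name:

io = MicromedIO(filename='recording.TRC')  # hypothetical file
seg = io.read_segment()
print(seg.annotations['surname'], seg.annotations['firstname'])
print(seg.annotations['rec_datetime'])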

Example 11: read_block

# Required import: from neo.core import Segment [as alias]
# Or: from neo.core.Segment import annotate [as alias]

#......... part of the code omitted here .........

            # step 4: compute the length (number of samples) of the channels
            chan_len = np.zeros(len(list_data), dtype=int)  # np.int is deprecated
            for ind_chan, list_blocks in enumerate(list_data):
                for ind_block in list_blocks:
                    chan_len[ind_chan] += count_samples(
                                          file_blocks[ind_block]['m_length'])

            # step 5: find channels for which data are available
            ind_valid_chan = np.nonzero(chan_len)[0]

            # step 6: load the data
            # TODO give the possibility to load data as AnalogSignalArrays
            for ind_chan in ind_valid_chan:
                list_blocks = list_data[ind_chan]
                ind = 0 # index in the data vector

                # read time stamp for the beginning of the signal
                form = '<l' # reading format
                ind_block = list_blocks[0]
                count = count_samples(file_blocks[ind_block]['m_length'])
                fid.seek(file_blocks[ind_block]['pos']+6+count*2)
                buf = fid.read(struct.calcsize(form))
                val = struct.unpack(form , buf)
                start_index = val[0]

                # WARNING: the following blocks are read assuming that they
                # are all contiguous and sorted in time. I don't know if that
                # is always the case. Maybe we should use the time stamp of
                # each data block to choose where to put the read data in the
                # array.
                if not lazy:
                    temp_array = np.empty(chan_len[ind_chan], dtype = np.int16)
                    # NOTE: we could directly create an empty AnalogSignal and
                    # load the data in it, but it is much faster to load data
                    # in a temporary numpy array and create the AnalogSignals
                    # from this temporary array
                    for ind_block in list_blocks:
                        count = count_samples(
                                file_blocks[ind_block]['m_length'])
                        fid.seek(file_blocks[ind_block]['pos']+6)
                        temp_array[ind:ind+count] = \
                            np.fromfile(fid, dtype = np.int16, count = count)
                        ind += count

                sampling_rate = \
                    file_blocks[list_chan[ind_chan]]['m_SampleRate'] * pq.kHz
                t_start = (start_index / sampling_rate).simplified
                if lazy:
                    ana_sig = AnalogSignal([],
                                           sampling_rate = sampling_rate,
                                           t_start = t_start,
                                           name = file_blocks\
                                               [list_chan[ind_chan]]['m_Name'],
                                           file_origin = \
                                               os.path.basename(self.filename),
                                           units = pq.dimensionless)
                    ana_sig.lazy_shape = chan_len[ind_chan]
                else:
                    ana_sig = AnalogSignal(temp_array,
                                           sampling_rate = sampling_rate,
                                           t_start = t_start,
                                           name = file_blocks\
                                               [list_chan[ind_chan]]['m_Name'],
                                           file_origin = \
                                               os.path.basename(self.filename),
                                           units = pq.dimensionless)
# todo apibreak: create ChannelIndex for each signal
#                ana_sig.channel_index = \
#                            file_blocks[list_chan[ind_chan]]['m_numChannel']
                ana_sig.annotate(channel_name = \
                            file_blocks[list_chan[ind_chan]]['m_Name'])
                ana_sig.annotate(channel_type = \
                            file_blocks[list_chan[ind_chan]]['type_subblock'])
                seg.analogsignals.append(ana_sig)

        fid.close()

        if file_blocks[0]['m_TypeBlock'] == 'h': # this should always be true
            blck.rec_datetime = datetime.datetime(\
                file_blocks[0]['m_date_year'],
                file_blocks[0]['m_date_month'],
                file_blocks[0]['m_date_day'],
                file_blocks[0]['m_time_hour'],
                file_blocks[0]['m_time_minute'],
                file_blocks[0]['m_time_second'],
                10000 * file_blocks[0]['m_time_hsecond'])
                # the 10000 is here to convert m_time_hsecond from centisecond
                # to microsecond
            version = file_blocks[0]['m_version']
            blck.annotate(alphamap_version = version)
            if cascade:
                seg.rec_datetime = blck.rec_datetime.replace()
                # I couldn't find a simple copy function for datetime,
                # using replace without arguments is a twisted way to make a
                # copy
                seg.annotate(alphamap_version = version)
        if cascade:
            blck.create_many_to_one_relationship()

        return blck
Developer ID: CINPLA, Project: python-neo, Lines: 104, Source: alphaomegaio.py
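
Annotating alphamap_version on both the block and the segment means segment-level consumers never have to walk back up to the block; a short sketch of that invariant:

# the version annotation is duplicated on every cascaded segment (sketch)
ver = blck.annotations['alphamap_version']
assert all(s.annotations['alphamap_version'] == ver for s in blck.segments)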

Example 12: read_segment

# Required import: from neo.core import Segment [as alias]
# Or: from neo.core.Segment import annotate [as alias]
    def read_segment(self, lazy=False, cascade=True, load_spike_waveform=True):
        """

        """

        fid = open(self.filename, "rb")
        globalHeader = HeaderReader(fid, GlobalHeader).read_f(offset=0)

        # metadata
        seg = Segment()
        seg.rec_datetime = datetime.datetime(
            globalHeader["Year"],
            globalHeader["Month"],
            globalHeader["Day"],
            globalHeader["Hour"],
            globalHeader["Minute"],
            globalHeader["Second"],
        )
        seg.file_origin = os.path.basename(self.filename)
        seg.annotate(plexon_version=globalHeader["Version"])

        if not cascade:
            return seg

        ## Step 1 : read headers
        # dsp channels header = spikes and waveforms
        dspChannelHeaders = {}
        maxunit = 0
        maxchan = 0
        for _ in range(globalHeader["NumDSPChannels"]):
            # channel is 1 based
            channelHeader = HeaderReader(fid, ChannelHeader).read_f(offset=None)
            channelHeader["Template"] = np.array(channelHeader["Template"]).reshape((5, 64))
            channelHeader["Boxes"] = np.array(channelHeader["Boxes"]).reshape((5, 2, 4))
            dspChannelHeaders[channelHeader["Channel"]] = channelHeader
            maxunit = max(channelHeader["NUnits"], maxunit)
            maxchan = max(channelHeader["Channel"], maxchan)

        # event channel header
        eventHeaders = {}
        for _ in range(globalHeader["NumEventChannels"]):
            eventHeader = HeaderReader(fid, EventHeader).read_f(offset=None)
            eventHeaders[eventHeader["Channel"]] = eventHeader

        # slow channel header = signal
        slowChannelHeaders = {}
        for _ in range(globalHeader["NumSlowChannels"]):
            slowChannelHeader = HeaderReader(fid, SlowChannelHeader).read_f(offset=None)
            slowChannelHeaders[slowChannelHeader["Channel"]] = slowChannelHeader

        ## Step 2 : a first loop for counting size
        # signal
        nb_samples = np.zeros(len(slowChannelHeaders))
        sample_positions = np.zeros(len(slowChannelHeaders))
        t_starts = np.zeros(len(slowChannelHeaders), dtype="f")

        # spiketimes and waveform
        nb_spikes = np.zeros((maxchan + 1, maxunit + 1), dtype="i")
        wf_sizes = np.zeros((maxchan + 1, maxunit + 1, 2), dtype="i")

        # eventarrays
        nb_events = {}
        # maxstrsizeperchannel = { }
        for chan, h in iteritems(eventHeaders):
            nb_events[chan] = 0
            # maxstrsizeperchannel[chan] = 0

        start = fid.tell()
        while fid.tell() != -1:
            # read block header
            dataBlockHeader = HeaderReader(fid, DataBlockHeader).read_f(offset=None)
            if dataBlockHeader is None:
                break
            chan = dataBlockHeader["Channel"]
            unit = dataBlockHeader["Unit"]
            n1, n2 = dataBlockHeader["NumberOfWaveforms"], dataBlockHeader["NumberOfWordsInWaveform"]
            time = dataBlockHeader["UpperByteOf5ByteTimestamp"] * 2.0 ** 32 + dataBlockHeader["TimeStamp"]

            if dataBlockHeader["Type"] == 1:
                nb_spikes[chan, unit] += 1
                wf_sizes[chan, unit, :] = [n1, n2]
                fid.seek(n1 * n2 * 2, 1)
            elif dataBlockHeader["Type"] == 4:
                # event
                nb_events[chan] += 1
            elif dataBlockHeader["Type"] == 5:
                # continuous signal
                fid.seek(n2 * 2, 1)
                if n2 > 0:
                    nb_samples[chan] += n2
                if nb_samples[chan] == 0:
                    t_starts[chan] = time

        ## Step 3: allocating memory and 2 loop for reading if not lazy
        if not lazy:
            # allocating mem for signal
            sigarrays = {}
            for chan, h in iteritems(slowChannelHeaders):
                sigarrays[chan] = np.zeros(nb_samples[chan])

#......... part of the code omitted here .........
Developer ID: toddrjen, Project: python-neo, Lines: 103, Source: plexonio.py

Example 13: read

# Required import: from neo.core import Segment [as alias]
# Or: from neo.core.Segment import annotate [as alias]
    def read(self):
        """Read the nex file and add the analog signals from the mat file.

        All the data from the different channels and blocks is stored in one
        big data array and has to be extracted using the indexes kept in the
        other variables.
        """

        nex_block = super(NexIOplus, self).read_segment()

        # create a new block to return in the end
        block = Block(
            name="mechanical and heat stimulation recording",
            description=nex_block.annotations,
            file_origin=self.filename,
        )
        train = nex_block.spiketrains[0]

        # load data from matlab file
        mat = sio.loadmat(self.matname, squeeze_me=True)
        n_channels, n_segments = np.shape(mat["datastart"])

        # convert blocktimes from Matlab's datenum convention to posix format
        blockt_pos = (mat["blocktimes"] - 719529) * 86400.0
        blockt_pos = blockt_pos - blockt_pos[0]

        for segment in range(n_segments):

            seg = Segment(name=str(segment))
            for channel in range(n_channels):

                rate = mat["samplerate"][channel, segment] / self.f_down
                start = mat["datastart"][channel, segment] - 1
                end = mat["dataend"][channel, segment]
                tmp_sig = mat["data"][start:end][:: self.f_down]

                # check for mechanical stimulation
                if (n_channels, channel) in [(3, 0), (4, 1)]:
                    onoffs = self._extract_onsets(tmp_sig)
                    for i, (x1, x2) in enumerate(onoffs):
                        seg.epochs.append(Epoch(x1, x2 - x1, i + 1))
                    if onoffs:
                        seg.annotate(mechanical=True)
                if (n_channels, channel) in [(3, 2), (4, 3)]:
                    if np.max(tmp_sig) - np.min(tmp_sig) > self.boring_thresh:
                        seg.annotate(temp=True)

                ansig = AnalogSignal(
                    signal=tmp_sig,
                    name=mat["titles"][channel],
                    # TODO use unittextmap properly
                    units=mat["unittext"].item(),
                    sampling_rate=rate * (1 / pq.s),
                    t_start=blockt_pos[segment] * pq.s,
                )
                seg.analogsignals.append(ansig)

            # ignore segments without heat or mechanical stimulation
            if not seg.annotations:
                continue

            # last segment has to be treated differently
            if segment + 1 < n_segments:
                t = train[(train > blockt_pos[segment]) & (train < blockt_pos[segment + 1])]
                end = blockt_pos[segment + 1]
            else:
                t = train[train > blockt_pos[segment]]
                end = blockt_pos[segment] + len(ansig) / rate

            seg.spiketrains.append(
                SpikeTrain(times=t.magnitude, units=train.units, t_start=blockt_pos[segment], t_stop=end)
            )
            block.segments.append(seg)
        return block
Developer ID: dedan, Project: unmixpain, Lines: 75, Source: nexioplus.py
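
Since only stimulated segments ever receive an annotation, the annotations dict doubles as a filter flag when consuming the returned block; a sketch with invented file names and constructor arguments:

reader = NexIOplus(filename='cell.nex', matname='cell.mat')  # hypothetical args
blk = reader.read()
mech = [s for s in blk.segments if s.annotations.get('mechanical')]
heat = [s for s in blk.segments if s.annotations.get('temp')]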


Note: The neo.core.Segment.annotate method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. For distribution and use, please refer to each project's license. Do not reproduce without permission.