This page collects typical usage examples of the Python method neo.core.AnalogSignal.annotate. If you have been wondering what exactly AnalogSignal.annotate does and how to use it, the curated code examples below may help. You can also follow up on the containing class, neo.core.AnalogSignal.
The following shows 13 code examples of AnalogSignal.annotate, sorted by popularity by default. Upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
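Before going through the examples, here is a minimal sketch of the pattern they all share (the signal values and annotation keys below are made up for illustration): annotate() attaches arbitrary key/value metadata to a neo object, and that metadata is later available through its .annotations dictionary.
import numpy as np
import quantities as pq
from neo.core import AnalogSignal

# a one-second signal sampled at 1 kHz; the values are arbitrary
sig = AnalogSignal(np.random.rand(1000, 1), units='mV',
                   sampling_rate=1000 * pq.Hz, t_start=0 * pq.s)

# annotate() takes arbitrary keyword arguments and stores them
# in the object's .annotations dictionary
sig.annotate(info='raw traces', electrode_id=3)
print(sig.annotations)   # {'info': 'raw traces', 'electrode_id': 3}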
Example 1: read_analogsignal
# Required import: from neo.core import AnalogSignal [as alias]
# Or: from neo.core.AnalogSignal import annotate [as alias]
def read_analogsignal(self, channel_index=None, lazy=False, cascade=True):
"""
Read raw traces
Arguments:
channel_index: must be integer array
"""
if self._attrs["app_data"]:
bit_volts = self._attrs["app_data"]["channel_bit_volts"]
sig_unit = "uV"
else:
bit_volts = np.ones((self._attrs["shape"][1])) # TODO: find conversion in phy generated files
sig_unit = "bit"
if lazy:
anasig = AnalogSignal(
[],
units=sig_unit,
sampling_rate=self._attrs["kwik"]["sample_rate"] * pq.Hz,
t_start=self._attrs["kwik"]["start_time"] * pq.s,
)
# add the lazy_shape attribute holding the size the data would have if loaded
anasig.lazy_shape = self._attrs["shape"][0]
else:
data = self._kwd["recordings"][str(self._dataset)]["data"].value[:, channel_index]
data = data * bit_volts[channel_index]
anasig = AnalogSignal(
data,
units=sig_unit,
sampling_rate=self._attrs["kwik"]["sample_rate"] * pq.Hz,
t_start=self._attrs["kwik"]["start_time"] * pq.s,
)
data = [] # delete from memory
# attributes outside the neo schema can be attached with annotate
anasig.annotate(info="raw traces")
return anasig
Example 2: read_analogsignal
# Required import: from neo.core import AnalogSignal [as alias]
# Or: from neo.core.AnalogSignal import annotate [as alias]
def read_analogsignal(self ,
# the first 2 keyword arguments are imposed by the neo.io API
lazy = False,
cascade = True,
channel_index = 0,
segment_duration = 15.,
t_start = -1,
):
"""
With this IO, an AnalogSignal can be accessed directly by its channel number
"""
sr = 10000.
sinus_freq = 3. # Hz
#time vector for generated signal:
tvect = np.arange(t_start, t_start+ segment_duration , 1./sr)
if lazy:
anasig = AnalogSignal([], units='V', sampling_rate=sr * pq.Hz,
t_start=t_start * pq.s,
channel_index=channel_index)
# add the lazy_shape attribute holding the size the data would have if loaded
anasig.lazy_shape = tvect.shape
else:
# create analogsignal (sinus of 3 Hz)
sig = np.sin(2*np.pi*tvect*sinus_freq + channel_index/5.*2*np.pi)+np.random.rand(tvect.size)
anasig = AnalogSignal(sig, units= 'V', sampling_rate=sr * pq.Hz,
t_start=t_start * pq.s,
channel_index=channel_index)
# attributes outside the neo schema can be attached with annotate
anasig.annotate(info = 'it is a sinus of %f Hz' %sinus_freq )
return anasig
Example 3: read_analogsignal
# Required import: from neo.core import AnalogSignal [as alias]
# Or: from neo.core.AnalogSignal import annotate [as alias]
def read_analogsignal(self,
# the first 2 keyword arguments are imposed by the neo.io API
lazy = False,
cascade = True,
#channel index as given by the neuroshare API
channel_index = 0,
#time in seconds to be read
segment_duration = 0.,
#time in seconds to start reading from
t_start = 0.,
):
#some controls:
#if no segment duration is given, use the complete file
if segment_duration ==0.:
segment_duration=float(self.metadata["TimeSpan"])
#if the segment duration is bigger than file, use the complete file
if segment_duration >=float(self.metadata["TimeSpan"]):
segment_duration=float(self.metadata["TimeSpan"])
if lazy:
anasig = AnalogSignal([], units="V", sampling_rate = self.metadata["sampRate"] * pq.Hz,
t_start=t_start * pq.s,
)
#create a dummy time vector
tvect = np.arange(t_start, t_start+ segment_duration , 1./self.metadata["sampRate"])
# add the lazy_shape attribute holding the size the data would have if loaded
anasig.lazy_shape = tvect.shape
else:
#get the analog object
sig = self.fd.get_entity(channel_index)
#get the units (V, mV etc)
sigUnits = sig.units
#get the electrode number
chanName = sig.label[-4:]
#transform t_start into index (reading will start from this index)
startat = int(t_start*self.metadata["sampRate"])
#get the number of bins to read in
bins = int((segment_duration+t_start) * self.metadata["sampRate"])
#if the number of bins to read is bigger than
#the total number of bins, read only till the end of analog object
if startat+bins > sig.item_count:
bins = sig.item_count-startat
#read the data from the sig object
sig,_,_ = sig.get_data(index = startat, count = bins)
#store it to the 'AnalogSignal' object
anasig = AnalogSignal(sig, units = sigUnits, sampling_rate=self.metadata["sampRate"] * pq.Hz,
t_start=t_start * pq.s,
t_stop = (t_start+segment_duration)*pq.s,
channel_index=channel_index)
# annotate which electrode the signal comes from
anasig.annotate(info = "signal from channel %s" %chanName )
return anasig
Example 4: create_all_annotated
# Required import: from neo.core import AnalogSignal [as alias]
# Or: from neo.core.AnalogSignal import annotate [as alias]
def create_all_annotated(cls):
times = cls.rquant(1, pq.s)
signal = cls.rquant(1, pq.V)
blk = Block()
blk.annotate(**cls.rdict(3))
seg = Segment()
seg.annotate(**cls.rdict(4))
blk.segments.append(seg)
asig = AnalogSignal(signal=signal, sampling_rate=pq.Hz)
asig.annotate(**cls.rdict(2))
seg.analogsignals.append(asig)
isig = IrregularlySampledSignal(times=times, signal=signal,
time_units=pq.s)
isig.annotate(**cls.rdict(2))
seg.irregularlysampledsignals.append(isig)
epoch = Epoch(times=times, durations=times)
epoch.annotate(**cls.rdict(4))
seg.epochs.append(epoch)
event = Event(times=times)
event.annotate(**cls.rdict(4))
seg.events.append(event)
spiketrain = SpikeTrain(times=times, t_stop=pq.s, units=pq.s)
d = cls.rdict(6)
d["quantity"] = pq.Quantity(10, "mV")
d["qarray"] = pq.Quantity(range(10), "mA")
spiketrain.annotate(**d)
seg.spiketrains.append(spiketrain)
chx = ChannelIndex(name="achx", index=[1, 2], channel_ids=[0, 10])
chx.annotate(**cls.rdict(5))
blk.channel_indexes.append(chx)
unit = Unit()
unit.annotate(**cls.rdict(2))
chx.units.append(unit)
return blk
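The test helper above relies on the fact that annotate() accepts values of any type, including quantities and quantity arrays. A small stand-alone sketch of that behaviour (plain values are used here in place of the class-specific rdict helper):
import quantities as pq
from neo.core import SpikeTrain

st = SpikeTrain([0.1, 0.5, 0.9] * pq.s, t_stop=1.0 * pq.s)

# annotation values are stored as-is, so quantities and arrays survive
st.annotate(quantity=pq.Quantity(10, 'mV'),
            qarray=pq.Quantity(range(10), 'mA'))
print(st.annotations['quantity'])   # 10.0 mV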
Example 5: _extract_signals
# Required import: from neo.core import AnalogSignal [as alias]
# Or: from neo.core.AnalogSignal import annotate [as alias]
def _extract_signals(self, data, metadata, lazy):
signal = None
if lazy and data.size > 0:
signal = AnalogSignal([],
units=self._determine_units(metadata),
sampling_period=metadata['dt']*pq.ms)
signal.lazy_shape = None
else:
arr = numpy.vstack([self._extract_array(data, channel_index)
for channel_index in range(metadata['first_index'], metadata['last_index'] + 1)])
if len(arr) > 0:
signal = AnalogSignal(arr.T,
units=self._determine_units(metadata),
sampling_period=metadata['dt']*pq.ms)
if signal is not None:
signal.annotate(label=metadata["label"],
variable=metadata["variable"])
return signal
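A common reason for annotating signals with label and variable, as this PyNN-style IO does, is to be able to pick them out again later: neo containers have a filter() method that matches children against their annotations. A minimal sketch (the annotation keys and units are illustrative):
import numpy as np
import quantities as pq
from neo.core import Segment, AnalogSignal

seg = Segment(name='demo')
for var in ('v', 'gsyn_exc'):
    sig = AnalogSignal(np.zeros((100, 2)), units='mV',
                       sampling_period=0.1 * pq.ms)
    sig.annotate(label='population0', variable=var)
    seg.analogsignals.append(sig)

# filter() matches the keyword arguments against each child's annotations
vm_signals = seg.filter(variable='v', objects=AnalogSignal)
print(len(vm_signals))   # 1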
Example 6: read_analogsignal
# Required import: from neo.core import AnalogSignal [as alias]
# Or: from neo.core.AnalogSignal import annotate [as alias]
def read_analogsignal(self,
channel_index=None,
lazy=False,
cascade=True,
):
"""
Read raw traces
Arguments:
channel_index: must be integer
"""
try:
channel_index = int(channel_index)
except TypeError:
print('channel_index must be int, not %s' %type(channel_index))
if self._attrs['app_data']:
bit_volts = self._attrs['app_data']['channel_bit_volts']
sig_unit = 'uV'
else:
bit_volts = np.ones((self._attrs['shape'][1])) # TODO: find conversion in phy generated files
sig_unit = 'bit'
if lazy:
anasig = AnalogSignal([],
units=sig_unit,
sampling_rate=self._attrs['kwik']['sample_rate']*pq.Hz,
t_start=self._attrs['kwik']['start_time']*pq.s,
channel_index=channel_index,
)
# add the lazy_shape attribute holding the size the data would have if loaded
anasig.lazy_shape = self._attrs['shape'][0]
else:
data = self._kwd['recordings'][str(self._dataset)]['data'].value[:,channel_index]
data = data * bit_volts[channel_index]
anasig = AnalogSignal(data,
units=sig_unit,
sampling_rate=self._attrs['kwik']['sample_rate']*pq.Hz,
t_start=self._attrs['kwik']['start_time']*pq.s,
channel_index=channel_index,
)
data = [] # delete from memory
# attributes outside the neo schema can be attached with annotate
anasig.annotate(info='raw traces')
return anasig
Example 7: _extract_signal
# Required import: from neo.core import AnalogSignal [as alias]
# Or: from neo.core.AnalogSignal import annotate [as alias]
def _extract_signal(self, data, metadata, channel_index, lazy):
signal = None
if lazy:
if channel_index in data[:, 1]:
signal = AnalogSignal([],
units=self._determine_units(metadata),
sampling_period=metadata['dt']*pq.ms,
channel_index=channel_index)
signal.lazy_shape = None
else:
arr = self._extract_array(data, channel_index)
if len(arr) > 0:
signal = AnalogSignal(arr,
units=self._determine_units(metadata),
sampling_period=metadata['dt']*pq.ms,
channel_index=channel_index)
if signal is not None:
signal.annotate(label=metadata["label"],
variable=metadata["variable"])
return signal
Example 8: gettrace
# Required import: from neo.core import AnalogSignal [as alias]
# Or: from neo.core.AnalogSignal import annotate [as alias]
def gettrace(trec,f):
import numpy as np
format_type_lengths = [2,4,4,8]
format_type = [np.int16,np.int32,np.float32,np.float64]
pointsize = format_type_lengths[int(trec.trDataFormat)]
dtype = format_type[int(trec.trDataFormat)]
f.seek(int(trec.trData))
byte_string = f.read(int(trec.trDataPoints)*pointsize)
ydata = np.fromstring(byte_string,dtype = dtype)
tunit = pq.Quantity(1,str(trec.trXUnit))
yunit = pq.Quantity(1,str(trec.trYUnit))
sig = AnalogSignal(ydata*float(trec.trDataScaler)*yunit,
sampling_period=float(trec.trXInterval)*tunit,
units = trec.trYUnit[0])
annotations = list(trec.__dict__.keys())
annotations.remove('readlist')
for a in annotations:
d = {a:str(trec.__dict__[a])}
sig.annotate(**d)
return sig
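Since annotate() accepts arbitrary keyword arguments, the per-attribute loop at the end of gettrace() can also be written as a single call with dict unpacking. A sketch with a stand-in metadata dict (the real code would use trec.__dict__ as above):
import numpy as np
import quantities as pq
from neo.core import AnalogSignal

sig = AnalogSignal(np.zeros(10), units='pA', sampling_rate=10 * pq.kHz)

# stand-in for the trec.__dict__ metadata collected in gettrace()
trec_fields = {'trLabel': 'Trace1', 'trXUnit': 's', 'trYUnit': 'A'}

# one annotate() call with dict unpacking replaces the per-key loop
sig.annotate(**{k: str(v) for k, v in trec_fields.items()})
print(sig.annotations['trLabel'])   # 'Trace1'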
Example 9: read_segment
# Required import: from neo.core import AnalogSignal [as alias]
# Or: from neo.core.AnalogSignal import annotate [as alias]
def read_segment(self, import_neuroshare_segment = True,
lazy=False, cascade=True):
"""
Arguments:
import_neuroshare_segment: whether to import neuroshare segments as SpikeTrain objects with their associated waveforms, or to skip them entirely.
"""
seg = Segment( file_origin = os.path.basename(self.filename), )
if sys.platform.startswith('win'):
neuroshare = ctypes.windll.LoadLibrary(self.dllname)
elif sys.platform.startswith('linux'):
neuroshare = ctypes.cdll.LoadLibrary(self.dllname)
neuroshare = DllWithError(neuroshare)
#elif sys.platform.startswith('darwin'):
# API version
info = ns_LIBRARYINFO()
neuroshare.ns_GetLibraryInfo(ctypes.byref(info) , ctypes.sizeof(info))
seg.annotate(neuroshare_version = str(info.dwAPIVersionMaj)+'.'+str(info.dwAPIVersionMin))
if not cascade:
return seg
# open file
hFile = ctypes.c_uint32(0)
neuroshare.ns_OpenFile(ctypes.c_char_p(self.filename) ,ctypes.byref(hFile))
fileinfo = ns_FILEINFO()
neuroshare.ns_GetFileInfo(hFile, ctypes.byref(fileinfo) , ctypes.sizeof(fileinfo))
# read all entities
for dwEntityID in range(fileinfo.dwEntityCount):
entityInfo = ns_ENTITYINFO()
neuroshare.ns_GetEntityInfo( hFile, dwEntityID, ctypes.byref(entityInfo), ctypes.sizeof(entityInfo))
# EVENT
if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_EVENT':
pEventInfo = ns_EVENTINFO()
neuroshare.ns_GetEventInfo ( hFile, dwEntityID, ctypes.byref(pEventInfo), ctypes.sizeof(pEventInfo))
if pEventInfo.dwEventType == 0: #TEXT
pData = ctypes.create_string_buffer(pEventInfo.dwMaxDataLength)
elif pEventInfo.dwEventType == 1:# CSV
pData = ctypes.create_string_buffer(pEventInfo.dwMaxDataLength)
elif pEventInfo.dwEventType == 2:# 8bit
pData = ctypes.c_byte(0)
elif pEventInfo.dwEventType == 3:# 16bit
pData = ctypes.c_int16(0)
elif pEventInfo.dwEventType == 4:# 32bit
pData = ctypes.c_int32(0)
pdTimeStamp = ctypes.c_double(0.)
pdwDataRetSize = ctypes.c_uint32(0)
ea = Event(name = str(entityInfo.szEntityLabel),)
if not lazy:
times = [ ]
labels = [ ]
for dwIndex in range(entityInfo.dwItemCount ):
neuroshare.ns_GetEventData ( hFile, dwEntityID, dwIndex,
ctypes.byref(pdTimeStamp), ctypes.byref(pData),
ctypes.sizeof(pData), ctypes.byref(pdwDataRetSize) )
times.append(pdTimeStamp.value)
labels.append(str(pData.value))
ea.times = times*pq.s
ea.labels = np.array(labels, dtype ='S')
else :
ea.lazy_shape = entityInfo.dwItemCount
seg.eventarrays.append(ea)
# analog
if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_ANALOG':
pAnalogInfo = ns_ANALOGINFO()
neuroshare.ns_GetAnalogInfo( hFile, dwEntityID,ctypes.byref(pAnalogInfo),ctypes.sizeof(pAnalogInfo) )
dwIndexCount = entityInfo.dwItemCount
if lazy:
signal = [ ]*pq.Quantity(1, pAnalogInfo.szUnits)
else:
pdwContCount = ctypes.c_uint32(0)
pData = np.zeros( (entityInfo.dwItemCount,), dtype = 'float64')
total_read = 0
while total_read< entityInfo.dwItemCount:
dwStartIndex = ctypes.c_uint32(total_read)
dwStopIndex = ctypes.c_uint32(entityInfo.dwItemCount - total_read)
neuroshare.ns_GetAnalogData( hFile, dwEntityID, dwStartIndex,
dwStopIndex, ctypes.byref( pdwContCount) , pData[total_read:].ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
total_read += pdwContCount.value
signal = pq.Quantity(pData, units=pAnalogInfo.szUnits, copy = False)
#t_start
dwIndex = 0
pdTime = ctypes.c_double(0)
neuroshare.ns_GetTimeByIndex( hFile, dwEntityID, dwIndex, ctypes.byref(pdTime))
# ......... (rest of the code omitted) .........
Example 10: read_segment
# Required import: from neo.core import AnalogSignal [as alias]
# Or: from neo.core.AnalogSignal import annotate [as alias]
def read_segment(self, cascade=True, lazy=False, ):
"""
Arguments:
"""
f = StructFile(open(self.filename, 'rb'))
# Name
f.seek(64, 0)
surname = f.read(22).decode('ascii')
while surname[-1] == ' ':
if len(surname) == 0:
break
surname = surname[:-1]
firstname = f.read(20).decode('ascii')
while firstname[-1] == ' ':
if len(firstname) == 0:
break
firstname = firstname[:-1]
#Date
f.seek(128, 0)
day, month, year, hour, minute, sec = f.read_f('bbbbbb')
rec_datetime = datetime.datetime(year + 1900, month, day, hour, minute,
sec)
f.seek(138, 0)
Data_Start_Offset, Num_Chan, Multiplexer, Rate_Min, Bytes = f.read_f(
'IHHHH')
#~ print Num_Chan, Bytes
#header version
f.seek(175, 0)
header_version, = f.read_f('b')
assert header_version == 4
seg = Segment(name=str(firstname + ' ' + surname),
file_origin=os.path.basename(self.filename))
seg.annotate(surname=surname)
seg.annotate(firstname=firstname)
seg.annotate(rec_datetime=rec_datetime)
if not cascade:
f.close()
return seg
# area
f.seek(176, 0)
zone_names = ['ORDER', 'LABCOD', 'NOTE', 'FLAGS', 'TRONCA', 'IMPED_B',
'IMPED_E', 'MONTAGE',
'COMPRESS', 'AVERAGE', 'HISTORY', 'DVIDEO', 'EVENT A',
'EVENT B', 'TRIGGER']
zones = {}
for zname in zone_names:
zname2, pos, length = f.read_f('8sII')
zones[zname] = zname2, pos, length
#~ print zname2, pos, length
# reading raw data
if not lazy:
f.seek(Data_Start_Offset, 0)
rawdata = np.fromstring(f.read(), dtype='u' + str(Bytes))
rawdata = rawdata.reshape((-1, Num_Chan))
# Reading Code Info
zname2, pos, length = zones['ORDER']
f.seek(pos, 0)
code = np.fromstring(f.read(Num_Chan*2), dtype='u2', count=Num_Chan)
units = {-1: pq.nano * pq.V, 0: pq.uV, 1: pq.mV, 2: 1, 100: pq.percent,
101: pq.dimensionless, 102: pq.dimensionless}
for c in range(Num_Chan):
zname2, pos, length = zones['LABCOD']
f.seek(pos + code[c] * 128 + 2, 0)
label = f.read(6).strip(b"\x00").decode('ascii')
ground = f.read(6).strip(b"\x00").decode('ascii')
(logical_min, logical_max, logical_ground, physical_min,
physical_max) = f.read_f('iiiii')
k, = f.read_f('h')
if k in units.keys():
unit = units[k]
else:
unit = pq.uV
f.seek(8, 1)
sampling_rate, = f.read_f('H') * pq.Hz
sampling_rate *= Rate_Min
if lazy:
signal = [] * unit
else:
factor = float(physical_max - physical_min) / float(
logical_max - logical_min + 1)
signal = (rawdata[:, c].astype(
'f') - logical_ground) * factor * unit
ana_sig = AnalogSignal(signal, sampling_rate=sampling_rate,
name=str(label), channel_index=c)
if lazy:
# ......... (rest of the code omitted) .........
Example 11: read_segment
# Required import: from neo.core import AnalogSignal [as alias]
# Or: from neo.core.AnalogSignal import annotate [as alias]
# ......... (beginning of the code omitted) .........
l = f.readline()
# sampling rate sample
l = f.readline()
sampling_rate = 1. / float(l) * pq.Hz
# nb channel
l = f.readline()
nbchannel = int(l) - 2
#channel label
labels = []
for c in range(nbchannel + 2):
labels.append(f.readline()[:-1])
# channel type
types = []
for c in range(nbchannel + 2):
types.append(f.readline()[:-1])
# channel unit
units = []
for c in range(nbchannel + 2):
units.append(f.readline()[:-1])
#print units
#range
min_physic = []
for c in range(nbchannel + 2):
min_physic.append(float(f.readline()))
max_physic = []
for c in range(nbchannel + 2):
max_physic.append(float(f.readline()))
min_logic = []
for c in range(nbchannel + 2):
min_logic.append(float(f.readline()))
max_logic = []
for c in range(nbchannel + 2):
max_logic.append(float(f.readline()))
#info filter
info_filter = []
for c in range(nbchannel + 2):
info_filter.append(f.readline()[:-1])
f.close()
#raw data
n = int(round(np.log(max_logic[0] - min_logic[0]) / np.log(2)) / 8)
data = np.fromfile(self.filename, dtype='i' + str(n))
data = data.byteswap().reshape(
(data.size // (nbchannel + 2), nbchannel + 2)).astype('f4')
for c in range(nbchannel):
if lazy:
sig = []
else:
sig = (data[:, c] - min_logic[c]) / (
max_logic[c] - min_logic[c]) * \
(max_physic[c] - min_physic[c]) + min_physic[c]
try:
unit = pq.Quantity(1, units[c])
except:
unit = pq.Quantity(1, '')
ana_sig = AnalogSignal(
sig * unit, sampling_rate=sampling_rate,
t_start=0. * pq.s, name=labels[c], channel_index=c)
if lazy:
ana_sig.lazy_shape = data.shape[0]
ana_sig.annotate(channel_name=labels[c])
seg.analogsignals.append(ana_sig)
# triggers
f = open(self.filename + '.pos')
times = []
labels = []
reject_codes = []
for l in f.readlines():
r = re.findall(r' *(\d+) *(\d+) *(\d+) *', l)
times.append(float(r[0][0]) / sampling_rate.magnitude)
labels.append(str(r[0][1]))
reject_codes.append(str(r[0][2]))
if lazy:
times = [] * pq.s
labels = np.array([], dtype='S')
reject_codes = []
else:
times = np.array(times) * pq.s
labels = np.array(labels)
reject_codes = np.array(reject_codes)
ea = Event(times=times, labels=labels, reject_codes=reject_codes)
if lazy:
ea.lazy_shape = len(times)
seg.events.append(ea)
f.close()
seg.create_many_to_one_relationship()
return seg
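Note that this reader passes reject_codes straight to the Event constructor rather than calling annotate() afterwards. In neo, keyword arguments that a data-object constructor does not recognise are stored as annotations, so the two styles are equivalent. A minimal sketch:
import numpy as np
import quantities as pq
from neo.core import Event

times = np.array([0.5, 1.5]) * pq.s
labels = np.array(['1', '2'])

# unknown keyword arguments end up in .annotations,
# exactly as if annotate(reject_codes=...) had been called
ev = Event(times=times, labels=labels, reject_codes=np.array(['0', '0']))
print(ev.annotations['reject_codes'])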
Example 12: read_block
# Required import: from neo.core import AnalogSignal [as alias]
# Or: from neo.core.AnalogSignal import annotate [as alias]
# ......... (beginning of the code omitted) .........
# step 4: compute the length (number of samples) of the channels
chan_len = np.zeros(len(list_data), dtype = int)
for ind_chan, list_blocks in enumerate(list_data):
for ind_block in list_blocks:
chan_len[ind_chan] += count_samples(
file_blocks[ind_block]['m_length'])
# step 5: find channels for which data are available
ind_valid_chan = np.nonzero(chan_len)[0]
# step 6: load the data
# TODO give the possibility to load data as AnalogSignalArrays
for ind_chan in ind_valid_chan:
list_blocks = list_data[ind_chan]
ind = 0 # index in the data vector
# read time stamp for the beginning of the signal
form = '<l' # reading format
ind_block = list_blocks[0]
count = count_samples(file_blocks[ind_block]['m_length'])
fid.seek(file_blocks[ind_block]['pos']+6+count*2)
buf = fid.read(struct.calcsize(form))
val = struct.unpack(form , buf)
start_index = val[0]
# WARNING: in the following, blocks are read assuming that they
# are all contiguous and sorted in time. I don't know if it's
# always the case. Maybe we should use the time stamp of each
# data block to choose where to put the read data in the array.
if not lazy:
temp_array = np.empty(chan_len[ind_chan], dtype = np.int16)
# NOTE: we could directly create an empty AnalogSignal and
# load the data in it, but it is much faster to load data
# in a temporary numpy array and create the AnalogSignals
# from this temporary array
for ind_block in list_blocks:
count = count_samples(
file_blocks[ind_block]['m_length'])
fid.seek(file_blocks[ind_block]['pos']+6)
temp_array[ind:ind+count] = \
np.fromfile(fid, dtype = np.int16, count = count)
ind += count
sampling_rate = \
file_blocks[list_chan[ind_chan]]['m_SampleRate'] * pq.kHz
t_start = (start_index / sampling_rate).simplified
if lazy:
ana_sig = AnalogSignal([],
sampling_rate = sampling_rate,
t_start = t_start,
name = file_blocks\
[list_chan[ind_chan]]['m_Name'],
file_origin = \
os.path.basename(self.filename),
units = pq.dimensionless)
ana_sig.lazy_shape = chan_len[ind_chan]
else:
ana_sig = AnalogSignal(temp_array,
sampling_rate = sampling_rate,
t_start = t_start,
name = file_blocks\
[list_chan[ind_chan]]['m_Name'],
file_origin = \
os.path.basename(self.filename),
units = pq.dimensionless)
# todo apibreak: create ChannelIndex for each signals
# ana_sig.channel_index = \
# file_blocks[list_chan[ind_chan]]['m_numChannel']
ana_sig.annotate(channel_name = \
file_blocks[list_chan[ind_chan]]['m_Name'])
ana_sig.annotate(channel_type = \
file_blocks[list_chan[ind_chan]]['type_subblock'])
seg.analogsignals.append(ana_sig)
fid.close()
if file_blocks[0]['m_TypeBlock'] == 'h': # this should always be true
blck.rec_datetime = datetime.datetime(\
file_blocks[0]['m_date_year'],
file_blocks[0]['m_date_month'],
file_blocks[0]['m_date_day'],
file_blocks[0]['m_time_hour'],
file_blocks[0]['m_time_minute'],
file_blocks[0]['m_time_second'],
10000 * file_blocks[0]['m_time_hsecond'])
# the 10000 is here to convert m_time_hsecond from centisecond
# to microsecond
version = file_blocks[0]['m_version']
blck.annotate(alphamap_version = version)
if cascade:
seg.rec_datetime = blck.rec_datetime.replace()
# I couldn't find a simple copy function for datetime,
# using replace without arguments is a twisted way to make a
# copy
seg.annotate(alphamap_version = version)
if cascade:
blck.create_many_to_one_relationship()
return blck
Example 13: spike_triggered_average
# Required import: from neo.core import AnalogSignal [as alias]
# Or: from neo.core.AnalogSignal import annotate [as alias]
# ......... (beginning of the code omitted) .........
"must be a time quantity.")
if window_stoptime <= window_starttime:
raise ValueError("The start time of the window (window[0]) must be "
"earlier than the stop time of the window (window[1]).")
# checks on signal
if not isinstance(signal, AnalogSignal):
raise TypeError(
"Signal must be an AnalogSignal, not %s." % type(signal))
if len(signal.shape) > 1:
# num_signals: number of analog signals
num_signals = signal.shape[1]
else:
raise ValueError("Empty analog signal, hence no averaging possible.")
if window_stoptime - window_starttime > signal.t_stop - signal.t_start:
raise ValueError("The chosen time window is larger than the "
"time duration of the signal.")
# spiketrains type check
if isinstance(spiketrains, (np.ndarray, SpikeTrain)):
spiketrains = [spiketrains]
elif isinstance(spiketrains, list):
for st in spiketrains:
if not isinstance(st, (np.ndarray, SpikeTrain)):
raise TypeError(
"spiketrains must be a SpikeTrain, a numpy ndarray, or a "
"list of one of those, not %s." % type(spiketrains))
else:
raise TypeError(
"spiketrains must be a SpikeTrain, a numpy ndarray, or a list of "
"one of those, not %s." % type(spiketrains))
# multiplying spiketrain in case only a single spiketrain is given
if len(spiketrains) == 1 and num_signals != 1:
template = spiketrains[0]
spiketrains = []
for i in range(num_signals):
spiketrains.append(template)
# checking for matching numbers of signals and spiketrains
if num_signals != len(spiketrains):
raise ValueError(
"The number of signals and spiketrains has to be the same.")
# checking the times of signal and spiketrains
for i in range(num_signals):
if spiketrains[i].t_start < signal.t_start:
raise ValueError(
"The spiketrain indexed by %i starts earlier than "
"the analog signal." % i)
if spiketrains[i].t_stop > signal.t_stop:
raise ValueError(
"The spiketrain indexed by %i stops later than "
"the analog signal." % i)
# *** Main algorithm: ***
# window_bins: number of bins of the chosen averaging interval
window_bins = int(np.ceil(((window_stoptime - window_starttime) *
signal.sampling_rate).simplified))
# result_sta: array containing finally the spike-triggered averaged signal
result_sta = AnalogSignal(np.zeros((window_bins, num_signals)),
sampling_rate=signal.sampling_rate, units=signal.units)
# setting of correct times of the spike-triggered average
# relative to the spike
result_sta.t_start = window_starttime
used_spikes = np.zeros(num_signals, dtype=int)
unused_spikes = np.zeros(num_signals, dtype=int)
total_used_spikes = 0
for i in range(num_signals):
# summing over all respective signal intervals around spiketimes
for spiketime in spiketrains[i]:
# checks for sufficient signal data around spiketime
if (spiketime + window_starttime >= signal.t_start and
spiketime + window_stoptime <= signal.t_stop):
# calculating the startbin in the analog signal of the
# averaging window for spike
startbin = int(np.floor(((spiketime + window_starttime -
signal.t_start) * signal.sampling_rate).simplified))
# adds the signal in selected interval relative to the spike
result_sta[:, i] += signal[
startbin: startbin + window_bins, i]
# counting of the used spikes
used_spikes[i] += 1
else:
# counting of the unused spikes
unused_spikes[i] += 1
# normalization
result_sta[:, i] = result_sta[:, i] / used_spikes[i]
total_used_spikes += used_spikes[i]
if total_used_spikes == 0:
warnings.warn(
"No spike at all was either found or used for averaging")
result_sta.annotate(used_spikes=used_spikes, unused_spikes=unused_spikes)
return result_sta
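On the caller's side, the used_spikes/unused_spikes counts stored via annotate() travel with the returned AnalogSignal. A usage sketch with synthetic inputs (the import assumes the function above is the one shipped in elephant.sta; adjust it if you define the function locally):
import numpy as np
import quantities as pq
from neo.core import AnalogSignal, SpikeTrain
from elephant.sta import spike_triggered_average

# ten seconds of noise sampled at 1 kHz and three spike times
signal = AnalogSignal(np.random.rand(10000, 1), units='mV',
                      sampling_rate=1 * pq.kHz)
spikes = SpikeTrain([1.2, 3.4, 7.8] * pq.s, t_stop=10 * pq.s)

sta = spike_triggered_average(signal, spikes, (-5 * pq.ms, 10 * pq.ms))

# the spike counts stored via annotate() ride along on the result
print(sta.annotations['used_spikes'], sta.annotations['unused_spikes'])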