本文整理汇总了Python中numpy.correlate函数的典型用法代码示例。如果您正苦于以下问题:Python correlate函数的具体用法?Python correlate怎么用?Python correlate使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了correlate函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: check_preamble_properties
def check_preamble_properties(preamble, x_preamble):
x_1st = x_preamble[0:len(x_preamble) // 2]
x_2nd = x_preamble[-len(x_preamble) // 2:]
if not np.all(np.abs(x_1st - x_2nd) < 1e-12):
print np.abs(x_1st - x_2nd)
raise ValueError('preamble timeslots do not repeat!')
from correlation import cross_correlate_naive, auto_correlate_halfs
from utils import calculate_signal_energy
x_ampl = np.sqrt(calculate_signal_energy(x_preamble))
preamble *= x_ampl
x_preamble *= x_ampl
x_energy = calculate_signal_energy(x_preamble)
if np.abs(2. * auto_correlate_halfs(x_preamble) / x_energy) -1. > 1e-10:
raise ValueError('auto correlating halfs of preamble fails!')
print 'normalized preamble xcorr val: ', np.correlate(x_preamble, x_preamble) / x_energy
print 'windowed normalized preamble: ', np.correlate(preamble[-len(x_preamble):], x_preamble) / x_energy
fxc = np.correlate(preamble, x_preamble, 'full') / x_energy
vxc = np.correlate(preamble, x_preamble, 'valid') / x_energy
nxc = cross_correlate_naive(preamble, x_preamble) / x_energy
import matplotlib.pyplot as plt
plt.plot(np.abs(fxc))
plt.plot(np.abs(vxc))
plt.plot(np.abs(nxc))
plt.show()
示例2: find
def find(self, target):
    """Match ``target`` against the stored correlation patterns.

    Dispatches on the shape of ``target``: 4 rows -> pattern d,
    3 rows of length 4 -> pattern c, 3 rows of length 3 -> the common
    pattern table. Returns True on a match, False on a miss, and falls
    through (None) for shapes that fit no branch, as before.
    """
    if len(target) == 4:
        # Pattern d: total the scalar correlation of each row pair.
        score = sum(np.correlate(row, ref)[0]
                    for row, ref in zip(target, self.pd))
        return bool(score >= self.threshold_expand['pd'])
    elif len(target) == 3:
        if len(target[0]) == 4:
            # Pattern c.
            score = sum(np.correlate(row, ref)[0]
                        for row, ref in zip(target, self.pc))
            return bool(score >= self.threshold_expand['pc'])
        elif len(target[0]) == 3:
            # Common cases: first pattern whose score clears its
            # threshold wins.
            for entry in self.threshold:
                pattern, limit = entry[0], entry[1]
                score = sum(np.correlate(row, ref)[0]
                            for row, ref in zip(target, pattern))
                if score >= limit:
                    return True
            return False
示例3: ccovf
def ccovf(x, y, unbiased=True, demean=True):
    """Cross-covariance function for two 1D time series.

    Parameters
    ----------
    x, y : arrays
        time series data
    unbiased : boolean
        if True the denominator at lag k is n-k, otherwise n
    demean : boolean
        if True subtract each series' mean before correlating

    Returns
    -------
    ccovf : array
        cross-covariance at non-negative lags

    Notes
    -----
    Uses np.correlate (full convolution); for very long series an
    FFT-based convolution is recommended instead.
    """
    n = len(x)
    xo = x - x.mean() if demean else x
    yo = y - y.mean() if demean else y
    if unbiased:
        taper = np.ones(n)
        denom = np.correlate(taper, taper, 'full')
    else:
        denom = n
    full = np.correlate(xo, yo, 'full') / denom
    return full[n - 1:]
示例4: plot_acorr
def plot_acorr(x, ax=None, title="", xlabel="Shift", ylabel="",
               append_analysis=True):
    """Plot the autocorrelation of ``x`` (first 100 shifts).

    If the series is (numerically) constant its variance is ~0, so the
    autocorrelation is undefined; in that case the autocovariance is
    plotted instead. Returns the axes used.
    """
    centered = x - np.mean(x)
    variance = np.var(x)
    half = centered[:int(len(x) // 2)]
    is_deterministic = len(np.unique(x.round(decimals=12))) <= 1
    if is_deterministic:
        # Deterministic process: fall back to autocovariance.
        acorr = np.correlate(centered, half, 'valid')
        analysis_mode = "Autocovariance"
    else:
        acorr = np.correlate(centered, half, 'valid') / variance
        analysis_mode = "Autocorrelation"
    if ax is None:
        fig, ax = plt.subplots(nrows=1, figsize=(12, 3))
    ax.plot(acorr[:100], 'o')
    ax.set_title(title + analysis_mode if append_analysis else title)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    limit_ylim(ax)
    return ax
示例5: findInserts
def findInserts(self):
    """Locate phantom insert slice ranges by aligning this scan's slice
    profile to the base phantom's profile via cross-correlation.

    Populates ``self.inserts[insert]`` with a sorted two-element array of
    slice indices (in this scan's slice spacing) for every insert whose
    range overlaps the acquired slices. Reads self.slices, self.slicethk,
    self.profile and self.phBase (profile, slicethk, inserts).
    """
    # Interpolate this scan's profile onto the base phantom's slice grid
    # (base slice thickness spacing).
    #X=numpy.linspace(0,self.phBase.slicethk*(self.slices*self.slicethk/self.phBase.slicethk),(self.slices*self.slicethk)/self.phBase.slicethk+1)
    X=numpy.arange(0,self.slices*self.slicethk,self.phBase.slicethk)
    Xp=numpy.linspace(0,(self.slices-1)*self.slicethk,self.slices)
    profileResc=numpy.interp(X,Xp,self.profile)
    # Mirrored copy to test for a reversed acquisition order.
    profileRescMirror=numpy.fliplr([profileResc,numpy.zeros(len(profileResc))])[0,:]
    # Correlate both orientations against the base profile; the stronger
    # peak determines acquisition direction and the alignment shift.
    fwdcor=numpy.correlate(self.phBase.profile[:,1],profileResc,'full')
    rwdcor=numpy.correlate(self.phBase.profile[:,1],profileRescMirror,'full')
    reverse=False
    if numpy.amax(fwdcor)>=numpy.amax(rwdcor):
        shift=numpy.argmax(fwdcor)
    else:
        reverse=True
        shift=numpy.argmax(rwdcor)
    # Align profile and base profile: convert the correlation lag into
    # slice indices on the base phantom's grid.
    Xcor=(X/self.phBase.slicethk)-len(X)+1+shift
    # Find the phantom slices nearest to each base insert's range.
    Inserts=["resolution","sliceThk","uniform","dgp"]
    for insert in Inserts:
        if (Xcor==self.phBase.inserts[insert][0]).any() or (Xcor==self.phBase.inserts[insert][1]).any():
            # Clip the insert's range to the slices actually acquired.
            f=max(self.phBase.inserts[insert][0],Xcor[0])
            s=min(self.phBase.inserts[insert][1],Xcor[len(Xcor)-1])
            # Map from base-grid indices back to this scan's slice indices.
            self.inserts[insert]=numpy.round(((numpy.array([f,s])+len(X)-1-shift)*float(self.phBase.slicethk))/float(self.slicethk))
            if reverse:
                # Reversed acquisition: mirror indices across the stack.
                self.inserts[insert]=numpy.abs(self.inserts[insert]-self.slices+1)
            (self.inserts[insert]).sort()
示例6: dbpsk_demod
def dbpsk_demod(rx_data, sample_rate, L):
print "Demodulating [email protected]", sample_rate
time_seq = np.arange(0, len(rx_data), 1, dtype=float) / sample_rate
two_pi_fc_t = 2 * np.pi * CenterFreq * time_seq
# Filter out-of-band noise
rx_inband = np.convolve(rx_data, bp_filt)
N = len(rx_inband)
# Downconvert I/Q channels into baseband signals
rx_bb_i = np.multiply(rx_inband[SamplesPerSymbol / 2 : N - SamplesPerSymbol / 2], np.cos(two_pi_fc_t))
rx_bb_q = np.multiply(rx_inband[SamplesPerSymbol / 2 : N - SamplesPerSymbol / 2], np.sin(two_pi_fc_t))
# Filter any high frequency remnants
audio_bb_i = np.convolve(rx_bb_i, lp_filt)[: L * SamplesPerSymbol * BitsPerChar]
audio_bb_q = np.convolve(rx_bb_q, lp_filt)[: L * SamplesPerSymbol * BitsPerChar]
decoded_bits = np.zeros(L * BitsPerChar)
# Previous Phase and decode bit
pp = 0
pb = 0
detected_bitstream = np.zeros(L * BitsPerChar, dtype=int)
T = SamplesPerSymbol
# Matched filter is just a rectangular pulse
rect_pulse = np.ones(T)
for demod in np.arange(L * BitsPerChar):
sym_i = np.correlate(audio_bb_i[demod * T : (demod + 1) * T], rect_pulse, "full")[T]
sym_q = np.correlate(audio_bb_q[demod * T : (demod + 1) * T], rect_pulse, "full")[T]
cp = np.arctan(sym_q / sym_i)
# print "Phase Diff:", cp-pp
if np.abs(cp - pp) > 0.1:
detected_bitstream[demod] = pb ^ 1
else:
detected_bitstream[demod] = detected_bitstream[demod - 1]
pb = detected_bitstream[demod]
pp = cp
return detected_bitstream
示例7: determineDelay
def determineDelay(source, target, maxdel=2**16, ax=None):
    '''
    Determine the delay between two signals
    (based on correlation extrema)
    Parameters:
    * Signals
      - source
      - target
    * maxdel: maximum delay to look for (in both directions)
    * ax: optional axes to plot the correlations on

    Returns the lag (in samples) of target relative to source.
    '''
    window_start = 0
    src_win = source[window_start:window_start + maxdel]
    tgt_win = target[window_start:window_start + maxdel]
    auto_corr = np.correlate(src_win, src_win, 'full')
    cross_corr = np.correlate(tgt_win, src_win, 'full')
    auto_peak = np.argmax(np.abs(auto_corr))
    cross_peak = np.argmax(np.abs(cross_corr))
    if ax:
        try:
            ax.plot(auto_corr)
        except AttributeError:
            # ax was truthy but not axes-like: make a fresh figure.
            fig, ax = pl.subplots(1)
            ax.plot(auto_corr)
        ax.plot(cross_corr)
        ax.axvline(auto_peak, color='red')
        ax.plot(cross_peak, cross_corr[cross_peak], 'o')
    # Delay is the offset between the cross- and auto-correlation peaks.
    return cross_peak - auto_peak
示例8: get_best_time_window
def get_best_time_window(data, samplerate, fundamental_frequency, eod_cycles):
    """Select the most stable stretch of ``data`` for analysis.

    Finds the window of ``eod_cycles`` EOD periods whose peak amplitudes
    have the smallest coefficient of variation (local std / local mean),
    capped at 20% of the recording length. Returns (time_axis, data)
    for that window, with the time axis starting at zero.
    """
    eod_peaks1, eod_peak_idx1, _, _ = peakdet(data)
    max_time = len(data) / samplerate
    window_time = eod_cycles / fundamental_frequency
    if window_time > max_time * .2:
        window_time = max_time * .2
        warnings.warn("You are reqeusting a window that is too long. Using T=%f" % (window_time,))
    points_in_window = int(fundamental_frequency * window_time)
    time_axis = np.arange(len(data)) / samplerate
    # Moving average / moving std of the peak amplitudes via correlation
    # with a normalized boxcar.
    boxcar = np.ones(points_in_window) / points_in_window
    local_mean = np.correlate(eod_peaks1, boxcar, mode='valid')
    local_std = np.sqrt(np.correlate(eod_peaks1 ** 2., boxcar, mode='valid') - local_mean ** 2.)
    cov = local_std / local_mean
    # First index of the minimal coefficient of variation.
    best = int(np.argmin(cov))
    v = eod_peak_idx1[best]
    in_window = (time_axis >= time_axis[v]) & (time_axis < time_axis[v] + window_time)
    t_win = time_axis[in_window]
    data_win = data[in_window]
    t_win = t_win - t_win[0]
    return t_win, data_win
示例9: _updateBuffer
def _updateBuffer(self, v):
    """
    Append ``v`` to the running buffer, trim the buffer to the window
    size, and count peak (or valley) events at the window center,
    optionally gated by template correlation. Returns the running count.
    """
    self._rtData.append(v)
    center = int(np.floor(self._window / 2.0))
    # Drop the oldest sample once the buffer exceeds the window.
    if len(self._rtData) > self._window:
        self._rtData = self._rtData[1:]
    # Peaks must rise above the cutoff; valleys must dip below it.
    if self._isPeak:
        extrema = self._rtData.findPeaks()
        hits = lambda e: e[0] == center and e[1] > self._cutoff
    else:
        extrema = self._rtData.findValleys()
        hits = lambda e: e[0] == center and e[1] < self._cutoff
    for extremum in extrema:
        if not hits(extremum):
            continue
        if self.doCorr:
            # Gate the event on correlation with the stored template.
            corrVal = np.correlate(self._rtData.normalize(), self._template)
            thresh = self.corrThresh[0] - self.corrStdMult * self.corrThresh[1]
            if corrVal[0] > thresh:
                self.count += 1
        else:
            self.count += 1
    return self.count
示例10: correlation
def correlation (results = [], bin_size = 100, N =1000):
wait_time=0.
print 'N = ',N
nr_datasets = results ['max_ind']
ind = 0
for counter in np.arange(nr_datasets):
dati = results [str(counter)]
if (len(dati)>2*N+1):
if (bin_size>1 ):
b = bin_data (data = dati, bin_size = bin_size)
else:
b = dati
t = np.arange(len(b))*bin_size*(20e-6+wait_time*1e-6)
mu = np.mean(b)
sigma = np.std(b)
corr = np.correlate (b-mu, b-mu, 'full')/(np.correlate(b-mu, b-mu)+0.)
t_corr = (np.arange (len(corr))-len(corr)/2.)*(wait_time+20.)*1e-6*bin_size
nn = len(corr)
corr2 = corr [nn/2-N:nn/2+N]
t_corr2 = t_corr [nn/2-N:nn/2+N]
if (ind == 0):
avg_corr = corr2
else:
avg_corr = avg_corr+corr2
ind = ind + 1
avg_corr[N] = 0
avg_corr = avg_corr/max(avg_corr)
return t_corr2, avg_corr
示例11: calculate_maxcrosscorrelation
def calculate_maxcrosscorrelation(reference_signal, unknown_signal):
    '''
    function:
    ---------
    given a reference signal and an unknown signal, compute the max
    cross-correlation score; the higher the score, the more similar the
    two signals are. used to identify events.

    parameters:
    -----------
    @reference_signal: 150 unit numpy array, representing reference signal.
    @unknown_signal: 150 unit numpy array

    returns:
    --------
    @score: float in [0, 1]; squared cross-correlation peak normalized by
            both autocorrelation peaks (1.0 for identical signals).
    '''
    # https://stackoverflow.com/questions/1289415/what-is-a-good-r-value-when-comparing-2-signals-using-cross-correlation
    ref_peak = max(np.correlate(reference_signal, reference_signal, 'full'))
    unk_peak = max(np.correlate(unknown_signal, unknown_signal, 'full'))
    cross_peak = max(np.correlate(reference_signal, unknown_signal, 'full'))
    return (cross_peak ** 2) / float(ref_peak * unk_peak)
示例12: chickling_corr
def chickling_corr(shotno, date=None, bandwidth=40000):
    """Plot averaged scene/reference phase traces and their cross-correlation.

    Parameters:
        shotno: shot number passed to ``file_finder``.
        date: "YYYYMMDD" string; defaults to today's date.
        bandwidth: samples per averaging segment.
    """
    # BUG FIX: the original default ``date=time.strftime("%Y%m%d")`` was
    # evaluated once at import time, so a long-running session kept using
    # a stale date. Resolve the default at call time instead.
    if date is None:
        date = time.strftime("%Y%m%d")
    fname, data = file_finder(shotno, date)
    samplesize = int(np.unwrap(data[0]['phasediff_co2']).size / bandwidth)
    # Reshape the samples (e.g. 20M for 1 s) into rows of ``bandwidth``
    # points and average each row (vectorized; replaces the per-row loop).
    phasediff_co2 = np.reshape(np.unwrap(data[0]['phasediff_co2'][0:(samplesize * bandwidth)]), (samplesize, bandwidth))
    phasediff_hene = np.reshape(np.unwrap(data[0]['phasediff_hene'][0:(samplesize * bandwidth)]), (samplesize, bandwidth))
    phase_avr_co2 = np.mean(phasediff_co2, axis=1)
    phase_avr_hene = np.mean(phasediff_hene, axis=1)
    x = np.linspace(0, 1, samplesize)
    plt.figure("2 Channels | Blue = Scene | Orange = Reference | Green = Cross-Correlation | shot " + str(shotno) + " Date " + str(date))
    plt.xlabel("Time, s")
    plt.ylabel("Phase Difference, Radians")
    plt.plot(x, phase_avr_co2 - np.average(phase_avr_co2))
    plt.plot(x, phase_avr_hene - np.average(phase_avr_hene))
    # Pearson-style normalization so the correlation peak is in [-1, 1].
    a = (phase_avr_co2 - np.mean(phase_avr_co2)) / (np.std(phase_avr_co2) * len(phase_avr_co2))
    b = (phase_avr_hene - np.mean(phase_avr_hene)) / (np.std(phase_avr_hene))
    yc = np.correlate(a, b, 'full')
    print(np.correlate(a, b, 'valid'))
    xc = np.linspace(0, 1, yc.size)
    plt.plot(xc, yc)#,'o',ms=0.4)
示例13: linearCouplingCoeff2
def linearCouplingCoeff2(dataH, dataX, timeH, timeX, transFnXtoH, segStartTime,
                         segEndTime, timeShift, samplFreq, logFid, debugLevel):
    """Cross-correlation coefficient between the gravitational-wave
    channel H and the projected instrumental channel X over one segment.

    Returns [rXH, rMaxXH]: each a one-element array holding the
    normalized correlation coefficient, or empty arrays if the inputs
    are invalid (errors are written to ``logFid``).
    Note: transFnXtoH, samplFreq and debugLevel are currently unused.
    """
    rXH = np.asarray([])
    rMaxXH = np.asarray([])
    if (len(dataH) == 0) | (len(dataX) == 0):
        logFid.write('Error: One or more data vectors are empty..\n')
        logFid.write('Error: len(dataH) = %d len(dataX) = %d..\n' % (len(dataH), len(dataX[0])))
    elif len(dataH) != len(dataX[0]):
        logFid.write('Error: Different lengths. len(dataH) = %d len(dataX) = %d..\n' % (len(dataH), len(dataX[0])))
    else:
        hSeries = dataH      # (mean subtraction intentionally disabled)
        xSeries = dataX[0]
        # Keep only the samples whose (shifted) timestamps fall inside
        # [segStartTime, segEndTime).
        keepH = np.intersect1d(np.where(timeH >= segStartTime)[0],
                               np.where(timeH < segEndTime)[0])
        hSeries = hSeries[keepH]
        keepX = np.intersect1d(np.where(timeX + timeShift >= segStartTime)[0],
                               np.where(timeX + timeShift < segEndTime)[0])
        xSeries = xSeries[keepX]
        # Normalized zero-lag correlation coefficient.
        coeff = np.correlate(hSeries, xSeries) / (np.sqrt(
            np.correlate(hSeries, hSeries) * np.correlate(xSeries, xSeries)))
        rXH = np.append(rXH, coeff)
        rMaxXH = np.append(rMaxXH, coeff)
    return [rXH, rMaxXH]
示例14: aligndata
def aligndata(baselineremoved, brightest, pulsar):
    """Align pulse profiles by cross-correlating against a template.

    First pass: align every profile to the brightest one (rolled so its
    peak sits at 1/4 of the profile). Second pass: re-align against the
    median template using a doubled profile to avoid wrap-around bias.
    Saves a plot of the rolled template and returns (aligned, template).
    """
    nbins = baselineremoved.shape[0]
    nprofiles = baselineremoved.shape[1]
    template = baselineremoved[:, brightest]
    # Rotate the template to put its peak at 1/4 of the profile.
    fixedlag = int(nbins / 4) - np.argmax(template)
    aligned = np.zeros((nbins, nprofiles))
    template = np.roll(template, fixedlag)
    plt.plot(template)
    plt.savefig('./{0}/{0}_brightest.png' .format(pulsar))
    plt.clf()
    # Pass 1: shift each profile by its correlation lag with the template.
    for i in range(nprofiles):
        xcorr = np.correlate(template, baselineremoved[:, i], "full")
        aligned[:, i] = np.roll(baselineremoved[:, i], np.argmax(xcorr))
    template = np.median(aligned, 1)
    # Pass 2: better template; shift peaks to 1/4 of the profile, using a
    # doubled copy of each profile for the correlation.
    peakbin = np.argmax(template)
    fixedlag = int(nbins / 4) - peakbin
    double = np.zeros(2 * nbins)
    for i in range(nprofiles):
        double[0:nbins] = baselineremoved[:, i]
        double[nbins:2 * nbins] = baselineremoved[:, i]
        xcorr = np.correlate(template, double, "full")
        lag = np.argmax(xcorr) + fixedlag
        aligned[:, i] = np.roll(baselineremoved[:, i], lag)
    newtemplate = np.median(aligned, 1)
    return np.array(aligned), np.array(newtemplate)
示例15: correlationIndividual
def correlationIndividual(data, idx = (0,1), cls = -1, delay = (-100, 100)):
  """Cross-correlations in time between measure columns of ``data``.

  Returns ``cc`` of shape (delay[1]-delay[0]+1, n, n) with n = len(idx):
  the correlation of column i against column j over the requested range
  of (signed) lags, with each column centered by its mean.

  NOTE(review): the loops index columns 0..n-1 directly and never read
  the VALUES in ``idx`` — this coincides with the default idx=(0,1) but
  looks unintended for other index tuples; confirm. ``cls`` is unused.
  Means are taken over data[:, :-1] — presumably the last column holds a
  class label; verify against callers.
  """
  n = len(idx);
  means = np.mean(data[:,:-1], axis = 0);
  # Number of lags requested (inclusive range delay[0]..delay[1]).
  nd = delay[1] - delay[0] + 1;
  cc = np.zeros((nd,n,n))
  for i in range(n):
    for j in range(n):
      # Negative lags: correlate i against a forward-shifted j.
      if delay[0] < 0:
        cm = np.correlate(data[:, i] - means[i], data[-delay[0]:, j] - means[j]);
      else:
        cm = [0];
      # Positive lags: correlate j against a forward-shifted i.
      if delay[1] > 0:
        cp = np.correlate(data[:, j] - means[j], data[delay[1]:, i] - means[i]);
      else:
        cp = [0];
      # Stitch negative and positive lag halves (zero lag from cp end).
      ca = np.concatenate((cm[1:], cp[::-1]));
      # Trim to the requested window when the range excludes zero lag.
      if delay[0] > 0:
        cc[:,i,j] = ca[delay[0]:];
      elif delay[1] < 0:
        cc[:,i,j] = ca[:-delay[1]];
      else:
        cc[:,i,j] = ca;
  return cc;