This page collects typical usage examples of scipy.fftpack in Python. If you are wondering what scipy.fftpack does, how to call it, or what working examples look like, the hand-picked code samples below may help. You can also browse further usage examples of the parent package, scipy.
The 13 code examples below demonstrate scipy.fftpack, sorted by popularity by default.
Example 1: make_wave
# Required import: import scipy
# Or: from scipy import fftpack
def make_wave(self):
"""Transforms to the time domain.
returns: Wave
"""
N = len(self.hs)
ys = scipy.fftpack.idct(self.hs, type=2) / 2 / N
#NOTE: whatever the start time was, we lose it when
# we transform back
#ts = self.start + np.arange(len(ys)) / self.framerate
return Wave(ys, framerate=self.framerate)
Example 2: make_dct
# Required import: import scipy
# Or: from scipy import fftpack
def make_dct(self):
"""Computes the DCT of this wave.
"""
N = len(self.ys)
hs = scipy.fftpack.dct(self.ys, type=2)
fs = (0.5 + np.arange(N)) / 2
return Dct(hs, fs, self.framerate)
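The division by 2 and by N in make_wave matches scipy.fftpack's unnormalized transforms: a type-2 DCT followed by a type-2 IDCT returns the input scaled by 2*N. A minimal round-trip sketch, independent of the Wave/Dct classes used above:

import numpy as np
import scipy.fftpack

ys = np.random.default_rng(0).standard_normal(8)
N = len(ys)

hs = scipy.fftpack.dct(ys, type=2)                # forward DCT-II, unnormalized
ys_back = scipy.fftpack.idct(hs, type=2) / 2 / N  # same scaling as make_wave above

print(np.allclose(ys, ys_back))                   # True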
Example 3: test_fftpack_import
# Required import: import scipy
# Or: from scipy import fftpack
def test_fftpack_import(self):
base = Path(scipy.__file__).parent
regexp = r"\s*from.+\.fftpack import .*\n"
for path in base.rglob("*.py"):
if base / "fftpack" in path.parents:
continue
# use tokenize to auto-detect encoding on systems where no
# default encoding is defined (e.g. LANG='C')
with tokenize.open(str(path)) as file:
assert_(all(not re.fullmatch(regexp, line)
for line in file),
"{0} contains an import from fftpack".format(path))
Example 4: DST
# Required import: import scipy
# Or: from scipy import fftpack
def DST(x):
"""
Converts Scipy's DST output to Matlab's DST (scaling).
"""
X = scipy.fftpack.dst(x,type=1,axis=0)
return X/2.0
Example 5: IDST
# Required import: import scipy
# Or: from scipy import fftpack
def IDST(X):
"""
Inverse DST. Python -> Matlab
"""
n = X.shape[0]
x = np.real(scipy.fftpack.idst(X,type=1,axis=0))
return x/(n+1.0)
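With these two scalings the round trip IDST(DST(x)) recovers x, because scipy.fftpack's unnormalized type-1 DST applied twice multiplies the input by 2*(n+1), and the factors 1/2 and 1/(n+1) cancel that exactly. A quick self-contained check of that identity:

import numpy as np
import scipy.fftpack

x = np.random.default_rng(1).standard_normal((16, 3))   # columns are transformed independently (axis=0)
n = x.shape[0]

X = scipy.fftpack.dst(x, type=1, axis=0) / 2.0                        # DST(x) as defined above
x_back = np.real(scipy.fftpack.idst(X, type=1, axis=0)) / (n + 1.0)   # IDST(X) as defined above

print(np.allclose(x, x_back))                                          # True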
Example 6: noise_processing
# Required import: import scipy
# Or: from scipy import fftpack
def noise_processing(fft_para,dataS):
'''
This function performs time-domain and frequency-domain normalization if needed. In practice, we prefer to fold
the normalization into the cross-correlation step by selecting coherency or deconvolution (Prieto et al., 2008, 2009; Denolle et al., 2013).
PARAMETERS:
------------------------
fft_para: dictionary containing all variables used for FFT and cross-correlation
dataS: 2D matrix of all segmented noise data
RETURNS:
------------------------
source_white: 2D matrix of data spectra
'''
# load parameters first
time_norm = fft_para['time_norm']
freq_norm = fft_para['freq_norm']
smooth_N = fft_para['smooth_N']
N = dataS.shape[0]
#------to normalize in time or not------
if time_norm != 'no':
if time_norm == 'one_bit': # sign normalization
white = np.sign(dataS)
elif time_norm == 'rma': # running mean: normalization over smoothed absolute average
white = np.zeros(shape=dataS.shape,dtype=dataS.dtype)
for kkk in range(N):
white[kkk,:] = dataS[kkk,:]/moving_ave(np.abs(dataS[kkk,:]),smooth_N)
else: # don't normalize
white = dataS
#-----to whiten or not------
if freq_norm != 'no':
source_white = whiten(white,fft_para) # whiten and return FFT
else:
Nfft = int(next_fast_len(int(dataS.shape[1])))
source_white = scipy.fftpack.fft(white, Nfft, axis=1) # return FFT
return source_white
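noise_processing depends on helpers from its parent package (moving_ave, whiten, next_fast_len) that are not shown on this page. The sketch below reproduces only the one-bit branch followed by the plain FFT fallback, using nothing beyond numpy and scipy; the parameter values are made up for illustration:

import numpy as np
import scipy.fftpack
from scipy.fftpack import next_fast_len

# hypothetical parameters: one-bit time normalization, no spectral whitening
fft_para = {'time_norm': 'one_bit', 'freq_norm': 'no', 'smooth_N': 20}
dataS = np.random.default_rng(2).standard_normal((4, 1000))    # 4 segments of synthetic noise

white = np.sign(dataS) if fft_para['time_norm'] == 'one_bit' else dataS   # one-bit (sign) normalization
Nfft = int(next_fast_len(dataS.shape[1]))
source_white = scipy.fftpack.fft(white, Nfft, axis=1)           # spectra of the normalized segments
print(source_white.shape)                                        # (4, Nfft)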
Example 7: optimized_correlate1
# Required import: import scipy
# Or: from scipy import fftpack
def optimized_correlate1(fft1_smoothed_abs,fft2,maxlag,dt,Nfft,nwin,method="cross-correlation"):
'''
Optimized version of the correlation functions: the smoothed source
spectrum amplitude is moved out of the inner for loop.
It also exploits the linearity of the inverse FFT: the spectra are stacked
first so that only one ifft is needed, which was the most time-consuming
step in the previous correlate function.
'''
#------convert all 2D arrays into 1D to speed up--------
corr = np.zeros(nwin*(Nfft//2),dtype=np.complex64)
corr = fft1_smoothed_abs.reshape(fft1_smoothed_abs.size,) * fft2.reshape(fft2.size,)
if method == "coherence":
temp = moving_ave(np.abs(fft2.reshape(fft2.size,)),10)
corr /= temp
corr = corr.reshape(nwin,Nfft//2)
ncorr = np.zeros(shape=Nfft,dtype=np.complex64)
ncorr[:Nfft//2] = np.mean(corr,axis=0)
ncorr[-(Nfft//2)+1:]=np.flip(np.conj(ncorr[1:(Nfft//2)]),axis=0)
ncorr[0]=complex(0,0)
ncorr = np.real(np.fft.ifftshift(scipy.fftpack.ifft(ncorr, Nfft, axis=0)))
tcorr = np.arange(-Nfft//2 + 1, Nfft//2)*dt
ind = np.where(np.abs(tcorr) <= maxlag)[0]
ncorr = ncorr[ind]
return ncorr
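The efficiency gain above comes from stacking the half-spectra first and calling the inverse FFT once, after rebuilding the negative frequencies by conjugate symmetry so that the result is real. A stand-alone sketch of that reconstruction step, assuming an even Nfft:

import numpy as np
import scipy.fftpack

Nfft = 16                                        # assumed even, as in the function above
rng = np.random.default_rng(3)
half = rng.standard_normal(Nfft // 2) + 1j * rng.standard_normal(Nfft // 2)

ncorr = np.zeros(Nfft, dtype=np.complex64)
ncorr[:Nfft // 2] = half                                            # stacked positive-frequency half
ncorr[-(Nfft // 2) + 1:] = np.flip(np.conj(ncorr[1:Nfft // 2]))     # negative frequencies by symmetry
ncorr[0] = complex(0, 0)                                            # zero out the DC term

cc = np.real(np.fft.ifftshift(scipy.fftpack.ifft(ncorr, Nfft)))     # one ifft, zero lag in the middle
print(cc.shape)                                                     # (Nfft,)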
Example 8: check_and_phase_shift
# Required import: import scipy
# Or: from scipy import fftpack
def check_and_phase_shift(trace):
# print trace
taper_length = 20.0
# if trace.stats.npts < 4 * taper_length*trace.stats.sampling_rate:
# trace.data = np.zeros(trace.stats.npts)
# return trace
dt = np.mod(trace.stats.starttime.datetime.microsecond*1.0e-6,
trace.stats.delta)
if (trace.stats.delta - dt) <= np.finfo(float).eps:
dt = 0
if dt != 0:
if dt <= (trace.stats.delta / 2.):
dt = -dt
# direction = "left"
else:
dt = (trace.stats.delta - dt)
# direction = "right"
trace.detrend(type="demean")
trace.detrend(type="simple")
taper_1s = taper_length * float(trace.stats.sampling_rate) / trace.stats.npts
trace.taper(taper_1s)
n = int(2**nextpow2(len(trace.data)))
FFTdata = scipy.fftpack.fft(trace.data, n=n)
fftfreq = scipy.fftpack.fftfreq(n, d=trace.stats.delta)
FFTdata = FFTdata * np.exp(1j * 2. * np.pi * fftfreq * dt)
trace.data = np.real(scipy.fftpack.ifft(FFTdata, n=n)[:len(trace.data)])
trace.stats.starttime += dt
return trace
else:
return trace
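At its core, check_and_phase_shift applies a sub-sample delay as a linear phase ramp exp(2j*pi*f*dt) in the frequency domain. The sketch below isolates that step on a synthetic sine, leaving out the ObsPy Trace handling (detrend, taper, start-time bookkeeping):

import numpy as np
import scipy.fftpack

delta = 0.01                                     # sample spacing in seconds
t = np.arange(1024) * delta
f0 = 50 / (len(t) * delta)                       # a frequency exactly periodic in the window
data = np.sin(2 * np.pi * f0 * t)
dt = 0.004                                       # sub-sample shift to apply, in seconds

n = int(2 ** np.ceil(np.log2(len(data))))        # stand-in for the nextpow2 helper
FFTdata = scipy.fftpack.fft(data, n=n)
fftfreq = scipy.fftpack.fftfreq(n, d=delta)
FFTdata *= np.exp(1j * 2.0 * np.pi * fftfreq * dt)     # linear phase ramp = time shift
shifted = np.real(scipy.fftpack.ifft(FFTdata, n=n)[:len(data)])

# the shifted samples correspond to the original signal evaluated at t + dt
print(np.allclose(shifted, np.sin(2 * np.pi * f0 * (t + dt))))     # True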
Example 9: C3_process
# Required import: import scipy
# Or: from scipy import fftpack
def C3_process(S1_data,S2_data,Nfft,win):
'''
Performs the C3 processing steps: 1) cut the time windows for the positive and negative lag parts;
2) FFT the two time series; 3) cross-correlate in the frequency domain.
The inverse FFT back to the time domain is left to the caller; this function returns the cross-spectra ccp and ccn.
'''
#-----initialize the spectrum variables----
ccp1 = np.zeros(Nfft,dtype=np.complex64)
ccn1 = ccp1
ccp2 = ccp1
ccn2 = ccp1
ccp = ccp1
ccn = ccp1
#------find the time window for sta1------
S1_data_N = S1_data[win[0]:win[1]]
S1_data_N = S1_data_N[::-1]
S1_data_P = S1_data[win[2]:win[3]]
S2_data_N = S2_data[win[0]:win[1]]
S2_data_N = S2_data_N[::-1]
S2_data_P = S2_data[win[2]:win[3]]
#---------------do FFT-------------
ccp1 = scipy.fftpack.fft(S1_data_P, Nfft)
ccn1 = scipy.fftpack.fft(S1_data_N, Nfft)
ccp2 = scipy.fftpack.fft(S2_data_P, Nfft)
ccn2 = scipy.fftpack.fft(S2_data_N, Nfft)
#------cross correlations--------
ccp = np.conj(ccp1)*ccp2
ccn = np.conj(ccn1)*ccn2
return ccp,ccn
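The cross-correlation step in C3_process uses the standard identity ifft(conj(FFT(a)) * FFT(b)); with enough zero padding, the circular result matches a direct time-domain correlation. A small check of that identity, independent of the windowing logic above:

import numpy as np
import scipy.fftpack

rng = np.random.default_rng(5)
a = rng.standard_normal(64)
b = rng.standard_normal(64)

Nfft = 2 * len(a)                                    # pad so the circular correlation becomes linear
ccp = np.conj(scipy.fftpack.fft(a, Nfft)) * scipy.fftpack.fft(b, Nfft)
cc_freq = np.real(scipy.fftpack.ifft(ccp, Nfft))

# lags 0 .. len(a)-1 of the direct (time-domain) cross-correlation
cc_time = np.correlate(b, a, mode='full')[len(a) - 1:]

print(np.allclose(cc_freq[:len(a)], cc_time))        # True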
Example 10: optimized_correlate1
# Required import: import scipy
# Or: from scipy import fftpack
def optimized_correlate1(fft1_smoothed_abs,fft2,maxlag,dt,Nfft,nwin,method="cross-correlation"):
'''
Optimized version of the correlation functions: the smoothed source
spectrum amplitude is moved out of the inner for loop.
It also exploits the linearity of the inverse FFT: the spectra are stacked
first so that only one ifft is needed, which was the most time-consuming
step in the previous correlate function.
'''
#------convert all 2D arrays into 1D to speed up--------
corr = np.zeros(nwin*(Nfft//2),dtype=np.complex64)
corr = fft1_smoothed_abs.reshape(fft1_smoothed_abs.size,) * fft2.reshape(fft2.size,)
if method == "coherence":
temp = moving_ave(np.abs(fft2.reshape(fft2.size,)),10)
try:
corr /= temp
except ValueError:
raise ValueError('smoothed spectrum has zero values')
corr = corr.reshape(nwin,Nfft//2)
ncorr = np.zeros(shape=Nfft,dtype=np.complex64)
ncorr[:Nfft//2] = np.mean(corr,axis=0)
ncorr[-(Nfft//2)+1:]=np.flip(np.conj(ncorr[1:(Nfft//2)]),axis=0)
ncorr = np.real(np.fft.ifftshift(scipy.fftpack.ifft(ncorr, Nfft, axis=0)))
tcorr = np.arange(-Nfft//2 + 1, Nfft//2)*dt
ind = np.where(np.abs(tcorr) <= maxlag)[0]
ncorr = ncorr[ind]
return ncorr
Example 11: adaptive_filter
# Required import: import scipy
# Or: from scipy import fftpack
def adaptive_filter(cc_array,g):
'''
Adaptive covariance filter to enhance coherent signals. Follows the method of
Nakata et al., 2015 (Appendix B).
The filtered signal x1 is given by x1 = ifft(P * x1(w)), where x1(w) is the spectrum of x1
and P is the filter, constructed from the temporal covariance matrix.
PARAMETERS:
----------------------
cc_array: numpy.ndarray containing the 2D traces of daily/hourly cross-correlation functions
g: a positive number that adjusts the filter harshness
RETURNS:
----------------------
narr: numpy vector containing the stacked cross-correlation function
Written by Chengxin Jiang @Harvard (Oct2019)
'''
if cc_array.ndim == 1:
print('2D matrix is needed for adaptive filtering')
return cc_array
N,M = cc_array.shape
Nfft = next_fast_len(M)
# fft the 2D array
spec = scipy.fftpack.fft(cc_array,axis=1,n=Nfft)[:,:M]
# make cross-spectrum matrix
cspec = np.zeros(shape=(N*N,M),dtype=np.complex64)
for ii in range(N):
for jj in range(N):
kk = ii*N+jj
cspec[kk] = spec[ii]*np.conjugate(spec[jj])
S1 = np.zeros(M,dtype=np.complex64)
S2 = np.zeros(M,dtype=np.complex64)
# construct the filter P
for ii in range(N):
mm = ii*N+ii
S2 += cspec[mm]
for jj in range(N):
kk = ii*N+jj
S1 += cspec[kk]
p = np.power((S1-S2)/(S2*(N-1)),g)
# make ifft
narr = np.real(scipy.fftpack.ifft(np.multiply(p,spec),Nfft,axis=1)[:,:M])
return np.mean(narr,axis=0)
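A usage sketch on synthetic data, assuming the adaptive_filter definition above and the imports it relies on (numpy as np, scipy.fftpack, next_fast_len) are in scope; the signal and noise levels are invented for illustration. A common wavelet buried in independent noise on every trace should be preserved, since the filter P is large only where the cross-spectra are coherent:

import numpy as np
import scipy.fftpack
from scipy.fftpack import next_fast_len

rng = np.random.default_rng(6)
t = np.linspace(0, 1, 500)
signal = np.exp(-200 * (t - 0.3) ** 2) * np.sin(2 * np.pi * 20 * t)    # common wavelet
cc_array = signal + 0.5 * rng.standard_normal((10, t.size))            # 10 noisy daily CCFs

stacked = adaptive_filter(cc_array, g=1)   # g controls the filter harshness
print(stacked.shape)                       # (500,) -> one filtered, stacked trace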
Example 12: adaptive_filter
# Required import: import scipy
# Or: from scipy import fftpack
def adaptive_filter(arr,g):
'''
Adaptive covariance filter to enhance coherent signals. Follows the method of
Nakata et al., 2015 (Appendix B).
The filtered signal x1 is given by x1 = ifft(P * x1(w)), where x1(w) is the spectrum of x1
and P is the filter, constructed from the temporal covariance matrix.
PARAMETERS:
----------------------
arr: numpy.ndarray containing the 2D traces of daily/hourly cross-correlation functions
g: a positive number that adjusts the filter harshness
RETURNS:
----------------------
narr: numpy vector containing the stacked cross-correlation function
'''
if arr.ndim == 1:
return arr
N,M = arr.shape
Nfft = next_fast_len(M)
# fft the 2D array
spec = scipy.fftpack.fft(arr,axis=1,n=Nfft)[:,:M]
# make cross-spectrum matrix
cspec = np.zeros(shape=(N*N,M),dtype=np.complex64)
for ii in range(N):
for jj in range(N):
kk = ii*N+jj
cspec[kk] = spec[ii]*np.conjugate(spec[jj])
S1 = np.zeros(M,dtype=np.complex64)
S2 = np.zeros(M,dtype=np.complex64)
# construct the filter P
for ii in range(N):
mm = ii*N+ii
S2 += cspec[mm]
for jj in range(N):
kk = ii*N+jj
S1 += cspec[kk]
p = np.power((S1-S2)/(S2*(N-1)),g)
# make ifft
narr = np.real(scipy.fftpack.ifft(np.multiply(p,spec),Nfft,axis=1)[:,:M])
return np.mean(narr,axis=0)
Example 13: correlate
# Required import: import scipy
# Or: from scipy import fftpack
def correlate(fft1,fft2, maxlag,dt, Nfft, method="cross-correlation"):
"""This function takes ndimensional *data* array, computes the cross-correlation in the frequency domain
and returns the cross-correlation function between [-*maxlag*:*maxlag*].
:type fft1: :class:`numpy.ndarray`
:param fft1: This array contains the fft of each timeseries to be cross-correlated.
:type maxlag: int
:param maxlag: This number defines the number of samples (N=2*maxlag + 1) of the CCF that will be returned.
:rtype: :class:`numpy.ndarray`
:returns: The cross-correlation function between [-maxlag:maxlag]
"""
# Speed up FFT by padding to optimal size for FFTPACK
t0=time.time()
if fft1.ndim == 1:
axis = 0
nwin=1
elif fft1.ndim == 2:
axis = 1
nwin= int(fft1.shape[0])
corr=np.zeros(shape=(nwin,Nfft),dtype=np.complex64)
corr[:,:Nfft//2] = np.conj(fft1) * fft2
if method == 'deconv':
ind = np.where(np.abs(fft1)>0)
corr[ind] /= moving_ave(np.abs(fft1[ind]),10)**2
#corr[ind] /= running_abs_mean(np.abs(fft1[ind]),10) ** 2
elif method == 'coherence':
ind = np.where(np.abs(fft1)>0)
corr[ind] /= running_abs_mean(np.abs(fft1[ind]),5)
ind = np.where(np.abs(fft2)>0)
corr[ind] /= running_abs_mean(np.abs(fft2[ind]),5)
elif method == 'raw':
ind = 1
#--------------------problems: [::-1] only flips along axis=0 direction------------------------
#corr[:,-(Nfft // 2):] = corr[:,:(Nfft // 2)].conjugate()[::-1] # fill in the complex conjugate
#----------------------------------------------------------------------------------------------
corr[:,0] = complex(0,0)
corr[:,-(Nfft//2)+1:]=np.flip(np.conj(corr[:,1:(Nfft//2)]),axis=axis)
corr = np.real(np.fft.ifftshift(scipy.fftpack.ifft(corr, Nfft, axis=axis)))
tcorr = np.arange(-Nfft//2 + 1, Nfft//2)*dt
ind = np.where(np.abs(tcorr) <= maxlag)[0]
if axis == 1:
corr = corr[:,ind]
else:
corr = corr[ind]
tcorr=tcorr[ind]
t1=time.time()
print('original takes '+str(t1-t0))
return corr,tcorr
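correlate calls smoothing helpers (moving_ave, running_abs_mean) that this page does not show. Below is a minimal stand-in, assuming a plain centered boxcar average of the absolute spectrum is what is intended; the actual helpers in the source project may differ in window definition and edge handling:

import numpy as np

def moving_ave(A, N):
    """Boxcar moving average of a 1D array with half-window N (simple stand-in)."""
    A = np.asarray(A, dtype=float)
    kernel = np.ones(2 * N + 1) / (2 * N + 1)
    # mode='same' keeps the output the same length as the input
    return np.convolve(A, kernel, mode='same')

# example: smooth an amplitude spectrum before using it as a deconvolution denominator
amp = np.abs(np.fft.rfft(np.random.default_rng(7).standard_normal(256)))
smoothed = moving_ave(amp, 10)
print(amp.shape, smoothed.shape)    # both (129,)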