

Python fftpack.next_fast_len method code examples

This article collects typical usage examples of the scipy.fftpack.next_fast_len method in Python. If you are wondering what exactly fftpack.next_fast_len does, how to call it, or what real uses look like, the curated code examples below may help. You can also explore further usage examples from the containing module, scipy.fftpack.


The following presents 15 code examples of the fftpack.next_fast_len method, sorted by popularity by default. You can upvote the examples you find useful; your votes help the system recommend better Python code examples.
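
Before diving into the examples, here is a minimal, hedged sketch of what the method itself does (it assumes only that SciPy is installed): scipy.fftpack.next_fast_len returns the next 5-smooth integer (one whose only prime factors are 2, 3 and 5) greater than or equal to its argument, which is a convenient FFT padding length.

import numpy as np
from scipy.fftpack import next_fast_len, fft

n = 4001                   # an awkward transform length
nfft = next_fast_len(n)    # next 5-smooth length (prime factors 2, 3, 5 only) >= n
print(n, "->", nfft)

x = np.random.randn(n)
X = fft(x, nfft)           # zero-pad to the fast length before transforming
print(X.shape)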

Example 1: __init__

# Required module import: from scipy import fftpack [as alias]
# Or: from scipy.fftpack import next_fast_len [as alias]
def __init__(self, **kw):
    """  
      Constructor of affine, equidistant 3d mesh class
      ucell : unit cell vectors (in coordinate space)
      Ecut  : Energy cutoff to parametrize the discretization 
    """
    from scipy.fftpack import next_fast_len
    
    self.ucell = kw['ucell'] if 'ucell' in kw else 30.0*np.eye(3) # Unit cell vectors are optional; a 30.0*identity cell is used by default
    self.Ecut = Ecut = kw['Ecut'] if 'Ecut' in kw else 50.0 # 50.0 Hartree by default
    luc = np.sqrt(np.einsum('ix,ix->i', self.ucell, self.ucell))
    self.shape = nn = np.array([next_fast_len( int(np.rint(l * np.sqrt(Ecut)/2))) for l in luc], dtype=int)
    self.size  = np.prod(self.shape)
    gc = self.ucell/(nn) # This is probably the best for finite systems; for PBC use nn, not (nn-1)
    self.dv = np.abs(np.dot(gc[0], np.cross(gc[1], gc[2] )))
    rr = [np.array([gc[i]*j for j in range(nn[i])]) for i in range(3)]
    self.rr = rr
    self.origin = kw['origin'] if 'origin' in kw else np.zeros(3) 
Developer ID: pyscf, Project: pyscf, Lines of code: 20, Source file: mesh_affine_equ.py

Example 2: fast_fft_len

# Required module import: from scipy import fftpack [as alias]
# Or: from scipy.fftpack import next_fast_len [as alias]
def fast_fft_len(n):
    """
    Returns the smallest even integer greater than or equal to the input
    such that the FFT can be computed efficiently at that size.

    Parameters
    ----------
    n : `int`
        minimum size

    Returns
    -------
    N : `int`
        smallest even integer greater than or equal to n which permits efficient FFTs.
    """
    N = next_fast_len(n)
    return N if N % 2 == 0 else fast_fft_len(N + 1) 
Developer ID: pyxem, Project: diffsims, Lines of code: 19, Source file: fourier_transform.py
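
A quick usage sketch for the helper above (hedged: it assumes fast_fft_len as defined in Example 2 is in scope, together with the next_fast_len import shown in its header comments):

from scipy.fftpack import next_fast_len

# next_fast_len alone may return an odd "fast" size such as a power of 5;
# fast_fft_len keeps searching until the fast size is also even.
print(next_fast_len(97))    # 100 = 2^2 * 5^2
print(fast_fft_len(97))     # 100 is already even, so it is returned as-is
print(fast_fft_len(124))    # 125 = 5^3 is odd and gets skipped, yielding 128 = 2^7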

Example 3: _fftautocorr

# Required module import: from scipy import fftpack [as alias]
# Or: from scipy.fftpack import next_fast_len [as alias]
def _fftautocorr(x):
    """Compute the autocorrelation of a real array and crop the result."""
    N = x.shape[-1]
    use_N = fftpack.next_fast_len(2*N-1)
    x_fft = np.fft.rfft(x, use_N, axis=-1)
    cxy = np.fft.irfft(x_fft * x_fft.conj(), n=use_N)[:, :N]
    # Or equivalently (but in most cases slower):
    # cxy = np.array([np.convolve(xx, yy[::-1], mode='full')
    #                 for xx, yy in zip(x, x)])[:, N-1:2*N-1]
    return cxy 
Developer ID: Relph1119, Project: GraphicDesignPatternByPython, Lines of code: 12, Source file: windows.py
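
Padding to next_fast_len(2*N-1) is what makes the circular FFT product equal to the linear autocorrelation, so the result should match a direct np.correlate per row. A small check (hedged sketch, assuming the _fftautocorr above with its numpy and scipy.fftpack imports is in scope):

import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal((3, 64))       # three real signals of length N = 64

fft_ac = _fftautocorr(x)
direct = np.array([np.correlate(row, row, mode='full')[63:] for row in x])  # lags 0..N-1

print(np.allclose(fft_ac, direct))     # expected: True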

Example 4: optimal_fft_size

# Required module import: from scipy import fftpack [as alias]
# Or: from scipy.fftpack import next_fast_len [as alias]
def optimal_fft_size(target, real = False):
    """Wrapper around scipy function next_fast_len() for calculating optimal FFT padding.
    scipy.fft was only added in 1.4.0, so we fall back to scipy.fftpack
    if it is not available. The main difference is that next_fast_len()
    does not take a second argument in the older implementation.

    Parameters
    ----------
    target : int
        Length to start searching from. Must be a positive integer.
    real : bool, optional
        True if the FFT involves real input or output; only honoured
        for scipy >= 1.4.0.

    Returns
    -------
    int
        Optimal FFT size.
    """

    try: # pragma: no cover
        from scipy.fft import next_fast_len

        support_real = True

    except ImportError: # pragma: no cover
        from scipy.fftpack import next_fast_len

        support_real = False

    if support_real: # pragma: no cover
        return next_fast_len(target, real)
    else: # pragma: no cover
        return next_fast_len(target)

# Functions used in correlate_library. 
Developer ID: pyxem, Project: pyxem, Lines of code: 37, Source file: indexation_utils.py
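
Usage is identical on both code paths (a hedged sketch, assuming the optimal_fft_size above is in scope); the real flag is simply ignored when only scipy.fftpack is available:

n = optimal_fft_size(1000)                   # a fast FFT length >= 1000
n_real = optimal_fft_size(1000, real=True)   # may differ only when scipy.fft >= 1.4 is used
print(n, n_real)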

Example 5: autocov

# Required module import: from scipy import fftpack [as alias]
# Or: from scipy.fftpack import next_fast_len [as alias]
def autocov(ary, axis=-1):
    """Compute autocovariance estimates for every lag for the input array.

    Parameters
    ----------
    ary : Numpy array
        An array containing MCMC samples
    axis : int, optional
        The axis along which to compute the autocovariance (defaults to the last axis)

    Returns
    -------
    acov: Numpy array same size as the input array
    """
    axis = axis if axis > 0 else len(ary.shape) + axis
    n = ary.shape[axis]
    m = next_fast_len(2 * n)

    ary = ary - ary.mean(axis, keepdims=True)

    # added to silence tuple warning for a submodule
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")

        ifft_ary = np.fft.rfft(ary, n=m, axis=axis)
        ifft_ary *= np.conjugate(ifft_ary)

        shape = tuple(
            slice(None) if dim_len != axis else slice(0, n) for dim_len, _ in enumerate(ary.shape)
        )
        cov = np.fft.irfft(ifft_ary, n=m, axis=axis)[shape]
        cov /= n

    return cov 
Developer ID: arviz-devs, Project: arviz, Lines of code: 34, Source file: stats_utils.py
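
A quick sanity check (hedged sketch, assuming the autocov above, with its numpy/warnings/next_fast_len imports, is in scope): the lag-0 term of the FFT-based autocovariance is just the biased sample variance.

import numpy as np

rng = np.random.default_rng(42)
draws = rng.standard_normal(10_000)        # a stand-in for one MCMC chain

acov = autocov(draws)
print(np.isclose(acov[0], draws.var()))    # expected: True (lag 0 == biased variance)
print(acov[1:4])                           # higher lags should be close to zero for iid draws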

Example 6: shift_data_subpixel

# Required module import: from scipy import fftpack [as alias]
# Or: from scipy.fftpack import next_fast_len [as alias]
def shift_data_subpixel(inputs):
    ''' rigid shift of X by ymax and xmax; allows subpixel shifts
        (** not being used **) '''
    X, ymax, xmax, pad_fft = inputs
    ymax = ymax.flatten()
    xmax = xmax.flatten()
    if X.ndim<3:
        X = X[np.newaxis,:,:]

    nimg, Ly0, Lx0 = X.shape
    if pad_fft:
        X = fft2(X.astype('float32'), (next_fast_len(Ly0), next_fast_len(Lx0)))
    else:
        X = fft2(X.astype('float32'))
    nimg, Ly, Lx = X.shape
    Ny = fft.ifftshift(np.arange(-np.fix(Ly/2), np.ceil(Ly/2)))
    Nx = fft.ifftshift(np.arange(-np.fix(Lx/2), np.ceil(Lx/2)))
    [Nx,Ny] = np.meshgrid(Nx,Ny)
    Nx = Nx.astype('float32') / Lx
    Ny = Ny.astype('float32') / Ly
    dph = Nx * np.reshape(xmax, (-1,1,1)) + Ny * np.reshape(ymax, (-1,1,1))
    Y = np.real(ifft2(X * np.exp((2j * np.pi) * dph)))
    # crop back to original size
    if Ly0<Ly or Lx0<Lx:
        Lyhalf = int(np.floor(Ly/2))
        Lxhalf = int(np.floor(Lx/2))
        Y = Y[np.ix_(np.arange(0,nimg,1,int),
                     np.arange(-np.fix(Ly0/2), np.ceil(Ly0/2),1,int) + Lyhalf,
                     np.arange(-np.fix(Lx0/2), np.ceil(Lx0/2),1,int) + Lxhalf)]
    return Y 
Developer ID: MouseLand, Project: suite2p, Lines of code: 33, Source file: rigid.py
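
The heart of the routine above is the Fourier shift theorem: multiplying the spectrum by a linear phase ramp shifts the signal by an arbitrary, possibly fractional, number of samples. A minimal 1-D numpy sketch of the same idea (an illustration only, not the suite2p code):

import numpy as np

n = 128
x = np.exp(-0.5 * ((np.arange(n) - 40.0) / 3.0) ** 2)   # a Gaussian bump centred at sample 40

shift = 2.5                                              # desired subpixel shift, in samples
freqs = np.fft.fftfreq(n)                                # frequencies in cycles per sample
x_shifted = np.real(np.fft.ifft(np.fft.fft(x) * np.exp(-2j * np.pi * freqs * shift)))

print(np.argmax(x), np.argmax(x_shifted))                # peak moves from 40 to about 42-43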

Example 7: n_fft_samples

# Required module import: from scipy import fftpack [as alias]
# Or: from scipy.fftpack import next_fast_len [as alias]
def n_fft_samples(self):
        if self._n_fft_samples is None:
            self._n_fft_samples = next_fast_len(
                self.n_time_samples_per_window)
        return self._n_fft_samples 
Developer ID: Eden-Kramer-Lab, Project: spectral_connectivity, Lines of code: 7, Source file: transforms.py

Example 8: _auto_correlation

# Required module import: from scipy import fftpack [as alias]
# Or: from scipy.fftpack import next_fast_len [as alias]
def _auto_correlation(data, axis=-1):
    n_time_samples_per_window = data.shape[axis]
    n_fft_samples = next_fast_len(2 * n_time_samples_per_window - 1)
    dpss_fft = fft(data, n_fft_samples, axis=axis)
    power = dpss_fft * dpss_fft.conj()
    return np.real(ifft(power, axis=axis)) 
Developer ID: Eden-Kramer-Lab, Project: spectral_connectivity, Lines of code: 8, Source file: transforms.py

Example 9: pws

# Required module import: from scipy import fftpack [as alias]
# Or: from scipy.fftpack import next_fast_len [as alias]
def pws(cc_array,sampling_rate,power=2,pws_timegate=5.):
    '''
    Performs phase-weighted stack on array of time series.
    Follows methods of Schimmel and Paulssen, 1997. 
    If s(t) is time series data (seismogram, or cross-correlation),
    S(t) = s(t) + i*H(s(t)), where H(s(t)) is Hilbert transform of s(t)
    S(t) = s(t) + i*H(s(t)) = A(t)*exp(i*phi(t)), where
    A(t) is envelope of s(t) and phi(t) is phase of s(t)
    Phase-weighted stack, g(t), is then:
    g(t) = 1/N sum j = 1:N s_j(t) * | 1/N sum k = 1:N exp[i * phi_k(t)]|^v
    where N is number of traces used, v is sharpness of phase-weighted stack
    
    PARAMETERS:
    ---------------------
    cc_array: 2D array (N traces x M samples) of time series data (numpy.ndarray)
    sampling_rate: sampling rate of the time series (int)
    power: exponent for phase stack (int)
    pws_timegate: number of seconds to smooth phase stack (float)
    
    RETURNS:
    ---------------------
    weighted: Phase weighted stack of time series data (numpy.ndarray)

    Originally written by Tim Clements
    Modified by Chengxin Jiang @Harvard
    '''

    if cc_array.ndim == 1:
        print('2D matrix is needed for pws')
        return cc_array
    N,M = cc_array.shape

    # construct analytical signal
    analytic = hilbert(cc_array,axis=1, N=next_fast_len(M))[:,:M]
    phase = np.angle(analytic)
    phase_stack = np.mean(np.exp(1j*phase),axis=0)
    phase_stack = np.abs(phase_stack)**(power)

    # weighted is the final waveforms
    weighted = np.multiply(cc_array,phase_stack)
    return np.mean(weighted,axis=0) 
Developer ID: mdenolle, Project: NoisePy, Lines of code: 43, Source file: comp_stacking.py
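
To see what the phase weighting buys you, the sketch below stacks noisy copies of a common pulse (hedged: it assumes the pws above is in scope together with its imports, i.e. numpy as np, scipy.signal.hilbert and scipy.fftpack.next_fast_len). In the signal-free part of the record, incoherent noise should be suppressed far more strongly than in a plain linear stack.

import numpy as np

rng = np.random.default_rng(1)
fs = 20                                          # sampling rate in Hz (hypothetical)
t = np.arange(0, 60, 1.0 / fs)
pulse = np.sin(2 * np.pi * 1.0 * t) * np.exp(-0.5 * (t - 10.0) ** 2)   # wavelet near t = 10 s

traces = pulse + 0.5 * rng.standard_normal((20, t.size))   # 20 noisy "cross-correlations"

linear_stack = traces.mean(axis=0)
pw_stack = pws(traces, sampling_rate=fs)

tail = t > 30                                    # window with no coherent signal
print(np.std(linear_stack[tail]), np.std(pw_stack[tail]))  # second value should be much smaller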

Example 10: noise_processing

# Required module import: from scipy import fftpack [as alias]
# Or: from scipy.fftpack import next_fast_len [as alias]
def noise_processing(fft_para,dataS):
    '''
    This function performs time-domain and frequency-domain normalization if needed. In practice, we prefer to include
    the normalization in the cross-correlation step by selecting coherency or deconvolution (Prieto et al., 2008, 2009; Denolle et al., 2013).
    PARAMETERS:
    ------------------------
    fft_para: dictionary containing all useful variables used for fft and cc
    dataS: 2D matrix of all segmented noise data
    # OUTPUT VARIABLES:
    source_white: 2D matrix of data spectra
    '''
    # load parameters first
    time_norm   = fft_para['time_norm']
    freq_norm   = fft_para['freq_norm']
    smooth_N    = fft_para['smooth_N']
    N = dataS.shape[0]

    #------to normalize in time or not------
    if time_norm != 'no':

        if time_norm == 'one_bit': 	# sign normalization
            white = np.sign(dataS)
        elif time_norm == 'rma': # running mean: normalization over smoothed absolute average
            white = np.zeros(shape=dataS.shape,dtype=dataS.dtype)
            for kkk in range(N):
                white[kkk,:] = dataS[kkk,:]/moving_ave(np.abs(dataS[kkk,:]),smooth_N)

    else:	# don't normalize
        white = dataS

    #-----to whiten or not------
    if freq_norm != 'no':
        source_white = whiten(white,fft_para)	# whiten and return FFT
    else:
        Nfft = int(next_fast_len(int(dataS.shape[1])))
        source_white = scipy.fftpack.fft(white, Nfft, axis=1) # return FFT

    return source_white 
Developer ID: mdenolle, Project: NoisePy, Lines of code: 40, Source file: noise_module.py
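
A minimal way to exercise this function (hedged sketch: it uses only the 'one_bit' time normalization and no whitening, so the external whiten and moving_ave helpers are never reached; it assumes the noise_processing above is in scope along with numpy, scipy.fftpack and next_fast_len):

import numpy as np

rng = np.random.default_rng(0)
dataS = rng.standard_normal((4, 6000))      # 4 segmented noise windows of 6000 samples

fft_para = {
    'time_norm': 'one_bit',   # sign-bit normalization in the time domain
    'freq_norm': 'no',        # skip whitening and just return the padded FFT
    'smooth_N' : 20,          # required key, but unused on this code path
}

source_white = noise_processing(fft_para, dataS)
print(source_white.shape)                   # (4, next_fast_len(6000)) complex spectra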

Example 11: pws

# Required module import: from scipy import fftpack [as alias]
# Or: from scipy.fftpack import next_fast_len [as alias]
def pws(arr,sampling_rate,power=2,pws_timegate=5.):
    '''
    Performs phase-weighted stack on array of time series. Modified from the noise function by Tim Clements.
    Follows methods of Schimmel and Paulssen, 1997.
    If s(t) is time series data (seismogram, or cross-correlation),
    S(t) = s(t) + i*H(s(t)), where H(s(t)) is Hilbert transform of s(t)
    S(t) = s(t) + i*H(s(t)) = A(t)*exp(i*phi(t)), where
    A(t) is envelope of s(t) and phi(t) is phase of s(t)
    Phase-weighted stack, g(t), is then:
    g(t) = 1/N sum j = 1:N s_j(t) * | 1/N sum k = 1:N exp[i * phi_k(t)]|^v
    where N is number of traces used, v is sharpness of phase-weighted stack

    PARAMETERS:
    ---------------------
    arr: N length array of time series data (numpy.ndarray)
    sampling_rate: sampling rate of time series arr (int)
    power: exponent for phase stack (int)
    pws_timegate: number of seconds to smooth phase stack (float)

    RETURNS:
    ---------------------
    weighted: Phase weighted stack of time series data (numpy.ndarray)
    '''

    if arr.ndim == 1:
        return arr
    N,M = arr.shape
    analytic = hilbert(arr,axis=1, N=next_fast_len(M))[:,:M]
    phase = np.angle(analytic)
    phase_stack = np.mean(np.exp(1j*phase),axis=0)
    phase_stack = np.abs(phase_stack)**(power)

    # smoothing
    #timegate_samples = int(pws_timegate * sampling_rate)
    #phase_stack = moving_ave(phase_stack,timegate_samples)
    weighted = np.multiply(arr,phase_stack)
    return np.mean(weighted,axis=0) 
Developer ID: mdenolle, Project: NoisePy, Lines of code: 39, Source file: noise_module.py

Example 12: pws

# Required module import: from scipy import fftpack [as alias]
# Or: from scipy.fftpack import next_fast_len [as alias]
def pws(arr,sampling_rate,power=2,pws_timegate=5.):
    '''
    Performs phase-weighted stack on array of time series. Modified from the noise function by Tim Clements.
    Follows methods of Schimmel and Paulssen, 1997. 
    If s(t) is time series data (seismogram, or cross-correlation),
    S(t) = s(t) + i*H(s(t)), where H(s(t)) is Hilbert transform of s(t)
    S(t) = s(t) + i*H(s(t)) = A(t)*exp(i*phi(t)), where
    A(t) is envelope of s(t) and phi(t) is phase of s(t)
    Phase-weighted stack, g(t), is then:
    g(t) = 1/N sum j = 1:N s_j(t) * | 1/N sum k = 1:N exp[i * phi_k(t)]|^v
    where N is number of traces used, v is sharpness of phase-weighted stack
    
    PARAMETERS:
    ---------------------
    arr: N length array of time series data (numpy.ndarray)
    sampling_rate: sampling rate of time series arr (int)
    power: exponent for phase stack (int)
    pws_timegate: number of seconds to smooth phase stack (float)
    
    RETURNS:
    ---------------------
    weighted: Phase weighted stack of time series data (numpy.ndarray)
    '''

    if arr.ndim == 1:
        return arr
    N,M = arr.shape
    analytic = hilbert(arr,axis=1, N=next_fast_len(M))[:,:M]
    phase = np.angle(analytic)
    phase_stack = np.mean(np.exp(1j*phase),axis=0)
    phase_stack = np.abs(phase_stack)**(power)

    # smoothing 
    #timegate_samples = int(pws_timegate * sampling_rate)
    #phase_stack = moving_ave(phase_stack,timegate_samples)
    weighted = np.multiply(arr,phase_stack)
    return np.mean(weighted,axis=0) 
Developer ID: mdenolle, Project: NoisePy, Lines of code: 39, Source file: noise_module.py

Example 13: noise_processing

# Required module import: from scipy import fftpack [as alias]
# Or: from scipy.fftpack import next_fast_len [as alias]
def noise_processing(fft_para,dataS):
    '''
    This function performs time-domain and frequency-domain normalization if needed. In practice, we prefer to include
    the normalization in the cross-correlation step by selecting coherency or deconvolution (Prieto et al., 2008, 2009; Denolle et al., 2013).
    PARAMETERS:
    ------------------------
    fft_para: dictionary containing all useful variables used for fft and cc
    dataS: 2D matrix of all segmented noise data
    # OUTPUT VARIABLES:
    source_white: 2D matrix of data spectra
    '''
    # load parameters first
    time_norm   = fft_para['time_norm']
    freq_norm   = fft_para['freq_norm']
    smooth_N    = fft_para['smooth_N']
    N = dataS.shape[0]

    #------to normalize in time or not------
    if time_norm != 'no':

        if time_norm == 'one_bit': 	# sign normalization
            white = np.sign(dataS)
        elif time_norm == 'rma': # running mean: normalization over smoothed absolute average           
            white = np.zeros(shape=dataS.shape,dtype=dataS.dtype)
            for kkk in range(N):
                white[kkk,:] = dataS[kkk,:]/moving_ave(np.abs(dataS[kkk,:]),smooth_N)

    else:	# don't normalize
        white = dataS

    #-----to whiten or not------
    if freq_norm != 'no':
        source_white = whiten(white,fft_para)	# whiten and return FFT
    else:
        Nfft = int(next_fast_len(int(dataS.shape[1])))
        source_white = scipy.fftpack.fft(white, Nfft, axis=1) # return FFT
    
    return source_white 
Developer ID: mdenolle, Project: NoisePy, Lines of code: 40, Source file: noise_module.py

Example 14: pws

# Required module import: from scipy import fftpack [as alias]
# Or: from scipy.fftpack import next_fast_len [as alias]
def pws(arr,power=2.,sampling_rate=20.,pws_timegate = 5.):
    """
    Performs phase-weighted stack on array of time series. 

    Follows methods of Schimmel and Paulssen, 1997. 
    If s(t) is time series data (seismogram, or cross-correlation),
    S(t) = s(t) + i*H(s(t)), where H(s(t)) is Hilbert transform of s(t)
    S(t) = s(t) + i*H(s(t)) = A(t)*exp(i*phi(t)), where
    A(t) is envelope of s(t) and phi(t) is phase of s(t)
    Phase-weighted stack, g(t), is then:
    g(t) = 1/N sum j = 1:N s_j(t) * | 1/N sum k = 1:N exp[i * phi_k(t)]|^v
    where N is number of traces used, v is sharpness of phase-weighted stack

    :type arr: numpy.ndarray
    :param arr: N length array of time series data 
    :type power: float
    :param power: exponent for phase stack
    :type sampling_rate: float 
    :param sampling_rate: sampling rate of time series 
    :type pws_timegate: float 
    :param pws_timegate: number of seconds to smooth phase stack
    :Returns: Phase weighted stack of time series data
    :rtype: numpy.ndarray  
    """

    if arr.ndim == 1:
        return arr
    N,M = arr.shape
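    # note: scipy.signal.hilbert already returns the analytic signal s(t) + i*H(s(t)),
    # so this construction differs from the pws variants in the other examples above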
    analytic = arr + 1j * hilbert(arr,axis=1, N=next_fast_len(M))[:,:M]
    phase = np.angle(analytic)
    phase_stack = np.mean(np.exp(1j*phase),axis=0)/N
    phase_stack = np.abs(phase_stack)**2

    # smoothing 
    timegate_samples = int(pws_timegate * sampling_rate)
    phase_stack = runningMean(phase_stack,timegate_samples)
    weighted = np.multiply(arr,phase_stack)
    return np.mean(weighted,axis=0)/N 
Developer ID: mdenolle, Project: NoisePy, Lines of code: 40, Source file: noise_module.py

Example 15: pws

# Required module import: from scipy import fftpack [as alias]
# Or: from scipy.fftpack import next_fast_len [as alias]
def pws(arr,sampling_rate,power=2,pws_timegate=5.):
    """
    Performs phase-weighted stack on array of time series. 
    Modified from the noise function by Tim Clements.

    Follows methods of Schimmel and Paulssen, 1997. 
    If s(t) is time series data (seismogram, or cross-correlation),
    S(t) = s(t) + i*H(s(t)), where H(s(t)) is Hilbert transform of s(t)
    S(t) = s(t) + i*H(s(t)) = A(t)*exp(i*phi(t)), where
    A(t) is envelope of s(t) and phi(t) is phase of s(t)
    Phase-weighted stack, g(t), is then:
    g(t) = 1/N sum j = 1:N s_j(t) * | 1/N sum k = 1:N exp[i * phi_k(t)]|^v
    where N is number of traces used, v is sharpness of phase-weighted stack

    :type arr: numpy.ndarray
    :param arr: N length array of time series data 
    :type power: float
    :param power: exponent for phase stack
    :type sampling_rate: float 
    :param sampling_rate: sampling rate of time series 
    :type pws_timegate: float 
    :param pws_timegate: number of seconds to smooth phase stack
    :Returns: Phase weighted stack of time series data
    :rtype: numpy.ndarray  
    """

    if arr.ndim == 1:
        return arr
    N,M = arr.shape
    analytic = hilbert(arr,axis=1, N=next_fast_len(M))[:,:M]
    phase = np.angle(analytic)
    phase_stack = np.mean(np.exp(1j*phase),axis=0)
    phase_stack = np.abs(phase_stack)**(power)

    # smoothing 
    #timegate_samples = int(pws_timegate * sampling_rate)
    #phase_stack = moving_ave(phase_stack,timegate_samples)
    weighted = np.multiply(arr,phase_stack)
    return np.mean(weighted,axis=0) 
Developer ID: mdenolle, Project: NoisePy, Lines of code: 41, Source file: noise_module.py


Note: The scipy.fftpack.next_fast_len method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please follow each project's license when redistributing or using the code; do not reproduce without permission.