

Python fft.fft Function Code Examples

This article collects typical usage examples of the Python function scikits.cuda.fft.fft. If you are unsure what fft does, how to call it, or what it looks like in real code, the hand-picked examples below should help.


Fifteen code examples of the fft function are listed below, ordered by popularity.
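
Before turning to the examples, here is a minimal sketch of the typical call pattern, assuming PyCUDA and scikits.cuda are installed and a CUDA device is available: build an fft.Plan describing the transform shape and dtypes, allocate the output gpuarray, and pass both to fft.fft.

# Minimal sketch of a real-to-complex FFT with scikits.cuda.fft
# (assumes PyCUDA and scikits.cuda are installed and a CUDA device is present).
import numpy as np
import pycuda.autoinit              # creates the CUDA context
import pycuda.gpuarray as gpuarray
import scikits.cuda.fft as cu_fft

N = 1024
x = np.random.rand(N).astype(np.float32)

x_gpu = gpuarray.to_gpu(x)                             # copy the input to the GPU
xf_gpu = gpuarray.empty(N // 2 + 1, np.complex64)      # R2C output holds N/2+1 coefficients

plan = cu_fft.Plan(x.shape, np.float32, np.complex64)  # transform shape and dtypes
cu_fft.fft(x_gpu, xf_gpu, plan)                        # forward FFT on the GPU

# Sanity check against NumPy's CPU implementation.
print(np.allclose(np.fft.rfft(x), xf_gpu.get(), atol=1e-4))

The examples below vary this same pattern: different input/output dtypes (float32/complex64, float64/complex128), batched transforms via the batch argument, and overlapping work with CUDA streams.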

Example 1: thunk

        def thunk():
            input_shape = inputs[0][0].shape
            output_shape = input_shape

            z = outputs[0]

            # only allocate if there is no previous allocation of the
            # right size.
            if z[0] is None or z[0].shape != output_shape:
                z[0] = CudaNdarray.zeros(output_shape)

            input_pycuda = to_gpuarray(inputs[0][0])
            # I thought we'd need to change the type on output_pycuda
            # so it is complex64, but as it turns out scikits.cuda.fft
            # doesn't really care either way and treats the array as
            # if it is complex64 anyway.
            output_pycuda = to_gpuarray(z[0])

            # only initialise plan if necessary
            if plan[0] is None or plan_input_shape[0] != input_shape:
                plan_input_shape[0] = input_shape
                plan[0] = fft.Plan(input_shape[1:-1], np.complex64, np.complex64,
                                   batch=input_shape[0])

            fft.fft(input_pycuda, output_pycuda, plan[0])
            compute_map[node.outputs[0]][0] = True
Author: Thrandis, Project: complex_RNN, Lines: 26, Source: fftconv.py

Example 2: thunk

        def thunk():
            input_shape = inputs[0][0].shape

            # construct output shape
            output_shape = tuple(input_shape)

            # print 'FFT shapes:', input_shape, '->', output_shape
            # print 'Batch size:', input_shape[0]
            # print 'Core shape:', input_shape[1:-1]

            z = outputs[0]

            # only allocate if there is no previous allocation of the right size.
            if z[0] is None or z[0].shape != output_shape:
                z[0] = CudaNdarray.zeros(output_shape)

            input_pycuda = to_gpuarray(inputs[0][0])
            # I thought we'd need to change the type on output_pycuda
            # so it is complex64, but as it turns out scikits.cuda.fft
            # doesn't really care either way and treats the array as
            # if it is complex64 anyway.
            output_pycuda = to_gpuarray(z[0])

            # only initialise plan if necessary
            if plan[0] is None or plan_input_shape[0] != input_shape:
                plan_input_shape[0] = input_shape
                plan[0] = fft.Plan(shape=input_shape[1:-1],  # Exclude batch dim and complex dim
                                   in_dtype=np.complex64,
                                   out_dtype=np.complex64,
                                   batch=input_shape[0])

            fft.fft(input_pycuda, output_pycuda, plan[0])
Author: soroushmehr, Project: BP-FFT, Lines: 32, Source: cuda_fft.py

Example 3: fft_multiply_repeated

def fft_multiply_repeated(h_fft, x, cuda_dict=dict(use_cuda=False)):
    """Do FFT multiplication by a filter function (possibly using CUDA)

    Parameters
    ----------
    h_fft : 1-d array or gpuarray
        The filtering array to apply.
    x : 1-d array
        The array to filter.
    cuda_dict : dict
        Dictionary constructed using setup_cuda_multiply_repeated().

    Returns
    -------
    x : 1-d array
        Filtered version of x.
    """
    if not cuda_dict["use_cuda"]:
        # do the fourier-domain operations
        x = np.real(ifft(h_fft * fft(x), overwrite_x=True)).ravel()
    else:
        # do the fourier-domain operations, results in second param
        cuda_dict["x"].set(x.astype(np.float64))
        cudafft.fft(cuda_dict["x"], cuda_dict["x_fft"], cuda_dict["fft_plan"])
        cuda_multiply_inplace_c128(h_fft, cuda_dict["x_fft"])
        # If we wanted to do it locally instead of using our own kernel:
        # cuda_seg_fft.set(cuda_seg_fft.get() * h_fft)
        cudafft.ifft(cuda_dict["x_fft"], cuda_dict["x"], cuda_dict["ifft_plan"], False)
        x = np.array(cuda_dict["x"].get(), dtype=x.dtype, subok=True, copy=False)
    return x
Author: TanayGahlot, Project: mne-python, Lines: 30, Source: cuda.py

Example 4: thunk

        def thunk():
            input_shape = inputs[0][0].shape

            # construct output shape
            output_shape = list(input_shape)
            # DFT of real input is symmetric, no need to store
            # redundant coefficients
            output_shape[-1] = output_shape[-1] // 2 + 1
            # extra dimension with length 2 for real/imag
            output_shape += [2]
            output_shape = tuple(output_shape)

            z = outputs[0]

            # only allocate if there is no previous allocation of the
            # right size.
            if z[0] is None or z[0].shape != output_shape:
                z[0] = CudaNdarray.zeros(output_shape)

            input_pycuda = to_gpuarray(inputs[0][0])
            # I thought we'd need to change the type on output_pycuda
            # so it is complex64, but as it turns out scikits.cuda.fft
            # doesn't really care either way and treats the array as
            # if it is complex64 anyway.
            output_pycuda = to_gpuarray(z[0])

            # only initialise plan if necessary
            if plan[0] is None or plan_input_shape[0] != input_shape:
                plan_input_shape[0] = input_shape
                plan[0] = fft.Plan(input_shape[1:], np.float32, np.complex64,
                                   batch=input_shape[0])

            fft.fft(input_pycuda, output_pycuda, plan[0])
Author: Ambier, Project: Theano, Lines: 33, Source: fftconv.py

Example 5: gpu_r2c_fft

def gpu_r2c_fft(in1, is_gpuarray=False, store_on_gpu=False):
    """
    This function makes use of the scikits implementation of the FFT for GPUs to take the real to complex FFT.

    INPUTS:
    in1             (no default):       The array on which the FFT is to be performed.
    is_gpuarray     (default=False):    Boolean specifier for whether or not input is on the gpu.
    store_on_gpu    (default=False):    Boolean specifier for whether the result is to be left on the gpu or not.

    OUTPUTS:
    gpu_out1                            The gpu array containing the result.
    OR
    gpu_out1.get()                      The result from the gpu array.
    """

    if is_gpuarray:
        gpu_in1 = in1
    else:
        gpu_in1 = gpuarray.to_gpu_async(in1.astype(np.float32))

    output_size = np.array(in1.shape)
    output_size[1] = output_size[1]//2 + 1

    gpu_out1 = gpuarray.empty([output_size[0], output_size[1]], np.complex64)
    gpu_plan = Plan(gpu_in1.shape, np.float32, np.complex64)
    fft(gpu_in1, gpu_out1, gpu_plan)

    if store_on_gpu:
        return gpu_out1
    else:
        return gpu_out1.get()
Author: AstroChem, Project: PyMORESANE, Lines: 31, Source: iuwt_convolution.py

Example 6: test_fft_float64_to_complex128

 def test_fft_float64_to_complex128(self):
     x = np.asarray(np.random.rand(self.N), np.float64)
     xf = np.fft.fft(x)
     x_gpu = gpuarray.to_gpu(x)
     xf_gpu = gpuarray.empty(self.N/2+1, np.complex128)
     plan = fft.Plan(x.shape, np.float64, np.complex128)
     fft.fft(x_gpu, xf_gpu, plan)
     assert np.allclose(xf[0:self.N/2+1], xf_gpu.get(), atol=atol_float64)
Author: jfrelinger, Project: scikits.cuda, Lines: 8, Source: test_fft.py

Example 7: test_batch_fft_float64_to_complex128_2d

 def test_batch_fft_float64_to_complex128_2d(self):
     x = np.asarray(np.random.rand(self.B, self.N, self.M), np.float64)
     xf = np.fft.rfftn(x, axes=(1,2))
     x_gpu = gpuarray.to_gpu(x)
     xf_gpu = gpuarray.empty((self.B, self.N, self.M/2+1), np.complex128)
     plan = fft.Plan([self.N, self.M], np.float64, np.complex128, batch=self.B)
     fft.fft(x_gpu, xf_gpu, plan)
     assert np.allclose(xf, xf_gpu.get(), atol=atol_float64)
Author: GiladAmar, Project: scikits.cuda, Lines: 8, Source: test_fft.py

Example 8: test_batch_fft_float64_to_complex128_1d

 def test_batch_fft_float64_to_complex128_1d(self):
     x = np.asarray(np.random.rand(self.B, self.N), np.float64)
     xf = np.fft.rfft(x, axis=1)
     x_gpu = gpuarray.to_gpu(x)
     xf_gpu = gpuarray.empty((self.B, self.N/2+1), np.complex128)
     plan = fft.Plan(x.shape[1], np.float64, np.complex128, batch=self.B)
     fft.fft(x_gpu, xf_gpu, plan)
     assert np.allclose(xf, xf_gpu.get(), atol=atol_float64)
Author: GiladAmar, Project: scikits.cuda, Lines: 8, Source: test_fft.py

Example 9: test_fft_float32_to_complex64_2d

 def test_fft_float32_to_complex64_2d(self):
     x = np.asarray(np.random.rand(self.N, self.M), np.float32)
     xf = np.fft.rfftn(x)
     x_gpu = gpuarray.to_gpu(x)
     xf_gpu = gpuarray.empty((self.N, self.M/2+1), np.complex64)
     plan = fft.Plan(x.shape, np.float32, np.complex64)
     fft.fft(x_gpu, xf_gpu, plan)
     assert np.allclose(xf, xf_gpu.get(), atol=atol_float32)
Author: GiladAmar, Project: scikits.cuda, Lines: 8, Source: test_fft.py

Example 10: rfft2

 def rfft2(self, i, o = None, cache = True):
     shape = i.shape[:-2]
     rshape = i.shape[-2:]
     cshape = (rshape[0], rshape[1]/2+1)
     batch = np.prod(shape, dtype=np.int)
     plan = self.get_plan(cache, rshape, self.rtype, self.ctype, batch)
     if o is None:
         o = self.context.empty(shape+cshape, self.ctype)
     cu_fft.fft(i, o, plan, scale=False)
     return o
Author: EelcoHoogendoorn, Project: ThreadPy, Lines: 10, Source: Context.py

Example 11: test_multiple_streams

 def test_multiple_streams(self):
     x = np.asarray(np.random.rand(self.N), np.float32)
     xf = np.fft.fft(x)
     y = np.asarray(np.random.rand(self.N), np.float32)
     yf = np.fft.fft(y)
     x_gpu = gpuarray.to_gpu(x)
     y_gpu = gpuarray.to_gpu(y)
     xf_gpu = gpuarray.empty(self.N/2+1, np.complex64)
     yf_gpu = gpuarray.empty(self.N/2+1, np.complex64)
     stream0 = drv.Stream()
     stream1 = drv.Stream()
     plan1 = fft.Plan(x.shape, np.float32, np.complex64, stream=stream0)
     plan2 = fft.Plan(y.shape, np.float32, np.complex64, stream=stream1)
     fft.fft(x_gpu, xf_gpu, plan1)
     fft.fft(y_gpu, yf_gpu, plan2)
     assert np.allclose(xf[0:self.N/2+1], xf_gpu.get(), atol=atol_float32)
     assert np.allclose(yf[0:self.N/2+1], yf_gpu.get(), atol=atol_float32)
Author: jfrelinger, Project: scikits.cuda, Lines: 17, Source: test_fft.py

Example 12: convol

    def convol(self, data1, data2):
        self.init()
        self.ctx.push()
        plan = self.__class__.plans[self.shape]
        data1_gpu = self.__class__.data1_gpus[self.shape]
        data2_gpu = self.__class__.data2_gpus[self.shape]
        data1_gpu.set(data1.astype(numpy.complex128))
        cu_fft.fft(data1_gpu, data1_gpu, plan)
        data2_gpu.set(data2.astype(numpy.complex128))
        cu_fft.fft(data2_gpu, data2_gpu, plan)
        # data1_gpu *= data2_gpu.conj()
        self.multconj(data1_gpu, data2_gpu)
        cu_fft.ifft(data1_gpu, data1_gpu, plan, True)
        # self.ctx.synchronize()
        res = data1_gpu.get().real
        self.ctx.pop()
        return res
Author: pierrepaleo, Project: directConvolution, Lines: 17, Source: fft.py

Example 13: cufft

def cufft(data,shape=None,inverse=False):

    if shape:
        data = pad2(data,shape)
                        
    plan  = CUFFT_PLANS.get(data.shape)
    if not plan:
        plan = cu_fft.Plan(data.shape,np.complex64,np.complex64)
        CUFFT_PLANS[data.shape] = plan
    
    gpu_data = gpuarray.to_gpu(np.cast[np.complex64](data))
    if inverse:
        cu_fft.ifft(gpu_data,gpu_data,plan)
    else:
        cu_fft.fft(gpu_data,gpu_data,plan)
    r = gpu_data.get()
    
    return r
Author: yamins81, Project: v1framework, Lines: 18, Source: v1_pyfft.py

Example 14: fft

def fft(invec,outvec,prec,itype,otype):
    cuplan = _get_fwd_plan(invec.dtype,outvec.dtype,len(invec))
    cu_fft.fft(invec.data,outvec.data,cuplan)
Author: AbhayMK, Project: pycbc, Lines: 3, Source: cufft.py

Example 15: sample_defrost_gpu

def sample_defrost_gpu(lat, func, gamma, m2_eff):
    """Calculates a sample of random values in the lattice

    lat = Lattice
    func = name of Cuda kernel
    n = size of cubic lattice
    gamma = -0.25 or +0.25
    m2_eff = effective mass

    This uses FFTW for the 1-d kernel transform and CuFFT for the 3-d GPU transforms.
    """
    import scikits.cuda.fft as fft
    import fftw3

    "Various constants:"
    mpl = lat.mpl
    n = lat.n
    nn = lat.nn
    os = 16
    nos = n*pow(os,2)
    dk = lat.dk
    dx = lat.dx
    dkos = dk/(2.*os)
    dxos = dx/os
    kcut = nn*dk/2.0
    norm = 0.5/(math.sqrt(2*pi*dk**3.)*mpl)*(dkos/dxos)

    ker = np.empty(nos,dtype = lat.prec_real)
    fft1 = fftw3.Plan(ker,ker, direction='forward', flags=['measure'],
                     realtypes = ['realodd 10'])

    for k in xrange(nos):
        kk = (k+0.5)*dkos
        ker[k]=kk*(kk**2. + m2_eff)**gamma*math.exp(-(kk/kcut)**2.)
    fft1.execute()
    fftw3.destroy_plan(fft1)

    for k in xrange(nos):
        ker[k] = norm*ker[k]/(k+1)

    Fk_gpu = gpuarray.zeros((n/2+1,n,n), dtype = lat.prec_complex)

    ker_gpu = gpuarray.to_gpu(ker)
    tmp_gpu = gpuarray.zeros((n,n,n),dtype = lat.prec_real)

    plan = fft.Plan(tmp_gpu.shape, lat.prec_real, lat.prec_complex)
    plan2 = fft.Plan(tmp_gpu.shape, lat.prec_complex, lat.prec_real)
    
    func(tmp_gpu, ker_gpu, np.uint32(nn), np.float64(os),
         np.uint32(lat.dimx), np.uint32(lat.dimy), np.uint32(lat.dimz),
         block = lat.cuda_block_1, grid = lat.cuda_grid)
    
    fft.fft(tmp_gpu, Fk_gpu, plan)
    
    if lat.test==True:
        print 'Testing mode on! Set testQ to False to disable this.\n'
        np.random.seed(1)

    rr1 = (np.random.normal(size=Fk_gpu.shape)+
           np.random.normal(size=Fk_gpu.shape)*1j)

    Fk = Fk_gpu.get()
    Fk *= rr1
    Fk_gpu = gpuarray.to_gpu(Fk)

    fft.ifft(Fk_gpu, tmp_gpu, plan2)
    res = (tmp_gpu.get()).astype(lat.prec_real)

    res *= 1./lat.VL

    return res
Author: jtksai, Project: PyCOOL, Lines: 71, Source: field_init.py


Note: The scikits.cuda.fft.fft examples in this article were collected from open-source projects hosted on GitHub and similar platforms. The code snippets were contributed by their original authors and remain under their copyright; consult each project's license before redistributing or reusing the code.