

Python fft.fft2 Function Code Examples

This article collects typical usage examples of Python's numpy.fft.fft2 function. If you are trying to work out exactly how fft2 is used, what it is for, or what real-world examples look like, the hand-picked code samples below should help.


The sections below present 15 code examples of the fft2 function, sorted by popularity by default.
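
Before the project examples, here is a minimal, self-contained sketch of the basic fft2 workflow (forward transform, centering with fftshift, power spectrum, inverse transform). The array shape and values are illustrative only.

import numpy as np
from numpy.fft import fft2, ifft2, fftshift

# A small 2D signal: a single bright pixel on a dark background.
img = np.zeros((8, 8))
img[3, 5] = 1.0

# Forward 2D FFT. The result is complex, with the zero-frequency term
# in the corner until fftshift moves it to the array centre.
spectrum = fft2(img)
centered = fftshift(spectrum)
power = np.abs(centered) ** 2   # 2D power spectrum

# The inverse transform recovers the original signal up to float error.
recovered = ifft2(spectrum).real
assert np.allclose(recovered, img)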

Example 1: repeated_sales

def repeated_sales(df, artistname, artname, r2thresh=7000, fftr2thresh=10000, IMAGES_DIR='/home/ryan/asi_images/'):
    """
        Takes a dataframe, an artistname and an artname and tries to decide, via image
        matching, whether there is a repeat sale. Returns a dict of lot_ids, each entry
        a list of repeat sales.
    """
    artdf = df[(df['artistID']==artistname) & (df['artTitle']==artname)]

    artdf.images = artdf.images.apply(getpath)
    paths = artdf[['_id','images']].dropna()
    id_dict = {}
    img_buffer = {}
    already_ordered = []
    for i, path_i in paths.values:
        id_dict[i] = []
        img_buffer[i] = img_as_float(rgb2gray(resize(imread(IMAGES_DIR + path_i), (300,300))))
        for j, path_j in paths[paths._id != i].values:
            if j > i and j not in already_ordered:
                if j not in img_buffer.keys():
                    img_buffer[j] = img_as_float(rgb2gray(resize(imread(IMAGES_DIR + path_j), (300,300))))
                if norm(img_buffer[i] - img_buffer[j]) < r2thresh and\
                        norm(fft2(img_buffer[i]) - fft2(img_buffer[j])) < fftr2thresh:
                    id_dict[i].append(j)
                    already_ordered.append(j)
    # Iterate over a copy of the keys so entries can be removed safely.
    for key in list(id_dict.keys()):
        if id_dict[key] == []:
            id_dict.pop(key)
    return id_dict
Developer: rhsimplex, Project: artsift, Lines: 26, Source: art_utils.py

Example 2: image_compare

def image_compare(df, IMAGES_DIR='/home/ryan/asi_images/'):
    '''
    Takes a dataframe of image ids and paths and returns all n*(n-1)/2 pairwise
    comparisons: r2 difference, r2 difference of the FFTs, and the averaged
    standard deviation of the two images.
    '''
    img_buffer = {}
    return_list = []
    artdf = df[['_id', 'images']].copy()
    artdf.images = artdf.images.apply(getpath) 
    paths = artdf[['_id','images']].dropna()
    paths.index = paths._id
    paths = paths.images
    if paths.shape[0] < 2:
        return DataFrame([])
    for id_pair in combinations(paths.index, 2):
        if id_pair[0] in img_buffer:
            img1 = img_buffer[id_pair[0]]
        else:
            img_buffer[id_pair[0]] = img_as_float(rgb2gray(resize(imread(IMAGES_DIR + paths[id_pair[0]]), (300,300))))
            img1 = img_buffer[id_pair[0]]
        
        if id_pair[1] in img_buffer:
            img2 = img_buffer[id_pair[1]]
        else:
            img_buffer[id_pair[1]] = img_as_float(rgb2gray(resize(imread(IMAGES_DIR + paths[id_pair[1]]), (300,300))))
            img2 = img_buffer[id_pair[1]]
        return_list.append(
                [id_pair[0], id_pair[1], \
                    norm(img1 - img2), \
                    norm(fft2(img1) - fft2(img2)), \
                    #mean([sum(img1 > threshold_otsu(img1)), sum(img2 > threshold_otsu(img2))])]
                    #mean([sum(img1 > 0.9), sum(img2 > 0.9)])]
                    (std(img1) + std(img2)) / 2.]  # average of the two standard deviations
       )
    return DataFrame(return_list, columns=['id1','id2','r2diff', 'fftdiff', 'stdavg'])
Developer: rhsimplex, Project: artsift, Lines: 34, Source: art_utils.py

Example 3: create_matching_kernel

def create_matching_kernel(source_psf, target_psf, window=None):
    """
    Create a kernel to match 2D point spread functions (PSF) using the
    ratio of Fourier transforms.

    Parameters
    ----------
    source_psf : 2D `~numpy.ndarray`
        The source PSF.  The source PSF should have higher resolution
        (i.e. narrower) than the target PSF.  ``source_psf`` and
        ``target_psf`` must have the same shape and pixel scale.

    target_psf : 2D `~numpy.ndarray`
        The target PSF.  The target PSF should have lower resolution
        (i.e. broader) than the source PSF.  ``source_psf`` and
        ``target_psf`` must have the same shape and pixel scale.

    window : callable, optional
        The window (or taper) function or callable class instance used
        to remove high frequency noise from the PSF matching kernel.
        Some examples include:

        * `~photutils.psf.matching.HanningWindow`
        * `~photutils.psf.matching.TukeyWindow`
        * `~photutils.psf.matching.CosineBellWindow`
        * `~photutils.psf.matching.SplitCosineBellWindow`
        * `~photutils.psf.matching.TopHatWindow`

        For more information on window functions and example usage, see
        :ref:`psf_matching`.

    Returns
    -------
    kernel : 2D `~numpy.ndarray`
        The matching kernel to go from ``source_psf`` to ``target_psf``.
        The output matching kernel is normalized such that it sums to 1.
    """

    # inputs are copied so that they are not changed when normalizing
    source_psf = np.copy(np.asanyarray(source_psf))
    target_psf = np.copy(np.asanyarray(target_psf))

    if source_psf.shape != target_psf.shape:
        raise ValueError('source_psf and target_psf must have the same shape '
                         '(i.e. registered with the same pixel scale).')

    # ensure input PSFs are normalized
    source_psf /= source_psf.sum()
    target_psf /= target_psf.sum()

    source_otf = fftshift(fft2(source_psf))
    target_otf = fftshift(fft2(target_psf))
    ratio = target_otf / source_otf

    # apply a window function in frequency space
    if window is not None:
        ratio *= window(target_psf.shape)

    kernel = np.real(fftshift((ifft2(ifftshift(ratio)))))
    return kernel / kernel.sum()
Developer: astropy, Project: photutils, Lines: 60, Source: fourier.py
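
A minimal usage sketch for the function above, assuming it is in scope together with the numpy/fft names it relies on (np, fft2, ifft2, fftshift, ifftshift); equivalently it can be imported as photutils.psf.matching.create_matching_kernel. The gaussian_psf helper and the sigma values are hypothetical, chosen only to give a narrow source PSF and a broader target PSF.

import numpy as np

def gaussian_psf(shape, sigma):
    # Hypothetical helper: a normalized, centred 2D Gaussian PSF.
    y, x = np.indices(shape)
    cy, cx = (shape[0] - 1) / 2.0, (shape[1] - 1) / 2.0
    psf = np.exp(-((x - cx) ** 2 + (y - cy) ** 2) / (2.0 * sigma ** 2))
    return psf / psf.sum()

# Narrow source PSF, broader target PSF; same shape and pixel scale.
source = gaussian_psf((51, 51), sigma=2.0)
target = gaussian_psf((51, 51), sigma=4.0)

# Optionally pass a window (e.g. a Tukey or cosine-bell window from
# photutils.psf.matching) to suppress high-frequency noise in the ratio.
kernel = create_matching_kernel(source, target)
print(kernel.shape, kernel.sum())   # (51, 51) and ~1.0 (normalized)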

Example 4: SpectralCrossCorrelation

    def SpectralCrossCorrelation(self, cstep = 1):

        # Measure the length of the list
        num_regions = len(self.Regions)

        # Width and height
        height = self.Regions[0].Height
        width  = self.Regions[0].Width

        # Allocate the correlation
        spectral_corr = Correlation(np.zeros((height, width), dtype = "complex"))

        # Calculate the FT of the first region.
        # Do this outside the loop so that we
        # only have to perform one FFT per iteration.
        ft_01 = fft.fft2(self.Regions[0].Data)

        # Correlate all the regions
        for k in range(num_regions - 1):
            ft_02 = fft.fft2(self.Regions[k].Data)

            # Conjugate multiply
            spectral_corr.Data += ft_01 * np.conj(ft_02)

            # Shift the second FT into
            # the position of the first FT.
            ft_01 = ft_02

        return spectral_corr
Developer: matthewgiarra, Project: pyrana, Lines: 29, Source: correlations.py

Example 5: compute_pspec

    def compute_pspec(self):
        '''
        Compute the 2D power spectrum.

        The quantity calculated here is the same as Equation 3 in Lazarian &
        Esquivel (2003), but the input arrays are not in the form described
        there. We can, however, adjust for the use of normalized centroids
        and the linewidth.

        An unnormalized centroid can be constructed by multiplying the
        centroid array by moment0; the velocity dispersion is the square of
        the linewidth minus the square of the normalized centroid.
        '''

        term1 = fft2(self.centroid*self.moment0)

        term2 = np.power(self.linewidth, 2) + np.power(self.centroid, 2)

        mvc_fft = term1 - term2 * fft2(self.moment0)

        # Shift to the center
        mvc_fft = fftshift(mvc_fft)

        self.ps2D = np.abs(mvc_fft) ** 2.

        return self
Developer: hopehhchen, Project: TurbuStat, Lines: 26, Source: mvc.py

Example 6: get_spectrum_1d

def get_spectrum_1d(data_reg,x_reg,y_reg):
    """Compute the 1d power spectrum.
    """
    # remove the mean and squarize
    data_reg-=data_reg.mean()
    jpj,jpi = data_reg.shape
    msize = min(jpj,jpi)
    data_reg = data_reg[:msize-1,:msize-1]
    x_reg = x_reg[:msize-1,:msize-1]
    y_reg = y_reg[:msize-1,:msize-1]
    # wavenumber vector
    x1dreg,y1dreg = x_reg[0,:],y_reg[:,0]
    Ni,Nj = msize-1,msize-1
    dx = int(npy.ceil(x1dreg[1]-x1dreg[0]))
    k_max  = npy.pi / dx
    kx = fft.fftshift(fft.fftfreq(Ni, d=1./(2.*k_max)))
    ky = fft.fftshift(fft.fftfreq(Nj, d=1./(2.*k_max)))
    kkx, kky = npy.meshgrid( ky, kx )
    Kh = npy.sqrt(kkx**2 + kky**2)
    Nmin  = min(Ni,Nj)
    leng  = Nmin // 2 + 1
    kstep = npy.zeros(leng)
    kstep[0] =  k_max / Nmin
    for ind in range(1, leng):
        kstep[ind] = kstep[ind-1] + 2*k_max/Nmin
    norm_factor = 1./( (Nj*Ni)**2 )
    # tukey windowing = tapered cosine window
    cff_tukey = 0.25
    yw=npy.linspace(0, 1, Nj)
    wdw_j = npy.ones(yw.shape)
    xw=npy.linspace(0, 1, Ni)
    wdw_i= npy.ones(xw.shape)
    first_conditioni = xw<cff_tukey/2
    first_conditionj = yw<cff_tukey/2
    wdw_i[first_conditioni] = 0.5 * (1 + npy.cos(2*npy.pi/cff_tukey * (xw[first_conditioni] - cff_tukey/2) ))
    wdw_j[first_conditionj] = 0.5 * (1 + npy.cos(2*npy.pi/cff_tukey * (yw[first_conditionj] - cff_tukey/2) ))
    third_conditioni = xw>=(1 - cff_tukey/2)
    third_conditionj = yw>=(1 - cff_tukey/2)
    wdw_i[third_conditioni] = 0.5 * (1 + npy.cos(2*npy.pi/cff_tukey * (xw[third_conditioni] - 1 + cff_tukey/2)))
    wdw_j[third_conditionj] = 0.5 * (1 + npy.cos(2*npy.pi/cff_tukey * (yw[third_conditionj] - 1 + cff_tukey/2)))
    wdw_ii, wdw_jj = npy.meshgrid(wdw_j, wdw_i, sparse=True)
    wdw = wdw_ii * wdw_jj
    data_reg*=wdw
    #2D spectrum
    cff  = norm_factor
    tempconj=fft.fft2(data_reg).conj()
    tempamp=cff * npy.real(tempconj*fft.fft2(data_reg))
    spec_2d=fft.fftshift(tempamp)
    #1D spectrum
    leng    = len(kstep)
    spec_1d = npy.zeros(leng)
    krange     = Kh <= kstep[0]
    spec_1d[0] = spec_2d[krange].sum()
    for ind in range(1, leng):
        krange = (kstep[ind-1] < Kh) & (Kh <= kstep[ind])
        spec_1d[ind] = spec_2d[krange].sum()
    spec_1d[0] /= kstep[0]
    for ind in range(1, leng):
        spec_1d[ind] /= kstep[ind]-kstep[ind-1]
    return spec_1d, kstep
Developer: ecosme38, Project: codes, Lines: 60, Source: WavenumberSpectrum.py
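
A minimal usage sketch with a synthetic field, assuming the function above and its imports (numpy as npy, numpy.fft as fft) are in scope; the grid size and the 1 km spacing are illustrative only. Note that the function demeans and windows data_reg in place.

import numpy as npy

# Synthetic 128 x 128 field on a regular grid with 1 km spacing.
n = 128
x = npy.arange(n) * 1.0e3
y = npy.arange(n) * 1.0e3
x_reg, y_reg = npy.meshgrid(x, y)
data_reg = npy.random.randn(n, n)

spec_1d, kstep = get_spectrum_1d(data_reg, x_reg, y_reg)
# spec_1d[i] is the power summed over the wavenumber bin ending at
# kstep[i], divided by the bin width.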

Example 7: Convolve

def Convolve(image1, image2, MinPad=True, pad=True):
    """
    Convolves image1 with image2.

    :param image1: 2D image array
    :param image2: 2D image array
    :param MinPad: whether to use minimal padding
    :param pad: whether to pad the array
    """
    #The size of the images:
    r1, c1 = image1.shape
    r2, c2 = image2.shape

    if MinPad:
        r = r1 + r2
        c = c1 + c2
    else:
        r = 2*max(r1,r2)
        c = 2*max(c1,c2)
    
    #or in power of two
    if pad:
        pr2 = int(m.log(r)/m.log(2.) + 1.)
        pc2 = int(m.log(c)/m.log(2.) + 1.)
        rOrig = r
        cOrig = c
        r = 2**pr2
        c = 2**pc2
    
    fftimage = fft2(image1, s=(r,c))*fft2(image2[::-1,::-1],s=(r,c))

    if pad:
        return (ifft2(fftimage))[:rOrig,:cOrig].real
    else:
        return (ifft2(fftimage)).real
Developer: eddienko, Project: SamPy, Lines: 35, Source: ImageConvolution.py
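
A minimal usage sketch, assuming Convolve as defined above with its imports in scope (math imported as m, and fft2/ifft2 from numpy.fft); the image size and the box kernel are illustrative.

import numpy as np

# Blur an impulse image with a normalized 5x5 box kernel.
image = np.zeros((32, 32))
image[16, 16] = 1.0
kernel = np.ones((5, 5)) / 25.0

blurred = Convolve(image, kernel)
print(blurred.shape)   # padded output, larger than the 32x32 input
print(blurred.sum())   # ~1.0, since the kernel sums to 1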

Example 8: phase_corr

def phase_corr(A, B):
    """Phase correlation of two images.

    Parameters
    ----------
    A, B : (M,N) ndarray
        Input images.

    Returns
    -------
    out : (M,N) ndarray
        Correlation coefficients.

    Examples
    --------

    Set up test data.  One array is offset (10, 10) from the other.

    >>> x = np.random.random((50, 50))
    >>> y = np.zeros_like(x)
    >>> y[10:, 10:] = x[0:-10, 0:-10]

    Correlate the two arrays, and ensure the peak is at (10, 10).

    >>> out = phase_corr(y, x)
    >>> m, n = np.unravel_index(np.argmax(out), out.shape)
    >>> print(m, n)
    10 10

    """
    out = fft2(A) * fft2(B).conj()
    out /= np.abs(out)
    out = np.abs(ifft2(out))

    return out
Developer: Germanc, Project: supreme, Lines: 35, Source: image.py

Example 9: cross_corr

def cross_corr(img1,img2,mask=None):
    '''Compute the cross-correlation of two images.
        The mask, if given, is currently only used to normalize the result;
        it is not applied to the images themselves.
        todo: take the mask fully into account (requires extra calculations)
        input:
            img1: first image
            img2: second image
            mask: a mask array
        output:
            the cross-correlation of the two images (same shape as the input images)
        
    '''
    #if(mask is not None):
    #   img1 *= mask
    #  img2 *= mask
    
    #img1_mean = np.mean( img1.flat )
    #img2_mean = np.mean( img2.flat )
    
    # imgc = fftshift( ifft2(     
    #        fft2(img1/img1_mean -1.0 )*np.conj(fft2( img2/img2_mean -1.0 ))).real )
    
    #imgc = fftshift( ifft2(     
    #        fft2(  img1/img1_mean  )*np.conj(fft2(  img2/img2_mean   ))).real )
    
    imgc = fftshift( ifft2(     
            fft2(  img1  )*np.conj(fft2(  img2  ))).real )
    
    #imgc /= (img1.shape[0]*img1.shape[1])**2
    if(mask is not None):
        maskc = cross_corr(mask,mask)        
        imgc /= np.maximum( 1, maskc )
            
            
    return imgc
Developer: yugangzhang, Project: chxanalys, Lines: 35, Source: Spatial_Correlation_Function.py
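
A minimal usage sketch recovering a known circular shift, assuming cross_corr as defined above with numpy and fft2/ifft2/fftshift from numpy.fft in scope; the image size and the (5, 8) shift are illustrative.

import numpy as np

rng = np.random.default_rng(0)
img1 = rng.random((64, 64))
img2 = np.roll(img1, shift=(5, 8), axis=(0, 1))   # circularly shifted copy

corr = cross_corr(img1, img2)
peak = np.unravel_index(np.argmax(corr), corr.shape)
center = np.array(corr.shape) // 2

# Because the result is fftshift-ed, the displacement is read off
# relative to the array centre.
print(center - np.array(peak))   # [5 8] -- the applied shift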

Example 10: increment_mccf

def increment_mccf(A, B, X, y, nu=0.125, l=0.01, boundary='constant'):
    r"""
    Incremental Multi-Channel Correlation Filter (MCCF)
    """
    # number of images; number of channels, height and width
    n, k, hx, wx = X.shape
    x_shape = (hx, wx)

    # height and width of desired responses
    _, hy, wy = y.shape
    y_shape = (hy, wy)

    # extended shape
    ext_h = hx + hy - 1
    ext_w = wx + wy - 1
    ext_shape = (ext_h, ext_w)
    # extended dimensionality
    ext_d = ext_h * ext_w

    # extend desired response
    ext_y = pad(y, ext_shape)
    # fft of extended desired response
    fft_ext_y = fft2(ext_y)

    # auto and cross spectral energy matrices
    sXX = 0
    sXY = 0
    # for each training image and desired response
    for x in X:
        # extend image
        ext_x = pad(x, ext_shape, boundary=boundary)
        # fft of extended image
        fft_ext_x = fft2(ext_x)

        # store extended image fft as sparse diagonal matrix
        diag_fft_x = spdiags(fft_ext_x.reshape((k, -1)),
                             -np.arange(0, k) * ext_d, ext_d * k, ext_d).T
        # vectorize extended desired response fft
        diag_fft_y = fft_ext_y.ravel()

        # update auto and cross spectral energy matrices
        sXX += diag_fft_x.conj().T.dot(diag_fft_x)
        sXY += diag_fft_x.conj().T.dot(diag_fft_y)

    # combine old and new auto and cross spectral energy matrices
    sXY = (1 - nu) * A + nu * sXY
    sXX = (1 - nu) * B + nu * sXX
    # solve ext_d independent k x k linear systems (with regularization)
    # to obtain desired extended multi-channel correlation filter
    fft_ext_f = spsolve(sXX + l * speye(sXX.shape[-1]), sXY)
    # reshape extended filter to extended image shape
    fft_ext_f = fft_ext_f.reshape((k, ext_h, ext_w))

    # compute filter inverse fft
    ext_f = np.real(ifftshift(ifft2(fft_ext_f), axes=(-2, -1)))
    # crop extended filter to match desired response shape
    f = crop(ext_f, y_shape)

    return f, sXY, sXX
Developer: duxiaofei283, Project: templatetracker, Lines: 59, Source: correlationfilter.py

Example 11: initialize

    def initialize(self, b_phi1, b_phi2):
        distribution = np.zeros([self.gd.comm.size], int)
        if self.gd.comm.rank == 0:
            d3 = b_phi1.shape[2]
            gd = self.gd
            N_c1 = gd.N_c[:2, np.newaxis]
            i_cq = np.indices(gd.N_c[:2]).reshape((2, -1))
            i_cq += N_c1 // 2
            i_cq %= N_c1
            i_cq -= N_c1 // 2
            B_vc = 2.0 * np.pi * gd.icell_cv.T[:2, :2]
            k_vq = np.dot(B_vc, i_cq)
            k_vq *= k_vq
            k_vq2 = np.sum(k_vq, axis=0)
            k_vq2 = k_vq2.reshape(-1)

            b_phi1 = fft2(b_phi1, None, (0,1))
            b_phi2 = fft2(b_phi2, None, (0,1))

            b_phi1 = b_phi1[:, :, -1].reshape(-1)
            b_phi2 = b_phi2[:, :, 0].reshape(-1)

            loc_b_phi1 = np.array_split(b_phi1, self.gd.comm.size)
            loc_b_phi2 = np.array_split(b_phi2, self.gd.comm.size)
            loc_k_vq2 = np.array_split(k_vq2, self.gd.comm.size)

            self.loc_b_phi1 = loc_b_phi1[0]
            self.loc_b_phi2 = loc_b_phi2[0]
            self.k_vq2 = loc_k_vq2[0]

            for i in range(self.gd.comm.size):
                distribution[i] = len(loc_b_phi1[i])
            self.gd.comm.broadcast(distribution, 0)

            for i in range(1, self.gd.comm.size):
                self.gd.comm.ssend(loc_b_phi1[i], i, 135)
                self.gd.comm.ssend(loc_b_phi2[i], i, 246)
                self.gd.comm.ssend(loc_k_vq2[i], i, 169)
        else:
            self.gd.comm.broadcast(distribution, 0)
            self.loc_b_phi1 = np.zeros([distribution[self.gd.comm.rank]],
                                       dtype=complex)
            self.loc_b_phi2 = np.zeros([distribution[self.gd.comm.rank]],
                                       dtype=complex)
            self.k_vq2 = np.zeros([distribution[self.gd.comm.rank]])
            self.gd.comm.receive(self.loc_b_phi1, 0, 135)
            self.gd.comm.receive(self.loc_b_phi2, 0, 246)
            self.gd.comm.receive(self.k_vq2, 0, 169)


        k_distribution = np.arange(np.sum(distribution))
        self.k_distribution = np.array_split(k_distribution,
                                             self.gd.comm.size)

        self.d1, self.d2, self.d3 = self.gd.N_c
        self.r_distribution = np.array_split(np.arange(self.d3),
                                             self.gd.comm.size)
        self.comm_reshape = not (self.gd.parsize_c[0] == 1
                                 and self.gd.parsize_c[1] == 1)
Developer: qsnake, Project: gpaw, Lines: 59, Source: poisson.py

Example 12: test_kosta_comp_abs

    def test_kosta_comp_abs(self):
        ft_image = fft2(self.image)
        ft_mask = fft2(self.inert_mask_padded_kosta)

        ft_result = ft_image * ft_mask
        result = ifft2(ft_result)

        assert_array_almost_equal(abs(result), self.image)
Developer: rtatishvili, Project: img-proc-repo, Lines: 8, Source: test_multiply_fft.py

Example 13: InitVelField

def InitVelField(_N, _M, _h, h, dt, rho=1.0, mu=1.0, DeltaType=0):
    WideLambda = zeros((_N, _M), float64)
    ShortLambda = zeros((_N, _M), float64)
    IB_c.InitWideLaplacian(_N, _M, _h, WideLambda)
    IB_c.InitShortLaplacian(_N, _M, _h, ShortLambda)
    DxSymbol = InitDxSymbol(_N, _M, _h)
    DySymbol = InitDySymbol(_N, _M, _h)

    r = int(ceil(3.0 * h / _h))

    fx = zeros((_N, _M), float64)
    for j in range(-r, r + 1):
        deltx = Delta(h, j * _h, DeltaType)
        for k in range(-r, r + 1):
            delt = deltx * Delta(h, k * _h, DeltaType) * 1.0
            fx[j % _N][k % _M] = fx[j % _N][k % _M] + delt
    #       print j%_N, k%_M, fx[j%_N][k%_M]

    fx, fy = fft2(dt * fx), zeros((_N, _M), float64)

    P = Solve_P_Hat(dt, WideLambda, DxSymbol, DySymbol, fx, fy)
    P[0, 0] = 0.0

    u, v = Solve_uv_Hat(dt, ShortLambda, DxSymbol, DySymbol, P, fx, fy, rho, mu)
    u = 1.0 * ifft2(u).real
    v = 1.0 * ifft2(v).real
    #    P = ifft2(P).real

    Fx1 = array(zeros((_N, _M), float64))
    Fy1 = array(zeros((_N, _M), float64))

    IB_c.WholeGridSpread(u, float(h), float(_h), int(r), Fx1, DeltaType)
    IB_c.WholeGridSpread(v, float(h), float(_h), int(r), Fy1, DeltaType)

    fy = zeros((_N, _M), float64)
    for j in range(-r, r + 1):
        deltx = Delta(h, j * _h, DeltaType)
        for k in range(-r, r + 1):
            delt = deltx * Delta(h, k * _h, DeltaType) * 1.0
            fy[j % _N][k % _M] = fy[j % _N][k % _M] + delt
    #       print j%_N, k%_M, fx[j%_N][k%_M]

    fx, fy = zeros((_N, _M), float64), fft2(dt * fy)

    P = Solve_P_Hat(dt, WideLambda, DxSymbol, DySymbol, fx, fy)
    P[0, 0] = 0.0

    u, v = Solve_uv_Hat(dt, ShortLambda, DxSymbol, DySymbol, P, fx, fy, rho, mu)
    u = 1.0 * ifft2(u).real
    v = 1.0 * ifft2(v).real

    Fx2 = array(zeros((_N, _M), float64))
    Fy2 = array(zeros((_N, _M), float64))

    IB_c.WholeGridSpread(u, float(h), float(_h), int(r), Fx2, DeltaType)
    IB_c.WholeGridSpread(v, float(h), float(_h), int(r), Fy2, DeltaType)

    return Fx1, Fy1, Fx2, Fy2
Developer: Haider-BA, Project: Paper-Implicit-IBM-2D, Lines: 58, Source: IB_Methods.py

Example 14: FFT_coregistration

def FFT_coregistration(ref_band_mat,target_band_mat):

    '''
    Alternative method used to coregister the images based on the FFT

    :param ref_band_mat: numpy 8 bit array containing reference image
    :param target_band_mat: numpy 8 bit array containing target image
    :returns: the shift between the two input images

    Author: Mostapha Harb - Daniele De Vecchi - Daniel Aurelio Galeazzo
    Last modified: 14/11/2014
    '''
    status = Bar(3, "FFT")
    #Normalization - http://en.wikipedia.org/wiki/Cross-correlation#Normalized_cross-correlation 
    ref_band_mat = (ref_band_mat - ref_band_mat.mean()) / ref_band_mat.std()
    target_band_mat = (target_band_mat - target_band_mat.mean()) / target_band_mat.std() 

    #Check dimensions - they have to match
    rows_ref,cols_ref =  ref_band_mat.shape
    rows_target,cols_target = target_band_mat.shape

    if rows_target < rows_ref:
        print('Rows - correction needed')
        diff = rows_ref - rows_target
        target_band_mat = np.vstack((target_band_mat,np.zeros((diff,cols_target))))
    elif rows_ref < rows_target:
        print('Rows - correction needed')
        diff = rows_target - rows_ref
        ref_band_mat = np.vstack((ref_band_mat,np.zeros((diff,cols_ref))))
    status(1)
    rows_target,cols_target = target_band_mat.shape
    rows_ref,cols_ref = ref_band_mat.shape

    if cols_target < cols_ref:
        print('Columns - correction needed')
        diff = cols_ref - cols_target
        target_band_mat = np.hstack((target_band_mat,np.zeros((rows_target,diff))))
    elif cols_ref < cols_target:
        print('Columns - correction needed')
        diff = cols_target - cols_ref
        ref_band_mat = np.hstack((ref_band_mat,np.zeros((rows_ref,diff))))

    rows_target,cols_target = target_band_mat.shape   
    status(2)
    #translation(im_target,im_ref)
    freq_target = fft2(target_band_mat)   
    freq_ref = fft2(ref_band_mat)  
    inverse = abs(ifft2((freq_target * freq_ref.conjugate()) / (abs(freq_target) * abs(freq_ref))))   
    # np.unravel_index converts the flat argmax index into the (row, col) coordinates of the correlation peak
    y_shift,x_shift = np.unravel_index(np.argmax(inverse),(rows_target,cols_target))

    if y_shift > rows_target // 2: # // used to truncate the division
        y_shift -= rows_target
    if x_shift > cols_target // 2: # // used to truncate the division
        x_shift -= cols_target
    status(3)
    return -x_shift, -y_shift
Developer: SENSUM-project, Project: sensum_rs, Lines: 58, Source: preprocess.py
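
A minimal usage sketch on synthetic data, assuming the function above with numpy and fft2/ifft2 from numpy.fft in scope. The Bar class below is a hypothetical no-op stand-in for the project's progress-bar helper, and the (7, -12) row/column offset is illustrative.

import numpy as np

class Bar:
    # Hypothetical no-op stand-in for the project's progress-bar helper.
    def __init__(self, total, label):
        pass
    def __call__(self, step):
        pass

rng = np.random.default_rng(1)
ref = rng.random((200, 200))
target = np.roll(ref, shift=(7, -12), axis=(0, 1))   # known row/column offset

x_shift, y_shift = FFT_coregistration(ref, target)
print(x_shift, y_shift)   # 12 -7: the negated column and row offsets of target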

Example 15: test_kost_comp_real

    def test_kost_comp_real(self):
        ft_image = fft2(self.image)
        ft_mask_padded = fft2(self.inert_mask_padded)

        ft_result = ft_image * ft_mask_padded

        result = ifft2(ft_result)

        assert_array_equal(result.real, self.image)
Developer: rtatishvili, Project: img-proc-repo, Lines: 9, Source: test_multiply_fft.py


Note: the numpy.fft.fft2 function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors; please refer to the corresponding project's License before redistributing or using the code. Do not reproduce this article without permission.