

Python numpy.fliplr Function Code Examples

This article collects typical usage examples of Python's numpy.fliplr function. If you are wondering what fliplr does, how to call it, or what real-world usage looks like, the curated code examples below should help.


The following shows 15 code examples of the fliplr function, ordered by popularity by default.

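Before the examples, a quick reminder of what the function does: np.fliplr reverses the order of columns (the second axis) of an array with at least two dimensions, leaving rows where they are; np.flipud is the row-wise counterpart. A minimal sketch:

import numpy as np

a = np.array([[1, 2, 3],
              [4, 5, 6]])
print(np.fliplr(a))   # [[3 2 1]
                      #  [6 5 4]]  columns reversed
print(np.flipud(a))   # [[4 5 6]
                      #  [1 2 3]]  rows reversed
print(np.array_equal(np.fliplr(a), a[:, ::-1]))   # True, same as slicing
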
Example 1: paduavals2coefs

def paduavals2coefs(f):
    useFFTwhenNisMoreThan = 100
    m = len(f)
    n = int(round(-1.5 + np.sqrt(.25 + 2 * m)))
    x = padua_points(n)
    idx = _find_m(n)
    w = 0 * x[0] + 1. / (n * (n + 1))
    idx1 = np.all(np.abs(x) == 1, axis=0)
    w[idx1] = .5 * w[idx1]
    idx2 = np.all(np.abs(x) != 1, axis=0)
    w[idx2] = 2 * w[idx2]

    G = np.zeros(idx.max() + 1)
    G[idx] = 4 * w * f

    if (n < useFFTwhenNisMoreThan):
        t1 = np.r_[0:n + 1].reshape(-1, 1)
        Tn1 = np.cos(t1 * t1.T * np.pi / n)
        t2 = np.r_[0:n + 2].reshape(-1, 1)
        Tn2 = np.cos(t2 * t2.T * np.pi / (n + 1))
        C = np.dot(Tn2, np.dot(G, Tn1))
    else:

        # dct = @(c) chebtech2.coeffs2vals(c);
        C = np.rot90(dct(dct(G.T).T)) #, axis=1)

    C[0] = .5 * C[0]
    C[:, 1] = .5 * C[:, 1]
    C[0, -1] = .5 * C[0, -1]
    C = C[:-1]  # drop the last row (numpy arrays do not support del on rows)

    # Take upper-left triangular part:
    return np.fliplr(np.triu(np.fliplr(C)))
Developer: eelcovv | Project: pywafo | Lines: 33 | Source: padua.py

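A note on the return statement of Example 1: np.fliplr(np.triu(np.fliplr(C))) is a common idiom that keeps the entries of C on and above the anti-diagonal (the "upper-left" triangle) and zeros the rest. A small sketch on an arbitrary 3x3 matrix, unrelated to the Padua points code:

import numpy as np

C = np.arange(1, 10).reshape(3, 3)
print(np.fliplr(np.triu(np.fliplr(C))))
# [[1 2 3]
#  [4 5 0]
#  [7 0 0]]
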
Example 2: put_image_quadrants

def put_image_quadrants (Q,odd_size=True):
    """
    Reassemble image from 4 quadrants Q = (Q0, Q1, Q2, Q3)
    The reverse process to get_image_quadrants()
    Qi defined in abel.hansenlaw.iabel_hansenlaw
    
    Parameters:
      - Q: tuple of numpy array quadrants
      - odd_size: boolean, whether the final image has an odd or even pixel size;
                  an odd size requires trimming 1 row from Q1, Q0 and
                  1 column from Q1, Q2

    Returns:  
      - rows x cols numpy array - the reassembled image
    """


    if not odd_size:
        Top    = np.concatenate((np.fliplr(Q[1]), Q[0]), axis=1)
        Bottom = np.flipud(np.concatenate((np.fliplr(Q[2]), Q[3]), axis=1))
    else:
        # odd size image remove extra row/column added in get_image_quadrant()
        Top    = np.concatenate((np.fliplr(Q[1][:-1,:-1]), Q[0][:-1,:]), axis=1)
        Bottom = np.flipud(np.concatenate((np.fliplr(Q[2][:,:-1]), Q[3]), axis=1))

    IM = np.concatenate((Top,Bottom), axis=0)

    return IM
Developer: stggh | Project: PyAbel | Lines: 28 | Source: symmetry.py

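The flips in Example 2 undo the reorientation applied by get_image_quadrants() (per the docstring): np.fliplr mirrors Q1 and Q2 back about the vertical axis, and np.flipud puts the bottom half back below the top half. A toy sketch of the even-size branch with made-up 2x2 quadrants, not PyAbel's actual data:

import numpy as np

Q0 = np.full((2, 2), 0); Q1 = np.full((2, 2), 1)
Q2 = np.full((2, 2), 2); Q3 = np.full((2, 2), 3)

top = np.concatenate((np.fliplr(Q1), Q0), axis=1)
bottom = np.flipud(np.concatenate((np.fliplr(Q2), Q3), axis=1))
image = np.concatenate((top, bottom), axis=0)
print(image.shape)   # (4, 4): four quadrants stitched into one image
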
Example 3: rotate_data

def rotate_data(bg, overlay, slices_list, axis_name, shape):
    # Rotate the data as required
    # Return the rotated data, and an updated slice list if necessary
    if axis_name == 'axial':
        # Align so that right is right
        overlay = np.rot90(overlay)
        overlay = np.fliplr(overlay)
        bg = np.rot90(bg)
        bg = np.fliplr(bg)
    
    elif axis_name == 'coronal':
        overlay = np.rot90(overlay)
        bg = np.rot90(bg)
        overlay = np.flipud(np.swapaxes(overlay, 0, 2))
        bg = np.flipud(np.swapaxes(bg, 0, 2))
        slices_list[1] = [ shape - n - 3 for n in slices_list[1] ] 
        
    elif axis_name == 'sagittal':
        overlay = np.flipud(np.swapaxes(overlay, 0, 2))
        bg = np.flipud(np.swapaxes(bg, 0, 2))
    
    else:
        print('\n************************')
        print('ERROR: data could not be rotated\n')
        parser.print_help()
        sys.exit()
    
    return bg, overlay, slices_list
Developer: KirstieJane | Project: DESCRIBING_DATA | Lines: 28 | Source: MakePngs_DTI.py

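For the axial branch in Example 3, the combination np.fliplr(np.rot90(a)) reflects a 2-D slice about its anti-diagonal; whether that yields the intended "right is right" display depends on how the volume was loaded. A tiny check on a labelled array, independent of any imaging data:

import numpy as np

a = np.array([[1, 2],
              [3, 4]])
print(np.fliplr(np.rot90(a)))
# [[4 2]
#  [3 1]]   reflection of `a` about its anti-diagonal
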
Example 4: load_images

def load_images(random_state=1234):
    train_df = pd.read_csv("data/train.csv", index_col="id", usecols=[0])
    depths_df = pd.read_csv("data/depths.csv", index_col="id")
    train_df = train_df.join(depths_df)
    test_df = depths_df[~depths_df.index.isin(train_df.index)]
    print(">>> train_df:",train_df.shape)
    print(train_df.head())
    print(">>> test_df:", test_df.shape)
    print(test_df.head())
    train_df["images"] = [gradmag(np.array(imread(path_train_images+"{}.png".format(idx)))) for idx in tqdm(train_df.index)]
    train_df["masks"] = [np.array(load_img(path_train_masks+"{}.png".format(idx),grayscale=True))/255 for idx in tqdm(train_df.index)]
    train_df["coverage"] = train_df.masks.map(np.sum) / pow(img_size_ori, 2)
    train_df["coverage_class"] = train_df.coverage.map(cov_to_class)
    print("*** TRAIN ***")
    print(train_df.head())
    print("*** TEST ***")
    print(test_df.head())
    ids_train, ids_valid, x_train, x_valid, y_train, y_valid, cov_train, cov_test, depth_train, depth_test = train_test_split(
        train_df.index.values,
        np.array(train_df.images.tolist()).reshape(-1, img_size_target, img_size_target, 1),
        np.array(train_df.masks.tolist()).reshape(-1, img_size_target, img_size_target, 1),
        train_df.coverage.values,
        train_df.z.values,
        test_size=0.2,
        stratify=train_df.coverage_class,
        random_state=random_state)
    #Data augmentation
    x_train2 = np.append(x_train, [np.fliplr(x) for x in x_train], axis=0)
    y_train2 = np.append(y_train, [np.fliplr(x) for x in y_train], axis=0)
    print(x_train2.shape)
    print(y_valid.shape)
    x_test = np.array([gradmag(np.array(imread(path_test_images+"{}.png".format(idx)))) for idx in tqdm(test_df.index)]).reshape(-1, img_size_target, img_size_target, 1)
    return x_train2, x_valid, y_train2, y_valid, x_test, test_df.index.values
Developer: gtesei | Project: fast-furious | Lines: 33 | Source: unet_start_4_grad.py

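The augmentation step in Example 4 works because np.fliplr flips axis 1, so on an array shaped (height, width, channels) it mirrors the image horizontally; applying the same flip to the mask keeps image and label aligned. A minimal sketch of the pattern, using small random arrays in place of the loaded images and masks:

import numpy as np

rng = np.random.default_rng(0)
x_train = rng.random((8, 101, 101, 1))                          # images (N, H, W, 1)
y_train = rng.integers(0, 2, (8, 101, 101, 1)).astype(float)    # masks  (N, H, W, 1)

# append a horizontally mirrored copy of every sample to the batch
x_aug = np.append(x_train, [np.fliplr(x) for x in x_train], axis=0)
y_aug = np.append(y_train, [np.fliplr(y) for y in y_train], axis=0)
print(x_aug.shape, y_aug.shape)   # (16, 101, 101, 1) (16, 101, 101, 1)
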
Example 5: wrapper

    def wrapper(*args):
        x = args[0]
        w = args[1]
        if x.ndim == 3:
            w = np.flipud(w)
            w = np.transpose(w, (1, 2, 0))
            if args[3] == 'channels_last':
                x = np.transpose(x, (0, 2, 1))
        elif x.ndim == 4:
            w = np.fliplr(np.flipud(w))
            w = np.transpose(w, (2, 3, 0, 1))
            if args[3] == 'channels_last':
                x = np.transpose(x, (0, 3, 1, 2))
        else:
            w = np.flip(np.fliplr(np.flipud(w)), axis=2)
            w = np.transpose(w, (3, 4, 0, 1, 2))
            if args[3] == 'channels_last':
                x = np.transpose(x, (0, 4, 1, 2, 3))

        y = func(x, w, args[2], args[3])

        if args[3] == 'channels_last':
            if y.ndim == 3:
                y = np.transpose(y, (0, 2, 1))
            elif y.ndim == 4:
                y = np.transpose(y, (0, 2, 3, 1))
            else:
                y = np.transpose(y, (0, 2, 3, 4, 1))

        return y
Developer: joelthchao | Project: keras | Lines: 30 | Source: reference_operations.py

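In Example 5, np.fliplr(np.flipud(w)) reverses a 2-D kernel along both spatial axes, the usual flip that relates cross-correlation and true convolution. For a 2-D array the combination is equivalent to reversed slicing or a 180-degree rotation:

import numpy as np

w = np.arange(9).reshape(3, 3)
flipped = np.fliplr(np.flipud(w))
print(np.array_equal(flipped, w[::-1, ::-1]))   # True
print(np.array_equal(flipped, np.rot90(w, 2)))  # True
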
Example 6: array_transpose

 def array_transpose(self, flip=False):
     """Transpose the arrays in strand coverage"""
     self.transpose_cov1 = []
     self.transpose_cov2 = []
     # print(self.coverage)
     for a in self.cov_sense_all:
         if flip:
             # print(a[:, 0])
             # print(a[:, 1])
             a1 = np.transpose(a[:, 0])
             a1.shape = (a1.shape[0],1)
             self.transpose_cov1.append(np.fliplr(a1))
             a2 = np.transpose(a[:, 0])
             a2.shape = (a2.shape[0], 1)
             self.transpose_cov2.append(np.fliplr(a2))
         else:
             # print(a[:, 0])
             # print(a[:, 1])
             a1 = np.transpose(a[:, 0])
             a1.shape = (a1.shape[0], 1)
             self.transpose_cov1.append(a1)
             a2 = np.transpose(a[:, 1])
             a2.shape = (a2.shape[0], 1)
             self.transpose_cov2.append(a2)
     self.transpose_cov1 = np.array(self.transpose_cov1)
     self.transpose_cov2 = np.array(self.transpose_cov2)
Developer: eggduzao | Project: reg-gen | Lines: 26 | Source: CoverageSet.py

Example 7: rforests

def rforests(trainx, trainy, test, n_estimators=100, k=5):
	trainy = np.ravel(trainy)

	forest = RandomForestClassifier(n_estimators)
	forest.fit(trainx, trainy)


	prob_train = forest.predict_proba(trainx)
	prob_test = forest.predict_proba(test)

	# Each column index corresponds to a country, so argsort on the probability
	# matrix gives, row by row, the indices of the k most likely countries.
	sort_train = np.argsort(prob_train)[:,-k:]
	sort_test = np.argsort(prob_test)[:,-k:]

	# Now we need to transform these back to countries, but to map I need to
	# have a dataframe.
	col_names = []

	for i in range(k):
		name = "country_destination_" + str(i+1)
		col_names.append(name)

	pred_train = pd.DataFrame(sort_train, columns=col_names)
	pred_test = pd.DataFrame(sort_test, columns=col_names)

	for name in col_names:
		pred_train[name] = pred_train[name].map(dicts.country)
		pred_test[name] = pred_test[name].map(dicts.country)

	pred_train = np.fliplr(pred_train)
	pred_test = np.fliplr(pred_test)

	return forest, pred_train, pred_test
Developer: oew1v07 | Project: kaggle_playaround | Lines: 35 | Source: forests.py

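np.argsort sorts in ascending order, so the slice [:, -k:] in Example 7 gives the k most probable classes with the best one last; np.fliplr then reverses each row so that predictions are listed from most to least probable. A small sketch with a made-up 2x4 probability matrix:

import numpy as np

prob = np.array([[0.1, 0.4, 0.3, 0.2],
                 [0.6, 0.1, 0.2, 0.1]])
k = 2
top_k = np.fliplr(np.argsort(prob)[:, -k:])   # most probable class first
print(top_k)
# [[1 2]
#  [0 2]]
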
Example 8: save

    def save(self, config, args):
        """
        save LSDMap object in .lsdmap file and eigenvalues/eigenvectors in .eg/.ev files
        """

        if isinstance(self.struct_filename, list):
            struct_filename = self.struct_filename[0]
        else:
            struct_filename = self.struct_filename

        path, ext = os.path.splitext(struct_filename)
        np.savetxt(path + '.eg', np.fliplr(self.eigs[np.newaxis]), fmt='%9.6f')
        np.savetxt(path + '.ev', np.fliplr(self.evs), fmt='%.18e')
        #np.save(path + '_eg.npy', np.fliplr(self.eigs[np.newaxis]))
        #np.save(path + '_ev.npy', np.fliplr(self.evs))

        if args.output_file is None:
            try:
                lsdmap_filename = config.get('LSDMAP', 'lsdmfile')
            except:
                return
        else:
            lsdmap_filename = args.output_file
        with open(lsdmap_filename, "wb") as file:
            pickle.dump(self, file)
Developer: jp43 | Project: LSDMap | Lines: 25 | Source: lsdm.py

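np.fliplr requires an input with at least two dimensions, which is presumably why Example 8 promotes the 1-D eigenvalue vector with np.newaxis before reversing it: self.eigs[np.newaxis] has shape (1, n), and flipping its columns reverses the order of the eigenvalues. A quick illustration:

import numpy as np

eigs = np.array([0.1, 0.5, 0.9])
# np.fliplr(eigs) would raise ValueError: Input must be >= 2-d.
print(np.fliplr(eigs[np.newaxis]))   # [[0.9 0.5 0.1]], shape (1, 3)
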
Example 9: phase_diagram

	def phase_diagram(self,updown,leftright,xlab,ylab):
		mdense = np.loadtxt("mdense.txt", delimiter=',')
		m1d = np.loadtxt("m1d.txt", delimiter=',')
		m2d = np.loadtxt("m2d.txt", delimiter=',')
		mdis = np.loadtxt("mdis.txt", delimiter=',')
		mtotal = np.loadtxt('mtotal.txt',delimiter=',')
		mdense_p = mdense/mtotal
		m1d_p = m1d/mtotal
		m2d_p = m2d/mtotal
		if updown:
			mdense_p = np.flipud(mdense_p)
			m1d_p = np.flipud(m1d_p)
			m2d_p = np.flipud(m2d_p)
		if leftright:
			mdense_p = np.fliplr(mdense_p)
			m1d_p = np.fliplr(m1d_p)
			m2d_p = np.fliplr(m2d_p)
		r = m1d_p
		g = m2d_p
		b = mdense_p
		rgb = np.dstack((r,g,b))
		im = Image.fromarray(np.uint8(rgb*255.999))
		plt.imshow(im,extent=[0.125,1.125,self.nmet_init/self.num_mol,self.nmet_max/self.num_mol],aspect="auto")
		plt.xlabel(xlab)
		plt.ylabel(ylab)
Developer: jorghyq | Project: Monte-Carlo-Simulation | Lines: 25 | Source: analyzer2.py

Example 10: fetcher

    def fetcher(self):
        try:
            for i in xrange(self.batch_size_):
                sample, fname, label = self.jpeg_pack_.get(self.param_['segment'], self.index, self.param_['color'], self.mean_sub_)
                if self.crop_:
                    if self.output2_:
                        cx = random.randint(0, (sample.shape[0] - self.crop_dim_[0])/self.ratio) * self.ratio
                        cy = random.randint(0, (sample.shape[1] - self.crop_dim_[1])/self.ratio) * self.ratio
                    else:
                        cx = random.randint(0, (sample.shape[0] - self.crop_dim_[0]))
                        cy = random.randint(0, (sample.shape[1] - self.crop_dim_[1]))
                    sample = sample[cx:cx+self.crop_dim_[0], cy:cy+self.crop_dim_[1], :]
                if self.mirror_:
                    flag_mirror = random.random() < 0.5
                    if flag_mirror:
                        sample = numpy.fliplr(sample)
                self.buffer[i,...] = sample.transpose((2,0,1)) * self.scale_
                if self.output_label:
                    self.label_buffer[i,0,0,0] = label
                if self.output2_:
                    sample2, fname, label = self.jpeg_pack2_.get(self.param_['segment2'], self.index, self.param_['color2'], self.mean_sub2_)
                    if self.crop_:
                        cx2 = cx / self.ratio
                        cy2 = cy / self.ratio
                        sample2 = sample2[cx2:cx2+self.crop_dim2_[0], cy2:cy2+self.crop_dim2_[1]]
                    if self.mirror_ and flag_mirror:
                        sample2 = numpy.fliplr(sample2)
                    self.buffer2[i,...] = sample2.transpose((2,0,1)) * self.scale2_

                self.index += 1
        except:
            self.worker_succeed = False
            raise
        else:
            self.worker_succeed = True
Developer: piiswrong | Project: caffe | Lines: 35 | Source: jpeg_data_layer.py

Example 11: recale

def recale(matrix):
    l = len(matrix)
    bigmat = np.zeros([2*l,2*l])
    bigmat[l:2*l,l:2*l] = matrix
    bigmat[l:2*l,0:l] = np.fliplr(matrix)
    bigmat[0:l] = np.transpose(np.fliplr(np.transpose(bigmat[l:2*l])))
    return bigmat
Developer: willdvaz | Project: nombres | Lines: 7 | Source: nompy.py

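recale builds a mirror-symmetric array four times the size of the input: the bottom-right block is the original, the bottom-left is its left-right mirror, and the top half mirrors the bottom half vertically. The last line's np.transpose(np.fliplr(np.transpose(x))) is simply np.flipud(x) written the long way:

import numpy as np

x = np.arange(6).reshape(2, 3)
lhs = np.transpose(np.fliplr(np.transpose(x)))
print(np.array_equal(lhs, np.flipud(x)))   # True
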
Example 12: center_normTrace_decomp

    def center_normTrace_decomp(K):
        print('centering kernel')
        #### Get transformed features for K_train that don't snoop into the test block when centering, tracing, or eigendecomposing ####
        Kcent=KernelCenterer()
        Ktrain=Kcent.fit_transform(K[:in_samples,:in_samples])
        #Ktrain=Ktrain/float(np.trace(Ktrain))
        #[EigVals,EigVectors]=scipy.sparse.linalg.eigsh(Ktrain,k=reduced_dimen,which='LM')
        [EigVals,EigVectors]=scipy.linalg.eigh(Ktrain,eigvals=(in_samples-reduced_dimen,in_samples-1))
        for i in range(len(EigVals)): 
            if EigVals[i]<=0: EigVals[i]=0
        EigVals=np.flipud(np.fliplr(np.diag(EigVals)))
        EigVectors=np.fliplr(EigVectors)
        Ktrain_decomp=np.dot(EigVectors,scipy.linalg.sqrtm(EigVals))
       
        #### Get transformed features for K_test using K_train implied mapping ####
        Kcent=KernelCenterer()
        Kfull=Kcent.fit_transform(K)
        #Kfull=Kfull/float(np.trace(Kfull))
        K_train_test=Kfull[in_samples:,:in_samples]
        Ktest_decomp=np.dot(K_train_test,np.linalg.pinv(Ktrain_decomp.T))

        ####combine mapped train and test vectors and normalize each vector####
        Kdecomp=np.vstack((Ktrain_decomp,Ktest_decomp))
        print('doing normalization')
        Kdecomp=normalize(Kdecomp,copy=False)
        return Kdecomp
Developer: matthew-norton | Project: SVM-Kernel-Selection | Lines: 26 | Source: Kernels.py

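scipy.linalg.eigh returns eigenvalues in ascending order, so the flips in Example 12 put everything into descending order: np.flipud(np.fliplr(np.diag(v))) is the diagonal matrix of the reversed eigenvalues, and np.fliplr(EigVectors) reverses the column order of the eigenvectors to match. A check of the diagonal identity:

import numpy as np

v = np.array([1.0, 2.0, 3.0])                     # ascending, as eigh returns them
D = np.flipud(np.fliplr(np.diag(v)))
print(np.array_equal(D, np.diag(v[::-1])))        # True: diag(3, 2, 1)
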
Example 13: gridVisDVF

def gridVisDVF(dvfImFileName,sliceNum = -1,titleString = 'DVF',saveFigPath ='.',deformedImFileName = None, contourNum=40):
     dvf = sitk.ReadImage(dvfImFileName)
     dvfIm  = sitk.GetArrayFromImage(dvf) # get numpy array
     z_dim, y_dim, x_dim, channels = dvfIm.shape # volume shape: (z, y, x, vector components)
     if not (channels == 3):
       print("dvf image expected to have three scalar channels")

     if sliceNum == -1:
            sliceNum = z_dim // 2
     [gridX,gridY]=np.meshgrid(np.arange(1,x_dim+1),np.arange(1,y_dim+1))

     fig = plt.figure()
     if deformedImFileName :
         bgGray = sitk.ReadImage(deformedImFileName)
         bgGrayIm  = sitk.GetArrayFromImage(bgGray) # get numpy array
         plt.imshow(np.fliplr(np.flipud(bgGrayIm[sliceNum,:,:])),cmap=plt.cm.gray)

     idMap = np.zeros(dvfIm.shape)
     for i in range(z_dim):
        for j in range(y_dim):
            for k in range(x_dim):
                idMap[i,j,k,0] = i
                idMap[i,j,k,1] = j
                idMap[i,j,k,2] = k
     mapIm = dvfIm + idMap

     CS = plt.contour(gridX, gridY, np.fliplr(np.flipud(mapIm[sliceNum, :, :, 1])), contourNum, colors='red')
     CS = plt.contour(gridX, gridY, np.fliplr(np.flipud(mapIm[sliceNum, :, :, 2])), contourNum, colors='red')
     plt.title(titleString)
     plt.savefig(saveFigPath + '/' + titleString)
     fig.clf()
     plt.close(fig)
     return
Developer: rameshvs | Project: pyLAR | Lines: 33 | Source: low_rank_atlas_iter.py

Example 14: ps_batch

	def ps_batch (self):
		x_batch = np.zeros([CONST.lenPATCH, CONST.lenPATCH, CONST.COLOR_IN]).astype('float32')
		y_batch = np.zeros([CONST.lenPATCH, CONST.lenPATCH, CONST.COLOR_IN]).astype('float32')

		rand_index = self.index_list[0]
		self.index_list = self.index_list[1:]

		x_batch = self.dset_train[1][:,:,rand_index]
		y_batch = self.dset_train[2][:,:,rand_index]

		x_batch = np.reshape(x_batch, (CONST.lenPATCH, CONST.lenPATCH, 1 ) )
		y_batch = np.reshape(y_batch, (CONST.lenPATCH, CONST.lenPATCH, 1 ) )

		## Data Augmentation
		if random.randint(0,1) :
			x_batch = np.fliplr(x_batch)
			y_batch = np.fliplr(y_batch)
		if random.randint(0,1) :
			x_batch = np.flipud(x_batch)
			y_batch = np.flipud(y_batch)
		rand_rot = random.randint(0,3)
		x_batch = np.rot90(x_batch, rand_rot)
		y_batch = np.rot90(y_batch, rand_rot)

		return np.array([x_batch, y_batch])
Developer: ByungKeon-Ko | Project: SRCNN | Lines: 25 | Source: batch_manager.py

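Applying the same randomly chosen fliplr/flipud/rot90 to both the input patch and its target, as Example 14 does, keeps the pair aligned while sampling from the eight symmetries of a square patch (four rotations, each optionally mirrored). A compact sketch of the same idea; augment_pair is a name made up for this sketch, not part of the original code:

import random
import numpy as np

def augment_pair(x, y):
    """Apply one random square-symmetry transform to both patch and target."""
    if random.randint(0, 1):
        x, y = np.fliplr(x), np.fliplr(y)
    if random.randint(0, 1):
        x, y = np.flipud(x), np.flipud(y)
    k = random.randint(0, 3)
    return np.rot90(x, k), np.rot90(y, k)   # rot90 acts on the first two axes

x = np.zeros((33, 33, 1)); y = np.ones((33, 33, 1))
x_aug, y_aug = augment_pair(x, y)
print(x_aug.shape, y_aug.shape)   # both (33, 33, 1)
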
Example 15: reflectEdges

 def reflectEdges(self, width=None):
     """Extend the edges of the image by reflection.
     The corners aren't dealt with properly, but this might give some help when applying a hanningFilter after."""
     # Extend the size of the image and do some bookkeeping.
     if width is None:
         width = min(self.nx, self.ny) / 4.0
     self.zeroPad(width)
     # And then put reflected copy of data into the boundaries.        
     #  Reflect/flip left edge.
     xmin = self.padx
     xmax = self.padx * 2 
     ymin = self.pady
     ymax = self.ny - self.pady
     self.image[ymin:ymax, 0:xmin] = numpy.fliplr(self.image[ymin:ymax, xmin:xmax])
     # Reflect/flip right edge
     xmin = self.nx - self.padx*2
     xmax = self.nx - self.padx
     self.image[ymin:ymax, (self.nx-self.padx):self.nx] = numpy.fliplr(self.image[ymin:ymax, xmin:xmax])
     # Reflect/flip bottom edge
     xmin = self.padx
     xmax = self.nx - self.padx
     ymin = self.padx
     ymax = self.padx * 2
     self.image[0:self.pady, xmin:xmax] = numpy.flipud(self.image[ymin:ymax, xmin:xmax])
     # Reflect/flip top edge
     ymin = self.ny - self.pady*2
     ymax = self.ny - self.pady
     self.image[(self.ny - self.pady):self.ny, xmin:xmax] = numpy.flipud(self.image[ymin:ymax, xmin:xmax])
     # I should interpolate over the corners, but .. todo.         
     return
Developer: lsst | Project: sims_selfcal | Lines: 30 | Source: pImage.py

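A note on the technique rather than on the class above: for a plain array, numpy can produce the same kind of reflected border in a single call with np.pad(image, width, mode='reflect') (or mode='symmetric' to repeat the edge pixel), which also fills the corners that reflectEdges leaves untouched. A minimal sketch that does not use the pImage class:

import numpy as np

image = np.arange(16, dtype=float).reshape(4, 4)
padded = np.pad(image, pad_width=2, mode='reflect')
print(padded.shape)                                   # (8, 8)
print(np.array_equal(padded[2:-2, 2:-2], image))      # True: original sits in the centre
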

Note: The numpy.fliplr examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and any use or redistribution should follow the corresponding project's license. Do not republish this article without permission.