

Python numpy.load Function Code Examples

This article collects typical usage examples of Python's numpy.load function. If you have been wondering what exactly numpy.load does, how to call it, or what real uses of it look like, the hand-picked examples below should help.


The following presents 15 code examples of the load function, sorted by popularity by default.
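Before the examples, here is a minimal, self-contained sketch of the basic np.save / np.load round trip (the file names are illustrative):

import numpy as np

# Save one array to a .npy file, then read it back.
a = np.arange(12).reshape(3, 4)
np.save('example.npy', a)      # np.save appends '.npy' if the name lacks it
b = np.load('example.npy')
assert (a == b).all()

# Several arrays can be bundled into a single .npz archive;
# np.load then returns a dict-like NpzFile object.
np.savez('bundle.npz', x=a, y=b)
with np.load('bundle.npz') as archive:
    print(archive.files)       # ['x', 'y']
    x = archive['x']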

Example 1: generateConnectionMatrix

	def generateConnectionMatrix(self, o_shape, generate):
		if self.file == 'default':
			self.setFName(o_shape)

		try:
			if generate:
				# Deliberately jump to the except branch below so the
				# weights are regenerated instead of loaded.
				raise IOError('regeneration requested')
			Wi = np.load(self.file)
			print('[info] Weights loaded from file!')
			print('Shape = ' + str(Wi.shape))
		except IOError:
			print("[info] Weights file wasn't found. Generating new connections")
			kern1 = gkern2(self.shape, self.sigma)
			Wi = kernel2connection(kern1, self.i_shape, o_shape)
			print('Shape = ' + str(Wi.shape))
			# Normalize the rows (scaled by self.k) unless they already sum to 1.
			if np.sum(Wi, 1)[0] != 1:
				Wi /= np.sum(Wi, 1).reshape((Wi.shape[0], 1)) * self.k
			np.save(self.file, Wi)

		return Wi
Author: jypuigbo, Project: TheanoNNs, Lines: 25, Source: theanoCortex04.py
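The try/except structure above is a common load-or-regenerate caching pattern. A generic, hypothetical helper (not part of the TheanoNNs project) might look like this:

import numpy as np

def load_or_compute(path, compute, force=False):
    # Return the array cached at `path`; recompute and re-save it
    # when the file is missing or `force` is True.
    if not force:
        try:
            return np.load(path)
        except IOError:  # FileNotFoundError on Python 3
            pass
    result = compute()
    np.save(path, result)
    return result

Usage would be something like Wi = load_or_compute('weights.npy', build_weights, force=generate), where build_weights stands in for the kernel construction above.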

Example 2: dobetterstuff

import os
import random

import numpy as np
import sklearn.cross_validation


def dobetterstuff(inpath):
    data_files = [f for f in os.listdir(inpath) if f.endswith('.mel.npy')]
    random.shuffle(data_files)
    # The first 18 characters of each filename identify the artist.
    artists = set([f[:18] for f in data_files])
    artist_string_to_id = dict([(s, i) for i, s in enumerate(artists)])

    def get_split(datafiles___, splitpercent):
        gen = sklearn.cross_validation.ShuffleSplit(len(datafiles___), 1, splitpercent)
        for i_trs, i_tes in gen:
            return [datafiles___[i] for i in i_trs], [datafiles___[i] for i in i_tes]

    training_files, test_files = get_split(data_files, .2)
    training_files, validation_files = get_split(training_files, .2)

    print(training_files)
    print(test_files)
    print(validation_files)

    # Each .mel.npy file holds 129 frames, so every file contributes
    # 129 identically labelled rows.
    train_set_y = np.hstack([[artist_string_to_id[f[:18]]] * 129 for f in training_files])
    train_set_x = np.vstack([np.load(os.path.join(inpath, f)) for f in training_files])
    test_set_y = np.hstack([[artist_string_to_id[f[:18]]] * 129 for f in test_files])
    test_set_x = np.vstack([np.load(os.path.join(inpath, f)) for f in test_files])
    validation_set_y = np.hstack([[artist_string_to_id[f[:18]]] * 129 for f in validation_files])
    validation_set_x = np.vstack([np.load(os.path.join(inpath, f)) for f in validation_files])

    datasets = [(train_set_x, train_set_y), (validation_set_x, validation_set_y), (test_set_x, test_set_y)]
    return datasets
Author: bmcfee, Project: deep-artists, Lines: 30, Source: make_dataset.py

Example 3: compute_signif_conf_Z_list

def compute_signif_conf_Z_list(cor_mat_file, conf_cor_mat_file, coords_file):

    import rpy, os
    import nibabel as nib
    import numpy as np

    from dmgraphanalysis.utils_cor import export_List_net_from_list, export_Louvain_net_from_list
    from dmgraphanalysis.utils_cor import return_signif_conf_net_list
    from dmgraphanalysis.utils_plot import plot_cormat

    print("loading cor_mat_file")
    cor_mat = np.load(cor_mat_file)

    print("loading conf_cor_mat_file")
    conf_cor_mat = np.load(conf_cor_mat_file)

    print("load coords")
    coords = np.array(np.loadtxt(coords_file), dtype=int)

    print("computing net_list by thresholding conf_cor_mat based on distance and net_threshold")
    net_list, binary_signif_matrix = return_signif_conf_net_list(cor_mat, conf_cor_mat)
    print(binary_signif_matrix.shape)

    print("saving binary_signif_matrix")
    binary_signif_matrix_file = os.path.abspath('binary_signif_matrix.npy')
    np.save(binary_signif_matrix_file, binary_signif_matrix)

    print("plotting binary_signif_matrix")
    plot_binary_signif_matrix_file = os.path.abspath('binary_signif_matrix.eps')
    plot_cormat(plot_binary_signif_matrix_file, binary_signif_matrix, list_labels=[])

    # Z correl_mat as a list of edges
    print("saving net_list as list of edges")
    net_List_file = os.path.abspath('net_List_signif_conf.txt')
    export_List_net_from_list(net_List_file, net_list)

    # Z correl_mat in Louvain format
    print("saving net_list as Louvain format")
    net_Louvain_file = os.path.abspath('net_Louvain_signif_conf.txt')
    export_Louvain_net_from_list(net_Louvain_file, net_list, coords)

    return net_List_file, net_Louvain_file
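A detail this example relies on: np.save appends the '.npy' extension when the given name lacks one, so saving to an absolute path that already ends in '.npy' round-trips exactly. A small sketch (the `matrix` array is hypothetical):

import os
import numpy as np

out = os.path.abspath('binary_signif_matrix.npy')
np.save(out, matrix)        # writes exactly `out`, no extra extension added
matrix_back = np.load(out)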
Author: Lx37, Project: dmgraphanalysis, Lines: 60, Source: modularity.py

Example 4: test_btable_prepare

import numpy as np
import numpy.testing as npt
from numpy.testing import assert_raises

from dipy.core.gradients import gradient_table
from dipy.data import get_data


def test_btable_prepare():

    sq2 = np.sqrt(2) / 2.
    bvals = 1500 * np.ones(7)
    bvals[0] = 0
    bvecs = np.array([[0, 0, 0],
                      [1, 0, 0],
                      [0, 1, 0],
                      [0, 0, 1],
                      [sq2, sq2, 0],
                      [sq2, 0, sq2],
                      [0, sq2, sq2]])
    bt = gradient_table(bvals, bvecs)
    npt.assert_array_equal(bt.bvecs, bvecs)
    bt.info
    fimg, fbvals, fbvecs = get_data('small_64D')
    bvals = np.load(fbvals)
    bvecs = np.load(fbvecs)
    bvecs = np.where(np.isnan(bvecs), 0, bvecs)
    bt = gradient_table(bvals, bvecs)
    npt.assert_array_equal(bt.bvecs, bvecs)
    bt2 = gradient_table(bvals, bvecs.T)
    npt.assert_array_equal(bt2.bvecs, bvecs)
    btab = np.concatenate((bvals[:, None], bvecs), axis=1)
    bt3 = gradient_table(btab)
    npt.assert_array_equal(bt3.bvecs, bvecs)
    npt.assert_array_equal(bt3.bvals, bvals)
    bt4 = gradient_table(btab.T)
    npt.assert_array_equal(bt4.bvecs, bvecs)
    npt.assert_array_equal(bt4.bvals, bvals)
    # Test for proper inputs (expects either bvals/bvecs or 4 by n):
    assert_raises(ValueError, gradient_table, bvecs)
Author: MPDean, Project: dipy, Lines: 32, Source: test_gradients.py

Example 5: load

    def load(filename):

        filename_extension = filename.split(".")[-1]

        if filename_extension == "npz":
            filename_npz = filename.replace(".npz", "") + ".npz"
            filename_data = filename.replace(".npz", "") + ".npy"
        elif filename_extension == "npy":
            filename_npz = filename.replace(".npy", "") + ".npz"
            filename_data = filename.replace(".npy", "") + ".npy"
        else:
            # Guard against unexpected extensions, which would otherwise
            # leave filename_npz undefined below.
            raise ValueError("Expected a .npz or .npy filename, got: " + filename)

        try:
            file_content = np.load(filename_npz)
            vectors_shape = (file_content["np_twoform_3"].size,
                             file_content["np_twoform_0"].size,
                             file_content["np_twoform_1"].size)
            # Map the eigenvector data from disk instead of reading it into memory.
            vectors = TwoformVectorsEigenvectors(np.memmap(filename_data, dtype=np.complex128, mode='c', shape=vectors_shape))
        except Exception:
            print("Falling back to load_npz")
            data_dict = AutocorrelationFunctionIO.load_npz(filename)

            if "twoform_4" in data_dict:
                return data_dict
            else:
                print("Loading wavefronts")
                file_content = np.load(filename_npz)
                vectors = TwoformVectorsWavefronts(file_content["np_twoform_0"], file_content["np_twoform_1"], filename)

        data_dict = dict()
        for key in file_content.keys():
            data_dict[key.replace("np_", "")] = file_content[key]

        data_dict["twoform_4"] = vectors

        return data_dict
Author: mark-glass, Project: comsyl, Lines: 33, Source: AutocorrelationFunctionIO.py
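Two points this example illustrates: np.load on a .npz archive returns a lazy, dict-like NpzFile, and very large raw arrays can be memory-mapped with np.memmap instead of read into RAM. A sketch with illustrative file names, assuming the files exist and the shape/dtype are known in advance:

import numpy as np

with np.load('data.npz') as content:
    for key in content.files:
        print(key, content[key].shape)   # each array is read on access

# mode='c' gives copy-on-write access: reads come from disk,
# writes stay in memory and never touch the file.
big = np.memmap('vectors.dat', dtype=np.complex128, mode='c',
                shape=(100, 64, 64))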

Example 6: load_data

def load_data(data_locations, file_idx_location, base_directory='data/cached/'):
    '''
    Loads data from each of the five blocks.

    Args:
        data_locations (list<str>): Locations to the data files.
        file_idx_location (str): Location of the fileidx.mat file.
    Returns:
        list<numpy.ndarray>, list of the data from different blocks.
        list<numpy.ndarray>, list of the labels from different blocks.
    '''

    if not base_directory.endswith('/'):
        base_directory = base_directory + '/'

    all_data = list()
    all_labels = list()

    try:
        # `use_cached` is a module-level flag in the original project.
        if use_cached and os.path.exists(base_directory):

            for block in range(1, 5+1):

                data = np.load(base_directory+'data-{}.npy'.format(block))
                labels = np.load(base_directory+'labels-{}.npy'.format(block))

                all_data.append(data)
                all_labels.append(labels)

            return all_data, all_labels

    except Exception as error:
        print(error)
        print('Unable to load cached files. Loading from .mat...')
Author: zo7, Project: gcca-speakers, Lines: 34, Source: test_baseline.py

Example 7: resample

    def resample( self, in_path, idx_out, Ylm_out, doMergeB0 ) :
        if doMergeB0:
            nS = 1+self.scheme.dwi_count
            merge_idx = np.hstack((self.scheme.b0_idx[0],self.scheme.dwi_idx))
        else:
            nS = self.scheme.nS
            merge_idx = np.arange(nS)
        KERNELS = {}
        KERNELS['model'] = self.id
        KERNELS['D']     = np.zeros( (len(self.d_perps),181,181,nS), dtype=np.float32 )
        KERNELS['CSF']   = np.zeros( (len(self.d_isos),nS), dtype=np.float32 )

        nATOMS = len(self.d_perps) + len(self.d_isos)
        progress = ProgressBar( n=nATOMS, prefix="   ", erase=True )

        # Tensor compartment(s)
        for i in range(len(self.d_perps)) :
            lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) )
            KERNELS['D'][i,...] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, False )[:,:,merge_idx]
            progress.update()

        # Isotropic compartment(s)
        for i in range(len(self.d_isos)) :
            lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) )
            KERNELS['CSF'][i,...] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, True )[merge_idx]
            progress.update()

        return KERNELS
Author: davidrs06, Project: AMICO, Lines: 28, Source: models.py

Example 8: load_data

	def load_data(self):
		os.chdir(self.data_in)
		if self.add_photos:
			try:
				self.otherframes = np.load("otherframes.npy")
			except IOError:
				print("No otherframes.npy file found... using all the frames in src_imgs/.")
				self.otherframes = None

			if self.otherframes is not None:
				otherframe_strings = ["%08d.jpg" % i for i in self.otherframes]
				self.photos = otherframe_strings
			else:
				self.photos = [f for f in os.listdir("src_imgs") if os.path.isfile(os.path.join("src_imgs", f)) and os.path.splitext(f)[1].lower() == ".jpg"]

		else:
			try:
				self.keyframes = np.load("keyframes.npy")
			except IOError:
				print("No keyframes.npy file found... using all the frames in src_imgs/.")
				self.keyframes = None

			if self.keyframes is not None:
				keyframe_strings = ["%08d.jpg" % i for i in self.keyframes]
				self.photos = keyframe_strings
			else:
				self.photos = [f for f in os.listdir("src_imgs") if os.path.isfile(os.path.join("src_imgs", f)) and os.path.splitext(f)[1].lower() == ".jpg"]
		os.chdir(self.currentDir)
Author: AlienorV, Project: pupil3d, Lines: 28, Source: __init__.py

Example 9: load_field_values

    def load_field_values(self, file_prefix):
        if self.dX is None or self.dY is None or self.dZ is None:
            self.init_field(0.0)

        try:
            # The three displacement components are stored as
            # '<prefix>dX.npy', '<prefix>dY.npy' and '<prefix>dZ.npy'.
            dX = np.load('{}dX.npy'.format(file_prefix))
            dY = np.load('{}dY.npy'.format(file_prefix))
            dZ = np.load('{}dZ.npy'.format(file_prefix))

            self.dX += dX
            self.dY += dY
            self.dZ += dZ

            return True
        except IOError:
            return False
Author: markyoder, Project: PyVC, Lines: 26, Source: vcutils.py

Example 10: load

    def load(cls, fname, mmap=None):
        """
        Load a previously saved object from file (also see `save`).

        If the object was saved with large arrays stored separately, you can load
        these arrays via mmap (shared memory) using `mmap='r'`. Default: don't use
        mmap, load large arrays as normal objects.

        """
        logger.info("loading %s object from %s" % (cls.__name__, fname))
        subname = lambda suffix: fname + '.' + suffix + '.npy'
        obj = unpickle(fname)
        for attrib in getattr(obj, '__numpys', []):
            logger.info("loading %s from %s with mmap=%s" % (attrib, subname(attrib), mmap))
            setattr(obj, attrib, numpy.load(subname(attrib), mmap_mode=mmap))
        for attrib in getattr(obj, '__scipys', []):
            logger.info("loading %s from %s with mmap=%s" % (attrib, subname(attrib), mmap))
            sparse = unpickle(subname(attrib))
            sparse.data = numpy.load(subname(attrib) + '.data.npy', mmap_mode=mmap)
            sparse.indptr = numpy.load(subname(attrib) + '.indptr.npy', mmap_mode=mmap)
            sparse.indices = numpy.load(subname(attrib) + '.indices.npy', mmap_mode=mmap)
            setattr(obj, attrib, sparse)
        for attrib in getattr(obj, '__ignoreds', []):
            logger.info("setting ignored attribute %s to None" % (attrib))
            setattr(obj, attrib, None)
        return obj
Author: biddyweb, Project: gensim, Lines: 26, Source: utils.py
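The mmap_mode argument used above is what makes this pattern scale: with mmap_mode='r' the .npy file is memory-mapped read-only, so only the slices actually touched are read from disk, and multiple processes can share the same pages. An illustrative sketch with a hypothetical file name:

import numpy as np

weights = np.load('model.vectors.npy', mmap_mode='r')
row = weights[42]               # reads just this row from disk
sub = np.array(weights[:100])   # copy a block into real memory when needed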

Example 11: GLM_column

def GLM_column(featDir, colInd):
    '''
    Returns a particular column of a GLM model matrix.

    Input Parameters:
          featDir:     The .feat directory where the GLM model file exists.
                       If the motion scrubbed version of the GLM (GLM_model_ms.npz)
                       exists, then it is used to produce the output. Otherwise,
                       the original version of the GLM (GLM_model.npz) is used.
          colInd:      The column index indicating the column to be returned. As in
                       the typical Python convention, 0 corresponds to the first column.

    Returns:
          Y:           A 1D array from the GLM design matrix.
    '''
    # file business
    fGLM = os.path.join(featDir, 'GLM_model.npz')
    fGLM_ms = os.path.join(featDir, 'GLM_model_ms.npz')
    # loading the appropriate file
    if os.path.isfile(fGLM_ms):
        infile = np.load(fGLM_ms)
    else:
        infile = np.load(fGLM)
    # and extracting the column
    X = infile['X']
    Y = X[:,colInd]
    return Y
Author: sathayas, Project: fMRIConnectome, Lines: 27, Source: fsl_feat_model_wrapper.py

Example 12: decode_predictions

def decode_predictions(preds, top=5):
    LABELS = None
    if len(preds.shape) == 2:
        if preds.shape[1] == 2622:
            fpath = get_file('rcmalli_vggface_labels_v1.npy',
                             V1_LABELS_PATH,
                             cache_subdir=VGGFACE_DIR)
            LABELS = np.load(fpath)
        elif preds.shape[1] == 8631:
            fpath = get_file('rcmalli_vggface_labels_v2.npy',
                             V2_LABELS_PATH,
                             cache_subdir=VGGFACE_DIR)
            LABELS = np.load(fpath)
    if LABELS is None:
        raise ValueError('`decode_predictions` expects '
                         'a batch of predictions '
                         '(i.e. a 2D array of shape (samples, 2622) for V1 or '
                         '(samples, 8631) for V2). '
                         'Found array with shape: ' + str(preds.shape))
    results = []
    for pred in preds:
        top_indices = pred.argsort()[-top:][::-1]
        result = [[str(LABELS[i]), pred[i]] for i in top_indices]
        result.sort(key=lambda x: x[1], reverse=True)
        results.append(result)
    return results
Author: kinect59, Project: keras-vggface, Lines: 32, Source: utils.py
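One caveat when loading label files like these: since NumPy 1.16.3, np.load refuses to load object (pickled) arrays by default. If the labels were saved as Python strings or objects, the call needs allow_pickle=True, and it should only be used on trusted files, since unpickling can execute arbitrary code. A sketch with an illustrative path:

import numpy as np

LABELS = np.load('rcmalli_vggface_labels_v1.npy', allow_pickle=True)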

Example 13: data_load

def data_load(dirname):
	''' load feature, label pairs from data directory

	dirname: string, data directory name
	Return: tuple of numpy arrays, feature, label pairs. '''
	npylist = os.listdir(dirname)
	estilen = len(npylist)
	L = None
	P = None
	epoch = 0
	for k, featname in enumerate(npylist):
		# Report progress in 10% steps.
		if k * 10 // estilen > epoch:
			epoch = k * 10 // estilen
			print('loading', epoch * 10, '%')
		# Feature files start with 'p'; their label files start with 'l'.
		if not featname.startswith('p'):
			continue
		labename = 'l' + featname[1:]
		feat = numpy.load(os.path.join(dirname, featname))
		labe = numpy.load(os.path.join(dirname, labename))
		try:
			if P is None:
				P = feat
				L = labe
			else:
				P = numpy.concatenate((P, feat))
				L = numpy.concatenate((L, labe))
		except ValueError:
			print(featname, 'numpy array shape does not match', feat.shape)
	return P, L
Author: ElegantGod, Project: TextDetector, Lines: 31, Source: fileOp.py
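Note that growing P and L with repeated numpy.concatenate copies the whole accumulated array on every iteration, which is quadratic in total size. Collecting the loaded arrays in a Python list and concatenating once is linear; a sketch using the same 'p'-prefix convention, with dirname assumed to be defined:

import os
import numpy

feats = [numpy.load(os.path.join(dirname, f))
         for f in sorted(os.listdir(dirname)) if f.startswith('p')]
P = numpy.concatenate(feats)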

Example 14: test_enhance_neurites_gradient_volume

def test_enhance_neurites_gradient_volume(image, module, workspace):
    resources = os.path.realpath(os.path.join(os.path.dirname(__file__), "..", "resources"))

    data = numpy.load(os.path.join(resources, "neurite.npy"))
    data = skimage.exposure.rescale_intensity(1.0 * data)
    data = numpy.tile(data, (3, 1)).reshape(3, *data.shape)

    image.pixel_data = data
    image.dimensions = 3

    module.method.value = "Enhance"
    module.enhance_method.value = "Neurites"
    module.neurite_choice.value = "Line structures"
    module.object_size.value = 8

    module.run(workspace)

    output = workspace.image_set.get_image("output")
    actual = output.pixel_data

    expected = numpy.load(os.path.join(resources, "enhanced_neurite.npy"))
    expected = numpy.tile(expected, (3, 1)).reshape(3, *expected.shape)

    numpy.testing.assert_array_almost_equal(expected, actual)
Author: CellProfiler, Project: CellProfiler, Lines: 32, Source: test_enhanceorsuppressfeatures.py

Example 15: load_dataset

def load_dataset():
    
    train = numpy.load(DATA_DIR + train_filename)
    validate = numpy.load(DATA_DIR + valid_filename)
    test = numpy.load(DATA_DIR + test_filename)
    
    return train, validate, test
Author: shub1905, Project: project_apollo, Lines: 7, Source: utils2.py
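The three splits could also live in one .npz archive, so a single np.load call hands back all of them by name. A sketch reusing the DATA_DIR constant assumed by the example above:

import numpy

numpy.savez(DATA_DIR + 'splits.npz', train=train, validate=validate, test=test)
with numpy.load(DATA_DIR + 'splits.npz') as f:
    train, validate, test = f['train'], f['validate'], f['test']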


Note: the numpy.load function examples in this article were curated by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. Consult each project's license before redistributing or using the code. Do not repost without permission.