

Python numpy.save Function Code Examples

This article collects typical usage examples of Python's numpy.save function. If you have been wondering how exactly the save function is used, how to call it, or what real save code looks like, the hand-picked examples below may help.


Below are 15 code examples of the save function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
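
Before turning to the project examples, here is a minimal, self-contained sketch of the basic np.save / np.load round trip; the file name and array are illustrative, not taken from any of the projects below:

import numpy as np

arr = np.arange(12).reshape(3, 4)

# np.save writes a single array to a binary .npy file; the extension is
# appended automatically if the given name does not already end in .npy.
np.save('example_data.npy', arr)

# np.load reads the array back with dtype and shape preserved.
restored = np.load('example_data.npy')
assert (restored == arr).all()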

Example 1: segment_request

def segment_request(request):
    # `optimal_interval` and `experiment` are module-level globals in the
    # original project; column 0 of `request` holds request times.
    max_iteration = int(request[:, 0].max() // optimal_interval)
    for i in range(max_iteration + 1):
        temp = request[np.logical_and(request[:, 0] <= optimal_interval * (i + 1),
                                      request[:, 0] >= (optimal_interval * i + 1))]
        temp[:, 0] = temp[:, 0] - optimal_interval * i
        np.save('experiment_%d/new_passenger_%d.npy' % (experiment, i), temp)
    return max_iteration
Author: locknard, Project: demand-responsive-transit, Lines: 7, Source: IP_0322.py

Example 2: main

def main():
    # `training_data` and `file_name` are module-level globals in the original
    # project; grab_screen, key_check and keys_to_output are its helper functions.

    for i in list(range(4))[::-1]:
        print(i+1)
        time.sleep(1)


    paused = False
    while(True):

        if not paused:
            # 800x600 windowed mode
            screen = grab_screen(region=(0,40,800,640))
            last_time = time.time()
            screen = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)
            screen = cv2.resize(screen, (160,120))
            # resize to something a bit more acceptable for a CNN
            keys = key_check()
            output = keys_to_output(keys)
            training_data.append([screen,output])
            
            if len(training_data) % 1000 == 0:
                print(len(training_data))
                np.save(file_name,training_data)

        keys = key_check()
        if 'T' in keys:
            if paused:
                paused = False
                print('unpaused!')
                time.sleep(1)
            else:
                print('Pausing!')
                paused = True
                time.sleep(1)
Author: gcm0621, Project: pygta5, Lines: 35, Source: create_training_data.py
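
In Example 2, training_data is a plain Python list of [image, one_hot] pairs, so np.save must first convert it to an object array and pickle it. With recent NumPy versions that ragged conversion has to be explicit, and loading the file back requires allow_pickle=True; a minimal sketch under those assumptions (the array contents are illustrative):

import numpy as np

screen = np.zeros((120, 160), dtype=np.uint8)  # stand-in for a captured frame
output = [0, 1, 0]                             # stand-in for a one-hot label

# Ragged [image, label] pairs force dtype=object; np.save pickles object arrays.
training_data = np.empty(1, dtype=object)
training_data[0] = [screen, output]
np.save('training_data.npy', training_data)

# Pickled object arrays need allow_pickle=True on load (NumPy >= 1.16.3).
restored = np.load('training_data.npy', allow_pickle=True)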

Example 3: compute_signif_conf_Z_list

def compute_signif_conf_Z_list(cor_mat_file,conf_cor_mat_file,coords_file):       
        
    import rpy,os
    import nibabel as nib
    import numpy as np
    
    from dmgraphanalysis.utils_cor import export_List_net_from_list,export_Louvain_net_from_list
    from dmgraphanalysis.utils_cor import return_signif_conf_net_list
    from dmgraphanalysis.utils_plot import plot_cormat
    
    print "loading cor_mat_file"
    
    cor_mat = np.load(cor_mat_file)
    
    print "loading conf_cor_mat_file"
    
    conf_cor_mat = np.load(conf_cor_mat_file)
    
    print('load coords')
    
    coords = np.array(np.loadtxt(coords_file),dtype = int)
    
    print "computing net_list by thresholding conf_cor_mat based on distance and net_threshold"
    
    net_list,binary_signif_matrix = return_signif_conf_net_list(cor_mat,conf_cor_mat)
    
    print(binary_signif_matrix.shape)
    
    print "saving binary_signif_matrix"
    
    binary_signif_matrix_file = os.path.abspath('binary_signif_matrix.npy')
    
    np.save(binary_signif_matrix_file,binary_signif_matrix)
    
    print "plotting binary_signif_matrix"
    
    plot_binary_signif_matrix_file = os.path.abspath('binary_signif_matrix.eps')
    
    plot_cormat(plot_binary_signif_matrix_file,binary_signif_matrix,list_labels = [])
    
    ## Z correl_mat as list of edges
    
    print "saving net_list as list of edges"
    
    net_List_file = os.path.abspath('net_List_signif_conf.txt')
    
    export_List_net_from_list(net_List_file,net_list)
    
    ### Z correl_mat as Louvain format
    
    print "saving net_list as Louvain format"
    
    net_Louvain_file = os.path.abspath('net_Louvain_signif_conf.txt')
    
    export_Louvain_net_from_list(net_Louvain_file,net_list,coords)
    
    #net_List_file = ''
    #net_Louvain_file = ''
    
    return net_List_file, net_Louvain_file
Author: Lx37, Project: dmgraphanalysis, Lines: 60, Source: modularity.py

Example 4: plotForce

def plotForce():
    figure(size=3,aspect=0.5)
    subplot(1,2,1)
    from EvalTraj import plotFF
    plotFF(vp=351,t=28,f=900,cm=0.6,foffset=8)
    subplot_annotate()
    
    subplot(1,2,2)
    for i in [1,2,3,4]:
        R=np.squeeze(np.load('Rdpse%d.npy'%i))
        R=stats.nanmedian(R,axis=2)[:,1:,:]
        dps=np.linspace(-1,1,201)[1:]
        plt.plot(dps,R[:,:,2].mean(0));
    plt.legend([0,0.1,0.2,0.3],loc=3) 
    i=2
    R=np.squeeze(np.load('Rdpse%d.npy'%i))
    R=stats.nanmedian(R,axis=2)[:,1:,:]
    mn=np.argmin(R,axis=1)
    y=np.random.randn(mn.shape[0])*0.00002+0.0438
    plt.plot(np.sort(dps[mn[:,2]]),y,'+',mew=1,ms=6,mec=[ 0.39  ,  0.76,  0.64])
    plt.xlabel('Displacement of Force Origin')
    plt.ylabel('Average Net Force Magnitude')
    hh=dps[mn[:,2]]
    err=np.std(hh)/np.sqrt(hh.shape[0])*stats.t.ppf(0.975,hh.shape[0])
    err2=np.std(hh)/np.sqrt(hh.shape[0])*stats.t.ppf(0.75,hh.shape[0])
    m=np.mean(hh)
    print(m, m - err, m + err)
    np.save('force',[m, m-err,m+err,m-err2,m+err2])
    plt.xlim([-0.5,0.5])
    plt.ylim([0.0435,0.046])
    plt.grid(b=True,axis='x')
    subplot_annotate()
Author: simkovic, Project: wolfpackRevisited, Lines: 32, Source: Evaluation.py

Example 5: __init__

    def __init__(
            self, save_data=True,
            use_saved_features=True, use_saved_npz=True):
        'Init by getting all the works.'
        self._self_dir = path.abspath(path.dirname(__file__))
        self.use_saved_features = use_saved_features
        self.use_saved_npz = use_saved_npz
        self.save_data = save_data
        self.npz_data = None
        self._vectorizer = DictVectorizer()

        if save_data and not path.isdir('features'):
            os.mkdir('features')

        work_fname = 'features/all_works.npy'
        if use_saved_features and path.isfile(work_fname):
            self._works = np.load(work_fname)
        else:
            works = []
            with open(self._get_path('work_list/AllWorks.txt')) as f:
                for line in f:
                    works.append(line.split('-')[0])
            self._works = np.array(works)
            if self.save_data:
                np.save('features/all_works', self._works)
Author: fcchou, Project: CS229-project, Lines: 25, Source: features.py

Example 6: vectorize_and_aggregate

def vectorize_and_aggregate(in_data_file_list, mask_file, matrix_name, parcellation_path, fwhm, use_diagonal,
                            use_fishers_z, df_file, df_col_names):
    import os, pickle
    import numpy as np
    from LeiCA_LIFE.learning.prepare_data_utils import vectorize_ss

    # get an example of the data:
    #save_template: template file; for behav: col names
    vectorized_data, data_type, masker, save_template = vectorize_ss(in_data_file_list[0], mask_file, matrix_name,
                                                                     parcellation_path, fwhm, use_diagonal,
                                                                     use_fishers_z, df_file,
                                                                     df_col_names)
    vectorized_data = np.zeros((len(in_data_file_list), vectorized_data.shape[1]))
    vectorized_data.fill(np.nan)

    for i, in_data_file_ss in enumerate(in_data_file_list):
        vectorized_data[i, :], _, _, _ = vectorize_ss(in_data_file_ss, mask_file, matrix_name, parcellation_path, fwhm,
                                                      use_diagonal, use_fishers_z, df_file, df_col_names)

    vectorized_aggregated_file = os.path.abspath('vectorized_aggregated_data.npy')
    np.save(vectorized_aggregated_file, vectorized_data)

    unimodal_backprojection_info = {'data_type': data_type,
                                    'masker': masker,
                                    'save_template': save_template
                                    }
    unimodal_backprojection_info_file = os.path.abspath('unimodal_backprojection_info.pkl')
    with open(unimodal_backprojection_info_file, 'wb') as f:
        pickle.dump(unimodal_backprojection_info, f)
    return vectorized_aggregated_file, unimodal_backprojection_info_file
Author: fliem, Project: LeiCA_LIFE, Lines: 29, Source: prepare_data_utils.py

Example 7: train_word2id

def train_word2id():
    """把训练集的所有词转成对应的id。"""
    time0 = time.time()
    print('Processing train data.')
    df_train = pd.read_csv('../raw_data/question_train_set.txt', sep='\t', usecols=[0, 2, 4],
                           names=['question_id', 'word_title', 'word_content'], dtype={'question_id': object})
    print('training question number %d ' % len(df_train))
    # For questions without content, substitute the title
    na_content_indexs = list()
    for i in tqdm(range(len(df_train))):
        word_content = df_train.word_content.values[i]
        if type(word_content) is float:
            na_content_indexs.append(i)
    print('There are %d train questions without content.' % len(na_content_indexs))
    for na_index in tqdm(na_content_indexs):
        df_train.at[na_index, 'word_content'] = df_train.at[na_index, 'word_title']
    # Questions without a title are dropped
    na_title_indexs = list()
    for i in range(len(df_train)):
        word_title = df_train.word_title.values[i]
        if type(word_title) is float:
            na_title_indexs.append(i)
    print('There are %d train questions without title.' % len(na_title_indexs))
    df_train = df_train.drop(na_title_indexs)
    print('After dropping, training question number(should be 2999952) = %d' % len(df_train))
    # Convert to id form
    p = Pool()
    train_title = np.asarray(p.map(get_id4words, df_train.word_title.values))
    np.save('../data/wd_train_title.npy', train_title)
    train_content = np.asarray(p.map(get_id4words, df_train.word_content.values))
    np.save('../data/wd_train_content.npy', train_content)
    p.close()
    p.join()
    print('Finished changing the training words to ids. Costed time %g s' % (time.time() - time0))
Author: brucexia6116, Project: zhihu-text-classification, Lines: 34, Source: word2id.py

Example 8: consolidate_games

    def consolidate_games(self, name, samples):
        print('>>> Creating consolidated numpy arrays')

        if self.use_generator:
            print('>>> Return generator')
            generator = DataGenerator(self.data_dir, samples)
            return generator

        files_needed = set(file_name for file_name, index in samples)
        print('>>> Total number of files: ' + str(len(files_needed)))

        file_names = []
        for zip_file_name in files_needed:
            file_name = zip_file_name.replace('.zip', '') + name
            file_names.append(file_name)

        feature_list = []
        label_list = []
        for file_name in file_names:
            X = np.load(self.data_dir + '/' + file_name + '_features.npy')
            y = np.load(self.data_dir + '/' + file_name + '_labels.npy')
            feature_list.append(X)
            label_list.append(y)
            print('>>> Done')

        features = np.concatenate(feature_list, axis=0)
        labels = np.concatenate(label_list, axis=0)

        feature_file = self.data_dir + '/' + str(self.num_planes) + '_plane_features_' + name
        label_file = self.data_dir + '/' + str(self.num_planes) + '_plane_labels_' + name

        np.save(feature_file, features)
        np.save(label_file, labels)

        return features, labels
Author: Riashat, Project: betago, Lines: 35, Source: base_processor.py
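
A detail worth noting in Example 8: feature_file and label_file carry no .npy suffix, and np.save appends it automatically, while any later np.load call must spell the extension out. A minimal sketch with hypothetical names:

import numpy as np

features = np.zeros((10, 19, 19))  # hypothetical feature tensor
np.save('plane_features_train', features)        # written as plane_features_train.npy
features = np.load('plane_features_train.npy')   # .npy must be explicit here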

Example 9: concat_ts

def concat_ts(all_ts_files):
    
    import numpy as np
    import os
    
    print(all_ts_files)

    for i,ts_file in enumerate(all_ts_files):
    
        ## loading the time series for this file
        ts = np.load(ts_file)
        
        #print "all_ts: " 
        print(ts.shape)
        
        if i == 0:
            concat_ts = ts.copy()
            #print concat_ts.shape
        else:
            concat_ts = np.concatenate((concat_ts,ts),axis = 0)
            #print concat_ts.shape

    print(concat_ts.shape)

    ### saving time series
    concat_ts_file = os.path.abspath("concat_ts.npy")
    np.save(concat_ts_file,concat_ts)
    
        
    return concat_ts_file
Author: annapasca, Project: neuropype_ephy, Lines: 30, Source: import_mat.py

Example 10: test_word2id

def test_word2id():
    """把测试集的所有词转成对应的id。"""
    time0 = time.time()
    print('Processing eval data.')
    df_eval = pd.read_csv('../raw_data/question_eval_set.txt', sep='\t', usecols=[0, 2, 4],
                          names=['question_id', 'word_title', 'word_content'], dtype={'question_id': object})
    print('test question number %d' % len(df_eval))
    # For questions without a title, substitute the content
    na_title_indexs = list()
    for i in range(len(df_eval)):
        word_title = df_eval.word_title.values[i]
        if type(word_title) is float:
            na_title_indexs.append(i)
    print('There are %d test questions without title.' % len(na_title_indexs))
    for na_index in na_title_indexs:
        df_eval.at[na_index, 'word_title'] = df_eval.at[na_index, 'word_content']
    # For questions without content, substitute the title
    na_content_indexs = list()
    for i in tqdm(range(len(df_eval))):
        word_content = df_eval.word_content.values[i]
        if type(word_content) is float:
            na_content_indexs.append(i)
    print('There are %d test questions without content.' % len(na_content_indexs))
    for na_index in tqdm(na_content_indexs):
        df_eval.at[na_index, 'word_content'] = df_eval.at[na_index, 'word_title']
    # Convert to id form
    p = Pool()
    eval_title = np.asarray(p.map(get_id4words, df_eval.word_title.values))
    np.save('../data/wd_eval_title.npy', eval_title)
    eval_content = np.asarray(p.map(get_id4words, df_eval.word_content.values))
    np.save('../data/wd_eval_content.npy', eval_content)
    p.close()
    p.join()
    print('Finished changing the eval words to ids. Costed time %g s' % (time.time() - time0))
Author: brucexia6116, Project: zhihu-text-classification, Lines: 34, Source: word2id.py

Example 11: get_buffer_callback

def get_buffer_callback(overviewBuffers, overflow, triggeredAt, triggered, auto_stop, nValues):
    # Callback for saving streamed data to disk. ctypes, datetime, os and
    # numpy (np) are imported at module level in the original project.

    # Create a file name based on the current timestamp.
    #filename = time.strftime("%Y%m%d_%H_%M_%S_%f.csv")
    filename = datetime.datetime.now().strftime("%Y%m%d_%H_%M_%S_%f")
    CH1 = 'CH1_' + filename
    #CH2 = 'CH2_' + filename

    # Cast the 2d pointer from the C callback into a Python pointer.
    ob = ctypes.cast(overviewBuffers, ctypes.POINTER(ctypes.POINTER(ctypes.c_short)))

    # Create arrays from the pointer data: ob[0] -> CH1, ob[1] -> CH2.
    streamed_data_CH1 = np.fromiter(ob[0], dtype=np.short, count=nValues)
    #streamed_data_CH2 = np.fromiter(ob[1], dtype=np.short, count=nValues)

    # Save the array data in numpy's binary .npy format.
    path1 = os.path.normpath(r'C:\Users\ckattmann\Documents\GitHub\pqpico\Data') + '/' + CH1
    #path2 = os.path.normpath(r'C:\Users\ckattmann\Documents\GitHub\pqpico\Data') + '/' + CH2

    np.save(path1, streamed_data_CH1)
    #np.save(path2, streamed_data_CH2)
    #print('File saved:', CH1, CH2)

    return 0
Author: kipfer, Project: pqpico, Lines: 26, Source: Picoscope2000.py

Example 12: main

def main(root='/tmp/measurements', output=None):
    data = []
    for s in os.listdir(root):
        subject = []
        for b in os.listdir(os.path.join(root, s)):
            block = []
            bweight, bspeed, bhand, bpaths = b.split('-')[1:]
            for t in os.listdir(os.path.join(root, s, b)):
                thand, tspeed = re.search(r'(left|right)-speed_(\d\.\d+)', t).groups()
                config = np.tile([
                    C[bweight], C[bspeed], C[bhand], C[bpaths],
                    C[thand], float(tspeed)], (120, 1))
                block.append(
                    np.hstack([
                        config,
                        np.loadtxt(os.path.join(root, s, b, t),
                                   skiprows=1, delimiter=',')]))
            subject.append(block)
        if len(subject) == 3:
            data.append(subject)
        else:
            print('incorrect block count! discarding {}'.format(s))
    data = np.array(data)
    logging.info('loaded data %s', data.shape)
    if output:
        np.save(output, data.astype('f'))
Author: EmbodiedCognition, Project: tracing-experiment, Lines: 26, Source: import-csvs.py

Example 13: Cluster

def Cluster(param, DATA_FOLDER):
    ts = ListaSet(param)
    Data = scipy.io.loadmat('./data/filter_template3.mat')
    Filter2 = np.rot90( Data['Filter2'], 2)

#    corList = []
#    TVList= []
    corArr = np.empty(ts.get_num_images())
    TVArr = np.empty(ts.get_num_images())

    for i in range( ts.get_num_images()):
#    for i in range( 100):
        tmp = ts.get_input(i)
        tmp2 = tmp - np.mean( tmp)
        tmp3 = tmp2 / np.linalg.norm(tmp2, 'fro')
#        Cor = scipy.signal.convolve2d(tmp3, Filter2, 'valid')
#        corList.append( Cor)    
        corArr[i] = scipy.signal.convolve2d(tmp3, Filter2, 'valid')
    
        dx = scipy.ndimage.sobel(tmp, 0)
        dy = scipy.ndimage.sobel(tmp, 1)
        mag = np.hypot(dx, dy)
#        TVList.append( np.sum(mag))
        TVArr[i] = np.sum(mag)

        if i % 10000 == 0:
            print(i)

    np.save( DATA_FOLDER+'/trainCorrelation.npy', corArr)
    np.save( DATA_FOLDER+'/trainTotalVariation.npy', TVArr)
    return
Author: lelegan, Project: DLSR, Lines: 31, Source: ListaPrvd_regr.py

Example 14: sample_lnprob

def sample_lnprob(weight_index):
    import emcee

    ndim = 4
    nwalkers = 8 * ndim
    print("using {} walkers".format(nwalkers))
    p0 = np.vstack((np.random.uniform(-0.5, 2, size=(1, nwalkers)),
                    np.random.uniform(50, 300, size=(1, nwalkers)),
                    np.random.uniform(0.2, 1.5, size=(1, nwalkers)),
                    np.random.uniform(0.2, 1.5, size=(1, nwalkers)))).T

    sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(weight_index,), threads=cfg['threads'])

    print("Running Sampler")
    pos, prob, state = sampler.run_mcmc(p0, cfg['burn_in'])

    print("Burn-in complete")
    sampler.reset()
    sampler.run_mcmc(pos, cfg['samples'])

    samples = sampler.flatchain
    np.save(cfg['outdir'] + "samples_w{}.npy".format(weight_index), samples)

    import triangle
    fig = triangle.corner(samples)
    fig.savefig(cfg['outdir'] + "triangle_w{}.png".format(weight_index))
Author: kgullikson88, Project: Starfish, Lines: 26, Source: optimize_emulator.py

Example 15: assertCubeDataAlmostEqual

    def assertCubeDataAlmostEqual(self, cube, reference_filename, *args, **kwargs):
        reference_path = self.get_result_path(reference_filename)
        if self._check_reference_file(reference_path):
            kwargs.setdefault("err_msg", "Reference file %s" % reference_path)

            result = np.load(reference_path)
            if isinstance(result, np.lib.npyio.NpzFile):
                self.assertIsInstance(cube.data, ma.MaskedArray, "Cube data was not a masked array.")
                # Avoid comparing any non-initialised array data.
                data = cube.data.filled()
                np.testing.assert_array_almost_equal(data, result["data"], *args, **kwargs)
                np.testing.assert_array_equal(cube.data.mask, result["mask"])
            else:
                np.testing.assert_array_almost_equal(cube.data, result, *args, **kwargs)
        else:
            self._ensure_folder(reference_path)
            logger.warning("Creating result file: %s", reference_path)
            if isinstance(cube.data, ma.MaskedArray):
                # Avoid recording any non-initialised array data.
                data = cube.data.filled()
                with open(reference_path, "wb") as reference_file:
                    np.savez(reference_file, data=data, mask=cube.data.mask)
            else:
                with open(reference_path, "wb") as reference_file:
                    np.save(reference_file, cube.data)
Author: djkirkham, Project: iris, Lines: 25, Source: __init__.py
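
Example 15 relies on np.load returning an NpzFile for .npz archives and a plain ndarray for .npy files, which is how the assertion distinguishes masked from unmasked reference data. Here is a minimal sketch of the same save/restore pattern for a masked array; the values are illustrative:

import numpy as np
import numpy.ma as ma

cube_data = ma.masked_array([1.0, 2.0, 3.0], mask=[False, True, False])

# Store the filled data and the mask as separate arrays in one .npz archive.
np.savez('reference.npz', data=cube_data.filled(), mask=cube_data.mask)

result = np.load('reference.npz')  # returns an NpzFile, not an ndarray
restored = ma.masked_array(result['data'], mask=result['mask'])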


Note: The numpy.save examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's License. Please do not reproduce without permission.