

Python _pickle.dump Method Code Examples

This article collects typical usage examples of the _pickle.dump method in Python. If you are wondering what _pickle.dump does, how to call it, or what real-world code using it looks like, the curated examples below should help. You can also explore other usage examples from the _pickle module.


The following sections present 15 code examples of the _pickle.dump method, sorted by popularity by default. Each example is an excerpt from an open-source project; the author, project, and source file are noted at the end of each snippet.
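
Before the project excerpts, here is a minimal, self-contained sketch of the basic _pickle.dump/_pickle.load round trip. The file name example.pkl and the sample dictionary are illustrative only; note that _pickle is the C accelerator behind the standard pickle module, which most code imports simply as pickle.

import _pickle

data = {'name': 'demo', 'values': [1, 2, 3]}

# Serialize the object to a file opened in binary mode.
with open('example.pkl', 'wb') as f:
    _pickle.dump(data, f)

# Read it back; the restored object compares equal to the original.
with open('example.pkl', 'rb') as f:
    restored = _pickle.load(f)

assert restored == data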

Example 1: write_to_file

# Required module import: import _pickle [as alias]
# Or: from _pickle import dump [as alias]
def write_to_file(self, save_file):
    "Write all the times to file."
    try:
      with TestTimes.LockedFile(save_file, 'a+b') as fd:
        times = TestTimes.__read_test_times_file(fd)

        if times is None:
          times = self.__times
        else:
          times.update(self.__times)

        # We erase data from file while still holding a lock to it. This
        # way reading old test times and appending new ones are atomic
        # for external viewer.
        fd.seek(0)
        fd.truncate()
        with gzip.GzipFile(fileobj=fd, mode='wb') as gzf:
          cPickle.dump(times, gzf, PICKLE_HIGHEST_PROTOCOL)
    except IOError:
      pass  # ignore errors---saving the times isn't that important 
Author: google, Project: gtest-parallel, Lines: 22, Source file: gtest_parallel.py
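
Example 1 pickles through a gzip.GzipFile wrapper so the data on disk is compressed. Stripped of the project-specific file locking and merge logic, the core pattern looks roughly like the sketch below (the file name times.gz and the sample dictionary are illustrative):

import gzip
import _pickle

times = {'test_a': 1.25, 'test_b': 0.4}

# Wrap the binary file object in GzipFile so the pickle stream is compressed.
with open('times.gz', 'wb') as fd:
    with gzip.GzipFile(fileobj=fd, mode='wb') as gzf:
        _pickle.dump(times, gzf, -1)  # -1 selects the highest pickle protocol

with open('times.gz', 'rb') as fd:
    with gzip.GzipFile(fileobj=fd, mode='rb') as gzf:
        restored = _pickle.load(gzf)

assert restored == times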

Example 2: writeTo

# Required module import: import _pickle [as alias]
# Or: from _pickle import dump [as alias]
def writeTo(self):
        company=self.dest_file
        for webp in self.date:
            data_dump_dir = os.path.join(DATA_DIR,'content',company,webp)
            if(not os.path.exists(data_dump_dir)):
                os.makedirs(data_dump_dir)
            temp = {'date':self.date[webp],
                    'title':self.titles[webp],
                    'content':self.contents[webp],
                    'url':self.total_links[webp],
                    }
            with open(os.path.join(DATA_DIR,'content/'+company+'/'+webp+'/raw_'+self.file.split('.data')[0]+'_'+webp+'.pkl'), 'wb') as fp:
                pickle.dump(temp, fp)
            df = pd.DataFrame(temp)
            df.set_index('date',inplace=True)
            df.to_pickle(os.path.join(DATA_DIR,'content/'+company+'/'+webp+'/'+self.file.split('.data')[0]+'_'+webp+'_content.pkl'))
            df.to_csv(os.path.join(DATA_DIR,'content/'+company+'/'+webp+'/'+self.file.split('.data')[0]+'_'+webp+'_content.csv')) 
Author: gyanesh-m, Project: Sentiment-analysis-of-financial-news-data, Lines: 19, Source file: quick_scraper.py

Example 3: preprocess

# Required module import: import _pickle [as alias]
# Or: from _pickle import dump [as alias]
def preprocess(self, vocab_file, corpus_file, data_path, label_file):
        corpus = pd.read_csv(corpus_file, encoding='utf8')
        labels = corpus['label'].drop_duplicates().values
        corpus = corpus['text']
        corpus = ''.join(map(lambda i: i.strip(), corpus))

        self.labels = dict(zip(labels, range(len(labels))))
        self.label_size = len(labels)

        with open(label_file, 'wb') as f:
            pickle.dump(self.labels, f)

        counter = collections.Counter(corpus)
        count_pairs = sorted(counter.items(), key=lambda i: -i[1])
        self.chars, _ = zip(*count_pairs)
        with open(vocab_file, 'wb') as f:
            pickle.dump(self.chars, f)

        self.vocab_size = len(self.chars) + 1
        self.vocab = dict(zip(self.chars, range(1, len(self.chars) + 1)))

        data = pd.read_csv(data_path, encoding='utf8')
        tensor_x = np.array(list(map(self.transform, data['text'])))
        tensor_y = np.array(list(map(self.labels.get, data['label'])))
        self.tensor = np.c_[tensor_x, tensor_y].astype(int) 
Author: koala-ai, Project: tensorflow_nlp, Lines: 27, Source file: dataset.py

Example 4: save_model

# Required module import: import _pickle [as alias]
# Or: from _pickle import dump [as alias]
def save_model(self, sess, path):
        save_target = path + '_iter%d' % self.epochs_trained
        dirname = os.path.dirname(save_target)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        self.saver.save(sess, save_target)

        dictionaries_path = save_target + '.dict'
        with open(dictionaries_path, 'wb') as file:
            pickle.dump(self.subtoken_to_index, file)
            pickle.dump(self.index_to_subtoken, file)
            pickle.dump(self.subtoken_vocab_size, file)

            pickle.dump(self.target_to_index, file)
            pickle.dump(self.index_to_target, file)
            pickle.dump(self.target_vocab_size, file)

            pickle.dump(self.node_to_index, file)
            pickle.dump(self.index_to_node, file)
            pickle.dump(self.nodes_vocab_size, file)

            pickle.dump(self.num_training_examples, file)
            pickle.dump(self.epochs_trained, file)
            pickle.dump(self.config, file)
        print('Saved after %d epochs in: %s' % (self.epochs_trained, save_target)) 
Author: tech-srl, Project: code2seq, Lines: 27, Source file: model.py
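
Example 4 relies on the fact that one file can hold several pickle records back to back: each dump call appends one record, and matching load calls read them back in the same order. A minimal sketch of that pattern (the file name records.pkl and the sample objects are illustrative):

import _pickle

# Write several objects back to back into one file.
with open('records.pkl', 'wb') as f:
    _pickle.dump({'a': 0}, f)
    _pickle.dump([1, 2, 3], f)
    _pickle.dump('third record', f)

# Each load consumes exactly one record, in write order.
with open('records.pkl', 'rb') as f:
    first = _pickle.load(f)
    second = _pickle.load(f)
    third = _pickle.load(f)

assert third == 'third record'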

Example 5: save

# Required module import: import _pickle [as alias]
# Or: from _pickle import dump [as alias]
def save(self, filename='weights.pkl'):
        with open(filename, 'wb') as fp:
            pickle.dump(self.weights, fp) 
Author: alirezamika, Project: evostra, Lines: 5, Source file: feed_forward_network.py

Example 6: save

# Required module import: import _pickle [as alias]
# Or: from _pickle import dump [as alias]
def save(self, file_path, gsums=None, learning_rate=None, validation_ppl_history=None, best_validation_ppl=None, epoch=None, random_state=None):
        try:
            import cPickle
        except ImportError:
            import _pickle as cPickle
        state = {
            "type":                     self.__class__.__name__,
            "n_hidden":                 self.n_hidden,
            "x_vocabulary":             self.x_vocabulary,
            "y_vocabulary":             self.y_vocabulary,
            "stage1_model_file_name":   self.stage1_model_file_name if hasattr(self, "stage1_model_file_name") else None,
            "params":                   [p.get_value(borrow=True) for p in self.params],
            "gsums":                    [s.get_value(borrow=True) for s in gsums] if gsums else None,
            "learning_rate":            learning_rate,
            "validation_ppl_history":   validation_ppl_history,
            "epoch":                    epoch,
            "random_state":             random_state
        }

        with open(file_path, 'wb') as f:
            cPickle.dump(state, f, protocol=cPickle.HIGHEST_PROTOCOL) 
Author: ottokart, Project: punctuator2, Lines: 23, Source file: models.py
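
Several examples pass an explicit protocol argument (cPickle.HIGHEST_PROTOCOL above, the literal 3 in Example 12). Higher protocols generally produce a more compact binary encoding but may not be readable by older Python versions. A small sketch of the effect, with sizes that will vary by object and Python version:

import io
import _pickle

state = {'params': list(range(100))}

for protocol in (0, 2, -1):  # -1 selects the highest available protocol
    buf = io.BytesIO()
    _pickle.dump(state, buf, protocol)
    print(protocol, len(buf.getvalue()))  # later protocols are more compact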

Example 7: _create_results_placeholder

# Required module import: import _pickle [as alias]
# Or: from _pickle import dump [as alias]
def _create_results_placeholder(self, directory, tmc_number, g_number,
                                   n_points, n_sources, model_family):
        tmc_dir = os.path.join(
            directory, 
            'mem_tmc_{}.pkl'.format(tmc_number.zfill(4))
        )
        g_dir = os.path.join(
            directory, 
            'mem_g_{}.pkl'.format(g_number.zfill(4))
        )
        self.mem_tmc = np.zeros((0, n_points))
        self.mem_g = np.zeros((0, n_points))
        self.idxs_tmc = np.zeros((0, n_sources), int)
        self.idxs_g = np.zeros((0, n_sources), int)
        pkl.dump({'mem_tmc': self.mem_tmc, 'idxs_tmc': self.idxs_tmc}, 
                 open(tmc_dir, 'wb'))
        if model_family not in ['logistic', 'NN']:
            return
        pkl.dump({'mem_g': self.mem_g, 'idxs_g': self.idxs_g}, 
                 open(g_dir, 'wb')) 
Author: amiratag, Project: DataShapley, Lines: 22, Source file: DShap.py

Example 8: save_results

# Required module import: import _pickle [as alias]
# Or: from _pickle import dump [as alias]
def save_results(self, overwrite=False):
        """Saves results computed so far."""
        if self.directory is None:
            return
        loo_dir = os.path.join(self.directory, 'loo.pkl')
        if not os.path.exists(loo_dir) or overwrite:
            pkl.dump({'loo': self.vals_loo}, open(loo_dir, 'wb'))
        tmc_dir = os.path.join(
            self.directory, 
            'mem_tmc_{}.pkl'.format(self.tmc_number.zfill(4))
        )
        g_dir = os.path.join(
            self.directory, 
            'mem_g_{}.pkl'.format(self.g_number.zfill(4))
        )  
        pkl.dump({'mem_tmc': self.mem_tmc, 'idxs_tmc': self.idxs_tmc}, 
                 open(tmc_dir, 'wb'))
        pkl.dump({'mem_g': self.mem_g, 'idxs_g': self.idxs_g}, 
                 open(g_dir, 'wb')) 
Author: amiratag, Project: DataShapley, Lines: 21, Source file: DShap.py

Example 9: test

# Required module import: import _pickle [as alias]
# Or: from _pickle import dump [as alias]
def test():
    print('\nTesting:')
    total_test_loss = 0

    bar = progressbar.ProgressBar(maxval=test_set_size, redirect_stdout=False)
    for i, test_song in enumerate(test_set):
        X_test, Y_test = make_feature_vector(test_song, chord_test_set[i], chord_embed_method)

        loss = model.evaluate(X_test, Y_test, batch_size=batch_size, verbose=verbose)
        model.reset_states()
        total_test_loss += loss
        bar.update(i)
    total_test_loss_array.append(total_test_loss/test_set_size)
    print('\nTotal test loss: ', total_test_loss/test_set_size)
    print('-'*50)
    plt.plot(total_test_loss_array, 'b-')
    plt.plot(total_train_loss_array, 'r-')
#    plt.axis([0, epochs, 0, 5])
    if show_plot: plt.show()
    if save_plot: plt.savefig(model_path+'plot.png')
    pickle.dump(total_test_loss_array,open(model_path+'total_test_loss_array.pickle', 'wb'))
    pickle.dump(total_train_loss_array,open(model_path+'total_train_loss_array.pickle', 'wb'))

# Make feature vectors with the notes and the chord information 
Author: brunnergino, Project: JamBot, Lines: 26, Source file: polyphonic_lstm_training.py

Example 10: test

# Required module import: import _pickle [as alias]
# Or: from _pickle import dump [as alias]
def test():
    print('\nTesting:')
    total_test_loss = 0

    bar = progressbar.ProgressBar(maxval=test_set_size, redirect_stdout=False)
    for i, test_song in enumerate(test_set):
        X_test = test_song[:-1]
        Y_test = np_utils.to_categorical(test_song[1:], num_classes=num_chords)
        loss = model.evaluate(X_test, Y_test, batch_size=batch_size, verbose=verbose)
        model.reset_states()
        total_test_loss += loss
        bar.update(i+1)
    total_test_loss_array.append(total_test_loss/test_set_size)
    print('\nTotal test loss: ', total_test_loss/test_set_size)
    print('-'*50)
    plt.plot(total_test_loss_array, 'b-', label='test loss')
    plt.plot(total_train_loss_array, 'r-', label='train loss')
#    plt.legend()
    plt.ylabel(model_path)
#    plt.axis([0, 50, 3, 5])
    plt.grid()
    if show_plot: plt.show()
    if save_plot: plt.savefig(model_path+'plot.png')
    pickle.dump(total_test_loss_array,open(model_path+'total_test_loss_array.pickle', 'wb'))
    pickle.dump(total_train_loss_array,open(model_path+'total_train_loss_array.pickle', 'wb')) 
Author: brunnergino, Project: JamBot, Lines: 27, Source file: chord_lstm_training.py

Example 11: save_resources

# Required module import: import _pickle [as alias]
# Or: from _pickle import dump [as alias]
def save_resources(self, output_resources_pickle_file):
        print("saving the resources into the file {}".format(output_resources_pickle_file))
        pickle_content = {}       
        pickle_content["word_to_ix_map"] = self.word_to_ix_map
        pickle_content["wordvecs"] = self.wordvecs
        pickle_content["num_embedding_features"] = self.num_embedding_features
        pickle_content["num_classes"] = self.num_classes
        pickle_content["max_sentence_len_train"] = self.max_sentence_len_train
        pickle_content["tag_to_vector_map"] = self.tag_to_vector_map
        pickle_content["vector_to_tag_map"] = self.vector_to_tag_map
        pickle_content["zero_vec_pos"] = self.zero_vec_pos
        
        cPickle.dump(pickle_content, open(output_resources_pickle_file, "wb"))
        print("Done")

    ################################################## 
    #  read_and_parse_test_data 
    ################################################## 
Author: Azure-Samples, Project: MachineLearningSamples-BiomedicalEntityExtraction, Lines: 20, Source file: DataReader.py

Example 12: flush

# Required module import: import _pickle [as alias]
# Or: from _pickle import dump [as alias]
def flush():
    prints = []

    for name, vals in _since_last_flush.items():
        prints.append("{}\t{}".format(name,
            np.mean(list(vals.values()))))
        _since_beginning[name].update(vals)

        x_vals = np.sort(list(_since_beginning[name].keys()))
        y_vals = [_since_beginning[name][x] for x in x_vals]

        plt.clf()
        plt.plot(x_vals, y_vals)
        plt.xlabel('iteration')
        plt.ylabel(name)
        # plt.savefig(name.replace(' ', '_')+'.jpg')

    print ("iter {}\t{}".format(_iter[0], "\t".join(prints)))
    _since_last_flush.clear()

    with open('log.pkl', 'wb') as f:
        pickle.dump(dict(_since_beginning), f, 3) 
Author: neale, Project: Adversarial-Autoencoder, Lines: 24, Source file: plot.py

Example 13: __init__

# Required module import: import _pickle [as alias]
# Or: from _pickle import dump [as alias]
def __init__(self, basepath, vid):
        # load skeleton data (and save it to pickle for next load)
        pickle_file = glob.glob(basepath + '/' + vid + '.pickle')

        if pickle_file:
            with open(pickle_file[0], 'rb') as file:
                self.skeletons = pickle.load(file)
        else:
            files = glob.glob(basepath + '/' + vid + '/*.json')
            if len(files) > 10:
                files = sorted(files)
                self.skeletons = []
                for file in files:
                    self.skeletons.append(self.read_skeleton_json(file))
                with open(basepath + '/' + vid + '.pickle', 'wb') as file:
                    pickle.dump(self.skeletons, file)
            else:
                self.skeletons = [] 
Author: youngwoo-yoon, Project: youtube-gesture-dataset, Lines: 20, Source file: data_utils.py

Example 14: dump_to_file_and_close

# Required module import: import _pickle [as alias]
# Or: from _pickle import dump [as alias]
def dump_to_file_and_close(self):
    json.dump(self.test_results, self.json_dump_file)
    self.json_dump_file.close()


# Record of test runtimes. Has built-in locking. 
Author: google, Project: gtest-parallel, Lines: 8, Source file: gtest_parallel.py

Example 15: get_recs_from_cache

# Required module import: import _pickle [as alias]
# Or: from _pickle import dump [as alias]
def get_recs_from_cache(imagenames, cachedir, cachename):
    # first load gt
    if not os.path.isdir(cachedir):
        os.mkdir(cachedir)
    cachefile = os.path.join(cachedir, cachename)

    if not os.path.isfile(cachefile):
        # load annots
        recs = {}
        for i, imagename in enumerate(imagenames):
            recs[imagename] = parse_rec(get_image_xml_name(imagename))
            #if i % 100 == 0:
            #    print ('Reading annotation for {:d}/{:d}'.format(
            #        i + 1, len(imagenames)))
        # save
        # print ('Saving cached annotations to {:s}'.format(cachefile))
        with open(cachefile, 'wb') as f:
            cPickle.dump(recs, f)
    else:
        # load
        # print ('loaded cached annotations from {:s}'.format(cachefile))
        with open(cachefile, 'rb') as f:
            recs = cPickle.load(f)
        try:
            for imagename in imagenames:
                recs[imagename]
        except Exception as e:
            print("Exception: {0}".format(e))
            print ('\t{:s} is corrupted. retry!!'.format(cachefile))
            os.remove(cachefile)
            recs = get_recs_from_cache(imagenames, cachedir, cachename)
    return recs 
Author: andy-yun, Project: pytorch-0.4-yolov3, Lines: 34, Source file: eval_ap.py


Note: The _pickle.dump examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are excerpted from open-source projects contributed by their authors; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.