This article collects typical code examples of the `_pickle.dump` method in Python. If you are struggling with questions such as: what exactly does `_pickle.dump` do? How is it called? What does it look like in real code? Then the curated examples below may help. You can also explore further usage examples from the `_pickle` module in which this method lives.
The following presents 15 code examples of the `_pickle.dump` method, sorted by popularity.
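For orientation, here is a minimal self-contained sketch of `_pickle.dump` and its `load` counterpart. In everyday code you would normally `import pickle`, which re-exports these C-accelerated functions; the file name `record.pkl` is just an illustration.

import _pickle

# Serialize an object to a binary file, then read it back.
record = {'name': 'example', 'scores': [0.91, 0.87]}
with open('record.pkl', 'wb') as f:
    _pickle.dump(record, f)
with open('record.pkl', 'rb') as f:
    assert _pickle.load(f) == record  # round-trips to an equal object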
Example 1: write_to_file
# Required import: import _pickle [as alias]
# Or: from _pickle import dump [as alias]
def write_to_file(self, save_file):
    "Write all the times to file."
    try:
        with TestTimes.LockedFile(save_file, 'a+b') as fd:
            times = TestTimes.__read_test_times_file(fd)
            if times is None:
                times = self.__times
            else:
                times.update(self.__times)
            # We erase data from file while still holding a lock to it. This
            # way reading old test times and appending new ones are atomic
            # for external viewer.
            fd.seek(0)
            fd.truncate()
            with gzip.GzipFile(fileobj=fd, mode='wb') as gzf:
                cPickle.dump(times, gzf, PICKLE_HIGHEST_PROTOCOL)
    except IOError:
        pass  # ignore errors---saving the times isn't that important
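`PICKLE_HIGHEST_PROTOCOL` and `TestTimes.__read_test_times_file` belong to the surrounding module (this snippet appears to come from gtest-parallel's test-times cache) and are not shown on this page. A plausible sketch of the read side, assuming it simply unpickles the gzip stream and returns None when the file is empty or unreadable:

@staticmethod
def __read_test_times_file(fd):
    # Hypothetical counterpart: unpickle the gzipped stream; treat any
    # failure (empty file, corrupt data) as "no recorded times".
    try:
        with gzip.GzipFile(fileobj=fd, mode='rb') as gzf:
            return cPickle.load(gzf)
    except Exception:
        return None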
Example 2: writeTo
# Required import: import _pickle [as alias]
# Or: from _pickle import dump [as alias]
def writeTo(self):
    company = self.dest_file
    for webp in self.date:
        data_dump_dir = os.path.join(DATA_DIR, 'content', company, webp)
        if not os.path.exists(data_dump_dir):
            os.makedirs(data_dump_dir)
        temp = {'date': self.date[webp],
                'title': self.titles[webp],
                'content': self.contents[webp],
                'url': self.total_links[webp],
                }
        base_name = self.file.split('.data')[0]
        with open(os.path.join(data_dump_dir, 'raw_' + base_name + '_' + webp + '.pkl'), 'wb') as fp:
            pickle.dump(temp, fp)
        df = pd.DataFrame(temp)
        df.set_index('date', inplace=True)
        df.to_pickle(os.path.join(data_dump_dir, base_name + '_' + webp + '_content.pkl'))
        df.to_csv(os.path.join(data_dump_dir, base_name + '_' + webp + '_content.csv'))
Example 3: preprocess
# Required import: import _pickle [as alias]
# Or: from _pickle import dump [as alias]
def preprocess(self, vocab_file, corpus_file, data_path, label_file):
    corpus = pd.read_csv(corpus_file, encoding='utf8')
    labels = corpus['label'].drop_duplicates().values
    corpus = corpus['text']
    corpus = ''.join(map(lambda i: i.strip(), corpus))
    self.labels = dict(zip(labels, range(len(labels))))
    self.label_size = len(labels)
    with open(label_file, 'wb') as f:
        pickle.dump(self.labels, f)
    counter = collections.Counter(corpus)
    count_pairs = sorted(counter.items(), key=lambda i: -i[1])
    self.chars, _ = zip(*count_pairs)
    with open(vocab_file, 'wb') as f:
        pickle.dump(self.chars, f)
    self.vocab_size = len(self.chars) + 1
    self.vocab = dict(zip(self.chars, range(1, len(self.chars) + 1)))
    data = pd.read_csv(data_path, encoding='utf8')
    tensor_x = np.array(list(map(self.transform, data['text'])))
    tensor_y = np.array(list(map(self.labels.get, data['label'])))
    self.tensor = np.c_[tensor_x, tensor_y].astype(int)
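The `transform` method is not shown on this page. A minimal sketch of what it plausibly does, assuming a fixed sequence length stored in a hypothetical `self.seq_length` attribute and id 0 reserved for unknown/padding characters:

def transform(self, text):
    # Hypothetical sketch: map each character to its vocabulary id,
    # using 0 for out-of-vocabulary characters, then pad/truncate to a
    # fixed length (self.seq_length is an assumed attribute name).
    ids = [self.vocab.get(ch, 0) for ch in text[:self.seq_length]]
    return ids + [0] * (self.seq_length - len(ids))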
Example 4: save_model
# Required import: import _pickle [as alias]
# Or: from _pickle import dump [as alias]
def save_model(self, sess, path):
    save_target = path + '_iter%d' % self.epochs_trained
    dirname = os.path.dirname(save_target)
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    self.saver.save(sess, save_target)

    dictionaries_path = save_target + '.dict'
    with open(dictionaries_path, 'wb') as file:
        pickle.dump(self.subtoken_to_index, file)
        pickle.dump(self.index_to_subtoken, file)
        pickle.dump(self.subtoken_vocab_size, file)
        pickle.dump(self.target_to_index, file)
        pickle.dump(self.index_to_target, file)
        pickle.dump(self.target_vocab_size, file)
        pickle.dump(self.node_to_index, file)
        pickle.dump(self.index_to_node, file)
        pickle.dump(self.nodes_vocab_size, file)
        pickle.dump(self.num_training_examples, file)
        pickle.dump(self.epochs_trained, file)
        pickle.dump(self.config, file)
    print('Saved after %d epochs in: %s' % (self.epochs_trained, save_target))
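When several objects are dumped into the same file like this, they must be read back with `pickle.load` in exactly the same order. A minimal sketch of the matching load (the name `load_model` is an assumption; the real counterpart is not shown on this page):

def load_model(self, dictionaries_path):
    # Each pickle.load consumes one object, in the order save_model wrote them.
    with open(dictionaries_path, 'rb') as file:
        self.subtoken_to_index = pickle.load(file)
        self.index_to_subtoken = pickle.load(file)
        self.subtoken_vocab_size = pickle.load(file)
        self.target_to_index = pickle.load(file)
        self.index_to_target = pickle.load(file)
        self.target_vocab_size = pickle.load(file)
        self.node_to_index = pickle.load(file)
        self.index_to_node = pickle.load(file)
        self.nodes_vocab_size = pickle.load(file)
        self.num_training_examples = pickle.load(file)
        self.epochs_trained = pickle.load(file)
        self.config = pickle.load(file)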
Example 5: save
# Required import: import _pickle [as alias]
# Or: from _pickle import dump [as alias]
def save(self, filename='weights.pkl'):
    with open(filename, 'wb') as fp:
        pickle.dump(self.weights, fp)
Example 6: save
# Required import: import _pickle [as alias]
# Or: from _pickle import dump [as alias]
def save(self, file_path, gsums=None, learning_rate=None,
         validation_ppl_history=None, best_validation_ppl=None,
         epoch=None, random_state=None):
    try:
        import cPickle
    except ImportError:
        import _pickle as cPickle

    state = {
        "type": self.__class__.__name__,
        "n_hidden": self.n_hidden,
        "x_vocabulary": self.x_vocabulary,
        "y_vocabulary": self.y_vocabulary,
        "stage1_model_file_name": self.stage1_model_file_name if hasattr(self, "stage1_model_file_name") else None,
        "params": [p.get_value(borrow=True) for p in self.params],
        "gsums": [s.get_value(borrow=True) for s in gsums] if gsums else None,
        "learning_rate": learning_rate,
        "validation_ppl_history": validation_ppl_history,
        "epoch": epoch,
        "random_state": random_state
    }
    with open(file_path, 'wb') as f:
        cPickle.dump(state, f, protocol=cPickle.HIGHEST_PROTOCOL)
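A sketch of how such a state dict is typically restored, assuming `self.params` are Theano-style shared variables as the `get_value(borrow=True)` calls above suggest (the `load` helper below is hypothetical, not shown on this page):

def load(self, file_path):
    # Hypothetical counterpart: read the state dict back and copy the
    # saved arrays into the model's shared variables.
    try:
        import cPickle
    except ImportError:
        import _pickle as cPickle
    with open(file_path, 'rb') as f:
        state = cPickle.load(f)
    for p, value in zip(self.params, state["params"]):
        p.set_value(value, borrow=True)
    return state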
Example 7: _create_results_placeholder
# Required import: import _pickle [as alias]
# Or: from _pickle import dump [as alias]
def _create_results_placeholder(self, directory, tmc_number, g_number,
                                n_points, n_sources, model_family):
    tmc_dir = os.path.join(
        directory,
        'mem_tmc_{}.pkl'.format(tmc_number.zfill(4))
    )
    g_dir = os.path.join(
        directory,
        'mem_g_{}.pkl'.format(g_number.zfill(4))
    )
    self.mem_tmc = np.zeros((0, n_points))
    self.mem_g = np.zeros((0, n_points))
    self.idxs_tmc = np.zeros((0, n_sources), int)
    self.idxs_g = np.zeros((0, n_sources), int)
    pkl.dump({'mem_tmc': self.mem_tmc, 'idxs_tmc': self.idxs_tmc},
             open(tmc_dir, 'wb'))
    if model_family not in ['logistic', 'NN']:
        return
    pkl.dump({'mem_g': self.mem_g, 'idxs_g': self.idxs_g},
             open(g_dir, 'wb'))
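Passing a bare `open(...)` to `pkl.dump`, as here and in Example 8 below, relies on garbage collection to close the file. An equivalent sketch of the first dump above using a context manager, which guarantees the file is flushed and closed even if `pkl.dump` raises:

with open(tmc_dir, 'wb') as f:
    pkl.dump({'mem_tmc': self.mem_tmc, 'idxs_tmc': self.idxs_tmc}, f)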
Example 8: save_results
# Required import: import _pickle [as alias]
# Or: from _pickle import dump [as alias]
def save_results(self, overwrite=False):
    """Saves results computed so far."""
    if self.directory is None:
        return
    loo_dir = os.path.join(self.directory, 'loo.pkl')
    if not os.path.exists(loo_dir) or overwrite:
        pkl.dump({'loo': self.vals_loo}, open(loo_dir, 'wb'))
    tmc_dir = os.path.join(
        self.directory,
        'mem_tmc_{}.pkl'.format(self.tmc_number.zfill(4))
    )
    g_dir = os.path.join(
        self.directory,
        'mem_g_{}.pkl'.format(self.g_number.zfill(4))
    )
    pkl.dump({'mem_tmc': self.mem_tmc, 'idxs_tmc': self.idxs_tmc},
             open(tmc_dir, 'wb'))
    pkl.dump({'mem_g': self.mem_g, 'idxs_g': self.idxs_g},
             open(g_dir, 'wb'))
Example 9: test
# Required import: import _pickle [as alias]
# Or: from _pickle import dump [as alias]
def test():
    print('\nTesting:')
    total_test_loss = 0
    bar = progressbar.ProgressBar(maxval=test_set_size, redirect_stdout=False)
    for i, test_song in enumerate(test_set):
        X_test, Y_test = make_feature_vector(test_song, chord_test_set[i], chord_embed_method)
        loss = model.evaluate(X_test, Y_test, batch_size=batch_size, verbose=verbose)
        model.reset_states()
        total_test_loss += loss
        bar.update(i)
    total_test_loss_array.append(total_test_loss / test_set_size)
    print('\nTotal test loss: ', total_test_loss / test_set_size)
    print('-' * 50)
    plt.plot(total_test_loss_array, 'b-')
    plt.plot(total_train_loss_array, 'r-')
    # plt.axis([0, epochs, 0, 5])
    if show_plot: plt.show()
    if save_plot: plt.savefig(model_path + 'plot.png')
    pickle.dump(total_test_loss_array, open(model_path + 'total_test_loss_array.pickle', 'wb'))
    pickle.dump(total_train_loss_array, open(model_path + 'total_train_loss_array.pickle', 'wb'))

# Make feature vectors with the notes and the chord information
Example 10: test
# Required import: import _pickle [as alias]
# Or: from _pickle import dump [as alias]
def test():
    print('\nTesting:')
    total_test_loss = 0
    bar = progressbar.ProgressBar(maxval=test_set_size, redirect_stdout=False)
    for i, test_song in enumerate(test_set):
        X_test = test_song[:-1]
        Y_test = np_utils.to_categorical(test_song[1:], num_classes=num_chords)
        loss = model.evaluate(X_test, Y_test, batch_size=batch_size, verbose=verbose)
        model.reset_states()
        total_test_loss += loss
        bar.update(i + 1)
    total_test_loss_array.append(total_test_loss / test_set_size)
    print('\nTotal test loss: ', total_test_loss / test_set_size)
    print('-' * 50)
    plt.plot(total_test_loss_array, 'b-', label='test loss')
    plt.plot(total_train_loss_array, 'r-', label='train loss')
    # plt.legend()
    plt.ylabel(model_path)
    # plt.axis([0, 50, 3, 5])
    plt.grid()
    if show_plot: plt.show()
    if save_plot: plt.savefig(model_path + 'plot.png')
    pickle.dump(total_test_loss_array, open(model_path + 'total_test_loss_array.pickle', 'wb'))
    pickle.dump(total_train_loss_array, open(model_path + 'total_train_loss_array.pickle', 'wb'))
Example 11: save_resources
# Required import: import _pickle [as alias]
# Or: from _pickle import dump [as alias]
def save_resources(self, output_resources_pickle_file):
    print("saving the resources into the file {}".format(output_resources_pickle_file))
    pickle_content = {}
    pickle_content["word_to_ix_map"] = self.word_to_ix_map
    pickle_content["wordvecs"] = self.wordvecs
    pickle_content["num_embedding_features"] = self.num_embedding_features
    pickle_content["num_classes"] = self.num_classes
    pickle_content["max_sentence_len_train"] = self.max_sentence_len_train
    pickle_content["tag_to_vector_map"] = self.tag_to_vector_map
    pickle_content["vector_to_tag_map"] = self.vector_to_tag_map
    pickle_content["zero_vec_pos"] = self.zero_vec_pos
    cPickle.dump(pickle_content, open(output_resources_pickle_file, "wb"))
    print("Done")

##################################################
# read_and_parse_test_data
##################################################
Author: Azure-Samples, Project: MachineLearningSamples-BiomedicalEntityExtraction, Lines: 20, Source: DataReader.py
Example 12: flush
# Required import: import _pickle [as alias]
# Or: from _pickle import dump [as alias]
def flush():
    prints = []
    for name, vals in _since_last_flush.items():
        prints.append("{}\t{}".format(name,
                                      np.mean(list(vals.values()))))
        _since_beginning[name].update(vals)

        x_vals = np.sort(list(_since_beginning[name].keys()))
        y_vals = [_since_beginning[name][x] for x in x_vals]

        plt.clf()
        plt.plot(x_vals, y_vals)
        plt.xlabel('iteration')
        plt.ylabel(name)
        # plt.savefig(name.replace(' ', '_')+'.jpg')

    print("iter {}\t{}".format(_iter[0], "\t".join(prints)))
    _since_last_flush.clear()

    with open('log.pkl', 'wb') as f:
        pickle.dump(dict(_since_beginning), f, 3)
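The third positional argument to `pickle.dump` here is the protocol number: protocol 3 can be read by any Python 3 interpreter, but not by Python 2.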
Example 13: __init__
# Required import: import _pickle [as alias]
# Or: from _pickle import dump [as alias]
def __init__(self, basepath, vid):
    # load skeleton data (and save it to pickle for next load)
    pickle_file = glob.glob(basepath + '/' + vid + '.pickle')
    if pickle_file:
        with open(pickle_file[0], 'rb') as file:
            self.skeletons = pickle.load(file)
    else:
        files = glob.glob(basepath + '/' + vid + '/*.json')
        if len(files) > 10:
            files = sorted(files)
            self.skeletons = []
            for file in files:
                self.skeletons.append(self.read_skeleton_json(file))
            with open(basepath + '/' + vid + '.pickle', 'wb') as file:
                pickle.dump(self.skeletons, file)
        else:
            self.skeletons = []
Example 14: dump_to_file_and_close
# Required import: import _pickle [as alias]
# Or: from _pickle import dump [as alias]
def dump_to_file_and_close(self):
    json.dump(self.test_results, self.json_dump_file)
    self.json_dump_file.close()

# Record of test runtimes. Has built-in locking.
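Note: this snippet serializes with `json.dump` rather than pickle; the trailing comment appears to introduce the TestTimes class whose write_to_file method is shown in Example 1.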
Example 15: get_recs_from_cache
# Required import: import _pickle [as alias]
# Or: from _pickle import dump [as alias]
def get_recs_from_cache(imagenames, cachedir, cachename):
    # first load gt
    if not os.path.isdir(cachedir):
        os.mkdir(cachedir)
    cachefile = os.path.join(cachedir, cachename)
    if not os.path.isfile(cachefile):
        # load annots
        recs = {}
        for i, imagename in enumerate(imagenames):
            recs[imagename] = parse_rec(get_image_xml_name(imagename))
            # if i % 100 == 0:
            #     print('Reading annotation for {:d}/{:d}'.format(
            #         i + 1, len(imagenames)))
        # save
        # print('Saving cached annotations to {:s}'.format(cachefile))
        with open(cachefile, 'wb') as f:
            cPickle.dump(recs, f)
    else:
        # load
        # print('loaded cached annotations from {:s}'.format(cachefile))
        with open(cachefile, 'rb') as f:
            recs = cPickle.load(f)
        try:
            for imagename in imagenames:
                recs[imagename]
        except Exception as e:
            print("Exception: {0}".format(e))
            print('\t{:s} is corrupted. retry!!'.format(cachefile))
            os.remove(cachefile)
            recs = get_recs_from_cache(imagenames, cachedir, cachename)
    return recs
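After the corrupted cache file is removed, the recursive call takes the first branch and rebuilds the cache from the annotations, so the retry terminates after a single rebuild.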