This article collects typical usage examples of the dump function from Python's six.moves.cPickle module. If you have been wondering what exactly dump does, how to call it, or what real-world uses look like, then the hand-picked code samples below should help.
The 15 code examples of the dump function shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
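As a quick refresher before the examples: six.moves.cPickle resolves to cPickle on Python 2 and pickle on Python 3, and dump serializes an object into an open binary file. A minimal round trip (the file name is arbitrary):

from six.moves import cPickle

record = {'name': 'example', 'values': [1, 2, 3]}

# dump serializes the object into a file opened in binary mode...
with open('record.pkl', 'wb') as f:
    cPickle.dump(record, f, cPickle.HIGHEST_PROTOCOL)

# ...and load restores an equivalent object from it.
with open('record.pkl', 'rb') as f:
    restored = cPickle.load(f)

assert restored == record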
Example 1: test_complete
def test_complete():
    fig = plt.figure('Figure with a label?', figsize=(10, 6))
    plt.suptitle('Can you fit any more in a figure?')

    # make some arbitrary data
    x, y = np.arange(8), np.arange(10)
    data = u = v = np.linspace(0, 10, 80).reshape(10, 8)
    v = np.sin(v * -0.6)

    plt.subplot(3, 3, 1)
    plt.plot(list(xrange(10)))

    plt.subplot(3, 3, 2)
    plt.contourf(data, hatches=['//', 'ooo'])
    plt.colorbar()

    plt.subplot(3, 3, 3)
    plt.pcolormesh(data)

    plt.subplot(3, 3, 4)
    plt.imshow(data)

    plt.subplot(3, 3, 5)
    plt.pcolor(data)

    plt.subplot(3, 3, 6)
    plt.streamplot(x, y, u, v)

    plt.subplot(3, 3, 7)
    plt.quiver(x, y, u, v)

    plt.subplot(3, 3, 8)
    plt.scatter(x, x**2, label='$x^2$')
    plt.legend(loc='upper left')

    plt.subplot(3, 3, 9)
    plt.errorbar(x, x * -0.5, xerr=0.2, yerr=0.4)

    ###### plotting is done, now test its pickle-ability #########

    # Uncomment to debug any unpicklable objects. This is slow (~200 seconds).
    # recursive_pickle(fig)

    result_fh = BytesIO()
    pickle.dump(fig, result_fh, pickle.HIGHEST_PROTOCOL)

    plt.close('all')

    # make doubly sure that there are no figures left
    assert_equal(plt._pylab_helpers.Gcf.figs, {})

    # wind back the fh and load in the figure
    result_fh.seek(0)
    fig = pickle.load(result_fh)

    # make sure there is now a figure manager
    assert_not_equal(plt._pylab_helpers.Gcf.figs, {})

    assert_equal(fig.get_label(), 'Figure with a label?')
Example 2: train_model
def train_model(args):
    data_loader = InputHandler(args.data_dir, args.batch_size, args.result_length)
    args.vocabulary_size = data_loader.vocabulary_size

    # Save the original files, so that we can load the model when sampling
    with open(os.path.join(args.snapshots_dir, CONFIGURATION_FILE), 'wb') as f:
        cPickle.dump(args, f)
    with open(os.path.join(args.snapshots_dir, WORDS_VOCABULARY_FILE), 'wb') as f:
        cPickle.dump((data_loader.words, data_loader.vocabulary), f)

    model = RNNModel(args.rnn_size, args.network_depth, args.batch_size, args.result_length,
                     args.vocabulary_size, args.gradient)

    with tf.Session() as session:
        tf.initialize_all_variables().run()
        saver = tf.train.Saver(tf.all_variables())
        for e in range(args.num_epochs):
            session.run(tf.assign(model.lr, args.training_rate * (args.decay_rate ** e)))
            data_loader.set_batch_pointer_to_zero()
            state = model.initial_state.eval()

            for b in range(data_loader.num_batches):
                x, y = data_loader.get_next_batch()
                feed = {model.input_data: x, model.targets: y, model.initial_state: state}
                train_loss, state, _ = session.run([model.cost, model.final_state, model.train_op], feed)
                if (e * data_loader.num_batches + b) % args.snapshot == 0 \
                        or (e == args.num_epochs - 1 and b == data_loader.num_batches - 1):  # save the last result
                    snapshot_path = os.path.join(args.snapshots_dir, 'model.ckpt')
                    saver.save(session, snapshot_path, global_step=e * data_loader.num_batches + b)
                    print("Model snapshot was taken to {}".format(snapshot_path))
Example 3: _run_tmva_training
def _run_tmva_training(self, info):
    """
    Run a subprocess to train the TMVA factory

    :param info: class with additional information
    """
    tmva_process = subprocess.Popen(
        'cd "{directory}"; {executable} -c "from rep.estimators import _tmvaFactory; _tmvaFactory.main()"'.format(
            directory=info.directory,
            executable=sys.executable),
        stdin=PIPE, stdout=PIPE, stderr=subprocess.STDOUT,
        shell=True)

    cPickle.dump(self, tmva_process.stdin)
    cPickle.dump(info, tmva_process.stdin)
    stdout, stderr = tmva_process.communicate()
    assert tmva_process.returncode == 0, \
        'ERROR: TMVA process finished incorrectly\nLOG: %s\n%s' % (stderr, stdout)
    assert 'TrainTree' in root_numpy.list_trees(os.path.join(info.directory, info.tmva_root)), \
        'ERROR: result file does not contain TrainTree'

    xml_filename = os.path.join(info.directory, 'weights',
                                '{job}_{name}.weights.xml'.format(job=info.tmva_job, name=self._method_name))
    with open(xml_filename, 'r') as xml_file:
        self.formula_xml = xml_file.read()
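Examples 3, 11, and 12 all share one pattern: the parent process pickles objects directly into a child process's stdin, and the child unpickles them from its own standard input. A minimal, self-contained sketch of that pattern in plain Python 3 (the inline worker script is made up for illustration):

import pickle
import subprocess
import sys

# The child unpickles one object from its binary stdin.
worker = "import pickle, sys; obj = pickle.load(sys.stdin.buffer); print('got', obj)"

proc = subprocess.Popen([sys.executable, '-c', worker], stdin=subprocess.PIPE)
pickle.dump({'payload': [1, 2, 3]}, proc.stdin)  # parent pickles into the pipe
proc.stdin.close()  # send EOF so the child can finish reading
proc.wait()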
Example 4: setUp
def setUp(self):
    numpy.random.seed(9 + 5 + 2015)
    self.train_features_mock = numpy.random.randint(
        0, 256, (10, 3, 32, 32)).astype('uint8')
    self.train_fine_labels_mock = numpy.random.randint(
        0, 100, (10,)).astype('uint8')
    self.train_coarse_labels_mock = numpy.random.randint(
        0, 20, (10,)).astype('uint8')
    self.test_features_mock = numpy.random.randint(
        0, 256, (10, 3, 32, 32)).astype('uint8')
    self.test_fine_labels_mock = numpy.random.randint(
        0, 100, (10,)).astype('uint8')
    self.test_coarse_labels_mock = numpy.random.randint(
        0, 20, (10,)).astype('uint8')

    self.tempdir = tempfile.mkdtemp()
    cwd = os.getcwd()
    os.chdir(self.tempdir)
    os.mkdir('cifar-100-python')

    filename = os.path.join('cifar-100-python', 'train')
    with open(filename, 'wb') as f:
        cPickle.dump({'data': self.train_features_mock.reshape((10, -1)),
                      'fine_labels': self.train_fine_labels_mock,
                      'coarse_labels': self.train_coarse_labels_mock}, f)

    filename = os.path.join('cifar-100-python', 'test')
    with open(filename, 'wb') as f:
        cPickle.dump({'data': self.test_features_mock.reshape((10, -1)),
                      'fine_labels': self.test_fine_labels_mock,
                      'coarse_labels': self.test_coarse_labels_mock}, f)

    with tarfile.open('cifar-100-python.tar.gz', 'w:gz') as tar_file:
        tar_file.add('cifar-100-python')
    os.chdir(cwd)
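As a sanity check (not part of the original test), the fixture written above can be read back the way CIFAR-100 loaders typically do, by unpickling the batch file straight out of the archive; this assumes the current directory contains the archive created in setUp:

import tarfile
from six.moves import cPickle

with tarfile.open('cifar-100-python.tar.gz', 'r:gz') as tar_file:
    batch_file = tar_file.extractfile('cifar-100-python/train')
    batch = cPickle.load(batch_file)

features = batch['data'].reshape((-1, 3, 32, 32))
print(features.shape, batch['fine_labels'].shape)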
Example 5: append_flipped_rois
def append_flipped_rois(self):
    """
    Append flipped entries to the ROI database.

    This method does not depend on the underlying database, so it is
    implemented here. Note that it does not actually flip the images;
    it flips the bounding boxes instead.
    """
    cache_file = os.path.join(self.cache_path,
                              self.name + '_' + cfg.TRAIN.PROPOSAL_METHOD + '_roidb_flip.pkl')
    if os.path.exists(cache_file):
        with open(cache_file, 'rb') as fid:
            flip_roidb = cPickle.load(fid)
        print('{} gt flipped roidb loaded from {}'.format(self.name, cache_file))
    else:
        num_images = self.num_images
        widths = [PIL.Image.open(self.image_path_at(i)).size[0]
                  for i in range(num_images)]
        flip_roidb = []
        for i in range(num_images):
            boxes = self.roidb[i]['boxes'].copy()
            oldx1 = boxes[:, 0].copy()
            oldx2 = boxes[:, 2].copy()
            boxes[:, 0] = widths[i] - oldx2 - 1
            boxes[:, 2] = widths[i] - oldx1 - 1
            assert (boxes[:, 2] >= boxes[:, 0]).all()
            entry = {'boxes': boxes,
                     'gt_overlaps': self.roidb[i]['gt_overlaps'],
                     'gt_classes': self.roidb[i]['gt_classes'],
                     'flipped': True}
            flip_roidb.append(entry)
        with open(cache_file, 'wb') as fid:
            cPickle.dump(flip_roidb, fid, cPickle.HIGHEST_PROTOCOL)
        print('wrote gt flipped roidb to {}'.format(cache_file))

    self.roidb.extend(flip_roidb)
    self._image_index *= 2
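Examples 5, 8, and 14 all follow the same compute-once, cache-to-pickle idiom: try to load a cached result, and if it is missing, compute the result and dump it for next time. A stripped-down sketch of the idiom (the cache path and the expensive step are placeholders):

import os
from six.moves import cPickle

CACHE_FILE = 'results.pkl'  # placeholder cache location

def expensive_computation():
    # stand-in for any slow step worth caching
    return {'answer': 42}

def load_or_compute():
    if os.path.exists(CACHE_FILE):
        with open(CACHE_FILE, 'rb') as f:
            return cPickle.load(f)
    result = expensive_computation()
    with open(CACHE_FILE, 'wb') as f:
        cPickle.dump(result, f, cPickle.HIGHEST_PROTOCOL)
    return result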
Example 6: fetch_train_thoughts
def fetch_train_thoughts(m, pcs, batches, name="trainthoughts"):
    all_thoughts = []
    for i in range(batches):
        ipt, opt = multi_training.getPieceBatch(pcs)
        thoughts = m.update_thought_fun(ipt, opt)
        all_thoughts.append((ipt, opt, thoughts))
    # use a context manager so the output file is closed reliably
    with open('output/' + name + '.p', 'wb') as f:
        pickle.dump(all_thoughts, f)
Example 7: create_pickle
def create_pickle(data_folders, force=False):
    """Convert data into a separate pickle file for each label.

    data_folders is the list of folder names of all classes.
    Set force=False if the pickle files are already created and are not to be
    overwritten. Set force=True to overwrite already created pickle files.
    """
    # List of names of pickle files for individual classes
    dataset_names = []
    for folder in data_folders:
        set_filename = folder + '.pickle'
        dataset_names.append(set_filename)
        if os.path.exists(set_filename) and not force:
            print('%s already present - Skipping pickling.' % set_filename)
        else:
            print('Pickling %s.' % set_filename)
            dataset = load_emotion(folder)
            try:
                with open(set_filename, 'wb') as f:
                    pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
            except Exception as e:
                print('Unable to save data to', set_filename, ':', e)
    return dataset_names
Example 8: get_abinit_variables
def get_abinit_variables():
    """Returns the database with the description of the ABINIT variables."""
    global __VARS_DATABASE

    if __VARS_DATABASE is None:
        pickle_file = os.path.join(os.getenv("HOME"), ".abinit", "abipy", "abinit_vars.pickle")

        if os.path.exists(pickle_file):
            #print("Reading from pickle")
            with open(pickle_file, "rb") as fh:
                __VARS_DATABASE = pickle.load(fh)
        else:
            # Make dir and file if not present.
            if not os.path.exists(os.path.dirname(pickle_file)):
                os.makedirs(os.path.dirname(pickle_file))

            #print("Reading database from YAML file and generating pickle version. It may take a while...")
            from abipy import data as abidata
            yaml_file = abidata.var_file('abinit_vars.yml')
            with open(yaml_file, "rt") as fh:
                var_list = yaml.load(fh)

            # Build ordered dict with variables in alphabetical order.
            var_list = sorted(var_list, key=lambda v: v.varname)
            __VARS_DATABASE = VariableDatabase([(v.varname, v) for v in var_list])

            # Save the object to the pickle file so that we can reload it from pickle instead of yaml (slower)
            with open(pickle_file, "wb") as fh:
                pickle.dump(__VARS_DATABASE, fh)

    return __VARS_DATABASE
Example 9: load_additional_args
def load_additional_args(self, config):
    """
    """
    self.set_attribute(config, 'request_powermin', 'General',
                       'power min', cast='float')
    self.set_attribute(config, 'request_powermax', 'General',
                       'power max', cast='float')

    # read in the coefficients from file
    coeffs = self.config_get(config, 'PowerMeter', 'coefficients')
    if coeffs is not None:
        self.power_meter_calibration = MeterCalibration(coeffs)

    coeffs = self.config_get(config, 'PowerOutput', 'coefficients')
    if coeffs is not None:
        p = os.path.join(paths.hidden_dir,
                         '{}_power_calibration'.format(self.name.split('.')[0]))
        obj = MeterCalibration(coeffs)

        # dump to the hidden dir; the manager will use it directly
        try:
            self.info('loading power calibration from config file')
            with open(p, 'wb') as f:
                pickle.dump(obj, f)
        except (OSError, pickle.PickleError):
            self.warning('failed loading power output calibration')

    return super(FusionsCO2LogicBoard, self).load_additional_args(config)
Example 10: save_classifier
def save_classifier(cl, fn, use_joblib=True, **kwargs):
    """Save a classifier to disk.

    Parameters
    ----------
    cl : classifier object
        Pickleable object or a classify.VigraRandomForest object.
    fn : string
        Writeable path/filename.
    use_joblib : bool, optional
        Whether to prefer joblib persistence to pickle.
    kwargs : keyword arguments
        Keyword arguments to be passed on to either `pck.dump` or
        `joblib.dump`.

    Returns
    -------
    None

    Notes
    -----
    For joblib persistence, `compress=3` is the default.
    """
    if isinstance(cl, VigraRandomForest):
        cl.save_to_disk(fn)
    elif use_joblib and sklearn_available:
        if "compress" not in kwargs:
            kwargs["compress"] = 3
        joblib.dump(cl, fn, **kwargs)
    else:
        with open(fn, "wb") as f:
            pck.dump(cl, f, protocol=kwargs.get("protocol", 2))
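A side note on the protocol=2 fallback in Example 10: protocol 2 is the highest pickle protocol Python 2 understands, so it is the safe choice when a file written through six must load on either interpreter, while HIGHEST_PROTOCOL yields smaller, faster pickles tied to the writing interpreter's generation. A small sketch (file names are arbitrary):

from six.moves import cPickle

data = {'weights': [0.1, 0.2, 0.3]}

# Portable across Python 2 and 3: protocol 2.
with open('model_portable.pkl', 'wb') as f:
    cPickle.dump(data, f, 2)

# Most compact, but only readable where the writer's highest
# protocol is supported.
with open('model_fast.pkl', 'wb') as f:
    cPickle.dump(data, f, cPickle.HIGHEST_PROTOCOL)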
Example 11: _run_tmva_predict
def _run_tmva_predict(info, data):
    """
    Run a subprocess to compute predictions with the trained TMVA reader

    :param info: class with additional information
    """
    tmva_process = subprocess.Popen(
        'cd "{directory}"; {executable} -c "from rep.estimators import _tmvaReader; _tmvaReader.main()"'.format(
            directory=info.directory,
            executable=sys.executable),
        stdin=PIPE, stdout=PIPE, stderr=subprocess.STDOUT,
        shell=True)

    try:
        cPickle.dump(info, tmva_process.stdin)
        cPickle.dump(data, tmva_process.stdin)
    except:
        # Do nothing here; the process return code is checked below.
        pass
    stdout, stderr = tmva_process.communicate()
    assert tmva_process.returncode == 0, \
        'ERROR: TMVA process finished incorrectly\nLOG: %s\n%s' % (stderr, stdout)

    with open(info.result_filename, 'rb') as predictions_file:
        predictions = cPickle.load(predictions_file)
    return predictions
Example 12: _run_tmva_training
def _run_tmva_training(self, info, X, y, sample_weight):
    """
    Run a subprocess to train the TMVA factory

    :param info: class with additional information
    """
    tmva_process = subprocess.Popen(
        'cd "{directory}"; {executable} -c "from rep.estimators import _tmvaFactory; _tmvaFactory.main()"'.format(
            directory=info.directory,
            executable=sys.executable),
        stdin=PIPE, stdout=PIPE, stderr=subprocess.STDOUT,
        shell=True)

    try:
        cPickle.dump(self, tmva_process.stdin)
        cPickle.dump(info, tmva_process.stdin)
        cPickle.dump(X, tmva_process.stdin)
        cPickle.dump(y, tmva_process.stdin)
        cPickle.dump(sample_weight, tmva_process.stdin)
    except:
        # Keep going; the process output is checked below.
        pass
    stdout, stderr = tmva_process.communicate()
    assert tmva_process.returncode == 0, \
        'ERROR: TMVA process finished incorrectly\nLOG: %s\n%s' % (stderr, stdout)

    xml_filename = os.path.join(info.directory, 'weights',
                                '{job}_{name}.weights.xml'.format(job=info.tmva_job, name=self._method_name))
    with open(xml_filename, 'r') as xml_file:
        self.formula_xml = xml_file.read()
Example 13: train_loop
def train_loop():
    graph_generated = False
    while True:
        while data_q.empty():
            time.sleep(0.1)
        inp = data_q.get()
        if inp == 'end':  # quit
            res_q.put('end')
            break
        elif inp == 'train':  # restart training
            res_q.put('train')
            train = True
            continue
        elif inp == 'val':  # start validation
            # -1 selects the highest available pickle protocol
            with open(LOGPATH + 'model', 'wb') as f:
                pickle.dump(model, f, -1)
            res_q.put('val')
            train = False
            continue

        x = xp.asarray(inp[0])
        y = xp.asarray(inp[1])

        if train:
            optimizer.zero_grads()
            loss = model.forward(x, y, train=True)
            loss.backward()
            optimizer.update()
        else:
            loss = model.forward(x, y, train=False)

        res_q.put(float(cuda.to_cpu(loss.data)))
        del loss, x, y
Example 14: parse_ctgs
def parse_ctgs(bestedges, frgtoctg):
    cache = "frgtoctg.cache"
    if need_update(frgtoctg, cache):
        reads_to_ctgs = {}
        frgtodeg = frgtoctg.replace(".frgctg", ".frgdeg")
        iidtouid = frgtoctg.replace(".posmap.frgctg", ".iidtouid")
        fp = open(iidtouid)
        frgstore = {}
        for row in fp:
            tag, iid, uid = row.split()
            if tag == "FRG":
                frgstore[uid] = int(iid)

        for pf, f in zip(("ctg", "deg"), (frgtoctg, frgtodeg)):
            fp = open(f)
            logging.debug("Parse posmap file `{0}`".format(f))
            for row in fp:
                frg, ctg = row.split()[:2]
                frg = frgstore[frg]
                reads_to_ctgs[frg] = pf + ctg
            logging.debug("Loaded mapping: {0}".format(len(reads_to_ctgs)))

        fw = open(cache, "wb")  # pickle requires a binary-mode file
        dump(reads_to_ctgs, fw)
        fw.close()
        logging.debug("Contig mapping written to `{0}`".format(cache))

    reads_to_ctgs = load(open(cache, "rb"))
    logging.debug("Contig mapping loaded from `{0}`".format(cache))
    return reads_to_ctgs
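The binary-mode fix above ("wb"/"rb" instead of "w") matters on Python 3, where pickle writes bytes and a text-mode file raises TypeError. A quick demonstration:

import pickle

with open('demo.pkl', 'w') as f:   # text mode: fails on Python 3
    try:
        pickle.dump([1, 2, 3], f)
    except TypeError as err:
        print('text mode fails:', err)

with open('demo.pkl', 'wb') as f:  # binary mode: correct
    pickle.dump([1, 2, 3], f)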
Example 15: train
def train(epoch_num, output_dir, *args):
    model_name = args[0][0]
    file = args[0][1]
    log_name = "logs/" + model_name + ".log"
    model_name = output_dir + "training/" + model_name

    # open the log file in append mode
    log_file = open(log_name, 'a+')

    # TODO: gram_num here is a magic number!
    train_chars = LargeCharFeatureGenerator(file, 10)

    if os.path.isfile(model_name):
        with open(model_name, 'rb') as f:
            model = cPickle.load(f)
    else:
        model = SimpleLSTM(train_chars.vocab_size)

    avg_loss = train_with_sgd(model,
                              train_chars,
                              nepoch=_NEPOCH,
                              learning_rate=_LEARNING_RATE,
                              mini_batch_size=_BATCH_SIZE)

    with open(model_name, 'wb') as f:
        cPickle.dump(model, f, protocol=cPickle.HIGHEST_PROTOCOL)

    log_file.write(str(avg_loss))  # write() needs a string, not a number
    log_file.close()