This article collects typical usage examples of the Python _pickle.load method. If you have been wondering what exactly _pickle.load does, how to call it, or what real code that uses it looks like, the curated code examples below should help. You can also explore further usage examples from the _pickle module, where this method is defined.
The following shows 15 code examples of the _pickle.load method, ordered by popularity by default.
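Before the full examples, here is a minimal, self-contained sketch of the basic round trip with _pickle; the file name and payload are illustrative only (in everyday code you would usually write import pickle, which already uses the C-accelerated _pickle implementation under the hood):

import _pickle

# Write a small object first so the load call below has something to read (illustrative only).
with open('demo.pkl', 'wb') as f:
    _pickle.dump({'images': [], 'labels': []}, f)

with open('demo.pkl', 'rb') as f:
    obj = _pickle.load(f)  # reconstructs the original Python object from the byte stream
print(obj)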
Example 1: __init__
# Required module: import _pickle [as alias]
# Or: from _pickle import load [as alias]
def __init__(self, filepath):
    """
    Args:
        filepath (string): path to data file
    Data format - list of characters, list of images, (row, col, ch) numpy array normalized between (0.0, 1.0)
    Omniglot dataset - Each language contains a set of characters; Each character is defined by 20 different images
    """
    with open(filepath, "rb") as f:
        processed_data = pickle.load(f)
    self.data = dict()
    for image, label in zip(processed_data['images'], processed_data['labels']):
        if label not in self.data:
            self.data[label] = list()
        img = np.expand_dims(image, axis=0).astype('float32')
        #img /= 255.0
        self.data[label].append(img)
    self.num_categories = len(self.data)
    self.category_size = len(self.data[processed_data['labels'][0]])
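Per the docstring above, the pickled file is expected to hold a dict with parallel 'images' and 'labels' entries. The following is only a hedged sketch of how such a file could be produced; the array shapes, label names, and file name are assumptions for illustration, not taken from the original dataset pipeline:

import numpy as np
import _pickle as pickle

# Hypothetical Omniglot-style payload: two characters, each with dummy images.
processed_data = {
    'images': [np.random.rand(28, 28, 1).astype('float32') for _ in range(4)],
    'labels': ['Greek_alpha', 'Greek_alpha', 'Greek_beta', 'Greek_beta'],
}
with open('omniglot_processed.pkl', 'wb') as f:
    pickle.dump(processed_data, f)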
Example 2: get_class_detection
# Required module: import _pickle [as alias]
# Or: from _pickle import load [as alias]
def get_class_detection(imagenames, classname):
    # load annots
    classlines = []
    for i, imagename in enumerate(imagenames):
        det = get_det_result_name(imagename)
        lines = get_class_det_result(det, classname)
        classlines.extend(lines)
    #print(classlines)
    ids = [x[0] for x in classlines]
    conf = np.array([float(x[1]) for x in classlines])
    bb = np.array([[float(z) for z in x[2:]] for x in classlines])
    #print(ids)
    #print(bb)
    #print(conf)
    return ids, conf, bb
Example 3: test_cache_save
# Required module: import _pickle [as alias]
# Or: from _pickle import load [as alias]
def test_cache_save(self):
    try:
        start_time = time.time()
        embed, vocab, d = process_data_1('test/data_for_tests/embedding/small_static_embedding/word2vec_test.txt',
                                         'test/data_for_tests/cws_train')
        end_time = time.time()
        pre_time = end_time - start_time
        with open('test/demo1.pkl', 'rb') as f:
            _embed, _vocab, _d = _pickle.load(f)
        self.assertEqual(embed.shape, _embed.shape)
        for i in range(embed.shape[0]):
            self.assertListEqual(embed[i].tolist(), _embed[i].tolist())
        start_time = time.time()
        embed, vocab, d = process_data_1('test/data_for_tests/embedding/small_static_embedding/word2vec_test.txt',
                                         'test/data_for_tests/cws_train')
        end_time = time.time()
        read_time = end_time - start_time
        print("Read using {:.3f}, while prepare using:{:.3f}".format(read_time, pre_time))
        self.assertGreater(pre_time - 0.5, read_time)
    finally:
        os.remove('test/demo1.pkl')
Example 4: test_cache_save_overwrite_path
# Required module: import _pickle [as alias]
# Or: from _pickle import load [as alias]
def test_cache_save_overwrite_path(self):
    try:
        start_time = time.time()
        embed, vocab, d = process_data_1('test/data_for_tests/embedding/small_static_embedding/word2vec_test.txt',
                                         'test/data_for_tests/cws_train',
                                         _cache_fp='test/demo_overwrite.pkl')
        end_time = time.time()
        pre_time = end_time - start_time
        with open('test/demo_overwrite.pkl', 'rb') as f:
            _embed, _vocab, _d = _pickle.load(f)
        self.assertEqual(embed.shape, _embed.shape)
        for i in range(embed.shape[0]):
            self.assertListEqual(embed[i].tolist(), _embed[i].tolist())
        start_time = time.time()
        embed, vocab, d = process_data_1('test/data_for_tests/embedding/small_static_embedding/word2vec_test.txt',
                                         'test/data_for_tests/cws_train',
                                         _cache_fp='test/demo_overwrite.pkl')
        end_time = time.time()
        read_time = end_time - start_time
        print("Read using {:.3f}, while prepare using:{:.3f}".format(read_time, pre_time))
        self.assertGreater(pre_time - 0.5, read_time)
    finally:
        os.remove('test/demo_overwrite.pkl')
Example 5: test_cache_refresh
# Required module: import _pickle [as alias]
# Or: from _pickle import load [as alias]
def test_cache_refresh(self):
    try:
        start_time = time.time()
        embed, vocab, d = process_data_1('test/data_for_tests/embedding/small_static_embedding/word2vec_test.txt',
                                         'test/data_for_tests/cws_train',
                                         _refresh=True)
        end_time = time.time()
        pre_time = end_time - start_time
        with open('test/demo1.pkl', 'rb') as f:
            _embed, _vocab, _d = _pickle.load(f)
        self.assertEqual(embed.shape, _embed.shape)
        for i in range(embed.shape[0]):
            self.assertListEqual(embed[i].tolist(), _embed[i].tolist())
        start_time = time.time()
        embed, vocab, d = process_data_1('test/data_for_tests/embedding/small_static_embedding/word2vec_test.txt',
                                         'test/data_for_tests/cws_train',
                                         _refresh=True)
        end_time = time.time()
        read_time = end_time - start_time
        print("Read using {:.3f}, while prepare using:{:.3f}".format(read_time, pre_time))
        self.assertGreater(0.1, pre_time - read_time)
    finally:
        os.remove('test/demo1.pkl')
Example 6: _load_all
# Required module: import _pickle [as alias]
# Or: from _pickle import load [as alias]
def _load_all(src):
    model_path = src
    src = os.path.dirname(src)
    word_v = _load(src + '/word_v.pkl')
    pos_v = _load(src + '/pos_v.pkl')
    tag_v = _load(src + '/tag_v.pkl')
    pos_pp = torch.load(src + '/pos_pp.pkl')['pipeline']
    model_args = ConfigSection()
    ConfigLoader.load_config('cfg.cfg', {'model': model_args})
    model_args['word_vocab_size'] = len(word_v)
    model_args['pos_vocab_size'] = len(pos_v)
    model_args['num_label'] = len(tag_v)
    model = BiaffineParser(**model_args.data)
    model.load_state_dict(torch.load(model_path))
    return {
        'word_v': word_v,
        'pos_v': pos_v,
        'tag_v': tag_v,
        'model': model,
        'pos_pp': pos_pp,
    }
Example 7: __init__
# Required module: import _pickle [as alias]
# Or: from _pickle import load [as alias]
def __init__(self, data_dir, dataset, feat_fname, label_fname,
             batch_size, feature_type, mode='train',
             batch_order='labels', norm='l2', start_index=0,
             end_index=-1):
    # TODO Option to load only features; useful in prediction
    self.feature_type = feature_type
    self.norm = norm
    self.features, self.labels = None, None
    self.batch_size = batch_size
    self.start_index = start_index
    self.end_index = end_index
    self.batch_order = batch_order
    self.mode = mode
    self.valid_labels = None
    self.num_valid_labels = None
    self.batches = None
    self.construct(data_dir, dataset, feat_fname, label_fname)
Example 8: load_model
# Required module: import _pickle [as alias]
# Or: from _pickle import load [as alias]
def load_model(args):
    with open(os.path.join(args.utils_dir, 'vocab.pkl'), 'rb') as f:
        vocab = pickle.load(f)
    with open(os.path.join(args.utils_dir, 'tags.pkl'), 'rb') as f:
        tags = pickle.load(f)
    args.vocab_size = len(vocab)
    args.tag_size = len(tags)
    args.batch_size = 1
    if args.model == "lstm":
        ckpt_path = os.path.join(args.train_dir, "lstm/lstm.ckpt")  # POS model checkpoint path
    else:
        ckpt_path = os.path.join(args.train_dir, "bilstm/bilstm.ckpt")  # POS model checkpoint path
    return ModelLoader(args, ckpt_path)
Example 9: load_preprocessed
# Required module: import _pickle [as alias]
# Or: from _pickle import load [as alias]
def load_preprocessed(self, vocab_file, data_path, label_file):
    if vocab_file is not None:
        with open(vocab_file, 'rb') as f:
            self.chars = pickle.load(f)
        self.vocab_size = len(self.chars) + 1
        self.vocab = dict(zip(self.chars, range(1, len(self.chars) + 1)))
    if label_file is not None:
        with open(label_file, 'rb') as f:
            self.labels = pickle.load(f)
        self.label_size = len(self.labels)
    data = pd.read_csv(data_path, encoding='utf8')
    tensor_x = np.array(list(map(self.transform, data['text'])))
    tensor_y = np.array(list(map(self.labels.get, data['label'])))
    self.tensor = np.c_[tensor_x, tensor_y].astype(int)
Example 10: __init__
# Required module: import _pickle [as alias]
# Or: from _pickle import load [as alias]
def __init__(self, path, exts, fields, load_dataset=False, prefix='', **kwargs):
    if not isinstance(fields[0], (tuple, list)):
        fields = [('src', fields[0]), ('trg', fields[1]), ('dec', fields[2])]
    src_path, trg_path, dec_path = tuple(os.path.expanduser(path + x) for x in exts)
    if load_dataset and (os.path.exists(path + '.processed.{}.pt'.format(prefix))):
        examples = torch.load(path + '.processed.{}.pt'.format(prefix))
    else:
        examples = []
        with open(src_path) as src_file, open(trg_path) as trg_file, open(dec_path) as dec_file:
            for src_line, trg_line, dec_line in zip(src_file, trg_file, dec_file):
                src_line, trg_line, dec_line = src_line.strip(), trg_line.strip(), dec_line.strip()
                if src_line != '' and trg_line != '' and dec_line != '':
                    examples.append(data.Example.fromlist(
                        [src_line, trg_line, dec_line], fields))
        if load_dataset:
            torch.save(examples, path + '.processed.{}.pt'.format(prefix))
    super(datasets.TranslationDataset, self).__init__(examples, fields, **kwargs)
Example 11: load_model
# Required module: import _pickle [as alias]
# Or: from _pickle import load [as alias]
def load_model(model, path):
    dumps = torch.load(path, map_location="cpu")
    if model is None:
        assert isinstance(dumps, nn.Module), "model is None but load %s" % type(dumps)
        model = dumps
    else:
        if isinstance(dumps, nn.Module):
            dumps = dumps.state_dict()
        else:
            assert isinstance(dumps, dict), type(dumps)
        res = model.load_state_dict(dumps, strict=False)
        assert len(res.unexpected_keys) == 0, res.unexpected_keys
        logger.info("missing keys in init-weights %s", res.missing_keys)
    logger.info("load init-weights from %s", path)
    return model
Example 12: load_model
# Required module: import _pickle [as alias]
# Or: from _pickle import load [as alias]
def load_model(self, sess):
    if not sess is None:
        self.saver.restore(sess, self.config.LOAD_PATH)
        print('Done loading model')
    with open(self.config.LOAD_PATH + '.dict', 'rb') as file:
        if self.subtoken_to_index is not None:
            return
        print('Loading dictionaries from: ' + self.config.LOAD_PATH)
        self.subtoken_to_index = pickle.load(file)
        self.index_to_subtoken = pickle.load(file)
        self.subtoken_vocab_size = pickle.load(file)
        self.target_to_index = pickle.load(file)
        self.index_to_target = pickle.load(file)
        self.target_vocab_size = pickle.load(file)
        self.node_to_index = pickle.load(file)
        self.index_to_node = pickle.load(file)
        self.nodes_vocab_size = pickle.load(file)
        self.num_training_examples = pickle.load(file)
        self.epochs_trained = pickle.load(file)
        saved_config = pickle.load(file)
        self.config.take_model_hyperparams_from(saved_config)
        print('Done loading dictionaries')
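Example 12 works because repeated pickle.load calls on the same open file return objects in exactly the order they were dumped. Below is a minimal sketch of the matching write side; the file name and the values are illustrative placeholders, not the project's actual save routine:

import _pickle as pickle

with open('model.dict', 'wb') as f:
    pickle.dump({'token': 0}, f)   # e.g. subtoken_to_index
    pickle.dump({0: 'token'}, f)   # e.g. index_to_subtoken
    pickle.dump(1, f)              # e.g. subtoken_vocab_size
    # ...one dump per object, in the same order load_model reads them back

with open('model.dict', 'rb') as f:
    first = pickle.load(f)   # {'token': 0}
    second = pickle.load(f)  # {0: 'token'}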
Example 13: __read_test_times_file
# Required module: import _pickle [as alias]
# Or: from _pickle import load [as alias]
def __read_test_times_file(fd):
    try:
        with gzip.GzipFile(fileobj=fd, mode='rb') as gzf:
            times = cPickle.load(gzf)
    except Exception:
        # File doesn't exist, isn't readable, is malformed---whatever.
        # Just ignore it.
        return None
    else:
        return times
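Example 13 illustrates that pickle.load accepts any readable binary file-like object, including a gzip stream. A hedged sketch of the write side and a matching read, with the file name and payload invented for illustration:

import gzip
import _pickle as cPickle

times = {'test_foo': 0.12, 'test_bar': 3.4}
# Compress the pickled payload so it can later be read back through gzip.GzipFile.
with gzip.open('.test_times.gz', 'wb') as gzf:
    cPickle.dump(times, gzf)

with open('.test_times.gz', 'rb') as fd:
    with gzip.GzipFile(fileobj=fd, mode='rb') as gzf:
        loaded = cPickle.load(gzf)  # {'test_foo': 0.12, 'test_bar': 3.4}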
Example 14: get_recs_from_cache
# Required module: import _pickle [as alias]
# Or: from _pickle import load [as alias]
def get_recs_from_cache(imagenames, cachedir, cachename):
    # first load gt
    if not os.path.isdir(cachedir):
        os.mkdir(cachedir)
    cachefile = os.path.join(cachedir, cachename)
    if not os.path.isfile(cachefile):
        # load annots
        recs = {}
        for i, imagename in enumerate(imagenames):
            recs[imagename] = parse_rec(get_image_xml_name(imagename))
            #if i % 100 == 0:
            #    print ('Reading annotation for {:d}/{:d}'.format(
            #        i + 1, len(imagenames)))
        # save
        # print ('Saving cached annotations to {:s}'.format(cachefile))
        with open(cachefile, 'wb') as f:
            cPickle.dump(recs, f)
    else:
        # load
        # print ('loaded cached annotations from {:s}'.format(cachefile))
        with open(cachefile, 'rb') as f:
            recs = cPickle.load(f)
        try:
            for imagename in imagenames:
                recs[imagename]
        except Exception as e:
            print("Exception: {0}".format(e))
            print('\t{:s} is corrupted. retry!!'.format(cachefile))
            os.remove(cachefile)
            recs = get_recs_from_cache(imagenames, cachedir, cachename)
    return recs
Example 15: load_state_dict_from_url
# Required module: import _pickle [as alias]
# Or: from _pickle import load [as alias]
def load_state_dict_from_url(self, url, version=1):
    '''Downloads and copies parameters from the state_dict at the url into this module.
    '''
    if not os.path.exists(home_dir + '/pretrained/'):
        os.makedirs(home_dir + '/pretrained/')
    model_dir = home_dir + '/pretrained'
    from urllib.parse import urlparse
    parts = urlparse(url)
    filename = os.path.basename(parts.path)
    cache = os.path.join(model_dir, filename)
    if not os.path.exists(cache):
        from urllib.request import urlretrieve
        urlretrieve(url, cache, reporthook=download_progress)
        print('\n')
    self.load(cache, version)