

Python _pickle.load Method Code Examples

This article compiles typical usage examples of the _pickle.load method in Python. If you are wondering what _pickle.load does, how to use it, or what it looks like in practice, the curated code examples here may help. You can also explore further usage examples from the _pickle module.


Below are 15 code examples of the _pickle.load method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
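Before diving into the examples, note that _pickle is the C-accelerated implementation behind Python 3's standard pickle module, so _pickle.load(file) behaves like pickle.load(file): it reads one pickled object from an open binary file and returns it. A minimal round-trip sketch (the file name here is arbitrary):

import _pickle

data = {'images': [[0.1, 0.2]], 'labels': ['a']}

# Serialize the object to a binary file...
with open('demo.pkl', 'wb') as f:
    _pickle.dump(data, f)

# ...then read it back with _pickle.load.
with open('demo.pkl', 'rb') as f:
    restored = _pickle.load(f)

assert restored == data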

Example 1: __init__

# Required import: import _pickle [as alias]
# Or: from _pickle import load [as alias]
def __init__(self, filepath):
        """
        Args:
            filepath (string): path to data file
            Data format - list of characters, list of images, (row, col, ch) numpy array normalized between (0.0, 1.0)
            Omniglot dataset - Each language contains a set of characters; Each character is defined by 20 different images
        """
        with open(filepath, "rb") as f:
            processed_data = pickle.load(f)

        self.data = dict()
        for image, label in zip(processed_data['images'], processed_data['labels']):
            if label not in self.data:
                self.data[label] = list()
            img = np.expand_dims(image, axis=0).astype('float32')
            #img /= 255.0
            self.data[label].append(img)
        self.num_categories = len(self.data)
        self.category_size = len(self.data[processed_data['labels'][0]]) 
Developer ID: RUSH-LAB, Project: LSH_Memory, Lines of code: 21, Source file: omniglot.py
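For reference, Example 1 expects the pickle to hold a dict with parallel 'images' and 'labels' lists. A minimal sketch of writing a compatible file (the shapes and label names are illustrative, not the real Omniglot preprocessing):

import numpy as np
import _pickle as pickle

# Two hypothetical characters, 20 images each, matching the loader's assumptions.
processed_data = {
    'images': [np.random.rand(28, 28, 1).astype('float32') for _ in range(40)],
    'labels': ['alpha'] * 20 + ['beta'] * 20,
}
with open('omniglot.pkl', 'wb') as f:
    pickle.dump(processed_data, f)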

Example 2: get_class_detection

# Required import: import _pickle [as alias]
# Or: from _pickle import load [as alias]
def get_class_detection(imagenames, classname):
    # Collect the detection results for the given class from each image's result file.
    classlines = []
    for imagename in imagenames:
        det = get_det_result_name(imagename)
        lines = get_class_det_result(det, classname)
        classlines.extend(lines)

    #print(classlines)
    ids = [x[0] for x in classlines]
    conf = np.array([float(x[1]) for x in classlines])
    bb = np.array([[float(z) for z in x[2:]] for x in classlines])

    #print(ids)
    #print(bb)
    #print(conf)

    return ids, conf, bb 
Developer ID: andy-yun, Project: pytorch-0.4-yolov3, Lines of code: 20, Source file: eval_ap.py

Example 3: test_cache_save

# Required import: import _pickle [as alias]
# Or: from _pickle import load [as alias]
def test_cache_save(self):
        try:
            start_time = time.time()
            embed, vocab, d = process_data_1('test/data_for_tests/embedding/small_static_embedding/word2vec_test.txt',
                                             'test/data_for_tests/cws_train')
            end_time = time.time()
            pre_time = end_time - start_time
            with open('test/demo1.pkl', 'rb') as f:
                _embed, _vocab, _d = _pickle.load(f)
            self.assertEqual(embed.shape, _embed.shape)
            for i in range(embed.shape[0]):
                self.assertListEqual(embed[i].tolist(), _embed[i].tolist())
            start_time = time.time()
            embed, vocab, d = process_data_1('test/data_for_tests/embedding/small_static_embedding/word2vec_test.txt',
                                             'test/data_for_tests/cws_train')
            end_time = time.time()
            read_time = end_time - start_time
            print("Read using {:.3f}, while prepare using:{:.3f}".format(read_time, pre_time))
            self.assertGreater(pre_time - 0.5, read_time)
        finally:
            os.remove('test/demo1.pkl') 
Developer ID: fastnlp, Project: fastNLP, Lines of code: 23, Source file: test_utils.py

Example 4: test_cache_save_overwrite_path

# Required import: import _pickle [as alias]
# Or: from _pickle import load [as alias]
def test_cache_save_overwrite_path(self):
        try:
            start_time = time.time()
            embed, vocab, d = process_data_1('test/data_for_tests/embedding/small_static_embedding/word2vec_test.txt', 'test/data_for_tests/cws_train',
                                             _cache_fp='test/demo_overwrite.pkl')
            end_time = time.time()
            pre_time = end_time - start_time
            with open('test/demo_overwrite.pkl', 'rb') as f:
                _embed, _vocab, _d = _pickle.load(f)
            self.assertEqual(embed.shape, _embed.shape)
            for i in range(embed.shape[0]):
                self.assertListEqual(embed[i].tolist(), _embed[i].tolist())
            start_time = time.time()
            embed, vocab, d = process_data_1('test/data_for_tests/embedding/small_static_embedding/word2vec_test.txt',
                                             'test/data_for_tests/cws_train',
                                             _cache_fp='test/demo_overwrite.pkl')
            end_time = time.time()
            read_time = end_time - start_time
            print("Read using {:.3f}, while prepare using:{:.3f}".format(read_time, pre_time))
            self.assertGreater(pre_time - 0.5, read_time)
        finally:
            os.remove('test/demo_overwrite.pkl') 
Developer ID: fastnlp, Project: fastNLP, Lines of code: 24, Source file: test_utils.py

Example 5: test_cache_refresh

# Required import: import _pickle [as alias]
# Or: from _pickle import load [as alias]
def test_cache_refresh(self):
        try:
            start_time = time.time()
            embed, vocab, d = process_data_1('test/data_for_tests/embedding/small_static_embedding/word2vec_test.txt',
                                             'test/data_for_tests/cws_train',
                                             _refresh=True)
            end_time = time.time()
            pre_time = end_time - start_time
            with open('test/demo1.pkl', 'rb') as f:
                _embed, _vocab, _d = _pickle.load(f)
            self.assertEqual(embed.shape, _embed.shape)
            for i in range(embed.shape[0]):
                self.assertListEqual(embed[i].tolist(), _embed[i].tolist())
            start_time = time.time()
            embed, vocab, d = process_data_1('test/data_for_tests/embedding/small_static_embedding/word2vec_test.txt',
                                             'test/data_for_tests/cws_train',
                                             _refresh=True)
            end_time = time.time()
            read_time = end_time - start_time
            print("Read using {:.3f}, while prepare using:{:.3f}".format(read_time, pre_time))
            self.assertGreater(0.1, pre_time - read_time)
        finally:
            os.remove('test/demo1.pkl') 
Developer ID: fastnlp, Project: fastNLP, Lines of code: 25, Source file: test_utils.py
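Examples 3 through 5 exercise a pickle-backed caching pattern: the first call to process_data_1 computes its result and pickles it to disk, while later calls simply unpickle the cache, unless _refresh forces recomputation as in Example 5. A simplified stand-in for such a decorator, not fastNLP's actual cache_results implementation, might look like this:

import os
import functools
import _pickle

def cache_results(_cache_fp, _refresh=False):
    # Simplified pickle-backed cache decorator (illustrative stand-in).
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            if not _refresh and os.path.exists(_cache_fp):
                with open(_cache_fp, 'rb') as f:
                    return _pickle.load(f)   # fast path: reuse the cached result
            result = func(*args, **kwargs)
            with open(_cache_fp, 'wb') as f:
                _pickle.dump(result, f)      # slow path: compute, then cache
            return result
        return wrapper
    return decorator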

Example 6: _load_all

# Required import: import _pickle [as alias]
# Or: from _pickle import load [as alias]
def _load_all(src):
    model_path = src
    src = os.path.dirname(src)

    word_v = _load(src+'/word_v.pkl')
    pos_v = _load(src+'/pos_v.pkl')
    tag_v = _load(src+'/tag_v.pkl')
    pos_pp = torch.load(src+'/pos_pp.pkl')['pipeline']

    model_args = ConfigSection()
    ConfigLoader.load_config('cfg.cfg', {'model': model_args})
    model_args['word_vocab_size'] = len(word_v)
    model_args['pos_vocab_size'] = len(pos_v)
    model_args['num_label'] = len(tag_v)

    model = BiaffineParser(**model_args.data)
    model.load_state_dict(torch.load(model_path))
    return {
        'word_v': word_v,
        'pos_v': pos_v,
        'tag_v': tag_v,
        'model': model,
        'pos_pp': pos_pp,
    } 
Developer ID: fastnlp, Project: fastNLP, Lines of code: 26, Source file: infer.py
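The _load helper called above is not shown in this snippet; a plausible reconstruction, assuming it simply unpickles one object per vocabulary file, would be (hypothetical, not fastNLP's actual source):

import _pickle

def _load(path):
    # Unpickle a single object (e.g. a vocabulary) from a binary file.
    with open(path, 'rb') as f:
        return _pickle.load(f)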

Example 7: __init__

# Required import: import _pickle [as alias]
# Or: from _pickle import load [as alias]
def __init__(self, data_dir, dataset, feat_fname, label_fname,
                 batch_size, feature_type, mode='train',
                 batch_order='labels', norm='l2', start_index=0,
                 end_index=-1):
        # TODO Option to load only features; useful in prediction
        self.feature_type = feature_type
        self.norm = norm
        self.features, self.labels = None, None
        self.batch_size = batch_size
        self.start_index = start_index
        self.end_index = end_index
        self.batch_order = batch_order
        self.mode = mode
        self.valid_labels = None
        self.num_valid_labels = None
        self.batches = None
        self.construct(data_dir, dataset, feat_fname, label_fname) 
Developer ID: kunaldahiya, Project: pyxclib, Lines of code: 19, Source file: data_loader.py

Example 8: load_model

# Required import: import _pickle [as alias]
# Or: from _pickle import load [as alias]
def load_model(args):

    with open(os.path.join(args.utils_dir, 'vocab.pkl'), 'rb') as f:
        vocab = pickle.load(f)
    with open(os.path.join(args.utils_dir, 'tags.pkl'), 'rb') as f:
        tags = pickle.load(f)

    args.vocab_size = len(vocab)
    args.tag_size = len(tags)
    args.batch_size = 1

    if args.model == "lstm":
        ckpt_path = os.path.join(args.train_dir, "lstm/lstm.ckpt")  # POS model checkpoint path
    else:
        ckpt_path = os.path.join(args.train_dir, "bilstm/bilstm.ckpt")  # POS model checkpoint path
    return ModelLoader(args, ckpt_path) 
Developer ID: koala-ai, Project: tensorflow_nlp, Lines of code: 18, Source file: predict.py

Example 9: load_preprocessed

# Required import: import _pickle [as alias]
# Or: from _pickle import load [as alias]
def load_preprocessed(self, vocab_file, data_path, label_file):
        if vocab_file is not None:
            with open(vocab_file, 'rb') as f:
                self.chars = pickle.load(f)
            self.vocab_size = len(self.chars) + 1
            self.vocab = dict(zip(self.chars, range(1, len(self.chars) + 1)))

        if label_file is not None:
            with open(label_file, 'rb') as f:
                self.labels = pickle.load(f)
            self.label_size = len(self.labels)

        data = pd.read_csv(data_path, encoding='utf8')
        tensor_x = np.array(list(map(self.transform, data['text'])))
        tensor_y = np.array(list(map(self.labels.get, data['label'])))
        self.tensor = np.c_[tensor_x, tensor_y].astype(int) 
Developer ID: koala-ai, Project: tensorflow_nlp, Lines of code: 18, Source file: dataset.py

Example 10: __init__

# Required import: import _pickle [as alias]
# Or: from _pickle import load [as alias]
def __init__(self, path, exts, fields, load_dataset=False, prefix='', **kwargs):
        if not isinstance(fields[0], (tuple, list)):
            fields = [('src', fields[0]), ('trg', fields[1]), ('dec', fields[2])]

        src_path, trg_path, dec_path = tuple(os.path.expanduser(path + x) for x in exts)
        if load_dataset and (os.path.exists(path + '.processed.{}.pt'.format(prefix))):
            examples = torch.load(path + '.processed.{}.pt'.format(prefix))
        else:
            examples = []
            with open(src_path) as src_file, open(trg_path) as trg_file, open(dec_path) as dec_file:
                for src_line, trg_line, dec_line in zip(src_file, trg_file, dec_file):
                    src_line, trg_line, dec_line = src_line.strip(), trg_line.strip(), dec_line.strip()
                    if src_line != '' and trg_line != '' and dec_line != '':
                        examples.append(data.Example.fromlist(
                            [src_line, trg_line, dec_line], fields))
            if load_dataset:
                torch.save(examples, path + '.processed.{}.pt'.format(prefix))

        super(datasets.TranslationDataset, self).__init__(examples, fields, **kwargs) 
Developer ID: nyu-dl, Project: dl4mt-nonauto, Lines of code: 21, Source file: data.py

Example 11: load_model

# Required import: import _pickle [as alias]
# Or: from _pickle import load [as alias]
def load_model(model, path):
    dumps = torch.load(path, map_location="cpu")

    if model is None:
        assert isinstance(dumps, nn.Module), "model is None but load %s" % type(dumps)
        model = dumps
    else:
        if isinstance(dumps, nn.Module):
            dumps = dumps.state_dict()
        else:
            assert isinstance(dumps, dict), type(dumps)
        res = model.load_state_dict(dumps, strict=False)
        assert len(res.unexpected_keys) == 0, res.unexpected_keys
        logger.info("missing keys in init-weights %s", res.missing_keys)
    logger.info("load init-weights from %s", path)
    return model 
Developer ID: choosewhatulike, Project: sparse-sharing, Lines of code: 18, Source file: utils.py

Example 12: load_model

# Required import: import _pickle [as alias]
# Or: from _pickle import load [as alias]
def load_model(self, sess):
        if sess is not None:
            self.saver.restore(sess, self.config.LOAD_PATH)
            print('Done loading model')
        with open(self.config.LOAD_PATH + '.dict', 'rb') as file:
            if self.subtoken_to_index is not None:
                return
            print('Loading dictionaries from: ' + self.config.LOAD_PATH)
            self.subtoken_to_index = pickle.load(file)
            self.index_to_subtoken = pickle.load(file)
            self.subtoken_vocab_size = pickle.load(file)

            self.target_to_index = pickle.load(file)
            self.index_to_target = pickle.load(file)
            self.target_vocab_size = pickle.load(file)

            self.node_to_index = pickle.load(file)
            self.index_to_node = pickle.load(file)
            self.nodes_vocab_size = pickle.load(file)

            self.num_training_examples = pickle.load(file)
            self.epochs_trained = pickle.load(file)
            saved_config = pickle.load(file)
            self.config.take_model_hyperparams_from(saved_config)
            print('Done loading dictionaries') 
Developer ID: tech-srl, Project: code2seq, Lines of code: 27, Source file: model.py
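Example 12 relies on the fact that successive pickle.dump calls append independent pickle streams to one file, and successive pickle.load calls on the same file object read them back in the same order. A minimal sketch of that write/read pairing (file name and objects are illustrative):

import _pickle as pickle

# Write several objects back-to-back into one binary file.
with open('model.dict', 'wb') as f:
    pickle.dump({'foo': 0}, f)   # e.g. a token-to-index mapping
    pickle.dump(['foo'], f)      # e.g. an index-to-token list
    pickle.dump(1, f)            # e.g. a vocabulary size

# Read them back in exactly the same order they were written.
with open('model.dict', 'rb') as f:
    token_to_index = pickle.load(f)
    index_to_token = pickle.load(f)
    vocab_size = pickle.load(f)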

Example 13: __read_test_times_file

# Required import: import _pickle [as alias]
# Or: from _pickle import load [as alias]
def __read_test_times_file(fd):
    try:
      with gzip.GzipFile(fileobj=fd, mode='rb') as gzf:
        times = cPickle.load(gzf)
    except Exception:
      # File doesn't exist, isn't readable, is malformed---whatever.
      # Just ignore it.
      return None
    else:
      return times 
Developer ID: google, Project: gtest-parallel, Lines of code: 12, Source file: gtest_parallel.py
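Example 13 works because _pickle.load accepts any binary file-like object, including a gzip.GzipFile, so pickles can be stored compressed. A minimal sketch of the matching write path (assuming import _pickle as cPickle, as the example's alias suggests):

import gzip
import _pickle as cPickle

times = {'some_test': 1.23}

# Compress the pickle on the way out...
with gzip.open('times.gz', 'wb') as gzf:
    cPickle.dump(times, gzf)

# ...and decompress transparently on the way back in.
with gzip.open('times.gz', 'rb') as gzf:
    restored = cPickle.load(gzf)

assert restored == times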

Example 14: get_recs_from_cache

# Required import: import _pickle [as alias]
# Or: from _pickle import load [as alias]
def get_recs_from_cache(imagenames, cachedir, cachename):
    # first load gt
    if not os.path.isdir(cachedir):
        os.mkdir(cachedir)
    cachefile = os.path.join(cachedir, cachename)

    if not os.path.isfile(cachefile):
        # load annots
        recs = {}
        for i, imagename in enumerate(imagenames):
            recs[imagename] = parse_rec(get_image_xml_name(imagename))
            #if i % 100 == 0:
            #    print ('Reading annotation for {:d}/{:d}'.format(
            #        i + 1, len(imagenames)))
        # save
        # print ('Saving cached annotations to {:s}'.format(cachefile))
        with open(cachefile, 'wb') as f:
            cPickle.dump(recs, f)
    else:
        # load
        # print ('loaded cached annotations from {:s}'.format(cachefile))
        with open(cachefile, 'rb') as f:
            recs = cPickle.load(f)
        try:
            for imagename in imagenames:
                recs[imagename]
        except Exception as e:
            print("Exception: {0}".format(e))
            print ('\t{:s} is corrupted. retry!!'.format(cachefile))
            os.remove(cachefile)
            recs = get_recs_from_cache(imagenames, cachedir, cachename)
    return recs 
Developer ID: andy-yun, Project: pytorch-0.4-yolov3, Lines of code: 34, Source file: eval_ap.py

Example 15: load_state_dict_from_url

# Required import: import _pickle [as alias]
# Or: from _pickle import load [as alias]
def load_state_dict_from_url(self, url, version=1):
        '''Downloads and copies parameters from the state_dict at the url into this module.'''
        if not os.path.exists(home_dir+'/pretrained/'):
            os.makedirs(home_dir+'/pretrained/')
        model_dir = home_dir+'/pretrained'
        from urllib.parse import urlparse
        parts = urlparse(url)
        filename = os.path.basename(parts.path)
        cache = os.path.join(model_dir, filename)
        if not os.path.exists(cache): 
            from urllib.request import urlretrieve
            urlretrieve(url, cache, reporthook=download_progress) 
            print('\n')
        self.load(cache, version) 
Developer ID: Kashu7100, Project: Qualia2.0, Lines of code: 17, Source file: module.py


Note: The _pickle.load method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Before redistributing or reusing the code, please consult the corresponding project's license; do not reproduce this compilation without permission.