Python cPickle.dump Method Code Examples

This article collects typical usage examples of the six.moves.cPickle.dump method in Python. If you are wondering what cPickle.dump does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples from the containing module, six.moves.cPickle.


The sections below present 15 code examples of cPickle.dump, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
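Before the project examples, here is a minimal round-trip sketch written for this article (it is not taken from any project below, and the file name data.pkl is an arbitrary illustration): cPickle.dump(obj, file, protocol) serializes obj into a file opened in binary mode, and cPickle.load reads it back. Protocol 2 is the highest protocol understood by both Python 2 and Python 3.

# Minimal sketch: dump an object to disk and load it back.
from six.moves import cPickle

data = {'words': ['foo', 'bar'], 'counts': [3, 1]}

# Binary mode is required; protocol 2 works on both Python 2 and 3.
with open('data.pkl', 'wb') as f:
    cPickle.dump(data, f, 2)

with open('data.pkl', 'rb') as f:
    restored = cPickle.load(f)

assert restored == data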

Example 1: create_pkl

# Required import: from six.moves import cPickle [as alias]
# Or alternatively: from six.moves.cPickle import dump [as alias]
def create_pkl():
    with open(settings.TEST_CLASSIFICATION) as f:
        lines = f.read().splitlines()
    with open(settings.TEST_CLASSIFICATION_GT) as f:
        gt_lines = f.read().splitlines()
    assert len(lines) == len(gt_lines)
    test = []
    for i, line in enumerate(lines):
        anno = json.loads(line.strip())
        gt_anno = json.loads(gt_lines[i].strip())
        image = misc.imread(os.path.join(settings.TEST_IMAGE_DIR, anno['file_name']))
        assert image.shape == (anno['height'], anno['width'], 3)
        assert len(anno['proposals']) == len(gt_anno['ground_truth'])
        for proposal, gt in zip(anno['proposals'], gt_anno['ground_truth']):
            cropped = crop(image, proposal['adjusted_bbox'], 32)
            test.append([cropped, gt])
        if i % 100 == 0:
            print('test', i, '/', len(lines))
    with open(settings.TEST_CLS_CROPPED, 'wb') as f:
        cPickle.dump(test, f) 
Author: yuantailing | Project: ctw-baseline | Lines: 22 | Source: predictions2html.py

Example 2: build_y_vocab

# Required import: from six.moves import cPickle [as alias]
# Or alternatively: from six.moves.cPickle import dump [as alias]
def build_y_vocab(self):
        pool = Pool(opt.num_workers)
        try:
            rets = pool.map_async(build_y_vocab,
                                  [(data_path, 'train')
                                   for data_path in opt.train_data_list]).get(99999999)
            pool.close()
            pool.join()
            y_vocab = set()
            for _y_vocab in rets:
                for k in six.iterkeys(_y_vocab):
                    y_vocab.add(k)
            self.y_vocab = {y: idx for idx, y in enumerate(y_vocab)}
        except KeyboardInterrupt:
            pool.terminate()
            pool.join()
            raise
        self.logger.info('size of y vocab: %s' % len(self.y_vocab))
        cPickle.dump(self.y_vocab, open(self.y_vocab_path, 'wb'), 2) 
Author: kakao-arena | Project: shopping-classification | Lines: 21 | Source: data.py

Example 3: save_pkl

# Required import: from six.moves import cPickle [as alias]
# Or alternatively: from six.moves.cPickle import dump [as alias]
def save_pkl(self):
        """
        Dump this object into its `key_pkl` file.

        May raise a cPickle.PicklingError if such an exception is raised at
        pickle time (in which case a warning is also displayed).

        """
        # Note that writing in binary mode is important under Windows.
        try:
            with open(self.key_pkl, 'wb') as f:
                pickle.dump(self, f, protocol=pickle.HIGHEST_PROTOCOL)
        except pickle.PicklingError:
            _logger.warning("Cache leak due to unpickle-able key data %s",
                            self.keys)
            os.remove(self.key_pkl)
            raise 
Author: muhanzhang | Project: D-VAE | Lines: 19 | Source: cmodule.py

Example 4: read_dataset

# Required import: from six.moves import cPickle [as alias]
# Or alternatively: from six.moves.cPickle import dump [as alias]
def read_dataset(data_dir):
    pickle_filename = "lamem.pickle"
    pickle_filepath = os.path.join(data_dir, pickle_filename)
    if not os.path.exists(pickle_filepath):
        utils.maybe_download_and_extract(data_dir, DATA_URL, is_tarfile=True)
        lamem_folder = (DATA_URL.split("/")[-1]).split(os.path.extsep)[0]
        result = {'images': create_image_lists(os.path.join(data_dir, lamem_folder))}
        print ("Pickling ...")
        with open(pickle_filepath, 'wb') as f:
            pickle.dump(result, f, pickle.HIGHEST_PROTOCOL)
    else:
        print ("Found pickle file!")

    with open(pickle_filepath, 'rb') as f:
        result = pickle.load(f)
        training_records = result['images']
        del result

    return training_records 
Author: shekkizh | Project: Colorization.tensorflow | Lines: 21 | Source: read_LaMemDataset.py

Example 5: load

# Required import: from six.moves import cPickle [as alias]
# Or alternatively: from six.moves.cPickle import dump [as alias]
def load(fname):
    """Load an embedding dump generated by `save`"""

    content = _open(fname).read()
    if PY2:
      state = pickle.loads(content)
    else:
      state = pickle.loads(content, encoding='latin1')
    voc, vec = state
    if len(voc) == 2:
      words, counts = voc
      word_count = dict(zip(words, counts))
      vocab = CountedVocabulary(word_count=word_count)
    else:
      vocab = OrderedVocabulary(voc)
    return Embedding(vocabulary=vocab, vectors=vec) 
Author: aboSamoor | Project: polyglot | Lines: 18 | Source: embeddings.py

Example 6: generate_label

# Required import: from six.moves import cPickle [as alias]
# Or alternatively: from six.moves.cPickle import dump [as alias]
def generate_label(cls_dir, labels):
    total_list = []

    cnt = 0
    for label in labels:
        for name in os.listdir(os.path.join(DATA_DIR, cls_dir, label)):
            record = {'name': name, 'label': cnt, 'subdir': label}
            total_list.append(record)
        cnt += 1
    random.shuffle(total_list)
    train_size = int(0.7 * len(total_list))
    print(train_size, len(total_list))

    with open(os.path.join(DATA_DIR, cls_dir, 'train.pickle'), 'wb') as f:
        pickle.dump(total_list[:train_size], f, 2)

    with open(os.path.join(DATA_DIR, cls_dir, 'val.pickle'), 'wb') as f:
        pickle.dump(total_list[train_size:], f, 2) 
Author: SunskyF | Project: EasyPR-python | Lines: 20 | Source: preprocess_easypr.py

Example 7: maybe_pickle

# Required import: from six.moves import cPickle [as alias]
# Or alternatively: from six.moves.cPickle import dump [as alias]
def maybe_pickle(data_folders, min_num_images_per_class, force=False):
  dataset_names = []
  for folder in data_folders:
    set_filename = folder + '.pickle'
    dataset_names.append(set_filename)
    if os.path.exists(set_filename) and not force:
      # You may override by setting force=True.
      print('%s already present - Skipping pickling.' % set_filename)
    else:
      print('Pickling %s.' % set_filename)
      dataset = load_letter(folder, min_num_images_per_class)
      try:
        with open(set_filename, 'wb') as f:
          pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
      except Exception as e:
        print('Unable to save data to', set_filename, ':', e)
  
  return dataset_names 
Author: PacktPublishing | Project: Neural-Network-Programming-with-TensorFlow | Lines: 20 | Source: 1_prepare_pickle_200.py

Example 8: maybe_pickle

# Required import: from six.moves import cPickle [as alias]
# Or alternatively: from six.moves.cPickle import dump [as alias]
def maybe_pickle(data_folders, min_num_images_per_class, force=False):
  dataset_names = []
  for folder in data_folders:
    set_filename = folder + '.pickle'
    dataset_names.append(set_filename)
    if os.path.exists(set_filename) and not force:
      print('%s already present - Skipping pickling.' % set_filename)
    else:
      print('Pickling %s.' % set_filename)
      dataset = load_letter(folder, min_num_images_per_class)
      try:
        with open(set_filename, 'wb') as f:
          #pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
          print(pickle.HIGHEST_PROTOCOL)
          pickle.dump(dataset, f, 2)
      except Exception as e:
        print('Unable to save data to', set_filename, ':', e)
  
  return dataset_names 
Author: PacktPublishing | Project: Neural-Network-Programming-with-TensorFlow | Lines: 21 | Source: prepare_notmnist.py

Example 9: main

# Required import: from six.moves import cPickle [as alias]
# Or alternatively: from six.moves.cPickle import dump [as alias]
def main(params):

  info = json.load(open(params['dict_json'], 'r'))
  imgs = json.load(open(params['input_json'], 'r'))

  itow = info['ix_to_word']
  wtoi = {w:i for i,w in itow.items()}
  wtod = {w:i+1 for w,i in info['wtod'].items()} # word to detection
  # dtoi = {w:i+1 for i,w in enumerate(wtod.keys())} # detection to index
  dtoi = wtod
  wtol = info['wtol']
  itod = {i:w for w,i in dtoi.items()}

  # imgs = imgs['images']

  ngram_idxs, ref_len = build_dict(imgs, info, wtoi, wtod, dtoi, wtol, itod, params)

  # cPickle.dump({'document_frequency': ngram_words, 'ref_len': ref_len}, open(params['output_pkl']+'-words.p','w'), protocol=cPickle.HIGHEST_PROTOCOL)
  cPickle.dump({'document_frequency': ngram_idxs, 'ref_len': ref_len}, open(params['output_pkl']+'-idxs.p','wb'), protocol=cPickle.HIGHEST_PROTOCOL)  # 'wb': pickle output must be opened in binary mode on Python 3
Author: jiasenlu | Project: NeuralBabyTalk | Lines: 21 | Source: prepro_ngrams_flickr30k.py

Example 10: main

# Required import: from six.moves import cPickle [as alias]
# Or alternatively: from six.moves.cPickle import dump [as alias]
def main(params):

  det_train_path = 'data/coco/annotations/instances_train2014.json'
  det_val_path = 'data/coco/annotations/instances_val2014.json'

  coco_det_train = COCO(det_train_path)
  coco_det_val = COCO(det_val_path)

  info = json.load(open(params['dict_json'], 'r'))
  imgs = json.load(open(params['input_json'], 'r'))

  itow = info['ix_to_word']
  wtoi = {w:i for i,w in itow.items()}
  wtod = {w:i+1 for w,i in info['wtod'].items()} # word to detection
  dtoi = {w:i+1 for i,w in enumerate(wtod.keys())} # detection to index
  wtol = info['wtol']
  ctol = {c:i+1 for i, c in enumerate(coco_det_train.cats.keys())}

  # imgs = imgs['images']

  ngram_idxs, ref_len = build_dict(imgs, info, wtoi, wtod, dtoi, wtol, ctol, coco_det_train, coco_det_val, params)

  # cPickle.dump({'document_frequency': ngram_words, 'ref_len': ref_len}, open(params['output_pkl']+'-words.p','w'), protocol=cPickle.HIGHEST_PROTOCOL)
  cPickle.dump({'document_frequency': ngram_idxs, 'ref_len': ref_len}, open(params['output_pkl']+'-idxs.p','wb'), protocol=cPickle.HIGHEST_PROTOCOL)  # 'wb': pickle output must be opened in binary mode on Python 3
Author: jiasenlu | Project: NeuralBabyTalk | Lines: 26 | Source: prepro_ngrams_bak.py

Example 11: recursive_pickle

# Required import: from six.moves import cPickle [as alias]
# Or alternatively: from six.moves.cPickle import dump [as alias]
def recursive_pickle(top_obj):
    """
    Recursively pickle all of the given object's subordinates, starting with
    the deepest first. **Very** handy for debugging pickling issues, but
    also very slow (as it literally pickles each object in turn).

    Handles circular object references gracefully.

    """
    objs = depth_getter(top_obj)
    # sort by depth then by nest_info
    objs = sorted(six.itervalues(objs), key=lambda val: (-val[0], val[2]))

    for _, obj, location in objs:
#        print('trying %s' % location)
        try:
            pickle.dump(obj, BytesIO(), pickle.HIGHEST_PROTOCOL)
        except Exception as err:
            print(obj)
            print('Failed to pickle %s. \n Type: %s. Traceback '
                  'follows:' % (location, type(obj)))
            raise 
Author: miloharper | Project: neural-network-animation | Lines: 24 | Source: test_pickle.py

Example 12: save_dataset

# Required import: from six.moves import cPickle [as alias]
# Or alternatively: from six.moves.cPickle import dump [as alias]
def save_dataset(self, filename):
        """使用pickle保存数据文件。

        数据文件包含词典和对话样本。

        Args:
            filename (str): pickle 文件名
        """
        with open(filename, 'wb') as handle:
            data = {
                    'trainingSamples': self.trainingSamples
            }

            if len(self.validationSamples)>0:
                data['validationSamples'] = self.validationSamples
                data['testingSamples'] = self.testingSamples
                data['maxSeqLen'] = self.seq_max_length

            cPickle.dump(data, handle, -1)  # Using the highest protocol available

  # 3. Utility function: read the file back with pickle
Author: ZubinGou | Project: AI_Poet_Totoro | Lines: 23 | Source: data_loader.py

Example 13: preprocess

# Required import: from six.moves import cPickle [as alias]
# Or alternatively: from six.moves.cPickle import dump [as alias]
def preprocess(self, input_file, vocab_file, tensor_file):
        def handle(line):
            if len(line) > MAX_LENGTH:
                index_end = line.rfind('。', 0, MAX_LENGTH)
                index_end = index_end if index_end > 0 else MAX_LENGTH
                line = line[:index_end + 1]
            return BEGIN_CHAR + line + END_CHAR

        self.texts = [line.strip().replace('\n', '') for line in
                        open(input_file, encoding='utf-8')]
        self.texts = [handle(line) for line in self.texts if len(line) > MIN_LENGTH]

        words = ['*', ' ']
        for text in self.texts:
            words += [word for word in text]
        self.words = list(set(words))
        self.words_size = len(self.words)

        self.vocab = dict(zip(self.words, range(len(self.words))))
        self.vocab_id = dict(zip(range(len(self.words)), self.words))
        with open(vocab_file, 'wb') as f:
            cPickle.dump(self.words, f)
        self.texts_vector = np.array([
            list(map(self.vocab.get, poetry)) for poetry in self.texts])
        np.save(tensor_file, self.texts_vector) 
Author: stardut | Project: Text-Generate-RNN | Lines: 27 | Source: data.py

Example 14: store_response

# Required import: from six.moves import cPickle [as alias]
# Or alternatively: from six.moves.cPickle import dump [as alias]
def store_response(self, spider, request, response):
        """Store the given response in the cache."""
        rpath = self._get_request_path(spider, request)
        if not os.path.exists(rpath):
            os.makedirs(rpath)
        metadata = {
            'url': request.url,
            'method': request.method,
            'status': response.status,
            'response_url': response.url,
            'timestamp': time(),
        }
        with self._open(os.path.join(rpath, 'meta'), 'wb') as f:
            f.write(to_bytes(repr(metadata)))
        with self._open(os.path.join(rpath, 'pickled_meta'), 'wb') as f:
            pickle.dump(metadata, f, protocol=2)
        with self._open(os.path.join(rpath, 'response_headers'), 'wb') as f:
            f.write(headers_dict_to_raw(response.headers))
        with self._open(os.path.join(rpath, 'response_body'), 'wb') as f:
            f.write(response.body)
        with self._open(os.path.join(rpath, 'request_headers'), 'wb') as f:
            f.write(headers_dict_to_raw(request.headers))
        with self._open(os.path.join(rpath, 'request_body'), 'wb') as f:
            f.write(request.body) 
Author: wistbean | Project: learn_python3_spider | Lines: 26 | Source: httpcache.py

Example 15: test

# Required import: from six.moves import cPickle [as alias]
# Or alternatively: from six.moves.cPickle import dump [as alias]
def test(args):
    test_set = Dataset.from_bin_file(args.test_file)
    assert args.load_model

    print('load model from [%s]' % args.load_model, file=sys.stderr)
    params = torch.load(args.load_model, map_location=lambda storage, loc: storage)
    transition_system = params['transition_system']
    saved_args = params['args']
    saved_args.cuda = args.cuda
    # set the correct domain from saved arg
    args.lang = saved_args.lang

    parser_cls = Registrable.by_name(args.parser)
    parser = parser_cls.load(model_path=args.load_model, cuda=args.cuda)
    parser.eval()
    evaluator = Registrable.by_name(args.evaluator)(transition_system, args=args)
    eval_results, decode_results = evaluation.evaluate(test_set.examples, parser, evaluator, args,
                                                       verbose=args.verbose, return_decode_result=True)
    print(eval_results, file=sys.stderr)
    if args.save_decode_to:
        pickle.dump(decode_results, open(args.save_decode_to, 'wb')) 
Author: pcyin | Project: tranX | Lines: 23 | Source: exp.py


Note: The six.moves.cPickle.dump method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by many developers; the copyright of the source code belongs to the original authors, and distribution and use should follow the corresponding project's license. Do not reproduce without permission.