當前位置: 首頁>>代碼示例>>Python>>正文


Python msgpack.load方法代碼示例

本文整理匯總了Python中msgpack.load方法的典型用法代碼示例。如果您正苦於以下問題:Python msgpack.load方法的具體用法?Python msgpack.load怎麽用?Python msgpack.load使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在msgpack的用法示例。


在下文中一共展示了msgpack.load方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: __init__

# 需要導入模塊: import msgpack [as 別名]
# 或者: from msgpack import load [as 別名]
def __init__(self, args, log=None):
        """Set up the vocabulary and target map for this interface.

        On the first run (no cached vocab file) both the target map and the
        vocabulary are built from the raw training data and persisted under
        ``args.output_dir``; on later runs the cached copies are loaded.
        Also records vocab/class sizes and the padding id on ``args``.
        """
        self.args = args
        vocab_path = os.path.join(args.output_dir, 'vocab.txt')
        target_path = os.path.join(args.output_dir, 'target_map.txt')
        if os.path.exists(vocab_path):
            # Cached artifacts exist: just reload them.
            self.target_map = Indexer.load(target_path)
            self.vocab = Vocab.load(vocab_path)
        else:
            samples = load_data(self.args.data_dir)
            self.target_map = Indexer.build(
                (sample['target'] for sample in samples), log=log)
            self.target_map.save(target_path)
            # Stream every (truncated) token from both text fields.
            tokens = (token
                      for sample in samples
                      for text in (sample['text1'], sample['text2'])
                      for token in text.split()[:self.args.max_len])
            self.vocab = Vocab.build(
                tokens,
                lower=args.lower_case, min_df=self.args.min_df, log=log,
                pretrained_embeddings=args.pretrained_embeddings,
                dump_filtered=os.path.join(args.output_dir, 'filtered_words.txt'))
            self.vocab.save(vocab_path)
        args.num_classes = len(self.target_map)
        args.num_vocab = len(self.vocab)
        args.padding = Vocab.pad()
開發者ID:alibaba-edu,項目名稱:simple-effective-text-matching-pytorch,代碼行數:25,代碼來源:interface.py

示例2: save_to_file

# 需要導入模塊: import msgpack [as 別名]
# 或者: from msgpack import load [as 別名]
def save_to_file(self, filename):
        """Save only the bare minimum needed to reconstruct this CoverageDB.

        This serializes the data to a single file and can reduce the disk
        footprint of block coverage significantly (depending on overlap and
        number of files).

        Parameters
        ----------
        filename : str
            Output path; recorded on ``self.filename`` after a successful write.

        Raises
        ------
        Exception
            If msgpack is unavailable (``file_backing_disabled`` is set).
        """
        if file_backing_disabled:
            raise Exception("[!] Can't save/load coverage db files without msgpack. Try `pip install msgpack`")
        save_dict = dict()
        save_dict["version"] = 1  # serialized covdb version
        save_dict["module_name"] = self.module_name
        save_dict["module_base"] = self.module_base
        save_dict["coverage_files"] = self.coverage_files
        # Save a tighter version of the block dict ({int: int} vice {int: str})
        # by replacing each file path with its index into coverage_files.
        # enumerate() builds the path->index map in O(n); the original used a
        # repeated list.index() lookup which was O(n^2) in the file count.
        file_index_map = {filepath: index
                          for index, filepath in enumerate(self.coverage_files)}
        block_dict_to_save = {}
        for block, trace_list in self.block_dict.items():
            block_dict_to_save[block] = [file_index_map[name] for name in trace_list]
        save_dict["block_dict"] = block_dict_to_save
        # write packed version to file
        with open(filename, "wb") as f:
            msgpack.dump(save_dict, f)
            self.filename = filename
開發者ID:ForAllSecure,項目名稱:bncov,代碼行數:25,代碼來源:coverage.py

示例3: __init__

# 需要導入模塊: import msgpack [as 別名]
# 或者: from msgpack import load [as 別名]
def __init__(self, args, log=None):
        """Prepare vocab and target map, building from data when not cached.

        When ``vocab.txt`` is absent from ``args.output_dir``, the target map
        and vocabulary are constructed from the training data and saved;
        otherwise both are loaded from disk. Class count, vocab size and the
        padding id are written back onto ``args``.
        """
        self.args = args
        vocab_file = os.path.join(args.output_dir, 'vocab.txt')
        target_map_file = os.path.join(args.output_dir, 'target_map.txt')
        if os.path.exists(vocab_file):
            # Reuse previously built artifacts.
            self.target_map = Indexer.load(target_map_file)
            self.vocab = Vocab.load(vocab_file)
        else:
            raw = load_data(self.args.data_dir)
            self.target_map = Indexer.build((item['target'] for item in raw),
                                            log=log)
            self.target_map.save(target_map_file)
            word_stream = (word
                           for item in raw
                           for text in (item['text1'], item['text2'])
                           for word in text.split()[:self.args.max_len])
            self.vocab = Vocab.build(word_stream,
                                     lower=args.lower_case,
                                     min_df=self.args.min_df, log=log,
                                     pretrained_embeddings=args.pretrained_embeddings,
                                     dump_filtered=os.path.join(args.output_dir, 'filtered_words.txt'))
            self.vocab.save(vocab_file)
        args.num_classes = len(self.target_map)
        args.num_vocab = len(self.vocab)
        args.padding = Vocab.pad()
開發者ID:alibaba-edu,項目名稱:simple-effective-text-matching,代碼行數:24,代碼來源:interface.py

示例4: load_embeddings

# 需要導入模塊: import msgpack [as 別名]
# 或者: from msgpack import load [as 別名]
def load_embeddings(self):
        """Return embeddings for the current vocab, caching them on disk.

        On first use the embedding matrix is generated from the pretrained
        embedding file and serialized to ``embedding.msgpack``; subsequent
        calls deserialize the cached copy instead of rebuilding it.
        """
        assert self.args.pretrained_embeddings
        cache_path = os.path.join(self.args.output_dir, 'embedding.msgpack')
        if os.path.exists(cache_path):
            # A cached matrix exists; just read it back.
            with open(cache_path, 'rb') as f:
                embeddings = msgpack.load(f)
        else:
            # Build fresh embeddings for this vocab, then cache them.
            embeddings = load_embeddings(self.args.pretrained_embeddings, self.vocab,
                                         self.args.embedding_dim,
                                         mode=self.args.embedding_mode,
                                         lower=self.args.lower_case)
            with open(cache_path, 'wb') as f:
                msgpack.dump(embeddings, f)
        return embeddings
開發者ID:alibaba-edu,項目名稱:simple-effective-text-matching-pytorch,代碼行數:16,代碼來源:interface.py

示例5: read

# 需要導入模塊: import msgpack [as 別名]
# 或者: from msgpack import load [as 別名]
def read(self, stream):
        """Deserialize *stream* with the configured backend reader (one-shot).

        ``stream`` is a readable file descriptor object (something
        ``load``-able by msgpack or json); the Python representation of its
        contents is returned.
        """
        contents = self.reader.load(stream)
        return contents
開發者ID:cognitect,項目名稱:transit-python,代碼行數:8,代碼來源:reader.py

示例6: load

# 需要導入模塊: import msgpack [as 別名]
# 或者: from msgpack import load [as 別名]
def load(self, stream):
        """Parse *stream* as JSON (preserving key order) and decode it."""
        parsed = json.load(stream, object_pairs_hook=OrderedDict)
        return self.decoder.decode(parsed)
開發者ID:cognitect,項目名稱:transit-python,代碼行數:5,代碼來源:reader.py

示例7: load_data

# 需要導入模塊: import msgpack [as 別名]
# 或者: from msgpack import load [as 別名]
def load_data(self):
        """Load train_meta.msgpack and return (vocab, char_vocab, embedding).

        Reads the preprocessed metadata from ``self.spacyDir``, wraps the
        stored embedding matrix in a torch.Tensor, and records vocab sizes
        and embedding dim on ``self.opt``.
        NOTE(review): the ``encoding`` kwarg requires msgpack < 1.0 — confirm
        the pinned msgpack version.
        """
        print('Load train_meta.msgpack...')
        meta_path = os.path.join(self.spacyDir, 'train_meta.msgpack')
        with open(meta_path, 'rb') as f:
            meta = msgpack.load(f, encoding='utf8')
        embedding = torch.Tensor(meta['embedding'])
        self.opt['vocab_size'] = embedding.size(0)
        self.opt['vocab_dim'] = embedding.size(1)
        self.opt['char_vocab_size'] = len(meta['char_vocab'])
        return meta['vocab'], meta['char_vocab'], embedding
開發者ID:microsoft,項目名稱:SDNet,代碼行數:12,代碼來源:CoQAPreprocess.py

示例8: load_cpickle

# 需要導入模塊: import msgpack [as 別名]
# 或者: from msgpack import load [as 別名]
def load_cpickle(cls, filename):
        """Load CPICKLE file

        Parameters
        ----------
        filename : str
            Filename path

        Returns
        -------
        data
            Unpickled contents of the file.

        Raises
        ------
        ImportError
            If no pickle module can be imported.

        """

        cls.file_exists(filename=filename)

        try:
            # Python 2 fast path: C-accelerated pickle implementation.
            import cPickle as pickle

        except ImportError:
            try:
                import pickle

            except ImportError:
                # NOTE(review): ``cls.__class__.__name__`` names the metaclass
                # when this is a classmethod; ``cls.__name__`` may be intended.
                message = '{name}: Unable to import pickle module.'.format(
                    name=cls.__class__.__name__
                )

                cls.logger().exception(message)
                raise ImportError(message)

        # Close the handle deterministically even when unpickling raises;
        # the original ``pickle.load(open(...))`` leaked the open file.
        with open(filename, "rb") as f:
            return pickle.load(f)
開發者ID:DCASE-REPO,項目名稱:dcase_util,代碼行數:34,代碼來源:serialization.py

示例9: load_json

# 需要導入模塊: import msgpack [as 別名]
# 或者: from msgpack import load [as 別名]
def load_json(cls, filename):
        """Load JSON file

        Parameters
        ----------
        filename : str
            Filename path

        Returns
        -------
        data
            Parsed contents of the file.

        Raises
        ------
        ImportError
            If no json module can be imported.

        """

        cls.file_exists(filename=filename)

        try:
            # Prefer the faster ujson when available.
            import ujson as json

        except ImportError:
            try:
                import json

            except ImportError:
                message = '{name}: Unable to import json module. You can install it with `pip install ujson`.'.format(
                    name=cls.__class__.__name__
                )

                cls.logger().exception(message)
                raise ImportError(message)

        # Close the handle deterministically even when parsing raises;
        # the original ``json.load(open(...))`` leaked the open file.
        with open(filename, "r") as f:
            return json.load(f)
開發者ID:DCASE-REPO,項目名稱:dcase_util,代碼行數:34,代碼來源:serialization.py

示例10: load_msgpack

# 需要導入模塊: import msgpack [as 別名]
# 或者: from msgpack import load [as 別名]
def load_msgpack(cls, filename):
        """Load MSGPACK file

        Parameters
        ----------
        filename : str
            Filename path

        Returns
        -------
        data
            Unpacked contents of the file.

        Raises
        ------
        ImportError
            If the msgpack module is not installed.

        """

        cls.file_exists(filename=filename)

        try:
            import msgpack

        except ImportError:
            message = '{name}: Unable to import msgpack module. You can install it with `pip install msgpack-python`.'.format(
                name=cls.__class__.__name__
            )

            cls.logger().exception(message)
            raise ImportError(message)

        # Close the handle deterministically even when unpacking raises;
        # the original ``msgpack.load(open(...))`` leaked the open file.
        # NOTE(review): the ``encoding`` kwarg requires msgpack < 1.0 —
        # confirm the pinned msgpack version.
        with open(filename, "rb") as f:
            return msgpack.load(f, encoding='utf-8')
開發者ID:DCASE-REPO,項目名稱:dcase_util,代碼行數:30,代碼來源:serialization.py

示例11: load_marshal

# 需要導入模塊: import msgpack [as 別名]
# 或者: from msgpack import load [as 別名]
def load_marshal(cls, filename):
        """Load MARSHAL file

        Parameters
        ----------
        filename : str
            Filename path

        Returns
        -------
        data
            Unmarshalled contents of the file.

        Raises
        ------
        ImportError
            If the marshal module cannot be imported.

        """

        cls.file_exists(filename=filename)

        try:
            # NOTE(review): marshal is part of the standard library, so this
            # ImportError path (and the pip hint in the message) should be
            # unreachable in practice.
            import marshal

        except ImportError:
            message = '{name}: Unable to import marshal module. You can install it with `pip install pymarshal`.'.format(
                name=cls.__class__.__name__
            )

            cls.logger().exception(message)
            raise ImportError(message)

        # Close the handle deterministically even when loading raises;
        # the original ``marshal.load(open(...))`` leaked the open file.
        with open(filename, "rb") as f:
            return marshal.load(f)
開發者ID:DCASE-REPO,項目名稱:dcase_util,代碼行數:30,代碼來源:serialization.py

示例12: load_train_data

# 需要導入模塊: import msgpack [as 別名]
# 或者: from msgpack import load [as 別名]
def load_train_data(opt):
    """Load preprocessed CoQA training data.

    Reads ``train_meta.msgpack`` (embedding matrix) and
    ``train_data.msgpack`` from the global ``args.train_dir``, records vocab
    size, embedding dim and feature count on ``opt``, and returns
    ``(train, embedding, opt)`` where ``train`` holds 'context' and 'qa'
    lists of per-example tuples.
    NOTE(review): the ``encoding`` kwarg requires msgpack < 1.0.
    """
    with open(os.path.join(args.train_dir, 'train_meta.msgpack'), 'rb') as f:
        meta = msgpack.load(f, encoding='utf8')
    embedding = torch.Tensor(meta['embedding'])
    opt['vocab_size'] = embedding.size(0)
    opt['embedding_dim'] = embedding.size(1)

    with open(os.path.join(args.train_dir, 'train_data.msgpack'), 'rb') as f:
        data = msgpack.load(f, encoding='utf8')

    opt['num_features'] = len(data['context_features'][0][0])

    # Field order inside each tuple is fixed by these key sequences.
    context_keys = ('context_ids', 'context_tags', 'context_ents', 'context',
                    'context_span', '1st_question', 'context_tokenized')
    qa_keys = ('question_CID', 'question_ids', 'context_features',
               'answer_start', 'answer_end', 'rationale_start',
               'rationale_end', 'answer_choice', 'question', 'answer',
               'question_tokenized')
    train = {
        'context': list(zip(*(data[key] for key in context_keys))),
        'qa': list(zip(*(data[key] for key in qa_keys))),
    }
    return train, embedding, opt
開發者ID:MiuLab,項目名稱:FlowDelta,代碼行數:37,代碼來源:train_CoQA.py

示例13: load_dev_data

# 需要導入模塊: import msgpack [as 別名]
# 或者: from msgpack import load [as 別名]
def load_dev_data(opt): # can be extended to true test set
    """Load preprocessed CoQA dev data.

    Reads ``dev_meta.msgpack`` and ``dev_data.msgpack`` from the global
    ``args.dev_dir``, checks them against the options recorded at train
    time, and returns ``(dev, embedding)`` where ``dev`` holds 'context' and
    'qa' lists of per-example tuples.
    NOTE(review): the ``encoding`` kwarg requires msgpack < 1.0.
    """
    with open(os.path.join(args.dev_dir, 'dev_meta.msgpack'), 'rb') as f:
        meta = msgpack.load(f, encoding='utf8')
    embedding = torch.Tensor(meta['embedding'])
    assert opt['embedding_dim'] == embedding.size(1)

    with open(os.path.join(args.dev_dir, 'dev_data.msgpack'), 'rb') as f:
        data = msgpack.load(f, encoding='utf8')

    assert opt['num_features'] == len(data['context_features'][0][0])

    # Field order inside each tuple is fixed by these key sequences.
    context_keys = ('context_ids', 'context_tags', 'context_ents', 'context',
                    'context_span', '1st_question', 'context_tokenized')
    qa_keys = ('question_CID', 'question_ids', 'context_features',
               'answer_start', 'answer_end', 'rationale_start',
               'rationale_end', 'answer_choice', 'question', 'answer',
               'question_tokenized')
    dev = {
        'context': list(zip(*(data[key] for key in context_keys))),
        'qa': list(zip(*(data[key] for key in qa_keys))),
    }

    return dev, embedding
開發者ID:MiuLab,項目名稱:FlowDelta,代碼行數:37,代碼來源:train_CoQA.py

示例14: load_dev_data

# 需要導入模塊: import msgpack [as 別名]
# 或者: from msgpack import load [as 別名]
def load_dev_data(opt): # can be extended to true test set
    """Load preprocessed QuAC dev data for prediction.

    Reads ``dev_meta.msgpack`` and ``dev_data.msgpack`` from the global
    ``args.dev_dir``, validates them against the train-time options, and
    returns ``(dev, embedding, dev_answer)`` where ``dev_answer`` groups the
    reference answers per conversation id.
    NOTE(review): the ``encoding`` kwarg requires msgpack < 1.0.
    """
    with open(os.path.join(args.dev_dir, 'dev_meta.msgpack'), 'rb') as f:
        meta = msgpack.load(f, encoding='utf8')
    embedding = torch.Tensor(meta['embedding'])
    assert opt['embedding_dim'] == embedding.size(1)

    with open(os.path.join(args.dev_dir, 'dev_data.msgpack'), 'rb') as f:
        data = msgpack.load(f, encoding='utf8')

    assert opt['num_features'] == len(data['context_features'][0][0]) + opt['explicit_dialog_ctx'] * (opt['use_dialog_act']*3 + 2)

    # Field order inside each tuple is fixed by these key sequences.
    context_keys = ('context_ids', 'context_tags', 'context_ents', 'context',
                    'context_span', '1st_question', 'context_tokenized')
    qa_keys = ('question_CID', 'question_ids', 'context_features',
               'answer_start', 'answer_end', 'answer_choice', 'question',
               'answer', 'question_tokenized')
    dev = {
        'context': list(zip(*(data[key] for key in context_keys))),
        'qa': list(zip(*(data[key] for key in qa_keys))),
    }

    # Group reference answers by conversation id.
    # NOTE(review): as in the original, this grows the list by at most one
    # slot per question, so it assumes CIDs never jump by more than one.
    dev_answer = []
    for idx, cid in enumerate(data['question_CID']):
        if len(dev_answer) <= cid:
            dev_answer.append([])
        dev_answer[cid].append(data['all_answer'][idx])

    return dev, embedding, dev_answer
開發者ID:MiuLab,項目名稱:FlowDelta,代碼行數:40,代碼來源:predict_QuAC.py

示例15: load_train_data

# 需要導入模塊: import msgpack [as 別名]
# 或者: from msgpack import load [as 別名]
def load_train_data(opt):
    """Load preprocessed QuAC training data.

    Reads ``train_meta.msgpack`` (embedding matrix) and
    ``train_data.msgpack`` from the global ``args.train_dir``, records vocab
    size, embedding dim and feature count on ``opt``, and returns
    ``(train, embedding, opt)`` where ``train`` holds 'context' and 'qa'
    lists of per-example tuples.
    NOTE(review): the ``encoding`` kwarg requires msgpack < 1.0.
    """
    with open(os.path.join(args.train_dir, 'train_meta.msgpack'), 'rb') as f:
        meta = msgpack.load(f, encoding='utf8')
    embedding = torch.Tensor(meta['embedding'])
    opt['vocab_size'] = embedding.size(0)
    opt['embedding_dim'] = embedding.size(1)

    with open(os.path.join(args.train_dir, 'train_data.msgpack'), 'rb') as f:
        data = msgpack.load(f, encoding='utf8')

    opt['num_features'] = len(data['context_features'][0][0])

    # Field order inside each tuple is fixed by these key sequences.
    context_keys = ('context_ids', 'context_tags', 'context_ents', 'context',
                    'context_span', '1st_question', 'context_tokenized')
    qa_keys = ('question_CID', 'question_ids', 'context_features',
               'answer_start', 'answer_end', 'answer_choice', 'question',
               'answer', 'question_tokenized')
    train = {
        'context': list(zip(*(data[key] for key in context_keys))),
        'qa': list(zip(*(data[key] for key in qa_keys))),
    }
    return train, embedding, opt
開發者ID:MiuLab,項目名稱:FlowDelta,代碼行數:35,代碼來源:train_QuAC.py


注:本文中的msgpack.load方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。