当前位置: 首页>>代码示例>>Python>>正文


Python Vocabulary.load方法代码示例

本文整理汇总了Python中util.vocabulary.Vocabulary.load方法的典型用法代码示例。如果您正苦于以下问题:Python Vocabulary.load方法的具体用法?Python Vocabulary.load怎么用?Python Vocabulary.load使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类util.vocabulary.Vocabulary的用法示例。


在下文中一共展示了Vocabulary.load方法的13个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: test

# 需要导入模块: from util.vocabulary import Vocabulary [as 别名]
# 或者: from util.vocabulary.Vocabulary import load [as 别名]
def test(args):
  """Parse every sentence in ``args.source`` with a trained parser model.

  Loads the word/phrase/semiterminal vocabularies, the parser spec and its
  HDF5 weights from the ``args.model`` prefix, optionally moves the parser
  to the GPU, then prints one bracketed S-expression tree per input line.
  """
  trace('loading model ...')
  prefix = args.model
  word_vocab = Vocabulary.load(prefix + '.words')
  phrase_vocab = Vocabulary.load(prefix + '.phrases')
  semiterminal_vocab = Vocabulary.load(prefix + '.semiterminals')
  parser = Parser.load_spec(prefix + '.spec')
  if args.use_gpu:
    parser.to_gpu()
  serializers.load_hdf5(prefix + '.weights', parser)

  # Cache for word embeddings reused across sentences.
  embed_cache = {}
  parser.reset()

  trace('generating parse trees ...')
  with open(args.source) as source_file:
    for line in source_file:
      words = to_vram_words(convert_word_list(line.split(), word_vocab))
      raw_output = parser.forward(words, None, args.unary_limit, embed_cache)
      labeled = restore_labels(raw_output, phrase_vocab, semiterminal_vocab)
      tree = combine_xbar(labeled)
      print('( ' + tree_to_string(tree) + ' )')

  trace('finished.')
开发者ID:odashi,项目名称:nn_parsers,代码行数:27,代码来源:parse15a.py

示例2: test

# 需要导入模块: from util.vocabulary import Vocabulary [as 别名]
# 或者: from util.vocabulary.Vocabulary import load [as 别名]
def test(args):
  """Translate ``args.source`` into ``args.target`` with a trained model.

  Restores the source/target vocabularies and the attention MT network from
  the ``args.model`` prefix, decodes the source corpus in minibatches, and
  writes one hypothesis sentence per line to the target file.
  """
  trace('loading model ...')
  prefix = args.model
  src_vocab = Vocabulary.load(prefix + '.srcvocab')
  trg_vocab = Vocabulary.load(prefix + '.trgvocab')
  attmt = AttentionMT.load_spec(prefix + '.spec')
  if args.use_gpu:
    attmt.to_gpu()
  serializers.load_hdf5(prefix + '.weights', attmt)

  trace('generating translation ...')
  generated = 0

  with open(args.target, 'w') as out:
    for src_batch in gens.batch(gens.word_list(args.source), args.minibatch):
      src_batch = fill_batch(src_batch)
      batch_size = len(src_batch)

      trace('sample %8d - %8d ...' % (generated + 1, generated + batch_size))
      hyp_batch = forward(src_batch, None, src_vocab, trg_vocab, attmt, False, args.generation_limit)

      for hyp in hyp_batch:
        # Truncate each hypothesis at the first end-of-sentence marker.
        if '</s>' in hyp:
          hyp = hyp[:hyp.index('</s>')]
        print(' '.join(hyp), file=out)

      generated += batch_size

  trace('finished.')
开发者ID:prajdabre,项目名称:chainer_examples,代码行数:30,代码来源:mt_s2s_attention.py

示例3: test

# 需要导入模块: from util.vocabulary import Vocabulary [as 别名]
# 或者: from util.vocabulary.Vocabulary import load [as 别名]
    def test(self):
        """Decode ``self.source`` with the saved encoder-decoder model.

        Loads vocabularies, spec and weights from the ``self.model`` prefix,
        translates the source corpus in minibatches, echoes each source and
        hypothesis pair to stdout, and writes hypotheses to ``self.target``.
        """
        trace('loading model ...')
        src_vocab = Vocabulary.load(self.model + '.srcvocab')
        trg_vocab = Vocabulary.load(self.model + '.trgvocab')
        encdec = EncoderDecoder.load_spec(self.model + '.spec')
        serializers.load_hdf5(self.model + '.weights', encdec)

        trace('generating translation ...')
        generated = 0

        with open(self.target, 'w') as out:
            for src_batch in gens.batch(gens.word_list(self.source), self.minibatch):
                src_batch = fill_batch(src_batch)
                batch_size = len(src_batch)

                trace('sample %8d - %8d ...' % (generated + 1, generated + batch_size))
                hyp_batch = self.forward(src_batch, None, src_vocab, trg_vocab, encdec, False, self.generation_limit)

                for idx, hyp in enumerate(hyp_batch):
                    # Cut the hypothesis at the first end-of-sentence token.
                    if '</s>' in hyp:
                        hyp = hyp[:hyp.index('</s>')]
                    print("src : " + "".join(src_batch[idx]).replace("</s>", ""))
                    print('hyp : ' + ''.join(hyp))
                    print(' '.join(hyp), file=out)

                generated += batch_size

        trace('finished.')
开发者ID:fedorajzf,项目名称:Chainer-Slack-Twitter-Dialogue,代码行数:32,代码来源:EncoderDecoderModel.py

示例4: load

# 需要导入模块: from util.vocabulary import Vocabulary [as 别名]
# 或者: from util.vocabulary.Vocabulary import load [as 别名]
 def load(filename):
     """Reconstruct an AttentionalTranslationModel saved under *filename*.

     Reads both vocabularies, the layer sizes, and every weight matrix in
     the exact order the matching save routine wrote them, then returns the
     rebuilt model.  The read order below must not be changed.
     """
     # NOTE(review): acts as an alternate constructor (builds its own
     # instance); presumably decorated as @staticmethod where defined —
     # confirm at the class definition.
     self = AttentionalTranslationModel()
     with ModelFile(filename) as fp:
         # Vocabularies are embedded in the model file itself.
         self.__src_vocab = Vocabulary.load(fp.get_file_pointer())
         self.__trg_vocab = Vocabulary.load(fp.get_file_pointer())
         # Scalar hyperparameters, stored as plain text fields.
         self.__n_embed = int(fp.read())
         self.__n_hidden = int(fp.read())
         # Allocate the network before streaming weights into it.
         self.__make_model()
         wrapper.begin_model_access(self.__model)
         fp.read_embed(self.__model.w_xi)
         fp.read_linear(self.__model.w_ia)
         fp.read_linear(self.__model.w_aa)
         fp.read_linear(self.__model.w_ib)
         fp.read_linear(self.__model.w_bb)
         fp.read_linear(self.__model.w_aw)
         fp.read_linear(self.__model.w_bw)
         fp.read_linear(self.__model.w_pw)
         fp.read_linear(self.__model.w_we)
         fp.read_linear(self.__model.w_ap)
         fp.read_linear(self.__model.w_bp)
         fp.read_embed(self.__model.w_yp)
         fp.read_linear(self.__model.w_pp)
         fp.read_linear(self.__model.w_cp)
         fp.read_linear(self.__model.w_dp)
         fp.read_linear(self.__model.w_py)
         wrapper.end_model_access(self.__model)
     return self
开发者ID:benob,项目名称:chainer_examples,代码行数:29,代码来源:mt_s2s_attention.py

示例5: __predict_sentence

# 需要导入模块: from util.vocabulary import Vocabulary [as 别名]
# 或者: from util.vocabulary.Vocabulary import load [as 别名]
 def __predict_sentence(self, src_batch):
     """Decode *src_batch* with the model saved under ``self.model_name``.

     :param src_batch: batch of tokenized source sentences.
     :return: batch of hypothesis (decoded) sentences.
     """
     dialogue = EncoderDecoderModelForwardSlack(self.parameter)
     src_vocab = Vocabulary.load(self.model_name + '.srcvocab')
     trg_vocab = Vocabulary.load(self.model_name + '.trgvocab')
     model = EncoderDecoder.load_spec(self.model_name + '.spec')
     # Fix: load the weights from the same ``self.model_name`` prefix as the
     # vocabularies and spec above; previously this read from
     # ``dialogue.model``, a different prefix, so spec and weights could
     # come from different models.
     serializers.load_hdf5(self.model_name + '.weights', model)
     hyp_batch = dialogue.forward(src_batch, None, src_vocab, trg_vocab, model, False, self.generation_limit)
     return hyp_batch
开发者ID:fedorajzf,项目名称:Chainer-Slack-Twitter-Dialogue,代码行数:10,代码来源:app.py

示例6: __init__

# 需要导入模块: from util.vocabulary import Vocabulary [as 别名]
# 或者: from util.vocabulary.Vocabulary import load [as 别名]
  def __init__(self, args):
    """Load a trained encoder-decoder translator from the ``args.model`` prefix."""
    trace('loading model ...')
    self.args = args
    prefix = args.model
    self.src_vocab = Vocabulary.load(prefix + '.srcvocab')
    self.trg_vocab = Vocabulary.load(prefix + '.trgvocab')
    self.encdec = EncoderDecoder.load_spec(prefix + '.spec')
    if args.use_gpu:
      self.encdec.to_gpu()
    serializers.load_hdf5(prefix + '.weights', self.encdec)

    trace('generating translation ...')
开发者ID:delihiros,项目名称:dqname,代码行数:13,代码来源:mt_s2s_encdec.py

示例7: __predict_sentence

# 需要导入模块: from util.vocabulary import Vocabulary [as 别名]
# 或者: from util.vocabulary.Vocabulary import load [as 别名]
 def __predict_sentence(self, src_batch):
     """Decode *src_batch* with the saved attention dialogue model.

     :param src_batch: batch of tokenized source sentences.
     :return: batch of generated (hypothesis) sentences.
     """
     dialogue = EncoderDecoderModelAttention(self.parameter)
     prefix = self.model_name
     src_vocab = Vocabulary.load(prefix + '.srcvocab')
     trg_vocab = Vocabulary.load(prefix + '.trgvocab')
     model = AttentionDialogue.load_spec(prefix + '.spec', self.XP)
     serializers.load_hdf5(prefix + '.weights', model)
     return dialogue.forward_implement(
         src_batch, None, src_vocab, trg_vocab, model, False, self.generation_limit)
开发者ID:SnowMasaya,项目名称:Chainer-Slack-Twitter-Dialogue,代码行数:15,代码来源:app.py

示例8: load

# 需要导入模块: from util.vocabulary import Vocabulary [as 别名]
# 或者: from util.vocabulary.Vocabulary import load [as 别名]
 def load(self, filename):
     """Populate this model in place from the file at *filename*.

     Reads vocabularies, layer sizes, and each weight matrix in the exact
     order written by the matching save routine; returns ``self`` so the
     call can be chained.  The read order below must not be changed.
     """
     with ModelFile(filename) as fp:
         # Vocabularies are embedded in the model file itself.
         self.src_vocab = Vocabulary.load(fp.get_file_pointer())
         self.trg_vocab = Vocabulary.load(fp.get_file_pointer())
         # Scalar hyperparameters, stored as plain text fields.
         self.n_embed = int(fp.read())
         self.n_hidden = int(fp.read())
         # Allocate the network before streaming weights into it.
         self.make_model()
         wrapper.begin_model_access(self.model)
         fp.read_embed(self.model.weight_xi)
         fp.read_linear(self.model.weight_ip)
         fp.read_linear(self.model.weight_pp)
         fp.read_linear(self.model.weight_pq)
         fp.read_linear(self.model.weight_qj)
         fp.read_linear(self.model.weight_jy)
         fp.read_embed(self.model.weight_yq)
         fp.read_linear(self.model.weight_qq)
         wrapper.end_model_access(self.model)
     return self
开发者ID:tksugimoto,项目名称:Chainer_Machine_Translation_ipython_notebook,代码行数:20,代码来源:EncoderDecoderModel.py

示例9: load

# 需要导入模块: from util.vocabulary import Vocabulary [as 别名]
# 或者: from util.vocabulary.Vocabulary import load [as 别名]
 def load(filename):
     """Reconstruct an EncoderDecoderModel saved under *filename*.

     Reads vocabularies, layer sizes, and each weight matrix in the exact
     order the matching save routine wrote them, then returns the rebuilt
     model.  The read order below must not be changed.
     """
     # NOTE(review): acts as an alternate constructor (builds its own
     # instance); presumably decorated as @staticmethod where defined —
     # confirm at the class definition.
     self = EncoderDecoderModel()
     with ModelFile(filename) as fp:
         # Vocabularies are embedded in the model file itself.
         self.__src_vocab = Vocabulary.load(fp.get_file_pointer())
         self.__trg_vocab = Vocabulary.load(fp.get_file_pointer())
         # Scalar hyperparameters, stored as plain text fields.
         self.__n_embed = int(fp.read())
         self.__n_hidden = int(fp.read())
         # Allocate the network before streaming weights into it.
         self.__make_model()
         wrapper.begin_model_access(self.__model)
         fp.read_embed(self.__model.w_xi)
         fp.read_linear(self.__model.w_ip)
         fp.read_linear(self.__model.w_pp)
         fp.read_linear(self.__model.w_pq)
         fp.read_linear(self.__model.w_qj)
         fp.read_linear(self.__model.w_jy)
         fp.read_embed(self.__model.w_yq)
         fp.read_linear(self.__model.w_qq)
         wrapper.end_model_access(self.__model)
     return self
开发者ID:skaasj,项目名称:chainer_examples,代码行数:21,代码来源:mt_s2s_encdec.py

示例10: load

# 需要导入模块: from util.vocabulary import Vocabulary [as 别名]
# 或者: from util.vocabulary.Vocabulary import load [as 别名]
 def load(filename):
     """Reconstruct a TransSegmentationModel saved under *filename*.

     Reads the vocabulary, layer sizes, and the two weight matrices in the
     exact order the matching save routine wrote them; returns the rebuilt
     model.  The read order below must not be changed.
     """
     # NOTE(review): acts as an alternate constructor (builds its own
     # instance); presumably decorated as @staticmethod where defined —
     # confirm at the class definition.
     self = TransSegmentationModel()
     with ModelFile(filename) as fp:
         # Vocabulary is embedded in the model file itself.
         self.__vocab = Vocabulary.load(fp.get_file_pointer())
         # Scalar hyperparameters, stored as plain text fields.
         self.__n_context = int(fp.read())
         self.__n_hidden = int(fp.read())
         # Allocate the network before streaming weights into it.
         self.__make_model()
         wrapper.begin_model_access(self.__model)
         fp.read_embed(self.__model.w_xh)
         fp.read_linear(self.__model.w_hy)
         wrapper.end_model_access(self.__model)
     return self
开发者ID:ace12358,项目名称:WordSegmentation,代码行数:14,代码来源:tomo.py

示例11: test

# 需要导入模块: from util.vocabulary import Vocabulary [as 别名]
# 或者: from util.vocabulary.Vocabulary import load [as 别名]
def test(args):
  """Parse each sentence in ``args.source`` and print one bracketed tree per line.

  Restores the vocabularies, parser spec and HDF5 weights from the
  ``args.model`` prefix; moves the parser to the GPU when the module-level
  USE_GPU flag is set.
  """
  trace('loading model ...')
  prefix = args.model
  word_vocab = Vocabulary.load(prefix + '.words')
  phrase_vocab = Vocabulary.load(prefix + '.phrases')
  semi_vocab = Vocabulary.load(prefix + '.semiterminals')
  parser = Parser.load_spec(prefix + '.spec')
  if USE_GPU:
    parser.to_gpu()
  serializers.load_hdf5(prefix + '.weights', parser)

  trace('generating parse trees ...')
  with open(args.source) as source_file:
    for line in source_file:
      words = convert_word_list(line.split(), word_vocab)
      raw_output = parser.forward(words, None, args.unary_limit)
      tree = restore_labels(raw_output, phrase_vocab, semi_vocab)
      print('( ' + tree_to_string(tree) + ' )')

  trace('finished.')
开发者ID:odashi,项目名称:nn_parsers,代码行数:24,代码来源:parse02.py

示例12: test

# 需要导入模块: from util.vocabulary import Vocabulary [as 别名]
# 或者: from util.vocabulary.Vocabulary import load [as 别名]
    def test(self):
        """Run the trained attention dialogue model over the test corpus.

        Loads vocabularies, spec and weights saved under
        ``APP_ROOT/model/<self.model>``, decodes ``self.source`` in
        minibatches, echoes each source/hypothesis pair to stdout, and
        writes the hypotheses to ``self.test_target``.
        """
        trace("loading model ...")
        model_path = APP_ROOT + "/model/" + self.model
        src_vocab = Vocabulary.load(model_path + ".srcvocab")
        trg_vocab = Vocabulary.load(model_path + ".trgvocab")
        self.attention_dialogue = AttentionDialogue.load_spec(model_path + ".spec", self.XP)
        serializers.load_hdf5(model_path + ".weights", self.attention_dialogue)

        trace("generating translation ...")
        generated = 0

        with open(self.test_target, "w") as out:
            for src_batch in gens.batch(gens.word_list(self.source), self.minibatch):
                src_batch = fill_batch(src_batch)
                batch_size = len(src_batch)

                trace("sample %8d - %8d ..." % (generated + 1, generated + batch_size))
                hyp_batch = self.forward_implement(
                    src_batch, None, src_vocab, trg_vocab, self.attention_dialogue, False, self.generation_limit
                )

                for idx, hyp in enumerate(hyp_batch):
                    # Cut the hypothesis at the first end-of-sentence token.
                    if "</s>" in hyp:
                        hyp = hyp[: hyp.index("</s>")]
                    print("src : " + "".join(src_batch[idx]).replace("</s>", ""))
                    print("hyp : " + "".join(hyp))
                    print(" ".join(hyp), file=out)

                generated += batch_size

        trace("finished.")
开发者ID:SnowMasaya,项目名称:Chainer-Slack-Twitter-Dialogue,代码行数:40,代码来源:EncoderDecoderModelAttention.py

示例13: load

# 需要导入模块: from util.vocabulary import Vocabulary [as 别名]
# 或者: from util.vocabulary.Vocabulary import load [as 别名]
 def load(filename):
     """Reconstruct an RNNSegmentationModel saved under *filename*.

     Reads the vocabulary, layer sizes, and each weight matrix in the exact
     order the matching save routine wrote them; returns the rebuilt model.
     The read order below must not be changed.
     """
     # NOTE(review): acts as an alternate constructor (builds its own
     # instance); presumably decorated as @staticmethod where defined —
     # confirm at the class definition.
     self = RNNSegmentationModel()
     with ModelFile(filename) as fp:
         # Vocabulary is embedded in the model file itself.
         self.__vocab = Vocabulary.load(fp.get_file_pointer())
         # Scalar hyperparameters, stored as plain text fields.
         self.__n_embed = int(fp.read())
         self.__n_hidden = int(fp.read())
         # Allocate the network before streaming weights into it.
         self.__make_model()
         wrapper.begin_model_access(self.__model)
         fp.read_embed(self.__model.w_xe)
         fp.read_linear(self.__model.w_ea)
         fp.read_linear(self.__model.w_aa)
         fp.read_linear(self.__model.w_eb)
         fp.read_linear(self.__model.w_bb)
         fp.read_linear(self.__model.w_ay1)
         fp.read_linear(self.__model.w_by1)
         fp.read_linear(self.__model.w_ay2)
         fp.read_linear(self.__model.w_by2)
         wrapper.end_model_access(self.__model)
     return self
开发者ID:YukiOnda,项目名称:chainer_examples,代码行数:21,代码来源:seg_rnn.py


注:本文中的util.vocabulary.Vocabulary.load方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。