

Python seq2seq.models Code Examples

This article collects typical usage examples of seq2seq.models in Python. If you are wondering what seq2seq.models is, how it is used in practice, or want to see concrete examples, the curated code samples below may help. You can also explore further usage examples within the seq2seq package itself.


The following presents 6 code examples of seq2seq.models, sorted by popularity by default.

Example 1: _load_model_from_config

# Required module: import seq2seq [as alias]
# Or: from seq2seq import models [as alias]
# Additional imports this snippet depends on (gfile here assumes TensorFlow 1.x):
from pydoc import locate
import yaml
from tensorflow import gfile
from seq2seq import models
def _load_model_from_config(config_path, hparam_overrides, vocab_file, mode):
  """Loads model from a configuration file"""
  with gfile.GFile(config_path) as config_file:
    config = yaml.load(config_file, Loader=yaml.SafeLoader)  # explicit Loader keeps newer PyYAML happy
  model_cls = locate(config["model"]) or getattr(models, config["model"])
  model_params = config["model_params"]
  if hparam_overrides:
    model_params.update(hparam_overrides)
  # Change the max decode length to make the test run faster
  model_params["decoder.params"]["max_decode_length"] = 5
  model_params["vocab_source"] = vocab_file
  model_params["vocab_target"] = vocab_file
  return model_cls(params=model_params, mode=mode) 
Developer: akanimax, Project: natural-language-summary-generation-from-structured-data, Lines of code: 15, Source file: example_config_test.py
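
The snippet above resolves a model class from a dotted path in a YAML config and instantiates it with merged parameters. Below is a minimal, self-contained sketch of that same pattern using yaml and pydoc.locate; the config text, the collections.OrderedDict stand-in "model" class, and the override value are illustrative assumptions, not part of the original project.

from pydoc import locate
import yaml

config_text = """
model: collections.OrderedDict      # dotted path resolved by locate()
model_params:
  greeting: hello
"""

config = yaml.safe_load(config_text)
model_cls = locate(config["model"])            # resolves to collections.OrderedDict
model_params = dict(config["model_params"])
model_params.update({"greeting": "override"})  # plays the role of hparam_overrides
instance = model_cls(model_params)
print(type(instance).__name__, instance)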

Example 2: average_models

# Required module: import seq2seq [as alias]
# Or: from seq2seq import models [as alias]
import torch
def average_models(checkpoint_filenames):
    averaged = {}
    scale = 1. / len(checkpoint_filenames)
    print('Averaging %s models' % len(checkpoint_filenames))
    for m in checkpoint_filenames:
        checkpoint = torch.load(
            m, map_location=lambda storage, loc: storage)
        for n, p in checkpoint['state_dict'].items():
            if n in averaged:
                averaged[n].add_(scale * p)
            else:
                averaged[n] = scale * p
    checkpoint['state_dict'] = averaged
    return checkpoint 
Developer: eladhoffer, Project: seq2seq.pytorch, Lines of code: 16, Source file: inference.py
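
As a quick illustration of the averaging arithmetic above, here is a self-contained sketch that averages two in-memory state dicts instead of checkpoint files; the tensor values and the parameter name are made up.

import torch

fake_checkpoints = [
    {'state_dict': {'w': torch.tensor([1.0, 3.0])}},
    {'state_dict': {'w': torch.tensor([3.0, 5.0])}},
]

averaged = {}
scale = 1. / len(fake_checkpoints)
for ckpt in fake_checkpoints:
    for name, param in ckpt['state_dict'].items():
        if name in averaged:
            averaged[name].add_(scale * param)   # accumulate the scaled parameter in place
        else:
            averaged[name] = scale * param       # first occurrence initializes the running average
print(averaged['w'])  # tensor([2., 4.])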

Example 3: remove_wn_checkpoint

# Required module: import seq2seq [as alias]
# Or: from seq2seq import models [as alias]
def remove_wn_checkpoint(checkpoint):
    model = getattr(models, checkpoint['config'].model)(
        **checkpoint['config'].model_config)
    model.load_state_dict(checkpoint['state_dict'])

    def change_field(dict_obj, field, new_val):
        for k, v in dict_obj.items():
            if k == field:
                dict_obj[k] = new_val
            elif isinstance(v, dict):
                change_field(v, field, new_val)

    change_field(checkpoint['config'].model_config, 'layer_norm', False)

    for module in model.modules():
        if isinstance(module, (nn.Linear, nn.Conv2d, nn.LSTM, nn.LSTMCell)):
            for n, _ in list(module.named_parameters()):
                if n.endswith('_g'):
                    name = n.replace('_g', '')
                    wn = WeightNorm(None, 0)
                    weight = wn.compute_weight(module, name)
                    delattr(module, name)
                    del module._parameters[name + '_g']
                    del module._parameters[name + '_v']
                    module.register_parameter(name, nn.Parameter(weight.data))
                    print('wn removed from %s - %s' % (module, name))

    checkpoint['state_dict'] = model.state_dict()
    change_field(checkpoint['config'].model_config, 'weight_norm', False)
    return checkpoint 
Developer: eladhoffer, Project: seq2seq.pytorch, Lines of code: 32, Source file: inference.py
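
The snippet above folds weight normalization back into plain weight tensors for an entire checkpointed model by hand. For reference, the sketch below shows the closely related torch.nn.utils helpers doing the same fold-back on a single layer; the layer sizes are arbitrary.

import torch.nn as nn
from torch.nn.utils import weight_norm, remove_weight_norm

layer = weight_norm(nn.Linear(4, 2))   # splits 'weight' into 'weight_g' and 'weight_v'
print(sorted(name for name, _ in layer.named_parameters()))
remove_weight_norm(layer)              # recomputes and restores a plain 'weight'
print(sorted(name for name, _ in layer.named_parameters()))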

Example 4: __init__

# Required module: import seq2seq [as alias]
# Or: from seq2seq import models [as alias]
def __init__(self, checkpoint,
                 use_moses=None,
                 beam_size=5,
                 length_normalization_factor=0,
                 max_input_length=None,
                 max_output_length=50,
                 get_attention=False,
                 device="cpu",
                 device_ids=None):
        config = checkpoint['config']
        self.model = getattr(models, config.model)(**config.model_config)
        self.model.load_state_dict(checkpoint['state_dict'])

        self.src_tok, self.target_tok = checkpoint['tokenizers'].values()
        if use_moses is None:  # if not set, turn on if training was done with moses pretok
            use_moses = config.data_config.get('moses_pretok', False)
        if use_moses:
            src_lang, target_lang = checkpoint['tokenizers'].keys()
            self.src_tok.enable_moses(lang=src_lang)
            self.target_tok.enable_moses(lang=target_lang)
        self.insert_target_start = [BOS]
        self.insert_src_start = [BOS]
        self.insert_src_end = [EOS]
        self.get_attention = get_attention
        self.device = device
        self.device_ids = device_ids
        self.model.to(self.device)
        self.model.eval()

        self.beam_size = beam_size
        self.max_input_length = max_input_length
        self.max_output_length = max_output_length
        self.get_attention = get_attention
        self.length_normalization_factor = length_normalization_factor
        self.batch_first = self.model.encoder.batch_first
        self.pack_encoder_inputs = getattr(self.model.encoder, 'pack_inputs',
                                           False) 
Developer: eladhoffer, Project: seq2seq.pytorch, Lines of code: 39, Source file: inference.py
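
The constructor above builds the model via getattr(models, config.model)(**config.model_config) and then restores its weights from the checkpoint. The following self-contained sketch reproduces that construction pattern with a stand-in namespace and a toy config; TinyModel, the hidden_size value, and the namespace itself are assumptions for illustration only.

import types
import torch.nn as nn

# stand-in for the real seq2seq.models module
models_ns = types.SimpleNamespace(
    TinyModel=lambda hidden_size: nn.Linear(hidden_size, hidden_size))

config = types.SimpleNamespace(model='TinyModel',
                               model_config={'hidden_size': 8})
model = getattr(models_ns, config.model)(**config.model_config)
state_dict = model.state_dict()        # stands in for checkpoint['state_dict']
model.load_state_dict(state_dict)
model.to('cpu').eval()
print(model)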

Example 5: evaluate

# Required module: import seq2seq [as alias]
# Or: from seq2seq import models [as alias]
def evaluate(self, model, data):
        """ Evaluate a model on given dataset and return performance.

        Args:
            model (seq2seq.models): model to evaluate
            data (seq2seq.dataset.dataset.Dataset): dataset to evaluate against

        Returns:
            loss (float): loss of the given model on the given dataset
        """
        model.eval()

        loss = self.loss
        loss.reset()
        match = 0
        total = 0

        device = None if torch.cuda.is_available() else -1
        batch_iterator = torchtext.data.BucketIterator(
            dataset=data, batch_size=self.batch_size,
            sort=True, sort_key=lambda x: len(x.src),
            device=device, train=False)
        tgt_vocab = data.fields[seq2seq.tgt_field_name].vocab
        pad = tgt_vocab.stoi[data.fields[seq2seq.tgt_field_name].pad_token]

        with torch.no_grad():
            for batch in batch_iterator:
                input_variables, input_lengths  = getattr(batch, seq2seq.src_field_name)
                target_variables = getattr(batch, seq2seq.tgt_field_name)

                decoder_outputs, decoder_hidden, other = model(input_variables, input_lengths.tolist(), target_variables)

                # Evaluation
                seqlist = other['sequence']
                for step, step_output in enumerate(decoder_outputs):
                    target = target_variables[:, step + 1]
                    loss.eval_batch(step_output.view(target_variables.size(0), -1), target)

                    non_padding = target.ne(pad)
                    correct = seqlist[step].view(-1).eq(target).masked_select(non_padding).sum().item()
                    match += correct
                    total += non_padding.sum().item()

        if total == 0:
            accuracy = float('nan')
        else:
            accuracy = match / total

        return loss.get_loss(), accuracy 
Developer: IBM, Project: pytorch-seq2seq, Lines of code: 51, Source file: evaluator.py
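
The accuracy bookkeeping in the loop above ignores padding positions when counting correct tokens. Here is a self-contained sketch of exactly that masking arithmetic; the prediction tensor, target tensor, and pad index are toy values chosen for illustration.

import torch

pad = 0
targets = torch.tensor([[5, 7, pad],
                        [2, pad, pad]])
preds = torch.tensor([[5, 9, pad],
                      [2, pad, pad]])

non_padding = targets.ne(pad)                                         # mask out pad positions
correct = preds.eq(targets).masked_select(non_padding).sum().item()  # correct non-pad tokens
total = non_padding.sum().item()                                      # all non-pad tokens
print(correct, total, correct / total)  # 2 3 0.666...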

Example 6: train

# Required module: import seq2seq [as alias]
# Or: from seq2seq import models [as alias]
def train(self, model, data, num_epochs=5,
              resume=False, dev_data=None,
              optimizer=None, teacher_forcing_ratio=0):
        """ Run training for a given model.

        Args:
            model (seq2seq.models): model to run training on, if `resume=True`, it would be
               overwritten by the model loaded from the latest checkpoint.
            data (seq2seq.dataset.dataset.Dataset): dataset object to train on
            num_epochs (int, optional): number of epochs to run (default 5)
            resume(bool, optional): resume training with the latest checkpoint, (default False)
            dev_data (seq2seq.dataset.dataset.Dataset, optional): dev Dataset (default None)
            optimizer (seq2seq.optim.Optimizer, optional): optimizer for training
               (default: Optimizer(pytorch.optim.Adam, max_grad_norm=5))
            teacher_forcing_ratio (float, optional): teacher forcing ratio (default 0)
        Returns:
            model (seq2seq.models): trained model.
        """
        # If training is set to resume
        if resume:
            latest_checkpoint_path = Checkpoint.get_latest_checkpoint(self.expt_dir)
            resume_checkpoint = Checkpoint.load(latest_checkpoint_path)
            model = resume_checkpoint.model
            self.optimizer = resume_checkpoint.optimizer

            # A workaround to set the optimizer's parameters properly
            resume_optim = self.optimizer.optimizer
            defaults = resume_optim.param_groups[0]
            defaults.pop('params', None)
            defaults.pop('initial_lr', None)
            self.optimizer.optimizer = resume_optim.__class__(model.parameters(), **defaults)

            start_epoch = resume_checkpoint.epoch
            step = resume_checkpoint.step
        else:
            start_epoch = 1
            step = 0
            if optimizer is None:
                optimizer = Optimizer(optim.Adam(model.parameters()), max_grad_norm=5)
            self.optimizer = optimizer

        self.logger.info("Optimizer: %s, Scheduler: %s" % (self.optimizer.optimizer, self.optimizer.scheduler))

        self._train_epoches(data, model, num_epochs,
                            start_epoch, step, dev_data=dev_data,
                            teacher_forcing_ratio=teacher_forcing_ratio)
        return model 
Developer: IBM, Project: pytorch-seq2seq, Lines of code: 49, Source file: supervised_trainer.py
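
The resume branch above rebuilds the optimizer by copying the hyperparameters from the restored optimizer's first param group and binding them to the reloaded model's parameters. A minimal sketch of that workaround follows, with a toy model and learning rate assumed for illustration.

import torch.nn as nn
import torch.optim as optim

model = nn.Linear(3, 1)
old_optim = optim.Adam(model.parameters(), lr=3e-4)

defaults = dict(old_optim.param_groups[0])
defaults.pop('params', None)       # the new parameters come from the (reloaded) model
defaults.pop('initial_lr', None)   # only present when a scheduler was attached
new_optim = old_optim.__class__(model.parameters(), **defaults)
print(new_optim.param_groups[0]['lr'])  # 0.0003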


Note: The seq2seq.models examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.