Python utils.PAD Attribute Code Examples

This article collects typical usage examples of the Python utils.PAD attribute. If you have been wondering what utils.PAD is for, how to use it, or where to find working examples, the curated snippets below should help. You can also explore the other members of the utils module for related usage.


The sections below present 12 code examples of the utils.PAD attribute, sorted by popularity by default.
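For context, utils.PAD is typically a module-level integer constant that reserves a vocabulary index for the padding token. The exact values vary by project; the sketch below is an assumption based on Example 9's use of utils.PAD/utils.BOS/utils.EOS and the commented bleu.Scorer(pad=0, eos=3, unk=1) call in Example 11, not code copied from any of these repositories.

# Hypothetical utils.py layout (assumed, for illustration only):
PAD = 0  # padding token id, ignored by losses and masked in attention
UNK = 1  # out-of-vocabulary token id
BOS = 2  # beginning-of-sequence id used to start decoding
EOS = 3  # end-of-sequence id that terminates hypotheses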

Example 1: __init__

# Required import: import utils [as alias]
# Or: from utils import PAD [as alias]
def __init__(self, config, use_attention=True, encoder=None, decoder=None):
    super(seq2seq, self).__init__()

    if encoder is not None:
        self.encoder = encoder
    else:
        self.encoder = models.rnn_encoder(config)
    tgt_embedding = self.encoder.embedding if config.shared_vocab else None
    if decoder is not None:
        self.decoder = decoder
    else:
        self.decoder = models.rnn_decoder(config, embedding=tgt_embedding,
                                          use_attention=use_attention)
    self.log_softmax = nn.LogSoftmax(dim=-1)
    self.use_cuda = config.use_cuda
    self.config = config
    # per-token cross entropy; positions whose target equals utils.PAD contribute no loss
    self.criterion = nn.CrossEntropyLoss(ignore_index=utils.PAD, reduction='none')
    if config.use_cuda:
        self.criterion.cuda()
Developer: lancopku, Project: Global-Encoding, Lines: 20, Source: seq2seq.py
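The ignore_index/reduction pair is the important detail here: padded target positions produce zero loss, and the un-reduced loss vector lets the caller normalize by the number of real tokens. A minimal self-contained demonstration, assuming PAD = 0 and toy shapes rather than the project's actual data:

import torch
import torch.nn as nn

PAD = 0  # assumed padding id for this demo
criterion = nn.CrossEntropyLoss(ignore_index=PAD, reduction='none')

logits = torch.randn(5, 10)                   # 5 target positions, vocab size 10
targets = torch.tensor([4, 2, 7, PAD, PAD])   # last two positions are padding

loss = criterion(logits, targets)             # shape (5,); entries 3 and 4 are 0.0
n_tokens = targets.ne(PAD).sum()              # count real tokens, not positions
mean_loss = loss.sum() / n_tokens             # length-aware normalization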

Example 2: _prepro

# Required import: import utils [as alias]
# Or: from utils import PAD [as alias]
def _prepro(self, raw_article_sents):
    # extend the vocabulary with the article's out-of-vocabulary words
    ext_word2id = dict(self._word2id)
    ext_id2word = dict(self._id2word)
    for raw_words in raw_article_sents:
        for w in raw_words:
            if w not in ext_word2id:
                ext_word2id[w] = len(ext_word2id)
                ext_id2word[len(ext_id2word)] = w
    articles = conver2id(UNK, self._word2id, raw_article_sents)
    art_lens = [len(art) for art in articles]
    article = pad_batch_tensorize(articles, PAD, cuda=False).to(self._device)
    extend_arts = conver2id(UNK, ext_word2id, raw_article_sents)
    extend_art = pad_batch_tensorize(extend_arts, PAD, cuda=False).to(self._device)
    extend_vsize = len(ext_word2id)
    dec_args = (article, art_lens, extend_art, extend_vsize,
                START, END, UNK, self._max_len)
    return dec_args, ext_id2word
Developer: ChenRocks, Project: fast_abs_rl, Lines: 21, Source: decoding.py
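pad_batch_tensorize is a helper from these projects that packs variable-length id lists into one rectangular LongTensor whose tail positions are filled with PAD. The rough equivalent below is reconstructed from the call signature alone, not from the original source:

import torch

def pad_batch_tensorize_sketch(inputs, pad, cuda=False):
    """Pad a list of id lists into a (batch, max_len) LongTensor filled with pad."""
    tensor_type = torch.cuda.LongTensor if cuda else torch.LongTensor
    max_len = max(len(ids) for ids in inputs)
    tensor = tensor_type(len(inputs), max_len).fill_(pad)
    for i, ids in enumerate(inputs):
        tensor[i, :len(ids)] = tensor_type(ids)   # copy real ids; the rest stays pad
    return tensor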

Example 3: configure_training

# Required import: import utils [as alias]
# Or: from utils import PAD [as alias]
def configure_training(opt, lr, clip_grad, lr_decay, batch_size):
    """Supports the Adam optimizer only."""
    assert opt in ['adam']
    opt_kwargs = {}
    opt_kwargs['lr'] = lr

    train_params = {}
    train_params['optimizer']      = (opt, opt_kwargs)
    train_params['clip_grad_norm'] = clip_grad
    train_params['batch_size']     = batch_size
    train_params['lr_decay']       = lr_decay

    # reduce=False is deprecated in current PyTorch; reduction='none' is the equivalent
    nll = lambda logit, target: F.nll_loss(logit, target, reduce=False)

    def criterion(logits, targets):
        return sequence_loss(logits, targets, nll, pad_idx=PAD)

    return criterion, train_params
Developer: ChenRocks, Project: fast_abs_rl, Lines: 19, Source: train_abstractor.py
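sequence_loss is another project helper; judging from this call site, it applies the supplied cross-entropy function per token and masks out positions whose target equals pad_idx. A plausible reconstruction, offered as an assumption rather than the repository's actual code:

import torch

def sequence_loss_sketch(logits, targets, xent_fn, pad_idx):
    """Per-token loss over (batch, seq_len, vocab) logits, ignoring padded targets."""
    batch, seq_len, vocab = logits.size()
    losses = xent_fn(logits.view(-1, vocab), targets.view(-1))  # (batch * seq_len,)
    mask = targets.view(-1) != pad_idx
    return losses[mask].mean()   # average over real tokens only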

Example 4: _prepro

# Required import: import utils [as alias]
# Or: from utils import PAD [as alias]
def _prepro(self, raw_article_sents):
    ext_word2id = dict(self._word2id)
    ext_id2word = dict(self._id2word)
    for raw_words in raw_article_sents:
        for w in raw_words:
            if w not in ext_word2id:
                ext_word2id[w] = len(ext_word2id)
                ext_id2word[len(ext_id2word)] = w
    articles = conver2id(UNK, self._word2id, raw_article_sents)
    art_lens = [len(art) for art in articles]
    article = pad_batch_tensorize(articles, PAD, cuda=False).to(self._device)
    extend_arts = conver2id(UNK, ext_word2id, raw_article_sents)
    extend_art = pad_batch_tensorize(extend_arts, PAD, cuda=False).to(self._device)
    extend_vsize = len(ext_word2id)
    dec_args = (article, art_lens, extend_art, extend_vsize,
                START, END, UNK, self._max_len)
    return dec_args, ext_id2word
Developer: iwangjian, Project: ByteCup2018, Lines: 19, Source: decoding.py

Example 5: configure_training

# Required import: import utils [as alias]
# Or: from utils import PAD [as alias]
def configure_training(opt, lr, clip_grad, lr_decay, batch_size):
    """Supports the Adam optimizer only."""
    assert opt in ['adam']
    opt_kwargs = {}
    opt_kwargs['lr'] = lr

    train_params = {}
    train_params['optimizer'] = (opt, opt_kwargs)
    train_params['clip_grad_norm'] = clip_grad
    train_params['batch_size'] = batch_size
    train_params['lr_decay'] = lr_decay

    # reduce=False is deprecated in current PyTorch; reduction='none' is the equivalent
    nll = lambda logit, target: F.nll_loss(logit, target, reduce=False)

    def criterion(logits, targets):
        return sequence_loss(logits, targets, nll, pad_idx=PAD)

    return criterion, train_params
Developer: iwangjian, Project: ByteCup2018, Lines: 20, Source: train_abstractor.py

Example 6: build_batchers

# Required import: import utils [as alias]
# Or: from utils import PAD [as alias]
def build_batchers(net_type, word2id, cuda, debug):
    assert net_type in ['ff', 'rnn']
    prepro = prepro_fn_extract(args.max_word, args.max_sent)
    def sort_key(sample):
        src_sents, _ = sample
        return len(src_sents)
    batchify_fn = (batchify_fn_extract_ff if net_type == 'ff'
                   else batchify_fn_extract_ptr)
    convert_batch = (convert_batch_extract_ff if net_type == 'ff'
                     else convert_batch_extract_ptr)
    batchify = compose(batchify_fn(PAD, cuda=cuda),
                       convert_batch(UNK, word2id))

    train_loader = DataLoader(
        ExtractDataset('train'), batch_size=BUCKET_SIZE,
        shuffle=not debug,
        num_workers=4 if cuda and not debug else 0,
        collate_fn=coll_fn_extract
    )
    train_batcher = BucketedGenerater(train_loader, prepro, sort_key, batchify,
                                      single_run=False, fork=not debug)

    val_loader = DataLoader(
        ExtractDataset('val'), batch_size=BUCKET_SIZE,
        shuffle=False, num_workers=4 if cuda and not debug else 0,
        collate_fn=coll_fn_extract
    )
    val_batcher = BucketedGenerater(val_loader, prepro, sort_key, batchify,
                                    single_run=True, fork=not debug)
    return train_batcher, val_batcher 
Developer: ChenRocks, Project: fast_abs_rl, Lines: 32, Source: train_extractor_ml.py

Example 7: __call__

# Required import: import utils [as alias]
# Or: from utils import PAD [as alias]
def __call__(self, raw_article_sents):
    self._net.eval()
    n_art = len(raw_article_sents)
    articles = conver2id(UNK, self._word2id, raw_article_sents)
    article = pad_batch_tensorize(articles, PAD, cuda=False).to(self._device)
    indices = self._net.extract([article], k=min(n_art, self._max_ext))
    return indices
Developer: ChenRocks, Project: fast_abs_rl, Lines: 10, Source: decoding.py

Example 8: __init__

# Required import: import utils [as alias]
# Or: from utils import PAD [as alias]
def __init__(self, config, use_attention=True, encoder=None, decoder=None,
             src_padding_idx=0, tgt_padding_idx=0, label_smoothing=0, tgt_vocab=None):
    super(seq2seq, self).__init__()

    if encoder is not None:
        self.encoder = encoder
    else:
        self.encoder = models.rnn_encoder(config, padding_idx=src_padding_idx)
    tgt_embedding = self.encoder.embedding if config.shared_vocab else None
    if decoder is not None:
        self.decoder = decoder
    else:
        self.decoder = models.rnn_decoder(config, embedding=tgt_embedding,
                                          use_attention=use_attention,
                                          padding_idx=tgt_padding_idx)
    self.log_softmax = nn.LogSoftmax(dim=-1)
    self.use_cuda = config.use_cuda
    self.config = config
    self.label_smoothing = label_smoothing
    if self.label_smoothing > 0:
        self.criterion = LabelSmoothingLoss(
            label_smoothing, config.tgt_vocab_size,
            ignore_index=tgt_padding_idx)
    else:
        self.criterion = nn.CrossEntropyLoss(ignore_index=utils.PAD, reduction='none')
    if config.use_cuda:
        self.criterion.cuda()
    if config.rl:
        # self.reward_provider = CTRRewardProvider(
        #     config.ctr_rewared_provider_path)
        self.tgt_vocab = tgt_vocab
    self.padding_idx = tgt_padding_idx
Developer: THUDM, Project: KOBE, Lines: 34, Source: seq2seq.py

Example 9: __init__

# Required import: import utils [as alias]
# Or: from utils import PAD [as alias]
def __init__(self, size, n_best=1, cuda=True, length_norm=False, minimum_length=0):
    self.size = size
    self.tt = torch.cuda if cuda else torch

    # The score for each translation on the beam.
    self.scores = self.tt.FloatTensor(size).zero_()
    self.allScores = []

    # The backpointers at each time-step.
    self.prevKs = []

    # The outputs at each time-step: start with PAD everywhere, BOS on beam 0.
    self.nextYs = [self.tt.LongTensor(size).fill_(utils.PAD)]
    self.nextYs[0][0] = utils.BOS

    # Has EOS topped the beam yet?
    self._eos = utils.EOS
    self.eosTop = False

    # The attentions (matrix) for each time step.
    self.attn = []

    # Time and k pair for finished hypotheses.
    self.finished = []
    self.n_best = n_best

    self.length_norm = length_norm
    self.minimum_length = minimum_length
Developer: THUDM, Project: KOBE, Lines: 32, Source: beam.py
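At construction time the first output step is a row of PAD ids with BOS written into slot 0, so only the first beam is a live hypothesis at step 0. A quick illustration with an assumed beam width of 4 and the assumed ids PAD = 0, BOS = 2:

import torch

PAD, BOS = 0, 2                             # assumed special-token ids
size = 4                                    # beam width
next_ys = torch.LongTensor(size).fill_(PAD)
next_ys[0] = BOS
print(next_ys)                              # tensor([2, 0, 0, 0])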

Example 10: build_batchers

# Required import: import utils [as alias]
# Or: from utils import PAD [as alias]
def build_batchers(data_dir, net_type, word2id, cuda, debug):
    assert net_type in ['ff', 'rnn']
    prepro = prepro_fn_extract(args.max_word, args.max_sent)

    def sort_key(sample):
        src_sents, _ = sample
        return len(src_sents)

    batchify_fn = (batchify_fn_extract_ff if net_type == 'ff'
                   else batchify_fn_extract_ptr)
    convert_batch = (convert_batch_extract_ff if net_type == 'ff'
                     else convert_batch_extract_ptr)
    batchify = compose(batchify_fn(PAD, cuda=cuda), convert_batch(UNK, word2id))

    train_loader = DataLoader(
        ExtractDataset('train', data_dir), batch_size=BUCKET_SIZE,
        shuffle=not debug,
        num_workers=4 if cuda and not debug else 0,
        collate_fn=coll_fn_extract)
    val_loader = DataLoader(
        ExtractDataset('val', data_dir), batch_size=BUCKET_SIZE,
        shuffle=False, num_workers=4 if cuda and not debug else 0,
        collate_fn=coll_fn_extract)

    train_batcher = BucketedGenerater(train_loader, prepro, sort_key, batchify,
                                      single_run=False, fork=False)
    val_batcher = BucketedGenerater(val_loader, prepro, sort_key, batchify,
                                    single_run=True, fork=False)
    return train_batcher, val_batcher 
Developer: iwangjian, Project: ByteCup2018, Lines: 31, Source: train_extractor.py

Example 11: __init__

# Required import: import utils [as alias]
# Or: from utils import PAD [as alias]
def __init__(self, config, use_attention=True,
             encoder=None, decoder=None,
             src_padding_idx=0, tgt_padding_idx=0,
             label_smoothing=0, tgt_vocab=None):
    """
    Initialization of variables and functions
    :param config: configuration
    :param use_attention: use attention or not, consistent with seq2seq
    :param encoder: encoder
    :param decoder: decoder
    :param src_padding_idx: source padding index
    :param tgt_padding_idx: target padding index
    :param label_smoothing: ratio for label smoothing
    :param tgt_vocab: target vocabulary
    """
    super(tensor2tensor, self).__init__()

    self.config = config

    # pretrained encoder or not
    if encoder is not None:
        self.encoder = encoder
    else:
        self.encoder = models.TransformerEncoder(
            config, padding_idx=src_padding_idx)
        if self.config.knowledge:
            # HACK: we use tgt_vocab for knowledge instead of src_vocab
            src_vocab_size = config.src_vocab_size
            config.src_vocab_size = config.tgt_vocab_size
            self.knowledge_encoder = models.TransformerEncoder(
                config, padding_idx=src_padding_idx)
            config.src_vocab_size = src_vocab_size
    tgt_embedding = self.encoder.embedding if config.shared_vocab else None
    # pretrained decoder or not
    if decoder is not None:
        self.decoder = decoder
    else:
        self.decoder = models.TransformerDecoder(
            config, tgt_embedding=tgt_embedding, padding_idx=tgt_padding_idx)
    # log softmax should specify its dimension explicitly
    self.log_softmax = nn.LogSoftmax(dim=-1)
    self.use_cuda = config.use_cuda
    self.label_smoothing = label_smoothing
    if self.label_smoothing > 0:
        self.criterion = LabelSmoothingLoss(
            label_smoothing, config.tgt_vocab_size,
            ignore_index=tgt_padding_idx)
    else:
        self.criterion = nn.CrossEntropyLoss(ignore_index=utils.PAD)
    if config.use_cuda:
        self.criterion.cuda()
    self.compute_score = nn.Linear(
        config.hidden_size, config.tgt_vocab_size)

    # RL requires a reward provider; not available yet in this framework.
    # if config.rl:
    #     self.bleu_scorer = bleu.Scorer(pad=0, eos=3, unk=1)
    #     self.reward_provider = CTRRewardProvider(config.ctr_reward_provider_path)
    #     self.tgt_vocab = tgt_vocab
    self.padding_idx = tgt_padding_idx
Developer: THUDM, Project: KOBE, Lines: 63, Source: tensor2tensor.py
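The project's LabelSmoothingLoss is not reproduced on this page. A generic label-smoothed loss that honors an ignore_index might look like the sketch below; this is an illustrative variant, not the KOBE implementation, and how the smoothing mass is distributed differs between codebases:

import torch
import torch.nn.functional as F

def label_smoothing_loss_sketch(logits, target, smoothing, vocab_size, ignore_index):
    """Label-smoothed NLL over (N, vocab) logits that skips ignore_index targets."""
    log_probs = F.log_softmax(logits, dim=-1)
    # spread `smoothing` probability mass uniformly over the non-target classes
    true_dist = torch.full_like(log_probs, smoothing / (vocab_size - 1))
    true_dist.scatter_(1, target.unsqueeze(1), 1.0 - smoothing)
    loss = -(true_dist * log_probs).sum(dim=-1)   # per-position loss, shape (N,)
    mask = target != ignore_index
    return loss[mask].mean()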

Example 12: build_model

# Required import: import utils [as alias]
# Or: from utils import PAD [as alias]
def build_model(checkpoints, config, device):
    """
    build model, either Seq2Seq or Tensor2Tensor
    :param checkpoints: load checkpoint if there is pretrained model
    :return: model, optimizer and the print function
    """
    print(config)

    # model
    print("building model...\n")
    model = getattr(models, config.model)(
        config,
        src_padding_idx=utils.PAD,
        tgt_padding_idx=utils.PAD,
        label_smoothing=config.label_smoothing,
    )
    model.to(device)
    if config.param_init != 0.0:
        for p in model.parameters():
            p.data.uniform_(-config.param_init, config.param_init)
    if config.param_init_glorot:
        for p in model.parameters():
            if p.dim() > 1:
                xavier_uniform_(p)
    if checkpoints is not None:
        model.load_state_dict(checkpoints["model"])
    if config.pretrain:
        print("loading checkpoint from %s" % config.pretrain)
        pre_ckpt = torch.load(
            config.pretrain, map_location=lambda storage, loc: storage
        )["model"]
        model.load_state_dict(pre_ckpt)

    optim = models.Optim(
        config.optim,
        config.learning_rate,
        config.max_grad_norm,
        lr_decay=config.learning_rate_decay,
        start_decay_steps=config.start_decay_steps,
        beta1=config.beta1,
        beta2=config.beta2,
        decay_method=config.decay_method,
        warmup_steps=config.warmup_steps,
        model_size=config.hidden_size,
    )
    print(optim)
    optim.set_parameters(model.parameters())
    if checkpoints is not None:
        optim.optimizer.load_state_dict(checkpoints["optim"])

    param_count = sum([param.view(-1).size()[0] for param in model.parameters()])
    print(repr(model) + "\n\n")
    print("total number of parameters: %d\n\n" % param_count)

    return model, optim 
Developer: THUDM, Project: KOBE, Lines: 57, Source: train.py
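Passing utils.PAD as src_padding_idx and tgt_padding_idx typically flows down to nn.Embedding(..., padding_idx=PAD) inside the encoder and decoder, which pins the padding row of the embedding matrix to zeros and keeps it out of gradient updates. A minimal illustration with an assumed vocabulary size and dimension:

import torch
import torch.nn as nn

PAD = 0                                           # assumed padding id
emb = nn.Embedding(num_embeddings=100, embedding_dim=8, padding_idx=PAD)

ids = torch.tensor([[5, 9, PAD, PAD]])            # one padded source sequence
out = emb(ids)                                    # shape (1, 4, 8)
assert out[0, 2].abs().sum() == 0                 # PAD embeds to the zero vector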


Note: The utils.PAD attribute examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright remains with the original authors. Consult each project's license before distributing or reusing the code; do not republish without permission.