

Python functional.embedding method code examples

This article collects typical usage examples of Python's torch.nn.functional.embedding method. If you are wondering what exactly functional.embedding does, how to call it, or what real uses of it look like, the curated code examples below should help. You can also explore further usage examples from its parent module, torch.nn.functional.


The sections below present 15 code examples of the functional.embedding method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
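
As a quick orientation before the project examples, here is a minimal, self-contained sketch of a bare functional.embedding call; the vocabulary size, embedding dimension, and index values are made up for illustration and are not taken from any of the projects below.

import torch
import torch.nn.functional as F

weight = torch.randn(10, 4)                       # a 10-token vocabulary with 4-dimensional vectors
indices = torch.tensor([[1, 2, 4], [4, 3, 9]])    # a batch of token-id sequences
out = F.embedding(indices, weight)                # gathers rows of `weight` by index
print(out.shape)                                  # torch.Size([2, 3, 4])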

Example 1: forward

# Required module import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import embedding [as alias]
def forward(self, g, lg, x, y, deg_g, deg_lg, pm_pd):
        pmpd_x = F.embedding(pm_pd, x)

        sum_x = sum(theta(z) for theta, z in zip(self.theta_list, self.aggregate(g, x)))

        g.set_e_repr({'y' : y})
        g.update_all(fn.copy_edge(edge='y', out='m'), fn.sum('m', 'pmpd_y'))
        pmpd_y = g.pop_n_repr('pmpd_y')

        x = self.theta_x(x) + self.theta_deg(deg_g * x) + sum_x + self.theta_y(pmpd_y)
        n = self.out_feats // 2
        x = th.cat([x[:, :n], F.relu(x[:, n:])], 1)
        x = self.bn_x(x)

        sum_y = sum(gamma(z) for gamma, z in zip(self.gamma_list, self.aggregate(lg, y)))

        y = self.gamma_y(y) + self.gamma_deg(deg_lg * y) + sum_y + self.gamma_x(pmpd_x)
        y = th.cat([y[:, :n], F.relu(y[:, n:])], 1)
        y = self.bn_y(y)

        return x, y 
Developer ID: dmlc, Project: dgl, Lines of code: 23, Source file: gnn.py
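
A hedged sketch of the pmpd_x line above: F.embedding is used here as a plain row-gather, selecting node-feature rows of x according to the incidence index tensor pm_pd. The tensor sizes below are illustrative and not taken from the dgl project.

import torch
import torch.nn.functional as F

x = torch.randn(6, 4)                    # features for 6 nodes
pm_pd = torch.tensor([0, 2, 2, 5, 1])    # one node index per edge endpoint
pmpd_x = F.embedding(pm_pd, x)           # (5, 4): gathered node-feature rows
print(pmpd_x.shape)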

Example 2: forward

# Required module import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import embedding [as alias]
def forward(self, x, encoding, source_masks=None, decoder_masks=None,
                input_embeddings=False, positions=None, feedback=None):
        # x : decoder_inputs

        if self.out_norm:
            out_weight = self.out.weight / (1e-6 + torch.sqrt((self.out.weight ** 2).sum(0, keepdim=True)))
        else:
            out_weight = self.out.weight

        if not input_embeddings:  # NOTE only for Transformer
            if x.ndimension() == 2:
                x = F.embedding(x, out_weight * math.sqrt(self.d_model))
            elif x.ndimension() == 3:  # softmax relaxation
                x = x @ out_weight * math.sqrt(self.d_model)  # batch x len x embed_size

        x += positional_encodings_like(x)
        x = self.dropout(x)

        if self.enc_last:
            for l, layer in enumerate(self.layers):
                x = layer(x, encoding[-1], mask_src=source_masks, mask_trg=decoder_masks, feedback=feedback)
        else:
            for l, (layer, enc) in enumerate(zip(self.layers, encoding[1:])):
                x = layer(x, enc, mask_src=source_masks, mask_trg=decoder_masks, feedback=feedback)
        return x 
Developer ID: nyu-dl, Project: dl4mt-nonauto, Lines of code: 27, Source file: model.py
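
A hedged sketch of the weight tying in this decoder: the output projection's weight matrix doubles as the input embedding table, scaled by sqrt(d_model). The nn.Linear layer and the sizes below are illustrative stand-ins, not the dl4mt-nonauto model.

import math
import torch
import torch.nn as nn
import torch.nn.functional as F

d_model, vocab = 8, 100
out = nn.Linear(d_model, vocab, bias=False)       # output projection; weight shape (vocab, d_model)
tokens = torch.randint(0, vocab, (2, 5))          # decoder input token ids
x = F.embedding(tokens, out.weight * math.sqrt(d_model))  # reuse the projection weight as the embedding table
print(x.shape)                                    # torch.Size([2, 5, 8])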

Example 3: _read_embeddings_from_hdf5

# Required module import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import embedding [as alias]
def _read_embeddings_from_hdf5(
    embeddings_filename: str, embedding_dim: int, vocab: Vocabulary, namespace: str = "tokens"
) -> torch.FloatTensor:
    """
    Reads from an hdf5-formatted file. The embedding matrix is assumed to
    be keyed by 'embedding' and of size `(num_tokens, embedding_dim)`.
    """
    with h5py.File(embeddings_filename, "r") as fin:
        embeddings = fin["embedding"][...]

    if list(embeddings.shape) != [vocab.get_vocab_size(namespace), embedding_dim]:
        raise ConfigurationError(
            "Read shape {0} embeddings from the file, but expected {1}".format(
                list(embeddings.shape), [vocab.get_vocab_size(namespace), embedding_dim]
            )
        )

    return torch.FloatTensor(embeddings) 
Developer ID: allenai, Project: allennlp, Lines of code: 20, Source file: embedding.py
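
A hedged sketch of the file layout this helper expects: a single hdf5 dataset named "embedding" of shape (num_tokens, embedding_dim). The file name and sizes below are made up for illustration.

import h5py
import numpy as np
import torch

with h5py.File("embeddings_demo.hdf5", "w") as fout:
    fout.create_dataset("embedding", data=np.random.rand(50, 16).astype("float32"))

with h5py.File("embeddings_demo.hdf5", "r") as fin:
    embeddings = fin["embedding"][...]            # read the full (num_tokens, embedding_dim) matrix

weight = torch.FloatTensor(embeddings)            # ready to be used as an embedding weight
print(weight.shape)                               # torch.Size([50, 16])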

Example 4: _get_num_tokens_from_first_line

# Required module import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import embedding [as alias]
def _get_num_tokens_from_first_line(line: str) -> Optional[int]:
        """ This function takes in input a string and if it contains 1 or 2 integers, it assumes the
        largest one it the number of tokens. Returns None if the line doesn't match that pattern. """
        fields = line.split(" ")
        if 1 <= len(fields) <= 2:
            try:
                int_fields = [int(x) for x in fields]
            except ValueError:
                return None
            else:
                num_tokens = max(int_fields)
                logger.info(
                    "Recognized a header line in the embedding file with number of tokens: %d",
                    num_tokens,
                )
                return num_tokens
        return None 
Developer ID: allenai, Project: allennlp, Lines of code: 19, Source file: embedding.py
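
A hedged usage sketch for the header-detection helper above, assuming it is importable together with its module-level logger; the strings follow the word2vec text-format convention of "<num_tokens> <embedding_dim>" and are made up for illustration.

print(_get_num_tokens_from_first_line("400000 300"))       # header line -> 400000
print(_get_num_tokens_from_first_line("the 0.1 0.2 0.3"))  # ordinary vector line -> None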

Example 5: forward

# Required module import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import embedding [as alias]
def forward(self, unique_word_chars, unique_word_lengths, sequences_as_uniqs=None):
        long_tensor = torch.cuda.LongTensor if torch.cuda.device_count() > 0 else torch.LongTensor
        embedded_chars = self._embeddings(unique_word_chars.type(long_tensor))
        # [N, S, L]
        conv_out = self._conv(embedded_chars.transpose(1, 2))
        # [N, L]
        conv_mask = misc.mask_for_lengths(unique_word_lengths)
        conv_out = conv_out + conv_mask.unsqueeze(1)
        embedded_words = conv_out.max(2)[0]

        if sequences_as_uniqs is None:
            return embedded_words
        else:
            if not isinstance(sequences_as_uniqs, list):
                sequences_as_uniqs = [sequences_as_uniqs]

            all_embedded = []
            for word_idxs in sequences_as_uniqs:
                all_embedded.append(functional.embedding(
                    word_idxs.type(long_tensor), embedded_words))
            return all_embedded 
Developer ID: uclnlp, Project: jack, Lines of code: 23, Source file: embedding.py
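
A hedged sketch of the re-lookup at the end of this forward pass: after a vector has been computed for every unique word in the batch, F.embedding maps sequences of unique-word indices back to those vectors. The sizes below are illustrative.

import torch
import torch.nn.functional as F

embedded_words = torch.randn(12, 8)                       # one convolutional vector per unique word
word_idxs = torch.tensor([[0, 3, 3, 7], [2, 0, 11, 5]])   # sequences expressed as unique-word indices
seq_vectors = F.embedding(word_idxs, embedded_words)      # (2, 4, 8): per-token word vectors
print(seq_vectors.shape)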

Example 6: embedded_dropout

# Required module import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import embedding [as alias]
def embedded_dropout(embed, words, dropout=0.1, scale=None):
    if dropout:
        mask = embed.weight.data.new().resize_((embed.weight.size(0), 1))
        mask = mask.bernoulli_(1 - dropout)
        mask = mask.expand_as(embed.weight) / (1 - dropout)
        masked_embed_weight = mask * embed.weight
    else:
        masked_embed_weight = embed.weight
    if scale:
        masked_embed_weight = scale.expand_as(masked_embed_weight) * masked_embed_weight

    padding_idx = embed.padding_idx
    if padding_idx is None:
        padding_idx = -1
    X = F.embedding(words, masked_embed_weight, padding_idx, embed.max_norm,
        embed.norm_type, embed.scale_grad_by_freq, embed.sparse)
    return X 
Developer ID: matthewmackay, Project: reversible-rnn, Lines of code: 19, Source file: embed_regularize.py
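
A hedged usage sketch for the embedded_dropout helper above: whole rows of the embedding weight are zeroed together, so every occurrence of a dropped word shares the same zero vector. The embedding layer and token ids below are illustrative.

import torch
import torch.nn as nn

embed = nn.Embedding(100, 16, padding_idx=0)
words = torch.randint(1, 100, (4, 7))                  # a batch of token-id sequences
dropped = embedded_dropout(embed, words, dropout=0.1)  # (4, 7, 16) with ~10% of word vectors zeroed
print(dropped.shape)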

Example 7: beam_decode

# Required module import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import embedding [as alias]
def beam_decode(self, src, src_lang_idx, tgt_lang_idx, logit_mask):
        embed_dim = self.args.embed_dim
        max_len = src.size(1) + 51
        pos_embedding = ut.get_positional_encoding(embed_dim, max_len)
        word_embedding = F.normalize(self.word_embedding, dim=-1) if self.args.fix_norm else self.word_embedding
        logit_mask = logit_mask == 1 if self.logit_mask is None else self.logit_mask
        tgt_lang_embed = self.lang_embedding[tgt_lang_idx]

        encoder_inputs = self.get_input(src, src_lang_idx, word_embedding, pos_embedding)
        encoder_mask = (src == ac.PAD_ID).unsqueeze(1).unsqueeze(2)
        encoder_outputs = self.encoder(encoder_inputs, encoder_mask)

        def get_tgt_inp(tgt, time_step):
            word_embed = F.embedding(tgt.type(src.type()), word_embedding) * self.scale
            pos_embed = pos_embedding[time_step, :].reshape(1, 1, -1)
            return word_embed + tgt_lang_embed + pos_embed

        def logprob_fn(decoder_output):
            logits = self.logit_fn(decoder_output, word_embedding, logit_mask)
            return F.log_softmax(logits, dim=-1)

        # following "Attention Is All You Need", we decode up to src_len + 50 tokens only
        max_lengths = torch.sum(src != ac.PAD_ID, dim=-1).type(src.type()) + 50
        return self.decoder.beam_decode(encoder_outputs, encoder_mask, get_tgt_inp, logprob_fn, ac.BOS_ID, ac.EOS_ID, max_lengths, beam_size=self.args.beam_size, alpha=self.args.beam_alpha) 
Developer ID: tnq177, Project: transformers_without_tears, Lines of code: 26, Source file: model.py

Example 8: show_segmentation

# Required module import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import embedding [as alias]
def show_segmentation(img, gt, pred, mean, std, colormap):
    colormap = colormap.to(img.device)
    gt = F.embedding(gt, colormap).permute(2, 0, 1).div(255)
    pred = F.embedding(pred, colormap).permute(2, 0, 1).div(255)
    mean = torch.as_tensor(mean, dtype=torch.float32, device=img.device)
    std = torch.as_tensor(std, dtype=torch.float32, device=img.device)
    img = img * std[:, None, None] + mean[:, None, None]
    grid = torch.stack([img, gt, pred], 0)
    grid = make_grid(grid, nrow=3)
    grid = (
        grid.mul_(255)
        .add_(0.5)
        .clamp_(0, 255)
        .permute(1, 2, 0)
        .to('cpu', torch.uint8)
        .numpy()
    )
    img = Image.fromarray(grid)

    return img 
Developer ID: rosinality, Project: ocr-pytorch, Lines of code: 22, Source file: util.py
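
A hedged sketch of the colormap trick used above: F.embedding treats an integer label map as indices into a (num_classes, 3) color table, turning a segmentation mask into an RGB tensor in one call. The colormap values and label map below are illustrative.

import torch
import torch.nn.functional as F

colormap = torch.tensor([[0, 0, 0], [255, 0, 0], [0, 255, 0]], dtype=torch.float)  # class id -> RGB
labels = torch.randint(0, 3, (4, 5))                          # an H x W map of class ids
colored = F.embedding(labels, colormap).permute(2, 0, 1)      # (3, H, W) color tensor
print(colored.shape)                                          # torch.Size([3, 4, 5])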

Example 9: forward

# Required module import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import embedding [as alias]
def forward(self, data, weights=None):
        '''
            @param data dictionary
                @key text: batch_size * max_text_len
                @key text_len: batch_size
                @key idf: vocab_size
            @param weights placeholder used for maml
            @return output: batch_size * embedding_dim
        '''
        ebd = self.ebd(data, weights)

        if self.args.embedding == 'idf':
            score = F.embedding(data['text'], data['idf'])
        elif self.args.embedding == 'iwf':
            score = F.embedding(data['text'], data['iwf'])

        ebd = torch.sum(ebd * score, dim=1)
        ebd = ebd / data['text_len'].unsqueeze(-1).float()

        return ebd 
Developer ID: YujiaBao, Project: Distributional-Signatures, Lines of code: 22, Source file: idf.py
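
A hedged sketch of the idf-weighting step above: when the lookup table has shape (vocab_size, 1), F.embedding returns a per-token scalar score that can weight the word embeddings before pooling. All sizes and values below are illustrative.

import torch
import torch.nn.functional as F

idf = torch.rand(100, 1)                    # one idf score per vocabulary entry
text = torch.randint(0, 100, (2, 6))        # batch of token ids
score = F.embedding(text, idf)              # (2, 6, 1): per-token weights
ebd = torch.rand(2, 6, 32)                  # word embeddings for the same tokens
pooled = torch.sum(ebd * score, dim=1)      # idf-weighted sum over the sequence
print(pooled.shape)                         # torch.Size([2, 32])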

Example 10: precompute_stats

# Required module import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import embedding [as alias]
def precompute_stats(train_data, val_data, test_data, args):
    '''
    Compute idf and iwf over the training data
    '''
    if args.embedding in ['idf', 'meta', 'meta_mlp']:
        idf = _compute_idf(train_data)

        train_data['idf'] = idf
        val_data['idf'] = idf
        test_data['idf'] = idf

    if args.embedding in ['iwf', 'meta', 'meta_mlp']:
        iwf = _compute_iwf(train_data)
        train_data['iwf'] = iwf
        val_data['iwf'] = iwf
        test_data['iwf'] = iwf 
Developer ID: YujiaBao, Project: Distributional-Signatures, Lines of code: 18, Source file: stats.py

Example 11: embedded_dropout

# Required module import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import embedding [as alias]
def embedded_dropout(embed, words, dropout=0.1, scale=None):
  if dropout:
    mask = embed.weight.data.new().resize_((embed.weight.size(0), 1)).bernoulli_(1 - dropout).expand_as(embed.weight) / (1 - dropout)
    mask = Variable(mask)
    masked_embed_weight = mask * embed.weight
  else:
    masked_embed_weight = embed.weight
  if scale:
    masked_embed_weight = scale.expand_as(masked_embed_weight) * masked_embed_weight

  padding_idx = embed.padding_idx
  if padding_idx is None:
      padding_idx = -1
  X = F.embedding(words, masked_embed_weight,
    padding_idx, embed.max_norm, embed.norm_type,
    embed.scale_grad_by_freq, embed.sparse
  )
  return X 
Developer ID: zihangdai, Project: mos, Lines of code: 20, Source file: embed_regularize.py

Example 12: forward

# Required module import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import embedding [as alias]
def forward(self, inputs):  # pylint: disable=arguments-differ
        original_inputs = inputs
        if original_inputs.dim() > 2:
            inputs = inputs.view(-1, inputs.size(-1))
        embedded = embedding(inputs, self.weight,
                             max_norm=self.max_norm,
                             norm_type=self.norm_type,
                             scale_grad_by_freq=self.scale_grad_by_freq,
                             sparse=self.sparse)
        if original_inputs.dim() > 2:
            view_args = list(original_inputs.size()) + [embedded.size(-1)]
            embedded = embedded.view(*view_args)
        if self._projection:
            projection = self._projection
            for _ in range(embedded.dim() - 2):
                projection = TimeDistributed(projection)
            embedded = projection(embedded)
        return embedded

    # Custom logic requires custom from_params. 
Developer ID: plasticityai, Project: magnitude, Lines of code: 22, Source file: embedding.py
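
A hedged sketch of the higher-rank handling above: index tensors with more than two dimensions are flattened to 2-D before the lookup and reshaped afterwards with the embedding dimension appended. The shapes below are illustrative.

import torch
from torch.nn.functional import embedding

weight = torch.randn(20, 6)
inputs = torch.randint(0, 20, (2, 3, 4))              # e.g. (batch, num_spans, span_length)
flat = inputs.view(-1, inputs.size(-1))               # (6, 4)
embedded = embedding(flat, weight)                    # (6, 4, 6)
view_args = list(inputs.size()) + [embedded.size(-1)]
embedded = embedded.view(*view_args)                  # back to (2, 3, 4, 6)
print(embedded.shape)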

Example 13: _read_embeddings_from_hdf5

# Required module import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import embedding [as alias]
def _read_embeddings_from_hdf5(embeddings_filename,
                               embedding_dim,
                               vocab,
                               namespace=u"tokens"):
    u"""
    Reads from an hdf5-formatted file. The embedding matrix is assumed to
    be keyed by 'embedding' and of size ``(num_tokens, embedding_dim)``.
    """
    with h5py.File(embeddings_filename, u'r') as fin:
        embeddings = fin[u'embedding'][...]

    if list(embeddings.shape) != [vocab.get_vocab_size(namespace), embedding_dim]:
        raise ConfigurationError(
                u"Read shape {0} embeddings from the file, but expected {1}".format(
                        list(embeddings.shape), [vocab.get_vocab_size(namespace), embedding_dim]))

    return torch.FloatTensor(embeddings) 
Developer ID: plasticityai, Project: magnitude, Lines of code: 19, Source file: embedding.py

Example 14: _read_embeddings_from_hdf5

# Required module import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import embedding [as alias]
def _read_embeddings_from_hdf5(embeddings_filename: str,
                               embedding_dim: int,
                               vocab: Vocabulary,
                               namespace: str = "tokens",
                               amr: bool = False) -> torch.FloatTensor:
    """
    Reads from an hdf5-formatted file. The embedding matrix is assumed to
    be keyed by 'embedding' and of size ``(num_tokens, embedding_dim)``.
    """
    with h5py.File(embeddings_filename, 'r') as fin:
        embeddings = fin['embedding'][...]

    if list(embeddings.shape) != [vocab.get_vocab_size(namespace), embedding_dim]:
        raise ConfigurationError(
                "Read shape {0} embeddings from the file, but expected {1}".format(
                        list(embeddings.shape), [vocab.get_vocab_size(namespace), embedding_dim]))

    return torch.FloatTensor(embeddings) 
Developer ID: jcyk, Project: gtos, Lines of code: 20, Source file: embedding.py

Example 15: forward

# Required module import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import embedding [as alias]
def forward(self, inputs):  # pylint:disable=arguments-differ
        """Embeds `inputs` with the dropped out embedding weight matrix."""
        if self.training:
            dropout = self.dropout
        else:
            dropout = 0

        if dropout:
            mask = self.weight.data.new(self.weight.size(0), 1)
            mask.bernoulli_(1 - dropout)
            mask = mask.expand_as(self.weight)
            mask = mask / (1 - dropout)
            masked_weight = self.weight * Variable(mask)
        else:
            masked_weight = self.weight
        if self.scale and self.scale != 1:
            masked_weight = masked_weight * self.scale

        return F.embedding(inputs,
                           masked_weight,
                           max_norm=self.max_norm,
                           norm_type=self.norm_type,
                           scale_grad_by_freq=self.scale_grad_by_freq,
                           sparse=self.sparse) 
Developer ID: carpedm20, Project: ENAS-pytorch, Lines of code: 26, Source file: shared_rnn.py


Note: The torch.nn.functional.embedding examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's license before redistributing or using the code; do not reproduce this article without permission.