

Python functional.glu Method Code Examples

This article collects typical usage examples of the torch.nn.functional.glu method in Python. If you are wondering how functional.glu is used in practice, what it does, or what real code that calls it looks like, the curated examples below should help. You can also explore further usage examples from torch.nn.functional, the module this method belongs to.


The following presents 15 code examples of the functional.glu method, sorted by popularity by default.
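
Before the examples, here is a minimal standalone sketch (not taken from any of the projects below) of what F.glu computes: the input is split in half along the given dimension, and the first half is gated by the sigmoid of the second half.

import torch
import torch.nn.functional as F

x = torch.randn(4, 8)            # 8 features
y = F.glu(x, dim=-1)             # halves the last dimension -> (4, 4)

# Equivalent computation, spelled out:
a, b = x.chunk(2, dim=-1)
assert torch.allclose(y, a * torch.sigmoid(b))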

Example 1: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import glu [as alias]
def forward(self, inputs, context=None):
        temps = inputs
        if self.use_batch_norm:
            temps = self.batch_norm_layers[0](temps)
        temps = self.activation(temps)
        temps = self.linear_layers[0](temps)
        if self.use_batch_norm:
            temps = self.batch_norm_layers[1](temps)
        temps = self.activation(temps)
        temps = self.dropout(temps)
        temps = self.linear_layers[1](temps)
        if context is not None:
            temps = F.glu(
                torch.cat(
                    (temps, self.context_layer(context)),
                    dim=1
                ),
                dim=1
            )
        return inputs + temps 
Author: bayesiains, Project: nsf, Lines: 22, Source: resnet.py
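
A note on the shapes in this block: assuming temps has `features` channels, the context projection must produce the same number of channels, so that the concatenation has 2 * features channels and F.glu(..., dim=1) halves it back to features, keeping the residual addition valid. A minimal illustration under that assumption (the layer below is hypothetical, not the one defined in nsf):

import torch
import torch.nn.functional as F

batch, features, context_features = 4, 8, 5
temps = torch.randn(batch, features)
context = torch.randn(batch, context_features)
context_layer = torch.nn.Linear(context_features, features)  # assumed output size

gated = F.glu(torch.cat((temps, context_layer(context)), dim=1), dim=1)
assert gated.shape == (batch, features)  # same shape as temps, so `inputs + temps` works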

Example 2: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import glu [as alias]
def forward(self, inputs, context=None):
        temps = inputs
        if self.use_batch_norm:
            temps = self.batch_norm_layers[0](temps)
        temps = self.activation(temps)
        temps = self.linear_layers[0](temps)
        if self.use_batch_norm:
            temps = self.batch_norm_layers[1](temps)
        temps = self.activation(temps)
        temps = self.dropout(temps)
        temps = self.linear_layers[1](temps)
        if context is not None:
            temps = F.glu(
                torch.cat((temps, self.context_layer(context)), dim=1),
                dim=1
            )
        return inputs + temps 
Author: bayesiains, Project: nsf, Lines: 19, Source: made.py

Example 3: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import glu [as alias]
def forward(self, xs):
        """Forward pass.

        Args:
            xs (FloatTensor): `[B, T, d_model]`
        Returns:
            xs (FloatTensor): `[B, T, d_model]`

        """
        B, T, d_model = xs.size()
        assert d_model == self.d_model

        xs = xs.transpose(2, 1).contiguous()  # `[B, C, T]`
        xs = self.pointwise_conv1(xs)  # `[B, 2 * C, T]`
        xs = xs.transpose(2, 1)  # `[B, T, 2 * C]`
        xs = F.glu(xs)  # `[B, T, C]`
        xs = xs.transpose(2, 1).contiguous()  # `[B, C, T]`
        xs = self.depthwise_conv(xs)  # `[B, C, T]`

        xs = self.batch_norm(xs)
        xs = self.activation(xs)
        xs = self.pointwise_conv2(xs)  # `[B, C, T]`

        xs = xs.transpose(2, 1).contiguous()  # `[B, T, C]`
        return xs 
Author: hirofumi0810, Project: neural_sp, Lines: 27, Source: conformer_convolution.py
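
The shape annotations above imply a particular layer construction; the following is a hedged sketch (layer definitions assumed, not copied from neural_sp): pointwise_conv1 doubles the channel count so that F.glu (default dim=-1) can halve it again, and depthwise_conv convolves each channel separately via groups=d_model.

import torch
import torch.nn as nn
import torch.nn.functional as F

d_model, kernel_size = 16, 3
pointwise_conv1 = nn.Conv1d(d_model, 2 * d_model, kernel_size=1)
depthwise_conv = nn.Conv1d(d_model, d_model, kernel_size,
                           padding=(kernel_size - 1) // 2, groups=d_model)

xs = torch.randn(2, 8, d_model)                       # `[B, T, d_model]`
xs = pointwise_conv1(xs.transpose(2, 1))              # `[B, 2 * d_model, T]`
xs = F.glu(xs.transpose(2, 1))                        # `[B, T, d_model]`
xs = depthwise_conv(xs.transpose(2, 1).contiguous())  # `[B, d_model, T]`
print(xs.shape)                                       # torch.Size([2, 16, 8])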

Example 4: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import glu [as alias]
def forward(self, x, need_attention_weights=False):
        x = F.glu(self.linear(x), dim=-1) # B, Tt, Ts, C
        if not need_attention_weights:
            # Maxpool 
            x, _ = x.max(dim=2)  # B, Tt, C
            return x, None
        # Output attention weights:
        if need_attention_weights:
            # x in B, Tt, Ts, C
            B, Tt, Ts, C = x.size()
            x, indices = x.max(dim=2)
            # indices in B, Tt, C with each channel selecting a source position
            # Terrible but will do:
            attn = x.new_zeros(B, Tt, Ts)
            for i in range(Ts):
                attn[:,:,i] = indices.eq(i).sum(dim=-1)
            # Normalize
            attn = attn / attn.sum(dim=-1, keepdim=True)
        return x, attn 
Author: elbayadm, Project: attn2d, Lines: 21, Source: aggregators.py

Example 5: one_step

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import glu [as alias]
def one_step(self, x, need_attention_weights=False):
        x = x[:, -1:]  # B, 1, Ts, C
        x = F.glu(self.linear(x), dim=-1) # B, 1, Ts, C
        if not need_attention_weights:
            x, _ = x.max(dim=2)  # B, Tt, C
            return x, None
        # Output attention weights:
        if need_attention_weights:
            B, Tt, Ts, C = x.size()
            x, indices = x.max(dim=2)
            # indices in B, Tt, C with each channel selecting a source position
            # Terrible but will do:
            attn = x.new_zeros(B, Tt, Ts)
            for i in range(Ts):
                attn[:,:,i] = indices.eq(i).sum(dim=-1)
            # Normalize
            attn = attn / attn.sum(dim=-1, keepdim=True)
        return x, attn 
Author: elbayadm, Project: attn2d, Lines: 20, Source: aggregators.py

Example 6: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import glu [as alias]
def forward(self, inputs):
        temps = self.conv(inputs)
        outputs = F.glu(temps, dim=1)
        return outputs 
Author: bayesiains, Project: nsf, Lines: 6, Source: conv.py
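
For this block to work, the convolution has to emit twice the desired number of output channels, because F.glu(..., dim=1) halves the channel dimension. A minimal, hypothetical setup (sizes chosen for illustration, not taken from nsf):

import torch
import torch.nn as nn
import torch.nn.functional as F

out_channels = 16
conv = nn.Conv2d(in_channels=3, out_channels=2 * out_channels, kernel_size=3, padding=1)
inputs = torch.randn(2, 3, 8, 8)
outputs = F.glu(conv(inputs), dim=1)  # channel dim halved -> (2, 16, 8, 8)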

Example 7: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import glu [as alias]
def forward(self, src_tokens, src_lengths):
        # embed tokens and positions
        x = self.embed_tokens(src_tokens) + self.embed_positions(src_tokens)
        x = F.dropout(x, p=self.dropout, training=self.training)
        input_embedding = x.transpose(0, 1)

        # project to size of convolution
        x = self.fc1(x)

        # B x T x C -> T x B x C
        x = x.transpose(0, 1)

        # temporal convolutions
        for proj, conv, attention in zip(self.projections, self.convolutions, self.attention):
            residual = x if proj is None else proj(x)

            x = F.dropout(x, p=self.dropout, training=self.training)
            padding_l = (conv.kernel_size[0] - 1) // 2
            padding_r = conv.kernel_size[0] // 2
            x = F.pad(x, (0, 0, 0, 0, padding_l, padding_r))
            x = conv(x)
            x = F.glu(x, dim=2)
            if attention is not None:
                x = attention(x)
            x = (x + residual) * math.sqrt(0.5)

        # T x B x C -> B x T x C
        x = x.transpose(1, 0)

        # project back to size of embedding
        x = self.fc2(x)

        # scale gradients (this only affects backward, not forward)
        x = GradMultiply.apply(x, 1.0 / (2.0 * self.num_attention_layers))

        # add output to input embedding for attention
        y = (x + input_embedding.transpose(0, 1)) * math.sqrt(0.5)

        return {
            'encoder_out': (x, y),
        } 
Author: nusnlp, Project: crosentgec, Lines: 43, Source: fconv_self_att.py

Example 8: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import glu [as alias]
def forward(self, src_tokens, src_lengths, **kwargs):

        """
        src_tokens: padded tensor (B, T, C * feat)
        src_lengths: tensor of original lengths of input utterances (B,)
        """
        B, T, _ = src_tokens.size()
        x = src_tokens.transpose(1, 2).contiguous()  # (B, feat, T) assuming C == 1

        for layer_idx in range(len(self.conv_layers)):
            x = self.conv_layers[layer_idx](x)
            x = F.glu(x, dim=1)
            x = F.dropout(x, p=self.dropouts[layer_idx], training=self.training)

        x = x.transpose(1, 2).contiguous()  # (B, T, 908)
        x = self.linear_layers[0](x)
        x = F.glu(x, dim=2)
        x = F.dropout(x, p=self.dropouts[-1])
        x = self.linear_layers[1](x)

        assert x.size(0) == B
        assert x.size(1) == T

        encoder_out = x.transpose(0, 1)  # (T, B, vocab_size)

        # need to debug this -- find a simpler/elegant way in pytorch APIs
        encoder_padding_mask = (
            torch.arange(T).view(1, T).expand(B, -1).to(x.device)
            >= src_lengths.view(B, 1).expand(-1, T)
        ).t()  # (B x T) -> (T x B)

        return {
            "encoder_out": encoder_out,  # (T, B, vocab_size)
            "encoder_padding_mask": encoder_padding_mask,  # (T, B)
        } 
Author: pytorch, Project: fairseq, Lines: 37, Source: w2l_conv_glu_enc.py

Example 9: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import glu [as alias]
def forward(self, x):
        x1 = self.fc(x)
        if self.add_batch_norm:
            x1 = self.batch_norm(x1)
        x = th.cat((x, x1), 1)
        return F.glu(x, 1) 
Author: antoine77340, Project: howto100m, Lines: 8, Source: model.py
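
The concatenate-then-GLU pattern above implements context gating: because F.glu splits along dim 1 and gates the first half with the sigmoid of the second, the result equals x * sigmoid(x1). A small check of that identity (shapes assumed to be (B, D) for both halves):

import torch
import torch.nn.functional as F

x = torch.randn(4, 6)
x1 = torch.randn(4, 6)
gated = F.glu(torch.cat((x, x1), 1), 1)
assert torch.allclose(gated, x * torch.sigmoid(x1))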

Example 10: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import glu [as alias]
def forward(self, src_tokens, src_lengths):
        # embed tokens and positions
        x = self.embed_tokens(src_tokens) + self.embed_positions(src_tokens)
        x = F.dropout(x, p=self.dropout, training=self.training)
        input_embedding = x

        # project to size of convolution
        x = self.fc1(x)

        # B x T x C -> T x B x C
        x = x.transpose(0, 1)

        # temporal convolutions
        for proj, conv in zip(self.projections, self.convolutions):
            residual = x if proj is None else proj(x)
            x = F.dropout(x, p=self.dropout, training=self.training)
            padding_l = (conv.kernel_size[0] - 1) // 2
            padding_r = conv.kernel_size[0] // 2
            x = F.pad(x, (0, 0, 0, 0, padding_l, padding_r))
            x = conv(x)
            x = F.glu(x, dim=2)
            x = (x + residual) * math.sqrt(0.5)

        # T x B x C -> B x T x C
        x = x.transpose(1, 0)

        # project back to size of embedding
        x = self.fc2(x)

        # scale gradients (this only affects backward, not forward)
        x = GradMultiply.apply(x, 1.0 / (2.0 * self.num_attention_layers))

        # add output to input embedding for attention
        y = (x + input_embedding) * math.sqrt(0.5)

        return x, y 
Author: EdinburghNLP, Project: XSum, Lines: 38, Source: fconv.py

Example 11: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import glu [as alias]
def forward(self, xs):
        return F.glu(self.fc(xs), dim=-1) 
Author: hirofumi0810, Project: neural_sp, Lines: 4, Source: glu.py

Example 12: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import glu [as alias]
def forward(self, xs, xlens, task, use_cache=False, streaming=False,):
        """Forward pass.

        Args:
            xs (FloatTensor): `[B, T, F]`
            xlens (IntTensor): `[B]`
        Returns:
            eouts (dict):
                xs (FloatTensor): `[B, T', C_o * F]`
                xlens (IntTensor): `[B]`

        """
        eouts = {'ys': {'xs': None, 'xlens': None},
                 'ys_sub1': {'xs': None, 'xlens': None},
                 'ys_sub2': {'xs': None, 'xlens': None}}

        bs, xmax, input_dim = xs.size()
        xs = xs.transpose(2, 1).unsqueeze(3)  # `[B, in_ch (input_dim), T, 1]`

        xs = self.layers(xs)  # `[B, out_ch, T, 1]`
        bs, out_ch, xmax, freq = xs.size()
        xs = xs.transpose(2, 1).contiguous().view(bs, xmax, -1)  # `[B, T, out_ch * feat_dim]`

        # weight normalization + GLU for the last fully-connected layer
        xs = F.glu(self.fc_glu(xs), dim=2)

        # Bridge layer
        if self.bridge is not None:
            xs = self.bridge(xs)

        # NOTE: no subsampling is conducted

        if task in ['all', 'ys']:
            eouts['ys']['xs'], eouts['ys']['xlens'] = xs, xlens
        else:
            raise NotImplementedError
        return eouts 
Author: hirofumi0810, Project: neural_sp, Lines: 39, Source: gated_conv.py

Example 13: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import glu [as alias]
def forward(self, x):
        return F.glu(self.linear(x), dim=-1) 
Author: elbayadm, Project: attn2d, Lines: 4, Source: pa_gatenet3.py

Example 14: forward_gate

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import glu [as alias]
def forward_gate(self, x):
        for l in self.gate[:-1]:
            x = F.glu(l(x))
        return self.gate[-1](x) 
Author: elbayadm, Project: attn2d, Lines: 6, Source: hmm_controls2.py

Example 15: test_glu

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import glu [as alias]
def test_glu(self):
        inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
        output = F.glu(inp, dim=-1) 
Author: NVIDIA, Project: apex, Lines: 5, Source: test_pyprof_nvtx.py


Note: The torch.nn.functional.glu examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution and use should follow each project's license. Do not reproduce without permission.