

Python nn.ConstantPad1d Method Code Examples

This article collects typical usage examples of the torch.nn.ConstantPad1d method in Python. If you are wrestling with questions such as: What exactly does nn.ConstantPad1d do? How is it used? What do real examples look like? Then the curated code examples below may help. You can also explore further usage examples from the torch.nn module.


The following presents 6 code examples of nn.ConstantPad1d, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
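Before the collected examples, here is a minimal sketch of what nn.ConstantPad1d itself does: it pads the last dimension of a tensor with a constant value, accepting either a single int (same padding on both sides) or a (left, right) tuple for asymmetric padding. The shapes and values below are purely illustrative.

import torch
from torch import nn

x = torch.ones(1, 2, 4)                    # (batch, channels, length)

pad_sym = nn.ConstantPad1d(2, 0.0)         # pad 2 elements on each side with 0
print(pad_sym(x).shape)                    # torch.Size([1, 2, 8])

pad_right = nn.ConstantPad1d((0, 3), 9.0)  # pad 3 elements on the right with 9
print(pad_right(x)[0, 0])                  # tensor([1., 1., 1., 1., 9., 9., 9.])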

Example 1: _make_conv_pool_block

# Required import: from torch import nn [as alias]
# Or: from torch.nn import ConstantPad1d [as alias]
# note: the `cls` first argument suggests this is a classmethod in the original class
def _make_conv_pool_block(
    cls,
    in_channels: int,
    out_channels: int,
    kernel_size: int,
    activation: nn.Module,
    pool_size: int,
) -> nn.Module:
    """Make conv pool block."""
    return nn.Sequential(
        nn.ConstantPad1d((0, kernel_size - 1), 0),
        nn.Conv1d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size
        ),
        activation,
        nn.MaxPool1d(kernel_size=pool_size)
    )
Developer: NTMC-Community, Project: MatchZoo-py, Lines: 21, Source: arci.py
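The (0, kernel_size - 1) right-only padding makes the convolution length-preserving, so nn.MaxPool1d pools over the full sequence. A standalone sketch of the same block with made-up channel counts and kernel size (not taken from MatchZoo-py):

import torch
from torch import nn

kernel_size = 3
block = nn.Sequential(
    nn.ConstantPad1d((0, kernel_size - 1), 0),  # right-pad by kernel_size - 1
    nn.Conv1d(in_channels=8, out_channels=16, kernel_size=kernel_size),
    nn.ReLU(),
    nn.MaxPool1d(kernel_size=2),
)
x = torch.randn(4, 8, 20)    # (batch, channels, length)
print(block(x).shape)        # torch.Size([4, 16, 10]): conv keeps length 20, pooling halves it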

Example 2: decode

# Required import: from torch import nn [as alias]
# Or: from torch.nn import ConstantPad1d [as alias]
def decode(self, data_loader):
    self.model.eval()
    with torch.no_grad():
        for i, data in enumerate(data_loader):
            # predict phones using AM
            xs, frame_lens, filenames = data
            if self.use_cuda:
                xs = xs.cuda(non_blocking=True)
            ys_hat = self.model(xs)
            ys_hat = ys_hat.unsqueeze(dim=0).transpose(1, 2)
            pos = torch.cat((torch.zeros((1, ), dtype=torch.long), torch.cumsum(frame_lens, dim=0)))
            ys_hats = [ys_hat.narrow(2, p, l).clone() for p, l in zip(pos[:-1], frame_lens)]
            max_len = torch.max(frame_lens)
            # right-pad each utterance to the batch max length so they can be stacked
            ys_hats = [nn.ConstantPad1d((0, max_len - yh.size(2)), 0)(yh) for yh in ys_hats]
            ys_hat = torch.cat(ys_hats).transpose(1, 2)
            # latgen decoding
            if self.use_cuda:
                ys_hat = ys_hat.cpu()
            words, alignment, w_sizes, a_sizes = self.decoder(ys_hat, frame_lens)
            # print results
            ys_hat = [y[:s] for y, s in zip(ys_hat, frame_lens)]
            words = [w[:s] for w, s in zip(words, w_sizes)]
            for results in zip(filenames, ys_hat, words):
                self.print_result(*results)
Developer: jinserk, Project: pytorch-asr, Lines: 26, Source: predictor.py
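The key ConstantPad1d pattern in decode is right-padding each utterance's score tensor to the batch maximum length so that torch.cat can stack them. A reduced, self-contained sketch of just that step (dummy shapes, not from pytorch-asr):

import torch
from torch import nn

frame_lens = torch.tensor([3, 5, 2])
ys_hats = [torch.randn(1, 10, int(l)) for l in frame_lens]  # (1, classes, frames)
max_len = int(frame_lens.max())
padded = [nn.ConstantPad1d((0, max_len - y.size(2)), 0)(y) for y in ys_hats]
print(torch.cat(padded).shape)   # torch.Size([3, 10, 5])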

Example 3: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import ConstantPad1d [as alias]
def __init__(self,
             in_channels,
             out_channels,
             kernel_size,
             stride,
             padding,
             activation=None):
    super(BatchNormConv1d, self).__init__()
    self.padding = padding
    self.padder = nn.ConstantPad1d(padding, 0)
    self.conv1d = nn.Conv1d(
        in_channels,
        out_channels,
        kernel_size=kernel_size,
        stride=stride,
        padding=0,
        bias=False)
    # Following tensorflow's default parameters
    self.bn = nn.BatchNorm1d(out_channels, momentum=0.99, eps=1e-3)
    self.activation = activation
    # self.init_layers()
Developer: mozilla, Project: TTS, Lines: 24, Source: tacotron.py
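Keeping the padding in a separate nn.ConstantPad1d layer (and passing padding=0 to nn.Conv1d) allows an arbitrary (left, right) split, which Conv1d's own integer padding argument cannot express. A minimal sketch of the pattern with illustrative channel counts:

import torch
from torch import nn

layer = nn.Sequential(
    nn.ConstantPad1d((2, 1), 0),                 # asymmetric: 2 left, 1 right
    nn.Conv1d(4, 8, kernel_size=4, stride=1, padding=0, bias=False),
    nn.BatchNorm1d(8, momentum=0.99, eps=1e-3),  # TensorFlow-style defaults
)
x = torch.randn(2, 4, 16)
print(layer(x).shape)                            # torch.Size([2, 8, 16]): length preserved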

Example 4: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import ConstantPad1d [as alias]
def __init__(self, input_channels, output_channels, kernel,
             dropout=0.0, activation='identity', dilation=1, groups=1, batch_norm=True):
    super(ConvBlock, self).__init__()

    self._groups = groups

    # total padding of (kernel - 1) * dilation keeps the output length equal to
    # the input length; even kernels need an asymmetric (p, p + 1) split
    p = (kernel - 1) * dilation // 2
    padding = p if kernel % 2 != 0 else (p, p + 1)
    layers = [ConstantPad1d(padding, 0.0),
              Conv1d(input_channels, output_channels, kernel, padding=0, dilation=dilation, groups=groups, bias=(not batch_norm))]

    if batch_norm:
        layers += [BatchNorm1d(output_channels)]

    layers += [get_activation(activation)]
    layers += [Dropout(dropout)]

    self._block = Sequential(*layers)
Developer: Tomiinek, Project: Multilingual_Text_to_Speech, Lines: 20, Source: layers.py
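The (p, p + 1) branch is what makes "same"-length outputs possible for even kernel sizes, where a symmetric integer pad cannot supply the required odd total of kernel - 1. A quick standalone check of the arithmetic (dilation fixed at 1; not taken from the project):

import torch
from torch import nn

def same_pad(kernel, dilation=1):
    # total padding for a length-preserving conv is (kernel - 1) * dilation
    p = (kernel - 1) * dilation // 2
    return p if kernel % 2 != 0 else (p, p + 1)

x = torch.randn(1, 3, 10)
for k in (3, 4, 5, 6):
    block = nn.Sequential(nn.ConstantPad1d(same_pad(k), 0.0), nn.Conv1d(3, 3, k))
    assert block(x).shape[-1] == 10   # length preserved for odd and even kernels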

Example 5: pad_occurrences

# Required import: from torch import nn [as alias]
# Or: from torch.nn import ConstantPad1d [as alias]
def pad_occurrences(self, occurrences):
    padding = self.unroll_target - occurrences.shape[0]
    if padding != 0:
        # pad on the right with the constant 1 (not 0) up to the unroll target
        padding = nn.ConstantPad1d((0, padding), 1)
        occurrences = padding(occurrences)
    return occurrences
Developer: ranahanocka, Project: MeshCNN, Lines: 8, Source: mesh_unpool.py
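Unlike the other examples, this one pads with the constant value 1 rather than 0. A hypothetical sketch of the same idea on a 2-D tensor, with unroll_target made up and the padded (last) dimension chosen for illustration:

import torch
from torch import nn

unroll_target = 6
occurrences = torch.tensor([[2., 3., 1.]])   # (1, current_length)
pad = nn.ConstantPad1d((0, unroll_target - occurrences.shape[-1]), 1)
print(pad(occurrences))                      # tensor([[2., 3., 1., 1., 1., 1.]])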

Example 6: _create_base_network

# Required import: from torch import nn [as alias]
# Or: from torch.nn import ConstantPad1d [as alias]
def _create_base_network(self) -> nn.Module:
    """
    Apply convolution and max-pooling operations to each letter n-gram.

    The input shape is `fixed_text_length` * `number of letter-ngrams`;
    as described in the paper, `n` is 3 and the number of letter-trigrams
    is about 30,000 according to the authors' observation.

    :return: A :class:`nn.Module` of the CDSSM network, tensor in, tensor out.
    """
    pad = nn.ConstantPad1d((0, self._params['kernel_size'] - 1), 0)
    conv = nn.Conv1d(
        in_channels=self._params['vocab_size'],
        out_channels=self._params['filters'],
        kernel_size=self._params['kernel_size']
    )
    activation = parse_activation(
        self._params['conv_activation_func']
    )
    dropout = nn.Dropout(p=self._params['dropout_rate'])
    pool = nn.AdaptiveMaxPool1d(1)
    squeeze = Squeeze()
    mlp = self._make_multi_layer_perceptron_layer(
        self._params['filters']
    )
    return nn.Sequential(
        pad, conv, activation, dropout, pool, squeeze, mlp
    )
Developer: NTMC-Community, Project: MatchZoo-py, Lines: 30, Source: cdssm.py
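As in Example 1, the (0, kernel_size - 1) pad keeps the convolution output at the full text length, and nn.AdaptiveMaxPool1d(1) then collapses the time axis to one feature per filter. A reduced standalone sketch with invented parameter values, where nn.Flatten and nn.Linear stand in for MatchZoo-py's Squeeze and MLP helpers:

import torch
from torch import nn

vocab_size, filters, kernel_size = 300, 64, 3
net = nn.Sequential(
    nn.ConstantPad1d((0, kernel_size - 1), 0),
    nn.Conv1d(vocab_size, filters, kernel_size),
    nn.Tanh(),
    nn.Dropout(p=0.1),
    nn.AdaptiveMaxPool1d(1),   # (batch, filters, 1)
    nn.Flatten(),              # stand-in for Squeeze: (batch, filters)
    nn.Linear(filters, 32),    # stand-in for the MLP
)
x = torch.randn(2, vocab_size, 10)   # (batch, letter-trigram vocab, text length)
print(net(x).shape)                  # torch.Size([2, 32])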


Note: The torch.nn.ConstantPad1d examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution or use should follow the corresponding project's license. Do not reproduce without permission.