

Python functional.dropout method code examples

This article collects typical usage examples of the torch.nn.functional.dropout method in Python. If you are wondering how to call functional.dropout, what its arguments mean, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the torch.nn.functional module.


Below are 15 code examples of the functional.dropout method, sorted by popularity by default.
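Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects listed below) of what F.dropout does: during training it zeroes each element with probability p and rescales the survivors by 1/(1-p); with training=False it returns the input unchanged.

import torch
import torch.nn.functional as F

x = torch.randn(4, 8)  # arbitrary example tensor

# Training mode: each element is zeroed with probability p and the
# remaining elements are scaled by 1/(1-p) to keep the expected value.
y_train = F.dropout(x, p=0.5, training=True)

# Evaluation mode: dropout is a no-op; inside an nn.Module you would
# normally forward the module's own flag via training=self.training.
y_eval = F.dropout(x, p=0.5, training=False)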

Example 1: enc_ans_features

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import dropout [as alias]
def enc_ans_features(self, x_type_bow, x_types, x_type_bow_len, x_path_bow, x_paths, x_path_bow_len, x_ctx_ents, x_ctx_ent_len, x_ctx_ent_num):
        '''
        x_types: answer type
        x_paths: answer path, i.e., bow of relation
        x_ctx_ents: answer context, i.e., bow of entity words, (batch_size, num_cands, num_ctx, L)
        '''
        # ans_types = torch.mean(self.ent_type_embed(x_types.view(-1, x_types.size(-1))), 1).view(x_types.size(0), x_types.size(1), -1)
        ans_type_bow = (self.lstm_enc_type(x_type_bow.view(-1, x_type_bow.size(-1)), x_type_bow_len.view(-1))[1]).view(x_type_bow.size(0), x_type_bow.size(1), -1)
        ans_path_bow = (self.lstm_enc_path(x_path_bow.view(-1, x_path_bow.size(-1)), x_path_bow_len.view(-1))[1]).view(x_path_bow.size(0), x_path_bow.size(1), -1)
        ans_paths = torch.mean(self.relation_embed(x_paths.view(-1, x_paths.size(-1))), 1).view(x_paths.size(0), x_paths.size(1), -1)

        # Avg over ctx
        ctx_num_mask = create_mask(x_ctx_ent_num.view(-1), x_ctx_ents.size(2), self.use_cuda).view(x_ctx_ent_num.shape + (-1,))
        ans_ctx_ent = (self.lstm_enc_ctx(x_ctx_ents.view(-1, x_ctx_ents.size(-1)), x_ctx_ent_len.view(-1))[1]).view(x_ctx_ents.size(0), x_ctx_ents.size(1), x_ctx_ents.size(2), -1)
        ans_ctx_ent = ctx_num_mask.unsqueeze(-1) * ans_ctx_ent
        ans_ctx_ent = torch.sum(ans_ctx_ent, dim=2) / torch.clamp(x_ctx_ent_num.float().unsqueeze(-1), min=VERY_SMALL_NUMBER)

        if self.ans_enc_dropout:
            # ans_types = F.dropout(ans_types, p=self.ans_enc_dropout, training=self.training)
            ans_type_bow = F.dropout(ans_type_bow, p=self.ans_enc_dropout, training=self.training)
            ans_path_bow = F.dropout(ans_path_bow, p=self.ans_enc_dropout, training=self.training)
            ans_paths = F.dropout(ans_paths, p=self.ans_enc_dropout, training=self.training)
            ans_ctx_ent = F.dropout(ans_ctx_ent, p=self.ans_enc_dropout, training=self.training)
        return ans_type_bow, None, ans_path_bow, ans_paths, ans_ctx_ent 
Author: hugochan, Project: BAMnet, Lines: 26, Source: modules.py

Example 2: __init__

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import dropout [as alias]
def __init__(self, vocab_size, embed_size, hidden_size, \
                seq_enc_type='lstm', word_emb_dropout=None,
                cnn_kernel_size=[3], bidirectional=False, \
                shared_embed=None, init_word_embed=None, use_cuda=True):
        if seq_enc_type in ('lstm', 'gru'):
            self.que_enc = EncoderRNN(vocab_size, embed_size, hidden_size, \
                        dropout=word_emb_dropout, \
                        bidirectional=bidirectional, \
                        shared_embed=shared_embed, \
                        init_word_embed=init_word_embed, \
                        rnn_type=seq_enc_type, \
                        use_cuda=use_cuda)

        elif seq_enc_type == 'cnn':
            self.que_enc = EncoderCNN(vocab_size, embed_size, hidden_size, \
                        kernel_size=cnn_kernel_size, dropout=word_emb_dropout, \
                        shared_embed=shared_embed, \
                        init_word_embed=init_word_embed, \
                        use_cuda=use_cuda)
        else:
            raise RuntimeError('Unknown SeqEncoder type: {}'.format(seq_enc_type)) 
Author: hugochan, Project: BAMnet, Lines: 23, Source: modules.py

Example 3: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import dropout [as alias]
def forward(self, x, x_len=None):
        """x: [batch_size * max_length]
           x_len: reserved
        """
        x = self.embed(x)
        if self.dropout:
            x = F.dropout(x, p=self.dropout, training=self.training)
        # Turn (batch_size, seq_len, embed_size) into (batch_size, embed_size, seq_len) for conv1d
        x = x.transpose(1, 2)
        z = [conv(x) for conv in self.cnns]
        output = [F.max_pool1d(i, kernel_size=i.size(-1)).squeeze(-1) for i in z]

        if len(output) > 1:
            output = self.fc(torch.cat(output, -1))
        else:
            output = output[0]
        return None, output 
Author: hugochan, Project: BAMnet, Lines: 19, Source: modules.py

Example 4: __init__

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import dropout [as alias]
def __init__(self, n_conv, kernel_size=7, n_filters=128, dropout=0.1, num_heads=4):
        super(EncoderBlock, self).__init__()
        self.dropout = dropout
        self.n_conv = n_conv
        self.num_heads = num_heads

        self.position_encoding = PositionEncoding(n_filters=n_filters)

        self.layer_norm = nn.ModuleList([nn.LayerNorm(n_filters) for _ in range(n_conv)])
        self.final_layer_norm = nn.LayerNorm(n_filters)
        self.conv = nn.ModuleList([
            DepthwiseSeparableConv(in_ch=n_filters, out_ch=n_filters, k=kernel_size, relu=True)
            for _ in range(n_conv)])

        if self.num_heads != 0:
            self.multi_head_attn = MultiHeadedAttention(nh=num_heads, d_model=n_filters)
            self.attn_layer_norm = nn.LayerNorm(n_filters) 
Author: jayleicn, Project: TVQAplus, Lines: 19, Source: encoder.py

Example 5: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import dropout [as alias]
def forward(self, x, mask):
        """
        :param x: (N, L, D)
        :param mask: (N, L)
        :return: (N, L, D)
        """
        outputs = self.position_encoding(x)  # (N, L, D)

        for i in range(self.n_conv):
            residual = outputs
            outputs = self.layer_norm[i](outputs)

            if i % 2 == 0:
                outputs = F.dropout(outputs, p=self.dropout, training=self.training)
            outputs = self.conv[i](outputs)
            outputs = outputs + residual

        if self.num_heads != 0:
            residual = outputs
            outputs = self.attn_layer_norm(outputs)
            outputs = self.multi_head_attn(outputs, mask=mask)
            outputs = outputs + residual

        return self.final_layer_norm(outputs)  # (N, L, D) 
Author: jayleicn, Project: TVQAplus, Lines: 26, Source: encoder.py

Example 6: similarity

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import dropout [as alias]
def similarity(self, C, Q, c_mask, q_mask):
        """
        word2word dot-product similarity
        Args:
            C: (N, 5, Li, Lqa, D)
            Q: (N, 1, Li, Lr, D)
            c_mask: (N, 5, Li, Lqa)
            q_mask: (N, 1, Li, Lr)
        Returns:
            masked_S (N, 5, Li, Lqa, Lr), S_mask (N, 5, Li, Lqa, Lr)
        """
        C = F.dropout(F.normalize(C, p=2, dim=-1), p=self.dropout, training=self.training)
        Q = F.dropout(F.normalize(Q, p=2, dim=-1), p=self.dropout, training=self.training)

        S_mask = torch.matmul(c_mask.unsqueeze(-1), q_mask.unsqueeze(-2))  # (N, 5, Li, Lqa, Lr)
        S = torch.matmul(C, Q.transpose(-2, -1))  # (N, 5, Li, Lqa, Lr)
        masked_S = S - 1e10*(1 - S_mask)  # (N, 5, Li, Lqa, Lr)
        return masked_S, S_mask 
Author: jayleicn, Project: TVQAplus, Lines: 20, Source: context_query_attention.py

Example 7: __init__

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import dropout [as alias]
def __init__(self, in_channels, out_channels, kernel_size, dim=1, stride=1, padding=0, relu=True, dropout=0.1):
        """
        :param in_channels: input hidden dimension size
        :param out_channels: output hidden dimension size
        :param kernel_size: kernel size
        :param dim: default 1. 1D conv or 2D conv
        """
        super(ConvRelu, self).__init__()
        self.relu = relu
        self.dropout = dropout
        if dim == 1:
            self.conv = nn.Conv1d(in_channels=in_channels, out_channels=out_channels,
                                  kernel_size=kernel_size, stride=stride, padding=padding)
        elif dim == 2:
            self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                                  kernel_size=kernel_size, stride=stride, padding=padding)
        else:
            raise Exception("Incorrect dimension!") 
Author: jayleicn, Project: TVQAplus, Lines: 20, Source: cnn.py

Example 8: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import dropout [as alias]
def forward(self, data):
        x, edge_index, edge_type, batch = data.x, data.edge_index, data.edge_type, data.batch  # edge_type must be unpacked here; it is used by dropout_adj below
        if self.adj_dropout > 0:
            edge_index, edge_type = dropout_adj(
                edge_index, edge_type, p=self.adj_dropout, 
                force_undirected=self.force_undirected, num_nodes=len(x), 
                training=self.training
            )
        concat_states = []
        for conv in self.convs:
            x = torch.tanh(conv(x, edge_index))
            concat_states.append(x)
        concat_states = torch.cat(concat_states, 1)
        x = global_add_pool(concat_states, batch)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        if self.regression:
            return x[:, 0]
        else:
            return F.log_softmax(x, dim=-1) 
Author: muhanzhang, Project: IGMC, Lines: 23, Source: models.py

Example 9: __init__

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import dropout [as alias]
def __init__(self, embed_size, hidden_size, vocab_size, dropout_rate):
        super().__init__()
        self.attn_u = Attn(hidden_size)
        self.attn_z = Attn(hidden_size)
        self.gru = nn.GRU(embed_size + hidden_size, hidden_size, dropout=dropout_rate)
        self.ln1 = LayerNormalization(hidden_size)

        self.w1 = nn.Linear(hidden_size, vocab_size)
        self.proj_copy1 = nn.Linear(hidden_size * 2, hidden_size)
        self.v1 = nn.Linear(hidden_size, 1)

        self.proj_copy2 = nn.Linear(hidden_size * 2, hidden_size)
        self.v2 = nn.Linear(hidden_size, 1)
        self.mu = nn.Linear(vocab_size, embed_size)
        self.dropout_rate = dropout_rate

        self.gru = orth_gru(self.gru)

        self.copy_weight = 1 
Author: AuCson, Project: SEDST, Lines: 21, Source: unsup_net.py

Example 10: whatCellType

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import dropout [as alias]
def whatCellType(input_size, hidden_size, cell_type, dropout_rate):
    if cell_type == 'rnn':
        cell = nn.RNN(input_size, hidden_size, dropout=dropout_rate, batch_first=False)
        init_gru(cell)
        return cell
    elif cell_type == 'gru':
        cell = nn.GRU(input_size, hidden_size, dropout=dropout_rate, batch_first=False)
        init_gru(cell)
        return cell
    elif cell_type == 'lstm':
        cell = nn.LSTM(input_size, hidden_size, dropout=dropout_rate, batch_first=False)
        init_lstm(cell)
        return cell
    elif cell_type == 'bigru':
        cell = nn.GRU(input_size, hidden_size, bidirectional=True, dropout=dropout_rate, batch_first=False)
        init_gru(cell)
        return cell
    elif cell_type == 'bilstm':
        cell = nn.LSTM(input_size, hidden_size, bidirectional=True, dropout=dropout_rate, batch_first=False)
        init_lstm(cell)
        return cell 
Author: ConvLab, Project: ConvLab, Lines: 23, Source: model.py

Example 11: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import dropout [as alias]
def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)  # pass dim explicitly to avoid the implicit-dim deprecation warning
Author: StephanZheng, Project: neural-fingerprinting, Lines: 10, Source: model.py

Example 12: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import dropout [as alias]
def forward(self, x):
        if not self.equalInOut:
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        return torch.add(x if self.equalInOut else self.convShortcut(x), out) 
Author: zhunzhong07, Project: Random-Erasing, Lines: 12, Source: wrn.py

Example 13: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import dropout [as alias]
def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1) 
Author: moemen95, Project: Pytorch-Project-Template, Lines: 10, Source: mnist.py

Example 14: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import dropout [as alias]
def forward(self, input_):
        x = self.sublayer(input_)
        if self.drop_rate > 0:
            x = F.dropout(x, p=self.drop_rate, training=self.training)
        x_final = torch.cat([input_, x], dim=1)
        return x_final 
Author: HaiyangLiu1997, Project: Pytorch-Networks, Lines: 8, Source: DenseNet2016.py

Example 15: kb_aware_query_enc

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import dropout [as alias]
def kb_aware_query_enc(self, memories, queries, query_lengths, ans_mask, ctx_mask=None):
        # Question encoder
        Q_r = self.que_enc(queries, query_lengths)[0]
        if self.que_enc_dropout:
            Q_r = F.dropout(Q_r, p=self.que_enc_dropout, training=self.training)

        query_mask = create_mask(query_lengths, Q_r.size(1), self.use_cuda)
        q_r_init = self.self_atten(Q_r, query_lengths, query_mask)

        # Answer encoder
        _, _, _, x_type_bow, x_types, x_type_bow_len, x_path_bow, x_paths, x_path_bow_len, x_ctx_ent, x_ctx_ent_len, x_ctx_ent_num, _, _, _, _ = memories
        ans_comp_val, ans_comp_key = self.ans_enc(x_type_bow, x_types, x_type_bow_len, x_path_bow, x_paths, x_path_bow_len, x_ctx_ent, x_ctx_ent_len, x_ctx_ent_num)
        if self.ans_enc_dropout:
            for _ in range(len(ans_comp_key)):
                ans_comp_key[_] = F.dropout(ans_comp_key[_], p=self.ans_enc_dropout, training=self.training)
        # KB memory summary
        ans_comp_atts = [self.init_atten(q_r_init, each, atten_mask=ans_mask) for each in ans_comp_key]
        if ctx_mask is not None:
            ans_comp_atts[-1] = ctx_mask * ans_comp_atts[-1] - (1 - ctx_mask) * INF
        ans_comp_probs = [torch.softmax(each, dim=-1) for each in ans_comp_atts]
        memory_summary = []
        for i, probs in enumerate(ans_comp_probs):
            memory_summary.append(torch.bmm(probs.unsqueeze(1), ans_comp_val[i]))
        memory_summary = torch.cat(memory_summary, 1)

        # Co-attention
        CoAtt = torch.bmm(Q_r, memory_summary.transpose(1, 2)) # co-attention matrix
        CoAtt = query_mask.unsqueeze(-1) * CoAtt - (1 - query_mask).unsqueeze(-1) * INF
        if ctx_mask is not None:
            # mask over empty ctx elements
            ctx_mask_global = (ctx_mask.sum(-1, keepdim=True) > 0).float()
            CoAtt[:, :, -1] = ctx_mask_global * CoAtt[:, :, -1].clone() - (1 - ctx_mask_global) * INF

        q_att = F.max_pool1d(CoAtt, kernel_size=CoAtt.size(-1)).squeeze(-1)
        q_att = torch.softmax(q_att, dim=-1)
        return (ans_comp_val, ans_comp_key), (q_att, Q_r), query_mask 
Author: hugochan, Project: BAMnet, Lines: 38, Source: modules.py


Note: The torch.nn.functional.dropout examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their authors, and the copyright of the source code remains with the original authors; please consult each project's license before distributing or reusing the code. Do not reproduce this article without permission.