

Python functional.tanh Function Code Examples

This article collects typical usage examples of the torch.nn.functional.tanh function in Python. If you are wondering what tanh does, how to call it, or what real-world usage looks like, the curated code examples below should help.


The following presents 15 code examples of the tanh function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
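Before the collected examples, here is a minimal, self-contained sketch of what F.tanh computes. Note that F.tanh has been deprecated in recent PyTorch releases in favor of torch.tanh; the examples below predate that change.

import torch
import torch.nn.functional as F

x = torch.linspace(-3.0, 3.0, steps=7)
y = F.tanh(x)       # elementwise hyperbolic tangent, squashing values into (-1, 1)
y = torch.tanh(x)   # equivalent, and preferred in current PyTorch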

Example 1: forward

    def forward(self, inputs):
        x, u = inputs
        x = self.bn0(x)
        x = F.tanh(self.linear1(x))
        x = F.tanh(self.linear2(x))

        V = self.V(x)
        mu = F.tanh(self.mu(x))

        Q = None
        if u is not None:
            num_outputs = mu.size(1)
            L = self.L(x).view(-1, num_outputs, num_outputs)
            # lower-triangular L with exponentiated diagonal, so that
            # P = L L^T is positive definite
            L = L * self.tril_mask.expand_as(L) + \
                torch.exp(L) * self.diag_mask.expand_as(L)
            P = torch.bmm(L, L.transpose(2, 1))

            # quadratic advantage term: A = -1/2 (u - mu)^T P (u - mu)
            u_mu = (u - mu).unsqueeze(2)
            A = -0.5 * torch.bmm(torch.bmm(u_mu.transpose(2, 1), P), u_mu)[:, :, 0]

            Q = A + V

        return mu, Q, V
Developer: lenvdv, Project: pytorch-ddpg-naf, Lines: 25, Source: naf.py

Example 2: forward

    def forward(self, xt, fc_feats, att_feats, p_att_feats, state):
        # The p_att_feats here is already projected
        att_size = att_feats.numel() // att_feats.size(0) // self.att_feat_size
        att = p_att_feats.view(-1, att_size, self.att_hid_size)
        
        att_h = self.h2att(state[0][-1])                        # batch * att_hid_size
        att_h = att_h.unsqueeze(1).expand_as(att)            # batch * att_size * att_hid_size
        dot = att + att_h                                   # batch * att_size * att_hid_size
        dot = F.tanh(dot)                                # batch * att_size * att_hid_size
        dot = dot.view(-1, self.att_hid_size)               # (batch * att_size) * att_hid_size
        dot = self.alpha_net(dot)                           # (batch * att_size) * 1
        dot = dot.view(-1, att_size)                        # batch * att_size
        
        weight = F.softmax(dot, dim=1)                      # batch * att_size (explicit dim; implicit softmax is deprecated)
        att_feats_ = att_feats.view(-1, att_size, self.att_feat_size) # batch * att_size * att_feat_size
        att_res = torch.bmm(weight.unsqueeze(1), att_feats_).squeeze(1) # batch * att_feat_size

        all_input_sums = self.i2h(xt) + self.h2h(state[0][-1])
        sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
        sigmoid_chunk = F.sigmoid(sigmoid_chunk)
        in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
        forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
        out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)

        in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size) + \
            self.a2c(att_res)
        in_transform = torch.max(
            in_transform.narrow(1, 0, self.rnn_size),
            in_transform.narrow(1, self.rnn_size, self.rnn_size))
        next_c = forget_gate * state[1][-1] + in_gate * in_transform
        next_h = out_gate * F.tanh(next_c)

        output = self.dropout(next_h)
        state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
        return output, state
Developer: littlebadRobot, Project: AI_challenger_Chinese_Caption, Lines: 35, Source: Att2inModel.py

Example 3: forward

    def forward(self, x):
        # print("fffff",x)
        embed = self.embed(x)

        # CNN
        cnn_x = embed
        cnn_x = torch.transpose(cnn_x, 0, 1)
        cnn_x = cnn_x.unsqueeze(1)
        cnn_x = [F.relu(conv(cnn_x)).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
        cnn_x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in cnn_x]  # [(N,Co), ...]*len(Ks)
        cnn_x = torch.cat(cnn_x, 1)
        cnn_x = self.dropout(cnn_x)

        # LSTM
        lstm_x = embed.view(len(x), embed.size(1), -1)
        lstm_out, self.hidden = self.lstm(lstm_x, self.hidden)
        lstm_out = torch.transpose(lstm_out, 0, 1)
        lstm_out = torch.transpose(lstm_out, 1, 2)
        # lstm_out = F.tanh(lstm_out)
        lstm_out = F.max_pool1d(lstm_out, lstm_out.size(2)).squeeze(2)

        # CNN and LSTM cat
        cnn_x = torch.transpose(cnn_x, 0, 1)
        lstm_out = torch.transpose(lstm_out, 0, 1)
        cnn_lstm_out = torch.cat((cnn_x, lstm_out), 0)
        cnn_lstm_out = torch.transpose(cnn_lstm_out, 0, 1)

        # linear
        cnn_lstm_out = self.hidden2label1(F.tanh(cnn_lstm_out))
        cnn_lstm_out = self.hidden2label2(F.tanh(cnn_lstm_out))

        # output
        logit = cnn_lstm_out
        return logit
Developer: fengzhangyin, Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, Lines: 34, Source: model_CNN_LSTM.py

Example 4: norm_flow

    def norm_flow(self, params, z, v, logposterior):

        h = F.tanh(params[0][0](z))
        mew_ = params[0][1](h)
        sig_ = F.sigmoid(params[0][2](h) + 5.)  # [PB,Z]

        z_reshaped = z.view(self.P, self.B, self.z_size)

        gradients = torch.autograd.grad(outputs=logposterior(z_reshaped), inputs=z_reshaped,
                                        grad_outputs=self.grad_outputs,
                                        create_graph=True, retain_graph=True, only_inputs=True)[0]
        gradients = gradients.detach()
        gradients = gradients.view(-1, self.z_size)

        v = v * sig_ + mew_ * gradients
        logdet = torch.sum(torch.log(sig_), 1)

        h = F.tanh(params[1][0](v))
        mew_ = params[1][1](h)
        sig_ = F.sigmoid(params[1][2](h) + 5.)  # [PB,Z]

        z = z * sig_ + mew_ * v
        logdet2 = torch.sum(torch.log(sig_), 1)

        # [PB]
        logdet = logdet + logdet2

        # [PB,Z], [PB]
        return z, v, logdet
Developer: chriscremer, Project: Other_Code, Lines: 35, Source: approx_posteriors_v6.py

Example 5: forward

    def forward(self, sentence):
        # sentence: [torch.LongTensor of size 42x64]
        x = self.word_embeddings(sentence)
        x = self.dropout_embed(x)
        # x: torch.Size([42, 64, 100])
        lstm_out, self.hidden = self.lstm(x, self.hidden)   # lstm_out 10*5*50, hidden 1*5*50 *2
        lstm_out = torch.transpose(lstm_out, 0, 1)
        lstm_out = torch.transpose(lstm_out, 1, 2)
        lstm_out = F.max_pool1d(lstm_out, lstm_out.size(2))
        lstm_out = lstm_out.squeeze(2)
        y = self.hidden2label1(F.tanh(lstm_out))
        y = self.hidden2label2(F.tanh(y))
        # log_probs = F.log_softmax(y)
        log_probs = y
        return log_probs
Developer: Joyce94, Project: sentence_classification, Lines: 26, Source: Bi_LSTM.py

Example 6: PeepholeLSTMCell

def PeepholeLSTMCell(input: torch.Tensor,
                     hidden: Tuple[torch.Tensor, torch.Tensor],
                     w_ih: torch.Tensor,
                     w_hh: torch.Tensor,
                     w_ip: torch.Tensor,
                     w_fp: torch.Tensor,
                     w_op: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    An LSTM cell with peephole connections without biases.

    Mostly ripped from the pytorch autograd lstm implementation.
    """
    hx, cx = hidden
    gates = F.linear(input, w_ih) + F.linear(hx, w_hh)

    ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
    peep_i = w_ip.unsqueeze(0).expand_as(cx) * cx
    ingate = ingate + peep_i
    peep_f = w_fp.unsqueeze(0).expand_as(cx) * cx
    forgetgate = forgetgate + peep_f

    ingate = F.sigmoid(ingate)
    forgetgate = F.sigmoid(forgetgate)
    cellgate = F.tanh(cellgate)
    cy = (forgetgate * cx) + (ingate * cellgate)
    peep_o = w_op.unsqueeze(0).expand_as(cy) * cy
    outgate = outgate + peep_o
    hy = outgate * F.tanh(cy)

    return hy, cy
Developer: mittagessen, Project: kraken, Lines: 30, Source: layers.py
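As a quick smoke test, the following is a hypothetical driver for the cell above with randomly initialized weights; all names and sizes here are invented for illustration and do not come from the kraken project.

import torch
import torch.nn.functional as F

batch, in_dim, hid_dim = 4, 8, 16
x = torch.randn(batch, in_dim)
hx = torch.zeros(batch, hid_dim)
cx = torch.zeros(batch, hid_dim)
# gate weights are stacked as 4*hidden rows, matching gates.chunk(4, 1);
# each peephole weight is one vector per gate
w_ih = torch.randn(4 * hid_dim, in_dim)
w_hh = torch.randn(4 * hid_dim, hid_dim)
w_ip, w_fp, w_op = (torch.randn(hid_dim) for _ in range(3))

hy, cy = PeepholeLSTMCell(x, (hx, cx), w_ih, w_hh, w_ip, w_fp, w_op)
print(hy.shape, cy.shape)  # torch.Size([4, 16]) torch.Size([4, 16])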

Example 7: forward

    def forward(self, h_out, fake_region, conv_feat, conv_feat_embed):

        # View into three dimensions
        att_size = conv_feat.numel() // conv_feat.size(0) // self.rnn_size
        conv_feat = conv_feat.view(-1, att_size, self.rnn_size)
        conv_feat_embed = conv_feat_embed.view(-1, att_size, self.att_hid_size)

        # view neighbor from batch_size * neighbor_num x rnn_size to batch_size x rnn_size * neighbor_num
        fake_region = self.fr_linear(fake_region)
        fake_region_embed = self.fr_embed(fake_region)

        h_out_linear = self.ho_linear(h_out)
        h_out_embed = self.ho_embed(h_out_linear)

        txt_replicate = h_out_embed.unsqueeze(1).expand(h_out_embed.size(0), att_size + 1, h_out_embed.size(1))

        img_all = torch.cat([fake_region.view(-1,1,self.input_encoding_size), conv_feat], 1)
        img_all_embed = torch.cat([fake_region_embed.view(-1,1,self.input_encoding_size), conv_feat_embed], 1)

        hA = F.tanh(img_all_embed + txt_replicate)
        hA = F.dropout(hA, self.drop_prob_lm, self.training)

        hAflat = self.alpha_net(hA.view(-1, self.att_hid_size))
        PI = F.softmax(hAflat.view(-1, att_size + 1), dim=1)  # explicit dim; implicit softmax is deprecated

        visAtt = torch.bmm(PI.unsqueeze(1), img_all)
        visAttdim = visAtt.squeeze(1)

        atten_out = visAttdim + h_out_linear

        h = F.tanh(self.att2h(atten_out))
        h = F.dropout(h, self.drop_prob_lm, self.training)
        return h
Developer: nagizeroiw, Project: ImageCaptioning.pytorch, Lines: 33, Source: AttModel.py

Example 8: forward

    def forward(self, inputs):
        x = inputs
        x = self.bn0(x)
        x = F.tanh(self.linear1(x))
        x = F.tanh(self.linear2(x))

        mu = F.tanh(self.mu(x))
        return mu
Developer: GuanyuGao, Project: thermal_project, Lines: 8, Source: ddpg.py

Example 9: forward

    def forward(self, input, cell):
        hx, cx = cell
        input = self.i2h_bn(self.i2h(input)) + self.h2h_bn(self.h2h(hx))
        gates = F.sigmoid(input[:, :3 * self.hidden_size])
        in_gate = gates[:, :self.hidden_size]
        forget_gate = gates[:, self.hidden_size:2 * self.hidden_size]
        out_gate = gates[:, 2 * self.hidden_size:3 * self.hidden_size]
        input = F.tanh(input[:, 3 * self.hidden_size:4 * self.hidden_size])
        cx = (forget_gate * cx) + (in_gate * input)
        hx = out_gate * F.tanh(self.cx_bn(cx))
        return hx, cx
Developer: shubhampachori12110095, Project: doom-net-pytorch, Lines: 11, Source: lstm.py

Example 10: LSTMCell

def LSTMCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):
    hx, cx = hidden
    gates = F.linear(input, w_ih, b_ih) + F.linear(hx, w_hh, b_hh)

    ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
    ingate = F.sigmoid(ingate)
    forgetgate = F.sigmoid(forgetgate)
    cellgate = F.tanh(cellgate)
    outgate = F.sigmoid(outgate)

    cy = (forgetgate * cx) + (ingate * cellgate)
    hy = outgate * F.tanh(cy)
    return hy, cy
Developer: Northrend, Project: pytorch, Lines: 13, Source: test_jit.py
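Likewise, a minimal sketch of calling LSTMCell with made-up sizes; the 4*hidden stacking of the weight matrices matches gates.chunk(4, 1) in the cell.

import torch
import torch.nn.functional as F

batch, in_dim, hid_dim = 2, 10, 20
x = torch.randn(batch, in_dim)
hidden = (torch.zeros(batch, hid_dim), torch.zeros(batch, hid_dim))
w_ih = torch.randn(4 * hid_dim, in_dim)
w_hh = torch.randn(4 * hid_dim, hid_dim)
b_ih = torch.zeros(4 * hid_dim)
b_hh = torch.zeros(4 * hid_dim)

hy, cy = LSTMCell(x, hidden, w_ih, w_hh, b_ih, b_hh)
print(hy.shape, cy.shape)  # torch.Size([2, 20]) torch.Size([2, 20])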

Example 11: forward

    def forward(self, x):
        embed = self.embed(x)
        embed = self.dropout_embed(embed)
        x = embed.view(len(x), embed.size(1), -1)
        # lstm
        lstm_out, self.hidden = self.lstm(x, self.hidden)
        lstm_out = torch.transpose(lstm_out, 0, 1)
        lstm_out = torch.transpose(lstm_out, 1, 2)
        # pooling
        lstm_out = F.tanh(lstm_out)
        lstm_out = F.max_pool1d(lstm_out, lstm_out.size(2)).squeeze(2)
        lstm_out = F.tanh(lstm_out)
        # linear
        logit = self.hidden2label(lstm_out)
        return logit
Developer: fengzhangyin, Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, Lines: 15, Source: model_LSTM.py

Example 12: _get_lstm_features

    def _get_lstm_features(self, names, lengths):
        self.hidden = self.init_hidden(names.size(-1))
        embeds = self.char_embeds(names)  # Figure 4
        packed_input = pack_padded_sequence(embeds, lengths)  # Figure 5
        packed_output, (ht, ct) = self.lstm(packed_input, self.hidden)  # Figure 6
        lstm_out, _ = pad_packed_sequence(packed_output)  # Figure 7
        lstm_out = torch.transpose(lstm_out, 0, 1)
        lstm_out = torch.transpose(lstm_out, 1, 2)
        lstm_out = F.tanh(lstm_out)  # Figure 8
        lstm_out, indices = F.max_pool1d(lstm_out, lstm_out.size(2), return_indices=True)  # Figure 9
        lstm_out = lstm_out.squeeze(2)  # drop the pooled dimension so the shape matches the linear layer's input
        lstm_out = F.tanh(lstm_out)
        lstm_feats = self.fully_connected_layer(lstm_out)
        output = self.softmax(lstm_feats)  # Figure 10
        return output
Developer: Joe955, Project: MachineLearning, Lines: 15, Source: pytorch-rnn-demo.py

Example 13: forward

    def forward(self, input_seq, last_hidden, encoder_outputs):
        # Note: we run this one step at a time

        # Get the embedding of the current input word (last output word)
        embedded = self.embedding(input_seq)
        embedded = self.embedding_dropout(embedded) #[1, 64, 512]
        if embedded.size(0) != 1:
            raise ValueError('Decoder input sequence length should be 1')

        # Get current hidden state from input word and last hidden state
        rnn_output, hidden = self.gru(embedded, last_hidden)

        # Calculate attention from current RNN state and all encoder outputs;
        # apply to encoder outputs to get weighted average
        attn_weights = self.attn(rnn_output, encoder_outputs) #[64, 1, 14]
        # encoder_outputs [14, 64, 512]
        context = attn_weights.bmm(encoder_outputs.transpose(0, 1)) #[64, 1, 512]

        # Attentional vector using the RNN hidden state and context vector
        # concatenated together (Luong eq. 5)
        rnn_output = rnn_output.squeeze(0) #[64, 512]
        context = context.squeeze(1) #[64, 512]
        concat_input = torch.cat((rnn_output, context), 1) #[64, 1024]
        concat_output = F.tanh(self.concat(concat_input)) #[64, 512]

        # Finally predict next token (Luong eq. 6, without softmax)
        output = self.out(concat_output) #[64, output_size]
        output = F.softmax(output, dim=1)  # explicit dim; implicit softmax is deprecated

        # Return final output, hidden state, and attention weights (for visualization)
        return output, hidden, attn_weights
Developer: yucoian, Project: pytorch-chatbot, Lines: 31, Source: model.py

Example 14: forward

    def forward(self, x):
        """
        :param x: tensor with shape [batch_size, max_seq_len, max_word_len, char_embed_size]

        :return: tensor with shape [batch_size, max_seq_len, depth_sum]

        applies a multi-kernel 1d-conv layer along every word in the input, with max-over-time pooling
            to emit a fixed-size output
        """

        input_size = x.size()
        input_size_len = len(input_size)

        assert input_size_len == 4, \
            'Wrong input rank, must be equal to 4, but {} found'.format(input_size_len)

        [batch_size, seq_len, _, embed_size] = input_size

        assert embed_size == self.params.char_embed_size, \
            'Wrong embedding size, must be equal to {}, but {} found'.format(self.params.char_embed_size, embed_size)

        # collapse batch and sequence dims: [batch_size * seq_len, char_embed_size, max_word_len]
        x = x.view(-1, self.params.max_word_len, self.params.char_embed_size).transpose(1, 2).contiguous()

        xs = [F.tanh(F.conv1d(x, kernel, bias=self.biases[i])) for i, kernel in enumerate(self.kernels)]
        xs = [x.max(2)[0] for x in xs]  # max-over-time pooling; max() already drops the reduced dim

        x = t.cat(xs, 1)
        x = x.view(batch_size, seq_len, -1)

        return x
Developer: leezqcst, Project: contiguous-succotash, Lines: 31, Source: tdnn.py
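The tanh-over-conv, max-over-time pattern from this example also works in isolation. The sketch below uses invented sizes; the kernel and bias shapes are assumptions for illustration, not the tdnn module's actual parameters.

import torch
import torch.nn.functional as F

batch_seq, embed_size, word_len = 6, 15, 10   # batch_size * seq_len, flattened
x = torch.randn(batch_seq, embed_size, word_len)

# one conv kernel per width: [out_channels, in_channels, kernel_width]
kernels = [torch.randn(25, embed_size, w) for w in (2, 3, 4)]
biases = [torch.zeros(25) for _ in kernels]

xs = [F.tanh(F.conv1d(x, k, bias=b)) for k, b in zip(kernels, biases)]
xs = [h.max(2)[0] for h in xs]   # max-over-time pooling per kernel width
out = torch.cat(xs, 1)
print(out.shape)  # torch.Size([6, 75])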

Example 15: score

    def score(self, hidden, encoder_output):

        if self.method == 'dot':
            # hidden is 1 by 256
            # encoder_output is 22 by 256
            encoder_output = torch.transpose(encoder_output, 0, 1)
            # encoder_output is 256 by 22
            energy = torch.matmul(hidden, encoder_output)
            return energy

        elif self.method == 'general':
            # hidden is 1 by 256
            # encoder_output is 256 by 22
            hidden = hidden.view(1, -1)
            a = self.attn(encoder_output)
            a = torch.transpose(a, 0, 1)
            energy = torch.matmul(hidden, a)
            return energy

        elif self.method == 'concat':
            len_encoder_output = encoder_output.size()[1]
            # hidden is 1 by 256
            # encoder_output is 256 by 22
            hidden = torch.transpose(hidden, 0, 1)
            # hidden is 256 by 1; tile it across the time dimension to get 256 by 22
            hidden = hidden.repeat(1, len_encoder_output)
            concat = torch.cat((hidden, encoder_output), dim=0)
            # concat is 512 by 22
            # self.attn(concat) --> 256 by 22
            energy = torch.matmul(self.v, F.tanh(self.attn(concat)))
            return energy
Developer: vwrj, Project: neural_machine_translation, Lines: 33, Source: V2-Attention-Vish.py
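For reference, here is the 'dot' branch in isolation, using the shapes given in the comments (a 1 by 256 hidden state scored against 22 encoder steps); this is purely illustrative.

import torch

hidden = torch.randn(1, 256)
encoder_output = torch.randn(22, 256)
energy = torch.matmul(hidden, encoder_output.transpose(0, 1))
print(energy.shape)  # torch.Size([1, 22])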


Note: the torch.nn.functional.tanh examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; the copyright of the source code belongs to the original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce without permission.