

Python functional.relu function code examples

This article collects typical usage examples of the torch.nn.functional.relu function in Python. If you are wondering exactly how relu is used, what it does, or what real-world code that calls it looks like, the curated examples below may help.


The following presents 15 code examples of the relu function, sorted by popularity by default.
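Before the individual examples, here is a minimal, self-contained sketch of the most common pattern: applying F.relu to the output of a linear (or convolutional) layer inside a module's forward method. The module name and layer sizes below are illustrative only and are not taken from any of the projects listed further down.

import torch
import torch.nn as nn
import torch.nn.functional as F

class TinyMLP(nn.Module):
    # Illustrative two-layer MLP; F.relu is applied between the layers.
    def __init__(self, in_features=16, hidden=32, out_features=10):
        super().__init__()
        self.fc1 = nn.Linear(in_features, hidden)
        self.fc2 = nn.Linear(hidden, out_features)

    def forward(self, x):
        x = F.relu(self.fc1(x))  # element-wise max(0, x) on the hidden activations
        return self.fc2(x)       # no activation on the output layer here

x = torch.randn(4, 16)           # a batch of 4 random feature vectors
print(TinyMLP()(x).shape)        # torch.Size([4, 10])

F.relu is purely functional (it has no learnable parameters), so it can be called directly in forward; nn.ReLU is the equivalent module form.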

Example 1: forward

	def forward(self, v, u, d):
		"""
		@param v [batch_size, embedding_size] matrix to push
		@param u [batch_size,] vector of pop signals in (0, 1)
		@param d [batch_size,] vector of push signals in (0, 1)
		@return [batch_size, embedding_size] read matrix
		"""

		# update V, which is of size [t, batch_size, embedding_size]
		v = v.view(1, self.batch_size, self.embedding_size)
		self.V = torch.cat([self.V, v], 0) if len(self.V.data) != 0 else v

		# TODO initialize queue to fixed size

		# update s, which is of size [t, batch_size]
		old_t = self.s.size(0) if self.s.size() else 0
		s = Variable(torch.FloatTensor(old_t + 1, self.batch_size))
		w = u
		for i in range(old_t):
			s_ = F.relu(self.s[i,:] - w)
			w = F.relu(w - self.s[i,:])
			s[i,:] = s_
			# if len(torch.nonzero(w.data)) == 0: break
			# TODO does this if work properly now?
		s[old_t,:] = d
		self.s = s

		# calculate r, which is of size [batch_size, embedding_size]
		r = Variable(torch.zeros([self.batch_size, self.embedding_size]))
		for i in range(old_t + 1):
			used = torch.sum(self.s[:i,:], 0) if i > 0 else self.zero
			coeffs = torch.min(self.s[i,:], F.relu(1 - used))
			# reformat coeffs into a matrix that can be multiplied element-wise
			r += coeffs.view(self.batch_size, 1).repeat(1, self.embedding_size) * self.V[i,:,:]
		return r
Developer: simonjmendelsohn, Project: StackNN, Lines: 35, Source: queue.py

Example 2: forward

 def forward(self, x):
     x = F.relu(F.max_pool2d(self.conv1(x), 2))
     x = F.relu(F.max_pool2d(self.conv2(x), 2))
     x = x.view(-1, 320)
     x = F.relu(self.fc1(x))
     x = self.fc2(x)
     return x
Developer: lewisKit, Project: pyro, Lines: 7, Source: sv-dkl.py

Example 3: forward

    def forward(self, x, y, y_mask):
        """Input shapes:
            x = batch * len1 * h
            y = batch * len2 * h
            y_mask = batch * len2
        Output shapes:
            matched_seq = batch * len1 * h
        """
        # Project vectors
        if self.linear:
            x_proj = self.linear(x.view(-1, x.size(2))).view(x.size())
            x_proj = F.relu(x_proj)
            y_proj = self.linear(y.view(-1, y.size(2))).view(y.size())
            y_proj = F.relu(y_proj)
        else:
            x_proj = x
            y_proj = y

        # Compute scores
        scores = x_proj.bmm(y_proj.transpose(2, 1))

        # Mask padding
        y_mask = y_mask.unsqueeze(1).expand(scores.size())
        scores.data.masked_fill_(y_mask.data, -float('inf'))

        # Normalize with softmax
        alpha_flat = F.softmax(scores.view(-1, y.size(1)))
        alpha = alpha_flat.view(-1, x.size(1), y.size(1))

        # Take weighted average
        matched_seq = alpha.bmm(y)
        return matched_seq
Developer: ahiroto, Project: ParlAI, Lines: 32, Source: layers.py

Example 4: forward

 def forward(self, x):
     out = F.relu(self.bn1(self.conv1(x)))
     out = F.relu(self.bn2(self.conv2(out)))
     out = self.bn3(self.conv3(out))
     out += self.shortcut(x)
     out = F.relu(out)
     return out
Developer: naiqili, Project: CL, Lines: 7, Source: resnext.py

Example 5: forward

 def forward(self, x):
     x = self.fc1(x).view(-1, self.channels, self.rows, self.rows)
     x = F.relu(self.batch_norm1(x))
     x = F.relu(self.batch_norm2(self.conv1(x)))
     x = F.relu(self.batch_norm3(self.conv2(x)))
     x = F.relu(self.batch_norm4(self.conv3(x)))
     return F.tanh(self.conv4(x))
Developer: y-kamiya, Project: machine-learning-samples, Lines: 7, Source: conditional_gan.py

Example 6: forward

    def forward(self, x_left, x_right):
        x_left = F.relu(x_left)
        x_left = self.conv_left(x_left)
        x_left = self.bn_left(x_left)

        x_right = F.relu(x_right)
        x_right = self.conv_right(x_right)
        x_right = self.bn_right(x_right)

        x_comb_iter_0_left = self.comb_iter_0_left(x_right)
        x_comb_iter_0_right = self.comb_iter_0_right(x_left)
        x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right

        x_comb_iter_1_left = self.comb_iter_1_left(x_left)
        x_comb_iter_1_right = self.comb_iter_1_right(x_right)
        x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right

        x_comb_iter_2_left = self.comb_iter_2_left(x_right)
        x_comb_iter_2 = x_comb_iter_2_left + x_left

        x_comb_iter_3_left = self.comb_iter_3_left(x_left) # TODO: those two avgPool look similar
        x_comb_iter_3_right = self.comb_iter_3_right(x_left)
        x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right

        x_comb_iter_4_left = self.comb_iter_4_left(x_right)
        x_comb_iter_4 = x_comb_iter_4_left + x_right

        return torch.cat([x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
Developer: sarathknv, Project: pretrained-models.pytorch, Lines: 28, Source: nasnet.py

Example 7: adpW

 def adpW(self,x):
     '''
        calculate the pairwise attention (pairwise_att) for every pair of inputs
        output_size: (x.size(0),x.size(1)/2)
     '''
     x = x.detach()
     x = self.adp_metric_embedding1(x)
     x = self.adp_metric_embedding1_bn(x)
     x = F.relu(x)
     x = self.adp_metric_embedding2(x)
     x = self.adp_metric_embedding2_bn(x)
     x = F.relu(x)
     x = self.adp_metric_embedding3(x)
     x = self.adp_metric_embedding3_bn(x)
     x = F.relu(x)
     pairwise_att = F.sigmoid(self.adp_metric_embedding4(x))
     # x = self.adp_metric_embedding2_bn(x)
     diag_matrix1 = []
     diag_matrix2 = []
     for i in range(x.size(0)):
          diag_matrix1.append(torch.diag(pairwise_att[i, :x.size(1) // 2]))
      for i in range(x.size(0)):
          diag_matrix2.append(torch.diag(pairwise_att[i, x.size(1) // 2:]))
      # stack the per-sample diagonal matrices built from the two halves of pairwise_att
      pairwise_att1 = torch.stack(diag_matrix1)
      pairwise_att2 = torch.stack(diag_matrix2)
     return pairwise_att1, pairwise_att2
Developer: hh23333, Project: FVAE_adversarial, Lines: 26, Source: adaptive_triplet.py

Example 8: forward

 def forward(self, x):
     x_no_static = self.embed_no_static(x)
     # x_no_static = self.dropout(x_no_static)
     x_static = self.embed_static(x)
     # fix the embedding
     x_static = Variable(x_static.data)
     # x_static = self.dropout(x_static)
     x = torch.stack([x_static, x_no_static], 1)
     one_layer = x  # (N,W,D) #  torch.Size([64, 43, 300])
     # print("one_layer {}".format(one_layer.size()))
     # one_layer = self.dropout(one_layer)
     # one_layer = one_layer.unsqueeze(1)  # (N,Ci,W,D)  #  torch.Size([64, 1, 43, 300])
     # one layer
     one_layer = [torch.transpose(F.relu(conv(one_layer)).squeeze(3), 1, 2).unsqueeze(1) for conv in self.convs1] # torch.Size([64, 100, 36])
     # one_layer = [F.relu(conv(one_layer)).squeeze(3).unsqueeze(1) for conv in self.convs1] # torch.Size([64, 100, 36])
     # print(one_layer[0].size())
     # print(one_layer[1].size())
     # two layer
     two_layer = [F.relu(conv(one_layer)).squeeze(3) for (conv, one_layer) in zip(self.convs2, one_layer)]
     # print("two_layer {}".format(two_layer[0].size()))
     # print("two_layer {}".format(two_layer[1].size()))
     # pooling
     output = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in two_layer]   #  torch.Size([64, 100]) torch.Size([64, 100])
     output = torch.cat(output, 1)  # torch.Size([64, 300])
     # dropout
     output = self.dropout(output)
     # linear
     output = self.fc1(output)
     logit = self.fc2(F.relu(output))
     return logit
Developer: fengzhangyin, Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, Lines: 30, Source: model_DeepCNN_MUI.py

Example 9: forward

    def forward(self, x):
        # flatten input
        if len(x.size()) > 2:
            x = x.view(-1, int(np.prod(x.size()[1:])))
        # corrupt input
        x = self.input_corrupt(x)
        corrupted = x
        # encode
        for layer in self.encode_layers:
            x = layer(x)
            x = F.relu(x)
        # decode
        if self.tied_weights:
            for i, (layer, bias) in enumerate(self.decode_params):
                x = F.linear(x, weight=layer.weight.t(), bias=bias)
                if i == len(self.decode_params)-1:
                    x = self.visible_act(x)
                else:
                    x = F.relu(x)
        else:
            for i, layer in enumerate(self.decode_layers):
                x = layer(x)
                if i == len(self.decode_layers)-1:
                    x = self.visible_act(x)
                else:
                    x = F.relu(x)

        return x, corrupted
Developer: mbeissinger, Project: recurrent_gsn, Lines: 28, Source: dae.py

Example 10: forward

 def forward(self, x):
     x1_1 = F.relu(self.cpm1(x), inplace=True)
     x1_2 = F.relu(self.cpm2(x), inplace=True)
     x2_1 = F.relu(self.cpm3(x1_2), inplace=True)
     x2_2 = F.relu(self.cpm4(x1_2), inplace=True)
     x3_1 = F.relu(self.cpm5(x2_2), inplace=True)
     return torch.cat([x1_1, x2_1, x3_1] , 1)
Developer: UGuess, Project: FaceDetection-DSFD, Lines: 7, Source: face_ssd.py

Example 11: forward

 def forward(self, x):
     out = F.relu(self.bn1(x))
     shortcut = self.shortcut(out)
     out = self.conv1(out)
     out = self.conv2(F.relu(self.bn2(out)))
     out += shortcut
     return out
Developer: eglxiang, Project: mixup-cifar10, Lines: 7, Source: resnet.py

Example 12: forward

 def forward(self, x):
     x = F.relu(self.conv1(x))
     x = F.relu(self.conv2(x))
     before_pool = x
     if self.pooling:
         x = self.pool(x)
     return x, before_pool
Developer: Lazarusgoh, Project: deeplearning, Lines: 7, Source: nets.py

Example 13: forward

 def forward(self, word_inputs, feature_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover):
     """
         input:
             word_inputs: (batch_size, sent_len)
             word_seq_lengths: list of batch_size, (batch_size,1)
             char_inputs: (batch_size*sent_len, word_length)
             char_seq_lengths: list of whole batch_size for char, (batch_size*sent_len, 1)
             char_seq_recover: variable which records the char order information, used to recover char order
         output:
             Variable(batch_size, sent_len, hidden_dim)
     """
     word_represent = self.wordrep(word_inputs,feature_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover)
     ## word_embs (batch_size, seq_len, embed_size)
     if self.word_feature_extractor == "CNN":
         word_in = F.tanh(self.word2cnn(word_represent)).transpose(2,1).contiguous()
         for idx in range(self.cnn_layer):
             if idx == 0:
                 cnn_feature = F.relu(self.cnn_list[idx](word_in))
             else:
                 cnn_feature = F.relu(self.cnn_list[idx](cnn_feature))
             cnn_feature = self.cnn_drop_list[idx](cnn_feature)
             cnn_feature = self.cnn_batchnorm_list[idx](cnn_feature)
         feature_out = cnn_feature.transpose(2,1).contiguous()
     else:
         packed_words = pack_padded_sequence(word_represent, word_seq_lengths.cpu().numpy(), True)
         hidden = None
         lstm_out, hidden = self.lstm(packed_words, hidden)
         lstm_out, _ = pad_packed_sequence(lstm_out)
          ## lstm_out (seq_len, batch_size, hidden_size)
         feature_out = self.droplstm(lstm_out.transpose(1,0))
     ## feature_out (batch_size, seq_len, hidden_size)
     outputs = self.hidden2tag(feature_out)
     return outputs
Developer: xj361685640, Project: NCRFpp, Lines: 33, Source: wordsequence.py

Example 14: forward

    def forward(self, x):
        x = F.relu(self.bn1(self.conv1(x)), True)
        x = F.max_pool2d(self.conv2(x), 2)
        x = self.conv3(x)
        x = self.conv4(x)

        previous = x

        outputs = []
        for i in range(self.num_modules):
            hg = self._modules['m' + str(i)](previous)

            ll = hg
            ll = self._modules['top_m_' + str(i)](ll)

            ll = F.relu(self._modules['bn_end' + str(i)]
                        (self._modules['conv_last' + str(i)](ll)), True)

            # Predict heatmaps
            tmp_out = self._modules['l' + str(i)](ll)
            outputs.append(tmp_out)

            if i < self.num_modules - 1:
                ll = self._modules['bl' + str(i)](ll)
                tmp_out_ = self._modules['al' + str(i)](tmp_out)
                previous = previous + ll + tmp_out_

        return outputs
Developer: CLAUDIOPINHEIRO, Project: face-alignment, Lines: 28, Source: models.py

Example 15: forward

 def forward(self, x):
     x = F.relu(self.fc1(x))
     x = F.relu(self.fc11(x))
     x = F.relu(self.fc2(x))
     x = F.relu(self.fc3(x))
     x = F.tanh(self.out(x))
     return x
Developer: sunstrikes, Project: Algorithm, Lines: 7, Source: WGAN.py


Note: The torch.nn.functional.relu examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code, and do not republish without permission.