

Python torch.relu Method Code Examples

This article collects typical usage examples of the Python torch.relu method, drawn from open-source code. If you are unsure what torch.relu does or how to use it in practice, the curated examples below may help. You can also explore further usage examples from the torch package.


The following presents 15 code examples of the torch.relu method, sorted by popularity by default.
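
Before the collected examples, here is a minimal, self-contained sketch of what torch.relu does; the tensor values are made up for illustration. torch.relu applies max(x, 0) elementwise:

import torch

x = torch.tensor([-1.5, 0.0, 2.0])
y = torch.relu(x)   # elementwise max(x, 0)
print(y)            # tensor([0., 0., 2.])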

Example 1: _get_body

# Required module: import torch [as alias]
# Or: from torch import relu [as alias]
def _get_body(self, x, target):
        cos_t = torch.gather(x, 1, target.unsqueeze(1))  # cos(theta_yi)
        if self.easy_margin:
            cond = torch.relu(cos_t)
        else:
            cond_v = cos_t - self.threshold
            cond = torch.relu(cond_v)
        cond = cond.bool()
        # Apex would convert FP16 to FP32 here
        # cos(theta_yi + m)
        new_zy = torch.cos(torch.acos(cos_t) + self.m).type(cos_t.dtype)
        if self.easy_margin:
            zy_keep = cos_t
        else:
            zy_keep = cos_t - self.mm  # (cos(theta_yi) - sin(pi - m)*m)
        new_zy = torch.where(cond, new_zy, zy_keep)
        diff = new_zy - cos_t  # cos(theta_yi + m) - cos(theta_yi)
        gt_one_hot = F.one_hot(target, num_classes=self.classes)
        body = gt_one_hot * diff
        return body 
Developer: PistonY, Project: torch-toolbox, Lines of code: 22, Source: loss.py
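
This example computes cos(theta_yi + m) via torch.acos. As a sanity check (a standalone sketch, not part of the original project), the same value follows from the angle-addition identity cos(theta + m) = cos(theta)cos(m) - sin(theta)sin(m):

import math
import torch

# Standalone sanity check: acos-based computation, as in the example,
# matches the angle-addition expansion (sin(theta) >= 0 since acos maps to [0, pi]).
cos_t = torch.empty(4, 1).uniform_(-1, 1)   # stand-in for cos(theta_yi)
m = 0.5
via_acos = torch.cos(torch.acos(cos_t) + m)
via_identity = cos_t * math.cos(m) - torch.sqrt(1 - cos_t ** 2) * math.sin(m)
assert torch.allclose(via_acos, via_identity, atol=1e-5)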

Example 2: bbox_kp_visibility

# Required module: import torch [as alias]
# Or: from torch import relu [as alias]
def bbox_kp_visibility(bbox, keypoints, vis):
    bx, by, bw, bh = bbox
    x = keypoints[0]
    y = keypoints[1]
    ctx_ = 0.1
    in_box = (x >= bx-ctx_*bw) * (x <= bx+bw*(1+ctx_)) * \
        (y >= by-ctx_*bh) * (y <= by+bh*(1+ctx_))

    in_box = in_box * (vis == 1)

    err = torch.stack([(bx-ctx_*bw)-x,
                       x-(bx+bw*(1+ctx_)),
                       (by-ctx_*bh)-y,
                       y-(by+bh*(1+ctx_))])
    err = torch.relu(err) * vis[None].float()
    err = torch.stack((torch.max(err[0], err[1]),
                       torch.max(err[2], err[3]))).max(dim=1)[0]

    return in_box, err 
Developer: facebookresearch, Project: c3dpo_nrsfm, Lines of code: 21, Source: keypoints_dataset.py
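
A hypothetical call with toy data (shapes inferred from the function body: keypoints is a [2, N] tensor of x/y coordinates, vis an [N] visibility vector; the numbers are made up):

import torch

bbox = (10.0, 20.0, 100.0, 50.0)                 # (x, y, width, height)
keypoints = torch.tensor([[15.0, 200.0, 50.0],   # x coordinates
                          [25.0, 30.0, 60.0]])   # y coordinates
vis = torch.tensor([1, 1, 0])                    # visibility flags
in_box, err = bbox_kp_visibility(bbox, keypoints, vis)
# in_box: per-keypoint bool; err: max x- and y-violations over visible keypoints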

Example 3: forward

# Required module: import torch [as alias]
# Or: from torch import relu [as alias]
def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch

        x_all = []

        for i, layer in enumerate(self.layers):
            x = layer(x, edge_index)
            if self.aggregation == 'max':
                x = torch.relu(self.fc_max(x))
            x_all.append(x)

        x = torch.cat(x_all, dim=1)
        x = global_max_pool(x, batch)

        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x 
Developer: diningphil, Project: gnn-comparison, Lines of code: 19, Source: GraphSAGE.py
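
This model relies on PyTorch Geometric; a minimal sketch of the global_max_pool step on its own, with toy node features and a batch assignment vector:

import torch
from torch_geometric.nn import global_max_pool

x = torch.randn(5, 8)                  # 5 nodes, 8 features each
batch = torch.tensor([0, 0, 0, 1, 1])  # nodes 0-2 belong to graph 0, nodes 3-4 to graph 1
out = global_max_pool(x, batch)        # per-graph max over nodes -> shape [2, 8]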

Example 4: lp_pool2d

# Required module: import torch [as alias]
# Or: from torch import relu [as alias]
def lp_pool2d(input, norm_type, kernel_size, stride=None, ceil_mode=False):
    # type: (Tensor, float, int, Optional[BroadcastingList2[int]], bool) -> Tensor
    r"""Applies a 2D power-average pooling over an input signal composed of
    several input planes. If the sum of all inputs to the power of `p` is
    zero, the gradient is set to zero as well.

    See :class:`~torch.nn.LPPool2d` for details.
    """
    kw, kh = utils._pair(kernel_size)
    if stride is not None:
        stride = torch.jit._unwrap_optional(stride)
        out = avg_pool2d(input.pow(norm_type), kernel_size, stride, 0, ceil_mode)
    else:
        out = avg_pool2d(input.pow(norm_type), kernel_size, padding=0, ceil_mode=ceil_mode)

    return (torch.sign(out) * relu(torch.abs(out))).mul(kw * kh).pow(1. / norm_type) 
Developer: MagicChuyi, Project: SlowFast-Network-pytorch, Lines of code: 18, Source: functional.py
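
The helper above is the internal implementation behind the public functional API; a minimal usage sketch of that API:

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 8, 8)
out = F.lp_pool2d(x, norm_type=2, kernel_size=2)  # L2 pooling; stride defaults to kernel_size -> [1, 3, 4, 4]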

Example 5: forward

# Required module: import torch [as alias]
# Or: from torch import relu [as alias]
def forward(self, input_):
        # FFN Network
        x = input_.transpose(1, 2) 
        x = self.w_2(t.relu(self.w_1(x))) 
        x = x.transpose(1, 2) 


        # residual connection
        x = x + input_ 

        # dropout
        # x = self.dropout(x) 

        # layer normalization
        x = self.layer_norm(x) 

        return x 
Developer: soobinseo, Project: Transformer-TTS, Lines of code: 19, Source: module.py
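
The snippet above leaves w_1, w_2, and layer_norm undefined. A self-contained sketch of the same position-wise FFN pattern, assuming 1-D convolutions for w_1/w_2 (the layer sizes are hypothetical, not the project's configuration):

import torch
import torch.nn as nn

class FFN(nn.Module):
    """Position-wise feed-forward block: conv -> relu -> conv, residual, layer norm."""
    def __init__(self, d_model=256, d_ff=1024):
        super().__init__()
        self.w_1 = nn.Conv1d(d_model, d_ff, kernel_size=1)
        self.w_2 = nn.Conv1d(d_ff, d_model, kernel_size=1)
        self.layer_norm = nn.LayerNorm(d_model)

    def forward(self, x):                      # x: [batch, time, d_model]
        y = x.transpose(1, 2)                  # -> [batch, d_model, time] for Conv1d
        y = self.w_2(torch.relu(self.w_1(y)))
        y = y.transpose(1, 2)                  # back to [batch, time, d_model]
        return self.layer_norm(x + y)          # residual connection + layer norm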

Example 6: inner_forward

# Required module: import torch [as alias]
# Or: from torch import relu [as alias]
def inner_forward(self, st_inp, first_dimension_size):
        """Implements the forward pass layers of the algorithm."""
        x = self.bn0(st_inp) # 2d batch norm over feature dimension.
        x = self.inp_drop(x) # [b, 1, 2*hidden_size_2, hidden_size_1]
        x = self.conv2d_1(x) # [b, 32, 2*hidden_size_2-3+1, hidden_size_1-3+1]
        x = self.bn1(x) # 2d batch normalization across feature dimension
        x = torch.relu(x)
        x = self.feat_drop(x)
        x = x.view(first_dimension_size, -1) # flatten => [b, 32*(2*hidden_size_2-3+1)*(hidden_size_1-3+1)]
        x = self.fc(x) # dense layer => [b, k]
        x = self.hidden_drop(x)
        if self.training:
            x = self.bn2(x) # batch normalization across the last axis
        x = torch.relu(x)
        x = torch.matmul(x, self.transpose(self.ent_embeddings.weight)) # [b, k] * [k, tot_ent] => [b, tot_ent]
        x = torch.add(x, self.b.weight) # add a bias value
        return torch.sigmoid(x) # sigmoid activation 
Developer: Sujit-O, Project: pykg2vec, Lines of code: 19, Source: projection.py
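
The flatten-then-dense step depends on the post-convolution spatial size. A toy sketch of that shape bookkeeping (hypothetical sizes, not the project's configuration):

import torch
import torch.nn as nn

x = torch.randn(4, 1, 100, 20)          # [b, 1, 2*hidden_size_2, hidden_size_1]
x = torch.relu(nn.Conv2d(1, 32, 3)(x))  # 3x3 conv, no padding -> [4, 32, 98, 18]
x = x.view(4, -1)                       # flatten -> [4, 32*98*18]
x = nn.Linear(32 * 98 * 18, 200)(x)     # dense layer -> [4, 200]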

Example 7: forward

# Required module: import torch [as alias]
# Or: from torch import relu [as alias]
def forward(self, queries, keys, values, attention_mask=None, attention_weights=None):
        if self.can_be_stateful and self._is_stateful:
            self.running_keys = torch.cat([self.running_keys, keys], 1)
            keys = self.running_keys

            self.running_values = torch.cat([self.running_values, values], 1)
            values = self.running_values

        if self.identity_map_reordering:
            q_norm = self.layer_norm(queries)
            k_norm = self.layer_norm(keys)
            v_norm = self.layer_norm(values)
            out = self.attention(q_norm, k_norm, v_norm, attention_mask, attention_weights)
            out = queries + self.dropout(torch.relu(out))
        else:
            out = self.attention(queries, keys, values, attention_mask, attention_weights)
            out = self.dropout(out)
            out = self.layer_norm(queries + out)
        return out 
Developer: aimagelab, Project: meshed-memory-transformer, Lines of code: 21, Source: attention.py
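
The identity_map_reordering flag toggles between pre-norm and post-norm residual arrangements. A minimal sketch of the distinction, using a stand-in attention function (hypothetical, for illustration only):

import torch
import torch.nn as nn

ln = nn.LayerNorm(64)
drop = nn.Dropout(0.1)
attn = lambda q, k, v: q                 # stand-in for the real attention call
q = k = v = torch.randn(2, 5, 64)

# identity_map_reordering=True: normalize the inputs first, residual around relu(out)
pre_norm = q + drop(torch.relu(attn(ln(q), ln(k), ln(v))))
# identity_map_reordering=False: residual first, then layer norm
post_norm = ln(q + drop(attn(q, k, v)))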

Example 8: _make_layer

# Required module: import torch [as alias]
# Or: from torch import relu [as alias]
def _make_layer(self, i):
        convolution = nn.Conv2d(
            in_channels=self.channels[i - 1] if i > 0 else self.in_channels,
            out_channels=self.channels[i],
            kernel_size=self.filter_shapes[i],
            dilation=self.dilations[i],
            padding=self.filter_shapes[i] // 2,
        )

        if i == len(self.channels) - 1:
            layer = convolution
            self.add_module(f'layer{i}', layer)
            return layer

        layer = nn.Sequential()
        layer.add_module('conv', convolution)
        if self.batch_norm:
            batch_norm = nn.BatchNorm2d(self.channels[i])
            layer.add_module('batch_norm', batch_norm)
        layer.add_module('relu', nn.ReLU())
        return layer 
Developer: nussl, Project: nussl, Lines of code: 23, Source: blocks.py

Example 9: __init__

# Required module: import torch [as alias]
# Or: from torch import relu [as alias]
def __init__(self, num_inputs, hidden_size=(128, 128), activation='tanh'):
        super().__init__()
        if activation == 'tanh':
            self.activation = torch.tanh
        elif activation == 'relu':
            self.activation = torch.relu
        elif activation == 'sigmoid':
            self.activation = torch.sigmoid

        self.affine_layers = nn.ModuleList()
        last_dim = num_inputs
        for nh in hidden_size:
            self.affine_layers.append(nn.Linear(last_dim, nh))
            last_dim = nh

        self.logic = nn.Linear(last_dim, 1)
        self.logic.weight.data.mul_(0.1)
        self.logic.bias.data.mul_(0.0) 
Developer: Khrylx, Project: PyTorch-RL, Lines of code: 20, Source: mlp_discriminator.py
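
Only __init__ is shown for Examples 9 through 12. A hypothetical forward pass matching this discriminator (the original repo defines one along these lines) would apply the chosen activation between affine layers, shown here as a free function over the module:

import torch

def discriminator_forward(net, x):
    # Hypothetical sketch, not copied from the repo: stack of affine layers
    # with the selected activation, then a sigmoid output head.
    for affine in net.affine_layers:
        x = net.activation(affine(x))
    return torch.sigmoid(net.logic(x))    # probability in (0, 1)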

Example 10: __init__

# Required module: import torch [as alias]
# Or: from torch import relu [as alias]
def __init__(self, state_dim, action_num, hidden_size=(128, 128), activation='tanh'):
        super().__init__()
        self.is_disc_action = True
        if activation == 'tanh':
            self.activation = torch.tanh
        elif activation == 'relu':
            self.activation = torch.relu
        elif activation == 'sigmoid':
            self.activation = torch.sigmoid

        self.affine_layers = nn.ModuleList()
        last_dim = state_dim
        for nh in hidden_size:
            self.affine_layers.append(nn.Linear(last_dim, nh))
            last_dim = nh

        self.action_head = nn.Linear(last_dim, action_num)
        self.action_head.weight.data.mul_(0.1)
        self.action_head.bias.data.mul_(0.0) 
Developer: Khrylx, Project: PyTorch-RL, Lines of code: 21, Source: mlp_policy_disc.py

Example 11: __init__

# Required module: import torch [as alias]
# Or: from torch import relu [as alias]
def __init__(self, state_dim, hidden_size=(128, 128), activation='tanh'):
        super().__init__()
        if activation == 'tanh':
            self.activation = torch.tanh
        elif activation == 'relu':
            self.activation = torch.relu
        elif activation == 'sigmoid':
            self.activation = torch.sigmoid

        self.affine_layers = nn.ModuleList()
        last_dim = state_dim
        for nh in hidden_size:
            self.affine_layers.append(nn.Linear(last_dim, nh))
            last_dim = nh

        self.value_head = nn.Linear(last_dim, 1)
        self.value_head.weight.data.mul_(0.1)
        self.value_head.bias.data.mul_(0.0) 
Developer: Khrylx, Project: PyTorch-RL, Lines of code: 20, Source: mlp_critic.py

Example 12: __init__

# Required module: import torch [as alias]
# Or: from torch import relu [as alias]
def __init__(self, state_dim, action_dim, hidden_size=(128, 128), activation='tanh', log_std=0):
        super().__init__()
        self.is_disc_action = False
        if activation == 'tanh':
            self.activation = torch.tanh
        elif activation == 'relu':
            self.activation = torch.relu
        elif activation == 'sigmoid':
            self.activation = torch.sigmoid

        self.affine_layers = nn.ModuleList()
        last_dim = state_dim
        for nh in hidden_size:
            self.affine_layers.append(nn.Linear(last_dim, nh))
            last_dim = nh

        self.action_mean = nn.Linear(last_dim, action_dim)
        self.action_mean.weight.data.mul_(0.1)
        self.action_mean.bias.data.mul_(0.0)

        self.action_log_std = nn.Parameter(torch.ones(1, action_dim) * log_std) 
Developer: Khrylx, Project: PyTorch-RL, Lines of code: 23, Source: mlp_policy.py

Example 13: torch_spmv

# Required module: import torch [as alias]
# Or: from torch import relu [as alias]
def torch_spmv(M, K, dtype="float32", n_trial=1):
    spmm = torch.sparse.mm 
    # a_np = np.random.uniform(-0.91, 0.9, [M, K]).astype(dtype)
    # b_np = np.random.uniform(-0.91, 0.9, [K, 1]).astype(dtype)
    # a_torch = torch.relu(torch.tensor(a_np)).to_sparse()
    # b_torch = torch.tensor(b_np)
    m = torch.distributions.bernoulli.Bernoulli(torch.tensor(0.9))
    a_torch = m.sample([M, K]).to_sparse()
    b_torch = m.sample([K, 1])

    # warm-up
    res = spmm(a_torch, b_torch)
    beg = time.time()
    for i in range(n_trial):
        spmm(a_torch, b_torch)
    end = time.time()
    return (end - beg) * 1e3 / n_trial 
Developer: KnowingNothing, Project: FlexTensor, Lines of code: 19, Source: sparse-gemm.py
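
A hypothetical benchmark call (the helper needs `import time` and `import torch` at module scope); it returns the average latency in milliseconds:

ms = torch_spmv(1024, 1024, n_trial=10)
print(f"SpMV 1024x1024: {ms:.3f} ms/trial")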

Example 14: forward

# Required module: import torch [as alias]
# Or: from torch import relu [as alias]
def forward(self, context_idxs, pos, context_ner, context_char_idxs, context_lens, sent_h_mapping, sent_t_mapping, relation_label):
		# para_size, char_size, bsz = context_idxs.size(1), context_char_idxs.size(2), context_idxs.size(0)
		# context_ch = self.char_emb(context_char_idxs.contiguous().view(-1, char_size)).view(bsz * para_size, char_size, -1)
		# context_ch = self.char_cnn(context_ch.permute(0, 2, 1).contiguous()).max(dim=-1)[0].view(bsz, para_size, -1)

		sent = torch.cat([self.word_emb(context_idxs) , self.coref_embed(pos), self.ner_emb(context_ner)], dim=-1)

		el = sent_h_mapping.size(1)
		re_embed = (self.relation_embed(relation_label).unsqueeze(1)).expand(-1, el, -1)

		context_output = self.rnn(sent, context_lens)
		context_output = torch.relu(self.linear_t(context_output))
		start_re_output = torch.matmul(sent_h_mapping, context_output)
		end_re_output = torch.matmul(sent_t_mapping, context_output)

		sent_output = torch.cat([start_re_output, end_re_output, re_embed], dim=-1)
		predict_sent = self.linear_re(sent_output).squeeze(2)

		# predict_sent = torch.sum(self.bili(start_re_output, end_re_output)*re_embed, dim=-1)

		return predict_sent 
Developer: thunlp, Project: DocRED, Lines of code: 23, Source: LSTM_SP.py

Example 15: forward

# Required module: import torch [as alias]
# Or: from torch import relu [as alias]
def forward(self, context_idxs, pos, context_ner, context_char_idxs, context_lens, h_mapping, t_mapping, relation_mask, dis_h_2_t, dis_t_2_h):
		# para_size, char_size, bsz = context_idxs.size(1), context_char_idxs.size(2), context_idxs.size(0)
		# context_ch = self.char_emb(context_char_idxs.contiguous().view(-1, char_size)).view(bsz * para_size, char_size, -1)
		# context_ch = self.char_cnn(context_ch.permute(0, 2, 1).contiguous()).max(dim=-1)[0].view(bsz, para_size, -1)

		sent = torch.cat([self.word_emb(context_idxs), self.coref_embed(pos), self.ner_emb(context_ner)], dim=-1)
		context_output = self.rnn(sent, context_lens)


		context_output = torch.relu(self.linear_re(context_output))

		start_re_output = torch.matmul(h_mapping, context_output)
		end_re_output = torch.matmul(t_mapping, context_output)

		s_rep = torch.cat([start_re_output, self.dis_embed(dis_h_2_t)], dim=-1)
		t_rep = torch.cat([end_re_output, self.dis_embed(dis_t_2_h)], dim=-1)


		re_rep = self.bili(s_rep, t_rep)
		re_rep = self.self_att(re_rep, re_rep, relation_mask)


		return self.linear_output(re_rep) 
Developer: thunlp, Project: DocRED, Lines of code: 25, Source: ContextAware.py


Note: the torch.relu method examples in this article were compiled by 純淨天空 from GitHub/MSDocs and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Consult each project's license before redistributing or reusing the code, and do not republish without permission.