

Python torch.relu Method Code Examples

This article collects and summarizes typical usage examples of the torch.relu method in Python. If you are wondering what exactly torch.relu does, how to call it, or what real code that uses it looks like, the curated method examples below should help. You can also explore further usage examples from the torch module.


The following presents 15 code examples of the torch.relu method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
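Before the project examples, here is a minimal sketch of torch.relu itself (the tensor values are illustrative only): it applies the element-wise rectifier max(x, 0) and returns a new tensor, while torch.nn.ReLU wraps the same operation as a module.

import torch

x = torch.tensor([-1.5, 0.0, 2.0])
y = torch.relu(x)  # element-wise max(x, 0) -> tensor([0., 0., 2.])

# torch.relu_ is the in-place variant; torch.nn.ReLU is the module form,
# typically registered inside an nn.Sequential or a custom nn.Module.
layer = torch.nn.ReLU()
assert torch.equal(layer(x), y)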

Example 1: _get_body

# Required import: import torch [as alias]
# Or: from torch import relu [as alias]
def _get_body(self, x, target):
        cos_t = torch.gather(x, 1, target.unsqueeze(1))  # cos(theta_yi)
        if self.easy_margin:
            cond = torch.relu(cos_t)
        else:
            cond_v = cos_t - self.threshold
            cond = torch.relu(cond_v)
        cond = cond.bool()
        # Apex would convert FP16 to FP32 here
        # cos(theta_yi + m)
        new_zy = torch.cos(torch.acos(cos_t) + self.m).type(cos_t.dtype)
        if self.easy_margin:
            zy_keep = cos_t
        else:
            zy_keep = cos_t - self.mm  # (cos(theta_yi) - sin(pi - m)*m)
        new_zy = torch.where(cond, new_zy, zy_keep)
        diff = new_zy - cos_t  # cos(theta_yi + m) - cos(theta_yi)
        gt_one_hot = F.one_hot(target, num_classes=self.classes)
        body = gt_one_hot * diff
        return body 
Author: PistonY, Project: torch-toolbox, Lines of code: 22, Source: loss.py

Example 2: bbox_kp_visibility

# Required import: import torch [as alias]
# Or: from torch import relu [as alias]
def bbox_kp_visibility(bbox, keypoints, vis):
    bx, by, bw, bh = bbox
    x = keypoints[0]
    y = keypoints[1]
    ctx_ = 0.1
    in_box = (x >= bx-ctx_*bw) * (x <= bx+bw*(1+ctx_)) * \
        (y >= by-ctx_*bh) * (y <= by+bh*(1+ctx_))

    in_box = in_box * (vis == 1)

    err = torch.stack([(bx-ctx_*bw)-x,
                       x-(bx+bw*(1+ctx_)),
                       (by-ctx_*bh)-y,
                       y-(by+bh*(1+ctx_))])
    err = torch.relu(err) * vis[None].float()
    err = torch.stack((torch.max(err[0], err[1]),
                       torch.max(err[2], err[3]))).max(dim=1)[0]

    return in_box, err 
Author: facebookresearch, Project: c3dpo_nrsfm, Lines of code: 21, Source: keypoints_dataset.py

Example 3: forward

# Required import: import torch [as alias]
# Or: from torch import relu [as alias]
def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch

        x_all = []

        for i, layer in enumerate(self.layers):
            x = layer(x, edge_index)
            if self.aggregation == 'max':
                x = torch.relu(self.fc_max(x))
            x_all.append(x)

        x = torch.cat(x_all, dim=1)
        x = global_max_pool(x, batch)

        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x 
Author: diningphil, Project: gnn-comparison, Lines of code: 19, Source: GraphSAGE.py

Example 4: lp_pool2d

# Required import: import torch [as alias]
# Or: from torch import relu [as alias]
def lp_pool2d(input, norm_type, kernel_size, stride=None, ceil_mode=False):
    # type: (Tensor, float, int, Optional[BroadcastingList2[int]], bool) -> Tensor
    r"""Applies a 2D power-average pooling over an input signal composed of
    several input planes. If the sum of all inputs to the power of `p` is
    zero, the gradient is set to zero as well.

    See :class:`~torch.nn.LPPool2d` for details.
    """
    kw, kh = utils._pair(kernel_size)
    if stride is not None:
        stride = torch.jit._unwrap_optional(stride)
        out = avg_pool2d(input.pow(norm_type), kernel_size, stride, 0, ceil_mode)
    else:
        out = avg_pool2d(input.pow(norm_type), kernel_size, padding=0, ceil_mode=ceil_mode)

    return (torch.sign(out) * relu(torch.abs(out))).mul(kw * kh).pow(1. / norm_type) 
Author: MagicChuyi, Project: SlowFast-Network-pytorch, Lines of code: 18, Source: functional.py

Example 5: forward

# Required import: import torch [as alias]
# Or: from torch import relu [as alias]
def forward(self, input_):
        # FFN Network
        x = input_.transpose(1, 2) 
        x = self.w_2(t.relu(self.w_1(x))) 
        x = x.transpose(1, 2) 


        # residual connection
        x = x + input_ 

        # dropout
        # x = self.dropout(x) 

        # layer normalization
        x = self.layer_norm(x) 

        return x 
Author: soobinseo, Project: Transformer-TTS, Lines of code: 19, Source: module.py

Example 6: inner_forward

# Required import: import torch [as alias]
# Or: from torch import relu [as alias]
def inner_forward(self, st_inp, first_dimension_size):
        """Implements the forward pass layers of the algorithm."""
        x = self.bn0(st_inp) # 2d batch norm over feature dimension.
        x = self.inp_drop(x) # [b, 1, 2*hidden_size_2, hidden_size_1]
        x = self.conv2d_1(x) # [b, 32, 2*hidden_size_2-3+1, hidden_size_1-3+1]
        x = self.bn1(x) # 2d batch normalization across feature dimension
        x = torch.relu(x)
        x = self.feat_drop(x)
        x = x.view(first_dimension_size, -1) # flatten => [b, 32*(2*hidden_size_2-3+1)*(hidden_size_1-3+1)]
        x = self.fc(x) # dense layer => [b, k]
        x = self.hidden_drop(x)
        if self.training:
            x = self.bn2(x) # batch normalization across the last axis
        x = torch.relu(x)
        x = torch.matmul(x, self.transpose(self.ent_embeddings.weight)) # [b, k] * [k, tot_ent] => [b, tot_ent]
        x = torch.add(x, self.b.weight) # add a bias value
        return torch.sigmoid(x) # sigmoid activation 
Author: Sujit-O, Project: pykg2vec, Lines of code: 19, Source: projection.py

Example 7: forward

# Required import: import torch [as alias]
# Or: from torch import relu [as alias]
def forward(self, queries, keys, values, attention_mask=None, attention_weights=None):
        if self.can_be_stateful and self._is_stateful:
            self.running_keys = torch.cat([self.running_keys, keys], 1)
            keys = self.running_keys

            self.running_values = torch.cat([self.running_values, values], 1)
            values = self.running_values

        if self.identity_map_reordering:
            q_norm = self.layer_norm(queries)
            k_norm = self.layer_norm(keys)
            v_norm = self.layer_norm(values)
            out = self.attention(q_norm, k_norm, v_norm, attention_mask, attention_weights)
            out = queries + self.dropout(torch.relu(out))
        else:
            out = self.attention(queries, keys, values, attention_mask, attention_weights)
            out = self.dropout(out)
            out = self.layer_norm(queries + out)
        return out 
Author: aimagelab, Project: meshed-memory-transformer, Lines of code: 21, Source: attention.py

Example 8: _make_layer

# Required import: import torch [as alias]
# Or: from torch import relu [as alias]
def _make_layer(self, i):
        convolution = nn.Conv2d(
            in_channels=self.channels[i - 1] if i > 0 else self.in_channels,
            out_channels=self.channels[i],
            kernel_size=self.filter_shapes[i],
            dilation=self.dilations[i],
            padding=self.filter_shapes[i] // 2,
        )

        if i == len(self.channels) - 1:
            layer = convolution
            self.add_module(f'layer{i}', layer)
            return layer

        layer = nn.Sequential()
        layer.add_module('conv', convolution)
        if self.batch_norm:
            batch_norm = nn.BatchNorm2d(self.channels[i])
            layer.add_module('batch_norm', batch_norm)
        layer.add_module('relu', nn.ReLU())
        return layer 
Author: nussl, Project: nussl, Lines of code: 23, Source: blocks.py

Example 9: __init__

# Required import: import torch [as alias]
# Or: from torch import relu [as alias]
def __init__(self, num_inputs, hidden_size=(128, 128), activation='tanh'):
        super().__init__()
        if activation == 'tanh':
            self.activation = torch.tanh
        elif activation == 'relu':
            self.activation = torch.relu
        elif activation == 'sigmoid':
            self.activation = torch.sigmoid

        self.affine_layers = nn.ModuleList()
        last_dim = num_inputs
        for nh in hidden_size:
            self.affine_layers.append(nn.Linear(last_dim, nh))
            last_dim = nh

        self.logic = nn.Linear(last_dim, 1)
        self.logic.weight.data.mul_(0.1)
        self.logic.bias.data.mul_(0.0) 
Author: Khrylx, Project: PyTorch-RL, Lines of code: 20, Source: mlp_discriminator.py

Example 10: __init__

# Required import: import torch [as alias]
# Or: from torch import relu [as alias]
def __init__(self, state_dim, action_num, hidden_size=(128, 128), activation='tanh'):
        super().__init__()
        self.is_disc_action = True
        if activation == 'tanh':
            self.activation = torch.tanh
        elif activation == 'relu':
            self.activation = torch.relu
        elif activation == 'sigmoid':
            self.activation = torch.sigmoid

        self.affine_layers = nn.ModuleList()
        last_dim = state_dim
        for nh in hidden_size:
            self.affine_layers.append(nn.Linear(last_dim, nh))
            last_dim = nh

        self.action_head = nn.Linear(last_dim, action_num)
        self.action_head.weight.data.mul_(0.1)
        self.action_head.bias.data.mul_(0.0) 
Author: Khrylx, Project: PyTorch-RL, Lines of code: 21, Source: mlp_policy_disc.py

Example 11: __init__

# Required import: import torch [as alias]
# Or: from torch import relu [as alias]
def __init__(self, state_dim, hidden_size=(128, 128), activation='tanh'):
        super().__init__()
        if activation == 'tanh':
            self.activation = torch.tanh
        elif activation == 'relu':
            self.activation = torch.relu
        elif activation == 'sigmoid':
            self.activation = torch.sigmoid

        self.affine_layers = nn.ModuleList()
        last_dim = state_dim
        for nh in hidden_size:
            self.affine_layers.append(nn.Linear(last_dim, nh))
            last_dim = nh

        self.value_head = nn.Linear(last_dim, 1)
        self.value_head.weight.data.mul_(0.1)
        self.value_head.bias.data.mul_(0.0) 
Author: Khrylx, Project: PyTorch-RL, Lines of code: 20, Source: mlp_critic.py

Example 12: __init__

# Required import: import torch [as alias]
# Or: from torch import relu [as alias]
def __init__(self, state_dim, action_dim, hidden_size=(128, 128), activation='tanh', log_std=0):
        super().__init__()
        self.is_disc_action = False
        if activation == 'tanh':
            self.activation = torch.tanh
        elif activation == 'relu':
            self.activation = torch.relu
        elif activation == 'sigmoid':
            self.activation = torch.sigmoid

        self.affine_layers = nn.ModuleList()
        last_dim = state_dim
        for nh in hidden_size:
            self.affine_layers.append(nn.Linear(last_dim, nh))
            last_dim = nh

        self.action_mean = nn.Linear(last_dim, action_dim)
        self.action_mean.weight.data.mul_(0.1)
        self.action_mean.bias.data.mul_(0.0)

        self.action_log_std = nn.Parameter(torch.ones(1, action_dim) * log_std) 
Author: Khrylx, Project: PyTorch-RL, Lines of code: 23, Source: mlp_policy.py

Example 13: torch_spmv

# Required import: import torch [as alias]
# Or: from torch import relu [as alias]
def torch_spmv(M, K, dtype="float32", n_trial=1):
    spmm = torch.sparse.mm 
    # a_np = np.random.uniform(-0.91, 0.9, [M, K]).astype(dtype)
    # b_np = np.random.uniform(-0.91, 0.9, [K, 1]).astype(dtype)
    # a_torch = torch.relu(torch.tensor(a_np)).to_sparse()
    # b_torch = torch.tensor(b_np)
    m = torch.distributions.bernoulli.Bernoulli(torch.tensor(0.9))
    a_torch = m.sample([M, K]).to_sparse()
    b_torch = m.sample([K, 1])

    # warm-up
    res = spmm(a_torch, b_torch)
    beg = time.time()
    for i in range(n_trial):
        spmm(a_torch, b_torch)
    end = time.time()
    return (end - beg) * 1e3 / n_trial 
Author: KnowingNothing, Project: FlexTensor, Lines of code: 19, Source: sparse-gemm.py

Example 14: forward

# Required import: import torch [as alias]
# Or: from torch import relu [as alias]
def forward(self, context_idxs, pos, context_ner, context_char_idxs, context_lens, sent_h_mapping, sent_t_mapping, relation_label):
		# para_size, char_size, bsz = context_idxs.size(1), context_char_idxs.size(2), context_idxs.size(0)
		# context_ch = self.char_emb(context_char_idxs.contiguous().view(-1, char_size)).view(bsz * para_size, char_size, -1)
		# context_ch = self.char_cnn(context_ch.permute(0, 2, 1).contiguous()).max(dim=-1)[0].view(bsz, para_size, -1)

		sent = torch.cat([self.word_emb(context_idxs) , self.coref_embed(pos), self.ner_emb(context_ner)], dim=-1)

		el = sent_h_mapping.size(1)
		re_embed = (self.relation_embed(relation_label).unsqueeze(1)).expand(-1, el, -1)

		context_output = self.rnn(sent, context_lens)
		context_output = torch.relu(self.linear_t(context_output))
		start_re_output = torch.matmul(sent_h_mapping, context_output)
		end_re_output = torch.matmul(sent_t_mapping, context_output)

		sent_output = torch.cat([start_re_output, end_re_output, re_embed], dim=-1)
		predict_sent = self.linear_re(sent_output).squeeze(2)

		# predict_sent = torch.sum(self.bili(start_re_output, end_re_output)*re_embed, dim=-1)

		return predict_sent 
Author: thunlp, Project: DocRED, Lines of code: 23, Source: LSTM_SP.py

Example 15: forward

# Required import: import torch [as alias]
# Or: from torch import relu [as alias]
def forward(self, context_idxs, pos, context_ner, context_char_idxs, context_lens, h_mapping, t_mapping, relation_mask, dis_h_2_t, dis_t_2_h):
		# para_size, char_size, bsz = context_idxs.size(1), context_char_idxs.size(2), context_idxs.size(0)
		# context_ch = self.char_emb(context_char_idxs.contiguous().view(-1, char_size)).view(bsz * para_size, char_size, -1)
		# context_ch = self.char_cnn(context_ch.permute(0, 2, 1).contiguous()).max(dim=-1)[0].view(bsz, para_size, -1)

		sent = torch.cat([self.word_emb(context_idxs), self.coref_embed(pos), self.ner_emb(context_ner)], dim=-1)
		context_output = self.rnn(sent, context_lens)


		context_output = torch.relu(self.linear_re(context_output))

		start_re_output = torch.matmul(h_mapping, context_output)
		end_re_output = torch.matmul(t_mapping, context_output)

		s_rep = torch.cat([start_re_output, self.dis_embed(dis_h_2_t)], dim=-1)
		t_rep = torch.cat([end_re_output, self.dis_embed(dis_t_2_h)], dim=-1)


		re_rep = self.bili(s_rep, t_rep)
		re_rep = self.self_att(re_rep, re_rep, relation_mask)


		return self.linear_output(re_rep) 
Author: thunlp, Project: DocRED, Lines of code: 25, Source: ContextAware.py


Note: The torch.relu method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please consult each project's license before distributing or using the code; do not reproduce this article without permission.