

Python functional.softplus Method Code Examples

This article collects typical usage examples of the torch.nn.functional.softplus method in Python. If you have been wondering what exactly functional.softplus does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples from the torch.nn.functional module.


Below are 15 code examples of functional.softplus, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
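
A quick refresher before the examples: softplus is a smooth approximation of ReLU, defined as softplus(x) = (1/beta) * log(1 + exp(beta * x)). PyTorch exposes it as F.softplus(input, beta=1, threshold=20); for numerical stability, elements with beta * x above the threshold are passed through linearly. A minimal sketch of the basic call:

import torch
import torch.nn.functional as F

x = torch.linspace(-5., 5., steps=11)
y = F.softplus(x)                   # element-wise log(1 + exp(x)) with beta=1
manual = torch.log1p(torch.exp(x))  # reference formula
print(torch.allclose(y, manual))    # True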

Example 1: sample_content

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import softplus [as alias]
def sample_content(self, content, sample):
    '''
    Pass the content through content_lstm to get the final content.
    '''
    content = content.view(-1, self.n_frames_input, self.total_components, self.content_latent_size)
    contents = []
    for i in range(self.total_components):
      z = content[:, :, i, :]
      z = self.content_lstm(z).unsqueeze(1) # batch_size x 1 x (content_latent_size * 2)
      contents.append(z)
    content = torch.cat(contents, dim=1).view(-1, self.content_latent_size * 2)

    # Get mu and sigma, and sample.
    content_mu = content[:, :self.content_latent_size]
    content_sigma = F.softplus(content[:, self.content_latent_size:])
    content = self.pyro_sample('content', dist.Normal, content_mu, content_sigma, sample)
    return content 
Author: jthsieh, Project: DDPAE-video-prediction, Lines: 19, Source: DDPAE.py

Example 2: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import softplus [as alias]
def forward(self, x, target):
        similarity_matrix = x @ x.T  # need grad here
        label_matrix = target.unsqueeze(1) == target.unsqueeze(0)
        negative_matrix = label_matrix.logical_not()
        positive_matrix = label_matrix.fill_diagonal_(False)

        sp = torch.where(positive_matrix, similarity_matrix,
                         torch.zeros_like(similarity_matrix))
        sn = torch.where(negative_matrix, similarity_matrix,
                         torch.zeros_like(similarity_matrix))

        ap = torch.clamp_min(1 + self.m - sp.detach(), min=0.)
        an = torch.clamp_min(sn.detach() + self.m, min=0.)

        logit_p = -self.gamma * ap * (sp - self.dp)
        logit_n = self.gamma * an * (sn - self.dn)

        logit_p = torch.where(positive_matrix, logit_p,
                              torch.zeros_like(logit_p))
        logit_n = torch.where(negative_matrix, logit_n,
                              torch.zeros_like(logit_n))

        loss = F.softplus(torch.logsumexp(logit_p, dim=1) +
                          torch.logsumexp(logit_n, dim=1)).mean()
        return loss 
Author: PistonY, Project: torch-toolbox, Lines: 27, Source: loss.py
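
The final softplus-of-logsumexp line is the smooth formulation of this pairwise loss: softplus(logsumexp(p) + logsumexp(n)) equals log(1 + sum_i exp(p_i) * sum_j exp(n_j)). A quick numerical check of that identity, with stand-in logits:

import torch
import torch.nn.functional as F

lp = torch.randn(4, 6)  # stand-in positive logits, one row per sample
ln = torch.randn(4, 6)  # stand-in negative logits

lhs = F.softplus(torch.logsumexp(lp, dim=1) + torch.logsumexp(ln, dim=1))
rhs = torch.log1p(lp.exp().sum(dim=1) * ln.exp().sum(dim=1))
print(torch.allclose(lhs, rhs, atol=1e-6))  # True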

Example 3: init_action_pd

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import softplus [as alias]
def init_action_pd(ActionPD, pdparam):
    '''
    Initialize the action_pd for discrete or continuous actions:
    - discrete: action_pd = ActionPD(logits)
    - continuous: action_pd = ActionPD(loc, scale)
    '''
    if 'logits' in ActionPD.arg_constraints:  # discrete
        action_pd = ActionPD(logits=pdparam)
    else:  # continuous, args = loc and scale
        if isinstance(pdparam, list):  # split output
            loc, scale = pdparam
        else:
            loc, scale = pdparam.transpose(0, 1)
        # scale (stdev) must be > 0; softplus maps it to a positive value
        scale = F.softplus(scale) + 1e-8
        if isinstance(pdparam, list):  # split output
            # construct covars from a batched scale tensor
            covars = torch.diag_embed(scale)
            action_pd = ActionPD(loc=loc, covariance_matrix=covars)
        else:
            action_pd = ActionPD(loc=loc, scale=scale)
    return action_pd 
Author: ConvLab, Project: ConvLab, Lines: 24, Source: policy_util.py
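
A minimal hypothetical usage of the continuous (non-list) branch above, assuming a network head that emits one (loc, scale) pair per state:

import torch
import torch.nn.functional as F
from torch.distributions import Normal

pdparam = torch.randn(5, 2)           # hypothetical head output: (batch, 2)
loc, scale = pdparam.transpose(0, 1)  # unpack into loc and raw scale
scale = F.softplus(scale) + 1e-8      # softplus + epsilon keeps scale > 0
action_pd = Normal(loc=loc, scale=scale)
action = action_pd.sample()           # one action per state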

Example 4: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import softplus [as alias]
def forward(self, source_features):
        outputs = []
        if self.weight_type == 'const':
            for w in F.softplus(self.weights.mul(10)):
                outputs.append(w.view(1, 1))
        else:
            for i, (idx, _) in enumerate(self.pairs):
                f = source_features[idx]
                f = F.avg_pool2d(f, f.size(2)).view(-1, f.size(1))
                if self.weight_type == 'relu':
                    outputs.append(F.relu(self[i](f)))
                elif self.weight_type == 'relu-avg':
                    outputs.append(F.relu(self[i](f.div(f.size(1)))))
                elif self.weight_type == 'relu6':
                    outputs.append(F.relu6(self[i](f)))
        return outputs 
Author: alinlab, Project: L2T-ww, Lines: 18, Source: train_l2t_ww.py

Example 5: _concatenation_debug

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import softplus [as alias]
def _concatenation_debug(self, x, g):
        input_size = x.size()
        batch_size = input_size[0]
        assert batch_size == g.size(0)

        # theta => (b, c, t, h, w) -> (b, i_c, t, h, w) -> (b, i_c, thw)
        # phi   => (b, g_d) -> (b, i_c)
        theta_x = self.theta(x)
        theta_x_size = theta_x.size()

        # g (b, c, t', h', w') -> phi_g (b, i_c, t', h', w')
        #  Relu(theta_x + phi_g + bias) -> f = (b, i_c, thw) -> (b, i_c, t/s1, h/s2, w/s3)
        # F.upsample is deprecated; F.interpolate is the modern equivalent
        phi_g = F.interpolate(self.phi(g), size=theta_x_size[2:], mode=self.upsample_mode)
        f = F.softplus(theta_x + phi_g)

        #  psi^T * f -> (b, psi_i_c, t/s1, h/s2, w/s3)
        sigm_psi_f = torch.sigmoid(self.psi(f))

        # upsample the attentions and multiply
        sigm_psi_f = F.interpolate(sigm_psi_f, size=input_size[2:], mode=self.upsample_mode)
        y = sigm_psi_f.expand_as(x) * x
        W_y = self.W(y)

        return W_y, sigm_psi_f 
Author: ozan-oktay, Project: Attention-Gated-Networks, Lines: 26, Source: grid_attention_layer.py

Example 6: _pos

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import softplus [as alias]
def _pos(self, p):
        pos_fn = self.pos_fn.lower()
        if pos_fn == 'softmax':
            p_sz = p.size()
            p = p.view(p_sz[0],p_sz[1], -1)
            p = F.softmax(p, -1)
            return p.view(p_sz)
        elif pos_fn == 'exp':
            return torch.exp(p)
        elif pos_fn == 'softplus':
            return F.softplus(p, beta=10)
        elif pos_fn == 'sigmoid':
            return torch.sigmoid(p)  # F.sigmoid is deprecated
        else:
            print('Undefined positive function!')
            return 
Author: abdo-eldesokey, Project: nconv, Lines: 18, Source: nconv.py
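
The beta=10 argument above sharpens the softplus toward ReLU while keeping the output strictly positive and smooth, since softplus(x, beta) = (1/beta) * log(1 + exp(beta * x)). A small sketch of how the gap to ReLU shrinks as beta grows:

import torch
import torch.nn.functional as F

x = torch.linspace(-2., 2., steps=9)
for beta in (1, 10, 100):
    gap = (F.softplus(x, beta=beta) - F.relu(x)).abs().max()
    print(beta, gap.item())  # max gap is log(2)/beta, attained at x = 0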

Example 7: D_logistic_r2

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import softplus [as alias]
def D_logistic_r2(fake_img, real_img, D, gamma=10.0):
    # torch.autograd.Variable is deprecated; requires_grad_ marks the images
    # as leaf tensors so input gradients can be taken for the penalty below
    real_img = real_img.detach().requires_grad_(True)
    fake_img = fake_img.detach().requires_grad_(True)

    real_score = D(real_img)
    fake_score = D(fake_img)

    loss = F.softplus(fake_score)
    loss = loss + F.softplus(-real_score)

    # GradientPenalty
    # One of the differentiated Tensors does not require grad?
    # https://discuss.pytorch.org/t/one-of-the-differentiated-tensors-does-not-require-grad/54694
    # create_graph=True keeps the gradient computation differentiable so the
    # penalty itself can be backpropagated through the discriminator
    fake_grads = grad(torch.sum(fake_score), fake_img, create_graph=True)[0]
    gradient_penalty = torch.sum(torch.square(fake_grads), dim=[1, 2, 3])
    reg = gradient_penalty * (gamma * 0.5)

    # FIXME: only supports non-lazy regularization mode
    return loss + reg


# ==============================================================================
# Non-saturating logistic loss with path length regularizer from the paper
# "Analyzing and Improving the Image Quality of StyleGAN", Karras et al. 2019
# ============================================================================== 
Author: tomguluson92, Project: StyleGAN2_PyTorch, Lines: 27, Source: loss.py
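
The two softplus terms are exactly the non-saturating logistic discriminator loss: softplus(fake_score) = -log(1 - sigmoid(fake_score)) and softplus(-real_score) = -log(sigmoid(real_score)). A quick verification of that equivalence:

import torch
import torch.nn.functional as F

real_score = torch.randn(8)
fake_score = torch.randn(8)

loss = F.softplus(fake_score) + F.softplus(-real_score)
reference = -torch.log(1 - torch.sigmoid(fake_score)) \
            - torch.log(torch.sigmoid(real_score))
print(torch.allclose(loss, reference, atol=1e-6))  # True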

Example 8: _compute_policy_entropy

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import softplus [as alias]
def _compute_policy_entropy(self, obs):
        r"""Compute entropy value of probability distribution.

        Notes: P is the maximum path length (self.max_path_length)

        Args:
            obs (torch.Tensor): Observation from the environment
                with shape :math:`(N, P, O*)`.

        Returns:
            torch.Tensor: Calculated entropy values given observation
                with shape :math:`(N, P)`.

        """
        if self._stop_entropy_gradient:
            with torch.no_grad():
                policy_entropy = self.policy(obs)[0].entropy()
        else:
            policy_entropy = self.policy(obs)[0].entropy()

        # This prevents entropy from becoming negative for small policy std
        if self._use_softplus_entropy:
            policy_entropy = F.softplus(policy_entropy)

        return policy_entropy 
Author: rlworkgroup, Project: garage, Lines: 27, Source: vpg.py
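
The comment about negative entropy refers to differential entropy: for a Gaussian policy, H = 0.5 * log(2*pi*e*sigma^2), which drops below zero once sigma < 1/sqrt(2*pi*e), roughly 0.242. A small illustration of the squashing:

import torch
import torch.nn.functional as F
from torch.distributions import Normal

entropy = Normal(0.0, 0.05).entropy()  # small std -> negative entropy
print(entropy)              # approximately tensor(-1.5768)
print(F.softplus(entropy))  # mapped back to a positive value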

Example 9: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import softplus [as alias]
def forward(self, x, y):
        # First extract an embedding z from the visual input x.
        #
        # We use softplus activations so our model has
        # (generally) non-zero second-order derivatives.
        z = F.softplus(self.conv1(x))
        z = F.max_pool2d(z, 2, 2)
        z = F.softplus(self.conv2(z))
        z = F.max_pool2d(z, 2, 2)
        z = z.view(-1, 4*4*50)
        z = F.softplus(self.fc1(z))
        z = self.fc2(z)

        # Next combine that embedding with the proposed label y
        # and pass that through a single hidden-layer to predict
        # the energy function value.
        v = torch.cat((z, y), dim=1)
        v = F.softplus(self.fce1(v))
        E = self.fce2(v).squeeze()
        return E 
Author: facebookresearch, Project: higher, Lines: 22, Source: deep-energy-mnist.py
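
The comment about second-order derivatives is the reason softplus replaces ReLU here: softplus is infinitely differentiable with second derivative sigmoid(x) * (1 - sigmoid(x)) > 0 everywhere, while ReLU is piecewise linear with zero curvature almost everywhere. A sketch using double backward to see the difference:

import torch
import torch.nn.functional as F

x = torch.tensor([-1.0, 0.0, 2.0], requires_grad=True)

g, = torch.autograd.grad(F.softplus(x).sum(), x, create_graph=True)
h, = torch.autograd.grad(g.sum(), x)
print(h)  # sigmoid(x) * (1 - sigmoid(x)): strictly positive

g, = torch.autograd.grad(F.relu(x).sum(), x, create_graph=True)
h, = torch.autograd.grad(g.sum(), x)
print(h)  # all zeros: ReLU has no curvature away from 0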

Example 10: normal_parse_params

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import softplus [as alias]
def normal_parse_params(params, min_sigma=0):
    """
    Take a Tensor (e. g. neural network output) and return
    torch.distributions.Normal distribution.
    This Normal distribution is component-wise independent,
    and its dimensionality depends on the input shape.
    First half of channels is mean of the distribution,
    the softplus of the second half is std (sigma), so there is
    no restrictions on the input tensor.

    min_sigma is the minimal value of sigma. I. e. if the above
    softplus is less than min_sigma, then sigma is clipped
    from below with value min_sigma. This regularization
    is required for the numerical stability and may be considered
    as a neural network architecture choice without any change
    to the probabilistic model.
    """
    d = params.shape[1]
    mu = params[:, :d // 2]
    sigma_params = params[:, d // 2:]
    sigma = softplus(sigma_params)
    sigma = sigma.clamp(min=min_sigma)
    distr = Normal(mu, sigma)
    return distr 
Author: tigvarts, Project: vaeac, Lines: 27, Source: prob_utils.py
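
A minimal hypothetical usage sketch, assuming the function above and its imports (softplus, Normal) are in scope: split an 8-channel output into 4 means and 4 sigma parameters for a batch of 16.

import torch

params = torch.randn(16, 8)                  # hypothetical network output
distr = normal_parse_params(params, min_sigma=1e-3)
print(distr.mean.shape)                      # torch.Size([16, 4])
print(bool((distr.stddev >= 1e-3).all()))    # True: sigma is clipped from below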

Example 11: _pre_process

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import softplus [as alias]
def _pre_process(self, x):
        """Dequantize the input image `x` and convert to logits.

        Args:
            x (torch.Tensor): Input image.

        Returns:
            y (torch.Tensor): Dequantized logits of `x`.

        See Also:
            - Dequantization: https://arxiv.org/abs/1511.01844, Section 3.1
            - Modeling logits: https://arxiv.org/abs/1605.08803, Section 4.1
        """
        y = (x * 255. + torch.rand_like(x)) / 256.
        y = (2 * y - 1) * self.data_constraint
        y = (y + 1) / 2
        y = y.log() - (1. - y).log()

        # Save log-determinant of Jacobian of initial transform
        ldj = F.softplus(y) + F.softplus(-y) \
            - F.softplus((1. - self.data_constraint).log() - self.data_constraint.log())
        sldj = ldj.view(ldj.size(0), -1).sum(-1)

        return y, sldj 
Author: chrischute, Project: real-nvp, Lines: 26, Source: real_nvp.py
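
The per-element term softplus(y) + softplus(-y) is the log-det-Jacobian of the logit transform: with u = sigmoid(y), we have -log(u) - log(1 - u) = softplus(-y) + softplus(y). A quick check:

import torch
import torch.nn.functional as F

y = torch.randn(5)
u = torch.sigmoid(y)
lhs = F.softplus(y) + F.softplus(-y)
rhs = -torch.log(u) - torch.log(1 - u)
print(torch.allclose(lhs, rhs, atol=1e-6))  # True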

Example 12: test_softplus_activation

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import softplus [as alias]
def test_softplus_activation(N=15):
    from numpy_ml.neural_nets.activations import SoftPlus

    np.random.seed(12345)

    N = np.inf if N is None else N

    mine = SoftPlus()
    gold = lambda z: F.softplus(torch.FloatTensor(z)).numpy()

    i = 0
    while i < N:
        n_dims = np.random.randint(1, 100)
        z = random_stochastic_matrix(1, n_dims)
        assert_almost_equal(mine.fn(z), gold(z))
        print("PASSED")
        i += 1


#######################################################################
#                      Activation Gradients                           #
####################################################################### 
Author: ddbourgin, Project: numpy-ml, Lines: 24, Source: test_nn.py
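
When re-implementing softplus in NumPy, as the SoftPlus class under test does, the naive log(1 + exp(z)) overflows for large z. The standard stable form is max(z, 0) + log1p(exp(-|z|)), which is also why F.softplus takes a threshold argument (above it, the input is returned directly). A hedged sketch of such a reference implementation:

import numpy as np

def softplus_stable(z):
    # log(1 + exp(z)) computed without overflow for large |z|
    return np.maximum(z, 0) + np.log1p(np.exp(-np.abs(z)))

print(softplus_stable(np.array([-1000., 0., 1000.])))  # [0. 0.6931... 1000.]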

Example 13: test_softplus_grad

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import softplus [as alias]
def test_softplus_grad(N=15):
    from numpy_ml.neural_nets.activations import SoftPlus

    np.random.seed(12345)

    N = np.inf if N is None else N

    mine = SoftPlus()
    gold = torch_gradient_generator(F.softplus)

    i = 0
    while i < N:
        n_ex = np.random.randint(1, 100)
        n_dims = np.random.randint(1, 100)
        z = random_tensor((n_ex, n_dims), standardize=True)
        assert_almost_equal(mine.grad(z), gold(z))
        print("PASSED")
        i += 1


#######################################################################
#                          Layers                                     #
####################################################################### 
Author: ddbourgin, Project: numpy-ml, Lines: 25, Source: test_nn.py

Example 14: test_softplus_grad

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import softplus [as alias]
def test_softplus_grad(N=50):
    from numpy_ml.neural_nets.activations import SoftPlus

    N = np.inf if N is None else N

    mine = SoftPlus()
    gold = torch_gradient_generator(F.softplus)

    i = 0
    while i < N:
        n_ex = np.random.randint(1, 100)
        n_dims = np.random.randint(1, 100)
        z = random_tensor((n_ex, n_dims), standardize=True)
        assert_almost_equal(mine.grad(z), gold(z))
        print("PASSED")
        i += 1 
Author: ddbourgin, Project: numpy-ml, Lines: 18, Source: test_nn_activations.py
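
The gradient being tested has a closed form: d/dx log(1 + exp(x)) = sigmoid(x). A one-line check against autograd:

import torch
import torch.nn.functional as F

x = torch.randn(10, requires_grad=True)
F.softplus(x).sum().backward()
print(torch.allclose(x.grad, torch.sigmoid(x)))  # True: softplus' = sigmoid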

Example 15: __init__

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import softplus [as alias]
def __init__(self, dim, context_dim, 
                 oper=nn_.ResLinear, realify=nn_.softplus):
        super(LinearFlow, self).__init__()
        self.realify = realify
        
        self.dim = dim
        self.context_dim = context_dim

        
        if type(dim) is int:
            dim_ = dim
        else:
            dim_ = np.prod(dim)
        
        self.mean = oper(context_dim, dim_)
        self.lstd = oper(context_dim, dim_)
        
        self.reset_parameters() 
Author: CW-Huang, Project: torchkit, Lines: 20, Source: flows.py


Note: The torch.nn.functional.softplus examples in this article were compiled by 纯净天空 from open source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open source projects contributed by their respective authors, and copyright remains with the original authors. For redistribution and use, please refer to the corresponding project's license; do not reproduce without permission.