

Python torch.cosh Method Code Examples

This article collects and summarizes typical usage examples of the torch.cosh method in Python. If you have been asking yourself how exactly torch.cosh is used, what calling it looks like, or where to find examples of it in practice, the curated code samples below should help. You can also explore further usage examples of the torch module that the method belongs to.


The sections below present 13 code examples of the torch.cosh method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
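As a quick refresher before the examples (a minimal sketch of my own, not taken from any of the projects below): torch.cosh computes the element-wise hyperbolic cosine cosh(x) = (e^x + e^-x) / 2.

```python
import torch

x = torch.tensor([-1.0, 0.0, 1.0])
print(torch.cosh(x))               # tensor([1.5431, 1.0000, 1.5431])
print((x.exp() + (-x).exp()) / 2)  # identical values, by definition
```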

Example 1: calc_loss

# Required module: import torch [as alias]
# Or: from torch import cosh [as alias]
def calc_loss(y_hat, y_cuda, mag_hat, batch_size=20, scale_by_freq=None, l1_lambda=2e-5, reg_logcosh=False):
    # Reconstruction term plus regularization -> slightly less wiggly waveform.
    # In each branch, the second term is an L1-style regularization that helps 'damp' high-frequency noise.
    if not reg_logcosh:
        if scale_by_freq is None:
            loss = logcosh(y_hat, y_cuda) + l1_lambda*torch.abs(mag_hat).mean()
        else:
            loss = logcosh(y_hat, y_cuda) + l1_lambda/10*torch.abs(mag_hat*scale_by_freq).mean()
    else:
        if scale_by_freq is None:
            loss = logcosh(y_hat, y_cuda) + l1_lambda*torch.mean(torch.log(torch.cosh(mag_hat)))
        else:
            loss = logcosh(y_hat, y_cuda) + l1_lambda/10*torch.mean(scale_by_freq*torch.log(torch.cosh(mag_hat)))

    return loss



# EOF 
Developer: drscotthawley, Project: signaltrain, Lines of code: 24, Source: loss_functions.py
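A hedged usage sketch, not from the project itself: the shapes below are placeholders I invented, and logcosh is the helper shown in Example 3 of this page.

```python
import torch

# Hypothetical shapes: a batch of 20 waveforms plus predicted spectrogram magnitudes
y_hat   = torch.randn(20, 8192, requires_grad=True)
y_cuda  = torch.randn(20, 8192)
mag_hat = torch.rand(20, 1025, 64)

loss = calc_loss(y_hat, y_cuda, mag_hat, l1_lambda=2e-5)
loss.backward()
```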

Example 2: cosh

# Required module: import torch [as alias]
# Or: from torch import cosh [as alias]
def cosh(x, out=None):
    """
    Return the hyperbolic cosine, element-wise.

    Parameters
    ----------
    x : ht.DNDarray
        The value for which to compute the hyperbolic cosine.
    out : ht.DNDarray or None, optional
        A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
        or set to None, a fresh tensor is allocated.

    Returns
    -------
    hyperbolic cosine : ht.DNDarray
        A tensor of the same shape as x, containing the hyperbolic cosine of each element in this tensor.
        If out was provided, the returned tensor is a reference to it.

    Examples
    --------
    >>> ht.cosh(ht.arange(-6, 7, 2))
    tensor([201.7156,  27.3082,   3.7622,   1.0000,   3.7622,  27.3082, 201.7156])
    """
    return local_op(torch.cosh, x, out) 
Developer: helmholtz-analytics, Project: heat, Lines of code: 26, Source: trigonometrics.py

Example 3: logcosh

# Required module: import torch [as alias]
# Or: from torch import cosh [as alias]
def logcosh(y_hat, y):
    return torch.mean( torch.log( torch.cosh(y - y_hat) )) 
Developer: drscotthawley, Project: signaltrain, Lines of code: 4, Source: loss_functions.py
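A caveat worth noting: torch.log(torch.cosh(u)) overflows to inf once |u| exceeds roughly 88 in float32, even though the result itself stays modest. A numerically stable variant (my own sketch, not part of signaltrain) uses the identity log(cosh(u)) = |u| + log1p(exp(-2|u|)) - log(2):

```python
import math
import torch

def logcosh_stable(y_hat, y):
    # log(cosh(u)) = |u| + log1p(exp(-2|u|)) - log(2), safe for large |u|
    u = torch.abs(y - y_hat)
    return torch.mean(u + torch.log1p(torch.exp(-2.0 * u)) - math.log(2.0))
```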

Example 4: exp_map

# Required module: import torch [as alias]
# Or: from torch import cosh [as alias]
def exp_map(x, v):
    # BD, BD -> BD
    tn = tangent_norm(v).unsqueeze(dim=1)
    tn_expand = tn.repeat(1, x.size()[-1])
    result = torch.cosh(tn) * x + torch.sinh(tn) * (v / tn)  # note: v / tn is NaN where tn == 0
    result = torch.where(tn_expand > 0, result, x)  # only update if tangent norm is > 0 (masks out the NaN case)
    return result 
Developer: theSage21, Project: lorentz-embeddings, Lines of code: 9, Source: lorentz.py

Example 5: build_tensor

# Required module: import torch [as alias]
# Or: from torch import cosh [as alias]
def build_tensor(K):
    # Eigenvalues of the 2x2 Ising bond (Boltzmann) matrix: 2*cosh(K) and 2*sinh(K)
    lam = [torch.cosh(K)*2, torch.sinh(K)*2]
    T = []
    for i in range(2):
        for j in range(2):
            for k in range(2):
                for l in range(2):
                    if ((i+j+k+l)%2==0):  # Z2 symmetry: only even-parity entries are nonzero
                        T.append(torch.sqrt(lam[i]*lam[j]*lam[k]*lam[l])/2.)
                    else:
                        T.append(torch.tensor(0.0, dtype=K.dtype, device=K.device))
    T = torch.stack(T).view(2, 2, 2, 2)
    return T 
Developer: wangleiphy, Project: tensorgrad, Lines of code: 15, Source: ising.py
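As a quick illustration (my own usage sketch, not from the project): K must be a torch tensor so that its dtype and device propagate into T. The value below is the critical coupling K_c = ln(1 + √2)/2 of the 2D Ising model.

```python
import math
import torch

K = torch.tensor(0.5 * math.log(1.0 + math.sqrt(2.0)), dtype=torch.float64)
T = build_tensor(K)
print(T.shape)  # torch.Size([2, 2, 2, 2])
```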

Example 6: bwd

# Required module: import torch [as alias]
# Or: from torch import cosh [as alias]
def bwd(z: torch.Tensor, mask: torch.Tensor, params) -> Tuple[torch.Tensor, torch.Tensor]:
        a, b, c, d, g = NLSQ.get_pseudo_params(params)

        # double needed for stability. No effect on overall speed
        a = a.double()
        b = b.double()
        c = c.double()
        d = d.double()
        g = g.double()
        z = z.double()

        aa = -b * d.pow(2)
        bb = (z - a) * d.pow(2) - 2 * b * d * g
        cc = (z - a) * 2 * d * g - b * (1 + g.pow(2))
        dd = (z - a) * (1 + g.pow(2)) - c

        p = (3 * aa * cc - bb.pow(2)) / (3 * aa.pow(2))
        q = (2 * bb.pow(3) - 9 * aa * bb * cc + 27 * aa.pow(2) * dd) / (27 * aa.pow(3))

        t = -2 * torch.abs(q) / q * torch.sqrt(torch.abs(p) / 3)
        inter_term1 = -3 * torch.abs(q) / (2 * p) * torch.sqrt(3 / torch.abs(p))
        inter_term2 = 1 / 3 * arccosh(torch.abs(inter_term1 - 1) + 1)
        t = t * torch.cosh(inter_term2)

        tpos = -2 * torch.sqrt(torch.abs(p) / 3)
        inter_term1 = 3 * q / (2 * p) * torch.sqrt(3 / torch.abs(p))
        inter_term2 = 1 / 3 * arcsinh(inter_term1)
        tpos = tpos * torch.sinh(inter_term2)

        t[p > 0] = tpos[p > 0]
        z = t - bb / (3 * aa)
        arg = d * z + g
        denom = arg.pow(2) + 1
        logdet = torch.log(b - 2 * c * d * arg / denom.pow(2))

        z = z.float().mul(mask.unsqueeze(2))
        logdet = logdet.float().mul(mask.unsqueeze(2)).view(z.size(0), -1).sum(dim=1) * -1.0
        return z, logdet 
Developer: XuezheMax, Project: flowseq, Lines of code: 40, Source: transform.py
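For context (my gloss, not the authors' comments): after the double-precision cast, p and q reduce the cubic aa·t³ + bb·t² + cc·t + dd to the depressed form t³ + p·t + q = 0, which is then solved with the hyperbolic version of Cardano's method: the cosh branch when p < 0 and the sinh branch (tpos) when p > 0. A minimal sanity check of the sinh branch, assuming a PyTorch recent enough to provide torch.asinh:

```python
import torch

p = torch.tensor(3.0)   # p > 0 -> the sinh branch gives the unique real root
q = torch.tensor(-4.0)
t = -2 * torch.sqrt(p / 3) * torch.sinh(torch.asinh(3 * q / (2 * p) * torch.sqrt(3 / p)) / 3)
print(t**3 + p * t + q)  # ~ 0, so t (= 1.0 here) is the real root
```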

Example 7: log_map

# Required module: import torch [as alias]
# Or: from torch import cosh [as alias]
def log_map(x, y):
    """Perform the log step."""
    d = dist(x, y)
    return (d / torch.sinh(d)) * (y - torch.cosh(d) * x) 
Developer: asappresearch, Project: flambe, Lines of code: 6, Source: hyperbolic.py

Example 8: exp_map

# Required module: import torch [as alias]
# Or: from torch import cosh [as alias]
def exp_map(x, y):
    """Perform the exp step."""
    n = torch.clamp(norm(y), min=EPSILON)
    return torch.cosh(n) * x + (torch.sinh(n) / n) * y 
Developer: asappresearch, Project: flambe, Lines of code: 6, Source: hyperbolic.py
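Since dist, norm, and EPSILON are defined elsewhere in flambe's hyperbolic.py and not shown here, the following self-contained sketch (the names ldot, x, v are mine) checks the geometric fact these maps rely on: the exp map keeps points on the hyperboloid ⟨y, y⟩ = -1 of the Lorentz model.

```python
import torch

def ldot(u, v):
    # Lorentz (Minkowski) inner product: -u0*v0 + u1*v1 + ... + un*vn
    return -u[0] * v[0] + (u[1:] * v[1:]).sum()

x = torch.tensor([1.0, 0.0, 0.0])    # base point: <x, x> = -1
v = torch.tensor([0.0, 0.3, -0.4])   # tangent vector at x: <x, v> = 0
n = torch.sqrt(ldot(v, v))           # Minkowski norm of v (here 0.5)
y = torch.cosh(n) * x + (torch.sinh(n) / n) * v
print(ldot(y, y))                    # ~ -1.0: y stays on the hyperboloid
```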

Example 9: grad_log_prob

# Required module: import torch [as alias]
# Or: from torch import cosh [as alias]
def grad_log_prob(self, value):
        res = - value / self.scale.pow(2) + (self.dim - 1) * self.c.sqrt() * torch.cosh(self.c.sqrt() * value) / torch.sinh(self.c.sqrt() * value)  # cosh/sinh == coth(sqrt(c) * value)
        return res 
Developer: emilemathieu, Project: pvae, Lines of code: 5, Source: hyperbolic_radius.py

Example 10: aten_cosh

# Required module: import torch [as alias]
# Or: from torch import cosh [as alias]
def aten_cosh(inputs, attributes, scope):
    inp = inputs[0]
    ctx = current_context()
    net = ctx.network
    if ctx.is_tensorrt and has_trt_tensor(inputs):
        layer = net.add_unary(inp, trt.UnaryOperation.COSH)
        output = layer.get_output(0)
        output.name = scope
        layer.name = scope
        return [output]
    elif ctx.is_tvm and has_tvm_tensor(inputs):
        raise NotImplementedError

    return [torch.cosh(inp)] 
Developer: traveller59, Project: torch2trt, Lines of code: 16, Source: unary.py

Example 11: cosh

# Required module: import torch [as alias]
# Or: from torch import cosh [as alias]
def cosh(t):
    """
    Element-wise hyperbolic cosine computed using cross-approximation; see PyTorch's `cosh()`.

    :param t: input :class:`Tensor`

    :return: a :class:`Tensor`
    """

    return tn.cross(lambda x: torch.cosh(x), tensors=t, verbose=False) 
Developer: rballester, Project: tntorch, Lines of code: 12, Source: ops.py

Example 12: standard

# Required module: import torch [as alias]
# Or: from torch import cosh [as alias]
def standard(x, nn_outp):
        a, b, c, d, f = NLSq.get_pseudo_params(nn_outp)
        
        # double needed for stability. No effect on overall speed
        a = a.double()
        b = b.double()
        c = c.double()
        d = d.double()
        f = f.double()
        x = x.double()

        aa = -b*d.pow(2)
        bb = (x-a)*d.pow(2) - 2*b*d*f
        cc = (x-a)*2*d*f - b*(1+f.pow(2))
        dd = (x-a)*(1+f.pow(2)) - c

        p = (3*aa*cc - bb.pow(2))/(3*aa.pow(2))
        q = (2*bb.pow(3) - 9*aa*bb*cc + 27*aa.pow(2)*dd)/(27*aa.pow(3))
        
        t = -2*torch.abs(q)/q*torch.sqrt(torch.abs(p)/3)
        inter_term1 = -3*torch.abs(q)/(2*p)*torch.sqrt(3/torch.abs(p))
        inter_term2 = 1/3*arccosh(torch.abs(inter_term1-1)+1)
        t = t*torch.cosh(inter_term2)

        tpos = -2*torch.sqrt(torch.abs(p)/3)
        inter_term1 = 3*q/(2*p)*torch.sqrt(3/torch.abs(p))
        inter_term2 = 1/3*arcsinh(inter_term1)
        tpos = tpos*torch.sinh(inter_term2)

        t[p > 0] = tpos[p > 0]
        y = t - bb/(3*aa)

        arg = d*y + f
        denom = 1 + arg.pow(2)

        x_new = a + b*y + c/denom

        logdet = -torch.log(b - 2*c*d*arg/denom.pow(2)).sum(-1)

        y = y.float()
        logdet = logdet.float()

        return y, logdet 
Developer: harvardnlp, Project: TextFlow, Lines of code: 45, Source: flows.py

Example 13: train_epoch

# Required module: import torch [as alias]
# Or: from torch import cosh [as alias]
def train_epoch(self,t,x,y,thres_cosh=50,thres_emb=6):
        self.model.train()

        r=np.arange(x.size(0))
        np.random.shuffle(r)
        r=torch.LongTensor(r).cuda()

        # Loop batches
        for i in range(0,len(r),self.sbatch):
            if i+self.sbatch<=len(r): b=r[i:i+self.sbatch]
            else: b=r[i:]
            # torch.autograd.Variable is a deprecated no-op since PyTorch 0.4; plain tensors suffice
            images=x[b]
            targets=y[b]
            task=torch.LongTensor([t]).cuda()
            s=(self.smax-1/self.smax)*i/len(r)+1/self.smax

            # Forward
            outputs,masks=self.model.forward(task,images,s=s)
            output=outputs[t]
            loss,_=self.criterion(output,targets,masks)

            # Backward
            self.optimizer.zero_grad()
            loss.backward()

            # Restrict layer gradients in backprop
            if t>0:
                for n,p in self.model.named_parameters():
                    if n in self.mask_back:
                        p.grad.data*=self.mask_back[n]

            # Compensate embedding gradients
            for n,p in self.model.named_parameters():
                if n.startswith('e'):
                    num=torch.cosh(torch.clamp(s*p.data,-thres_cosh,thres_cosh))+1
                    den=torch.cosh(p.data)+1
                    p.grad.data*=self.smax/s*num/den

            # Apply step
            torch.nn.utils.clip_grad_norm_(self.model.parameters(),self.clipgrad)  # clip_grad_norm was renamed clip_grad_norm_ in modern PyTorch
            self.optimizer.step()

            # Constrain embeddings
            for n,p in self.model.named_parameters():
                if n.startswith('e'):
                    p.data=torch.clamp(p.data,-thres_emb,thres_emb)


        return 
Developer: joansj, Project: hat, Lines of code: 55, Source: hat.py
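For context (my gloss on the compensation above, under the assumption that the masks are hard-attention gates σ(s·e) as in the HAT paper): since σ'(u) = σ(u)(1 − σ(u)) = 1 / (2(cosh(u) + 1)), the factor smax/s · num/den equals smax·σ'(e) / (s·σ'(s·e)), which undoes the annealed scaling s in the embedding gradient. A quick check of the identity:

```python
import torch

u = torch.linspace(-5.0, 5.0, steps=11)
sig_prime = torch.sigmoid(u) * (1 - torch.sigmoid(u))  # derivative of the sigmoid
via_cosh  = 1.0 / (2.0 * (torch.cosh(u) + 1.0))        # same quantity via cosh
print(torch.allclose(sig_prime, via_cosh))             # True
```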


Note: the torch.cosh method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to its original authors. Consult the corresponding project's license before distributing or using the code, and do not reproduce this article without permission.