

Python torch.log10 Method Code Examples

This article collects and summarizes typical code examples of the torch.log10 method in Python. If you are wondering what exactly torch.log10 does, how to call it, or what real code that uses it looks like, the curated examples below may help. You can also explore further usage examples from the torch package that this method belongs to.


The following presents 15 code examples of the torch.log10 method; by default they are sorted by popularity.
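
Before the project examples, here is a minimal, self-contained sketch of how torch.log10 behaves (illustrative only, not taken from any of the repositories below):

import torch

x = torch.tensor([0.1, 1.0, 10.0, 100.0])
print(torch.log10(x))  # tensor([-1., 0., 1., 2.])

# Zero or negative inputs yield -inf / nan, so real code often clamps first,
# as several of the examples below do:
y = torch.tensor([0.0, 1e-12, 2.0])
print(torch.log10(torch.clamp(y, min=1e-10)))  # tensor([-10.0000, -10.0000, 0.3010])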

Example 1: _fade_in

# Required module: import torch [as alias]
# Or: from torch import log10 [as alias]
def _fade_in(self, waveform_length: int) -> Tensor:
        fade = torch.linspace(0, 1, self.fade_in_len)
        ones = torch.ones(waveform_length - self.fade_in_len)

        if self.fade_shape == "linear":
            fade = fade

        if self.fade_shape == "exponential":
            fade = torch.pow(2, (fade - 1)) * fade

        if self.fade_shape == "logarithmic":
            fade = torch.log10(.1 + fade) + 1

        if self.fade_shape == "quarter_sine":
            fade = torch.sin(fade * math.pi / 2)

        if self.fade_shape == "half_sine":
            fade = torch.sin(fade * math.pi - math.pi / 2) / 2 + 0.5

        return torch.cat((fade, ones)).clamp_(0, 1) 
Author: pytorch, Project: audio, Lines: 22, Source: transforms.py

Example 2: _fade_out

# Required module: import torch [as alias]
# Or: from torch import log10 [as alias]
def _fade_out(self, waveform_length: int) -> Tensor:
        fade = torch.linspace(0, 1, self.fade_out_len)
        ones = torch.ones(waveform_length - self.fade_out_len)

        if self.fade_shape == "linear":
            fade = - fade + 1

        if self.fade_shape == "exponential":
            fade = torch.pow(2, - fade) * (1 - fade)

        if self.fade_shape == "logarithmic":
            fade = torch.log10(1.1 - fade) + 1

        if self.fade_shape == "quarter_sine":
            fade = torch.sin(fade * math.pi / 2 + math.pi / 2)

        if self.fade_shape == "half_sine":
            fade = torch.sin(fade * math.pi + math.pi / 2) / 2 + 0.5

        return torch.cat((ones, fade)).clamp_(0, 1) 
Author: pytorch, Project: audio, Lines: 22, Source: transforms.py
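
The two helpers above implement the fade-in and fade-out curves of torchaudio's Fade transform. In the logarithmic branch, torch.log10(.1 + fade) + 1 maps 0 to log10(0.1) + 1 = 0 and 1 to log10(1.1) + 1 ≈ 1.04, which is why the result is clamped back to [0, 1]. A minimal usage sketch of the public transform (the lengths and shape below are hypothetical):

import torch
import torchaudio

waveform = torch.rand(1, 16000) * 2 - 1  # dummy mono audio in [-1, 1]
fade = torchaudio.transforms.Fade(fade_in_len=4000, fade_out_len=4000,
                                  fade_shape="logarithmic")
faded = fade(waveform)  # same shape, edges smoothly attenuated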

Example 3: forward

# Required module: import torch [as alias]
# Or: from torch import log10 [as alias]
def forward(self, waveform: Tensor) -> Tensor:
        r"""
        Args:
            waveform (Tensor): Tensor of audio of dimension (..., time).

        Returns:
            Tensor: Tensor of audio of dimension (..., time).
        """
        if self.gain_type == "amplitude":
            waveform = waveform * self.gain

        if self.gain_type == "db":
            waveform = F.gain(waveform, self.gain)

        if self.gain_type == "power":
            waveform = F.gain(waveform, 10 * math.log10(self.gain))

        return torch.clamp(waveform, -1, 1) 
Author: pytorch, Project: audio, Lines: 20, Source: transforms.py
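
In the "power" branch, a power ratio is converted to decibels via 10 * log10(ratio), while an amplitude ratio corresponds to 20 * log10(ratio). A small sketch of that relationship (the gain value is hypothetical):

import math

power_gain = 2.0                       # hypothetical power ratio
db_gain = 10 * math.log10(power_gain)  # ≈ 3.01 dB
amplitude_gain = 10 ** (db_gain / 20)  # ≈ 1.414, i.e. sqrt(power_gain)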

Example 4: _validate

# Required module: import torch [as alias]
# Or: from torch import log10 [as alias]
def _validate(self, validate, metrics):
        """Validation process"""
        for cover, _ in tqdm(validate, disable=not self.verbose):
            gc.collect()
            cover = cover.to(self.device)
            generated, payload, decoded = self._encode_decode(cover, quantize=True)
            encoder_mse, decoder_loss, decoder_acc = self._coding_scores(
                cover, generated, payload, decoded)
            generated_score = self._critic(generated)
            cover_score = self._critic(cover)

            metrics['val.encoder_mse'].append(encoder_mse.item())
            metrics['val.decoder_loss'].append(decoder_loss.item())
            metrics['val.decoder_acc'].append(decoder_acc.item())
            metrics['val.cover_score'].append(cover_score.item())
            metrics['val.generated_score'].append(generated_score.item())
            metrics['val.ssim'].append(ssim(cover, generated).item())
            metrics['val.psnr'].append(10 * torch.log10(4 / encoder_mse).item())
            metrics['val.bpp'].append(self.data_depth * (2 * decoder_acc.item() - 1)) 
Author: DAI-Lab, Project: SteganoGAN, Lines: 21, Source: models.py
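
The PSNR metric uses 4 / encoder_mse, which suggests the images are scaled to [-1, 1]: the squared peak-to-peak range MAX^2 is then 2^2 = 4, and in general PSNR = 10 * log10(MAX^2 / MSE). A hedged sketch of that relation (the MSE value is made up):

import torch

mse = torch.tensor(1e-3)               # hypothetical encoder MSE
psnr = 10 * torch.log10(4 / mse)       # ≈ 36.02 dB for a [-1, 1] range
psnr_unit = 10 * torch.log10(1 / mse)  # ≈ 30.00 dB for a [0, 1] range
# the two differ by 10 * log10(4) ≈ 6.02 dB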

Example 5: synthesize

# Required module: import torch [as alias]
# Or: from torch import log10 [as alias]
def synthesize(self, mgc, batch_size, temperature=1.0):
        mel = mgc
        mel = torch.autograd.Variable(torch.tensor(mel).cuda().float()).transpose(0, 1)
        mel = torch.unsqueeze(mel, 0)
        mel = torch.log10(mel) * 20
        # from ipdb import set_trace
        # set_trace()
        with torch.no_grad():
            audio = self.waveglow.infer(mel, sigma=temperature)
            audio = audio * 32768
        audio = audio.squeeze()
        audio = audio.cpu().numpy()
        from scipy import signal
        audio = signal.lfilter([1.0], [1.0, -0.97], audio)
        audio = audio.astype('int16')
        return audio 
Author: tiberiu44, Project: TTS-Cube, Lines: 18, Source: vocoder.py

Example 6: log10

# Required module: import torch [as alias]
# Or: from torch import log10 [as alias]
def log10(x, out=None):
    """
    log base 10, element-wise.

    Parameters
    ----------
    x : ht.DNDarray
        The value for which to compute the logarithm.
    out : ht.DNDarray or None, optional
        A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
        or set to None, a fresh tensor is allocated.

    Returns
    -------
    logarithms : ht.DNDarray
        A tensor of the same shape as x, containing the positive logarithms of each element in this tensor.
        Negative input elements are returned as nan. If out was provided, logarithms is a reference to it.

    Examples
    --------
    >>> ht.log10(ht.arange(5))
    tensor([  -inf, 0.0000, 0.3010, 0.4771, 0.6021])
    """
    return operations.__local_op(torch.log10, x, out) 
Author: helmholtz-analytics, Project: heat, Lines: 26, Source: exponential.py

Example 7: forward

# Required module: import torch [as alias]
# Or: from torch import log10 [as alias]
def forward(
        self, input: torch.Tensor, input_lengths: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # 1. Stft: time -> time-freq
        input_stft, feats_lens = self.stft(input, input_lengths)

        assert input_stft.dim() >= 4, input_stft.shape
        # "2" refers to the real/imag parts of Complex
        assert input_stft.shape[-1] == 2, input_stft.shape

        # NOTE(kamo): We use different definition for log-spec between TTS and ASR
        #   TTS: log_10(abs(stft))
        #   ASR: log_e(power(stft))

        # STFT -> Power spectrum
        # input_stft: (..., F, 2) -> (..., F)
        input_power = input_stft[..., 0] ** 2 + input_stft[..., 1] ** 2
        log_amp = 0.5 * torch.log10(torch.clamp(input_power, min=1.0e-10))
        return log_amp, feats_lens 
Author: espnet, Project: espnet, Lines: 21, Source: log_spectrogram.py
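
The 0.5 * torch.log10(power) form is the same as log10(abs(stft)): the magnitude is the square root of the power spectrum, and log10(sqrt(p)) = 0.5 * log10(p). A quick check with made-up values:

import torch

real = torch.tensor([0.3, -1.2])
imag = torch.tensor([0.4, 0.5])
power = real ** 2 + imag ** 2
magnitude = torch.sqrt(power)

a = 0.5 * torch.log10(torch.clamp(power, min=1.0e-10))
b = torch.log10(torch.clamp(magnitude, min=1.0e-5))
print(torch.allclose(a, b))  # True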

Example 8: make_vec_eps

# Required module: import torch [as alias]
# Or: from torch import log10 [as alias]
def make_vec_eps(self, global_B, env_ranks):
        """Construct log-spaced epsilon values and select local assignments
        from the global number of sampler environment instances (for SyncRl
        and AsyncRl)."""
        if (self.eps_final_min is not None and
                self.eps_final_min != self._eps_final_scalar):  # vector epsilon.
            if self.alternating:  # In FF case, sampler sets agent.alternating.
                assert global_B % 2 == 0
                global_B = global_B // 2  # Env pairs will share epsilon.
                env_ranks = list(set([i // 2 for i in env_ranks]))
            self.eps_init = self._eps_init_scalar * torch.ones(len(env_ranks))
            global_eps_final = torch.logspace(
                torch.log10(torch.tensor(self.eps_final_min)),
                torch.log10(torch.tensor(self._eps_final_scalar)),
                global_B)
            self.eps_final = global_eps_final[env_ranks]
        self.eps_sample = self.eps_init 
Author: astooke, Project: rlpyt, Lines: 19, Source: epsilon_greedy.py
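
torch.logspace takes the exponents of its endpoints (base 10 by default), so the code passes log10 of the desired epsilon bounds to obtain geometrically spaced values. A minimal sketch with hypothetical bounds:

import math
import torch

eps_final_min, eps_final_max = 0.01, 0.1  # hypothetical epsilon bounds
global_B = 4                              # hypothetical number of environments

eps_final = torch.logspace(math.log10(eps_final_min),
                           math.log10(eps_final_max),
                           global_B)
print(eps_final)  # ≈ tensor([0.0100, 0.0215, 0.0464, 0.1000])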

Example 9: forward

# Required module: import torch [as alias]
# Or: from torch import log10 [as alias]
def forward(self, audio):
        p = (self.n_fft - self.hop_length) // 2
        audio = F.pad(audio, (p, p), "reflect").squeeze(1)
        fft = torch.stft(
            audio,
            n_fft=self.n_fft,
            hop_length=self.hop_length,
            win_length=self.win_length,
            window=self.window,
            center=False,
        )
        real_part, imag_part = fft.unbind(-1)
        magnitude = torch.sqrt(real_part ** 2 + imag_part ** 2)
        mel_output = torch.matmul(self.mel_basis, magnitude)
        log_mel_spec = torch.log10(torch.clamp(mel_output, min=1e-5))
        return log_mel_spec 
Author: descriptinc, Project: melgan-neurips, Lines: 18, Source: modules.py

Example 10: compute_yloss

# Required module: import torch [as alias]
# Or: from torch import log10 [as alias]
def compute_yloss(self):
        """Computes the first part (y-loss) of the loss function."""
        yloss = 0.0
        for i in range(self.total_CFs):
            if self.yloss_type == "l2_loss":
                temp_loss = torch.pow((self.get_model_output(self.cfs[i]) - self.target_cf_class), 2)[0]
            elif self.yloss_type == "log_loss":
                temp_logits = torch.log10((abs(self.get_model_output(self.cfs[i]) - 0.000001))/(1 - abs(self.get_model_output(self.cfs[i]) - 0.000001)))
                criterion = torch.nn.BCEWithLogitsLoss()
                temp_loss = criterion(temp_logits, torch.tensor([self.target_cf_class]))
            elif self.yloss_type == "hinge_loss":
                temp_logits = torch.log10((abs(self.get_model_output(self.cfs[i]) - 0.000001))/(1 - abs(self.get_model_output(self.cfs[i]) - 0.000001)))
                criterion = torch.nn.ReLU()
                temp_loss = criterion(0.5 - (temp_logits*self.target_cf_class))[0]

            yloss += temp_loss

        return yloss/self.total_CFs 
Author: interpretml, Project: DiCE, Lines: 20, Source: dice_pytorch.py
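
The expression torch.log10(p / (1 - p)) is the base-10 logit of a probability (the small 0.000001 offsets guard against log of zero and division by zero). It differs from the natural-log logit that sigmoid inverts by a factor of 1 / ln(10) ≈ 0.434, as a quick check shows (the probabilities are made up):

import math
import torch

p = torch.tensor([0.2, 0.9])
logit10 = torch.log10(p / (1 - p))
logit_e = torch.log(p / (1 - p))
print(torch.allclose(logit_e, logit10 * math.log(10)))  # True
print(torch.sigmoid(logit_e))                           # recovers p: tensor([0.2000, 0.9000])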

Example 11: sisnr

# Required module: import torch [as alias]
# Or: from torch import log10 [as alias]
def sisnr(self, x, s, eps=1e-8):
        """
        Arguments:
        x: separated signal, N x S tensor
        s: reference signal, N x S tensor
        Return:
        sisnr: N tensor
        """

        def l2norm(mat, keepdim=False):
            return th.norm(mat, dim=-1, keepdim=keepdim)

        if x.shape != s.shape:
            raise RuntimeError(
                "Dimention mismatch when calculate si-snr, {} vs {}".format(
                    x.shape, s.shape))
        x_zm = x - th.mean(x, dim=-1, keepdim=True)
        s_zm = s - th.mean(s, dim=-1, keepdim=True)
        t = th.sum(
            x_zm * s_zm, dim=-1,
            keepdim=True) * s_zm / (l2norm(s_zm, keepdim=True)**2 + eps)
        return 20 * th.log10(eps + l2norm(t) / (l2norm(x_zm - t) + eps)) 
Author: funcwj, Project: conv-tasnet, Lines: 24, Source: trainer.py
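
The returned quantity is the scale-invariant SNR in decibels: the zero-mean estimate x is projected onto the reference s, and the ratio of the projection norm to the residual norm goes through 20 * log10. A quick property check with dummy tensors (this re-states the formula as a free function purely for illustration):

import torch

def _si_snr(x, s, eps=1e-8):
    # illustration only: same math as the method above, without the class
    x_zm = x - x.mean(dim=-1, keepdim=True)
    s_zm = s - s.mean(dim=-1, keepdim=True)
    t = (x_zm * s_zm).sum(dim=-1, keepdim=True) * s_zm \
        / (s_zm.norm(dim=-1, keepdim=True) ** 2 + eps)
    return 20 * torch.log10(eps + t.norm(dim=-1) / ((x_zm - t).norm(dim=-1) + eps))

s = torch.randn(4, 16000)                 # dummy reference signals
x = 0.5 * s + 0.01 * torch.randn_like(s)  # dummy estimates, rescaled and noisy
print(_si_snr(x, s))                      # high dB scores; the 0.5 rescaling does not lower them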

Example 12: forward

# Required module: import torch [as alias]
# Or: from torch import log10 [as alias]
def forward(self, data, coords):
    # in_ = coords.contiguous()
    in_ = th.cat([th.log10(1.0 + data/255.0), coords], 2).contiguous()
    assert in_.shape[0] == 1, "current implementation assumes batch_size = 1"
    kernels = self.net(in_.squeeze(0))
    cdata = crop_like(data.squeeze(0), kernels).contiguous()
    output, _ = self.kernel_update(cdata, kernels)

    # Average over samples
    output = th.unsqueeze(output, 0).mean(1)

    # crop output
    k = (self.ksize-1) // 2
    output = output[..., k:-k, k:-k]

    kviz = kernels.detach().clone()
    min_ = kviz.min()
    max_ = kviz.max()
    kviz = (kviz - min_) / (max_ - min_ - 1e-8) 
    bs, k2, h, w = kviz.shape
    return output, kviz.view(bs, self.ksize, self.ksize, h, w) 
Author: adobe, Project: sbmc, Lines: 23, Source: scatter_vs_gather.py

Example 13: calc_psnr

# Required module: import torch [as alias]
# Or: from torch import log10 [as alias]
def calc_psnr(sr, hr, scale=0, benchmark=False):
    # adapt from EDSR: https://github.com/thstkdgus35/EDSR-PyTorch
    diff = (sr - hr).data
    if benchmark:
        shave = scale
        if diff.size(1) > 1:
            convert = diff.new(1, 3, 1, 1)
            convert[0, 0, 0, 0] = 65.738
            convert[0, 1, 0, 0] = 129.057
            convert[0, 2, 0, 0] = 25.064
            diff.mul_(convert).div_(256)
            diff = diff.sum(dim=1, keepdim=True)
    else:
        shave = scale + 6

    valid = diff[:, :, shave:-shave, shave:-shave]
    mse = valid.pow(2).mean()

    return -10 * math.log10(mse)


# +++++++++++++++++++++++++++++++++++++
#           PSNR      
# ------------------------------------- 
Author: yu45020, Project: Waifu2x, Lines: 26, Source: image_quality.py
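
Here mse is a zero-dimensional tensor, which Python's math.log10 accepts by converting it to a float, and -10 * log10(mse) equals 10 * log10(1 / mse), i.e. PSNR with a peak value of 1, assuming images normalized to [0, 1]. A small self-contained sketch of the same formula (the tensors are dummies):

import math
import torch

hr = torch.rand(1, 3, 32, 32)                        # dummy ground-truth image in [0, 1]
sr = (hr + 0.01 * torch.randn_like(hr)).clamp(0, 1)  # dummy reconstruction
mse = (sr - hr).pow(2).mean()
psnr = -10 * math.log10(mse)                         # equivalent to 10 * log10(1 / mse)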

Example 14: SI_SNR

# Required module: import torch [as alias]
# Or: from torch import log10 [as alias]
def SI_SNR(_s, s, zero_mean=True):
    '''
         Calculate the SNR indicator between the two audios.
         The larger the value, the better the separation.
         input:
               _s: Generated audio
               s:  Ground Truth audio
         output:
               SNR value
    '''
    if zero_mean:
        _s = _s - torch.mean(_s)
        s = s - torch.mean(s)
    s_target = sum(torch.mul(_s, s))*s/torch.pow(torch.norm(s, p=2), 2)
    e_noise = _s - s_target
    return 20*torch.log10(torch.norm(s_target, p=2)/torch.norm(e_noise, p=2)) 
Author: speechLabBcCuny, Project: onssen, Lines: 18, Source: loss_e2e.py

Example 15: sisnr

# Required module: import torch [as alias]
# Or: from torch import log10 [as alias]
def sisnr(x, s, eps=1e-8):
    """
    calculate training loss
    input:
          x: separated signal, N x S tensor
          s: reference signal, N x S tensor
    Return:
          sisnr: N tensor
    """

    def l2norm(mat, keepdim=False):
        return torch.norm(mat, dim=-1, keepdim=keepdim)

    if x.shape != s.shape:
        raise RuntimeError(
            "Dimention mismatch when calculate si-snr, {} vs {}".format(
                x.shape, s.shape))
    x_zm = x - torch.mean(x, dim=-1, keepdim=True)
    s_zm = s - torch.mean(s, dim=-1, keepdim=True)
    t = torch.sum(
        x_zm * s_zm, dim=-1,
        keepdim=True) * s_zm / (l2norm(s_zm, keepdim=True)**2 + eps)
    return 20 * torch.log10(eps + l2norm(t) / (l2norm(x_zm - t) + eps)) 
Author: speechLabBcCuny, Project: onssen, Lines: 25, Source: loss_e2e.py


Note: The torch.log10 method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution or use should follow the corresponding project's license. Please do not reproduce without permission.