

Python torch.log Method Code Examples

This article collects typical usage examples of the torch.log method in Python. If you have been wondering what exactly torch.log does, how to call it, or what it looks like in real code, the hand-picked examples below may help. You can also explore further usage examples from the torch package to which the method belongs.


The following presents 15 code examples of the torch.log method, sorted by popularity by default.

Example 1: bbox_transform

# Required module: import torch [as alias]
# Or: from torch import log [as alias]
def bbox_transform(ex_rois, gt_rois):
  ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
  ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
  ex_ctr_x = ex_rois[:, 0] + 0.5 * ex_widths
  ex_ctr_y = ex_rois[:, 1] + 0.5 * ex_heights

  gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
  gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
  gt_ctr_x = gt_rois[:, 0] + 0.5 * gt_widths
  gt_ctr_y = gt_rois[:, 1] + 0.5 * gt_heights

  targets_dx = (gt_ctr_x - ex_ctr_x) / ex_widths
  targets_dy = (gt_ctr_y - ex_ctr_y) / ex_heights
  targets_dw = torch.log(gt_widths / ex_widths)
  targets_dh = torch.log(gt_heights / ex_heights)

  targets = torch.stack(
    (targets_dx, targets_dy, targets_dw, targets_dh), 1)
  return targets 
Author: Sunarker, Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection, Lines: 21, Source: bbox_transform.py
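
A quick sanity check (a hypothetical snippet, not part of the project above) shows that the dw/dh targets live in log-space, so torch.exp recovers the raw size ratios:

import torch

ex_rois = torch.tensor([[0., 0., 9., 9.]])    # a 10x10 anchor box (x1, y1, x2, y2)
gt_rois = torch.tensor([[2., 2., 21., 21.]])  # a 20x20 ground-truth box
targets = bbox_transform(ex_rois, gt_rois)
print(targets)                    # tensor([[0.7000, 0.7000, 0.6931, 0.6931]])
print(torch.exp(targets[:, 2:]))  # tensor([[2., 2.]]) -- width/height ratios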

Example 2: bbox_transform

# Required module: import torch [as alias]
# Or: from torch import log [as alias]
def bbox_transform(ex_rois, gt_rois):
    ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
    ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
    ex_ctr_x = ex_rois[:, 0] + 0.5 * ex_widths
    ex_ctr_y = ex_rois[:, 1] + 0.5 * ex_heights

    gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
    gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
    gt_ctr_x = gt_rois[:, 0] + 0.5 * gt_widths
    gt_ctr_y = gt_rois[:, 1] + 0.5 * gt_heights

    targets_dx = (gt_ctr_x - ex_ctr_x) / ex_widths
    targets_dy = (gt_ctr_y - ex_ctr_y) / ex_heights
    targets_dw = torch.log(gt_widths / ex_widths)
    targets_dh = torch.log(gt_heights / ex_heights)

    targets = torch.stack(
        (targets_dx, targets_dy, targets_dw, targets_dh), 1)

    return targets 
Author: guoruoqian, Project: cascade-rcnn_Pytorch, Lines: 22, Source: bbox_transform.py

Example 3: apply_box_deltas_2D

# Required module: import torch [as alias]
# Or: from torch import log [as alias]
def apply_box_deltas_2D(boxes, deltas):
    """Applies the given deltas to the given boxes.
    boxes: [N, 4] where each row is y1, x1, y2, x2
    deltas: [N, 4] where each row is [dy, dx, log(dh), log(dw)]
    """
    # Convert to y, x, h, w
    height = boxes[:, 2] - boxes[:, 0]
    width = boxes[:, 3] - boxes[:, 1]
    center_y = boxes[:, 0] + 0.5 * height
    center_x = boxes[:, 1] + 0.5 * width
    # Apply deltas
    center_y += deltas[:, 0] * height
    center_x += deltas[:, 1] * width
    height *= torch.exp(deltas[:, 2])
    width *= torch.exp(deltas[:, 3])
    # Convert back to y1, x1, y2, x2
    y1 = center_y - 0.5 * height
    x1 = center_x - 0.5 * width
    y2 = y1 + height
    x2 = x1 + width
    result = torch.stack([y1, x1, y2, x2], dim=1)
    return result 
Author: MIC-DKFZ, Project: medicaldetectiontoolkit, Lines: 24, Source: model_utils.py
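
A small illustration with hypothetical values: zero deltas leave the box unchanged, while a log(dh) of log(2) doubles its height, since torch.exp inverts the log-space encoding.

import math
import torch

boxes = torch.tensor([[10., 10., 30., 50.]])         # y1, x1, y2, x2
deltas = torch.tensor([[0., 0., math.log(2.), 0.]])  # dy, dx, log(dh), log(dw)
print(apply_box_deltas_2D(boxes, deltas))            # tensor([[ 0., 10., 40., 50.]])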

Example 4: cxcy_to_gcxgcy

# Required module: import torch [as alias]
# Or: from torch import log [as alias]
def cxcy_to_gcxgcy(cxcy, priors_cxcy):
    """
    Encode bounding boxes (that are in center-size form) w.r.t. the corresponding prior boxes (that are in center-size form).

    For the center coordinates, find the offset with respect to the prior box, and scale by the size of the prior box.
    For the size coordinates, scale by the size of the prior box, and convert to the log-space.

    In the model, we are predicting bounding box coordinates in this encoded form.

    :param cxcy: bounding boxes in center-size coordinates, a tensor of size (n_priors, 4)
    :param priors_cxcy: prior boxes with respect to which the encoding must be performed, a tensor of size (n_priors, 4)
    :return: encoded bounding boxes, a tensor of size (n_priors, 4)
    """

    # The 10 and 5 below are referred to as 'variances' in the original Caffe repo, completely empirical
    # They are for some sort of numerical conditioning, for 'scaling the localization gradient'
    # See https://github.com/weiliu89/caffe/issues/155
    return torch.cat([(cxcy[:, :2] - priors_cxcy[:, :2]) / (priors_cxcy[:, 2:] / 10),  # g_c_x, g_c_y
                      torch.log(cxcy[:, 2:] / priors_cxcy[:, 2:]) * 5], 1)  # g_w, g_h 
Author: zzzDavid, Project: ICDAR-2019-SROIE, Lines: 21, Source: utils.py
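
Decoding reverses this encoding with torch.exp. A minimal sketch of the inverse, assuming the same 10/5 variance constants (the name gcxgcy_to_cxcy is chosen here to mirror the function above, not taken from the file):

import torch

def gcxgcy_to_cxcy(gcxgcy, priors_cxcy):
    # Undo the variance scaling on the center offsets, and map the
    # log-space sizes back to ratios with torch.exp.
    return torch.cat([gcxgcy[:, :2] * priors_cxcy[:, 2:] / 10 + priors_cxcy[:, :2],
                      torch.exp(gcxgcy[:, 2:] / 5) * priors_cxcy[:, 2:]], 1)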

Example 5: encode

# Required module: import torch [as alias]
# Or: from torch import log [as alias]
def encode(matched, priors, variances):
    """Encode the variances from the priorbox layers into the ground truth boxes
    we have matched (based on jaccard overlap) with the prior boxes.
    Args:
        matched: (tensor) Coords of ground truth for each prior in point-form
            Shape: [num_priors, 4].
        priors: (tensor) Prior boxes in center-offset form
            Shape: [num_priors,4].
        variances: (list[float]) Variances of priorboxes
    Return:
        encoded boxes (tensor), Shape: [num_priors, 4]
    """

    # distance between the matched box's center and the prior's center
    g_cxcy = (matched[:, :2] + matched[:, 2:])/2 - priors[:, :2]
    # encode variance
    g_cxcy /= (variances[0] * priors[:, 2:])
    # match wh / prior wh
    g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
    g_wh = torch.log(g_wh) / variances[1]
    # return target for smooth_l1_loss
    return torch.cat([g_cxcy, g_wh], 1)  # [num_priors,4]


# Adapted from https://github.com/Hakuyume/chainer-ssd 
Author: soo89, Project: CSD-SSD, Lines: 27, Source: box_utils.py
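
The matching decoder exponentiates the log-space sizes again. A sketch of a standard SSD-style decode under the same variance convention (assumed here; not shown in this file):

import torch

def decode(loc, priors, variances):
    # Invert encode(): shift the centers back, exponentiate the log-space
    # sizes, then convert center-size form to point form (xmin, ymin, xmax, ymax).
    boxes = torch.cat([
        priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
        priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])], 1)
    boxes[:, :2] -= boxes[:, 2:] / 2
    boxes[:, 2:] += boxes[:, :2]
    return boxes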

Example 6: forward

# Required module: import torch [as alias]
# Or: from torch import log [as alias]
def forward(self, p_proba, q_proba): # [B, T, V]
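        # Token-level KL divergence KL(q || p); positions after the first
        # special token (e.g. EOS) in each sequence of q are masked out.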
        mask = torch.ones(p_proba.size(0), p_proba.size(1))
        cnt = 0
        for i in range(q_proba.size(0)):
            flg = False
            for j in range(q_proba.size(1)):
                topv, topi = torch.max(q_proba[i,j], -1)
                if flg:
                    mask[i,j] = 0
                else:
                    mask[i,j] = 1
                    cnt += 1
                if topi.item() in self.special_tokens:
                    flg = True
        mask = cuda_(Variable(mask))
        loss = q_proba * (torch.log(q_proba) - torch.log(p_proba))
        masked_loss = torch.sum(mask.unsqueeze(-1) * loss)
        return masked_loss / (cnt + 1e-10) 
Author: AuCson, Project: SEDST, Lines: 20, Source: semi_sup_net.py
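
The term q * (torch.log(q) - torch.log(p)) is the pointwise KL divergence KL(q || p). Without the mask, the result matches torch.nn.functional.kl_div, which takes the log-probabilities of p as its first argument (a hypothetical equivalence check):

import torch
import torch.nn.functional as F

p = torch.softmax(torch.randn(2, 3, 5), dim=-1)
q = torch.softmax(torch.randn(2, 3, 5), dim=-1)
manual = (q * (torch.log(q) - torch.log(p))).sum()
builtin = F.kl_div(torch.log(p), q, reduction='sum')  # KL(q || p)
print(torch.allclose(manual, builtin))                # True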

Example 7: get_log_parition

# Required module: import torch [as alias]
# Or: from torch import log [as alias]
def get_log_parition(self, seq_emit_score):
        """
        Calculate the log of the partition function
        :param seq_emit_score: [seq_len, batch_size, tag_size]
        :return: Tensor with Size([batch_size])
        """
        seq_len, batch_size, tag_size = seq_emit_score.size()
        # dynamic programming table to store previously summarized tag logits
        dp_table = seq_emit_score.new_full(
            (batch_size, tag_size), self.NEG_LOGIT, requires_grad=False
        )
        dp_table[:, self.start_tag] = 0.

        batch_trans_mat = self.trans_mat.unsqueeze(0).expand(batch_size, tag_size, tag_size)

        for token_idx in range(seq_len):
            prev_logit = dp_table.unsqueeze(1)  # [batch_size, 1, tag_size]
            batch_emit_score = seq_emit_score[token_idx].unsqueeze(-1)  # [batch_size, tag_size, 1]
            cur_logit = batch_trans_mat + batch_emit_score + prev_logit  # [batch_size, tag_size, tag_size]
            dp_table = log_sum_exp(cur_logit)  # [batch_size, tag_size]
        batch_logit = dp_table + self.trans_mat[self.end_tag, :].unsqueeze(0)
        log_partition = log_sum_exp(batch_logit)  # [batch_size]

        return log_partition 
Author: dolphin-zs, Project: Doc2EDAG, Lines: 26, Source: ner_model.py
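
The log_sum_exp helper is defined elsewhere in ner_model.py. A minimal sketch consistent with how it is called here (reduction over the last dimension, with the usual max-shift for numerical stability):

import torch

def log_sum_exp(tensor):
    # Stable log-sum-exp over the last dimension: subtract the max
    # before exponentiating, then add it back after the log.
    max_score, _ = tensor.max(dim=-1, keepdim=True)
    return max_score.squeeze(-1) + torch.log(
        torch.sum(torch.exp(tensor - max_score), dim=-1))

torch.logsumexp(tensor, dim=-1) computes the same quantity in a single call.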

Example 8: maskedNLL

# Required module: import torch [as alias]
# Or: from torch import log [as alias]
def maskedNLL(y_pred, y_gt, mask):
    acc = torch.zeros_like(mask)
    muX = y_pred[:,:,0]
    muY = y_pred[:,:,1]
    sigX = y_pred[:,:,2]
    sigY = y_pred[:,:,3]
    rho = y_pred[:,:,4]
    ohr = torch.pow(1-torch.pow(rho,2),-0.5)
    x = y_gt[:,:, 0]
    y = y_gt[:,:, 1]
    # If we represent likelihood in feet^(-1):
    out = 0.5 * torch.pow(ohr, 2) * (
        torch.pow(sigX, 2) * torch.pow(x - muX, 2)
        + torch.pow(sigY, 2) * torch.pow(y - muY, 2)
        - 2 * rho * sigX * sigY * (x - muX) * (y - muY)
    ) - torch.log(sigX * sigY * ohr) + 1.8379
    # If we represent likelihood in m^(-1):
    # out = 0.5 * torch.pow(ohr, 2) * (torch.pow(sigX, 2) * torch.pow(x - muX, 2) + torch.pow(sigY, 2) * torch.pow(y - muY, 2) - 2 * rho * torch.pow(sigX, 1) * torch.pow(sigY, 1) * (x - muX) * (y - muY)) - torch.log(sigX * sigY * ohr) + 1.8379 - 0.5160
    acc[:,:,0] = out
    acc[:,:,1] = out
    acc = acc*mask
    lossVal = torch.sum(acc)/torch.sum(mask)
    return lossVal

## NLL for sequence, outputs sequence of NLL values for each time-step, uses mask for variable output lengths, used for evaluation 
Author: nachiket92, Project: conv-social-pooling, Lines: 23, Source: utils.py
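
The constant 1.8379 is log(2π), the normalizer of the bivariate Gaussian NLL (sigX, sigY and ohr enter as inverse scales here, hence the minus sign on the log term). A quick check:

import math
print(math.log(2 * math.pi))  # 1.8378770664093453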

Example 9: forward

# Required module: import torch [as alias]
# Or: from torch import log [as alias]
def forward(self, z_enc_out, u_enc_out, u_input_np, m_t_input, degree_input, last_hidden, z_input_np):
        sparse_z_input = Variable(self.get_sparse_selective_input(z_input_np), requires_grad=False)

        m_embed = self.emb(m_t_input)
        z_context = self.attn_z(last_hidden, z_enc_out)
        u_context = self.attn_u(last_hidden, u_enc_out)
        gru_in = torch.cat([m_embed, u_context, z_context, degree_input.unsqueeze(0)], dim=2)
        gru_out, last_hidden = self.gru(gru_in, last_hidden)
        gen_score = self.proj(torch.cat([z_context, u_context, gru_out], 2)).squeeze(0)
        z_copy_score = torch.tanh(self.proj_copy2(z_enc_out.transpose(0, 1)))
        z_copy_score = torch.matmul(z_copy_score, gru_out.squeeze(0).unsqueeze(2)).squeeze(2)
        z_copy_score = z_copy_score.cpu()
        z_copy_score_max = torch.max(z_copy_score, dim=1, keepdim=True)[0]
        z_copy_score = torch.exp(z_copy_score - z_copy_score_max)  # [B,T]
        z_copy_score = torch.log(torch.bmm(z_copy_score.unsqueeze(1), sparse_z_input)).squeeze(
            1) + z_copy_score_max  # [B,V]
        z_copy_score = cuda_(z_copy_score)

        scores = F.softmax(torch.cat([gen_score, z_copy_score], dim=1), dim=1)
        gen_score, z_copy_score = scores[:, :cfg.vocab_size], \
                                  scores[:, cfg.vocab_size:]
        proba = gen_score + z_copy_score[:, :cfg.vocab_size]  # [B,V]
        proba = torch.cat([proba, z_copy_score[:, cfg.vocab_size:]], 1)
        return proba, last_hidden, gru_out 
Author: ConvLab, Project: ConvLab, Lines: 26, Source: tsd_net.py
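
The max-subtraction around torch.exp / torch.log above is the standard trick for keeping torch.log(torch.bmm(...)) finite when the scores are large. The pattern in isolation, with hypothetical shapes:

import torch

scores = torch.randn(2, 7) * 50    # [B, T]; entries this large overflow exp()
weights = torch.rand(2, 7, 5)      # [B, T, V]; non-negative mixing weights
m = scores.max(dim=1, keepdim=True)[0]
stable = torch.log(torch.bmm(torch.exp(scores - m).unsqueeze(1), weights)).squeeze(1) + m
naive = torch.log(torch.bmm(torch.exp(scores).unsqueeze(1), weights)).squeeze(1)
print(torch.isfinite(stable).all())  # tensor(True)
print(torch.isfinite(naive).all())   # usually tensor(False): exp() overflowed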

Example 10: masked_cross_entropy_

# Required module: import torch [as alias]
# Or: from torch import log [as alias]
def masked_cross_entropy_(logits, target, length, take_log=False):
    if USE_CUDA:
        length = Variable(torch.LongTensor(length)).cuda()
    else:
        length = Variable(torch.LongTensor(length))

    # logits_flat: (batch * max_len, num_classes)
    # -1 means inferred from the other dimensions
    logits_flat = logits.view(-1, logits.size(-1))
    if take_log:
        logits_flat = torch.log(logits_flat)
    # target_flat: (batch * max_len, 1)
    target_flat = target.view(-1, 1)
    # losses_flat: (batch * max_len, 1)
    losses_flat = -torch.gather(logits_flat, dim=1, index=target_flat)
    # losses: (batch, max_len)
    losses = losses_flat.view(*target.size())
    # mask: (batch, max_len)
    mask = sequence_mask(sequence_length=length, max_len=target.size(1))
    losses = losses * mask.float()
    loss = losses.sum() / length.float().sum()
    return loss 
Author: ConvLab, Project: ConvLab, Lines: 24, Source: trade_utils.py
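
sequence_mask is another helper from trade_utils.py that is not shown here; a minimal sketch matching the call above:

import torch

def sequence_mask(sequence_length, max_len=None):
    # (batch, max_len) mask: True inside each sequence, False on the padding.
    if max_len is None:
        max_len = int(sequence_length.max().item())
    positions = torch.arange(max_len, device=sequence_length.device)
    return positions.unsqueeze(0) < sequence_length.unsqueeze(1)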

Example 11: masked_cross_entropy_for_slot

# Required module: import torch [as alias]
# Or: from torch import log [as alias]
def masked_cross_entropy_for_slot(logits, target, mask, use_softmax=True):
    # print("logits", logits)
    # print("target", target)
    # -1 means inferred from the other dimensions
    logits_flat = logits.view(-1, logits.size(-1))
    # print(logits_flat.size())
    if use_softmax:
        log_probs_flat = functional.log_softmax(logits_flat, dim=1)
    else:
        log_probs_flat = logits_flat  # torch.log(logits_flat)
    # print("log_probs_flat", log_probs_flat)
    target_flat = target.view(-1, 1)
    # print("target_flat", target_flat)
    losses_flat = -torch.gather(log_probs_flat, dim=1, index=target_flat)
    losses = losses_flat.view(*target.size())  # b * |s|
    losses = losses * mask.float()
    loss = losses.sum() / (losses.size(0)*losses.size(1))
    # print("loss inside", loss)
    return loss 
Author: ConvLab, Project: ConvLab, Lines: 21, Source: trade_utils.py

Example 12: forward

# Required module: import torch [as alias]
# Or: from torch import log [as alias]
def forward(self, logits, temperature=1.0, hard=False,
                return_max_id=False):
        """
        :param logits: [batch_size, n_class] unnormalized log-prob
        :param temperature: non-negative scalar
        :param hard: if True take argmax
        :param return_max_id
        :return: [batch_size, n_class] sample from gumbel softmax
        """
        y = self.gumbel_softmax_sample(logits, temperature, self.use_gpu)
        _, y_hard = th.max(y, dim=1, keepdim=True)
        if hard:
            y_onehot = cast_type(Variable(th.zeros(y.size())), FLOAT, self.use_gpu)
            y_onehot.scatter_(1, y_hard, 1.0)
            y = y_onehot
        if return_max_id:
            return y, y_hard
        else:
            return y 
Author: ConvLab, Project: ConvLab, Lines: 21, Source: nn_lib.py
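
gumbel_softmax_sample is defined elsewhere in nn_lib.py; the usual formulation builds Gumbel noise from torch.log. A sketch, leaving out the use_gpu argument that the original threads through:

import torch
import torch.nn.functional as F

def gumbel_softmax_sample(logits, temperature, eps=1e-20):
    # Gumbel(0, 1) noise via -log(-log(U)), added to the logits,
    # followed by a temperature-scaled softmax.
    u = torch.rand_like(logits)
    gumbel = -torch.log(-torch.log(u + eps) + eps)
    return F.softmax((logits + gumbel) / temperature, dim=-1)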

Example 13: update_critic

# Required module: import torch [as alias]
# Or: from torch import log [as alias]
def update_critic(self, obs, action, reward, next_obs, not_done, L, step):
        with torch.no_grad():
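            # log_pi is the log-probability of the sampled action under the
            # policy; the L.log(...) calls in this class are a metrics
            # logger's method, not torch.log.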
            _, policy_action, log_pi, _ = self.actor(next_obs)
            target_Q1, target_Q2 = self.critic_target(next_obs, policy_action)
            target_V = torch.min(target_Q1,
                                 target_Q2) - self.alpha.detach() * log_pi
            target_Q = reward + (not_done * self.discount * target_V)

        # get current Q estimates
        current_Q1, current_Q2 = self.critic(obs, action)
        critic_loss = F.mse_loss(current_Q1,
                                 target_Q) + F.mse_loss(current_Q2, target_Q)
        L.log('train_critic/loss', critic_loss, step)


        # Optimize the critic
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()

        self.critic.log(L, step) 
Author: denisyarats, Project: pytorch_sac_ae, Lines: 23, Source: sac_ae.py

Example 14: update_decoder

# Required module: import torch [as alias]
# Or: from torch import log [as alias]
def update_decoder(self, obs, target_obs, L, step):
        h = self.critic.encoder(obs)

        if target_obs.dim() == 4:
            # preprocess images to be in [-0.5, 0.5] range
            target_obs = utils.preprocess_obs(target_obs)
        rec_obs = self.decoder(h)
        rec_loss = F.mse_loss(target_obs, rec_obs)

        # add L2 penalty on latent representation
        # see https://arxiv.org/pdf/1903.12436.pdf
        latent_loss = (0.5 * h.pow(2).sum(1)).mean()

        loss = rec_loss + self.decoder_latent_lambda * latent_loss
        self.encoder_optimizer.zero_grad()
        self.decoder_optimizer.zero_grad()
        loss.backward()

        self.encoder_optimizer.step()
        self.decoder_optimizer.step()
        L.log('train_ae/ae_loss', loss, step)

        self.decoder.log(L, step, log_freq=LOG_FREQ) 
Author: denisyarats, Project: pytorch_sac_ae, Lines: 25, Source: sac_ae.py

Example 15: update

# Required module: import torch [as alias]
# Or: from torch import log [as alias]
def update(self, replay_buffer, L, step):
        obs, action, reward, next_obs, not_done = replay_buffer.sample()

        L.log('train/batch_reward', reward.mean(), step)

        self.update_critic(obs, action, reward, next_obs, not_done, L, step)

        if step % self.actor_update_freq == 0:
            self.update_actor_and_alpha(obs, L, step)

        if step % self.critic_target_update_freq == 0:
            utils.soft_update_params(
                self.critic.Q1, self.critic_target.Q1, self.critic_tau
            )
            utils.soft_update_params(
                self.critic.Q2, self.critic_target.Q2, self.critic_tau
            )
            utils.soft_update_params(
                self.critic.encoder, self.critic_target.encoder,
                self.encoder_tau
            )

        if self.decoder is not None and step % self.decoder_update_freq == 0:
            self.update_decoder(obs, obs, L, step) 
Author: denisyarats, Project: pytorch_sac_ae, Lines: 26, Source: sac_ae.py


Note: the torch.log method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and any use or redistribution should follow the corresponding project's license. Please do not reproduce without permission.