This article collects typical usage examples of the torch.log2 method in Python. If you have been wondering what torch.log2 does and how to use it, the curated code examples below should help. You can also explore further usage examples of the torch module, where this method lives.
The 15 code examples of torch.log2 shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
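As a quick orientation before the examples, torch.log2 computes the element-wise base-2 logarithm of a tensor; a minimal sketch:

import torch

# Element-wise base-2 logarithm; zero maps to -inf and negative inputs to nan.
x = torch.tensor([0.25, 1.0, 2.0, 8.0])
print(torch.log2(x))  # tensor([-2., 0., 1., 3.])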
Example 1: __init__
# Required import: import torch [as alias]
# Or: from torch import log2 [as alias]
def __init__(self, output_size, scales, sampling_ratio):
"""
Arguments:
output_size (list[tuple[int]] or list[int]): output size for the pooled region
scales (list[float]): scales for each Pooler
sampling_ratio (int): sampling ratio for ROIAlign
"""
super(Pooler, self).__init__()
poolers = []
for scale in scales:
poolers.append(
ROIAlign(
output_size, spatial_scale=scale, sampling_ratio=sampling_ratio
)
)
self.poolers = nn.ModuleList(poolers)
self.output_size = output_size
# get the levels in the feature map by leveraging the fact that the network always
# downsamples by a factor of 2 at each level.
lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()
lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()
self.map_levels = LevelMapper(lvl_min, lvl_max)
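The level-mapping arithmetic works because each FPN level downsamples by a factor of 2, so level = -log2(scale). A small worked sketch with illustrative scale values (not taken from the original call site):

import torch

# Scales 1/4 .. 1/32 correspond to pyramid levels 2 .. 5, since -log2(1/4) = 2.
scales = [1.0 / 4, 1.0 / 8, 1.0 / 16, 1.0 / 32]
levels = [-torch.log2(torch.tensor(s, dtype=torch.float32)).item() for s in scales]
print(levels)  # [2.0, 3.0, 4.0, 5.0]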
Example 2: p2o
# Required import: import torch [as alias]
# Or: from torch import log2 [as alias]
def p2o(psf, shape):
'''
# psf: NxCxhxw
# shape: [H,W]
# otf: NxCxHxWx2
'''
otf = torch.zeros(psf.shape[:-2] + shape).type_as(psf)
otf[...,:psf.shape[2],:psf.shape[3]].copy_(psf)
for axis, axis_size in enumerate(psf.shape[2:]):
otf = torch.roll(otf, -int(axis_size / 2), dims=axis+2)
    otf = torch.rfft(otf, 2, onesided=False)  # legacy FFT API; removed in PyTorch 1.8
    # Zero imaginary parts that fall within FFT round-off error (machine eps ~ 2.22e-16).
    n_ops = torch.sum(torch.tensor(psf.shape).type_as(psf) * torch.log2(torch.tensor(psf.shape).type_as(psf)))
    otf[..., 1][torch.abs(otf[..., 1]) < n_ops * 2.22e-16] = torch.tensor(0).type_as(psf)
return otf
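A usage sketch for p2o, assuming a PyTorch version below 1.8 (where torch.rfft still exists); the 3x3 averaging PSF here is illustrative:

import torch

# Hypothetical box-filter PSF, batched as NxCxhxw; embed it in a 64x64 OTF.
psf = torch.full((1, 1, 3, 3), 1.0 / 9)
otf = p2o(psf, (64, 64))  # NxCxHxWx2, real/imaginary parts in the last dim
print(otf.shape)          # torch.Size([1, 1, 64, 64, 2])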
Example 3: p2o
# Required import: import torch [as alias]
# Or: from torch import log2 [as alias]
def p2o(psf, shape):
'''
Args:
psf: NxCxhxw
shape: [H,W]
Returns:
otf: NxCxHxWx2
'''
otf = torch.zeros(psf.shape[:-2] + shape).type_as(psf)
otf[...,:psf.shape[2],:psf.shape[3]].copy_(psf)
for axis, axis_size in enumerate(psf.shape[2:]):
otf = torch.roll(otf, -int(axis_size / 2), dims=axis+2)
    otf = torch.rfft(otf, 2, onesided=False)
    n_ops = torch.sum(torch.tensor(psf.shape).type_as(psf) * torch.log2(torch.tensor(psf.shape).type_as(psf)))
    otf[..., 1][torch.abs(otf[..., 1]) < n_ops * 2.22e-16] = torch.tensor(0).type_as(psf)
return otf
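Both p2o variants rely on torch.rfft, which was removed in PyTorch 1.8. A hedged port to the torch.fft module that returns a complex OTF instead of a trailing real/imag dimension (a sketch, not the original authors' code):

import torch

def p2o_complex(psf, shape):
    '''Convert a PSF (NxCxhxw) to a complex OTF (NxCxHxW) via torch.fft.'''
    otf = torch.zeros(psf.shape[:-2] + shape).type_as(psf)
    otf[..., :psf.shape[2], :psf.shape[3]].copy_(psf)
    # Circularly shift so the PSF center sits at the origin, as in p2o above.
    for axis, axis_size in enumerate(psf.shape[2:]):
        otf = torch.roll(otf, -int(axis_size / 2), dims=axis + 2)
    return torch.fft.fftn(otf, dim=(-2, -1))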
Example 4: __init__
# Required import: import torch [as alias]
# Or: from torch import log2 [as alias]
def __init__(self, output_size, scales):
"""
Arguments:
output_size (list[tuple[int]] or list[int]): output size for the pooled region
scales (list[float]): scales for each Pooler
"""
super(PyramidRROIAlign, self).__init__()
poolers = []
for scale in scales:
poolers.append(
RROIAlign(
output_size, spatial_scale=scale
)
)
self.poolers = nn.ModuleList(poolers)
self.output_size = output_size
# get the levels in the feature map by leveraging the fact that the network always
# downsamples by a factor of 2 at each level.
lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()
lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()
self.map_levels = LevelMapper(lvl_min, lvl_max)
Example 5: test_LinQuant_forward
# Required import: import torch [as alias]
# Or: from torch import log2 [as alias]
def test_LinQuant_forward(fsr, bit_width, inputs):
op1 = LogQuant(fsr=fsr, bit_width=bit_width, with_sign=False, lin_back=True)
    expected1 = torch.pow(torch.ones_like(inputs) * 2, torch.clamp(torch.round(torch.log2(torch.abs(inputs))), fsr - 2 ** bit_width, fsr))
assert equals(
op1.apply(inputs),
expected1
)
op2 = LogQuant(fsr=fsr, bit_width=bit_width, with_sign=True, lin_back=True)
    expected2 = torch.sign(inputs) * torch.pow(torch.ones_like(inputs) * 2, torch.clamp(torch.round(torch.log2(torch.abs(inputs))), fsr - 2 ** bit_width, fsr))
assert equals(
op2.apply(inputs),
expected2
)
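To see what the expected tensors encode: LogQuant snaps each magnitude to the nearest power of two, clamping the exponent to [fsr - 2**bit_width, fsr]. A standalone numeric sketch with illustrative fsr and bit_width values:

import torch

fsr, bit_width = 0, 3  # exponent range [-8, 0]
x = torch.tensor([0.3, 0.07, 1.9])
exponents = torch.clamp(torch.round(torch.log2(torch.abs(x))), fsr - 2 ** bit_width, fsr)
print(torch.pow(2.0, exponents))  # tensor([0.2500, 0.0625, 1.0000])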
Example 6: discounted_cumulative_gain
# Required import: import torch [as alias]
# Or: from torch import log2 [as alias]
def discounted_cumulative_gain(y: torch.Tensor,
                               k: int = 10,
                               gain_type: str = "exp2"):
    # Calculate gains; "exp2" is the standard exponential gain 2^rel - 1
    y_partial = y[:k]
    if gain_type == "exp2":
        gains = torch.pow(2.0, y_partial) - 1.0
    elif gain_type == "identity":
        gains = y_partial
    else:
        raise ValueError("gain type only allows \"exp2\" or \"identity\".")
    # Calculate discount; use the actual length in case y holds fewer than k items
    ranges = torch.arange(1, y_partial.shape[0] + 1, 1).float()
    discount = torch.log2(ranges + 1)
    dcg = torch.sum(torch.div(gains, discount))
    return dcg
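A usage sketch: nDCG@k divides this DCG by the DCG of the ideally re-sorted labels (assuming the function above is in scope):

import torch

y_system = torch.tensor([3.0, 0.0, 2.0, 1.0])  # relevance labels in system-ranked order
y_ideal = torch.sort(y_system, descending=True).values
ndcg = discounted_cumulative_gain(y_system, k=4) / discounted_cumulative_gain(y_ideal, k=4)
print(ndcg)  # scalar in (0, 1]; 1.0 for a perfect ranking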
Example 7: log2
# Required import: import torch [as alias]
# Or: from torch import log2 [as alias]
def log2(x, out=None):
"""
log base 2, element-wise.
Parameters
----------
x : ht.DNDarray
The value for which to compute the logarithm.
out : ht.DNDarray or None, optional
A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
or set to None, a fresh tensor is allocated.
Returns
-------
logarithms : ht.DNDarray
A tensor of the same shape as x, containing the positive logarithms of each element in this tensor.
Negative input elements are returned as nan. If out was provided, logarithms is a reference to it.
Examples
--------
>>> ht.log2(ht.arange(5))
tensor([ -inf, 0.0000, 1.0000, 1.5850, 2.0000])
"""
return operations.__local_op(torch.log2, x, out)
Example 8: get_delta_gains
# Required import: import torch [as alias]
# Or: from torch import log2 [as alias]
def get_delta_gains(batch_stds, discount=False):
    '''
    Delta-gains w.r.t. pairwise swapping of the ideal ltr_adhoc ranking
    :param batch_stds: the standard labels sorted in a descending order
    :param discount: whether to apply the position discount 1 / log2(rank + 2)
    :return: batch of absolute gain changes for every pair of positions
    '''
batch_gains = torch.pow(2.0, batch_stds) - 1.0
batch_g_diffs = torch.unsqueeze(batch_gains, dim=2) - torch.unsqueeze(batch_gains, dim=1)
if discount:
batch_std_ranks = torch.arange(batch_stds.size(1)).type(tensor)
batch_dists = 1.0 / torch.log2(batch_std_ranks + 2.0) # discount co-efficients
batch_dists = torch.unsqueeze(batch_dists, dim=0)
batch_dists_diffs = torch.unsqueeze(batch_dists, dim=2) - torch.unsqueeze(batch_dists, dim=1)
batch_delta_gs = torch.abs(batch_g_diffs) * torch.abs(batch_dists_diffs) # absolute changes w.r.t. pairwise swapping
else:
batch_delta_gs = torch.abs(batch_g_diffs) # absolute delta gains w.r.t. pairwise swapping
return batch_delta_gs
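get_delta_gains relies on a module-level `tensor` type (torch.FloatTensor, or its CUDA counterpart, in the source repo). A self-contained call sketch that supplies that global:

import torch

tensor = torch.FloatTensor  # the module-level dtype the function expects
batch_stds = torch.tensor([[3.0, 2.0, 1.0, 0.0]])  # labels sorted descending
deltas = get_delta_gains(batch_stds, discount=True)
print(deltas.shape)  # torch.Size([1, 4, 4]): one delta per position pair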
Example 9: tor_discounted_cumu_gain_at_k
# Required import: import torch [as alias]
# Or: from torch import log2 [as alias]
def tor_discounted_cumu_gain_at_k(sorted_labels, cutoff, multi_level_rele=True):
    '''
    ICML-nDCG, which places stronger emphasis on retrieving relevant documents
    :param sorted_labels: ranked labels (either standard or predicted by a system) as a tensor
    :param cutoff: the maximum rank position to be considered
    :param multi_level_rele: either the case of multi-level relevance or the case of listwise int-value, e.g., MQ2007-list
    :return: discounted cumulative gain at the cutoff
    '''
if multi_level_rele: #the common case with multi-level labels
nums = torch.pow(2.0, sorted_labels[0:cutoff]) - 1.0
else:
nums = sorted_labels[0:cutoff] #the case like listwise ranking, where the relevance is labeled as (n-rank_position)
denoms = torch.log2(torch.arange(cutoff, dtype=torch.float) + 2.0) #discounting factor
dited_cumu_gain = torch.sum(nums/denoms) # discounted cumulative gain value
return dited_cumu_gain
Example 10: torch_discounted_cumu_gain_at_k
# Required import: import torch [as alias]
# Or: from torch import log2 [as alias]
def torch_discounted_cumu_gain_at_k(sorted_labels, cutoff, multi_level_rele=True):
    '''
    ICML-nDCG, which places stronger emphasis on retrieving relevant documents
    :param sorted_labels: ranked labels (either standard or predicted by a system) as a tensor
    :param cutoff: the maximum rank position to be considered
    :param multi_level_rele: either the case of multi-level relevance or the case of listwise int-value, e.g., MQ2007-list
    :return: discounted cumulative gain at the cutoff
    '''
if multi_level_rele: #the common case with multi-level labels
nums = torch.pow(2.0, sorted_labels[0:cutoff]) - 1.0
else:
nums = sorted_labels[0:cutoff] #the case like listwise ltr_adhoc, where the relevance is labeled as (n-rank_position)
denoms = torch.log2(torch.arange(cutoff).type(torch.FloatTensor) + 2.0) #discounting factor
dited_cumu_gain = torch.sum(nums/denoms) # discounted cumulative gain value
return dited_cumu_gain
Example 11: torch_discounted_cumu_gain_at_ks
# Required import: import torch [as alias]
# Or: from torch import log2 [as alias]
def torch_discounted_cumu_gain_at_ks(sorted_labels, max_cutoff, multi_level_rele=True):
    '''
    ICML-nDCG, which places stronger emphasis on retrieving relevant documents
    :param sorted_labels: ranked labels (either standard or predicted by a system) as a tensor
    :param max_cutoff: the maximum rank position to be considered
    :param multi_level_rele: either the case of multi-level relevance or the case of listwise int-value, e.g., MQ2007-list
    :return: discounted cumulative gains at each rank position up to max_cutoff
    '''
if multi_level_rele: #the common case with multi-level labels
nums = torch.pow(2.0, sorted_labels[0:max_cutoff]) - 1.0
else:
nums = sorted_labels[0:max_cutoff] #the case like listwise ltr_adhoc, where the relevance is labeled as (n-rank_position)
denoms = torch.log2(torch.arange(max_cutoff).type(torch.FloatTensor) + 2.0) #discounting factor
dited_cumu_gains = torch.cumsum(nums/denoms, dim=0) # discounted cumulative gain value w.r.t. each position
return dited_cumu_gains
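Because torch.cumsum accumulates over positions, this returns the DCG at every cutoff 1..max_cutoff in one call; dividing element-wise by the ideal counterpart yields nDCG at all cutoffs at once (a sketch assuming the function above):

import torch

sys_sorted = torch.tensor([2.0, 3.0, 0.0, 1.0])  # labels in system-ranked order
ideal_sorted, _ = torch.sort(sys_sorted, descending=True)
ndcg_at_ks = torch_discounted_cumu_gain_at_ks(sys_sorted, max_cutoff=4) \
           / torch_discounted_cumu_gain_at_ks(ideal_sorted, max_cutoff=4)
print(ndcg_at_ks)  # nDCG@1 through nDCG@4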
Example 12: get_delta_ndcg
# Required import: import torch [as alias]
# Or: from torch import log2 [as alias]
def get_delta_ndcg(batch_stds, batch_stds_sorted_via_preds):
    '''
    Delta-nDCG w.r.t. pairwise swapping of the currently predicted ltr_adhoc ranking
    :param batch_stds: the standard labels sorted in a descending order
    :param batch_stds_sorted_via_preds: the standard labels sorted based on the corresponding predictions
    :return: batch of absolute nDCG changes for every pair of positions
    '''
batch_idcgs = torch_ideal_dcg(batch_sorted_labels=batch_stds, gpu=gpu) # ideal discount cumulative gains
batch_gains = torch.pow(2.0, batch_stds_sorted_via_preds) - 1.0
batch_n_gains = batch_gains / batch_idcgs # normalised gains
batch_ng_diffs = torch.unsqueeze(batch_n_gains, dim=2) - torch.unsqueeze(batch_n_gains, dim=1)
batch_std_ranks = torch.arange(batch_stds_sorted_via_preds.size(1)).type(tensor)
batch_dists = 1.0 / torch.log2(batch_std_ranks + 2.0) # discount co-efficients
batch_dists = torch.unsqueeze(batch_dists, dim=0)
batch_dists_diffs = torch.unsqueeze(batch_dists, dim=2) - torch.unsqueeze(batch_dists, dim=1)
batch_delta_ndcg = torch.abs(batch_ng_diffs) * torch.abs(batch_dists_diffs) # absolute changes w.r.t. pairwise swapping
return batch_delta_ndcg
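get_delta_ndcg also assumes module-level names from the source repo: `gpu`, `tensor`, and `torch_ideal_dcg`. A hedged sketch of what the ideal-DCG helper plausibly computes (not necessarily the repo's exact implementation):

import torch

def torch_ideal_dcg(batch_sorted_labels, gpu=False):
    '''DCG of labels already sorted descending; returns shape (batch, 1) for broadcasting.'''
    gains = torch.pow(2.0, batch_sorted_labels) - 1.0
    ranks = torch.arange(batch_sorted_labels.size(1), dtype=torch.float,
                         device=batch_sorted_labels.device)
    discounts = torch.log2(ranks + 2.0)
    return torch.sum(gains / discounts, dim=1, keepdim=True)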
Example 13: __init__
# Required import: import torch [as alias]
# Or: from torch import log2 [as alias]
def __init__(self, output_size, scales, sampling_ratio, canonical_level=4):
"""
Arguments:
output_size (list[tuple[int]] or list[int]): output size for the pooled region
scales (list[float]): scales for each Pooler
sampling_ratio (int): sampling ratio for ROIAlign
"""
super(Pooler, self).__init__()
poolers = []
for scale in scales:
poolers.append(
ROIAlign(
output_size, spatial_scale=scale, sampling_ratio=sampling_ratio
)
)
self.poolers = nn.ModuleList(poolers)
self.output_size = output_size
# get the levels in the feature map by leveraging the fact that the network always
# downsamples by a factor of 2 at each level.
lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()
lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()
self.map_levels = LevelMapper(
lvl_min, lvl_max, canonical_level=canonical_level
)
Example 14: _get_discount
# Required import: import torch [as alias]
# Or: from torch import log2 [as alias]
def _get_discount(self, slate_size: int) -> Tensor:
weights = DCGSlateMetric._weights
if (
weights is None
or weights.shape[0] < slate_size
or weights.device != self._device
):
DCGSlateMetric._weights = torch.reciprocal(
torch.log2(
torch.arange(
2, slate_size + 2, dtype=torch.double, device=self._device
)
)
)
weights = DCGSlateMetric._weights
assert weights is not None
return weights[:slate_size]
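The class-level cache means these weights are computed once and reused until a longer slate or a different device is requested. Standalone, the weight vector is simply 1 / log2(rank + 1) for ranks 1..slate_size (a sketch):

import torch

slate_size = 5
weights = torch.reciprocal(torch.log2(torch.arange(2, slate_size + 2, dtype=torch.double)))
print(weights)  # tensor([1.0000, 0.6309, 0.5000, 0.4307, 0.3869], dtype=torch.float64)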
Example 15: compute
# Required import: import torch [as alias]
# Or: from torch import log2 [as alias]
def compute(self, pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
"""Compute the bits per character given the input and target.
Parameters
----------
pred: torch.Tensor
input logits of shape (B x N)
target: torch.LontTensor
target tensor of shape (B)
Returns
-------
torch.float
Output perplexity
"""
entropy = self.entropy(pred, target).mean()
return torch.log2(torch.exp(entropy))
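Since log2(exp(H)) = H / ln 2, the returned value is just the natural-log entropy converted from nats to bits; a hedged equivalence check:

import math
import torch

entropy = torch.tensor(1.386)          # natural-log entropy (nats)
bpc = torch.log2(torch.exp(entropy))   # as in compute() above
print(torch.allclose(bpc, entropy / math.log(2)))  # True: bits = nats / ln 2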