This article collects typical usage examples of the torch.floor method in Python. If you have been wondering what torch.floor does, how to call it, or what real-world usage looks like, the curated code samples below may help. You can also read further about the torch module in which the method is defined.
Below are 15 code examples of torch.floor, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
Example 1: map_roi_levels
# Required module: import torch [as alias]
# Or: from torch import floor [as alias]
def map_roi_levels(self, rois, num_levels):
    """Map rois to corresponding feature levels by scales.
    - scale < finest_scale * 2: level 0
    - finest_scale * 2 <= scale < finest_scale * 4: level 1
    - finest_scale * 4 <= scale < finest_scale * 8: level 2
    - scale >= finest_scale * 8: level 3
    Args:
        rois (Tensor): Input RoIs, shape (k, 5).
        num_levels (int): Total level number.
    Returns:
        Tensor: Level index (0-based) of each RoI, shape (k, )
    """
    scale = torch.sqrt(
        (rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2]))
    target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))
    target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
    return target_lvls
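As a quick sanity check of the mapping, the floor-of-log2 formula can be run standalone on a few hand-picked box scales. The finest_scale of 56 below is an assumed configuration value, not something taken from the example above:

import torch

finest_scale = 56   # assumed value; in practice it comes from the RoI extractor config
num_levels = 4
scale = torch.tensor([30., 120., 250., 500.])
target_lvls = torch.floor(torch.log2(scale / finest_scale + 1e-6))
target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
print(target_lvls)  # tensor([0, 1, 2, 3]): each level covers roughly a doubling of scale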
Example 2: map_roi_levels
# Required module: import torch [as alias]
# Or: from torch import floor [as alias]
def map_roi_levels(self, rois, num_levels):
    """Map rois to corresponding feature levels by scales.
    - scale < finest_scale: level 0
    - finest_scale <= scale < finest_scale * 2: level 1
    - finest_scale * 2 <= scale < finest_scale * 4: level 2
    - scale >= finest_scale * 4: level 3
    Args:
        rois (Tensor): Input RoIs, shape (k, 5).
        num_levels (int): Total level number.
    Returns:
        Tensor: Level index (0-based) of each RoI, shape (k, )
    """
    scale = torch.sqrt(
        (rois[:, 3] - rois[:, 1] + 1) * (rois[:, 4] - rois[:, 2] + 1))
    target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))
    target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
    return target_lvls
Example 3: map_roi_levels
# Required module: import torch [as alias]
# Or: from torch import floor [as alias]
def map_roi_levels(self, rois, num_levels):
    """Map rrois to corresponding feature levels by scales.
    - scale < finest_scale: level 0
    - finest_scale <= scale < finest_scale * 2: level 1
    - finest_scale * 2 <= scale < finest_scale * 4: level 2
    - scale >= finest_scale * 4: level 3
    Args:
        rois (Tensor): Input RRoIs, shape (k, 6). (index, x, y, w, h, angle)
        num_levels (int): Total level number.
    Returns:
        Tensor: Level index (0-based) of each RoI, shape (k, )
    """
    scale = torch.sqrt(rois[:, 3] * rois[:, 4])
    target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))
    target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
    return target_lvls
Example 4: fit_positive
# Required module: import torch [as alias]
# Or: from torch import floor [as alias]
def fit_positive(rows, cols, yx_min, yx_max, anchors):
    device_id = anchors.get_device() if torch.cuda.is_available() else None
    batch_size, num, _ = yx_min.size()
    num_anchors, _ = anchors.size()
    valid = torch.prod(yx_min < yx_max, -1)
    center = (yx_min + yx_max) / 2
    ij = torch.floor(center)
    i, j = torch.unbind(ij.long(), -1)
    index = i * cols + j
    anchors2 = anchors / 2
    iou_matrix = utils.iou.torch.iou_matrix((yx_min - center).view(-1, 2), (yx_max - center).view(-1, 2), -anchors2, anchors2).view(batch_size, -1, num_anchors)
    iou, index_anchor = iou_matrix.max(-1)
    _positive = []
    cells = rows * cols
    for valid, index, index_anchor in zip(torch.unbind(valid), torch.unbind(index), torch.unbind(index_anchor)):
        index, index_anchor = (t[valid] for t in (index, index_anchor))
        t = utils.ensure_device(torch.ByteTensor(cells, num_anchors).zero_(), device_id)
        t[index, index_anchor] = 1
        _positive.append(t)
    return torch.stack(_positive)
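The torch.floor call above is what snaps each box to a grid cell: the box center, already expressed in cell units, is floored to integer row and column indices and then flattened into a single cell index. A minimal standalone sketch of just that step, with a made-up grid and boxes:

import torch

rows, cols = 13, 13                                 # assumed grid size
yx_min = torch.tensor([[1.2, 3.7], [6.0, 6.9]])     # (num, 2) box corners in cell units
yx_max = torch.tensor([[4.8, 5.3], [9.4, 11.1]])
center = (yx_min + yx_max) / 2                      # box centers
ij = torch.floor(center)                            # integer cell coordinates
i, j = torch.unbind(ij.long(), -1)
index = i * cols + j                                # flattened cell index, as in fit_positive
print(ij, index)  # centers (3.0, 4.5) -> cell (3, 4), (7.7, 9.0) -> cell (7, 9)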
Example 5: create_dummy_data
# Required module: import torch [as alias]
# Or: from torch import floor [as alias]
def create_dummy_data(data_dir, num_examples=1000, maxlen=20):
    def _create_dummy_data(filename):
        data = torch.rand(num_examples * maxlen)
        data = 97 + torch.floor(26 * data).int()
        with open(os.path.join(data_dir, filename), 'w') as h:
            offset = 0
            for _ in range(num_examples):
                ex_len = random.randint(1, maxlen)
                ex_str = ' '.join(map(chr, data[offset:offset + ex_len]))
                print(ex_str, file=h)
                offset += ex_len

    _create_dummy_data('train.in')
    _create_dummy_data('train.out')
    _create_dummy_data('valid.in')
    _create_dummy_data('valid.out')
    _create_dummy_data('test.in')
    _create_dummy_data('test.out')
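The expression 97 + torch.floor(26 * data) maps uniform samples to the ASCII codes of the lowercase letters a-z (97 is ord('a')). A quick way to exercise the helper, assuming os, random and torch are imported as the snippet requires; the temporary directory is only for illustration:

import os
import tempfile

data_dir = tempfile.mkdtemp()
create_dummy_data(data_dir, num_examples=10, maxlen=5)
with open(os.path.join(data_dir, 'train.in')) as h:
    print(h.read())  # ten lines of space-separated random lowercase letters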
Example 6: _init_buffers
# Required module: import torch [as alias]
# Or: from torch import floor [as alias]
def _init_buffers(self):
    m_min = 0. if self.f_min == 0 else 2595 * np.log10(1. + (self.f_min / 700))
    m_max = 2595 * np.log10(1. + (self.f_max / 700))
    m_pts = torch.linspace(m_min, m_max, self.n_mels + 2)
    f_pts = (700 * (10**(m_pts / 2595) - 1))
    bins = torch.floor(((self.n_fft - 1) * 2) * f_pts / self.sr).long()
    fb = torch.zeros(self.n_fft, self.n_mels)
    for m in range(1, self.n_mels + 1):
        f_m_minus = bins[m - 1].item()
        f_m = bins[m].item()
        f_m_plus = bins[m + 1].item()
        if f_m_minus != f_m:
            fb[f_m_minus:f_m, m - 1] = (torch.arange(f_m_minus, f_m) - f_m_minus) / (f_m - f_m_minus)
        if f_m != f_m_plus:
            fb[f_m:f_m_plus, m - 1] = (f_m_plus - torch.arange(f_m, f_m_plus)) / (f_m_plus - f_m)
    self.register_buffer("fb", fb)
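Here torch.floor snaps the mel-spaced frequency points back to integer FFT bin indices before the triangular filters are built. The bin computation can be reproduced standalone; the sample rate, bin count and mel count below are assumptions for illustration, not values from the original class:

import numpy as np
import torch

sr, n_fft, n_mels = 16000, 201, 10         # assumed: n_fft here is the number of frequency bins
f_min, f_max = 0., sr / 2
m_min = 0. if f_min == 0 else 2595 * np.log10(1. + f_min / 700)
m_max = 2595 * np.log10(1. + f_max / 700)
m_pts = torch.linspace(m_min, m_max, n_mels + 2)   # evenly spaced on the mel scale
f_pts = 700 * (10 ** (m_pts / 2595) - 1)           # converted back to Hz
bins = torch.floor(((n_fft - 1) * 2) * f_pts / sr).long()
print(bins)  # increasing FFT bin indices from 0 up to n_fft - 1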
Example 7: interpolate
# Required module: import torch [as alias]
# Or: from torch import floor [as alias]
def interpolate(feat, pt):
    # feat: (c, h, w)
    # pt: (K, 2)
    # return: (c, K)
    h, w = feat.shape[1], feat.shape[2]
    x = pt[:, 0] * (w - 1)
    y = pt[:, 1] * (h - 1)
    x0 = torch.floor(x)
    y0 = torch.floor(y)
    val = feat[:, y0.long(), x0.long()] * (x0 + 1 - x) * (y0 + 1 - y) + \
          feat[:, y0.long() + 1, x0.long()] * (x0 + 1 - x) * (y - y0) + \
          feat[:, y0.long(), x0.long() + 1] * (x - x0) * (y0 + 1 - y) + \
          feat[:, y0.long() + 1, x0.long() + 1] * (x - x0) * (y - y0)
    return val
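interpolate performs bilinear sampling: torch.floor splits each query coordinate into an integer corner and a fractional weight, and the four neighbouring feature values are blended accordingly. Note that the indexing assumes each point stays strictly inside the map so that x0 + 1 and y0 + 1 are valid. A small hypothetical usage, assuming the function above is in scope:

import torch

feat = torch.arange(2 * 4 * 4, dtype=torch.float32).view(2, 4, 4)  # c=2, h=4, w=4
pt = torch.tensor([[0.25, 0.25], [0.5, 0.1]])                      # K=2 points, normalized (x, y)
vals = interpolate(feat, pt)
print(vals.shape)  # torch.Size([2, 2]): one c-vector per query point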
Example 8: lidar_to_img
# Required module: import torch [as alias]
# Or: from torch import floor [as alias]
def lidar_to_img(points, img_size):
    # pdb.set_trace()
    lidar_data = np.array(points[:, :2])
    lidar_data *= 9.9999
    lidar_data -= (0.5 * img_size, 0.5 * img_size)
    lidar_data = np.fabs(lidar_data)
    lidar_data = lidar_data.astype(np.int32)
    lidar_data = np.reshape(lidar_data, (-1, 2))
    lidar_img = np.zeros((img_size, img_size))
    lidar_img[tuple(lidar_data.T)] = 255
    return torch.tensor(lidar_img).cuda()

# def lidar_to_img(points, img_size):
#     # pdb.set_trace()
#     lidar_data = points[:, :2]
#     lidar_data *= 9.9999
#     lidar_data -= torch.tensor((0.5 * img_size, 0.5 * img_size)).cuda()
#     lidar_data = torch.abs(lidar_data)
#     lidar_data = torch.floor(lidar_data).long()
#     lidar_data = lidar_data.view(-1, 2)
#     lidar_img = torch.zeros((img_size, img_size)).cuda()
#     lidar_img[lidar_data.permute(1, 0)] = 255
#     return lidar_img
Example 9: tobin
# Required module: import torch [as alias]
# Or: from torch import floor [as alias]
def tobin(self, target):
    indxneg = target.data[:, 0, :, :] < 0
    eps = torch.zeros(target.data[:, 0, :, :].size()).cuda()
    epsind = target.data[:, 0, :, :] == 0
    eps[epsind] += 1e-5
    angle = torch.atan(target.data[:, 1, :, :] / (target.data[:, 0, :, :] + eps))
    angle[indxneg] += np.pi
    angle += np.pi / 2  # 0 to 2pi
    angle = torch.clamp(angle, 0, 2 * np.pi - 1e-3)
    radius = torch.sqrt(target.data[:, 0, :, :] ** 2 + target.data[:, 1, :, :] ** 2)
    radius = torch.clamp(radius, 0, self.fmax - 1e-3)
    quantized_angle = torch.floor(self.abins * angle / (2 * np.pi))
    if self.quantize_strategy == 'linear':
        quantized_radius = torch.floor(self.rbins * radius / self.fmax)
    elif self.quantize_strategy == 'quadratic':
        quantized_radius = torch.floor(self.rbins * torch.sqrt(radius / self.fmax))
    else:
        raise Exception("No such quantize strategy: {}".format(self.quantize_strategy))
    quantized_target = torch.autograd.Variable(torch.cat([torch.unsqueeze(quantized_angle, 1), torch.unsqueeze(quantized_radius, 1)], dim=1))
    return quantized_target.type(torch.cuda.LongTensor)
Example 10: drop_connect
# Required module: import torch [as alias]
# Or: from torch import floor [as alias]
def drop_connect(inputs, is_training, drop_connect_rate):
    """
    Apply drop connect to random inputs in a batch.
    """
    if not is_training:
        return inputs
    keep_prob = 1 - drop_connect_rate
    # compute drop connect tensor
    batch_size = inputs.shape[0]
    random_tensor = keep_prob
    random_tensor += torch.rand(
        [batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device
    )
    binary_tensor = torch.floor(random_tensor)
    outputs = (inputs / keep_prob) * binary_tensor
    return outputs
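The floor trick turns keep_prob + U(0, 1) into a per-example Bernoulli mask: the sum lies in [keep_prob, 1 + keep_prob), so flooring gives 1 with probability keep_prob and 0 otherwise, and the division by keep_prob keeps the expected activation unchanged. A rough empirical check, with arbitrary shapes and rate:

import torch

inputs = torch.ones(1000, 3, 8, 8)
out = drop_connect(inputs, is_training=True, drop_connect_rate=0.2)
kept = (out.view(1000, -1).sum(-1) > 0).float().mean()
print(kept)  # roughly 0.8: each example in the batch survives with probability keep_prob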
Example 11: alpha_dropout
# Required module: import torch [as alias]
# Or: from torch import floor [as alias]
def alpha_dropout(
    x,
    p=0.05,
    alpha=-1.7580993408473766,
    fixedPointMean=0,
    fixedPointVar=1,
    training=False,
):
    keep_prob = 1 - p
    if keep_prob == 1 or not training:
        return x
    a = np.sqrt(fixedPointVar / (keep_prob * (
        (1 - keep_prob) * pow(alpha - fixedPointMean, 2) + fixedPointVar)))
    b = fixedPointMean - a * (
        keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
    keep_prob = 1 - p
    random_tensor = keep_prob + torch.rand(x.size())
    binary_tensor = Variable(torch.floor(random_tensor))
    x = x.mul(binary_tensor)
    ret = x + alpha * (1 - binary_tensor)
    ret.mul_(a).add_(b)
    return ret
Example 12: drop_connect
# Required module: import torch [as alias]
# Or: from torch import floor [as alias]
def drop_connect(inputs, drop_p, training):
    """ Drop connect. """
    if not training:
        return inputs * (1. - drop_p)
    batch_size = inputs.shape[0]
    random_tensor = torch.rand([batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device)
    binary_tensor = random_tensor > drop_p
    output = inputs * binary_tensor.float()
    # output = inputs / (1. - drop_p) * binary_tensor.float()
    return output

    # if not training: return inputs
    # batch_size = inputs.shape[0]
    # keep_prob = 1 - drop_p
    # random_tensor = keep_prob
    # random_tensor += torch.rand([batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device)
    # binary_tensor = torch.floor(random_tensor)
    # output = inputs / keep_prob * binary_tensor
    # return output
Example 13: map_roi_levels
# Required module: import torch [as alias]
# Or: from torch import floor [as alias]
def map_roi_levels(self, rois, num_levels):
    """Map rois to corresponding feature levels by scales.
    - scale < finest_scale * 2: level 0
    - finest_scale * 2 <= scale < finest_scale * 4: level 1
    - finest_scale * 4 <= scale < finest_scale * 8: level 2
    - scale >= finest_scale * 8: level 3
    Args:
        rois (Tensor): Input RoIs, shape (k, 5).
        num_levels (int): Total level number.
    Returns:
        Tensor: Level index (0-based) of each RoI, shape (k, )
    """
    scale = torch.sqrt(
        (rois[:, 3] - rois[:, 1] + 1) * (rois[:, 4] - rois[:, 2] + 1))
    target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))
    target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
    return target_lvls
Example 14: _joint_actions_2_action_pair
# Required module: import torch [as alias]
# Or: from torch import floor [as alias]
def _joint_actions_2_action_pair(joint_action, n_actions, use_delegate_action=True):
    if isinstance(joint_action, int):
        if use_delegate_action:
            if joint_action != 0.0:
                _action1 = (joint_action - 1.0) // n_actions
                _action2 = (joint_action - 1.0) % n_actions
            else:
                _action1 = float("nan")
                _action2 = float("nan")
        else:
            _action1 = th.floor(joint_action / n_actions)
            _action2 = (joint_action) % n_actions
        return _action1, _action2
    else:
        if use_delegate_action:
            mask = (joint_action == 0.0)
            joint_action[mask] = 1.0
            _action1 = th.floor((joint_action - 1.0) / n_actions)
            _action2 = (joint_action - 1.0) % n_actions
            _action1[mask] = float("nan")
            _action2[mask] = float("nan")
        else:
            _action1 = th.floor(joint_action / n_actions)
            _action2 = (joint_action) % n_actions
        return _action1, _action2
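With use_delegate_action=True, joint index 0 is the delegate (no-op) pair and index k >= 1 decodes to ((k - 1) // n_actions, (k - 1) % n_actions); torch.floor performs the integer division on tensors. A small hypothetical check, assuming the function above is in scope (note the tensor branch writes into its input, hence the clone):

import torch as th

n_actions = 3
joint = th.tensor([0., 1., 5., 9.])   # 0 is the delegate action
a1, a2 = _joint_actions_2_action_pair(joint.clone(), n_actions)
print(a1)  # tensor([nan, 0., 1., 2.])
print(a2)  # tensor([nan, 0., 1., 2.])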
Example 15: _joint_actions_2_action_pair_aa
# Required module: import torch [as alias]
# Or: from torch import floor [as alias]
def _joint_actions_2_action_pair_aa(joint_action, n_actions, avail_actions1, avail_actions2, use_delegate_action=True):
    joint_action = joint_action.clone()
    if use_delegate_action:
        mask = (joint_action == 0.0)
        joint_action[mask] = 1.0
        _action1 = th.floor((joint_action - 1.0) / n_actions)
        _action2 = (joint_action - 1.0) % n_actions
        _action1[mask] = float("nan")
        _action2[mask] = float("nan")
    else:
        _action1 = th.floor(joint_action / n_actions)
        _action2 = (joint_action) % n_actions
    aa_m1 = _action1 != _action1
    aa_m2 = _action2 != _action2
    _action1[aa_m1] = 0
    _action2[aa_m2] = 0
    aa1 = avail_actions1.data.gather(-1, (_action1.long()))
    aa2 = avail_actions2.data.gather(-1, (_action2.long()))
    _action1[aa1 == 0] = float("nan")
    _action2[aa2 == 0] = float("nan")
    _action1[aa_m1] = float("nan")
    _action2[aa_m2] = float("nan")
    return _action1, _action2