This article collects typical usage examples of the torch.clamp function in Python, drawn from open-source projects. If you have been wondering what torch.clamp does and how to use it, the 15 curated examples below may help. torch.clamp(input, min=None, max=None) clips every element of input into the range [min, max]; either bound may be omitted to clip on one side only.
Example 1: bbox_iou
def bbox_iou(box1, box2, x1y1x2y2=True):
    """
    Returns the IoU of two bounding boxes.
    """
    if not x1y1x2y2:
        # Transform from center/width format to corner coordinates
        b1_x1, b1_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2
        b1_y1, b1_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2
        b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2
        b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2
    else:
        # Get the corner coordinates of the bounding boxes
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]
    # Coordinates of the intersection rectangle
    inter_rect_x1 = torch.max(b1_x1, b2_x1)
    inter_rect_y1 = torch.max(b1_y1, b2_y1)
    inter_rect_x2 = torch.min(b1_x2, b2_x2)
    inter_rect_y2 = torch.min(b1_y2, b2_y2)
    # Intersection area; clamp floors it at zero for non-overlapping boxes
    inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1 + 1, min=0) * torch.clamp(
        inter_rect_y2 - inter_rect_y1 + 1, min=0
    )
    # Union area (the +1 follows the inclusive integer-pixel convention)
    b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
    b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)
    iou = inter_area / (b1_area + b2_area - inter_area + 1e-16)
    return iou
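A quick sanity check of the function above (the boxes here are made-up values, purely for illustration): the clamp is what floors the intersection area at zero for disjoint boxes, where it would otherwise go negative.

import torch

box1 = torch.tensor([[0., 0., 10., 10.]])    # x1, y1, x2, y2
box2 = torch.tensor([[5., 5., 15., 15.]])    # overlaps box1
box3 = torch.tensor([[20., 20., 30., 30.]])  # disjoint from box1

print(bbox_iou(box1, box2))  # roughly tensor([0.1748])
print(bbox_iou(box1, box3))  # tensor([0.]) -- clamp floors the negative overlap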
Example 2: bbox_iou

This variant is the same computation as Example 1, but omits the center-format branch and the stabilizing epsilon in the denominator.
def bbox_iou(box1, box2):
    """
    Returns the IoU of two bounding boxes.
    """
    # Get the corner coordinates of the bounding boxes
    b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
    b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]
    # Coordinates of the intersection rectangle
    inter_rect_x1 = torch.max(b1_x1, b2_x1)
    inter_rect_y1 = torch.max(b1_y1, b2_y1)
    inter_rect_x2 = torch.min(b1_x2, b2_x2)
    inter_rect_y2 = torch.min(b1_y2, b2_y2)
    # Intersection area
    inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1 + 1, min=0) * \
        torch.clamp(inter_rect_y2 - inter_rect_y1 + 1, min=0)
    # Union area
    b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
    b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)
    iou = inter_area / (b1_area + b2_area - inter_area)
    return iou
Example 3: F_bilinear_interp2d
def F_bilinear_interp2d(input, coords):
    """
    Bilinear interpolation of a 2d torch.autograd.Variable.
    """
    # clamp the sampling coordinates so that floor() + 1 stays in bounds
    x = torch.clamp(coords[:, :, 0], 0, input.size(1) - 2)
    x0 = x.floor()
    x1 = x0 + 1
    y = torch.clamp(coords[:, :, 1], 0, input.size(2) - 2)
    y0 = y.floor()
    y1 = y0 + 1

    # convert grid positions into flat indices via the tensor strides
    stride = torch.LongTensor(input.stride())
    x0_ix = x0.mul(stride[1]).long()
    x1_ix = x1.mul(stride[1]).long()
    y0_ix = y0.mul(stride[2]).long()
    y1_ix = y1.mul(stride[2]).long()

    input_flat = input.view(input.size(0), -1).contiguous()

    # gather the four neighbouring pixel values
    vals_00 = input_flat.gather(1, x0_ix.add(y0_ix).detach())
    vals_10 = input_flat.gather(1, x1_ix.add(y0_ix).detach())
    vals_01 = input_flat.gather(1, x0_ix.add(y1_ix).detach())
    vals_11 = input_flat.gather(1, x1_ix.add(y1_ix).detach())

    # interpolation weights from the fractional parts
    xd = x - x0
    yd = y - y0
    xm = 1 - xd
    ym = 1 - yd

    x_mapped = (vals_00.mul(xm).mul(ym) +
                vals_10.mul(xd).mul(ym) +
                vals_01.mul(xm).mul(yd) +
                vals_11.mul(xd).mul(yd))

    return x_mapped.view_as(input)
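The two clamps at the top of this function are what keep the gather indices valid: after clamping a coordinate to [0, size - 2], both floor() and floor() + 1 are guaranteed to be in bounds. A minimal sketch with a hypothetical spatial extent:

import torch

size = 8                                 # hypothetical spatial extent
coords = torch.tensor([-1.3, 2.4, 9.7])
x = torch.clamp(coords, 0, size - 2)     # tensor([0.0000, 2.4000, 6.0000])
x0, x1 = x.floor(), x.floor() + 1        # x1 <= size - 1, so always in bounds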
Example 4: sample_from_discretized_mix_logistic_1d
def sample_from_discretized_mix_logistic_1d(l, nr_mix):
    # PyTorch ordering: NCHW -> NHWC
    l = l.permute(0, 2, 3, 1)
    ls = [int(y) for y in l.size()]
    xs = ls[:-1] + [1]
    # unpack parameters
    logit_probs = l[:, :, :, :nr_mix]
    l = l[:, :, :, nr_mix:].contiguous().view(xs + [nr_mix * 2])  # mean, scale
    # sample the mixture indicator via the Gumbel-max trick
    temp = torch.FloatTensor(logit_probs.size())
    if l.is_cuda:
        temp = temp.cuda()
    temp.uniform_(1e-5, 1. - 1e-5)
    temp = logit_probs.data - torch.log(-torch.log(temp))
    _, argmax = temp.max(dim=3)
    one_hot = to_one_hot(argmax, nr_mix)
    sel = one_hot.view(xs[:-1] + [1, nr_mix])
    # select logistic parameters; the clamp keeps the log-scale from collapsing
    means = torch.sum(l[:, :, :, :, :nr_mix] * sel, dim=4)
    log_scales = torch.clamp(torch.sum(
        l[:, :, :, :, nr_mix:2 * nr_mix] * sel, dim=4), min=-7.)
    # sample from the selected logistic, then clamp into the valid pixel range
    u = torch.FloatTensor(means.size())
    if l.is_cuda:
        u = u.cuda()
    u.uniform_(1e-5, 1. - 1e-5)
    u = Variable(u)
    x = means + torch.exp(log_scales) * (torch.log(u) - torch.log(1. - u))
    x0 = torch.clamp(torch.clamp(x[:, :, :, 0], min=-1.), max=1.)
    out = x0.unsqueeze(1)
    return out
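As a side note, the nested clamp on x can be collapsed into a single call with both bounds; a small sketch showing the two forms are equivalent:

import torch

x = torch.randn(16) * 3
a = torch.clamp(torch.clamp(x, min=-1.), max=1.)
b = torch.clamp(x, min=-1., max=1.)
assert torch.equal(a, b)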
Example 5: forward
def forward(self, input) -> torch.FloatTensor:
    """Preprocess the input matrix.

    :param input: input tensor (np.ndarray, rlt.FeatureVector, or torch tensor)
    """
    if isinstance(input, np.ndarray):
        input = torch.from_numpy(input).type(self.dtype)
    if isinstance(input, rlt.FeatureVector):
        input = input.float_features.type(self.dtype)

    # ONNX doesn't support != yet
    not_missing_input = (
        self.one_tensor.float() - (input == self.missing_tensor).float()
    )
    feature_starts = self._get_type_boundaries()

    outputs = []
    for i, feature_type in enumerate(FEATURE_TYPES):
        begin_index = feature_starts[i]
        if (i + 1) == len(FEATURE_TYPES):
            end_index = len(self.normalization_parameters)
        else:
            end_index = feature_starts[i + 1]
        if begin_index == end_index:
            continue  # No features of this type
        if feature_type == ENUM:
            # Process one-at-a-time
            for j in range(begin_index, end_index):
                norm_params = self.normalization_parameters[self.sorted_features[j]]
                new_output = self._preprocess_feature_single_column(
                    j, input[:, j : j + 1], norm_params
                )
                new_output *= not_missing_input[:, j : j + 1]
                self._check_preprocessing_output(new_output, [norm_params])
                outputs.append(new_output)
        else:
            norm_params = []
            for f in self.sorted_features[begin_index:end_index]:
                norm_params.append(self.normalization_parameters[f])
            new_output = self._preprocess_feature_multi_column(
                begin_index, input[:, begin_index:end_index], norm_params
            )
            new_output *= not_missing_input[:, begin_index:end_index]
            self._check_preprocessing_output(new_output, norm_params)
            outputs.append(new_output)

    def wrap(output):
        if self.typed_output:
            return rlt.FeatureVector(float_features=output)
        else:
            return output

    if len(outputs) == 1:
        return wrap(torch.clamp(outputs[0], MIN_FEATURE_VALUE, MAX_FEATURE_VALUE))
    return wrap(
        torch.clamp(torch.cat(outputs, dim=1), MIN_FEATURE_VALUE, MAX_FEATURE_VALUE)
    )
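The helpers and normalization parameters here are project-specific, so the full forward pass is not reproducible standalone. MIN_FEATURE_VALUE and MAX_FEATURE_VALUE are module-level constants in the original project; the values below are stand-ins chosen only to illustrate what the final clamp does to the concatenated feature matrix:

import torch

MIN_FEATURE_VALUE, MAX_FEATURE_VALUE = -10.0, 10.0   # hypothetical bounds
features = torch.tensor([[-50.0, 0.3, 99.0]])
print(torch.clamp(features, MIN_FEATURE_VALUE, MAX_FEATURE_VALUE))
# tensor([[-10.0000,   0.3000,  10.0000]])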
Example 6: train_actor_critic
def train_actor_critic(actor, critic, memory, actor_optim, critic_optim, args):
    memory = np.array(memory)
    states = np.vstack(memory[:, 0])
    actions = list(memory[:, 1])
    rewards = list(memory[:, 2])
    masks = list(memory[:, 3])

    old_values = critic(torch.Tensor(states))
    returns, advants = get_gae(rewards, masks, old_values, args)

    mu, std = actor(torch.Tensor(states))
    old_policy = log_prob_density(torch.Tensor(actions), mu, std)

    criterion = torch.nn.MSELoss()
    n = len(states)
    arr = np.arange(n)

    for _ in range(args.ppo_update_num):
        np.random.shuffle(arr)
        for i in range(n // args.batch_size):
            batch_index = arr[args.batch_size * i : args.batch_size * (i + 1)]
            batch_index = torch.LongTensor(batch_index)

            inputs = torch.Tensor(states)[batch_index]
            actions_samples = torch.Tensor(actions)[batch_index]
            returns_samples = returns.unsqueeze(1)[batch_index]
            advants_samples = advants.unsqueeze(1)[batch_index]
            oldvalue_samples = old_values[batch_index].detach()

            # clipped value loss: keep the new value estimate inside a
            # trust region around the old one
            values = critic(inputs)
            clipped_values = oldvalue_samples + \
                torch.clamp(values - oldvalue_samples,
                            -args.clip_param,
                            args.clip_param)
            critic_loss1 = criterion(clipped_values, returns_samples)
            critic_loss2 = criterion(values, returns_samples)
            critic_loss = torch.max(critic_loss1, critic_loss2).mean()

            # clipped surrogate objective (PPO)
            loss, ratio, entropy = surrogate_loss(actor, advants_samples, inputs,
                                                  old_policy.detach(), actions_samples,
                                                  batch_index)
            clipped_ratio = torch.clamp(ratio,
                                        1.0 - args.clip_param,
                                        1.0 + args.clip_param)
            clipped_loss = clipped_ratio * advants_samples
            actor_loss = -torch.min(loss, clipped_loss).mean()

            loss = actor_loss + 0.5 * critic_loss - 0.001 * entropy

            critic_optim.zero_grad()
            loss.backward(retain_graph=True)
            critic_optim.step()

            actor_optim.zero_grad()
            loss.backward()
            actor_optim.step()
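Both clamp calls above implement PPO-style clipping. A self-contained sketch of the same two tricks with random stand-in tensors (not real rollout data):

import torch

clip_param = 0.2
ratio = torch.exp(torch.randn(8, 1))       # pi_new / pi_old per sample
advantages = torch.randn(8, 1)

# clipped surrogate objective: cap how far the ratio can push the update
surr1 = ratio * advantages
surr2 = torch.clamp(ratio, 1.0 - clip_param, 1.0 + clip_param) * advantages
actor_loss = -torch.min(surr1, surr2).mean()

# clipped value loss: keep the new value estimate near the old one
old_values = torch.randn(8, 1)
values = old_values + torch.randn(8, 1)
clipped_values = old_values + torch.clamp(values - old_values,
                                          -clip_param, clip_param)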
Example 7: forward
def forward(self, x=None, warmup=1., inf_net=None):
    outputs = {}
    B = x.shape[0]

    if inf_net is None:
        z, logits = self.q.sample(x)
    else:
        z, logqz = inf_net.sample(x)

    # analytic KL between q(z|x) and the uniform Bernoulli prior p(z) = .5;
    # the clamp keeps both log terms finite when the logits saturate
    probs_q = torch.sigmoid(logits)
    probs_q = torch.clamp(probs_q, min=.00000001, max=.9999999)
    probs_p = torch.ones(B, self.z_size).cuda() * .5
    KL = probs_q * torch.log(probs_q / probs_p) \
        + (1 - probs_q) * torch.log((1 - probs_q) / (1 - probs_p))
    KL = torch.sum(KL, dim=1)

    # decode image
    x_hat = self.generator.forward(z)
    alpha = torch.sigmoid(x_hat)
    beta = Beta(alpha * self.beta_scale, (1. - alpha) * self.beta_scale)
    # add uniform dequantization noise, clamped into the open interval (0, 1)
    x_noise = torch.clamp(x + torch.FloatTensor(x.shape).uniform_(0., 1. / 256.).cuda(),
                          min=1e-5, max=1 - 1e-5)
    logpx = beta.log_prob(x_noise)
    logpx = torch.sum(logpx.view(B, -1), 1)

    log_ws = logpx - KL

    outputs['logpx'] = torch.mean(logpx)
    outputs['x_recon'] = alpha
    outputs['welbo'] = torch.mean(logpx + warmup * KL)
    outputs['elbo'] = torch.mean(log_ws)
    outputs['logws'] = log_ws
    outputs['z'] = z
    outputs['logpz'] = torch.zeros(1)
    outputs['logqz'] = torch.mean(KL)
    return outputs
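The KL block above is the analytic Bernoulli-vs-uniform formula. Pulled out as a standalone helper (a sketch, with the same kind of clamp guarding against log(0) when the logits saturate):

import torch

def bernoulli_kl_to_uniform(logits):
    q = torch.clamp(torch.sigmoid(logits), min=1e-8, max=1. - 1e-7)
    p = 0.5
    kl = q * torch.log(q / p) + (1 - q) * torch.log((1 - q) / (1 - p))
    return kl.sum(dim=1)

print(bernoulli_kl_to_uniform(torch.tensor([[0.0, 10.0, -10.0]])))
# about 2 * log(2): zero for q = 0.5, roughly log(2) for each saturated unit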
Example 8: F_trilinear_interp3d
def F_trilinear_interp3d(input, coords):
    """
    Trilinear interpolation of a 3D image.
    """
    # clamp, then take floor/ceil of the x coords
    x = torch.clamp(coords[:, 0], 0, input.size(1) - 2)
    x0 = x.floor()
    x1 = x0 + 1
    # clamp, then take floor/ceil of the y coords
    y = torch.clamp(coords[:, 1], 0, input.size(2) - 2)
    y0 = y.floor()
    y1 = y0 + 1
    # clamp, then take floor/ceil of the z coords
    z = torch.clamp(coords[:, 2], 0, input.size(3) - 2)
    z0 = z.floor()
    z1 = z0 + 1

    stride = torch.LongTensor(input.stride())[1:]
    x0_ix = x0.mul(stride[0]).long()
    x1_ix = x1.mul(stride[0]).long()
    y0_ix = y0.mul(stride[1]).long()
    y1_ix = y1.mul(stride[1]).long()
    z0_ix = z0.mul(stride[2]).long()
    z1_ix = z1.mul(stride[2]).long()

    input_flat = th_flatten(input)

    vals_000 = input_flat[x0_ix.add(y0_ix).add(z0_ix).detach()]
    vals_100 = input_flat[x1_ix.add(y0_ix).add(z0_ix).detach()]
    vals_010 = input_flat[x0_ix.add(y1_ix).add(z0_ix).detach()]
    vals_001 = input_flat[x0_ix.add(y0_ix).add(z1_ix).detach()]
    vals_101 = input_flat[x1_ix.add(y0_ix).add(z1_ix).detach()]
    vals_011 = input_flat[x0_ix.add(y1_ix).add(z1_ix).detach()]
    vals_110 = input_flat[x1_ix.add(y1_ix).add(z0_ix).detach()]
    vals_111 = input_flat[x1_ix.add(y1_ix).add(z1_ix).detach()]

    xd = x - x0
    yd = y - y0
    zd = z - z0
    xm = 1 - xd
    ym = 1 - yd
    zm = 1 - zd

    x_mapped = (vals_000.mul(xm).mul(ym).mul(zm) +
                vals_100.mul(xd).mul(ym).mul(zm) +
                vals_010.mul(xm).mul(yd).mul(zm) +
                vals_001.mul(xm).mul(ym).mul(zd) +
                vals_101.mul(xd).mul(ym).mul(zd) +
                vals_011.mul(xm).mul(yd).mul(zd) +
                vals_110.mul(xd).mul(yd).mul(zm) +
                vals_111.mul(xd).mul(yd).mul(zd))

    return x_mapped.view_as(input)
Example 9: F_batch_trilinear_interp3d
def F_batch_trilinear_interp3d(input, coords):
    """
    input : torch.Tensor
        5D tensor; trilinear interpolation runs over dimensions 2, 3 and 4
    coords : torch.Tensor
        size = (N, npoints, 3)
    """
    x = torch.clamp(coords[:, :, 0], 0, input.size(2) - 2)
    x0 = x.floor()
    x1 = x0 + 1
    y = torch.clamp(coords[:, :, 1], 0, input.size(3) - 2)
    y0 = y.floor()
    y1 = y0 + 1
    z = torch.clamp(coords[:, :, 2], 0, input.size(4) - 2)
    z0 = z.floor()
    z1 = z0 + 1

    stride = torch.LongTensor(input.stride())
    x0_ix = x0.mul(stride[2]).long()
    x1_ix = x1.mul(stride[2]).long()
    y0_ix = y0.mul(stride[3]).long()
    y1_ix = y1.mul(stride[3]).long()
    z0_ix = z0.mul(stride[4]).long()
    z1_ix = z1.mul(stride[4]).long()

    input_flat = input.contiguous().view(input.size(0), -1)

    vals_000 = input_flat.gather(1, x0_ix.add(y0_ix).add(z0_ix).detach())
    vals_100 = input_flat.gather(1, x1_ix.add(y0_ix).add(z0_ix).detach())
    vals_010 = input_flat.gather(1, x0_ix.add(y1_ix).add(z0_ix).detach())
    vals_001 = input_flat.gather(1, x0_ix.add(y0_ix).add(z1_ix).detach())
    vals_101 = input_flat.gather(1, x1_ix.add(y0_ix).add(z1_ix).detach())
    vals_011 = input_flat.gather(1, x0_ix.add(y1_ix).add(z1_ix).detach())
    vals_110 = input_flat.gather(1, x1_ix.add(y1_ix).add(z0_ix).detach())
    vals_111 = input_flat.gather(1, x1_ix.add(y1_ix).add(z1_ix).detach())

    xd = x - x0
    yd = y - y0
    zd = z - z0
    xm = 1 - xd
    ym = 1 - yd
    zm = 1 - zd

    x_mapped = (vals_000.mul(xm).mul(ym).mul(zm) +
                vals_100.mul(xd).mul(ym).mul(zm) +
                vals_010.mul(xm).mul(yd).mul(zm) +
                vals_001.mul(xm).mul(ym).mul(zd) +
                vals_101.mul(xd).mul(ym).mul(zd) +
                vals_011.mul(xm).mul(yd).mul(zd) +
                vals_110.mul(xd).mul(yd).mul(zm) +
                vals_111.mul(xd).mul(yd).mul(zd))

    return x_mapped.view_as(input)
Example 10: encode
def encode(self, x):
    x = self.act_func(self.conv1(x))
    x = self.act_func(self.conv2(x))
    x = self.act_func(self.conv3(x))
    x = x.view(-1, self.intermediate_size)
    h1 = self.act_func(self.fc1(x))
    h2 = self.fc2(h1)
    mean = h2[:, :self.z_size]
    logvar = h2[:, self.z_size:]
    # clamping logvar solves the NaN-gradient problem
    logvar = torch.clamp(logvar, min=-20.)
    self.mean = mean
    self.logvar = logvar
    return mean, logvar
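The clamp here matters because exp(logvar) underflows for very negative values, and the resulting zero standard deviation produces inf/NaN as soon as the loss divides by it or takes its log. A toy illustration with hand-picked numbers:

import torch

logvar = torch.tensor([-250.0])
print(torch.exp(0.5 * logvar))                         # tensor([0.]) -- float32 underflow
print(torch.exp(0.5 * torch.clamp(logvar, min=-20.)))  # tensor([4.5400e-05]), strictly positive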
Example 11: calculate_distance_term
def calculate_distance_term(means, n_objects, delta_d, norm=2, usegpu=True):
    """means: bs, n_instances, n_filters"""
    bs, n_instances, n_filters = means.size()

    dist_term = 0.0
    for i in range(bs):
        _n_objects_sample = n_objects[i]
        if _n_objects_sample <= 1:
            continue

        _mean_sample = means[i, :_n_objects_sample, :]  # n_objects, n_filters
        means_1 = _mean_sample.unsqueeze(1).expand(
            _n_objects_sample, _n_objects_sample, n_filters)
        means_2 = means_1.permute(1, 0, 2)

        diff = means_1 - means_2  # n_objects, n_objects, n_filters
        _norm = torch.norm(diff, norm, 2)

        margin = 2 * delta_d * (1.0 - torch.eye(_n_objects_sample))
        if usegpu:
            margin = margin.cuda()
        margin = Variable(margin)

        _dist_term_sample = torch.sum(
            torch.clamp(margin - _norm, min=0.0) ** 2)
        _dist_term_sample = _dist_term_sample / \
            (_n_objects_sample * (_n_objects_sample - 1))
        dist_term += _dist_term_sample

    dist_term = dist_term / bs
    return dist_term
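The core of this distance term is a hinge: pairs of cluster means that are already farther apart than the margin contribute nothing. Toy numbers for illustration:

import torch

delta_d = 1.5
margin = 2 * delta_d
dists = torch.tensor([0.5, 2.0, 4.0])           # pairwise distances between means
penalty = torch.clamp(margin - dists, min=0.0) ** 2
print(penalty)                                   # tensor([6.2500, 1.0000, 0.0000])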
Example 12: hinge_loss
def hinge_loss(positive_predictions, negative_predictions, mask=None):
    """
    Hinge pairwise loss function.

    Parameters
    ----------
    positive_predictions: tensor
        Tensor containing predictions for known positive items.
    negative_predictions: tensor
        Tensor containing predictions for sampled negative items.
    mask: tensor, optional
        A binary tensor used to zero the loss from some entries
        of the loss tensor.

    Returns
    -------
    loss, float
        The mean value of the loss function.
    """
    loss = torch.clamp(negative_predictions -
                       positive_predictions +
                       1.0, 0.0)
    if mask is not None:
        mask = mask.float()
        loss = loss * mask
        return loss.sum() / mask.sum()
    return loss.mean()
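A usage sketch with made-up scores: only pairs where the negative item scores within the 1.0 margin of the positive item contribute to the loss.

import torch

positive = torch.tensor([2.0, 0.5, -1.0])
negative = torch.tensor([0.0, 0.4, 1.0])
print(hinge_loss(positive, negative))
# clamp(negative - positive + 1, min=0) = [0.0, 0.9, 3.0], so the mean is tensor(1.3000)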
Example 13: l2_pixel_loss
def l2_pixel_loss(self, matches_b, non_matches_b, M_pixel=None):
    """
    Apply l2 loss in pixel space.

    This weights non-matches more if they are "far away" in pixel space.

    :param matches_b: A torch.LongTensor with shape torch.Shape([num_matches])
    :param non_matches_b: A torch.LongTensor with shape torch.Shape([num_non_matches])
    :return: l2 loss per sample, a torch.FloatTensor with shape torch.Shape([num_non_matches])
    """
    if M_pixel is None:
        M_pixel = self._config['M_pixel']

    # integer division so that repeat() receives an int
    # (the original code relied on Python 2 division semantics here)
    num_non_matches_per_match = len(non_matches_b) // len(matches_b)

    ground_truth_pixels_for_non_matches_b = torch.t(
        matches_b.repeat(num_non_matches_per_match, 1)).contiguous().view(-1, 1)
    ground_truth_u_v_b = self.flattened_pixel_locations_to_u_v(
        ground_truth_pixels_for_non_matches_b)
    sampled_u_v_b = self.flattened_pixel_locations_to_u_v(non_matches_b.unsqueeze(1))

    # each element is in [0, 1]; it reaches 1 once a non-match is at least
    # M_pixel away in L2 norm in pixel space
    norm_degree = 2
    squared_l2_pixel_loss = 1.0 / M_pixel * torch.clamp(
        (ground_truth_u_v_b - sampled_u_v_b).float().norm(norm_degree, 1), max=M_pixel)

    return squared_l2_pixel_loss, ground_truth_u_v_b, sampled_u_v_b
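The clamp with only a max bound makes the weight saturate at 1.0 once a non-match is at least M_pixel away. A toy sketch (the M_pixel value below is hypothetical):

import torch

M_pixel = 10.0                                   # hypothetical config value
pixel_dist = torch.tensor([2.0, 10.0, 40.0])
weight = 1.0 / M_pixel * torch.clamp(pixel_dist, max=M_pixel)
print(weight)                                    # tensor([0.2000, 1.0000, 1.0000])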
Example 14: log_Bernoulli
def log_Bernoulli(x, mean, average=False, dim=None):
    probs = torch.clamp(mean, min=min_epsilon, max=max_epsilon)
    log_bernoulli = x * torch.log(probs) + (1. - x) * torch.log(1. - probs)
    if average:
        return torch.mean(log_bernoulli, dim)
    else:
        return torch.sum(log_bernoulli, dim)
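min_epsilon and max_epsilon are module-level globals in the original source; plausible stand-in values are shown below so the function runs standalone. The clamp keeps log(probs) and log(1 - probs) finite when mean hits exactly 0 or 1:

import torch

min_epsilon = 1e-5           # assumed values, not taken from the original module
max_epsilon = 1. - 1e-5

x = torch.tensor([[1., 0., 1.]])
mean = torch.tensor([[0.9, 0.2, 1.0]])   # the trailing 1.0 would give log(0) unclamped
print(log_Bernoulli(x, mean, dim=1))     # roughly tensor([-0.3285])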
Example 15: pdist
def pdist(self, fX):
    """Compute pdist à-la scipy.spatial.distance.pdist

    Parameters
    ----------
    fX : (n, d) torch.Tensor
        Embeddings.

    Returns
    -------
    distances : (n * (n-1) / 2,) torch.Tensor
        Condensed pairwise distance matrix
    """
    n_sequences, _ = fX.size()
    distances = []
    for i in range(n_sequences - 1):
        if self.metric in ('cosine', 'angular'):
            d = 1. - F.cosine_similarity(
                fX[i, :].expand(n_sequences - 1 - i, -1),
                fX[i + 1:, :], dim=1, eps=1e-8)
            if self.metric == 'angular':
                d = torch.acos(torch.clamp(1. - d, -1 + 1e-6, 1 - 1e-6))
        elif self.metric == 'euclidean':
            d = F.pairwise_distance(
                fX[i, :].expand(n_sequences - 1 - i, -1),
                fX[i + 1:, :], p=2, eps=1e-06).view(-1)
        distances.append(d)
    return torch.cat(distances)
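The clamp before acos is a numerical guard: floating-point cosine similarity can land just outside [-1, 1], where acos returns NaN. A minimal illustration with a hand-picked value:

import torch

cos = torch.tensor([1.001])                               # slightly out of range
print(torch.acos(cos))                                    # tensor([nan])
print(torch.acos(torch.clamp(cos, -1 + 1e-6, 1 - 1e-6)))  # a finite angle instead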