This article collects typical usage examples of the torch.histc method in Python. If you are wondering what exactly torch.histc does, how to call it, or what real code that uses it looks like, the curated examples below may help. You can also explore further usage examples from the torch module, to which this method belongs.
The following shows 15 code examples of torch.histc, sorted by popularity by default.
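Before the examples, here is a minimal sketch of what torch.histc computes (the tensor values are purely illustrative): torch.histc(input, bins, min, max) counts how many elements of input fall into each of bins equal-width buckets spanning [min, max]; elements outside that range are ignored.

import torch

x = torch.tensor([0., 1., 1., 2., 3., 3., 3.])
hist = torch.histc(x, bins=4, min=0, max=4)
print(hist)  # tensor([1., 2., 1., 3.]) -- one 0, two 1s, one 2, three 3s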
Example 1: batch_intersection_union
# Required module: import torch [as alias]
# Or: from torch import histc [as alias]
def batch_intersection_union(output, target, nclass):
    """mIoU"""
    # inputs are torch tensors: output is 4D (NxCxHxW), target is 3D (NxHxW)
    # the category -1 is the ignored class, typically background / boundary
    mini = 1
    maxi = nclass
    nbins = nclass
    predict = torch.argmax(output, 1) + 1
    target = target.float() + 1
    predict = predict.float() * (target > 0).float()
    intersection = predict * (predict == target).float()
    # areas of intersection and union
    area_inter = torch.histc(intersection, bins=nbins, min=mini, max=maxi)
    area_pred = torch.histc(predict, bins=nbins, min=mini, max=maxi)
    area_lab = torch.histc(target, bins=nbins, min=mini, max=maxi)
    area_union = area_pred + area_lab - area_inter
    assert torch.sum(area_inter > area_union).item() == 0, \
        "Intersection area should be smaller than Union area"
    return area_inter.float(), area_union.float()
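A hedged usage sketch for the function above (shapes, class count, and the random inputs are made up for illustration):

output = torch.randn(2, 4, 8, 8)            # fake network scores: N x nclass x H x W
target = torch.randint(-1, 4, (2, 8, 8))    # fake labels in [-1, 3]; -1 marks ignored pixels
inter, union = batch_intersection_union(output, target, nclass=4)
miou = (inter / (union + 1e-10)).mean()     # mean IoU over the 4 classes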
Example 2: batch_intersection_union
# Required module: import torch [as alias]
# Or: from torch import histc [as alias]
def batch_intersection_union(output, target, nclass):
    """mIoU"""
    # inputs are torch tensors: output is 4D (NxCxHxW), target is 3D (NxHxW)
    mini = 1
    maxi = nclass
    nbins = nclass
    predict = torch.argmax(output, 1) + 1
    target = target.float() + 1
    predict = predict.float() * (target > 0).float()
    intersection = predict * (predict == target).float()
    # areas of intersection and union
    # the zero elements in `intersection` are where this differs from np.bincount;
    # labelling boundary pixels -1 is necessary (the +1 shift maps them to 0, outside [mini, maxi])
    area_inter = torch.histc(intersection.cpu(), bins=nbins, min=mini, max=maxi)
    area_pred = torch.histc(predict.cpu(), bins=nbins, min=mini, max=maxi)
    area_lab = torch.histc(target.cpu(), bins=nbins, min=mini, max=maxi)
    area_union = area_pred + area_lab - area_inter
    assert torch.sum(area_inter > area_union).item() == 0, "Intersection area should be smaller than Union area"
    return area_inter.float(), area_union.float()
Example 3: get_selabel_vector
# Required module: import torch [as alias]
# Or: from torch import histc [as alias]
def get_selabel_vector(target, nclass):
    r"""Get the SE-Loss label for a batch
    Args:
        target: label 3D tensor (BxHxW)
        nclass: number of categories (int)
    Output:
        2D tensor (BxnClass)
    """
    batch = target.size(0)
    tvect = torch.zeros(batch, nclass)
    for i in range(batch):
        hist = torch.histc(target[i].data.float(),
                           bins=nclass, min=0,
                           max=nclass - 1)
        vect = hist > 0
        tvect[i] = vect
    return tvect
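A hedged usage sketch for get_selabel_vector (the label tensor is made up; row i of the result marks which classes appear in image i):

target = torch.tensor([[[0, 2], [2, 2]],
                       [[1, 1], [3, 3]]])   # fake BxHxW labels, B=2, H=W=2
tvect = get_selabel_vector(target, nclass=4)
# tvect[0] == [1., 0., 1., 0.]  (classes 0 and 2 occur in image 0)
# tvect[1] == [0., 1., 0., 1.]  (classes 1 and 3 occur in image 1)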
Example 4: cal_hist
# Required module: import torch [as alias]
# Or: from torch import histc [as alias]
def cal_hist(image):
    """
    Compute the cumulative histogram (CDF) for each channel of an image.
    """
    hists = []
    for i in range(0, 3):
        channel = image[i]
        # channel = image[i, :, :]
        channel = torch.from_numpy(channel)
        # hist, _ = np.histogram(channel, bins=256, range=(0,255))
        hist = torch.histc(channel, bins=256, min=0, max=256)
        hist = hist.numpy()
        # refHist = hist.view(256, 1)
        total = hist.sum()
        pdf = [v / total for v in hist]
        for j in range(1, 256):
            pdf[j] = pdf[j - 1] + pdf[j]
        hists.append(pdf)
    return hists
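A hedged usage sketch for cal_hist (the image array is made up; the function expects a channel-first numpy array with pixel values in [0, 255]):

import numpy as np

img = np.random.randint(0, 256, size=(3, 64, 64)).astype(np.float32)  # fake CxHxW image
cdfs = cal_hist(img)                  # list of three per-channel CDFs, each of length 256
assert abs(cdfs[0][-1] - 1.0) < 1e-4  # every CDF ends at (approximately) 1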
Example 5: intersectionAndUnion
# Required module: import torch [as alias]
# Or: from torch import histc [as alias]
def intersectionAndUnion(batch_data, pred, numClass):
    (imgs, segs, infos) = batch_data
    _, preds = torch.max(pred.data.cpu(), dim=1)

    # compute area of intersection
    intersect = preds.clone()
    intersect[torch.ne(preds, segs)] = -1
    area_intersect = torch.histc(intersect.float(),
                                 bins=numClass,
                                 min=0,
                                 max=numClass - 1)

    # compute area of union
    preds[torch.lt(segs, 0)] = -1
    area_pred = torch.histc(preds.float(),
                            bins=numClass,
                            min=0,
                            max=numClass - 1)
    area_lab = torch.histc(segs.float(),
                           bins=numClass,
                           min=0,
                           max=numClass - 1)
    area_union = area_pred + area_lab - area_intersect
    return area_intersect, area_union
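A hedged usage sketch for intersectionAndUnion (the batch tuple and shapes are invented; segs uses -1 for ignored pixels):

imgs = torch.randn(2, 3, 16, 16)               # fake images (not used by the function)
segs = torch.randint(-1, 5, (2, 16, 16))       # fake labels in [-1, 4]
infos = [None, None]                           # placeholder metadata
pred = torch.randn(2, 5, 16, 16)               # fake scores: N x numClass x H x W
inter, union = intersectionAndUnion((imgs, segs, infos), pred, numClass=5)
iou = inter.float() / (union.float() + 1e-10)  # per-class IoU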
Example 6: intersection_union
# Required module: import torch [as alias]
# Or: from torch import histc [as alias]
def intersection_union(gt, pred, correct, n_class):
    intersect = pred * correct
    area_intersect = torch.histc(intersect, bins=n_class, min=1, max=n_class)
    area_pred = torch.histc(pred, bins=n_class, min=1, max=n_class)
    area_gt = torch.histc(gt, bins=n_class, min=1, max=n_class)
    # intersect = intersect.detach().to('cpu').numpy()
    # pred = pred.detach().to('cpu').numpy()
    # gt = gt.detach().to('cpu').numpy()
    # area_intersect, _ = np.histogram(intersect, bins=n_class, range=(1, n_class))
    # area_pred, _ = np.histogram(pred, bins=n_class, range=(1, n_class))
    # area_gt, _ = np.histogram(gt, bins=n_class, range=(1, n_class))
    area_union = area_pred + area_gt - area_intersect
    return area_intersect, area_union
Example 7: forward
# Required module: import torch [as alias]
# Or: from torch import histc [as alias]
def forward(self, feat_t0, feat_t1, ground_truth):
    n, c, h, w = feat_t0.data.shape
    out_t0_rz = torch.transpose(feat_t0.view(c, h * w), 1, 0)
    out_t1_rz = torch.transpose(feat_t1.view(c, h * w), 1, 0)
    gt_np = ground_truth.view(h * w).data.cpu().numpy()
    #### inspired by the source code of the Histogram loss ####
    ### get the positions of all positive pairs and negative pairs ###
    pos_inds_np, neg_inds_np = np.squeeze(np.where(gt_np == 0), 1), np.squeeze(np.where(gt_np != 0), 1)
    pos_size, neg_size = pos_inds_np.shape[0], neg_inds_np.shape[0]
    pos_inds, neg_inds = torch.from_numpy(pos_inds_np).cuda(), torch.from_numpy(neg_inds_np).cuda()
    ### get similarities (l2 distances) for all positions ###
    distance = torch.squeeze(self.various_distance(out_t0_rz, out_t1_rz), dim=1)
    ### build similarity histograms of positive pairs and negative pairs ###
    pos_dist_ls, neg_dist_ls = distance[pos_inds], distance[neg_inds]
    pos_dist_ls_t, neg_dist_ls_t = torch.from_numpy(pos_dist_ls.data.cpu().numpy()), torch.from_numpy(neg_dist_ls.data.cpu().numpy())
    hist_pos = Variable(torch.histc(pos_dist_ls_t, bins=100, min=0, max=1) / pos_size, requires_grad=True)
    hist_neg = Variable(torch.histc(neg_dist_ls_t, bins=100, min=0, max=1) / neg_size, requires_grad=True)
    loss = self.distance(hist_pos, hist_neg)
    return loss
Example 8: forward
# Required module: import torch [as alias]
# Or: from torch import histc [as alias]
def forward(self, y, batch):
    if self.use_cuda:
        hist = Variable(
            torch.histc(y.cpu().data.float(), bins=self.num_classes, min=0, max=self.num_classes) + 1
        ).cuda()
    else:
        hist = Variable(
            torch.histc(y.data.float(), bins=self.num_classes, min=0, max=self.num_classes) + 1
        )

    centers_count = hist.index_select(0, y.long())  # 1 + how many examples of y[i]-th class

    batch_size = batch.size()[0]
    embeddings = batch.view(batch_size, -1)
    assert embeddings.size()[1] == self.embedding_size
    centers_pred = self.centers.index_select(0, y.long())
    diff = embeddings - centers_pred
    loss = 1 / 2.0 * (diff.pow(2).sum(1) / centers_count).sum()
    return loss
Example 9: accuracy
# Required module: import torch [as alias]
# Or: from torch import histc [as alias]
def accuracy(pred_cls, true_cls, nclass=79):
    """
    Function to calculate accuracy (TP / (TP + FP + TN + FN))
    :param pytorch.Tensor pred_cls: network prediction (categorical)
    :param pytorch.Tensor true_cls: ground truth (categorical)
    :param int nclass: number of classes
    :return: arrays of per-class true-positive counts and per-class ground-truth counts
    """
    positive = torch.histc(true_cls.cpu().float(), bins=nclass, min=0, max=nclass, out=None)
    per_cls_counts = []
    tpos = []
    for i in range(1, nclass):
        true_positive = ((pred_cls == i).float() + (true_cls == i).float()).eq(2).sum().item()
        tpos.append(true_positive)
        per_cls_counts.append(positive[i])
    return np.array(tpos), np.array(per_cls_counts)
##
# Plotting functions
##
Example 10: forward
# Required module: import torch [as alias]
# Or: from torch import histc [as alias]
def forward(self, inputs, target):
    """
    :param inputs: predictions (N, C, H, W)
    :param target: target distribution (N, C, H, W)
    :return: loss with image-wise weighting factor
    """
    assert inputs.size() == target.size()
    mask = (target != self.ignore_index)
    _, argpred = torch.max(inputs, 1)
    weights = []
    batch_size = inputs.size(0)
    for i in range(batch_size):
        hist = torch.histc(argpred[i].cpu().data.float(),
                           bins=self.num_class, min=0,
                           max=self.num_class - 1).float()
        weight = (1 / torch.max(torch.pow(hist, self.ratio) * torch.pow(hist.sum(), 1 - self.ratio), torch.ones(1))).to(argpred.device)[argpred[i]].detach()
        weights.append(weight)
    weights = torch.stack(weights, dim=0)

    log_likelihood = F.log_softmax(inputs, dim=1)
    loss = torch.sum((torch.mul(-log_likelihood, target) * weights)[mask]) / (batch_size * self.num_class)
    return loss
Example 11: _get_batch_label_vector
# Required module: import torch [as alias]
# Or: from torch import histc [as alias]
def _get_batch_label_vector(target, nclass):
    # target is a 3D Variable BxHxW, output is 2D BxnClass
    batch = target.size(0)
    tvect = Variable(torch.zeros(batch, nclass))
    for i in range(batch):
        hist = torch.histc(target[i].cpu().data.float(),
                           bins=nclass, min=0,
                           max=nclass - 1)
        vect = hist > 0
        tvect[i] = vect
    return tvect
Example 12: _get_batch_label_vector
# Required module: import torch [as alias]
# Or: from torch import histc [as alias]
def _get_batch_label_vector(target, nclass):
    # target is a 3D Variable BxHxW, output is 2D BxnClass
    batch = target.size(0)
    tvect = Variable(torch.zeros(batch, nclass))
    for i in range(batch):
        hist = torch.histc(target[i].cpu().data.float(),
                           bins=nclass, min=0,
                           max=nclass - 1)
        vect = hist > 0
        tvect[i] = vect
    return tvect
Example 13: energy_spectrum
# Required module: import torch [as alias]
# Or: from torch import histc [as alias]
def energy_spectrum(vel):
    """
    Compute energy spectrum given a velocity field
    :param vel: tensor of shape (N, 3, res, res, res)
    :return spec: tensor of shape (N, res/2)
    :return k: tensor of shape (res/2,), frequencies corresponding to spec
    """
    device = vel.device
    res = vel.shape[-2:]
    assert(res[0] == res[1])
    r = res[0]
    k_end = int(r / 2)
    vel_ = pad_rfft3(vel, onesided=False)  # (N, 3, res, res, res, 2)
    uu_ = (torch.norm(vel_, dim=-1) / r**3)**2
    e_ = torch.sum(uu_, dim=1)  # (N, res, res, res)
    k = fftfreqs(res).to(device)  # (3, res, res, res)
    rad = torch.norm(k, dim=0)  # (res, res, res)
    k_bin = torch.arange(k_end, device=device).float() + 1
    bins = torch.zeros(k_end + 1).to(device)
    bins[1:-1] = (k_bin[1:] + k_bin[:-1]) / 2
    bins[-1] = k_bin[-1]
    bins = bins.unsqueeze(0)
    bins[1:] += 1e-3
    inds = searchsorted(bins, rad.flatten().unsqueeze(0)).squeeze().int()
    # bincount = torch.histc(inds.cpu(), bins=bins.shape[1]+1).to(device)
    bincount = torch.bincount(inds)
    asort = torch.argsort(inds.squeeze())
    sorted_e_ = e_.view(e_.shape[0], -1)[:, asort]
    csum_e_ = torch.cumsum(sorted_e_, dim=1)
    binloc = torch.cumsum(bincount, dim=0).long() - 1
    spec_ = csum_e_[:, binloc[1:]] - csum_e_[:, binloc[:-1]]
    spec_ = spec_[:, :-1]
    spec_ = spec_ * 2 * np.pi * (k_bin.float()**2) / bincount[1:-1].float()
    return spec_, k_bin
##################### COMPUTE STATS ###########################
Example 14: _get_batch_label_vector
# Required module: import torch [as alias]
# Or: from torch import histc [as alias]
def _get_batch_label_vector(target, nclass):
    # target is a 3D Variable BxHxW, output is 2D BxnClass
    batch = target.size(0)
    tvect = Variable(torch.zeros(batch, nclass))
    for i in range(batch):
        hist = torch.histc(target[i].cpu().data.float(),
                           bins=nclass, min=0,
                           max=nclass - 1)
        vect = hist > 0
        tvect[i] = vect
    return tvect
# TODO: optim function
Example 15: get_doc_freqs_t
# Required module: import torch [as alias]
# Or: from torch import histc [as alias]
def get_doc_freqs_t(cnts):
    """
    Return word --> # of docs it appears in (torch version).
    """
    return torch.histc(
        cnts._indices()[0].float(), bins=cnts.size(0), min=0, max=cnts.size(0)
    )
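A hedged usage sketch for get_doc_freqs_t (the sparse word-by-document count matrix is made up; `_indices()` is the raw COO index accessor):

indices = torch.tensor([[0, 0, 1, 2, 2, 2],    # word ids (rows)
                        [0, 2, 1, 0, 1, 3]])   # doc ids (columns)
values = torch.tensor([2., 1., 5., 1., 3., 4.])
cnts = torch.sparse_coo_tensor(indices, values, size=(3, 4))  # 3 words x 4 docs
print(get_doc_freqs_t(cnts))  # tensor([2., 1., 3.]) -- word 0 in 2 docs, word 1 in 1, word 2 in 3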