This article collects typical usage examples of the Python method torch.bincount. If you are wondering what torch.bincount does, how to call it, or want to see it used in context, the curated code examples below may help. You can also explore other methods of the torch module.
The following shows 15 code examples of torch.bincount, sorted by popularity by default.
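As a quick primer before the examples: torch.bincount counts the occurrences of each non-negative integer in a 1D tensor; the optional weights argument sums weights instead of counting, and minlength pads the result. A minimal sketch with illustrative values:

import torch

x = torch.tensor([0, 1, 1, 3])
print(torch.bincount(x))               # tensor([1, 2, 0, 1])
print(torch.bincount(x, minlength=6))  # tensor([1, 2, 0, 1, 0, 0])
w = torch.tensor([0.5, 1.0, 1.0, 2.0])
print(torch.bincount(x, weights=w))    # tensor([0.5000, 2.0000, 0.0000, 2.0000])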
Example 1: _torch_hist
# Required import: import torch [as alias]
# Or: from torch import bincount [as alias]
def _torch_hist(label_true, label_pred, n_class):
    """Calculates the confusion matrix for the labels
    Args:
        label_true (torch.Tensor): 1D tensor of ground-truth labels
        label_pred (torch.Tensor): 1D tensor of predicted labels
        n_class (int): number of classes
    Returns:
        torch.Tensor: confusion matrix of shape (n_class, n_class)
    """
    assert len(label_true.shape) == 1, "Labels need to be 1D"
    assert len(label_pred.shape) == 1, "Predictions need to be 1D"
    mask = (label_true >= 0) & (label_true < n_class)
    hist = torch.bincount(n_class * label_true[mask] + label_pred[mask], minlength=n_class ** 2).reshape(
        n_class, n_class
    )
    return hist
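A short usage sketch for _torch_hist, with made-up labels (not from the original repo): the trick is that n_class * true + pred maps each (true, pred) pair to a unique flat index, so bincount fills the flattened confusion matrix in one call.

import torch

n_class = 3
label_true = torch.tensor([0, 1, 2, 2])
label_pred = torch.tensor([0, 2, 2, 1])
hist = _torch_hist(label_true, label_pred, n_class)
# hist[i, j] counts items with true class i predicted as class j:
# tensor([[1, 0, 0],
#         [0, 0, 1],
#         [0, 1, 1]])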
Example 2: split
# Required import: import torch [as alias]
# Or: from torch import bincount [as alias]
def split(data, batch):
    node_slice = torch.cumsum(torch.from_numpy(np.bincount(batch)), 0)
    node_slice = torch.cat([torch.tensor([0]), node_slice])
    row, _ = data.edge_index
    edge_slice = torch.cumsum(torch.from_numpy(np.bincount(batch[row])), 0)
    edge_slice = torch.cat([torch.tensor([0]), edge_slice])
    # Edge indices should start at zero for every graph.
    data.edge_index -= node_slice[batch[row]].unsqueeze(0)
    data.__num_nodes__ = torch.bincount(batch).tolist()
    slices = {'edge_index': edge_slice}
    if data.x is not None:
        slices['x'] = node_slice
    if data.edge_attr is not None:
        slices['edge_attr'] = edge_slice
    if data.y is not None:
        if data.y.size(0) == batch.size(0):
            slices['y'] = node_slice
        else:
            slices['y'] = torch.arange(0, batch[-1] + 2, dtype=torch.long)
    return data, slices
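The role of bincount here is to turn the batch assignment vector into per-graph counts, whose cumulative sums become slice boundaries. A small sketch with a made-up batch vector:

import torch
import numpy as np

batch = np.array([0, 0, 0, 1, 1, 2])  # graph id of each node
counts = np.bincount(batch)           # array([3, 2, 1]): nodes per graph
node_slice = torch.cumsum(torch.from_numpy(counts), 0)
node_slice = torch.cat([torch.tensor([0]), node_slice])
# node_slice = tensor([0, 3, 5, 6]); graph g owns nodes node_slice[g]:node_slice[g+1]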
Example 3: __init__
# Required import: import torch [as alias]
# Or: from torch import bincount [as alias]
def __init__(self, centroids, assignments, bias, in_features, out_features):
    super(PQLinear, self).__init__()
    self.block_size = centroids.size(1)
    self.n_centroids = centroids.size(0)
    self.in_features = in_features
    self.out_features = out_features
    # check compatibility
    if self.in_features % self.block_size != 0:
        raise ValueError("Wrong PQ sizes")
    if len(assignments) % self.out_features != 0:
        raise ValueError("Wrong PQ sizes")
    # define parameters
    self.centroids = nn.Parameter(centroids, requires_grad=True)
    self.register_buffer("assignments", assignments)
    self.register_buffer("counts", torch.bincount(assignments).type_as(centroids))
    if bias is not None:
        self.bias = nn.Parameter(bias)
    else:
        self.register_parameter("bias", None)
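A hedged sketch of the counts buffer (shapes below are illustrative, not from the original repo): each quantized weight block carries a codeword id, and bincount records how many blocks map to each codeword.

import torch

n_centroids = 4
assignments = torch.randint(n_centroids, (64,))  # one codeword id per weight block
counts = torch.bincount(assignments, minlength=n_centroids).float()
# counts[c] = number of weight blocks quantized to centroid c

Passing minlength guards against a shorter counts vector when the highest-numbered centroids happen to be unused; the original snippet omits it.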
Example 4: test_main
# Required import: import torch [as alias]
# Or: from torch import bincount [as alias]
def test_main():
    '''
    Test the unified segmenter.
    '''
    from PIL import Image
    import numpy
    testim = Image.open('script/testdata/test_church_242.jpg')
    tensor_im = (torch.from_numpy(numpy.asarray(testim)).permute(2, 0, 1)
                 .float() / 255 * 2 - 1)[None, :, :, :].cuda()
    segmenter = UnifiedParsingSegmenter()
    seg = segmenter.segment_batch(tensor_im)
    bc = torch.bincount(seg.view(-1))
    labels, cats = segmenter.get_label_and_category_names()
    for label in bc.nonzero()[:, 0]:
        if label.item():
            # What is the prediction for this class?
            pred, mask = segmenter.predict_single_class(tensor_im, label.item())
            assert mask.sum().item() == bc[label].item()
            assert len(((seg == label).max(1)[0] - mask).nonzero()) == 0
            inside_pred = pred[mask].mean().item()
            outside_pred = pred[~mask].mean().item()
            print('%s (%s, #%d): %d pixels, pred %.2g inside %.2g outside' %
                  (labels[label.item()] + (label.item(), bc[label].item(),
                                           inside_pred, outside_pred)))
Example 5: KMeans
# Required import: import torch [as alias]
# Or: from torch import bincount [as alias]
def KMeans(x_i, c_j, Nits=10, ranges=None):
    # nn_search and dtype are defined elsewhere in the surrounding tutorial code
    D = x_i.shape[1]
    for i in range(Nits):
        # Points -> Nearest cluster
        labs_i = nn_search(x_i, c_j, ranges=ranges)
        # Class cardinals:
        Ncl = torch.bincount(labs_i.view(-1)).type(dtype)
        # Compute the cluster centroids with torch.bincount:
        for d in range(D):  # Unfortunately, vector weights are not supported...
            c_j[:, d] = torch.bincount(labs_i.view(-1), weights=x_i[:, d]) / Ncl
    return c_j, labs_i
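The centroid update relies on bincount twice: unweighted for cluster sizes, and weighted (one call per coordinate, since vector-valued weights are unsupported) for per-cluster coordinate sums. A small standalone sketch:

import torch

labels = torch.tensor([0, 0, 1, 2, 2, 2])  # cluster of each point
x = torch.randn(6, 3)                      # the points themselves
Ncl = torch.bincount(labels).float()       # cluster sizes: tensor([2., 1., 3.])
centroids = torch.stack(
    [torch.bincount(labels, weights=x[:, d]) for d in range(3)], dim=1
) / Ncl[:, None]                           # per-cluster coordinate means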
##############################################
# On the subject
# ~~~~~~~~~~~~~~~~~~~~~~~~
#
# For a new (unlabelled) subject, we perform a simple K-means
# in R^60 to obtain a clustering of the data.
#
Example 6: forward
# Required import: import torch [as alias]
# Or: from torch import bincount [as alias]
def forward(self, simmat, dtoks, qtoks):
    # THIS IS SLOW ... Any way to make this faster? Maybe it's not worth doing on GPU?
    BATCH, CHANNELS, QLEN, DLEN = simmat.shape
    # +1e-5 to nudge scores of 1 to above threshold
    bins = ((simmat + 1.000001) / 2. * (self.bins - 1)).int()
    # set weights of 0 for padding (in both query and doc dims)
    weights = ((dtoks != -1).reshape(BATCH, 1, DLEN).expand(BATCH, QLEN, DLEN) *
               (qtoks != -1).reshape(BATCH, QLEN, 1).expand(BATCH, QLEN, DLEN)).float()
    # no way to batch this... loses gradients here. https://discuss.pytorch.org/t/histogram-function-in-pytorch/5350
    bins, weights = bins.cpu(), weights.cpu()
    histogram = []
    for superbins, w in zip(bins, weights):
        result = []
        for b in superbins:
            result.append(torch.stack([torch.bincount(q, x, self.bins) for q, x in zip(b, w)], dim=0))
        result = torch.stack(result, dim=0)
        histogram.append(result)
    histogram = torch.stack(histogram, dim=0)
    # back to GPU
    histogram = histogram.to(simmat.device)
    return (histogram.float() + 1e-5).log()
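The inner bincount call is the weighted-histogram trick: the quantized bin ids act as values and the padding mask acts as weights, so each query term's row becomes a histogram that ignores padded positions. Isolated, with illustrative values:

import torch

bins = 5
bin_ids = torch.tensor([0, 4, 4, 2])           # histogram bin of each doc position
weights = torch.tensor([1., 1., 0., 1.])       # 0 where the position is padding
hist = torch.bincount(bin_ids, weights, bins)  # tensor([1., 0., 1., 0., 1.])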
Example 7: update
# Required import: import torch [as alias]
# Or: from torch import bincount [as alias]
def update(self, output: Sequence[torch.Tensor]) -> None:
    self._check_shape(output)
    y_pred, y = output
    self._num_examples += y_pred.shape[0]
    # target is (batch_size, ...)
    y_pred = torch.argmax(y_pred, dim=1).flatten()
    y = y.flatten()
    target_mask = (y >= 0) & (y < self.num_classes)
    y = y[target_mask]
    y_pred = y_pred[target_mask]
    indices = self.num_classes * y + y_pred
    m = torch.bincount(indices, minlength=self.num_classes ** 2).reshape(self.num_classes, self.num_classes)
    self.confusion_matrix += m.to(self.confusion_matrix)
Example 8: forward
# Required import: import torch [as alias]
# Or: from torch import bincount [as alias]
def forward(self, simmat, dlens, dtoks, qtoks):
    BATCH, CHANNELS, QLEN, DLEN = simmat.shape
    # +1e-5 to nudge scores of 1 to above threshold
    bins = ((simmat + 1.00001) / 2. * (self.bins - 1)).int()
    weights = ((dtoks != -1).reshape(BATCH, 1, DLEN).expand(BATCH, QLEN, DLEN) *
               (qtoks != -1).reshape(BATCH, QLEN, 1).expand(BATCH, QLEN, DLEN)).float()
    # apparently no way to batch this... https://discuss.pytorch.org/t/histogram-function-in-pytorch/5350
    bins, weights = bins.cpu(), weights.cpu()  # WARNING: this line (and the similar line below) improves performance tenfold when on GPU
    histogram = []
    for superbins, w in zip(bins, weights):
        result = []
        for b in superbins:
            result.append(torch.stack([torch.bincount(q, x, self.bins) for q, x in zip(b, w)], dim=0))
        result = torch.stack(result, dim=0)
        histogram.append(result)
    histogram = torch.stack(histogram, dim=0)
    histogram = histogram.to(simmat.device)  # WARNING: this line (and the similar line above) improves performance tenfold when on GPU
    return histogram
Example 9: fast_hist
# Required import: import torch [as alias]
# Or: from torch import bincount [as alias]
def fast_hist(label_true, label_pred):
    n_class = settings.N_CLASSES
    mask = (label_true >= 0) & (label_true < n_class)
    hist = torch.bincount(
        n_class * label_true[mask].int() + label_pred[mask].int(),
        minlength=n_class ** 2,
    ).reshape(n_class, n_class)
    return hist
Example 10: update
# Required import: import torch [as alias]
# Or: from torch import bincount [as alias]
def update(self, a, b):
    n = self.num_classes
    if self.mat is None:
        self.mat = torch.zeros((n, n), dtype=torch.int64, device=a.device)
    with torch.no_grad():
        k = (a >= 0) & (a < n)
        inds = n * a[k].to(torch.int64) + b[k]
        self.mat += torch.bincount(inds, minlength=n ** 2).reshape(n, n)
Example 11: __init__
# Required import: import torch [as alias]
# Or: from torch import bincount [as alias]
def __init__(self, centroids, assignments, num_embeddings, embedding_dim,
             padding_idx=None, max_norm=None, norm_type=2.,
             scale_grad_by_freq=False, sparse=False, _weight=None):
    super(PQEmbedding, self).__init__()
    self.block_size = centroids.size(1)
    self.n_centroids = centroids.size(0)
    self.num_embeddings = num_embeddings
    self.embedding_dim = embedding_dim
    if padding_idx is not None:
        if padding_idx > 0:
            assert padding_idx < self.num_embeddings, 'Padding_idx must be within num_embeddings'
        elif padding_idx < 0:
            assert padding_idx >= -self.num_embeddings, 'Padding_idx must be within num_embeddings'
            padding_idx = self.num_embeddings + padding_idx
    self.padding_idx = padding_idx
    self.max_norm = max_norm
    self.norm_type = norm_type
    self.scale_grad_by_freq = scale_grad_by_freq
    self.sparse = sparse
    # check compatibility
    if self.embedding_dim % self.block_size != 0:
        raise ValueError("Wrong PQ sizes")
    if len(assignments) % self.num_embeddings != 0:
        raise ValueError("Wrong PQ sizes")
    # define parameters
    self.centroids = nn.Parameter(centroids, requires_grad=True)
    self.register_buffer("assignments", assignments)
    self.register_buffer("counts", torch.bincount(assignments).type_as(centroids))
Example 12: energy_spectrum
# Required import: import torch [as alias]
# Or: from torch import bincount [as alias]
def energy_spectrum(vel):
    """
    Compute energy spectrum given a velocity field
    :param vel: tensor of shape (N, 3, res, res, res)
    :return spec: tensor of shape (N, res/2)
    :return k: tensor of shape (res/2,), frequencies corresponding to spec
    """
    device = vel.device
    res = vel.shape[-2:]
    assert res[0] == res[1]
    r = res[0]
    k_end = int(r / 2)
    vel_ = pad_rfft3(vel, onesided=False)  # (N, 3, res, res, res, 2)
    uu_ = (torch.norm(vel_, dim=-1) / r**3)**2
    e_ = torch.sum(uu_, dim=1)  # (N, res, res, res)
    k = fftfreqs(res).to(device)  # (3, res, res, res)
    rad = torch.norm(k, dim=0)  # (res, res, res)
    k_bin = torch.arange(k_end, device=device).float() + 1
    bins = torch.zeros(k_end + 1).to(device)
    bins[1:-1] = (k_bin[1:] + k_bin[:-1]) / 2
    bins[-1] = k_bin[-1]
    bins[1:] += 1e-3  # nudge the bin edges; applied before unsqueeze so it hits the edges rather than a nonexistent row
    bins = bins.unsqueeze(0)
    inds = searchsorted(bins, rad.flatten().unsqueeze(0)).squeeze().int()
    # bincount = torch.histc(inds.cpu(), bins=bins.shape[1]+1).to(device)
    bincount = torch.bincount(inds)
    asort = torch.argsort(inds.squeeze())
    sorted_e_ = e_.view(e_.shape[0], -1)[:, asort]
    csum_e_ = torch.cumsum(sorted_e_, dim=1)
    binloc = torch.cumsum(bincount, dim=0).long() - 1
    spec_ = csum_e_[:, binloc[1:]] - csum_e_[:, binloc[:-1]]
    spec_ = spec_[:, :-1]
    spec_ = spec_ * 2 * np.pi * (k_bin.float()**2) / bincount[1:-1].float()
    return spec_, k_bin
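When the data are not batched, the same bincount idiom computes per-bin sums and means directly; the example above resorts to an argsort/cumsum variant only because the energies carry a batch dimension. A compact sketch of the unbatched case, with illustrative values:

import torch

inds = torch.tensor([0, 0, 1, 1, 1, 2])        # bin index of each element
vals = torch.tensor([1., 2., 3., 4., 5., 6.])  # energy of each element
counts = torch.bincount(inds)                  # elements per bin: tensor([2, 3, 1])
sums = torch.bincount(inds, weights=vals)      # energy per bin: tensor([3., 12., 6.])
means = sums / counts.float()                  # tensor([1.5000, 4.0000, 6.0000])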
##################### COMPUTE STATS ###########################
Example 13: forward
# Required import: import torch [as alias]
# Or: from torch import bincount [as alias]
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
    """
    # Parameters

    inputs : `torch.Tensor`
        Shape `(batch_size, timesteps, sequence_length)` of word ids
        representing the current batch.

    # Returns

    `torch.Tensor`
        The bag-of-words representations for the input sequence, shape
        `(batch_size, vocab_size)`
    """
    bag_of_words_vectors = []
    mask = get_text_field_mask({"tokens": {"tokens": inputs}})
    if self._ignore_oov:
        # also mask out positions corresponding to oov
        mask &= inputs != self._oov_idx
    for document, doc_mask in zip(inputs, mask):
        document = torch.masked_select(document, doc_mask)
        vec = torch.bincount(document, minlength=self.vocab_size).float()
        vec = vec.view(1, -1)
        bag_of_words_vectors.append(vec)
    bag_of_words_output = torch.cat(bag_of_words_vectors, 0)
    if self._projection:
        projection = self._projection
        bag_of_words_output = projection(bag_of_words_output)
    return bag_of_words_output
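Per document, the bincount call is simply a term-frequency counter over word ids, with minlength pinning the vector to the vocabulary size. Minimally, with illustrative values:

import torch

vocab_size = 6
doc = torch.tensor([2, 2, 5, 0])  # word ids of one document after masking
tf = torch.bincount(doc, minlength=vocab_size).float()
# tf = tensor([1., 0., 2., 0., 0., 1.]) -- one row of the bag-of-words matrix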
Example 14: _fast_hist
# Required import: import torch [as alias]
# Or: from torch import bincount [as alias]
def _fast_hist(true, pred, num_classes):
    mask = (true >= 0) & (true < num_classes)
    hist = torch.bincount(
        num_classes * true[mask] + pred[mask],
        minlength=num_classes ** 2,
    ).reshape(num_classes, num_classes).float()
    return hist
Example 15: region_based_classification_single
# Required import: import torch [as alias]
# Or: from torch import bincount [as alias]
def region_based_classification_single(self, sample, radius):
    """
    :param sample: one sample (1*channel*H*W)
    :param radius: radius of the hypercube around the sample from which points are drawn
    :return: the majority-vote label predicted for the region
    """
    self.model.eval()
    assert sample.shape[0] == 1, "the sample parameter should be one example in numpy format"
    copy_sample = np.copy(sample)
    with torch.no_grad():
        copy_sample = torch.from_numpy(copy_sample).to(self.device)
        # prepare the hypercube samples (size=num_points) for the sample (size=1)
        hypercube_samples = copy_sample.repeat(self.num_points, 1, 1, 1).to(self.device).float()
        random_space = torch.Tensor(*hypercube_samples.size()).to(self.device).float()
        random_space.uniform_(-radius, radius)
        hypercube_samples = torch.clamp(hypercube_samples + random_space, min=0.0, max=1.0)
        # predicting for hypercube samples
        hypercube_preds = self.model(hypercube_samples)
        hypercube_labels = torch.max(hypercube_preds, dim=1)[1]
        # voting for predicted labels
        bin_count = torch.bincount(hypercube_labels)
        rc_label = torch.max(bin_count, dim=0)[1]
    return rc_label.cpu().numpy()
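The vote at the end is the standard bincount-plus-argmax majority. In isolation, with illustrative labels:

import torch

hypercube_labels = torch.tensor([3, 1, 3, 3, 0])
bin_count = torch.bincount(hypercube_labels)  # votes per class: tensor([1, 1, 0, 3])
rc_label = torch.max(bin_count, dim=0)[1]     # majority class: tensor(3)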