This page collects typical usage examples of the torch.numel method in Python. If you have been wondering what exactly torch.numel does, how to call it, or what real-world uses look like, the curated code examples below may help. You can also explore further usage examples of the torch module to which the method belongs.
The following presents 15 code examples of the torch.numel method, sorted by popularity by default.
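Before the examples, a quick reminder of what the method itself does: torch.numel(t) returns the total number of elements in the tensor t (the product of its shape), and is equivalent to the tensor method t.numel(). A minimal sketch, with tensor shapes chosen only for this demonstration:

import torch

x = torch.zeros(4, 3, 2)
print(torch.numel(x))                   # 24 == 4 * 3 * 2
print(x.numel())                        # same value via the tensor method
print(torch.numel(torch.tensor(5.0)))   # 1 for a 0-dim (scalar) tensor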
Example 1: grad_sparsity
# Required module: import torch [as alias]
# Or: from torch import numel [as alias]
def grad_sparsity(self):
    global_state = self._global_state
    if self._iter == 0:
        global_state["sparsity_avg"] = 0.0
    non_zero_cnt = 0.0
    all_entry_cnt = 0.0
    for group in self._optimizer.param_groups:
        for p in group['params']:
            if p.grad is None:
                continue
            grad = p.grad.data
            grad_non_zero = grad.nonzero()
            if grad_non_zero.dim() > 0:
                non_zero_cnt += grad_non_zero.size()[0]
            all_entry_cnt += torch.numel(grad)
    beta = self._beta
    global_state["sparsity_avg"] = beta * global_state["sparsity_avg"] \
        + (1 - beta) * non_zero_cnt / float(all_entry_cnt)
    self._sparsity_avg = \
        global_state["sparsity_avg"] / self.zero_debias_factor()
    if DEBUG:
        logging.debug("sparsity %f, sparsity avg %f",
                      non_zero_cnt / float(all_entry_cnt), self._sparsity_avg)
    return
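The grad_sparsity examples on this page (Examples 1, 2, 3, and 7) all maintain the same statistic: an exponential moving average of gradient density (the fraction of non-zero gradient entries, non_zero_cnt / all_entry_cnt), divided by a zero-debias factor that corrects the bias of starting the average at 0. A standalone sketch of that running average, assuming the usual 1 - beta**step debias factor; the simulated gradient and variable names are illustrative, not taken from the optimizer above:

import torch

beta = 0.999
sparsity_avg = 0.0

# one simulated gradient with roughly half of its entries zeroed out
grad = torch.randn(1000) * (torch.rand(1000) > 0.5)
density = grad.nonzero().size(0) / float(torch.numel(grad))

step = 1
sparsity_avg = beta * sparsity_avg + (1 - beta) * density
debiased = sparsity_avg / (1 - beta ** step)   # zero-debias correction
print(density, debiased)                       # identical at step 1 by construction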
Example 2: grad_sparsity
# Required module: import torch [as alias]
# Or: from torch import numel [as alias]
def grad_sparsity(self):
    global_state = self._global_state
    if self._iter == 0:
        global_state["sparsity_avg"] = 0.0
    non_zero_cnt = 0.0
    all_entry_cnt = 0.0
    for group in self._optimizer.param_groups:
        for p in group['params']:
            if p.grad is None:
                continue
            grad = p.grad.data
            grad_non_zero = grad.nonzero()
            if grad_non_zero.dim() > 0:
                non_zero_cnt += grad_non_zero.size()[0]
            all_entry_cnt += torch.numel(grad)
    beta = self._beta
    global_state["sparsity_avg"] = beta * global_state["sparsity_avg"] \
        + (1 - beta) * non_zero_cnt / float(all_entry_cnt)
    self._sparsity_avg = \
        global_state["sparsity_avg"] / self.zero_debias_factor()
    if self._verbose:
        logging.debug("sparsity %f, sparsity avg %f",
                      non_zero_cnt / float(all_entry_cnt), self._sparsity_avg)
    return
Example 3: grad_sparsity
# Required module: import torch [as alias]
# Or: from torch import numel [as alias]
def grad_sparsity(self):
    global_state = self._global_state
    if self._iter == 0:
        global_state["sparsity_avg"] = 0.0
    non_zero_cnt = 0.0
    all_entry_cnt = 0.0
    for group in self._optimizer.param_groups:
        for p in group['params']:
            if p.grad is None:
                continue
            grad = p.grad.data
            grad_non_zero = grad.nonzero()
            if grad_non_zero.dim() > 0:
                non_zero_cnt += grad_non_zero.size()[0]
            all_entry_cnt += torch.numel(grad)
    beta = self._beta
    global_state["sparsity_avg"] = beta * global_state["sparsity_avg"] \
        + (1 - beta) * non_zero_cnt / float(all_entry_cnt)
    self._sparsity_avg = \
        global_state["sparsity_avg"] / self.zero_debias_factor()
    if self._verbose:
        logging.debug("sparsity %f, sparsity avg %f",
                      non_zero_cnt / float(all_entry_cnt), self._sparsity_avg)
    return
Example 4: density
# Required module: import torch [as alias]
# Or: from torch import numel [as alias]
def density(tensor):
    """Computes the density of a tensor.

    Density is the fraction of non-zero elements in a tensor.
    If a tensor has a density of 1.0, then it has no zero elements.

    Args:
        tensor: the tensor for which we compute the density.

    Returns:
        density (float)
    """
    nonzero = torch.nonzero(tensor)
    if nonzero.dim() == 0:
        return 0.0
    return nonzero.size(0) / float(torch.numel(tensor))
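A minimal usage sketch for the density helper above; the example tensor is an assumption made only for illustration:

import torch

t = torch.tensor([[0.0, 1.0, 0.0],
                  [2.0, 0.0, 3.0]])
# 3 non-zero entries out of 6 elements
print(density(t))        # 0.5
print(1.0 - density(t))  # the corresponding sparsity, 0.5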
Example 5: log_weights_sparsity
# Required module: import torch [as alias]
# Or: from torch import numel [as alias]
def log_weights_sparsity(self, model, epoch):
    params_size = 0
    sparse_params_size = 0
    for name, param in model.state_dict().items():
        if param.dim() in [2, 4]:
            _density = density(param)
            params_size += torch.numel(param)
            sparse_params_size += param.numel() * _density
            self.tblogger.scalar_summary('sparsity/weights/' + name,
                                         sparsity(param) * 100, epoch)
            self.tblogger.scalar_summary('sparsity-2D/weights/' + name,
                                         sparsity_2D(param) * 100, epoch)
    self.tblogger.scalar_summary("sparsity/weights/total",
                                 100 * (1 - sparse_params_size / params_size), epoch)
    self.tblogger.sync_to_file()
Example 6: _calc_apoz
# Required module: import torch [as alias]
# Or: from torch import numel [as alias]
def _calc_apoz(self, activations):
    """
    Calculate APoZ (average percentage of zeros) of activations.

    Parameters
    ----------
    activations : list
        Layer's output activations

    Returns
    -------
    torch.Tensor
        Filter's APoZ (average percentage of zeros) of the activations
    """
    activations = torch.cat(activations, 0)
    _eq_zero = torch.eq(activations, torch.zeros_like(activations))
    _apoz = torch.sum(_eq_zero, dim=(0, 2, 3)) / torch.numel(_eq_zero[:, 0, :, :])
    return _apoz
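The same APoZ computation can be tried outside the class on fake NCHW activations; the shapes below are assumptions for illustration only:

import torch

# two fake activation batches of shape (N, C, H, W)
activations = [torch.relu(torch.randn(8, 16, 4, 4)) for _ in range(2)]
acts = torch.cat(activations, 0)
eq_zero = torch.eq(acts, torch.zeros_like(acts))
apoz = torch.sum(eq_zero, dim=(0, 2, 3)) / torch.numel(eq_zero[:, 0, :, :])
print(apoz.shape)  # torch.Size([16]): one zero-fraction per channel/filter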
Example 7: grad_sparsity
# Required module: import torch [as alias]
# Or: from torch import numel [as alias]
def grad_sparsity(self):
    global_state = self._global_state
    if self._iter == 0:
        global_state["sparsity_avg"] = 0.0
    non_zero_cnt = 0.0
    all_entry_cnt = 0.0
    for group in self._optimizer.param_groups:
        for p in group['params']:
            if p.grad is None:
                continue
            grad = p.grad.data
            grad_non_zero = grad.nonzero()
            if grad_non_zero.dim() > 0:
                non_zero_cnt += grad_non_zero.size()[0]
            all_entry_cnt += torch.numel(grad)
    beta = self._beta
    global_state["sparsity_avg"] = beta * global_state["sparsity_avg"] \
        + (1 - beta) * non_zero_cnt / float(all_entry_cnt)
    self._sparsity_avg = \
        global_state["sparsity_avg"] / self.zero_debias_factor()
    return
Example 8: to_float
# Required module: import torch [as alias]
# Or: from torch import numel [as alias]
def to_float(val):
    """ Check that val is one of the following:
    - pytorch autograd Variable with one element
    - pytorch tensor with one element
    - numpy array with one element
    - any type supporting float() operation
    And convert val to float
    """
    n_elements = 1
    if isinstance(val, np.ndarray):
        n_elements = val.size
    elif torch is not None and (isinstance(val, torch_autograd.Variable) or torch.is_tensor(val)):
        n_elements = torch.numel(val)
    assert n_elements == 1, \
        "val should have one element (got {})".format(n_elements)
    try:
        return float(val)
    except:
        raise TypeError("Unsupported type for val ({})".format(type(val)))
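A small usage sketch, assuming the to_float helper above (and the np / torch_autograd names it relies on) is defined in the same module:

import numpy as np
import torch

print(to_float(3))                    # 3.0
print(to_float(np.array([2.5])))      # 2.5
print(to_float(torch.tensor([1.5])))  # 1.5
# to_float(torch.ones(2)) would trip the one-element assertion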
Example 9: _perturb_func
# Required module: import torch [as alias]
# Or: from torch import numel [as alias]
def _perturb_func(inputs):
    def perturb_ratio(input):
        return (
            torch.arange(-torch.numel(input[0]) // 2, torch.numel(input[0]) // 2)
            .view(input[0].shape)
            .float()
            / 100
        )

    if isinstance(inputs, tuple):
        input1 = inputs[0]
        input2 = inputs[1]
    else:
        input1 = inputs
        input2 = None

    perturbed_input1 = input1 + perturb_ratio(input1)
    if input2 is None:
        return perturbed_input1
    return perturbed_input1, input2 + perturb_ratio(input2)
Example 10: construct_feature_mask
# Required module: import torch [as alias]
# Or: from torch import numel [as alias]
def construct_feature_mask(
    self, inputs: Tuple[Tensor, ...]
) -> Tuple[Tuple[Tensor, ...], int]:
    feature_mask = []
    current_num_features = 0
    for i in range(len(inputs)):
        num_features = torch.numel(inputs[i][0])
        feature_mask.append(
            current_num_features
            + torch.reshape(
                torch.arange(num_features, device=inputs[i].device),
                inputs[i][0:1].shape,
            )
        )
        current_num_features += num_features
    total_features = current_num_features
    feature_mask = tuple(feature_mask)
    return feature_mask, total_features
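The effect of this method is easier to see on concrete shapes. A standalone sketch of the same bookkeeping; the two hypothetical inputs, each with a leading batch dimension of 1, are assumptions:

import torch

inputs = (torch.zeros(1, 2, 3), torch.zeros(1, 4))
masks = []
offset = 0
for inp in inputs:
    n = torch.numel(inp[0])  # features per example in this input
    masks.append(offset + torch.arange(n).reshape(inp[0:1].shape))
    offset += n
# masks[0] holds ids 0..5 with shape (1, 2, 3); masks[1] holds ids 6..9 with shape (1, 4)
print(masks[0].shape, masks[1].shape, offset)  # 10 features in total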
Example 11: l2_loss
# Required module: import torch [as alias]
# Or: from torch import numel [as alias]
def l2_loss(pred_traj, pred_traj_gt, loss_mask, random=0, mode='average'):
    """
    Input:
    - pred_traj: Tensor of shape (seq_len, batch, 2). Predicted trajectory.
    - pred_traj_gt: Tensor of shape (seq_len, batch, 2). Ground truth
      predictions.
    - loss_mask: Tensor of shape (batch, seq_len)
    - mode: Can be one of sum, average, raw
    Output:
    - loss: l2 loss depending on mode
    """
    seq_len, batch, _ = pred_traj.size()
    loss = (loss_mask.unsqueeze(dim=2) *
            (pred_traj_gt.permute(1, 0, 2) - pred_traj.permute(1, 0, 2)) ** 2)
    if mode == 'sum':
        return torch.sum(loss)
    elif mode == 'average':
        return torch.sum(loss) / torch.numel(loss_mask.data)
    elif mode == 'raw':
        return loss.sum(dim=2).sum(dim=1)
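A usage sketch with random trajectories; the sequence length and batch size are arbitrary assumptions:

import torch

seq_len, batch = 12, 4
pred_traj = torch.randn(seq_len, batch, 2)
pred_traj_gt = torch.randn(seq_len, batch, 2)
loss_mask = torch.ones(batch, seq_len)

print(l2_loss(pred_traj, pred_traj_gt, loss_mask, mode='average'))    # scalar tensor
print(l2_loss(pred_traj, pred_traj_gt, loss_mask, mode='raw').shape)  # torch.Size([4])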
Example 12: var_loss_function_joint
# Required module: import torch [as alias]
# Or: from torch import numel [as alias]
def var_loss_function_joint(output_samples_classification, target, output_samples_recon,
                            inp, mu, std, device):
    recon_loss = nn.BCEWithLogitsLoss(reduction='sum')
    class_loss = nn.CrossEntropyLoss(reduction='sum')

    # Place-holders for the final loss values over all latent space samples
    recon_losses = torch.zeros(output_samples_recon.size(0)).to(device)
    cl_losses = torch.zeros(output_samples_classification.size(0)).to(device)

    # numerical value for stability of the log computation
    eps = 1e-8

    # loop through each sample for each input and calculate the corresponding loss.
    # Normalize the losses.
    for i in range(output_samples_classification.size(0)):
        cl_losses[i] = class_loss(output_samples_classification[i], target) / torch.numel(target)
        recon_losses[i] = recon_loss(output_samples_recon[i], inp) / torch.numel(inp)

    # average the loss over all samples per input
    cl = torch.mean(cl_losses, dim=0)
    rl = torch.mean(recon_losses, dim=0)

    # Compute the KL divergence, normalized by latent dimensionality
    kld = -0.5 * torch.sum(1 + torch.log(eps + std ** 2) - (mu ** 2) - (std ** 2)) / torch.numel(mu)

    return cl, rl, kld
Example 13: forward
# Required module: import torch [as alias]
# Or: from torch import numel [as alias]
def forward(self, x):
    # note: this example uses the legacy torch.fft / torch.ifft function API (PyTorch < 1.8)
    bsn = 1
    batchSize, dim, h, w = x.data.shape
    x_flat = x.permute(0, 2, 3, 1).contiguous().view(-1, dim)  # (batchSize, h, w, dim) flattened
    y = torch.ones(batchSize, self.output_dim, device=x.device)

    for img in range(batchSize // bsn):
        segLen = bsn * h * w
        upper = batchSize * h * w
        interLarge = torch.arange(img * segLen, min(upper, (img + 1) * segLen), dtype=torch.long)
        interSmall = torch.arange(img * bsn, min(upper, (img + 1) * bsn), dtype=torch.long)
        batch_x = x_flat[interLarge, :]

        sketch1 = batch_x.mm(self.sparseM[0].to(x.device)).unsqueeze(2)
        sketch1 = torch.fft(torch.cat((sketch1, torch.zeros(sketch1.size(), device=x.device)), dim=2), 1)

        sketch2 = batch_x.mm(self.sparseM[1].to(x.device)).unsqueeze(2)
        sketch2 = torch.fft(torch.cat((sketch2, torch.zeros(sketch2.size(), device=x.device)), dim=2), 1)

        Re = sketch1[:, :, 0].mul(sketch2[:, :, 0]) - sketch1[:, :, 1].mul(sketch2[:, :, 1])
        Im = sketch1[:, :, 0].mul(sketch2[:, :, 1]) + sketch1[:, :, 1].mul(sketch2[:, :, 0])

        tmp_y = torch.ifft(torch.cat((Re.unsqueeze(2), Im.unsqueeze(2)), dim=2), 1)[:, :, 0]
        y[interSmall, :] = tmp_y.view(torch.numel(interSmall), h, w, self.output_dim).sum(dim=1).sum(dim=1)

    y = self._signed_sqrt(y)
    y = self._l2norm(y)
    return y
Example 14: get_num_parameters
# Required module: import torch [as alias]
# Or: from torch import numel [as alias]
def get_num_parameters(model):
    """
    Returns the number of trainable parameters in a model of type nn.Module
    :param model: nn.Module containing trainable parameters
    :return: number of trainable parameters in model
    """
    num_parameters = 0
    for parameter in model.parameters():
        num_parameters += torch.numel(parameter)
    return num_parameters
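A quick usage sketch on a small module; the layer sizes are arbitrary:

import torch.nn as nn

model = nn.Linear(10, 5)          # 10 * 5 weights + 5 biases
print(get_num_parameters(model))  # 55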
Example 15: loss
# Required module: import torch [as alias]
# Or: from torch import numel [as alias]
def loss(self, proposal_classes: Tensor, gt_proposal_classes: Tensor,
         batch_size, batch_indices) -> Tuple[Tensor, Tensor]:
    cross_entropies = torch.zeros(batch_size, dtype=torch.float,
                                  device=proposal_classes.device).cuda()
    for batch_index in range(batch_size):
        selected_indices = (batch_indices == batch_index).nonzero().view(-1)
        input = proposal_classes[selected_indices]
        target = gt_proposal_classes[selected_indices]
        if torch.numel(input) == 0 or torch.numel(target) == 0:
            # skip batch entries that contributed no proposals
            continue
        assert torch.numel(input) == torch.numel(target)
        cross_entropy = F.multilabel_soft_margin_loss(
            input=proposal_classes[selected_indices],
            target=gt_proposal_classes[selected_indices],
            reduction="mean")
        cross_entropies[batch_index] = cross_entropy
    return cross_entropies