This article collects typical usage examples of the Python method torch.tensors. If you are wondering what torch.tensors is for, how to call it, or what real-world usages look like, the curated code samples below may help. You can also explore further usage examples from the torch module.
Below are 15 code examples of torch.tensors, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
Example 1: load_data_lm
# Required import: import torch [as alias]
# Or: from torch import tensors [as alias]
def load_data_lm():
    dataset_file = cached_path("https://s3.amazonaws.com/datasets.huggingface.co/wikitext-103/"
                               "wikitext-103-train-tokenized-bert.bin")
    datasets = torch.load(dataset_file)
    # Convert our encoded dataset to torch.tensors and reshape in blocks of the transformer's input length
    for split_name in ['train', 'valid']:
        tensor = torch.tensor(datasets[split_name], dtype=torch.long)
        num_sequences = (tensor.size(0) // 256) * 256
        datasets[split_name] = tensor.narrow(0, 0, num_sequences).view(-1, 256)
    n = len(datasets['valid']) // 2
    datasets['test'] = datasets['valid'][n:]
    datasets['valid'] = datasets['valid'][:n]
    datasets['train'] = datasets['train'][:1000]
    return datasets
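A minimal, self-contained sketch of the same blocking step on synthetic token ids (the cached_path download and the actual wikitext-103 data are skipped; the 256-token block length is taken from the snippet above):

import torch

# Hypothetical stand-in for a tokenized split: a flat 1-D tensor of token ids.
tokens = torch.randint(0, 30000, (1_000_123,), dtype=torch.long)

block_len = 256  # the transformer input length used in load_data_lm
num_tokens = (tokens.size(0) // block_len) * block_len

# Drop the trailing remainder and reshape into (num_sequences, block_len).
blocks = tokens.narrow(0, 0, num_tokens).view(-1, block_len)
print(blocks.shape)  # torch.Size([3906, 256])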
Example 2: __getitem__
# Required import: import torch [as alias]
# Or: from torch import tensors [as alias]
def __getitem__(self, index):
    im_name = self.imlist[index]
    im_input, label = self.sample_loader(im_name)
    # Resize a sample, or not.
    if self.resize_to is not None:
        im_input = cv2.resize(im_input, self.resize_to)
        label = cv2.resize(label, self.resize_to)
    # Transform: output torch.tensors in [0,1] with shape (C,H,W).
    # Note: for testing on DDN_Data and RESIDE, the output is in [0,1] with shape (V,C,H,W),
    # where V is the number of distortion types in the dataset (e.g., V == 14 for DDN_Data).
    if self.transform is not None:
        im_input, label = self.Transformer(im_input, label)
    return im_input, label, im_name

# Read an image name list.
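The Transformer callable used above is not part of this excerpt. The sketch below is a hypothetical paired transform, assuming OpenCV-style HWC uint8 inputs, that produces the output described in the comment: tensors in [0, 1] with shape (C, H, W):

import numpy as np
import torch

def to_tensor_pair(im_input, label):
    """Hypothetical paired transform: HWC uint8 arrays -> CHW float tensors in [0, 1]."""
    def _convert(img):
        tensor = torch.from_numpy(np.ascontiguousarray(img)).float() / 255.0
        return tensor.permute(2, 0, 1)  # HWC -> CHW
    return _convert(im_input), _convert(label)

# Usage on dummy data
im = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
gt = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
x, y = to_tensor_pair(im, gt)
print(x.shape, float(x.min()) >= 0.0, float(x.max()) <= 1.0)  # torch.Size([3, 480, 640]) True True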
Example 3: _set_grad_to_zero
# Required import: import torch [as alias]
# Or: from torch import tensors [as alias]
def _set_grad_to_zero(self, args, make_private=False):
    """Sets gradients for args to zero.

    Args:
        args (list of torch.tensors): contains arguments
        make_private (bool): encrypt args using CrypTensor
    """
    args_zero_grad = []
    for arg in args:
        if is_float_tensor(arg) and make_private:
            arg = crypten.cryptensor(arg, requires_grad=True)
        elif is_float_tensor(arg):
            arg.requires_grad = True
            arg.grad = None
        args_zero_grad.append(arg)
    return args_zero_grad
Example 4: _reductions_helper
# Required import: import torch [as alias]
# Or: from torch import tensors [as alias]
def _reductions_helper(self, input_reductions, method=None):
    """Tests input reductions on tensors of various sizes."""
    for size in SIZES:
        tensor = get_random_test_tensor(size=size, is_float=True)
        for reduction in input_reductions:
            if method is None:
                self._check_forward_backward(reduction, tensor)
            else:
                with crypten.mpc.ConfigManager("max_method", method):
                    self._check_forward_backward(reduction, tensor)

            # Check dim 0 if tensor is 0-dimensional
            dims = 1 if tensor.dim() == 0 else tensor.dim()
            for dim in range(dims):
                for keepdim in [False, True]:
                    if method is None:
                        self._check_forward_backward(
                            reduction, tensor, dim, keepdim=keepdim
                        )
                    else:
                        with crypten.mpc.ConfigManager("max_method", method):
                            self._check_forward_backward(
                                reduction, tensor, dim, keepdim=keepdim
                            )
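For context, here is a plain-PyTorch illustration of the dim/keepdim combinations the helper iterates over (get_random_test_tensor, SIZES, and the crypten fixtures belong to the surrounding test module and are replaced here by an ordinary random tensor):

import torch

tensor = torch.randn(3, 4)
for reduction in ["sum", "mean", "max"]:
    full = getattr(tensor, reduction)()  # reduce over all elements
    for dim in range(tensor.dim()):
        for keepdim in (False, True):
            out = getattr(tensor, reduction)(dim, keepdim=keepdim)
            # max returns a (values, indices) pair when a dim is given
            values = out[0] if isinstance(out, tuple) else out
            print(reduction, dim, keepdim, tuple(values.shape))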
Example 5: _conv1d
# Required import: import torch [as alias]
# Or: from torch import tensors [as alias]
def _conv1d(self, signal_size, in_channels):
    """Test convolution of encrypted tensor with public/private tensors."""
    nbatches = [1, 3]
    nout_channels = [1, 5]
    kernel_sizes = [1, 2, 3]
    paddings = [0, 1]
    strides = [1, 2]
    for batches in nbatches:
        size = (batches, in_channels, signal_size)
        signal = get_random_test_tensor(size=size, is_float=True)
        for kernel_size, out_channels in itertools.product(
            kernel_sizes, nout_channels
        ):
            kernel_size = (out_channels, in_channels, kernel_size)
            kernel = get_random_test_tensor(size=kernel_size, is_float=True)
            for padding in paddings:
                for stride in strides:
                    self._check_forward_backward(
                        "conv1d", signal, kernel, stride=stride, padding=padding
                    )
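The shapes exercised above match torch.nn.functional.conv1d: input (batch, in_channels, length) and weight (out_channels, in_channels, kernel_size). A quick plain-torch check of one combination, with stride and padding values taken from the lists in the snippet:

import torch
import torch.nn.functional as F

signal = torch.randn(3, 2, 16)  # (batches, in_channels, signal_size)
kernel = torch.randn(5, 2, 3)   # (out_channels, in_channels, kernel_size)
out = F.conv1d(signal, kernel, stride=2, padding=1)
print(out.shape)  # torch.Size([3, 5, 8])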
Example 6: _euclidian
# Required import: import torch [as alias]
# Or: from torch import tensors [as alias]
def _euclidian(x, y):
    """
    Helper function to calculate the Euclidean distance between torch.tensors x and y: sqrt(|x - y|**2).
    Based on torch.cdist.

    Parameters
    ----------
    x : torch.tensor
        2D tensor of size m x f
    y : torch.tensor
        2D tensor of size n x f

    Returns
    -------
    torch.tensor
        2D tensor of size m x n
    """
    return torch.cdist(x, y)
Example 7: _euclidian_fast
# Required import: import torch [as alias]
# Or: from torch import tensors [as alias]
def _euclidian_fast(x, y):
    """
    Helper function to calculate the Euclidean distance between torch.tensors x and y: sqrt(|x - y|**2).
    Uses quadratic expansion to calculate (x - y)**2.

    Parameters
    ----------
    x : torch.tensor
        2D tensor of size m x f
    y : torch.tensor
        2D tensor of size n x f

    Returns
    -------
    torch.tensor
        2D tensor of size m x n
    """
    return torch.sqrt(_quadratic_expand(x, y))
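The _quadratic_expand helper is not included in this excerpt. A plausible sketch, assuming it implements the standard identity |x_i - y_j|**2 = |x_i|**2 + |y_j|**2 - 2 * x_i . y_j, could look like this:

import torch

def _quadratic_expand(x, y):
    """Hypothetical helper: squared Euclidean distances via quadratic expansion."""
    x_sq = (x ** 2).sum(dim=1, keepdim=True)      # (m, 1)
    y_sq = (y ** 2).sum(dim=1, keepdim=True).t()  # (1, n)
    d2 = x_sq + y_sq - 2.0 * x @ y.t()            # (m, n)
    return d2.clamp_min(0.0)  # guard against tiny negative values from rounding

Under this assumption, _euclidian_fast(x, y) agrees with _euclidian(x, y) up to floating-point error.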
Example 8: _gaussian
# Required import: import torch [as alias]
# Or: from torch import tensors [as alias]
def _gaussian(x, y, sigma=1.0):
    """
    Helper function to calculate the Gaussian distance between torch.tensors x and y: exp(-|x - y|**2 / (2 * sigma**2)).
    Based on torch.cdist.

    Parameters
    ----------
    x : torch.tensor
        2D tensor of size m x f
    y : torch.tensor
        2D tensor of size n x f
    sigma : float, default=1.0
        scaling factor for the Gaussian kernel

    Returns
    -------
    torch.tensor
        2D tensor of size m x n
    """
    d2 = _euclidian(x, y) ** 2
    result = torch.exp(-d2 / (2 * sigma * sigma))
    return result
Example 9: _gaussian_fast
# Required import: import torch [as alias]
# Or: from torch import tensors [as alias]
def _gaussian_fast(x, y, sigma=1.0):
    """
    Helper function to calculate the Gaussian distance between torch.tensors x and y: exp(-|x - y|**2 / (2 * sigma**2)).
    Uses quadratic expansion to calculate (x - y)**2.

    Parameters
    ----------
    x : torch.tensor
        2D tensor of size m x f
    y : torch.tensor
        2D tensor of size n x f
    sigma : float, default=1.0
        scaling factor for the Gaussian kernel

    Returns
    -------
    torch.tensor
        2D tensor of size m x n
    """
    d2 = _quadratic_expand(x, y)
    result = torch.exp(-d2 / (2 * sigma * sigma))
    return result
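With a _quadratic_expand such as the sketch above in scope, the slow and fast Gaussian kernels can be sanity-checked against each other on small inputs (a usage sketch, not part of the original module):

import torch

x = torch.randn(8, 5)
y = torch.randn(6, 5)
k_cdist = _gaussian(x, y, sigma=1.5)
k_fast = _gaussian_fast(x, y, sigma=1.5)
print(k_cdist.shape)                               # torch.Size([8, 6])
print(torch.allclose(k_cdist, k_fast, atol=1e-5))  # expected: True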
Example 10: _add
# Required import: import torch [as alias]
# Or: from torch import tensors [as alias]
def _add(self, *args, **kwargs):
    """Internal add method to add to the storage arrays.

    Args:
        *args: All the elements in a transition.
    """
    self._check_args_length(*args, **kwargs)
    elements = self.get_add_args_signature()
    # convert kwarg np.arrays to torch.tensors
    for element in elements[len(args) :]:
        if element.name in kwargs:
            kwargs[element.name] = torch.from_numpy(
                np.array(kwargs[element.name], dtype=element.type)
            )
    # convert arg np.arrays to torch.tensors
    kwargs.update(
        {
            e.name: torch.from_numpy(np.array(arg, dtype=e.type))
            for arg, e in zip(args, elements[: len(args)])
        }
    )
    self._add_transition(kwargs)
Example 11: unflatten_like
# Required import: import torch [as alias]
# Or: from torch import tensors [as alias]
def unflatten_like(vector, likeTensorList):
    """
    Takes a flat torch.tensor and unflattens it to a list of torch.tensors
    shaped like likeTensorList.

    Arguments:
        vector (torch.tensor): flat one-dimensional tensor
        likeTensorList (list or iterable): list of tensors with the same number of
            elements as vector
    """
    outList = []
    i = 0
    for tensor in likeTensorList:
        n = tensor.numel()
        outList.append(vector[i : i + n].view(tensor.shape))
        i += n
    return outList
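A usage sketch (the model and its shapes are chosen arbitrarily): flatten a module's parameters into one vector, then use unflatten_like to recover the per-parameter shapes:

import torch
import torch.nn as nn

model = nn.Linear(4, 3)
params = list(model.parameters())

# Flatten all parameters into a single 1-D vector, then restore the original shapes.
flat = torch.cat([p.detach().reshape(-1) for p in params])
restored = unflatten_like(flat, params)

print(flat.shape)                    # torch.Size([15])
print([t.shape for t in restored])   # [torch.Size([3, 4]), torch.Size([3])]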
Example 12: forward
# Required import: import torch [as alias]
# Or: from torch import tensors [as alias]
def forward(self, representation_dict):
    """
    Forward pass through the adaptation network. Returns classification parameters for the task.
    :param representation_dict: (dict::torch.tensors) Dictionary containing class-level representations for each
        class in the task.
    :return: (dict::torch.tensors) Dictionary containing the weights and biases for the classification of each class
        in the task. The model can extract the parameters and build the classifier accordingly. Supports sampling if
        the ML-PIP objective is desired.
    """
    classifier_param_dict = {}
    class_weight_means = []
    class_bias_means = []

    # Extract and sort the label set for the task
    label_set = list(representation_dict.keys())
    label_set.sort()
    num_classes = len(label_set)

    # For each class, extract the representation and pass it through the adaptation network to generate
    # classification params for that class. Store the parameters in a list.
    for class_num in label_set:
        nu = representation_dict[class_num]
        class_weight_means.append(self.weight_means_processor(nu))
        class_bias_means.append(self.bias_means_processor(nu))

    # Save the parameters as torch tensors (matrix and vector) and add to the dictionary
    classifier_param_dict['weight_mean'] = torch.cat(class_weight_means, dim=0)
    classifier_param_dict['bias_mean'] = torch.reshape(torch.cat(class_bias_means, dim=1), [num_classes, ])

    return classifier_param_dict
Example 13: test_unary_functions
# Required import: import torch [as alias]
# Or: from torch import tensors [as alias]
def test_unary_functions(self):
    """Test unary functions on tensors of various sizes."""
    unary_functions = [
        "neg",
        "__neg__",
        "exp",
        "reciprocal",
        "abs",
        "__abs__",
        "sign",
        "relu",
        "sin",
        "cos",
        "sigmoid",
        "tanh",
        "log",
        "sqrt",
    ]
    pos_only_functions = ["log", "sqrt"]
    for func in unary_functions:
        for size in SIZES:
            tensor = get_random_test_tensor(size=size, is_float=True)
            # Make tensor positive when positive inputs are required
            if func in pos_only_functions:
                tensor = tensor.abs()
            self._check_forward_backward(func, tensor)
Example 14: test_dot_ger
# Required import: import torch [as alias]
# Or: from torch import tensors [as alias]
def test_dot_ger(self):
    """Test inner and outer products of encrypted tensors."""
    for length in range(1, 10):
        tensor1 = get_random_test_tensor(size=(length,), is_float=True)
        tensor2 = get_random_test_tensor(size=(length,), is_float=True)
        self._check_forward_backward("dot", tensor1, tensor2)
        self._check_forward_backward("ger", tensor1, tensor2)
Example 15: test_clone
# Required import: import torch [as alias]
# Or: from torch import tensors [as alias]
def test_clone(self):
    """Tests shallow_copy and clone of encrypted tensors."""
    sizes = [(5,), (1, 5), (5, 10, 15)]
    for size in sizes:
        tensor = get_random_test_tensor(size=size, is_float=True)
        self._check_forward_backward("clone", tensor)