This article compiles typical usage examples of torch.tensor in Python. If you have been wondering what torch.tensor does, how to call it, or simply want working examples, the curated snippets below should help. You can also explore further usage examples from the containing module, torch.
The following 15 code examples of torch.tensor are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
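Before diving into the examples, here is a minimal sketch of torch.tensor itself (the values are arbitrary):

import torch

# torch.tensor copies its input data and infers the dtype from the values.
a = torch.tensor([[1, 2], [3, 4]])  # int64 tensor of shape (2, 2)
# dtype and device can also be fixed explicitly at construction time.
b = torch.tensor([1.0, 2.0], dtype=torch.float32, device='cpu')
print(a.shape, a.dtype, b.dtype)  # torch.Size([2, 2]) torch.int64 torch.float32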
Example 1: forward
# Required import: import torch [as alias]
# Or: from torch import tensor [as alias]
def forward(self, feats):
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
        tuple: Usually a tuple of classification scores and bbox predictions.
            cls_scores (list[Tensor]): Classification and quality (IoU)
                joint scores for all scale levels; each is a 4D-tensor
                whose channel number is num_classes.
            bbox_preds (list[Tensor]): Box distribution logits for all
                scale levels; each is a 4D-tensor whose channel number is
                4*(n+1), where n is the max value of the integral set.
"""
return multi_apply(self.forward_single, feats, self.scales)
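This example (and the next) fans the per-level work out through mmdet's multi_apply helper. For context, a sketch of its behavior, closely following mmdet.core but to be treated as an assumption rather than the exact source:

from functools import partial

def multi_apply(func, *args, **kwargs):
    # Apply func to each group of per-level inputs, then transpose the
    # per-level result tuples into a tuple of per-field lists.
    pfunc = partial(func, **kwargs) if kwargs else func
    map_results = map(pfunc, *args)
    return tuple(map(list, zip(*map_results)))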
Example 2: forward
# Required import: import torch [as alias]
# Or: from torch import tensor [as alias]
def forward(self, feats):
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
        tuple: Usually a tuple of classification scores and bbox predictions.
            cls_scores (list[Tensor]): Classification scores for all scale
                levels; each is a 4D-tensor whose channel number is
                num_anchors * num_classes.
            bbox_preds (list[Tensor]): Box energies / deltas for all scale
                levels; each is a 4D-tensor whose channel number is
                num_anchors * 4.
"""
return multi_apply(self.forward_single, feats, self.scales)
Example 3: generate_grid
# Required import: import torch [as alias]
# Or: from torch import tensor [as alias]
def generate_grid(num_grid, size, device):
"""Generate regular square grid of points in [0, 1] x [0, 1] coordinate
space.
Args:
num_grid (int): The number of grids to sample, one for each region.
size (tuple(int, int)): The side size of the regular grid.
device (torch.device): Desired device of returned tensor.
Returns:
(torch.Tensor): A tensor of shape (num_grid, size[0]*size[1], 2) that
contains coordinates for the regular grids.
"""
affine_trans = torch.tensor([[[1., 0., 0.], [0., 1., 0.]]], device=device)
grid = F.affine_grid(
affine_trans, torch.Size((1, 1, *size)), align_corners=False)
grid = normalize(grid)
return grid.view(1, -1, 2).expand(num_grid, -1, -1)
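A quick shape check for generate_grid (a sketch; normalize below is a stand-in that assumes the module's helper maps affine_grid's [-1, 1] coordinates into [0, 1]):

import torch
import torch.nn.functional as F

def normalize(grid):
    # Stand-in for the module's helper: map [-1, 1] coords into [0, 1].
    return (grid + 1.0) / 2.0

affine_trans = torch.tensor([[[1., 0., 0.], [0., 1., 0.]]])
grid = F.affine_grid(affine_trans, torch.Size((1, 1, 6, 6)), align_corners=False)
print(normalize(grid).view(1, -1, 2).expand(4, -1, -1).shape)  # torch.Size([4, 36, 2])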
Example 4: to_tensor
# Required import: import torch [as alias]
# Or: from torch import tensor [as alias]
def to_tensor(data):
"""Convert objects of various python types to :obj:`torch.Tensor`.
Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
:class:`Sequence`, :class:`int` and :class:`float`.
Args:
data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to
be converted.
"""
if isinstance(data, torch.Tensor):
return data
elif isinstance(data, np.ndarray):
return torch.from_numpy(data)
elif isinstance(data, Sequence) and not mmcv.is_str(data):
return torch.tensor(data)
elif isinstance(data, int):
return torch.LongTensor([data])
elif isinstance(data, float):
return torch.FloatTensor([data])
else:
raise TypeError(f'type {type(data)} cannot be converted to tensor.')
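A usage sketch for to_tensor, assuming numpy is imported as np and the function above is in scope:

import numpy as np

print(to_tensor(np.zeros((2, 3))).shape)  # torch.Size([2, 3])
print(to_tensor([1, 2, 3]))               # tensor([1, 2, 3])
print(to_tensor(5))                       # tensor([5])
print(to_tensor(0.5))                     # tensor([0.5000])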
Example 5: random_choice
# Required import: import torch [as alias]
# Or: from torch import tensor [as alias]
def random_choice(gallery, num):
"""Randomly select some elements from the gallery.
    If `gallery` is a Tensor, the returned indices will be a Tensor;
    if `gallery` is an ndarray or list, the returned indices will be an
    ndarray.
    Args:
        gallery (Tensor | ndarray | list): The pool of indices.
        num (int): The expected number of sampled indices.
Returns:
Tensor or ndarray: sampled indices.
"""
assert len(gallery) >= num
is_tensor = isinstance(gallery, torch.Tensor)
if not is_tensor:
gallery = torch.tensor(
gallery, dtype=torch.long, device=torch.cuda.current_device())
perm = torch.randperm(gallery.numel(), device=gallery.device)[:num]
rand_inds = gallery[perm]
if not is_tensor:
rand_inds = rand_inds.cpu().numpy()
return rand_inds
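A usage sketch. Note that the non-tensor branch above calls torch.cuda.current_device(), so passing a list or ndarray requires a CUDA machine; a Tensor input stays on its own device:

pool = torch.arange(10)
picked = random_choice(pool, 3)  # three distinct elements drawn from the pool
print(picked.shape)  # torch.Size([3])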
Example 6: test_strides
# Required import: import torch [as alias]
# Or: from torch import tensor [as alias]
def test_strides():
from mmdet.core import AnchorGenerator
# Square strides
self = AnchorGenerator([10], [1.], [1.], [10])
anchors = self.grid_anchors([(2, 2)], device='cpu')
expected_anchors = torch.tensor([[-5., -5., 5., 5.], [5., -5., 15., 5.],
[-5., 5., 5., 15.], [5., 5., 15., 15.]])
assert torch.equal(anchors[0], expected_anchors)
# Different strides in x and y direction
self = AnchorGenerator([(10, 20)], [1.], [1.], [10])
anchors = self.grid_anchors([(2, 2)], device='cpu')
expected_anchors = torch.tensor([[-5., -5., 5., 5.], [5., -5., 15., 5.],
[-5., 15., 5., 25.], [5., 15., 15., 25.]])
assert torch.equal(anchors[0], expected_anchors)
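For the rectangular case, the base anchor is still [-5., -5., 5., 5.]; the x-stride of 10 shifts the second column of anchors to x-coordinates 5 and 15, while the y-stride of 20 shifts the second row to y-coordinates 15 and 25, exactly as in the expected tensor.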
Example 7: test_ce_loss
# Required import: import torch [as alias]
# Or: from torch import tensor [as alias]
def test_ce_loss():
# use_mask and use_sigmoid cannot be true at the same time
with pytest.raises(AssertionError):
loss_cfg = dict(
type='CrossEntropyLoss',
use_mask=True,
use_sigmoid=True,
loss_weight=1.0)
build_loss(loss_cfg)
# test loss with class weights
loss_cls_cfg = dict(
type='CrossEntropyLoss',
use_sigmoid=False,
class_weight=[0.8, 0.2],
loss_weight=1.0)
loss_cls = build_loss(loss_cls_cfg)
fake_pred = torch.Tensor([[100, -100]])
fake_label = torch.Tensor([1]).long()
assert torch.allclose(loss_cls(fake_pred, fake_label), torch.tensor(40.))
loss_cls_cfg = dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)
loss_cls = build_loss(loss_cls_cfg)
assert torch.allclose(loss_cls(fake_pred, fake_label), torch.tensor(200.))
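For reference on the expected values: with logits [100, -100] and label 1, the cross-entropy is logsumexp([100, -100]) - (-100) ≈ 100 + 100 = 200, and the class weight of 0.2 for class 1 scales this to 40.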
Example 8: collate_fn
# Required import: import torch [as alias]
# Or: from torch import tensor [as alias]
def collate_fn(queries, tokenizer, sample, max_seq_len=None):
token_id_seqs = [[1] + tokenizer(x, **sample) + [2] for x in queries]
length = [len(x) - 1 for x in token_id_seqs]
if max_seq_len is None or max_seq_len > max(length) + 1:
max_seq_len = max(length) + 1
padded = []
mask = []
for x in token_id_seqs:
x = x[:max_seq_len]
pad_length = max_seq_len - len(x)
padded.append(x + [0] * pad_length)
mask.append([1] * (len(x) - 1) + [0] * pad_length)
padded = torch.tensor(padded).t().contiguous()
length = torch.tensor(length)
mask = torch.tensor(mask).t().contiguous()
return padded[:-1], padded[1:], length, mask
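A usage sketch with a toy whitespace tokenizer (hypothetical; ids 1 and 2 act as BOS/EOS and 0 as padding, as the function assumes):

toy_ids = {'hello': 3, 'world': 4}
tokenizer = lambda s: [toy_ids[w] for w in s.split()]
inp, tgt, length, mask = collate_fn(['hello world', 'hello'], tokenizer, {})
print(inp.shape, tgt.shape, mask.shape)  # all (max_seq_len - 1, batch) = (3, 2)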
Example 9: process_embedding
# Required import: import torch [as alias]
# Or: from torch import tensor [as alias]
def process_embedding(self, embedding,
residue_reduction=True, protein_reduction=False):
    '''
    The direct output of ELMo has shape (3, L, 1024), with L being the protein's
    length, 3 being the number of layers used to train SeqVec (1 CharCNN, 2 LSTMs)
    and 1024 being a hyperparameter chosen to describe each amino acid.
    When a representation at the residue level is required, sum
    over the first dimension, resulting in a tensor of size (L, 1024).
    To reduce each protein to a fixed-size vector regardless of its
    length, additionally average over the length dimension L.
    '''
embedding = torch.tensor(embedding)
if residue_reduction:
embedding = embedding.sum(dim=0)
elif protein_reduction:
embedding = embedding.sum(dim=0).mean(dim=0)
return embedding.cpu().detach().numpy()
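A shape sketch with a random stand-in for a real SeqVec/ELMo embedding (model is a hypothetical instance of the surrounding class):

import numpy as np

fake = np.random.rand(3, 50, 1024).astype(np.float32)  # (layers, L, 1024)
print(model.process_embedding(fake).shape)  # (50, 1024)
print(model.process_embedding(fake, residue_reduction=False,
                              protein_reduction=True).shape)  # (1024,)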
Example 10: get_params
# Required import: import torch [as alias]
# Or: from torch import tensor [as alias]
def get_params():
def _one(shape):
ts = torch.tensor(np.random.normal(0, 0.01, size=shape), device=device, dtype=torch.float32)
return torch.nn.Parameter(ts, requires_grad=True)
def _three():
return (_one((num_inputs, num_hiddens)),
_one((num_hiddens, num_hiddens)),
torch.nn.Parameter(torch.zeros(num_hiddens, device=device, dtype=torch.float32), requires_grad=True))
    W_xz, W_hz, b_z = _three()  # update gate parameters
    W_xr, W_hr, b_r = _three()  # reset gate parameters
    W_xh, W_hh, b_h = _three()  # candidate hidden state parameters
    # output layer parameters
W_hq = _one((num_hiddens, num_outputs))
b_q = torch.nn.Parameter(torch.zeros(num_outputs, device=device, dtype=torch.float32), requires_grad=True)
return nn.ParameterList([W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q])
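For context, one GRU step as these parameters are typically consumed (a sketch following the standard gating equations, not code from the original source):

def gru_step(X, H, params):
    W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params
    Z = torch.sigmoid(X @ W_xz + H @ W_hz + b_z)           # update gate
    R = torch.sigmoid(X @ W_xr + H @ W_hr + b_r)           # reset gate
    H_tilda = torch.tanh(X @ W_xh + (R * H) @ W_hh + b_h)  # candidate state
    H = Z * H + (1 - Z) * H_tilda
    return H @ W_hq + b_q, H                               # output, new state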
Example 11: data_iter_random
# Required import: import torch [as alias]
# Or: from torch import tensor [as alias]
def data_iter_random(corpus_indices, batch_size, num_steps, device=None):
    # Subtract 1 because the output sequence Y is the input sequence X shifted one step forward
num_examples = (len(corpus_indices) - 1) // num_steps
epoch_size = num_examples // batch_size
example_indices = list(range(num_examples))
random.shuffle(example_indices)
    # Return a sequence of length num_steps starting from pos
def _data(pos):
return corpus_indices[pos: pos + num_steps]
if device is None:
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
for i in range(epoch_size):
        # Read batch_size random examples each time
i = i * batch_size
batch_indices = example_indices[i: i + batch_size]
X = [_data(j * num_steps) for j in batch_indices]
Y = [_data(j * num_steps + 1) for j in batch_indices]
yield torch.tensor(X, dtype=torch.float32, device=device), torch.tensor(Y, dtype=torch.float32, device=device)
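A usage sketch on a toy corpus of token indices; both yielded tensors have shape (batch_size, num_steps):

corpus = list(range(30))
for X, Y in data_iter_random(corpus, batch_size=2, num_steps=6):
    print(X.shape, Y.shape)  # torch.Size([2, 6]) torch.Size([2, 6])
    break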
Example 12: predict_rnn_pytorch
# Required import: import torch [as alias]
# Or: from torch import tensor [as alias]
def predict_rnn_pytorch(prefix, num_chars, model, vocab_size, device, idx_to_char,
char_to_idx):
state = None
    output = [char_to_idx[prefix[0]]]  # output will record the prefix plus the generated characters
for t in range(num_chars + len(prefix) - 1):
X = torch.tensor([output[-1]], device=device).view(1, 1)
if state is not None:
if isinstance(state, tuple): # LSTM, state:(h, c)
state = (state[0].to(device), state[1].to(device))
else:
state = state.to(device)
        (Y, state) = model(X, state)  # the forward pass needs no explicit model parameters
if t < len(prefix) - 1:
output.append(char_to_idx[prefix[t + 1]])
else:
output.append(int(Y.argmax(dim=1).item()))
return ''.join([idx_to_char[i] for i in output])
Example 13: generate_dataset
# Required import: import torch [as alias]
# Or: from torch import tensor [as alias]
def generate_dataset(true_w, true_b):
num_examples = 1000
features = torch.tensor(np.random.normal(0, 1, (num_examples, num_inputs)), dtype=torch.float)
    # ground-truth labels
    labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
    # add Gaussian noise
    labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)
    # plot the distribution as a sanity check
plt.scatter(features[:, 1].numpy(), labels.numpy(), 1)
plt.show()
return features, labels
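A usage sketch with the classic synthetic-regression settings (assumes num_inputs = 2, which the feature indexing above requires, and that np, torch and plt are already imported):

true_w, true_b = [2, -3.4], 4.2
features, labels = generate_dataset(true_w, true_b)
print(features.shape, labels.shape)  # torch.Size([1000, 2]) torch.Size([1000])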
Example 14: batch_loss
# Required import: import torch [as alias]
# Or: from torch import tensor [as alias]
def batch_loss(encoder, decoder, X, Y, loss):
batch_size = X.shape[0]
enc_state = encoder.begin_state()
enc_outputs, enc_state = encoder(X, enc_state)
    # Initialize the decoder's hidden state with the encoder's final state
    dec_state = decoder.begin_state(enc_state)
    # The decoder's input at the initial time step is BOS
    dec_input = torch.tensor([out_vocab.stoi[BOS]] * batch_size)
    # Use the mask variable to ignore the loss at positions whose label is the padding token PAD
    mask, num_not_pad_tokens = torch.ones(batch_size,), 0
l = torch.tensor([0.0])
for y in Y.permute(1,0): # Y shape: (batch, seq_len)
dec_output, dec_state = decoder(dec_input, dec_state, enc_outputs)
l = l + (mask * loss(dec_output, y)).sum()
        dec_input = y  # teacher forcing
num_not_pad_tokens += mask.sum().item()
        # Set the mask to 0 at PAD positions. (The original text used
        # y != out_vocab.stoi[EOS] here, which appears to be a mistake.)
        mask = mask * (y != out_vocab.stoi[PAD]).float()
return l / num_not_pad_tokens
Example 15: translate
# Required import: import torch [as alias]
# Or: from torch import tensor [as alias]
def translate(encoder, decoder, input_seq, max_seq_len):
in_tokens = input_seq.split(' ')
in_tokens += [EOS] + [PAD] * (max_seq_len - len(in_tokens) - 1)
enc_input = torch.tensor([[in_vocab.stoi[tk] for tk in in_tokens]]) # batch=1
enc_state = encoder.begin_state()
enc_output, enc_state = encoder(enc_input, enc_state)
dec_input = torch.tensor([out_vocab.stoi[BOS]])
dec_state = decoder.begin_state(enc_state)
output_tokens = []
for _ in range(max_seq_len):
dec_output, dec_state = decoder(dec_input, dec_state, enc_output)
pred = dec_output.argmax(dim=1)
pred_token = out_vocab.itos[int(pred.item())]
        if pred_token == EOS:  # the output sequence is complete once EOS is predicted at any time step
break
else:
output_tokens.append(pred_token)
dec_input = pred
return output_tokens