This article collects typical usage examples of the torch.LongTensor method in Python. If you have been wondering how exactly torch.LongTensor is used, or what real calls to torch.LongTensor look like, the hand-picked code examples below should help. You can also browse further usage examples from the torch module, to which this method belongs.
A total of 15 code examples of the torch.LongTensor method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
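Before the project-level examples, here is a minimal, self-contained sketch (not drawn from any of the projects below) of what torch.LongTensor does: it builds a 64-bit integer tensor, the dtype PyTorch expects for indices and class labels.

import torch

# torch.LongTensor creates an int64 tensor from a Python list.
idx = torch.LongTensor([2, 0, 1])
print(idx.dtype)                                  # torch.int64

# The modern, equivalent spelling is torch.tensor with an explicit dtype.
idx2 = torch.tensor([2, 0, 1], dtype=torch.long)
print(torch.equal(idx, idx2))                     # True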
Example 1: shuffle_sequences
# Required import: import torch [optionally under an alias]
# Or: from torch import LongTensor [optionally under an alias]
def shuffle_sequences(self, split="train", keys=None):
    if keys is None:
        keys = self.data[split].keys()
    for key in keys:
        if key in ["positive", "negative"]:
            continue
        # Build a random permutation of the example indices for this key.
        idxs = list(range(len(self.data[split][key])))
        random.shuffle(idxs)
        # Reorder the tensor of sequences along dim 0 with the permutation.
        self.sequences[split][key] = \
            self.sequences[split][key].index_select(
                0, torch.LongTensor(idxs))
        # Apply the same permutation to the raw data and the masks.
        temp = [self.data[split][key][i] for i in idxs]
        self.data[split][key] = temp
        temp = [self.masks[split][key][i] for i in idxs]
        self.masks[split][key] = temp
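As a quick illustration of the permutation trick used above (a standalone sketch, not part of the project this example comes from), index_select with a shuffled LongTensor reorders the rows of a tensor along dimension 0:

import random
import torch

data = torch.arange(12).view(4, 3)            # four "sequences" of length 3
idxs = list(range(data.size(0)))
random.shuffle(idxs)                          # e.g. [2, 0, 3, 1]
shuffled = data.index_select(0, torch.LongTensor(idxs))
print(idxs)
print(shuffled)                               # rows of `data` in the shuffled order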
Example 2: set_conceptnet_inputs
# Required import: import torch [optionally under an alias]
# Or: from torch import LongTensor [optionally under an alias]
def set_conceptnet_inputs(input_event, relation, text_encoder, max_e1, max_r, force):
    abort = False

    e1_tokens, rel_tokens, _ = data.conceptnet_data.do_example(
        text_encoder, input_event, relation, None)

    if len(e1_tokens) > max_e1:
        if force:
            XMB = torch.zeros(1, len(e1_tokens) + max_r).long().to(cfg.device)
        else:
            XMB = torch.zeros(1, max_e1 + max_r).long().to(cfg.device)
            return {}, True
    else:
        XMB = torch.zeros(1, max_e1 + max_r).long().to(cfg.device)

    # Copy the subject and relation token ids into fixed slots of the buffer.
    XMB[:, :len(e1_tokens)] = torch.LongTensor(e1_tokens)
    XMB[:, max_e1:max_e1 + len(rel_tokens)] = torch.LongTensor(rel_tokens)

    batch = {}
    batch["sequences"] = XMB
    batch["attention_mask"] = data.conceptnet_data.make_attention_mask(XMB)

    return batch, abort
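The pattern above, allocating a zero-filled long tensor and copying variable-length token ids into fixed slots, can be reproduced with plain PyTorch. This is a hypothetical sketch with made-up token ids, not the project's actual data pipeline:

import torch

max_e1, max_r = 10, 5
e1_tokens = [101, 7592, 2088]      # hypothetical subject token ids
rel_tokens = [201, 202]            # hypothetical relation token ids

XMB = torch.zeros(1, max_e1 + max_r).long()
XMB[:, :len(e1_tokens)] = torch.LongTensor(e1_tokens)
XMB[:, max_e1:max_e1 + len(rel_tokens)] = torch.LongTensor(rel_tokens)

# Positions that stayed 0 act as padding; a simple mask can be derived from them.
attention_mask = (XMB != 0).float()
print(XMB)
print(attention_mask)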
Example 3: pose_inv_full
# Required import: import torch [optionally under an alias]
# Or: from torch import LongTensor [optionally under an alias]
def pose_inv_full(pose):
    '''
    Invert a batch of 2x3 affine transformer matrices.
    param pose: N x 6
    '''
    N, _ = pose.size()
    b = pose.view(N, 2, 3)[:, :, 2:]
    # A^{-1}: compute the determinant, then the adjugate via index_select.
    determinant = (pose[:, 0] * pose[:, 4] - pose[:, 1] * pose[:, 3] + 1e-8).view(N, 1)
    indices = Variable(torch.LongTensor([4, 1, 3, 0]).cuda())
    scale = Variable(torch.Tensor([1, -1, -1, 1]).cuda())
    A_inv = torch.index_select(pose, 1, indices) * scale / determinant
    A_inv = A_inv.view(N, 2, 2)
    # b' = -A^{-1} b
    b_inv = - A_inv.matmul(b).view(N, 2, 1)
    transformer_inv = torch.cat([A_inv, b_inv], dim=2)
    return transformer_inv
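To see why selecting indices [4, 1, 3, 0] and scaling by [1, -1, -1, 1] yields the inverse of the 2x2 block, here is a small numeric check against torch.inverse (a standalone sketch, assuming a batch of poses laid out as [a, b, tx, c, d, ty]):

import torch

pose = torch.tensor([[1.2, 0.3, 5.0,
                      -0.4, 0.9, -2.0]])          # N x 6
N = pose.size(0)
det = (pose[:, 0] * pose[:, 4] - pose[:, 1] * pose[:, 3]).view(N, 1)

# adj(A) / det(A): swap the diagonal, negate the off-diagonal.
indices = torch.LongTensor([4, 1, 3, 0])
scale = torch.tensor([1.0, -1.0, -1.0, 1.0])
A_inv = (torch.index_select(pose, 1, indices) * scale / det).view(N, 2, 2)

A = pose.view(N, 2, 3)[:, :, :2]
print(torch.allclose(A_inv, torch.inverse(A), atol=1e-5))   # True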
Example 4: to_tensor
# Required import: import torch [optionally under an alias]
# Or: from torch import LongTensor [optionally under an alias]
def to_tensor(data):
    """Convert objects of various python types to :obj:`torch.Tensor`.

    Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
    :class:`Sequence`, :class:`int` and :class:`float`.

    Args:
        data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to
            be converted.
    """
    if isinstance(data, torch.Tensor):
        return data
    elif isinstance(data, np.ndarray):
        return torch.from_numpy(data)
    elif isinstance(data, Sequence) and not mmcv.is_str(data):
        return torch.tensor(data)
    elif isinstance(data, int):
        return torch.LongTensor([data])
    elif isinstance(data, float):
        return torch.FloatTensor([data])
    else:
        raise TypeError(f'type {type(data)} cannot be converted to tensor.')
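A short usage sketch of the same dispatch logic, using plain torch calls rather than the helper itself (which depends on mmcv.is_str):

import numpy as np
import torch

print(torch.from_numpy(np.array([1.0, 2.0])))     # shares memory with the ndarray
print(torch.tensor([1, 2, 3]))                    # from a Python sequence
print(torch.LongTensor([7]))                      # int   -> 1-element int64 tensor
print(torch.FloatTensor([0.5]))                   # float -> 1-element float32 tensor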
Example 5: gen_base_anchors
# Required import: import torch [optionally under an alias]
# Or: from torch import LongTensor [optionally under an alias]
def gen_base_anchors(self):
    """Generate base anchors.

    Returns:
        list(torch.Tensor): Base anchors of a feature grid in multiple
            feature levels.
    """
    multi_level_base_anchors = []
    for i, base_size in enumerate(self.base_sizes):
        base_anchors = self.gen_single_level_base_anchors(
            base_size,
            scales=self.scales[i],
            ratios=self.ratios[i],
            center=self.centers[i])
        # Select and reorder base anchors with a LongTensor index:
        # rows [0, R, 1, ..., R-1], where R = len(self.ratios[i]).
        indices = list(range(len(self.ratios[i])))
        indices.insert(1, len(indices))
        base_anchors = torch.index_select(base_anchors, 0,
                                          torch.LongTensor(indices))
        multi_level_base_anchors.append(base_anchors)
    return multi_level_base_anchors
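A standalone sketch of the index-reorder pattern used above, on a toy tensor rather than actual anchors:

import torch

anchors = torch.arange(8).view(4, 2)      # pretend these are 4 base anchors
indices = list(range(3))                  # e.g. 3 ratios -> [0, 1, 2]
indices.insert(1, len(indices))           # -> [0, 3, 1, 2]
reordered = torch.index_select(anchors, 0, torch.LongTensor(indices))
print(reordered)                          # rows 0, 3, 1, 2 of `anchors`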
Example 6: test_max_iou_assigner
# Required import: import torch [optionally under an alias]
# Or: from torch import LongTensor [optionally under an alias]
def test_max_iou_assigner():
    self = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
    )
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_labels = torch.LongTensor([2, 3])
    assign_result = self.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
    assert len(assign_result.gt_inds) == 4
    assert len(assign_result.labels) == 4

    expected_gt_inds = torch.LongTensor([1, 0, 2, 0])
    assert torch.all(assign_result.gt_inds == expected_gt_inds)
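In these assertions, gt_inds holds one entry per predicted box; in mmdetection's convention, 0 marks a negative, -1 an ignored box, and a positive value is a 1-based index into gt_bboxes. The comparison against an expected LongTensor works as in this standalone sketch:

import torch

expected = torch.LongTensor([1, 0, 2, 0])
actual = torch.LongTensor([1, 0, 2, 0])

# `==` gives an element-wise bool tensor; torch.all reduces it to a single bool.
print(actual == expected)             # tensor([True, True, True, True])
print(torch.all(actual == expected))  # tensor(True)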
Example 7: test_max_iou_assigner_with_ignore
# Required import: import torch [optionally under an alias]
# Or: from torch import LongTensor [optionally under an alias]
def test_max_iou_assigner_with_ignore():
    self = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False,
    )
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [30, 32, 40, 42],
    ])
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_bboxes_ignore = torch.Tensor([
        [30, 30, 40, 40],
    ])
    assign_result = self.assign(
        bboxes, gt_bboxes, gt_bboxes_ignore=gt_bboxes_ignore)

    expected_gt_inds = torch.LongTensor([1, 0, 2, -1])
    assert torch.all(assign_result.gt_inds == expected_gt_inds)
Example 8: test_max_iou_assigner_with_empty_gt
# Required import: import torch [optionally under an alias]
# Or: from torch import LongTensor [optionally under an alias]
def test_max_iou_assigner_with_empty_gt():
    """Test corner case where an image might have no true detections."""
    self = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
    )
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([])
    assign_result = self.assign(bboxes, gt_bboxes)

    expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
    assert torch.all(assign_result.gt_inds == expected_gt_inds)
Example 9: test_approx_iou_assigner
# Required import: import torch [optionally under an alias]
# Or: from torch import LongTensor [optionally under an alias]
def test_approx_iou_assigner():
    self = ApproxMaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
    )
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    approxs_per_octave = 1
    approxs = bboxes
    squares = bboxes
    assign_result = self.assign(approxs, squares, approxs_per_octave,
                                gt_bboxes)

    expected_gt_inds = torch.LongTensor([1, 0, 2, 0])
    assert torch.all(assign_result.gt_inds == expected_gt_inds)
Example 10: test_approx_iou_assigner_with_empty_gt
# Required import: import torch [optionally under an alias]
# Or: from torch import LongTensor [optionally under an alias]
def test_approx_iou_assigner_with_empty_gt():
    """Test corner case where an image might have no true detections."""
    self = ApproxMaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
    )
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([])
    approxs_per_octave = 1
    approxs = bboxes
    squares = bboxes
    assign_result = self.assign(approxs, squares, approxs_per_octave,
                                gt_bboxes)

    expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
    assert torch.all(assign_result.gt_inds == expected_gt_inds)
Example 11: test_random_sampler_empty_pred
# Required import: import torch [optionally under an alias]
# Or: from torch import LongTensor [optionally under an alias]
def test_random_sampler_empty_pred():
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False,
    )
    bboxes = torch.empty(0, 4)
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_labels = torch.LongTensor([1, 2])
    assign_result = assigner.assign(bboxes, gt_bboxes, gt_labels=gt_labels)

    sampler = RandomSampler(
        num=10, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=True)
    sample_result = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)

    assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
    assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
Example 12: forward
# Required import: import torch [optionally under an alias]
# Or: from torch import LongTensor [optionally under an alias]
def forward(self, input):
    # input has shape (N, 4, 1, 1000); return the sequence concatenated with
    # its reverse complement (RC).
    invert_dims = [1, 3]
    input_bkup = input
    for idim in invert_dims:
        # Build descending indices to reverse the tensor along this dimension.
        idxs = [i for i in range(input.size(idim) - 1, -1, -1)]
        idxs_var = Variable(torch.LongTensor(idxs))
        if input.is_cuda:
            idxs_var = idxs_var.cuda()
        input = input.index_select(idim, idxs_var)
    input = torch.cat([input_bkup, input], dim=0)
    # Equivalent using numpy:
    # input = edit_tensor_in_numpy(
    #     input, lambda x: np.concatenate([x, x[:, ::-1, :, ::-1]], axis=0))
    return input
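Reversing a dimension with a descending LongTensor, as done above, is equivalent to torch.flip on recent PyTorch versions. A standalone sketch:

import torch

x = torch.arange(24).view(2, 3, 4)
idxs = torch.LongTensor(list(range(x.size(2) - 1, -1, -1)))   # [3, 2, 1, 0]
reversed_by_index = x.index_select(2, idxs)
reversed_by_flip = torch.flip(x, dims=[2])
print(torch.equal(reversed_by_index, reversed_by_flip))       # True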
Example 13: forward
# Required import: import torch [optionally under an alias]
# Or: from torch import LongTensor [optionally under an alias]
def forward(self, encoding, lengths):
    lengths = Variable(torch.LongTensor(lengths))
    if torch.cuda.is_available():
        lengths = lengths.cuda()
    if self.method == 'mean':
        encoding_pad = nn.utils.rnn.pack_padded_sequence(encoding, lengths.data.tolist(), batch_first=True)
        encoding = nn.utils.rnn.pad_packed_sequence(encoding_pad, batch_first=True, padding_value=0)[0]
        lengths = lengths.float().view(-1, 1)
        return encoding.sum(1) / lengths, None
    elif self.method == 'max':
        return encoding.max(1)  # [bsz, in_dim], [bsz, in_dim] (position)
    elif self.method == 'attn':
        size = encoding.size()  # [bsz, len, in_dim]
        x_flat = encoding.contiguous().view(-1, size[2])  # [bsz*len, in_dim]
        hbar = self.tanh(self.ws1(x_flat))  # [bsz*len, attn_hid]
        alphas = self.ws2(hbar).view(size[0], size[1])  # [bsz, len]
        alphas = nn.utils.rnn.pack_padded_sequence(alphas, lengths.data.tolist(), batch_first=True)
        alphas = nn.utils.rnn.pad_packed_sequence(alphas, batch_first=True, padding_value=-1e8)[0]
        alphas = functional.softmax(alphas, dim=1)  # [bsz, len]
        alphas = alphas.view(size[0], 1, size[1])  # [bsz, 1, len]
        return torch.bmm(alphas, encoding).squeeze(1), alphas  # [bsz, in_dim], [bsz, len]
    elif self.method == 'last':
        return torch.cat([encoding[i][lengths[i] - 1] for i in range(encoding.size(0))], dim=0), None
Example 14: get_keda_data
# Required import: import torch [optionally under an alias]
# Or: from torch import LongTensor [optionally under an alias]
def get_keda_data(dataset_dir, r):
    wav_paths = []
    texts = []
    wav_dirs = ['nannan', 'xiaofeng', 'donaldduck']
    csv_paths = ['transcript-nannan.csv', 'transcript-xiaofeng.csv', 'transcript-donaldduck.csv']
    for wav_dir, csv_path in zip(wav_dirs, csv_paths):
        csv = open(os.path.join(dataset_dir, csv_path), 'r')
        for line in csv.readlines():
            items = line.strip().split('|')
            wav_paths.append(os.path.join(dataset_dir, wav_dir, items[0] + '.wav'))
            # Normalize the transcript, append the end symbol 'E', and map
            # characters to integer ids stored as a LongTensor.
            text = text_normalize(items[1]) + 'E'
            text = [hp.char2idx[c] for c in text]
            text = torch.Tensor(text).type(torch.LongTensor)
            texts.append(text)
        csv.close()
    for wav in wav_paths[-20:]:
        print(wav)
    return wav_paths[r], texts[r]
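The conversion torch.Tensor(text).type(torch.LongTensor) first builds a float32 tensor and then casts it to int64; the same result is usually obtained directly with torch.LongTensor or torch.tensor(..., dtype=torch.long). A standalone sketch with a hypothetical character-to-index map (not the project's actual vocabulary):

import torch

char2idx = {'n': 0, 'i': 1, 'h': 2, 'a': 3, 'o': 4, 'E': 5}   # hypothetical vocab
text = 'nihaoE'
ids = [char2idx[c] for c in text]

a = torch.Tensor(ids).type(torch.LongTensor)   # float32, then cast to int64
b = torch.LongTensor(ids)                      # int64 directly
c = torch.tensor(ids, dtype=torch.long)        # modern equivalent
print(torch.equal(a, b), torch.equal(b, c))    # True True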
Example 15: get_aishell_data
# Required import: import torch [optionally under an alias]
# Or: from torch import LongTensor [optionally under an alias]
def get_aishell_data(data_dir, r):
    path = os.path.join(data_dir, 'transcript.txt')
    data_dir = os.path.join(data_dir, 'wav', 'train')
    wav_paths = []
    texts = []
    with open(path, 'r') as f:
        for line in f.readlines():
            items = line.strip().split('|')
            wav_paths.append(os.path.join(data_dir, items[0] + '.wav'))
            text = items[1]
            text = text_normalize(text) + 'E'
            text = [hp.char2idx[c] for c in text]
            text = torch.Tensor(text).type(torch.LongTensor)
            texts.append(text)
    for wav, txt in zip(wav_paths[-20:], texts[-20:]):
        print(wav, txt)
    return wav_paths[r], texts[r]