This article collects typical usage examples of the torch.cuda.FloatTensor method in Python. If you are unsure what cuda.FloatTensor does or how to use it, the curated code examples below may help. You can also explore other usages of the containing module, torch.cuda.
The following shows 15 code examples of the cuda.FloatTensor method, sorted by popularity by default.
Example 1: interpolation
# Required import: from torch import cuda [as alias]
# Or: from torch.cuda import FloatTensor [as alias]
def interpolation(self, uvm, image, index):
    # Select the u/v flow channels for this index and split them into two maps.
    u, v = torch.index_select(uvm, dim=1, index=LongTensor([0 + 3*index,
                              1 + 3*index])).permute(0, 2, 3, 1).split(1, dim=3)
    row_num = FloatTensor()
    col_num = FloatTensor()
    im_size = image.shape[2:4]
    torch.arange(im_size[0], out=row_num)
    torch.arange(im_size[1], out=col_num)
    row_num = row_num.view(1, im_size[0], 1, 1)
    col_num = col_num.view(1, 1, im_size[1], 1)
    # Convert absolute pixel coordinates into the [-1, 1] range expected by grid_sample.
    x_norm = 2*(u + col_num)/(im_size[1] - 1) - 1
    y_norm = 2*(v + row_num)/(im_size[0] - 1) - 1
    xy_norm = torch.clamp(torch.cat((x_norm, y_norm), dim=3), -1, 1)
    interp = nn.functional.grid_sample(image, xy_norm)
    w = torch.index_select(uvm, dim=1, index=LongTensor([3*index + 2])) + 0.5
    return interp, w, u, v
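For reference, the sampling grid built above follows the coordinate convention of torch.nn.functional.grid_sample: sample locations normalized to [-1, 1] per axis. A minimal standalone sketch, independent of the class this method belongs to and using CPU tensors only, that warps an image with an identity grid:

import torch
import torch.nn.functional as F

image = torch.rand(1, 3, 8, 8)                          # (N, C, H, W)
theta = torch.tensor([[[1., 0., 0.], [0., 1., 0.]]])    # identity affine transform
grid = F.affine_grid(theta, size=(1, 3, 8, 8), align_corners=True)   # (N, H, W, 2) in [-1, 1]
warped = F.grid_sample(image, grid, align_corners=True)  # reproduces `image`
print(warped.shape)                                      # torch.Size([1, 3, 8, 8])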
Example 2: visualize_image
# Required import: from torch import cuda [as alias]
# Or: from torch.cuda import FloatTensor [as alias]
def visualize_image(tensor, name, label=None, env='main', w=250, h=250,
                    update_window_without_label=False):
    tensor = tensor.cpu() if isinstance(tensor, CUDATensor) else tensor
    title = name + ('-{}'.format(label) if label is not None else '')
    _WINDOW_CASH[title] = _vis(env).image(
        tensor.numpy(), win=_WINDOW_CASH.get(title),
        opts=dict(title=title, width=w, height=h)
    )
    # This is useful when you want to maintain the most recent images.
    if update_window_without_label:
        _WINDOW_CASH[name] = _vis(env).image(
            tensor.numpy(), win=_WINDOW_CASH.get(name),
            opts=dict(title=name, width=w, height=h)
        )
Example 3: visualize_images
# Required import: from torch import cuda [as alias]
# Or: from torch.cuda import FloatTensor [as alias]
def visualize_images(tensor, name, label=None, env='main', w=400, h=400,
                     update_window_without_label=False):
    tensor = tensor.cpu() if isinstance(tensor, CUDATensor) else tensor
    title = name + ('-{}'.format(label) if label is not None else '')
    _WINDOW_CASH[title] = _vis(env).images(
        tensor.numpy(), win=_WINDOW_CASH.get(title), nrow=6,
        opts=dict(title=title, width=w, height=h)
    )
    # This is useful when you want to maintain the most recent images.
    if update_window_without_label:
        _WINDOW_CASH[name] = _vis(env).images(
            tensor.numpy(), win=_WINDOW_CASH.get(name), nrow=6,
            opts=dict(title=name, width=w, height=h)
        )
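A hedged usage sketch for the helper above; it assumes a running visdom server and that _vis and _WINDOW_CASH are defined in the same module as in the snippet:

import torch

batch = torch.rand(16, 3, 32, 32)                            # 16 RGB thumbnails, CUDA tensors are moved to CPU internally
visualize_images(batch, name='samples', label='epoch-1')     # opens or updates a visdom window titled 'samples-epoch-1'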
Example 4: knn_indices_func_cpu
# Required import: from torch import cuda [as alias]
# Or: from torch.cuda import FloatTensor [as alias]
def knn_indices_func_cpu(rep_pts : FloatTensor,  # (N, pts, dim)
                         pts : FloatTensor,      # (N, x, dim)
                         K : int, D : int
                         ) -> LongTensor:        # (N, pts, K)
    """
    CPU-based indexing function based on K-nearest-neighbors search.
    :param rep_pts: Representative points.
    :param pts: Point cloud to get indices from.
    :param K: Number of nearest neighbors to collect.
    :param D: "Spread" of neighboring points.
    :return: Array of indices, P_idx, into pts such that pts[n][P_idx[n],:]
    is the set of K nearest neighbors for the representative points in pts[n].
    """
    rep_pts = rep_pts.data.numpy()
    pts = pts.data.numpy()
    region_idx = []
    for n, p in enumerate(rep_pts):
        P_particular = pts[n]
        # Keyword argument keeps this compatible with newer scikit-learn releases.
        nbrs = NearestNeighbors(n_neighbors=D*K + 1, algorithm="ball_tree").fit(P_particular)
        indices = nbrs.kneighbors(p)[1]
        region_idx.append(indices[:, 1::D])
    region_idx = torch.from_numpy(np.stack(region_idx, axis=0))
    return region_idx
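A hypothetical usage sketch, with shapes matching the inline comments above; it assumes NearestNeighbors from scikit-learn and numpy are imported in the same module:

import torch

pts = torch.rand(2, 128, 3)        # batch of 2 point clouds, 128 points each, 3-D
rep_pts = pts[:, :16, :]           # 16 representative points per cloud
idx = knn_indices_func_cpu(rep_pts, pts, K=8, D=2)
print(idx.shape)                    # torch.Size([2, 16, 8])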
Example 5: visualize_image
# Required import: from torch import cuda [as alias]
# Or: from torch.cuda import FloatTensor [as alias]
def visualize_image(vis, tensor, name, label=None, w=250, h=250,
                    update_window_without_label=False):
    tensor = tensor.cpu() if isinstance(tensor, CUDATensor) else tensor
    title = name + ('-{}'.format(label) if label is not None else '')
    _WINDOW_CASH[title] = vis.image(
        tensor.numpy(), win=_WINDOW_CASH.get(title),
        opts=dict(title=title, width=w, height=h)
    )
    # This is useful when you want to maintain the most recent images.
    if update_window_without_label:
        _WINDOW_CASH[name] = vis.image(
            tensor.numpy(), win=_WINDOW_CASH.get(name),
            opts=dict(title=name, width=w, height=h)
        )
Example 6: visualize_images
# Required import: from torch import cuda [as alias]
# Or: from torch.cuda import FloatTensor [as alias]
def visualize_images(vis, tensor, name, label=None, w=250, h=250,
                     update_window_without_label=False):
    tensor = tensor.cpu() if isinstance(tensor, CUDATensor) else tensor
    title = name + ('-{}'.format(label) if label is not None else '')
    _WINDOW_CASH[title] = vis.images(
        tensor.numpy(), win=_WINDOW_CASH.get(title),
        opts=dict(title=title, width=w, height=h)
    )
    # This is useful when you want to maintain the most recent images.
    if update_window_without_label:
        _WINDOW_CASH[name] = vis.images(
            tensor.numpy(), win=_WINDOW_CASH.get(name),
            opts=dict(title=name, width=w, height=h)
        )
Example 7: __init__
# Required import: from torch import cuda [as alias]
# Or: from torch.cuda import FloatTensor [as alias]
def __init__(self):
    super(VGG, self).__init__()
    # `pretrained=True` is the legacy torchvision API; newer releases use `weights=`.
    vgg = vgg19(pretrained=True)
    # ImageNet channel-wise mean/std, stored as (1, 3, 1, 1) CUDA tensors.
    self.vgg_mean = FloatTensor([[[[0.485]], [[0.456]], [[0.406]]]])
    self.vgg_std = FloatTensor([[[[0.229]], [[0.224]], [[0.225]]]])
    self.vgg_relu4_4 = vgg.features[:27]
Example 8: forward
# Required import: from torch import cuda [as alias]
# Or: from torch.cuda import FloatTensor [as alias]
def forward(self, input):
    # Normalize with the ImageNet mean/std before feeding VGG's relu4_4 slice.
    vgg_mean = FloatTensor([[[[0.485]], [[0.456]], [[0.406]]]])
    vgg_std = FloatTensor([[[[0.229]], [[0.224]], [[0.225]]]])
    return self.vgg_relu4_4((input - vgg_mean)/vgg_std)
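A hedged usage sketch combining Examples 7 and 8: extracting relu4_4 features from a batch of images in [0, 1]. It assumes a CUDA device is available, since the mean/std constants are torch.cuda.FloatTensor:

import torch

vgg = VGG().cuda().eval()
images = torch.rand(4, 3, 224, 224, device="cuda")    # RGB batch in [0, 1]
with torch.no_grad():
    feats = vgg(images)
print(feats.shape)     # torch.Size([4, 512, 28, 28]) for 224x224 inputs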
Example 9: visualize_kernel
# Required import: from torch import cuda [as alias]
# Or: from torch.cuda import FloatTensor [as alias]
def visualize_kernel(kernel, name, label=None, env='main', w=250, h=250,
                     update_window_without_label=False, compress_tensor=False):
    # Do not visualize kernels that do not exist.
    if kernel is None:
        return
    assert len(kernel.size()) in (2, 4)
    title = name + ('-{}'.format(label) if label is not None else '')
    kernel = kernel.cpu() if isinstance(kernel, CUDATensor) else kernel
    kernel_norm = kernel if len(kernel.size()) == 2 else (
        (kernel**2).mean(-1).mean(-1) if compress_tensor else
        kernel.view(
            kernel.size()[0] * kernel.size()[2],
            kernel.size()[1] * kernel.size()[3],
        )
    )
    kernel_norm = kernel_norm.abs()
    # Min-max normalize to [0, 1] before sending the image to visdom.
    visualized = (
        (kernel_norm - kernel_norm.min()) /
        (kernel_norm.max() - kernel_norm.min())
    ).numpy()
    _WINDOW_CASH[title] = _vis(env).image(
        visualized, win=_WINDOW_CASH.get(title),
        opts=dict(title=title, width=w, height=h)
    )
    # This is useful when you want to maintain the most recent images.
    if update_window_without_label:
        _WINDOW_CASH[name] = _vis(env).image(
            visualized, win=_WINDOW_CASH.get(name),
            opts=dict(title=name, width=w, height=h)
        )
Example 10: visualize_scalars
# Required import: from torch import cuda [as alias]
# Or: from torch.cuda import FloatTensor [as alias]
def visualize_scalars(scalars, names, title, iteration, env='main'):
    assert len(scalars) == len(names)
    # Convert scalar tensors to numpy arrays.
    scalars, names = list(scalars), list(names)
    scalars = [s.cpu() if isinstance(s, CUDATensor) else s for s in scalars]
    scalars = [s.numpy() if hasattr(s, 'numpy') else np.array([s]) for s in
               scalars]
    multi = len(scalars) > 1
    num = len(scalars)
    options = dict(
        fillarea=True,
        legend=names,
        width=400,
        height=400,
        xlabel='Iterations',
        ylabel=title,
        title=title,
        marginleft=30,
        marginright=30,
        marginbottom=80,
        margintop=30,
    )
    X = (
        np.column_stack(np.array([iteration] * num)) if multi else
        np.array([iteration] * num)
    )
    Y = np.column_stack(scalars) if multi else scalars[0]
    if title in _WINDOW_CASH:
        # Note: updateTrace was removed in newer visdom releases; the equivalent
        # there is _vis(env).line(X=X, Y=Y, win=..., update='append').
        _vis(env).updateTrace(X=X, Y=Y, win=_WINDOW_CASH[title], opts=options)
    else:
        _WINDOW_CASH[title] = _vis(env).line(X=X, Y=Y, opts=options)
Example 11: knn_indices_func_cpu
# Required import: from torch import cuda [as alias]
# Or: from torch.cuda import FloatTensor [as alias]
def knn_indices_func_cpu(rep_pts : FloatTensor,  # (N, pts, dim)
                         pts : FloatTensor,      # (N, x, dim)
                         K : int, D : int
                         ) -> LongTensor:        # (N, pts, K)
    """
    CPU-based indexing function based on K-nearest-neighbors search.
    :param rep_pts: Representative points.
    :param pts: Point cloud to get indices from.
    :param K: Number of nearest neighbors to collect.
    :param D: "Spread" of neighboring points.
    :return: Array of indices, P_idx, into pts such that pts[n][P_idx[n],:]
    is the set of K nearest neighbors for the representative points in pts[n].
    """
    # Move CUDA tensors to the CPU before handing them to scikit-learn.
    if rep_pts.is_cuda:
        rep_pts = rep_pts.cpu()
    if pts.is_cuda:
        pts = pts.cpu()
    rep_pts = rep_pts.data.numpy()
    pts = pts.data.numpy()
    region_idx = []
    for n, p in enumerate(rep_pts):
        P_particular = pts[n]
        nbrs = NearestNeighbors(n_neighbors=D*K + 1, algorithm="auto").fit(P_particular)
        indices = nbrs.kneighbors(p)[1]
        region_idx.append(indices[:, 1::D])
    region_idx = torch.from_numpy(np.stack(region_idx, axis=0))
    return region_idx
Example 12: knn_indices_func_approx
# Required import: from torch import cuda [as alias]
# Or: from torch.cuda import FloatTensor [as alias]
def knn_indices_func_approx(rep_pts : FloatTensor,  # (N, pts, dim)
                            pts : FloatTensor,      # (N, x, dim)
                            K : int, D : int
                            ) -> LongTensor:        # (N, pts, K)
    """
    Approximate CPU-based indexing function based on K-nearest-neighbors search.
    :param rep_pts: Representative points.
    :param pts: Point cloud to get indices from.
    :param K: Number of nearest neighbors to collect.
    :param D: "Spread" of neighboring points.
    :return: Array of indices, P_idx, into pts such that pts[n][P_idx[n],:]
    is the set of K nearest neighbors for the representative points in pts[n].
    """
    if rep_pts.is_cuda:
        rep_pts = rep_pts.cpu()
    if pts.is_cuda:
        pts = pts.cpu()
    rep_pts = rep_pts.data.numpy()
    pts = pts.data.numpy()
    region_idx = []
    for n, p in enumerate(rep_pts):
        P_particular = pts[n]
        # LSHForest was deprecated in scikit-learn 0.19 and removed in 0.21;
        # this example requires an older scikit-learn release.
        lshf = LSHForest(n_estimators=20, n_candidates=100, n_neighbors=D*K + 1)
        lshf.fit(P_particular)
        indices = lshf.kneighbors(p, return_distance=False)
        region_idx.append(indices[:, 1::D])
    # Stack the per-cloud indices and return, as in the CPU variant above (Example 11).
    region_idx = torch.from_numpy(np.stack(region_idx, axis=0))
    return region_idx
Example 13: knn_indices_func_gpu
# Required import: from torch import cuda [as alias]
# Or: from torch.cuda import FloatTensor [as alias]
def knn_indices_func_gpu(rep_pts : cuda.FloatTensor,  # (N, pts, dim)
                         pts : cuda.FloatTensor,      # (N, x, dim)
                         K : int, D : int
                         ) -> cuda.LongTensor:        # (N, pts, K)
    """
    GPU-based indexing function based on K-nearest-neighbors search.
    Very memory intensive, and thus suboptimal for large numbers of points.
    :param rep_pts: Representative points.
    :param pts: Point cloud to get indices from.
    :param K: Number of nearest neighbors to collect.
    :param D: "Spread" of neighboring points.
    :return: Array of indices, P_idx, into pts such that pts[n][P_idx[n],:]
    is the set of K nearest neighbors for the representative points in pts[n].
    """
    region_idx = []
    for n, qry in enumerate(rep_pts):
        # Half precision keeps the pairwise-distance matrix small.
        qry = qry.half()
        ref = pts[n].half()
        # Squared distances via ||q||^2 - 2 q.r + ||r||^2.
        r_A = torch.sum(qry * qry, dim=1, keepdim=True)
        r_B = torch.sum(ref * ref, dim=1, keepdim=True)
        dist2 = r_A - 2 * torch.matmul(qry, torch.t(ref)) + torch.t(r_B)
        _, inds = torch.topk(dist2, D*K + 1, dim=1, largest=False)
        region_idx.append(inds[:, 1::D])
    region_idx = torch.stack(region_idx, dim=0)
    return region_idx
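A hypothetical GPU usage sketch (assumes a CUDA device), with the same shapes as the CPU variant. The 1::D slice drops the closest match, which is typically the query point itself when the representative points are a subset of the cloud:

import torch

pts = torch.rand(2, 128, 3, device="cuda")
rep_pts = pts[:, :16, :]
idx = knn_indices_func_gpu(rep_pts, pts, K=8, D=2)
print(idx.shape, idx.device)    # torch.Size([2, 16, 8]) cuda:0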
Example 14: knn_indices_func_gpu
# Required import: from torch import cuda [as alias]
# Or: from torch.cuda import FloatTensor [as alias]
def knn_indices_func_gpu(rep_pts : cuda.FloatTensor,  # (N, pts, dim)
                         pts : cuda.FloatTensor,      # (N, x, dim)
                         k : int, d : int
                         ) -> cuda.LongTensor:        # (N, pts, K)
    """
    GPU-based indexing function based on K-nearest-neighbors search.
    Very memory intensive, and thus suboptimal for large numbers of points.
    :param rep_pts: Representative points.
    :param pts: Point cloud to get indices from.
    :param k: Number of nearest neighbors to collect.
    :param d: "Spread" of neighboring points.
    :return: Array of indices, P_idx, into pts such that pts[n][P_idx[n],:]
    is the set of k nearest neighbors for the representative points in pts[n].
    """
    region_idx = []
    for n, qry in enumerate(rep_pts):
        ref = pts[n]
        # Unpack sizes into fresh names so the k/d arguments are not shadowed.
        num_ref, dim = ref.size()
        num_qry, _ = qry.size()
        # Broadcast query and reference points and compute all pairwise squared distances.
        mref = ref.expand(num_qry, num_ref, dim)
        mqry = qry.expand(num_ref, num_qry, dim).transpose(0, 1)
        dist2 = torch.sum((mqry - mref)**2, 2).squeeze()
        _, inds = torch.topk(dist2, k*d + 1, dim=1, largest=False)
        region_idx.append(inds[:, 1::d])
    region_idx = torch.stack(region_idx, dim=0)
    return region_idx
Example 15: forward
# Required import: from torch import cuda [as alias]
# Or: from torch.cuda import FloatTensor [as alias]
def forward(self, batch_item_index, batch_x, batch_word_seq, batch_neighbor_index):
    z_1 = F.tanh(self.linear1(batch_x))
    # z_1 = F.dropout(z_1, self.drop_rate)
    z_rating = F.tanh(self.linear2(z_1))
    z_content = self.get_content_z(batch_word_seq)
    # Gate between the rating embedding and the content embedding.
    gate = F.sigmoid(z_rating.mm(self.gate_matrix1) + z_content.mm(self.gate_matrix2) + self.gate_bias)
    gated_embedding = gate * z_rating + (1 - gate) * z_content
    # Save the embedding for direct lookup.
    self.item_gated_embedding.weight[batch_item_index] = gated_embedding.data
    gated_neighbor_embedding = self.item_gated_embedding(batch_neighbor_index)
    # aug_gated_embedding: [256, 1, 50]
    aug_gated_embedding = torch.unsqueeze(gated_embedding, 1)
    score = torch.matmul(aug_gated_embedding, torch.unsqueeze(self.neighbor_attention, 0))
    # score: [256, 1, 480]
    score = torch.bmm(score, gated_neighbor_embedding.permute(0, 2, 1))
    # Zero scores would still contribute to the softmax, so mask them with -inf.
    score = torch.where(score == 0, T.FloatTensor([float('-inf')]), score)
    score = F.softmax(score, dim=2)
    # If an entire row is -inf, softmax produces NaN; replace NaN with 0.
    score = torch.where(score != score, T.FloatTensor([0]), score)
    gated_neighbor_embedding = torch.bmm(score, gated_neighbor_embedding)
    gated_neighbor_embedding = torch.squeeze(gated_neighbor_embedding, 1)
    # gated_embedding = F.dropout(gated_embedding, self.drop_rate)
    # gated_neighbor_embedding = F.dropout(gated_neighbor_embedding, self.drop_rate)
    z_3 = F.tanh(self.linear3(gated_embedding))
    # z_3 = F.dropout(z_3, self.drop_rate)
    z_3_neighbor = F.tanh(self.linear3(gated_neighbor_embedding))
    # z_3_neighbor = F.dropout(z_3_neighbor, self.drop_rate)
    y_pred = F.sigmoid(self.linear4(z_3) + z_3_neighbor.mm(self.linear4.weight.t()))
    return y_pred
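The -inf/NaN handling above is a general masked-softmax trick. A minimal standalone sketch, assuming (as the snippet does) that a score of exactly 0 marks an entry that should be excluded:

import torch
import torch.nn.functional as F

score = torch.tensor([[1.0, 0.0, 2.0],
                      [0.0, 0.0, 0.0]])                   # second row is entirely masked
masked = torch.where(score == 0, torch.tensor(float('-inf')), score)
attn = F.softmax(masked, dim=1)
attn = torch.where(attn != attn, torch.zeros_like(attn), attn)   # NaN -> 0
print(attn)   # [[0.2689, 0.0000, 0.7311], [0.0000, 0.0000, 0.0000]]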