本文整理匯總了Python中torch.from_numpy方法的典型用法代碼示例。如果您正苦於以下問題:Python torch.from_numpy方法的具體用法?Python torch.from_numpy怎麽用?Python torch.from_numpy使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類torch
的用法示例。
在下文中一共展示了torch.from_numpy方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: _anchor_target_layer
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import from_numpy [as 別名]
def _anchor_target_layer(self, rpn_cls_score):
    """Compute RPN training targets for the current image and cache them.

    Calls the (file-level) ``anchor_target_layer`` helper on CPU numpy
    data, converts each returned array to a CUDA ``Variable``, stores all
    of them in ``self._anchor_targets`` (mirrored into
    ``self._score_summaries``) and returns the long-typed label tensor.
    """
    targets = anchor_target_layer(
        rpn_cls_score.data,
        self._gt_boxes.data.cpu().numpy(),
        self._pseudo_proposals['gt_scores'].data.cpu().numpy(),
        self._im_info,
        self._feat_stride,
        self._anchors.data.cpu().numpy(),
        self._num_anchors)
    names = ('rpn_labels', 'rpn_bbox_targets', 'rpn_bbox_inside_weights',
             'rpn_bbox_outside_weights', 'rpn_loss_weights')
    for name, array in zip(names, targets):
        tensor = Variable(torch.from_numpy(array).float().cuda())
        if name == 'rpn_labels':
            # labels are class indices, so they must be long ints
            tensor = tensor.long()
        self._anchor_targets[name] = tensor
    for name in self._anchor_targets.keys():
        self._score_summaries[name] = self._anchor_targets[name]
    return self._anchor_targets['rpn_labels']
開發者ID:Sunarker,項目名稱:Collaborative-Learning-for-Weakly-Supervised-Object-Detection,代碼行數:24,代碼來源:network.py
示例2: run_batch
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import from_numpy [as 別名]
def run_batch(inputs, outputs_gt, model, criterion, optimizer, train):
    """Run one batch through ``model``; backprop only when ``train``.

    ``inputs``/``outputs_gt`` are numpy arrays; returns the scalar loss
    (old-PyTorch ``loss.data[0]`` convention, kept for compatibility
    with the file's Variable/volatile style).
    """
    if train:
        model.train()
    else:
        model.eval()
    # volatile=True disables autograd bookkeeping during validation
    batch_in = Variable(torch.from_numpy(inputs), volatile=not train)
    batch_gt = Variable(torch.from_numpy(outputs_gt))
    if GPU >= 0:
        batch_in = batch_in.cuda(GPU)
        batch_gt = batch_gt.cuda(GPU)
    if train:
        optimizer.zero_grad()
    loss = criterion(model(batch_in), batch_gt)
    if train:
        loss.backward()
        optimizer.step()
    return loss.data[0]
示例3: _get_area_ratio
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import from_numpy [as 別名]
def _get_area_ratio(self, pos_proposals, pos_assigned_gt_inds, gt_masks):
    """Compute area ratio of the gt mask inside the proposal and the gt
    mask of the corresponding instance."""
    if pos_proposals.size(0) == 0:
        # no positive proposals: empty result on the same device/dtype
        return pos_proposals.new_zeros((0, ))
    proposals_np = pos_proposals.cpu().numpy()
    gt_inds = pos_assigned_gt_inds.cpu().numpy()
    # mask areas of all gt instances, computed once (batch speedup)
    instance_areas = gt_masks.areas
    ratios = []
    for proposal, gt_ind in zip(proposals_np, gt_inds):
        gt_mask = gt_masks[gt_ind]
        # crop the gt mask to the (integer) proposal box
        bbox = proposal.astype(np.int32)
        cropped = gt_mask.crop(bbox)
        # epsilon guards against a zero-area instance mask
        ratios.append(cropped.areas[0] / (instance_areas[gt_ind] + 1e-7))
    return torch.from_numpy(np.stack(ratios)).float().to(pos_proposals.device)
示例4: __getitem__
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import from_numpy [as 別名]
def __getitem__(self, index):
img=self.adv_flat[self.sample_num,:]
if(self.shuff == False):
# shuff is true for non-pgd attacks
img = torch.from_numpy(np.reshape(img,(3,32,32)))
else:
img = torch.from_numpy(img).type(torch.FloatTensor)
target = np.argmax(self.adv_dict["adv_labels"],axis=1)[self.sample_num]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
self.sample_num = self.sample_num + 1
return img, target
示例5: __getitem__
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import from_numpy [as 別名]
def __getitem__(self, index):
img=self.adv_flat[self.sample_num,:]
if(self.transp == False):
# shuff is true for non-pgd attacks
img = torch.from_numpy(np.reshape(img,(28,28)))
else:
img = torch.from_numpy(img).type(torch.FloatTensor)
target = np.argmax(self.adv_dict["adv_labels"],axis=1)[self.sample_num]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
self.sample_num = self.sample_num + 1
return img, target
示例6: _zeros_embed
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import from_numpy [as 別名]
def _zeros_embed(self, embed_dict, words_dict):
"""
:param embed_dict:
:param words_dict:
"""
print("loading pre_train embedding by zeros for out of vocabulary.")
embeddings = np.zeros((int(self.words_count), int(self.dim)))
for word in words_dict:
if word in embed_dict:
embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word]], dtype='float32')
self.exact_count += 1
elif word.lower() in embed_dict:
embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word.lower()]], dtype='float32')
self.fuzzy_count += 1
else:
self.oov_count += 1
final_embed = torch.from_numpy(embeddings).float()
return final_embed
示例7: _nn_embed
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import from_numpy [as 別名]
def _nn_embed(self, embed_dict, words_dict):
"""
:param embed_dict:
:param words_dict:
"""
print("loading pre_train embedding by nn.Embedding for out of vocabulary.")
embed = nn.Embedding(int(self.words_count), int(self.dim))
init.xavier_uniform_(embed.weight.data)
embeddings = np.array(embed.weight.data)
for word in words_dict:
if word in embed_dict:
embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word]], dtype='float32')
self.exact_count += 1
elif word.lower() in embed_dict:
embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word.lower()]], dtype='float32')
self.fuzzy_count += 1
else:
self.oov_count += 1
embeddings[self.padID] = 0
final_embed = torch.from_numpy(embeddings).float()
return final_embed
示例8: _uniform_embed
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import from_numpy [as 別名]
def _uniform_embed(self, embed_dict, words_dict):
"""
:param embed_dict:
:param words_dict:
"""
print("loading pre_train embedding by uniform for out of vocabulary.")
embeddings = np.zeros((int(self.words_count), int(self.dim)))
inword_list = {}
for word in words_dict:
if word in embed_dict:
embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word]], dtype='float32')
inword_list[words_dict[word]] = 1
self.exact_count += 1
elif word.lower() in embed_dict:
embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word.lower()]], dtype='float32')
inword_list[words_dict[word]] = 1
self.fuzzy_count += 1
else:
self.oov_count += 1
uniform_col = np.random.uniform(-0.25, 0.25, int(self.dim)).round(6) # uniform
for i in range(len(words_dict)):
if i not in inword_list and i != self.padID:
embeddings[i] = uniform_col
final_embed = torch.from_numpy(embeddings).float()
return final_embed
示例9: _avg_embed
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import from_numpy [as 別名]
def _avg_embed(self, embed_dict, words_dict):
"""
:param embed_dict:
:param words_dict:
"""
print("loading pre_train embedding by avg for out of vocabulary.")
embeddings = np.zeros((int(self.words_count), int(self.dim)))
inword_list = {}
for word in words_dict:
if word in embed_dict:
embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word]], dtype='float32')
inword_list[words_dict[word]] = 1
self.exact_count += 1
elif word.lower() in embed_dict:
embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word.lower()]], dtype='float32')
inword_list[words_dict[word]] = 1
self.fuzzy_count += 1
else:
self.oov_count += 1
sum_col = np.sum(embeddings, axis=0) / len(inword_list) # avg
for i in range(len(words_dict)):
if i not in inword_list and i != self.padID:
embeddings[i] = sum_col
final_embed = torch.from_numpy(embeddings).float()
return final_embed
示例10: get_eval_data
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import from_numpy [as 別名]
def get_eval_data(text, wav_path):
    '''
    get data for eval
    --------------
    input:
        text --- pinyin format sequence
        wav_path --- path to the reference wav file
    output:
        text --- [1, T_x]
        mel --- GO frame, [1, 1, n_mels]
        ref_mels --- reference mel spectrogram with a batch dim prepended
    '''
    normalized = text_normalize(text) + 'E'  # 'E' marks end of sequence
    char_ids = [hp.char2idx[c] for c in normalized]
    text = torch.Tensor(char_ids).type(torch.LongTensor).unsqueeze(0)  # [1, T_x]
    mel = torch.zeros(1, 1, hp.n_mels)  # GO frame [1, 1, n_mels]
    _, ref_mels, _ = load_spectrograms(wav_path)
    ref_mels = torch.from_numpy(ref_mels).unsqueeze(0)
    return text, mel, ref_mels
示例11: __init__
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import from_numpy [as 別名]
def __init__(self, T, opts):
    """Cache taxonomy tensors and per-class loss weights.

    :param T: dict of taxonomy data ('wnids', 'relevant',
              'labels_relevant', 'ch_slice', 'num_children').
    :param opts: options object (gpu, loo, method, label_smooth,
                 class_wise).
    """
    super(LOOLoss, self).__init__()
    self.gpu = opts.gpu
    # leave-one-out weight is only active for LOO-style methods
    self.loo = opts.loo if 'LOO' in opts.method else 0.
    self.label_smooth = opts.label_smooth
    # KL(uniform || p) constant term: log of the class count
    self.kld_u_const = math.log(len(T['wnids']))
    self.relevant = [torch.from_numpy(rel) for rel in T['relevant']]
    self.labels_relevant = torch.from_numpy(T['labels_relevant'].astype(np.uint8))
    ch_slice = T['ch_slice']
    total = ch_slice[-1]
    if opts.class_wise:
        num_children = T['num_children']
        num_supers = len(num_children)
        # weight each child class by 1 / (children-in-super * supers)
        weight = torch.zeros(total)
        for sup, n_ch in enumerate(num_children):
            weight[ch_slice[sup]:ch_slice[sup + 1]] = 1. / (n_ch * num_supers)
    else:
        # uniform weighting over all child classes
        weight = torch.ones(total) / total
    self.class_weight = weight
示例12: plot_wh_methods
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import from_numpy [as 別名]
def plot_wh_methods():  # from utils.utils import *; plot_wh_methods()
    """Compare the exp() width-height anchor multiplication with the
    (2*sigmoid)^p alternatives and save the plot to comparison.png.
    https://github.com/ultralytics/yolov3/issues/168
    """
    x = np.arange(-4.0, 4.0, .1)
    y_exp = np.exp(x)
    y_sig = torch.sigmoid(torch.from_numpy(x)).numpy() * 2
    fig = plt.figure(figsize=(6, 3), dpi=150)
    for y, label in ((y_exp, 'yolo method'),
                     (y_sig ** 2, '^2 power method'),
                     (y_sig ** 2.5, '^2.5 power method')):
        plt.plot(x, y, '.-', label=label)
    plt.xlim(left=-4, right=4)
    plt.ylim(bottom=0, top=6)
    plt.xlabel('input')
    plt.ylabel('output')
    plt.legend()
    fig.tight_layout()
    fig.savefig('comparison.png', dpi=200)
示例13: get_screen
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import from_numpy [as 別名]
def get_screen(self, env):
    """Render the environment and return a square crop centred on the
    cart as a (1, C, H, W) float tensor in [0, 1]."""
    # HWC -> CHW (torch layout)
    screen = env.render(mode='rgb_array').transpose((2, 0, 1))
    # Strip off the top and bottom of the screen
    screen = screen[:, 160:320]
    view_width = 320
    half = view_width // 2
    cart_location = self.get_cart_location(env)
    if cart_location < half:
        slice_range = slice(view_width)
    elif cart_location > (self.screen_width - half):
        slice_range = slice(-view_width, None)
    else:
        slice_range = slice(cart_location - half, cart_location + half)
    # Strip off the edges, so that we have a square image centered on a cart
    screen = screen[:, :, slice_range]
    # uint8 [0, 255] -> contiguous float32 [0, 1], then to torch
    screen = torch.from_numpy(np.ascontiguousarray(screen, dtype=np.float32) / 255)
    # Resize, and add a batch dimension (BCHW)
    return resize(screen).unsqueeze(0)
示例14: __call__
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import from_numpy [as 別名]
def __call__(self, img):
    """Convert a ``numpy.ndarray`` image to a float tensor.

    Args:
        img (numpy.ndarray): 2-dim (HW) or 3-dim (HWC) image.

    Returns:
        Tensor: Converted image (CHW for 3-dim input).

    Raises:
        TypeError: if ``img`` is not an ndarray.
        RuntimeError: if ``img`` has an unsupported number of dims.
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be ndarray. Got {}'.format(type(img)))
    if isinstance(img, np.ndarray):
        # handle numpy array; copy() makes memory contiguous for torch
        if img.ndim == 2:
            img = torch.from_numpy(img.copy())
        elif img.ndim == 3:
            # HWC -> CHW
            img = torch.from_numpy(img.transpose((2, 0, 1)).copy())
        else:
            raise RuntimeError('img should be ndarray with 2 or 3 dimensions. Got {}'.format(img.ndim))
    # backward compatibility: the /255 rescale stays disabled
    #return img.float().div(255)
    return img.float()
示例15: find_bbs
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import from_numpy [as 別名]
def find_bbs(img, model, conf_threshold, input_size):
    """Find bounding boxes in an image.

    Returns a tuple (bbs, time_req) where ``time_req`` measures only the
    model's forward pass. NOTE(review): ``conf_threshold`` is unused here,
    kept for interface compatibility.
    """
    # pad image so that its square
    img_pad, paddings = to_aspect_ratio_add(img, 1.0, return_paddings=True)
    # resize padded image to desired input size;
    # "linear" interpolation seems to be enough here for 400x400 or larger
    # images (change to "area" or "cubic" for marginally better quality)
    img_rs = ia.imresize_single_image(img_pad, (input_size, input_size), interpolation="linear")
    # NHWC uint8 [0, 255] -> NCHW float32 [0, 1]
    inputs_np = (np.array([img_rs]) / 255.0).astype(np.float32).transpose(0, 3, 1, 2)
    inputs = Variable(torch.from_numpy(inputs_np), volatile=True)
    if GPU >= 0:
        inputs = inputs.cuda(GPU)
    # apply model and measure the model's time
    time_start = time.time()
    outputs_pred = model(inputs)
    time_req = time.time() - time_start
    # process the model's output (i.e. convert heatmaps to BBs)
    result = ModelResult(outputs_pred, inputs_np, img, paddings)
    return result.get_bbs(), time_req