This page collects typical usage examples of the Python method torch.float. If you are unsure what torch.float does or how to use it in practice, the curated code samples below may help. You can also explore further usage examples from the torch module it belongs to.
Below are 15 code examples of torch.float, sorted by popularity by default.
Example 1: forward_single
# Required import: import torch [as alias]
# Or: from torch import float [as alias]
def forward_single(self, x, scale):
    """Forward feature of a single scale level.

    Args:
        x (Tensor): Features of a single scale level.
        scale (:obj:`mmcv.cnn.Scale`): Learnable scale module to resize
            the bbox prediction.

    Returns:
        tuple:
            cls_score (Tensor): Joint classification and quality scores for
                a single scale level; the channel number is num_classes.
            bbox_pred (Tensor): Box distribution logits for a single scale
                level; the channel number is 4*(n+1), where n is the max
                value of the integral set.
    """
    cls_feat = x
    reg_feat = x
    for cls_conv in self.cls_convs:
        cls_feat = cls_conv(cls_feat)
    for reg_conv in self.reg_convs:
        reg_feat = reg_conv(reg_feat)
    cls_score = self.gfl_cls(cls_feat)
    # Cast to FP32 so the distribution logits stay in full precision under FP16 training.
    bbox_pred = scale(self.gfl_reg(reg_feat)).float()
    return cls_score, bbox_pred
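The scale argument is mmcv's learnable per-level scalar. As a rough illustration of the pattern, here is a minimal stand-in for mmcv.cnn.Scale (the real class lives in mmcv; this sketch only mirrors its behavior):

    import torch
    import torch.nn as nn

    class Scale(nn.Module):
        # Minimal stand-in for mmcv.cnn.Scale: multiply input by a learnable scalar.
        def __init__(self, scale: float = 1.0):
            super().__init__()
            self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float))

        def forward(self, x):
            return x * self.scale

    feat = torch.randn(2, 68, 8, 8)          # toy bbox-branch output for one level
    bbox_pred = Scale(1.0)(feat).float()     # same pattern: scale, then cast to FP32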
Example 2: rel_roi_point_to_rel_img_point
# Required import: import torch [as alias]
# Or: from torch import float [as alias]
def rel_roi_point_to_rel_img_point(rois,
                                   rel_roi_points,
                                   img_shape,
                                   spatial_scale=1.):
    """Convert RoI-based relative point coordinates to image-based relative
    point coordinates.

    Args:
        rois (Tensor): RoIs or bboxes, shape (N, 4) or (N, 5).
        rel_roi_points (Tensor): Point coordinates inside an RoI, relative
            to the RoI, in range (0, 1), shape (N, P, 2).
        img_shape (tuple): (height, width) of the image or feature map.
        spatial_scale (float): Scale points by this factor. Default: 1.

    Returns:
        Tensor: Image-based relative point coordinates for sampling,
            shape (N, P, 2).
    """
    abs_img_point = rel_roi_point_to_abs_img_point(rois, rel_roi_points)
    rel_img_point = abs_img_point_to_rel_img_point(abs_img_point, img_shape,
                                                   spatial_scale)
    return rel_img_point
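The two helpers are not shown on this page; as a self-contained sketch of what the conversion amounts to, here is a hypothetical inline version (for illustration only, not the library's implementation):

    import torch

    def rel_roi_to_rel_img(rois, rel_roi_points, img_shape, spatial_scale=1.0):
        # Columns 1:5 hold the box when rois is (N, 5) with a batch index in column 0.
        offset = 1 if rois.size(1) == 5 else 0
        xy1 = rois[:, None, offset:offset + 2]
        xy2 = rois[:, None, offset + 2:offset + 4]
        abs_pts = xy1 + rel_roi_points * (xy2 - xy1)   # absolute image coordinates
        h, w = img_shape[:2]
        wh = torch.tensor([w, h], dtype=torch.float, device=abs_pts.device)
        return abs_pts * spatial_scale / wh            # back to relative (0, 1) range

    rois = torch.tensor([[10., 10., 50., 50.]])
    pts = torch.rand(1, 4, 2)                          # 4 relative points per RoI
    print(rel_roi_to_rel_img(rois, pts, img_shape=(100, 200)).shape)  # (1, 4, 2)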
Example 3: patch_norm_fp32
# Required import: import torch [as alias]
# Or: from torch import float [as alias]
def patch_norm_fp32(module):
    """Recursively convert normalization layers from FP16 to FP32.

    Args:
        module (nn.Module): An FP16 module whose normalization layers are
            to be converted.

    Returns:
        nn.Module: The converted module, with its normalization layers
            cast to FP32.
    """
    if isinstance(module, (nn.modules.batchnorm._BatchNorm, nn.GroupNorm)):
        module.float()
        # GroupNorm, and BatchNorm on older PyTorch, cannot take half inputs
        # in a float module, so wrap forward to cast inputs/outputs.
        if isinstance(module, nn.GroupNorm) or torch.__version__ < '1.3':
            module.forward = patch_forward_method(module.forward, torch.half,
                                                  torch.float)
    for child in module.children():
        patch_norm_fp32(child)
    return module
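A minimal usage sketch, assuming a toy model (patch_forward_method comes from the same fp16 utilities and is not shown on this page; on recent PyTorch the BatchNorm path never reaches it):

    import torch.nn as nn

    model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU()).half()
    model = patch_norm_fp32(model)             # BN weights and stats back to FP32
    print(next(model[1].parameters()).dtype)   # torch.float32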
Example 4: train
# Required import: import torch [as alias]
# Or: from torch import float [as alias]
def train(net, train_features, train_labels, test_features, test_labels,
          num_epochs, learning_rate, weight_decay, batch_size):
    # `loss` and `log_rmse` are assumed to be defined in the enclosing module.
    train_ls, test_ls = [], []
    dataset = torch.utils.data.TensorDataset(train_features, train_labels)
    train_iter = torch.utils.data.DataLoader(dataset, batch_size, shuffle=True)
    optimizer = torch.optim.Adam(params=net.parameters(), lr=learning_rate,
                                 weight_decay=weight_decay)
    net = net.float()
    for epoch in range(num_epochs):
        for X, y in train_iter:
            l = loss(net(X.float()), y.float())
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
        train_ls.append(log_rmse(net, train_features, train_labels))
        if test_labels is not None:
            test_ls.append(log_rmse(net, test_features, test_labels))
    return train_ls, test_ls
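loss and log_rmse are free names here; plausible definitions, following the Kaggle house-price tutorial this snippet resembles (an assumption, not part of the original):

    import torch
    import torch.nn as nn

    loss = nn.MSELoss()

    def log_rmse(net, features, labels):
        with torch.no_grad():
            preds = torch.clamp(net(features.float()), min=1.0)   # keep log() valid
            rmse = torch.sqrt(loss(torch.log(preds), torch.log(labels.float())))
        return rmse.item()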
Example 5: test
# Required import: import torch [as alias]
# Or: from torch import float [as alias]
def test(self, dataset):
    self.model.eval()
    with torch.no_grad():
        total_loss = 0.0
        predictions = torch.zeros(len(dataset), dtype=torch.float, device='cpu')
        indices = torch.arange(1, dataset.num_classes + 1, dtype=torch.float,
                               device='cpu')
        for idx in tqdm(range(len(dataset)),
                        desc='Testing epoch ' + str(self.epoch)):
            ltree, linput, rtree, rinput, label = dataset[idx]
            target = utils.map_label_to_target(label, dataset.num_classes)
            linput, rinput = linput.to(self.device), rinput.to(self.device)
            target = target.to(self.device)
            output = self.model(ltree, linput, rtree, rinput)
            loss = self.criterion(output, target)
            total_loss += loss.item()
            output = output.squeeze().to('cpu')
            # Expected value of the predicted class distribution; the model
            # emits log-probabilities, so exp() recovers probabilities.
            predictions[idx] = torch.dot(indices, torch.exp(output))
    return total_loss / len(dataset), predictions
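Why the dot product yields a prediction: the output is a log-probability distribution over similarity classes 1..K, and the score is that distribution's expected value. A toy check:

    import torch

    log_probs = torch.log_softmax(torch.randn(5), dim=0)   # toy output, K = 5
    indices = torch.arange(1, 6, dtype=torch.float)
    expected = torch.dot(indices, torch.exp(log_probs))    # scalar in [1, 5]
    print(expected)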
Example 6: __getitem__
# Required import: import torch [as alias]
# Or: from torch import float [as alias]
def __getitem__(self, index: int) -> Tuple[str, Tensor, Tensor, Tensor, Tensor]:
    image_id = self._image_ids[index]
    annotation = self._image_id_to_annotation_dict[image_id]
    bboxes = [obj.bbox.tolist() for obj in annotation.objects]
    labels = [COCO2017Animal.CATEGORY_TO_LABEL_DICT[COCO2017.LABEL_TO_CATEGORY_DICT[obj.label]]
              for obj in annotation.objects]  # mapping from the original `COCO2017` dataset
    bboxes = torch.tensor(bboxes, dtype=torch.float)
    labels = torch.tensor(labels, dtype=torch.long)
    image = Image.open(annotation.filename).convert('RGB')  # some images are grayscale
    # Random horizontal flip, in training mode only
    if self._mode == COCO2017Animal.Mode.TRAIN and random.random() > 0.5:
        image = ImageOps.mirror(image)
        bboxes[:, [0, 2]] = image.width - bboxes[:, [2, 0]]  # indices 0 and 2 are `left` and `right`
    image, scale = COCO2017Animal.preprocess(image, self._image_min_side, self._image_max_side)
    scale = torch.tensor(scale, dtype=torch.float)
    bboxes *= scale
    return image_id, image, scale, bboxes, labels
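The flip line is the only non-obvious step: mirroring an image maps a box's left/right edges to (width - right, width - left). A one-box check:

    import torch

    width = 100
    boxes = torch.tensor([[10., 20., 40., 60.]])   # [left, top, right, bottom]
    boxes[:, [0, 2]] = width - boxes[:, [2, 0]]
    print(boxes)                                   # tensor([[60., 20., 90., 60.]])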
Example 7: _write_results
# Required import: import torch [as alias]
# Or: from torch import float [as alias]
def _write_results(self, path_to_results_dir: str, image_ids: List[str],
                   bboxes: List[List[float]], classes: List[int], probs: List[float]):
    results = []
    for image_id, bbox, cls, prob in zip(image_ids, bboxes, classes, probs):
        results.append(
            {
                'image_id': int(image_id),  # COCO evaluation requires `image_id` to be of type `int`
                'category_id': COCO2017.CATEGORY_TO_LABEL_DICT[COCO2017Animal.LABEL_TO_CATEGORY_DICT[cls]],  # mapping to the original `COCO2017` dataset
                'bbox': [  # the format [left, top, width, height] is expected
                    bbox[0],
                    bbox[1],
                    bbox[2] - bbox[0],
                    bbox[3] - bbox[1]
                ],
                'score': prob
            }
        )
    with open(os.path.join(path_to_results_dir, 'results.json'), 'w') as f:
        json.dump(results, f)
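The only arithmetic here converts [left, top, right, bottom] into COCO's [left, top, width, height]:

    bbox = [10.0, 20.0, 40.0, 60.0]   # [left, top, right, bottom]
    coco_bbox = [bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]]
    print(coco_bbox)                  # [10.0, 20.0, 30.0, 40.0]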
Example 8: __getitem__
# Required import: import torch [as alias]
# Or: from torch import float [as alias]
def __getitem__(self, index: int) -> Tuple[str, Tensor, Tensor, Tensor, Tensor]:
    image_id = self._image_ids[index]
    annotation = self._image_id_to_annotation_dict[image_id]
    bboxes = [obj.bbox.tolist() for obj in annotation.objects]
    labels = [COCO2017Person.CATEGORY_TO_LABEL_DICT[COCO2017.LABEL_TO_CATEGORY_DICT[obj.label]]
              for obj in annotation.objects]  # mapping from the original `COCO2017` dataset
    bboxes = torch.tensor(bboxes, dtype=torch.float)
    labels = torch.tensor(labels, dtype=torch.long)
    image = Image.open(annotation.filename).convert('RGB')  # some images are grayscale
    # Random horizontal flip, in training mode only
    if self._mode == COCO2017Person.Mode.TRAIN and random.random() > 0.5:
        image = ImageOps.mirror(image)
        bboxes[:, [0, 2]] = image.width - bboxes[:, [2, 0]]  # indices 0 and 2 are `left` and `right`
    image, scale = COCO2017Person.preprocess(image, self._image_min_side, self._image_max_side)
    scale = torch.tensor(scale, dtype=torch.float)
    bboxes *= scale
    return image_id, image, scale, bboxes, labels
Example 9: _write_results
# Required import: import torch [as alias]
# Or: from torch import float [as alias]
def _write_results(self, path_to_results_dir: str, image_ids: List[str],
                   bboxes: List[List[float]], classes: List[int], probs: List[float]):
    results = []
    for image_id, bbox, cls, prob in zip(image_ids, bboxes, classes, probs):
        results.append(
            {
                'image_id': int(image_id),  # COCO evaluation requires `image_id` to be of type `int`
                'category_id': COCO2017.CATEGORY_TO_LABEL_DICT[COCO2017Person.LABEL_TO_CATEGORY_DICT[cls]],  # mapping to the original `COCO2017` dataset
                'bbox': [  # the format [left, top, width, height] is expected
                    bbox[0],
                    bbox[1],
                    bbox[2] - bbox[0],
                    bbox[3] - bbox[1]
                ],
                'score': prob
            }
        )
    with open(os.path.join(path_to_results_dir, 'results.json'), 'w') as f:
        json.dump(results, f)
Example 10: __getitem__
# Required import: import torch [as alias]
# Or: from torch import float [as alias]
def __getitem__(self, index: int) -> Tuple[str, Tensor, Tensor, Tensor, Tensor]:
    image_id = self._image_ids[index]
    annotation = self._image_id_to_annotation_dict[image_id]
    bboxes = [obj.bbox.tolist() for obj in annotation.objects]
    labels = [obj.label for obj in annotation.objects]
    bboxes = torch.tensor(bboxes, dtype=torch.float)
    labels = torch.tensor(labels, dtype=torch.long)
    image = Image.open(annotation.filename).convert('RGB')  # some images are grayscale
    # Random horizontal flip, in training mode only
    if self._mode == COCO2017.Mode.TRAIN and random.random() > 0.5:
        image = ImageOps.mirror(image)
        bboxes[:, [0, 2]] = image.width - bboxes[:, [2, 0]]  # indices 0 and 2 are `left` and `right`
    image, scale = COCO2017.preprocess(image, self._image_min_side, self._image_max_side)
    scale = torch.tensor(scale, dtype=torch.float)
    bboxes *= scale
    return image_id, image, scale, bboxes, labels
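preprocess is not shown on this page; a plausible sketch, under the assumption that it resizes so the short side reaches image_min_side without the long side exceeding image_max_side and returns the image plus the scale factor applied:

    from PIL import Image

    def preprocess(image: Image.Image, image_min_side: float, image_max_side: float):
        scale = image_min_side / min(image.width, image.height)
        if max(image.width, image.height) * scale > image_max_side:
            scale = image_max_side / max(image.width, image.height)
        image = image.resize((round(image.width * scale), round(image.height * scale)))
        return image, scale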
Example 11: evaluate
# Required import: import torch [as alias]
# Or: from torch import float [as alias]
def evaluate(self, path_to_results_dir: str, image_ids: List[str],
             bboxes: List[List[float]], classes: List[int],
             probs: List[float]) -> Tuple[float, str]:
    self._write_results(path_to_results_dir, image_ids, bboxes, classes, probs)
    annType = 'bbox'
    path_to_coco_dir = os.path.join(self._path_to_data_dir, 'COCO')
    path_to_annotations_dir = os.path.join(path_to_coco_dir, 'annotations')
    path_to_annotation = os.path.join(path_to_annotations_dir, 'instances_val2017.json')
    cocoGt = COCO(path_to_annotation)
    cocoDt = cocoGt.loadRes(os.path.join(path_to_results_dir, 'results.json'))
    cocoEval = COCOeval(cocoGt, cocoDt, annType)
    cocoEval.evaluate()
    cocoEval.accumulate()
    # `summarize` prints to stdout; capture the text to return it as `detail`.
    original_stdout = sys.stdout
    string_stdout = StringIO()
    sys.stdout = string_stdout
    cocoEval.summarize()
    sys.stdout = original_stdout
    mean_ap = cocoEval.stats[0].item()  # stats[0] records AP@[0.5:0.95]
    detail = string_stdout.getvalue()
    return mean_ap, detail
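As a design note, contextlib.redirect_stdout performs the same stdout swap on the cocoEval object above and restores stdout even if summarize raises:

    from contextlib import redirect_stdout
    from io import StringIO

    buf = StringIO()
    with redirect_stdout(buf):
        cocoEval.summarize()
    detail = buf.getvalue()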
Example 12: _write_results
# Required import: import torch [as alias]
# Or: from torch import float [as alias]
def _write_results(self, path_to_results_dir: str, image_ids: List[str],
                   bboxes: List[List[float]], classes: List[int], probs: List[float]):
    results = []
    for image_id, bbox, cls, prob in zip(image_ids, bboxes, classes, probs):
        results.append(
            {
                'image_id': int(image_id),  # COCO evaluation requires `image_id` to be of type `int`
                'category_id': cls,
                'bbox': [  # the format [left, top, width, height] is expected
                    bbox[0],
                    bbox[1],
                    bbox[2] - bbox[0],
                    bbox[3] - bbox[1]
                ],
                'score': prob
            }
        )
    with open(os.path.join(path_to_results_dir, 'results.json'), 'w') as f:
        json.dump(results, f)
Example 13: __getitem__
# Required import: import torch [as alias]
# Or: from torch import float [as alias]
def __getitem__(self, index: int) -> Tuple[str, Tensor, Tensor, Tensor, Tensor]:
    image_id = self._image_ids[index]
    annotation = self._image_id_to_annotation_dict[image_id]
    bboxes = [obj.bbox.tolist() for obj in annotation.objects]
    labels = [COCO2017Car.CATEGORY_TO_LABEL_DICT[COCO2017.LABEL_TO_CATEGORY_DICT[obj.label]]
              for obj in annotation.objects]  # mapping from the original `COCO2017` dataset
    bboxes = torch.tensor(bboxes, dtype=torch.float)
    labels = torch.tensor(labels, dtype=torch.long)
    image = Image.open(annotation.filename).convert('RGB')  # some images are grayscale
    # Random horizontal flip, in training mode only
    if self._mode == COCO2017Car.Mode.TRAIN and random.random() > 0.5:
        image = ImageOps.mirror(image)
        bboxes[:, [0, 2]] = image.width - bboxes[:, [2, 0]]  # indices 0 and 2 are `left` and `right`
    image, scale = COCO2017Car.preprocess(image, self._image_min_side, self._image_max_side)
    scale = torch.tensor(scale, dtype=torch.float)
    bboxes *= scale
    return image_id, image, scale, bboxes, labels
Example 14: _write_results
# Required import: import torch [as alias]
# Or: from torch import float [as alias]
def _write_results(self, path_to_results_dir: str, image_ids: List[str],
                   bboxes: List[List[float]], classes: List[int], probs: List[float]):
    results = []
    for image_id, bbox, cls, prob in zip(image_ids, bboxes, classes, probs):
        results.append(
            {
                'image_id': int(image_id),  # COCO evaluation requires `image_id` to be of type `int`
                'category_id': COCO2017.CATEGORY_TO_LABEL_DICT[COCO2017Car.LABEL_TO_CATEGORY_DICT[cls]],  # mapping to the original `COCO2017` dataset
                'bbox': [  # the format [left, top, width, height] is expected
                    bbox[0],
                    bbox[1],
                    bbox[2] - bbox[0],
                    bbox[3] - bbox[1]
                ],
                'score': prob
            }
        )
    with open(os.path.join(path_to_results_dir, 'results.json'), 'w') as f:
        json.dump(results, f)
Example 15: __init__
# Required import: import torch [as alias]
# Or: from torch import float [as alias]
def __init__(self, backbone: BackboneBase, num_classes: int, pooler_mode: Pooler.Mode,
             anchor_ratios: List[Tuple[int, int]], anchor_sizes: List[int],
             rpn_pre_nms_top_n: int, rpn_post_nms_top_n: int,
             anchor_smooth_l1_loss_beta: Optional[float] = None,
             proposal_smooth_l1_loss_beta: Optional[float] = None):
    super().__init__()
    self.features, hidden, num_features_out, num_hidden_out = backbone.features()
    self._bn_modules = nn.ModuleList(
        [it for it in self.features.modules() if isinstance(it, nn.BatchNorm2d)] +
        [it for it in hidden.modules() if isinstance(it, nn.BatchNorm2d)])
    # NOTE: It's crucial to freeze batch normalization modules when training
    # with small batch sizes, which takes two steps:
    #   (1) switch them to `eval` mode (this part is moved into `forward`)
    #   (2) disable gradients for their parameters (done below)
    for bn_module in self._bn_modules:
        for parameter in bn_module.parameters():
            parameter.requires_grad = False
    self.rpn = RegionProposalNetwork(num_features_out, anchor_ratios, anchor_sizes,
                                     rpn_pre_nms_top_n, rpn_post_nms_top_n,
                                     anchor_smooth_l1_loss_beta)
    self.detection = Model.Detection(pooler_mode, hidden, num_hidden_out,
                                     num_classes, proposal_smooth_l1_loss_beta)
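A minimal illustration of the freezing recipe in the note: eval() stops running-statistics updates, while requires_grad = False stops updates to the affine parameters:

    import torch.nn as nn

    bn = nn.BatchNorm2d(8)
    bn.eval()                        # (1) running mean/var no longer update
    for p in bn.parameters():
        p.requires_grad = False      # (2) weight and bias no longer receive grads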