

Python torch.half Method Code Examples

This article collects typical usage examples of the torch.half method in Python. If you are wondering what torch.half does, how to use it, or what real-world torch.half code looks like, the curated method examples below may help. You can also explore further usage examples from the torch module, to which this method belongs.


The following presents 15 code examples of the torch.half method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.

Example 1: wrap_fp16_model

# Required import: import torch [as alias]
# Or: from torch import half [as alias]
def wrap_fp16_model(model):
    """Wrap the FP32 model to FP16.

    1. Convert FP32 model to FP16.
    2. Remain some necessary layers to be FP32, e.g., normalization layers.

    Args:
        model (nn.Module): Model in FP32.
    """
    # convert model to fp16
    model.half()
    # patch the normalization layers to make it work in fp32 mode
    patch_norm_fp32(model)
    # set `fp16_enabled` flag
    for m in model.modules():
        if hasattr(m, 'fp16_enabled'):
            m.fp16_enabled = True 
Developer: open-mmlab, Project: mmdetection, Lines: 19, Source: hooks.py
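
A minimal usage sketch (assuming mmcv is installed and exports wrap_fp16_model with the behavior shown above):

import torch.nn as nn
from mmcv.runner import wrap_fp16_model  # assumption: mmcv exposes this helper

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
wrap_fp16_model(model)
print(model[0].weight.dtype)  # torch.float16: conv weights were converted
print(model[1].weight.dtype)  # torch.float32: norm layer patched back to fp32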

Example 2: patch_norm_fp32

# Required import: import torch [as alias]
# Or: from torch import half [as alias]
def patch_norm_fp32(module):
    """Recursively convert normalization layers from FP16 to FP32.

    Args:
        module (nn.Module): The modules to be converted in FP16.

    Returns:
        nn.Module: The converted module, the normalization layers have been
            converted to FP32.
    """
    if isinstance(module, (nn.modules.batchnorm._BatchNorm, nn.GroupNorm)):
        module.float()
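        # NOTE: this lexicographic string comparison misorders versions such
        # as '1.10', which sorts before '1.3'; the code predates those releases.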
        if isinstance(module, nn.GroupNorm) or torch.__version__ < '1.3':
            module.forward = patch_forward_method(module.forward, torch.half,
                                                  torch.float)
    for child in module.children():
        patch_norm_fp32(child)
    return module 
Developer: open-mmlab, Project: mmdetection, Lines: 20, Source: hooks.py
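
patch_forward_method is not shown on this page. A hypothetical sketch of what it plausibly does (an assumption, not mmdetection's actual implementation): cast fp16 tensor inputs to fp32 before calling the original forward, then cast the output back so the surrounding fp16 model is unaffected.

import torch

def patch_forward_method(func, src_type, dst_type):
    # Wrap `func` so tensor arguments arriving in src_type (e.g. fp16) are
    # cast to dst_type (e.g. fp32) first, and outputs are cast back.
    def new_forward(*args, **kwargs):
        args = tuple(
            a.to(dst_type) if torch.is_tensor(a) and a.dtype == src_type else a
            for a in args)
        kwargs = {
            k: (v.to(dst_type) if torch.is_tensor(v) and v.dtype == src_type else v)
            for k, v in kwargs.items()}
        output = func(*args, **kwargs)
        if torch.is_tensor(output) and output.dtype == dst_type:
            output = output.to(src_type)
        return output
    return new_forward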

Example 3: cross_entropy

# Required import: import torch [as alias]
# Or: from torch import half [as alias]
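# Note: the imports are not shown in this snippet; `xentropy` presumably comes
# from NVIDIA Apex (apex.contrib.xentropy), and `_cross_entropy_pytorch` is
# fairseq's CPU fallback defined in the same file.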
def cross_entropy(logits, target, ignore_index=-100, reduction='mean'):
    if logits.device == torch.device('cpu'):
        return _cross_entropy_pytorch(logits, target, ignore_index, reduction)
    else:
        half_to_float = (logits.dtype == torch.half)
        losses = xentropy.SoftmaxCrossEntropyLoss.apply(
            logits, target, 0.0, ignore_index, half_to_float,
        )
        if reduction == 'sum':
            return losses.sum()
        elif reduction == 'mean':
            if ignore_index >= 0:
                return losses.sum() / target.ne(ignore_index).sum()
            else:
                return losses.mean()
        elif reduction == 'none':
            return losses
        else:
            raise NotImplementedError
Developer: pytorch, Project: fairseq, Lines: 21, Source: cross_entropy.py
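
For reference, the CPU fallback can be written in plain PyTorch. A minimal sketch (assuming, as fairseq does, that log-softmax is computed in fp32 for numerical stability even when the logits are fp16):

import torch
import torch.nn.functional as F

def _cross_entropy_pytorch(logits, target, ignore_index=-100, reduction='mean'):
    # compute log-probabilities in fp32 regardless of the logits' dtype
    lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32)
    return F.nll_loss(lprobs, target, ignore_index=ignore_index, reduction=reduction)

loss = _cross_entropy_pytorch(torch.randn(4, 10), torch.randint(10, (4,)))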

Example 4: torch_dtype_to_np_dtype

# Required import: import torch [as alias]
# Or: from torch import half [as alias]
def torch_dtype_to_np_dtype(dtype):
    dtype_dict = {
            torch.bool    : np.dtype(bool),  # np.bool was removed in NumPy 1.24
            torch.uint8   : np.dtype(np.uint8),
            torch.int8    : np.dtype(np.int8),
            torch.int16   : np.dtype(np.int16),
            torch.short   : np.dtype(np.int16),
            torch.int32   : np.dtype(np.int32),
            torch.int     : np.dtype(np.int32),
            torch.int64   : np.dtype(np.int64),
            torch.long    : np.dtype(np.int64),
            torch.float16 : np.dtype(np.float16),
            torch.half    : np.dtype(np.float16),
            torch.float32 : np.dtype(np.float32),
            torch.float   : np.dtype(np.float32),
            torch.float64 : np.dtype(np.float64),
            torch.double  : np.dtype(np.float64),
            }
    return dtype_dict[dtype]


# ---------------------- InferenceEngine internal types ------------------------ 
Developer: pfnet-research, Project: chainer-compiler, Lines: 24, Source: types.py
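
Usage is a straightforward dictionary lookup, for example:

import numpy as np
import torch

x = torch.ones(2, 2, dtype=torch.half)
print(torch_dtype_to_np_dtype(x.dtype))  # float16
assert x.numpy().dtype == torch_dtype_to_np_dtype(x.dtype)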

Example 5: _mix_on_path

# Required import: import torch [as alias]
# Or: from torch import half [as alias]
def _mix_on_path(real, fake):
  result = None
  if isinstance(real, (list, tuple)):
    result = [
      _mix_on_path(real_part, fake_part)
      for real_part, fake_part in zip(real, fake)
    ]
  elif isinstance(real, dict):
    result = {
      key: _mix_on_path(real[key], fake[key])
      for key in real
    }
  elif isinstance(real, torch.Tensor):
    if real.dtype in (torch.half, torch.float, torch.double):
      result = _mix_on_path_aux(real, fake)
    else:
      result = random.choice([real, fake])
  else:
    result = random.choice([real, fake])
  return result 
Developer: mjendrusch, Project: torchsupport, Lines: 22, Source: gan.py
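
_mix_on_path_aux is not included in the snippet. A hypothetical sketch of a typical implementation (an assumption, not torchsupport's actual code): per-sample random interpolation between real and fake, as used for WGAN-GP style gradient penalties.

import torch

def _mix_on_path_aux(real, fake):
    # one mixing coefficient per batch element, broadcast over the rest
    alpha_shape = (real.size(0),) + (1,) * (real.dim() - 1)
    alpha = torch.rand(alpha_shape, dtype=real.dtype, device=real.device)
    mixed = alpha * real + (1 - alpha) * fake
    return mixed.requires_grad_(True)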

Example 6: _test_roialign_gradcheck

# Required import: import torch [as alias]
# Or: from torch import half [as alias]
def _test_roialign_gradcheck(device, dtype):
    if not torch.cuda.is_available() and device == 'cuda':
        pytest.skip('test requires GPU')
    try:
        from mmcv.ops import RoIAlign
    except ModuleNotFoundError:
        pytest.skip('RoIAlign op is not successfully compiled')
    if dtype is torch.half:
        pytest.skip('grad check does not support fp16')
    for case in inputs:
        np_input = np.array(case[0])
        np_rois = np.array(case[1])

        x = torch.tensor(
            np_input, dtype=dtype, device=device, requires_grad=True)
        rois = torch.tensor(np_rois, dtype=dtype, device=device)

        froipool = RoIAlign((pool_h, pool_w), spatial_scale, sampling_ratio)

        gradcheck(froipool, (x, rois), eps=1e-5, atol=1e-5) 
Developer: open-mmlab, Project: mmcv, Lines: 22, Source: test_roi_align.py
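
The device/dtype pairs are supplied by pytest parametrization elsewhere in the test file (which also defines `inputs`, `pool_h`, `pool_w`, `spatial_scale`, `sampling_ratio`, and imports gradcheck). A hypothetical equivalent:

import pytest
import torch

@pytest.mark.parametrize('device', ['cpu', 'cuda'])
@pytest.mark.parametrize('dtype', [torch.float, torch.double, torch.half])
def test_roialign_gradcheck(device, dtype):
    _test_roialign_gradcheck(device, dtype)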

Example 7: forward

# Required import: import torch [as alias]
# Or: from torch import half [as alias]
def forward(self, input_ids, token_type_ids=None, position_ids=None, task_idx=None):
    seq_length = input_ids.size(1)
    if position_ids is None:
        position_ids = torch.arange(
            seq_length, dtype=torch.long, device=input_ids.device)
        position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
    if token_type_ids is None:
        token_type_ids = torch.zeros_like(input_ids)

    words_embeddings = self.word_embeddings(input_ids)
    position_embeddings = self.position_embeddings(position_ids)
    token_type_embeddings = self.token_type_embeddings(token_type_ids)

    if self.num_pos_emb > 1:
        num_batch = position_embeddings.size(0)
        num_pos = position_embeddings.size(1)
        position_embeddings = position_embeddings.view(
            num_batch, num_pos, self.num_pos_emb, -1)[torch.arange(0, num_batch).long(), :, task_idx, :]

    embeddings = words_embeddings + position_embeddings + token_type_embeddings
    # presumably: with fp32_embedding set, the embedding tables stay in FP32
    # and their sum is cast back to FP16 here for the FP16 layers below
    if self.fp32_embedding:
        embeddings = embeddings.half()
    embeddings = self.LayerNorm(embeddings)
    embeddings = self.dropout(embeddings)
    return embeddings
Developer: microsoft, Project: unilm, Lines: 27, Source: modeling.py

Example 8: __init__

# Required import: import torch [as alias]
# Or: from torch import half [as alias]
def __init__(self, config, bert_model_embedding_weights):
    super(BertLMPredictionHead, self).__init__()
    self.transform = BertPredictionHeadTransform(config)

    # The output weights are the same as the input embeddings, but there is
    # an output-only bias for each token.
    self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
                             bert_model_embedding_weights.size(0),
                             bias=False)
    self.decoder.weight = bert_model_embedding_weights
    self.bias = nn.Parameter(torch.zeros(
        bert_model_embedding_weights.size(0)))
    if hasattr(config, 'relax_projection') and (config.relax_projection > 1):
        self.relax_projection = config.relax_projection
    else:
        self.relax_projection = 0
    self.fp32_embedding = config.fp32_embedding

    def convert_to_type(tensor):
        if self.fp32_embedding:
            return tensor.half()
        else:
            return tensor
    self.type_converter = convert_to_type
    self.converted = False
Developer: microsoft, Project: unilm, Lines: 27, Source: modeling.py
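
The decoder ties its weight matrix to the input embedding table. A minimal standalone illustration of the pattern (hypothetical sizes):

import torch
import torch.nn as nn

emb = nn.Embedding(1000, 64)               # vocab 1000, hidden size 64
decoder = nn.Linear(64, 1000, bias=False)
decoder.weight = emb.weight                # both modules share one parameter
assert decoder.weight.data_ptr() == emb.weight.data_ptr()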

Example 9: test_box2delta

# Required import: import torch [as alias]
# Or: from torch import half [as alias]
def test_box2delta(device_dtype):
    # this test only checks that encoding and decoding give the same result
    device, dtype = device_dtype
    boxes = random_boxes([10, 10, 20, 20], 10, 10).to(device).to(dtype)
    anchors = random_boxes([10, 10, 20, 20], 10, 10).to(device).to(dtype)
    deltas = pt.utils.box.box2delta(boxes, anchors)
    boxes_reconstructed = pt.utils.box.delta2box(deltas, anchors)
    atol = 2e-2 if dtype == torch.half else 1e-6  # the error is sometimes large for fp16
    assert torch.allclose(boxes, boxes_reconstructed, atol=atol)

    # check that it's jit friendly
    jit_box2delta = torch.jit.script(pt.utils.box.box2delta)
    jit_delta2box = torch.jit.script(pt.utils.box.delta2box)
    deltas2 = jit_box2delta(boxes, anchors)
    boxes_reconstructed2 = jit_delta2box(deltas2, anchors)
    assert torch.allclose(boxes, boxes_reconstructed2, atol=atol) 
Developer: bonlime, Project: pytorch-tools, Lines: 18, Source: test_utils.py
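
device_dtype is a pytest fixture or parametrization defined elsewhere in the test file; a hypothetical version:

import itertools
import pytest
import torch

DEVICES = ['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']
DTYPES = [torch.float, torch.half]

@pytest.fixture(params=list(itertools.product(DEVICES, DTYPES)))
def device_dtype(request):
    return request.param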

Example 10: test_when_param_norm_is_zero_with_half

# Required import: import torch [as alias]
# Or: from torch import half [as alias]
def test_when_param_norm_is_zero_with_half():
    param_norm = torch.tensor(0., dtype=torch.half, device='cuda')
    grad_norm = torch.tensor(1., dtype=torch.half, device='cuda')
    adaptive_lr = torch.tensor(0., dtype=torch.half, device='cuda')

    weight_decay = 1.
    eps = 1.
    trust_coef = 1.

    adaptive_lr = compute_adaptive_lr(
        param_norm,
        grad_norm,
        weight_decay,
        eps,
        trust_coef,
        adaptive_lr)

    assert adaptive_lr == torch.tensor(1., dtype=torch.half, device='cuda') 
Developer: kakaobrain, Project: torchlars, Lines: 20, Source: test_compute_adaptive_lr.py

Example 11: test_when_grad_norm_is_zero_with_half

# Required import: import torch [as alias]
# Or: from torch import half [as alias]
def test_when_grad_norm_is_zero_with_half():
    param_norm = torch.tensor(1., dtype=torch.half, device='cuda')
    grad_norm = torch.tensor(0., dtype=torch.half, device='cuda')
    adaptive_lr = torch.tensor(0., dtype=torch.half, device='cuda')

    weight_decay = 1.
    eps = 1.
    trust_coef = 1.

    adaptive_lr = compute_adaptive_lr(
        param_norm,
        grad_norm,
        weight_decay,
        eps,
        trust_coef,
        adaptive_lr)

    assert adaptive_lr == torch.tensor(1., dtype=torch.half, device='cuda') 
Developer: kakaobrain, Project: torchlars, Lines: 20, Source: test_compute_adaptive_lr.py

Example 12: test_specific_case_with_half

# Required import: import torch [as alias]
# Or: from torch import half [as alias]
def test_specific_case_with_half():
    param_norm = torch.tensor(1.234, dtype=torch.half, device='cuda')
    grad_norm = torch.tensor(5.678, dtype=torch.half, device='cuda')
    adaptive_lr = torch.tensor(0., dtype=torch.half, device='cuda')

    weight_decay = 1e-4
    eps = 1e-8
    trust_coef = 0.001

    adaptive_lr = compute_adaptive_lr(
        param_norm,
        grad_norm,
        weight_decay,
        eps,
        trust_coef,
        adaptive_lr)

    assert torch.allclose(adaptive_lr, torch.tensor(0.000217325, dtype=torch.half, device='cuda')) 
Developer: kakaobrain, Project: torchlars, Lines: 20, Source: test_compute_adaptive_lr.py
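
Examples 10-12 pin down the formula compute_adaptive_lr implements. A plain-PyTorch reference consistent with the expected values above (an assumed form, not torchlars's actual fused kernel):

import torch

def compute_adaptive_lr_ref(param_norm, grad_norm, weight_decay, eps, trust_coef):
    # LARS trust ratio, falling back to 1 when either norm is zero
    if param_norm > 0 and grad_norm > 0:
        return trust_coef * param_norm / (grad_norm + weight_decay * param_norm + eps)
    return torch.ones_like(param_norm)

# reproduces the expected value in Example 12 (~0.000217325)
print(compute_adaptive_lr_ref(torch.tensor(1.234), torch.tensor(5.678), 1e-4, 1e-8, 0.001))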

Example 13: forward

# Required import: import torch [as alias]
# Or: from torch import half [as alias]
def forward(self, vis_feats, vis_pe, input_ids, token_type_ids=None, position_ids=None, vis_input=True, len_vis_input=49):
    seq_length = input_ids.size(1)
    if position_ids is None:
        position_ids = torch.arange(
            seq_length, dtype=torch.long, device=input_ids.device)
        position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
    if token_type_ids is None:
        token_type_ids = torch.zeros_like(input_ids)

    words_embeddings = self.word_embeddings(input_ids)
    position_embeddings = self.position_embeddings(position_ids)
    if vis_input:
        words_embeddings = torch.cat((words_embeddings[:, :1], vis_feats,
            words_embeddings[:, len_vis_input+1:]), dim=1)
        assert len_vis_input == 100, 'only support region attn!'
        position_embeddings = torch.cat((position_embeddings[:, :1], vis_pe,
            position_embeddings[:, len_vis_input+1:]), dim=1)  # hacky...
    token_type_embeddings = self.token_type_embeddings(token_type_ids)

    embeddings = words_embeddings + position_embeddings + token_type_embeddings
    if self.fp32_embedding:
        embeddings = embeddings.half()
    embeddings = self.LayerNorm(embeddings)
    embeddings = self.dropout(embeddings)
    return embeddings
Developer: LuoweiZhou, Project: VLP, Lines: 27, Source: modeling.py

Example 14: forward

# Required import: import torch [as alias]
# Or: from torch import half [as alias]
def forward(self, z, reverse: bool = False):
    # shape: (batch, group, n_of_groups)
    batch_size, group_size, n_of_groups = z.size()

    W = self.conv.weight.squeeze()

    if reverse:
        if not hasattr(self, 'W_inverse'):
            # Reverse computation: invert in FP32 for stability, then cache
            W_inverse = W.float().inverse()
            # Variable is a no-op wrapper in modern PyTorch
            W_inverse = Variable(W_inverse[..., None])
            if z.dtype == torch.half:
                W_inverse = W_inverse.half()
            self.W_inverse = W_inverse
        z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
        return z
    else:
        # Forward computation
        log_det_W = batch_size * n_of_groups * torch.logdet(W.float())
        z = self.conv(z)
        return (
            z,
            log_det_W,
        )
Developer: NVIDIA, Project: NeMo, Lines: 26, Source: waveglow.py
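
The inverse is computed in fp32 and only cast to half afterwards. A toy check of why that ordering is preferred:

import torch

W = torch.randn(8, 8)
W_inv = W.float().inverse()   # invert in fp32 for numerical stability
W_inv_half = W_inv.half()     # cast once and cache for fp16 inference
print((W @ W_inv - torch.eye(8)).abs().max())  # tiny, on the order of 1e-6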

Example 15: run_cell_test

# Required import: import torch [as alias]
# Or: from torch import half [as alias]
def run_cell_test(self, cell, state_tuple=False):
    shape = (self.b, self.h)
    for typ in [torch.float, torch.half]:
        xs = [torch.randn(shape, dtype=typ).requires_grad_()
              for _ in range(self.t)]
        hidden_fn = lambda: torch.zeros(shape, dtype=typ)
        if state_tuple:
            hidden = (hidden_fn(), hidden_fn())
        else:
            hidden = hidden_fn()
        outputs = []
        for i in range(self.t):
            hidden = cell(xs[i], hidden)
            if state_tuple:
                output = hidden[0]
            else:
                output = hidden
            outputs.append(output)
        for y in outputs:
            # HALF is presumably 'torch.cuda.HalfTensor', defined in the test module
            self.assertEqual(y.type(), HALF)
        outputs[-1].float().sum().backward()
        for i, x in enumerate(xs):
            self.assertEqual(x.grad.dtype, x.dtype)
Developer: NVIDIA, Project: apex, Lines: 25, Source: test_rnn.py
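
The final assertions check that gradients come back in each leaf's own dtype even when the loss is reduced in fp32. A standalone illustration:

import torch

x = torch.randn(4, 8).half().requires_grad_()
loss = (x.float() ** 2).sum()  # upcast and reduce the loss in fp32
loss.backward()
print(x.grad.dtype)            # torch.float16: grad matches the leaf's dtype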


Note: The torch.half method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from projects contributed by open-source authors; copyright remains with the original authors, and redistribution or use must follow the corresponding project's license. Do not reproduce without permission.