

Python torch.float16 Code Examples

This article collects typical usage examples of torch.float16 in Python, drawn from open-source projects. If you are unsure what torch.float16 is or how to use it, the curated examples below may help; strictly speaking, torch.float16 is a dtype attribute of the torch module rather than a method. You can also explore further usage examples from the torch package.


The following presents 15 code examples of torch.float16, ordered roughly by popularity.

Example 1: invert_attention_mask

# Required import: import torch
# Or: from torch import float16
def invert_attention_mask(self, encoder_attention_mask: Tensor) -> Tensor:
        """type: torch.Tensor -> torch.Tensor"""
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
        # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
        # /transformer/transformer_layers.py#L270
        # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
        # encoder_extended_attention_mask.transpose(-1, -2))
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility

        if self.dtype == torch.float16:
            encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e4
        elif self.dtype == torch.float32:
            encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e9
        else:
            raise ValueError(
                "{} not recognized. `dtype` should be set to either `torch.float32` or `torch.float16`".format(
                    self.dtype
                )
            )

        return encoder_extended_attention_mask 
Author: plkmo | Project: NLP_Toolkit | Lines: 27 | Source: modeling_utils.py
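
A minimal standalone sketch (not from NLP_Toolkit; values invented for illustration) replicating the masking arithmetic above for a 2D padding mask in half precision:

import torch

mask = torch.tensor([[1, 1, 0]])                     # 1 = attend, 0 = padding
extended = mask[:, None, None, :].to(torch.float16)  # broadcast to (batch, 1, 1, seq)
bias = (1.0 - extended) * -1e4                       # 0 for valid positions, -1e4 for padding
print(bias)  # tensor([[[[-0., -0., -10000.]]]], dtype=torch.float16)

Adding this bias to raw attention scores drives padded positions toward zero after the softmax; -1e4 is used instead of -1e9 because the latter overflows float16.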

Example 2: convert_param

# Required import: import torch
# Or: from torch import float16
def convert_param(param, requires_grad):
    if get_backend() == "pytorch":
        # Already a torch.Tensor: nothing to convert.
        if isinstance(param, torch.Tensor):
            return param
        if isinstance(param, list):
            param = np.asarray(param)
        if isinstance(param, np.ndarray):
            param_type = param.dtype
        else:
            param_type = type(param)
        convert_type = convert_dtype(param_type, to="pytorch")

        # PyTorch cannot convert from a np.bool_, must be uint.
        if isinstance(param, np.ndarray) and param.dtype == np.bool_:
            param = param.astype(np.uint8)

        if convert_type == torch.float32 or convert_type == torch.float or convert_type == torch.float16:
            # Only floats can require grad.
            return torch.tensor(param, dtype=convert_type, requires_grad=requires_grad)
        else:
            return torch.tensor(param, dtype=convert_type) 
Author: rlgraph | Project: rlgraph | Lines: 24 | Source: util.py
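
A short standalone sketch of the np.bool_ workaround noted in the code above: per the snippet's own comment, PyTorch cannot build a tensor directly from a boolean NumPy array, so the array is cast to uint8 first (names and values here are illustrative):

import numpy as np
import torch

flags = np.array([True, False, True])
as_uint8 = flags.astype(np.uint8)              # bool -> uint8 before handing to torch
t = torch.tensor(as_uint8, dtype=torch.uint8)
print(t)  # tensor([1, 0, 1], dtype=torch.uint8)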

Example 3: torch_dtype_to_np_dtype

# Required import: import torch
# Or: from torch import float16
def torch_dtype_to_np_dtype(dtype):
    dtype_dict = {
            torch.bool    : np.dtype(np.bool_),  # np.bool is deprecated; use np.bool_
            torch.uint8   : np.dtype(np.uint8),
            torch.int8    : np.dtype(np.int8),
            torch.int16   : np.dtype(np.int16),
            torch.short   : np.dtype(np.int16),
            torch.int32   : np.dtype(np.int32),
            torch.int     : np.dtype(np.int32),
            torch.int64   : np.dtype(np.int64),
            torch.long    : np.dtype(np.int64),
            torch.float16 : np.dtype(np.float16),
            torch.half    : np.dtype(np.float16),
            torch.float32 : np.dtype(np.float32),
            torch.float   : np.dtype(np.float32),
            torch.float64 : np.dtype(np.float64),
            torch.double  : np.dtype(np.float64),
            }
    return dtype_dict[dtype]


# ---------------------- InferenceEngine internal types ------------------------ 
Author: pfnet-research | Project: chainer-compiler | Lines: 24 | Source: types.py
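
Assuming the mapping above is in scope, a hypothetical round trip from a torch dtype to a NumPy array:

import numpy as np
import torch

np_dt = torch_dtype_to_np_dtype(torch.float16)
arr = np.zeros(4, dtype=np_dt)
print(arr.dtype)  # float16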

Example 4: test_to_both_args

# Required import: import torch
# Or: from torch import float16
def test_to_both_args(self):
        dev = 'cuda:1'
        dtype = torch.float16

        torchmodel = torch.nn.Sequential(torch.nn.Linear(1,1))
        torchmodel.to = Mock()
        optimizer = torch.optim.Adam(torchmodel.parameters(), 0.1)
        state_tensor = torch.Tensor([1])
        state_tensor.to = Mock()
        optimizer.state = {'test': {'test': state_tensor}}

        torchbearertrial = Trial(torchmodel, optimizer, torch.nn.L1Loss(), [])
        torchbearertrial.to(dev, dtype)

        self.assertTrue(torchmodel.to.call_args[0][0] == dev)
        self.assertTrue(torchmodel.to.call_args[0][1] == dtype)
        self.assertTrue(state_tensor.to.call_args[0][0] == dev)
        self.assertTrue(state_tensor.to.call_args[0][1] == dtype) 
Author: pytorchbearer | Project: torchbearer | Lines: 20 | Source: test_trial.py

Example 5: test_to_kwargs

# Required import: import torch
# Or: from torch import float16
def test_to_kwargs(self):
        dev = 'cuda:1'
        dtype = torch.float16

        torchmodel = torch.nn.Sequential(torch.nn.Linear(1,1))
        torchmodel.to = Mock()
        optimizer = torch.optim.Adam(torchmodel.parameters(), 0.1)
        state_tensor = torch.Tensor([1])
        state_tensor.to = Mock()
        optimizer.state = {'test': {'test': state_tensor}}

        torchbearertrial = Trial(torchmodel, optimizer, torch.nn.L1Loss(), [])
        torchbearertrial.to(device=dev, dtype=dtype)

        self.assertTrue(torchmodel.to.call_args[1]['device'] == dev)
        self.assertTrue(torchmodel.to.call_args[1]['dtype'] == dtype)
        self.assertTrue(state_tensor.to.call_args[1]['device'] == dev)
        self.assertTrue(state_tensor.to.call_args[1]['dtype'] == dtype) 
Author: pytorchbearer | Project: torchbearer | Lines: 20 | Source: test_trial.py

Example 6: update_dtype

# Required import: import torch
# Or: from torch import float16
def update_dtype(self, old_dtype):
        updated = {}
        for k, v in old_dtype.items():
            if v == np.float32:
                dt = torch.float32
            elif v == np.float64:
                dt = torch.float64
            elif v == np.float16:
                dt = torch.float16
            elif v == np.uint8:
                dt = torch.uint8
            elif v == np.int8:
                dt = torch.int8
            elif v == np.int16:
                dt = torch.int16
            elif v == np.int32:
                dt = torch.int32
            elif v == np.int64:
                dt = torch.int64
            else:
                raise ValueError("Unsupported dtype {}".format(v))
            updated[k] = dt
        return updated 
Author: heronsystems | Project: adeptRL | Lines: 25 | Source: ops.py
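
A hypothetical call (assuming `ops` is an instance of the class that defines update_dtype; the dict keys are invented):

import numpy as np

old = {'screen': np.float32, 'reward': np.float16}
new = ops.update_dtype(old)  # ops: an instance of the defining class (assumed)
print(new)  # {'screen': torch.float32, 'reward': torch.float16}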

Example 7: testDtypes

# Required import: import torch
# Or: from torch import float16
def testDtypes(self):
    # Spot check a few.
    config_str = """
      # Test without torch prefix, but using the
      # prefix is strongly recommended!
      configurable.float32 = %float32
      # Test with torch prefix.
      configurable.int8 = %torch.int8
      configurable.float16 = %torch.float16
    """
    config.parse_config(config_str)

    vals = configurable()
    # pylint: disable=E1101
    self.assertIs(vals['float32'], torch.float32)
    self.assertIs(vals['int8'], torch.int8)
    self.assertIs(vals['float16'], torch.float16)
    # pylint: enable=E1101 
Author: google | Project: gin-config | Lines: 20 | Source: external_configurables_test.py

Example 8: pytorch_dtype_to_type

# Required import: import torch
# Or: from torch import float16
def pytorch_dtype_to_type(dtype):
    """Map a pytorch dtype to a myia type."""
    import torch

    _type_map = {
        torch.int8: Int[8],
        torch.int16: Int[16],
        torch.int32: Int[32],
        torch.int64: Int[64],
        torch.uint8: UInt[8],
        torch.float16: Float[16],
        torch.float32: Float[32],
        torch.float64: Float[64],
        torch.bool: Bool,
    }
    if dtype not in _type_map:
        raise TypeError(f"Unsupported dtype {dtype}")
    return _type_map[dtype] 
Author: mila-iqia | Project: myia | Lines: 20 | Source: pytorch_abstract_types.py
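
Assuming the myia type constructors (Int, Float, etc.) are importable, a hypothetical lookup:

import torch

t = torch.zeros(2, dtype=torch.float16)
print(pytorch_dtype_to_type(t.dtype))  # Float[16]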

Example 9: print_shapes

# Required import: import torch
# Or: from torch import float16
def print_shapes(x, prefix='', raise_on_nan=False):
    if isinstance(x, torch.Tensor):
        print(prefix, x.shape)
        if x.dtype == torch.float32 or x.dtype == torch.float16:
            print(x.min(), x.max(), x.mean(), x.std())
        if raise_on_nan and torch.isnan(x).long().sum().item() > 0:
            print("GOT NAN!!")
            raise ValueError
    elif isinstance(x, (list, tuple)):
        for ele in x:
            print_shapes(ele, prefix + '-->')
    elif isinstance(x, dict):
        for k, v in x.items():
            print_shapes(v, prefix + ' ' + k + ':')
    else:
        print("COULDN'T get shape ", type(x)) 
Author: allenai | Project: kb | Lines: 18 | Source: knowbert.py
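
A hypothetical call on a nested batch structure (names invented for illustration):

import torch

batch = {'tokens': torch.randn(2, 5), 'spans': [torch.randn(3), 'label']}
print_shapes(batch)
# prints each tensor's shape plus min/max/mean/std for float tensors,
# and falls back to the type name for non-tensor leaves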

Example 10: forward

# Required import: import torch
# Or: from torch import float16
def forward(self, q, k):
        b_q, t_q, dim_q = list(q.size())
        b_k, t_k, dim_k = list(k.size())
        assert(dim_q == dim_k)  # dims should be equal
        b = b_q
        qk = torch.bmm(q, k.transpose(1, 2))  # b x t_q x t_k
        qk = qk / (dim_k ** 0.5)
        mask = None
        with torch.no_grad():
            if self.causal and t_q > 1:
                causal_mask = q.data.new(t_q, t_k).byte().fill_(1).triu_(1)
                mask = causal_mask.unsqueeze(0).expand(b, t_q, t_k)
            if self.mask_k is not None:
                mask_k = self.mask_k.unsqueeze(1).expand(b, t_q, t_k)
                mask = mask_k if mask is None else mask | mask_k
            if self.mask_q is not None:
                mask_q = self.mask_q.unsqueeze(2).expand(b, t_q, t_k)
                mask = mask_q if mask is None else mask | mask_q
        if mask is not None:
            qk.masked_fill_(mask, float('-inf'))

        return F.softmax(qk, dim=2,
                         dtype=torch.float32 if qk.dtype == torch.float16 else qk.dtype) 
Author: eladhoffer | Project: seq2seq.pytorch | Lines: 25 | Source: attention.py
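
The dtype switch in the return statement computes the softmax in float32 whenever the scores are float16, which improves numerical stability in mixed-precision training; a minimal sketch of that call in isolation:

import torch
import torch.nn.functional as F

scores = torch.randn(1, 3, 3, dtype=torch.float16)
probs = F.softmax(scores, dim=2, dtype=torch.float32)  # upcast inside the softmax
print(probs.dtype)  # torch.float32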

Example 11: init_optimizer_and_mixup

# Required import: import torch
# Or: from torch import float16
def init_optimizer_and_mixup(args, train_loader, model, optim_state_dict=None):
    optimizer_class = torch.optim.SGD
    optimizer_params = {"lr": args.learning_rate, "momentum": args.momentum, "weight_decay": args.decay,
                        "nesterov": True}

    if args.sched == 'clr':
        scheduler_class = CyclicLR
        scheduler_params = {"base_lr": args.min_lr, "max_lr": args.max_lr,
                            "step_size_up": args.epochs_per_step * len(train_loader), "mode": args.mode,
                            "last_epoch": args.start_step - 1}
    elif args.sched == 'multistep':
        scheduler_class = MultiStepLR
        scheduler_params = {"milestones": args.schedule, "gamma": args.gamma, "last_epoch": args.start_epoch - 1}
    elif args.sched == 'cosine':
        scheduler_class = CosineLR
        scheduler_params = {"max_epochs": args.epochs, "warmup_epochs": args.warmup, "iter_in_epoch": len(train_loader),
                            "last_epoch": args.start_step - 1}
    elif args.sched == 'gamma':
        scheduler_class = StepLR
        scheduler_params = {"step_size": 30, "gamma": args.gamma, "last_epoch": args.start_epoch - 1}
    else:
        raise ValueError('Unknown scheduler: {}'.format(args.sched))
    optim = OptimizerWrapper(model, optimizer_class=optimizer_class, optimizer_params=optimizer_params,
                             optimizer_state_dict=optim_state_dict, scheduler_class=scheduler_class,
                             scheduler_params=scheduler_params, use_shadow_weights=args.dtype == torch.float16)
    mixup_start = len(train_loader) * args.mixup_warmup
    mixup_nr = len(train_loader) * (args.epochs - args.mixup_warmup)
    mixup = MixupScheduled(start_gamma=0, stop_gamma=args.mixup, wait_steps=mixup_start, nr_steps=mixup_nr,
                           start_step=args.start_step, num_classes=args.num_classes, smooth_eps=args.smooth_eps)
    return optim, mixup 
Author: Randl | Project: MobileNetV3-pytorch | Lines: 32 | Source: imagenet.py

Example 12: forward

# Required import: import torch
# Or: from torch import float16
def forward(self, x):
        # Cast all fixed parameters to half() if necessary 
        if x.dtype == torch.float16:
            self.weight = self.weight.half()
            self.bias = self.bias.half()
            self.running_mean = self.running_mean.half()
            self.running_var = self.running_var.half()

        scale = self.weight * self.running_var.rsqrt()
        bias = self.bias - self.running_mean * scale
        scale = scale.reshape(1, -1, 1, 1)
        bias = bias.reshape(1, -1, 1, 1)
        return x * scale + bias 
Author: soeaver | Project: Parsing-R-CNN | Lines: 15 | Source: batch_norm.py
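
A sketch of why the cast matters, assuming standard PyTorch type-promotion rules: without it, multiplying float16 activations by float32 buffers promotes the output to float32 (or raises on very old versions), breaking an otherwise half-precision pipeline:

import torch

x = torch.randn(2, 3, 4, 4, dtype=torch.float16)
scale = torch.ones(1, 3, 1, 1)           # float32 buffer
print((x * scale).dtype)                 # torch.float32 -- promoted
print((x * scale.half()).dtype)          # torch.float16 -- stays in half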

Example 13: forward

# Required import: import torch
# Or: from torch import float16
def forward(self, hidden_states, p_mask=None):
        """ Args:
            **p_mask**: (`optional`) ``torch.FloatTensor`` of shape `(batch_size, seq_len)`
                invalid position mask such as query and special symbols (PAD, SEP, CLS)
                1.0 means token should be masked.
        """
        x = self.dense(hidden_states).squeeze(-1)

        if p_mask is not None:
            if next(self.parameters()).dtype == torch.float16:
                x = x * (1 - p_mask) - 65500 * p_mask
            else:
                x = x * (1 - p_mask) - 1e30 * p_mask

        return x 
Author: plkmo | Project: BERT-Relation-Extraction | Lines: 17 | Source: modeling_utils.py
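
The branch exists because -1e30 is not representable in half precision (float16 tops out around 65504), so the fp16 path uses -65500 instead; a two-line check:

import torch

print(torch.tensor(-1e30, dtype=torch.float16))     # tensor(-inf, dtype=torch.float16)
print(torch.tensor(-65500.0, dtype=torch.float16))  # finite, near the float16 minimum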

Example 14: __init__

# Required import: import torch
# Or: from torch import float16
def __init__(self, dataloader, device, half=False):
        self.loader = dataloader
        self.iter = None
        self.device = device
        self.dtype = torch.float16 if half else torch.float32
        self.stream = torch.cuda.Stream()
        self.next_data = None 
Author: kakaobrain | Project: autoclint | Lines: 9 | Source: dataloader.py

Example 15: data_type_dict

# Required import: import torch
# Or: from torch import float16
def data_type_dict():
    return {'float16' : th.float16,
            'float32' : th.float32,
            'float64' : th.float64,
            'uint8'   : th.uint8,
            'int8'    : th.int8,
            'int16'   : th.int16,
            'int32'   : th.int32,
            'int64'   : th.int64,
            'bool'    : th.bool} 
Author: dmlc | Project: dgl | Lines: 12 | Source: tensor.py
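
Assuming torch is imported as in the snippet (import torch as th), a hypothetical lookup that resolves a string dtype name, e.g. from a config file, to a torch dtype:

import torch as th

dtype = data_type_dict()['float16']
x = th.zeros(3, dtype=dtype)
print(x.dtype)  # torch.float16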


Note: the torch.float16 examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Please do not republish without permission.