

Python torch.bfloat16 Code Examples

This article compiles typical usage examples of torch.bfloat16 (a torch.dtype) collected from open-source Python projects. If you are unsure what torch.bfloat16 is, how to use it, or what idiomatic usage looks like, the curated examples below should help; they are also a starting point for exploring related usage elsewhere in torch.


The sections below present 8 torch.bfloat16 code examples drawn from open-source projects, ordered by popularity.
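Before diving into the project code, a minimal standalone sketch (written for this article, not taken from any of the projects below) shows what the dtype denotes: bfloat16 keeps float32's 8-bit exponent but truncates the mantissa to 7 bits, trading precision for range.

import torch

# Values survive the conversion with reduced precision rather than
# overflowing, because the exponent range matches float32.
x = torch.tensor([1.0, 3.14159, 65504.0], dtype=torch.float32)
y = x.to(torch.bfloat16)
print(y.dtype)    # torch.bfloat16
print(y.float())  # values rounded to bfloat16 precision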

Example 1: _prepare_sample

# Required import: import torch [as alias]
# Or: from torch import bfloat16 [as alias]
def _prepare_sample(self, sample):
        if sample == "DUMMY":
            raise Exception(
                "Trying to use an uninitialized 'dummy' batch. This usually indicates "
                "that the total number of batches is smaller than the number of "
                "participating GPUs. Try reducing the batch size or using fewer GPUs."
            )

        if sample is None or len(sample) == 0:
            return None

        if self.cuda:
            sample = utils.move_to_cuda(sample)

        def apply_half(t):
            if t.dtype is torch.float32:
                return t.half()
            return t

        def apply_bfloat16(t):
            if t.dtype is torch.float32:
                return t.to(dtype=torch.bfloat16)
            return t

        if self.args.fp16:
            sample = utils.apply_to_sample(apply_half, sample)

        if self.args.bf16:
            sample = utils.apply_to_sample(apply_bfloat16, sample)

        return sample 
Author: pytorch, Project: fairseq, Lines: 33, Source: trainer.py
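The helper utils.apply_to_sample walks a nested batch and applies the conversion to every tensor it finds. A simplified re-implementation (a sketch of the idea, not fairseq's actual code) makes the pattern concrete:

import torch

def apply_to_sample(f, sample):
    # Recursively apply f to every tensor inside nested dicts/lists/tuples.
    if torch.is_tensor(sample):
        return f(sample)
    if isinstance(sample, dict):
        return {k: apply_to_sample(f, v) for k, v in sample.items()}
    if isinstance(sample, (list, tuple)):
        return type(sample)(apply_to_sample(f, x) for x in sample)
    return sample

sample = {"src_tokens": torch.randint(0, 10, (2, 5)),
          "features": torch.randn(2, 5)}
bf16 = apply_to_sample(
    lambda t: t.to(torch.bfloat16) if t.dtype is torch.float32 else t, sample)
print(bf16["features"].dtype)    # torch.bfloat16
print(bf16["src_tokens"].dtype)  # torch.int64, untouched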

Example 2: move_to_cpu

# Required import: import torch [as alias]
# Or: from torch import bfloat16 [as alias]
def move_to_cpu(sample):
    def _move_to_cpu(tensor):
        # PyTorch has poor support for half tensors (float16) on CPU.
        # Move any such tensors to float32.
        if tensor.dtype in {torch.bfloat16, torch.float16}:
            tensor = tensor.to(dtype=torch.float32)
        return tensor.cpu()

    return apply_to_sample(_move_to_cpu, sample) 
Author: pytorch, Project: fairseq, Lines: 11, Source: utils.py
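The same guard in isolation (plain tensors, no fairseq helpers): half-precision tensors are widened to float32 on their way to the CPU, since CPU kernel coverage for float16 and bfloat16 is sparse.

import torch

t = torch.randn(3, dtype=torch.bfloat16)
if t.dtype in {torch.bfloat16, torch.float16}:
    t = t.to(dtype=torch.float32)
print(t.cpu().dtype)  # torch.float32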

Example 3: test_protobuf_serde_tensor_roundtrip

# Required import: import torch [as alias]
# Or: from torch import bfloat16 [as alias]
def test_protobuf_serde_tensor_roundtrip(str_dtype):
    """Checks that tensors passed through serialization-deserialization stay same"""

    def compare(roundtrip, original):
        assert type(roundtrip) == torch.Tensor
        assert roundtrip.dtype == original.dtype

        # PyTorch doesn't implement equality checking for bfloat16, so convert to float
        if original.dtype == torch.bfloat16:
            roundtrip = roundtrip.float()
            original = original.float()

        # PyTorch doesn't implement equality checking for float16, so use numpy
        assert numpy.array_equal(roundtrip.data.numpy(), original.data.numpy())
        return True

    serde_worker = syft.hook.local_worker
    original_framework = serde_worker.framework
    serde_worker.framework = None

    tensor = torch.rand([10, 10]) * 16
    tensor = tensor.to(TORCH_STR_DTYPE[str_dtype])

    protobuf_tensor = protobuf.serde._bufferize(serde_worker, tensor)
    roundtrip_tensor = protobuf.serde._unbufferize(serde_worker, protobuf_tensor)

    serde_worker.framework = original_framework

    assert compare(roundtrip_tensor, tensor) is True


# quantized types can't be created by conversion with `tensor.to()` 
Author: OpenMined, Project: PySyft, Lines: 34, Source: test_protobuf_serde.py
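The comparison workaround in isolation (a standalone sketch, no PySyft involved): numpy has no bfloat16 type, so both tensors are widened to float32 before the element-wise equality check.

import numpy
import torch

a = (torch.rand(4, 4) * 16).to(torch.bfloat16)
b = a.clone()
# .numpy() would fail on a bfloat16 tensor; widen to float32 first.
assert numpy.array_equal(a.float().numpy(), b.float().numpy())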

Example 4: bhalf

# Required import: import torch [as alias]
# Or: from torch import bfloat16 [as alias]
def bhalf(module):
    return module._apply(lambda t: t.to(torch.bfloat16) if t.is_floating_point() else t) 
Author: mgrankin, Project: ru_transformers, Lines: 4, Source: debug_lm.py
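Usage sketch: bhalf leans on nn.Module._apply, a private PyTorch method (the same mechanism the built-in Module.bfloat16() uses), so it may change across versions. Floating-point parameters and buffers are converted; integer ones are left alone.

import torch

model = bhalf(torch.nn.Linear(4, 2))
print(model.weight.dtype)  # torch.bfloat16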

Example 5: bfloat16

# Required import: import torch [as alias]
# Or: from torch import bfloat16 [as alias]
def bfloat16(self):
        return self.type_as(
            torch.tensor(0, dtype=torch.bfloat16, device=self.device())) 
Author: rusty1s, Project: pytorch_sparse, Lines: 5, Source: tensor.py
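Usage sketch, assuming the torch_sparse package and its SparseTensor.from_dense constructor are available in your installed version (the API may differ): only the value tensor changes dtype; the index structure is untouched.

import torch
from torch_sparse import SparseTensor

st = SparseTensor.from_dense(torch.randn(3, 3))
st_bf16 = st.bfloat16()  # the method defined above; values become bfloat16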

Example 6: _pytorch_result_type

# Required import: import torch [as alias]
# Or: from torch import bfloat16 [as alias]
def _pytorch_result_type(dtypes, non_tensor_inputs):
    """This promotes TVM dtypes like PyTorch would"""
    import torch
    dtype_map = {
        "float64": torch.float64,
        "float32": torch.float32,
        "float16": torch.float16,
        "bfloat16": torch.bfloat16,
        "int64": torch.int64,
        "int32": torch.int32,
        "int16": torch.int16,
        "int8": torch.int8,
        "uint8": torch.uint8,
        "bool": torch.bool
        }
    if len(dtypes) > 0:
        result_type = dtypes[0]
        for dt in dtypes[1:]:
            if dt != result_type: # we don't want to work with same types as we
                                  # don't do quantized here (which cannot be promoted?)
                result_type = _convert_data_type(str(torch.result_type(
                    torch.zeros((), dtype=dtype_map[result_type]),
                    torch.zeros((), dtype=dtype_map[dt]))))
    else:
        result_type = "bool"  # this is the smallest type...
    for inp in non_tensor_inputs:
        result_type = _convert_data_type(
            str(torch.result_type(torch.zeros((), dtype=dtype_map[result_type]),
                                  inp)))
    return result_type 
Author: apache, Project: incubator-tvm, Lines: 32, Source: pytorch.py
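The PyTorch promotion rule this helper defers to can be probed directly with zero-dimensional tensors. Note the perhaps surprising case: bfloat16 and float16 promote to float32, since neither can represent the other exactly.

import torch

print(torch.result_type(torch.zeros((), dtype=torch.bfloat16),
                        torch.zeros((), dtype=torch.float16)))  # torch.float32
print(torch.result_type(torch.zeros((), dtype=torch.bfloat16),
                        torch.zeros((), dtype=torch.int32)))    # torch.bfloat16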

Example 7: step

# Required import: import torch [as alias]
# Or: from torch import bfloat16 [as alias]
def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            lr = group['lr']
            lr_old = group.get('lr_old', lr)
            lr_correct = lr / lr_old

            for p in group['params']:
                if p.grad is None:
                    continue

                p_data_fp32 = p.data
                if p_data_fp32.dtype in {torch.float16, torch.bfloat16}:
                    p_data_fp32 = p_data_fp32.float()

                d_p = p.grad.data.float()
                param_state = self.state[p]
                if 'momentum_buffer' not in param_state:
                    param_state['momentum_buffer'] = torch.zeros_like(d_p)
                else:
                    param_state['momentum_buffer'] = param_state['momentum_buffer'].to(d_p)

                buf = param_state['momentum_buffer']

                if weight_decay != 0:
                    p_data_fp32.mul_(1 - lr * weight_decay)
                p_data_fp32.add_(buf, alpha=momentum * momentum * lr_correct)
                p_data_fp32.add_(d_p, alpha=-(1 + momentum) * lr)

                buf.mul_(momentum * lr_correct).add_(d_p, alpha=-lr)

                if p.data.dtype in {torch.float16, torch.bfloat16}:
                    p.data.copy_(p_data_fp32)

            group['lr_old'] = lr

        return loss 
Author: pytorch, Project: fairseq, Lines: 50, Source: nag.py
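Why the float32 working copy matters (a standalone illustration, not fairseq code): with bfloat16's 7-bit mantissa, an update smaller than the weight's precision step vanishes entirely, so updates are accumulated in float32 and only the result is copied back.

import torch

w_bf16 = torch.tensor(1.0, dtype=torch.bfloat16)
print(w_bf16 + 1e-3)             # tensor(1., dtype=torch.bfloat16) -- lost

w_fp32 = torch.tensor(1.0)       # float32 working copy
for _ in range(10):
    w_fp32 += 1e-3               # ten small updates accumulate to ~1.01
print(w_fp32.to(torch.bfloat16)) # ~1.0078, the nearest representable value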

Example 8: sequential_forward

# Required import: import torch [as alias]
# Or: from torch import bfloat16 [as alias]
def sequential_forward(self, dense_x, lS_o, lS_i):
        # process dense features (using bottom mlp), resulting in a row vector
        if self.bf16:
            dense_x = dense_x.to_mkldnn(torch.bfloat16)
        elif self.fp32:
            dense_x = dense_x.to_mkldnn()

        x = self.apply_mlp(dense_x, self.bot_l)

        if self.bf16:
            x = x.to_dense(torch.float)
        elif self.fp32:
            x = x.to_dense()
        # debug prints
        # print("intermediate")
        # print(x.detach().cpu().numpy())

        # process sparse features (using embeddings), resulting in a list of row vectors
        ly = self.apply_emb(lS_o, lS_i, self.emb_l)
        # for y in ly:
        #     print(y.detach().cpu().numpy())

        # interact features (dense and sparse)
        z = self.interact_features(x, ly)
        # print(z.detach().cpu().numpy())

        # obtain probability of a click (using top mlp)
        if self.bf16:
            z = z.to_mkldnn(torch.bfloat16)
        elif self.fp32:
            z = z.to_mkldnn()

        p = self.apply_mlp(z, self.top_l)

        if self.bf16:
            p = p.to_dense(torch.float)
        elif self.fp32:
            p = p.to_dense()

        # clamp output if needed
        if 0.0 < self.loss_threshold and self.loss_threshold < 1.0:
            z = torch.clamp(p, min=self.loss_threshold, max=(1.0 - self.loss_threshold))
        else:
            z = p

        return z 
Author: intel, Project: optimized-models, Lines: 48, Source: dlrm_s_pytorch.py
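The clamping step in isolation: predicted click probabilities are kept away from exact 0 and 1 so a downstream log-loss never evaluates log(0).

import torch

p = torch.tensor([0.0, 0.5, 1.0])
eps = 1e-7  # stands in for self.loss_threshold
z = torch.clamp(p, min=eps, max=1.0 - eps)
print(z)  # tensor([1.0000e-07, 5.0000e-01, 1.0000e+00])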


Note: The torch.bfloat16 examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from community-contributed open-source projects; copyright of the source code remains with the original authors, and redistribution or use should follow the corresponding project's License. Do not repost without permission.