

Python dtypes.float16 Code Examples

This article collects typical usage examples of tensorflow.python.framework.dtypes.float16 in Python. If you are wondering what dtypes.float16 is, how it is used, or simply want to see it in real code, the curated examples below should help. You can also explore further usage examples from the tensorflow.python.framework.dtypes module.


The following presents 15 code examples of dtypes.float16, sorted by popularity by default.

Example 1: add_check_numerics_ops

# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import float16 [as alias]
def add_check_numerics_ops():
  """Connect a `check_numerics` to every floating point tensor.

  `check_numerics` operations themselves are added for each `half`, `float`,
  or `double` tensor in the graph. For all ops in the graph, the
  `check_numerics` op for all of its (`half`, `float`, or `double`) inputs
  is guaranteed to run before the `check_numerics` op on any of its outputs.

  Returns:
    A `group` op depending on all `check_numerics` ops added.
  """
  check_op = []
  # This code relies on the ordering of ops in get_operations().
  # The producer of a tensor always comes before that tensor's consumer in
  # this list. This is true because get_operations() returns ops in the order
  # added, and an op can only be added after its inputs are added.
  for op in ops.get_default_graph().get_operations():
    for output in op.outputs:
      if output.dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
        message = op.name + ":" + str(output.value_index)
        with ops.control_dependencies(check_op):
          check_op = [array_ops.check_numerics(output, message=message)]
  return control_flow_ops.group(*check_op) 
Developer: ryfeus, Project: lambda-packs, Lines: 25, Source: numerics.py
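
Usage sketch for the snippet above: the same function is exported in TF 1.x as tf.add_check_numerics_ops, so a minimal demonstration (assuming a 1.x graph and session, with an intentionally NaN-producing op) looks like this:

import tensorflow as tf

# Build a small graph that produces NaN (0/0), then guard every float tensor.
x = tf.placeholder(tf.float16, shape=[2], name="x")
zeros = x - x
y = zeros / zeros                        # 0/0 -> NaN
check = tf.add_check_numerics_ops()      # same function as above, via the public API

with tf.Session() as sess:
  try:
    sess.run([y, check], feed_dict={x: [1.0, 2.0]})
  except tf.errors.InvalidArgumentError as e:
    print("check_numerics caught:", e)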

Example 2: set_floatx

# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import float16 [as alias]
def set_floatx(value):
  """Sets the default float type.

  Arguments:
      value: String; 'float16', 'float32', or 'float64'.

  Example:
  ```python
      >>> from keras import backend as K
      >>> K.floatx()
      'float32'
      >>> K.set_floatx('float16')
      >>> K.floatx()
      'float16'
  ```

  Raises:
      ValueError: In case of invalid value.
  """
  global _FLOATX
  if value not in {'float16', 'float32', 'float64'}:
    raise ValueError('Unknown floatx type: ' + str(value))
  _FLOATX = str(value) 
Developer: ryfeus, Project: lambda-packs, Lines: 25, Source: backend.py
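
To make the interplay between floatx() and set_floatx concrete, here is a self-contained sketch of the same pattern (a standalone module-level default plus a validated setter, not the Keras backend itself):

_FLOATX = 'float32'  # standalone stand-in for the backend's module-level default

def floatx():
  return _FLOATX

def set_floatx(value):
  global _FLOATX
  if value not in {'float16', 'float32', 'float64'}:
    raise ValueError('Unknown floatx type: ' + str(value))
  _FLOATX = str(value)

set_floatx('float16')
assert floatx() == 'float16'
try:
  set_floatx('int8')            # not a floating-point type
except ValueError as e:
  print(e)                      # Unknown floatx type: int8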

Example 3: _convert_string_dtype

# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import float16 [as alias]
def _convert_string_dtype(dtype):
  if dtype == 'float16':
    return dtypes_module.float16
  if dtype == 'float32':
    return dtypes_module.float32
  elif dtype == 'float64':
    return dtypes_module.float64
  elif dtype == 'int16':
    return dtypes_module.int16
  elif dtype == 'int32':
    return dtypes_module.int32
  elif dtype == 'int64':
    return dtypes_module.int64
  elif dtype == 'uint8':
    return dtypes_module.uint8
  elif dtype == 'uint16':
    return dtypes_module.uint16
  else:
    raise ValueError('Unsupported dtype:', dtype) 
Developer: ryfeus, Project: lambda-packs, Lines: 21, Source: backend.py
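
This helper is private to the Keras backend (note the leading underscore); the public dtypes.as_dtype accepts the same strings, and later backend versions appear to delegate to it. A quick check:

import numpy as np
from tensorflow.python.framework import dtypes

assert dtypes.as_dtype('float16') == dtypes.float16
assert dtypes.as_dtype('uint8') == dtypes.uint8
assert dtypes.as_dtype('float16').as_numpy_dtype == np.float16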

Example 4: testGradientTensor4D

# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import float16 [as alias]
def testGradientTensor4D(self):
    for (data_format, use_gpu) in [("NHWC", False)]:
      for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
        np_input = np.arange(
            1.0, 49.0,
            dtype=dtype.as_numpy_dtype).reshape([2, 3, 4, 2]).astype(np.float32)
        bias = np.array([1.3, 2.4], dtype=dtype.as_numpy_dtype)
        self._testGradient(np_input, bias, dtype, data_format, use_gpu)
        np_input = np.arange(
            1.0, 513.0,
            dtype=dtype.as_numpy_dtype).reshape([64, 2, 2,
                                                 2]).astype(np.float32)
        self._testGradient(np_input, bias, dtype, data_format, use_gpu)
        np_input = np.arange(
            1.0, 513.0,
            dtype=dtype.as_numpy_dtype).reshape([2, 2, 2,
                                                 64]).astype(np.float32)
        self._testGradient(np_input,
                           np.random.rand(64).astype(dtype.as_numpy_dtype),
                           dtype, data_format, use_gpu) 
Developer: NVIDIA, Project: framework-determinism, Lines: 22, Source: test_patch_bias_add.py

Example 5: testDeterministicGradients

# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import float16 [as alias]
def testDeterministicGradients(self):
    with self.session(force_gpu=True):
      # There are problems with using force_gpu=True and cached_session with
      # both eager mode and graph mode in the same test. Using a non-cached
      # session and putting everything inside the same session context is
      # a compromise.
      for op_binding in (tf.nn.bias_add, nn.bias_add, nn_ops.bias_add):
        for data_layout in ('channels_first', 'channels_last'):
          # With the selected layer configuration, at least in TensorFlow
          # version 2.0, when data_layout='channels_last', bias_add operates
          # deterministically by default. I don't know if this is true for
          # all layer configurations. These cases are still being tested here,
          # for completeness.
          for data_rank in (1, 2, 3):
            for data_type in (dtypes.float16, dtypes.float32, dtypes.float64):
              self._testDeterministicGradientsCase(op_binding, data_layout,
                                                   data_rank, data_type) 
Developer: NVIDIA, Project: framework-determinism, Lines: 19, Source: test_patch_bias_add.py

Example 6: tanh

# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import float16 [as alias]
def tanh(x, name=None):
  """Computes hyperbolic tangent of `x` element-wise.

  Args:
    x: A Tensor or SparseTensor with type `float16`, `float32`, `double`,
      `complex64`, or `complex128`.
    name: A name for the operation (optional).

  Returns:
    A Tensor or SparseTensor respectively with the same type as `x`.
  """
  with ops.name_scope(name, "Tanh", [x]) as name:
    if isinstance(x, sparse_tensor.SparseTensor):
      x_tanh = gen_math_ops._tanh(x.values, name=name)
      return sparse_tensor.SparseTensor(
          indices=x.indices, values=x_tanh, dense_shape=x.dense_shape)
    else:
      return gen_math_ops._tanh(x, name=name) 
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 20, Source: math_ops.py
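
A usage sketch for both code paths above (dense and SparseTensor), assuming a TF 1.x build where tf.tanh is the math_ops.tanh shown here; newer releases may route sparse inputs differently:

import tensorflow as tf

dense = tf.constant([-1.0, 0.0, 1.0], dtype=tf.float16)
sparse = tf.SparseTensor(indices=[[0, 0], [1, 2]],
                         values=tf.constant([0.5, -2.0], dtype=tf.float16),
                         dense_shape=[2, 3])

dense_out = tf.tanh(dense)     # plain element-wise tanh
sparse_out = tf.tanh(sparse)   # SparseTensor in, SparseTensor out (only values change)

with tf.Session() as sess:
  print(sess.run(dense_out))
  print(sess.run(sparse_out).values)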

Example 7: _IsTrainable

# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import float16 [as alias]
def _IsTrainable(tensor):
  dtype = dtypes.as_dtype(tensor.dtype)
  return dtype.base_dtype in (dtypes.float16, dtypes.float32, dtypes.float64,
                              dtypes.complex64, dtypes.complex128) 
Developer: ryfeus, Project: lambda-packs, Lines: 6, Source: gradients_impl.py
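
A quick check of what this predicate reports; since _IsTrainable is private to gradients_impl, the sketch below simply copies its two-line body:

import tensorflow as tf
from tensorflow.python.framework import dtypes

def is_trainable(tensor):  # same logic as _IsTrainable above
  dtype = dtypes.as_dtype(tensor.dtype)
  return dtype.base_dtype in (dtypes.float16, dtypes.float32, dtypes.float64,
                              dtypes.complex64, dtypes.complex128)

print(is_trainable(tf.constant([1.0], dtype=tf.float16)))  # True
print(is_trainable(tf.constant([1], dtype=tf.int32)))      # False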

Example 8: _compute_gradient

# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import float16 [as alias]
def _compute_gradient(x,
                      x_shape,
                      dx,
                      y,
                      y_shape,
                      dy,
                      x_init_value=None,
                      delta=1e-3,
                      extra_feed_dict=None):
  """Computes the theoretical and numerical jacobian."""
  t = dtypes.as_dtype(x.dtype)
  allowed_types = [dtypes.float16, dtypes.float32, dtypes.float64,
                   dtypes.complex64, dtypes.complex128]
  assert t.base_dtype in allowed_types, "Don't support type %s for x" % t.name
  t2 = dtypes.as_dtype(y.dtype)
  assert t2.base_dtype in allowed_types, "Don't support type %s for y" % t2.name

  if x_init_value is not None:
    i_shape = list(x_init_value.shape)
    assert(list(x_shape) == i_shape), "x_shape = %s, init_data shape = %s" % (
        x_shape, i_shape)
    x_data = x_init_value
  else:
    x_data = np.random.random_sample(x_shape).astype(t.as_numpy_dtype)
    if t.is_complex:
      x_data.imag = np.random.random_sample(x_shape)

  jacob_t = _compute_theoretical_jacobian(
      x, x_shape, x_data, dy, y_shape, dx, extra_feed_dict=extra_feed_dict)
  jacob_n = _compute_numeric_jacobian(
      x, x_shape, x_data, y, y_shape, delta, extra_feed_dict=extra_feed_dict)
  return jacob_t, jacob_n 
Developer: ryfeus, Project: lambda-packs, Lines: 34, Source: gradient_checker.py
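
This is an internal helper of the gradient checker; in TF 1.x the public entry point is tf.test.compute_gradient, which returns the theoretical and numerical Jacobians computed above. A usage sketch (1.x graph API, float32 kept for numerical stability):

import numpy as np
import tensorflow as tf

x_shape = (3, 2)
x = tf.placeholder(tf.float32, shape=x_shape)
y = tf.tanh(x)

with tf.Session():
  jacob_t, jacob_n = tf.test.compute_gradient(
      x, x_shape, y, x_shape,
      x_init_value=np.ones(x_shape, dtype=np.float32))
  print(np.max(np.abs(jacob_t - jacob_n)))   # should be small (~1e-4 or less)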

Example 9: _CropAndResizeGrad

# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import float16 [as alias]
def _CropAndResizeGrad(op, grad):
  """The derivatives for crop_and_resize.

  We back-propagate to the image only when the input image tensor has floating
  point dtype but we always back-propagate to the input boxes tensor.

  Args:
    op: The CropAndResize op.
    grad: The tensor representing the gradient w.r.t. the output.

  Returns:
    The gradients w.r.t. the input image, boxes, as well as the always-None
    gradients w.r.t. box_ind and crop_size.
  """
  image = op.inputs[0]
  if image.get_shape().is_fully_defined():
    image_shape = image.get_shape().as_list()
  else:
    image_shape = array_ops.shape(image)

  allowed_types = [dtypes.float16, dtypes.float32, dtypes.float64]
  if op.inputs[0].dtype in allowed_types:
    # pylint: disable=protected-access
    grad0 = gen_image_ops.crop_and_resize_grad_image(grad,
                                                     op.inputs[1],
                                                     op.inputs[2],
                                                     image_shape,
                                                     T=op.get_attr("T"))
    # pylint: enable=protected-access
  else:
    grad0 = None

  grad1 = gen_image_ops.crop_and_resize_grad_boxes(grad, op.inputs[0],
                                                   op.inputs[1], op.inputs[2])

  return [grad0, grad1, None, None] 
Developer: ryfeus, Project: lambda-packs, Lines: 38, Source: image_grad.py
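
In the TensorFlow source this function is registered as the gradient of the CropAndResize op, so it is what runs when gradients flow through tf.image.crop_and_resize. A graph-construction sketch (TF 1.x API, where the third argument is named box_ind):

import tensorflow as tf

image = tf.placeholder(tf.float16, shape=[1, 8, 8, 3])
boxes = tf.constant([[0.0, 0.0, 1.0, 1.0]])
box_ind = tf.constant([0])
crops = tf.image.crop_and_resize(image, boxes, box_ind, crop_size=[4, 4])

# float16 is in allowed_types, so gradients flow to both the image and the
# boxes; box_ind and crop_size never receive gradients (always None).
grad_image, grad_boxes = tf.gradients(crops, [image, boxes])
print(grad_image.dtype, grad_boxes.dtype)   # float16, float32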

Example 10: _CastGrad

# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import float16 [as alias]
def _CastGrad(op, grad):
  t = [
      dtypes.float16, dtypes.float32, dtypes.float64, dtypes.bfloat16,
      dtypes.complex64, dtypes.complex128
  ]
  src_type = op.inputs[0].dtype.base_dtype
  dst_type = grad.dtype.base_dtype
  if src_type in t and dst_type in t:
    return math_ops.cast(grad, src_type)
  else:
    return None 
Developer: ryfeus, Project: lambda-packs, Lines: 13, Source: math_grad.py
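
The effect of this rule, sketched with the TF 1.x graph API: a cast between two floating-point types lets the gradient pass through, cast back to the source dtype:

import tensorflow as tf

x = tf.constant([1.0, 2.0], dtype=tf.float16)
y = tf.cast(x, tf.float32)
(g,) = tf.gradients(tf.reduce_sum(y), [x])
print(g.dtype)   # float16 -- the incoming float32 gradient was cast back
# If either side of the cast were an integer type, _CastGrad would return None
# and no gradient would flow back through the cast.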

Example 11: _valid_dtypes

# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import float16 [as alias]
def _valid_dtypes(self):
    """Valid types for loss, variables and gradients.

    Subclasses should override to allow other float types.

    Returns:
      Valid types for loss, variables and gradients.
    """
    return set([dtypes.float16, dtypes.float32, dtypes.float64]) 
Developer: ryfeus, Project: lambda-packs, Lines: 11, Source: optimizer.py
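
The docstring's note about subclasses can be made concrete: an optimizer that also wants to accept bfloat16 would override _valid_dtypes as below (illustration only; a usable optimizer must still implement _apply_dense and friends, omitted here):

from tensorflow.python.framework import dtypes
from tensorflow.python.training import optimizer


class BFloat16FriendlyOptimizer(optimizer.Optimizer):
  """Sketch: widens the dtype whitelist used for losses, variables and grads."""

  def _valid_dtypes(self):
    return set([dtypes.bfloat16, dtypes.float16,
                dtypes.float32, dtypes.float64])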

Example 12: ExtractBitsFromFloat16

# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import float16 [as alias]
def ExtractBitsFromFloat16(x):
  return np.asscalar(np.asarray(x, dtype=np.float16).view(np.uint16)) 
Developer: ryfeus, Project: lambda-packs, Lines: 4, Source: tensor_util.py
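
A quick check of what this helper computes. np.asscalar has been removed from newer NumPy releases, so the standalone copy below calls .item() instead (same result on older NumPy):

import numpy as np

def extract_bits_from_float16(x):
  # Reinterpret the 16 bits of an IEEE half-precision value as an unsigned int.
  return np.asarray(x, dtype=np.float16).view(np.uint16).item()

print(hex(extract_bits_from_float16(1.0)))      # 0x3c00
print(hex(extract_bits_from_float16(-2.0)))     # 0xc000
print(hex(extract_bits_from_float16(65504.0)))  # 0x7bff (largest finite float16)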

Example 13: floatx

# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import float16 [as alias]
def floatx():
  """Returns the default float type, as a string.

  E.g. 'float16', 'float32', 'float64'.

  Returns:
      String, the current default float type.

  Example:
  ```python
      >>> keras.backend.floatx()
      'float32'
  ```
  """
  return _FLOATX 
Developer: ryfeus, Project: lambda-packs, Lines: 17, Source: backend.py

Example 14: _dtypes_to_test

# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import float16 [as alias]
def _dtypes_to_test(self):
    # TODO(langmore) Test tf.float16 once tf.matrix_solve works in 16bit.
    return [dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128] 
Developer: ryfeus, Project: lambda-packs, Lines: 5, Source: linear_operator_test_util.py

Example 15: _compute_gradient

# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import float16 [as alias]
def _compute_gradient(x,
                      x_shape,
                      dx,
                      y,
                      y_shape,
                      dy,
                      x_init_value=None,
                      delta=1e-3,
                      extra_feed_dict=None):
  """Computes the theoretical and numerical jacobian."""
  t = dtypes.as_dtype(x.dtype)
  allowed_types = [dtypes.float16, dtypes.float32, dtypes.float64,
                   dtypes.complex64, dtypes.complex128]
  assert t.base_dtype in allowed_types, "Don't support type %s for x" % t.name
  t2 = dtypes.as_dtype(y.dtype)
  assert t2.base_dtype in allowed_types, "Don't support type %s for y" % t2.name

  if x_init_value is not None:
    i_shape = list(x_init_value.shape)
    assert(list(x_shape) == i_shape), "x_shape = %s, init_data shape = %s" % (
        x_shape, i_shape)
    x_data = x_init_value
  else:
    if t == dtypes.float16:
      dtype = np.float16
    elif t == dtypes.float32:
      dtype = np.float32
    else:
      dtype = np.float64
    x_data = np.asfarray(np.random.random_sample(x_shape), dtype=dtype)

  jacob_t = _compute_theoretical_jacobian(
      x, x_shape, x_data, dy, y_shape, dx, extra_feed_dict=extra_feed_dict)
  jacob_n = _compute_numeric_jacobian(
      x, x_shape, x_data, y, y_shape, delta, extra_feed_dict=extra_feed_dict)
  return jacob_t, jacob_n 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 38, Source: gradient_checker.py
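
One small note on this variant: the manual if/elif chain that picks the matching NumPy type does the same job as the DType's as_numpy_dtype property, which the other _compute_gradient listing (Example 8) already uses. A minimal sketch:

import numpy as np
from tensorflow.python.framework import dtypes

for t in (dtypes.float16, dtypes.float32, dtypes.float64):
  x_data = np.random.random_sample((2, 3)).astype(t.as_numpy_dtype)
  print(t.name, x_data.dtype)   # e.g. float16 float16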


Note: The tensorflow.python.framework.dtypes.float16 examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and the copyright of the source code remains with the original authors. Please follow the corresponding project's license when redistributing or using the code; do not reproduce without permission.