Python tensorflow.uint64 Method Code Examples

This article collects typical usage examples of the tensorflow.uint64 method in Python. If you are wondering what tensorflow.uint64 does, how to use it, or what it looks like in practice, the hand-picked code examples below may help. You can also browse further usage examples from the tensorflow module that this method belongs to.


The following shows 9 code examples of the tensorflow.uint64 method, sorted by popularity by default.

Example 1: test__dtype_to_bytes

# Required module: import tensorflow [as alias]
# Or: from tensorflow import uint64 [as alias]
def test__dtype_to_bytes():
    np_tf_dt = [
        (np.uint8, tf.uint8, b"uint8"),
        (np.uint16, tf.uint16, b"uint16"),
        (np.uint32, tf.uint32, b"uint32"),
        (np.uint64, tf.uint64, b"uint64"),
        (np.int8, tf.int8, b"int8"),
        (np.int16, tf.int16, b"int16"),
        (np.int32, tf.int32, b"int32"),
        (np.int64, tf.int64, b"int64"),
        (np.float16, tf.float16, b"float16"),
        (np.float32, tf.float32, b"float32"),
        (np.float64, tf.float64, b"float64"),
    ]

    for npd, tfd, dt in np_tf_dt:
        npd = np.dtype(npd)
        assert tfrecord._dtype_to_bytes(npd) == dt
        assert tfrecord._dtype_to_bytes(tfd) == dt

    assert tfrecord._dtype_to_bytes("float32") == b"float32"
    assert tfrecord._dtype_to_bytes("foobar") == b"foobar" 
Developer: neuronets, Project: nobrainer, Lines of code: 24, Source file: tfrecord_test.py
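For context, here is a minimal sketch of what a helper like tfrecord._dtype_to_bytes could look like in order to satisfy the test above; the name _dtype_to_bytes_sketch and its body are an illustrative assumption, not the actual nobrainer implementation:

import numpy as np
import tensorflow as tf

def _dtype_to_bytes_sketch(dtype):
    # Accept TF dtypes, NumPy dtypes, or plain strings and return the dtype
    # name encoded as bytes; unrecognized strings pass through unchanged.
    if isinstance(dtype, tf.DType):
        return dtype.name.encode("utf-8")
    try:
        return np.dtype(dtype).name.encode("utf-8")
    except TypeError:
        return str(dtype).encode("utf-8")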

Example 2: reduce_mean_support_empty

# Required module: import tensorflow [as alias]
# Or: from tensorflow import uint64 [as alias]
def reduce_mean_support_empty(input, keepdims=False):
    return tf.cond(tf.size(input) > 0, lambda: tf.reduce_mean(input, keepdims=keepdims), lambda: tf.zeros_like(input))


# def bit_tensor_list(input):
#     assert input.dtype in [tf.uint8, tf.uint16, tf.uint32, tf.uint64], 'unsupported data type, must be uint*'
#     num_bits = 0
#     if input.dtype == tf.uint8:
#         num_bits = 8
#     elif input.dtype == tf.uint16:
#         num_bits = 16
#     elif input.dtype == tf.uint32:
#         num_bits = 32
#     elif input.dtype == tf.uint64:
#         num_bits = 64
#     bit_tensors = []
#     for i in range(num_bits):
#         current_bit = 1 << i
#         current_bit_tensor = tf.greater(tf.bitwise.bitwise_and(input, current_bit), 0)
#         bit_tensors.append(current_bit_tensor)
#     print(bit_tensors)
#     return bit_tensors 
Developer: christianpayer, Project: MedicalDataAugmentationTool, Lines of code: 24, Source file: tensorflow_util.py
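A quick usage sketch of reduce_mean_support_empty, assuming TensorFlow 2.x eager execution:

import tensorflow as tf

# Non-empty input: behaves like tf.reduce_mean.
print(reduce_mean_support_empty(tf.constant([1.0, 2.0, 3.0])))       # 2.0
# Empty input: falls back to tf.zeros_like instead of producing NaN.
print(reduce_mean_support_empty(tf.constant([], dtype=tf.float32)))  # empty zeros tensor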

Example 3: args_check

# Required module: import tensorflow [as alias]
# Or: from tensorflow import uint64 [as alias]
def args_check(cls, node, **kwargs):
    unsupported_dtype = [
        tf.int8, tf.int16, tf.uint8, tf.uint16, tf.uint32, tf.uint64
    ]
    x = kwargs["tensor_dict"][node.inputs[0]]
    y = kwargs["tensor_dict"][node.inputs[1]]
    if x.dtype in unsupported_dtype:
      exception.OP_UNSUPPORTED_EXCEPT("Mod Dividend in " + str(x.dtype),
                                      "Tensorflow")
    if y.dtype in unsupported_dtype:
      exception.OP_UNSUPPORTED_EXCEPT("Mod Divisor in " + str(y.dtype),
                                      "Tensorflow") 
Developer: onnx, Project: onnx-tensorflow, Lines of code: 14, Source file: mod.py

Example 4: args_check

# Required module: import tensorflow [as alias]
# Or: from tensorflow import uint64 [as alias]
def args_check(cls, node, **kwargs):
    x = kwargs["tensor_dict"][node.inputs[0]]
    # uint64 cannot upcast to any tensorflow supported datatype
    # for tf.clip_by_value that didn't lose precision
    if x.dtype == tf.uint64:
      exception.OP_UNSUPPORTED_EXCEPT(
          "Clip input, min and max in " + str(x.dtype) + " datatype",
          "Tensorflow") 
Developer: onnx, Project: onnx-tensorflow, Lines of code: 10, Source file: clip.py

Example 5: reduce_batch_minus_min_and_max

# Required module: import tensorflow [as alias]
# Or: from tensorflow import uint64 [as alias]
def reduce_batch_minus_min_and_max(x, reduce_instance_dims):
  """Computes the -min and max of a tensor x.

  Args:
    x: A `tf.Tensor`.
    reduce_instance_dims: A bool indicating whether this should collapse the
      batch and instance dimensions to arrive at a single scalar output, or only
      collapse the batch dimension and output a vector of the same shape as the
      input.

  Returns:
    The computed `tf.Tensor`s (batch -min, batch max) pair.
  """
  output_dtype = x.dtype

  if x.dtype == tf.uint8 or x.dtype == tf.uint16:
    x = tf.cast(x, tf.int32)

  elif x.dtype == tf.uint32 or x.dtype == tf.uint64:
    raise TypeError('Tensor type %r is not supported' % x.dtype)

  if reduce_instance_dims:
    if isinstance(x, tf.SparseTensor):
      x = x.values

    x_batch_max = tf.reduce_max(input_tensor=x)
    x_batch_minus_min = tf.reduce_max(input_tensor=tf.zeros_like(x) - x)
    x_batch_minus_min, x_batch_max = assert_same_shape(x_batch_minus_min,
                                                       x_batch_max)
  elif isinstance(x, tf.SparseTensor):
    x_batch_minus_min, x_batch_max = (
        _sparse_minus_reduce_min_and_reduce_max(x))
  else:
    x_batch_max = tf.reduce_max(input_tensor=x, axis=0)
    x_batch_minus_min = tf.reduce_max(input_tensor=0 - x, axis=0)

  # TODO(b/112309021): Remove workaround once tf.reduce_max of a tensor of all
  # NaNs produces -inf.
  return (_inf_to_nan(x_batch_minus_min, output_dtype),
          _inf_to_nan(x_batch_max, output_dtype)) 
Developer: tensorflow, Project: transform, Lines of code: 42, Source file: tf_utils.py
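A small standalone illustration of the negated-max trick used above (helpers such as _inf_to_nan and assert_same_shape are not shown here):

import tensorflow as tf

x = tf.constant([[1.0, 4.0], [3.0, 2.0]])

# -min is computed as the max of the negated values, so both reductions
# can reuse the same reduce_max kernel.
x_batch_minus_min = tf.reduce_max(tf.zeros_like(x) - x)  # -1.0, i.e. -min(x)
x_batch_max = tf.reduce_max(x)                           # 4.0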

Example 6: sum

# Required module: import tensorflow [as alias]
# Or: from tensorflow import uint64 [as alias]
def sum(x, reduce_instance_dims=True, name=None):  # pylint: disable=redefined-builtin
  """Computes the sum of the values of a `Tensor` over the whole dataset.

  Args:
    x: A `Tensor` or `SparseTensor`. Its type must be floating point
        (float{16|32|64}), integral (int{8|16|32|64}), or
        unsigned integral (uint{8|16}).
    reduce_instance_dims: By default collapses the batch and instance dimensions
        to arrive at a single scalar output. If False, only collapses the batch
        dimension and outputs a vector of the same shape as the input.
    name: (Optional) A name for this operation.

  Returns:
    A `Tensor` containing the sum. If `x` is float32 or float64, the sum will
    have the same type as `x`. If `x` is float16, the output is cast to float32.
    If `x` is integral, the output is cast to [u]int64. If `x` is sparse and
    reduce_instance_dims is False, 0 is returned for columns that have no values
    across batches.

  Raises:
    TypeError: If the type of `x` is not supported.
  """
  with tf.compat.v1.name_scope(name, 'sum'):
    if reduce_instance_dims:
      if isinstance(x, tf.SparseTensor):
        x = x.values
      x = tf.reduce_sum(input_tensor=x)
    elif isinstance(x, tf.SparseTensor):
      if x.dtype == tf.uint8 or x.dtype == tf.uint16:
        x = tf.cast(x, tf.int64)
      elif x.dtype == tf.uint32 or x.dtype == tf.uint64:
        raise TypeError('Data type %r is not supported' % x.dtype)
      x = tf.sparse.reduce_sum(x, axis=0)
    else:
      x = tf.reduce_sum(input_tensor=x, axis=0)
    output_dtype, sum_fn = _sum_combine_fn_and_dtype(x.dtype)
    return _numeric_combine([x], sum_fn, reduce_instance_dims,
                            [output_dtype])[0] 
Developer: tensorflow, Project: transform, Lines of code: 40, Source file: analyzers.py
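As a side note, the uint8/uint16 branch above casts to int64 before the sparse reduction; a minimal standalone sketch of that path, assuming TensorFlow 2.x:

import tensorflow as tf

sp = tf.sparse.SparseTensor(indices=[[0, 0], [1, 1]],
                            values=tf.constant([200, 100], dtype=tf.uint8),
                            dense_shape=[2, 2])

# Cast the uint8 values to int64 first, mirroring the branch in sum() above,
# then reduce over the batch dimension.
per_column = tf.sparse.reduce_sum(tf.cast(sp, tf.int64), axis=0)  # [200, 100]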

Example 7: _create_variables

# Required module: import tensorflow [as alias]
# Or: from tensorflow import uint64 [as alias]
def _create_variables(self):
    if self.input_type.ndim != 0:
      raise TypeError('Embeddings take scalar inputs.')
    dtype = tf.as_dtype(self.input_type.dtype)
    if not dtype.is_integer: raise TypeError('Embeddings take integer inputs.')
    if dtype not in (tf.int32, tf.int64):  # only dtypes supported by tf.gather
      if np.iinfo(dtype.as_numpy_dtype).max > 2147483647:
        # pedantic future-proofing to handle hypothetical tf.uint64
        raise TypeError('cannot gather or upcast dtype %s' % dtype)
      self._cast = True
    else:
      self._cast = False
    self._weights = tf.get_variable(
        'weights', self._weights_shape, initializer=self._initializer,
        trainable=self._trainable) 
Developer: tensorflow, Project: fold, Lines of code: 17, Source file: layers.py
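A small check of the upcast guard above, run outside the layer purely for illustration:

import numpy as np
import tensorflow as tf

# The layer raises when the input dtype's maximum exceeds int32's 2147483647;
# smaller integer dtypes are cast to a gather-compatible type instead.
print(np.iinfo(tf.uint32.as_numpy_dtype).max > 2147483647)  # True  -> would raise
print(np.iinfo(tf.uint16.as_numpy_dtype).max > 2147483647)  # False -> safe to cast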

Example 8: masked_bit

# Required module: import tensorflow [as alias]
# Or: from tensorflow import uint64 [as alias]
def masked_bit(input, bit_index):
    """
    Returns a boolean tensor, where values are true, on which the bit on bit_index is True.
    :param input: The input tensor to check.
    :param bit_index: The bit index which will be compared with bitwise and. (LSB 0 order)
    :return: The tensor.
    """
    assert input.dtype in [tf.int8, tf.int16, tf.int32, tf.int64, tf.uint8, tf.uint16, tf.uint32, tf.uint64], 'unsupported data type, must be *int*'
    current_bit = tf.bitwise.left_shift(tf.constant(1, dtype=input.dtype), tf.cast(bit_index, dtype=input.dtype))
    return tf.greater(tf.bitwise.bitwise_and(input, current_bit), 0) 
Developer: christianpayer, Project: MedicalDataAugmentationTool, Lines of code: 12, Source file: tensorflow_util.py
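Example usage of masked_bit with a tf.uint64 tensor, assuming a TensorFlow build whose bitwise and comparison kernels are registered for uint64 (recent releases are):

import tensorflow as tf

labels = tf.constant([5, 2, 8], dtype=tf.uint64)  # binary: 101, 010, 1000

print(masked_bit(labels, 0))  # [True, False, False]
print(masked_bit(labels, 1))  # [False, True, False]
print(masked_bit(labels, 3))  # [False, False, True]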

Example 9: reduce_batch_minus_min_and_max_per_key

# Required module: import tensorflow [as alias]
# Or: from tensorflow import uint64 [as alias]
def reduce_batch_minus_min_and_max_per_key(x, key):
  """Computes the -min and max of a tensor x.

  Args:
    x: A `tf.Tensor` or `SparseTensor`.
    key: A `Tensor` or `SparseTensor`.
        Must meet one of the following conditions:
        1. Both x and key are dense,
        2. Both x and key are sparse and `key` must exactly match `x` in
        everything except values,
        3. The axis=1 index of each x matches its index of dense key.
  Returns:
    A 3-tuple containing the `Tensor`s (key_vocab, min_per_key, max_per_key).
  """
  output_dtype = x.dtype

  if x.dtype == tf.uint8 or x.dtype == tf.uint16:
    x = tf.cast(x, tf.int32)

  elif x.dtype == tf.uint32 or x.dtype == tf.uint64:
    raise TypeError('Tensor type %r is not supported' % x.dtype)

  x, key = _validate_and_get_dense_value_key_inputs(x, key)

  def get_batch_max_per_key(tensor, key_uniques, dtype):  # pylint: disable=missing-docstring
    if tensor.get_shape().ndims < 2:
      row_maxes = tensor
    else:
      row_maxes = tf.reduce_max(
          tensor, axis=tf.range(1, tensor.get_shape().ndims))
    batch_max = tf.math.unsorted_segment_max(
        row_maxes, key_uniques.idx, tf.size(input=key_uniques.y))

    # TODO(b/112309021): Remove workaround once tf.reduce_max of a tensor of all
    # NaNs produces -inf.
    return _inf_to_nan(batch_max, dtype)

  unique = tf.unique_with_counts(key, out_idx=tf.int64)
  x_batch_maxes = get_batch_max_per_key(x, unique, output_dtype)
  x_batch_minus_mins = get_batch_max_per_key(-x, unique, output_dtype)

  x_batch_minus_mins, x_batch_maxes = assert_same_shape(x_batch_minus_mins,
                                                        x_batch_maxes)

  return (unique.y, x_batch_minus_mins, x_batch_maxes) 
Developer: tensorflow, Project: transform, Lines of code: 47, Source file: tf_utils.py
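The per-key grouping above can be seen in isolation with a small self-contained example (the _validate_and_get_dense_value_key_inputs and _inf_to_nan helpers are omitted):

import tensorflow as tf

x = tf.constant([1.0, 5.0, 3.0, 2.0])
key = tf.constant(["a", "b", "a", "b"])

# unique_with_counts assigns one segment id per row, and unsorted_segment_max
# then reduces the rows that share a key.
unique = tf.unique_with_counts(key, out_idx=tf.int64)
max_per_key = tf.math.unsorted_segment_max(x, unique.idx, tf.size(input=unique.y))
# unique.y -> [b'a', b'b'], max_per_key -> [3.0, 5.0]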


Note: The tensorflow.uint64 method examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective developers; copyright of the source code belongs to the original authors, and distribution or reuse should follow each project's license. Do not republish without permission.