

Python tensorflow.int16 Code Examples

This article collects typical usage examples of tensorflow.int16 in Python. If you are wondering what tensorflow.int16 is, how to use it, or what real code that uses it looks like, the curated examples below may help. You can also explore further usage examples from the tensorflow module.


The sections below present 15 code examples that use tensorflow.int16, sorted by popularity by default.

Example 1: diet_adam_optimizer_params

# Required module: import tensorflow [as alias]
# Or: from tensorflow import int16 [as alias]
def diet_adam_optimizer_params():
  """Default hyperparameters for a DietAdamOptimizer.

  Returns:
    a hyperparameters object.
  """
  return tf.contrib.training.HParams(
      quantize=True,  # use 16-bit fixed-point
      quantization_scale=10.0 / tf.int16.max,
      optimizer="DietAdam",
      learning_rate=1.0,
      learning_rate_warmup_steps=2000,
      learning_rate_decay_scheme="noam",  # "noam" or "none"
      epsilon=1e-10,
      beta1=0.0,  # we can save memory if beta1=0
      beta2=0.98,
      factored_second_moment_accumulator=True,  # this saves memory
  ) 
Author: akzaidi, Project: fine-lm, Lines of code: 20, Source file: diet.py
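
A minimal usage sketch follows (the tensor2tensor import path is an assumption for illustration; the printed value just confirms the 16-bit fixed-point range):

import tensorflow as tf
from tensor2tensor.utils import diet  # assumed import path for this sketch

params = diet.diet_adam_optimizer_params()
# With quantize=True, values are stored as 16-bit fixed point, so the largest
# representable magnitude is quantization_scale * tf.int16.max == 10.0.
print(params.quantization_scale * tf.int16.max)  # ~10.0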

Example 2: _quantize

# Required module: import tensorflow [as alias]
# Or: from tensorflow import int16 [as alias]
# Note: this example also references common_layers from the tensor2tensor project.
def _quantize(x, params, randomize=True):
  """Quantize x according to params, optionally randomizing the rounding."""
  if not params.quantize:
    return x

  if not randomize:
    return tf.bitcast(
        tf.cast(x / params.quantization_scale, tf.int16), tf.float16)

  abs_x = tf.abs(x)
  sign_x = tf.sign(x)
  y = abs_x / params.quantization_scale
  y = tf.floor(y + tf.random_uniform(common_layers.shape_list(x)))
  y = tf.minimum(y, tf.int16.max) * sign_x
  q = tf.bitcast(tf.cast(y, tf.int16), tf.float16)
  return q 
Author: akzaidi, Project: fine-lm, Lines of code: 18, Source file: diet.py
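
A matching dequantization step might look like the sketch below (an illustrative counterpart to _quantize, not necessarily identical to the project's own code):

def _dequantize(q, params):
  """Reverse of _quantize: reinterpret the float16 bits as int16, then rescale."""
  if not params.quantize:
    return q
  return tf.cast(tf.bitcast(q, tf.int16), tf.float32) * params.quantization_scale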

Example 3: decode_pred

# Required module: import tensorflow [as alias]
# Or: from tensorflow import int16 [as alias]
def decode_pred(serialized_example):
	"""Parses prediction data from the given `serialized_example`."""

	features = tf.parse_single_example(
					serialized_example,
					features={
						'T1':tf.FixedLenFeature([],tf.string),
						'T2':tf.FixedLenFeature([], tf.string)
					})

	patch_shape = [conf.patch_size, conf.patch_size, conf.patch_size]

	# Convert from a scalar string tensor
	image_T1 = tf.decode_raw(features['T1'], tf.int16)
	image_T1 = tf.reshape(image_T1, patch_shape)
	image_T2 = tf.decode_raw(features['T2'], tf.int16)
	image_T2 = tf.reshape(image_T2, patch_shape)

	# Convert dtype.
	image_T1 = tf.cast(image_T1, tf.float32)
	image_T2 = tf.cast(image_T2, tf.float32)
	label = tf.zeros(image_T1.shape) # pseudo label

	return image_T1, image_T2, label 
Author: zhengyang-wang, Project: 3D-Unet--Tensorflow, Lines of code: 26, Source file: input_fn.py
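
A minimal sketch of wiring decode_pred into a tf.data pipeline (the TFRecord file name is hypothetical):

dataset = tf.data.TFRecordDataset('prediction_patches.tfrecords')  # hypothetical file
dataset = dataset.map(decode_pred).batch(1)
image_T1, image_T2, label = dataset.make_one_shot_iterator().get_next()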

Example 4: diet_adam_optimizer_params

# Required module: import tensorflow [as alias]
# Or: from tensorflow import int16 [as alias]
def diet_adam_optimizer_params():
  """Default hyperparameters for a DietAdamOptimizer.

  Returns:
    a hyperparameters object.
  """
  return hparam.HParams(
      quantize=True,  # use 16-bit fixed-point
      quantization_scale=10.0 / tf.int16.max,
      optimizer="DietAdam",
      learning_rate=1.0,
      learning_rate_warmup_steps=2000,
      learning_rate_decay_scheme="noam",  # "noam" or "none"
      epsilon=1e-10,
      beta1=0.0,  # we can save memory if beta1=0
      beta2=0.98,
      factored_second_moment_accumulator=True,  # this saves memory
  ) 
Author: yyht, Project: BERT, Lines of code: 20, Source file: diet.py

Example 5: testZeros

# Required module: import tensorflow [as alias]
# Or: from tensorflow import int16 [as alias]
def testZeros(self):
    with self.test_session(use_gpu=True):
      for dtype in tf.uint8, tf.int16, tf.int32, tf.int64:
        zero = tf.constant(0, dtype=dtype)
        one = tf.constant(1, dtype=dtype)
        bads = [one // zero]
        if dtype in (tf.int32, tf.int64):
          bads.append(one % zero)
        for bad in bads:
          try:
            result = bad.eval()
          except tf.OpError as e:
            # Ideally, we'd get a nice exception.  In theory, this should only
            # happen on CPU, but 32 bit integer GPU division is actually on
            # CPU due to a placer bug.
            # TODO(irving): Make stricter once the placer bug is fixed.
            self.assertIn('Integer division by zero', str(e))
          else:
            # On the GPU, integer division by zero produces all bits set.
            # But apparently on some GPUs "all bits set" for 64 bit division
            # means 32 bits set, so we allow 0xffffffff as well.  This isn't
            # very portable, so we may need to expand this list if other GPUs
            # do different things.
            self.assertTrue(tf.test.is_gpu_available())
            self.assertIn(result, (-1, 0xff, 0xffffffff)) 
Author: tobegit3hub, Project: deep_image_model, Lines of code: 27, Source file: zero_division_test.py

Example 6: testDtype

# Required module: import tensorflow [as alias]
# Or: from tensorflow import int16 [as alias]
def testDtype(self):
    with self.test_session():
      d = tf.fill([2, 3], 12., name="fill")
      self.assertEqual(d.get_shape(), [2, 3])
      # Test default type for both constant size and dynamic size
      z = tf.zeros([2, 3])
      self.assertEqual(z.dtype, tf.float32)
      self.assertEqual([2, 3], z.get_shape())
      self.assertAllEqual(z.eval(), np.zeros([2, 3]))
      z = tf.zeros(tf.shape(d))
      self.assertEqual(z.dtype, tf.float32)
      self.assertEqual([2, 3], z.get_shape())
      self.assertAllEqual(z.eval(), np.zeros([2, 3]))
      # Test explicit type control
      for dtype in [tf.float32, tf.float64, tf.int32,
                    tf.uint8, tf.int16, tf.int8,
                    tf.complex64, tf.complex128, tf.int64, tf.bool]:
        z = tf.zeros([2, 3], dtype=dtype)
        self.assertEqual(z.dtype, dtype)
        self.assertEqual([2, 3], z.get_shape())
        self.assertAllEqual(z.eval(), np.zeros([2, 3]))
        z = tf.zeros(tf.shape(d), dtype=dtype)
        self.assertEqual(z.dtype, dtype)
        self.assertEqual([2, 3], z.get_shape())
        self.assertAllEqual(z.eval(), np.zeros([2, 3])) 
Author: tobegit3hub, Project: deep_image_model, Lines of code: 27, Source file: constant_op_test.py

Example 7: testOnesLike

# Required module: import tensorflow [as alias]
# Or: from tensorflow import int16 [as alias]
def testOnesLike(self):
    for dtype in [tf.float32, tf.float64, tf.int32,
                  tf.uint8, tf.int16, tf.int8,
                  tf.complex64, tf.complex128, tf.int64]:
      numpy_dtype = dtype.as_numpy_dtype
      with self.test_session():
        # Creates a tensor of non-zero values with shape 2 x 3.
        d = tf.constant(np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
        # Constructs a tensor of zeros of the same dimensions and type as "d".
        z_var = tf.ones_like(d)
        # Test that the type is correct
        self.assertEqual(z_var.dtype, dtype)
        z_value = z_var.eval()

      # Test that the value is correct
      self.assertTrue(np.array_equal(z_value, np.array([[1] * 3] * 2)))
      self.assertEqual([2, 3], z_var.get_shape()) 
Author: tobegit3hub, Project: deep_image_model, Lines of code: 19, Source file: constant_op_test.py

Example 8: testIntTypes

# Required module: import tensorflow [as alias]
# Or: from tensorflow import int16 [as alias]
def testIntTypes(self):
    for dtype, nptype in [
        (tf.int32, np.int32),
        (tf.uint8, np.uint8),
        (tf.uint16, np.uint16),
        (tf.int16, np.int16),
        (tf.int8, np.int8)]:
      # Test with array.
      t = tensor_util.make_tensor_proto([10, 20, 30], dtype=dtype)
      self.assertEquals(dtype, t.dtype)
      self.assertProtoEquals("dim { size: 3 }", t.tensor_shape)
      a = tensor_util.MakeNdarray(t)
      self.assertEquals(nptype, a.dtype)
      self.assertAllClose(np.array([10, 20, 30], dtype=nptype), a)
      # Test with ndarray.
      t = tensor_util.make_tensor_proto(np.array([10, 20, 30], dtype=nptype))
      self.assertEquals(dtype, t.dtype)
      self.assertProtoEquals("dim { size: 3 }", t.tensor_shape)
      a = tensor_util.MakeNdarray(t)
      self.assertEquals(nptype, a.dtype)
      self.assertAllClose(np.array([10, 20, 30], dtype=nptype), a) 
Author: tobegit3hub, Project: deep_image_model, Lines of code: 23, Source file: tensor_util_test.py

Example 9: testNumpyConversion

# Required module: import tensorflow [as alias]
# Or: from tensorflow import int16 [as alias]
def testNumpyConversion(self):
    self.assertIs(tf.float32, tf.as_dtype(np.float32))
    self.assertIs(tf.float64, tf.as_dtype(np.float64))
    self.assertIs(tf.int32, tf.as_dtype(np.int32))
    self.assertIs(tf.int64, tf.as_dtype(np.int64))
    self.assertIs(tf.uint8, tf.as_dtype(np.uint8))
    self.assertIs(tf.uint16, tf.as_dtype(np.uint16))
    self.assertIs(tf.int16, tf.as_dtype(np.int16))
    self.assertIs(tf.int8, tf.as_dtype(np.int8))
    self.assertIs(tf.complex64, tf.as_dtype(np.complex64))
    self.assertIs(tf.complex128, tf.as_dtype(np.complex128))
    self.assertIs(tf.string, tf.as_dtype(np.object))
    self.assertIs(tf.string, tf.as_dtype(np.array(["foo", "bar"]).dtype))
    self.assertIs(tf.bool, tf.as_dtype(np.bool))
    with self.assertRaises(TypeError):
      tf.as_dtype(np.dtype([("f1", np.uint), ("f2", np.int32)])) 
Author: tobegit3hub, Project: deep_image_model, Lines of code: 18, Source file: dtypes_test.py

Example 10: parse_withlabel_function

# Required module: import tensorflow [as alias]
# Or: from tensorflow import int16 [as alias]
# Note: this example also uses the standard-library time module (import time).
def parse_withlabel_function(example_proto):
        time1 = time.time()
        features = {
            'image': tf.FixedLenFeature([], tf.string),
            'image_shape': tf.FixedLenFeature([], tf.string),
            'image_label': tf.FixedLenFeature([], tf.string)
        }

        content = tf.parse_single_example(example_proto, features=features)

        content['image_shape'] = tf.decode_raw(content['image_shape'], tf.int32)
        content['image_label'] = tf.decode_raw(content['image_label'], tf.int16)
        content['image'] = tf.decode_raw(content['image'], tf.int16)
        content['image'] = tf.reshape(content['image'], content['image_shape'])
        print('parse using time: ', time.time() - time1)
        return content['image'], content['image_label'] 
Author: thomaskuestner, Project: CNNArt, Lines of code: 18, Source file: convert_tf.py
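
A usage sketch for the parser above inside a tf.data input pipeline (the file name is hypothetical):

dataset = tf.data.TFRecordDataset('labeled_images.tfrecords')  # hypothetical file
dataset = dataset.map(parse_withlabel_function, num_parallel_calls=4)
image, image_label = dataset.make_one_shot_iterator().get_next()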

Example 11: _input

# Required module: import tensorflow [as alias]
# Or: from tensorflow import int16 [as alias]
def _input(self, dtype='float32', shape=None, name=None):
        """Define an input for the recommender.

        Parameters
        ----------
        dtype: str
            Data type: "float16", "float32", "float64", "int8", "int16", "int32", "int64", "bool", or "string".
        shape: list or tuple
            Input shape.
        name: str
            Name of the input.

        Returns
        -------
        Tensorflow placeholder
            Defined tensorflow placeholder.
        """
        if dtype not in self._str_to_dtype:
            raise ValueError
        else:
            return tf.placeholder(self._str_to_dtype[dtype], shape=shape, name=name) 
Author: ylongqi, Project: openrec, Lines of code: 24, Source file: recommender.py
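
The example refers to a self._str_to_dtype lookup that is not shown; a plausible definition is sketched below (the actual openrec mapping may differ):

_str_to_dtype = {
    'float16': tf.float16, 'float32': tf.float32, 'float64': tf.float64,
    'int8': tf.int8, 'int16': tf.int16, 'int32': tf.int32, 'int64': tf.int64,
    'bool': tf.bool, 'string': tf.string,
}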

Example 12: test__dtype_to_bytes

# Required module: import tensorflow [as alias]
# Or: from tensorflow import int16 [as alias]
def test__dtype_to_bytes():
    np_tf_dt = [
        (np.uint8, tf.uint8, b"uint8"),
        (np.uint16, tf.uint16, b"uint16"),
        (np.uint32, tf.uint32, b"uint32"),
        (np.uint64, tf.uint64, b"uint64"),
        (np.int8, tf.int8, b"int8"),
        (np.int16, tf.int16, b"int16"),
        (np.int32, tf.int32, b"int32"),
        (np.int64, tf.int64, b"int64"),
        (np.float16, tf.float16, b"float16"),
        (np.float32, tf.float32, b"float32"),
        (np.float64, tf.float64, b"float64"),
    ]

    for npd, tfd, dt in np_tf_dt:
        npd = np.dtype(npd)
        assert tfrecord._dtype_to_bytes(npd) == dt
        assert tfrecord._dtype_to_bytes(tfd) == dt

    assert tfrecord._dtype_to_bytes("float32") == b"float32"
    assert tfrecord._dtype_to_bytes("foobar") == b"foobar" 
Author: neuronets, Project: nobrainer, Lines of code: 24, Source file: tfrecord_test.py

Example 13: set_dtype

# Required module: import tensorflow [as alias]
# Or: from tensorflow import int16 [as alias]
def set_dtype(cls, data_type: str) -> None:
        """
        Class method to set the data types
        Args:
            data_type (str): '16' or '32'
        """
        if data_type.endswith('32'):
            float_key = 'float32'
            int_key = 'int32'
        elif data_type.endswith('16'):
            float_key = 'float16'
            int_key = 'int16'
        else:
            raise ValueError("Data type not known, choose '16' or '32'")

        cls.np_float = DTYPES[float_key]['numpy']
        cls.tf_float = DTYPES[float_key]['tf']
        cls.np_int = DTYPES[int_key]['numpy']
        cls.tf_int = DTYPES[int_key]['tf'] 
Author: materialsvirtuallab, Project: megnet, Lines of code: 21, Source file: config.py
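
The DTYPES table used above is not shown in the snippet; a plausible layout is sketched below (megnet's actual config may differ):

import numpy as np
import tensorflow as tf

DTYPES = {
    'float16': {'numpy': np.float16, 'tf': tf.float16},
    'float32': {'numpy': np.float32, 'tf': tf.float32},
    'int16': {'numpy': np.int16, 'tf': tf.int16},
    'int32': {'numpy': np.int32, 'tf': tf.int32},
}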

Example 14: _convert_string_dtype

# Required module: import tensorflow [as alias]
# Or: from tensorflow import int16 [as alias]
def _convert_string_dtype(dtype):
    if dtype == 'float16':
        return tf.float16
    elif dtype == 'float32':
        return tf.float32
    elif dtype == 'float64':
        return tf.float64
    elif dtype == 'int16':
        return tf.int16
    elif dtype == 'int32':
        return tf.int32
    elif dtype == 'int64':
        return tf.int64
    elif dtype == 'uint8':
        return tf.uint8
    elif dtype == 'uint16':
        return tf.uint16
    else:
        raise ValueError('Unsupported dtype:', dtype) 
Author: GUR9000, Project: KerasNeuralFingerprint, Lines of code: 21, Source file: tensorflow_backend.py
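
A quick usage sketch for the converter above:

assert _convert_string_dtype('int16') == tf.int16
assert _convert_string_dtype('float64') == tf.float64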

Example 15: reduce_mean_support_empty

# Required module: import tensorflow [as alias]
# Or: from tensorflow import int16 [as alias]
def reduce_mean_support_empty(input, keepdims=False):
    return tf.cond(tf.size(input) > 0, lambda: tf.reduce_mean(input, keepdims=keepdims), lambda: tf.zeros_like(input))


# def bit_tensor_list(input):
#     assert input.dtype in [tf.uint8, tf.uint16, tf.uint32, tf.uint64], 'unsupported data type, must be uint*'
#     num_bits = 0
#     if input.dtype == tf.int8:
#         num_bits = 8
#     elif input.dtype == tf.int16:
#         num_bits = 16
#     elif input.dtype == tf.uint32:
#         num_bits = 32
#     elif input.dtype == tf.uint64:
#         num_bits = 64
#     bit_tensors = []
#     for i in range(num_bits):
#         current_bit = 1 << i
#         current_bit_tensor = tf.bitwise.bitwise_and(input, current_bit) == 1
#         bit_tensors.append(current_bit_tensor)
#     print(bit_tensors)
#     return bit_tensors 
Author: christianpayer, Project: MedicalDataAugmentationTool, Lines of code: 24, Source file: tensorflow_util.py
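
For reference, a working variant of the commented-out bit_tensor_list helper might look like the sketch below; it restricts the dtype check to unsigned types and tests each bit with a mask rather than comparing to 1:

def bit_tensor_list(input):
    """Split an unsigned integer tensor into per-bit boolean mask tensors (sketch)."""
    bit_widths = {tf.uint8: 8, tf.uint16: 16, tf.uint32: 32, tf.uint64: 64}
    assert input.dtype in bit_widths, 'unsupported data type, must be uint*'
    bit_tensors = []
    for i in range(bit_widths[input.dtype]):
        current_bit = tf.constant(1 << i, dtype=input.dtype)
        bit_tensors.append(tf.not_equal(tf.bitwise.bitwise_and(input, current_bit), 0))
    return bit_tensors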


Note: The tensorflow.int16 examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers; copyright of the source code belongs to the original authors. Please consult each project's license before redistributing or using the code, and do not reproduce this article without permission.