本文整理汇总了Python中tensorflow.half方法的典型用法代码示例。如果您正苦于以下问题:Python tensorflow.half方法的具体用法?Python tensorflow.half怎么用?Python tensorflow.half使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类tensorflow的用法示例。
在下文中一共展示了tensorflow.half方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: testBasic
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import half [as 别名]
def testBasic(self):
    """One plain-SGD step via apply_gradients updates params as p - lr * g."""
    for dtype in (tf.half, tf.float32, tf.float64):
        with self.test_session():
            params_a = tf.Variable([1.0, 2.0], dtype=dtype)
            params_b = tf.Variable([3.0, 4.0], dtype=dtype)
            grad_a = tf.constant([0.1, 0.1], dtype=dtype)
            grad_b = tf.constant([0.01, 0.01], dtype=dtype)
            optimizer = tf.train.GradientDescentOptimizer(3.0)
            sgd_op = optimizer.apply_gradients(
                zip([grad_a, grad_b], [params_a, params_b]))
            tf.global_variables_initializer().run()
            # Parameters should still hold their declared initial values.
            self.assertAllCloseAccordingToType([1.0, 2.0], params_a.eval())
            self.assertAllCloseAccordingToType([3.0, 4.0], params_b.eval())
            # Apply a single SGD update with learning rate 3.0.
            sgd_op.run()
            self.assertAllCloseAccordingToType(
                [1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1], params_a.eval())
            self.assertAllCloseAccordingToType(
                [3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01], params_b.eval())
示例2: testTensorLearningRate
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import half [as 别名]
def testTensorLearningRate(self):
    """SGD behaves identically when the learning rate is a Tensor, not a float."""
    for dtype in (tf.half, tf.float32, tf.float64):
        with self.test_session():
            params_a = tf.Variable([1.0, 2.0], dtype=dtype)
            params_b = tf.Variable([3.0, 4.0], dtype=dtype)
            grad_a = tf.constant([0.1, 0.1], dtype=dtype)
            grad_b = tf.constant([0.01, 0.01], dtype=dtype)
            # Learning rate supplied as a constant Tensor rather than a Python float.
            lrate = tf.constant(3.0)
            sgd_op = tf.train.GradientDescentOptimizer(lrate).apply_gradients(
                zip([grad_a, grad_b], [params_a, params_b]))
            tf.global_variables_initializer().run()
            # Initial values are untouched before the update runs.
            self.assertAllCloseAccordingToType([1.0, 2.0], params_a.eval())
            self.assertAllCloseAccordingToType([3.0, 4.0], params_b.eval())
            # Apply a single SGD update.
            sgd_op.run()
            self.assertAllCloseAccordingToType(
                [1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1], params_a.eval())
            self.assertAllCloseAccordingToType(
                [3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01], params_b.eval())
示例3: testWithGlobalStep
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import half [as 别名]
def testWithGlobalStep(self):
    """apply_gradients with global_step updates params and bumps the counter."""
    for dtype in (tf.half, tf.float32, tf.float64):
        with self.test_session():
            step_counter = tf.Variable(0, trainable=False)
            params_a = tf.Variable([1.0, 2.0], dtype=dtype)
            params_b = tf.Variable([3.0, 4.0], dtype=dtype)
            grad_a = tf.constant([0.1, 0.1], dtype=dtype)
            grad_b = tf.constant([0.01, 0.01], dtype=dtype)
            sgd_op = tf.train.GradientDescentOptimizer(3.0).apply_gradients(
                zip([grad_a, grad_b], [params_a, params_b]),
                global_step=step_counter)
            tf.global_variables_initializer().run()
            # Initial values are untouched before the update runs.
            self.assertAllCloseAccordingToType([1.0, 2.0], params_a.eval())
            self.assertAllCloseAccordingToType([3.0, 4.0], params_b.eval())
            # Apply a single SGD update.
            sgd_op.run()
            # Params follow p - lr * g; global step increments to 1.
            self.assertAllCloseAccordingToType(
                [1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1], params_a.eval())
            self.assertAllCloseAccordingToType(
                [3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01], params_b.eval())
            self.assertAllCloseAccordingToType(1, step_counter.eval())
示例4: testBasic
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import half [as 别名]
def testBasic(self):
    """minimize() on a linear cost takes one SGD step with the analytic gradients."""
    for dtype in (tf.half, tf.float32, tf.float64):
        with self.test_session():
            params_a = tf.Variable([1.0, 2.0], dtype=dtype)
            params_b = tf.Variable([3.0, 4.0], dtype=dtype)
            # Linear cost => constant gradients of 5 (w.r.t. a) and 3 (w.r.t. b).
            cost = 5 * params_a + 3 * params_b
            global_step = tf.Variable(tf.zeros([], tf.int64), name='global_step')
            optimizer = tf.train.GradientDescentOptimizer(3.0)
            opt_op = optimizer.minimize(cost, global_step, [params_a, params_b])
            tf.global_variables_initializer().run()
            # Initial values are untouched before the update runs.
            self.assertAllClose([1.0, 2.0], params_a.eval())
            self.assertAllClose([3.0, 4.0], params_b.eval())
            # Apply a single SGD step through the optimizer.
            opt_op.run()
            # a: [1, 2] - 3 * 5 = [-14, -13]; b: [3, 4] - 3 * 3 = [-6, -5].
            self.assertAllClose([-14., -13.], params_a.eval())
            self.assertAllClose([-6., -5.], params_b.eval())
示例5: testAggregationMethod
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import half [as 别名]
def testAggregationMethod(self):
    """minimize() gives the same result under EXPERIMENTAL_ACCUMULATE_N aggregation."""
    for dtype in (tf.half, tf.float32, tf.float64):
        with self.test_session():
            params_a = tf.Variable([1.0, 2.0], dtype=dtype)
            params_b = tf.Variable([3.0, 4.0], dtype=dtype)
            # Linear cost => constant gradients of 5 (w.r.t. a) and 3 (w.r.t. b).
            cost = 5 * params_a + 3 * params_b
            global_step = tf.Variable(tf.zeros([], tf.int64), name='global_step')
            optimizer = tf.train.GradientDescentOptimizer(3.0)
            opt_op = optimizer.minimize(
                cost,
                global_step,
                [params_a, params_b],
                aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
            tf.global_variables_initializer().run()
            # Initial values are untouched before the update runs.
            self.assertAllClose([1.0, 2.0], params_a.eval())
            self.assertAllClose([3.0, 4.0], params_b.eval())
            # Apply a single SGD step through the optimizer.
            opt_op.run()
            # Same expected values as the default aggregation method.
            self.assertAllClose([-14., -13.], params_a.eval())
            self.assertAllClose([-6., -5.], params_b.eval())
示例6: testPrecomputedGradient
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import half [as 别名]
def testPrecomputedGradient(self):
    """grad_loss scales the backprop: update becomes lr * coeff * grad_loss."""
    for dtype in (tf.half, tf.float32, tf.float64):
        with self.test_session():
            params_a = tf.Variable([1.0, 2.0], dtype=dtype)
            params_b = tf.Variable([3.0, 4.0], dtype=dtype)
            cost = 5 * params_a + 3 * params_b
            # Pre-supplied upstream gradient that multiplies the cost gradients.
            grad_loss = tf.constant([42, -42], dtype=dtype)
            global_step = tf.Variable(tf.zeros([], tf.int64), name='global_step')
            optimizer = tf.train.GradientDescentOptimizer(3.0)
            opt_op = optimizer.minimize(cost,
                                        global_step, [params_a, params_b],
                                        grad_loss=grad_loss)
            tf.global_variables_initializer().run()
            # Initial values are untouched before the update runs.
            self.assertAllClose([1.0, 2.0], params_a.eval())
            self.assertAllClose([3.0, 4.0], params_b.eval())
            # Apply a single SGD step through the optimizer.
            opt_op.run()
            # Each update is lr (3) * cost coefficient (5 or 3) * grad_loss element.
            self.assertAllClose(
                [1.0 - 3 * 5 * 42.0, 2.0 - 3 * 5 * (-42.0)], params_a.eval())
            self.assertAllClose(
                [3.0 - 3 * 3 * 42.0, 4.0 - 3 * 3 * (-42.0)], params_b.eval())
示例7: doTestBasic
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import half [as 别名]
def doTestBasic(self, use_locking=False):
    """Three Adagrad steps reach known reference values, with/without locking."""
    for dtype in (tf.half, tf.float32, tf.float64):
        with self.test_session():
            params_a = tf.Variable([1.0, 2.0], dtype=dtype)
            params_b = tf.Variable([3.0, 4.0], dtype=dtype)
            grad_a = tf.constant([0.1, 0.1], dtype=dtype)
            grad_b = tf.constant([0.01, 0.01], dtype=dtype)
            ada_opt = tf.train.AdagradOptimizer(3.0,
                                                initial_accumulator_value=0.1,
                                                use_locking=use_locking)
            ada_update = ada_opt.apply_gradients(
                zip([grad_a, grad_b], [params_a, params_b]))
            tf.global_variables_initializer().run()
            # Initial values are untouched before any update runs.
            self.assertAllClose([1.0, 2.0], params_a.eval())
            self.assertAllClose([3.0, 4.0], params_b.eval())
            # Run three Adagrad updates.
            for _ in range(3):
                ada_update.run()
            # Reference values precomputed for lr=3.0, accumulator init 0.1.
            self.assertAllCloseAccordingToType(
                np.array([-1.6026098728179932, -0.6026098728179932]),
                params_a.eval())
            self.assertAllCloseAccordingToType(
                np.array([2.715679168701172, 3.715679168701172]),
                params_b.eval())
示例8: testTensorLearningRate
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import half [as 别名]
def testTensorLearningRate(self):
    """Adagrad matches reference values when the learning rate is a Tensor."""
    for dtype in (tf.half, tf.float32, tf.float64):
        with self.test_session():
            params_a = tf.Variable([1.0, 2.0], dtype=dtype)
            params_b = tf.Variable([3.0, 4.0], dtype=dtype)
            grad_a = tf.constant([0.1, 0.1], dtype=dtype)
            grad_b = tf.constant([0.01, 0.01], dtype=dtype)
            # Learning rate given as a Tensor rather than a Python float.
            ada_opt = tf.train.AdagradOptimizer(
                tf.constant(3.0),
                initial_accumulator_value=0.1)
            ada_update = ada_opt.apply_gradients(
                zip([grad_a, grad_b], [params_a, params_b]))
            tf.global_variables_initializer().run()
            # Initial values are untouched before any update runs.
            self.assertAllClose([1.0, 2.0], params_a.eval())
            self.assertAllClose([3.0, 4.0], params_b.eval())
            # Run three Adagrad updates.
            for _ in range(3):
                ada_update.run()
            # Same reference values as the float-learning-rate case.
            self.assertAllCloseAccordingToType(
                np.array([-1.6026098728179932, -0.6026098728179932]),
                params_a.eval())
            self.assertAllCloseAccordingToType(
                np.array([2.715679168701172, 3.715679168701172]),
                params_b.eval())
示例9: testEquivAdagradwithoutRegularization
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import half [as 别名]
def testEquivAdagradwithoutRegularization(self):
    """Unregularized FTRL with power -0.5 must match plain Adagrad exactly."""
    for dtype in (tf.half, tf.float32):
        # FTRL configured to degenerate into Adagrad: no L1/L2, lr power -0.5.
        with self.test_session():
            ftrl_a, ftrl_b = self.applyOptimizer(
                tf.train.FtrlOptimizer(3.0,
                                       # Adagrad learning rate
                                       learning_rate_power=-0.5,
                                       initial_accumulator_value=0.1,
                                       l1_regularization_strength=0.0,
                                       l2_regularization_strength=0.0),
                dtype)
        # Reference run with a genuine AdagradOptimizer.
        with self.test_session():
            ada_a, ada_b = self.applyOptimizer(
                tf.train.AdagradOptimizer(3.0, initial_accumulator_value=0.1),
                dtype)
        self.assertAllCloseAccordingToType(ftrl_a, ada_a)
        self.assertAllCloseAccordingToType(ftrl_b, ada_b)
示例10: testEquivSparseAdagradwithoutRegularization
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import half [as 别名]
def testEquivSparseAdagradwithoutRegularization(self):
    """Sparse, unregularized FTRL with power -0.5 must match sparse Adagrad."""
    for dtype in (tf.half, tf.float32):
        # FTRL configured to degenerate into Adagrad, applied sparsely.
        with self.test_session():
            ftrl_a, ftrl_b = self.applyOptimizer(
                tf.train.FtrlOptimizer(3.0,
                                       # Adagrad learning rate
                                       learning_rate_power=-0.5,
                                       initial_accumulator_value=0.1,
                                       l1_regularization_strength=0.0,
                                       l2_regularization_strength=0.0),
                dtype,
                is_sparse=True)
        # Reference sparse run with a genuine AdagradOptimizer.
        with self.test_session():
            ada_a, ada_b = self.applyOptimizer(
                tf.train.AdagradOptimizer(3.0, initial_accumulator_value=0.1),
                dtype, is_sparse=True)
        self.assertAllCloseAccordingToType(ftrl_a, ada_a)
        self.assertAllCloseAccordingToType(ftrl_b, ada_b)
示例11: testEquivSparseGradientDescentwithoutRegularization
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import half [as 别名]
def testEquivSparseGradientDescentwithoutRegularization(self):
    """Sparse, unregularized FTRL with power 0 must match sparse plain SGD."""
    for dtype in (tf.half, tf.float32):
        # FTRL configured to degenerate into fixed-rate SGD, applied sparsely.
        with self.test_session():
            ftrl_a, ftrl_b = self.applyOptimizer(
                tf.train.FtrlOptimizer(3.0,
                                       # Fixed learning rate
                                       learning_rate_power=-0.0,
                                       initial_accumulator_value=0.1,
                                       l1_regularization_strength=0.0,
                                       l2_regularization_strength=0.0),
                dtype,
                is_sparse=True)
        # Reference sparse run with a genuine GradientDescentOptimizer.
        with self.test_session():
            sgd_a, sgd_b = self.applyOptimizer(
                tf.train.GradientDescentOptimizer(3.0), dtype, is_sparse=True)
        self.assertAllCloseAccordingToType(ftrl_a, sgd_a)
        self.assertAllCloseAccordingToType(ftrl_b, sgd_b)
示例12: testEquivGradientDescentwithoutRegularization
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import half [as 别名]
def testEquivGradientDescentwithoutRegularization(self):
    """Unregularized FTRL with power 0 must match plain SGD exactly."""
    for dtype in (tf.half, tf.float32):
        # FTRL configured to degenerate into fixed-rate SGD: no L1/L2, power 0.
        with self.test_session():
            ftrl_a, ftrl_b = self.applyOptimizer(
                tf.train.FtrlOptimizer(3.0,
                                       # Fixed learning rate
                                       learning_rate_power=-0.0,
                                       initial_accumulator_value=0.1,
                                       l1_regularization_strength=0.0,
                                       l2_regularization_strength=0.0),
                dtype)
        # Reference run with a genuine GradientDescentOptimizer.
        with self.test_session():
            sgd_a, sgd_b = self.applyOptimizer(
                tf.train.GradientDescentOptimizer(3.0), dtype)
        self.assertAllCloseAccordingToType(ftrl_a, sgd_a)
        self.assertAllCloseAccordingToType(ftrl_b, sgd_b)
示例13: args_check
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import half [as 别名]
def args_check(cls, node, **kwargs):
    """Reject CumSum inputs whose dtype TensorFlow cannot handle.

    Looks up the node's first input tensor in kwargs["tensor_dict"] and
    raises via exception.OP_UNSUPPORTED_EXCEPT when its dtype is outside
    the supported numeric set.
    """
    ok_dtypes = {
        tf.bfloat16, tf.half, tf.float32, tf.float64, tf.uint8, tf.uint16,
        tf.int8, tf.int16, tf.int32, tf.int64, tf.complex64, tf.complex128
    }
    tensor = kwargs["tensor_dict"][node.inputs[0]]
    if tensor.dtype not in ok_dtypes:
        exception.OP_UNSUPPORTED_EXCEPT(
            "CumSum input in " + str(tensor.dtype) + " which", "Tensorflow")
示例14: args_check
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import half [as 别名]
def args_check(cls, node, **kwargs):
    """Reject Equal inputs whose dtype TensorFlow cannot handle.

    Looks up the node's first input tensor in kwargs["tensor_dict"] and
    raises via exception.OP_UNSUPPORTED_EXCEPT when its dtype is outside
    the supported set (numeric, quantized, string, bool, complex).
    """
    ok_dtypes = {
        tf.bfloat16, tf.half, tf.float32, tf.float64, tf.uint8, tf.int8,
        tf.int16, tf.int32, tf.int64, tf.complex64, tf.quint8, tf.qint8,
        tf.qint32, tf.string, tf.bool, tf.complex128
    }
    tensor = kwargs["tensor_dict"][node.inputs[0]]
    if tensor.dtype not in ok_dtypes:
        exception.OP_UNSUPPORTED_EXCEPT(
            "Equal inputs in " + str(tensor.dtype) + " which", "Tensorflow")
示例15: _dtypes_to_test
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import half [as 别名]
def _dtypes_to_test(use_gpu):
    """Return the dtypes to exercise for the given device.

    Based on issue #347 (https://github.com/tensorflow/addons/issues/347):
    tf.half is not registered for the 'ResourceScatterUpdate' OpKernel on
    'GPU', so it is only included when testing on CPU.
    """
    float_dtypes = [tf.float32, tf.float64]
    return float_dtypes if use_gpu else [tf.half] + float_dtypes