This article collects typical usage examples of the Python method tensorflow.realdiv. If you are wondering how exactly tensorflow.realdiv is used in Python, what it does, or where to find examples of it, the curated code samples below may help. You can also explore further usage examples of the enclosing tensorflow module.
The following presents 5 code examples of tensorflow.realdiv, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
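As a quick orientation before the examples, here is a minimal sketch of tf.realdiv on its own: it divides two tensors elementwise and, for floating-point inputs, behaves like the / operator. This sketch uses the TF 1.x session API that the examples below also assume; it is an illustration, not code from any of the source repositories.

import tensorflow as tf

x = tf.constant([7.0, 3.0, 1.0])
y = tf.constant([2.0, 4.0, 8.0])
quotient = tf.realdiv(x, y)  # elementwise division -> [3.5, 0.75, 0.125]

with tf.Session() as sess:
  print(sess.run(quotient))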
Example 1: lr_decay
# Required import: import tensorflow [as alias]
# Or: from tensorflow import realdiv [as alias]
def lr_decay(init_lr, decay, time_const, global_step):
  """Gets a decayed learning rate using the inverse decay rule.

  Args:
    init_lr: Initial learning rate at step 0.
    decay: Decay exponent.
    time_const: Time constant of the inverse decay rule.
    global_step: Time step.

  Returns:
    lr: Learning rate at the current time step.
  """
  decay = tf.constant(decay)
  decay_step = tf.cast(global_step, tf.float32)
  # lr = init_lr / (1 + step / time_const) ** decay
  lr = tf.realdiv(init_lr,
                  tf.pow(1.0 + tf.realdiv(decay_step, time_const), decay))
  return lr
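A hypothetical call site for lr_decay is sketched below; the step variable and the concrete hyperparameter values (init_lr=0.1, decay=0.5, time_const=10000.0) are illustrative assumptions, not values taken from the original repository.

import tensorflow as tf

global_step = tf.train.get_or_create_global_step()
# Learning rate that decays as 0.1 / (1 + step / 10000) ** 0.5.
lr = lr_decay(init_lr=0.1, decay=0.5, time_const=10000.0, global_step=global_step)
opt = tf.train.GradientDescentOptimizer(lr)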
Example 2: tf_preprocess
# Required import: import tensorflow [as alias]
# Or: from tensorflow import realdiv [as alias]
def tf_preprocess(self,
                  image_size=84,
                  crop_size=92,
                  random_crop=True,
                  random_flip=True,
                  random_color=True,
                  whiten=False):
  inp = tf.placeholder(tf.uint8, [None, None, 3])
  image = tf.realdiv(tf.cast(inp, tf.float32), 255.0)
  # image = debug_identity(image)
  if random_crop:
    log.info("Apply random cropping")
    image = tf.image.resize_image_with_crop_or_pad(image, crop_size,
                                                   crop_size)
    image = tf.random_crop(image, [image_size, image_size, 3])
  else:
    image = tf.image.resize_image_with_crop_or_pad(image, image_size,
                                                   image_size)
  if random_flip:
    log.info("Apply random flipping")
    image = tf.image.random_flip_left_right(image)
  # Brightness/saturation/contrast provides small gains of .2%~.5% on CIFAR.
  if random_color:
    image = tf.image.random_brightness(image, max_delta=63. / 255.)
    image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
    image = tf.image.random_contrast(image, lower=0.2, upper=1.8)
  if whiten:
    log.info("Apply whitening")
    image = tf.image.per_image_whitening(image)
  return inp, image
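The method above is tied to its enclosing class, so the fragment below isolates only the tf.realdiv normalization step as a small, self-contained sketch: a random uint8 image is fed through the placeholder and mapped to floats in [0, 1]. The random input and the 92x92 size are illustrative assumptions.

import numpy as np
import tensorflow as tf

inp = tf.placeholder(tf.uint8, [None, None, 3])
image = tf.realdiv(tf.cast(inp, tf.float32), 255.0)  # scale pixels to [0, 1]

with tf.Session() as sess:
  raw = np.random.randint(0, 256, size=(92, 92, 3), dtype=np.uint8)
  out = sess.run(image, feed_dict={inp: raw})
  print(out.min(), out.max())  # both endpoints lie within [0.0, 1.0]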
Example 3: _realdiv_maybe_zero
# Required import: import tensorflow [as alias]
# Or: from tensorflow import realdiv [as alias]
def _realdiv_maybe_zero(x, y):
  """Support tf.realdiv(x, y) where y may contain zeros."""
  return tf.where(tf.less(y, _EPSILON), tf.zeros_like(x), tf.realdiv(x, y))
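The guard above only makes sense with a concrete _EPSILON, which is defined elsewhere in the original module; the sketch below assumes a value of 1e-7 purely for illustration and shows that positions where y is (near) zero yield 0 instead of inf or nan.

import tensorflow as tf

_EPSILON = 1e-7  # assumed value; the original constant lives elsewhere in the module

x = tf.constant([1.0, 2.0, 3.0])
y = tf.constant([2.0, 0.0, 4.0])
safe = tf.where(tf.less(y, _EPSILON), tf.zeros_like(x), tf.realdiv(x, y))

with tf.Session() as sess:
  print(sess.run(safe))  # [0.5, 0.0, 0.75]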
Example 4: adam
# Required import: import tensorflow [as alias]
# Or: from tensorflow import realdiv [as alias]
def adam(grads, velocity_m, velocity_v, var_list, lr, beta1, beta2, epsilon):
  """ADAM update.

  Args:
    grads: List of gradients of the trainable variables.
    velocity_m: List of first-moment accumulators of the trainable variables.
    velocity_v: List of second-moment accumulators of the trainable variables.
    var_list: List of variables to be optimized.
    lr: Learning rate.
    beta1: First-moment decay rate.
    beta2: Second-moment decay rate.
    epsilon: Small constant added to the denominator for numerical stability.

  Returns:
    var_list_new: List of new variables to be assigned.
    velocity_m_new: List of new velocity_m to be assigned.
    velocity_v_new: List of new velocity_v to be assigned.
  """
  velocity_m_new = [
      beta1 * mm + (1 - beta1) * gg
      for gg, mm in list(zip(grads, velocity_m))
  ]
  velocity_v_new = [
      beta2 * vv + (1 - beta2) * gg * gg
      for gg, vv in list(zip(grads, velocity_v))
  ]
  var_list_new = [
      var - tf.realdiv(lr * mm, (tf.sqrt(vv) + epsilon))
      for var, mm, vv in list(zip(var_list, velocity_m_new, velocity_v_new))
  ]
  return var_list_new, velocity_m_new, velocity_v_new
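To show how the helper is meant to be driven, the snippet below applies a single update step to a toy variable; the learning rate, beta values, and epsilon are ordinary Adam defaults chosen for illustration, not hyperparameters from the original code, and note that this helper omits Adam's bias-correction terms.

import tensorflow as tf

var = tf.Variable([1.0, 2.0])
grad = tf.constant([0.1, -0.2])
m0 = tf.zeros_like(var)
v0 = tf.zeros_like(var)

new_vars, new_m, new_v = adam(
    grads=[grad], velocity_m=[m0], velocity_v=[v0], var_list=[var],
    lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8)
update = tf.assign(var, new_vars[0])

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(update))  # variable after one Adam-style step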
Example 5: minimize
# Required import: import tensorflow [as alias]
# Or: from tensorflow import realdiv [as alias]
def minimize(self, cost, var_list=None, global_step=None,
             gate_gradients=1):
  """See above in class Optimizer."""
  if var_list is None:
    var_list = tf.trainable_variables()
  self._var_list = var_list
  if global_step is None:
    global_step = tf.get_variable(
        'global_step', [],
        dtype=tf.int64,
        initializer=tf.constant_initializer(0, dtype=tf.int64),
        trainable=False)
  grads = tf.gradients(cost, var_list, gate_gradients=gate_gradients)
  self._grads = grads
  self._lr, self._mom = self.reparameterize(self.hyperparams['lr'],
                                            self.hyperparams['mom'])
  # Learning rate decay.
  decay = self.hyperparams['decay']
  t = tf.cast(global_step, self.dtype)
  time_const_f = tf.constant(self._time_const, dtype=self.dtype)
  self._lr = self._lr * tf.pow(1.0 + tf.realdiv(t, time_const_f), -decay)
  grads = tf.gradients(cost, var_list, gate_gradients=True)
  return self.apply_gradients(
      list(zip(grads, var_list)), global_step=global_step)
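Because minimize depends on the surrounding Optimizer class (hyperparams, reparameterize, apply_gradients), a full usage example cannot be reproduced here; instead, the fragment below isolates the inverse-decay factor that minimize builds with tf.realdiv, using illustrative constants (t=2500, time_const=5000, decay=0.5) rather than the repository's hyperparameters.

import tensorflow as tf

t = tf.constant(2500.0)
time_const_f = tf.constant(5000.0)
decay = tf.constant(0.5)
# Decay factor (1 + t / time_const) ** (-decay), applied multiplicatively to the learning rate.
factor = tf.pow(1.0 + tf.realdiv(t, time_const_f), -decay)

with tf.Session() as sess:
  print(sess.run(factor))  # approximately 0.8165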