This article collects typical usage examples of dragnn.python.composite_optimizer.CompositeOptimizer in Python. If you are unsure what composite_optimizer.CompositeOptimizer does or how to call it, the curated code examples below may help. You can also explore further usage examples from the module dragnn.python.composite_optimizer in which it is defined.
The following presents 5 code examples of composite_optimizer.CompositeOptimizer, sorted by popularity by default.
Example 1: _create_learning_rate
# Required import: from dragnn.python import composite_optimizer [as alias]
# Or alternatively: from dragnn.python.composite_optimizer import CompositeOptimizer [as alias]
def _create_learning_rate(hyperparams, step_var):
  """Creates learning rate var, with decay and switching for CompositeOptimizer.

  Args:
    hyperparams: a GridPoint proto containing optimizer spec, particularly
      learning_method to determine optimizer class to use.
    step_var: tf.Variable, global training step.

  Returns:
    a scalar `Tensor`, the learning rate based on current step and hyperparams.
  """
  if hyperparams.learning_method != 'composite':
    base_rate = hyperparams.learning_rate
  else:
    spec = hyperparams.composite_optimizer_spec
    switch = tf.less(step_var, spec.switch_after_steps)
    base_rate = tf.cond(switch, lambda: tf.constant(spec.method1.learning_rate),
                        lambda: tf.constant(spec.method2.learning_rate))
  return tf.train.exponential_decay(
      base_rate,
      step_var,
      hyperparams.decay_steps,
      hyperparams.decay_base,
      staircase=hyperparams.decay_staircase)
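For reference, here is a minimal sketch of how the helper above might be called. The real hyperparams argument is a DRAGNN GridPoint proto; the SimpleNamespace stand-in and its field values are assumptions made purely for illustration.

import types
import tensorflow as tf

# Hypothetical non-composite hyperparameters (stand-in for a GridPoint proto).
hyperparams = types.SimpleNamespace(
    learning_method='adam',   # any value other than 'composite'
    learning_rate=0.1,
    decay_steps=1000,
    decay_base=0.96,
    decay_staircase=True)

step_var = tf.train.get_or_create_global_step()
learning_rate = _create_learning_rate(hyperparams, step_var)  # scalar Tensor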
Example 2: _create_learning_rate
# Required import: from dragnn.python import composite_optimizer [as alias]
# Or alternatively: from dragnn.python.composite_optimizer import CompositeOptimizer [as alias]
def _create_learning_rate(hyperparams, step_var):
  """Creates learning rate var, with decay and switching for CompositeOptimizer.

  Args:
    hyperparams: a GridPoint proto containing optimizer spec, particularly
      learning_method to determine optimizer class to use.
    step_var: tf.Variable, global training step.

  Raises:
    ValueError: If the composite optimizer is set, but not correctly configured.

  Returns:
    a scalar `Tensor`, the learning rate based on current step and hyperparams.
  """
  if hyperparams.learning_method != 'composite':
    base_rate = hyperparams.learning_rate
    adjusted_steps = step_var
  else:
    spec = hyperparams.composite_optimizer_spec
    switch = tf.less(step_var, spec.switch_after_steps)
    base_rate = tf.cond(switch, lambda: tf.constant(spec.method1.learning_rate),
                        lambda: tf.constant(spec.method2.learning_rate))
    if spec.reset_learning_rate:
      adjusted_steps = tf.cond(switch, lambda: step_var,
                               lambda: step_var - spec.switch_after_steps)
    else:
      adjusted_steps = step_var

  return tf.train.exponential_decay(
      learning_rate=base_rate,
      global_step=adjusted_steps,
      decay_steps=hyperparams.decay_steps,
      decay_rate=hyperparams.decay_base,
      staircase=hyperparams.decay_staircase)
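Compared with Example 1, this variant can restart the decay schedule at the switch point when spec.reset_learning_rate is set. The following plain-Python sketch reproduces the schedule the helper builds, using tf.train.exponential_decay's formula lr * decay_rate ** (step / decay_steps); the switch point, rates, and decay parameters are hypothetical values chosen for illustration.

def sketch_rate(step, switch_after=200, lr1=0.1, lr2=0.05,
                decay_base=0.9, decay_steps=100, reset=True):
  # Pick the base rate of whichever optimizer is active at this step.
  base = lr1 if step < switch_after else lr2
  # With reset=True the decay clock restarts once the second optimizer takes over.
  adjusted = step if (step < switch_after or not reset) else step - switch_after
  return base * decay_base ** (adjusted / float(decay_steps))

print(sketch_rate(199))  # method1 rate after 199 steps of decay
print(sketch_rate(200))  # method2 rate; the decay restarts at the switch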
Example 3: test_switching
# Required import: from dragnn.python import composite_optimizer [as alias]
# Or alternatively: from dragnn.python.composite_optimizer import CompositeOptimizer [as alias]
def test_switching(self):
  with self.test_session() as sess:
    # Create 100 phony x, y data points in NumPy, y = x * 0.1 + 0.3
    x_data = np.random.rand(100).astype(np.float32)
    y_data = x_data * 0.1 + 0.3

    # Try to find values for w and b that compute y_data = w * x_data + b
    # (We know that w should be 0.1 and b 0.3, but TensorFlow will
    # figure that out for us.)
    w = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
    b = tf.Variable(tf.zeros([1]))
    y = w * x_data + b

    # Minimize the mean squared errors.
    loss = tf.reduce_mean(tf.square(y - y_data))

    # Set up optimizers.
    step = tf.get_variable(
        "step",
        shape=[],
        initializer=tf.zeros_initializer(),
        trainable=False,
        dtype=tf.int32)
    optimizer1 = MockAdamOptimizer(0.05)
    optimizer2 = MockMomentumOptimizer(0.05, 0.5)
    switch = tf.less(step, 100)
    optimizer = composite_optimizer.CompositeOptimizer(optimizer1, optimizer2,
                                                       switch)
    train_op = optimizer.minimize(loss)
    sess.run(tf.global_variables_initializer())

    # Fit the line.
    for iteration in range(201):
      self.assertEqual(sess.run(switch), iteration < 100)
      sess.run(train_op)
      sess.run(tf.assign_add(step, 1))

      slot_names = optimizer.get_slot_names()
      self.assertItemsEqual(
          slot_names,
          ["m", "v", "momentum", "adam_counter", "momentum_counter"])
      adam_counter = sess.run(optimizer.get_slot(w, "adam_counter"))
      momentum_counter = sess.run(optimizer.get_slot(w, "momentum_counter"))
      self.assertEqual(adam_counter, min(iteration + 1, 100))
      self.assertEqual(momentum_counter, max(iteration - 99, 0))

      if iteration % 20 == 0:
        logging.info("%d %s %d %d", iteration, sess.run([switch, step, w, b]),
                     adam_counter, momentum_counter)
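The MockAdamOptimizer and MockMomentumOptimizer fixtures used by this test are not shown on this page. A minimal sketch of how such mocks could look (an assumption, not the test's actual source): each wraps a standard optimizer and maintains an extra counter slot that records, per variable, how often that optimizer was applied, which is what the assertions on adam_counter and momentum_counter rely on.

import tensorflow as tf

class MockAdamOptimizer(tf.train.AdamOptimizer):
  """Adam plus an 'adam_counter' slot incremented on every dense update."""

  def _create_slots(self, var_list):
    super(MockAdamOptimizer, self)._create_slots(var_list)
    for var in var_list:
      self._zeros_slot(var, "adam_counter", self._name)

  def _apply_dense(self, grad, var):
    counter = self.get_slot(var, "adam_counter")
    return tf.group(
        super(MockAdamOptimizer, self)._apply_dense(grad, var),
        tf.assign_add(counter, tf.ones_like(counter)))


class MockMomentumOptimizer(tf.train.MomentumOptimizer):
  """Momentum plus a 'momentum_counter' slot incremented on every dense update."""

  def _create_slots(self, var_list):
    super(MockMomentumOptimizer, self)._create_slots(var_list)
    for var in var_list:
      self._zeros_slot(var, "momentum_counter", self._name)

  def _apply_dense(self, grad, var):
    counter = self.get_slot(var, "momentum_counter")
    return tf.group(
        super(MockMomentumOptimizer, self)._apply_dense(grad, var),
        tf.assign_add(counter, tf.ones_like(counter)))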
Example 4: _create_optimizer
# Required import: from dragnn.python import composite_optimizer [as alias]
# Or alternatively: from dragnn.python.composite_optimizer import CompositeOptimizer [as alias]
def _create_optimizer(hyperparams, learning_rate_var, step_var=None):
  """Creates an optimizer object for a given spec, learning rate and step var.

  Args:
    hyperparams: a GridPoint proto containing optimizer spec, particularly
      learning_method to determine optimizer class to use.
    learning_rate_var: a `tf.Tensor`, the learning rate.
    step_var: a `tf.Variable`, global training step.

  Returns:
    a `tf.train.Optimizer` object that was built.
  """
  if hyperparams.learning_method == 'gradient_descent':
    return tf.train.GradientDescentOptimizer(
        learning_rate_var, use_locking=True)
  elif hyperparams.learning_method == 'adam':
    return tf.train.AdamOptimizer(
        learning_rate_var,
        beta1=hyperparams.adam_beta1,
        beta2=hyperparams.adam_beta2,
        epsilon=hyperparams.adam_eps,
        use_locking=True)
  elif hyperparams.learning_method == 'lazyadam':
    return tf.contrib.opt.LazyAdamOptimizer(
        learning_rate_var,
        beta1=hyperparams.adam_beta1,
        beta2=hyperparams.adam_beta2,
        epsilon=hyperparams.adam_eps,
        use_locking=True)
  elif hyperparams.learning_method == 'momentum':
    return tf.train.MomentumOptimizer(
        learning_rate_var, hyperparams.momentum, use_locking=True)
  elif hyperparams.learning_method == 'composite':
    spec = hyperparams.composite_optimizer_spec
    optimizer1 = _create_optimizer(spec.method1, learning_rate_var, step_var)
    optimizer2 = _create_optimizer(spec.method2, learning_rate_var, step_var)
    if step_var is None:
      logging.fatal('step_var is required for CompositeOptimizer')
    switch = tf.less(step_var, spec.switch_after_steps)
    return composite_optimizer.CompositeOptimizer(
        optimizer1, optimizer2, switch, use_locking=True)
  else:
    logging.fatal('Unknown learning method (optimizer)')
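A sketch of how this factory might be combined with the _create_learning_rate helper from Example 2 to build a composite training op. The SimpleNamespace objects stand in for the nested GridPoint and composite optimizer spec protos, and all field values are illustrative assumptions.

import types
import tensorflow as tf

method1 = types.SimpleNamespace(learning_method='adam', learning_rate=0.1,
                                adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-8)
method2 = types.SimpleNamespace(learning_method='momentum', learning_rate=0.05,
                                momentum=0.9)
composite_spec = types.SimpleNamespace(
    method1=method1, method2=method2,
    switch_after_steps=100, reset_learning_rate=True)
hyperparams = types.SimpleNamespace(
    learning_method='composite', learning_rate=0.1,
    composite_optimizer_spec=composite_spec,
    decay_steps=1000, decay_base=0.96, decay_staircase=False)

# A trivial model so the sketch is self-contained.
w = tf.get_variable('w', shape=[], initializer=tf.zeros_initializer())
loss = tf.square(w - 1.0)

step_var = tf.train.get_or_create_global_step()
learning_rate = _create_learning_rate(hyperparams, step_var)
optimizer = _create_optimizer(hyperparams, learning_rate, step_var)
train_op = optimizer.minimize(loss, global_step=step_var)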
Example 5: test_switching
# Required import: from dragnn.python import composite_optimizer [as alias]
# Or alternatively: from dragnn.python.composite_optimizer import CompositeOptimizer [as alias]
def test_switching(self):
  with self.test_session() as sess:
    # Create 100 phony x, y data points in NumPy, y = x * 0.1 + 0.3
    x_data = np.random.rand(100).astype(np.float32)
    y_data = x_data * 0.1 + 0.3

    # Try to find values for w and b that compute y_data = w * x_data + b
    # (We know that w should be 0.1 and b 0.3, but TensorFlow will
    # figure that out for us.)
    w = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
    b = tf.Variable(tf.zeros([1]))
    y = w * x_data + b

    # Minimize the mean squared errors.
    loss = tf.reduce_mean(tf.square(y - y_data))

    # Set up optimizers.
    step = tf.get_variable(
        "step",
        shape=[],
        initializer=tf.zeros_initializer(),
        trainable=False,
        dtype=tf.int32)
    optimizer1 = MockAdamOptimizer(0.05)
    optimizer2 = MockMomentumOptimizer(0.05, 0.5)
    switch = tf.less(step, 100)
    optimizer = composite_optimizer.CompositeOptimizer(
        optimizer1, optimizer2, switch)
    train_op = optimizer.minimize(loss)
    sess.run(tf.global_variables_initializer())

    # Fit the line.
    for iteration in range(201):
      self.assertEqual(sess.run(switch), iteration < 100)
      sess.run(train_op)
      sess.run(tf.assign_add(step, 1))

      slot_names = optimizer.get_slot_names()
      adam_slots = ["c1-m", "c1-v", "c1-adam_counter"]
      momentum_slots = ["c2-momentum", "c2-momentum_counter"]
      self.assertItemsEqual(slot_names, adam_slots + momentum_slots)
      adam_counter = sess.run(optimizer.get_slot(w, "c1-adam_counter"))
      momentum_counter = sess.run(
          optimizer.get_slot(w, "c2-momentum_counter"))
      self.assertEqual(adam_counter, min(iteration + 1, 100))
      self.assertEqual(momentum_counter, max(iteration - 99, 0))

      if iteration % 20 == 0:
        logging.info("%d %s %d %d", iteration,
                     sess.run([switch, step, w, b]), adam_counter,
                     momentum_counter)
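Unlike Example 3, this CompositeOptimizer variant exposes the slots of its sub-optimizers under the prefixes "c1-" and "c2-", so identically named slots from the two methods cannot collide. Below is a small hypothetical helper (not part of DRAGNN) that strips those prefixes again, for example when inspecting the slots of a trained model:

def split_composite_slot_names(slot_names):
  """Returns (method1_slots, method2_slots) with the 'c1-'/'c2-' prefixes stripped."""
  method1 = [n[len("c1-"):] for n in slot_names if n.startswith("c1-")]
  method2 = [n[len("c2-"):] for n in slot_names if n.startswith("c2-")]
  return method1, method2

print(split_composite_slot_names(
    ["c1-m", "c1-v", "c1-adam_counter", "c2-momentum", "c2-momentum_counter"]))
# (['m', 'v', 'adam_counter'], ['momentum', 'momentum_counter'])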