本文整理汇总了Python中tensorflow.python.training.training.AdagradOptimizer方法的典型用法代码示例。如果您正苦于以下问题:Python training.AdagradOptimizer方法的具体用法?Python training.AdagradOptimizer怎么用?Python training.AdagradOptimizer使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类tensorflow.python.training.training
的用法示例。
在下文中一共展示了training.AdagradOptimizer方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _centered_bias_step
# 需要导入模块: from tensorflow.python.training import training [as 别名]
# 或者: from tensorflow.python.training.training import AdagradOptimizer [as 别名]
def _centered_bias_step(centered_bias, logits_dimension, labels, loss_fn,
                        learning_rate=0.1):
  """Creates and returns a training op that fits the centered-bias variable.

  The bias variable is tiled across the batch, reshaped to
  `(batch_size, logits_dimension)`, and fed to `loss_fn` as logits; a
  dedicated Adagrad optimizer then minimizes that loss over the bias only.

  Args:
    centered_bias: 1-D bias `Variable` of length `logits_dimension`.
    logits_dimension: Positive int, the width of the logits.
    labels: Label `Tensor`; its first dimension is the batch size.
    loss_fn: Callable taking `(logits, labels)` and returning a per-example
      loss `Tensor`.
    learning_rate: Adagrad learning rate for the bias. Defaults to 0.1, a
      conservative value for a single variable.

  Returns:
    A training op that updates only `centered_bias`.

  Raises:
    ValueError: If `logits_dimension` is `None` or less than 1.
  """
  if (logits_dimension is None) or (logits_dimension < 1):
    raise ValueError("Invalid logits_dimension %s." % logits_dimension)
  with ops.name_scope(None, "centered_bias_step", (labels,)) as name:
    batch_size = array_ops.shape(labels)[0]
    logits = array_ops.reshape(
        array_ops.tile(centered_bias, (batch_size,)),
        (batch_size, logits_dimension))
    with ops.name_scope(None, "centered_bias", (labels, logits)):
      centered_bias_loss = math_ops.reduce_mean(
          loss_fn(logits, labels), name="training_loss")
  # Learn the central bias with its own optimizer so it does not interact
  # with the main model's training schedule.
  return training.AdagradOptimizer(learning_rate).minimize(
      centered_bias_loss, var_list=(centered_bias,), name=name)
示例2: _centered_bias_step
# 需要导入模块: from tensorflow.python.training import training [as 别名]
# 或者: from tensorflow.python.training.training import AdagradOptimizer [as 别名]
def _centered_bias_step(centered_bias, batch_size, labels, loss_fn, weights):
  """Builds the train op that learns the centered-bias variable.

  The bias is broadcast to shape `(batch_size, logits_dimension)` and
  treated as the model's logits; `loss_fn(labels, logits, weights)` gives
  the loss, which a dedicated Adagrad optimizer minimizes over the bias
  variable alone.
  """
  with ops.name_scope(None, "centered_bias_step", (labels,)) as scope_name:
    dim = array_ops.shape(centered_bias)[0]
    tiled = array_ops.tile(centered_bias, (batch_size,))
    bias_logits = array_ops.reshape(tiled, (batch_size, dim))
    with ops.name_scope(None, "centered_bias", (labels, bias_logits)):
      bias_loss = math_ops.reduce_mean(
          loss_fn(labels, bias_logits, weights), name="training_loss")
  # A fixed, conservative learning rate is enough for a single variable.
  optimizer = training.AdagradOptimizer(0.1)
  return optimizer.minimize(
      bias_loss, var_list=(centered_bias,), name=scope_name)
示例3: _centered_bias_step
# 需要导入模块: from tensorflow.python.training import training [as 别名]
# 或者: from tensorflow.python.training.training import AdagradOptimizer [as 别名]
def _centered_bias_step(labels, loss_fn, num_label_columns):
  """Returns a train op that fits the collected centered-bias variable."""
  bias_vars = ops.get_collection(_CENTERED_BIAS)
  batch_size = array_ops.shape(labels)[0]
  # Broadcast the bias across the batch so it can stand in as the logits.
  tiled_bias = array_ops.tile(bias_vars[0], [batch_size])
  logits = array_ops.reshape(tiled_bias, [batch_size, num_label_columns])
  return train.AdagradOptimizer(0.1).minimize(
      loss_fn(logits, labels), var_list=bias_vars)
示例4: _centered_bias_step
# 需要导入模块: from tensorflow.python.training import training [as 别名]
# 或者: from tensorflow.python.training.training import AdagradOptimizer [as 别名]
def _centered_bias_step(logits_dimension, weight_collection, labels,
                        train_loss_fn):
  """Creates and returns the training op for the centered bias.

  Fetches the bias variable from `weight_collection`, broadcasts it to
  `(batch_size, logits_dimension)` as logits, and minimizes
  `train_loss_fn(logits, labels)` over the bias with a dedicated Adagrad
  optimizer.
  """
  bias_vars = ops.get_collection(weight_collection)
  batch_size = array_ops.shape(labels)[0]
  tiled = array_ops.tile(bias_vars[0], [batch_size])
  bias_logits = array_ops.reshape(tiled, [batch_size, logits_dimension])
  with ops.name_scope(None, "centered_bias", (labels, bias_logits)):
    bias_loss = math_ops.reduce_mean(
        train_loss_fn(bias_logits, labels), name="training_loss")
  # A fixed, conservative learning rate is enough for a single variable.
  optimizer = training.AdagradOptimizer(0.1)
  return optimizer.minimize(bias_loss, var_list=bias_vars)