

Python optimizers.optimize_loss Function Code Examples

This article collects typical usage examples of the Python function tensorflow.contrib.layers.python.layers.optimizers.optimize_loss. If you are wondering how optimize_loss is used in practice, what arguments it takes, or what real calling code looks like, the curated examples below should help.


The sections below present 15 code examples of the optimize_loss function, sorted by popularity by default.
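
Before the examples, here is a minimal, self-contained sketch of a typical optimize_loss call. It is illustrative only and assumes TensorFlow 1.x with the contrib packages available; the toy linear model, the variable name w, and the training data are placeholders invented for this sketch, not taken from any of the projects cited below.

import tensorflow as tf
from tensorflow.contrib.layers.python.layers import optimizers

# Toy model: learn w so that w * x approximates y (the true w is 2.0).
x = tf.placeholder(tf.float32, [None])
y = tf.placeholder(tf.float32, [None])
w = tf.get_variable("w", [], initializer=tf.zeros_initializer())
loss = tf.reduce_mean(tf.square(w * x - y))

# optimize_loss builds the whole training op in one call: it computes
# gradients, optionally clips them, applies the chosen optimizer, and
# increments global_step.
train_op = optimizers.optimize_loss(
    loss,
    tf.train.get_or_create_global_step(),
    learning_rate=0.1,
    optimizer="SGD",     # a registered name, an Optimizer class, or an instance
    clip_gradients=5.0)  # optional clipping by global norm

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  for _ in range(100):
    sess.run(train_op, feed_dict={x: [1., 2., 3.], y: [2., 4., 6.]})
  print(sess.run(w))  # should be close to 2.0

As the examples below show, the optimizer argument accepts the string name of a built-in optimizer (e.g. "SGD", "Adagrad"), an Optimizer class, a callable returning one, or an Optimizer instance; anything else raises ValueError (see Example 3).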

Example 1: testBadSummaries

 def testBadSummaries(self):
   with ops.Graph().as_default() as g, self.test_session(graph=g):
     _, _, loss, global_step = _setup_model()
     with self.assertRaises(ValueError):
       optimizers_lib.optimize_loss(
           loss, global_step, learning_rate=0.1, optimizer="SGD",
           summaries=["loss", "bad_summary"])
Developer ID: Jackhuang945, Project: tensorflow, Lines: 7, Source: optimizers_test.py

Example 2: _make_training_op

  def _make_training_op(training_loss):
    """Training op for the DNN linear combined model."""
    train_ops = []
    if dnn_logits is not None:
      train_ops.append(
          optimizers.optimize_loss(
              loss=training_loss,
              global_step=contrib_variables.get_global_step(),
              learning_rate=_DNN_LEARNING_RATE,
              optimizer=_get_optimizer(dnn_optimizer),
              gradient_multipliers=_extract_embedding_lr_multipliers(  # pylint: disable=protected-access
                  embedding_lr_multipliers, dnn_parent_scope,
                  dnn_input_scope.name),
              clip_gradients=gradient_clip_norm,
              variables=ops.get_collection(dnn_parent_scope),
              name=dnn_parent_scope,
              # Empty summaries, because head already logs "loss" summary.
              summaries=[]))
    if linear_logits is not None:
      train_ops.append(
          optimizers.optimize_loss(
              loss=training_loss,
              global_step=contrib_variables.get_global_step(),
              learning_rate=_linear_learning_rate(len(linear_feature_columns)),
              optimizer=_get_optimizer(linear_optimizer),
              clip_gradients=gradient_clip_norm,
              variables=ops.get_collection(linear_parent_scope),
              name=linear_parent_scope,
              # Empty summaries, because head already logs "loss" summary.
              summaries=[]))

    return control_flow_ops.group(*train_ops)
Developer ID: Jackhuang945, Project: tensorflow, Lines: 32, Source: dnn_linear_combined.py

Example 3: testWrongOptimizer

 def testWrongOptimizer(self):
   optimizers = ["blah", variables.Variable, object(), lambda x: None]
   for optimizer in optimizers:
     with ops.Graph().as_default() as g:
       with self.test_session(graph=g):
         _, _, loss, global_step = _setup_model()
         with self.assertRaises(ValueError):
           optimizers_lib.optimize_loss(
               loss, global_step, learning_rate=0.1, optimizer=optimizer)
Developer ID: Jackhuang945, Project: tensorflow, Lines: 9, Source: optimizers_test.py

Example 4: testInvalidLoss

 def testInvalidLoss(self):
   with ops.Graph().as_default() as g, self.test_session(graph=g):
     _, _, _, global_step = _setup_model()
     with self.assertRaises(ValueError):
       optimizers_lib.optimize_loss(
           None, global_step, learning_rate=0.1, optimizer="SGD")
     with self.assertRaises(ValueError):
       optimizers_lib.optimize_loss(
           [[1.0]], global_step, learning_rate=0.1, optimizer="SGD")
Developer ID: Jackhuang945, Project: tensorflow, Lines: 9, Source: optimizers_test.py

Example 5: testIgnoreVariablesWithNoGradients

  def testIgnoreVariablesWithNoGradients(self):
    _, _, loss, global_step = _setup_model()

    unused_variable = variable_scope.get_variable("ignore_me", [])

    optimizers_lib.optimize_loss(
        loss,
        global_step,
        learning_rate=0.1,
        optimizer="SGD",
        gradient_noise_scale=10.0,
        gradient_multipliers={unused_variable: 1.},
        clip_gradients=10.0)
Developer ID: Jackhuang945, Project: tensorflow, Lines: 13, Source: optimizers_test.py

Example 6: _dynamic_rnn_model_fn

  def _dynamic_rnn_model_fn(features, labels, mode):
    """The model to be passed to an `Estimator`."""
    with ops.name_scope(name):
      initial_state = features.get(initial_state_key)
      sequence_length = features.get(sequence_length_key)
      sequence_input = build_sequence_input(features,
                                            sequence_feature_columns,
                                            context_feature_columns)
      if mode == model_fn.ModeKeys.TRAIN:
        cell_for_mode = apply_dropout(
            cell, input_keep_probability, output_keep_probability)
      else:
        cell_for_mode = cell
      rnn_activations, final_state = construct_rnn(
          initial_state,
          sequence_input,
          cell_for_mode,
          target_column.num_label_columns,
          dtype=dtype,
          parallel_iterations=parallel_iterations,
          swap_memory=swap_memory)

      loss = None  # Created below for modes TRAIN and EVAL.
      if prediction_type == PredictionType.MULTIPLE_VALUE:
        prediction_dict = _multi_value_predictions(
            rnn_activations, target_column, predict_probabilities)
        if mode != model_fn.ModeKeys.INFER:
          loss = _multi_value_loss(
              rnn_activations, labels, sequence_length, target_column, features)
      elif prediction_type == PredictionType.SINGLE_VALUE:
        prediction_dict = _single_value_predictions(
            rnn_activations, sequence_length, target_column,
            predict_probabilities)
        if mode != model_fn.ModeKeys.INFER:
          loss = _single_value_loss(
              rnn_activations, labels, sequence_length, target_column, features)
      prediction_dict[RNNKeys.FINAL_STATE_KEY] = final_state

      eval_metric_ops = None
      if mode != model_fn.ModeKeys.INFER:
        eval_metric_ops = _get_eval_metric_ops(
            problem_type, prediction_type, sequence_length, prediction_dict,
            labels)

      train_op = None
      if mode == model_fn.ModeKeys.TRAIN:
        train_op = optimizers.optimize_loss(
            loss=loss,
            global_step=None,  # Get it internally.
            learning_rate=learning_rate,
            optimizer=optimizer,
            clip_gradients=gradient_clipping_norm,
            summaries=optimizers.OPTIMIZER_SUMMARIES)

    return model_fn.ModelFnOps(mode=mode,
                               predictions=prediction_dict,
                               loss=loss,
                               train_op=train_op,
                               eval_metric_ops=eval_metric_ops)
Developer ID: BloodD, Project: tensorflow, Lines: 59, Source: dynamic_rnn_estimator.py

Example 7: linear_model_fn_with_model_fn_ops

def linear_model_fn_with_model_fn_ops(features, labels, mode):
  """Same as linear_model_fn, but returns `ModelFnOps`."""
  assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
                  model_fn.ModeKeys.INFER)
  prediction, loss = (models.linear_regression_zero_init(features, labels))
  train_op = optimizers.optimize_loss(
      loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
  return model_fn.ModelFnOps(
      mode=mode, predictions=prediction, loss=loss, train_op=train_op)
Developer ID: Immexxx, Project: tensorflow, Lines: 9, Source: estimator_test.py

Example 8: _train_op_fn

 def _train_op_fn(loss):
   """Returns the op to optimize the loss."""
   return optimizers.optimize_loss(
       loss=loss,
       global_step=contrib_variables.get_global_step(),
       learning_rate=learning_rate,
       optimizer=optimizer,
       name=parent_scope,
       # Empty summaries to prevent optimizers from logging the training_loss.
       summaries=[])
Developer ID: soswow, Project: Various-JS-and-Python, Lines: 10, Source: machine_learning.py

Example 9: linear_model_fn

def linear_model_fn(features, labels, mode):
  features = extract(features, 'input')
  labels = extract(labels, 'labels')
  assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
                  model_fn.ModeKeys.INFER)
  if isinstance(features, dict):
    (_, features), = features.items()
  prediction, loss = (models.linear_regression_zero_init(features, labels))
  train_op = optimizers.optimize_loss(
      loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
  return prediction, loss, train_op
Developer ID: Immexxx, Project: tensorflow, Lines: 11, Source: estimator_test.py

Example 10: logistic_model_no_mode_fn

def logistic_model_no_mode_fn(features, labels):
  features = extract(features, 'input')
  labels = extract(labels, 'labels')
  labels = array_ops.one_hot(labels, 3, 1, 0)
  prediction, loss = (models.logistic_regression_zero_init(features, labels))
  train_op = optimizers.optimize_loss(
      loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
  return {
      'class': math_ops.argmax(prediction, 1),
      'prob': prediction
  }, loss, train_op
Developer ID: Immexxx, Project: tensorflow, Lines: 11, Source: estimator_test.py

Example 11: _train_op_fn

 def _train_op_fn(loss):
   """Returns the op to optimize the loss."""
   return optimizers.optimize_loss(
       loss=loss,
       global_step=contrib_variables.get_global_step(),
       learning_rate=_LEARNING_RATE,
       optimizer=_get_optimizer(optimizer),
       clip_gradients=gradient_clip_norm,
       name=parent_scope,
       # Empty summaries to prevent optimizers from logging the training_loss.
       summaries=[])
Developer ID: HKUST-SING, Project: tensorflow, Lines: 11, Source: dnn.py

Example 12: _dnn_train_op_fn

 def _dnn_train_op_fn(loss):
   """Returns the op to optimize the loss."""
   return optimizers.optimize_loss(
       loss=loss,
       global_step=training_util.get_global_step(),
       learning_rate=_DNN_LEARNING_RATE,
       optimizer=_get_optimizer(dnn_optimizer),
       name=dnn_parent_scope,
       variables=ops.get_collection(
           ops.GraphKeys.TRAINABLE_VARIABLES, scope=dnn_parent_scope),
       # Empty summaries to prevent optimizers from logging training_loss.
       summaries=[])
Developer ID: StephenOman, Project: tensorflow, Lines: 12, Source: dnn_tree_combined_estimator.py

Example 13: testInvalidGlobalStep

 def testInvalidGlobalStep(self):
   with ops.Graph().as_default() as g, self.test_session(graph=g):
     x = array_ops.placeholder(dtypes.float32, [])
     var = variable_scope.get_variable(
         "test", [], initializer=init_ops.constant_initializer(10))
     loss = math_ops.abs(var * x)
     with self.assertRaises(AttributeError):
       optimizers_lib.optimize_loss(
           loss,
           global_step=constant_op.constant(
               43, dtype=dtypes.int64),
           learning_rate=0.1,
           optimizer="SGD")
     with self.assertRaises(TypeError):
       optimizers_lib.optimize_loss(
           loss,
           global_step=variable_scope.get_variable(
               "global_step", [],
               trainable=False,
               dtype=dtypes.float64,
               initializer=init_ops.constant_initializer(
                   0.0, dtype=dtypes.float64)),
           learning_rate=0.1,
           optimizer="SGD")
     with self.assertRaises(ValueError):
       optimizers_lib.optimize_loss(
           loss,
           global_step=variable_scope.get_variable(
               "global_step", [1],
               trainable=False,
               dtype=dtypes.int64,
               initializer=init_ops.constant_initializer(
                   [0], dtype=dtypes.int64)),
           learning_rate=0.1,
           optimizer="SGD")
Developer ID: Jackhuang945, Project: tensorflow, Lines: 35, Source: optimizers_test.py

Example 14: _make_training_op

  def _make_training_op(training_loss):
    """Training op for the DNN linear combined model."""
    train_ops = []
    global_step = training_util.get_global_step()
    if dnn_logits is not None:
      train_ops.append(
          optimizers.optimize_loss(
              loss=training_loss,
              global_step=global_step,
              learning_rate=_DNN_LEARNING_RATE,
              optimizer=dnn_optimizer,
              gradient_multipliers=_extract_embedding_lr_multipliers(  # pylint: disable=protected-access
                  embedding_lr_multipliers, dnn_parent_scope,
                  dnn_input_scope.name),
              clip_gradients=gradient_clip_norm,
              variables=ops.get_collection(dnn_parent_scope),
              name=dnn_parent_scope,
              # Empty summaries, because head already logs "loss" summary.
              summaries=[],
              increment_global_step=not fix_global_step_increment_bug))
    if linear_logits is not None:
      train_ops.append(
          optimizers.optimize_loss(
              loss=training_loss,
              global_step=global_step,
              learning_rate=_linear_learning_rate(len(linear_feature_columns)),
              optimizer=linear_optimizer,
              clip_gradients=gradient_clip_norm,
              variables=ops.get_collection(linear_parent_scope),
              name=linear_parent_scope,
              # Empty summaries, because head already logs "loss" summary.
              summaries=[],
              increment_global_step=not fix_global_step_increment_bug))

    train_op = control_flow_ops.group(*train_ops)
    if fix_global_step_increment_bug:
      with ops.control_dependencies([train_op]):
        with ops.colocate_with(global_step):
          return state_ops.assign_add(global_step, 1).op
    return train_op
Developer ID: Ajaycs99, Project: tensorflow, Lines: 40, Source: dnn_linear_combined.py

Example 15: linear_model_params_fn

def linear_model_params_fn(features, labels, mode, params):
  features = extract(features, 'input')
  labels = extract(labels, 'labels')

  assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
                  model_fn.ModeKeys.INFER)
  prediction, loss = (models.linear_regression_zero_init(features, labels))
  train_op = optimizers.optimize_loss(
      loss,
      variables.get_global_step(),
      optimizer='Adagrad',
      learning_rate=params['learning_rate'])
  return prediction, loss, train_op
Developer ID: Immexxx, Project: tensorflow, Lines: 13, Source: estimator_test.py


Note: The tensorflow.contrib.layers.python.layers.optimizers.optimize_loss examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their authors; copyright remains with the original authors, and any redistribution or use should follow each project's License. Do not reproduce without permission.