

Python saver.Saver Method Code Examples

This article collects typical usage examples of the saver.Saver method from tensorflow.python.training in Python. If you are unsure what saver.Saver does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore other usage examples from the tensorflow.python.training.saver module.


The following presents 15 code examples of the saver.Saver method, sorted by popularity by default.
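As a quick orientation before the collected examples, here is a minimal save/restore sketch using this module; the variable name and checkpoint path are illustrative placeholders, not taken from any of the projects below.

import tensorflow as tf
from tensorflow.python.training import saver as saver_lib

# Build a trivial graph with one variable to checkpoint.
w = tf.Variable(tf.zeros([10]), name='w')

# saver_lib.Saver is the class exposed publicly as tf.train.Saver in TF 1.x.
saver = saver_lib.Saver()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    save_path = saver.save(sess, '/tmp/demo.ckpt')  # write the checkpoint
    saver.restore(sess, save_path)                  # read it back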

Example 1: get_frozen_graph_def

# Required module: from tensorflow.python.training import saver [as alias]
# Or: from tensorflow.python.training.saver import Saver [as alias]
def get_frozen_graph_def(inference_graph_def, use_moving_averages,
                         input_checkpoint, output_node_names):
  """Freezes all variables in a graph definition."""
  saver = None
  if use_moving_averages:
    variable_averages = tf.train.ExponentialMovingAverage(0.0)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)
  else:
    saver = tf.train.Saver()

  frozen_graph_def = freeze_graph_with_def_protos(
      input_graph_def=inference_graph_def,
      input_saver_def=saver.as_saver_def(),
      input_checkpoint=input_checkpoint,
      output_node_names=output_node_names,
      restore_op_name='save/restore_all',
      filename_tensor_name='save/Const:0',
      clear_devices=True,
      initializer_nodes='')
  return frozen_graph_def


# TODO: Support batch tf example inputs. 
Developer: ringringyi, Project: DOTA_models, Lines: 26, Source: exporter.py
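A hypothetical way to call the function above and write the result to disk; the checkpoint path, output node names, and the origin of inference_graph_def are assumptions, not part of the original project.

# Assumes `inference_graph_def` was obtained elsewhere, e.g. from
# sess.graph_def after building the inference graph; names are placeholders.
frozen_graph_def = get_frozen_graph_def(
    inference_graph_def=inference_graph_def,
    use_moving_averages=True,
    input_checkpoint='/tmp/model.ckpt-10000',
    output_node_names='detection_boxes,detection_scores')
with tf.gfile.GFile('/tmp/frozen_inference_graph.pb', 'wb') as f:
  f.write(frozen_graph_def.SerializeToString())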

Example 2: replace_variable_values_with_moving_averages

# Required module: from tensorflow.python.training import saver [as alias]
# Or: from tensorflow.python.training.saver import Saver [as alias]
def replace_variable_values_with_moving_averages(graph,
                                                 current_checkpoint_file,
                                                 new_checkpoint_file):
  """Replaces variable values in the checkpoint with their moving averages.

  If the current checkpoint has shadow variables maintaining moving averages of
  the variables defined in the graph, this function generates a new checkpoint
  where the variables contain the values of their moving averages.

  Args:
    graph: a tf.Graph object.
    current_checkpoint_file: a checkpoint containing both original variables and
      their moving averages.
    new_checkpoint_file: file path to write a new checkpoint.
  """
  with graph.as_default():
    variable_averages = tf.train.ExponentialMovingAverage(0.0)
    ema_variables_to_restore = variable_averages.variables_to_restore()
    with tf.Session() as sess:
      read_saver = tf.train.Saver(ema_variables_to_restore)
      read_saver.restore(sess, current_checkpoint_file)
      write_saver = tf.train.Saver()
      write_saver.save(sess, new_checkpoint_file) 
Developer: ahmetozlu, Project: vehicle_counting_tensorflow, Lines: 25, Source: exporter.py
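A sketch of how this helper might be invoked; build_model and the checkpoint paths are hypothetical stand-ins for whatever constructs the graph in the calling project.

graph = tf.Graph()
with graph.as_default():
  build_model()  # hypothetical helper that creates the model's variables
replace_variable_values_with_moving_averages(
    graph,
    current_checkpoint_file='/tmp/model.ckpt-10000',
    new_checkpoint_file='/tmp/model_ema.ckpt')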

Example 3: testRestoredModelPerformance

# Required module: from tensorflow.python.training import saver [as alias]
# Or: from tensorflow.python.training.saver import Saver [as alias]
def testRestoredModelPerformance(self):
    checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')
    log_dir = os.path.join(self.get_temp_dir(), 'log_dir1/')

    # First, save out the current model to a checkpoint:
    init_op = control_flow_ops.group(variables.global_variables_initializer(),
                                     variables.local_variables_initializer())
    saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V1)
    with self.test_session() as sess:
      sess.run(init_op)
      saver.save(sess, checkpoint_path)

    # Next, determine the metric to evaluate:
    value_op, update_op = metric_ops.streaming_accuracy(self._predictions,
                                                        self._labels)

    # Run the evaluation and verify the results:
    accuracy_value = evaluation.evaluate_once(
        '', checkpoint_path, log_dir, eval_op=update_op, final_op=value_op)
    self.assertAlmostEqual(accuracy_value, self._expected_accuracy) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 22, Source: evaluation_test.py

Example 4: testTrainWithNoneAsLogdirWhenUsingSaverRaisesError

# Required module: from tensorflow.python.training import saver [as alias]
# Or: from tensorflow.python.training.saver import Saver [as alias]
def testTrainWithNoneAsLogdirWhenUsingSaverRaisesError(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)
      saver = saver_lib.Saver()

      with self.assertRaises(ValueError):
        learning.train(
            train_op, None, init_op=None, number_of_steps=300, saver=saver) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 20, Source: learning_test.py

Example 5: replace_variable_values_with_moving_averages

# Required module: from tensorflow.python.training import saver [as alias]
# Or: from tensorflow.python.training.saver import Saver [as alias]
def replace_variable_values_with_moving_averages(graph,
                                                 current_checkpoint_file,
                                                 new_checkpoint_file):
    """Replaces variable values in the checkpoint with their moving averages.
    If the current checkpoint has shadow variables maintaining moving averages of
    the variables defined in the graph, this function generates a new checkpoint
    where the variables contain the values of their moving averages.
    Args:
      graph: a tf.Graph object.
      current_checkpoint_file: a checkpoint containing both original variables and
        their moving averages.
      new_checkpoint_file: file path to write a new checkpoint.
    """
    with graph.as_default():
        variable_averages = tf.train.ExponentialMovingAverage(0.0)
        ema_variables_to_restore = variable_averages.variables_to_restore()
        with tf.Session() as sess:
            read_saver = tf.train.Saver(ema_variables_to_restore)
            read_saver.restore(sess, current_checkpoint_file)
            write_saver = tf.train.Saver()
            write_saver.save(sess, new_checkpoint_file) 
Developer: wanjinchang, Project: SSH-TensorFlow, Lines: 23, Source: convert_ckpt_to_pb.py

Example 6: replace_variable_values_with_moving_averages

# Required module: from tensorflow.python.training import saver [as alias]
# Or: from tensorflow.python.training.saver import Saver [as alias]
def replace_variable_values_with_moving_averages(graph,
                                                 current_checkpoint_file,
                                                 new_checkpoint_file):
    """Replaces variable values in the checkpoint with their moving averages.
    
    If the current checkpoint has shadow variables maintaining moving averages
    of the variables defined in the graph, this function generates a new
    checkpoint where the variables contain the values of their moving averages.
    
    Args:
        graph: A tf.Graph object.
        current_checkpoint_file: A checkpoint both original variables and
            their moving averages.
        new_checkpoint_file: File path to write a new checkpoint.
    """
    with graph.as_default():
        variable_averages = tf.train.ExponentialMovingAverage(0.0)
        ema_variables_to_restore = variable_averages.variables_to_restore()
        with tf.Session() as sess:
            read_saver = tf.train.Saver(ema_variables_to_restore)
            read_saver.restore(sess, current_checkpoint_file)
            write_saver = tf.train.Saver()
            write_saver.save(sess, new_checkpoint_file) 
Developer: Shirhe-Lyh, Project: multi_task_test, Lines: 25, Source: exporter.py

Example 7: create_checkpoint_from_values

# Required module: from tensorflow.python.training import saver [as alias]
# Or: from tensorflow.python.training.saver import Saver [as alias]
def create_checkpoint_from_values(self,
                                    var_names_to_values,
                                    checkpoint_dir,
                                    global_step=None):
    """Creates a checkpoint from a mapping of name to values in model_dir.

    Args:
      var_names_to_values: a map from variable names to values.
      checkpoint_dir: the directory where the checkpoint will be saved.
      global_step: the global step used to save the checkpoint.

    Returns:
      the model_path to the checkpoint.
    """
    var_list = []
    with session.Session('', graph=ops.Graph()) as sess:
      # Create a set of variables to save in the checkpoint.
      for var_name in var_names_to_values:
        var_value = var_names_to_values[var_name]
        var_list.append(variables_lib.VariableV1(var_value, name=var_name))
      saver = saver_lib.Saver(var_list)
      init_op = variables_lib.variables_initializer(var_list)
      sess.run(init_op)
      # Save the initialized values in the file at 'checkpoint_dir'
      return saver.save(sess, checkpoint_dir, global_step=global_step) 
Developer: google-research, Project: tf-slim, Lines: 27, Source: variables_test.py

Example 8: testReturnsSingleCheckpointIfOneCheckpointFound

# Required module: from tensorflow.python.training import saver [as alias]
# Or: from tensorflow.python.training.saver import Saver [as alias]
def testReturnsSingleCheckpointIfOneCheckpointFound(self):
    checkpoint_dir = tempfile.mkdtemp('one_checkpoint_found')
    if not gfile.Exists(checkpoint_dir):
      gfile.MakeDirs(checkpoint_dir)

    global_step = variables.get_or_create_global_step()
    saver = saver_lib.Saver()  # Saves the global step.

    with self.cached_session() as session:
      session.run(variables_lib.global_variables_initializer())
      save_path = os.path.join(checkpoint_dir, 'model.ckpt')
      saver.save(session, save_path, global_step=global_step)

    num_found = 0
    for _ in evaluation.checkpoints_iterator(checkpoint_dir, timeout=0):
      num_found += 1
    self.assertEqual(num_found, 1) 
Developer: google-research, Project: tf-slim, Lines: 19, Source: evaluation_test.py

Example 9: test_restore_fn_detection

# Required module: from tensorflow.python.training import saver [as alias]
# Or: from tensorflow.python.training.saver import Saver [as alias]
def test_restore_fn_detection(self):
    init_op = tf.global_variables_initializer()
    saver = tf_saver.Saver()
    save_path = self.get_temp_dir()
    with self.test_session() as sess:
      sess.run(init_op)
      saved_model_path = saver.save(sess, save_path)
      restore_fn = self._model.restore_fn(saved_model_path,
                                          from_detection_checkpoint=True)
      restore_fn(sess)
      for var in sess.run(tf.report_uninitialized_variables()):
        self.assertNotIn('FeatureExtractor', var.name) 
Developer: ringringyi, Project: DOTA_models, Lines: 14, Source: ssd_meta_arch_test.py

Example 10: test_restore_fn_classification

# Required module: from tensorflow.python.training import saver [as alias]
# Or: from tensorflow.python.training.saver import Saver [as alias]
def test_restore_fn_classification(self):
    # Define mock tensorflow classification graph and save variables.
    test_graph_classification = tf.Graph()
    with test_graph_classification.as_default():
      image = tf.placeholder(dtype=tf.float32, shape=[1, 20, 20, 3])
      with tf.variable_scope('mock_model'):
        net = slim.conv2d(image, num_outputs=32, kernel_size=1, scope='layer1')
        slim.conv2d(net, num_outputs=3, kernel_size=1, scope='layer2')

      init_op = tf.global_variables_initializer()
      saver = tf.train.Saver()
      save_path = self.get_temp_dir()
      with self.test_session() as sess:
        sess.run(init_op)
        saved_model_path = saver.save(sess, save_path)

    # Create tensorflow detection graph and load variables from
    # classification checkpoint.
    test_graph_detection = tf.Graph()
    with test_graph_detection.as_default():
      inputs_shape = [2, 2, 2, 3]
      inputs = tf.to_float(tf.random_uniform(
          inputs_shape, minval=0, maxval=255, dtype=tf.int32))
      preprocessed_inputs = self._model.preprocess(inputs)
      prediction_dict = self._model.predict(preprocessed_inputs)
      self._model.postprocess(prediction_dict)
      restore_fn = self._model.restore_fn(saved_model_path,
                                          from_detection_checkpoint=False)
      with self.test_session() as sess:
        restore_fn(sess)
        for var in sess.run(tf.report_uninitialized_variables()):
          self.assertNotIn('FeatureExtractor', var.name) 
Developer: ringringyi, Project: DOTA_models, Lines: 34, Source: ssd_meta_arch_test.py

Example 11: _parse_input_saver_proto

# Required module: from tensorflow.python.training import saver [as alias]
# Or: from tensorflow.python.training.saver import Saver [as alias]
def _parse_input_saver_proto(input_saver, input_binary):
  """Parser input tensorflow Saver into SaverDef proto."""
  if not gfile.Exists(input_saver):
    print("Input saver file '" + input_saver + "' does not exist!")
    return -1
  mode = "rb" if input_binary else "r"
  with gfile.FastGFile(input_saver, mode) as f:
    saver_def = saver_pb2.SaverDef()
    if input_binary:
      saver_def.ParseFromString(f.read())
    else:
      text_format.Merge(f.read(), saver_def)
  return saver_def 
Developer: rockingdingo, Project: deepnlp, Lines: 15, Source: freeze_graph.py
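For context, here is a minimal sketch of producing the saver file that this parser reads; the output path is a placeholder and the surrounding graph setup is assumed.

from tensorflow.python.training import saver as saver_lib

saver = saver_lib.Saver()          # assumes variables already exist in the graph
saver_def = saver.as_saver_def()   # SaverDef protocol buffer
# Binary form, read back with input_binary=True; use
# google.protobuf.text_format for the text form instead.
with open('/tmp/model.saver', 'wb') as f:
  f.write(saver_def.SerializeToString())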

Example 12: write_graph_and_checkpoint

# Required module: from tensorflow.python.training import saver [as alias]
# Or: from tensorflow.python.training.saver import Saver [as alias]
def write_graph_and_checkpoint(inference_graph_def,
                               model_path,
                               input_saver_def,
                               trained_checkpoint_prefix):
  """Writes the graph and the checkpoint into disk."""
  for node in inference_graph_def.node:
    node.device = ''
  with tf.Graph().as_default():
    tf.import_graph_def(inference_graph_def, name='')
    with session.Session() as sess:
      saver = saver_lib.Saver(saver_def=input_saver_def,
                              save_relative_paths=True)
      saver.restore(sess, trained_checkpoint_prefix)
      saver.save(sess, model_path) 
Developer: ahmetozlu, Project: vehicle_counting_tensorflow, Lines: 16, Source: exporter.py

Example 13: _init_saver

# Required module: from tensorflow.python.training import saver [as alias]
# Or: from tensorflow.python.training.saver import Saver [as alias]
def _init_saver(self, saver=USE_DEFAULT):
    """Initializes saver.

    Args:
      saver: A `Saver` object. If set to USE_DEFAULT, create one that
        saves all the variables.
    """
    if saver is Supervisor.USE_DEFAULT:
      saver = self._get_first_op_from_collection(ops.GraphKeys.SAVERS)
      if saver is None and variables.global_variables():
        saver = saver_mod.Saver()
        ops.add_to_collection(ops.GraphKeys.SAVERS, saver)
    self._saver = saver 
Developer: yuantailing, Project: ctw-baseline, Lines: 15, Source: supervisor.py

Example 14: saver

# Required module: from tensorflow.python.training import saver [as alias]
# Or: from tensorflow.python.training.saver import Saver [as alias]
def saver(self):
    """Return the Saver used by the supervisor.

    Returns:
      A Saver object.
    """
    return self._saver 
Developer: yuantailing, Project: ctw-baseline, Lines: 9, Source: supervisor.py

Example 15: __init__

# Required module: from tensorflow.python.training import saver [as alias]
# Or: from tensorflow.python.training.saver import Saver [as alias]
def __init__(self, saver):
    # Makes a copy of the saver-def and disables garbage-collection, since the
    # exporter enforces garbage-collection independently. Specifically, since
    # the exporter performs atomic copies of the saver output, it is required
    # that garbage-collection via the underlying saver be disabled.
    saver_def = saver.as_saver_def()
    saver_def.ClearField("max_to_keep")
    self._saver = tf_saver.Saver(saver_def=saver_def)
    self._has_init = False
    self._assets_to_copy = {} 
Developer: ryfeus, Project: lambda-packs, Lines: 12, Source: exporter.py


Note: The tensorflow.python.training.saver.Saver examples in this article were compiled by 純淨天空 from open source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open source projects contributed by many developers, and copyright in the source code remains with the original authors. Please consult each project's License before distributing or using the code, and do not republish without permission.