

Python importer.import_graph_def Method Code Examples

This article collects typical usage examples of the Python method tensorflow.python.framework.importer.import_graph_def. If you are wondering how importer.import_graph_def is used in practice, or are looking for concrete examples of calling it, the curated code samples below may help. You can also browse further usage examples for the containing module, tensorflow.python.framework.importer.


The following section presents 11 code examples of the importer.import_graph_def method, sorted by popularity by default.
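For orientation, here is a minimal sketch of the pattern most of the examples below share: parsing a frozen GraphDef from a .pb file and importing it with importer.import_graph_def. The file path is a placeholder, not taken from any of the examples.

from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import importer, ops
from tensorflow.python.platform import gfile

# Parse a serialized GraphDef from disk (the path is a placeholder).
graph_def = graph_pb2.GraphDef()
with gfile.GFile("frozen_inference_graph.pb", "rb") as f:
    graph_def.ParseFromString(f.read())

# Import it into a fresh graph; name="" avoids the default "import/" name prefix.
graph = ops.Graph()
with graph.as_default():
    importer.import_graph_def(graph_def, name="")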

Example 1: create_tfevent_from_pb

# Required import: from tensorflow.python.framework import importer [as alias]
# Or: from tensorflow.python.framework.importer import import_graph_def [as alias]
def create_tfevent_from_pb(model, optimized=False):
    print("> creating tfevent of model: {}".format(model))

    if optimized:
        model_path = ROOT_DIR + '/models/{}/optimized_inference_graph.pb'.format(model)
        log_dir = ROOT_DIR + '/models/{}/log_opt/'.format(model)
    else:
        model_path = ROOT_DIR + '/models/{}/frozen_inference_graph.pb'.format(model)
        log_dir = ROOT_DIR + '/models/{}/log/'.format(model)

    with session.Session(graph=ops.Graph()) as sess:
        # Deserialize the frozen graph and import it into the session's graph.
        with gfile.FastGFile(model_path, "rb") as f:
            graph_def = graph_pb2.GraphDef()
            graph_def.ParseFromString(f.read())
            importer.import_graph_def(graph_def)
        # Write the graph to a tfevent file so TensorBoard can visualize it.
        pb_visual_writer = summary.FileWriter(log_dir)
        pb_visual_writer.add_graph(sess.graph)
    print("> Model {} Imported.\nVisualize by running: "
          "tensorboard --logdir={}".format(model_path, log_dir))

# Gather all Model Names in models/ 
Developer: gustavz, Project: realtime_object_detection, Lines: 23, Source: all_models_to_tensorboard.py
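A hypothetical invocation of the helper above (the model name is made up; ROOT_DIR and the models/<name>/ directory layout are assumptions carried over from the snippet):

# Assumes ROOT_DIR/models/ssd_mobilenet_v1_coco/frozen_inference_graph.pb exists.
create_tfevent_from_pb("ssd_mobilenet_v1_coco", optimized=False)
# Then inspect the imported graph with:
#   tensorboard --logdir=<ROOT_DIR>/models/ssd_mobilenet_v1_coco/log/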

Example 2: __init__

# Required import: from tensorflow.python.framework import importer [as alias]
# Or: from tensorflow.python.framework.importer import import_graph_def [as alias]
def __init__(self, target, parent_graph=None):
    """Initializes an ImperativeMode.

    Args:
      target: The TensorFlow execution engine to connect to.
      parent_graph: (Optional) An ImperativeGraph.

    Raises:
      UnimplementedError: if non-None parent_graph is not an ImperativeGraph.
    """
    self._target = target
    self._parent_graph = parent_graph
    # Create a new graph
    self._graph = imperative_graph.ImperativeGraph(
        parent_graph=self._parent_graph)
    self._default_graph = self._graph.as_default()
    # Context manager to record variable inits
    self._record_variable_inits = self._graph.record_variable_inits()
    if self._parent_graph:
      if not isinstance(self._parent_graph, imperative_graph.ImperativeGraph):
        raise errors.UnimplementedError(None, None, 'ImperativeMode needs an '
                                        'ImperativeGraph')
      # Clone the `_parent_graph` in to the current graph. This is so that
      # operations used from the enclosing ImperativeMode context are
      # available in the current context.
      with self._graph.as_default(), self._graph.return_as_is():
        importer.import_graph_def(self._parent_graph.as_graph_def(), name='')
    self._session = session.Session(graph=self._graph, target=self._target)
    # Override the `_session`'s run, so that variable inits can be
    # called before the actual run.
    self._old_run = self._session.run
    self._session.run = self.run
    self._context_managers = [
        self._session.as_default(),
        self._default_graph,
        self._record_variable_inits,
        imperative_graph.add_session_attr(ops.Tensor, self._session)] 
Developer: ryfeus, Project: lambda-packs, Lines: 39, Source: imperative_mode.py

Example 3: testFuseResizePadAndConv

# Required import: from tensorflow.python.framework import importer [as alias]
# Or: from tensorflow.python.framework.importer import import_graph_def [as alias]
def testFuseResizePadAndConv(self):
    with self.test_session() as sess:
      inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
      input_op = constant_op.constant(
          np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32)
      resize_op = image_ops.resize_bilinear(
          input_op, [12, 4], align_corners=False)
      pad_op = array_ops.pad(resize_op, [[0, 0], [1, 1], [2, 2], [0, 0]],
                             mode="REFLECT")
      weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
      weights_op = constant_op.constant(
          np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
      nn_ops.conv2d(
          pad_op, weights_op, [1, 1, 1, 1], padding="VALID", name="output")
      original_graph_def = sess.graph_def
      original_result = sess.run(["output:0"])
    optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv(
        original_graph_def, ["output"])

    with self.test_session() as sess:
      _ = importer.import_graph_def(
          optimized_graph_def, input_map={}, name="optimized")
      optimized_result = sess.run(["optimized/output:0"])

    self.assertAllClose(original_result, optimized_result)

    for node in optimized_graph_def.node:
      self.assertNotEqual("Conv2D", node.op)
      self.assertNotEqual("MirrorPad", node.op)
      self.assertNotEqual("ResizeBilinear", node.op) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 32, Source: optimize_for_inference_test.py
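The test above passes input_map={} and a name prefix; import_graph_def can also splice existing tensors into the imported graph via input_map and hand back imported tensors via return_elements. A minimal self-contained sketch (the "input"/"output" node names and the toy graph are illustrative, not taken from the test above):

import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op, dtypes, importer, ops
from tensorflow.python.ops import array_ops, math_ops

# Build a tiny source graph: output = input * 2.
with ops.Graph().as_default() as source_graph:
    inp = array_ops.placeholder(dtypes.float32, shape=[1, 4], name="input")
    math_ops.multiply(inp, 2.0, name="output")
graph_def = source_graph.as_graph_def()

# Import it, feeding our own constant in place of "input" and
# getting a handle to the imported "output" tensor back.
with ops.Graph().as_default():
    my_input = constant_op.constant(np.ones((1, 4), np.float32))
    output, = importer.import_graph_def(
        graph_def,
        input_map={"input:0": my_input},
        return_elements=["output:0"],
        name="imported")
    with session.Session() as sess:
        print(sess.run(output))  # [[2. 2. 2. 2.]]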

Example 4: testFuseResizeAndConv

# Required import: from tensorflow.python.framework import importer [as alias]
# Or: from tensorflow.python.framework.importer import import_graph_def [as alias]
def testFuseResizeAndConv(self):
    with self.test_session() as sess:
      inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
      input_op = constant_op.constant(
          np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32)
      resize_op = image_ops.resize_bilinear(
          input_op, [12, 4], align_corners=False)
      weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
      weights_op = constant_op.constant(
          np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
      nn_ops.conv2d(
          resize_op, weights_op, [1, 1, 1, 1], padding="VALID", name="output")
      original_graph_def = sess.graph_def
      original_result = sess.run(["output:0"])
    optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv(
        original_graph_def, ["output"])

    with self.test_session() as sess:
      _ = importer.import_graph_def(
          optimized_graph_def, input_map={}, name="optimized")
      optimized_result = sess.run(["optimized/output:0"])

    self.assertAllClose(original_result, optimized_result)

    for node in optimized_graph_def.node:
      self.assertNotEqual("Conv2D", node.op)
      self.assertNotEqual("ResizeBilinear", node.op) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 29, Source: optimize_for_inference_test.py

Example 5: freeze_graph_with_def_protos

# Required import: from tensorflow.python.framework import importer [as alias]
# Or: from tensorflow.python.framework.importer import import_graph_def [as alias]
def freeze_graph_with_def_protos(
    input_graph_def,
    input_saver_def,
    input_checkpoint,
    output_node_names,
    restore_op_name,
    filename_tensor_name,
    clear_devices,
    initializer_nodes,
    variable_names_blacklist=''):
  """Converts all variables in a graph and checkpoint into constants."""
  del restore_op_name, filename_tensor_name  # Unused by updated loading code.

  # 'input_checkpoint' may be a prefix if we're using Saver V2 format
  if not saver_lib.checkpoint_exists(input_checkpoint):
    raise ValueError(
        'Input checkpoint "' + input_checkpoint + '" does not exist!')

  if not output_node_names:
    raise ValueError(
        'You must supply the name of a node to --output_node_names.')

  # Remove all the explicit device specifications for this node. This helps to
  # make the graph more portable.
  if clear_devices:
    for node in input_graph_def.node:
      node.device = ''

  _ = importer.import_graph_def(input_graph_def, name='')

  with session.Session() as sess:
    if input_saver_def:
      saver = saver_lib.Saver(saver_def=input_saver_def)
      saver.restore(sess, input_checkpoint)
    else:
      var_list = {}
      reader = pywrap_tensorflow.NewCheckpointReader(input_checkpoint)
      var_to_shape_map = reader.get_variable_to_shape_map()
      for key in var_to_shape_map:
        try:
          tensor = sess.graph.get_tensor_by_name(key + ':0')
        except KeyError:
          # This tensor doesn't exist in the graph (for example it's
          # 'global_step' or a similar housekeeping element) so skip it.
          continue
        var_list[key] = tensor
      saver = saver_lib.Saver(var_list=var_list)
      saver.restore(sess, input_checkpoint)
      if initializer_nodes:
        sess.run(initializer_nodes)

    variable_names_blacklist = (variable_names_blacklist.split(',') if
                                variable_names_blacklist else None)
    output_graph_def = graph_util.convert_variables_to_constants(
        sess,
        input_graph_def,
        output_node_names.split(','),
        variable_names_blacklist=variable_names_blacklist)

  return output_graph_def 
Developer: ringringyi, Project: DOTA_models, Lines: 62, Source: exporter.py

Example 6: _write_saved_model

# Required import: from tensorflow.python.framework import importer [as alias]
# Or: from tensorflow.python.framework.importer import import_graph_def [as alias]
def _write_saved_model(inference_graph_path, inputs, outputs,
                       checkpoint_path=None, use_moving_averages=False):
  """Writes SavedModel to disk.

  If checkpoint_path is not None bakes the weights into the graph thereby
  eliminating the need of checkpoint files during inference. If the model
  was trained with moving averages, setting use_moving_averages to true
  restores the moving averages, otherwise the original set of variables
  is restored.

  Args:
    inference_graph_path: Path to write inference graph.
    inputs: The input image tensor to use for detection.
    outputs: A tensor dictionary containing the outputs of a DetectionModel.
    checkpoint_path: Optional path to the checkpoint file.
    use_moving_averages: Whether to export the original or the moving averages
      of the trainable variables from the checkpoint.
  """
  inference_graph_def = tf.get_default_graph().as_graph_def()
  checkpoint_graph_def = None
  if checkpoint_path:
    output_node_names = ','.join(outputs.keys())
    checkpoint_graph_def = get_frozen_graph_def(
        inference_graph_def=inference_graph_def,
        use_moving_averages=use_moving_averages,
        input_checkpoint=checkpoint_path,
        output_node_names=output_node_names
    )

  with tf.Graph().as_default():
    with session.Session() as sess:

      tf.import_graph_def(checkpoint_graph_def)

      builder = tf.saved_model.builder.SavedModelBuilder(inference_graph_path)

      tensor_info_inputs = {
          'inputs': tf.saved_model.utils.build_tensor_info(inputs)}
      tensor_info_outputs = {}
      for k, v in outputs.items():
        tensor_info_outputs[k] = tf.saved_model.utils.build_tensor_info(v)

      detection_signature = (
          tf.saved_model.signature_def_utils.build_signature_def(
              inputs=tensor_info_inputs,
              outputs=tensor_info_outputs,
              method_name=signature_constants.PREDICT_METHOD_NAME))

      builder.add_meta_graph_and_variables(
          sess, [tf.saved_model.tag_constants.SERVING],
          signature_def_map={
              signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                  detection_signature,
          },
      )
      builder.save() 
Developer: ringringyi, Project: DOTA_models, Lines: 58, Source: exporter.py

Example 7: testStripUnused

# Required import: from tensorflow.python.framework import importer [as alias]
# Or: from tensorflow.python.framework.importer import import_graph_def [as alias]
def testStripUnused(self):
    input_graph_name = "input_graph.pb"
    output_graph_name = "output_graph.pb"

    # We'll create an input graph that has a single constant containing 1.0,
    # and that then multiplies it by 2.
    with ops.Graph().as_default():
      constant_node = constant_op.constant(1.0, name="constant_node")
      wanted_input_node = math_ops.subtract(constant_node,
                                            3.0,
                                            name="wanted_input_node")
      output_node = math_ops.multiply(
          wanted_input_node, 2.0, name="output_node")
      math_ops.add(output_node, 2.0, name="later_node")
      sess = session.Session()
      output = sess.run(output_node)
      self.assertNear(-4.0, output, 0.00001)
      graph_io.write_graph(sess.graph, self.get_temp_dir(), input_graph_name)

    # We save out the graph to disk, and then call the const conversion
    # routine.
    input_graph_path = os.path.join(self.get_temp_dir(), input_graph_name)
    input_binary = False
    input_node_names = "wanted_input_node"
    output_binary = True
    output_node_names = "output_node"
    output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name)

    strip_unused_lib.strip_unused_from_files(input_graph_path, input_binary,
                                             output_graph_path, output_binary,
                                             input_node_names,
                                             output_node_names,
                                             dtypes.float32.as_datatype_enum)

    # Now we make sure the variable is now a constant, and that the graph still
    # produces the expected result.
    with ops.Graph().as_default():
      output_graph_def = graph_pb2.GraphDef()
      with open(output_graph_path, "rb") as f:
        output_graph_def.ParseFromString(f.read())
        _ = importer.import_graph_def(output_graph_def, name="")

      self.assertEqual(3, len(output_graph_def.node))
      for node in output_graph_def.node:
        self.assertNotEqual("Add", node.op)
        self.assertNotEqual("Sub", node.op)
        if node.name == input_node_names:
          self.assertTrue("shape" in node.attr)

      with session.Session() as sess:
        input_node = sess.graph.get_tensor_by_name("wanted_input_node:0")
        output_node = sess.graph.get_tensor_by_name("output_node:0")
        output = sess.run(output_node, feed_dict={input_node: [10.0]})
        self.assertNear(20.0, output, 0.00001) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 56, Source: strip_unused_test.py

Example 8: testStripUnusedMultipleInputs

# Required import: from tensorflow.python.framework import importer [as alias]
# Or: from tensorflow.python.framework.importer import import_graph_def [as alias]
def testStripUnusedMultipleInputs(self):
    input_graph_name = "input_graph.pb"
    output_graph_name = "output_graph.pb"

    # We'll create an input graph that multiplies two input nodes.
    with ops.Graph().as_default():
      constant_node1 = constant_op.constant(1.0, name="constant_node1")
      constant_node2 = constant_op.constant(2.0, name="constant_node2")
      input_node1 = math_ops.subtract(constant_node1, 3.0, name="input_node1")
      input_node2 = math_ops.subtract(constant_node2, 5.0, name="input_node2")
      output_node = math_ops.multiply(
          input_node1, input_node2, name="output_node")
      math_ops.add(output_node, 2.0, name="later_node")
      sess = session.Session()
      output = sess.run(output_node)
      self.assertNear(6.0, output, 0.00001)
      graph_io.write_graph(sess.graph, self.get_temp_dir(), input_graph_name)

    # We save out the graph to disk, and then call the const conversion
    # routine.
    input_graph_path = os.path.join(self.get_temp_dir(), input_graph_name)
    input_binary = False
    input_node_names = "input_node1,input_node2"
    input_node_types = [
        dtypes.float32.as_datatype_enum, dtypes.float32.as_datatype_enum
    ]
    output_binary = True
    output_node_names = "output_node"
    output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name)

    strip_unused_lib.strip_unused_from_files(input_graph_path, input_binary,
                                             output_graph_path, output_binary,
                                             input_node_names,
                                             output_node_names,
                                             input_node_types)

    # Now we make sure the variable is now a constant, and that the graph still
    # produces the expected result.
    with ops.Graph().as_default():
      output_graph_def = graph_pb2.GraphDef()
      with open(output_graph_path, "rb") as f:
        output_graph_def.ParseFromString(f.read())
        _ = importer.import_graph_def(output_graph_def, name="")

      self.assertEqual(3, len(output_graph_def.node))
      for node in output_graph_def.node:
        self.assertNotEqual("Add", node.op)
        self.assertNotEqual("Sub", node.op)
        if node.name == input_node_names:
          self.assertTrue("shape" in node.attr)

      with session.Session() as sess:
        input_node1 = sess.graph.get_tensor_by_name("input_node1:0")
        input_node2 = sess.graph.get_tensor_by_name("input_node2:0")
        output_node = sess.graph.get_tensor_by_name("output_node:0")
        output = sess.run(output_node,
                          feed_dict={input_node1: [10.0],
                                     input_node2: [-5.0]})
        self.assertNear(-50.0, output, 0.00001) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 61, Source: strip_unused_test.py

Example 9: testFoldBatchNorms

# Required import: from tensorflow.python.framework import importer [as alias]
# Or: from tensorflow.python.framework.importer import import_graph_def [as alias]
def testFoldBatchNorms(self):
    with self.test_session() as sess:
      inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
      input_op = constant_op.constant(
          np.array(inputs), shape=[1, 1, 6, 2], dtype=dtypes.float32)
      weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
      weights_op = constant_op.constant(
          np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
      conv_op = nn_ops.conv2d(
          input_op, weights_op, [1, 1, 1, 1], padding="SAME", name="conv_op")
      mean_op = constant_op.constant(
          np.array([10, 20]), shape=[2], dtype=dtypes.float32)
      variance_op = constant_op.constant(
          np.array([0.25, 0.5]), shape=[2], dtype=dtypes.float32)
      beta_op = constant_op.constant(
          np.array([0.1, 0.6]), shape=[2], dtype=dtypes.float32)
      gamma_op = constant_op.constant(
          np.array([1.0, 2.0]), shape=[2], dtype=dtypes.float32)
      ops.get_default_graph().graph_def_versions.producer = 8
      gen_nn_ops._batch_norm_with_global_normalization(
          conv_op,
          mean_op,
          variance_op,
          beta_op,
          gamma_op,
          0.00001,
          False,
          name="output")
      original_graph_def = sess.graph_def
      original_result = sess.run(["output:0"])
    optimized_graph_def = optimize_for_inference_lib.fold_batch_norms(
        original_graph_def)

    with self.test_session() as sess:
      _ = importer.import_graph_def(
          optimized_graph_def, input_map={}, name="optimized")
      optimized_result = sess.run(["optimized/output:0"])

    self.assertAllClose(original_result, optimized_result)

    for node in optimized_graph_def.node:
      self.assertNotEqual("BatchNormWithGlobalNormalization", node.op) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 44, Source: optimize_for_inference_test.py

Example 10: _testFreezeGraph

# Required import: from tensorflow.python.framework import importer [as alias]
# Or: from tensorflow.python.framework.importer import import_graph_def [as alias]
def _testFreezeGraph(self, saver_write_version):

    checkpoint_prefix = os.path.join(self.get_temp_dir(), "saved_checkpoint")
    checkpoint_state_name = "checkpoint_state"
    input_graph_name = "input_graph.pb"
    output_graph_name = "output_graph.pb"

    # We'll create an input graph that has a single variable containing 1.0,
    # and that then multiplies it by 2.
    with ops.Graph().as_default():
      variable_node = variables.Variable(1.0, name="variable_node")
      output_node = math_ops.multiply(variable_node, 2.0, name="output_node")
      sess = session.Session()
      init = variables.global_variables_initializer()
      sess.run(init)
      output = sess.run(output_node)
      self.assertNear(2.0, output, 0.00001)
      saver = saver_lib.Saver(write_version=saver_write_version)
      checkpoint_path = saver.save(
          sess,
          checkpoint_prefix,
          global_step=0,
          latest_filename=checkpoint_state_name)
      graph_io.write_graph(sess.graph, self.get_temp_dir(), input_graph_name)

    # We save out the graph to disk, and then call the const conversion
    # routine.
    input_graph_path = os.path.join(self.get_temp_dir(), input_graph_name)
    input_saver_def_path = ""
    input_binary = False
    output_node_names = "output_node"
    restore_op_name = "save/restore_all"
    filename_tensor_name = "save/Const:0"
    output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name)
    clear_devices = False

    freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
                              input_binary, checkpoint_path, output_node_names,
                              restore_op_name, filename_tensor_name,
                              output_graph_path, clear_devices, "")

    # Now we make sure the variable is now a constant, and that the graph still
    # produces the expected result.
    with ops.Graph().as_default():
      output_graph_def = graph_pb2.GraphDef()
      with open(output_graph_path, "rb") as f:
        output_graph_def.ParseFromString(f.read())
        _ = importer.import_graph_def(output_graph_def, name="")

      self.assertEqual(4, len(output_graph_def.node))
      for node in output_graph_def.node:
        self.assertNotEqual("VariableV2", node.op)
        self.assertNotEqual("Variable", node.op)

      with session.Session() as sess:
        output_node = sess.graph.get_tensor_by_name("output_node:0")
        output = sess.run(output_node)
        self.assertNear(2.0, output, 0.00001) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 60, Source: freeze_graph_test.py

Example 11: main

# Required import: from tensorflow.python.framework import importer [as alias]
# Or: from tensorflow.python.framework.importer import import_graph_def [as alias]
def main(unused_args):
  if not gfile.Exists(FLAGS.input):
    print("Input graph file '" + FLAGS.input + "' does not exist!")
    return -1

  known_modes = [
      "round", "quantize", "eightbit", "weights", "test", "weights_rounded"
  ]
  if not any(FLAGS.mode in s for s in known_modes):
    print("mode is '" + FLAGS.mode + "', not in " + ", ".join(known_modes) +
          ".")
    return -1

  tf_graph = graph_pb2.GraphDef()
  with gfile.Open(FLAGS.input, "rb") as f:
    data = f.read()
    tf_graph.ParseFromString(data)

  graph = ops.Graph()
  with graph.as_default():
    importer.import_graph_def(tf_graph, input_map={}, name="")

  quantized_input_range = None
  if FLAGS.quantized_input:
    quantized_input_range = [
        FLAGS.quantized_input_min, FLAGS.quantized_input_max
    ]

  fallback_quantization_range = None
  if (FLAGS.quantized_fallback_min is not None or
      FLAGS.quantized_fallback_max is not None):
    assert FLAGS.quantized_fallback_min is not None
    assert FLAGS.quantized_fallback_max is not None
    fallback_quantization_range = [
        FLAGS.quantized_fallback_min, FLAGS.quantized_fallback_max
    ]

  rewriter = GraphRewriter(tf_graph, FLAGS.mode, quantized_input_range,
                           fallback_quantization_range)

  output_graph = rewriter.rewrite(FLAGS.output_node_names.split(","))

  f = gfile.FastGFile(FLAGS.output, "wb")
  f.write(output_graph.SerializeToString())

  return 0 
Developer: googlecodelabs, Project: tensorflow-for-poets-2, Lines: 48, Source: quantize_graph.py


Note: The tensorflow.python.framework.importer.import_graph_def method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.