Python ops.uid Function Code Examples

This article collects typical usage examples of the tensorflow.python.framework.ops.uid function in Python. If you are wondering how the uid function is used in practice, what it is for, or what real-world calls to it look like, the curated examples below should help.


Fifteen code examples of the uid function are shown below, sorted by popularity by default.
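
As a quick orientation before the examples, here is a minimal, illustrative sketch written for this article (not taken from any of the projects below): ops.uid() returns a process-local integer that differs on every call, and the examples below use it to build collision-free paths, shared_names, and variable names.

from tensorflow.python.framework import ops

# Each call returns a fresh process-local integer id.
first = ops.uid()
second = ops.uid()
assert first != second

# Typical pattern seen in the examples below: append the id to a path or
# resource name so that repeated runs and tests do not collide.
export_dir = "/tmp/saved_model/%d" % ops.uid()      # hypothetical path
shared_name = "table_%d" % ops.uid()                # hypothetical resource name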

Example 1: test_asset_loading

  def test_asset_loading(self):
    first_path = self._v1_asset_saved_model()
    imported = load.load(first_path)
    self.evaluate(lookup_ops.tables_initializer())
    fn = imported.signatures["serving_default"]
    self.assertAllClose({"output": [2, 0]},
                        fn(start=constant_op.constant(["gamma", "alpha"])))
    second_path = os.path.join(self.get_temp_dir(), "saved_model",
                               str(ops.uid()))
    save.save(imported, second_path, signatures=imported.signatures)
    shutil.rmtree(first_path)
    del ops.get_collection_ref(ops.GraphKeys.TABLE_INITIALIZERS)[:]
    second_import = load.load(second_path)
    self.evaluate(lookup_ops.tables_initializer())
    fn = second_import.signatures["serving_default"]
    self.assertAllClose({"output": [2, 0]},
                        fn(start=constant_op.constant(["gamma", "alpha"])))

    third_path = os.path.join(self.get_temp_dir(), "saved_model",
                              str(ops.uid()))
    save.save(second_import, third_path, signatures=second_import.signatures)
    shutil.rmtree(second_path)
    del ops.get_collection_ref(ops.GraphKeys.TABLE_INITIALIZERS)[:]
    third_import = load.load(third_path)
    self.evaluate(lookup_ops.tables_initializer())
    fn = third_import.signatures["serving_default"]
    self.assertAllClose({"output": [2, 0]},
                        fn(start=constant_op.constant(["gamma", "alpha"])))
Developer: aritratony | Project: tensorflow | Lines: 28 | Source: load_v1_in_v2_test.py

Example 2: _v1_multi_metagraph_saved_model

 def _v1_multi_metagraph_saved_model(self):
   export_graph = ops.Graph()
   with export_graph.as_default():
     start = array_ops.placeholder(
         shape=[None], dtype=dtypes.float32, name="start")
     v = resource_variable_ops.ResourceVariable(21.)
     first_output = array_ops.identity(start * v, name="first_output")
     second_output = array_ops.identity(v, name="second_output")
     with session_lib.Session() as session:
       session.run(v.initializer)
       path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
       builder = builder_impl.SavedModelBuilder(path)
       builder.add_meta_graph_and_variables(
           session, tags=["first"],
           signature_def_map={
               "first_key": signature_def_utils.build_signature_def(
                   {"first_start": utils_impl.build_tensor_info(start)},
                   {"first_output": utils_impl.build_tensor_info(
                       first_output)})})
       builder.add_meta_graph(
           tags=["second"],
           signature_def_map={
               "second_key": signature_def_utils.build_signature_def(
                   {"second_start": utils_impl.build_tensor_info(start)},
                   {"second_output": utils_impl.build_tensor_info(
                       second_output)})})
       builder.save()
   return path
Developer: adit-chandra | Project: tensorflow | Lines: 28 | Source: load_v1_in_v2_test.py

Example 3: load_function_def_library

def load_function_def_library(library):
  """Load a set of functions as concrete functions without captured inputs.

  Function names are manipulated during load such that they do not overlap
  with previously created ones.

  Args:
    library: FunctionDefLibrary proto message.

  Returns:
    Map of original function names in the library to instances of
    `ConcreteFunction` without captured inputs.

  Raises:
    ValueError: if function dependencies have a cycle.
  """
  functions = {}

  load_shared_name_suffix = "_load_{}".format(ops.uid())
  for fdef in _sort_function_defs(library):
    copy = _fix_fdef(fdef, functions, load_shared_name_suffix)

    func_graph = function_def_lib.function_def_to_graph(copy)
    for dep in _list_function_deps(fdef):
      functions[dep].add_to_graph(func_graph)
    func = function_lib.ConcreteFunction(func_graph)
    func.add_to_graph()

    functions[fdef.signature.name] = func

    # Also register the gradients in the current root context.
    with ops.init_scope():
      func._register_gradient()  # pylint: disable=protected-access

  return functions
Developer: adit-chandra | Project: tensorflow | Lines: 35 | Source: function_deserialization.py

Example 4: _no_trainable_variable_attribute

  def _no_trainable_variable_attribute(self, trainable):
    """A SavedModel where the VariableDef has no 'trainable' (it's false)."""

    class _MissingFieldsVariable(resource_variable_ops.ResourceVariable):

      def to_proto(self, export_scope=None):
        full_proto = super(_MissingFieldsVariable, self).to_proto(export_scope)
        return variable_pb2.VariableDef(
            variable_name=full_proto.variable_name,
            initial_value_name=full_proto.initial_value_name,
            initializer_name=full_proto.snapshot_name,
            save_slice_info_def=full_proto.save_slice_info_def,
            is_resource=full_proto.is_resource)

    export_graph = ops.Graph()
    with export_graph.as_default():
      v = _MissingFieldsVariable(3., trainable=trainable)
      with session_lib.Session() as session:
        session.run([v.initializer])
        path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
        b = builder_impl.SavedModelBuilder(path)
        b.add_meta_graph_and_variables(
            session,
            tags=[tag_constants.SERVING],
            signature_def_map={})
        b.save()

    return path
Developer: aritratony | Project: tensorflow | Lines: 28 | Source: load_v1_in_v2_test.py

Example 5: _v1_single_metagraph_saved_model

 def _v1_single_metagraph_saved_model(self, use_resource):
   export_graph = ops.Graph()
   with export_graph.as_default():
     start = array_ops.placeholder(
         shape=[None], dtype=dtypes.float32, name="start")
     if use_resource:
       distractor = variables.RefVariable(-1., name="distractor")
       v = resource_variable_ops.ResourceVariable(3., name="v")
     else:
       # "distractor" gets saved in the checkpoint and so used in the restore
       # function, but not in the pruned function for the signature. This tests
       # node naming: it needs to be consistent (and ideally always the same as
       # the node in the original GraphDef) for the resource manager to find
       # the right variable.
       distractor = variables.RefVariable(-1., name="distractor")
       v = variables.RefVariable(3., name="v")
     local_variable = variables.VariableV1(
         1.,
         collections=[ops.GraphKeys.LOCAL_VARIABLES],
         trainable=False,
         use_resource=True)
     output = array_ops.identity(start * v * local_variable, name="output")
     with session_lib.Session() as session:
       session.run([v.initializer, distractor.initializer,
                    local_variable.initializer])
       path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
       simple_save.simple_save(
           session,
           path,
           inputs={"start": start},
           outputs={"output": output},
           legacy_init_op=local_variable.initializer)
   return path
Developer: adit-chandra | Project: tensorflow | Lines: 33 | Source: load_v1_in_v2_test.py

Example 6: _v1_nested_while_saved_model

  def _v1_nested_while_saved_model(self):
    export_graph = ops.Graph()
    with export_graph.as_default():

      def _inner_while(loop_iterations):
        _, output = control_flow_ops.while_loop(
            lambda index, accum: index <= loop_iterations,
            lambda index, accum: (index + 1, accum + index),
            [constant_op.constant(0), constant_op.constant(0)])
        return output

      loop_iterations = array_ops.placeholder(
          name="loop_iterations", shape=[], dtype=dtypes.int32)
      _, output = control_flow_ops.while_loop(
          lambda index, accum: index <= loop_iterations,
          lambda index, accum: (index + 1, accum + _inner_while(index)),
          [constant_op.constant(0), constant_op.constant(0)])
      with session_lib.Session() as session:
        path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
        simple_save.simple_save(
            session,
            path,
            inputs={"loop_iterations": loop_iterations},
            outputs={"output": output})
    return path
Developer: adit-chandra | Project: tensorflow | Lines: 25 | Source: load_v1_in_v2_test.py

Example 7: initialize

  def initialize(self, table):
    """Initializes the given `table` with `keys` and `values` tensors.

    Args:
      table: The table to initialize.

    Returns:
      The operation that initializes the table.

    Raises:
      TypeError: when the keys and values data types do not match the table
      key and value data types.
    """
    _check_table_dtypes(table, self._keys.dtype, self._values.dtype)
    with ops.name_scope(
        self._name, values=(table.table_ref, self._keys,
                            self._values)) as scope:
      if context.executing_eagerly():
        # Ensure a unique name when eager execution is enabled to avoid spurious
        # sharing issues.
        scope += str(ops.uid())
      init_op = gen_lookup_ops.initialize_table_v2(
          table.table_ref, self._keys, self._values, name=scope)
    ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
    return init_op
Developer: Huoxubeiyin | Project: tensorflow | Lines: 25 | Source: lookup_ops.py

Example 8: capture

  def capture(self, tensor, name=None):
    """Captures `tensor` if it's external to this graph.

    If `tensor` is from a different graph, returns a placeholder for it.
    `tensor` and the placeholder will appear in self.captures, and the
    placeholder will appear in self.inputs.  Multiple calls to this method with
    the same `tensor` argument will return the same placeholder. If `tensor` is
    from this graph, returns `tensor`.

    Args:
      tensor: Tensor. May be from this FuncGraph or a different graph.
      name: Optional name if a placeholder is created.

    Returns:
      Tensor from this FuncGraph.
    """
    if isinstance(tensor, ops.EagerTensor):
      if name is None:
        name = str(ops.uid())
      return self._capture_helper(tensor, name)
    if tensor.graph is not self:
      if name is None:
        name = tensor.op.name
      return self._capture_helper(tensor, name)
    return tensor
Developer: rmlarsen | Project: tensorflow | Lines: 25 | Source: func_graph.py

Example 9: initialize

  def initialize(self, table):
    """Initializes the given `table` with `keys` and `values` tensors.

    Args:
      table: The table to initialize.

    Returns:
      The operation that initializes the table.

    Raises:
      TypeError: when the keys and values data types do not match the table
      key and value data types.
    """
    _check_table_dtypes(table, self._keys.dtype, self._values.dtype)
    with ops.name_scope(
        self._name, values=(table.resource_handle, self._keys,
                            self._values)) as scope:
      if context.executing_eagerly():
        # Ensure a unique name when eager execution is enabled to avoid spurious
        # sharing issues.
        scope += str(ops.uid())
      if fwd_compat.forward_compatible(2018, 9, 19):
        init_op = gen_lookup_ops.lookup_table_import_v2(
            table.resource_handle, self._keys, self._values, name=scope)
      else:
        # To maintain forward compatibility, use the old implementation.
        init_op = gen_lookup_ops.initialize_table_v2(
            table.resource_handle, self._keys, self._values, name=scope)
    ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
    return init_op
Developer: aeverall | Project: tensorflow | Lines: 30 | Source: lookup_ops.py

Example 10: opt_variable

    def opt_variable(value, dtype=None, name=None, constraint=None):
      """Instantiates a variable and returns it."""
      if dtype is None:
        dtype = backend.floatx()

      variables = []
      for i in range(num_replicas):
        # Keras holds the variables in the optimizer class instance, so the name
        # does not matter here. ResourceVariable constructor will find a unique
        # name (including name=None) for each replica.
        with ops.device("device:TPU:{}".format(i)):
          v = resource_variable_ops.ResourceVariable(
              value,
              dtype=dtypes_module.as_dtype(dtype),
              name=name,
              constraint=constraint)
          variables.append(v)
      name = "replicate_{}_{}".format("variable" if name is None else name,
                                      ops.uid())
      v = ReplicatedVariable(name, variables)

      # pylint: disable=protected-access

      if isinstance(value, np.ndarray):
        v._keras_shape = value.shape
      elif hasattr(value, "shape"):
        v._keras_shape = backend.int_shape(value)
      v._uses_learning_phase = False
      backend.track_variable(v)
      return v
Developer: Ajaycs99 | Project: tensorflow | Lines: 30 | Source: keras_tpu_variables.py

Example 11: _v1_asset_saved_model

 def _v1_asset_saved_model(self):
   export_graph = ops.Graph()
   vocab_path = os.path.join(self.get_temp_dir(), "vocab.txt")
   with open(vocab_path, "w") as f:
     f.write("alpha\nbeta\ngamma\n")
   with export_graph.as_default():
     initializer = lookup_ops.TextFileInitializer(
         vocab_path,
         key_dtype=dtypes.string,
         key_index=lookup_ops.TextFileIndex.WHOLE_LINE,
         value_dtype=dtypes.int64,
         value_index=lookup_ops.TextFileIndex.LINE_NUMBER)
     table = lookup_ops.HashTable(
         initializer, default_value=-1)
     start = array_ops.placeholder(
         shape=None, dtype=dtypes.string, name="in")
     output = table.lookup(start, name="out")
     with session_lib.Session() as session:
       session.run([table.initializer])
       path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
       simple_save.simple_save(
           session,
           path,
           inputs={"start": start},
           outputs={"output": output},
           legacy_init_op=table.initializer)
   file_io.delete_file(vocab_path)
   return path
Developer: adit-chandra | Project: tensorflow | Lines: 28 | Source: load_v1_in_v2_test.py

Example 12: maybe_capture_tensor

 def maybe_capture_tensor(self, tensor):
   if isinstance(tensor, ops.EagerTensor):
     return capture_value(
         self.captures, tensor, tensor.dtype, str(ops.uid()))
   if tensor.graph is not self:
     return capture_value(
         self.captures, tensor, tensor.dtype, tensor.op.name)
   return tensor
Developer: Jackiefan | Project: tensorflow | Lines: 8 | Source: function.py

Example 13: create_file_writer_v2

def create_file_writer_v2(logdir,
                          max_queue=None,
                          flush_millis=None,
                          filename_suffix=None,
                          name=None):
  """Creates a summary file writer for the given log directory.

  Args:
    logdir: a string specifying the directory in which to write an event file.
    max_queue: the largest number of summaries to keep in a queue; will
     flush once the queue gets bigger than this. Defaults to 10.
    flush_millis: the largest interval between flushes. Defaults to 120,000.
    filename_suffix: optional suffix for the event file name. Defaults to `.v2`.
    name: a name for the op that creates the writer.

  Returns:
    A SummaryWriter object.
  """
  if logdir is None:
    raise ValueError("logdir cannot be None")
  inside_function = ops.inside_function()
  with ops.name_scope(name, "create_file_writer") as scope, ops.device("cpu:0"):
    # Run init inside an init_scope() to hoist it out of tf.functions.
    with ops.init_scope():
      if context.executing_eagerly():
        _check_create_file_writer_args(
            inside_function,
            logdir=logdir,
            max_queue=max_queue,
            flush_millis=flush_millis,
            filename_suffix=filename_suffix)
      logdir = ops.convert_to_tensor(logdir, dtype=dtypes.string)
      if max_queue is None:
        max_queue = constant_op.constant(10)
      if flush_millis is None:
        flush_millis = constant_op.constant(2 * 60 * 1000)
      if filename_suffix is None:
        filename_suffix = constant_op.constant(".v2")
      # Prepend the PID and a process-local UID to the filename suffix to avoid
      # filename collisions within the machine (the filename already contains
      # the hostname to avoid cross-machine collisions).
      unique_prefix = constant_op.constant(".%s.%s" % (os.getpid(), ops.uid()))
      filename_suffix = unique_prefix + filename_suffix
      # Use a unique shared_name to prevent resource sharing.
      if context.executing_eagerly():
        shared_name = context.shared_name()
      else:
        shared_name = ops.name_from_scope_name(scope)  # pylint: disable=protected-access
      return ResourceSummaryWriter(
          shared_name=shared_name,
          init_op_fn=functools.partial(
              gen_summary_ops.create_summary_file_writer,
              logdir=logdir,
              max_queue=max_queue,
              flush_millis=flush_millis,
              filename_suffix=filename_suffix),
          name=name,
          v2=True)
Developer: adit-chandra | Project: tensorflow | Lines: 58 | Source: summary_ops_v2.py

Example 14: test_nonexistant_prefix_directory

 def test_nonexistant_prefix_directory(self):
   m = keras.Model()
   v = m.add_weight(name='v', shape=[])
   self.evaluate(v.assign(42.))
   prefix = os.path.join(self.get_temp_dir(), '{}'.format(ops.uid()), 'bckpt')
   m.save_weights(prefix)
   self.evaluate(v.assign(2.))
   m.load_weights(prefix)
   self.assertEqual(42., self.evaluate(v))
Developer: terrytangyuan | Project: tensorflow | Lines: 9 | Source: hdf5_format_test.py

Example 15: _eager_safe_variable_handle

def _eager_safe_variable_handle(shape, dtype, shared_name, name, graph_mode):
  """Creates a variable handle with information to do shape inference."""
  container = ops.get_default_graph()._container  # pylint: disable=protected-access
  if container is None:
    container = ""
  if not graph_mode:
    # When in eager mode use a uid for the shared_name, to prevent accidental
    # sharing.
    shared_name = str(ops.uid())
  handle = gen_resource_variable_ops.var_handle_op(shape=shape, dtype=dtype,
                                                   shared_name=shared_name,
                                                   name=name,
                                                   container=container)
  if graph_mode:
    return handle

  # We do not want two distinct ResourceVariable objects for the same
  # underlying resource in the runtime.
  # When in eager mode, explicitly ensure so here. When in graph mode, it's
  # ensured by always generating different variable names.
  exists = gen_resource_variable_ops.var_is_initialized_op(handle)
  if exists:
    raise ValueError("variable object with name '%s' already created. Use "
                     "get_variable() if reuse is desired." %
                     shared_name)
  with context.graph_mode(), ops.Graph().as_default() as graph:
    h = gen_resource_variable_ops.var_handle_op(shape=shape, dtype=dtype,
                                                shared_name=shared_name,
                                                name=name,
                                                container=container)

    # Tensor._handle_data contains information for the shape-inference code to
    # know the shape and dtype of the variable pointed to by a handle. Since
    # shape inference doesn't run in eager mode we copy this data here for when
    # the handle is captured by an eager mode function.
    handle._handle_data = h._handle_data  # pylint: disable=protected-access
  # Clean up our reference cycles to avoid making the garbage collector run.
  # pylint: disable=protected-access
  # OrderedDict, constructed on Graph creation, makes a simple reference loop
  # and hides it in an __attribute in some Python versions. We don't need to
  # throw an error if we can't find it, but if we do find it we can break the
  # loop to avoid creating work for the garbage collector.
  problematic_cycle = graph._functions.__dict__.get("_OrderedDict__root", None)
  # pylint: enable=protected-access
  if problematic_cycle:
    try:
      del problematic_cycle[0][:]
    except TypeError:
      # This is probably not one of the problematic Python versions. Continue
      # with the rest of our cleanup.
      pass
  # Now clean up our own reference cycles by clearing all of the attributes for
  # the Graph and op we created.
  h.__dict__ = {}
  graph.__dict__ = {}
  return handle
Developer: keithc61 | Project: tensorflow | Lines: 56 | Source: resource_variable_ops.py


Note: The tensorflow.python.framework.ops.uid examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors, and the source code copyright remains with those authors; consult each project's license before distributing or using the code. Do not reproduce this article without permission.