

Python array_ops.identity Function Code Examples

This article collects typical usage examples of the tensorflow.python.ops.array_ops.identity function in Python. If you are wondering what identity does, how to call it, or what real-world uses look like, the curated examples below should help.


The sections below present 15 code examples of the identity function, sorted by popularity by default.
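Before diving into the examples, a quick orientation: array_ops.identity returns a new tensor (a new op in the graph) with the same shape and contents as its input. That makes it useful for renaming tensors, attaching control dependencies, and forcing cached values to be recomputed, which is how most of the examples below use it. The following minimal sketch is illustrative only; it assumes a TensorFlow 2.x installation with eager execution and uses the internal array_ops module exactly as the examples do.

import tensorflow as tf
from tensorflow.python.ops import array_ops

x = tf.constant([1.0, 2.0, 3.0])
# identity returns a tensor with the same values as `x`, but as a distinct op,
# so it can carry its own name and pick up surrounding control dependencies.
y = array_ops.identity(x, name="x_copy")
print(y.numpy())  # [1. 2. 3.]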

Example 1: testBijector

 def testBijector(self):
   x_ = np.arange(3 * 4 * 2).astype(np.float32).reshape(3, 4 * 2)
   with self.cached_session() as sess:
     nvp = RealNVP(
         num_masked=4,
         validate_args=True,
         **self._real_nvp_kwargs)
     x = constant_op.constant(x_)
     forward_x = nvp.forward(x)
     # Use identity to invalidate cache.
     inverse_y = nvp.inverse(array_ops.identity(forward_x))
     forward_inverse_y = nvp.forward(inverse_y)
     fldj = nvp.forward_log_det_jacobian(x, event_ndims=1)
     # Use identity to invalidate cache.
     ildj = nvp.inverse_log_det_jacobian(
         array_ops.identity(forward_x), event_ndims=1)
     variables.global_variables_initializer().run()
     [
         forward_x_,
         inverse_y_,
         forward_inverse_y_,
         ildj_,
         fldj_,
     ] = sess.run([
         forward_x,
         inverse_y,
         forward_inverse_y,
         ildj,
         fldj,
     ])
     self.assertEqual("real_nvp", nvp.name)
     self.assertAllClose(forward_x_, forward_inverse_y_, rtol=1e-1, atol=0.)
     self.assertAllClose(x_, inverse_y_, rtol=1e-1, atol=0.)
     self.assertAllClose(ildj_, -fldj_, rtol=1e-6, atol=0.)
Contributor: Ajaycs99, Project: tensorflow, Lines: 34, Source: real_nvp_test.py
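The "Use identity to invalidate cache" comments in Example 1 refer to the way TensorFlow's Bijector classes (now maintained in TensorFlow Probability) cache forward/inverse results keyed on the input tensor object; wrapping a tensor in identity yields a distinct tensor, so the cached value cannot be reused and the inverse path is genuinely exercised. A hedged illustration of that idea, assuming tensorflow_probability is installed (the Exp bijector is used purely for illustration):

import tensorflow as tf
import tensorflow_probability as tfp

bij = tfp.bijectors.Exp()
y = bij.forward(tf.constant([0.0, 1.0]))
# inverse(y) may be answered from the bijector's cache, which is keyed on the
# tensor object; tf.identity(y) is a new tensor, so the inverse is recomputed.
x_recomputed = bij.inverse(tf.identity(y))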

Example 2: test_rank_one_tensor_raises_if_rank_mismatches_static_rank

 def test_rank_one_tensor_raises_if_rank_mismatches_static_rank(self):
   with self.test_session():
     tensor_rank1 = constant_op.constant((42, 43), name="my_tensor")
     with self.assertRaisesRegexp(ValueError, "my_tensor.*rank"):
       with ops.control_dependencies([
           check_ops.assert_rank_in(tensor_rank1, (0, 2))]):
         array_ops.identity(tensor_rank1).eval()
Contributor: 1000sprites, Project: tensorflow, Lines: 7, Source: check_ops_test.py

Example 3: test_rank_one_tensor_doesnt_raise_if_rank_just_right_dynamic_rank

 def test_rank_one_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
   with self.test_session():
     tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
     desired_rank = 1
     with ops.control_dependencies(
         [check_ops.assert_rank_at_least(tensor, desired_rank)]):
       array_ops.identity(tensor).eval(feed_dict={tensor: [1, 2]})
Contributor: 1000sprites, Project: tensorflow, Lines: 7, Source: check_ops_test.py
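Examples 2 and 3 rely on a common TensorFlow idiom: an assertion op has no effect unless something depends on it, so the tests wrap identity in a control_dependencies block to tie the check to the tensor that is actually evaluated. A hedged sketch of the same pattern with the public TF 2.x API (names and values are illustrative):

import tensorflow as tf

@tf.function
def rank_checked(t):
  # The assertion runs because the returned identity op depends on it.
  with tf.control_dependencies([tf.debugging.assert_rank_at_least(t, 1)]):
    return tf.identity(t, name="rank_checked")

print(rank_checked(tf.constant([1.0, 2.0])))  # passes the rank check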

Example 4: _create_estimator_spec

 def _create_estimator_spec(features, mode, logits, labels, train_op_fn):
   del features, labels  # Not used.
   trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
   testcase.assertItemsEqual(
       expected_var_names,
       [var.name for var in trainable_vars])
   loss = constant_op.constant(1.)
   assert_logits = _assert_close(
       expected_logits, logits, message='Failed for mode={}. '.format(mode))
   with ops.control_dependencies([assert_logits]):
     if mode == model_fn.ModeKeys.TRAIN:
       return model_fn.EstimatorSpec(
           mode=mode,
           loss=loss,
           train_op=train_op_fn(loss))
     elif mode == model_fn.ModeKeys.EVAL:
       return model_fn.EstimatorSpec(
           mode=mode,
           loss=array_ops.identity(loss))
     elif mode == model_fn.ModeKeys.PREDICT:
       return model_fn.EstimatorSpec(
           mode=mode,
           predictions={'logits': array_ops.identity(logits)})
     else:
       testcase.fail('Invalid mode: {}'.format(mode))
Contributor: cameronphchen, Project: tensorflow, Lines: 25, Source: dnn_test.py

Example 5: test_rank_zero_tensor_doesnt_raise_if_rank_matches_dynamic_rank

 def test_rank_zero_tensor_doesnt_raise_if_rank_matches_dynamic_rank(self):
   with self.test_session():
     tensor_rank0 = array_ops.placeholder(dtypes.float32, name="my_tensor")
     for desired_ranks in ((0, 1, 2), (1, 0, 2), (1, 2, 0)):
       with ops.control_dependencies([
           check_ops.assert_rank_in(tensor_rank0, desired_ranks)]):
         array_ops.identity(tensor_rank0).eval(feed_dict={tensor_rank0: 42.0})
Contributor: 1000sprites, Project: tensorflow, Lines: 7, Source: check_ops_test.py

Example 6: _LayerWithIdentity

  def _LayerWithIdentity(self,
                         input_tensor=None,
                         scope='test',
                         post_activation_bypass=False):
    """Add a basic conv, identity, batch norm with skip to the default graph."""
    batch_size, height, width, depth = 5, 128, 128, 3
    if input_tensor is None:
      input_tensor = array_ops.zeros((batch_size, height, width, depth))
    weight_init = init_ops.truncated_normal_initializer
    with ops.name_scope(scope):
      output = layers.conv2d(
          input_tensor,
          depth, [5, 5],
          padding='SAME',
          weights_initializer=weight_init(0.09),
          activation_fn=None,
          normalizer_fn=None,
          biases_initializer=None)
      output = array_ops.identity(output, name='conv_out')

      output = layers.batch_norm(
          output, center=True, scale=True, decay=1.0 - 0.003, fused=True)

      output = array_ops.identity(output, name='bn_out')
      if post_activation_bypass:
        output += input_tensor
    return output
Contributor: ThunderQi, Project: tensorflow, Lines: 27, Source: quantize_graph_test.py

Example 7: testFeedIndexedSlicesWithoutDenseShape

 def testFeedIndexedSlicesWithoutDenseShape(self):
   with session.Session() as s:
     values = np.array([1.0, 2.0]).astype(np.float32)
     indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
     dense_shape = None
     ind = ops.IndexedSlices(
         array_ops.placeholder(dtype=np.float32,
                               shape=(2,)),
         array_ops.placeholder(dtype=np.int64,
                               shape=(2, 3)),
         None)
     ind_values = array_ops.identity(ind.values)
     ind_indices = array_ops.identity(ind.indices)
     ind2 = ops.IndexedSlices(ind_values, ind_indices)
     # Feed with tuple
     values_out, indices_out = s.run(
         [ind_values, ind_indices], {ind: (values, indices)})
     self.assertAllEqual(values_out, values)
     self.assertAllEqual(indices_out, indices)
     # Feed with IndexedSlicesValue
     values_out, indices_out = s.run(
         [ind_values, ind_indices],
         {ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
     self.assertAllEqual(values_out, values)
     self.assertAllEqual(indices_out, indices)
     # Feed with IndexedSlicesValue, fetch IndexedSlicesValue
     ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
                                                         dense_shape)})
     self.assertAllEqual(ind2_out.values, values)
     self.assertAllEqual(ind2_out.indices, indices)
     self.assertAllEqual(ind2_out.dense_shape, dense_shape)
Contributor: agouwin, Project: udacity_deep_learning_homework, Lines: 31, Source: session_test.py

Example 8: _v1_multi_metagraph_saved_model

 def _v1_multi_metagraph_saved_model(self):
   export_graph = ops.Graph()
   with export_graph.as_default():
     start = array_ops.placeholder(
         shape=[None], dtype=dtypes.float32, name="start")
     v = resource_variable_ops.ResourceVariable(21.)
     first_output = array_ops.identity(start * v, name="first_output")
     second_output = array_ops.identity(v, name="second_output")
     with session_lib.Session() as session:
       session.run(v.initializer)
       path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
       builder = builder_impl.SavedModelBuilder(path)
       builder.add_meta_graph_and_variables(
           session, tags=["first"],
           signature_def_map={
               "first_key": signature_def_utils.build_signature_def(
                   {"first_start": utils_impl.build_tensor_info(start)},
                   {"first_output": utils_impl.build_tensor_info(
                       first_output)})})
       builder.add_meta_graph(
           tags=["second"],
           signature_def_map={
               "second_key": signature_def_utils.build_signature_def(
                   {"second_start": utils_impl.build_tensor_info(start)},
                   {"second_output": utils_impl.build_tensor_info(
                       second_output)})})
       builder.save()
   return path
Contributor: adit-chandra, Project: tensorflow, Lines: 28, Source: load_v1_in_v2_test.py

Example 9: custom_getter

 def custom_getter(getter, *args, **kwargs):
   var = getter(*args, **kwargs)
   if kwargs["reuse"]:
     # This can be used, e.g., for changing the caching device if needed.
     return array_ops.identity(var, name="reused")
   else:
     return array_ops.identity(var, name="not_reused")
Contributor: AutumnQYN, Project: tensorflow, Lines: 7, Source: variable_scope_test.py

Example 10: _TestInsertQuantOpForAddAfterConv2d

  def _TestInsertQuantOpForAddAfterConv2d(self, is_training):
    graph = ops.Graph()
    with graph.as_default():
      batch_size, height, width, depth = 5, 128, 128, 3
      input1 = array_ops.zeros((batch_size, height, width, depth))
      input2 = array_ops.zeros((batch_size, height / 2, width / 2, 32))
      conv = conv2d(input1, 32, [5, 5], stride=2, padding='SAME',
                    weights_initializer=self._WeightInit(0.09),
                    activation_fn=None, scope='test/test')
      node = math_ops.add(conv, input2, name='test/add')
      node = nn_ops.relu6(node, name='test/relu6')
      update_barrier = control_flow_ops.no_op(name='update_barrier')
      with ops.control_dependencies([update_barrier]):
        array_ops.identity(node, name='control_dependency')

    quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)

    quantization_node_name = 'FakeQuantWithMinMaxVars'
    conv_quant = graph.get_operation_by_name('test/test/conv_quant/' +
                                             quantization_node_name)
    self.assertEqual(conv_quant.type, quantization_node_name)

    # Scan through all FakeQuant operations, ensuring that the activation
    # isn't in the consumers of the operation. Since activations are folded
    # into the preceding operation during inference, the FakeQuant operation
    # after the activation is all that is needed.
    for op in graph.get_operations():
      if op.type == quantization_node_name:
        quant_op = graph.get_operation_by_name(op.name)
        consumers = []
        for output in quant_op.outputs:
          consumers.extend(output.consumers())

        self.assertNotIn('test/relu6', [c.name for c in consumers])
Contributor: AnishShah, Project: tensorflow, Lines: 34, Source: quantize_test.py

Example 11: __init__

  def __init__(self, alpha, beta, name="Gamma"):
    """Construct Gamma distributions with parameters `alpha` and `beta`.

    The parameters `alpha` and `beta` must be shaped in a way that supports
    broadcasting (e.g. `alpha + beta` is a valid operation).

    Args:
      alpha: `float` or `double` tensor, the shape params of the
        distribution(s).
        alpha must contain only positive values.
      beta: `float` or `double` tensor, the inverse scale params of the
        distribution(s).
        beta must contain only positive values.
      name: The name to prepend to all ops created by this distribution.

    Raises:
      TypeError: if `alpha` and `beta` are different dtypes.
    """
    with ops.op_scope([alpha, beta], name):
      with ops.control_dependencies([
          check_ops.assert_positive(alpha), check_ops.assert_positive(beta)]):
        alpha = array_ops.identity(alpha, name="alpha")
        beta = array_ops.identity(beta, name="beta")

        contrib_tensor_util.assert_same_float_dtype((alpha, beta))
        self._broadcast_tensor = alpha + beta

    self._get_batch_shape = self._broadcast_tensor.get_shape()
    self._get_event_shape = tensor_shape.TensorShape([])

    self._alpha = alpha
    self._beta = beta
    self._name = name
Contributor: 0ruben, Project: tensorflow, Lines: 33, Source: gamma.py

Example 12: _overridden_initial_value_fn

 def _overridden_initial_value_fn(device=d, index=i):
   assert index > 0
   with ops.device(device):
     if context.executing_eagerly():
       return array_ops.identity(value_list[0].value())
     else:
       return array_ops.identity(value_list[0].initial_value)
Contributor: aritratony, Project: tensorflow, Lines: 7, Source: collective_all_reduce_strategy.py

Example 13: __init__

  def __init__(
      self, p, dtype=dtypes.int32, strict=True, strict_statistics=True,
      name="Bernoulli"):
    """Construct Bernoulli distributions.

    Args:
      p: An N-D `Tensor` representing the probability of a positive
          event. Each entry in the `Tensor` parameterizes an independent
          Bernoulli distribution.
      dtype: dtype for samples. Note that other values will take the dtype of p.
      strict: Whether to assert that `0 <= p <= 1`. If not strict, `log_pmf` may
        return nans.
      strict_statistics:  Boolean, default True.  If True, raise an exception if
        a statistic (e.g. mean/mode/etc...) is undefined for any batch member.
        If False, batch members with valid parameters leading to undefined
        statistics will return NaN for this statistic.
      name: A name for this distribution.
    """
    self._strict_statistics = strict_statistics
    self._name = name
    self._dtype = dtype
    self._strict = strict
    check_op = check_ops.assert_less_equal
    with ops.op_scope([p], name):
      with ops.control_dependencies(
          [check_op(p, 1.), check_op(0., p)] if strict else []):
        p = array_ops.identity(p, name="p")
      self._p = p
      self._q = array_ops.identity(1. - p, name="q")
      self._batch_shape = array_ops.shape(self._p)
      self._event_shape = array_ops.constant([], dtype=dtypes.int32)
Contributor: 363158858, Project: tensorflow, Lines: 31, Source: bernoulli.py

Example 14: _reduce_non_distributed_value

def _reduce_non_distributed_value(extended, reduce_op, value, destinations):
  """Reduce a non-DistributedValue `value` to `destinations`."""
  if isinstance(value, values.DistributedValues):
    raise ValueError("You are passing a `DistributedValue` to "
                     "`_reduce_non_distributed_value`, which is not allowed.")

  # If the same value is present on all replicas then the PerReplica value will
  # be a single value. We also handle the case when `value` is a single value
  # and equal to 0.
  if value == 0:
    return 0
  # If there is only a single value and the reduce op is MEAN,
  # that value should be on all destinations.
  if reduce_op == reduce_util.ReduceOp.MEAN:
    return value

  cross_device_ops_lib.validate_destinations(destinations)
  # We do not support a reduce op of SUM if the value is the same across
  # all replicas. We call this as part of assign functions for MirroredVariables
  # and summing up identical values across replicas is not clearly defined.
  if (len(extended.worker_devices) != 1 or
      not cross_device_ops_lib.check_destinations(destinations)):
    raise ValueError("A non-DistributedValues value %s cannot be reduced with "
                     "the given reduce op %s." % (value, reduce_op))
  # TODO(anjalisridhar): Move these methods to a device utility file?
  devices = cross_device_ops_lib.get_devices_from(destinations)
  if len(devices) == 1:
    with ops.device(devices[0]):
      return array_ops.identity(value)
  else:
    value_updates = {}
    for d in devices:
      with ops.device(d):
        value_updates[d] = array_ops.identity(value)
    return values.Mirrored(value_updates)
Contributor: aeverall, Project: tensorflow, Lines: 35, Source: mirrored_strategy.py

Example 15: initialized_value

  def initialized_value(self):
    """Returns the value of the initialized variable.

    You should use this instead of the variable itself to initialize another
    variable with a value that depends on the value of this variable.

    ```python
    # Initialize 'v' with a random tensor.
    v = tf.Variable(tf.truncated_normal([10, 40]))
    # Use `initialized_value` to guarantee that `v` has been
    # initialized before its value is used to initialize `w`.
    # The random values are picked only once.
    w = tf.Variable(v.initialized_value() * 2.0)
    ```

    Returns:
      A `Tensor` holding the value of this variable after its initializer
      has run.
    """
    with ops.control_dependencies(None):
      with ops.control_dependencies([self._initializer_op]):
        # TODO(vrv): Change this class to not take caching_device, but
        # to take the op to colocate the snapshot with, so we can use
        # colocation rather than devices.
        if self._caching_device is not None:
          with ops.device(self._caching_device):
            return array_ops.identity(self._variable)
        else:
          with ops.colocate_with(self._variable.op):
            return array_ops.identity(self._variable)
Contributor: shakamunyi, Project: tensorflow, Lines: 30, Source: variables.py


Note: The tensorflow.python.ops.array_ops.identity examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective authors; copyright of the source code belongs to the original authors, and distribution and use should follow the corresponding project's license. Do not reproduce without permission.