

Python array_ops.identity Method Code Examples

This article collects typical usage examples of the Python method tensorflow.python.ops.array_ops.identity. If you are unsure what array_ops.identity does, how to call it, or where it fits, the curated examples below should help. You can also explore further usage examples from the tensorflow.python.ops.array_ops module.


The 15 code examples of the array_ops.identity method below are sorted by popularity by default.

Example 1: identity

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import identity [as alias]
def identity(self):
    """Returns a TensorArray with the same content and properties.

    Returns:
      A new TensorArray object with flow that ensures the control dependencies
      from the contexts will become control dependencies for writes, reads, etc.
      Use this object for all subsequent operations.
    """
    flow = array_ops.identity(self._flow)
    ta = TensorArray(
        dtype=self._dtype, handle=self._handle, flow=flow,
        infer_shape=self._infer_shape,
        colocate_with_first_write_call=self._colocate_with_first_write_call)
    ta._element_shape = self._element_shape
    ta._colocate_with = self._colocate_with
    return ta 
Developer: ryfeus, Project: lambda-packs, Lines of code: 18, Source: tensor_array_ops.py
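
A minimal usage sketch (not from the project above) of what identity() buys you: control dependencies in scope when it is called get threaded into the new TensorArray's flow. It assumes TF 1.x graph mode; names are illustrative.

import tensorflow as tf

ta = tf.TensorArray(dtype=tf.float32, size=2)
ta = ta.write(0, 10.0)
setup = tf.no_op(name="setup")
with tf.control_dependencies([setup]):
    # The returned TensorArray's flow depends on `setup`, so every
    # subsequent write/read through `ta` runs after it.
    ta = ta.identity()
ta = ta.write(1, 20.0)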

Example 2: grad

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import identity [as alias]
def grad(self, source, flow=None, name=None):
    # tensor_array_grad requires a flow input when forward
    # TensorArrays are dynamically sized.  This forces the creation
    # of the grad TensorArray only once the final forward array's size
    # is fixed.
    if flow is None:
      flow = self.flow
    with ops.name_scope(name, "TensorArrayGrad", [self._handle]):
      with ops.colocate_with(self._handle):
        g_handle, unused_flow = gen_data_flow_ops._tensor_array_grad_v3(
            handle=self._handle, source=source, flow_in=flow, name=name)
        with ops.control_dependencies([g_handle]):
          flow = array_ops.identity(flow, name="gradient_flow")
        g = TensorArray(
            dtype=self._dtype,
            handle=g_handle,
            flow=flow,
            infer_shape=self._infer_shape,
            colocate_with_first_write_call=False)
        g._element_shape = self._element_shape
        return g 
Developer: ryfeus, Project: lambda-packs, Lines of code: 23, Source: tensor_array_ops.py

Example 3: read_value

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import identity [as alias]
def read_value(self):
    """Constructs an op which reads the value of this variable.

    Should be used when there are multiple reads, or when it is desirable to
    read the value only after some condition is true.

    Returns:
      The read operation.
    """
    with ops.name_scope("Read"):
      with ops.device(self._handle.device):
        value = gen_resource_variable_ops.read_variable_op(
            self._handle, dtype=self._dtype)
    # Return an identity so it can get placed on whatever device the context
    # specifies instead of the device where the variable is.
    return array_ops.identity(value) 
Developer: ryfeus, Project: lambda-packs, Lines of code: 18, Source: resource_variable_ops.py
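
A hedged sketch of why the trailing identity matters here: the raw read runs next to the variable, but the identity copy lands on the caller's device. Device strings are placeholders; assumes TF 1.x resource variables.

import tensorflow as tf

with tf.device("/cpu:0"):
    v = tf.get_variable("v", shape=[], use_resource=True)

with tf.device("/device:GPU:0"):
    # read_variable_op is pinned to /cpu:0 beside the variable; the
    # wrapping identity places the returned copy here, on the GPU.
    r = v.read_value()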

Example 4: _apply_dense

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import identity [as alias]
def _apply_dense(self, grad, var):
    g_acc = self.get_slot(var, "gradient_accumulator")
    gg_acc = self.get_slot(var, "gradient_squared_accumulator")
    # Performance optimization so that the worker creates a copy of the global
    # step to avoid overloading the parameter server holding the global step.
    with ops.device(grad[0].device):
      global_step = array_ops.identity(self._global_step) + 1
    return training_ops.apply_adagrad_da(
        var,
        g_acc,
        gg_acc,
        grad,
        math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
        math_ops.cast(self._l1_regularization_strength, var.dtype.base_dtype),
        math_ops.cast(self._l2_regularization_strength, var.dtype.base_dtype),
        global_step,
        use_locking=self._use_locking) 
Developer: ryfeus, Project: lambda-packs, Lines of code: 19, Source: adagrad_da.py
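
The identity-under-device pattern in the snippet generalizes; here is an illustrative helper (not part of the original optimizer) that makes a local copy of any remote tensor:

from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops

def local_copy(tensor, device):
    """Copies `tensor` onto `device` so repeated consumers read the
    local copy instead of hitting the remote parameter server."""
    with ops.device(device):
        return array_ops.identity(tensor)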

Example 5: _apply_sparse

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import identity [as alias]
def _apply_sparse(self, grad, var):
    g_acc = self.get_slot(var, "gradient_accumulator")
    gg_acc = self.get_slot(var, "gradient_squared_accumulator")
    # Performance optimization so that the worker creates a copy of the global
    # step to avoid overloading the parameter server holding the global step.
    with ops.device(grad[0].device):
      global_step = array_ops.identity(self._global_step) + 1
    return training_ops.sparse_apply_adagrad_da(
        var,
        g_acc,
        gg_acc,
        grad.values,
        grad.indices,
        math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
        math_ops.cast(self._l1_regularization_strength, var.dtype.base_dtype),
        math_ops.cast(self._l2_regularization_strength, var.dtype.base_dtype),
        global_step,
        use_locking=self._use_locking) 
Developer: ryfeus, Project: lambda-packs, Lines of code: 20, Source: adagrad_da.py

Example 6: _resource_apply_sparse

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import identity [as alias]
def _resource_apply_sparse(self, grad, var, indices):
    g_acc = self.get_slot(var, "gradient_accumulator")
    gg_acc = self.get_slot(var, "gradient_squared_accumulator")
    # Performance optimization so that the worker creates a copy of the global
    # step to avoid overloading the parameter server holding the global step.
    with ops.device(grad[0].device):
      global_step = array_ops.identity(self._global_step) + 1
    return training_ops.resource_sparse_apply_adagrad_da(
        var.handle,
        g_acc.handle,
        gg_acc.handle,
        grad,
        indices,
        math_ops.cast(self._learning_rate_tensor, grad.dtype),
        math_ops.cast(self._l1_regularization_strength, grad.dtype),
        math_ops.cast(self._l2_regularization_strength, grad.dtype),
        global_step,
        use_locking=self._use_locking) 
Developer: ryfeus, Project: lambda-packs, Lines of code: 20, Source: adagrad_da.py

Example 7: state_tuple_to_dict

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import identity [as alias]
def state_tuple_to_dict(state):
  """Returns a dict containing flattened `state`.

  Args:
    state: A `Tensor` or a nested tuple of `Tensors`. All of the `Tensor`s must
    have the same rank and agree on all dimensions except the last.

  Returns:
    A dict containing the `Tensor`s that make up `state`. The keys of the dict
    are of the form "STATE_PREFIX_i" where `i` is the place of this `Tensor`
    in a depth-first traversal of `state`.
  """
  with ops.name_scope('state_tuple_to_dict'):
    flat_state = nest.flatten(state)
    state_dict = {}
    for i, state_component in enumerate(flat_state):
      state_name = _get_state_name(i)
      state_value = (None if state_component is None else array_ops.identity(
          state_component, name=state_name))
      state_dict[state_name] = state_value
  return state_dict 
Developer: ryfeus, Project: lambda-packs, Lines of code: 23, Source: state_saving_rnn_estimator.py
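
A usage sketch, assuming TF 1.x and that the internal _get_state_name helper produces the documented "STATE_PREFIX_i" keys:

import tensorflow as tf

cell = tf.nn.rnn_cell.LSTMCell(num_units=4)
state = cell.zero_state(batch_size=2, dtype=tf.float32)  # (c, h) tuple
state_dict = state_tuple_to_dict(state)
# Two entries, one per flattened component, each wrapped in
# array_ops.identity so it carries a stable, predictable name.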

Example 8: _mean

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import identity [as alias]
def _mean(self):
    shape = self.batch_shape.concatenate(self.event_shape)
    has_static_shape = shape.is_fully_defined()
    if not has_static_shape:
      shape = array_ops.concat([
          self.batch_shape_tensor(),
          self.event_shape_tensor(),
      ], 0)

    if self.loc is None:
      return array_ops.zeros(shape, self.dtype)

    if has_static_shape and shape == self.loc.get_shape():
      return array_ops.identity(self.loc)

    # Add dummy tensor of zeros to broadcast.  This is only necessary if shape
    # != self.loc.shape, but we could not determine if this is the case.
    return array_ops.identity(self.loc) + array_ops.zeros(shape, self.dtype) 
Developer: ryfeus, Project: lambda-packs, Lines of code: 20, Source: vector_laplace_linear_operator.py
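
The add-zeros trick in the last line works on its own: adding a zeros tensor of the target shape broadcasts `loc` without changing any values. A minimal sketch:

import tensorflow as tf

loc = tf.constant([1.0, 2.0, 3.0])              # shape [3]
mean = tf.identity(loc) + tf.zeros([2, 3], loc.dtype)
# mean has shape [2, 3]; each row equals loc.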

Example 9: identity

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import identity [as alias]
def identity(labeled_tensor, name=None):
  """The identity op.

  See tf.identity.

  Args:
    labeled_tensor: The input tensor.
    name: Optional op name.

  Returns:
    The tensor.
  """
  with ops.name_scope(name, 'lt_identity', [labeled_tensor]) as scope:
    labeled_tensor = convert_to_labeled_tensor(labeled_tensor)
    return LabeledTensor(
        array_ops.identity(
            labeled_tensor.tensor, name=scope),
        labeled_tensor.axes)


# We don't call this slice because that shadows a built-in. Instead, we alias
# this to lt.slice in __init__.py. 
Developer: ryfeus, Project: lambda-packs, Lines of code: 24, Source: core.py

Example 10: _force_data_dependency

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import identity [as alias]
def _force_data_dependency(first_compute, then_compute):
  """Force all of `then_compute` to depend on all of `first_compute`.

  Uses a dummy data dependency, which is useful when running on TPUs because
  XLA ignores control dependencies. Only supports float arguments.

  Args:
    first_compute: `list<Tensor>`. These will be made to run before the
      `Tensor`s `then_compute`.
    then_compute: `list<Tensor>`. These will run after all the `Tensor`s in
      `first_compute`.

  Returns:
    `list<Tensor>`, same length as `then_compute`.

  Raises:
    ValueError: if ranks are unknown or types are not floating.
  """

  def _first_element(x):
    if x.get_shape().ndims is None:
      raise ValueError("Rank of Tensor %s must be known" % x)
    ndims = x.get_shape().ndims
    begin = framework_ops.convert_to_tensor([0] * ndims, dtype=dtypes.int32)
    size = framework_ops.convert_to_tensor([1] * ndims, dtype=dtypes.int32)
    return array_ops.reshape(array_ops.slice(x, begin, size), [])

  first_compute_sum = math_ops.add_n(
      [_first_element(x) for x in first_compute if x is not None])
  dtype = first_compute_sum.dtype
  if not dtype.is_floating:
    raise ValueError("_force_data_dependency only supports floating dtypes.")
  epsilon = np.finfo(dtype.as_numpy_dtype).tiny
  zero = array_ops.stop_gradient(epsilon * first_compute_sum)

  return [
      array_ops.identity(x) + zero if x is not None else None
      for x in then_compute
  ] 
Developer: taehoonlee, Project: tensornets, Lines of code: 41, Source: rev_block_lib.py
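
A hedged usage sketch of the helper (shapes are illustrative): the returned tensor is numerically the same as the input, but now carries a data dependency that XLA cannot ignore.

import tensorflow as tf

x = tf.random_normal([4, 4])
y = tf.random_normal([4, 4])
(y_dep,) = _force_data_dependency([x], [y])
# y_dep == y up to an added stop_gradient(tiny * x[0, 0]) term that is
# negligible in float32, yet forces y_dep to be computed after x.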

Example 11: _prepare

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import identity [as alias]
def _prepare(self, var_list):
    # Get the value of the momentum cache before starting to apply gradients.
    self._m_cache_read = array_ops.identity(self._m_cache)
    return super(NadamW, self)._prepare(var_list)
Developer: OverLordGoldDragon, Project: keras-adamw, Lines of code: 6, Source: optimizers_225tf.py

Example 12: get

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import identity [as alias]
def get(self, name=None):
    """Gets one element from this staging area.

    If the staging area is empty when this operation executes, it will block
    until there is an element to dequeue.

    Note that unlike other ops that can block, like the queue Dequeue
    operations, this can stop other work from happening.  To avoid this, the
    intended use is for this to be called only when there will be an element
    already available.  One method for doing this in a training loop would be to
    run a `put()` call during a warmup session.run call, and then call both
    `get()` and `put()` in each subsequent step.

    The placement of the returned tensor will be determined by the current
    device scope when this function is called.

    Args:
      name: A name for the operation (optional).

    Returns:
      The tuple of tensors that was gotten.
    """
    if name is None:
      name = "%s_get" % self._name

    with ops.colocate_with(self._coloc_op):
      ret = gen_data_flow_ops.unstage(dtypes=self._dtypes,
                                      shared_name=self._name, name=name)

    curr_device_scope = control_flow_ops.no_op().device
    if curr_device_scope != self._coloc_op.device:
      for i in range(len(ret)):
        ret[i] = array_ops.identity(ret[i])

    for output, shape in zip(ret, self._shapes):
      output.set_shape(shape)

    return self._get_return_value(ret) 
Developer: ryfeus, Project: lambda-packs, Lines of code: 40, Source: data_flow_ops.py
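
A sketch of the warmup pattern the docstring recommends, assuming TF 1.x sessions; `num_steps` is a placeholder:

import tensorflow as tf
from tensorflow.python.ops import data_flow_ops

stage = data_flow_ops.StagingArea(dtypes=[tf.float32])
put_op = stage.put([tf.random_normal([128])])
batch = stage.get()

with tf.Session() as sess:
    sess.run(put_op)                # warmup: pre-fill one element
    for _ in range(num_steps):
        sess.run([batch, put_op])   # steady state: overlap get and put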

Example 13: clip_by_average_norm

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import identity [as alias]
def clip_by_average_norm(t, clip_norm, name=None):
  """Clips tensor values to a maximum average L2-norm.

  Given a tensor `t`, and a maximum clip value `clip_norm`, this operation
  normalizes `t` so that its average L2-norm is less than or equal to
  `clip_norm`. Specifically, if the average L2-norm is already less than or
  equal to `clip_norm`, then `t` is not modified. If the average L2-norm is
  greater than `clip_norm`, then this operation returns a tensor of the same
  type and shape as `t` with its values set to:

  `t * clip_norm / l2norm_avg(t)`

  In this case, the average L2-norm of the output tensor is `clip_norm`.

  This operation is typically used to clip gradients before applying them with
  an optimizer.

  Args:
    t: A `Tensor`.
    clip_norm: A 0-D (scalar) `Tensor` > 0. A maximum clipping value.
    name: A name for the operation (optional).

  Returns:
    A clipped `Tensor`.
  """
  with ops.name_scope(name, "clip_by_average_norm", [t, clip_norm]) as name:
    t = ops.convert_to_tensor(t, name="t")

    # Calculate L2-norm per element, clip elements by ratio of clip_norm to
    # L2-norm per element
    n_element = math_ops.cast(array_ops.size(t), dtypes.float32)
    l2norm_inv = math_ops.rsqrt(
        math_ops.reduce_sum(t * t, math_ops.range(array_ops.rank(t))))
    tclip = array_ops.identity(
        t * clip_norm * math_ops.minimum(
            l2norm_inv * n_element, constant_op.constant(1.0) / clip_norm),
        name=name)

  return tclip 
Developer: ryfeus, Project: lambda-packs, Lines of code: 41, Source: clip_ops.py
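
A worked sketch of the formula: for t = [2, 0, 0, 0] the average L2-norm is ||t|| / 4 = 0.5, so clip_norm = 0.25 scales the tensor by 0.25 / 0.5 (assumes TF 1.x, where this function is exported as tf.clip_by_average_norm):

import tensorflow as tf

t = tf.constant([2.0, 0.0, 0.0, 0.0])          # ||t|| = 2, avg norm = 0.5
clipped = tf.clip_by_average_norm(t, clip_norm=0.25)
# => [1.0, 0.0, 0.0, 0.0], whose average L2-norm is exactly 0.25.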

Example 14: add_n

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import identity [as alias]
def add_n(inputs, name=None):
  """Adds all input tensors element-wise.

  Args:
    inputs: A list of `Tensor` objects, each with same shape and type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as the elements of `inputs`.

  Raises:
    ValueError: If `inputs` don't all have the same shape and dtype or the
      shape cannot be inferred.
  """
  if not inputs or not isinstance(inputs, (list, tuple)):
    raise ValueError("inputs must be a list of at least one Tensor with the "
                     "same dtype and shape")
  inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
  if not all(isinstance(x, ops.Tensor) for x in inputs):
    raise ValueError("inputs must be a list of at least one Tensor with the "
                     "same dtype and shape")

  if len(inputs) == 1:
    if name:
      return array_ops.identity(inputs[0], name=name)
    return inputs[0]
  return gen_math_ops._add_n(inputs, name=name) 
Developer: ryfeus, Project: lambda-packs, Lines of code: 29, Source: math_ops.py
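
A minimal usage sketch, including the single-input shortcut that falls through to identity:

import tensorflow as tf

a = tf.constant([1.0, 2.0])
b = tf.constant([3.0, 4.0])
total = tf.add_n([a, b])            # => [4.0, 6.0]
one = tf.add_n([a], name="total")   # single input: identity(a, name="total")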

Example 15: read_value

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import identity [as alias]
def read_value(self):
    """Returns the value of this variable, read in the current context.

    Can be different from value() if it's on another device, with control
    dependencies, etc.

    Returns:
      A `Tensor` containing the value of the variable.
    """
    return array_ops.identity(self._variable, name="read") 
Developer: ryfeus, Project: lambda-packs, Lines of code: 12, Source: variables.py


Note: The tensorflow.python.ops.array_ops.identity examples above were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects and copyright remains with their original authors; consult each project's license before distributing or reusing the code. Do not reproduce without permission.