Python ops.control_dependencies Method Code Examples

This article collects and summarizes typical usage examples of the Python method tensorflow.python.framework.ops.control_dependencies. If you have been wondering what exactly ops.control_dependencies does, how to use it, or what real-world examples look like, the curated code samples below may help. You can also explore further usage examples from the module it belongs to, tensorflow.python.framework.ops.


The following presents 15 code examples of the ops.control_dependencies method, sorted by popularity by default.
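Before turning to the examples, a minimal sketch of the core guarantee may help: ops created inside a `control_dependencies` context will not execute until the listed ops have run. The snippet below is illustrative only (the counter variable is made up) and uses the public `tf.compat.v1` API, which is backed by the internal `ops.control_dependencies` shown in the examples.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

counter = tf.Variable(0, name="counter")
increment = tf.assign_add(counter, 1)

# `read` is created inside the context, so it carries a control input
# on `increment`: every fetch of `read` first runs the increment.
with tf.control_dependencies([increment]):
    read = tf.identity(counter)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(read))  # 1
    print(sess.run(read))  # 2

Note that the dependency attaches only to ops created inside the `with` block; referencing a tensor that was created outside the block does not pick it up.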

Example 1: _finish

# Required import: from tensorflow.python.framework import ops [as alias]
# Alternatively: from tensorflow.python.framework.ops import control_dependencies [as alias]
def _finish(self, update_ops, name_scope):
        # Update the power accumulators.
        with ops.control_dependencies(update_ops):
            with ops.colocate_with(self._iterations):
                update_beta1 = self._beta1_power.assign(
                    self._beta1_power * self._beta1_t,
                    use_locking=self._use_locking)
                update_beta2 = self._beta2_power.assign(
                    self._beta2_power * self._beta2_t,
                    use_locking=self._use_locking)
                t = self._iterations + 1.
                update_iterations = self._iterations.assign(t, use_locking=self._use_locking)
                momentum_cache_power = self._get_momentum_cache(self._schedule_decay_t, t)
                momentum_cache_t = self._beta1_t * (1. - 0.5 * momentum_cache_power)
                update_m_schedule = self._m_schedule.assign(
                    self._m_schedule * momentum_cache_t,
                    use_locking=self._use_locking)
        return control_flow_ops.group(
            *update_ops + [update_beta1, update_beta2] + [update_iterations, update_m_schedule],
            name=name_scope) 
Author: ChenglongChen | Project: tensorflow-XNN | Lines: 22 | Source: optimizer.py

Example 2: _dense_inner_flatten

# Required import: from tensorflow.python.framework import ops [as alias]
# Alternatively: from tensorflow.python.framework.ops import control_dependencies [as alias]
def _dense_inner_flatten(inputs, new_rank):
  """Helper function for `inner_flatten`."""
  rank_assertion = check_ops.assert_rank_at_least(
      inputs, new_rank, message='inputs has rank less than new_rank')
  with ops.control_dependencies([rank_assertion]):
    outer_dimensions = array_ops.strided_slice(
        array_ops.shape(inputs), [0], [new_rank - 1])
    new_shape = array_ops.concat((outer_dimensions, [-1]), 0)
    reshaped = array_ops.reshape(inputs, new_shape)

  # if `new_rank` is an integer, try to calculate new shape.
  if isinstance(new_rank, six.integer_types):
    static_shape = inputs.get_shape()
    if static_shape is not None and static_shape.dims is not None:
      static_shape = static_shape.as_list()
      static_outer_dims = static_shape[:new_rank - 1]
      static_inner_dims = static_shape[new_rank - 1:]
      flattened_dimension = 1
      for inner_dim in static_inner_dims:
        if inner_dim is None:
          flattened_dimension = None
          break
        flattened_dimension *= inner_dim
      reshaped.set_shape(static_outer_dims + [flattened_dimension])
  return reshaped 
Author: taehoonlee | Project: tensornets | Lines: 27 | Source: layers.py
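A quick way to see what `_dense_inner_flatten` computes: the hedged sketch below (standalone, with made-up shapes) reproduces the assert-free core of the function for `new_rank=2`, collapsing everything after the first dimension into one.

import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

new_rank = 2
x = tf.placeholder(tf.float32, shape=[None, 4, 5])

# Keep the first new_rank - 1 dimensions; collapse the rest into -1,
# mirroring the strided_slice/concat/reshape sequence above.
outer_dims = tf.strided_slice(tf.shape(x), [0], [new_rank - 1])
new_shape = tf.concat((outer_dims, [-1]), 0)
flattened = tf.reshape(x, new_shape)

with tf.Session() as sess:
    out = sess.run(flattened, {x: np.zeros((3, 4, 5), np.float32)})
    print(out.shape)  # (3, 20)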

Example 3: main_op_with_restore

# Required import: from tensorflow.python.framework import ops [as alias]
# Alternatively: from tensorflow.python.framework.ops import control_dependencies [as alias]
def main_op_with_restore(restore_op_name):
  """Returns a main op to init variables, tables and restore the graph.

  Returns the main op, i.e. the group of ops that initializes all
  variables, initializes local variables, initializes all tables, and
  runs the op given by `restore_op_name`.

  Args:
    restore_op_name: Name of the op to use to restore the graph.

  Returns:
    The set of ops to be run as part of the main op upon the load operation.
  """
  with ops.control_dependencies([main_op()]):
    main_op_with_restore = control_flow_ops.group(restore_op_name)
  return main_op_with_restore 
Author: ryfeus | Project: lambda-packs | Lines: 18 | Source: main_op_impl.py

Example 4: initialized_value

# Required import: from tensorflow.python.framework import ops [as alias]
# Alternatively: from tensorflow.python.framework.ops import control_dependencies [as alias]
def initialized_value(self):
    """Returns the value of the initialized variable.

    You should use this instead of the variable itself to initialize another
    variable with a value that depends on the value of this variable.

    ```python
    # Initialize 'v' with a random tensor.
    v = tf.Variable(tf.truncated_normal([10, 40]))
    # Use `initialized_value` to guarantee that `v` has been
    # initialized before its value is used to initialize `w`.
    # The random values are picked only once.
    w = tf.Variable(v.initialized_value() * 2.0)
    ```

    Returns:
      A `Tensor` holding the value of this variable after its initializer
      has run.
    """
    with ops.control_dependencies(None):
      return control_flow_ops.cond(is_variable_initialized(self),
                                   self.read_value,
                                   lambda: self.initial_value) 
Author: ryfeus | Project: lambda-packs | Lines: 25 | Source: variables.py

Example 5: grad

# Required import: from tensorflow.python.framework import ops [as alias]
# Alternatively: from tensorflow.python.framework.ops import control_dependencies [as alias]
def grad(self, source, flow=None, name=None):
    # tensor_array_grad requires a flow input when forward
    # TensorArrays are dynamically sized.  This forces the creation
    # of the grad TensorArray only once the final forward array's size
    # is fixed.
    if flow is None:
      flow = self.flow
    with ops.name_scope(name, "TensorArrayGrad", [self._handle]):
      with ops.colocate_with(self._handle):
        g_handle, unused_flow = gen_data_flow_ops._tensor_array_grad_v3(
            handle=self._handle, source=source, flow_in=flow, name=name)
        with ops.control_dependencies([g_handle]):
          flow = array_ops.identity(flow, name="gradient_flow")
        g = TensorArray(
            dtype=self._dtype,
            handle=g_handle,
            flow=flow,
            infer_shape=self._infer_shape,
            colocate_with_first_write_call=False)
        g._element_shape = self._element_shape
        return g 
Author: ryfeus | Project: lambda-packs | Lines: 23 | Source: tensor_array_ops.py

Example 6: AddValue

# Required import: from tensorflow.python.framework import ops [as alias]
# Alternatively: from tensorflow.python.framework.ops import control_dependencies [as alias]
def AddValue(self, val):
    """Add `val` to the current context and its outer context recursively."""
    if val.name in self._values:
      # Use the real value if it comes from outer context. This is needed in
      # particular for nested conds.
      result = self._external_values.get(val.name)
      result = val if result is None else result
    else:
      result = val
      self._values.add(val.name)
      if self._outer_context:
        result = self._outer_context.AddValue(val)
        self._values.add(result.name)
      with ops.control_dependencies(None):
        result = _SwitchRefOrTensor(result, self._pred)[self._branch]
      result.op.graph.prevent_fetching(result.op)
      # pylint: disable=protected-access
      result.op._set_control_flow_context(self)
      # pylint: enable=protected-access

      self._values.add(result.name)
      self._external_values[val.name] = result
    return result 
Author: ryfeus | Project: lambda-packs | Lines: 25 | Source: control_flow_ops.py

Example 7: add_check_numerics_ops

# Required import: from tensorflow.python.framework import ops [as alias]
# Alternatively: from tensorflow.python.framework.ops import control_dependencies [as alias]
def add_check_numerics_ops():
  """Connect a `check_numerics` to every floating point tensor.

  `check_numerics` operations themselves are added for each `half`, `float`,
  or `double` tensor in the graph. For all ops in the graph, the
  `check_numerics` op for all of its (`half`, `float`, or `double`) inputs
  is guaranteed to run before the `check_numerics` op on any of its outputs.

  Returns:
    A `group` op depending on all `check_numerics` ops added.
  """
  check_op = []
  # This code relies on the ordering of ops in get_operations().
  # The producer of a tensor always comes before that tensor's consumer in
  # this list. This is true because get_operations() returns ops in the order
  # added, and an op can only be added after its inputs are added.
  for op in ops.get_default_graph().get_operations():
    for output in op.outputs:
      if output.dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
        message = op.name + ":" + str(output.value_index)
        with ops.control_dependencies(check_op):
          check_op = [array_ops.check_numerics(output, message=message)]
  return control_flow_ops.group(*check_op) 
Author: ryfeus | Project: lambda-packs | Lines: 25 | Source: numerics.py
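A hedged usage sketch (the graph and feed values are made up): run the group op returned by `add_check_numerics_ops` alongside your real fetches, and any NaN or Inf in the graph surfaces as an `InvalidArgumentError` instead of propagating silently.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.placeholder(tf.float32, shape=[None])
y = tf.log(x)  # log(0) yields -inf

checks = tf.add_check_numerics_ops()

with tf.Session() as sess:
    try:
        sess.run([y, checks], {x: [0.0, 1.0]})
    except tf.errors.InvalidArgumentError as err:
        print("caught non-finite value:", err.message.splitlines()[0])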

Example 8: _ZetaGrad

# Required import: from tensorflow.python.framework import ops [as alias]
# Alternatively: from tensorflow.python.framework.ops import control_dependencies [as alias]
def _ZetaGrad(op, grad):
  """Returns gradient of zeta(x, q) with respect to x and q."""
  # TODO(tillahoffmann): Add derivative with respect to x
  x = op.inputs[0]
  q = op.inputs[1]
  # Broadcast gradients
  sx = array_ops.shape(x)
  sq = array_ops.shape(q)
  unused_rx, rq = gen_array_ops._broadcast_gradient_args(sx, sq)
  # Evaluate gradient
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    q = math_ops.conj(q)
    partial_q = -x * math_ops.zeta(x + 1, q)
    # TODO(b/36815900): Mark None return values as NotImplemented
    return (None,
            array_ops.reshape(math_ops.reduce_sum(partial_q * grad, rq), sq)) 
Author: ryfeus | Project: lambda-packs | Lines: 19 | Source: math_grad.py
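The registered formula can be sanity-checked numerically with `tf.gradients`; the sketch below (values chosen arbitrarily) confirms that the gradient with respect to `q` matches `-x * zeta(x + 1, q)`.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.constant(3.0)
q = tf.constant(2.0)
z = tf.math.zeta(x, q)

# d/dq zeta(x, q) = -x * zeta(x + 1, q); the derivative w.r.t. x is
# not implemented, as the TODO above notes.
dz_dq, = tf.gradients(z, [q])
expected = -x * tf.math.zeta(x + 1.0, q)

with tf.Session() as sess:
    print(sess.run([dz_dq, expected]))  # both ≈ -0.247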

Example 9: _SquaredDifferenceGrad

# Required import: from tensorflow.python.framework import ops [as alias]
# Alternatively: from tensorflow.python.framework.ops import control_dependencies [as alias]
def _SquaredDifferenceGrad(op, grad):
  """Returns the gradient for (x-y)^2."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  # pylint: disable=protected-access
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  # pylint: enable=protected-access
  # .op works with Tensors or IndexedSlices
  with ops.control_dependencies([grad.op]):
    # The parens ensure that if grad is IndexedSlices, it'll get multiplied by
    # Tensor (not a number like 2.0) which causes it to convert to Tensor.
    x_grad = math_ops.scalar_mul(2.0, grad) * (x - y)
  return (array_ops.reshape(math_ops.reduce_sum(x_grad, rx), sx),
          -array_ops.reshape(math_ops.reduce_sum(x_grad, ry), sy))


# Logical operations have no gradients. 
Author: ryfeus | Project: lambda-packs | Lines: 21 | Source: math_grad.py
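The broadcasting reduction in the return statement is the subtle part: the same `x_grad` is reduced over `rx` for `x` and over `ry` (with a sign flip) for `y`. A small check with `tf.gradients` (shapes made up):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.constant([1.0, 2.0])
y = tf.constant(0.5)  # scalar, broadcast against x
z = tf.squared_difference(x, y)

gx, gy = tf.gradients(tf.reduce_sum(z), [x, y])

with tf.Session() as sess:
    print(sess.run(gx))  # [1. 3.]  -> 2 * (x - y)
    print(sess.run(gy))  # -4.0     -> -sum(2 * (x - y)) over the broadcast axis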

Example 10: _init_clusters_random

# Required import: from tensorflow.python.framework import ops [as alias]
# Alternatively: from tensorflow.python.framework.ops import control_dependencies [as alias]
def _init_clusters_random(self):
    """Does random initialization of clusters.

    Returns:
      Tensor of randomly initialized clusters.
    """
    num_data = math_ops.add_n([array_ops.shape(inp)[0] for inp in self._inputs])
    # Note that for mini-batch k-means, we should ensure that the batch size of
    # data used during initialization is sufficiently large to avoid duplicated
    # clusters.
    with ops.control_dependencies(
        [check_ops.assert_less_equal(self._num_clusters, num_data)]):
      indices = random_ops.random_uniform(
          array_ops.reshape(self._num_clusters, [-1]),
          minval=0,
          maxval=math_ops.cast(num_data, dtypes.int64),
          seed=self._random_seed,
          dtype=dtypes.int64)
      clusters_init = embedding_lookup(
          self._inputs, indices, partition_strategy='div')
      return clusters_init 
Author: ryfeus | Project: lambda-packs | Lines: 23 | Source: clustering_ops.py

Example 11: _init_clusters_random

# Required import: from tensorflow.python.framework import ops [as alias]
# Alternatively: from tensorflow.python.framework.ops import control_dependencies [as alias]
def _init_clusters_random(data, num_clusters, random_seed):
  """Does random initialization of clusters.

  Args:
    data: a list of Tensors with a matrix of data, each row is an example.
    num_clusters: an integer with the number of clusters.
    random_seed: Seed for PRNG used to initialize seeds.

  Returns:
    A Tensor with num_clusters random rows of data.
  """
  assert isinstance(data, list)
  num_data = math_ops.add_n([array_ops.shape(inp)[0] for inp in data])
  with ops.control_dependencies(
      [check_ops.assert_less_equal(num_clusters, num_data)]):
    indices = random_ops.random_uniform(
        [num_clusters],
        minval=0,
        maxval=math_ops.cast(num_data, dtypes.int64),
        seed=random_seed,
        dtype=dtypes.int64)
  indices %= math_ops.cast(num_data, dtypes.int64)
  clusters_init = embedding_lookup(data, indices, partition_strategy='div')
  return clusters_init 
Author: ryfeus | Project: lambda-packs | Lines: 26 | Source: gmm_ops.py

Example 12: __init__

# Required import: from tensorflow.python.framework import ops [as alias]
# Alternatively: from tensorflow.python.framework.ops import control_dependencies [as alias]
def __init__(self, inputs, outputs, updates=None):
    updates = updates or []
    if not isinstance(inputs, (list, tuple)):
      raise TypeError('`inputs` to a TensorFlow backend function '
                      'should be a list or tuple.')
    if not isinstance(outputs, (list, tuple)):
      raise TypeError('`outputs` of a TensorFlow backend function '
                      'should be a list or tuple.')
    if not isinstance(updates, (list, tuple)):
      raise TypeError('`updates` in a TensorFlow backend function '
                      'should be a list or tuple.')
    self.inputs = list(inputs)
    self.outputs = list(outputs)
    with ops.control_dependencies(self.outputs):
      updates_ops = []
      for update in updates:
        if isinstance(update, tuple):
          p, new_p = update
          updates_ops.append(state_ops.assign(p, new_p))
        else:
          # assumed already an op
          updates_ops.append(update)
      self.updates_op = control_flow_ops.group(*updates_ops) 
Author: ryfeus | Project: lambda-packs | Lines: 25 | Source: backend.py
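The ordering trick in this constructor: every update op is created under `control_dependencies(self.outputs)`, so when the function is called, the updates can only run after all outputs have been computed. A minimal, standalone sketch of the same pattern (names invented):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.placeholder(tf.float32)
step = tf.Variable(0.0, name="step")
output = x * 2.0

# As in the backend Function above: the grouped update carries control
# inputs on every output, so it runs only after `output` is computed.
with tf.control_dependencies([output]):
    updates_op = tf.group(tf.assign_add(step, 1.0))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run([output, updates_op], {x: 3.0})[0])  # 6.0
    print(sess.run(step))  # 1.0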

Example 13: _apply_sparse_shared

# Required import: from tensorflow.python.framework import ops [as alias]
# Alternatively: from tensorflow.python.framework.ops import control_dependencies [as alias]
def _apply_sparse_shared(self, grad, var, indices, scatter_add):
        beta1_power = math_ops.cast(self._beta1_power, var.dtype.base_dtype)
        beta2_power = math_ops.cast(self._beta2_power, var.dtype.base_dtype)
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)

        lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))

        # m_t = beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, "m")
        m_scaled_g_values = grad * (1 - beta1_t)
        m_t = state_ops.assign(m, m * beta1_t, use_locking=self._use_locking)
        with ops.control_dependencies([m_t]):
            m_t = scatter_add(m, indices, m_scaled_g_values)

        # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
        v = self.get_slot(var, "v")
        v_scaled_g_values = (grad * grad) * (1 - beta2_t)
        v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
        with ops.control_dependencies([v_t]):
            v_t = scatter_add(v, indices, v_scaled_g_values)

        # amsgrad
        vhat = self.get_slot(var, "vhat")
        vhat_t = state_ops.assign(vhat, math_ops.maximum(v_t, vhat))
        v_sqrt = math_ops.sqrt(vhat_t)
        var_update = state_ops.assign_sub(var, lr * m_t / (v_sqrt + epsilon_t),
                                          use_locking=self._use_locking)
        return control_flow_ops.group(*[var_update, m_t, v_t, vhat_t]) 
Author: imsb-uke | Project: scGAN | Lines: 33 | Source: AMSGrad.py
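The `assign` then `control_dependencies` then `scatter_add` sequence is a read-after-write guard: the sparse update must not be applied until the decay assignment has been written back, otherwise `scatter_add` could act on the un-decayed slot. A self-contained sketch of the same ordering (variable names and constants are made up):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

m = tf.Variable([1.0, 1.0], name="m")
beta1 = 0.9

# First decay the whole accumulator, then add the gradient slice;
# the control dependency forces the decay to finish first.
m_decayed = tf.assign(m, m * beta1)
with tf.control_dependencies([m_decayed]):
    m_t = tf.scatter_add(m, [0], [0.5])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(m_t))  # [1.4 0.9]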

Example 14: _resource_scatter_add

# Required import: from tensorflow.python.framework import ops [as alias]
# Alternatively: from tensorflow.python.framework.ops import control_dependencies [as alias]
def _resource_scatter_add(self, x, i, v):
        with ops.control_dependencies(
                [resource_variable_ops.resource_scatter_add(x.handle, i, v)]):
            return x.value() 
Author: imsb-uke | Project: scGAN | Lines: 6 | Source: AMSGrad.py

Example 15: _finish

# Required import: from tensorflow.python.framework import ops [as alias]
# Alternatively: from tensorflow.python.framework.ops import control_dependencies [as alias]
def _finish(self, update_ops, name_scope):
        # Update the power accumulators.
        with ops.control_dependencies(update_ops):
            with ops.colocate_with(self._beta1_power):
                update_beta1 = self._beta1_power.assign(
                    self._beta1_power * self._beta1_t,
                    use_locking=self._use_locking)
                update_beta2 = self._beta2_power.assign(
                    self._beta2_power * self._beta2_t,
                    use_locking=self._use_locking)
        return control_flow_ops.group(*update_ops + [update_beta1, update_beta2],
                                      name=name_scope) 
Author: imsb-uke | Project: scGAN | Lines: 14 | Source: AMSGrad.py


Note: The tensorflow.python.framework.ops.control_dependencies examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Refer to the corresponding project's License before distributing or using the code, and do not republish without permission.