

Python array_ops.constant Method Code Examples

This article collects typical usage examples of the tensorflow.python.ops.array_ops.constant method in Python. If you are wondering what array_ops.constant does, how to call it, or what real-world uses look like, the curated examples below may help. You can also explore further usage examples from the tensorflow.python.ops.array_ops module.


The following presents 15 code examples of the array_ops.constant method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
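Before the individual examples, a minimal sketch of the call itself may be useful. The snippet below is illustrative only, assumes a TF 1.x graph-mode environment, and is not taken from any of the projects cited below; array_ops.constant simply builds a constant tensor from a Python value (in these TF 1.x sources, array_ops re-exports the constant op from constant_op).

# Illustrative sketch (TF 1.x graph mode assumed), not drawn from the examples below.
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops

# Build constant tensors from Python values; no session is needed to construct them.
names = array_ops.constant(["a", "b"], dtype=dtypes.string, name="names")
zero = array_ops.constant(0.0, dtype=dtypes.float64, name="zero")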

Example 1: dense_to_sparse

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import constant [as alias]
def dense_to_sparse(tensor, eos_token=0, outputs_collections=None, scope=None):
  """Converts a dense tensor into a sparse tensor.

  An example use would be to convert dense labels to sparse ones
  so that they can be fed to the ctc_loss.

  Args:
     tensor: An `int` `Tensor` to be converted to a `SparseTensor`.
     eos_token: An integer. It is part of the target label that signifies the
       end of a sentence.
     outputs_collections: Collection to add the outputs.
     scope: Optional scope for name_scope.
  """
  with variable_scope.variable_scope(scope, 'dense_to_sparse', [tensor]) as sc:
    tensor = ops.convert_to_tensor(tensor)
    indices = array_ops.where(
        math_ops.not_equal(tensor, constant_op.constant(eos_token,
                                                        tensor.dtype)))
    values = array_ops.gather_nd(tensor, indices)
    shape = array_ops.shape(tensor, out_type=dtypes.int64)
    outputs = sparse_tensor.SparseTensor(indices, values, shape)
    return utils.collect_named_outputs(outputs_collections, sc.name, outputs) 
Author: taehoonlee, Project: tensornets, Lines: 24, Source: layers.py
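A hypothetical usage sketch, not from the tensornets source, may make the behavior concrete; it assumes the imports used in the snippet above are in scope and that evaluation happens in a TF 1.x session.

# Hypothetical usage of dense_to_sparse above (imports as in the snippet assumed).
# Dense labels padded with eos_token=0 become a SparseTensor usable with ctc_loss.
labels = array_ops.constant([[3, 5, 2, 0, 0],
                             [7, 1, 0, 0, 0]])
sparse_labels = dense_to_sparse(labels, eos_token=0)
# sparse_labels.indices lists only the non-eos positions, sparse_labels.values is
# [3, 5, 2, 7, 1], and sparse_labels.dense_shape is [2, 5].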

Example 2: _safe_scalar_div

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import constant [as alias]
def _safe_scalar_div(numerator, denominator, name):
  """Divides two values, returning 0 if the denominator is 0.

  Args:
    numerator: A scalar `float64` `Tensor`.
    denominator: A scalar `float64` `Tensor`.
    name: Name for the returned op.

  Returns:
    0 if `denominator` == 0, else `numerator` / `denominator`
  """
  numerator.get_shape().with_rank_at_most(1)
  denominator.get_shape().with_rank_at_most(1)
  return control_flow_ops.cond(
      math_ops.equal(
          array_ops.constant(0.0, dtype=dtypes.float64), denominator),
      lambda: array_ops.constant(0.0, dtype=dtypes.float64),
      lambda: math_ops.div(numerator, denominator),
      name=name) 
Author: ryfeus, Project: lambda-packs, Lines: 21, Source: metrics_impl.py
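As a quick, hypothetical check of the helper above (again assuming the module imports from metrics_impl.py and a TF 1.x session), a zero denominator yields 0.0 instead of inf or nan:

# Hypothetical check of _safe_scalar_div above; both inputs must be float64.
num = array_ops.constant(3.0, dtype=dtypes.float64)
den = array_ops.constant(0.0, dtype=dtypes.float64)
safe = _safe_scalar_div(num, den, name="safe_div")  # evaluates to 0.0 in a session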

Example 3: __init__

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import constant [as alias]
def __init__(
      self, dtypes, shapes=None, capacity=10, shared_name='feeding_queue'):
    self._dtypes = dtypes
    self._shapes = shapes
    self._shared_name = shared_name
    self._capacity = capacity
    self._local_q = data_flow_ops.FIFOQueue(capacity=self._capacity,
                                            dtypes=self._dtypes,
                                            shapes=self._shapes,
                                            name=self._shared_name,
                                            shared_name=self._shared_name)
    self._num_remote_feeds = 0

    # Fake do-nothing operation that's used to prevent remote queues
    # from being closed, and as a workaround for b/32749157
    self._fake_op = array_ops.constant('dummy close', name='feeder_fake_op').op
    self._feeding_event = threading.Event() 
Author: ryfeus, Project: lambda-packs, Lines: 19, Source: feeder.py

Example 4: testOverflow

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import constant [as alias]
def testOverflow(self):
    x = [1000, 1001, 1002, 1003]
    for dtype in [np.float16, np.float32, np.double]:
      x_np = np.array(x, dtype=dtype)
      max_np = np.max(x_np)
      with self.assertRaisesRegexp(RuntimeWarning,
                                   "overflow encountered in exp"):
        out = log(np.sum(exp(x_np)))
        if out == np.inf:
          raise RuntimeWarning("overflow encountered in exp")

      with self.test_session(use_gpu=True):
        x_tf = constant_op.constant(x_np, shape=x_np.shape)
        y_tf_np = math_ops.reduce_logsumexp(x_tf).eval()
        y_np = log(np.sum(exp(x_np - max_np))) + max_np
        self.assertAllClose(y_tf_np, y_np) 
Author: tobegit3hub, Project: deep_image_model, Lines: 18, Source: math_ops_test.py

Example 5: testUnderflow

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import constant [as alias]
def testUnderflow(self):
    x = [-1000, -1001, -1002, -1003]
    for dtype in [np.float16, np.float32, np.double]:
      x_np = np.array(x, dtype=dtype)
      max_np = np.max(x_np)
      with self.assertRaisesRegexp(RuntimeWarning,
                                   "divide by zero encountered in log"):
        out = log(np.sum(exp(x_np)))
        if out == -np.inf:
          raise RuntimeWarning("divide by zero encountered in log")

      with self.test_session(use_gpu=True):
        x_tf = constant_op.constant(x_np, shape=x_np.shape)
        y_tf_np = math_ops.reduce_logsumexp(x_tf).eval()
        y_np = log(np.sum(exp(x_np - max_np))) + max_np
        self.assertAllClose(y_tf_np, y_np) 
Author: tobegit3hub, Project: deep_image_model, Lines: 18, Source: math_ops_test.py
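Both tests above verify the numerically stable identity behind reduce_logsumexp (log and exp in the snippets appear to be module-level aliases for np.log and np.exp in the original math_ops_test.py). A standalone NumPy sketch of that identity, written here purely for illustration:

import numpy as np

# logsumexp(x) == log(sum(exp(x - max(x)))) + max(x); subtracting the max keeps
# exp() in range, so neither +1000-scale nor -1000-scale inputs overflow/underflow.
x = np.array([1000.0, 1001.0, 1002.0, 1003.0])
stable = np.log(np.sum(np.exp(x - x.max()))) + x.max()   # finite, about 1006.44
# naive = np.log(np.sum(np.exp(x)))                       # overflows to inf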

Example 6: testConsistent

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import constant [as alias]
def testConsistent(self):
    nums, divs = self.intTestData()
    with self.test_session():
      tf_result = (
          math_ops.floor_div(nums, divs) * divs + math_ops.floor_mod(nums, divs)
      ).eval()
      tf_nums = array_ops.constant(nums)
      tf_divs = array_ops.constant(divs)
      tf2_result = (tf_nums // tf_divs * tf_divs + tf_nums % tf_divs).eval()
      np_result = (nums // divs) * divs + (nums % divs)
      # consistency with numpy
      self.assertAllEqual(tf_result, np_result)
      # consistency with two forms of divide
      self.assertAllEqual(tf_result, tf2_result)
      # consistency for truncation form
      tf3_result = (
          math_ops.truncatediv(nums, divs) * divs
          + math_ops.truncatemod(nums, divs)
      ).eval()
      expanded_nums = np.reshape(np.tile(nums, divs.shape[1]),
                                 (nums.shape[0], divs.shape[1]))
      # Consistent with desire to get numerator
      self.assertAllEqual(tf3_result, expanded_nums)
      # Consistent with desire to get numerator
      self.assertAllEqual(tf_result, expanded_nums) 
Author: tobegit3hub, Project: deep_image_model, Lines: 27, Source: math_ops_test.py
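The core identity being exercised is a == (a // b) * b + a % b for floored division. A small, purely illustrative NumPy version with made-up data (not the test's intTestData):

import numpy as np

# Illustrative only: floored division and floored modulus recompose the numerator,
# including for negative numerators, with NumPy-style broadcasting.
nums = np.array([[-5], [-2], [3], [7]])      # shape (4, 1)
divs = np.array([[2, 3]])                    # shape (1, 2)
assert np.all((nums // divs) * divs + nums % divs == nums)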

Example 7: report_uninitialized_resources

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import constant [as alias]
def report_uninitialized_resources(resource_list=None,
                                   name="report_uninitialized_resources"):
  """Returns the names of all uninitialized resources in resource_list.

  If the returned tensor is empty then all resources have been initialized.

  Args:
   resource_list: resources to check. If None, will use shared_resources() +
    local_resources().
   name: name for the resource-checking op.

  Returns:
   Tensor containing names of the handles of all resources which have not
   yet been initialized.

  """
  if resource_list is None:
    resource_list = shared_resources() + local_resources()
  with ops.name_scope(name):
    if not resource_list:
      # Return an empty tensor so we only need to check for returned tensor
      # size being 0 as an indication of model ready.
      return array_ops.constant([], dtype=dtypes.string)
    # Get a 1-D boolean tensor listing whether each resource is initialized.
    variables_mask = math_ops.logical_not(
        array_ops.stack([r.is_initialized for r in resource_list]))
    # Get a 1-D string tensor containing all the resource names.
    variable_names_tensor = array_ops.constant(
        [s.handle.name for s in resource_list])
    # Return a 1-D tensor containing all the names of uninitialized resources.
    return array_ops.boolean_mask(variable_names_tensor, variables_mask) 
Author: ryfeus, Project: lambda-packs, Lines: 33, Source: resources.py

Example 8: report_uninitialized_variables

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import constant [as alias]
def report_uninitialized_variables(var_list=None,
                                   name="report_uninitialized_variables"):
  """Adds ops to list the names of uninitialized variables.

  When run, it returns a 1-D tensor containing the names of uninitialized
  variables if there are any, or an empty array if there are none.

  Args:
    var_list: List of `Variable` objects to check. Defaults to the
      value of `global_variables() + local_variables()`
    name: Optional name of the `Operation`.

  Returns:
    A 1-D tensor containing names of the uninitialized variables, or an empty
    1-D tensor if there are no variables or no uninitialized variables.
  """
  if var_list is None:
    var_list = global_variables() + local_variables()
    # Backwards compatibility for old-style variables. TODO(touts): remove.
    if not var_list:
      var_list = []
      for op in ops.get_default_graph().get_operations():
        if op.type in ["Variable", "VariableV2", "AutoReloadVariable"]:
          var_list.append(op.outputs[0])
  with ops.name_scope(name):
    if not var_list:
      # Return an empty tensor so we only need to check for returned tensor
      # size being 0 as an indication of model ready.
      return array_ops.constant([], dtype=dtypes.string)
    else:
      # Get a 1-D boolean tensor listing whether each variable is initialized.
      variables_mask = math_ops.logical_not(
          array_ops.stack(
              [state_ops.is_variable_initialized(v) for v in var_list]))
      # Get a 1-D string tensor containing all the variable names.
      variable_names_tensor = array_ops.constant([s.op.name for s in var_list])
      # Return a 1-D tensor containing all the names of uninitialized variables.
      return array_ops.boolean_mask(variable_names_tensor, variables_mask)

# pylint: disable=protected-access 
Author: ryfeus, Project: lambda-packs, Lines: 42, Source: variables.py
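A hypothetical end-to-end check in TF 1.x graph mode (the public tf.report_uninitialized_variables endpoint exposes the function above) could look like this:

# Hypothetical TF 1.x usage; run the op before and after initialization.
import tensorflow as tf

v = tf.Variable(1.0, name="my_var")
uninit = tf.report_uninitialized_variables()
with tf.Session() as sess:
  print(sess.run(uninit))                      # e.g. [b'my_var']
  sess.run(tf.global_variables_initializer())
  print(sess.run(uninit))                      # empty: everything is initialized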

Example 9: PostProcessing

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import constant [as alias]
def PostProcessing(self):
    """Perform postprocessing at the end of gradients().

    We have created the gradient graph at this point. So this function
    can be used to perform any postprocessing on the gradient graph.
    We currently perform the following postprocessing:
      1. Patch the gradient graph if the output of a loop variable
         doesn't depend on its input.
    """
    for _, grad_state in self._map.items():
      for _, b_merge in grad_state.switch_map.items():
        if b_merge.op.inputs[0] == b_merge.op.inputs[1]:
          # The value of this loop variable at iteration i+1 doesn't
          # depend on its value at iteration i. So use zeros as the
          # gradients for all iterations > 0.
          dtype = b_merge.op.inputs[0].dtype
          shape = b_merge.op.inputs[0].get_shape()
          # pylint: disable=protected-access
          if shape.is_fully_defined():
            grad_state.grad_context.Enter()
            # Create a zeros and use it for iterations > 0.
            grad_val = constant_op.constant(0, dtype=dtype, shape=shape)
            next_grad_val = _NextIteration(grad_val)
            grad_state.grad_context.Exit()
          else:
            # Create a zeros in the outer grad context.
            outer_grad_ctxt = grad_state.grad_context.outer_context
            if outer_grad_ctxt: outer_grad_ctxt.Enter()
            enter_grad_op = b_merge.op.inputs[0].op
            enter_grad = enter_grad_op.inputs[0]
            grad_shape = array_ops.shape_internal(enter_grad, optimize=False)
            grad_val = array_ops.zeros(grad_shape)
            if outer_grad_ctxt: outer_grad_ctxt.Exit()
            # Use the zeros for iterations > 0.
            grad_state.grad_context.Enter()
            next_grad_val = _NextIteration(grad_val)
            grad_state.grad_context.Exit()
          b_merge.op._update_input(1, next_grad_val)
          # pylint: enable=protected-access 
Author: ryfeus, Project: lambda-packs, Lines: 41, Source: control_flow_ops.py

Example 10: _event_shape_tensor

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import constant [as alias]
def _event_shape_tensor(self):
    return array_ops.constant([], dtype=dtypes.int32) 
Author: ryfeus, Project: lambda-packs, Lines: 4, Source: bernoulli.py

Example 11: _SegmentMeanGrad

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import constant [as alias]
def _SegmentMeanGrad(op, grad):
  """Gradient for SegmentMean."""
  input_rank = array_ops.rank(op.inputs[0])
  ones_shape = array_ops.concat([
      array_ops.shape(op.inputs[1]),
      array_ops.fill(array_ops.expand_dims(input_rank - 1, 0), 1)
  ], 0)
  ones = array_ops.fill(ones_shape,
                        constant_op.constant(1, dtype=grad.dtype))
  scaled_grad = math_ops.div(grad, math_ops.segment_sum(ones, op.inputs[1]))
  return array_ops.gather(scaled_grad, op.inputs[1]), None 
Author: ryfeus, Project: lambda-packs, Lines: 13, Source: math_grad.py
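What the gradient above computes can be illustrated with plain NumPy on made-up data: each input element receives its segment's upstream gradient divided by that segment's size.

import numpy as np

# Illustration of the SegmentMean gradient for 1-D data (hypothetical example).
segment_ids = np.array([0, 0, 1, 1])           # two segments of size 2 each
upstream = np.array([10.0, 20.0])              # d(loss)/d(segment_mean)
counts = np.bincount(segment_ids)              # plays the role of segment_sum(ones)
input_grad = upstream[segment_ids] / counts[segment_ids]
# -> [ 5.,  5., 10., 10.]: each segment's gradient is split evenly over its members.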

Example 12: _ErfcGrad

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import constant [as alias]
def _ErfcGrad(op, grad):
  """Returns -grad * 2/sqrt(pi) * exp(-x**2)."""
  x = op.inputs[0]
  minus_two_over_root_pi = constant_op.constant(
      -2 / np.sqrt(np.pi), dtype=grad.dtype)
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * minus_two_over_root_pi * math_ops.exp(-math_ops.square(x)) 
Author: ryfeus, Project: lambda-packs, Lines: 10, Source: math_grad.py

Example 13: _DigammaGrad

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import constant [as alias]
def _DigammaGrad(op, grad):
  """Compute gradient of the digamma function with respect to its argument."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * math_ops.polygamma(array_ops.constant(1, dtype=x.dtype), x) 
Author: ryfeus, Project: lambda-packs, Lines: 8, Source: math_grad.py

Example 14: _AsinGrad

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import constant [as alias]
def _AsinGrad(op, grad):
  """Returns grad * 1/sqrt(1-x^2)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    x2 = math_ops.square(x)
    one = constant_op.constant(1, dtype=grad.dtype)
    den = math_ops.sqrt(math_ops.subtract(one, x2))
    inv = math_ops.reciprocal(den)
    return grad * inv 
Author: ryfeus, Project: lambda-packs, Lines: 12, Source: math_grad.py

Example 15: _AcosGrad

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import constant [as alias]
def _AcosGrad(op, grad):
  """Returns grad * -1/sqrt(1-x^2)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    x2 = math_ops.square(x)
    one = constant_op.constant(1, dtype=grad.dtype)
    den = math_ops.sqrt(math_ops.subtract(one, x2))
    inv = math_ops.reciprocal(den)
    return -grad * inv 
Author: ryfeus, Project: lambda-packs, Lines: 12, Source: math_grad.py
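A quick NumPy finite-difference check of the formula used by _AsinGrad/_AcosGrad above (our own sanity check, not part of the TensorFlow source):

import numpy as np

# d/dx arccos(x) = -1 / sqrt(1 - x**2); a central difference agrees closely.
x, eps = 0.3, 1e-6
numeric = (np.arccos(x + eps) - np.arccos(x - eps)) / (2 * eps)
analytic = -1.0 / np.sqrt(1.0 - x ** 2)
assert abs(numeric - analytic) < 1e-6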


Note: The tensorflow.python.ops.array_ops.constant examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by many developers, and the copyright of the source code belongs to the original authors. Please consult the corresponding project's license before distributing or using the code; do not republish without permission.