

Python ops.internal_convert_to_tensor Function Code Examples

This article collects typical usage examples of the Python function tensorflow.python.framework.ops.internal_convert_to_tensor. If you are unsure what internal_convert_to_tensor does or how to call it in practice, the selected examples below should help.


The following shows 15 code examples of the internal_convert_to_tensor function, sorted by popularity by default.

Example 1: args_to_matching_eager

def args_to_matching_eager(l, ctx, default_dtype=None):
  """Convert sequence `l` to eager same-type Tensors."""
  # TODO(josh11b): Could we do a better job if we also passed in the
  # allowed dtypes when that was known?

  # Is some input already a Tensor with a dtype?
  dtype = None
  for t in l:
    if isinstance(t, ops.EagerTensor):
      dtype = t.dtype
      break

  if dtype is None:
    # Infer a dtype based on the first value, and use that dtype for the
    # remaining values.
    ret = []
    for t in l:
      ret.append(ops.internal_convert_to_tensor(
          t, dtype, preferred_dtype=default_dtype, ctx=ctx))
      if dtype is None:
        dtype = ret[-1].dtype
  else:
    ret = [ops.internal_convert_to_tensor(t, dtype, ctx=ctx) for t in l]

  return dtype, ret
Developer: Crazyonxh, Project: tensorflow, Lines: 25, Source: execute.py
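
The dtype matching above is driven by the conversion helper itself. A minimal standalone sketch, assuming a TF 1.x build where ops.internal_convert_to_tensor accepts dtype and preferred_dtype as used in the example (graph mode, made-up values):

from tensorflow.python.framework import dtypes, ops

# preferred_dtype is only a hint, used when nothing else fixes the dtype.
a = ops.internal_convert_to_tensor(3, preferred_dtype=dtypes.float32)
# Passing dtype explicitly pins the result, mirroring how later values are
# converted to match the first tensor's dtype.
b = ops.internal_convert_to_tensor([1, 2, 3], dtype=a.dtype)
print(a.dtype, b.dtype)  # expected: float32 for both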

Example 2: __init__

  def __init__(self, example_indices, feature_indices, feature_values):
    """Creates a `SparseFeatureColumn` representation.

    Args:
      example_indices: A 1-D int64 tensor of shape `[N]`. Also accepts
        python lists or numpy arrays.
      feature_indices: A 1-D int64 tensor of shape `[N]`. Also accepts
        python lists or numpy arrays.
      feature_values: An optional 1-D float tensor of shape `[N]`. Also
        accepts python lists or numpy arrays.

    Returns:
      A `SparseFeatureColumn`
    """
    with name_scope(None, 'SparseFeatureColumn',
                    [example_indices, feature_indices]):
      self._example_indices = internal_convert_to_tensor(
          example_indices, name='example_indices', dtype=dtypes.int64)
      self._feature_indices = internal_convert_to_tensor(
          feature_indices, name='feature_indices', dtype=dtypes.int64)
    self._feature_values = None
    if feature_values is not None:
      with name_scope(None, 'SparseFeatureColumn', [feature_values]):
        self._feature_values = internal_convert_to_tensor(
            feature_values, name='feature_values', dtype=dtypes.float32)
Developer: AliMiraftab, Project: tensorflow, Lines: 25, Source: sparse_feature_column.py
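
A short construction sketch. The import path below assumes the TF 1.x contrib layout that sparse_feature_column.py comes from and may differ between releases; the input lists are made up:

from tensorflow.contrib.linear_optimizer.python.ops.sparse_feature_column import (
    SparseFeatureColumn)

# Plain Python lists are accepted and converted internally via
# internal_convert_to_tensor to int64 / float32 tensors.
sfc = SparseFeatureColumn(
    example_indices=[0, 1, 1],
    feature_indices=[5, 2, 4],
    feature_values=[1.0, 1.0, 1.0])
print(sfc.example_indices.dtype)  # expected: int64
print(sfc.feature_values.dtype)   # expected: float32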

Example 3: unregularized_loss

  def unregularized_loss(self, examples):
    """Add operations to compute the loss (without the regularization loss).

    Args:
      examples: Examples to compute unregularized loss on.

    Returns:
      An Operation that computes mean (unregularized) loss for given set of
      examples.

    Raises:
      ValueError: if examples are not well defined.
    """
    self._assertSpecified([
        'example_labels', 'example_weights', 'sparse_features', 'dense_features'
    ], examples)
    self._assertList(['sparse_features', 'dense_features'], examples)
    with name_scope('sdca/unregularized_loss'):
      predictions = math_ops.cast(
          self._linear_predictions(examples), dtypes.float64)
      labels = math_ops.cast(
          internal_convert_to_tensor(examples['example_labels']),
          dtypes.float64)
      weights = math_ops.cast(
          internal_convert_to_tensor(examples['example_weights']),
          dtypes.float64)

      if self._options['loss_type'] == 'logistic_loss':
        return math_ops.reduce_sum(math_ops.multiply(
            sigmoid_cross_entropy_with_logits(labels=labels,
                                              logits=predictions),
            weights)) / math_ops.reduce_sum(weights)

      if self._options['loss_type'] == 'poisson_loss':
        return math_ops.reduce_sum(math_ops.multiply(
            log_poisson_loss(targets=labels, log_input=predictions),
            weights)) / math_ops.reduce_sum(weights)

      if self._options['loss_type'] in ['hinge_loss', 'smooth_hinge_loss']:
        # hinge_loss = max{0, 1 - y_i w*x} where y_i \in {-1, 1}. So, we need to
        # first convert 0/1 labels into -1/1 labels.
        all_ones = array_ops.ones_like(predictions)
        adjusted_labels = math_ops.subtract(2 * labels, all_ones)
        # Tensor that contains (unweighted) error (hinge loss) per
        # example.
        error = nn_ops.relu(
            math_ops.subtract(all_ones,
                              math_ops.multiply(adjusted_labels, predictions)))
        weighted_error = math_ops.multiply(error, weights)
        return math_ops.reduce_sum(weighted_error) / math_ops.reduce_sum(
            weights)

      # squared loss
      err = math_ops.subtract(labels, predictions)

      weighted_squared_err = math_ops.multiply(math_ops.square(err), weights)
      # SDCA squared loss function is sum(err^2) / (2*sum(weights))
      return (math_ops.reduce_sum(weighted_squared_err) /
              (2.0 * math_ops.reduce_sum(weights)))
Developer: Ajaycs99, Project: tensorflow, Lines: 59, Source: sdca_ops.py
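
One detail worth isolating is the hinge-loss branch above, which maps 0/1 labels to -1/+1 via adjusted = 2*labels - 1. A tiny standalone check (TF 1.x graph mode, made-up labels):

import tensorflow as tf

labels = tf.constant([0.0, 1.0, 1.0, 0.0])
# Same transformation as in the hinge-loss branch: 2*y - 1 maps {0, 1} to {-1, +1}.
adjusted = tf.subtract(2 * labels, tf.ones_like(labels))
with tf.Session() as sess:
  print(sess.run(adjusted))  # [-1.  1.  1. -1.]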

Example 4: testTensorConversion

  def testTensorConversion(self):
    with context.graph_mode():
      _, tower_local = _make_tower_local("sum")
      converted = ops.internal_convert_to_tensor(tower_local, as_ref=False)
      self.assertIsInstance(converted, ops.Tensor)
      self.assertEqual(converted.dtype, tower_local.dtype)

      converted = ops.internal_convert_to_tensor(tower_local, as_ref=True)
      # Resource variables are converted to tensors as well when as_ref is True.
      self.assertIsInstance(converted, ops.Tensor)
      self.assertEqual(converted.dtype, tower_local.dtype)
Developer: Huoxubeiyin, Project: tensorflow, Lines: 11, Source: values_test.py

Example 5: testTensorConversion

  def testTensorConversion(self):
    with context.graph_mode():
      _, replica_local = _make_replica_local(
          variable_scope.VariableAggregation.SUM)
      converted = ops.internal_convert_to_tensor(replica_local, as_ref=False)
      self.assertIsInstance(converted, ops.Tensor)
      self.assertEqual(converted.dtype, replica_local.dtype)

      converted = ops.internal_convert_to_tensor(replica_local, as_ref=True)
      # Resource variables are converted to tensors as well when as_ref is True.
      self.assertIsInstance(converted, ops.Tensor)
      self.assertEqual(converted.dtype, replica_local.dtype)
Developer: kylin9872, Project: tensorflow, Lines: 12, Source: values_test.py

Example 6: regularized_loss

  def regularized_loss(self, examples):
    """Add operations to compute the loss with regularization loss included.

    Args:
      examples: Examples to compute loss on.

    Returns:
      An Operation that computes mean (regularized) loss for given set of
      examples.

    Raises:
      ValueError: if examples are not well defined.
    """
    self._assertSpecified([
        'example_labels', 'example_weights', 'sparse_features', 'dense_features'
    ], examples)
    self._assertList(['sparse_features', 'dense_features'], examples)
    with name_scope('sdca/regularized_loss'):
      weights = internal_convert_to_tensor(examples['example_weights'])
      return ((
          self._l1_loss() +
          # Note that here we are using the raw regularization
          # (as specified by the user) and *not*
          # self._symmetric_l2_regularization().
          self._l2_loss(self._options['symmetric_l2_regularization'])) /
              math_ops.reduce_sum(math_ops.cast(weights, dtypes.float64)) +
              self.unregularized_loss(examples))
Developer: Ajaycs99, Project: tensorflow, Lines: 26, Source: sdca_ops.py

Example 7: convert_to_tensor_or_sparse_tensor

def convert_to_tensor_or_sparse_tensor(value, dtype=None, name=None):
  """Converts value to a `SparseTensor` or `Tensor`.

  Args:
    value: A `SparseTensor`, `SparseTensorValue`, or an object whose type has a
      registered `Tensor` conversion function.
    dtype: Optional element type for the returned tensor. If missing, the
      type is inferred from the type of `value`.
    name: Optional name to use if a new `Tensor` is created.

  Returns:
    A `SparseTensor` or `Tensor` based on `value`.

  Raises:
    RuntimeError: If result type is incompatible with `dtype`.
  """
  if dtype is not None:
    dtype = dtypes.as_dtype(dtype)
  if isinstance(value, sparse_tensor.SparseTensorValue):
    value = sparse_tensor.SparseTensor.from_value(value)
  if isinstance(value, sparse_tensor.SparseTensor):
    if dtype and not dtype.is_compatible_with(value.dtype):
      raise RuntimeError(
          'Sparse dtype: requested = %s, actual = %s' % (
              dtype.name, value.dtype.name))
    return value
  return ops.internal_convert_to_tensor(
      value, dtype=dtype, name=name)
Developer: RapidApplicationDevelopment, Project: tensorflow, Lines: 28, Source: tensor_util.py
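
A short usage sketch, assuming a TF 1.x build where this helper is also exported as tf.convert_to_tensor_or_sparse_tensor:

import tensorflow as tf

dense = tf.convert_to_tensor_or_sparse_tensor([[1, 2], [3, 4]])
sparse_value = tf.SparseTensorValue(
    indices=[[0, 0], [1, 1]], values=[1, 2], dense_shape=[2, 2])
sparse = tf.convert_to_tensor_or_sparse_tensor(sparse_value)
print(type(dense).__name__)   # Tensor
print(type(sparse).__name__)  # SparseTensor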

Example 8: __init__

  def __init__(self, indices, values, dense_shape):
    """Creates a `SparseTensor`.

    Args:
      indices: A 2-D int64 tensor of shape `[N, ndims]`.
      values: A 1-D tensor of any type and shape `[N]`.
      dense_shape: A 1-D int64 tensor of shape `[ndims]`.

    """
    with ops.name_scope(None, "SparseTensor",
                        [indices, values, dense_shape]):
      indices = ops.convert_to_tensor(
          indices, name="indices", dtype=dtypes.int64)
      # Always pass as_ref=True because we want to be able to update
      # values later if it is a VariableOp.
      # TODO(touts): Consider adding mutable_values() when 'values'
      # is a VariableOp and updating users of SparseTensor.
      values = ops.internal_convert_to_tensor(
          values, name="values", as_ref=True)
      dense_shape = ops.convert_to_tensor(
          dense_shape, name="dense_shape", dtype=dtypes.int64)
    self._indices = indices
    self._values = values
    self._dense_shape = dense_shape

    indices_shape = indices.get_shape().with_rank(2)
    values_shape = values.get_shape().with_rank(1)
    dense_shape_shape = dense_shape.get_shape().with_rank(1)

    # Assert number of rows in indices match the number of elements in values.
    indices_shape[0].merge_with(values_shape[0])
    # Assert number of columns in indices matches the number of elements in
    # dense_shape.
    indices_shape[1].merge_with(dense_shape_shape[0])
Developer: AnishShah, Project: tensorflow, Lines: 34, Source: sparse_tensor.py
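
A minimal usage sketch for the constructor above (TF 1.x graph mode), showing the usual indices/values/dense_shape triple and densifying the result:

import tensorflow as tf

st = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1.0, 2.0],
                     dense_shape=[2, 3])
dense = tf.sparse_tensor_to_dense(st)
with tf.Session() as sess:
  print(sess.run(dense))
  # [[1. 0. 0.]
  #  [0. 0. 2.]]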

Example 9: _dense_var_to_tensor

  def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
    """Converts this variable to a tensor."""
    if not self._should_cast():
      return ops.internal_convert_to_tensor(self._variable, dtype, name,
                                            as_ref)
    # TODO(reedwm): Support as_ref?
    assert not as_ref
    if dtype is not None and not dtype.is_compatible_with(self.dtype):
      raise ValueError(
          'Incompatible type conversion requested to type {!r} for variable '
          'of type {!r}'.format(dtype.name, self.dtype.name))
    val = ops.internal_convert_to_tensor(self._variable,
                                         self._variable.dtype, name,
                                         as_ref=False)
    with ops.colocate_with(None, ignore_existing=True):
      with ops.device(val.device):
        return math_ops.cast(val, self.dtype)
Developer: adit-chandra, Project: tensorflow, Lines: 17, Source: autocast_variable.py

Example 10: __init__

  def __init__(self, path):
    """Record the full path to the asset."""
    # The init_scope prevents functions from capturing `path` in an
    # initialization graph, since it is transient and should not end up in a
    # serialized function body.
    with ops.init_scope(), ops.device("CPU"):
      self._path = ops.internal_convert_to_tensor(path, dtype=dtypes.string,
                                                  name="asset_path")
Developer: aritratony, Project: tensorflow, Lines: 8, Source: tracking.py
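
The same conversion can be done standalone; a simplified sketch (without init_scope) that pins a string constant to the CPU the way the asset tracker above does, with a made-up path:

from tensorflow.python.framework import dtypes, ops

# Pin the string constant to the CPU so it never lands on an accelerator.
with ops.device("CPU"):
  path_t = ops.internal_convert_to_tensor(
      "/tmp/vocab.txt", dtype=dtypes.string, name="asset_path")
print(path_t.dtype)  # expected: string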

Example 11: partitioned_call

def partitioned_call(args, f, tout=None, executing_eagerly=None):
  """Executes a function while respecting device annotations.

  Currently, only those functions that execute within the same address space
  can be executed.

  Args:
    args: The arguments of the function, including captured inputs.
    f: The function to execute; an instance of `_DefinedFunction` or
      `_EagerDefinedFunction`.
    tout: a list containing the output dtypes enums; if `None`, inferred from
      the signature of `f`.
    executing_eagerly: (Optional) A boolean indicating whether the context is
      executing eagerly. If `None`, fetched from the global context.

  Returns:
    The list of `Tensor`s returned by invoking `f(args)`. If the function does
    not return anything, then returns `None` if eager execution is enabled, or
    the `Operation` if not.
  """

  if tout is None:
    tout = tuple(x.type for x in f.definition.signature.output_arg)

  if executing_eagerly is None:
    executing_eagerly = context.executing_eagerly()

  if executing_eagerly or len(tout):
    if f.stateful_ops:
      outputs = gen_functional_ops.stateful_partitioned_call(
          args=args, Tout=tout, f=f)
    else:
      outputs = gen_functional_ops.partitioned_call(args=args, Tout=tout, f=f)
    return outputs if outputs else None

  # The generated binding returns an empty list for functions that don't
  # return any Tensors, hence the need to use `create_op` directly.
  args = [ops.internal_convert_to_tensor(x) for x in args]
  tin_attr = attr_value_pb2.AttrValue(
      list=attr_value_pb2.AttrValue.ListValue(
          type=[x.dtype.as_datatype_enum for x in args]))
  tout_attr = attr_value_pb2.AttrValue(
      list=attr_value_pb2.AttrValue.ListValue(type=tout))
  func_attr = attr_value_pb2.AttrValue(
      func=attr_value_pb2.NameAttrList(name=f.name))

  graph = ops.get_default_graph()
  f.add_to_graph(graph)
  op_name = "StatefulPartitionedCall" if f.stateful_ops else "PartitionedCall"
  op = graph.create_op(
      op_name,
      args,
      tout,
      compute_shapes=False,
      name="PartitionedFunctionCall",
      attrs={"Tin": tin_attr, "Tout": tout_attr, "f": func_attr})
  outputs = op.outputs
  return outputs if outputs else op
Developer: LongJun123456, Project: tensorflow, Lines: 58, Source: functional_ops.py

Example 12: _record_gradient

def _record_gradient(op_name, inputs, attrs, results, ctx, name):
  """Records gradients for a TensorFlow operation.

  Args:
    op_name: Name of the TensorFlow operation (see REGISTER_OP in C++ code) to
      execute.
    inputs: A flat list of Tensor object inputs to the operation.
    attrs: A tuple with alternating string attr names and attr values for this
      operation.
    results: The results of the operation (as a flat list).
    ctx: The value of context.context().
    name: Customized name for the operation.

  Returns:
    A list of maybe-wrapped results. Either Tensors or TensorNodes.

  Raises:
    An exception on error.
  """
  if not tape.could_possibly_record():
    return

  if op_name in _ops_which_dont_need_outputs:
    op_outputs = None
  else:
    # TODO(apassos) this line creates a weak circular reference where the
    # backprop function keeps an output alive which in turn keeps the tape entry
    # alive which keeps the backprop function alive. Figure out how to break
    # this up without breaking second derivatives of ops like Exp whose
    # gradients depend only on the outputs.
    op_outputs = results

  if op_name in _ops_which_dont_need_inputs:
    op_inputs = None
  else:
    op_inputs = inputs

  num_inputs = len(inputs)

  def grad_fn(*orig_outputs):
    """Generated gradient function."""
    result = _magic_gradient_function(op_name, attrs, num_inputs,
                                      op_inputs, op_outputs, orig_outputs)
    if _tracing:
      print("Gradient for", (name if name else op_name), "inputs", op_inputs,
            "output_grads", orig_outputs, "gradients", result)
    return result

  inputs = [ops.internal_convert_to_tensor(x, ctx=ctx) for x in inputs]
  tape.record_operation(op_name, results, inputs, [], grad_fn)
  if _tracing:
    print("Computed op", (name if name else op_name), "inputs", inputs,
          "outputs", results)
Developer: Crazyonxh, Project: tensorflow, Lines: 53, Source: backprop.py

Example 13: _l2_loss

  def _l2_loss(self, l2):
    """Computes the (un-normalized) l2 loss of the model."""
    with name_scope('sdca/l2_loss'):
      sums = []
      for name in ['sparse_features_weights', 'dense_features_weights']:
        for var in self._variables[name]:
          for v in self._var_to_list(var):
            weights = internal_convert_to_tensor(v)
            with ops.device(weights.device):
              sums.append(math_ops.reduce_sum(math_ops.square(math_ops.cast(
                  weights, dtypes.float64))))
      # SDCA L2 regularization cost is: l2 * sum(weights^2) / 2
      return l2 * math_ops.add_n(sums) / 2.0
Developer: Ajaycs99, Project: tensorflow, Lines: 13, Source: sdca_ops.py
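
The regularization term itself is just l2 * sum(weights^2) / 2 in float64. A standalone sketch with made-up weight tensors (TF 1.x graph mode):

import tensorflow as tf

l2 = 0.5
weights = [tf.constant([1.0, -2.0]), tf.constant([[0.5], [3.0]])]
sums = [tf.reduce_sum(tf.square(tf.cast(w, tf.float64))) for w in weights]
l2_loss = l2 * tf.add_n(sums) / 2.0
with tf.Session() as sess:
  print(sess.run(l2_loss))  # 0.5 * (1 + 4 + 0.25 + 9) / 2 = 3.5625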

Example 14: _l1_loss

  def _l1_loss(self):
    """Computes the (un-normalized) l1 loss of the model."""
    with name_scope('sdca/l1_loss'):
      sums = []
      for name in ['sparse_features_weights', 'dense_features_weights']:
        for var in self._variables[name]:
          for v in self._var_to_list(var):
            weights = internal_convert_to_tensor(v)
            with ops.device(weights.device):
              sums.append(
                  math_ops.reduce_sum(
                      math_ops.abs(math_ops.cast(weights, dtypes.float64))))
      # SDCA L1 regularization cost is: l1 * sum(|weights|)
      return self._options['symmetric_l1_regularization'] * math_ops.add_n(sums)
Developer: Ajaycs99, Project: tensorflow, Lines: 14, Source: sdca_ops.py

Example 15: _convert_n_to_tensor

  def _convert_n_to_tensor(self, input_list, as_ref=False):
    """Converts input list to a set of tensors."""
    # input_list can be a list of Variables (that are implicitly partitioned),
    # in which case the underlying logic in internal_convert_to_tensor will not
    # concatenate the partitions together.  This method takes care of the
    # concatenating (we only allow partitioning on the first axis).
    output_list = []
    for x in input_list:
      tensor_to_convert = x
      if isinstance(x, list) or isinstance(x, var_ops.PartitionedVariable):
        # We only allow for partitioning on the first axis.
        tensor_to_convert = array_ops.concat(x, axis=0)
      output_list.append(internal_convert_to_tensor(
          tensor_to_convert, as_ref=as_ref))
    return output_list
Developer: Ajaycs99, Project: tensorflow, Lines: 15, Source: sdca_ops.py
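
A sketch of the concat-then-convert pattern described above, with plain constants standing in for the axis-0 shards of a partitioned variable (TF 1.x graph mode):

import tensorflow as tf
from tensorflow.python.framework import ops

partitions = [tf.constant([[1.0], [2.0]]), tf.constant([[3.0]])]  # axis-0 shards
merged = tf.concat(partitions, axis=0)  # only first-axis partitioning is supported
tensor = ops.internal_convert_to_tensor(merged)
with tf.Session() as sess:
  print(sess.run(tensor))  # [[1.] [2.] [3.]]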


Note: The tensorflow.python.framework.ops.internal_convert_to_tensor examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use are subject to each project's License. Do not reproduce without permission.