

Python math_ops.cast Function Code Examples

This article collects typical usage examples of the tensorflow.python.ops.math_ops.cast function in Python. If you have been wondering what exactly the cast function does, how to call it, or where to find examples of it in use, the curated code examples below may well provide the help you need.


Fifteen code examples of the cast function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
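
As a quick primer before the examples, here is a minimal sketch of what cast itself does, using the public alias tf.cast (which forwards to math_ops.cast); a TF 2.x eager environment is assumed:

import tensorflow as tf

x = tf.constant([1.8, 2.2, -3.7], dtype=tf.float32)
# Casting float -> int truncates toward zero rather than rounding.
print(tf.cast(x, tf.int32).numpy())      # [ 1  2 -3]
# Casting real -> complex fills in a zero imaginary part.
print(tf.cast(x, tf.complex64).numpy())  # [ 1.8+0.j  2.2+0.j -3.7+0.j]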

Example 1: _spectrum_to_circulant_1d

  def _spectrum_to_circulant_1d(self, spectrum, shape, dtype):
    """Creates a circulant matrix from a spectrum.

    Intentionally done in an explicit yet inefficient way.  This provides a
    cross check to the main code that uses fancy reshapes.

    Args:
      spectrum: Float or complex `Tensor`.
      shape:  Python list.  Desired shape of returned matrix.
      dtype:  Type to cast the returned matrix to.

    Returns:
      Circulant (batch) matrix of desired `dtype`.
    """
    spectrum = _to_complex(spectrum)
    spectrum_shape = self._shape_to_spectrum_shape(shape)
    domain_dimension = spectrum_shape[-1]
    if not domain_dimension:
      return array_ops.zeros(shape, dtype)

    # Explicitly compute the action of spectrum on basis vectors.
    matrix_rows = []
    for m in range(domain_dimension):
      x = np.zeros([domain_dimension])
      # x is a basis vector.
      x[m] = 1.0
      fft_x = fft_ops.fft(math_ops.cast(x, spectrum.dtype))
      h_convolve_x = fft_ops.ifft(spectrum * fft_x)
      matrix_rows.append(h_convolve_x)
    matrix = array_ops.stack(matrix_rows, axis=-1)
    return math_ops.cast(matrix, dtype)
Developer: aritratony, Project: tensorflow, Lines: 31, Source: linear_operator_circulant_test.py
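
As a quick cross-check of the construction in Example 1 outside TensorFlow, here is a numpy sketch under the same convention (the spectrum is the DFT of the circulant matrix's first column; the values here are illustrative):

import numpy as np

spectrum = np.fft.fft([4.0, 1.0, 0.0, 1.0])  # DFT of the first column
columns = []
for m in range(4):
    e = np.zeros(4)
    e[m] = 1.0  # basis vector
    columns.append(np.fft.ifft(spectrum * np.fft.fft(e)))
matrix = np.stack(columns, axis=-1).real
print(matrix[:, 0])  # [4. 1. 0. 1.]: the first column is recovered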

Example 2: __init__

  def __init__(self, partitioned_dim_sizes, inner_dim_sizes,
               dim_size_dtype=None):
    """Creates a RaggedTensorDynamicShape.

    Args:
      partitioned_dim_sizes: A `list` of 0-D or 1-D integer `Tensor`, one for
        each partitioned dimension.  If dimension `d` is uniform, then
        `partitioned_dim_sizes[d]` must be an integer scalar, specifying the
        size of all slices across dimension `d`.  If dimension `d` is ragged,
        then `partitioned_dim_sizes[d]` must be an integer vector, specifying
        the size of each slice across dimension `d`.
      inner_dim_sizes: A 1-D integer `Tensor`, whose length is equal to the
        number of inner dimensions.  `inner_dim_sizes[n]` is the size of all
        slices across the `n`th inner dimension (which is the
        `(len(partitioned_dim_sizes)+n)`th dimension in the overall tensor).
      dim_size_dtype: dtype for dimension sizes.  If not specified, then it
        is chosen based on the dtypes of `partitioned_dim_sizes` and
        `inner_dim_sizes`.
    """
    assert isinstance(partitioned_dim_sizes, (list, tuple))

    with ops.name_scope(None, 'RaggedTensorDynamicShape',
                        (partitioned_dim_sizes, inner_dim_sizes)):
      partitioned_dim_sizes = tuple(
          ops.convert_to_tensor(size, name='partitioned_dimension_size_%d' % i)
          for (i, size) in enumerate(partitioned_dim_sizes))
      inner_dim_sizes = ops.convert_to_tensor(
          inner_dim_sizes, name='inner_dim_sizes')

      # Validate shapes.
      if partitioned_dim_sizes:
        for axis, dimension_size in enumerate(partitioned_dim_sizes):
          if dimension_size.shape.ndims is None:
            raise ValueError(
                'rank of partitioned_dim_sizes[%d] is unknown' % axis)
          dimension_size.shape.with_rank_at_most(1)
        if partitioned_dim_sizes[0].shape.ndims == 1:
          raise ValueError('outermost partitioned dimension must be uniform')
        if partitioned_dim_sizes[-1].shape.ndims == 0:
          raise ValueError('innermost partitioned dimension must be ragged')
      inner_dim_sizes.shape.assert_has_rank(1)

      # Convert dimension size tensors to a single dtype.
      if dim_size_dtype is None:
        dim_size_dtypes = set([p.dtype for p in partitioned_dim_sizes
                               if p.shape.ndims == 1])
        if not dim_size_dtypes:
          dim_size_dtype = dtypes.int64
        elif len(dim_size_dtypes) == 1:
          dim_size_dtype = dim_size_dtypes.pop()
        else:
          if not ragged_config.auto_cast_partition_dtype():
            raise ValueError('partitioned_dim_sizes must have matching dtypes')
          dim_size_dtype = dtypes.int64
      partitioned_dim_sizes = tuple(math_ops.cast(p, dim_size_dtype)
                                    for p in partitioned_dim_sizes)
      inner_dim_sizes = math_ops.cast(inner_dim_sizes, dim_size_dtype)

      self._partitioned_dim_sizes = partitioned_dim_sizes
      self._inner_dim_sizes = inner_dim_sizes
Developer: aritratony, Project: tensorflow, Lines: 60, Source: ragged_tensor_shape.py
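
A hypothetical invocation of this constructor (assuming a TF version where the class is importable from tensorflow.python.ops.ragged.ragged_tensor_shape), describing a ragged tensor with a uniform outer dimension of size 2, row lengths [3, 1] in the ragged dimension, and no inner dimensions. Per the validation above, the outermost entry must be a scalar and the innermost a vector, which this example satisfies:

import tensorflow as tf
from tensorflow.python.ops.ragged.ragged_tensor_shape import (
    RaggedTensorDynamicShape)

shape = RaggedTensorDynamicShape(
    partitioned_dim_sizes=[tf.constant(2),        # uniform: integer scalar
                           tf.constant([3, 1])],  # ragged: integer vector
    inner_dim_sizes=tf.constant([], dtype=tf.int64))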

Example 3: get_beta_accumulators

def get_beta_accumulators(opt, dtype):
  local_step = math_ops.cast(opt.iterations + 1, dtype)
  beta_1_t = math_ops.cast(opt._get_hyper("beta_1"), dtype)
  beta_1_power = math_ops.pow(beta_1_t, local_step)
  beta_2_t = math_ops.cast(opt._get_hyper("beta_2"), dtype)
  beta_2_power = math_ops.pow(beta_2_t, local_step)
  return (beta_1_power, beta_2_power)
Developer: JonathanRaiman, Project: tensorflow, Lines: 7, Source: adam_test.py
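
A hedged usage sketch (assuming a TF2 tf.keras.optimizers.Adam built on optimizer_v2, where opt.iterations and opt._get_hyper behave as the helper expects; newer Keras 3 optimizers drop _get_hyper):

import tensorflow as tf
from tensorflow.python.ops import math_ops

opt = tf.keras.optimizers.Adam(beta_1=0.9, beta_2=0.999)
beta_1_power, beta_2_power = get_beta_accumulators(opt, tf.float32)
# Before the first update step, local_step == 1, so each power equals its beta.
print(float(beta_1_power), float(beta_2_power))  # 0.9 0.999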

Example 4: weighted

  def weighted(y_true, y_pred, weights, mask=None):
    """Wrapper function.

    Arguments:
        y_true: `y_true` argument of `fn`.
        y_pred: `y_pred` argument of `fn`.
        weights: Weights tensor.
        mask: Mask tensor.

    Returns:
        Scalar tensor.
    """
    # score_array has ndim >= 2
    score_array = fn(y_true, y_pred)
    if mask is not None:
      # Cast the mask to floatX to avoid float64 upcasting in theano
      mask = math_ops.cast(mask, K.floatx())
      # mask should have the same shape as score_array
      score_array *= mask
      #  the loss per batch should be proportional
      #  to the number of unmasked samples.
      score_array /= K.mean(mask)

    # apply sample weighting
    if weights is not None:
      # reduce score_array to same ndim as weight array
      ndim = K.ndim(score_array)
      weight_ndim = K.ndim(weights)
      score_array = K.mean(score_array, axis=list(range(weight_ndim, ndim)))
      score_array *= weights
      score_array /= K.mean(
          math_ops.cast(math_ops.not_equal(weights, 0), K.floatx()))
    return K.mean(score_array)
Developer: jinxin0924, Project: tensorflow, Lines: 33, Source: training_utils.py
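
To see why the code divides by K.mean(mask), here is a self-contained numeric sketch with plain tf ops standing in for the Keras backend helpers (the values are illustrative):

import tensorflow as tf

score = tf.constant([2.0, 4.0, 6.0, 8.0])  # per-sample losses
mask = tf.constant([1.0, 1.0, 0.0, 0.0])   # last two samples masked out
masked = score * mask                      # [2., 4., 0., 0.]
# Dividing by mean(mask) rescales the result to the mean over the
# unmasked samples only: (6/4) / (2/4) = 3.
print(float(tf.reduce_mean(masked) / tf.reduce_mean(mask)))  # 3.0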

Example 5: _batch_norm

  def _batch_norm(self, x, mean, var, offset, scale, epsilon):
    # We compute the batch norm manually in this function because
    # nn_impl.batch_normalization does not support float16 yet.
    # TODO(reedwm): Add float16 support to nn_impl.batch_normalization.
    inv = math_ops.rsqrt(var + epsilon) * scale
    y = math_ops.cast(x, scale.dtype) * inv + (offset - mean * inv)
    return math_ops.cast(y, x.dtype)
Developer: SylChan, Project: tensorflow, Lines: 7, Source: nn_fused_batchnorm_test.py
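
A quick float32 sanity check that the manual formula matches the library op (a sketch; tf.nn.batch_normalization is the public face of the nn_impl.batch_normalization mentioned in the comment):

import tensorflow as tf

x = tf.random.normal([4, 3])
mean, var = tf.nn.moments(x, axes=[0])
offset, scale, eps = tf.zeros([3]), tf.ones([3]), 1e-3

inv = tf.math.rsqrt(var + eps) * scale
manual = x * inv + (offset - mean * inv)
library = tf.nn.batch_normalization(x, mean, var, offset, scale, eps)
print(float(tf.reduce_max(tf.abs(manual - library))))  # ~0.0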

Example 6: per_step_batch_loss

  def per_step_batch_loss(self, features, mode, state):
    """Computes predictions, losses, and intermediate model states.

    Args:
      features: A dictionary with times, values, and (optionally) exogenous
          regressors. See `define_loss`.
      mode: The tf.estimator.ModeKeys mode to use (TRAIN, EVAL, INFER).
      state: Model-dependent state, each with size [batch size x ...]. The
          number and type will typically be fixed by the model (for example a
          mean and variance).
    Returns:
      A tuple of (loss, filtered_states, predictions)
        loss: Average loss values across the batch.
        filtered_states: For each Tensor in `state` with shape [batch size x
            ...], `filtered_states` has a Tensor with shape [batch size x window
            size x ...] with filtered state for each part of the batch and
            window.
        predictions: A dictionary with model-dependent one-step-ahead (or
            at-least-one-step-ahead with missing values) predictions, with keys
            indicating the type of prediction and values having shape [batch
            size x window size x ...]. For example state space models provide
            "mean", "covariance", and "log_likelihood".

    """
    self._check_graph_initialized()
    times = math_ops.cast(features[TrainEvalFeatures.TIMES], dtype=dtypes.int64)
    values = math_ops.cast(features[TrainEvalFeatures.VALUES], dtype=self.dtype)
    exogenous_regressors = self._process_exogenous_features(
        times=times,
        features={key: value for key, value in features.items()
                  if key not in [TrainEvalFeatures.TIMES,
                                 TrainEvalFeatures.VALUES]})
    def _batch_loss_filtering_step(step_number, current_times, state):
      """Make a prediction and update it based on data."""
      current_values = values[:, step_number, :]
      state = self._apply_exogenous_update(
          step_number=step_number, current_times=current_times, state=state,
          raw_features=features,
          embedded_exogenous_regressors=exogenous_regressors)
      predicted_state, predictions = self._prediction_step(
          current_times=current_times,
          state=state)
      filtered_state, outputs = self._filtering_step(
          current_times=current_times,
          current_values=current_values,
          state=predicted_state,
          predictions=predictions)
      return filtered_state, outputs
    state, outputs = self._state_update_loop(
        times=times, state=state, state_update_fn=_batch_loss_filtering_step,
        outputs=["loss"] + self._train_output_names)
    outputs["loss"].set_shape(times.get_shape())
    loss_sum = math_ops.reduce_sum(outputs["loss"])
    per_observation_loss = (loss_sum / math_ops.cast(
        math_ops.reduce_prod(array_ops.shape(times)), dtype=self.dtype))
    per_observation_loss += self._loss_additions(times, values, mode)
    # Since we have window-level additions to the loss, its per-step value is
    # misleading, so we avoid returning it.
    del outputs["loss"]
    return per_observation_loss, state, outputs
Developer: AutumnQYN, Project: tensorflow, Lines: 60, Source: model.py
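
The final normalization divides the summed window loss by batch_size * window_size; as a standalone equivalent of that last step (the shapes here are illustrative):

import tensorflow as tf

step_losses = tf.ones([3, 5])  # batch size 3, window size 5
loss_sum = tf.reduce_sum(step_losses)
per_observation = loss_sum / tf.cast(
    tf.reduce_prod(tf.shape(step_losses)), tf.float32)
print(float(per_observation))  # 1.0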

Example 7: assert_integer_form

def assert_integer_form(
    x, data=None, summarize=None, message=None,
    int_dtype=None, name="assert_integer_form"):
  """Assert that x has integer components (or floats equal to integers).

  Args:
    x: Floating-point `Tensor`
    data: The tensors to print out if the condition is `False`. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    int_dtype: A `tf.dtype` used to cast the float to. The default (`None`)
      implies the smallest possible signed int will be used for casting.
    name: A name for this operation (optional).

  Returns:
    Op raising `InvalidArgumentError` if `cast(x, int_dtype) != x`.
  """
  with ops.name_scope(name, values=[x, data]):
    x = ops.convert_to_tensor(x, name="x")
    if x.dtype.is_integer:
      return control_flow_ops.no_op()
    message = message or "{} has non-integer components".format(x.op.name)
    if int_dtype is None:
      try:
        int_dtype = {
            dtypes.float16: dtypes.int16,
            dtypes.float32: dtypes.int32,
            dtypes.float64: dtypes.int64,
        }[x.dtype.base_dtype]
      except KeyError:
        raise TypeError("Unrecognized type {}".format(x.dtype.name))
    return check_ops.assert_equal(
        x, math_ops.cast(math_ops.cast(x, int_dtype), x.dtype),
        data=data, summarize=summarize, message=message, name=name)
Developer: 1000sprites, Project: tensorflow, Lines: 35, Source: util.py
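
The heart of the assertion is a cast round trip: x passes exactly when cast(cast(x, int_dtype), x.dtype) reproduces x. A standalone sketch of that idea with the public API:

import tensorflow as tf

def has_integer_form(x):
  # True iff every entry of the float tensor x is an exact integer.
  round_trip = tf.cast(tf.cast(x, tf.int32), x.dtype)
  return bool(tf.reduce_all(tf.equal(x, round_trip)))

print(has_integer_form(tf.constant([1.0, 2.0, -3.0])))  # True
print(has_integer_form(tf.constant([1.0, 2.5])))        # False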

Example 8: _fused_batch_norm

  def _fused_batch_norm(self, inputs, training):
    """Returns the output of fused batch norm."""
    beta = self.beta if self.center else self._beta_const
    gamma = self.gamma if self.scale else self._gamma_const

    def _fused_batch_norm_training():
      return nn.fused_batch_norm(
          inputs,
          gamma,
          beta,
          epsilon=self.epsilon,
          data_format=self._data_format)

    def _fused_batch_norm_inference():
      return nn.fused_batch_norm(
          inputs,
          gamma,
          beta,
          mean=self.moving_mean,
          variance=self.moving_variance,
          epsilon=self.epsilon,
          is_training=False,
          data_format=self._data_format)

    output, mean, variance = tf_utils.smart_cond(
        training, _fused_batch_norm_training, _fused_batch_norm_inference)
    if not self._bessels_correction_test_only:
      # Remove Bessel's correction to be consistent with non-fused batch norm.
      # Note that the variance computed by fused batch norm is
      # with Bessel's correction.
      sample_size = math_ops.cast(
          array_ops.size(inputs) / array_ops.size(variance), variance.dtype)
      factor = (sample_size - math_ops.cast(1.0, variance.dtype)) / sample_size
      variance *= factor

    training_value = tf_utils.constant_value(training)
    if training_value is None:
      momentum = tf_utils.smart_cond(training,
                                     lambda: self.momentum,
                                     lambda: 1.0)
    else:
      momentum = ops.convert_to_tensor(self.momentum)
    if training_value or training_value is None:
      if distribution_strategy_context.in_cross_replica_context():
        strategy = distribution_strategy_context.get_strategy()
        mean_update = strategy.extended.update(
            self.moving_mean, self._assign_moving_average,
            (mean, self.momentum))
        variance_update = strategy.extended.update(
            self.moving_variance, self._assign_moving_average,
            (variance, self.momentum))
      else:
        mean_update = self._assign_moving_average(self.moving_mean, mean,
                                                  momentum)
        variance_update = self._assign_moving_average(self.moving_variance,
                                                      variance, momentum)
      self.add_update(mean_update, inputs=True)
      self.add_update(variance_update, inputs=True)

    return output
Developer: gautam1858, Project: tensorflow, Lines: 60, Source: normalization.py
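
The Bessel's-correction factor (n - 1) / n converts a sample variance back to a population variance; a small numeric check (tf.math.reduce_variance computes the population variance, dividing by n):

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0, 4.0])
n = tf.cast(tf.size(x), tf.float32)
population_var = tf.math.reduce_variance(x)  # divides by n
sample_var = population_var * n / (n - 1.0)  # Bessel-corrected
factor = (n - 1.0) / n
print(float(sample_var * factor), float(population_var))  # equal: 1.25 1.25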

Example 9: _sample_n

  def _sample_n(self, n, seed=None):
    n_draws = math_ops.cast(self.total_count, dtype=dtypes.int32)
    if self.total_count.get_shape().ndims is not None:
      if self.total_count.get_shape().ndims != 0:
        raise NotImplementedError(
            "Sample only supported for scalar number of draws.")
    elif self.validate_args:
      is_scalar = check_ops.assert_rank(
          n_draws, 0,
          message="Sample only supported for scalar number of draws.")
      n_draws = control_flow_ops.with_dependencies([is_scalar], n_draws)
    k = self.event_shape_tensor()[0]
    # Flatten batch dims so logits has shape [B, k],
    # where B = reduce_prod(self.batch_shape_tensor()).
    x = random_ops.multinomial(
        logits=array_ops.reshape(self.logits, [-1, k]),
        num_samples=n * n_draws,
        seed=seed)
    x = array_ops.reshape(x, shape=[-1, n, n_draws])
    x = math_ops.reduce_sum(array_ops.one_hot(x, depth=k),
                            axis=-2)  # shape: [B, n, k]
    x = array_ops.transpose(x, perm=[1, 0, 2])
    final_shape = array_ops.concat([[n], self.batch_shape_tensor(), [k]], 0)
    x = array_ops.reshape(x, final_shape)
    return math_ops.cast(x, self.dtype)
Developer: DjangoPeng, Project: tensorflow, Lines: 25, Source: multinomial.py
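
The shape gymnastics above amount to: draw n * n_draws categorical samples, one-hot them, and sum over the draws axis to get multinomial counts. A standalone sketch with the public API (the numbers are illustrative):

import tensorflow as tf

logits = tf.math.log([[0.2, 0.3, 0.5]])  # one distribution over k = 3 classes
n, n_draws = 4, 10
idx = tf.random.categorical(logits, num_samples=n * n_draws)
idx = tf.reshape(idx, [-1, n, n_draws])
counts = tf.reduce_sum(tf.one_hot(idx, depth=3), axis=-2)  # shape [1, 4, 3]
print(tf.reduce_sum(counts, axis=-1))  # every row of counts sums to 10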

Example 10: quantiles_ready

  def quantiles_ready():
    """The subgraph for when the quantiles are ready."""
    quantized_feature = quantile_ops.quantiles([sparse_column_values], [],
                                               [quantile_buckets], [])
    quantized_feature = math_ops.cast(quantized_feature[0], dtypes.int64)
    quantized_feature = array_ops.reshape(quantized_feature, [-1])
    example_indices, _ = array_ops.split(
        sparse_column_indices, num_or_size_splits=2, axis=1)
    example_indices = array_ops.squeeze(example_indices, [1])
    filtered_gradients = array_ops.gather(gradients, example_indices)
    filtered_hessians = array_ops.gather(hessians, example_indices)
    filtered_partition_ids = array_ops.gather(example_partition_ids,
                                              example_indices)
    unique_partitions, mapped_partitions = array_ops.unique(
        example_partition_ids)

    # Compute aggregate stats for each partition.
    per_partition_gradients = math_ops.unsorted_segment_sum(
        gradients, mapped_partitions, array_ops.size(unique_partitions))
    per_partition_hessians = math_ops.unsorted_segment_sum(
        hessians, mapped_partitions, array_ops.size(unique_partitions))

    # Prepend a bias feature per partition that accumulates the stats for all
    # examples in that partition.
    bias_feature_ids = array_ops.fill(
        array_ops.shape(unique_partitions), _BIAS_FEATURE_ID)
    bias_feature_ids = math_ops.cast(bias_feature_ids, dtypes.int64)
    partition_ids = array_ops.concat(
        [unique_partitions, filtered_partition_ids], 0)
    filtered_gradients = array_ops.concat(
        [per_partition_gradients, filtered_gradients], 0)
    filtered_hessians = array_ops.concat(
        [per_partition_hessians, filtered_hessians], 0)
    bucket_ids = array_ops.concat([bias_feature_ids, quantized_feature], 0)
    return partition_ids, bucket_ids, filtered_gradients, filtered_hessians
Developer: 1000sprites, Project: tensorflow, Lines: 35, Source: ordinal_split_handler.py
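
The per-partition aggregation is built on tf.math.unsorted_segment_sum; a minimal sketch of just that building block:

import tensorflow as tf

gradients = tf.constant([1.0, 2.0, 3.0, 4.0])
partition_ids = tf.constant([0, 1, 0, 1])
unique_parts, mapped = tf.unique(partition_ids)
per_partition = tf.math.unsorted_segment_sum(
    gradients, mapped, tf.size(unique_parts))
print(per_partition.numpy())  # [4. 6.]: 1+3 for id 0, 2+4 for id 1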

Example 11: test_defining_spd_operator_by_taking_real_part

  def test_defining_spd_operator_by_taking_real_part(self):
    with self.cached_session() as sess:
      # S is real and positive.
      s = linear_operator_test_util.random_uniform(
          shape=(10, 2, 3, 4), dtype=dtypes.float32, minval=1., maxval=2.)

      # Let S = S1 + S2, the Hermitian and anti-Hermitian parts.
      # S1 = 0.5 * (S + S^H), S2 = 0.5 * (S - S^H),
      # where ^H is the Hermitian transpose of the function:
      #    f(n0, n1, n2)^H := ComplexConjugate[f(N0-n0, N1-n1, N2-n2)].
      # We want to isolate S1, since
      #   S1 is Hermitian by construction
      #   S1 is real since S is
      #   S1 is positive since it is the sum of two positive kernels

      # IDFT[S] = IDFT[S1] + IDFT[S2]
      #         =      H1  +      H2
      # where H1 is real since it is Hermitian,
      # and H2 is imaginary since it is anti-Hermitian.
      ifft_s = fft_ops.ifft3d(math_ops.cast(s, dtypes.complex64))

      # Throw away H2, keep H1.
      real_ifft_s = math_ops.real(ifft_s)

      # This is the perfect spectrum!
      # spectrum = DFT[H1]
      #          = S1,
      fft_real_ifft_s = fft_ops.fft3d(
          math_ops.cast(real_ifft_s, dtypes.complex64))

      # S1 is Hermitian ==> operator is real.
      # S1 is real ==> operator is self-adjoint.
      # S1 is positive ==> operator is positive-definite.
      operator = linalg.LinearOperatorCirculant3D(fft_real_ifft_s)

      # Allow for complex output so we can check operator has zero imag part.
      self.assertEqual(operator.dtype, dtypes.complex64)
      matrix, matrix_t = sess.run([
          operator.to_dense(),
          array_ops.matrix_transpose(operator.to_dense())
      ])
      operator.assert_positive_definite().run()  # Should not fail.
      np.testing.assert_allclose(0, np.imag(matrix), atol=1e-6)
      self.assertAllClose(matrix, matrix_t)

      # Just to test the theory, get S2 as well.
      # This should create an imaginary operator.
      # S2 is anti-Hermitian ==> operator is imaginary.
      # S2 is real ==> operator is self-adjoint.
      imag_ifft_s = math_ops.imag(ifft_s)
      fft_imag_ifft_s = fft_ops.fft3d(
          1j * math_ops.cast(imag_ifft_s, dtypes.complex64))
      operator_imag = linalg.LinearOperatorCirculant3D(fft_imag_ifft_s)

      matrix, matrix_h = sess.run([
          operator_imag.to_dense(),
          array_ops.matrix_transpose(math_ops.conj(operator_imag.to_dense()))
      ])
      self.assertAllClose(matrix, matrix_h)
      np.testing.assert_allclose(0, np.real(matrix), atol=1e-7)
Developer: JonathanRaiman, Project: tensorflow, Lines: 60, Source: linear_operator_circulant_test.py
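
A compact numpy cross-check of the decomposition this test describes: for a real, positive spectrum S, the real part of IDFT[S] is the Hermitian kernel H1, and its DFT S1 is again real and positive (the sizes here are illustrative):

import numpy as np

s = 1.0 + np.random.rand(4, 4, 4)  # real, positive spectrum S
h = np.fft.ifftn(s)                # H = H1 + H2
s1 = np.fft.fftn(h.real)           # spectrum of the Hermitian part H1
print(np.max(np.abs(s1.imag)) < 1e-12)  # True: S1 is real
print(np.min(s1.real) > 0.0)            # True: S1 stays positive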

Example 12: _apply_sparse_shared

  def _apply_sparse_shared(self, grad, var, indices,
                           scatter_add, scatter_update):
    beta1_power = self._get_beta_accumulators()
    beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
    lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
    beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
    beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
    epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
    # m_t = beta1 * m + (1 - beta1) * g_t
    m = self.get_slot(var, "m")
    m_slice = array_ops.gather(m, indices)
    m_t_slice = m_slice * beta1_t + grad * (1 - beta1_t)
    with ops.control_dependencies([m_t_slice]):
      m_t = scatter_update(m, indices, m_t_slice)
    # u_t = max(beta2 * u, abs(g_t))
    v = self.get_slot(var, "v")
    v_slice = array_ops.gather(v, indices)
    v_t_slice = math_ops.maximum(v_slice * beta2_t, math_ops.abs(grad))
    with ops.control_dependencies([v_t_slice]):
      v_t = scatter_update(v, indices, v_t_slice)
    # theta_t = theta - lr / (1 - beta1^t) * m_t / u_t
    var_slice = -lr_t / (1 - beta1_power) * (m_t_slice /
                                             (v_t_slice + epsilon_t))
    with ops.control_dependencies([var_slice]):
      var_update = scatter_add(var, indices, var_slice)
    return control_flow_ops.group(*[var_update, m_t, v_t])
Developer: Ajaycs99, Project: tensorflow, Lines: 26, Source: adamax.py
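
A worked one-step trace of the same AdaMax update rule on a scalar parameter (plain Python; the values are illustrative):

lr, beta1, beta2, eps = 0.001, 0.9, 0.999, 1e-8
m, v, theta, g, t = 0.0, 0.0, 1.0, 0.5, 1

m = beta1 * m + (1 - beta1) * g  # m_t = 0.05
v = max(beta2 * v, abs(g))       # u_t = 0.5
theta += -lr / (1 - beta1 ** t) * (m / (v + eps))
print(round(theta, 6))  # 0.999: the step moves theta against the gradient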

Example 13: call

  def call(self, values, weights=None):
    """Accumulate statistics for computing the mean.

    For example, if values is [1, 3, 5, 7] then the mean is 4.
    If the weights were specified as [1, 1, 0, 0] then the mean would be 2.

    Args:
      values: Tensor with the per-example value.
      weights: Optional weighting of each example. Defaults to 1.
    """
    if not self.built:  # False only in the first call().
      self.numer = self.add_variable(name="numer", shape=(),
                                     dtype=dtypes.float64,
                                     initializer=init_ops.zeros_initializer)
      self.denom = self.add_variable(name="denom", shape=(),
                                     dtype=dtypes.float64,
                                     initializer=init_ops.zeros_initializer)
    if weights is None:
      self.denom.assign_add(
          math_ops.cast(array_ops.size(values), dtypes.float64))
      values = math_ops.reduce_sum(values)
      self.numer.assign_add(math_ops.cast(values, dtypes.float64))
    else:
      weights = math_ops.cast(weights, dtypes.float64)
      self.denom.assign_add(math_ops.reduce_sum(weights))
      values = math_ops.cast(values, dtypes.float64) * weights
      self.numer.assign_add(math_ops.reduce_sum(values))
Developer: rajeev921, Project: tensorflow, Lines: 27, Source: metrics_impl.py
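
The accumulators implement a running weighted mean, numer / denom; a standalone check of the docstring's numbers:

import tensorflow as tf

values = tf.constant([1.0, 3.0, 5.0, 7.0])
weights = tf.constant([1.0, 1.0, 0.0, 0.0])
numer = tf.reduce_sum(tf.cast(values * weights, tf.float64))
denom = tf.reduce_sum(tf.cast(weights, tf.float64))
print(float(numer / denom))  # 2.0, matching the docstring example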

Example 14: call

  def call(self, y_true, y_pred):
    """Invokes the `CategoricalCrossentropy` instance.

    Args:
      y_true: Ground truth values.
      y_pred: The predicted values.

    Returns:
      Categorical cross entropy losses.
    """
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = ops.convert_to_tensor(y_true)
    is_sparse = y_pred.shape != y_true.shape

    if is_sparse:
      return sparse_categorical_crossentropy(
          y_true, y_pred, from_logits=self.from_logits)
    else:
      y_true = math_ops.cast(y_true, y_pred.dtype)
      if self.label_smoothing > 0:
        num_classes = math_ops.cast(array_ops.shape(y_true)[1], y_pred.dtype)
        smooth_positives = 1.0 - self.label_smoothing
        smooth_negatives = self.label_smoothing / num_classes
        y_true = y_true * smooth_positives + smooth_negatives

      return categorical_crossentropy(
          y_true, y_pred, from_logits=self.from_logits)
Developer: aeverall, Project: tensorflow, Lines: 27, Source: losses.py
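
A numeric sketch of the label-smoothing transform applied above (label_smoothing = 0.1 is an illustrative value):

import tensorflow as tf

y_true = tf.constant([[0.0, 1.0, 0.0]])
label_smoothing = 0.1
num_classes = tf.cast(tf.shape(y_true)[1], y_true.dtype)
smoothed = y_true * (1.0 - label_smoothing) + label_smoothing / num_classes
print(smoothed.numpy())  # [[0.0333 0.9333 0.0333]]: mass moves off the hot class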

Example 15: _apply_transform

  def _apply_transform(self, input_tensors, **kwargs):
    """Applies the transformation to the `transform_input`.

    Args:
      input_tensors: a list of Tensors representing the input to
        the Transform.
      **kwargs: Additional keyword arguments, unused here.

    Returns:
        A namedtuple of Tensors representing the transformed output.
    """
    d = input_tensors[0]

    if self.strip_value is np.nan:
      strip_hot = math_ops.is_nan(d)
    else:
      strip_hot = math_ops.equal(d,
                                 array_ops.constant([self.strip_value],
                                                    dtype=d.dtype))
    keep_hot = math_ops.logical_not(strip_hot)

    length = array_ops.reshape(array_ops.shape(d), [])
    indices = array_ops.boolean_mask(math_ops.range(length), keep_hot)
    values = array_ops.boolean_mask(d, keep_hot)

    sparse_indices = array_ops.reshape(
        math_ops.cast(indices, dtypes.int64), [-1, 1])
    shape = math_ops.cast(array_ops.shape(d), dtypes.int64)

    # pylint: disable=not-callable
    return self.return_type(ops.SparseTensor(sparse_indices, values, shape))
Developer: 821760408-sp, Project: tensorflow, Lines: 31, Source: sparsify.py
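
The transform drops a sentinel value and repacks the survivors as a SparseTensor; a standalone sketch of the same flow with the public API (using NaN as the strip value):

import tensorflow as tf

d = tf.constant([3.0, float('nan'), 5.0, float('nan')])
keep = tf.logical_not(tf.math.is_nan(d))
indices = tf.boolean_mask(tf.range(tf.size(d)), keep)
sp = tf.SparseTensor(
    indices=tf.reshape(tf.cast(indices, tf.int64), [-1, 1]),
    values=tf.boolean_mask(d, keep),
    dense_shape=tf.cast(tf.shape(d), tf.int64))
print(tf.sparse.to_dense(sp).numpy())  # [3. 0. 5. 0.]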


Note: The tensorflow.python.ops.math_ops.cast examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and distribution or use must follow each project's license. Do not reproduce without permission.