

Python math_ops.to_int32 Function Code Examples

This article collects typical usage examples of the to_int32 function from tensorflow.python.ops.math_ops. If you have been wondering what exactly to_int32 does, how to call it, or what it looks like in real code, the curated examples below should help.


The following presents 15 code examples of the to_int32 function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
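Before working through the examples, a minimal sketch of the function itself may help; the tensor values below are illustrative and not drawn from any of the examples. to_int32 casts a tensor to int32 (truncating floats toward zero), and in newer TensorFlow releases it is deprecated in favor of tf.cast(x, tf.int32):

import tensorflow as tf
from tensorflow.python.ops import math_ops

x = tf.constant([1.7, 2.3, -0.9])   # float32 input (illustrative values)
y = math_ops.to_int32(x)            # cast to int32, truncating toward zero: [1, 2, 0]
z = tf.cast(x, tf.int32)            # equivalent form that works across TF 1.x and 2.x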

Example 1: linear_decay_fn

 def linear_decay_fn(global_step):
   if global_step is None:
     raise ValueError("global_step is required for linear_decay.")
   global_step = math_ops.minimum(global_step, decay_steps)
   remaining_steps = math_ops.to_int32(decay_steps) - math_ops.to_int32(
       global_step)
   decayed = math_ops.to_float(remaining_steps) / math_ops.to_float(
       decay_steps)
   return math_ops.maximum(0.0, decayed)
Developer ID: AbhinavJain13, Project: tensorflow, Lines: 9, Source: sign_decay.py

Example 2: _compute_accuracy

 def _compute_accuracy(logits, targets, weights=None):
   if self._n_classes > 2:
     _, predictions = nn.top_k(logits, 1)
   else:
     predictions = array_ops.reshape(logits, [-1])
     predictions = math_ops.greater(predictions,
                                    array_ops.zeros_like(predictions))
     targets = array_ops.reshape(targets, [-1])
   return metrics_lib.streaming_accuracy(
       math_ops.to_int32(predictions), math_ops.to_int32(targets), weights)
Developer ID: 01-, Project: tensorflow, Lines: 10, Source: dnn_linear_combined.py

Example 3: update_medoid_per_cluster

def update_medoid_per_cluster(pairwise_distances, pairwise_distances_subset,
                              labels, chosen_ids, cluster_member_ids,
                              cluster_idx, margin_multiplier, margin_type):
  """Updates the cluster medoid per cluster.

  Args:
    pairwise_distances: 2-D Tensor of pairwise distances.
    pairwise_distances_subset: 2-D Tensor of pairwise distances for one cluster.
    labels: 1-D Tensor of ground truth cluster assignment.
    chosen_ids: 1-D Tensor of cluster centroid indices.
    cluster_member_ids: 1-D Tensor of cluster member indices for one cluster.
    cluster_idx: Index of this one cluster.
    margin_multiplier: multiplication constant.
    margin_type: Type of structured margin to use. Default is nmi.

  Returns:
    chosen_ids: Updated 1-D Tensor of cluster centroid indices.
  """

  def func_cond(iteration, scores_margin):
    del scores_margin  # Unused variable scores_margin.
    return iteration < num_candidates

  def func_body(iteration, scores_margin):
    # swap the current medoid with the candidate cluster member
    candidate_medoid = math_ops.to_int32(cluster_member_ids[iteration])
    tmp_chosen_ids = update_1d_tensor(chosen_ids, cluster_idx, candidate_medoid)
    predictions = get_cluster_assignment(pairwise_distances, tmp_chosen_ids)
    metric_score = compute_clustering_score(labels, predictions, margin_type)
    pad_before = array_ops.zeros([iteration])
    pad_after = array_ops.zeros([num_candidates - 1 - iteration])
    return iteration + 1, scores_margin + array_ops.concat(
        [pad_before, [1.0 - metric_score], pad_after], 0)

  # pairwise_distances_subset is of size [p, 1, 1, p],
  #   the intermediate dummy dimensions at
  #   [1, 2] makes this code work in the edge case where p=1.
  #   this happens if the cluster size is one.
  scores_fac = -1.0 * math_ops.reduce_sum(
      array_ops.squeeze(pairwise_distances_subset, [1, 2]), axis=0)

  iteration = array_ops.constant(0)
  num_candidates = array_ops.size(cluster_member_ids)
  scores_margin = array_ops.zeros([num_candidates])

  _, scores_margin = control_flow_ops.while_loop(func_cond, func_body,
                                                 [iteration, scores_margin])
  candidate_scores = math_ops.add(scores_fac, margin_multiplier * scores_margin)

  argmax_index = math_ops.to_int32(
      math_ops.argmax(candidate_scores, dimension=0))

  best_medoid = math_ops.to_int32(cluster_member_ids[argmax_index])
  chosen_ids = update_1d_tensor(chosen_ids, cluster_idx, best_medoid)
  return chosen_ids
Developer ID: AndrewTwinz, Project: tensorflow, Lines: 55, Source: metric_loss_ops.py

Example 4: _class_predictions_streaming_mean

 def _class_predictions_streaming_mean(
     predictions, labels, weights=None, class_id=None):
   del labels
   return metrics_lib.streaming_mean(
       array_ops.where(
           math_ops.equal(
               math_ops.to_int32(class_id),
               math_ops.to_int32(predictions)),
           array_ops.ones_like(predictions),
           array_ops.zeros_like(predictions)),
       weights=weights)
Developer ID: Hwhitetooth, Project: tensorflow, Lines: 11, Source: head.py

Example 5: testListOfScalarTensors

  def testListOfScalarTensors(self):
    a = math_ops.to_int32(5)
    b = math_ops.to_int32(6)

    value = np.random.rand(11, 11)

    with self.test_session(use_gpu=False) as sess:
      result = sess.run(array_ops.split(value, [a, b]))

    self.assertAllEqual(result[0], value[0:5, :])
    self.assertAllEqual(result[1], value[5:, :])
Developer ID: AliMiraftab, Project: tensorflow, Lines: 11, Source: split_op_test.py

Example 6: testListOfScalarTensors

  def testListOfScalarTensors(self):
    a = math_ops.to_int32(5)
    b = math_ops.to_int32(6)

    value = np.random.rand(11, 11)

    with test_util.device(use_gpu=True):
      result = self.evaluate(array_ops.split(value, [a, b]))

    self.assertAllEqual(result[0], value[0:5, :])
    self.assertAllEqual(result[1], value[5:, :])
Developer ID: AbhinavJain13, Project: tensorflow, Lines: 11, Source: split_op_test.py

Example 7: _randomize

def _randomize(coeffs, radixes, seed=None):
  """Applies the Owen randomization to the coefficients."""
  given_dtype = coeffs.dtype
  coeffs = math_ops.to_int32(coeffs)
  num_coeffs = array_ops.shape(coeffs)[-1]
  radixes = array_ops.reshape(math_ops.to_int32(radixes), [-1])
  perms = _get_permutations(num_coeffs, radixes, seed=seed)
  perms = array_ops.reshape(perms, [-1])
  radix_sum = math_ops.reduce_sum(radixes)
  radix_offsets = array_ops.reshape(math_ops.cumsum(radixes, exclusive=True),
                                    [-1, 1])
  offsets = radix_offsets + math_ops.range(num_coeffs) * radix_sum
  permuted_coeffs = array_ops.gather(perms, coeffs + offsets)
  return math_ops.cast(permuted_coeffs, dtype=given_dtype)
Developer ID: QiangCai, Project: tensorflow, Lines: 14, Source: halton_sequence_impl.py

Example 8: one_hot_mask

def one_hot_mask(labels, num_classes, scope=None):
  """Compute 1-hot encodings for masks.

  Given a label image, this computes the one hot encoding at
  each pixel.

  Args:
    labels: (batch_size, width, height, 1) tensor containing labels.
    num_classes: number of classes
    scope: optional scope name

  Returns:
    Tensor of shape (batch_size, width, height, num_classes) with
    a 1-hot encoding.
  """
  with ops.name_scope(scope, "OneHotMask", [labels]):
    height, width, depth = _shape(labels)
    assert depth == 1
    sparse_labels = math_ops.to_int32(array_ops.reshape(labels, [-1, 1]))
    sparse_size, _ = _shape(sparse_labels)
    indices = array_ops.reshape(math_ops.range(0, sparse_size, 1), [-1, 1])
    concated = array_ops.concat([indices, sparse_labels], 1)
    dense_result = sparse_ops.sparse_to_dense(concated,
                                              [sparse_size, num_classes], 1.0,
                                              0.0)
    result = array_ops.reshape(dense_result, [height, width, num_classes])
    return result
Developer ID: 1000sprites, Project: tensorflow, Lines: 27, Source: misc.py

Example 9: get_best

  def get_best(self, n):
    """Return the indices and values of the n highest scores in the TopN."""

    def refresh_shortlist():
      """Update the shortlist with the highest scores in id_to_score."""
      new_scores, new_ids = nn_ops.top_k(self.id_to_score, self.shortlist_size)
      smallest_new_score = math_ops.reduce_min(new_scores)
      new_length = math_ops.reduce_sum(
          math_ops.to_int32(math_ops.greater(new_scores, dtypes.float32.min)))
      u1 = self.sl_ids.assign(
          math_ops.to_int64(array_ops.concat([[new_length], new_ids], 0)))
      u2 = self.sl_scores.assign(
          array_ops.concat([[smallest_new_score], new_scores], 0))
      self.last_ops = [u1, u2]
      return control_flow_ops.group(u1, u2)

    # We only need to refresh the shortlist if n is greater than the
    # current shortlist size (which is stored in sl_ids[0]).
    with ops.control_dependencies(self.last_ops):
      cond_op = control_flow_ops.cond(n > self.sl_ids[0], refresh_shortlist,
                                      control_flow_ops.no_op)
      with ops.control_dependencies([cond_op]):
        topk_values, topk_indices = nn_ops.top_k(
            self.sl_scores,
            math_ops.minimum(n, math_ops.to_int32(self.sl_ids[0])))
        # topk_indices are the indices into the shortlist, we want to return
        # the indices into id_to_score
        gathered_indices = array_ops.gather(self.sl_ids, topk_indices)
        return gathered_indices, topk_values
Developer ID: AliMiraftab, Project: tensorflow, Lines: 29, Source: topn.py

Example 10: _compute_zeroone_score

def _compute_zeroone_score(labels, predictions):
  zeroone_score = math_ops.to_float(
      math_ops.equal(
          math_ops.reduce_sum(
              math_ops.to_int32(math_ops.equal(labels, predictions))),
          array_ops.shape(labels)[0]))
  return zeroone_score
Developer ID: AndrewTwinz, Project: tensorflow, Lines: 7, Source: metric_loss_ops.py

Example 11: _compute_power_svd

  def _compute_power_svd(self, var, mat_g, mat_g_size, alpha, mat_h_slot_name):
    """Computes mat_h = mat_g^alpha using svd. mat_g is a symmetric PSD matrix.

    Args:
      var: the variable we are updating.
      mat_g: the symmetric PSD matrix whose power is to be computed
      mat_g_size: size of mat_g
      alpha: a real number
      mat_h_slot_name: name of slot to store the power, if needed.

    Returns:
      mat_h = mat_g^alpha

    Stores mat_h in the appropriate slot, if it exists.
    Note that mat_g is PSD. So we could use linalg_ops.self_adjoint_eig.
    """
    if mat_g_size == 1:
      mat_h = math_ops.pow(mat_g + self._epsilon, alpha)
    else:
      damping = self._epsilon * linalg_ops.eye(math_ops.to_int32(mat_g_size))
      diag_d, mat_u, mat_v = linalg_ops.svd(mat_g + damping, full_matrices=True)
      mat_h = math_ops.matmul(
          mat_v * math_ops.pow(math_ops.maximum(diag_d, self._epsilon), alpha),
          array_ops.transpose(mat_u))
    if mat_h_slot_name is not None:
      return state_ops.assign(self.get_slot(var, mat_h_slot_name), mat_h)
    return mat_h
Developer ID: ZhangXinNan, Project: tensorflow, Lines: 27, Source: shampoo.py

Example 12: average_impurity

  def average_impurity(self):
    """Constructs a TF graph for evaluating the average leaf impurity of a tree.

    If in regression mode, this is the leaf variance. If in classification mode,
    this is the gini impurity.

    Returns:
      The last op in the graph.
    """
    children = array_ops.squeeze(array_ops.slice(
        self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
    is_leaf = math_ops.equal(constants.LEAF_NODE, children)
    leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
                                                 squeeze_dims=[1]))
    counts = array_ops.gather(self.variables.node_sums, leaves)
    gini = self._weighted_gini(counts)
    # Guard against step 1, when there often are no leaves yet.
    def impurity():
      return gini
    # Since average impurity can be used for loss, when there's no data just
    # return a big number so that loss always decreases.
    def big():
      return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.
    return control_flow_ops.cond(math_ops.greater(
        array_ops.shape(leaves)[0], 0), impurity, big)
Developer ID: AutumnQYN, Project: tensorflow, Lines: 25, Source: tensor_forest.py

Example 13: Backward

    def Backward(*args):
      """Backward pass for the recurrent net."""
      # theta, state0, inputs are Forward's inputs.
      # acc_state is the accumulated 1st output of Forward.
      # acc_extras is the accumulated 2nd output of Forward.
      # d_acc_state is the gradient for acc_state.
      # d_state1 is the gradient for the final state computed by Forward.
      (theta, state0, inputs, max_input_length, acc_state, acc_extras,
       d_acc_state, d_state1) = _Pack(args, backward_sig)

      # Accumulators for gradients.
      d_theta = _EmptyLike(theta)
      d_inputs = _EmptyLike(inputs)

      # Loop backwards. Note the loop's limit is open-ended, so goes through
      # t=0.
      t = max_input_length - 1
      dev_t = math_ops.to_int32(t) if use_tpu else math_ops.to_int64(t)
      run = functional_ops.For(
          start=t,
          limit=-1,
          delta=-1,
          inputs=[dev_t] + _Flatten([
              theta, state0, inputs, acc_state, acc_extras, d_theta, d_state1,
              d_inputs, d_acc_state
          ]),
          body=BackwardLoopBody,
          rewrite_with_while=compiled)

      (theta, state0, inputs, acc_state, acc_extras, d_theta, d_state0,
       d_inputs, d_acc_state) = _Pack(run[1:], bakloop_sig)

      d_max_input_length = array_ops.constant(0, dtype=max_input_length.dtype)
      return _Flatten(
          [d_theta, d_state0, d_inputs, d_max_input_length, acc_extras])
Developer ID: AnishShah, Project: tensorflow, Lines: 35, Source: recurrent.py

Example 14: has_zero

 def has_zero():
   # Insert a zero in the consecutive ids where zero appears in unique_ids.
   # id_is_zero has length 1.
   zero_id_ind = math_ops.to_int32(id_is_zero[0])
   ids_before = nonzero_consecutive_ids[:zero_id_ind]
   ids_after = nonzero_consecutive_ids[zero_id_ind:]
   return array_ops.concat([ids_before, [0], ids_after], axis=0)
Developer ID: Eagle732, Project: tensorflow, Lines: 7, Source: image_ops.py

Example 15: _Update

def _Update(struct_acc, struct_x, t):
  """Updates t-th row in accumulators.

  Args:
    struct_acc: The accumulators. A structure of tensors.
    struct_x: The new values. A structure of tensors congruent to `struct_acc`.
    t: A scalar integer. Performance is better if `t` is on the device
      memory.

  Returns:
    A structure of tensors. Say, ret is a returned dictionary. Then, for
    each key, we have:
      ret[key] = struct_acc[key];
      ret[key][t, :] = struct_x[key]
  """
  to_skip_update = set()
  acc_lst = nest.flatten(struct_acc)
  x_lst = nest.flatten(struct_x)
  t = math_ops.to_int32([t])  # tf.to_int32 casts on-device tensors.
  lst = []
  for acc, x in zip(acc_lst, x_lst):
    if acc in to_skip_update:
      # Until b/62105730 is fixed, we need to avoid inplace update for tensors
      # of rank 1.  could reshape to handle it, but we don't really need the
      # values applied to these, so just skip their modification.
      lst += [acc]
    else:
      lst += [alias_inplace_update(acc, t, array_ops.expand_dims(x, 0))]
  return nest.pack_sequence_as(struct_acc, lst)
Developer ID: AnishShah, Project: tensorflow, Lines: 29, Source: recurrent.py


Note: The tensorflow.python.ops.math_ops.to_int32 examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright in the source code remains with the original authors. Please consult the corresponding project's license before redistributing or using the code; do not reproduce without permission.