

Python v1.equal Method Code Examples

This article collects typical code examples of the tensorflow.compat.v1.equal method in Python. If you are wondering what exactly v1.equal does, how to use it, or where to find examples of it in practice, the curated examples below may help. You can also explore further usage examples from tensorflow.compat.v1, the module this method belongs to.


The sections below present 15 code examples of the v1.equal method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
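
Before diving into the examples, here is a minimal standalone sketch of the method itself (a toy snippet, not taken from any of the projects below; it assumes TF1 graph mode):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # needed when running under a TF2 installation

a = tf.constant([1, 2, 3])
b = tf.constant([1, 0, 3])
eq = tf.equal(a, b)  # element-wise comparison; broadcasting is supported

with tf.Session() as sess:
    print(sess.run(eq))  # [ True False  True]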

Example 1: _ensure_keep_mask

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import equal [as alias]
def _ensure_keep_mask(self, x):
    if self._keep_mask is None or not self._share_mask:
      shape = tf.shape(x)
      k = shape[1]
      # To make this class a drop-in replacement for Bernoulli dropout, we
      # parameterize it with keep_prob. Set alpha of the Dirichlet so that the
      # variance is equal to the variance of the Bernoulli with p=keep_prob
      # divided by keep_prob.
      # The variance of the Dirichlet with k equal alphas is
      # (k-1)/(k^2*(k*alpha+1)). Solve that for alpha.
      kf = tf.cast(k, tf.float32)
      alpha = self._keep_prob * (kf - 1.0) / ((1-self._keep_prob)*kf) - 1.0/kf
      dist = tfp.distributions.Dirichlet(tf.ones(shape=k) * alpha)
      assert (dist.reparameterization_type ==
              tfp.distributions.FULLY_REPARAMETERIZED)
      # E[Dir(alpha)] = 1/k for each element, but we want the expectation to
      # be keep_prob, hence the multiplication.
      self._keep_mask = kf * dist.sample(shape[0])
      self._keep_mask.set_shape(x.get_shape())
    return self._keep_mask 
Developer: deepmind, Project: lamb, Lines: 22, Source: dropout.py
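
As a sanity check on the alpha formula above, the following standalone snippet (an illustration, not part of lamb) samples the scaled Dirichlet mask and compares its per-element variance against the Bernoulli-dropout target (1 - keep_prob) / keep_prob:

import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
tf.disable_v2_behavior()

keep_prob, k = 0.8, 16
kf = float(k)
# Same alpha as in _ensure_keep_mask above.
alpha = keep_prob * (kf - 1.0) / ((1.0 - keep_prob) * kf) - 1.0 / kf

dist = tfp.distributions.Dirichlet(tf.ones(k) * alpha)
mask = kf * dist.sample(100000)  # scaled exactly as in the code above

with tf.Session() as sess:
    samples = sess.run(mask)
# Var(Bernoulli(p) / p) = (1 - p) / p = 0.25 for p = 0.8.
print(np.var(samples), (1.0 - keep_prob) / keep_prob)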

Example 2: _build

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import equal [as alias]
def _build(self, x, state):
    prev_keep_mask = state
    shape = tf.shape(x)
    noise = tf.random_uniform(shape, dtype=x.dtype)
    other_mask = tf.floor(self._keep_prob + noise)
    choice_noise = tf.random_uniform(shape, dtype=x.dtype)
    choice = tf.less(choice_noise, self._flip_prob)
    # KLUDGE(melisgl): The client has to pass the last keep_mask from
    # one batch to the next, so the mask may end up next to some
    # recurrent cell state. This state is often zero at the beginning
    # and may be periodically zeroed (per example) during training.
    # While zeroing LSTM state is okay, zeroing the dropout mask is
    # not. So instead of forcing every client to deal with this common
    # (?) case, if an all zero mask is detected, then regenerate a
    # fresh mask. This is of course a major hack and won't help with
    # learnt initial states, for example.
    sum_ = tf.reduce_sum(prev_keep_mask, 1, keepdims=True)
    is_initializing = tf.equal(sum_, 0.0)

    self._keep_mask = tf.where(tf.logical_or(choice, is_initializing),
                               other_mask,
                               prev_keep_mask)
    self._time_step += 1
    return x * self._keep_mask / self._keep_prob * self._scaler 
Developer: deepmind, Project: lamb, Lines: 26, Source: dropout.py
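
The all-zero detection can be exercised in isolation. This toy snippet (hypothetical, not part of lamb) shows that a zero initial mask is replaced wholesale, while a live mask would only be flipped element-wise with probability flip_prob:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

keep_prob, flip_prob = 0.9, 0.01
x = tf.zeros([2, 4])
prev_keep_mask = tf.zeros_like(x)  # e.g. state at the very start of training

noise = tf.random_uniform(tf.shape(x))
other_mask = tf.floor(keep_prob + noise)
choice = tf.less(tf.random_uniform(tf.shape(x)), flip_prob)
is_initializing = tf.equal(
    tf.reduce_sum(prev_keep_mask, 1, keepdims=True), 0.0)
keep_mask = tf.where(tf.logical_or(choice, is_initializing),
                     other_mask, prev_keep_mask)

with tf.Session() as sess:
    print(sess.run(keep_mask))  # a fresh Bernoulli(0.9) mask, not all zeros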

Example 3: padded_accuracy_topk

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import equal [as alias]
def padded_accuracy_topk(predictions,
                         labels,
                         k,
                         weights_fn=common_layers.weights_nonzero):
  """Percentage of times that top-k predictions matches labels on non-0s."""
  with tf.variable_scope("padded_accuracy_topk", values=[predictions, labels]):
    padded_predictions, padded_labels = common_layers.pad_with_zeros(
        predictions, labels)
    weights = weights_fn(padded_labels)
    effective_k = tf.minimum(k,
                             common_layers.shape_list(padded_predictions)[-1])
    _, outputs = tf.nn.top_k(padded_predictions, k=effective_k)
    outputs = tf.to_int32(outputs)
    padded_labels = tf.to_int32(padded_labels)
    padded_labels = tf.expand_dims(padded_labels, axis=-1)
    padded_labels += tf.zeros_like(outputs)  # Pad to same shape.
    same = tf.to_float(tf.equal(outputs, padded_labels))
    same_topk = tf.reduce_sum(same, axis=-1)
    return same_topk, weights 
Developer: tensorflow, Project: tensor2tensor, Lines: 21, Source: metrics.py
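
The core of this metric is the broadcast comparison between the top-k indices and the labels. A stripped-down version without the padding and weighting machinery (toy values, no common_layers dependency) looks like this:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

logits = tf.constant([[0.1, 0.7, 0.2], [0.5, 0.3, 0.2]])
labels = tf.constant([1, 2])
_, topk = tf.nn.top_k(logits, k=2)                   # [batch, k] indices
same = tf.to_float(tf.equal(topk, tf.expand_dims(labels, -1)))
same_topk = tf.reduce_sum(same, axis=-1)             # 1.0 iff label in top-k

with tf.Session() as sess:
    print(sess.run(same_topk))  # [1. 0.]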

Example 4: set_precision

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import equal [as alias]
def set_precision(predictions, labels,
                  weights_fn=common_layers.weights_nonzero):
  """Precision of set predictions.

  Args:
    predictions: A Tensor of scores of shape [batch, nlabels].
    labels: A Tensor of int32s giving true set elements,
      of shape [batch, seq_length].
    weights_fn: A function to weight the elements.

  Returns:
    hits: A Tensor of shape [batch, nlabels].
    weights: A Tensor of shape [batch, nlabels].
  """
  with tf.variable_scope("set_precision", values=[predictions, labels]):
    labels = tf.squeeze(labels, [2, 3])
    weights = weights_fn(labels)
    labels = tf.one_hot(labels, predictions.shape[-1])
    labels = tf.reduce_max(labels, axis=1)
    labels = tf.cast(labels, tf.bool)
    return tf.to_float(tf.equal(labels, predictions)), weights 
Developer: tensorflow, Project: tensor2tensor, Lines: 23, Source: metrics.py

Example 5: set_recall

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import equal [as alias]
def set_recall(predictions, labels, weights_fn=common_layers.weights_nonzero):
  """Recall of set predictions.

  Args:
    predictions: A Tensor of scores of shape [batch, nlabels].
    labels: A Tensor of int32s giving true set elements,
      of shape [batch, seq_length].
    weights_fn: A function to weight the elements.

  Returns:
    hits: A Tensor of shape [batch, nlabels].
    weights: A Tensor of shape [batch, nlabels].
  """
  with tf.variable_scope("set_recall", values=[predictions, labels]):
    labels = tf.squeeze(labels, [2, 3])
    weights = weights_fn(labels)
    labels = tf.one_hot(labels, predictions.shape[-1])
    labels = tf.reduce_max(labels, axis=1)
    labels = tf.cast(labels, tf.bool)
    return tf.to_float(tf.equal(labels, predictions)), weights 
Developer: tensorflow, Project: tensor2tensor, Lines: 22, Source: metrics.py
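
Both set_precision and set_recall rely on the same trick for turning a variable-length list of label ids into a fixed-size boolean indicator over the label vocabulary. In isolation (toy values):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

labels = tf.constant([[2, 0, 3]])            # [batch, seq_length] label ids
one_hot = tf.one_hot(labels, depth=5)        # [batch, seq_length, nlabels]
indicator = tf.cast(tf.reduce_max(one_hot, axis=1), tf.bool)

with tf.Session() as sess:
    print(sess.run(indicator))  # [[ True False  True  True False]]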

Example 6: top_k_with_unique

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import equal [as alias]
def top_k_with_unique(inputs, k):
  """Finds the values and indices of the k largests entries.

  Instead of doing sort like tf.nn.top_k, this function finds the max value
  k times. The running time is proportional to k, which is be faster when k
  is small. The current implementation supports only inputs of rank 2.
  In addition, iota is used to replace the lower bits of each element, this
  makes the selection more stable when there are equal elements. The
  overhead is that output values are approximated.

  Args:
    inputs: A tensor with rank of 2. [batch_size, original_size].
    k: An integer, number of top elements to select.

  Returns:
    top_values: A tensor, the k largest elements in sorted order.
      [batch_size, k].
    indices: A tensor, indices of the top_values. [batch_size, k].
  """
  unique_inputs = _create_make_unique(tf.cast(inputs, tf.float32))
  top_values, indices = _create_topk_unique(unique_inputs, k)
  top_values = tf.cast(top_values, inputs.dtype)
  return top_values, indices 
Developer: tensorflow, Project: tensor2tensor, Lines: 25, Source: beam_search.py
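
A hypothetical call site, assuming _create_make_unique and _create_topk_unique (the private helpers this function depends on, defined elsewhere in beam_search.py) are in scope:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

logits = tf.random_uniform([8, 1024])
top_values, top_indices = top_k_with_unique(logits, k=4)
# Sort-based reference for comparison; values may differ slightly because
# top_k_with_unique replaces the lower bits of each element with iota.
ref_values, ref_indices = tf.nn.top_k(logits, k=4)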

Example 7: _apply_cond

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import equal [as alias]
def _apply_cond(self, apply_fn, grad, var, *args, **kwargs):
    """Apply conditionally if counter is zero."""
    grad_acc = self.get_slot(var, "grad_acc")

    def apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs):
      total_grad = (grad_acc + grad) / tf.cast(self._n_t, grad.dtype)
      adam_op = apply_fn(total_grad, var, *args, **kwargs)
      with tf.control_dependencies([adam_op]):
        grad_acc_to_zero_op = grad_acc.assign(
            tf.zeros_like(grad_acc), use_locking=self._use_locking)
      return tf.group(adam_op, grad_acc_to_zero_op)

    def accumulate_gradient(grad_acc, grad):
      assign_op = tf.assign_add(grad_acc, grad, use_locking=self._use_locking)
      return tf.group(assign_op)  # Strip return value

    return tf.cond(
        tf.equal(self._get_iter_variable(), 0),
        lambda: apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs),
        lambda: accumulate_gradient(grad_acc, grad)) 
Developer: tensorflow, Project: tensor2tensor, Lines: 22, Source: multistep_with_adamoptimizer.py

Example 8: _apply_cond

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import equal [as alias]
def _apply_cond(self, apply_fn, grad, var, *args, **kwargs):
    """Apply conditionally if counter is zero."""
    grad_acc = self.get_slot(var, "grad_acc")

    def apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs):
      total_grad = (grad_acc + grad) / tf.cast(self._n_t, grad.dtype)
      adam_op = apply_fn(total_grad, var, *args, **kwargs)
      with tf.control_dependencies([adam_op]):
        grad_acc_to_zero_op = grad_acc.assign(tf.zeros_like(grad_acc),
                                              use_locking=self._use_locking)
      return tf.group(adam_op, grad_acc_to_zero_op)

    def accumulate_gradient(grad_acc, grad):
      assign_op = tf.assign_add(grad_acc, grad, use_locking=self._use_locking)
      return tf.group(assign_op)  # Strip return value

    return tf.cond(
        tf.equal(self._get_iter_variable(), 0),
        lambda: apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs),
        lambda: accumulate_gradient(grad_acc, grad)) 
Developer: tensorflow, Project: tensor2tensor, Lines: 22, Source: multistep_optimizer.py
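
Examples 7 and 8 show the same pattern from two optimizer variants: accumulate the gradient for n - 1 calls and apply it on the n-th. A minimal standalone sketch of that control flow (toy scalars, not the actual optimizer):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

n = 3
iter_var = tf.get_variable("iter", initializer=0, trainable=False)
grad_acc = tf.get_variable("grad_acc", initializer=0.0, trainable=False)
grad = tf.constant(1.0)

update = tf.cond(
    tf.equal(iter_var, 0),
    lambda: grad_acc.assign(0.0),        # stand-in for "apply Adam and reset"
    lambda: grad_acc.assign_add(grad))   # keep accumulating
advance = tf.assign(iter_var, tf.mod(iter_var + 1, n))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(6):
        acc = sess.run(update)
        sess.run(advance)
        print(acc)  # 0.0, 1.0, 2.0, 0.0, 1.0, 2.0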

Example 9: _finish

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import equal [as alias]
def _finish(self, update_ops, name_scope):
    """Updates beta_power variables every n batches and incrs counter."""
    iter_ = self._get_iter_variable()
    beta1_power, beta2_power = self._get_beta_accumulators()
    with tf.control_dependencies(update_ops):
      with tf.colocate_with(iter_):

        def update_beta_op():
          update_beta1 = beta1_power.assign(
              beta1_power * self._beta1_t,
              use_locking=self._use_locking)
          update_beta2 = beta2_power.assign(
              beta2_power * self._beta2_t,
              use_locking=self._use_locking)
          return tf.group(update_beta1, update_beta2)
        maybe_update_beta = tf.cond(
            tf.equal(iter_, 0), update_beta_op, tf.no_op)
        with tf.control_dependencies([maybe_update_beta]):
          update_iter = iter_.assign(tf.mod(iter_ + 1, self._n_t),
                                     use_locking=self._use_locking)
    return tf.group(
        *update_ops + [update_iter, maybe_update_beta], name=name_scope) 
Developer: tensorflow, Project: tensor2tensor, Lines: 24, Source: multistep_optimizer.py

Example 10: pool

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import equal [as alias]
def pool(inputs, window_size, pooling_type, padding, strides=(1, 1)):
  """Pooling (supports "LEFT")."""
  with tf.name_scope("pool", values=[inputs]):
    static_shape = inputs.get_shape()
    if not static_shape or len(static_shape) != 4:
      raise ValueError("Inputs to conv must have statically known rank 4.")
    # Add support for left padding.
    if padding == "LEFT":
      assert window_size[0] % 2 == 1 and window_size[1] % 2 == 1
      if len(static_shape) == 3:
        width_padding = 2 * (window_size[1] // 2)
        padding_ = [[0, 0], [width_padding, 0], [0, 0]]
      else:
        height_padding = 2 * (window_size[0] // 2)
        cond_padding = tf.cond(
            tf.equal(shape_list(inputs)[2], 1), lambda: tf.constant(0),
            lambda: tf.constant(2 * (window_size[1] // 2)))
        width_padding = 0 if static_shape[2] == 1 else cond_padding
        padding_ = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]]
      inputs = tf.pad(inputs, padding_)
      inputs.set_shape([static_shape[0], None, None, static_shape[3]])
      padding = "VALID"

  return tf.nn.pool(inputs, window_size, pooling_type, padding, strides=strides) 
Developer: tensorflow, Project: tensor2tensor, Lines: 26, Source: common_layers.py
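
A hypothetical call, assuming pool (defined above) and its shape_list helper are imported from tensor2tensor's common_layers:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

images = tf.random_uniform([2, 8, 8, 3])
# "LEFT" pads only at the top/left, so the pooling window never looks at
# future positions -- useful for autoregressive models.
pooled = pool(images, window_size=(3, 3), pooling_type="MAX", padding="LEFT")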

Example 11: weights_multi_problem

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import equal [as alias]
def weights_multi_problem(labels, taskid=-1):
  """Assign weight 1.0 to only the "targets" portion of the labels.

  Weight 1.0 is assigned to all labels past the taskid.

  Args:
    labels: A Tensor of int32s.
    taskid: An int32 representing the task id for a problem.

  Returns:
    A Tensor of floats.

  Raises:
    ValueError: The Task ID must be valid.
  """
  taskid = check_nonnegative(taskid)
  past_taskid = tf.cumsum(to_float(tf.equal(labels, taskid)), axis=1)
  # Additionally zero out the task id location
  past_taskid *= to_float(tf.not_equal(labels, taskid))
  non_taskid = to_float(labels)
  return to_float(tf.not_equal(past_taskid * non_taskid, 0)) 
Developer: tensorflow, Project: tensor2tensor, Lines: 23, Source: common_layers.py
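
A toy trace of the masking logic (a standalone re-creation; check_nonnegative and to_float come from common_layers in the original):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

labels = tf.constant([[5, 7, 9, 3, 0, 1]])  # taskid 9 separates input/target
taskid = 9
past_taskid = tf.cumsum(tf.to_float(tf.equal(labels, taskid)), axis=1)
past_taskid *= tf.to_float(tf.not_equal(labels, taskid))  # zero the taskid slot
weights = tf.to_float(tf.not_equal(past_taskid * tf.to_float(labels), 0))

with tf.Session() as sess:
    print(sess.run(weights))  # [[0. 0. 0. 1. 0. 1.]] -- zero labels stay masked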

Example 12: top_1_tpu

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import equal [as alias]
def top_1_tpu(inputs):
  """find max and argmax over the last dimension.

  Works well on TPU

  Args:
    inputs: A tensor with shape [..., depth]

  Returns:
    values: a Tensor with shape [...]
    indices: a Tensor with shape [...]
  """
  inputs_max = tf.reduce_max(inputs, axis=-1, keepdims=True)
  mask = tf.to_int32(tf.equal(inputs_max, inputs))
  index = tf.range(tf.shape(inputs)[-1]) * mask
  return tf.squeeze(inputs_max, -1), tf.reduce_max(index, axis=-1) 
Developer: tensorflow, Project: tensor2tensor, Lines: 18, Source: common_layers.py
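
A quick check of the function defined above:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.constant([[0.1, 0.9, 0.3],
                 [0.4, 0.2, 0.8]])
values, indices = top_1_tpu(x)

with tf.Session() as sess:
    print(sess.run([values, indices]))  # [0.9, 0.8] and [1, 2]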

Example 13: call

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import equal [as alias]
def call(self, inputs):
    batch_shape = tf.shape(inputs)[:-1]
    length = tf.shape(inputs)[-1]
    ngram_range_counts = []
    for n in range(self.minval, self.maxval):
      # Reshape inputs from [..., length] to [..., 1, length // n, n], dropping
      # remainder elements. Each n-vector is an ngram.
      reshaped_inputs = tf.reshape(
          inputs[..., :(n * (length // n))],
          tf.concat([batch_shape, [1], (length // n)[tf.newaxis], [n]], 0))
      # Count the number of times each ngram appears in the input. We do so by
      # checking whether each n-vector in the input is equal to each n-vector
      # in a Tensor of all possible ngrams. The comparison is batched between
      # the input Tensor of shape [..., 1, length // n, n] and the ngrams Tensor
      # of shape [..., input_dim**n, 1, n].
      ngrams = tf.reshape(
          list(np.ndindex((self.input_dim,) * n)),
          [1] * (len(inputs.shape)-1) + [self.input_dim**n, 1, n])
      is_ngram = tf.equal(
          tf.reduce_sum(tf.cast(tf.equal(reshaped_inputs, ngrams), tf.int32),
                        axis=-1),
          n)
      ngram_counts = tf.reduce_sum(tf.cast(is_ngram, tf.float32), axis=-1)
      ngram_range_counts.append(ngram_counts)
    return tf.concat(ngram_range_counts, axis=-1) 
Developer: tensorflow, Project: tensor2tensor, Lines: 27, Source: ngram.py
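
The comparison trick is easier to see on a toy case: counting bigrams over a two-symbol vocabulary (a standalone illustration with made-up sizes; in the layer above, minval, maxval and input_dim come from the class constructor, which is not shown):

import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

inputs = tf.constant([[0, 1, 0, 1]])  # [batch=1, length=4], vocabulary {0, 1}
n, input_dim, length = 2, 2, 4

reshaped = tf.reshape(inputs[:, :n * (length // n)], [1, 1, length // n, n])
ngrams = tf.reshape(list(np.ndindex((input_dim,) * n)),
                    [1, input_dim**n, 1, n])  # all 4 possible bigrams
is_ngram = tf.equal(
    tf.reduce_sum(tf.cast(tf.equal(reshaped, ngrams), tf.int32), axis=-1), n)
counts = tf.reduce_sum(tf.cast(is_ngram, tf.float32), axis=-1)

with tf.Session() as sess:
    print(sess.run(counts))  # [[0. 2. 0. 0.]] -- bigram (0, 1) occurs twice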

Example 14: _apply_with_random_selector

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import equal [as alias]
def _apply_with_random_selector(x, func, num_cases):
  """Computes func(x, sel), with sel sampled from [0...num_cases-1].

  Args:
    x: input Tensor.
    func: Python function to apply.
    num_cases: Python int32, number of cases to sample sel from.

  Returns:
    The result of func(x, sel), where func receives the value of the
    selector as a python integer, but sel is sampled dynamically.
  """
  sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
  # Pass the real x only to one of the func calls.
  return control_flow_ops.merge([
      func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
      for case in range(num_cases)
  ])[0] 
Developer: tensorflow, Project: tensor2tensor, Lines: 20, Source: vqa_utils.py
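
A hypothetical call site (distort_color here is a toy stand-in for whatever per-case transform the caller supplies; the original module imports control_flow_ops from tensorflow.python.ops):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

def distort_color(image, color_ordering=0):
    # Toy stand-in: each case applies a different random adjustment.
    if color_ordering == 0:
        return tf.image.random_brightness(image, max_delta=0.1)
    return tf.image.random_saturation(image, lower=0.5, upper=1.5)

image = tf.random_uniform([64, 64, 3])
image = _apply_with_random_selector(
    image, lambda x, ordering: distort_color(x, ordering), num_cases=2)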

Example 15: build

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import equal [as alias]
def build(self, hparams, is_training=True):
    self._total_length = hparams.max_seq_len
    if self._total_length != np.prod(self._level_lengths):
      raise ValueError(
          'The product of the HierarchicalLstmEncoder level lengths (%d) must '
          'equal the padded input sequence length (%d).' % (
              np.prod(self._level_lengths), self._total_length))
    tf.logging.info('\nHierarchical Encoder:\n'
                    '  input length: %d\n'
                    '  level lengths: %s\n',
                    self._total_length,
                    self._level_lengths)
    self._hierarchical_encoders = []
    num_splits = int(np.prod(self._level_lengths))
    for i, l in enumerate(self._level_lengths):
      num_splits //= l
      tf.logging.info('Level %d splits: %d', i, num_splits)
      h_encoder = self._core_encoder_cls()
      h_encoder.build(
          hparams, is_training,
          name_or_scope=tf.VariableScope(
              tf.AUTO_REUSE, 'encoder/hierarchical_level_%d' % i))
      self._hierarchical_encoders.append((num_splits, h_encoder)) 
Developer: magenta, Project: magenta, Lines: 25, Source: lstm_models.py


Note: The tensorflow.compat.v1.equal examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective authors, and the source code remains the copyright of those authors; consult the corresponding project's License before redistributing or using it. Do not reproduce without permission.