

Python v1.logical_or Method Code Examples

This article compiles typical usage examples of the tensorflow.compat.v1.logical_or method in Python. If you are wondering how to use v1.logical_or, what it does, or want to see it in real code, the curated examples below may help. You can also explore further usage examples from the tensorflow.compat.v1 module.


The following presents 14 code examples of the v1.logical_or method, sorted by popularity by default.
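
Before the examples, here is a minimal standalone sketch (not taken from any of the projects below) of what the method itself computes: tf.compat.v1.logical_or performs an element-wise boolean OR with broadcasting, and runs eagerly under TensorFlow 2.x through the compat.v1 module.

import tensorflow.compat.v1 as tf

a = tf.constant([True, False, True, False])
b = tf.constant([True, True, False, False])
result = tf.logical_or(a, b)  # [True, True, True, False]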

Example 1: __init__

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import logical_or [as alias]
def __init__(self, regularizers_to_group):
    """Creates an instance.

    Args:
      regularizers_to_group: A list of generic_regularizers.OpRegularizer
        objects. Their regularization_vectors (and alive_vectors) are expected
        to be of the same length.

    Raises:
      ValueError: If regularizers_to_group has fewer than 2 elements.
    """
    if len(regularizers_to_group) < 2:
      raise ValueError('Groups must be of at least size 2.')

    first = regularizers_to_group[0]
    regularization_vector = first.regularization_vector
    alive_vector = first.alive_vector
    for index in range(1, len(regularizers_to_group)):
      regularizer = regularizers_to_group[index]
      regularization_vector = tf.maximum(regularization_vector,
                                         regularizer.regularization_vector)
      alive_vector = tf.logical_or(alive_vector, regularizer.alive_vector)
    self._regularization_vector = regularization_vector
    self._alive_vector = alive_vector 
Developer: google-research, Project: morph-net, Lines of code: 26, Source: grouping_regularizers.py
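
A toy sketch (hypothetical vectors, not part of morph-net) of the grouping step above: regularization vectors are merged with tf.maximum and alive vectors with tf.logical_or, so a channel stays alive if it is alive in any member of the group.

import tensorflow.compat.v1 as tf

reg_a = tf.constant([0.1, 0.5, 0.0])
reg_b = tf.constant([0.3, 0.2, 0.4])
alive_a = tf.constant([True, True, False])
alive_b = tf.constant([True, False, True])

grouped_regularization = tf.maximum(reg_a, reg_b)  # [0.3, 0.5, 0.4]
grouped_alive = tf.logical_or(alive_a, alive_b)    # [True, True, True]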

Example 2: _build

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import logical_or [as alias]
def _build(self, inputs, labels):

    def cond(i, unused_attack, success):
      # If we are already successful, we break.
      return tf.logical_and(i < self._num_restarts,
                            tf.logical_not(tf.reduce_all(success)))

    def body(i, attack, success):
      new_attack = self._inner_attack(inputs, labels)
      new_success = self._inner_attack.success
      # The first iteration always sets the attack.
      use_new_values = tf.logical_or(tf.equal(i, 0), new_success)
      return (i + 1,
              tf.where(use_new_values, new_attack, attack),
              tf.logical_or(success, new_success))

    _, self._attack, self._success = tf.while_loop(
        cond, body, back_prop=False, parallel_iterations=1,
        loop_vars=[
            tf.constant(0, dtype=tf.int32),
            inputs,
            tf.zeros([tf.shape(inputs)[0]], dtype=tf.bool),
        ])
    self._logits = self._eval_fn(self._attack, mode='final')
    return self._attack 
Developer: deepmind, Project: interval-bound-propagation, Lines of code: 27, Source: attacks.py
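
A toy sketch (made-up success flags) of the update rule in the loop body above: on the first restart (i == 0) every example takes the new attack; afterwards only newly successful examples are overwritten.

import tensorflow.compat.v1 as tf

i = tf.constant(1)
new_success = tf.constant([True, False, False])
use_new_values = tf.logical_or(tf.equal(i, 0), new_success)  # [True, False, False]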

Example 3: image_corruption

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import logical_or [as alias]
def image_corruption(
    image, label, reso, image_corrupt_ratio_mean, image_corrupt_ratio_stddev):
  """Randomly drop non-lesion pixels."""

  if image_corrupt_ratio_mean < 0.000001 and (
      image_corrupt_ratio_stddev < 0.000001):
    return image

  # Corrupt non-lesion region according to keep_mask.
  keep_mask = _gen_rand_mask(
      1 - image_corrupt_ratio_mean,
      image_corrupt_ratio_stddev,
      reso // 3, image.shape)

  keep_mask = tf.logical_or(tf.greater(label, 1.5), keep_mask)
  image *= tf.cast(keep_mask, tf.float32)

  return image 
Developer: tensorflow, Project: mesh, Lines of code: 20, Source: data_aug_lib.py
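
A toy sketch (hypothetical labels and mask) of the keep-mask logic above: lesion pixels (label > 1.5) are always kept, while other pixels survive only where the random keep_mask is True.

import tensorflow.compat.v1 as tf

label = tf.constant([0.0, 2.0, 1.0, 3.0])
keep_mask = tf.constant([False, False, True, False])
keep_mask = tf.logical_or(tf.greater(label, 1.5), keep_mask)  # [False, True, True, True]
image = tf.constant([0.1, 0.2, 0.3, 0.4]) * tf.cast(keep_mask, tf.float32)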

Example 4: _sequence_correct

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import logical_or [as alias]
def _sequence_correct(labels, predictions):
  """Computes a per-example sequence accuracy."""
  target_decode_steps = decode_utils.decode_steps_from_labels(
      labels, trim_start_symbol=True)
  predicted_decode_steps = decode_utils.decode_steps_from_predictions(
      predictions)

  decode_utils.assert_shapes_match(target_decode_steps, predicted_decode_steps)

  equal_tokens = decode_utils.compare_decode_steps(target_decode_steps,
                                                   predicted_decode_steps)
  target_len = labels["target_len"] - 1
  loss_mask = tf.sequence_mask(
      lengths=tf.to_int32(target_len),
      maxlen=tf.to_int32(tf.shape(equal_tokens)[1]))
  equal_tokens = tf.logical_or(equal_tokens, tf.logical_not(loss_mask))
  all_equal = tf.cast(tf.reduce_all(equal_tokens, 1), tf.float32)
  return all_equal 
Developer: google-research, Project: language, Lines of code: 20, Source: metrics.py
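
A toy sketch (hypothetical token comparisons) of the masking trick above: positions beyond the target length are forced to True via logical_or with the inverted mask, so padding never breaks the exact-match sequence accuracy.

import tensorflow.compat.v1 as tf

equal_tokens = tf.constant([[True, True, False], [True, False, True]])
loss_mask = tf.sequence_mask(lengths=[2, 3], maxlen=3)
equal_or_padded = tf.logical_or(equal_tokens, tf.logical_not(loss_mask))
all_equal = tf.cast(tf.reduce_all(equal_or_padded, 1), tf.float32)  # [1.0, 0.0]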

Example 5: compare_generating_steps

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import logical_or [as alias]
def compare_generating_steps(target_decode_steps, predicted_decode_steps):
  """Compare generating steps only but ignoring target copying steps.

  Args:
    target_decode_steps: Target DecodeSteps. Each tensor is expected to have
      shape [batch_size, output_length].
    predicted_decode_steps: Predicted DecodeSteps. Each tensor is expected to
      have shape [batch_size, output_length].

  Returns:
    A tensor of bools indicating whether generating steps are equal.
    Copy steps will have value True.
  """
  # Set all copying steps to True, since we only care about generating steps.
  return tf.logical_or(
      tf.not_equal(target_decode_steps.action_types, constants.GENERATE_ACTION),
      tf.logical_and(
          tf.equal(target_decode_steps.action_types,
                   predicted_decode_steps.action_types),
          tf.equal(target_decode_steps.action_ids,
                   predicted_decode_steps.action_ids))) 
Developer: google-research, Project: language, Lines of code: 23, Source: decode_utils.py
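
A toy sketch (made-up action types and ids; GENERATE_ACTION is assumed to be 1 here purely for illustration) of the comparison above: copy steps are forced to True, and generate steps compare both the action type and the action id.

import tensorflow.compat.v1 as tf

GENERATE_ACTION = 1  # hypothetical value for illustration
target_types = tf.constant([[1, 0, 1]])
target_ids = tf.constant([[5, 9, 7]])
predicted_types = tf.constant([[1, 1, 1]])
predicted_ids = tf.constant([[5, 9, 8]])

equal_steps = tf.logical_or(
    tf.not_equal(target_types, GENERATE_ACTION),
    tf.logical_and(tf.equal(target_types, predicted_types),
                   tf.equal(target_ids, predicted_ids)))  # [[True, True, False]]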

Example 6: _upper_bound_grad

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import logical_or [as alias]
def _upper_bound_grad(op, grad):
  """Gradient for `upper_bound` if `gradient == 'identity_if_towards'`.

  Args:
    op: The op for which to calculate a gradient.
    grad: Gradient with respect to the output of the op.

  Returns:
    Gradient with respect to the inputs of the op.
  """
  inputs, bound = op.inputs
  pass_through_if = tf.logical_or(inputs <= bound, grad > 0)
  return [tf.cast(pass_through_if, grad.dtype) * grad, None] 
Developer: tensorflow, Project: compression, Lines of code: 15, Source: math_ops.py
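
A toy sketch (made-up inputs and gradients) of the 'identity_if_towards' rule above: the gradient passes through when the input is already at or below the bound, or when the gradient points back toward the feasible region (grad > 0).

import tensorflow.compat.v1 as tf

inputs = tf.constant([0.5, 2.0, 3.0])
bound = tf.constant(1.0)
grad = tf.constant([-1.0, 1.0, -1.0])

pass_through_if = tf.logical_or(inputs <= bound, grad > 0)
masked_grad = tf.cast(pass_through_if, grad.dtype) * grad  # [-1.0, 1.0, 0.0]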

Example 7: _lower_bound_grad

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import logical_or [as alias]
def _lower_bound_grad(op, grad):
  """Gradient for `lower_bound` if `gradient == 'identity_if_towards'`.

  Args:
    op: The op for which to calculate a gradient.
    grad: Gradient with respect to the output of the op.

  Returns:
    Gradient with respect to the inputs of the op.
  """
  inputs, bound = op.inputs
  pass_through_if = tf.logical_or(inputs >= bound, grad < 0)
  return [tf.cast(pass_through_if, grad.dtype) * grad, None] 
Developer: tensorflow, Project: compression, Lines of code: 15, Source: math_ops.py

Example 8: _get_values_from_start_and_end

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import logical_or [as alias]
def _get_values_from_start_and_end(self, input_tensor, num_start_samples,
                                     num_end_samples, total_num_samples):
    """slices num_start_samples and last num_end_samples from input_tensor.

    Args:
      input_tensor: An int32 tensor of shape [N] to be sliced.
      num_start_samples: Number of examples to be sliced from the beginning
        of the input tensor.
      num_end_samples: Number of examples to be sliced from the end of the
        input tensor.
      total_num_samples: Sum of num_start_samples and num_end_samples. This
        should be a scalar.

    Returns:
      A tensor containing the first num_start_samples and last num_end_samples
      from input_tensor.

    """
    input_length = tf.shape(input_tensor)[0]
    start_positions = tf.less(tf.range(input_length), num_start_samples)
    end_positions = tf.greater_equal(
        tf.range(input_length), input_length - num_end_samples)
    selected_positions = tf.logical_or(start_positions, end_positions)
    selected_positions = tf.cast(selected_positions, tf.float32)
    indexed_positions = tf.multiply(tf.cumsum(selected_positions),
                                    selected_positions)
    one_hot_selector = tf.one_hot(tf.cast(indexed_positions, tf.int32) - 1,
                                  total_num_samples,
                                  dtype=tf.float32)
    return tf.cast(tf.tensordot(tf.cast(input_tensor, tf.float32),
                                one_hot_selector, axes=[0, 0]), tf.int32) 
Developer: tensorflow, Project: models, Lines of code: 33, Source: balanced_positive_negative_sampler.py
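
A standalone toy sketch (hypothetical sizes) of the position selection above: logical_or merges a "first num_start_samples" mask with a "last num_end_samples" mask over the index range.

import tensorflow.compat.v1 as tf

input_length = 6
num_start_samples, num_end_samples = 2, 2
positions = tf.range(input_length)
start_positions = tf.less(positions, num_start_samples)
end_positions = tf.greater_equal(positions, input_length - num_end_samples)
selected_positions = tf.logical_or(start_positions, end_positions)
# [True, True, False, False, True, True]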

Example 9: test_logical_or

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import logical_or [as alias]
def test_logical_or():
    with tf.Graph().as_default():
        in1 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name='in1')
        in2 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name='in2')
        out = tf.logical_or(in1, in2, name='out')
        in_data1 = np.random.choice(
            a=[False, True], size=(1, 4, 4, 3)).astype('bool')
        in_data2 = np.random.choice(
            a=[False, True], size=(1, 4, 4, 3)).astype('bool')
        compare_tf_with_tvm([in_data1, in_data2], ['in1:0', 'in2:0'], 'out:0') 
Developer: apache, Project: incubator-tvm, Lines of code: 12, Source: test_forward.py

Example 10: _top_p_sample

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import logical_or [as alias]
def _top_p_sample(logits, ignore_ids=None, num_samples=1, p=0.9):
    """
    Does top-p sampling. If ignore_ids is set, the corresponding logits are zeroed out.
    :param logits: [batch_size, vocab_size] tensor
    :param ignore_ids: [vocab_size] one-hot representation of the indices we'd like to ignore and never predict,
                        like padding maybe
    :param p: top-p threshold to use, either a float or a [batch_size] vector
    :return: [batch_size, num_samples] samples

    # TODO FIGURE OUT HOW TO DO THIS ON TPUS. IT'S HELLA SLOW RIGHT NOW, DUE TO ARGSORT I THINK
    """
    with tf.variable_scope('top_p_sample'):
        batch_size, vocab_size = get_shape_list(logits, expected_rank=2)

        probs = tf.nn.softmax(logits if ignore_ids is None else logits - tf.cast(ignore_ids[None], tf.float32) * 1e10,
                              axis=-1)

        if isinstance(p, float) and p > 0.999999:
            # Don't do top-p sampling in this case
            print("Top-p sampling DISABLED", flush=True)
            return {
                'probs': probs,
                'sample': tf.random.categorical(
                    logits=logits if ignore_ids is None else logits - tf.cast(ignore_ids[None], tf.float32) * 1e10,
                    num_samples=num_samples, dtype=tf.int32),
            }

        # [batch_size, vocab_perm]
        indices = tf.argsort(probs, direction='DESCENDING')
        cumulative_probabilities = tf.math.cumsum(tf.batch_gather(probs, indices), axis=-1, exclusive=False)

        # Find the top-p cutoff index. Careful: we don't want to cut off everything!
        # result will be [batch_size, vocab_perm]
        p_expanded = p if isinstance(p, float) else p[:, None]
        exclude_mask = tf.logical_not(
            tf.logical_or(cumulative_probabilities < p_expanded, tf.range(vocab_size)[None] < 1))

        # OPTION A - sample in the sorted space, then unsort.
        logits_to_use = tf.batch_gather(logits, indices) - tf.cast(exclude_mask, tf.float32) * 1e10
        sample_perm = tf.random.categorical(logits=logits_to_use, num_samples=num_samples)
        sample = tf.batch_gather(indices, sample_perm)

        # OPTION B - unsort first - Indices need to go back to 0 -> N-1 -- then sample
        # unperm_indices = tf.argsort(indices, direction='ASCENDING')
        # include_mask_unperm = tf.batch_gather(include_mask, unperm_indices)
        # logits_to_use = logits - (1 - tf.cast(include_mask_unperm, tf.float32)) * 1e10
        # sample = tf.random.categorical(logits=logits_to_use, num_samples=num_samples, dtype=tf.int32)

    return {
        'probs': probs,
        'sample': sample,
    } 
Developer: imcaspar, Project: gpt2-ml, Lines of code: 54, Source: modeling.py
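
A simplified one-dimensional sketch (toy probabilities) of the top-p cutoff above: tokens are kept while the cumulative sorted probability is still below p, and the logical_or with `range < 1` guarantees the single best token always survives.

import tensorflow.compat.v1 as tf

sorted_probs = tf.constant([0.5, 0.3, 0.15, 0.05])
cumulative = tf.math.cumsum(sorted_probs)              # [0.5, 0.8, 0.95, 1.0]
p = 0.9
keep = tf.logical_or(cumulative < p, tf.range(4) < 1)  # [True, True, False, False]
exclude_mask = tf.logical_not(keep)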

Example 11: maybe_split_sequence_lengths

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import logical_or [as alias]
def maybe_split_sequence_lengths(sequence_length, num_splits, total_length):
  """Validates and splits `sequence_length`, if necessary.

  Returned value must be used in graph for all validations to be executed.

  Args:
    sequence_length: A batch of sequence lengths, either sized `[batch_size]`
      and equal to either 0 or `total_length`, or sized
      `[batch_size, num_splits]`.
    num_splits: The scalar number of splits of the full sequences.
    total_length: The scalar total sequence length (potentially padded).

  Returns:
    sequence_length: If input shape was `[batch_size, num_splits]`, returns the
      same Tensor. Otherwise, returns a Tensor of that shape with each input
      length in the batch divided by `num_splits`.
  Raises:
    ValueError: If `sequence_length` is not shaped `[batch_size]` or
      `[batch_size, num_splits]`.
    tf.errors.InvalidArgumentError: If `sequence_length` is shaped
      `[batch_size]` and all values are not either 0 or `total_length`.
  """
  if sequence_length.shape.ndims == 1:
    if total_length % num_splits != 0:
      raise ValueError(
          '`total_length` must be evenly divisible by `num_splits`.')
    with tf.control_dependencies(
        [tf.Assert(
            tf.reduce_all(
                tf.logical_or(tf.equal(sequence_length, 0),
                              tf.equal(sequence_length, total_length))),
            data=[sequence_length])]):
      sequence_length = (
          tf.tile(tf.expand_dims(sequence_length, axis=1), [1, num_splits]) //
          num_splits)
  elif sequence_length.shape.ndims == 2:
    with tf.control_dependencies([
        tf.assert_less_equal(
            sequence_length,
            tf.constant(total_length // num_splits, tf.int32),
            message='Segment length cannot be more than '
                    '`total_length / num_splits`.')]):
      sequence_length = tf.identity(sequence_length)
    sequence_length.set_shape([sequence_length.shape[0], num_splits])
  else:
    raise ValueError(
        'Sequence lengths must be given as a vector or a 2D Tensor whose '
        'second dimension size matches its initial hierarchical split. Got '
        'shape: %s' % sequence_length.shape.as_list())
  return sequence_length 
Developer: magenta, Project: magenta, Lines of code: 52, Source: lstm_utils.py
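
A toy sketch (hypothetical lengths) of the validity check above: for a 1-D input, every per-example length must be either 0 or exactly total_length before the even split is applied.

import tensorflow.compat.v1 as tf

total_length = 8
sequence_length = tf.constant([8, 0, 8])
is_valid = tf.reduce_all(
    tf.logical_or(tf.equal(sequence_length, 0),
                  tf.equal(sequence_length, total_length)))  # True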

Example 12: next_inputs

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import logical_or [as alias]
def next_inputs(self, time, outputs, state, sample_ids, name=None):
    with tf.name_scope(name, "ScheduledOutputTrainingHelperNextInputs",
                       [time, outputs, state, sample_ids]):
      (finished, base_next_inputs, state) = (
          super(ScheduledOutputTrainingHelper, self).next_inputs(
              time=time,
              outputs=outputs,
              state=state,
              sample_ids=sample_ids,
              name=name))
      sample_ids = tf.cast(sample_ids, tf.bool)

      def maybe_sample():
        """Perform scheduled sampling."""

        def maybe_concatenate_auxiliary_inputs(outputs_, indices=None):
          """Concatenate outputs with auxiliary inputs, if they exist."""
          if self._auxiliary_input_tas is None:
            return outputs_

          next_time = time + 1
          auxiliary_inputs = tf.nest.map_structure(
              lambda ta: ta.read(next_time), self._auxiliary_input_tas)
          if indices is not None:
            auxiliary_inputs = tf.gather_nd(auxiliary_inputs, indices)
          return tf.nest.map_structure(
              lambda x, y: tf.concat((x, y), -1),
              outputs_, auxiliary_inputs)

        if self._next_inputs_fn is None:
          return tf.where(
              sample_ids, maybe_concatenate_auxiliary_inputs(outputs),
              base_next_inputs)

        where_sampling = tf.cast(
            tf.where(sample_ids), tf.int32)
        where_not_sampling = tf.cast(
            tf.where(tf.logical_not(sample_ids)), tf.int32)
        outputs_sampling = tf.gather_nd(outputs, where_sampling)
        inputs_not_sampling = tf.gather_nd(base_next_inputs,
                                           where_not_sampling)
        sampled_next_inputs = maybe_concatenate_auxiliary_inputs(
            self._next_inputs_fn(outputs_sampling), where_sampling)

        base_shape = tf.shape(base_next_inputs)
        return (tf.scatter_nd(indices=where_sampling,
                              updates=sampled_next_inputs,
                              shape=base_shape)
                + tf.scatter_nd(indices=where_not_sampling,
                                updates=inputs_not_sampling,
                                shape=base_shape))

      all_finished = tf.reduce_all(finished)
      no_samples = tf.logical_not(tf.reduce_any(sample_ids))
      next_inputs = tf.cond(
          tf.logical_or(all_finished, no_samples),
          lambda: base_next_inputs, maybe_sample)
      return (finished, next_inputs, state) 
Developer: magenta, Project: magenta, Lines of code: 60, Source: seq2seq.py
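
A toy sketch (made-up flags) of the early-exit condition above: scheduled sampling is skipped entirely when every sequence has finished or when no position was chosen for sampling.

import tensorflow.compat.v1 as tf

finished = tf.constant([True, True])
sample_ids = tf.constant([False, False])
skip_sampling = tf.logical_or(tf.reduce_all(finished),
                              tf.logical_not(tf.reduce_any(sample_ids)))  # True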

Example 13: _subsample_selection_to_desired_neg_pos_ratio

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import logical_or [as alias]
def _subsample_selection_to_desired_neg_pos_ratio(self,
                                                    indices,
                                                    match,
                                                    max_negatives_per_positive,
                                                    min_negatives_per_image=0):
    """Subsample a collection of selected indices to a desired neg:pos ratio.

    This function takes a subset of M indices (indexing into a large anchor
    collection of N anchors where M<N) which are labeled as positive/negative
    via a Match object (matched indices are positive, unmatched indices
    are negative).  It returns a subset of the provided indices retaining all
    positives as well as up to the first K negatives, where:
      K=floor(num_negative_per_positive * num_positives).

    For example, if indices=[2, 4, 5, 7, 9, 10] (indexing into 12 anchors),
    with positives=[2, 5] and negatives=[4, 7, 9, 10] and
    num_negatives_per_positive=1, then the returned subset of indices
    is [2, 4, 5, 7].

    Args:
      indices: An integer tensor of shape [M] representing a collection
        of selected anchor indices
      match: A matcher.Match object encoding the match between anchors and
        groundtruth boxes for a given image, with rows of the Match objects
        corresponding to groundtruth boxes and columns corresponding to anchors.
      max_negatives_per_positive: (float) maximum number of negatives for
        each positive anchor.
      min_negatives_per_image: minimum number of negative anchors for a given
        image. Allow sampling negatives in image without any positive anchors.

    Returns:
      selected_indices: An integer tensor of shape [M'] representing a
        collection of selected anchor indices with M' <= M.
      num_positives: An integer tensor representing the number of positive
        examples in selected set of indices.
      num_negatives: An integer tensor representing the number of negative
        examples in selected set of indices.
    """
    positives_indicator = tf.gather(match.matched_column_indicator(), indices)
    negatives_indicator = tf.gather(match.unmatched_column_indicator(), indices)
    num_positives = tf.reduce_sum(tf.cast(positives_indicator, dtype=tf.int32))
    max_negatives = tf.maximum(
        min_negatives_per_image,
        tf.cast(max_negatives_per_positive *
                tf.cast(num_positives, dtype=tf.float32), dtype=tf.int32))
    topk_negatives_indicator = tf.less_equal(
        tf.cumsum(tf.cast(negatives_indicator, dtype=tf.int32)), max_negatives)
    subsampled_selection_indices = tf.where(
        tf.logical_or(positives_indicator, topk_negatives_indicator))
    num_negatives = tf.size(subsampled_selection_indices) - num_positives
    return (tf.reshape(tf.gather(indices, subsampled_selection_indices), [-1]),
            num_positives, num_negatives) 
Developer: tensorflow, Project: models, Lines of code: 54, Source: losses.py
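
A toy sketch (hypothetical indicators, mirroring the docstring example with two positives and max_negatives = 2) of the final selection above: an index is kept if it is a positive or falls within the first K negatives.

import tensorflow.compat.v1 as tf

positives_indicator = tf.constant([True, False, True, False, False, False])
negatives_indicator = tf.constant([False, True, False, True, True, True])
max_negatives = 2
topk_negatives_indicator = tf.less_equal(
    tf.cumsum(tf.cast(negatives_indicator, tf.int32)), max_negatives)
keep = tf.logical_or(positives_indicator, topk_negatives_indicator)
# [True, True, True, True, False, False]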

Example 14: subsample

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import logical_or [as alias]
def subsample(self, indicator, batch_size, labels, scope=None):
    """Returns subsampled minibatch.

    Args:
      indicator: boolean tensor of shape [N] whose True entries can be sampled.
      batch_size: desired batch size. If None, keeps all positive samples and
        randomly selects negative samples so that the positive sample fraction
        matches self._positive_fraction. It cannot be None if is_static is True.
      labels: boolean tensor of shape [N] denoting positive(=True) and negative
          (=False) examples.
      scope: name scope.

    Returns:
      sampled_idx_indicator: boolean tensor of shape [N], True for entries which
        are sampled.

    Raises:
      ValueError: if labels and indicator are not 1D boolean tensors.
    """
    if len(indicator.get_shape().as_list()) != 1:
      raise ValueError('indicator must be 1 dimensional, got a tensor of '
                       'shape %s' % indicator.get_shape())
    if len(labels.get_shape().as_list()) != 1:
      raise ValueError('labels must be 1 dimensional, got a tensor of '
                       'shape %s' % labels.get_shape())
    if labels.dtype != tf.bool:
      raise ValueError('labels should be of type bool. Received: %s' %
                       labels.dtype)
    if indicator.dtype != tf.bool:
      raise ValueError('indicator should be of type bool. Received: %s' %
                       indicator.dtype)
    with tf.name_scope(scope, 'BalancedPositiveNegativeSampler'):
      if self._is_static:
        return self._static_subsample(indicator, batch_size, labels)

      else:
        # Only sample from indicated samples
        negative_idx = tf.logical_not(labels)
        positive_idx = tf.logical_and(labels, indicator)
        negative_idx = tf.logical_and(negative_idx, indicator)

        # Sample positive and negative samples separately
        if batch_size is None:
          max_num_pos = tf.reduce_sum(tf.cast(positive_idx, dtype=tf.int32))
        else:
          max_num_pos = int(self._positive_fraction * batch_size)
        sampled_pos_idx = self.subsample_indicator(positive_idx, max_num_pos)
        num_sampled_pos = tf.reduce_sum(tf.cast(sampled_pos_idx, tf.int32))
        if batch_size is None:
          negative_positive_ratio = (
              1 - self._positive_fraction) / self._positive_fraction
          max_num_neg = tf.cast(
              negative_positive_ratio *
              tf.cast(num_sampled_pos, dtype=tf.float32),
              dtype=tf.int32)
        else:
          max_num_neg = batch_size - num_sampled_pos
        sampled_neg_idx = self.subsample_indicator(negative_idx, max_num_neg)

        return tf.logical_or(sampled_pos_idx, sampled_neg_idx) 
Developer: tensorflow, Project: models, Lines of code: 62, Source: balanced_positive_negative_sampler.py
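
A toy sketch (hypothetical masks) of the final merge above: the sampled positive and negative indicators are disjoint, so logical_or simply combines them into a single minibatch indicator.

import tensorflow.compat.v1 as tf

sampled_pos_idx = tf.constant([True, False, False, False])
sampled_neg_idx = tf.constant([False, False, True, True])
sampled_idx_indicator = tf.logical_or(sampled_pos_idx, sampled_neg_idx)
# [True, False, True, True]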


Note: The tensorflow.compat.v1.logical_or method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by various developers, and copyright of the source code belongs to the original authors. Please consult the corresponding project's License before distributing or using the code; do not reproduce without permission.