

Python v1.less_equal Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.less_equal method in Python. If you are wondering how to use v1.less_equal, what it does, or what real-world code that calls it looks like, the curated examples below should help. You can also explore further usage examples for the tensorflow.compat.v1 module that this method belongs to.


The following presents 11 code examples of the v1.less_equal method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
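Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing the basic element-wise and broadcasting behavior of tf.compat.v1.less_equal. It assumes TF 1.x-style graph execution, e.g. after calling tf.disable_v2_behavior() under TensorFlow 2.

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

a = tf.constant([1, 3, 5])
b = tf.constant([2, 3, 4])
elementwise = tf.less_equal(a, b)  # a <= b element-wise -> [True, True, False]
broadcast = tf.less_equal(a, 3)    # the scalar 3 is broadcast -> [True, True, False]

with tf.Session() as sess:
    print(sess.run(elementwise))   # [ True  True False]
    print(sess.run(broadcast))     # [ True  True False]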

Example 1: pad_to_fixed_size

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import less_equal [as alias]
def pad_to_fixed_size(data, pad_value, output_shape):
  """Pad data to a fixed length at the first dimension.

  Args:
    data: Tensor to be padded to output_shape.
    pad_value: A constant value assigned to the paddings.
    output_shape: The output shape of a 2D tensor.

  Returns:
    The padded tensor with output_shape [max_num_instances, dimension].
  """
  max_num_instances = output_shape[0]
  dimension = output_shape[1]
  data = tf.reshape(data, [-1, dimension])
  num_instances = tf.shape(data)[0]
  assert_length = tf.Assert(
      tf.less_equal(num_instances, max_num_instances), [num_instances])
  with tf.control_dependencies([assert_length]):
    pad_length = max_num_instances - num_instances
  paddings = pad_value * tf.ones([pad_length, dimension])
  padded_data = tf.concat([data, paddings], axis=0)
  padded_data = tf.reshape(padded_data, output_shape)
  return padded_data 
Author: JunweiLiang, Project: Object_Detection_Tracking, Lines: 25, Source file: dataloader.py
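A hypothetical usage sketch (not part of the original project) of the function above: padding three 4-dimensional boxes to a fixed shape of [8, 4], assuming pad_to_fixed_size is defined as shown and a session is available to evaluate the result.

boxes = tf.constant([[0.1, 0.2, 0.5, 0.6],
                     [0.3, 0.3, 0.9, 0.9],
                     [0.0, 0.0, 1.0, 1.0]])
padded = pad_to_fixed_size(boxes, pad_value=-1, output_shape=[8, 4])
# padded has static shape [8, 4]; rows 3..7 are filled with -1. The tf.Assert
# inside pad_to_fixed_size fails at run time if more than 8 rows are supplied.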

Example 2: assert_box_normalized

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import less_equal [as alias]
def assert_box_normalized(boxes, maximum_normalized_coordinate=1.1):
  """Asserts the input box tensor is normalized.

  Args:
    boxes: a tensor of shape [N, 4] where N is the number of boxes.
    maximum_normalized_coordinate: Maximum coordinate value to be considered
      as normalized; defaults to 1.1.

  Returns:
    a tf.Assert op which fails when the input box tensor is not normalized.

  Raises:
    ValueError: When the input box tensor is not normalized.
  """
  box_minimum = tf.reduce_min(boxes)
  box_maximum = tf.reduce_max(boxes)
  return tf.Assert(
      tf.logical_and(
          tf.less_equal(box_maximum, maximum_normalized_coordinate),
          tf.greater_equal(box_minimum, 0)),
      [boxes]) 
Author: tensorflow, Project: models, Lines: 23, Source file: shape_utils.py
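A hypothetical way to wire the assert above into a graph (an assumption, not code from the project): attaching it as a control dependency so that reading the boxes fails at run time if any coordinate falls outside [0, maximum_normalized_coordinate].

boxes = tf.constant([[0.1, 0.2, 0.9, 1.0],
                     [0.0, 0.0, 0.5, 0.5]])
with tf.control_dependencies([assert_box_normalized(boxes)]):
    boxes = tf.identity(boxes)  # evaluating this op triggers the check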

Example 3: test_assert_true

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import less_equal [as alias]
def test_assert_true():
    g = tf.Graph()
    shape = (1, 2)
    with g.as_default():
        x = tf.placeholder(tf.float32, shape=shape, name="input")
        assert_op = tf.Assert(tf.reduce_all(tf.less_equal(x, x)), ["it failed"])

        with tf.Session() as sess:
            x_value = np.random.rand(*shape)
            assert sess.run(assert_op, feed_dict={x: x_value}) is None

        # In TVM, tf.assert is converted to a no-op which is actually a 0,
        # though it should probably be none or an empty tuple.
        #
        # ToDo: It appears that the frontend converter gets confused here and
        # entirely eliminates all operands from main(). Likely because x <= x
        # is always true, so the placeholder can be eliminated. But TF doesn't
        # do that, it's happening in Relay, and that optimization shouldn't
        # affect the arity of the main function. We should have to pass in
        # x_value here.
        np.testing.assert_allclose(0, run_relay(g, {'input': shape}).asnumpy()) 
Author: apache, Project: incubator-tvm, Lines: 23, Source file: test_debugging.py

Example 4: test_assert_true_var_capture

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import less_equal [as alias]
def test_assert_true_var_capture():
    g = tf.Graph()
    with g.as_default():
        x = tf.placeholder(tf.float32, shape=())

        # It turns out that tf.assert() creates a large and complex subgraph if
        # you capture a variable as part of the error message. So we need to
        # test that, too.
        assert_op = tf.Assert(tf.less_equal(x, x), ["it failed", x])

        with tf.Session() as sess:
            x_value = np.random.rand()
            assert sess.run(assert_op, feed_dict={x: x_value}) is None

        # TODO: The frontend converter notes the output of
        # the graph as a boolean, which is not correct - as you can see above,
        # TF believes that the value of this graph is None.
        np.testing.assert_allclose(True,
                                   run_relay(g, None, x_value).asnumpy()) 
Author: apache, Project: incubator-tvm, Lines: 21, Source file: test_debugging.py

Example 5: cumsum

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import less_equal [as alias]
def cumsum(x, axis=0, exclusive=False):
  """TPU hack for tf.cumsum.

  This is equivalent to tf.cumsum and is faster on TPU as of 04/2018 unless
  the axis dimension is very large.

  Args:
    x: a Tensor
    axis: an integer
    exclusive: a boolean

  Returns:
    Tensor of the same shape as x.
  """
  if not is_xla_compiled():
    return tf.cumsum(x, axis=axis, exclusive=exclusive)
  x_shape = shape_list(x)
  rank = len(x_shape)
  length = x_shape[axis]
  my_range = tf.range(length)
  comparator = tf.less if exclusive else tf.less_equal
  mask = tf.cast(
      comparator(tf.expand_dims(my_range, 1), tf.expand_dims(my_range, 0)),
      x.dtype)
  ret = tf.tensordot(x, mask, axes=[[axis], [0]])
  if axis != rank - 1:
    ret = tf.transpose(
        ret,
        list(range(axis)) + [rank - 1] + list(range(axis, rank - 1)))
  return ret 
Author: tensorflow, Project: tensor2tensor, Lines: 32, Source file: common_layers.py
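A minimal sketch of the mask-matrix trick the function above relies on, stripped of the tensor2tensor helpers (is_xla_compiled, shape_list): comparing a range against itself with tf.less_equal builds an upper-triangular mask whose matrix product with x reproduces an inclusive cumulative sum.

x = tf.constant([[1., 2., 3., 4.]])
r = tf.range(4)
mask = tf.cast(tf.less_equal(tf.expand_dims(r, 1), tf.expand_dims(r, 0)),
               x.dtype)  # shape [4, 4]; entry [i, j] is 1 where i <= j
inclusive_cumsum = tf.tensordot(x, mask, axes=[[1], [0]])
# inclusive_cumsum -> [[1., 3., 6., 10.]], matching tf.cumsum(x, axis=1)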

Example 6: sample_with_temperature

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import less_equal [as alias]
def sample_with_temperature(logits, temperature, sampling_keep_top_k=-1):
  """Either argmax or random sampling.

  Args:
    logits: a Tensor.
    temperature: a float; 0.0 = argmax, 1.0 = random sampling.
    sampling_keep_top_k: If not -1, only sample from the top k logits.
  Returns:
    a Tensor with one fewer dimension than logits.
  """
  if temperature == 0.0:
    # TF argmax doesn't handle >5 dimensions, so we reshape here.
    logits_shape = shape_list(logits)
    argmax = tf.argmax(tf.reshape(logits, [-1, logits_shape[-1]]), axis=1)
    return tf.reshape(argmax, logits_shape[:-1])
  else:
    tf.debugging.assert_greater(temperature, 0.0)

    if sampling_keep_top_k != -1:
      if sampling_keep_top_k <= 0:
        raise ValueError("sampling_keep_top_k must either be -1 or positive.")

      vocab_size = shape_list(logits)[1]

      k_largest = contrib.nn().nth_element(
          logits, n=sampling_keep_top_k, reverse=True)
      k_largest = tf.tile(tf.reshape(k_largest, [-1, 1]), [1, vocab_size])

      # Force every position that is not in the top k to have probability near
      # 0 by setting the logit to be very negative.
      logits = tf.where(tf.less_equal(logits, k_largest),
                        tf.ones_like(logits)*-1e6, logits)

    reshaped_logits = (
        tf.reshape(logits, [-1, shape_list(logits)[-1]]) / temperature)
    choices = tf.multinomial(reshaped_logits, 1)
    choices = tf.reshape(choices,
                         shape_list(logits)[:logits.get_shape().ndims - 1])
    return choices 
Author: tensorflow, Project: tensor2tensor, Lines: 41, Source file: common_layers.py
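The top-k filtering above depends on the contrib nth_element helper; here is a minimal, self-contained sketch of just the masking step it feeds into, using an arbitrary threshold instead of the k-th largest logit (an assumption, not the original code).

logits = tf.constant([[2.0, 0.5, 1.5, -1.0]])
threshold = 1.0
masked = tf.where(tf.less_equal(logits, threshold),
                  tf.ones_like(logits) * -1e6, logits)
# masked -> [[2.0, -1e6, 1.5, -1e6]]; positions at or below the threshold get
# a very negative logit, i.e. near-zero probability after softmax sampling.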

Example 7: prune_completely_outside_window

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import less_equal [as alias]
def prune_completely_outside_window(boxlist, window, scope=None):
  """Prunes bounding boxes that fall completely outside of the given window.

  The function clip_to_window prunes bounding boxes that fall
  completely outside the window, but also clips any bounding boxes that
  partially overflow. This function does not clip partially overflowing boxes.

  Args:
    boxlist: a BoxList holding M_in boxes.
    window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]
      of the window
    scope: name scope.

  Returns:
    pruned_boxlist: a new BoxList with all bounding boxes partially or fully in
      the window.
    valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
     in the input tensor.
  """
  with tf.name_scope(scope, 'PruneCompleteleyOutsideWindow'):
    y_min, x_min, y_max, x_max = tf.split(
        value=boxlist.get(), num_or_size_splits=4, axis=1)
    win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
    coordinate_violations = tf.concat([
        tf.greater_equal(y_min, win_y_max), tf.greater_equal(x_min, win_x_max),
        tf.less_equal(y_max, win_y_min), tf.less_equal(x_max, win_x_min)
    ], 1)
    valid_indices = tf.reshape(
        tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])
    return gather(boxlist, valid_indices), valid_indices 
Author: tensorflow, Project: models, Lines: 32, Source file: box_list_ops.py
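A hypothetical sketch of the coordinate-violation test above without the BoxList wrapper (the example boxes are made up): a box is completely outside a [0, 0, 1, 1] window when its maximum edge is at or below the window minimum, or its minimum edge is at or above the window maximum.

boxes = tf.constant([[0.2, 0.2, 0.6, 0.6],     # inside the window
                     [1.2, 0.1, 1.5, 0.4],     # entirely outside in y
                     [-0.3, -0.3, 0.0, 0.0]])  # only touches the window corner
y_min, x_min, y_max, x_max = tf.split(boxes, num_or_size_splits=4, axis=1)
violations = tf.concat([
    tf.greater_equal(y_min, 1.0), tf.greater_equal(x_min, 1.0),
    tf.less_equal(y_max, 0.0), tf.less_equal(x_max, 0.0)], axis=1)
keep = tf.logical_not(tf.reduce_any(violations, axis=1))
# keep -> [True, False, False]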

Example 8: testRandomPixelValueScale

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import less_equal [as alias]
def testRandomPixelValueScale(self):

    def graph_fn():
      preprocessing_options = []
      preprocessing_options.append((preprocessor.normalize_image, {
          'original_minval': 0,
          'original_maxval': 255,
          'target_minval': 0,
          'target_maxval': 1
      }))
      preprocessing_options.append((preprocessor.random_pixel_value_scale, {}))
      images = self.createTestImages()
      tensor_dict = {fields.InputDataFields.image: images}
      tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
      images_min = tf.cast(images, dtype=tf.float32) * 0.9 / 255.0
      images_max = tf.cast(images, dtype=tf.float32) * 1.1 / 255.0
      images = tensor_dict[fields.InputDataFields.image]
      values_greater = tf.greater_equal(images, images_min)
      values_less = tf.less_equal(images, images_max)
      values_true = tf.fill([1, 4, 4, 3], True)
      return [values_greater, values_less, values_true]

    (values_greater_, values_less_,
     values_true_) = self.execute_cpu(graph_fn, [])
    self.assertAllClose(values_greater_, values_true_)
    self.assertAllClose(values_less_, values_true_) 
Author: tensorflow, Project: models, Lines: 28, Source file: preprocessor_test.py

Example 9: provide_dataset

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import less_equal [as alias]
def provide_dataset(self):
    """Provides dataset (audio, labels) of nsynth."""
    length = 64000
    channels = 1

    pitch_counts = self.get_pitch_counts()
    pitches = sorted(pitch_counts.keys())
    label_index_table = tf.lookup.StaticVocabularyTable(
        tf.lookup.KeyValueTensorInitializer(
            keys=pitches,
            values=np.arange(len(pitches)),
            key_dtype=tf.int64,
            value_dtype=tf.int64),
        num_oov_buckets=1)

    def _parse_nsynth(record):
      """Parsing function for NSynth dataset."""
      features = {
          'pitch': tf.FixedLenFeature([1], dtype=tf.int64),
          'audio': tf.FixedLenFeature([length], dtype=tf.float32),
          'qualities': tf.FixedLenFeature([10], dtype=tf.int64),
          'instrument_source': tf.FixedLenFeature([1], dtype=tf.int64),
          'instrument_family': tf.FixedLenFeature([1], dtype=tf.int64),
      }

      example = tf.parse_single_example(record, features)
      wave, label = example['audio'], example['pitch']
      wave = spectral_ops.crop_or_pad(wave[tf.newaxis, :, tf.newaxis],
                                      length,
                                      channels)[0]
      one_hot_label = tf.one_hot(
          label_index_table.lookup(label), depth=len(pitches))[0]
      return wave, one_hot_label, label, example['instrument_source']

    dataset = self._get_dataset_from_path()
    dataset = dataset.map(_parse_nsynth, num_parallel_calls=4)

    # Filter just acoustic instruments (as in the paper)
    # (0=acoustic, 1=electronic, 2=synthetic)
    dataset = dataset.filter(lambda w, l, p, s: tf.equal(s, 0)[0])
    # Filter just pitches 24-84
    dataset = dataset.filter(lambda w, l, p, s: tf.greater_equal(p, 24)[0])
    dataset = dataset.filter(lambda w, l, p, s: tf.less_equal(p, 84)[0])
    dataset = dataset.map(lambda w, l, p, s: (w, l))
    return dataset 
Author: magenta, Project: magenta, Lines: 47, Source file: datasets.py
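A hedged, toy-sized sketch of the same pitch-range filtering on a plain tf.data pipeline (integer tensors only, no NSynth records; the values are made up), using the same greater_equal/less_equal filter pattern as above.

pitches = tf.data.Dataset.from_tensor_slices(
    tf.constant([[20], [24], [60], [90]], dtype=tf.int64))
pitches = pitches.filter(lambda p: tf.greater_equal(p, 24)[0])
pitches = pitches.filter(lambda p: tf.less_equal(p, 84)[0])
# Remaining elements: [24] and [60].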

Example 10: packed_parallel_tsv_dataset

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import less_equal [as alias]
def packed_parallel_tsv_dataset(dataset=gin.REQUIRED,
                                dataset_split=gin.REQUIRED,
                                batch_size=None,
                                sequence_length=gin.REQUIRED,
                                vocabulary=gin.REQUIRED,
                                append_eos=True,
                                eos_id=1,
                                max_encoded_len=0):
  """Reads parallel tab-separated text file. One example per line."""
  del batch_size
  del dataset_split

  def _parse_fn(record):  # pylint: disable=missing-docstring
    tokens = tf.decode_csv(
        record,
        record_defaults=[""] * 2,
        field_delim="\t",
        use_quote_delim=False)
    return {"inputs": tokens[0], "targets": tokens[1]}

  def _encode_fn(features):  # pylint: disable=missing-docstring
    inputs_vocabulary = vocabulary[0] if isinstance(vocabulary,
                                                    tuple) else vocabulary
    targets_vocabulary = vocabulary[1] if isinstance(vocabulary,
                                                     tuple) else vocabulary
    inputs_enc = inputs_vocabulary.encode_tf(features["inputs"])
    targets_enc = targets_vocabulary.encode_tf(features["targets"])
    if append_eos:
      inputs_enc = tf.concat([tf.cast(inputs_enc, tf.int64), [eos_id]], 0)
      targets_enc = tf.concat([tf.cast(targets_enc, tf.int64), [eos_id]], 0)
    return {"inputs": inputs_enc, "targets": targets_enc}

  dataset = dataset.map(
      _parse_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
  dataset = dataset.map(
      _encode_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)

  def _filter_fn(features):  # pylint: disable=missing-docstring
    return tf.less_equal(
        tf.reduce_max(
            tf.stack([tf.size(v) for v in features.values()], axis=0)),
        max_encoded_len)

  if max_encoded_len:
    tf.logging.info("Filtering encoded examples longer than %d" %
                    max_encoded_len)
    dataset = dataset.filter(_filter_fn)

  return pack_or_pad(dataset, sequence_length) 
Author: tensorflow, Project: mesh, Lines: 51, Source file: dataset.py
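A minimal sketch of the length filter above (the feature values here are made up): an example is kept only when its longest feature tensor fits within max_encoded_len.

features = {"inputs": tf.constant([3, 7, 1]), "targets": tf.constant([5, 2])}
max_encoded_len = 4
keep = tf.less_equal(
    tf.reduce_max(
        tf.stack([tf.size(v) for v in features.values()], axis=0)),
    max_encoded_len)
# keep -> True, since max(size=3, size=2) = 3 <= 4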

Example 11: _subsample_selection_to_desired_neg_pos_ratio

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import less_equal [as alias]
def _subsample_selection_to_desired_neg_pos_ratio(self,
                                                    indices,
                                                    match,
                                                    max_negatives_per_positive,
                                                    min_negatives_per_image=0):
    """Subsample a collection of selected indices to a desired neg:pos ratio.

    This function takes a subset of M indices (indexing into a large anchor
    collection of N anchors where M<N) which are labeled as positive/negative
    via a Match object (matched indices are positive, unmatched indices
    are negative).  It returns a subset of the provided indices retaining all
    positives as well as up to the first K negatives, where:
      K = floor(max_negatives_per_positive * num_positives).

    For example, if indices=[2, 4, 5, 7, 9, 10] (indexing into 12 anchors),
    with positives=[2, 5] and negatives=[4, 7, 9, 10] and
    num_negatives_per_positive=1, then the returned subset of indices
    is [2, 4, 5, 7].

    Args:
      indices: An integer tensor of shape [M] representing a collection
        of selected anchor indices
      match: A matcher.Match object encoding the match between anchors and
        groundtruth boxes for a given image, with rows of the Match objects
        corresponding to groundtruth boxes and columns corresponding to anchors.
      max_negatives_per_positive: (float) maximum number of negatives for
        each positive anchor.
      min_negatives_per_image: minimum number of negative anchors for a given
        image. Allow sampling negatives in image without any positive anchors.

    Returns:
      selected_indices: An integer tensor of shape [M'] representing a
        collection of selected anchor indices with M' <= M.
      num_positives: An integer tensor representing the number of positive
        examples in selected set of indices.
      num_negatives: An integer tensor representing the number of negative
        examples in selected set of indices.
    """
    positives_indicator = tf.gather(match.matched_column_indicator(), indices)
    negatives_indicator = tf.gather(match.unmatched_column_indicator(), indices)
    num_positives = tf.reduce_sum(tf.cast(positives_indicator, dtype=tf.int32))
    max_negatives = tf.maximum(
        min_negatives_per_image,
        tf.cast(max_negatives_per_positive *
                tf.cast(num_positives, dtype=tf.float32), dtype=tf.int32))
    topk_negatives_indicator = tf.less_equal(
        tf.cumsum(tf.cast(negatives_indicator, dtype=tf.int32)), max_negatives)
    subsampled_selection_indices = tf.where(
        tf.logical_or(positives_indicator, topk_negatives_indicator))
    num_negatives = tf.size(subsampled_selection_indices) - num_positives
    return (tf.reshape(tf.gather(indices, subsampled_selection_indices), [-1]),
            num_positives, num_negatives) 
Author: tensorflow, Project: models, Lines: 54, Source file: losses.py
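A minimal sketch (without the Match object) of the cumsum + less_equal trick the method above uses to retain only the first K negatives; the indicator values here are made up.

negatives_indicator = tf.constant([False, True, False, True, True, True])
max_negatives = 2
first_k_negatives = tf.less_equal(
    tf.cumsum(tf.cast(negatives_indicator, tf.int32)), max_negatives)
# first_k_negatives -> [True, True, True, True, False, False]; ANDed with
# negatives_indicator it keeps only the first two negatives (positions 1, 3).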


Note: The tensorflow.compat.v1.less_equal method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors; please follow each project's license when distributing or using it. Do not reproduce without permission.