

Python tensorflow.reduce_min Method Code Examples

This article collects typical usage examples of the tensorflow.reduce_min method in Python. If you are wondering what tensorflow.reduce_min does, how to call it, or would like to see it used in context, the curated code examples below should help. You can also explore further usage examples of the tensorflow module to which this method belongs.


The following presents 15 code examples of the tensorflow.reduce_min method, sorted by popularity by default.
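
Before diving into the examples, here is a minimal standalone sketch of what tf.reduce_min itself computes (my own illustration, written for TF 2.x eager execution; several examples below still use the older TF 1.x keep_dims spelling of the keepdims argument):

import tensorflow as tf

x = tf.constant([[3.0, 1.0, 2.0],
                 [5.0, 4.0, 6.0]])

tf.reduce_min(x)                           # -> 1.0, minimum over all elements
tf.reduce_min(x, axis=0)                   # -> [3.0, 1.0, 2.0], column-wise minimum
tf.reduce_min(x, axis=1, keepdims=True)    # -> [[1.0], [4.0]], row-wise minimum, rank preserved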

Example 1: from_float32_to_uint8

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_min [as alias]
def from_float32_to_uint8(
        tensor,
        tensor_key='tensor',
        min_key='min',
        max_key='max'):
    """

    :param tensor:
    :param tensor_key:
    :param min_key:
    :param max_key:
    :returns:
    """
    tensor_min = tf.reduce_min(tensor)
    tensor_max = tf.reduce_max(tensor)
    return {
        tensor_key: tf.cast(
            (tensor - tensor_min) / (tensor_max - tensor_min + 1e-16)
            * 255.9999, dtype=tf.uint8),
        min_key: tensor_min,
        max_key: tensor_max
    } 
Author: deezer | Project: spleeter | Lines: 24 | Source: tensor.py
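
To see why the minimum and maximum are returned alongside the quantized tensor, here is a round-trip sketch (my own; the decoding expression simply mirrors the encoding above and is not a helper taken from spleeter):

import tensorflow as tf

x = tf.random.uniform([128, 1024], minval=-3.0, maxval=3.0)
encoded = from_float32_to_uint8(x)
# Invert the quantization with the stored range; recovers x up to roughly (max - min) / 256.
decoded = (tf.cast(encoded['tensor'], tf.float32) / 255.9999
           * (encoded['max'] - encoded['min']) + encoded['min'])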

Example 2: top_k_softmax

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_min [as alias]
def top_k_softmax(x, k):
  """Calculate softmax(x), select top-k and rescale to sum to 1.

  Args:
    x: Input to softmax over.
    k: Number of top-k to select.

  Returns:
    The top-k values of softmax(x) renormalized to sum to 1, and the maximum
    softmax value.
  """
  x = tf.nn.softmax(x)
  top_x, _ = tf.nn.top_k(x, k=k + 1)
  min_top = tf.reduce_min(top_x, axis=-1, keep_dims=True)
  x = tf.nn.relu((x - min_top) + 1e-12)
  x /= tf.reduce_sum(x, axis=-1, keep_dims=True)
  return x, tf.reduce_max(top_x, axis=-1) 
Author: akzaidi | Project: fine-lm | Lines: 18 | Source: discretization.py
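
A quick numeric illustration of the trick (my own sketch, spelled with the newer keepdims argument so it runs eagerly under TF 2.x): subtracting the (k+1)-th largest softmax value and applying relu zeroes everything outside the top k before renormalizing.

import tensorflow as tf

p = tf.nn.softmax(tf.constant([[2.0, 1.0, 0.1]]))        # ~[[0.66, 0.24, 0.10]]
top_p, _ = tf.nn.top_k(p, k=3)                           # k + 1 = 3 largest probabilities
min_top = tf.reduce_min(top_p, axis=-1, keepdims=True)   # the (k+1)-th largest, ~0.10
p = tf.nn.relu(p - min_top + 1e-12)
p /= tf.reduce_sum(p, axis=-1, keepdims=True)            # ~[[0.80, 0.20, 0.0]]: only the top 2 survive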

Example 3: add_variable_summaries

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_min [as alias]
def add_variable_summaries(variable, scope):
  '''
  Attach some summaries to a tensor for TensorBoard visualization, namely
  mean, standard deviation, minimum, maximum, and histogram.

  Arguments:
    variable (TensorFlow Variable): A TensorFlow Variable of any shape to which
        to add summary operations. Must be a numerical data type.
    scope (str): The name scope under which the summaries are grouped.
  '''
  with tf.name_scope(scope):
    mean = tf.reduce_mean(variable)
    tf.summary.scalar('mean', mean)
    with tf.name_scope('stddev'):
        stddev = tf.sqrt(tf.reduce_mean(tf.square(variable - mean)))
    tf.summary.scalar('stddev', stddev)
    tf.summary.scalar('max', tf.reduce_max(variable))
    tf.summary.scalar('min', tf.reduce_min(variable))
    tf.summary.histogram('histogram', variable) 
Author: pierluigiferrari | Project: fcn8s_tensorflow | Lines: 20 | Source: tf_variable_summaries.py

Example 4: assert_box_normalized

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_min [as alias]
def assert_box_normalized(boxes, maximum_normalized_coordinate=1.1):
  """Asserts the input box tensor is normalized.

  Args:
    boxes: a tensor of shape [N, 4] where N is the number of boxes.
    maximum_normalized_coordinate: Maximum coordinate value to be considered
      as normalized, default to 1.1.

  Returns:
    a tf.Assert op which fails when the input box tensor is not normalized.

  Raises:
    ValueError: When the input box tensor is not normalized.
  """
  box_minimum = tf.reduce_min(boxes)
  box_maximum = tf.reduce_max(boxes)
  return tf.Assert(
      tf.logical_and(
          tf.less_equal(box_maximum, maximum_normalized_coordinate),
          tf.greater_equal(box_minimum, 0)),
      [boxes]) 
Author: ahmetozlu | Project: vehicle_counting_tensorflow | Lines: 23 | Source: shape_utils.py
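
A minimal usage sketch (my own, following the TF 1.x graph-mode style of this repository, i.e. tf.compat.v1 under TF 2.x): the returned assert op is typically attached through control dependencies so the check runs before the boxes are consumed.

boxes = tf.placeholder(tf.float32, shape=[None, 4])
assert_op = assert_box_normalized(boxes)
with tf.control_dependencies([assert_op]):
    boxes = tf.identity(boxes)   # downstream ops now trigger the normalization check first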

Example 5: tensor_stats

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_min [as alias]
def tensor_stats(name, tensor, verbose=True, collections=None, family=None):
    """
    Args:
        tensor: A non-scalar tensor.
    """
    if verbose:
        with tf.name_scope(name):
            mean = tf.reduce_mean(tensor)
            tf.summary.scalar('mean', mean, collections=collections, family=family)

            with tf.name_scope('stddev'):
                stddev = tf.sqrt(tf.reduce_mean(tf.square(tensor - mean)))
            tf.summary.scalar('stddev', stddev, collections=collections, family=family)
            tf.summary.scalar('max', tf.reduce_max(tensor), collections=collections, family=family)
            tf.summary.scalar('min', tf.reduce_min(tensor), collections=collections, family=family)
            tf.summary.histogram('histogram', tensor, collections=collections, family=family)
    else:
        pass 
Author: naturomics | Project: CapsLayer | Lines: 20 | Source: summary.py

Example 6: _build

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_min [as alias]
def _build(self, input_shape, dtype=tf.float32):
        """
        Called on the first iteration once the input shape is known
        :param input_shape: Input shape including batch size
        """
        with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
            non_zeros = int(round(input_shape[-1].value * self.percent_on))

            # Create random mask with k elements set to 1, all other elements set to 0
            values = tf.random_uniform(input_shape)
            top_k, _ = tf.math.top_k(input=values, k=non_zeros, sorted=False)
            kth = tf.reduce_min(top_k, axis=1, keepdims=True)
            mask = tf.cast(tf.greater_equal(values, kth), dtype=dtype)
            self.mask = tf.get_variable(
                self.name,
                initializer=mask,
                trainable=False,
                synchronization=tf.VariableSynchronization.NONE,
            )
            keras.backend.track_variable(self.mask)
            self._built = True 
Author: numenta | Project: nupic.tensorflow | Lines: 23 | Source: sparse_weights.py
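
The thresholding trick in isolation (my own TF 2.x sketch, not code from nupic.tensorflow): tf.reduce_min over the row-wise top-k values yields the k-th largest entry of each row, which then turns the row into a binary mask with exactly k ones (ties aside).

import tensorflow as tf

values = tf.random.uniform([4, 10])
k = 3
top_k, _ = tf.math.top_k(values, k=k, sorted=False)
kth = tf.reduce_min(top_k, axis=1, keepdims=True)   # k-th largest value per row
mask = tf.cast(values >= kth, tf.float32)           # exactly k ones per row
tf.reduce_sum(mask, axis=1)                         # -> [3., 3., 3., 3.]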

Example 7: distribution_accuracy

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_min [as alias]
def distribution_accuracy(a, b):
    """
    Each point of a is measured against the closest point on b.  Distance differences are added together.  
    
    This works best on a large batch of small inputs."""
    tiled_a = a
    tiled_a = tf.reshape(tiled_a, [int(tiled_a.get_shape()[0]), 1, int(tiled_a.get_shape()[1])])

    tiled_a = tf.tile(tiled_a, [1, int(tiled_a.get_shape()[0]), 1])

    tiled_b = b
    tiled_b = tf.reshape(tiled_b, [1, int(tiled_b.get_shape()[0]), int(tiled_b.get_shape()[1])])
    tiled_b = tf.tile(tiled_b, [int(tiled_b.get_shape()[0]), 1, 1])

    difference = tf.abs(tiled_a-tiled_b)
    difference = tf.reduce_min(difference, axis=1)
    difference = tf.reduce_sum(difference, axis=1)
    return tf.reduce_sum(difference, axis=0) 
Author: HyperGAN | Project: HyperGAN | Lines: 20 | Source: common.py
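
A small numeric check (my own, runnable eagerly under TF 2.x; the int(...) shape casts above require fully defined static shapes):

import tensorflow as tf

a = tf.constant([[0.0], [1.0]])
b = tf.constant([[0.2], [0.9]])
# Nearest-neighbour distances are |0.0 - 0.2| = 0.2 and |1.0 - 0.9| = 0.1.
distribution_accuracy(a, b)   # -> ~0.3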

Example 8: test_get_tensor_with_random_shape

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_min [as alias]
def test_get_tensor_with_random_shape(self):
    x = test_utils.get_tensor_with_random_shape()
    self.assertIsInstance(x, tf.Tensor)
    self.assertFalse(x.shape.is_fully_defined())
    # Rank of the Tensor should be known, even though the dimension is not.
    self.assertEqual(1, x.shape.ndims)

    # Check that the statically unknown shape corresponds to an actually random
    # shape at execution time.
    samples = [self.evaluate(x) for _ in range(10)]
    self.assertGreater(len(set([len(s) for s in samples])), 1)

    # Test that source_fn has effect on the output values.
    x_uniform = test_utils.get_tensor_with_random_shape(
        expected_num_elements=50, source_fn=tf.random.uniform)
    x_normal = test_utils.get_tensor_with_random_shape(
        expected_num_elements=50, source_fn=tf.random.normal)
    self.assertGreaterEqual(self.evaluate(tf.reduce_min(x_uniform)), 0.0)
    self.assertLess(self.evaluate(tf.reduce_min(x_normal)), 0.0) 
Author: tensorflow | Project: model-optimization | Lines: 21 | Source: test_utils_test.py

Example 9: _graph_fn_call

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_min [as alias]
def _graph_fn_call(self, inputs):
        min_value = inputs
        max_value = inputs

        if get_backend() == "tf":
            # Iteratively reduce dimensionality across all axes to get the min/max values for each sample in the batch.
            for axis in self.axes:
                min_value = tf.reduce_min(input_tensor=min_value, axis=axis, keep_dims=True)
                max_value = tf.reduce_max(input_tensor=max_value, axis=axis, keep_dims=True)
        elif get_backend() == "pytorch":
            for axis in self.axes:
                min_value = torch.min(min_value, axis)
                max_value = torch.max(max_value, axis)

        # Add a small constant so the range can never be zero.
        return (inputs - min_value) / (max_value - min_value + SMALL_NUMBER) 
Author: rlgraph | Project: rlgraph | Lines: 18 | Source: normalize.py

Example 10: _graph_fn_critic_loss

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_min [as alias]
def _graph_fn_critic_loss(self, log_probs_next_sampled, q_values_next_sampled, q_values, rewards, terminals, alpha):
        # In case log_probs come in as shape=(), expand last rank to 1.
        if log_probs_next_sampled.shape.as_list()[-1] is None:
            log_probs_next_sampled = tf.expand_dims(log_probs_next_sampled, axis=-1)

        log_probs_next_sampled = tf.reduce_sum(log_probs_next_sampled, axis=1, keepdims=True)
        rewards = tf.expand_dims(rewards, axis=-1)
        terminals = tf.expand_dims(terminals, axis=-1)

        q_min_next = tf.reduce_min(tf.concat(q_values_next_sampled, axis=1), axis=1, keepdims=True)
        assert q_min_next.shape.as_list() == [None, 1]
        soft_state_value = q_min_next - alpha * log_probs_next_sampled
        q_target = rewards + self.discount * (1.0 - tf.cast(terminals, tf.float32)) * soft_state_value
        total_loss = 0.0
        if self.num_q_functions < 2:
            q_values = [q_values]
        for i, q_value in enumerate(q_values):
            loss = 0.5 * (q_value - tf.stop_gradient(q_target)) ** 2
            loss = tf.identity(loss, "critic_loss_per_item_{}".format(i + 1))
            total_loss += loss
        return tf.squeeze(total_loss, axis=1) 
Author: rlgraph | Project: rlgraph | Lines: 23 | Source: sac_loss_function.py
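
In soft actor-critic notation, the target regressed onto above is (my own restatement of what the code computes, not text from the original page):

$$ y = r + \gamma \, (1 - \text{terminal}) \Big( \min_i Q_i(s', a') - \alpha \log \pi(a' \mid s') \Big) $$

where the minimum over the sampled next-state Q-values is taken with tf.reduce_min, and each critic then contributes a loss term 0.5 * (Q_i(s, a) - stop_gradient(y))^2, matching the loop over q_values at the end of the function.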

Example 11: get_batch_dataset

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_min [as alias]
def get_batch_dataset(record_file, parser, config):
    num_threads = tf.constant(config.num_threads, dtype=tf.int32)
    dataset = tf.data.TFRecordDataset(record_file).map(
        parser, num_parallel_calls=num_threads).shuffle(config.capacity).repeat()
    if config.is_bucket:
        buckets = [tf.constant(num) for num in range(*config.bucket_range)]

        def key_func(context_idxs, ques_idxs, context_char_idxs, ques_char_idxs, y1, y2, qa_id):
            c_len = tf.reduce_sum(
                tf.cast(tf.cast(context_idxs, tf.bool), tf.int32))
            buckets_min = [np.iinfo(np.int32).min] + buckets
            buckets_max = buckets + [np.iinfo(np.int32).max]
            conditions_c = tf.logical_and(
                tf.less(buckets_min, c_len), tf.less_equal(c_len, buckets_max))
            bucket_id = tf.reduce_min(tf.where(conditions_c))
            return bucket_id

        def reduce_func(key, elements):
            return elements.batch(config.batch_size)

        dataset = dataset.apply(tf.contrib.data.group_by_window(
            key_func, reduce_func, window_size=5 * config.batch_size)).shuffle(len(buckets) * 25)
    else:
        dataset = dataset.batch(config.batch_size)
    return dataset 
Author: HKUST-KnowComp | Project: R-Net | Lines: 27 | Source: util.py
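
How the bucket id falls out for one concrete context length (my own TF 2.x sketch with made-up bucket boundaries; the real boundaries come from config.bucket_range):

import numpy as np
import tensorflow as tf

buckets = tf.constant([40, 80, 120], dtype=tf.int32)
c_len = tf.constant(65)                                   # number of non-padding tokens
buckets_min = tf.concat([[np.iinfo(np.int32).min], buckets], axis=0)
buckets_max = tf.concat([buckets, [np.iinfo(np.int32).max]], axis=0)
conditions = tf.logical_and(tf.less(buckets_min, c_len),
                            tf.less_equal(c_len, buckets_max))
bucket_id = tf.reduce_min(tf.where(conditions))           # -> 1, i.e. the (40, 80] bucket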

Example 12: print_act_stats

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_min [as alias]
def print_act_stats(x, _str=""):
    if not do_print_act_stats:
        return x
    if hvd.rank() != 0:
        return x
    if len(x.get_shape()) == 1:
        x_mean, x_var = tf.nn.moments(x, [0], keep_dims=True)
    if len(x.get_shape()) == 2:
        x_mean, x_var = tf.nn.moments(x, [0], keep_dims=True)
    if len(x.get_shape()) == 4:
        x_mean, x_var = tf.nn.moments(x, [0, 1, 2], keep_dims=True)
    stats = [tf.reduce_min(x_mean), tf.reduce_mean(x_mean), tf.reduce_max(x_mean),
             tf.reduce_min(tf.sqrt(x_var)), tf.reduce_mean(tf.sqrt(x_var)), tf.reduce_max(tf.sqrt(x_var))]
    return tf.Print(x, stats, "["+_str+"] "+x.name)

# Allreduce methods 
Author: openai | Project: glow | Lines: 18 | Source: tfops.py

Example 13: detect_min_val

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_min [as alias]
def detect_min_val(input_mat, var, threshold=1e-6, name='', debug=False):
    """
    Clips out negative values below the threshold via clipout_neg. If debug is set,
    additionally prints a warning when the eigenvalue ratio looks odd.

    :param input_mat: (TensorFlow Tensor)
    :param var: (TensorFlow Tensor) variable
    :param threshold: (float) the cutoff threshold
    :param name: (str) the name of the variable
    :param debug: (bool) debug function
    :return: (TensorFlow Tensor) clipped tensor
    """
    eigen_min = tf.reduce_min(input_mat)
    eigen_max = tf.reduce_max(input_mat)
    eigen_ratio = eigen_max / eigen_min
    input_mat_clipped = clipout_neg(input_mat, threshold)

    if debug:
        input_mat_clipped = tf.cond(tf.logical_or(tf.greater(eigen_ratio, 0.), tf.less(eigen_ratio, -500)),
                                    lambda: input_mat_clipped, lambda: tf.Print(
                input_mat_clipped,
                [tf.convert_to_tensor('odd ratio ' + name + ' eigen values!!!'), tf.convert_to_tensor(var.name),
                 eigen_min, eigen_max, eigen_ratio]))

    return input_mat_clipped 
Author: Stable-Baselines-Team | Project: stable-baselines | Lines: 26 | Source: kfac_utils.py

Example 14: ind_max_pool

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_min [as alias]
def ind_max_pool(x, inds):
    """
    This TensorFlow operation computes a max pooling according to the list of indices 'inds'.
    > x = [n1, d] features matrix
    > inds = [n2, max_num] each row of this tensor is a list of indices of features to be pooled together
    >> output = [n2, d] pooled features matrix
    """

    # Add a last row with minimum features for shadow pools
    x = tf.concat([x, tf.reduce_min(x, axis=0, keep_dims=True)], axis=0)

    # Get features for each pooling cell [n2, max_num, d]
    pool_features = tf.gather(x, inds, axis=0)

    # Pool the maximum
    return tf.reduce_max(pool_features, axis=1) 
Author: HuguesTHOMAS | Project: KPConv | Lines: 18 | Source: network_blocks.py
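
The shadow-row trick in a tiny standalone form (my own TF 2.x re-spelling with keepdims): index 3 below points at the appended per-column minimum row, which can never exceed a real feature and therefore acts as padding for pooling cells with fewer neighbours.

import tensorflow as tf

x = tf.constant([[1.0, 5.0],
                 [2.0, 4.0],
                 [3.0, 3.0]])
inds = tf.constant([[0, 1, 3],     # pool rows 0 and 1, one shadow slot
                    [2, 3, 3]])    # pool row 2 only, two shadow slots
x_padded = tf.concat([x, tf.reduce_min(x, axis=0, keepdims=True)], axis=0)  # appends [1., 3.]
pooled = tf.reduce_max(tf.gather(x_padded, inds, axis=0), axis=1)
# pooled -> [[2., 5.], [3., 3.]]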

Example 15: fprop

# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_min [as alias]
def fprop(self, x, **kwargs):
        mean = tf.reduce_mean(x)
        std = tf.sqrt(tf.reduce_mean(tf.square(x - mean)))
        return tf.Print(x,
                        [tf.reduce_min(x), mean, tf.reduce_max(x), std],
                        "Print layer") 
Author: StephanZheng | Project: neural-fingerprinting | Lines: 8 | Source: picklable_model.py


Note: The tensorflow.reduce_min examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please consult the corresponding project's license before redistributing or reusing the code; do not reproduce this compilation without permission.