

Python math_ops.reduce_min Function Code Examples

This article collects typical usage examples of the Python function tensorflow.python.ops.math_ops.reduce_min. If you are wondering what reduce_min does, how to call it, or what working code that uses it looks like, the hand-picked examples below should help.


The following presents 15 code examples of the reduce_min function, drawn from open-source projects and ordered roughly by popularity.
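
Before diving into the excerpts, here is a minimal standalone sketch of the basic call. This is my own illustration, not from the collected projects, and it assumes a TensorFlow 1.x environment (graph mode) where the internal math_ops module is importable:

import tensorflow as tf  # assumes TensorFlow 1.x (graph mode, tf.Session)
from tensorflow.python.ops import math_ops

x = tf.constant([[3., 1., 4.],
                 [1., 5., 9.]])
global_min = math_ops.reduce_min(x)                       # scalar: 1.0
col_min = math_ops.reduce_min(x, axis=0)                  # [1., 1., 4.]
row_min = math_ops.reduce_min(x, axis=1, keep_dims=True)  # [[1.], [1.]]

with tf.Session() as sess:
  print(sess.run([global_min, col_min, row_min]))

With no axis argument, reduce_min collapses the whole tensor to a scalar; keep_dims=True retains the reduced axis with size 1, which is convenient for broadcasting.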

Example 1: testNegativeBinomialSample

  def testNegativeBinomialSample(self):
    with self.cached_session() as sess:
      probs = [.3, .9]
      total_count = [4., 11.]
      n = int(100e3)
      negbinom = negative_binomial.NegativeBinomial(
          total_count=total_count, probs=probs)

      samples = negbinom.sample(n, seed=12345)
      self.assertEqual([n, 2], samples.get_shape())

      sample_mean = math_ops.reduce_mean(samples, axis=0)
      sample_var = math_ops.reduce_mean(
          (samples - sample_mean[array_ops.newaxis, ...])**2., axis=0)
      sample_min = math_ops.reduce_min(samples)
      [sample_mean_, sample_var_, sample_min_] = sess.run([
          sample_mean, sample_var, sample_min])
      self.assertAllEqual(np.ones(sample_min_.shape, dtype=np.bool_),
                          sample_min_ >= 0.0)
      for i in range(2):
        self.assertAllClose(sample_mean_[i],
                            stats.nbinom.mean(total_count[i], 1 - probs[i]),
                            atol=0.,
                            rtol=.02)
        self.assertAllClose(sample_var_[i],
                            stats.nbinom.var(total_count[i], 1 - probs[i]),
                            atol=0.,
                            rtol=.02)
Developer: Ajaycs99, Project: tensorflow, Lines: 28, Source: negative_binomial_test.py

Example 2: grow_tree_from_stats_summaries

 def grow_tree_from_stats_summaries(stats_summary_list):
   """Updates ensemble based on the best gains from stats summaries."""
   (node_ids_per_feature, gains_list, thresholds_list,
    left_node_contribs_list, right_node_contribs_list) = (
        boosted_trees_ops.calculate_best_gains_per_feature(
            node_id_range=array_ops.stack([
                math_ops.reduce_min(node_ids),
                math_ops.reduce_max(node_ids)
            ]),
            stats_summary_list=stats_summary_list,
            l1=tree_hparams.l1,
            l2=tree_hparams.l2,
            tree_complexity=tree_hparams.tree_complexity,
            max_splits=max_splits))
   grow_op = boosted_trees_ops.update_ensemble(
       # Confirm if local_tree_ensemble or tree_ensemble should be used.
       tree_ensemble.resource_handle,
       feature_ids=math_ops.range(0, num_features, dtype=dtypes.int32),
       node_ids=node_ids_per_feature,
       gains=gains_list,
       thresholds=thresholds_list,
       left_node_contribs=left_node_contribs_list,
       right_node_contribs=right_node_contribs_list,
       learning_rate=tree_hparams.learning_rate,
       max_depth=tree_hparams.max_depth,
       pruning_mode=boosted_trees_ops.PruningMode.NO_PRUNING)
   return grow_op
Developer: syed-ahmed, Project: tensorflow, Lines: 27, Source: boosted_trees.py

Example 3: testGradient4

 def testGradient4(self):
   s = [2, 3, 4, 2]
   x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
   with self.test_session():
     t = ops.convert_to_tensor(x)
     su = math_ops.reduce_min(t)
     jacob_t, jacob_n = gradient_checker.compute_gradient(
         t, s, su, [1], x_init_value=x, delta=1)
   self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
Developer: Immexxx, Project: tensorflow, Lines: 9, Source: reduction_ops_test.py

Example 4: my_rnn

def my_rnn(alphabetEnc, cell, inputs, initial_state=None, dtype=None,
        sequence_length=None, scope=None):

  if not isinstance(cell, rnn_cell.RNNCell):
    raise TypeError("cell must be an instance of RNNCell")
  if not isinstance(inputs, list):
    raise TypeError("inputs must be a list")
  if not inputs:
    raise ValueError("inputs must not be empty")

  outputs = []
  with vs.variable_scope(scope or "RNN"):
    fixed_batch_size = inputs[0].get_shape().with_rank_at_least(1)[0]
    if fixed_batch_size.value:
      batch_size = fixed_batch_size.value
    else:
      batch_size = array_ops.shape(inputs[0])[0]
    if initial_state is not None:
      state = initial_state
    else:
      if not dtype:
        raise ValueError("If no initial_state is provided, dtype must be.")
      state = cell.zero_state(batch_size, dtype)

    if sequence_length is not None:
      sequence_length = math_ops.to_int32(sequence_length)

    if sequence_length is not None:  # Prepare variables
      zero_output = array_ops.zeros(
          array_ops.pack([batch_size, cell.output_size]), inputs[0].dtype)
      zero_output.set_shape(
          tensor_shape.TensorShape([fixed_batch_size.value, cell.output_size]))


      min_sequence_length = math_ops.reduce_min(sequence_length)
      max_sequence_length = math_ops.reduce_max(sequence_length)


    for time, input_ in enumerate(inputs):
      if time > 0: vs.get_variable_scope().reuse_variables()
      # pylint: disable=cell-var-from-loop
      call_cell = lambda: cell([ input_ , alphabetEnc[time] ], state)
      # pylint: enable=cell-var-from-loop
      if sequence_length is not None:

        (output, state) = _rnn_step(
            time, sequence_length, min_sequence_length, max_sequence_length,
            zero_output, state, call_cell)
      else:

        (output, state) = call_cell()

      outputs.append(output)

    return (outputs, state)
Developer: chagge, Project: repository, Lines: 55, Source: rnn.py

Example 5: initialize_graph

  def initialize_graph(self, features, update_statistics=True):
    """Create any ops needed to provide input statistics.

    Should be called before statistics are requested.

    Args:
      features: A dictionary, the output of a `TimeSeriesInputFn` (with keys
          TrainEvalFeatures.TIMES and TrainEvalFeatures.VALUES).
      update_statistics: Whether `features` should be used to update adaptive
          statistics. Typically True for training and false for evaluation.
    Returns:
      An InputStatistics object composed of Variables, which will be updated
      based on mini-batches of data if requested.
    """
    if (TrainEvalFeatures.TIMES in features
        and TrainEvalFeatures.VALUES in features):
      times = features[TrainEvalFeatures.TIMES]
      values = features[TrainEvalFeatures.VALUES]
    else:
      # times and values may not be available, for example during prediction. We
      # still need to retrieve our variables so that they can be read from, even
      # if we're not going to update them.
      times = None
      values = None
    # Create/retrieve variables representing input statistics, initialized
    # without data to avoid deadlocking if variables are initialized before
    # queue runners are started.
    with variable_scope.variable_scope("input_statistics", use_resource=True):
      statistics = self._create_variable_statistics_object()
    with variable_scope.variable_scope(
        "input_statistics_auxiliary", use_resource=True):
      # Secondary statistics, necessary for the incremental computation of the
      # primary statistics (e.g. counts and sums for computing a mean
      # incrementally).
      auxiliary_variables = self._AdaptiveInputAuxiliaryStatistics(
          num_features=self._num_features, dtype=self._dtype)
    if update_statistics and times is not None and values is not None:
      # If we have times and values from mini-batch input, create update ops to
      # take the new data into account.
      assign_op = self._update_statistics_from_mini_batch(
          statistics, auxiliary_variables, times, values)
      with ops.control_dependencies([assign_op]):
        stat_variables = nest.pack_sequence_as(statistics, [
            array_ops.identity(tensor) for tensor in nest.flatten(statistics)
        ])
        # Since start time updates have a race condition, ensure that the
        # reported start time is at least as low as the lowest time in this
        # mini-batch. The start time should converge on the correct value
        # eventually even with the race condition, but for example state space
        # models have an assertion which could fail without this
        # post-processing.
        return stat_variables._replace(start_time=gen_math_ops.minimum(
            stat_variables.start_time, math_ops.reduce_min(times)))
    else:
      return statistics
Developer: AnishShah, Project: tensorflow, Lines: 55, Source: math_utils.py

Example 6: testLargeFeed

 def testLargeFeed(self):
   server = self._cached_server
   with session.Session(server.target, config=self._useRPCConfig()) as sess:
     feed_val = np.empty([10000, 3000], dtype=np.float32)
     feed_val.fill(0.5)
     p = array_ops.placeholder(dtypes.float32, shape=[10000, 3000])
     min_t = math_ops.reduce_min(p)
     max_t = math_ops.reduce_max(p)
     min_val, max_val = sess.run([min_t, max_t], feed_dict={p: feed_val})
     self.assertEqual(0.5, min_val)
     self.assertEqual(0.5, max_val)
Developer: JonathanRaiman, Project: tensorflow, Lines: 11, Source: server_lib_test.py

Example 7: _ones_diag

  def _ones_diag(self):
    """Returns the diagonal of this operator as all ones."""
    if self.shape.is_fully_defined():
      d_shape = self.batch_shape.concatenate(
          [min(self.domain_dimension.value, self.range_dimension.value)])
    else:
      d_shape = array_ops.concat(
          [self.batch_shape_tensor(),
           [math_ops.reduce_min(self.shape_tensor()[-2:])]], axis=0)

    return array_ops.ones(shape=d_shape, dtype=self.dtype)
Developer: LugarkPirog, Project: tensorflow, Lines: 11, Source: linear_operator_identity.py

Example 8: refresh_shortlist

 def refresh_shortlist():
   """Update the shortlist with the highest scores in id_to_score."""
   new_scores, new_ids = nn_ops.top_k(self.id_to_score, self.shortlist_size)
   smallest_new_score = math_ops.reduce_min(new_scores)
   new_length = math_ops.reduce_sum(
       math_ops.to_int32(math_ops.greater(new_scores, dtypes.float32.min)))
   u1 = self.sl_ids.assign(
       math_ops.to_int64(array_ops.concat([[new_length], new_ids], 0)))
   u2 = self.sl_scores.assign(
       array_ops.concat([[smallest_new_score], new_scores], 0))
   self.last_ops = [u1, u2]
   return control_flow_ops.group(u1, u2)
Developer: AliMiraftab, Project: tensorflow, Lines: 12, Source: topn.py

Example 9: element_to_bucket_id

    def element_to_bucket_id(*args):
      """Return int64 id of the length bucket for this element."""
      seq_length = element_length_func(*args)

      boundaries = list(bucket_boundaries)
      buckets_min = [np.iinfo(np.int32).min] + boundaries
      buckets_max = boundaries + [np.iinfo(np.int32).max]
      conditions_c = math_ops.logical_and(
          math_ops.less_equal(buckets_min, seq_length),
          math_ops.less(seq_length, buckets_max))
      bucket_id = math_ops.reduce_min(array_ops.where(conditions_c))

      return bucket_id
Developer: bunbutter, Project: tensorflow, Lines: 13, Source: grouping.py
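
A quick NumPy restatement of the bucket-id logic in Example 9 (the boundary values below are hypothetical, chosen only for illustration): exactly one of the half-open [min, max) interval conditions holds for a given length, so taking the minimum index over the true entries selects the matching bucket.

import numpy as np

boundaries = [10, 20, 30]  # hypothetical boundaries, not from the source
buckets_min = [np.iinfo(np.int32).min] + boundaries
buckets_max = boundaries + [np.iinfo(np.int32).max]

def bucket_id(seq_length):
  conditions = np.logical_and(
      np.less_equal(buckets_min, seq_length),
      np.less(seq_length, buckets_max))
  # Exactly one interval matches; the smallest True index is the bucket id.
  return np.min(np.where(conditions)[0])

print(bucket_id(5), bucket_id(10), bucket_id(25), bucket_id(99))  # 0 1 2 3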

Example 10: func_body

 def func_body(iteration, gt_cluster_score):
   """Per each cluster, compute the average travel distance."""
   mask = math_ops.equal(labels, unique_class_ids[iteration])
   this_cluster_ids = array_ops.where(mask)
   pairwise_distances_subset = array_ops.transpose(
       array_ops.gather(
           array_ops.transpose(
               array_ops.gather(pairwise_distances, this_cluster_ids)),
           this_cluster_ids))
   this_cluster_score = -1.0 * math_ops.reduce_min(
       math_ops.reduce_sum(
           pairwise_distances_subset, axis=0))
   return iteration + 1, gt_cluster_score + this_cluster_score
Developer: AndrewTwinz, Project: tensorflow, Lines: 13, Source: metric_loss_ops.py

Example 11: compute_facility_energy

def compute_facility_energy(pairwise_distances, centroid_ids):
  """Compute the average travel distance to the assigned centroid.

  Args:
    pairwise_distances: 2-D Tensor of pairwise distances.
    centroid_ids: 1-D Tensor of indices.

  Returns:
    facility_energy: dtypes.float32 scalar.
  """
  return -1.0 * math_ops.reduce_sum(
      math_ops.reduce_min(
          array_ops.gather(pairwise_distances, centroid_ids), axis=0))
Developer: AndrewTwinz, Project: tensorflow, Lines: 13, Source: metric_loss_ops.py
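
To see what Example 11 computes, here is a NumPy sketch with a small hypothetical distance matrix (my own data, not from the source): gathering the centroid rows and taking the column-wise minimum gives each point's distance to its nearest centroid, and the negated sum is the facility energy.

import numpy as np

pairwise = np.array([[0., 2., 5., 4.],
                     [2., 0., 3., 6.],
                     [5., 3., 0., 1.],
                     [4., 6., 1., 0.]])  # hypothetical symmetric distances
centroid_ids = [0, 2]

# Rows of the chosen centroids; column-wise min = distance to nearest centroid.
nearest = np.min(pairwise[centroid_ids], axis=0)  # [0., 2., 0., 1.]
facility_energy = -1.0 * np.sum(nearest)          # -3.0
print(facility_energy)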

Example 12: _compare

 def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
   np_ans = x
   if reduction_axes is None:
     np_ans = np.amin(np_ans, keepdims=keep_dims)
   else:
     for ra in reduction_axes[::-1]:
       np_ans = np.amin(np_ans, axis=ra, keepdims=keep_dims)
   with self.test_session(use_gpu=use_gpu):
     if reduction_axes is not None:
       reduction_axes = np.array(reduction_axes).astype(np.int32)
     tf_ans = math_ops.reduce_min(x, reduction_axes, keep_dims)
     out = tf_ans.eval()
   self.assertAllClose(np_ans, out)
   self.assertShapeEqual(np_ans, tf_ans)
Developer: Immexxx, Project: tensorflow, Lines: 14, Source: reduction_ops_test.py

Example 13: grow_tree

  def grow_tree(self, stats_summaries_list, feature_ids_list,
                last_layer_nodes_range):
    # For the not-in-memory situation, we need to accumulate enough batches
    # first before proceeding with building a tree layer.
    max_splits = _get_max_splits(self._tree_hparams)

    # Prepare accumulators.
    accumulators = []
    dependencies = []
    for i, feature_ids in enumerate(feature_ids_list):
      stats_summaries = stats_summaries_list[i]
      accumulator = data_flow_ops.ConditionalAccumulator(
          dtype=dtypes.float32,
          # The stats consist of grads and hessians (the last dimension).
          shape=[len(feature_ids), max_splits, self._bucket_size_list[i], 2],
          shared_name='numeric_stats_summary_accumulator_' + str(i))
      accumulators.append(accumulator)

      apply_grad = accumulator.apply_grad(
          array_ops.stack(stats_summaries, axis=0), self._stamp_token)
      dependencies.append(apply_grad)

    # Grow the tree if enough batches are accumulated.
    with ops.control_dependencies(dependencies):
      if not self._is_chief:
        return control_flow_ops.no_op()

      min_accumulated = math_ops.reduce_min(
          array_ops.stack([acc.num_accumulated() for acc in accumulators]))

      def grow_tree_from_accumulated_summaries_fn():
        """Updates tree with the best layer from accumulated summaries."""
        # Take out the accumulated summaries from the accumulator and grow.
        stats_summaries_list = [
            array_ops.unstack(accumulator.take_grad(1), axis=0)
            for accumulator in accumulators
        ]
        grow_op = self._grow_tree_from_stats_summaries(
            stats_summaries_list, feature_ids_list, last_layer_nodes_range)
        return grow_op

      grow_model = control_flow_ops.cond(
          math_ops.greater_equal(min_accumulated, self._n_batches_per_layer),
          grow_tree_from_accumulated_summaries_fn,
          control_flow_ops.no_op,
          name='wait_until_n_batches_accumulated')
      return grow_model
Developer: ZhangXinNan, Project: tensorflow, Lines: 48, Source: boosted_trees.py

Example 14: _assert_non_singular

 def _assert_non_singular(self):
   """Private default implementation of _assert_non_singular."""
   logging.warn(
       "Using (possibly slow) default implementation of assert_non_singular."
       "  Requires conversion to a dense matrix and O(N^3) operations.")
   if self._can_use_cholesky():
     return self.assert_positive_definite()
   else:
     singular_values = linalg_ops.svd(self.to_dense(), compute_uv=False)
     # TODO(langmore) Add .eig and .cond as methods.
     cond = (math_ops.reduce_max(singular_values, axis=-1) /
             math_ops.reduce_min(singular_values, axis=-1))
     return check_ops.assert_less(
         cond,
         self._max_condition_number_to_be_non_singular(),
         message="Singular matrix up to precision epsilon.")
Developer: aritratony, Project: tensorflow, Lines: 16, Source: linear_operator.py
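
Example 14 estimates the condition number as the ratio of the largest to the smallest singular value. A NumPy sketch with a hypothetical, nearly singular matrix shows why this ratio works as a singularity check:

import numpy as np

a = np.array([[1., 2.],
              [2., 4.0001]])  # hypothetical, nearly singular (det = 1e-4)
s = np.linalg.svd(a, compute_uv=False)  # singular values, descending
cond = s[0] / s[-1]
print(cond)  # very large, so an assert_less against a threshold would trip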

Example 15: grow_not_in_mem

        def grow_not_in_mem():
          """Accumulates the data and grows a layer when ready."""

          accumulators = []
          dependencies = []
          for i, feature_ids in enumerate(feature_ids_list):
            stats_summaries = stats_summaries_list[i]
            accumulator = data_flow_ops.ConditionalAccumulator(
                dtype=dtypes.float32,
                # The stats consist of grads and hessians (the last dimension).
                shape=[len(feature_ids), max_splits, bucket_size_list[i], 2],
                shared_name='numeric_stats_summary_accumulator_' + str(i))
            accumulators.append(accumulator)

            apply_grad = accumulator.apply_grad(
                array_ops.stack(stats_summaries, axis=0), stamp_token)
            dependencies.append(apply_grad)

          def grow_tree_from_accumulated_summaries_fn():
            """Updates tree with the best layer from accumulated summaries."""
            # Take out the accumulated summaries from the accumulator and grow.
            stats_summaries_list = [
                array_ops.unstack(accumulator.take_grad(1), axis=0)
                for accumulator in accumulators
            ]

            grow_op = grow_tree_from_stats_summaries(stats_summaries_list,
                                                     feature_ids_list)
            return grow_op

          with ops.control_dependencies(dependencies):
            if config.is_chief:
              min_accumulated = math_ops.reduce_min(
                  array_ops.stack(
                      [acc.num_accumulated() for acc in accumulators]))

              grow_model = control_flow_ops.cond(
                  math_ops.greater_equal(min_accumulated, n_batches_per_layer),
                  grow_tree_from_accumulated_summaries_fn,
                  control_flow_ops.no_op,
                  name='wait_until_n_batches_accumulated')

              return grow_model
            else:
              return control_flow_ops.no_op()
Developer: Eagle732, Project: tensorflow, Lines: 47, Source: boosted_trees.py


Note: The tensorflow.python.ops.math_ops.reduce_min examples above were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors; copyright in the source code remains with the original authors, and redistribution and use should follow each project's license. Please do not repost without permission.