

Python v2.reduce_min Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v2.reduce_min method in Python. If you are wondering what v2.reduce_min does, how to call it, or what working examples look like, the curated code examples below may help. You can also explore further usage examples from the containing module, tensorflow.compat.v2.


Below are 6 code examples of the v2.reduce_min method, sorted by popularity by default.
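Before the project examples, here is a minimal standalone sketch of tf.reduce_min itself (not taken from any of the projects below); the tensor values are made up for illustration:

import tensorflow.compat.v2 as tf

x = tf.constant([[3.0, 1.0, 4.0],
                 [1.0, 5.0, 9.0]])
print(tf.reduce_min(x))                         # 1.0: minimum over all elements
print(tf.reduce_min(x, axis=1))                 # [1.0, 1.0]: per-row minima
print(tf.reduce_min(x, axis=0, keepdims=True))  # [[1.0, 1.0, 4.0]]: per-column minima, rank kept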

Example 1: select_actor_action

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import reduce_min [as alias]
def select_actor_action(self, env_output, agent_output):
    oracle_next_action = env_output.observation[constants.ORACLE_NEXT_ACTION]
    oracle_next_action_indices = tf.where(
        tf.equal(env_output.observation[constants.CONN_IDS],
                 oracle_next_action))
    oracle_next_action_idx = tf.reduce_min(oracle_next_action_indices)
    assert self._mode, 'mode must be set.'
    if self._mode == 'train':
      if self._loss_type == common.CE_LOSS:
        # This is teacher-forcing mode, so choose action same as oracle action.
        action_idx = oracle_next_action_idx
      elif self._loss_type == common.AC_LOSS:
        # Choose next pano from probability distribution over next panos
        action_idx = tfp.distributions.Categorical(
            logits=agent_output.policy_logits).sample()
      else:
        raise ValueError('Unsupported loss type {}'.format(self._loss_type))
    else:
      # In non-train modes, choose greedily.
      action_idx = tf.argmax(agent_output.policy_logits, axis=-1)
    action_val = env_output.observation[constants.CONN_IDS][action_idx]
    return common.ActorAction(
        chosen_action_idx=int(action_idx.numpy()),
        oracle_next_action_idx=int(oracle_next_action_idx.numpy())), int(
            action_val.numpy()) 
Developer: google-research, Project: valan, Lines: 27, Source: ndh_problem.py
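Examples 1, 2, and 5 all share the same index-lookup idiom: tf.where over an equality mask returns the matching positions as an N x 1 tensor, and tf.reduce_min collapses them to the first (smallest) matching index. A minimal sketch with made-up values, outside the valan codebase:

import tensorflow.compat.v2 as tf

conn_ids = tf.constant([12, 7, 42, 7, 3])   # hypothetical connection ids
oracle_action = tf.constant(7)

indices = tf.where(tf.equal(conn_ids, oracle_action))  # [[1], [3]]
first_idx = tf.reduce_min(indices)                     # 1: earliest match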

Example 2: select_actor_action

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import reduce_min [as alias]
def select_actor_action(self, env_output, agent_output):
    oracle_next_action = env_output.observation[constants.ORACLE_NEXT_ACTION]
    oracle_next_action_indices = tf.where(
        tf.equal(env_output.observation[constants.CONN_IDS],
                 oracle_next_action))
    oracle_next_action_idx = tf.reduce_min(oracle_next_action_indices)
    if self._loss_type == common.CE_LOSS:
      # This is teacher-forcing mode, so choose action same as oracle action.
      action_idx = oracle_next_action_idx
    elif self._loss_type == common.AC_LOSS:
      # Choose next pano from probability distribution over next panos
      action_idx = tfp.distributions.Categorical(
          logits=agent_output.policy_logits).sample()
    else:
      raise ValueError('Unsupported loss type {}'.format(self._loss_type))
    action_val = env_output.observation[constants.CONN_IDS][action_idx]
    return common.ActorAction(
        chosen_action_idx=int(action_idx.numpy()),
        oracle_next_action_idx=int(oracle_next_action_idx.numpy())), int(
            action_val.numpy()) 
Developer: google-research, Project: valan, Lines: 22, Source: mt_problem.py

Example 3: amin

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import reduce_min [as alias]
def amin(a, axis=None, keepdims=None):
  return _reduce(tf.reduce_min, a, axis=axis, dtype=None, keepdims=keepdims,
                 promote_int=None, tf_bool_fn=tf.reduce_all, preserve_bool=True)


# TODO(wangpeng): Remove this workaround once b/157232284 is fixed 
Developer: google, Project: trax, Lines: 8, Source: array_ops.py
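The tf.reduce_min kernel does not handle boolean tensors, which is presumably why the wrapper above passes tf_bool_fn=tf.reduce_all: for booleans the minimum is equivalent to a logical AND. A short sketch of the two reductions the wrapper dispatches between (the internal _reduce helper from trax is not reproduced here):

import tensorflow.compat.v2 as tf

bools = tf.constant([True, False, True])
print(tf.reduce_all(bools))        # False: min over booleans == logical AND

floats = tf.constant([2.0, -1.0, 0.5])
print(tf.reduce_min(floats))       # -1.0: ordinary numeric minimum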

Example 4: call

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import reduce_min [as alias]
def call(self, multi_objectives: tf.Tensor) -> tf.Tensor:
    return tf.reduce_min(
        (multi_objectives - self._reference_point) * self._weights, axis=1) 
Developer: tensorflow, Project: agents, Lines: 5, Source: multi_objective_scalarizer.py
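A hedged numeric sketch of what this scalarization computes, with made-up objectives, reference point, and weights (not taken from the agents repo): each row of the batch is scored by its worst weighted improvement over the reference point.

import tensorflow.compat.v2 as tf

multi_objectives = tf.constant([[3.0, 5.0],
                                [6.0, 2.0]])   # batch of 2 points, 2 objectives each
reference_point = tf.constant([1.0, 1.0])       # assumed reference point
weights = tf.constant([0.5, 0.5])               # assumed weights

scalarized = tf.reduce_min(
    (multi_objectives - reference_point) * weights, axis=1)
# -> [1.0, 0.5]: the min over objectives penalizes the weakest objective,
#    a Chebyshev-style scalarization.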

Example 5: select_actor_action

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import reduce_min [as alias]
def select_actor_action(self, env_output, agent_output):
    # Agent_output is unused here.
    oracle_next_action = env_output.observation[constants.ORACLE_NEXT_ACTION]
    oracle_next_action_indices = tf.where(
        tf.equal(env_output.observation[constants.CONN_IDS],
                 oracle_next_action))
    oracle_next_action_idx = tf.reduce_min(oracle_next_action_indices)
    assert self._mode, 'mode must be set.'
    action_idx = oracle_next_action_idx
    action_val = env_output.observation[constants.CONN_IDS][action_idx]
    return common.ActorAction(
        chosen_action_idx=int(action_idx.numpy()),
        oracle_next_action_idx=int(oracle_next_action_idx.numpy())), int(
            action_val) 
Developer: google-research, Project: valan, Lines: 16, Source: discriminator_problem.py

Example 6: estimate_tails

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import reduce_min [as alias]
def estimate_tails(func, target, shape, dtype):
  """Estimates approximate tail quantiles.

  This runs a simple Adam iteration to determine tail quantiles. The
  objective is to find an `x` such that:
  ```
  func(x) == target
  ```
  For instance, if `func` is a CDF and the target is a quantile value, this
  would find the approximate location of that quantile. Note that `func` is
  assumed to be monotonic. When each tail estimate has passed the optimal value
  of `x`, the algorithm does 10 additional iterations and then stops.

  This operation is vectorized. The tensor shape of `x` is given by `shape`, and
  `target` must have a shape that is broadcastable to the output of `func(x)`.

  Arguments:
    func: A callable that computes cumulative distribution function, survival
      function, or similar.
    target: The desired target value.
    shape: The shape of the `tf.Tensor` representing `x`.
    dtype: The `tf.dtypes.Dtype` of the computation (and the return value).

  Returns:
    A `tf.Tensor` representing the solution (`x`).
  """
  with tf.name_scope("estimate_tails"):
    dtype = tf.as_dtype(dtype)
    shape = tf.convert_to_tensor(shape, tf.int32)
    target = tf.convert_to_tensor(target, dtype)

    def loop_cond(tails, m, v, count):
      del tails, m, v  # unused
      return tf.reduce_min(count) < 10

    def loop_body(tails, m, v, count):
      with tf.GradientTape(watch_accessed_variables=False) as tape:
        tape.watch(tails)
        loss = abs(func(tails) - target)
      grad = tape.gradient(loss, tails)
      m = .5 * m + .5 * grad  # Adam mean estimate.
      v = .9 * v + .1 * tf.square(grad)  # Adam variance estimate.
      tails -= .5 * m / (tf.sqrt(v) + 1e-7)
      # Start counting when the gradient flips sign (note that this assumes
      # `tails` is initialized to zero).
      count = tf.where(
          tf.math.logical_or(count > 0, tails * grad > 0),
          count + 1, count)
      return tails, m, v, count

    init_tails = tf.zeros(shape, dtype=dtype)
    init_m = tf.zeros(shape, dtype=dtype)
    init_v = tf.ones(shape, dtype=dtype)
    init_count = tf.zeros(shape, dtype=tf.int32)
    return tf.while_loop(
        loop_cond, loop_body, (init_tails, init_m, init_v, init_count),
        back_prop=False)[0] 
Developer: tensorflow, Project: compression, Lines: 59, Source: helpers.py
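A hedged usage sketch (not part of the compression repo): calling estimate_tails with a standard normal CDF from TensorFlow Probability to locate an upper-tail quantile. The exact result depends on the Adam iteration, but it should land near the true 99% quantile of about 2.33.

import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp

normal = tfp.distributions.Normal(loc=0.0, scale=1.0)
# Find x with normal.cdf(x) ~= 0.99; func must be monotonic in x.
upper_tail = estimate_tails(normal.cdf, target=0.99, shape=[1], dtype=tf.float32)
print(upper_tail)  # roughly [2.33]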


Note: The tensorflow.compat.v2.reduce_min examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution or use should follow the corresponding project's license. Please do not repost without permission.