This article collects typical code examples of the Python method absl.logging.log_first_n. If you are wondering what logging.log_first_n does, how to call it, or where it is useful, the curated examples below may help; they are also a starting point for exploring the rest of the absl.logging module.
Three code examples of logging.log_first_n are shown, sorted by popularity by default. A minimal sketch of the method itself comes first.
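The method's signature is logging.log_first_n(level, msg, n, *args): it logs msg % args at the given level only for the first n times the call site executes, which keeps hot loops from flooding the log. A minimal, runnable sketch (the loop bound, count, and message are illustrative):

from absl import app
from absl import logging


def main(argv):
  del argv  # Unused.
  for step in range(100):
    # Only the first 3 of the 100 iterations produce a log line.
    logging.log_first_n(logging.INFO, 'Processing step %d', 3, step)


if __name__ == '__main__':
  app.run(main)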
Example 1: gym_env_wrapper
# Required import: from absl import logging [as alias]
# Or: from absl.logging import log_first_n [as alias]
def gym_env_wrapper(env, rl_env_max_episode_steps, maxskip_env, rendered_env,
                    rendered_env_resize_to, sticky_actions, output_dtype,
                    num_actions):
  """Wraps a gym environment. see make_gym_env for details."""
  # rl_env_max_episode_steps is None or int.
  assert ((not rl_env_max_episode_steps) or
          isinstance(rl_env_max_episode_steps, int))
  wrap_with_time_limit = ((not rl_env_max_episode_steps) or
                          rl_env_max_episode_steps >= 0)

  if wrap_with_time_limit:
    env = remove_time_limit_wrapper(env)

  if num_actions is not None:
    logging.log_first_n(
        logging.INFO, "Number of discretized actions: %d", 1, num_actions)
    env = ActionDiscretizeWrapper(env, num_actions=num_actions)

  if sticky_actions:
    env = StickyActionEnv(env)

  if maxskip_env:
    env = MaxAndSkipEnv(env)  # pylint: disable=redefined-variable-type

  if rendered_env:
    env = RenderedEnv(
        env, resize_to=rendered_env_resize_to, output_dtype=output_dtype)

  if wrap_with_time_limit and rl_env_max_episode_steps is not None:
    env = gym.wrappers.TimeLimit(
        env, max_episode_steps=rl_env_max_episode_steps)
  return env
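How this wrapper might be invoked, as a sketch: the environment id and all argument values below are illustrative, and the helper wrappers used inside gym_env_wrapper (remove_time_limit_wrapper, ActionDiscretizeWrapper, StickyActionEnv, MaxAndSkipEnv, RenderedEnv) must be importable from the same module.

import gym

env = gym.make('CartPole-v1')
env = gym_env_wrapper(
    env,
    rl_env_max_episode_steps=200,  # re-wrap with a 200-step TimeLimit
    maxskip_env=False,
    rendered_env=False,
    rendered_env_resize_to=None,
    sticky_actions=False,
    output_dtype=None,
    num_actions=None)  # None keeps the native action space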
Example 2: _compute_gradient
# Required import: from absl import logging [as alias]
# Or: from absl.logging import log_first_n [as alias]
def _compute_gradient(self, loss, dense_features, gradient_tape=None):
  """Computes the gradient given a loss and dense features."""
  feature_values = list(dense_features.values())
  if gradient_tape is None:
    grads = tf.gradients(loss, feature_values)
  else:
    grads = gradient_tape.gradient(loss, feature_values)

  # The orders of elements returned by .values() and .keys() are guaranteed
  # to correspond to each other.
  keyed_grads = dict(zip(dense_features.keys(), grads))

  invalid_grads, valid_grads = self._split_dict(keyed_grads,
                                                lambda grad: grad is None)
  # Two cases where a grad can be invalid (None):
  #   (1) The feature is not differentiable, like strings or integers.
  #   (2) The feature is not involved in loss computation.
  if invalid_grads:
    if self._raise_invalid_gradient:
      raise ValueError('Cannot perturb features ' + str(invalid_grads.keys()))
    logging.log_first_n(logging.WARNING, 'Cannot perturb features %s', 1,
                        invalid_grads.keys())

  # Guards against numerical errors. If the gradient is malformed (inf, -inf,
  # or NaN) on a dimension, replace it with 0, which has the effect of not
  # perturbing the original sample along that particular dimension.
  return tf.nest.map_structure(
      lambda g: tf.where(tf.math.is_finite(g), g, tf.zeros_like(g)),
      valid_grads)
# The _compose_as_dict and _decompose_as functions are similar to
# tf.nest.{flatten, pack_sequence_as} except that the composed representation
# is a dictionary of (name, value) pairs instead of a list of values. The
# names are needed for joining values from different inputs (e.g. input
# features and feature masks) with possibly missing values (e.g. no mask for
# some features).
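The gradient-sanitization logic above, reduced to a self-contained sketch: sanitize_grads is a hypothetical helper, not part of the original class. It inlines what _split_dict and the finite-value guard do, and assumes TF2 eager mode.

import tensorflow as tf
from absl import logging


def sanitize_grads(keyed_grads):
  # Hypothetical helper: split out None gradients (non-differentiable or
  # unused features), warn about them only once, then zero any inf/-inf/NaN
  # entries in the remaining gradients.
  invalid = {k: g for k, g in keyed_grads.items() if g is None}
  valid = {k: g for k, g in keyed_grads.items() if g is not None}
  if invalid:
    logging.log_first_n(logging.WARNING, 'Cannot perturb features %s', 1,
                        list(invalid))
  return tf.nest.map_structure(
      lambda g: tf.where(tf.math.is_finite(g), g, tf.zeros_like(g)), valid)


grads = {'weight': tf.constant([1.0, float('inf'), float('nan')]),
         'token_id': None}
print(sanitize_grads(grads)['weight'].numpy())  # [1. 0. 0.]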
Example 3: tfhub_cache_dir
# Required import: from absl import logging [as alias]
# Or: from absl.logging import log_first_n [as alias]
def tfhub_cache_dir(default_cache_dir=None, use_temp=False):
  """Returns the cache directory.

  The cache directory is taken from the TFHUB_CACHE_DIR environment variable,
  the --tfhub_cache_dir flag, or the default, in that order, if set.

  Args:
    default_cache_dir: Default cache location to use if neither the
      TFHUB_CACHE_DIR environment variable nor --tfhub_cache_dir is specified.
    use_temp: bool, whether to use the system's temp directory as a module
      cache directory if none of default_cache_dir, --tfhub_cache_dir, and
      the TFHUB_CACHE_DIR environment variable is specified.
  """
  # Note: We are using FLAGS["tfhub_cache_dir"] (and not FLAGS.tfhub_cache_dir)
  # to access the flag value in order to avoid parsing the argv list. The
  # flags should have been parsed by now in main() by tf.app.run(). If that is
  # not the case (say, in a Colab env), we skip flag parsing because argv may
  # contain unknown flags.
  cache_dir = (
      os.getenv(_TFHUB_CACHE_DIR, "") or FLAGS["tfhub_cache_dir"].value or
      default_cache_dir)
  if not cache_dir and use_temp:
    # Place all TF-Hub modules under <system's temp>/tfhub_modules.
    cache_dir = os.path.join(tempfile.gettempdir(), "tfhub_modules")
  if cache_dir:
    logging.log_first_n(logging.INFO, "Using %s to cache modules.", 1,
                        cache_dir)
  return cache_dir
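A usage sketch, assuming the module-level FLAGS and the _TFHUB_CACHE_DIR constant are defined as in the surrounding tensorflow_hub module (where _TFHUB_CACHE_DIR names the "TFHUB_CACHE_DIR" environment variable) and that flags have already been parsed; all paths are illustrative.

import os

# Highest precedence: the environment variable.
os.environ['TFHUB_CACHE_DIR'] = '/tmp/my_hub_cache'
print(tfhub_cache_dir(use_temp=True))  # -> '/tmp/my_hub_cache'

# With the env var unset (and no --tfhub_cache_dir flag given), the explicit
# default wins; with use_temp=True and nothing else set, the system temp
# directory is the last resort.
del os.environ['TFHUB_CACHE_DIR']
print(tfhub_cache_dir(default_cache_dir='/data/hub_cache'))  # -> '/data/hub_cache'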