This article collects typical usage examples of the Python method absl.logging.warn. If you have been wondering how exactly to use logging.warn, or what it looks like in practice, the curated code samples below may help. You can also explore the enclosing module, absl.logging, for related functionality.
The 15 code examples of logging.warn shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
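As a quick orientation before the examples: absl.logging mirrors the standard-library logging API, and logging.warn is a deprecated alias for logging.warning (newer absl-py releases may remove it entirely), so new code should prefer logging.warning. A minimal, self-contained sketch; the message and value are invented for illustration:

from absl import app
from absl import logging


def main(argv):
  del argv  # Unused.
  # Both calls emit a WARNING record; warn() is the deprecated spelling.
  logging.warn('disk usage at %d%%', 91)
  logging.warning('disk usage at %d%%', 91)


if __name__ == '__main__':
  app.run(main)  # app.run() also initializes absl's logging handler.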
Example 1: disassociate_tag
# Required import: from absl import logging [as alias]
# Or: from absl.logging import warn [as alias]
def disassociate_tag(self, user_email, tag_name):
"""Disassociates a tag from a device.
Args:
user_email: str, the email of the user taking the action.
tag_name: str, the name of the tag to be disassociated.
    If the tag is not currently associated with the device, a warning is
    logged instead of an exception being raised.
    """
for tag_reference in self.tags:
if tag_reference.tag.name == tag_name:
self.tags.remove(tag_reference)
self.put()
self.stream_to_bq(
user_email, 'Removed tag %s from device %s' %
(tag_reference.tag.name, self.identifier))
return
logging.warn(
'Tag with name %s is not associated with device %s',
tag_name, self.identifier)
Example 2: num_rewards
# Required import: from absl import logging [as alias]
# Or: from absl.logging import warn [as alias]
def num_rewards(self):
"""Returns the number of distinct rewards.
Returns:
Returns None if the reward range is infinite or the processed rewards
aren't discrete, otherwise returns the number of distinct rewards.
"""
# Pre-conditions: reward range is finite.
# : processed rewards are discrete.
if not self.is_reward_range_finite:
logging.warn("Infinite reward range, `num_rewards returning None`")
return None
if not self.is_processed_rewards_discrete:
logging.warn(
"Processed rewards are not discrete, `num_rewards` returning None")
return None
min_reward, max_reward = self.reward_range
return max_reward - min_reward + 1
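For intuition, the final line counts the integers in an inclusive range. A quick worked instance (values invented for illustration):

min_reward, max_reward = -1, 2  # hypothetical finite, discrete reward range
num_rewards = max_reward - min_reward + 1
assert num_rewards == 4  # the distinct rewards are -1, 0, 1, 2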
Example 3: generate_raw_dataset
# Required import: from absl import logging [as alias]
# Or: from absl.logging import warn [as alias]
def generate_raw_dataset(self, args):
logging.warn(
"Not actually regenerating the raw dataset.\n"
"To regenerate the raw CSV dataset, see the TFX Chicago Taxi example "
"for details as to how to do so. "
"tfx/examples/chicago_taxi_pipeline/taxi_pipeline_kubeflow_gcp.py "
"has the BigQuery query used to generate the dataset.\n"
"After regenerating the raw CSV dataset, you should also regenerate "
"the derived TFRecords dataset. You can do so by passing "
"--generate_dataset_args=/path/to/csv_dataset.csv to "
"regenerate_datasets.py.")
if args:
logging.info("Converting CSV at %s to TFRecords", args)
self.convert_csv_to_tf_examples(args, self.dataset_path())
logging.info("TFRecords written to %s", self.dataset_path())
Example 4: check_invalid_state
# Required import: from absl import logging [as alias]
# Or: from absl.logging import warn [as alias]
  @contextlib.contextmanager  # the method yields, so it needs this decorator (requires `import contextlib`)
  def check_invalid_state(self):
"""Checks whether the physics state is invalid at exit.
Yields:
None
Raises:
PhysicsError: if the simulation state is invalid at exit, unless this
context is nested inside a `suppress_physics_errors` context, in which
case a warning will be logged instead.
"""
# `np.copyto(dst, src)` is marginally faster than `dst[:] = src`.
np.copyto(self._warnings_before, self._warnings)
yield
np.greater(self._warnings, self._warnings_before, out=self._new_warnings)
if any(self._new_warnings):
warning_names = np.compress(self._new_warnings, enums.mjtWarning._fields)
message = _INVALID_PHYSICS_STATE.format(
warning_names=', '.join(warning_names))
if self._warnings_cause_exception:
raise _control.PhysicsError(message)
else:
logging.warn(message)
Example 5: get_vars_to_restore
# Required import: from absl import logging [as alias]
# Or: from absl.logging import warn [as alias]
def get_vars_to_restore(ckpt=None):
"""Returns list of variables that should be saved/restored.
Args:
ckpt: Path to existing checkpoint. If present, returns only the subset of
variables that exist in given checkpoint.
Returns:
List of all variables that need to be saved/restored.
"""
model_vars = tf.trainable_variables()
# Add batchnorm variables.
bn_vars = [v for v in tf.global_variables()
if 'moving_mean' in v.op.name or 'moving_variance' in v.op.name]
model_vars.extend(bn_vars)
model_vars = sorted(model_vars, key=lambda x: x.op.name)
if ckpt is not None:
ckpt_var_names = tf.contrib.framework.list_variables(ckpt)
ckpt_var_names = [name for (name, unused_shape) in ckpt_var_names]
for v in model_vars:
if v.op.name not in ckpt_var_names:
logging.warn('Missing var %s in checkpoint: %s', v.op.name,
os.path.basename(ckpt))
model_vars = [v for v in model_vars if v.op.name in ckpt_var_names]
return model_vars
Example 6: build
# Required import: from absl import logging [as alias]
# Or: from absl.logging import warn [as alias]
def build(self, strategy: tf.distribute.Strategy = None) -> tf.data.Dataset:
"""Construct a dataset end-to-end and return it using an optional strategy.
Args:
strategy: a strategy that, if passed, will distribute the dataset
according to that strategy. If passed and `num_devices > 1`,
`use_per_replica_batch_size` must be set to `True`.
Returns:
A TensorFlow dataset outputting batched images and labels.
"""
if strategy:
if strategy.num_replicas_in_sync != self.config.num_devices:
        logging.warn('Passed a strategy with %d devices, but expected '
                     '%d devices.',
strategy.num_replicas_in_sync,
self.config.num_devices)
dataset = strategy.experimental_distribute_datasets_from_function(
self._build)
else:
dataset = self._build()
return dataset
Example 7: _check_budget
# Required import: from absl import logging [as alias]
# Or: from absl.logging import warn [as alias]
def _check_budget(self, config):
num_trainables = utils.log_trainables()
if config.num_params > -1:
assert num_trainables <= config.num_params, (
'The number of trainable parameters ({}) exceeds the budget ({}). '
.format(num_trainables, config.num_params))
    if num_trainables < 0.98 * (config.num_params - 500):
logging.warn('Number of parameters (%s) is way below the budget (%s)',
num_trainables, config.num_params)
Example 8: _get_observations
# Required import: from absl import logging [as alias]
# Or: from absl.logging import warn [as alias]
def _get_observations(self, target_game_loop):
# Transform in the thread so it runs while waiting for other observations.
def parallel_observe(c, f):
obs = c.observe(target_game_loop=target_game_loop)
agent_obs = f.transform_obs(obs)
return obs, agent_obs
with self._metrics.measure_observation_time():
self._obs, self._agent_obs = zip(*self._parallel.run(
(parallel_observe, c, f)
for c, f in zip(self._controllers, self._features)))
game_loop = self._agent_obs[0].game_loop[0]
if (game_loop < target_game_loop and
not any(o.player_result for o in self._obs)):
raise ValueError(
("The game didn't advance to the expected game loop. "
"Expected: %s, got: %s") % (target_game_loop, game_loop))
elif game_loop > target_game_loop and target_game_loop > 0:
logging.warn("Received observation %d step(s) late: %d rather than %d.",
game_loop - target_game_loop, game_loop, target_game_loop)
if self._realtime:
# Track delays on executed actions.
# Note that this will underestimate e.g. action sent, new observation
# taken before action executes, action executes, observation taken
# with action. This is difficult to avoid without changing the SC2
# binary - e.g. send the observation game loop with each action,
# return them in the observation action proto.
if self._last_obs_game_loop is not None:
for i, obs in enumerate(self._obs):
for action in obs.actions:
if action.HasField("game_loop"):
delay = action.game_loop - self._last_obs_game_loop
if delay > 0:
num_slots = len(self._action_delays[i])
delay = min(delay, num_slots - 1) # Cap to num buckets.
self._action_delays[i][delay] += 1
break
self._last_obs_game_loop = game_loop
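The final lines above cap each observed delay into a fixed number of histogram slots. A standalone sketch of that bucketing pattern, with sizes invented for illustration:

num_slots = 10
delay_histogram = [0] * num_slots
observed_delay = 23                          # hypothetical delay in game loops
bucket = min(observed_delay, num_slots - 1)  # overflow goes into the last slot
delay_histogram[bucket] += 1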
Example 9: _choose_rec_from_softmax
# Required import: from absl import logging [as alias]
# Or: from absl.logging import warn [as alias]
def _choose_rec_from_softmax(self, softmax_probs, deterministic):
if deterministic:
rec = np.argmax(softmax_probs)
else:
# Fix the probability vector to avoid np.random.choice exception.
softmax_probs = np.nan_to_num(softmax_probs)
softmax_probs += 1e-10
if not np.any(softmax_probs):
logging.warn('All zeros in the softmax prediction.')
softmax_probs = softmax_probs / np.sum(softmax_probs)
# TODO(): Use epsilon for exploration at the model level.
rec = self._rng.choice(self.action_space_size, p=softmax_probs)
return rec
# TODO(): Move the simulation function to a runner class.
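The NaN/zero handling above matters because np.random.choice raises a ValueError when the probability vector contains NaN or does not sum to 1. A standalone sketch of the same sanitation, with array values invented for illustration:

import numpy as np

probs = np.array([0.7, np.nan, 0.3, 0.0])
probs = np.nan_to_num(probs)   # NaN -> 0.0
probs += 1e-10                 # guard against an all-zero vector
probs = probs / np.sum(probs)  # renormalize so the entries sum to 1
rec = np.random.default_rng(0).choice(len(probs), p=probs)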
Example 10: _observer
# Required import: from absl import logging [as alias]
# Or: from absl.logging import warn [as alias]
def _observer(self, subgraph):
input_nodes = self._inputs_for_observed_module(subgraph)
if input_nodes is None:
# We do not fail as we want to allow higher-level Sonnet components.
# In practice, the rest of the logic will fail if we are unable to
# connect all low-level modules.
logging.warn('Unprocessed module "%s"', str(subgraph.module))
return
if subgraph.outputs in input_nodes:
# The Sonnet module is just returning its input as its output.
# This may happen with a reshape in which the shape does not change.
return
self._add_module(self._wrapper_for_observed_module(subgraph),
subgraph.outputs, *input_nodes)
Example 11: _propagate_through
# Required import: from absl import logging [as alias]
# Or: from absl.logging import warn [as alias]
def _propagate_through(self, module, input_bounds):
if isinstance(module, layers.BatchNorm):
# This IBP-specific batch-norm implementation exposes stats recorded
# the most recent time the BatchNorm module was connected.
# These will be either the batch stats (e.g. if training) or the moving
# averages, depending on how the module was called.
mean = module.mean
variance = module.variance
epsilon = module.epsilon
scale = module.scale
bias = module.bias
else:
# This plain Sonnet batch-norm implementation only exposes the
# moving averages.
logging.warn('Sonnet BatchNorm module encountered: %s. '
'IBP will always use its moving averages, not the local '
'batch stats, even in training mode.', str(module))
mean = module.moving_mean
variance = module.moving_variance
epsilon = module._eps # pylint: disable=protected-access
try:
bias = module.beta
except snt.Error:
bias = None
try:
scale = module.gamma
except snt.Error:
scale = None
return input_bounds.apply_batch_norm(self, mean, variance,
scale, bias, epsilon)
Example 12: apply_increasing_monotonic_fn
# Required import: from absl import logging [as alias]
# Or: from absl.logging import warn [as alias]
def apply_increasing_monotonic_fn(self, wrapper, fn, *args, **parameters):
if fn.__name__ != 'relu':
# Fallback to regular interval bound propagation for unsupported
# operations.
logging.warn('"%s" is not supported by SymbolicBounds. '
'Fallback on IntervalBounds.', fn.__name__)
interval_bounds = basic_bounds.IntervalBounds.convert(self)
converted_args = [basic_bounds.IntervalBounds.convert(b) for b in args]
interval_bounds = interval_bounds._increasing_monotonic_fn( # pylint: disable=protected-access
fn, *converted_args)
return self.convert(interval_bounds)
concrete = self.concretize()
lb, ub = concrete.lower, concrete.upper
is_ambiguous = tf.logical_and(ub > 0, lb < 0)
# Ensure denominator is always positive, even when not needed.
ambiguous_denom = tf.where(is_ambiguous, ub - lb, tf.ones_like(ub))
scale = tf.where(
is_ambiguous, ub / ambiguous_denom,
tf.where(lb >= 0, tf.ones_like(lb), tf.zeros_like(lb)))
bias = tf.where(is_ambiguous, -lb, tf.zeros_like(lb))
lb_out = LinearExpression(
w=tf.expand_dims(scale, 1) * self.lower.w,
b=scale * self.lower.b,
lower=self.lower.lower, upper=self.lower.upper)
ub_out = LinearExpression(
w=tf.expand_dims(scale, 1) * self.upper.w,
b=scale * (self.upper.b + bias),
lower=self.upper.lower, upper=self.upper.upper)
return SymbolicBounds(lb_out, ub_out).with_priors(wrapper.output_bounds)
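For the ambiguous case lb < 0 < ub, this builds the standard linear ReLU relaxation with slope ub / (ub - lb), which matches relu exactly at both endpoints of the interval. A quick numeric check, with bounds invented for illustration:

lb, ub = -1.0, 3.0
scale = ub / (ub - lb)  # 0.75
bias = -lb              # 1.0
assert scale * (lb + bias) == 0.0  # upper line equals relu(lb) == 0
assert scale * (ub + bias) == ub   # upper line equals relu(ub) == ub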
Example 13: map_feed_dict_unsafe
# Required import: from absl import logging [as alias]
# Or: from absl.logging import warn [as alias]
def map_feed_dict_unsafe(feature_placeholders_spec, np_inputs_spec):
"""Deprecated function to create a feed_dict to be passed to session.run.
  tensorspec_utils.map_feed_dict should be used instead. map_feed_dict_unsafe
  does not check that feature_placeholders_spec and np_inputs_spec actually
  agree in terms of dtype, shape, or additional unused attributes within
  np_inputs_spec.
  Args:
    feature_placeholders_spec: A TensorSpecStruct containing
{str: tf.placeholder}.
np_inputs_spec: The numpy input according to the same spec.
Returns:
A mapping {placeholder: np.ndarray} which can be fed to a tensorflow
session.run.
"""
logging.warning('map_feed_dict_unsafe is deprecated. '
'Please update to map_feed_dict.')
flat_spec = flatten_spec_structure(feature_placeholders_spec)
flat_np_inputs = flatten_spec_structure(np_inputs_spec)
for key, value in flat_np_inputs.items():
if key not in flat_spec:
logging.warn(
'np_inputs has an input: %s, not found in the tensorspec.', key)
feed_dict = {}
for key, value in flat_spec.items():
feed_dict[value] = flat_np_inputs[key]
return feed_dict
Example 14: is_encoded_image_spec
# Required import: from absl import logging [as alias]
# Or: from absl.logging import warn [as alias]
def is_encoded_image_spec(tensor_spec):
"""Determines whether the passed tensor_spec speficies an encoded image."""
if hasattr(tensor_spec, 'data_format'):
# If tensor_spec is an ExtendedTensorSpec, use the data_format to check.
return (tensor_spec.data_format is not None) and (
tensor_spec.data_format.upper() in ['JPEG', 'PNG'])
else:
# Otherwise default to the old "name contains 'image'" logic.
logging.warn('Using a deprecated tensor specification. '
'Use ExtendedTensorSpec.')
return 'image' in tensor_spec.name
Example 15: __init__
# Required import: from absl import logging [as alias]
# Or: from absl.logging import warn [as alias]
def __init__(self, year, track='main', language='EN', **kwargs):
"""BuilderConfig for Qa4Mre.
Args:
year: string, year of dataset
track: string, the task track from PATHS[year]['_TRACKS'].
language: string, Acronym for language in the main task.
**kwargs: keyword arguments forwarded to super.
"""
if track.lower() not in PATHS[year]['_TRACKS']:
raise ValueError(
'Incorrect track. Track should be one of the following: ',
PATHS[year]['_TRACKS'])
if track.lower() != 'main' and language.upper() != 'EN':
logging.warn('Only English documents available for pilot '
'tracks. Setting English by default.')
language = 'EN'
    if (track.lower() == 'main' and
        language.upper() not in PATHS[year]['_LANGUAGES_MAIN']):
raise ValueError(
'Incorrect language for the main track. Correct options: ',
PATHS[year]['_LANGUAGES_MAIN'])
self.year = year
self.track = track.lower()
self.lang = language.upper()
name = self.year + '.' + self.track + '.' + self.lang
description = _DESCRIPTION
description += ('This configuration includes the {} track for {} language '
'in {} year.').format(self.track, self.lang, self.year)
super(Qa4mreConfig, self).__init__(
name=name,
description=description,
version=tfds.core.Version('0.1.0'),
**kwargs)