This article collects typical usage examples of the warning function from the Python module tensorflow.python.platform.tf_logging. If you are unsure what `warning` does, how to call it, or where it is used in practice, the curated examples below should help.
The following shows 15 code examples of the warning function, sorted by popularity by default.
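Before the examples, a minimal sketch of the import convention and call style they all share (the message is a lazy %-format string; the remaining arguments fill its placeholders):

# tf_logging is TensorFlow's internal wrapper around Python's standard
# `logging` module; by convention it is imported under the alias `logging`.
from tensorflow.python.platform import tf_logging as logging

# Lazy %-formatting: arguments are only interpolated if the record
# is actually emitted.
logging.warning('Checkpoint file "%s" not found', '/tmp/model.ckpt')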
Example 1: evaluate_generator
def evaluate_generator(self,
                       generator,
                       steps=None,
                       max_queue_size=10,
                       workers=1,
                       use_multiprocessing=False,
                       **kwargs):
  """Evaluates the model on a data generator.

  The generator should return the same kind of data
  as accepted by `test_on_batch`.

  Arguments:
      generator: Generator yielding tuples (inputs, targets)
          or (inputs, targets, sample_weights)
      steps: Total number of steps (batches of samples)
          to yield from `generator` before stopping.
          Optional for `Sequence`: if unspecified, will use
          the `len(generator)` as a number of steps.
      max_queue_size: maximum size for the generator queue
      workers: maximum number of processes to spin up
      use_multiprocessing: if True, use process-based threading.
          Note that because this implementation
          relies on multiprocessing, you should not pass
          non-picklable arguments to the generator
          as they can't be passed easily to children processes.
      **kwargs: support for legacy arguments.

  Returns:
      Scalar test loss (if the model has no metrics)
      or list of scalars (if the model computes other metrics).
      The attribute `model.metrics_names` will give you
      the display labels for the scalar outputs.

  Raises:
      RuntimeError: if the model was never compiled.
      ValueError: In case the generator yields
          data in an invalid format.
  """
  # Legacy support
  if 'max_q_size' in kwargs:
    max_queue_size = kwargs.pop('max_q_size')
    logging.warning('The argument `max_q_size` has been renamed '
                    '`max_queue_size`. Update your method calls accordingly.')
  if 'pickle_safe' in kwargs:
    use_multiprocessing = kwargs.pop('pickle_safe')
    logging.warning('The argument `pickle_safe` has been renamed '
                    '`use_multiprocessing`. '
                    'Update your method calls accordingly.')
  if kwargs:
    raise ValueError('Unrecognized keyword arguments: ' + str(kwargs))

  if not self.built:
    raise RuntimeError('The model needs to be compiled before being used.')
  return self.model.evaluate_generator(
      generator,
      steps,
      max_queue_size=max_queue_size,
      workers=workers,
      use_multiprocessing=use_multiprocessing)
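For illustration, a hypothetical call that would hit the legacy-argument branch above (`model` and `val_generator` are placeholders, not part of the example):

# `max_q_size` is popped from kwargs, the warning is logged, and the
# value is forwarded as `max_queue_size`.
loss = model.evaluate_generator(val_generator, steps=100, max_q_size=20)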
Example 2: _read_latest_config_files
def _read_latest_config_files(self, run_path_pairs):
  """Reads and returns the projector config files in every run directory."""
  configs = {}
  config_fpaths = {}
  for run_name, assets_dir in run_path_pairs:
    config = projector_config_pb2.ProjectorConfig()
    config_fpath = os.path.join(assets_dir, PROJECTOR_FILENAME)
    if file_io.file_exists(config_fpath):
      file_content = file_io.read_file_to_string(config_fpath)
      text_format.Merge(file_content, config)
    has_tensor_files = False
    for embedding in config.embeddings:
      if embedding.tensor_path:
        has_tensor_files = True
        break

    if not config.model_checkpoint_path:
      # See if you can find a checkpoint file in the logdir.
      logdir = _assets_dir_to_logdir(assets_dir)
      ckpt_path = _find_latest_checkpoint(logdir)
      if not ckpt_path and not has_tensor_files:
        continue
      if ckpt_path:
        config.model_checkpoint_path = ckpt_path

    # Sanity check for the checkpoint file.
    if (config.model_checkpoint_path and
        not checkpoint_exists(config.model_checkpoint_path)):
      logging.warning('Checkpoint file "%s" not found',
                      config.model_checkpoint_path)
      continue
    configs[run_name] = config
    config_fpaths[run_name] = config_fpath
  return configs, config_fpaths
Example 3: __init__
def __init__(self, model_dir=None, config=None):
  """Initializes a BaseEstimator instance.

  Args:
    model_dir: Directory to save model parameters, graph, etc. This can
      also be used to load checkpoints from the directory into an estimator
      to continue training a previously saved model.
    config: A RunConfig instance.
  """
  # Model directory.
  self._model_dir = model_dir
  if self._model_dir is None:
    self._model_dir = tempfile.mkdtemp()
    logging.warning('Using temporary folder as model directory: %s',
                    self._model_dir)

  # Create a run configuration.
  if config is None:
    self._config = BaseEstimator._Config()
    logging.warning('Using default config.')
  else:
    self._config = config
  logging.info('Using config: %s', str(vars(self._config)))

  # Set device function depending if there are replicas or not.
  self._device_fn = _get_replica_device_setter(self._config)

  # Features and targets TensorSignature objects.
  # TODO(wicke): Rename these to something more descriptive
  self._features_info = None
  self._targets_info = None

  self._graph = None
Example 4: new_func
def new_func(*args, **kwargs):
  """Deprecation wrapper."""
  # TODO(apassos) figure out a way to have reasonable performance with
  # deprecation warnings and eager mode.
  if is_in_graph_mode.IS_IN_GRAPH_MODE() and _PRINT_DEPRECATION_WARNINGS:
    invalid_args = []
    named_args = tf_inspect.getcallargs(func, *args, **kwargs)
    for arg_name, spec in iter(deprecated_positions.items()):
      if (spec.position < len(args) and
          not (spec.has_ok_value and
               _same_value(named_args[arg_name], spec.ok_value))):
        invalid_args.append(arg_name)
    if is_varargs_deprecated and len(args) > len(arg_spec.args):
      invalid_args.append(arg_spec.varargs)
    if is_kwargs_deprecated and kwargs:
      invalid_args.append(arg_spec.varkw)
    for arg_name in deprecated_arg_names:
      if (arg_name in kwargs and
          not (deprecated_positions[arg_name].has_ok_value and
               _same_value(named_args[arg_name],
                           deprecated_positions[arg_name].ok_value))):
        invalid_args.append(arg_name)
    for arg_name in invalid_args:
      if (func, arg_name) not in _PRINTED_WARNING:
        if warn_once:
          _PRINTED_WARNING[(func, arg_name)] = True
        logging.warning(
            'From %s: calling %s (from %s) with %s is deprecated and will '
            'be removed %s.\nInstructions for updating:\n%s',
            _call_location(), decorator_utils.get_qualified_name(func),
            func.__module__, arg_name,
            'in a future version' if date is None else ('after %s' % date),
            instructions)
  return func(*args, **kwargs)
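This wrapper is the closure built by TensorFlow's `deprecated_args` decorator factory (in `tensorflow.python.util.deprecation`). A minimal sketch of triggering it, assuming that module path and a toy function:

from tensorflow.python.util import deprecation

# Mark `keep_prob` as deprecated; `None` means no removal date is announced.
@deprecation.deprecated_args(None, 'Use `rate` instead.', 'keep_prob')
def dropout(x, rate=None, keep_prob=None):
  return x

dropout([1.0], keep_prob=0.5)  # logs the deprecation warning (once by default)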
Example 5: get_config
def get_config(self):
  config = {
      'axis': self.axis,
      'momentum': self.momentum,
      'epsilon': self.epsilon,
      'center': self.center,
      'scale': self.scale,
      'beta_initializer': initializers.serialize(self.beta_initializer),
      'gamma_initializer': initializers.serialize(self.gamma_initializer),
      'moving_mean_initializer':
          initializers.serialize(self.moving_mean_initializer),
      'moving_variance_initializer':
          initializers.serialize(self.moving_variance_initializer),
      'beta_regularizer': regularizers.serialize(self.beta_regularizer),
      'gamma_regularizer': regularizers.serialize(self.gamma_regularizer),
      'beta_constraint': constraints.serialize(self.beta_constraint),
      'gamma_constraint': constraints.serialize(self.gamma_constraint)
  }
  # Only add TensorFlow-specific parameters if they are set, so as to
  # preserve model compatibility with external Keras.
  if self.renorm:
    config['renorm'] = True
    config['renorm_clipping'] = self.renorm_clipping
    config['renorm_momentum'] = self.renorm_momentum
  if self.virtual_batch_size is not None:
    config['virtual_batch_size'] = self.virtual_batch_size
  # Note: adjustment is not serializable.
  if self.adjustment is not None:
    logging.warning('The `adjustment` function of this `BatchNormalization` '
                    'layer cannot be serialized and has been omitted from '
                    'the layer config. It will not be included when '
                    're-creating the layer from the saved config.')
  base_config = super(BatchNormalizationBase, self).get_config()
  return dict(list(base_config.items()) + list(config.items()))
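A sketch of the condition that fires this warning, assuming the `adjustment` constructor argument of `tf.keras.layers.BatchNormalization` (a function taking the dynamic input shape and returning a `(scale, bias)` pair):

import tensorflow as tf

bn = tf.keras.layers.BatchNormalization(
    adjustment=lambda shape: (tf.ones(shape[-1:]), tf.zeros(shape[-1:])))
config = bn.get_config()  # warns: 'adjustment' is omitted from the config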
Example 6: __init__
def __init__(self,
             filepath,
             monitor='val_loss',
             verbose=0,
             save_best_only=False,
             save_weights_only=False,
             mode='auto',
             period=1):
  super(ModelCheckpoint, self).__init__()
  self.monitor = monitor
  self.verbose = verbose
  self.filepath = filepath
  self.save_best_only = save_best_only
  self.save_weights_only = save_weights_only
  self.period = period
  self.epochs_since_last_save = 0

  if mode not in ['auto', 'min', 'max']:
    logging.warning('ModelCheckpoint mode %s is unknown, '
                    'fallback to auto mode.', mode)
    mode = 'auto'

  if mode == 'min':
    self.monitor_op = np.less
    self.best = np.Inf
  elif mode == 'max':
    self.monitor_op = np.greater
    self.best = -np.Inf
  else:
    if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
      self.monitor_op = np.greater
      self.best = -np.Inf
    else:
      self.monitor_op = np.less
      self.best = np.Inf
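A typical instantiation; an unrecognized `mode` string falls back to 'auto' after logging the warning:

checkpoint = ModelCheckpoint('weights.{epoch:02d}.h5',
                             monitor='val_loss',
                             save_best_only=True,
                             mode='minimum')  # unknown mode -> warning, then 'auto'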
Example 7: _register_block_with_sequence_key
def _register_block_with_sequence_key(self, layer_key, fisher_block):
  """Validates and registers the layer_key if it's a sequence."""
  inclusions = {
      fisher_elt
      for layer_elt in layer_key for fisher_elt in self.fisher_blocks
      if self._equal_or_subset(layer_elt, fisher_elt)
  }

  if not inclusions:
    self.fisher_blocks[layer_key] = fisher_block
    return

  for key in inclusions:
    fisher_block_key = key if isinstance(key, (tuple, list)) else (key,)
    if set(layer_key).issubset(fisher_block_key):
      logging.warning("Graph Registration Warning: tried to register "
                      "a subset ({}) of an already registered tuple "
                      "({}), skipping".format(layer_key, fisher_block_key))
      return
    if not set(fisher_block_key).issubset(layer_key):
      raise ValueError(
          "Inconsistent registration, expected new key to be a subset or "
          "superset of the existing key: existing is {}, new is {}".format(
              key, layer_key))
    else:
      self.fisher_blocks.pop(key)

  self.fisher_blocks[layer_key] = fisher_block
Example 8: __init__
def __init__(self,
             monitor='val_loss',
             factor=0.1,
             patience=10,
             verbose=0,
             mode='auto',
             min_delta=1e-4,
             cooldown=0,
             min_lr=0,
             **kwargs):
  super(ReduceLROnPlateau, self).__init__()

  self.monitor = monitor
  if factor >= 1.0:
    raise ValueError('ReduceLROnPlateau does not support a factor >= 1.0.')
  if 'epsilon' in kwargs:
    min_delta = kwargs.pop('epsilon')
    logging.warning('`epsilon` argument is deprecated and '
                    'will be removed, use `min_delta` instead.')
  self.factor = factor
  self.min_lr = min_lr
  self.min_delta = min_delta
  self.patience = patience
  self.verbose = verbose
  self.cooldown = cooldown
  self.cooldown_counter = 0  # Cooldown counter.
  self.wait = 0
  self.best = 0
  self.mode = mode
  self.monitor_op = None
  self._reset()
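A call using the deprecated keyword, which the constructor rewrites to `min_delta` after logging the warning:

reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
                              patience=5, epsilon=1e-3)  # warns; becomes min_delta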
Example 9: __init__
def __init__(self,
             num_words=None,
             filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
             lower=True,
             split=' ',
             char_level=False,
             oov_token=None,
             **kwargs):
  # Legacy support
  if 'nb_words' in kwargs:
    logging.warning('The `nb_words` argument in `Tokenizer` '
                    'has been renamed `num_words`.')
    num_words = kwargs.pop('nb_words')
  if kwargs:
    raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))

  self.word_counts = OrderedDict()
  self.word_docs = {}
  self.filters = filters
  self.split = split
  self.lower = lower
  self.num_words = num_words
  self.document_count = 0
  self.char_level = char_level
  self.oov_token = oov_token
  self.index_docs = {}
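The legacy branch is exercised like this; the popped value simply takes the place of `num_words`:

tokenizer = Tokenizer(nb_words=10000)  # warns; value is moved to num_words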
Example 10: filter_distributed_callbacks
def filter_distributed_callbacks(callbacks_list):
  """Filter Callbacks based on the worker context when running multi-worker.

  Arguments:
    callbacks_list: A list of `Callback` instances.

  Returns:
    The list of `Callback` instances that should be run on this worker.
  """
  if not K.in_multi_worker_mode():
    raise ValueError(
        'filter_distributed_callbacks() should only be called when Keras '
        'is in multi worker mode.')

  worker_context = dc_context.get_current_worker_context()
  callbacks_list = callbacks_list or []
  if not [
      c for c in callbacks_list if isinstance(c, callbacks.ModelCheckpoint)
  ]:
    # TODO(rchao): Consider providing a ModelCheckpoint here if the user
    # fails to.
    logging.warning('ModelCheckpoint callback is not provided. '
                    'Workers will need to restart training if any fails.')
  # TODO(rchao): Add similar warning for restoring callback (to be designed).

  if callbacks_list is None or worker_context.is_chief:
    return callbacks_list

  # Some Callbacks should only run on the chief worker.
  return [
      callback for callback in callbacks_list
      if not callback._chief_worker_only
  ]  # pylint: disable=protected-access
Example 11: new_func
def new_func(*args, **kwargs):
  logging.warning(
      "%s (from %s) is experimental and may change or be removed at "
      "any time, and without warning.",
      decorator_utils.get_qualified_name(func),
      func.__module__,
  )
  return func(*args, **kwargs)
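This is the body of an `@experimental`-style decorator. A self-contained sketch of the same pattern, with the decorator written out (the `experimental` name here is illustrative):

import functools

from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import decorator_utils

def experimental(func):
  """Illustrative decorator reproducing the wrapper above."""
  @functools.wraps(func)
  def new_func(*args, **kwargs):
    # Warn on every call that the wrapped API is experimental.
    logging.warning(
        "%s (from %s) is experimental and may change or be removed at "
        "any time, and without warning.",
        decorator_utils.get_qualified_name(func), func.__module__)
    return func(*args, **kwargs)
  return new_func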
Example 12: _serve_runs
def _serve_runs(self, request):
  """WSGI app serving a JSON object about runs and tags.

  Returns a mapping from runs to tagType to list of tags for that run.

  Args:
    request: A werkzeug request

  Returns:
    A werkzeug Response with the following content:
    {runName: {images: [tag1, tag2, tag3],
               audio: [tag4, tag5, tag6],
               scalars: [tagA, tagB, tagC],
               histograms: [tagX, tagY, tagZ],
               firstEventTimestamp: 123456.789}}
  """
  runs = self._multiplexer.Runs()
  for run_name, run_data in runs.items():
    try:
      run_data['firstEventTimestamp'] = self._multiplexer.FirstEventTimestamp(
          run_name)
    except ValueError:
      logging.warning('Unable to get first event timestamp for run %s',
                      run_name)
      run_data['firstEventTimestamp'] = None
  return http_util.Respond(request, runs, 'application/json')
Example 13: _read_config_files
def _read_config_files(self, run_paths):
  configs = {}
  config_fpaths = {}
  for run_name, logdir in run_paths.items():
    config_fpath = os.path.join(logdir, PROJECTOR_FILENAME)
    if not file_io.file_exists(config_fpath):
      # Skip runs that have no config file.
      continue
    # Read the config file.
    file_content = file_io.read_file_to_string(config_fpath).decode('utf-8')
    config = ProjectorConfig()
    text_format.Merge(file_content, config)

    if not config.model_checkpoint_path:
      # See if you can find a checkpoint file in the logdir.
      ckpt_path = latest_checkpoint(logdir)
      if not ckpt_path:
        # Or in the parent of logdir.
        ckpt_path = latest_checkpoint(os.path.join('../', logdir))
        if not ckpt_path:
          logging.warning('Cannot find model checkpoint in %s', logdir)
          continue
      config.model_checkpoint_path = ckpt_path

    # Sanity check for the checkpoint file.
    if not file_io.file_exists(config.model_checkpoint_path):
      logging.warning('Checkpoint file %s not found',
                      config.model_checkpoint_path)
      continue

    configs[run_name] = config
    config_fpaths[run_name] = config_fpath
  return configs, config_fpaths
Example 14: __init__
def __init__(self, model_dir=None, config=None):
  # Model directory.
  self._model_dir = model_dir
  if self._model_dir is None:
    self._model_dir = tempfile.mkdtemp()
    logging.warning('Using temporary folder as model directory: %s',
                    self._model_dir)

  # Create a run configuration.
  if config is None:
    self._config = BaseEstimator._Config()
  else:
    self._config = config

  # Set device function depending if there are replicas or not.
  if self._config.num_ps_replicas > 0:
    ps_ops = ['Variable', 'AutoReloadVariable']
    self._device_fn = device_setter.replica_device_setter(
        ps_tasks=self._config.num_ps_replicas,
        merge_devices=False, ps_ops=ps_ops)
  else:
    self._device_fn = None

  # Features and targets TensorSignature objects.
  self._features_info = None
  self._targets_info = None

  self._graph = None
Example 15: _model_not_ready
def _model_not_ready(self, sess):
  """Checks if the model is ready or not.

  Args:
    sess: A `Session`.

  Returns:
    `None` if the model is ready, a `String` with the reason why it is not
    ready otherwise.
  """
  if self._ready_op is None:
    return None
  else:
    try:
      ready_value = sess.run(self._ready_op)

      # The model is considered ready if ready_op returns an empty 1-D
      # tensor. Also compare to `None` and dtype being int32 for backward
      # compatibility.
      if (ready_value is None or ready_value.dtype == np.int32 or
          ready_value.size == 0):
        return None
      else:
        # TODO(sherrym): If a custom ready_op returns other types of tensor,
        # or strings other than variable names, this message could be
        # confusing.
        non_initialized_varnames = ", ".join(
            [i.decode("utf-8") for i in ready_value])
        return "Variables not initialized: " + non_initialized_varnames
    except errors.FailedPreconditionError as e:
      if "uninitialized" not in str(e):
        logging.warning("Model not ready raised: %s", str(e))
        raise e
      return str(e)
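The `ready_op` convention used here matches `tf.report_uninitialized_variables()` from the TF1-era API, which returns a 1-D string tensor of uninitialized variable names (empty once the model is ready). A sketch, assuming the `tf.compat.v1` namespace:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

v = tf.get_variable('v', shape=[2])
# 1-D string tensor naming uninitialized variables; empty means ready.
ready_op = tf.report_uninitialized_variables()
with tf.Session() as sess:
  print(sess.run(ready_op))  # [b'v']: not ready yet
  sess.run(tf.global_variables_initializer())
  print(sess.run(ready_op))  # []: model is ready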