This article collects typical usage examples of the Python method tensorflow.python.platform.tf_logging.error. If you are wondering what exactly tf_logging.error does, how to call it, or what real-world uses look like, the curated code examples below may help. You can also explore further usage examples for the containing module, tensorflow.python.platform.tf_logging.
The following presents 15 code examples of the tf_logging.error method, sorted by popularity by default.
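Before diving into the examples, here is a minimal, self-contained sketch (not taken from any of the examples below) of the basic call pattern. tf_logging mirrors the standard-library logging API, so error() accepts lazy printf-style arguments:

from tensorflow.python.platform import tf_logging as logging

try:
  raise IOError('disk full')
except IOError as e:
  # Like logging.error(), this supports %-style deferred formatting.
  logging.error('Failed to write checkpoint: %s', e)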
Example 1: GetTempDir
# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Alternatively: from tensorflow.python.platform.tf_logging import error [as alias]
def GetTempDir():
  """Return a temporary directory for tests to use."""
  global _googletest_temp_dir
  if not _googletest_temp_dir:
    first_frame = tf_inspect.stack()[-1][0]
    temp_dir = os.path.join(tempfile.gettempdir(),
                            os.path.basename(tf_inspect.getfile(first_frame)))
    temp_dir = tempfile.mkdtemp(prefix=temp_dir.rstrip('.py'))

    def delete_temp_dir(dirname=temp_dir):
      try:
        file_io.delete_recursively(dirname)
      except errors.OpError as e:
        logging.error('Error removing %s: %s', dirname, e)

    atexit.register(delete_temp_dir)
    _googletest_temp_dir = temp_dir
  return _googletest_temp_dir
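A hypothetical usage sketch in a test (the file name scratch.txt is illustrative): every call in the process returns the same directory, and the registered atexit handler deletes it at interpreter exit, logging via tf_logging.error if deletion fails:

import os

tmp = GetTempDir()
with open(os.path.join(tmp, 'scratch.txt'), 'w') as f:
  f.write('test data')
# The directory is removed automatically when the interpreter exits.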
Example 2: _SetPath
# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Alternatively: from tensorflow.python.platform.tf_logging import error [as alias]
def _SetPath(self, path):
  """Sets the current path to watch for new events.

  This also records the size of the old path, if any. If the size can't be
  found, an error is logged.

  Args:
    path: The full path of the file to watch.
  """
  old_path = self._path
  if old_path and not io_wrapper.IsGCSPath(old_path):
    try:
      # We're done with the path, so store its size.
      size = gfile.Stat(old_path).length
      logging.debug('Setting latest size of %s to %d', old_path, size)
      self._finalized_sizes[old_path] = size
    except errors.OpError as e:
      logging.error('Unable to get size of %s: %s', old_path, e)

  self._path = path
  self._loader = self._loader_factory(path)
Example 3: Reload
# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Alternatively: from tensorflow.python.platform.tf_logging import error [as alias]
def Reload(self):
  """Call `Reload` on every `EventAccumulator`."""
  logging.info('Beginning EventMultiplexer.Reload()')
  self._reload_called = True
  # Build a list so we're safe even if the list of accumulators is modified
  # even while we're reloading.
  with self._accumulators_mutex:
    items = list(self._accumulators.items())

  names_to_delete = set()
  for name, accumulator in items:
    try:
      accumulator.Reload()
    except (OSError, IOError) as e:
      logging.error("Unable to reload accumulator '%s': %s", name, e)
    except directory_watcher.DirectoryDeletedError:
      names_to_delete.add(name)

  with self._accumulators_mutex:
    for name in names_to_delete:
      logging.warning("Deleting accumulator '%s'", name)
      del self._accumulators[name]
  logging.info('Finished with EventMultiplexer.Reload()')
  return self
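A hedged usage sketch, assuming the TensorBoard-style EventMultiplexer constructor that accepts a run_path_map (the log directories are hypothetical):

multiplexer = EventMultiplexer(run_path_map={
    'train': '/tmp/logs/train',
    'eval': '/tmp/logs/eval',
})
# Logs an error for any accumulator that fails to load, but keeps
# reloading the remaining runs.
multiplexer.Reload()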
Example 4: _execute_schedule
# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Alternatively: from tensorflow.python.platform.tf_logging import error [as alias]
def _execute_schedule(experiment, schedule):
  """Execute the method named `schedule` of `experiment`."""
  if not hasattr(experiment, schedule):
    logging.error('Schedule references non-existent task %s', schedule)
    valid_tasks = [x for x in dir(experiment)
                   if not x.startswith('_')
                   and callable(getattr(experiment, x))]
    logging.error('Allowed values for this experiment are: %s', valid_tasks)
    raise ValueError('Schedule references non-existent task %s' % schedule)

  task = getattr(experiment, schedule)
  if not callable(task):
    logging.error('Schedule references non-callable member %s', schedule)
    valid_tasks = [x for x in dir(experiment)
                   if not x.startswith('_')
                   and callable(getattr(experiment, x))]
    logging.error('Allowed values for this experiment are: %s', valid_tasks)
    raise TypeError('Schedule references non-callable member %s' % schedule)
  return task()
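To illustrate the dispatch-by-name behavior, here is a minimal sketch with a stand-in object; DummyExperiment is hypothetical and not part of the original code:

class DummyExperiment(object):

  def train(self):
    return 'trained'

print(_execute_schedule(DummyExperiment(), 'train'))  # -> 'trained'
# _execute_schedule(DummyExperiment(), 'tarin') would log the valid task
# names (here just 'train') via logging.error and then raise ValueError.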
Example 5: every_n_step_end
# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Alternatively: from tensorflow.python.platform.tf_logging import error [as alias]
def every_n_step_end(self, step, outputs):
  super(ExportMonitor, self).every_n_step_end(step, outputs)
  try:
    self._last_export_dir = self._estimator.export(
        self.export_dir,
        exports_to_keep=self.exports_to_keep,
        signature_fn=self.signature_fn,
        input_fn=self._input_fn,
        default_batch_size=self._default_batch_size,
        input_feature_key=self._input_feature_key,
        use_deprecated_input_fn=self._use_deprecated_input_fn)
  except RuntimeError:
    # Currently we are not synchronized with checkpoint saving, which leads
    # to runtime errors when export is called at the same global step.
    # Exports depend on saved checkpoints for constructing the graph and
    # getting the global step from the graph instance saved in the checkpoint.
    # If the checkpoint is stale with respect to the current step, the global
    # step is taken to be the last saved checkpoint's global step, and the
    # exporter refuses to export the same checkpoint again, raising the
    # RuntimeError handled here.
    logging.info("Skipping exporting because the existing checkpoint has "
                 "already been exported. "
                 "Consider exporting less frequently.")
Example 6: after_run
# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Alternatively: from tensorflow.python.platform.tf_logging import error [as alias]
def after_run(self, run_context, run_values):
  _ = run_context
  scalar_stopping_signal = run_values.results
  if _StopSignals.should_stop(scalar_stopping_signal):
    # NOTE(xiejw): In prediction, stopping signals are inserted for each
    # batch, and we append one more batch to signal the system that it should
    # stop. The data flow might look like
    #
    #  batch   0: images, labels, stop = 0  (user provided)
    #  batch   1: images, labels, stop = 0  (user provided)
    #  ...
    #  batch  99: images, labels, stop = 0  (user provided)
    #  batch 100: images, labels, stop = 1  (TPUEstimator appended)
    #
    # where the final batch (id = 100) is appended by TPUEstimator, so we
    # should drop it before returning the predictions to the user.
    # To achieve that, we raise an OutOfRangeError in after_run. Once
    # MonitoredSession sees this error in SessionRunHook.after_run, the
    # "current" prediction, i.e., the batch with id = 100, is discarded
    # immediately.
    raise errors.OutOfRangeError(None, None, 'Stopped by stopping signal.')
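For context, a hedged sketch of the prediction loop such a hook participates in: MonitoredSession treats an OutOfRangeError raised during run() as an end-of-input signal and exits the with block cleanly. stopping_hook and predictions are hypothetical names:

with tf.train.MonitoredSession(hooks=[stopping_hook]) as sess:
  while not sess.should_stop():
    # The loop ends when the hook raises OutOfRangeError; the with
    # block swallows that error and closes the session gracefully.
    sess.run(predictions)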
Example 7: _validate_input_pipeline
# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Alternatively: from tensorflow.python.platform.tf_logging import error [as alias]
def _validate_input_pipeline(self):
  """Validates the input pipeline.

  Performs some sanity checks to log user-friendly information. We should
  error out to give users a better error message, but if
  _WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior) we cannot break
  user code, so we log a warning instead.

  Raises:
    RuntimeError: If the validation failed.
  """
  if ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS):
    err_msg = ('Input pipeline contains one or more QueueRunners. '
               'It could be slow and not scalable. Please consider '
               'converting your input pipeline to use `tf.data` instead (see '
               'https://www.tensorflow.org/guide/datasets for '
               'instructions).')
    if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
      raise RuntimeError(err_msg)
    else:
      logging.warn(err_msg)
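For reference, a minimal sketch (assuming TF 1.x APIs) of the kind of tf.data pipeline the warning recommends over QueueRunners; the file name and batch size are illustrative:

import tensorflow as tf

dataset = tf.data.TFRecordDataset(['train-00000.tfrecord'])
dataset = dataset.batch(32).prefetch(1)
iterator = dataset.make_one_shot_iterator()
next_batch = iterator.get_next()  # No QueueRunners are registered.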
Example 8: _AddRateToSummary
# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Alternatively: from tensorflow.python.platform.tf_logging import error [as alias]
def _AddRateToSummary(tag, rate, step, sw):
  """Adds the given rate to the summary with the given tag.

  Args:
    tag: Name for this value.
    rate: Value to add to the summary. Perhaps an error rate.
    step: Global step of the graph for the x-coordinate of the summary.
    sw: Summary writer to which to write the rate value.
  """
  sw.add_summary(
      summary_pb2.Summary(value=[summary_pb2.Summary.Value(
          tag=tag, simple_value=rate)]), step)
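A hypothetical usage sketch; logdir, step, and the 0.05 error rate are illustrative values, and tf.summary.FileWriter is the TF 1.x writer class:

sw = tf.summary.FileWriter(logdir)
_AddRateToSummary('eval/error_rate', 0.05, step, sw)
sw.flush()  # Make the point visible to TensorBoard immediately.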
Example 9: _run
# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Alternatively: from tensorflow.python.platform.tf_logging import error [as alias]
def _run(self, sess, enqueue_op, coord=None):
  if coord:
    coord.register_thread(threading.current_thread())
  decremented = False
  try:
    while True:
      if coord and coord.should_stop():
        break
      try:
        self.func(sess, enqueue_op)  # Call the enqueue function.
      except self._queue_closed_exception_types:  # pylint: disable=catching-non-exception
        # This exception indicates that a queue was closed.
        with self._lock:
          self._runs_per_session[sess] -= 1
          decremented = True
          if self._runs_per_session[sess] == 0:
            try:
              sess.run(self._close_op)
            except Exception as e:
              # Intentionally ignore errors from close_op.
              logging.vlog(1, "Ignored exception: %s", str(e))
          return
      except ValueError:
        # Ignore ValueErrors raised by the enqueue function.
        pass
  except Exception as e:
    # This catches all other exceptions.
    if coord:
      coord.request_stop(e)
    else:
      logging.error("Exception in QueueRunner: %s", str(e))
      with self._lock:
        self._exceptions_raised.append(e)
      raise
  finally:
    # Make sure we account for all terminations: normal or errors.
    if not decremented:
      with self._lock:
        self._runs_per_session[sess] -= 1
Example 10: _run
# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Alternatively: from tensorflow.python.platform.tf_logging import error [as alias]
def _run(self, sess, enqueue_op, coord=None):
  if coord:
    coord.register_thread(threading.current_thread())
  decremented = False
  try:
    while True:
      if coord and coord.should_stop():
        break
      try:
        self.func(sess, enqueue_op)  # Call the enqueue function.
      except self._queue_closed_exception_types:  # pylint: disable=catching-non-exception
        # This exception indicates that a queue was closed.
        with self._lock:
          self._runs_per_session[sess] -= 1
          decremented = True
          if self._runs_per_session[sess] == 0:
            try:
              sess.run(self._close_op)
            except Exception as e:
              # Intentionally ignore errors from close_op.
              logging.vlog(1, "Ignored exception: %s", str(e))
          return
  except Exception as e:
    # This catches all other exceptions.
    if coord:
      coord.request_stop(e)
    else:
      logging.error("Exception in QueueRunner: %s", str(e))
      with self._lock:
        self._exceptions_raised.append(e)
      raise
  finally:
    # Make sure we account for all terminations: normal or errors.
    if not decremented:
      with self._lock:
        self._runs_per_session[sess] -= 1
Example 11: close
# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Alternatively: from tensorflow.python.platform.tf_logging import error [as alias]
def close(self):
  """Closes this session.

  Calling this method frees all resources associated with the session.

  Raises:
    tf.errors.OpError: Or one of its subclasses if an error occurs while
      closing the TensorFlow session.
  """
  with self._extend_lock:
    if self._opened and not self._closed:
      self._closed = True
      with errors.raise_exception_on_not_ok_status() as status:
        tf_session.TF_CloseDeprecatedSession(self._session, status)
Example 12: __exit__
# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Alternatively: from tensorflow.python.platform.tf_logging import error [as alias]
def __exit__(self, exec_type, exec_value, exec_tb):
  if exec_type is errors.OpError:
    logging.error('Session closing due to OpError: %s', (exec_value,))
  self._default_session_context_manager.__exit__(
      exec_type, exec_value, exec_tb)
  self._default_graph_context_manager.__exit__(exec_type, exec_value, exec_tb)

  self._default_session_context_manager = None
  self._default_graph_context_manager = None

  self.close()
Example 13: reset
# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Alternatively: from tensorflow.python.platform.tf_logging import error [as alias]
def reset(target, containers=None, config=None):
  """Resets resource containers on `target`, and closes all connected sessions.

  A resource container is distributed across all workers in the
  same cluster as `target`. When a resource container on `target`
  is reset, resources associated with that container will be cleared.
  In particular, all Variables in the container will become undefined:
  they lose their values and shapes.

  NOTE:
  (i) reset() is currently only implemented for distributed sessions.
  (ii) Any sessions on the master named by `target` will be closed.

  If no resource containers are provided, all containers are reset.

  Args:
    target: The execution engine to connect to.
    containers: A list of resource container name strings, or `None` if all
      of the containers are to be reset.
    config: (Optional.) Protocol buffer with configuration options.

  Raises:
    tf.errors.OpError: Or one of its subclasses if an error occurs while
      resetting containers.
  """
  if target is not None:
    target = compat.as_bytes(target)
  if containers is not None:
    containers = [compat.as_bytes(c) for c in containers]
  else:
    containers = []
  tf_session.TF_Reset(target, containers, config)
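A hedged usage sketch: in TF 1.x this function is exposed as tf.Session.reset; the gRPC target below is hypothetical:

import tensorflow as tf

# Reset every resource container on the cluster reachable via this master,
# closing any sessions connected to it.
tf.Session.reset('grpc://worker0:2222')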
Example 14: after_run
# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Alternatively: from tensorflow.python.platform.tf_logging import error [as alias]
def after_run(self, run_context, run_values):
  if np.isnan(run_values.results):
    failure_message = "Model diverged with loss = NaN."
    if self._fail_on_nan_loss:
      logging.error(failure_message)
      raise NanLossDuringTrainingError
    else:
      logging.warning(failure_message)
      # We don't raise an error but we request a stop without an exception.
      run_context.request_stop()
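This after_run matches the behavior of tf.train.NanTensorHook in TF 1.x. A hedged usage sketch, where loss and train_op are hypothetical tensors:

hook = tf.train.NanTensorHook(loss, fail_on_nan_loss=True)
with tf.train.MonitoredTrainingSession(hooks=[hook]) as sess:
  while not sess.should_stop():
    # Raises NanLossDuringTrainingError if the loss becomes NaN.
    sess.run(train_op)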
Example 15: __init__
# Required import: from tensorflow.python.platform import tf_logging [as alias]
# Alternatively: from tensorflow.python.platform.tf_logging import error [as alias]
def __init__(self,
             save_steps=None,
             save_secs=None,
             output_dir=None,
             summary_writer=None,
             scaffold=None,
             summary_op=None):
  """Initializes a `SummarySaverHook`.

  Args:
    save_steps: `int`, save summaries every N steps. Exactly one of
      `save_secs` and `save_steps` should be set.
    save_secs: `int`, save summaries every N seconds.
    output_dir: `string`, the directory to save the summaries to. Only used
      if no `summary_writer` is supplied.
    summary_writer: `SummaryWriter`. If `None` and an `output_dir` was
      passed, one will be created accordingly.
    scaffold: `Scaffold` to get summary_op if it's not provided.
    summary_op: `Tensor` of type `string` containing the serialized
      `Summary` protocol buffer, or a list of such `Tensor`s. They are most
      likely an output of TF summary methods like `tf.summary.scalar` or
      `tf.summary.merge_all`. It can be passed in as one tensor; if more
      than one, they must be passed in as a list.

  Raises:
    ValueError: Exactly one of scaffold or summary_op should be set.
  """
  if ((scaffold is None and summary_op is None) or
      (scaffold is not None and summary_op is not None)):
    raise ValueError(
        "Exactly one of scaffold or summary_op must be provided.")
  self._summary_op = summary_op
  self._summary_writer = summary_writer
  self._output_dir = output_dir
  self._scaffold = scaffold
  self._timer = SecondOrStepTimer(every_secs=save_secs,
                                  every_steps=save_steps)
  # TODO(mdan): Throw an error if output_dir and summary_writer are None.
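A hedged usage sketch wiring the hook into a training loop; logdir and train_op are hypothetical, and tf.train.SummarySaverHook is the public TF 1.x counterpart of this class:

summary_op = tf.summary.merge_all()
hook = tf.train.SummarySaverHook(
    save_steps=100, output_dir=logdir, summary_op=summary_op)
with tf.train.MonitoredTrainingSession(hooks=[hook]) as sess:
  while not sess.should_stop():
    sess.run(train_op)  # Summaries are written every 100 steps.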