

Python tf_logging.error Function Code Examples

This article collects typical usage examples of the error function from tensorflow.python.platform.tf_logging. If you are unsure what the Python error function does, how to call it, or what it looks like in practice, the curated code examples below should help.


The sections below present 15 code examples of the error function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
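Before the examples, a minimal usage sketch (not drawn from any of the projects below): tf_logging mirrors the standard logging module, so error takes a printf-style format string followed by its arguments, which are interpolated lazily. The checkpoint prefix here is a made-up value for illustration.

from tensorflow.python.platform import tf_logging as logging

ckpt_prefix = '/tmp/model/ckpt-100'  # hypothetical value, for illustration only
# Arguments are interpolated lazily, as in the standard logging module.
logging.error("Couldn't match files for checkpoint %s", ckpt_prefix)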

Example 1: get_global_counter

def get_global_counter(collection, name, graph=None):
    """Get the global counter tensor.

    The global counter tensor must be an integer variable. We first try to find it
    in the collection, or by name.

    Args:
        collection: the counter's collection.
        name: the counter's name.
        graph: The graph to find the global counter in. If missing, use default graph.

    Returns:
        The global counter variable, or `None` if none was found.

    Raises:
        TypeError: If the global counter tensor has a non-integer type,
            or if it is not a `Variable`.
    """
    graph = graph or tf.get_default_graph()
    global_counter_tensors = graph.get_collection(collection)
    if len(global_counter_tensors) == 1:
        global_counter_tensor = global_counter_tensors[0]
    elif not global_counter_tensors:
        try:
            global_counter_tensor = graph.get_tensor_by_name(name)
        except KeyError:
            return None
    else:
        logging.error('Multiple tensors in `{}` collection.'.format(collection))
        return None

    assert_global_counter(global_counter_tensor)
    return global_counter_tensor
Developer: AlexMikhalev, Project: polyaxon, Lines: 33, Source: utils.py

Example 2: record_error

  def record_error(self, source, exc_info, session=None):
    """Report an exception from the given source.

    If a session is passed, a timer will be registered to close it after a few
    seconds.  This is necessary to ensure the main training loop does not hang
    if an infeed/outfeed error occurs.  We sleep a few seconds to allow a more
    interesting error from another thread to propagate.

    Args:
      source: string, source of the error
      exc_info: Output from `sys.exc_info` (type, value, traceback)
      session: Session to close after delay.
    """
    _, value, _ = exc_info
    self._errors[source] = exc_info
    logging.error('Error recorded from %s: %s', source, value)

    if session is not None and self._session_cancel_timer is None:

      def _cancel_session():
        time.sleep(5)
        logging.error('Closing session due to error %s' % value)
        try:
          session.close()
        except:  # pylint: disable=bare-except
          logging.error(
              '\n\n\nFailed to close session after error. '
              'Other threads may hang.\n\n\n')

      self._session_cancel_timer = threading.Thread(target=_cancel_session,)
      self._session_cancel_timer.daemon = True
      self._session_cancel_timer.start()
Developer: adit-chandra, Project: tensorflow, Lines: 32, Source: error_handling.py

Example 3: _check_trt_version_compatibility

def _check_trt_version_compatibility():
  """Check compatibility of TensorRT version.

  Raises:
    RuntimeError: if the TensorRT library version is incompatible.
  """
  compiled_version = get_linked_tensorrt_version()
  loaded_version = get_loaded_tensorrt_version()
  tf_logging.info("Linked TensorRT version: %s" % str(compiled_version))
  tf_logging.info("Loaded TensorRT version: %s" % str(loaded_version))
  version_mismatch = False
  if loaded_version[0] < compiled_version[0]:
    tf_logging.error(
        "TensorRT version mismatch. Tensorflow was compiled against " +
        "TensorRT %s but library loaded from environment is TensorRT %s" %
        (".".join([str(x) for x in compiled_version]),
         ".".join([str(x) for x in loaded_version])) +
        ". Please make sure that correct version of TensorRT " +
        "is available in the system and added to ldconfig or LD_LIBRARY_PATH")
    raise RuntimeError("Incompatible TensorRT library version")
  for i in zip(loaded_version, compiled_version):
    if i[0] != i[1]:
      tf_logging.warn("TensorRT mismatch. Compiled against version " +
                      "%s, but loaded %s. Things may not work" %
                      (".".join([str(x) for x in compiled_version]),
                       ".".join([str(x) for x in loaded_version])))
      version_mismatch = True
      break
  if not version_mismatch:
    tf_logging.info("Running against TensorRT version %s" %
                    ".".join([str(x) for x in loaded_version]))
Developer: aritratony, Project: tensorflow, Lines: 31, Source: trt_convert.py
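A small, hedged illustration of the comparison logic above, using made-up version numbers: TensorRT versions are handled as (major, minor, patch) tuples, a lower loaded major version is treated as fatal, and any other component mismatch only triggers the warning path.

# Made-up version tuples for illustration; not real TensorRT releases.
compiled_version = (7, 2, 3)
loaded_version = (7, 1, 3)

# Same major version, so the hard-failure branch above is not taken.
assert not loaded_version[0] < compiled_version[0]

# The minor component differs, so the warning branch is taken instead.
mismatch = any(a != b for a, b in zip(loaded_version, compiled_version))
assert mismatch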

Example 4: latest_checkpoint

def latest_checkpoint(checkpoint_dir, latest_filename=None):
  """Finds the filename of latest saved checkpoint file.

  Args:
    checkpoint_dir: Directory where the variables were saved.
    latest_filename: Optional name for the protocol buffer file that
      contains the list of most recent checkpoint filenames.
      See the corresponding argument to `Saver.save()`.

  Returns:
    The full path to the latest checkpoint or `None` if no checkpoint was found.
  """
  # Pick the latest checkpoint based on checkpoint state.
  ckpt = get_checkpoint_state(checkpoint_dir, latest_filename)
  if ckpt and ckpt.model_checkpoint_path:
    # Look for either a V2 path or a V1 path, with priority for V2.
    v2_path = _prefix_to_checkpoint_path(ckpt.model_checkpoint_path,
                                         saver_pb2.SaverDef.V2)
    v1_path = _prefix_to_checkpoint_path(ckpt.model_checkpoint_path,
                                         saver_pb2.SaverDef.V1)
    if file_io.get_matching_files(v2_path) or file_io.get_matching_files(
        v1_path):
      return ckpt.model_checkpoint_path
    else:
      logging.error("Couldn't match files for checkpoint %s",
                    ckpt.model_checkpoint_path)
  return None
Developer: adit-chandra, Project: tensorflow, Lines: 27, Source: checkpoint_management.py
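A hedged usage sketch: this helper backs the public tf.train.latest_checkpoint API, so the call below should exercise the same lookup; the directory is a made-up example and may simply yield None on your machine.

import tensorflow as tf

# Hypothetical training directory; returns None if no checkpoint state is found.
ckpt_prefix = tf.train.latest_checkpoint('/tmp/train_dir')
if ckpt_prefix is None:
    print('No checkpoint found (or the files named in the checkpoint state are missing).')
else:
    print('Most recent checkpoint prefix:', ckpt_prefix)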

Example 5: get_global_step

def get_global_step(graph=None):
  """Get the global step tensor.

  The global step tensor must be an integer variable. We first try to find it
  in the collection `GLOBAL_STEP`, or by name `global_step:0`.

  Args:
    graph: The graph to find the global step in. If missing, use default graph.

  Returns:
    The global step variable, or `None` if none was found.

  Raises:
    TypeError: If the global step tensor has a non-integer type, or if it is not
      a `Variable`.
  """
  graph = ops.get_default_graph() if graph is None else graph
  global_step_tensor = None
  global_step_tensors = graph.get_collection(ops.GraphKeys.GLOBAL_STEP)
  if len(global_step_tensors) == 1:
    global_step_tensor = global_step_tensors[0]
  elif not global_step_tensors:
    try:
      global_step_tensor = graph.get_tensor_by_name('global_step:0')
    except KeyError:
      return None
  else:
    logging.error('Multiple tensors in global_step collection.')
    return None

  assert_global_step(global_step_tensor)
  return global_step_tensor
Developer: AliMiraftab, Project: tensorflow, Lines: 32, Source: training_util.py
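A hedged usage sketch, assuming a TF 1.x-style graph (use tf.compat.v1 in TensorFlow 2): create the global step, then retrieve it again through the collection lookup shown above via the public tf.train.get_global_step wrapper.

import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
    step = tf.train.create_global_step()     # registers the variable in GraphKeys.GLOBAL_STEP
    found = tf.train.get_global_step(graph)  # public wrapper around the helper above
    assert found is step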

Example 6: _AddOpInternal

  def _AddOpInternal(self, op):
    # pylint: disable=protected-access
    if op.type in _BLACKLISTED_OPS:
      logging.error("Operation of type %s (%s) is not supported on the TPU. "
                    "Execution will fail if this op is used in the graph. " %
                    (op.type, op.name))

    if op.type in _NOT_IMPLEMENTED_OPS:
      self._unsupported_ops.append(op)

    if any(x.dtype._is_ref_dtype for x in op.inputs):
      raise NotImplementedError(
          "Non-resource Variables are not supported inside TPU computations "
          "(operator name: %s)" % op.name)
    if _TPU_REPLICATE_ATTR in op.node_def.attr:
      raise ValueError("TPU computations cannot be nested")
    op._set_attr(_TPU_REPLICATE_ATTR,
                 attr_value_pb2.AttrValue(s=compat.as_bytes(self._name)))
    if self._outside_compilation_cluster:
      op._set_attr(
          _OUTSIDE_COMPILATION_ATTR,
          attr_value_pb2.AttrValue(
              s=compat.as_bytes(self._outside_compilation_cluster)))
    if self._num_replicas > 1 or not self._outside_compilation_cluster:
      # Prevent feeding or fetching anything that is being compiled,
      # and any replicated outside_compilation Op.
      op.graph.prevent_feeding(op)
      op.graph.prevent_fetching(op)
Developer: jinxin0924, Project: tensorflow, Lines: 28, Source: tpu.py

Example 7: run_simple_server

def run_simple_server(tb_app):
  """Start serving TensorBoard, and print some messages to console."""
  # Mute the werkzeug logging.
  base_logging.getLogger('werkzeug').setLevel(base_logging.WARNING)

  try:
    server = serving.make_server(FLAGS.host, FLAGS.port, tb_app, threaded=True)
    server.daemon_threads = True
  except socket.error:
    if FLAGS.port == 0:
      msg = 'TensorBoard unable to find any open port'
    else:
      msg = (
          'TensorBoard attempted to bind to port %d, but it was already in use'
          % FLAGS.port)
    logging.error(msg)
    print(msg)
    exit(-1)

  port = server.socket.getsockname()[1]
  msg = 'Starting TensorBoard %s at http://%s:%d' % (tb_app.tag, FLAGS.host,
                                                     port)
  print(msg)
  logging.info(msg)
  print('(Press CTRL+C to quit)')
  sys.stdout.flush()

  server.serve_forever()
Developer: Immexxx, Project: tensorflow, Lines: 28, Source: tensorboard.py

Example 8: _check_dtypes

def _check_dtypes(value, dtype):
  if value.dtype != dtype:
    logging.error(
        "Error: Input value {} has dtype {}, but expected dtype {}.  "
        "This leads to undefined behavior and will be an error "
        "in future versions of TensorFlow.  Traceback:\n{}".format(
            value, str(value.dtype), str(dtype),
            "".join(traceback.format_stack())))
Developer: aritratony, Project: tensorflow, Lines: 8, Source: tensor_array_ops.py
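A hedged sketch of the situation this check guards against: writing a value whose dtype does not match the TensorArray's dtype. Depending on the TensorFlow version this either logs the error above or raises outright, so treat it as illustrative rather than a guaranteed reproduction.

import tensorflow as tf

ta = tf.TensorArray(dtype=tf.float32, size=1)
bad_value = tf.constant(1.0, dtype=tf.float64)  # dtype mismatch on purpose
ta = ta.write(0, bad_value)                     # expected to hit the dtype check above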

Example 9: __exit__

  def __exit__(self, exec_type, exec_value, exec_tb):
    if exec_type is errors.OpError:
      logging.error('Session closing due to OpError: %s', (exec_value,))

    for context_manager in reversed(self._context_managers):
      context_manager.__exit__(exec_type, exec_value, exec_tb)

    self.close()
Developer: 3kwa, Project: tensorflow, Lines: 8, Source: session.py

Example 10: __call__

  def __call__(self, path, parent, children):
    # The path to the object.
    lib_path = 'tensorflow.%s' % path if path else 'tensorflow'

    # A small helper method to construct members(children) protos.
    def _AddMember(member_name, member_obj, proto):
      """Add the child object to the object being constructed."""
      _, member_obj = tf_decorator.unwrap(member_obj)
      if member_name == '__init__' or not member_name.startswith('_'):
        if tf_inspect.isroutine(member_obj):
          new_method = proto.member_method.add()
          new_method.name = member_name
          # If member_obj is a python builtin, there is no way to get its
          # argspec, because it is implemented on the C side. It also has no
          # func_code.
          if getattr(member_obj, 'func_code', None):
            new_method.argspec = _SanitizedArgSpec(member_obj)
        else:
          new_member = proto.member.add()
          new_member.name = member_name
          new_member.mtype = str(type(member_obj))

    parent_corner_cases = _CORNER_CASES.get(path, {})

    if path not in _CORNER_CASES or parent_corner_cases:
      # Decide if we have a module or a class.
      if tf_inspect.ismodule(parent):
        # Create a module object.
        module_obj = api_objects_pb2.TFAPIModule()
        for name, child in children:
          if name in parent_corner_cases:
            # If we have an empty entry, skip this object.
            if parent_corner_cases[name]:
              module_obj.member.add(**(parent_corner_cases[name]))
          else:
            _AddMember(name, child, module_obj)

        # Store the constructed module object.
        self._protos[lib_path] = api_objects_pb2.TFAPIObject(
            path=lib_path, tf_module=module_obj)
      elif tf_inspect.isclass(parent):
        # Construct a class.
        class_obj = api_objects_pb2.TFAPIClass()
        class_obj.is_instance.extend(_SanitizedMRO(parent))
        for name, child in children:
          if name in parent_corner_cases:
            # If we have an empty entry, skip this object.
            if parent_corner_cases[name]:
              class_obj.member.add(**(parent_corner_cases[name]))
          else:
            _AddMember(name, child, class_obj)

        # Store the constructed class object.
        self._protos[lib_path] = api_objects_pb2.TFAPIObject(
            path=lib_path, tf_class=class_obj)
      else:
        logging.error('Illegal call to ApiProtoDump::_py_obj_to_proto. '
                      'Object is neither a module nor a class: %s', path)
Developer: AutumnQYN, Project: tensorflow, Lines: 58, Source: python_object_to_proto_visitor.py

Example 11: make_simple_server

def make_simple_server(tb_app, host, port):
  """Create an HTTP server for TensorBoard.

  Args:
    tb_app: The TensorBoard WSGI application to create a server for.
    host: Indicates the interfaces to bind to ('::' or '0.0.0.0' for all
        interfaces, '::1' or '127.0.0.1' for localhost). A blank value ('')
        indicates protocol-agnostic all interfaces.
    port: The port to bind to (0 indicates an unused port selected by the
        operating system).
  Returns:
    A tuple of (server, url):
      server: An HTTP server object configured to host TensorBoard.
      url: A best guess at a URL where TensorBoard will be accessible once the
        server has been started.
  Raises:
    socket.error: If a server could not be constructed with the host and port
      specified. Also logs an error message.
  """
  # Mute the werkzeug logging.
  base_logging.getLogger('werkzeug').setLevel(base_logging.WARNING)

  try:
    if host:
      # The user gave us an explicit host
      server = serving.make_server(host, port, tb_app, threaded=True)
      if ':' in host and not host.startswith('['):
        # Display IPv6 addresses as [::1]:80 rather than ::1:80
        final_host = '[{}]'.format(host)
      else:
        final_host = host
    else:
      # We've promised to bind to all interfaces on this host. However, we're
      # not sure whether that means IPv4 or IPv6 interfaces.
      try:
        # First try passing in a blank host (meaning all interfaces). This,
        # unfortunately, defaults to IPv4 even if no IPv4 interface is available
        # (yielding a socket.error).
        server = serving.make_server(host, port, tb_app, threaded=True)
      except socket.error:
        # If a blank host didn't work, we explicitly request IPv6 interfaces.
        server = serving.make_server('::', port, tb_app, threaded=True)
      final_host = socket.gethostname()
    server.daemon_threads = True
  except socket.error as socket_error:
    if port == 0:
      msg = 'TensorBoard unable to find any open port'
    else:
      msg = (
          'TensorBoard attempted to bind to port %d, but it was already in use'
          % port)
    logging.error(msg)
    print(msg)
    raise socket_error

  final_port = server.socket.getsockname()[1]
  tensorboard_url = 'http://%s:%d' % (final_host, final_port)
  return server, tensorboard_url
Developer: LugarkPirog, Project: tensorflow, Lines: 58, Source: tensorboard.py
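A hedged sketch of the same bind-and-report pattern with a trivial WSGI app standing in for the TensorBoard application; the app and host values here are illustrative only.

from werkzeug import serving

def hello_app(environ, start_response):
    # Minimal WSGI app used only to demonstrate the server setup.
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello']

# Port 0 asks the operating system for any free port, mirroring the logic above.
server = serving.make_server('127.0.0.1', 0, hello_app, threaded=True)
print('Serving on http://127.0.0.1:%d' % server.socket.getsockname()[1])
# server.serve_forever()  # uncomment to actually handle requests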

Example 12: _cancel_session

 def _cancel_session():
   time.sleep(5)
   logging.error('Closing session due to error %s' % value)
   try:
     session.close()
   except:  # pylint: disable=bare-except
     logging.error(
         '\n\n\nFailed to close session after error. '
         'Other threads may hang.\n\n\n')
Developer: adit-chandra, Project: tensorflow, Lines: 9, Source: error_handling.py

Example 13: after_run

 def after_run(self, run_context, run_values):
   if np.isnan(run_values.results):
     failure_message = "Model diverged with loss = NaN."
     if self._fail_on_nan_loss:
       logging.error(failure_message)
       raise NanLossDuringTrainingError
     else:
       logging.warning(failure_message)
       # We don't raise an error but we request stop without an exception.
       run_context.request_stop()
Developer: MostafaGazar, Project: tensorflow, Lines: 10, Source: basic_session_run_hooks.py
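A hedged usage sketch (TF 1.x-style; this hook is exported as tf.train.NanTensorHook): attach it to a monitored session so that a NaN loss either raises NanLossDuringTrainingError or, as here, logs a warning and requests a stop.

import tensorflow as tf

loss = tf.constant(float('nan'))
# fail_on_nan_loss=False makes the hook warn and request a stop instead of raising.
hook = tf.train.NanTensorHook(loss, fail_on_nan_loss=False)
with tf.train.MonitoredSession(hooks=[hook]) as sess:
    while not sess.should_stop():
        sess.run(loss)  # the first run sees NaN, so the hook stops the loop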

Example 14: __exit__

  def __exit__(self, exec_type, exec_value, exec_tb):
    if exec_type is errors.OpError:
      logging.error('Session closing due to OpError: %s', (exec_value,))
    self._default_session_context_manager.__exit__(
        exec_type, exec_value, exec_tb)
    self._default_graph_context_manager.__exit__(exec_type, exec_value, exec_tb)

    self._default_session_context_manager = None
    self._default_graph_context_manager = None

    self.close()
Developer: Jackhuang945, Project: tensorflow, Lines: 11, Source: session.py

Example 15: every_n_step_end

 def every_n_step_end(self, step, outputs):
   super(NanLoss, self).every_n_step_end(step, outputs)
   if np.isnan(_extract_output(outputs, self._loss_tensor)):
     failure_message = "Model diverged with loss = NaN."
     if self._fail_on_nan_loss:
       logging.error(failure_message)
       raise NanLossDuringTrainingError
     else:
       logging.warning(failure_message)
       # We don't raise an error but we return "should stop" so we stop, but
       # without an exception.
       return True
Developer: Immexxx, Project: tensorflow, Lines: 12, Source: monitors.py


Note: the tensorflow.python.platform.tf_logging.error examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are taken from open-source projects contributed by their authors; copyright remains with the original authors, and any redistribution or use should follow each project's license. Please do not republish without permission.