

Python logging.fatal Method Code Examples

This article collects typical usage examples of the logging.fatal method from the Python module absl.logging. If you are unsure what logging.fatal does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the containing module, absl.logging.


The following presents 15 code examples of the logging.fatal method, sorted by popularity by default.
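
For orientation before the examples: absl.logging.fatal logs a message at FATAL severity and, once the absl log handler is installed (for example inside absl.app.run), aborts the process, so code after the call is not expected to run. Below is a minimal, hypothetical sketch of that pattern; it is not taken from any of the projects listed here, and the config path and helper names are made up for illustration.

import os

from absl import app
from absl import logging


def load_config(path):
  # Hypothetical helper: abort with a FATAL log if the input is missing.
  if not os.path.exists(path):
    # Under the absl handler, execution does not continue past this call.
    logging.fatal('Config file %s does not exist', path)
  with open(path) as f:
    return f.read()


def main(argv):
  del argv  # Unused.
  config = load_config('/tmp/example_config.txt')  # Hypothetical path.
  logging.info('Loaded %d bytes of config', len(config))


if __name__ == '__main__':
  app.run(main)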

Example 1: main

# Required import: from absl import logging [as alias]
# Or: from absl.logging import fatal [as alias]
def main(unused_argv):
  generate.init_modules(FLAGS.train_split)

  output_dir = os.path.expanduser(FLAGS.output_dir)
  if os.path.exists(output_dir):
    logging.fatal('output dir %s already exists', output_dir)
  logging.info('Writing to %s', output_dir)
  os.makedirs(output_dir)

  for regime, flat_modules in six.iteritems(generate.filtered_modules):
    regime_dir = os.path.join(output_dir, regime)
    os.mkdir(regime_dir)
    per_module = generate.counts[regime]
    for module_name, module in six.iteritems(flat_modules):
      path = os.path.join(regime_dir, module_name + '.txt')
      with open(path, 'w') as text_file:
        for _ in range(per_module):
          problem, _ = generate.sample_from_module(module)
          text_file.write(str(problem.question) + '\n')
          text_file.write(str(problem.answer) + '\n')
      logging.info('Written %s', path) 
Developer: deepmind, Project: mathematics_dataset, Lines: 23, Source: generate_to_file.py

Example 2: build_optimizer

# Required import: from absl import logging [as alias]
# Or: from absl.logging import fatal [as alias]
def build_optimizer(learning_rate,
                    optimizer_name='rmsprop',
                    decay=0.9,
                    epsilon=0.001,
                    momentum=0.9):
  """Build optimizer."""
  if optimizer_name == 'sgd':
    logging.info('Using SGD optimizer')
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
  elif optimizer_name == 'momentum':
    logging.info('Using Momentum optimizer')
    optimizer = tf.train.MomentumOptimizer(
        learning_rate=learning_rate, momentum=momentum)
  elif optimizer_name == 'rmsprop':
    logging.info('Using RMSProp optimizer')
    optimizer = tf.train.RMSPropOptimizer(learning_rate, decay, momentum,
                                          epsilon)
  else:
    logging.fatal('Unknown optimizer: %s', optimizer_name)

  return optimizer 
Developer: lukemelas, Project: EfficientNet-PyTorch, Lines: 23, Source: utils.py

Example 3: _find_files

# Required import: from absl import logging [as alias]
# Or: from absl.logging import fatal [as alias]
def _find_files(dl_paths, publisher, url_dict):
  """Find files corresponding to urls."""
  if publisher == 'cnn':
    top_dir = os.path.join(dl_paths['cnn_stories'], 'cnn', 'stories')
  elif publisher == 'dm':
    top_dir = os.path.join(dl_paths['dm_stories'], 'dailymail', 'stories')
  else:
    logging.fatal('Unsupported publisher: %s', publisher)
  files = tf.io.gfile.listdir(top_dir)

  ret_files = []
  for p in files:
    basename = os.path.basename(p)
    if basename[0:basename.find('.story')] in url_dict:
      ret_files.append(os.path.join(top_dir, p))
  return ret_files 
Developer: tensorflow, Project: datasets, Lines: 18, Source: cnn_dailymail.py

Example 4: value

# Required import: from absl import logging [as alias]
# Or: from absl.logging import fatal [as alias]
def value(self, v):
    if v in _CPP_LEVEL_TO_NAMES:
      # --stderrthreshold also accepts numeric strings whose values are
      # Abseil C++ log levels.
      cpp_value = int(v)
      v = _CPP_LEVEL_TO_NAMES[v]  # Normalize to strings.
    elif v.lower() in _CPP_NAME_TO_LEVELS:
      v = v.lower()
      if v == 'warn':
        v = 'warning'  # Use 'warning' as the canonical name.
      cpp_value = int(_CPP_NAME_TO_LEVELS[v])
    else:
      raise ValueError(
          '--stderrthreshold must be one of (case-insensitive) '
          "'debug', 'info', 'warning', 'error', 'fatal', "
          "or '0', '1', '2', '3', not '%s'" % v)

    self._value = v 
Developer: abseil, Project: abseil-py, Lines: 20, Source: __init__.py

Example 5: set_verbosity

# Required import: from absl import logging [as alias]
# Or: from absl.logging import fatal [as alias]
def set_verbosity(v):
  """Sets the logging verbosity.

  Causes all messages of level <= v to be logged,
  and all messages of level > v to be silently discarded.

  Args:
    v: int|str, the verbosity level as an integer or string. Legal string values
        are those that can be coerced to an integer as well as case-insensitive
        'debug', 'info', 'warning', 'error', and 'fatal'.
  """
  try:
    new_level = int(v)
  except ValueError:
    new_level = converter.ABSL_NAMES[v.upper()]
  FLAGS.verbosity = new_level 
Developer: abseil, Project: abseil-py, Lines: 18, Source: __init__.py
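
A brief usage note (not part of the abseil-py snippet above): set_verbosity accepts either an absl level constant or one of the case-insensitive names from the docstring. A minimal sketch, assuming the absl logging flags are available (they are defined when absl.logging is imported):

from absl import logging

logging.set_verbosity(logging.DEBUG)  # By level constant.
logging.set_verbosity('info')         # Or by case-insensitive name.

logging.info('Emitted at verbosity INFO.')
logging.debug('Filtered out at verbosity INFO.')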

Example 6: set_stderrthreshold

# Required import: from absl import logging [as alias]
# Or: from absl.logging import fatal [as alias]
def set_stderrthreshold(s):
  """Sets the stderr threshold to the value passed in.

  Args:
    s: str|int, valid strings values are case-insensitive 'debug',
        'info', 'warning', 'error', and 'fatal'; valid integer values are
        logging.DEBUG|INFO|WARNING|ERROR|FATAL.

  Raises:
      ValueError: Raised when s is an invalid value.
  """
  if s in converter.ABSL_LEVELS:
    FLAGS.stderrthreshold = converter.ABSL_LEVELS[s]
  elif isinstance(s, str) and s.upper() in converter.ABSL_NAMES:
    FLAGS.stderrthreshold = s
  else:
    raise ValueError(
        'set_stderrthreshold only accepts integer absl logging level '
        'from -3 to 1, or case-insensitive string values '
        "'debug', 'info', 'warning', 'error', and 'fatal'. "
        'But found "{}" ({}).'.format(s, type(s))) 
Developer: abseil, Project: abseil-py, Lines: 23, Source: __init__.py
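
Correspondingly, a hedged sketch of calling set_stderrthreshold (not part of the snippet above): it accepts an absl level constant or a case-insensitive level name and controls which messages are also copied to stderr when logs go to files.

from absl import logging

logging.set_stderrthreshold(logging.ERROR)  # By level constant.
logging.set_stderrthreshold('warning')      # Or by name.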

Example 7: find_log_dir

# Required import: from absl import logging [as alias]
# Or: from absl.logging import fatal [as alias]
def find_log_dir(log_dir=None):
  """Returns the most suitable directory to put log files into.

  Args:
    log_dir: str|None, if specified, the logfile(s) will be created in that
        directory.  Otherwise if the --log_dir command-line flag is provided,
        the logfile will be created in that directory.  Otherwise the logfile
        will be created in a standard location.
  """
  # Get a list of possible log dirs (will try to use them in order).
  if log_dir:
    # log_dir was explicitly specified as an arg, so use it and it alone.
    dirs = [log_dir]
  elif FLAGS['log_dir'].value:
    # log_dir flag was provided, so use it and it alone (this mimics the
    # behavior of the same flag in logging.cc).
    dirs = [FLAGS['log_dir'].value]
  else:
    dirs = ['/tmp/', './']

  # Find the first usable log dir.
  for d in dirs:
    if os.path.isdir(d) and os.access(d, os.W_OK):
      return d
  _absl_logger.fatal("Can't find a writable directory for logs, tried %s", dirs) 
Developer: abseil, Project: abseil-py, Lines: 27, Source: __init__.py

Example 8: score

# Required import: from absl import logging [as alias]
# Or: from absl.logging import fatal [as alias]
def score(self, data: List[List[str]], labels: List[List[str]]) -> float:
        """Evaluate the performance of ner model with given data and labels, return the f1 score.

        Args:
            data: List of List of str. List of tokenized (in char level) texts,
                like ``[['我', '在', '上', '海', '上', '学'], ...]``.
            labels: List of List of str. The corresponding labels, usually in BIO or BIOES
                format, like ``[['O', 'O', 'B-LOC', 'I-LOC', 'O', 'O'], ...]``.

        Returns:
            Float. The F1 score.

        """
        if self.trainer:
            return self.trainer.evaluate(data, labels)
        else:
            logging.fatal('Trainer is None! Call fit() or load() to get trainer.') 
Developer: boat-group, Project: fancy-nlp, Lines: 19, Source: ner.py

Example 9: predict_batch

# Required import: from absl import logging [as alias]
# Or: from absl.logging import fatal [as alias]
def predict_batch(self, texts: Union[List[str], List[List[str]]]) -> List[List[str]]:
        """Return the tag sequences of given batch of texts predicted by the ner model

        Args:
            texts: List of str or List of List of str. Can be a batch of un-tokenized texts,
                like ``['我在上海上学', ...]`` or a batch of tokenized (in char level) text sequences,
                like ``[['我', '在', '上', '海', '上', '学'], ...]``.

        Returns:
            List of List of str. The tag sequences, like ``[['O', 'O', 'B-LOC', 'I-LOC', 'O',
            'O']]``

        """
        if self.predictor:
            return self.predictor.tag_batch(texts)
        else:
            logging.fatal('Predictor is None! Call fit() or load() to get predictor.') 
Developer: boat-group, Project: fancy-nlp, Lines: 19, Source: ner.py

Example 10: create_bottleneck_file

# Required import: from absl import logging [as alias]
# Or: from absl.logging import fatal [as alias]
def create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
                           image_dir, category, sess, jpeg_data_tensor,
                           decoded_image_tensor, resized_input_tensor,
                           bottleneck_tensor):
  """Create a single bottleneck file."""
  logging.debug('Creating bottleneck at %s', bottleneck_path)
  image_path = get_image_path(image_lists, label_name, index,
                              image_dir, category)
  if not tf.gfile.Exists(image_path):
    logging.fatal('File does not exist %s', image_path)
  image_data = tf.gfile.GFile(image_path, 'rb').read()
  try:
    bottleneck_values = run_bottleneck_on_image(
        sess, image_data, jpeg_data_tensor, decoded_image_tensor,
        resized_input_tensor, bottleneck_tensor)
  except Exception as e:
    raise RuntimeError('Error during processing file %s (%s)' % (image_path,
                                                                 str(e)))
  bottleneck_string = ','.join(str(x) for x in bottleneck_values)
  with tf.gfile.GFile(bottleneck_path, 'w') as bottleneck_file:
    bottleneck_file.write(bottleneck_string) 
Developer: tensorflow, Project: hub, Lines: 23, Source: retrain.py

Example 11: _reshape_by_device_single

# Required import: from absl import logging [as alias]
# Or: from absl.logging import fatal [as alias]
def _reshape_by_device_single(x, n_devices):
  """Reshape x into a shape [n_devices, ...]."""
  x_shape = list(x.shape)
  batch_size = x_shape[0]
  batch_size_per_device = batch_size // n_devices
  # We require that n_devices divides batch_size evenly.
  if batch_size_per_device * n_devices != batch_size:
    logging.fatal(
        "We require that n_devices[%d] divides batch_size[%d] evenly.",
        n_devices, batch_size)
  # New shape.
  new_shape_prefix = [n_devices, batch_size_per_device]
  return np.reshape(x, new_shape_prefix + x_shape[1:]) 
Developer: yyht, Project: BERT, Lines: 15, Source: trax.py

Example 12: __init__

# Required import: from absl import logging [as alias]
# Or: from absl.logging import fatal [as alias]
def __init__(
      self,
      kernel_shape,
      multiplier: multiplier_impl.IMultiplier,
  ):
    super().__init__()

    if len(kernel_shape) not in (
        2,
        4,
    ):
      logging.fatal(
          "unsupported kernel shape, "
          "it is neither a dense kernel of length 2,"
          " nor a convolution kernel of length 4")

    kernel_shape_excluding_output_dim = kernel_shape[:-1]
    kernel_add_ops = np.prod(kernel_shape_excluding_output_dim)

    # Biases are associated with filters; each filter adds 1 bias.
    bias_add = 1

    add_ops = kernel_add_ops + bias_add
    self.log_add_ops = int(np.ceil(np.log2(add_ops)))

    self.multiplier = multiplier
    self.output = quantizer_impl.QuantizedBits()
    self.output.bits = self.log_add_ops + self.multiplier.output.bits
    self.output.int_bits = self.log_add_ops + self.multiplier.output.int_bits
    self.output.is_signed = self.multiplier.output.is_signed
    self.output.op_type = "accumulator"

    assert not self.multiplier.output.is_floating_point
    self.output.is_floating_point = False 
Developer: google, Project: qkeras, Lines: 36, Source: accumulator_impl.py
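
To make the bit-width arithmetic in this snippet concrete, here is a small worked example with a hypothetical convolution kernel shape (the shape is made up; only the formula comes from the snippet above):

import numpy as np

kernel_shape = (3, 3, 64, 128)                # Hypothetical (H, W, C_in, C_out) conv kernel.
kernel_add_ops = np.prod(kernel_shape[:-1])   # 3 * 3 * 64 = 576 products accumulated per output.
add_ops = kernel_add_ops + 1                  # Plus 1 bias add -> 577.
log_add_ops = int(np.ceil(np.log2(add_ops)))  # ceil(log2(577)) = 10 extra accumulator bits.
print(log_add_ops)                            # -> 10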

Example 13: _subset_filenames

# Required import: from absl import logging [as alias]
# Or: from absl.logging import fatal [as alias]
def _subset_filenames(dl_paths, split):
  """Get filenames for a particular split."""
  assert isinstance(dl_paths, dict), dl_paths
  # Get filenames for a split.
  if split == tfds.Split.TRAIN:
    urls = _get_url_hashes(dl_paths['train_urls'])
  elif split == tfds.Split.VALIDATION:
    urls = _get_url_hashes(dl_paths['val_urls'])
  elif split == tfds.Split.TEST:
    urls = _get_url_hashes(dl_paths['test_urls'])
  else:
    logging.fatal('Unsupported split: %s', split)
  cnn = _find_files(dl_paths, 'cnn', urls)
  dm = _find_files(dl_paths, 'dm', urls)
  return cnn + dm 
Developer: tensorflow, Project: datasets, Lines: 17, Source: cnn_dailymail.py

Example 14: _decode_image

# Required import: from absl import logging [as alias]
# Or: from absl.logging import fatal [as alias]
def _decode_image(fobj, session, filename):
  """Reads and decodes an image from a file object as a Numpy array.

  The SUN dataset contains images in several formats (despite the fact that
  all of them have .jpg extension). Some of them are:
    - BMP (RGB)
    - PNG (grayscale, RGBA, RGB interlaced)
    - JPEG (RGB)
    - GIF (1-frame RGB)
  Since TFDS assumes that all images have the same number of channels, we
  convert all of them to RGB.

  Args:
    fobj: File object to read from.
    session: TF session used to decode the images.
    filename: Filename of the original image in the archive.

  Returns:
    Numpy array with shape (height, width, channels).
  """

  buf = fobj.read()
  image = tfds.core.lazy_imports.cv2.imdecode(
      np.fromstring(buf, dtype=np.uint8), flags=3)  # Note: Converts to RGB.
  if image is None:
    logging.warning(
        "Image %s could not be decoded by OpenCV, falling back to TF", filename)
    try:
      image = tf.image.decode_image(buf, channels=3)
      image = session.run(image)
    except tf.errors.InvalidArgumentError:
      logging.fatal("Image %s could not be decoded by Tensorflow", filename)

  # The GIF images contain a single frame.
  if len(image.shape) == 4:  # rank=4 -> rank=3
    image = image.reshape(image.shape[1:])

  return image 
Developer: tensorflow, Project: datasets, Lines: 40, Source: sun.py

Example 15: infer

# Required import: from absl import logging [as alias]
# Or: from absl.logging import fatal [as alias]
def infer(self, yield_single_examples=False):
    ''' inference '''
    logging.fatal("Not Implemented") 
Developer: didi, Project: delta, Lines: 5, Source: emotion_solver.py


Note: The absl.logging.fatal method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to each project's license; do not reproduce without permission.