

Python v1.logging Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.logging method in Python. If you are unsure how exactly v1.logging is used, or want to see working examples of it, the curated code examples below should help. You can also explore further usage examples from the containing module, tensorflow.compat.v1.


The following presents 15 code examples of the v1.logging method, sorted by popularity by default.
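Before diving into the examples, here is a minimal orientation sketch (not taken from any of the projects below) showing the typical calls on tf.compat.v1.logging: setting the verbosity and then emitting messages at different severity levels with printf-style formatting.

import tensorflow.compat.v1 as tf

# Make INFO-level messages visible (the default threshold is usually WARN).
tf.logging.set_verbosity(tf.logging.INFO)

tf.logging.info("Loaded %d training examples", 128)
tf.logging.warning("Falling back to CPU execution")
tf.logging.error("Could not read checkpoint at %s", "/tmp/model.ckpt")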

Example 1: printable_text

# Required import: from tensorflow.compat import v1 [as an alias]
# Alternatively: from tensorflow.compat.v1 import logging [as an alias]
def printable_text(text):
    """Returns text encoded in a way suitable for print or `tf.logging`."""

    # These functions want `str` for both Python2 and Python3, but in one case
    # it's a Unicode string and in the other it's a byte string.
    if six.PY3:
        if isinstance(text, str):
            return text
        elif isinstance(text, bytes):
            return six.ensure_text(text, "utf-8", "ignore")
        else:
            raise ValueError("Unsupported string type: %s" % (type(text)))
    elif six.PY2:
        if isinstance(text, str):
            return text
        elif isinstance(text, six.text_type):
            return six.ensure_binary(text, "utf-8")
        else:
            raise ValueError("Unsupported string type: %s" % (type(text)))
    else:
        raise ValueError("Not running on Python2 or Python 3?") 
Developer: kpe | Project: bert-for-tf2 | Lines: 23 | Source file: albert_tokenization.py
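A short usage sketch (not part of the bert-for-tf2 repository) of the printable_text function above, assuming Python 3 with six installed:

text = "café"
print(printable_text(text))                   # str is returned unchanged
print(printable_text(text.encode("utf-8")))   # bytes are decoded back to "café"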

Example 2: __init__

# Required import: from tensorflow.compat import v1 [as an alias]
# Alternatively: from tensorflow.compat.v1 import logging [as an alias]
def __init__(self, vocab_file, do_lower_case=True, spm_model_file=None):
        self.vocab = None
        self.sp_model = None
        if spm_model_file:
            import sentencepiece as spm

            self.sp_model = spm.SentencePieceProcessor()
            tf.compat.v1.logging.info("loading sentence piece model")
            self.sp_model.Load(spm_model_file)
            # Note(mingdachen): For the purpose of a consistent API, we are
            # generating a vocabulary for the sentence piece tokenizer.
            self.vocab = {self.sp_model.IdToPiece(i): i for i
                          in range(self.sp_model.GetPieceSize())}
        else:
            self.vocab = load_vocab(vocab_file)
            self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
            self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
        self.inv_vocab = {v: k for k, v in self.vocab.items()} 
Developer: kpe | Project: bert-for-tf2 | Lines: 20 | Source file: albert_tokenization.py

Example 3: _run_one_phase

# Required import: from tensorflow.compat import v1 [as an alias]
# Alternatively: from tensorflow.compat.v1 import logging [as an alias]
def _run_one_phase(self, min_steps, statistics, run_mode_str):
    # Mostly copy of parent method.
    step_count = 0
    num_episodes = 0
    sum_returns = 0.

    while step_count < min_steps:
      num_steps, episode_returns = self._run_one_episode()
      for episode_return in episode_returns:
        statistics.append({
            "{}_episode_lengths".format(run_mode_str):
                num_steps / self.batch_size,
            "{}_episode_returns".format(run_mode_str): episode_return
        })
      step_count += num_steps
      sum_returns += sum(episode_returns)
      num_episodes += self.batch_size
      # We use sys.stdout.write instead of tf.logging so as to flush frequently
      # without generating a line break.
      sys.stdout.write("Steps executed: {} ".format(step_count) +
                       "Batch episodes steps: {} ".format(num_steps) +
                       "Returns: {}\r".format(episode_returns))
      sys.stdout.flush()
    return step_count, sum_returns, num_episodes 
Developer: tensorflow | Project: tensor2tensor | Lines: 26 | Source file: dopamine_connector.py

Example 4: initialize_from_ckpt

# Required import: from tensorflow.compat import v1 [as an alias]
# Alternatively: from tensorflow.compat.v1 import logging [as an alias]
def initialize_from_ckpt(ckpt_dir, hparams):
  """Initialize variables from given directory."""
  model_dir = hparams.get("model_dir", None)
  already_has_ckpt = (
      model_dir and tf.train.latest_checkpoint(model_dir) is not None)
  if already_has_ckpt:
    return

  tf.logging.info("Checkpoint dir: %s", ckpt_dir)
  reader = contrib.framework().load_checkpoint(ckpt_dir)
  variable_map = {}
  for var in contrib.framework().get_trainable_variables():
    var_name = var.name.split(":")[0]
    if reader.has_tensor(var_name):
      tf.logging.info("Loading variable from checkpoint: %s", var_name)
      variable_map[var_name] = var
    else:
      tf.logging.info("Cannot find variable in checkpoint, skipping: %s",
                      var_name)
  tf.train.init_from_checkpoint(ckpt_dir, variable_map) 
Developer: tensorflow | Project: tensor2tensor | Lines: 22 | Source file: t2t_model.py

Example 5: printable_text

# Required import: from tensorflow.compat import v1 [as an alias]
# Alternatively: from tensorflow.compat.v1 import logging [as an alias]
def printable_text(text):
  """Returns text encoded in a way suitable for print or `tf.logging`."""

  # These functions want `str` for both Python2 and Python3, but in one case
  # it's a Unicode string and in the other it's a byte string.
  if six.PY3:
    if isinstance(text, str):
      return text
    elif isinstance(text, bytes):
      return text.decode("utf-8", "ignore")
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  elif six.PY2:
    if isinstance(text, str):
      return text
    elif isinstance(text, unicode):
      return text.encode("utf-8")
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  else:
    raise ValueError("Not running on Python2 or Python 3?") 
Developer: imcaspar | Project: gpt2-ml | Lines: 23 | Source file: tokenization.py

Example 6: printable_text

# Required import: from tensorflow.compat import v1 [as an alias]
# Alternatively: from tensorflow.compat.v1 import logging [as an alias]
def printable_text(text):
  """Returns text encoded in a way suitable for print or `tf.logging`."""

  # These functions want `str` for both Python2 and Python3, but in one case
  # it's a Unicode string and in the other it's a byte string.
  if six.PY3:
    if isinstance(text, str):
      return text
    elif isinstance(text, bytes):
      return six.ensure_text(text, "utf-8", "ignore")
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  elif six.PY2:
    if isinstance(text, str):
      return text
    elif isinstance(text, six.text_type):
      return six.ensure_binary(text, "utf-8")
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  else:
    raise ValueError("Not running on Python2 or Python 3?") 
Developer: google-research | Project: albert | Lines: 23 | Source file: tokenization.py

Example 7: __init__

# Required import: from tensorflow.compat import v1 [as an alias]
# Alternatively: from tensorflow.compat.v1 import logging [as an alias]
def __init__(self, vocab_file, do_lower_case=True, spm_model_file=None):
    self.vocab = None
    self.sp_model = None
    if spm_model_file:
      self.sp_model = spm.SentencePieceProcessor()
      tf.logging.info("loading sentence piece model")
      # Handle cases where SP can't load the file, but gfile can.
      sp_model_ = tf.gfile.GFile(spm_model_file, "rb").read()
      self.sp_model.LoadFromSerializedProto(sp_model_)
      # Note(mingdachen): For the purpose of a consistent API, we are
      # generating a vocabulary for the sentence piece tokenizer.
      self.vocab = {self.sp_model.IdToPiece(i): i for i
                    in range(self.sp_model.GetPieceSize())}
    else:
      self.vocab = load_vocab(vocab_file)
      self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
      self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
    self.inv_vocab = {v: k for k, v in self.vocab.items()} 
Developer: google-research | Project: albert | Lines: 20 | Source file: tokenization.py

Example 8: main

# Required import: from tensorflow.compat import v1 [as an alias]
# Alternatively: from tensorflow.compat.v1 import logging [as an alias]
def main(_):
  with open(FLAGS.wiki103_raw, "r") as f:
    data = f.read().strip().split("\n")

  data = [x.split(" . ") for x in data if x.strip() and x.strip()[0] != "="]

  sentences = []
  for para in data:
    for sent in para:
      sentences.append(sent + ".")
  data = "\n".join(sentences)

  data = data.replace(" @.@ ", ".").replace(" @-@ ", "-").replace(" ,", ",")
  data = data.replace(" \'", "\'").replace(" )", ")").replace("( ", "(")
  data = data.replace(" ;", ";")

  data = "\n".join([x for x in data.split("\n") if len(x.split()) > 3])

  logging.info("length = %d", len(data.split("\n")))

  with open(FLAGS.output_path, "w") as f:
    f.write(data) 
Developer: google-research | Project: language | Lines: 24 | Source file: wiki103_sentencize.py

Example 9: main

# Required import: from tensorflow.compat import v1 [as an alias]
# Alternatively: from tensorflow.compat.v1 import logging [as an alias]
def main(_):

  def load_dataset_file(dataset_file):
    with gfile.Open(dataset_file) as df:
      dataset_json = json.load(df)
    data = dataset_json['data']
    return data

  def load_preds_file(prediction_file):
    with gfile.Open(prediction_file) as pf:
      preds = json.load(pf)
    return preds

  dataset = load_dataset_file(FLAGS.watermark_file)
  preds = load_preds_file(FLAGS.watermark_output_file)
  logging.info('Watermark Label Accuracy =')
  logging.info(
      json.dumps(evaluate_dataset_preds(dataset, preds, ans_key='answers')))
  logging.info('Victim Label Accuracy =')
  logging.info(
      json.dumps(
          evaluate_dataset_preds(dataset, preds, ans_key='original_answers'))) 
Developer: google-research | Project: language | Lines: 24 | Source file: evaluate_squad_watermark.py

Example 10: main

# Required import: from tensorflow.compat import v1 [as an alias]
# Alternatively: from tensorflow.compat.v1 import logging [as an alias]
def main(_):
  with open(FLAGS.wiki103_raw, "r") as f:
    data = f.read().strip().split("\n")

  data = [x for x in data if x.strip() and x.strip()[0] != "="]

  data = "\n".join(data)

  data = data.replace(" @.@ ", ".").replace(" @-@ ", "-").replace(" ,", ",")
  data = data.replace(" \'", "\'").replace(" )", ")").replace("( ", "(")
  data = data.replace(" ;", ";").replace(" .", ".").replace(" :", ":")

  data = "\n".join([x for x in data.split("\n") if len(x.split()) > 20])

  logging.info("length = %d", len(data.split("\n")))

  with open(FLAGS.output_path, "w") as f:
    f.write(data) 
Developer: google-research | Project: language | Lines: 20 | Source file: wiki103_para_split.py

Example 11: printable_text

# Required import: from tensorflow.compat import v1 [as an alias]
# Alternatively: from tensorflow.compat.v1 import logging [as an alias]
def printable_text(text, strip_roberta_space=False):
  """Returns text encoded in a way suitable for print or `tf.logging`."""

  # These functions want `str` for both Python2 and Python3, but in one case
  # it's a Unicode string and in the other it's a byte string.
  strip = (lambda x: (x.replace('Ġ', '') if x.startswith('Ġ') else x)
          ) if strip_roberta_space else (lambda x: x)
  if six.PY3:
    if isinstance(text, str):
      return strip(text)
    elif isinstance(text, bytes):
      return strip(text.decode('utf-8', 'ignore'))
    else:
      raise ValueError('Unsupported string type: %s' % (type(text)))
  elif six.PY2:
    if isinstance(text, str):
      return strip(text)
    elif isinstance(text, unicode):
      return strip(text.encode('utf-8'))
    else:
      raise ValueError('Unsupported string type: %s' % (type(text)))
  else:
    raise ValueError('Not running on Python2 or Python 3?') 
Developer: google-research | Project: language | Lines: 25 | Source file: tokenization.py

Example 12: config_context

# Required import: from tensorflow.compat import v1 [as an alias]
# Alternatively: from tensorflow.compat.v1 import logging [as an alias]
def config_context(self, context, params=None):

    # declare the KG relations
    relation_filename = '%s/%s/%s' % (FLAGS.rootdir, FLAGS.task,
                                      FLAGS.relation_file)

    for line in tf.io.gfile.GFile(relation_filename):
      line = line.strip()
      rel, subj_type, obj_type = line.split('\t')
      context.declare_relation(rel, subj_type, obj_type)

    # we will also use NQL for a distance flag, which indicates if the relation
    # is followed forward or backward
    context.extend_type('distance_t', [str(h) for h in range(FLAGS.num_hops)])

    # load the lines from the KG
    start_time = time.time()
    kg_filename = '%s/%s/%s' % (FLAGS.rootdir, FLAGS.task, FLAGS.kg_file)
    logging.info('loading KG from %s', kg_filename)
    with tf.gfile.GFile(kg_filename) as fp:
      context.load_kg(files=fp)
    logging.info('loaded kg in %.3f sec', (time.time() - start_time))

    # finally extend the KG to allow us to use relation names as variables
    context.construct_relation_group('rel_ent2ent_g', 'entity_t', 'entity_t') 
Developer: google-research | Project: language | Lines: 27 | Source file: nell995.py

Example 13: config_context

# Required import: from tensorflow.compat import v1 [as an alias]
# Alternatively: from tensorflow.compat.v1 import logging [as an alias]
def config_context(self, context, params=None):

    # declare the KG relations
    relation_filename = '%s/%s' % (FLAGS.rootdir, FLAGS.relation_file)
    for line in tf.io.gfile.GFile(relation_filename):
      rel = line.strip()
      context.declare_relation(rel, 'entity_t', 'entity_t')

    # we will also use NQL for a direction flag, which indicates if the relation
    # is followed forward or backward
    context.extend_type('direction_t', ['forward', 'backward'])

    # load the lines from the KG
    start_time = time.time()
    kg_filename = '%s/%s' % (FLAGS.rootdir, FLAGS.kg_file)
    logging.info('loading KG from %s', kg_filename)
    with tf.gfile.GFile(kg_filename) as fp:
      context.load_kg(files=fp)
    logging.info('loaded kg in %.3f sec', (time.time() - start_time))

    # finally extend the KG to allow us to use relation names as variables
    context.construct_relation_group('rel_g', 'entity_t', 'entity_t') 
Developer: google-research | Project: language | Lines: 24 | Source file: metaqa.py

Example 14: convert_tokens_to_ids

# Required import: from tensorflow.compat import v1 [as an alias]
# Alternatively: from tensorflow.compat.v1 import logging [as an alias]
def convert_tokens_to_ids(self, tokens):
        if self.sp_model:
            tf.compat.v1.logging.info("using sentence piece tokenzier.")
            return [self.sp_model.PieceToId(
                printable_text(token)) for token in tokens]
        else:
            return convert_by_vocab(self.vocab, tokens) 
Developer: kpe | Project: bert-for-tf2 | Lines: 9 | Source file: albert_tokenization.py

Example 15: convert_ids_to_tokens

# Required import: from tensorflow.compat import v1 [as an alias]
# Alternatively: from tensorflow.compat.v1 import logging [as an alias]
def convert_ids_to_tokens(self, ids):
        if self.sp_model:
            tf.compat.v1.logging.info("using sentence piece tokenzier.")
            return [self.sp_model.IdToPiece(id_) for id_ in ids]
        else:
            return convert_by_vocab(self.inv_vocab, ids) 
Developer: kpe | Project: bert-for-tf2 | Lines: 8 | Source file: albert_tokenization.py
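A hypothetical round-trip sketch (the `tokenizer` variable and the token strings are assumptions, not taken from the repository) showing how convert_tokens_to_ids and convert_ids_to_tokens from Examples 14 and 15 are typically paired once a SentencePiece model has been loaded:

# `tokenizer` is assumed to be an instance of the tokenizer class these methods belong to.
ids = tokenizer.convert_tokens_to_ids(["▁hello", "▁world"])
tokens = tokenizer.convert_ids_to_tokens(ids)
# Assuming both pieces exist in the loaded SentencePiece vocabulary, the round trip is lossless.
assert tokens == ["▁hello", "▁world"]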


Note: The tensorflow.compat.v1.logging method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. For distribution and use, please consult the corresponding project's license. Do not reproduce without permission.