

Python v1.logging Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.logging method in Python. If you are wondering how to use v1.logging, what it is for, or what it looks like in practice, the curated code examples below may help. You can also explore further usage examples of tensorflow.compat.v1, the module this method belongs to.


The following presents 15 code examples of the v1.logging method, sorted by popularity by default.
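
Before the examples, here is a minimal, self-contained sketch of the tf.compat.v1.logging API itself (the message strings and values are illustrative):

import tensorflow.compat.v1 as tf

# INFO messages are hidden at the default verbosity, so raise it first.
tf.logging.set_verbosity(tf.logging.INFO)
tf.logging.info("training step %d, loss %.4f", 100, 0.25)
tf.logging.warning("checkpoint directory %s not found", "/tmp/ckpt")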

Example 1: printable_text

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import logging [as alias]
def printable_text(text):
    """Returns text encoded in a way suitable for print or `tf.logging`."""

    # These functions want `str` for both Python2 and Python3, but in one case
    # it's a Unicode string and in the other it's a byte string.
    if six.PY3:
        if isinstance(text, str):
            return text
        elif isinstance(text, bytes):
            return six.ensure_text(text, "utf-8", "ignore")
        else:
            raise ValueError("Unsupported string type: %s" % (type(text)))
    elif six.PY2:
        if isinstance(text, str):
            return text
        elif isinstance(text, six.text_type):
            return six.ensure_binary(text, "utf-8")
        else:
            raise ValueError("Unsupported string type: %s" % (type(text)))
    else:
        raise ValueError("Not running on Python2 or Python 3?") 
Developer: kpe, Project: bert-for-tf2, Lines: 23, Source: albert_tokenization.py
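
A brief usage sketch of the function above (the byte string is illustrative):

print(printable_text("already a str"))  # -> already a str
print(printable_text(b"caf\xc3\xa9"))   # decoded to 'café' on Python 3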

Example 2: __init__

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import logging [as alias]
def __init__(self, vocab_file, do_lower_case=True, spm_model_file=None):
        self.vocab = None
        self.sp_model = None
        if spm_model_file:
            import sentencepiece as spm

            self.sp_model = spm.SentencePieceProcessor()
            tf.compat.v1.logging.info("loading sentence piece model")
            self.sp_model.Load(spm_model_file)
            # Note(mingdachen): For the purpose of a consistent API, we are
            # generating a vocabulary for the sentence piece tokenizer.
            self.vocab = {self.sp_model.IdToPiece(i): i for i
                          in range(self.sp_model.GetPieceSize())}
        else:
            self.vocab = load_vocab(vocab_file)
            self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
            self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
        self.inv_vocab = {v: k for k, v in self.vocab.items()} 
Developer: kpe, Project: bert-for-tf2, Lines: 20, Source: albert_tokenization.py
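
A hedged construction sketch, assuming the method above belongs to the FullTokenizer class in albert_tokenization and that the file paths are placeholders:

# SentencePiece mode: the vocab is rebuilt from the .model file, so no vocab_file is needed.
tokenizer = FullTokenizer(vocab_file=None, spm_model_file="albert.model")
# WordPiece mode: a vocab file plus BasicTokenizer/WordpieceTokenizer are used instead.
tokenizer = FullTokenizer(vocab_file="vocab.txt", do_lower_case=True)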

Example 3: _run_one_phase

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import logging [as alias]
def _run_one_phase(self, min_steps, statistics, run_mode_str):
    # Mostly copy of parent method.
    step_count = 0
    num_episodes = 0
    sum_returns = 0.

    while step_count < min_steps:
      num_steps, episode_returns = self._run_one_episode()
      for episode_return in episode_returns:
        statistics.append({
            "{}_episode_lengths".format(run_mode_str):
                num_steps / self.batch_size,
            "{}_episode_returns".format(run_mode_str): episode_return
        })
      step_count += num_steps
      sum_returns += sum(episode_returns)
      num_episodes += self.batch_size
      # We use sys.stdout.write instead of tf.logging so as to flush frequently
      # without generating a line break.
      sys.stdout.write("Steps executed: {} ".format(step_count) +
                       "Batch episodes steps: {} ".format(num_steps) +
                       "Returns: {}\r".format(episode_returns))
      sys.stdout.flush()
    return step_count, sum_returns, num_episodes 
Developer: tensorflow, Project: tensor2tensor, Lines: 26, Source: dopamine_connector.py

Example 4: initialize_from_ckpt

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import logging [as alias]
def initialize_from_ckpt(ckpt_dir, hparams):
  """Initialize variables from given directory."""
  model_dir = hparams.get("model_dir", None)
  already_has_ckpt = (
      model_dir and tf.train.latest_checkpoint(model_dir) is not None)
  if already_has_ckpt:
    return

  tf.logging.info("Checkpoint dir: %s", ckpt_dir)
  reader = contrib.framework().load_checkpoint(ckpt_dir)
  variable_map = {}
  for var in contrib.framework().get_trainable_variables():
    var_name = var.name.split(":")[0]
    if reader.has_tensor(var_name):
      tf.logging.info("Loading variable from checkpoint: %s", var_name)
      variable_map[var_name] = var
    else:
      tf.logging.info("Cannot find variable in checkpoint, skipping: %s",
                      var_name)
  tf.train.init_from_checkpoint(ckpt_dir, variable_map) 
Developer: tensorflow, Project: tensor2tensor, Lines: 22, Source: t2t_model.py
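
A minimal call sketch (the checkpoint directory is hypothetical; hparams can be any dict-like object with a .get() method):

hparams = {"model_dir": None}  # no checkpoint in model_dir yet, so variables are restored
initialize_from_ckpt("/tmp/pretrained_model", hparams)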

Example 5: printable_text

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import logging [as alias]
def printable_text(text):
  """Returns text encoded in a way suitable for print or `tf.logging`."""

  # These functions want `str` for both Python2 and Python3, but in one case
  # it's a Unicode string and in the other it's a byte string.
  if six.PY3:
    if isinstance(text, str):
      return text
    elif isinstance(text, bytes):
      return text.decode("utf-8", "ignore")
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  elif six.PY2:
    if isinstance(text, str):
      return text
    elif isinstance(text, unicode):
      return text.encode("utf-8")
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  else:
    raise ValueError("Not running on Python2 or Python 3?") 
Developer: imcaspar, Project: gpt2-ml, Lines: 23, Source: tokenization.py

Example 6: printable_text

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import logging [as alias]
def printable_text(text):
  """Returns text encoded in a way suitable for print or `tf.logging`."""

  # These functions want `str` for both Python2 and Python3, but in one case
  # it's a Unicode string and in the other it's a byte string.
  if six.PY3:
    if isinstance(text, str):
      return text
    elif isinstance(text, bytes):
      return six.ensure_text(text, "utf-8", "ignore")
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  elif six.PY2:
    if isinstance(text, str):
      return text
    elif isinstance(text, six.text_type):
      return six.ensure_binary(text, "utf-8")
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  else:
    raise ValueError("Not running on Python2 or Python 3?") 
Developer: google-research, Project: albert, Lines: 23, Source: tokenization.py

Example 7: __init__

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import logging [as alias]
def __init__(self, vocab_file, do_lower_case=True, spm_model_file=None):
    self.vocab = None
    self.sp_model = None
    if spm_model_file:
      self.sp_model = spm.SentencePieceProcessor()
      tf.logging.info("loading sentence piece model")
      # Handle cases where SP can't load the file, but gfile can.
      sp_model_ = tf.gfile.GFile(spm_model_file, "rb").read()
      self.sp_model.LoadFromSerializedProto(sp_model_)
      # Note(mingdachen): For the purpose of a consistent API, we are
      # generating a vocabulary for the sentence piece tokenizer.
      self.vocab = {self.sp_model.IdToPiece(i): i for i
                    in range(self.sp_model.GetPieceSize())}
    else:
      self.vocab = load_vocab(vocab_file)
      self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
      self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
    self.inv_vocab = {v: k for k, v in self.vocab.items()} 
Developer: google-research, Project: albert, Lines: 20, Source: tokenization.py

Example 8: main

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import logging [as alias]
def main(_):
  with open(FLAGS.wiki103_raw, "r") as f:
    data = f.read().strip().split("\n")

  data = [x.split(" . ") for x in data if x.strip() and x.strip()[0] != "="]

  sentences = []
  for para in data:
    for sent in para:
      sentences.append(sent + ".")
  data = "\n".join(sentences)

  data = data.replace(" @.@ ", ".").replace(" @-@ ", "-").replace(" ,", ",")
  data = data.replace(" \'", "\'").replace(" )", ")").replace("( ", "(")
  data = data.replace(" ;", ";")

  data = "\n".join([x for x in data.split("\n") if len(x.split()) > 3])

  logging.info("length = %d", len(data.split("\n")))

  with open(FLAGS.output_path, "w") as f:
    f.write(data) 
Developer: google-research, Project: language, Lines: 24, Source: wiki103_sentencize.py
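
The logging.info call above is silent at TensorFlow's default verbosity; a small sketch of enabling it, assuming logging here is tensorflow.compat.v1.logging:

from tensorflow.compat.v1 import logging

logging.set_verbosity(logging.INFO)  # INFO messages now reach stderr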

Example 9: main

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import logging [as alias]
def main(_):

  def load_dataset_file(dataset_file):
    with gfile.Open(dataset_file) as df:
      dataset_json = json.load(df)
    data = dataset_json['data']
    return data

  def load_preds_file(prediction_file):
    with gfile.Open(prediction_file) as pf:
      preds = json.load(pf)
    return preds

  dataset = load_dataset_file(FLAGS.watermark_file)
  preds = load_preds_file(FLAGS.watermark_output_file)
  logging.info('Watermark Label Accuracy =')
  logging.info(
      json.dumps(evaluate_dataset_preds(dataset, preds, ans_key='answers')))
  logging.info('Victim Label Accuracy =')
  logging.info(
      json.dumps(
          evaluate_dataset_preds(dataset, preds, ans_key='original_answers'))) 
Developer: google-research, Project: language, Lines: 24, Source: evaluate_squad_watermark.py

Example 10: main

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import logging [as alias]
def main(_):
  with open(FLAGS.wiki103_raw, "r") as f:
    data = f.read().strip().split("\n")

  data = [x for x in data if x.strip() and x.strip()[0] != "="]

  data = "\n".join(data)

  data = data.replace(" @.@ ", ".").replace(" @-@ ", "-").replace(" ,", ",")
  data = data.replace(" \'", "\'").replace(" )", ")").replace("( ", "(")
  data = data.replace(" ;", ";").replace(" .", ".").replace(" :", ":")

  data = "\n".join([x for x in data.split("\n") if len(x.split()) > 20])

  logging.info("length = %d", len(data.split("\n")))

  with open(FLAGS.output_path, "w") as f:
    f.write(data) 
Developer: google-research, Project: language, Lines: 20, Source: wiki103_para_split.py

Example 11: printable_text

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import logging [as alias]
def printable_text(text, strip_roberta_space=False):
  """Returns text encoded in a way suitable for print or `tf.logging`."""

  # These functions want `str` for both Python2 and Python3, but in one case
  # it's a Unicode string and in the other it's a byte string.
  strip = (lambda x: (x.replace('Ġ', '') if x.startswith('Ġ') else x)
          ) if strip_roberta_space else (lambda x: x)
  if six.PY3:
    if isinstance(text, str):
      return strip(text)
    elif isinstance(text, bytes):
      return strip(text.decode('utf-8', 'ignore'))
    else:
      raise ValueError('Unsupported string type: %s' % (type(text)))
  elif six.PY2:
    if isinstance(text, str):
      return strip(text)
    elif isinstance(text, unicode):
      return strip(text.encode('utf-8'))
    else:
      raise ValueError('Unsupported string type: %s' % (type(text)))
  else:
    raise ValueError('Not running on Python2 or Python 3?') 
Developer: google-research, Project: language, Lines: 25, Source: tokenization.py
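
A short sketch of the strip_roberta_space behaviour (the token is illustrative; 'Ġ' is the GPT-2/RoBERTa BPE marker for a leading space):

print(printable_text("Ġhello", strip_roberta_space=True))   # -> hello
print(printable_text("Ġhello", strip_roberta_space=False))  # -> Ġhello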

Example 12: config_context

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import logging [as alias]
def config_context(self, context, params=None):

    # declare the KG relations
    relation_filename = '%s/%s/%s' % (FLAGS.rootdir, FLAGS.task,
                                      FLAGS.relation_file)

    for line in tf.io.gfile.GFile(relation_filename):
      line = line.strip()
      rel, subj_type, obj_type = line.split('\t')
      context.declare_relation(rel, subj_type, obj_type)

    # we will also use NQL for a distance flag, which indicates if the relation
    # is followed forward or backward
    context.extend_type('distance_t', [str(h) for h in range(FLAGS.num_hops)])

    # load the lines from the KG
    start_time = time.time()
    kg_filename = '%s/%s/%s' % (FLAGS.rootdir, FLAGS.task, FLAGS.kg_file)
    logging.info('loading KG from %s', kg_filename)
    with tf.gfile.GFile(kg_filename) as fp:
      context.load_kg(files=fp)
    logging.info('loaded kg in %.3f sec', (time.time() - start_time))

    # finally extend the KG to allow us to use relation names as variables
    context.construct_relation_group('rel_ent2ent_g', 'entity_t', 'entity_t') 
Developer: google-research, Project: language, Lines: 27, Source: nell995.py

Example 13: config_context

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import logging [as alias]
def config_context(self, context, params=None):

    # declare the KG relations
    relation_filename = '%s/%s' % (FLAGS.rootdir, FLAGS.relation_file)
    for line in tf.io.gfile.GFile(relation_filename):
      rel = line.strip()
      context.declare_relation(rel, 'entity_t', 'entity_t')

    # we will also use NQL for a direction flag, which indicates if the relation
    # is followed forward or backward
    context.extend_type('direction_t', ['forward', 'backward'])

    # load the lines from the KG
    start_time = time.time()
    kg_filename = '%s/%s' % (FLAGS.rootdir, FLAGS.kg_file)
    logging.info('loading KG from %s', kg_filename)
    with tf.gfile.GFile(kg_filename) as fp:
      context.load_kg(files=fp)
    logging.info('loaded kg in %.3f sec', (time.time() - start_time))

    # finally extend the KG to allow us to use relation names as variables
    context.construct_relation_group('rel_g', 'entity_t', 'entity_t') 
Developer: google-research, Project: language, Lines: 24, Source: metaqa.py

Example 14: convert_tokens_to_ids

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import logging [as alias]
def convert_tokens_to_ids(self, tokens):
        if self.sp_model:
            tf.compat.v1.logging.info("using sentence piece tokenzier.")
            return [self.sp_model.PieceToId(
                printable_text(token)) for token in tokens]
        else:
            return convert_by_vocab(self.vocab, tokens) 
Developer: kpe, Project: bert-for-tf2, Lines: 9, Source: albert_tokenization.py

Example 15: convert_ids_to_tokens

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import logging [as alias]
def convert_ids_to_tokens(self, ids):
        if self.sp_model:
            tf.compat.v1.logging.info("using sentence piece tokenzier.")
            return [self.sp_model.IdToPiece(id_) for id_ in ids]
        else:
            return convert_by_vocab(self.inv_vocab, ids) 
Developer: kpe, Project: bert-for-tf2, Lines: 8, Source: albert_tokenization.py
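
A hedged round-trip sketch combining Examples 14 and 15, assuming tokenizer was built with a SentencePiece model as in Example 2 (the piece strings are illustrative):

ids = tokenizer.convert_tokens_to_ids(["▁hello", "▁world"])
tokens = tokenizer.convert_ids_to_tokens(ids)  # maps the ids back to the same pieces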


Note: The tensorflow.compat.v1.logging examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please consult each project's license before using or redistributing the code; do not reproduce this article without permission.