

Python logging.set_verbosity Method Code Examples

This article collects typical usage examples of the tensorflow.logging.set_verbosity method in Python. If you are wondering what logging.set_verbosity does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples for the tensorflow.logging module that the method belongs to.


A total of 12 code examples of the logging.set_verbosity method are shown below, sorted by popularity by default.
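Before the examples, here is a minimal sketch of the basic call pattern (not taken from any of the projects below). It assumes the TensorFlow 1.x API, where the tf.logging module is still available; in TensorFlow 2.x that module was removed and tf.compat.v1.logging or absl.logging is used instead.

import tensorflow as tf
from tensorflow import logging

# Only emit log messages at INFO level or above; the other thresholds are
# DEBUG, WARN, ERROR and FATAL.
logging.set_verbosity(tf.logging.INFO)
logging.info("TensorFlow version: %s", tf.__version__)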

Example 1: main

# Required import: from tensorflow import logging [as an alias]
# Or: from tensorflow.logging import set_verbosity [as an alias]
def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)

  # convert feature_names and feature_sizes to lists of values
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      FLAGS.feature_names, FLAGS.feature_sizes)

  if FLAGS.frame_features:
    reader = readers.YT8MFrameFeatureReader(feature_names=feature_names,
                                            feature_sizes=feature_sizes)
  else:
    reader = readers.YT8MAggregatedFeatureReader(feature_names=feature_names,
                                                 feature_sizes=feature_sizes)

  if FLAGS.output_file == "":
    raise ValueError("'output_file' was not specified. "
                     "Unable to continue with inference.")

  if FLAGS.input_data_pattern == "":
    raise ValueError("'input_data_pattern' was not specified. "
                     "Unable to continue with inference.")

  inference(reader, FLAGS.train_dir, FLAGS.input_data_pattern,
    FLAGS.output_file, FLAGS.batch_size, FLAGS.top_k) 
Author: antoine77340, Project: Youtube-8M-WILLOW, Lines of code: 26, Source file: inference.py

Example 2: main

# Required import: from tensorflow import logging [as an alias]
# Or: from tensorflow.logging import set_verbosity [as an alias]
def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)

  # convert feature_names and feature_sizes to lists of values
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      FLAGS.feature_names, FLAGS.feature_sizes)

  if FLAGS.frame_features:
    reader = readers.YT8MFrameFeatureReader(feature_names=feature_names,
                                            feature_sizes=feature_sizes)
  else:
    reader = readers.YT8MAggregatedFeatureReader(feature_names=feature_names,
                                                 feature_sizes=feature_sizes)

  if FLAGS.output_dir == "":
    raise ValueError("'output_dir' was not specified. "
                     "Unable to continue with inference.")

  if FLAGS.input_data_pattern == "":
    raise ValueError("'input_data_pattern' was not specified. "
                     "Unable to continue with inference.")

  inference(reader, FLAGS.model_checkpoint_path, FLAGS.input_data_pattern,
      FLAGS.output_dir, FLAGS.batch_size, FLAGS.top_k) 
Author: wangheda, Project: youtube-8m, Lines of code: 26, Source file: inference-pre-ensemble.py

Example 3: setup_tensorflow

# Required import: from tensorflow import logging [as an alias]
# Or: from tensorflow.logging import set_verbosity [as an alias]
def setup_tensorflow():
    """Setup options for TensorFlow.

    These options should allow most users to run TensorFlow with either a
    GPU or a CPU. They keep keras from taking up too much memory, ignore a
    common warning about library conflicts that can occur on macOS, and
    silence verbose warnings from TensorFlow that most users can safely
    ignore.
    """
    from keras.backend.tensorflow_backend import set_session
    from tensorflow import logging, ConfigProto, Session
    from os import environ

    # suppress warnings
    logging.set_verbosity(logging.ERROR)
    environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

    # ensure that keras does not use all of the available memory
    config = ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.3
    config.gpu_options.visible_device_list = "0"
    set_session(Session(config=config))

    # fix a common local bug
    environ["KMP_DUPLICATE_LIB_OK"] = "TRUE" 
Author: distant-viewing, Project: dvt, Lines of code: 27, Source file: utils.py

Example 4: main

# Required import: from tensorflow import logging [as an alias]
# Or: from tensorflow.logging import set_verbosity [as an alias]
def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)

  # convert feature_names and feature_sizes to lists of values
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      FLAGS.feature_names, FLAGS.feature_sizes)

  if FLAGS.frame_features:
    reader = readers.YT8MFrameFeatureReader(feature_names=feature_names,
                                            feature_sizes=feature_sizes)
  else:
    reader = readers.YT8MAggregatedFeatureReader(feature_names=feature_names,
                                                 feature_sizes=feature_sizes)

  if FLAGS.output_file == "":
    raise ValueError("'output_file' was not specified. "
                     "Unable to continue with inference.")

  if FLAGS.input_data_pattern == "":
    raise ValueError("'input_data_pattern' was not specified. "
                     "Unable to continue with inference.")

  inference(reader, FLAGS.checkpoint_file, FLAGS.train_dir, FLAGS.input_data_pattern,
    FLAGS.output_file, FLAGS.batch_size, FLAGS.top_k) 
Author: mpekalski, Project: Y8M, Lines of code: 26, Source file: inference.py

Example 5: __init__

# Required import: from tensorflow import logging [as an alias]
# Or: from tensorflow.logging import set_verbosity [as an alias]
def __init__(self, folder_name, host="127.0.0.1", verbosity=logging.WARN):
        Thread.__init__(self)
        self.project_key = os.environ["DKU_CURRENT_PROJECT_KEY"]
        self.folder_name = folder_name
        self.client = dataiku.api_client()

        logging.set_verbosity(verbosity)

        # Getting app
        logs_path = self.__get_logs_path()
        app = self.__get_tb_app(logs_path)

        # Setting server
        self.srv = make_server(host, 0, app) 
Author: dataiku, Project: dataiku-contrib, Lines of code: 16, Source file: tensorboard_handle.py

Example 6: main

# Required import: from tensorflow import logging [as an alias]
# Or: from tensorflow.logging import set_verbosity [as an alias]
def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)

  if not FLAGS.json_prediction_files_pattern:
    raise ValueError(
        "The flag --json_prediction_files_pattern must be specified.")

  if not FLAGS.csv_output_file:
    raise ValueError("The flag --csv_output_file must be specified.")

  logging.info("Looking for prediction files with pattern: %s", 
               FLAGS.json_prediction_files_pattern)

  file_paths = gfile.Glob(FLAGS.json_prediction_files_pattern)  
  logging.info("Found files: %s", file_paths)

  logging.info("Writing submission file to: %s", FLAGS.csv_output_file)
  with gfile.Open(FLAGS.csv_output_file, "w+") as output_file:
    output_file.write(get_csv_header())

    for file_path in file_paths:
      logging.info("processing file: %s", file_path)

      with gfile.Open(file_path) as input_file:

        for line in input_file: 
          json_data = json.loads(line)
          output_file.write(to_csv_row(json_data))

    output_file.flush()
  logging.info("done") 
Author: antoine77340, Project: Youtube-8M-WILLOW, Lines of code: 33, Source file: convert_prediction_from_json_to_csv.py

Example 7: main

# Required import: from tensorflow import logging [as an alias]
# Or: from tensorflow.logging import set_verbosity [as an alias]
def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)
  print("tensorflow version: %s" % tf.__version__)
  evaluate() 
Author: antoine77340, Project: Youtube-8M-WILLOW, Lines of code: 6, Source file: eval.py

Example 8: main

# Required import: from tensorflow import logging [as an alias]
# Or: from tensorflow.logging import set_verbosity [as an alias]
def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)
  print("tensorflow version: %s" % tf.__version__)
  check_video_id() 
Author: wangheda, Project: youtube-8m, Lines of code: 6, Source file: check_distillation.py

Example 9: main

# Required import: from tensorflow import logging [as an alias]
# Or: from tensorflow.logging import set_verbosity [as an alias]
def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)
  print("tensorflow version: %s" % tf.__version__)
  inference() 
Author: wangheda, Project: youtube-8m, Lines of code: 6, Source file: inference.py

Example 10: main

# Required import: from tensorflow import logging [as an alias]
# Or: from tensorflow.logging import set_verbosity [as an alias]
def main(unused_argv):
  # Load the environment.
  env = json.loads(os.environ.get("TF_CONFIG", "{}"))

  # Load the cluster data from the environment.
  cluster_data = env.get("cluster", None)
  cluster = tf.train.ClusterSpec(cluster_data) if cluster_data else None

  # Load the task data from the environment.
  task_data = env.get("task", None) or {"type": "master", "index": 0}
  task = type("TaskSpec", (object,), task_data)

  # Logging the version.
  logging.set_verbosity(tf.logging.INFO)
  logging.info("%s: Tensorflow version: %s.",
               task_as_string(task), tf.__version__)

  # Dispatch to a master, a worker, or a parameter server.
  if not cluster or task.type == "master" or task.type == "worker":
    Trainer(cluster, task, FLAGS.train_dir, FLAGS.log_device_placement).run(
        start_new_model=FLAGS.start_new_model)
  elif task.type == "ps":
    ParameterServer(cluster, task).run()
  else:
    raise ValueError("%s: Invalid task_type: %s." %
                     (task_as_string(task), task.type)) 
Author: wangheda, Project: youtube-8m, Lines of code: 28, Source file: train.py

Example 11: main

# Required import: from tensorflow import logging [as an alias]
# Or: from tensorflow.logging import set_verbosity [as an alias]
def main(unused_argv):
    # Load the environment.
    env = json.loads(os.environ.get("TF_CONFIG", "{}"))

    # Load the cluster data from the environment.
    cluster_data = env.get("cluster", None)
    cluster = tf.train.ClusterSpec(cluster_data) if cluster_data else None

    # Load the task data from the environment.
    task_data = env.get("task", None) or {"type": "master", "index": 0}
    task = type("TaskSpec", (object,), task_data)

    # Logging the version.
    logging.set_verbosity(tf.logging.INFO)
    logging.info("%s: Tensorflow version: %s.",
                 task_as_string(task), tf.__version__)

    # Dispatch to a master, a worker, or a parameter server.
    if not cluster or task.type == "master" or task.type == "worker":
        Trainer(cluster, task, FLAGS.train_dir, FLAGS.log_device_placement).run(
            start_new_model=FLAGS.start_new_model)
    elif task.type == "ps":
        ParameterServer(cluster, task).run()
    else:
        raise ValueError("%s: Invalid task_type: %s." %
                         (task_as_string(task), task.type)) 
Author: wangheda, Project: youtube-8m, Lines of code: 28, Source file: train-with-rebuild.py

Example 12: main

# Required import: from tensorflow import logging [as an alias]
# Or: from tensorflow.logging import set_verbosity [as an alias]
def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)

  # convert feature_names and feature_sizes to lists of values
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      FLAGS.feature_names, FLAGS.feature_sizes)

  if FLAGS.frame_features:
    reader = readers.YT8MFrameFeatureReader(feature_names=feature_names,
                                            feature_sizes=feature_sizes)
  else:
    reader = readers.YT8MAggregatedFeatureReader(feature_names=feature_names,
                                                 feature_sizes=feature_sizes)

  if FLAGS.output_file == "":
    raise ValueError("'output_file' was not specified. "
                     "Unable to continue with inference.")

  if FLAGS.input_data_pattern == "":
    raise ValueError("'input_data_pattern' was not specified. "
                     "Unable to continue with inference.")

  model = find_class_by_name(FLAGS.model,
                             [frame_level_models, video_level_models])()
  transformer_fn = find_class_by_name(FLAGS.feature_transformer, 
                                         [feature_transform])

  build_graph(reader,
              model,
              input_data_pattern=FLAGS.input_data_pattern,
              batch_size=FLAGS.batch_size,
              transformer_class=transformer_fn)

  saver = tf.train.Saver(max_to_keep=3, keep_checkpoint_every_n_hours=10000000000)

  inference(saver, FLAGS.train_dir,
            FLAGS.output_file, FLAGS.batch_size, FLAGS.top_k) 
Author: wangheda, Project: youtube-8m, Lines of code: 39, Source file: inference-sample-error.py


Note: The tensorflow.logging.set_verbosity examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and redistribution and use should follow the corresponding project's license. Do not reproduce without permission.