

Python logging.set_verbosity Method: Code Examples

This article collects typical usage examples of the logging.set_verbosity method from Python's tensorflow.logging module. If you are wondering what exactly logging.set_verbosity does, how to call it, or simply want to see working code, the curated examples below should help. You can also explore further usage examples from tensorflow.logging, the module this method belongs to.


The following presents 12 code examples of the logging.set_verbosity method, ordered by popularity by default.
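Before the individual examples, here is a minimal, self-contained sketch of the typical call pattern. It assumes TensorFlow 1.x, where tf.logging is still available (in TensorFlow 2.x the module was removed in favor of tf.compat.v1.logging or absl.logging):

import tensorflow as tf
from tensorflow import logging

# Show messages at INFO level and above; DEBUG, INFO, WARN, ERROR and FATAL
# are the verbosity levels exposed by tf.logging.
logging.set_verbosity(tf.logging.INFO)

logging.info("This INFO message is shown.")
logging.debug("This DEBUG message is suppressed.")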

Example 1: main

# Required import: from tensorflow import logging [as alias]
# Or: from tensorflow.logging import set_verbosity [as alias]
def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)

  # convert feature_names and feature_sizes to lists of values
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      FLAGS.feature_names, FLAGS.feature_sizes)

  if FLAGS.frame_features:
    reader = readers.YT8MFrameFeatureReader(feature_names=feature_names,
                                            feature_sizes=feature_sizes)
  else:
    reader = readers.YT8MAggregatedFeatureReader(feature_names=feature_names,
                                                 feature_sizes=feature_sizes)

  if FLAGS.output_file == "":
    raise ValueError("'output_file' was not specified. "
                     "Unable to continue with inference.")

  if FLAGS.input_data_pattern == "":
    raise ValueError("'input_data_pattern' was not specified. "
                     "Unable to continue with inference.")

  inference(reader, FLAGS.train_dir, FLAGS.input_data_pattern,
    FLAGS.output_file, FLAGS.batch_size, FLAGS.top_k) 
Author: antoine77340, Project: Youtube-8M-WILLOW, Lines: 26, Source: inference.py

Example 2: main

# Required import: from tensorflow import logging [as alias]
# Or: from tensorflow.logging import set_verbosity [as alias]
def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)

  # convert feature_names and feature_sizes to lists of values
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      FLAGS.feature_names, FLAGS.feature_sizes)

  if FLAGS.frame_features:
    reader = readers.YT8MFrameFeatureReader(feature_names=feature_names,
                                            feature_sizes=feature_sizes)
  else:
    reader = readers.YT8MAggregatedFeatureReader(feature_names=feature_names,
                                                 feature_sizes=feature_sizes)

  if FLAGS.output_dir == "":
    raise ValueError("'output_dir' was not specified. "
                     "Unable to continue with inference.")

  if FLAGS.input_data_pattern == "":
    raise ValueError("'input_data_pattern' was not specified. "
                     "Unable to continue with inference.")

  inference(reader, FLAGS.model_checkpoint_path, FLAGS.input_data_pattern,
      FLAGS.output_dir, FLAGS.batch_size, FLAGS.top_k) 
Author: wangheda, Project: youtube-8m, Lines: 26, Source: inference-pre-ensemble.py

Example 3: setup_tensorflow

# Required import: from tensorflow import logging [as alias]
# Or: from tensorflow.logging import set_verbosity [as alias]
def setup_tensorflow():
    """Setup options for TensorFlow.

    These options should allow most users to run TensorFlow with either a
    GPU or a CPU. The function limits how much GPU memory Keras may claim,
    works around a common library-conflict warning that can occur on macOS,
    and silences verbose TensorFlow warnings that most users can safely
    ignore.
    """
    from keras.backend.tensorflow_backend import set_session
    from tensorflow import logging, ConfigProto, Session
    from os import environ

    # suppress warnings
    logging.set_verbosity(logging.ERROR)
    environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

    # ensure that keras does not use all of the available memory
    config = ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.3
    config.gpu_options.visible_device_list = "0"
    set_session(Session(config=config))

    # fix a common local bug
    environ["KMP_DUPLICATE_LIB_OK"] = "TRUE" 
Author: distant-viewing, Project: dvt, Lines: 27, Source: utils.py
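A usage note for Example 3: setup_tensorflow is meant to be called once, before any Keras model is built, so that the memory-limited session is the one Keras actually uses. A minimal sketch; the import path dvt.utils is an assumption based on the source file listed above:

from dvt.utils import setup_tensorflow  # assumed import path

# Configure the TensorFlow session and silence noisy warnings first ...
setup_tensorflow()

# ... then build and run Keras models as usual; they share the session
# installed by set_session() inside setup_tensorflow().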

Example 4: main

# Required import: from tensorflow import logging [as alias]
# Or: from tensorflow.logging import set_verbosity [as alias]
def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)

  # convert feature_names and feature_sizes to lists of values
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      FLAGS.feature_names, FLAGS.feature_sizes)

  if FLAGS.frame_features:
    reader = readers.YT8MFrameFeatureReader(feature_names=feature_names,
                                            feature_sizes=feature_sizes)
  else:
    reader = readers.YT8MAggregatedFeatureReader(feature_names=feature_names,
                                                 feature_sizes=feature_sizes)

  if FLAGS.output_file == "":
    raise ValueError("'output_file' was not specified. "
                     "Unable to continue with inference.")

  if FLAGS.input_data_pattern == "":
    raise ValueError("'input_data_pattern' was not specified. "
                     "Unable to continue with inference.")

  inference(reader, FLAGS.checkpoint_file, FLAGS.train_dir, FLAGS.input_data_pattern,
    FLAGS.output_file, FLAGS.batch_size, FLAGS.top_k) 
Author: mpekalski, Project: Y8M, Lines: 26, Source: inference.py

Example 5: __init__

# Required import: from tensorflow import logging [as alias]
# Or: from tensorflow.logging import set_verbosity [as alias]
def __init__(self, folder_name, host="127.0.0.1", verbosity=logging.WARN):
        Thread.__init__(self)
        self.project_key = os.environ["DKU_CURRENT_PROJECT_KEY"]
        self.folder_name = folder_name
        self.client = dataiku.api_client()

        logging.set_verbosity(verbosity)

        # Getting app
        logs_path = self.__get_logs_path()
        app = self.__get_tb_app(logs_path)

        # Setting server
        self.srv = make_server(host, 0, app) 
Author: dataiku, Project: dataiku-contrib, Lines: 16, Source: tensorboard_handle.py

Example 6: main

# Required import: from tensorflow import logging [as alias]
# Or: from tensorflow.logging import set_verbosity [as alias]
def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)

  if not FLAGS.json_prediction_files_pattern:
    raise ValueError(
        "The flag --json_prediction_files_pattern must be specified.")

  if not FLAGS.csv_output_file:
    raise ValueError("The flag --csv_output_file must be specified.")

  logging.info("Looking for prediction files with pattern: %s", 
               FLAGS.json_prediction_files_pattern)

  file_paths = gfile.Glob(FLAGS.json_prediction_files_pattern)  
  logging.info("Found files: %s", file_paths)

  logging.info("Writing submission file to: %s", FLAGS.csv_output_file)
  with gfile.Open(FLAGS.csv_output_file, "w+") as output_file:
    output_file.write(get_csv_header())

    for file_path in file_paths:
      logging.info("processing file: %s", file_path)

      with gfile.Open(file_path) as input_file:

        for line in input_file: 
          json_data = json.loads(line)
          output_file.write(to_csv_row(json_data))

    output_file.flush()
  logging.info("done") 
Author: antoine77340, Project: Youtube-8M-WILLOW, Lines: 33, Source: convert_prediction_from_json_to_csv.py

Example 7: main

# Required import: from tensorflow import logging [as alias]
# Or: from tensorflow.logging import set_verbosity [as alias]
def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)
  print("tensorflow version: %s" % tf.__version__)
  evaluate() 
Author: antoine77340, Project: Youtube-8M-WILLOW, Lines: 6, Source: eval.py

Example 8: main

# Required import: from tensorflow import logging [as alias]
# Or: from tensorflow.logging import set_verbosity [as alias]
def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)
  print("tensorflow version: %s" % tf.__version__)
  check_video_id() 
Author: wangheda, Project: youtube-8m, Lines: 6, Source: check_distillation.py

Example 9: main

# Required import: from tensorflow import logging [as alias]
# Or: from tensorflow.logging import set_verbosity [as alias]
def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)
  print("tensorflow version: %s" % tf.__version__)
  inference() 
Author: wangheda, Project: youtube-8m, Lines: 6, Source: inference.py

Example 10: main

# Required import: from tensorflow import logging [as alias]
# Or: from tensorflow.logging import set_verbosity [as alias]
def main(unused_argv):
  # Load the environment.
  env = json.loads(os.environ.get("TF_CONFIG", "{}"))

  # Load the cluster data from the environment.
  cluster_data = env.get("cluster", None)
  cluster = tf.train.ClusterSpec(cluster_data) if cluster_data else None

  # Load the task data from the environment.
  task_data = env.get("task", None) or {"type": "master", "index": 0}
  task = type("TaskSpec", (object,), task_data)

  # Logging the version.
  logging.set_verbosity(tf.logging.INFO)
  logging.info("%s: Tensorflow version: %s.",
               task_as_string(task), tf.__version__)

  # Dispatch to a master, a worker, or a parameter server.
  if not cluster or task.type == "master" or task.type == "worker":
    Trainer(cluster, task, FLAGS.train_dir, FLAGS.log_device_placement).run(
        start_new_model=FLAGS.start_new_model)
  elif task.type == "ps":
    ParameterServer(cluster, task).run()
  else:
    raise ValueError("%s: Invalid task_type: %s." %
                     (task_as_string(task), task.type)) 
Author: wangheda, Project: youtube-8m, Lines: 28, Source: train.py
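Examples 10 and 11 (below) dispatch work based on the TF_CONFIG environment variable. Here is a minimal sketch of a value the parsing code above would accept; the job names follow the cluster/task convention used by tf.train.ClusterSpec, and the host:port addresses are made-up placeholders rather than values from the quoted projects:

import json
import os

os.environ["TF_CONFIG"] = json.dumps({
    "cluster": {
        "master": ["10.0.0.1:2222"],
        "worker": ["10.0.0.2:2222", "10.0.0.3:2222"],
        "ps": ["10.0.0.4:2222"],
    },
    "task": {"type": "worker", "index": 0},
})

# json.loads(os.environ.get("TF_CONFIG", "{}")) now yields the cluster spec
# and task description that main() uses to start a Trainer or ParameterServer.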

Example 11: main

# Required import: from tensorflow import logging [as alias]
# Or: from tensorflow.logging import set_verbosity [as alias]
def main(unused_argv):
    # Load the environment.
    env = json.loads(os.environ.get("TF_CONFIG", "{}"))

    # Load the cluster data from the environment.
    cluster_data = env.get("cluster", None)
    cluster = tf.train.ClusterSpec(cluster_data) if cluster_data else None

    # Load the task data from the environment.
    task_data = env.get("task", None) or {"type": "master", "index": 0}
    task = type("TaskSpec", (object,), task_data)

    # Logging the version.
    logging.set_verbosity(tf.logging.INFO)
    logging.info("%s: Tensorflow version: %s.",
                 task_as_string(task), tf.__version__)

    # Dispatch to a master, a worker, or a parameter server.
    if not cluster or task.type == "master" or task.type == "worker":
        Trainer(cluster, task, FLAGS.train_dir, FLAGS.log_device_placement).run(
            start_new_model=FLAGS.start_new_model)
    elif task.type == "ps":
        ParameterServer(cluster, task).run()
    else:
        raise ValueError("%s: Invalid task_type: %s." %
                         (task_as_string(task), task.type)) 
Author: wangheda, Project: youtube-8m, Lines: 28, Source: train-with-rebuild.py

Example 12: main

# Required import: from tensorflow import logging [as alias]
# Or: from tensorflow.logging import set_verbosity [as alias]
def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)

  # convert feature_names and feature_sizes to lists of values
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      FLAGS.feature_names, FLAGS.feature_sizes)

  if FLAGS.frame_features:
    reader = readers.YT8MFrameFeatureReader(feature_names=feature_names,
                                            feature_sizes=feature_sizes)
  else:
    reader = readers.YT8MAggregatedFeatureReader(feature_names=feature_names,
                                                 feature_sizes=feature_sizes)

  if FLAGS.output_file == "":
    raise ValueError("'output_file' was not specified. "
                     "Unable to continue with inference.")

  if FLAGS.input_data_pattern == "":
    raise ValueError("'input_data_pattern' was not specified. "
                     "Unable to continue with inference.")

  model = find_class_by_name(FLAGS.model,
                             [frame_level_models, video_level_models])()
  transformer_fn = find_class_by_name(FLAGS.feature_transformer, 
                                         [feature_transform])

  build_graph(reader,
              model,
              input_data_pattern=FLAGS.input_data_pattern,
              batch_size=FLAGS.batch_size,
              transformer_class=transformer_fn)

  saver = tf.train.Saver(max_to_keep=3, keep_checkpoint_every_n_hours=10000000000)

  inference(saver, FLAGS.train_dir,
            FLAGS.output_file, FLAGS.batch_size, FLAGS.top_k) 
Author: wangheda, Project: youtube-8m, Lines: 39, Source: inference-sample-error.py


Note: The tensorflow.logging.set_verbosity examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code, and do not republish this article without permission.