This article collects typical usage examples of the Python method official.utils.logs.logger.BenchmarkFileLogger. If you are wondering what logger.BenchmarkFileLogger does or how to use it, the curated code examples below should help. You can also browse the other members of the official.utils.logs.logger module.
The sections below show 15 code examples of logger.BenchmarkFileLogger, sorted by popularity by default.
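Before diving into the test-based examples, here is a minimal usage sketch distilled from them. It assumes the official package from the tensorflow/models repository and TensorFlow are installed and importable; the directory and metric values are illustrative.

import tempfile

from official.utils.logs import logger

# Create a file-based benchmark logger that writes into log_dir.
log_dir = tempfile.mkdtemp()
benchmark_logger = logger.BenchmarkFileLogger(log_dir)

# Record run metadata (model name, dataset name, extra run parameters);
# this is appended to <log_dir>/benchmark_run.log.
benchmark_logger.log_run_info("my_model", "my_dataset", {})

# Record individual metrics; each call appends one JSON line to
# <log_dir>/metric.log.
benchmark_logger.log_metric("accuracy", 0.95, global_step=1000)
benchmark_logger.log_metric("loss", 0.1, global_step=1000, extras={"split": "eval"})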
Example 1: test_log_multiple_metrics
# Required import: from official.utils.logs import logger
# Alternatively: from official.utils.logs.logger import BenchmarkFileLogger
def test_log_multiple_metrics(self):
  log_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
  log = logger.BenchmarkFileLogger(log_dir)
  log.log_metric("accuracy", 0.999, global_step=1e4, extras={"name": "value"})
  log.log_metric("loss", 0.02, global_step=1e4)

  metric_log = os.path.join(log_dir, "metric.log")
  self.assertTrue(tf.io.gfile.exists(metric_log))
  with tf.io.gfile.GFile(metric_log) as f:
    accuracy = json.loads(f.readline())
    self.assertEqual(accuracy["name"], "accuracy")
    self.assertEqual(accuracy["value"], 0.999)
    self.assertEqual(accuracy["unit"], None)
    self.assertEqual(accuracy["global_step"], 1e4)
    self.assertEqual(accuracy["extras"], [{"name": "name", "value": "value"}])

    loss = json.loads(f.readline())
    self.assertEqual(loss["name"], "loss")
    self.assertEqual(loss["value"], 0.02)
    self.assertEqual(loss["unit"], None)
    self.assertEqual(loss["global_step"], 1e4)
    self.assertEqual(loss["extras"], [])
Example 2: test_log_run_info
# Required import: from official.utils.logs import logger
# Alternatively: from official.utils.logs.logger import BenchmarkFileLogger
# Note: in the original test suite this method is wrapped in a mock.patch
# decorator that injects mock_gather_run_info (presumably patching the logger
# module's run-info gathering helper); the decorator was dropped from this listing.
def test_log_run_info(self, mock_gather_run_info):
  log_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
  log = logger.BenchmarkFileLogger(log_dir)
  run_info = {"model_name": "model_name",
              "dataset": "dataset_name",
              "run_info": "run_value"}
  mock_gather_run_info.return_value = run_info
  log.log_run_info("model_name", "dataset_name", {})

  run_log = os.path.join(log_dir, "benchmark_run.log")
  self.assertTrue(tf.io.gfile.exists(run_log))
  with tf.io.gfile.GFile(run_log) as f:
    run_info = json.loads(f.readline())
    self.assertEqual(run_info["model_name"], "model_name")
    self.assertEqual(run_info["dataset"], "dataset_name")
    self.assertEqual(run_info["run_info"], "run_value")
Example 3: test_log_multiple_metrics
# Required import: from official.utils.logs import logger
# Alternatively: from official.utils.logs.logger import BenchmarkFileLogger
# Variant of Example 1 using the TF1-style tf.gfile API.
def test_log_multiple_metrics(self):
  log_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
  log = logger.BenchmarkFileLogger(log_dir)
  log.log_metric("accuracy", 0.999, global_step=1e4, extras={"name": "value"})
  log.log_metric("loss", 0.02, global_step=1e4)

  metric_log = os.path.join(log_dir, "metric.log")
  self.assertTrue(tf.gfile.Exists(metric_log))
  with tf.gfile.GFile(metric_log) as f:
    accuracy = json.loads(f.readline())
    self.assertEqual(accuracy["name"], "accuracy")
    self.assertEqual(accuracy["value"], 0.999)
    self.assertEqual(accuracy["unit"], None)
    self.assertEqual(accuracy["global_step"], 1e4)
    self.assertEqual(accuracy["extras"], [{"name": "name", "value": "value"}])

    loss = json.loads(f.readline())
    self.assertEqual(loss["name"], "loss")
    self.assertEqual(loss["value"], 0.02)
    self.assertEqual(loss["unit"], None)
    self.assertEqual(loss["global_step"], 1e4)
    self.assertEqual(loss["extras"], [])
Example 4: test_log_run_info
# Required import: from official.utils.logs import logger
# Alternatively: from official.utils.logs.logger import BenchmarkFileLogger
# Variant of Example 2 using the TF1-style tf.gfile API; as there, the original
# test is decorated with mock.patch to inject mock_gather_run_info.
def test_log_run_info(self, mock_gather_run_info):
  log_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
  log = logger.BenchmarkFileLogger(log_dir)
  run_info = {"model_name": "model_name",
              "dataset": "dataset_name",
              "run_info": "run_value"}
  mock_gather_run_info.return_value = run_info
  log.log_run_info("model_name", "dataset_name", {})

  run_log = os.path.join(log_dir, "benchmark_run.log")
  self.assertTrue(tf.gfile.Exists(run_log))
  with tf.gfile.GFile(run_log) as f:
    run_info = json.loads(f.readline())
    self.assertEqual(run_info["model_name"], "model_name")
    self.assertEqual(run_info["dataset"], "dataset_name")
    self.assertEqual(run_info["run_info"], "run_value")
Example 5: test_log_evaluation_result
# Required import: from official.utils.logs import logger
# Alternatively: from official.utils.logs.logger import BenchmarkFileLogger
def test_log_evaluation_result(self):
  eval_result = {"loss": 0.46237424,
                 "global_step": 207082,
                 "accuracy": 0.9285}
  log_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
  log = logger.BenchmarkFileLogger(log_dir)
  log.log_evaluation_result(eval_result)

  metric_log = os.path.join(log_dir, "metric.log")
  self.assertTrue(tf.gfile.Exists(metric_log))
  with tf.gfile.GFile(metric_log) as f:
    accuracy = json.loads(f.readline())
    self.assertEqual(accuracy["name"], "accuracy")
    self.assertEqual(accuracy["value"], 0.9285)
    self.assertEqual(accuracy["unit"], None)
    self.assertEqual(accuracy["global_step"], 207082)

    loss = json.loads(f.readline())
    self.assertEqual(loss["name"], "loss")
    self.assertEqual(loss["value"], 0.46237424)
    self.assertEqual(loss["unit"], None)
    self.assertEqual(loss["global_step"], 207082)
Example 6: test_config_benchmark_file_logger
# Required import: from official.utils.logs import logger
# Alternatively: from official.utils.logs.logger import BenchmarkFileLogger
def test_config_benchmark_file_logger(self):
  # Set benchmark_log_dir first, since benchmark_logger_type needs that value
  # to already be set when its validation runs.
  with flagsaver.flagsaver(benchmark_log_dir="/tmp"):
    with flagsaver.flagsaver(benchmark_logger_type="BenchmarkFileLogger"):
      logger.config_benchmark_logger()
      self.assertIsInstance(logger.get_benchmark_logger(),
                            logger.BenchmarkFileLogger)
Example 7: test_create_logging_dir
# Required import: from official.utils.logs import logger
# Alternatively: from official.utils.logs.logger import BenchmarkFileLogger
def test_create_logging_dir(self):
  non_exist_temp_dir = os.path.join(self.get_temp_dir(), "unknown_dir")
  self.assertFalse(tf.io.gfile.isdir(non_exist_temp_dir))
  logger.BenchmarkFileLogger(non_exist_temp_dir)
  self.assertTrue(tf.io.gfile.isdir(non_exist_temp_dir))
Example 8: test_log_metric
# Required import: from official.utils.logs import logger
# Alternatively: from official.utils.logs.logger import BenchmarkFileLogger
def test_log_metric(self):
  log_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
  log = logger.BenchmarkFileLogger(log_dir)
  log.log_metric("accuracy", 0.999, global_step=1e4, extras={"name": "value"})

  metric_log = os.path.join(log_dir, "metric.log")
  self.assertTrue(tf.io.gfile.exists(metric_log))
  with tf.io.gfile.GFile(metric_log) as f:
    metric = json.loads(f.readline())
    self.assertEqual(metric["name"], "accuracy")
    self.assertEqual(metric["value"], 0.999)
    self.assertEqual(metric["unit"], None)
    self.assertEqual(metric["global_step"], 1e4)
    self.assertEqual(metric["extras"], [{"name": "name", "value": "value"}])
Example 9: test_log_non_number_value
# Required import: from official.utils.logs import logger
# Alternatively: from official.utils.logs.logger import BenchmarkFileLogger
def test_log_non_number_value(self):
  log_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
  log = logger.BenchmarkFileLogger(log_dir)
  const = tf.constant(1)
  log.log_metric("accuracy", const)

  # A tf.Tensor is not a plain number, so the metric is dropped and no
  # metric.log file is written.
  metric_log = os.path.join(log_dir, "metric.log")
  self.assertFalse(tf.io.gfile.exists(metric_log))
Example 10: test_log_evaluation_result_with_invalid_type
# Required import: from official.utils.logs import logger
# Alternatively: from official.utils.logs.logger import BenchmarkFileLogger
def test_log_evaluation_result_with_invalid_type(self):
  eval_result = "{'loss': 0.46237424, 'global_step': 207082}"
  log_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
  log = logger.BenchmarkFileLogger(log_dir)
  log.log_evaluation_result(eval_result)

  # eval_result is a string rather than a dict, so nothing is logged.
  metric_log = os.path.join(log_dir, "metric.log")
  self.assertFalse(tf.io.gfile.exists(metric_log))
Example 11: test_config_benchmark_file_logger
# Required import: from official.utils.logs import logger
# Alternatively: from official.utils.logs.logger import BenchmarkFileLogger
# Same as Example 6 apart from the quoting style of the flag values.
def test_config_benchmark_file_logger(self):
  # Set benchmark_log_dir first, since benchmark_logger_type needs that value
  # to already be set when its validation runs.
  with flagsaver.flagsaver(benchmark_log_dir='/tmp'):
    with flagsaver.flagsaver(benchmark_logger_type='BenchmarkFileLogger'):
      logger.config_benchmark_logger()
      self.assertIsInstance(logger.get_benchmark_logger(),
                            logger.BenchmarkFileLogger)
Example 12: test_create_logging_dir
# Required import: from official.utils.logs import logger
# Alternatively: from official.utils.logs.logger import BenchmarkFileLogger
# Variant of Example 7 using the TF1-style tf.gfile API.
def test_create_logging_dir(self):
  non_exist_temp_dir = os.path.join(self.get_temp_dir(), "unknown_dir")
  self.assertFalse(tf.gfile.IsDirectory(non_exist_temp_dir))
  logger.BenchmarkFileLogger(non_exist_temp_dir)
  self.assertTrue(tf.gfile.IsDirectory(non_exist_temp_dir))
Example 13: test_log_metric
# Required import: from official.utils.logs import logger
# Alternatively: from official.utils.logs.logger import BenchmarkFileLogger
# Variant of Example 8 using the TF1-style tf.gfile API.
def test_log_metric(self):
  log_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
  log = logger.BenchmarkFileLogger(log_dir)
  log.log_metric("accuracy", 0.999, global_step=1e4, extras={"name": "value"})

  metric_log = os.path.join(log_dir, "metric.log")
  self.assertTrue(tf.gfile.Exists(metric_log))
  with tf.gfile.GFile(metric_log) as f:
    metric = json.loads(f.readline())
    self.assertEqual(metric["name"], "accuracy")
    self.assertEqual(metric["value"], 0.999)
    self.assertEqual(metric["unit"], None)
    self.assertEqual(metric["global_step"], 1e4)
    self.assertEqual(metric["extras"], [{"name": "name", "value": "value"}])
Example 14: test_log_non_number_value
# Required import: from official.utils.logs import logger
# Alternatively: from official.utils.logs.logger import BenchmarkFileLogger
# Variant of Example 9 using the TF1-style tf.gfile API.
def test_log_non_number_value(self):
  log_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
  log = logger.BenchmarkFileLogger(log_dir)
  const = tf.constant(1)
  log.log_metric("accuracy", const)

  metric_log = os.path.join(log_dir, "metric.log")
  self.assertFalse(tf.gfile.Exists(metric_log))
Example 15: test_log_evaluation_result_with_invalid_type
# Required import: from official.utils.logs import logger
# Alternatively: from official.utils.logs.logger import BenchmarkFileLogger
# Variant of Example 10 using the TF1-style tf.gfile API.
def test_log_evaluation_result_with_invalid_type(self):
  eval_result = "{'loss': 0.46237424, 'global_step': 207082}"
  log_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
  log = logger.BenchmarkFileLogger(log_dir)
  log.log_evaluation_result(eval_result)

  metric_log = os.path.join(log_dir, "metric.log")
  self.assertFalse(tf.gfile.Exists(metric_log))