This page collects typical usage examples of object_detection.protos.pipeline_pb2.TrainEvalPipelineConfig in Python. If you have been wondering what exactly pipeline_pb2.TrainEvalPipelineConfig does, how to use it, or what working examples look like, the curated code samples below should help. You can also browse further usage examples for the containing module, object_detection.protos.pipeline_pb2.
The 15 code examples of pipeline_pb2.TrainEvalPipelineConfig shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: get_configs_from_pipeline_file
# Required import: from object_detection.protos import pipeline_pb2 [as alias]
# Or: from object_detection.protos.pipeline_pb2 import TrainEvalPipelineConfig [as alias]
def get_configs_from_pipeline_file():
  """Reads evaluation configuration from a pipeline_pb2.TrainEvalPipelineConfig.

  Reads evaluation config from file specified by pipeline_config_path flag.

  Returns:
    model_config: a model_pb2.DetectionModel
    eval_config: a eval_pb2.EvalConfig
    input_config: a input_reader_pb2.InputReader
  """
  pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
  with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as f:
    text_format.Merge(f.read(), pipeline_config)

  model_config = pipeline_config.model
  if FLAGS.eval_training_data:
    eval_config = pipeline_config.train_config
  else:
    eval_config = pipeline_config.eval_config
  input_config = pipeline_config.eval_input_reader

  return model_config, eval_config, input_config
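For reference, the file at FLAGS.pipeline_config_path is a text-format protobuf. The following is only an illustrative sketch of what such a file might contain (paths and values are placeholders, and almost all nested model settings are omitted), shown here being parsed the same way the function above does:

from google.protobuf import text_format
from object_detection.protos import pipeline_pb2

example_config_text = """
model {
  ssd {
    num_classes: 1
  }
}
train_config {
  batch_size: 24
  num_steps: 200000
}
train_input_reader {
  label_map_path: "path/to/label_map.pbtxt"
  tf_record_input_reader {
    input_path: "path/to/train.record"
  }
}
eval_config {
  num_examples: 100
}
eval_input_reader {
  label_map_path: "path/to/label_map.pbtxt"
  tf_record_input_reader {
    input_path: "path/to/eval.record"
  }
}
"""

# Parse the text proto into a TrainEvalPipelineConfig message.
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
text_format.Merge(example_config_text, pipeline_config)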
Example 2: test_export_frozen_graph_with_moving_averages
# Required import: from object_detection.protos import pipeline_pb2 [as alias]
# Or: from object_detection.protos.pipeline_pb2 import TrainEvalPipelineConfig [as alias]
def test_export_frozen_graph_with_moving_averages(self):
  checkpoint_path = os.path.join(self.get_temp_dir(), 'model-ckpt')
  self._save_checkpoint_from_mock_model(checkpoint_path,
                                        use_moving_averages=True)
  inference_graph_path = os.path.join(self.get_temp_dir(),
                                      'exported_graph.pb')
  with mock.patch.object(
      model_builder, 'build', autospec=True) as mock_builder:
    mock_builder.return_value = FakeModel()
    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
    pipeline_config.eval_config.use_moving_averages = True
    exporter.export_inference_graph(
        input_type='image_tensor',
        pipeline_config=pipeline_config,
        checkpoint_path=checkpoint_path,
        inference_graph_path=inference_graph_path)
Example 3: test_export_model_with_all_output_nodes
# Required import: from object_detection.protos import pipeline_pb2 [as alias]
# Or: from object_detection.protos.pipeline_pb2 import TrainEvalPipelineConfig [as alias]
def test_export_model_with_all_output_nodes(self):
  checkpoint_path = os.path.join(self.get_temp_dir(), 'model-ckpt')
  self._save_checkpoint_from_mock_model(checkpoint_path,
                                        use_moving_averages=False)
  inference_graph_path = os.path.join(self.get_temp_dir(),
                                      'exported_graph.pb')
  with mock.patch.object(
      model_builder, 'build', autospec=True) as mock_builder:
    mock_builder.return_value = FakeModel(add_detection_masks=True)
    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
    exporter.export_inference_graph(
        input_type='image_tensor',
        pipeline_config=pipeline_config,
        checkpoint_path=checkpoint_path,
        inference_graph_path=inference_graph_path)
  inference_graph = self._load_inference_graph(inference_graph_path)
  with self.test_session(graph=inference_graph):
    inference_graph.get_tensor_by_name('image_tensor:0')
    inference_graph.get_tensor_by_name('detection_boxes:0')
    inference_graph.get_tensor_by_name('detection_scores:0')
    inference_graph.get_tensor_by_name('detection_classes:0')
    inference_graph.get_tensor_by_name('detection_masks:0')
    inference_graph.get_tensor_by_name('num_detections:0')
Example 4: test_export_model_with_detection_only_nodes
# Required import: from object_detection.protos import pipeline_pb2 [as alias]
# Or: from object_detection.protos.pipeline_pb2 import TrainEvalPipelineConfig [as alias]
def test_export_model_with_detection_only_nodes(self):
  checkpoint_path = os.path.join(self.get_temp_dir(), 'model-ckpt')
  self._save_checkpoint_from_mock_model(checkpoint_path,
                                        use_moving_averages=False)
  inference_graph_path = os.path.join(self.get_temp_dir(),
                                      'exported_graph.pb')
  with mock.patch.object(
      model_builder, 'build', autospec=True) as mock_builder:
    mock_builder.return_value = FakeModel(add_detection_masks=False)
    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
    exporter.export_inference_graph(
        input_type='image_tensor',
        pipeline_config=pipeline_config,
        checkpoint_path=checkpoint_path,
        inference_graph_path=inference_graph_path)
  inference_graph = self._load_inference_graph(inference_graph_path)
  with self.test_session(graph=inference_graph):
    inference_graph.get_tensor_by_name('image_tensor:0')
    inference_graph.get_tensor_by_name('detection_boxes:0')
    inference_graph.get_tensor_by_name('detection_scores:0')
    inference_graph.get_tensor_by_name('detection_classes:0')
    inference_graph.get_tensor_by_name('num_detections:0')
    with self.assertRaises(KeyError):
      inference_graph.get_tensor_by_name('detection_masks:0')
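Examples 3 and 4 above call a _load_inference_graph helper that is not shown on this page. A minimal sketch of what it presumably does, assuming a TF1-style frozen GraphDef as in the rest of these examples (written here as a free function; in the test class it would take self):

import tensorflow as tf

def _load_inference_graph(inference_graph_path):
  # Read the frozen GraphDef from disk and import it into a fresh graph.
  graph = tf.Graph()
  with graph.as_default():
    graph_def = tf.GraphDef()
    with tf.gfile.Open(inference_graph_path, 'rb') as f:
      graph_def.ParseFromString(f.read())
    tf.import_graph_def(graph_def, name='')
  return graph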
Example 5: get_configs_from_pipeline_file
# Required import: from object_detection.protos import pipeline_pb2 [as alias]
# Or: from object_detection.protos.pipeline_pb2 import TrainEvalPipelineConfig [as alias]
def get_configs_from_pipeline_file():
  """Reads training configuration from a pipeline_pb2.TrainEvalPipelineConfig.

  Reads training config from file specified by pipeline_config_path flag.

  Returns:
    model_config: model_pb2.DetectionModel
    train_config: train_pb2.TrainConfig
    input_config: input_reader_pb2.InputReader
  """
  pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
  with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as f:
    text_format.Merge(f.read(), pipeline_config)

  model_config = pipeline_config.model
  train_config = pipeline_config.train_config
  input_config = pipeline_config.train_input_reader

  return model_config, train_config, input_config
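The configs returned by this function are typically handed on to the rest of the training pipeline. A hedged usage sketch follows; the exact wiring varies between versions of train.py, and FLAGS.pipeline_config_path is assumed to be defined and parsed elsewhere:

import functools
from object_detection.builders import model_builder

model_config, train_config, input_config = get_configs_from_pipeline_file()
# Build the detection model described by the `model` section of the config.
detection_model_fn = functools.partial(
    model_builder.build, model_config=model_config, is_training=True)
detection_model = detection_model_fn()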
Example 6: test_export_frozen_graph
# Required import: from object_detection.protos import pipeline_pb2 [as alias]
# Or: from object_detection.protos.pipeline_pb2 import TrainEvalPipelineConfig [as alias]
def test_export_frozen_graph(self):
  checkpoint_path = os.path.join(self.get_temp_dir(), 'model-ckpt')
  self._save_checkpoint_from_mock_model(checkpoint_path,
                                        use_moving_averages=False)
  inference_graph_path = os.path.join(self.get_temp_dir(),
                                      'exported_graph.pb')
  with mock.patch.object(
      model_builder, 'build', autospec=True) as mock_builder:
    mock_builder.return_value = FakeModel(num_classes=1)
    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
    pipeline_config.eval_config.use_moving_averages = False
    exporter.export_inference_graph(
        input_type='image_tensor',
        pipeline_config=pipeline_config,
        checkpoint_path=checkpoint_path,
        inference_graph_path=inference_graph_path)
Example 7: test_get_configs_from_pipeline_file
# Required import: from object_detection.protos import pipeline_pb2 [as alias]
# Or: from object_detection.protos.pipeline_pb2 import TrainEvalPipelineConfig [as alias]
def test_get_configs_from_pipeline_file(self):
  """Test that proto configs can be read from pipeline config file."""
  pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
  pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
  pipeline_config.model.faster_rcnn.num_classes = 10
  pipeline_config.train_config.batch_size = 32
  pipeline_config.train_input_reader.label_map_path = "path/to/label_map"
  pipeline_config.eval_config.num_examples = 20
  pipeline_config.eval_input_reader.add().queue_capacity = 100
  _write_config(pipeline_config, pipeline_config_path)
  configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
  self.assertProtoEquals(pipeline_config.model, configs["model"])
  self.assertProtoEquals(pipeline_config.train_config,
                         configs["train_config"])
  self.assertProtoEquals(pipeline_config.train_input_reader,
                         configs["train_input_config"])
  self.assertProtoEquals(pipeline_config.eval_config,
                         configs["eval_config"])
  self.assertProtoEquals(pipeline_config.eval_input_reader,
                         configs["eval_input_configs"])
Example 8: test_create_configs_from_pipeline_proto
# Required import: from object_detection.protos import pipeline_pb2 [as alias]
# Or: from object_detection.protos.pipeline_pb2 import TrainEvalPipelineConfig [as alias]
def test_create_configs_from_pipeline_proto(self):
  """Tests creating configs dictionary from pipeline proto."""
  pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
  pipeline_config.model.faster_rcnn.num_classes = 10
  pipeline_config.train_config.batch_size = 32
  pipeline_config.train_input_reader.label_map_path = "path/to/label_map"
  pipeline_config.eval_config.num_examples = 20
  pipeline_config.eval_input_reader.add().queue_capacity = 100
  configs = config_util.create_configs_from_pipeline_proto(pipeline_config)
  self.assertProtoEquals(pipeline_config.model, configs["model"])
  self.assertProtoEquals(pipeline_config.train_config,
                         configs["train_config"])
  self.assertProtoEquals(pipeline_config.train_input_reader,
                         configs["train_input_config"])
  self.assertProtoEquals(pipeline_config.eval_config, configs["eval_config"])
  self.assertProtoEquals(pipeline_config.eval_input_reader,
                         configs["eval_input_configs"])
Example 9: test_create_pipeline_proto_from_configs
# Required import: from object_detection.protos import pipeline_pb2 [as alias]
# Or: from object_detection.protos.pipeline_pb2 import TrainEvalPipelineConfig [as alias]
def test_create_pipeline_proto_from_configs(self):
  """Tests that proto can be reconstructed from configs dictionary."""
  pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
  pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
  pipeline_config.model.faster_rcnn.num_classes = 10
  pipeline_config.train_config.batch_size = 32
  pipeline_config.train_input_reader.label_map_path = "path/to/label_map"
  pipeline_config.eval_config.num_examples = 20
  pipeline_config.eval_input_reader.add().queue_capacity = 100
  _write_config(pipeline_config, pipeline_config_path)
  configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
  pipeline_config_reconstructed = (
      config_util.create_pipeline_proto_from_configs(configs))
  self.assertEqual(pipeline_config, pipeline_config_reconstructed)
Example 10: test_save_pipeline_config
# Required import: from object_detection.protos import pipeline_pb2 [as alias]
# Or: from object_detection.protos.pipeline_pb2 import TrainEvalPipelineConfig [as alias]
def test_save_pipeline_config(self):
  """Tests that the pipeline config is properly saved to disk."""
  pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
  pipeline_config.model.faster_rcnn.num_classes = 10
  pipeline_config.train_config.batch_size = 32
  pipeline_config.train_input_reader.label_map_path = "path/to/label_map"
  pipeline_config.eval_config.num_examples = 20
  pipeline_config.eval_input_reader.add().queue_capacity = 100
  config_util.save_pipeline_config(pipeline_config, self.get_temp_dir())
  configs = config_util.get_configs_from_pipeline_file(
      os.path.join(self.get_temp_dir(), "pipeline.config"))
  pipeline_config_reconstructed = (
      config_util.create_pipeline_proto_from_configs(configs))
  self.assertEqual(pipeline_config, pipeline_config_reconstructed)
Example 11: testNewMomentumOptimizerValue
# Required import: from object_detection.protos import pipeline_pb2 [as alias]
# Or: from object_detection.protos.pipeline_pb2 import TrainEvalPipelineConfig [as alias]
def testNewMomentumOptimizerValue(self):
  """Tests that new momentum value is updated appropriately."""
  original_momentum_value = 0.4
  hparams = tf.contrib.training.HParams(momentum_optimizer_value=1.1)
  pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
  pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
  optimizer_config = pipeline_config.train_config.optimizer.rms_prop_optimizer
  optimizer_config.momentum_optimizer_value = original_momentum_value
  _write_config(pipeline_config, pipeline_config_path)
  configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
  configs = config_util.merge_external_params_with_configs(configs, hparams)
  optimizer_config = configs["train_config"].optimizer.rms_prop_optimizer
  new_momentum_value = optimizer_config.momentum_optimizer_value
  self.assertAlmostEqual(1.0, new_momentum_value)  # Clipped to 1.0.
Example 12: testNewFocalLossParameters
# Required import: from object_detection.protos import pipeline_pb2 [as alias]
# Or: from object_detection.protos.pipeline_pb2 import TrainEvalPipelineConfig [as alias]
def testNewFocalLossParameters(self):
  """Tests that the loss weight ratio is updated appropriately."""
  original_alpha = 1.0
  original_gamma = 1.0
  new_alpha = 0.3
  new_gamma = 2.0
  hparams = tf.contrib.training.HParams(
      focal_loss_alpha=new_alpha, focal_loss_gamma=new_gamma)
  pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
  pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
  classification_loss = pipeline_config.model.ssd.loss.classification_loss
  classification_loss.weighted_sigmoid_focal.alpha = original_alpha
  classification_loss.weighted_sigmoid_focal.gamma = original_gamma
  _write_config(pipeline_config, pipeline_config_path)
  configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
  configs = config_util.merge_external_params_with_configs(configs, hparams)
  classification_loss = configs["model"].ssd.loss.classification_loss
  self.assertAlmostEqual(new_alpha,
                         classification_loss.weighted_sigmoid_focal.alpha)
  self.assertAlmostEqual(new_gamma,
                         classification_loss.weighted_sigmoid_focal.gamma)
Example 13: testMergingKeywordArguments
# Required import: from object_detection.protos import pipeline_pb2 [as alias]
# Or: from object_detection.protos.pipeline_pb2 import TrainEvalPipelineConfig [as alias]
def testMergingKeywordArguments(self):
  """Tests that keyword arguments get merged as do hyperparameters."""
  original_num_train_steps = 100
  desired_num_train_steps = 10
  pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
  pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
  pipeline_config.train_config.num_steps = original_num_train_steps
  _write_config(pipeline_config, pipeline_config_path)
  configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
  override_dict = {"train_steps": desired_num_train_steps}
  configs = config_util.merge_external_params_with_configs(
      configs, kwargs_dict=override_dict)
  train_steps = configs["train_config"].num_steps
  self.assertEqual(desired_num_train_steps, train_steps)
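For comparison with Examples 11 and 12, the same override could presumably also be passed as a hyperparameter object instead of a kwargs dict. A hedged sketch, assuming "train_steps" is accepted as a hyperparameter name and reusing the configs variable from the test above:

# Hypothetical equivalent of the kwargs_dict override via HParams.
hparams = tf.contrib.training.HParams(train_steps=10)
configs = config_util.merge_external_params_with_configs(configs, hparams)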
Example 14: testNewTrainInputPath
# Required import: from object_detection.protos import pipeline_pb2 [as alias]
# Or: from object_detection.protos.pipeline_pb2 import TrainEvalPipelineConfig [as alias]
def testNewTrainInputPath(self):
  """Tests that train input path can be overwritten with single file."""
  original_train_path = ["path/to/data"]
  new_train_path = "another/path/to/data"
  pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
  pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
  reader_config = pipeline_config.train_input_reader.tf_record_input_reader
  reader_config.input_path.extend(original_train_path)
  _write_config(pipeline_config, pipeline_config_path)
  configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
  override_dict = {"train_input_path": new_train_path}
  configs = config_util.merge_external_params_with_configs(
      configs, kwargs_dict=override_dict)
  reader_config = configs["train_input_config"].tf_record_input_reader
  final_path = reader_config.input_path
  self.assertEqual([new_train_path], final_path)
Example 15: testNewTrainInputPathList
# Required import: from object_detection.protos import pipeline_pb2 [as alias]
# Or: from object_detection.protos.pipeline_pb2 import TrainEvalPipelineConfig [as alias]
def testNewTrainInputPathList(self):
  """Tests that train input path can be overwritten with multiple files."""
  original_train_path = ["path/to/data"]
  new_train_path = ["another/path/to/data", "yet/another/path/to/data"]
  pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
  pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
  reader_config = pipeline_config.train_input_reader.tf_record_input_reader
  reader_config.input_path.extend(original_train_path)
  _write_config(pipeline_config, pipeline_config_path)
  configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
  override_dict = {"train_input_path": new_train_path}
  configs = config_util.merge_external_params_with_configs(
      configs, kwargs_dict=override_dict)
  reader_config = configs["train_input_config"].tf_record_input_reader
  final_path = reader_config.input_path
  self.assertEqual(new_train_path, final_path)
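Closing the loop on Examples 9, 10, 14, and 15: an overridden configs dictionary can be turned back into a single TrainEvalPipelineConfig and written out again. A hedged sketch combining the utilities already shown (the output directory is a placeholder):

# Rebuild a TrainEvalPipelineConfig proto from the configs dictionary and
# save it as <output_dir>/pipeline.config.
pipeline_proto = config_util.create_pipeline_proto_from_configs(configs)
config_util.save_pipeline_config(pipeline_proto, "path/to/output_dir")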