本文整理汇总了Python中google.protobuf.text_format.Parse方法的典型用法代码示例。如果您正苦于以下问题:Python text_format.Parse方法的具体用法?Python text_format.Parse怎么用?Python text_format.Parse使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块 google.protobuf.text_format 的用法示例。
在下文中一共展示了text_format.Parse方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: RunTraining
# 需要导入模块: from google.protobuf import text_format [as 别名]
# 或者: from google.protobuf.text_format import Parse [as 别名]
def RunTraining(self, hyperparam_config):
  """Builds a training graph from the linked master spec and runs one step.

  Args:
    hyperparam_config: A spec_pb2.GridPoint with the training hyperparameters.
  """
  master_spec = self.LoadSpec('master_spec_link.textproto')

  self.assertTrue(isinstance(hyperparam_config, spec_pb2.GridPoint))

  # Parse both dummy gold sentences and serialize them as reader input.
  reader_strings = []
  for sentence_text in (_DUMMY_GOLD_SENTENCE, _DUMMY_GOLD_SENTENCE_2):
    doc = sentence_pb2.Sentence()
    text_format.Parse(sentence_text, doc)
    reader_strings.append(doc.SerializeToString())

  tf.logging.info('Generating graph with config: %s', hyperparam_config)
  with tf.Graph().as_default():
    builder = graph_builder.MasterBuilder(master_spec, hyperparam_config)

    target = spec_pb2.TrainTarget()
    target.name = 'testTraining-all'
    train = builder.add_training_from_config(target)

    with self.test_session() as sess:
      logging.info('Initializing')
      sess.run(tf.global_variables_initializer())

      # Run one iteration of training and verify nothing crashes.
      logging.info('Training')
      sess.run(train['run'], feed_dict={train['input_batch']: reader_strings})
示例2: testFailsOnFixedFeature
# 需要导入模块: from google.protobuf import text_format [as 别名]
# 或者: from google.protobuf.text_format import Parse [as 别名]
def testFailsOnFixedFeature(self):
  """A fixed feature with a real embedding must fail at training-graph time."""
  spec = spec_pb2.ComponentSpec()
  text_format.Parse("""
      name: "annotate"
      network_unit {
        registered_name: "IdentityNetwork"
      }
      fixed_feature {
        name: "fixed" embedding_dim: 32 size: 1
      }
      """, spec)
  with tf.Graph().as_default():
    comp = bulk_component.BulkAnnotatorComponentBuilder(self.master, spec)

    # Feature extraction should raise at graph-build time because this
    # builder cannot handle fixed features with an embedding.
    with self.assertRaises(RuntimeError):
      comp.build_greedy_training(self.master_state, self.network_states)
示例3: testBulkFeatureIdExtractorOkWithOneFixedFeature
# 需要导入模块: from google.protobuf import text_format [as 别名]
# 或者: from google.protobuf.text_format import Parse [as 别名]
def testBulkFeatureIdExtractorOkWithOneFixedFeature(self):
  """A single pass-through fixed feature should build without errors."""
  spec = spec_pb2.ComponentSpec()
  text_format.Parse("""
      name: "test"
      network_unit {
        registered_name: "IdentityNetwork"
      }
      fixed_feature {
        name: "fixed" embedding_dim: -1 size: 1
      }
      """, spec)
  with tf.Graph().as_default():
    comp = bulk_component.BulkFeatureIdExtractorComponentBuilder(
        self.master, spec)

    # Both the training and inference graphs should build cleanly, each
    # against a fresh network state.
    for build_fn in (comp.build_greedy_training, comp.build_greedy_inference):
      self.network_states[spec.name] = component.NetworkState()
      build_fn(self.master_state, self.network_states)
示例4: testBulkFeatureIdExtractorFailsOnLinkedFeature
# 需要导入模块: from google.protobuf import text_format [as 别名]
# 或者: from google.protobuf.text_format import Parse [as 别名]
def testBulkFeatureIdExtractorFailsOnLinkedFeature(self):
  """The bulk ID extractor must reject specs that contain linked features."""
  spec = spec_pb2.ComponentSpec()
  text_format.Parse("""
      name: "test"
      network_unit {
        registered_name: "IdentityNetwork"
      }
      fixed_feature {
        name: "fixed" embedding_dim: -1 size: 1
      }
      linked_feature {
        name: "linked" embedding_dim: -1 size: 1
        source_translator: "identity"
        source_component: "mock"
      }
      """, spec)
  with tf.Graph().as_default():
    # Construction itself should fail: linked features are unsupported here.
    with self.assertRaises(ValueError):
      unused_comp = bulk_component.BulkFeatureIdExtractorComponentBuilder(
          self.master, spec)
示例5: main
# 需要导入模块: from google.protobuf import text_format [as 别名]
# 或者: from google.protobuf.text_format import Parse [as 别名]
def main(unused_argv):
  """Runs inference over the bounding box configured via command-line flags."""
  request = inference_flags.request_from_flags()

  # Make sure the output directory exists before the runner writes into it.
  if not gfile.Exists(request.segmentation_output_dir):
    gfile.MakeDirs(request.segmentation_output_dir)

  bbox = bounding_box_pb2.BoundingBox()
  text_format.Parse(FLAGS.bounding_box, bbox)

  runner = inference.Runner()
  runner.start(request)
  # The runner expects (z, y, x) ordering for both the corner and the size.
  corner_zyx = (bbox.start.z, bbox.start.y, bbox.start.x)
  size_zyx = (bbox.size.z, bbox.size.y, bbox.size.x)
  runner.run(corner_zyx, size_zyx)

  counter_path = os.path.join(request.segmentation_output_dir, 'counters.txt')
  # Write counters only if no dump exists yet; never clobber a previous run.
  if not gfile.Exists(counter_path):
    runner.counters.dump(counter_path)
示例6: LoadInferenceGraph
# 需要导入模块: from google.protobuf import text_format [as 别名]
# 或者: from google.protobuf.text_format import Parse [as 别名]
def LoadInferenceGraph(path, clear_device_placement=False):
  """Parse the given path as an InferenceGraph proto.

  Args:
    path: The path to the file to load.
    clear_device_placement: If true, clears device field from nodes in graph.

  Returns:
    An InferenceGraph object.
  """
  graph_proto = inference_graph_pb2.InferenceGraph()
  with tf.io.gfile.GFile(path, "r") as src:
    text_format.Parse(src.read(), graph_proto)
  if clear_device_placement:
    # Strip device assignments from the main graph and from every function
    # in its library so the graph is not pinned to specific devices.
    for node in graph_proto.graph_def.node:
      node.ClearField("device")
    for func in graph_proto.graph_def.library.function:
      for node_def in func.node_def:
        node_def.ClearField("device")
  return graph_proto
示例7: get_proto
# 需要导入模块: from google.protobuf import text_format [as 别名]
# 或者: from google.protobuf.text_format import Parse [as 别名]
def get_proto(push_list, output_dir):
  """Parses the benchmark and model meta textprotos and writes binary copies.

  Args:
    push_list: List to which the generated .pb file paths are appended.
    output_dir: Directory in which the binary protos are written.

  Returns:
    A (bench_factory, model_factory) tuple of the parsed protos.

  Raises:
    IOError: If either meta file cannot be parsed as a textproto.
  """
  bench_factory = aibench_pb2.BenchFactory()
  model_factory = aibench_pb2.ModelFactory()

  def _parse_and_dump(meta_path, proto, pb_name):
    # Parse the textproto and serialize it as a binary .pb push artifact.
    with open(meta_path, "rb") as fin:
      text_format.Parse(fin.read(), proto)
    pb_path = output_dir + "/" + pb_name
    with open(pb_path, "wb") as fout:
      fout.write(proto.SerializeToString())
    push_list.append(pb_path)

  try:
    _parse_and_dump("aibench/proto/benchmark.meta", bench_factory,
                    "benchmark.pb")
    _parse_and_dump("aibench/proto/model.meta", model_factory, "model.pb")
  except text_format.ParseError as e:
    raise IOError("Cannot parse file.", e)
  return bench_factory, model_factory
示例8: testParseExotic
# 需要导入模块: from google.protobuf import text_format [as 别名]
# 或者: from google.protobuf.text_format import Parse [as 别名]
def testParseExotic(self, message_module):
  """Parses boundary integers, exotic doubles, and escaped string forms."""
  msg = message_module.TestAllTypes()
  text = ('repeated_int64: -9223372036854775808\n'
          'repeated_uint64: 18446744073709551615\n'
          'repeated_double: 123.456\n'
          'repeated_double: 1.23e+22\n'
          'repeated_double: 1.23e-18\n'
          'repeated_string: \n'
          '"\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""\n'
          'repeated_string: "foo" \'corge\' "grault"\n'
          'repeated_string: "\\303\\274\\352\\234\\237"\n'
          'repeated_string: "\\xc3\\xbc"\n'
          'repeated_string: "\xc3\xbc"\n')
  text_format.Parse(text, msg)

  # int64 min and uint64 max must survive parsing exactly.
  self.assertEqual(-9223372036854775808, msg.repeated_int64[0])
  self.assertEqual(18446744073709551615, msg.repeated_uint64[0])
  # Plain, large-exponent, and small-exponent doubles.
  for index, expected in enumerate((123.456, 1.23e22, 1.23e-18)):
    self.assertEqual(expected, msg.repeated_double[index])
  # Octal/control escapes; implicit concatenation of adjacent quoted
  # pieces; octal-encoded UTF-8; hex escapes; raw non-ASCII bytes.
  self.assertEqual('\000\001\a\b\f\n\r\t\v\\\'"', msg.repeated_string[0])
  self.assertEqual('foocorgegrault', msg.repeated_string[1])
  self.assertEqual('\u00fc\ua71f', msg.repeated_string[2])
  self.assertEqual('\u00fc', msg.repeated_string[3])
示例9: testParseStringFieldUnescape
# 需要导入模块: from google.protobuf import text_format [as 别名]
# 或者: from google.protobuf.text_format import Parse [as 别名]
def testParseStringFieldUnescape(self, message_module):
  """Checks unescaping of backslash and hex sequences in string fields."""
  msg = message_module.TestAllTypes()
  text = r'''repeated_string: "\xf\x62"
             repeated_string: "\\xf\\x62"
             repeated_string: "\\\xf\\\x62"
             repeated_string: "\\\\xf\\\\x62"
             repeated_string: "\\\\\xf\\\\\x62"
             repeated_string: "\x5cx20"'''
  text_format.Parse(text, msg)

  SLASH = '\\'
  # "\xf\x62" decodes to the byte 0x0F followed by 'b'.
  self.assertEqual('\x0fb', msg.repeated_string[0])
  # A doubled backslash keeps the following characters literal.
  self.assertEqual(SLASH + 'xf' + SLASH + 'x62', msg.repeated_string[1])
  self.assertEqual(SLASH + '\x0f' + SLASH + 'b', msg.repeated_string[2])
  self.assertEqual(SLASH + SLASH + 'xf' + SLASH + SLASH + 'x62',
                   msg.repeated_string[3])
  self.assertEqual(SLASH + SLASH + '\x0f' + SLASH + SLASH + 'b',
                   msg.repeated_string[4])
  # "\x5c" is itself an escaped backslash.
  self.assertEqual(SLASH + 'x20', msg.repeated_string[5])
示例10: testParseMessageSet
# 需要导入模块: from google.protobuf import text_format [as 别名]
# 或者: from google.protobuf.text_format import Parse [as 别名]
def testParseMessageSet(self):
  """Parses a plain repeated field and then a MessageSet with extensions."""
  message = unittest_pb2.TestAllTypes()
  text_format.Parse('repeated_uint64: 1\n' 'repeated_uint64: 2\n', message)
  self.assertEqual(1, message.repeated_uint64[0])
  self.assertEqual(2, message.repeated_uint64[1])

  container = unittest_mset_pb2.TestMessageSetContainer()
  text_format.Parse('message_set {\n'
                    '  [protobuf_unittest.TestMessageSetExtension1] {\n'
                    '    i: 23\n'
                    '  }\n'
                    '  [protobuf_unittest.TestMessageSetExtension2] {\n'
                    '    str: "foo"\n'
                    '  }\n'
                    '}\n', container)
  # MessageSet members are reached through the Extensions map.
  ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
  ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
  self.assertEqual(23, container.message_set.Extensions[ext1].i)
  self.assertEqual('foo', container.message_set.Extensions[ext2].str)
示例11: testMergeExpandedAny
# 需要导入模块: from google.protobuf import text_format [as 别名]
# 或者: from google.protobuf.text_format import Parse [as 别名]
def testMergeExpandedAny(self):
  """Both Merge and Parse must accept an expanded (inlined) Any value."""
  message = any_test_pb2.TestAny()
  text = ('any_value {\n'
          ' [type.googleapis.com/protobuf_unittest.OneString] {\n'
          '  data: "string"\n'
          ' }\n'
          '}\n')

  def _assert_unpacks(msg):
    # The Any field should unpack into a OneString carrying the parsed data.
    unpacked = unittest_pb2.OneString()
    msg.any_value.Unpack(unpacked)
    self.assertEqual('string', unpacked.data)

  text_format.Merge(text, message)
  _assert_unpacks(message)

  message.Clear()
  text_format.Parse(text, message)
  _assert_unpacks(message)
示例12: testCreateLexiconContext
# 需要导入模块: from google.protobuf import text_format [as 别名]
# 或者: from google.protobuf.text_format import Parse [as 别名]
def testCreateLexiconContext(self):
  """The generated lexicon context must equal the expected textproto."""
  expected = task_spec_pb2.TaskSpec()
  text_format.Parse(_EXPECTED_CONTEXT, expected)
  self.assertProtoEquals(lexicon.create_lexicon_context('/tmp'), expected)
示例13: LoadSpec
# 需要导入模块: from google.protobuf import text_format [as 别名]
# 或者: from google.protobuf.text_format import Parse [as 别名]
def LoadSpec(self, spec_path):
  """Loads a MasterSpec textproto from the test data directory.

  Args:
    spec_path: Filename of the spec, relative to the testdata directory.

  Returns:
    The parsed spec_pb2.MasterSpec, with every 'TESTDATA' placeholder
    replaced by the absolute testdata path.
  """
  master_spec = spec_pb2.MasterSpec()
  testdata = os.path.join(FLAGS.test_srcdir,
                          'dragnn/core/testdata')
  # Bug fix: the builtin `file()` is Python 2 only (removed in Python 3);
  # `open()` is the portable equivalent.
  with open(os.path.join(testdata, spec_path), 'r') as fin:
    text_format.Parse(fin.read().replace('TESTDATA', testdata), master_spec)
  return master_spec
示例14: testFailsOnNonIdentityTranslator
# 需要导入模块: from google.protobuf import text_format [as 别名]
# 或者: from google.protobuf.text_format import Parse [as 别名]
def testFailsOnNonIdentityTranslator(self):
  """Linked features using a 'history' translator must be rejected."""
  spec = spec_pb2.ComponentSpec()
  text_format.Parse("""
      name: "test"
      network_unit {
        registered_name: "IdentityNetwork"
      }
      linked_feature {
        name: "features" embedding_dim: -1 size: 1
        source_translator: "history"
        source_component: "mock"
      }
      """, spec)
  # Building the training graph should raise NotImplementedError for both
  # the feature-extractor and the annotator builders, because neither
  # supports the non-identity "history" translator.
  for builder_cls in (bulk_component.BulkFeatureExtractorComponentBuilder,
                      bulk_component.BulkAnnotatorComponentBuilder):
    with tf.Graph().as_default():
      comp = builder_cls(self.master, spec)
      with self.assertRaises(NotImplementedError):
        comp.build_greedy_training(self.master_state, self.network_states)
示例15: testFailsOnRecurrentLinkedFeature
# 需要导入模块: from google.protobuf import text_format [as 别名]
# 或者: from google.protobuf.text_format import Parse [as 别名]
def testFailsOnRecurrentLinkedFeature(self):
  """A linked feature that points back at its own component must fail."""
  spec = spec_pb2.ComponentSpec()
  text_format.Parse("""
      name: "test"
      network_unit {
        registered_name: "FeedForwardNetwork"
        parameters {
          key: 'hidden_layer_sizes' value: '64'
        }
      }
      linked_feature {
        name: "features" embedding_dim: -1 size: 1
        source_translator: "identity"
        source_component: "test"
        source_layer: "layer_0"
      }
      """, spec)
  # The linked feature's source_component is this same component ("test"),
  # i.e. a recurrent link; both bulk builders should raise RuntimeError
  # when building the training graph.
  for builder_cls in (bulk_component.BulkFeatureExtractorComponentBuilder,
                      bulk_component.BulkAnnotatorComponentBuilder):
    with tf.Graph().as_default():
      comp = builder_cls(self.master, spec)
      with self.assertRaises(RuntimeError):
        comp.build_greedy_training(self.master_state, self.network_states)