This page collects typical usage examples of the Python method syntaxnet.ops.gen_parser_ops.feature_size. If you have been wondering what gen_parser_ops.feature_size does, how to call it, or where it is used in practice, the curated examples here may help. You can also explore the other members of the syntaxnet.ops.gen_parser_ops module.
Three code examples of gen_parser_ops.feature_size are shown below, sorted by popularity by default.
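Before the examples, here is a minimal sketch of the basic call pattern. The context path is a placeholder, and the output names follow the order in which the examples below unpack the results; reading the third output (which the tests discard) as per-group embedding dimensions is an assumption:

import tensorflow as tf
from syntaxnet.ops import gen_parser_ops

# Placeholder path; the context must already reference built term maps,
# which is why every setUp below first runs gen_parser_ops.lexicon_builder.
task_context = '/path/to/context.pbtxt'
sizes = gen_parser_ops.feature_size(task_context=task_context,
                                    arg_prefix='brain_parser')
with tf.Session() as sess:
  num_features, num_feature_ids, embedding_dims, num_actions = sess.run(sizes)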
Example 1: setUp
# Required import: from syntaxnet.ops import gen_parser_ops [as alias]
# Or: from syntaxnet.ops.gen_parser_ops import feature_size [as alias]
def setUp(self):
  # Creates a task context with the correct testing paths.
  initial_task_context = os.path.join(FLAGS.test_srcdir,
                                      'syntaxnet/'
                                      'testdata/context.pbtxt')
  self._task_context = os.path.join(FLAGS.test_tmpdir, 'context.pbtxt')
  with open(initial_task_context, 'r') as fin:
    with open(self._task_context, 'w') as fout:
      fout.write(fin.read().replace('SRCDIR', FLAGS.test_srcdir)
                 .replace('OUTPATH', FLAGS.test_tmpdir))
  # Creates necessary term maps.
  with self.test_session() as sess:
    gen_parser_ops.lexicon_builder(task_context=self._task_context,
                                   corpus_name='training-corpus').run()
    self._num_features, self._num_feature_ids, _, self._num_actions = (
        sess.run(gen_parser_ops.feature_size(task_context=self._task_context,
                                             arg_prefix='brain_parser')))
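The four outputs unpacked above are typically consumed when building the parser network: each feature group gets an embedding matrix sized by its id domain. The sketch below is a hypothetical illustration of that downstream use, not part of the test; the variable names and the embedding-matrix construction are assumptions:

import tensorflow as tf
from syntaxnet.ops import gen_parser_ops

with tf.Session() as sess:
  num_features, num_feature_ids, embedding_dims, num_actions = sess.run(
      gen_parser_ops.feature_size(task_context='context.pbtxt',
                                  arg_prefix='brain_parser'))
  # One embedding matrix per feature group: rows cover the group's id
  # domain, columns are that group's embedding dimensionality.
  embedding_matrices = [
      tf.get_variable('embedding_matrix_%d' % i,
                      [num_feature_ids[i], embedding_dims[i]])
      for i in range(len(num_features))
  ]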
Example 2: setUp
# Required import: from syntaxnet.ops import gen_parser_ops [as alias]
# Or: from syntaxnet.ops.gen_parser_ops import feature_size [as alias]
def setUp(self):
  # Creates a task context with the correct testing paths.
  initial_task_context = os.path.join(test_flags.source_root(),
                                      'syntaxnet/'
                                      'testdata/context.pbtxt')
  self._task_context = os.path.join(test_flags.temp_dir(), 'context.pbtxt')
  with open(initial_task_context, 'r') as fin:
    with open(self._task_context, 'w') as fout:
      fout.write(fin.read().replace('SRCDIR', test_flags.source_root())
                 .replace('OUTPATH', test_flags.temp_dir()))
  # Creates necessary term maps.
  with self.test_session() as sess:
    gen_parser_ops.lexicon_builder(task_context=self._task_context,
                                   corpus_name='training-corpus').run()
    self._num_features, self._num_feature_ids, _, self._num_actions = (
        sess.run(gen_parser_ops.feature_size(task_context=self._task_context,
                                             arg_prefix='brain_parser')))
Example 3: testParsingReaderOpWhileLoop
# Required import: from syntaxnet.ops import gen_parser_ops [as alias]
# Or: from syntaxnet.ops.gen_parser_ops import feature_size [as alias]
def testParsingReaderOpWhileLoop(self):
  feature_size = 3
  batch_size = 5

  def ParserEndpoints():
    return gen_parser_ops.gold_parse_reader(self._task_context,
                                            feature_size,
                                            batch_size,
                                            corpus_name='training-corpus')

  with self.test_session() as sess:
    # The 'condition' and 'body' functions expect as many arguments as there
    # are loop variables. 'condition' depends on the 'epoch' loop variable
    # only, so we disregard the remaining unused function arguments. 'body'
    # returns a list of updated loop variables.
    def Condition(epoch, *unused_args):
      return tf.less(epoch, 2)

    def Body(epoch, num_actions, *feature_args):
      # By adding one of the outputs of the reader op ('epoch') as a control
      # dependency to the reader op we force the repeated evaluation of the
      # reader op.
      with epoch.graph.control_dependencies([epoch]):
        features, epoch, gold_actions = ParserEndpoints()
      num_actions = tf.maximum(num_actions,
                               tf.reduce_max(gold_actions, [0], False) + 1)
      feature_ids = []
      for i in range(len(feature_args)):
        feature_ids.append(features[i])
      return [epoch, num_actions] + feature_ids

    epoch = ParserEndpoints()[-2]
    num_actions = tf.constant(0)
    loop_vars = [epoch, num_actions]
    res = sess.run(
        tf.while_loop(Condition, Body, loop_vars,
                      shape_invariants=[tf.TensorShape(None)] * 2,
                      parallel_iterations=1))
    logging.info('Result: %s', res)
    self.assertEqual(res[0], 2)
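The control-dependency trick in Body above is not specific to the SyntaxNet reader. The following stripped-down sketch shows the same pattern in TF 1.x graph mode, with tf.assign_add on a variable standing in for the stateful reader op; it illustrates the technique under those assumptions and is not code from the test:

import tensorflow as tf

counter = tf.Variable(0)

def ReadNext():
  # Stand-in for a stateful reader op: each evaluation advances state.
  return tf.assign_add(counter, 1)

def Condition(value):
  return tf.less(value, 5)

def Body(value):
  # As in the test above, the control dependency orders the stateful read
  # after the previous iteration's value.
  with value.graph.control_dependencies([value]):
    value = ReadNext()
  return [value]

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  result = sess.run(tf.while_loop(Condition, Body, [tf.constant(0)],
                                  parallel_iterations=1))
  print(result)  # [5]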