This article collects typical usage examples of the Python method syntaxnet.ops.gen_parser_ops.beam_parser. If you are looking for concrete guidance on how gen_parser_ops.beam_parser is called in practice, the code examples selected below may help. You can also explore further usage examples from the module in which this op is defined, syntaxnet.ops.gen_parser_ops.
Three code examples of the gen_parser_ops.beam_parser method are shown below.
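Before the individual examples, it may help to see the control-flow pattern they all share in isolation: per-step logits are written into a TensorArray inside tf.while_loop, an alive mask tracks which batch elements are still parsing, and the loop ends once every element is dead or max_steps is reached. The sketch below is a minimal, self-contained stand-in (TensorFlow 1.x graph mode, no SyntaxNet required); FakeStep, num_actions, and the other names are illustrative assumptions, not part of the SyntaxNet API.

import tensorflow as tf

batch_size, max_steps, num_actions = 4, 10, 3  # made-up sizes

def FakeStep(step, alive):
  # Hypothetical stand-in for gen_parser_ops.beam_parser: every element of
  # the batch simply "dies" once five steps have been taken.
  return tf.logical_and(alive, step < 5)

def Advance(step, scores_array, alive, alive_steps):
  scores = tf.random_uniform([batch_size, num_actions])   # stand-in logits
  scores_array = scores_array.write(step, scores)         # record this step
  alive = FakeStep(step, alive)
  return [step + 1, scores_array, alive,
          alive_steps + tf.cast(alive, tf.int32)]

def KeepGoing(step, scores_array, alive, alive_steps):
  # Stop when the step budget is exhausted or when no element is alive.
  return tf.logical_and(step < max_steps, tf.reduce_any(alive))

step = tf.constant(0, tf.int32)
scores_array = tf.TensorArray(tf.float32, size=0, dynamic_size=True)
alive = tf.constant(True, tf.bool, [batch_size])
alive_steps = tf.constant(0, tf.int32, [batch_size])

t = tf.while_loop(KeepGoing, Advance,
                  [step, scores_array, alive, alive_steps])

with tf.Session() as sess:
  scores, steps = sess.run([t[1].concat(), t[3]])
  print(scores.shape)   # (steps_taken * batch_size, num_actions)
  print(steps)          # per-element count of steps spent alive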
Example 1: _BuildSequence
# Required import: from syntaxnet.ops import gen_parser_ops [as alias]
# Or: from syntaxnet.ops.gen_parser_ops import beam_parser [as alias]
# Also required: import tensorflow as tf
#                from tensorflow.python.ops import tensor_array_ops
def _BuildSequence(self,
                   batch_size,
                   max_steps,
                   features,
                   state,
                   use_average=False):
  """Adds a sequence of beam parsing steps."""
  def Advance(state, step, scores_array, alive, alive_steps, *features):
    scores = self._BuildNetwork(features,
                                return_average=use_average)['logits']
    scores_array = scores_array.write(step, scores)
    features, state, alive = (
        gen_parser_ops.beam_parser(state, scores, self._feature_size))
    return [state, step + 1, scores_array, alive, alive_steps + tf.cast(
        alive, tf.int32)] + list(features)

  # args: (state, step, scores_array, alive, alive_steps, *features)
  def KeepGoing(*args):
    return tf.logical_and(args[1] < max_steps, tf.reduce_any(args[3]))

  step = tf.constant(0, tf.int32, [])
  scores_array = tensor_array_ops.TensorArray(dtype=tf.float32,
                                              size=0,
                                              dynamic_size=True)
  alive = tf.constant(True, tf.bool, [batch_size])
  alive_steps = tf.constant(0, tf.int32, [batch_size])
  t = tf.while_loop(
      KeepGoing,
      Advance,
      [state, step, scores_array, alive, alive_steps] + list(features),
      shape_invariants=[tf.TensorShape(None)] * (len(features) + 5),
      parallel_iterations=100)

  # Link to the final nodes/values of ops that have passed through While:
  return {'state': t[0],
          'concat_scores': t[2].concat(),
          'alive': t[3],
          'alive_steps': t[4]}
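Example 1 relaxes every loop-variable shape with shape_invariants=[tf.TensorShape(None)] * (len(features) + 5), which tells tf.while_loop not to require a fixed shape across iterations; presumably the feature tensors returned by beam_parser can change size as the beam evolves. A minimal sketch, unrelated to SyntaxNet, of what that relaxation permits:

import tensorflow as tf

def Grow(i, x):
  return [i + 1, tf.concat([x, x], axis=0)]   # x doubles in length each step

i = tf.constant(0)
x = tf.zeros([1])

# Without the relaxed invariant for x, tf.while_loop would reject this loop
# because x's static shape changes from (1,) to (2,) after one iteration.
_, grown = tf.while_loop(
    lambda i, x: i < 3, Grow, [i, x],
    shape_invariants=[i.get_shape(), tf.TensorShape([None])])

with tf.Session() as sess:
  print(sess.run(grown).shape)   # (8,)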
Example 2: _BuildSequence
# Required import: from syntaxnet.ops import gen_parser_ops [as alias]
# Or: from syntaxnet.ops.gen_parser_ops import beam_parser [as alias]
# Also required: import tensorflow as tf
#                from tensorflow.python.ops import tensor_array_ops
def _BuildSequence(self,
                   batch_size,
                   max_steps,
                   features,
                   state,
                   use_average=False):
  """Adds a sequence of beam parsing steps."""
  def Advance(state, step, scores_array, alive, alive_steps, *features):
    scores = self._BuildNetwork(features,
                                return_average=use_average)['logits']
    scores_array = scores_array.write(step, scores)
    features, state, alive = (
        gen_parser_ops.beam_parser(state, scores, self._feature_size))
    return [state, step + 1, scores_array, alive, alive_steps + tf.cast(
        alive, tf.int32)] + list(features)

  # args: (state, step, scores_array, alive, alive_steps, *features)
  def KeepGoing(*args):
    return tf.logical_and(args[1] < max_steps, tf.reduce_any(args[3]))

  step = tf.constant(0, tf.int32, [])
  scores_array = tensor_array_ops.TensorArray(dtype=tf.float32,
                                              size=0,
                                              dynamic_size=True)
  alive = tf.constant(True, tf.bool, [batch_size])
  alive_steps = tf.constant(0, tf.int32, [batch_size])
  t = tf.while_loop(
      KeepGoing,
      Advance,
      [state, step, scores_array, alive, alive_steps] + list(features),
      parallel_iterations=100)

  # Link to the final nodes/values of ops that have passed through While:
  return {'state': t[0],
          'concat_scores': t[2].concat(),
          'alive': t[3],
          'alive_steps': t[4]}
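Example 2 is nearly identical to Example 1 but drops the shape_invariants argument, so tf.while_loop enforces that every loop variable keeps the shape it enters the loop with; this only works if the feature and score tensors have a fixed shape at every step. A small illustration, unrelated to SyntaxNet, of what that stricter check rejects:

import tensorflow as tf

i = tf.constant(0)
x = tf.zeros([1])
try:
  tf.while_loop(lambda i, x: i < 3,
                lambda i, x: [i + 1, tf.concat([x, x], axis=0)],
                [i, x])                      # x changes shape -> ValueError
except ValueError as err:
  print('rejected without shape_invariants:', err)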
Example 3: _BuildSequence
# Required import: from syntaxnet.ops import gen_parser_ops [as alias]
# Or: from syntaxnet.ops.gen_parser_ops import beam_parser [as alias]
# Also required: import tensorflow as tf
#                from tensorflow.python.ops import tensor_array_ops
def _BuildSequence(self,
                   batch_size,
                   max_steps,
                   features,
                   state,
                   use_average=False):
  """Adds a sequence of beam parsing steps."""
  def Advance(state, step, scores_array, alive, alive_steps, *features):
    scores = self._BuildNetwork(features,
                                return_average=use_average)['logits']
    scores_array = scores_array.write(step, scores)
    features, state, alive = (
        gen_parser_ops.beam_parser(state, scores, self._feature_size))
    return [state, step + 1, scores_array, alive, alive_steps + tf.cast(
        alive, tf.int32)] + list(features)

  # args: (state, step, scores_array, alive, alive_steps, *features)
  def KeepGoing(*args):
    return tf.logical_and(args[1] < max_steps, tf.reduce_any(args[3]))

  step = tf.constant(0, tf.int32, [])
  scores_array = tensor_array_ops.TensorArray(
      dtype=tf.float32, size=0, infer_shape=False, dynamic_size=True)
  alive = tf.constant(True, tf.bool, [batch_size])
  alive_steps = tf.constant(0, tf.int32, [batch_size])
  t = tf.while_loop(
      KeepGoing,
      Advance,
      [state, step, scores_array, alive, alive_steps] + list(features),
      shape_invariants=[tf.TensorShape(None)] * (len(features) + 5),
      parallel_iterations=100)

  # Link to the final nodes/values of ops that have passed through While:
  return {'state': t[0],
          'concat_scores': t[2].concat(),
          'alive': t[3],
          'alive_steps': t[4]}
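Example 3 differs from Example 1 only in constructing the TensorArray with infer_shape=False. With the default infer_shape=True, a TensorArray requires every written element to have the same static shape; disabling the check allows per-step score tensors of different sizes to be accumulated, which again appears intended to accommodate a beam whose size varies. A standalone sketch of that behaviour, with made-up shapes:

import tensorflow as tf

ta = tf.TensorArray(tf.float32, size=0, dynamic_size=True, infer_shape=False)
ta = ta.write(0, tf.zeros([2, 3]))
ta = ta.write(1, tf.zeros([4, 3]))    # different leading dimension is fine

with tf.Session() as sess:
  print(sess.run(ta.concat()).shape)  # (6, 3) -- concatenated along axis 0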