This article collects typical usage examples of the Python method dragnn.python.digraph_ops.CombineArcAndRootPotentials. If you are wondering what exactly digraph_ops.CombineArcAndRootPotentials does and how to use it, the curated code examples below may help. You can also explore other usages of the dragnn.python.digraph_ops module.
Below are 4 code examples of digraph_ops.CombineArcAndRootPotentials, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
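Before the examples, a note on what the op computes: judging from the test expectations in Examples 1 and 2 below, CombineArcAndRootPotentials takes a batch of [n, n] arc-potential matrices and a batch of length-n root-potential vectors, and returns the arc matrices with their main diagonals overwritten by the root potentials. A minimal NumPy sketch of that behavior, inferred from the tests rather than taken from DRAGNN's implementation:

import numpy as np

def combine_arc_and_root_potentials(arcs, roots):
  """Returns |arcs| with the diagonal of each matrix replaced by |roots|."""
  combined = arcs.copy()
  n = combined.shape[1]
  combined[:, np.arange(n), np.arange(n)] = roots
  return combined

# Reproduces Example 1 below: diagonals [1, 3, 5] and [3, 3, 3] are
# replaced by the root potentials [6, 7, 8] and [8, 7, 6].
arcs = np.array([[[1, 2, 3], [2, 3, 4], [3, 4, 5]],
                 [[3, 4, 5], [2, 3, 4], [1, 2, 3]]], np.float32)
roots = np.array([[6, 7, 8], [8, 7, 6]], np.float32)
print(combine_arc_and_root_potentials(arcs, roots))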
Example 1: testCombineArcAndRootPotentials
# Required import: from dragnn.python import digraph_ops [as alias]
# Alternatively: from dragnn.python.digraph_ops import CombineArcAndRootPotentials [as alias]
def testCombineArcAndRootPotentials(self):
  with self.test_session():
    arcs = tf.constant([[[1, 2, 3],
                         [2, 3, 4],
                         [3, 4, 5]],
                        [[3, 4, 5],
                         [2, 3, 4],
                         [1, 2, 3]]], tf.float32)
    roots = tf.constant([[6, 7, 8],
                         [8, 7, 6]], tf.float32)

    potentials = digraph_ops.CombineArcAndRootPotentials(arcs, roots)

    self.assertAllEqual(potentials.eval(), [[[6, 2, 3],
                                             [2, 7, 4],
                                             [3, 4, 8]],
                                            [[8, 4, 5],
                                             [2, 7, 4],
                                             [1, 2, 6]]])
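Outside a test harness, the op can be called like any other graph op. A hedged usage sketch, assuming TensorFlow 1.x (where tf.Session is available) and a batch of one 2-token sentence:

import tensorflow as tf
from dragnn.python import digraph_ops

# [batch=1, n=2, n=2] arc potentials and [batch=1, n=2] root potentials.
arcs = tf.constant([[[0.1, 0.2],
                     [0.3, 0.4]]], tf.float32)
roots = tf.constant([[0.9, 0.8]], tf.float32)
potentials = digraph_ops.CombineArcAndRootPotentials(arcs, roots)

with tf.Session() as sess:
  # Expected output: [[[0.9, 0.2], [0.3, 0.8]]] -- roots on the diagonal.
  print(sess.run(potentials))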
Example 2: testCombineArcAndRootPotentials
# Required import: from dragnn.python import digraph_ops [as alias]
# Alternatively: from dragnn.python.digraph_ops import CombineArcAndRootPotentials [as alias]
def testCombineArcAndRootPotentials(self):
  with self.test_session():
    arcs = tf.constant([[[1, 2, 3],
                         [2, 3, 4],
                         [3, 4, 5]],
                        [[3, 4, 5],
                         [2, 3, 4],
                         [1, 2, 3]]], tf.float32)  # pyformat: disable
    roots = tf.constant([[6, 7, 8],
                         [8, 7, 6]], tf.float32)  # pyformat: disable

    potentials = digraph_ops.CombineArcAndRootPotentials(arcs, roots)

    self.assertAllEqual(potentials.eval(), [[[6, 2, 3],
                                             [2, 7, 4],
                                             [3, 4, 8]],
                                            [[8, 4, 5],
                                             [2, 7, 4],
                                             [1, 2, 6]]])  # pyformat: disable
Example 3: create
# Required import: from dragnn.python import digraph_ops [as alias]
# Alternatively: from dragnn.python.digraph_ops import CombineArcAndRootPotentials [as alias]
def create(self,
           fixed_embeddings,
           linked_embeddings,
           context_tensor_arrays,
           attention_tensor,
           during_training,
           stride=None):
  """Requires |stride|; otherwise see base class."""
  check.NotNone(stride,
                'BiaffineDigraphNetwork requires "stride" and must be called '
                'in the bulk feature extractor component.')

  # TODO(googleuser): Add dropout during training.
  del during_training

  # Retrieve (possibly averaged) weights.
  weights_arc = self._component.get_variable('weights_arc')
  weights_source = self._component.get_variable('weights_source')
  root = self._component.get_variable('root')

  # Extract the source and target token activations.  Use |stride| to collapse
  # batch and beam into a single dimension.
  sources = network_units.lookup_named_tensor('sources', linked_embeddings)
  targets = network_units.lookup_named_tensor('targets', linked_embeddings)
  source_tokens_bxnxs = tf.reshape(sources.tensor,
                                   [stride, -1, self._source_dim])
  target_tokens_bxnxt = tf.reshape(targets.tensor,
                                   [stride, -1, self._target_dim])
  num_tokens = tf.shape(source_tokens_bxnxs)[1]

  # Compute the arc, source, and root potentials.
  arcs_bxnxn = digraph_ops.ArcPotentialsFromTokens(
      source_tokens_bxnxs, target_tokens_bxnxt, weights_arc)
  sources_bxnxn = digraph_ops.ArcSourcePotentialsFromTokens(
      source_tokens_bxnxs, weights_source)
  roots_bxn = digraph_ops.RootPotentialsFromTokens(
      root, target_tokens_bxnxt, weights_arc)

  # Combine them into a single matrix with the roots on the diagonal.
  adjacency_bxnxn = digraph_ops.CombineArcAndRootPotentials(
      arcs_bxnxn + sources_bxnxn, roots_bxn)

  return [tf.reshape(adjacency_bxnxn, [-1, num_tokens])]
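The helper ops used above boil down to a handful of matrix products. A rough NumPy sketch of the shape flow, assuming the standard biaffine formulation (arc[b, s, t] = source[b, s] · W · target[b, t], a per-source linear term broadcast across target columns, and the root scored like a special source token); this is an approximation for intuition, not DRAGNN's exact code:

import numpy as np

batch, n, dim = 2, 5, 8
source_tokens_bxnxs = np.random.randn(batch, n, dim)
target_tokens_bxnxt = np.random.randn(batch, n, dim)
weights_arc = np.random.randn(dim, dim)  # bilinear arc weights
weights_source = np.random.randn(dim)    # per-source linear weights
root = np.random.randn(dim)              # learned root embedding

# arcs[b, s, t] = source[b, s] . W . target[b, t]
arcs_bxnxn = np.einsum('bsi,ij,btj->bst',
                       source_tokens_bxnxs, weights_arc, target_tokens_bxnxt)
# Per-source potential, broadcast across target columns.
sources_bxnxn = np.einsum('bsi,i->bs',
                          source_tokens_bxnxs, weights_source)[:, :, None]
# roots[b, t] = root . W . target[b, t]
roots_bxn = np.einsum('i,ij,btj->bt', root, weights_arc, target_tokens_bxnxt)

# Roots land on the diagonal, exactly what CombineArcAndRootPotentials does.
adjacency_bxnxn = arcs_bxnxn + sources_bxnxn
idx = np.arange(n)
adjacency_bxnxn[:, idx, idx] = roots_bxn
print(adjacency_bxnxn.shape)  # (2, 5, 5)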
Example 4: create
# Required import: from dragnn.python import digraph_ops [as alias]
# Alternatively: from dragnn.python.digraph_ops import CombineArcAndRootPotentials [as alias]
def create(self,
           fixed_embeddings,
           linked_embeddings,
           context_tensor_arrays,
           attention_tensor,
           during_training,
           stride=None):
  """Requires |stride|; otherwise see base class."""
  check.NotNone(stride,
                'BiaffineDigraphNetwork requires "stride" and must be called '
                'in the bulk feature extractor component.')

  # TODO(googleuser): Add dropout during training.
  del during_training

  # Retrieve (possibly averaged) weights.
  weights_arc = self._component.get_variable('weights_arc')
  weights_source = self._component.get_variable('weights_source')
  root = self._component.get_variable('root')

  # Extract the source and target token activations.  Use |stride| to collapse
  # batch and beam into a single dimension.
  sources = network_units.lookup_named_tensor('sources', linked_embeddings)
  targets = network_units.lookup_named_tensor('targets', linked_embeddings)
  source_tokens_bxnxs = tf.reshape(sources.tensor,
                                   [stride, -1, self._source_dim])
  target_tokens_bxnxt = tf.reshape(targets.tensor,
                                   [stride, -1, self._target_dim])
  num_tokens = tf.shape(source_tokens_bxnxs)[1]

  # Compute the arc, source, and root potentials.
  arcs_bxnxn = digraph_ops.ArcPotentialsFromTokens(
      source_tokens_bxnxs, target_tokens_bxnxt, weights_arc)
  sources_bxnxn = digraph_ops.ArcSourcePotentialsFromTokens(
      source_tokens_bxnxs, weights_source)
  roots_bxn = digraph_ops.RootPotentialsFromTokens(
      root, target_tokens_bxnxt, weights_arc, weights_source)

  # Combine them into a single matrix with the roots on the diagonal.
  adjacency_bxnxn = digraph_ops.CombineArcAndRootPotentials(
      arcs_bxnxn + sources_bxnxn, roots_bxn)

  # The adjacency matrix currently has sources on rows and targets on columns,
  # but we want targets on rows so that maximizing within a row corresponds to
  # selecting sources for a given target.
  adjacency_bxnxn = tf.matrix_transpose(adjacency_bxnxn)

  return [tf.reshape(adjacency_bxnxn, [-1, num_tokens])]
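After the transpose, row i of the adjacency matrix scores candidate sources (heads) for target token i, with position i itself standing in for attachment to the artificial root. A hedged sketch of greedy decoding over such a matrix (illustrative only; DRAGNN applies its own decoding on top of these potentials):

import numpy as np

def greedy_heads(adjacency_nxn):
  """Picks the best-scoring source for each target; -1 means root."""
  heads = np.argmax(adjacency_nxn, axis=1)
  return [-1 if h == i else int(h) for i, h in enumerate(heads)]

adjacency = np.array([[9.0, 1.0, 2.0],   # token 0: root (diagonal wins)
                      [5.0, 0.0, 1.0],   # token 1: head is token 0
                      [1.0, 7.0, 0.0]])  # token 2: head is token 1
print(greedy_heads(adjacency))  # [-1, 0, 1]

Note that independent per-row argmaxes do not guarantee a well-formed tree; in practice a maximum-spanning-tree algorithm is typically run over these potentials.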