This page collects typical usage examples of the Python method dragnn.python.network_units.NamedTensor. If you are unsure what network_units.NamedTensor does or how to use it, the curated code samples below may help. You can also explore further usage examples from the module in which the method lives, dragnn.python.network_units.
The following presents 7 code examples of network_units.NamedTensor, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
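Before the examples, here is a minimal, hypothetical sketch of constructing a NamedTensor directly: it simply pairs a tensor with a feature name and an optional dimension, which is the same pattern the examples below rely on (the shape and names here are made up for illustration).

import tensorflow as tf
from dragnn.python import network_units

# Wrap an arbitrary activation tensor under a feature name; the optional
# third argument records the feature dimension (cf. Examples 5-7 below).
features = network_units.NamedTensor(tf.zeros([4, 8]), 'my_features', 8)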
Example 1: testCanCreate
# Required import: from dragnn.python import network_units [as alias]
# Or: from dragnn.python.network_units import NamedTensor [as alias]
def testCanCreate(self):
  """Tests that create() works on a good spec."""
  with tf.Graph().as_default(), self.test_session():
    master = MockMaster()
    component = MockComponent(master, _make_biaffine_spec())
    with tf.variable_scope(component.name, reuse=None):
      component.network = biaffine_units.BiaffineDigraphNetwork(component)
    with tf.variable_scope(component.name, reuse=True):
      sources = network_units.NamedTensor(
          tf.zeros([_BATCH_SIZE * _NUM_TOKENS, _TOKEN_DIM]), 'sources')
      targets = network_units.NamedTensor(
          tf.zeros([_BATCH_SIZE * _NUM_TOKENS, _TOKEN_DIM]), 'targets')

      # No assertions on the result, just don't crash.
      component.network.create(
          fixed_embeddings=[],
          linked_embeddings=[sources, targets],
          context_tensor_arrays=None,
          attention_tensor=None,
          during_training=True,
          stride=_BATCH_SIZE)
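As a usage note, the same create() call can be sketched for an inference-only graph by flipping during_training. The sketch below merely reuses the mocks from the test above (MockMaster, MockComponent, _make_biaffine_spec) and does not assert anything about inference-specific behavior.

# Sketch: inference-time variant of the call above, reusing the test's setup.
outputs = component.network.create(
    fixed_embeddings=[],
    linked_embeddings=[sources, targets],
    context_tensor_arrays=None,
    attention_tensor=None,
    during_training=False,
    stride=_BATCH_SIZE)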
Example 2: fetch_linked_embedding
# Required import: from dragnn.python import network_units [as alias]
# Or: from dragnn.python.network_units import NamedTensor [as alias]
def fetch_linked_embedding(comp, network_states, feature_spec):
  """Looks up linked embeddings in other components.

  Args:
    comp: ComponentBuilder object with respect to which the feature is to be
        fetched
    network_states: dictionary of NetworkState objects
    feature_spec: FeatureSpec proto for the linked feature to be looked up

  Returns:
    NamedTensor containing the linked feature tensor

  Raises:
    NotImplementedError: if a linked feature with source translator other than
        'identity' is configured.
    RuntimeError: if a recurrent linked feature is configured.
  """
  if feature_spec.source_translator != 'identity':
    raise NotImplementedError(feature_spec.source_translator)
  if feature_spec.source_component == comp.name:
    raise RuntimeError(
        'Recurrent linked features are not supported in bulk extraction.')

  tf.logging.info('[%s] Adding linked feature "%s"', comp.name,
                  feature_spec.name)
  source = comp.master.lookup_component[feature_spec.source_component]
  return network_units.NamedTensor(
      network_states[source.name].activations[
          feature_spec.source_layer].bulk_tensor,
      feature_spec.name)
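A brief usage sketch for the function above: the returned NamedTensor can be logged or passed along like any other. The .name and .tensor attribute names are assumptions about network_units.NamedTensor, and comp, network_states, and feature_spec are assumed to come from an existing DRAGNN component setup that is not reproduced here.

# Sketch only: the surrounding component setup is omitted.
linked = fetch_linked_embedding(comp, network_states, feature_spec)
tf.logging.info('linked feature %s has shape %s',
                linked.name, linked.tensor.get_shape())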
Example 3: fetch_differentiable_fixed_embeddings
# Required import: from dragnn.python import network_units [as alias]
# Or: from dragnn.python.network_units import NamedTensor [as alias]
def fetch_differentiable_fixed_embeddings(comp, state, stride):
  """Looks up fixed features with separate, differentiable, embedding lookup.

  Args:
    comp: Component whose fixed features we wish to look up.
    state: live MasterState object for the component.
    stride: Tensor containing current batch * beam size.

  Returns:
    state handle: updated state handle to be used after this call
    fixed_embeddings: list of NamedTensor objects
  """
  _validate_embedded_fixed_features(comp)
  num_channels = len(comp.spec.fixed_feature)
  if not num_channels:
    return state.handle, []

  state.handle, indices, ids, weights, num_steps = (
      dragnn_ops.bulk_fixed_features(
          state.handle, component=comp.name, num_channels=num_channels))
  fixed_embeddings = []
  for channel, feature_spec in enumerate(comp.spec.fixed_feature):
    differentiable_or_constant = ('constant' if feature_spec.is_constant else
                                  'differentiable')
    tf.logging.info('[%s] Adding %s fixed feature "%s"', comp.name,
                    differentiable_or_constant, feature_spec.name)
    size = stride * num_steps * feature_spec.size
    fixed_embedding = network_units.embedding_lookup(
        comp.get_variable(network_units.fixed_embeddings_name(channel)),
        indices[channel], ids[channel], weights[channel], size)
    if feature_spec.is_constant:
      fixed_embedding = tf.stop_gradient(fixed_embedding)
    fixed_embeddings.append(
        network_units.NamedTensor(fixed_embedding, feature_spec.name))

  return state.handle, fixed_embeddings
Example 4: fetch_fast_fixed_embeddings
# Required import: from dragnn.python import network_units [as alias]
# Or: from dragnn.python.network_units import NamedTensor [as alias]
def fetch_fast_fixed_embeddings(comp, state):
  """Looks up fixed features with fast, non-differentiable, op.

  Since BulkFixedEmbeddings is non-differentiable with respect to the
  embeddings, the idea is to call this function only when the graph is
  not being used for training.

  Args:
    comp: Component whose fixed features we wish to look up.
    state: live MasterState object for the component.

  Returns:
    state handle: updated state handle to be used after this call
    fixed_embeddings: list of NamedTensor objects
  """
  _validate_embedded_fixed_features(comp)
  num_channels = len(comp.spec.fixed_feature)
  if not num_channels:
    return state.handle, []

  tf.logging.info('[%s] Adding %d fast fixed features', comp.name,
                  num_channels)
  state.handle, bulk_embeddings, _ = dragnn_ops.bulk_fixed_embeddings(
      state.handle, [
          comp.get_variable(network_units.fixed_embeddings_name(c))
          for c in range(num_channels)
      ],
      component=comp.name)

  bulk_embeddings = network_units.NamedTensor(bulk_embeddings,
                                              'bulk-%s-fixed-features' %
                                              comp.name)
  return state.handle, [bulk_embeddings]
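The two lookup functions above are complementary: the differentiable path is needed when gradients must flow into the embedding matrices, while the fast path is meant for graphs that will not be trained. Below is a hedged sketch of how a caller might dispatch between them; the during_training flag and the surrounding setup are assumptions, not code from the library.

# Sketch: pick the embedding-lookup path based on whether the graph is
# being built for training.  comp, state, and stride come from the same
# bulk-component setup assumed by the functions above.
if during_training:
  state.handle, fixed_embeddings = fetch_differentiable_fixed_embeddings(
      comp, state, stride)
else:
  state.handle, fixed_embeddings = fetch_fast_fixed_embeddings(comp, state)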
Example 5: extract_fixed_feature_ids
# Required import: from dragnn.python import network_units [as alias]
# Or: from dragnn.python.network_units import NamedTensor [as alias]
def extract_fixed_feature_ids(comp, state, stride):
  """Extracts fixed feature IDs.

  Args:
    comp: Component whose fixed feature IDs we wish to extract.
    state: Live MasterState object for the component.
    stride: Tensor containing current batch * beam size.

  Returns:
    state handle: Updated state handle to be used after this call.
    ids: List of [stride * num_steps, 1] feature IDs per channel.  Missing IDs
        (e.g., due to batch padding) are set to -1.
  """
  num_channels = len(comp.spec.fixed_feature)
  if not num_channels:
    return state.handle, []

  for feature_spec in comp.spec.fixed_feature:
    check.Eq(feature_spec.size, 1, 'All features must have size=1')
    check.Lt(feature_spec.embedding_dim, 0, 'All features must be non-embedded')

  state.handle, indices, ids, _, num_steps = dragnn_ops.bulk_fixed_features(
      state.handle, component=comp.name, num_channels=num_channels)
  size = stride * num_steps

  fixed_ids = []
  for channel, feature_spec in enumerate(comp.spec.fixed_feature):
    tf.logging.info('[%s] Adding fixed feature IDs "%s"', comp.name,
                    feature_spec.name)

    # The +1 and -1 increments ensure that missing IDs default to -1.
    #
    # TODO(googleuser): This formula breaks if multiple IDs are extracted at
    # some step.  Try using tf.unique() to enforce the unique-IDS precondition.
    sums = tf.unsorted_segment_sum(ids[channel] + 1, indices[channel], size) - 1
    sums = tf.expand_dims(sums, axis=1)
    fixed_ids.append(network_units.NamedTensor(sums, feature_spec.name, dim=1))
  return state.handle, fixed_ids
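To see why the +1/-1 trick makes missing IDs default to -1: segments that receive no ID sum to zero under tf.unsorted_segment_sum, so shifting the IDs up by one before the sum and down by one afterwards maps empty segments to -1. A toy illustration with made-up numbers (not part of the function above):

# Steps 0, 1, and 3 produced IDs 3, 0, and 7; steps 2 and 4 produced none.
ids = tf.constant([3, 0, 7], dtype=tf.int64)
indices = tf.constant([0, 1, 3], dtype=tf.int64)
sums = tf.unsorted_segment_sum(ids + 1, indices, 5) - 1
# Evaluates to [3, 0, -1, 7, -1]: the empty segments default to -1.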
Example 6: testConstantPadding
# Required import: from dragnn.python import network_units [as alias]
# Or: from dragnn.python.network_units import NamedTensor [as alias]
def testConstantPadding(self):
  with tf.Graph().as_default(), self.test_session():
    with tf.variable_scope('test_scope'):
      network = network_units.GatherNetwork(self._component)

    # Construct a batch of two items with 3 and 2 steps, respectively.
    indices = tf.constant([[1], [2], [0],     # item 1
                           [-1], [0], [-1]],  # item 2
                          dtype=tf.int64)
    features = tf.constant([[1.0, 1.5], [2.0, 2.5], [3.0, 3.5],   # item 1
                            [4.0, 4.5], [5.0, 5.5], [6.0, 6.5]],  # item 2
                           dtype=tf.float32)

    fixed_embeddings = []
    linked_embeddings = [
        network_units.NamedTensor(indices, 'indices', 1),
        network_units.NamedTensor(features, 'features', 2)
    ]

    with tf.variable_scope('test_scope', reuse=True):
      outputs = network.create(fixed_embeddings, linked_embeddings, None,
                               None, True, 2)
    gathered = outputs[0]

    # Zeros will be substituted for index -1.
    self.assertAllEqual(gathered.eval(),
                        [[2.0, 2.5],   # gathered from 1
                         [3.0, 3.5],   # gathered from 2
                         [1.0, 1.5],   # gathered from 0
                         [0.0, 0.0],   # gathered from -1
                         [4.0, 4.5],   # gathered from 0
                         [0.0, 0.0]])  # gathered from -1
Example 7: testTrainablePadding
# Required import: from dragnn.python import network_units [as alias]
# Or: from dragnn.python.network_units import NamedTensor [as alias]
def testTrainablePadding(self):
  self._component.spec.network_unit.parameters['trainable_padding'] = 'true'
  with tf.Graph().as_default(), self.test_session():
    with tf.variable_scope('test_scope'):
      network = network_units.GatherNetwork(self._component)

    # Construct a batch of two items with 3 and 2 steps, respectively.
    indices = tf.constant([[1], [2], [0],     # item 1
                           [-1], [0], [-1]],  # item 2
                          dtype=tf.int64)
    features = tf.constant([[1.0, 1.5], [2.0, 2.5], [3.0, 3.5],   # item 1
                            [4.0, 4.5], [5.0, 5.5], [6.0, 6.5]],  # item 2
                           dtype=tf.float32)

    fixed_embeddings = []
    linked_embeddings = [
        network_units.NamedTensor(indices, 'indices', 1),
        network_units.NamedTensor(features, 'features', 2)
    ]

    with tf.variable_scope('test_scope', reuse=True):
      outputs = network.create(fixed_embeddings, linked_embeddings, None,
                               None, True, 2)
    gathered = outputs[0]

    # Ensure that the padding variable is initialized.
    tf.global_variables_initializer().run()

    # Randomly-initialized padding will be substituted for index -1.
    self.assertAllEqual(gathered[0].eval(), [2.0, 2.5])  # gathered from 1
    self.assertAllEqual(gathered[1].eval(), [3.0, 3.5])  # gathered from 2
    self.assertAllEqual(gathered[2].eval(), [1.0, 1.5])  # gathered from 0
    tf.logging.info('padding = %s', gathered[3].eval())  # gathered from -1
    self.assertAllEqual(gathered[4].eval(), [4.0, 4.5])  # gathered from 0
    tf.logging.info('padding = %s', gathered[5].eval())  # gathered from -1

    # Though random, the padding must be identical wherever it is substituted.
    self.assertAllEqual(gathered[3].eval(), gathered[5].eval())