This page collects typical usage examples of the fixed_embeddings_name method from the Python module dragnn.python.network_units. If you have been wondering what network_units.fixed_embeddings_name does, how to call it, or what it looks like in practice, the curated examples below should help. You can also explore further usages of the dragnn.python.network_units module.
The following presents 7 code examples of the network_units.fixed_embeddings_name method, sorted by popularity by default.
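For orientation before the examples: fixed_embeddings_name maps a fixed-feature channel ID to the name of that channel's embedding-matrix variable, and every example below passes the returned name to component.get_variable() or tf.get_variable() to fetch the per-channel matrix. In the DRAGNN sources it is essentially a one-line helper along these lines (a sketch of the library internals, not code taken from the examples on this page):

def fixed_embeddings_name(channel_id):
  """Returns the variable name of the fixed embedding matrix for a channel."""
  return 'fixed_embedding_matrix_%d' % channel_id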
Example 1: _add_hooks_for_fixed_embedding_matrix
# Required import: from dragnn.python import network_units [as alias]
# Or: from dragnn.python.network_units import fixed_embeddings_name [as alias]
def _add_hooks_for_fixed_embedding_matrix(component, channel_id):
  """Adds runtime hooks for a fixed embedding matrix.

  The hooks remove the last row from the embedding matrix. The extra row was
  probably intended for out-of-vocabulary items, but those are handled in the
  feature system and the extra row is never used.

  Args:
    component: Component for which to add hooks.
    channel_id: Fixed embedding channel for which to add hooks.
  """
  var_name = network_units.fixed_embeddings_name(channel_id)
  extended_matrix = component.get_variable(var_name)
  extended_num_rows = tf.shape(extended_matrix)[0]
  matrix = tf.slice(extended_matrix, [0, 0], [extended_num_rows - 1, -1])

  # TODO(googleuser): If the extra row is removed from the variable itself,
  # remove the tf.slice() and point the hook directly at the variable.
  _add_hook_node(matrix, _get_hook_name(component, var_name, '/trimmed'))
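To make the trimming concrete, here is a minimal, self-contained sketch of the same tf.slice pattern on a toy matrix (TF1-style, matching the examples on this page; the toy shapes are illustrative, not from the DRAGNN code):

import tensorflow as tf

# A toy "extended" embedding matrix: 4 vocabulary rows plus 1 unused
# out-of-vocabulary row, with embedding_dim = 3.
extended = tf.constant([[float(i)] * 3 for i in range(5)])
num_rows = tf.shape(extended)[0]
# Keep all but the last row; size -1 means "all remaining columns".
trimmed = tf.slice(extended, [0, 0], [num_rows - 1, -1])

with tf.Session() as sess:
  print(sess.run(tf.shape(trimmed)))  # [4 3]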
Example 2: fetch_dense_ragged_embeddings
# Required import: from dragnn.python import network_units [as alias]
# Or: from dragnn.python.network_units import fixed_embeddings_name [as alias]
def fetch_dense_ragged_embeddings(comp, state):
  """Gets embeddings in RaggedTensor format."""
  _validate_embedded_fixed_features(comp)
  num_channels = len(comp.spec.fixed_feature)
  if not num_channels:
    return state.handle, []

  tf.logging.info('[%s] Adding %d fast fixed features', comp.name,
                  num_channels)
  features = [
      comp.get_variable(network_units.fixed_embeddings_name(c))
      for c in range(num_channels)
  ]
  state.handle, data, offsets = dragnn_ops.bulk_embed_dense_fixed_features(
      state.handle, features, component=comp.name)
  data = network_units.NamedTensor(data, 'dense-%s-data' % comp.name)
  offsets = network_units.NamedTensor(offsets, 'dense-%s-offsets' % comp.name)
  return state.handle, [data, offsets]
Example 3: fetch_differentiable_fixed_embeddings
# Required import: from dragnn.python import network_units [as alias]
# Or: from dragnn.python.network_units import fixed_embeddings_name [as alias]
def fetch_differentiable_fixed_embeddings(comp, state, stride):
  """Looks up fixed features with a separate, differentiable embedding lookup.

  Args:
    comp: Component whose fixed features we wish to look up.
    state: Live MasterState object for the component.
    stride: Tensor containing current batch * beam size.

  Returns:
    state handle: Updated state handle to be used after this call.
    fixed_embeddings: List of NamedTensor objects.
  """
  _validate_embedded_fixed_features(comp)
  num_channels = len(comp.spec.fixed_feature)
  if not num_channels:
    return state.handle, []

  state.handle, indices, ids, weights, num_steps = (
      dragnn_ops.bulk_fixed_features(
          state.handle, component=comp.name, num_channels=num_channels))
  fixed_embeddings = []
  for channel, feature_spec in enumerate(comp.spec.fixed_feature):
    differentiable_or_constant = ('constant' if feature_spec.is_constant else
                                  'differentiable')
    tf.logging.info('[%s] Adding %s fixed feature "%s"', comp.name,
                    differentiable_or_constant, feature_spec.name)
    size = stride * num_steps * feature_spec.size
    fixed_embedding = network_units.embedding_lookup(
        comp.get_variable(network_units.fixed_embeddings_name(channel)),
        indices[channel], ids[channel], weights[channel], size)
    if feature_spec.is_constant:
      fixed_embedding = tf.stop_gradient(fixed_embedding)
    fixed_embeddings.append(
        network_units.NamedTensor(fixed_embedding, feature_spec.name))
  return state.handle, fixed_embeddings
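The tf.stop_gradient call above is what makes constant channels opaque to backpropagation; Examples 5 and 7 below test exactly this. A minimal sketch of the effect, independent of DRAGNN:

import tensorflow as tf

x = tf.get_variable('x', shape=[3], initializer=tf.ones_initializer())
y_trainable = tf.reduce_sum(x * 2.0)
y_constant = tf.reduce_sum(tf.stop_gradient(x) * 2.0)

# Gradients flow through the trainable path but not the constant one.
print(tf.gradients(y_trainable, x))  # [<tf.Tensor ...>]
print(tf.gradients(y_constant, x))   # [None]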
Example 4: fetch_fast_fixed_embeddings
# Required import: from dragnn.python import network_units [as alias]
# Or: from dragnn.python.network_units import fixed_embeddings_name [as alias]
def fetch_fast_fixed_embeddings(comp, state):
  """Looks up fixed features with a fast, non-differentiable op.

  Since BulkFixedEmbeddings is non-differentiable with respect to the
  embeddings, the idea is to call this function only when the graph is
  not being used for training.

  Args:
    comp: Component whose fixed features we wish to look up.
    state: Live MasterState object for the component.

  Returns:
    state handle: Updated state handle to be used after this call.
    fixed_embeddings: List of NamedTensor objects.
  """
  _validate_embedded_fixed_features(comp)
  num_channels = len(comp.spec.fixed_feature)
  if not num_channels:
    return state.handle, []

  tf.logging.info('[%s] Adding %d fast fixed features', comp.name,
                  num_channels)
  state.handle, bulk_embeddings, _ = dragnn_ops.bulk_fixed_embeddings(
      state.handle, [
          comp.get_variable(network_units.fixed_embeddings_name(c))
          for c in range(num_channels)
      ],
      component=comp.name)
  bulk_embeddings = network_units.NamedTensor(
      bulk_embeddings, 'bulk-%s-fixed-features' % comp.name)
  return state.handle, [bulk_embeddings]
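Taken together with Example 3, a natural pattern is to pick the lookup path from the training mode. The dispatch below is a hedged sketch; the fetch_fixed_embeddings name and during_training flag are illustrative, not taken from this page:

def fetch_fixed_embeddings(comp, state, stride, during_training):
  """Chooses the differentiable lookup for training, the fast op otherwise."""
  if during_training:
    # Gradients w.r.t. the embedding matrices are needed for training.
    return fetch_differentiable_fixed_embeddings(comp, state, stride)
  # bulk_fixed_embeddings is faster but yields no embedding gradients.
  return fetch_fast_fixed_embeddings(comp, state)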
Example 5: testNormalFixedFeaturesAreDifferentiable
# Required import: from dragnn.python import network_units [as alias]
# Or: from dragnn.python.network_units import fixed_embeddings_name [as alias]
def testNormalFixedFeaturesAreDifferentiable(self):
  component_spec = spec_pb2.ComponentSpec()
  text_format.Parse("""
      name: "test"
      network_unit {
        registered_name: "IdentityNetwork"
      }
      fixed_feature {
        name: "fixed" embedding_dim: 32 size: 1
        pretrained_embedding_matrix { part {} }
        vocab { part {} }
      }
      component_builder {
        registered_name: "bulk_component.BulkFeatureExtractorComponentBuilder"
      }
      """, component_spec)
  with tf.Graph().as_default():
    comp = bulk_component.BulkFeatureExtractorComponentBuilder(
        self.master, component_spec)

    # Get embedding matrix variables.
    with tf.variable_scope(comp.name, reuse=True):
      fixed_embedding_matrix = tf.get_variable(
          network_units.fixed_embeddings_name(0))

    # Get output layer.
    comp.build_greedy_training(self.master_state, self.network_states)
    activations = self.network_states[comp.name].activations
    outputs = activations[comp.network.layers[0].name].bulk_tensor

    # Compute the gradient of the output layer w.r.t. the embedding matrix.
    # This should be well-defined in the normal case.
    gradients = tf.gradients(outputs, fixed_embedding_matrix)
    self.assertEqual(len(gradients), 1)
    self.assertFalse(gradients[0] is None)
Example 6: testAddFixedHooks
# Required import: from dragnn.python import network_units [as alias]
# Or: from dragnn.python.network_units import fixed_embeddings_name [as alias]
def testAddFixedHooks(self):
  component = MockComponent()
  fixed0 = component.spec.fixed_feature.add()
  fixed1 = component.spec.fixed_feature.add()
  fixed0.embedding_dim = -1
  fixed1.embedding_dim = 32
  fixed0.vocabulary_size = 100
  fixed1.vocabulary_size = 1000
  fixed0_matrix_name = network_units.fixed_embeddings_name(0)
  fixed1_matrix_name = network_units.fixed_embeddings_name(1)

  with self.test_session() as session:
    graph = session.graph

    # Create fixed embedding matrices. Only channel 1 uses one.
    with tf.variable_scope(component.name):
      tf.get_variable(
          fixed1_matrix_name, shape=[1000 + 1, 32], dtype=tf.float32)

    # Add hooks. This should ignore channel 0 and add hooks for channel 1.
    with tf.variable_scope(component.name, reuse=True):
      runtime_support.add_hooks(component, export_pb2.CellSubgraphSpec())

    # Check that no hooks were added for channel 0.
    with self.assertRaises(KeyError):
      graph.get_tensor_by_name(
          '{}/{}/trimmed:0'.format(component.name, fixed0_matrix_name))

    # Get the hooks added for channel 1.
    trimmed = graph.get_tensor_by_name(
        '{}/{}/trimmed:0'.format(component.name, fixed1_matrix_name))

    # Check dimensions of the hooks.
    tf.global_variables_initializer().run()
    self.assertAllEqual(tf.shape(trimmed).eval(), [1000, 32])
Example 7: testConstantFixedFeaturesAreNotDifferentiableButOthersAre
# Required import: from dragnn.python import network_units [as alias]
# Or: from dragnn.python.network_units import fixed_embeddings_name [as alias]
def testConstantFixedFeaturesAreNotDifferentiableButOthersAre(self):
  component_spec = spec_pb2.ComponentSpec()
  text_format.Parse("""
      name: "test"
      network_unit {
        registered_name: "IdentityNetwork"
      }
      fixed_feature {
        name: "constant" embedding_dim: 32 size: 1
        is_constant: true
        pretrained_embedding_matrix { part {} }
        vocab { part {} }
      }
      fixed_feature {
        name: "trainable" embedding_dim: 32 size: 1
        pretrained_embedding_matrix { part {} }
        vocab { part {} }
      }
      component_builder {
        registered_name: "bulk_component.BulkFeatureExtractorComponentBuilder"
      }
      """, component_spec)
  with tf.Graph().as_default():
    comp = bulk_component.BulkFeatureExtractorComponentBuilder(
        self.master, component_spec)

    # Get embedding matrix variables.
    with tf.variable_scope(comp.name, reuse=True):
      constant_embedding_matrix = tf.get_variable(
          network_units.fixed_embeddings_name(0))
      trainable_embedding_matrix = tf.get_variable(
          network_units.fixed_embeddings_name(1))

    # Get output layer.
    comp.build_greedy_training(self.master_state, self.network_states)
    activations = self.network_states[comp.name].activations
    outputs = activations[comp.network.layers[0].name].bulk_tensor

    # The constant embeddings are non-differentiable.
    constant_gradients = tf.gradients(outputs, constant_embedding_matrix)
    self.assertEqual(len(constant_gradients), 1)
    self.assertTrue(constant_gradients[0] is None)

    # The trainable embeddings are differentiable.
    trainable_gradients = tf.gradients(outputs, trainable_embedding_matrix)
    self.assertEqual(len(trainable_gradients), 1)
    self.assertFalse(trainable_gradients[0] is None)