This article collects typical code examples of the network_units.Layer class from the dragnn.python package. Unsure about the specifics of network_units.Layer, or how to use it in practice? The curated code examples below may help. You can also explore the module it belongs to, dragnn.python.network_units, for further usage examples.
Below are four code examples of network_units.Layer, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
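Before the examples, a minimal sketch of the constructor they all rely on may be useful. The positional signature Layer(component, name, dim) is inferred from the calls below rather than from DRAGNN documentation, and FakeComponent is a hypothetical stand-in for the real ComponentBuilder object:

from dragnn.python import network_units

class FakeComponent(object):
  """Hypothetical stand-in for the ComponentBuilder that owns a layer."""
  name = 'parser'

# Declare a layer holding 32 activations per step. A negative dim would mark
# the dimension as dynamic (see Examples 3 and 4 below).
logits = network_units.Layer(FakeComponent(), name='logits', dim=32)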
Example 1: create_hidden_layers
# Required import: from dragnn.python import network_units [as alias]
# Or: from dragnn.python.network_units import Layer [as alias]
def create_hidden_layers(self, component, hidden_layer_sizes):
  """See base class."""
  # Construct the layer meta info for the DRAGNN builder. Note that the order
  # of h and c is reversed compared to the vanilla DRAGNN LSTM cell, as this
  # is the standard in tf.contrib.rnn.
  #
  # NB: The h activations of the last LSTM must be the last layer, in order
  # for _append_base_layers() to work.
  layers = []
  for index, num_units in enumerate(hidden_layer_sizes):
    layers.append(
        dragnn.Layer(component, name='state_c_%d' % index, dim=num_units))
    layers.append(
        dragnn.Layer(component, name='state_h_%d' % index, dim=num_units))
  context_layers = list(layers)  # copy |layers|, don't alias it
  return layers, context_layers
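For concreteness: with hidden_layer_sizes = [64, 128] (a hypothetical configuration), the loop declares state_c_0, state_h_0, state_c_1, state_h_1, in that order, so the h activations of the last LSTM land in the final slot as _append_base_layers() requires. A throwaway sketch of the naming pattern, in plain Python with no DRAGNN dependency:

layer_names = []
for index, num_units in enumerate([64, 128]):  # hypothetical sizes
  layer_names.append(('state_c_%d' % index, num_units))
  layer_names.append(('state_h_%d' % index, num_units))
print(layer_names)
# [('state_c_0', 64), ('state_h_0', 64), ('state_c_1', 128), ('state_h_1', 128)]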
Example 2: __init__
# Required import: from dragnn.python import network_units [as alias]
# Or: from dragnn.python.network_units import Layer [as alias]
def __init__(self, component):
  super(PairwiseBilinearLabelNetwork, self).__init__(component)
  parameters = component.spec.network_unit.parameters

  self._num_labels = int(parameters['num_labels'])
  self._source_dim = self._linked_feature_dims['sources']
  self._target_dim = self._linked_feature_dims['targets']

  self._weights = []
  self._weights.append(
      network_units.add_var_initialized('bilinear',
                                        [self._source_dim,
                                         self._num_labels,
                                         self._target_dim],
                                        'xavier'))

  self._params.extend(self._weights)
  self._regularized_weights.extend(self._weights)
  self._layers.append(network_units.Layer(component,
                                          name='bilinear_scores',
                                          dim=self._num_labels))
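The bilinear variable has shape [source_dim, num_labels, target_dim], which suggests one bilinear form per label over source/target pairs. The forward pass is not shown on this page, so the NumPy sketch below is only an assumption about how such a weight is typically contracted:

import numpy as np

num_steps, source_dim, target_dim, num_labels = 5, 8, 8, 3
sources = np.random.randn(num_steps, source_dim)
targets = np.random.randn(num_steps, target_dim)
bilinear = np.random.randn(source_dim, num_labels, target_dim)

# scores[s, t, l] = sources[s] . bilinear[:, l, :] . targets[t]
scores = np.einsum('si,ilj,tj->stl', sources, bilinear, targets)
assert scores.shape == (num_steps, num_steps, num_labels)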
Example 3: __init__
# Required import: from dragnn.python import network_units [as alias]
# Or: from dragnn.python.network_units import Layer [as alias]
def __init__(self, component):
  """Initializes weights and layers.

  Args:
    component: Parent ComponentBuilderBase object.
  """
  super(BiaffineDigraphNetwork, self).__init__(component)

  check.Eq(len(self._fixed_feature_dims.items()), 0,
           'Expected no fixed features')
  check.Eq(len(self._linked_feature_dims.items()), 2,
           'Expected two linked features')
  check.In('sources', self._linked_feature_dims,
           'Missing required linked feature')
  check.In('targets', self._linked_feature_dims,
           'Missing required linked feature')

  self._source_dim = self._linked_feature_dims['sources']
  self._target_dim = self._linked_feature_dims['targets']

  # TODO(googleuser): Make parameter initialization configurable.
  self._weights = []
  self._weights.append(tf.get_variable(
      'weights_arc', [self._source_dim, self._target_dim], tf.float32,
      tf.random_normal_initializer(stddev=1e-4)))
  self._weights.append(tf.get_variable(
      'weights_source', [self._source_dim], tf.float32,
      tf.random_normal_initializer(stddev=1e-4)))
  self._weights.append(tf.get_variable(
      'root', [self._source_dim], tf.float32,
      tf.random_normal_initializer(stddev=1e-4)))

  self._params.extend(self._weights)
  self._regularized_weights.extend(self._weights)

  # Negative Layer.dim indicates that the dimension is dynamic.
  self._layers.append(network_units.Layer(component, 'adjacency', -1))
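The three variables are consistent with a first-order biaffine arc scorer: weights_arc for the source-target bilinear term, weights_source for a source-only bias, and root as a learned embedding for the artificial root token. How they are combined lives elsewhere in DRAGNN; the NumPy sketch below is a plausible reading, not the library's actual computation:

import numpy as np

num_tokens, dim = 4, 8
sources = np.random.randn(num_tokens, dim)  # source (head) activations
targets = np.random.randn(num_tokens, dim)  # target (dependent) activations
weights_arc = np.random.randn(dim, dim)
weights_source = np.random.randn(dim)
root = np.random.randn(dim)  # stands in for the artificial root token

# arc[s, t]: score of token s as the head of token t.
arc = sources @ weights_arc @ targets.T + (sources @ weights_source)[:, None]
# Scores for the artificial root as head, one per target token. Together with
# |arc|, each token gets a num_tokens-wide row of 'adjacency' scores, which
# matches the dynamic dim=-1 declared above.
root_arc = root @ weights_arc @ targets.T + root @ weights_source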
Example 4: __init__
# Required import: from dragnn.python import network_units [as alias]
# Or: from dragnn.python.network_units import Layer [as alias]
def __init__(self, component):
  """Initializes weights and layers.

  Args:
    component: Parent ComponentBuilderBase object.
  """
  super(BiaffineDigraphNetwork, self).__init__(component)

  check.Eq(len(self._fixed_feature_dims.items()), 0,
           'Expected no fixed features')
  check.Eq(len(self._linked_feature_dims.items()), 2,
           'Expected two linked features')
  check.In('sources', self._linked_feature_dims,
           'Missing required linked feature')
  check.In('targets', self._linked_feature_dims,
           'Missing required linked feature')

  self._source_dim = self._linked_feature_dims['sources']
  self._target_dim = self._linked_feature_dims['targets']

  self._weights = []
  self._weights.append(
      tf.get_variable('weights_arc', [self._source_dim, self._target_dim],
                      tf.float32, tf.orthogonal_initializer()))
  self._weights.append(
      tf.get_variable('weights_source', [self._source_dim], tf.float32,
                      tf.zeros_initializer()))
  self._weights.append(
      tf.get_variable('root', [self._source_dim], tf.float32,
                      tf.zeros_initializer()))

  self._params.extend(self._weights)
  self._regularized_weights.extend(self._weights)

  # Add runtime hooks for pre-computed weights.
  self._derived_params.append(self._get_root_weights)
  self._derived_params.append(self._get_root_bias)

  # Negative Layer.dim indicates that the dimension is dynamic.
  self._layers.append(network_units.Layer(component, 'adjacency', -1))
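Compared with Example 3, this variant swaps the random-normal initializers for orthogonal (weights_arc) and zeros (weights_source, root), and registers two derived-parameter hooks. The methods _get_root_weights and _get_root_bias are not shown on this page; judging by their names and the weights above, they plausibly precompute the root token's contribution to arc scoring for the runtime. A sketch under that assumption, not the actual DRAGNN implementation:

# Hypothetical implementations; the real methods are not part of this page.
def _get_root_weights(self):
  # Fold the 'root' embedding through 'weights_arc' once, giving a
  # [1, target_dim] row the runtime can reuse for every sentence.
  return tf.matmul(tf.expand_dims(self._weights[2], 0),  # 'root'
                   self._weights[0])                     # 'weights_arc'

def _get_root_bias(self):
  # The root's source-only bias term: a single scalar.
  return tf.reduce_sum(self._weights[2] * self._weights[1], keepdims=True)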