This article collects typical usage examples of the Python method syntaxnet.util.check.Gt. If you are wondering what check.Gt does, how to call it, or what idiomatic uses look like, the hand-picked code examples below may help. You can also explore the other members of the module syntaxnet.util.check.
The following shows 8 code examples of check.Gt, ordered by popularity.
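Before the examples, a quick orientation. As the examples below show (in particular the unit test in Example 7), check.Gt(value, target, message) returns silently when value > target and otherwise raises ValueError with the given message, or a caller-supplied error type passed as the fourth argument. A minimal sketch with made-up values:

from syntaxnet.util import check

check.Gt(4, 0, 'Invalid number of slices.')  # silent: 4 > 0

try:
  check.Gt(0, 0, 'Value must be strictly positive.')
except ValueError as err:
  print(err)  # ValueError is the default error type

try:
  check.Gt(-1, 0, 'Value must be strictly positive.', RuntimeError)
except RuntimeError as err:
  print(err)  # a caller-supplied error type replaces ValueError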
Example 1: create_array
# Required import: from syntaxnet.util import check [as alias]
# Or: from syntaxnet.util.check import Gt [as alias]
def create_array(self, stride):
  """Creates a new tensor array to store this layer's activations.

  Arguments:
    stride: Possibly dynamic batch * beam size with which to initialize the
      tensor array

  Returns:
    TensorArray object
  """
  check.Gt(self.dim, 0, 'Cannot create array when dimension is dynamic')
  tensor_array = ta.TensorArray(dtype=tf.float32,
                                size=0,
                                dynamic_size=True,
                                clear_after_read=False,
                                infer_shape=False,
                                name='%s_array' % self.name)

  # Start each array with all zeros. Special values will still be learned via
  # the extra embedding dimension stored for each linked feature channel.
  initial_value = tf.zeros([stride, self.dim])
  return tensor_array.write(0, initial_value)
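To experiment with the same pattern outside DRAGNN, the sketch below reproduces the write-at-index-0 initialization with the public tf.TensorArray API; stride and dim are hypothetical stand-ins for the dynamic batch * beam size and the layer dimension whose staticness the check.Gt call guards.

import tensorflow as tf

stride, dim = 8, 16  # hypothetical batch * beam size and layer dimension

tensor_array = tf.TensorArray(dtype=tf.float32,
                              size=0,
                              dynamic_size=True,
                              clear_after_read=False,
                              infer_shape=False,
                              name='activations_array')
# Slot 0 holds the all-zeros initial activations, as in create_array above.
tensor_array = tensor_array.write(0, tf.zeros([stride, dim]))
print(tensor_array.read(0).shape)  # (8, 16)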
Example 2: __init__
# Required import: from syntaxnet.util import check [as alias]
# Or: from syntaxnet.util.check import Gt [as alias]
def __init__(self, component):
  """Initializes weights and layers.

  Args:
    component: Parent ComponentBuilderBase object.
  """
  super(SplitNetwork, self).__init__(component)

  parameters = component.spec.network_unit.parameters
  self._num_slices = int(parameters['num_slices'])
  check.Gt(self._num_slices, 0, 'Invalid number of slices.')
  check.Eq(self._concatenated_input_dim % self._num_slices, 0,
           'Input dimension %s does not evenly divide into %s slices' %
           (self._concatenated_input_dim, self._num_slices))
  self._slice_dim = int(self._concatenated_input_dim / self._num_slices)

  for slice_index in xrange(self._num_slices):
    self._layers.append(
        Layer(self, 'slice_%s' % slice_index, self._slice_dim))
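The two checks above encode a single invariant: the concatenated input must split evenly into num_slices blocks of slice_dim columns each. Here is a sketch of just that validation arithmetic, with hypothetical numbers and only the check helpers shown on this page:

from syntaxnet.util import check

concatenated_input_dim = 96  # hypothetical concatenated input width
num_slices = 4               # hypothetical value of the 'num_slices' parameter

check.Gt(num_slices, 0, 'Invalid number of slices.')
check.Eq(concatenated_input_dim % num_slices, 0,
         'Input dimension %s does not evenly divide into %s slices' %
         (concatenated_input_dim, num_slices))

slice_dim = concatenated_input_dim // num_slices  # 24 columns per slice layer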
Example 3: create_array
# Required import: from syntaxnet.util import check [as alias]
# Or: from syntaxnet.util.check import Gt [as alias]
def create_array(self, stride):
  """Creates a new tensor array to store this layer's activations.

  Arguments:
    stride: Possibly dynamic batch * beam size with which to initialize the
      tensor array

  Returns:
    TensorArray object
  """
  check.Gt(self.dim, 0, 'Cannot create array when dimension is dynamic')
  tensor_array = ta.TensorArray(
      dtype=tf.float32,
      size=0,
      dynamic_size=True,
      clear_after_read=False,
      infer_shape=False,
      name='%s_array' % self.name)

  # Start each array with all zeros. Special values will still be learned via
  # the extra embedding dimension stored for each linked feature channel.
  initial_value = tf.zeros([stride, self.dim])
  return tensor_array.write(0, initial_value)
Example 4: __init__
# Required import: from syntaxnet.util import check [as alias]
# Or: from syntaxnet.util.check import Gt [as alias]
def __init__(self, component):
  """Initializes weights and layers.

  Args:
    component: Parent ComponentBuilderBase object.
  """
  super(SplitNetwork, self).__init__(component)

  parameters = component.spec.network_unit.parameters
  self._num_slices = int(parameters['num_slices'])
  check.Gt(self._num_slices, 0, 'Invalid number of slices.')
  check.Eq(self._concatenated_input_dim % self._num_slices, 0,
           'Input dimension %s does not evenly divide into %s slices' %
           (self._concatenated_input_dim, self._num_slices))
  self._slice_dim = int(self._concatenated_input_dim / self._num_slices)

  for slice_index in xrange(self._num_slices):
    self._layers.append(
        Layer(component, 'slice_%s' % slice_index, self._slice_dim))
Example 5: fixed_feature_lookup
# Required import: from syntaxnet.util import check [as alias]
# Or: from syntaxnet.util.check import Gt [as alias]
def fixed_feature_lookup(component, state, channel_id, stride):
  """Looks up fixed features and passes them through embeddings.

  Embedding vectors may be scaled by weights if the features specify it.

  Args:
    component: Component object in which to look up the fixed features.
    state: MasterState object for the live nlp_saft::dragnn::MasterState.
    channel_id: int id of the fixed feature to look up.
    stride: int Tensor of current batch * beam size.

  Returns:
    NamedTensor object containing the embedding vectors.
  """
  feature_spec = component.spec.fixed_feature[channel_id]
  check.Gt(feature_spec.embedding_dim, 0,
           'Embeddings requested for non-embedded feature: %s' % feature_spec)
  embedding_matrix = component.get_variable(fixed_embeddings_name(channel_id))

  with tf.op_scope([embedding_matrix], 'fixed_embedding_' + feature_spec.name):
    indices, ids, weights = dragnn_ops.extract_fixed_features(
        state.handle, component=component.name, channel_id=channel_id)
    size = stride * feature_spec.size
    embeddings = embedding_lookup(embedding_matrix, indices, ids, weights, size)
    dim = feature_spec.size * feature_spec.embedding_dim
    return NamedTensor(
        tf.reshape(embeddings, [-1, dim]), feature_spec.name, dim=dim)
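The shape bookkeeping at the end is easy to miss: the channel extracts feature_spec.size feature slots per batch * beam element, so, assuming embedding_lookup yields one embedding_dim-wide row per slot, the final reshape regroups those rows into a single dim-wide row per element. A small arithmetic sketch with hypothetical sizes:

# Hypothetical channel: 3 feature slots, 32-dim embeddings, batch * beam = 8.
stride, feature_size, embedding_dim = 8, 3, 32

size = stride * feature_size        # 24 = number of looked-up embedding rows
dim = feature_size * embedding_dim  # 96 = columns per batch * beam element

# tf.reshape(embeddings, [-1, dim]) turns [24, 32] activations into [8, 96];
# the total element count is unchanged:
assert size * embedding_dim == stride * dim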
Example 6: _validate_embedded_fixed_features
# Required import: from syntaxnet.util import check [as alias]
# Or: from syntaxnet.util.check import Gt [as alias]
def _validate_embedded_fixed_features(comp):
  """Checks that the embedded fixed features of |comp| are set up properly."""
  for feature in comp.spec.fixed_feature:
    check.Gt(feature.embedding_dim, 0,
             'Embeddings requested for non-embedded feature: %s' % feature)
    if feature.is_constant:
      check.IsTrue(feature.HasField('pretrained_embedding_matrix'),
                   'Constant embeddings must be pretrained: %s' % feature)
Example 7: testCheckGt
# Required import: from syntaxnet.util import check [as alias]
# Or: from syntaxnet.util.check import Gt [as alias]
def testCheckGt(self):
  check.Gt(2, 1, 'foo')
  with self.assertRaisesRegexp(ValueError, 'bar'):
    check.Gt(1, 1, 'bar')
  with self.assertRaisesRegexp(RuntimeError, 'baz'):
    check.Gt(-1, 1, 'baz', RuntimeError)
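This test pins down the contract: the call is silent when the first argument is strictly greater than the second, raises ValueError carrying the given message by default, and raises a caller-supplied error type when one is passed. A minimal re-implementation consistent with that contract (a sketch, not the actual syntaxnet.util.check source):

def Gt(value, target, message, error_type=ValueError):
  """Raises error_type(message) unless value > target."""
  if not value > target:
    raise error_type(message)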
Example 8: fixed_feature_lookup
# Required import: from syntaxnet.util import check [as alias]
# Or: from syntaxnet.util.check import Gt [as alias]
def fixed_feature_lookup(component, state, channel_id, stride):
  """Looks up fixed features and passes them through embeddings.

  Embedding vectors may be scaled by weights if the features specify it.

  Args:
    component: Component object in which to look up the fixed features.
    state: MasterState object for the live ComputeSession.
    channel_id: int id of the fixed feature to look up.
    stride: int Tensor of current batch * beam size.

  Returns:
    NamedTensor object containing the embedding vectors.
  """
  feature_spec = component.spec.fixed_feature[channel_id]
  check.Gt(feature_spec.embedding_dim, 0,
           'Embeddings requested for non-embedded feature: %s' % feature_spec)
  embedding_matrix = component.get_variable(fixed_embeddings_name(channel_id))

  with tf.op_scope([embedding_matrix], 'fixed_embedding_' + feature_spec.name):
    indices, ids, weights = dragnn_ops.extract_fixed_features(
        state.handle, component=component.name, channel_id=channel_id)
    size = stride * feature_spec.size
    embeddings = embedding_lookup(embedding_matrix, indices, ids, weights, size)
    dim = feature_spec.size * feature_spec.embedding_dim
    return NamedTensor(
        tf.reshape(embeddings, [-1, dim]), feature_spec.name, dim=dim)