This page collects typical usage examples of the Python method syntaxnet.util.check.NotNone. If you are wondering what check.NotNone does, how to call it, or what real uses of it look like, the examples curated here may help. You can also browse further usage examples for the containing module, syntaxnet.util.check.
The sections below show 7 code examples of check.NotNone, sorted by popularity by default.
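For orientation, check.NotNone simply asserts that a value is not None. Judging from the test in Example 3 below, it raises ValueError with the supplied message by default, and an alternative exception type can be passed explicitly. A minimal sketch of an equivalent helper (the parameter names are illustrative, not copied from the syntaxnet source):

def NotNone(value, message, error=ValueError):
  """Illustrative sketch: raises |error| with |message| if |value| is None."""
  if value is None:
    raise error(message)

# Passes silently for any non-None value, including falsy ones such as [].
NotNone([], 'value must not be None')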
Example 1: __init__
# Required import: from syntaxnet.util import check [as alias]
# Or: from syntaxnet.util.check import NotNone [as alias]
def __init__(self, tensor=None, array=None, stride=None, dim=None):
  """Creates ops for converting the input to either format.

  If 'tensor' is used, then a conversion from [stride * steps, dim] to
  [steps + 1, stride, dim] is performed for dynamic_tensor reads.

  If 'array' is used, then a conversion from [steps + 1, stride, dim] to
  [stride * steps, dim] is performed for bulk_tensor reads.

  Args:
    tensor: Bulk tensor input.
    array: TensorArray dynamic input.
    stride: stride of bulk tensor. Not used for dynamic.
    dim: dim of bulk tensor. Not used for dynamic.
  """
  if tensor is not None:
    check.IsNone(array, 'Cannot initialize from tensor and array')
    check.NotNone(stride, 'Stride is required for bulk tensor')
    check.NotNone(dim, 'Dim is required for bulk tensor')
    self._bulk_tensor = tensor
    with tf.name_scope('convert_to_dyn'):
      tensor = tf.reshape(tensor, [stride, -1, dim])
      tensor = tf.transpose(tensor, perm=[1, 0, 2])
      pad = tf.zeros([1, stride, dim], dtype=tensor.dtype)
      self._array_tensor = tf.concat([pad, tensor], 0)

  if array is not None:
    check.IsNone(tensor, 'Cannot initialize from both tensor and array')
    with tf.name_scope('convert_to_bulk'):
      self._bulk_tensor = convert_network_state_tensorarray(array)

    with tf.name_scope('convert_to_dyn'):
      self._array_tensor = array.stack()
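To make the shape bookkeeping in the docstring above concrete, here is a small NumPy sketch of the same reshape/transpose round trip; the sizes are made up for illustration, and NumPy stands in for the TensorFlow ops only to keep the example session-free.

import numpy as np

stride, steps, dim = 2, 3, 4  # hypothetical sizes
bulk = np.arange(stride * steps * dim).reshape([stride * steps, dim])

# Same reshape/transpose as the ops above:
# [stride * steps, dim] -> [stride, steps, dim] -> [steps, stride, dim].
dyn = bulk.reshape([stride, -1, dim]).transpose([1, 0, 2])
assert dyn.shape == (steps, stride, dim)

# Prepending one step of zeros (the 'pad' tensor) yields the
# [steps + 1, stride, dim] layout used for dynamic reads.
dyn_padded = np.concatenate(
    [np.zeros([1, stride, dim], dtype=dyn.dtype), dyn], axis=0)
assert dyn_padded.shape == (steps + 1, stride, dim)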
Example 2: get_variable
# Required import: from syntaxnet.util import check [as alias]
# Or: from syntaxnet.util.check import NotNone [as alias]
def get_variable(self, var_name=None, var_params=None):
  """Returns either the original or averaged version of a given variable.

  If the master.read_from_avg flag is set to True, and the
  ExponentialMovingAverage (EMA) object has been attached, then this will ask
  the EMA object for the given variable.

  This is to allow executing inference from the averaged version of
  parameters.

  Arguments:
    var_name: Name of the variable.
    var_params: tf.Variable for which to retrieve an average.

  Only one of |var_name| or |var_params| needs to be provided. If both are
  provided, |var_params| takes precedence.

  Returns:
    tf.Variable object corresponding to original or averaged version.
  """
  if var_params:
    var_name = var_params.name
  else:
    check.NotNone(var_name, 'specify at least one of var_name or var_params')
    var_params = tf.get_variable(var_name)

  if self.moving_average and self.master.read_from_avg:
    logging.info('Retrieving average for: %s', var_name)
    var_params = self.moving_average.average(var_params)
    assert var_params
  logging.info('Returning: %s', var_params.name)
  return var_params
Example 3: testCheckNotNone
# Required import: from syntaxnet.util import check [as alias]
# Or: from syntaxnet.util.check import NotNone [as alias]
def testCheckNotNone(self):
  check.NotNone(1, 'foo')
  check.NotNone([], 'foo')
  with self.assertRaisesRegexp(ValueError, 'bar'):
    check.NotNone(None, 'bar')
  with self.assertRaisesRegexp(RuntimeError, 'baz'):
    check.NotNone(None, 'baz', RuntimeError)
Example 4: __init__
# Required import: from syntaxnet.util import check [as alias]
# Or: from syntaxnet.util.check import NotNone [as alias]
def __init__(self, tensor=None, array=None, stride=None, dim=None):
  """Creates ops for converting the input to either format.

  If 'tensor' is used, then a conversion from [stride * steps, dim] to
  [steps + 1, stride, dim] is performed for dynamic_tensor reads.

  If 'array' is used, then a conversion from [steps + 1, stride, dim] to
  [stride * steps, dim] is performed for bulk_tensor reads.

  Args:
    tensor: Bulk tensor input.
    array: TensorArray dynamic input.
    stride: stride of bulk tensor. Not used for dynamic.
    dim: dim of bulk tensor. Not used for dynamic.
  """
  if tensor is not None:
    check.IsNone(array, 'Cannot initialize from tensor and array')
    check.NotNone(stride, 'Stride is required for bulk tensor')
    check.NotNone(dim, 'Dim is required for bulk tensor')
    self._bulk_tensor = tensor
    if dim >= 0:
      # These operations will fail if |dim| is negative.
      with tf.name_scope('convert_to_dyn'):
        tensor = tf.reshape(tensor, [stride, -1, dim])
        tensor = tf.transpose(tensor, perm=[1, 0, 2])
        pad = tf.zeros([1, stride, dim], dtype=tensor.dtype)
        self._array_tensor = tf.concat([pad, tensor], 0)

  if array is not None:
    check.IsNone(tensor, 'Cannot initialize from both tensor and array')
    with tf.name_scope('convert_to_bulk'):
      self._bulk_tensor = convert_network_state_tensorarray(array)

    with tf.name_scope('convert_to_dyn'):
      self._array_tensor = array.stack()
Example 5: get_variable
# Required import: from syntaxnet.util import check [as alias]
# Or: from syntaxnet.util.check import NotNone [as alias]
def get_variable(self, var_name=None, var_params=None):
  """Returns either the original or averaged version of a given variable.

  If the master.read_from_avg flag is set to True, and the
  ExponentialMovingAverage (EMA) object has been attached, then this will ask
  the EMA object for the given variable.

  This is to allow executing inference from the averaged version of
  parameters.

  Arguments:
    var_name: Name of the variable.
    var_params: tf.Variable for which to retrieve an average.

  Only one of |var_name| or |var_params| needs to be provided. If both are
  provided, |var_params| takes precedence.

  Returns:
    tf.Variable object corresponding to original or averaged version.
  """
  if var_params is not None:
    var_name = var_params.name
  else:
    check.NotNone(var_name, 'specify at least one of var_name or var_params')
    var_params = tf.get_variable(var_name)

  if self.moving_average and self.master.read_from_avg:
    logging.info('Retrieving average for: %s', var_name)
    var_params = self.moving_average.average(var_params)
    assert var_params
  logging.info('Returning: %s', var_params.name)
  return var_params
Example 6: create
# Required import: from syntaxnet.util import check [as alias]
# Or: from syntaxnet.util.check import NotNone [as alias]
def create(self,
           fixed_embeddings,
           linked_embeddings,
           context_tensor_arrays,
           attention_tensor,
           during_training,
           stride=None):
  """Requires |stride|; otherwise see base class."""
  check.NotNone(stride,
                'BulkBiLSTMNetwork requires "stride" and must be called '
                'in the bulk feature extractor component.')

  # Flatten the lengths into a vector.
  lengths = dragnn.lookup_named_tensor('lengths', linked_embeddings)
  lengths_s = tf.squeeze(lengths.tensor, [1])

  # Collect all other inputs into a batched tensor.
  linked_embeddings = [
      named_tensor for named_tensor in linked_embeddings
      if named_tensor.name != 'lengths'
  ]
  inputs_sxnxd = dragnn.get_input_tensor_with_stride(
      fixed_embeddings, linked_embeddings, stride)

  # Since get_input_tensor_with_stride() concatenates the input embeddings, it
  # obscures the static activation dimension, which the RNN library requires.
  # Restore it using set_shape(). Note that set_shape() merges into the known
  # shape, so only specify the activation dimension.
  inputs_sxnxd.set_shape(
      [tf.Dimension(None), tf.Dimension(None), self._input_dim])

  initial_states_forward, initial_states_backward = (
      self._create_initial_states(stride))

  if during_training:
    cells_forward = self._train_cells_forward
    cells_backward = self._train_cells_backward
  else:
    cells_forward = self._inference_cells_forward
    cells_backward = self._inference_cells_backward

  def _bilstm_closure(scope):
    """Applies the bi-LSTM to the current inputs."""
    outputs_sxnxd, _, _ = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
        cells_forward,
        cells_backward,
        inputs_sxnxd,
        initial_states_fw=initial_states_forward,
        initial_states_bw=initial_states_backward,
        sequence_length=lengths_s,
        parallel_iterations=self._attrs['parallel_iterations'],
        scope=scope)
    return outputs_sxnxd

  # Layer outputs are not batched; flatten out the batch dimension.
  outputs_sxnxd = self._apply_with_captured_variables(_bilstm_closure)
  outputs_snxd = tf.reshape(outputs_sxnxd, [-1, self._output_dim])
  return self._append_base_layers([outputs_snxd])
Example 7: maybe_apply_dropout
# Required import: from syntaxnet.util import check [as alias]
# Or: from syntaxnet.util.check import NotNone [as alias]
def maybe_apply_dropout(inputs, keep_prob, per_sequence, stride=None):
  """Applies dropout, if so configured, to an input tensor.

  The input may be rank 2 or 3 depending on whether the stride (i.e., batch
  size) has been incorporated into the shape.

  Args:
    inputs: [stride * num_steps, dim] or [stride, num_steps, dim] input tensor.
    keep_prob: Scalar probability of keeping each input element. If >= 1.0, no
        dropout is performed.
    per_sequence: If true, sample the dropout mask once per sequence, instead
        of once per step. Requires |stride| when true.
    stride: Scalar batch size. Optional if |per_sequence| is false.

  Returns:
    [stride * num_steps, dim] or [stride, num_steps, dim] tensor, matching the
    shape of |inputs|, containing the masked or original inputs, depending on
    whether dropout was actually performed.
  """
  check.Ge(inputs.get_shape().ndims, 2, 'inputs must be rank 2 or 3')
  check.Le(inputs.get_shape().ndims, 3, 'inputs must be rank 2 or 3')
  flat = (inputs.get_shape().ndims == 2)

  if keep_prob >= 1.0:
    return inputs

  if not per_sequence:
    return tf.nn.dropout(inputs, keep_prob)

  check.NotNone(stride, 'per-sequence dropout requires stride')
  dim = inputs.get_shape().as_list()[-1]
  check.NotNone(dim, 'inputs must have static activation dimension, but have '
                'static shape %s' % inputs.get_shape().as_list())

  # If needed, restore the batch dimension to separate the sequences.
  inputs_sxnxd = tf.reshape(inputs, [stride, -1, dim]) if flat else inputs

  # Replace |num_steps| with 1 in |noise_shape|, so the dropout mask broadcasts
  # to all steps for a particular sequence.
  noise_shape = [stride, 1, dim]
  masked_sxnxd = tf.nn.dropout(inputs_sxnxd, keep_prob, noise_shape)

  # If needed, flatten out the batch dimension in the return value.
  return tf.reshape(masked_sxnxd, [-1, dim]) if flat else masked_sxnxd
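Assuming the definition above is in scope, a minimal usage sketch (TF 1.x style, with made-up shapes) might look like the following; per-sequence dropout on a flat [stride * num_steps, dim] tensor requires passing the stride explicitly, which is exactly what the check.NotNone call guards against forgetting.

import tensorflow as tf

stride, num_steps, dim = 4, 10, 8  # hypothetical batch size, steps, and dims
inputs = tf.random_normal([stride * num_steps, dim])

# Per-step dropout: every element is masked independently; no stride needed.
per_step = maybe_apply_dropout(inputs, keep_prob=0.8, per_sequence=False)

# Per-sequence dropout: one mask per sequence, broadcast across all steps,
# so |stride| must be supplied to restore the batch dimension internally.
per_seq = maybe_apply_dropout(
    inputs, keep_prob=0.8, per_sequence=True, stride=stride)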