

Python check.NotNone Method Code Examples

This article collects and organizes typical usage examples of the Python method syntaxnet.util.check.NotNone. If you have been wondering what exactly check.NotNone does, how to call it, or what it looks like in real code, the curated examples here should help. You can also explore further usage examples for the containing module, syntaxnet.util.check.


Seven code examples of the check.NotNone method are shown below, sorted by popularity by default.
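For orientation, the contract that all of the examples below rely on is simple: check.NotNone raises an error carrying the given message when the value is None, and otherwise does nothing. The following is a minimal sketch inferred from the test in Example 3, not the actual syntaxnet.util.check source; the parameter names are assumptions:

def NotNone(value, message, error_class=ValueError):
  """Raises |error_class| with |message| if |value| is None (sketch)."""
  if value is None:
    raise error_class(message)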

Example 1: __init__

# Required import: from syntaxnet.util import check [as alias]
# Or: from syntaxnet.util.check import NotNone [as alias]
def __init__(self, tensor=None, array=None, stride=None, dim=None):
    """Creates ops for converting the input to either format.

    If 'tensor' is used, then a conversion from [stride * steps, dim] to
    [steps + 1, stride, dim] is performed for dynamic_tensor reads.

    If 'array' is used, then a conversion from [steps + 1, stride, dim] to
    [stride * steps, dim] is performed for bulk_tensor reads.

    Args:
      tensor: Bulk tensor input.
      array: TensorArray dynamic input.
      stride: stride of bulk tensor. Not used for dynamic.
      dim: dim of bulk tensor. Not used for dynamic.
    """
    if tensor is not None:
      check.IsNone(array, 'Cannot initialize from both tensor and array')
      check.NotNone(stride, 'Stride is required for bulk tensor')
      check.NotNone(dim, 'Dim is required for bulk tensor')

      self._bulk_tensor = tensor
      with tf.name_scope('convert_to_dyn'):
        tensor = tf.reshape(tensor, [stride, -1, dim])
        tensor = tf.transpose(tensor, perm=[1, 0, 2])
        pad = tf.zeros([1, stride, dim], dtype=tensor.dtype)
        self._array_tensor = tf.concat([pad, tensor], 0)

    if array is not None:
      check.IsNone(tensor, 'Cannot initialize from both tensor and array')
      with tf.name_scope('convert_to_bulk'):
        self._bulk_tensor = convert_network_state_tensorarray(array)
      with tf.name_scope('convert_to_dyn'):
        self._array_tensor = array.stack() 
Developer: ringringyi, Project: DOTA_models, Lines of code: 35, Source file: network_units.py
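The bulk-to-dynamic conversion performed in __init__ above can be exercised in isolation. The following is a standalone sketch using the same TF1-style ops as the example, with hypothetical toy sizes; it is an illustration, not part of the original project:

import tensorflow as tf

stride, steps, dim = 2, 3, 4  # hypothetical sizes for illustration
bulk = tf.reshape(tf.range(stride * steps * dim, dtype=tf.float32),
                  [stride * steps, dim])   # bulk format: [stride * steps, dim]
dyn = tf.reshape(bulk, [stride, -1, dim])  # [stride, steps, dim]
dyn = tf.transpose(dyn, perm=[1, 0, 2])    # [steps, stride, dim]
pad = tf.zeros([1, stride, dim], dtype=dyn.dtype)
dyn = tf.concat([pad, dyn], 0)             # dynamic format: [steps + 1, stride, dim]

As in the example, the zero pad is concatenated at the front, so row 0 of the dynamic tensor is all zeros and step t of the original data lands at index t + 1.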

Example 2: get_variable

# Required import: from syntaxnet.util import check [as alias]
# Or: from syntaxnet.util.check import NotNone [as alias]
def get_variable(self, var_name=None, var_params=None):
    """Returns either the original or averaged version of a given variable.

    If the master.read_from_avg flag is set to True, and the
    ExponentialMovingAverage (EMA) object has been attached, then this will ask
    the EMA object for the given variable.

    This is to allow executing inference from the averaged version of
    parameters.

    Arguments:
      var_name: Name of the variable.
      var_params: tf.Variable for which to retrieve an average.

    Only one of |var_name| or |var_params| needs to be provided.  If both are
    provided, |var_params| takes precedence.

    Returns:
      tf.Variable object corresponding to original or averaged version.
    """
    if var_params is not None:
      var_name = var_params.name
    else:
      check.NotNone(var_name, 'specify at least one of var_name or var_params')
      var_params = tf.get_variable(var_name)

    if self.moving_average and self.master.read_from_avg:
      logging.info('Retrieving average for: %s', var_name)
      var_params = self.moving_average.average(var_params)
      assert var_params
    logging.info('Returning: %s', var_params.name)
    return var_params 
Developer: ringringyi, Project: DOTA_models, Lines of code: 34, Source file: component.py
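The averaging machinery that get_variable() consults can be reproduced standalone. A minimal sketch of the TF1 ExponentialMovingAverage API with a hypothetical variable, for illustration only:

import tensorflow as tf

var = tf.get_variable('weights', shape=[3], initializer=tf.zeros_initializer())
ema = tf.train.ExponentialMovingAverage(decay=0.999)
update_op = ema.apply([var])  # creates and maintains the shadow (averaged) copy
averaged = ema.average(var)   # the averaged version of |var|

Running update_op after each training step keeps the shadow variable tracking var; reading averaged instead of var is what the read_from_avg flag enables in the example.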

Example 3: testCheckNotNone

# Required import: from syntaxnet.util import check [as alias]
# Or: from syntaxnet.util.check import NotNone [as alias]
def testCheckNotNone(self):
    check.NotNone(1, 'foo')
    check.NotNone([], 'foo')
    with self.assertRaisesRegexp(ValueError, 'bar'):
      check.NotNone(None, 'bar')
    with self.assertRaisesRegexp(RuntimeError, 'baz'):
      check.NotNone(None, 'baz', RuntimeError) 
Developer: ringringyi, Project: DOTA_models, Lines of code: 9, Source file: check_test.py

Example 4: __init__

# Required import: from syntaxnet.util import check [as alias]
# Or: from syntaxnet.util.check import NotNone [as alias]
def __init__(self, tensor=None, array=None, stride=None, dim=None):
    """Creates ops for converting the input to either format.

    If 'tensor' is used, then a conversion from [stride * steps, dim] to
    [steps + 1, stride, dim] is performed for dynamic_tensor reads.

    If 'array' is used, then a conversion from [steps + 1, stride, dim] to
    [stride * steps, dim] is performed for bulk_tensor reads.

    Args:
      tensor: Bulk tensor input.
      array: TensorArray dynamic input.
      stride: stride of bulk tensor. Not used for dynamic.
      dim: dim of bulk tensor. Not used for dynamic.
    """
    if tensor is not None:
      check.IsNone(array, 'Cannot initialize from both tensor and array')
      check.NotNone(stride, 'Stride is required for bulk tensor')
      check.NotNone(dim, 'Dim is required for bulk tensor')

      self._bulk_tensor = tensor
      if dim >= 0:
        # These operations will fail if |dim| is negative.
        with tf.name_scope('convert_to_dyn'):
          tensor = tf.reshape(tensor, [stride, -1, dim])
          tensor = tf.transpose(tensor, perm=[1, 0, 2])
          pad = tf.zeros([1, stride, dim], dtype=tensor.dtype)
          self._array_tensor = tf.concat([pad, tensor], 0)

    if array is not None:
      check.IsNone(tensor, 'Cannot initialize from both tensor and array')
      with tf.name_scope('convert_to_bulk'):
        self._bulk_tensor = convert_network_state_tensorarray(array)
      with tf.name_scope('convert_to_dyn'):
        self._array_tensor = array.stack() 
Developer: generalized-iou, Project: g-tensorflow-models, Lines of code: 37, Source file: network_units.py

Example 5: get_variable

# Required import: from syntaxnet.util import check [as alias]
# Or: from syntaxnet.util.check import NotNone [as alias]
def get_variable(self, var_name=None, var_params=None):
    """Returns either the original or averaged version of a given variable.

    If the master.read_from_avg flag is set to True, and the
    ExponentialMovingAverage (EMA) object has been attached, then this will ask
    the EMA object for the given variable.

    This is to allow executing inference from the averaged version of
    parameters.

    Arguments:
      var_name: Name of the variable.
      var_params: tf.Variable for which to retrieve an average.

    Only one of |var_name| or |var_params| needs to be provided.  If both are
    provided, |var_params| takes precedence.

    Returns:
      tf.Variable object corresponding to original or averaged version.
    """
    if var_params is not None:
      var_name = var_params.name
    else:
      check.NotNone(var_name, 'specify at least one of var_name or var_params')
      var_params = tf.get_variable(var_name)

    if self.moving_average and self.master.read_from_avg:
      logging.info('Retrieving average for: %s', var_name)
      var_params = self.moving_average.average(var_params)
      assert var_params
    logging.info('Returning: %s', var_params.name)
    return var_params 
Developer: generalized-iou, Project: g-tensorflow-models, Lines of code: 34, Source file: component.py

Example 6: create

# Required import: from syntaxnet.util import check [as alias]
# Or: from syntaxnet.util.check import NotNone [as alias]
def create(self,
             fixed_embeddings,
             linked_embeddings,
             context_tensor_arrays,
             attention_tensor,
             during_training,
             stride=None):
    """Requires |stride|; otherwise see base class."""
    check.NotNone(stride,
                  'BulkBiLSTMNetwork requires "stride" and must be called '
                  'in the bulk feature extractor component.')

    # Flatten the lengths into a vector.
    lengths = dragnn.lookup_named_tensor('lengths', linked_embeddings)
    lengths_s = tf.squeeze(lengths.tensor, [1])

    # Collect all other inputs into a batched tensor.
    linked_embeddings = [
        named_tensor for named_tensor in linked_embeddings
        if named_tensor.name != 'lengths'
    ]
    inputs_sxnxd = dragnn.get_input_tensor_with_stride(
        fixed_embeddings, linked_embeddings, stride)

    # Since get_input_tensor_with_stride() concatenates the input embeddings, it
    # obscures the static activation dimension, which the RNN library requires.
    # Restore it using set_shape().  Note that set_shape() merges into the known
    # shape, so only specify the activation dimension.
    inputs_sxnxd.set_shape(
        [tf.Dimension(None), tf.Dimension(None), self._input_dim])

    initial_states_forward, initial_states_backward = (
        self._create_initial_states(stride))

    if during_training:
      cells_forward = self._train_cells_forward
      cells_backward = self._train_cells_backward
    else:
      cells_forward = self._inference_cells_forward
      cells_backward = self._inference_cells_backward

    def _bilstm_closure(scope):
      """Applies the bi-LSTM to the current inputs."""
      outputs_sxnxd, _, _ = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
          cells_forward,
          cells_backward,
          inputs_sxnxd,
          initial_states_fw=initial_states_forward,
          initial_states_bw=initial_states_backward,
          sequence_length=lengths_s,
          parallel_iterations=self._attrs['parallel_iterations'],
          scope=scope)
      return outputs_sxnxd

    # Layer outputs are not batched; flatten out the batch dimension.
    outputs_sxnxd = self._apply_with_captured_variables(_bilstm_closure)
    outputs_snxd = tf.reshape(outputs_sxnxd, [-1, self._output_dim])
    return self._append_base_layers([outputs_snxd]) 
Developer: ringringyi, Project: DOTA_models, Lines of code: 60, Source file: wrapped_units.py
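The core call in _bilstm_closure can be demonstrated outside the DRAGNN component machinery. A minimal sketch with hypothetical sizes, using the same tf.contrib.rnn API (TF1):

import tensorflow as tf

batch, steps, dim, hidden = 2, 7, 5, 8  # hypothetical sizes
inputs_bxnxd = tf.ones([batch, steps, dim])
lengths = tf.constant([7, 4])  # per-sequence valid lengths
cells_fw = [tf.contrib.rnn.LSTMCell(hidden)]
cells_bw = [tf.contrib.rnn.LSTMCell(hidden)]
outputs_bxnxd, _, _ = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
    cells_fw, cells_bw, inputs_bxnxd,
    sequence_length=lengths, dtype=tf.float32)
# outputs_bxnxd: [batch, steps, 2 * hidden]; the forward and backward
# outputs are concatenated along the last dimension.

Either dtype or initial states (as the example passes) must be supplied so the cells' zero states can be constructed.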

Example 7: maybe_apply_dropout

# Required import: from syntaxnet.util import check [as alias]
# Or: from syntaxnet.util.check import NotNone [as alias]
def maybe_apply_dropout(inputs, keep_prob, per_sequence, stride=None):
  """Applies dropout, if so configured, to an input tensor.

  The input may be rank 2 or 3 depending on whether the stride (i.e., batch
  size) has been incorporated into the shape.

  Args:
    inputs: [stride * num_steps, dim] or [stride, num_steps, dim] input tensor.
    keep_prob: Scalar probability of keeping each input element.  If >= 1.0, no
        dropout is performed.
    per_sequence: If true, sample the dropout mask once per sequence, instead of
        once per step.  Requires |stride| when true.
    stride: Scalar batch size.  Optional if |per_sequence| is false.

  Returns:
    [stride * num_steps, dim] or [stride, num_steps, dim] tensor, matching the
    shape of |inputs|, containing the masked or original inputs, depending on
    whether dropout was actually performed.
  """
  check.Ge(inputs.get_shape().ndims, 2, 'inputs must be rank 2 or 3')
  check.Le(inputs.get_shape().ndims, 3, 'inputs must be rank 2 or 3')
  flat = (inputs.get_shape().ndims == 2)

  if keep_prob >= 1.0:
    return inputs

  if not per_sequence:
    return tf.nn.dropout(inputs, keep_prob)

  check.NotNone(stride, 'per-sequence dropout requires stride')
  dim = inputs.get_shape().as_list()[-1]
  check.NotNone(dim, 'inputs must have static activation dimension, but have '
                'static shape %s' % inputs.get_shape().as_list())

  # If needed, restore the batch dimension to separate the sequences.
  inputs_sxnxd = tf.reshape(inputs, [stride, -1, dim]) if flat else inputs

  # Replace |num_steps| with 1 in |noise_shape|, so the dropout mask broadcasts
  # to all steps for a particular sequence.
  noise_shape = [stride, 1, dim]
  masked_sxnxd = tf.nn.dropout(inputs_sxnxd, keep_prob, noise_shape)

  # If needed, flatten out the batch dimension in the return value.
  return tf.reshape(masked_sxnxd, [-1, dim]) if flat else masked_sxnxd 
Developer: ringringyi, Project: DOTA_models, Lines of code: 46, Source file: network_units.py
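The per-sequence branch above hinges on the noise_shape broadcasting of tf.nn.dropout. A minimal standalone sketch with hypothetical sizes (TF1 API), for illustration:

import tensorflow as tf

stride, num_steps, dim = 2, 5, 3  # hypothetical sizes
inputs_sxnxd = tf.ones([stride, num_steps, dim])
# A 1 in the step dimension of |noise_shape| samples one dropout mask per
# sequence and broadcasts it across all steps of that sequence.
masked_sxnxd = tf.nn.dropout(inputs_sxnxd, keep_prob=0.5,
                             noise_shape=[stride, 1, dim])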


Note: The syntaxnet.util.check.NotNone examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors, and distribution or use should follow each project's license. Do not reproduce without permission.