

Python tensorflow.assert_greater Method Code Examples

This article collects typical usage examples of the tensorflow.assert_greater method in Python. If you are unsure what tensorflow.assert_greater does, how to call it, or what real code using it looks like, the curated examples below should help. You can also explore further usage examples from the tensorflow module that this method belongs to.


The following shows 15 code examples of tensorflow.assert_greater drawn from open-source projects, sorted by popularity by default.
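Before the project examples, here is a minimal usage sketch (written for this article, not taken from any of the projects below, and assuming the TensorFlow 1.x graph API that all of the examples use): tf.assert_greater returns an assertion op, and in graph mode that op only runs when something in the graph depends on it, typically via tf.control_dependencies.

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None])
assert_op = tf.assert_greater(x, 0.0, message='x must be positive')
with tf.control_dependencies([assert_op]):
    y = tf.identity(x) * 2.0  # only evaluates after the assertion passes

with tf.Session() as sess:
    print(sess.run(y, feed_dict={x: [1.0, 2.0]}))  # [2. 4.]
    # feeding a value <= 0 raises tf.errors.InvalidArgumentError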

Example 1: logistic_fixed_ends

# Required import: import tensorflow [as alias]
# Or: from tensorflow import assert_greater [as alias]
def logistic_fixed_ends(x, start=-1., end=1., L=1., **kwargs):
    """
    f is logistic with fixed ends, so that f(start) = 0, and f(end) = L.
    This is currently done a bit heuristically: it's a sigmoid, with a linear function added to correct the ends.
    """
    assert end > start, 'End of fixed points should be greater than start'
    # tf.assert_greater(end, start, message='assert')
    
    # clip to start and end
    x = tf.clip_by_value(x, start, end)
    
    # logistic function
    xv = logistic(x, L=L, **kwargs)
    
    # ends of linear corrective function
    sv = logistic(start, L=L, **kwargs)
    ev = logistic(end, L=L, **kwargs)
    
    # corrective function
    df = end - start
    linear_corr = (end-x)/df * (- sv) + (x-start)/df * (-ev + L)
    
    # return fixed logistic
    return xv + linear_corr 
Author: adalca, Project: neuron, Lines: 26, Source: utils.py
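The Python assert above only works when start and end are plain numbers; the commented-out tf.assert_greater line hints at the tensor-valued case. Below is a sketch of that variant, written for this article rather than taken from the neuron project, with a minimal stand-in for the logistic() helper from the same module; it wires the check into the graph with tf.control_dependencies.

import tensorflow as tf

def _logistic(x, x0=0., k=1., L=1.):
    # Minimal stand-in for the logistic() helper in the original utils module.
    return L / (1. + tf.exp(-k * (x - x0)))

def logistic_fixed_ends_checked(x, start, end, L=1., **kwargs):
    # Sketch only: like logistic_fixed_ends, but validates tensor-valued
    # start/end at graph run time instead of with a Python assert.
    assert_op = tf.assert_greater(
        end, start, message='End of fixed points should be greater than start')
    with tf.control_dependencies([assert_op]):
        x = tf.clip_by_value(x, start, end)
    xv = _logistic(x, L=L, **kwargs)
    sv = _logistic(start, L=L, **kwargs)
    ev = _logistic(end, L=L, **kwargs)
    df = end - start
    linear_corr = (end - x) / df * (-sv) + (x - start) / df * (-ev + L)
    return xv + linear_corr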

Example 2: maybe_minimize

# Required import: import tensorflow [as alias]
# Or: from tensorflow import assert_greater [as alias]
def maybe_minimize(self, condition, loss):
    # loss = tf.cond(condition, lambda: loss, float)
    update_op, grad_norm = tf.cond(
        condition,
        lambda: self.minimize(loss),
        lambda: (tf.no_op(), 0.0))
    with tf.control_dependencies([update_op]):
      summary = tf.cond(
          tf.logical_and(condition, self._log),
          lambda: self.summarize(grad_norm), str)
    if self._debug:
      # print_op = tf.print('{}_grad_norm='.format(self._name), grad_norm)
      message = 'Zero gradient norm in {} optimizer.'.format(self._name)
      assertion = lambda: tf.assert_greater(grad_norm, 0.0, message=message)
      assert_op = tf.cond(condition, assertion, tf.no_op)
      with tf.control_dependencies([assert_op]):
        summary = tf.identity(summary)
    return summary, grad_norm 
Author: google-research, Project: planet, Lines: 20, Source: custom_optimizer.py

Example 3: maybe_minimize

# Required import: import tensorflow [as alias]
# Or: from tensorflow import assert_greater [as alias]
def maybe_minimize(self, condition, loss):
    with tf.name_scope('optimizer_{}'.format(self._name)):
      # loss = tf.cond(condition, lambda: loss, float)
      update_op, grad_norm = tf.cond(
          condition,
          lambda: self.minimize(loss),
          lambda: (tf.no_op(), 0.0))
      with tf.control_dependencies([update_op]):
        summary = tf.cond(
            tf.logical_and(condition, self._log),
            lambda: self.summarize(grad_norm), str)
      if self._debug:
        # print_op = tf.print('{}_grad_norm='.format(self._name), grad_norm)
        message = 'Zero gradient norm in {} optimizer.'.format(self._name)
        assertion = lambda: tf.assert_greater(grad_norm, 0.0, message=message)
        assert_op = tf.cond(condition, assertion, tf.no_op)
        with tf.control_dependencies([assert_op]):
          summary = tf.identity(summary)
      return summary, grad_norm 
Author: google-research, Project: dreamer, Lines: 21, Source: custom_optimizer.py

Example 4: _training

# Required import: import tensorflow [as alias]
# Or: from tensorflow import assert_greater [as alias]
def _training(self):
    """Perform multiple training iterations of both policy and value baseline.

    Training on the episodes collected in the memory. Reset the memory
    afterwards. Always returns a summary string.

    Returns:
      Summary tensor.
    """
    with tf.name_scope('training'):
      assert_full = tf.assert_equal(
          self._memory_index, self._config.update_every)
      with tf.control_dependencies([assert_full]):
        data = self._memory.data()
      (observ, action, old_mean, old_logstd, reward), length = data
      with tf.control_dependencies([tf.assert_greater(length, 0)]):
        length = tf.identity(length)
      observ = self._observ_filter.transform(observ)
      reward = self._reward_filter.transform(reward)
      policy_summary = self._update_policy(
          observ, action, old_mean, old_logstd, reward, length)
      with tf.control_dependencies([policy_summary]):
        value_summary = self._update_value(observ, reward, length)
      with tf.control_dependencies([value_summary]):
        penalty_summary = self._adjust_penalty(
            observ, old_mean, old_logstd, length)
      with tf.control_dependencies([penalty_summary]):
        clear_memory = tf.group(
            self._memory.clear(), self._memory_index.assign(0))
      with tf.control_dependencies([clear_memory]):
        weight_summary = utility.variable_summaries(
            tf.trainable_variables(), self._config.weight_summaries)
        return tf.summary.merge([
            policy_summary, value_summary, penalty_summary, weight_summary]) 
Author: utra-robosoccer, Project: soccer-matlab, Lines: 36, Source: algorithm.py

Example 5: _training

# Required import: import tensorflow [as alias]
# Or: from tensorflow import assert_greater [as alias]
def _training(self):
    """Perform multiple training iterations of both policy and value baseline.

    Training on the episodes collected in the memory. Reset the memory
    afterwards. Always returns a summary string.

    Returns:
      Summary tensor.
    """
    with tf.name_scope('training'):
      assert_full = tf.assert_equal(
          self._memory_index, self._config.update_every)
      with tf.control_dependencies([assert_full]):
        data = self._memory.data()
      (observ, action, old_mean, old_logstd, reward), length = data
      with tf.control_dependencies([tf.assert_greater(length, 0)]):
        length = tf.identity(length)
      observ = self._observ_filter.transform(observ)
      reward = self._reward_filter.transform(reward)
      update_summary = self._perform_update_steps(
          observ, action, old_mean, old_logstd, reward, length)
      with tf.control_dependencies([update_summary]):
        penalty_summary = self._adjust_penalty(
            observ, old_mean, old_logstd, length)
      with tf.control_dependencies([penalty_summary]):
        clear_memory = tf.group(
            self._memory.clear(), self._memory_index.assign(0))
      with tf.control_dependencies([clear_memory]):
        weight_summary = utility.variable_summaries(
            tf.trainable_variables(), self._config.weight_summaries)
        return tf.summary.merge([
            update_summary, penalty_summary, weight_summary]) 
Author: utra-robosoccer, Project: soccer-matlab, Lines: 34, Source: algorithm.py

Example 6: assert_positive_int32_scalar

# Required import: import tensorflow [as alias]
# Or: from tensorflow import assert_greater [as alias]
def assert_positive_int32_scalar(value, name):
    """
    Check whether `value` is an integer (or a 0-D `tf.int32` tensor) and positive.
    If `value` is the instance of built-in type, it will be checked directly.
    Otherwise, it will be converted to a `tf.int32` tensor and checked.

    :param value: The value to be checked.
    :param name: The name of `value` used in error message.

    :return: The checked value.
    """
    if isinstance(value, (int, float)):
        if isinstance(value, int) and value > 0:
            return value
        elif isinstance(value, float):
            raise TypeError(name + " must be integer")
        elif value <= 0:
            raise ValueError(name + " must be positive")
    else:
        try:
            tensor = tf.convert_to_tensor(value, tf.int32)
        except (TypeError, ValueError):
            raise TypeError(name + ' must be (convertible to) tf.int32')
        _assert_rank_op = tf.assert_rank(
            tensor, 0,
            message=name + " should be a scalar (0-D Tensor).")
        _assert_positive_op = tf.assert_greater(
            tensor, tf.constant(0, tf.int32),
            message=name + " must be positive")
        with tf.control_dependencies([_assert_rank_op,
                                      _assert_positive_op]):
            tensor = tf.identity(tensor)
        return tensor 
Author: thu-ml, Project: zhusuan, Lines: 35, Source: utils.py
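A hypothetical usage sketch of the helper above (the placeholder and the name n_samples are illustrative, not part of zhusuan): the returned tensor only evaluates once both the rank check and the positivity check pass.

import tensorflow as tf

n_samples = tf.placeholder(tf.int32, shape=[])
checked = assert_positive_int32_scalar(n_samples, 'n_samples')
doubled = checked * 2  # depends on the rank and positivity assertions above

with tf.Session() as sess:
    print(sess.run(doubled, feed_dict={n_samples: 4}))  # 8
    # feeding 0 or a negative value raises tf.errors.InvalidArgumentError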

Example 7: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import assert_greater [as alias]
def __init__(self,
                 logits,
                 n_experiments,
                 dtype=tf.int32,
                 group_ndims=0,
                 check_numerics=False,
                 **kwargs):
        self._logits = tf.convert_to_tensor(logits)
        param_dtype = assert_same_float_dtype(
            [(self._logits, 'Binomial.logits')])

        assert_dtype_is_int_or_float(dtype)

        sign_err_msg = "n_experiments must be positive"
        if isinstance(n_experiments, int):
            if n_experiments <= 0:
                raise ValueError(sign_err_msg)
            self._n_experiments = n_experiments
        else:
            try:
                n_experiments = tf.convert_to_tensor(n_experiments, tf.int32)
            except ValueError:
                raise TypeError('n_experiments must be int32')
            _assert_rank_op = tf.assert_rank(
                n_experiments, 0,
                message="n_experiments should be a scalar (0-D Tensor).")
            _assert_positive_op = tf.assert_greater(
                n_experiments, 0, message=sign_err_msg)
            with tf.control_dependencies([_assert_rank_op,
                                          _assert_positive_op]):
                self._n_experiments = tf.identity(n_experiments)

        self._check_numerics = check_numerics
        super(Binomial, self).__init__(
            dtype=dtype,
            param_dtype=param_dtype,
            is_continuous=False,
            is_reparameterized=False,
            group_ndims=group_ndims,
            **kwargs) 
Author: thu-ml, Project: zhusuan, Lines: 42, Source: univariate.py

Example 8: test_raises_when_equal

# Required import: import tensorflow [as alias]
# Or: from tensorflow import assert_greater [as alias]
def test_raises_when_equal(self):
    with self.test_session():
      small = tf.constant([1, 2], name="small")
      with tf.control_dependencies(
          [tf.assert_greater(small, small, message="fail")]):
        out = tf.identity(small)
      with self.assertRaisesOpError("fail.*small.*small"):
        out.eval() 
Author: tobegit3hub, Project: deep_image_model, Lines: 10, Source: check_ops_test.py

Example 9: test_raises_when_less

# Required import: import tensorflow [as alias]
# Or: from tensorflow import assert_greater [as alias]
def test_raises_when_less(self):
    with self.test_session():
      small = tf.constant([1, 2], name="small")
      big = tf.constant([3, 4], name="big")
      with tf.control_dependencies([tf.assert_greater(small, big)]):
        out = tf.identity(big)
      with self.assertRaisesOpError("small.*big"):
        out.eval() 
Author: tobegit3hub, Project: deep_image_model, Lines: 10, Source: check_ops_test.py

Example 10: test_doesnt_raise_when_greater

# Required import: import tensorflow [as alias]
# Or: from tensorflow import assert_greater [as alias]
def test_doesnt_raise_when_greater(self):
    with self.test_session():
      small = tf.constant([3, 1], name="small")
      big = tf.constant([4, 2], name="big")
      with tf.control_dependencies([tf.assert_greater(big, small)]):
        out = tf.identity(small)
      out.eval() 
Author: tobegit3hub, Project: deep_image_model, Lines: 9, Source: check_ops_test.py

Example 11: test_doesnt_raise_when_greater_and_broadcastable_shapes

# Required import: import tensorflow [as alias]
# Or: from tensorflow import assert_greater [as alias]
def test_doesnt_raise_when_greater_and_broadcastable_shapes(self):
    with self.test_session():
      small = tf.constant([1], name="small")
      big = tf.constant([3, 2], name="big")
      with tf.control_dependencies([tf.assert_greater(big, small)]):
        out = tf.identity(small)
      out.eval() 
Author: tobegit3hub, Project: deep_image_model, Lines: 9, Source: check_ops_test.py

Example 12: test_raises_when_greater_but_non_broadcastable_shapes

# Required import: import tensorflow [as alias]
# Or: from tensorflow import assert_greater [as alias]
def test_raises_when_greater_but_non_broadcastable_shapes(self):
    with self.test_session():
      small = tf.constant([1, 1, 1], name="small")
      big = tf.constant([3, 2], name="big")
      with self.assertRaisesRegexp(ValueError, "must be"):
        with tf.control_dependencies([tf.assert_greater(big, small)]):
          out = tf.identity(small)
        out.eval() 
Author: tobegit3hub, Project: deep_image_model, Lines: 10, Source: check_ops_test.py

Example 13: output

# Required import: import tensorflow [as alias]
# Or: from tensorflow import assert_greater [as alias]
def output(self) -> tf.Tensor:
        # Pad the sequence with a large negative value, but make sure it has
        # non-zero length.
        length = tf.reduce_sum(self._input_mask)
        with tf.control_dependencies([tf.assert_greater(length, 0.5)]):
            padded_input = self._masked_input + 1e-15 * (1 - self._input_mask)
        return tf.reduce_max(padded_input, axis=1) 
Author: ufal, Project: neuralmonkey, Lines: 9, Source: pooling.py

Example 14: _training

# Required import: import tensorflow [as alias]
# Or: from tensorflow import assert_greater [as alias]
def _training(self):
    """Perform multiple training iterations of both policy and value baseline.

    Training on the episodes collected in the memory. Reset the memory
    afterwards. Always returns a summary string.

    Returns:
      Summary tensor.
    """
    with tf.device('/gpu:0' if self._use_gpu else '/cpu:0'):
      with tf.name_scope('training'):
        assert_full = tf.assert_equal(
            self._num_finished_episodes, self._config.update_every)
        with tf.control_dependencies([assert_full]):
          data = self._finished_episodes.data()
        (observ, action, old_policy_params, reward), length = data
        # We set padding frames of the parameters to ones to prevent Gaussians
        # with zero variance. This would result in an infinite KL divergence,
        # which, even if masked out, would result in NaN gradients.
        old_policy_params = tools.nested.map(
            lambda param: self._mask(param, length, 1), old_policy_params)
        with tf.control_dependencies([tf.assert_greater(length, 0)]):
          length = tf.identity(length)
        observ = self._observ_filter.transform(observ)
        reward = self._reward_filter.transform(reward)
        update_summary = self._perform_update_steps(
            observ, action, old_policy_params, reward, length)
        with tf.control_dependencies([update_summary]):
          penalty_summary = self._adjust_penalty(
              observ, old_policy_params, length)
        with tf.control_dependencies([penalty_summary]):
          clear_memory = tf.group(
              self._finished_episodes.clear(),
              self._num_finished_episodes.assign(0))
        with tf.control_dependencies([clear_memory]):
          weight_summary = utility.variable_summaries(
              tf.trainable_variables(), self._config.weight_summaries)
          return tf.summary.merge([
              update_summary, penalty_summary, weight_summary]) 
Author: google-research, Project: batch-ppo, Lines: 41, Source: ppo.py

Example 15: look_at

# Required import: import tensorflow [as alias]
# Or: from tensorflow import assert_greater [as alias]
def look_at(eye, center, world_up):
  """Computes camera viewing matrices.

  Functionality mimes gluLookAt (third_party/GL/glu/include/GLU/glu.h).

  Args:
    eye: 2-D float32 tensor with shape [batch_size, 3] containing the XYZ world
        space position of the camera.
    center: 2-D float32 tensor with shape [batch_size, 3] containing a position
        along the center of the camera's gaze.
    world_up: 2-D float32 tensor with shape [batch_size, 3] specifying the
        world's up direction; the output camera will have no tilt with respect
        to this direction.

  Returns:
    A [batch_size, 4, 4] float tensor containing a right-handed camera
    extrinsics matrix that maps points from world space to points in eye space.
  """
  batch_size = center.shape[0].value
  vector_degeneracy_cutoff = 1e-6
  forward = center - eye
  forward_norm = tf.norm(forward, ord='euclidean', axis=1, keepdims=True)
  tf.assert_greater(
      forward_norm,
      vector_degeneracy_cutoff,
      message='Camera matrix is degenerate because eye and center are close.')
  forward = tf.divide(forward, forward_norm)

  to_side = tf.cross(forward, world_up)
  to_side_norm = tf.norm(to_side, ord='euclidean', axis=1, keepdims=True)
  tf.assert_greater(
      to_side_norm,
      vector_degeneracy_cutoff,
      message='Camera matrix is degenerate because up and gaze are close or '
      'because up is degenerate.')
  to_side = tf.divide(to_side, to_side_norm)
  cam_up = tf.cross(to_side, forward)

  w_column = tf.constant(
      batch_size * [[0., 0., 0., 1.]], dtype=tf.float32)  # [batch_size, 4]
  w_column = tf.reshape(w_column, [batch_size, 4, 1])
  view_rotation = tf.stack(
      [to_side, cam_up, -forward,
       tf.zeros_like(to_side, dtype=tf.float32)],
      axis=1)  # [batch_size, 4, 3] matrix
  view_rotation = tf.concat(
      [view_rotation, w_column], axis=2)  # [batch_size, 4, 4]

  identity_batch = tf.tile(tf.expand_dims(tf.eye(3), 0), [batch_size, 1, 1])
  view_translation = tf.concat([identity_batch, tf.expand_dims(-eye, 2)], 2)
  view_translation = tf.concat(
      [view_translation,
       tf.reshape(w_column, [batch_size, 1, 4])], 1)
  camera_matrices = tf.matmul(view_rotation, view_translation)
  return camera_matrices 
Author: google, Project: tf_mesh_renderer, Lines: 57, Source: camera_utils.py
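One detail worth noting: look_at creates the two assertion ops but does not pass them to tf.control_dependencies, so in graph mode nothing forces them to run. Below is a standalone sketch (written for this article, not taken from tf_mesh_renderer) of how such a degeneracy check is usually wired in, matching the pattern of the earlier examples.

import tensorflow as tf

eye = tf.constant([[0., 0., 1.]])
center = tf.constant([[0., 0., 0.]])
forward = center - eye
forward_norm = tf.norm(forward, ord='euclidean', axis=1, keepdims=True)
assert_op = tf.assert_greater(
    forward_norm, 1e-6,
    message='Camera matrix is degenerate because eye and center are close.')
with tf.control_dependencies([assert_op]):
    forward = tf.divide(forward, forward_norm)  # runs only after the norm check

with tf.Session() as sess:
    print(sess.run(forward))  # [[ 0.  0. -1.]]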


Note: The tensorflow.assert_greater examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code belongs to the original authors, and distribution or use should follow the corresponding project's license. Please do not reproduce this article without permission.