

Python v1.pow Method Code Examples

This article collects typical usage examples of Python's tensorflow.compat.v1.pow method. If you are wondering how to call v1.pow, what it is for, or where to find examples of it in practice, the curated code samples below may help. You can also browse further usage examples from the tensorflow.compat.v1 module.


Below are 15 code examples of the v1.pow method, ordered by popularity.

Example 1: smooth_schedule

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import pow [as alias]
def smooth_schedule(step, init_step, final_step, init_value, final_value,
                    mid_point=.25, beta=4.):
  """Smooth schedule that slowly morphs into a linear schedule."""
  assert final_value > init_value
  assert final_step >= init_step
  assert beta >= 2.
  assert mid_point >= 0. and mid_point <= 1.
  mid_step = int((final_step - init_step) * mid_point) + init_step
  if mid_step <= init_step:
    alpha = 1.
  else:
    t = (mid_step - init_step) ** (beta - 1.)
    alpha = (final_value - init_value) / ((final_step - mid_step) * beta * t +
                                          (mid_step - init_step) * t)
  mid_value = alpha * (mid_step - init_step) ** beta + init_value
  # TensorFlow ops: polynomial ramp before mid_step, linear schedule after.
  is_ramp = tf.cast(step > init_step, tf.float32)
  is_linear = tf.cast(step >= mid_step, tf.float32)
  return (is_ramp * (
      (1. - is_linear) * (
          init_value +
          alpha * tf.pow(tf.cast(step - init_step, tf.float32), beta)) +
      is_linear * linear_schedule(
          step, mid_step, final_step, mid_value, final_value)) +
          (1. - is_ramp) * init_value) 
Author: deepmind | Project: interval-bound-propagation | Lines: 27 | Source: utils.py
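
A minimal usage sketch of my own (not from the repository): it assumes a linear_schedule helper with the signature used above — defined here so the snippet is self-contained — and evaluates the schedule at a few steps in a TF1 session.

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()  # required for placeholders/sessions when running under TF2

def linear_schedule(step, init_step, final_step, init_value, final_value):
  """Assumed helper: linear interpolation of the value, clipped to its range."""
  rate = tf.cast(step - init_step, tf.float32) / float(final_step - init_step)
  value = init_value + rate * (final_value - init_value)
  return tf.clip_by_value(value, min(init_value, final_value),
                          max(init_value, final_value))

step = tf.placeholder(tf.int32, shape=[])
eps = smooth_schedule(step, init_step=0, final_step=100,
                      init_value=0., final_value=1.)  # mid_step = 25
with tf.Session() as sess:
  for s in [0, 10, 25, 50, 100]:
    print(s, sess.run(eps, feed_dict={step: s}))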

Example 2: _apply_gradients

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import pow [as alias]
def _apply_gradients(self, grads, x, optim_state):
    """Applies gradients."""
    lr = self._lr_fn(optim_state.t)
    new_optim_state = self._State(
        t=optim_state.t + 1,
        m=[None] * len(x),
        u=[None] * len(x))
    t = tf.cast(new_optim_state.t, tf.float32)
    new_x = [None] * len(x)
    for i in range(len(x)):
      g = grads[i]
      m_old = optim_state.m[i]
      u_old = optim_state.u[i]
      new_optim_state.m[i] = self._beta1 * m_old + (1. - self._beta1) * g
      new_optim_state.u[i] = self._beta2 * u_old + (1. - self._beta2) * g * g
      m_hat = new_optim_state.m[i] / (1. - tf.pow(self._beta1, t))
      u_hat = new_optim_state.u[i] / (1. - tf.pow(self._beta2, t))
      new_x[i] = x[i] - lr * m_hat / (tf.sqrt(u_hat) + self._epsilon)
    return new_x, new_optim_state 
Author: deepmind | Project: interval-bound-propagation | Lines: 21 | Source: attacks.py
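
For reference, the loop above is the Adam update rule (here driving an attack rather than training), with m the first moment, u the second moment, and \eta the learning rate returned by self._lr_fn; the two tf.pow calls implement the bias corrections:

\[
m_t = \beta_1 m_{t-1} + (1-\beta_1)\,g_t, \qquad
u_t = \beta_2 u_{t-1} + (1-\beta_2)\,g_t^2,
\]
\[
\hat m_t = \frac{m_t}{1-\beta_1^{t}}, \qquad
\hat u_t = \frac{u_t}{1-\beta_2^{t}}, \qquad
x_t = x_{t-1} - \eta\,\frac{\hat m_t}{\sqrt{\hat u_t}+\epsilon}.
\]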

Example 3: _make_net

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import pow [as alias]
def _make_net(self, reg):
        '''
        Helper method to create a new net with a specified regularisation coefficient. The net is not
        initialised, so you must call init() or load() on it before any other method.

        Args:
            reg (float): Regularisation coefficient.
        '''
        def gelu_fast(_x):
            return 0.5 * _x * (1 + tf.tanh(tf.sqrt(2 / np.pi) * (_x + 0.044715 * tf.pow(_x, 3))))
        creator = lambda: SingleNeuralNet(
                    self.num_params,
                    [64]*5, [gelu_fast]*5,
                    0.2, # train_threshold_ratio
                    16, # batch_size
                    1., # keep_prob
                    reg,
                    self.losses_list,
                    learner_archive_dir=self.learner_archive_dir,
                    start_datetime=self.start_datetime)
        return SampledNeuralNet(creator, 1) 
Author: michaelhush | Project: M-LOOP | Lines: 23 | Source: neuralnet.py

Example 4: padded_rmse

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import pow [as alias]
def padded_rmse(predictions, labels, weights_fn=common_layers.weights_all):
  predictions = tf.to_float(predictions)
  labels = tf.to_float(labels)
  predictions, labels = common_layers.pad_with_zeros(predictions, labels)
  weights = weights_fn(labels)
  error = tf.pow(predictions - labels, 2)
  error_sqrt = tf.sqrt(tf.reduce_mean(error * weights))
  return error_sqrt, tf.reduce_sum(weights) 
Author: tensorflow | Project: tensor2tensor | Lines: 10 | Source: metrics.py
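
In formula form (with N the total number of padded positions; note that tf.reduce_mean divides by N rather than by the weight sum):

\[
\mathrm{rmse} = \sqrt{\frac{1}{N}\sum_{i=1}^{N} w_i\,\bigl(p_i - y_i\bigr)^2}
\]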

Example 5: unpadded_mse

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import pow [as alias]
def unpadded_mse(predictions, labels, weights_fn=common_layers.weights_all):
  predictions = tf.to_float(predictions)
  labels = tf.to_float(labels)
  weights = weights_fn(labels)
  error = tf.pow(predictions - labels, 2)
  mean_error = tf.reduce_mean(error * weights)
  return mean_error, tf.reduce_sum(weights) 
Author: tensorflow | Project: tensor2tensor | Lines: 9 | Source: metrics.py

Example 6: padded_variance_explained

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import pow [as alias]
def padded_variance_explained(predictions,
                              labels,
                              weights_fn=common_layers.weights_all):
  """Explained variance, also known as R^2."""
  predictions, labels = common_layers.pad_with_zeros(predictions, labels)
  targets = labels
  weights = weights_fn(targets)

  y_bar = tf.reduce_mean(weights * targets)
  tot_ss = tf.reduce_sum(weights * tf.pow(targets - y_bar, 2))
  res_ss = tf.reduce_sum(weights * tf.pow(targets - predictions, 2))
  r2 = 1. - res_ss / tot_ss
  return r2, tf.reduce_sum(weights) 
Author: tensorflow | Project: tensor2tensor | Lines: 15 | Source: metrics.py
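
In formula form, this is the coefficient of determination; one caveat worth noting is that y_bar is computed as the mean of weights * targets, which equals the weighted mean of the targets only when the weights are uniform (as with weights_all):

\[
R^2 = 1 - \frac{\sum_i w_i\,(y_i - \hat y_i)^2}{\sum_i w_i\,(y_i - \bar y)^2},
\qquad \bar y = \operatorname{mean}(w \odot y).
\]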

Example 7: _get_cubic_root

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import pow [as alias]
def _get_cubic_root(self):
    """Get the cubic root."""
    # We minimize f(x) = x^2 D^2 + (1 - x)^4 * C / h_min^2, where x = sqrt(mu).
    # Setting f'(x) = 0 gives p * x = (1 - x)^3, with p = (D^2 h_min^2) / (2 C).
    # Substituting x = y + 1 yields the depressed cubic y^3 + p*y = q, where
    # q = -p.
    # We use Vieta's substitution to compute the root.
    # There is only one real solution y (which is in [0, 1] ).
    # http://mathworld.wolfram.com/VietasSubstitution.html
    assert_array = [
        tf.Assert(
            tf.logical_not(tf.is_nan(self._dist_to_opt_avg)),
            [self._dist_to_opt_avg,]),
        tf.Assert(
            tf.logical_not(tf.is_nan(self._h_min)),
            [self._h_min,]),
        tf.Assert(
            tf.logical_not(tf.is_nan(self._grad_var)),
            [self._grad_var,]),
        tf.Assert(
            tf.logical_not(tf.is_inf(self._dist_to_opt_avg)),
            [self._dist_to_opt_avg,]),
        tf.Assert(
            tf.logical_not(tf.is_inf(self._h_min)),
            [self._h_min,]),
        tf.Assert(
            tf.logical_not(tf.is_inf(self._grad_var)),
            [self._grad_var,])
    ]
    with tf.control_dependencies(assert_array):
      p = self._dist_to_opt_avg**2 * self._h_min**2 / 2 / self._grad_var
      w3 = (-tf.sqrt(p**2 + 4.0 / 27.0 * p**3) - p) / 2.0
      w = tf.sign(w3) * tf.pow(tf.abs(w3), 1.0/3.0)
      y = w - p / 3.0 / w
      x = y + 1
    return x 
Author: tensorflow | Project: tensor2tensor | Lines: 39 | Source: yellowfin.py
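
For the record, here is the algebra that produces w3 and w above. Vieta's substitution y = w - p/(3w) turns the depressed cubic y^3 + p*y = q into a quadratic in w^3; with q = -p this gives exactly the expression computed in the code:

\[
w^3 - \frac{p^3}{27\,w^3} = q
\;\Longrightarrow\;
(w^3)^2 - q\,w^3 - \frac{p^3}{27} = 0
\;\Longrightarrow\;
w^3 = \frac{q - \sqrt{q^2 + \tfrac{4}{27}\,p^3}}{2}
    = \frac{-p - \sqrt{p^2 + \tfrac{4}{27}\,p^3}}{2},
\]
\[
y = w - \frac{p}{3w}, \qquad x = \sqrt{\mu} = y + 1 .
\]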

Example 8: adafactor_decay_rate_adam

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import pow [as alias]
def adafactor_decay_rate_adam(beta2):
  """Second-moment decay rate like Adam, subsuming the correction factor.

  Args:
    beta2: a float between 0 and 1
  Returns:
    a scalar
  """
  t = tf.to_float(tf.train.get_or_create_global_step()) + 1.0
  decay = beta2 * (1.0 - tf.pow(beta2, t - 1.0)) / (1.0 - tf.pow(beta2, t))
  # decay = tf.cond(tf.equal(t, 1.0), lambda: beta2, lambda: decay)
  return decay 
Author: tensorflow | Project: tensor2tensor | Lines: 14 | Source: adafactor.py
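
To see why this "subsumes the correction factor": let \hat v_t = v_t / (1 - \beta_2^t) be Adam's bias-corrected second moment, with v_t = \beta_2 v_{t-1} + (1 - \beta_2) g_t^2. Substituting the recurrence shows that \hat v_t itself follows an exponential moving average whose step-dependent decay rate is exactly the expression returned above:

\[
\hat v_t
= \frac{\beta_2\,(1-\beta_2^{t-1})}{1-\beta_2^{t}}\,\hat v_{t-1}
+ \frac{1-\beta_2}{1-\beta_2^{t}}\,g_t^2,
\qquad
\frac{1-\beta_2}{1-\beta_2^{t}} = 1 - \frac{\beta_2\,(1-\beta_2^{t-1})}{1-\beta_2^{t}} .
\]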

Example 9: adafactor_decay_rate_pow

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import pow [as alias]
def adafactor_decay_rate_pow(exponent):
  """Second moment decay rate where memory-length grows as step_num^exponent.

  Args:
    exponent: a float between 0 and 1
  Returns:
    a scalar
  """
  return 1.0 - tf.pow((step_num() + 1.0), -exponent) 
Author: tensorflow | Project: tensor2tensor | Lines: 11 | Source: adafactor.py

Example 10: adafactor_optimizer_from_hparams

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import pow [as alias]
def adafactor_optimizer_from_hparams(hparams, lr):
  """Create an Adafactor optimizer based on model hparams.

  Args:
    hparams: model hyperparameters
    lr: learning rate scalar.
  Returns:
    an AdafactorOptimizer
  Raises:
    ValueError: on illegal values
  """
  if hparams.optimizer_adafactor_decay_type == "adam":
    decay_rate = adafactor_decay_rate_adam(
        hparams.optimizer_adafactor_beta2)
  elif hparams.optimizer_adafactor_decay_type == "pow":
    decay_rate = adafactor_decay_rate_pow(
        hparams.optimizer_adafactor_memory_exponent)
  else:
    raise ValueError("unknown optimizer_adafactor_decay_type")
  if hparams.weight_dtype == "bfloat16":
    parameter_encoding = quantization.EighthPowerEncoding()
  else:
    parameter_encoding = None
  return AdafactorOptimizer(
      multiply_by_parameter_scale=(
          hparams.optimizer_adafactor_multiply_by_parameter_scale),
      learning_rate=lr,
      decay_rate=decay_rate,
      beta1=hparams.optimizer_adafactor_beta1,
      clipping_threshold=hparams.optimizer_adafactor_clipping_threshold,
      factored=hparams.optimizer_adafactor_factored,
      simulated_quantize_bits=getattr(
          hparams, "simulated_parameter_quantize_bits", 0),
      parameter_encoding=parameter_encoding,
      use_locking=False,
      name="Adafactor") 
Author: tensorflow | Project: tensor2tensor | Lines: 38 | Source: adafactor.py

Example 11: encode

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import pow [as alias]
def encode(self, x, noise):
    x = tf.to_float(x)
    # we can't use tf.pow(..., 8.0) because of a high-error approximation
    # on TPU.  Instead we square three times.
    x = tf.sign(x) * tf.square(tf.square(tf.square(tf.abs(x) * 128.0)))
    x = _to_bfloat16_unbiased(x, noise)
    return x 
Author: tensorflow | Project: tensor2tensor | Lines: 9 | Source: quantization.py
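
A quick numeric check of the identity the comment relies on, ((|x|^2)^2)^2 = |x|^8 — a minimal NumPy sketch of my own, not part of the repository:

import numpy as np

a = np.linspace(-2.0, 2.0, 9, dtype=np.float32)
three_squares = np.square(np.square(np.square(np.abs(a))))  # three squarings
direct_pow = np.abs(a) ** 8                                 # direct eighth power
assert np.allclose(three_squares, direct_pow)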

Example 12: max_pad_length

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import pow [as alias]
def max_pad_length(self, features):
    """Finds max padding length.

    If target length not specified use fixed padding
    length from hparams.max_length.

    Args:
      features: Dictionary with input and target tensors

    Returns:
      tf.Tensor: Length of input and output sequence; always a power of 2.
    """

    if self.hparams.force_max_length or features.get("targets") is None:
      assert math.log(self.hparams.max_length, 2).is_integer(), \
        "hparams.max_length should be a power of 2"

      return self.hparams.max_length

    length = tf.shape(features["inputs"])[1]
    targets_length = tf.shape(features["targets"])[1]
    length = tf.maximum(length, targets_length)

    p = tf.log(tf.cast(length, tf.float32)) / tf.log(2.0)
    p = tf.cast(tf.ceil(p), tf.int32)
    return tf.pow(2, p) 
Author: tensorflow | Project: tensor2tensor | Lines: 28 | Source: shuffle_network.py
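
The final three lines round the longer sequence length up to the next power of two; a plain-Python sketch of the same arithmetic (my own illustration):

import math

for length in [100, 128, 129]:
    p = math.ceil(math.log2(length))   # mirrors tf.log(length) / tf.log(2.0)
    print(length, '->', 2 ** p)        # 100 -> 128, 128 -> 128, 129 -> 256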

Example 13: real_l2_loss

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import pow [as alias]
def real_l2_loss(top_out, targets, model_hparams, vocab_size, weights_fn):
  del model_hparams, vocab_size  # unused arg
  predictions = top_out
  if (len(common_layers.shape_list(top_out)) != len(
      common_layers.shape_list(targets))):
    predictions = tf.squeeze(top_out, axis=[-1])
  with tf.name_scope("l2"):
    weights = weights_fn(targets)
    l2 = tf.pow(predictions - targets, 2)
    return tf.reduce_sum(l2 * weights), tf.reduce_sum(weights) 
Author: tensorflow | Project: tensor2tensor | Lines: 12 | Source: modalities.py

Example 14: gelu

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import pow [as alias]
def gelu(x):
  """Gaussian Error Linear Unit.

  This is a smoother version of the RELU.
  Original paper: https://arxiv.org/abs/1606.08415

  Args:
    x: float Tensor to perform activation.

  Returns:
    x with the GELU activation applied.
  """
  cdf = 0.5 * (1.0 + tf.tanh(
      (np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
  return x * cdf 
Author: tensorflow | Project: tensor2tensor | Lines: 17 | Source: common_layers.py
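
For reference, the exact GELU is x * Phi(x), with Phi the standard normal CDF; the code computes the tanh approximation from the original paper:

\[
\mathrm{GELU}(x) = x\,\Phi(x)
\approx \frac{x}{2}\left(1 + \tanh\!\left(\sqrt{\frac{2}{\pi}}\left(x + 0.044715\,x^3\right)\right)\right).
\]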

Example 15: testAreaMean

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import pow [as alias]
def testAreaMean(self):
    batch_size = 256
    feature_len = 100
    memory_height = 10
    heads = 2
    key_len = 2
    depth = 128
    max_area_height = 3
    max_area_width = 3
    queries = tf.random_uniform([batch_size, heads, key_len, depth],
                                minval=-10.0, maxval=10.0)
    features = tf.random_uniform([batch_size, heads, feature_len, depth],
                                 minval=-10.0, maxval=10.0)
    target_values = tf.random_uniform([batch_size, heads, key_len, depth],
                                      minval=-0.2, maxval=0.2)
    keys = tf.layers.dense(features, units=depth)
    values = tf.layers.dense(features, units=depth)
    mean_attention = area_attention.dot_product_area_attention(
        queries, keys, values,
        bias=None,
        area_key_mode="mean",
        name="mean_key",
        max_area_width=max_area_width,
        max_area_height=max_area_height,
        memory_height=memory_height)
    mean_gradients = tf.gradients(
        tf.reduce_mean(
            tf.pow(target_values - mean_attention, 2)), features)
    with self.test_session() as session:
      session.run(tf.global_variables_initializer())
      result = session.run([mean_gradients])
    self.assertFalse(np.any(np.logical_not(np.isfinite(result)))) 
Author: tensorflow | Project: tensor2tensor | Lines: 34 | Source: area_attention_test.py


Note: The tensorflow.compat.v1.pow method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Please consult each project's license before using or redistributing the code, and do not republish without permission.