

Python math.exp Method Code Examples

This article collects typical usage examples of the math.exp method in Python. If you are unsure exactly how to use math.exp, how it behaves, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the math module that the method belongs to.


The following presents 15 code examples of the math.exp method, ordered by popularity by default.

Example 1: _compute_softmax

# Required module: import math [as alias]
# Or: from math import exp [as alias]
def _compute_softmax(scores):
    """Compute softmax probability over raw logits."""
    if not scores:
        return []

    max_score = None
    for score in scores:
        if max_score is None or score > max_score:
            max_score = score

    exp_scores = []
    total_sum = 0.0
    for score in scores:
        x = math.exp(score - max_score)
        exp_scores.append(x)
        total_sum += x

    probs = []
    for score in exp_scores:
        probs.append(score / total_sum)
    return probs 
Developer: ymcui, Project: cmrc2019, Lines of code: 23, Source file: run_cmrc2019_baseline.py
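
A minimal usage sketch (not from the project above; the logits are illustrative values) showing what _compute_softmax returns for a small list of raw scores:

logits = [1.0, 2.0, 3.0]
probs = _compute_softmax(logits)
print(probs)       # roughly [0.090, 0.245, 0.665]
print(sum(probs))  # 1.0 up to floating-point rounding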

Example 2: evaluate

# Required module: import math [as alias]
# Or: from math import exp [as alias]
def evaluate(mod, data_iter, epoch, log_interval):
    """ Run evaluation on cpu. """
    start = time.time()
    total_L = 0.0
    nbatch = 0
    density = 0
    mod.set_states(value=0)
    for batch in data_iter:
        mod.forward(batch, is_train=False)
        outputs = mod.get_outputs(merge_multi_context=False)
        states = outputs[:-1]
        total_L += outputs[-1][0]
        mod.set_states(states=states)
        nbatch += 1
        # don't include padding data in the test perplexity
        density += batch.data[1].mean()
        if (nbatch + 1) % log_interval == 0:
            logging.info("Eval batch %d loss : %.7f" % (nbatch, (total_L / density).asscalar()))
    data_iter.reset()
    loss = (total_L / density).asscalar()
    ppl = math.exp(loss) if loss < 100 else 1e37
    end = time.time()
    logging.info('Iter[%d]\t\t CE loss %.7f, ppl %.7f. Eval duration = %.2f seconds ' % \
                 (epoch, loss, ppl, end - start))
    return loss 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 27, Source file: run_utils.py
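
The perplexity reported above is just the exponential of the average cross-entropy loss, capped at 1e37 to avoid overflow when the loss diverges. A tiny illustrative computation of that relationship (the loss value is made up):

import math

loss = 4.6
ppl = math.exp(loss) if loss < 100 else 1e37
print(ppl)  # ~99.5, i.e. roughly as uncertain as a uniform choice over ~100 tokens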

Example 3: _compute_delta

# Required module: import math [as alias]
# Or: from math import exp [as alias]
def _compute_delta(self, log_moments, eps):
    """Compute delta for given log_moments and eps.

    Args:
      log_moments: the log moments of privacy loss, in the form of pairs
        of (moment_order, log_moment)
      eps: the target epsilon.
    Returns:
      delta
    """
    min_delta = 1.0
    for moment_order, log_moment in log_moments:
      if math.isinf(log_moment) or math.isnan(log_moment):
        sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
        continue
      if log_moment < moment_order * eps:
        min_delta = min(min_delta,
                        math.exp(log_moment - moment_order * eps))
    return min_delta 
Developer: ringringyi, Project: DOTA_models, Lines of code: 21, Source file: accountant.py

Example 4: compute_a

# Required module: import math [as alias]
# Or: from math import exp [as alias]
def compute_a(sigma, q, lmbd, verbose=False):
  lmbd_int = int(math.ceil(lmbd))
  if lmbd_int == 0:
    return 1.0

  a_lambda_first_term_exact = 0
  a_lambda_second_term_exact = 0
  for i in xrange(lmbd_int + 1):
    coef_i = scipy.special.binom(lmbd_int, i) * (q ** i)
    s1, s2 = 0, 0
    for j in xrange(i + 1):
      coef_j = scipy.special.binom(i, j) * (-1) ** (i - j)
      s1 += coef_j * np.exp((j * j - j) / (2.0 * (sigma ** 2)))
      s2 += coef_j * np.exp((j * j + j) / (2.0 * (sigma ** 2)))
    a_lambda_first_term_exact += coef_i * s1
    a_lambda_second_term_exact += coef_i * s2

  a_lambda_exact = ((1.0 - q) * a_lambda_first_term_exact +
                    q * a_lambda_second_term_exact)
  if verbose:
    print "A: by binomial expansion    {} = {} + {}".format(
        a_lambda_exact,
        (1.0 - q) * a_lambda_first_term_exact,
        q * a_lambda_second_term_exact)
  return _to_np_float64(a_lambda_exact) 
Developer: ringringyi, Project: DOTA_models, Lines of code: 27, Source file: gaussian_moments.py

Example 5: _compute_delta

# Required module: import math [as alias]
# Or: from math import exp [as alias]
def _compute_delta(log_moments, eps):
  """Compute delta for given log_moments and eps.

  Args:
    log_moments: the log moments of privacy loss, in the form of pairs
      of (moment_order, log_moment)
    eps: the target epsilon.
  Returns:
    delta
  """
  min_delta = 1.0
  for moment_order, log_moment in log_moments:
    if moment_order == 0:
      continue
    if math.isinf(log_moment) or math.isnan(log_moment):
      sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
      continue
    if log_moment < moment_order * eps:
      min_delta = min(min_delta,
                      math.exp(log_moment - moment_order * eps))
  return min_delta 
Developer: ringringyi, Project: DOTA_models, Lines of code: 23, Source file: gaussian_moments.py
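
A hypothetical usage sketch for the module-level _compute_delta above; the (moment_order, log_moment) pairs are made-up values purely for illustration:

log_moments = [(2, 0.5), (4, 1.2), (8, 9.0)]
eps = 1.0
# order 2 contributes exp(0.5 - 2.0), order 4 contributes exp(1.2 - 4.0),
# order 8 is skipped because 9.0 >= 8 * 1.0
print(_compute_delta(log_moments, eps))  # ~0.0608, i.e. exp(-2.8)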

Example 6: compute_q_noisy_max

# Required module: import math [as alias]
# Or: from math import exp [as alias]
def compute_q_noisy_max(counts, noise_eps):
  """returns ~ Pr[outcome != winner].

  Args:
    counts: a list of scores
    noise_eps: privacy parameter for noisy_max
  Returns:
    q: the probability that outcome is different from true winner.
  """
  # For noisy max, we only get an upper bound.
  # Pr[ j beats i* ] \leq (2 + gap(j, i*)) / (4 exp(gap(j, i*)))
  # proof at http://mathoverflow.net/questions/66763/
  # tight-bounds-on-probability-of-sum-of-laplace-random-variables

  winner = np.argmax(counts)
  counts_normalized = noise_eps * (counts - counts[winner])
  counts_rest = np.array(
      [counts_normalized[i] for i in xrange(len(counts)) if i != winner])
  q = 0.0
  for c in counts_rest:
    gap = -c
    q += (gap + 2.0) / (4.0 * math.exp(gap))
  return min(q, 1.0 - (1.0/len(counts))) 
Developer: ringringyi, Project: DOTA_models, Lines of code: 25, Source file: analysis.py
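
A small usage sketch with made-up counts. Because counts is subtracted elementwise, it should be a NumPy array rather than a plain list; note also that the snippet uses xrange, so as written it targets Python 2 (substitute range on Python 3):

import numpy as np

counts = np.array([50.0, 30.0, 10.0])  # hypothetical vote counts; index 0 is the winner
q = compute_q_noisy_max(counts, noise_eps=0.1)
print(q)  # upper bound on Pr[outcome != winner], capped at 1 - 1/len(counts)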

Example 7: compute_q_noisy_max_approx

# Required module: import math [as alias]
# Or: from math import exp [as alias]
def compute_q_noisy_max_approx(counts, noise_eps):
  """returns ~ Pr[outcome != winner].

  Args:
    counts: a list of scores
    noise_eps: privacy parameter for noisy_max
  Returns:
    q: the probability that outcome is different from true winner.
  """
  # For noisy max, we only get an upper bound.
  # Pr[ j beats i* ] \leq (2 + gap(j, i*)) / (4 exp(gap(j, i*)))
  # proof at http://mathoverflow.net/questions/66763/
  # tight-bounds-on-probability-of-sum-of-laplace-random-variables
  # This code uses an approximation that is faster and easier
  # to get local sensitivity bound on.

  winner = np.argmax(counts)
  counts_normalized = noise_eps * (counts - counts[winner])
  counts_rest = np.array(
      [counts_normalized[i] for i in xrange(len(counts)) if i != winner])
  gap = -max(counts_rest)
  q = (len(counts) - 1) * (gap + 2.0) / (4.0 * math.exp(gap))
  return min(q, 1.0 - (1.0/len(counts))) 
Developer: ringringyi, Project: DOTA_models, Lines of code: 25, Source file: analysis.py

Example 8: smoothed_sens

# Required module: import math [as alias]
# Or: from math import exp [as alias]
def smoothed_sens(counts, noise_eps, l, beta):
  """Compute beta-smooth sensitivity.

  Args:
    counts: array of scores
    noise_eps: noise parameter
    l: moment of interest
    beta: smoothness parameter
  Returns:
    smooth_sensitivity: a beta smooth upper bound
  """
  k = 0
  smoothed_sensitivity = sens_at_k(counts, noise_eps, l, k)
  while k < max(counts):
    k += 1
    sensitivity_at_k = sens_at_k(counts, noise_eps, l, k)
    smoothed_sensitivity = max(
        smoothed_sensitivity,
        math.exp(-beta * k) * sensitivity_at_k)
    if sensitivity_at_k == 0.0:
      break
  return smoothed_sensitivity 
Developer: ringringyi, Project: DOTA_models, Lines of code: 24, Source file: analysis.py

Example 9: select_action

# Required module: import math [as alias]
# Or: from math import exp [as alias]
def select_action(self, state):
        """
        The action selection function, it either uses the model to choose an action or samples one uniformly.
        :param state: current state of the model
        :return:
        """
        if self.cuda:
            state = state.cuda()
        sample = random.random()
        eps_threshold = self.config.eps_end + (self.config.eps_start - self.config.eps_end) * math.exp(
            -1. * self.current_iteration / self.config.eps_decay)
        self.current_iteration += 1
        if sample > eps_threshold:
            with torch.no_grad():
                return self.policy_model(state).max(1)[1].view(1, 1)
        else:
            return torch.tensor([[random.randrange(2)]], device=self.device, dtype=torch.long) 
Developer: moemen95, Project: Pytorch-Project-Template, Lines of code: 19, Source file: dqn.py
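
The threshold above is the usual exponentially decaying epsilon-greedy schedule. A standalone sketch of the same formula with hypothetical hyperparameters makes the decay easier to see:

import math

eps_start, eps_end, eps_decay = 0.9, 0.05, 200.0  # illustrative config values
for step in (0, 200, 1000):
    eps = eps_end + (eps_start - eps_end) * math.exp(-1.0 * step / eps_decay)
    print(step, round(eps, 3))  # 0 -> 0.9, 200 -> ~0.363, 1000 -> ~0.056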

Example 10: get_timing_signal

# Required module: import math [as alias]
# Or: from math import exp [as alias]
def get_timing_signal(length,
                      min_timescale=1,
                      max_timescale=1e4,
                      num_timescales=16):
  """Create Tensor of sinusoids of different frequencies.

  Args:
    length: Length of the Tensor to create, i.e. Number of steps.
    min_timescale: a float
    max_timescale: a float
    num_timescales: an int

  Returns:
    Tensor of shape (length, 2*num_timescales)
  """
  positions = tf.to_float(tf.range(length))
  log_timescale_increment = (
      math.log(max_timescale / min_timescale) / (num_timescales - 1))
  inv_timescales = min_timescale * tf.exp(
      tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
  scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0)
  return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1) 
Developer: akzaidi, Project: fine-lm, Lines of code: 24, Source file: common_layers.py

Example 11: mu_law_decoding

# Required module: import math [as alias]
# Or: from math import exp [as alias]
def mu_law_decoding(
        x_mu: Tensor,
        quantization_channels: int
) -> Tensor:
    r"""Decode mu-law encoded signal.  For more info see the
    `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_

    This expects an input with values between 0 and quantization_channels - 1
    and returns a signal scaled between -1 and 1.

    Args:
        x_mu (Tensor): Input tensor
        quantization_channels (int): Number of channels

    Returns:
        Tensor: Input after mu-law decoding
    """
    mu = quantization_channels - 1.0
    if not x_mu.is_floating_point():
        x_mu = x_mu.to(torch.float)
    mu = torch.tensor(mu, dtype=x_mu.dtype)
    x = ((x_mu) / mu) * 2 - 1.0
    x = torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.0) / mu
    return x 
Developer: pytorch, Project: audio, Lines of code: 26, Source file: functional.py
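
A hypothetical round-trip sketch, assuming torch is available and using torchaudio.functional.mu_law_encoding as the companion encoder (the toy waveform below is illustrative):

import torch
import torchaudio.functional as F

waveform = torch.linspace(-1.0, 1.0, steps=8)                      # toy signal in [-1, 1]
encoded = F.mu_law_encoding(waveform, quantization_channels=256)   # integers in [0, 255]
decoded = mu_law_decoding(encoded, quantization_channels=256)      # back to roughly [-1, 1]
print(torch.max(torch.abs(decoded - waveform)))                    # small quantization error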

Example 12: _gaussian

# Required module: import math [as alias]
# Or: from math import exp [as alias]
def _gaussian(
        size=3, sigma=0.25, amplitude=1, normalize=False, width=None,
        height=None, sigma_horz=None, sigma_vert=None, mean_horz=0.5,
        mean_vert=0.5):
    # handle some defaults
    if width is None:
        width = size
    if height is None:
        height = size
    if sigma_horz is None:
        sigma_horz = sigma
    if sigma_vert is None:
        sigma_vert = sigma
    center_x = mean_horz * width + 0.5
    center_y = mean_vert * height + 0.5
    gauss = np.empty((height, width), dtype=np.float32)
    # generate kernel
    for i in range(height):
        for j in range(width):
            gauss[i][j] = amplitude * math.exp(-(math.pow((j + 1 - center_x) / (
                sigma_horz * width), 2) / 2.0 + math.pow((i + 1 - center_y) / (sigma_vert * height), 2) / 2.0))
    if normalize:
        gauss = gauss / np.sum(gauss)
    return gauss 
Developer: protossw512, Project: AdaptiveWingLoss, Lines of code: 26, Source file: utils.py
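
A brief usage sketch with arbitrarily chosen parameters: generating a normalized 7x7 Gaussian kernel and checking that it sums to 1:

import numpy as np

kernel = _gaussian(size=7, sigma=0.25, normalize=True)
print(kernel.shape)            # (7, 7)
print(float(np.sum(kernel)))   # ~1.0, because normalize=True divides by the kernel sum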

Example 13: rampweight

# Required module: import math [as alias]
# Or: from math import exp [as alias]
def rampweight(iteration):
    ramp_up_end = 32000
    ramp_down_start = 100000

    if(iteration<ramp_up_end):
        ramp_weight = math.exp(-5 * math.pow((1 - iteration / ramp_up_end),2))
    elif(iteration>ramp_down_start):
        ramp_weight = math.exp(-12.5 * math.pow((1 - (120000 - iteration) / 20000),2)) 
    else:
        ramp_weight = 1 


    if(iteration==0):
        ramp_weight = 0

    return ramp_weight 
Developer: soo89, Project: CSD-SSD, Lines of code: 18, Source file: train_csd.py
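
A short illustrative check of the schedule above: the weight ramps up from 0 toward 1 by iteration 32000, holds at 1, and ramps back down after iteration 100000 (printed values are approximate):

for it in (0, 16000, 32000, 60000, 110000):
    print(it, round(rampweight(it), 4))  # 0, ~0.2865, 1, 1, ~0.0439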

Example 14: erfcc

# Required module: import math [as alias]
# Or: from math import exp [as alias]
def erfcc(x):
        """Complementary error function."""
        z = abs(x)
        t = 1 / (1 + 0.5 * z)
        r = t * math.exp(-z * z -
                         1.26551223 + t *
                         (1.00002368 + t *
                          (.37409196 + t *
                           (.09678418 + t *
                            (-.18628806 + t *
                             (.27886807 + t *
                              (-1.13520398 + t *
                               (1.48851587 + t *
                                (-.82215223 + t * .17087277)))))))))
        if x >= 0.:
            return r
        else:
            return 2. - r 
Developer: rafasashi, Project: razzy-spinner, Lines of code: 20, Source file: gale_church.py
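
A quick sanity check (illustrative): this rational approximation tracks the standard library's math.erfc closely for moderate arguments:

import math

for x in (-1.0, 0.0, 0.5, 2.0):
    print(x, erfcc(x), math.erfc(x))  # the two values agree closely for each x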

Example 15: sigmoid

# Required module: import math [as alias]
# Or: from math import exp [as alias]
def sigmoid(self, a): #numerically stable sigmoid function
        return math.exp(-np.logaddexp(0, -a)) -0.5 #compresses values from 0 to 1 and is reduced by 0.5 to get between -1/2 and 1/2 
Developer: gcallah, Project: indras_net, Lines of code: 4, Source file: politicalSine.py
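
A short illustrative check: np.logaddexp(0, -a) equals log(1 + exp(-a)), so the return value is the logistic sigmoid shifted down by 0.5 and outputs lie in (-1/2, 1/2). Since the original is a method on an agent class, a hypothetical standalone version is sketched here:

import math
import numpy as np

def shifted_sigmoid(a):
    return math.exp(-np.logaddexp(0, -a)) - 0.5

print(shifted_sigmoid(0.0))    # 0.0
print(shifted_sigmoid(10.0))   # close to 0.5
print(shifted_sigmoid(-10.0))  # close to -0.5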


Note: The math.exp examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and any distribution or reuse should follow the corresponding project's license. Please do not republish without permission.