

Python math.exp Method Code Examples

This article collects typical usage examples of the math.exp method in Python. If you are wondering what math.exp does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples of the math module that this method belongs to.


The following presents 15 code examples of the math.exp method, sorted by popularity by default.

Example 1: _compute_softmax

# Required import: import math [as alias]
# Or: from math import exp [as alias]
def _compute_softmax(scores):
    """Compute softmax probability over raw logits."""
    if not scores:
        return []

    max_score = None
    for score in scores:
        if max_score is None or score > max_score:
            max_score = score

    exp_scores = []
    total_sum = 0.0
    for score in scores:
        x = math.exp(score - max_score)
        exp_scores.append(x)
        total_sum += x

    probs = []
    for score in exp_scores:
        probs.append(score / total_sum)
    return probs 
Author: ymcui, Project: cmrc2019, Lines: 23, Source: run_cmrc2019_baseline.py
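
A minimal usage sketch (not from the original project), assuming `import math` and the `_compute_softmax` function above are in scope:

scores = [1.0, 2.0, 3.0]          # made-up raw logits
probs = _compute_softmax(scores)
print(probs)                      # approximately [0.090, 0.245, 0.665]
print(sum(probs))                 # approximately 1.0, since the exponentials are normalized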

Example 2: evaluate

# Required import: import math [as alias]
# Or: from math import exp [as alias]
def evaluate(mod, data_iter, epoch, log_interval):
    """ Run evaluation on cpu. """
    start = time.time()
    total_L = 0.0
    nbatch = 0
    density = 0
    mod.set_states(value=0)
    for batch in data_iter:
        mod.forward(batch, is_train=False)
        outputs = mod.get_outputs(merge_multi_context=False)
        states = outputs[:-1]
        total_L += outputs[-1][0]
        mod.set_states(states=states)
        nbatch += 1
        # don't include padding data in the test perplexity
        density += batch.data[1].mean()
        if (nbatch + 1) % log_interval == 0:
            logging.info("Eval batch %d loss : %.7f" % (nbatch, (total_L / density).asscalar()))
    data_iter.reset()
    loss = (total_L / density).asscalar()
    ppl = math.exp(loss) if loss < 100 else 1e37
    end = time.time()
    logging.info('Iter[%d]\t\t CE loss %.7f, ppl %.7f. Eval duration = %.2f seconds ' % \
                 (epoch, loss, ppl, end - start))
    return loss 
Author: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 27, Source: run_utils.py
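
The perplexity line is the only place math.exp matters here. A standalone sketch of that relationship, independent of MXNet (the helper name is ours, not the project's):

import math

def perplexity(cross_entropy_loss):
    # Cap the result for very large losses, mirroring the guard in evaluate() above.
    return math.exp(cross_entropy_loss) if cross_entropy_loss < 100 else 1e37

print(perplexity(4.5))    # ~90.0, i.e. exp(4.5)
print(perplexity(250.0))  # 1e37 (capped)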

Example 3: _compute_delta

# Required import: import math [as alias]
# Or: from math import exp [as alias]
def _compute_delta(self, log_moments, eps):
    """Compute delta for given log_moments and eps.

    Args:
      log_moments: the log moments of privacy loss, in the form of pairs
        of (moment_order, log_moment)
      eps: the target epsilon.
    Returns:
      delta
    """
    min_delta = 1.0
    for moment_order, log_moment in log_moments:
      if math.isinf(log_moment) or math.isnan(log_moment):
        sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
        continue
      if log_moment < moment_order * eps:
        min_delta = min(min_delta,
                        math.exp(log_moment - moment_order * eps))
    return min_delta 
Author: ringringyi, Project: DOTA_models, Lines: 21, Source: accountant.py

Example 4: compute_a

# Required import: import math [as alias]
# Or: from math import exp [as alias]
def compute_a(sigma, q, lmbd, verbose=False):
  lmbd_int = int(math.ceil(lmbd))
  if lmbd_int == 0:
    return 1.0

  a_lambda_first_term_exact = 0
  a_lambda_second_term_exact = 0
  for i in range(lmbd_int + 1):
    coef_i = scipy.special.binom(lmbd_int, i) * (q ** i)
    s1, s2 = 0, 0
    for j in range(i + 1):
      coef_j = scipy.special.binom(i, j) * (-1) ** (i - j)
      s1 += coef_j * np.exp((j * j - j) / (2.0 * (sigma ** 2)))
      s2 += coef_j * np.exp((j * j + j) / (2.0 * (sigma ** 2)))
    a_lambda_first_term_exact += coef_i * s1
    a_lambda_second_term_exact += coef_i * s2

  a_lambda_exact = ((1.0 - q) * a_lambda_first_term_exact +
                    q * a_lambda_second_term_exact)
  if verbose:
    print "A: by binomial expansion    {} = {} + {}".format(
        a_lambda_exact,
        (1.0 - q) * a_lambda_first_term_exact,
        q * a_lambda_second_term_exact)
  return _to_np_float64(a_lambda_exact) 
Author: ringringyi, Project: DOTA_models, Lines: 27, Source: gaussian_moments.py
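
A small calling harness (an assumption-laden sketch: `_to_np_float64` is the project's helper, stubbed here as `np.float64`, and the parameter values are arbitrary):

import math
import numpy as np
import scipy.special

_to_np_float64 = np.float64   # stand-in for the project's helper

print(compute_a(sigma=4.0, q=0.01, lmbd=8))   # typically just above 1.0 for small q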

Example 5: _compute_delta

# Required import: import math [as alias]
# Or: from math import exp [as alias]
def _compute_delta(log_moments, eps):
  """Compute delta for given log_moments and eps.

  Args:
    log_moments: the log moments of privacy loss, in the form of pairs
      of (moment_order, log_moment)
    eps: the target epsilon.
  Returns:
    delta
  """
  min_delta = 1.0
  for moment_order, log_moment in log_moments:
    if moment_order == 0:
      continue
    if math.isinf(log_moment) or math.isnan(log_moment):
      sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
      continue
    if log_moment < moment_order * eps:
      min_delta = min(min_delta,
                      math.exp(log_moment - moment_order * eps))
  return min_delta 
Author: ringringyi, Project: DOTA_models, Lines: 23, Source: gaussian_moments.py
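
A worked call, assuming `import math` and the standalone `_compute_delta` above are in scope; the log-moment pairs are invented for illustration:

log_moments = [(1, 0.05), (2, 0.15)]   # (moment_order, log_moment) pairs, made up for this example
print(_compute_delta(log_moments, eps=0.5))
# order 1: exp(0.05 - 0.5) ~ 0.638
# order 2: exp(0.15 - 1.0) ~ 0.427  -> min_delta ~ 0.427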

Example 6: compute_q_noisy_max

# Required import: import math [as alias]
# Or: from math import exp [as alias]
def compute_q_noisy_max(counts, noise_eps):
  """returns ~ Pr[outcome != winner].

  Args:
    counts: a list of scores
    noise_eps: privacy parameter for noisy_max
  Returns:
    q: the probability that outcome is different from true winner.
  """
  # For noisy max, we only get an upper bound.
  # Pr[ j beats i* ] \leq (2 + gap(j, i*)) / (4 * exp(gap(j, i*)))
  # proof at http://mathoverflow.net/questions/66763/
  # tight-bounds-on-probability-of-sum-of-laplace-random-variables

  winner = np.argmax(counts)
  counts_normalized = noise_eps * (counts - counts[winner])
  counts_rest = np.array(
      [counts_normalized[i] for i in range(len(counts)) if i != winner])
  q = 0.0
  for c in counts_rest:
    gap = -c
    q += (gap + 2.0) / (4.0 * math.exp(gap))
  return min(q, 1.0 - (1.0/len(counts))) 
Author: ringringyi, Project: DOTA_models, Lines: 25, Source: analysis.py
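
A usage sketch, assuming the function above plus `import math` and `import numpy as np` are in scope (the vote counts are invented):

import numpy as np

counts = np.array([50, 30, 20])   # teacher votes per class, invented
print(compute_q_noisy_max(counts, noise_eps=0.1))
# ~0.20: an upper bound on the chance the noisy argmax differs from class 0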

Example 7: compute_q_noisy_max_approx

# Required import: import math [as alias]
# Or: from math import exp [as alias]
def compute_q_noisy_max_approx(counts, noise_eps):
  """returns ~ Pr[outcome != winner].

  Args:
    counts: a list of scores
    noise_eps: privacy parameter for noisy_max
  Returns:
    q: the probability that outcome is different from true winner.
  """
  # For noisy max, we only get an upper bound.
  # Pr[ j beats i* ] \leq (2 + gap(j, i*)) / (4 * exp(gap(j, i*)))
  # proof at http://mathoverflow.net/questions/66763/
  # tight-bounds-on-probability-of-sum-of-laplace-random-variables
  # This code uses an approximation that is faster and easier
  # to get local sensitivity bound on.

  winner = np.argmax(counts)
  counts_normalized = noise_eps * (counts - counts[winner])
  counts_rest = np.array(
      [counts_normalized[i] for i in range(len(counts)) if i != winner])
  gap = -max(counts_rest)
  q = (len(counts) - 1) * (gap + 2.0) / (4.0 * math.exp(gap))
  return min(q, 1.0 - (1.0/len(counts))) 
Author: ringringyi, Project: DOTA_models, Lines: 25, Source: analysis.py

Example 8: smoothed_sens

# Required import: import math [as alias]
# Or: from math import exp [as alias]
def smoothed_sens(counts, noise_eps, l, beta):
  """Compute beta-smooth sensitivity.

  Args:
    counts: array of scores
    noise_eps: noise parameter
    l: moment of interest
    beta: smoothness parameter
  Returns:
    smooth_sensitivity: a beta smooth upper bound
  """
  k = 0
  smoothed_sensitivity = sens_at_k(counts, noise_eps, l, k)
  while k < max(counts):
    k += 1
    sensitivity_at_k = sens_at_k(counts, noise_eps, l, k)
    smoothed_sensitivity = max(
        smoothed_sensitivity,
        math.exp(-beta * k) * sensitivity_at_k)
    if sensitivity_at_k == 0.0:
      break
  return smoothed_sensitivity 
Author: ringringyi, Project: DOTA_models, Lines: 24, Source: analysis.py

Example 9: select_action

# Required import: import math [as alias]
# Or: from math import exp [as alias]
def select_action(self, state):
        """
        The action selection function, it either uses the model to choose an action or samples one uniformly.
        :param state: current state of the model
        :return: the selected action as a 1x1 long tensor
        """
        if self.cuda:
            state = state.cuda()
        sample = random.random()
        eps_threshold = self.config.eps_end + (self.config.eps_start - self.config.eps_end) * math.exp(
            -1. * self.current_iteration / self.config.eps_decay)
        self.current_iteration += 1
        if sample > eps_threshold:
            with torch.no_grad():
                return self.policy_model(state).max(1)[1].view(1, 1)
        else:
            return torch.tensor([[random.randrange(2)]], device=self.device, dtype=torch.long) 
Author: moemen95, Project: Pytorch-Project-Template, Lines: 19, Source: dqn.py
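
The exponential decay schedule in isolation; the numbers below are illustrative defaults, not the project's config:

import math

eps_start, eps_end, eps_decay = 0.9, 0.05, 200   # assumed values

def eps_threshold(step):
    return eps_end + (eps_start - eps_end) * math.exp(-1.0 * step / eps_decay)

print(eps_threshold(0))      # 0.9: fully exploratory at the start
print(eps_threshold(1000))   # ~0.056: close to eps_end after many iterations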

Example 10: get_timing_signal

# Required import: import math [as alias]
# Or: from math import exp [as alias]
def get_timing_signal(length,
                      min_timescale=1,
                      max_timescale=1e4,
                      num_timescales=16):
  """Create Tensor of sinusoids of different frequencies.

  Args:
    length: Length of the Tensor to create, i.e. Number of steps.
    min_timescale: a float
    max_timescale: a float
    num_timescales: an int

  Returns:
    Tensor of shape (length, 2*num_timescales)
  """
  positions = tf.to_float(tf.range(length))
  log_timescale_increment = (
      math.log(max_timescale / min_timescale) / (num_timescales - 1))
  inv_timescales = min_timescale * tf.exp(
      tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
  scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0)
  return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1) 
Author: akzaidi, Project: fine-lm, Lines: 24, Source: common_layers.py
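
A NumPy re-expression of the same computation, for readers without TF1 (`tf.to_float` is a TF1 op); names and shapes mirror the function above:

import math
import numpy as np

def get_timing_signal_np(length, min_timescale=1, max_timescale=1e4, num_timescales=16):
    positions = np.arange(length, dtype=np.float32)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - 1)
    inv_timescales = min_timescale * np.exp(
        np.arange(num_timescales, dtype=np.float32) * -log_timescale_increment)
    scaled_time = positions[:, None] * inv_timescales[None, :]
    return np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)

print(get_timing_signal_np(10).shape)   # (10, 32): length x 2*num_timescales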

Example 11: mu_law_decoding

# Required import: import math [as alias]
# Or: from math import exp [as alias]
def mu_law_decoding(
        x_mu: Tensor,
        quantization_channels: int
) -> Tensor:
    r"""Decode mu-law encoded signal.  For more info see the
    `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_

    This expects an input with values between 0 and quantization_channels - 1
    and returns a signal scaled between -1 and 1.

    Args:
        x_mu (Tensor): Input tensor
        quantization_channels (int): Number of channels

    Returns:
        Tensor: Input after mu-law decoding
    """
    mu = quantization_channels - 1.0
    if not x_mu.is_floating_point():
        x_mu = x_mu.to(torch.float)
    mu = torch.tensor(mu, dtype=x_mu.dtype)
    x = ((x_mu) / mu) * 2 - 1.0
    x = torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.0) / mu
    return x 
Author: pytorch, Project: audio, Lines: 26, Source: functional.py
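
A usage sketch, assuming the function above and `import torch` are available:

import torch

x_mu = torch.arange(0, 256)                      # encoded values in [0, quantization_channels - 1]
x = mu_law_decoding(x_mu, quantization_channels=256)
print(x.min().item(), x.max().item())            # approximately -1.0 and 1.0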

Example 12: _gaussian

# Required import: import math [as alias]
# Or: from math import exp [as alias]
def _gaussian(
        size=3, sigma=0.25, amplitude=1, normalize=False, width=None,
        height=None, sigma_horz=None, sigma_vert=None, mean_horz=0.5,
        mean_vert=0.5):
    # handle some defaults
    if width is None:
        width = size
    if height is None:
        height = size
    if sigma_horz is None:
        sigma_horz = sigma
    if sigma_vert is None:
        sigma_vert = sigma
    center_x = mean_horz * width + 0.5
    center_y = mean_vert * height + 0.5
    gauss = np.empty((height, width), dtype=np.float32)
    # generate kernel
    for i in range(height):
        for j in range(width):
            gauss[i][j] = amplitude * math.exp(-(math.pow((j + 1 - center_x) / (
                sigma_horz * width), 2) / 2.0 + math.pow((i + 1 - center_y) / (sigma_vert * height), 2) / 2.0))
    if normalize:
        gauss = gauss / np.sum(gauss)
    return gauss 
Author: protossw512, Project: AdaptiveWingLoss, Lines: 26, Source: utils.py
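
A quick check of the kernel it produces, assuming the `_gaussian` helper above along with `import math` and `import numpy as np` are in scope:

import numpy as np

kernel = _gaussian(size=5, sigma=0.25, normalize=True)
print(kernel.shape)                                      # (5, 5)
print(round(float(kernel.sum()), 6))                     # 1.0 after normalization
print(np.unravel_index(kernel.argmax(), kernel.shape))   # (2, 2): the peak sits at the center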

Example 13: rampweight

# Required import: import math [as alias]
# Or: from math import exp [as alias]
def rampweight(iteration):
    ramp_up_end = 32000
    ramp_down_start = 100000

    if(iteration<ramp_up_end):
        ramp_weight = math.exp(-5 * math.pow((1 - iteration / ramp_up_end),2))
    elif(iteration>ramp_down_start):
        ramp_weight = math.exp(-12.5 * math.pow((1 - (120000 - iteration) / 20000),2)) 
    else:
        ramp_weight = 1 


    if(iteration==0):
        ramp_weight = 0

    return ramp_weight 
Author: soo89, Project: CSD-SSD, Lines: 18, Source: train_csd.py
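
Representative values of the ramp, assuming the function above and `import math` are in scope:

for it in (0, 16000, 32000, 60000, 110000):
    print(it, rampweight(it))
# 0      -> 0      (explicitly zeroed)
# 16000  -> ~0.287 (ramp-up: exp(-5 * 0.5**2))
# 32000  -> 1
# 60000  -> 1
# 110000 -> ~0.044 (ramp-down: exp(-12.5 * 0.5**2))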

Example 14: erfcc

# Required import: import math [as alias]
# Or: from math import exp [as alias]
def erfcc(x):
        """Complementary error function."""
        z = abs(x)
        t = 1 / (1 + 0.5 * z)
        r = t * math.exp(-z * z -
                         1.26551223 + t *
                         (1.00002368 + t *
                          (.37409196 + t *
                           (.09678418 + t *
                            (-.18628806 + t *
                             (.27886807 + t *
                              (-1.13520398 + t *
                               (1.48851587 + t *
                                (-.82215223 + t * .17087277)))))))))
        if x >= 0.:
            return r
        else:
            return 2. - r 
Author: rafasashi, Project: razzy-spinner, Lines: 20, Source: gale_church.py
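
A sanity check against the standard library, assuming the `erfcc` approximation above is in scope:

import math

for x in (-1.0, 0.0, 0.5, 2.0):
    print(x, erfcc(x), math.erfc(x))   # the rational approximation tracks math.erfc to several decimals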

Example 15: sigmoid

# Required import: import math [as alias]
# Or: from math import exp [as alias]
def sigmoid(self, a):  # numerically stable sigmoid function
        return math.exp(-np.logaddexp(0, -a)) - 0.5  # compresses values into (0, 1), then subtracts 0.5 to land in (-1/2, 1/2)
Author: gcallah, Project: indras_net, Lines: 4, Source: politicalSine.py
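
The same expression as a standalone function, for illustration (assumes `import math` and `import numpy as np`; the function name is ours):

import math
import numpy as np

def shifted_sigmoid(a):
    # exp(-logaddexp(0, -a)) equals 1 / (1 + exp(-a)), computed without overflow;
    # subtracting 0.5 recenters the output into (-1/2, 1/2).
    return math.exp(-np.logaddexp(0, -a)) - 0.5

print(shifted_sigmoid(0.0))     # 0.0
print(shifted_sigmoid(10.0))    # ~0.49995
print(shifted_sigmoid(-10.0))   # ~-0.49995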


Note: The math.exp examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by various developers, and copyright of the source code belongs to the original authors; please refer to each project's License before distributing or using it. Do not reproduce without permission.