

Python tensorflow.exp Method Code Examples

This article collects typical usage examples of the tensorflow.exp method in Python. If you are wrestling with questions such as: what exactly does tensorflow.exp do? how is it called? what does it look like in real code? then the curated examples below may help. You can also explore further usage examples from the tensorflow module.


The following presents 15 code examples of the tensorflow.exp method, sorted by popularity.
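
Before diving in, a minimal sketch of what tf.exp itself does (TF1-style graph mode, matching the examples below; assumes TensorFlow 1.x):

import tensorflow as tf

x = tf.constant([0.0, 1.0, 2.0])
y = tf.exp(x)  # element-wise e**x

with tf.Session() as sess:
    print(sess.run(y))  # -> [1.        2.7182817 7.389056 ]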

Example 1: diag_gaussian_log_likelihood

# Required imports:
import numpy as np
import tensorflow as tf
def diag_gaussian_log_likelihood(z, mu=0.0, logvar=0.0):
  """Log-likelihood under a Gaussian distribution with diagonal covariance.
    Returns the log-likelihood for each dimension.  One should sum the
    results for the log-likelihood under the full multidimensional model.

  Args:
    z: The value at which to compute the log-likelihood.
    mu: The mean of the Gaussian
    logvar: The log variance of the Gaussian.

  Returns:
    The log-likelihood under the Gaussian model.
  """

  return -0.5 * (logvar + np.log(2*np.pi) + \
                 tf.square((z-mu)/tf.exp(0.5*logvar))) 
Developer: ringringyi, Project: DOTA_models, Source file: distributions.py
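
As a quick sanity check (added here, not part of the original project), the per-dimension formula agrees with scipy's Gaussian log-pdf:

import numpy as np
from scipy.stats import norm

z, mu, logvar = 0.3, 0.1, -1.0
sigma = np.exp(0.5 * logvar)                     # logvar is the log *variance*
manual = -0.5 * (logvar + np.log(2 * np.pi) + ((z - mu) / sigma) ** 2)
print(np.isclose(manual, norm.logpdf(z, loc=mu, scale=sigma)))  # True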

Example 2: __init__

# Required imports:
import tensorflow as tf
def __init__(self, batch_size, z_size, mean, logvar):
    """Create a diagonal gaussian distribution.

    Args:
      batch_size: The size of the batch, i.e. 0th dim in 2D tensor of samples.
      z_size: The dimension of the distribution, i.e. 1st dim in 2D tensor.
      mean: The N-D mean of the distribution.
      logvar: The N-D log variance of the diagonal distribution.
    """
    size__xz = [None, z_size]
    self.mean = mean            # bxn already
    self.logvar = logvar        # bxn already
    self.noise = noise = tf.random_normal(tf.shape(logvar))
    self.sample = mean + tf.exp(0.5 * logvar) * noise
    mean.set_shape(size__xz)
    logvar.set_shape(size__xz)
    self.sample.set_shape(size__xz) 
Developer: ringringyi, Project: DOTA_models, Source file: distributions.py

Example 3: gaussian_kernel_matrix

# Required imports:
import tensorflow as tf
# compute_pairwise_distances is defined elsewhere in the same project (utils.py)
def gaussian_kernel_matrix(x, y, sigmas):
  r"""Computes a Guassian Radial Basis Kernel between the samples of x and y.

  We create a sum of multiple gaussian kernels each having a width sigma_i.

  Args:
    x: a tensor of shape [num_samples, num_features]
    y: a tensor of shape [num_samples, num_features]
    sigmas: a tensor of floats which denote the widths of each of the
      gaussians in the kernel.
  Returns:
    A tensor of shape [num_samples{x}, num_samples{y}] with the RBF kernel.
  """
  beta = 1. / (2. * (tf.expand_dims(sigmas, 1)))

  dist = compute_pairwise_distances(x, y)

  s = tf.matmul(beta, tf.reshape(dist, (1, -1)))

  return tf.reshape(tf.reduce_sum(tf.exp(-s), 0), tf.shape(dist)) 
Developer: ringringyi, Project: DOTA_models, Source file: utils.py
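
For readers without the rest of utils.py, here is a NumPy sketch of the same multi-width kernel, under the assumption that compute_pairwise_distances returns squared Euclidean distances (the usual convention in this MMD code):

import numpy as np

def rbf_kernel_reference(x, y, sigmas):
    # [num_x, num_y] matrix of squared Euclidean distances
    dist = ((x[:, None, :] - y[None, :, :]) ** 2).sum(-1)
    beta = 1.0 / (2.0 * np.asarray(sigmas))       # one inverse width per kernel
    s = beta[:, None] * dist.reshape(1, -1)       # [num_sigmas, num_x * num_y]
    return np.exp(-s).sum(0).reshape(dist.shape)  # sum the Gaussian kernels

x = np.random.randn(4, 3)
y = np.random.randn(5, 3)
print(rbf_kernel_reference(x, y, [1.0, 5.0, 10.0]).shape)  # (4, 5)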

Example 4: build_graph

# Required imports:
import keras
import tensorflow as tf
# Seq2Seq_MVE_subnets_swish and crop are defined elsewhere in the same project
def build_graph(self):
        #keras.backend.clear_session() # clear session/graph    
        self.optimizer = keras.optimizers.Adam(lr=self.lr, decay=self.decay)

        self.model = Seq2Seq_MVE_subnets_swish(id_embd=True, time_embd=True,
            lr=self.lr, decay=self.decay,
            num_input_features=self.num_input_features, num_output_features=self.num_output_features,
            num_decoder_features=self.num_decoder_features, layers=self.layers,
            loss=self.loss, regulariser=self.regulariser)

        def _mve_loss(y_true, y_pred):
            pred_u = crop(2, 0, 3)(y_pred)    # predicted mean
            pred_sig = crop(2, 3, 6)(y_pred)  # predicted variance
            # Alternative: exponentiate first so the variance stays strictly
            # positive even if the raw prediction is near zero:
            #   exp_sig = tf.exp(pred_sig)
            #   precision = 1. / exp_sig
            #   log_loss = 0.5 * tf.log(exp_sig) + 0.5 * precision * ((pred_u - y_true) ** 2)
            precision = 1. / pred_sig
            log_loss = 0.5 * tf.log(pred_sig) + 0.5 * precision * ((pred_u - y_true) ** 2)
            return tf.reduce_mean(log_loss)

        self.model.summary()
        self.model.compile(optimizer = self.optimizer, loss=_mve_loss) 
Developer: BruceBinBoxing, Project: Deep_Learning_Weather_Forecasting, Source file: competition_model_class.py
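
For intuition (an added note, not from the original repository): with pred_sig read as a predicted variance sigma^2, _mve_loss is the Gaussian negative log-likelihood up to the constant 0.5 * log(2 * pi):

import numpy as np

y_true, mu, var = 1.2, 1.0, 0.25
mve = 0.5 * np.log(var) + 0.5 * (mu - y_true) ** 2 / var
nll = 0.5 * np.log(2 * np.pi * var) + (y_true - mu) ** 2 / (2 * var)
print(np.isclose(mve + 0.5 * np.log(2 * np.pi), nll))  # True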

Example 5: minus_plus_std_strategy

# Required imports:
import numpy as np
def minus_plus_std_strategy(self, pred_mean, pred_var, feature_name,\
                            timestep_to_ensemble=21, alpha=0):
        '''
        Adds alpha times the predicted standard deviation to the predicted mean
        of feature_name, from timestep_to_ensemble onward:
                        pred_mean + alpha * pred_std
        pred_mean: (10, 37, 3)
        pred_var: (10, 37, 3)
        timestep_to_ensemble: int32 (from 0 to 36)
        '''
        print('Using minus_plus_std_strategy with alpha {}'.format(alpha))
        assert 0 <= timestep_to_ensemble <= 36, 'Please ensure 0 <= timestep_to_ensemble <= 36!'
        assert -0.3 <= alpha <= 0.3, 'Please ensure -0.3 <= alpha <= 0.3!'
        assert pred_mean.shape == (10, 37, 3), 'Error! This function ONLY works for ' \
            'one data sample of shape (10, 37, 3); shape (None, 10, 37, 3) will raise this error!'
        pred_std = np.sqrt(np.exp(pred_var))
        print('alpha:', alpha)

        pred_mean[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]] = \
        pred_mean[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]] + \
        alpha * pred_std[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]]

        return pred_mean 
Developer: BruceBinBoxing, Project: Deep_Learning_Weather_Forecasting, Source file: competition_model_class.py

Example 6: sample_action

# Required imports:
import tensorflow as tf
def sample_action(self, policy_parameters):
        """
        constructs a symbolic operation for stochastically sampling from the policy
        distribution

        arguments:
            policy_parameters
                (mean, log_std) of a Gaussian distribution over actions
                    sy_mean: (batch_size, self.ac_dim)
                    sy_logstd: (batch_size, self.ac_dim)

        returns:
            sy_sampled_ac:
                (batch_size, self.ac_dim)
        """
        sy_mean, sy_logstd = policy_parameters
        sy_sampled_ac = sy_mean + tf.exp(sy_logstd) * tf.random_normal(tf.shape(sy_mean), 0, 1)
        return sy_sampled_ac 
Developer: xuwd11, Project: cs294-112_hws, Source file: train_policy.py
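
The sampling line above is the reparameterization trick: a draw from N(mean, exp(logstd)^2) is produced by shifting and scaling standard normal noise. A small NumPy illustration (added for clarity):

import numpy as np

rng = np.random.default_rng(0)
mean, logstd = 2.0, np.log(0.5)
samples = mean + np.exp(logstd) * rng.standard_normal(100_000)
print(round(samples.mean(), 2), round(samples.std(), 2))  # ~2.0 ~0.5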

Example 7: make_encoder

# Required imports:
import tensorflow as tf
import tensorflow_probability as tfp
# build_mlp is defined elsewhere in the same project
def make_encoder(self, state, z_size, scope, n_layers, hid_size):
        """
            ### PROBLEM 3
            ### YOUR CODE HERE

            args:
                state: tf variable
                z_size: output dimension of the encoder network
                scope: scope name
                n_layers: number of layers of the encoder network
                hid_size: hidden dimension of encoder network

            TODO:
                1. z_mean: the output of a neural network that takes the state as input,
                    has output dimension z_size, n_layers layers, and hidden 
                    dimension hid_size
                2. z_logstd: a trainable variable, initialized to 0
                    shape (z_size,)

            Hint: use build_mlp
        """
        z_mean = build_mlp(state, z_size, scope, n_layers, hid_size)
        z_logstd = tf.get_variable('z_logstd', shape=z_size, trainable=True,
                                   initializer=tf.constant_initializer(value=0.))
        return tfp.distributions.MultivariateNormalDiag(loc=z_mean, scale_diag=tf.exp(z_logstd)) 
Developer: xuwd11, Project: cs294-112_hws, Source file: density_model.py

Example 8: call

# Required imports:
import tensorflow as tf
import tensorflow_probability as tfp
def call(self, inputs):
        mean_and_log_std = self.model(inputs)
        mean, log_std = tf.split(mean_and_log_std, num_or_size_splits=2, axis=1)
        log_std = tf.clip_by_value(log_std, -20., 2.)
        
        distribution = tfp.distributions.MultivariateNormalDiag(
            loc=mean,
            scale_diag=tf.exp(log_std)
        )
        
        raw_actions = distribution.sample()
        if not self._reparameterize:
            ### Problem 1.3.A
            ### YOUR CODE HERE
            raw_actions = tf.stop_gradient(raw_actions)
        log_probs = distribution.log_prob(raw_actions)
        log_probs -= self._squash_correction(raw_actions)

        ### Problem 2.A
        ### YOUR CODE HERE
        self.actions = tf.tanh(raw_actions)
            
        return self.actions, log_probs 
Developer: xuwd11, Project: cs294-112_hws, Source file: nn.py

Example 9: _learning_rate_warmup

# Required imports:
import tensorflow as tf
# _global_step is defined elsewhere in the same file (learning_rate.py)
def _learning_rate_warmup(warmup_steps, warmup_schedule="exp", hparams=None):
  """Learning rate warmup multiplier."""
  if not warmup_steps:
    return tf.constant(1.)

  tf.logging.info("Applying %s learning rate warmup for %d steps",
                  warmup_schedule, warmup_steps)

  warmup_steps = tf.to_float(warmup_steps)
  global_step = _global_step(hparams)

  if warmup_schedule == "exp":
    return tf.exp(tf.log(0.01) / warmup_steps)**(warmup_steps - global_step)
  else:
    assert warmup_schedule == "linear"
    start = tf.constant(0.35)
    return ((tf.constant(1.) - start) / warmup_steps) * global_step + start 
Developer: akzaidi, Project: fine-lm, Source file: learning_rate.py
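
A quick numeric check (added) of the "exp" branch: the multiplier equals 0.01 ** ((warmup_steps - step) / warmup_steps), so it climbs geometrically from 1% of the target rate at step 0 to the full rate at warmup_steps:

import numpy as np

warmup_steps = 1000.0
for step in [0, 500, 1000]:
    multiplier = np.exp(np.log(0.01) / warmup_steps) ** (warmup_steps - step)
    print(step, round(float(multiplier), 4))
# 0 0.01
# 500 0.1
# 1000 1.0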

Example 10: bottleneck

# Required imports:
import tensorflow as tf
# discretization is another module in the same project
def bottleneck(self, x):  # pylint: disable=arguments-differ
    hparams = self.hparams
    if hparams.unordered:
      return super(AutoencoderOrderedDiscrete, self).bottleneck(x)
    noise = hparams.bottleneck_noise
    hparams.bottleneck_noise = 0.0  # We'll add noise below.
    x, loss = discretization.parametrized_bottleneck(x, hparams)
    hparams.bottleneck_noise = noise
    if hparams.mode == tf.estimator.ModeKeys.TRAIN:
      # We want a number p such that p^bottleneck_bits = 1 - noise.
      # So log(p) * bottleneck_bits = log(noise)
      log_p = tf.log(1 - float(noise) / 2) / float(hparams.bottleneck_bits)
      # Probabilities of flipping are p, p^2, p^3, ..., p^bottleneck_bits.
      noise_mask = 1.0 - tf.exp(tf.cumsum(tf.zeros_like(x) + log_p, axis=-1))
      # Having the no-noise mask, we can make noise just uniformly at random.
      ordered_noise = tf.random_uniform(tf.shape(x))
      # We want our noise to be 1s at the start and random {-1, 1} bits later.
      ordered_noise = tf.to_float(tf.less(noise_mask, ordered_noise))
      # Now we flip the bits of x on the noisy positions (ordered and normal).
      x *= 2.0 * ordered_noise - 1
    return x, loss 
Developer: akzaidi, Project: fine-lm, Source file: autoencoders.py

Example 11: get_timing_signal

# Required imports:
import math
import tensorflow as tf
def get_timing_signal(length,
                      min_timescale=1,
                      max_timescale=1e4,
                      num_timescales=16):
  """Create Tensor of sinusoids of different frequencies.

  Args:
    length: Length of the Tensor to create, i.e. Number of steps.
    min_timescale: a float
    max_timescale: a float
    num_timescales: an int

  Returns:
    Tensor of shape (length, 2*num_timescales)
  """
  positions = tf.to_float(tf.range(length))
  log_timescale_increment = (
      math.log(max_timescale / min_timescale) / (num_timescales - 1))
  inv_timescales = min_timescale * tf.exp(
      tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
  scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0)
  return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1) 
Developer: akzaidi, Project: fine-lm, Source file: common_layers.py
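
A NumPy transcription (added for illustration) makes the shapes easy to verify:

import math
import numpy as np

def timing_signal_reference(length, min_timescale=1, max_timescale=1e4,
                            num_timescales=16):
    positions = np.arange(length, dtype=np.float32)
    log_inc = math.log(max_timescale / min_timescale) / (num_timescales - 1)
    inv_timescales = min_timescale * np.exp(np.arange(num_timescales) * -log_inc)
    scaled_time = positions[:, None] * inv_timescales[None, :]
    return np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)

print(timing_signal_reference(50).shape)  # (50, 32) == (length, 2 * num_timescales)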

Example 12: vae

# Required imports:
import tensorflow as tf
# common_layers is another module in the same project
def vae(x, name, z_size):
  """Simple variational autoencoder without discretization.

  Args:
    x: Input to the discretization bottleneck.
    name: Name for the bottleneck scope.
    z_size: Number of bits used to produce discrete code; discrete codes range
      from 1 to 2**z_size.

  Returns:
    The sampled latent z, the KL divergence loss, mu, and log_sigma.
  """
  with tf.variable_scope(name):
    mu = tf.layers.dense(x, z_size, name="mu")
    log_sigma = tf.layers.dense(x, z_size, name="log_sigma")
    shape = common_layers.shape_list(x)
    epsilon = tf.random_normal([shape[0], shape[1], 1, z_size])
    z = mu + tf.exp(log_sigma / 2) * epsilon
    kl = 0.5 * tf.reduce_mean(
        tf.exp(log_sigma) + tf.square(mu) - 1. - log_sigma, axis=-1)
    free_bits = z_size // 4
    kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
    return z, kl_loss, mu, log_sigma 
Developer: akzaidi, Project: fine-lm, Source file: discretization.py
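
A note on the KL term (added for clarity): despite its name, log_sigma is used as a log variance (z scales by exp(log_sigma / 2)), and the expression inside reduce_mean is the textbook closed-form KL(N(mu, sigma^2) || N(0, 1)) per dimension:

import numpy as np

mu, log_sigma = 0.7, -0.4        # log_sigma is a log variance here
var = np.exp(log_sigma)
code_kl = 0.5 * (var + mu ** 2 - 1.0 - log_sigma)
textbook = np.log(1.0 / np.sqrt(var)) + (var + mu ** 2) / 2.0 - 0.5
print(np.isclose(code_kl, textbook))  # True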

Example 13: expit_tensor

# Required imports:
import tensorflow as tf
def expit_tensor(x):
	return 1. / (1. + tf.exp(-x)) 
Developer: AmeyaWagh, Project: Traffic_sign_detection_YOLO, Source file: train.py
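
expit_tensor is just the logistic sigmoid, which TensorFlow also provides directly as tf.sigmoid; a quick NumPy/SciPy check of the identity:

import numpy as np
from scipy.special import expit

x = np.linspace(-3, 3, 7)
print(np.allclose(1.0 / (1.0 + np.exp(-x)), expit(x)))  # True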

Example 14: exp2

# Required imports:
import numpy as np
import tensorflow as tf
def exp2(x):
    with tf.name_scope('Exp2'):
        return tf.exp(x * np.float32(np.log(2.0))) 
Developer: zalandoresearch, Project: disentangling_conditional_gans, Source file: tfutil.py
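
The identity used here is 2**x = e**(x * ln 2), which lets tf.exp stand in for a base-2 exponential; a NumPy check:

import numpy as np

x = np.array([-1.0, 0.0, 0.5, 3.0], dtype=np.float32)
print(np.allclose(np.exp(x * np.log(2.0)), 2.0 ** x))  # True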

Example 15: fprop

# Required imports:
import tensorflow as tf
def fprop(self, x, **kwargs):
        alpha = 1.6732632423543772848170429916717
        scale = 1.0507009873554804934193349852946
        mask = tf.to_float(x >= 0.)
        out = mask * x + (1. - mask) * \
            (alpha * tf.exp((1. - mask) * x) - alpha)
        return scale * out 
Developer: StephanZheng, Project: neural-fingerprinting, Source file: picklable_model.py
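
This fprop is the SELU activation (Klambauer et al., 2017), written branch-free with a 0/1 mask. A NumPy check (added) that the masked form matches the piecewise definition:

import numpy as np

alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946

def selu_masked(x):
    mask = (x >= 0.0).astype(x.dtype)
    return scale * (mask * x + (1.0 - mask) * (alpha * np.exp((1.0 - mask) * x) - alpha))

def selu_piecewise(x):
    return scale * np.where(x >= 0.0, x, alpha * (np.exp(x) - 1.0))

x = np.linspace(-3.0, 3.0, 13)
print(np.allclose(selu_masked(x), selu_piecewise(x)))  # True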


Note: The tensorflow.exp method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from community open-source projects, and copyright in the source code remains with the original authors; consult each project's License before distributing or reusing the code. Do not reproduce without permission.