This article collects typical code examples of the tensorflow.log method in Python. If you have been wondering what tensorflow.log does, how to use it, or what real usage looks like, the curated examples below should help. You can also explore the other methods of the tensorflow module. (Note: tf.log is the TensorFlow 1.x name; in TensorFlow 2 the same op lives at tf.math.log.)
Below are 15 code examples of the tensorflow.log method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
Example 1: _compute_delta
# Required import: import tensorflow [as alias]
# Or: from tensorflow import log [as alias]
# (This example also uses: import math, import sys)
def _compute_delta(self, log_moments, eps):
"""Compute delta for given log_moments and eps.
Args:
log_moments: the log moments of privacy loss, in the form of pairs
of (moment_order, log_moment)
eps: the target epsilon.
Returns:
delta
"""
min_delta = 1.0
for moment_order, log_moment in log_moments:
if math.isinf(log_moment) or math.isnan(log_moment):
sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
continue
if log_moment < moment_order * eps:
min_delta = min(min_delta,
math.exp(log_moment - moment_order * eps))
return min_delta
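A minimal standalone sketch of the same computation, in pure Python with hypothetical (moment_order, log_moment) pairs: delta is the tightest bound exp(log_moment - moment_order * eps) over the orders for which the bound is below 1.

import math

log_moments = [(2, 0.5), (4, 1.2), (8, 3.0)]  # hypothetical (order, log moment) pairs
eps = 1.0
delta = min((math.exp(lm - mo * eps) for mo, lm in log_moments if lm < mo * eps),
            default=1.0)
print(delta)  # ~0.0067, dominated by the 8th-order moment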
Example 2: log_sum_exp
# Required import: import tensorflow [as alias]
# Or: from tensorflow import log [as alias]
def log_sum_exp(x_k):
"""Computes log \sum exp in a numerically stable way.
log ( sum_i exp(x_i) )
log ( sum_i exp(x_i - m + m) ), with m = max(x_i)
log ( sum_i exp(x_i - m)*exp(m) )
log ( sum_i exp(x_i - m) + m
Args:
x_k - k -dimensional list of arguments to log_sum_exp.
Returns:
log_sum_exp of the arguments.
"""
m = tf.reduce_max(x_k)
x1_k = x_k - m
u_k = tf.exp(x1_k)
z = tf.reduce_sum(u_k)
return tf.log(z) + m
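A quick numerical check (a sketch, assuming TensorFlow 1.x, where tf.log and tf.Session exist): the result matches the built-in tf.reduce_logsumexp and stays finite even when a naive exp() would overflow.

import tensorflow as tf

x = tf.constant([1000.0, 1000.5, 999.0])  # naive exp() would overflow here
with tf.Session() as sess:
  ours, ref = sess.run([log_sum_exp(x), tf.reduce_logsumexp(x)])
print(ours, ref)  # both ~1001.1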
Example 3: gaussian_pos_log_likelihood
# Required import: import tensorflow [as alias]
# Or: from tensorflow import log [as alias]
# (This example also uses: import numpy as np)
def gaussian_pos_log_likelihood(unused_mean, logvar, noise):
"""Gaussian log-likelihood function for a posterior in VAE
Note: This function is specialized for a posterior distribution, that has the
form of z = mean + sigma * noise.
Args:
unused_mean: ignore
logvar: The log variance of the distribution
noise: The noise used in the sampling of the posterior.
Returns:
The log-likelihood under the Gaussian model.
"""
# ln N(z; mean, sigma) = - ln(sigma) - 0.5 ln 2pi - noise^2 / 2
return - 0.5 * (logvar + np.log(2 * np.pi) + tf.square(noise))
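A pure-numpy sanity check of the identity in the comment above: with z = mean + sigma * noise and logvar = 2 * log(sigma), the direct Gaussian density and the specialized form agree.

import numpy as np

logvar, noise = 0.4, 1.3  # hypothetical scalar values
sigma = np.exp(0.5 * logvar)
direct = -np.log(sigma) - 0.5 * np.log(2 * np.pi) - 0.5 * noise ** 2
assert np.isclose(direct, -0.5 * (logvar + np.log(2 * np.pi) + noise ** 2))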
Example 4: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import log [as alias]
def __init__(self, batch_size, z_size, mean, logvar):
"""Create a diagonal gaussian distribution.
Args:
batch_size: The size of the batch, i.e. 0th dim in 2D tensor of samples.
z_size: The dimension of the distribution, i.e. 1st dim in 2D tensor.
mean: The N-D mean of the distribution.
logvar: The N-D log variance of the diagonal distribution.
"""
size__xz = [None, z_size]
self.mean = mean # bxn already
self.logvar = logvar # bxn already
self.noise = noise = tf.random_normal(tf.shape(logvar))
self.sample = mean + tf.exp(0.5 * logvar) * noise
mean.set_shape(size__xz)
logvar.set_shape(size__xz)
self.sample.set_shape(size__xz)
Example 5: logp
# Required import: import tensorflow [as alias]
# Or: from tensorflow import log [as alias]
def logp(self, z=None):
"""Compute the log-likelihood under the distribution.
Args:
z (optional): value to compute likelihood for, if None, use sample.
Returns:
The likelihood of z under the model.
"""
if z is None:
z = self.sample
# This is needed to make sure that the gradients are simple.
# The value of the function shouldn't change.
if z == self.sample:
return gaussian_pos_log_likelihood(self.mean, self.logvar, self.noise)
return diag_gaussian_log_likelihood(z, self.mean, self.logvar)
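A hypothetical usage sketch (assuming TF 1.x, and assuming the __init__ and logp above belong to a class named DiagonalGaussian, as in the LFADS codebase the snippet appears to come from):

batch_size, z_size = 4, 8
mean = tf.zeros([batch_size, z_size])
logvar = tf.zeros([batch_size, z_size])
dist = DiagonalGaussian(batch_size, z_size, mean, logvar)
with tf.Session() as sess:
  sample, logp = sess.run([dist.sample, dist.logp()])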
Example 6: logp_t
# Required import: import tensorflow [as alias]
# Or: from tensorflow import log [as alias]
def logp_t(self, z_t_bxu, z_tm1_bxu=None):
"""Compute the log-likelihood under the distribution for a given time t,
not the whole sequence.
Args:
z_t_bxu: sample to compute likelihood for at time t.
z_tm1_bxu (optional): sample condition probability of z_t upon.
Returns:
The likelihood of p_t under the model at time t. i.e.
p(z_t|z_tm1) = N(z_tm1 * phis, eps^2)
"""
if z_tm1_bxu is None:
return diag_gaussian_log_likelihood(z_t_bxu, self.pmeans_bxu,
self.logpvars_bxu)
else:
means_t_bxu = self.pmeans_bxu + self.phis_bxu * z_tm1_bxu
logp_tgtm1_bxu = diag_gaussian_log_likelihood(z_t_bxu,
means_t_bxu,
self.logevars_bxu)
return logp_tgtm1_bxu
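The conditional branch above is a first-order autoregression. A pure-numpy sketch of the same per-dimension density, with hypothetical scalar parameters standing in for the pmeans/phis/logevars attributes:

import numpy as np

pmean, phi, evar = 0.0, 0.9, 0.1  # hypothetical prior mean, AR coefficient, noise variance
z_tm1, z_t = 0.5, 0.4
mean_t = pmean + phi * z_tm1
logp = -0.5 * (np.log(2 * np.pi * evar) + (z_t - mean_t) ** 2 / evar)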
Example 7: _BuildLoss
# Required import: import tensorflow [as alias]
# Or: from tensorflow import log [as alias]
def _BuildLoss(self):
    # 1. reconstr_loss doesn't seem to do better than l2 loss.
    # 2. Only works when using reduce_mean; reduce_sum doesn't work.
    # 3. The kl loss doesn't seem to play an important role.
self.loss = 0
with tf.variable_scope('loss'):
if self.params['l2_loss']:
l2_loss = tf.reduce_mean(tf.square(self.diff_output - self.diffs[1]))
tf.summary.scalar('l2_loss', l2_loss)
self.loss += l2_loss
if self.params['reconstr_loss']:
        reconstr_loss = (-tf.reduce_mean(
            self.diffs[1] * tf.log(1e-10 + self.diff_output) +
            (1 - self.diffs[1]) * tf.log(1e-10 + 1 - self.diff_output)))
reconstr_loss = tf.check_numerics(reconstr_loss, 'reconstr_loss')
tf.summary.scalar('reconstr_loss', reconstr_loss)
self.loss += reconstr_loss
if self.params['kl_loss']:
kl_loss = (0.5 * tf.reduce_mean(
tf.square(self.z_mean) + tf.square(self.z_stddev) -
2 * self.z_stddev_log - 1))
tf.summary.scalar('kl_loss', kl_loss)
self.loss += kl_loss
tf.summary.scalar('loss', self.loss)
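The kl_loss term above is the closed-form KL divergence KL(N(mu, sigma^2) || N(0, 1)), averaged over dimensions. A scalar numpy check of the formula it implements:

import numpy as np

mu, sigma = 0.3, 0.8  # hypothetical posterior mean and stddev
kl = 0.5 * (mu ** 2 + sigma ** 2 - 2 * np.log(sigma) - 1)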
Example 8: log_prob_action
# Required import: import tensorflow [as alias]
# Or: from tensorflow import log [as alias]
# (This example also uses: import numpy as np)
def log_prob_action(self, action, logits,
sampling_dim, act_dim, act_type):
"""Calculate log-prob of action sampled from distribution."""
if self.env_spec.is_discrete(act_type):
act_log_prob = tf.reduce_sum(
tf.one_hot(action, act_dim) * tf.nn.log_softmax(logits), -1)
elif self.env_spec.is_box(act_type):
      means = logits[:, :sampling_dim // 2]
      std = logits[:, sampling_dim // 2:]
act_log_prob = (- 0.5 * tf.log(2 * np.pi * tf.square(std))
- 0.5 * tf.square(action - means) / tf.square(std))
act_log_prob = tf.reduce_sum(act_log_prob, -1)
else:
assert False
return act_log_prob
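For the discrete branch, the same quantity can be cross-checked against TF's built-in op (a sketch, assuming TF 1.x):

logits = tf.constant([[1.0, 2.0, 0.5]])
action = tf.constant([1])
manual = tf.reduce_sum(tf.one_hot(action, 3) * tf.nn.log_softmax(logits), -1)
ref = -tf.nn.sparse_softmax_cross_entropy_with_logits(labels=action, logits=logits)
with tf.Session() as sess:
  print(sess.run([manual, ref]))  # identical values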
Example 9: single_step
# Required import: import tensorflow [as alias]
# Or: from tensorflow import log [as alias]
def single_step(self, prev, cur, greedy=False):
"""Single RNN step. Equivalently, single-time-step sampled actions."""
prev_internal_state, prev_actions, _, _, _, _ = prev
obs, actions = cur # state observed and action taken at this time step
# feed into RNN cell
output, next_state = self.core(
obs, prev_internal_state, prev_actions)
# sample actions with values and log-probs
(actions, logits, log_probs,
entropy, self_kl) = self.sample_actions(
output, actions=actions, greedy=greedy)
return (next_state, tuple(actions), tuple(logits), tuple(log_probs),
tuple(entropy), tuple(self_kl))
Example 10: build_graph
# Required import: import tensorflow [as alias]
# Or: from tensorflow import log [as alias]
# (This example also uses: import keras)
def build_graph(self):
#keras.backend.clear_session() # clear session/graph
self.optimizer = keras.optimizers.Adam(lr=self.lr, decay=self.decay)
self.model = Seq2Seq_MVE_subnets_swish(id_embd=True, time_embd=True,
lr=self.lr, decay=self.decay,
num_input_features=self.num_input_features, num_output_features=self.num_output_features,
num_decoder_features=self.num_decoder_features, layers=self.layers,
loss=self.loss, regulariser=self.regulariser)
    def _mve_loss(y_true, y_pred):
      pred_u = crop(2, 0, 3)(y_pred)
      pred_sig = crop(2, 3, 6)(y_pred)
      print(pred_sig)  # debug: inspect the predicted-variance tensor
      # Alternative: exponentiate so the predicted scale is strictly positive
      # (avoids pred_sig collapsing to zero):
      # exp_sig = tf.exp(pred_sig)
      # precision = 1. / exp_sig
      # log_loss = 0.5 * tf.log(exp_sig) + 0.5 * precision * ((pred_u - y_true) ** 2)
      precision = 1. / pred_sig
      log_loss = 0.5 * tf.log(pred_sig) + 0.5 * precision * ((pred_u - y_true) ** 2)
      log_loss = tf.reduce_mean(log_loss)
      return log_loss
print(self.model.summary())
self.model.compile(optimizer = self.optimizer, loss=_mve_loss)
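_mve_loss is the Gaussian negative log-likelihood with a predicted mean and a predicted variance, with the constant 0.5*log(2*pi) dropped. A scalar numpy sketch (treating pred_sig as the variance, as the code above does):

import numpy as np

y_true, pred_u, pred_sig = 1.0, 0.8, 0.25  # hypothetical target, mean, variance
nll = 0.5 * np.log(pred_sig) + 0.5 * (pred_u - y_true) ** 2 / pred_sig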
Example 11: bottleneck
# Required import: import tensorflow [as alias]
# Or: from tensorflow import log [as alias]
def bottleneck(self, x): # pylint: disable=arguments-differ
hparams = self.hparams
if hparams.unordered:
return super(AutoencoderOrderedDiscrete, self).bottleneck(x)
noise = hparams.bottleneck_noise
hparams.bottleneck_noise = 0.0 # We'll add noise below.
x, loss = discretization.parametrized_bottleneck(x, hparams)
hparams.bottleneck_noise = noise
if hparams.mode == tf.estimator.ModeKeys.TRAIN:
# We want a number p such that p^bottleneck_bits = 1 - noise.
      # So log(p) * bottleneck_bits = log(1 - noise).
log_p = tf.log(1 - float(noise) / 2) / float(hparams.bottleneck_bits)
# Probabilities of flipping are p, p^2, p^3, ..., p^bottleneck_bits.
noise_mask = 1.0 - tf.exp(tf.cumsum(tf.zeros_like(x) + log_p, axis=-1))
# Having the no-noise mask, we can make noise just uniformly at random.
ordered_noise = tf.random_uniform(tf.shape(x))
# We want our noise to be 1s at the start and random {-1, 1} bits later.
ordered_noise = tf.to_float(tf.less(noise_mask, ordered_noise))
# Now we flip the bits of x on the noisy positions (ordered and normal).
x *= 2.0 * ordered_noise - 1
return x, loss
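A numpy sketch of the flip schedule: cumulatively summing log_p yields keep-probabilities p, p^2, ..., p^bottleneck_bits, so later bit positions are noised more often (hypothetical noise and bit-count values):

import numpy as np

noise, bits = 0.1, 8
log_p = np.log(1 - noise / 2) / bits
keep_prob = np.exp(np.cumsum(np.full(bits, log_p)))  # p, p^2, ..., p^bits
flip_prob = 1.0 - keep_prob  # grows monotonically along the bit axis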
Example 12: lossfn
# Required import: import tensorflow [as alias]
# Or: from tensorflow import log [as alias]
def lossfn(real_input, fake_input, compress, hparams, lsgan, name):
"""Loss function."""
eps = 1e-12
with tf.variable_scope(name):
d1 = discriminator(real_input, compress, hparams, "discriminator")
d2 = discriminator(fake_input, compress, hparams, "discriminator",
reuse=True)
if lsgan:
dloss = tf.reduce_mean(
tf.squared_difference(d1, 0.9)) + tf.reduce_mean(tf.square(d2))
gloss = tf.reduce_mean(tf.squared_difference(d2, 0.9))
loss = (dloss + gloss)/2
else: # cross_entropy
dloss = -tf.reduce_mean(
tf.log(d1 + eps)) - tf.reduce_mean(tf.log(1 - d2 + eps))
gloss = -tf.reduce_mean(tf.log(d2 + eps))
loss = (dloss + gloss)/2
return loss
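The eps term keeps tf.log away from log(0) when the discriminator saturates. A scalar numpy illustration of the cross-entropy branch, with hypothetical discriminator outputs d1 (on real data) and d2 (on fake data):

import numpy as np

eps, d1, d2 = 1e-12, 0.95, 0.05
dloss = -np.log(d1 + eps) - np.log(1 - d2 + eps)
gloss = -np.log(d2 + eps)
loss = (dloss + gloss) / 2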
Example 13: get_timing_signal
# Required import: import tensorflow [as alias]
# Or: from tensorflow import log [as alias]
# (This example also uses: import math)
def get_timing_signal(length,
min_timescale=1,
max_timescale=1e4,
num_timescales=16):
"""Create Tensor of sinusoids of different frequencies.
Args:
    length: Length of the Tensor to create, i.e. the number of steps.
min_timescale: a float
max_timescale: a float
num_timescales: an int
Returns:
Tensor of shape (length, 2*num_timescales)
"""
positions = tf.to_float(tf.range(length))
log_timescale_increment = (
math.log(max_timescale / min_timescale) / (num_timescales - 1))
inv_timescales = min_timescale * tf.exp(
tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0)
return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
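A one-line usage sketch (assuming TF 1.x): the function builds a (length, 2 * num_timescales) tensor of sinusoids that can be added to per-step embeddings.

signal = get_timing_signal(50, num_timescales=16)  # Tensor of shape (50, 32)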
Example 14: binary_cross_entropy
# Required import: import tensorflow [as alias]
# Or: from tensorflow import log [as alias]
def binary_cross_entropy(x, y, smoothing=0, epsilon=1e-12):
"""Computes the averaged binary cross entropy.
bce = y*log(x) + (1-y)*log(1-x)
Args:
x: The predicted labels.
y: The true labels.
smoothing: The label smoothing coefficient.
Returns:
The cross entropy.
"""
y = tf.to_float(y)
if smoothing > 0:
smoothing *= 2
y = y * (1 - smoothing) + 0.5 * smoothing
return -tf.reduce_mean(tf.log(x + epsilon) * y + tf.log(1.0 - x + epsilon) * (1 - y))
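A usage sketch (assuming TF 1.x): x holds predicted probabilities, y the binary labels; smoothing pulls the targets toward 0.5.

x = tf.constant([0.9, 0.2, 0.7])
y = tf.constant([1, 0, 1])
loss = binary_cross_entropy(x, y, smoothing=0.1)
with tf.Session() as sess:
  print(sess.run(loss))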
Example 15: sample_dtype
# Required import: import tensorflow [as alias]
# Or: from tensorflow import log [as alias]
def sample_dtype(self):
return tf.int32
# WRONG SECOND DERIVATIVES
# class CategoricalPd(Pd):
# def __init__(self, logits):
# self.logits = logits
# self.ps = tf.nn.softmax(logits)
# @classmethod
# def fromflat(cls, flat):
# return cls(flat)
# def flatparam(self):
# return self.logits
# def mode(self):
# return U.argmax(self.logits, axis=-1)
# def logp(self, x):
# return -tf.nn.sparse_softmax_cross_entropy_with_logits(self.logits, x)
# def kl(self, other):
# return tf.nn.softmax_cross_entropy_with_logits(other.logits, self.ps) \
# - tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
# def entropy(self):
# return tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
# def sample(self):
# u = tf.random_uniform(tf.shape(self.logits))
# return U.argmax(self.logits - tf.log(-tf.log(u)), axis=-1)
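The commented-out sample() above uses the Gumbel-max trick: adding -log(-log(u)) noise to the logits and taking the argmax draws a category with probability softmax(logits). A pure-numpy sketch of the same trick:

import numpy as np

logits = np.array([1.0, 2.0, 0.5])
u = np.random.uniform(size=logits.shape)
sample = np.argmax(logits - np.log(-np.log(u)))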