This article collects typical usage examples of the Python method tensorflow.check_numerics. If you are wondering what tensorflow.check_numerics does, how to call it, or what working code looks like, the curated examples below should help. You can also explore further usage examples from the enclosing tensorflow module.
The following shows 15 code examples of tensorflow.check_numerics, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
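Before the examples, here is a minimal, self-contained sketch of what tf.check_numerics does, written against the TF1 graph API that all the examples below use (the tensor names here are illustrative): the op returns its input unchanged, but raises an InvalidArgumentError carrying the given message at run time if the tensor contains any NaN or Inf values.

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])
checked = tf.check_numerics(x, 'x has NaN or Inf')  # identity for finite values

bad = tf.log(tf.constant(-1.0))  # log of a negative number yields NaN
checked_bad = tf.check_numerics(bad, 'bad value found')

with tf.Session() as sess:
    print(sess.run(checked))  # [1. 2. 3.]
    sess.run(checked_bad)     # raises InvalidArgumentError mentioning 'bad value found'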
Example 1: _BuildLoss
# Required import: import tensorflow [as alias]
# Or: from tensorflow import check_numerics [as alias]
def _BuildLoss(self):
  # 1. reconstr_loss doesn't seem to do better than l2 loss.
  # 2. Only works when using reduce_mean. reduce_sum doesn't work.
  # 3. It seems kl loss doesn't play an important role.
  self.loss = 0
  with tf.variable_scope('loss'):
    if self.params['l2_loss']:
      l2_loss = tf.reduce_mean(tf.square(self.diff_output - self.diffs[1]))
      tf.summary.scalar('l2_loss', l2_loss)
      self.loss += l2_loss
    if self.params['reconstr_loss']:
      # Binary cross-entropy between target and predicted diffs; the 1e-10
      # offsets guard both logs against log(0).
      reconstr_loss = (-tf.reduce_mean(
          self.diffs[1] * tf.log(1e-10 + self.diff_output) +
          (1 - self.diffs[1]) * tf.log(1e-10 + 1 - self.diff_output)))
      reconstr_loss = tf.check_numerics(reconstr_loss, 'reconstr_loss')
      tf.summary.scalar('reconstr_loss', reconstr_loss)
      self.loss += reconstr_loss
    if self.params['kl_loss']:
      kl_loss = (0.5 * tf.reduce_mean(
          tf.square(self.z_mean) + tf.square(self.z_stddev) -
          2 * self.z_stddev_log - 1))
      tf.summary.scalar('kl_loss', kl_loss)
      self.loss += kl_loss
    tf.summary.scalar('loss', self.loss)
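For reference, the kl_loss term is the closed-form KL divergence between the approximate posterior N(z_mean, z_stddev^2) and a standard normal prior, KL = 0.5 * (z_mean^2 + z_stddev^2 - 2 * log(z_stddev) - 1) averaged over dimensions, assuming z_stddev_log holds log(z_stddev) as the names suggest.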
Example 2: _value_loss
# Required import: import tensorflow [as alias]
# Or: from tensorflow import check_numerics [as alias]
def _value_loss(self, observ, reward, length):
  """Compute the loss function for the value baseline.

  The value loss is the difference between empirical and approximated returns
  over the collected episodes. Returns the loss tensor and a summary string.

  Args:
    observ: Sequences of observations.
    reward: Sequences of rewards.
    length: Batch of sequence lengths.

  Returns:
    Tuple of loss tensor and summary tensor.
  """
  with tf.name_scope('value_loss'):
    value = self._network(observ, length).value
    return_ = utility.discounted_return(
        reward, length, self._config.discount)
    advantage = return_ - value
    value_loss = 0.5 * self._mask(advantage ** 2, length)
    summary = tf.summary.merge([
        tf.summary.histogram('value_loss', value_loss),
        tf.summary.scalar('avg_value_loss', tf.reduce_mean(value_loss))])
    value_loss = tf.reduce_mean(value_loss)
    return tf.check_numerics(value_loss, 'value_loss'), summary
Example 3: _mask
# Required import: import tensorflow [as alias]
# Or: from tensorflow import check_numerics [as alias]
def _mask(self, tensor, length):
  """Set padding elements of a batch of sequences to zero.

  Useful to then safely sum along the time dimension.

  Args:
    tensor: Tensor of sequences.
    length: Batch of sequence lengths.

  Returns:
    Masked sequences.
  """
  with tf.name_scope('mask'):
    range_ = tf.range(tensor.shape[1].value)
    mask = tf.cast(range_[None, :] < length[:, None], tf.float32)
    masked = tensor * mask
    return tf.check_numerics(masked, 'masked')
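For intuition, here is a small NumPy sketch of the masking logic above; the shapes and values are illustrative, not from the source:

import numpy as np

tensor = np.ones((2, 4), dtype=np.float32)  # batch of 2 sequences padded to length 4
length = np.array([2, 3])                   # true sequence lengths

range_ = np.arange(tensor.shape[1])
mask = (range_[None, :] < length[:, None]).astype(np.float32)
print(tensor * mask)
# [[1. 1. 0. 0.]
#  [1. 1. 1. 0.]]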
Example 4: simulate
# Required import: import tensorflow [as alias]
# Or: from tensorflow import check_numerics [as alias]
def simulate(self, action):
  """Step the batch of environments.

  The results of the step can be accessed from the variables defined below.

  Args:
    action: Tensor holding the batch of actions to apply.

  Returns:
    Operation.
  """
  with tf.name_scope('environment/simulate'):
    if action.dtype in (tf.float16, tf.float32, tf.float64):
      action = tf.check_numerics(action, 'action')
    observ_dtype = self._parse_dtype(self._batch_env.observation_space)
    observ, reward, done = tf.py_func(
        lambda a: self._batch_env.step(a)[:3], [action],
        [observ_dtype, tf.float32, tf.bool], name='step')
    observ = tf.check_numerics(observ, 'observ')
    reward = tf.check_numerics(reward, 'reward')
    return tf.group(
        self._observ.assign(observ),
        self._action.assign(action),
        self._reward.assign(reward),
        self._done.assign(done))
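tf.py_func steps out of the graph to call the underlying batch environment, so its outputs carry no numeric guarantees; wrapping observ and reward (and the action, when it is floating point) in tf.check_numerics makes any NaN or Inf crossing this graph boundary fail loudly instead of silently propagating into training.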
Example 5: reset
# Required import: import tensorflow [as alias]
# Or: from tensorflow import check_numerics [as alias]
def reset(self, indices=None):
  """Reset the batch of environments.

  Args:
    indices: The batch indices of the environments to reset; defaults to all.

  Returns:
    Batch tensor of the new observations.
  """
  if indices is None:
    indices = tf.range(len(self._batch_env))
  observ_dtype = self._parse_dtype(self._batch_env.observation_space)
  observ = tf.py_func(
      self._batch_env.reset, [indices], observ_dtype, name='reset')
  observ = tf.check_numerics(observ, 'observ')
  reward = tf.zeros_like(indices, tf.float32)
  done = tf.zeros_like(indices, tf.bool)
  with tf.control_dependencies([
      tf.scatter_update(self._observ, indices, observ),
      tf.scatter_update(self._reward, indices, reward),
      tf.scatter_update(self._done, indices, done)]):
    return tf.identity(observ)
Example 6: calculate_generalized_advantage_estimator
# Required import: import tensorflow [as alias]
# Or: from tensorflow import check_numerics [as alias]
def calculate_generalized_advantage_estimator(
    reward, value, done, gae_gamma, gae_lambda):
  """Generalized advantage estimator."""
  # Below is a slight weirdness: we replace the last reward with the last
  # value estimate, which makes the advantage in the last timestep zero.
  reward = tf.concat([reward[:-1, :], value[-1:, :]], axis=0)
  next_value = tf.concat([value[1:, :], tf.zeros_like(value[-1:, :])], axis=0)
  next_not_done = 1 - tf.cast(tf.concat([done[1:, :],
                                         tf.zeros_like(done[-1:, :])], axis=0),
                              tf.float32)
  delta = reward + gae_gamma * next_value * next_not_done - value
  return_ = tf.reverse(tf.scan(
      lambda agg, cur: cur[0] + cur[1] * gae_gamma * gae_lambda * agg,
      [tf.reverse(delta, [0]), tf.reverse(next_not_done, [0])],
      tf.zeros_like(delta[0, :]),
      parallel_iterations=1), [0])
  return tf.check_numerics(return_, "return")
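In symbols, the code computes delta_t = r_t + gamma * V(s_{t+1}) * (1 - done_{t+1}) - V(s_t) and then accumulates the GAE recursion A_t = delta_t + gamma * lambda * (1 - done_{t+1}) * A_{t+1} backwards in time; the tensors are reversed before the scan and reversed back afterwards because tf.scan only folds forwards.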
Example 7: simulate
# Required import: import tensorflow [as alias]
# Or: from tensorflow import check_numerics [as alias]
def simulate(self, action):
  """Step the batch of environments.

  The results of the step can be accessed from the variables defined below.

  Args:
    action: Tensor holding the batch of actions to apply.

  Returns:
    Tuple of reward and done tensors.
  """
  with tf.name_scope('environment/simulate'):
    if action.dtype in (tf.float16, tf.float32, tf.float64):
      action = tf.check_numerics(action, 'action')
    observ_dtype = utils.parse_dtype(self._batch_env.observation_space)
    observ, reward, done = tf.py_func(
        lambda a: self._batch_env.step(a)[:3], [action],
        [observ_dtype, tf.float32, tf.bool], name='step')
    observ = tf.check_numerics(observ, 'observ')
    reward = tf.check_numerics(reward, 'reward')
    reward.set_shape((len(self),))
    done.set_shape((len(self),))
    with tf.control_dependencies([self._observ.assign(observ)]):
      return tf.identity(reward), tf.identity(done)
Example 8: _reset_non_empty
# Required import: import tensorflow [as alias]
# Or: from tensorflow import check_numerics [as alias]
def _reset_non_empty(self, indices):
  """Reset the batch of environments.

  Args:
    indices: The batch indices of the environments to reset.

  Returns:
    Batch tensor of the new observations.
  """
  observ_dtype = utils.parse_dtype(self._batch_env.observation_space)
  observ = tf.py_func(
      self._batch_env.reset, [indices], observ_dtype, name='reset')
  observ = tf.check_numerics(observ, 'observ')
  with tf.control_dependencies([
      tf.scatter_update(self._observ, indices, observ)]):
    return tf.identity(observ)
Example 9: get_acceptance_rate
# Required import: import tensorflow [as alias]
# Or: from tensorflow import check_numerics [as alias]
def get_acceptance_rate(q, p, new_q, new_p, log_posterior, mass, data_axes):
    old_hamiltonian, old_log_prob = hamiltonian(
        q, p, log_posterior, mass, data_axes)
    new_hamiltonian, new_log_prob = hamiltonian(
        new_q, new_p, log_posterior, mass, data_axes)
    old_log_prob = tf.check_numerics(
        old_log_prob,
        'HMC: old_log_prob has numeric errors! Try better initialization.')
    acceptance_rate = tf.exp(
        tf.minimum(-new_hamiltonian + old_hamiltonian, 0.0))
    is_finite = tf.logical_and(tf.is_finite(acceptance_rate),
                               tf.is_finite(new_log_prob))
    acceptance_rate = tf.where(is_finite, acceptance_rate,
                               tf.zeros_like(acceptance_rate))
    return old_hamiltonian, new_hamiltonian, old_log_prob, new_log_prob, \
        acceptance_rate
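Note that tf.exp(tf.minimum(-new_hamiltonian + old_hamiltonian, 0.0)) is the Metropolis acceptance probability min(1, exp(H_old - H_new)) written in a numerically safe form: clamping the exponent to be non-positive before calling tf.exp rules out overflow, and the tf.where afterwards zeroes out any proposal whose acceptance rate or new log-probability came out non-finite, so such proposals are always rejected.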
Example 10: _log_prob
# Required import: import tensorflow [as alias]
# Or: from tensorflow import check_numerics [as alias]
def _log_prob(self, given):
    mean, cov_tril = (self.path_param(self.mean),
                      self.path_param(self.cov_tril))
    log_det = 2 * tf.reduce_sum(
        tf.log(tf.matrix_diag_part(cov_tril)), axis=-1)
    n_dim = tf.cast(self._n_dim, self.dtype)
    log_z = - n_dim / 2 * tf.log(
        2 * tf.constant(np.pi, dtype=self.dtype)) - log_det / 2
    # log_z.shape == batch_shape
    if self._check_numerics:
        log_z = tf.check_numerics(log_z, "log[det(Cov)]")
    # (given-mean)' Sigma^{-1} (given-mean) =
    # (g-m)' L^{-T} L^{-1} (g-m) = |x|^2, where Lx = g-m =: y.
    y = tf.expand_dims(given - mean, -1)
    L, _ = maybe_explicit_broadcast(
        cov_tril, y, 'MultivariateNormalCholesky.cov_tril',
        'expand_dims(given, -1)')
    x = tf.matrix_triangular_solve(L, y, lower=True)
    x = tf.squeeze(x, -1)
    stoc_dist = -0.5 * tf.reduce_sum(tf.square(x), axis=-1)
    return log_z + stoc_dist
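The two pieces assemble the standard multivariate normal log-density log p(x) = -(n/2) * log(2*pi) - (1/2) * log det(Sigma) - (1/2) * (x - mean)' Sigma^{-1} (x - mean): with the Cholesky factorization Sigma = L L', the log-determinant reduces to 2 * sum(log(diag(L))) (computed as log_det above), and the quadratic form equals |x|^2 for the triangular solve L x = given - mean, exactly as the inline comment states.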
Example 11: _log_prob
# Required import: import tensorflow [as alias]
# Or: from tensorflow import check_numerics [as alias]
def _log_prob(self, given):
    # TODO: not right when given=0 or 1
    alpha, beta = self.alpha, self.beta
    log_given = tf.log(given)
    log_1_minus_given = tf.log(1 - given)
    lgamma_alpha, lgamma_beta = tf.lgamma(alpha), tf.lgamma(beta)
    lgamma_alpha_plus_beta = tf.lgamma(alpha + beta)
    if self._check_numerics:
        log_given = tf.check_numerics(log_given, "log(given)")
        log_1_minus_given = tf.check_numerics(
            log_1_minus_given, "log(1 - given)")
        lgamma_alpha = tf.check_numerics(lgamma_alpha, "lgamma(alpha)")
        lgamma_beta = tf.check_numerics(lgamma_beta, "lgamma(beta)")
        lgamma_alpha_plus_beta = tf.check_numerics(
            lgamma_alpha_plus_beta, "lgamma(alpha + beta)")
    return (alpha - 1) * log_given + (beta - 1) * log_1_minus_given - (
        lgamma_alpha + lgamma_beta - lgamma_alpha_plus_beta)
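The returned expression is the Beta log-density log p(x) = (alpha - 1) * log(x) + (beta - 1) * log(1 - x) - log B(alpha, beta), with log B(alpha, beta) = lgamma(alpha) + lgamma(beta) - lgamma(alpha + beta). Every intermediate goes through tf.check_numerics when requested because tf.log and tf.lgamma hit -inf/inf exactly at the boundary cases the TODO comment warns about.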
Example 12: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import check_numerics [as alias]
def __init__(self,
             rate,
             dtype=tf.int32,
             group_ndims=0,
             check_numerics=False,
             **kwargs):
    self._rate = tf.convert_to_tensor(rate)
    param_dtype = assert_same_float_dtype(
        [(self._rate, 'Poisson.rate')])
    assert_dtype_is_int_or_float(dtype)
    self._check_numerics = check_numerics
    super(Poisson, self).__init__(
        dtype=dtype,
        param_dtype=param_dtype,
        is_continuous=False,
        is_reparameterized=False,
        group_ndims=group_ndims,
        **kwargs)
Example 13: calculate_generalized_advantage_estimator
# Required import: import tensorflow [as alias]
# Or: from tensorflow import check_numerics [as alias]
def calculate_generalized_advantage_estimator(
    reward, value, done, gae_gamma, gae_lambda):
  # pylint: disable=g-doc-args
  """Generalized advantage estimator.

  Returns:
    GAE estimator. It will be one element shorter than the input; this is
    because to compute GAE for [0, ..., N-1] one needs V for [1, ..., N].
  """
  # pylint: enable=g-doc-args
  next_value = value[1:, :]
  next_not_done = 1 - tf.cast(done[1:, :], tf.float32)
  delta = (reward[:-1, :] + gae_gamma * next_value * next_not_done
           - value[:-1, :])
  return_ = tf.reverse(tf.scan(
      lambda agg, cur: cur[0] + cur[1] * gae_gamma * gae_lambda * agg,
      [tf.reverse(delta, [0]), tf.reverse(next_not_done, [0])],
      tf.zeros_like(delta[0, :]),
      parallel_iterations=1), [0])
  return tf.check_numerics(return_, "return")
Example 14: layer_normalize
# Required import: import tensorflow [as alias]
# Or: from tensorflow import check_numerics [as alias]
def layer_normalize(tensor):
  '''Apologies if I've abused this term'''
  in_shape = tf.shape(tensor)
  axes = list(range(1, len(tensor.shape)))

  # Keep batch axis
  t = tf.reduce_sum(tensor, axis=axes)
  t += EPSILON
  t = tf.reciprocal(t)
  t = tf.check_numerics(t, "1/sum")

  tensor = tf.einsum('brc,b->brc', tensor, t)
  tensor = dynamic_assert_shape(tensor, in_shape, "layer_normalize_tensor")
  return tensor
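Despite the name, this is not the usual layer normalization (hence the docstring's apology): it divides every entry of a batch element by that element's total sum (offset by EPSILON), so each element's entries sum to approximately 1, with tf.check_numerics guarding the reciprocal against a sum that underflows to zero.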
Example 15: _u_to_v
# Required import: import tensorflow [as alias]
# Or: from tensorflow import check_numerics [as alias]
def _u_to_v(self, log_alpha, u, eps=1e-8):
  """Convert u to tied randomness in v."""
  u_prime = tf.nn.sigmoid(-log_alpha)  # g(u') = 0

  v_1 = (u - u_prime) / tf.clip_by_value(1 - u_prime, eps, 1)
  v_1 = tf.clip_by_value(v_1, 0, 1)
  v_1 = tf.stop_gradient(v_1)
  v_1 = v_1 * (1 - u_prime) + u_prime

  v_0 = u / tf.clip_by_value(u_prime, eps, 1)
  v_0 = tf.clip_by_value(v_0, 0, 1)
  v_0 = tf.stop_gradient(v_0)
  v_0 = v_0 * u_prime

  v = tf.where(u > u_prime, v_1, v_0)
  v = tf.check_numerics(v, 'v sampling is not numerically stable.')

  # v and u are the same up to numerical errors.
  v = v + tf.stop_gradient(-v + u)
  return v
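The final line is the standard value-swap trick: v + tf.stop_gradient(-v + u) evaluates to u in the forward pass (u and v are meant to be the same random draw up to numerical error, as the comment says), while backpropagation sees only the v term, so gradients flow along the path through u_prime.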