This page collects typical usage examples of the Python function tensorflow.python.ops.distributions.util.gen_new_seed. If you have been wondering exactly what gen_new_seed does and how to use it, the hand-picked code examples below should help.
Fifteen code examples of gen_new_seed are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
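Before the examples, it helps to know what `gen_new_seed(seed, salt)` does: it deterministically derives a fresh seed by hashing the current seed together with a salt string, and passes `None` through unchanged when no seed is given. A minimal sketch of this behavior, closely following the TensorFlow 1.x source (treat the exact hashing details as an approximation):

import hashlib

def gen_new_seed(seed, salt):
  """Generate a new seed, from the given seed and salt."""
  if seed is None:
    return None
  string = (str(seed) + salt).encode("utf-8")
  return int(hashlib.md5(string).hexdigest()[:8], 16) & 0x7FFFFFFF

Because the derivation is a pure function of (seed, salt), re-seeding with a distinct salt at each use site keeps parallel random ops decorrelated while the overall computation stays reproducible.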
Example 1: default_exchange_proposed_fn_
def default_exchange_proposed_fn_(num_replica, seed=None):
  """Default function for `exchange_proposed_fn` of `kernel`."""
  # `probs` is a free variable captured from the enclosing closure
  # (see the sketch after this example).
  num_replica = tf.to_int32(num_replica)
  seed = distributions_util.gen_new_seed(seed, 'default_exchange_proposed_fn')
  random_uniform = tf.random_uniform([], seed=seed)
  accept_proposed_exchange = random_uniform < probs
  seed = distributions_util.gen_new_seed(seed, 'default_exchange_proposed_fn')
  zero_start = tf.random_uniform([], seed=seed) > 0.5
  if num_replica % 2 == 0:
    exchange_proposed = tf.where(
        zero_start, tf.range(num_replica),
        tf.sparse_to_dense(tf.range(num_replica - 2), (num_replica,),
                           tf.range(1, num_replica - 1)))
    exchange_proposed_n = tf.where(zero_start, num_replica // 2,
                                   num_replica // 2 - 1)
  else:
    exchange_proposed = tf.where(
        zero_start, tf.range(num_replica - 1), tf.range(1, num_replica))
    exchange_proposed_n = num_replica // 2
  exchange_proposed = tf.reshape(exchange_proposed, (num_replica // 2, 2))
  exchange_proposed = tf.where(accept_proposed_exchange, exchange_proposed,
                               tf.zeros_like(exchange_proposed))
  exchange_proposed_n = tf.where(accept_proposed_exchange,
                                 exchange_proposed_n,
                                 tf.zeros_like(exchange_proposed_n))
  return exchange_proposed, exchange_proposed_n
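In the original source this function is returned by a closure that supplies `probs`, which is consistent with the default argument `default_exchange_proposed_fn(1.)` seen in Example 12 below. A minimal sketch of that wrapper (the wrapper body here is an assumption, not verbatim source):

def default_exchange_proposed_fn(probs):
  """Returns the proposal function above with `probs` bound in."""
  def default_exchange_proposed_fn_(num_replica, seed=None):
    ...  # body exactly as shown in Example 1
  return default_exchange_proposed_fn_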
Example 2: body
def body(i, next_replica_idx):
  """`tf.while_loop` body."""
  ratio = (
      sampled_replica_ratios[next_replica_idx[exchange_proposed[i, 0]]]
      - sampled_replica_ratios[next_replica_idx[exchange_proposed[i, 1]]])
  ratio *= (
      self.inverse_temperatures[exchange_proposed[i, 1]]
      - self.inverse_temperatures[exchange_proposed[i, 0]])
  self._seed_stream = distributions_util.gen_new_seed(
      self._seed_stream, salt='replica_exchange_one_step')
  log_uniform = tf.log(tf.random_uniform(
      shape=tf.shape(ratio),
      dtype=ratio.dtype.base_dtype,
      seed=self._seed_stream))
  exchange = log_uniform < ratio
  exchange_op = tf.sparse_to_dense(
      [exchange_proposed[i, 0], exchange_proposed[i, 1]],
      [self.num_replica],
      [next_replica_idx[exchange_proposed[i, 1]] -
       next_replica_idx[exchange_proposed[i, 0]],
       next_replica_idx[exchange_proposed[i, 0]] -
       next_replica_idx[exchange_proposed[i, 1]]])
  next_replica_idx = tf.cond(exchange,
                             lambda: next_replica_idx + exchange_op,
                             lambda: next_replica_idx)
  return [i + 1, next_replica_idx]
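This body closes over several variables of the enclosing method (`sampled_replica_ratios`, `exchange_proposed`, `self`). Inside that method, a driver for the loop would look roughly like the following (the `exchange_proposed_n` bound and initial index are assumptions based on the surrounding examples):

i0 = tf.constant(0)
initial_replica_idx = tf.range(self.num_replica)
_, next_replica_idx = tf.while_loop(
    cond=lambda i, _: i < exchange_proposed_n,
    body=body,
    loop_vars=[i0, initial_replica_idx])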
Example 3: _sample_n
def _sample_n(self, n, seed=None):
  # Get ids as a [n, batch_size]-shaped matrix, unless batch_shape=[] then get
  # ids as a [n]-shaped vector.
  batch_size = (np.prod(self.batch_shape.as_list(), dtype=np.int32)
                if self.batch_shape.is_fully_defined()
                else math_ops.reduce_prod(self.batch_shape_tensor()))
  ids = self._mixture_distribution.sample(
      sample_shape=concat_vectors(
          [n],
          distribution_util.pick_vector(
              self.is_scalar_batch(),
              np.int32([]),
              [batch_size])),
      seed=distribution_util.gen_new_seed(
          seed, "poisson_lognormal_quadrature_compound"))
  # Stride `quadrature_degree` for `batch_size` number of times.
  offset = math_ops.range(start=0,
                          limit=batch_size * len(self.quadrature_probs),
                          delta=len(self.quadrature_probs),
                          dtype=ids.dtype)
  ids += offset
  rate = array_ops.gather(
      array_ops.reshape(self.distribution.rate, shape=[-1]), ids)
  rate = array_ops.reshape(
      rate, shape=concat_vectors([n], self.batch_shape_tensor()))
  return random_ops.random_poisson(
      lam=rate, shape=[], dtype=self.dtype, seed=seed)
Example 4: one_step
def one_step(self, current_state, previous_kernel_results):
  with tf.name_scope(
      name=mcmc_util.make_name(self.name, 'rwm', 'one_step'),
      values=[self.seed,
              current_state,
              previous_kernel_results.target_log_prob]):
    with tf.name_scope('initialize'):
      current_state_parts = (list(current_state)
                             if mcmc_util.is_list_like(current_state)
                             else [current_state])
      current_state_parts = [tf.convert_to_tensor(s, name='current_state')
                             for s in current_state_parts]
    self._seed_stream = distributions_util.gen_new_seed(
        self._seed_stream, salt='rwm_kernel_proposal')
    new_state_fn = self.new_state_fn
    next_state_parts = new_state_fn(current_state_parts, self._seed_stream)
    # Compute `target_log_prob` so it's available to MetropolisHastings.
    next_target_log_prob = self.target_log_prob_fn(*next_state_parts)

    def maybe_flatten(x):
      return x if mcmc_util.is_list_like(current_state) else x[0]

    return [
        maybe_flatten(next_state_parts),
        UncalibratedRandomWalkResults(
            log_acceptance_correction=tf.zeros(
                shape=tf.shape(next_target_log_prob),
                dtype=next_target_log_prob.dtype.base_dtype),
            target_log_prob=next_target_log_prob,
        ),
    ]
Example 5: _apply_variational_kernel
def _apply_variational_kernel(self, inputs):
  if (not isinstance(self.kernel_posterior, tfd.Independent) or
      not isinstance(self.kernel_posterior.distribution, tfd.Normal)):
    raise TypeError(
        '`DenseFlipout` requires '
        '`kernel_posterior_fn` produce an instance of '
        '`tf.distributions.Independent(tf.distributions.Normal)` '
        '(saw: \"{}\").'.format(self.kernel_posterior.name))
  self.kernel_posterior_affine = tfd.Normal(
      loc=tf.zeros_like(self.kernel_posterior.distribution.loc),
      scale=self.kernel_posterior.distribution.scale)
  self.kernel_posterior_affine_tensor = (
      self.kernel_posterior_tensor_fn(self.kernel_posterior_affine))
  self.kernel_posterior_tensor = None
  input_shape = tf.shape(inputs)
  batch_shape = input_shape[:-1]
  sign_input = random_rademacher(
      input_shape,
      dtype=inputs.dtype,
      seed=self.seed)
  sign_output = random_rademacher(
      tf.concat([batch_shape,
                 tf.expand_dims(self.units, 0)], 0),
      dtype=inputs.dtype,
      seed=distribution_util.gen_new_seed(
          self.seed, salt='dense_flipout'))
  perturbed_inputs = self._matmul(
      inputs * sign_input, self.kernel_posterior_affine_tensor) * sign_output
  outputs = self._matmul(inputs, self.kernel_posterior.distribution.loc)
  outputs += perturbed_inputs
  return outputs
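The `random_rademacher` helper used above draws entries that are +1 or -1 with equal probability (TF Probability ships such a helper under `tfp.math`). A minimal stand-in under that assumption:

def random_rademacher(shape, dtype=tf.float32, seed=None):
  # Map uniform draws in [0, 1) to {-1., +1.} with equal probability.
  bernoulli = tf.cast(tf.random_uniform(shape, seed=seed) < 0.5, dtype)
  return 2. * bernoulli - 1.

Note how `gen_new_seed(self.seed, salt='dense_flipout')` keeps the input-side and output-side sign flips decorrelated while remaining reproducible from a single user-supplied seed.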
Example 6: generate_one
def generate_one(d):
  seed[0] = distributions_util.gen_new_seed(
      seed[0], salt='mcmc_sample_halton_sequence_4')
  fn = lambda _: tf.random_shuffle(tf.range(d), seed=seed[0])
  return tf.map_fn(
      fn,
      sample_range,
      parallel_iterations=1 if seed[0] is not None else 10)
Example 7: _fn
def _fn(state_parts, seed):
  # `cauchy` is a distribution object captured from the enclosing scope.
  next_state_parts = []
  for state in state_parts:
    # Mutate seed with each use.
    seed = distributions_util.gen_new_seed(
        seed, salt='random_walk_cauchy_increment')
    next_state_parts.append(state + cauchy.sample(
        sample_shape=state.shape, seed=seed))
  return next_state_parts
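A hypothetical wrapper that supplies the captured `cauchy` distribution, modeled on TF Probability's `random_walk_normal_fn`; the wrapper name and the final kernel construction below are assumptions for illustration, not taken from the example:

import tensorflow_probability as tfp
tfd = tfp.distributions

def random_walk_cauchy_fn(scale=1.):
  cauchy = tfd.Cauchy(loc=0., scale=scale)
  def _fn(state_parts, seed):
    ...  # body exactly as shown in Example 7
  return _fn

kernel = tfp.mcmc.RandomWalkMetropolis(
    target_log_prob_fn=tfd.Normal(0., 1.).log_prob,
    new_state_fn=random_walk_cauchy_fn(scale=0.5))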
Example 8: _sample_n
def _sample_n(self, n, seed=None):
  if seed is None:
    seed = distribution_util.gen_new_seed(
        seed=np.random.randint(2**32 - 1),
        salt="autoregressive")
  samples = self.distribution0.sample(n, seed=seed)
  # Note: the same seed is reused at every autoregressive step; only the
  # initial draw from `distribution0` differs from the per-step draws.
  for _ in range(self._num_steps):
    samples = self.distribution_fn(samples).sample(seed=seed)
  return samples
Example 9: _sample_n
def _sample_n(self, n, seed):
  batch_shape = self.batch_shape_tensor()
  event_shape = self.event_shape_tensor()
  batch_ndims = array_ops.shape(batch_shape)[0]

  ndims = batch_ndims + 3  # sample_ndims=1, event_ndims=2
  shape = array_ops.concat([[n], batch_shape, event_shape], 0)

  # Complexity: O(nbk**2)
  x = random_ops.random_normal(shape=shape,
                               mean=0.,
                               stddev=1.,
                               dtype=self.dtype,
                               seed=seed)

  # Complexity: O(nbk)
  # This parametrization is equivalent to Chi2, i.e.,
  # ChiSquared(k) == Gamma(alpha=k/2, beta=1/2)
  g = random_ops.random_gamma(shape=[n],
                              alpha=self._multi_gamma_sequence(
                                  0.5 * self.df, self.dimension),
                              beta=0.5,
                              dtype=self.dtype,
                              seed=distribution_util.gen_new_seed(
                                  seed, "wishart"))

  # Complexity: O(nbk**2)
  x = array_ops.matrix_band_part(x, -1, 0)  # Tri-lower.

  # Complexity: O(nbk)
  x = array_ops.matrix_set_diag(x, math_ops.sqrt(g))

  # Make batch-op ready.
  # Complexity: O(nbk**2)
  perm = array_ops.concat([math_ops.range(1, ndims), [0]], 0)
  x = array_ops.transpose(x, perm)
  shape = array_ops.concat([batch_shape, [event_shape[0]], [-1]], 0)
  x = array_ops.reshape(x, shape)

  # Complexity: O(nbM) where M is the complexity of the operator solving a
  # vector system. E.g., for OperatorPDDiag, each matmul is O(k**2), so
  # this complexity is O(nbk**2). For OperatorPDCholesky, each matmul is
  # O(k^3) so this step has complexity O(nbk^3).
  x = self.scale_operator_pd.sqrt_matmul(x)

  # Undo make batch-op ready.
  # Complexity: O(nbk**2)
  shape = array_ops.concat([batch_shape, event_shape, [n]], 0)
  x = array_ops.reshape(x, shape)
  perm = array_ops.concat([[ndims - 1], math_ops.range(0, ndims - 1)], 0)
  x = array_ops.transpose(x, perm)

  if not self.cholesky_input_output_matrices:
    # Complexity: O(nbk^3)
    x = math_ops.matmul(x, x, adjoint_b=True)

  return x
Example 10: _sample_n
def _sample_n(self, n, seed=None):
  # Here we use the fact that if:
  #   lam ~ Gamma(concentration=total_count, rate=(1-probs)/probs),
  # then X ~ Poisson(lam) is Negative Binomially distributed.
  rate = random_ops.random_gamma(
      shape=[n],
      alpha=self.total_count,
      beta=math_ops.exp(-self.logits),
      dtype=self.dtype,
      seed=seed)
  return random_ops.random_poisson(
      rate,
      shape=[],
      dtype=self.dtype,
      seed=distribution_util.gen_new_seed(seed, "negative_binom"))
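A quick NumPy check of the Gamma-Poisson mixture identity the comment relies on (illustrative only; the `total_count` and `probs` values are arbitrary):

import numpy as np

rng = np.random.RandomState(0)
total_count, probs = 5.0, 0.3
# Gamma rate (1 - probs) / probs corresponds to NumPy scale probs / (1 - probs).
lam = rng.gamma(shape=total_count, scale=probs / (1. - probs), size=200000)
x = rng.poisson(lam)
# NegativeBinomial(total_count, probs) has mean total_count * probs / (1 - probs).
print(x.mean(), total_count * probs / (1. - probs))  # both close to 2.1428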
Example 11: _randomize
def _randomize(coeffs, radixes, seed=None):
  """Applies the Owen (2017) randomization to the coefficients."""
  given_dtype = coeffs.dtype
  coeffs = tf.to_int32(coeffs)
  num_coeffs = tf.shape(coeffs)[-1]
  radixes = tf.reshape(tf.to_int32(radixes), shape=[-1])
  seed = distributions_util.gen_new_seed(
      seed, salt='mcmc_sample_halton_sequence_3')
  perms = _get_permutations(num_coeffs, radixes, seed=seed)
  perms = tf.reshape(perms, shape=[-1])
  radix_sum = tf.reduce_sum(radixes)
  radix_offsets = tf.reshape(tf.cumsum(radixes, exclusive=True),
                             shape=[-1, 1])
  offsets = radix_offsets + tf.range(num_coeffs) * radix_sum
  permuted_coeffs = tf.gather(perms, coeffs + offsets)
  return tf.cast(permuted_coeffs, dtype=given_dtype)
Example 12: __init__
def __init__(self, target_log_prob_fn, inverse_temperatures,
             make_kernel_fn,
             exchange_proposed_fn=default_exchange_proposed_fn(1.),
             seed=None, name=None, **kwargs):
  """Instantiates this object.

  Args:
    target_log_prob_fn: Python callable which takes an argument like
      `current_state` (or `*current_state` if it's a list) and returns its
      (possibly unnormalized) log-density under the target distribution.
    inverse_temperatures: sequence of inverse temperatures, one per replica,
      to sample with. Must have statically known `rank` and statically known
      leading shape, i.e.,
      `inverse_temperatures.shape[0].value is not None`.
    make_kernel_fn: Python callable which takes `target_log_prob_fn` and
      `seed` args and returns a `TransitionKernel` instance.
    exchange_proposed_fn: Python callable which takes the number of replicas
      and returns the proposed combinations of replicas to exchange, along
      with the number of such combinations.
    seed: Python integer to seed the random number generator.
      Default value: `None` (i.e., no seed).
    name: Python `str` name prefixed to Ops created by this function.
      Default value: `None` (i.e., "remc_kernel").
    **kwargs: Arguments for `make_kernel_fn`.

  Raises:
    ValueError: if `inverse_temperatures` doesn't have statically known rank
      and statically known leading shape.
  """
  if inverse_temperatures.shape.ndims is None or \
      inverse_temperatures.shape[0].value is None:
    raise ValueError('"inverse_temperatures" must have statically known rank '
                     'and statically known leading shape')
  self._seed_stream = seed  # This will be mutated with use.
  self._parameters = dict(target_log_prob_fn=target_log_prob_fn,
                          inverse_temperatures=inverse_temperatures,
                          num_replica=inverse_temperatures.shape[0],
                          exchange_proposed_fn=exchange_proposed_fn,
                          seed=seed, name=name)
  self.replica_kernels = []
  for i in range(self.num_replica):
    self._seed_stream = distributions_util.gen_new_seed(
        self._seed_stream, salt='replica_kernels')
    self.replica_kernels.append(make_kernel_fn(
        target_log_prob_fn=_replica_log_prob_fn(
            inverse_temperatures[i], target_log_prob_fn),
        seed=self._seed_stream))
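A hypothetical instantiation, assuming this `__init__` belongs to a `ReplicaExchangeMC`-style kernel class and that `make_kernel_fn` wraps `tfp.mcmc.HamiltonianMonteCarlo` (all names below are assumptions, not taken from the example):

import tensorflow as tf
import tensorflow_probability as tfp

remc = ReplicaExchangeMC(
    target_log_prob_fn=tfp.distributions.Normal(0., 1.).log_prob,
    inverse_temperatures=tf.constant([1., 0.5, 0.25]),
    make_kernel_fn=lambda target_log_prob_fn, seed:
        tfp.mcmc.HamiltonianMonteCarlo(
            target_log_prob_fn=target_log_prob_fn,
            step_size=0.5,
            num_leapfrog_steps=3,
            seed=seed),
    seed=42)

Note how the constructor threads `gen_new_seed` through `self._seed_stream` so each replica kernel receives a distinct, reproducible seed.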
Example 13: _sample_n
def _sample_n(self, n, seed=None):
  n_draws = math_ops.cast(self.total_count, dtype=dtypes.int32)
  k = self.event_shape_tensor()[0]
  unnormalized_logits = array_ops.reshape(
      math_ops.log(random_ops.random_gamma(
          shape=[n],
          alpha=self.concentration,
          dtype=self.dtype,
          seed=seed)),
      shape=[-1, k])
  draws = random_ops.multinomial(
      logits=unnormalized_logits,
      num_samples=n_draws,
      seed=distribution_util.gen_new_seed(seed, salt="dirichlet_multinomial"))
  x = math_ops.reduce_sum(array_ops.one_hot(draws, depth=k), -2)
  final_shape = array_ops.concat([[n], self.batch_shape_tensor(), [k]], 0)
  return array_ops.reshape(x, final_shape)
Example 14: _sample_n
def _sample_n(self, n, seed=None):
  expanded_concentration1 = array_ops.ones_like(
      self.total_concentration, dtype=self.dtype) * self.concentration1
  expanded_concentration0 = array_ops.ones_like(
      self.total_concentration, dtype=self.dtype) * self.concentration0
  gamma1_sample = random_ops.random_gamma(
      shape=[n],
      alpha=expanded_concentration1,
      dtype=self.dtype,
      seed=seed)
  gamma2_sample = random_ops.random_gamma(
      shape=[n],
      alpha=expanded_concentration0,
      dtype=self.dtype,
      seed=distribution_util.gen_new_seed(seed, "beta"))
  beta_sample = gamma1_sample / (gamma1_sample + gamma2_sample)
  return beta_sample
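A quick NumPy check of the Beta-from-two-Gammas construction used above (the concentration values are arbitrary, for illustration):

import numpy as np

rng = np.random.RandomState(0)
a, b = 2.0, 5.0
g1 = rng.gamma(a, size=200000)
g2 = rng.gamma(b, size=200000)
beta_sample = g1 / (g1 + g2)
# Beta(a, b) has mean a / (a + b).
print(beta_sample.mean(), a / (a + b))  # both close to 0.2857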
Example 15: _sample_n
def _sample_n(self, n, seed=None):
  # The sampling method comes from the fact that if:
  #   X ~ Normal(0, 1)
  #   Z ~ Chi2(df)
  #   Y = X / sqrt(Z / df)
  # then:
  #   Y ~ StudentT(df).
  shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
  normal_sample = random_ops.random_normal(shape, dtype=self.dtype, seed=seed)
  df = self.df * array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)
  gamma_sample = random_ops.random_gamma(
      [n],
      0.5 * df,
      beta=0.5,
      dtype=self.dtype,
      seed=distribution_util.gen_new_seed(seed, salt="student_t"))
  samples = normal_sample * math_ops.rsqrt(gamma_sample / df)
  return samples * self.scale + self.loc  # Abs(scale) not wanted.
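A NumPy sketch of the same Student-t construction, useful for sanity-checking the comment above (the `df` value is arbitrary):

import numpy as np

rng = np.random.RandomState(0)
df = 7.0
x = rng.standard_normal(200000)
z = rng.chisquare(df, size=200000)  # Chi2(df) == Gamma(alpha=df/2, beta=1/2)
y = x / np.sqrt(z / df)
# StudentT(df) has variance df / (df - 2) for df > 2.
print(y.var(), df / (df - 2.))  # both close to 1.4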