This page collects typical usage examples of the Python method tensorflow.compat.v1.multinomial. If you have been wondering what v1.multinomial does, how to call it, or what real uses look like, the curated code examples below may help. You can also explore further usage examples from its module, tensorflow.compat.v1.
The following presents 14 code examples of the v1.multinomial method, sorted by popularity by default. You can upvote examples you like or find useful; your ratings help the system recommend better Python code examples.
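Before diving into the examples, a minimal sketch (not taken from the examples below) of the basic call: tf.multinomial treats each row of logits as unnormalized log-probabilities and draws num_samples integer class indices per row.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

logits = tf.log([[0.1, 0.3, 0.6]])               # [batch=1, num_classes=3]
samples = tf.multinomial(logits, num_samples=5)  # [1, 5], dtype int64
with tf.Session() as sess:
  print(sess.run(samples))                       # e.g. [[2 2 1 2 0]]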
Example 1: vq_nearest_neighbor
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import multinomial [as alias]
def vq_nearest_neighbor(x, hparams):
"""Find the nearest element in means to elements in x."""
bottleneck_size = 2**hparams.bottleneck_bits
means = hparams.means
x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True)
scalar_prod = tf.matmul(x, means, transpose_b=True)
dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod
if hparams.bottleneck_kind == "em":
x_means_idx = tf.multinomial(-dist, num_samples=hparams.num_samples)
x_means_hot = tf.one_hot(
x_means_idx, depth=bottleneck_size)
x_means_hot = tf.reduce_mean(x_means_hot, axis=1)
else:
x_means_idx = tf.argmax(-dist, axis=-1)
x_means_hot = tf.one_hot(x_means_idx, depth=bottleneck_size)
x_means = tf.matmul(x_means_hot, means)
e_loss = tf.reduce_mean(tf.squared_difference(x, tf.stop_gradient(x_means)))
return x_means_hot, e_loss
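A hedged usage sketch for this example; the hparams fields below (bottleneck_bits, bottleneck_kind, num_samples, means) are assumptions inferred from how the function reads them, not a confirmed API:

class FakeHParams(object):  # hypothetical stand-in for the real hparams
  bottleneck_bits = 4                  # codebook of 2**4 = 16 means
  bottleneck_kind = "em"               # take the multinomial (soft EM) branch
  num_samples = 10
  means = tf.random_normal([16, 64])   # [bottleneck_size, hidden_dim]

x = tf.random_normal([32, 64])         # [batch, hidden_dim]
x_means_hot, e_loss = vq_nearest_neighbor(x, FakeHParams())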
Example 2: sample_temperature_per_example
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import multinomial [as alias]
def sample_temperature_per_example(logits, temperature, sampling_keep_top_k=-1):
"""Either random sampling with different temperature per example.
Args:
logits: a Tensor.
temperature: a float vector of same size as logits.
sampling_keep_top_k: If not -1, only sample from the top k logits.
Returns:
a Tensor with one fewer dimension than logits.
"""
logits = _select_top_k(logits, sampling_keep_top_k)
logits /= tf.reshape(temperature, [-1] + [1] * (len(logits.shape) - 1))
reshaped_logits = tf.reshape(logits, [-1, shape_list(logits)[-1]])
choices = tf.multinomial(reshaped_logits, 1)
choices = tf.reshape(choices,
shape_list(logits)[:logits.get_shape().ndims - 1])
return choices
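A usage sketch under stated assumptions: _select_top_k and shape_list belong to the surrounding library, so only the per-example temperature idea is illustrated; the temperature vector is broadcast against each example's logits.

logits = tf.random_normal([4, 1000])             # [batch, vocab]
temperature = tf.constant([0.5, 0.7, 1.0, 1.3])  # one temperature per example
ids = sample_temperature_per_example(logits, temperature)  # shape [4]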
Example 3: multinomial_sample
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import multinomial [as alias]
def multinomial_sample(x, vocab_size=None, sampling_method="random",
temperature=1.0):
"""Multinomial sampling from a n-dimensional tensor.
Args:
x: Tensor of shape [..., vocab_size]. Parameterizes logits of multinomial.
vocab_size: Number of classes in multinomial distribution.
sampling_method: String, "random" or otherwise deterministic.
temperature: Positive float.
Returns:
Tensor of shape [...].
"""
vocab_size = vocab_size or common_layers.shape_list(x)[-1]
if sampling_method == "random" and temperature > 0.0:
samples = tf.multinomial(tf.reshape(x, [-1, vocab_size]) / temperature, 1)
else:
samples = tf.argmax(x, axis=-1)
reshaped_samples = tf.reshape(samples, common_layers.shape_list(x)[:-1])
return reshaped_samples
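A brief usage sketch, assuming common_layers.shape_list from the surrounding library; any sampling_method other than "random" (or a non-positive temperature) falls back to argmax.

x = tf.random_normal([2, 5, 100])                         # [..., vocab]
greedy = multinomial_sample(x, sampling_method="argmax")  # deterministic path
sampled = multinomial_sample(x, temperature=0.8)          # random path, shape [2, 5]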
Example 4: categorical_sample
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import multinomial [as alias]
def categorical_sample(logits, dtype=tf.int32,
sample_shape=(), seed=None):
"""Samples from categorical distribution."""
logits = tf.convert_to_tensor(logits, name="logits")
event_size = tf.shape(logits)[-1]
batch_shape_tensor = tf.shape(logits)[:-1]
def _sample_n(n):
"""Sample vector of categoricals."""
if logits.shape.ndims == 2:
logits_2d = logits
else:
logits_2d = tf.reshape(logits, [-1, event_size])
sample_dtype = tf.int64 if logits.dtype.size > 4 else tf.int32
draws = tf.multinomial(
logits_2d, n, seed=seed, output_dtype=sample_dtype)
draws = tf.reshape(
tf.transpose(draws),
tf.concat([[n], batch_shape_tensor], 0))
return tf.cast(draws, dtype)
return _call_sampler(_sample_n, sample_shape)
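A hedged usage sketch; _call_sampler is library-internal and assumed here to evaluate _sample_n over sample_shape, prepending those dimensions to the batch shape.

logits = tf.log([[0.2, 0.8], [0.9, 0.1]])              # [batch=2, classes=2]
draws = categorical_sample(logits, sample_shape=(3,))  # expected shape [3, 2]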
Example 5: sample
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import multinomial [as alias]
def sample(self, features):
"""Run the model and extract samples.
Args:
features: an map of string to `Tensor`.
Returns:
samples: an integer `Tensor`.
logits: a list of `Tensor`s, one per datashard.
losses: a dictionary: {loss-name (string): floating point `Scalar`}.
"""
logits, losses = self(features) # pylint: disable=not-callable
if self._target_modality_is_real:
return logits, logits, losses # Raw numbers returned from real modality.
if self.hparams.sampling_method == "argmax":
samples = tf.argmax(logits, axis=-1)
else:
assert self.hparams.sampling_method == "random"
def multinomial_squeeze(logits, temperature=1.0):
logits_shape = common_layers.shape_list(logits)
logits /= tf.reshape(temperature, [-1] + [1] * (len(logits_shape) - 1))
reshaped_logits = tf.reshape(logits, [-1, logits_shape[-1]])
choices = tf.multinomial(reshaped_logits, 1)
choices = tf.reshape(choices, logits_shape[:-1])
return choices
temperature = features.get("sampling_temp", self.hparams.sampling_temp)
samples = multinomial_squeeze(logits, temperature)
return samples, logits, losses
Example 6: multinomial_sample
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import multinomial [as alias]
def multinomial_sample(x, vocab_size, temperature):
"""Multinomial sampling from a n-dimensional tensor."""
if temperature > 0:
samples = tf.multinomial(tf.reshape(x, [-1, vocab_size]) / temperature, 1)
else:
samples = tf.argmax(x, axis=-1)
reshaped_samples = tf.reshape(samples, common_layers.shape_list(x)[:-1])
return tf.to_int32(reshaped_samples)
Example 7: video_pixel_noise_bottom
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import multinomial [as alias]
def video_pixel_noise_bottom(x, model_hparams, vocab_size):
"""Bottom transformation for video."""
input_noise = getattr(model_hparams, "video_modality_input_noise", 0.25)
inputs = x
if model_hparams.mode == tf.estimator.ModeKeys.TRAIN:
background = tfp.stats.percentile(inputs, 50., axis=[0, 1, 2, 3])
input_shape = common_layers.shape_list(inputs)
input_size = tf.reduce_prod(input_shape[:-1])
input_mask = tf.multinomial(
tf.log([[input_noise, 1.-input_noise]]), input_size)
input_mask = tf.reshape(tf.cast(input_mask, tf.int32),
input_shape[:-1]+[1])
inputs = inputs * input_mask + background * (1 - input_mask)
return video_bottom(inputs, model_hparams, vocab_size)
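The multinomial trick above, shown in isolation with made-up sizes: a two-class multinomial over [noise_prob, 1 - noise_prob] is a cheap way to draw a flat 0/1 Bernoulli mask of any length.

noise_prob = 0.25
mask = tf.multinomial(tf.log([[noise_prob, 1. - noise_prob]]), 4096)  # [1, 4096]
mask = tf.cast(mask, tf.int32)  # 0 with probability noise_prob, else 1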
Example 8: ae_latent_softmax
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import multinomial [as alias]
def ae_latent_softmax(latents_pred, latents_discrete_hot, vocab_size, hparams):
"""Latent prediction and loss.
Args:
latents_pred: Tensor of shape [..., depth].
latents_discrete_hot: Tensor of shape [..., vocab_size].
vocab_size: an int representing the vocab size.
hparams: HParams.
Returns:
sample: Tensor of shape [...], a sample from a multinomial distribution.
loss: Tensor of shape [...], the softmax cross-entropy.
"""
with tf.variable_scope("latent_logits"):
latents_logits = tf.layers.dense(latents_pred, vocab_size,
name="logits_dense")
if hparams.logit_normalization:
latents_logits *= tf.rsqrt(1e-8 +
tf.reduce_mean(tf.square(latents_logits)))
loss = tf.nn.softmax_cross_entropy_with_logits_v2(
labels=latents_discrete_hot, logits=latents_logits)
# TODO(trandustin): tease this out from ae_latent_softmax.
# we use just the loss portion to anchor prior / encoder on text.
sample = multinomial_sample(latents_logits,
vocab_size,
hparams.sampling_method,
hparams.sampling_temp)
return sample, loss
Example 9: vq_nearest_neighbor
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import multinomial [as alias]
def vq_nearest_neighbor(x, means,
soft_em=False, num_samples=10, temperature=None):
"""Find the nearest element in means to elements in x."""
bottleneck_size = common_layers.shape_list(means)[0]
x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True)
scalar_prod = tf.matmul(x, means, transpose_b=True)
dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod
if soft_em:
x_means_idx = tf.multinomial(-dist, num_samples=num_samples)
x_means_hot = tf.one_hot(
x_means_idx, depth=common_layers.shape_list(means)[0])
x_means_hot = tf.reduce_mean(x_means_hot, axis=1)
else:
if temperature is None:
x_means_idx = tf.argmax(-dist, axis=-1)
else:
      x_means_idx = tf.multinomial(-dist / temperature, 1)
x_means_idx = tf.squeeze(x_means_idx, axis=-1)
if (common_layers.should_generate_summaries() and
not common_layers.is_xla_compiled()):
tf.summary.histogram("means_idx", tf.reshape(x_means_idx, [-1]))
x_means_hot = tf.one_hot(x_means_idx, bottleneck_size)
x_means_hot_flat = tf.reshape(x_means_hot, [-1, bottleneck_size])
x_means = tf.matmul(x_means_hot_flat, means)
e_loss = tf.reduce_mean(tf.squared_difference(x, tf.stop_gradient(x_means)))
return x_means_hot, e_loss, dist
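A hedged usage sketch for this variant with a made-up codebook; per the code above, means is a [bottleneck_size, hidden_dim] table and x holds flattened encoder outputs.

means = tf.random_normal([256, 64])  # [bottleneck_size, hidden_dim] codebook
x = tf.random_normal([1024, 64])     # [batch * length, hidden_dim]
hot, loss, dist = vq_nearest_neighbor(x, means, soft_em=True, num_samples=10)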
Example 10: provide_one_hot_labels
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import multinomial [as alias]
def provide_one_hot_labels(self, batch_size):
"""Provides one hot labels."""
pitch_counts = self.get_pitch_counts()
pitches = sorted(pitch_counts.keys())
counts = [pitch_counts[p] for p in pitches]
indices = tf.reshape(
tf.multinomial(tf.log([tf.to_float(counts)]), batch_size), [batch_size])
one_hot_labels = tf.one_hot(indices, depth=len(pitches))
return one_hot_labels
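The core idea in isolation, with made-up counts: tf.multinomial over log-counts draws class indices in proportion to each class's frequency.

counts = [10., 30., 60.]                                  # hypothetical pitch counts
idx = tf.multinomial(tf.log([counts]), num_samples=8)[0]  # [8]; ~10%/30%/60% split
labels = tf.one_hot(idx, depth=3)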
Example 11: sample_with_temperature
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import multinomial [as alias]
def sample_with_temperature(logits, temperature):
"""Either argmax after softmax or random sample along the pitch axis.
Args:
logits: a Tensor of shape (batch, time, pitch, instrument).
temperature: a float 0.0=argmax 1.0=random
Returns:
a Tensor of the same shape, with one_hots on the pitch dimension.
"""
logits = tf.transpose(logits, [0, 1, 3, 2])
pitch_range = tf.shape(logits)[-1]
def sample_from_logits(logits):
with tf.control_dependencies([tf.assert_greater(temperature, 0.0)]):
logits = tf.identity(logits)
reshaped_logits = (
tf.reshape(logits, [-1, tf.shape(logits)[-1]]) / temperature)
choices = tf.multinomial(reshaped_logits, 1)
choices = tf.reshape(choices,
tf.shape(logits)[:logits.get_shape().ndims - 1])
return choices
choices = tf.cond(tf.equal(temperature, 0.0),
lambda: tf.argmax(tf.nn.softmax(logits), -1),
lambda: sample_from_logits(logits))
samples_onehot = tf.one_hot(choices, pitch_range)
return tf.transpose(samples_onehot, [0, 1, 3, 2])
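A usage sketch with made-up dimensions; passing temperature as a tensor keeps the tf.cond and tf.assert_greater graph ops happy.

logits = tf.random_normal([8, 32, 88, 4])  # (batch, time, pitch, instrument)
onehots = sample_with_temperature(logits, temperature=tf.constant(1.0))
# onehots: same shape as logits, one-hot over the pitch dimension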
Example 12: ae_latent_softmax
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import multinomial [as alias]
def ae_latent_softmax(latents_pred, latents_discrete, hparams):
"""Latent prediction and loss."""
vocab_size = 2 ** hparams.z_size
if hparams.num_decode_blocks < 2:
latents_logits = tf.layers.dense(latents_pred, vocab_size,
name="extra_logits")
if hparams.logit_normalization:
latents_logits *= tf.rsqrt(1e-8 +
tf.reduce_mean(tf.square(latents_logits)))
loss = None
if latents_discrete is not None:
if hparams.soft_em:
# latents_discrete is actually one-hot of multinomial samples
assert hparams.num_decode_blocks == 1
loss = tf.nn.softmax_cross_entropy_with_logits_v2(
labels=latents_discrete, logits=latents_logits)
else:
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=latents_discrete, logits=latents_logits)
sample = multinomial_sample(
latents_logits, vocab_size, hparams.sampling_temp)
return sample, loss
# Multi-block case.
vocab_bits = int(math.log(vocab_size, 2))
assert vocab_size == 2**vocab_bits
assert vocab_bits % hparams.num_decode_blocks == 0
block_vocab_size = 2**(vocab_bits // hparams.num_decode_blocks)
latents_logits = [
tf.layers.dense(
latents_pred, block_vocab_size, name="extra_logits_%d" % i)
for i in range(hparams.num_decode_blocks)
]
loss = None
if latents_discrete is not None:
losses = []
for i in range(hparams.num_decode_blocks):
d = tf.floormod(tf.floordiv(latents_discrete,
block_vocab_size**i), block_vocab_size)
losses.append(tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=d, logits=latents_logits[i]))
loss = sum(losses)
samples = [multinomial_sample(l, block_vocab_size, hparams.sampling_temp)
for l in latents_logits]
sample = sum([s * block_vocab_size**i for i, s in enumerate(samples)])
return sample, loss
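The multi-block recombination at the end, as a tiny worked example in plain Python: with 3 blocks of 16 symbols each, a latent id is just its base-16 digits, and sum(s * block_vocab_size**i) inverts the floordiv/floormod decomposition used for the per-block losses.

latent_id = 1234
block_vocab_size, num_blocks = 16, 3
digits = [(latent_id // block_vocab_size**i) % block_vocab_size
          for i in range(num_blocks)]  # [2, 13, 4]
assert sum(d * block_vocab_size**i for i, d in enumerate(digits)) == latent_id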
Example 13: nearest_neighbor
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import multinomial [as alias]
def nearest_neighbor(self, x, means):
"""Find the nearest element in means to elements in x.
Args:
x: Batch of encoder continuous latent states sliced/projected into
shape [-1, num_blocks, block_dim].
means: Embedding means of shape.
Returns:
Tensor with nearest element in mean encoded in one-hot notation.
"""
x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keep_dims=True)
means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keep_dims=True)
scalar_prod = tf.matmul(
tf.transpose(x, perm=[1, 0, 2]), tf.transpose(means, perm=[0, 2, 1]))
scalar_prod = tf.transpose(scalar_prod, perm=[1, 0, 2])
dist = x_norm_sq + tf.transpose(
means_norm_sq, perm=[2, 0, 1]) - 2 * scalar_prod
if self.hparams.soft_em:
nearest_idx = tf.stack(
[
tf.multinomial(
-dist[:, i, :], num_samples=self.hparams.num_samples)
for i in range(self.hparams.num_blocks)
],
axis=1)
nearest_hot = tf.one_hot(nearest_idx, depth=self.hparams.block_v_size)
nearest_hot = tf.reduce_mean(nearest_hot, axis=-2)
else:
if self.hparams.random_top_k > 1:
_, top_k_idx = tf.nn.top_k(-dist, k=self.hparams.random_top_k)
nearest_idx = tf.gather(
top_k_idx,
tf.random_uniform(
[1],
minval=0,
maxval=self.hparams.random_top_k - 1,
dtype=tf.int32),
axis=-1)
else:
if self.hparams.use_scales:
dist /= tf.reshape(self.hparams.scales,
[1, 1, self.hparams.moe_num_experts])
nearest_idx = tf.argmax(-dist, axis=-1)
nearest_hot = tf.one_hot(nearest_idx, self.hparams.block_v_size)
return nearest_hot
Example 14: _preprocess
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import multinomial [as alias]
def _preprocess(self, features):
"""Preprocesses features for multilingual translation."""
seqs, tags = {}, {}
if self._hparams.mode == tf.estimator.ModeKeys.TRAIN:
seqs["src"] = features["inputs"]
seqs["tgt"] = features["targets"]
seqs["aux"] = None
tags["src"] = features["input_tags"]
tags["tgt"] = features["target_tags"]
tags["aux"] = None
# Construct a tensor of auxiliary tags.
batch_size = common_layers.shape_list(features["all_tags"])[0]
num_all_tags = common_layers.shape_list(features["all_tags"])[1]
# <float32> [num_all_tags, 1, emb_dim].
all_tags = features["all_tags"][0] # batch elements are identical.
# <int32> [batch_size].
aux_tag_index = tf.multinomial(
tf.ones([1, num_all_tags]), batch_size,
output_dtype=tf.int32)[0]
# <float32> [batch_size, 1, 1, emb_dim].
tags["aux"] = tf.expand_dims(tf.gather(all_tags, aux_tag_index), 1)
from_domains = ["src", "src", "tgt"]
to_domains = ["tgt", "aux", "aux"]
else:
seqs["src"] = features["inputs"]
seqs["tgt"] = features["targets"]
tags["src"] = None
tags["tgt"] = features["target_tags"]
# Expand target tags to beam width, if necessary.
if self._hparams.mode == tf.estimator.ModeKeys.PREDICT:
tags["tgt"] = tf.tile(tags["tgt"], [self._hparams.beam_width, 1, 1, 1])
from_domains = ["src"]
to_domains = ["tgt"]
# Construct inputs and targets.
inputs, targets = {}, {}
for fd, td in zip(from_domains, to_domains):
key = "%s>%s" % (fd, td)
inputs[key], targets[key] = self._build_inputs_and_targets(
seqs[fd], tags[fd], seqs[td], tags[td])
return inputs, targets
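The auxiliary-tag draw in isolation, with made-up sizes: all-ones logits make the categorical uniform, so each batch element picks an auxiliary tag uniformly at random.

num_all_tags, batch_size = 7, 4
aux_tag_index = tf.multinomial(tf.ones([1, num_all_tags]), batch_size,
                               output_dtype=tf.int32)[0]  # [batch_size]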