This article collects typical usage examples of the tensorflow.multinomial method in Python. If you are wondering how tensorflow.multinomial is used in practice, or are looking for concrete examples of calling it, the curated code samples here may help. You can also explore further usage examples from the tensorflow module.
Below are 15 code examples of tensorflow.multinomial, sorted by popularity by default.
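Before the examples, a minimal sketch of the call they all build on may be useful. This assumes TensorFlow 1.x graph mode, and the logits values below are made up purely for illustration; later 1.x releases deprecate tf.multinomial in favor of the equivalent tf.random.categorical.

import tensorflow as tf

# Each row holds unnormalized log-probabilities (logits) over 3 classes.
# tf.multinomial draws `num_samples` class indices per row and returns an
# int64 tensor of shape [batch_size, num_samples].
logits = tf.constant([[1.0, 2.0, 3.0],
                      [3.0, 1.0, 1.0]])
samples = tf.multinomial(logits, num_samples=4)  # shape [2, 4]

with tf.Session() as sess:
  print(sess.run(samples))  # e.g. [[2 2 1 2]
                            #       [0 0 2 0]]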
Example 1: vq_nearest_neighbor
# Required import: import tensorflow [as alias]
# Or: from tensorflow import multinomial [as alias]
def vq_nearest_neighbor(x, hparams):
  """Find the nearest element in means to elements in x."""
  bottleneck_size = 2**hparams.bottleneck_bits
  means = hparams.means
  x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
  means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True)
  scalar_prod = tf.matmul(x, means, transpose_b=True)
  # Squared Euclidean distance from each row of x to each mean.
  dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod
  if hparams.bottleneck_kind == "em":
    x_means_idx = tf.multinomial(-dist, num_samples=hparams.num_samples)
    x_means_hot = tf.one_hot(
        x_means_idx, depth=bottleneck_size)
    x_means_hot = tf.reduce_mean(x_means_hot, axis=1)
  else:
    x_means_idx = tf.argmax(-dist, axis=-1)
    x_means_hot = tf.one_hot(x_means_idx, depth=bottleneck_size)
  x_means = tf.matmul(x_means_hot, means)
  e_loss = tf.reduce_mean(tf.square(x - tf.stop_gradient(x_means)))
  return x_means_hot, e_loss
Example 2: multinomial_sample
# Required import: import tensorflow [as alias]
# Or: from tensorflow import multinomial [as alias]
def multinomial_sample(x, vocab_size, sampling_method, temperature):
  """Multinomial sampling from an n-dimensional tensor.

  Args:
    x: Tensor of shape [..., vocab_size]. Parameterizes logits of multinomial.
    vocab_size: Number of classes in multinomial distribution.
    sampling_method: String, "random" or otherwise deterministic.
    temperature: Positive float.

  Returns:
    Tensor of shape [...].
  """
  if sampling_method == "random":
    samples = tf.multinomial(tf.reshape(x, [-1, vocab_size]) / temperature, 1)
  else:
    samples = tf.argmax(x, axis=-1)
  reshaped_samples = tf.reshape(samples, common_layers.shape_list(x)[:-1])
  return reshaped_samples
Example 3: vq_nearest_neighbor
# Required import: import tensorflow [as alias]
# Or: from tensorflow import multinomial [as alias]
def vq_nearest_neighbor(x, means, soft_em=False, num_samples=10):
  """Find the nearest element in means to elements in x."""
  bottleneck_size = common_layers.shape_list(means)[0]
  x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
  means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True)
  scalar_prod = tf.matmul(x, means, transpose_b=True)
  dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod
  if soft_em:
    x_means_idx = tf.multinomial(-dist, num_samples=num_samples)
    x_means_hot = tf.one_hot(
        x_means_idx, depth=common_layers.shape_list(means)[0])
    x_means_hot = tf.reduce_mean(x_means_hot, axis=1)
  else:
    x_means_idx = tf.argmax(-dist, axis=-1)
    x_means_hot = tf.one_hot(x_means_idx, bottleneck_size)
  x_means_hot_flat = tf.reshape(x_means_hot, [-1, bottleneck_size])
  x_means = tf.matmul(x_means_hot_flat, means)
  e_loss = tf.reduce_mean(tf.square(x - tf.stop_gradient(x_means)))
  return x_means_hot, e_loss
Example 4: _head
# Required import: import tensorflow [as alias]
# Or: from tensorflow import multinomial [as alias]
def _head(self, policy_input, heading, xy, target_xy):
  """Build the head of the agent: linear policy and value function, and pass
  the auxiliary outputs through.
  """
  # Linear policy and value function.
  policy_logits = snt.Linear(
      self._num_actions, name='policy_logits')(policy_input)
  baseline = tf.squeeze(snt.Linear(1, name='baseline')(policy_input), axis=-1)
  # Sample an action from the policy.
  new_action = tf.multinomial(
      policy_logits, num_samples=1, output_dtype=tf.int32)
  new_action = tf.squeeze(new_action, 1, name='new_action')
  return AgentOutput(
      new_action, policy_logits, baseline, heading, xy, target_xy)
Example 5: vq_nearest_neighbor
# Required import: import tensorflow [as alias]
# Or: from tensorflow import multinomial [as alias]
def vq_nearest_neighbor(x, hparams):
  """Find the nearest element in means to elements in x."""
  bottleneck_size = 2**hparams.bottleneck_bits
  means = hparams.means
  x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
  means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True)
  scalar_prod = tf.matmul(x, means, transpose_b=True)
  dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod
  if hparams.bottleneck_kind == "em":
    x_means_idx = tf.multinomial(-dist, num_samples=hparams.num_samples)
    x_means_hot = tf.one_hot(
        x_means_idx, depth=bottleneck_size)
    x_means_hot = tf.reduce_mean(x_means_hot, axis=1)
  else:
    x_means_idx = tf.argmax(-dist, axis=-1)
    x_means_hot = tf.one_hot(x_means_idx, depth=bottleneck_size)
  x_means = tf.matmul(x_means_hot, means)
  e_loss = tf.reduce_mean(tf.squared_difference(x, tf.stop_gradient(x_means)))
  return x_means_hot, e_loss
Example 6: multinomial_sample
# Required import: import tensorflow [as alias]
# Or: from tensorflow import multinomial [as alias]
def multinomial_sample(x, vocab_size=None, sampling_method="random",
                       temperature=1.0):
  """Multinomial sampling from an n-dimensional tensor.

  Args:
    x: Tensor of shape [..., vocab_size]. Parameterizes logits of multinomial.
    vocab_size: Number of classes in multinomial distribution.
    sampling_method: String, "random" or otherwise deterministic.
    temperature: Positive float.

  Returns:
    Tensor of shape [...].
  """
  vocab_size = vocab_size or common_layers.shape_list(x)[-1]
  if sampling_method == "random" and temperature > 0.0:
    samples = tf.multinomial(tf.reshape(x, [-1, vocab_size]) / temperature, 1)
  else:
    samples = tf.argmax(x, axis=-1)
  reshaped_samples = tf.reshape(samples, common_layers.shape_list(x)[:-1])
  return reshaped_samples
Example 7: sample_with_temperature
# Required import: import tensorflow [as alias]
# Or: from tensorflow import multinomial [as alias]
def sample_with_temperature(logits, temperature):
  """Either argmax or random sampling.

  Args:
    logits: a Tensor.
    temperature: a float; 0.0 means argmax, 1.0 means plain random sampling.

  Returns:
    a Tensor with one fewer dimension than logits.
  """
  if temperature == 0.0:
    # TF argmax doesn't handle >5 dimensions, so we reshape here.
    logits_shape = shape_list(logits)
    argmax = tf.argmax(tf.reshape(logits, [-1, logits_shape[-1]]), axis=1)
    return tf.reshape(argmax, logits_shape[:-1])
  else:
    assert temperature > 0.0
    reshaped_logits = (
        tf.reshape(logits, [-1, shape_list(logits)[-1]]) / temperature)
    choices = tf.multinomial(reshaped_logits, 1)
    choices = tf.reshape(choices,
                         shape_list(logits)[:logits.get_shape().ndims - 1])
    return choices
Example 8: sampleAction
# Required import: import tensorflow [as alias]
# Or: from tensorflow import multinomial [as alias]
def sampleAction(self, states):
  # TODO: use this code piece when tf.multinomial gets better
  # sample action from current policy
  # actions = self.session.run(self.predicted_actions, {self.states: states})[0]
  # return actions[0]

  # temporary workaround
  def softmax(y):
    """Simple helper that turns unnormalized log-probs into probabilities."""
    maxy = np.amax(y)
    e = np.exp(y - maxy)
    return e / np.sum(e)

  # epsilon-greedy exploration strategy
  if random.random() < self.exploration:
    return random.randint(0, self.num_actions - 1)
  else:
    action_scores = self.session.run(self.action_scores, {self.states: states})[0]
    # Nudge the probabilities down slightly so rounding cannot push their sum
    # above 1, which np.random.multinomial rejects.
    action_probs = softmax(action_scores) - 1e-5
    action = np.argmax(np.random.multinomial(1, action_probs))
    return action
Example 9: create_softmax_from_logits
# Required import: import tensorflow [as alias]
# Or: from tensorflow import multinomial [as alias]
def create_softmax_from_logits(logits):
  """Create nodes for softmax computation from logits."""
  temperature = tf.placeholder_with_default(
      1.0, shape=(), name='temperature')
  logits = logits / temperature
  logits_shape = tf.shape(logits)
  logits_dim = logits_shape[-1]
  logits_2d = tf.reshape(logits, [-1, logits_dim])
  samples = tf.multinomial(logits_2d, 1)
  samples = tf.reshape(samples, logits_shape[:-1])
  probs = tf.nn.softmax(logits)
  predictions = tf.argmax(probs, axis=2)
  return logits, probs, predictions, samples, temperature
Example 10: predict_from_model
# Required import: import tensorflow [as alias]
# Or: from tensorflow import multinomial [as alias]
def predict_from_model(logit_groups_geometry, logit_groups_semantics,
                       temperature):
  """Reconstruct predicted geometry and semantics from model output."""
  predictions_geometry_list = []
  for logit_group in logit_groups_geometry:
    if FLAGS.p_norm > 0:
      predictions_geometry_list.append(logit_group[:, :, :, :, 0])
    else:
      logit_group_shape = logit_group.shape_as_list()
      logit_group = tf.reshape(logit_group, [-1, logit_group_shape[-1]])
      samples = tf.multinomial(temperature * logit_group, 1)
      predictions_geometry_list.append(
          tf.reshape(samples, logit_group_shape[:-1]))
  predictions_semantics_list = []
  if FLAGS.predict_semantics:
    for logit_group in logit_groups_semantics:
      predictions_semantics_list.append(tf.argmax(logit_group, 4))
  else:
    predictions_semantics_list = [
        tf.zeros(shape=predictions_geometry_list[0].shape, dtype=tf.uint8)
    ] * len(predictions_geometry_list)
  return predictions_geometry_list, predictions_semantics_list
Example 11: build_forward
# Required import: import tensorflow [as alias]
# Or: from tensorflow import multinomial [as alias]
def build_forward(self, _input):
  output = _input  # [batch_size, num_steps, rnn_units]
  feature_dim = int(output.get_shape()[2])  # rnn_units
  output = tf.reshape(output, [-1, feature_dim])  # [batch_size * num_steps, rnn_units]
  final_activation = 'sigmoid' if self.out_dim == 1 else 'softmax'
  if self.net_type == 'simple':
    net_config = [] if self.net_config is None else self.net_config
    with tf.variable_scope('wider_actor'):
      for layer in net_config:
        units, activation = layer.get('units'), layer.get('activation', 'relu')
        output = BasicModel.fc_layer(output, units, use_bias=True)
        output = BasicModel.activation(output, activation)
      logits = BasicModel.fc_layer(output, self.out_dim, use_bias=True)  # [batch_size * num_steps, out_dim]
    probs = BasicModel.activation(logits, final_activation)  # [batch_size * num_steps, out_dim]
    probs_dim = self.out_dim
    if self.out_dim == 1:
      probs = tf.concat([1 - probs, probs], axis=1)
      probs_dim = 2
    self.decision = tf.multinomial(tf.log(probs), 1)  # [batch_size * num_steps, 1]
    self.decision = tf.reshape(self.decision, [-1, self.num_steps])  # [batch_size, num_steps]
    self.probs = tf.reshape(probs, [-1, self.num_steps, probs_dim])  # [batch_size, num_steps, out_dim]
  else:
    raise ValueError('Do not support %s' % self.net_type)
Example 12: sample
# Required import: import tensorflow [as alias]
# Or: from tensorflow import multinomial [as alias]
def sample(self, features):
  """Run the model and extract samples.

  Args:
    features: a map of string to `Tensor`.

  Returns:
    samples: an integer `Tensor`.
    logits: a list of `Tensor`s, one per datashard.
    losses: a dictionary: {loss-name (string): floating point `Scalar`}.
  """
  logits, losses = self(features)  # pylint: disable=not-callable
  if self._target_modality_is_real:
    return logits, logits, losses  # Raw numbers returned from real modality.
  if self.hparams.sampling_method == "argmax":
    samples = tf.argmax(logits, axis=-1)
  else:
    assert self.hparams.sampling_method == "random"

    def multinomial_squeeze(logits, temperature=1.0):
      logits_shape = common_layers.shape_list(logits)
      reshaped_logits = (
          tf.reshape(logits, [-1, logits_shape[-1]]) / temperature)
      choices = tf.multinomial(reshaped_logits, 1)
      choices = tf.reshape(choices, logits_shape[:-1])
      return choices

    samples = multinomial_squeeze(logits, self.hparams.sampling_temp)
  return samples, logits, losses
Example 13: multinomial_sample
# Required import: import tensorflow [as alias]
# Or: from tensorflow import multinomial [as alias]
def multinomial_sample(x, vocab_size, temperature):
  """Multinomial sampling from an n-dimensional tensor."""
  if temperature > 0:
    samples = tf.multinomial(tf.reshape(x, [-1, vocab_size]) / temperature, 1)
  else:
    samples = tf.argmax(x, axis=-1)
  reshaped_samples = tf.reshape(samples, common_layers.shape_list(x)[:-1])
  return tf.to_int32(reshaped_samples)
Example 14: ae_latent_softmax
# Required import: import tensorflow [as alias]
# Or: from tensorflow import multinomial [as alias]
def ae_latent_softmax(latents_pred, latents_discrete_hot, hparams):
  """Latent prediction and loss.

  Args:
    latents_pred: Tensor of shape [..., depth].
    latents_discrete_hot: Tensor of shape [..., vocab_size].
    hparams: tf.contrib.training.HParams.

  Returns:
    sample: Tensor of shape [...], a sample from a multinomial distribution.
    loss: Tensor of shape [...], the softmax cross-entropy.
  """
  vocab_size = 2**hparams.bottleneck_bits
  with tf.variable_scope("latent_logits"):
    latents_logits = tf.layers.dense(latents_pred, vocab_size,
                                     name="logits_dense")
    if hparams.logit_normalization:
      latents_logits *= tf.rsqrt(1e-8 +
                                 tf.reduce_mean(tf.square(latents_logits)))
    loss = tf.nn.softmax_cross_entropy_with_logits_v2(
        labels=latents_discrete_hot, logits=latents_logits)
    sample = multinomial_sample(latents_logits,
                                vocab_size,
                                hparams.sampling_method,
                                hparams.sampling_temp)
    return sample, loss
Example 15: _head
# Required import: import tensorflow [as alias]
# Or: from tensorflow import multinomial [as alias]
def _head(self, core_output):
  """Build the head of the agent: linear policy and value function."""
  policy_logits = snt.Linear(
      self._num_actions, name='policy_logits')(
          core_output)
  baseline = tf.squeeze(snt.Linear(1, name='baseline')(core_output), axis=-1)
  # Sample an action from the policy.
  new_action = tf.multinomial(
      policy_logits, num_samples=1, output_dtype=tf.int32)
  new_action = tf.squeeze(new_action, 1, name='new_action')
  return AgentOutput(new_action, policy_logits, baseline)