This page collects typical usage examples of the Python function tensorflow.python.ops.random_ops.multinomial. If you are unsure what multinomial does, how to call it, or what it looks like in practice, the curated code examples here should help.
The sections below present 15 code examples of the multinomial function, sorted by popularity.
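Before the examples, a minimal usage sketch for orientation (assuming a TF 1.x install; tf.multinomial is the public alias of random_ops.multinomial, later renamed tf.random.categorical):

import tensorflow as tf

# Logits are unnormalized log-probabilities, shape [batch_size, num_classes].
logits = tf.log([[0.1, 0.3, 0.6]])
# Draw 5 class indices per batch row; output shape is [batch_size, num_samples].
samples = tf.multinomial(logits, num_samples=5, seed=42)
with tf.Session() as sess:
    print(sess.run(samples))  # e.g. [[2 1 2 2 0]]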
Example 1: _make_ops
def _make_ops(self, num_samples, seed=None):
prob_dist = tf.constant([[0.15, 0.5, 0.3, 0.05]])
logits = tf.log(prob_dist)
# Two independent sets of samples from the same distribution
sample_op1 = random_ops.multinomial(logits, num_samples, seed)
sample_op2 = random_ops.multinomial(logits, num_samples, seed)
return (sample_op1, sample_op2)
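A hypothetical driver for _make_ops, sketched with the public TF 1.x API: because both ops are built with the same op-level seed (and share the graph-level seed), they should produce identical draws, which is presumably what the surrounding test asserts.

import tensorflow as tf

logits = tf.log([[0.15, 0.5, 0.3, 0.05]])
op1 = tf.multinomial(logits, num_samples=10, seed=1618)
op2 = tf.multinomial(logits, num_samples=10, seed=1618)
with tf.Session() as sess:
    s1, s2 = sess.run([op1, op2])
    assert (s1 == s2).all()  # same seed, same random stream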
Example 2: _get_batch
def _get_batch(per_class_queues, probs, batch_size):
"""Generates batches according to per-class-probabilities."""
num_classes = probs.size
# Number of examples per class is governed by a multinomial distribution.
# Note: multinomial takes unnormalized log probabilities for its first
# argument, of dimension [batch_size, num_classes].
examples = random_ops.multinomial(
np.expand_dims(np.log(probs), 0), batch_size)
# Prepare the data and label batches.
val_list = []
label_list = []
for i in range(num_classes):
num_examples = math_ops.reduce_sum(
math_ops.cast(math_ops.equal(examples, i), dtypes.int32))
val_list.append(per_class_queues[i].dequeue_many(num_examples))
label_list.append(array_ops.ones([num_examples], dtype=dtypes.int32) * i)
# Create a tensor of labels.
batch_labels = array_ops.concat(0, label_list)
batch_labels.set_shape([batch_size])
# Debug instrumentation.
sample_tags = ['stratified_sample/samples_class%i' % i for i in
range(num_classes)]
logging_ops.scalar_summary(sample_tags, math_ops.reduce_sum(
array_ops.one_hot(batch_labels, num_classes), 0))
return array_ops.concat(0, val_list), batch_labels
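The core idea above: a single multinomial draw of batch_size labels determines how many examples to dequeue from each class queue. A NumPy sketch of the same bookkeeping, with hypothetical probabilities and no queues:

import numpy as np

probs = np.array([0.2, 0.3, 0.5])
batch_size = 8
rng = np.random.RandomState(0)
# One categorical label per batch slot, mirroring the multinomial call.
examples = rng.choice(len(probs), size=batch_size, p=probs)
# Per-class counts, as computed with reduce_sum(equal(...)) above.
counts = [(examples == i).sum() for i in range(len(probs))]
print(counts)  # sums to batch_size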
Example 3: sample
def sample(self, n, seed=None, name="sample"):
"""Sample `n` observations from the Categorical distribution.
Args:
n: 0-D. Number of independent samples to draw for each distribution.
seed: Random seed (optional).
name: A name for this operation (optional).
Returns:
An `int64` `Tensor` with shape `[n, batch_shape, event_shape]`
"""
with ops.name_scope(self.name):
with ops.op_scope([self.logits, n], name):
n = ops.convert_to_tensor(n, name="n")
logits_2d = array_ops.reshape(
self.logits, array_ops.pack([-1, self.num_classes]))
samples = random_ops.multinomial(logits_2d, n, seed=seed)
samples = math_ops.cast(samples, self._dtype)
ret = array_ops.reshape(
array_ops.transpose(samples),
array_ops.concat(
0, [array_ops.expand_dims(n, 0), self.batch_shape()]))
ret.set_shape(tensor_shape.vector(tensor_util.constant_value(n))
.concatenate(self.get_batch_shape()))
return ret
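Note that random_ops.multinomial only accepts 2-D logits, which is why sample flattens all batch dimensions first and undoes the flattening afterwards. A shape-only NumPy sketch of that round trip, with hypothetical sizes:

import numpy as np

batch_shape, num_classes, n = (2, 3), 5, 7
logits = np.zeros(batch_shape + (num_classes,))
logits_2d = logits.reshape(-1, num_classes)   # (6, 5): flattened batch dims
samples = np.zeros((logits_2d.shape[0], n))   # multinomial output: (6, 7)
ret = samples.T.reshape((n,) + batch_shape)   # back to (7, 2, 3)
print(ret.shape)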
Example 4: sample_n
def sample_n(self, n, seed=None, name="sample_n"):
"""Sample `n` observations from the Categorical distribution.
Args:
n: `Scalar` `Tensor` of type `int32` or `int64`, the number of
observations to sample.
seed: Random seed (optional).
name: A name for this operation (optional).
Returns:
An `int64` `Tensor` with shape `[n, batch_shape, event_shape]`
"""
with ops.name_scope(self.name):
with ops.name_scope(name, values=[self.logits, n]):
n = ops.convert_to_tensor(n, name="n")
logits_2d = array_ops.reshape(
self.logits, array_ops.pack([-1, self.num_classes]))
samples = random_ops.multinomial(logits_2d, n, seed=seed)
samples = math_ops.cast(samples, self._dtype)
ret = array_ops.reshape(
array_ops.transpose(samples),
array_ops.concat(0, ([n], self.batch_shape())))
ret.set_shape(tensor_shape.vector(tensor_util.constant_value(n))
.concatenate(self.get_batch_shape()))
return ret
Example 5: _sample_single
def _sample_single(args):
logits, n_draw = args[0], args[1] # [K], []
x = random_ops.multinomial(logits[array_ops.newaxis, ...], n_draw,
seed) # [1, n*n_draw]
x = array_ops.reshape(x, shape=[n, -1]) # [n, n_draw]
x = math_ops.reduce_sum(array_ops.one_hot(x, depth=k), axis=-2) # [n, k]
return x
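The one_hot/reduce_sum pair turns n_draw categorical indices into per-class counts, i.e. one Multinomial sample. A NumPy sketch of that reduction, with hypothetical draws:

import numpy as np

k = 4
draws = np.array([2, 0, 2, 3, 2, 0])   # n_draw categorical indices
one_hot = np.eye(k)[draws]             # shape [n_draw, k]
counts = one_hot.sum(axis=0)           # shape [k], sums to n_draw
print(counts)                          # [2. 0. 3. 1.]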
Example 6: testMatchStatefulMultinomial
def testMatchStatefulMultinomial(self):
# Stateless ops should be the same as stateful ops on the first call
# after seed scrambling.
key = 0x3ec8f720, 0x02461e29
num_samples = 4
for logits_dtype in np.float16, np.float32, np.float64:
for output_dtype in dtypes.int32, dtypes.int64:
for seed in (7, 17), (11, 5), (2, 3):
preseed = invert_philox(key,
(seed[0], 0, seed[1], 0)).astype(np.uint64)
preseed = preseed[::2] | preseed[1::2] << 32
random_seed.set_random_seed(seed[0])
with self.test_session(use_gpu=True):
for logits in ([[0.1, 0.25, 0.5, 0.15]], [[0.5, 0.5], [0.8, 0.2],
[0.25, 0.75]]):
logits_t = constant_op.constant(logits, dtype=logits_dtype)
stateful = random_ops.multinomial(
logits_t,
num_samples,
seed=seed[1],
output_dtype=output_dtype)
pure = stateless.stateless_multinomial(
logits_t,
num_samples,
seed=preseed,
output_dtype=output_dtype)
self.assertAllEqual(stateful.eval(), pure.eval())
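For contrast with the stateful op, a sketch of the stateless variant on its own (assuming a TF 1.x build where tensorflow.python.ops.stateless_random_ops exposes stateless_multinomial): the same explicit shape-[2] seed always yields the same samples, with no graph-level RNG state involved.

import tensorflow as tf
from tensorflow.python.ops import stateless_random_ops as stateless

logits = tf.log([[0.1, 0.25, 0.5, 0.15]])
seed = tf.constant([7, 17], dtype=tf.int32)  # explicit shape-[2] seed
a = stateless.stateless_multinomial(logits, 4, seed=seed)
b = stateless.stateless_multinomial(logits, 4, seed=seed)
with tf.Session() as sess:
    x, y = sess.run([a, b])
    assert (x == y).all()  # pure function of (logits, num_samples, seed)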
Example 7: sample
def sample(self, n, seed=None, name="sample"):
"""Generate `n` samples.
Args:
n: scalar. Number of samples to draw from each distribution.
seed: Python integer seed for RNG.
name: name to give to the op.
Returns:
samples: a `Tensor` of shape `(n,) + self.batch_shape` with values of type
`self.dtype`.
"""
with ops.name_scope(self.name):
with ops.op_scope([self.p, n], name):
n = ops.convert_to_tensor(n, name="n")
p_2d = array_ops.reshape(self.p, array_ops.pack([-1, 1]))
q_2d = 1. - p_2d
probs = array_ops.concat(1, [q_2d, p_2d])
samples = random_ops.multinomial(math_ops.log(probs), n, seed=seed)
ret = array_ops.reshape(
array_ops.transpose(samples),
array_ops.concat(0,
[array_ops.expand_dims(n, 0), self.batch_shape()]))
ret.set_shape(tensor_shape.vector(tensor_util.constant_value(n))
.concatenate(self.get_batch_shape()))
return math_ops.cast(ret, self.dtype)
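Example 7 samples a Bernoulli by building two-class logits [log(1-p), log(p)], so the drawn class index is 1 with probability p. A NumPy sketch of the same construction, with a hypothetical p:

import numpy as np

p = np.array([0.1, 0.9])
probs = np.stack([1. - p, p], axis=1)    # [batch, 2], rows sum to 1
rng = np.random.RandomState(0)
n = 1000
samples = np.array([[rng.choice(2, p=row) for row in probs]
                    for _ in range(n)])  # [n, batch] of {0, 1}
print(samples.mean(axis=0))              # approx [0.1, 0.9]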
Example 8: _do_sampling
def _do_sampling(self, logits, num_samples):
"""Categorical samples from given input.
Args:
logits: Numpy ndarray of shape [batch_size, num_classes].
num_samples: Int; number of samples to draw.
Returns:
Frequencies from sampled classes; shape [batch_size, num_classes].
"""
with self.cached_session(), self.test_scope():
random_seed.set_random_seed(1618)
op = random_ops.multinomial(logits, num_samples,
output_dtype=dtypes.int32)
d = self.evaluate(op)
batch_size, num_classes = logits.shape
freqs_mat = []
for i in range(batch_size):
cnts = dict(collections.Counter(d[i, :]))
# Requires drawn class labels to be in range.
self.assertLess(max(cnts.keys()), num_classes)
self.assertGreaterEqual(min(cnts.keys()), 0)
freqs = [(cnts[k] * 1. / num_samples if k in cnts else 0)
for k in range(num_classes)]
freqs_mat.append(freqs)
return freqs_mat
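What _do_sampling returns are empirical class frequencies, which should approach softmax(logits) as num_samples grows. A NumPy sanity check of that expectation, with hypothetical logits:

import numpy as np

logits = np.array([0., 1., 2.])
expected = np.exp(logits) / np.exp(logits).sum()
rng = np.random.RandomState(1618)
num_samples = 100000
draws = rng.choice(3, size=num_samples, p=expected)
freqs = np.bincount(draws, minlength=3) / float(num_samples)
print(expected, freqs)  # the two should be close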
Example 9: _sample_n
def _sample_n(self, n, seed=None):
n_draws = math_ops.cast(self.total_count, dtype=dtypes.int32)
if self.total_count.get_shape().ndims is not None:
if self.total_count.get_shape().ndims != 0:
raise NotImplementedError(
"Sample only supported for scalar number of draws.")
elif self.validate_args:
is_scalar = check_ops.assert_rank(
n_draws, 0,
message="Sample only supported for scalar number of draws.")
n_draws = control_flow_ops.with_dependencies([is_scalar], n_draws)
k = self.event_shape_tensor()[0]
# Flatten batch dims so logits has shape [B, k],
# where B = reduce_prod(self.batch_shape_tensor()).
x = random_ops.multinomial(
logits=array_ops.reshape(self.logits, [-1, k]),
num_samples=n * n_draws,
seed=seed)
x = array_ops.reshape(x, shape=[-1, n, n_draws])
x = math_ops.reduce_sum(array_ops.one_hot(x, depth=k),
axis=-2) # shape: [B, n, k]
x = array_ops.transpose(x, perm=[1, 0, 2])
final_shape = array_ops.concat([[n], self.batch_shape_tensor(), [k]], 0)
x = array_ops.reshape(x, final_shape)
return math_ops.cast(x, self.dtype)
Example 10: _sample_n
def _sample_n(self, n, seed=None):
n_draws = math_ops.cast(self.n, dtype=dtypes.int32)
if self.n.get_shape().ndims is not None:
if self.n.get_shape().ndims != 0:
raise NotImplementedError(
"Sample only supported for scalar number of draws.")
elif self.validate_args:
is_scalar = check_ops.assert_rank(
n_draws, 0,
message="Sample only supported for scalar number of draws.")
n_draws = control_flow_ops.with_dependencies([is_scalar], n_draws)
k = self.event_shape()[0]
unnormalized_logits = array_ops.reshape(
math_ops.log(random_ops.random_gamma(
shape=[n],
alpha=self.alpha,
dtype=self.dtype,
seed=seed)),
shape=[-1, k])
draws = random_ops.multinomial(
logits=unnormalized_logits,
num_samples=n_draws,
seed=distribution_util.gen_new_seed(seed, salt="dirichlet_multinomial"))
x = math_ops.reduce_sum(array_ops.one_hot(draws, depth=k),
reduction_indices=-2)
final_shape = array_ops.concat([[n], self.batch_shape(), [k]], 0)
return array_ops.reshape(x, final_shape)
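The Gamma trick above works because independent Gamma(alpha_i, 1) draws, normalized by their sum, form one Dirichlet(alpha) sample, and multinomial is invariant to adding a constant to all logits, so log(gamma_draws) serves directly as unnormalized logits. A NumPy sketch with a hypothetical alpha and draw count:

import numpy as np

rng = np.random.RandomState(0)
alpha = np.array([1., 2., 3.])
g = rng.gamma(shape=alpha)        # independent Gamma(alpha_i, 1) draws
pi = g / g.sum()                  # one Dirichlet(alpha) sample
# log(g) and log(pi) differ by the constant log(g.sum()), which softmax
# cancels, so multinomial(logits=log(g)) samples from pi.
counts = rng.multinomial(10, pi)  # one Dirichlet-multinomial draw
print(pi, counts)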
Example 11: testNegativeMinLogits
def testNegativeMinLogits(self):
random_seed.set_random_seed(78844)
with self.test_session(use_gpu=True):
logits = constant_op.constant([[np.finfo(np.float32).min] * 1023 + [0]])
num_samples = 1000
samples = random_ops.multinomial(logits, num_samples).eval()
self.assertAllEqual([[1023] * num_samples], samples)
Example 12: testNegativeMinLogits
def testNegativeMinLogits(self):
random_seed.set_random_seed(78844)
with test_util.use_gpu():
logits = constant_op.constant([[np.finfo(np.float32).min] * 1023 + [0]])
num_samples = 1000
samples = self.evaluate(random_ops.multinomial(logits, num_samples))
self.assertAllEqual([[1023] * num_samples], samples)
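Both variants of this test rest on the fact that a logit equal to float32 min underflows to probability zero under softmax, leaving class 1023 (the single zero logit) as the only possible draw. A quick NumPy check:

import numpy as np

logits = np.array([np.finfo(np.float32).min] * 1023 + [0.])
probs = np.exp(logits - logits.max())
probs /= probs.sum()
print(probs[0], probs[-1])  # 0.0 and 1.0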
Example 13: testEmpty
def testEmpty(self):
with self.cached_session():
with self.test_scope():
x = random_ops.multinomial(
array_ops.zeros([42, 40]), 0, output_dtype=dtypes.int32)
y = self.evaluate(x)
self.assertEqual(y.shape, (42, 0))
Example 14: testEmpty
def testEmpty(self):
classes = 5
with self.test_session(use_gpu=True):
for batch in 0, 3:
for samples in 0, 7:
x = random_ops.multinomial(
array_ops.zeros([batch, classes]), samples).eval()
self.assertEqual(x.shape, (batch, samples))
Example 15: testSmallEntropy
def testSmallEntropy(self):
random_seed.set_random_seed(1618)
with self.test_session(use_gpu=self.use_gpu):
# A logit value of -10 corresponds to a probability of ~5e-5.
logits = constant_op.constant([[-10., 10., -10.], [-10., -10., 10.]])
num_samples = 1000
samples = random_ops.multinomial(logits, num_samples).eval()
self.assertAllEqual([[1] * num_samples, [2] * num_samples], samples)
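The exact-equality assertion is safe because the softmax mass off the argmax is about 4e-9 per draw here, so 1000 draws essentially never deviate. A NumPy check of those odds:

import numpy as np

logits = np.array([-10., 10., -10.])
probs = np.exp(logits - logits.max())
probs /= probs.sum()
p_off = 1. - probs[1]                   # ~4.1e-9 per draw
p_any_miss = 1. - (1. - p_off) ** 1000  # ~4.1e-6 over 1000 draws
print(p_off, p_any_miss)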