This article collects typical usage examples of the random_ops.multinomial method from the Python module tensorflow.python.ops. If you are wondering what random_ops.multinomial does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage of the containing module, tensorflow.python.ops.random_ops.
The following 15 code examples of random_ops.multinomial are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
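As a quick orientation before the library-internal examples (this sketch is not one of the collected excerpts): random_ops.multinomial is exposed publicly as tf.multinomial in TensorFlow 1.x. It takes a 2-D tensor of unnormalized log-probabilities with shape [batch_size, num_classes] and draws num_samples class indices per row. The logits values below are made up for illustration, and graph-mode TF 1.x is assumed.

import tensorflow as tf

# Unnormalized log-probabilities for two independent categorical
# distributions over three classes: shape [batch_size, num_classes].
logits = tf.constant([[1.0, 2.0, 0.5],
                      [0.1, 0.1, 3.0]])

# Draw 4 class indices per row; the result has shape
# [batch_size, num_samples] with values in [0, num_classes).
samples = tf.multinomial(logits, num_samples=4, seed=42)

with tf.Session() as sess:
  print(sess.run(samples))  # e.g. [[1 1 0 1]
                            #       [2 2 2 2]]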
Example 1: _sample_n
# Required import: from tensorflow.python.ops import random_ops [as alias]
# Or: from tensorflow.python.ops.random_ops import multinomial [as alias]
def _sample_n(self, n, seed=None):
  n_draws = math_ops.cast(self.total_count, dtype=dtypes.int32)
  if self.total_count.get_shape().ndims is not None:
    if self.total_count.get_shape().ndims != 0:
      raise NotImplementedError(
          "Sample only supported for scalar number of draws.")
  elif self.validate_args:
    is_scalar = check_ops.assert_rank(
        n_draws, 0,
        message="Sample only supported for scalar number of draws.")
    n_draws = control_flow_ops.with_dependencies([is_scalar], n_draws)
  k = self.event_shape_tensor()[0]
  # Flatten batch dims so logits has shape [B, k],
  # where B = reduce_prod(self.batch_shape_tensor()).
  draws = random_ops.multinomial(
      logits=array_ops.reshape(self.logits, [-1, k]),
      num_samples=n * n_draws,
      seed=seed)
  draws = array_ops.reshape(draws, shape=[-1, n, n_draws])
  x = math_ops.reduce_sum(array_ops.one_hot(draws, depth=k),
                          axis=-2)  # shape: [B, n, k]
  x = array_ops.transpose(x, perm=[1, 0, 2])
  final_shape = array_ops.concat([[n], self.batch_shape_tensor(), [k]], 0)
  return array_ops.reshape(x, final_shape)
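The reshape / one_hot / reduce_sum sequence in Example 1 is the core trick: multinomial only returns categorical indices, so to obtain multinomial counts the code draws n * n_draws indices per batch member, one-hot encodes them, and sums over the draws axis. Below is a minimal standalone sketch of that counting step, using the public tf.multinomial / tf.one_hot ops instead of the internal modules; the shapes and logits are made up for illustration.

import tensorflow as tf

k = 3        # number of classes
n = 2        # number of multinomial samples requested
n_draws = 5  # total_count of the multinomial

# One batch member: flattened logits of shape [B, k] with B = 1.
logits = tf.constant([[0.0, 1.0, 2.0]])

# Draw n * n_draws categorical indices, then regroup them so the last
# axis holds the n_draws indices belonging to one multinomial sample.
draws = tf.multinomial(logits, num_samples=n * n_draws, seed=7)
draws = tf.reshape(draws, [-1, n, n_draws])                   # [B, n, n_draws]

# Count how often each class was drawn: one-hot then sum over the draws axis.
counts = tf.reduce_sum(tf.one_hot(draws, depth=k), axis=-2)   # [B, n, k]

with tf.Session() as sess:
  print(sess.run(counts))  # each length-k row sums to n_draws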
Example 2: _sample_n
# Required import: from tensorflow.python.ops import random_ops [as alias]
# Or: from tensorflow.python.ops.random_ops import multinomial [as alias]
def _sample_n(self, n, seed=None):
  n_draws = math_ops.cast(self.total_count, dtype=dtypes.int32)
  k = self.event_shape_tensor()[0]
  unnormalized_logits = array_ops.reshape(
      math_ops.log(random_ops.random_gamma(
          shape=[n],
          alpha=self.concentration,
          dtype=self.dtype,
          seed=seed)),
      shape=[-1, k])
  draws = random_ops.multinomial(
      logits=unnormalized_logits,
      num_samples=n_draws,
      seed=distribution_util.gen_new_seed(seed, salt="dirichlet_multinomial"))
  x = math_ops.reduce_sum(array_ops.one_hot(draws, depth=k), -2)
  final_shape = array_ops.concat([[n], self.batch_shape_tensor(), [k]], 0)
  return array_ops.reshape(x, final_shape)
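Examples 2, 5, and 9 rely on a useful property to sample a Dirichlet-multinomial: a Dirichlet sample can be produced by drawing independent Gamma(concentration_i, 1) variables and normalizing them, and because multinomial softmax-normalizes its logits internally, passing log(gamma_samples) directly as logits makes the explicit normalization unnecessary. A standalone sketch of this idea with public TF 1.x ops and made-up concentration values (an illustration, not the library's implementation):

import tensorflow as tf

concentration = tf.constant([1.0, 2.0, 3.0])  # Dirichlet parameters, k = 3
n = 4                                         # number of Dirichlet draws
n_draws = 10                                  # multinomial total_count

# Gamma(alpha_i, 1) samples; normalizing each row would give Dirichlet
# probabilities, but multinomial normalizes its logits itself, so the log
# of the unnormalized Gamma draws is enough.
gamma = tf.random_gamma(shape=[n], alpha=concentration, seed=11)
unnormalized_logits = tf.reshape(tf.log(gamma), [-1, 3])        # [n, k]

# One multinomial of n_draws trials per Dirichlet sample.
draws = tf.multinomial(unnormalized_logits, num_samples=n_draws, seed=12)
counts = tf.reduce_sum(tf.one_hot(draws, depth=3), axis=-2)     # [n, k]

with tf.Session() as sess:
  print(sess.run(counts))  # each row sums to n_draws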
Example 3: _sample_n
# Required import: from tensorflow.python.ops import random_ops [as alias]
# Or: from tensorflow.python.ops.random_ops import multinomial [as alias]
def _sample_n(self, n, seed=None):
  n_draws = math_ops.cast(self.n, dtype=dtypes.int32)
  if self.n.get_shape().ndims is not None:
    if self.n.get_shape().ndims != 0:
      raise NotImplementedError(
          "Sample only supported for scalar number of draws.")
  elif self.validate_args:
    is_scalar = check_ops.assert_rank(
        n_draws, 0,
        message="Sample only supported for scalar number of draws.")
    n_draws = control_flow_ops.with_dependencies([is_scalar], n_draws)
  k = self.event_shape()[0]
  # Flatten batch dims so logits has shape [B, k],
  # where B = reduce_prod(self.batch_shape()).
  logits = array_ops.reshape(self.logits, [-1, k])
  draws = random_ops.multinomial(logits=logits,
                                 num_samples=n * n_draws,
                                 seed=seed)
  draws = array_ops.reshape(draws, shape=[-1, n, n_draws])
  x = math_ops.reduce_sum(array_ops.one_hot(draws, depth=k),
                          reduction_indices=-2)  # shape: [B, n, k]
  x = array_ops.transpose(x, perm=[1, 0, 2])
  final_shape = array_ops.concat([[n], self.batch_shape(), [k]], 0)
  return array_ops.reshape(x, final_shape)
Example 4: _sample_n
# Required import: from tensorflow.python.ops import random_ops [as alias]
# Or: from tensorflow.python.ops.random_ops import multinomial [as alias]
def _sample_n(self, n, seed=None):
  n_draws = math_ops.cast(self.total_count, dtype=dtypes.int32)
  if self.total_count.get_shape().ndims is not None:
    if self.total_count.get_shape().ndims != 0:
      raise NotImplementedError(
          "Sample only supported for scalar number of draws.")
  elif self.validate_args:
    is_scalar = check_ops.assert_rank(
        n_draws, 0,
        message="Sample only supported for scalar number of draws.")
    n_draws = control_flow_ops.with_dependencies([is_scalar], n_draws)
  k = self.event_shape_tensor()[0]
  # Flatten batch dims so logits has shape [B, k],
  # where B = reduce_prod(self.batch_shape_tensor()).
  x = random_ops.multinomial(
      logits=array_ops.reshape(self.logits, [-1, k]),
      num_samples=n * n_draws,
      seed=seed)
  x = array_ops.reshape(x, shape=[-1, n, n_draws])
  x = math_ops.reduce_sum(array_ops.one_hot(x, depth=k),
                          axis=-2)  # shape: [B, n, k]
  x = array_ops.transpose(x, perm=[1, 0, 2])
  final_shape = array_ops.concat([[n], self.batch_shape_tensor(), [k]], 0)
  x = array_ops.reshape(x, final_shape)
  return math_ops.cast(x, self.dtype)
Author: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 27, Source file: multinomial.py
Example 5: _sample_n
# Required import: from tensorflow.python.ops import random_ops [as alias]
# Or: from tensorflow.python.ops.random_ops import multinomial [as alias]
def _sample_n(self, n, seed=None):
  n_draws = math_ops.cast(self.total_count, dtype=dtypes.int32)
  k = self.event_shape_tensor()[0]
  unnormalized_logits = array_ops.reshape(
      math_ops.log(random_ops.random_gamma(
          shape=[n],
          alpha=self.concentration,
          dtype=self.dtype,
          seed=seed)),
      shape=[-1, k])
  draws = random_ops.multinomial(
      logits=unnormalized_logits,
      num_samples=n_draws,
      seed=distribution_util.gen_new_seed(seed, salt="dirichlet_multinomial"))
  x = math_ops.reduce_sum(array_ops.one_hot(draws, depth=k), -2)
  final_shape = array_ops.concat([[n], self.batch_shape_tensor(), [k]], 0)
  x = array_ops.reshape(x, final_shape)
  return math_ops.cast(x, self.dtype)
Author: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 20, Source file: dirichlet_multinomial.py
Example 6: _sample_n
# Required import: from tensorflow.python.ops import random_ops [as alias]
# Or: from tensorflow.python.ops.random_ops import multinomial [as alias]
def _sample_n(self, n, seed=None):
  if self.logits.get_shape().ndims == 2:
    logits_2d = self.logits
  else:
    logits_2d = array_ops.reshape(self.logits, [-1, self.event_size])
  samples = random_ops.multinomial(logits_2d, n, seed=seed)
  samples = math_ops.cast(samples, self.dtype)
  ret = array_ops.reshape(
      array_ops.transpose(samples),
      array_ops.concat([[n], self.batch_shape_tensor()], 0))
  return ret
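Example 6 shows the standard reshaping dance for categorical sampling: multinomial only accepts 2-D logits, so batched logits are flattened to [-1, event_size], sampled, and the result is transposed and reshaped back to [n] + batch_shape. A standalone sketch of the same pattern with public TF 1.x ops; the batch shape, event size, and random logits are illustrative.

import tensorflow as tf

# Batched categorical logits of shape [2, 2, 3]: a 2x2 batch of
# 3-class distributions.
logits = tf.random_normal([2, 2, 3], seed=3)
event_size = 3
n = 5  # samples per distribution

# multinomial needs 2-D logits, so flatten the batch dims first.
logits_2d = tf.reshape(logits, [-1, event_size])  # [4, 3]
samples = tf.multinomial(logits_2d, n, seed=4)    # [4, n]

# Move the sample axis to the front, then restore the batch shape.
samples = tf.transpose(samples)                   # [n, 4]
samples = tf.reshape(samples, [n, 2, 2])          # [n] + batch_shape

with tf.Session() as sess:
  print(sess.run(samples).shape)  # (5, 2, 2)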
Example 7: _sample_n
# Required import: from tensorflow.python.ops import random_ops [as alias]
# Or: from tensorflow.python.ops.random_ops import multinomial [as alias]
def _sample_n(self, n, seed=None):
  sample_shape = array_ops.concat([[n], array_ops.shape(self.logits)], 0)
  logits = self.logits
  if logits.get_shape().ndims == 2:
    logits_2d = logits
  else:
    logits_2d = array_ops.reshape(logits, [-1, self.event_size])
  samples = random_ops.multinomial(logits_2d, n, seed=seed)
  samples = array_ops.transpose(samples)
  samples = array_ops.one_hot(samples, self.event_size, dtype=self.dtype)
  ret = array_ops.reshape(samples, sample_shape)
  return ret
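Examples 7 and 8 differ from Example 6 only in the last step: instead of returning integer class indices, they one-hot encode the drawn indices so each sample is a length-k 0/1 vector. A minimal standalone illustration of that representation, with made-up logits and public TF 1.x ops:

import tensorflow as tf

logits = tf.constant([[0.0, 1.0, 2.0]])       # one 3-class distribution
n = 4

indices = tf.multinomial(logits, n, seed=21)  # [1, n] integer class ids
indices = tf.transpose(indices)               # [n, 1]
one_hot = tf.one_hot(indices, depth=3)        # [n, 1, 3] one-hot samples

with tf.Session() as sess:
  print(sess.run(one_hot))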
Example 8: _sample_n
# Required import: from tensorflow.python.ops import random_ops [as alias]
# Or: from tensorflow.python.ops.random_ops import multinomial [as alias]
def _sample_n(self, n, seed=None):
  sample_shape = array_ops.concat(([n], array_ops.shape(self.logits)), 0)
  logits = self.logits
  if logits.get_shape().ndims == 2:
    logits_2d = logits
  else:
    logits_2d = array_ops.reshape(logits, [-1, self.num_classes])
  samples = random_ops.multinomial(logits_2d, n, seed=seed)
  samples = array_ops.transpose(samples)
  samples = array_ops.one_hot(samples, self.num_classes, dtype=self.dtype)
  ret = array_ops.reshape(samples, sample_shape)
  return ret
Example 9: _sample_n
# Required import: from tensorflow.python.ops import random_ops [as alias]
# Or: from tensorflow.python.ops.random_ops import multinomial [as alias]
def _sample_n(self, n, seed=None):
  n_draws = math_ops.cast(self.n, dtype=dtypes.int32)
  if self.n.get_shape().ndims is not None:
    if self.n.get_shape().ndims != 0:
      raise NotImplementedError(
          "Sample only supported for scalar number of draws.")
  elif self.validate_args:
    is_scalar = check_ops.assert_rank(
        n_draws, 0,
        message="Sample only supported for scalar number of draws.")
    n_draws = control_flow_ops.with_dependencies([is_scalar], n_draws)
  k = self.event_shape()[0]
  unnormalized_logits = array_ops.reshape(
      math_ops.log(random_ops.random_gamma(
          shape=[n],
          alpha=self.alpha,
          dtype=self.dtype,
          seed=seed)),
      shape=[-1, k])
  draws = random_ops.multinomial(
      logits=unnormalized_logits,
      num_samples=n_draws,
      seed=distribution_util.gen_new_seed(seed, salt="dirichlet_multinomial"))
  x = math_ops.reduce_sum(array_ops.one_hot(draws, depth=k),
                          reduction_indices=-2)
  final_shape = array_ops.concat([[n], self.batch_shape(), [k]], 0)
  return array_ops.reshape(x, final_shape)
Example 10: _sample_n
# Required import: from tensorflow.python.ops import random_ops [as alias]
# Or: from tensorflow.python.ops.random_ops import multinomial [as alias]
def _sample_n(self, n, seed=None):
  if self.logits.get_shape().ndims == 2:
    logits_2d = self.logits
  else:
    logits_2d = array_ops.reshape(self.logits, [-1, self.num_classes])
  samples = random_ops.multinomial(logits_2d, n, seed=seed)
  samples = math_ops.cast(samples, self.dtype)
  ret = array_ops.reshape(
      array_ops.transpose(samples),
      array_ops.concat(([n], self.batch_shape()), 0))
  return ret
Example 11: testSmallEntropy
# Required import: from tensorflow.python.ops import random_ops [as alias]
# Or: from tensorflow.python.ops.random_ops import multinomial [as alias]
def testSmallEntropy(self):
  tf.set_random_seed(1618)
  with self.test_session(use_gpu=self.use_gpu):
    # A logit value of -10 corresponds to a probability of ~5e-5.
    logits = tf.constant([[-10., 10., -10.], [-10., -10., 10.]])
    num_samples = 1000
    samples = tf.multinomial(logits, num_samples).eval()
    self.assertAllEqual([[1] * num_samples, [2] * num_samples], samples)
Example 12: testLargeLogits
# Required import: from tensorflow.python.ops import random_ops [as alias]
# Or: from tensorflow.python.ops.random_ops import multinomial [as alias]
def testLargeLogits(self):
  for neg in [True, False]:
    with self.test_session(use_gpu=self.use_gpu):
      logits = np.array([[1000.] * 5])
      if neg:
        logits *= -1
      samples = tf.multinomial(logits, 10).eval()
      # Sampled classes should be in-range.
      self.assertTrue((samples >= 0).all())
      self.assertTrue((samples < 5).all())
Example 13: _make_ops
# Required import: from tensorflow.python.ops import random_ops [as alias]
# Or: from tensorflow.python.ops.random_ops import multinomial [as alias]
def _make_ops(self, num_samples, seed=None):
  prob_dist = tf.constant([[0.15, 0.5, 0.3, 0.05]])
  logits = tf.log(prob_dist)
  # Two independent sets of samples from the same distribution
  sample_op1 = random_ops.multinomial(logits, num_samples, seed)
  sample_op2 = random_ops.multinomial(logits, num_samples, seed)
  return (sample_op1, sample_op2)
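Example 13 builds two sampling ops with the same op-level seed so the surrounding test can compare their outputs. A short standalone sketch of that comparison, assuming TF 1.x seeding semantics (an op-level seed makes the op's draws reproducible); the probabilities and sample count are made up:

import tensorflow as tf

probs = tf.constant([[0.15, 0.5, 0.3, 0.05]])
logits = tf.log(probs)

# Two ops built with the same op-level seed should draw the same indices.
a = tf.multinomial(logits, 20, seed=1234)
b = tf.multinomial(logits, 20, seed=1234)

with tf.Session() as sess:
  s1, s2 = sess.run([a, b])
  print((s1 == s2).all())  # expected True when the op-level seeds match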
Example 14: testEmpty
# Required import: from tensorflow.python.ops import random_ops [as alias]
# Or: from tensorflow.python.ops.random_ops import multinomial [as alias]
def testEmpty(self):
  classes = 5
  with self.test_session(use_gpu=self.use_gpu):
    for batch in 0, 3:
      for samples in 0, 7:
        x = tf.multinomial(tf.zeros([batch, classes]), samples).eval()
        self.assertEqual(x.shape, (batch, samples))
Example 15: testEmptyClasses
# Required import: from tensorflow.python.ops import random_ops [as alias]
# Or: from tensorflow.python.ops.random_ops import multinomial [as alias]
def testEmptyClasses(self):
  with self.test_session(use_gpu=self.use_gpu):
    x = tf.multinomial(tf.zeros([5, 0]), 7)
    with self.assertRaisesOpError("num_classes should be positive"):
      x.eval()