This article collects typical usage examples of the Python function tensorflow.python.ops.math_ops.reduce_logsumexp. If you have been struggling with questions like "What exactly does reduce_logsumexp do?", "How do I call it?", or "What do real usages look like?", the hand-picked code examples here may help.
Below are 15 code examples of reduce_logsumexp, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system surface better Python code examples.
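Before the examples, here is a minimal sketch of what the function computes. The snippets below come from TensorFlow's internals and assume TF-internal imports such as math_ops and array_ops from tensorflow.python.ops; the sketch instead uses scipy.special.logsumexp, whose axis/keepdims semantics mirror reduce_logsumexp, so it runs standalone. The toy values are illustrative only.

import numpy as np
from scipy.special import logsumexp  # same math as reduce_logsumexp

# reduce_logsumexp(x, axis, keepdims) computes log(sum(exp(x))) over the
# given axes in a numerically stable way.
x = np.array([[0.1, 0.2], [0.3, 0.4]])
print(logsumexp(x))                          # reduce all axes -> scalar
print(logsumexp(x, axis=-1))                 # reduce last axis -> shape (2,)
print(logsumexp(x, axis=-1, keepdims=True))  # keep rank -> shape (2, 1)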
Example 1: _forward
def _forward(state_log_prob, obs_log_prob):
  state_log_prob = array_ops.expand_dims(state_log_prob, axis=1)  # Broadcast.
  state_log_prob += state_trans_log_probs
  state_log_prob = math_ops.reduce_logsumexp(state_log_prob, axis=-1)
  state_log_prob += obs_log_prob
  log_prob_sum = math_ops.reduce_logsumexp(
      state_log_prob, axis=-1, keepdims=True)
  state_log_prob -= log_prob_sum
  return state_log_prob
Example 2: _compute_energy_change
def _compute_energy_change(current_target_log_prob,
                           current_momentums,
                           proposed_target_log_prob,
                           proposed_momentums,
                           independent_chain_ndims,
                           name=None):
  """Helper to `kernel` which computes the energy change."""
  with ops.name_scope(
      name, "compute_energy_change",
      ([current_target_log_prob, proposed_target_log_prob,
        independent_chain_ndims] +
       current_momentums + proposed_momentums)):
    # Abbreviate lk0=log_kinetic_energy and lk1=proposed_log_kinetic_energy
    # since they're a mouthful and lets us inline more.
    lk0, lk1 = [], []
    for current_momentum, proposed_momentum in zip(current_momentums,
                                                   proposed_momentums):
      axis = math_ops.range(independent_chain_ndims,
                            array_ops.rank(current_momentum))
      lk0.append(_log_sum_sq(current_momentum, axis))
      lk1.append(_log_sum_sq(proposed_momentum, axis))

    lk0 = -np.log(2.) + math_ops.reduce_logsumexp(
        array_ops.stack(lk0, axis=-1), axis=-1)
    lk1 = -np.log(2.) + math_ops.reduce_logsumexp(
        array_ops.stack(lk1, axis=-1), axis=-1)
    lp0 = -current_target_log_prob   # log_potential
    lp1 = -proposed_target_log_prob  # proposed_log_potential
    x = array_ops.stack([lp1, math_ops.exp(lk1), -lp0, -math_ops.exp(lk0)],
                        axis=-1)

    # The sum is NaN if any element is NaN or we see both +Inf and -Inf.
    # Thus we will replace such rows with infinite energy change which implies
    # rejection. Recall that float-comparisons with NaN are always False.
    is_sum_determinate = (
        math_ops.reduce_all(math_ops.is_finite(x) | (x >= 0.), axis=-1) &
        math_ops.reduce_all(math_ops.is_finite(x) | (x <= 0.), axis=-1))
    is_sum_determinate = array_ops.tile(
        is_sum_determinate[..., array_ops.newaxis],
        multiples=array_ops.concat([
            array_ops.ones(array_ops.rank(is_sum_determinate),
                           dtype=dtypes.int32),
            [4],
        ], axis=0))
    x = array_ops.where(is_sum_determinate,
                        x,
                        array_ops.fill(array_ops.shape(x),
                                       value=x.dtype.as_numpy_dtype(np.inf)))
    return math_ops.reduce_sum(x, axis=-1)
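The is_sum_determinate masking above is easy to misread. Here is a NumPy-only sketch (illustrative, not part of the TensorFlow source) of the failure mode it guards against and what the mask does:

import numpy as np

# A row sum is indeterminate if it mixes +inf and -inf (or contains NaN).
x = np.array([[1.0, 2.0, 3.0, 4.0],           # ordinary row: finite sum
              [np.inf, 1.0, -np.inf, 0.0]])   # +inf and -inf together
print(x.sum(axis=-1))  # [10. nan]

# Mirror of the mask above: a row is determinate if every entry is
# finite-or-nonnegative AND every entry is finite-or-nonpositive.
is_sum_determinate = ((np.isfinite(x) | (x >= 0.)).all(axis=-1) &
                      (np.isfinite(x) | (x <= 0.)).all(axis=-1))
print(is_sum_determinate)  # [ True False]

# Indeterminate rows are replaced with +inf so the energy change is +inf,
# which means the proposal is always rejected.
x = np.where(is_sum_determinate[:, np.newaxis], x, np.inf)
print(x.sum(axis=-1))  # [10. inf]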
Example 3: _state_to_olabel
def _state_to_olabel(labels, num_labels, states):
  """Sum state log probs to ilabel log probs."""
  num_label_states = _get_dim(labels, 1) + 1
  label_states = states[:, :, 1:num_label_states]
  blank_states = states[:, :, num_label_states:]
  one_hot = array_ops.one_hot(
      labels - 1, depth=(num_labels - 1),
      on_value=0.0, off_value=math_ops.log(0.0))
  one_hot = array_ops.expand_dims(one_hot, axis=0)
  label_states = array_ops.expand_dims(label_states, axis=3)
  label_olabels = math_ops.reduce_logsumexp(label_states + one_hot, axis=2)
  blank_olabels = math_ops.reduce_logsumexp(
      blank_states, axis=2, keepdims=True)
  return array_ops.concat([blank_olabels, label_olabels], axis=-1)
Example 4: _sum_states
def _sum_states(idx, states):
  """Take logsumexp for each unique state out of all label states.

  Args:
    idx: tensor of shape [batch, label_length] For each sequence, indices into
      a set of unique labels as computed by calling unique.
    states: tensor of shape [frames, batch, label_length] Log probabilities for
      each label state.

  Returns:
    tensor of shape [frames, batch_size, label_length], log probabilities
    summed for each unique label of the sequence.
  """
  with ops.name_scope("sum_states"):
    idx = ops.convert_to_tensor(idx, name="idx")
    num_states = _get_dim(states, 2)
    states = array_ops.expand_dims(states, axis=2)
    one_hot = array_ops.one_hot(
        idx,
        depth=num_states,
        on_value=0.0,
        off_value=math_ops.log(0.0),
        axis=1)
    return math_ops.reduce_logsumexp(states + one_hot, axis=-1)
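Examples 3 and 4 both rely on a "log-space one-hot" with on_value=0.0 and off_value=log(0.0) = -inf: adding 0 keeps a log-probability and adding -inf drops it from the following logsumexp, so the mask acts as a selector. A simplified one-dimensional NumPy analogue (illustrative, not from the TensorFlow source):

import numpy as np
from scipy.special import logsumexp

log_probs = np.log(np.array([0.1, 0.2, 0.3, 0.4]))  # 4 label states
idx = np.array([0, 0, 1, 1])  # states 0,1 map to unique label 0; 2,3 to 1

mask = np.full((2, 4), -np.inf)  # off_value = log(0) = -inf
mask[idx, np.arange(4)] = 0.0    # on_value = log(1) = 0

# Pool the states belonging to each unique label, in log space.
pooled = logsumexp(log_probs[np.newaxis, :] + mask, axis=-1)
print(np.exp(pooled))  # [0.3 0.7] == [0.1 + 0.2, 0.3 + 0.4]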
Example 5: _state_to_olabel_unique
def _state_to_olabel_unique(labels, num_labels, states, unique):
  """Sum state log probs to ilabel log probs using unique label indices."""
  num_label_states = _get_dim(labels, 1) + 1
  label_states = states[:, :, 1:num_label_states]
  blank_states = states[:, :, num_label_states:]

  unique_y, unique_idx = unique
  mul_reduce = _sum_states(unique_idx, label_states)

  num_frames = states.shape[0]
  batch_size = states.shape[1]
  num_states = num_label_states - 1
  batch_state_major = array_ops.transpose(mul_reduce, perm=[1, 2, 0])
  batch_state_major = array_ops.reshape(
      batch_state_major, [batch_size * num_states, num_frames])
  batch_offset = math_ops.range(batch_size, dtype=unique_y.dtype) * num_labels
  indices = unique_y + array_ops.expand_dims(batch_offset, axis=-1)
  indices = array_ops.reshape(indices, [-1, 1])
  scatter = array_ops.scatter_nd(
      indices=indices,
      updates=batch_state_major,
      shape=[batch_size * num_labels, num_frames])
  scatter = array_ops.reshape(scatter, [batch_size, num_labels, num_frames])
  scatter = array_ops.where(
      math_ops.equal(scatter, 0.0),
      array_ops.fill(array_ops.shape(scatter), math_ops.log(0.0)),
      scatter)
  label_olabels = array_ops.transpose(scatter, [2, 0, 1])
  label_olabels = label_olabels[:, :, 1:]

  blank_olabels = math_ops.reduce_logsumexp(
      blank_states, axis=2, keepdims=True)
  return array_ops.concat([blank_olabels, label_olabels], axis=-1)
Example 6: _define_score_samples
def _define_score_samples(self):
  """Defines the likelihood of each data sample."""
  op = []
  for shard_id, prior_probs in enumerate(self._prior_probs):
    op.append(prior_probs + math_ops.log(self._w[shard_id]))
  self._scores = array_ops.squeeze(
      math_ops.reduce_logsumexp(op, axis=2, keepdims=True), axis=0)
Example 7: crf_log_norm
def crf_log_norm(inputs, sequence_lengths, transition_params):
  """Computes the normalization for a CRF.

  Args:
    inputs: A [batch_size, max_seq_len, num_tags] tensor of unary potentials
      to use as input to the CRF layer.
    sequence_lengths: A [batch_size] vector of true sequence lengths.
    transition_params: A [num_tags, num_tags] transition matrix.

  Returns:
    log_norm: A [batch_size] vector of normalizers for a CRF.
  """
  # Split up the first and rest of the inputs in preparation for the forward
  # algorithm.
  first_input = array_ops.slice(inputs, [0, 0, 0], [-1, 1, -1])
  first_input = array_ops.squeeze(first_input, [1])
  rest_of_input = array_ops.slice(inputs, [0, 1, 0], [-1, -1, -1])

  # Compute the alpha values in the forward algorithm in order to get the
  # partition function.
  forward_cell = CrfForwardRnnCell(transition_params)
  _, alphas = rnn.dynamic_rnn(
      cell=forward_cell,
      inputs=rest_of_input,
      sequence_length=sequence_lengths - 1,
      initial_state=first_input,
      dtype=dtypes.float32)
  log_norm = math_ops.reduce_logsumexp(alphas, [1])
  return log_norm
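For intuition, here is a brute-force NumPy sketch (illustrative only; Example 14 below performs the same check against the real implementation) of the quantity crf_log_norm computes for one sequence: a logsumexp over the scores of every possible tag sequence, where a score is the sum of unary and transition potentials.

import itertools
import numpy as np
from scipy.special import logsumexp

def brute_force_log_norm(unary, transitions):
  # unary: [num_words, num_tags] unary potentials for one sequence.
  # transitions: [num_tags, num_tags] transition potentials.
  num_words, num_tags = unary.shape
  scores = []
  for tags in itertools.product(range(num_tags), repeat=num_words):
    score = sum(unary[t, tag] for t, tag in enumerate(tags))
    score += sum(transitions[tags[t], tags[t + 1]]
                 for t in range(num_words - 1))
    scores.append(score)
  return logsumexp(scores)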
Example 8: testCrfLogLikelihood
def testCrfLogLikelihood(self):
  inputs = np.array(
      [[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]], dtype=np.float32)
  transition_params = np.array(
      [[-3, 5, -2], [3, 4, 1], [1, 2, 1]], dtype=np.float32)
  sequence_lengths = np.array(3, dtype=np.int32)
  num_words = inputs.shape[0]
  num_tags = inputs.shape[1]
  with self.test_session() as sess:
    all_sequence_log_likelihoods = []

    # Make sure all probabilities sum to 1.
    for tag_indices in itertools.product(
        range(num_tags), repeat=sequence_lengths):
      tag_indices = list(tag_indices)
      tag_indices.extend([0] * (num_words - sequence_lengths))
      sequence_log_likelihood, _ = crf.crf_log_likelihood(
          inputs=array_ops.expand_dims(inputs, 0),
          tag_indices=array_ops.expand_dims(tag_indices, 0),
          sequence_lengths=array_ops.expand_dims(sequence_lengths, 0),
          transition_params=constant_op.constant(transition_params))
      all_sequence_log_likelihoods.append(sequence_log_likelihood)
    total_log_likelihood = math_ops.reduce_logsumexp(
        all_sequence_log_likelihoods)
    tf_total_log_likelihood = sess.run(total_log_likelihood)
    self.assertAllClose(tf_total_log_likelihood, 0.0)
Example 9: _log_variance
def _log_variance(self):
  # Following calculation is based on law of total variance:
  #
  # Var[Z] = E[Var[Z | V]] + Var[E[Z | V]]
  #
  # where,
  #
  # Z|v ~ interpolate_affine[v](distribution)
  # V ~ mixture_distribution
  #
  # thus,
  #
  # E[Var[Z | V]] = sum{ prob[d] Var[d] : d=0, ..., deg-1 }
  # Var[E[Z | V]] = sum{ prob[d] (Mean[d] - Mean)**2 : d=0, ..., deg-1 }
  v = array_ops.stack([
      # log(self.distribution.variance()) = log(Var[d]) = log(rate[d])
      self._log_rate,
      # log((Mean[d] - Mean)**2)
      2. * math_ops.log(
          math_ops.abs(self.distribution.mean()
                       - self._mean()[..., array_ops.newaxis])),
  ], axis=-1)
  return math_ops.reduce_logsumexp(
      self.mixture_distribution.logits[..., array_ops.newaxis] + v,
      axis=[-2, -1])
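A quick numeric check (illustrative, plain NumPy with normalized log-probabilities in place of the logits) of the law of total variance used in the comments, for a two-component mixture of Poisson distributions where Mean[d] = Var[d] = rate[d]:

import numpy as np
from scipy.special import logsumexp

probs = np.array([0.4, 0.6])   # mixture probabilities prob[d]
rates = np.array([2.0, 7.0])   # Poisson rates, so Mean[d] = Var[d] = rate[d]

mean = np.sum(probs * rates)                 # E[Z] = 5.0
e_var = np.sum(probs * rates)                # E[Var[Z | V]] = 5.0
var_e = np.sum(probs * (rates - mean) ** 2)  # Var[E[Z | V]] = 6.0
print(e_var + var_e)                         # Var[Z] = 11.0

# Same computation in log space, mirroring the reduce_logsumexp call above.
v = np.stack([np.log(rates), 2. * np.log(np.abs(rates - mean))], axis=-1)
log_var = logsumexp(np.log(probs)[:, np.newaxis] + v)  # reduce both axes
print(np.exp(log_var))  # 11.0, matching the direct computation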
Example 10: _log_cdf
def _log_cdf(self, x):
  x = self._pad_sample_dims(x)
  log_cdf_x = self.components_distribution.log_cdf(x)  # [S, B, k]
  log_mix_prob = nn_ops.log_softmax(
      self.mixture_distribution.logits, axis=-1)        # [B, k]
  return math_ops.reduce_logsumexp(
      log_cdf_x + log_mix_prob, axis=-1)                # [S, B]
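This uses the identity that the CDF of a mixture is the mixture of component CDFs, so in log space log F(x) = logsumexp_k(log w_k + log F_k(x)). An illustrative NumPy/SciPy check (not TensorFlow) with a two-component Gaussian mixture:

import numpy as np
from scipy.special import logsumexp
from scipy.stats import norm

log_w = np.log([0.3, 0.7])                    # mixture weights
log_cdf_k = norm.logcdf(0.5, loc=[0.0, 1.0])  # component log-CDFs at x = 0.5
direct = np.log(0.3 * norm.cdf(0.5, 0.0) + 0.7 * norm.cdf(0.5, 1.0))
print(np.allclose(logsumexp(log_w + log_cdf_k), direct))  # True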
Example 11: testReduceLogSumExp
def testReduceLogSumExp(self):
  for dtype in [np.float16, np.float32, np.double]:
    x_np = np.random.rand(5, 5).astype(dtype)
    with self.test_session(use_gpu=True):
      y_tf_np = math_ops.reduce_logsumexp(x_np).eval()
      y_np = log(np.sum(exp(x_np)))
      self.assertAllClose(y_tf_np, y_np)
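The reference expression in this test, log(np.sum(exp(x_np))), is only safe because x_np lies in [0, 1). For large inputs, reduce_logsumexp stays finite by the standard max-shift identity; a minimal NumPy sketch of that trick (not the exact TensorFlow implementation):

import numpy as np

def stable_logsumexp(x):
  # logsumexp(x) = max(x) + log(sum(exp(x - max(x)))), so the exponent of the
  # largest entry is 0 and nothing overflows.
  m = np.max(x)
  return m + np.log(np.sum(np.exp(x - m)))

x = np.array([1000.0, 999.0])
print(np.log(np.sum(np.exp(x))))  # inf: naive formula overflows
print(stable_logsumexp(x))        # 1000.3132..., the correct answer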
Example 12: _single_seq_fn
def _single_seq_fn():
  log_norm = math_ops.reduce_logsumexp(first_input, [1])
  # Mask `log_norm` of the sequences with length <= zero.
  log_norm = array_ops.where(math_ops.less_equal(sequence_lengths, 0),
                             array_ops.zeros_like(log_norm),
                             log_norm)
  return log_norm
Example 13: __call__
def __call__(self, inputs, state, scope=None):
  """Build the CrfForwardRnnCell.

  Args:
    inputs: A [batch_size, num_tags] matrix of unary potentials.
    state: A [batch_size, num_tags] matrix containing the previous alpha
      values.
    scope: Unused variable scope of this cell.

  Returns:
    new_alphas, new_alphas: A pair of [batch_size, num_tags] matrices
      containing the new alpha values.
  """
  state = array_ops.expand_dims(state, 2)

  # This addition op broadcasts self._transition_params along the zeroth
  # dimension and state along the second dimension. This performs the
  # multiplication of previous alpha values and the current binary potentials
  # in log space.
  transition_scores = state + self._transition_params
  new_alphas = inputs + math_ops.reduce_logsumexp(transition_scores, [1])

  # Both the state and the output of this RNN cell contain the alpha values.
  # The output value is currently unused and simply satisfies the RNN API.
  # This could be useful in the future if we need to compute marginal
  # probabilities, which would require the accumulated alpha values at every
  # time step.
  return new_alphas, new_alphas
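An illustrative NumPy version (not the TensorFlow source) of the broadcast in __call__ above: alphas[b, i, None] + transitions[i, j] scores every (previous tag i, current tag j) pair, and the logsumexp over i marginalizes out the previous tag.

import numpy as np
from scipy.special import logsumexp

batch_size, num_tags = 2, 3
alphas = np.random.rand(batch_size, num_tags)     # previous alpha values
transitions = np.random.rand(num_tags, num_tags)  # binary potentials
unary = np.random.rand(batch_size, num_tags)      # current unary potentials

scores = alphas[:, :, np.newaxis] + transitions   # [batch, i, j]
new_alphas = unary + logsumexp(scores, axis=1)    # [batch, num_tags]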
Example 14: testCrfLogNorm
def testCrfLogNorm(self):
  inputs = np.array(
      [[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]], dtype=np.float32)
  transition_params = np.array(
      [[-3, 5, -2], [3, 4, 1], [1, 2, 1]], dtype=np.float32)
  num_words = inputs.shape[0]
  num_tags = inputs.shape[1]
  sequence_lengths = np.array(3, dtype=np.int32)
  with self.test_session() as sess:
    all_sequence_scores = []

    # Compare the dynamic program with brute force computation.
    for tag_indices in itertools.product(
        range(num_tags), repeat=sequence_lengths):
      tag_indices = list(tag_indices)
      tag_indices.extend([0] * (num_words - sequence_lengths))
      all_sequence_scores.append(
          crf.crf_sequence_score(
              inputs=array_ops.expand_dims(inputs, 0),
              tag_indices=array_ops.expand_dims(tag_indices, 0),
              sequence_lengths=array_ops.expand_dims(sequence_lengths, 0),
              transition_params=constant_op.constant(transition_params)))
    brute_force_log_norm = math_ops.reduce_logsumexp(all_sequence_scores)
    log_norm = crf.crf_log_norm(
        inputs=array_ops.expand_dims(inputs, 0),
        sequence_lengths=array_ops.expand_dims(sequence_lengths, 0),
        transition_params=constant_op.constant(transition_params))
    log_norm = array_ops.squeeze(log_norm, [0])
    tf_brute_force_log_norm, tf_log_norm = sess.run(
        [brute_force_log_norm, log_norm])
    self.assertAllClose(tf_log_norm, tf_brute_force_log_norm)
Example 15: testKeepDims
def testKeepDims(self):
  for dtype in [np.float16, np.float32, np.double]:
    x_np = np.random.rand(5, 5).astype(dtype)
    with self.test_session(use_gpu=True):
      y_tf_np = math_ops.reduce_logsumexp(x_np, keepdims=True).eval()
      self.assertEqual(y_tf_np.ndim, x_np.ndim)
      y_np = log(np.sum(exp(x_np), keepdims=True))
      self.assertAllClose(y_tf_np, y_np)