This page collects typical usage examples of the Python function tensorflow.verify_tensor_all_finite. If you have been wondering what verify_tensor_all_finite does, or how and when to use it, the curated code examples below should help.
15 code examples of verify_tensor_all_finite are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
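Before the examples, a minimal sketch of the API itself may help (this assumes TF 1.x graph mode; in TF 2.x the same check lives at tf.debugging.assert_all_finite). The op returns its input tensor unchanged, with a runtime finiteness check attached:

import tensorflow as tf

t = tf.constant([1.0, 2.0, 3.0])
# Evaluating `checked` raises InvalidArgumentError if any element of `t`
# is Inf or NaN; otherwise it is simply `t`.
checked = tf.verify_tensor_all_finite(t, "t contains Inf or NaN")

with tf.Session() as sess:
    print(sess.run(checked))  # [1. 2. 3.]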
Example 1: loss
def loss(self, y_true, y_pred):
    """ categorical crossentropy loss """
    if self.crop_indices is not None:
        y_true = utils.batch_gather(y_true, self.crop_indices)
        y_pred = utils.batch_gather(y_pred, self.crop_indices)

    if self.use_float16:
        y_true = K.cast(y_true, 'float16')
        y_pred = K.cast(y_pred, 'float16')

    # scale and clip probabilities
    # this should not be necessary for softmax output.
    y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
    y_pred = K.clip(y_pred, K.epsilon(), 1)

    # compute log probability
    log_post = K.log(y_pred)  # likelihood

    # loss
    loss = - y_true * log_post

    # weighted loss
    if self.weights is not None:
        loss *= self.weights
    if self.vox_weights is not None:
        loss *= self.vox_weights

    # take the total loss
    # loss = K.batch_flatten(loss)
    mloss = K.mean(K.sum(K.cast(loss, 'float32'), -1))
    # keep the checked tensor: in graph mode the finiteness assertion only
    # runs if the returned tensor is actually used
    mloss = tf.verify_tensor_all_finite(mloss, 'Loss not finite')
    return mloss
Example 2: kl_multivariate_normal
def kl_multivariate_normal(loc_one, scale_one, loc_two=0.0, scale_two=1.0):
    """Calculate the KL of multivariate normal distributions with
    diagonal covariances.

    Parameters
    ----------
    loc_one : tf.Tensor
        A 0-D tensor, 1-D tensor of length n, or 2-D tensor of shape
        M x n, where each row represents the mean of an n-dimensional
        Gaussian.
    scale_one : tf.Tensor
        A tensor of same shape as ``loc_one``, representing the
        standard deviation.
    loc_two : tf.Tensor, optional
        A tensor of same shape as ``loc_one``, representing the
        mean of another Gaussian.
    scale_two : tf.Tensor, optional
        A tensor of same shape as ``loc_one``, representing the
        standard deviation of another Gaussian.

    Returns
    -------
    tf.Tensor
        For 0-D or 1-D tensor inputs, outputs the 0-D tensor
        ``KL( N(z; loc_one, scale_one) || N(z; loc_two, scale_two) )``.
        For 2-D tensor inputs, outputs the 1-D tensor
        ``[KL( N(z; loc_one[m,:], scale_one[m,:]) || N(z; loc_two[m,:], scale_two[m,:]) )]_{m=1}^M``.

    Raises
    ------
    InvalidArgumentError
        If the location variables have Inf or NaN values, or if the scale
        variables are not positive.
    """
    dependencies = [tf.verify_tensor_all_finite(loc_one, msg=''),
                    tf.verify_tensor_all_finite(loc_two, msg=''),
                    tf.assert_positive(scale_one),
                    tf.assert_positive(scale_two)]
    loc_one = control_flow_ops.with_dependencies(dependencies, loc_one)
    scale_one = control_flow_ops.with_dependencies(dependencies, scale_one)
    loc_one = tf.cast(loc_one, tf.float32)
    scale_one = tf.cast(scale_one, tf.float32)

    if loc_two == 0.0 and scale_two == 1.0:
        # With default arguments, we can avoid some intermediate computation.
        out = (tf.square(scale_one) + tf.square(loc_one) -
               1.0 - 2.0 * tf.log(scale_one))
    else:
        loc_two = control_flow_ops.with_dependencies(dependencies, loc_two)
        scale_two = control_flow_ops.with_dependencies(dependencies, scale_two)
        loc_two = tf.cast(loc_two, tf.float32)
        scale_two = tf.cast(scale_two, tf.float32)
        out = (tf.square(scale_one / scale_two) +
               tf.square((loc_two - loc_one) / scale_two) -
               1.0 + 2.0 * tf.log(scale_two) - 2.0 * tf.log(scale_one))

    if len(out.get_shape()) <= 1:  # scalar or vector
        return 0.5 * tf.reduce_sum(out)
    else:  # matrix
        return 0.5 * tf.reduce_sum(out, 1)
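A quick sanity check of this function, as a sketch: it assumes a TF 1.x-era session and that control_flow_ops has been imported from tensorflow.python.ops, as the body requires. The KL divergence of a standard normal from itself is zero:

loc = tf.constant([0.0, 0.0])
scale = tf.constant([1.0, 1.0])

with tf.Session() as sess:
    # KL( N(0, 1) || N(0, 1) ) = 0 in every dimension
    print(sess.run(kl_multivariate_normal(loc, scale)))  # 0.0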
Example 3: create_generative
def create_generative(parameters):
    print('Creating the neural network model.')
    tf.reset_default_graph()

    # tf Graph input
    x = tf.placeholder(tf.float32, shape=(1, parameters['n_input']), name='input')
    x = tf.verify_tensor_all_finite(x, "X not finite!")
    y = tf.placeholder(tf.float32, shape=(1, parameters['n_output']), name='expected_output')
    y = tf.verify_tensor_all_finite(y, "Y not finite!")
    x = tf.Print(x, [x], "X: ")
    y = tf.Print(y, [y], "Y: ")

    lstm_state_size = np.sum(parameters['lstm_layers']) * 2
    # Note: Batch size is the first dimension in istate.
    istate = tf.placeholder(tf.float32, shape=(None, lstm_state_size), name='internal_state')
    lr = tf.placeholder(tf.float32, name='learning_rate')

    # The target to track itself and its peers, each with x, y and velocity x and y.
    input_size = (parameters['n_peers'] + 1) * 2
    inputToRnn = parameters['input_layer']
    if parameters['input_layer'] is None:
        inputToRnn = parameters['n_input']

    cells = [rnn_cell.LSTMCell(l, parameters['lstm_layers'][i - 1] if i > 0 else inputToRnn,
                               num_proj=parameters['lstm_layers'][i],
                               cell_clip=parameters['lstm_clip'],
                               use_peepholes=True) for i, l in enumerate(parameters['lstm_layers'])]
    # TODO: GRUCell support here.
    # cells = [rnn_cell.GRUCell(l, parameters['lstm_layers'][i-1] if (i > 0) else inputToRnn)
    #          for i, l in enumerate(parameters['lstm_layers'])]

    model = {
        'input_weights': tf.Variable(tf.random_normal(
            [input_size, parameters['input_layer']]), name='input_weights'),
        'input_bias': tf.Variable(tf.random_normal([parameters['input_layer']]), name='input_bias'),
        'output_weights': tf.Variable(tf.random_normal([parameters['lstm_layers'][-1],
                                                        # 6 = 2 sigma, 2 mean, weight, rho
                                                        parameters['n_mixtures'] * 6]),
                                      name='output_weights'),
        # We need to put at least the standard deviation output biases to about 5
        # to prevent zeros and infinities (e.g. mean = 5.0, stddev = 3.0).
        'output_bias': tf.Variable(tf.random_normal([parameters['n_mixtures'] * 6]),
                                   name='output_bias'),
        'rnn_cell': rnn_cell.MultiRNNCell(cells),
        'lr': lr,
        'x': x,
        'y': y,
        'keep_prob': tf.placeholder(tf.float32),
        'istate': istate
    }

    # The next variables need to be remapped, because we don't have RNN context anymore:
    # RNN/MultiRNNCell/Cell0/LSTMCell/ -> MultiRNNCell/Cell0/LSTMCell/
    # B, W_F_diag, W_O_diag, W_I_diag, W_0
    with tf.variable_scope("RNN"):
        pred = RNN_generative(parameters, x, model, istate)

    model['pred'] = pred[0]
    model['last_state'] = pred[1]
    return model
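For illustration only, a hypothetical parameters dict with made-up values; the exact keys are dictated by create_generative itself, and the call assumes RNN_generative and the rnn_cell module from the same project are importable:

parameters = {
    'n_input': 8,            # width of the input placeholder
    'n_output': 12,          # width of the expected-output placeholder
    'n_peers': 3,            # peers tracked alongside the target
    'n_mixtures': 2,         # mixture components (6 outputs each)
    'input_layer': 64,       # size of the layer feeding the RNN
    'lstm_layers': [64, 64],
    'lstm_clip': 50.0,
}
model = create_generative(parameters)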
Example 4: hessian
def hessian(y, xs):
    """Calculate Hessian of y with respect to each x in xs.

    Parameters
    ----------
    y : tf.Tensor
        Tensor to calculate Hessian of.
    xs : list of tf.Variable
        List of TensorFlow variables to calculate with respect to.
        The variables can have different shapes.

    Returns
    -------
    tf.Tensor
        A 2-D tensor where each row is
        .. math:: \partial_{xs} ( [ \partial_{xs} y ]_j ).

    Raises
    ------
    InvalidArgumentError
        If the inputs have Inf or NaN values.
    """
    dependencies = [tf.verify_tensor_all_finite(y, msg='')]
    dependencies.extend([tf.verify_tensor_all_finite(x, msg='') for x in xs])
    with tf.control_dependencies(dependencies):
        # Calculate flattened vector grad_{xs} y.
        grads = tf.gradients(y, xs)
        grads = [tf.reshape(grad, [-1]) for grad in grads]
        grads = tf.concat(0, grads)

        # Loop over each element in the vector.
        mat = []
        d = grads.get_shape()[0]
        if not isinstance(d, int):
            d = grads.eval().shape[0]

        for j in range(d):
            # Calculate grad_{xs} ( [ grad_{xs} y ]_j ).
            gradjgrads = tf.gradients(grads[j], xs)
            # Flatten into vector.
            hi = []
            for l in range(len(xs)):
                hij = gradjgrads[l]
                # return 0 if gradient doesn't exist; TensorFlow returns None
                if hij is None:
                    hij = tf.zeros(xs[l].get_shape(), dtype=tf.float32)

                hij = tf.reshape(hij, [-1])
                hi.append(hij)

            hi = tf.concat(0, hi)
            mat.append(hi)

        # Form matrix where each row is grad_{xs} ( [ grad_{xs} y ]_j ).
        return tf.pack(mat)
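A small sanity check, as a sketch assuming the TF 0.x-era API used above (tf.pack, tf.concat(0, ...), tf.initialize_all_variables) is available. For y = sum(x**2) the Hessian is 2I:

x = tf.Variable([1.0, 2.0])
y = tf.reduce_sum(tf.square(x))

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    print(sess.run(hessian(y, [x])))  # [[2. 0.], [0. 2.]]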
Example 5: _validate
def _validate(self):
    vops = [tf.assert_positive(self._scale),
            tf.assert_positive(self._high - self._low),
            tf.verify_tensor_all_finite(self._high,
                                        "Upper bound not finite"),
            tf.verify_tensor_all_finite(self._low,
                                        "Lower bound not finite"),
            tf.verify_tensor_all_finite(self._loc,
                                        "Loc not finite"),
            tf.verify_tensor_all_finite(self._scale,
                                        "Scale not finite")]
    return tf.group(*vops, name="ValidationOps")
Example 6: kl_multivariate_normal
def kl_multivariate_normal(loc_one, scale_one, loc_two=0.0, scale_two=1.0):
    """Calculate the KL of multivariate normal distributions with
    diagonal covariances.

    Parameters
    ----------
    loc_one : tf.Tensor
        n-dimensional vector, or M x n-dimensional matrix where each
        row represents the mean of an n-dimensional Gaussian
    scale_one : tf.Tensor
        n-dimensional vector, or M x n-dimensional matrix where each
        row represents the standard deviation of an n-dimensional Gaussian
    loc_two : tf.Tensor, optional
        n-dimensional vector, or M x n-dimensional matrix where each
        row represents the mean of an n-dimensional Gaussian
    scale_two : tf.Tensor, optional
        n-dimensional vector, or M x n-dimensional matrix where each
        row represents the standard deviation of an n-dimensional Gaussian

    Returns
    -------
    tf.Tensor
        For scalar or vector inputs, outputs the scalar
        ``KL( N(z; loc_one, scale_one) || N(z; loc_two, scale_two) )``.
        For matrix inputs, outputs the vector
        ``[KL( N(z; loc_one[m,:], scale_one[m,:]) || N(z; loc_two[m,:], scale_two[m,:]) )]_{m=1}^M``.

    Raises
    ------
    InvalidArgumentError
        If the location variables have Inf or NaN values, or if the scale
        variables are not positive.
    """
    dependencies = [tf.verify_tensor_all_finite(loc_one, msg=''),
                    tf.verify_tensor_all_finite(loc_two, msg=''),
                    tf.assert_positive(scale_one),
                    tf.assert_positive(scale_two)]
    loc_one = control_flow_ops.with_dependencies(dependencies, loc_one)
    scale_one = control_flow_ops.with_dependencies(dependencies, scale_one)

    # Check the Python-level defaults before wrapping loc_two/scale_two as
    # tensors; a tf.Tensor never compares equal to a float, so wrapping
    # first would silently disable this fast path.
    if loc_two == 0.0 and scale_two == 1.0:
        return 0.5 * tf.reduce_sum(
            tf.square(scale_one) + tf.square(loc_one) -
            1.0 - 2.0 * tf.log(scale_one))
    else:
        loc_two = control_flow_ops.with_dependencies(dependencies, loc_two)
        scale_two = control_flow_ops.with_dependencies(dependencies, scale_two)
        return 0.5 * tf.reduce_sum(
            tf.square(scale_one / scale_two) +
            tf.square((loc_two - loc_one) / scale_two) -
            1.0 + 2.0 * tf.log(scale_two) - 2.0 * tf.log(scale_one), 1)
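As a sketch of the matrix case, assuming a TF 1.x-era session and the control_flow_ops import: for M = 2 rows of 3-dimensional Gaussians with unit scales, each per-dimension KL term is 0.5 * (1^2 + 1^2 - 1) = 0.5, so each row sums to 1.5:

loc_one = tf.zeros([2, 3])
scale_one = tf.ones([2, 3])
loc_two = tf.ones([2, 3])
scale_two = tf.ones([2, 3])

with tf.Session() as sess:
    print(sess.run(kl_multivariate_normal(loc_one, scale_one,
                                          loc_two, scale_two)))  # [1.5 1.5]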
Example 7: mean_dice
def mean_dice(self, y_true, y_pred):
    """ weighted mean dice across all patches and labels """
    # compute dice, which will now be [batch_size, nb_labels]
    dice_metric = self.dice(y_true, y_pred)

    # weigh the entries in the dice matrix:
    if self.weights is not None:
        dice_metric *= self.weights
    if self.vox_weights is not None:
        dice_metric *= self.vox_weights

    # return the weighted mean dice; keep the checked tensor so the
    # finiteness assertion actually runs
    mean_dice_metric = K.mean(dice_metric)
    mean_dice_metric = tf.verify_tensor_all_finite(mean_dice_metric, 'metric not finite')
    return mean_dice_metric
Example 8: testVerifyTensorAllFiniteSucceeds
def testVerifyTensorAllFiniteSucceeds(self):
    x_shape = [5, 4]
    x = np.random.random_sample(x_shape).astype(np.float32)
    with self.test_session():
        t = tf.constant(x, shape=x_shape, dtype=tf.float32)
        t_verified = tf.verify_tensor_all_finite(t, "Input is not a number.")
        self.assertAllClose(x, t_verified.eval())
Example 9: log_sum_exp
def log_sum_exp(x):
    """Compute the ``log_sum_exp`` of the elements in x.

    Parameters
    ----------
    x : tf.Tensor
        A vector or a matrix with second dimension 1, i.e.,
        shape ``TensorShape([Dimension(N)])`` or
        shape ``TensorShape([Dimension(N), Dimension(1)])``.

    Returns
    -------
    tf.Tensor
        Scalar if vector input, vector if matrix tensor input.

    Raises
    ------
    InvalidArgumentError
        If the input has Inf or NaN values.
    """
    dependencies = [tf.verify_tensor_all_finite(x, msg='')]
    x = control_flow_ops.with_dependencies(dependencies, x)
    x_max = tf.reduce_max(x)
    return tf.add(x_max, tf.log(tf.reduce_sum(tf.exp(tf.sub(x, x_max)))))
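A one-line sanity check, assuming a TF 0.x-era session (the tf.sub call above predates tf.subtract):

x = tf.constant([0.0, 0.0])

with tf.Session() as sess:
    print(sess.run(log_sum_exp(x)))  # log(e**0 + e**0) = log 2 ≈ 0.6931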
Example 10: __init__
def __init__(self, rnn_states, type_embedder, name='DelexicalizedDynamicPredicateEmbedder'):
    """Construct DelexicalizedDynamicPredicateEmbedder.

    Args:
        rnn_states (SequenceBatch): of shape (num_contexts, seq_length, rnn_state_dim)
        type_embedder (TokenEmbedder)
        name (str)
    """
    self._type_embedder = type_embedder

    with tf.name_scope(name):
        # column indices of rnn_states (indexes time)
        self._col_indices = FeedSequenceBatch()  # (num_predicates, max_predicate_mentions)

        # row indices of rnn_states (indexes utterance)
        self._row_indices = tf.placeholder(dtype=tf.int32, shape=[None])  # (num_predicates,)
        row_indices_expanded = expand_dims_for_broadcast(self._row_indices, self._col_indices.values)

        # (num_predicates, max_predicate_mentions, rnn_state_dim)
        rnn_states_selected = SequenceBatch(
            gather_2d(rnn_states.values, row_indices_expanded, self._col_indices.values),
            self._col_indices.mask)

        # (num_predicates, rnn_state_dim)
        rnn_embeds = reduce_mean(rnn_states_selected, allow_empty=True)
        rnn_embeds = tf.verify_tensor_all_finite(rnn_embeds, "RNN-state-based embeddings")

        self._type_seq_embedder = MeanSequenceEmbedder(type_embedder.embeds, name='TypeEmbedder')
        self._embeds = tf.concat(1, [rnn_embeds, self._type_seq_embedder.embeds])
Example 11: log_sum_exp
def log_sum_exp(input_tensor, reduction_indices=None, keep_dims=False):
    """Compute the ``log_sum_exp`` of elements in a tensor, taking
    the sum across axes given by ``reduction_indices``.

    Parameters
    ----------
    input_tensor : tf.Tensor
        The tensor to reduce. Should have numeric type.
    reduction_indices : int or list of int, optional
        The dimensions to reduce. If `None` (the default), reduces all
        dimensions.
    keep_dims : bool, optional
        If true, retains reduced dimensions with length 1.

    Returns
    -------
    tf.Tensor
        The reduced tensor.

    Raises
    ------
    InvalidArgumentError
        If the input has Inf or NaN values.
    """
    dependencies = [tf.verify_tensor_all_finite(input_tensor, msg='')]
    input_tensor = control_flow_ops.with_dependencies(dependencies, input_tensor)
    input_tensor = tf.cast(input_tensor, dtype=tf.float32)
    x_max = tf.reduce_max(input_tensor, reduction_indices, keep_dims=True)
    return tf.squeeze(x_max) + tf.log(tf.reduce_sum(
        tf.exp(input_tensor - x_max), reduction_indices, keep_dims))
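A sketch of the row-wise reduction, assuming a TF 1.x-era session and the control_flow_ops import (tf.log was removed in TF 2.x):

m = tf.log(tf.constant([[1.0, 1.0], [1.0, 3.0]]))

with tf.Session() as sess:
    # row-wise log-sum-exp: [log(1 + 1), log(1 + 3)]
    print(sess.run(log_sum_exp(m, reduction_indices=1)))  # [0.6931 1.3863]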
Example 12: cumprod
def cumprod(xs):
    """Cumulative product of a tensor along its outer dimension.

    https://github.com/tensorflow/tensorflow/issues/813

    Parameters
    ----------
    xs : tf.Tensor
        A 1-D or higher tensor.

    Returns
    -------
    tf.Tensor
        A tensor with `cumprod` applied along its outer dimension.

    Raises
    ------
    InvalidArgumentError
        If the input has Inf or NaN values.
    """
    dependencies = [tf.verify_tensor_all_finite(xs, msg='')]
    xs = control_flow_ops.with_dependencies(dependencies, xs)
    xs = tf.cast(xs, dtype=tf.float32)
    values = tf.unpack(xs)
    out = []
    prev = tf.ones_like(values[0])
    for val in values:
        s = prev * val
        out.append(s)
        prev = s

    result = tf.pack(out)
    return result
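A quick check, assuming a TF 0.x-era session (tf.unpack and tf.pack predate tf.unstack and tf.stack):

xs = tf.constant([1.0, 2.0, 3.0])

with tf.Session() as sess:
    print(sess.run(cumprod(xs)))  # [1. 2. 6.]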
Example 13: init_target
def init_target(self):
    with tf.name_scope('target') as scope:
        self.target = self.reduced_loss + self.reg * self.regularization
        self.checked_target = tf.verify_tensor_all_finite(
            self.target,
            msg='NaN or Inf in target value',
            name='target')
        tf.summary.scalar('target', self.checked_target)
Example 14: multivariate_rbf
def multivariate_rbf(x, y=0.0, sigma=1.0, l=1.0):
    """Squared-exponential kernel

    .. math:: k(x, y) = \sigma^2 \exp\left( -\frac{1}{2l^2} \sum_i (x_i - y_i)^2 \right)

    Parameters
    ----------
    x : tf.Tensor
        A n-D tensor.
    y : tf.Tensor, optional
        A tensor of same shape as ``x``.
    sigma : tf.Tensor, optional
        A 0-D tensor, representing the standard deviation of the radial
        basis function.
    l : tf.Tensor, optional
        A 0-D tensor, representing the lengthscale of the radial basis
        function.

    Returns
    -------
    tf.Tensor
        A tensor of one less dimension than the input.

    Raises
    ------
    InvalidArgumentError
        If the mean variables have Inf or NaN values, or if the scale
        and length variables are not positive.
    """
    dependencies = [tf.verify_tensor_all_finite(x, msg=''),
                    tf.verify_tensor_all_finite(y, msg=''),
                    tf.assert_positive(sigma),
                    tf.assert_positive(l)]
    x = control_flow_ops.with_dependencies(dependencies, x)
    y = control_flow_ops.with_dependencies(dependencies, y)
    sigma = control_flow_ops.with_dependencies(dependencies, sigma)
    l = control_flow_ops.with_dependencies(dependencies, l)

    x = tf.cast(x, dtype=tf.float32)
    y = tf.cast(y, dtype=tf.float32)
    sigma = tf.cast(sigma, dtype=tf.float32)
    l = tf.cast(l, dtype=tf.float32)

    return (tf.pow(sigma, 2.0) *
            tf.exp(-1.0 / (2.0 * tf.pow(l, 2.0)) *
                   tf.reduce_sum(tf.pow(x - y, 2.0))))
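A sanity check of the kernel, as a sketch assuming a TF 1.x-era session and the control_flow_ops import: with x == y the exponent vanishes, so the kernel evaluates to sigma**2 = 1 under the default arguments:

x = tf.constant([1.0, 2.0])

with tf.Session() as sess:
    print(sess.run(multivariate_rbf(x, x)))  # 1.0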
Example 15: l1_normalize
def l1_normalize(x, dim, name=None):
    """l1 normalizes x.

    Args:
        x: The tensor to normalize.
        dim: The dimension to normalize along.
        name: Optional name for this op.

    Returns:
        x normalized along dim.
    """
    with tf.op_scope([x], name, 'l1_normalize') as scope:
        x = tf.convert_to_tensor(x, name='x')
        x = tf.verify_tensor_all_finite(x, 'Error at input %s' % scope)
        x_norm = tf.reduce_sum(tf.abs(x), [dim], keep_dims=True)
        return tf.verify_tensor_all_finite(tf.div(x, x_norm, name=scope),
                                           'Error at %s' % scope)
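A quick check, assuming an older TF 1.x session in which the deprecated tf.op_scope and tf.div used above still exist:

x = tf.constant([[1.0, -3.0]])

with tf.Session() as sess:
    print(sess.run(l1_normalize(x, 1)))  # [[ 0.25 -0.75]]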