This article collects typical usage examples of the Python method tensorflow.is_finite. If you are wondering what exactly tensorflow.is_finite does, how to call it, or how it is used in practice, the hand-picked examples below should help; they also illustrate the module the method belongs to, tensorflow.
The following 15 code examples of tensorflow.is_finite are shown, sorted by popularity.
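All of the examples target TensorFlow 1.x, where the op is exposed at the top level as tf.is_finite; in TensorFlow 2.x it lives at tf.math.is_finite. As a warm-up, here is a minimal sketch of the op itself:

import numpy as np
import tensorflow as tf  # TensorFlow 1.x assumed

x = tf.constant([1.0, np.nan, np.inf, -np.inf, 0.0])
mask = tf.is_finite(x)  # elementwise True where the value is neither NaN nor +/-Inf

with tf.Session() as sess:
    print(sess.run(mask))  # [ True False False False  True]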
Example 1: updateK

# Required module: import tensorflow [as alias]
# Or: from tensorflow import is_finite [as alias]
def updateK(self, k, prepVars, U):
    f = self.__f
    UfShape = U[f].get_shape()

    lhUfk = self.__likelihood.lhUfk(U[f], prepVars, f, k)
    postfk = lhUfk * self.prior[k].cond()
    Ufk = postfk.draw()
    Ufk = tf.expand_dims(Ufk, 0)

    normUfk = tf.norm(Ufk)
    notNanNorm = tf.logical_not(tf.is_nan(normUfk))
    finiteNorm = tf.is_finite(normUfk)
    positiveNorm = normUfk > 0.
    isValid = tf.logical_and(notNanNorm,
                             tf.logical_and(finiteNorm,
                                            positiveNorm))
    Uf = tf.cond(isValid, lambda: self.updateUf(U[f], Ufk, k),
                 lambda: U[f])

    # TODO: if valid -> self.__likelihood.lhU()[f].updateUfk(U[f][k], k)
    Uf.set_shape(UfShape)
    U[f] = Uf
    return U
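Stripped of the model-specific pieces, this gates an update on its norm being finite and strictly positive. A minimal standalone sketch of just that pattern (the function name here is illustrative, not from the original code):

import tensorflow as tf

def accept_if_valid(current, candidate):
    # keep `candidate` only if its norm is finite and strictly positive;
    # otherwise fall back to `current`
    norm = tf.norm(candidate)
    is_valid = tf.logical_and(tf.is_finite(norm), norm > 0.)
    return tf.cond(is_valid, lambda: candidate, lambda: current)

Note that tf.is_finite already returns False for NaN, so the separate tf.is_nan check in updateK is redundant (though harmless).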
Example 2: get_acceptance_rate

# Required module: import tensorflow [as alias]
# Or: from tensorflow import is_finite [as alias]
def get_acceptance_rate(q, p, new_q, new_p, log_posterior, mass, data_axes):
    old_hamiltonian, old_log_prob = hamiltonian(
        q, p, log_posterior, mass, data_axes)
    new_hamiltonian, new_log_prob = hamiltonian(
        new_q, new_p, log_posterior, mass, data_axes)
    old_log_prob = tf.check_numerics(
        old_log_prob,
        'HMC: old_log_prob has numeric errors! Try better initialization.')
    acceptance_rate = tf.exp(
        tf.minimum(-new_hamiltonian + old_hamiltonian, 0.0))
    is_finite = tf.logical_and(tf.is_finite(acceptance_rate),
                               tf.is_finite(new_log_prob))
    acceptance_rate = tf.where(is_finite, acceptance_rate,
                               tf.zeros_like(acceptance_rate))
    return old_hamiltonian, new_hamiltonian, old_log_prob, new_log_prob, \
        acceptance_rate
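The tf.where masking means an HMC proposal whose acceptance rate or new log-probability is non-finite simply gets acceptance rate 0, i.e. it is always rejected, rather than propagating NaN/Inf through the rest of the sampler.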
Example 3: mean_acc

# Required module: import tensorflow [as alias]
# Or: from tensorflow import is_finite [as alias]
def mean_acc(y_true, y_pred):
    s = K.shape(y_true)
    # reshape so the width and height dims are multiplied together
    y_true_reshaped = K.reshape(y_true, tf.stack([-1, s[1] * s[2], s[-1]]))
    y_pred_reshaped = K.reshape(y_pred, tf.stack([-1, s[1] * s[2], s[-1]]))
    # correctly classified
    clf_pred = K.one_hot(K.argmax(y_pred_reshaped), nb_classes=s[-1])
    equal_entries = K.cast(K.equal(clf_pred, y_true_reshaped), dtype='float32') * y_true_reshaped
    correct_pixels_per_class = K.sum(equal_entries, axis=1)
    n_pixels_per_class = K.sum(y_true_reshaped, axis=1)
    acc = correct_pixels_per_class / n_pixels_per_class
    # classes absent from the ground truth give 0/0 = NaN; drop them before averaging
    acc_mask = tf.is_finite(acc)
    acc_masked = tf.boolean_mask(acc, acc_mask)
    return K.mean(acc_masked)
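A quick sanity check of the masking behaviour — a minimal sketch assuming mean_acc and its imports (numpy as np, tensorflow as tf, keras.backend as K) are in scope, and a Keras version whose K.one_hot accepts the nb_classes keyword used above (Keras 2 renamed it num_classes):

y_true = np.array([[[[1., 0., 0.], [0., 1., 0.]]]], dtype='float32')  # (batch, h, w, classes)
y_pred = np.array([[[[.9, .1, 0.], [.2, .7, .1]]]], dtype='float32')
with tf.Session() as sess:
    # class 2 never occurs in y_true, so its 0/0 accuracy is masked out; prints 1.0
    print(sess.run(mean_acc(tf.constant(y_true), tf.constant(y_pred))))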
Example 4: mean_IoU

# Required module: import tensorflow [as alias]
# Or: from tensorflow import is_finite [as alias]
def mean_IoU(y_true, y_pred):
    s = K.shape(y_true)
    # reshape so the width and height dims are multiplied together
    y_true_reshaped = K.reshape(y_true, tf.stack([-1, s[1] * s[2], s[-1]]))
    y_pred_reshaped = K.reshape(y_pred, tf.stack([-1, s[1] * s[2], s[-1]]))
    # correctly classified
    clf_pred = K.one_hot(K.argmax(y_pred_reshaped), nb_classes=s[-1])
    equal_entries = K.cast(K.equal(clf_pred, y_true_reshaped), dtype='float32') * y_true_reshaped
    intersection = K.sum(equal_entries, axis=1)
    union_per_class = K.sum(y_true_reshaped, axis=1) + K.sum(y_pred_reshaped, axis=1)
    iou = intersection / (union_per_class - intersection)
    iou_mask = tf.is_finite(iou)
    iou_masked = tf.boolean_mask(iou, iou_mask)
    return K.mean(iou_masked)
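Here union_per_class is the per-class sum of true and predicted scores, i.e. |A| + |B|, so subtracting the intersection gives the usual |A ∪ B| denominator. Classes absent from both prediction and ground truth produce 0/0 = NaN, which the tf.is_finite mask removes before the mean, exactly as in mean_acc above.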
Example 5: _loss

# Required module: import tensorflow [as alias]
# Or: from tensorflow import is_finite [as alias]
def _loss(self, predictions):
    with tf.name_scope("loss"):
        # if training, crop the center of y; otherwise padding was applied
        slice_amt = (np.sum(self.filter_sizes) - len(self.filter_sizes)) // 2
        slice_y = self.y_norm[:, slice_amt:-slice_amt, slice_amt:-slice_amt]
        _y = tf.cond(self.is_training, lambda: slice_y, lambda: self.y_norm)
        err = tf.square(predictions - _y)
        # average only over finite entries, ignoring NaNs in the target
        err_filled = utils.fill_na(err, 0)
        finite_count = tf.reduce_sum(tf.cast(tf.is_finite(err), tf.float32))
        mse = tf.reduce_sum(err_filled) / finite_count
        return mse
Example 6: fill_na

# Required module: import tensorflow [as alias]
# Or: from tensorflow import is_finite [as alias]
def fill_na(x, fillval=0):
    fill = tf.ones_like(x) * fillval
    return tf.where(tf.is_finite(x), x, fill)
Example 7: nanmean

# Required module: import tensorflow [as alias]
# Or: from tensorflow import is_finite [as alias]
def nanmean(x, axis=None):
    x_filled = fill_na(x, 0)
    x_sum = tf.reduce_sum(x_filled, axis=axis)
    x_count = tf.reduce_sum(tf.cast(tf.is_finite(x), tf.float32), axis=axis)
    return tf.div(x_sum, x_count)
Example 8: nanvar

# Required module: import tensorflow [as alias]
# Or: from tensorflow import is_finite [as alias]
def nanvar(x, axis=None):
    x_count = tf.reduce_sum(tf.cast(tf.is_finite(x), tf.float32), axis=axis)
    x_mean = nanmean(x, axis=axis)
    # squared deviations are NaN exactly where x is, so zero-fill them before
    # summing; summing (0 - mean)^2 at filled positions would bias the variance
    x_ss = tf.reduce_sum(fill_na((x - x_mean)**2, 0), axis=axis)
    return x_ss / x_count
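These helpers mirror np.nanmean and np.nanvar. A minimal check, assuming fill_na, nanmean, and nanvar above are in scope along with tensorflow as tf and numpy as np:

x = tf.constant([1.0, 2.0, np.nan, 4.0])
with tf.Session() as sess:
    print(sess.run(nanmean(x)))  # 2.3333335, matches np.nanmean([1, 2, nan, 4])
    print(sess.run(nanvar(x)))   # 1.5555556, matches np.nanvar (population variance, ddof=0)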
Example 9: _mapper

# Required module: import tensorflow [as alias]
# Or: from tensorflow import is_finite [as alias]
def _mapper(self, grad, var):
    # this is very slow...
    # op = tf.Assert(tf.reduce_all(tf.is_finite(var)), [var], summarize=100)
    grad = tf.check_numerics(grad, 'CheckGradient')
    return grad
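To see what tf.check_numerics does here — a minimal sketch, assuming TensorFlow 1.x:

grad = tf.constant([1.0, float('nan')])
checked = tf.check_numerics(grad, 'CheckGradient')
with tf.Session() as sess:
    sess.run(checked)  # raises InvalidArgumentError: CheckGradient : Tensor had NaN values

Unlike the commented-out tf.Assert on the whole variable, this only inspects the gradient flowing through it, which is much cheaper.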
Example 10: aggregate_single_gradient_using_copy

# Required module: import tensorflow [as alias]
# Or: from tensorflow import is_finite [as alias]
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
                                         check_inf_nan):
    """Calculate the average gradient for a shared variable across all towers.

    Note that this function provides a synchronization point across all towers.

    Args:
      grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
        (gradient, variable) pair within the outer list represents the gradient
        of the variable calculated for a single tower, and the number of pairs
        equals the number of towers.
      use_mean: if True, mean is taken, else sum of gradients is taken.
      check_inf_nan: check grads for NaNs and Infs.

    Returns:
      The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
      gradient has been averaged across all towers. The variable is chosen
      from the first tower. The has_nan_or_inf indicates whether the gradients
      have NaN or Inf values.
    """
    grads = [g for g, _ in grad_and_vars]
    if any(isinstance(g, tf.IndexedSlices) for g in grads):
        # TODO(reedwm): All-reduce IndexedSlices more effectively.
        grad = gradients_impl._AggregateIndexedSlicesGradients(grads)  # pylint: disable=protected-access
    else:
        grad = tf.add_n(grads)

    if use_mean and len(grads) > 1:
        grad = tf.scalar_mul(1.0 / len(grads), grad)

    v = grad_and_vars[0][1]
    if check_inf_nan:
        with tf.name_scope('check_for_inf_and_nan'):
            has_nan_or_inf = tf.logical_not(tf.reduce_all(tf.is_finite(grads)))
        return (grad, v), has_nan_or_inf
    else:
        return (grad, v), None
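A small usage sketch, assuming the function above is in scope with tensorflow as tf (tf.is_finite accepts the Python list of same-shaped gradients because TF 1.x auto-packs it into a stacked tensor):

g1 = tf.constant([1.0, 2.0])
g2 = tf.constant([3.0, float('inf')])
v = tf.Variable([0.0, 0.0])
(avg_grad, _), has_bad = aggregate_single_gradient_using_copy(
    [(g1, v), (g2, v)], use_mean=True, check_inf_nan=True)
with tf.Session() as sess:
    print(sess.run([avg_grad, has_bad]))  # [array([ 2., inf], dtype=float32), True]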
Example 11: custom_svd_v_column

# Required module: import tensorflow [as alias]
# Or: from tensorflow import is_finite [as alias]
def custom_svd_v_column(M, col_index=-1):
    # Must make sure M is finite. Otherwise cudaSolver might fail.
    assert_op = tf.Assert(tf.logical_not(tf.reduce_any(tf.logical_not(tf.is_finite(M)))),
                          [M], summarize=10)
    with tf.control_dependencies([assert_op]):
        with tf.get_default_graph().gradient_override_map({'Svd': 'CustomSvd'}):
            s, u, v = tf.svd(M, name='Svd')  # M = u s v^T
            return v[:, :, col_index]
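The doubly negated condition is just tf.reduce_all(tf.is_finite(M)) written the long way: the assertion fires if any entry of M is NaN or Inf, guarding the GPU SVD kernel, which can fail on non-finite input.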
Example 12: _compare

# Required module: import tensorflow [as alias]
# Or: from tensorflow import is_finite [as alias]
def _compare(self, x, use_gpu):
    np_finite, np_inf, np_nan = np.isfinite(x), np.isinf(x), np.isnan(x)
    with self.test_session(use_gpu=use_gpu) as sess:
        inx = tf.convert_to_tensor(x)
        ofinite, oinf, onan = tf.is_finite(inx), tf.is_inf(inx), tf.is_nan(inx)
        tf_finite, tf_inf, tf_nan = sess.run([ofinite, oinf, onan])
    self.assertAllEqual(np_inf, tf_inf)
    self.assertAllEqual(np_nan, tf_nan)
    self.assertAllEqual(np_finite, tf_finite)
    self.assertShapeEqual(np_inf, oinf)
    self.assertShapeEqual(np_nan, onan)
    self.assertShapeEqual(np_finite, ofinite)
Example 13: _mapper

# Required module: import tensorflow [as alias]
# Or: from tensorflow import is_finite [as alias]
def _mapper(self, grad, var):
    # this was very slow.... see #3649
    # op = tf.Assert(tf.reduce_all(tf.is_finite(var)), [var], summarize=100)
    grad = tf.check_numerics(grad, 'CheckGradient/' + var.op.name)
    return grad
Example 14: mask_nans

# Required module: import tensorflow [as alias]
# Or: from tensorflow import is_finite [as alias]
def mask_nans(x):
    x_zeros = tf.zeros_like(x)
    x_mask = tf.is_finite(x)
    y = tf.where(x_mask, x, x_zeros)
    return y
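Despite its name, mask_nans also zeroes out ±Inf, since tf.is_finite is False for both NaN and Inf; it computes the same thing as fill_na(x, 0) from Example 6.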
Example 15: aggregate_single_gradient_using_copy

# Required module: import tensorflow [as alias]
# Or: from tensorflow import is_finite [as alias]
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
                                         check_inf_nan):
    """Calculate the average gradient for a shared variable across all towers.

    Note that this function provides a synchronization point across all towers.

    Args:
      grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
        (gradient, variable) pair within the outer list represents the gradient
        of the variable calculated for a single tower, and the number of pairs
        equals the number of towers.
      use_mean: if True, mean is taken, else sum of gradients is taken.
      check_inf_nan: check grads for NaNs and Infs.

    Returns:
      The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
      gradient has been averaged across all towers. The variable is chosen
      from the first tower. The has_nan_or_inf indicates whether the gradients
      have NaN or Inf values.
    """
    grads = [g for g, _ in grad_and_vars]
    grad = tf.add_n(grads)

    if use_mean and len(grads) > 1:
        grad = tf.multiply(grad, 1.0 / len(grads))

    v = grad_and_vars[0][1]
    if check_inf_nan:
        has_nan_or_inf = tf.logical_not(tf.reduce_all(tf.is_finite(grads)))
        return (grad, v), has_nan_or_inf
    else:
        return (grad, v), None
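This is a simplified variant of Example 10: it omits the tf.IndexedSlices branch and the name_scope around the finiteness check, and uses tf.multiply instead of tf.scalar_mul, but the tf.is_finite usage is identical.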