本文整理汇总了Python中tensorflow.verify_tensor_all_finite方法的典型用法代码示例。如果您正苦于以下问题:Python tensorflow.verify_tensor_all_finite方法的具体用法?Python tensorflow.verify_tensor_all_finite怎么用?Python tensorflow.verify_tensor_all_finite使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类tensorflow
的用法示例。
在下文中一共展示了tensorflow.verify_tensor_all_finite方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: mean_dice
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import verify_tensor_all_finite [as 别名]
def mean_dice(self, y_true, y_pred):
    """Weighted mean Dice across all patches and labels.

    Args:
        y_true: ground-truth label tensor.
        y_pred: prediction tensor, same shape as y_true.

    Returns:
        Scalar tensor holding the (weighted) mean Dice metric, wrapped in a
        finiteness check.
    """
    # compute dice, which will now be [batch_size, nb_labels]
    dice_metric = self.dice(y_true, y_pred)

    # weigh the entries in the dice matrix:
    if self.weights is not None:
        dice_metric *= self.weights
    if self.vox_weights is not None:
        dice_metric *= self.vox_weights

    mean_dice_metric = K.mean(dice_metric)
    # BUG FIX: verify_tensor_all_finite returns the verified tensor; the
    # original discarded that return value, so the NaN/Inf check was a dead
    # node and never ran in graph mode. Rebind so the check is on the path.
    mean_dice_metric = tf.verify_tensor_all_finite(mean_dice_metric,
                                                   'metric not finite')
    return mean_dice_metric
示例2: loss
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import verify_tensor_all_finite [as 别名]
def loss(self, y_true, y_pred):
    """ the loss. Assumes y_pred is prob (in [0,1] and sum_row = 1) """
    # compute dice, which will now be [batch_size, nb_labels]
    dice_metric = self.dice(y_true, y_pred)

    # loss
    dice_loss = 1 - dice_metric

    # weigh the entries in the dice matrix:
    if self.weights is not None:
        dice_loss *= self.weights

    # return one minus mean dice as loss
    mean_dice_loss = K.mean(dice_loss)
    # BUG FIX: the verified tensor was previously discarded, so the
    # finiteness assertion never executed in graph mode. Rebind the result
    # so the check sits on the returned tensor's dependency path.
    mean_dice_loss = tf.verify_tensor_all_finite(mean_dice_loss,
                                                 'Loss not finite')
    return mean_dice_loss
示例3: testVerifyTensorAllFiniteFails
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import verify_tensor_all_finite [as 别名]
def testVerifyTensorAllFiniteFails(self):
    """verify_tensor_all_finite must raise the given message for NaN and Inf."""
    x_shape = [5, 4]
    x = np.random.random_sample(x_shape).astype(np.float32)
    my_msg = "Input is not a number."
    # Both non-finite cases exercise the identical code path; loop over them.
    for bad_value in (np.nan, np.inf):
        x[0] = bad_value
        with self.test_session(use_gpu=True):
            with self.assertRaisesOpError(my_msg):
                t = tf.constant(x, shape=x_shape, dtype=tf.float32)
                t_verified = tf.verify_tensor_all_finite(t, my_msg)
                t_verified.eval()
示例4: add_softmax
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import verify_tensor_all_finite [as 别名]
def add_softmax(self):
    """Adds a softmax operation to this model"""
    with tf.variable_scope(self._get_layer_str()):
        squared = tf.square(self.get_output())
        # Normalize over every axis except the batch axis (axis 0).
        sum_axes = list(range(1, len(squared.get_shape())))
        normalizer = tf.reduce_sum(squared,
                                   reduction_indices=sum_axes,
                                   keep_dims=True)
        # epsilon guards against division by an all-zero activation row
        out = squared / (normalizer + FLAGS.epsilon)
        #out = tf.verify_tensor_all_finite(out, "add_softmax failed; is sum equal to zero?")
        self.outputs.append(out)
    return self
示例5: set_up_sigmoid_pixelwise_loss
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import verify_tensor_all_finite [as 别名]
def set_up_sigmoid_pixelwise_loss(self, logits):
    """Sets up the loss function of the model."""
    assert self.labels is not None
    assert self.loss_weights is not None
    # Per-pixel sigmoid cross-entropy, re-weighted element-wise before the mean.
    per_pixel = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,
                                                        labels=self.labels)
    weighted = per_pixel * self.loss_weights
    self.loss = tf.reduce_mean(weighted)
    tf.summary.scalar('pixel_loss', self.loss)
    # Route the loss through a finiteness check so NaN/Inf fails loudly.
    self.loss = tf.verify_tensor_all_finite(self.loss, 'Invalid loss detected')
示例6: init_learnable_params
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import verify_tensor_all_finite [as 别名]
def init_learnable_params(self):
    """Creates the per-order embedding variables and the bias variable."""
    self.w = [None] * self.order
    for degree in range(1, self.order + 1):
        # The linear (degree-1) term needs a single column; higher-degree
        # interactions use the configured rank.
        n_cols = 1 if degree == 1 else self.rank
        init_vals = tf.random_uniform([self.n_features, n_cols],
                                      -self.init_std, self.init_std)
        weight_var = tf.Variable(init_vals, trainable=True,
                                 name='embedding_' + str(degree))
        # Wrap each weight matrix in a finiteness assertion.
        self.w[degree - 1] = tf.verify_tensor_all_finite(
            weight_var, msg='NaN or Inf in w[{}].'.format(degree - 1))
    self.b = tf.Variable(self.init_std, trainable=True, name='bias')
    tf.summary.scalar('bias', self.b)
示例7: init_target
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import verify_tensor_all_finite [as 别名]
def init_target(self):
    """Builds the regularized objective and a finiteness-checked copy of it."""
    with tf.name_scope('target') as scope:
        # Objective = data loss + weighted regularization penalty.
        self.target = self.reduced_loss + self.reg * self.regularization
        self.checked_target = tf.verify_tensor_all_finite(
            self.target, msg='NaN or Inf in target value', name='target')
        tf.summary.scalar('target', self.checked_target)
示例8: testVerifyTensorAllFiniteSucceeds
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import verify_tensor_all_finite [as 别名]
def testVerifyTensorAllFiniteSucceeds(self):
    """verify_tensor_all_finite must pass finite values through unchanged."""
    shape = [5, 4]
    finite_input = np.random.random_sample(shape).astype(np.float32)
    with self.test_session(use_gpu=True):
        tensor = tf.constant(finite_input, shape=shape, dtype=tf.float32)
        verified = tf.verify_tensor_all_finite(tensor, "Input is not a number.")
        # The op is an identity for finite inputs.
        self.assertAllClose(finite_input, verified.eval())
示例9: build_summary_op
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import verify_tensor_all_finite [as 别名]
def build_summary_op(self):
    """Creates the saver, summary writer, and a finiteness-gated summary op."""
    cfg = self.config
    self.saver = tf.train.Saver(max_to_keep=5)
    self.summary_writer = tf.summary.FileWriter(cfg['log/dir'],
                                                self.session.graph,
                                                flush_secs=2)
    # Gate summary merging on the ELBO being finite, so a NaN/Inf ELBO
    # aborts before summaries are written.
    elbo_check = tf.verify_tensor_all_finite(self.elbo_sum, 'ELBO check')
    with tf.control_dependencies([elbo_check]):
        self.summary_op = tf.summary.merge_all()
示例10: embed
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import verify_tensor_all_finite [as 别名]
def embed(sequence_batch, embeds):
    """Look up embeddings for a SequenceBatch, zeroing padded positions."""
    values, mask = sequence_batch.values, sequence_batch.mask
    gathered = tf.gather(embeds, values)
    gathered = tf.verify_tensor_all_finite(gathered, 'embedded_values')
    # set all pad embeddings to zero
    pad_mask = expand_dims_for_broadcast(mask, gathered)
    return SequenceBatch(gathered * pad_mask, mask)
示例11: __init__
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import verify_tensor_all_finite [as 别名]
def __init__(self, rnn_states, type_embedder, name='DelexicalizedDynamicPredicateEmbedder'):
    """Construct DelexicalizedDynamicPredicateEmbedder.

    Builds the graph pieces that embed each predicate as the mean of the RNN
    states at its mention positions, concatenated with a type embedding.

    Args:
        rnn_states (SequenceBatch): of shape (num_contexts, seq_length, rnn_state_dim)
        type_embedder (TokenEmbedder)
        name (str)
    """
    self._type_embedder = type_embedder

    with tf.name_scope(name):
        # column indices of rnn_states (indexes time)
        self._col_indices = FeedSequenceBatch()  # (num_predicates, max_predicate_mentions)

        # row indices of rnn_states (indexes utterance)
        self._row_indices = tf.placeholder(dtype=tf.int32, shape=[None])  # (num_predicates,)
        # Broadcast row indices across the mention dimension so each
        # (row, col) pair addresses one RNN state.
        row_indices_expanded = expand_dims_for_broadcast(self._row_indices, self._col_indices.values)

        # (num_predicates, max_predicate_mentions, rnn_state_dim)
        rnn_states_selected = SequenceBatch(
            gather_2d(rnn_states.values, row_indices_expanded, self._col_indices.values),
            self._col_indices.mask)

        # Mean over mentions; allow_empty=True so a predicate with no
        # mentions yields a defined (rather than NaN) embedding —
        # presumably zeros; verify against reduce_mean's implementation.
        # (num_predicates, rnn_state_dim)
        rnn_embeds = reduce_mean(rnn_states_selected, allow_empty=True)
        rnn_embeds = tf.verify_tensor_all_finite(rnn_embeds, "RNN-state-based embeddings")

        self._type_seq_embedder = MeanSequenceEmbedder(type_embedder.embeds, name='TypeEmbedder')
        # NOTE(review): tf.concat(1, ...) is the pre-TF-1.0 call signature
        # (axis passed first); concatenation is along the feature axis.
        self._embeds = tf.concat(1, [rnn_embeds, self._type_seq_embedder.embeds])
示例12: well_defined
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import verify_tensor_all_finite [as 别名]
def well_defined():
    """A decorator which checks function argument tensors.

    Checked tensors must have the same shape at graph runtime as they had at
    graph construction time.

    Checked tensors must contain only finite values.

    This calls either tf.verify_tensor_all_finite or lt.verify_tensor_all_finite
    on all input tf.Tensors and lt.LabeledTensors.

    Returns:
        A function to use as a decorator.
    """
    # PERF: built once here instead of being rebuilt for every argument of
    # every wrapped call, as the original did inside the loop. A frozenset
    # also gives O(1) membership tests (tf.DType is hashable).
    float_types = frozenset([tf.float16, tf.float32, tf.float64])

    def check(f):
        """Check the inputs."""
        # TODO(ericmc): Should we also check kwds?
        @functools.wraps(f)
        def new_f(*args, **kwds):
            """A helper function."""
            new_args = []
            for a in args:
                if isinstance(a, tf.Tensor):
                    new_a = shape_unlabeled(a)
                    if a.dtype in float_types:
                        new_a = tf.verify_tensor_all_finite(new_a, msg='')
                elif isinstance(a, lt.LabeledTensor):
                    new_a = shape(a)
                    if a.tensor.dtype in float_types:
                        new_a = lt.verify_tensor_all_finite(new_a, message='')
                else:
                    # Non-tensor arguments pass through untouched.
                    new_a = a
                new_args.append(new_a)

            return f(*new_args, **kwds)

        return new_f

    return check
示例13: add_softmax
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import verify_tensor_all_finite [as 别名]
def add_softmax(self):
    """Adds a softmax operation to this model"""
    squared = tf.square(self.get_output())
    # Normalize over every axis except the batch axis (axis 0).
    sum_axes = list(range(1, len(squared.get_shape())))
    normalizer = tf.reduce_sum(squared,
                               reduction_indices=sum_axes,
                               keep_dims=True)
    # epsilon guards against division by an all-zero activation row
    out = squared / (normalizer + FLAGS.epsilon)
    #out = tf.verify_tensor_all_finite(out, "add_softmax failed; is sum equal to zero?")
    self.outputs.append(out)
    return self
示例14: _build_graph
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import verify_tensor_all_finite [as 别名]
def _build_graph(self):
    """Builds the training graph: placeholders, model body, loss, train op."""
    self._add_placeholders()

    _logits, self._predictions = self._build_body()

    # Per-token target weights — presumably zero on padding positions so
    # padded tokens contribute no loss; confirm against placeholder setup.
    _weights = tf.expand_dims(self._tgt_weights, -1)
    # Unreduced (per-token) cross-entropy so we can normalize ourselves.
    _loss_tensor = \
        tf.losses.sparse_softmax_cross_entropy(logits=_logits,
                                               labels=self._decoder_outputs,
                                               weights=_weights,
                                               reduction=tf.losses.Reduction.NONE)
    # Fail fast on NaN/Inf before reducing.
    _loss_tensor = \
        tf.verify_tensor_all_finite(_loss_tensor, "Non finite values in loss tensor.")
    # normalize loss by batch_size
    self._loss = tf.reduce_sum(_loss_tensor) / tf.cast(self._batch_size, tf.float32)
    # self._loss = tf.reduce_mean(_loss_tensor, name='loss')

    # TODO: tune clip_norm
    self._train_op = \
        self.get_train_op(self._loss,
                          learning_rate=self._learning_rate,
                          optimizer=self._optimizer,
                          clip_norm=2.)
    # log.info("Trainable variables")
    # for v in tf.trainable_variables():
    #     log.info(v)
    # self.print_number_of_parameters()
示例15: loss
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import verify_tensor_all_finite [as 别名]
def loss(self, y_true, y_pred, mean=True):
    """Negative log-likelihood loss of a negative-binomial output model.

    Args:
        y_true: observed values; NaNs are treated as missing when
            self.masking is set.
        y_pred: predicted mean, scaled by self.scale_factor.
        mean: if True, reduce the element-wise loss to a scalar.

    Returns:
        The element-wise loss tensor, or its (masked) mean when `mean` is True.
    """
    scale_factor = self.scale_factor
    eps = self.eps

    with tf.name_scope(self.scope):
        y_true = tf.cast(y_true, tf.float32)
        y_pred = tf.cast(y_pred, tf.float32) * scale_factor

        if self.masking:
            # Count observed (non-NaN) entries BEFORE zeroing them, so the
            # masked mean below divides by the true number of observations.
            nelem = _nelem(y_true)
            y_true = _nan2zero(y_true)

        # Clip theta
        theta = tf.minimum(self.theta, 1e6)

        # Negative log-likelihood split into a lgamma part (t1) and a
        # log-ratio part (t2); eps guards the logs and lgammas near zero.
        t1 = tf.lgamma(theta+eps) + tf.lgamma(y_true+1.0) - tf.lgamma(y_true+theta+eps)
        t2 = (theta+y_true) * tf.log(1.0 + (y_pred/(theta+eps))) + (y_true * (tf.log(theta+eps) - tf.log(y_pred+eps)))

        if self.debug:
            # In debug mode, force finiteness checks to run before the sum
            # via control dependencies, and record histograms of both terms.
            assert_ops = [
                tf.verify_tensor_all_finite(y_pred, 'y_pred has inf/nans'),
                tf.verify_tensor_all_finite(t1, 't1 has inf/nans'),
                tf.verify_tensor_all_finite(t2, 't2 has inf/nans')]

            tf.summary.histogram('t1', t1)
            tf.summary.histogram('t2', t2)

            with tf.control_dependencies(assert_ops):
                final = t1 + t2
        else:
            final = t1 + t2

        # Any remaining NaN becomes +Inf so it cannot silently average away.
        final = _nan2inf(final)

        if mean:
            if self.masking:
                # Mean over observed entries only (zeros from masking would
                # otherwise deflate a plain reduce_mean).
                final = tf.divide(tf.reduce_sum(final), nelem)
            else:
                final = tf.reduce_mean(final)

    return final