This article collects typical usage examples of the Python method tensorflow.assert_less. If you are unsure what tensorflow.assert_less does, how to call it, or what real uses look like, the curated code examples below should help. You can also browse further usage examples from the tensorflow module itself.
Fifteen code examples of tensorflow.assert_less are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
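Before diving into the examples, here is a minimal sketch of the pattern they all share, assuming the TF 1.x graph-mode API that these snippets use: tf.assert_less returns an assertion op, and it only takes effect once it is attached as a control dependency of the ops you actually evaluate.

import tensorflow as tf  # TF 1.x graph-mode API assumed

x = tf.constant([1, 2, 3])
limit = tf.constant(5)

# The assertion fails at run time if any element of x is >= limit.
assertion = tf.assert_less(x, limit, message='x must be less than limit')
with tf.control_dependencies([assertion]):
  y = tf.identity(x)  # y is only computed after the assertion passes

with tf.Session() as sess:
  print(sess.run(y))  # [1 2 3]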
Example 1: replace
# Module to import: import tensorflow [as alias]
# Or: from tensorflow import assert_less [as alias]
def replace(self, episodes, length, rows=None):
  """Replace full episodes.

  Args:
    episodes: Tuple of transition quantities with batch and time dimensions.
    length: Batch of sequence lengths.
    rows: Episodes to replace, defaults to all.

  Returns:
    Operation.
  """
  rows = tf.range(self._capacity) if rows is None else rows
  assert rows.shape.ndims == 1
  assert_capacity = tf.assert_less(
      rows, self._capacity, message='capacity exceeded')
  with tf.control_dependencies([assert_capacity]):
    assert_max_length = tf.assert_less_equal(
        length, self._max_length, message='max length exceeded')
  replace_ops = []
  with tf.control_dependencies([assert_max_length]):
    for buffer_, elements in zip(self._buffers, episodes):
      replace_op = tf.scatter_update(buffer_, rows, elements)
      replace_ops.append(replace_op)
  with tf.control_dependencies(replace_ops):
    return tf.scatter_update(self._length, rows, length)
Example 2: sparse_softmax_cross_entropy
# Module to import: import tensorflow [as alias]
# Or: from tensorflow import assert_less [as alias]
def sparse_softmax_cross_entropy(labels,
                                 logits,
                                 num_classes,
                                 weights=1.0,
                                 label_smoothing=0.1):
  """Softmax cross entropy with example weights, label smoothing."""
  assert_valid_label = [
      tf.assert_greater_equal(labels, tf.cast(0, dtype=tf.int64)),
      tf.assert_less(labels, tf.cast(num_classes, dtype=tf.int64))
  ]
  with tf.control_dependencies(assert_valid_label):
    labels = tf.reshape(labels, [-1])
    dense_labels = tf.one_hot(labels, num_classes)
    loss = tf.losses.softmax_cross_entropy(
        onehot_labels=dense_labels,
        logits=logits,
        weights=weights,
        label_smoothing=label_smoothing)
  return loss
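A hypothetical call to the helper above; the label values, the logits shape, and num_classes are illustrative assumptions, not part of the original example:

# Hypothetical usage sketch (TF 1.x): 3 examples, 4 classes.
labels = tf.constant([0, 2, 1], dtype=tf.int64)  # one class id per example
logits = tf.random_normal([3, 4])
loss = sparse_softmax_cross_entropy(labels, logits, num_classes=4)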
Example 3: replace
# Module to import: import tensorflow [as alias]
# Or: from tensorflow import assert_less [as alias]
def replace(self, episodes, length, rows=None):
  """Replace full episodes.

  Args:
    episodes: Tuple of transition quantities with batch and time dimensions.
    length: Batch of sequence lengths.
    rows: Episodes to replace, defaults to all.

  Returns:
    Operation.
  """
  rows = tf.range(self._capacity) if rows is None else rows
  assert rows.shape.ndims == 1
  assert_capacity = tf.assert_less(
      rows, self._capacity, message='capacity exceeded')
  with tf.control_dependencies([assert_capacity]):
    assert_max_length = tf.assert_less_equal(
        length, self._max_length, message='max length exceeded')
  with tf.control_dependencies([assert_max_length]):
    replace_ops = tools.nested.map(
        lambda var, val: tf.scatter_update(var, rows, val),
        self._buffers, episodes, flatten=True)
  with tf.control_dependencies(replace_ops):
    return tf.scatter_update(self._length, rows, length)
Example 4: append
# Module to import: import tensorflow [as alias]
# Or: from tensorflow import assert_less [as alias]
def append(self, transitions, rows=None):
  """Append a batch of transitions to rows of the memory.

  Args:
    transitions: Tuple of transition quantities with batch dimension.
    rows: Episodes to append to, defaults to all.

  Returns:
    Operation.
  """
  rows = tf.range(self._capacity) if rows is None else rows
  assert rows.shape.ndims == 1
  assert_capacity = tf.assert_less(
      rows, self._capacity,
      message='capacity exceeded')
  with tf.control_dependencies([assert_capacity]):
    assert_max_length = tf.assert_less(
        tf.gather(self._length, rows), self._max_length,
        message='max length exceeded')
  append_ops = []
  with tf.control_dependencies([assert_max_length]):
    for buffer_, elements in zip(self._buffers, transitions):
      timestep = tf.gather(self._length, rows)
      indices = tf.stack([rows, timestep], 1)
      append_ops.append(tf.scatter_nd_update(buffer_, indices, elements))
  with tf.control_dependencies(append_ops):
    episode_mask = tf.reduce_sum(tf.one_hot(
        rows, self._capacity, dtype=tf.int32), 0)
    return self._length.assign_add(episode_mask)
Example 5: tf_assert_almost_equal
# Module to import: import tensorflow [as alias]
# Or: from tensorflow import assert_less [as alias]
def tf_assert_almost_equal(x, y, delta=0.001, **kwargs):
  return tf.assert_less(tf.abs(x - y), delta, **kwargs)
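The helper above just wraps tf.assert_less around an absolute difference; a hypothetical way to use it (the tensor values and the delta are illustrative):

# Hypothetical usage: gate an op on two scalars being almost equal.
a = tf.constant(1.0)
b = tf.constant(1.0004)
check = tf_assert_almost_equal(a, b, delta=0.001, message='a and b differ')
with tf.control_dependencies([check]):
  result = tf.identity(a)  # fails at run time if |a - b| >= delta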
Example 6: test_raises_when_equal
# Module to import: import tensorflow [as alias]
# Or: from tensorflow import assert_less [as alias]
def test_raises_when_equal(self):
  with self.test_session():
    small = tf.constant([1, 2], name="small")
    with tf.control_dependencies(
        [tf.assert_less(small, small, message="fail")]):
      out = tf.identity(small)
    with self.assertRaisesOpError("fail.*small.*small"):
      out.eval()
Example 7: test_raises_when_greater
# Module to import: import tensorflow [as alias]
# Or: from tensorflow import assert_less [as alias]
def test_raises_when_greater(self):
  with self.test_session():
    small = tf.constant([1, 2], name="small")
    big = tf.constant([3, 4], name="big")
    with tf.control_dependencies([tf.assert_less(big, small)]):
      out = tf.identity(small)
    with self.assertRaisesOpError("big.*small"):
      out.eval()
Example 8: test_doesnt_raise_when_less
# Module to import: import tensorflow [as alias]
# Or: from tensorflow import assert_less [as alias]
def test_doesnt_raise_when_less(self):
  with self.test_session():
    small = tf.constant([3, 1], name="small")
    big = tf.constant([4, 2], name="big")
    with tf.control_dependencies([tf.assert_less(small, big)]):
      out = tf.identity(small)
    out.eval()
Example 9: test_doesnt_raise_when_less_and_broadcastable_shapes
# Module to import: import tensorflow [as alias]
# Or: from tensorflow import assert_less [as alias]
def test_doesnt_raise_when_less_and_broadcastable_shapes(self):
  with self.test_session():
    small = tf.constant([1], name="small")
    big = tf.constant([3, 2], name="big")
    with tf.control_dependencies([tf.assert_less(small, big)]):
      out = tf.identity(small)
    out.eval()
Example 10: test_raises_when_less_but_non_broadcastable_shapes
# Module to import: import tensorflow [as alias]
# Or: from tensorflow import assert_less [as alias]
def test_raises_when_less_but_non_broadcastable_shapes(self):
  with self.test_session():
    small = tf.constant([1, 1, 1], name="small")
    big = tf.constant([3, 2], name="big")
    with self.assertRaisesRegexp(ValueError, "must be"):
      with tf.control_dependencies([tf.assert_less(small, big)]):
        out = tf.identity(small)
      out.eval()
Example 11: append
# Module to import: import tensorflow [as alias]
# Or: from tensorflow import assert_less [as alias]
def append(self, transitions, rows=None):
  """Append a batch of transitions to rows of the memory.

  Args:
    transitions: Tuple of transition quantities with batch dimension.
    rows: Episodes to append to, defaults to all.

  Returns:
    Operation.
  """
  rows = tf.range(self._capacity) if rows is None else rows
  assert rows.shape.ndims == 1
  assert_capacity = tf.assert_less(
      rows, self._capacity,
      message='capacity exceeded')
  with tf.control_dependencies([assert_capacity]):
    assert_max_length = tf.assert_less(
        tf.gather(self._length, rows), self._max_length,
        message='max length exceeded')
  with tf.control_dependencies([assert_max_length]):
    timestep = tf.gather(self._length, rows)
    indices = tf.stack([rows, timestep], 1)
    append_ops = tools.nested.map(
        lambda var, val: tf.scatter_nd_update(var, indices, val),
        self._buffers, transitions, flatten=True)
  with tf.control_dependencies(append_ops):
    episode_mask = tf.reduce_sum(tf.one_hot(
        rows, self._capacity, dtype=tf.int32), 0)
    return self._length.assign_add(episode_mask)
Example 12: _distribution_statistics
# Module to import: import tensorflow [as alias]
# Or: from tensorflow import assert_less [as alias]
def _distribution_statistics(distribution: tf.Tensor) -> tf.Tensor:
  """Implementation of `distribution_statistics`."""
  _, num_classes = distribution.shape.as_list()
  assert num_classes is not None

  # Each batch element is a probability distribution.
  max_discrepancy = tf.reduce_max(
      tf.abs(tf.reduce_sum(distribution, axis=1) - 1.0))
  with tf.control_dependencies([tf.assert_less(max_discrepancy, 0.0001)]):
    values = tf.reshape(tf.linspace(0.0, 1.0, num_classes), [1, num_classes])

    mode = tf.to_float(tf.argmax(distribution,
                                 axis=1)) / tf.constant(num_classes - 1.0)
    median = tf.reduce_sum(
        tf.to_float(tf.cumsum(distribution, axis=1) < 0.5),
        axis=1) / tf.constant(num_classes - 1.0)
    mean = tf.reduce_sum(distribution * values, axis=1)
    standard_deviation = tf.sqrt(
        tf.reduce_sum(
            ((values - tf.reshape(mean, [-1, 1]))**2) * distribution, axis=1))
    probability_nonzero = 1.0 - distribution[:, 0]
    entropy = tf.reduce_sum(
        -(distribution * tf.log(distribution + 0.0000001)), axis=1) / tf.log(
            float(num_classes))

    statistics = tf.stack(
        [mode, median, mean, standard_deviation, probability_nonzero, entropy],
        axis=1)
  return statistics
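A rough usage sketch for the function above, assuming a hypothetical batch of softmax outputs (the assert_less check only passes when each row sums to 1 within 1e-4, which tf.nn.softmax guarantees numerically):

# Hypothetical input: 2 probability distributions over 5 classes.
logits = tf.constant([[0.1, 2.0, 0.3, 0.5, 1.0],
                      [1.5, 0.2, 0.2, 0.4, 0.1]])
distribution = tf.nn.softmax(logits, axis=1)
stats = _distribution_statistics(distribution)  # shape [2, 6]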
Example 13: call
# Module to import: import tensorflow [as alias]
# Or: from tensorflow import assert_less [as alias]
def call(self, inputs, **kwargs):
  with tf.control_dependencies(
      [tf.assert_greater_equal(inputs, self.index_offset),
       tf.assert_less(inputs, self.index_offset + self._num_symbols)]):
    return tf.nn.embedding_lookup(self._embedding, inputs - self.index_offset)
Example 14: model_fn
# Module to import: import tensorflow [as alias]
# Or: from tensorflow import assert_less [as alias]
def model_fn(features, labels, mode, config):
  """Model function for custom estimator."""
  del labels
  del config
  classes = features['classes']
  scores = features['scores']
  with tf.control_dependencies(
      [tf.assert_less(tf.shape(classes)[0], tf.constant(2))]):
    scores = tf.identity(scores)
  predictions = {
      prediction_keys.PredictionKeys.LOGITS: scores,
      prediction_keys.PredictionKeys.PROBABILITIES: scores,
      prediction_keys.PredictionKeys.PREDICTIONS: scores,
      prediction_keys.PredictionKeys.CLASSES: classes,
  }
  if mode == tf.estimator.ModeKeys.PREDICT:
    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=predictions,
        export_outputs={
            tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                tf.estimator.export.ClassificationOutput(
                    scores=scores, classes=classes),
        })
  loss = tf.constant(0.0)
  train_op = tf.compat.v1.assign_add(tf.compat.v1.train.get_global_step(), 1)
  eval_metric_ops = {
      metric_keys.MetricKeys.LOSS_MEAN: tf.compat.v1.metrics.mean(loss),
  }
  return tf.estimator.EstimatorSpec(
      mode=mode,
      loss=loss,
      train_op=train_op,
      predictions=predictions,
      eval_metric_ops=eval_metric_ops)
Example 15: create_initial_softmax_from_labels
# Module to import: import tensorflow [as alias]
# Or: from tensorflow import assert_less [as alias]
def create_initial_softmax_from_labels(last_frame_labels, reference_labels,
                                       decoder_output_stride, reduce_labels):
  """Creates initial softmax predictions from last frame labels.

  Args:
    last_frame_labels: last frame labels of shape [1, height, width, 1].
    reference_labels: reference frame labels of shape [1, height, width, 1].
    decoder_output_stride: Integer, the stride of the decoder. Can be None, in
      this case it's assumed that the last_frame_labels and reference_labels
      are already scaled to the decoder output resolution.
    reduce_labels: Boolean, whether to reduce the depth of the softmax one_hot
      encoding to the actual number of labels present in the reference frame
      (otherwise the depth will be the highest label index + 1).

  Returns:
    init_softmax: the initial softmax predictions.
  """
  if decoder_output_stride is None:
    labels_output_size = last_frame_labels
    reference_labels_output_size = reference_labels
  else:
    h = tf.shape(last_frame_labels)[1]
    w = tf.shape(last_frame_labels)[2]
    h_sub = model.scale_dimension(h, 1.0 / decoder_output_stride)
    w_sub = model.scale_dimension(w, 1.0 / decoder_output_stride)
    labels_output_size = tf.image.resize_nearest_neighbor(
        last_frame_labels, [h_sub, w_sub], align_corners=True)
    reference_labels_output_size = tf.image.resize_nearest_neighbor(
        reference_labels, [h_sub, w_sub], align_corners=True)
  if reduce_labels:
    unique_labels, _ = tf.unique(tf.reshape(reference_labels_output_size, [-1]))
    depth = tf.size(unique_labels)
  else:
    depth = tf.reduce_max(reference_labels_output_size) + 1
  one_hot_assertion = tf.assert_less(tf.reduce_max(labels_output_size), depth)
  with tf.control_dependencies([one_hot_assertion]):
    init_softmax = tf.one_hot(tf.squeeze(labels_output_size,
                                         axis=-1),
                              depth=depth,
                              dtype=tf.float32)
  return init_softmax
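A hypothetical driver for the function above; the placeholder shapes and the stride value are illustrative, and the model module (providing scale_dimension) is assumed to be importable from the surrounding codebase:

# Hypothetical call; shapes and the stride value are illustrative only.
last_frame_labels = tf.placeholder(tf.int32, shape=[1, None, None, 1])
reference_labels = tf.placeholder(tf.int32, shape=[1, None, None, 1])
init_softmax = create_initial_softmax_from_labels(
    last_frame_labels, reference_labels,
    decoder_output_stride=4, reduce_labels=True)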