This article collects typical usage examples of the Python function tensorflow.less_equal. If you have been wondering what exactly less_equal does, how to call it, and what real code using it looks like, the hand-picked examples below should help.
A total of 15 code examples of less_equal are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps surface better Python code samples.
Example 1: prune_completely_outside_window
def prune_completely_outside_window(boxlist, window, scope=None):
"""Prunes bounding boxes that fall completely outside of the given window.
The function clip_to_window prunes bounding boxes that fall
completely outside the window, but also clips any bounding boxes that
partially overflow. This function does not clip partially overflowing boxes.
Args:
boxlist: a BoxList holding M_in boxes.
window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]
of the window
scope: name scope.
Returns:
pruned_boxlist: a new BoxList with all bounding boxes partially or fully in
the window.
valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
in the input tensor.
"""
with tf.name_scope(scope, 'PruneCompleteleyOutsideWindow'):
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
coordinate_violations = tf.concat([
tf.greater_equal(y_min, win_y_max), tf.greater_equal(x_min, win_x_max),
tf.less_equal(y_max, win_y_min), tf.less_equal(x_max, win_x_min)
], 1)
valid_indices = tf.reshape(
tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])
return gather(boxlist, valid_indices), valid_indices
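A minimal usage sketch of the same pruning predicate on plain tensors, without the BoxList wrapper and gather helper the example above takes from the TensorFlow Object Detection API (box values are hypothetical; eager TF 2.x assumed):

import tensorflow as tf

boxes = tf.constant([[0.1, 0.1, 0.4, 0.4],   # fully inside the window
                     [0.9, 0.9, 1.5, 1.5],   # partially overflows: kept
                     [1.1, 1.1, 1.6, 1.6]])  # completely outside: pruned
window = tf.constant([0.0, 0.0, 1.0, 1.0])   # [ymin, xmin, ymax, xmax]
y_min, x_min, y_max, x_max = tf.split(boxes, num_or_size_splits=4, axis=1)
win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
# A box is completely outside if it lies entirely past any window edge.
violations = tf.concat([
    tf.greater_equal(y_min, win_y_max), tf.greater_equal(x_min, win_x_max),
    tf.less_equal(y_max, win_y_min), tf.less_equal(x_max, win_x_min)], 1)
valid = tf.reshape(tf.where(tf.logical_not(tf.reduce_any(violations, 1))), [-1])
print(valid.numpy())  # -> [0 1]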
Example 2: _get_input_filter
def _get_input_filter(width, width_threshold, length, length_threshold):
"""Boolean op for discarding input data based on string or image size
Input:
width : Tensor representing the image width
width_threshold : Python numerical value (or None) representing the
maximum allowable input image width
length : Tensor representing the ground truth string length
length_threshold : Python numerical value (or None) representing the
maximum allowable input string length
Returns:
keep_input : Boolean Tensor indicating whether to keep a given input
with the specified image width and string length
"""
    keep_input = None
    if width_threshold is not None:
        keep_input = tf.less_equal(width, width_threshold)
    if length_threshold is not None:
        length_filter = tf.less_equal(length, length_threshold)
        if keep_input is None:
            keep_input = length_filter
        else:
            keep_input = tf.logical_and(keep_input, length_filter)
    if keep_input is None:
        keep_input = True
    else:
        keep_input = tf.reshape(keep_input, [])  # explicitly make a scalar
    return keep_input
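A quick sanity check of the filter above (hypothetical sizes; eager TF 2.x assumed):

import tensorflow as tf

keep = _get_input_filter(width=tf.constant(120), width_threshold=150,
                         length=tf.constant(40), length_threshold=30)
print(keep.numpy())  # False: the string exceeds length_threshold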
Example 3: getReward_touch
def getReward_touch(objCoordinates, sampled_locs, numObjsPresented, objSize, batch_size):
# preallocate for the reward
corner = tf.zeros((2,), dtype=tf.float32, name=None)
# reward = np.zeros(batch_size)
# loop over all examples in the batch
# for b in xrange(batch_size):
b = 0
objCoords_b = objCoordinates[b,:,:]
sampled_locs_b = sampled_locs[b,:,:]
numObjsPres_b = numObjsPresented[b]
nObjTouched = 0
# for the i-th example in the batch, loop over all objects
for j in xrange(maxNumObj):
objCoords_cur = objCoords_b[j,:]
nTimesObjTouched = 0
# for the j-th object, loop over all glimpses to determine if it is fixated
for i in xrange(nGlimpses):
sampledCoord_cur = toMnistCoordinates_tf(sampled_locs_b[i,:], img_size)
l2Diff_obj = l2distance(objCoords_cur, sampledCoord_cur)
l2Diff_corner = l2distance(corner, sampledCoord_cur)
isTouchingObj = tf.less_equal(l2Diff_obj, objSize)
isNotTouchingCorner = tf.greater_equal(l2Diff_corner, objSize)
# true if the current glimpse is fixated on an object
tempTouchFlag = tf.cast(tf.logical_and(isTouchingObj, isNotTouchingCorner), tf.int32)
nTimesObjTouched = nTimesObjTouched + tempTouchFlag
# for the b-th example in the batch, if all objects are touched, then reward = 1, else reward = 0
nObjTouched = nObjTouched + tf.cast(tf.greater_equal(nTimesObjTouched,1), tf.int32)
R_bth = tf.equal(nObjTouched, tf.cast(numObjsPres_b, tf.int32))
return R_bth
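This example depends on globals (maxNumObj, nGlimpses, img_size) and helpers (toMnistCoordinates_tf, l2distance) defined elsewhere in its repository, and on Python 2's xrange. The core test is a simple distance threshold; a self-contained sketch of just that predicate, with hypothetical coordinates (eager TF 2.x assumed):

import tensorflow as tf

obj = tf.constant([14.0, 14.0])      # object centre in pixel coordinates
glimpse = tf.constant([15.0, 13.0])  # sampled fixation location
obj_size = tf.constant(3.0)
l2_diff = tf.norm(obj - glimpse)     # Euclidean distance, like l2distance above
is_touching = tf.less_equal(l2_diff, obj_size)
print(is_touching.numpy())  # True: the glimpse lands within obj_size pixels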
Example 4: get_mask
def get_mask(gt, num_classes, ignore_label):
less_equal_class = tf.less_equal(gt, num_classes-1)
not_equal_ignore = tf.not_equal(gt, ignore_label)
mask = tf.logical_and(less_equal_class, not_equal_ignore)
indices = tf.squeeze(tf.where(mask), 1)
return indices
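Usage sketch (hypothetical label values; eager TF 2.x assumed):

import tensorflow as tf

gt = tf.constant([0, 3, 255, 2, 27], dtype=tf.int32)  # 255 is the ignore label
indices = get_mask(gt, num_classes=21, ignore_label=255)
print(indices.numpy())  # -> [0 1 3]: ignored and out-of-range pixels dropped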
Example 5: ImageSample
def ImageSample(inputs, borderMode='repeat'):
"""
Sample the images using the given coordinates, by bilinear interpolation.
This was described in the paper:
`Spatial Transformer Networks <http://arxiv.org/abs/1506.02025>`_.
This is equivalent to `torch.nn.functional.grid_sample`,
up to some non-trivial coordinate transformation.
This implementation returns the pixel value at pixel (1, 1) for a floating-point coordinate of (1.0, 1.0).
Note that this may not be what you need.
Args:
inputs (list): [images, coords]. images has shape NHWC.
coords has shape (N, H', W', 2), where each pair of the last dimension is a (y, x) real-value
coordinate.
borderMode: either "repeat" or "constant" (zero-filled)
Returns:
tf.Tensor: a tensor named ``output`` of shape (N, H', W', C).
"""
log_deprecated("ImageSample", "Please implement it in your own code instead!", "2018-12-01")
image, mapping = inputs
assert image.get_shape().ndims == 4 and mapping.get_shape().ndims == 4
input_shape = image.get_shape().as_list()[1:]
assert None not in input_shape, \
"Images in ImageSample layer must have fully-defined shape"
assert borderMode in ['repeat', 'constant']
orig_mapping = mapping
mapping = tf.maximum(mapping, 0.0)
lcoor = tf.floor(mapping)
ucoor = lcoor + 1
diff = mapping - lcoor
neg_diff = 1.0 - diff # bxh2xw2x2
lcoory, lcoorx = tf.split(lcoor, 2, 3)
ucoory, ucoorx = tf.split(ucoor, 2, 3)
lyux = tf.concat([lcoory, ucoorx], 3)
uylx = tf.concat([ucoory, lcoorx], 3)
diffy, diffx = tf.split(diff, 2, 3)
neg_diffy, neg_diffx = tf.split(neg_diff, 2, 3)
ret = tf.add_n([sample(image, lcoor) * neg_diffx * neg_diffy,
sample(image, ucoor) * diffx * diffy,
sample(image, lyux) * neg_diffy * diffx,
sample(image, uylx) * diffy * neg_diffx], name='sampled')
if borderMode == 'constant':
max_coor = tf.constant([input_shape[0] - 1, input_shape[1] - 1], dtype=tf.float32)
mask = tf.greater_equal(orig_mapping, 0.0)
mask2 = tf.less_equal(orig_mapping, max_coor)
mask = tf.logical_and(mask, mask2) # bxh2xw2x2
mask = tf.reduce_all(mask, [3]) # bxh2xw2 boolean
mask = tf.expand_dims(mask, 3)
ret = ret * tf.cast(mask, tf.float32)
return tf.identity(ret, name='output')
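The tf.less_equal call supplies the upper-bound half of the 'constant' border mask. A standalone sketch of that mask, with hypothetical coordinates for a 4x8 image (eager TF 2.x assumed):

import tensorflow as tf

coords = tf.constant([[[[-1.0, 2.0], [3.0, 5.0]]]])  # shape (N=1, H'=1, W'=2, 2)
max_coor = tf.constant([3.0, 7.0])                   # [H - 1, W - 1]
in_lower = tf.greater_equal(coords, 0.0)
in_upper = tf.less_equal(coords, max_coor)           # broadcasts over (y, x)
mask = tf.reduce_all(tf.logical_and(in_lower, in_upper), axis=3)
print(mask.numpy())  # [[[False  True]]]: the first coordinate is out of bounds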
Example 6: __init__
def __init__(self, embedding=None, hidden_state_d=100, max_length=80, learning_rate=0.001, dropout_rate=0.5, vocab_size=400001, embedding_d=300, num_classes=2):
self.data = tf.placeholder(dtype=tf.int32, shape=[None, max_length])
self.len = tf.placeholder(dtype=tf.int32, shape=[None])
self.label = tf.placeholder(dtype=tf.float32, shape=[None])
self.neg_label = 1 - self.label
self.co_label = tf.transpose(tf.reshape(tf.concat(0, [self.label, self.neg_label]), [2, -1]))
self.init_embedding(embedding, vocab_size, embedding_d)
# filter len to maxlength
self.maxlen = tf.cast(tf.fill([tf.shape(self.len)[0]], max_length), tf.int64)
self.filter = tf.less_equal(tf.cast(self.len, tf.int64), self.maxlen)
self.clean_len = tf.select(self.filter, tf.cast(self.len, tf.int64), self.maxlen)
self.vec_data = tf.nn.embedding_lookup(self.embedding, self.data)
self.reversed_vec_data = tf.reverse_sequence(self.vec_data, seq_dim=1, seq_lengths=self.clean_len)
with tf.variable_scope('left2right'):
left2right_lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(hidden_state_d, state_is_tuple=True)
self.output, self.state = tf.nn.dynamic_rnn(
left2right_lstm_cell,
self.vec_data,
dtype=tf.float32,
sequence_length=self.len,
)
with tf.variable_scope('right2left'):
right2left_lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(hidden_state_d, state_is_tuple=True)
self.reversed_output, self.reversed_state = tf.nn.dynamic_rnn(
right2left_lstm_cell,
self.reversed_vec_data,
dtype=tf.float32,
sequence_length=self.len,
)
self.last = BiLSTM.last_relevant(self.output, self.len)
self.reversed_last = BiLSTM.last_relevant(self.reversed_output, self.len)
self.final_output = tf.concat(1, [self.last, self.reversed_last])
self.dropout_last = tf.nn.dropout(self.final_output, keep_prob=dropout_rate)
self.weight = tf.Variable(tf.truncated_normal([hidden_state_d * 2, num_classes], stddev=0.1))
self.bias = tf.Variable(tf.constant(0.1, shape=[num_classes]))
self.prediction = tf.nn.softmax(tf.matmul(self.final_output, self.weight) + self.bias)
self.cost = tf.nn.softmax_cross_entropy_with_logits(tf.matmul(self.dropout_last, self.weight) + self.bias, self.co_label)
self.train_op = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost)
self.init_op = tf.initialize_all_variables()
self.prediction_a = tf.argmax(self.prediction, dimension=1)
self.prediction_b = tf.argmax(self.co_label, dimension=1)
self.score = tf.reduce_sum(tf.cast(tf.equal(self.prediction_a, self.prediction_b), dtype=tf.int32)) / tf.size(self.label)
self.sess = tf.Session()
self.sess.run(self.init_op)
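This example is written against a pre-1.0 TensorFlow API (tf.concat(0, ...), tf.select, tf.initialize_all_variables, positional softmax_cross_entropy_with_logits). The length-clipping idiom built on tf.less_equal translates directly to current releases; a sketch with modern ops (eager TF 2.x assumed):

import tensorflow as tf

lengths = tf.constant([12, 95, 80], dtype=tf.int64)
max_len = tf.fill(tf.shape(lengths), tf.constant(80, dtype=tf.int64))
within = tf.less_equal(lengths, max_len)
clean_len = tf.where(within, lengths, max_len)  # tf.where replaces tf.select
print(clean_len.numpy())  # -> [12 80 80]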
Example 7: test_setup
def test_setup(self):
# Create queue coordinator.
self.coord = tf.train.Coordinator()
# Load reader
with tf.name_scope("create_inputs"):
reader = ImageReader(
self.conf.data_dir,
self.conf.valid_data_list,
None, # the images have different sizes
False, # no data-aug
False, # no data-aug
self.conf.ignore_label,
IMG_MEAN,
self.coord)
image, label = reader.image, reader.label # [h, w, 3 or 1]
# Add one batch dimension [1, h, w, 3 or 1]
self.image_batch, self.label_batch = tf.expand_dims(image, dim=0), tf.expand_dims(label, dim=0)
# Create network
if self.conf.encoder_name not in ['res101', 'res50', 'deeplab']:
print('encoder_name ERROR!')
print("Please input: res101, res50, or deeplab")
sys.exit(-1)
elif self.conf.encoder_name == 'deeplab':
net = Deeplab_v2(self.image_batch, self.conf.num_classes, False)
else:
net = ResNet_segmentation(self.image_batch, self.conf.num_classes, False, self.conf.encoder_name)
# predictions
raw_output = net.outputs
raw_output = tf.image.resize_bilinear(raw_output, tf.shape(self.image_batch)[1:3,])
raw_output = tf.argmax(raw_output, axis=3)
pred = tf.expand_dims(raw_output, dim=3)
self.pred = tf.reshape(pred, [-1,])
# labels
gt = tf.reshape(self.label_batch, [-1,])
# Ignoring all labels greater than or equal to n_classes.
temp = tf.less_equal(gt, self.conf.num_classes - 1)
weights = tf.cast(temp, tf.int32)
# fix for tf 1.3.0
gt = tf.where(temp, gt, tf.cast(temp, tf.uint8))
# Pixel accuracy
self.accu, self.accu_update_op = tf.contrib.metrics.streaming_accuracy(
self.pred, gt, weights=weights)
# mIoU
self.mIoU, self.mIou_update_op = tf.contrib.metrics.streaming_mean_iou(
self.pred, gt, num_classes=self.conf.num_classes, weights=weights)
# confusion matrix
self.confusion_matrix = tf.contrib.metrics.confusion_matrix(
self.pred, gt, num_classes=self.conf.num_classes, weights=weights)
# Loader for loading the checkpoint
self.loader = tf.train.Saver(var_list=tf.global_variables())
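Here the tf.less_equal line is what excludes ignore-label pixels from the metrics: it becomes both the metric weights and the predicate for zeroing out invalid ground-truth values. A standalone sketch (hypothetical labels; eager TF 2.x assumed):

import tensorflow as tf

gt = tf.constant([0, 5, 255, 20], dtype=tf.uint8)  # 255 is the ignore label
valid = tf.less_equal(gt, 21 - 1)                  # num_classes = 21
weights = tf.cast(valid, tf.int32)                 # ignored pixels get weight 0
gt_clean = tf.where(valid, gt, tf.cast(valid, tf.uint8))
print(weights.numpy(), gt_clean.numpy())  # [1 1 0 1] [ 0  5  0 20]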
Example 8: ImageSample
def ImageSample(inputs, borderMode='repeat'):
"""
Sample the template image using the given coordinate, by bilinear interpolation.
This was described in the paper:
`Spatial Transformer Networks <http://arxiv.org/abs/1506.02025>`_.
Args:
inputs (list): [template, coords]. template has shape NHWC.
coords has shape (N,H',W',2), where each pair of the last dimension is a (y, x) real-value
coordinate.
borderMode: either "repeat" or "constant" (zero-filled)
Returns:
tf.Tensor: a tensor named ``output`` of shape (N,H',W',C).
"""
# TODO borderValue
template, mapping = inputs
assert template.get_shape().ndims == 4 and mapping.get_shape().ndims == 4
input_shape = template.get_shape().as_list()[1:]
assert None not in input_shape, \
"Images in ImageSample layer must have fully-defined shape"
assert borderMode in ['repeat', 'constant']
orig_mapping = mapping
mapping = tf.maximum(mapping, 0.0)
lcoor = tf.floor(mapping)
ucoor = lcoor + 1
diff = mapping - lcoor
neg_diff = 1.0 - diff # bxh2xw2x2
lcoory, lcoorx = tf.split(lcoor, 2, 3)
ucoory, ucoorx = tf.split(ucoor, 2, 3)
lyux = tf.concat([lcoory, ucoorx], 3)
uylx = tf.concat([ucoory, lcoorx], 3)
diffy, diffx = tf.split(diff, 2, 3)
neg_diffy, neg_diffx = tf.split(neg_diff, 2, 3)
# prod = tf.reduce_prod(diff, 3, keep_dims=True)
# diff = tf.Print(diff, [tf.is_finite(tf.reduce_sum(diff)), tf.shape(prod),
# tf.reduce_max(diff), diff], summarize=50)
ret = tf.add_n([sample(template, lcoor) * neg_diffx * neg_diffy,
sample(template, ucoor) * diffx * diffy,
sample(template, lyux) * neg_diffy * diffx,
sample(template, uylx) * diffy * neg_diffx], name='sampled')
if borderMode == 'constant':
max_coor = tf.constant([input_shape[0] - 1, input_shape[1] - 1], dtype=tf.float32)
mask = tf.greater_equal(orig_mapping, 0.0)
mask2 = tf.less_equal(orig_mapping, max_coor)
mask = tf.logical_and(mask, mask2) # bxh2xw2x2
mask = tf.reduce_all(mask, [3]) # bxh2xw2 boolean
mask = tf.expand_dims(mask, 3)
ret = ret * tf.cast(mask, tf.float32)
return tf.identity(ret, name='output')
Example 9: losses
def losses(self, targets, logits, seq_len,
scope='ctc_losses'):
"""Define the network losses.
"""
with tf.control_dependencies([tf.less_equal(targets.dense_shape[1], tf.reduce_max(tf.cast(seq_len, tf.int64)))]):
with tf.name_scope(scope):
loss = tf.nn.ctc_loss(targets, logits, seq_len, time_major=False, ignore_longer_outputs_than_inputs=True)
cost = tf.reduce_mean(loss)
return cost
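Note that a control dependency only forces the comparison op to run; it does not fail when the comparison evaluates to false. If the goal is to actually validate that the target length fits the inputs, an explicit assertion is the usual pattern (a sketch with hypothetical values; eager TF 2.x assumed):

import tensorflow as tf

seq_len = tf.constant([30, 42], dtype=tf.int32)
target_steps = tf.constant(40, dtype=tf.int64)  # e.g. targets.dense_shape[1]
tf.debugging.assert_less_equal(
    target_steps, tf.reduce_max(tf.cast(seq_len, tf.int64)),
    message='targets are longer than the longest input sequence')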
Example 10: example_to_bucket_id
def example_to_bucket_id(example_input, example_target):
"""Return int64 bucket id for this example, calculated based on length."""
seq_length = _get_example_length((example_input, example_target))
# TODO: investigate whether removing code branching improves performance.
conditions_c = tf.logical_and(
tf.less_equal(buckets_min, seq_length),
tf.less(seq_length, buckets_max))
bucket_id = tf.reduce_min(tf.where(conditions_c))
return bucket_id
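Here buckets_min and buckets_max are captured from the enclosing scope (Example 12 below shows one way such boundaries are built). A self-contained sketch of the bucket lookup with hypothetical boundaries (eager TF 2.x assumed):

import tensorflow as tf

buckets_min = tf.constant([0, 8, 16])
buckets_max = tf.constant([8, 16, 2 ** 30])
seq_length = tf.constant(11)
in_bucket = tf.logical_and(tf.less_equal(buckets_min, seq_length),
                           tf.less(seq_length, buckets_max))
bucket_id = tf.reduce_min(tf.where(in_bucket))
print(bucket_id.numpy())  # -> 1: length 11 falls in the [8, 16) bucket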
Example 11: _log_prob_single
def _log_prob_single(tensor):
stddev = tf.sqrt(scale_factor / calculate_variance_factor(tensor.shape, mode))
z = (tensor - mean) / stddev
log_prob_z = - (z ** 2 + tf.log(2 * pi)) / 2
log_prob = tf.reduce_sum(log_prob_z)
if truncated:
from numpy import inf
log_prob -= tf.log(TRUNCATED_NORMALIZER)
invalid = tf.logical_or(tf.less_equal(z, -2), tf.greater_equal(z, 2))
log_prob = tf.where(invalid, -inf, log_prob)
# Return negative as this is a regularizer
return - log_prob
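Here pi presumably comes from math.pi, and tf.log is the pre-TF-2 spelling of tf.math.log. The tf.less_equal/tf.greater_equal pair marks samples outside the two-standard-deviation truncation window; a standalone sketch (eager TF 2.x assumed, where tf.where broadcasts the scalar -inf):

import numpy as np
import tensorflow as tf

z = tf.constant([-3.0, 0.5, 2.5])
log_prob = -(z ** 2 + tf.math.log(2 * np.pi)) / 2  # standard-normal log-density
invalid = tf.logical_or(tf.less_equal(z, -2.0), tf.greater_equal(z, 2.0))
log_prob = tf.where(invalid, -np.inf, log_prob)    # zero density outside (-2, 2)
print(log_prob.numpy())  # -> [-inf, -1.0439..., -inf]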
Example 12: example_to_bucket_id
def example_to_bucket_id(example):
"""Return int64 id of the length bucket for this example."""
seq_length = example_length_fn(example)
boundaries = list(bucket_boundaries)
buckets_min = [np.iinfo(np.int32).min] + boundaries
buckets_max = boundaries + [np.iinfo(np.int32).max]
conditions_c = tf.logical_and(
tf.less_equal(buckets_min, seq_length),
tf.less(seq_length, buckets_max))
bucket_id = tf.reduce_min(tf.where(conditions_c))
return bucket_id
Example 13: _subsample_selection_to_desired_neg_pos_ratio
def _subsample_selection_to_desired_neg_pos_ratio(self,
indices,
match,
max_negatives_per_positive,
min_negatives_per_image=0):
"""Subsample a collection of selected indices to a desired neg:pos ratio.
This function takes a subset of M indices (indexing into a large anchor
collection of N anchors where M<N) which are labeled as positive/negative
via a Match object (matched indices are positive, unmatched indices
are negative). It returns a subset of the provided indices retaining all
positives as well as up to the first K negatives, where:
K=floor(num_negative_per_positive * num_positives).
For example, if indices=[2, 4, 5, 7, 9, 10] (indexing into 12 anchors),
with positives=[2, 5] and negatives=[4, 7, 9, 10] and
num_negatives_per_positive=1, then the returned subset of indices
is [2, 4, 5, 7].
Args:
indices: An integer tensor of shape [M] representing a collection
of selected anchor indices
match: A matcher.Match object encoding the match between anchors and
groundtruth boxes for a given image, with rows of the Match objects
corresponding to groundtruth boxes and columns corresponding to anchors.
max_negatives_per_positive: (float) maximum number of negatives for
each positive anchor.
min_negatives_per_image: minimum number of negative anchors for a given
image. Allow sampling negatives in image without any positive anchors.
Returns:
selected_indices: An integer tensor of shape [M'] representing a
collection of selected anchor indices with M' <= M.
num_positives: An integer tensor representing the number of positive
examples in selected set of indices.
num_negatives: An integer tensor representing the number of negative
examples in selected set of indices.
"""
positives_indicator = tf.gather(match.matched_column_indicator(), indices)
negatives_indicator = tf.gather(match.unmatched_column_indicator(), indices)
num_positives = tf.reduce_sum(tf.to_int32(positives_indicator))
max_negatives = tf.maximum(min_negatives_per_image,
tf.to_int32(max_negatives_per_positive *
tf.to_float(num_positives)))
topk_negatives_indicator = tf.less_equal(
tf.cumsum(tf.to_int32(negatives_indicator)), max_negatives)
subsampled_selection_indices = tf.where(
tf.logical_or(positives_indicator, topk_negatives_indicator))
num_negatives = tf.size(subsampled_selection_indices) - num_positives
return (tf.reshape(tf.gather(indices, subsampled_selection_indices), [-1]),
num_positives, num_negatives)
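A sketch that mirrors the docstring example above: indices=[2, 4, 5, 7, 9, 10], positives at 2 and 5, one negative allowed per positive (eager TF 2.x assumed; the Match object is replaced by hard-coded indicators):

import tensorflow as tf

indices = tf.constant([2, 4, 5, 7, 9, 10])
positives = tf.constant([True, False, True, False, False, False])
negatives = tf.logical_not(positives)
num_positives = tf.reduce_sum(tf.cast(positives, tf.int32))
max_negatives = num_positives * 1  # max_negatives_per_positive = 1
# Keep a negative only while the running count of negatives is within budget.
topk_negatives = tf.less_equal(tf.cumsum(tf.cast(negatives, tf.int32)),
                               max_negatives)
keep = tf.reshape(tf.where(tf.logical_or(positives, topk_negatives)), [-1])
print(tf.gather(indices, keep).numpy())  # -> [2 4 5 7]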
Example 14: _length_constraints
def _length_constraints(length, maximum_length):
# Work with lists of lengths which correspond to the general multi source case.
if not isinstance(length, list):
length = [length]
if not isinstance(maximum_length, list):
maximum_length = [maximum_length]
# Unset maximum lengths are set to None (i.e. no constraint).
maximum_length += [None] * (len(length) - len(maximum_length))
constraints = []
for l, maxlen in zip(length, maximum_length):
constraints.append(tf.greater(l, 0))
if maxlen is not None:
constraints.append(tf.less_equal(l, maxlen))
return constraints
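Usage sketch (hypothetical lengths; eager TF 2.x assumed). Note how the single unset maximum is padded with None, so the second source gets no upper bound:

import tensorflow as tf

constraints = _length_constraints(
    length=[tf.constant(12), tf.constant(7)], maximum_length=[50])
keep = tf.reduce_all(tf.stack(constraints))
print(keep.numpy())  # True: both lengths are positive and 12 <= 50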
Example 15: sampling_loop
def sampling_loop(prev_state, i):
"""
Loop function performing the scheduled sampling
(http://arxiv.org/pdf/1506.03099v3.pdf) with the inverse
sigmoid decay.
"""
threshold = scheduled_sampling / (scheduled_sampling + tf.exp(
tf.to_float(self.learning_step) / scheduled_sampling))
condition = tf.less_equal(
tf.random_uniform(tf.shape(embedded_gt_inputs[0])),
threshold)
return tf.select(condition, embedded_gt_inputs[i],
loop(prev_state, i))
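This example also predates TF 1.0 (tf.select is now tf.where) and relies on embedded_gt_inputs, loop, and self.learning_step from the enclosing model. The inverse sigmoid decay itself is easy to isolate; a sketch with a hypothetical decay constant (eager TF 2.x assumed):

import tensorflow as tf

k = 1000.0  # scheduled_sampling decay constant (hypothetical)
for step in [0, 1000, 5000]:
    threshold = k / (k + tf.exp(tf.cast(step, tf.float32) / k))
    # Use ground truth with probability `threshold`, model output otherwise.
    use_gt = tf.less_equal(tf.random.uniform([3]), threshold)
    print(step, float(threshold), use_gt.numpy())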