This article collects typical usage examples of the tensorflow.minimum function in Python. If you are wondering what tf.minimum does, how to call it, or what real-world uses look like, the curated code examples below should help.
The following presents 15 code examples of the minimum function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
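Before the examples, here is a minimal, self-contained sketch of tf.minimum itself, written against the TF 1.x API that all the snippets below use (the values are made up for illustration): it takes the element-wise minimum of two tensors, broadcasts shapes, and combined with tf.maximum gives the clipping pattern that recurs throughout this page.
import tensorflow as tf

a = tf.constant([1.0, 5.0, 3.0])
b = tf.constant([2.0, 2.0, 2.0])
m = tf.minimum(a, b)                            # element-wise minimum -> [1.0, 2.0, 2.0]
capped = tf.minimum(a, 2.5)                     # broadcasting a scalar -> [1.0, 2.5, 2.5]
clipped = tf.minimum(tf.maximum(a, 2.0), 4.0)   # clamp to [2.0, 4.0]  -> [2.0, 4.0, 3.0]

with tf.Session() as sess:
    print(sess.run([m, capped, clipped]))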
Example 1: IoU
def IoU(bbox, gt):
    # bbox = [ x , y , w , h ] ( x , y left up)
    shape = [-1, 1]
    x1 = tf.maximum(tf.cast(bbox[0], tf.float32), tf.reshape(tf.cast(gt[:, 0], tf.float32), shape))
    y1 = tf.maximum(tf.cast(bbox[1], tf.float32), tf.reshape(tf.cast(gt[:, 1], tf.float32), shape))
    x2 = tf.minimum(tf.cast(bbox[2] + bbox[0], tf.float32), tf.reshape(tf.cast(gt[:, 2] + gt[:, 0], tf.float32), shape))
    y2 = tf.minimum(tf.cast(bbox[3] + bbox[1], tf.float32), tf.reshape(tf.cast(gt[:, 3] + gt[:, 1], tf.float32), shape))
    inter_w = tf.sub(x2, x1)
    inter_h = tf.sub(y2, y1)
    inter = tf.cast(inter_w * inter_h, tf.float32)
    bounding_box = tf.cast(tf.mul(bbox[2], bbox[3]), tf.float32)
    ground_truth = tf.reshape(tf.cast(tf.mul(gt[:, 2], gt[:, 3]), tf.float32), shape)
    #iou = tf.div(inter,tf.sub(tf.add(bounding_box,tf.reshape(ground_truth,shape)),inter))
    iou = inter / (bounding_box + ground_truth - inter)
    # limit the iou range between 0 and 1
    mask_less = tf.cast(tf.logical_not(tf.less(iou, tf.zeros_like(iou))), tf.float32)
    #mask_great = tf.cast(tf.logical_not(tf.greater(iou, tf.ones_like(iou))), tf.float32)
    iou = tf.mul(iou, mask_less)
    #iou = tf.mul(iou, positive_mask)
    return iou
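A hypothetical call to this helper, with one predicted box compared against an [N, 4] ground-truth tensor in the same [x, y, w, h] layout; the tf.reshape(..., [-1, 1]) inside broadcasts the single box against every ground-truth row. Note the snippet uses the pre-1.0 names tf.sub/tf.mul, so running it as-is needs an old TensorFlow build; the values below are made up.
bbox = tf.constant([10.0, 10.0, 20.0, 20.0])            # one box: x, y, w, h
gt = tf.constant([[12.0, 12.0, 20.0, 20.0],
                  [100.0, 100.0, 5.0, 5.0]])            # N ground-truth boxes
overlap = IoU(bbox, gt)                                 # shape [N, 1], one IoU per gt row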
Example 2: compute_IOU
def compute_IOU(bboxA, bboxB):
    """Compute the Intersection Over Union.
    Args:
        bboxA: [N X 4 tensor] format = [left, top, right, bottom]
        bboxB: [N X 4 tensor]
    Return:
        IOU: [N X 1 tensor]
    """
    x1A, y1A, x2A, y2A = tf.split(1, 4, bboxA)
    x1B, y1B, x2B, y2B = tf.split(1, 4, bboxB)
    # compute intersection
    x1_max = tf.maximum(x1A, x1B)
    y1_max = tf.maximum(y1A, y1B)
    x2_min = tf.minimum(x2A, x2B)
    y2_min = tf.minimum(y2A, y2B)
    # overlap_flag = tf.logical_and(tf.less(x1_max, x2_min), tf.less(y1_max, y2_min))
    overlap_flag = tf.to_float(tf.less(x1_max, x2_min)) * \
        tf.to_float(tf.less(y1_max, y2_min))
    overlap_area = tf.mul(overlap_flag, tf.mul(
        x2_min - x1_max, y2_min - y1_max))
    # compute union
    areaA = tf.mul(x2A - x1A, y2A - y1A)
    areaB = tf.mul(x2B - x1B, y2B - y1B)
    union_area = areaA + areaB - overlap_area
    return tf.div(overlap_area, union_area)
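Unlike Example 1, this version expects two aligned [N, 4] tensors and returns one IoU per row pair; overlap_flag zeroes the area whenever the boxes do not actually overlap. An illustrative call with made-up coordinates (again pre-1.0 TensorFlow, because of tf.split(1, 4, ...), tf.mul and tf.div):
boxesA = tf.constant([[0.0, 0.0, 10.0, 10.0],
                      [5.0, 5.0, 15.0, 15.0]])   # [left, top, right, bottom]
boxesB = tf.constant([[5.0, 5.0, 15.0, 15.0],
                      [5.0, 5.0, 15.0, 15.0]])
iou = compute_IOU(boxesA, boxesB)                # [2, 1], roughly [[0.143], [1.0]]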
Example 3: sample_from_discretized_mix_logistic
def sample_from_discretized_mix_logistic(l, nr_mix):
    ls = int_shape(l)
    xs = ls[:-1] + [3]
    # unpack parameters
    logit_probs = l[:, :, :, :nr_mix]
    l = tf.reshape(l[:, :, :, nr_mix:], xs + [nr_mix * 3])
    # sample mixture indicator from softmax
    sel = tf.one_hot(tf.argmax(logit_probs - tf.log(-tf.log(tf.random_uniform(
        logit_probs.get_shape(), minval=1e-5, maxval=1. - 1e-5))), 3), depth=nr_mix, dtype=tf.float32)
    sel = tf.reshape(sel, xs[:-1] + [1, nr_mix])
    # select logistic parameters
    means = tf.reduce_sum(l[:, :, :, :, :nr_mix] * sel, 4)
    log_scales = tf.maximum(tf.reduce_sum(
        l[:, :, :, :, nr_mix:2 * nr_mix] * sel, 4), -7.)
    coeffs = tf.reduce_sum(tf.nn.tanh(
        l[:, :, :, :, 2 * nr_mix:3 * nr_mix]) * sel, 4)
    # sample from logistic & clip to interval
    # we don't actually round to the nearest 8bit value when sampling
    u = tf.random_uniform(means.get_shape(), minval=1e-5, maxval=1. - 1e-5)
    x = means + tf.exp(log_scales) * (tf.log(u) - tf.log(1. - u))
    x0 = tf.minimum(tf.maximum(x[:, :, :, 0], -1.), 1.)
    x1 = tf.minimum(tf.maximum(
        x[:, :, :, 1] + coeffs[:, :, :, 0] * x0, -1.), 1.)
    x2 = tf.minimum(tf.maximum(
        x[:, :, :, 2] + coeffs[:, :, :, 1] * x0 + coeffs[:, :, :, 2] * x1, -1.), 1.)
    return tf.concat([tf.reshape(x0, xs[:-1] + [1]), tf.reshape(x1, xs[:-1] + [1]), tf.reshape(x2, xs[:-1] + [1])], 3)
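The tf.minimum(tf.maximum(x, -1.), 1.) pattern above clamps each sampled channel to the valid pixel range [-1, 1]. A minimal sketch of that clamp on its own, plus the equivalent tf.clip_by_value call, assuming plain TF 1.x and toy values:
x = tf.constant([-1.7, 0.3, 2.4])
clamped = tf.minimum(tf.maximum(x, -1.0), 1.0)   # -> [-1.0, 0.3, 1.0]
same = tf.clip_by_value(x, -1.0, 1.0)            # identical result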
Example 4: bboxes_clip
def bboxes_clip(bbox_ref, bboxes, scope=None):
    """Clip bounding boxes to a reference box.
    Batch-compatible if the first dimension of `bbox_ref` and `bboxes`
    can be broadcasted.
    Args:
        bbox_ref: Reference bounding box. Nx4 or 4 shaped-Tensor;
        bboxes: Bounding boxes to clip. Nx4 or 4 shaped-Tensor or dictionary.
    Return:
        Clipped bboxes.
    """
    # Bboxes is dictionary.
    if isinstance(bboxes, dict):
        with tf.name_scope(scope, 'bboxes_clip_dict'):
            d_bboxes = {}
            for c in bboxes.keys():
                d_bboxes[c] = bboxes_clip(bbox_ref, bboxes[c])
            return d_bboxes
    # Tensors inputs.
    with tf.name_scope(scope, 'bboxes_clip'):
        # Easier with transposed bboxes. Especially for broadcasting.
        bbox_ref = tf.transpose(bbox_ref)
        bboxes = tf.transpose(bboxes)
        # Intersection bboxes and reference bbox.
        ymin = tf.maximum(bboxes[0], bbox_ref[0])
        xmin = tf.maximum(bboxes[1], bbox_ref[1])
        ymax = tf.minimum(bboxes[2], bbox_ref[2])
        xmax = tf.minimum(bboxes[3], bbox_ref[3])
        # Double check! Empty boxes when no-intersection.
        ymin = tf.minimum(ymin, ymax)
        xmin = tf.minimum(xmin, xmax)
        bboxes = tf.transpose(tf.stack([ymin, xmin, ymax, xmax], axis=0))
        return bboxes
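An illustrative call, assuming normalized [ymin, xmin, ymax, xmax] coordinates; the final pair of tf.minimum calls guarantees ymin <= ymax and xmin <= xmax, so a box with no intersection collapses to an empty box instead of producing inverted corners.
bbox_ref = tf.constant([0.0, 0.0, 1.0, 1.0])        # clip to the full image
boxes = tf.constant([[-0.1, 0.2, 0.5, 1.3],
                     [0.4, 0.4, 0.9, 0.8]])
clipped = bboxes_clip(bbox_ref, boxes)              # both rows now lie inside [0, 1]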
Example 5: make_minibatch
def make_minibatch(self, valid_anchors):
    with tf.variable_scope('rpn_minibatch'):
        # in labels (shape is [N, ]): 1 is positive, 0 is negative, -1 is ignored
        labels, anchor_matched_gtboxes, object_mask = \
            self.rpn_find_positive_negative_samples(valid_anchors)  # [num_of_valid_anchors, ]
        positive_indices = tf.reshape(tf.where(tf.equal(labels, 1.0)), [-1])  # use labels is same as object_mask
        num_of_positives = tf.minimum(tf.shape(positive_indices)[0],
                                      tf.cast(self.rpn_mini_batch_size * self.rpn_positives_ratio, tf.int32))
        # num of positives <= minibatch_size * 0.5
        positive_indices = tf.random_shuffle(positive_indices)
        positive_indices = tf.slice(positive_indices, begin=[0], size=[num_of_positives])
        # positive_anchors = tf.gather(self.anchors, positive_indices)
        negative_indices = tf.reshape(tf.where(tf.equal(labels, 0.0)), [-1])
        num_of_negatives = tf.minimum(self.rpn_mini_batch_size - num_of_positives,
                                      tf.shape(negative_indices)[0])
        negative_indices = tf.random_shuffle(negative_indices)
        negative_indices = tf.slice(negative_indices, begin=[0], size=[num_of_negatives])
        # negative_anchors = tf.gather(self.anchors, negative_indices)
        minibatch_indices = tf.concat([positive_indices, negative_indices], axis=0)
        minibatch_indices = tf.random_shuffle(minibatch_indices)
        minibatch_anchor_matched_gtboxes = tf.gather(anchor_matched_gtboxes, minibatch_indices)
        object_mask = tf.gather(object_mask, minibatch_indices)
        labels = tf.cast(tf.gather(labels, minibatch_indices), tf.int32)
        labels_one_hot = tf.one_hot(labels, depth=2)
        return minibatch_indices, minibatch_anchor_matched_gtboxes, object_mask, labels_one_hot
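The two tf.minimum calls here are integer clamps: they cap how many positive and negative anchors are drawn so that together they never exceed the RPN mini-batch size. The same idea in isolation, as a rough self-contained sketch with toy labels (all names and numbers below are made up for illustration):
labels = tf.constant([1.0, 1.0, 0.0, 0.0, 0.0, -1.0])              # toy anchor labels
positive_indices = tf.reshape(tf.where(tf.equal(labels, 1.0)), [-1])
negative_indices = tf.reshape(tf.where(tf.equal(labels, 0.0)), [-1])
minibatch_size = 4
num_pos = tf.minimum(tf.shape(positive_indices)[0],
                     tf.cast(minibatch_size * 0.5, tf.int32))       # cap positives at half the batch
num_neg = tf.minimum(minibatch_size - num_pos,
                     tf.shape(negative_indices)[0])                 # fill the rest with negatives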
Example 6: bboxes_intersection
def bboxes_intersection(bbox_ref, bboxes, name=None):
    """Compute relative intersection between a reference box and a
    collection of bounding boxes. Namely, compute the quotient between
    intersection area and box area.
    Args:
        bbox_ref: (N, 4) or (4,) Tensor with reference bounding box(es).
        bboxes: (N, 4) Tensor, collection of bounding boxes.
    Return:
        (N,) Tensor with relative intersection.
    """
    with tf.name_scope(name, 'bboxes_intersection'):
        # Should be more efficient to first transpose.
        bboxes = tf.transpose(bboxes)
        bbox_ref = tf.transpose(bbox_ref)
        # Intersection bbox and volume.
        int_ymin = tf.maximum(bboxes[0], bbox_ref[0])
        int_xmin = tf.maximum(bboxes[1], bbox_ref[1])
        int_ymax = tf.minimum(bboxes[2], bbox_ref[2])
        int_xmax = tf.minimum(bboxes[3], bbox_ref[3])
        h = tf.maximum(int_ymax - int_ymin, 0.)
        w = tf.maximum(int_xmax - int_xmin, 0.)
        # Volumes.
        inter_vol = h * w
        bboxes_vol = (bboxes[2] - bboxes[0]) * (bboxes[3] - bboxes[1])
        scores = tfe_math.safe_divide(inter_vol, bboxes_vol, 'intersection')
        return scores
Example 7: bboxes_jaccard
def bboxes_jaccard(bbox_ref, bboxes, name=None):
    """Compute jaccard score between a reference box and a collection
    of bounding boxes.
    Args:
        bbox_ref: (N, 4) or (4,) Tensor with reference bounding box(es).
        bboxes: (N, 4) Tensor, collection of bounding boxes.
    Return:
        (N,) Tensor with Jaccard scores.
    """
    with tf.name_scope(name, 'bboxes_jaccard'):
        # Should be more efficient to first transpose.
        bboxes = tf.transpose(bboxes)
        bbox_ref = tf.transpose(bbox_ref)
        # Intersection bbox and volume.
        int_ymin = tf.maximum(bboxes[0], bbox_ref[0])
        int_xmin = tf.maximum(bboxes[1], bbox_ref[1])
        int_ymax = tf.minimum(bboxes[2], bbox_ref[2])
        int_xmax = tf.minimum(bboxes[3], bbox_ref[3])
        h = tf.maximum(int_ymax - int_ymin, 0.)
        w = tf.maximum(int_xmax - int_xmin, 0.)
        # Volumes.
        inter_vol = h * w
        union_vol = -inter_vol \
            + (bboxes[2] - bboxes[0]) * (bboxes[3] - bboxes[1]) \
            + (bbox_ref[2] - bbox_ref[0]) * (bbox_ref[3] - bbox_ref[1])
        jaccard = tfe_math.safe_divide(inter_vol, union_vol, 'jaccard')
        return jaccard
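Example usage, with one reference box scored against several candidates in [ymin, xmin, ymax, xmax] order (coordinates are illustrative, and the call assumes the repository's tf_extended helpers for safe_divide are importable, which keeps the 0/0 case at zero):
bbox_ref = tf.constant([0.2, 0.2, 0.6, 0.6])
candidates = tf.constant([[0.2, 0.2, 0.6, 0.6],    # identical box   -> jaccard 1.0
                          [0.4, 0.4, 0.8, 0.8],    # partial overlap -> roughly 0.14
                          [0.7, 0.7, 0.9, 0.9]])   # disjoint        -> jaccard 0.0
scores = bboxes_jaccard(bbox_ref, candidates)      # shape [3]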
Example 8: disjunction_of_literals
def disjunction_of_literals(literals, label="no_label"):
    list_of_literal_tensors = [lit.tensor for lit in literals]
    literals_tensor = tf.concat(1, list_of_literal_tensors)
    if default_tnorm == "product":
        result = 1.0 - tf.reduce_prod(1.0 - literals_tensor, 1, keep_dims=True)
    if default_tnorm == "yager2":
        result = tf.minimum(1.0, tf.sqrt(tf.reduce_sum(tf.square(literals_tensor), 1, keep_dims=True)))
    if default_tnorm == "luk":
        print("data aggregator is lukas")
        result = tf.minimum(1.0, tf.reduce_sum(literals_tensor, 1, keep_dims=True))
        PR(result)
    if default_tnorm == "goedel":
        result = tf.reduce_max(literals_tensor, 1, keep_dims=True, name=label)
    if default_aggregator == "product":
        return tf.reduce_prod(result, keep_dims=True)
    if default_aggregator == "mean":
        print("data aggregator is mean")
        return tf.reduce_mean(result, keep_dims=True, name=label)
    if default_aggregator == "gmean":
        return tf.exp(tf.mul(tf.reduce_sum(tf.log(result), keep_dims=True),
                             tf.inv(tf.to_float(tf.size(result)))), name=label)
    if default_aggregator == "hmean":
        print("data aggregator is hmean")
        return tf.div(tf.to_float(tf.size(result)), tf.reduce_sum(tf.inv(result), keep_dims=True))
    if default_aggregator == "min":
        print("data aggregator is min")
        return tf.reduce_min(result, keep_dims=True, name=label)
    if default_aggregator == "qmean":
        print("data aggregator is qmean")
        return tf.sqrt(tf.reduce_mean(tf.square(result), keep_dims=True), name=label)
    if default_aggregator == "cmean":
        print("data aggregator is cmean")
        return tf.pow(tf.reduce_mean(tf.pow(result, 3), keep_dims=True), tf.inv(tf.to_float(3)), name=label)
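In the "luk" branch, tf.minimum(1.0, sum) is the Łukasiewicz strong disjunction (bounded sum): the fuzzy truth values of the literals are added and then capped at 1. A self-contained sketch of just that operation, assuming two truth-value tensors in [0, 1] and import tensorflow as tf:
a = tf.constant([0.2, 0.7, 0.9])
b = tf.constant([0.5, 0.6, 0.8])
luk_or = tf.minimum(1.0, a + b)         # bounded sum:            [0.7, 1.0, 1.0]
luk_and = tf.maximum(0.0, a + b - 1.0)  # the matching t-norm:    [0.0, 0.3, 0.7]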
Example 9: IoULoss
def IoULoss(self, pd, gt):
    mask = tf.cast(
        tf.greater(tf.reduce_sum(
            tf.cast(tf.greater(gt, 0), tf.int8), 3), 3),
        tf.float32
    )
    npd = tf.transpose(pd, [3, 0, 1, 2])
    ngt = tf.transpose(gt, [3, 0, 1, 2])
    area_x = tf.mul(
        tf.add(tf.gather(npd, 0), tf.gather(npd, 2)),
        tf.add(tf.gather(npd, 1), tf.gather(npd, 3)),
    )
    area_g = tf.mul(
        tf.add(tf.gather(ngt, 0), tf.gather(ngt, 2)),
        tf.add(tf.gather(ngt, 1), tf.gather(ngt, 3)),
    )
    w_overlap = tf.maximum(tf.constant(0, tf.float32), tf.add(
        tf.minimum(tf.gather(npd, 0), tf.gather(ngt, 0)),
        tf.minimum(tf.gather(npd, 2), tf.gather(ngt, 2)),
    ))
    h_overlap = tf.maximum(tf.constant(0, tf.float32), tf.add(
        tf.minimum(tf.gather(npd, 1), tf.gather(ngt, 1)),
        tf.minimum(tf.gather(npd, 3), tf.gather(ngt, 3)),
    ))
    area_overlap = tf.mul(w_overlap, h_overlap)
    area_u = tf.sub(tf.add(area_x, area_g), area_overlap)
    iou = tf.div(area_overlap, tf.add(area_u, tf.constant(1, tf.float32)))
    iou = tf.maximum(iou, tf.constant(1e-4, tf.float32))
    cost = -tf.log(iou)
    cost = tf.mul(cost, mask)
    cost = tf.reduce_sum(cost)
    return cost
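Here pd and gt appear to hold per-pixel distances to the four box edges (DenseBox/EAST-style regression) rather than corner coordinates, so tf.minimum of matching distance channels directly gives the overlap width and height. A toy check of that identity for a single pixel, with made-up distances:
# distances from one pixel to the (left, top, right, bottom) edges of each box
pd_dist = tf.constant([3.0, 2.0, 5.0, 4.0])
gt_dist = tf.constant([4.0, 1.0, 3.0, 6.0])
w_overlap = tf.minimum(pd_dist[0], gt_dist[0]) + tf.minimum(pd_dist[2], gt_dist[2])  # 3 + 3 = 6
h_overlap = tf.minimum(pd_dist[1], gt_dist[1]) + tf.minimum(pd_dist[3], gt_dist[3])  # 1 + 4 = 5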
Example 10: batch_iou
def batch_iou(bboxes, bbox):
    """Compute iou of a batch of boxes with another box. Box format '[y_min, x_min, y_max, x_max]'.
    Args:
        bboxes: A batch of boxes. 2-D with shape `[B, 4]`.
        bbox: A single box. 1-D with shape `[4]`.
    Returns:
        Batch of IOUs
    """
    lr = tf.maximum(
        tf.minimum(bboxes[:, 3], bbox[3]) -
        tf.maximum(bboxes[:, 1], bbox[1]),
        0
    )
    tb = tf.maximum(
        tf.minimum(bboxes[:, 2], bbox[2]) -
        tf.maximum(bboxes[:, 0], bbox[0]),
        0
    )
    intersection = tf.multiply(tb, lr)
    union = tf.subtract(
        tf.multiply((bboxes[:, 3] - bboxes[:, 1]), (bboxes[:, 2] - bboxes[:, 0])) +
        tf.multiply((bbox[3] - bbox[1]), (bbox[2] - bbox[0])),
        intersection
    )
    iou = tf.div(intersection, union)
    return iou
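A typical use is scoring one detection against a whole batch of boxes, e.g. inside a non-max-suppression style loop. An illustrative call with made-up boxes:
boxes = tf.constant([[0.0, 0.0, 10.0, 10.0],
                     [5.0, 5.0, 15.0, 15.0],
                     [20.0, 20.0, 30.0, 30.0]])   # [y_min, x_min, y_max, x_max]
box = tf.constant([0.0, 0.0, 10.0, 10.0])
ious = batch_iou(boxes, box)                      # roughly [1.0, 0.143, 0.0]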
Example 11: get_next_input
def get_next_input(output):
    # the next location is computed by the location network
    baseline = tf.sigmoid(tf.matmul(output, Wb_h_b) + Bb_h_b)
    baselines.append(baseline)
    # compute the next location, then impose noise
    if eyeCentered:
        # add the last sampled glimpse location
        # TODO max(-1, min(1, u + N(output, sigma) + prevLoc))
        mean_loc = tf.maximum(-1.0, tf.minimum(1.0, tf.matmul(output, Wl_h_l) + sampled_locs[-1]))
    else:
        mean_loc = tf.matmul(output, Wl_h_l)
    # mean_loc = tf.stop_gradient(mean_loc)
    mean_locs.append(mean_loc)
    mean_locs_stopGrad.append(tf.stop_gradient(mean_loc))
    # add noise
    # sample_loc = tf.tanh(mean_loc + tf.random_normal(mean_loc.get_shape(), 0, loc_sd))
    sample_loc = tf.maximum(-1.0, tf.minimum(1.0, mean_loc + tf.random_normal(mean_loc.get_shape(), 0, loc_sd)))
    # don't propagate through the locations
    # sample_loc = tf.stop_gradient(sample_loc)
    sampled_locs.append(sample_loc)
    sampled_locs_stopGrad.append(tf.stop_gradient(sample_loc))
    return get_glimpse(sample_loc)
Example 12: _update_lipschitz
def _update_lipschitz(self, v, i):
    config = self.config
    if len(v.shape) > 1:
        k = self.config.weight_constraint_k or 100.0000
        wi_hat = v
        if len(v.shape) == 4:
            #fij = tf.reduce_sum(tf.abs(wi_hat), axis=[0,1])
            fij = wi_hat
            fij = tf.reduce_sum(tf.abs(fij), axis=[1])
            fij = tf.reduce_max(fij, axis=[0])
        else:
            fij = wi_hat
        if self.config.ortho_pnorm == "inf":
            wp = tf.reduce_max(tf.reduce_sum(tf.abs(fij), axis=0), axis=0)
        else:
            # conv
            wp = tf.reduce_max(tf.reduce_sum(tf.abs(fij), axis=1), axis=0)
        ratio = (1.0 / tf.maximum(1.0, wp / k))
        if self.config.weight_bounce:
            bounce = tf.minimum(1.0, tf.ceil(wp / k - 0.999))
            ratio -= tf.maximum(0.0, bounce) * 0.2
        if self.config.weight_scaleup:
            up = tf.minimum(1.0, tf.ceil(0.02 - wp / k))
            ratio += tf.maximum(0.0, up) * k / wp * 0.2
        wi = ratio * (wi_hat)
        #self.gan.metrics['wi'+str(i)]=wp
        #self.gan.metrics['wk'+str(i)]=ratio
        #self.gan.metrics['bouce'+str(i)]=bounce
        return tf.assign(v, wi)
    return None
Example 13: loss
def loss(y_true_cls, y_pred_cls,
         y_true_geo, y_pred_geo,
         training_mask):
    '''
    Define the loss used for training, consisting of two parts:
    the first part uses dice loss instead of weighted log-loss,
    the second part is the IoU loss defined in the paper.
    :param y_true_cls: ground truth of text
    :param y_pred_cls: prediction of text
    :param y_true_geo: ground truth of geometry
    :param y_pred_geo: prediction of geometry
    :param training_mask: mask used in training, to ignore some text annotated by ###
    :return:
    '''
    classification_loss = dice_coefficient(y_true_cls, y_pred_cls, training_mask)
    # scale classification loss to match the iou loss part
    classification_loss *= 0.01
    # d1 -> top, d2 -> right, d3 -> bottom, d4 -> left
    d1_gt, d2_gt, d3_gt, d4_gt, theta_gt = tf.split(value=y_true_geo, num_or_size_splits=5, axis=3)
    d1_pred, d2_pred, d3_pred, d4_pred, theta_pred = tf.split(value=y_pred_geo, num_or_size_splits=5, axis=3)
    area_gt = (d1_gt + d3_gt) * (d2_gt + d4_gt)
    area_pred = (d1_pred + d3_pred) * (d2_pred + d4_pred)
    w_union = tf.minimum(d2_gt, d2_pred) + tf.minimum(d4_gt, d4_pred)
    h_union = tf.minimum(d1_gt, d1_pred) + tf.minimum(d3_gt, d3_pred)
    area_intersect = w_union * h_union
    area_union = area_gt + area_pred - area_intersect
    L_AABB = -tf.log((area_intersect + 1.0) / (area_union + 1.0))
    L_theta = 1 - tf.cos(theta_pred - theta_gt)
    tf.summary.scalar('geometry_AABB', tf.reduce_mean(L_AABB * y_true_cls * training_mask))
    tf.summary.scalar('geometry_theta', tf.reduce_mean(L_theta * y_true_cls * training_mask))
    L_g = L_AABB + 20 * L_theta
    return tf.reduce_mean(L_g * y_true_cls * training_mask) + classification_loss
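Despite the names w_union/h_union, those two tf.minimum sums compute the width and height of the intersection rectangle from the per-side distances (the same trick as in Example 9), and the +1.0 terms keep the log finite when a pixel has no overlap. A one-value numeric check with made-up areas:
area_intersect = tf.constant(30.0)
area_gt = tf.constant(50.0)
area_pred = tf.constant(40.0)
area_union = area_gt + area_pred - area_intersect               # 60.0
L_AABB = -tf.log((area_intersect + 1.0) / (area_union + 1.0))   # -log(31/61), about 0.68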
Example 14: sum_ohem_loss
def sum_ohem_loss(cls_score, label, bbox_pred, bbox_targets,
                  bbox_inside_weights, bbox_outside_weights,
                  nr_ohem_sampling, sigma=1.0, dim=[1]):
    cls_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=cls_score, labels=label)
    box_loss_base = _smooth_l1_loss_base(bbox_pred, bbox_targets,
                                         bbox_inside_weights,
                                         bbox_outside_weights,
                                         sigma=sigma, dim=[1])
    box_loss = tf.reduce_sum(box_loss_base, axis=dim)
    cls_box_loss = cls_loss + box_loss
    nr_ohem_sampling = tf.minimum(nr_ohem_sampling,
                                  tf.shape(cls_box_loss)[0])
    topk_val, topk_idx = tf.nn.top_k(cls_box_loss, k=nr_ohem_sampling,
                                     sorted=True, name='ohem_loss_index')
    cls_loss_ohem = tf.gather(cls_loss, topk_idx, name='ohem_cls_loss')
    box_loss_ohem = tf.gather(box_loss, topk_idx, name='ohem_box_loss')
    box_loss_ohem = tf.reduce_sum(box_loss_ohem) / \
        tf.to_float(nr_ohem_sampling)
    cls_norm = tf.stop_gradient(tf.minimum(nr_ohem_sampling,
                                           tf.shape(topk_val)[0]))
    # db_cls_norm = tf.py_func(debug_single, [cls_loss, box_loss, topk_idx,
    #                                         cls_loss_ohem, box_loss_ohem, cls_norm], [tf.bool])
    # with tf.control_dependencies(db_cls_norm):
    cls_loss_ohem = tf.reduce_sum(cls_loss_ohem) / tf.to_float(cls_norm)
    return cls_loss_ohem, box_loss_ohem
Example 15: transformer_policy
def transformer_policy(global_step, learning_rate, d_model, warmup_steps,
                       max_lr=None, coefficient=1.0, dtype=tf.float32):
    """Transformer's learning rate policy from https://arxiv.org/pdf/1706.03762.pdf
    with a hat (max_lr) (also called "noam" learning rate decay scheme).
    Args:
        global_step: global step TensorFlow tensor.
        learning_rate (float): initial learning rate to use.
        d_model (int): model dimensionality.
        warmup_steps (int): number of warm-up steps.
        max_lr (float): maximal learning rate, i.e. hat.
        coefficient (float): optimizer adjustment.
            Recommended 0.002 if using "Adam" else 1.0.
        dtype: dtype for this policy.
    Returns:
        learning rate at step ``global_step``.
    """
    step_num = tf.cast(global_step, dtype=dtype)
    ws = tf.cast(warmup_steps, dtype=dtype)
    decay = coefficient * d_model ** -0.5 * tf.minimum(
        (step_num + 1) * ws ** -1.5, (step_num + 1) ** -0.5)
    new_lr = decay * learning_rate
    if max_lr is not None:
        return tf.minimum(max_lr, new_lr)
    else:
        return new_lr
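The inner tf.minimum switches between the linear warm-up term and the inverse-square-root decay term (they cross exactly at warmup_steps), and the outer tf.minimum caps the result at max_lr. A rough plain-Python sketch of the decay factor alone (before multiplying by learning_rate and coefficient), assuming d_model=512 and warmup_steps=4000:
d_model, warmup_steps = 512, 4000

def noam_decay(step):
    # min() picks the warm-up line early on and the inverse-sqrt curve afterwards
    return d_model ** -0.5 * min((step + 1) * warmup_steps ** -1.5,
                                 (step + 1) ** -0.5)

print(noam_decay(0))       # ~1.7e-7: warm-up term dominates at the start
print(noam_decay(3999))    # ~7.0e-4: the two terms meet at warmup_steps
print(noam_decay(40000))   # ~2.2e-4: inverse-sqrt decay afterwards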