This article collects typical usage examples of the tensorflow.maximum function in Python. If you have been wondering what exactly maximum does, how to use it, and what calling it looks like in real code, the curated examples below should help.
Below are 15 code examples of the maximum function, sorted by popularity by default.
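As a quick orientation before the examples: tf.maximum(x, y) computes the element-wise maximum of two tensors and broadcasts like NumPy, which is why it shows up below both as a ReLU-style rectifier and as a clamping primitive. A minimal sketch (assuming the TensorFlow 1.x session API used throughout this page):

import tensorflow as tf

x = tf.constant([-2.0, -0.5, 0.0, 3.0])
# broadcast a scalar: this is exactly the ReLU pattern max(0, x)
relu_like = tf.maximum(0.0, x)
# two tensors of the same shape: element-wise maximum
pairwise = tf.maximum(x, tf.constant([1.0, -1.0, 1.0, -1.0]))

with tf.Session() as sess:
    print(sess.run(relu_like))  # [0. 0. 0. 3.]
    print(sess.run(pairwise))   # [ 1.  -0.5  1.   3. ]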
Example 1: adv_net_loss
def adv_net_loss(input, model, labels, target, adv_output_layer, confidence, c):
    # calculate the l2 distance between the original input and the adversarial examples
    adv_output = model.get_layer(input, adv_output_layer)
    dif = tf.subtract(adv_output, input)
    # reshape_dif = tf.reshape(dif, shape=(dif.get_shape()[0], -1))
    # l2_dis_loss = tf.norm(reshape_dif, axis=1)
    l2_dis_loss = tf.square(dif)
    l2_dis_loss = tf.reduce_mean(l2_dis_loss, name='l2_dis_loss')
    tf.add_to_collection('losses', l2_dis_loss)
    # attack target loss
    logits = model(input)
    one_hot_labels = tf.one_hot(labels, 10)
    real = tf.reduce_sum(one_hot_labels * logits, 1)
    # best competing logit; the true class is pushed out of the max by a large offset
    other_max = tf.reduce_max((1 - one_hot_labels) * logits - one_hot_labels * 10000, 1)
    if target:
        attack_loss = tf.maximum(0.0, other_max - real + confidence)
    else:
        attack_loss = tf.maximum(0.0, real - other_max + confidence)
    attack_loss = tf.reduce_mean(attack_loss, name='attack_loss')
    tf.add_to_collection('losses', attack_loss)
    # total loss: weighted distance term plus the attack term
    total_loss = l2_dis_loss * c + attack_loss
    return total_loss
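Both branches above are hinge losses: tf.maximum(0.0, margin) becomes zero once the logit margin exceeds confidence, which is the C&W-style attack objective. A standalone check of the untargeted branch with made-up logit values:

import tensorflow as tf

real = tf.constant([9.0, 2.0])        # logit of the true class, per example
other_max = tf.constant([4.0, 7.0])   # strongest competing logit, per example
confidence = 1.0
attack_loss = tf.maximum(0.0, real - other_max + confidence)

with tf.Session() as sess:
    print(sess.run(attack_loss))  # [6. 0.] -- the second example is already misclassified by a margin >= 1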
Example 2: bboxes_intersection
def bboxes_intersection(bbox_ref, bboxes, name=None):
    """Compute relative intersection between a reference box and a
    collection of bounding boxes. Namely, compute the quotient between
    intersection area and box area.

    Args:
      bbox_ref: (N, 4) or (4,) Tensor with reference bounding box(es).
      bboxes: (N, 4) Tensor, collection of bounding boxes.
    Return:
      (N,) Tensor with relative intersection.
    """
    with tf.name_scope(name, 'bboxes_intersection'):
        # Should be more efficient to first transpose.
        bboxes = tf.transpose(bboxes)
        bbox_ref = tf.transpose(bbox_ref)
        # Intersection bbox and volume.
        int_ymin = tf.maximum(bboxes[0], bbox_ref[0])
        int_xmin = tf.maximum(bboxes[1], bbox_ref[1])
        int_ymax = tf.minimum(bboxes[2], bbox_ref[2])
        int_xmax = tf.minimum(bboxes[3], bbox_ref[3])
        h = tf.maximum(int_ymax - int_ymin, 0.)
        w = tf.maximum(int_xmax - int_xmin, 0.)
        # Volumes.
        inter_vol = h * w
        bboxes_vol = (bboxes[2] - bboxes[0]) * (bboxes[3] - bboxes[1])
        scores = tfe_math.safe_divide(inter_vol, bboxes_vol, 'intersection')
        return scores
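A usage sketch for the function above (it assumes the surrounding SSD-TensorFlow-style utilities are importable, in particular tfe_math.safe_divide; the boxes are made-up [ymin, xmin, ymax, xmax] fractions of the image):

import tensorflow as tf

bbox_ref = tf.constant([0.0, 0.0, 0.5, 0.5])             # top-left image quadrant
bboxes = tf.constant([[0.25, 0.25, 0.75, 0.75],
                      [0.9, 0.9, 1.0, 1.0]])
scores = bboxes_intersection(bbox_ref, bboxes)

with tf.Session() as sess:
    print(sess.run(scores))  # [0.25 0.  ] -- a quarter of box 1 overlaps the reference; box 2 misses it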
Example 3: output_dropout_no_bias
def output_dropout_no_bias(self, x, keep_prob=0.5):
    if self.activation == 'sigmoid':
        return tf.nn.dropout(tf.nn.sigmoid(tf.matmul(x, self.W)), keep_prob)
    elif self.activation == 'relu':
        return tf.nn.dropout(tf.nn.relu(tf.matmul(x, self.W)), keep_prob)
    elif self.activation == 'relu6':
        return tf.nn.dropout(tf.nn.relu6(tf.matmul(x, self.W)), keep_prob)
    elif self.activation == 'leaky_relu':
        # leaky ReLU via tf.maximum: max(0.1*z, z)
        return tf.nn.dropout(tf.maximum(0.1 * tf.matmul(x, self.W), tf.matmul(x, self.W)), keep_prob)
    elif self.activation == 'leaky_relu6':
        # leaky ReLU capped at 6: min(max(0.1*z, z), 6)
        return tf.nn.dropout(tf.minimum(tf.maximum(0.1 * tf.matmul(x, self.W), tf.matmul(x, self.W)), 6.0), keep_prob)
    elif self.activation == 'linear':
        return tf.nn.dropout(tf.matmul(x, self.W), keep_prob)
    elif self.activation == 'softplus':
        return tf.nn.dropout(tf.nn.softplus(tf.matmul(x, self.W)), keep_prob)
    elif self.activation == 'tanh':
        return tf.nn.dropout(tf.tanh(tf.matmul(x, self.W)), keep_prob)
    else:
        print("No known activation function selected, using linear")
        return tf.matmul(x, self.W)
Example 4: _conv
def _conv(self, input, shape, strides, name, alpha=0.1):
    """
    args:
        shape : [3, 3, in, out]
    """
    if self.bn_mode:
        with tf.variable_scope(name) as scope:
            kernel = self._variable_trunc_normal('weights', shape)
            conv = tf.nn.conv2d(input, kernel, strides, padding='SAME')
            bn_conv = self._batch_normalization(conv, shape[-1], [0, 1, 2])
            # leaky ReLU: max(x, alpha*x)
            conv_ = tf.maximum(bn_conv, alpha * bn_conv, name=scope.name)
            if not tf.get_variable_scope().reuse:
                self._add_weight_decay(kernel)
                self._activation_summary(conv_)
    else:
        with tf.variable_scope(name) as scope:
            kernel = self._variable_trunc_normal('weights', shape)
            conv = tf.nn.conv2d(input, kernel, strides, padding='SAME')
            biases = self._variable_constant('biases', shape[-1], value=0.01)
            bias = tf.nn.bias_add(conv, biases)
            conv_ = tf.maximum(bias, alpha * bias, name=scope.name)
            if not tf.get_variable_scope().reuse:
                self._add_weight_decay(kernel)
                self._activation_summary(conv_)
    return conv_
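The tf.maximum(x, alpha * x) expression used in both branches is the classic way to write a leaky ReLU before tf.nn.leaky_relu existed (it was added around TF 1.4). A quick sanity sketch of the equivalence with made-up values:

import tensorflow as tf

x = tf.constant([-2.0, 0.0, 3.0])
alpha = 0.1
manual = tf.maximum(x, alpha * x)           # max(x, 0.1x)
builtin = tf.nn.leaky_relu(x, alpha=alpha)  # requires TF >= 1.4

with tf.Session() as sess:
    print(sess.run(manual))   # [-0.2  0.   3. ]
    print(sess.run(builtin))  # identical

The identity holds for any alpha in [0, 1]: for positive x the larger of x and alpha*x is x itself, and for negative x it is alpha*x.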
Example 5: bboxes_clip
def bboxes_clip(bbox_ref, bboxes, scope=None):
    """Clip bounding boxes to a reference box.
    Batch-compatible if the first dimension of `bbox_ref` and `bboxes`
    can be broadcasted.

    Args:
      bbox_ref: Reference bounding box. Nx4 or 4 shaped-Tensor;
      bboxes: Bounding boxes to clip. Nx4 or 4 shaped-Tensor or dictionary.
    Return:
      Clipped bboxes.
    """
    # Bboxes is a dictionary.
    if isinstance(bboxes, dict):
        with tf.name_scope(scope, 'bboxes_clip_dict'):
            d_bboxes = {}
            for c in bboxes.keys():
                d_bboxes[c] = bboxes_clip(bbox_ref, bboxes[c])
            return d_bboxes
    # Tensor inputs.
    with tf.name_scope(scope, 'bboxes_clip'):
        # Easier with transposed bboxes. Especially for broadcasting.
        bbox_ref = tf.transpose(bbox_ref)
        bboxes = tf.transpose(bboxes)
        # Intersection bboxes and reference bbox.
        ymin = tf.maximum(bboxes[0], bbox_ref[0])
        xmin = tf.maximum(bboxes[1], bbox_ref[1])
        ymax = tf.minimum(bboxes[2], bbox_ref[2])
        xmax = tf.minimum(bboxes[3], bbox_ref[3])
        # Double check! Empty boxes when no-intersection.
        ymin = tf.minimum(ymin, ymax)
        xmin = tf.minimum(xmin, xmax)
        bboxes = tf.transpose(tf.stack([ymin, xmin, ymax, xmax], axis=0))
        return bboxes
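A usage sketch for bboxes_clip with made-up coordinates (only TensorFlow itself is needed here):

import tensorflow as tf

bbox_ref = tf.constant([0.0, 0.0, 1.0, 1.0])   # clip everything to the unit box
bboxes = tf.constant([[-0.2, 0.3, 0.8, 1.4],
                      [0.5, 0.5, 0.6, 0.9]])
clipped = bboxes_clip(bbox_ref, bboxes)

with tf.Session() as sess:
    print(sess.run(clipped))
    # [[0.  0.3 0.8 1. ]
    #  [0.5 0.5 0.6 0.9]]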
Example 6: f_iou_box
def f_iou_box(top_left_a, bot_right_a, top_left_b, bot_right_b):
    """Computes IoU of boxes.

    Args:
        top_left_a: [B, T, 2] or [B, 2]
        bot_right_a: [B, T, 2] or [B, 2]
        top_left_b: [B, T, 2] or [B, 2]
        bot_right_b: [B, T, 2] or [B, 2]
    Returns:
        iou: [B, T]
    """
    inter_area = f_inter_box(top_left_a, bot_right_a, top_left_b, bot_right_b)
    inter_area = tf.maximum(inter_area, 1e-6)
    ndims = tf.shape(tf.shape(top_left_a))
    # area_a = tf.reduce_prod(bot_right_a - top_left_a, ndims - 1)
    # area_b = tf.reduce_prod(bot_right_b - top_left_b, ndims - 1)
    # zero out the area of degenerate (inverted) boxes
    check_a = tf.reduce_prod(tf.to_float(top_left_a < bot_right_a), ndims - 1)
    area_a = check_a * tf.reduce_prod(bot_right_a - top_left_a, ndims - 1)
    check_b = tf.reduce_prod(tf.to_float(top_left_b < bot_right_b), ndims - 1)
    area_b = check_b * tf.reduce_prod(bot_right_b - top_left_b, ndims - 1)
    union_area = (area_a + area_b - inter_area + 1e-5)
    union_area = tf.maximum(union_area, 1e-5)
    iou = inter_area / union_area
    iou = tf.maximum(iou, 1e-5)
    iou = tf.minimum(iou, 1.0)
    return iou
Example 7: loss
def loss(self):
    # 1. The margin loss
    # [batch_size, 10, 1, 1]
    # max_l = max(0, m_plus - ||v_c||)^2
    max_l = tf.square(tf.maximum(0., cfg.m_plus - self.v_length))
    # max_r = max(0, ||v_c|| - m_minus)^2
    max_r = tf.square(tf.maximum(0., self.v_length - cfg.m_minus))
    assert max_l.get_shape() == [cfg.batch_size, 10, 1, 1]
    # reshape: [batch_size, 10, 1, 1] => [batch_size, 10]
    max_l = tf.reshape(max_l, shape=(cfg.batch_size, -1))
    max_r = tf.reshape(max_r, shape=(cfg.batch_size, -1))
    # calc T_c: [batch_size, 10]; here T_c equals the one-hot labels Y
    T_c = self.Y
    # [batch_size, 10], element-wise multiply
    L_c = T_c * max_l + cfg.lambda_val * (1 - T_c) * max_r
    self.margin_loss = tf.reduce_mean(tf.reduce_sum(L_c, axis=1))

    # 2. The reconstruction loss
    origin = tf.reshape(self.X, shape=(cfg.batch_size, -1))
    squared = tf.square(self.decoded - origin)
    self.reconstruction_err = tf.reduce_mean(squared)

    # 3. Total loss
    # The paper uses the sum of squared errors as the reconstruction loss,
    # but we used reduce_mean above, i.e. the mean squared error. To stay
    # in line with the paper, the regularization scale should be
    # 0.0005 * 784 = 0.392.
    self.total_loss = self.margin_loss + cfg.regularization_scale * self.reconstruction_err
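In equation form, the two tf.maximum calls above implement the margin loss from the CapsNet paper (Sabour et al., 2017, "Dynamic Routing Between Capsules"):

    L_c = T_c \max(0, m^{+} - \|v_c\|)^2 + \lambda (1 - T_c) \max(0, \|v_c\| - m^{-})^2

where T_c = 1 iff class c is present, and m^+, m^- and \lambda correspond to cfg.m_plus, cfg.m_minus and cfg.lambda_val (0.9, 0.1 and 0.5 in the paper).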
Example 8: get_next_input
def get_next_input(output):
    # the next location is computed by the location network
    baseline = tf.sigmoid(tf.matmul(output, Wb_h_b) + Bb_h_b)
    baselines.append(baseline)
    # compute the next location, then impose noise
    if eyeCentered:
        # add the last sampled glimpse location
        # TODO max(-1, min(1, u + N(output, sigma) + prevLoc))
        mean_loc = tf.maximum(-1.0, tf.minimum(1.0, tf.matmul(output, Wl_h_l) + sampled_locs[-1]))
    else:
        mean_loc = tf.matmul(output, Wl_h_l)
    # mean_loc = tf.stop_gradient(mean_loc)
    mean_locs.append(mean_loc)
    mean_locs_stopGrad.append(tf.stop_gradient(mean_loc))
    # add noise, clamped to [-1, 1]
    # sample_loc = tf.tanh(mean_loc + tf.random_normal(mean_loc.get_shape(), 0, loc_sd))
    sample_loc = tf.maximum(-1.0, tf.minimum(1.0, mean_loc + tf.random_normal(mean_loc.get_shape(), 0, loc_sd)))
    # don't propagate through the locations
    # sample_loc = tf.stop_gradient(sample_loc)
    sampled_locs.append(sample_loc)
    sampled_locs_stopGrad.append(tf.stop_gradient(sample_loc))
    return get_glimpse(sample_loc)
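The nested tf.maximum(-1.0, tf.minimum(1.0, ...)) pattern used twice above clamps each coordinate into [-1, 1]; it is interchangeable with tf.clip_by_value. A minimal check with made-up values:

import tensorflow as tf

u = tf.constant([-3.0, -0.4, 0.7, 2.5])
clamped = tf.maximum(-1.0, tf.minimum(1.0, u))
same = tf.clip_by_value(u, -1.0, 1.0)

with tf.Session() as sess:
    print(sess.run(clamped))  # [-1.  -0.4  0.7  1. ]
    print(sess.run(same))     # identical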
Example 9: _update_lipschitz
def _update_lipschitz(self, v, i):
    config = self.config
    if len(v.shape) > 1:
        k = self.config.weight_constraint_k or 100.0
        wi_hat = v
        if len(v.shape) == 4:
            #fij = tf.reduce_sum(tf.abs(wi_hat), axis=[0,1])
            fij = wi_hat
            fij = tf.reduce_sum(tf.abs(fij), axis=[1])
            fij = tf.reduce_max(fij, axis=[0])
        else:
            fij = wi_hat

        if self.config.ortho_pnorm == "inf":
            wp = tf.reduce_max(tf.reduce_sum(tf.abs(fij), axis=0), axis=0)
        else:
            # conv
            wp = tf.reduce_max(tf.reduce_sum(tf.abs(fij), axis=1), axis=0)
        # shrink the weights only when the norm exceeds k
        ratio = 1.0 / tf.maximum(1.0, wp / k)

        if self.config.weight_bounce:
            bounce = tf.minimum(1.0, tf.ceil(wp / k - 0.999))
            ratio -= tf.maximum(0.0, bounce) * 0.2

        if self.config.weight_scaleup:
            up = tf.minimum(1.0, tf.ceil(0.02 - wp / k))
            ratio += tf.maximum(0.0, up) * k / wp * 0.2

        wi = ratio * wi_hat
        #self.gan.metrics['wi'+str(i)] = wp
        #self.gan.metrics['wk'+str(i)] = ratio
        #self.gan.metrics['bounce'+str(i)] = bounce
        return tf.assign(v, wi)
    return None
Example 10: prune_conv_w
def prune_conv_w(self, w, w_abs_mean):
    with tf.name_scope("Prune_conv"):
        conv_gamma = 0.25 * self.gamma
        # ratio of each weight's magnitude to the (scaled) mean magnitude, floored at eps
        log_w = tf.log(tf.maximum(self.eps, tf.abs(w) / (w_abs_mean * conv_gamma)))
        if self.max_ratio > 0:
            log_w = tf.minimum(self.max_ratio, self.beta * log_w)
        return w * tf.maximum(self.alpha / self.beta * log_w, log_w)
Example 11: preprocess
def preprocess(img, input_size, model):
    # Convert RGB to BGR
    img_r, img_g, img_b = tf.split(axis=2, num_or_size_splits=3, value=img)
    img = tf.cast(tf.concat(axis=2, values=[img_b, img_g, img_r]), dtype=tf.float32)
    # Subtract the image mean.
    img -= IMG_MEAN

    if model == 'fcn-8s':
        shape = tf.shape(img)
        img = tf.expand_dims(img, dim=0)
        output = tf.image.resize_bilinear(img, input_size)
        return output, shape
    elif model == 'pspnet50':
        shape = tf.shape(img)
        # pad up to at least input_size in each dimension
        h, w = (tf.maximum(input_size[0], shape[0]), tf.maximum(input_size[1], shape[1]))
        pad_img = tf.image.pad_to_bounding_box(img, 0, 0, h, w)
        output = tf.expand_dims(pad_img, dim=0)
        return output, h, w, shape
    elif model == 'icnet':
        img = tf.expand_dims(img, dim=0)
        output = tf.image.resize_bilinear(img, input_size)
        return output, input_size
Example 12: prune_w
def prune_w(self, w, w_abs, w_abs_mean, w_abs_std):
    self.cursor += 1
    with tf.name_scope("Prune"):
        if self.cond_placeholder is None:
            log_w = tf.log(tf.maximum(self.eps, w_abs / (w_abs_mean * self.gamma)))
            if self.max_ratio > 0:
                log_w = tf.minimum(self.max_ratio, self.beta * log_w)
            self.masks.append(tf.maximum(self.alpha / self.beta * log_w, log_w))
            return w * self.masks[self.cursor]

        self.masks.append(tf.Variable(np.ones(w.get_shape(), np.float32), trainable=False))

        def prune(i, do_prune):
            def sub():
                if not do_prune:
                    # drop weights whose magnitude falls below the lower threshold ...
                    mask = self.masks[i]
                    self.masks[i] = tf.assign(mask, tf.where(
                        tf.logical_and(
                            tf.equal(mask, 1),
                            tf.less_equal(w_abs, 0.9 * tf.maximum(w_abs_mean + self.beta * w_abs_std, self.eps))
                        ),
                        tf.zeros_like(mask), mask
                    ))
                    # ... and revive weights that climb back above the upper threshold
                    mask = self.masks[i]
                    self.masks[i] = tf.assign(mask, tf.where(
                        tf.logical_and(
                            tf.equal(mask, 0),
                            tf.greater(w_abs, 1.1 * tf.maximum(w_abs_mean + self.beta * w_abs_std, self.eps))
                        ),
                        tf.ones_like(mask), mask
                    ))
                return w * self.masks[i]
            return sub
        return tf.cond(self.cond_placeholder, prune(self.cursor, True), prune(self.cursor, False))
Example 13: clip_eta
def clip_eta(eta, ord, eps):
    """
    Helper function to clip the perturbation to the epsilon norm ball.
    :param eta: A tensor with the current perturbation.
    :param ord: Order of the norm (mimics Numpy).
                Possible values: np.inf, 1 or 2.
    :param eps: Epsilon, bound of the perturbation.
    """
    # Clipping perturbation eta to the self.ord norm ball
    if ord not in [np.inf, 1, 2]:
        raise ValueError('ord must be np.inf, 1, or 2.')
    reduc_ind = list(xrange(1, len(eta.get_shape())))
    avoid_zero_div = 1e-12
    if ord == np.inf:
        eta = tf.clip_by_value(eta, -eps, eps)
    else:
        if ord == 1:
            norm = tf.maximum(avoid_zero_div,
                              reduce_sum(tf.abs(eta),
                                         reduc_ind, keepdims=True))
        elif ord == 2:
            # avoid_zero_div must go inside sqrt to avoid a divide by zero
            # in the gradient through this operation
            norm = tf.sqrt(tf.maximum(avoid_zero_div,
                                      reduce_sum(tf.square(eta),
                                                 reduc_ind,
                                                 keepdims=True)))
        # We must *clip* to within the norm ball, not *normalize* onto the
        # surface of the ball
        factor = tf.minimum(1., eps / norm)
        eta = eta * factor
    return eta
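A usage sketch for clip_eta (this helper comes from CleverHans, where reduce_sum is a keepdims-compatible wrapper rather than tf.reduce_sum and xrange comes from six.moves on Python 3; the perturbation values below are made up):

import numpy as np
import tensorflow as tf

eta = tf.constant([[0.5, -2.0, 0.1]])      # a batch of one perturbation
clipped_inf = clip_eta(eta, np.inf, 0.3)   # per-element clip into [-0.3, 0.3]
clipped_l2 = clip_eta(eta, 2, 1.0)         # rescale into the L2 ball of radius 1 if outside

with tf.Session() as sess:
    print(sess.run(clipped_inf))  # [[ 0.3 -0.3  0.1]]
    print(sess.run(clipped_l2))   # eta * min(1, 1 / ||eta||_2)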
Example 14: run_tf_simulation
def run_tf_simulation(self, c_in, h_in, timesteps=100, dt=0.005):
    r_e = tf.Variable(tf.zeros([self.N_pairs, self.N_pairs]))
    r_i = tf.Variable(tf.zeros([self.N_pairs, self.N_pairs]))
    W_EE = tf.placeholder(tf.float32)
    W_EI = tf.placeholder(tf.float32)
    W_IE = tf.placeholder(tf.float32)
    W_II = tf.placeholder(tf.float32)
    k = tf.placeholder(tf.float32)
    n_E = tf.placeholder(tf.float32)
    n_I = tf.placeholder(tf.float32)
    tau_E = tf.placeholder(tf.float32)
    tau_I = tf.placeholder(tf.float32)
    c0 = tf.constant(c_in)
    h0 = tf.constant(h_in)

    # Compile functions:
    I_E = c0 * h0 + tf.transpose(tf.reshape(tf.reduce_sum(W_EE * r_e, [1, 2]), [75, 75])) \
        - tf.transpose(tf.reshape(tf.reduce_sum(W_EI * r_i, [1, 2]), [75, 75]))
    I_I = c0 * h0 + tf.transpose(tf.reshape(tf.reduce_sum(W_IE * r_e, [1, 2]), [75, 75])) \
        - tf.transpose(tf.reshape(tf.reduce_sum(W_II * r_i, [1, 2]), [75, 75]))
    # rectify the input currents: max(0, I)
    I_thresh_E = tf.maximum(0., I_E)
    I_thresh_I = tf.maximum(0., I_I)
    r_SS_E = k * tf.pow(I_thresh_E, n_E)
    r_SS_I = k * tf.pow(I_thresh_I, n_I)
    rE_out = r_e + dt * (-r_e + r_SS_E) / tau_E
    rI_out = r_i + dt * (-r_i + r_SS_I) / tau_I
    update_rE = tf.assign(r_e, rE_out)
    update_rI = tf.assign(r_i, rI_out)
    init = tf.initialize_all_variables()

    rE = 0
    rI = 0
    fd = {W_EE: self.W_EE.astype(np.float32),
          W_EI: self.W_EI.astype(np.float32),
          W_IE: self.W_IE.astype(np.float32),
          W_II: self.W_II.astype(np.float32),
          k: self.k.astype(np.float32),
          n_E: self.n_E.astype(np.float32),
          n_I: self.n_I.astype(np.float32),
          tau_E: self.tau_E.astype(np.float32),
          tau_I: self.tau_I.astype(np.float32)}
    with tf.Session() as sess:
        sess.run(init, feed_dict=fd)
        for t in range(timesteps):
            # run one Euler step of the simulation
            sess.run([update_rE, update_rI], feed_dict=fd)
            # fetch the rates
            rE = sess.run([r_e], feed_dict=fd)
            rI = sess.run([r_i], feed_dict=fd)
    return rE, rI
Example 15: IoU
def IoU(bbox, gt):
    # bbox = [x, y, w, h]  (x, y is the top-left corner)
    shape = [-1, 1]
    x1 = tf.maximum(tf.cast(bbox[0], tf.float32), tf.reshape(tf.cast(gt[:, 0], tf.float32), shape))
    y1 = tf.maximum(tf.cast(bbox[1], tf.float32), tf.reshape(tf.cast(gt[:, 1], tf.float32), shape))
    x2 = tf.minimum(tf.cast(bbox[2] + bbox[0], tf.float32), tf.reshape(tf.cast(gt[:, 2] + gt[:, 0], tf.float32), shape))
    y2 = tf.minimum(tf.cast(bbox[3] + bbox[1], tf.float32), tf.reshape(tf.cast(gt[:, 3] + gt[:, 1], tf.float32), shape))
    # clamp to zero so non-overlapping boxes do not produce a spurious area
    inter_w = tf.maximum(tf.subtract(x2, x1), 0.)
    inter_h = tf.maximum(tf.subtract(y2, y1), 0.)
    inter = tf.cast(inter_w * inter_h, tf.float32)
    bounding_box = tf.cast(tf.multiply(bbox[2], bbox[3]), tf.float32)
    ground_truth = tf.reshape(tf.cast(tf.multiply(gt[:, 2], gt[:, 3]), tf.float32), shape)
    #iou = tf.div(inter, tf.subtract(tf.add(bounding_box, tf.reshape(ground_truth, shape)), inter))
    iou = inter / (bounding_box + ground_truth - inter)
    # limit the iou range between 0 and 1
    mask_less = tf.cast(tf.logical_not(tf.less(iou, tf.zeros_like(iou))), tf.float32)
    #mask_great = tf.cast(tf.logical_not(tf.greater(iou, tf.ones_like(iou))), tf.float32)
    iou = tf.multiply(iou, mask_less)
    #iou = tf.multiply(iou, positive_mask)
    return iou
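A usage sketch for IoU with made-up boxes in [x, y, w, h] format (gt is a 2-D tensor of ground-truth boxes, so the result has shape [num_gt, 1]):

import tensorflow as tf

bbox = tf.constant([0., 0., 10., 10.])      # candidate box: x, y, w, h
gt = tf.constant([[0., 0., 10., 10.],       # identical box   -> IoU 1.0
                  [5., 5., 10., 10.],       # partial overlap -> IoU 25/175
                  [20., 20., 5., 5.]])      # no overlap      -> IoU 0.0

with tf.Session() as sess:
    print(sess.run(IoU(bbox, gt)))
    # [[1.        ]
    #  [0.14285715]
    #  [0.        ]]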