This article collects typical usage examples of the Python method tensorflow.scalar_mul. If you have been wondering what tensorflow.scalar_mul does and how to use it, the curated code examples below should help. You can also explore further usage examples from the enclosing tensorflow module.
The 15 code examples of tensorflow.scalar_mul shown below are ordered by popularity.
Example 1: focal_loss_
# Requires: import tensorflow as tf (or: from tensorflow import scalar_mul)
def focal_loss_(labels, pred, anchor_state, alpha=0.25, gamma=2.0):
    # filter out "ignore" anchors (anchor_state == -1)
    indices = tf.reshape(tf.where(tf.not_equal(anchor_state, -1)), [-1, ])
    labels = tf.gather(labels, indices)
    pred = tf.gather(pred, indices)
    logits = tf.cast(pred, tf.float32)
    onehot_labels = tf.cast(labels, tf.float32)
    ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=onehot_labels, logits=logits)
    predictions = tf.sigmoid(logits)
    # p_t: the model's probability of the true class
    predictions_pt = tf.where(tf.equal(onehot_labels, 1), predictions, 1. - predictions)
    # alpha_t: class-balancing weight (alpha for positives, 1 - alpha for negatives)
    alpha_t = tf.scalar_mul(alpha, tf.ones_like(onehot_labels, dtype=tf.float32))
    alpha_t = tf.where(tf.equal(onehot_labels, 1.0), alpha_t, 1 - alpha_t)
    loss = ce * tf.pow(1 - predictions_pt, gamma) * alpha_t
    # normalize by the number of positive anchors (at least 1)
    positive_mask = tf.cast(tf.greater(labels, 0), tf.float32)
    return tf.reduce_sum(loss) / tf.maximum(tf.reduce_sum(positive_mask), 1)
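A minimal sketch of calling this loss on toy tensors (TF 1.x graph mode; the shapes and values here are illustrative assumptions, not part of the original example):

import tensorflow as tf

labels = tf.constant([[1., 0.], [0., 1.]])       # per-anchor one-hot targets
logits = tf.constant([[2.0, -1.0], [0.5, 1.5]])  # raw sigmoid logits
anchor_state = tf.constant([1, 1])               # -1 would mark "ignore" anchors
loss = focal_loss_(labels, logits, anchor_state)
with tf.Session() as sess:
    print(sess.run(loss))  # scalar loss, normalized by the positive-anchor count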
Example 2: clip_norm
# Requires: import tensorflow as tf; K is the Keras backend
# (from keras import backend as K)
def clip_norm(g, c, n):
    if c > 0:
        if K.backend() == 'tensorflow':
            import tensorflow as tf
            import copy
            condition = n >= c
            then_expression = tf.scalar_mul(c / n, g)
            else_expression = g

            # Save the static shape, which tf.cond can lose.
            if hasattr(then_expression, 'get_shape'):
                g_shape = copy.copy(then_expression.get_shape())
            elif hasattr(then_expression, 'dense_shape'):
                g_shape = copy.copy(then_expression.dense_shape)
            if condition.dtype != tf.bool:
                condition = tf.cast(condition, 'bool')
            g = K.tensorflow_backend.control_flow_ops.cond(
                condition, lambda: then_expression, lambda: else_expression)
            # Restore the saved shape on the cond output.
            if hasattr(then_expression, 'get_shape'):
                g.set_shape(g_shape)
            elif hasattr(then_expression, 'dense_shape'):
                g._dense_shape = g_shape
        else:
            g = K.switch(n >= c, g * c / n, g)
    return g
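For comparison, stock TensorFlow exposes the same rescale-when-too-large idea as a single op; a short sketch independent of the Keras backend plumbing above:

import tensorflow as tf

g = tf.constant([3.0, 4.0])        # gradient with L2 norm 5
clipped = tf.clip_by_norm(g, 2.0)  # rescaled so the norm is at most 2
# by hand, this is tf.scalar_mul(2.0 / 5.0, g) whenever the norm exceeds 2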
Example 3: compute_gradients
# Requires: import tensorflow as tf (or: from tensorflow import scalar_mul)
def compute_gradients(self, loss, var_list=None, *args, **kwargs):
    if var_list is None:
        var_list = (
            tf.trainable_variables() +
            tf.get_collection(tf.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
    replaced_list = var_list

    # Scale the loss up before differentiating (mixed-precision loss scaling).
    if self._scale != 1.0:
        loss = tf.scalar_mul(self._scale, loss)
    gradvar = self._optimizer.compute_gradients(loss, replaced_list, *args, **kwargs)

    final_gradvar = []
    for orig_var, (grad, var) in zip(var_list, gradvar):
        if var is not orig_var:
            grad = tf.cast(grad, orig_var.dtype)
        # Undo the loss scaling on the gradients.
        if self._scale != 1.0:
            grad = tf.scalar_mul(1. / self._scale, grad)
        final_gradvar.append((grad, orig_var))
    return final_gradvar
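The original snippet shows only the method; a minimal sketch of a wrapper class it could plausibly live in (the class name and constructor are assumptions):

class LossScaleOptimizer(object):
    """Hypothetical mixed-precision wrapper: scale the loss up before
    differentiating, then scale the gradients back down."""

    def __init__(self, optimizer, scale=128.0):
        self._optimizer = optimizer
        self._scale = scale

    # compute_gradients(...) as shown in Example 3 above

    def apply_gradients(self, *args, **kwargs):
        return self._optimizer.apply_gradients(*args, **kwargs)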
Example 4: difference
# Requires: import tensorflow as tf (or: from tensorflow import scalar_mul)
def difference(predicted, target, loss_difference, epsilon=1e-2):
    if loss_difference == LossDifferenceEnum.DIFFERENCE:
        result = tf.subtract(predicted, target)
    elif loss_difference == LossDifferenceEnum.ABSOLUTE:
        difference = tf.subtract(predicted, target)
        result = tf.abs(difference)
    elif loss_difference == LossDifferenceEnum.SMOOTH_ABSOLUTE:
        # Huber-style loss: quadratic below 1, linear above.
        difference = tf.subtract(predicted, target)
        absolute_difference = tf.abs(difference)
        result = tf.where(
            tf.less(absolute_difference, 1),
            tf.scalar_mul(0.5, tf.square(absolute_difference)),
            tf.subtract(absolute_difference, 0.5))
    elif loss_difference == LossDifferenceEnum.SQUARED:
        result = tf.squared_difference(predicted, target)
    elif loss_difference == LossDifferenceEnum.SMAPE:
        # https://en.wikipedia.org/wiki/Symmetric_mean_absolute_percentage_error
        absolute_difference = tf.abs(tf.subtract(predicted, target))
        denominator = tf.add(tf.add(tf.abs(predicted), tf.abs(target)), epsilon)
        result = tf.divide(absolute_difference, denominator)
    # sum over the channel axis
    result = tf.reduce_sum(result, axis=3)
    return result
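LossDifferenceEnum is referenced but not defined in the snippet; a plausible reconstruction inferred from the branches above (an assumption, not the original source):

import enum

class LossDifferenceEnum(enum.Enum):
    DIFFERENCE = 'difference'
    ABSOLUTE = 'absolute'
    SMOOTH_ABSOLUTE = 'smooth_absolute'  # Huber-style: quadratic near 0, linear beyond
    SQUARED = 'squared'
    SMAPE = 'smape'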
Example 5: call
# Requires: import tensorflow as tf, import itertools;
# K and batch_dot come from the Keras backend (keras.backend)
def call(self, inputs, **kwargs):
    if K.ndim(inputs) != 3:
        raise ValueError(
            "Unexpected inputs dimensions %d, expect to be 3 dimensions"
            % (K.ndim(inputs)))
    if inputs.shape[1] != self.num_fields:
        raise ValueError("Mismatch in number of fields {} and "
                         "concatenated embeddings dims {}".format(
                             self.num_fields, inputs.shape[1]))
    pairwise_inner_prods = []
    for fi, fj in itertools.combinations(range(self.num_fields), 2):
        # field strength for the pair (fi, fj)
        r_ij = self.field_strengths[fi, fj]
        # embeddings for the features of both fields, shape (batch, embed_dim)
        feat_embed_i = tf.squeeze(inputs[0:, fi:fi + 1, 0:], axis=1)
        feat_embed_j = tf.squeeze(inputs[0:, fj:fj + 1, 0:], axis=1)
        f = tf.scalar_mul(r_ij, batch_dot(feat_embed_i, feat_embed_j, axes=1))
        pairwise_inner_prods.append(f)
    sum_ = tf.add_n(pairwise_inner_prods)
    return sum_
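Each term in the sum is a scalar-weighted per-sample inner product of two field embeddings; a standalone sketch of one such term in plain TensorFlow (the tensor values are illustrative):

import tensorflow as tf

feat_embed_i = tf.constant([[1.0, 2.0], [0.5, 0.5]])  # (batch, embed_dim)
feat_embed_j = tf.constant([[0.0, 1.0], [1.0, 1.0]])
r_ij = 0.3                                            # learned field-pair strength
# batch_dot(a, b, axes=1) is a per-sample inner product, i.e.:
f = tf.scalar_mul(r_ij, tf.reduce_sum(feat_embed_i * feat_embed_j,
                                      axis=1, keepdims=True))  # shape (batch, 1)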
Example 6: soft_arg_min
# Requires: import tensorflow as tf (or: from tensorflow import scalar_mul)
def soft_arg_min(filtered_cost_volume, name):
    # input shape (batch, depth, H, W); a soft argmin over the depth axis
    # yields a disparity image of shape (batch, H, W).
    with tf.variable_scope(name):
        probability_volume = tf.nn.softmax(tf.scalar_mul(-1, filtered_cost_volume),
                                           dim=1, name='prob_volume')
        volume_shape = tf.shape(probability_volume)
        soft_1d = tf.cast(tf.range(0, volume_shape[1], dtype=tf.int32), tf.float32)
        # broadcast the disparity indices to the full volume shape
        soft_4d = tf.tile(soft_1d,
                          tf.stack([volume_shape[0] * volume_shape[2] * volume_shape[3]]))
        soft_4d = tf.reshape(soft_4d,
                             [volume_shape[0], volume_shape[2], volume_shape[3], volume_shape[1]])
        soft_4d = tf.transpose(soft_4d, [0, 3, 1, 2])
        # expected disparity under the softmax distribution
        estimated_disp_image = tf.reduce_sum(soft_4d * probability_volume, axis=1)
        return estimated_disp_image
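The soft argmin is the probability-weighted average of depth indices; a tiny check on a (1, 3, 1, 1) cost volume (TF 1.x graph mode; values illustrative):

import tensorflow as tf

cost = tf.constant([[[[5.0]], [[1.0]], [[4.0]]]])  # (batch=1, depth=3, H=1, W=1)
disp = soft_arg_min(cost, 'demo')
with tf.Session() as sess:
    print(sess.run(disp))  # close to 1, the index of the lowest cost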
Example 7: ddx
# Requires: import tensorflow as tf (or: from tensorflow import scalar_mul)
def ddx(inpt, channel, dx, scope='ddx', name=None):
    # 6th-order central first derivative along x (the D axis of NDHWC),
    # with periodic boundary handling.
    var = tf.expand_dims(inpt[:, :, :, :, channel], axis=4)
    with tf.variable_scope(scope):
        ddx1D = tf.constant([-1./60., 3./20., -3./4., 0., 3./4., -3./20., 1./60.],
                            dtype=tf.float32)
        ddx3D = tf.reshape(ddx1D, shape=(-1, 1, 1, 1, 1))
        strides = [1, 1, 1, 1, 1]
        var_pad = periodic_padding(var, ((3, 3), (0, 0), (0, 0)))
        output = tf.nn.conv3d(var_pad, ddx3D, strides, padding='VALID',
                              data_format='NDHWC', name=name)
        output = tf.scalar_mul(1./dx, output)
    return output
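Examples 7 through 11 depend on a periodic_padding helper that is not shown; a minimal reconstruction of what it presumably does (wrap-around padding on the D, H, W axes of an NDHWC tensor; this sketch is an assumption):

import tensorflow as tf

def periodic_padding(var, pad):
    # pad = ((before, after), ...) for the D, H, W axes of an NDHWC tensor
    for axis, (before, after) in zip((1, 2, 3), pad):
        if before == 0 and after == 0:
            continue
        n = tf.shape(var)[axis]
        front = tf.gather(var, tf.range(n - before, n), axis=axis)  # tail wraps to front
        back = tf.gather(var, tf.range(0, after), axis=axis)        # head wraps to back
        var = tf.concat([front, var, back], axis=axis)
    return var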
Example 8: ddy
# Requires: import tensorflow as tf (or: from tensorflow import scalar_mul)
def ddy(inpt, channel, dy, scope='ddy', name=None):
    # 6th-order central first derivative along y (the H axis of NDHWC).
    var = tf.expand_dims(inpt[:, :, :, :, channel], axis=4)
    with tf.variable_scope(scope):
        ddy1D = tf.constant([-1./60., 3./20., -3./4., 0., 3./4., -3./20., 1./60.],
                            dtype=tf.float32)
        ddy3D = tf.reshape(ddy1D, shape=(1, -1, 1, 1, 1))
        strides = [1, 1, 1, 1, 1]
        var_pad = periodic_padding(var, ((0, 0), (3, 3), (0, 0)))
        output = tf.nn.conv3d(var_pad, ddy3D, strides, padding='VALID',
                              data_format='NDHWC', name=name)
        output = tf.scalar_mul(1./dy, output)
    return output
Example 9: ddz
# Requires: import tensorflow as tf (or: from tensorflow import scalar_mul)
def ddz(inpt, channel, dz, scope='ddz', name=None):
    # 6th-order central first derivative along z (the W axis of NDHWC).
    var = tf.expand_dims(inpt[:, :, :, :, channel], axis=4)
    with tf.variable_scope(scope):
        ddz1D = tf.constant([-1./60., 3./20., -3./4., 0., 3./4., -3./20., 1./60.],
                            dtype=tf.float32)
        ddz3D = tf.reshape(ddz1D, shape=(1, 1, -1, 1, 1))
        strides = [1, 1, 1, 1, 1]
        var_pad = periodic_padding(var, ((0, 0), (0, 0), (3, 3)))
        output = tf.nn.conv3d(var_pad, ddz3D, strides, padding='VALID',
                              data_format='NDHWC', name=name)
        output = tf.scalar_mul(1./dz, output)
    return output
Example 10: d2dx2
# Requires: import tensorflow as tf (or: from tensorflow import scalar_mul)
def d2dx2(inpt, channel, dx, scope='d2dx2', name=None):
    # 6th-order central second derivative along x.
    var = tf.expand_dims(inpt[:, :, :, :, channel], axis=4)
    with tf.variable_scope(scope):
        ddx1D = tf.constant([1./90., -3./20., 3./2., -49./18., 3./2., -3./20., 1./90.],
                            dtype=tf.float32)
        ddx3D = tf.reshape(ddx1D, shape=(-1, 1, 1, 1, 1))
        strides = [1, 1, 1, 1, 1]
        var_pad = periodic_padding(var, ((3, 3), (0, 0), (0, 0)))
        output = tf.nn.conv3d(var_pad, ddx3D, strides, padding='VALID',
                              data_format='NDHWC', name=name)
        output = tf.scalar_mul(1./dx**2, output)
    return output
Example 11: d2dy2
# Requires: import tensorflow as tf (or: from tensorflow import scalar_mul)
def d2dy2(inpt, channel, dy, scope='d2dy2', name=None):
    # 6th-order central second derivative along y.
    var = tf.expand_dims(inpt[:, :, :, :, channel], axis=4)
    with tf.variable_scope(scope):
        ddy1D = tf.constant([1./90., -3./20., 3./2., -49./18., 3./2., -3./20., 1./90.],
                            dtype=tf.float32)
        ddy3D = tf.reshape(ddy1D, shape=(1, -1, 1, 1, 1))
        strides = [1, 1, 1, 1, 1]
        var_pad = periodic_padding(var, ((0, 0), (3, 3), (0, 0)))
        output = tf.nn.conv3d(var_pad, ddy3D, strides, padding='VALID',
                              data_format='NDHWC', name=name)
        output = tf.scalar_mul(1./dy**2, output)
    return output
Example 12: preprocess_for_eval
# Requires: import tensorflow as tf (or: from tensorflow import scalar_mul)
def preprocess_for_eval(image,
                        classes,
                        boxes,
                        resolution,
                        speed_mode=False):
    if speed_mode:
        # NOTE: left as a stub in the original snippet; scale and translation
        # are only defined on the else path below.
        pass
    else:
        # mean subtraction
        means = [_R_MEAN, _G_MEAN, _B_MEAN]
        channels = tf.split(axis=2, num_or_size_splits=3, value=image)
        for i in range(3):
            channels[i] -= means[i]
        # caffe swaps color channels (RGB -> BGR)
        image = tf.concat(axis=2, values=[channels[2], channels[1], channels[0]])

        image, scale, translation = bilinear_resize(image, resolution, depth=3,
                                                    resize_mode="bilinear")
        # Needed so that a later tensor unstack works
        image.set_shape([resolution, resolution, 3])

        # rescale and translate the boxes to match the resized image
        x1, y1, x2, y2 = tf.unstack(boxes, 4, axis=1)
        x1 = tf.scalar_mul(scale[1], x1)
        y1 = tf.scalar_mul(scale[0], y1)
        x2 = tf.scalar_mul(scale[1], x2)
        y2 = tf.scalar_mul(scale[0], y2)
        boxes = tf.concat([tf.expand_dims(x1, -1),
                           tf.expand_dims(y1, -1),
                           tf.expand_dims(x2, -1),
                           tf.expand_dims(y2, -1)], axis=1)
        boxes = boxes + [translation[1], translation[0], translation[1], translation[0]]
    return image, classes, boxes, scale, translation
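_R_MEAN, _G_MEAN, _B_MEAN and bilinear_resize come from the surrounding module and are not shown. The means are per-channel pixel averages subtracted before the Caffe-style RGB-to-BGR swap; the usual VGG-style values are given below as an assumption, not as the module's actual constants:

_R_MEAN = 123.68  # assumed ImageNet channel means; the snippet's real
_G_MEAN = 116.78  # constants are defined elsewhere in the module
_B_MEAN = 103.94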
Example 13: focal_loss
# Requires: import tensorflow as tf (or: from tensorflow import scalar_mul)
def focal_loss(self, pred, label, num_class=21, alpha=0.25, gamma=2.0):
    label_one_hot = tf.one_hot(indices=label, depth=num_class, on_value=1.0,
                               off_value=0.0, axis=-1, dtype=tf.float32)
    # p_t: predicted probability of the true class
    pt = tf.reduce_sum(tf.multiply(pred, label_one_hot), axis=1)
    gamma_tf = tf.scalar_mul(gamma, tf.ones_like(pt, tf.float32))
    # Per-sample alpha weight: 1 - alpha for background (label 0), alpha otherwise.
    # (The original used tf.map_fn with a Python conditional on a tensor, which
    # always takes the else branch; tf.where is the working equivalent.)
    alpha_tf = tf.where(tf.equal(label, 0),
                        tf.fill(tf.shape(pt), 1.0 - alpha),
                        tf.fill(tf.shape(pt), alpha))
    cls_loss = alpha_tf * (-1.0 * tf.pow(1 - pt, gamma_tf) * tf.log(pt))
    return cls_loss
Example 14: focal_loss_sigmoid
# Requires: import tensorflow as tf (or: from tensorflow import scalar_mul)
def focal_loss_sigmoid(self, pred, label, num_class=20, alpha=0.25, gamma=2.0):
    # label is 1-based; shift to 0-based for the one-hot encoding
    label_one_hot = tf.one_hot(indices=label - 1, depth=num_class, on_value=1.0,
                               off_value=0.0, axis=-1, dtype=tf.float32)
    # p_t: sigmoid probability of the true class
    pt = tf.where(tf.equal(label_one_hot, 1.0), pred, 1.0 - pred)
    gamma_tf = tf.scalar_mul(gamma, tf.ones_like(pt, tf.float32))
    # alpha_t: alpha for positives, 1 - alpha for negatives
    alpha_tf = tf.scalar_mul(alpha, tf.ones_like(pt, tf.float32))
    alpha_tf = tf.where(tf.equal(label_one_hot, 1.0), alpha_tf, 1.0 - alpha_tf)
    cls_loss = alpha_tf * (-1.0 * tf.pow(1 - pt, gamma_tf) * tf.log(pt))
    return cls_loss
Example 15: calculate_loss
# Requires: import tensorflow as tf (or: from tensorflow import scalar_mul)
def calculate_loss(self, predictions, labels, b=1.0, **unused_params):
    with tf.name_scope("loss_hinge"):
        float_labels = tf.cast(labels, tf.float32)
        all_zeros = tf.zeros(tf.shape(float_labels), dtype=tf.float32)
        all_ones = tf.ones(tf.shape(float_labels), dtype=tf.float32)
        # map {0, 1} labels to {-1, +1}
        sign_labels = tf.subtract(tf.scalar_mul(2, float_labels), all_ones)
        # hinge loss: max(0, b - y * y_hat)
        hinge_loss = tf.maximum(
            all_zeros, tf.scalar_mul(b, all_ones) - sign_labels * predictions)
        return tf.reduce_mean(tf.reduce_sum(hinge_loss, 1))
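Unrolled by hand, this is the hinge loss max(0, b - y * y_hat) with labels mapped from {0, 1} to {-1, +1}; a toy check with illustrative values:

import tensorflow as tf

float_labels = tf.constant([[1., 0.]])
predictions = tf.constant([[0.8, -0.3]])
sign_labels = 2. * float_labels - 1.                    # [[ 1., -1.]]
hinge = tf.maximum(0., 1. - sign_labels * predictions)  # [[0.2, 0.7]]
loss = tf.reduce_mean(tf.reduce_sum(hinge, 1))          # 0.9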