This article collects typical usage examples of the tensorflow.sub method in Python. If you have been wondering what tensorflow.sub does, how to call it, or where it is used in practice, the curated code examples below may help. You can also explore further usage examples from the tensorflow module that the method belongs to.
The following shows 15 code examples of tensorflow.sub, sorted by popularity by default. You can vote up the examples you like or find useful; your votes help the system recommend better Python code examples.
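Before the examples, a minimal sketch of the operation itself may be useful. Note that tf.sub belongs to the pre-1.0 TensorFlow API; later releases spell the same element-wise subtraction tf.subtract (or simply the - operator). The snippet below is therefore only a sketch and assumes an old TensorFlow version that still exports tf.sub.
# Minimal element-wise subtraction with tf.sub (pre-1.0 API; tf.subtract in later versions).
import tensorflow as tf

a = tf.constant([3.0, 5.0, 7.0])
b = tf.constant([1.0, 2.0, 3.0])
diff = tf.sub(a, b)          # equivalent to a - b

with tf.Session() as sess:
    print(sess.run(diff))    # [2. 3. 4.]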
Example 1: test_binary_ops_combined
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sub [as alias]
def test_binary_ops_combined(self):
    # computation
    a = tf.placeholder(tf.float32, shape=(2, 3))
    b = tf.placeholder(tf.float32, shape=(2, 3))
    c = tf.add(a, b)
    d = tf.mul(c, a)
    e = tf.div(d, b)
    f = tf.sub(a, e)
    g = tf.maximum(a, f)
    # value
    a_val = np.random.rand(*tf_obj_shape(a))
    b_val = np.random.rand(*tf_obj_shape(b))
    # test
    self.run(g, tf_feed_dict={a: a_val, b: b_val})
Example 2: class_balanced_binary_class_cross_entropy
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sub [as alias]
def class_balanced_binary_class_cross_entropy(pred, label, name='cross_entropy_loss'):
    """
    The class-balanced cross entropy loss for binary classification,
    as in `Holistically-Nested Edge Detection
    <http://arxiv.org/abs/1504.06375>`_.

    :param pred: size: b x ANYTHING. the predictions in [0,1].
    :param label: size: b x ANYTHING. the ground truth in {0,1}.
    :returns: class-balanced binary classification cross entropy loss
    """
    z = batch_flatten(pred)
    y = tf.cast(batch_flatten(label), tf.float32)

    count_neg = tf.reduce_sum(1. - y)
    count_pos = tf.reduce_sum(y)
    beta = count_neg / (count_neg + count_pos)

    eps = 1e-8
    loss_pos = -beta * tf.reduce_mean(y * tf.log(tf.abs(z) + eps), 1)
    loss_neg = (1. - beta) * tf.reduce_mean((1. - y) * tf.log(tf.abs(1. - z) + eps), 1)

    cost = tf.sub(loss_pos, loss_neg)
    cost = tf.reduce_mean(cost, name=name)
    return cost
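A brief usage sketch for orientation: it assumes a pre-1.0 TensorFlow runtime (where tf.sub still exists) and that tensorpack's batch_flatten is importable; the placeholder shapes and names below are invented for illustration.
# Hypothetical usage of class_balanced_binary_class_cross_entropy (pre-1.0 TF API).
import tensorflow as tf

logits = tf.placeholder(tf.float32, [None, 256, 256, 1])   # illustrative edge-map logits
labels = tf.placeholder(tf.float32, [None, 256, 256, 1])   # binary ground truth in {0, 1}
pred = tf.nn.sigmoid(logits)                                # predictions in [0, 1]
loss = class_balanced_binary_class_cross_entropy(pred, labels, name='edge_loss')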
Example 3: _activation_summary
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sub [as alias]
def _activation_summary(x):
    """Helper to create summaries for activations.

    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.

    Args:
        x: Tensor
    Returns:
        nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on tensorboard.
    # Note: re.sub here is Python's regular-expression substitution, not tensorflow.sub.
    tensor_name = re.sub('%s_[0-9]*/' % LSPGlobals.TOWER_NAME, '', x.op.name)
    tf.histogram_summary(tensor_name + '/activations', x)
    tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
Example 4: loss
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sub [as alias]
def loss(logits, labels):
    """Calculates Mean Pixel Error.

    Args:
        logits: Logits from inference().
        labels: Labels from distorted_inputs or inputs(). 1-D tensor
                of shape [batch_size]

    Returns:
        Loss tensor of type float.
    """
    labelValidity = tf.sign(labels, name='label_validity')
    minop = tf.sub(logits, labels, name='Diff_Op')
    absop = tf.abs(minop, name='Abs_Op')
    lossValues = tf.mul(labelValidity, absop, name='lossValues')
    loss_mean = tf.reduce_mean(lossValues, name='MeanPixelError')
    tf.add_to_collection('losses', loss_mean)
    return tf.add_n(tf.get_collection('losses'), name='total_loss'), loss_mean
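In this loss, tf.sign(labels) acts as a validity mask: entries whose label is 0 contribute nothing to the mean pixel error. Below is a tiny sketch of the same masking pattern, with made-up values and the pre-1.0 op names, purely to illustrate the idea.
# Masked-L1 sketch (pre-1.0 TF API); a label of 0 marks an invalid entry.
import tensorflow as tf

logits = tf.constant([10.0, 20.0, 30.0])
labels = tf.constant([12.0, 0.0, 33.0])
masked_l1 = tf.mul(tf.sign(labels), tf.abs(tf.sub(logits, labels)))
mean_pixel_error = tf.reduce_mean(masked_l1)

with tf.Session() as sess:
    print(sess.run(mean_pixel_error))   # (|10-12| + 0 + |30-33|) / 3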
Example 5: __init__
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sub [as alias]
def __init__(self):
    self.x = tf.placeholder(tf.float32, [None, 115, 200, 3])
    self.y_ = tf.placeholder(tf.float32, [None, 1])

    (self.h_conv1, _) = conv_layer(self.x, conv=(5, 5), stride=2, n_filters=24, use_bias=True)
    (self.h_conv2, _) = conv_layer(self.h_conv1, conv=(5, 5), stride=2, n_filters=36, use_bias=True)
    (self.h_conv3, _) = conv_layer(self.h_conv2, conv=(5, 5), stride=2, n_filters=48, use_bias=True)
    (self.h_conv4, _) = conv_layer(self.h_conv3, conv=(3, 3), stride=1, n_filters=64, use_bias=True)
    (self.h_conv5, _) = conv_layer(self.h_conv4, conv=(3, 3), stride=1, n_filters=64, use_bias=True)

    self.h_conv5_flat = flattened(self.h_conv5)

    (self.h_fc1_drop, _, _, self.keep_prob_fc1) = fc_layer(x=self.h_conv5_flat, n_neurons=512, activation=tf.nn.relu, use_bias=True, dropout=True)
    (self.h_fc2_drop, _, _, self.keep_prob_fc2) = fc_layer(self.h_fc1_drop, 100, tf.nn.relu, True, True)
    (self.h_fc3_drop, _, _, self.keep_prob_fc3) = fc_layer(self.h_fc2_drop, 50, tf.nn.relu, True, True)
    (self.h_fc4_drop, _, _, self.keep_prob_fc4) = fc_layer(self.h_fc3_drop, 10, tf.nn.relu, True, True)

    W_fc5 = weight_variable([10, 1])
    b_fc5 = bias_variable([1])
    self.y_out = tf.matmul(self.h_fc4_drop, W_fc5) + b_fc5

    # L1 (mean absolute error) regression loss on the single output value.
    self.loss = tf.reduce_mean(tf.abs(tf.sub(self.y_, self.y_out)))
Example 6: tf_mse
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sub [as alias]
def tf_mse(a, b, reduction_indices=None, name='mse'):
    """
    Mean squared error for TensorFlow.

    :param a: First input tensor
    :type a: tf.Tensor
    :param b: Second input tensor
    :type b: tf.Tensor
    :param reduction_indices: Dimensions to reduce. If None, all dimensions are reduced.
    :type reduction_indices: List or None
    :param name: Variable scope name
    :type name: String
    :returns: MSE between a and b
    :rtype: tf.Tensor
    """
    with tf.variable_scope(name):
        return tf.reduce_mean(tf.pow(tf.sub(a, b), 2),
                              reduction_indices=reduction_indices)
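A short usage sketch, again assuming a pre-1.0 TensorFlow runtime; the constant tensors are made up for illustration.
# Hypothetical usage of tf_mse (pre-1.0 TF API).
import tensorflow as tf

a = tf.constant([[1.0, 2.0], [3.0, 4.0]])
b = tf.constant([[1.0, 0.0], [3.0, 8.0]])

mse_all = tf_mse(a, b)                           # scalar: mean over every element
mse_rows = tf_mse(a, b, reduction_indices=[1])   # per-row MSE

with tf.Session() as sess:
    print(sess.run([mse_all, mse_rows]))         # [5.0, array([2., 8.], dtype=float32)]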
Example 7: testFloatBasic
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sub [as alias]
def testFloatBasic(self):
    x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32)
    y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float32)
    self._compareBoth(x, y, np.add, tf.add, also_compare_variables=True)
    self._compareBoth(x, y, np.subtract, tf.sub)
    self._compareBoth(x, y, np.multiply, tf.mul)
    self._compareBoth(x, y + 0.1, np.true_divide, tf.truediv)
    self._compareBoth(x, y + 0.1, np.floor_divide, tf.floordiv)
    self._compareBoth(x, y, np.add, _ADD)
    self._compareBoth(x, y, np.subtract, _SUB)
    self._compareBoth(x, y, np.multiply, _MUL)
    self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
    self._compareBoth(x, y + 0.1, np.floor_divide, _FLOORDIV)
    try:
        from scipy import special  # pylint: disable=g-import-not-at-top
        a_pos_small = np.linspace(0.1, 2, 15).reshape(1, 3, 5).astype(np.float32)
        x_pos_small = np.linspace(0.1, 10, 15).reshape(1, 3, 5).astype(np.float32)
        self._compareBoth(a_pos_small, x_pos_small, special.gammainc, tf.igamma)
        self._compareBoth(a_pos_small, x_pos_small, special.gammaincc, tf.igammac)
        # Need x > 1
        self._compareBoth(x_pos_small + 1, a_pos_small, special.zeta, tf.zeta)
        n_small = np.arange(0, 15).reshape(1, 3, 5).astype(np.float32)
        self._compareBoth(n_small, x_pos_small, special.polygamma, tf.polygamma)
    except ImportError as e:
        tf.logging.warn("Cannot test special functions: %s" % str(e))
Example 8: testDoubleBasic
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sub [as alias]
def testDoubleBasic(self):
    x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float64)
    y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float64)
    self._compareBoth(x, y, np.add, tf.add)
    self._compareBoth(x, y, np.subtract, tf.sub)
    self._compareBoth(x, y, np.multiply, tf.mul)
    self._compareBoth(x, y + 0.1, np.true_divide, tf.truediv)
    self._compareBoth(x, y + 0.1, np.floor_divide, tf.floordiv)
    self._compareBoth(x, y, np.add, _ADD)
    self._compareBoth(x, y, np.subtract, _SUB)
    self._compareBoth(x, y, np.multiply, _MUL)
    self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
    self._compareBoth(x, y + 0.1, np.floor_divide, _FLOORDIV)
    try:
        from scipy import special  # pylint: disable=g-import-not-at-top
        a_pos_small = np.linspace(0.1, 2, 15).reshape(1, 3, 5).astype(np.float32)
        x_pos_small = np.linspace(0.1, 10, 15).reshape(1, 3, 5).astype(np.float32)
        self._compareBoth(a_pos_small, x_pos_small, special.gammainc, tf.igamma)
        self._compareBoth(a_pos_small, x_pos_small, special.gammaincc, tf.igammac)
    except ImportError as e:
        tf.logging.warn("Cannot test special functions: %s" % str(e))
Example 9: testInt32Basic
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sub [as alias]
def testInt32Basic(self):
    x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int32)
    y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int32)
    self._compareBoth(x, y, np.add, tf.add)
    self._compareBoth(x, y, np.subtract, tf.sub)
    self._compareBoth(x, y, np.multiply, tf.mul)
    self._compareBoth(x, y, np.true_divide, tf.truediv)
    self._compareBoth(x, y, np.floor_divide, tf.floordiv)
    self._compareBoth(x, y, np.mod, tf.mod)
    self._compareBoth(x, y, np.add, _ADD)
    self._compareBoth(x, y, np.subtract, _SUB)
    self._compareBoth(x, y, np.multiply, _MUL)
    self._compareBoth(x, y, np.true_divide, _TRUEDIV)
    self._compareBoth(x, y, np.floor_divide, _FLOORDIV)
    self._compareBoth(x, y, np.mod, _MOD)
    # _compareBoth tests on GPU only for floating point types, so test
    # _MOD for int32 on GPU by calling _compareGpu
    self._compareGpu(x, y, np.mod, _MOD)
Example 10: testCondIndexedSlicesDifferentTypes
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sub [as alias]
def testCondIndexedSlicesDifferentTypes(self):
    with self.test_session():
        values = tf.constant(10)
        i_32 = tf.convert_to_tensor(0, name="one", dtype=tf.int32)
        i_64 = tf.convert_to_tensor(0, name="one", dtype=tf.int64)
        x = tf.IndexedSlices(values, i_32)
        pred = tf.less(1, 2)
        fn1 = lambda: tf.IndexedSlices(tf.add(x.values, 1), i_32)
        fn2 = lambda: tf.IndexedSlices(tf.sub(x.values, 1), i_64)
        r = tf.cond(pred, fn1, fn2)

        val = r.values.eval()
        ind = r.indices.eval()
    self.assertTrue(check_op_order(x.values.graph))
    self.assertAllEqual(11, val)
    self.assertAllEqual(0, ind)
    self.assertTrue(ind.dtype == np.int64)
Example 11: testWhileCondGrad_UnknownShape
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sub [as alias]
def testWhileCondGrad_UnknownShape(self):
    with self.test_session() as sess:
        v = tf.placeholder(tf.float32)
        n = tf.convert_to_tensor(100.0, name="n")
        one = tf.convert_to_tensor(1.0, name="one")
        c = lambda x: tf.less(x, n)
        # pylint: disable=undefined-variable
        # for OSS build
        b = lambda x: tf.cond(tf.constant(True),
                              lambda: tf.square(x),
                              lambda: tf.sub(x, one))
        # pylint: enable=undefined-variable
        r = tf.while_loop(c, b, [v])
        r = tf.gradients(r, v)[0]
        r = sess.run(r, feed_dict={v: 2.0})
        self.assertAllClose(1024.0, r)
Example 12: _testStackWhileSwap
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sub [as alias]
def _testStackWhileSwap(self, use_gpu):
    with self.test_session(use_gpu=use_gpu):
        n = tf.constant(0)
        h = gen_data_flow_ops._stack(tf.float32, stack_name="foo")

        def c(x):
            return tf.less(x, 10)

        def b(x):
            with tf.control_dependencies([x]):
                a = tf.constant(np.ones(2000), dtype=tf.float32)
                v = gen_data_flow_ops._stack_push(h, a, swap_memory=True)
            with tf.control_dependencies([v]):
                return tf.add(x, 1)
        r = tf.while_loop(c, b, [n])

        v = tf.constant(np.zeros(2000), dtype=tf.float32)

        def c1(x, y):
            return tf.greater(x, 0)

        def b1(x, y):
            nx = tf.sub(x, 1)
            ny = y + gen_data_flow_ops._stack_pop(h, tf.float32)
            return [nx, ny]
        rx, ry = tf.while_loop(c1, b1, [r, v],
                               [r.get_shape(), tensor_shape.unknown_shape()])
        self.assertAllClose(np.ones(2000) * 10.0, ry.eval())
Example 13: cost
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sub [as alias]
def cost(training, classes, inputs, kernel_type="gaussian", C=1, gamma=1):
    """Returns the kernelised cost to be minimised."""
    beta = tf.Variable(tf.zeros([inputs, 1]))
    offset = tf.Variable(tf.zeros([1]))
    if kernel_type == "linear":
        kernel = linear_kernel(training, inputs, training, inputs)
    elif kernel_type == "gaussian":
        kernel = gaussian_kernel(training, inputs, training, inputs, gamma)
    x = tf.reshape(tf.div(tf.matmul(tf.matmul(
        beta, kernel, transpose_a=True), beta), tf.constant([2.0])), [1])
    y = tf.sub(tf.ones([1]), tf.mul(classes, tf.add(
        tf.matmul(kernel, beta, transpose_a=True), offset)))
    z = tf.mul(tf.reduce_sum(tf.reduce_max(
        tf.concat(1, [y, tf.zeros_like(y)]), reduction_indices=1)),
        tf.constant([C], dtype=tf.float32))
    cost = tf.add(x, z)
    return beta, offset, cost
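For orientation, the graph above assembles the primal objective of a kernel soft-margin SVM: one half of beta' K beta plus C times the summed hinge losses max(0, 1 - y_i((K' beta)_i + b)). The NumPy sketch below restates that quantity with invented inputs, purely to make the algebra concrete; it is not part of the original code.
# NumPy restatement of the objective built by cost() above (illustrative values only).
import numpy as np

K = np.array([[1.0, 0.2], [0.2, 1.0]])    # kernel matrix between training points
beta = np.array([[0.5], [-0.3]])          # coefficient vector
offset = 0.1
classes = np.array([[1.0], [-1.0]])       # labels in {-1, +1}
C = 1.0

regulariser = 0.5 * (beta.T @ K @ beta).item()
margins = 1.0 - classes * (K.T @ beta + offset)
hinge = np.maximum(margins, 0.0).sum()
objective = regulariser + C * hinge
print(objective)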
Example 14: calculatCA
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sub [as alias]
def calculatCA(_tp1, _tp2, size, _b_size):
    first = True
    tp1 = tf.split(0, _b_size, _tp1)
    tp2 = tf.split(0, _b_size, _tp2)
    for i in range(_b_size):
        input1 = tf.reshape(tp1[i], shape=[size, 1])
        input2 = tf.reshape(tp2[i], shape=[size, 1])
        upper = tf.matmul(tf.transpose(tf.sub(input1, tf.reduce_mean(input1))), tf.sub(input2, tf.reduce_mean(input2)))
        _tp1 = tf.reduce_sum(tf.mul(tf.sub(input1, tf.reduce_mean(input1)), tf.sub(input1, tf.reduce_mean(input1))))
        _tp2 = tf.reduce_sum(tf.mul(tf.sub(input2, tf.reduce_mean(input2)), tf.sub(input2, tf.reduce_mean(input2))))
        down = tf.sqrt(tf.mul(_tp1, _tp2))
        factor = tf.abs(tf.div(upper, down))
        if first:
            output = factor
            first = False
        else:
            output = tf.concat(1, [output, factor])
    return tf.transpose(output)
Example 15: calculatCA
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sub [as alias]
def calculatCA(_tp1, _tp2, size, _b_size):
    first = True
    tp1 = tf.split(0, _b_size, _tp1)
    tp2 = tf.split(0, _b_size, _tp2)
    for i in range(_b_size):
        input1 = tf.reshape(tp1[i], shape=[size, 1])
        input2 = tf.reshape(tp2[i], shape=[size, 1])
        upper = tf.matmul(tf.transpose(tf.sub(input1, tf.reduce_mean(input1))), tf.sub(input2, tf.reduce_mean(input2)))
        _tp1 = tf.reduce_sum(tf.mul(tf.sub(input1, tf.reduce_mean(input1)), tf.sub(input1, tf.reduce_mean(input1))))
        _tp2 = tf.reduce_sum(tf.mul(tf.sub(input2, tf.reduce_mean(input2)), tf.sub(input2, tf.reduce_mean(input2))))
        down = tf.sqrt(tf.mul(_tp1, _tp2))
        factor = tf.abs(tf.div(upper, down))
        if first:
            output = factor
            first = False
        else:
            output = tf.concat(1, [output, factor])
    return tf.transpose(output)
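The two calculatCA examples above compute, for every item in the batch, the absolute Pearson correlation coefficient between the corresponding pair of feature vectors. The small NumPy sketch below reproduces that statistic for a single made-up pair, as a sanity check of what each loop iteration builds.
# Absolute Pearson correlation for one pair (what each loop iteration above computes).
import numpy as np

v1 = np.array([1.0, 2.0, 3.0, 4.0])
v2 = np.array([2.0, 1.9, 4.1, 3.8])

upper = np.sum((v1 - v1.mean()) * (v2 - v2.mean()))
down = np.sqrt(np.sum((v1 - v1.mean()) ** 2) * np.sum((v2 - v2.mean()) ** 2))
print(abs(upper / down))    # matches np.abs(np.corrcoef(v1, v2)[0, 1])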