This article collects typical usage examples of the Python function tensorflow.squared_difference. If you are unsure what squared_difference does or how to use it, the curated code examples below should help.
Fifteen code examples of squared_difference are shown below, ordered by popularity.
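Before the examples, here is a minimal sketch of what tf.squared_difference computes: the elementwise value (x - y)**2, with the usual broadcasting rules for binary ops. The snippet uses the TF 1.x session API that the examples below assume.

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])
y = tf.constant([3.0, 2.0, 1.0])
z = tf.squared_difference(x, y)  # elementwise (x - y)**2

with tf.Session() as sess:
    print(sess.run(z))  # [4. 0. 4.]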
Example 1: create_cost_spacing
def create_cost_spacing(t, length, normalized=True):
    d = tf.sqrt(tf.reduce_sum(tf.square(t), reduction_indices=1))
    if normalized:
        s = t.get_shape().as_list()
        return tf.reduce_mean(tf.squared_difference(d, tf.constant(length / s[0], "float32")))
    else:
        return tf.reduce_mean(tf.squared_difference(d, tf.constant(length, "float32")))
Example 2: mean_squared_error
def mean_squared_error(output, target, is_mean=False):
    """Return the TensorFlow expression of mean-squared-error of two distributions.

    Parameters
    ----------
    output : 2D or 4D tensor.
    target : 2D or 4D tensor.
    is_mean : boolean
        If True, use ``tf.reduce_mean`` to reduce the per-sample loss; otherwise use ``tf.reduce_sum`` (default).

    References
    ------------
    - `Wiki Mean Squared Error <https://en.wikipedia.org/wiki/Mean_squared_error>`_
    """
    with tf.name_scope("mean_squared_error_loss"):
        if output.get_shape().ndims == 2:  # [batch_size, n_feature]
            if is_mean:
                mse = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, target), 1))
            else:
                mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), 1))
        elif output.get_shape().ndims == 4:  # [batch_size, w, h, c]
            if is_mean:
                mse = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, target), [1, 2, 3]))
            else:
                mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), [1, 2, 3]))
        return mse
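A hypothetical usage sketch (the placeholder names below are illustrative, not part of the original): for 2D inputs the is_mean flag only changes the per-sample reduction; both variants then average over the batch.

y_pred = tf.placeholder(tf.float32, [None, 10])
y_true = tf.placeholder(tf.float32, [None, 10])
mse_mean = mean_squared_error(y_pred, y_true, is_mean=True)  # mean over features, then over batch
mse_sum = mean_squared_error(y_pred, y_true)                 # sum over features, then mean over batch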
Example 3: arg_closest_anchor
def arg_closest_anchor(bboxes, anchors):
    """Find the closest anchor. Box format: [ymin, xmin, ymax, xmax]."""
    num_anchors = anchors.get_shape().as_list()[0]
    num_bboxes = tf.shape(bboxes)[0]

    _indices = tf.reshape(tf.range(num_bboxes), shape=[-1, 1])
    _indices = tf.reshape(tf.stack([_indices] * num_anchors, axis=1), shape=[-1, 1])
    bboxes_m = tf.gather_nd(bboxes, _indices)
    # bboxes_m = tf.Print(bboxes_m, [bboxes_m], "bboxes_m", summarize=100)

    anchors_m = tf.tile(anchors, [num_bboxes, 1])
    # anchors_m = tf.Print(anchors_m, [anchors_m], "anchors_m", summarize=100)

    square_dist = tf.squared_difference(bboxes_m[:, 0], anchors_m[:, 0]) + \
                  tf.squared_difference(bboxes_m[:, 1], anchors_m[:, 1]) + \
                  tf.squared_difference(bboxes_m[:, 2], anchors_m[:, 2]) + \
                  tf.squared_difference(bboxes_m[:, 3], anchors_m[:, 3])
    square_dist = tf.reshape(square_dist, shape=[num_bboxes, num_anchors])
    # square_dist = tf.Print(square_dist, [square_dist], "square_dist", summarize=100)

    indices = tf.arg_min(square_dist, dimension=1)
    return indices
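A hypothetical usage sketch (the tensors below are illustrative): each ground-truth box is matched to the index of its nearest anchor under squared distance in box-coordinate space.

anchors = tf.constant([[0., 0., 10., 10.],
                       [5., 5., 15., 15.]])
bboxes = tf.placeholder(tf.float32, [None, 4])  # [ymin, xmin, ymax, xmax]
nearest = arg_closest_anchor(bboxes, anchors)   # int64 indices, shape [num_bboxes]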
Example 4: LSGAN_losses
def LSGAN_losses(real, fake):
    d_real = tf.reduce_mean(tf.squared_difference(real, 1), name='d_real')
    d_fake = tf.reduce_mean(tf.square(fake), name='d_fake')
    d_loss = tf.multiply(d_real + d_fake, 0.5, name='d_loss')

    g_loss = tf.reduce_mean(tf.squared_difference(fake, 1), name='g_loss')
    add_moving_summary(g_loss, d_loss)
    return g_loss, d_loss
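In this least-squares GAN formulation, real and fake are the discriminator's outputs on real and generated samples: the discriminator is pushed toward 1 on real data and 0 on fakes, while the generator is pushed toward 1 on its own samples. A hypothetical wiring (discriminator, generator, and noise are illustrative names, not from the original):

real_score = discriminator(real_images)            # assumed helper, not part of the original
fake_score = discriminator(generator(noise))
g_loss, d_loss = LSGAN_losses(real_score, fake_score)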
Example 5: __init__
def __init__(self, sess, state, action, learning_rate, tau):
    self.sess = sess
    self.state_dim = len(state)
    self.action_dim = len(action)
    self.rate = learning_rate
    self.tau = tau

    # create critic network
    train_network = self.create_network('critic_train')
    self.inputs = train_network[0]
    self.actions = train_network[1]
    self.q_outputs = train_network[2]
    self.state_outputs = train_network[3]
    self.train_net = tf.get_collection(
        tf.GraphKeys.TRAINABLE_VARIABLES, scope=train_network[4]
    )

    # create target critic network
    target_network = self.create_network('critic_target')
    self.target_inputs = target_network[0]
    self.target_actions = target_network[1]
    self.target_q_outputs = target_network[2]
    self.target_state_outputs = target_network[3]
    self.target_net = tf.get_collection(
        tf.GraphKeys.TRAINABLE_VARIABLES, scope=target_network[4]
    )

    # op for updating target network with train network weights
    self.update = [self.target_net[i].assign(
        tf.multiply(self.train_net[i], self.tau) +
        tf.multiply(self.target_net[i], 1. - self.tau)
    )
        for i in range(len(self.target_net))]

    # define loss and optimization op
    self.state_prime = tf.placeholder(tf.float32, [None, self.state_dim])
    self.batch_state_loss = \
        tf.squared_difference(self.state_prime, self.state_outputs)
    self.state_loss = tf.reduce_mean(self.batch_state_loss)

    self.y = tf.placeholder(tf.float32, [None, 1])
    self.batch_q_loss = tf.squared_difference(self.y, self.q_outputs)
    self.q_loss = tf.reduce_mean(self.batch_q_loss)

    self.train_q = \
        tf.train.AdamOptimizer(self.rate).minimize(self.q_loss)
    self.train_state = \
        tf.train.AdamOptimizer(self.rate).minimize(self.state_loss)

    # get the gradient of the train net with respect to actions
    self.policy_gradient = tf.gradients(self.q_outputs, self.actions)
    self.explore_gradient = tf.gradients(self.state_loss, self.actions)

    # print total number of trainable parameters
    from functools import reduce
    print(sum(reduce(lambda x, y: x * y, v.get_shape().as_list())
              for v in self.train_net))
Example 6: build_model
def build_model(self):
    dense_masker01 = tf.sparse_tensor_to_dense(self.mask)
    dense_masker02 = tf.sparse_tensor_to_dense(self.mask1)
    dense_masker03 = tf.sparse_tensor_to_dense(self.mask2)

    with tf.name_scope('encoding'):
        encoding = tf.add(tf.sparse_tensor_dense_matmul(self.X, self.W), self.b, name='raw_values')
        encoded_values = self.enc_func(encoding, name='encoded_values') - self.enc_func(self.b)
        encoding1 = tf.add(tf.sparse_tensor_dense_matmul(self.X1, self.W), self.b, name='raw_values1')
        encoded_values1 = self.enc_func(encoding1, name='encoded_values1') - self.enc_func(self.b)
        encoding2 = tf.add(tf.sparse_tensor_dense_matmul(self.X2, self.W), self.b, name='raw_values2')
        encoded_values2 = self.enc_func(encoding2, name='encoded_values2') - self.enc_func(self.b)

    with tf.name_scope('decoding'):
        decoding = tf.nn.xw_plus_b(encoded_values, self.W_prime, self.b_prime)
        decoded_values = self.dec_func(decoding, name='decoded_values')
        decoding1 = tf.nn.xw_plus_b(encoded_values1, self.W_prime, self.b_prime)
        decoded_values1 = self.dec_func(decoding1, name='decoded_values1')
        decoding2 = tf.nn.xw_plus_b(encoded_values2, self.W_prime, self.b_prime)
        decoded_values2 = self.dec_func(decoding2, name='decoded_values2')
        masked_decoded_values = tf.multiply(dense_masker01, decoded_values)

    with tf.name_scope('training_process'):
        diff01 = tf.squared_difference(tf.sparse_tensor_to_dense(self.Y), decoded_values)
        diff02 = tf.squared_difference(tf.sparse_tensor_to_dense(self.Y1), decoded_values1)
        diff03 = tf.squared_difference(tf.sparse_tensor_to_dense(self.Y2), decoded_values2)
        L_R = tf.reduce_sum(tf.multiply(dense_masker01, diff01)) \
            + tf.reduce_sum(tf.multiply(dense_masker02, diff02)) \
            + tf.reduce_sum(tf.multiply(dense_masker03, diff03))
        L_T = tf.reduce_sum(tf.log(1 + tf.exp(
            tf.reduce_sum(tf.multiply(encoded_values, encoded_values2), 1)
            - tf.reduce_sum(tf.multiply(encoded_values, encoded_values1), 1))))

        error = L_R + self.alpha_enc * L_T

        reg = 0
        for param in self.params.items():
            reg += tf.nn.l2_loss(param[1]) * self.lambda_w
        loss = error + reg

    model_params = [p for p in self.params.values()]
    train_step = self._optimize(loss, model_params)

    tf.summary.scalar('error', error)
    tf.summary.scalar('loss', loss)
    for param in self.params.items():
        tf.summary.histogram(param[0], param[1])
    merged_summary = tf.summary.merge_all()

    return encoded_values, decoded_values, masked_decoded_values, error, loss, train_step, merged_summary
Example 7: create_cost_spacing
def create_cost_spacing(self, c, length, normalized=True):
    c_shape = c.get_shape().as_list()
    c1 = tf.slice(c, [1, 0], [-1, -1])
    c2 = tf.slice(c, [0, 0], [c_shape[0] - 1, -1])
    d = tf.sqrt(tf.reduce_sum(tf.squared_difference(c1, c2), reduction_indices=1))
    if normalized:
        return tf.reduce_mean(tf.squared_difference(d, tf.constant(length / (c_shape[0] - 1), "float32")))
    else:
        return tf.reduce_mean(tf.squared_difference(d, tf.constant(length, "float32")))
Example 8: TV_loss
def TV_loss(img, tv_weight):
    shape = tf.shape(img)  # the shape of img is (1, H, W, C)
    img_row_before = tf.slice(img, [0, 0, 0, 0], [-1, -1, shape[2] - 1, -1])
    img_row_after = tf.slice(img, [0, 0, 1, 0], [-1, -1, shape[2] - 1, -1])
    img_col_before = tf.slice(img, [0, 0, 0, 0], [-1, shape[1] - 1, -1, -1])
    img_col_after = tf.slice(img, [0, 1, 0, 0], [-1, shape[1] - 1, -1, -1])
    tv_loss = tv_weight * (tf.reduce_sum(tf.squared_difference(img_col_after, img_col_before)) +
                           tf.reduce_sum(tf.squared_difference(img_row_after, img_row_before)))
    return tv_loss
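This total-variation loss penalizes squared differences between horizontally and vertically adjacent pixels, encouraging spatially smooth images. A hypothetical usage sketch (the tensor and weight below are illustrative):

generated = tf.placeholder(tf.float32, [1, 224, 224, 3])  # (1, H, W, C) image
loss_tv = TV_loss(generated, tv_weight=1e-4)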
Example 9: GMM_M_Step
def GMM_M_Step(X, Gama, ClusterNo, name='GMM_Statistics', **kwargs):

    D, h, s = tf.split(X, [1, 1, 1], axis=3)

    WXd = tf.multiply(Gama, tf.tile(D, [1, 1, 1, ClusterNo]))
    WXa = tf.multiply(Gama, tf.tile(h, [1, 1, 1, ClusterNo]))
    WXb = tf.multiply(Gama, tf.tile(s, [1, 1, 1, ClusterNo]))

    S = tf.reduce_sum(tf.reduce_sum(Gama, axis=1), axis=1)
    S = tf.add(S, tf.contrib.keras.backend.epsilon())
    S = tf.reshape(S, [1, ClusterNo])

    M_d = tf.div(tf.reduce_sum(tf.reduce_sum(WXd, axis=1), axis=1), S)
    M_a = tf.div(tf.reduce_sum(tf.reduce_sum(WXa, axis=1), axis=1), S)
    M_b = tf.div(tf.reduce_sum(tf.reduce_sum(WXb, axis=1), axis=1), S)

    Mu = tf.split(tf.concat([M_d, M_a, M_b], axis=0), ClusterNo, 1)

    Norm_d = tf.squared_difference(D, tf.reshape(M_d, [1, ClusterNo]))
    Norm_h = tf.squared_difference(h, tf.reshape(M_a, [1, ClusterNo]))
    Norm_s = tf.squared_difference(s, tf.reshape(M_b, [1, ClusterNo]))

    WSd = tf.multiply(Gama, Norm_d)
    WSh = tf.multiply(Gama, Norm_h)
    WSs = tf.multiply(Gama, Norm_s)

    S_d = tf.sqrt(tf.div(tf.reduce_sum(tf.reduce_sum(WSd, axis=1), axis=1), S))
    S_h = tf.sqrt(tf.div(tf.reduce_sum(tf.reduce_sum(WSh, axis=1), axis=1), S))
    S_s = tf.sqrt(tf.div(tf.reduce_sum(tf.reduce_sum(WSs, axis=1), axis=1), S))

    Std = tf.split(tf.concat([S_d, S_h, S_s], axis=0), ClusterNo, 1)

    dist = list()
    for k in range(0, ClusterNo):
        dist.append(tf.contrib.distributions.MultivariateNormalDiag(
            tf.reshape(Mu[k], [1, 3]), tf.reshape(Std[k], [1, 3])))

    PI = tf.split(Gama, ClusterNo, axis=3)
    Prob0 = list()
    for k in range(0, ClusterNo):
        Prob0.append(tf.multiply(tf.squeeze(dist[k].prob(X)), tf.squeeze(PI[k])))

    Prob = tf.convert_to_tensor(Prob0, dtype=tf.float32)
    Prob = tf.minimum(tf.add(tf.reduce_sum(Prob, axis=0), tf.contrib.keras.backend.epsilon()),
                      tf.constant(1.0, tf.float32))
    Log_Prob = tf.negative(tf.log(Prob))
    Log_Likelihood = tf.reduce_mean(Log_Prob)

    return Log_Likelihood, Mu, Std
Example 10: _get_cost
def _get_cost(self, outputs):
    """Construct the cost function from the outputs of the last layer. This
    will be used through SGD to train the network.

    Parameters
    ----------
    outputs: tuple of tensors (n_out)
        a tuple of tensors containing the output from the last layer of the
        network

    Returns
    -------
    cost: a tensor computing the cost function of the network.
    reg: a tensor for computing regularization of the parameters.
        It should be None if no regularization is needed.
    """
    Zk, X, lmbd = outputs

    with tf.name_scope("reconstruction_zD"):
        rec = tf.matmul(Zk, tf.constant(self.D))

    with tf.name_scope("norm_2"):
        Er = tf.multiply(
            tf.constant(.5, dtype=tf.float32),
            tf.reduce_mean(tf.reduce_sum(tf.squared_difference(rec, X),
                                         reduction_indices=[1])))

    with tf.name_scope("norm_1"):
        l1 = lmbd * tf.reduce_mean(tf.reduce_sum(
            tf.abs(Zk), reduction_indices=[1]))

    return tf.add(Er, l1, name="cost")
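The cost above is the usual sparse-coding (LASSO) objective: half the mean squared reconstruction error of the codes Zk against the dictionary D, plus an l1 penalty on the codes weighted by lmbd. An illustrative NumPy check of the same quantity (all array names and shapes are hypothetical):

import numpy as np

Zk = np.random.randn(8, 32).astype(np.float32)   # codes
D = np.random.randn(32, 64).astype(np.float32)   # dictionary
X = np.random.randn(8, 64).astype(np.float32)    # signals
lmbd = 0.1

Er = 0.5 * np.mean(np.sum((Zk.dot(D) - X) ** 2, axis=1))
l1 = lmbd * np.mean(np.sum(np.abs(Zk), axis=1))
cost = Er + l1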
Example 11: _compute_data_loss
def _compute_data_loss(self):
    if self.hparams.loss == "cross_entropy_loss":
        data_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=tf.reshape(self.logit, [-1]),
                labels=tf.reshape(self.iterator.labels, [-1]),
            )
        )
    elif self.hparams.loss == "square_loss":
        data_loss = tf.sqrt(
            tf.reduce_mean(
                tf.squared_difference(
                    tf.reshape(self.pred, [-1]),
                    tf.reshape(self.iterator.labels, [-1]),
                )
            )
        )
    elif self.hparams.loss == "log_loss":
        data_loss = tf.reduce_mean(
            tf.losses.log_loss(
                predictions=tf.reshape(self.pred, [-1]),
                labels=tf.reshape(self.iterator.labels, [-1]),
            )
        )
    else:
        raise ValueError("this loss not defined {0}".format(self.hparams.loss))
    return data_loss
Example 12: _build_model
def _build_model(self):
    # placeholders
    self.input = tf.placeholder(
        shape=[None, 84, 84, 4], dtype=tf.float32, name='inputs')
    self.actions = tf.placeholder(
        shape=[None], dtype=tf.int32, name='actions')
    self.next_input = tf.placeholder(
        shape=[None], dtype=tf.float32, name='next_inputs')

    qvals = []
    for i in range(self.k + 1):
        with tf.variable_scope('qnet-{}'.format(i)):
            qvals.append(self._net(self.input, i == 0))
    self.qvals = qvals[0]
    self.target_qvals = tf.stack(qvals[1:])

    trainable_variables = tf.trainable_variables('qnet-0/')
    batch_size = tf.shape(self.input)[0]
    gather_indices = tf.range(batch_size) * self.n_ac + self.actions
    action_q = tf.gather(tf.reshape(self.qvals, [-1]), gather_indices)

    self.loss = tf.reduce_mean(
        tf.squared_difference(self.next_input, action_q))
    self.max_qval = tf.reduce_max(self.qvals)

    self.train_op = self.optimizer.minimize(
        self.loss,
        global_step=tf.train.get_global_step(),
        var_list=trainable_variables)
    self.update_target_op = self._get_update_target_op()
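Here the next_input placeholder plays the role of the TD target, so squared_difference yields the squared TD error for the actions actually taken. A hypothetical training-step sketch, with the target computed outside the graph (all names are illustrative, not from the original):

# targets = rewards + gamma * np.max(target_q_values, axis=1) * (1.0 - dones)
# sess.run(model.train_op, feed_dict={model.input: states,
#                                     model.actions: actions,
#                                     model.next_input: targets})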
Example 13: _test_grads_images
def _test_grads_images(self,
                       interpolation='linear',
                       boundary='replicate',
                       ndim=2):
    if ndim == 2:
        test_image, input_shape = get_multiple_2d_images()
        test_target, target_shape = get_multiple_2d_targets()
        identity_affine = [[1., 0., 0., 0., 1., 0.]] * 4
    else:
        test_image, input_shape = get_multiple_3d_images()
        test_target, target_shape = get_multiple_3d_targets()
        identity_affine = [[1., 0., 0., 0., 1., 0.,
                            1., 0., 0., 0., 1., 0.]] * 4
    affine_var = tf.get_variable('affine', initializer=identity_affine)
    grid = AffineGridWarperLayer(source_shape=input_shape[1:-1],
                                 output_shape=target_shape[1:-1],
                                 constraints=None)
    warp_coords = grid(affine_var)
    resampler = ResamplerLayer(interpolation, boundary=boundary)
    new_image = resampler(tf.constant(test_image, dtype=tf.float32),
                          warp_coords)

    diff = tf.reduce_mean(tf.squared_difference(
        new_image, tf.constant(test_target, dtype=tf.float32)))
    optimiser = tf.train.AdagradOptimizer(0.01)
    grads = optimiser.compute_gradients(diff)
    opt = optimiser.apply_gradients(grads)

    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        init_val, affine_val = sess.run([diff, affine_var])
        for _ in range(5):
            _, diff_val, affine_val = sess.run([opt, diff, affine_var])
            print('{}, {}'.format(diff_val, affine_val[0]))
        self.assertGreater(init_val, diff_val)
Example 14: drawGraph
def drawGraph(self, n_row, n_latent, n_col):
    # Note: this example uses the pre-1.0 TensorFlow API
    # (tf.mul, tf.scalar_summary, tf.merge_all_summaries, tf.all_variables).
    with tf.name_scope('matDecomp'):
        self._p = tf.placeholder(tf.float32, shape=[None, n_col])
        self._c = tf.placeholder(tf.float32, shape=[None, n_col])
        self._lambda = tf.placeholder(tf.float32)
        self._index = tf.placeholder(tf.float32, shape=[None, n_row])
        self._A = tf.Variable(tf.truncated_normal([n_row, n_latent]))
        self._B = tf.Variable(tf.truncated_normal([n_latent, n_col]))
        self._h = tf.matmul(tf.matmul(self._index, self._A), self._B)

        weighted_loss = tf.reduce_mean(tf.mul(self._c, tf.squared_difference(self._p, self._h)))
        self._weighted_loss = weighted_loss
        l2_A = tf.reduce_sum(tf.square(self._A))
        l2_B = tf.reduce_sum(tf.square(self._B))
        n_w = tf.constant(n_row * n_latent + n_latent * n_col, tf.float32)
        l2 = tf.truediv(tf.add(l2_A, l2_B), n_w)
        reg_term = tf.mul(self._lambda, l2)
        self._loss = tf.add(weighted_loss, reg_term)

        self._mask = tf.placeholder(tf.float32, shape=[n_row, n_col])
        one = tf.constant(1, tf.float32)
        pred = tf.cast(tf.greater_equal(tf.matmul(self._A, self._B), one), tf.float32)
        cor = tf.mul(tf.cast(tf.equal(pred, self._p), tf.float32), self._c)
        self._vali_err = tf.reduce_sum(tf.mul(cor, self._mask))

        self._saver = tf.train.Saver([v for v in tf.all_variables() if v.name.find('matDecomp') != -1])
        tf.scalar_summary('training_weighted_loss_l2', self._loss)
        tf.scalar_summary('validation_weighted_loss', self._weighted_loss)
        merged = tf.merge_all_summaries()
Example 15: _apply
def _apply(self, grad, var, indices=None):
    lr = tf.cast(self._learning_rate_tensor, var.dtype.base_dtype)
    m = self.get_slot(var, "m")
    v = self.get_slot(var, "v")
    beta1_t = tf.cast(self._beta1_t, var.dtype.base_dtype)
    beta2_t = tf.cast(self._beta2_t, var.dtype.base_dtype)
    epsilon_t = tf.cast(self._epsilon_t, var.dtype.base_dtype)

    # m_t = beta1 * m + (1 - beta1) * g_t
    m_scaled_g_values = grad * (1 - beta1_t)
    m_t = tf.assign(m, m * beta1_t, use_locking=self._use_locking)
    with tf.control_dependencies([m_t]):
        m_t = self._assign_add(m, updates=m_scaled_g_values, indices=indices)
        m_gathered = self._gather(m_t, indices=indices)

    # Also see tf.nn.moments.
    variance = tf.squared_difference(grad, m_gathered)

    # v_t = beta2 * v + (1 - beta2) * variance
    v_scaled_new_values = variance * (1 - beta2_t)
    v_t = tf.assign(v, v * beta2_t, use_locking=self._use_locking)
    with tf.control_dependencies([v_t]):
        v_t = self._assign_add(v, updates=v_scaled_new_values, indices=indices)
        v_gathered = self._gather(v_t, indices=indices)

    factor = v_gathered / (variance + epsilon_t)
    update = lr * grad * tf.minimum(factor, 1.0)
    var_update = self._assign_sub(ref=var, updates=update, indices=indices)
    return tf.group(*[var_update, m_t])