This article collects typical usage examples of the tensorflow.multiply method in Python. If you have been wondering how to use tensorflow.multiply, or what idiomatic calls look like in practice, the curated examples below may help. You can also explore further usage examples from the tensorflow module that the method belongs to.
The following presents 15 code examples of tensorflow.multiply, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: _decay
# Required import: import tensorflow [as alias]
# Or: from tensorflow import multiply [as alias]
def _decay(self):
"""L2 weight decay loss."""
if self.decay_cost is not None:
return self.decay_cost
costs = []
if self.device_name is None:
for var in tf.trainable_variables():
if var.op.name.find(r'DW') > 0:
costs.append(tf.nn.l2_loss(var))
else:
for layer in self.layers:
for var in layer.params_device[self.device_name].values():
if (isinstance(var, tf.Variable) and
var.op.name.find(r'DW') > 0):
costs.append(tf.nn.l2_loss(var))
self.decay_cost = tf.multiply(self.hps.weight_decay_rate,
tf.add_n(costs))
return self.decay_cost
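A minimal standalone sketch of the same pattern, assuming TensorFlow 1.x graph mode (the variable names and decay rate below are hypothetical): collect tf.nn.l2_loss over every trainable variable whose name contains 'DW', then scale the sum with tf.multiply.

import tensorflow as tf

# Hypothetical weights; only names containing 'DW' are penalized, as in _decay.
w1 = tf.get_variable('conv1/DW', shape=[3, 3, 1, 8])
w2 = tf.get_variable('conv2/DW', shape=[3, 3, 8, 16])
bias = tf.get_variable('conv1/bias', shape=[8])  # no 'DW' -> excluded

costs = [tf.nn.l2_loss(v) for v in tf.trainable_variables()
         if v.op.name.find('DW') > 0]
decay_cost = tf.multiply(2e-4, tf.add_n(costs))  # assumed weight_decay_rate

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(decay_cost))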
Example 2: _variable_with_weight_decay
# Required import: import tensorflow [as alias]
# Or: from tensorflow import multiply [as alias]
def _variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = _variable_on_cpu(
name,
shape,
tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
Example 3: _variable_with_weight_decay
# Required import: import tensorflow [as alias]
# Or: from tensorflow import multiply [as alias]
def _variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
var = _variable_on_cpu(name, shape,
tf.truncated_normal_initializer(stddev=stddev))
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
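Examples 2 and 3 both rely on a _variable_on_cpu helper (and Example 2 additionally on FLAGS) from the surrounding CIFAR-10-style code, which is not shown here. A hedged usage sketch with a minimal stand-in, calling the FLAGS-free variant above:

import tensorflow as tf

def _variable_on_cpu(name, shape, initializer):
    # Minimal stand-in for the helper the examples above assume.
    with tf.device('/cpu:0'):
        return tf.get_variable(name, shape, initializer=initializer)

weights = _variable_with_weight_decay('weights', shape=[5, 5, 3, 64],
                                      stddev=5e-2, wd=0.004)
# tf.multiply pushed the scaled L2 penalty into the 'losses' collection;
# a training loop typically adds it on top of the data loss:
total_weight_decay = tf.add_n(tf.get_collection('losses'))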
Example 4: l1_regularizer
# Required import: import tensorflow [as alias]
# Or: from tensorflow import multiply [as alias]
def l1_regularizer(weight=1.0, scope=None):
"""Define a L1 regularizer.
Args:
weight: scale the loss by this factor.
scope: Optional scope for name_scope.
Returns:
a regularizer function.
"""
def regularizer(tensor):
with tf.name_scope(scope, 'L1Regularizer', [tensor]):
l1_weight = tf.convert_to_tensor(weight,
dtype=tensor.dtype.base_dtype,
name='weight')
return tf.multiply(l1_weight, tf.reduce_sum(tf.abs(tensor)), name='value')
return regularizer
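A quick usage sketch under the same TF1 assumptions: the returned closure maps a tensor to a scaled L1 penalty.

import tensorflow as tf

weights = tf.constant([[1.0, -2.0], [3.0, -4.0]])
penalty = l1_regularizer(weight=0.01)(weights)

with tf.Session() as sess:
    print(sess.run(penalty))  # 0.01 * (1 + 2 + 3 + 4) = 0.1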
Example 5: l2_regularizer
# Required import: import tensorflow [as alias]
# Or: from tensorflow import multiply [as alias]
def l2_regularizer(weight=1.0, scope=None):
"""Define a L2 regularizer.
Args:
weight: scale the loss by this factor.
scope: Optional scope for name_scope.
Returns:
a regularizer function.
"""
def regularizer(tensor):
with tf.name_scope(scope, 'L2Regularizer', [tensor]):
l2_weight = tf.convert_to_tensor(weight,
dtype=tensor.dtype.base_dtype,
name='weight')
return tf.multiply(l2_weight, tf.nn.l2_loss(tensor), name='value')
return regularizer
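Usage mirrors the L1 case. One detail worth knowing: tf.nn.l2_loss computes sum(t ** 2) / 2, so the factor of one half is already baked into the penalty.

import tensorflow as tf

weights = tf.constant([3.0, 4.0])
penalty = l2_regularizer(weight=0.1)(weights)

with tf.Session() as sess:
    print(sess.run(penalty))  # 0.1 * (9 + 16) / 2 = 1.25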
Example 6: l1_l2_regularizer
# Required import: import tensorflow [as alias]
# Or: from tensorflow import multiply [as alias]
def l1_l2_regularizer(weight_l1=1.0, weight_l2=1.0, scope=None):
"""Define a L1L2 regularizer.
Args:
weight_l1: scale the L1 loss by this factor.
weight_l2: scale the L2 loss by this factor.
scope: Optional scope for name_scope.
Returns:
a regularizer function.
"""
def regularizer(tensor):
with tf.name_scope(scope, 'L1L2Regularizer', [tensor]):
weight_l1_t = tf.convert_to_tensor(weight_l1,
dtype=tensor.dtype.base_dtype,
name='weight_l1')
weight_l2_t = tf.convert_to_tensor(weight_l2,
dtype=tensor.dtype.base_dtype,
name='weight_l2')
reg_l1 = tf.multiply(weight_l1_t, tf.reduce_sum(tf.abs(tensor)),
name='value_l1')
reg_l2 = tf.multiply(weight_l2_t, tf.nn.l2_loss(tensor),
name='value_l2')
return tf.add(reg_l1, reg_l2, name='value')
return regularizer
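The combined regularizer is simply the elastic-net sum of the two penalties above; a short sketch under the same assumptions:

import tensorflow as tf

weights = tf.constant([3.0, -4.0])
penalty = l1_l2_regularizer(weight_l1=0.01, weight_l2=0.1)(weights)

with tf.Session() as sess:
    print(sess.run(penalty))  # 0.01 * (3 + 4) + 0.1 * (9 + 16) / 2 = 1.32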
Example 7: l1_loss
# Required import: import tensorflow [as alias]
# Or: from tensorflow import multiply [as alias]
def l1_loss(tensor, weight=1.0, scope=None):
"""Define a L1Loss, useful for regularize, i.e. lasso.
Args:
tensor: tensor to regularize.
weight: scale the loss by this factor.
scope: Optional scope for name_scope.
Returns:
the L1 loss op.
"""
with tf.name_scope(scope, 'L1Loss', [tensor]):
weight = tf.convert_to_tensor(weight,
dtype=tensor.dtype.base_dtype,
name='loss_weight')
loss = tf.multiply(weight, tf.reduce_sum(tf.abs(tensor)), name='value')
tf.add_to_collection(LOSSES_COLLECTION, loss)
return loss
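LOSSES_COLLECTION is a module-level constant in the original code and is not shown; the sketch below assumes a plausible value. The point of the collection is that every loss term registered this way can be summed later:

import tensorflow as tf

LOSSES_COLLECTION = '_losses'  # assumption: defined elsewhere in the module

activations = tf.constant([0.5, -1.5, 2.0])
loss = l1_loss(activations, weight=0.1)
regularization = tf.add_n(tf.get_collection(LOSSES_COLLECTION))

with tf.Session() as sess:
    print(sess.run([loss, regularization]))  # [0.4, 0.4]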
Example 8: l2_loss
# Required import: import tensorflow [as alias]
# Or: from tensorflow import multiply [as alias]
def l2_loss(tensor, weight=1.0, scope=None):
"""Define a L2Loss, useful for regularize, i.e. weight decay.
Args:
tensor: tensor to regularize.
weight: an optional weight to modulate the loss.
scope: Optional scope for name_scope.
Returns:
the L2 loss op.
"""
with tf.name_scope(scope, 'L2Loss', [tensor]):
weight = tf.convert_to_tensor(weight,
dtype=tensor.dtype.base_dtype,
name='loss_weight')
loss = tf.multiply(weight, tf.nn.l2_loss(tensor), name='value')
tf.add_to_collection(LOSSES_COLLECTION, loss)
return loss
Example 9: dense
# Required import: import tensorflow [as alias]
# Or: from tensorflow import multiply [as alias]
def dense(x, size, name, weight_init=None, bias_init=0, weight_loss_dict=None, reuse=None):
with tf.variable_scope(name, reuse=reuse):
assert (len(tf.get_variable_scope().name.split('/')) == 2)
w = tf.get_variable("w", [x.get_shape()[1], size], initializer=weight_init)
b = tf.get_variable("b", [size], initializer=tf.constant_initializer(bias_init))
weight_decay_fc = 3e-4
        if weight_loss_dict is not None:
            weight_decay = tf.multiply(tf.nn.l2_loss(w), weight_decay_fc, name='weight_decay_loss')
            weight_loss_dict[w] = weight_decay_fc
            weight_loss_dict[b] = 0.0
            tf.add_to_collection(tf.get_variable_scope().name.split('/')[0] + '_' + 'losses', weight_decay)
return tf.nn.bias_add(tf.matmul(x, w), b)
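Note the assert: dense insists on being called exactly one variable scope deep, so its weight-decay term lands in a collection named after the outer scope. A hedged usage sketch ('pi' and 'fc1' are hypothetical names):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 128])
weight_loss_dict = {}

with tf.variable_scope('pi'):  # outer scope satisfies the depth-2 assert
    h = dense(x, 64, name='fc1',
              weight_init=tf.truncated_normal_initializer(stddev=0.01),
              weight_loss_dict=weight_loss_dict)

# The tf.multiply weight-decay term was collected under 'pi_losses'.
decay_terms = tf.get_collection('pi_losses')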
Example 10: mean_iou
# Required import: import tensorflow [as alias]
# Or: from tensorflow import multiply [as alias]
def mean_iou(y_true, y_pred, cls_num=CLS_NUM):
result = 0
nc = tf.cast(tf.shape(y_true)[-1], tf.float32)
for i in range(cls_num):
        # nii = number of pixels of class i predicted to belong to class i
nii = tf.reduce_sum(tf.round(tf.multiply(
y_true[:, :, :, i], y_pred[:, :, :, i])))
ti = tf.reduce_sum(y_true[:, :, :, i]) # number of pixels of class i
loc_sum = 0
for j in range(cls_num):
            # number of pixels of class j predicted to belong to class i
nji = tf.reduce_sum(tf.round(tf.multiply(
y_true[:, :, :, j], y_pred[:, :, :, i])))
loc_sum += nji
result += nii / (ti - nii + loc_sum)
return (1 / nc) * result
Author: JACKYLUO1991, Project: Face-skin-hair-segmentaiton-and-skin-color-evaluation, Lines: 18, Source: metric.py
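CLS_NUM is a constant from the original module. A toy sanity check (TF1 graph mode): with a perfect one-hot prediction every per-class IoU is 1, so the mean is 1.0.

import tensorflow as tf

labels = tf.constant([[[0, 1], [1, 2]]])  # (1, 2, 2) class indices
y_true = tf.one_hot(labels, depth=3)      # (1, 2, 2, 3)
y_pred = y_true                           # perfect prediction
miou = mean_iou(y_true, y_pred, cls_num=3)

with tf.Session() as sess:
    print(sess.run(miou))  # 1.0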
Example 11: testRandomFlipBoxes
# Required import: import tensorflow [as alias]
# Or: from tensorflow import multiply [as alias]
def testRandomFlipBoxes(self):
boxes = self.createTestBoxes()
# Case where the boxes are flipped.
boxes_expected1 = self.expectedBoxesAfterMirroring()
# Case where the boxes are not flipped.
boxes_expected2 = boxes
# After elementwise multiplication, the result should be all-zero since one
# of them is all-zero.
boxes_diff = tf.multiply(
tf.squared_difference(boxes, boxes_expected1),
tf.squared_difference(boxes, boxes_expected2))
expected_result = tf.zeros_like(boxes_diff)
with self.test_session() as sess:
(boxes_diff, expected_result) = sess.run([boxes_diff, expected_result])
self.assertAllEqual(boxes_diff, expected_result)
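The trick in this test stands on its own: the elementwise product of two squared differences is all-zero exactly when every element of the tensor matches at least one of the two candidates. A stripped-down sketch:

import tensorflow as tf

a = tf.constant([[0.0, 1.0], [2.0, 3.0]])
b1 = tf.constant([[0.0, 9.0], [2.0, 9.0]])  # matches a in column 0
b2 = tf.constant([[9.0, 1.0], [9.0, 3.0]])  # matches a in column 1
diff = tf.multiply(tf.squared_difference(a, b1),
                   tf.squared_difference(a, b2))

with tf.Session() as sess:
    print(sess.run(diff))  # all zeros: every element matches b1 or b2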
Example 12: get_metrics
# Required import: import tensorflow [as alias]
# Or: from tensorflow import multiply [as alias]
def get_metrics(predictions, true_predictions, no_turns, mask, num_slots):
mask = tf.reshape(mask, [-1, num_slots])
correct_prediction = tf.cast(tf.equal(predictions, true_predictions), "float32") * mask
num_positives = tf.reduce_sum(true_predictions)
classified_positives = tf.reduce_sum(predictions)
true_positives = tf.multiply(predictions, true_predictions)
num_true_positives = tf.reduce_sum(true_positives)
recall = num_true_positives / num_positives
precision = num_true_positives / classified_positives
f_score = (2 * recall * precision) / (recall + precision)
accuracy = tf.reduce_sum(correct_prediction) / (tf.cast(tf.reduce_sum(no_turns), dtype="float32") * num_slots)
return precision, recall, f_score, accuracy
# Source: main.py
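A toy run, assuming TF1 (the shapes follow the function's reshape to [-1, num_slots]): tf.multiply picks out the true positives elementwise.

import tensorflow as tf

predictions = tf.constant([[1.0, 0.0, 1.0]])
true_predictions = tf.constant([[1.0, 1.0, 0.0]])
mask = tf.ones([1, 3])
no_turns = tf.constant([1])

p, r, f, acc = get_metrics(predictions, true_predictions, no_turns,
                           mask, num_slots=3)

with tf.Session() as sess:
    # 1 true positive out of 2 real and 2 predicted positives:
    print(sess.run([p, r, f]))  # [0.5, 0.5, 0.5]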
Example 13: log_likelihood
# Required import: import tensorflow [as alias]
# Or: from tensorflow import multiply [as alias]
def log_likelihood(mu, var, x, muq, varq, a, mask_flat, config):
if config.out_distr == 'bernoulli':
log_lik = log_bernoulli(x, mu, eps=1e-6) # (bs*L, d1*d2)
elif config.out_distr == 'gaussian':
log_lik = log_gaussian(x, mu, var)
log_lik = tf.reduce_sum(log_lik, 1) # (bs*L, )
log_lik = tf.multiply(mask_flat, log_lik)
# TODO: dropout scales the output as input/keep_prob. Issue?
if config.ll_keep_prob < 1.0:
log_lik = tf.layers.dropout(log_lik, config.ll_keep_prob)
# We compute the log-likelihood *per frame*
num_el = tf.reduce_sum(mask_flat)
log_px_given_a = tf.truediv(tf.reduce_sum(log_lik), num_el) # ()
if config.use_vae:
log_qa_given_x = tf.reduce_sum(log_gaussian(a, muq, varq), 1) # (bs*L, )
log_qa_given_x = tf.multiply(mask_flat, log_qa_given_x)
log_qa_given_x = tf.truediv(tf.reduce_sum(log_qa_given_x), num_el) # ()
else:
log_qa_given_x = tf.constant(0.0, dtype=tf.float32, shape=())
LL = log_px_given_a - log_qa_given_x
return LL, log_px_given_a, log_qa_given_x
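The core pattern here, masking missing frames with tf.multiply and then dividing by the mask sum rather than the batch size, works in isolation (toy values, TF1):

import tensorflow as tf

mask_flat = tf.constant([1.0, 1.0, 0.0])     # third frame is missing
log_lik = tf.constant([-1.0, -3.0, -100.0])  # junk value gets masked out
masked = tf.multiply(mask_flat, log_lik)
per_frame = tf.truediv(tf.reduce_sum(masked), tf.reduce_sum(mask_flat))

with tf.Session() as sess:
    print(sess.run(per_frame))  # -2.0: mean over the present frames only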
Example 14: compute_forwards
# Required import: import tensorflow [as alias]
# Or: from tensorflow import multiply [as alias]
def compute_forwards(self, reuse=None):
"""Compute the forward step in the Kalman filter.
        The forward pass is initialized with p(z_1) = N(self.mu, self.Sigma).
        We then return the means and covariances of the predictive distribution
        p(z_t | z_{t-1}, u_t), t = 2, ..., T+1, and of the filtering
        distribution p(z_t | x_{1:t}, u_{1:t}), t = 1, ..., T.
        We follow the notation of Murphy's book, Section 18.3.1.
        """
# To make sure we are not accidentally using the real outputs in the steps with missing values, set them to 0.
y_masked = tf.multiply(tf.expand_dims(self.mask, 2), self.y)
inputs = tf.concat([y_masked, self.u, tf.expand_dims(self.mask, 2)], axis=2)
y_prev = tf.expand_dims(self.y_0, 0) # (1, dim_y)
y_prev = tf.tile(y_prev, (tf.shape(self.mu)[0], 1))
        alpha, state, u, buffer = self.alpha(y_prev, self.state, self.u[:, 0], init_buffer=True, reuse=reuse)
        # dummy matrices to initialize A, B and C in scan
dummy_init_A = tf.ones([self.Sigma.get_shape()[0], self.dim_z, self.dim_z])
dummy_init_B = tf.ones([self.Sigma.get_shape()[0], self.dim_z, self.dim_u])
dummy_init_C = tf.ones([self.Sigma.get_shape()[0], self.dim_y, self.dim_z])
forward_states = tf.scan(self.forward_step_fn, tf.transpose(inputs, [1, 0, 2]),
initializer=(self.mu, self.Sigma, self.mu, self.Sigma, alpha, u, state, buffer,
dummy_init_A, dummy_init_B, dummy_init_C),
parallel_iterations=1, name='forward')
return forward_states
Example 15: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import multiply [as alias]
def __init__(self, state_size, action_size, lr, n_h1=400, n_h2=300, tau=0.001):
self.state_size = state_size
self.action_size = action_size
self.optimizer = tf.train.AdamOptimizer(lr)
self.tau = tau
self.n_h1 = n_h1
self.n_h2 = n_h2
self.input_s, self.action, self.critic_variables, self.q_value = self._build_network("critic")
self.input_s_target, self.action_target, self.critic_variables_target, self.q_value_target = self._build_network("critic_target")
self.target = tf.placeholder(tf.float32, [None])
self.l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in self.critic_variables])
self.loss = tf.reduce_mean(tf.square(self.target - self.q_value)) + 0.01*self.l2_loss
self.optimize = self.optimizer.minimize(self.loss)
        self.update_target_op = [
            self.critic_variables_target[i].assign(
                tf.multiply(self.critic_variables[i], self.tau) +
                tf.multiply(self.critic_variables_target[i], 1 - self.tau))
            for i in range(len(self.critic_variables))]
self.action_gradients = tf.gradients(self.q_value, self.action)
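The update_target_op line is a soft ("Polyak") target update, target <- tau * online + (1 - tau) * target, built from two tf.multiply calls. A standalone sketch of just that piece (TF1, hypothetical variable names):

import tensorflow as tf

tau = 0.001
online = tf.get_variable('online_w', initializer=tf.ones([2, 2]))
target = tf.get_variable('target_w', initializer=tf.zeros([2, 2]))
soft_update = target.assign(tf.multiply(online, tau) +
                            tf.multiply(target, 1.0 - tau))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(soft_update))  # each entry moved from 0.0 to 0.001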