本文整理汇总了Python中tensorflow.divide方法的典型用法代码示例。如果您正苦于以下问题:Python tensorflow.divide方法的具体用法?Python tensorflow.divide怎么用?Python tensorflow.divide使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类tensorflow
的用法示例。
在下文中一共展示了tensorflow.divide方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: get_loss
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import divide [as 别名]
def get_loss(predicted_transformation, batch_size, template_pointclouds_pl, source_pointclouds_pl):
    """Chamfer loss between the template cloud and the transformed source cloud.

    Args:
        predicted_transformation: [batch_size, 7] tensor; columns 0-2 hold the
            translation, columns 3-6 an (unnormalized) quaternion.
        batch_size: static Python int batch size used for slicing.
        template_pointclouds_pl: target point-cloud tensor/placeholder.
        source_pointclouds_pl: source point-cloud tensor/placeholder.

    Returns:
        Chamfer-distance loss tensor.
    """
    with tf.variable_scope('loss') as LossEvaluation:
        # Split the 7-DoF pose into translation and quaternion parts.
        position = tf.slice(predicted_transformation, [0, 0], [batch_size, 3])
        quat = tf.slice(predicted_transformation, [0, 3], [batch_size, 4])
        # Normalize the quaternion; the tiny constant guards against a
        # zero-norm (degenerate) prediction.
        quat_norm = tf.sqrt(tf.reduce_sum(tf.square(quat), 1))
        quat_norm = tf.reshape(quat_norm, (batch_size, 1))
        eps = tf.constant(0.0000001, shape=(batch_size, 1), dtype=tf.float32)
        unit_quat = tf.divide(quat, tf.add(quat_norm, eps))
        # Apply the predicted rigid transform to the source cloud, then
        # compare against the template with the chamfer distance.
        transformed_cloud = helper.transformation_quat_tensor(
            source_pointclouds_pl, unit_quat, position)
        loss = tf_util_loss.chamfer(template_pointclouds_pl, transformed_cloud)
    return loss
示例2: get_loss_b
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import divide [as 别名]
def get_loss_b(self,predicted_transformation,batch_size,template_pointclouds_pl,source_pointclouds_pl):
    """Chamfer loss for a predicted 7-DoF pose (translation + quaternion).

    Same computation as the module-level loss: slice the pose, normalize the
    quaternion, transform the source cloud, and measure the chamfer distance
    against the template.

    Args:
        predicted_transformation: [batch_size, 7] pose tensor.
        batch_size: static Python int batch size.
        template_pointclouds_pl: target point-cloud tensor/placeholder.
        source_pointclouds_pl: source point-cloud tensor/placeholder.

    Returns:
        Chamfer-distance loss tensor.
    """
    with tf.variable_scope('loss') as LossEvaluation:
        # Translation lives in columns 0-2, the quaternion in columns 3-6.
        position = tf.slice(predicted_transformation, [0, 0], [batch_size, 3])
        quat = tf.slice(predicted_transformation, [0, 3], [batch_size, 4])
        # L2-normalize the quaternion with a small epsilon so a zero-norm
        # prediction cannot divide by zero.
        quat_norm = tf.reshape(tf.sqrt(tf.reduce_sum(tf.square(quat), 1)),
                               (batch_size, 1))
        eps = tf.constant(0.0000001, shape=(batch_size, 1), dtype=tf.float32)
        unit_quat = tf.divide(quat, tf.add(quat_norm, eps))
        transformed_cloud = helper.transformation_quat_tensor(
            source_pointclouds_pl, unit_quat, position)
        # Use 1024 points to find loss.
        loss = tf_util_loss.chamfer(template_pointclouds_pl, transformed_cloud)
    return loss
示例3: _compute_loss
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import divide [as 别名]
def _compute_loss(self, prediction_tensor, target_tensor, weights):
    """Compute loss function.

    Args:
        prediction_tensor: A float tensor of shape [batch_size, num_anchors,
            num_classes] representing the predicted logits for each class.
        target_tensor: A float tensor of shape [batch_size, num_anchors,
            num_classes] representing one-hot encoded classification targets.
        weights: a float tensor of shape, either [batch_size, num_anchors,
            num_classes] or [batch_size, num_anchors, 1]. If the shape is
            [batch_size, num_anchors, 1], all the classes are equally weighted.

    Returns:
        loss: a float tensor of shape [batch_size, num_anchors]
            representing the value of the loss function.
    """
    # Collapse the per-class axis so weights become [batch_size, num_anchors].
    weights = tf.reduce_mean(weights, axis=2)
    num_classes = prediction_tensor.get_shape().as_list()[-1]
    # Temperature-scale the logits before the cross entropy.
    scaled_logits = tf.divide(
        prediction_tensor, self._logit_scale, name='scale_logit')
    flat_targets = tf.reshape(target_tensor, [-1, num_classes])
    flat_logits = tf.reshape(scaled_logits, [-1, num_classes])
    per_row_cross_ent = tf.nn.softmax_cross_entropy_with_logits(
        labels=flat_targets, logits=flat_logits)
    return tf.reshape(per_row_cross_ent, tf.shape(weights)) * weights
示例4: convert_class_logits_to_softmax
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import divide [as 别名]
def convert_class_logits_to_softmax(multiclass_scores, temperature=1.0):
    """Converts multiclass logits to softmax scores after applying temperature.

    Args:
        multiclass_scores: float32 tensor of shape [num_instances, num_classes]
            representing the score for each box for each class.
        temperature: Scale factor to use prior to applying softmax. Larger
            temperatures give more uniform distributions after softmax.

    Returns:
        float32 tensor of shape [num_instances, num_classes] with scaling and
        softmax applied.
    """
    # Scores must be logits; dividing by the temperature before softmax
    # flattens (large temperature) or sharpens (small) the distribution.
    scaled_logits = tf.divide(
        multiclass_scores, temperature, name='scale_logits')
    return tf.nn.softmax(scaled_logits, name='softmax')
示例5: read_tensor_from_image_file
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import divide [as 别名]
def read_tensor_from_image_file(frames, input_height=299, input_width=299, input_mean=0, input_std=255):
    """Decode a list of image files, resize and normalize them into one batch.

    Args:
        frames: list of file paths; the decoder is picked from the extension
            (.png/.gif/.bmp, anything else is treated as JPEG).
        input_height, input_width: output spatial size for bilinear resize.
        input_mean, input_std: per-pixel normalization: (x - mean) / std.

    Returns:
        numpy array of shape [len(frames), input_height, input_width, C]
        with the normalized frames (GIFs are squeezed to drop the frame axis).
    """
    input_name = "file_reader"
    decoded_frames = []
    for file_name in frames:
        file_reader = tf.read_file(file_name, input_name)
        if file_name.endswith(".png"):
            image_reader = tf.image.decode_png(file_reader, channels=3, name="png_reader")
        elif file_name.endswith(".gif"):
            # decode_gif returns [num_frames, h, w, 3]; squeeze the frame axis.
            image_reader = tf.squeeze(tf.image.decode_gif(file_reader, name="gif_reader"))
        elif file_name.endswith(".bmp"):
            image_reader = tf.image.decode_bmp(file_reader, name="bmp_reader")
        else:
            image_reader = tf.image.decode_jpeg(file_reader, channels=3, name="jpeg_reader")
        decoded_frames.append(image_reader)
    float_caster = tf.stack([tf.cast(img, tf.float32) for img in decoded_frames])
    resized = tf.image.resize_bilinear(float_caster, [input_height, input_width])
    normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
    # BUGFIX: the original created a tf.Session and never closed it, leaking
    # the session (and its resources) on every call. Use it as a context
    # manager so it is always closed.
    with tf.Session() as sess:
        return sess.run(normalized)
示例6: calculate_model_precision
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import divide [as 别名]
def calculate_model_precision(input_tensor, label_tensor):
    """
    calculate accuracy acc = correct_nums / ground_truth_nums
    :param input_tensor: binary segmentation logits
    :param label_tensor: binary segmentation label
    :return:
    """
    # Hard per-pixel class decision from the logits.
    probs = tf.nn.softmax(logits=input_tensor)
    prediction = tf.expand_dims(tf.argmax(probs, axis=-1), axis=-1)
    # Labels at every location predicted as foreground (class 1);
    # nonzero labels there are correct predictions.
    fg_idx = tf.where(tf.equal(prediction, 1))
    labels_at_pred = tf.gather_nd(label_tensor, fg_idx)
    correct_nums = tf.count_nonzero(labels_at_pred)
    # Count of ground-truth foreground pixels.
    gt_idx = tf.where(tf.equal(label_tensor, 1))
    ground_truth_nums = tf.cast(
        tf.shape(tf.gather_nd(label_tensor, gt_idx))[0], tf.int64)
    return tf.divide(correct_nums, ground_truth_nums)
示例7: calculate_model_fp
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import divide [as 别名]
def calculate_model_fp(input_tensor, label_tensor):
    """
    calculate fp figure
    :param input_tensor:
    :param label_tensor:
    :return:
    """
    # Per-pixel class decision from the logits.
    probs = tf.nn.softmax(logits=input_tensor)
    prediction = tf.expand_dims(tf.argmax(probs, axis=-1), axis=-1)
    # All locations predicted as foreground (class 1).
    fg_idx = tf.where(tf.equal(prediction, 1))
    predicted_fg = tf.gather_nd(prediction, fg_idx)
    total_predicted = tf.cast(tf.shape(predicted_fg)[0], tf.int64)
    # True positives are nonzero labels at predicted-foreground locations;
    # the remainder of the predictions are false positives.
    true_positives = tf.count_nonzero(tf.gather_nd(label_tensor, fg_idx))
    false_positives = total_predicted - true_positives
    return tf.divide(false_positives, total_predicted)
示例8: calculate_model_fn
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import divide [as 别名]
def calculate_model_fn(input_tensor, label_tensor):
    """
    calculate fn figure
    :param input_tensor:
    :param label_tensor:
    :return:
    """
    # Per-pixel class decision from the logits.
    probs = tf.nn.softmax(logits=input_tensor)
    prediction = tf.expand_dims(tf.argmax(probs, axis=-1), axis=-1)
    # Look at every ground-truth foreground location and check what was
    # predicted there; zeros are missed detections (false negatives).
    gt_idx = tf.where(tf.equal(label_tensor, 1))
    preds_at_gt = tf.gather_nd(prediction, gt_idx)
    gt_labels = tf.gather_nd(label_tensor, tf.where(tf.equal(label_tensor, 1)))
    gt_count = tf.cast(tf.shape(gt_labels)[0], tf.int64)
    missed = gt_count - tf.count_nonzero(preds_at_gt)
    return tf.divide(missed, gt_count)
示例9: divide
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import divide [as 别名]
def divide(x, y, safe_mode=True, epsilon=None, name=None):
    """ A wrapper of `tf.divide`, computes Python style division of x by y but extends safe divide support.
    If safe_mode is `True` or epsilon is given(a small float number), the absolute value of denominator
    in the division will be clipped to make sure it's bigger than epsilon (default is 1e-20).
    Args:
        safe_mode: Use safe divide mode.
        epsilon: Float number. Default is `1e-20`.
    """
    if not safe_mode and epsilon is None:
        # Caller explicitly opted out of the safety clamp.
        return tf.divide(x, y, name=name)
    else:
        epsilon = 1e-20 if epsilon is None else epsilon
        name = "safe_divide" if name is None else name
        with tf.name_scope(name):
            # BUGFIX: the original nudged small denominators with
            # y + tf.sign(y) * epsilon, but tf.sign(0) == 0, so an exactly-zero
            # denominator stayed zero -- defeating the whole point of the safe
            # mode. Treat zero as positive so it is pushed to +epsilon.
            sign = tf.where(tf.greater_equal(y, 0.0),
                            tf.ones_like(y), -tf.ones_like(y))
            y = tf.where(tf.greater(tf.abs(y), epsilon), y, y + sign * epsilon)
            return tf.divide(x, y)
示例10: result
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import divide [as 别名]
def result(self):
    """Correlation of the accumulated streams: covariance / (std_true * std_pred).

    Built entirely from the running sums (`_true_sum`, `_pred_sum`,
    `_true_sumsq`, `_pred_sumsq`, `_product`, `_count`) via the expanded
    forms cov = sum(tp) - n*mt*mp and var = sum(t^2) - n*mt^2.
    """
    count = self._count
    mean_true = tf.divide(self._true_sum, count)
    mean_pred = tf.divide(self._pred_sum, count)
    # Covariance in expanded form: sum(t*p) - mt*sum(p) - mp*sum(t) + n*mt*mp.
    covariance = (self._product
                  - tf.multiply(mean_true, self._pred_sum)
                  - tf.multiply(mean_pred, self._true_sum)
                  + tf.multiply(count, tf.multiply(mean_true, mean_pred)))
    # Variances in expanded form: sum(x^2) - n*mean^2.
    true_var = self._true_sumsq - tf.multiply(count, tf.math.square(mean_true))
    pred_var = self._pred_sumsq - tf.multiply(count, tf.math.square(mean_pred))
    denom = tf.multiply(tf.math.sqrt(true_var), tf.math.sqrt(pred_var))
    correlation = tf.divide(covariance, denom)
    if self._summarize:
        # Collapse to a single scalar summary.
        return tf.reduce_mean(correlation)
    return correlation
示例11: __init__
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import divide [as 别名]
def __init__(self, distribution_spec, average_time_steps=False, scope="negative-log-likelihood-loss", **kwargs):
    """
    Args:
        distribution_spec: Spec passed to `Distribution.from_spec` to build the
            distribution component this loss evaluates.
        average_time_steps (bool): Whether, if a time rank is given, to divide by the sequence lengths to get
            the mean or not (leave as sum).
    """
    super(NegativeLogLikelihoodLoss, self).__init__(scope=scope, **kwargs)
    # Build the distribution sub-component and register it with this component.
    self.distribution = Distribution.from_spec(distribution_spec)
    self.average_time_steps = average_time_steps
    self.add_components(self.distribution)
    #self.reduce_ranks = None
    # Filled in later (presumably during build/check-input) — None until then.
    self.time_rank = None
    self.time_major = None
示例12: __init__
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import divide [as 别名]
def __init__(self, sparse=True, with_kl_regularizer=True, average_time_steps=False, scope="cross-entropy-loss",
             **kwargs):
    """
    Args:
        sparse (bool): Whether we have sparse labels. Sparse labels can only assign one category to each
            sample, so labels are ints. If False, labels are already softmaxed categorical distribution probs
            OR simple logits.
        with_kl_regularizer (bool): Whether to add a KL regularizer term to the loss.
        average_time_steps (bool): Whether, if a time rank is given, to divide by the sequence lengths to get
            the mean or not (leave as sum).
    """
    super(CategoricalCrossEntropyLoss, self).__init__(scope=scope, **kwargs)
    self.sparse = sparse
    self.with_kl_regularizer = with_kl_regularizer
    self.average_time_steps = average_time_steps
    #self.reduce_ranks = None
    #self.time_rank = None
    #self.time_major = None
    #self.is_bool = None
示例13: _compute_loss
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import divide [as 别名]
def _compute_loss(self, prediction_tensor, target_tensor, weights):
    """Compute loss function.

    Args:
        prediction_tensor: A float tensor of shape [batch_size, num_anchors,
            num_classes] representing the predicted logits for each class.
        target_tensor: A float tensor of shape [batch_size, num_anchors,
            num_classes] representing one-hot encoded classification targets.
        weights: a float tensor of shape [batch_size, num_anchors].

    Returns:
        loss: a float tensor of shape [batch_size, num_anchors]
            representing the value of the loss function.
    """
    num_classes = prediction_tensor.get_shape().as_list()[-1]
    # Apply temperature scaling to the logits first.
    scaled = tf.divide(
        prediction_tensor, self._logit_scale, name='scale_logit')
    # Flatten to rows of classes for the cross-entropy op, then restore
    # the [batch_size, num_anchors] layout and weight per anchor.
    cross_ent = tf.nn.softmax_cross_entropy_with_logits(
        labels=tf.reshape(target_tensor, [-1, num_classes]),
        logits=tf.reshape(scaled, [-1, num_classes]))
    return tf.reshape(cross_ent, tf.shape(weights)) * weights
示例14: version_10
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import divide [as 别名]
def version_10(cls, node, **kwargs):
    """Lower the node to TF: y = saturate_cast(round(x / y_scale) + y_zero_point).

    NOTE(review): matches the ONNX QuantizeLinear contract (opset 10) —
    confirm against the surrounding handler class.
    """
    tensor_dict = kwargs["tensor_dict"]
    x = tf.cast(tensor_dict[node.inputs[0]], tf.float32)
    y_scale = tensor_dict[node.inputs[1]]
    # Scale and round to the nearest integer level.
    quantized = tf.round(tf.divide(x, y_scale))
    if len(node.inputs) == 3:
        # Optional zero point: output dtype follows it.
        y_zero_point = tensor_dict[node.inputs[2]]
        y_dtype = y_zero_point.dtype
        quantized = tf.add(quantized, tf.cast(y_zero_point, tf.float32))
    else:
        # y_zero_point default dtype = uint8.
        y_dtype = tf.uint8
    return [tf.saturate_cast(quantized, y_dtype)]
示例15: _compute_loss
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import divide [as 别名]
def _compute_loss(self, prediction_tensor, target_tensor, weights):
    """Compute loss function.

    Args:
        prediction_tensor: A float tensor of shape [batch_size, num_anchors,
            num_classes] representing the predicted logits for each class.
        target_tensor: A float tensor of shape [batch_size, num_anchors,
            num_classes] representing one-hot encoded classification targets.
        weights: a float tensor of shape [batch_size, num_anchors].

    Returns:
        loss: a (scalar) tensor representing the value of the loss function.
    """
    num_classes = prediction_tensor.get_shape().as_list()[-1]
    # Temperature-scale the logits before computing cross entropy.
    scaled = tf.divide(
        prediction_tensor, self._logit_scale, name='scale_logit')
    flat_labels = tf.reshape(target_tensor, [-1, num_classes])
    flat_logits = tf.reshape(scaled, [-1, num_classes])
    per_row_cross_ent = tf.nn.softmax_cross_entropy_with_logits(
        labels=flat_labels, logits=flat_logits)
    if self._anchorwise_output:
        # Per-anchor losses, weighted elementwise.
        return tf.reshape(per_row_cross_ent, tf.shape(weights)) * weights
    # Otherwise reduce to a single weighted scalar.
    return tf.reduce_sum(per_row_cross_ent * tf.reshape(weights, [-1]))