This article collects typical usage examples of the Python function tensorflow.to_float. If you are wondering what to_float does, how it is called, or simply want to see it used in real code, the hand-picked examples below should help.
The listing shows 15 code examples of the to_float function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
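A quick orientation before the examples: tf.to_float(x) is simply a cast to tf.float32. It was deprecated in later 1.x releases in favour of tf.cast and is no longer in the TensorFlow 2.x root namespace (only tf.compat.v1.to_float remains), so every call below has a drop-in replacement. A minimal sketch, assuming TensorFlow 1.x so that both spellings work:

import tensorflow as tf

x = tf.constant([1, 2, 3])        # int32 tensor
y = tf.to_float(x)                # dtype becomes tf.float32 (TF 1.x only)
z = tf.cast(x, tf.float32)        # equivalent, works in TF 1.x and 2.x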
Example 1: ctrl_rewards
def ctrl_rewards(states,
                 actions,
                 rewards,
                 next_states,
                 contexts,
                 reward_scales=1.0):
  """Returns the negative control cost.

  Args:
    states: A [batch_size, num_state_dims] Tensor representing a batch
      of states.
    actions: A [batch_size, num_action_dims] Tensor representing a batch
      of actions.
    rewards: A [batch_size] Tensor representing a batch of rewards.
    next_states: A [batch_size, num_state_dims] Tensor representing a batch
      of next states.
    contexts: A list of [batch_size, num_context_dims] Tensor representing
      a batch of contexts.
    reward_scales: multiplicative scale for rewards. A scalar or 1D tensor,
      must be broadcastable to number of reward dimensions.

  Returns:
    A new tf.float32 [batch_size] rewards Tensor, and
      tf.float32 [batch_size] discounts tensor.
  """
  del states, rewards, contexts  # Unused
  if actions is None:
    rewards = tf.to_float(tf.zeros(shape=next_states.shape[:1]))
  else:
    rewards = -tf.reduce_sum(tf.square(actions), axis=1)
    rewards *= reward_scales
    rewards = tf.to_float(rewards)
  return rewards, tf.ones_like(rewards)
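A usage sketch for Example 1 (the tensors are illustrative, not from the original repo; only actions and next_states are actually used, since the other arguments are deleted):

actions = tf.constant([[0.1, -0.2], [0.3, 0.0]])  # [batch_size, num_action_dims]
next_states = tf.zeros([2, 3])                    # [batch_size, num_state_dims]
rewards, discounts = ctrl_rewards(
    states=None, actions=actions, rewards=None, next_states=next_states,
    contexts=None)
# rewards evaluates to [-0.05, -0.09]; discounts to [1.0, 1.0]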
Example 2: testPaddingCrossEntropyFactored
def testPaddingCrossEntropyFactored(self):
  vocab_size = 19
  rows = 5
  cols = 4
  depth = 11
  label_smoothing = 0.1
  features = np.random.rand(rows, cols, depth)
  weights = np.random.rand(vocab_size, depth)
  labels = np.random.randint(0, vocab_size - 1, size=(rows, cols))
  with self.test_session() as session:
    features = tf.to_float(features)
    weights = tf.to_float(weights)
    labels = tf.to_int32(labels)
    logits = tf.matmul(
        tf.reshape(features, [rows * cols, depth]), weights, transpose_b=True)
    logits = tf.reshape(logits, [rows, cols, vocab_size])
    loss_num, loss_den = common_layers.padded_cross_entropy(
        logits, labels, label_smoothing=label_smoothing, reduce_sum=False)
    factored_logits = common_layers.FactoredTensor(features, weights)
    loss_num_f, loss_den_f = common_layers.padded_cross_entropy_factored(
        factored_logits,
        labels=labels,
        label_smoothing=label_smoothing,
        reduce_sum=False)
    num, den, num_f, den_f = session.run(
        [loss_num, loss_den, loss_num_f, loss_den_f])
    self.assertEqual(num.shape, (rows, cols))
    self.assertEqual(den.shape, (rows, cols))
    self.assertEqual(num_f.shape, (rows, cols))
    self.assertEqual(den_f.shape, (rows, cols))
    self.assertAllClose(num, num_f)
    self.assertAllClose(den, den_f)
Example 3: compute_IOU
def compute_IOU(bboxA, bboxB):
    """Compute the Intersection Over Union.

    Args:
        bboxA: [N X 4 tensor] format = [left, top, right, bottom]
        bboxB: [N X 4 tensor]

    Return:
        IOU: [N X 1 tensor]
    """
    x1A, y1A, x2A, y2A = tf.split(1, 4, bboxA)
    x1B, y1B, x2B, y2B = tf.split(1, 4, bboxB)

    # compute intersection
    x1_max = tf.maximum(x1A, x1B)
    y1_max = tf.maximum(y1A, y1B)
    x2_min = tf.minimum(x2A, x2B)
    y2_min = tf.minimum(y2A, y2B)

    # overlap_flag = tf.logical_and(tf.less(x1_max, x2_min), tf.less(y1_max, y2_min))
    overlap_flag = tf.to_float(tf.less(x1_max, x2_min)) * \
        tf.to_float(tf.less(y1_max, y2_min))
    overlap_area = tf.mul(overlap_flag, tf.mul(
        x2_min - x1_max, y2_min - y1_max))

    # compute union
    areaA = tf.mul(x2A - x1A, y2A - y1A)
    areaB = tf.mul(x2B - x1B, y2B - y1B)
    union_area = areaA + areaB - overlap_area

    return tf.div(overlap_area, union_area)
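Example 3 uses the pre-1.0 TensorFlow API: tf.split then took its arguments in a different order, and tf.mul / tf.div have since been replaced by tf.multiply / tf.divide. A sketch of the same computation with the post-1.0 API, assuming float-typed [N, 4] boxes:

x1A, y1A, x2A, y2A = tf.split(bboxA, 4, axis=1)   # note the new argument order
x1B, y1B, x2B, y2B = tf.split(bboxB, 4, axis=1)
x1_max = tf.maximum(x1A, x1B)
y1_max = tf.maximum(y1A, y1B)
x2_min = tf.minimum(x2A, x2B)
y2_min = tf.minimum(y2A, y2B)
overlap_flag = (tf.cast(tf.less(x1_max, x2_min), tf.float32) *
                tf.cast(tf.less(y1_max, y2_min), tf.float32))
overlap_area = overlap_flag * (x2_min - x1_max) * (y2_min - y1_max)
areaA = (x2A - x1A) * (y2A - y1A)
areaB = (x2B - x1B) * (y2B - y1B)
iou = overlap_area / (areaA + areaB - overlap_area)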
Example 4: crop_or_pad
def crop_or_pad(waves, length, channels):
  """Crop or pad wave to have shape [N, length, channels].

  Args:
    waves: A 3D `Tensor` of NLC format.
    length: A Python scalar. The output wave size.
    channels: Number of output waves channels.

  Returns:
    A 3D `Tensor` of NLC format with shape [N, length, channels].
  """
  waves = tf.convert_to_tensor(waves)
  batch_size = waves.shape[0].value
  waves_shape = tf.shape(waves)

  # Force audio length.
  pad = tf.maximum(0, length - waves_shape[1])
  right_pad = tf.to_int32(tf.to_float(pad) / 2.0)
  left_pad = pad - right_pad
  waves = tf.pad(waves, [[0, 0], [left_pad, right_pad], [0, 0]])
  waves = waves[:, :length, :]

  # Force number of channels.
  num_repeats = tf.to_int32(
      tf.ceil(tf.to_float(channels) / tf.to_float(waves_shape[2])))
  waves = tf.tile(waves, [1, 1, num_repeats])[:, :, :channels]

  waves.set_shape([batch_size, length, channels])
  return waves
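A usage sketch for Example 4 (shapes are illustrative; TensorFlow 1.x graph mode is assumed because of tf.to_float, tf.to_int32 and Dimension.value): pad a batch of mono clips to 64000 samples and tile them to stereo.

waves = tf.random_normal([8, 16000, 1])               # [N, L, C] mono audio
stereo = crop_or_pad(waves, length=64000, channels=2)
print(stereo.shape)                                   # (8, 64000, 2)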
Example 5: top_1_and_5
def top_1_and_5(predictions, labels):
    # test_size = FLAGS.test_size  # tf.shape(predictions)[0]
    in_top1 = tf.to_float(tf.nn.in_top_k(predictions, labels, k=1))
    in_top5 = tf.to_float(tf.nn.in_top_k(predictions, labels, k=5))
    num_correct_1 = tf.reduce_sum(in_top1, name="top1")
    num_correct_5 = tf.reduce_sum(in_top5, name="top5")
    return num_correct_1, num_correct_5
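A usage sketch for Example 5 (the shapes and names are illustrative): count top-1/top-5 hits for a batch of logits and turn the counts into accuracies by dividing by the batch size.

logits = tf.random_uniform([32, 1000])                          # [batch, classes]
labels = tf.random_uniform([32], maxval=1000, dtype=tf.int32)   # ground-truth ids
top1, top5 = top_1_and_5(logits, labels)
batch = tf.to_float(tf.shape(logits)[0])
top1_acc, top5_acc = top1 / batch, top5 / batch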
Example 6: _smallest_size_at_least
def _smallest_size_at_least(height, width, smallest_side):
  """Computes new shape with the smallest side equal to `smallest_side`.

  Computes new shape with the smallest side equal to `smallest_side` while
  preserving the original aspect ratio.

  Args:
    height: an int32 scalar tensor indicating the current height.
    width: an int32 scalar tensor indicating the current width.
    smallest_side: A python integer or scalar `Tensor` indicating the size of
      the smallest side after resize.

  Returns:
    new_height: an int32 scalar tensor indicating the new height.
    new_width: an int32 scalar tensor indicating the new width.
  """
  smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)

  height = tf.to_float(height)
  width = tf.to_float(width)
  smallest_side = tf.to_float(smallest_side)

  scale = tf.cond(tf.greater(height, width),
                  lambda: smallest_side / width,
                  lambda: smallest_side / height)
  new_height = tf.to_int32(height * scale)
  new_width = tf.to_int32(width * scale)
  return new_height, new_width
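A sketch of a typical call site for Example 6 (the resize call is illustrative; the preprocessing code this comes from wraps the resize slightly differently): scale a 480x640 image so its shorter side becomes 256 while keeping the aspect ratio.

image = tf.random_uniform([480, 640, 3])
shape = tf.shape(image)
new_height, new_width = _smallest_size_at_least(shape[0], shape[1], 256)
resized = tf.image.resize_images(image, [new_height, new_width])
# new_height == 256, new_width == 341 (640 * 256 / 480, truncated to int32)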
Example 7: _summarize_input
def _summarize_input(self, groundtruth_boxes_list, match_list):
  """Creates tensorflow summaries for the input boxes and anchors.

  This function creates four summaries corresponding to the average
  number (over images in a batch) of (1) groundtruth boxes, (2) anchors
  marked as positive, (3) anchors marked as negative, and (4) anchors marked
  as ignored.

  Args:
    groundtruth_boxes_list: a list of 2-D tensors of shape [num_boxes, 4]
      containing corners of the groundtruth boxes.
    match_list: a list of matcher.Match objects encoding the match between
      anchors and groundtruth boxes for each image of the batch,
      with rows of the Match objects corresponding to groundtruth boxes
      and columns corresponding to anchors.
  """
  num_boxes_per_image = tf.stack(
      [tf.shape(x)[0] for x in groundtruth_boxes_list])
  pos_anchors_per_image = tf.stack(
      [match.num_matched_columns() for match in match_list])
  neg_anchors_per_image = tf.stack(
      [match.num_unmatched_columns() for match in match_list])
  ignored_anchors_per_image = tf.stack(
      [match.num_ignored_columns() for match in match_list])
  tf.summary.scalar('Input/AvgNumGroundtruthBoxesPerImage',
                    tf.reduce_mean(tf.to_float(num_boxes_per_image)))
  tf.summary.scalar('Input/AvgNumPositiveAnchorsPerImage',
                    tf.reduce_mean(tf.to_float(pos_anchors_per_image)))
  tf.summary.scalar('Input/AvgNumNegativeAnchorsPerImage',
                    tf.reduce_mean(tf.to_float(neg_anchors_per_image)))
  tf.summary.scalar('Input/AvgNumIgnoredAnchorsPerImage',
                    tf.reduce_mean(tf.to_float(ignored_anchors_per_image)))
Example 8: _scale
def _scale(x):
  min_x_valuef = tf.to_float(min_x_value)
  max_x_valuef = tf.to_float(max_x_value)
  output_minf = tf.to_float(output_min)
  output_maxf = tf.to_float(output_max)
  return ((((tf.to_float(x) - min_x_valuef) * (output_maxf - output_minf)) /
           (max_x_valuef - min_x_valuef)) + output_minf)
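_scale is an inner function: min_x_value, max_x_value, output_min and output_max are captured from its enclosing scope. A sketch of the same min-max scaling with assumed range values, treating the function above as defined at module scope so the four names resolve as globals, mapping uint8 pixels from [0, 255] into [-1, 1]:

min_x_value, max_x_value = 0, 255     # assumed input range
output_min, output_max = -1.0, 1.0    # assumed output range
pixels = tf.constant([0, 128, 255], dtype=tf.uint8)
scaled = _scale(pixels)               # ~[-1.0, 0.004, 1.0]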
Example 9: _get_sampling_probability
def _get_sampling_probability(hparams, is_training):
  """Returns `sampling_probability` if `sampling_schedule` given or 0."""
  if (not hasattr(hparams, 'sampling_schedule') or
      not hparams.sampling_schedule):
    return tf.convert_to_tensor(0.0, tf.float32)

  if not is_training:
    # This is likely an eval/test job associated with a training job using
    # scheduled sampling.
    tf.logging.warning(
        'Setting non-training sampling schedule from %s:%f to constant:1.0.',
        hparams.sampling_schedule, hparams.sampling_rate)
    hparams.sampling_schedule = 'constant'
    hparams.sampling_rate = 1.0

  if hparams.sampling_schedule == 'constant':
    sampling_probability = tf.constant(hparams.sampling_rate)
  elif hparams.sampling_schedule == 'inverse_sigmoid':
    k = tf.constant(hparams.sampling_rate)
    sampling_probability = 1.0 - (
        k / (k + tf.exp(tf.to_float(tf.train.get_or_create_global_step()) / k)))
  elif hparams.sampling_schedule == 'exponential':
    if not 0 < hparams.sampling_rate < 1:
      raise ValueError(
          'Exponential sampling rate must be in the interval (0, 1). Got %f.'
          % hparams.sampling_rate)
    k = tf.constant(hparams.sampling_rate)
    sampling_probability = (
        1.0 - tf.pow(k, tf.to_float(tf.train.get_or_create_global_step())))
  else:
    tf.logging.fatal('Invalid sampling_schedule: %s',
                     hparams.sampling_schedule)

  tf.summary.scalar('sampling_probability', sampling_probability)
  return tf.convert_to_tensor(sampling_probability, tf.float32)
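To see how the two non-constant schedules behave, a quick NumPy sketch of the same formulas (the k values and steps are illustrative): both curves start near 0, i.e. mostly teacher forcing, and approach 1, i.e. fully sampled outputs, as the global step grows.

import numpy as np

k_sig, k_exp = 1000.0, 0.9995
steps = np.array([0.0, 1000.0, 5000.0, 20000.0])
inverse_sigmoid = 1.0 - k_sig / (k_sig + np.exp(steps / k_sig))
exponential = 1.0 - np.power(k_exp, steps)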
Example 10: total_variation_loss
def total_variation_loss(stylized_inputs, total_variation_weight):
  """Total variation regularization loss.

  This loss improves the smoothness of the image by expressing high frequency
  variations as a loss.
  http://link.springer.com/article/10.1023/B:JMIV.0000011325.36760.1e

  Args:
    stylized_inputs: The batched set of images.
    total_variation_weight: Weight of total variation loss.

  Returns:
    Tensor for the total variation loss, dict mapping loss names to losses.
  """
  shape = tf.shape(stylized_inputs)
  batch_size = shape[0]
  height = shape[1]
  width = shape[2]
  channels = shape[3]
  y_size = tf.to_float((height - 1) * width * channels)
  x_size = tf.to_float(height * (width - 1) * channels)
  y_loss = tf.nn.l2_loss(
      stylized_inputs[:, 1:, :, :] - stylized_inputs[:, :-1, :, :]) / y_size
  x_loss = tf.nn.l2_loss(
      stylized_inputs[:, :, 1:, :] - stylized_inputs[:, :, :-1, :]) / x_size
  loss = (y_loss + x_loss) / tf.to_float(batch_size)
  weighted_loss = loss * total_variation_weight
  return weighted_loss, {
      'total_variation_loss': loss,
      'weighted_total_variation_loss': weighted_loss
  }
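A usage sketch for Example 10 (the weight is illustrative): compute the weighted total variation term for a batch of stylized images; in a full style-transfer objective it would be added to the content and style losses.

images = tf.random_uniform([4, 256, 256, 3])   # NHWC batch of stylized images
tv_weighted, tv_dict = total_variation_loss(images, total_variation_weight=1e-4)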
Example 11: compute_metrics
def compute_metrics(output_video, target_video):
  max_pixel_value = 255.0
  output_video = tf.to_float(output_video)
  target_video = tf.to_float(target_video)
  psnr = tf.image.psnr(output_video, target_video, max_pixel_value)
  ssim = tf.image.ssim(output_video, target_video, max_pixel_value)
  return {"PSNR": psnr, "SSIM": ssim}
Example 12: total_variation_loss
def total_variation_loss(layer):
    shape = tf.shape(layer)
    height = shape[1]
    width = shape[2]
    y = (tf.slice(layer, [0, 0, 0, 0], tf.pack([-1, height - 1, -1, -1])) -
         tf.slice(layer, [0, 1, 0, 0], [-1, -1, -1, -1]))
    x = (tf.slice(layer, [0, 0, 0, 0], tf.pack([-1, -1, width - 1, -1])) -
         tf.slice(layer, [0, 0, 1, 0], [-1, -1, -1, -1]))
    return (tf.nn.l2_loss(x) / tf.to_float(tf.size(x)) +
            tf.nn.l2_loss(y) / tf.to_float(tf.size(y)))
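Example 12 predates TensorFlow 1.0: tf.pack was later renamed tf.stack, and the tf.slice calls can be replaced by plain slicing, much as Example 10 does. A sketch of the same loss against the newer API (the _v1 suffix is only to avoid clashing with the function above):

def total_variation_loss_v1(layer):
    # Same quantity as above, written with slicing and tf.cast.
    y = layer[:, 1:, :, :] - layer[:, :-1, :, :]
    x = layer[:, :, 1:, :] - layer[:, :, :-1, :]
    return (tf.nn.l2_loss(x) / tf.cast(tf.size(x), tf.float32) +
            tf.nn.l2_loss(y) / tf.cast(tf.size(y), tf.float32))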
Example 13: f_conf_loss
def f_conf_loss(s_out, match, timespan, use_cum_min=True):
    """Loss function for confidence score sequence.

    Args:
        s_out: [B, N] confidence score for each output timestep.
        match: [B, N, N] matching matrix between outputs and groundtruth.
        timespan: number of output timesteps (N).
        use_cum_min: whether to bound the scores with cumulative min/max.
    """
    s_out_shape = tf.shape(s_out)
    num_ex = tf.to_float(s_out_shape[0])
    max_num_obj = tf.to_float(s_out_shape[1])
    match_sum = tf.reduce_sum(match, reduction_indices=[2])

    # Loss for confidence scores.
    if use_cum_min:
        # [B, N]
        s_out_min = f_cum_min(s_out, timespan)
        s_out_max = f_cum_max(s_out, timespan)
        # [B, N]
        s_bce = f_bce_minmax(s_out_min, s_out_max, match_sum)
    else:
        s_bce = f_bce(s_out, match_sum)
    loss = tf.reduce_sum(s_bce) / num_ex / max_num_obj

    return loss
Example 14: f_iou_box
def f_iou_box(top_left_a, bot_right_a, top_left_b, bot_right_b):
    """Computes IoU of boxes.

    Args:
        top_left_a: [B, T, 2] or [B, 2]
        bot_right_a: [B, T, 2] or [B, 2]
        top_left_b: [B, T, 2] or [B, 2]
        bot_right_b: [B, T, 2] or [B, 2]

    Returns:
        iou: [B, T]
    """
    inter_area = f_inter_box(top_left_a, bot_right_a, top_left_b, bot_right_b)
    inter_area = tf.maximum(inter_area, 1e-6)
    ndims = tf.shape(tf.shape(top_left_a))
    # area_a = tf.reduce_prod(bot_right_a - top_left_a, ndims - 1)
    # area_b = tf.reduce_prod(bot_right_b - top_left_b, ndims - 1)
    check_a = tf.reduce_prod(tf.to_float(top_left_a < bot_right_a), ndims - 1)
    area_a = check_a * tf.reduce_prod(bot_right_a - top_left_a, ndims - 1)
    check_b = tf.reduce_prod(tf.to_float(top_left_b < bot_right_b), ndims - 1)
    area_b = check_b * tf.reduce_prod(bot_right_b - top_left_b, ndims - 1)
    union_area = (area_a + area_b - inter_area + 1e-5)
    union_area = tf.maximum(union_area, 1e-5)
    iou = inter_area / union_area
    iou = tf.maximum(iou, 1e-5)
    iou = tf.minimum(iou, 1.0)
    return iou
Example 15: summarize
def summarize(self):
  """Summarize the number of positives and negatives after mining."""
  if self._num_positives_list and self._num_negatives_list:
    avg_num_positives = tf.reduce_mean(tf.to_float(self._num_positives_list))
    avg_num_negatives = tf.reduce_mean(tf.to_float(self._num_negatives_list))
    tf.summary.scalar('HardExampleMiner/NumPositives', avg_num_positives)
    tf.summary.scalar('HardExampleMiner/NumNegatives', avg_num_negatives)