This article collects typical usage examples of the tensorflow.compat.v1.cast method in Python. If you are wondering how v1.cast is used in practice and what it looks like in real code, the curated examples below may help. You can also explore the other methods of the tensorflow.compat.v1 module.
Fifteen code examples of v1.cast are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
Example 1: loss_function
# Required module import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import cast [as alias]
def loss_function(self, inputs, build_network_result):
  logits = build_network_result.logits
  # Unpack model output back to locations and confidence scores of
  # predictions.
  # Shape of pred_loc: [batch_size, NUM_SSD_BOXES, 4]
  # Shape of pred_label: [batch_size, NUM_SSD_BOXES, label_num]
  pred_loc, pred_label = tf.split(logits, [4, self.label_num], 2)
  # Shape of gt_loc: [batch_size, NUM_SSD_BOXES, 4]
  # Shape of gt_label: [batch_size, NUM_SSD_BOXES, 1]
  # Shape of num_gt: [batch_size]
  _, gt_loc, gt_label, num_gt = inputs
  gt_label = tf.cast(gt_label, tf.int32)
  box_loss = self._localization_loss(pred_loc, gt_loc, gt_label, num_gt)
  class_loss = self._classification_loss(pred_label, gt_label, num_gt)
  tf.summary.scalar('box_loss', tf.reduce_mean(box_loss))
  tf.summary.scalar('class_loss', tf.reduce_mean(class_loss))
  return class_loss + box_loss
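As a quick illustration of the tf.split unpacking above, here is a shape-level sketch. The sizes are assumptions for illustration only (label_num=81 and 8732 boxes are typical SSD300 values, not values from the example):

import tensorflow.compat.v1 as tf

# Hypothetical sizes for illustration only.
label_num = 81                        # e.g. 80 classes + background
logits = tf.zeros([32, 8732, 4 + label_num])
pred_loc, pred_label = tf.split(logits, [4, label_num], 2)
# pred_loc has shape [32, 8732, 4]; pred_label has shape [32, 8732, 81].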
Example 2: _fp16_variable_creator
# Required module import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import cast [as alias]
def _fp16_variable_creator(next_creator, **kwargs):
  """Variable creator to create variables in fp32 and cast them to fp16."""
  dtype = kwargs.get('dtype', None)
  initial_value = kwargs.get('initial_value', None)
  if dtype is None:
    if initial_value is not None and not callable(initial_value):
      dtype = initial_value.dtype
  if dtype == tf.float16:
    if callable(initial_value):
      new_initial_value = lambda: tf.cast(initial_value(), tf.float32)
    else:
      new_initial_value = tf.cast(initial_value, tf.float32)
    kwargs['dtype'] = tf.float32
    kwargs['initial_value'] = new_initial_value
    var = next_creator(**kwargs)
    return tf.cast(var, dtype=tf.float16)
  else:
    return next_creator(**kwargs)
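A minimal usage sketch, not from the original source: such a creator is typically installed with tf.variable_creator_scope, so that any fp16 variable request inside the scope gets fp32 backing storage:

import tensorflow.compat.v1 as tf

with tf.variable_creator_scope(_fp16_variable_creator):
  # The fp16 request is intercepted: the underlying variable is stored
  # in fp32, and an fp16-cast tensor is returned for the forward pass.
  w = tf.Variable(tf.zeros([4, 4], tf.float16), name='w')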
Example 3: build_network
# Required module import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import cast [as alias]
def build_network(self, images, phase_train=True, nclass=1001,
                  data_type=tf.float32):
  # pylint: disable=g-import-not-at-top
  try:
    from official.resnet.r1.imagenet_main import ImagenetModel
  except ImportError:
    tf.logging.fatal('Please include tensorflow/models to the PYTHONPATH.')
    raise
  images = tf.cast(images, data_type)
  model_class = ImagenetModel(resnet_size=self.resnet_size,
                              resnet_version=self.version,
                              # The official model dtype seems to be ignored,
                              # as the dtype it uses is the dtype of the input
                              # images. Doesn't hurt to set it though.
                              dtype=data_type)
  logits = model_class(images, phase_train)
  logits = tf.cast(logits, tf.float32)
  return model_lib.BuildNetworkResult(logits=logits, extra_info=None)
Example 4: preprocess
# Required module import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import cast [as alias]
def preprocess(self, image_buffer, bbox, batch_position):
  # pylint: disable=g-import-not-at-top
  try:
    from official.r1.resnet.imagenet_preprocessing import preprocess_image
  except ImportError:
    tf.logging.fatal('Please include tensorflow/models to the PYTHONPATH.')
    raise
  if self.train:
    image = preprocess_image(
        image_buffer, bbox, self.height, self.width, self.depth,
        is_training=True)
  else:
    image = preprocess_image(
        image_buffer, bbox, self.height, self.width, self.depth,
        is_training=False)
  return tf.cast(image, self.dtype)
Example 5: _ensure_keep_mask
# Required module import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import cast [as alias]
def _ensure_keep_mask(self, x):
  if self._keep_mask is None or not self._share_mask:
    shape = tf.shape(x)
    k = shape[1]
    # To make this class a drop-in replacement for bernoulli dropout we
    # parameterize it with keep_prob. Set alpha of the dirichlet so that the
    # variance is equal to the variance of the bernoulli with p=keep_prob
    # divided by keep_prob.
    # Now the variance of the dirichlet with k equal alphas is
    # (k-1)/(k^2*(k*alpha+1)). Solve that for alpha.
    kf = tf.cast(k, tf.float32)
    alpha = self._keep_prob * (kf - 1.0) / ((1 - self._keep_prob) * kf) - 1.0 / kf
    dist = tfp.distributions.Dirichlet(tf.ones(shape=k) * alpha)
    assert (dist.reparameterization_type ==
            tfp.distributions.FULLY_REPARAMETERIZED)
    # E[dir(alpha)] = 1/k for all elements, but we want the mask to have
    # expectation 1 (like an inverted-dropout mask), hence the
    # multiplication by kf.
    self._keep_mask = kf * dist.sample(shape[0])
    self._keep_mask.set_shape(x.get_shape())
  return self._keep_mask
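A quick numeric sanity check of the alpha formula from the comment, in plain Python with illustrative values keep_prob=0.8 and k=10: the variance of the scaled mask element k*Dir_i should match the variance of Bernoulli(keep_prob)/keep_prob:

p, k = 0.8, 10.0
alpha = p * (k - 1.0) / ((1.0 - p) * k) - 1.0 / k        # 3.5
dirichlet_var = (k - 1.0) / (k * k * (k * alpha + 1.0))  # per-element variance
scaled_var = k * k * dirichlet_var                       # variance of k * Dir_i
target_var = (1.0 - p) / p                               # Bernoulli(p)/p variance
assert abs(scaled_var - target_var) < 1e-9               # both are 0.25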
Example 6: mask_from_lengths
# Required module import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import cast [as alias]
def mask_from_lengths(lengths, max_length=None, dtype=None, name=None):
  """Convert a vector of lengths to a matrix of binary masks.

  E.g. [2, 4, 3] will become [[1, 1, 0, 0], [1, 1, 1, 1], [1, 1, 1, 0]].

  Args:
    lengths: a d-dimensional vector of integers corresponding to lengths.
    max_length: an optional (default: None) scalar-like or 0-dimensional tensor
      indicating the maximum length of the masks. If not provided, the maximum
      length will be inferred from the lengths vector.
    dtype: the dtype of the returned mask, if specified. If None, the dtype of
      the lengths will be used.
    name: a name for the operation (optional).

  Returns:
    A d x max_length tensor of binary masks, cast to the requested dtype
    (or the dtype of lengths if none is given).
  """
  with tf.name_scope(name, 'mask_from_lengths'):
    dtype = lengths.dtype if dtype is None else dtype
    max_length = tf.reduce_max(lengths) if max_length is None else max_length
    indexes = tf.range(max_length, dtype=lengths.dtype)
    mask = tf.less(tf.expand_dims(indexes, 0), tf.expand_dims(lengths, 1))
    cast_mask = tf.cast(mask, dtype)
    return tf.stop_gradient(cast_mask)
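A usage sketch reproducing the docstring example (graph mode with a Session is assumed, as in TF1):

import tensorflow.compat.v1 as tf

lengths = tf.constant([2, 4, 3])
mask = mask_from_lengths(lengths)   # max_length is inferred as 4
with tf.Session() as sess:
  print(sess.run(mask))
  # [[1 1 0 0]
  #  [1 1 1 1]
  #  [1 1 1 0]]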
Example 7: calculate_generalized_advantage_estimator
# Required module import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import cast [as alias]
def calculate_generalized_advantage_estimator(
    reward, value, done, gae_gamma, gae_lambda):
  # pylint: disable=g-doc-args
  """Generalized advantage estimator.

  Returns:
    GAE estimator. It will be one element shorter than the input; this is
    because to compute GAE for [0, ..., N-1] one needs V for [1, ..., N].
  """
  # pylint: enable=g-doc-args
  next_value = value[1:, :]
  next_not_done = 1 - tf.cast(done[1:, :], tf.float32)
  delta = (reward[:-1, :] + gae_gamma * next_value * next_not_done
           - value[:-1, :])
  return_ = tf.reverse(tf.scan(
      lambda agg, cur: cur[0] + cur[1] * gae_gamma * gae_lambda * agg,
      [tf.reverse(delta, [0]), tf.reverse(next_not_done, [0])],
      tf.zeros_like(delta[0, :]),
      parallel_iterations=1), [0])
  return tf.check_numerics(return_, "return")
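A shape-level sketch of how it might be called. Time-major [num_steps, batch_size] tensors are assumed here, consistent with the slicing along axis 0; the values are placeholders, not meaningful trajectories:

import tensorflow.compat.v1 as tf

# Illustrative time-major inputs: [num_steps, batch_size].
reward = tf.zeros([5, 2])
value = tf.zeros([5, 2])
done = tf.zeros([5, 2], dtype=tf.bool)
gae = calculate_generalized_advantage_estimator(
    reward, value, done, gae_gamma=0.99, gae_lambda=0.95)
# gae has shape [4, 2]: one step shorter than the inputs.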
Example 8: __init__
# Required module import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import cast [as alias]
def __init__(
    self, batch_size, observation_space, action_space, policy_hparams,
    policy_dir, sampling_temp
):
  super(PolicyAgent, self).__init__(
      batch_size, observation_space, action_space
  )
  self._sampling_temp = sampling_temp
  with tf.Graph().as_default():
    self._observations_t = tf.placeholder(
        shape=((batch_size,) + self.observation_space.shape),
        dtype=self.observation_space.dtype
    )
    (logits, self._values_t) = rl.get_policy(
        self._observations_t, policy_hparams, self.action_space
    )
    actions = common_layers.sample_with_temperature(logits, sampling_temp)
    self._probs_t = tf.nn.softmax(logits / sampling_temp)
    self._actions_t = tf.cast(actions, tf.int32)
    model_saver = tf.train.Saver(
        tf.global_variables(policy_hparams.policy_network + "/.*")  # pylint: disable=unexpected-keyword-arg
    )
    self._sess = tf.Session()
    self._sess.run(tf.global_variables_initializer())
    trainer_lib.restore_checkpoint(policy_dir, model_saver, self._sess)
Example 9: set_precision
# Required module import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import cast [as alias]
def set_precision(predictions, labels,
                  weights_fn=common_layers.weights_nonzero):
  """Precision of set predictions.

  Args:
    predictions: A Tensor of scores of shape [batch, nlabels].
    labels: A Tensor of int32s giving true set elements,
      of shape [batch, seq_length].
    weights_fn: A function to weight the elements.

  Returns:
    hits: A Tensor of shape [batch, nlabels].
    weights: A Tensor of shape [batch, nlabels].
  """
  with tf.variable_scope("set_precision", values=[predictions, labels]):
    labels = tf.squeeze(labels, [2, 3])
    weights = weights_fn(labels)
    labels = tf.one_hot(labels, predictions.shape[-1])
    labels = tf.reduce_max(labels, axis=1)
    labels = tf.cast(labels, tf.bool)
    return tf.to_float(tf.equal(labels, predictions)), weights
Example 10: set_recall
# Required module import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import cast [as alias]
def set_recall(predictions, labels, weights_fn=common_layers.weights_nonzero):
  """Recall of set predictions.

  Args:
    predictions: A Tensor of scores of shape [batch, nlabels].
    labels: A Tensor of int32s giving true set elements,
      of shape [batch, seq_length].
    weights_fn: A function to weight the elements.

  Returns:
    hits: A Tensor of shape [batch, nlabels].
    weights: A Tensor of shape [batch, nlabels].
  """
  with tf.variable_scope("set_recall", values=[predictions, labels]):
    labels = tf.squeeze(labels, [2, 3])
    weights = weights_fn(labels)
    labels = tf.one_hot(labels, predictions.shape[-1])
    labels = tf.reduce_max(labels, axis=1)
    labels = tf.cast(labels, tf.bool)
    return tf.to_float(tf.equal(labels, predictions)), weights
Example 11: image_summary
# Required module import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import cast [as alias]
def image_summary(predictions, targets, hparams):
  """Reshapes predictions and passes it to tensorboard.

  Args:
    predictions: The predicted image (logits).
    targets: The ground truth.
    hparams: model hparams.

  Returns:
    summary_proto: containing the summary images.
    weights: A Tensor of zeros of the same shape as predictions.
  """
  del hparams
  results = tf.cast(tf.argmax(predictions, axis=-1), tf.uint8)
  gold = tf.cast(targets, tf.uint8)
  summary1 = tf.summary.image("prediction", results, max_outputs=2)
  summary2 = tf.summary.image("data", gold, max_outputs=2)
  summary = tf.summary.merge([summary1, summary2])
  return summary, tf.zeros_like(predictions)
Example 12: _quantize
# Required module import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import cast [as alias]
def _quantize(x, params, randomize=True):
  """Quantize x according to params, optionally randomizing the rounding."""
  if not params.quantize:
    return x

  if not randomize:
    return tf.bitcast(
        tf.cast(x / params.quantization_scale, tf.int16), tf.float16)

  abs_x = tf.abs(x)
  sign_x = tf.sign(x)
  y = abs_x / params.quantization_scale
  y = tf.floor(y + tf.random_uniform(common_layers.shape_list(x)))
  y = tf.minimum(y, tf.int16.max) * sign_x
  q = tf.bitcast(tf.cast(y, tf.int16), tf.float16)
  return q
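For context, a sketch of the matching inverse (a hypothetical _dequantize written here for illustration): the float16 result above is only a bit-level carrier for the int16 quantized value, so decoding bitcasts back and undoes the scaling:

import tensorflow.compat.v1 as tf

def _dequantize(q, params):
  """Hypothetical inverse of _quantize: recover the int16 payload from
  the float16 carrier and rescale."""
  if not params.quantize:
    return q
  return tf.cast(tf.bitcast(q, tf.int16), tf.float32) * params.quantization_scale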
Example 13: average_sharded_losses
# Required module import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import cast [as alias]
def average_sharded_losses(sharded_losses):
  """Average losses across datashards.

  Args:
    sharded_losses: list<dict<str loss_name, Tensor loss>>. The loss
      can be a single Tensor or a 2-tuple (numerator and denominator).

  Returns:
    losses: dict<str loss_name, Tensor avg_loss>
  """
  losses = {}
  for loss_name in sorted(sharded_losses[0]):
    all_shards = [shard_losses[loss_name] for shard_losses in sharded_losses]
    if isinstance(all_shards[0], tuple):
      sharded_num, sharded_den = zip(*all_shards)
      mean_loss = (
          tf.add_n(sharded_num) / tf.maximum(
              tf.cast(1.0, sharded_den[0].dtype), tf.add_n(sharded_den)))
    else:
      mean_loss = tf.reduce_mean(all_shards)
    losses[loss_name] = mean_loss
  return losses
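A small usage sketch with toy values: in the (numerator, denominator) form, shards are combined as one weighted mean rather than a mean of per-shard means:

import tensorflow.compat.v1 as tf

sharded = [
    {'training': (tf.constant(4.0), tf.constant(2.0))},
    {'training': (tf.constant(6.0), tf.constant(3.0))},
]
losses = average_sharded_losses(sharded)
# losses['training'] evaluates to (4 + 6) / (2 + 3) = 2.0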
Example 14: top_k_with_unique
# Required module import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import cast [as alias]
def top_k_with_unique(inputs, k):
  """Finds the values and indices of the k largest entries.

  Instead of doing sort like tf.nn.top_k, this function finds the max value
  k times. The running time is proportional to k, which can be faster when k
  is small. The current implementation supports only inputs of rank 2.
  In addition, iota is used to replace the lower bits of each element, which
  makes the selection more stable when there are equal elements. The
  overhead is that output values are approximated.

  Args:
    inputs: A tensor with rank of 2. [batch_size, original_size].
    k: An integer, number of top elements to select.

  Returns:
    top_values: A tensor, the k largest elements in sorted order.
      [batch_size, k].
    indices: A tensor, indices of the top_values. [batch_size, k].
  """
  unique_inputs = _create_make_unique(tf.cast(inputs, tf.float32))
  top_values, indices = _create_topk_unique(unique_inputs, k)
  top_values = tf.cast(top_values, inputs.dtype)
  return top_values, indices
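A usage sketch (the helpers _create_make_unique and _create_topk_unique are assumed to be defined alongside, as in the source module):

import tensorflow.compat.v1 as tf

scores = tf.constant([[0.3, 0.9, 0.9, 0.1]])
top_values, indices = top_k_with_unique(scores, k=2)
# Ties like the two 0.9 entries are broken deterministically by the
# iota bits, so `indices` is stable across runs; `top_values` may
# differ from the inputs in the low mantissa bits.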
Example 15: pad_batch
# Required module import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import cast [as alias]
def pad_batch(features, batch_multiple):
  """Pad batch dim of features to nearest multiple of batch_multiple."""
  feature = list(features.items())[0][1]
  batch_size = tf.shape(feature)[0]
  mod = batch_size % batch_multiple
  has_mod = tf.cast(tf.cast(mod, tf.bool), tf.int32)
  batch_padding = batch_multiple * has_mod - mod
  padded_features = {}
  for k, feature in features.items():
    rank = len(feature.shape)
    paddings = [[0, 0] for _ in range(rank)]
    paddings[0][1] = batch_padding
    padded_feature = tf.pad(feature, paddings)
    padded_features[k] = padded_feature
  return padded_features
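A usage sketch with illustrative shapes: a batch of 5 is padded up to the next multiple of 4, while an exact multiple would be left untouched:

import tensorflow.compat.v1 as tf

features = {'inputs': tf.zeros([5, 7]), 'targets': tf.zeros([5, 3])}
padded = pad_batch(features, batch_multiple=4)
# Both features now have batch dim 8 (5 -> 8); a batch of 8 would get
# zero padding because 8 % 4 == 0.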