This article collects typical usage examples of the tensorflow.compat.v1.bfloat16 method in Python. If you are unsure what v1.bfloat16 does, how to use it, or want concrete example code, the curated examples below may help. You can also explore other members of the enclosing module, tensorflow.compat.v1.
The following presents 15 code examples of the v1.bfloat16 method, sorted by popularity by default.
Example 1: _custom_getter
# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import bfloat16 [as alias]
def _custom_getter(self):
  if self.hparams.weight_dtype == "bfloat16":
    if self.hparams.optimizer != "Adafactor":
      raise NotImplementedError(
          "weight_dtype=bfloat16 only implemented with Adafactor optimizer")
    activation_dtype = tf.float32
    if self.hparams.activation_dtype == "bfloat16":
      activation_dtype = tf.bfloat16
    return quantization.EighthPowerEncoding().custom_getter(
        activation_dtype=activation_dtype)
  elif self.hparams.activation_dtype == "bfloat16":
    return quantization.bfloat16_activations_var_getter
  elif mixed_precision_is_enabled(hparams=self.hparams):
    return quantization.float16_activations_var_getter
  else:
    return None
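As a rough illustration of when the first branch fires (the hparams object and its remaining fields are assumed from tensor2tensor and not shown here):

# Hypothetical hparams settings selecting the EighthPowerEncoding getter.
hparams.weight_dtype = "bfloat16"       # encoded bfloat16 weight storage;
hparams.optimizer = "Adafactor"         # required for bfloat16 weights.
hparams.activation_dtype = "bfloat16"   # decoded weights cast to bfloat16.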
Example 2: bfloat16_activations_var_getter
# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import bfloat16 [as alias]
def bfloat16_activations_var_getter(getter, *args, **kwargs):
  """A custom getter function for float32 parameters and bfloat16 activations.

  Args:
    getter: custom getter
    *args: arguments
    **kwargs: keyword arguments

  Returns:
    variables with the correct dtype.

  Raises:
    KeyError: if "dtype" is not provided as a kwarg.
  """
  requested_dtype = kwargs["dtype"]
  if requested_dtype == tf.bfloat16:
    kwargs["dtype"] = tf.float32
  var = getter(*args, **kwargs)
  # This if statement is needed to guard the cast, because batch norm
  # assigns directly to the return value of this custom getter. The cast
  # makes the return value not a variable so it cannot be assigned. Batch
  # norm variables are always in fp32 so this if statement is never
  # triggered for them.
  if var.dtype.base_dtype != requested_dtype:
    var = tf.cast(var, requested_dtype)
  return var
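A minimal usage sketch, assuming the TF1-style variable API; custom getters of this kind are installed through tf.variable_scope:

import tensorflow.compat.v1 as tf

# Inside this scope, a variable requested as bfloat16 is created as float32
# (so optimizer updates stay precise) and cast to bfloat16 when read.
with tf.variable_scope(
    "layer", custom_getter=bfloat16_activations_var_getter):
  w = tf.get_variable("w", shape=[512, 512], dtype=tf.bfloat16)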
Example 3: _to_bfloat16_unbiased
# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import bfloat16 [as alias]
def _to_bfloat16_unbiased(x, noise):
  """Convert a float32 to a bfloat16 using randomized roundoff.

  Args:
    x: A float32 Tensor.
    noise: a float32 Tensor with values in [0, 1), broadcastable to tf.shape(x)

  Returns:
    A bfloat16 Tensor.
  """
  x_sign = tf.sign(x)
  # Make sure x is positive. If it is zero, the two candidates are identical.
  x = x * x_sign + 1e-30
  cand1 = tf.to_bfloat16(x)
  cand1_f = tf.to_float(cand1)
  # This relies on the fact that for a positive bfloat16 b, b * 1.005 gives
  # you the next higher bfloat16 and b * 0.995 gives you the next lower one.
  # Both 1.005 and 0.995 are ballpark estimates.
  cand2 = tf.to_bfloat16(
      tf.where(tf.greater(x, cand1_f), cand1_f * 1.005, cand1_f * 0.995))
  ret = _randomized_roundoff_to_bfloat16(x, noise, cand1, cand2)
  return ret * tf.to_bfloat16(x_sign)
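A calling sketch, assuming TF1 random ops; _randomized_roundoff_to_bfloat16 is a sibling helper in the same tensor2tensor module:

x = tf.random_normal([128, 128])
# Fresh uniform noise in [0, 1) drives the random choice between the two
# neighboring bfloat16 candidates, making the rounding unbiased on average.
noise = tf.random_uniform(tf.shape(x), minval=0.0, maxval=1.0)
x_bf16 = _to_bfloat16_unbiased(x, noise)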
Example 4: custom_getter
# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import bfloat16 [as alias]
def custom_getter(self, activation_dtype=tf.bfloat16):
  """A custom getter that uses the encoding for bfloat16 and float32 vars.

  When a bfloat16 or float32 variable is requested, an encoded bfloat16
  variable is created, which is then decoded and cast to a bfloat16
  activation.

  Args:
    activation_dtype: a dtype to which to convert the decoded value.

  Returns:
    a function.
  """
  def getter_fn(getter, *args, **kwargs):
    requested_dtype = kwargs["dtype"]
    if requested_dtype in (tf.bfloat16, tf.float32):
      kwargs["dtype"] = tf.bfloat16
      kwargs["initializer"] = _EncodingInitializer(
          kwargs["initializer"], self)
      ret = self._decode_with_identity_gradient(getter(*args, **kwargs))
      return tf.cast(ret, activation_dtype)
    return getter(*args, **kwargs)
  return getter_fn
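A sketch of installing this getter (the scope name is illustrative; EighthPowerEncoding is the enclosing class in tensor2tensor's quantization module):

encoding = quantization.EighthPowerEncoding()
with tf.variable_scope(
    "model",
    custom_getter=encoding.custom_getter(activation_dtype=tf.bfloat16)):
  # Stored in the eighth-power bfloat16 encoding, decoded on read, and cast
  # to the requested activation dtype.
  w = tf.get_variable("w", shape=[256, 256], dtype=tf.float32)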
Example 5: replace_dtype
# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import bfloat16 [as alias]
def replace_dtype(tensor_spec_struct, from_dtype, to_dtype):
  """Replaces all elements of type from_dtype with to_dtype.

  This functionality is useful for TPU training since it is most efficient
  with bfloat16, whereas preprocessing on CPU only operates on float32.

  Args:
    tensor_spec_struct: The instance of TensorSpecStruct which will be updated
      in-place.
    from_dtype: The dtype which will be replaced.
    to_dtype: The target dtype.

  Returns:
    The in-place updated TensorSpecStruct.
  """
  for key, value in tensor_spec_struct.items():
    if value.dtype == from_dtype:
      tensor_spec_struct[key] = ExtendedTensorSpec.from_spec(
          spec=value, dtype=to_dtype)
  return tensor_spec_struct
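An illustrative call, assuming tensor2robot's tensorspec_utils; the spec contents below are invented for the example:

# Hypothetical flat spec: cast every float32 entry to bfloat16 before feeding
# a TPU, since CPU-side preprocessing produced float32.
spec = tensorspec_utils.TensorSpecStruct()
spec['image'] = ExtendedTensorSpec(
    shape=(64, 64, 3), dtype=tf.float32, name='image')
spec = replace_dtype(spec, from_dtype=tf.float32, to_dtype=tf.bfloat16)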
Example 6: cast_float32_to_bfloat16
# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import bfloat16 [as alias]
def cast_float32_to_bfloat16(tensor_spec_struct, output_spec):
  """Casts tensors with dtype float32 to bfloat16 depending on the output spec.

  Args:
    tensor_spec_struct: The instance of TensorSpecStruct which will be updated
      in-place.
    output_spec: The reference TensorSpecStruct which allows to infer which
      tensors should be cast to bfloat16.

  Returns:
    The in-place updated TensorSpecStruct.
  """
  for key, value in output_spec.items():
    if value is not None and value.dtype == tf.bfloat16:
      if tensor_spec_struct[key].dtype != tf.float32:
        raise ValueError(
            'Attempting to convert non-tf.float32 type {} to tf.bfloat16 '
            'for the element {} with the name {}.'.format(
                tensor_spec_struct[key].dtype, tensor_spec_struct[key], key))
      tensor_spec_struct[key] = tf.cast(
          tensor_spec_struct[key], dtype=tf.bfloat16)
  return tensor_spec_struct
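Continuing the sketch from Example 5 (names still hypothetical): once the output spec marks an element as bfloat16, the matching runtime tensor is cast in place:

tensors = tensorspec_utils.TensorSpecStruct()
tensors['image'] = tf.zeros((1, 64, 64, 3), dtype=tf.float32)
# 'spec' from the previous sketch marks 'image' as bfloat16, so the float32
# tensor under the same key is cast before being fed to the model.
tensors = cast_float32_to_bfloat16(tensors, spec)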
Example 7: test_parsing
# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import bfloat16 [as alias]
def test_parsing(self):
  base_dir = 'tensor2robot'
  file_pattern = os.path.join(
      FLAGS.test_srcdir, base_dir, 'test_data/pose_env_test_data.tfrecord')
  dataset = tfdata.parallel_read(file_patterns=file_pattern)
  state_spec = TSPEC(shape=(64, 64, 3), dtype=tf.uint8, name='state/image',
                     data_format='jpeg')
  action_spec = TSPEC(shape=(2,), dtype=tf.bfloat16, name='pose')
  reward_spec = TSPEC(shape=(), dtype=tf.float32, name='reward')
  feature_tspec = PoseEnvFeature(state=state_spec, action=action_spec)
  label_tspec = PoseEnvLabel(reward=reward_spec)
  batched_dataset = dataset.batch(batch_size=1)
  dataset = tfdata.serialized_to_parsed(batched_dataset, feature_tspec,
                                        label_tspec)
  iterator = dataset.make_one_shot_iterator()
  features, labels = iterator.get_next()
  tensorspec_utils.assert_equal(feature_tspec, features, ignore_batch=True)
  tensorspec_utils.assert_equal(label_tspec, labels, ignore_batch=True)
  with self.session() as session:
    features_, labels_ = session.run([features, labels])
    self.assertAllEqual([1, 64, 64, 3], features_.state.shape)
    self.assertAllEqual([1, 2], features_.action.shape)
    self.assertAllEqual((1,), labels_.reward.shape)
Example 8: get_out_feature_specification
# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import bfloat16 [as alias]
def get_out_feature_specification(self, mode):
  """The specification for the output features after executing preprocess_fn.

  Note: we strip all optional specs to further reduce communication and
  computation overhead for feeding to TPUs.

  Arguments:
    mode: mode key for this feature specification

  Returns:
    A TensorSpecStruct describing the required and optional tensors.
  """
  return tensorspec_utils.replace_dtype(
      tensorspec_utils.filter_required_flat_tensor_spec(
          self._preprocessor.get_out_feature_specification(mode)),
      from_dtype=tf.float32,
      to_dtype=tf.bfloat16)
Example 9: get_out_label_specification
# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import bfloat16 [as alias]
def get_out_label_specification(self, mode):
  """The specification for the output labels after executing preprocess_fn.

  Note: we strip all optional specs to further reduce communication and
  computation overhead for feeding to TPUs.

  Arguments:
    mode: mode key for this feature specification

  Returns:
    A TensorSpecStruct describing the required and optional tensors.
  """
  return tensorspec_utils.replace_dtype(
      tensorspec_utils.filter_required_flat_tensor_spec(
          self._preprocessor.get_out_label_specification(mode)),
      from_dtype=tf.float32,
      to_dtype=tf.bfloat16)
Example 10: preprocess_for_eval
# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import bfloat16 [as alias]
def preprocess_for_eval(image_bytes, use_bfloat16, image_size=IMAGE_SIZE):
  """Preprocesses the given image for evaluation.

  Args:
    image_bytes: `Tensor` representing an image binary of arbitrary size.
    use_bfloat16: `bool` for whether to use bfloat16.
    image_size: image size.

  Returns:
    A preprocessed image `Tensor`.
  """
  image = _decode_and_center_crop(image_bytes, image_size)
  image = tf.reshape(image, [image_size, image_size, 3])
  image = tf.image.convert_image_dtype(
      image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32)
  return image
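A pipeline sketch, assuming image_bytes_dataset yields raw encoded JPEG strings and that 224 is the model's input resolution; _decode_and_center_crop is defined elsewhere in the same file:

eval_dataset = image_bytes_dataset.map(
    lambda image_bytes: preprocess_for_eval(
        image_bytes, use_bfloat16=True, image_size=224))
# Batches of [64, 224, 224, 3] bfloat16 images for TPU evaluation.
eval_dataset = eval_dataset.batch(64)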
Example 11: get_variable_dtype
# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import bfloat16 [as alias]
def get_variable_dtype(
    master_dtype=tf.bfloat16,
    slice_dtype=tf.float32,
    activation_dtype=tf.float32):
  """Datatypes to use for the run.

  Args:
    master_dtype: string, datatype for checkpoints;
      keep this the same between training and eval/inference.
    slice_dtype: string, datatype for variables in memory;
      must be tf.float32 for training.
    activation_dtype: string, datatype for activations;
      less memory usage if tf.bfloat16, but possible numerical issues.

  Returns:
    a mtf.VariableDType
  """
  return mtf.VariableDType(
      master_dtype=tf.as_dtype(master_dtype),
      slice_dtype=tf.as_dtype(slice_dtype),
      activation_dtype=tf.as_dtype(activation_dtype))
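A call sketch (mtf is Mesh TensorFlow); the choices below follow the docstring's guidance, with bfloat16 checkpoints and activations but float32 slices for training:

variable_dtype = get_variable_dtype(
    master_dtype=tf.bfloat16,      # checkpoint storage
    slice_dtype=tf.float32,        # in-memory variables; float32 for training
    activation_dtype=tf.bfloat16)  # lower memory, watch for numerics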
Example 12: bfloat16_to_float32_nested
# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import bfloat16 [as alias]
def bfloat16_to_float32_nested(input_nested):
  """Converts bfloat16 tensors in a nested structure to float32.

  Args:
    input_nested: A Python dict, values being Tensor or Python list/tuple of
      Tensor or Non-Tensor.

  Returns:
    A Python structure with the same layout as `input_nested`, with all
    bfloat16 tensors converted to float32.
  """
  if isinstance(input_nested, tf.Tensor):
    if input_nested.dtype == tf.bfloat16:
      return tf.cast(input_nested, dtype=tf.float32)
    else:
      return input_nested
  elif isinstance(input_nested, (list, tuple)):
    out_tensor_dict = [bfloat16_to_float32_nested(t) for t in input_nested]
  elif isinstance(input_nested, dict):
    out_tensor_dict = {
        k: bfloat16_to_float32_nested(v) for k, v in input_nested.items()
    }
  else:
    return input_nested
  return out_tensor_dict
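A quick usage sketch; the dict contents are invented for illustration:

outputs = {
    'logits': tf.zeros((8, 1000), dtype=tf.bfloat16),
    'features': [tf.ones((8, 64), dtype=tf.bfloat16), 'metadata'],
}
# Every bfloat16 tensor is cast to float32; non-tensors pass through as-is.
outputs_f32 = bfloat16_to_float32_nested(outputs)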
Example 13: bfloat16_to_float32_nested
# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import bfloat16 [as alias]
def bfloat16_to_float32_nested(bfloat16_tensor_dict):
  """Converts bfloat16 tensors in a nested structure to float32.

  Other tensors not of dtype bfloat16 will be left as is.

  Args:
    bfloat16_tensor_dict: A Python dict, values being Tensor or Python
      list/tuple of Tensor.

  Returns:
    A Python dict with the same structure as `bfloat16_tensor_dict`,
    with all bfloat16 tensors converted to float32.
  """
  float32_tensor_dict = {}
  for k, v in bfloat16_tensor_dict.items():
    if isinstance(v, tf.Tensor):
      float32_tensor_dict[k] = bfloat16_to_float32(v)
    elif isinstance(v, (list, tuple)):
      float32_tensor_dict[k] = [bfloat16_to_float32(t) for t in v]
  return float32_tensor_dict
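This variant delegates to a per-tensor helper, bfloat16_to_float32, defined alongside it in the original module. A minimal sketch of what that helper presumably does:

def bfloat16_to_float32(tensor):
  """Casts a single tensor from bfloat16 to float32 (sketch, assumed)."""
  if tensor.dtype == tf.bfloat16:
    return tf.cast(tensor, dtype=tf.float32)
  return tensor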
Example 14: model_fn
# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import bfloat16 [as alias]
def model_fn(self, features):
  with tf.variable_scope(tf.get_variable_scope(), use_resource=True) as vs:
    self._add_variable_scope("model_fn", vs)
    transformed_features = self.bottom(features)

    if self.hparams.activation_dtype == "bfloat16":
      for k, v in sorted(six.iteritems(transformed_features)):
        if v.dtype == tf.float32:
          transformed_features[k] = tf.cast(v, tf.bfloat16)

    with tf.variable_scope("body") as body_vs:
      self._add_variable_scope("body", body_vs)
      log_info("Building model body")
      body_out = self.body(transformed_features)
    output, losses = self._normalize_body_output(body_out)

    if "training" in losses:
      log_info("Skipping T2TModel top and loss because training loss "
               "returned from body")
      logits = output
    else:
      logits = self.top(output, features)
      losses["training"] = 0.0
      if (self._hparams.mode != tf.estimator.ModeKeys.PREDICT and
          self._hparams.mode != "attack"):
        losses["training"] = self.loss(logits, features)

    return logits, losses
Example 15: adafactor_optimizer_from_hparams
# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import bfloat16 [as alias]
def adafactor_optimizer_from_hparams(hparams, lr):
  """Create an Adafactor optimizer based on model hparams.

  Args:
    hparams: model hyperparameters
    lr: learning rate scalar.

  Returns:
    an AdafactorOptimizer

  Raises:
    ValueError: on illegal values
  """
  if hparams.optimizer_adafactor_decay_type == "adam":
    decay_rate = adafactor_decay_rate_adam(
        hparams.optimizer_adafactor_beta2)
  elif hparams.optimizer_adafactor_decay_type == "pow":
    decay_rate = adafactor_decay_rate_pow(
        hparams.optimizer_adafactor_memory_exponent)
  else:
    raise ValueError("unknown optimizer_adafactor_decay_type")
  if hparams.weight_dtype == "bfloat16":
    parameter_encoding = quantization.EighthPowerEncoding()
  else:
    parameter_encoding = None
  return AdafactorOptimizer(
      multiply_by_parameter_scale=(
          hparams.optimizer_adafactor_multiply_by_parameter_scale),
      learning_rate=lr,
      decay_rate=decay_rate,
      beta1=hparams.optimizer_adafactor_beta1,
      clipping_threshold=hparams.optimizer_adafactor_clipping_threshold,
      factored=hparams.optimizer_adafactor_factored,
      simulated_quantize_bits=getattr(
          hparams, "simulated_parameter_quantize_bits", 0),
      parameter_encoding=parameter_encoding,
      use_locking=False,
      name="Adafactor")