This article collects typical usage examples of tensorflow.compat.v1.float16 in Python. If you are unsure what tf.compat.v1.float16 is, how it is used, or what real code that uses it looks like, the curated examples below may help. Note that float16 is a dtype exposed as an attribute of the tensorflow.compat.v1 module, whose other members you may also want to explore.
The 15 code examples of v1.float16 below are ordered by popularity by default.
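Before the examples, here is a minimal illustrative sketch (not taken from any of them) of what the tf.float16 dtype object is typically used for: as the target of tf.cast and as the dtype of tensors and variables.
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

x = tf.constant([1.0, 2.0, 3.0], dtype=tf.float32)
y = tf.cast(x, tf.float16)               # half-precision activations
print(y.dtype)                           # <dtype: 'float16'>
print(tf.float16.min, tf.float16.max)    # representable range: -65504.0 65504.0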
Example 1: _fp16_variable_creator
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import float16 [as alias]
def _fp16_variable_creator(next_creator, **kwargs):
  """Variable creator to create variables in fp32 and cast them to fp16."""
  dtype = kwargs.get('dtype', None)
  initial_value = kwargs.get('initial_value', None)
  if dtype is None:
    if initial_value is not None and not callable(initial_value):
      dtype = initial_value.dtype
  if dtype == tf.float16:
    if callable(initial_value):
      new_initial_value = lambda: tf.cast(initial_value(), tf.float32)
    else:
      new_initial_value = tf.cast(initial_value, tf.float32)
    kwargs['dtype'] = tf.float32
    kwargs['initial_value'] = new_initial_value
    var = next_creator(**kwargs)
    return tf.cast(var, dtype=tf.float16)
  else:
    return next_creator(**kwargs)
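A sketch of how such a creator is typically activated, via tf.variable_creator_scope (Example 7 below uses exactly this pattern); the variable name and shape here are arbitrary:
with tf.variable_creator_scope(_fp16_variable_creator):
  w = tf.Variable(tf.random_normal([4, 4], dtype=tf.float16), name='w')
# The underlying storage stays float32 (numerically safer for updates); reads
# come back as float16 because the creator wraps the variable in a cast.
print(w.dtype)  # <dtype: 'float16'>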
Example 2: __init__
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import float16 [as alias]
def __init__(self,
             model_name,
             batch_size,
             learning_rate,
             fp16_loss_scale,
             params=None):
  self.model_name = model_name
  self.batch_size = batch_size
  self.default_batch_size = batch_size
  self.learning_rate = learning_rate
  # TODO(reedwm) Set custom loss scales for each model instead of using the
  # default of 128.
  self.fp16_loss_scale = fp16_loss_scale
  # use_tf_layers specifies whether to build the model using tf.layers.
  # fp16_vars specifies whether to create the variables in float16.
  if params:
    self.use_tf_layers = params.use_tf_layers
    self.fp16_vars = params.fp16_vars
    self.data_type = tf.float16 if params.use_fp16 else tf.float32
  else:
    self.use_tf_layers = True
    self.fp16_vars = False
    self.data_type = tf.float32
Example 3: _quantize
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import float16 [as alias]
def _quantize(x, params, randomize=True):
  """Quantize x according to params, optionally randomizing the rounding."""
  if not params.quantize:
    return x

  if not randomize:
    return tf.bitcast(
        tf.cast(x / params.quantization_scale, tf.int16), tf.float16)

  abs_x = tf.abs(x)
  sign_x = tf.sign(x)
  y = abs_x / params.quantization_scale
  y = tf.floor(y + tf.random_uniform(common_layers.shape_list(x)))
  y = tf.minimum(y, tf.int16.max) * sign_x
  q = tf.bitcast(tf.cast(y, tf.int16), tf.float16)
  return q
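A small hedged illustration of the deterministic branch (randomize=False): the value is divided by the scale, cast to int16, and the raw 16 bits are reinterpreted as a float16 tensor via tf.bitcast. FakeParams is a hypothetical stand-in for the diet params object, and the scale shown is just a plausible choice, not a value taken from this example:
from collections import namedtuple

FakeParams = namedtuple('FakeParams', ['quantize', 'quantization_scale'])
params = FakeParams(quantize=True, quantization_scale=10.0 / tf.int16.max)
q = _quantize(tf.constant([0.5, -0.25]), params, randomize=False)
print(q.dtype)  # float16, used only as a 16-bit container for the int16 payload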
Example 4: float16_scope
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import float16 [as alias]
@contextlib.contextmanager  # requires `import contextlib`; restored so the yield below works as a `with` scope
def float16_scope():
  """Scope class for float16."""

  def _custom_getter(getter, *args, **kwargs):
    """Returns a custom getter that methods must be called under."""
    cast_to_float16 = False
    requested_dtype = kwargs['dtype']
    if requested_dtype == tf.float16:
      kwargs['dtype'] = tf.float32
      cast_to_float16 = True
    var = getter(*args, **kwargs)
    if cast_to_float16:
      var = tf.cast(var, tf.float16)
    return var

  with tf.variable_scope('', custom_getter=_custom_getter) as varscope:
    yield varscope
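An illustrative use of the scope (it relies on the contextlib.contextmanager decorator shown above): any variable requested as float16 inside it is backed by float32 storage and returned as a float16 cast.
with float16_scope():
  v = tf.get_variable('v', shape=[8], dtype=tf.float16)
print(v.dtype)  # float16 tensor, cast from the underlying float32 variable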
Example 5: _verify_infiniteness_ops
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import float16 [as alias]
def _verify_infiniteness_ops(tf_op, name):
    """Test operator infinity ops."""
    # Only float types are allowed in TensorFlow for isfinite and isinf.
    # float16 is failing on CUDA.
    tf_dtypes = ["float32", "float64"]
    for tf_dtype in tf_dtypes:
        shape = (8, 8)
        data = np.random.uniform(size=shape).astype(tf_dtype)
        data.ravel()[np.random.choice(
            data.size, int(data.size * 0.5), replace=False)] = np.infty
        data.ravel()[np.random.choice(
            data.size, int(data.size * 0.5), replace=False)] = np.nan

        tf.reset_default_graph()
        in_data = tf.placeholder(tf_dtype, shape, name="in_data")
        tf_op(in_data, name=name)
        compare_tf_with_tvm([data], ['in_data:0'], '{}:0'.format(name))
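This helper is presumably driven by small wrappers that pass in a concrete TF1 op and a matching name; the pairs below are an assumption about how such TVM frontend tests are usually invoked:
_verify_infiniteness_ops(tf.is_inf, "isinf")        # exercise tf.is_inf
_verify_infiniteness_ops(tf.is_finite, "isfinite")  # exercise tf.is_finite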
Example 6: _stacked_separable_conv
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import float16 [as alias]
def _stacked_separable_conv(net, stride, operation, filter_size):
  """Takes in an operation and parses it to the correct sep operation."""
  num_layers, kernel_size = _operation_to_info(operation)
  net_type = net.dtype
  net = tf.cast(net, tf.float32) if net_type == tf.float16 else net
  for layer_num in range(num_layers - 1):
    net = tf.nn.relu(net)
    net = slim.separable_conv2d(
        net,
        filter_size,
        kernel_size,
        depth_multiplier=1,
        scope='separable_{0}x{0}_{1}'.format(kernel_size, layer_num + 1),
        stride=stride)
    net = slim.batch_norm(
        net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, layer_num + 1))
    stride = 1
  net = tf.nn.relu(net)
  net = slim.separable_conv2d(
      net,
      filter_size,
      kernel_size,
      depth_multiplier=1,
      scope='separable_{0}x{0}_{1}'.format(kernel_size, num_layers),
      stride=stride)
  net = slim.batch_norm(
      net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, num_layers))
  net = tf.cast(net, net_type)
  return net
Example 7: build_network
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import float16 [as alias]
def build_network(self, inputs, phase_train=True, nclass=1001):
  try:
    from official.recommendation import neumf_model  # pylint: disable=g-import-not-at-top
  except ImportError as e:
    # str(e) instead of e.message, which no longer exists on Python 3.
    if 'neumf_model' not in str(e):
      raise
    raise ImportError('To use the experimental NCF model, you must clone the '
                      'repo https://github.com/tensorflow/models and add '
                      'tensorflow/models to the PYTHONPATH.')
  del nclass
  users, items, _ = inputs
  params = {
      'num_users': _NUM_USERS_20M,
      'num_items': _NUM_ITEMS_20M,
      'model_layers': (256, 256, 128, 64),
      'mf_dim': 64,
      'mf_regularization': 0,
      'mlp_reg_layers': (0, 0, 0, 0),
      'use_tpu': False
  }
  user_input = tf.keras.layers.Input(tensor=users, name='user_input')
  item_input = tf.keras.layers.Input(tensor=items, name='item_input')
  if self.data_type == tf.float32:
    keras_model = neumf_model.construct_model(user_input, item_input, params)
    logits = keras_model.output
  else:
    assert self.data_type == tf.float16
    old_floatx = tf.keras.backend.floatx()
    try:
      tf.keras.backend.set_floatx('float16')
      # We cannot rely on the variable_scope's fp16 custom getter here,
      # because the NCF model uses Keras layers, which ignore variable
      # scopes. So we use a variable_creator_scope instead.
      with tf.variable_creator_scope(_fp16_variable_creator):
        keras_model = neumf_model.construct_model(user_input, item_input,
                                                  params)
      logits = tf.cast(keras_model.output, tf.float32)
    finally:
      tf.keras.backend.set_floatx(old_floatx)
  return model.BuildNetworkResult(logits=logits, extra_info=None)
Example 8: _assert_correct_var_type
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import float16 [as alias]
def _assert_correct_var_type(self, var, params):
  if 'gpu_cached_inputs' not in var.name:
    if params.use_fp16 and params.fp16_vars and 'batchnorm' not in var.name:
      expected_type = tf.float16
    else:
      expected_type = tf.float32
    self.assertEqual(var.dtype.base_dtype, expected_type)
Example 9: get_custom_getter
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import float16 [as alias]
def get_custom_getter(self):
  """Returns a custom getter that this class's methods must be called under.

  All methods of this class must be called under a variable scope that was
  passed this custom getter. Example:

  ```python
  network = ConvNetBuilder(...)
  with tf.variable_scope('cg', custom_getter=network.get_custom_getter()):
    network.conv(...)
    # Call more methods of network here
  ```

  Currently, this custom getter only does anything if self.use_tf_layers is
  True. In that case, it causes variables to be stored as dtype
  self.variable_dtype, then cast to the requested dtype, instead of directly
  storing the variable as the requested dtype.
  """
  def inner_custom_getter(getter, *args, **kwargs):
    """Custom getter that forces variables to have type self.variable_dtype."""
    if not self.use_tf_layers:
      return getter(*args, **kwargs)
    requested_dtype = kwargs['dtype']
    if not (requested_dtype == tf.float32 and
            self.variable_dtype == tf.float16):
      # Only change the variable dtype if doing so does not decrease variable
      # precision.
      kwargs['dtype'] = self.variable_dtype
    var = getter(*args, **kwargs)
    # This if statement is needed to guard the cast, because batch norm
    # assigns directly to the return value of this custom getter. The cast
    # makes the return value not a variable so it cannot be assigned. Batch
    # norm variables are always in fp32 so this if statement is never
    # triggered for them.
    if var.dtype.base_dtype != requested_dtype:
      var = tf.cast(var, requested_dtype)
    return var
  return inner_custom_getter
Example 10: simulate
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import float16 [as alias]
def simulate(self, action):
  """Step the batch of environments.

  The results of the step can be accessed from the variables defined below.

  Args:
    action: Tensor holding the batch of actions to apply.

  Returns:
    Operation.
  """
  with tf.name_scope("environment/simulate"):
    if action.dtype in (tf.float16, tf.float32, tf.float64):
      action = tf.check_numerics(action, "action")

    def step(action):
      step_response = self._batch_env.step(action)
      # Current env doesn't return `info`, but EnvProblem does.
      # TODO(afrozm): The proper way to do this is to make T2TGymEnv return
      # an empty info return value.
      if len(step_response) == 3:
        (observ, reward, done) = step_response
      else:
        (observ, reward, done, _) = step_response
      return (observ, reward.astype(np.float32), done)

    observ, reward, done = tf.py_func(
        step, [action],
        [self.observ_dtype, tf.float32, tf.bool], name="step")
    reward = tf.check_numerics(reward, "reward")
    reward.set_shape((len(self),))
    done.set_shape((len(self),))
    with tf.control_dependencies([self._observ.assign(observ)]):
      return tf.identity(reward), tf.identity(done)
Example 11: _mixed_precision_is_enabled
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import float16 [as alias]
def _mixed_precision_is_enabled(hparams):
  """Should be the same as in common_attention, avoiding import."""
  activation_dtype = hparams.activation_dtype
  weight_dtype = hparams.weight_dtype
  return activation_dtype == tf.float16 and weight_dtype == tf.float32
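A minimal check of the predicate with a hypothetical hparams stand-in: mixed precision is reported as enabled only for float16 activations combined with float32 master weights.
from collections import namedtuple

HParams = namedtuple('HParams', ['activation_dtype', 'weight_dtype'])
print(_mixed_precision_is_enabled(HParams(tf.float16, tf.float32)))  # True
print(_mixed_precision_is_enabled(HParams(tf.float32, tf.float32)))  # False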
Example 12: make_diet_var_getter
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import float16 [as alias]
def make_diet_var_getter(params):
  """Create a custom variable getter for diet variables according to params."""

  def diet_var_initializer(shape, dtype, partition_info=None):
    """Initializer for a diet variable."""
    del dtype
    del partition_info

    with common_layers.fn_device_dependency("diet_init") as out_deps:
      float_range = math.sqrt(3)
      ret = tf.random_uniform(shape, -float_range, float_range)
      if params.quantize:
        ret = _quantize(ret, params, randomize=False)
      out_deps.append(ret)
      return ret

  def diet_var_getter(getter, **kwargs):
    """Get diet variable and return it dequantized."""
    if params.quantize:
      kwargs["dtype"] = tf.float16
    kwargs["initializer"] = diet_var_initializer
    kwargs["trainable"] = False

    base_var = getter(**kwargs)
    dequantized = _dequantize(base_var, params)

    if not hasattr(params, "dequantized"):
      params.dequantized = defaultdict(list)
    params.dequantized[base_var.name].append(dequantized)

    return dequantized

  return diet_var_getter
Example 13: float16_activations_var_getter
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import float16 [as alias]
def float16_activations_var_getter(getter, *args, **kwargs):
  """A custom getter function for float32 parameters and float16 activations.

  This function ensures the following:
    1. All variables requested with type fp16 are stored as type fp32.
    2. All variables requested with type fp32 are returned as type fp16.

  See https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/
  #training_tensorflow for more information on this strategy.

  Args:
    getter: custom getter
    *args: arguments
    **kwargs: keyword arguments

  Returns:
    variables with the correct dtype.

  Raises:
    KeyError: if "dtype" is not provided as a kwarg.
  """
  requested_dtype = kwargs["dtype"]

  if requested_dtype == tf.float16:
    kwargs["dtype"] = tf.float32

  if requested_dtype == tf.float32:
    requested_dtype = tf.float16
  var = getter(*args, **kwargs)
  # This if statement is needed to guard the cast, because batch norm
  # assigns directly to the return value of this custom getter. The cast
  # makes the return value not a variable so it cannot be assigned. Batch
  # norm variables are always in fp32 so this if statement is never
  # triggered for them.
  if var.dtype.base_dtype != requested_dtype:
    var = tf.cast(var, requested_dtype)
  return var
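Illustrative usage as a custom getter (the scope name 'body' and variable shape are arbitrary): parameters are stored in float32, while the tensor handed back to the model code is float16.
with tf.variable_scope('body', custom_getter=float16_activations_var_getter):
  w = tf.get_variable('w', shape=[16, 16], dtype=tf.float16)
print(w.dtype)  # float16 view; the stored master weight remains float32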
Example 14: set_activation_type
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import float16 [as alias]
def set_activation_type(self):
  hparams = self._hparams
  if hparams.activation_dtype == "float32":
    activation_dtype = tf.float32
  elif hparams.activation_dtype == "float16":
    activation_dtype = tf.float16
  elif hparams.activation_dtype == "bfloat16":
    activation_dtype = tf.bfloat16
  else:
    raise ValueError(
        "unknown hparams.activation_dtype %s" % hparams.activation_dtype)
  return activation_dtype
Example 15: activation_type
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import float16 [as alias]
def activation_type(self):
  hparams = self._hparams
  if hparams.activation_dtype == "float32":
    activation_dtype = tf.float32
  elif hparams.activation_dtype == "float16":
    activation_dtype = tf.float16
  elif hparams.activation_dtype == "bfloat16":
    activation_dtype = tf.bfloat16
  else:
    raise ValueError(
        "unknown hparams.activation_dtype %s" % hparams.activation_dtype)
  return activation_dtype