This article collects typical usage examples of the tensorflow.DType method in Python. If you are wondering what tensorflow.DType does, how to use it, or want to see it in real code, the curated examples below may help. You can also explore further usage examples of the tensorflow module in which this method is defined.
Below are 15 code examples of tensorflow.DType, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
Example 1: getEstimator
# Required import: import tensorflow [as alias]
# Or: from tensorflow import DType [as alias]
def getEstimator(cls, priors: Tuple[Distribution, ...], K: int,
                 dtype: tf.DType = tf.float32,
                 isFullyObserved: bool = True,
                 noiseUniformity: NoiseUniformity = HOMOGENEOUS,
                 stopCriterionInit=LlhStall(10),
                 stopCriterionEM=LlhStall(100),
                 stopCriterionBCD=LlhImprovementThreshold(1e-2),
                 path: str = "/tmp", device: str = "/cpu:0",
                 cv: CV = None):
    def model_fn(features, labels, mode):
        es = cls.__estimatorSpec(mode=mode, features=features,
                                 isFullyObserved=isFullyObserved,
                                 device=device, priors=priors,
                                 noiseUniformity=noiseUniformity,
                                 stopCriterionInit=stopCriterionInit,
                                 stopCriterionEM=stopCriterionEM,
                                 stopCriterionBCD=stopCriterionBCD,
                                 cv=cv, path=path, K=K,
                                 transform=False, dtype=dtype)
        return es

    est = tf.estimator.Estimator(model_fn=model_fn,
                                 model_dir=path)
    return est
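For context, here is a hedged sketch of how such a factory method might be called. The class name `TensorFactorisationEstimator`, the `NormalDistribution` priors, and `my_input_fn` are hypothetical placeholders for the project's own `Distribution` objects and data pipeline; only the `tf.estimator` training call is standard TensorFlow.

# Hypothetical usage sketch (placeholder priors and input_fn).
priors = (NormalDistribution(), NormalDistribution())
est = TensorFactorisationEstimator.getEstimator(
    priors=priors, K=10, dtype=tf.float32, path="/tmp/model")
est.train(input_fn=my_input_fn, steps=1000)  # standard tf.estimator API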
Example 2: parameterInfo
# Required import: import tensorflow [as alias]
# Or: from tensorflow import DType [as alias]
def parameterInfo(self,
                  shape: Tuple[int, ...] = (1,),
                  latentShape: Tuple[int, ...] = ()) -> ParameterInfo:
    """Initializers for the parameters of the distribution.

    Draw random initialization values for each parameter matching the
    provided `shape`, `latentShape`, and `dtype`. This method has to
    be implemented by concrete distributions to provide reasonable
    random initializations used during `Distribution.random`.

    Arguments:
        shape: `Tuple[int, ...]` the shape of the distribution.
        latentShape: `Tuple[int, ...]` the latent shape of the
            distribution.
        dtype: `DType` the data type of the distribution.

    Returns:
        `Dict[str, Tensor]` map from parameter names to `Tensor`s
        containing the random initial values for the parameters.
    """
    ...
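Since the method above is only an interface stub, here is a hedged sketch of what a concrete override could look like for a simple normal-style distribution. The parameter names `mu` and `tau`, the use of TF1-style `tf.random_normal`/`tf.ones`, and the assumption that `self.dtype` holds a floating `tf.DType` are all illustrative, not the library's actual implementation.

def parameterInfo(self,
                  shape: Tuple[int, ...] = (1,),
                  latentShape: Tuple[int, ...] = ()) -> ParameterInfo:
    # Hypothetical concrete implementation: random location, unit precision.
    mu = tf.random_normal(shape, dtype=self.dtype)   # random location init
    tau = tf.ones(shape, dtype=self.dtype)           # unit precision init
    return {"mu": mu, "tau": tau}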
Example 3: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import DType [as alias]
def __init__(self, in_layers=None, dtype=None, **kwargs):
    """
    Parameters
    ----------
    dtype: tf.DType
        the dtype to cast the input layers to,
        e.g. tf.int32
    """
    if dtype is None:
        raise ValueError("Must cast to a dtype")
    self.dtype = dtype
    super(Cast, self).__init__(in_layers, **kwargs)
    try:
        parent_shape = self.in_layers[0].shape
        self._shape = parent_shape
    except:
        pass
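A minimal usage sketch, assuming the surrounding DeepChem-style graph API where layers are chained via `in_layers`; `label_layer` is a hypothetical upstream layer.

# Cast the output of a previous layer to int32 (hypothetical pipeline).
labels = Cast(in_layers=[label_layer], dtype=tf.int32)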
Example 4: decode_field
# Required import: import tensorflow [as alias]
# Or: from tensorflow import DType [as alias]
def decode_field(self,
                 field_name: str,
                 field_value: Union[tf.Tensor, tf.SparseTensor],
                 field_type: Optional[tf.DType] = None) -> tf.Tensor:
    """
    Decode a field from a tfrecord example.

    Parameters
    ----------
    field_name
        name of the field; if nested, levels are separated by "/"
    field_value
        value of the field from the tfrecords example
    field_type
        type of the decoded field from self.get_tfrecords_output_types,
        or None if it was not provided
    """
    # pylint: disable=no-self-use
    # is designed to be overridden
    # pylint: disable=unused-argument
    # this method is really an interface, but has a default implementation
    if field_type is None:
        return field_value
    return tf.decode_raw(field_value, field_type)
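As a rough illustration of the default branch: `tf.decode_raw` turns a string (bytes) tensor back into numbers of the given `tf.DType`. The decoder object and the field name "image/data" below are assumptions made for the sketch.

# Hypothetical call: decode a raw-bytes feature back to float32 values.
raw_value = example_parsed["image/data"]   # string tensor from a TFRecord
decoded = decoder.decode_field("image/data", raw_value, tf.float32)
# which, in this default implementation, is equivalent to:
decoded = tf.decode_raw(raw_value, tf.float32)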
Example 5: layer_norm
# Required import: import tensorflow [as alias]
# Or: from tensorflow import DType [as alias]
def layer_norm(inputs, epsilon=1e-6, dtype=None, scope=None):
    """
    Layer Normalization
    :param inputs: A Tensor of shape [..., channel_size]
    :param epsilon: A floating-point number
    :param dtype: An optional instance of tf.DType
    :param scope: An optional string
    :returns: A Tensor with the same shape as inputs
    """
    with tf.variable_scope(scope, default_name="layer_norm", values=[inputs],
                           dtype=dtype):
        channel_size = inputs.get_shape().as_list()[-1]
        scale = tf.get_variable("scale", shape=[channel_size],
                                initializer=tf.ones_initializer())
        offset = tf.get_variable("offset", shape=[channel_size],
                                 initializer=tf.zeros_initializer())
        mean = tf.reduce_mean(inputs, -1, True)
        variance = tf.reduce_mean(tf.square(inputs - mean), -1, True)
        norm_inputs = (inputs - mean) * tf.rsqrt(variance + epsilon)
        return norm_inputs * scale + offset
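A quick sketch of calling it on a float32 activation tensor; the placeholder shape is chosen arbitrarily for the example.

# Normalize the last dimension of a [batch, time, 512] activation tensor.
x = tf.placeholder(tf.float32, shape=[None, None, 512])
y = layer_norm(x, epsilon=1e-6, dtype=tf.float32, scope="ln_demo")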
Example 6: select_eps_for_addition
# Required import: import tensorflow [as alias]
# Or: from tensorflow import DType [as alias]
def select_eps_for_addition(dtype):
    """Returns 2 * machine epsilon based on `dtype`.

    This function picks an epsilon slightly greater than the machine epsilon,
    which is the upper bound on relative error. This value ensures that
    `1.0 + eps != 1.0`.

    Args:
        dtype: The `tf.DType` of the tensor to which eps will be added.

    Raises:
        ValueError: If `dtype` is not a floating type.

    Returns:
        A `float` to be used to make operations safe.
    """
    return 2.0 * np.finfo(dtype.as_numpy_dtype).eps
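The returned value depends only on the floating `tf.DType`; for instance, using the standard machine epsilons reported by NumPy:

eps32 = select_eps_for_addition(tf.float32)  # 2 * np.finfo(np.float32).eps ~ 2.4e-07
eps64 = select_eps_for_addition(tf.float64)  # 2 * np.finfo(np.float64).eps ~ 4.4e-16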
Example 7: select_eps_for_division
# Required import: import tensorflow [as alias]
# Or: from tensorflow import DType [as alias]
def select_eps_for_division(dtype):
    """Selects default values for epsilon to make divisions safe based on dtype.

    This function returns an epsilon slightly greater than the smallest positive
    floating number that is representable for the given dtype. This is mainly
    used to prevent division by zero, which produces Inf values. However, if the
    numerator is orders of magnitude greater than `1.0`, eps should also be
    increased accordingly. Only floating types are supported.

    Args:
        dtype: The `tf.DType` of the tensor to which eps will be added.

    Raises:
        ValueError: If `dtype` is not a floating type.

    Returns:
        A `float` to be used to make operations safe.
    """
    return 10.0 * np.finfo(dtype.as_numpy_dtype).tiny


# The util functions or classes are not exported.
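A typical way such an epsilon gets used, sketched here for a float32 division that should never produce Inf; `numerator` and `denominator` are assumed tensors, with the denominator non-negative.

eps = select_eps_for_division(tf.float32)      # 10 * np.finfo(np.float32).tiny
safe_ratio = numerator / (denominator + eps)   # avoids division by exact zero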
Example 8: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import DType [as alias]
def __init__(self, shape, dtype=np.float32):
    """Creates a `ShapeDtype` instance, with canonicalized `shape` and `dtype`.

    Args:
        shape: A tuple or list, each element of which is an int or, less often,
            `None`.
        dtype: A `dtype` object, either from NumPy or TensorFlow.

    Returns:
        A `ShapeDtype` instance whose `shape` is a tuple and `dtype` is a NumPy
        `dtype` object.
    """
    # Canonicalize shape and dtype.
    if isinstance(shape, list):
        shape = tuple(shape)
    if not isinstance(shape, tuple):
        raise TypeError('shape must be tuple or list; got: {}'.format(shape))
    if isinstance(dtype, tf.DType):
        dtype = dtype.as_numpy_dtype
    self.shape = shape
    self.dtype = dtype
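Canonicalization in practice: passing a list shape and a TensorFlow dtype yields a tuple shape and a NumPy dtype, e.g.:

sd = ShapeDtype([None, 32], dtype=tf.float32)
print(sd.shape)   # (None, 32) -- list converted to tuple
print(sd.dtype)   # <class 'numpy.float32'> -- converted via tf.DType.as_numpy_dtype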
Example 9: is_structure_of_integers
# Required import: import tensorflow [as alias]
# Or: from tensorflow import DType [as alias]
def is_structure_of_integers(type_spec: computation_types.Type) -> bool:
    """Determines if `type_spec` is a structure of integers.

    Args:
        type_spec: A `computation_types.Type`.

    Returns:
        `True` iff `type_spec` is a structure of integers, otherwise `False`.
    """
    py_typecheck.check_type(type_spec, computation_types.Type)
    if type_spec.is_tensor():
        py_typecheck.check_type(type_spec.dtype, tf.DType)
        return type_spec.dtype.is_integer
    elif type_spec.is_tuple():
        return all(
            is_structure_of_integers(v)
            for _, v in anonymous_tuple.iter_elements(type_spec))
    elif type_spec.is_federated():
        return is_structure_of_integers(type_spec.member)
    else:
        return False
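A hedged illustration, assuming a TensorFlow Federated release that still exposes `computation_types.NamedTupleType` alongside the `anonymous_tuple` module used above:

# Hypothetical checks on simple TFF types.
int_pair = computation_types.NamedTupleType([tf.int32, tf.int64])
print(is_structure_of_integers(int_pair))                                   # True
print(is_structure_of_integers(computation_types.TensorType(tf.float32)))   # False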
Example 10: create_tensorflow_to_broadcast_scalar
# Required import: import tensorflow [as alias]
# Or: from tensorflow import DType [as alias]
def create_tensorflow_to_broadcast_scalar(
        scalar_type: tf.dtypes.DType,
        new_shape: tf.TensorShape) -> building_blocks.CompiledComputation:
    """Creates a TF function broadcasting a scalar to shape `new_shape`.

    Args:
        scalar_type: Instance of `tf.DType`, the type of the scalar we are
            looking to broadcast.
        new_shape: Instance of `tf.TensorShape`, the shape we wish to broadcast
            to. Must be fully defined.

    Returns:
        Instance of `building_blocks.CompiledComputation` representing a
        function declaring a scalar parameter of dtype `scalar_type`, and
        returning a tensor of this same dtype and shape `new_shape`, with the
        same value in each entry as its scalar argument.

    Raises:
        TypeError: If the types of the arguments do not match the declared arg
            types.
        ValueError: If `new_shape` is not fully defined.
    """
    proto = tensorflow_computation_factory.create_broadcast_scalar_to_shape(
        scalar_type, new_shape)
    return building_blocks.CompiledComputation(proto)
Example 11: open_detached
# Required import: import tensorflow [as alias]
# Or: from tensorflow import DType [as alias]
def open_detached(
        ciphertext: Ciphertext,
        mac: Mac,
        nonce: Nonce,
        public_sender: PublicKey,
        secretkey_receiver: SecretKey,
        plaintext_dtype: tf.DType,
) -> tf.Tensor:
    plaintext = sodium_module.sodium_easy_box_open_detached(
        ciphertext.raw,
        mac.raw,
        nonce.raw,
        public_sender.raw,
        secretkey_receiver.raw,
        plaintext_dtype,
    )
    return plaintext
Example 12: register_inputs
# Required import: import tensorflow [as alias]
# Or: from tensorflow import DType [as alias]
def register_inputs(self) -> None:
    feedables = set.union(*[ex.feedables for ex in self.model.runners])
    if self.train_mode:
        feedables |= set.union(
            *[ex.feedables for ex in self.model.trainers])

    # collect input shapes and types
    input_types = {}  # type: Dict[str, tf.DType]
    input_shapes = {}  # type: Dict[str, tf.TensorShape]
    for feedable in feedables:
        input_types.update(feedable.input_types)
        input_shapes.update(feedable.input_shapes)

    dataset = {}  # type: Dict[str, tf.Tensor]
    for s_id, dtype in input_types.items():
        shape = input_shapes[s_id]
        dataset[s_id] = tf.placeholder(dtype, shape, s_id)

    for feedable in feedables:
        feedable.register_input(dataset)
    self.model.dataset_runner.register_input(dataset)
Example 13: get_variable
# Required import: import tensorflow [as alias]
# Or: from tensorflow import DType [as alias]
def get_variable(name: str,
                 shape: ShapeSpec = None,
                 dtype: tf.DType = None,
                 initializer: Callable = None,
                 **kwargs) -> tf.Variable:
    """Get an existing variable with these parameters or create a new one.

    This is a wrapper around `tf.get_variable`. The `initializer` parameter is
    treated as a default which can be overridden by a call to
    `update_initializers`.

    This should only be called during model building.
    """
    return tf.get_variable(
        name=name, shape=shape, dtype=dtype,
        initializer=get_initializer(name, initializer),
        **kwargs)
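A hedged example of calling this wrapper while building a model; the variable name, shape, and initializer below are arbitrary choices for illustration.

# Create (or reuse) a [300, 512] float32 weight matrix; the initializer acts
# only as a default that update_initializers may have overridden.
w = get_variable("encoder_projection", shape=[300, 512],
                 dtype=tf.float32,
                 initializer=tf.glorot_uniform_initializer())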
Example 14: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import DType [as alias]
def __init__(self,
             U: List[Tensor],
             priorU: List[Distribution],
             likelihood: Likelihood,
             dtype: tf.DType,
             stopCriterion,
             phase: Phase,
             noiseUniformity: NoiseUniformity,
             transform: bool = False) -> None:
    # set up the model
    self.dtype = dtype
    self.__transform = transform
    self.__noiseUniformity = noiseUniformity
    self.likelihood = likelihood
    self.stopCriterion = stopCriterion
    self.postU = []  # type: List[PostU]
    for f, priorUf in enumerate(priorU):
        postUf = PostU(likelihood, priorUf, f)
        self.postU.append(postUf)

    # create or reuse the variables for the filter banks
    for f, Uf in enumerate(copy(U)):
        if transform and (f == 0):
            paramName = "{}tr".format(f)
        else:
            paramName = "{}".format(f)
        with tf.variable_scope("U", reuse=tf.AUTO_REUSE):
            UfVar = tf.get_variable(paramName,
                                    dtype=dtype,
                                    initializer=Uf)
            U[f] = UfVar
    self.__U = tuple(U)

    if phase == Phase.EM or phase == Phase.INIT:
        self.__setEm()
    elif phase == Phase.BCD:
        self.__setBcd()
    else:
        raise ValueError
Example 15: random
# Required import: import tensorflow [as alias]
# Or: from tensorflow import DType [as alias]
def random(cls,
           priorU: List[Distribution],
           likelihood: Likelihood,
           M: Tuple[int, ...],
           K: int,
           dtype: tf.DType,
           phase: Phase,
           stopCriterion,
           noiseUniformity: NoiseUniformity = HOMOGENEOUS,
           transform: bool = False) -> "TensorFactorisation":
    # initialize U
    dtype = tf.as_dtype(dtype)
    zero = tf.constant(0., dtype=dtype)
    one = tf.constant(1., dtype=dtype)
    normal = tf.distributions.Normal(loc=zero, scale=one)
    F = len(M)
    U = []
    for f in range(F):
        if priorU[f].nonNegative:
            UfInit = tf.abs(normal.sample(sample_shape=(K, M[f])))
        else:
            UfInit = normal.sample(sample_shape=(K, M[f]))
        U.append(UfInit)

    # instantiate
    tefa = TensorFactorisation(U=U,
                               priorU=priorU,
                               likelihood=likelihood,
                               dtype=dtype,
                               phase=phase,
                               transform=transform,
                               noiseUniformity=noiseUniformity,
                               stopCriterion=stopCriterion)
    return tefa