This article collects typical usage examples of the tensorflow.as_dtype method in Python. If you are wondering what tensorflow.as_dtype does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the tensorflow module that the method belongs to.
The following presents 15 code examples of the tensorflow.as_dtype method, sorted by popularity by default.
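Before the examples, here is a minimal standalone sketch (not taken from any of the examples below) of what tf.as_dtype does: it normalizes a dtype-like value — a string such as 'float32', a NumPy type, a DataType enum value, or an existing tf.DType — into a canonical tf.DType object.

import numpy as np
import tensorflow as tf

# Strings, NumPy types and existing DTypes all resolve to the same canonical object.
assert tf.as_dtype('float32') == tf.float32
assert tf.as_dtype(np.float64) == tf.float64
assert tf.as_dtype(tf.int32) == tf.int32

# The returned DType exposes the properties used throughout the examples below.
print(tf.as_dtype('float32').is_floating)     # True
print(tf.as_dtype('int32').is_integer)        # True
print(tf.as_dtype('float64').as_numpy_dtype)  # <class 'numpy.float64'>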

Example 1: __init__

# Required module: import tensorflow [as alias]
# Or: from tensorflow import as_dtype [as alias]
def __init__(self, model, dtypestr='float32'):
    """
    :param model: An instance of the cleverhans.model.Model class.
    :param back: The backend to use. Inherited from AttackBase class.
    :param dtypestr: datatype of the input data samples and crafted
                     adversarial attacks.
    """
    # Validate the input arguments.
    if dtypestr != 'float32' and dtypestr != 'float64':
        raise ValueError("Unexpected input for argument dtypestr.")
    import tensorflow as tf
    tfe = tf.contrib.eager
    self.tf_dtype = tf.as_dtype(dtypestr)
    self.np_dtype = np.dtype(dtypestr)
    if not isinstance(model, Model):
        raise ValueError("The model argument should be an instance of"
                         " the cleverhans.model.Model class.")
    # Prepare attributes
    self.model = model
    self.dtypestr = dtypestr

Example 2: __init__

# Required module: import tensorflow [as alias]
# Or: from tensorflow import as_dtype [as alias]
def __init__(self,
             dtype,
             batch_shape=None,
             value_shape=None,
             group_ndims=0,
             is_continuous=None,
             **kwargs):
    dtype = tf.float32 if dtype is None else tf.as_dtype(dtype).base_dtype
    self.explicit_batch_shape = tf.TensorShape(batch_shape)
    self.explicit_value_shape = tf.TensorShape(value_shape)
    if is_continuous is None:
        is_continuous = dtype.is_floating
    super(Empirical, self).__init__(
        dtype=dtype,
        param_dtype=None,
        is_continuous=is_continuous,
        is_reparameterized=False,
        use_path_derivative=False,
        group_ndims=group_ndims,
        **kwargs)

Example 3: _legacy_output_transform_func

# Required module: import tensorflow [as alias]
# Or: from tensorflow import as_dtype [as alias]
def _legacy_output_transform_func(*expr, out_mul=1.0, out_add=0.0, out_shrink=1, out_dtype=None):
    if out_mul != 1.0:
        expr = [x * out_mul for x in expr]
    if out_add != 0.0:
        expr = [x + out_add for x in expr]
    if out_shrink > 1:
        ksize = [1, 1, out_shrink, out_shrink]
        expr = [tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding="VALID", data_format="NCHW") for x in expr]
    if out_dtype is not None:
        if tf.as_dtype(out_dtype).is_integer:
            expr = [tf.round(x) for x in expr]
        expr = [tf.saturate_cast(x, out_dtype) for x in expr]
    return expr
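Example 3 uses the DType returned by tf.as_dtype only for its is_integer property: integer outputs are rounded before a saturating cast so out-of-range values clip rather than overflow. A minimal standalone sketch of that pattern (the helper name cast_for_output is made up for illustration):

import tensorflow as tf

def cast_for_output(x, out_dtype):
    # Round first when the target is an integer type, then saturate-cast
    # so values outside the target range clip instead of overflowing.
    if tf.as_dtype(out_dtype).is_integer:
        x = tf.round(x)
    return tf.saturate_cast(x, out_dtype)

# e.g. images scaled to [0, 255] and stored as uint8:
# img_uint8 = cast_for_output(img_float * 255.0, tf.uint8)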

Example 4: get_initializer

# Required module: import tensorflow [as alias]
# Or: from tensorflow import as_dtype [as alias]
def get_initializer(initializer, initializer_gain):
    tfdtype = tf.as_dtype(dtype.floatx())

    if initializer == "uniform":
        max_val = initializer_gain
        return tf.random_uniform_initializer(-max_val, max_val, dtype=tfdtype)
    elif initializer == "normal":
        return tf.random_normal_initializer(0.0, initializer_gain, dtype=tfdtype)
    elif initializer == "normal_unit_scaling":
        return tf.variance_scaling_initializer(initializer_gain,
                                               mode="fan_avg",
                                               distribution="normal",
                                               dtype=tfdtype)
    elif initializer == "uniform_unit_scaling":
        return tf.variance_scaling_initializer(initializer_gain,
                                               mode="fan_avg",
                                               distribution="uniform",
                                               dtype=tfdtype)
    else:
        tf.logging.warn("Unrecognized initializer: %s" % initializer)
        tf.logging.warn("Return to default initializer: glorot_uniform_initializer")
        return tf.glorot_uniform_initializer(dtype=tfdtype)
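A hypothetical call site for Example 4, assuming TF 1.x variable creation and the project-local dtype module with floatx() that the snippet references:

# Uniform initializer in [-0.1, 0.1], used to create a weight matrix.
initializer = get_initializer("uniform", 0.1)
kernel = tf.get_variable("kernel", shape=[512, 512], initializer=initializer)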

Example 5: __init__

# Required module: import tensorflow [as alias]
# Or: from tensorflow import as_dtype [as alias]
def __init__(self, shape, dtype):
    """Creates a schema for a variable used in policy.

    Allows for symbolic definition of shape. Shape can consist of integers, as well as
    strings BATCH and TIMESTEPS. This is taken advantage of in the optimizers, to
    create placeholders or variables that asynchronously prefetch the inputs.

    Parameters
    ----------
    shape: [int, np.int64, np.int32, or str]
        shape of the variable, e.g. [12, 4], [BATCH, 12], [BATCH, 'timestep']
    dtype:
        tensorflow type of the variable, e.g. tf.float32, tf.int32
    """
    assert all(isinstance(s, (int, np.int64, np.int32)) or s in [BATCH, TIMESTEPS]
               for s in shape), 'Bad shape %s' % shape
    self.shape = shape
    self.dtype = tf.as_dtype(dtype)

Example 6: read_summaries

# Required module: import tensorflow [as alias]
# Or: from tensorflow import as_dtype [as alias]
def read_summaries(event_dir, event_file_pattern="events.out.tfevents.*"):
    """Reads summaries from TensorFlow event files.

    Args:
      event_dir: Directory containing event files.
      event_file_pattern: The pattern to look for event files.

    Returns:
      A list of tuples (step, dict of summaries), sorted by step.
    """
    if not tf.io.gfile.exists(event_dir):
        return []
    summaries = collections.defaultdict(dict)
    for event_file in tf.io.gfile.glob(os.path.join(event_dir, event_file_pattern)):
        for event in tf.compat.v1.train.summary_iterator(event_file):
            if not event.HasField("summary"):
                continue
            for value in event.summary.value:
                tensor_proto = value.tensor
                tensor = tf.io.parse_tensor(
                    tensor_proto.SerializeToString(), tf.as_dtype(tensor_proto.dtype))
                summaries[event.step][value.tag] = tf.get_static_value(tensor)
    return list(sorted(summaries.items(), key=lambda x: x[0]))
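Example 6 relies on the fact that tf.as_dtype also accepts the numeric DataType enum stored in a TensorProto, not just strings or NumPy types. A minimal standalone round trip showing the same conversion, assuming eager execution (TF 2.x):

import tensorflow as tf

# Serialize a tensor to a TensorProto, then parse it back; the proto's
# .dtype field is a DataType enum that tf.as_dtype maps to a tf.DType.
proto = tf.make_tensor_proto([1.0, 2.0, 3.0])
restored = tf.io.parse_tensor(proto.SerializeToString(), tf.as_dtype(proto.dtype))
print(tf.get_static_value(restored))  # [1. 2. 3.]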

Example 7: __init__

# Required module: import tensorflow [as alias]
# Or: from tensorflow import as_dtype [as alias]
def __init__(self,
             mean_initializer=tf.random_normal_initializer(stddev=0.1),
             stddev_initializer=tf.random_uniform_initializer(
                 minval=1e-5, maxval=0.1),
             mean_regularizer=None,
             stddev_regularizer=None,
             mean_constraint=None,
             stddev_constraint=positive(),
             seed=None,
             dtype=tf.float32):
    """Constructs the initializer."""
    super(TrainableNormal, self).__init__()
    self.mean_initializer = mean_initializer
    self.stddev_initializer = stddev_initializer
    self.mean_regularizer = mean_regularizer
    self.stddev_regularizer = stddev_regularizer
    self.mean_constraint = mean_constraint
    self.stddev_constraint = stddev_constraint
    self.seed = seed
    self.dtype = tf.as_dtype(dtype)

Example 8: build

# Required module: import tensorflow [as alias]
# Or: from tensorflow import as_dtype [as alias]
def build(self, shape, dtype=None, add_variable_fn=None):
    """Builds the initializer, with the variables captured by the caller."""
    if dtype is None:
        dtype = self.dtype
    self.shape = shape
    self.dtype = tf.as_dtype(dtype)
    self.mean = add_variable_fn(
        'mean',
        shape=shape,
        initializer=self.mean_initializer,
        regularizer=self.mean_regularizer,
        constraint=self.mean_constraint,
        dtype=dtype,
        trainable=True)
    self.stddev = add_variable_fn(
        'stddev',
        shape=shape,
        initializer=self.stddev_initializer,
        regularizer=self.stddev_regularizer,
        constraint=self.stddev_constraint,
        dtype=dtype,
        trainable=True)
    self.built = True

Example 9: fprop

# Required module: import tensorflow [as alias]
# Or: from tensorflow import as_dtype [as alias]
def fprop(self, x, **kwargs):
    del kwargs
    with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
        w1 = tf.constant([[1.5, .3], [-2, 0.3]],
                         dtype=tf.as_dtype(x.dtype))
        w2 = tf.constant([[-2.4, 1.2], [0.5, -2.3]],
                         dtype=tf.as_dtype(x.dtype))
        h1 = tf.nn.sigmoid(tf.matmul(x, w1))
        res = tf.matmul(h1, w2)
        return {self.O_FEATURES: [h1, res],
                self.O_LOGITS: res,
                self.O_PROBS: tf.nn.softmax(res)}

Example 10: fprop

# Required module: import tensorflow [as alias]
# Or: from tensorflow import as_dtype [as alias]
def fprop(self, x, **kwargs):
    del kwargs
    with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
        w1 = tf.constant([[1.5, .3], [-2, 0.3]],
                         dtype=tf.as_dtype(x.dtype))
        w2 = tf.constant([[-2.4, 1.2], [0.5, -2.3]],
                         dtype=tf.as_dtype(x.dtype))
        h1 = tf.nn.sigmoid(tf.matmul(x, w1))
        res = tf.matmul(h1, w2)
        return {self.O_LOGITS: res,
                self.O_PROBS: tf.nn.softmax(res)}

Example 11: fprop

# Required module: import tensorflow [as alias]
# Or: from tensorflow import as_dtype [as alias]
def fprop(self, x, **kwargs):
    del kwargs
    with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
        w1 = tf.constant(
            [[1.5, .3], [-2, 0.3]], dtype=tf.as_dtype(x.dtype))
        w2 = tf.constant(
            [[-2.4, 1.2], [0.5, -2.3]], dtype=tf.as_dtype(x.dtype))
        h1 = tf.nn.sigmoid(tf.matmul(x, w1))
        res = tf.matmul(h1, w2)
        return {self.O_LOGITS: res, self.O_PROBS: tf.nn.softmax(res)}

Example 12: __init__

# Required module: import tensorflow [as alias]
# Or: from tensorflow import as_dtype [as alias]
def __init__(self, dtype=tf.float32):
    self.dtype = tf.as_dtype(dtype)

Example 13: getTRTType

# Required module: import tensorflow [as alias]
# Or: from tensorflow import as_dtype [as alias]
def getTRTType(tensor):
    if tf.as_dtype(tensor.dtype) == tf.float32:
        return 0
    if tf.as_dtype(tensor.dtype) == tf.float16:
        return 1
    if tf.as_dtype(tensor.dtype) == tf.int8:
        return 2
    if tf.as_dtype(tensor.dtype) == tf.int32:
        return 3
    print("Tensor data type of %s is not supported in TensorRT" % (tensor.dtype))
    sys.exit()
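The comparison chain in Example 13 works because tf.as_dtype normalizes whatever tensor.dtype holds into a canonical tf.DType that compares equal to the tf.* constants. A dictionary lookup is an equivalent, slightly more compact variant (the enum values come from the example above; the function and mapping names are made up):

import sys
import tensorflow as tf

# Mapping from canonical tf.DType to the TensorRT data-type enum used above.
_TRT_TYPES = {tf.float32: 0, tf.float16: 1, tf.int8: 2, tf.int32: 3}

def get_trt_type(tensor):
    dtype = tf.as_dtype(tensor.dtype)
    if dtype not in _TRT_TYPES:
        sys.exit("Tensor data type of %s is not supported in TensorRT" % dtype)
    return _TRT_TYPES[dtype]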

Example 14: __init__

# Required module: import tensorflow [as alias]
# Or: from tensorflow import as_dtype [as alias]
def __init__(self,
             M: Tuple[int, int] = (1000, 2000),
             dtype: type = np.float32) -> None:
    self.__M = M
    self.__dtype = tf.as_dtype(dtype)

Example 15: random

# Required module: import tensorflow [as alias]
# Or: from tensorflow import as_dtype [as alias]
def random(cls,
           priorU: List[Distribution],
           likelihood: Likelihood,
           M: Tuple[int, ...],
           K: int,
           dtype: tf.DType,
           phase: Phase,
           stopCriterion,
           noiseUniformity: NoiseUniformity = HOMOGENEOUS,
           transform: bool = False) -> "TensorFactorisation":
    # initialize U
    dtype = tf.as_dtype(dtype)
    zero = tf.constant(0., dtype=dtype)
    one = tf.constant(1., dtype=dtype)
    normal = tf.distributions.Normal(loc=zero, scale=one)
    F = len(M)
    U = []
    for f in range(F):
        if priorU[f].nonNegative:
            UfInit = tf.abs(normal.sample(sample_shape=(K, M[f])))
        else:
            UfInit = normal.sample(sample_shape=(K, M[f]))
        U.append(UfInit)

    # instantiate
    tefa = TensorFactorisation(U=U,
                               priorU=priorU,
                               likelihood=likelihood,
                               dtype=dtype,
                               phase=phase,
                               transform=transform,
                               noiseUniformity=noiseUniformity,
                               stopCriterion=stopCriterion)
    return tefa