This page collects typical usage examples of the Python function tensorflow.python.framework.dtypes.as_dtype, drawn from open-source projects. If you are unsure what as_dtype does, how to call it, or just want to see it in context, the curated samples below may help.
The following 15 code examples of as_dtype are listed roughly by popularity.
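Before diving into the examples, here is a minimal sketch (not taken from any listed project) of what as_dtype does: it normalizes a type specification, whether a string name, a numpy type, a DataType enum value, or an existing DType, into a tf.DType object. It assumes the same TF 1.x-era internal import path used throughout the examples below.

import numpy as np
from tensorflow.python.framework import dtypes

# All of these spellings resolve to the same canonical DType.
assert dtypes.as_dtype("float32") == dtypes.float32
assert dtypes.as_dtype(np.float32) == dtypes.float32
assert dtypes.as_dtype(dtypes.float32) == dtypes.float32

# The DType then exposes the metadata the examples below rely on:
# .name, .base_dtype, .min/.max, .is_complex, .as_numpy_dtype, ...
print(dtypes.as_dtype("float32").name)  # float32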
Example 1: _compute_gradient
def _compute_gradient(x,
                      x_shape,
                      dx,
                      y,
                      y_shape,
                      dy,
                      x_init_value=None,
                      delta=1e-3):
  """Computes the theoretical and numerical jacobian."""
  t = dtypes.as_dtype(x.dtype)
  allowed_types = [dtypes.float32, dtypes.float64, dtypes.complex64]
  assert t.base_dtype in allowed_types, "Don't support type %s for x" % t.name
  t2 = dtypes.as_dtype(y.dtype)
  assert t2.base_dtype in allowed_types, "Don't support type %s for y" % t2.name
  if x_init_value is not None:
    i_shape = list(x_init_value.shape)
    assert list(x_shape) == i_shape, "x_shape = %s, init_data shape = %s" % (
        x_shape, i_shape)
    x_data = x_init_value
  else:
    if t == dtypes.float32:
      dtype = np.float32
    else:
      dtype = np.float64
    x_data = np.asfarray(np.random.random_sample(x_shape), dtype=dtype)

  jacob_t = _compute_theoretical_jacobian(x, x_shape, x_data, dy, y_shape, dx)
  jacob_n = _compute_numeric_jacobian(x, x_shape, x_data, y, y_shape, delta)
  return jacob_t, jacob_n
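The base_dtype check above matters because x.dtype may be a reference dtype (for example when x comes from a variable); base_dtype strips the _ref wrapper before the membership test. A small illustrative sketch:

from tensorflow.python.framework import dtypes

t = dtypes.as_dtype(dtypes.float32_ref)  # reference variant of float32
assert t != dtypes.float32               # the ref type itself differs
assert t.base_dtype == dtypes.float32    # base_dtype drops the _ref wrapper
assert t.base_dtype in [dtypes.float32, dtypes.float64, dtypes.complex64]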
Example 2: _compute_gradient
def _compute_gradient(x,
                      x_shape,
                      dx,
                      y,
                      y_shape,
                      dy,
                      x_init_value=None,
                      delta=1e-3,
                      extra_feed_dict=None):
  """Computes the theoretical and numerical jacobian."""
  t = dtypes.as_dtype(x.dtype)
  allowed_types = [dtypes.float16, dtypes.bfloat16, dtypes.float32,
                   dtypes.float64, dtypes.complex64, dtypes.complex128]
  assert t.base_dtype in allowed_types, "Don't support type %s for x" % t.name
  t2 = dtypes.as_dtype(y.dtype)
  assert t2.base_dtype in allowed_types, "Don't support type %s for y" % t2.name
  if x_init_value is not None:
    i_shape = list(x_init_value.shape)
    assert list(x_shape) == i_shape, "x_shape = %s, init_data shape = %s" % (
        x_shape, i_shape)
    x_data = x_init_value
  else:
    x_data = np.random.random_sample(x_shape).astype(t.as_numpy_dtype)
    if t.is_complex:
      x_data.imag = np.random.random_sample(x_shape)

  jacob_t = _compute_theoretical_jacobian(
      x, x_shape, x_data, dy, y_shape, dx, extra_feed_dict=extra_feed_dict)
  jacob_n = _compute_numeric_jacobian(
      x, x_shape, x_data, y, y_shape, delta, extra_feed_dict=extra_feed_dict)
  return jacob_t, jacob_n
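This variant avoids the manual float32/float64 branch of Example 1 by asking the DType itself for the matching numpy type via as_numpy_dtype, and by branching on is_complex. A standalone sketch of that idiom:

import numpy as np
from tensorflow.python.framework import dtypes

t = dtypes.as_dtype(np.complex64)
x_data = np.random.random_sample((2, 3)).astype(t.as_numpy_dtype)
if t.is_complex:
  # Populate the imaginary part as well, as the example above does.
  x_data.imag = np.random.random_sample((2, 3))
print(x_data.dtype)  # complex64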
Example 3: _verifySolve
def _verifySolve(self, x, y, batch_dims=None):
  for np_type in [np.float32, np.float64, np.complex64, np.complex128]:
    if np_type == np.float32 or np_type == np.complex64:
      tol = 1e-5
    else:
      tol = 1e-12
    for adjoint in False, True:
      if np_type in [np.float32, np.float64]:
        a = x.real.astype(np_type)
        b = y.real.astype(np_type)
      else:
        a = x.astype(np_type)
        b = y.astype(np_type)
      a_np = np.conj(np.transpose(a)) if adjoint else a
      if batch_dims is not None:
        a = np.tile(a, batch_dims + [1, 1])
        a_np = np.tile(a_np, batch_dims + [1, 1])
        b = np.tile(b, batch_dims + [1, 1])
      np_ans = np.linalg.solve(a_np, b)
      for use_placeholder in False, True:
        with self.test_session(use_gpu=True) as sess:
          if use_placeholder:
            a_ph = array_ops.placeholder(dtypes.as_dtype(np_type))
            b_ph = array_ops.placeholder(dtypes.as_dtype(np_type))
            tf_ans = linalg_ops.matrix_solve(a_ph, b_ph, adjoint=adjoint)
            out = sess.run(tf_ans, {a_ph: a, b_ph: b})
          else:
            tf_ans = linalg_ops.matrix_solve(a, b, adjoint=adjoint)
            out = tf_ans.eval()
            self.assertEqual(tf_ans.get_shape(), out.shape)
          self.assertEqual(np_ans.shape, out.shape)
          self.assertAllClose(np_ans, out, atol=tol, rtol=tol)
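Note how a plain numpy type (np_type) is handed to as_dtype to build the placeholders; a DType is what array_ops.placeholder expects. A minimal TF 1.x graph-mode sketch of the same move:

import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops

# as_dtype bridges numpy's type objects and TensorFlow placeholders.
a_ph = array_ops.placeholder(dtypes.as_dtype(np.complex64))
print(a_ph.dtype)  # <dtype: 'complex64'>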
Example 4: testUnsortedSegmentOps1DIndices1DDataNegativeIndices
def testUnsortedSegmentOps1DIndices1DDataNegativeIndices(self):
  """Tests for min, max, and prod ops.

  These share most of their implementation with sum, so we only test basic
  functionality.
  """
  for dtype in self.numeric_types:
    self.assertAllClose(
        np.array([8, 3, 1, 0], dtype=dtype),
        self._unsortedSegmentProd(
            np.array([0, 1, 2, 3, 4, 5, 6], dtype=dtype),
            np.array([3, -1, 0, 1, 0, -1, 3], dtype=np.int32), 4))

  for dtype in self.int_types | self.float_types:
    minval = dtypes.as_dtype(dtype).min
    maxval = dtypes.as_dtype(dtype).max
    self.assertAllClose(
        np.array([2, 3, maxval, 0], dtype=dtype),
        self._unsortedSegmentMin(
            np.array([0, 1, 2, 3, 4, 5, 6], dtype=dtype),
            np.array([3, -1, 0, 1, 0, -1, 3], dtype=np.int32), 4))
    self.assertAllClose(
        np.array([4, 3, minval, 6], dtype=dtype),
        self._unsortedSegmentMax(
            np.array([0, 1, 2, 3, 4, 5, 6], dtype=dtype),
            np.array([3, -1, 0, 1, 0, -1, 3], dtype=np.int32), 4))
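The min/max attributes used above are per-dtype representable limits, equivalent to what np.iinfo/np.finfo report for the matching numpy type. For instance:

import numpy as np
from tensorflow.python.framework import dtypes

print(dtypes.as_dtype(np.int32).min)    # -2147483648
print(dtypes.as_dtype(np.int32).max)    # 2147483647
print(dtypes.as_dtype(np.float32).max)  # ~3.4028235e+38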
Example 5: remote_fused_graph_execute
def remote_fused_graph_execute(inputs,
                               output_types,
                               graph_def,
                               graph_input_node_names,
                               graph_output_node_names,
                               executor_name,
                               serialized_executor_parameters,
                               default_graph_input_tensor_type_shapes=None,
                               default_graph_output_tensor_type_shapes=None):
  """A wrapper for remote_fused_graph_execute."""
  info_proto = info_pb2.RemoteFusedGraphExecuteInfo()
  info_proto.remote_graph.CopyFrom(graph_def)
  info_proto.graph_input_node_name.extend(graph_input_node_names)
  info_proto.graph_output_node_name.extend(graph_output_node_names)
  info_proto.executor_name = executor_name
  info_proto.serialized_executor_parameters = serialized_executor_parameters
  if default_graph_input_tensor_type_shapes:
    for type_shape in default_graph_input_tensor_type_shapes:
      type_shape_proto = info_proto.default_graph_input_tensor_shape.add()
      type_shape_proto.dtype = dtypes.as_dtype(type_shape[0]).as_datatype_enum
      for dim in type_shape[1]:
        type_shape_proto.shape.dim.add().size = dim
  if default_graph_output_tensor_type_shapes:
    for type_shape in default_graph_output_tensor_type_shapes:
      type_shape_proto = info_proto.default_graph_output_tensor_shape.add()
      type_shape_proto.dtype = dtypes.as_dtype(type_shape[0]).as_datatype_enum
      for dim in type_shape[1]:
        type_shape_proto.shape.dim.add().size = dim

  serialized_info = info_proto.SerializeToString()
  return gen_remote_fused_graph_ops.remote_fused_graph_execute(
      inputs, output_types, serialized_info)
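Here as_dtype is a stepping stone to as_datatype_enum, the integer DataType enum value that protobuf fields store; as_dtype also accepts that enum back, so the conversion round-trips. A quick sketch:

import numpy as np
from tensorflow.python.framework import dtypes

enum_val = dtypes.as_dtype(np.float32).as_datatype_enum
print(enum_val)  # 1, i.e. types_pb2.DT_FLOAT
assert dtypes.as_dtype(enum_val) == dtypes.float32  # round-trips cleanly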
Example 6: _DefaultGradYs
def _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops):
  """Fill in default values for grad_ys.

  Args:
    grad_ys: List of gradients, can contain None.
    ys: List of tensors.
    colocate_gradients_with_ops: If True, try colocating gradients with
      the corresponding op.

  Returns:
    A list of gradients to use, without None.

  Raises:
    ValueError: If one of the grad_ys is invalid.
  """
  if len(grad_ys) != len(ys):
    raise ValueError("Passed %d grad_ys for %d ys" % (len(grad_ys), len(ys)))
  grad_ys = ops.convert_n_to_tensor_or_indexed_slices(grad_ys, name="grad_y")
  for i in xrange(len(grad_ys)):
    grad_y = grad_ys[i]
    y = ys[i]
    if grad_y is None:
      with _maybe_colocate_with(y.op, colocate_gradients_with_ops):
        grad_ys[i] = array_ops.fill(
            array_ops.shape(y), constant_op.constant(1, dtype=y.dtype))
    else:
      if grad_y.dtype != y.dtype:
        raise ValueError("Y and ys_grad must be of the same type, "
                         "not y: %s, ys_grad: %s " %
                         (dtypes.as_dtype(y.dtype).name,
                          dtypes.as_dtype(grad_y.dtype).name))
  return grad_ys
Example 7: _SatisfiesTypeConstraint
def _SatisfiesTypeConstraint(dtype, attr_def):
  if attr_def.HasField("allowed_values"):
    allowed_list = attr_def.allowed_values.list.type
    if dtype not in allowed_list:
      raise TypeError(
          "DataType %s for attr '%s' not in list of allowed values: %s" %
          (dtypes.as_dtype(dtype).name, attr_def.name,
           ", ".join(dtypes.as_dtype(x).name for x in allowed_list)))
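A hypothetical way to exercise this helper, assuming the standard OpDef.AttrDef proto layout it reads (the attr name "T" and the allowed list below are made up for illustration):

from tensorflow.core.framework import op_def_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import dtypes

attr_def = op_def_pb2.OpDef.AttrDef(name="T")
attr_def.allowed_values.list.type.extend(
    [types_pb2.DT_FLOAT, types_pb2.DT_DOUBLE])

_SatisfiesTypeConstraint(types_pb2.DT_FLOAT, attr_def)  # passes silently
try:
  _SatisfiesTypeConstraint(types_pb2.DT_INT32, attr_def)
except TypeError as e:
  print(e)  # DataType int32 for attr 'T' not in list of allowed values: ...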
Example 8: _testTernary
def _testTernary(self, op, a, b, c, expected):
  with self.test_session() as session:
    with self.test_scope():
      pa = array_ops.placeholder(dtypes.as_dtype(a.dtype), a.shape, name="a")
      pb = array_ops.placeholder(dtypes.as_dtype(b.dtype), b.shape, name="b")
      pc = array_ops.placeholder(dtypes.as_dtype(c.dtype), c.shape, name="c")
      output = op(pa, pb, pc)
    result = session.run(output, {pa: a, pb: b, pc: c})
    self.assertAllClose(result, expected, rtol=1e-3)
Example 9: _SatisfiesTypeConstraint
def _SatisfiesTypeConstraint(dtype, attr_def, param_name):
  if attr_def.HasField("allowed_values"):
    allowed_list = attr_def.allowed_values.list.type
    if dtype not in allowed_list:
      raise TypeError(
          "Value passed to parameter '%s' has DataType %s not in list of "
          "allowed values: %s" %
          (param_name, dtypes.as_dtype(dtype).name,
           ", ".join(dtypes.as_dtype(x).name for x in allowed_list)))
Example 10: input_builder
def input_builder(self):
  """Builds inputs in the graph."""
  input_shape = [None] + self.input_shape[1:]
  output_shape = [None] + self.output_shape[1:]
  self._input_placeholder = array_ops.placeholder(
      dtypes.as_dtype(self.input_dtype), input_shape, name="input")
  self._output_placeholder = array_ops.placeholder(
      dtypes.as_dtype(self.output_dtype), output_shape, name="output")
  return self._input_placeholder, self._output_placeholder
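The leading None makes the batch dimension variable, while as_dtype lets self.input_dtype be given as a string or numpy type. A standalone sketch of the same pattern (the 28x28 shape is just an illustrative stand-in):

from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops

ph = array_ops.placeholder(
    dtypes.as_dtype("float32"), [None, 28, 28], name="input")
print(ph.get_shape())  # (?, 28, 28)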
Example 11: __init__
def __init__(self, key_dtype, value_dtype):
  """Construct a table initializer object.

  Args:
    key_dtype: Type of the table keys.
    value_dtype: Type of the table values.
  """
  self._key_dtype = dtypes.as_dtype(key_dtype)
  self._value_dtype = dtypes.as_dtype(value_dtype)
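Calling as_dtype in a constructor like this lets callers pass strings, numpy types, or DType objects interchangeably, with everything normalized on the way in. A hypothetical stand-in class shows the effect:

import numpy as np
from tensorflow.python.framework import dtypes

class TableSpec(object):
  """Hypothetical stand-in for the initializer above."""

  def __init__(self, key_dtype, value_dtype):
    self._key_dtype = dtypes.as_dtype(key_dtype)
    self._value_dtype = dtypes.as_dtype(value_dtype)

spec = TableSpec("int64", np.float32)  # mixed spellings are fine
assert spec._key_dtype == dtypes.int64
assert spec._value_dtype == dtypes.float32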
Example 12: _verifySolve
def _verifySolve(self,
                 x,
                 y,
                 dtype,
                 use_placeholder,
                 fast,
                 l2_regularizer,
                 batch_shape=()):
  if not fast and l2_regularizer != 0:
    # The slow path does not support regularization.
    return
  maxdim = np.max(x.shape)
  if dtype == np.float32 or dtype == np.complex64:
    tol = maxdim * 5e-4
  else:
    tol = maxdim * 5e-7
  a = x.astype(dtype)
  b = y.astype(dtype)
  if dtype in [np.complex64, np.complex128]:
    a.imag = a.real
    b.imag = b.real
  # numpy.linalg.lstsq does not support batching, so we just solve a single
  # system and replicate the solution and residual norm.
  np_ans = _SolveWithNumpy(x, y, l2_regularizer=l2_regularizer)
  np_r = np.dot(np.conj(a.T), b - np.dot(a, np_ans))
  np_r_norm = np.sqrt(np.sum(np.conj(np_r) * np_r))
  if batch_shape != ():
    a = np.tile(a, batch_shape + (1, 1))
    b = np.tile(b, batch_shape + (1, 1))
    np_ans = np.tile(np_ans, batch_shape + (1, 1))
    np_r_norm = np.tile(np_r_norm, batch_shape)
  with self.cached_session(use_gpu=fast) as sess:
    if use_placeholder:
      a_ph = array_ops.placeholder(dtypes.as_dtype(dtype))
      b_ph = array_ops.placeholder(dtypes.as_dtype(dtype))
      feed_dict = {a_ph: a, b_ph: b}
      tf_ans = linalg_ops.matrix_solve_ls(
          a_ph, b_ph, fast=fast, l2_regularizer=l2_regularizer)
    else:
      tf_ans = linalg_ops.matrix_solve_ls(
          a, b, fast=fast, l2_regularizer=l2_regularizer)
      feed_dict = {}
      self.assertEqual(np_ans.shape, tf_ans.get_shape())
    if l2_regularizer == 0:
      # The least squares solution should satisfy A^H * (b - A*x) = 0.
      tf_r = b - math_ops.matmul(a, tf_ans)
      tf_r = math_ops.matmul(a, tf_r, adjoint_a=True)
      tf_r_norm = linalg_ops.norm(tf_r, ord="fro", axis=[-2, -1])
      tf_ans_val, tf_r_norm_val = sess.run(
          [tf_ans, tf_r_norm], feed_dict=feed_dict)
      self.assertAllClose(np_r_norm, tf_r_norm_val, atol=tol, rtol=tol)
    else:
      tf_ans_val = sess.run(tf_ans, feed_dict=feed_dict)

    self.assertEqual(np_ans.shape, tf_ans_val.shape)
    self.assertAllClose(np_ans, tf_ans_val, atol=2 * tol, rtol=2 * tol)
Example 13: _testBinary
def _testBinary(self, op, a, b, expected, equality_test=None):
  with self.test_session() as session:
    with self.test_scope():
      pa = array_ops.placeholder(dtypes.as_dtype(a.dtype), a.shape, name="a")
      pb = array_ops.placeholder(dtypes.as_dtype(b.dtype), b.shape, name="b")
      output = op(pa, pb)
    result = session.run(output, {pa: a, pb: b})
    if equality_test is None:
      equality_test = self.assertAllCloseAccordingToType
    equality_test(result, expected, rtol=1e-3)
Example 14: __init__
def __init__(self, key_dtype, value_dtype):
  """Construct a lookup table interface.

  Args:
    key_dtype: The table key type.
    value_dtype: The table value type.
  """
  self._key_dtype = dtypes.as_dtype(key_dtype)
  self._value_dtype = dtypes.as_dtype(value_dtype)
  super(LookupInterface, self).__init__()
Example 15: make_attr
def make_attr(attr_type, value):
  if attr_type == pywrap_tensorflow.TF_ATTR_TYPE:
    return dtypes.as_dtype(value)
  elif attr_type == [pywrap_tensorflow.TF_ATTR_TYPE]:
    return [dtypes.as_dtype(v) for v in value]
  elif attr_type == pywrap_tensorflow.TF_ATTR_SHAPE:
    return tensor_shape.as_shape(value).as_proto()
  elif attr_type == [pywrap_tensorflow.TF_ATTR_SHAPE]:
    return [tensor_shape.as_shape(v).as_proto() for v in value]
  return value
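A sketch of how make_attr might be called, assuming the TF_ATTR_* constants exposed by the TF 1.x pywrap layer and the raw enum value 1 (DT_FLOAT); this is illustrative, not taken from the source project:

from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import dtypes

# TYPE attrs arrive as raw DataType enum ints; make_attr wraps them in DType.
assert make_attr(pywrap_tensorflow.TF_ATTR_TYPE, 1) == dtypes.float32

# SHAPE attrs are converted to TensorShapeProto messages.
proto = make_attr(pywrap_tensorflow.TF_ATTR_SHAPE, [2, 3])
print(proto.dim[0].size, proto.dim[1].size)  # 2 3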