This article collects typical usage examples of tensorflow.compat.v2.float64 in Python. Note that float64 is a DType attribute of the tensorflow.compat.v2 module (not a method of a class). If you are wondering how v2.float64 is used in practice, or are looking for concrete examples, the curated code samples below may help; you can also explore further usage examples of the containing module, tensorflow.compat.v2.
The following presents 15 code examples of v2.float64, sorted by popularity by default.
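Before the collected examples, here is a minimal, self-contained sketch of the typical usage pattern: tf.float64 is passed as the dtype argument when constructing or casting tensors. This snippet is illustrative only and is not taken from any of the projects quoted below.

import tensorflow.compat.v2 as tf

# tf.float64 is a DType object; pass it as `dtype` when creating or casting tensors.
x = tf.constant([1.0, 2.0, 3.0], dtype=tf.float64)
y = tf.cast(tf.range(3), dtype=tf.float64)
print(x.dtype)        # <dtype: 'float64'>
print((x + y).dtype)  # float64, since both operands are float64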
Example 1: true_divide
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import float64 [as alias]
def true_divide(x1, x2):

  def _avoid_float64(x1, x2):
    if x1.dtype == x2.dtype and x1.dtype in (tf.int32, tf.int64):
      x1 = tf.cast(x1, dtype=tf.float32)
      x2 = tf.cast(x2, dtype=tf.float32)
    return x1, x2

  def f(x1, x2):
    if x1.dtype == tf.bool:
      assert x2.dtype == tf.bool
      float_ = dtypes.default_float_type()
      x1 = tf.cast(x1, float_)
      x2 = tf.cast(x2, float_)
    if not dtypes.is_allow_float64():
      # tf.math.truediv in Python3 produces float64 when both inputs are int32
      # or int64. We want to avoid that when is_allow_float64() is False.
      x1, x2 = _avoid_float64(x1, x2)
    return tf.math.truediv(x1, x2)

  return _bin_op(f, x1, x2)
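The _avoid_float64 helper above exists because plain tf.math.truediv promotes int32/int64 inputs to float64. A quick standalone check of that core-TensorFlow behaviour (stock TF ops only, not the wrapper or the dtypes module above):

import tensorflow.compat.v2 as tf

a = tf.constant([1, 2, 3], dtype=tf.int32)
b = tf.constant([2, 2, 2], dtype=tf.int32)
print(tf.math.truediv(a, b).dtype)  # float64: integer inputs are promoted
# Casting to float32 first, as _avoid_float64 does, keeps the result in float32.
print(tf.math.truediv(tf.cast(a, tf.float32), tf.cast(b, tf.float32)).dtype)  # float32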
Example 2: setUp
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import float64 [as alias]
def setUp(self):
  super(LogicTest, self).setUp()
  self.array_transforms = [
      lambda x: x,  # Identity
      tf.convert_to_tensor,
      np.array,
      lambda x: np.array(x, dtype=np.int32),
      lambda x: np.array(x, dtype=np.int64),
      lambda x: np.array(x, dtype=np.float32),
      lambda x: np.array(x, dtype=np.float64),
      array_ops.array,
      lambda x: array_ops.array(x, dtype=tf.int32),
      lambda x: array_ops.array(x, dtype=tf.int64),
      lambda x: array_ops.array(x, dtype=tf.float32),
      lambda x: array_ops.array(x, dtype=tf.float64),
  ]
Example 3: testOutputIsPermutation
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import float64 [as alias]
def testOutputIsPermutation(self):
  """Checks that stateless_random_shuffle outputs a permutation."""
  for dtype in (tf.int32, tf.int64, tf.float32, tf.float64):
    identity_permutation = tf.range(10, dtype=dtype)
    random_shuffle_seed_1 = tff_rnd.stateless_random_shuffle(
        identity_permutation, seed=tf.constant((1, 42), tf.int64))
    random_shuffle_seed_2 = tff_rnd.stateless_random_shuffle(
        identity_permutation, seed=tf.constant((2, 42), tf.int64))
    # Check that the shuffles are of the correct dtype.
    for shuffle in (random_shuffle_seed_1, random_shuffle_seed_2):
      np.testing.assert_equal(shuffle.dtype, dtype.as_numpy_dtype)
    random_shuffle_seed_1 = self.evaluate(random_shuffle_seed_1)
    random_shuffle_seed_2 = self.evaluate(random_shuffle_seed_2)
    identity_permutation = self.evaluate(identity_permutation)
    # Check that the two shuffles differ.
    self.assertTrue(
        np.abs(random_shuffle_seed_1 - random_shuffle_seed_2).max())
    # Check that the shuffles are indeed permutations.
    for shuffle in (random_shuffle_seed_1, random_shuffle_seed_2):
      self.assertAllEqual(set(shuffle), set(identity_permutation))
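The "stateless" property exercised here is the same one provided by TensorFlow's core stateless random ops: the output is a pure function of the seed, so the same seed always yields the same values. A short illustration with tf.random.stateless_uniform (core TF, not the tff_rnd helper under test; assumes eager execution, as in TF 2.x):

import tensorflow.compat.v2 as tf

seed = tf.constant((1, 42), dtype=tf.int64)
a = tf.random.stateless_uniform([5], seed=seed)
b = tf.random.stateless_uniform([5], seed=seed)
print(bool(tf.reduce_all(tf.equal(a, b))))  # True: identical seed, identical output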
Example 4: testOutputIsIndependentOfInputValues
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import float64 [as alias]
def testOutputIsIndependentOfInputValues(self):
  """stateless_random_shuffle output is independent of input_tensor values."""
  # Generate a sorted array of random numbers to check that the result
  # is independent of `input_tensor` values.
  np.random.seed(25)
  random_input = np.random.normal(size=[10])
  random_input.sort()
  for dtype in (tf.int32, tf.int64, tf.float32, tf.float64):
    # Permutation of the sequence [0, 1, ..., 9].
    random_permutation = tff_rnd.stateless_random_shuffle(
        tf.range(10, dtype=dtype), seed=(100, 42))
    random_permutation = self.evaluate(random_permutation)
    # Shuffle `random_input` with the same seed.
    random_shuffle_control = tff_rnd.stateless_random_shuffle(
        random_input, seed=(100, 42))
    random_shuffle_control = self.evaluate(random_shuffle_control)
    # Check that the generated permutation does not depend on the underlying
    # values.
    np.testing.assert_array_equal(
        np.argsort(random_permutation), np.argsort(random_shuffle_control))
Example 5: testMultiDimensionalShape
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import float64 [as alias]
def testMultiDimensionalShape(self):
  """Check that stateless_random_shuffle works with multi-dim shapes."""
  for dtype in (tf.int32, tf.int64, tf.float32, tf.float64):
    input_permutation = tf.constant([[[1], [2], [3]], [[4], [5], [6]]],
                                    dtype=dtype)
    random_shuffle = tff_rnd.stateless_random_shuffle(
        input_permutation, seed=(1, 42))
    random_permutation_first_call = self.evaluate(random_shuffle)
    random_permutation_next_call = self.evaluate(random_shuffle)
    input_permutation = self.evaluate(input_permutation)
    # Check that the dtype is correct.
    np.testing.assert_equal(random_permutation_first_call.dtype,
                            dtype.as_numpy_dtype)
    # Check that the shuffles are the same.
    np.testing.assert_array_equal(random_permutation_first_call,
                                  random_permutation_next_call)
    # Check that the output shape is correct.
    np.testing.assert_equal(random_permutation_first_call.shape,
                            input_permutation.shape)
Example 6: testHomogeneous
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import float64 [as alias]
def testHomogeneous(self, scheme, accuracy_order):
  # Tests solving du/dt = At for a time step.
  # Compares with the exact solution u(t) = exp(At) u(0).
  # The time step should be small enough to "resolve" different orders of
  # accuracy.
  time_step = 0.0001
  u = tf.constant([1, 2, -1, -2], dtype=tf.float64)
  matrix = tf.constant(
      [[1, -1, 0, 0], [3, 1, 2, 0], [0, -2, 1, 4], [0, 0, 3, 1]],
      dtype=tf.float64)
  tridiag_form = self._convert_to_tridiagonal_format(matrix)
  actual = self.evaluate(
      scheme(u, 0, time_step, lambda t: (tridiag_form, None)))
  expected = self.evaluate(
      tf.squeeze(
          tf.matmul(
              tf.linalg.expm(matrix * time_step), tf.expand_dims(u, 1))))
  error_tolerance = 30 * time_step**(accuracy_order + 1)
  self.assertLess(np.max(np.abs(actual - expected)), error_tolerance)
Example 7: testHomogeneousBackwards
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import float64 [as alias]
def testHomogeneousBackwards(self, scheme, accuracy_order):
  # Tests solving du/dt = At for a backward time step.
  # Compares with the exact solution u(0) = exp(-At) u(t).
  time_step = 0.0001
  u = tf.constant([1, 2, -1, -2], dtype=tf.float64)
  matrix = tf.constant(
      [[1, -1, 0, 0], [3, 1, 2, 0], [0, -2, 1, 4], [0, 0, 3, 1]],
      dtype=tf.float64)
  tridiag_form = self._convert_to_tridiagonal_format(matrix)
  actual = self.evaluate(
      scheme(u, time_step, 0, lambda t: (tridiag_form, None)))
  expected = self.evaluate(
      tf.squeeze(
          tf.matmul(
              tf.linalg.expm(-matrix * time_step), tf.expand_dims(u, 1))))
  error_tolerance = 30 * time_step**(accuracy_order + 1)
  self.assertLess(np.max(np.abs(actual - expected)), error_tolerance)
Example 8: testInhomogeneousBackwards
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import float64 [as alias]
def testInhomogeneousBackwards(self, scheme, accuracy_order):
  # Tests solving du/dt = At + b for a backward time step.
  # Compares with the exact solution
  # u(0) = exp(-At) u(t) + (exp(-At) - I) A^(-1) b.
  time_step = 0.0001
  u = tf.constant([1, 2, -1, -2], dtype=tf.float64)
  matrix = tf.constant(
      [[1, -1, 0, 0], [3, 1, 2, 0], [0, -2, 1, 4], [0, 0, 3, 1]],
      dtype=tf.float64)
  b = tf.constant([1, -1, -2, 2], dtype=tf.float64)
  tridiag_form = self._convert_to_tridiagonal_format(matrix)
  actual = self.evaluate(scheme(u, time_step, 0, lambda t: (tridiag_form, b)))
  exponent = tf.linalg.expm(-matrix * time_step)
  eye = tf.eye(4, 4, dtype=tf.float64)
  u = tf.expand_dims(u, 1)
  b = tf.expand_dims(b, 1)
  expected = (
      tf.matmul(exponent, u) +
      tf.matmul(exponent - eye, tf.matmul(tf.linalg.inv(matrix), b)))
  expected = self.evaluate(tf.squeeze(expected))
  error_tolerance = 30 * time_step**(accuracy_order + 1)
  self.assertLess(np.max(np.abs(actual - expected)), error_tolerance)
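For reference, the closed-form expectation used above can be reproduced outside TensorFlow. A small NumPy/SciPy sketch, assuming the same matrix, vectors and time step as in the test:

import numpy as np
from scipy.linalg import expm, inv

time_step = 0.0001
u = np.array([1., 2., -1., -2.])
matrix = np.array([[1, -1, 0, 0], [3, 1, 2, 0], [0, -2, 1, 4], [0, 0, 3, 1]],
                  dtype=np.float64)
b = np.array([1., -1., -2., 2.])

exponent = expm(-matrix * time_step)
# u(0) = exp(-A t) u(t) + (exp(-A t) - I) A^(-1) b
expected = exponent @ u + (exponent - np.eye(4)) @ inv(matrix) @ b
print(expected)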
Example 9: testFindsRootForFlatFunction
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import float64 [as alias]
def testFindsRootForFlatFunction(self):
  # Flat in the [-0.5, 0.5] range.
  objective_fn = lambda x: 0 if x == 0 else x * exp(-1 / x**2)
  left_bracket = [-10]
  right_bracket = [1]
  expected_num_iterations = [13]
  expected_num_iterations, result = self.evaluate([
      tf.constant(expected_num_iterations, dtype=tf.int32),
      root_search.brentq(objective_fn,
                         tf.constant(left_bracket, dtype=tf.float64),
                         tf.constant(right_bracket, dtype=tf.float64))
  ])
  _, value_at_roots, num_iterations, _ = result
  # Simply check that the objective function is close to zero at the returned
  # estimate; do not check the estimate itself.
  # Unlike Brent's original algorithm (and the SciPy implementation), this
  # implementation stops the search as soon as a good enough root estimate is
  # found. As a result, the estimate may differ significantly from the one
  # returned by SciPy for functions which are extremely flat around the root.
  self.assertAllClose(value_at_roots, [0.])
  self.assertAllEqual(num_iterations, expected_num_iterations)
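To see why the comment calls this objective "flat": x * exp(-1/x**2) and all of its derivatives vanish at zero, so it stays extremely close to zero across the bracketed neighbourhood of the root. A quick NumPy check:

import numpy as np

xs = np.array([-0.5, -0.25, -0.1, 0.1, 0.25, 0.5])
print(xs * np.exp(-1.0 / xs**2))  # largest magnitude is about 9e-3, i.e. nearly flat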
Example 10: testWithNoIteration
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import float64 [as alias]
def testWithNoIteration(self):
  left_bracket = [-10, 1]
  right_bracket = [10, -1]
  first_guess = tf.constant(left_bracket, dtype=tf.float64)
  second_guess = tf.constant(right_bracket, dtype=tf.float64)
  # Skip iteration entirely.
  # Should return a Tensor built from the best guesses in input positions.
  guess, result = self.evaluate([
      tf.constant([-10, -1], dtype=tf.float64),
      root_search.brentq(
          polynomial5, first_guess, second_guess, max_iterations=0)
  ])
  self.assertAllEqual(result.estimated_root, guess)
Example 11: test_error_calc
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import float64 [as alias]
def test_error_calc(self, optimize_for_tpu):
  """Tests the deviation of the interpolated values from the actual ones."""
  sampling_points = 1000
  spline_x = np.linspace(0.0, 10.0, num=11, dtype=np.float64)
  spline_y = [1.0 / (1.0 + x * x) for x in spline_x]
  x_series = np.array([spline_x])
  y_series = np.array([spline_y])
  spline = tff.math.interpolation.cubic.build_spline(x_series, y_series)
  # There is an error if we go to 10.0.
  test_range_x = np.linspace(0.0, 9.99, num=sampling_points, dtype=np.float64)
  search_args = tf.constant(np.array([test_range_x]), dtype=tf.float64)
  projected_y = tff.math.interpolation.cubic.interpolate(
      search_args, spline, optimize_for_tpu=optimize_for_tpu)
  expected_y = tf.constant([[1.0 / (1.0 + x * x) for x in test_range_x]],
                           dtype=tf.float64)
  errors = expected_y - projected_y
  deviation = self.evaluate(tfp.stats.stddev(errors[0], sample_axis=0))
  limit = 0.02
  self.assertLess(deviation, limit)
Example 12: test_spline_batch
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import float64 [as alias]
def test_spline_batch(self, optimize_for_tpu):
  """Tests batching of four splines."""
  for dtype in (np.float32, np.float64):
    x_data = np.linspace(-11, 12, 24)
    x_data = np.reshape(x_data, [2, 2, 6])
    y_data = 1.0 / (1.0 + x_data * x_data)
    search_args = np.array([[[-10.5, -5.], [-4.5, 1]],
                            [[1.5, 2.], [7.5, 12.]]])
    spline = tff.math.interpolation.cubic.build_spline(
        x_data, y_data, dtype=dtype)
    result = tff.math.interpolation.cubic.interpolate(
        search_args, spline,
        optimize_for_tpu=optimize_for_tpu, dtype=dtype)
    expected = np.array([[[0.00900778, 0.02702703],
                          [0.04705774, 1.]],
                         [[0.33135411, 0.2],
                          [0.01756963, 0.00689655]]],
                        dtype=dtype)
    self.assertEqual(result.dtype.as_numpy_dtype, dtype)
    result = self.evaluate(result)
    np.testing.assert_almost_equal(expected, result)
Example 13: test_sample_paths_dtypes
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import float64 [as alias]
def test_sample_paths_dtypes(self):
  """Sampled paths have the expected dtypes."""
  for dtype in [np.float32, np.float64]:
    drift_fn = lambda t, x: tf.sqrt(t) * tf.ones_like(x, dtype=t.dtype)
    vol_fn = lambda t, x: t * tf.ones([1, 1], dtype=t.dtype)
    paths = self.evaluate(
        euler_sampling.sample(
            dim=1,
            drift_fn=drift_fn, volatility_fn=vol_fn,
            times=[0.1, 0.2],
            num_samples=10,
            initial_state=[0.1],
            time_step=0.01,
            seed=123,
            dtype=dtype))
    self.assertEqual(paths.dtype, dtype)
Example 14: test_construct_vol_covar_and_vol_callables
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import float64 [as alias]
def test_construct_vol_covar_and_vol_callables(self):
  dtype = np.float64
  vol_matrix = np.array([[1.0, 0.21, -0.33], [0.61, 1.5, 1.77],
                         [-0.3, 1.19, -0.55]]).astype(dtype)
  covar_matrix = np.matmul(vol_matrix, vol_matrix.transpose())
  vol_fn = lambda time: bm_utils.outer_multiply(time, vol_matrix)

  def tc_fn(t1, t2):
    return bm_utils.outer_multiply((t2**2 - t1**2) / 2, covar_matrix)

  times = np.array([[0.12, 0.44], [0.48, 1.698]]).astype(dtype)
  actual_vol_fn, actual_tc_fn = bm_utils.construct_vol_data(
      vol_fn, tc_fn, 3, dtype)
  actual_vols = self.evaluate(actual_vol_fn(times))
  np.testing.assert_array_equal(actual_vols.shape, [2, 2, 3, 3])
  np.testing.assert_allclose(actual_vols, self.evaluate(vol_fn(times)))
  times2 = times + np.array([[0.12, 0.34], [0.56, 0.78]]).astype(dtype)
  actual_tc = self.evaluate(actual_tc_fn(times, times2))
  np.testing.assert_array_equal(actual_tc.shape, [2, 2, 3, 3])
  # Compare against the user-supplied total covariance function, mirroring the
  # vol_fn check above (re-evaluating actual_tc_fn would compare the result
  # with itself).
  np.testing.assert_allclose(actual_tc, self.evaluate(tc_fn(times, times2)))
Example 15: test_construct_vol_covar_and_vector_vol
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import float64 [as alias]
def test_construct_vol_covar_and_vector_vol(self):
  dtype = np.float64
  vol = np.array([0.94, 1.1, 0.42], dtype=dtype)
  # Note that the total covariance function we supply is deliberately not
  # the one that is implied by the volatility function.
  np.random.seed(5321)
  dim = 3
  vol_matrix = np.random.randn(dim, dim)
  covar_matrix = np.matmul(vol_matrix, vol_matrix.transpose())

  def tc_fn(t1, t2):
    return bm_utils.outer_multiply(t2 - t1, covar_matrix)

  times = np.array([[0.12], [0.48]]).astype(dtype)
  actual_vol_fn, actual_tc_fn = bm_utils.construct_vol_data(
      tf.constant(vol), tc_fn, dim, dtype)
  actual_vols = self.evaluate(actual_vol_fn(times))
  np.testing.assert_array_equal(actual_vols.shape, [2, 1, dim, dim])
  for i in range(2):
    np.testing.assert_allclose(actual_vols[i, 0], np.diag(vol))
  actual_tc = self.evaluate(actual_tc_fn(times, times + 0.22))
  np.testing.assert_array_equal(actual_tc.shape, [2, 1, dim, dim])
  for i in range(2):
    np.testing.assert_allclose(actual_tc[i, 0], covar_matrix * 0.22)