This article collects typical usage examples of the Python method tensorflow.compat.v2.matmul. If you are wondering what Python v2.matmul does, how to use it, or what real calls to it look like, the curated code examples below may help. You can also explore further usage examples from the module the method lives in, tensorflow.compat.v2.
Shown below are 15 code examples of the v2.matmul method, sorted by popularity by default. You can vote up the examples you like or find useful; your votes help the system recommend better Python code examples.
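Before the examples, here is a minimal, self-contained sketch of the import pattern and basic call that they all assume (the alias `tf` and the shapes are illustrative):

import numpy as np
from tensorflow.compat import v2 as tf

# A plain dense matrix product: (2x3) @ (3x2) -> (2x2).
a = tf.constant(np.arange(6, dtype=np.float64).reshape(2, 3))
b = tf.constant(np.ones((3, 2), dtype=np.float64))
c = tf.matmul(a, b)
print(c.shape)  # (2, 2)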
Example 1: testHomogeneous
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import matmul [as alias]
def testHomogeneous(self, scheme, accuracy_order):
  # Tests solving du/dt = Au for a time step.
  # Compares with the exact solution u(t) = exp(At) u(0).
  # The time step should be small enough to "resolve" the different orders of
  # accuracy.
  time_step = 0.0001
  u = tf.constant([1, 2, -1, -2], dtype=tf.float64)
  matrix = tf.constant(
      [[1, -1, 0, 0], [3, 1, 2, 0], [0, -2, 1, 4], [0, 0, 3, 1]],
      dtype=tf.float64)
  tridiag_form = self._convert_to_tridiagonal_format(matrix)
  actual = self.evaluate(
      scheme(u, 0, time_step, lambda t: (tridiag_form, None)))
  expected = self.evaluate(
      tf.squeeze(
          tf.matmul(tf.linalg.expm(matrix * time_step),
                    tf.expand_dims(u, 1))))
  error_tolerance = 30 * time_step**(accuracy_order + 1)
  self.assertLess(np.max(np.abs(actual - expected)), error_tolerance)
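As a cross-check of the closed form used above, a fine-grained explicit Euler integration of du/dt = Au converges to expm(At) u(0). A standalone sketch (the matrix and step count are illustrative):

import numpy as np
from tensorflow.compat import v2 as tf

matrix = tf.constant(
    [[1, -1, 0, 0], [3, 1, 2, 0], [0, -2, 1, 4], [0, 0, 3, 1]],
    dtype=tf.float64)
u0 = tf.constant([[1], [2], [-1], [-2]], dtype=tf.float64)
t, n = 0.0001, 1000
u = u0
for _ in range(n):
  # One tiny explicit-Euler step of du/dt = Au.
  u = u + (t / n) * tf.matmul(matrix, u)
exact = tf.matmul(tf.linalg.expm(matrix * t), u0)
np.testing.assert_allclose(u.numpy(), exact.numpy(), rtol=1e-6)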
Example 2: testHomogeneousBackwards
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import matmul [as alias]
def testHomogeneousBackwards(self, scheme, accuracy_order):
  # Tests solving du/dt = Au for a backward time step.
  # Compares with the exact solution u(0) = exp(-At) u(t).
  time_step = 0.0001
  u = tf.constant([1, 2, -1, -2], dtype=tf.float64)
  matrix = tf.constant(
      [[1, -1, 0, 0], [3, 1, 2, 0], [0, -2, 1, 4], [0, 0, 3, 1]],
      dtype=tf.float64)
  tridiag_form = self._convert_to_tridiagonal_format(matrix)
  actual = self.evaluate(
      scheme(u, time_step, 0, lambda t: (tridiag_form, None)))
  expected = self.evaluate(
      tf.squeeze(
          tf.matmul(
              tf.linalg.expm(-matrix * time_step), tf.expand_dims(u, 1))))
  error_tolerance = 30 * time_step**(accuracy_order + 1)
  self.assertLess(np.max(np.abs(actual - expected)), error_tolerance)
Example 3: testInhomogeneous
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import matmul [as alias]
def testInhomogeneous(self, scheme, accuracy_order):
  # Tests solving du/dt = Au + b for a time step.
  # Compares with the exact solution
  # u(t) = exp(At) u(0) + (exp(At) - I) A^(-1) b.
  time_step = 0.0001
  u = tf.constant([1, 2, -1, -2], dtype=tf.float64)
  matrix = tf.constant(
      [[1, -1, 0, 0], [3, 1, 2, 0], [0, -2, 1, 4], [0, 0, 3, 1]],
      dtype=tf.float64)
  b = tf.constant([1, -1, -2, 2], dtype=tf.float64)
  tridiag_form = self._convert_to_tridiagonal_format(matrix)
  actual = self.evaluate(scheme(u, 0, time_step, lambda t: (tridiag_form, b)))
  exponent = tf.linalg.expm(matrix * time_step)
  eye = tf.eye(4, 4, dtype=tf.float64)
  u = tf.expand_dims(u, 1)
  b = tf.expand_dims(b, 1)
  expected = (
      tf.matmul(exponent, u) +
      tf.matmul(exponent - eye, tf.matmul(tf.linalg.inv(matrix), b)))
  expected = self.evaluate(tf.squeeze(expected))
  error_tolerance = 30 * time_step**(accuracy_order + 1)
  self.assertLess(np.max(np.abs(actual - expected)), error_tolerance)
Example 4: testInhomogeneousBackwards
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import matmul [as alias]
def testInhomogeneousBackwards(self, scheme, accuracy_order):
  # Tests solving du/dt = Au + b for a backward time step.
  # Compares with the exact solution
  # u(0) = exp(-At) u(t) + (exp(-At) - I) A^(-1) b.
  time_step = 0.0001
  u = tf.constant([1, 2, -1, -2], dtype=tf.float64)
  matrix = tf.constant(
      [[1, -1, 0, 0], [3, 1, 2, 0], [0, -2, 1, 4], [0, 0, 3, 1]],
      dtype=tf.float64)
  b = tf.constant([1, -1, -2, 2], dtype=tf.float64)
  tridiag_form = self._convert_to_tridiagonal_format(matrix)
  actual = self.evaluate(scheme(u, time_step, 0, lambda t: (tridiag_form, b)))
  exponent = tf.linalg.expm(-matrix * time_step)
  eye = tf.eye(4, 4, dtype=tf.float64)
  u = tf.expand_dims(u, 1)
  b = tf.expand_dims(b, 1)
  expected = (
      tf.matmul(exponent, u) +
      tf.matmul(exponent - eye, tf.matmul(tf.linalg.inv(matrix), b)))
  expected = self.evaluate(tf.squeeze(expected))
  error_tolerance = 30 * time_step**(accuracy_order + 1)
  self.assertLess(np.max(np.abs(actual - expected)), error_tolerance)
Example 5: _block_matmul
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import matmul [as alias]
def _block_matmul(m1, m2):
  """Multiplies block matrices represented as nested lists."""
  # Calls itself recursively to multiply blocks, until it reaches the level of
  # plain tf.Tensors.
  if isinstance(m1, tf.Tensor):
    assert isinstance(m2, tf.Tensor)
    return tf.matmul(m1, m2)
  assert _is_nested_list(m1) and _is_nested_list(m2)
  i_max = len(m1)
  k_max = len(m2)
  j_max = 0 if k_max == 0 else len(m2[0])
  if i_max > 0:
    assert len(m1[0]) == k_max

  def row_by_column(i, j):
    return _block_add(*[_block_matmul(m1[i][k], m2[k][j])
                        for k in range(k_max)])

  return [[row_by_column(i, j) for j in range(j_max)] for i in range(i_max)]
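The helpers `_is_nested_list` and `_block_add` are private and not shown on this page. Below is a minimal sketch of plausible stand-ins plus a usage example with one level of 2x2 blocking; both helpers are assumptions, not the library's actual code:

import tensorflow.compat.v2 as tf

def _is_nested_list(m):
  # Hypothetical stand-in: a block matrix is a non-empty list of lists.
  return isinstance(m, list) and bool(m) and isinstance(m[0], list)

def _block_add(*blocks):
  # Hypothetical stand-in: sums the blocks. With one level of blocking, the
  # summands reaching this point are plain tf.Tensors.
  total = blocks[0]
  for b in blocks[1:]:
    total = total + b
  return total

# Multiply two 4x4 matrices stored as 2x2 grids of 2x2 blocks.
eye2 = tf.eye(2)
zero2 = tf.zeros((2, 2))
m1 = [[eye2, 2.0 * eye2], [zero2, eye2]]
m2 = [[eye2, zero2], [eye2, eye2]]
product = _block_matmul(m1, m2)  # Again a 2x2 nested list of 2x2 blocks.
print(product[0][1])  # Top-right block: 2 * I.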
Example 6: matmul_any_tensor_dense_tensor
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import matmul [as alias]
def matmul_any_tensor_dense_tensor(a,
                                   b,
                                   a_is_sparse=True,
                                   transpose_a=False):
  """Like tf.matmul except that the first argument might be a SparseTensor.

  Args:
    a: SparseTensor or Tensor
    b: Tensor
    a_is_sparse: true if a is a SparseTensor
    transpose_a: transpose a before performing the matmul operation.

  Returns:
    TF expression for a.dot(b) or a.transpose().dot(b)

  Raises:
    ValueError: If a or b are of the wrong type.
  """
  if a_is_sparse:
    _check_type('a', a, tf.SparseTensor)
    return tf.sparse.sparse_dense_matmul(
        b, a, adjoint_a=False, adjoint_b=not transpose_a)
  else:
    return tf.transpose(
        a=tf.matmul(a, tf.transpose(a=b), transpose_a=transpose_a))
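`_check_type` is a private validation helper not shown here. A hypothetical one-line stand-in plus a small usage sketch (the shapes are illustrative):

import tensorflow.compat.v2 as tf

def _check_type(name, value, expected_type):
  # Hypothetical stand-in for the private validation helper used above.
  if not isinstance(value, expected_type):
    raise ValueError('%s must be of type %s' % (name, expected_type.__name__))

a_sparse = tf.SparseTensor(
    indices=[[0, 0], [1, 2]], values=[1.0, 2.0], dense_shape=[2, 3])
b = tf.ones((4, 3))
out = matmul_any_tensor_dense_tensor(a_sparse, b)  # Shape [4, 2] here.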
Example 7: matmul
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import matmul [as alias]
def matmul(x1, x2):  # pylint: disable=missing-docstring
  def f(x1, x2):
    try:
      return utils.cond(
          tf.rank(x2) == 1,
          lambda: tf.tensordot(x1, x2, axes=1),
          lambda: utils.cond(  # pylint: disable=g-long-lambda
              tf.rank(x1) == 1,
              lambda: tf.tensordot(x1, x2, axes=[[0], [-2]]),
              lambda: tf.matmul(x1, x2)))
    except tf.errors.InvalidArgumentError as err:
      six.reraise(ValueError, ValueError(str(err)), sys.exc_info()[2])
  return _bin_op(f, x1, x2)
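`utils.cond` and `_bin_op` are module internals not shown here. The same NumPy-style rank dispatch, written against plain TF ops with static ranks, is sketched below (illustrative only, not the module's code):

import tensorflow.compat.v2 as tf

def np_style_matmul(x1, x2):
  x1, x2 = tf.convert_to_tensor(x1), tf.convert_to_tensor(x2)
  if x2.shape.rank == 1:  # Matrix @ vector -> vector.
    return tf.tensordot(x1, x2, axes=1)
  if x1.shape.rank == 1:  # Vector @ matrix -> vector.
    return tf.tensordot(x1, x2, axes=[[0], [-2]])
  return tf.matmul(x1, x2)  # Ordinary matrix product.

m = tf.ones((2, 3))
v = tf.ones((3,))
print(np_style_matmul(m, v).shape)              # (2,)
print(np_style_matmul(v, tf.ones((3, 4))).shape)  # (4,)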
Example 8: test_HessianVectorProduct
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import matmul [as alias]
def test_HessianVectorProduct(self):
  """Tests the HVP for a simple quadratic function."""
  Q = tf.linalg.diag([1.0, 2.0, 3.0])  # pylint:disable=invalid-name
  def quadratic(x):
    return 0.5 * tf.matmul(x, tf.matmul(Q, x), transpose_a=True)
  x = tf.ones((3, 1))
  v = tf.constant([[0.2], [0.5], [-1.2]])
  # Computes (d2/dx2 1/2 * x^TQx).v = Q.v
  hvp = matrix_vector_product._hessian_vector_product(quadratic, x, v)
  self.assertAllClose(hvp, tf.matmul(Q, v))
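For reference, a Hessian-vector product can be computed with two nested gradient tapes instead of materializing the Hessian. This is a minimal sketch of that standard trick, assuming (not verified here) that `_hessian_vector_product` works along these lines:

import tensorflow.compat.v2 as tf

def hessian_vector_product(f, x, v):
  with tf.GradientTape() as outer:
    outer.watch(x)
    with tf.GradientTape() as inner:
      inner.watch(x)
      y = f(x)
    grad = inner.gradient(y, x)
    # d/dx <grad(x), v> = H(x) v, since v is constant.
    gv = tf.reduce_sum(grad * v)
  return outer.gradient(gv, x)

Q = tf.linalg.diag([1.0, 2.0, 3.0])
quad = lambda x: 0.5 * tf.matmul(x, tf.matmul(Q, x), transpose_a=True)
x = tf.ones((3, 1))
v = tf.constant([[0.2], [0.5], [-1.2]])
print(hessian_vector_product(quad, x, v))  # Should equal Q @ v.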
Example 9: test_ModelHessianOnDataset
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import matmul [as alias]
def test_ModelHessianOnDataset(self, reduce_op: Text):
  """Tests the HVP for a neural network."""
  x = tf.random.uniform([32, 3], minval=-1, maxval=1, seed=0)
  model = tf.keras.Sequential([
      tf.keras.layers.Dense(7, activation='relu'),
      tf.keras.layers.Dense(2),
  ])
  _ = model(x)  # Call first time to initialize the weights.
  labels = tf.random.uniform((32, 2), minval=-1, maxval=1, seed=1)
  # Computes a reference Hessian vector product by computing the Hessian
  # explicitly.
  mse_loss = tf.keras.losses.MeanSquaredError(
      reduction=(tf.keras.losses.Reduction.SUM if reduce_op == 'SUM'
                 else tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE))

  def loss_on_full_dataset(parameters):
    del parameters  # Used implicitly when calling the model.
    return mse_loss(labels, model(x))

  model_hessian = test_util.hessian_as_matrix(
      loss_on_full_dataset, model.trainable_variables)
  num_params = sum(np.prod(w.shape) for w in model.trainable_variables)
  v = tf.random.uniform((num_params, 1), minval=-1, maxval=1, seed=2)
  hvp_ref = tf.matmul(model_hessian, v)

  # Compute the same HVP without computing the Hessian.
  def loss_fn(model, inputs):
    x, y = inputs
    preds = model(x)
    return mse_loss(preds, y)

  hvp_to_test = matrix_vector_product.model_hessian_vector_product(
      loss_fn,
      model,
      tf.data.Dataset.from_tensor_slices((x, labels)).batch(5),
      v,
      reduce_op=reduce_op)
  self.assertAllClose(hvp_to_test, hvp_ref)
Example 10: test_FullOrderRecovery
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import matmul [as alias]
def test_FullOrderRecovery(self):
  """Matrix Q should be recovered by running Lanczos with dim=order."""
  Q = tf.linalg.diag([1.0, 2.0, 3.0])
  def Qv(v):
    return tf.matmul(Q, v)
  V, T = lanczos_algorithm.lanczos_algorithm(Qv, 3, 3)
  Q_lanczos = tf.matmul(tf.matmul(V, T), V, transpose_b=True)
  self.assertAllClose(Q_lanczos, Q, atol=1e-7)
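As a reference for what the library call is expected to produce, here is a compact, self-contained Lanczos iteration with full reorthogonalization. It is a sketch under the assumption that `lanczos_algorithm.lanczos_algorithm(matvec, dim, order)` returns analogous (V, T) factors for a symmetric operator:

import numpy as np

def lanczos(matvec, dim, order, seed=0):
  rng = np.random.default_rng(seed)
  V = np.zeros((dim, order))
  T = np.zeros((order, order))
  v = rng.standard_normal(dim)
  v /= np.linalg.norm(v)
  beta = 0.0
  v_prev = np.zeros(dim)
  for j in range(order):
    V[:, j] = v
    w = matvec(v)
    alpha = w @ v
    T[j, j] = alpha
    w = w - alpha * v - beta * v_prev
    w -= V[:, :j + 1] @ (V[:, :j + 1].T @ w)  # Full reorthogonalization.
    beta = np.linalg.norm(w)  # Assumes no breakdown (beta stays nonzero).
    if j + 1 < order:
      T[j, j + 1] = T[j + 1, j] = beta
      v_prev, v = v, w / beta
  return V, T

Q = np.diag([1.0, 2.0, 3.0])
V, T = lanczos(lambda v: Q @ v, 3, 3)
np.testing.assert_allclose(V @ T @ V.T, Q, atol=1e-7)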
Example 11: test_FullOrderRecoveryOnModel
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import matmul [as alias]
def test_FullOrderRecoveryOnModel(self):
  """Hessian should be recovered by running Lanczos with order=dim."""
  x = tf.random.uniform((32, 3), minval=-1, maxval=1, seed=0)
  model = tf.keras.Sequential([
      tf.keras.layers.Dense(7, activation='relu'),
      tf.keras.layers.Dense(2),
  ])
  _ = model(x)  # Call first time to initialize the weights.
  labels = tf.random.uniform((32, 2), minval=-1, maxval=1, seed=1)
  # Compute the Hessian explicitly:
  mse_loss = tf.keras.losses.MeanSquaredError()

  def loss_on_full_dataset(parameters):
    del parameters  # Used implicitly when calling the model.
    return mse_loss(labels, model(x))

  model_hessian = test_util.hessian_as_matrix(
      loss_on_full_dataset, model.trainable_variables)

  # Compute a full-rank approximation of the Hessian using Lanczos, which
  # should then be equal to the Hessian.
  def loss_fn(model, inputs):
    x, y = inputs
    preds = model(x)
    return mse_loss(preds, y)

  w_dim = sum(np.prod(w.shape) for w in model.trainable_variables)
  V, T = lanczos_algorithm.approximate_hessian(
      model,
      loss_fn,
      tf.data.Dataset.from_tensor_slices((x, labels)).batch(5),
      order=w_dim)
  model_hessian_lanczos = tf.matmul(tf.matmul(V, T), V, transpose_b=True)
  self.assertAllClose(model_hessian_lanczos, model_hessian)
Example 12: test_data_fitting
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import matmul [as alias]
def test_data_fitting(self):
  """Tests MLE estimation for a simple geometric GLM."""
  n, dim = 100, 3
  dtype = tf.float64
  np.random.seed(234095)
  x = np.random.choice([0, 1], size=[dim, n])
  s = 0.01 * np.sum(x, 0)
  p = 1. / (1 + np.exp(-s))
  y = np.random.geometric(p)
  x_data = tf.convert_to_tensor(value=x, dtype=dtype)
  y_data = tf.expand_dims(tf.convert_to_tensor(value=y, dtype=dtype), -1)

  def neg_log_likelihood(state):
    state_ext = tf.expand_dims(state, 0)
    linear_part = tf.matmul(state_ext, x_data)
    linear_part_ex = tf.stack([tf.zeros_like(linear_part), linear_part],
                              axis=0)
    term1 = tf.squeeze(
        tf.matmul(tf.reduce_logsumexp(linear_part_ex, axis=0), y_data), -1)
    term2 = (0.5 * tf.reduce_sum(state_ext * state_ext, axis=-1) -
             tf.reduce_sum(linear_part, axis=-1))
    return tf.squeeze(term1 + term2)

  self._check_algorithm(
      func=neg_log_likelihood,
      start_point=np.ones(shape=[dim]),
      expected_argmin=[-0.020460034354, 0.171708568111, 0.021200423717])
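Why `neg_log_likelihood` has this form: for a geometric likelihood with p = sigmoid(s), -log P(Y=y) = -(y-1) log(1-p) - log p = y*softplus(s) - s, which is exactly term1 minus the linear part, while term2 additionally adds the L2 prior 0.5*||state||^2 (note that logsumexp([0, s]) = softplus(s)). A quick numeric check of the identity (the values are illustrative):

import numpy as np

def softplus(s):
  return np.logaddexp(0.0, s)  # log(1 + exp(s)), computed stably.

s, y = 0.3, 4
p = 1.0 / (1.0 + np.exp(-s))
direct = -(y - 1) * np.log(1 - p) - np.log(p)
np.testing.assert_allclose(direct, y * softplus(s) - s, rtol=1e-12)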
Example 13: _expected_exercise_fn
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import matmul [as alias]
def _expected_exercise_fn(design, continuation_value, exercise_value):
  """Returns the expected continuation value for each path.

  Args:
    design: A real `Tensor` of shape `[basis_size, num_samples]`.
    continuation_value: A `Tensor` of shape `[num_samples, payoff_dim]` and of
      the same dtype as `design`. The optimal value of the option conditional
      on not exercising now or earlier, taking future information into
      account.
    exercise_value: A `Tensor` of the same shape and dtype as
      `continuation_value`. Value of the option if exercised immediately at
      the current time.

  Returns:
    A `Tensor` of the same shape and dtype as `continuation_value` whose
    `(n, v)`-th entry represents the expected continuation value of sample
    path `n` under the `v`-th payoff scheme.
  """
  batch_design = tf.broadcast_to(
      design[..., None], design.shape + [continuation_value.shape[-1]])
  mask = tf.cast(exercise_value > 0, design.dtype)
  # Zero out contributions from samples we'd never exercise at this point
  # (i.e., these extra observations do not change the regression
  # coefficients).
  masked = tf.transpose(batch_design * mask, perm=(2, 1, 0))
  # For design matrix X and response y, the coefficients beta of the best
  # linear unbiased estimate are contained in the equation X'X beta = X'y.
  # Here `lhs` is X'X and `rhs` is X'y, or rather a tensor of such left hand
  # and right hand sides, one for each payoff dimension.
  lhs = tf.matmul(masked, masked, transpose_a=True)
  # Use the pseudo inverse of the regression matrix to ensure stability of
  # the algorithm.
  lhs_pinv = tf.linalg.pinv(lhs)
  rhs = tf.matmul(
      masked,
      tf.expand_dims(tf.transpose(continuation_value), axis=-1),
      transpose_a=True)
  beta = tf.matmul(lhs_pinv, rhs)
  continuation = tf.matmul(tf.transpose(batch_design, perm=(2, 1, 0)), beta)
  return tf.maximum(tf.transpose(tf.squeeze(continuation, -1)), 0.0)
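The regression step above in miniature: for a design matrix X and response y, the least-squares coefficients satisfy X'X beta = X'y, and the pseudo inverse keeps the solve stable when masking leaves X'X rank deficient. A small NumPy check (the data is illustrative):

import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((100, 3))   # [num_samples, basis_size]
beta_true = np.array([0.5, -1.0, 2.0])
y = X @ beta_true
lhs = X.T @ X                       # X'X
rhs = X.T @ y                       # X'y
beta = np.linalg.pinv(lhs) @ rhs    # Solves the normal equations.
np.testing.assert_allclose(beta, beta_true, atol=1e-10)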
Example 14: expected_exercise_fn
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import matmul [as alias]
def expected_exercise_fn(design, continuation_value, exercise_value):
  """Returns the expected continuation value for each path.

  Args:
    design: A real `Tensor` of shape `[basis_size, num_samples]`.
    continuation_value: A `Tensor` of shape `[num_samples, payoff_dim]` and of
      the same dtype as `design`. The optimal value of the option conditional
      on not exercising now or earlier, taking future information into
      account.
    exercise_value: A `Tensor` of the same shape and dtype as
      `continuation_value`. Value of the option if exercised immediately at
      the current time.

  Returns:
    A `Tensor` of the same shape and dtype as `continuation_value` whose
    `(n, v)`-th entry represents the expected continuation value of sample
    path `n` under the `v`-th payoff scheme.
  """
  # We wish to value each option under different payoffs, expressed through a
  # multidimensional payoff function. While the basis calculated from the
  # sample paths is the same for each payoff, the LSM algorithm requires us to
  # fit a regression model only on the in-the-money paths, which are payoff
  # dependent. Hence we create multiple copies of the regression design
  # (basis) matrix and zero out rows for out-of-the-money paths under each
  # payoff.
  batch_design = tf.broadcast_to(
      tf.expand_dims(design, -1),
      design.shape + [continuation_value.shape[-1]])
  mask = tf.cast(exercise_value > 0, design.dtype)
  # Zero out contributions from samples we'd never exercise at this point
  # (i.e., these extra observations do not change the regression
  # coefficients).
  masked = tf.transpose(batch_design * mask, perm=(2, 1, 0))
  # For design matrix X and response y, the coefficients beta of the best
  # linear unbiased estimate are contained in the equation X'X beta = X'y.
  # Here `lhs` is X'X and `rhs` is X'y, or rather a tensor of such left hand
  # and right hand sides, one for each payoff dimension.
  lhs = tf.matmul(masked, masked, transpose_a=True)
  # Use the pseudo inverse of the regression matrix to ensure stability of
  # the algorithm.
  lhs_pinv = tf.linalg.pinv(lhs)
  rhs = tf.matmul(
      masked,
      tf.expand_dims(tf.transpose(continuation_value), -1),
      transpose_a=True)
  beta = tf.linalg.matmul(lhs_pinv, rhs)
  continuation = tf.matmul(tf.transpose(batch_design, perm=(2, 1, 0)), beta)
  return tf.maximum(tf.transpose(tf.squeeze(continuation, -1)), 0.0)
Example 15: _sample_paths
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import matmul [as alias]
def _sample_paths(self, times, grid_step, keep_mask, num_requested_times,
                  num_samples, initial_state, random_type, seed, swap_memory):
  """Returns a sample of paths from the process."""
  dt = times[1:] - times[:-1]
  sqrt_dt = tf.sqrt(dt)
  current_state = initial_state + tf.zeros(
      [num_samples, self.dim()], dtype=initial_state.dtype)
  steps_num = tf.shape(dt)[-1]
  wiener_mean = tf.zeros((self.dim(), 1), dtype=self._dtype)
  cond_fn = lambda i, *args: i < steps_num

  def step_fn(i, written_count, current_state, result):
    """Performs one step of the Euler scheme."""
    current_time = times[i + 1]
    dw = random_ops.mv_normal_sample((num_samples,),
                                     mean=wiener_mean,
                                     random_type=random_type,
                                     seed=seed)
    dw = dw * sqrt_dt[i]
    dt_inc = dt[i] * self.drift_fn()(current_time, current_state)  # pylint: disable=not-callable
    dw_inc = tf.squeeze(
        tf.matmul(self.volatility_fn()(current_time, current_state), dw), -1)  # pylint: disable=not-callable
    next_state = current_state + dt_inc + dw_inc

    def write_next_state_to_result():
      # Replace result[:, written_count, :] with next_state.
      one_hot = tf.one_hot(written_count, depth=num_requested_times)
      mask = tf.expand_dims(one_hot > 0, axis=-1)
      return tf.where(mask, tf.expand_dims(next_state, axis=1), result)

    # Keep only the states at times requested by the user.
    result = tf.cond(keep_mask[i + 1],
                     write_next_state_to_result,
                     lambda: result)
    written_count += tf.cast(keep_mask[i + 1], dtype=tf.int32)
    return i + 1, written_count, next_state, result

  # The maximum number of iterations is passed to the while loop below. It
  # improves the performance of the while loop on a GPU and is needed for
  # XLA-compilation compatibility.
  maximum_iterations = (
      tf.cast(1. / grid_step, dtype=tf.int32) + tf.size(times))
  result = tf.zeros((num_samples, num_requested_times, self.dim()))
  _, _, _, result = tf.compat.v1.while_loop(
      cond_fn,
      step_fn, (0, 0, current_state, result),
      maximum_iterations=maximum_iterations,
      swap_memory=swap_memory)
  return result
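The loop body above is an Euler-Maruyama step. A stripped-down, self-contained version for a one-dimensional process dX = mu(t, X) dt + sigma(t, X) dW may make the structure clearer (a sketch only; the class above additionally handles multi-dimensional states, requested output times, and XLA-friendly while loops):

import tensorflow.compat.v2 as tf

def euler_sample(mu, sigma, x0, times, num_samples, seed=42):
  dt = times[1:] - times[:-1]
  x = tf.fill([num_samples], x0)
  for i in range(int(dt.shape[0])):
    # Brownian increment with variance dt[i].
    dw = tf.random.stateless_normal(
        [num_samples], seed=[seed, i]) * tf.sqrt(dt[i])
    x = x + mu(times[i], x) * dt[i] + sigma(times[i], x) * dw
  return x

times = tf.linspace(0.0, 1.0, 101)
paths = euler_sample(lambda t, x: 0.05 * x,  # Drift of a GBM.
                     lambda t, x: 0.2 * x,   # Volatility of a GBM.
                     x0=1.0, times=times, num_samples=10000)
print(float(tf.reduce_mean(paths)))  # ~exp(0.05) ≈ 1.051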