This article collects typical usage examples of the tensorflow.compat.v2.square method in Python. If you are wondering what v2.square does or how to use it in practice, the curated examples below may help; you can also explore other members of the tensorflow.compat.v2 module.
Nine code examples of the v2.square method follow, ordered by popularity by default.
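For orientation, here is a minimal sketch of what tf.square computes: the elementwise square of a tensor, not a matrix square. The values below are illustrative only:

import tensorflow.compat.v2 as tf

x = tf.constant([1.0, -2.0, 3.0])
print(tf.square(x))  # [1.0, 4.0, 9.0], computed elementwise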
Example 1: loss_fn
# Required import: from tensorflow.compat import v2 [as an alias]
# Or: from tensorflow.compat.v2 import square [as an alias]
def loss_fn(params, inputs, targets):
predicted = params[0] * inputs + params[1]
loss = tf.reduce_mean(input_tensor=tf.square(predicted - targets))
return tf_np.asarray(loss)
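A hypothetical way to call loss_fn, assuming tf_np is an alias for tensorflow.experimental.numpy (the snippet above does not show that import):

import tensorflow.compat.v2 as tf
import tensorflow.experimental.numpy as tf_np  # assumed alias used by loss_fn

params = [tf.constant(2.0), tf.constant(0.5)]  # slope and intercept
inputs = tf.constant([1.0, 2.0, 3.0])
targets = 2.0 * inputs + 0.5  # generated from the true parameters
print(loss_fn(params, inputs, targets))  # mean squared error, ~0.0 here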
Example 2: _init_norm
# Required import: from tensorflow.compat import v2 [as an alias]
# Or: from tensorflow.compat.v2 import square [as an alias]
def _init_norm(self):
"""Set the norm of the weight vector."""
kernel_norm = tf.sqrt(tf.reduce_sum(tf.square(self.v), axis=self.kernel_norm_axes))
self.g.assign(kernel_norm)
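The tf.sqrt(tf.reduce_sum(tf.square(...))) pattern above is the Euclidean norm along the given axes; a quick standalone check that it matches tf.norm (the shape and axis here are illustrative):

import tensorflow.compat.v2 as tf

v = tf.random.normal([3, 4])
by_hand = tf.sqrt(tf.reduce_sum(tf.square(v), axis=0))
built_in = tf.norm(v, axis=0)
print(tf.reduce_max(tf.abs(by_hand - built_in)))  # ~0.0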
Example 3: _rosenbrock
# Required import: from tensorflow.compat import v2 [as an alias]
# Or: from tensorflow.compat.v2 import square [as an alias]
def _rosenbrock(x):
"""See https://en.wikipedia.org/wiki/Rosenbrock_function."""
term1 = 100 * tf.reduce_sum(tf.square(x[1:] - tf.square(x[:-1])))
term2 = tf.reduce_sum(tf.square(1 - x[:-1]))
return term1 + term2
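A quick sanity check, assuming _rosenbrock is in scope: the function's global minimum is 0 at x = (1, ..., 1):

import tensorflow.compat.v2 as tf

print(_rosenbrock(tf.ones([5])))            # 0.0 at the global minimum
print(_rosenbrock(tf.constant([0.0, 0.0])))  # 1.0, from the (1 - x)^2 term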
Example 4: _mc_cormick
# Required import: from tensorflow.compat import v2 [as an alias]
# Or: from tensorflow.compat.v2 import square [as an alias]
def _mc_cormick(coord):
"""See https://www.sfu.ca/~ssurjano/mccorm.html."""
x = coord[0]
y = coord[1]
return tf.sin(x + y) + tf.square(x - y) - 1.5 * x + 2.5 * y + 1
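For reference, the McCormick function's global minimum quoted on the linked page is about -1.9133 at (-0.54719, -1.54719); a quick check, assuming _mc_cormick is in scope:

import tensorflow.compat.v2 as tf

print(_mc_cormick(tf.constant([-0.54719, -1.54719])))  # ~ -1.9133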
Example 5: test_multiple_functions
# Required import: from tensorflow.compat import v2 [as an alias]
# Or: from tensorflow.compat.v2 import square [as an alias]
def test_multiple_functions(self):
# Define 3 independent quadratic functions, each with its own minimum.
minima = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
func = lambda x: tf.reduce_sum(tf.square(x - minima), axis=1)
self._check_algorithm(
func=func, start_point=np.zeros_like(minima), expected_argmin=minima)
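Evaluating the batched objective directly shows why expected_argmin equals minima: each row of the input is scored against its own quadratic (a standalone sketch, not part of the test harness):

import numpy as np
import tensorflow.compat.v2 as tf

minima = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
func = lambda x: tf.reduce_sum(tf.square(x - minima), axis=1)
print(func(minima))                 # [0. 0. 0.] -- each row at its own minimum
print(func(np.zeros_like(minima)))  # [5. 25. 61.]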
Example 6: _compute_baseline_loss
# Required import: from tensorflow.compat import v2 [as an alias]
# Or: from tensorflow.compat.v2 import square [as an alias]
def _compute_baseline_loss(advantages, step):
# Loss for the baseline, summed over the time dimension. Multiply by 0.5 to
# match the standard update rule:
# d(loss) / d(baseline) = advantage
baseline_cost = .5 * tf.square(advantages)
tf.summary.scalar(
'loss/baseline_cost', tf.reduce_mean(baseline_cost), step=step)
return baseline_cost
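The 0.5 factor can be checked with a gradient tape: the gradient of 0.5 * a**2 with respect to a is a itself, matching the update rule quoted in the comment (a minimal sketch with made-up advantage values):

import tensorflow.compat.v2 as tf

adv = tf.Variable([0.3, -1.2])
with tf.GradientTape() as tape:
    loss = tf.reduce_sum(0.5 * tf.square(adv))
print(tape.gradient(loss, adv))  # [0.3, -1.2], equal to adv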
Example 7: testEuropeanCallDynamicVol
# Required import: from tensorflow.compat import v2 [as an alias]
# Or: from tensorflow.compat.v2 import square [as an alias]
def testEuropeanCallDynamicVol(self):
"""Price for the European Call option with time-dependent volatility."""
num_equations = 1 # Number of PDEs
num_grid_points = 1024 # Number of grid points
dtype = np.float64
# Build a log-uniform grid
s_max = 300.
grid = grids.log_uniform_grid(minimums=[0.01], maximums=[s_max],
sizes=[num_grid_points],
dtype=dtype)
# Specify volatilities and interest rates for the options
expiry = 1.0
strike = 50.0
# Volatility is of the form `sigma**2(t) = 1 / 6 + 1 / 2 * t**2`.
def second_order_coeff_fn(t, location_grid):
return [[(1. / 6 + t**2 / 2) * tf.square(location_grid[0]) / 2]]
@dirichlet
def lower_boundary_fn(t, location_grid):
del t, location_grid
return 0
@dirichlet
def upper_boundary_fn(t, location_grid):
del t
return location_grid[0][-1] - strike
final_values = tf.nn.relu(grid[0] - strike)
# Broadcast to the shape of value dimension, if necessary.
final_values += tf.zeros([num_equations, num_grid_points],
dtype=dtype)
# Estimate European call option price
estimate = fd_solvers.solve_backward(
start_time=expiry,
end_time=0,
coord_grid=grid,
values_grid=final_values,
num_steps=None,
start_step_count=0,
time_step=tf.constant(0.01, dtype=dtype),
one_step_fn=crank_nicolson_step(),
boundary_conditions=[(lower_boundary_fn, upper_boundary_fn)],
values_transform_fn=None,
second_order_coeff_fn=second_order_coeff_fn,
dtype=dtype)[0]
value_grid = self.evaluate(estimate)[0, :]
# Grid location 849 corresponds to spot 51.9537332.
loc_1 = 849
# True call option price (obtained using black_scholes_price function)
call_price = 12.582092
self.assertAllClose(call_price, value_grid[loc_1], rtol=1e-02, atol=1e-02)
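The quoted reference price can be reproduced with the closed-form Black-Scholes formula, using the time-averaged variance of sigma^2(t) = 1/6 + t^2/2 over [0, 1], which integrates to 1/3 (a standalone sketch assuming zero interest rate, consistent with the PDE above):

import numpy as np
from scipy.stats import norm

spot, strike, t = 51.9537332, 50.0, 1.0
sigma = np.sqrt(1.0 / 3.0)  # sqrt of the integrated variance over [0, 1]
d1 = (np.log(spot / strike) + 0.5 * sigma**2 * t) / (sigma * np.sqrt(t))
d2 = d1 - sigma * np.sqrt(t)
print(spot * norm.cdf(d1) - strike * norm.cdf(d2))  # ~12.582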
Example 8: testCompareExpandedAndNotExpandedPdes
# Required import: from tensorflow.compat import v2 [as an alias]
# Or: from tensorflow.compat.v2 import square [as an alias]
def testCompareExpandedAndNotExpandedPdes(self):
"""Tests comparing PDEs with expanded derivatives and without.
Take equation `u_{t} - [x^2 u]_{xx} + [x u]_{x} = 0`.
Expanding the derivatives yields `u_{t} - x^2 u_{xx} - 3x u_{x} - u = 0`.
Solve both equations and expect the results to be equal.
"""
grid = grids.uniform_grid(
minimums=[0], maximums=[1], sizes=[501], dtype=tf.float32)
xs = grid[0]
final_t = 0.1
time_step = 0.001
initial = _reference_pde_initial_cond(xs) # arbitrary
def inner_second_order_coeff_fn(t, coord_grid):
del t
x = coord_grid[0]
return [[-tf.square(x)]]
def inner_first_order_coeff_fn(t, coord_grid):
del t
x = coord_grid[0]
return [x]
result_not_expanded = fd_solvers.solve_forward(
start_time=0,
end_time=final_t,
coord_grid=grid,
values_grid=initial,
time_step=time_step,
inner_second_order_coeff_fn=inner_second_order_coeff_fn,
inner_first_order_coeff_fn=inner_first_order_coeff_fn)[0]
def second_order_coeff_fn(t, coord_grid):
del t
x = coord_grid[0]
return [[-tf.square(x)]]
def first_order_coeff_fn(t, coord_grid):
del t
x = coord_grid[0]
return [-3 * x]
def zeroth_order_coeff_fn(t, coord_grid):
del t, coord_grid
return -1
result_expanded = fd_solvers.solve_forward(
start_time=0,
end_time=final_t,
coord_grid=grid,
values_grid=initial,
time_step=time_step,
second_order_coeff_fn=second_order_coeff_fn,
first_order_coeff_fn=first_order_coeff_fn,
zeroth_order_coeff_fn=zeroth_order_coeff_fn)[0]
self.assertAllClose(
result_not_expanded, result_expanded, atol=1e-3, rtol=1e-3)
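The expansion in the docstring follows from the product rule: [x^2 u]_{xx} = x^2 u_{xx} + 4x u_x + 2u and [x u]_x = x u_x + u, so u_t - [x^2 u]_{xx} + [x u]_x = u_t - x^2 u_{xx} - 3x u_x - u. A quick symbolic check with sympy (an add-on sketch, not part of the original test):

import sympy as sp

x = sp.symbols('x')
u = sp.Function('u')(x)
print(sp.diff(x**2 * u, x, 2))  # x**2*u'' + 4*x*u' + 2*u
print(sp.diff(x * u, x))        # x*u' + u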
Example 9: estimate_tails
# Required import: from tensorflow.compat import v2 [as an alias]
# Or: from tensorflow.compat.v2 import square [as an alias]
def estimate_tails(func, target, shape, dtype):
"""Estimates approximate tail quantiles.
This runs a simple Adam iteration to determine tail quantiles. The
objective is to find an `x` such that:
```
func(x) == target
```
For instance, if `func` is a CDF and the target is a quantile value, this
would find the approximate location of that quantile. Note that `func` is
assumed to be monotonic. When each tail estimate has passed the optimal value
of `x`, the algorithm does 10 additional iterations and then stops.
This operation is vectorized. The tensor shape of `x` is given by `shape`, and
`target` must have a shape that is broadcastable to the output of `func(x)`.
Arguments:
func: A callable that computes cumulative distribution function, survival
function, or similar.
target: The desired target value.
shape: The shape of the `tf.Tensor` representing `x`.
dtype: The `tf.dtypes.Dtype` of the computation (and the return value).
Returns:
A `tf.Tensor` representing the solution (`x`).
"""
with tf.name_scope("estimate_tails"):
dtype = tf.as_dtype(dtype)
shape = tf.convert_to_tensor(shape, tf.int32)
target = tf.convert_to_tensor(target, dtype)
def loop_cond(tails, m, v, count):
del tails, m, v # unused
return tf.reduce_min(count) < 10
def loop_body(tails, m, v, count):
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(tails)
loss = abs(func(tails) - target)
grad = tape.gradient(loss, tails)
m = .5 * m + .5 * grad # Adam mean estimate.
v = .9 * v + .1 * tf.square(grad) # Adam variance estimate.
tails -= .5 * m / (tf.sqrt(v) + 1e-7)
# Start counting when the gradient flips sign (note that this assumes
# `tails` is initialized to zero).
count = tf.where(
tf.math.logical_or(count > 0, tails * grad > 0),
count + 1, count)
return tails, m, v, count
init_tails = tf.zeros(shape, dtype=dtype)
init_m = tf.zeros(shape, dtype=dtype)
init_v = tf.ones(shape, dtype=dtype)
init_count = tf.zeros(shape, dtype=tf.int32)
return tf.while_loop(
loop_cond, loop_body, (init_tails, init_m, init_v, init_count),
back_prop=False)[0]
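A hypothetical call, using the logistic CDF so the answer has a closed form (logit(0.95) = ln(19) ~ 2.944); convergence speed depends on the function passed in, so treat this as a sketch:

import tensorflow.compat.v2 as tf

x = estimate_tails(tf.math.sigmoid, 0.95, shape=[1], dtype=tf.float32)
print(x)  # ~[2.944], since sigmoid(2.944) ~= 0.95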