This article collects typical usage examples of the Python method tensorflow.python.ops.math_ops.cos. If you are unsure what math_ops.cos does or how to call it, the curated code examples below may help. You can also explore the containing module, tensorflow.python.ops.math_ops, for related usage.
The following 14 code examples of math_ops.cos are sorted by popularity by default. Upvoting the examples you like or find useful helps the system recommend better Python code samples.
Example 1: cosine_decay
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import cos [as alias]
def cosine_decay(learning_rate, global_step, max_step, name=None):
    import numpy as np
    from tensorflow.python.framework import ops
    from tensorflow.python.framework import constant_op
    from tensorflow.python.ops import math_ops
    with ops.name_scope(name, "CosineDecay",
                        [learning_rate, global_step, max_step]) as name:
        learning_rate = ops.convert_to_tensor(0.5 * learning_rate, name="learning_rate")
        dtype = learning_rate.dtype
        global_step = math_ops.cast(global_step, dtype)
        const = math_ops.cast(constant_op.constant(1), learning_rate.dtype)
        freq = math_ops.cast(constant_op.constant(np.pi / max_step), learning_rate.dtype)
        osc = math_ops.cos(math_ops.multiply(freq, global_step))
        osc = math_ops.add(osc, const)
        return math_ops.multiply(osc, learning_rate, name=name)
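A minimal usage sketch (assuming TF 1.x graph mode; the step count and learning rate below are illustrative, not taken from the original source):

import tensorflow as tf

global_step = tf.train.get_or_create_global_step()
# Starts at 0.1, follows 0.05 * (1 + cos(pi * step / 1000)), and reaches 0 at step 1000.
lr = cosine_decay(learning_rate=0.1, global_step=global_step, max_step=1000)
optimizer = tf.train.GradientDescentOptimizer(lr)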
Example 2: test_all_unary_elemwise
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import cos [as alias]
def test_all_unary_elemwise():
    _test_forward_unary_elemwise(_test_abs)
    _test_forward_unary_elemwise(_test_floor)
    _test_forward_unary_elemwise(_test_exp)
    _test_forward_unary_elemwise(_test_log)
    _test_forward_unary_elemwise(_test_sin)
    _test_forward_unary_elemwise(_test_sqrt)
    _test_forward_unary_elemwise(_test_rsqrt)
    _test_forward_unary_elemwise(_test_neg)
    _test_forward_unary_elemwise(_test_square)
    # ceil and cos come with TFLite 1.14.0.post1 fbs schema
    if package_version.parse(tf.VERSION) >= package_version.parse('1.14.0'):
        _test_forward_unary_elemwise(_test_ceil)
        _test_forward_unary_elemwise(_test_cos)
        _test_forward_unary_elemwise(_test_round)
        # This fails with TF and TFLite 1.15.2 and could not have been tested
        # in CI or anywhere else. The failure mode is a backtrace from the
        # converter asking us to provide a custom Tan operator implementation.
        # _test_forward_unary_elemwise(_test_tan)
        _test_forward_unary_elemwise(_test_elu)
#######################################################################
# Element-wise
# ------------
Example 3: _compute_eta_t
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import cos [as alias]
def _compute_eta_t(self):
    PI = 3.141592653589793
    t_frac = math_ops.cast(self.t_cur / (self.total_iterations - 1), 'float32')
    eta_t = self.eta_min + 0.5 * (self.eta_max - self.eta_min) * \
            (1 + math_ops.cos(PI * t_frac))
    return eta_t
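The same cosine-annealing schedule in plain NumPy, for reference (a sketch with illustrative eta_min and eta_max values; the argument names mirror the attributes used above):

import numpy as np

def eta_t(t_cur, total_iterations, eta_min=0.0, eta_max=1.0):
    # eta starts at eta_max and anneals to eta_min over total_iterations steps.
    t_frac = t_cur / (total_iterations - 1)
    return eta_min + 0.5 * (eta_max - eta_min) * (1 + np.cos(np.pi * t_frac))

print(eta_t(0, 100))   # 1.0 (eta_max at the first iteration)
print(eta_t(99, 100))  # 0.0 (eta_min at the last iteration)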
Example 4: _SinGrad
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import cos [as alias]
def _SinGrad(op, grad):
    """Returns grad * cos(x)."""
    x = op.inputs[0]
    with ops.control_dependencies([grad.op]):
        x = math_ops.conj(x)
        return grad * math_ops.cos(x)
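A quick TF 1.x graph-mode check that the registered Sin gradient indeed evaluates to cos(x) (just a sketch; the sample points are arbitrary):

import numpy as np
import tensorflow as tf

x = tf.constant(np.linspace(0.0, np.pi, 5), dtype=tf.float32)
(dy_dx,) = tf.gradients(tf.sin(x), x)
with tf.Session() as sess:
    print(sess.run(dy_dx))  # matches np.cos(np.linspace(0.0, np.pi, 5))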
Example 5: _TanGrad
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import cos [as alias]
def _TanGrad(op, grad):
    """Returns grad * 1/sec^2(x)."""
    x = op.inputs[0]
    with ops.control_dependencies([grad.op]):
        x = math_ops.conj(x)
        secx = math_ops.reciprocal(math_ops.cos(x))
        secx2 = math_ops.square(secx)
        return grad * secx2
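An analogous TF 1.x check for the Tan gradient, which should equal 1 / cos(x)^2 (again a sketch with arbitrary sample points):

import numpy as np
import tensorflow as tf

x = tf.constant(np.linspace(-1.0, 1.0, 5), dtype=tf.float32)
(dy_dx,) = tf.gradients(tf.tan(x), x)
with tf.Session() as sess:
    print(sess.run(dy_dx))  # matches 1.0 / np.cos(np.linspace(-1.0, 1.0, 5)) ** 2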
Example 6: cos
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import cos [as alias]
def cos(x):
    """Computes cos of x element-wise.

    Arguments:
        x: Tensor or variable.

    Returns:
        A tensor.
    """
    return math_ops.cos(x)
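Calling the wrapper is equivalent to calling math_ops.cos directly; a small TF 1.x usage sketch:

import numpy as np
import tensorflow as tf

x = tf.constant([0.0, np.pi / 2, np.pi])
y = cos(x)                    # the wrapper defined above
with tf.Session() as sess:
    print(sess.run(y))        # approximately [1., 0., -1.]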
Example 7: angles_to_projective_transforms
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import cos [as alias]
def angles_to_projective_transforms(angles, image_height, image_width):
    """Returns projective transform(s) for the given angle(s).

    Args:
        angles: A scalar angle to rotate all images by, or (for batches of images)
            a vector with an angle to rotate each image in the batch.
        image_height: Height of the image(s) to be transformed.
        image_width: Width of the image(s) to be transformed.

    Returns:
        A tensor of shape (num_images, 8). Projective transforms which can be given
        to `tf.contrib.image.transform`.
    """
    angle_or_angles = ops.convert_to_tensor(
        angles, name="angles", dtype=dtypes.float32)
    if len(angle_or_angles.get_shape()) == 0:  # pylint: disable=g-explicit-length-test
        angles = angle_or_angles[None]
    elif len(angle_or_angles.get_shape()) == 1:
        angles = angle_or_angles
    else:
        raise TypeError("Angles should have rank 0 or 1.")
    x_offset = ((image_width - 1) - (math_ops.cos(angles) *
                                     (image_width - 1) - math_ops.sin(angles) *
                                     (image_height - 1))) / 2.0
    y_offset = ((image_height - 1) - (math_ops.sin(angles) *
                                      (image_width - 1) + math_ops.cos(angles) *
                                      (image_height - 1))) / 2.0
    num_angles = array_ops.shape(angles)[0]
    return array_ops.concat(
        values=[
            math_ops.cos(angles)[:, None],
            -math_ops.sin(angles)[:, None],
            x_offset[:, None],
            math_ops.sin(angles)[:, None],
            math_ops.cos(angles)[:, None],
            y_offset[:, None],
            array_ops.zeros((num_angles, 2), dtypes.float32),
        ],
        axis=1)
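A hedged usage sketch (assuming TF 1.x with tf.contrib available; the image shape and angle are illustrative):

import numpy as np
import tensorflow as tf

images = tf.placeholder(tf.float32, shape=[1, 64, 64, 3])
transforms = angles_to_projective_transforms(
    angles=np.pi / 4, image_height=64, image_width=64)
# Rotate the batch by 45 degrees around the image center.
rotated = tf.contrib.image.transform(images, transforms)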
Example 8: setUp
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import cos [as alias]
def setUp(self):
    super(CoreUnaryOpsTest, self).setUp()
    self.ops = [
        ('abs', operator.abs, math_ops.abs, core.abs_function),
        ('neg', operator.neg, math_ops.negative, core.neg),
        # TODO(shoyer): add unary + to core TensorFlow
        ('pos', None, None, None),
        ('sign', None, math_ops.sign, core.sign),
        ('reciprocal', None, math_ops.reciprocal, core.reciprocal),
        ('square', None, math_ops.square, core.square),
        ('round', None, math_ops.round, core.round_function),
        ('sqrt', None, math_ops.sqrt, core.sqrt),
        ('rsqrt', None, math_ops.rsqrt, core.rsqrt),
        ('log', None, math_ops.log, core.log),
        ('exp', None, math_ops.exp, core.exp),
        ('log', None, math_ops.log, core.log),
        ('ceil', None, math_ops.ceil, core.ceil),
        ('floor', None, math_ops.floor, core.floor),
        ('cos', None, math_ops.cos, core.cos),
        ('sin', None, math_ops.sin, core.sin),
        ('tan', None, math_ops.tan, core.tan),
        ('acos', None, math_ops.acos, core.acos),
        ('asin', None, math_ops.asin, core.asin),
        ('atan', None, math_ops.atan, core.atan),
        ('lgamma', None, math_ops.lgamma, core.lgamma),
        ('digamma', None, math_ops.digamma, core.digamma),
        ('erf', None, math_ops.erf, core.erf),
        ('erfc', None, math_ops.erfc, core.erfc),
        ('lgamma', None, math_ops.lgamma, core.lgamma),
    ]
    total_size = np.prod([v.size for v in self.original_lt.axes.values()])
    self.test_lt = core.LabeledTensor(
        math_ops.cast(self.original_lt, dtypes.float32) / total_size,
        self.original_lt.axes)
Example 9: _TanGrad
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import cos [as alias]
def _TanGrad(op, grad):
    """Returns grad * 1/sec^2(x)."""
    x = op.inputs[0]
    with ops.control_dependencies([grad.op]):
        x = math_ops.conj(x)
        secx = math_ops.inv(math_ops.cos(x))
        secx2 = math_ops.square(secx)
        return grad * secx2
Example 10: _test_cos
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import cos [as alias]
def _test_cos(data):
    """One iteration of cos."""
    return _test_unary_elemwise(math_ops.cos, data)
#######################################################################
# Tan
# ---
Example 11: _SinGrad
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import cos [as alias]
def _SinGrad(op, grad):
    """Returns grad * cos(x)."""
    x = op.inputs[0]
    with ops.control_dependencies([grad]):
        x = math_ops.conj(x)
        return grad * math_ops.cos(x)
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 8, Source: math_grad.py
Example 12: _TanGrad
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import cos [as alias]
def _TanGrad(op, grad):
    """Returns grad * 1/sec^2(x)."""
    x = op.inputs[0]
    with ops.control_dependencies([grad]):
        x = math_ops.conj(x)
        secx = math_ops.reciprocal(math_ops.cos(x))
        secx2 = math_ops.square(secx)
        return grad * secx2
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 10, Source: math_grad.py
Example 13: map
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import cos [as alias]
def map(self, input_tensor):
    """Maps each row of input_tensor using random Fourier features.

    Args:
        input_tensor: a `Tensor` containing input features. Its shape is
            [batch_size, self._input_dim].

    Returns:
        A `Tensor` of shape [batch_size, self._output_dim] containing RFFM-mapped
        features.

    Raises:
        InvalidShapeError: if the shape of the `input_tensor` is inconsistent with
            the expected input dimension.
    """
    input_tensor_shape = input_tensor.get_shape()
    if len(input_tensor_shape) != 2:
        raise dkm.InvalidShapeError(
            'The shape of the tensor should be 2. Got %d instead.' %
            len(input_tensor_shape))
    features_dim = input_tensor_shape[1]
    if features_dim != self._input_dim:
        raise dkm.InvalidShapeError(
            'Invalid dimension: expected %d input features, got %d instead.' %
            (self._input_dim, features_dim))
    # Add ops that compute (deterministically) omega_matrix and bias based on
    # the provided seed.
    # TODO(sibyl-vie3Poto): Storing the mapper's parameters (omega_matrix and bias) as
    # constants incurs no RPC calls to the parameter server during distributed
    # training. However, if the parameters grow too large (for instance if they
    # don't fit into memory or if they blow up the size of the GraphDef proto),
    # storing them as constants is no longer an option. In that case, we should
    # have a heuristic to choose one of the following alternatives:
    # a) store them as variables (in the parameter server)
    # b) store them as worker-local variables
    # c) generate the omega matrix on the fly at each step
    np.random.seed(self._seed)
    omega_matrix_shape = [self._input_dim, self._output_dim]
    bias_shape = [self._output_dim]
    omega_matrix = constant_op.constant(
        np.random.normal(
            scale=1.0 / self._stddev, size=omega_matrix_shape),
        dtype=dtypes.float32)
    bias = constant_op.constant(
        np.random.uniform(
            low=0.0, high=2 * np.pi, size=bias_shape),
        dtype=dtypes.float32)
    x_omega_plus_bias = math_ops.add(
        math_ops.matmul(input_tensor, omega_matrix), bias)
    return math.sqrt(2.0 / self._output_dim) * math_ops.cos(x_omega_plus_bias)
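The mapping itself is just cos(x·Ω + b) scaled by sqrt(2 / output_dim); a plain-NumPy sketch with illustrative dimensions (not part of the original class):

import numpy as np

input_dim, output_dim, stddev, seed = 4, 16, 1.0, 42
np.random.seed(seed)
omega = np.random.normal(scale=1.0 / stddev, size=[input_dim, output_dim])
bias = np.random.uniform(low=0.0, high=2 * np.pi, size=[output_dim])

x = np.random.randn(8, input_dim)                        # a batch of 8 inputs
mapped = np.sqrt(2.0 / output_dim) * np.cos(x.dot(omega) + bias)
print(mapped.shape)                                      # (8, 16)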
Example 14: _add_sinusoids_signal
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import cos [as alias]
def _add_sinusoids_signal(x, time, min_timescale=1.0, max_timescale=1.0e4):
    """Adds a bunch of sinusoids of different frequencies to a Tensor.

    Each channel of the input Tensor is incremented by a sinusoid of a different
    frequency and phase. This allows attention to learn to use absolute and
    relative positions. Timing signals should be added to some precursors of
    both the query and the memory inputs to attention.

    The use of relative position is possible because sin(x+y) and cos(x+y) can
    be expressed in terms of y, sin(x) and cos(x).

    In particular, we use a geometric sequence of timescales starting with
    min_timescale and ending with max_timescale. The number of different
    timescales is equal to channels / 2. For each timescale, we generate the
    two sinusoidal signals sin(timestep/timescale) and cos(timestep/timescale).
    All of these sinusoids are concatenated in the channels dimension.

    Args:
        x: a Tensor with shape [batch, length, channels]
        time: a scalar timestep, used as the position when x has rank 2
        min_timescale: a float
        max_timescale: a float

    Returns:
        a Tensor the same shape as x.
    """
    channels = x.get_shape().as_list()[-1]
    if x.get_shape().ndims == 3:    # [batch_size, timesteps, dim]
        length = array_ops.shape(x)[1]
        position = math_ops.to_float(math_ops.range(length))
    elif x.get_shape().ndims == 2:  # [batch_size, dim]
        length = 1
        position = math_ops.to_float(math_ops.range(time, time + 1))
    else:
        raise ValueError("need a Tensor with rank 2 or 3")
    num_timescales = channels // 2
    log_timescale_increment = (
        math.log(float(max_timescale) / float(min_timescale)) /
        (math_ops.to_float(num_timescales) - 1))
    inv_timescales = min_timescale * math_ops.exp(
        math_ops.to_float(math_ops.range(num_timescales)) * -log_timescale_increment)
    scaled_time = array_ops.expand_dims(position, 1) * array_ops.expand_dims(inv_timescales, 0)
    signal = array_ops.concat([math_ops.sin(scaled_time), math_ops.cos(scaled_time)], axis=1)
    signal = array_ops.pad(signal, [[0, 0], [0, math_ops.mod(channels, 2)]])
    if x.get_shape().ndims == 3:
        signal = array_ops.reshape(signal, [1, length, channels])
    else:
        signal = array_ops.reshape(signal, [1, channels])
    return x + signal
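A minimal TF 1.x usage sketch (assuming the module-level imports used by the function above, e.g. math, array_ops and math_ops, are in scope; the shapes are illustrative):

import tensorflow as tf

x = tf.zeros([2, 10, 64])                         # [batch, length, channels]
x_with_signal = _add_sinusoids_signal(x, time=0)
print(x_with_signal.get_shape())                  # (2, 10, 64)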