This article collects typical usage examples of the Python function tensorflow.python.ops.math_ops.sin. If you are wondering what exactly sin does, how to call it, or what it looks like in real code, the curated examples below may help.
Fifteen code examples of the sin function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
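Before the examples, here is a minimal, self-contained sketch of what the function itself does (an illustration assuming a working TensorFlow installation, not part of the examples below): math_ops.sin is the internal op behind the public tf.sin / tf.math.sin and computes the element-wise sine of a tensor.

import numpy as np
from tensorflow.python.framework import constant_op, dtypes
from tensorflow.python.ops import math_ops

# Element-wise sine; the result has the same shape and dtype as the input.
x = constant_op.constant(np.linspace(0.0, np.pi, 5), dtype=dtypes.float32)
y = math_ops.sin(x)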
Example 1: GetMultiEngineGraphDef
def GetMultiEngineGraphDef(dtype=dtypes.float32):
  """Create a graph containing multiple segments."""
  g = ops.Graph()
  with g.as_default():
    inp = array_ops.placeholder(
        dtype=dtype, shape=[None] + INPUT_DIMS[1:], name=INPUT_NAME)
    with g.device("/GPU:0"):
      conv_filter = constant_op.constant(
          [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
          name="weights",
          dtype=dtype)
      conv = nn.conv2d(
          input=inp,
          filter=conv_filter,
          strides=[1, 2, 2, 1],
          padding="SAME",
          name="conv")
      c1 = constant_op.constant(
          np.random.randn(INPUT_DIMS[0], 12, 12, 6), dtype=dtype)
      p = conv * c1
      c2 = constant_op.constant(
          np.random.randn(INPUT_DIMS[0], 12, 12, 6), dtype=dtype)
      q = conv / c2

      edge = math_ops.sin(q)
      edge /= edge
      r = edge + edge

      p -= edge
      q *= edge
      s = p + q
      s -= r
    array_ops.squeeze(s, name=OUTPUT_NAME)
  return g.as_graph_def()
Example 2: _CosGrad
def _CosGrad(op, grad):
  """Returns grad * -sin(x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    if x.dtype.is_complex:
      x = math_ops.conj(x)
    return -grad * math_ops.sin(x)
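The identity this gradient function encodes is d/dx cos(x) = -sin(x) (with a conjugate for complex inputs). As a quick sanity check, the real-valued case can be verified numerically with plain NumPy (a sketch added for illustration, not part of the original example):

import numpy as np

x = np.linspace(-2.0, 2.0, 9)
eps = 1e-6
numeric = (np.cos(x + eps) - np.cos(x - eps)) / (2 * eps)  # central difference
analytic = -np.sin(x)  # what _CosGrad returns when the incoming grad is 1
assert np.allclose(numeric, analytic, atol=1e-6)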
Example 3: input_fn
def input_fn():
  start = random_ops.random_uniform(
      (), minval=0, maxval=(np.pi * 2.0), dtype=dtypes.float32, seed=seed)
  sin_curves = math_ops.sin(
      math_ops.linspace(start, (sequence_length - 1) * increment,
                        sequence_length + 1))
  inputs = array_ops.slice(sin_curves, [0], [sequence_length])
  labels = array_ops.slice(sin_curves, [1], [sequence_length])
  return {'inputs': inputs}, labels
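This input_fn samples a random phase offset and emits a sine curve in which the labels are the inputs shifted forward by one step, i.e. a next-value prediction task. A NumPy sketch of the same slicing, with sequence_length, increment, and start chosen here purely for illustration:

import numpy as np

sequence_length, increment, start = 8, 0.1, 0.0  # start is random in the original
curve = np.sin(np.linspace(start, (sequence_length - 1) * increment,
                           sequence_length + 1))
inputs = curve[:sequence_length]        # x_0 .. x_{T-1}
labels = curve[1:sequence_length + 1]   # x_1 .. x_T, one step ahead of inputs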
Example 4: angles_to_projective_transforms
def angles_to_projective_transforms(angles,
                                    image_height,
                                    image_width,
                                    name=None):
  """Returns projective transform(s) for the given angle(s).

  Args:
    angles: A scalar angle to rotate all images by, or (for batches of images)
      a vector with an angle to rotate each image in the batch. The rank must
      be statically known (the shape is not `TensorShape(None)`).
    image_height: Height of the image(s) to be transformed.
    image_width: Width of the image(s) to be transformed.

  Returns:
    A tensor of shape (num_images, 8). Projective transforms which can be given
    to `tf.contrib.image.transform`.
  """
  with ops.name_scope(name, "angles_to_projective_transforms"):
    angle_or_angles = ops.convert_to_tensor(
        angles, name="angles", dtype=dtypes.float32)
    if len(angle_or_angles.get_shape()) == 0:  # pylint: disable=g-explicit-length-test
      angles = angle_or_angles[None]
    elif len(angle_or_angles.get_shape()) == 1:
      angles = angle_or_angles
    else:
      raise TypeError("Angles should have rank 0 or 1.")
    x_offset = ((image_width - 1) - (math_ops.cos(angles) *
                                     (image_width - 1) - math_ops.sin(angles) *
                                     (image_height - 1))) / 2.0
    y_offset = ((image_height - 1) - (math_ops.sin(angles) *
                                      (image_width - 1) + math_ops.cos(angles) *
                                      (image_height - 1))) / 2.0
    num_angles = array_ops.shape(angles)[0]
    return array_ops.concat(
        values=[
            math_ops.cos(angles)[:, None],
            -math_ops.sin(angles)[:, None],
            x_offset[:, None],
            math_ops.sin(angles)[:, None],
            math_ops.cos(angles)[:, None],
            y_offset[:, None],
            array_ops.zeros((num_angles, 2), dtypes.float32),
        ],
        axis=1)
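The eight numbers per angle parameterize the mapping from output pixel coordinates back to input coordinates (a rotation about the image centre) in the format consumed by tf.contrib.image.transform. A NumPy sketch of the same arithmetic for a single angle, useful for checking the offsets:

import numpy as np

def rotation_transform_np(angle, image_height, image_width):
  # Rotation about the image centre, written as the 8-parameter transform above.
  cx, cy = (image_width - 1) / 2.0, (image_height - 1) / 2.0
  cos_a, sin_a = np.cos(angle), np.sin(angle)
  x_offset = cx - (cos_a * cx - sin_a * cy)
  y_offset = cy - (sin_a * cx + cos_a * cy)
  return np.array([cos_a, -sin_a, x_offset, sin_a, cos_a, y_offset, 0.0, 0.0])

transform = rotation_transform_np(np.pi / 2, image_height=12, image_width=12)  # 90-degree rotation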
Example 5: Test
def Test(self):
  np.random.seed(1)
  n = shape_[-1]
  batch_shape = shape_[:-2]
  np_dtype = dtype_.as_numpy_dtype
  a = np.random.uniform(
      low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
  if dtype_.is_complex:
    a += 1j * np.random.uniform(
        low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
  a += np.conj(a.T)
  a = np.tile(a, batch_shape + (1, 1))
  # Optimal stepsize for central difference is O(epsilon^{1/3}).
  epsilon = np.finfo(np_dtype).eps
  delta = 0.1 * epsilon**(1.0 / 3.0)
  # Tolerance obtained by looking at actual differences using
  # np.linalg.norm(theoretical - numerical, np.inf) on a -mavx build.
  if dtype_ in (dtypes_lib.float32, dtypes_lib.complex64):
    tol = 1e-2
  else:
    tol = 1e-7
  with self.session(use_gpu=True):
    tf_a = constant_op.constant(a)
    if compute_v_:
      tf_e, tf_v = linalg_ops.self_adjoint_eig(tf_a)

      # (Complex) eigenvectors are only unique up to an arbitrary phase.
      # We normalize the vectors such that the first component has phase 0.
      top_rows = tf_v[..., 0:1, :]
      if tf_a.dtype.is_complex:
        angle = -math_ops.angle(top_rows)
        phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
      else:
        phase = math_ops.sign(top_rows)
      tf_v *= phase
      outputs = [tf_e, tf_v]
    else:
      tf_e = linalg_ops.self_adjoint_eigvals(tf_a)
      outputs = [tf_e]
    for b in outputs:
      x_init = np.random.uniform(
          low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
      if dtype_.is_complex:
        x_init += 1j * np.random.uniform(
            low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
      x_init += np.conj(x_init.T)
      x_init = np.tile(x_init, batch_shape + (1, 1))
      theoretical, numerical = gradient_checker.compute_gradient(
          tf_a,
          tf_a.get_shape().as_list(),
          b,
          b.get_shape().as_list(),
          x_init_value=x_init,
          delta=delta)
      self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
Example 6: Compute
def Compute(x):
  e, v = linalg_ops.self_adjoint_eig(x)
  # (Complex) eigenvectors are only unique up to an arbitrary phase.
  # We normalize the vectors such that the first component has phase 0.
  top_rows = v[..., 0:1, :]
  if dtype_.is_complex:
    angle = -math_ops.angle(top_rows)
    phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
  else:
    phase = math_ops.sign(top_rows)
  v *= phase
  return e, v
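The phase factor built from math_ops.cos and math_ops.sin here is just e^(-i*angle(top_rows)): multiplying by it rotates every eigenvector so that its first component becomes real and non-negative, which makes eigenvector comparisons deterministic. A NumPy sketch of the same normalization on random data, purely illustrative:

import numpy as np

rng = np.random.default_rng(0)
v = rng.normal(size=(3, 3)) + 1j * rng.normal(size=(3, 3))  # columns stand in for eigenvectors
top_rows = v[0:1, :]                      # first component of every column
phase = np.exp(-1j * np.angle(top_rows))  # cos(angle) + 1j*sin(angle), with angle = -angle(top_rows)
v = v * phase
assert np.allclose(np.angle(v[0, :]), 0.0)  # first components now have phase 0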
Example 7: input_fn
def input_fn():
  start = random_ops.random_uniform(
      (), minval=0, maxval=(np.pi * 2.0), dtype=dtypes.float32, seed=seed)
  sin_curves = math_ops.sin(
      math_ops.linspace(start, (sequence_length - 1) * increment,
                        sequence_length + 1))
  inputs = array_ops.slice(sin_curves, [0], [sequence_length])
  labels = array_ops.slice(sin_curves, [1], [sequence_length])
  input_key = string_ops.string_join([
      'key_',
      string_ops.as_string(math_ops.cast(10000 * start, dtypes.int32))
  ])
  return {'inputs': inputs, input_key_column_name: input_key}, labels
Example 8: _add_sinusoids_signal
def _add_sinusoids_signal(x, time, min_timescale=1.0, max_timescale=1.0e4):
  """Adds a bunch of sinusoids of different frequencies to a Tensor.

  Each channel of the input Tensor is incremented by a sinusoid of a different
  frequency and phase.

  This allows attention to learn to use absolute and relative positions.
  Timing signals should be added to some precursors of both the query and the
  memory inputs to attention.

  The use of relative position is possible because sin(x+y) and cos(x+y) can be
  expressed in terms of y, sin(x) and cos(x).

  In particular, we use a geometric sequence of timescales starting with
  min_timescale and ending with max_timescale. The number of different
  timescales is equal to channels / 2. For each timescale, we
  generate the two sinusoidal signals sin(timestep/timescale) and
  cos(timestep/timescale). All of these sinusoids are concatenated in
  the channels dimension.

  Args:
    x: a Tensor with shape [batch, length, channels]
    min_timescale: a float
    max_timescale: a float

  Returns:
    a Tensor the same shape as x.
  """
  channels = x.get_shape().as_list()[-1]
  if x.get_shape().ndims == 3:  # [batch_size, timesteps, dim]
    length = array_ops.shape(x)[1]
    position = math_ops.to_float(math_ops.range(length))
  elif x.get_shape().ndims == 2:  # [batch_size, dim]
    length = 1
    position = math_ops.to_float(math_ops.range(time, time + 1))
  else:
    raise ValueError("need a Tensor with rank 2 or 3")
  num_timescales = channels // 2
  log_timescale_increment = (
      math.log(float(max_timescale) / float(min_timescale)) /
      (math_ops.to_float(num_timescales) - 1))
  inv_timescales = min_timescale * math_ops.exp(
      math_ops.to_float(math_ops.range(num_timescales)) *
      -log_timescale_increment)
  scaled_time = (array_ops.expand_dims(position, 1) *
                 array_ops.expand_dims(inv_timescales, 0))
  signal = array_ops.concat(
      [math_ops.sin(scaled_time), math_ops.cos(scaled_time)], axis=1)
  signal = array_ops.pad(signal, [[0, 0], [0, math_ops.mod(channels, 2)]])
  if x.get_shape().ndims == 3:
    signal = array_ops.reshape(signal, [1, length, channels])
  else:
    signal = array_ops.reshape(signal, [1, channels])
  return x + signal
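For a rank-3 input this reduces to the familiar transformer positional encoding: sin(position/timescale) for the first channels/2 channels and cos(position/timescale) for the rest, with timescales spaced geometrically between min_timescale and max_timescale. A NumPy sketch of the signal that gets broadcast-added to x, with channels assumed even for brevity:

import math
import numpy as np

length, channels = 6, 8                      # illustrative sizes
min_timescale, max_timescale = 1.0, 1.0e4
num_timescales = channels // 2
log_increment = math.log(max_timescale / min_timescale) / (num_timescales - 1)
inv_timescales = min_timescale * np.exp(np.arange(num_timescales) * -log_increment)
scaled_time = np.arange(length)[:, None] * inv_timescales[None, :]
signal = np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)
# signal.shape == (length, channels); it is reshaped to (1, length, channels) before x + signal.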
Example 9: test_stft_round_trip
def test_stft_round_trip(self):
  # Tuples of (signal_length, frame_length, frame_step, fft_length).
  test_configs = [
      # 87.5% overlap.
      (4096, 256, 32, 256),
      # 75% overlap.
      (4096, 256, 64, 256),
      # Odd frame hop.
      (4096, 128, 25, 128),
      # Odd frame length.
      (4096, 127, 32, 128),
  ]

  for signal_length, frame_length, frame_step, fft_length in test_configs:
    # Generate a 440Hz signal at 8kHz sample rate.
    signal = math_ops.sin(2 * np.pi * 440 / 8000 *
                          math_ops.to_float(math_ops.range(signal_length)))
    self._compare_round_trip(signal, frame_length, frame_step, fft_length)
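The per-sample phase increment 2*pi*440/8000 comes from sampling a 440 Hz tone at 8 kHz: sample n of the test signal is sin(2*pi*440*n/8000). The same signal written in plain NumPy, for reference:

import numpy as np

sample_rate, freq, signal_length = 8000, 440.0, 4096
n = np.arange(signal_length, dtype=np.float32)
signal = np.sin(2 * np.pi * freq / sample_rate * n)  # 440 Hz tone at an 8 kHz sample rate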
Example 10: _NormalizingSvd
def _NormalizingSvd(tf_a):
  tf_s, tf_u, tf_v = linalg_ops.svd(tf_a, compute_uv=True, full_matrices=True)
  # Singular vectors are only unique up to an arbitrary phase. We normalize
  # the vectors such that the first component of u (if m >= n) or v (if n > m)
  # has phase 0.
  m = tf_a.shape[-2]
  n = tf_a.shape[-1]
  if m >= n:
    top_rows = tf_u[..., 0:1, :]
  else:
    top_rows = tf_v[..., 0:1, :]
  if tf_u.dtype.is_complex:
    angle = -math_ops.angle(top_rows)
    phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
  else:
    phase = math_ops.sign(top_rows)
  tf_u *= phase[..., :m]
  tf_v *= phase[..., :n]
  return tf_s, tf_u, tf_v
Example 11: test_gradients
def test_gradients(self):
  """Test that spectral_ops.stft has a working gradient."""
  with spectral_ops_test_util.fft_kernel_label_map(), (
      self.test_session(use_gpu=True)) as sess:
    signal_length = 512

    # An all-zero signal has all zero gradients with respect to the sum of the
    # magnitude STFT.
    empty_signal = array_ops.zeros([signal_length], dtype=dtypes.float32)
    empty_signal_gradient = sess.run(
        self._compute_stft_gradient(empty_signal))
    self.assertTrue((empty_signal_gradient == 0.0).all())

    # A sinusoid will have non-zero components of its gradient with respect to
    # the sum of the magnitude STFT.
    sinusoid = math_ops.sin(
        2 * np.pi * math_ops.linspace(0.0, 1.0, signal_length))
    sinusoid_gradient = sess.run(self._compute_stft_gradient(sinusoid))
    self.assertFalse((sinusoid_gradient == 0.0).all())
Example 12: get_multi_engine_graph_def
def get_multi_engine_graph_def(mode="FP32"):
  """Create a simple graph and return its graph_def."""
  dtype = dtypes.float32
  if mode.upper() == "FP16":
    dtype = dtypes.float16
  else:
    pass
  g = ops.Graph()
  with g.as_default():
    x = aops.placeholder(shape=[None, 3, 7, 5], name="input", dtype=dtype)
    with g.name_scope("Global_scope"):
      with g.name_scope("first_scope"):
        e = cop.constant(
            np.random.randn(3, 2, 3, 4), name="weights", dtype=dtype)
        conv = nn.conv2d(
            input=x,
            filter=e,
            data_format="NCHW",
            strides=[1, 1, 1, 1],
            padding="VALID",
            name="conv")
        b = cop.constant(np.random.randn(1, 4, 1, 1), name="bias1", dtype=dtype)
        t = conv * b

        b = cop.constant(np.random.randn(1, 4, 1, 1), name="bias2", dtype=dtype)
        q = conv / b

        edge = mops.sin(q)
        edge1 = mops.cos(conv)
        with g.name_scope("test_scope"):
          de = edge + edge1
          t -= edge1
          q *= edge
          t += q
          t -= de
    k = aops.squeeze(t, name="output")
  print(k.dtype)
  return g.as_graph_def()
Example 13: _sin_fn
def _sin_fn(x):
  ranger = math_ops.linspace(
      array_ops.reshape(x[0], []), (sequence_length - 1) * increment,
      sequence_length + 1)
  return math_ops.sin(ranger)
Example 14: _CosGrad
def _CosGrad(op, grad):
  """Returns grad * -sin(x)."""
  x = op.inputs[0]
  return -grad * math_ops.sin(x)
Example 15: tf_function
def tf_function(self, x):
  """Takes a tf tensor, evaluates the test function, and returns a tf tensor."""
  return math_ops.reduce_sum(
      math_ops.square(x - 0.5) + 0.25 * x + 1 * math_ops.sin(x * 15),
      2,
      keepdims=True)
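The function evaluated here is (x - 0.5)^2 + 0.25*x + sin(15*x), summed over axis 2 with keepdims=True. A NumPy version for reference (the input shape below is assumed purely for illustration):

import numpy as np

def np_test_function(x):
  # Same objective as tf_function above, reduced over axis 2 with keepdims.
  return np.sum(np.square(x - 0.5) + 0.25 * x + np.sin(15.0 * x),
                axis=2, keepdims=True)

x = np.random.rand(4, 3, 2)   # illustrative shape
y = np_test_function(x)       # shape (4, 3, 1)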