This article collects typical usage examples of the tensorflow.cos function in Python. If you are wondering what exactly the Python cos function does and how to use it, the hand-picked code examples below may help.
The following shows 15 code examples of the cos function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
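As a quick orientation before the collected examples, here is a minimal sketch of the basic call, assuming TensorFlow 2.x with eager execution (the tensor values are purely illustrative): tf.cos applies the element-wise cosine, with angles given in radians.
import numpy as np
import tensorflow as tf

# Element-wise cosine of a tensor of angles in radians.
angles = tf.constant([0.0, np.pi / 2.0, np.pi], dtype=tf.float32)
cosines = tf.cos(angles)
print(cosines.numpy())  # approximately [ 1.  0. -1.]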
Example 1: testVonMisesSampleMoments
def testVonMisesSampleMoments(self):
locs_v = np.array([-2., -1., 0.3, 2.3])
concentrations_v = np.array([0.1, 1.0, 2.0, 10.0])
von_mises = tfd.VonMises(
self.make_tensor(locs_v), self.make_tensor(concentrations_v))
n = 10000
samples = von_mises.sample(n, seed=12345)
expected_mean = von_mises.mean()
actual_mean = tf.atan2(
tf.reduce_mean(tf.sin(samples), 0), tf.reduce_mean(tf.cos(samples), 0))
expected_variance = von_mises.variance()
standardized_samples = samples - tf.expand_dims(von_mises.mean(), 0)
actual_variance = 1. - tf.reduce_mean(tf.cos(standardized_samples), axis=0)
[
expected_mean_val, expected_variance_val, actual_mean_val,
actual_variance_val
] = self.evaluate(
[expected_mean, expected_variance, actual_mean, actual_variance])
self.assertAllClose(expected_mean_val, actual_mean_val, rtol=0.1)
self.assertAllClose(expected_variance_val, actual_variance_val, rtol=0.1)
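The test above compares sample statistics against the analytic circular mean and variance of the von Mises distribution; a minimal standalone sketch of those two statistics, assuming TensorFlow 2.x and a hypothetical 1-D tensor of angles:
import tensorflow as tf

samples = tf.random.uniform([10000], minval=-3.0, maxval=3.0)  # hypothetical angles in radians
# Circular mean: atan2 of the averaged sine and cosine components.
circ_mean = tf.atan2(tf.reduce_mean(tf.sin(samples)), tf.reduce_mean(tf.cos(samples)))
# Circular variance: one minus the mean cosine of the centered angles.
circ_var = 1.0 - tf.reduce_mean(tf.cos(samples - circ_mean))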
Example 2: get_filters
def get_filters(R, filter_size, P=None, n_rings=None):
"""Perform single-frequency DFT on each ring of a polar-resampled patch"""
k = filter_size
filters = {}
N = n_samples(k)
from scipy.linalg import dft
for m, r in R.items():
rsh = r.get_shape().as_list()
# Get the basis matrices
weights = get_interpolation_weights(k, m, n_rings=n_rings)
DFT = dft(N)[m,:]
LPF = np.dot(DFT, weights).T
cosine = np.real(LPF).astype(np.float32)
sine = np.imag(LPF).astype(np.float32)
# Reshape for multiplication with radial profile
cosine = tf.constant(cosine)
sine = tf.constant(sine)
# Project taps on to rotational basis
r = tf.reshape(r, tf.stack([rsh[0],rsh[1]*rsh[2]]))
ucos = tf.reshape(tf.matmul(cosine, r), tf.stack([k, k, rsh[1], rsh[2]]))
usin = tf.reshape(tf.matmul(sine, r), tf.stack([k, k, rsh[1], rsh[2]]))
if P is not None:
# Rotate basis matrices
ucos_ = tf.cos(P[m])*ucos + tf.sin(P[m])*usin
usin = -tf.sin(P[m])*ucos + tf.cos(P[m])*usin
ucos = ucos_
filters[m] = (ucos, usin)
return filters
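The phase shift at the end of get_filters is an ordinary 2-D rotation of the (cosine, sine) basis pair; a minimal sketch of just that step in isolation, with illustrative shapes and an assumed scalar phase (TensorFlow 2.x):
import tensorflow as tf

phase = tf.constant(0.3)               # assumed rotation angle in radians
ucos = tf.random.normal([5, 5, 3, 8])  # stand-in for the cosine-basis filters
usin = tf.random.normal([5, 5, 3, 8])  # stand-in for the sine-basis filters
ucos_rot = tf.cos(phase) * ucos + tf.sin(phase) * usin
usin_rot = -tf.sin(phase) * ucos + tf.cos(phase) * usin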
Example 3: call
def call(self, inputs):
k1 = tf.matmul(tf.cos(inputs), self.k1 * tf.cos(self.mu))
k2 = tf.matmul(tf.sin(inputs), self.k2 * tf.sin(self.mu))
# Defines the two model formulations: "glm" vs "gvm".
if self.model_type == 'glm':
return tf.exp(k1 + k2 + self.k0)
else:
return tf.nn.softplus(self.b) + self.g * tf.exp(k1 + k2)
Example 4: _euler2mat
def _euler2mat(z, y, x):
"""Converts euler angles to rotation matrix.
From:
https://github.com/pulkitag/pycaffe-utils/blob/master/rot_utils.py#L174
TODO: Remove the dimension for 'N' (deprecated for converting all source
poses altogether).
Args:
z: rotation angle along z axis (in radians) -- size = [B, n]
y: rotation angle along y axis (in radians) -- size = [B, n]
x: rotation angle along x axis (in radians) -- size = [B, n]
Returns:
Rotation matrix corresponding to the euler angles, with shape [B, n, 3, 3].
"""
batch_size = tf.shape(z)[0]
n = 1
z = tf.clip_by_value(z, -np.pi, np.pi)
y = tf.clip_by_value(y, -np.pi, np.pi)
x = tf.clip_by_value(x, -np.pi, np.pi)
# Expand to B x N x 1 x 1
z = tf.expand_dims(tf.expand_dims(z, -1), -1)
y = tf.expand_dims(tf.expand_dims(y, -1), -1)
x = tf.expand_dims(tf.expand_dims(x, -1), -1)
zeros = tf.zeros([batch_size, n, 1, 1])
ones = tf.ones([batch_size, n, 1, 1])
cosz = tf.cos(z)
sinz = tf.sin(z)
rotz_1 = tf.concat([cosz, -sinz, zeros], axis=3)
rotz_2 = tf.concat([sinz, cosz, zeros], axis=3)
rotz_3 = tf.concat([zeros, zeros, ones], axis=3)
zmat = tf.concat([rotz_1, rotz_2, rotz_3], axis=2)
cosy = tf.cos(y)
siny = tf.sin(y)
roty_1 = tf.concat([cosy, zeros, siny], axis=3)
roty_2 = tf.concat([zeros, ones, zeros], axis=3)
roty_3 = tf.concat([-siny, zeros, cosy], axis=3)
ymat = tf.concat([roty_1, roty_2, roty_3], axis=2)
cosx = tf.cos(x)
sinx = tf.sin(x)
rotx_1 = tf.concat([ones, zeros, zeros], axis=3)
rotx_2 = tf.concat([zeros, cosx, -sinx], axis=3)
rotx_3 = tf.concat([zeros, sinx, cosx], axis=3)
xmat = tf.concat([rotx_1, rotx_2, rotx_3], axis=2)
return tf.matmul(tf.matmul(xmat, ymat), zmat)
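A rough call sketch for _euler2mat with a single pose, assuming eager execution and that numpy/tensorflow are already imported where the function is defined (the angle values are illustrative):
import tensorflow as tf

z = tf.constant([[0.1]])   # yaw,   shape [B=1, n=1]
y = tf.constant([[0.2]])   # pitch, shape [B=1, n=1]
x = tf.constant([[0.3]])   # roll,  shape [B=1, n=1]
rot = _euler2mat(z, y, x)  # rotation matrices, shape [1, 1, 3, 3]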
Example 5: _J
def _J(self, theta):
"""
Implements the order dependent family of functions defined in equations
4 to 7 in the reference paper.
"""
if self.order == 0:
return np.pi - theta
elif self.order == 1:
return tf.sin(theta) + (np.pi - theta) * tf.cos(theta)
elif self.order == 2:
return 3. * tf.sin(theta) * tf.cos(theta) + \
(np.pi - theta) * (1. + 2. * tf.cos(theta) ** 2)
Example 6: mmd_fourier
def mmd_fourier(x1, x2, bandwidth=2., dim_r=500):
"""
Approximate RBF kernel by random features
Notes:
Reimplementation in tensorflow of the Variational Fair Autoencoder
https://arxiv.org/abs/1511.00830
"""
d = x1.get_shape().as_list()[1]
rW_n = tf.sqrt(2. / bandwidth) * tf.random_normal([d, dim_r]) / np.sqrt(d)
rb_u = 2 * np.pi * tf.random_uniform([dim_r])
rf0 = tf.sqrt(2. / dim_r) * tf.cos(tf.matmul(x1, rW_n) + rb_u)
rf1 = tf.sqrt(2. / dim_r) * tf.cos(tf.matmul(x2, rW_n) + rb_u)
result = tf.reduce_sum((tf.reduce_mean(rf0, axis=0) - tf.reduce_mean(rf1, axis=0))**2)
return tf.sqrt(result)
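A rough call sketch for mmd_fourier under graph-mode TensorFlow 1.x (the toy inputs and their shift are purely illustrative):
import numpy as np
import tensorflow as tf

x1 = tf.constant(np.random.randn(64, 10), dtype=tf.float32)
x2 = tf.constant(np.random.randn(64, 10) + 0.5, dtype=tf.float32)
mmd = mmd_fourier(x1, x2, bandwidth=2., dim_r=500)
with tf.Session() as sess:
    print(sess.run(mmd))  # approximate MMD between the two samples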
Example 7: get_position_encoding
def get_position_encoding(
length, hidden_size, min_timescale=1.0, max_timescale=1.0e4):
"""Return positional encoding.
Calculates the position encoding as a mix of sine and cosine functions with
geometrically increasing wavelengths.
Defined and formalized in Attention is All You Need, section 3.5.
Args:
length: Sequence length.
hidden_size: Size of the hidden dimension of the encoding (assumed even).
min_timescale: Minimum scale that will be applied at each position.
max_timescale: Maximum scale that will be applied at each position.
Returns:
Tensor with shape [length, hidden_size]
"""
position = tf.to_float(tf.range(length))
num_timescales = hidden_size // 2
log_timescale_increment = (
math.log(float(max_timescale) / float(min_timescale)) /
(tf.to_float(num_timescales) - 1))
inv_timescales = min_timescale * tf.exp(
tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0)
signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
return signal
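A rough call sketch for get_position_encoding under TensorFlow 1.x (length and hidden size are arbitrary; the function also relies on the standard math module being imported):
import math
import tensorflow as tf

signal = get_position_encoding(length=50, hidden_size=128)
with tf.Session() as sess:
    print(sess.run(signal).shape)  # (50, 128)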
Example 8: loss
def loss(y_true_cls, y_pred_cls,
y_true_geo, y_pred_geo,
training_mask):
'''
Define the loss used for training, containing two parts:
the first part uses dice loss instead of weighted log loss,
the second part is the IoU loss defined in the paper.
:param y_true_cls: ground truth of text
:param y_pred_cls: prediction of text
:param y_true_geo: ground truth of geometry
:param y_pred_geo: prediction of geometry
:param training_mask: mask used in training, to ignore some text annotated by ###
:return: scalar training loss
'''
classification_loss = dice_coefficient(y_true_cls, y_pred_cls, training_mask)
# scale classification loss to match the iou loss part
classification_loss *= 0.01
# d1 -> top, d2->right, d3->bottom, d4->left
d1_gt, d2_gt, d3_gt, d4_gt, theta_gt = tf.split(value=y_true_geo, num_or_size_splits=5, axis=3)
d1_pred, d2_pred, d3_pred, d4_pred, theta_pred = tf.split(value=y_pred_geo, num_or_size_splits=5, axis=3)
area_gt = (d1_gt + d3_gt) * (d2_gt + d4_gt)
area_pred = (d1_pred + d3_pred) * (d2_pred + d4_pred)
w_union = tf.minimum(d2_gt, d2_pred) + tf.minimum(d4_gt, d4_pred)
h_union = tf.minimum(d1_gt, d1_pred) + tf.minimum(d3_gt, d3_pred)
area_intersect = w_union * h_union
area_union = area_gt + area_pred - area_intersect
L_AABB = -tf.log((area_intersect + 1.0)/(area_union + 1.0))
L_theta = 1 - tf.cos(theta_pred - theta_gt)
tf.summary.scalar('geometry_AABB', tf.reduce_mean(L_AABB * y_true_cls * training_mask))
tf.summary.scalar('geometry_theta', tf.reduce_mean(L_theta * y_true_cls * training_mask))
L_g = L_AABB + 20 * L_theta
return tf.reduce_mean(L_g * y_true_cls * training_mask) + classification_loss
Example 9: test_cwise_unary_grad
def test_cwise_unary_grad(self):
"""
Ensure that all component-wise unary functions in the math op library yield an identical gradient to tensorflow
"""
test_config = tf.ConfigProto(allow_soft_placement=False)
test_config.graph_options.optimizer_options.opt_level = -1
with tf.Session(config=test_config) as s:
arg_np = np.random.random(100)
grad_above = tf.constant(np.random.random(100))
arg = tf.constant(arg_np)
def test_grad(fcn, tf_fcn):
ovl_out = as_tensorflow(fcn(arg))
tf_out = tf_fcn(arg)
ovl_grad = tf.gradients(ovl_out, arg, grad_above)[0]
tf_grad = tf.gradients(tf_out, arg, grad_above)[0]
ovl_out, tf_out, ovl_grad, tf_grad = s.run([ovl_out, tf_out, ovl_grad, tf_grad])
assert np.allclose(ovl_out, tf_out)
assert np.allclose(ovl_grad, tf_grad)
test_grad(lambda x: neg(x), lambda x: tf.negative(x))
test_grad(lambda x: tanh(x), lambda x: tf.tanh(x))
test_grad(lambda x: sin(x), lambda x: tf.sin(x))
test_grad(lambda x: cos(x), lambda x: tf.cos(x))
test_grad(lambda x: tan(x), lambda x: tf.tan(x))
test_grad(lambda x: sigmoid(x), lambda x: tf.sigmoid(x))
Example 10: get_box3d_corners_helper
def get_box3d_corners_helper(centers, headings, sizes):
""" TF layer. Input: (N,3), (N,), (N,3), Output: (N,8,3) """
#print '-----', centers
N = centers.get_shape()[0].value
l = tf.slice(sizes, [0,0], [-1,1]) # (N,1)
w = tf.slice(sizes, [0,1], [-1,1]) # (N,1)
h = tf.slice(sizes, [0,2], [-1,1]) # (N,1)
#print l,w,h
x_corners = tf.concat([l/2,l/2,-l/2,-l/2,l/2,l/2,-l/2,-l/2], axis=1) # (N,8)
y_corners = tf.concat([h/2,h/2,h/2,h/2,-h/2,-h/2,-h/2,-h/2], axis=1) # (N,8)
z_corners = tf.concat([w/2,-w/2,-w/2,w/2,w/2,-w/2,-w/2,w/2], axis=1) # (N,8)
corners = tf.concat([tf.expand_dims(x_corners,1), tf.expand_dims(y_corners,1), tf.expand_dims(z_corners,1)], axis=1) # (N,3,8)
#print x_corners, y_corners, z_corners
c = tf.cos(headings)
s = tf.sin(headings)
ones = tf.ones([N], dtype=tf.float32)
zeros = tf.zeros([N], dtype=tf.float32)
row1 = tf.stack([c,zeros,s], axis=1) # (N,3)
row2 = tf.stack([zeros,ones,zeros], axis=1)
row3 = tf.stack([-s,zeros,c], axis=1)
R = tf.concat([tf.expand_dims(row1,1), tf.expand_dims(row2,1), tf.expand_dims(row3,1)], axis=1) # (N,3,3)
#print row1, row2, row3, R, N
corners_3d = tf.matmul(R, corners) # (N,3,8)
corners_3d += tf.tile(tf.expand_dims(centers,2), [1,1,8]) # (N,3,8)
corners_3d = tf.transpose(corners_3d, perm=[0,2,1]) # (N,8,3)
return corners_3d
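A rough call sketch for get_box3d_corners_helper with four boxes, under graph-mode TensorFlow 1.x where the batch dimension is statically known (the values are illustrative):
import tensorflow as tf

centers = tf.zeros([4, 3])                     # (N,3) box centers
headings = tf.constant([0.0, 0.5, 1.0, -0.3])  # (N,)  heading angles in radians
sizes = tf.ones([4, 3])                        # (N,3) length, width, height
corners = get_box3d_corners_helper(centers, headings, sizes)
with tf.Session() as sess:
    print(sess.run(corners).shape)  # (4, 8, 3)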
Example 11: tf_cheating_contcartpole
def tf_cheating_contcartpole(state, action):
gravity = 9.8
masscart = 1.0
masspole = 0.1
total_mass = (masspole + masscart)
length = 0.5 # actually half the pole's length
polemass_length = (masspole * length)
force_mag = 10.0
tau = 0.02 # seconds between state updates
# Angle at which to fail the episode
theta_threshold_radians = 12 * 2 * math.pi / 360
x_threshold = 2.4
x, x_dot, theta, theta_dot = tf.split(state, 4, axis=-1)
done = tf.logical_or(x < -x_threshold,
tf.logical_or(x > x_threshold,
tf.logical_or(theta < -theta_threshold_radians,
theta > theta_threshold_radians)))
force = force_mag * action
costheta = tf.cos(theta)
sintheta = tf.sin(theta)
temp = old_div((force + polemass_length * theta_dot * theta_dot * sintheta), total_mass)
thetaacc = old_div((gravity * sintheta - costheta* temp), (length * (old_div(4.0,3.0) - masspole * costheta * costheta / total_mass)))
xacc = temp - polemass_length * thetaacc * costheta / total_mass
x = x + tau * x_dot
x_dot = x_dot + tau * xacc
theta = theta + tau * theta_dot
theta_dot = theta_dot + tau * thetaacc
state = tf.concat([x,x_dot,theta,theta_dot], -1)
done = tf.squeeze(tf.cast(done, tf.float32), -1)
reward = 1.0 - done
done *= 0.
return state, reward, done
Example 12: phigrad
def phigrad(X, omegas, D):
Z = tf.matmul(X, omegas)
Zc = tf.cos(Z)
Zs = tf.sin(Z)
phiX = tf.concat([Zc, Zs], 1) / np.sqrt(D)
phiXg = tf.concat([-omegas * Zs, omegas * Zc], 1) / np.sqrt(D)
return phiX, phiXg
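A rough call sketch for phigrad with one-dimensional inputs, assuming omegas has shape [1, D] so the broadcast in the gradient term is well defined (all values are illustrative):
import numpy as np
import tensorflow as tf

D = 100
X = tf.constant(np.random.randn(32, 1))      # (N,1) scalar inputs
omegas = tf.constant(np.random.randn(1, D))  # (1,D) random frequencies
phiX, phiXg = phigrad(X, omegas, D)          # features (N,2D) and their derivatives w.r.t. X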
Example 13: distance_cutoff
def distance_cutoff(self, d, cutoff, flags):
""" Generate distance matrix with trainable cutoff """
# Cutoff with threshold Rc
d_flag = flags * tf.sign(cutoff - d)
d_flag = tf.nn.relu(d_flag)
d_flag = d_flag * tf.expand_dims((1 - tf.eye(self.max_atoms)), 0)
d = 0.5 * (tf.cos(np.pi * d / cutoff) + 1)
return d * d_flag
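The smoothing applied to d above is the cosine cutoff f(d) = 0.5 * (cos(pi * d / Rc) + 1); a minimal standalone sketch, assuming TensorFlow 2.x and an illustrative cutoff radius:
import numpy as np
import tensorflow as tf

cutoff = 6.0                                   # assumed cutoff radius Rc
d = tf.constant([1.0, 3.0, 5.0, 7.0])          # hypothetical pairwise distances
f = 0.5 * (tf.cos(np.pi * d / cutoff) + 1.0)   # smooth decay from 1 at d=0 to 0 at d=Rc
f = tf.where(d < cutoff, f, tf.zeros_like(f))  # hard zero beyond the cutoff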
Example 14: objective
def objective(self, params, data, labels=None):
radius = tf.sqrt(tf.reduce_sum(params[0]**2))
rad_loss = tf.reduce_sum(1. / (radius + 1e-6) * data[:, 0])
sin_dist = params[0][1:] - tf.cos(params[0][:-1]) * np.pi
sin_loss = tf.reduce_sum((sin_dist * data[:, 1:])**2)
return rad_loss + sin_loss
Example 15: FormLStack
def FormLStack(omega_output, deltat):
# encoded_layer is [None, 2]
# omega_output is [None, 1]
if omega_output.shape[1] == 1:
entry11 = tf.cos(omega_output*deltat)
entry12 = tf.sin(omega_output*deltat)
row1 = tf.concat([entry11, -entry12], axis=1) # [None, 2]
row2 = tf.concat([entry12, entry11], axis=1) # [None, 2]
elif omega_output.shape[1] == 2:
scale = tf.exp(omega_output[:,1] * deltat)
entry11 = tf.multiply(scale, tf.cos(omega_output[:,0]*deltat))
entry12 = tf.multiply(scale, tf.sin(omega_output[:,0]*deltat))
row1 = tf.stack([entry11, -entry12], axis=1) # [None, 2]
row2 = tf.stack([entry12, entry11], axis=1) # [None, 2]
Lstack = tf.stack([row1, row2], axis=2) # [None, 2, 2] put one row below other
return Lstack
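A rough call sketch for FormLStack in the one-column (pure rotation) case, assuming eager execution (the frequencies and time step are illustrative):
import tensorflow as tf

omega = tf.constant([[2.0], [3.0]])  # (batch, 1) angular frequencies
L = FormLStack(omega, deltat=0.1)    # (batch, 2, 2) blocks built from cos/sin entries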