This article collects typical usage examples of the tensorflow.eye function in Python. If you are wondering what exactly the eye function does, how to call it, or what real code that uses it looks like, the hand-picked examples below may help.
Fifteen code examples of the eye function are shown below, sorted by popularity by default.
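Before the examples, a minimal sketch of tf.eye itself may help orient readers. The variable names are illustrative only, and the calls follow the TF 1.x-style API used throughout the examples below:

import tensorflow as tf

identity_3 = tf.eye(3)                                   # 3x3 identity, shape (3, 3)
rect = tf.eye(2, num_columns=4)                          # ones on the main diagonal of a 2x4 matrix
batched = tf.eye(3, batch_shape=[5], dtype=tf.float64)   # batch of five 3x3 identities, shape (5, 3, 3)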
Example 1: initialize_mod_binary_MERA
def initialize_mod_binary_MERA(phys_dim,
                               chi,
                               dtype=tf.float64):
    """
    Parameters:
    -------------------
    phys_dim: int
        Hilbert space dimension of the bottom layer
    chi: int
        maximum bond dimension
    dtype: tensorflow dtype
        dtype of the MERA tensors
    Returns:
    -------------------
    (wC, vC, uC, rhoAB, rhoBA)
    wC, vC, uC: list of tf.Tensor
    rhoAB, rhoBA: tf.Tensor
    """
    wC, vC, uC = increase_bond_dimension_by_adding_layers(
        chi_new=chi,
        wC=[tf.random_uniform(shape=[phys_dim, phys_dim, phys_dim], dtype=dtype)],
        vC=[tf.random_uniform(shape=[phys_dim, phys_dim, phys_dim], dtype=dtype)],
        uC=[tf.random_uniform(shape=[phys_dim, phys_dim, phys_dim, phys_dim], dtype=dtype)])
    chi_top = wC[-1].shape[2]
    rhoAB = tf.reshape(tf.eye(chi_top * chi_top, dtype=dtype),
                       (chi_top, chi_top, chi_top, chi_top))
    rhoBA = tf.reshape(tf.eye(chi_top * chi_top, dtype=dtype),
                       (chi_top, chi_top, chi_top, chi_top))
    return wC, vC, uC, rhoAB, rhoBA
Example 2: test_with_tensors
def test_with_tensors(self):
    net = tensornetwork.TensorNetwork()
    a = net.add_node(tf.eye(2) * 2, name="T")
    b = net.add_node(tf.eye(2) * 3, name="A")
    e1 = net.connect(a[0], b[0], "edge")
    e2 = net.connect(a[1], b[1], "edge2")
    net.check_correct()
    net.contract(e1)
    net.check_correct()
    val = net.contract(e2)
    net.check_correct()
    self.assertAlmostEqual(val.get_tensor().numpy(), 12.0)
Example 3: _build_predict
def _build_predict(self, Xnew, full_cov=False):
    """
    Xnew is a data matrix of points at which we want to predict.
    This method computes
        p(F* | Y)
    where F* are values of the GP at Xnew and Y are the noisy observations
    at the training inputs X.
    """
    Kx = self.kern.K(self.X, Xnew)
    K = self.kern.K(self.X) + tf.eye(tf.shape(self.X)[0], dtype=settings.float_type) * self.likelihood.variance
    L = tf.cholesky(K)
    A = tf.matrix_triangular_solve(L, Kx, lower=True)
    V = tf.matrix_triangular_solve(L, self.Y - self.mean_function(self.X))
    fmean = tf.matmul(A, V, transpose_a=True) + self.mean_function(Xnew)
    if full_cov:
        fvar = self.kern.K(Xnew) - tf.matmul(A, A, transpose_a=True)
        shape = tf.stack([1, 1, tf.shape(self.Y)[1]])
        fvar = tf.tile(tf.expand_dims(fvar, 2), shape)
    else:
        fvar = self.kern.Kdiag(Xnew) - tf.reduce_sum(tf.square(A), 0)
        fvar = tf.tile(tf.reshape(fvar, (-1, 1)), [1, tf.shape(self.Y)[1]])
    return fmean, fvar
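The tf.eye call above implements a common Gaussian-process pattern: adding the observation-noise variance to the diagonal of the kernel matrix before the Cholesky factorization. A standalone sketch of just that pattern, with a made-up positive semi-definite matrix standing in for kern.K(X) and an assumed value for likelihood.variance:

import tensorflow as tf

M = tf.random_normal([5, 5], dtype=tf.float64)
K = tf.matmul(M, M, transpose_b=True)                  # positive semi-definite stand-in for kern.K(X)
noise_variance = 0.1                                   # assumed value for likelihood.variance
K_noisy = K + noise_variance * tf.eye(5, dtype=tf.float64)
L = tf.cholesky(K_noisy)                               # now safely factorizable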
Example 4: initial_state
def initial_state(self, batch_size, trainable=False):
    """Creates the initial memory.
    We should ensure each row of the memory is initialized to be unique,
    so initialize the matrix to be the identity. We then pad or truncate
    as necessary so that init_state is of size
    (batch_size, self._mem_slots, self._mem_size).
    Args:
      batch_size: The size of the batch.
      trainable: Whether the initial state is trainable. This is always True.
    Returns:
      init_state: A truncated or padded matrix of size
        (batch_size, self._mem_slots, self._mem_size).
    """
    init_state = tf.eye(self._mem_slots, batch_shape=[batch_size])
    # Pad the matrix with zeros.
    if self._mem_size > self._mem_slots:
        difference = self._mem_size - self._mem_slots
        pad = tf.zeros((batch_size, self._mem_slots, difference))
        init_state = tf.concat([init_state, pad], -1)
    # Truncation. Take the first `self._mem_size` components.
    elif self._mem_size < self._mem_slots:
        init_state = init_state[:, :, :self._mem_size]
    return init_state
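A minimal sketch of the pad-or-truncate logic above with concrete, made-up sizes standing in for self._mem_slots and self._mem_size:

import tensorflow as tf

batch_size, mem_slots, mem_size = 2, 3, 5
init_state = tf.eye(mem_slots, batch_shape=[batch_size])         # shape (2, 3, 3)
if mem_size > mem_slots:
    pad = tf.zeros((batch_size, mem_slots, mem_size - mem_slots))
    init_state = tf.concat([init_state, pad], -1)                 # padded to (2, 3, 5)
elif mem_size < mem_slots:
    init_state = init_state[:, :, :mem_size]                      # truncated instead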
Example 5: maximum_mean_discrepancy
def maximum_mean_discrepancy(k_xx, k_yy, k_xy):
    samples_x = tf.cast(tf.shape(k_xx)[0], dtype=tf.float32)
    samples_y = tf.cast(tf.shape(k_yy)[0], dtype=tf.float32)
    k_xx_diag = tf.multiply(k_xx, tf.eye(tf.shape(k_xx)[0]))
    k_xx = k_xx - k_xx_diag
    k_yy_diag = tf.multiply(k_yy, tf.eye(tf.shape(k_yy)[0]))
    k_yy = k_yy - k_yy_diag
    E_xx = tf.reduce_sum(k_xx) / (samples_x * (samples_x - 1))
    E_yy = tf.reduce_sum(k_yy) / (samples_y * (samples_y - 1))
    E_xy = tf.reduce_mean(k_xy)
    mmd_2 = E_xx + E_yy - 2 * E_xy
    mmd = tf.sqrt(tf.maximum(mmd_2, 0))
    return mmd
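A possible way to call the function above, assuming an RBF kernel; the rbf_kernel helper is hypothetical and only serves to produce the three kernel matrices the function expects:

import tensorflow as tf

def rbf_kernel(a, b, gamma=1.0):
    # Hypothetical helper: Gaussian kernel matrix from pairwise squared distances.
    sq_dist = (tf.reduce_sum(tf.square(a), 1, keepdims=True)
               - 2.0 * tf.matmul(a, b, transpose_b=True)
               + tf.transpose(tf.reduce_sum(tf.square(b), 1, keepdims=True)))
    return tf.exp(-gamma * sq_dist)

x = tf.random_normal([128, 16])
y = tf.random_normal([256, 16])
mmd = maximum_mean_discrepancy(rbf_kernel(x, x), rbf_kernel(y, y), rbf_kernel(x, y))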
Example 6: _build_predict
def _build_predict(self, Xnew, full_cov=False):
    """
    Compute the mean and variance of the latent function at some new points
    Xnew. For a derivation of the terms in here, see the associated SGPR
    notebook.
    """
    num_inducing = len(self.feature)
    err = self.Y - self.mean_function(self.X)
    Kuf = self.feature.Kuf(self.kern, self.X)
    Kuu = self.feature.Kuu(self.kern, jitter=settings.numerics.jitter_level)
    Kus = self.feature.Kuf(self.kern, Xnew)
    sigma = tf.sqrt(self.likelihood.variance)
    L = tf.cholesky(Kuu)
    A = tf.matrix_triangular_solve(L, Kuf, lower=True) / sigma
    B = tf.matmul(A, A, transpose_b=True) + tf.eye(num_inducing, dtype=settings.float_type)
    LB = tf.cholesky(B)
    Aerr = tf.matmul(A, err)
    c = tf.matrix_triangular_solve(LB, Aerr, lower=True) / sigma
    tmp1 = tf.matrix_triangular_solve(L, Kus, lower=True)
    tmp2 = tf.matrix_triangular_solve(LB, tmp1, lower=True)
    mean = tf.matmul(tmp2, c, transpose_a=True)
    if full_cov:
        var = self.kern.K(Xnew) + tf.matmul(tmp2, tmp2, transpose_a=True) \
              - tf.matmul(tmp1, tmp1, transpose_a=True)
        shape = tf.stack([1, 1, tf.shape(self.Y)[1]])
        var = tf.tile(tf.expand_dims(var, 2), shape)
    else:
        var = self.kern.Kdiag(Xnew) + tf.reduce_sum(tf.square(tmp2), 0) \
              - tf.reduce_sum(tf.square(tmp1), 0)
        shape = tf.stack([1, tf.shape(self.Y)[1]])
        var = tf.tile(tf.expand_dims(var, 1), shape)
    return mean + self.mean_function(Xnew), var
Example 7: radial_symmetry
def radial_symmetry(self, d_cutoff, d, atom_numbers):
    """ Radial Symmetry Function """
    embedding = tf.eye(np.max(self.atom_cases) + 1)
    atom_numbers_embedded = tf.nn.embedding_lookup(embedding, atom_numbers)
    Rs = np.linspace(0., self.radial_cutoff, self.radial_length)
    ita = np.ones_like(Rs) * 3 / (Rs[1] - Rs[0])**2
    Rs = tf.cast(np.reshape(Rs, (1, 1, 1, -1)), tf.float32)
    ita = tf.cast(np.reshape(ita, (1, 1, 1, -1)), tf.float32)
    length = ita.get_shape().as_list()[-1]
    d_cutoff = tf.stack([d_cutoff] * length, axis=3)
    d = tf.stack([d] * length, axis=3)
    out = tf.exp(-ita * tf.square(d - Rs)) * d_cutoff
    if self.atomic_number_differentiated:
        out_tensors = []
        for atom_type in self.atom_cases:
            selected_atoms = tf.expand_dims(
                tf.expand_dims(atom_numbers_embedded[:, :, atom_type], axis=1),
                axis=3)
            out_tensors.append(tf.reduce_sum(out * selected_atoms, axis=2))
        return tf.concat(out_tensors, axis=2)
    else:
        return tf.reduce_sum(out, axis=2)
Example 8: _build_likelihood
def _build_likelihood(self):
    """
    q_alpha, q_lambda are variational parameters, size N x R.
    This method computes the variational lower bound on the likelihood,
    which is:
        E_{q(F)} [ \log p(Y|F) ] - KL[ q(F) || p(F) ]
    with
        q(f) = N(f | K alpha + mean, [K^-1 + diag(square(lambda))]^-1) .
    """
    K = self.kern.K(self.X)
    K_alpha = tf.matmul(K, self.q_alpha)
    f_mean = K_alpha + self.mean_function(self.X)
    # compute the variance for each of the outputs
    I = tf.tile(tf.expand_dims(tf.eye(self.num_data, dtype=settings.float_type), 0),
                [self.num_latent, 1, 1])
    A = I + tf.expand_dims(tf.transpose(self.q_lambda), 1) * \
        tf.expand_dims(tf.transpose(self.q_lambda), 2) * K
    L = tf.cholesky(A)
    Li = tf.matrix_triangular_solve(L, I)
    tmp = Li / tf.expand_dims(tf.transpose(self.q_lambda), 1)
    f_var = 1. / tf.square(self.q_lambda) - tf.transpose(tf.reduce_sum(tf.square(tmp), 1))
    # some statistics about A are used in the KL
    A_logdet = 2.0 * tf.reduce_sum(tf.log(tf.matrix_diag_part(L)))
    trAi = tf.reduce_sum(tf.square(Li))
    KL = 0.5 * (A_logdet + trAi - self.num_data * self.num_latent +
                tf.reduce_sum(K_alpha * self.q_alpha))
    v_exp = self.likelihood.variational_expectations(f_mean, f_var, self.Y)
    return tf.reduce_sum(v_exp) - KL
Example 9: body
def body(self, features):
    with tf.variable_scope('string_embedding'):
        string_embedding = self.encode(features, 'inputs')
    if 'targets' in features:
        with tf.variable_scope('code_embedding'):
            code_embedding = self.encode(features, 'targets')
        string_embedding_norm = tf.nn.l2_normalize(string_embedding, axis=1)
        code_embedding_norm = tf.nn.l2_normalize(code_embedding, axis=1)
        # All-vs-All cosine distance matrix, reshaped as row-major.
        cosine_dist = 1.0 - tf.matmul(string_embedding_norm, code_embedding_norm,
                                      transpose_b=True)
        cosine_dist_flat = tf.reshape(cosine_dist, [-1, 1])
        # Positive samples on the diagonal, reshaped as row-major.
        label_matrix = tf.eye(tf.shape(cosine_dist)[0], dtype=tf.int32)
        label_matrix_flat = tf.reshape(label_matrix, [-1])
        logits = tf.concat([1.0 - cosine_dist_flat, cosine_dist_flat], axis=1)
        labels = tf.one_hot(label_matrix_flat, 2)
        loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels,
                                                       logits=logits)
        return string_embedding, {'training': loss}
    return string_embedding
Example 10: _update_ortho
def _update_ortho(self, v, i):
    s = self.gan.ops.shape(v)
    if len(s) == 4 and s[0] == s[1]:
        w = v
        newv = []
        #s = self.ops.shape(v_transpose)
        #identity = tf.reshape(identity, [s[0],s[1],1,1])
        #identity = tf.tile(identity, [1,1,s[2],s[3]])
        decay = self.config.decay or 0.01
        w = tf.transpose(w, perm=[2, 3, 0, 1])
        for i in range(self.config.iterations or 3):
            wt = tf.transpose(w, perm=[1, 0, 2, 3])
            w2 = tf.reshape(w, [-1, s[0], s[1]])
            wt2 = tf.reshape(wt, [-1, s[0], s[1]])
            wtw = tf.matmul(wt2, w2)
            eye = tf.eye(s[0], s[1])
            eye = tf.tile(eye, [1, s[2] * s[3]])
            eye = tf.reshape(eye, self.gan.ops.shape(w))
            wtw = tf.reshape(wtw, self.gan.ops.shape(w))
            qk = eye - wtw
            w = w * (eye + 0.5 * qk)
        w = tf.transpose(w, perm=[2, 3, 0, 1])
        newv = w
        newv = (1.0 + decay) * v - decay * newv
        newv = tf.reshape(newv, self.ops.shape(v))
        return tf.assign(v, newv)
    else:
        return None
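The loop above resembles a Newton-Schulz-style orthogonalization update. A simplified sketch of the textbook version of that update for a single square matrix W <- W (I + 0.5 (I - W^T W)); note this uses a matrix product where the code above uses an element-wise one, and it omits the decay and the 4-D reshuffling, so it is illustrative rather than equivalent:

import tensorflow as tf

def orthogonalize(w, iterations=3):
    # Iteratively nudge a square matrix toward orthogonality.
    eye = tf.eye(tf.shape(w)[0], dtype=w.dtype)
    for _ in range(iterations):
        w = tf.matmul(w, eye + 0.5 * (eye - tf.matmul(w, w, transpose_a=True)))
    return w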
Example 11: test_sample_mvn
def test_sample_mvn(session_tf, cov_structure, num_samples):
    """
    Draws 10,000 samples from a distribution
    with known mean and covariance. The test checks
    if the mean and covariance of the samples is
    close to the true mean and covariance.
    """
    N, D = 10000, 2
    means = tf.ones((N, D), dtype=float_type)
    if cov_structure == "full":
        covs = tf.eye(D, batch_shape=[N], dtype=float_type)
    elif cov_structure == "diag":
        covs = tf.ones((N, D), dtype=float_type)
    samples = _sample_mvn(means, covs, cov_structure, num_samples=num_samples)
    value = session_tf.run(samples)
    if num_samples is None:
        assert value.shape == (N, D)
    else:
        assert value.shape == (num_samples, N, D)
        value = value.reshape(-1, D)
    samples_mean = np.mean(value, axis=0)
    samples_cov = np.cov(value, rowvar=False)
    np.testing.assert_array_almost_equal(samples_mean, [1., 1.], decimal=1)
    np.testing.assert_array_almost_equal(samples_cov, [[1., 0.], [0., 1.]], decimal=1)
Example 12: testDimensionGuardDynamicShape
def testDimensionGuardDynamicShape(self):
    testee_lkj = tfd.LKJ(
        dimension=3, concentration=[1., 4.], validate_args=True)
    with self.assertRaisesOpError('dimension mismatch'):
        self.evaluate(
            testee_lkj.log_prob(
                tf.placeholder_with_default(tf.eye(4), shape=None)))
Example 13: testMultivariateNormalNd
def testMultivariateNormalNd(self, event_size, num_samples):
    def target_log_prob_fn(event):
        return tfd.MultivariateNormalFullCovariance(
            loc=tf.zeros(event_size),
            covariance_matrix=tf.eye(event_size)).log_prob(event)

    state = tf.zeros(event_size)
    samples = []
    for seed in range(num_samples):
        [state], _, _ = no_u_turn_sampler.kernel(
            target_log_prob_fn=target_log_prob_fn,
            current_state=[state],
            step_size=[0.3],
            seed=seed)
        npstate = state.numpy()
        samples.append([npstate[0], npstate[1]])
    samples = np.array(samples)
    plt.scatter(samples[:, 0], samples[:, 1])
    savefig("projection_chain_{}d_normal_{}_steps.png".format(
        event_size, num_samples))
    plt.close()
    target_samples = tfd.MultivariateNormalFullCovariance(
        loc=tf.zeros(event_size),
        covariance_matrix=tf.eye(event_size)).sample(
            num_samples, seed=4).numpy()
    plt.scatter(target_samples[:, 0], target_samples[:, 1])
    savefig("projection_independent_{}d_normal_{}_samples.png".format(
        event_size, num_samples))
    plt.close()
Example 14: _get_fldj_numerical
def _get_fldj_numerical(self, bijector, x, event_ndims,
                        eps=1.e-6,
                        input_to_vector=tfb.Identity,
                        output_to_vector=tfb.Identity):
    """Numerically approximate the forward log det Jacobian of a bijector.
    Args:
      bijector: the bijector whose Jacobian we wish to approximate
      x: the value for which we want to approximate the Jacobian
      event_ndims: number of dimensions in an event
      eps: epsilon to add when forming (f(x+eps)-f(x)) / eps
      input_to_vector: a bijector that maps the input value to a vector
      output_to_vector: a bijector that maps the output value to a vector
    Returns:
      A numerical approximation to the log det Jacobian of bijector.forward
      evaluated at x.
    """
    x_vector = input_to_vector.forward(x)
    n = tf.shape(x_vector)[-1]
    x_plus_eps_vector = x_vector + eps * tf.eye(n, dtype=x_vector.dtype)
    x_plus_eps = input_to_vector.inverse(x_plus_eps_vector)
    f_x = bijector.forward(x)
    f_x_vector = output_to_vector.forward(f_x)
    f_x_plus_eps = bijector.forward(x_plus_eps)
    f_x_plus_eps_vector = output_to_vector.forward(f_x_plus_eps)
    jacobian_numerical = (f_x_plus_eps_vector - f_x_vector) / eps
    return (
        tf.log(tf.abs(tf.matrix_determinant(jacobian_numerical))) +
        input_to_vector.forward_log_det_jacobian(x, event_ndims=event_ndims) -
        output_to_vector.forward_log_det_jacobian(f_x, event_ndims=event_ndims))
Example 15: distance_cutoff
def distance_cutoff(self, d, cutoff, flags):
    """ Generate distance matrix with trainable cutoff """
    # Cutoff with threshold Rc
    d_flag = flags * tf.sign(cutoff - d)
    d_flag = tf.nn.relu(d_flag)
    d_flag = d_flag * tf.expand_dims((1 - tf.eye(self.max_atoms)), 0)
    d = 0.5 * (tf.cos(np.pi * d / cutoff) + 1)
    return d * d_flag
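The (1 - tf.eye(self.max_atoms)) factor above zeroes the diagonal, so an atom never contributes a "distance" to itself. A small standalone sketch of that masking idea, with made-up shapes standing in for the class attributes:

import tensorflow as tf

max_atoms = 4
d = tf.random_uniform([8, max_atoms, max_atoms])            # assumed batch of pairwise-distance matrices
off_diag_mask = tf.expand_dims(1.0 - tf.eye(max_atoms), 0)  # zeros on the diagonal, ones elsewhere
d_no_self = d * off_diag_mask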