This article collects typical usage examples of the scipy.sparse.identity method in Python. If you have been wondering what sparse.identity does, how to call it, or what real-world code that uses it looks like, the curated examples below should help. You can also explore further usage examples from its parent module, scipy.sparse.
Fifteen code examples of the sparse.identity method are shown below, ordered by popularity by default.
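Before the examples, here is a minimal sketch of the method itself; its signature is scipy.sparse.identity(n, dtype='d', format=None):

from scipy import sparse

# 4x4 sparse identity matrix, stored in CSR format with float64 entries
I = sparse.identity(4, dtype='d', format='csr')
print(I.toarray())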
Example 1: SpectralClustering

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import identity [as alias]
def SpectralClustering(CKSym, n):
    # This is a direct port of JHU vision lab code. Could probably use sklearn's SpectralClustering instead.
    CKSym = CKSym.astype(float)
    N, _ = CKSym.shape
    MAXiter = 1000  # Maximum number of iterations for KMeans
    REPlic = 20  # Number of replications for KMeans
    DN = np.diag(np.divide(1, np.sqrt(np.sum(CKSym, axis=0) + np.finfo(float).eps)))
    LapN = identity(N).toarray().astype(float) - np.matmul(np.matmul(DN, CKSym), DN)
    _, _, vN = np.linalg.svd(LapN)
    vN = vN.T
    kerN = vN[:, N - n:N]
    normN = np.sqrt(np.sum(np.square(kerN), axis=1))
    kerNS = np.divide(kerN, normN.reshape(len(normN), 1) + np.finfo(float).eps)
    # note: the n_jobs argument was deprecated and later removed from sklearn's KMeans; drop it on recent versions
    km = KMeans(n_clusters=n, n_init=REPlic, max_iter=MAXiter, n_jobs=-1).fit(kerNS)
    return km.labels_
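A hypothetical call of the function above, assuming the imports indicated in the header comments (numpy as np, scipy.sparse.identity, sklearn.cluster.KMeans) and a scikit-learn version that still accepts n_jobs; the affinity matrix here is made up for illustration:

import numpy as np

# two blocks of mutually-similar points -> two clusters
CKSym = np.kron(np.eye(2), np.ones((3, 3)))
labels = SpectralClustering(CKSym, n=2)
print(labels)  # e.g. [0 0 0 1 1 1] (cluster ids may be permuted)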
Example 2: to_unitary

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import identity [as alias]
def to_unitary(scaled_unitary):
    """
    Compute the scaling factor required to turn a scalar multiple of a unitary matrix
    into a unitary matrix.

    Parameters
    ----------
    scaled_unitary : ndarray
        A scaled unitary matrix

    Returns
    -------
    scale : float
    unitary : ndarray
        Such that `scale * unitary == scaled_unitary`.
    """
    scaled_identity = _np.dot(scaled_unitary, _np.conjugate(scaled_unitary.T))
    scale = _np.sqrt(scaled_identity[0, 0])
    assert(_np.allclose(scaled_identity / (scale**2), _np.identity(scaled_identity.shape[0], 'd'))), \
        "Given `scaled_unitary` does not appear to be a scaled unitary matrix!"
    return scale, (scaled_unitary / scale)
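A quick usage sketch, assuming numpy is available as _np in the module (as the leading underscore suggests); the input matrix is made up:

import numpy as _np

U = _np.array([[0, 1], [1, 0]], dtype=complex)  # a 2x2 unitary (Pauli X)
scale, unitary = to_unitary(3.0 * U)            # pass in a scaled copy
# scale is approximately 3.0 and unitary is approximately U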
Example 3: test_sparse_lindblad_param

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import identity [as alias]
def test_sparse_lindblad_param(self):
    # Test sparse Lindblad gates
    print("\nGate Test:")
    SparseId = sps.identity(4**2, 'd', 'csr')

    gate = LindbladDenseOp.from_operation_matrix(np.identity(4**2, 'd'))
    print("gate Errgen type (should be dense):", type(gate.errorgen.todense()))
    self.assertIsInstance(gate.errorgen.todense(), np.ndarray)

    sparseOp = LindbladOp.from_operation_matrix(SparseId)
    print("sparseOp Errgen type (should be sparse):", type(sparseOp.errorgen.tosparse()))
    self.assertIsInstance(sparseOp.errorgen.tosparse(), sps.csr_matrix)
    self.assertArraysAlmostEqual(gate.errorgen.todense(), sparseOp.errorgen.todense())

    perfectG = std2Q_XYICNOT.target_model().operations['Gix'].copy()
    noisyG = std2Q_XYICNOT.target_model().operations['Gix'].copy()
    noisyG.depolarize(0.9)
    Sparse_noisyG = sps.csr_matrix(noisyG, dtype='d')
    Sparse_perfectG = sps.csr_matrix(perfectG, dtype='d')
    op2 = LindbladDenseOp.from_operation_matrix(noisyG, perfectG)
    sparseGate2 = LindbladOp.from_operation_matrix(Sparse_noisyG, Sparse_perfectG)
    print("sparseGate2 Errgen type (should be sparse):", type(sparseGate2.errorgen.tosparse()))
    self.assertIsInstance(sparseGate2.errorgen.tosparse(), sps.csr_matrix)
    #print("errgen = \n"); pygsti.tools.print_mx(op2.err_gen, width=4, prec=1)
    #print("sparse errgen = \n"); pygsti.tools.print_mx(sparseGate2.err_gen.toarray(), width=4, prec=1)
    self.assertArraysAlmostEqual(op2.errorgen.todense(), sparseGate2.errorgen.todense())
Example 4: _fit

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import identity [as alias]
def _fit(self):
    # Versions using sparse matrices
    # adj = nx.adjacency_matrix(self._G)
    # ident = sparse.identity(len(self._G.nodes)).tocsc()
    # sim = inv(ident - adj.multiply(self.beta).T) - ident
    # adj = nx.adjacency_matrix(self._G)
    # aux = adj.multiply(-self.beta).T
    # aux.setdiag(1 + aux.diagonal(), k=0)
    # sim = inv(aux)
    # sim.setdiag(sim.diagonal() - 1)
    # print(sim.nnz)
    # print(adj.nnz)
    # Version using dense matrices
    adj = nx.adjacency_matrix(self._G)
    aux = adj.T.multiply(-self.beta).todense()
    np.fill_diagonal(aux, 1 + aux.diagonal())
    sim = np.linalg.inv(aux)
    np.fill_diagonal(sim, sim.diagonal() - 1)
    return sim
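The commented-out sparse variant computes the Katz-style similarity (I - beta * A^T)^(-1) - I directly. A self-contained sketch of that formula, using a small built-in graph and a hypothetical beta chosen below 1/spectral_radius so the inverse exists:

import networkx as nx
from scipy import sparse
from scipy.sparse.linalg import inv

G = nx.karate_club_graph()
beta = 0.005  # must satisfy beta < 1/spectral_radius(A) for the series to converge
adj = nx.adjacency_matrix(G)
ident = sparse.identity(G.number_of_nodes(), format='csc')
sim = inv((ident - adj.multiply(beta).T).tocsc()) - ident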
Example 5: suggest

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import identity [as alias]
def suggest(self, method=s.RANDOM, eps=1e-8, *args, **kwargs):
    if method not in s.suggest_methods:
        raise Exception("Unknown suggest method: %s" % method)
    if method == s.RANDOM:
        x = np.random.randn(self.n)
    elif method == s.SPECTRAL:
        if self.spectral_sol is None:
            self.spectral_sol, self.spectral_bound = solve_spectral(self.qcqp_form, *args, **kwargs)
            if self.maximize_flag:
                self.spectral_bound *= -1
        x = self.spectral_sol
    elif method == s.SDR:
        if self.sdr_sol is None:
            self.sdr_sol, self.sdr_bound = solve_sdr(self.qcqp_form, *args, **kwargs)
            if self.maximize_flag:
                self.sdr_bound *= -1
            self.mu = np.asarray(self.sdr_sol[:-1, -1]).flatten()
            self.Sigma = self.sdr_sol[:-1, :-1] - self.mu*self.mu.T + eps*sp.identity(self.n)
        x = np.random.multivariate_normal(self.mu, self.Sigma)
    assign_vars(self.prob.variables(), x)
    f0 = self.qcqp_form.f0.eval(x)
    if self.maximize_flag: f0 *= -1
    return (f0, max(self.qcqp_form.violations(x)))
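The eps * sp.identity(self.n) term above nudges the SDR covariance onto the positive semidefinite cone so that np.random.multivariate_normal accepts it. A standalone sketch of that trick with made-up data:

import numpy as np
import scipy.sparse as sp

n, eps = 5, 1e-8
samples = np.random.randn(3, n)                      # only 3 samples -> rank-deficient covariance
Sigma = np.cov(samples, rowvar=False)
Sigma_reg = Sigma + eps * sp.identity(n).toarray()   # lift the zero eigenvalues slightly
x = np.random.multivariate_normal(np.zeros(n), Sigma_reg)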
Example 6: get_measurements

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import identity [as alias]
def get_measurements(model, domain_shape):
    # model is a set of contingency tables to calculate
    # each contingency table is a list of [(attribute, size)]
    M = []
    for table in model:
        Q = [np.ones((1, size)) for size in domain_shape]
        for attribute, size in table:
            full_size = domain_shape[attribute]
            I = sparse.identity(size)
            if size != full_size:
                P = PrivBayesSelect.domain_transform(size, full_size)
                Q[attribute] = I * P
            elif size == full_size:
                Q[attribute] = I
            else:
                print('bug here')
        M.append(reduce(sparse.kron, Q))
    return sparse.vstack(M)
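The reduce(sparse.kron, Q) line builds each measurement as a Kronecker product of per-attribute query matrices: an all-ones row marginalizes an attribute out, while sparse.identity keeps it intact. A minimal sketch with a made-up two-attribute domain:

from functools import reduce
import numpy as np
from scipy import sparse

domain_shape = (3, 4)
Q = [np.ones((1, size)) for size in domain_shape]  # start by marginalizing every attribute
Q[1] = sparse.identity(4)                          # ...but keep attribute 1 at full resolution
M = reduce(sparse.kron, Q)
print(M.shape)  # (4, 12): 4 queries over the flattened 3*4 domain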
Example 7: _zeropi_operator_in_product_basis

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import identity [as alias]
def _zeropi_operator_in_product_basis(self, zeropi_operator, zeropi_evecs=None):
    """Helper method that converts a zeropi operator into one in the product basis.

    Returns
    -------
    scipy.sparse.csc_matrix
        operator written in the product basis
    """
    zeropi_dim = self.zeropi_cutoff
    zeta_dim = self.zeta_cutoff
    if zeropi_evecs is None:
        _, zeropi_evecs = self._zeropi.eigensys(evals_count=zeropi_dim)
    op_eigen_basis = sparse.dia_matrix((zeropi_dim, zeropi_dim),
                                       dtype=np.complex_)  # is this guaranteed to be zero?
    op_zeropi = spec_utils.get_matrixelement_table(zeropi_operator, zeropi_evecs)
    for n in range(zeropi_dim):
        for m in range(zeropi_dim):
            op_eigen_basis += op_zeropi[n, m] * op.hubbard_sparse(n, m, zeropi_dim)
    return sparse.kron(op_eigen_basis, sparse.identity(zeta_dim, format='csc', dtype=np.complex_), format='csc')
Example 8: sparse_kinetic_mat

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import identity [as alias]
def sparse_kinetic_mat(self):
    """
    Kinetic energy portion of the Hamiltonian.
    TODO: update this method to use single-variable operator methods

    Returns
    -------
    scipy.sparse.csc_matrix
        matrix representing the kinetic energy operator
    """
    pt_count = self.grid.pt_count
    dim_theta = 2 * self.ncut + 1
    identity_phi = sparse.identity(pt_count, format='csc', dtype=np.complex_)
    identity_theta = sparse.identity(dim_theta, format='csc', dtype=np.complex_)
    kinetic_matrix_phi = self.grid.second_derivative_matrix(prefactor=-2.0 * self.ECJ)
    diag_elements = 2.0 * self.ECS * np.square(np.arange(-self.ncut + self.ng, self.ncut + 1 + self.ng))
    kinetic_matrix_theta = sparse.dia_matrix((diag_elements, [0]), shape=(dim_theta, dim_theta)).tocsc()
    kinetic_matrix = (sparse.kron(kinetic_matrix_phi, identity_theta, format='csc')
                      + sparse.kron(identity_phi, kinetic_matrix_theta, format='csc'))
    kinetic_matrix -= 2.0 * self.ECS * self.dCJ * self.i_d_dphi_operator() * self.n_theta_operator()
    return kinetic_matrix
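The kinetic matrix above has the classic Kronecker-sum structure kron(K_phi, I_theta) + kron(I_phi, K_theta), which lets each one-variable operator act on its own factor of the product space. A generic sketch of that pattern with hypothetical dimensions:

from scipy import sparse

n_phi, n_theta = 6, 5
K_phi = sparse.diags([1.0, -2.0, 1.0], [-1, 0, 1], shape=(n_phi, n_phi), format='csc')  # 1D second-derivative stencil
K_theta = sparse.diags([2.0], [0], shape=(n_theta, n_theta), format='csc')              # diagonal term for the other variable
I_phi = sparse.identity(n_phi, format='csc')
I_theta = sparse.identity(n_theta, format='csc')
K = sparse.kron(K_phi, I_theta, format='csc') + sparse.kron(I_phi, K_theta, format='csc')
print(K.shape)  # (30, 30)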
Example 9: A_to_diffusion_kernel

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import identity [as alias]
def A_to_diffusion_kernel(A, k):
    """
    Computes [A**0, A**1, ..., A**k]

    :param A: 2d numpy array
    :param k: integer, degree of series
    :return: 3d numpy array [A**0, A**1, ..., A**k]
    """
    assert k >= 0
    Apow = [np.identity(A.shape[0])]
    if k > 0:
        d = A.sum(0)
        Apow.append(A / (d + 1.0))
        for i in range(2, k + 1):
            Apow.append(np.dot(A / (d + 1.0), Apow[-1]))
    return np.transpose(np.asarray(Apow, dtype='float32'), (1, 0, 2))
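A quick call with a made-up 3-node path graph, to show the output layout:

import numpy as np

A = np.array([[0., 1., 0.],
              [1., 0., 1.],
              [0., 1., 0.]])
K = A_to_diffusion_kernel(A, k=2)
print(K.shape)  # (3, 3, 3): for each node, its rows of the k+1 (degree-normalized) powers stacked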
Example 10: sparse_A_to_diffusion_kernel

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import identity [as alias]
def sparse_A_to_diffusion_kernel(A, k):
    assert k >= 0
    num_nodes = A.shape[0]
    Apow = [sp.identity(num_nodes)]
    if k > 0:
        d = A.sum(0)
        Apow.append(A / (d + 1.0))
        for i in range(2, k + 1):
            Apow.append((A / (d + 1.0)).dot(Apow[-1]))
    return Apow
Example 11: get_movielens_100k

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import identity [as alias]
def get_movielens_100k(min_positive_score=4, negative_value=0):
    movielens_100k_dict = datasets.fetch_movielens(indicator_features=True, genre_features=True)

    def flip_ratings(ratings_matrix):
        ratings_matrix.data = np.array([1 if rating >= min_positive_score else negative_value
                                        for rating in ratings_matrix.data])
        return ratings_matrix

    test_interactions = flip_ratings(movielens_100k_dict['test'])
    train_interactions = flip_ratings(movielens_100k_dict['train'])

    # Create indicator features for all users
    num_users = train_interactions.shape[0]
    user_features = sp.identity(num_users)

    # Movie titles
    titles = movielens_100k_dict['item_labels']

    return train_interactions, test_interactions, user_features, movielens_100k_dict['item_features'], titles
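A hedged usage note: calling the function fetches the MovieLens 100k data through LightFM's fetch_movielens (943 users, 1682 items), so the shapes below are what one would expect:

train, test, user_features, item_features, titles = get_movielens_100k(min_positive_score=4)
print(train.shape)          # (943, 1682)
print(user_features.shape)  # (943, 943): identity, one indicator feature per user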
Example 12: test_overflow_predict

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import identity [as alias]
def test_overflow_predict():
    no_users, no_items = (1000, 1000)
    train = sp.rand(no_users, no_items, format="csr", random_state=42)
    model = LightFM(loss="warp")
    model.fit(train)
    with pytest.raises((ValueError, OverflowError)):
        print(
            model.predict(
                1231241241231241414,
                np.arange(no_items),
                user_features=sp.identity(no_users),
            )
        )
Example 13: load_adj

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import identity [as alias]
def load_adj(pkl_filename, adjtype):
    sensor_ids, sensor_id_to_ind, adj_mx = load_pickle(pkl_filename)
    if adjtype == "scalap":
        adj = [calculate_scaled_laplacian(adj_mx)]
    elif adjtype == "normlap":
        adj = [calculate_normalized_laplacian(adj_mx).astype(np.float32).todense()]
    elif adjtype == "symnadj":
        adj = [sym_adj(adj_mx)]
    elif adjtype == "transition":
        adj = [asym_adj(adj_mx)]
    elif adjtype == "doubletransition":
        adj = [asym_adj(adj_mx), asym_adj(np.transpose(adj_mx))]
    elif adjtype == "identity":
        adj = [np.diag(np.ones(adj_mx.shape[0])).astype(np.float32)]
    else:
        error = 0
        assert error, "adj type not defined"
    return sensor_ids, sensor_id_to_ind, adj
Example 14: _compute

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import identity [as alias]
def _compute(self, data_1, data_2):
    data_1 = basic.graphs_to_adjacency_lists(data_1)
    data_2 = basic.graphs_to_adjacency_lists(data_2)
    res = np.zeros((len(data_1), len(data_2)))
    N = len(data_1) * len(data_2)
    for i, graph1 in enumerate(data_1):
        for j, graph2 in enumerate(data_2):
            # norm1, norm2 - normalized adjacency matrices
            norm1 = _norm(graph1)
            norm2 = _norm(graph2)
            # if the graphs are unweighted, W_prod = kron(a_norm(g1), a_norm(g2))
            w_prod = kron(lil_matrix(norm1), lil_matrix(norm2))
            starting_prob = np.ones(w_prod.shape[0]) / (w_prod.shape[0])
            stop_prob = starting_prob
            # first solve (I - lambda * W_prod) * x = p_prod
            A = identity(w_prod.shape[0]) - (w_prod * self._lmb)
            x = lsqr(A, starting_prob)
            res[i, j] = stop_prob.T.dot(x[0])
            # print(float(len(data_2) * i + j) / float(N), "%")
    return res
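The core of this kernel is the sparse linear solve (I - lambda * W_prod) x = p, done here with lsqr. A self-contained sketch of just that step, on a made-up random sparse matrix:

import numpy as np
from scipy.sparse import identity, random as sparse_random
from scipy.sparse.linalg import lsqr

n, lmb = 20, 0.1
W = sparse_random(n, n, density=0.2, format='csr', random_state=0)
p = np.ones(n) / n
A = identity(n, format='csr') - lmb * W
x = lsqr(A, p)[0]   # least-squares solution of A x = p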
Example 15: test_invXXXBlockDiagonal

# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import identity [as alias]
def test_invXXXBlockDiagonal(self):
    a = [np.random.rand(5, 1) for i in range(4)]
    B = inv2X2BlockDiagonal(*a)
    A = sp.vstack((sp.hstack((sdiag(a[0]), sdiag(a[1]))),
                   sp.hstack((sdiag(a[2]), sdiag(a[3])))))
    Z2 = B*A - sp.identity(10)
    self.assertTrue(np.linalg.norm(Z2.todense().ravel(), 2) < TOL)

    a = [np.random.rand(5, 1) for i in range(9)]
    B = inv3X3BlockDiagonal(*a)
    A = sp.vstack((sp.hstack((sdiag(a[0]), sdiag(a[1]), sdiag(a[2]))),
                   sp.hstack((sdiag(a[3]), sdiag(a[4]), sdiag(a[5]))),
                   sp.hstack((sdiag(a[6]), sdiag(a[7]), sdiag(a[8])))))
    Z3 = B*A - sp.identity(15)
    self.assertTrue(np.linalg.norm(Z3.todense().ravel(), 2) < TOL)