This article collects typical usage examples of the scipy.sparse.rand method in Python. If you have been wondering how exactly to use sparse.rand, or what it looks like in real code, the hand-picked examples below should help. You can also browse further usage examples from the scipy.sparse module.
The following shows 15 code examples of sparse.rand, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
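Before walking through the examples, here is a minimal, self-contained sketch of scipy.sparse.rand itself (the shape, density, format, and random_state values below are illustrative choices, not taken from any of the examples):

from scipy import sparse

# Build a 5x4 sparse matrix with roughly 25% of its entries non-zero,
# stored in CSR format; the stored values are uniform on [0, 1).
M = sparse.rand(5, 4, density=0.25, format='csr', random_state=42)
print(M.nnz)        # number of explicitly stored non-zeros
print(M.toarray())  # dense view of the same matrix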
Example 1: testVStackExecution
# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import rand [as alias]
def testVStackExecution(self):
    a_data = np.random.rand(10)
    b_data = np.random.rand(10)
    a = tensor(a_data, chunk_size=4)
    b = tensor(b_data, chunk_size=4)

    c = vstack([a, b])
    res = self.executor.execute_tensor(c, concat=True)[0]
    expected = np.vstack([a_data, b_data])
    self.assertTrue(np.array_equal(res, expected))

    a_data = np.random.rand(10, 20)
    b_data = np.random.rand(5, 20)
    a = tensor(a_data, chunk_size=3)
    b = tensor(b_data, chunk_size=4)

    c = vstack([a, b])
    res = self.executor.execute_tensor(c, concat=True)[0]
    expected = np.vstack([a_data, b_data])
    self.assertTrue(np.array_equal(res, expected))
Example 2: testDStackExecution
# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import rand [as alias]
def testDStackExecution(self):
    a_data = np.random.rand(10)
    b_data = np.random.rand(10)
    a = tensor(a_data, chunk_size=4)
    b = tensor(b_data, chunk_size=4)

    c = dstack([a, b])
    res = self.executor.execute_tensor(c, concat=True)[0]
    expected = np.dstack([a_data, b_data])
    self.assertTrue(np.array_equal(res, expected))

    a_data = np.random.rand(10, 20)
    b_data = np.random.rand(10, 20)
    a = tensor(a_data, chunk_size=3)
    b = tensor(b_data, chunk_size=4)

    c = dstack([a, b])
    res = self.executor.execute_tensor(c, concat=True)[0]
    expected = np.dstack([a_data, b_data])
    self.assertTrue(np.array_equal(res, expected))
Example 3: testColumnStackExecution
# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import rand [as alias]
def testColumnStackExecution(self):
    a_data = np.array((1, 2, 3))
    b_data = np.array((2, 3, 4))
    a = tensor(a_data, chunk_size=1)
    b = tensor(b_data, chunk_size=2)

    c = column_stack((a, b))
    res = self.executor.execute_tensor(c, concat=True)[0]
    expected = np.column_stack((a_data, b_data))
    np.testing.assert_equal(res, expected)

    a_data = np.random.rand(4, 2, 3)
    b_data = np.random.rand(4, 2, 3)
    a = tensor(a_data, chunk_size=1)
    b = tensor(b_data, chunk_size=2)

    c = column_stack((a, b))
    res = self.executor.execute_tensor(c, concat=True)[0]
    expected = np.column_stack((a_data, b_data))
    np.testing.assert_equal(res, expected)
Example 4: test_linear_regression_sparse_equal_dense
# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import rand [as alias]
def test_linear_regression_sparse_equal_dense(normalize, fit_intercept):
    # Test that linear regression agrees between sparse and dense
    rng = check_random_state(0)
    n_samples = 200
    n_features = 2
    X = rng.randn(n_samples, n_features)
    X[X < 0.1] = 0.
    Xcsr = sparse.csr_matrix(X)
    y = rng.rand(n_samples)
    params = dict(normalize=normalize, fit_intercept=fit_intercept)
    clf_dense = LinearRegression(**params)
    clf_sparse = LinearRegression(**params)
    clf_dense.fit(X, y)
    clf_sparse.fit(Xcsr, y)
    assert clf_dense.intercept_ == pytest.approx(clf_sparse.intercept_)
    assert_allclose(clf_dense.coef_, clf_sparse.coef_)
Example 5: test_robust_scaler_equivalence_dense_sparse
# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import rand [as alias]
def test_robust_scaler_equivalence_dense_sparse(density, strictly_signed):
    # Check the equivalence of the fitting with dense and sparse matrices
    X_sparse = sparse.rand(1000, 5, density=density).tocsc()
    if strictly_signed == 'positive':
        X_sparse.data = np.abs(X_sparse.data)
    elif strictly_signed == 'negative':
        X_sparse.data = -np.abs(X_sparse.data)
    elif strictly_signed == 'zeros':
        X_sparse.data = np.zeros(X_sparse.data.shape, dtype=np.float64)
    X_dense = X_sparse.toarray()

    scaler_sparse = RobustScaler(with_centering=False)
    scaler_dense = RobustScaler(with_centering=False)

    scaler_sparse.fit(X_sparse)
    scaler_dense.fit(X_dense)

    assert_allclose(scaler_sparse.scale_, scaler_dense.scale_)
Example 6: test_solve
# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import rand [as alias]
def test_solve(self):
    # Test whether the lu_solve command segfaults, as reported by Nils
    # Wagner for a 64-bit machine, 02 March 2005 (EJS)
    n = 20
    np.random.seed(0)  # make tests repeatable
    A = zeros((n, n), dtype=complex)
    x = np.random.rand(n)
    y = np.random.rand(n - 1) + 1j * np.random.rand(n - 1)
    r = np.random.rand(n)
    for i in range(len(x)):
        A[i, i] = x[i]
    for i in range(len(y)):
        A[i, i + 1] = y[i]
        A[i + 1, i] = conjugate(y[i])
    A = self.spmatrix(A)
    with suppress_warnings() as sup:
        sup.filter(SparseEfficiencyWarning, "splu requires CSC matrix format")
        x = splu(A).solve(r)
    assert_almost_equal(A * x, r)
Example 7: test_fancy_indexing_randomized
# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import rand [as alias]
def test_fancy_indexing_randomized(self):
    np.random.seed(1234)  # make runs repeatable

    NUM_SAMPLES = 50
    M = 6
    N = 4

    D = np.asmatrix(np.random.rand(M, N))
    D = np.multiply(D, D > 0.5)

    I = np.random.randint(-M + 1, M, size=NUM_SAMPLES)
    J = np.random.randint(-N + 1, N, size=NUM_SAMPLES)

    S = self.spmatrix(D)

    SIJ = S[I, J]
    if isspmatrix(SIJ):
        SIJ = SIJ.todense()
    assert_equal(SIJ, D[I, J])

    I_bad = I + M
    J_bad = J - N

    assert_raises(IndexError, S.__getitem__, (I_bad, J))
    assert_raises(IndexError, S.__getitem__, (I, J_bad))
Example 8: test_arnoldi
# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import rand [as alias]
def test_arnoldi(self):
    np.random.seed(1234)  # seed the RNG so the run is repeatable

    A = eye(10000) + rand(10000, 10000, density=1e-4)
    b = np.random.rand(10000)

    # The inner arnoldi should be equivalent to gmres
    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning, ".*called without specifying.*")
        x0, flag0 = gcrotmk(A, b, x0=zeros(A.shape[0]), m=15, k=0, maxiter=1)
        x1, flag1 = gmres(A, b, x0=zeros(A.shape[0]), restart=15, maxiter=1)

    assert_equal(flag0, 1)
    assert_equal(flag1, 1)
    assert_(np.linalg.norm(A.dot(x0) - b) > 1e-3)

    assert_allclose(x0, x1)
Example 9: test_arnoldi
# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import rand [as alias]
def test_arnoldi(self):
    np.random.seed(1234)  # seed the RNG so the run is repeatable

    A = eye(10000) + rand(10000, 10000, density=1e-4)
    b = np.random.rand(10000)

    # The inner arnoldi should be equivalent to gmres
    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning, ".*called without specifying.*")
        x0, flag0 = lgmres(A, b, x0=zeros(A.shape[0]), inner_m=15, maxiter=1)
        x1, flag1 = gmres(A, b, x0=zeros(A.shape[0]), restart=15, maxiter=1)

    assert_equal(flag0, 1)
    assert_equal(flag1, 1)
    assert_(np.linalg.norm(A.dot(x0) - b) > 1e-3)

    assert_allclose(x0, x1)
Example 10: test_conversion_with_sparse_X
# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import rand [as alias]
def test_conversion_with_sparse_X(self):
    """Tests conversion of a model that's fitted with sparse data."""
    num_samples = 100
    num_dims = 64
    sparse_X = sparse.rand(
        num_samples, num_dims, format="csr"
    )  # KNeighborsClassifier only supports CSR format
    y = self.iris_y[
        0:num_samples
    ]  # the labels themselves don't matter - just use 100 of the Iris ones

    sklearn_model = KNeighborsClassifier(algorithm="brute")
    sklearn_model.fit(sparse_X, y)

    coreml_model = sklearn.convert(sklearn_model)
    coreml_spec = coreml_model.get_spec()
    self.assertIsNotNone(coreml_spec)
Example 11: test_simulate_glm
# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import rand [as alias]
def test_simulate_glm(distr):
    """Test that every generative model can be simulated from."""
    random_state = 1
    state = np.random.RandomState(random_state)
    n_samples, n_features = 10, 3

    # sample random coefficients
    beta0 = state.rand()
    beta = state.normal(0.0, 1.0, n_features)
    X = state.normal(0.0, 1.0, [n_samples, n_features])

    simulate_glm(distr, beta0, beta, X, random_state=random_state)

    with pytest.raises(ValueError, match="'beta0' must be float"):
        simulate_glm(distr, np.array([1.0]), beta, X, random_state)
    with pytest.raises(ValueError, match="'beta' must be 1D"):
        simulate_glm(distr, 1.0, np.atleast_2d(beta), X, random_state)

    # If the distribution name is garbage it will fail
    distr = 'multivariate_gaussian_poisson'
    with pytest.raises(ValueError, match="'distr' must be in"):
        simulate_glm(distr, 1.0, 1.0, np.array([[1.0]]))
Example 12: generate_dummy_data
# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import rand [as alias]
def generate_dummy_data(num_users=15000, num_items=30000, interaction_density=.00045,
                        num_user_features=200, num_item_features=200,
                        n_features_per_user=20, n_features_per_item=20,
                        pos_int_ratio=.5, return_datasets=False):
    if pos_int_ratio <= 0.0:
        raise Exception("pos_int_ratio must be > 0")

    print("Generating positive interactions")
    interactions = sp.rand(num_users, num_items,
                           density=interaction_density * pos_int_ratio)
    if pos_int_ratio < 1.0:
        print("Generating negative interactions")
        interactions += -1 * sp.rand(num_users, num_items,
                                     density=interaction_density * (1 - pos_int_ratio))

    print("Generating user features")
    user_features = sp.rand(num_users, num_user_features,
                            density=float(n_features_per_user) / num_user_features)

    print("Generating item features")
    item_features = sp.rand(num_items, num_item_features,
                            density=float(n_features_per_item) / num_item_features)

    if return_datasets:
        interactions = create_tensorrec_dataset_from_sparse_matrix(interactions)
        user_features = create_tensorrec_dataset_from_sparse_matrix(user_features)
        item_features = create_tensorrec_dataset_from_sparse_matrix(item_features)

    return interactions, user_features, item_features
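As a brief usage note (this call is hypothetical and not part of the original snippet): with return_datasets left at False, the helper returns three scipy.sparse matrices, so a quick smoke test could look like the following.

interactions, user_features, item_features = generate_dummy_data(
    num_users=100, num_items=200, interaction_density=0.05)
# Expected shapes with the default feature settings: (100, 200), (100, 200), (200, 200)
print(interactions.shape, user_features.shape, item_features.shape)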
Example 13: test_overflow_predict
# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import rand [as alias]
def test_overflow_predict():
    no_users, no_items = (1000, 1000)

    train = sp.rand(no_users, no_items, format="csr", random_state=42)

    model = LightFM(loss="warp")
    model.fit(train)

    with pytest.raises((ValueError, OverflowError)):
        print(
            model.predict(
                1231241241231241414,
                np.arange(no_items),
                user_features=sp.identity(no_users),
            )
        )
Example 14: __init__
# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import rand [as alias]
def __init__(self, ndims=36, nbasis=72, nbatch=100, logalpha=None, W=None, b=None):
    """ Product of T experts, assumes a fixed W that is sparse and alpha that is
    """
    self.ndims = ndims
    self.nbasis = nbasis
    self.nbatch = nbatch

    if W is None:
        # nbasis // 2 keeps the column count an integer for sparse.rand
        rand_val = rand(ndims, nbasis // 2, density=0.25)
        W = np.concatenate([rand_val.toarray(), -rand_val.toarray()], axis=1)
    self.W = theano.shared(np.array(W, dtype='float32'), 'W')

    if logalpha is None:
        logalpha = np.random.randn(nbasis,)
    self.logalpha = theano.shared(np.array(logalpha, dtype='float32'), 'alpha')

    if b is None:
        b = np.zeros((nbasis,))
    self.b = theano.shared(np.array(b, dtype='float32'), 'b')

    X = T.matrix()
    E = self.E_def(X)
    dEdX = T.grad(T.sum(E), X)

    #@overrides(Distribution)
    self.E_val = theano.function([X], E, allow_input_downcast=True)
    #@overrides(Distribution)
    self.dEdX_val = theano.function([X], dEdX, allow_input_downcast=True)

    super(ProductOfT, self).__init__(ndims, nbatch)
Example 15: _get_uniform_dataset_csr
# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import rand [as alias]
def _get_uniform_dataset_csr(num_rows, num_cols, density=0.1, dtype=None,
                             data_init=None, shuffle_csr_indices=False):
    """Returns CSRNDArray with uniform distribution
    This generates a csr matrix with totalnnz unique randomly chosen numbers
    from num_rows*num_cols and arranges them in the 2d array in the
    following way:
    row_index = (random_number_generated / num_rows)
    col_index = random_number_generated - row_index * num_cols
    """
    _validate_csr_generation_inputs(num_rows, num_cols, density,
                                    distribution="uniform")
    try:
        from scipy import sparse as spsp
        csr = spsp.rand(num_rows, num_cols, density, dtype=dtype, format="csr")
        if data_init is not None:
            csr.data.fill(data_init)
        if shuffle_csr_indices is True:
            shuffle_csr_column_indices(csr)
        result = mx.nd.sparse.csr_matrix((csr.data, csr.indices, csr.indptr),
                                         shape=(num_rows, num_cols), dtype=dtype)
    except ImportError:
        assert data_init is None, \
            "data_init option is not supported when scipy is absent"
        assert not shuffle_csr_indices, \
            "shuffle_csr_indices option is not supported when scipy is absent"
        # scipy not available. try to generate one from a dense array
        dns = mx.nd.random.uniform(shape=(num_rows, num_cols), dtype=dtype)
        masked_dns = dns * (dns < density)
        result = masked_dns.tostype('csr')
    return result
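A small usage sketch, assuming MXNet and SciPy are installed and that this helper (plus its private validation utilities) is importable from the surrounding test module; the arguments are illustrative only:

# Hypothetical call: a 100x50 CSRNDArray with ~20% non-zeros,
# every stored value forced to 1.0 via data_init.
csr_nd = _get_uniform_dataset_csr(100, 50, density=0.2,
                                  dtype='float32', data_init=1.0)
print(csr_nd.shape, csr_nd.stype)  # (100, 50) csr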