This article collects typical usage examples of the Python method theano.sparse.dot. If you are wondering how to call sparse.dot in Python, how it is typically used, or what working examples look like, the curated code samples below may help. You can also explore further usage examples from the containing module, theano.sparse.
The following presents 15 code examples of the sparse.dot method, sorted by popularity by default.
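For orientation, here is a minimal, self-contained sketch of the basic call pattern before the collected examples: multiplying a symbolic sparse CSR matrix by a dense matrix with theano.sparse.dot. The variable names and values are illustrative only and do not come from the examples below.

import numpy as np
import scipy.sparse as sp
import theano
import theano.sparse
import theano.tensor as T

# Symbolic inputs: a sparse CSR matrix and a dense matrix
x = theano.sparse.csr_matrix('x', dtype='float64')
y = T.matrix('y', dtype='float64')

# Sparse-dense product; with one dense operand, sparse.dot returns a dense result
z = theano.sparse.dot(x, y)
f = theano.function([x, y], z)

# Evaluate on concrete values
x_val = sp.csr_matrix(np.eye(3))
y_val = np.arange(9.0).reshape(3, 3)
print(f(x_val, y_val))  # equals y_val, since x_val is the identity matrix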
Example 1: test_dot_sparse_sparse
# Required import: from theano import sparse [as alias]
# Or: from theano.sparse import dot [as alias]
def test_dot_sparse_sparse(self):
    # test dot for 2 input sparse matrix
    sparse_dtype = 'float64'
    sp_mat = {'csc': sp.csc_matrix,
              'csr': sp.csr_matrix,
              'bsr': sp.csr_matrix}

    for sparse_format_a in ['csc', 'csr', 'bsr']:
        for sparse_format_b in ['csc', 'csr', 'bsr']:
            a = SparseType(sparse_format_a, dtype=sparse_dtype)()
            b = SparseType(sparse_format_b, dtype=sparse_dtype)()
            d = theano.dot(a, b)
            f = theano.function([a, b], theano.Out(d, borrow=True))
            topo = f.maker.fgraph.toposort()
            for M, N, K, nnz in [(4, 3, 2, 3),
                                 (40, 30, 20, 3),
                                 (40, 30, 20, 30),
                                 (400, 3000, 200, 6000),
                                 ]:
                a_val = sp_mat[sparse_format_a](
                    random_lil((M, N), sparse_dtype, nnz))
                b_val = sp_mat[sparse_format_b](
                    random_lil((N, K), sparse_dtype, nnz))
                f(a_val, b_val)
Example 2: test_csr_dense
# Required import: from theano import sparse [as alias]
# Or: from theano.sparse import dot [as alias]
def test_csr_dense(self):
    x = theano.sparse.csr_matrix('x')
    y = theano.tensor.matrix('y')
    v = theano.tensor.vector('v')

    for (x, y, x_v, y_v) in [(x, y, self.x_csr, self.y),
                             (x, v, self.x_csr, self.v_100),
                             (v, x, self.v_10, self.x_csr)]:
        f_a = theano.function([x, y], theano.sparse.dot(x, y))
        f_b = lambda x, y: x * y

        utt.assert_allclose(f_a(x_v, y_v), f_b(x_v, y_v))

        # Test infer_shape
        self._compile_and_check([x, y], [theano.sparse.dot(x, y)],
                                [x_v, y_v],
                                (Dot, Usmm, UsmmCscDense))
Example 3: test_csc_dense
# Required import: from theano import sparse [as alias]
# Or: from theano.sparse import dot [as alias]
def test_csc_dense(self):
    x = theano.sparse.csc_matrix('x')
    y = theano.tensor.matrix('y')
    v = theano.tensor.vector('v')

    for (x, y, x_v, y_v) in [(x, y, self.x_csc, self.y),
                             (x, v, self.x_csc, self.v_100),
                             (v, x, self.v_10, self.x_csc)]:
        f_a = theano.function([x, y], theano.sparse.dot(x, y))
        f_b = lambda x, y: x * y

        utt.assert_allclose(f_a(x_v, y_v), f_b(x_v, y_v))

        # Test infer_shape
        self._compile_and_check([x, y], [theano.sparse.dot(x, y)],
                                [x_v, y_v],
                                (Dot, Usmm, UsmmCscDense))
Example 4: test_int32_dtype
# Required import: from theano import sparse [as alias]
# Or: from theano.sparse import dot [as alias]
def test_int32_dtype(self):
    # Reported on the theano-users mailing list:
    # https://groups.google.com/d/msg/theano-users/MT9ui8LtTsY/rwatwEF9zWAJ
    size = 9
    intX = 'int32'

    C = tensor.matrix('C', dtype=intX)
    I = tensor.matrix('I', dtype=intX)

    fI = I.flatten()
    data = tensor.ones_like(fI)
    indptr = tensor.arange(data.shape[0] + 1, dtype='int32')

    m1 = sparse.CSR(data, fI, indptr, (8, size))
    m2 = sparse.dot(m1, C)
    y = m2.reshape(shape=(2, 4, 9), ndim=3)

    f = theano.function(inputs=[I, C], outputs=y)
    i = numpy.asarray([[4, 3, 7, 7], [2, 8, 4, 5]], dtype=intX)
    a = numpy.asarray(numpy.random.randint(0, 100, (size, size)),
                      dtype=intX)
    f(i, a)
Example 5: test_op_ss
# Required import: from theano import sparse [as alias]
# Or: from theano.sparse import dot [as alias]
def test_op_ss(self):
    for format in sparse.sparse_formats:
        for dtype in sparse.all_dtypes:
            variable, data = sparse_random_inputs(format,
                                                  shape=(10, 10),
                                                  out_dtype=dtype,
                                                  n=2,
                                                  p=0.1)

            f = theano.function(variable, self.op(*variable))

            tested = f(*data)

            x, y = [m.toarray() for m in data]
            expected = numpy.dot(x, y)

            assert tested.format == format
            assert tested.dtype == expected.dtype
            tested = tested.toarray()
            utt.assert_allclose(tested, expected)
Example 6: test_op_sd
# Required import: from theano import sparse [as alias]
# Or: from theano.sparse import dot [as alias]
def test_op_sd(self):
    for format in sparse.sparse_formats:
        for dtype in sparse.all_dtypes:
            variable, data = sparse_random_inputs(format,
                                                  shape=(10, 10),
                                                  out_dtype=dtype,
                                                  n=2,
                                                  p=0.1)
            variable[1] = tensor.TensorType(dtype=dtype,
                                            broadcastable=(False, False))()
            data[1] = data[1].toarray()

            f = theano.function(variable, self.op(*variable))

            tested = f(*data)
            expected = numpy.dot(data[0].toarray(), data[1])

            assert tested.format == format
            assert tested.dtype == expected.dtype
            tested = tested.toarray()
            utt.assert_allclose(tested, expected)
Example 7: squared_euclidean_distances
# Required import: from theano import sparse [as alias]
# Or: from theano.sparse import dot [as alias]
def squared_euclidean_distances(x_1, x_2):
    """
    Compute the Euclidean distances in 3D between all the points in x_1 and x_2.

    Args:
        x_1 (theano.tensor.matrix): shape n_points x number of dimensions
        x_2 (theano.tensor.matrix): shape n_points x number of dimensions

    Returns:
        theano.tensor.matrix: Distance matrix. shape n_points x n_points
    """
    # T.maximum avoids negative numbers, increasing stability
    sqd = T.sqrt(T.maximum(
        (x_1 ** 2).sum(1).reshape((x_1.shape[0], 1)) +
        (x_2 ** 2).sum(1).reshape((1, x_2.shape[0])) -
        2 * x_1.dot(x_2.T), 1e-12
    ))
    return sqd
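As a hypothetical usage sketch (assuming the surrounding module imports theano and theano.tensor as T, as the function body suggests), the function above can be compiled and evaluated on NumPy arrays as follows; the sample points are made up for illustration. Note that despite its name the function returns the plain (not squared) distances, since it applies T.sqrt.

import numpy as np
import theano
import theano.tensor as T

x_1 = T.matrix('x_1')  # n_points_1 x 3
x_2 = T.matrix('x_2')  # n_points_2 x 3
pairwise_dist = theano.function([x_1, x_2],
                                squared_euclidean_distances(x_1, x_2))

p1 = np.array([[0., 0., 0.],
               [1., 0., 0.]])
p2 = np.array([[0., 3., 4.]])
print(pairwise_dist(p1, p2))  # [[5.], [sqrt(26) ~ 5.099]] -- Euclidean distances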
Example 8: extend_dual_kriging
# Required import: from theano import sparse [as alias]
# Or: from theano.sparse import dot [as alias]
def extend_dual_kriging(self, weights, grid_shape):
    # TODO: think about which objects are worth saving to speed up computation
    """
    Tile the dual kriging vector to cover all the points to interpolate. So far I just make a matrix with the
    dimensions len(DK) x (grid), but in the future I may have to loop over this part to consume less memory.

    Returns:
        theano.tensor.matrix: Matrix with the DK parameters repeated for all the points to interpolate
    """
    DK_parameters = weights

    # Creation of a matrix of dimensions equal to the grid with the weights for every point (big 4D matrix in
    # ravel form)
    # TODO IMP: Change the tile by a simple dot op -> the dot version on GPU is slower
    DK_weights = T.tile(DK_parameters, (grid_shape, 1)).T

    return DK_weights

# endregion

# region Evaluate Geology
Example 9: select_finite_faults
# Required import: from theano import sparse [as alias]
# Or: from theano.sparse import dot [as alias]
def select_finite_faults(self, grid):
    fault_points = T.vertical_stack(T.stack([self.ref_layer_points[0]], axis=0), self.rest_layer_points).T
    ctr = T.mean(fault_points, axis=1)
    x = fault_points - ctr.reshape((-1, 1))
    M = T.dot(x, x.T)
    U, D, V = T.nlinalg.svd(M)
    rotated_x = T.dot(T.dot(grid, U), V)
    rotated_fault_points = T.dot(T.dot(fault_points.T, U), V)
    rotated_ctr = T.mean(rotated_fault_points, axis=0)
    a_radius = (rotated_fault_points[:, 0].max() - rotated_fault_points[:, 0].min()) / 2
    b_radius = (rotated_fault_points[:, 1].max() - rotated_fault_points[:, 1].min()) / 2
    ellipse_factor = (rotated_x[:, 0] - rotated_ctr[0])**2 / a_radius**2 + \
        (rotated_x[:, 1] - rotated_ctr[1])**2 / b_radius**2

    if "select_finite_faults" in self.verbose:
        ellipse_factor = theano.printing.Print("h")(ellipse_factor)

    return ellipse_factor
Example 10: P
# Required import: from theano import sparse [as alias]
# Or: from theano.sparse import dot [as alias]
def P(self, lat, lon):
    """Compute the pixelization matrix, no filters or illumination."""
    # Get the Cartesian points
    xpt, ypt, zpt = self.latlon_to_xyz(lat, lon)

    # Compute the polynomial basis at the point
    pT = self.pT(xpt, ypt, zpt)[:, : (self.ydeg + 1) ** 2]

    # Transform to the Ylm basis
    pTA1 = ts.dot(pT, self.A1)

    # NOTE: The factor of `pi` ensures the correct normalization.
    # This is *different* from the derivation in the paper, but it's
    # due to the fact that in starry we normalize the spherical
    # harmonics in a slightly strange way (they're normalized so that
    # the integral of Y_{0,0} over the unit sphere is 4, not 4pi).
    # This is useful for thermal light maps, where the flux from a map
    # with Y_{0,0} = 1 is *unity*. But it messes up things for reflected
    # light maps, so we need to account for that here.
    if self._reflected:
        pTA1 *= np.pi

    # We're done
    return pTA1
Example 11: compute_ortho_grid_inc_obl
# Required import: from theano import sparse [as alias]
# Or: from theano.sparse import dot [as alias]
def compute_ortho_grid_inc_obl(self, res, inc, obl):
    """Compute the polynomial basis on the plane of the sky, accounting
    for the map inclination and obliquity."""
    # See NOTE on tt.mgrid bug in `compute_ortho_grid`
    dx = 2.0 / (res - 0.01)
    y, x = tt.mgrid[-1:1:dx, -1:1:dx]
    z = tt.sqrt(1 - x ** 2 - y ** 2)
    y = tt.set_subtensor(y[tt.isnan(z)], np.nan)
    x = tt.reshape(x, [1, -1])
    y = tt.reshape(y, [1, -1])
    z = tt.reshape(z, [1, -1])
    Robl = self.RAxisAngle(tt.as_tensor_variable([0.0, 0.0, 1.0]), -obl)
    Rinc = self.RAxisAngle(
        tt.as_tensor_variable([tt.cos(obl), tt.sin(obl), 0.0]),
        -(0.5 * np.pi - inc),
    )
    R = tt.dot(Robl, Rinc)
    xyz = tt.dot(R, tt.concatenate((x, y, z)))
    x = tt.reshape(xyz[0], [1, -1])
    y = tt.reshape(xyz[1], [1, -1])
    z = tt.reshape(xyz[2], [1, -1])
    lat = tt.reshape(0.5 * np.pi - tt.arccos(y), [1, -1])
    lon = tt.reshape(tt.arctan2(x, z), [1, -1])
    return tt.concatenate((lat, lon)), tt.concatenate((x, y, z))
Example 12: test_csr_correct_output_faster_than_scipy
# Required import: from theano import sparse [as alias]
# Or: from theano.sparse import dot [as alias]
def test_csr_correct_output_faster_than_scipy(self):
    # contrast with test_grad, we put csr in float32, csc in float64
    sparse_dtype = 'float32'
    dense_dtype = 'float32'

    a = SparseType('csr', dtype=sparse_dtype)()
    b = tensor.matrix(dtype=dense_dtype)
    d = theano.dot(a, b)
    f = theano.function([a, b], d)

    for M, N, K, nnz in [(4, 3, 2, 3),
                         (40, 30, 20, 3),
                         (40, 30, 20, 30),
                         (400, 3000, 200, 6000),
                         ]:
        spmat = sp.csr_matrix(random_lil((M, N), sparse_dtype, nnz))
        mat = numpy.asarray(numpy.random.randn(N, K), dense_dtype)
        t0 = time.time()
        theano_result = f(spmat, mat)
        t1 = time.time()
        scipy_result = spmat * mat
        t2 = time.time()

        theano_time = t1 - t0
        scipy_time = t2 - t1
        # print 'theano took', theano_time,
        # print 'scipy took', scipy_time
        overhead_tol = 0.002  # seconds
        overhead_rtol = 1.1  # times as long
        utt.assert_allclose(scipy_result, theano_result)
        if (not theano.config.mode in ["DebugMode", "DEBUG_MODE"] and
                theano.config.cxx):
            self.assertFalse(
                theano_time > overhead_rtol * scipy_time + overhead_tol,
                (theano_time,
                 overhead_rtol * scipy_time + overhead_tol,
                 scipy_time, overhead_rtol, overhead_tol))
Example 13: test_cuda
# Required import: from theano import sparse [as alias]
# Or: from theano.sparse import dot [as alias]
def test_cuda(self):
    import theano.sandbox.cuda as cuda
    if not cuda.cuda_available:
        raise SkipTest("Optional package cuda not available")

    a = sparse.csr_matrix('a', dtype='float32')
    b = cuda.float32_shared_constructor(
        numpy.random.rand(3, 4).astype('float32'))
    d = sparse.dot(a, b)
    f = theano.function([a], d)

    a_val = scipy.sparse.csr_matrix(random_lil((5, 3), 'float32', 5))
    d_theano = f(a_val)
    d_numpy = a_val * b.get_value()
    utt.assert_allclose(d_numpy, d_theano)
Example 14: multivariate_normal
# Required import: from theano import sparse [as alias]
# Or: from theano.sparse import dot [as alias]
def multivariate_normal(datasets, weights, hyperparams, residuals):
    """
    Calculate the posterior likelihood of a multivariate normal distribution.
    Uses the plain inverse of the covariances.
    DEPRECATED! Currently not used in beat.
    Can only be executed within a `with model` context.

    Parameters
    ----------
    datasets : list
        of :class:`heart.SeismicDataset` or :class:`heart.GeodeticDataset`
    weights : list
        of :class:`theano.shared`
        Square matrix of the inverse of the covariance matrix as weights
    hyperparams : dict
        of :class:`theano.`
    residuals : list or array of model residuals

    Returns
    -------
    array_like
    """
    n_t = len(datasets)

    logpts = tt.zeros((n_t), tconfig.floatX)

    for l, data in enumerate(datasets):
        M = tt.cast(shared(
            data.samples, name='nsamples', borrow=True), 'int16')
        hp_name = get_hyper_name(data)
        norm = (M * (2 * hyperparams[hp_name] + log_2pi))
        logpts = tt.set_subtensor(
            logpts[l:l + 1],
            (-0.5) * (
                data.covariance.slog_pdet +
                norm +
                (1 / tt.exp(hyperparams[hp_name] * 2)) *
                (residuals[l].dot(weights[l]).dot(residuals[l].T))))

    return logpts
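Reading off the tt.set_subtensor expression above (a reconstruction from the code itself, not a formula quoted from the beat documentation), each entry of logpts corresponds to

\log p_l = -\tfrac{1}{2}\Big( \mathrm{slogpdet}_l + M_l\,(2 h_l + \log 2\pi) + e^{-2 h_l}\, r_l^{\top} W_l\, r_l \Big)

where h_l is hyperparams[hp_name], M_l is data.samples, slogpdet_l is data.covariance.slog_pdet, r_l is residuals[l], and W_l is weights[l] (the inverse covariance used as a weight matrix).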
Example 15: compRelationProbsFunc
# Required import: from theano import sparse [as alias]
# Or: from theano.sparse import dot [as alias]
def compRelationProbsFunc(self, xFeats):
    # xFeats [l, h] matrix
    # xFeats = theano.printing.Print("xFeats")(xFeats)
    # self.Wb = theano.printing.Print("Wb ") (self.Wb)
    # self.W = theano.printing.Print("W ") (self.W)

    # scores of each role by a classifier
    relationScores = sparse.dot(xFeats, self.W) + self.Wb  # [l, h] x [h, r] => [l, r]
    # relationScores = theano.printing.Print("relationScores=")(relationScores)

    # convert it to probabilities
    relationProbs = T.nnet.softmax(relationScores)
    # relationProbs = theano.printing.Print("relationProbs = ")(relationProbs)

    return relationProbs  # [l, r]
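The same sparse-features -> linear layer -> softmax pattern used in compRelationProbsFunc can be reproduced standalone. This sketch is not part of the original class; the shapes, weight values, and variable names are made up for illustration.

import numpy as np
import scipy.sparse as sp
import theano
import theano.sparse as sparse
import theano.tensor as T

xFeats = sparse.csr_matrix('xFeats', dtype='float64')  # [l, h] sparse feature matrix
W = theano.shared(np.random.randn(20, 5), name='W')    # [h, r] weight matrix
Wb = theano.shared(np.zeros(5), name='Wb')             # [r] bias

scores = sparse.dot(xFeats, W) + Wb                    # [l, r] dense scores
probs = T.nnet.softmax(scores)                         # each row sums to 1
f = theano.function([xFeats], probs)

feats = sp.random(4, 20, density=0.1, format='csr')    # 4 examples, 20 sparse features
print(f(feats).sum(axis=1))                            # approximately [1., 1., 1., 1.]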