This article collects typical usage examples of the dot function from Python's scikits.cuda.linalg module. If you are wondering what dot does, how to call it, or want to see it used in real code, the curated examples below should help.
The article presents 15 code examples of the dot function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps surface better Python code samples.
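Before the examples, here is a minimal, self-contained sketch of the typical call pattern (assuming pycuda and scikits.cuda are installed and a CUDA device is available; the arrays a and b are purely illustrative and not taken from any example below):

import numpy as np
import pycuda.autoinit                  # create a CUDA context
import pycuda.gpuarray as gpuarray
import scikits.cuda.linalg as linalg

linalg.init()                           # initialize CUBLAS before calling dot

a = np.random.rand(4, 3).astype(np.float32)
b = np.random.rand(3, 5).astype(np.float32)

a_gpu = gpuarray.to_gpu(a)              # copy the operands to the GPU
b_gpu = gpuarray.to_gpu(b)

c_gpu = linalg.dot(a_gpu, b_gpu)        # matrix-matrix product on the GPU
print(np.allclose(np.dot(a, b), c_gpu.get()))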
Example 1: multinomial_log_likelihood
def multinomial_log_likelihood(softmax_vals, Y, one_n_trans, one_c):
    # add a small amount to protect against log(0)
    small_val = 1e-9
    prod = Y * cumath.log(softmax_vals + small_val)
    prod = linalg.dot(one_n_trans, prod)
    prod = linalg.dot(prod, one_c)
    return prod.get()
Example 2: calculate_H_gpu
def calculate_H_gpu(X, W, P):
    WPW = la.add_diag(P, la.dot(W, W, "t", "n"))
    tmp = la.dot(W, la.inv(WPW, overwrite=True))
    H = la.dot(X, tmp, "n", "t")
    H = gpu.maximum(H, 0)
    H = to_unit_variance(H)
    return H, tmp
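The "t" and "n" strings passed to dot above are its transa/transb flags: 't'/'T' transposes the corresponding operand, 'n'/'N' leaves it as is, and 'c'/'C' takes the conjugate transpose (relevant for complex arrays, as in the test examples further down). A quick illustrative check, using a hypothetical w_gpu array that is not part of the example:

w = np.random.rand(4, 3).astype(np.float32)
w_gpu = gpuarray.to_gpu(w)

# dot(W, W, transa='T') computes W.T.dot(W)
wtw_gpu = linalg.dot(w_gpu, w_gpu, transa='T')
print(np.allclose(w.T.dot(w), wtw_gpu.get()))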
Example 3: backward
def backward(self, top, propagate_down, bottom):
    with pu.caffe_cuda_context():
        h = caffe.cublas_handle()
        import scikits.cuda.linalg as linalg
        top_diff = top[0].diff_as_pycuda_gpuarray()
        ts = [self.t1_, self.t2_]
        for i in xrange(len(bottom)):
            if not propagate_down[i]:
                continue
            diff = bottom[i].diff_as_pycuda_gpuarray()
            data = bottom[(i + 1) % 2].data_as_pycuda_gpuarray()
            # The 3 conditions below are complicated and might be hard to
            # understand: they pick the operand order and transpose flags
            # for this bottom blob's gradient.
            swap = ts[i] ^ bool(i)
            t1 = ts[i]
            t2 = (not t1) ^ ts[(i + 1) % 2]
            for b in xrange(bottom[0].shape[0]):
                x = top_diff[b]
                y = data[b]
                t1_, t2_ = t1, t2
                if swap:
                    x, y = y, x
                    t1_, t2_ = t2_, t1_
                linalg.dot(x, y,
                           transa=blas_trans(t1_), transb=blas_trans(t2_),
                           handle=h, out=diff[b])
Example 4: backprop
def backprop(self, input_data, df_output, cache=None):
    """ Backpropagate through the hidden layer

    **Parameters:**

    input_data : ``GPUArray``
        Input data to compute activations for.

    df_output : ``GPUArray``
        Gradients with respect to the activations of this layer
        (received from the layer above).

    cache : list of ``GPUArray``
        Cache obtained from the forward pass. If the cache is
        provided, then the activations are not recalculated.

    **Returns:**

    gradients : tuple of ``GPUArray``
        Gradients with respect to the weights and biases in the
        form ``(df_weights, df_biases)``.

    df_input : ``GPUArray``
        Gradients with respect to the input.
    """

    # Get the cache if it wasn't provided
    if cache is None:
        cache = self.feed_forward(input_data, prediction=False)

    if len(cache) == 2:
        activations, dropout_mask = cache
    else:
        activations = cache[0]

    # Multiply the binary mask with the incoming gradients
    if self.dropout and dropout_mask is not None:
        apply_dropout_mask(df_output, dropout_mask)

    # Gradient wrt the activation function
    df_activations = self.df(activations)
    delta = df_activations * df_output

    # Gradient wrt weights
    df_W = linalg.dot(input_data, delta, transa='T')
    # Gradient wrt bias
    df_b = matrix_sum_out_axis(delta, 0)
    # Gradient wrt inputs
    df_input = linalg.dot(delta, self.W, transb='T')

    # L1 weight decay
    if self.l1_penalty_weight:
        df_W -= self.l1_penalty_weight * sign(self.W)

    # L2 weight decay
    if self.l2_penalty_weight:
        df_W -= self.l2_penalty_weight * self.W

    return (df_W, df_b), df_input
Example 5: eps_r
def eps_r(x, A1, A2, out, handle):
    out.fill(0)
    #tmp = garr.empty((A1[0].shape[0], x.shape[1]), dtype=A1[0].dtype)
    #tmp2 = garr.empty((tmp.shape[0], A2[0].shape[0]), dtype=A1[0].dtype)
    for s in range(len(A1)):
        tmp = cla.dot(A1[s], x, handle=handle)
        tmp2 = cla.dot(tmp, A2[s], transb='C', handle=handle)
        out += tmp2
    return out
Example 6: forward
def forward(self, bottom, top):
    with pu.caffe_cuda_context():
        h = caffe.cublas_handle()
        import scikits.cuda.linalg as linalg
        mat1 = bottom[0].data_as_pycuda_gpuarray()
        mat2 = bottom[1].data_as_pycuda_gpuarray()
        mato = top[0].data_as_pycuda_gpuarray()
        for b in xrange(bottom[0].shape[0]):
            linalg.dot(mat1[b], mat2[b],
                       transa=blas_trans(self.t1_),
                       transb=blas_trans(self.t2_),
                       handle=h, out=mato[b])
Example 7: decompose
def decompose(self):
    gcov = cla.dot(self._Y_gpu, self._Y_gpu, transa='C')
    ge_g, gh_g = np.linalg.eigh(gcov.get())
    I = np.argsort(ge_g)[::-1]
    ge_g, gh_g = np.sqrt(ge_g[I]), gh_g[:, I]
    # push the matrix back out
    gpueigs = gpuarray.to_gpu(gh_g)
    W_g = cla.dot(self._Y_gpu, gpueigs)
    # Unitize W_g - could be done on the GPU to allow async returning
    W_g = W_g.get()
    W_g = W_g / np.sqrt(np.sum(W_g**2, axis=0))[np.newaxis, :]
    return W_g, ge_g, gh_g.T  # Not sure whether the last one should be transposed
Example 8: test_dot_matrix_h_complex128
def test_dot_matrix_h_complex128(self):
    a = np.asarray(np.random.rand(2, 4) + 1j * np.random.rand(2, 4), np.complex128)
    b = np.asarray(np.random.rand(2, 2) + 1j * np.random.rand(2, 2), np.complex128)
    a_gpu = gpuarray.to_gpu(a)
    b_gpu = gpuarray.to_gpu(b)
    c_gpu = linalg.dot(a_gpu, b_gpu, "c")
    assert np.allclose(np.dot(a.conj().T, b), c_gpu.get())

    a = a.astype(np.complex128, order="F", copy=True)
    b = b.astype(np.complex128, order="F", copy=True)
    a_gpu = gpuarray.to_gpu(a)
    b_gpu = gpuarray.to_gpu(b)
    c_gpu = linalg.dot(a_gpu, b_gpu, "c")
    assert np.allclose(np.dot(a.conj().T, b), c_gpu.get())
Example 9: test_dot_vector_complex128
def test_dot_vector_complex128(self):
    a = np.asarray(np.random.rand(5), np.complex128)
    b = np.asarray(np.random.rand(5), np.complex128)
    a_gpu = gpuarray.to_gpu(a)
    b_gpu = gpuarray.to_gpu(b)
    c = linalg.dot(a_gpu, b_gpu)
    assert np.allclose(np.dot(a, b), c)

    a = a.astype(np.complex128, order="F", copy=True)
    b = b.astype(np.complex128, order="F", copy=True)
    a_gpu = gpuarray.to_gpu(a)
    b_gpu = gpuarray.to_gpu(b)
    c = linalg.dot(a_gpu, b_gpu)
    assert np.allclose(np.dot(a, b), c)
Example 10: backprop
def backprop(self, input_data, targets, cache=None):
    """ Backpropagate through the logistic layer.

    **Parameters:**

    input_data : ``GPUArray``
        Input data to compute activations for.

    targets : ``GPUArray``
        The target values of the units.

    cache : list of ``GPUArray``
        Cache obtained from the forward pass. If the cache is
        provided, then the activations are not recalculated.

    **Returns:**

    gradients : tuple of ``GPUArray``
        Gradients with respect to the weights and biases in the
        form ``(df_weights, df_biases)``.

    df_input : ``GPUArray``
        Gradients with respect to the input.
    """

    if cache is not None:
        activations = cache
    else:
        activations = self.feed_forward(input_data, prediction=False)

    delta = activations - targets
    nan_to_zeros(delta, delta)

    # Gradient wrt weights
    df_W = linalg.dot(input_data, delta, transa='T')
    # Gradient wrt bias
    df_b = matrix_sum_out_axis(delta, 0)
    # Gradient wrt input
    df_input = linalg.dot(delta, self.W, transb='T')

    # L1 penalty
    if self.l1_penalty_weight:
        df_W -= self.l1_penalty_weight * sign(self.W)

    # L2 penalty
    if self.l2_penalty_weight:
        df_W -= self.l2_penalty_weight * self.W

    return (df_W, df_b), df_input
Example 11: backprop
def backprop(self, input_data, df_output, cache=None):
    """ Backpropagate through the hidden layer

    Inputs:
    input_data
    df_output: the gradient wrt the output units
    cache (optional): cache object from the forward pass

    Output:
    df_W: gradient wrt the weights
    df_b: gradient wrt the bias
    df_input: gradient wrt the input
    """

    # Get the cache if it wasn't provided
    if cache is None:
        cache = self.feed_forward(input_data, prediction=False)

    if len(cache) == 2:
        activations, dropout_mask = cache
    else:
        activations = cache[0]

    # Multiply the binary mask with the incoming gradients
    if self.dropout and dropout_mask is not None:
        apply_dropout_mask(df_output, dropout_mask)

    # Get the gradient wrt the activation function
    df_activations = self.df(activations)
    delta = df_activations * df_output

    # Gradient wrt weights
    df_W = linalg.dot(input_data, delta, transa='T')
    # Gradient wrt bias
    df_b = matrix_sum_out_axis(delta, 0)
    # Gradient wrt inputs
    df_input = linalg.dot(delta, self.W, transb='T')

    # L1 weight decay
    if self.l1_penalty_weight:
        df_W -= self.l1_penalty_weight * sign(self.W)

    # L2 weight decay
    if self.l2_penalty_weight:
        df_W -= self.l2_penalty_weight * self.W

    return (df_W, df_b), df_input
Example 12: feed_forward
def feed_forward(self, input_data, prediction=False):
    """Propagate forward through the layer.

    **Parameters:**

    input_data : ``GPUArray``
        Input data to compute activations for.

    prediction : bool, optional
        Whether to run in prediction mode. Only relevant when using
        dropout. If true, the activations are halved if the layer
        uses dropout.

    **Returns:**

    activations : ``GPUArray``
        The activations of the hidden units.
    """

    activations = linalg.dot(input_data, self.W)
    activations = add_vec_to_mat(activations, self.b, inplace=True)

    self.f(activations)

    if self.dropout and prediction:
        activations *= .5

    if self.dropout and not prediction:
        dropout_mask = sample_dropout_mask(activations)
        return activations, dropout_mask

    return (activations,)
Example 13: thunk
def thunk():
    x = inputs[0]
    y = inputs[1]
    # chop off the real/imag dimension
    input_shape_x = x[0].shape  # (a, b, 2)
    input_shape_y = y[0].shape  # (b, c, 2)
    output_shape = (input_shape_x[0], input_shape_y[1], 2)  # (a, c, 2)
    input_x_pycuda = to_complex_gpuarray(x[0])
    input_y_pycuda = to_complex_gpuarray(y[0])
    # multistream experiment
    # print "DEBUG: Setting stream to %d" % current_stream[0]
    # prev_stream_obj = stream_pool[(current_stream[0] - 1) % num_streams]
    # print "PREV STREAM IS DONE?"
    # print prev_stream_obj.is_done()
    # print
    stream_obj = stream_pool[current_stream[0]]
    cublas.cublasSetStream(handle[0], stream_obj.handle)
    current_stream[0] += 1
    current_stream[0] %= num_streams
    # print "DEBUG: set next stream id to %d" % current_stream[0]
    output_pycuda = linalg.dot(input_x_pycuda, input_y_pycuda, handle=handle[0])
    outputs[0][0] = to_complex_cudandarray(output_pycuda)
Example 14: test_dot_matrix_h_complex128
def test_dot_matrix_h_complex128(self):
    a = np.asarray(np.random.rand(2, 4) + 1j * np.random.rand(2, 4), np.complex128)
    b = np.asarray(np.random.rand(2, 2) + 1j * np.random.rand(2, 2), np.complex128)
    a_gpu = gpuarray.to_gpu(a)
    b_gpu = gpuarray.to_gpu(b)
    c_gpu = linalg.dot(a_gpu, b_gpu, 'c')
    assert np.allclose(np.dot(a.conj().T, b), c_gpu.get())
Example 15: test_dot_vector_float64
def test_dot_vector_float64(self):
    a = np.asarray(np.random.rand(5), np.float64)
    b = np.asarray(np.random.rand(5), np.float64)
    a_gpu = gpuarray.to_gpu(a)
    b_gpu = gpuarray.to_gpu(b)
    c = linalg.dot(a_gpu, b_gpu)
    assert np.allclose(np.dot(a, b), c)
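As the two vector tests above suggest, dot applied to 1-D inputs computes an inner product and appears to return a plain host scalar, which is why no .get() call is needed there, while matrix inputs return a GPUArray that must be copied back explicitly. A small sketch under that assumption, with hypothetical v and w vectors:

v = np.random.rand(5)
w = np.random.rand(5)
s = linalg.dot(gpuarray.to_gpu(v), gpuarray.to_gpu(w))   # host scalar, not a GPUArray
print(np.allclose(np.dot(v, w), s))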