This article collects typical usage examples of the Python method cupy.dot. If you are wondering what cupy.dot does, how to call it, or what real uses of it look like, the curated code examples below may help. You can also explore further usage examples of the cupy module that the method belongs to.
The following presents 7 code examples of the cupy.dot method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
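Before the examples, here is a minimal orientation sketch of cupy.dot itself, assuming a working CuPy installation and an available GPU; it is only meant to set context for the examples that follow.

import cupy as cp
import numpy as np

# Two small matrices allocated on the GPU.
a = cp.arange(6, dtype=cp.float32).reshape(2, 3)
b = cp.arange(12, dtype=cp.float32).reshape(3, 4)

# Matrix product computed on the GPU; the result is a cupy.ndarray of shape (2, 4).
c = cp.dot(a, b)

# Copy back to host memory and compare against NumPy as a sanity check.
np.testing.assert_allclose(cp.asnumpy(c), np.dot(cp.asnumpy(a), cp.asnumpy(b)))
print(c.shape)  # (2, 4)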
Example 1: apply_mapping
# Required import: import cupy [as alias]
# Or: from cupy import dot [as alias]
def apply_mapping(x, model_params, src='src', tgt='tgt', latent_space=True):
    """
    Applies the bilingual mapping to the matrix x and returns the transformed matrix.
    src and tgt name the source and target languages as used in the trained model.
    latent_space: if True, the embeddings are mapped to the latent space; otherwise
    they are mapped to the embedding space of the other language.
    """
    xw = None
    src_mat_name = 'U_{}'.format(src)
    tgt_mat_name = 'U_{}'.format(tgt)
    if latent_space:
        xw = x.dot(model_params[src_mat_name]).dot(scipy.linalg.sqrtm(model_params['B']))
    else:
        xw = x.dot(model_params[src_mat_name]).dot(model_params['B']).dot(model_params[tgt_mat_name].T)
    return xw
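A minimal usage sketch of apply_mapping follows, assuming a model_params dictionary holding the projection matrices U_src, U_tgt, and B as produced by read_model (Example 4). The embedding matrix and all shapes here are hypothetical and chosen only for illustration.

import numpy as np
import scipy.linalg

# Hypothetical model parameters: 300-dimensional embeddings, 200-dimensional latent space.
rng = np.random.RandomState(0)
model_params = {
    'U_src': rng.randn(300, 200),
    'U_tgt': rng.randn(300, 200),
    'B': np.eye(200),
}

# Hypothetical batch of 5 source-language embeddings.
emb = rng.randn(5, 300)

# Map into the shared latent space ...
latent = apply_mapping(emb, model_params, src='src', tgt='tgt', latent_space=True)
# ... or directly into the target-language embedding space.
mapped = apply_mapping(emb, model_params, src='src', tgt='tgt', latent_space=False)
print(latent.shape, mapped.shape)  # (5, 200) (5, 300)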
Example 2: compute_word_similarity
# Required import: import cupy [as alias]
# Or: from cupy import dot [as alias]
def compute_word_similarity(emb_info, sim_database):
    emb_words, emb_vectors = emb_info
    w2i = build_w2i(emb_words)
    sim_words = set([x[0] for x in sim_database])
    sim_words.update([x[1] for x in sim_database])
    oov_words = sim_words.difference(emb_words)
    non_oov_words = sim_words.difference(oov_words)
    non_oov_sim_pairs = list(filter(lambda x: len(oov_words.intersection(x[:2])) == 0, sim_database))
    cos_sims = []
    ref_sims = []
    for w1, w2, ref_sim in non_oov_sim_pairs:
        v1 = emb_vectors[w2i[w1]]
        v2 = emb_vectors[w2i[w2]]
        cos_sim = np.dot(v1, v2) / np.sqrt(v1.dot(v1) * v2.dot(v2))
        cos_sims.append(cos_sim)
        ref_sims.append(ref_sim)
    corr = scipy.stats.spearmanr(np.array(cos_sims), np.array(ref_sims))
    return corr[0], corr[1], len(non_oov_sim_pairs) / len(sim_database)
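A sketch of how compute_word_similarity might be called. The build_w2i helper is not shown in the excerpt above, so a minimal stand-in is defined here (the project's own helper may differ); the embedding table and similarity database are tiny illustrative stand-ins as well.

import numpy as np
import scipy.stats

def build_w2i(words):
    # Minimal word-to-index helper assumed by the function above.
    return {w: i for i, w in enumerate(words)}

# Tiny illustrative embedding table and similarity database (word1, word2, human score).
emb_words = ['cat', 'dog', 'car']
emb_vectors = np.random.RandomState(0).randn(3, 50)
emb_info = (emb_words, emb_vectors)
sim_database = [('cat', 'dog', 8.5), ('cat', 'car', 1.5), ('dog', 'car', 2.0), ('cat', 'plane', 0.5)]

# Returns the Spearman correlation, its p-value, and the fraction of non-OOV pairs.
rho, p_value, coverage = compute_word_similarity(emb_info, sim_database)
print(rho, p_value, coverage)  # coverage is 0.75 here: 'plane' is out of vocabulary.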
Example 3: test_01
# Required import: import cupy [as alias]
# Or: from cupy import dot [as alias]
def test_01(self):
    N = 64
    K = 5
    L = 10
    u = cp.random.randn(N, K)
    U = cp.dot(u, u.T)
    V = cp.random.randn(N, N)
    t = cp.sort(cp.abs(V).ravel())[V.size - L]
    V[cp.abs(V) < t] = 0
    D = U + V
    opt = rpca.RobustPCA.Options({'Verbose': False, 'gEvalY': False,
                                  'MaxMainIter': 250,
                                  'AutoRho': {'Enabled': True}})
    b = rpca.RobustPCA(D, None, opt)
    X, Y = b.solve()
    assert sm.mse(U, X) < 5e-6
    assert sm.mse(V, Y) < 1e-8
Example 4: read_model
# Required import: import cupy [as alias]
# Or: from cupy import dot [as alias]
def read_model(mapping_model_dir):
    """
    Reads the model and returns a dictionary with the model parameters.
    """
    model_params = {}
    for f in os.listdir(mapping_model_dir):
        if f.find('U') == 0 or f.find('B') == 0:
            model_params[f.replace('.csv', '')] = np.loadtxt('{}/{}'.format(mapping_model_dir, f))
    return model_params
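A short usage sketch of read_model. It assumes a directory containing whitespace-delimited files named U_src.csv, U_tgt.csv, and B.csv, which is what the loading code above implies (np.loadtxt with default settings); the on-disk layout of the original project may differ.

import numpy as np

# Hypothetical model directory produced by the training step.
mapping_model_dir = 'mapping_model'

model_params = read_model(mapping_model_dir)
print(sorted(model_params.keys()))   # e.g. ['B', 'U_src', 'U_tgt']
print(model_params['U_src'].shape)   # (embedding_dim, latent_dim)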
Example 5: main
# Required import: import cupy [as alias]
# Or: from cupy import dot [as alias]
def main():
    parser = argparse.ArgumentParser(
        description='SGEMM kernel call from CuPy')
    parser.add_argument('--gpu', '-g', default=0, type=int,
                        help='ID of GPU.')
    parser.add_argument(
        '--m', type=int, default=np.random.randint(1000, 1500))
    parser.add_argument(
        '--n', type=int, default=np.random.randint(1000, 1500))
    parser.add_argument(
        '--k', type=int, default=np.random.randint(500, 3000))
    args = parser.parse_args()

    print('m={} n={} k={}'.format(args.m, args.n, args.k))
    print('start benchmarking')
    print('')

    with cp.cuda.Device(args.gpu):
        A = cp.random.uniform(
            low=-1., high=1., size=(args.m, args.k)).astype(cp.float32)
        B = cp.random.uniform(
            low=-1., high=1., size=(args.k, args.n)).astype(cp.float32)

        # check correctness
        cp.testing.assert_array_almost_equal(
            sgemm(A, B), cp.dot(A, B), decimal=3)

        # dry run
        for _ in range(3):
            sgemm(A, B)
        kernel_times = benchmark(sgemm, (A, B), n_run=5)

        for _ in range(3):
            cp.dot(A, B)
        cublas_times = benchmark(cp.dot, (A, B), n_run=5)

    print('=============================Result===============================')
    print('hand written kernel time {} ms'.format(np.mean(kernel_times)))
    print('cuBLAS time {} ms'.format(np.mean(cublas_times)))
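The snippet above relies on sgemm (a hand-written SGEMM kernel, not shown) and a benchmark helper, neither of which is defined in the excerpt. Below is a minimal sketch of a compatible benchmark helper that times GPU work with CUDA events; the original project's implementation may differ.

import cupy as cp

def benchmark(func, args, n_run):
    # Time func(*args) n_run times with CUDA events; return a list of times in milliseconds.
    times = []
    for _ in range(n_run):
        start = cp.cuda.Event()
        end = cp.cuda.Event()
        start.record()
        func(*args)
        end.record()
        end.synchronize()  # wait for the GPU work to finish before reading the timer
        times.append(cp.cuda.get_elapsed_time(start, end))
    return times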
Example 6: pinv
# Required import: import cupy [as alias]
# Or: from cupy import dot [as alias]
def pinv(a, rcond=1e-15):
    """Compute the Moore-Penrose pseudoinverse of a matrix.

    It computes a pseudoinverse of a matrix ``a``, which is a generalization
    of the inverse matrix with Singular Value Decomposition (SVD).
    Note that it automatically removes small singular values for stability.

    Args:
        a (cupy.ndarray): The matrix with dimension ``(M, N)``
        rcond (float): Cutoff parameter for small singular values.
            For stability it computes the largest singular value denoted by
            ``s``, and sets all singular values smaller than ``rcond * s``
            to zero.

    Returns:
        cupy.ndarray: The pseudoinverse of ``a`` with dimension ``(N, M)``.

    .. warning::
        This function calls one or more cuSOLVER routine(s) which may yield
        invalid results if input conditions are not met.
        To detect these invalid results, you can set the `linalg`
        configuration to a value that is not `ignore` in
        :func:`cupyx.errstate` or :func:`cupyx.seterr`.

    .. seealso:: :func:`numpy.linalg.pinv`
    """
    u, s, vt = decomposition.svd(a.conj(), full_matrices=False)
    cutoff = rcond * s.max()
    s1 = 1 / s
    s1[s <= cutoff] = 0
    return core.dot(vt.T, s1[:, None] * u.T)
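This function is CuPy's own implementation, exposed publicly as cupy.linalg.pinv. A quick check of the defining Moore-Penrose property, A · A⁺ · A ≈ A, on a random rectangular matrix:

import cupy as cp

a = cp.random.randn(6, 4).astype(cp.float64)
a_pinv = cp.linalg.pinv(a)  # shape (4, 6)

# The pseudoinverse reproduces a up to numerical error.
cp.testing.assert_allclose(cp.dot(cp.dot(a, a_pinv), a), a, atol=1e-10)
print(a_pinv.shape)  # (4, 6)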
Example 7: _rbf_kernel
# Required import: import cupy [as alias]
# Or: from cupy import dot [as alias]
def _rbf_kernel(x, y, gamma=None):
    xn, nx = x.shape
    _, ny = y.shape
    assert nx == ny, ('The number ({}) of columns of x must be the same as '
                      'the number ({}) of columns of y'.format(nx, ny))
    if gamma is None:
        gamma = 1.0 / xn
    xy = cupy.dot(x, y.transpose())
    x2 = (x * x).sum(axis=1)
    y2 = (y * y).sum(axis=1)
    return cupy.exp((x2[:, cupy.newaxis] - 2 * xy + y2) * -gamma)
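The returned expression implements the standard RBF kernel K(x, y) = exp(-gamma * ||x - y||^2) via the expansion ||x - y||^2 = ||x||^2 - 2 x·y + ||y||^2, with the cross term computed in a single cupy.dot. A short sketch comparing it against a direct pairwise computation (illustrative shapes and gamma only):

import cupy

x = cupy.random.randn(8, 5).astype(cupy.float32)
y = cupy.random.randn(6, 5).astype(cupy.float32)

k = _rbf_kernel(x, y, gamma=0.5)  # shape (8, 6)

# Direct pairwise computation of exp(-gamma * ||x_i - y_j||^2) for comparison.
diff = x[:, cupy.newaxis, :] - y[cupy.newaxis, :, :]
k_ref = cupy.exp(-0.5 * (diff * diff).sum(axis=-1))
cupy.testing.assert_allclose(k, k_ref, rtol=1e-3, atol=1e-6)
print(k.shape)  # (8, 6)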