This article collects typical usage examples of the Python function mdp.utils.mult. If you have been wondering how exactly to use mult, or what it is good for, the curated code examples below may help.
15 code examples of the mult function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
Example 1: _inverse
def _inverse(self, y, n=None):
    """Project data from the output to the input space using the
    first 'n' components. If 'n' is not set, use all available
    components.

    :param y: Data to be projected to the input space.
    :type y: numpy.ndarray
    :param n: Number of leading principal components to use.
    :type n: int
    :return: The projected data.
    :rtype: numpy.ndarray
    """
    if n is None:
        n = y.shape[1]
    if n > self.output_dim:
        error_str = ("y has dimension %d,"
                     " should be at most %d" % (n, self.output_dim))
        raise mdp.NodeException(error_str)
    v = self.get_recmatrix()
    return mult(y, v[:n, :]) + self.avg
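A minimal usage sketch that would exercise this method through the public inverse() API, assuming it belongs to a PCA-style node such as mdp.nodes.PCANode (the node class and shapes here are assumptions):

import mdp
import numpy as np

x = np.random.random((100, 5))
pca = mdp.nodes.PCANode(output_dim=3)
pca.train(x)
pca.stop_training()
y = pca.execute(x)      # project onto the first 3 principal components
x_rec = pca.inverse(y)  # dispatches to _inverse: mult(y, v[:n, :]) + avg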
Example 2: _energy
def _energy(self, v, h):
    if self._gaussian:
        return ((((v - self.bv) ** 2).sum() / 2) - mult(h, self.bh) -
                (mult(v, self.w) * h).sum(axis=1))
    else:
        return (-mult(v, self.bv) - mult(h, self.bh) -
                (mult(v, self.w) * h).sum(axis=1))
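The two branches are the standard RBM energies: for binary visible units E(v, h) = -v·bv - h·bh - Σ(vW ∘ h), while Gaussian visible units replace the -v·bv term with the quadratic ((v - bv)²).sum() / 2. A self-contained check of the binary branch, with illustrative shapes and names:

import numpy as np

rng = np.random.default_rng(0)
v = rng.integers(0, 2, (4, 6)).astype(float)  # 4 observations, 6 visible units
h = rng.integers(0, 2, (4, 3)).astype(float)  # 3 hidden units
w = rng.random((6, 3))
bv, bh = rng.random(6), rng.random(3)
# per-observation energy, matching the non-Gaussian branch above
energy = -v.dot(bv) - h.dot(bh) - (v.dot(w) * h).sum(axis=1)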
Example 3: _train
def _train(self, x):
    """Update the principal components.

    :param x: Data vectors.
    :type x: numpy.ndarray
    """
    [w1, w2] = self._amnesic(self.get_current_train_iteration() + 1)
    red_j = self.output_dim
    red_j_flag = False
    explained_var = 0.0
    r = x
    for j in range(self.output_dim):
        v = self._v[:, j:j + 1]
        d = self.d[j]
        v = w1 * v + w2 * mult(r, v) / d * r.T
        d = mdp.numx_linalg.norm(v)
        vn = old_div(v, d)
        r = r - mult(r, vn) * vn.T
        explained_var += d
        if not red_j_flag:
            ratio = explained_var / self._var_tot
            if ratio > self.var_rel:
                red_j = j
                red_j_flag = True
        self._v[:, j:j + 1] = v
        self.v[:, j:j + 1] = vn
        self.d[j] = d
    self._var_tot = explained_var
    self._reduced_dims = red_j
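This is an amnesic, CCIPCA-style incremental PCA update: each component is blended with the current sample, normalized, and its contribution is deflated from the residual before the next component is updated. A standalone sketch of one such step for a single component (the function and variable names are illustrative):

import numpy as np

def ccipca_step(v, r, w1, w2):
    """One amnesic update: v is (dim, 1), r is a (1, dim) residual sample."""
    d = np.linalg.norm(v)
    v = w1 * v + w2 * (r @ v) / d * r.T  # blend old direction with the new sample
    vn = v / np.linalg.norm(v)           # unit-length component estimate
    r = r - (r @ vn) * vn.T              # deflate: remove the explained part
    return v, vn, r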
Example 4: matmult_n_MDP_benchmark
def matmult_n_MDP_benchmark(dim):
    """Multiply two non-contiguous matrices using the MDP internal
    matrix multiplication routine.

    The first argument is the matrix dimensionality."""
    a = numx_rand.random((dim, dim)).T
    b = numx_rand.random((dim, dim)).T
    mult(a, b)
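As far as I can tell, mdp.utils.mult is an alias for numpy.dot, so this benchmark times a plain dot product on transposed (hence non-contiguous) arrays. An equivalent self-contained snippet:

import numpy as np
from mdp.utils import mult

a = np.random.random((500, 500)).T  # .T yields a non-contiguous view
b = np.random.random((500, 500)).T
c = mult(a, b)                      # same result as np.dot(a, b)
assert np.allclose(c, np.dot(a, b))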
Example 5: _inverse
def _inverse(self, y, n=None):
    """Project 'y' to the input space using the first 'n' components.

    :param y: Vectors from the output space.
    :type y: numpy.ndarray
    :param n: The number of components to use for projection to the
        input space. If 'n' is not set, use all available components.
    :type n: int
    :return: The projected vectors.
    :rtype: numpy.ndarray
    :raises mdp.NodeException: If the valid dimension is exceeded.
    """
    if n is None:
        n = y.shape[1]
    if n > self.output_dim:
        error_str = ("y has dimension %d,"
                     " should be at most %d" % (n, self.output_dim))
        raise mdp.NodeException(error_str)
    v = self.get_recmatrix()
    return mult(y, v[:n, :])
Example 6: _train
def _train(self, x, y):
    """
    :param x: Array of input observations.
    :type x: numpy.ndarray
    :param y: Array of shape (x.shape[0], output_dim) containing the
        outputs observed for the inputs x.
    :type y: numpy.ndarray
    """
    # initialize internal variables if necessary
    if self._xTx is None:
        if self.with_bias:
            x_size = self._input_dim + 1
        else:
            x_size = self._input_dim
        self._xTx = numx.zeros((x_size, x_size), self._dtype)
        self._xTy = numx.zeros((x_size, self._output_dim), self._dtype)
    if self.with_bias:
        x = self._add_constant(x)
    # update internal variables
    self._xTx += mult(x.T, x)
    self._xTy += mult(x.T, y)
    self._tlen += x.shape[0]
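Accumulating only X^T X and X^T y makes the regression trainable in chunks; the least-squares weights then come from the normal equations when training stops. A self-contained sketch of that final solve (the batch data and the explicit solve are illustrative, not the node's actual stop-training code):

import numpy as np

rng = np.random.default_rng(0)
batches = [(rng.random((50, 3)), rng.random((50, 1))) for _ in range(4)]
xTx = np.zeros((3, 3))
xTy = np.zeros((3, 1))
for x, y in batches:
    xTx += x.T @ x                # same accumulation as _train above
    xTy += x.T @ y
beta = np.linalg.solve(xTx, xTy)  # normal equations: (X^T X) beta = X^T y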
Example 7: _calculate_gradient
def _calculate_gradient(self, y):
    x = self._last_x
    dy = Oger.utils.LogisticFunction.df(x, self._last_y) * y
    dw = mult(x.T, dy)
    self._gradient_vector = numx.concatenate((dw.ravel(), dy.sum(axis=0)))
    dx = mult(self.w, dy.T).T
    return dx
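This is the backward pass of a logistic layer: the incoming error y is scaled by the logistic derivative (computed from the stored output), the weight and bias gradients are accumulated, and the error is propagated to the layer below. A standalone sketch assuming y_out = sigmoid(x @ w):

import numpy as np

def logistic_backward(x, y_out, err, w):
    dy = y_out * (1.0 - y_out) * err  # logistic derivative via the output
    dw = x.T @ dy                     # weight gradient
    db = dy.sum(axis=0)               # bias gradient
    dx = dy @ w.T                     # error for the layer below
    return dw, db, dx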
Example 8: _sample_v
def _sample_v(self, h, x):
    # returns P(v=1|h,W,b) and a sample from it
    dynamic_b = mult(x, self.a)
    v_in = self.bv + mult(h, self.w.T) + dynamic_b
    if self._gaussian:
        return v_in, v_in
    else:
        probs = Oger.utils.LogisticFunction.f(v_in)
        v = (probs > random(probs.shape)).astype(self.dtype)
        return probs, v
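For binary units the activation is squashed by a logistic and thresholded against uniform noise, which draws each unit from a Bernoulli distribution with the computed probability; Gaussian units return the activation as both mean and sample. The sampling idiom in isolation (names are illustrative):

import numpy as np

rng = np.random.default_rng(0)

def sample_bernoulli(v_in):
    probs = 1.0 / (1.0 + np.exp(-v_in))  # P(v=1 | ...)
    # probs > u is equivalent to u < probs: each unit is 1 with probability probs
    return probs, (probs > rng.random(probs.shape)).astype(float)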
Example 9: test_mult_diag
def test_mult_diag():
    dim = 20
    d = numx_rand.random(size=(dim,))
    dd = numx.diag(d)
    mtx = numx_rand.random(size=(dim, dim))
    res1 = utils.mult(dd, mtx)
    res2 = utils.mult_diag(d, mtx, left=True)
    assert_array_almost_equal(res1, res2, 10)
    res1 = utils.mult(mtx, dd)
    res2 = utils.mult_diag(d, mtx, left=False)
    assert_array_almost_equal(res1, res2, 10)
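mult_diag avoids materializing the dense diagonal matrix by using broadcasting; the same two products can be written directly in NumPy (a sketch of the equivalence the test verifies, assuming mult_diag's documented semantics):

import numpy as np

d = np.random.random(20)
mtx = np.random.random((20, 20))
left = d[:, np.newaxis] * mtx  # == np.diag(d) @ mtx, scales rows
right = mtx * d                # == mtx @ np.diag(d), scales columns
assert np.allclose(left, np.diag(d) @ mtx)
assert np.allclose(right, mtx @ np.diag(d))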
Example 10: _inverse
def _inverse(self, y, n=None):
    """Project 'y' to the input space using the first 'n' components.
    If 'n' is not set, use all available components."""
    if n is None:
        n = y.shape[1]
    if n > self.output_dim:
        error_str = ("y has dimension %d,"
                     " should be at most %d" % (n, self.output_dim))
        raise mdp.NodeException(error_str)
    v = self.get_recmatrix()
    return mult(y, v[:n, :]) + self.avg
Example 11: get_CD_gradient
def get_CD_gradient(self, x, n_updates=1):
    """Use Gibbs sampling to estimate the contrastive divergence gradient.

    - x: a binary matrix with variables on the columns and observations
      on the rows (concatenation of visibles and context)
    - n_updates: number of CD iterations. Default value: 1

    Returns a tuple (dw, dbv, dbh, da, db) containing the gradients of
    the weights, the biases of the visible and hidden units, and the
    autoregressive terms a and b, respectively.
    """
    # useful quantities
    n = x.shape[0]
    v, x = self._split_data(x)
    w, a, b, bv, bh = self.w, self.a, self.b, self.bv, self.bh
    # first update of the hidden units for the data term
    ph_data, h_data = self._sample_h(v, x)
    # n updates of both v and h for the model term
    h_model = h_data.copy()
    for i in range(n_updates):
        pv_model, v_model = self._sample_v(h_model, x)
        ph_model, h_model = self._sample_h(v_model, x)
    # find dw
    data_term = mult(v.T, ph_data)
    model_term = mult(v_model.T, ph_model)
    dw = (data_term - model_term) / n
    # find da
    data_term = v
    model_term = v_model
    # Should I include the weight decay here as well?
    da = mult(x.T, data_term - model_term) / n
    # find db
    data_term = ph_data
    model_term = ph_model
    db = mult(x.T, data_term - model_term) / n
    # find dbv
    data_term = v.sum(axis=0)
    model_term = v_model.sum(axis=0)
    dbv = (data_term - model_term) / n
    # find dbh
    data_term = ph_data.sum(axis=0)
    model_term = ph_model.sum(axis=0)
    dbh = (data_term - model_term) / n
    return (dw, dbv, dbh, da, db)
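A hedged sketch of how the returned gradients would typically be applied; rbm, x, and the learning rate lr are assumptions, and this update loop is not part of the method above:

# assumes: rbm is an instance of the class above, x a binary data matrix
lr = 0.1  # illustrative learning rate
dw, dbv, dbh, da, db = rbm.get_CD_gradient(x, n_updates=1)
rbm.w += lr * dw                        # visible-hidden weights
rbm.bv += lr * dbv; rbm.bh += lr * dbh  # static biases
rbm.a += lr * da; rbm.b += lr * db      # autoregressive terms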
Example 12: _inverse
def _inverse(self, y):
    # counter-rotate input
    x = mult(y, self.RP.T)
    # invert whitening node if needed
    if not self.whitened:
        x = self.white.inverse(x)
    return x
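The counter-rotation works because the rotation matrix RP of such a node is orthogonal, so its inverse is its transpose. A standalone illustration of that identity:

import numpy as np

rng = np.random.default_rng(0)
R, _ = np.linalg.qr(rng.random((4, 4)))  # a random orthogonal matrix
y = rng.random((10, 4)) @ R
x = y @ R.T                              # counter-rotate, as in _inverse above
assert np.allclose(x @ R, y)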
Example 13: _down_pass
def _down_pass(self, h, top_updates=0, epsilon=0.1, decay=0.0, momentum=0.0):
    """
    top_updates -- set > 0 for the top node, so that it ends up
        sampling from the prior
    """
    # TODO: check input
    pv, v = self._sample_v(h)
    for _ in range(top_updates):
        ph, h = self._sample_h(v)
        pv, v = self._sample_v(h)
    # reconstruct hidden state
    ph1, h1 = self._sample_h(v)
    # adapt generative weights
    delta = mult(v.T, (h - ph1)) / v.shape[0]
    self.dw_sleep = momentum * self.dw_sleep + epsilon * (delta - decay * self.w_rec)
    self.w_rec += self.dw_sleep
    # adapt biases
    delta = (h - ph1).mean(axis=0)
    self.dbh = momentum * self.dbh + epsilon * delta
    self.bh += self.dbh
    return v, pv, mdp.utils.norm2(self.dbh)
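A hypothetical call for the topmost layer of a deep belief net; the instance layer and the hidden sample h are assumptions. Extra Gibbs steps requested via top_updates push the returned visible sample toward a draw from the model prior:

# assumes: layer is an instance of the class above, h a matrix of hidden samples
v, pv, bias_step = layer._down_pass(h, top_updates=3, epsilon=0.05, momentum=0.9)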
Example 14: guess
def guess(input, reservoir, dirname):
    # print(input.shape)
    # pylab.plot(input)
    # pylab.show()
    # pylab.figure()
    try:
        beta = np.loadtxt(dirname + os.sep + 'beta.mat')
    except IOError:
        return 0  # 19
    x = reservoir.execute(input)
    # m = readout._execute(x)
    # m = mult(x, readout.beta)
    m = mult(x, beta)
    # the predicted class is the column with the largest mean readout score
    mcs = m.mean(axis=0)
    return mcs.argmax()
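A hypothetical invocation; reservoir, signal, and the directory name are assumptions, with beta.mat expected to have shape (reservoir_size, n_classes):

# assumes: reservoir is a trained Oger reservoir node, signal an input time series
predicted_class = guess(signal, reservoir, 'model_dir')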
Example 15: _sample_v
def _sample_v(self, h, sample_l=False, concatenate=True):
    # returns P(v=1|h,W,b), a sample from it, P(l=1|h,W,b),
    # and a sample from it
    ldim, vdim = self._labels_dim, self._visible_dim
    # activation
    a = self.bv + mult(h, self.w.T)
    av, al = a[:, :vdim], a[:, vdim:]
    # visible units: logistic activation
    probs_v = old_div(1., (1. + exp(-av)))
    v = (probs_v > random(probs_v.shape)).astype('d')
    # label units: softmax activation
    # subtract the maximum to regularize the exponent
    exponent = al - rrep(al.max(axis=1), ldim)
    probs_l = exp(exponent)
    probs_l /= rrep(probs_l.sum(axis=1), ldim)
    if sample_l:
        # TODO: I'm sure this can be optimized
        l = numx.zeros((h.shape[0], ldim))
        for t in range(h.shape[0]):
            l[t, :] = mdp.numx_rand.multinomial(1, probs_l[t, :])
    else:
        l = probs_l.copy()
    if concatenate:
        probs = numx.concatenate((probs_v, probs_l), axis=1)
        x = numx.concatenate((v, l), axis=1)
        return probs, x
    else:
        return probs_v, probs_l, v, l
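The max subtraction before exponentiation is the usual numerically stable softmax trick; in isolation:

import numpy as np

def softmax(a):
    """Row-wise softmax; subtracting the row max prevents overflow in exp."""
    e = np.exp(a - a.max(axis=1, keepdims=True))
    return e / e.sum(axis=1, keepdims=True)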