This page collects typical usage examples of the theano.sparse.dot function in Python. If you are struggling with questions such as how exactly to use dot, or what dot is good for, the curated examples here may help.
Below, 15 code examples of the dot function are presented, sorted by popularity by default.
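Before the examples, here is a minimal self-contained sketch of what theano.sparse.dot does (names and sizes are illustrative, not taken from the examples below): it builds a symbolic matrix product in which at least one operand is sparse, and the compiled function accepts scipy.sparse inputs.

import numpy as np
import scipy.sparse as sp
import theano
import theano.sparse as sparse
import theano.tensor as T

# symbolic CSR input and a dense weight matrix
x = sparse.csr_matrix('x', dtype=theano.config.floatX)
W = T.matrix('W')
y = sparse.dot(x, W)  # dense result of a sparse-by-dense product
f = theano.function([x, W], y)

# a one-hot sparse matrix turns the product into a row lookup
x_val = sp.csr_matrix(np.eye(3, dtype=theano.config.floatX))
W_val = np.arange(9).reshape(3, 3).astype(theano.config.floatX)
print(f(x_val, W_val))  # prints the rows of W_val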
Example 1: __init__
def __init__(self, rng, P_input, L2_input, **kwargs):
#symbol declaration, initialization and definition
x_1_tm1, x_t = (\
sparse.csr_matrix("x_1_tm1", dtype=theano.config.floatX),\
sparse.csr_matrix("x_t",dtype=theano.config.floatX)\
)\
if P_input is None else P_input[:2]
#elements of history
shape = kwargs.get("shape")
if shape is not None:
dict_size = shape[0]
if len(shape) <= 1:
del kwargs["shape"]
else:
kwargs["shape"] = shape[1:]
else:
dict_size = (16,1,32,32)
D_1_tm1 = theano.shared(rng.normal(size=dict_size).astype(theano.config.floatX))
Dx_1_tm1 = sparse.dot(x_1_tm1, D_1_tm1)#array access via a one-hot row = dot operation
super(SequenceCNN, self).__init__(rng=rng, inputsymbol=Dx_1_tm1, **kwargs)#attaches new elements into the fgraph
self.L2_output_1_tm1 = self.L2_output
#elements of current time
D_t = theano.shared(rng.normal(size=dict_size).astype(theano.config.floatX))
Dx_t = sparse.dot(x_t, D_t)#array access via a one-hot row = dot operation
self.L2_output_t = theano.clone(self.L2_output_1_tm1, replace={Dx_1_tm1:Dx_t})
#element preparation for model building
self.P_input = (x_1_tm1,x_t)
self.params += [D_1_tm1, D_t]
self.L2_output = self.L2_output_1_tm1*self.L2_output_t
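A note on the theano.clone call above: it rebuilds the same computation graph with one input swapped out, which is how both time steps reuse a single set of layer parameters. A stripped-down sketch of the pattern (names are illustrative):

import theano
import theano.tensor as T

a = T.vector('a')
cost_a = (a ** 2).sum()  # a graph built on input a
b = T.vector('b')
cost_b = theano.clone(cost_a, replace={a: b})  # same graph, input swapped to b
f = theano.function([a, b], cost_a * cost_b)  # mirrors the product of the two outputs above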
Example 2: create_TrainFunc_tranPES
def create_TrainFunc_tranPES(simfn, embeddings, marge=0.5, alpha=1., beta=1.):
# parse the embedding data
embedding = embeddings[0] # D x N matrix
lembedding = embeddings[1]
# declare the symbolic variables for training triples
hp = S.csr_matrix('head positive') # N x batchsize matrix
rp = S.csr_matrix('relation')
tp = S.csr_matrix('tail positive')
hn = S.csr_matrix('head negative')
tn = S.csr_matrix('tail negative')
lemb = T.scalar('embedding learning rate')
lremb = T.scalar('relation learning rate')
subtensorE = T.ivector('batch entities set')
subtensorR = T.ivector('batch link set')
# Generate the training positive and negative triples
hpmat = S.dot(embedding.E, hp).T # batchsize x D dense matrix
rpmat = S.dot(lembedding.E, rp).T
tpmat = S.dot(embedding.E, tp).T
hnmat = S.dot(embedding.E, hn).T
tnmat = S.dot(embedding.E, tn).T
# calculate the score
pos = tranPES3(simfn, T.concatenate([hpmat, tpmat], axis=1).reshape((hpmat.shape[0], 2, hpmat.shape[1])).dimshuffle(0, 2, 1), hpmat, rpmat, tpmat)
negh = tranPES3(simfn, T.concatenate([hnmat, tpmat], axis=1).reshape((hnmat.shape[0], 2, hnmat.shape[1])).dimshuffle(0, 2, 1), hnmat, rpmat, tpmat)
negt = tranPES3(simfn, T.concatenate([hpmat, tnmat], axis=1).reshape((hpmat.shape[0], 2, hpmat.shape[1])).dimshuffle(0, 2, 1), hpmat, rpmat, tnmat)
costh, outh = margeCost(pos, negh, marge)
costt, outt = margeCost(pos, negt, marge)
embreg = regEmb(embedding, subtensorE, alpha)
lembreg = regLink(lembedding, subtensorR, beta)
cost = costh + costt + embreg[0] + lembreg
out = T.concatenate([outh, outt])
outc = embreg[1]
# list of inputs to the function
list_in = [lemb, lremb, hp, rp, tp, hn, tn, subtensorE, subtensorR]
# updating the embeddings using gradient descent
emb_grad = T.grad(cost, embedding.E)
New_embedding = embedding.E - lemb*emb_grad
remb_grad = T.grad(cost, lembedding.E)
New_rembedding = lembedding.E - lremb * remb_grad
updates = OrderedDict({embedding.E: New_embedding, lembedding.E: New_rembedding})
return theano.function(list_in, [cost, T.mean(out), T.mean(outc), embreg[0], lembreg],
updates=updates, on_unused_input='ignore')
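Each S.dot(embedding.E, ...) call above multiplies a D x N embedding matrix by an N x batchsize one-hot CSR matrix, so every column of the sparse input selects one embedding, and the trailing .T yields a batchsize x D matrix. A minimal shape sketch (sizes are made up):

import numpy as np
import scipy.sparse as sp
import theano
import theano.sparse as S

D, N, batchsize = 4, 10, 3
E = theano.shared(np.random.randn(D, N).astype(theano.config.floatX), name='E')
inp = S.csr_matrix('inp')
emb = S.dot(E, inp).T                      # batchsize x D dense matrix
lookup = theano.function([inp], emb)

# one-hot columns selecting entities 0, 3 and 7
data = np.ones(3, dtype=theano.config.floatX)
cols = sp.csr_matrix((data, ([0, 3, 7], [0, 1, 2])), shape=(N, batchsize))
print(lookup(cols).shape)                  # (3, 4)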
Example 3: fprop
def fprop(self, state_below, add_noise=True):
self.input_space.validate(state_below)
if self.requires_reformat:
if not isinstance(state_below, tuple):
for sb in get_debug_values(state_below):
if sb.shape[0] != self.dbm.batch_size:
raise ValueError("self.dbm.batch_size is %d but got shape of %d" % (self.dbm.batch_size, sb.shape[0]))
assert reduce(lambda x,y: x * y, sb.shape[1:]) == self.input_dim
state_below = self.input_space.format_as(state_below, self.desired_space)
self.x = state_below
# linear part
if isinstance(self.x, S.SparseVariable):
z = S.dot(self.x,self.W[0]) + self.b[0]
else:
z = T.dot(self.x,self.W[0]) + self.b[0]
self.z = self.activate(z, self.expert_activation)
# first layer non-linear part
if isinstance(self.x, S.SparseVariable):
h = S.dot(self.x,self.W[1]) + self.b[1]
else:
h = T.dot(self.x,self.W[1]) + self.b[1]
# activate hidden units of non-linear part
self.h = self.activate(h, self.hidden_activation)
noise = 0.
if add_noise:
rng = MRG_RandomStreams(self.mlp.rng.randint(2**15))
noise = rng.normal(size = self.z.shape,
std=self.noise_stdev ,
dtype=self.z.type.dtype)
# second layer non-linear part
self.a = T.dot(self.h,self.W[2]) + self.b[2] + noise
# activate non-linear part
self.m_mean = self.activate(self.a, self.gater_activation)
# how many are over 0:
self.effective_sparsity = T.cast(T.gt(self.m_mean, 0),
theano.config.floatX).mean()
# mix output of linear part with output of non-linear part
self.p = self.m_mean * self.z
if self.layer_name is not None:
self.z.name = self.layer_name + '_z'
self.h.name = self.layer_name + '_h'
self.a.name = self.layer_name + '_a'
self.m_mean.name = self.layer_name + '_m_mean'
self.p.name = self.layer_name + '_p'
return self.p
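The isinstance branches above choose between the sparse and the dense dot at graph-construction time. The same dispatch as a standalone helper (the helper name is made up):

import theano.sparse as S
import theano.tensor as T

def any_dot(x, W):
    """Sparse dot for SparseVariable inputs, dense dot otherwise."""
    if isinstance(x, S.SparseVariable):
        return S.dot(x, W)
    return T.dot(x, W)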
Example 4: ForwardFn
def ForwardFn(fnsim, embeddings, leftop, rightop, marge=1.0):
"""
This function returns a theano function to perform a forward step,
contrasting couples of positive and negative triplets. members are given
as sparse matrices. For one positive triplet there is one negative
triplet.
:param fnsim: similarity function (on theano variables).
:param embeddings: an embeddings instance.
:param leftop: class for the 'left' operator.
:param rightop: class for the 'right' operator.
:param marge: margin for the cost function.
:note: this is useful for W_SABIE [Weston et al., IJCAI 2011]
"""
embedding, relationl, relationr = parse_embeddings(embeddings)
# inputs
inpr = S.csr_matrix()
inpl = S.csr_matrix()
inpo = S.csr_matrix()
inpln = S.csr_matrix()
inprn = S.csr_matrix()
inpon = S.csr_matrix()
# graph
lhs = S.dot(embedding.E, inpl).T
rhs = S.dot(embedding.E, inpr).T
rell = S.dot(relationl.E, inpo).T
relr = S.dot(relationr.E, inpo).T
lhsn = S.dot(embedding.E, inpln).T
rhsn = S.dot(embedding.E, inprn).T
relln = S.dot(relationl.E, inpon).T
relrn = S.dot(relationr.E, inpon).T
simi = fnsim(leftop(lhs, rell), rightop(rhs, relr))
simin = fnsim(leftop(lhsn, relln), rightop(rhsn, relrn))
cost, out = margincost(simi, simin, marge)
"""
Theano function inputs.
:input inpl: sparse csr matrix representing the indexes of the positive
triplet 'left' member, shape=(#examples,N [Embeddings]).
:input inpr: sparse csr matrix representing the indexes of the positive
triplet 'right' member, shape=(#examples,N [Embeddings]).
:input inpo: sparse csr matrix representing the indexes of the positive
triplet relation member, shape=(#examples,N [Embeddings]).
:input inpln: sparse csr matrix representing the indexes of the negative
triplet 'left' member, shape=(#examples,N [Embeddings]).
:input inprn: sparse csr matrix representing the indexes of the negative
triplet 'right' member, shape=(#examples,N [Embeddings]).
:input inpon: sparse csr matrix representing the indexes of the negative
triplet relation member, shape=(#examples,N [Embeddings]).
Theano function output.
:output out: binary vector representing when the margin is violated, i.e.
when an update occurs.
"""
return theano.function([inpl, inpr, inpo,
inpln, inprn, inpon], [out],
on_unused_input='ignore')
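The margincost helper used above is not shown on this page. A definition consistent with how it is used here, i.e. a summed hinge loss plus a binary violation indicator, would be the following sketch (an assumption, not code from this page):

import theano.tensor as T

def margincost(pos, neg, marge=1.0):
    # hinge: penalize negatives scored within `marge` of the positives
    out = neg - pos + marge
    return T.sum(out * (out > 0)), out > 0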
Example 5: SimFn
def SimFn(fnsim, embeddings, leftop, rightop):
"""
This function returns a Theano function to measure the similarity score
for sparse matrices inputs.
:param fnsim: similarity function (on Theano variables).
:param embeddings: an Embeddings instance.
:param leftop: class for the 'left' operator.
:param rightop: class for the 'right' operator.
"""
embedding, relationl, relationr = parse_embeddings(embeddings)
# Inputs
inpr = S.csr_matrix('inpr')
inpl = S.csr_matrix('inpl')
inpo = S.csr_matrix('inpo')
# Graph
# lhs, rhs, rell and relr below are dense tensors: embedding.E is a D x N matrix
# and inpl/inpr/inpo are N x batch one-hot CSR matrices, so each dot selects one
# embedding per column and the .T yields a (batch, D) matrix
lhs = S.dot(embedding.E, inpl).T
rhs = S.dot(embedding.E, inpr).T
rell = S.dot(relationl.E, inpo).T
relr = S.dot(relationr.E, inpo).T
# For reference, leftop and rightop are typically LayerMat('lin', state.ndim, state.nhid);
# when called, LayerMat reshapes its arguments as
#   ry = y.reshape((y.shape[0], self.n_inp, self.n_out))
#   rx = x.reshape((x.shape[0], x.shape[1], 1))
# and returns self.act((rx * ry).sum(1))
simi = fnsim(leftop(lhs, rell), rightop(rhs, relr))
"""
Theano function inputs.
:input inpl: sparse csr matrix (representing the indexes of the 'left'
entities), shape=(#examples, N [Embeddings]).
:input inpr: sparse csr matrix (representing the indexes of the 'right'
entities), shape=(#examples, N [Embeddings]).
:input inpo: sparse csr matrix (representing the indexes of the
relation member), shape=(#examples, N [Embeddings]).
Theano function output
:output simi: matrix of score values.
"""
return theano.function([inpl, inpr, inpo], [simi],
on_unused_input='ignore')
Example 6: get_train_function
def get_train_function(self):
# specify the computational graph
target = T.matrix('target')
weight = theano.shared(np.random.randn(len(self.feature_map), len(self.label_map)), name='weight')
feat_mat = sparse.csr_matrix(name='feat_mat')
mask_mat = sparse.csr_matrix(name='mask_mat')
sum_pred = sparse.dot( mask_mat, T.nnet.softmax( sparse.dot(feat_mat, weight) ) )
pred = sum_pred / sum_pred.sum(axis=1).reshape((sum_pred.shape[0], 1))
objective = T.nnet.categorical_crossentropy(pred, target).sum() + self.param.l2_regularization * (weight ** 2).sum()
grad_weight = T.grad(objective, weight)
# print 'Compiling function ...'
# compile the function
train = theano.function(inputs = [feat_mat, mask_mat, target], outputs = [objective, weight], updates=[(weight, weight - 0.1*grad_weight)] )
return train
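A hypothetical way to call the returned function (the model object and all sizes are assumptions; the rows of mask_mat choose which softmax rows are pooled into each training instance, so an identity mask means no pooling):

import numpy as np
import scipy.sparse as sp
import theano

floatX = theano.config.floatX
train = model.get_train_function()  # `model` is an assumed instance exposing this method
n_feats, n_labels, n_rows = len(model.feature_map), len(model.label_map), 8

feat_mat = sp.csr_matrix(np.random.rand(n_rows, n_feats).astype(floatX))
mask_mat = sp.csr_matrix(np.eye(n_rows, dtype=floatX))  # identity mask: no pooling
target = np.eye(n_labels, dtype=floatX)[np.random.randint(n_labels, size=n_rows)]
for epoch in range(20):
    objective, weight = train(feat_mat, mask_mat, target)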
Example 7: labelFunct
def labelFunct(self, batchSize, xFeats):
# xFeats [l, h]
# l = batchSize
# self.W = theano.printing.Print("W ") (self.W)
# self.Wb = theano.printing.Print("Wb ") (self.Wb)
scores = sparse.dot(xFeats, self.W) + self.Wb # [l, h] x [h, r] => [l, r]
relationProbs = T.nnet.softmax(scores)
# scores = theano.printing.Print("scores ") (scores)
labels = T.argmax(scores, axis=1) # [l, r] => [l]
# labels = theano.printing.Print("labels ") (labels)
return (labels, relationProbs)
Example 8: _get_diagonal_term
def _get_diagonal_term(self, X_left, X_right, diag_init):
diag = tn.shared(value=diag_init, name='diag')
if _tn_is_sparse(X_left) or _tn_is_sparse(X_right):
XlXr = tsp.mul(X_left, X_right)
y_pred = tsp.dot(XlXr, diag)
else:
XlXr = T.mul(X_left, X_right)
y_pred = T.dot(XlXr, diag)
return y_pred, [diag]
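In the sparse branch above, tsp.mul is the elementwise product of two sparse matrices, and the following dot with the dense diag vector produces one dense prediction per row. A minimal sketch:

import numpy as np
import scipy.sparse as sp
import theano
import theano.sparse as tsp

X_left = tsp.csr_matrix('X_left')
X_right = tsp.csr_matrix('X_right')
diag = theano.shared(np.ones(4, dtype=theano.config.floatX), name='diag')

y_pred = tsp.dot(tsp.mul(X_left, X_right), diag)  # elementwise product, then dot
f = theano.function([X_left, X_right], y_pred)

A = sp.csr_matrix(np.random.rand(3, 4).astype(theano.config.floatX))
B = sp.csr_matrix(np.random.rand(3, 4).astype(theano.config.floatX))
print(f(A, B))  # one prediction per row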
Example 9: get_train_function
def get_train_function(self):
# specify the computational graph
weight = theano.shared(np.random.randn(len(self.feature_map), len(self.label_map)), name='weight')
# weight = theano.shared(np.zeros((len(self.feature_map), len(self.label_map))), name='weight')
feat_mat = sparse.csr_matrix(name='feat_mat')
f_target = T.matrix('f_target')
f_mask_mat = sparse.csr_matrix(name='f_mask_mat')
f_sum_pred = sparse.dot( f_mask_mat, T.nnet.softmax( sparse.dot(feat_mat, weight) ) )
f_pred = f_sum_pred / f_sum_pred.sum(axis=1).reshape((f_sum_pred.shape[0], 1))
i_target = T.matrix('i_target')
i_mask_mat = sparse.csr_matrix(name='i_mask_mat')
i_pred = sparse.dot( i_mask_mat, T.nnet.softmax( sparse.dot(feat_mat, weight) ) )
objective = self.param.feature_lambda * T.nnet.categorical_crossentropy(f_pred, f_target).sum() + T.nnet.categorical_crossentropy(i_pred, i_target).sum() + self.param.l2_lambda * (weight ** 2).sum() / 2
grad_weight = T.grad(objective, weight)
# print 'Compiling function ...'
# compile the function
train = theano.function(inputs = [feat_mat, f_mask_mat, f_target, i_mask_mat, i_target], outputs = [objective, weight], updates=[(weight, weight - 0.1*grad_weight)] )
return train
Example 10: SimFn
def SimFn(fnsim, embeddings, leftop, rightop, op=''):
"""
This function returns a Theano function to measure the similarity score for sparse matrices inputs.
:param fnsim: similarity function (on Theano variables).
:param embeddings: an Embeddings instance.
:param leftop: class for the 'left' operator.
:param rightop: class for the 'right' operator.
"""
embedding, relationl, relationr = parse_embeddings(embeddings)
# Inputs
inpr, inpl, inpo = S.csr_matrix('inpr'), S.csr_matrix('inpl'), S.csr_matrix('inpo')
# Graph
lhs = S.dot(embedding.E, inpl).T
rhs = S.dot(embedding.E, inpr).T
rell = S.dot(relationl.E, inpo).T
relr = S.dot(relationr.E, inpo).T
lop, rop = leftop(lhs, rell), rightop(rhs, relr)
simi = fnsim(lop, rop)
"""
Theano function inputs.
:input inpl: sparse csr matrix (representing the indexes of the 'left' entities), shape=(#examples, N [Embeddings]).
:input inpr: sparse csr matrix (representing the indexes of the 'right' entities), shape=(#examples, N [Embeddings]).
:input inpo: sparse csr matrix (representing the indexes of the relation member), shape=(#examples, N [Embeddings]).
Theano function output
:output simi: matrix of score values.
"""
return theano.function([inpl, inpr, inpo], [simi], on_unused_input='ignore')
Example 11: compRelationProbsFunc
def compRelationProbsFunc(self, xFeats):
# xFeats [l, h] matrix
# xFeats = theano.printing.Print("xFeats")(xFeats)
# self.Wb = theano.printing.Print("Wb ") (self.Wb)
# self.W = theano.printing.Print("W ") (self.W)
# scores of each role by a classifier
relationScores = sparse.dot(xFeats, self.W) + self.Wb # [l, h] x [h, r] => [l, r]
#relationScores = theano.printing.Print("relationScores=")(relationScores)
# convert it to probabilities
relationProbs = T.nnet.softmax(relationScores)
#relationProbs = theano.printing.Print("relationProbs = ")(relationProbs)
return relationProbs # [l, r]
Example 12: get_output_for
def get_output_for(self, input, **kwargs):
if input.ndim > 2:
# if the input has more than two dimensions, flatten it into a
# batch of feature vectors.
input = input.flatten(2)
# Per Lasagne pull request 595 (eduardo4jesus): even for sparse input the
# activation stays dense, because the weight matrix W is dense
if (type(input) == S.SparseVariable) or (type(input) == S.SparseConstant):
activation = S.dot(input, self.W)
else:
activation = T.dot(input, self.W)
if self.b is not None:
activation = activation + self.b.dimshuffle('x', 0)
return self.nonlinearity(activation)
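The check covers both S.SparseVariable and S.SparseConstant because the two do not share a class: wrapping a scipy matrix directly into a graph yields a constant, not a variable. A small sketch of the distinction:

import numpy as np
import scipy.sparse as sp
import theano.sparse as S

sym = S.csr_matrix('sym')                                          # a SparseVariable
const = S.as_sparse_variable(sp.csr_matrix(np.eye(3, dtype='float32')))
print(isinstance(sym, S.SparseVariable))                           # True
print(isinstance(const, S.SparseConstant))                         # True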
Example 13: __init__
def __init__(self, rng, x, topic_num=100):
#input
L2_input = sparse.csr_matrix("x",dtype=theano.config.floatX)
#params
vocab_size = x.shape[1]
mu, sigma = x.data.mean(), x.data.var()**0.5
rng = numpy.random.RandomState(numpy.random.randint(2**32-1)) if rng is None else rng
self.L2_w = theano.shared(\
numpy.asarray(\
rng.normal(loc=mu,scale=sigma,size=(vocab_size, topic_num)),\
dtype=theano.config.floatX\
),\
borrow=True\
)
self.L2_b = theano.shared(numpy.zeros(topic_num,dtype=theano.config.floatX), borrow=True)
self.params = [self.L2_w, self.L2_b]
#stick-breaking: sticks -> orthogonal sticks
L2_stick = sparse.dot(L2_input,self.L2_w)+self.L2_b-\
0.5*(L2_input.size/vocab_size*tensor.sum(self.L2_w**2,0)+self.L2_b**2)
zero_space = tensor.zeros((L2_input.shape[0],1),dtype=theano.config.floatX)
L2_orth_stick = tensor.join(1, L2_stick, zero_space)\
- tensor.join(1, zero_space, tensor.cumsum(L2_stick,1))
Pasterik_orth_stick = tensor.log(1 + tensor.exp(L2_orth_stick))
#training model definition
Likelihood = tensor.mean(Pasterik_orth_stick)
grads = theano.grad(Likelihood, self.params)#gradient w.r.t params
eta = tensor.scalar("eta")
updates = [(param, param+eta*grad) for param, grad in zip(self.params, grads)]
self._fit = theano.function(\
inputs=[L2_input, eta],\
outputs=Likelihood,\
updates=updates\
)
#predict model definition
self._predict = theano.function(\
inputs=[L2_input],\
outputs=tensor.argmax(L2_stick,axis=-1)\
)
self._codec = theano.function(\
inputs=[L2_input],\
outputs=L2_stick>0\
)
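A hypothetical fit/predict loop for the class above (the class name, corpus size and learning rate are made up; x is a scipy CSR document-term matrix):

import numpy as np
import scipy.sparse as sp
import theano

floatX = theano.config.floatX
x = sp.csr_matrix(np.random.rand(200, 5000).astype(floatX))   # toy document-term matrix
model = StickBreakingTopicModel(rng=None, x=x, topic_num=50)  # assumed class name
for epoch in range(100):
    likelihood = model._fit(x, 0.01)                          # eta: learning rate
topics = model._predict(x)                                    # argmax topic per document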
Example 14: __init__
def __init__(self, rng, x, topic_num=100):
#input
L2_input = sparse.csr_matrix("x",dtype=theano.config.floatX)
#params
vocab_size = x.shape[1]
mu, sigma = x.data.mean(), 2.56*x.data.var()**0.5
rng = numpy.random.RandomState(numpy.random.randint(2**32-1)) if rng is None else rng
self.L2_w = theano.shared(\
numpy.asarray(\
mu + (mu if mu < sigma else sigma)*rng.uniform(low=-1,high=1,size=(vocab_size, topic_num)),\
dtype=theano.config.floatX\
),\
borrow=True\
)
self.L2_b = theano.shared(numpy.zeros(topic_num, dtype=theano.config.floatX), borrow=True)
self.params = [self.L2_w, self.L2_b]
#output
L2_topic = sparse.dot(L2_input,self.L2_w)+self.L2_b
#difference based objective function
Pasterik_topic = tensor.log(tensor.sum(tensor.exp(L2_topic-L2_topic.max(-1, keepdims=True)),-1))#avoiding overflow
d_xw_w2 = tensor.mean(Pasterik_topic) -\
0.5*(L2_input.size*tensor.mean(self.L2_w*self.L2_w)+tensor.dot(self.L2_b,self.L2_b))
grads = theano.grad(d_xw_w2, self.params)#gradient w.r.t params
eta = tensor.scalar("eta")
updates = [(param, param+eta*grad) for param, grad in zip(self.params, grads)]
#training model definition
self._fit = theano.function(\
inputs=[L2_input, eta],\
outputs=d_xw_w2, \
updates=updates\
)
#predict model definition
self._predict = theano.function(\
inputs=[L2_input],\
outputs=tensor.argmax(L2_topic,axis=-1)\
)
Example 15: TrainFn
def TrainFn(fnsim, embeddings, leftop, rightop, marge=1.0):
"""
This function returns a theano function to perform a training iteration,
contrasting pairs of positive and negative triplets. Members are given
as sparse matrices; for one positive triplet there is one negative
triplet.
:param fnsim: similarity function (on theano variables).
:param embeddings: an embeddings instance.
:param leftop: class for the 'left' operator.
:param rightop: class for the 'right' operator.
:param marge: margin for the cost function.
"""
embedding, relationl, relationr = parse_embeddings(embeddings)
# Inputs
inpr = S.csr_matrix()
inpl = S.csr_matrix()
inpo = S.csr_matrix()
inpln = S.csr_matrix()
inprn = S.csr_matrix()
inpon = S.csr_matrix()
lrparams = T.scalar('lrparams')
lrembeddings = T.scalar('lrembeddings')
# Graph
## Positive triplet
lhs = S.dot(embedding.E, inpl).T
rhs = S.dot(embedding.E, inpr).T
rell = S.dot(relationl.E, inpo).T
relr = S.dot(relationr.E, inpo).T
simi = fnsim(leftop(lhs, rell), rightop(rhs, relr))
## Negative triplet
lhsn = S.dot(embedding.E, inpln).T
rhsn = S.dot(embedding.E, inprn).T
relln = S.dot(relationl.E, inpon).T
relrn = S.dot(relationr.E, inpon).T
simin = fnsim(leftop(lhsn, relln), rightop(rhsn, relrn))
cost, out = margincost(simi, simin, marge)
# Parameters gradients
if hasattr(fnsim, 'params'):
# If the similarity function has some parameters, we update them too.
gradientsparams = T.grad(cost,
leftop.params + rightop.params + fnsim.params)
updates = OrderedDict((i, i - lrparams * j) for i, j in zip(
leftop.params + rightop.params + fnsim.params, gradientsparams))
else:
gradientsparams = T.grad(cost, leftop.params + rightop.params)
updates = OrderedDict((i, i - lrparams * j) for i, j in zip(
leftop.params + rightop.params, gradientsparams))
# Embeddings gradients
gradients_embedding = T.grad(cost, embedding.E)
newE = embedding.E - lrembeddings * gradients_embedding
updates.update({embedding.E: newE})
if type(embeddings) == list:
# If there are different embeddings for the relation member.
gradients_embedding = T.grad(cost, relationl.E)
newE = relationl.E - lrparams * gradients_embedding
updates.update({relationl.E: newE})
gradients_embedding = T.grad(cost, relationr.E)
newE = relationr.E - lrparams * gradients_embedding
updates.update({relationr.E: newE})
"""
Theano function inputs.
:input lrembeddings: learning rate for the embeddings.
:input lrparams: learning rate for the parameters.
:input inpl: sparse csr matrix representing the indexes of the positive
triplet 'left' member, shape=(#examples,N [Embeddings]).
:input inpr: sparse csr matrix representing the indexes of the positive
triplet 'right' member, shape=(#examples,N [Embeddings]).
:input inpo: sparse csr matrix representing the indexes of the positive
triplet relation member, shape=(#examples,N [Embeddings]).
:input inpln: sparse csr matrix representing the indexes of the negative
triplet 'left' member, shape=(#examples,N [Embeddings]).
:input inprn: sparse csr matrix representing the indexes of the negative
triplet 'right' member, shape=(#examples,N [Embeddings]).
:input inpon: sparse csr matrix representing the indexes of the negative
triplet relation member, shape=(#examples,N [Embeddings]).
Theano function output.
:output mean(cost): average cost.
:output mean(out): ratio of examples for which the margin is violated,
i.e. for which an update occurs.
"""
return theano.function([lrembeddings, lrparams, inpl, inpr, inpo,
inpln, inprn, inpon],
[T.mean(cost), T.mean(out)], updates=updates,
on_unused_input='ignore')
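Note how the updates dictionary above is assembled incrementally: operator-parameter updates first, then embedding updates merged in with updates.update(...). A trimmed sketch of that pattern (symbols are illustrative):

from collections import OrderedDict
import numpy as np
import theano
import theano.tensor as T

w = theano.shared(np.zeros(3, dtype=theano.config.floatX), name='w')
e = theano.shared(np.zeros(3, dtype=theano.config.floatX), name='e')
x = T.vector('x')
cost = ((w - x) ** 2).sum() + ((e - x) ** 2).sum()

lr = T.scalar('lr')
updates = OrderedDict((p, p - lr * T.grad(cost, p)) for p in [w])
updates.update({e: e - lr * T.grad(cost, e)})  # merged in afterwards
step = theano.function([x, lr], cost, updates=updates)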