This article collects typical usage examples of the Python function theano.sparse.csr_matrix. If you have been wondering what csr_matrix does, how to use it, or what real code that uses it looks like, the curated examples below may help.
The following presents 15 code examples of the csr_matrix function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
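The examples are excerpts from larger projects, so each omits its module-level imports (theano.sparse appears under various aliases: sparse, S, SS, or tsp). The basic workflow is always the same: declare a symbolic CSR matrix, build a graph on it, compile with theano.function, then call the result with a concrete scipy.sparse CSR matrix. A minimal sketch, with made-up shapes:

import numpy as np
import scipy.sparse as sp
import theano
import theano.sparse as sparse

# Declare a symbolic CSR matrix and build a small graph on it.
x = sparse.csr_matrix("x", dtype=theano.config.floatX)
w = theano.shared(np.ones((40, 3), dtype=theano.config.floatX))
y = sparse.dot(x, w)  # sparse-by-dense dot; the result is dense

# Compile, then feed an actual scipy.sparse CSR matrix.
f = theano.function([x], y)
data = sp.random(10, 40, density=0.1, format="csr", dtype=theano.config.floatX)
print(f(data).shape)  # (10, 3)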
Example 1: __init__
def __init__(self, rng, P_input, L2_input, **kwargs):
    # symbol declaration, initialization and definition
    x_1_tm1, x_t = (
        sparse.csr_matrix("x_1_tm1", dtype=theano.config.floatX),
        sparse.csr_matrix("x_t", dtype=theano.config.floatX),
    ) if P_input is None else P_input[:2]

    # elements of history
    shape = kwargs.get("shape")
    if shape is not None:
        dict_size = shape[0]
        # consume the leading dict size; pass any remaining shape on to the superclass
        if len(shape) <= 1:
            del kwargs["shape"]
        else:
            kwargs["shape"] = shape[1:]
    else:
        dict_size = (16, 1, 32, 32)
    D_1_tm1 = theano.shared(rng.normal(size=dict_size).astype(theano.config.floatX))
    Dx_1_tm1 = sparse.dot(x_1_tm1, D_1_tm1)  # array access = dot operation
    super(SequenceCNN, self).__init__(rng=rng, inputsymbol=Dx_1_tm1, **kwargs)  # attaches new elements into the fgraph
    self.L2_output_1_tm1 = self.L2_output

    # elements of current time
    D_t = theano.shared(rng.normal(size=dict_size).astype(theano.config.floatX))
    Dx_t = sparse.dot(x_t, D_t)  # array access = dot operation
    self.L2_output_t = theano.clone(self.L2_output_1_tm1, replace={Dx_1_tm1: Dx_t})

    # element preparation for model building
    self.P_input = (x_1_tm1, x_t)
    self.params += [D_1_tm1, D_t]
    self.L2_output = self.L2_output_1_tm1 * self.L2_output_t
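Example 1 builds the graph once for x_1_tm1 and then re-binds it to the second input with theano.clone and a replace map, so both time steps share the same downstream weights. A minimal standalone sketch of that pattern, with made-up shapes and names:

import numpy as np
import theano
import theano.sparse as sparse

x1 = sparse.csr_matrix("x1", dtype=theano.config.floatX)
x2 = sparse.csr_matrix("x2", dtype=theano.config.floatX)
D = theano.shared(np.ones((40, 8), dtype=theano.config.floatX))

out1 = sparse.dot(x1, D) ** 2                # graph built once, on x1
out2 = theano.clone(out1, replace={x1: x2})  # same shared weights D, input swapped
f = theano.function([x1, x2], out1 * out2)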
Example 2: build
def build(self):
    print("start building")
    x_sym = sparse.csr_matrix("x", dtype="float32")
    y_sym = T.imatrix("y")
    gx_sym_1 = sparse.csr_matrix("x", dtype="float32")
    gx_sym_2 = sparse.csr_matrix("x", dtype="float32")

    # supervised branch: sparse input -> 50-dim embedding -> softmax over labels
    l_x_in = lasagne.layers.InputLayer(shape=(None, self.x.shape[1]), input_var=x_sym)
    l_hid = layers.SparseLayer(l_x_in, 50)
    embedding = lasagne.layers.get_output(l_hid)
    self.emb_fn = theano.function([x_sym], embedding)
    l_y = lasagne.layers.DenseLayer(l_hid, self.y.shape[1], nonlinearity=lasagne.nonlinearities.softmax)
    py_sym = lasagne.layers.get_output(l_y)
    loss = lasagne.objectives.categorical_crossentropy(py_sym, y_sym).mean()
    params = lasagne.layers.get_all_params(l_y, trainable=True)
    updates = lasagne.updates.sgd(loss, params, learning_rate=self.learning_rate)
    self.train_fn = theano.function([x_sym, y_sym], loss, updates=updates)

    # graph branch: two inputs share the embedding weights; minimize their squared distance
    l_gx_1 = lasagne.layers.InputLayer(shape=(None, self.x.shape[1]), input_var=gx_sym_1)
    l_gx_2 = lasagne.layers.InputLayer(shape=(None, self.x.shape[1]), input_var=gx_sym_2)
    l_gy_1 = layers.SparseLayer(l_gx_1, 50, W=l_hid.W, b=l_hid.b)
    l_gy_2 = layers.SparseLayer(l_gx_2, 50, W=l_hid.W, b=l_hid.b)
    gy_sym_1 = lasagne.layers.get_output(l_gy_1)
    gy_sym_2 = lasagne.layers.get_output(l_gy_2)
    g_loss = lasagne.objectives.squared_error(gy_sym_1, gy_sym_2).mean()
    g_params = lasagne.layers.get_all_params(l_gy_1) + lasagne.layers.get_all_params(l_gy_2)
    g_updates = lasagne.updates.sgd(g_loss, g_params, learning_rate=self.g_learning_rate)
    self.g_fn = theano.function([gx_sym_1, gx_sym_2], g_loss, updates=g_updates)

    acc = T.mean(T.eq(T.argmax(py_sym, axis=1), T.argmax(y_sym, axis=1)))
    self.test_fn = theano.function([x_sym, y_sym], acc)
    self.predict_fn = theano.function([x_sym], py_sym)
Example 3: create_TrainFunc_tranPES
def create_TrainFunc_tranPES(simfn, embeddings, marge=0.5, alpha=1., beta=1.):
    # parse the embedding data
    embedding = embeddings[0]  # D x N matrix
    lembedding = embeddings[1]

    # declare the symbolic variables for training triples
    hp = S.csr_matrix('head positive')  # N x batchsize matrix
    rp = S.csr_matrix('relation')
    tp = S.csr_matrix('tail positive')
    hn = S.csr_matrix('head negative')
    tn = S.csr_matrix('tail negative')
    lemb = T.scalar('embedding learning rate')
    lremb = T.scalar('relation learning rate')
    subtensorE = T.ivector('batch entities set')
    subtensorR = T.ivector('batch link set')

    # generate the training positive and negative triples
    hpmat = S.dot(embedding.E, hp).T  # batchsize x D dense matrix
    rpmat = S.dot(lembedding.E, rp).T
    tpmat = S.dot(embedding.E, tp).T
    hnmat = S.dot(embedding.E, hn).T
    tnmat = S.dot(embedding.E, tn).T

    # calculate the scores
    pos = tranPES3(simfn, T.concatenate([hpmat, tpmat], axis=1).reshape((hpmat.shape[0], 2, hpmat.shape[1])).dimshuffle(0, 2, 1), hpmat, rpmat, tpmat)
    negh = tranPES3(simfn, T.concatenate([hnmat, tpmat], axis=1).reshape((hnmat.shape[0], 2, hnmat.shape[1])).dimshuffle(0, 2, 1), hnmat, rpmat, tpmat)
    negt = tranPES3(simfn, T.concatenate([hpmat, tnmat], axis=1).reshape((hpmat.shape[0], 2, hpmat.shape[1])).dimshuffle(0, 2, 1), hpmat, rpmat, tnmat)

    costh, outh = margeCost(pos, negh, marge)
    costt, outt = margeCost(pos, negt, marge)
    embreg = regEmb(embedding, subtensorE, alpha)
    lembreg = regLink(lembedding, subtensorR, beta)

    cost = costh + costt + embreg[0] + lembreg
    out = T.concatenate([outh, outt])
    outc = embreg[1]

    # list of inputs to the function
    list_in = [lemb, lremb, hp, rp, tp, hn, tn, subtensorE, subtensorR]

    # update the embeddings using gradient descent
    emb_grad = T.grad(cost, embedding.E)
    New_embedding = embedding.E - lemb * emb_grad
    remb_grad = T.grad(cost, lembedding.E)
    New_rembedding = lembedding.E - lremb * remb_grad
    updates = OrderedDict({embedding.E: New_embedding, lembedding.E: New_rembedding})

    return theano.function(list_in, [cost, T.mean(out), T.mean(outc), embreg[0], lembreg],
                           updates=updates, on_unused_input='ignore')
Example 4: build
def build(self):
    x_sym = sparse.csr_matrix("x", dtype="float32")
    self.x_sym = x_sym
    y_sym = T.imatrix("y")
    gx_sym = sparse.csr_matrix("gx", dtype="float32")
    gy_sym = T.ivector("gy")
    gz_sym = T.vector("gz")

    l_x_in = lasagne.layers.InputLayer(shape=(None, self.x.shape[1]), input_var=x_sym)
    l_gx_in = lasagne.layers.InputLayer(shape=(None, self.x.shape[1]), input_var=gx_sym)
    l_gy_in = lasagne.layers.InputLayer(shape=(None,), input_var=gy_sym)

    # l_x_1 = layers.SparseLayer(l_x_in, self.y.shape[1], nonlinearity=lasagne.nonlinearities.softmax)
    l_x_2 = layers.SparseLayer(l_x_in, self.embedding_size)
    W = l_x_2.W
    # l_x_2 = layers.DenseLayer(l_x_2, self.y.shape[1], nonlinearity=lasagne.nonlinearities.softmax)
    # l_x = lasagne.layers.ConcatLayer([l_x_1, l_x_2], axis=1)
    # l_x = layers.DenseLayer(l_x, self.y.shape[1], nonlinearity=lasagne.nonlinearities.softmax)
    l_x = layers.DenseLayer(l_x_2, self.y.shape[1], nonlinearity=lasagne.nonlinearities.softmax)
    # l_x = layers.HybridLayer([l_x_in, l_x_2], self.y.shape[1], nonlinearity=lasagne.nonlinearities.softmax)

    l_gx = layers.SparseLayer(l_gx_in, self.embedding_size, W=W)
    l_gy = lasagne.layers.EmbeddingLayer(l_gy_in, input_size=self.num_ver, output_size=self.embedding_size)
    l_gx = lasagne.layers.ElemwiseMergeLayer([l_gx, l_gy], T.mul)
    pgy_sym = lasagne.layers.get_output(l_gx)
    g_loss = -T.log(T.nnet.sigmoid(T.sum(pgy_sym, axis=1) * gz_sym)).sum()
    self.l = l_gx

    py_sym = lasagne.layers.get_output(l_x)
    self.ret_y = py_sym
    loss = lasagne.objectives.categorical_crossentropy(py_sym, y_sym).mean()
    # loss += lasagne.objectives.categorical_crossentropy(lasagne.layers.get_output(l_x_1), y_sym).mean()
    # loss += lasagne.objectives.categorical_crossentropy(lasagne.layers.get_output(l_x_2), y_sym).mean()

    # params = lasagne.layers.get_all_params(l_x)
    # params = [l_x_1.W, l_x_1.b, l_x_2.W, l_x_2.b, l_x.W, l_x.b]
    if not JOINT:  # JOINT is a module-level flag in the original source
        params = [l_x.W, l_x.b]
    else:
        params = lasagne.layers.get_all_params(l_x)
        # params = [l_x.W1, l_x.W2, l_x.b]
    updates = lasagne.updates.sgd(loss, params, learning_rate=self.learning_rate)
    self.train_fn = theano.function([x_sym, y_sym], loss, updates=updates)

    g_params = lasagne.layers.get_all_params(l_gx)
    g_updates = lasagne.updates.sgd(g_loss, g_params, learning_rate=self.g_learning_rate)
    self.g_fn = theano.function([gx_sym, gy_sym, gz_sym], g_loss, updates=g_updates)

    acc = T.mean(T.eq(T.argmax(py_sym, axis=1), T.argmax(y_sym, axis=1)))
    self.test_fn = theano.function([x_sym, y_sym], acc)
Example 5: SimFn
def SimFn(fnsim, embeddings, leftop, rightop):
    """
    This function returns a Theano function to measure the similarity score
    for sparse matrix inputs.

    :param fnsim: similarity function (on Theano variables).
    :param embeddings: an Embeddings instance.
    :param leftop: class for the 'left' operator.
    :param rightop: class for the 'right' operator.
    """
    embedding, relationl, relationr = parse_embeddings(embeddings)

    # Inputs
    inpr = S.csr_matrix('inpr')
    inpl = S.csr_matrix('inpl')
    inpo = S.csr_matrix('inpo')

    # Graph
    # Each input is a one-hot sparse matrix with one column per example, so
    # multiplying the D x N embedding matrix E by it selects the embeddings
    # of the batch; .T gives one row per example.
    lhs = S.dot(embedding.E, inpl).T
    rhs = S.dot(embedding.E, inpr).T
    rell = S.dot(relationl.E, inpo).T
    relr = S.dot(relationr.E, inpo).T

    # leftop and rightop are callables supplied by the caller, e.g.:
    #   leftop = LayerMat('lin', state.ndim, state.nhid)
    #   rightop = LayerMat('lin', state.ndim, state.nhid)
    # whose __call__ reshapes and combines its two arguments:
    #   ry = y.reshape((y.shape[0], self.n_inp, self.n_out))
    #   rx = x.reshape((x.shape[0], x.shape[1], 1))
    #   return self.act((rx * ry).sum(1))
    simi = fnsim(leftop(lhs, rell), rightop(rhs, relr))
    """
    Theano function inputs.
    :input inpl: sparse csr matrix (representing the indexes of the 'left'
                 entities), shape=(N [Embeddings], #examples).
    :input inpr: sparse csr matrix (representing the indexes of the 'right'
                 entities), shape=(N [Embeddings], #examples).
    :input inpo: sparse csr matrix (representing the indexes of the
                 relation member), shape=(N [Embeddings], #examples).

    Theano function output
    :output simi: matrix of score values.
    """
    return theano.function([inpl, inpr, inpo], [simi],
                           on_unused_input='ignore')
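The dot products above mean each example is fed as a one-hot column: the input matrices have one row per embedding index and one column per example. A hedged sketch of how such inputs could be built and the compiled function called; the entity and relation counts are made up, and the commented call assumes SimFn has already been run:

import numpy as np
import scipy.sparse as sp

def one_hot_csr(indices, n_rows):
    # n_rows x len(indices) CSR matrix with a single 1 per column.
    cols = np.arange(len(indices))
    data = np.ones(len(indices), dtype="float32")
    return sp.csr_matrix((data, (indices, cols)), shape=(n_rows, len(indices)))

inpl = one_hot_csr([4, 7, 1], 1000)  # 'left' entities of 3 triples, 1000 entities
inpr = one_hot_csr([9, 2, 5], 1000)
inpo = one_hot_csr([0, 3, 3], 20)    # relations, 20 relation types
# simfn = SimFn(fnsim, embeddings, leftop, rightop)
# scores, = simfn(inpl, inpr, inpo)  # one similarity score per triple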
Example 6: _setup_vars
def _setup_vars(self, sparse_input):
    '''Setup Theano variables for our network.

    Parameters
    ----------
    sparse_input : bool
        If True, create an input variable that can hold a sparse matrix.
        Defaults to False, which assumes all arrays are dense.

    Returns
    -------
    vars : list of theano variables
        A list of the variables that this network requires as inputs.
    '''
    # x represents our network's input.
    self.x = TT.matrix('x')
    if sparse_input:
        self.x = SS.csr_matrix('x')

    # for a classifier, this specifies the correct labels for a given input.
    self.labels = TT.ivector('labels')

    # and the weights are reshaped to be just a vector.
    self.weights = TT.vector('weights')

    if self.weighted:
        return [self.x, self.labels, self.weights]
    return [self.x, self.labels]
Example 7: build
def build(self):
    """build the model. This method should be called after self.add_data.
    """
    x_sym = sparse.csr_matrix('x', dtype='float32')
    y_sym = T.imatrix('y')
    g_sym = T.imatrix('g')
    gy_sym = T.vector('gy')
    ind_sym = T.ivector('ind')

    l_x_in = lasagne.layers.InputLayer(shape=(None, self.x.shape[1]), input_var=x_sym)
    l_g_in = lasagne.layers.InputLayer(shape=(None, 2), input_var=g_sym)
    l_ind_in = lasagne.layers.InputLayer(shape=(None,), input_var=ind_sym)
    l_gy_in = lasagne.layers.InputLayer(shape=(None,), input_var=gy_sym)

    num_ver = max(self.graph.keys()) + 1
    l_emb_in = lasagne.layers.SliceLayer(l_g_in, indices=0, axis=1)
    l_emb_in = lasagne.layers.EmbeddingLayer(l_emb_in, input_size=num_ver, output_size=self.embedding_size)
    l_emb_out = lasagne.layers.SliceLayer(l_g_in, indices=1, axis=1)
    if self.neg_samp > 0:
        l_emb_out = lasagne.layers.EmbeddingLayer(l_emb_out, input_size=num_ver, output_size=self.embedding_size)

    l_emd_f = lasagne.layers.EmbeddingLayer(l_ind_in, input_size=num_ver, output_size=self.embedding_size, W=l_emb_in.W)
    l_x_hid = layers.SparseLayer(l_x_in, self.y.shape[1], nonlinearity=lasagne.nonlinearities.softmax)

    if self.use_feature:
        l_emd_f = layers.DenseLayer(l_emd_f, self.y.shape[1], nonlinearity=lasagne.nonlinearities.softmax)
        l_y = lasagne.layers.ConcatLayer([l_x_hid, l_emd_f], axis=1)
        l_y = layers.DenseLayer(l_y, self.y.shape[1], nonlinearity=lasagne.nonlinearities.softmax)
    else:
        l_y = layers.DenseLayer(l_emd_f, self.y.shape[1], nonlinearity=lasagne.nonlinearities.softmax)

    py_sym = lasagne.layers.get_output(l_y)
    loss = lasagne.objectives.categorical_crossentropy(py_sym, y_sym).mean()
    if self.layer_loss and self.use_feature:
        hid_sym = lasagne.layers.get_output(l_x_hid)
        loss += lasagne.objectives.categorical_crossentropy(hid_sym, y_sym).mean()
        emd_sym = lasagne.layers.get_output(l_emd_f)
        loss += lasagne.objectives.categorical_crossentropy(emd_sym, y_sym).mean()

    if self.neg_samp == 0:
        l_gy = layers.DenseLayer(l_emb_in, num_ver, nonlinearity=lasagne.nonlinearities.softmax)
        pgy_sym = lasagne.layers.get_output(l_gy)
        g_loss = lasagne.objectives.categorical_crossentropy(pgy_sym, lasagne.layers.get_output(l_emb_out)).sum()
    else:
        l_gy = lasagne.layers.ElemwiseMergeLayer([l_emb_in, l_emb_out], T.mul)
        pgy_sym = lasagne.layers.get_output(l_gy)
        g_loss = -T.log(T.nnet.sigmoid(T.sum(pgy_sym, axis=1) * gy_sym)).sum()

    params = [l_emd_f.W, l_emd_f.b, l_x_hid.W, l_x_hid.b, l_y.W, l_y.b] if self.use_feature else [l_y.W, l_y.b]
    if self.update_emb:
        params = lasagne.layers.get_all_params(l_y)
    updates = lasagne.updates.sgd(loss, params, learning_rate=self.learning_rate)
    self.train_fn = theano.function([x_sym, y_sym, ind_sym], loss, updates=updates, on_unused_input='ignore')
    self.test_fn = theano.function([x_sym, ind_sym], py_sym, on_unused_input='ignore')

    self.l = [l_gy, l_y]
    g_params = lasagne.layers.get_all_params(l_gy, trainable=True)
    g_updates = lasagne.updates.sgd(g_loss, g_params, learning_rate=self.g_learning_rate)
    self.g_fn = theano.function([g_sym, gy_sym], g_loss, updates=g_updates, on_unused_input='ignore')
Example 8: get_train_function
def get_train_function(self):
    # specify the computational graph
    target = T.matrix('target')
    weight = theano.shared(np.random.randn(len(self.feature_map), len(self.label_map)), name='weight')
    feat_mat = sparse.csr_matrix(name='feat_mat')
    mask_mat = sparse.csr_matrix(name='mask_mat')

    # mask_mat sums groups of softmax rows; the division renormalizes each
    # grouped row back into a probability distribution
    sum_pred = sparse.dot(mask_mat, T.nnet.softmax(sparse.dot(feat_mat, weight)))
    pred = sum_pred / sum_pred.sum(axis=1).reshape((sum_pred.shape[0], 1))

    # cross-entropy objective with L2 regularization on the weights
    objective = T.nnet.categorical_crossentropy(pred, target).sum() + \
        self.param.l2_regularization * (weight ** 2).sum()
    grad_weight = T.grad(objective, weight)

    # print 'Compiling function ...'
    # compile the function
    train = theano.function(inputs=[feat_mat, mask_mat, target],
                            outputs=[objective, weight],
                            updates=[(weight, weight - 0.1 * grad_weight)])
    return train
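A hedged sketch of driving the compiled train function. The dimensions are toys (5 rows grouped into 3 instances, 8 features, 4 labels), and the commented call assumes an object whose feature_map/label_map have matching sizes:

import numpy as np
import scipy.sparse as sp

feat_mat = sp.random(5, 8, density=0.3, format="csr")  # row-level features
# mask_mat groups the 5 rows into 3 instances before renormalization.
mask_mat = sp.csr_matrix(np.array([[1, 1, 0, 0, 0],
                                   [0, 0, 1, 1, 0],
                                   [0, 0, 0, 0, 1]], dtype="float64"))
target = np.eye(4)[[0, 2, 1]]  # one-hot targets, shape (3, 4)

# train = model.get_train_function()
# objective, weight = train(feat_mat, mask_mat, target)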
Example 9: __init__
def __init__(self, size, name="in", ndim=2, sparse=False):
    self.input = util.FLOAT_CONTAINERS[ndim](name)
    if sparse is True or (isinstance(sparse, str) and sparse.lower() == "csr"):
        assert ndim == 2, "Theano only supports sparse arrays with 2 dims"
        self.input = SS.csr_matrix("input")
    if isinstance(sparse, str) and sparse.lower() == "csc":
        assert ndim == 2, "Theano only supports sparse arrays with 2 dims"
        self.input = SS.csc_matrix("input")
    super(Input, self).__init__(size=size, name=name, inputs=0, activation="linear", ndim=ndim, sparse=sparse)
Example 10: __init__
def __init__(self, sparse_coding, nb_negative, embed_dims=128, context_dims=128,
             init_embeddings=None, negprob_table=None, optimizer='adam'):
    super(NCELangModelV4, self).__init__(weighted_inputs=False)
    vocab_size = sparse_coding.shape[0]  # the extra word is for OOV
    self.nb_base = sparse_coding.shape[1] - 1
    self.vocab_size = vocab_size
    self.embed_dim = embed_dims
    self.optimizer = optimizers.get(optimizer)
    self.nb_negative = nb_negative
    self.loss = categorical_crossentropy
    self.loss_fnc = objective_fnc(self.loss)
    self.sparse_coding = sparse_coding

    if negprob_table is None:
        negprob_table_ = np.ones(shape=(vocab_size,), dtype=theano.config.floatX) / vocab_size
        negprob_table = theano.shared(negprob_table_)
        self.neg_prob_table = negprob_table_
    else:
        self.neg_prob_table = negprob_table.astype(theano.config.floatX)
        negprob_table = theano.shared(negprob_table.astype(theano.config.floatX))
    self.sampler = TableSampler(self.neg_prob_table)

    self.add_input(name='idxes', ndim=3, dtype='int32')
    idxes = self.inputs['idxes'].get_output(True)
    shape = idxes.shape[1:]
    codes = tsp.csr_matrix('sp-codes', dtype=floatX)
    nb_pos_words = shape[0] * shape[1]
    pos_codes = codes[:nb_pos_words]

    self.add_node(Identity(inputs={True: pos_codes, False: pos_codes}), name='codes_flat')
    self.add_node(Identity(inputs={True: shape, False: shape}), name='sents_shape')
    self.add_node(Identity(inputs={True: codes, False: codes}), name='sparse_codes')

    self.add_node(SparseEmbedding(self.nb_base + 1, embed_dims, weights=init_embeddings),
                  name='embedding', inputs=('codes_flat', 'sents_shape'))
    self.add_node(LangLSTMLayer(embed_dims, output_dim=context_dims), name='encoder', inputs='embedding')
    # seq.add(Dropout(0.5))
    self.add_node(PartialSoftmaxV4(input_dim=context_dims, base_size=self.nb_base + 1),
                  name='part_prob', inputs=('idxes', 'sparse_codes', 'encoder'))
    self.add_node(Dense(input_dim=context_dims, output_dim=1, activation='exponential'),
                  name='normalizer', inputs='encoder')
    self.add_node(LookupProb(negprob_table), name='lookup_prob', inputs='idxes')
    self.add_node(SharedWeightsDense(self.nodes['part_prob'].W, self.nodes['part_prob'].b, self.sparse_coding,
                                     activation='exponential'),
                  name='true_unnorm_prob', inputs='encoder')
    self.add_node(ActivationLayer(name='normalization'), name='true_prob', inputs='true_unnorm_prob')

    self.add_output('pos_prob', node='part_prob')
    self.add_output('neg_prob', node='lookup_prob')
    self.add_output('pred_prob', node='true_prob')
    self.add_output('normalizer', node='normalizer')
    self.add_output('unrm_prob', node='true_unnorm_prob')
Example 11: __init__
def __init__(self, size, name='in', ndim=2, sparse=False, **kwargs):
    self.input = util.FLOAT_CONTAINERS[ndim](name)
    if sparse is True or \
            (isinstance(sparse, util.basestring) and sparse.lower() == 'csr'):
        assert ndim == 2, 'Theano only supports sparse arrays with 2 dims'
        self.input = SS.csr_matrix('input')
    if isinstance(sparse, util.basestring) and sparse.lower() == 'csc':
        assert ndim == 2, 'Theano only supports sparse arrays with 2 dims'
        self.input = SS.csc_matrix('input')
    super(Input, self).__init__(
        size=size, name=name, activation='linear', ndim=ndim, sparse=sparse)
Example 12: build
def build(self):
    x_sym = sparse.csr_matrix('x', dtype='float32')
    self.x_sym = x_sym
    y_sym = T.imatrix('y')
    gx_sym = sparse.csr_matrix('gx', dtype='float32')
    gy_sym = T.ivector('gy')

    l_x_in = lasagne.layers.InputLayer(shape=(None, self.x.shape[1]), input_var=x_sym)
    l_gx_in = lasagne.layers.InputLayer(shape=(None, self.x.shape[1]), input_var=gx_sym)

    l_x_1 = layers.SparseLayer(l_x_in, self.y.shape[1], nonlinearity=lasagne.nonlinearities.softmax)
    l_x_2 = layers.SparseLayer(l_x_in, self.embedding_size)
    W = l_x_2.W
    embedding = lasagne.layers.get_output(l_x_2)
    self.emb_fn = theano.function([x_sym], embedding)
    l_x_2 = lasagne.layers.DenseLayer(l_x_2, self.y.shape[1], nonlinearity=lasagne.nonlinearities.softmax)
    l_x = lasagne.layers.ConcatLayer([l_x_1, l_x_2], axis=1)
    l_x = lasagne.layers.DenseLayer(l_x, self.y.shape[1], nonlinearity=lasagne.nonlinearities.softmax)

    self.num_ver = max(self.graph.keys()) + 1
    l_gx = layers.SparseLayer(l_gx_in, self.embedding_size, W=W)
    l_gx = lasagne.layers.DenseLayer(l_gx, self.num_ver, nonlinearity=lasagne.nonlinearities.softmax)

    py_sym = lasagne.layers.get_output(l_x)
    self.ret_y = py_sym
    loss = lasagne.objectives.categorical_crossentropy(py_sym, y_sym).mean()
    pgy_sym = lasagne.layers.get_output(l_gx)
    g_loss = lasagne.objectives.categorical_crossentropy(pgy_sym, gy_sym).sum()

    # params = lasagne.layers.get_all_params(l_x)
    params = [l_x_1.W, l_x_1.b, l_x_2.W, l_x_2.b, l_x.W, l_x.b]
    updates = lasagne.updates.sgd(loss, params, learning_rate=self.learning_rate)
    self.train_fn = theano.function([x_sym, y_sym], loss, updates=updates)

    g_params = lasagne.layers.get_all_params(l_gx)
    g_updates = lasagne.updates.sgd(g_loss, g_params, learning_rate=self.g_learning_rate)
    self.g_fn = theano.function([gx_sym, gy_sym], g_loss, updates=g_updates)

    acc = T.mean(T.eq(T.argmax(py_sym, axis=1), T.argmax(y_sym, axis=1)))
    self.test_fn = theano.function([x_sym, y_sym], acc)
Example 13: test_sparse
def test_sparse(self):
    mySymbolicSparseList = TypedListType(sparse.SparseType("csr", theano.config.floatX))()
    mySymbolicSparse = sparse.csr_matrix()
    z = Count()(mySymbolicSparseList, mySymbolicSparse)
    f = theano.function([mySymbolicSparseList, mySymbolicSparse], z)
    x = sp.csr_matrix(random_lil((10, 40), theano.config.floatX, 3))
    y = sp.csr_matrix(random_lil((10, 40), theano.config.floatX, 3))
    self.assertTrue(f([x, y, y], y) == 2)
Example 14: ForwardFn
def ForwardFn(fnsim, embeddings, leftop, rightop, marge=1.0):
    """
    This function returns a theano function to perform a forward step,
    contrasting pairs of positive and negative triplets. Members are given
    as sparse matrices. For one positive triplet there is one negative
    triplet.

    :param fnsim: similarity function (on theano variables).
    :param embeddings: an embeddings instance.
    :param leftop: class for the 'left' operator.
    :param rightop: class for the 'right' operator.
    :param marge: margin for the cost function.

    :note: this is useful for W_SABIE [Weston et al., IJCAI 2011]
    """
    embedding, relationl, relationr = parse_embeddings(embeddings)

    # inputs
    inpr = S.csr_matrix()
    inpl = S.csr_matrix()
    inpo = S.csr_matrix()
    inpln = S.csr_matrix()
    inprn = S.csr_matrix()
    inpon = S.csr_matrix()

    # graph
    lhs = S.dot(embedding.E, inpl).T
    rhs = S.dot(embedding.E, inpr).T
    rell = S.dot(relationl.E, inpo).T
    relr = S.dot(relationr.E, inpo).T
    lhsn = S.dot(embedding.E, inpln).T
    rhsn = S.dot(embedding.E, inprn).T
    relln = S.dot(relationl.E, inpon).T
    relrn = S.dot(relationr.E, inpon).T
    simi = fnsim(leftop(lhs, rell), rightop(rhs, relr))
    simin = fnsim(leftop(lhsn, relln), rightop(rhsn, relrn))
    cost, out = margincost(simi, simin, marge)
    """
    Theano function inputs.
    :input inpl: sparse csr matrix representing the indexes of the positive
                 triplet 'left' member, shape=(N [Embeddings], #examples).
    :input inpr: sparse csr matrix representing the indexes of the positive
                 triplet 'right' member, shape=(N [Embeddings], #examples).
    :input inpo: sparse csr matrix representing the indexes of the positive
                 triplet relation member, shape=(N [Embeddings], #examples).
    :input inpln: sparse csr matrix representing the indexes of the negative
                  triplet 'left' member, shape=(N [Embeddings], #examples).
    :input inprn: sparse csr matrix representing the indexes of the negative
                  triplet 'right' member, shape=(N [Embeddings], #examples).
    :input inpon: sparse csr matrix representing the indexes of the negative
                  triplet relation member, shape=(N [Embeddings], #examples).

    Theano function output.
    :output out: binary vector representing when the margin is violated, i.e.
                 when an update occurs.
    """
    return theano.function([inpl, inpr, inpo,
                            inpln, inprn, inpon], [out],
                           on_unused_input='ignore')
Example 15: test_sparse
def test_sparse(self):
    if not scipy_imported:
        raise SkipTest("Optional package SciPy not installed")
    mySymbolicSparseList = TypedListType(sparse.SparseType("csr", theano.config.floatX))()
    mySymbolicSparse = sparse.csr_matrix()
    z = Count()(mySymbolicSparseList, mySymbolicSparse)
    f = theano.function([mySymbolicSparseList, mySymbolicSparse], z)
    x = sp.csr_matrix(random_lil((10, 40), theano.config.floatX, 3))
    y = sp.csr_matrix(random_lil((10, 40), theano.config.floatX, 3))
    self.assertTrue(f([x, y, y], y) == 2)
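random_lil in the last two examples is a helper from Theano's own sparse test suite, not a public API. Outside that suite, a comparable random CSR input can be produced with scipy alone; a rough stand-in sketch (the density calculation only approximates the helper's nonzero count):

import scipy.sparse as sp
import theano

def random_csr(shape, dtype, nnz):
    # Random sparse CSR matrix with roughly nnz nonzero entries.
    n_rows, n_cols = shape
    density = float(nnz) / (n_rows * n_cols)
    return sp.random(n_rows, n_cols, density=density, format="csr", dtype=dtype)

x = random_csr((10, 40), theano.config.floatX, 3)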