This article collects typical usage examples of the Python function theano.tensor.lmatrix. If you have been wondering what exactly lmatrix does, how to call it, or what it looks like in real code, the curated examples below should help.
A total of 15 code examples of the lmatrix function are shown, ordered by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
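Before the examples: theano.tensor.lmatrix declares a symbolic 2-D variable of dtype int64 ("l" for long), typically used for index or label matrices. The following minimal sketch shows how such a variable is declared and compiled into a callable; the names labels and count_fn are illustrative only and do not come from the examples below.

import numpy as np
import theano
import theano.tensor as T

# Declare a symbolic int64 matrix; 'labels' is just an illustrative name.
labels = T.lmatrix('labels')

# Build a small graph on it and compile it into a callable function.
row_counts = labels.sum(axis=1)
count_fn = theano.function([labels], row_counts)

# int64 input matches the lmatrix dtype, so no downcast is needed.
print(count_fn(np.array([[1, 2, 3], [4, 5, 6]], dtype='int64')))  # -> [ 6 15]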
Example 1: arch_memnet_selfsup
def arch_memnet_selfsup(self):
    '''
    Memory network with self-supervision.
    '''
    contexts = T.ltensor3('contexts')
    querys = T.lmatrix('querys')
    yvs = T.lmatrix('yvs')
    params = []
    question_layer = Embed(self.vocab_size, self.hidden_dim)
    q = T.reshape(question_layer(querys.flatten()),
                  (self.batchsize, self.sen_maxlen, self.hidden_dim))
    if self.kwargs.get('position_encoding'):
        lmat = position_encoding(self.sen_maxlen, self.hidden_dim).dimshuffle('x', 0, 1)
        print('[memory network] use PE')
        q = q * lmat
    u = mean(q, axis=1)
    params.extend(question_layer.params)
    mem_layer = MemoryLayer(self.batchsize, self.mem_size, self.unit_size,
                            self.vocab_size, self.hidden_dim, **self.kwargs)
    probs = mem_layer.get_probs(contexts, u).dimshuffle(0, 2)
    inputs = {
        'contexts': contexts,
        'querys': querys,
        'yvs': yvs,
        'cvs': T.lmatrix('cvs')
    }
    return (probs, inputs, params)
Example 2: test_maxpool_layer_forward_pass
def test_maxpool_layer_forward_pass():
    W_emb = [[0, 0, 0, 0, 1],
             [0, 0, 0, 1, 0],
             [0, 0, 1, 0, 0],
             [0, 1, 0, 0, 0]]
    W_emb = np.array(W_emb)
    W_dense = [[0, 0, 0, 0, 1, 0, 0, 0, 0, 1],
               [0, 0, 0, 1, 0, 0, 0, 0, -0.5, 0],
               [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
    W_dense = np.array(W_dense, dtype=float).T
    bounds = T.lmatrix('bounds')
    X = T.lmatrix('X')
    l_in1 = InputLayer((None, 2), input_var=bounds)
    l_in2 = InputLayer((None, 2), input_var=X)
    h1 = lasagne.layers.EmbeddingLayer(l_in2, input_size=4, output_size=5, W=W_emb)
    h2 = lasagne.layers.FlattenLayer(h1)
    h3 = lasagne.layers.DenseLayer(h2, num_units=5, nonlinearity=rectify, W=W_dense)
    l_pool = MaxpoolLayer([l_in1, h3])
    predictions = get_output(l_pool)
    pred_func = theano.function([bounds, X], predictions, allow_input_downcast=True, on_unused_input='warn')
    test_bounds = np.array([[0, 4]])
    test_X = np.array([[0, 1], [0, 0], [1, 1], [3, 3]])
    print(pred_func(test_bounds, test_X))
Example 3: multMatVect
def multMatVect(v, A, m1, B, m2):
    # TODO: need description for parameters and return value
    """
    Multiply the first half of v by A with a modulo of m1 and the second half
    by B with a modulo of m2.

    Notes
    -----
    The parameters of dot_modulo are passed implicitly because passing them
    explicitly takes more time than running the function's C-code.
    """
    if multMatVect.dot_modulo is None:
        A_sym = tensor.lmatrix('A')
        s_sym = tensor.ivector('s')
        m_sym = tensor.iscalar('m')
        A2_sym = tensor.lmatrix('A2')
        s2_sym = tensor.ivector('s2')
        m2_sym = tensor.iscalar('m2')
        o = DotModulo()(A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym)
        multMatVect.dot_modulo = function(
            [A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym], o, profile=False)

    # This way of calling the Theano fct is done to bypass Theano overhead.
    f = multMatVect.dot_modulo
    f.input_storage[0].storage[0] = A
    f.input_storage[1].storage[0] = v[:3]
    f.input_storage[2].storage[0] = m1
    f.input_storage[3].storage[0] = B
    f.input_storage[4].storage[0] = v[3:]
    f.input_storage[5].storage[0] = m2
    f.fn()
    r = f.output_storage[0].storage[0]
    return r
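As a reading aid for Example 3 (and Example 6, which repeats this function): the arithmetic the docstring describes, two independent modular matrix-vector products over the two halves of the state vector, can be sketched in plain NumPy as below. The helper name mult_mat_vect_reference is hypothetical and only illustrates the math; the real DotModulo Op performs the reduction in C and takes care of intermediate overflow, which this naive sketch sidesteps by using Python-object integers.

import numpy as np

def mult_mat_vect_reference(v, A, m1, B, m2):
    # First half of the state: (A . v[:3]) mod m1.
    top = np.dot(A.astype(object), v[:3].astype(object)) % m1
    # Second half of the state: (B . v[3:]) mod m2.
    bottom = np.dot(B.astype(object), v[3:].astype(object)) % m2
    # Object arithmetic avoids the int64 overflow the C Op handles explicitly.
    return np.concatenate([top, bottom]).astype('int64')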
Example 4: jointModelOutput
def jointModelOutput(num_sub_activities, num_affordances, num_sub_activities_anticipation,
                     num_affordances_anticipation, inputJointFeatures, inputHumanFeatures,
                     inputObjectFeatures):
    shared_input_layer = TemporalInputFeatures(inputJointFeatures)
    shared_hidden_layer = LSTM('tanh', 'sigmoid', 'orthogonal', 4, 128)
    # shared_hidden_layer = simpleRNN('tanh', 'orthogonal', 4, 128)
    shared_layers = [shared_input_layer, shared_hidden_layer]
    human_layers = [ConcatenateFeatures(inputHumanFeatures), LSTM('tanh', 'sigmoid', 'orthogonal', 4, 256)]
    object_layers = [ConcatenateFeatures(inputObjectFeatures), LSTM('tanh', 'sigmoid', 'orthogonal', 4, 256)]
    human_anticipation = [softmax(num_sub_activities_anticipation)]
    human_detection = [softmax(num_sub_activities)]
    object_anticipation = [softmax(num_affordances_anticipation)]
    object_detection = [softmax(num_affordances)]
    trY_1_detection = T.lmatrix()
    trY_2_detection = T.lmatrix()
    trY_1_anticipation = T.lmatrix()
    trY_2_anticipation = T.lmatrix()
    sharedrnn = SharedRNNOutput(
        shared_layers, human_layers, object_layers,
        human_detection, human_anticipation, object_detection,
        object_anticipation, softmax_loss, trY_1_detection,
        trY_2_detection, trY_1_anticipation, trY_2_anticipation, 1e-3
    )
    return sharedrnn
Example 5: test_blocksparse_grad_merge
def test_blocksparse_grad_merge():
    b = tensor.fmatrix()
    h = tensor.ftensor3()
    iIdx = tensor.lmatrix()
    oIdx = tensor.lmatrix()
    W_val, h_val, iIdx_val, b_val, oIdx_val = blocksparse_data()
    W = float32_shared_constructor(W_val)
    o = sparse_block_gemv_ss(b.take(oIdx, axis=0), W, h, iIdx, oIdx)
    gW = theano.grad(o.sum(), W)
    lr = numpy.asarray(0.05, dtype='float32')
    upd = W - lr * gW
    f1 = theano.function([h, iIdx, b, oIdx], updates=[(W, upd)],
                         mode=mode_with_gpu)
    # Not running with the GPU mode ensures that the elemwise is not merged in.
    mode = None
    if theano.config.mode == 'FAST_COMPILE':
        mode = theano.compile.mode.get_mode('FAST_RUN')
    f2 = theano.function([h, iIdx, b, oIdx], updates=[(W, upd)], mode=mode)
    f2(h_val, iIdx_val, b_val, oIdx_val)
    W_ref = W.get_value()
    # Reset the shared variable.
    W.set_value(W_val)
    f1(h_val, iIdx_val, b_val, oIdx_val)
    W_opt = W.get_value()
    utt.assert_allclose(W_ref, W_opt)
Example 6: multMatVect
def multMatVect(v, A, m1, B, m2):
    """
    Multiply the first half of v by A with a modulo of m1,
    and the second half by B with a modulo of m2.

    Note: The parameters of dot_modulo are passed implicitly because passing
    them explicitly takes more time than running the function's C-code.
    """
    if multMatVect.dot_modulo is None:
        A_sym = tensor.lmatrix("A")
        s_sym = tensor.ivector("s")
        m_sym = tensor.iscalar("m")
        A2_sym = tensor.lmatrix("A2")
        s2_sym = tensor.ivector("s2")
        m2_sym = tensor.iscalar("m2")
        o = DotModulo()(A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym)
        multMatVect.dot_modulo = function([A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym], o)

    # This way of calling the Theano fct is done to bypass Theano overhead.
    f = multMatVect.dot_modulo
    f.input_storage[0].storage[0] = A
    f.input_storage[1].storage[0] = v[:3]
    f.input_storage[2].storage[0] = m1
    f.input_storage[3].storage[0] = B
    f.input_storage[4].storage[0] = v[3:]
    f.input_storage[5].storage[0] = m2
    f.fn()
    r = f.output_storage[0].storage[0]
    return r
Example 7: test_multMatVect
def test_multMatVect():
    A1 = tensor.lmatrix('A1')
    s1 = tensor.ivector('s1')
    m1 = tensor.iscalar('m1')
    A2 = tensor.lmatrix('A2')
    s2 = tensor.ivector('s2')
    m2 = tensor.iscalar('m2')
    g0 = rng_mrg.DotModulo()(A1, s1, m1, A2, s2, m2)
    f0 = theano.function([A1, s1, m1, A2, s2, m2], g0)
    i32max = numpy.iinfo(numpy.int32).max
    A1 = numpy.random.randint(0, i32max, (3, 3)).astype('int64')
    s1 = numpy.random.randint(0, i32max, 3).astype('int32')
    m1 = numpy.asarray(numpy.random.randint(i32max), dtype="int32")
    A2 = numpy.random.randint(0, i32max, (3, 3)).astype('int64')
    s2 = numpy.random.randint(0, i32max, 3).astype('int32')
    m2 = numpy.asarray(numpy.random.randint(i32max), dtype="int32")
    f0.input_storage[0].storage[0] = A1
    f0.input_storage[1].storage[0] = s1
    f0.input_storage[2].storage[0] = m1
    f0.input_storage[3].storage[0] = A2
    f0.input_storage[4].storage[0] = s2
    f0.input_storage[5].storage[0] = m2
    r_a1 = rng_mrg.matVecModM(A1, s1, m1)
    r_a2 = rng_mrg.matVecModM(A2, s2, m2)
    f0.fn()
    r_b = f0.output_storage[0].value
    assert numpy.allclose(r_a1, r_b[:3])
    assert numpy.allclose(r_a2, r_b[3:])
Example 8: train_minibatch_fn
def train_minibatch_fn(self, evaluate=False):
    """
    Initialize this Theano function once.
    """
    X = T.lmatrix('X_train')
    L_x = T.lvector('L_X_train')
    Y = T.lmatrix('Y_train')
    L_y = T.lvector('L_y_train')
    learning_rate = T.dscalar('learning_rate')
    momentum = T.dscalar('momentum')
    weight_decay = T.dscalar('weight_decay')
    loss, accuracy = self.loss(X, L_x, Y, L_y, weight_decay)
    updates = self.get_sgd_updates(loss, learning_rate, momentum)
    outputs = [loss, accuracy]
    if evaluate:
        precision, recall = self.evaluate(X, L_x, Y, L_y)
        outputs = outputs + [precision, recall]
    return theano.function(
        inputs=[X, L_x, Y, L_y, learning_rate, momentum, weight_decay],
        outputs=outputs,
        updates=updates
    )
Example 9: DRAmodelnoedge
def DRAmodelnoedge(nodeList, edgeList, edgeListComplete, edgeFeatures, nodeFeatures,
                   nodeToEdgeConnections, clipnorm=25.0, train_for='joint'):
    edgeRNNs = {}
    edgeTypes = edgeList
    lstm_init = 'orthogonal'
    softmax_init = 'uniform'
    rng = np.random.RandomState(1234567890)
    for et in edgeTypes:
        inputJointFeatures = edgeFeatures[et]
        print(inputJointFeatures)
        edgeRNNs[et] = [TemporalInputFeatures(inputJointFeatures)]  # 128
    nodeRNNs = {}
    nodeTypes = nodeList.keys()
    nodeLabels = {}
    outputLayer = {}
    for nt in nodeTypes:
        num_classes = nodeList[nt]
        # nodeRNNs[nt] = [LSTM('tanh', 'sigmoid', lstm_init, truncate_gradient=4, size=256, rng=rng), softmax(num_classes, softmax_init, rng=rng)]  # 256
        nodeRNNs[nt] = [LSTM('tanh', 'sigmoid', lstm_init, truncate_gradient=4, size=args.nodeRNN_size, rng=rng)]  # 256
        if train_for == 'joint':
            nodeLabels[nt] = {}
            nodeLabels[nt]['detection'] = T.lmatrix()
            nodeLabels[nt]['anticipation'] = T.lmatrix()
            outputLayer[nt] = [softmax(num_classes, softmax_init, rng=rng),
                               softmax(num_classes + 1, softmax_init, rng=rng)]
        else:
            nodeLabels[nt] = T.lmatrix()
            outputLayer[nt] = [softmax(num_classes, softmax_init, rng=rng)]
        et = nt + '_input'
        edgeRNNs[et] = [TemporalInputFeatures(nodeFeatures[nt])]
    learning_rate = T.fscalar()
    dra = DRAanticipation(edgeRNNs, nodeRNNs, outputLayer, nodeToEdgeConnections,
                          edgeListComplete, softmax_loss, nodeLabels, learning_rate,
                          clipnorm, train_for=train_for)
    return dra
Example 10: test_blocksparse_gpu_gemv_opt
def test_blocksparse_gpu_gemv_opt():
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.lmatrix()
    oIdx = tensor.lmatrix()
    o = sparse_block_dot(W, h, iIdx, b, oIdx)
    f = theano.function([W, h, iIdx, b, oIdx], o, mode=mode_with_gpu)
    assert isinstance(f.maker.fgraph.toposort()[-2].op, GpuSparseBlockGemv)
Example 11: test_correct_solution
def test_correct_solution(self):
    x = tensor.lmatrix()
    y = tensor.lmatrix()
    z = tensor.lscalar()
    b = theano.tensor.nlinalg.lstsq()(x, y, z)
    f = function([x, y, z], b)
    TestMatrix1 = np.asarray([[2, 1], [3, 4]])
    TestMatrix2 = np.asarray([[17, 20], [43, 50]])
    TestScalar = np.asarray(1)
    f = function([x, y, z], b)
    m = f(TestMatrix1, TestMatrix2, TestScalar)
    self.assertTrue(np.allclose(TestMatrix2, np.dot(TestMatrix1, m[0])))
Example 12: test_blocksparse_gpu_gemv_opt
def test_blocksparse_gpu_gemv_opt():
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.lmatrix()
    oIdx = tensor.lmatrix()
    o = sparse_block_dot(W, h, iIdx, b, oIdx)
    f = theano.function([W, h, iIdx, b, oIdx], o, mode=mode_with_gpu)
    assert sum(1 for n in f.maker.fgraph.apply_nodes
               if isinstance(n.op, GpuSparseBlockGemv)) == 1
Example 13: test7
def test7():
    A = T.lmatrix("A")
    A_start = T.lvector("A_start")
    f = T.lmatrix("f")
    tgt = T.ivector("tgt")
    v = Viterbi(A, A_start, f, tgt)
    decode = v.decode()
    ff = theano.function([A, A_start, f, tgt], outputs=v.apply())
    ff2 = theano.function([A, A_start, f, tgt], decode)
    print(ff2([[1, 3, 1], [1, 2, 2], [2, 1, 3]],
              [1, 2, 1],
              [[1, 2, 3], [2, 2, 1], [3, 3, 2], [1, 1, 2]],
              [1, 2, 1, 2]))
Example 14: test_blocksparse_gpu_outer_opt
def test_blocksparse_gpu_outer_opt():
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.lmatrix()
    oIdx = tensor.lmatrix()
    o = sparse_block_dot(W, h, iIdx, b, oIdx)
    f = theano.function([W, h, iIdx, b, oIdx],
                        [o, tensor.grad(o.sum(), wrt=W)],
                        mode=mode_with_gpu)
    assert isinstance(f.maker.fgraph.toposort()[-2].op, GpuSparseBlockOuter)
Example 15: test_blocksparse_inplace_gemv_opt
def test_blocksparse_inplace_gemv_opt():
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.lmatrix()
    oIdx = tensor.lmatrix()
    o = sparse_block_dot(W, h, iIdx, b, oIdx)
    f = theano.function([W, h, iIdx, b, oIdx], o)
    if theano.config.mode == "FAST_COMPILE":
        assert not f.maker.fgraph.toposort()[-1].op.inplace
    else:
        assert f.maker.fgraph.toposort()[-1].op.inplace