This article collects typical usage examples of the Python method theano.tensor.matrix. If you are unsure how exactly tensor.matrix is used in Python, the curated code samples below may help. You can also explore other usage examples from the containing module, theano.tensor.
Fifteen code examples of tensor.matrix are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
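Before diving into the examples, here is a minimal, self-contained sketch of what tensor.matrix provides: a symbolic 2-D floating-point variable that can be wired into a compiled Theano function. The variable names below are illustrative and are not taken from any of the examples that follow.

import numpy as np
import theano
import theano.tensor as T

# two symbolic matrices (dtype defaults to theano.config.floatX)
x = T.matrix('x')
y = T.matrix('y')

# compile a function that multiplies them
matmul = theano.function([x, y], T.dot(x, y), allow_input_downcast=True)

a = np.random.rand(2, 3)
b = np.random.rand(3, 4)
print(matmul(a, b).shape)  # (2, 4)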
Example 1: return_network
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import matrix [as alias]
def return_network(self):
    '''This function returns the weight matrix and bias vectors of each hidden
    layer in the final network after training.'''
    weights_all_layer = []
    bias_all_layer = []
    bias_prime_all_layer = []
    for dA_layer in self.dA_layers:
        weight = dA_layer.W.get_value(borrow=True)
        bias = dA_layer.b.get_value(borrow=True)
        bias_prime = dA_layer.b_prime.get_value(borrow=True)
        weights_all_layer.append(weight)
        bias_all_layer.append(bias)
        bias_prime_all_layer.append(bias_prime)
    return weights_all_layer, bias_all_layer, bias_prime_all_layer
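A hypothetical usage sketch for the method above: assuming `sda` is a trained stacked denoising autoencoder instance exposing return_network (the instance name is illustrative), the returned lists hold one NumPy array per layer.

weights, biases, biases_prime = sda.return_network()
for i, (W, b) in enumerate(zip(weights, biases)):
    print('layer %d: W %s, b %s' % (i, W.shape, b.shape))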
Example 2: build_encoder_bi
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import matrix [as alias]
def build_encoder_bi(tparams, options):
    """
    Build a bidirectional encoder, given pre-computed word embeddings.
    """
    # word embedding (source)
    embedding = tensor.tensor3('embedding', dtype='float32')
    embeddingr = embedding[::-1]
    x_mask = tensor.matrix('x_mask', dtype='float32')
    xr_mask = x_mask[::-1]
    # encoder
    proj = get_layer(options['encoder'])[1](tparams, embedding, options,
                                            prefix='encoder',
                                            mask=x_mask)
    projr = get_layer(options['encoder'])[1](tparams, embeddingr, options,
                                             prefix='encoder_r',
                                             mask=xr_mask)
    ctx = tensor.concatenate([proj[0][-1], projr[0][-1]], axis=1)
    return embedding, x_mask, ctx
# some utilities
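A hypothetical sketch of how the returned symbolic variables might be compiled into a sentence-feature extractor; `tparams` and `options` are assumed to be initialized elsewhere, as in the original project, and the function name is illustrative.

embedding, x_mask, ctx = build_encoder_bi(tparams, options)
encode = theano.function([embedding, x_mask], ctx, name='f_bi_encoder')

# embeddings: (n_timesteps, n_samples, dim_word); mask: (n_timesteps, n_samples)
# features = encode(batch_embeddings, batch_mask)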
Example 3: __init__
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import matrix [as alias]
def __init__(self,convolutional_layers,feature_maps,filter_shapes,poolsize,feedforward_layers,feedforward_nodes,classes,learning_rate,regularization):
    self.input = T.tensor4()
    self.convolutional_layers = []
    self.convolutional_layers.append(convolutional_layer(self.input,feature_maps[1],feature_maps[0],filter_shapes[0][0],filter_shapes[0][1],poolsize[0]))
    for i in range(1,convolutional_layers):
        self.convolutional_layers.append(convolutional_layer(self.convolutional_layers[i-1].output,feature_maps[i+1],feature_maps[i],filter_shapes[i][0],filter_shapes[i][1],poolsize[i]))
    self.feedforward_layers = []
    # NOTE: `flattened` (the size of the flattened feature maps) is not defined in this
    # snippet; it is computed elsewhere in the original source.
    self.feedforward_layers.append(feedforward_layer(self.convolutional_layers[-1].output.flatten(2),flattened,feedforward_nodes[0]))
    for i in range(1,feedforward_layers):
        self.feedforward_layers.append(feedforward_layer(self.feedforward_layers[i-1].output,feedforward_nodes[i-1],feedforward_nodes[i]))
    self.output_layer = feedforward_layer(self.feedforward_layers[-1].output,feedforward_nodes[-1],classes)
    self.params = []
    for l in self.convolutional_layers + self.feedforward_layers:
        self.params.extend(l.get_params())
    self.params.extend(self.output_layer.get_params())
    self.target = T.matrix()
    self.output = self.output_layer.output
    # elementwise binary cross-entropy plus an L2 penalty on the weight matrices
    self.cost = -self.target*T.log(self.output)-(1-self.target)*T.log(1-self.output)
    self.cost = self.cost.mean()
    for i in range(convolutional_layers+feedforward_layers+1):
        self.cost += regularization*(self.params[2*i]**2).mean()
    self.gparams = [T.grad(self.cost, param) for param in self.params]
    self.propogate = theano.function([self.input,self.target],self.cost,updates=[(param,param-learning_rate*gparam) for param,gparam in zip(self.params,self.gparams)],allow_input_downcast=True)
    self.classify = theano.function([self.input],self.output,allow_input_downcast=True)
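The cost in this example is the elementwise binary cross-entropy written out by hand (with the L2 term added afterwards). For reference, the same elementwise term can be obtained from Theano's built-in helper; the variable names here are illustrative.

target = T.matrix('target')
output = T.matrix('output')

# equivalent to -target*T.log(output) - (1-target)*T.log(1-output), then averaged
cost = T.nnet.binary_crossentropy(output, target).mean()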
Example 4: __init__
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import matrix [as alias]
def __init__(self,classes,hidden_layers,features,nodes_per_hidden_layer,learning_rate,regularization):
    self.hidden_layers = []
    self.hidden_layers.append(layer(features,nodes_per_hidden_layer))
    for i in range(hidden_layers-1):
        self.hidden_layers.append(layer(nodes_per_hidden_layer,nodes_per_hidden_layer))
    self.output_layer = layer(nodes_per_hidden_layer,classes)
    self.params = []
    for l in self.hidden_layers:
        self.params.extend(l.get_params())
    self.params.extend(self.output_layer.get_params())
    self.A = T.matrix()
    self.t = T.matrix()
    # sigmoid activations built layer by layer from the alternating [W, b, W, b, ...] parameter list
    self.s = 1/(1+T.exp(-T.dot(self.A,self.params[0])-self.params[1]))
    for i in range(hidden_layers):
        self.s = 1/(1+T.exp(-T.dot(self.s,self.params[2*(i+1)])-self.params[2*(i+1)+1]))
    self.cost = -self.t*T.log(self.s)-(1-self.t)*T.log(1-self.s)
    self.cost = self.cost.mean()
    for i in range(hidden_layers+1):
        self.cost += regularization*(self.params[2*i]**2).mean()
    self.gparams = [T.grad(self.cost, param) for param in self.params]
    self.propogate = theano.function([self.A,self.t],self.cost,updates=[(param,param-learning_rate*gparam) for param,gparam in zip(self.params,self.gparams)],allow_input_downcast=True)
    self.classify = theano.function([self.A],self.s,allow_input_downcast=True)
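A hypothetical training sketch for the class above. The class name `MLP`, the toy data, and the epoch count are assumptions for illustration; the surrounding `layer` helper must be defined as in the original project.

import numpy as np

net = MLP(classes=3, hidden_layers=2, features=10,
          nodes_per_hidden_layer=32, learning_rate=0.1, regularization=1e-4)

A = np.random.rand(64, 10)                    # a batch of 64 feature vectors
t = np.eye(3)[np.random.randint(0, 3, 64)]    # one-hot targets
for epoch in range(100):
    cost = net.propogate(A, t)                # one gradient step, returns the cost
probs = net.classify(A)                       # sigmoid outputs, shape (64, 3)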
Example 5: __init__
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import matrix [as alias]
def __init__(self,convolutional_layers,feature_maps,filter_shapes,poolsize,feedforward_layers,feedforward_nodes,classes,regularization):
    self.input = T.tensor4()
    self.convolutional_layers = []
    self.convolutional_layers.append(convolutional_layer(self.input,feature_maps[1],feature_maps[0],filter_shapes[0][0],filter_shapes[0][1],poolsize[0]))
    for i in range(1,convolutional_layers):
        self.convolutional_layers.append(convolutional_layer(self.convolutional_layers[i-1].output,feature_maps[i+1],feature_maps[i],filter_shapes[i][0],filter_shapes[i][1],poolsize[i]))
    self.feedforward_layers = []
    # NOTE: `flattened` is not defined in this snippet; see the note in Example 3.
    self.feedforward_layers.append(feedforward_layer(self.convolutional_layers[-1].output.flatten(2),flattened,feedforward_nodes[0]))
    for i in range(1,feedforward_layers):
        self.feedforward_layers.append(feedforward_layer(self.feedforward_layers[i-1].output,feedforward_nodes[i-1],feedforward_nodes[i]))
    self.output_layer = feedforward_layer(self.feedforward_layers[-1].output,feedforward_nodes[-1],classes)
    self.params = []
    for l in self.convolutional_layers + self.feedforward_layers:
        self.params.extend(l.get_params())
    self.params.extend(self.output_layer.get_params())
    self.target = T.matrix()
    self.output = self.output_layer.output
    self.cost = -self.target*T.log(self.output)-(1-self.target)*T.log(1-self.output)
    self.cost = self.cost.mean()
    for i in range(convolutional_layers+feedforward_layers+1):
        self.cost += regularization*(self.params[2*i]**2).mean()
    # same model as Example 3, but trained with Adam updates instead of plain SGD
    self.updates = self.adam(self.cost,self.params)
    self.propogate = theano.function([self.input,self.target],self.cost,updates=self.updates,allow_input_downcast=True)
    self.classify = theano.function([self.input],self.output,allow_input_downcast=True)
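Unlike Example 3, this version delegates the update rule to a self.adam helper that is not shown in the snippet. For reference, here is a minimal sketch of a standard Adam update builder with common default hyperparameters; it is not the author's implementation, only an illustration of what such a helper typically returns.

def adam_updates(cost, params, lr=0.001, b1=0.9, b2=0.999, eps=1e-8):
    """Return a Theano update list implementing standard Adam."""
    updates = []
    t = theano.shared(np.asarray(0., dtype=theano.config.floatX))
    t_new = t + 1.
    for p, g in zip(params, T.grad(cost, params)):
        m = theano.shared(np.zeros(p.get_value().shape, dtype=theano.config.floatX))
        v = theano.shared(np.zeros(p.get_value().shape, dtype=theano.config.floatX))
        m_new = b1 * m + (1. - b1) * g          # first-moment estimate
        v_new = b2 * v + (1. - b2) * g ** 2     # second-moment estimate
        m_hat = m_new / (1. - b1 ** t_new)      # bias correction
        v_hat = v_new / (1. - b2 ** t_new)
        updates += [(m, m_new), (v, v_new),
                    (p, p - lr * m_hat / (T.sqrt(v_hat) + eps))]
    updates.append((t, t_new))
    return updates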
Example 6: __init__
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import matrix [as alias]
def __init__(self,convolutional_layers,feature_maps,filter_shapes,feedforward_layers,feedforward_nodes,classes):
    self.input = T.tensor4()
    self.convolutional_layers = []
    self.convolutional_layers.append(convolutional_layer(self.input,feature_maps[1],feature_maps[0],filter_shapes[0][0],filter_shapes[0][1]))
    for i in range(1,convolutional_layers):
        if i==2 or i==4:
            # the 3rd and 5th convolutional layers (i == 2, 4) also apply 2x2 max-pooling
            self.convolutional_layers.append(convolutional_layer(self.convolutional_layers[i-1].output,feature_maps[i+1],feature_maps[i],filter_shapes[i][0],filter_shapes[i][1],maxpool=(2,2)))
        else:
            self.convolutional_layers.append(convolutional_layer(self.convolutional_layers[i-1].output,feature_maps[i+1],feature_maps[i],filter_shapes[i][0],filter_shapes[i][1]))
    self.feedforward_layers = []
    self.feedforward_layers.append(feedforward_layer(self.convolutional_layers[-1].output.flatten(2),20480,feedforward_nodes[0]))
    for i in range(1,feedforward_layers):
        self.feedforward_layers.append(feedforward_layer(self.feedforward_layers[i-1].output,feedforward_nodes[i-1],feedforward_nodes[i]))
    self.output_layer = feedforward_layer(self.feedforward_layers[-1].output,feedforward_nodes[-1],classes)
    self.params = []
    for l in self.convolutional_layers + self.feedforward_layers:
        self.params.extend(l.get_params())
    self.params.extend(self.output_layer.get_params())
    self.target = T.matrix()
    self.output = self.output_layer.output
    self.cost = -self.target*T.log(self.output)-(1-self.target)*T.log(1-self.output)
    self.cost = self.cost.mean()
    self.updates = self.adam(self.cost, self.params)
    self.propogate = theano.function([self.input,self.target],self.cost,updates=self.updates,allow_input_downcast=True)
    self.classify = theano.function([self.input],self.output,allow_input_downcast=True)
Example 7: __init__
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import matrix [as alias]
def __init__(self):
    X_in = T.matrix('X_in')
    u = T.matrix('u')
    s = T.vector('s')
    eps = T.scalar('eps')

    # covariance matrix of the centered data
    X_ = X_in - T.mean(X_in, 0)
    sigma = T.dot(X_.T, X_) / X_.shape[0]
    self.sigma = theano.function([X_in], sigma, allow_input_downcast=True)

    # ZCA whitening transform built from the eigendecomposition (u, s) of sigma
    Z = T.dot(T.dot(u, T.nlinalg.diag(1. / T.sqrt(s + eps))), u.T)
    X_zca = T.dot(X_, Z.T)
    self.compute_zca = theano.function([X_in, u, s, eps], X_zca, allow_input_downcast=True)

    self._u = None
    self._s = None
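A hypothetical fit/apply driver for the whitening class above. Only sigma, compute_zca, _u, and _s come from the snippet; the instance name `zca`, the data array `X`, and the eps value are illustrative assumptions. The eigendecomposition is taken via NumPy's SVD of the symmetric covariance matrix.

import numpy as np

cov = zca.sigma(X)                         # covariance of the centered data, shape (d, d)
u, s, _ = np.linalg.svd(cov)               # eigenvectors (columns of u) and eigenvalues (s)
zca._u, zca._s = u, s                      # cache for later transforms

X_white = zca.compute_zca(X, u, s, 1e-5)   # eps=1e-5 regularizes small eigenvalues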
Example 8: __init__
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import matrix [as alias]
def __init__(self,convolutional_layers,feature_maps,filter_shapes,feedforward_layers,feedforward_nodes,classes):
    self.input = T.tensor4()
    self.convolutional_layers = []
    self.convolutional_layers.append(convolutional_layer(self.input,feature_maps[1],feature_maps[0],filter_shapes[0][0],filter_shapes[0][1]))
    for i in range(1,convolutional_layers):
        if i==3:
            # the 4th convolutional layer (i == 3) also applies 2x2 max-pooling
            self.convolutional_layers.append(convolutional_layer(self.convolutional_layers[i-1].output,feature_maps[i+1],feature_maps[i],filter_shapes[i][0],filter_shapes[i][1],maxpool=(2,2)))
        else:
            self.convolutional_layers.append(convolutional_layer(self.convolutional_layers[i-1].output,feature_maps[i+1],feature_maps[i],filter_shapes[i][0],filter_shapes[i][1]))
    self.feedforward_layers = []
    self.feedforward_layers.append(feedforward_layer(self.convolutional_layers[-1].output.flatten(2),40000,feedforward_nodes[0]))
    for i in range(1,feedforward_layers):
        self.feedforward_layers.append(feedforward_layer(self.feedforward_layers[i-1].output,feedforward_nodes[i-1],feedforward_nodes[i]))
    self.output_layer = feedforward_layer(self.feedforward_layers[-1].output,feedforward_nodes[-1],classes)
    self.params = []
    for l in self.convolutional_layers + self.feedforward_layers:
        self.params.extend(l.get_params())
    self.params.extend(self.output_layer.get_params())
    self.target = T.matrix()
    self.output = self.output_layer.output
    self.cost = -self.target*T.log(self.output)-(1-self.target)*T.log(1-self.output)
    self.cost = self.cost.mean()
    self.updates = self.adam(self.cost, self.params)
    self.propogate = theano.function([self.input,self.target],self.cost,updates=self.updates,allow_input_downcast=True)
    self.classify = theano.function([self.input],self.output,allow_input_downcast=True)
Example 9: __init__
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import matrix [as alias]
def __init__(self,rbm1,rbm2,rbm3,rbm4):
    self.learning_rate = 0.01
    # initialize the feedforward weights and biases from four pretrained RBMs
    self.W1 = rbm1.W
    self.W2 = rbm2.W
    self.W3 = rbm3.W
    self.W4 = rbm4.W
    self.W5 = theano.shared(self.ortho_weight(1000,10),borrow=True)
    self.b1 = rbm1.hbias
    self.b2 = rbm2.hbias
    self.b3 = rbm3.hbias
    self.b4 = rbm4.hbias
    self.b5 = theano.shared(np.zeros((10,), dtype=theano.config.floatX),borrow=True)
    self.input = T.matrix()
    self.target = T.matrix()
    self.l1out = T.nnet.sigmoid(T.dot(self.input,self.W1)+self.b1)
    self.l2out = T.nnet.sigmoid(T.dot(self.l1out,self.W2)+self.b2)
    self.l3out = T.nnet.sigmoid(T.dot(self.l2out,self.W3)+self.b3)
    self.l4out = T.nnet.sigmoid(T.dot(self.l3out,self.W4)+self.b4)
    self.output = T.nnet.softmax(T.dot(self.l4out,self.W5)+self.b5)
    self.cost = T.nnet.categorical_crossentropy(self.output,self.target).mean()
    self.params = [self.W1,self.W2,self.W3,self.W4,self.W5,self.b1,self.b2,self.b3,self.b4,self.b5]
    self.updates = self.adam(self.cost,self.params)
    self.train_f = theano.function([self.input,self.target],self.cost,updates=self.updates,allow_input_downcast=True)
    self.predict_f = theano.function([self.input],self.output,allow_input_downcast=True)
Example 10: __init__
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import matrix [as alias]
def __init__(self,hidden_layers,layer_nodes):
    self.input = T.matrix()
    self.target = T.matrix()
    self.W = []
    self.b = []
    self.activations = []
    self.W.append(theano.shared(self.ortho_weight(784,layer_nodes),borrow=True))
    self.b.append(theano.shared(np.zeros((layer_nodes,), dtype=theano.config.floatX),borrow=True))
    self.activations.append(T.nnet.sigmoid(T.dot(self.input,self.W[-1])+self.b[-1]))
    # residual-style accumulator: each hidden layer reads from (and adds to) the running sum
    self.residuals = self.activations[-1].copy()
    for layer in range(hidden_layers-1):
        self.W.append(theano.shared(self.ortho_weight(layer_nodes,layer_nodes),borrow=True))
        self.b.append(theano.shared(np.zeros((layer_nodes,), dtype=theano.config.floatX),borrow=True))
        self.activations.append(T.nnet.sigmoid(T.dot(self.residuals,self.W[-1])+self.b[-1]))
        self.residuals += self.activations[-1]
    self.W.append(theano.shared(self.ortho_weight(layer_nodes,10),borrow=True))
    self.b.append(theano.shared(np.zeros((10,), dtype=theano.config.floatX),borrow=True))
    self.activations.append(T.nnet.softmax(T.dot(self.residuals,self.W[-1])+self.b[-1]))
    self.cost = T.nnet.categorical_crossentropy(self.activations[-1],self.target).mean()
    self.params = self.W+self.b
    self.updates = self.adam(self.cost,self.params)
    self.train_f = theano.function([self.input,self.target],self.cost,updates=self.updates,allow_input_downcast=True)
    self.predict_f = theano.function([self.input],self.activations[-1],allow_input_downcast=True)
Example 11: __init__
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import matrix [as alias]
def __init__(self,hidden_layers,layer_nodes):
    self.input = T.matrix()
    self.target = T.matrix()
    self.W = []
    self.b = []
    self.activations = []
    self.W.append(theano.shared(self.ortho_weight(784,layer_nodes),borrow=True))
    self.b.append(theano.shared(np.zeros((layer_nodes,), dtype=theano.config.floatX),borrow=True))
    self.activations.append(T.nnet.sigmoid(T.dot(self.input,self.W[-1])+self.b[-1]))
    for layer in range(hidden_layers-1):
        self.W.append(theano.shared(self.ortho_weight(layer_nodes,layer_nodes),borrow=True))
        self.b.append(theano.shared(np.zeros((layer_nodes,), dtype=theano.config.floatX),borrow=True))
        self.activations.append(T.nnet.sigmoid(T.dot(self.activations[-1],self.W[-1])+self.b[-1]))
    self.W.append(theano.shared(self.ortho_weight(layer_nodes,10),borrow=True))
    self.b.append(theano.shared(np.zeros((10,), dtype=theano.config.floatX),borrow=True))
    self.activations.append(T.nnet.softmax(T.dot(self.activations[-1],self.W[-1])+self.b[-1]))
    self.cost = T.nnet.categorical_crossentropy(self.activations[-1],self.target).mean()
    self.params = self.W+self.b
    self.updates = self.adam(self.cost,self.params)
    self.train_f = theano.function([self.input,self.target],self.cost,updates=self.updates,allow_input_downcast=True)
    self.predict_f = theano.function([self.input],self.activations[-1],allow_input_downcast=True)
Example 12: __init__
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import matrix [as alias]
def __init__(self):
    super(M, self).__init__()

    x = T.matrix('x')  # input, target
    self.w = module.Member(T.matrix('w'))  # weights
    self.a = module.Member(T.vector('a'))  # hidden bias
    self.b = module.Member(T.vector('b'))  # output bias

    self.hid = T.tanh(T.dot(x, self.w) + self.a)
    hid = self.hid

    self.out = T.tanh(T.dot(hid, self.w.T) + self.b)
    out = self.out

    self.err = 0.5 * T.sum((out - x)**2)
    err = self.err

    params = [self.w, self.a, self.b]
    gparams = T.grad(err, params)
    updates = [(p, p - 0.01 * gp) for p, gp in zip(params, gparams)]

    self.step = module.Method([x], err, updates=dict(updates))
Example 13: test_local_mul_s_d
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import matrix [as alias]
def test_local_mul_s_d():
    if not theano.config.cxx:
        raise SkipTest("G++ not available, so we need to skip this test.")
    mode = theano.compile.mode.get_default_mode()
    mode = mode.including("specialize", "local_mul_s_d")

    for sp_format in sparse.sparse_formats:
        inputs = [getattr(theano.sparse, sp_format + '_matrix')(),
                  tensor.matrix()]

        f = theano.function(inputs,
                            sparse.mul_s_d(*inputs),
                            mode=mode)

        assert not any(isinstance(node.op, sparse.MulSD) for node
                       in f.maker.fgraph.toposort())
Example 14: test_local_sampling_dot_csr
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import matrix [as alias]
def test_local_sampling_dot_csr():
    if not theano.config.cxx:
        raise SkipTest("G++ not available, so we need to skip this test.")
    mode = theano.compile.mode.get_default_mode()
    mode = mode.including("specialize", "local_sampling_dot_csr")

    for sp_format in ['csr']:  # Not implemented for other formats
        inputs = [tensor.matrix(),
                  tensor.matrix(),
                  getattr(theano.sparse, sp_format + '_matrix')()]

        f = theano.function(inputs,
                            sparse.sampling_dot(*inputs),
                            mode=mode)

        if theano.config.blas.ldflags:
            assert not any(isinstance(node.op, sparse.SamplingDot) for node
                           in f.maker.fgraph.toposort())
        else:
            # SamplingDotCSR's C implementation needs BLAS, so it should not
            # be inserted
            assert not any(isinstance(node.op, sparse.opt.SamplingDotCSR) for node
                           in f.maker.fgraph.toposort())
Example 15: __generalized_sd_test
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import matrix [as alias]
def __generalized_sd_test(self, theanop, symbolicType, testOp, scipyType):
    scipy_ver = [int(n) for n in scipy.__version__.split('.')[:2]]

    if bool(scipy_ver < [0, 13]):
        raise SkipTest("comparison operators need newer release of scipy")

    x = symbolicType()
    y = theano.tensor.matrix()

    op = theanop(x, y)

    f = theano.function([x, y], op)

    m1 = scipyType(random_lil((10, 40), config.floatX, 3))
    m2 = self._rand_ranged(1000, -1000, [10, 40])

    self.assertTrue(numpy.array_equal(f(m1, m2).data, testOp(m1, m2).data))