This article collects typical usage examples of the theano.tensor.dmatrix function in Python. If you are wondering what dmatrix does or how to use it in practice, the curated examples below should help.
The following presents 15 code examples of the dmatrix function, ordered by popularity.
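Before the project examples, here is a minimal sketch (written for this article, not taken from any project below) of what T.dmatrix provides: a symbolic float64 matrix that can be combined into expressions and compiled into a callable with theano.function.

import numpy as np
import theano
import theano.tensor as T

# declare two symbolic float64 matrices
a = T.dmatrix('a')
b = T.dmatrix('b')

# build a symbolic expression and compile it into an ordinary callable
expr = a * b + T.exp(a)
f = theano.function([a, b], expr)

# evaluate with plain numpy arrays
print(f(np.ones((2, 2)), 2 * np.ones((2, 2))))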
Example 1: get_hidden_layers
def get_hidden_layers(dbn, layers):
print "... getting hidden layers"
test_data, test_label = get_test_set()
index = T.lscalar()
hidden_features = []
total_layers = len(layers)
w = T.dmatrix("w")
t = T.dmatrix("t")
b = T.vector("b")
z = T.dot(w,t)
# function for testing model
test_f = theano.function([w,t], z)
#loop through each layer
for i in xrange(total_layers):
weights = layers[i][0]
bias = layers[i][1]
if i == 0:
hidden_features.append( test_f(test_data,weights) )
else:
#use previous layer
prev_layer = hidden_features[i-1]
hidden_features.append( test_f(prev_layer,weights) )
# apply sigmoid
with open('hidden.pkl', 'w') as f:
cPickle.dump(hidden_features, f)
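The trailing "# apply sigmoid" comment above hints that each layer was meant to go through a bias-plus-sigmoid activation, which test_f does not actually do. A hedged sketch of that variant, reusing the symbolic w and t already defined in the example (the b_vec bias variable is an addition for illustration, not part of the original project):

# assumed variant: full affine transform followed by a sigmoid
b_vec = T.dvector('b_vec')                       # float64 bias to match the dmatrix inputs
z_sig = T.nnet.sigmoid(T.dot(w, t) + b_vec)
test_f_sig = theano.function([w, t, b_vec], z_sig)
# e.g. hidden_features.append(test_f_sig(test_data, weights, bias))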
Example 2: test_mixin_composition
def test_mixin_composition():
# Check composed expressions as parameters
a = theano.shared(0.0)
b = theano.shared(-1.0)
mu = a + b - 1.0
sigma = T.abs_(a * b)
p = Normal(mu=mu, sigma=sigma)
assert a in p.parameters_
assert b in p.parameters_
# Compose parameters with observed variables
a = theano.shared(1.0)
b = theano.shared(0.0)
y = T.dmatrix(name="y")
p = Normal(mu=a * y + b)
assert len(p.parameters_) == 3
assert a in p.parameters_
assert b in p.parameters_
assert p.sigma in p.parameters_
assert p.mu not in p.parameters_
assert len(p.observeds_) == 1
assert y in p.observeds_
# Check signatures
data_X = np.random.rand(10, 1)
data_y = np.random.rand(10, 1)
p.pdf(X=data_X, y=data_y)
p.cdf(X=data_X, y=data_y)
p.rvs(10, y=data_y)
# Check error
a = theano.shared(1.0)
b = theano.shared(0.0)
y = T.dmatrix() # y must be named
assert_raises(ValueError, Normal, mu=a * y + b)
Example 3: neural_net
def neural_net(
x=T.dmatrix(), #our points, one point per row
y=T.dmatrix(), #our targets
w=T.dmatrix(), #first layer weights
b=T.dvector(), #first layer bias
v=T.dmatrix(), #second layer weights
c=T.dvector(), #second layer bias
step=T.dscalar(), #step size for gradient descent
l2_coef=T.dscalar() #l2 regularization amount
):
"""Idea A:
"""
hid = T.tanh(T.dot(x, w) + b)
pred = T.dot(hid, v) + c
sse = T.sum((pred - y) * (pred - y))
w_l2 = T.sum(T.sum(w*w))
v_l2 = T.sum(T.sum(v*v))
loss = sse + l2_coef * (w_l2 + v_l2)
def symbolic_params(cls):
return [cls.w, cls.b, cls.v, cls.c]
def update(cls, x, y, **kwargs):
params = cls.symbolic_params()
gp = T.grad(cls.loss, params)
return [], [In(p, update=p - cls.step * g) for p,g in zip(params, gp)]
def predict(cls, x, **kwargs):
return cls.pred, []
return locals()
Example 4: createMLP
def createMLP(layers, s):
l_in = lasagne.layers.InputLayer(shape=(None, s))
prev_layer = l_in
Ws = []
for layer in layers:
enc = lasagne.layers.DenseLayer(prev_layer, num_units=layer, nonlinearity=rectify, W=init.Uniform(0.01))
Ws += [enc.W]
drop = lasagne.layers.DropoutLayer(enc, p=0.5)
prev_layer = drop
idx = 1
# creating mask
mask = lasagne.layers.InputLayer(shape=(None, layers[-1]))
prev_layer = lasagne.layers.ElemwiseMergeLayer([prev_layer, mask], merge_function=T.mul)
for layer in layers[-2::-1]:
print layer
dec = lasagne.layers.DenseLayer(prev_layer, num_units=layer, nonlinearity=rectify, W=Ws[-idx].T)
idx += 1
drop = lasagne.layers.DropoutLayer(dec, p=0.0)
prev_layer = drop
model = lasagne.layers.DenseLayer(prev_layer, num_units=s, nonlinearity=identity, W=Ws[0].T)
x_sym = T.dmatrix()
mask_sym = T.dmatrix()
all_params = lasagne.layers.get_all_params(model)
output = lasagne.layers.get_output(model, inputs={l_in: x_sym, mask: mask_sym})
loss_eval = lasagne.objectives.squared_error(output, x_sym).sum()
loss_eval /= (2.*batch_size)
updates = lasagne.updates.adam(loss_eval, all_params)
return l_in, mask, model, theano.function([x_sym, mask_sym], loss_eval, updates=updates)
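createMLP assumes several names from its enclosing module: rectify and identity from lasagne.nonlinearities, init for lasagne.init, and a module-level batch_size. Assuming those are in scope, a hedged sketch of how the returned training function might be called (layer sizes and batch shape are made up for illustration):

import numpy as np

l_in, mask, model, train_fn = createMLP([256, 64], s=784)
x_batch = np.random.rand(100, 784)     # one batch of float64 inputs (matches T.dmatrix)
mask_batch = np.ones((100, 64))        # mask over the innermost (64-unit) layer
loss = train_fn(x_batch, mask_batch)   # one Adam step; returns the batch reconstruction loss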
Example 5: test_infer_shape
def test_infer_shape(self):
admat = dmatrix()
bdmat = dmatrix()
admat_val = numpy.random.rand(3, 4)
bdmat_val = numpy.random.rand(3, 4)
self._compile_and_check([admat, bdmat], [SoftmaxGrad()(admat, bdmat)],
[admat_val, bdmat_val], SoftmaxGrad)
Example 6: LQLEP_wBarrier
def LQLEP_wBarrier( LQLEP = Th.dscalar(), ldet = Th.dscalar(), v1 = Th.dvector(),
N_spike = Th.dscalar(), ImM = Th.dmatrix(), U = Th.dmatrix(),
V2 = Th.dvector(), u = Th.dvector(), C = Th.dmatrix(),
**other):
'''
The actual Linear-Quadratic-Exponential-Poisson log-likelihood,
as a function of theta and M,
with a barrier on the log-det term and a prior.
'''
sq_nonlinearity = V2**2.*Th.sum( Th.dot(U,C)*U, axis=[1]) #Th.sum(U**2,axis=[1])
nonlinearity = V2 * Th.sqrt( Th.sum( Th.dot(U,C)*U, axis=[1])) #Th.sum(U**2,axis=[1]) )
if other.has_key('uc'):
LQLEP_wPrior = LQLEP + 0.5 * N_spike * ( 1./(ldet+250.)**2. \
- 0.000001 * Th.sum(Th.log(1.-4*sq_nonlinearity))) \
+ 10. * Th.sum( (u[2:]+u[:-2]-2*u[1:-1])**2. ) \
+ 10. * Th.sum( (other['uc'][2:]+other['uc'][:-2]-2*other['uc'][1:-1])**2. ) \
+ 0.000000001 * Th.sum( v1**2. )
# + 100. * Th.sum( v1 )
# + 0.0001*Th.sum( V2**2 )
else:
LQLEP_wPrior = LQLEP + 0.5 * N_spike * ( 1./(ldet+250.)**2. \
- 0.000001 * Th.sum(Th.log(1.-4*sq_nonlinearity))) \
+ 10. * Th.sum( (u[2:]+u[:-2]-2*u[1:-1])**2. ) \
+ 0.000000001 * Th.sum( v1**2. )
# + 100. * Th.sum( v1 )
# + 0.0001*Th.sum( V2**2 )
eigsImM,barrier = eig( ImM )
barrier = 1-(Th.sum(Th.log(eigsImM))>-250) * \
(Th.min(eigsImM)>0) * (Th.max(4*sq_nonlinearity)<1)
other.update(locals())
return named( **other )
Example 7: __init__
def __init__(self,N,Nsub,NRGC,prior=1):
self.N = N
self.Nsub = Nsub
self.NRGC = NRGC
U = Th.dmatrix() # SYMBOLIC variables #
V1 = Th.dvector() #
V2 = Th.dvector() #
STA = Th.dvector() #
STC = Th.dmatrix() #
theta = Th.dot( U.T , V1 ) #
UV1U = Th.dot( U , theta ) #
UV1V2U= Th.dot( V1 * U.T , (V2 * U.T).T ) #
posterior = -0.5 * Th.sum( V1 * V2 * U.T*U.T ) \
-0.25* Th.sum( UV1V2U.T * UV1V2U ) \
-0.5 * Th.sum( UV1U * UV1U * UV1U *V2 *V2 * V1 ) \
-0.5 * Th.sum( UV1U * UV1U * V2 * V1 ) \
-0.5 * Th.sum( theta * theta ) \
+ Th.dot( theta.T , STA ) \
+ Th.sum( Th.dot( V1* V2*U.T , U ) \
* (STC + STA.T*STA) )
dpost_dU = Th.grad( cost = posterior , #
wrt = U ) #
dpost_dV1 = Th.grad( cost = posterior , #
wrt = V1 ) #
dpost_dV2 = Th.grad( cost = posterior , #
wrt = V2 ) #
# self.posterior = function( [U,V2,V1,STA,STC], UV1V2U) #
self.posterior = function( [U,V2,V1,STA,STC], posterior) #
self.dpost_dU = function( [U,V2,V1,STA,STC], dpost_dU ) #
self.dpost_dV1 = function( [U,V2,V1,STA,STC], dpost_dV1 ) #
self.dpost_dV2 = function( [U,V2,V1,STA,STC], dpost_dV2 ) #
Example 8: theano_setup
def theano_setup(self):
W = T.dmatrix('W')
b = T.dvector('b')
c = T.dvector('c')
x = T.dmatrix('x')
s = T.dot(x, W) + c
# h = 1 / (1 + T.exp(-s))
# h = T.nnet.sigmoid(s)
h = T.tanh(s)
# r = T.dot(h,W.T) + b
# r = theano.printing.Print("r=")(2*T.tanh(T.dot(h,W.T) + b))
ract = T.dot(h,W.T) + b
r = self.output_scaling_factor * T.tanh(ract)
#g = function([W,b,c,x], h)
#f = function([W,b,c,h], r)
#fg = function([W,b,c,x], r)
# Another variable to be able to call a function
# with a noisy x and compare it to a reference x.
y = T.dmatrix('y')
all_losses = ((r - y)**2)
loss = T.sum(all_losses)
#loss = ((r - y)**2).sum()
self.theano_encode_decode = function([W,b,c,x], r)
self.theano_all_losses = function([W,b,c,x,y], [all_losses, T.abs_(s), T.abs_(ract)])
self.theano_gradients = function([W,b,c,x,y], [T.grad(loss, W), T.grad(loss, b), T.grad(loss, c)])
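Since W, b and c are free inputs rather than shared variables here, each compiled function expects the current parameter values as numpy arrays on every call. A hedged usage sketch, with ae standing for an instance of the surrounding class and all sizes assumed:

import numpy as np

n_visible, n_hidden, n_examples = 50, 20, 10
W_val = 0.01 * np.random.randn(n_visible, n_hidden)
b_val = np.zeros(n_visible)
c_val = np.zeros(n_hidden)
x_val = np.random.rand(n_examples, n_visible)

r_val = ae.theano_encode_decode(W_val, b_val, c_val, x_val)               # reconstructed inputs
losses, s_abs, ract_abs = ae.theano_all_losses(W_val, b_val, c_val, x_val, x_val)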
Example 9: LR
def LR(x=None, y=None, v=None, c=None, l2_coef=None):
# our points, one point per row
if x is None:
x = T.dmatrix()
# targets , one per row
if y is None:
y = T.dmatrix()
# first layer weights
if v is None:
v = T.dmatrix()
# first layer biases
if c is None:
c = T.dvector()
if l2_coef is None:
l2_coef = T.dscalar()
pred = T.dot(x, v) + c
sse = T.sum((pred - y) * (pred - y))
mse = sse / T.shape(y)[0]
v_l2 = T.sum(T.sum(v*v))
loss = mse + l2_coef * v_l2
@symbolicmethod
def params():
return [v, c]
return locals()
Example 10: make_theano_functions
def make_theano_functions(self) :
x = T.dmatrix('x')
h1 = T.dot(x, self.w1.T) + self.b1
a1 = 1. / (1. + T.exp(-h1))
h2 = T.dot(a1,self.w2.T) + self.b2
a2 = T.nnet.softmax(h2)
f = theano.function([x], a2)
y = T.dmatrix('y')
loss = T.mean(T.sum(y*-T.log(a2), axis=1))
gradw1 = T.grad(loss, self.w1)
gradw2 = T.grad(loss, self.w2)
gradb1 = T.grad(loss, self.b1)
gradb2 = T.grad(loss, self.b2)
gradf = theano.function(
[x, y],
[loss, a2],
updates = [
(self.w1, self.w1-self.lr*gradw1),
(self.w2, self.w2-self.lr*gradw2),
(self.b1, self.b1-self.lr*gradb1),
(self.b2, self.b2-self.lr*gradb2)
]
)
return f, gradf
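Both compiled functions close over the shared variables self.w1, self.b1, self.w2, self.b2 and the learning rate self.lr, so each call to gradf reports the loss and applies one SGD update in place. A hedged training-loop sketch, where net is an instance of the surrounding class and the batch iterator is assumed:

f, gradf = net.make_theano_functions()
for epoch in range(10):
    for x_batch, y_batch in batches:            # y_batch one-hot, width = number of classes
        loss, probs = gradf(x_batch, y_batch)   # one in-place SGD step on the shared weights
probs = f(x_batch)                              # forward pass only, no parameter update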
Example 11: train
def train( self, train_set, batch_size = 100 ):
for i in xrange(len(self.layers) - 1):
train_data = T.dmatrix('train_data')
x = T.dmatrix('x')
rng = numpy.random.RandomState(123)
theano_rng = RandomStreams(rng.randint(2 ** 10))
da = dA(
numpy_rng=rng,
theano_rng=theano_rng,
input=x,
n_visible=self.layers[i],
n_hidden=self.layers[i+1]
)
cost, updates = da.get_cost_updates(
corruption_level=0.,
learning_rate=0.4
)
train_da = theano.function(
[train_data],
cost,
updates=updates,
givens={
x: train_data
}
)
for epoch in xrange(200):
train_cost = []
for index in xrange(len(train_set)/batch_size):
train_cost.append(train_da(numpy.asarray(train_set[index * batch_size: (index + 1) * batch_size])))
print 'Training 1st ae epoch %d, cost ' % epoch, numpy.mean(train_cost)
train_set = da.get_hidden_values(train_set).eval()
self.dAs.append(da)
Example 12: NNet
def NNet(x=None, y=None, n_hid_layers=2):
# our points, one point per row
if x is None:
x = T.dmatrix()
# targets , one per row
if y is None:
y = T.dmatrix()
layers = []
_x = x
for i in xrange(n_hid_layers):
layers.append(Layer(x=_x))
_x = layers[-1].y
classif = LR(x=_x)
@symbolicmethod
def params():
rval = classif.params()
for l in layers:
rval.extend(l.params())
print([id(r) for r in rval])
return rval
if 0:
@symbolicmethod
def update(x, y):
pp = params()
gp = T.grad(classif.loss, pp)
return dict((p, p - 0.01*g) for p, g in zip(pp, gp))
return locals()
Example 13: __init__
def __init__(self, model, type_model):
super(LatentTypeWithTuningCurve, self).__init__(model, type_model)
# Also initialize the tuning curves
self.mu = self.type_model['mu']
self.sigma = self.type_model['sigma']
# Create a basis for the stimulus response
self.spatial_basis = create_basis(self.type_model['spatial_basis'])
self.spatial_shape = self.type_model['spatial_shape']
self.spatial_ndim = len(self.spatial_shape)
(_,Bx) = self.spatial_basis.shape
self.temporal_basis = create_basis(self.type_model['temporal_basis'])
(_,Bt) = self.temporal_basis.shape
# Save the filter sizes
self.Bx = Bx
self.Bt = Bt
# Initialize interpolated bases
self.initialize_basis()
# Initialize RxBx and RxBt matrices for the per-type tuning curves
self.w_x = T.dmatrix('w_x')
self.w_t = T.dmatrix('w_t')
# Create function handles for the stimulus responses
self.stim_resp_t = T.dot(self.temporal_basis, self.w_t)
self.stim_resp_x = T.dot(self.spatial_basis, self.w_x)
# Add the probability of these tuning curves to the log probability
self.log_p += -0.5/self.sigma**2 *T.sum((self.w_x-self.mu)**2) + \
-0.5/self.sigma**2 *T.sum((self.w_t-self.mu)**2)
Example 14: test_free_energy
def test_free_energy(self):
self.setUpAssociativeRBM()
rbm = self.rbm
w = rbm.W.get_value(borrow=True)
u = rbm.U.get_value(borrow=True)
v = T.dmatrix("v")
v2 = T.dmatrix("v2")
v_bias = rbm.v_bias.eval()
v_bias2 = rbm.v_bias2.eval()
h_bias = rbm.h_bias.eval()
res = rbm.free_energy(v, v2)
f = theano.function([v, v2], [res])
theano_res = f(self.x, self.y)
# Test for case only v1 is present
n1 = - np.dot(self.x, v_bias)
n2 = - np.dot(self.y, v_bias2)
n3 = - np.sum(np.log(1 + np.exp(h_bias + np.dot(self.x, w) + np.dot(self.y, u))))
np_res = n1 + n2 + n3
print theano_res
print np_res
diff = theano_res == np_res
self.assertTrue(np.all(diff))
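One caveat: the final assertion compares the Theano and numpy results with ==, which only passes when the two agree bit-for-bit. A tolerance-based check is the more common pattern for this kind of test (the tolerances below are an arbitrary choice, not from the original project):

# more forgiving equivalent of the exact comparison above
self.assertTrue(np.allclose(theano_res, np_res, rtol=1e-10, atol=1e-12))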
Example 15: theano_sed
def theano_sed():
"""
    Function to create a theano function to compute the euclidean distances efficiently
Returns:
theano.compile.function_module.Function: Compiled function
"""
theano.config.compute_test_value = "ignore"
# Set symbolic variable as matrix (with the XYZ coords)
coord_T_x1 = T.dmatrix()
coord_T_x2 = T.dmatrix()
    # Euclidean distances function
def squared_euclidean_distances(x_1, x_2):
sqd = T.sqrt(T.maximum(
(x_1 ** 2).sum(1).reshape((x_1.shape[0], 1)) +
(x_2 ** 2).sum(1).reshape((1, x_2.shape[0])) -
2 * x_1.dot(x_2.T), 0
))
return sqd
# Compiling function
f = theano.function([coord_T_x1, coord_T_x2],
squared_euclidean_distances(coord_T_x1, coord_T_x2),
allow_input_downcast=False)
return f
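A hedged usage sketch of the compiled function; the coordinate arrays are invented for illustration:

import numpy as np

sed = theano_sed()
xyz_a = np.random.rand(5, 3)    # 5 points with XYZ coords
xyz_b = np.random.rand(8, 3)    # 8 points with XYZ coords
dists = sed(xyz_a, xyz_b)       # (5, 8) matrix of pairwise Euclidean distances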