

Python tensor.fmatrices Function Code Examples

This article collects typical usage examples of the Python function theano.tensor.fmatrices. If you are wondering exactly what fmatrices does, how to call it, or what it looks like in real code, the hand-picked examples below should help.


The sections below present 13 code examples of the fmatrices function, sorted by popularity by default.
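
Before working through the examples, a minimal sketch of the function itself may be useful (assuming a standard Theano installation): T.fmatrices accepts any number of names and returns one symbolic float32 matrix variable per name, which is shorthand for repeated T.fmatrix calls.

import numpy as np
import theano
import theano.tensor as T

# One float32 matrix variable per name -- shorthand for two T.fmatrix calls.
x, y = T.fmatrices('x', 'y')

# Build a small symbolic expression and compile it into a callable.
f = theano.function([x, y], x + 2 * y)

a = np.ones((2, 2), dtype='float32')
b = np.full((2, 2), 3, dtype='float32')
print(f(a, b))  # [[7. 7.] [7. 7.]]

Note that the f-prefixed constructors pin the dtype to float32 regardless of theano.config.floatX; the generic T.matrices helper uses floatX instead.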

Example 1: SGD

def SGD(eta, n_epochs, valid_steps, momentum, low, high, init, random_init='gaussian'):
    t0 = time.time()
    index = T.iscalar('index')
    x, y, z, alpha = T.fmatrices('x', 'y', 'z', 'alpha')
    n_minibatch = max_minibatch - 2
    model = Model(n_tree, n_nodes, low, high, init, random_init)
    model_op, auto_upd = model.op(x)
    valid_op, valid_upd = model.valid_op(z, valid_steps)

    loss = model.loss(y, model_op)
    valid_loss = model.loss(alpha, valid_op)

    print "Updation to be compiled yet"

    params = model.params
    train_upd = gradient_updates_momentum(loss, params, eta, momentum) + auto_upd
    train_output = [model_op, loss]
    valid_output = [valid_op, valid_loss]

    print "Train function to be compiled"
    train_fn = theano.function([index], train_output, updates=train_upd,
                               givens={x: train_x[:, n_in * index: n_in * (index + 1)],
                                       y: train_x[:, (n_in * index + n_tree): (n_in * (index + 1) + 1)]}, name='train_fn')

    valid_fn = theano.function([index], valid_output, updates=valid_upd,
                               givens={z: train_x[:, n_tree * index: n_tree * (index + 1)],
                                       alpha: train_x[:, (n_in * index + n_tree): (n_in * index + n_tree + valid_steps)]}, name='valid_fn')

    print "Train function compiled"


    # Compilation over
    #################
    ## TRAIN MODEL ##
    #################
    print 'The compilation time is', time.time() - t0
    loss_list = []
    for i in range(n_epochs):
        epoch_loss = 0

        t1 = time.time()
        for idx in range(n_minibatch):
            print 'The current idx is', idx, 'and the epoch number is', i
            # Call each compiled function once per step; a second call would
            # run the updates (and train on the minibatch) again.
            result = train_fn(idx)
            output, loss_ = result[:-1], result[-1]
            if idx % 500 == 0:
                v_result = valid_fn(idx / 500)
                v_output, v_loss = v_result[:-1][0], v_result[-1]
                print 'v_pred is', ' '.join([mappings_words[prediction(abc)] for abc in v_output])
                print 'v_loss is', np.array(v_loss)
            print 'The loss is', loss_
            epoch_loss += loss_
            loss_list.append(loss_)

            print '=='*20
        print 'The mean loss for the epoch was', epoch_loss/float(n_minibatch)
        print 'Time taken by this epoch is', time.time()-t1
        print '-'*50
    pyplot.plot(loss_list)
    pyplot.show()
Author: Azrael1, Project: Seq-Gen, Lines: 58, Source: main.py

Example 2: get_cost_updates

    def get_cost_updates(self, corruption_level, learning_rate, sample_method, enc_function):
        """ This function computes the cost and the updates for one trainng
        step of the dA """

        tilde_x = self.get_corrupted_input(self.x, corruption_level)
        y = self.get_hidden_values(tilde_x, enc_function)
        z = self.get_reconstructed_input(y, enc_function)

        # Placeholder; L is reassigned below according to self.error_type.
        L = T.fmatrices()

        # if only encoding but not sample
        if sample_method == -1:


            if self.error_type == 1:
                L = - T.sum(tilde_x * T.log(z) + (1 - tilde_x) * T.log(1 - z), axis=1)

            # squared error
            if self.error_type == 0:
                L = T.sum((tilde_x - z)**2, axis=1)

        else:

            # sampled versions of the input and the reconstruction
            sampled_x = self.get_sampled(tilde_x)
            sampled_z = self.get_sampled(z)

            if self.error_type == 1:
                L = - T.sum(sampled_x * T.log(sampled_z) + (1 - sampled_x) * T.log(1 - sampled_z), axis=1)

            # squared error
            if self.error_type == 0:
                L = T.sum((sampled_x - sampled_z)**2, axis=1)

        # note : L is now a vector, where each element is the
        #        cross-entropy cost of the reconstruction of the
        #        corresponding example of the minibatch. We need to
        #        compute the average of all these to get the cost of
        #        the minibatch
        cost = T.mean(L)

        # compute the gradients of the cost of the `dA` with respect
        # to its parameters
        gparams = T.grad(cost, self.params)
        # generate the list of updates
        updates = [
            (param, param - learning_rate * gparam)
            for param, gparam in zip(self.params, gparams)
        ]

        return (cost, updates)
Author: lifenggg, Project: assaydata, Lines: 54, Source: dA.py

Example 3: test_advinc_subtensor1

def test_advinc_subtensor1():
    """ Test the second case in the opt local_gpu_advanced_incsubtensor1 """
    shared = cuda.shared_constructor
    # shared = tensor.shared
    xval = numpy.asarray([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype="float32")
    yval = numpy.asarray([[10, 10, 10], [10, 10, 10]], dtype="float32")
    x = shared(xval, name="x")
    y = T.fmatrices("y")
    expr = T.advanced_inc_subtensor1(x, y, [0, 2])
    f = theano.function([y], expr, mode=mode_with_gpu)
    assert sum([isinstance(node.op, cuda.GpuAdvancedIncSubtensor1)
                for node in f.maker.fgraph.toposort()]) == 1
    assert numpy.allclose(f(yval), [[11.0, 12.0, 13.0], [4.0, 5.0, 6.0], [17.0, 18.0, 19.0]])
Author: olivierverdier, Project: Theano, Lines: 12, Source: test_basic_ops.py

Example 4: test_set_subtensor

def test_set_subtensor():
    shared = cuda.shared_constructor
    #shared = tensor.shared
    x, y = T.fmatrices('x', 'y')
    xval = numpy.asarray([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                         dtype='float32')
    yval = numpy.asarray([[10, 10, 10], [10, 10, 10], [10, 10, 10]],
                         dtype='float32')
    expr = T.set_subtensor(x[:, 1:3], y[:, 1:3])
    f = theano.function([x, y], expr, mode=mode_with_gpu)
    assert sum([isinstance(node.op, cuda.GpuSubtensor)
                for node in f.maker.fgraph.toposort()]) == 1
    assert sum([isinstance(node.op, cuda.GpuIncSubtensor) and
                node.op.set_instead_of_inc
                for node in f.maker.fgraph.toposort()]) == 1
    print f(xval, yval)
Author: delallea, Project: Theano, Lines: 13, Source: test_basic_ops.py

Example 5: test_squared_error_cost

def test_squared_error_cost():

    ySym, yhatSym = T.fmatrices('y', 'yhat')

    sqerr = theano.function([yhatSym, ySym],
                            outputs=squaredError(yhatSym, ySym))

    # fmatrices pins the inputs to float32, so this test assumes
    # theano.config.floatX == 'float32'.
    yhat = np.asarray([[1], [2], [3]], dtype=theano.config.floatX)
    y = np.asarray([[1], [2], [3]], dtype=theano.config.floatX)

    assert np.abs(sqerr(yhat, y)) < 1e-5

    yhat = np.asarray([[1], [2.1], [3]], dtype=theano.config.floatX)

    assert np.abs(sqerr(yhat, y) - 0.01/3) < 1e-5
Author: terkkila, Project: cgml, Lines: 15, Source: test_costs.py

Example 6: test_abs_cost

def test_abs_cost():

    ySym, yhatSym = T.fmatrices('y', 'yhat')

    ac = theano.function([yhatSym, ySym],
                         outputs=absoluteError(yhatSym, ySym))

    yhat = np.asarray([[1], [2], [3]], dtype=theano.config.floatX)
    y = np.asarray([[1], [2], [3]], dtype=theano.config.floatX)

    assert np.abs(ac(yhat, y)) < 1e-5

    yhat = np.asarray([[1], [2.1], [3]], dtype=theano.config.floatX)

    assert np.abs(ac(yhat, y) - 0.1/3) < 1e-5
Author: terkkila, Project: cgml, Lines: 15, Source: test_costs.py

Example 7: test_inc_subtensor

def test_inc_subtensor():
    shared = cuda.shared_constructor
    #shared = tensor.shared
    x, y = T.fmatrices('x', 'y')
    xval = numpy.asarray([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                      dtype='float32')
    yval = numpy.asarray([[10, 10, 10], [10, 10, 10], [10, 10, 10]],
                      dtype='float32')
    expr = T.inc_subtensor(x[:, 1:3], y[:, 1:3])
    f = theano.function([x, y], expr, mode=mode_with_gpu)

    assert sum([isinstance(node.op, cuda.GpuSubtensor)
                for node in f.maker.fgraph.toposort()]) == 1
    assert sum([isinstance(node.op, cuda.GpuIncSubtensor) and
                not node.op.set_instead_of_inc
                for node in f.maker.fgraph.toposort()]) == 1
    assert numpy.allclose(f(xval, yval), [[1., 12., 13.],
                                          [4., 15., 16.], [7., 18., 19.]])
Author: wqren, Project: Theano, Lines: 18, Source: test_basic_ops.py

Example 8: __init__


#......... part of the code omitted here .........
        #=====================================================================

        # Update Synapses (STDP | STDC)

        # Pre::  Apre += self.dApre, w+=Apost
        # Post:: Apost+=self.dApost, w+=Apre
        #
        # USpreInner :: Perform Pre function No.1 in inner connections
        # UWInner    :: Perform Pre function No.2 in inner connections
        # UpreInner  :: Function
        def add_synap_pre(i, p, po, s, q):
            # i :: sequence index
            # p :: pre trace, po :: post trace
            # s :: dApre | dApost
            # q :: weight matrix W
            index = T.nonzero(q[i, :self.Ne])
            np = T.inc_subtensor(p[i, index], s)
            nw = T.inc_subtensor(q[i, :], po[i, :])
            nw = T.clip(nw, 0, self.wmax)
            return {p: np, q: nw}

        def add_synap_pre_inp(i, p, po, s, q):
            # Same as add_synap_pre, but applied to the input connections.
            index = T.nonzero(q[i, :self.Ne])
            np = T.inc_subtensor(p[i, index], s)
            nw = T.inc_subtensor(q[i, :], po[i, :])
            nw = T.clip(nw, 0, self.wmax)
            return {p: np, q: nw}

        def add_synap_post(i, po, p, s, q):
            # i :: sequence index
            # po :: post trace, p :: pre trace
            # s :: dA
            # q :: weight matrix W
            index = T.nonzero(q[:self.Ne, i])
            npo = T.inc_subtensor(po[index, i], s)
            nw = T.inc_subtensor(q[:, i], p[:, i])
            nw = T.clip(nw, 0, self.wmax)
            return {po: npo, q: nw}

        def add_synap_post_inp(i, po, p, s, q):
            # Same as add_synap_post, but applied to the input connections.
            index = T.nonzero(q[:self.Ne, i])
            npo = T.inc_subtensor(po[index, i], s)
            nw = T.inc_subtensor(q[:, i], p[:, i])
            nw = T.clip(nw, 0, self.wmax)
            return {po: npo, q: nw}

        add_dA = T.fscalar('add_dA')
        add_p, add_po, add_q = T.fmatrices('add_p', 'add_po', 'add_q')
        #-------------------------------------------------------------------------
        # Earlier variants, disabled (originally left as bare string statements):
        # USinner, updatesUinner = theano.scan(fn=add_synap_pre, sequences=vinner.nonzero()[0],
        #                                      non_sequences=[add_p, add_po, add_dA, add_q])
        # USinner_f = theano.function(inputs=[vinner, add_p, add_po, add_dA, add_q],
        #                             outputs=None, updates=updatesUinner)
        #-------------------------------------------------------------------------
        USinner_inner_pre,updatesUinner_inner_pre = theano.scan(fn=add_synap_pre,sequences=vinner[:self.Ne].nonzero()[0],non_sequences=[self.Spre_inner,self.Spost_inner,add_dA,self.W_inner])
        self.USinner_f_inner_pre = theano.function(inputs=[vinner,add_dA],outputs=None,updates=updatesUinner_inner_pre,allow_input_downcast=True)

        USinner_innerpost,updatesUinner_inner_post = theano.scan(fn=add_synap_post,sequences=vinner[:self.Ne].nonzero()[0],non_sequences=[self.Spost_inner,self.Spre_inner,add_dA,self.W_inner])
        self.USinner_f_inner_post = theano.function(inputs=[vinner,add_dA],outputs=None,updates=updatesUinner_inner_post,allow_input_downcast=True)

        USinner_inp_pre,updatesUSinner_inp_pre =theano.scan(fn=add_synap_pre_inp,sequences=vinner.nonzero()[0],non_sequences=[self.Spre_inp,self.Spost_inp,add_dA,self.W_inp])
        self.USinner_f_inp_pre = theano.function(inputs=[vinner,add_dA],outputs=None,updates=updatesUSinner_inp_pre,allow_input_downcast=True)

        USinner_inp_post,updatesUSinner_inp_post =theano.scan(fn=add_synap_post_inp,sequences=vinner[:self.Ne].nonzero()[0],non_sequences=[self.Spost_inp,self.Spre_inp,add_dA,self.W_inp])
        self.USinner_f_inp_post = theano.function(inputs=[vinner,add_dA],outputs=None,updates=updatesUSinner_inp_post,allow_input_downcast=True)
        # Reset function: clamp the potential of neurons that spiked back to vr.
        def reset_v(index, vr):
            nv = T.set_subtensor(self.S[0, index], vr)
            return {self.S: nv}
        resetV, resetV_update = theano.scan(fn=reset_v, sequences=vinner.nonzero()[0], non_sequences=[U])
        self.resetV_f = theano.function(inputs=[vinner, U], outputs=None, updates=resetV_update, allow_input_downcast=True)

        setvalue = T.fscalar('setvalue')
        iv = T.ivector('iv')
        def reset_state(i,value,state):
            nstate = T.set_subtensor(state[i,:],value)
            return {state:nstate}
        reset_S_state,Upreset_S_state = theano.scan(fn=reset_state,sequences=iv,non_sequences=[setvalue,self.S])
        self.reset_S_fn = theano.function(inputs=[iv,setvalue],outputs=None,updates=Upreset_S_state)
Author: veinpy, Project: SNN_theano, Lines: 101, Source: snn_1st.py

Example 9: InputLayer

    # Input Layer
    l_in         = InputLayer((batch_size, n_in), input_var=input_var)
    # Hidden layer (rectified linear units)
    l_in_hid     = DenseLayer(l_in, n_hid, nonlinearity=lasagne.nonlinearities.rectify)

    # Output Layer
    l_shp        = ReshapeLayer(l_in_hid, (-1, n_hid))
    l_dense      = DenseLayer(l_shp, num_units=n_out, nonlinearity=lasagne.nonlinearities.sigmoid)
    # Reshape back to the original (batch_size, n_out) shape.
    l_out        = ReshapeLayer(l_dense, (batch_size, n_out))

    return l_out, l_in_hid

if __name__ == '__main__':
    # Define the input and expected output variables
    input_var, target_var = T.fmatrices('input', 'target')
    
    # The generator to sample examples from
    tr_cond               = 'two_gains'
    test_cond             = 'all_gains'
    generator             = CausalInferenceTaskFFWD(max_iter=250001, batch_size=100, n_in=50, n_out=1, sigma_sq=100.0, tr_cond=tr_cond)
    test_generator        = CausalInferenceTaskFFWD(max_iter=2501,   batch_size=100, n_in=50, n_out=1, sigma_sq=100.0, tr_cond=test_cond)

    l_out, l_rec          = model(input_var, batch_size=generator.batch_size, n_in=2*generator.n_in, n_out=generator.n_out, n_hid=200)
    
    # The generated output variable and the loss function
#    all_layers            = lasagne.layers.get_all_layers(l_out)
#    l2_penalty            = lasagne.regularization.regularize_layer_params(all_layers, lasagne.regularization.l2) * 1e-6
    pred_var              = T.clip(lasagne.layers.get_output(l_out), 1e-6, 1. - 1e-6)
    loss                  = T.mean(lasagne.objectives.squared_error(pred_var, target_var)) # + l2_penalty
    
Author: eminorhan, Project: inevitable-probability, Lines: 30, Source: sqerr_causal_inference_expt.py

Example 10: function

from theano import function
import theano.tensor as T
import numpy as np

# Logistic function
x = T.matrix('x', 'float32')
op = 1 / (1 + T.exp(-x))

logistic = function([x], op)

mat1 = [[0, 1], [-1, -2]]
print(logistic(mat1))

# Multiple outputs
a, b = T.fmatrices('a', 'b')
diff = a - b
absDiff = abs(diff)
sqrDiff = diff ** 2

f = function([a, b], [diff, absDiff, sqrDiff])

mat2 = [[10, 5], [5, 10]]
mat3 = [[5, 10], [10, 5]]

print(f(mat2, mat3))

# Default values

x, y = T.fscalars('x', 'y')
z = x + y
Author: titu1994, Project: Python-Work, Lines: 30, Source: examples.py
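
Example 10 is cut off by the line limit right after declaring x and y for its "Default values" section. For reference, a plausible completion (an assumption, not part of the original file) uses theano.In to give y a default value:

from theano import In, function
import theano.tensor as T

x, y = T.fscalars('x', 'y')
z = x + y

# Wrapping y in In(..., value=1) makes it optional, defaulting to 1.
f = function([x, In(y, value=1)], z)

print(f(33))     # 34.0
print(f(33, 2))  # 35.0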

Example 11: find_Ys

def find_Ys(Xs_shared, Ys_shared, sigmas_shared, N, steps, output_dims,
            n_epochs, initial_lr, final_lr, lr_switch, init_stdev,
            initial_momentum, final_momentum, momentum_switch, lmbda, metric,
            verbose=0):
    """Optimize cost wrt Ys[t], simultaneously for all t"""
    # Optimization hyperparameters
    initial_lr = np.array(initial_lr, dtype=floath)
    final_lr = np.array(final_lr, dtype=floath)
    initial_momentum = np.array(initial_momentum, dtype=floath)
    final_momentum = np.array(final_momentum, dtype=floath)

    lr = T.fscalar('lr')
    lr_shared = theano.shared(initial_lr)

    momentum = T.fscalar('momentum')
    momentum_shared = theano.shared(initial_momentum)

    # Penalty hyperparameter
    lmbda_var = T.fscalar('lmbda')
    lmbda_shared = theano.shared(np.array(lmbda, dtype=floath))

    # Yv velocities
    Yvs_shared = []
    zero_velocities = np.zeros((N, output_dims), dtype=floath)
    for t in range(steps):
        Yvs_shared.append(theano.shared(np.array(zero_velocities)))

    # Cost. Passing an integer to fmatrices/fvectors creates that many
    # unnamed variables, one per time step.
    Xvars = T.fmatrices(steps)
    Yvars = T.fmatrices(steps)
    Yv_vars = T.fmatrices(steps)
    sigmas_vars = T.fvectors(steps)

    c_vars = []
    for t in range(steps):
        c_vars.append(cost_var(Xvars[t], Yvars[t], sigmas_vars[t], metric))

    cost = T.sum(c_vars) + lmbda_var*movement_penalty(Yvars, N)

    # Setting update for Ys velocities
    grad_Y = T.grad(cost, Yvars)

    givens = {lr: lr_shared, momentum: momentum_shared,
              lmbda_var: lmbda_shared}
    updates = []
    for t in range(steps):
        updates.append((Yvs_shared[t], momentum*Yv_vars[t] - lr*grad_Y[t]))

        givens[Xvars[t]] = Xs_shared[t]
        givens[Yvars[t]] = Ys_shared[t]
        givens[Yv_vars[t]] = Yvs_shared[t]
        givens[sigmas_vars[t]] = sigmas_shared[t]

    update_Yvs = theano.function([], cost, givens=givens, updates=updates)

    # Setting update for Ys positions
    updates = []
    givens = dict()
    for t in range(steps):
        updates.append((Ys_shared[t], Yvars[t] + Yv_vars[t]))
        givens[Yvars[t]] = Ys_shared[t]
        givens[Yv_vars[t]] = Yvs_shared[t]

    update_Ys = theano.function([], [], givens=givens, updates=updates)

    # Momentum-based gradient descent
    for epoch in range(n_epochs):
        if epoch == lr_switch:
            lr_shared.set_value(final_lr)
        if epoch == momentum_switch:
            momentum_shared.set_value(final_momentum)

        c = update_Yvs()
        update_Ys()
        if verbose:
            print('Epoch: {0}. Cost: {1:.6f}.'.format(epoch + 1, float(c)))

    Ys = []
    for t in range(steps):
        Ys.append(np.array(Ys_shared[t].get_value(), dtype=floath))

    return Ys
Author: paulorauber, Project: thesne, Lines: 82, Source: dynamic_tsne.py
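
Note that find_Ys passes an integer rather than names to T.fmatrices and T.fvectors. A quick sanity check of that form (a minimal sketch, assuming stock Theano):

import theano.tensor as T

Xvars = T.fmatrices(3)           # a list of three unnamed float32 matrices
print(len(Xvars))                # 3
print([v.dtype for v in Xvars])  # ['float32', 'float32', 'float32']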

Example 12: init_weights

import numpy as np
import theano
import theano.tensor as T

import mnist

def init_weights(n_in, n_out):
    weights = np.random.randn(n_in, n_out) / np.sqrt(n_in)
    return theano.shared(np.asarray(weights, dtype=theano.config.floatX))

def feed_forward(X, w_h, w_o):
    h = T.nnet.sigmoid(T.dot(X, w_h))
    return T.nnet.softmax(T.dot(h, w_o))

trX, trY, teX, teY = mnist.load_data(one_hot=True)

w_h, w_o = init_weights(28*28, 100), init_weights(100, 10)
num_epochs, batch_size, learn_rate = 30, 10, 0.2

X, Y = T.fmatrices('X', 'Y')
y_ = feed_forward(X, w_h, w_o)

weights = [w_h, w_o]
grads = T.grad(cost=T.nnet.categorical_crossentropy(y_, Y).mean(), wrt=weights)
train = theano.function(
    inputs=[X, Y],
    updates=[[w, w - g * learn_rate] for w, g in zip(weights, grads)],
    allow_input_downcast=True)
predict = theano.function(inputs=[X], outputs=T.argmax(y_, axis=1))

for i in range(num_epochs):
    for j in xrange(0, len(trX), batch_size):
        train(trX[j:j+batch_size], trY[j:j+batch_size])
    print i, np.mean(predict(teX) == np.argmax(teY, axis=1))
Author: CaiyiZhu, Project: simple-neural-networks, Lines: 30, Source: mlp_theano.py

Example 13: function

'''
A theano implementation of the T-LSTM
'''

import theano.tensor as T
from theano import function
import numpy as np
import collections
import pdb
import os
#np.seterr(under='warn')
h, b = T.fvectors('h', 'b')
W, X = T.fmatrices('W', 'X')

# Compiled helpers for the T-LSTM cell math.
dotvec = function([h, b], T.dot(h, b))        # vector . vector
dot = function([W, h], T.dot(W, h))           # matrix . vector
dotW = function([W, X], T.dot(W, X))          # matrix . matrix

layer = function([W, h, b], T.dot(W, h) + b)  # affine transform
sigmoid = function([h], T.nnet.ultra_fast_sigmoid(h))
tanh = function([h], T.tanh(h))
add = function([h, b], h + b)
Author: Peratham, Project: imgcap, Lines: 31, Source: tlstm_theano.py


Note: the theano.tensor.fmatrices examples above were collected by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms; the snippets were selected from open-source projects contributed by various developers. Copyright of the source code remains with the original authors; consult each project's license before distributing or using it. Do not republish without permission.