

Python tensor.lscalar Function Code Examples

This article collects typical usage examples of the theano.tensor.lscalar function in Python. If you are wondering what lscalar does, how to call it, or what real-world usage looks like, the curated examples below should help.


The sections below present 15 code examples of the lscalar function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
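
Before the project examples, here is a minimal, self-contained sketch of what lscalar provides, assuming only that Theano is installed (the variable names are illustrative):

import theano
import theano.tensor as T

# lscalar() declares a symbolic 64-bit integer ("long") scalar; the
# optional string argument is only a name that shows up in debug output.
i = T.lscalar('i')

# Compile a function of that symbolic scalar into a callable that
# accepts ordinary Python/NumPy integers.
double = theano.function(inputs=[i], outputs=i * 2)

print(double(21))  # 42

As the examples below show, the dominant pattern is to declare an lscalar minibatch index and use it to slice shared datasets inside the givens argument of theano.function.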

Example 1: build

    def build(self):
        
        # input and output variables
        x = T.matrix('x')
        y = T.matrix('y')
        index = T.lscalar() 
        batch_count = T.lscalar() 
        LR = T.scalar('LR', dtype=theano.config.floatX)
        M = T.scalar('M', dtype=theano.config.floatX)

        # Before build() you work with symbolic variables;
        # after build() you work with numeric values
        # (see the usage sketch after this example).
        
        self.train_batch = theano.function(
                inputs=[index, LR, M],
                updates=self.model.updates(x, y, LR, M),
                givens={
                    x: self.shared_x[index * self.batch_size:(index + 1) * self.batch_size],
                    y: self.shared_y[index * self.batch_size:(index + 1) * self.batch_size]},
                name="train_batch", on_unused_input='warn')
        
        self.test_batch = theano.function(
                inputs=[index],
                outputs=self.model.errors(x, y),
                givens={
                    x: self.shared_x[index * self.batch_size:(index + 1) * self.batch_size],
                    y: self.shared_y[index * self.batch_size:(index + 1) * self.batch_size]},
                name="test_batch")
                
        if self.format == "DFXP":
            self.update_range = theano.function(
                    inputs=[batch_count],
                    updates=self.model.range_updates(batch_count),
                    name="update_range")
Author: spideryan, Project: deep-learning-storage, Lines: 25, Source: trainer.py
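
The comment above draws the symbolic/numeric distinction; the following self-contained sketch (illustrative names, unrelated to the project) shows the same boundary in miniature:

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')               # symbolic: no data attached yet
expr = (x ** 2).sum()           # still symbolic
f = theano.function([x], expr)  # compiled: maps numeric arrays to numbers

print(f(np.ones((2, 3), dtype=theano.config.floatX)))  # 6.0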

Example 2: getTrainModel

    def getTrainModel(self, data_x, data_y, data_sm):
        self.ngram_start_index = T.lscalar()
        self.ngram_end_index = T.lscalar()
        self.sm_start_index = T.lscalar()
        self.sm_end_index = T.lscalar()
        self.learning_rate = T.scalar()

        # TRAIN_MODEL
        self.train_outputs = [self.cost, self.grad_norm]
        self.train_set_x, self.train_set_y, self.train_set_sm = io_read_ngram.shared_dataset([data_x, data_y, data_sm])
        self.int_train_set_y = T.cast(self.train_set_y, "int32")
        self.train_model = theano.function(
            inputs=[
                self.ngram_start_index,
                self.ngram_end_index,
                self.sm_start_index,
                self.sm_end_index,
                self.learning_rate,
            ],
            outputs=self.train_outputs,
            updates=self.updates,
            givens={
                self.x: self.train_set_x[self.ngram_start_index : self.ngram_end_index],
                self.y: self.int_train_set_y[self.ngram_start_index : self.ngram_end_index],
                self.sm: self.train_set_sm[self.sm_start_index : self.sm_end_index],
                self.lr: self.learning_rate,
            },
        )

        return self.train_model
Author: batman2013, Project: nnjm-global, Lines: 30, Source: model_global.py

Example 3: compile

    def compile(self, objective, optimizer, constraints=None):
        if not constraints:
            constraints = [lambda x: x for _ in self.params]

        # Dummy variables as placeholder for training data,
        # which need to be shared tensor variables
        self.X_train = shared_vals(np.zeros((2, 2)), name='X_train')
        self.Y_train = shared_vals(np.zeros((2, 2)), name='Y_train')

        batch_ix = T.lscalar('ix')
        batch_size = T.lscalar('size')
        y_sym = T.matrix('Y')
        loss = objective(y_sym, self.output)
        updates = optimizer.get_updates(self.params, constraints, loss)
        self.train = theano.function(
            inputs=[batch_ix, batch_size],
            outputs=loss,
            updates=updates,
            givens={
                self.X: self.X_train[batch_ix * batch_size: (batch_ix + 1) * batch_size],
                y_sym : self.Y_train[batch_ix * batch_size: (batch_ix + 1) * batch_size]
            }
        )

        self._predict = theano.function(
            inputs=[self.X],
            outputs=self.output
        )
Author: bitwise-ben, Project: lego, Lines: 28, Source: lego.py
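
The dummy-shared-variable trick above is the standard Theano idiom for indexable training data. A minimal self-contained sketch of the same givens pattern, with made-up data and illustrative names:

import numpy as np
import theano
import theano.tensor as T

# Ten data points live in a shared variable on the compute device.
data = theano.shared(np.arange(10, dtype=theano.config.floatX), name='data')

batch_ix = T.lscalar('ix')
batch_size = T.lscalar('size')
x = T.vector('x')

# givens swaps the symbolic x for a slice of the shared data, so the
# compiled function is driven purely by integer indices.
batch_mean = theano.function(
    inputs=[batch_ix, batch_size],
    outputs=x.mean(),
    givens={x: data[batch_ix * batch_size:(batch_ix + 1) * batch_size]})

print(batch_mean(0, 5))  # mean of the first five points: 2.0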

Example 4: trainer

def trainer(X,Y,alpha,lr,predictions,updates,data,labels):
	data   = U.create_shared(data,  dtype=np.int8)
	labels = U.create_shared(labels,dtype=np.int8)
	index_start = T.lscalar('start')
	index_end   = T.lscalar('end')
	print "Compiling function..."
	train_model = theano.function(
			inputs  = [index_start,index_end,alpha,lr],
			outputs = T.mean(T.neq(T.argmax(predictions, axis=1), Y)),
			updates = updates,
			givens  = {
				X:   data[index_start:index_end],
				Y: labels[index_start:index_end]
			}
		)
	test_model = theano.function(
			inputs  = [index_start,index_end],
			outputs = T.mean(T.neq(T.argmax(predictions, axis=1), Y)),
			givens  = {
				X:   data[index_start:index_end],
				Y: labels[index_start:index_end]
			}
		)
	print "Done."
	return train_model,test_model
Author: Niyikiza, Project: rnn-experiment, Lines: 25, Source: genchar_mult.py

Example 5: fiting_variables

    def fiting_variables(self, batch_size, train_set_x, train_set_y, test_set_x=None):
        """Sets useful variables for locating batches"""    
        self.index = T.lscalar('index')    # index to a [mini]batch
        self.n_ex = T.lscalar('n_ex')      # total number of examples

        assert type(batch_size) in (IntType, FloatType), \
            "Batch size must be an integer or a float."
        if type(batch_size) is FloatType:
            warnings.warn('Provided batch_size is FloatType, value has been truncated')
            batch_size = int(batch_size)
        # Proper implementation of variable-batch size evaluation
        # Note that the last batch may be a smaller size
        # So we keep around the effective_batch_size (whose last element may
        # be smaller than the rest)
        # And weight the reported error by the batch_size when we average
        # Also, by keeping batch_start and batch_stop as symbolic variables,
        # we make the theano function easier to read
        self.batch_start = self.index * batch_size
        self.batch_stop = T.minimum(self.n_ex, (self.index + 1) * batch_size)
        self.effective_batch_size = self.batch_stop - self.batch_start

        self.get_batch_size = theano.function(inputs=[self.index, self.n_ex],
                                          outputs=self.effective_batch_size)

        # compute number of minibatches for training
        # note that cases are the second dimension, not the first
        self.n_train = train_set_x.get_value(borrow=True).shape[0]
        self.n_train_batches = int(np.ceil(1.0 * self.n_train / batch_size))
        if test_set_x is not None:
            self.n_test = test_set_x.get_value(borrow=True).shape[0]
            self.n_test_batches = int(np.ceil(1.0 * self.n_test / batch_size))
Author: pabaldonedo, Project: stochastic_fnn, Lines: 30, Source: lbn.py
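
The effective-batch-size bookkeeping above is easy to sanity-check in isolation; a standalone sketch with made-up numbers:

import theano
import theano.tensor as T

index = T.lscalar('index')
n_ex = T.lscalar('n_ex')
batch_size = 4

# The last batch is clipped to the number of remaining examples.
batch_start = index * batch_size
batch_stop = T.minimum(n_ex, (index + 1) * batch_size)
get_batch_size = theano.function([index, n_ex],
                                 batch_stop - batch_start)

# 10 examples in batches of 4 -> sizes 4, 4, 2
for i in range(3):
    print(get_batch_size(i, 10))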

Example 6: __compileFunctions

    def __compileFunctions(self):

        self.__logger.info("Compiling computational graph:")

        index = T.lscalar('index')

        miniBatchSize = T.lscalar('miniBatchSize')


        self.__logger.info(" - Setting up and compiling outputs")
        self.__setUpOutputs(self.input)

        self.__logger.info(" - Setting up and compiling cost functions")
        self.__setUpCostFunctions(self.input,
                                  self.output,
                                  self.supCostWeight,
                                  self.unsupCostWeight)

        self.__logger.info(" - Setting up and compiling optimizers")
        self.__setUpOptimizers(index,
                               miniBatchSize,
                               self.input,
                               self.output,
                               self.epsilon,
                               self.decay,
                               self.momentum)

        self.__setUpHelpers(index,miniBatchSize)
Author: terkkila, Project: cgml, Lines: 28, Source: computational_graph.py

Example 7: pretraining_functions

 def pretraining_functions(self, train_set_x, train_set_y, batch_size):
     index = tensor.lscalar('index')
     corruption_level = tensor.scalar('corruption')
     learning_rate = tensor.scalar('lr')
     switch = tensor.iscalar('switch')
     n_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
     batch_begin = index * batch_size
     batch_end = batch_begin + batch_size
     pretrain_fns = []
     for sugar in self.sugar_layers:
         cost, updates = sugar.get_cost_updates(corruption_level,
                                             learning_rate,
                                             switch)
         fn = function(inputs=[index,
                                      Param(corruption_level, default=0.2),
                                      Param(learning_rate, default=0.1),
                                      Param(switch, default=1)],
                              outputs=[cost],
                              updates=updates,
                              givens={self.x: train_set_x[batch_begin:batch_end],
                                      self.y: train_set_y[batch_begin:batch_end]}, on_unused_input='ignore')
         pretrain_fns.append(fn)
     return pretrain_fns
Author: lucktroy, Project: sugar, Lines: 26, Source: deepSUGAR.py

Example 8: __init__

 def __init__( self, da, stop_val, corruption, rate, train_path, test_path ):
     self.fid = open( 'output.txt', 'r+' )
     self.model = da
     self.stop_val = stop_val
     self.last_cost = 9999
     self.train_path = train_path
     self.test_path = test_path
     
     self.train_set = numpy.load( train_path )
     self.test_set = numpy.load( test_path )
     
     self.shared_train = theano.shared( self.train_set )
     self.shared_test = theano.shared( self.test_set )
     self.print_set( self.shared_train, "train_set" )
     self.print_set( self.shared_test, "test_set" )
     
     self.learning_rate = rate
     self.corruption_level = corruption
     
     self.start_index = T.lscalar()
     self.end_index = T.lscalar()
     self.cost, self.updates = da.get_cost_updates( corruption, rate )
     
     self.train = theano.function( [ self.start_index, self.end_index ], self.cost, updates = self.updates,
         givens = { da.x : self.shared_train [ self.start_index : self.end_index ] } )
     self.test = theano.function( [ self.start_index, self.end_index ], self.cost, updates = self.updates,
         givens = { da.x : self.shared_test [ self.start_index : self.end_index ] } )
Author: MrWolvwxyz, Project: sparseAutoEncoder, Lines: 27, Source: theano_class.py

Example 9: train_rnn

def train_rnn():
    rng = numpy.random.RandomState(1234)

    q = T.lvector("q")
    pos = T.lscalar("pos")
    neg = T.lscalar("neg")
    inputs = [q, pos, neg]

    embLayer = emb_layer(None, 100, 5)
    rnn = rnn_layer(input=inputs, emb_layer=embLayer, nh=5)

    cost = rnn.loss()
    gradient = T.grad(cost, rnn.params)
    lr = 0.001
    updates = OrderedDict((p, p - lr * g) for p, g in zip(rnn.params, gradient))
    train = theano.function(inputs=[q, pos, neg], outputs=cost, updates=updates)

    print rnn.emb.eval()[0]
    e0 = rnn.emb.eval()

    for i in range(0, 3):
        idq = rng.randint(size=10, low=0, high=100)
        idpos = rng.random_integers(100)
        idneg = rng.random_integers(100)

        train(idq, idpos, idneg)
        rnn.normalize()

        print rnn.emb.eval() - e0
Author: yzyz7, Project: attgit, Lines: 29, Source: train.py

Example 10: test_doc

    def test_doc(self):
        """Ensure the code given in pfunc.txt works as expected"""

        # Example #1.
        a = lscalar()
        b = shared(1)
        f1 = pfunc([a], (a + b))
        f2 = pfunc([Param(a, default=44)], a + b, updates={b: b + 1})
        self.assertTrue(b.get_value() == 1)
        self.assertTrue(f1(3) == 4)
        self.assertTrue(f2(3) == 4)
        self.assertTrue(b.get_value() == 2)
        self.assertTrue(f1(3) == 5)
        b.set_value(0)
        self.assertTrue(f1(3) == 3)

        # Example #2.
        a = tensor.lscalar()
        b = shared(7)
        f1 = pfunc([a], a + b)
        f2 = pfunc([a], a * b)
        self.assertTrue(f1(5) == 12)
        b.set_value(8)
        self.assertTrue(f1(5) == 13)
        self.assertTrue(f2(4) == 32)
Author: gwtaylor, Project: Theano, Lines: 25, Source: test_pfunc.py

Example 11: train_model

    def train_model(self, X_train, Y_train, X_valid, Y_valid,
                    num_epochs=3000, learning_rate=0.001, batch_size=20,
                    L1_reg=0., L2_reg=0.):

        logging.info('... training model (learning_rate: %f)' % learning_rate)

        cost = self.NLL + L1_reg*self.L1 + L2_reg*self.L2_sqr

        grads = T.grad(cost=cost, wrt=self.params)
        updates = [[param, param - learning_rate*grad]
                   for param, grad in zip(self.params, grads)]

        start = T.lscalar()
        end = T.lscalar()

        train = theano.function(
            inputs=[start, end],
            outputs=cost,
            updates=updates,
            givens={
                self.X: X_train[start:end],
                self.Y: Y_train[start:end]
            }
        )

        validate = theano.function(
            inputs=[start, end],
            outputs=[cost, self.py_x],
            givens={
                self.X: X_valid[start:end],
                self.Y: Y_valid[start:end]
            }
        )

        m_train = X_train.get_value(borrow=True).shape[0]
        m_valid = X_valid.get_value(borrow=True).shape[0]

        stopping_criteria = StoppingCriteria()
        index = range(0, m_train+1, batch_size)

        y_valid = np.argmax(Y_valid.get_value(borrow=True), axis=1)
        for i in range(num_epochs):
            costs = [train(index[j], index[j+1]) for j in range(len(index)-1)]
            E_tr = np.mean(costs)

            E_va, py_x = validate(0, m_valid)
            y_pred = np.argmax(py_x, axis=1)
            A_valid = AccuracyTable(y_pred, y_valid)

            stopping_criteria.append(E_tr, E_va)
            logging.debug('epoch %3d/%d. Cost: %f  Validation: Q3=%.2f%% C3=%f'
                          '(%.2f %.2f %.2f)',
                          i+1, num_epochs, E_tr, A_valid.Q3, A_valid.C3,
                          A_valid.Ch, A_valid.Ce, A_valid.Cc)

            if stopping_criteria.PQ(1):
                logging.debug('Early Stopping!')
                break

        return stopping_criteria
Author: junshuai, Project: PSSPred, Lines: 60, Source: psspred.py

Example 12: test_argsort

def test_argsort():
    # Set up
    rng = np.random.RandomState(seed=utt.fetch_seed())
    m_val = rng.rand(3, 2)
    v_val = rng.rand(4)

    # Example 1
    a = tensor.dmatrix()
    w = argsort(a)
    f = theano.function([a], w)
    gv = f(m_val)
    gt = np.argsort(m_val)
    assert np.allclose(gv, gt)

    # Example 2
    a = tensor.dmatrix()
    axis = tensor.lscalar()
    w = argsort(a, axis)
    f = theano.function([a, axis], w)
    for axis_val in 0, 1:
        gv = f(m_val, axis_val)
        gt = np.argsort(m_val, axis_val)
        assert np.allclose(gv, gt)

    # Example 3
    a = tensor.dvector()
    w2 = argsort(a)
    f = theano.function([a], w2)
    gv = f(v_val)
    gt = np.argsort(v_val)
    assert np.allclose(gv, gt)

    # Example 4
    a = tensor.dmatrix()
    axis = tensor.lscalar()
    l = argsort(a, axis, "mergesort")
    f = theano.function([a, axis], l)
    for axis_val in 0, 1:
        gv = f(m_val, axis_val)
        gt = np.argsort(m_val, axis_val)
        assert np.allclose(gv, gt)

    # Example 5
    a = tensor.dmatrix()
    axis = tensor.lscalar()
    a1 = ArgSortOp("mergesort", [])
    a2 = ArgSortOp("quicksort", [])
    # All of the assertions below should hold
    assert a1 != a2
    assert a1 == ArgSortOp("mergesort", [])
    assert a2 == ArgSortOp("quicksort", [])

    # Example 6: Testing axis=None
    a = tensor.dmatrix()
    w2 = argsort(a, None)
    f = theano.function([a], w2)
    gv = f(m_val)
    gt = np.argsort(m_val, None)
    assert np.allclose(gv, gt)
Author: 12190143, Project: Theano, Lines: 59, Source: test_sort.py

Example 13: predict

 def predict(self, X):
     start = T.lscalar()
     end = T.lscalar()
     return theano.function(
         inputs=[start, end],
         outputs=self.py_x,
         givens={self.X: X[start:end]}
     )
Author: junshuai, Project: PSSPred, Lines: 8, Source: psspred.py

Example 14: pretraining_functions

    def pretraining_functions(self, train_set_x, train_set_y, alpha, batch_size):
        ''' Generates a list of functions, each of which implements one
        component (sub-CNN) used in training the iCNN.
        Each function takes the minibatch index as input; to train a
        sub-CNN you simply iterate, calling the corresponding function
        on every minibatch index (see the driver-loop sketch after
        this example).

        :type train_set_x: theano.tensor.TensorType
        :param train_set_x: Shared variable that contains all datapoints used
                            for training the sub-CNN

        :param train_set_y: ...

        :type batch_size: int
        :param batch_size: size of a [mini]batch

        '''

        index = T.lscalar('index')  # index to a minibatch
        learning_rate = T.scalar('learning_rate')  # learning rate to use
        # number of batches
        #n_batches = int(math.ceil(train_set_x.get_value(borrow=True).shape[0] / batch_size))
        # begining of a batch, given `index`
        batch_begin = index * batch_size
        # ending of a batch given `index`
        batch_end = batch_begin + batch_size

        pretrain_fns = []
        for subcnn in self.subcnns:
            # create a function to compute the mistakes that are made by the model
            index = T.lscalar('index')  # index to a [mini]batch
            #batch_size_var = T.lscalar('batch_size_var')  # batch_size
            # compute the gradients with respect to the model parameters
            # (note: gradients are taken w.r.t. params_pretrain but zipped
            # with params in the update loop below; the original code
            # assumes the two lists line up one-to-one)
            grads = T.grad(subcnn.cost, subcnn.params_pretrain)
        
            # add momentum
            # initialize the delta_i-1
            delta_before=[]
            for param_i in subcnn.params:
                delta_before_i=theano.shared(value=numpy.zeros(param_i.get_value().shape))
                delta_before.append(delta_before_i)
        
            updates = []
            for param_i, grad_i, delta_before_i in zip(subcnn.params, grads, delta_before):
                delta_i=-learning_rate * grad_i + alpha*delta_before_i
                updates.append((param_i, param_i + delta_i ))
                updates.append((delta_before_i,delta_i))
            # compile the theano function
            fn = theano.function([index,theano.Param(learning_rate, default=0.1)], [subcnn.cost,subcnn.errors], updates=updates,
                                      givens={
                                      self.x: train_set_x[index*batch_size:(index+1)*batch_size],
                                      self.y: train_set_y[index*batch_size:(index+1)*batch_size]})
            
            # append `fn` to the list of functions
            pretrain_fns.append(fn)

        return pretrain_fns
Author: yifeng-li, Project: DECRES, Lines: 57, Source: icnn_dfs.py
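
The docstring describes the intended calling pattern; a hypothetical driver loop might look like the following (icnn, n_epochs, and n_train_batches are illustrative names, not taken from the original project):

# Hypothetical usage of the list returned by pretraining_functions.
pretrain_fns = icnn.pretraining_functions(train_set_x, train_set_y,
                                          alpha=0.9, batch_size=20)
for fn in pretrain_fns:                      # one function per sub-CNN
    for epoch in range(n_epochs):
        for batch_index in range(n_train_batches):
            cost, errors = fn(batch_index, learning_rate=0.1)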

Example 15: test

def test(model):
    dim = 128
    v_size = 7810
    margin = 1.0
    
    #load model
    f = open(model, 'rb')
    input_params = cPickle.load(f)
    emb, wx, wh, bh, wa = input_params
    f.close()
    
    embLayer = emb_layer(pre_train=emb, v = v_size, dim = dim) 
    rnnLayer = rnn_layer(input=None, wx=wx, wh=wh, bh=bh, emb_layer = embLayer, nh = dim) 
    att = attention_layer(input=None, rnn_layer=rnnLayer, margin = margin)

    q = T.lvector('q')
    a = T.lscalar('a')
    p = T.lvector('p')
    t = T.lscalar('t')
    inputs = [q,a,p,t]
    score = att.predict(inputs)
    pred = theano.function(inputs=inputs,outputs=score)

    pool = ThreadPool()

    f = open('./data/test-small.id','r')
    count = 1
    print 'time_b:%s' %time.clock()  
    to_pred = []
    for line in f:
        if count % 10000 == 0:
            print count / 10000
        count += 1
        #print 'time_b:%s' %time.clock()  
        line = line[:-1]
        tmp = line.split('\t')
        in_q = numpy.array(tmp[0].split(' ')).astype(numpy.int) - 1
        in_a = int(tmp[1].split(' ')[2]) - 1
        in_p = numpy.array(tmp[1].split(' ')).astype(numpy.int) - 1
        in_t = int(tmp[2]) - 1
        lis = (in_q, in_a, in_p, in_t)
        to_pred.append(lis)
        #print 'time_load:%s' %time.clock()  
        #print 'time_score:%s' %time.clock()  
    f.close()

    ay = numpy.asarray(to_pred)
    #results = map(pred, list(ay[:,0]), list(ay[:,1]),list(ay[:,2]),list(ay[:,3]))
    results = pool.map(lambda args: pred(*args), to_pred)  # unpack each (q, a, p, t) tuple
    #results = []
    #for p in to_pred:
    #    results.append(att.predict(p,params))
    print 'time_e:%s' %time.clock()
    #print results
    pool.close()
    pool.join()
Author: yzyz7, Project: attgit, Lines: 56, Source: batch_predict.py


Note: The theano.tensor.lscalar examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers, and copyright in the source code remains with the original authors. Consult each project's license before distributing or reusing the code; do not reproduce this page without permission.