

Python tensor.scalar Function Code Examples

This article collects typical usage examples of the Python function theano.tensor.scalar. If you have been wondering how to use the scalar function, what it does in practice, or where to find working examples of it, the curated code samples below should help.


The following presents 15 code examples of the scalar function, ordered by popularity.
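Before the examples, here is a minimal sketch of what theano.tensor.scalar itself does: it declares a named symbolic 0-dimensional variable (dtype defaults to theano.config.floatX) that can then be compiled into a function input. The variable names below are illustrative:

import theano
import theano.tensor as T

a = T.scalar('a')                      # symbolic floating-point scalar
b = T.scalar('b')
add = theano.function([a, b], a + b)   # compile a graph with two scalar inputs
print(add(2.0, 3.0))                   # -> 5.0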

Example 1: get_training_functions

	def get_training_functions(self, x_lab_np=None, y_lab_np=None, x_unlab_np=None):
		self.x_lab_np = x_lab_np
		self.y_lab_np = y_lab_np
		self.x_unlab_np = x_unlab_np
		assert self.x_lab_np.shape[0] == len(self.y_lab_np)
		self.x_lab = self._shared_dataset(self.x_lab_np)
		self.y_lab = self._shared_dataset(self.y_lab_np)
		self.x_unlab = self._shared_dataset(self.x_unlab_np)
		# fraction of labelled samples; cast before dividing to avoid integer division
		self.alpha = float(self.x_lab_np.shape[0]) / self.x_unlab_np.shape[0]
		index_unlab = T.ivector('index_unlab')
		index_lab = T.ivector('index_lab')
		momentum = T.scalar('momentum')
		learning_rate = T.scalar('learning_rate')

		self.batch_size_lab = self.batch_size * self.alpha
		self.batch_size_unlab = self.batch_size * (1 - self.alpha)
		x_lab = T.matrix('x_lab')
		x_unlab = T.matrix('x_unlab')
		y_lab = T.ivector('y_lab')

		self.num_labels = self.x_lab_np.shape[0]
		self.num_unlabels = self.x_unlab_np.shape[0]
		self.num_samples = self.num_labels + self.num_unlabels

		num_batches = self.num_samples / float(self.batch_size)
		pretraining_fns = []
		for i in xrange(len(self.layers)):
			ssda = self.layers[i]
			cost, updates = ssda.get_cost_updates(self.x_lab, self.x_unlab, self.y_lab)
			# givens swaps in the minibatch slices selected by the index vectors
			train_fn = theano.function(
				inputs=[index_lab, index_unlab],
				outputs=[cost],
				updates=updates,
				givens={self.x_lab: self.x_lab[index_lab],
						self.x_unlab: self.x_unlab[index_unlab],
						self.y_lab: self.y_lab[index_lab]})
			pretraining_fns.append(train_fn)

		return pretraining_fns
Author: adhaka | Project: kthasrdnn | Lines: 33 | Source: SSDAE.py

Example 2: _compile_func

def _compile_func():
    beta = T.vector('beta')
    b = T.scalar('b')
    X = T.matrix('X')
    y = T.vector('y')
    C = T.scalar('C')
    params = [beta, b, X, y, C]
    cost = 0.5 * (T.dot(beta, beta) + b * b) + C * T.sum(
        T.nnet.softplus(
            -T.dot(T.diag(y), T.dot(X, beta) + b)
        )
    )
    # Function computing, in one pass, the cost and its gradients
    # with respect to beta and to the bias b.
    cost_grad = theano.function(params, [
        cost,
        T.grad(cost, beta),
        T.grad(cost, b)
    ])

    # Function for computing element-wise sigmoid, used for
    # prediction.
    log_predict = theano.function(
        [beta, b, X],
        T.nnet.sigmoid(b + T.dot(X, beta)),
        on_unused_input='warn'
    )

    return (cost_grad, log_predict)
Author: alexisVallet | Project: dpm-identification | Lines: 29 | Source: lr.py
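A quick usage sketch for the pair of functions compiled above; the shapes, labels, and regularization constant here are made up for illustration, and the arrays are cast to theano.config.floatX so they match the symbolic types:

import numpy as np
import theano

floatX = theano.config.floatX
cost_grad, log_predict = _compile_func()
beta0 = np.zeros(4, dtype=floatX)                  # weights, one per feature
b0 = np.asarray(0., dtype=floatX)                  # bias
X0 = np.random.randn(5, 4).astype(floatX)          # 5 samples, 4 features
y0 = np.asarray([1, -1, 1, 1, -1], dtype=floatX)   # labels in {-1, +1}
cost, g_beta, g_b = cost_grad(beta0, b0, X0, y0, np.asarray(1., dtype=floatX))
probs = log_predict(beta0, b0, X0)                 # element-wise sigmoid predictions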

Example 3: pretraining_functions

    def pretraining_functions(self, train_set_x, batch_size):

        index = T.lscalar('index')  # index to a minibatch
        corruption_level = T.scalar('corruption')  # fraction of corruption to use
        learning_rate = T.scalar('lr')  # learning rate to use
        batch_begin = index * batch_size
        batch_end = batch_begin + batch_size
        
        pretrain_fns = []
        for dA in self.dA_layers:
            cost, updates = dA.get_cost_updates(corruption_level,
                                                learning_rate)
            fn = theano.function(
                inputs=[
                    index,
                    theano.In(corruption_level, value=0.1),
                    theano.In(learning_rate, value=0.1)
                ],
                outputs=cost,
                updates=updates,
                givens={
                    self.x: train_set_x[batch_begin: batch_end]
                }
            )
            
            pretrain_fns.append(fn)

        return pretrain_fns
Author: missmagnum | Project: sdai | Lines: 29 | Source: sda.py
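Note the theano.In wrappers above: they attach default values to the corruption and lr inputs, so each compiled function can be called with just a batch index, or with the hyper-parameters overridden by keyword using the names given to the symbolic variables. A hedged sketch, assuming sda is an instance of this class and n_train_batches is computed elsewhere:

pretrain_fns = sda.pretraining_functions(train_set_x, batch_size=20)
for batch_index in range(n_train_batches):
    c = pretrain_fns[0](batch_index)                           # uses corruption=0.1, lr=0.1
    c = pretrain_fns[0](batch_index, corruption=0.3, lr=0.05)  # explicit overrides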

Example 4: __form_input_tensor

    def __form_input_tensor(self, name):

        left_entity = T.scalar(name='le_' + name, dtype='int32')
        right_entity = T.scalar(name='re_' + name, dtype='int32')
        relation = T.scalar(name='rel_' + name, dtype='int32')

        return T.stack([left_entity, right_entity, relation])
Author: subhadeepmaji | Project: ml_algorithms | Lines: 7 | Source: RelationEmbedding.py

Example 5: build_pretraining_function

    def build_pretraining_function(self, train_set_x, batch_size):
        
        index = T.lscalar('index')
        corruption_level = T.scalar('corruption')  # % of corruption to use
        learning_rate = T.scalar('lr')  # learning rate to use
        # number of batches
        n_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size

        pretrain_fns = []

        for pretrain in self.pretrain_layers:

            cost, updates = pretrain.get_cost_updates(corruption_level, \
                                                            learning_rate)


            fn = theano.function(inputs=[index, corruption_level, \
                                                        learning_rate],
                    outputs=cost, 
                    updates=updates,
                    givens= {
                        self.x: train_set_x[index * batch_size: \
                                                    (index + 1) * batch_size]})

            pretrain_fns.append(fn)

        return pretrain_fns
Author: mottodora | Project: DNN-MultipleReg | Lines: 27 | Source: dnn.py

Example 6: adam

def adam(loss, param_list):
    """
    Recommended default settings are 
    α = 0.001, β1 = 0.9, β2 = 0.999 and  eps= 10e−8.
    t is timestep.
    """
    alpha = T.scalar("alpha")
    beta1 = T.scalar("beta1")
    beta2 = T.scalar("beta2")
    eps = T.scalar("eps")
    t = T.scalar("t")
    
    gparam_list = [T.grad(loss, p) for p in param_list]
    first_moment_list = [zero_shared(p.shape.eval()) for p in param_list]
    second_moment_list = [zero_shared(p.shape.eval()) for p in param_list]
    
    updates = OrderedDict()
    for param, gparam, first_moment, second_moment\
    in zip(param_list, gparam_list, first_moment_list, second_moment_list):
        m = beta1*first_moment + (1.-beta1)*gparam
        v = beta2*second_moment + (1.-beta2)*gparam*gparam
        m_hat = m / (1.-beta1**t)
        v_hat = v / (1.-beta2**t)
        updates[param] = param - alpha*m_hat / (T.sqrt(v_hat)+eps)
        updates[first_moment] = m
        updates[second_moment] = v
        
    opt_params = [alpha, beta1, beta2, eps, t]
    
    return updates, opt_params
Author: matsui-k20xx | Project: d-rcn | Lines: 30 | Source: optimizers.py
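A sketch of how the returned updates and opt_params might be wired into a training step. Note that in this implementation t is a plain symbolic input, so the caller must increment it each step; model_inputs, x_batch, and y_batch below are placeholders for whatever the model's loss actually depends on:

updates, opt_params = adam(loss, param_list)
train = theano.function(model_inputs + opt_params, loss, updates=updates)
for t in range(1, 1001):   # hyper-parameter values from the docstring; t starts at 1
    train(x_batch, y_batch, 0.001, 0.9, 0.999, 1e-8, t)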

Example 7: test_reallocation

def test_reallocation():
    x = tensor.scalar('x')
    y = tensor.scalar('y')
    z = tensor.tanh(3 * x + y) + tensor.cosh(x + 5 * y)
    # The functionality is currently implemented only for the non-lazy, non-C VMs.
    for l in [vm.VM_Linker(allow_gc=False, lazy=False, use_cloop=False),
              vm.VM_Linker(allow_gc=True, lazy=False, use_cloop=False)]:
        m = theano.compile.get_mode(theano.Mode(linker=l))
        m = m.excluding('fusion', 'inplace')

        f = theano.function([x, y], z, name="test_reduce_memory",
                            mode=m)
        output = f(1, 2)
        assert output
        storage_map = f.fn.storage_map

        def check_storage(storage_map):
            from theano.tensor.var import TensorConstant
            for i in storage_map:
                if not isinstance(i, TensorConstant):
                    keys_copy = list(storage_map.keys())[:]
                    keys_copy.remove(i)
                    for o in keys_copy:
                        if (storage_map[i][0] and
                                storage_map[i][0] is storage_map[o][0]):
                            return [True, storage_map[o][0]]
            return [False, None]

        assert check_storage(storage_map)[0]
        assert len(set(id(v) for v in
                       itervalues(storage_map))) < len(storage_map)
Author: bouthilx | Project: Theano | Lines: 31 | Source: test_vm.py

Example 8: __init__

    def __init__(self, final_momentum=0.9, initial_momentum=0.5, momentum_switchover=5,
                 times=[10, 20, 30, 40, 50], S=3, lr=1e-2, maxIter=10000, initS=0.0,
                 numReplicates=3, theta=20, n=2000):
        times = np.asarray(times)  # allow the boolean indexing below
        self.times = times[times != 0].astype(np.float32)
        self.momentum_ = T.scalar('momentum', dtype=floatX)
        self.final_momentum = final_momentum; self.initial_momentum = initial_momentum
        self.momentum_switchover = momentum_switchover; self.W = 3; self.lr = lr
        self.maxIter = maxIter; self.numReplicates = numReplicates; self.initS = initS
        self.n = n; self.theta = theta
        self.lr_ = T.scalar()
        # matrix target for multiple replicates, vector for a single one
        self.target_ = (T.matrix(), T.vector())[self.numReplicates == 1]
        self.times_ = T.ivector("times"); self.x0_ = T.scalar("x0")
        self.n_ = T.scalar("n"); self.theta_ = T.scalar("theta")

        self.S__ = theano.shared(np.asarray(self.initS, dtype=floatX), 'S')
        self.predall_, self.updatesRecurrence_ = theano.scan(
            lambda x_prev, s: (s*x_prev*x_prev + s*x_prev + 2*x_prev) / (2*s*x_prev + 2),
            outputs_info=self.x0_, non_sequences=self.S__, n_steps=self.times_[-1])
        # we only have targets at some generations, e.g. 10, 20, ...
        self.pred_ = Z(self.predall_[self.times_ - 1], self.n_, self.theta_)
        self.Feedforward_ = theano.function(
            inputs=[self.x0_, self.times_, self.n_, self.theta_],
            outputs=self.pred_, updates=self.updatesRecurrence_)
        
        
        
        if self.numReplicates==1:
            self.cost_ = 0.5*((self.target_ - self.pred_)**2).mean(axis=0).sum()
        else:
            self.cost_=0
            for j in range(self.numReplicates):
                self.cost_ += 0.5*((self.target_[:,j] - self.pred_)**2).mean(axis=0).sum()
        self.Loss_ = theano.function(inputs=[self.target_,self.pred_], outputs=self.cost_)
        self.gW_ = T.grad(self.cost_, [self.S__])[0]
        self.weightUpdate__ = theano.shared(np.asarray(0, dtype = floatX))
        
        upd = self.momentum_ * self.weightUpdate__ - self.lr_ * self.gW_
        self.updatesW=[(self.weightUpdate__,  upd),(self.S__, self.S__ + upd)]

        self.Objective_ = theano.function([self.x0_, self.target_, self.lr_,self.times_,self.momentum_,self.n_,self.theta_], self.cost_, on_unused_input='warn',updates=self.updatesW,allow_input_downcast=True)
Author: airanmehr | Project: bio | Lines: 28 | Source: MultiLocusHAF.py

Example 9: __init__

    def __init__(self, *args, learning_rate=0.01, momentum=0.9, **kwargs):
        super().__init__(*args, **kwargs)

        self.learning_rate = learning_rate
        self.momentum = momentum

        learning_rate = T.scalar('learning_rate')
        momentum = T.scalar('momentum')

        vs = self.create_shadows('v')

        updates1 = [(p, p + momentum*v)
                    for p,v in zip(self.params.values(), vs.values())]

        updates2 = [(v, momentum*v - learning_rate*grad)
                    for v,grad in zip(vs.values(), self.grad.values())] \
                 + [(p, p - learning_rate*grad)
                    for p,grad in zip(self.params.values(),
                                      self.grad.values())]

        self.step1 = theano.function(
                inputs=[momentum],
                outputs=[],
                name='Nesterov_step1',
                updates=updates1)

        self.step2 = function(
                inputs=self.inputs+self.outputs+[
                    learning_rate, momentum],
                default_mode=1,
                outputs=self.loss,
                name='Nesterov_step2',
                updates=updates2)
Author: robertostling | Project: bnas | Lines: 33 | Source: optimize.py
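The two-phase split is what makes this Nesterov rather than classical momentum: step1 first jumps the parameters along the stored velocity, then step2 evaluates the gradient at the shifted point and applies the correction together with the velocity update. A per-minibatch call sequence might look like this (opt is an instance of the class, and the data arguments are placeholders for self.inputs + self.outputs):

opt.step1(0.9)                                 # look-ahead: p <- p + mu * v
loss = opt.step2(x_batch, y_batch, 0.01, 0.9)  # gradient step at the shifted params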

Example 10: pretraining_functions

    def pretraining_functions(self, train_set_x, batch_size):
    
       
        index = T.lscalar('index')  # index to a minibatch
        corruption_level = T.scalar('corruption')  # % of corruption to use
        learning_rate = T.scalar('lr')  # learning rate to use
        batch_begin = index * batch_size
        batch_end = batch_begin + batch_size


        pretrain_fns = []
        for dA in self.dA_layers:
            cost, updates = dA.get_cost_updates(corruption_level, learning_rate)
            fn = theano.function(
                inputs=[index, corruption_level, learning_rate],
                # To give the hyper-parameters default values instead, see:
                # http://stackoverflow.com/questions/35622784/what-is-the-right-way-to-pass-inputs-parameters-to-a-theano-function
                #   inputs=[index, theano.In(corruption_level, value=0.2),
                #           theano.In(learning_rate, value=0.1)],
                outputs=cost,
                updates=updates,
                givens={self.x: train_set_x[batch_begin: batch_end]})
            pretrain_fns.append(fn)

        return pretrain_fns
Author: timestocome | Project: DeepLearning | Lines: 30 | Source: StackedAutoEncoders.py

Example 11: __theano_build__

    def __theano_build__(self):
        params = self.params
        param_names = self.param_names
        hidden_dim = self.hidden_dim

        x1  = T.imatrix('x1')    # first sentence
        x2  = T.imatrix('x2')    # second sentence
        x1_mask = T.fmatrix('x1_mask')    #mask
        x2_mask = T.fmatrix('x2_mask')
        y   = T.ivector('y')     # label
        y_c = T.ivector('y_c')   # class weights 
        
        # Embedding words
        _E1 = params["E"].dot(params["W"][0]) + params["B"][0]
        _E2 = params["E"].dot(params["W"][1]) + params["B"][1]
        statex1 = _E1[x1.flatten(), :].reshape([x1.shape[0], x1.shape[1], hidden_dim])
        statex2 = _E2[x2.flatten(), :].reshape([x2.shape[0], x2.shape[1], hidden_dim])
        
        def rnn_cell(x, mx, ph, Wh):
            h = T.tanh(ph.dot(Wh) + x)
            h = mx[:, None] * h + (1-mx[:, None]) * ph
            return [h] 
            
        [h1], updates = theano.scan(
            fn=rnn_cell,
            sequences=[statex1, x1_mask],
            truncate_gradient=self.truncate,
            outputs_info=[dict(initial=T.zeros([self.batch_size, self.hidden_dim]))],
            non_sequences=params["W"][2])
        
        [h2], updates = theano.scan(
            fn=rnn_cell,
            sequences=[statex2, x2_mask],
            truncate_gradient=self.truncate,
            outputs_info=[dict(initial=h1[-1])],
            non_sequences=params["W"][3])
       
        #predict
        _s = T.nnet.softmax(h1[-1].dot(params["lrW"][0]) + h2[-1].dot(params["lrW"][1]) + params["lrb"])
        _p = T.argmax(_s, axis=1)
        _c = T.nnet.categorical_crossentropy(_s, y)
        _c = T.sum(_c * y_c)
        _l = T.sum(params["lrW"]**2)
        _cost = _c + 0.01 * _l
        
        # SGD parameters
        learning_rate = T.scalar('learning_rate')
        decay = T.scalar('decay')
        
        # Gradients and updates
        _grads, _updates = rms_prop(_cost, param_names, params, learning_rate, decay)
        
        # Assign functions
        self.bptt = theano.function([x1, x2, x1_mask, x2_mask, y, y_c], _grads)
        self.loss = theano.function([x1, x2, x1_mask, x2_mask, y, y_c], _c)
        self.weights = theano.function([x1, x2, x1_mask, x2_mask], _s)
        self.predictions = theano.function([x1, x2, x1_mask, x2_mask], _p)
        self.sgd_step = theano.function(
            [x1, x2, x1_mask, x2_mask, y, y_c, learning_rate, decay],
            updates=_updates)
Author: wangxggc | Project: rnn-theano | Lines: 60 | Source: rnn.py

Example 12: __init__

    def __init__(self, num_input, num_cells=50, num_output=1, lr=0.01, rho=0.95):
        X = T.matrix('x')
        Y = T.matrix('y')
        eta = T.scalar('eta')
        alpha = T.scalar('alpha')

        self.num_input = num_input
        self.num_output = num_output
        self.num_cells = num_cells
        self.eta = eta

        inputs = InputLayer(X, name="inputs")
        lstm = LSTMLayer(num_input, num_cells, input_layer=inputs, name="lstm")
        fc = FullyConnectedLayer(num_cells, num_output, input_layer=lstm)
        Y_hat = T.mean(fc.output(), axis=2)
        layer = inputs, lstm, fc
        self.params = get_params(layer)
        self.caches = make_caches(self.params)
        self.layers = layer
        mean_cost = T.mean((Y - Y_hat)**2)
        last_cost = T.mean((Y[-1] - Y_hat[-1])**2)
        self.cost = alpha*mean_cost + (1-alpha)*last_cost
        """"
        self.updates = momentum(self.cost, self.params, self.caches, self.eta, clip_at=3.0)
        """
        self.updates,_,_,_,_ = create_optimization_updates(self.cost, self.params, method="adadelta", lr= lr, rho=rho)
        self.train = theano.function([X, Y, alpha], [self.cost, last_cost] ,\
                updates=self.updates, allow_input_downcast=True)
        self.costfn = theano.function([X, Y, alpha], [self.cost, last_cost],\
                allow_input_downcast=True)
        self.predict = theano.function([X], [Y_hat], allow_input_downcast=True)
Author: abhishekmalali | Project: lstm_theano | Lines: 31 | Source: basic_lstm.py

Example 13: pretraining_functions

    def pretraining_functions(self, train_set_x, batch_size, k, weight_cost):

        index = T.lscalar('index')  
        momentum = T.scalar('momentum')
        learning_rate = T.scalar('lr') 
        # number of mini-batches
        n_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
        # start and end index of this mini-batch
        batch_begin = index * batch_size
        batch_end = batch_begin + batch_size

        pretrain_fns = []
        for rbm in self.rbm_layers:
            r_cost, fe_cost, updates = rbm.get_cost_updates(batch_size, learning_rate,
                                                            momentum, weight_cost,
                                                            persistent=None, k = k)

            # compile the theano function
            fn = theano.function(inputs=[index,
                              theano.Param(learning_rate, default=0.0001),
                              theano.Param(momentum, default=0.5)],
                              outputs= [r_cost, fe_cost],
                              updates=updates,
                              givens={self.x: train_set_x[batch_begin:batch_end]})
            # append function to the list of functions
            pretrain_fns.append(fn)

        return pretrain_fns
Author: Beronx86 | Project: pdnn | Lines: 28 | Source: srbm.py

Example 14: get_update

def get_update(Ws_s, bs_s):
    x, fx = train.get_model(Ws_s, bs_s)

    # Ground truth (who won)
    y = T.vector('y')

    # Compute loss (just log likelihood of a sigmoid fit)
    y_pred = sigmoid(fx)
    loss = -( y * T.log(y_pred) + (1 - y) * T.log(1 - y_pred)).mean()

    # Metrics on the number of correctly predicted ones
    frac_correct = ((fx > 0) * y + (fx < 0) * (1 - y)).mean()

    # Updates
    learning_rate_s = T.scalar(dtype=theano.config.floatX)
    momentum_s = T.scalar(dtype=theano.config.floatX)
    updates = train.nesterov_updates(loss, Ws_s + bs_s, learning_rate_s, momentum_s)
    
    f_update = theano.function(
        inputs=[x, y, learning_rate_s, momentum_s],
        outputs=[loss, frac_correct],
        updates=updates,
        )

    return f_update
Author: DestinyF | Project: deeppink | Lines: 25 | Source: reinforcement.py
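Usage would look something like the following; Ws_s and bs_s are the shared weight and bias lists expected by train.get_model, and the batch arrays and hyper-parameter values are illustrative:

f_update = get_update(Ws_s, bs_s)
loss, frac_correct = f_update(x_batch, y_batch, 0.01, 0.9)  # lr=0.01, momentum=0.9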

Example 15: create_TrainFunc_tranPES

def create_TrainFunc_tranPES(simfn, embeddings,  marge=0.5, alpha=1., beta=1.):

    # parse the embedding data
    embedding = embeddings[0] # D x N matrix
    lembedding = embeddings[1]

    # declare the symbolic variables for training triples
    hp = S.csr_matrix('head positive') # N x batchsize matrix
    rp = S.csr_matrix('relation')
    tp = S.csr_matrix('tail positive')

    hn = S.csr_matrix('head negative')
    tn = S.csr_matrix('tail negative')

    lemb = T.scalar('embedding learning rate')
    lremb = T.scalar('relation learning rate')

    subtensorE = T.ivector('batch entities set')
    subtensorR = T.ivector('batch link set')

    # Generate the training positive and negative triples
    hpmat = S.dot(embedding.E, hp).T #  batchsize x D dense matrix
    rpmat = S.dot(lembedding.E, rp).T
    tpmat = S.dot(embedding.E, tp).T

    hnmat = S.dot(embedding.E, hn).T
    tnmat = S.dot(embedding.E, tn).T

    # calculate the score
    pos = tranPES3(simfn, T.concatenate([hpmat, tpmat], axis=1).reshape((hpmat.shape[0], 2, hpmat.shape[1])).dimshuffle(0, 2, 1), hpmat, rpmat, tpmat)


    negh = tranPES3(simfn, T.concatenate([hnmat, tpmat], axis=1).reshape((hnmat.shape[0], 2, hnmat.shape[1])).dimshuffle(0, 2, 1), hnmat, rpmat, tpmat)
    negt = tranPES3(simfn, T.concatenate([hpmat, tnmat], axis=1).reshape((hpmat.shape[0], 2, hpmat.shape[1])).dimshuffle(0, 2, 1), hpmat, rpmat, tnmat)

    costh, outh = margeCost(pos, negh, marge)
    costt, outt = margeCost(pos, negt, marge)

    embreg = regEmb(embedding, subtensorE, alpha)
    lembreg = regLink(lembedding, subtensorR, beta)
    

    cost = costh + costt + embreg[0] + lembreg
    out = T.concatenate([outh, outt])
    outc = embreg[1]

    # list of inputs to the function
    list_in = [lemb, lremb, hp, rp, tp, hn, tn, subtensorE, subtensorR]

    # updating the embeddings using gradient descend
    emb_grad = T.grad(cost, embedding.E)
    New_embedding = embedding.E - lemb*emb_grad

    remb_grad = T.grad(cost, lembedding.E)
    New_rembedding = lembedding.E - lremb * remb_grad

    updates = OrderedDict({embedding.E: New_embedding, lembedding.E: New_rembedding})

    return theano.function(list_in, [cost, T.mean(out), T.mean(outc), embreg[0], lembreg],
                          updates=updates, on_unused_input='ignore')
Author: while519 | Project: tranpes | Lines: 60 | Source: model.py


Note: the theano.tensor.scalar examples in this article were compiled from open-source projects hosted on GitHub and similar platforms. The code snippets come from projects contributed by their respective authors, and copyright in the source code remains with them; refer to each project's License for the terms of use and distribution, and do not reproduce without permission.