

Python LogisticRegression.predict Method Code Examples

This article collects typical usage examples of the Python method LogisticRegression.LogisticRegression.predict. If you have been asking yourself how exactly LogisticRegression.predict is used in Python, or what it looks like in real code, the curated examples here should help. You can also browse further usage examples of the containing class, LogisticRegression.LogisticRegression.


Below are 12 code examples of LogisticRegression.predict, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
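
Before the individual examples, here is a minimal sketch of the construct/train/predict pattern most of them follow. The constructor arguments vary from project to project, so everything below the import is illustrative rather than taken from any one repository:

import numpy as np
from LogisticRegression import LogisticRegression

# Toy binary-classification data (illustrative only)
X = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
y = np.array([0, 0, 0, 1])

model = LogisticRegression()  # constructor arguments differ between projects
model.train(X, y)             # the examples below expose train(...)
print(model.predict(X))       # ...and predict(...) on new inputs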

Example 1: test_classification

# Required import: from LogisticRegression import LogisticRegression [as alias]
# Or: from LogisticRegression.LogisticRegression import predict [as alias]
# (this example also uses: import numpy as np)
    def test_classification(self, X, y):
        logreg = LogisticRegression(lam_2=0.5)
        logreg.train(X, y)
        print("predict", logreg.predict(X[0]))
        print("error:", sum((np.array([logreg.predict(x)
                                       for x in X]) - np.array(y))**2))
        print("F:", logreg.F(logreg.w, X, y))
        print("w:", logreg.w)

        print(logreg.fevals, logreg.gevals, logreg.adp)
Developer: heidekrueger, Project: CaseStudiesMachineLearning, Lines: 12, Source: LogisticRegressionTest.py

Example 2: MLP

# Required import: from LogisticRegression import LogisticRegression [as alias]
# Or: from LogisticRegression.LogisticRegression import predict [as alias]
class MLP(object):
    def __init__(self, input, label, n_in, n_hidden, n_out, rng=None):

        self.x = input
        self.y = label

        if rng is None:
            rng = numpy.random.RandomState(1234)

        # construct hidden_layer (tanh or sigmoid so far)
        self.hidden_layer = HiddenLayer(input=self.x,
                                        n_in=n_in,
                                        n_out=n_hidden,
                                        rng=rng,
                                        activation=numpy.tanh)

        # construct log_layer (softmax)
        self.log_layer = LogisticRegression(input=self.hidden_layer.output,
                                            label=self.y,
                                            n_in=n_hidden,
                                            n_out=n_out)

    def train(self):
        layer_input = self.hidden_layer.forward()
        self.log_layer.train(input=layer_input)
        self.hidden_layer.backward(prev_layer=self.log_layer)
        

    def predict(self, x):
        x = self.hidden_layer.output(x)
        return self.log_layer.predict(x)
Developer: 460130107, Project: DeepLearning, Lines: 33, Source: MLP.py
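
For context, a hypothetical driver for the MLP class above, with XOR-style toy data (the data, epoch count, and layer sizes are illustrative; the class interface is the one shown):

import numpy

x = numpy.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = numpy.array([[0, 1], [1, 0], [1, 0], [0, 1]])  # one-hot labels, n_out=2

mlp = MLP(input=x, label=y, n_in=2, n_hidden=3, n_out=2)
for epoch in range(5000):  # assumed epoch count
    mlp.train()
print(mlp.predict(x))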

Example 3: tuneThreshold

# Required import: from LogisticRegression import LogisticRegression [as alias]
# Or: from LogisticRegression.LogisticRegression import predict [as alias]
def tuneThreshold():
    """
        Explore different values of threshold to see which one fits best
    """
    thresholds = np.linspace(0.4,0.6, 10)
    
    bestAcc = 0.0
    bestModel = None
    X_tr, y_tr, w_tr = loadData()
    m, n = X_tr.shape
    for th in thresholds:
        model = LogisticRegression(features=['PRI_tau_eta',
                                            'PRI_lep_eta',
                                            'DER_deltar_tau_lep',
                                            'PRI_met_sumet',
                                            'DER_mass_transverse_met_lep'],
                                    threshold=th)
        model.train(X_tr, y_tr, w_tr)
        p, r = model.predict(X_tr)
        #calculate some accuracy on the same train set
        acc =  100.0*(p.flatten() == y_tr.flatten()).sum()/m
        print "%s %s%%"%(th, acc)
        if acc > bestAcc:
            bestAcc = acc
            bestModel = model
    
    #save the best model, named after the best accuracy (not the last loop's acc)
    bestModel.save('data/logisticRegression%.2f.txt'%bestAcc)
Developer: amccrea, Project: higgs-kaggle, Lines: 30, Source: LogisticRegressionTunning.py
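
Note that tuneThreshold() scores each threshold on the same data it trained on, so the selected threshold may be optimistic. A variant that scores on a held-out slice, assuming the same loadData/train/predict interface (the split logic is added here, not taken from the repository):

FEATURES = ['PRI_tau_eta', 'PRI_lep_eta', 'DER_deltar_tau_lep',
            'PRI_met_sumet', 'DER_mass_transverse_met_lep']

def tuneThresholdHeldOut(val_fraction=0.2):
    X, y, w = loadData()
    split = int(X.shape[0] * (1 - val_fraction))
    m_va = X.shape[0] - split
    bestAcc, bestModel = 0.0, None
    for th in np.linspace(0.4, 0.6, 10):
        model = LogisticRegression(features=FEATURES, threshold=th)
        model.train(X[:split], y[:split], w[:split])
        p, r = model.predict(X[split:])  # accuracy measured on the held-out slice
        acc = 100.0 * (p.flatten() == y[split:].flatten()).sum() / m_va
        if acc > bestAcc:
            bestAcc, bestModel = acc, model
    return bestAcc, bestModel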

Example 4: main

# Required import: from LogisticRegression import LogisticRegression [as alias]
# Or: from LogisticRegression.LogisticRegression import predict [as alias]
def main():
    #model = FakeModel() #TODO model parameters
    model = LogisticRegression(features=['PRI_tau_eta',
                                        'PRI_lep_eta',
                                        'DER_deltar_tau_lep',
                                        'PRI_met_sumet',
                                        'DER_mass_transverse_met_lep'])
    #load some previously saved model parameters                                    
    model.load('data/logisticRegression69.61.txt')
    
    #load test data
    data = np.loadtxt("data/test.csv", delimiter=',', skiprows=1)
    ids = data[:,0].astype(int) #first column is id
    X = data[:,1:31] #30 features
    
    #make predictions (ranking and label)
    r, p = model.predict(X)
    
    #save to file
    save(ids, r, p)
Developer: amccrea, Project: higgs-kaggle, Lines: 22, Source: makeSubmission.py

Example 5: Dropout

# Required import: from LogisticRegression import LogisticRegression [as alias]
# Or: from LogisticRegression.LogisticRegression import predict [as alias]
class Dropout(object):
    def __init__(self, input, label,\
                 n_in, hidden_layer_sizes, n_out,\
                 rng=None, activation=ReLU):

        self.x = input
        self.y = label

        self.hidden_layers = []
        self.n_layers = len(hidden_layer_sizes)
        
        if rng is None:
            rng = numpy.random.RandomState(1234)

        assert self.n_layers > 0


        # construct multi-layer 
        for i in xrange(self.n_layers):

            # layer_size
            if i == 0:
                input_size = n_in
            else:
                input_size = hidden_layer_sizes[i-1]

            # layer_input
            if i == 0:
                layer_input = self.x

            else:
                layer_input = self.hidden_layers[-1].output()

            # construct hidden_layer
            hidden_layer = HiddenLayer(input=layer_input,
                                       n_in=input_size,
                                       n_out=hidden_layer_sizes[i],
                                       rng=rng,
                                       activation=activation)
            
            self.hidden_layers.append(hidden_layer)


        # layer for ouput using Logistic Regression (softmax)
        self.log_layer = LogisticRegression(input=self.hidden_layers[-1].output(),
                                            label=self.y,
                                            n_in=hidden_layer_sizes[-1],
                                            n_out=n_out)


    def train(self, epochs=5000, dropout=True, p_dropout=0.5, rng=None):

        for epoch in xrange(epochs):
            dropout_masks = []  # create different masks in each training epoch

            # forward hidden_layers
            for i in xrange(self.n_layers):
                if i == 0:
                    layer_input = self.x

                layer_input = self.hidden_layers[i].forward(input=layer_input)

                if dropout == True:
                    mask = self.hidden_layers[i].dropout(input=layer_input, p=p_dropout, rng=rng)
                    layer_input *= mask

                    dropout_masks.append(mask)


            # forward & backward log_layer
            self.log_layer.train(input=layer_input)


            # backward hidden_layers
            for i in reversed(xrange(0, self.n_layers)):
                if i == self.n_layers-1:
                    prev_layer = self.log_layer
                else:
                    prev_layer = self.hidden_layers[i+1]

                if dropout == True:
                    self.hidden_layers[i].backward(prev_layer=prev_layer, dropout=True, mask=dropout_masks[i])
                else:
                    self.hidden_layers[i].backward(prev_layer=prev_layer)
                


    def predict(self, x, dropout=True, p_dropout=0.5):
        layer_input = x

        for i in xrange(self.n_layers):
            if dropout == True:
                # scale weights to approximate the dropout ensemble at test time;
                # NOTE: this rebinds the stored weights in place, so a second call
                # to predict() scales them again (see the note after this example)
                self.hidden_layers[i].W = (1 - p_dropout) * self.hidden_layers[i].W
            
            layer_input = self.hidden_layers[i].output(input=layer_input)

        return self.log_layer.predict(layer_input)
Developer: 2php, Project: DeepLearning, Lines: 99, Source: Dropout.py
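
A side note on the predict() above: because it rescales the stored weights in place, a second call shrinks them again. A non-destructive sketch that restores the original weight arrays afterwards (a hypothetical extra method for the same class, not part of the repository):

    def predict_restoring(self, x, dropout=True, p_dropout=0.5):
        # keep references to the original weight arrays; predict() rebinds
        # layer.W to new scaled arrays, so restoring the references undoes it
        originals = [layer.W for layer in self.hidden_layers]
        try:
            return self.predict(x, dropout=dropout, p_dropout=p_dropout)
        finally:
            for layer, W in zip(self.hidden_layers, originals):
                layer.W = W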

Example 6: SdA

# Required import: from LogisticRegression import LogisticRegression [as alias]
# Or: from LogisticRegression.LogisticRegression import predict [as alias]
class SdA(object):
    def __init__(self, input=None, label=None,\
                 n_ins=2, hidden_layer_sizes=[3, 3], n_outs=2,\
                 rng=None):
        
        self.x = input
        self.y = label

        self.sigmoid_layers = []
        self.dA_layers = []
        self.n_layers = len(hidden_layer_sizes)  # = len(self.rbm_layers)

        if rng is None:
            rng = numpy.random.RandomState(1234)

        
        assert self.n_layers > 0


        # construct multi-layer
        for i in xrange(self.n_layers):
            # layer_size
            if i == 0:
                input_size = n_ins
            else:
                input_size = hidden_layer_sizes[i - 1]

            # layer_input
            if i == 0:
                layer_input = self.x
            else:
                layer_input = self.sigmoid_layers[-1].sample_h_given_v()
                
            # construct sigmoid_layer
            sigmoid_layer = HiddenLayer(input=layer_input,
                                        n_in=input_size,
                                        n_out=hidden_layer_sizes[i],
                                        rng=rng,
                                        activation=sigmoid)
            self.sigmoid_layers.append(sigmoid_layer)


            # construct dA_layers
            dA_layer = dA(input=layer_input,
                          n_visible=input_size,
                          n_hidden=hidden_layer_sizes[i],
                          W=sigmoid_layer.W,
                          hbias=sigmoid_layer.b)
            self.dA_layers.append(dA_layer)


        # layer for output using Logistic Regression
        self.log_layer = LogisticRegression(input=self.sigmoid_layers[-1].sample_h_given_v(),
                                            label=self.y,
                                            n_in=hidden_layer_sizes[-1],
                                            n_out=n_outs)

        # finetune cost: the negative log likelihood of the logistic regression layer
        self.finetune_cost = self.log_layer.negative_log_likelihood()


    def pretrain(self, lr=0.1, corruption_level=0.3, epochs=100):
        for i in xrange(self.n_layers):
            if i == 0:
                layer_input = self.x
            else:
                layer_input = self.sigmoid_layers[i-1].sample_h_given_v(layer_input)

            da = self.dA_layers[i]

            for epoch in xrange(epochs):
                da.train(lr=lr, corruption_level=corruption_level, input=layer_input)

    def finetune(self, lr=0.1, epochs=100):
        layer_input = self.sigmoid_layers[-1].sample_h_given_v()

        # train log_layer
        epoch = 0

        while epoch < epochs:
            self.log_layer.train(lr=lr, input=layer_input)
            # self.finetune_cost = self.log_layer.negative_log_likelihood()
            # print >> sys.stderr, 'Training epoch %d, cost is ' % epoch, self.finetune_cost
            
            lr *= 0.95
            epoch += 1


    def predict(self, x):
        layer_input = x
        
        for i in xrange(self.n_layers):
            sigmoid_layer = self.sigmoid_layers[i]
            layer_input = sigmoid_layer.output(input=layer_input)

        return self.log_layer.predict(layer_input)
Developer: 2php, Project: DeepLearning, Lines: 98, Source: SdA.py

Example 7: DBN

# Required import: from LogisticRegression import LogisticRegression [as alias]
# Or: from LogisticRegression.LogisticRegression import predict [as alias]
class DBN(object):
    def __init__(self, input=None, label=None, n_ins=2, hidden_layer_sizes=[3, 3], n_outs=2, rng=None):

        self.x = input
        self.y = label

        self.sigmoid_layers = []
        self.rbm_layers = []
        self.n_layers = len(hidden_layer_sizes)  # = len(self.rbm_layers)

        if rng is None:
            rng = numpy.random.RandomState(1234)

        assert self.n_layers > 0

        # construct multi-layer
        for i in xrange(self.n_layers):
            # layer_size
            if i == 0:
                input_size = n_ins
            else:
                input_size = hidden_layer_sizes[i - 1]

            # layer_input
            if i == 0:
                layer_input = self.x
            else:
                layer_input = self.sigmoid_layers[-1].sample_h_given_v()

            # construct sigmoid_layer
            sigmoid_layer = HiddenLayer(
                input=layer_input, n_in=input_size, n_out=hidden_layer_sizes[i], rng=rng, activation=sigmoid
            )
            self.sigmoid_layers.append(sigmoid_layer)

            # construct rbm_layer
            rbm_layer = RBM(
                input=layer_input,
                n_visible=input_size,
                n_hidden=hidden_layer_sizes[i],
                W=sigmoid_layer.W,  # W, b are shared
                hbias=sigmoid_layer.b,
            )
            self.rbm_layers.append(rbm_layer)

        # layer for output using Logistic Regression
        self.log_layer = LogisticRegression(
            input=self.sigmoid_layers[-1].sample_h_given_v(), label=self.y, n_in=hidden_layer_sizes[-1], n_out=n_outs
        )

        # finetune cost: the negative log likelihood of the logistic regression layer
        self.finetune_cost = self.log_layer.negative_log_likelihood()

    def pretrain(self, lr=0.1, k=1, epochs=100):
        # pre-train layer-wise
        for i in xrange(self.n_layers):
            if i == 0:
                layer_input = self.x
            else:
                layer_input = self.sigmoid_layers[i - 1].sample_h_given_v(layer_input)
            rbm = self.rbm_layers[i]

            for epoch in xrange(epochs):
                rbm.contrastive_divergence(lr=lr, k=k, input=layer_input)
                # log pretraining as stderr every after 100 epochs
                if (epoch + 1) % 100 == 0:  # REMOVE this block for faster training
                    cost = rbm.get_reconstruction_cross_entropy()
                    print >> sys.stderr, "Pre-training layer %d, epoch %d, cost " % (i, epoch + 1), cost

    def finetune(self, lr=0.1, epochs=100):
        layer_input = self.sigmoid_layers[-1].sample_h_given_v()

        # train log_layer
        epoch = 0
        done_looping = False
        while (epoch < epochs) and (not done_looping):
            self.log_layer.train(lr=lr, input=layer_input)
            # log finetune training as stderr every 25 epochs
            if (epoch + 1) % 25 == 0:  # REMOVE this block for faster training
                self.finetune_cost = self.log_layer.negative_log_likelihood()
                print >> sys.stderr, "Training epoch %d, cost is " % (epoch + 1), self.finetune_cost

            lr *= 0.95
            epoch += 1

    def predict(self, x):
        layer_input = x

        for i in xrange(self.n_layers):
            sigmoid_layer = self.sigmoid_layers[i]
            layer_input = sigmoid_layer.output(input=layer_input)

        out = self.log_layer.predict(layer_input)
        return out
Developer: kabwechansa, Project: DeepLearning, Lines: 96, Source: DBN.py
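
A hypothetical end-to-end driver for the DBN class above, with toy data (all hyperparameters are illustrative; the method signatures are the ones shown):

import numpy

x = numpy.array([[1, 1], [1, 0], [0, 1], [0, 0]])
y = numpy.array([[1, 0], [1, 0], [0, 1], [0, 1]])  # one-hot labels

dbn = DBN(input=x, label=y, n_ins=2, hidden_layer_sizes=[3, 3], n_outs=2)
dbn.pretrain(lr=0.1, k=1, epochs=100)  # layer-wise CD-k pre-training of the RBMs
dbn.finetune(lr=0.1, epochs=100)       # supervised training of the logistic layer
print(dbn.predict(x))                  # per-row class scores from log_layer.predict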

Example 8: DBN

# Required import: from LogisticRegression import LogisticRegression [as alias]
# Or: from LogisticRegression.LogisticRegression import predict [as alias]

#......... part of the code omitted here .........

        # construct multi-layer
        #ORIG# for i in xrange(self.n_layers):
        for i in range(self.n_layers):
            # layer_size
            if i == 0:
                input_size = n_ins
            else:
                input_size = hidden_layer_sizes[i - 1]

            # layer_input
            if i == 0:
                layer_input = self.x
            else:
                layer_input = self.sigmoid_layers[-1].sample_h_given_v()

            # construct sigmoid_layer
            sigmoid_layer = HiddenLayer(input=layer_input,
                                        n_in=input_size,
                                        n_out=hidden_layer_sizes[i],
                                        numpy_rng=numpy_rng,
                                        activation=sigmoid)
            self.sigmoid_layers.append(sigmoid_layer)


            # construct rbm_layer
            rbm_layer = RBM(input=layer_input,
                            n_visible=input_size,
                            n_hidden=hidden_layer_sizes[i],
                            W=sigmoid_layer.W,     # W, b are shared
                            hbias=sigmoid_layer.b)
            self.rbm_layers.append(rbm_layer)


        # layer for output using Logistic Regression
        self.log_layer = LogisticRegression(input=self.sigmoid_layers[-1].sample_h_given_v(),
                                            label=self.y,
                                            n_in=hidden_layer_sizes[-1],
                                            n_out=n_outs)

        # finetune cost: the negative log likelihood of the logistic regression layer
        self.finetune_cost = self.log_layer.negative_log_likelihood()



    def pretrain(self, lr=0.1, k=1, epochs=100):
        # pre-train layer-wise
        for i in range(self.n_layers):  # range, completing the #ORIG# migration above
            if i == 0:
                layer_input = self.x
            else:
                layer_input = self.sigmoid_layers[i-1].sample_h_given_v(layer_input)
            rbm = self.rbm_layers[i]

            for epoch in range(epochs):
                rbm.contrastive_divergence(lr=lr, k=k, input=layer_input)
                # cost = rbm.get_reconstruction_cross_entropy()
                # print >> sys.stderr, \
                #        'Pre-training layer %d, epoch %d, cost ' %(i, epoch), cost

    # def pretrain(self, lr=0.1, k=1, epochs=100):
    #     # pre-train layer-wise
    #     for i in xrange(self.n_layers):
    #         rbm = self.rbm_layers[i]

    #         for epoch in xrange(epochs):
    #             layer_input = self.x
    #             for j in xrange(i):
    #                 layer_input = self.sigmoid_layers[j].sample_h_given_v(layer_input)

    #             rbm.contrastive_divergence(lr=lr, k=k, input=layer_input)
    #             # cost = rbm.get_reconstruction_cross_entropy()
    #             # print >> sys.stderr, \
    #             #        'Pre-training layer %d, epoch %d, cost ' %(i, epoch), cost


    def finetune(self, lr=0.1, epochs=100):
        layer_input = self.sigmoid_layers[-1].sample_h_given_v()

        # train log_layer
        epoch = 0
        done_looping = False
        while (epoch < epochs) and (not done_looping):
            self.log_layer.train(lr=lr, input=layer_input)
            # self.finetune_cost = self.log_layer.negative_log_likelihood()
            # print >> sys.stderr, 'Training epoch %d, cost is ' % epoch, self.finetune_cost

            lr *= 0.95
            epoch += 1


    def predict(self, x):
        layer_input = x

        for i in range(self.n_layers):
            sigmoid_layer = self.sigmoid_layers[i]
            layer_input = sigmoid_layer.output(input=layer_input)

        out = self.log_layer.predict(layer_input)
        return out
Developer: ddofer, Project: Kaggle-HUJI-ML, Lines: 104, Source: DBN.py

Example 9: sum

# Required import: from LogisticRegression import LogisticRegression [as alias]
# Or: from LogisticRegression.LogisticRegression import predict [as alias]
# (fragment; also uses numpy as np, matplotlib.pyplot as plt, gzip, pickle as pkl,
#  logging, time, and an accuracy_score function, e.g. from sklearn.metrics)
      avg_cost = sum(epoch_costs) / float(len(epoch_costs))
    else:
      logging.critical('No costs for epoch %d'%iepoch)
    avg_costs.append(avg_cost)
    if epoch_costs and iepoch % 200 == 0:
      logging.info('Epoch [%d] Cost %f'%(iepoch, cost))
  end_time = time.time()
  logging.info('Model training completed in %.0fs'%(end_time - start_time))
  plt.plot(np.arange(0, num_epochs), avg_costs)
  plt.show()
  fp = gzip.open(model_file, 'wb')
  pkl.dump(clf, fp)
  return

clf = load_model()

if clf is None:
  clf = LogisticRegression(beta=0.0005, n_in=image_size * image_size,
    n_out=num_labels)
  train_model(clf)
elif force_training:
  logging.info('force_training is set. Model will be retrained')
  train_model(clf)

logging.info('Model accuracy %f'%accuracy_score(
    test_labels,
    clf.predict(test_dataset)
  ))


Developer: smukherj1, Project: theano-fun, Lines: 30, Source: tut.py
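
The fragment calls load_model(), which is not shown. A counterpart to the gzip+pickle save in train_model() would presumably look like this (hypothetical; only the save path is visible above):

def load_model():
    # mirror of the save in train_model(): gzip-compressed pickle at model_file
    try:
        with gzip.open(model_file, 'rb') as fp:
            return pkl.load(fp)
    except IOError:
        return None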

Example 10: DBN

# Required import: from LogisticRegression import LogisticRegression [as alias]
# Or: from LogisticRegression.LogisticRegression import predict [as alias]
class DBN(object):
    def __init__(self, input=None, label=None,\
                 n_ins=2, hidden_layer_sizes=[3, 3], n_outs=2,\
                 numpy_rng=None):
        
        self.x = input
        self.y = label

        self.sigmoid_layers = []
        self.rbm_layers = []
        self.n_layers = len(hidden_layer_sizes)  # = len(self.rbm_layers)

        if numpy_rng is None:
            numpy_rng = numpy.random.RandomState(1234)

        
        assert self.n_layers > 0


        # construct multi-layer
        for i in range(self.n_layers):
            # layer_size
            if i == 0:
                input_size = n_ins
            else:
                input_size = hidden_layer_sizes[i - 1]

            # layer_input
            if i == 0:
                layer_input = self.x
            else:
                layer_input = self.sigmoid_layers[-1].sample_h_given_v()
                
            # construct sigmoid_layer
            sigmoid_layer = HiddenLayer(input=layer_input,
                                        n_in=input_size,
                                        n_out=hidden_layer_sizes[i],
                                        numpy_rng=numpy_rng,
                                        activation=sigmoid)
            self.sigmoid_layers.append(sigmoid_layer)


            # construct rbm_layer
            rbm_layer = RBM(input=layer_input,
                            n_visible=input_size,
                            n_hidden=hidden_layer_sizes[i],
                            W=sigmoid_layer.W,     # W, b are shared
                            hbias=sigmoid_layer.b)
            self.rbm_layers.append(rbm_layer)


        # layer for output using Logistic Regression
        self.log_layer = LogisticRegression(input=self.sigmoid_layers[-1].sample_h_given_v(),
                                            label=self.y,
                                            n_in=hidden_layer_sizes[-1],
                                            n_out=n_outs)

        # finetune cost: the negative log likelihood of the logistic regression layer
        self.finetune_cost = self.log_layer.negative_log_likelihood()

    def pretrain(self, lr=0.1, k=1, epochs=100):
        # pre-train layer-wise
        for i in range(self.n_layers):
            if i == 0:
                layer_input = self.x
            else:
                layer_input = self.sigmoid_layers[i-1].sample_h_given_v(layer_input)
            rbm = self.rbm_layers[i]
            
            for epoch in range(epochs):
                rbm.contrastive_divergence(lr=lr, k=k, input=layer_input)

    def finetune(self, lr=0.1, epochs=100):
        layer_input = self.sigmoid_layers[-1].sample_h_given_v()

        # train log_layer
        epoch = 0
        done_looping = False
        while (epoch < epochs) and (not done_looping):
            self.log_layer.train(lr=lr, input=layer_input)
            
            lr *= 0.95
            epoch += 1


    def predict(self, x):
        layer_input = x
        
        for i in range(self.n_layers):
            sigmoid_layer = self.sigmoid_layers[i]
            layer_input = sigmoid_layer.output(input=layer_input)

        out = self.log_layer.predict(layer_input)
        return out
Developer: ray306, Project: IPython-Notebooks, Lines: 96, Source: DBN.py

Example 11: DBN

# Required import: from LogisticRegression import LogisticRegression [as alias]
# Or: from LogisticRegression.LogisticRegression import predict [as alias]

#......... part of the code omitted here .........
            if i == 0:
                layer_input = self.x
            else:
                layer_input = self.sigmoid_layers[i-1].sample_h_given_v(layer_input)
             
            rbm = self.rbm_layers[i]
#             print 'layer_input', layer_input
             
            for epoch in xrange(epochs):
                batch_start = time.clock()
#                 rbm.contrastive_divergence(lr=lr, k=k, input=layer_input)
                # cost = rbm.get_reconstruction_cross_entropy()
                # print >> sys.stderr, \
                #        'Pre-training layer %d, epoch %d, cost ' %(i, epoch), cost
                
                
                cost = 0.0;
                if batch_size == -1:
                    cost = rbm.contrastive_divergence(input = layer_input, lr=lr, k=k, batch_size = -1)
                else:
                    n_train_batches = len(layer_input) / batch_size # compute number of minibatches for training, validation and testing
                    mean_cost = []
                    for batch_index in xrange(n_train_batches):
                        mean_cost += [rbm.contrastive_divergence(input = layer_input [batch_index * batch_size:(batch_index + 1) * batch_size], lr=lr, k=k, batch_size = batch_size)]
                    cost = numpy.mean(mean_cost)

                
                batch_stop = time.clock()
                batch_time = (batch_stop - batch_start)
                
                print '\tPre-training layer [%d: %d X %d], epoch %d, cost %.7fm, entropy: %.2f, time is %.2f seconds' %(i, rbm.n_visible, rbm.n_hidden, epoch, cost, rbm.get_reconstruction_cross_entropy(), (batch_time))

            # synchronous betwen rbm and sigmoid layer
            self.sigmoid_layers[i].W = rbm.W
            self.sigmoid_layers[i].b = rbm.hbias

#                 # Plot filters after each training epoch
#                 # Construct image from the weight matrix
#                 if layer == 0:
#                     if (epoch % 20 == 0):
#                         image = PIL.Image.fromarray(tile_raster_images(
#                                  X = numpy.array(rbm.get_w().T),
#                                  img_shape=(28, 28), 
#                                  tile_shape=(10, 10),
#                                  tile_spacing=(1, 1)))
#                         image.save('result/filters_at_layer_%d_epoch_%d.png' % (layer, epoch))
            
#             numpy.array(rbm.get_w().T).dump(('result/weight_at_layer_%d.txt' % layer))
#             if layer == 0:
#                 image = PIL.Image.fromarray(tile_raster_images(
#                              X = numpy.array(rbm.get_w().T),
#                              img_shape=(28, rbm.n_visible / 28), 
#                              tile_shape=(10, 10),
#                              tile_spacing=(1, 1)))
#             image.save('result/filters_at_layer_%d.png' % layer)
        
        pretaining_end_time = time.clock()
        print ('Total time for pretraining: ' + '%.2f seconds' % ((pretaining_end_time - pretaining_start_time)))
        print self.sigmoid_layers[0].W

    def finetune(self, lr=0.1, epochs=100):
        
        layer_input = self.sigmoid_layers[-1].sample_h_given_v()

#         self.log_layer = LogisticRegression(input=self.sigmoid_layers[-1].sample_h_given_v(),
#                                     label=self.y,
#                                     n_in=hidden_layer_sizes[-1],
#                                     n_out=n_outs)
                

        # train log_layer
        epoch = 0
        done_looping = False
        start_time = time.clock()
        print layer_input
        while (epoch < epochs) and (not done_looping):
            self.log_layer.train(lr=lr, input=layer_input)
            if (epoch % 20 == 0):
                print ('\tFine-tuning epoch %d, cost is ' % epoch) #self.log_layer.negative_log_likelihood()
            
            lr *= 0.95
            epoch += 1
        end_time = time.clock()
        print ('Total time for fine-tuning: ' + '%.2f seconds' % ((end_time - start_time)))
        
        
        

    def predict(self, x = None, y = None):

        input_x = x
        
        for i in xrange(self.n_layers):
            sigmoid_layer = self.sigmoid_layers[i]
            input_x = sigmoid_layer.output(input=input_x)
            
        

        out = self.log_layer.predict(input_x, y)
        return out
Developer: BinbinBian, Project: DeepLearning, Lines: 104, Source: DBN.py

Example 12: Hidden

# Required import: from LogisticRegression import LogisticRegression [as alias]
# Or: from LogisticRegression.LogisticRegression import predict [as alias]
x, y = T.dmatrices('x', 'y')
H = Hidden( x, 784, 50, param_init = ( 'glorot', 'zero' ) , activation_func = T.tanh )
h = H.output( )
L = LogReg( h, 50, 10, param_init = ( 'zero', 'zero' ) )

lam = 0.02 # regularizer
alpha = 0.8 # learning rate / weight decay
zeta = 0.995 # Nesterov momentum

global cost
if lam is None:
	cost = L.cost( y )
else:
	cost = L.cost( y ) + L.regularizer ( {'weights':lam, 'bias':0.0} ) \
	+ lam * H.regularizer_L2( ) + 0.02 * H.regularizer_L1( )
pred = L.predict ( )
gw = T.grad (cost, wrt=L.W)
gb = T.grad (cost, wrt=L.B)
gwh1 = T.grad ( cost, wrt=H.W )
gbh1 = T.grad ( cost, wrt=H.B )

W_shape, B_shape = L.weightShapes()
WH1_shape, BH1_shape = H.weightShapes()
VW = zeros(W_shape)
VB = zeros(B_shape)
VWH1 = zeros(WH1_shape)
VBH1 = zeros(BH1_shape)

train = function( [x,y], [cost, gw, gb, gwh1, gbh1] )
predict = function ( [x], pred )
print 'Function Compiled'
Developer: dasayan05, Project: theanotests, Lines: 33, Source: Test_MLP.py
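
The training loop that consumes the velocity buffers (VW, VB, VWH1, VBH1) is not shown. A sketch of one Nesterov-style momentum step they suggest, assuming the layer parameters are Theano shared variables (consistent with T.grad over L.W above) and that batch_x, batch_y hold a minibatch; none of this is taken from the repository:

c, gw, gb, gwh1, gbh1 = train(batch_x, batch_y)

# velocity updates
VW = zeta * VW - alpha * gw
VB = zeta * VB - alpha * gb
VWH1 = zeta * VWH1 - alpha * gwh1
VBH1 = zeta * VBH1 - alpha * gbh1

# Nesterov-style parameter step: apply the momentum-adjusted velocity
L.W.set_value(L.W.get_value() + zeta * VW - alpha * gw)
L.B.set_value(L.B.get_value() + zeta * VB - alpha * gb)
H.W.set_value(H.W.get_value() + zeta * VWH1 - alpha * gwh1)
H.B.set_value(H.B.get_value() + zeta * VBH1 - alpha * gbh1)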


Note: The LogisticRegression.LogisticRegression.predict method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution and use are governed by each project's license. Do not reproduce without permission.