

Python LogisticRegression.LogisticRegression Class Code Examples

This article collects typical usage examples of the LogisticRegression.LogisticRegression class in Python. If you are wondering what the LogisticRegression class does, how to use it, or what real code that uses it looks like, the curated class examples below should help.


The 15 code examples of the LogisticRegression class shown below are sorted by popularity by default.
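
Most of the examples below assume a small NumPy implementation with the constructor LogisticRegression(input, label, n_in, n_out) and the methods train, negative_log_likelihood, and predict, as found in yusugomori-style DeepLearning repositories. For reference, here is a minimal, self-contained sketch of that interface; the gradient step and regularization details are assumptions for illustration, not the exact code of any project cited below.

import numpy

def softmax(x):
    e = numpy.exp(x - numpy.max(x, axis=1, keepdims=True))  # shifted for numerical stability
    return e / e.sum(axis=1, keepdims=True)

class LogisticRegression(object):
    """Minimal softmax regression matching the interface used in the examples."""

    def __init__(self, input, label, n_in, n_out):
        self.x = input                       # design matrix, shape (n_samples, n_in)
        self.y = label                       # one-hot labels, shape (n_samples, n_out)
        self.W = numpy.zeros((n_in, n_out))
        self.b = numpy.zeros(n_out)

    def train(self, lr=0.1, input=None, L2_reg=0.0):
        if input is not None:
            self.x = input
        p_y_given_x = softmax(numpy.dot(self.x, self.W) + self.b)
        self.d_y = self.y - p_y_given_x      # error signal, reused by backprop in the MLP examples
        self.W += lr * numpy.dot(self.x.T, self.d_y) - lr * L2_reg * self.W
        self.b += lr * numpy.mean(self.d_y, axis=0)

    def negative_log_likelihood(self):
        p = softmax(numpy.dot(self.x, self.W) + self.b)
        return -numpy.mean(numpy.sum(self.y * numpy.log(p), axis=1))

    def predict(self, x):
        return softmax(numpy.dot(x, self.W) + self.b)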

Example 1: MLP

class MLP(object):
    def __init__(self, input, label, n_in, n_hidden, n_out, rng=None):

        self.x = input
        self.y = label

        if rng is None:
            rng = numpy.random.RandomState(1234)

        # construct hidden_layer (tanh or sigmoid so far)
        self.hidden_layer = HiddenLayer(input=self.x,
                                        n_in=n_in,
                                        n_out=n_hidden,
                                        rng=rng,
                                        activation=numpy.tanh)

        # construct log_layer (softmax)
        self.log_layer = LogisticRegression(input=self.hidden_layer.output,
                                            label=self.y,
                                            n_in=n_hidden,
                                            n_out=n_out)

    def train(self):
        # forward pass through the hidden layer
        layer_input = self.hidden_layer.forward()
        # one gradient step on the softmax output layer
        self.log_layer.train(input=layer_input)
        # backpropagate the output layer's error into the hidden layer
        self.hidden_layer.backward(prev_layer=self.log_layer)

    def predict(self, x):
        x = self.hidden_layer.output(x)
        return self.log_layer.predict(x)
Developer: 460130107, Project: DeepLearning, Lines: 31, Source: MLP.py
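
A typical way to drive this MLP class (adapted from the test function in the same repository; treat it as a sketch that assumes MLP.py and its HiddenLayer/LogisticRegression dependencies are importable):

import numpy
from MLP import MLP  # assumes MLP.py from the DeepLearning project above is on the path

# XOR-style toy problem with one-hot labels
x = numpy.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = numpy.array([[0, 1], [1, 0], [1, 0], [0, 1]])

classifier = MLP(input=x, label=y, n_in=2, n_hidden=3, n_out=2,
                 rng=numpy.random.RandomState(123))

for epoch in range(5000):     # plain full-batch training loop
    classifier.train()

print(classifier.predict(x))  # rows of class probabilities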

Example 2: tuneThreshold

def tuneThreshold():
    """
        Explore different values of threshold to see which one fits best
    """
    thresholds = np.linspace(0.4,0.6, 10)
    
    bestAcc = 0.0
    bestModel = None
    X_tr, y_tr, w_tr = loadData()
    m, n = X_tr.shape
    for th in thresholds:
        model = LogisticRegression(features=['PRI_tau_eta',
                                            'PRI_lep_eta',
                                            'DER_deltar_tau_lep',
                                            'PRI_met_sumet',
                                            'DER_mass_transverse_met_lep'],
                                    threshold=th)
        model.train(X_tr, y_tr, w_tr)
        p, r = model.predict(X_tr)
        #calculate some accuracy on the same train set
        acc =  100.0*(p.flatten() == y_tr.flatten()).sum()/m
        print "%s %s%%"%(th, acc)
        if acc > bestAcc:
            bestAcc = acc
            bestModel = model
    
    #save the best model, named by its accuracy
    bestModel.save('data/logisticRegression%.2f.txt'%bestAcc)
Developer: amccrea, Project: higgs-kaggle, Lines: 28, Source: LogisticRegressionTunning.py
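
The sweep above is easy to reproduce without the custom class: given real-valued scores and binary labels, pick the cutoff that maximizes accuracy. A self-contained sketch on synthetic data (the 0.4-0.6 range mirrors the example):

import numpy as np

rng = np.random.RandomState(0)
scores = rng.rand(1000)                                  # stand-in for predicted probabilities
labels = (scores + 0.1 * rng.randn(1000) > 0.5).astype(int)

best_th, best_acc = None, 0.0
for th in np.linspace(0.4, 0.6, 10):
    acc = 100.0 * ((scores > th).astype(int) == labels).mean()
    if acc > best_acc:
        best_th, best_acc = th, acc
print("best threshold %.3f -> %.2f%% accuracy" % (best_th, best_acc))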

Example 3: __init__

    def __init__(self, image_shape = [28, 12], filter_shape = [5, 5],
                nkerns = [20, 50], batch_size = 500):
        self.layers = []
        rng = np.random.RandomState(23455)

        # generate symbolic variables for input (x and y represent a
        # minibatch)
        self.x = T.matrix('x')  # data, presented as rasterized images
        self.y = T.ivector('y')  # labels, presented as 1D vector of [int] labels

        layer0_input = self.x.reshape((batch_size, 1, image_shape[0], image_shape[0]))
        # Construct the first convolutional pooling layer:
        # filtering reduces the image size to (28-5+1 , 28-5+1) = (24, 24)
        # maxpooling reduces this further to (24/2, 24/2) = (12, 12)
        # 4D output tensor is thus of shape (batch_size, nkerns[0], 12, 12)
        layer0 = Layer(
            rng,
            input=layer0_input,
            image_shape=(batch_size, 1, image_shape[0], image_shape[0]),
            filter_shape=(nkerns[0], 1, filter_shape[0], filter_shape[0]),
            poolsize=(2, 2)
        )
        self.layers.append(layer0)

        # Construct the second convolutional pooling layer
        # filtering reduces the image size to (12-5+1, 12-5+1) = (8, 8)
        # maxpooling reduces this further to (8/2, 8/2) = (4, 4)
        # 4D output tensor is thus of shape (batch_size, nkerns[1], 4, 4)
        layer1 = Layer(
            rng,
            input=layer0.output,
            image_shape=(batch_size, nkerns[0], image_shape[1], image_shape[1]),
            filter_shape=(nkerns[1], nkerns[0], filter_shape[1], filter_shape[1]),
            poolsize=(2, 2)
        )
        self.layers.append(layer1)

        # the HiddenLayer being fully-connected, it operates on 2D matrices of
        # shape (batch_size, num_pixels) (i.e matrix of rasterized images).
        # This will generate a matrix of shape (batch_size, nkerns[1] * 4 * 4),
        # or (500, 50 * 4 * 4) = (500, 800) with the default values.
        layer2_input = layer1.output.flatten(2)

        # construct a fully-connected sigmoidal layer
        layer2 = HiddenLayer(
            input=layer2_input,
            rng = rng,
            n_in=nkerns[1] * 4 * 4,
            n_out=500,
            activ=T.tanh
        )
        self.layers.append(layer2)

        # classify the values of the fully-connected sigmoidal layer
        layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=10)
        self.layers.append(layer3)

        # the cost we minimize during training is the NLL of the model
        self.cost = layer3.negative_log_likelihood(self.y)
Developer: bomboom, Project: MNIST_DeepLearning, Lines: 59, Source: LeNet.py
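
The shape comments in this example all follow the same rule: a 'valid' convolution shrinks each side by filter_size - 1, and non-overlapping max-pooling then divides it by the pool size. A tiny helper makes the arithmetic explicit:

def conv_pool_out(size, filt, pool):
    """Side length after a 'valid' convolution followed by max-pooling."""
    return (size - filt + 1) // pool

assert conv_pool_out(28, 5, 2) == 12   # layer0: 28 -> 24 -> 12
assert conv_pool_out(12, 5, 2) == 4    # layer1: 12 -> 8 -> 4
print(50 * 4 * 4)                      # 800 inputs to the fully-connected layer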

Example 4: test_classification

    def test_classification(self, X, y):
        logreg = LogisticRegression(lam_2=0.5)
        logreg.train(X, y)
        print("predict", logreg.predict(X[0]))
        print("error:", sum((np.array([logreg.predict(x)
                                       for x in X]) - np.array(y))**2))
        print("F:", logreg.F(logreg.w, X, y))
        print("w:", logreg.w)

        print(logreg.fevals, logreg.gevals, logreg.adp)
Developer: heidekrueger, Project: CaseStudiesMachineLearning, Lines: 10, Source: LogisticRegressionTest.py

Example 5: cross_validation

def cross_validation(X,y,bsize, fold, eta, solver="SGD", wdecay=0):
    import numpy as np
    from sklearn.cross_validation import StratifiedKFold
    from LogisticRegression import LogisticRegression
    scores=[]
    skf = StratifiedKFold( y, fold) 
    for train_index, test_index in skf:
        X_train, X_test, y_train, y_test = X[train_index,:], X[test_index,:], y[train_index], y[test_index]
        lr = LogisticRegression(learning=solver,weight_decay=wdecay,eta_0=eta, batch_size=bsize)
        lr.fit(X_train,y_train)
        scores.append(lr.score(X_test,y_test))
    return np.mean(scores)
Developer: esayyari, Project: Projects, Lines: 11, Source: readdata.py
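
Note that sklearn.cross_validation has since been removed from scikit-learn; the same loop written against the current sklearn.model_selection API, with scikit-learn's own LogisticRegression standing in for the custom class, looks like this (a sketch, not the original project's code):

import numpy as np
from sklearn.model_selection import StratifiedKFold
from sklearn.linear_model import LogisticRegression

def cross_validation(X, y, fold=5):
    scores = []
    skf = StratifiedKFold(n_splits=fold)
    for train_index, test_index in skf.split(X, y):
        lr = LogisticRegression(max_iter=1000)
        lr.fit(X[train_index], y[train_index])
        scores.append(lr.score(X[test_index], y[test_index]))
    return np.mean(scores)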

Example 6: __init__

    def __init__(self, input=None, label=None,\
                 n_ins=2, hidden_layer_sizes=[3, 3], n_outs=2,\
                 rng=None):
        
        self.x = input
        self.y = label

        self.sigmoid_layers = []
        self.rbm_layers = []
        self.n_layers = len(hidden_layer_sizes)  # = len(self.rbm_layers)

        if rng is None:
            rng = numpy.random.RandomState(1234)

        
        assert self.n_layers > 0


        # construct multi-layer
        for i in xrange(self.n_layers):
            # layer_size
            if i == 0:
                input_size = n_ins
            else:
                input_size = hidden_layer_sizes[i - 1]

            # layer_input
            if i == 0:
                layer_input = self.x
            else:
                layer_input = self.sigmoid_layers[-1].sample_h_given_v()
                
            # construct sigmoid_layer
            sigmoid_layer = HiddenLayer(input=layer_input,
                                        n_in=input_size,
                                        n_out=hidden_layer_sizes[i],
                                        rng=rng,
                                        activation=sigmoid)
            self.sigmoid_layers.append(sigmoid_layer)


            # construct rbm_layer
            rbm_layer = RBM(input=layer_input,
                            n_visible=input_size,
                            n_hidden=hidden_layer_sizes[i],
                            W=sigmoid_layer.W,     # W, b are shared
                            hbias=sigmoid_layer.b)
            self.rbm_layers.append(rbm_layer)


        # layer for output using Logistic Regression
        #print self.sigmoid_layers[-1].sample_h_given_v().shape
        self.log_layer = LogisticRegression(input=self.sigmoid_layers[-1].sample_h_given_v(),
                                            label=self.y,
                                            n_in=hidden_layer_sizes[-1],
                                            n_out=n_outs)

        # finetune cost: the negative log likelihood of the logistic regression layer
        self.finetune_cost = self.log_layer.negative_log_likelihood()
Developer: abhisg, Project: DeepLearning, Lines: 59, Source: DBN.py

Example 7: main

def main():
    #model = FakeModel() #TODO model parameters
    model = LogisticRegression(features=['PRI_tau_eta',
                                        'PRI_lep_eta',
                                        'DER_deltar_tau_lep',
                                        'PRI_met_sumet',
                                        'DER_mass_transverse_met_lep'])
    #load some previously saved model parameters                                    
    model.load('data/logisticRegression69.61.txt')
    
    #load test data
    data = np.loadtxt("data/test.csv", delimiter=',', skiprows=1)
    ids = data[:,0].astype(int) #first column is id
    X = data[:,1:31] #30 features
    
    #make predictions (ranking and label)
    r, p = model.predict(X)
    
    #save to file
    save(ids, r, p)
Developer: amccrea, Project: higgs-kaggle, Lines: 20, Source: makeSubmission.py

Example 8: __init__

class NeuralNetwork:
    def __init__(self, rng, n_in, n_out, hl):

        #  will contain basically a list of Hidden Layers objects.
        self.layers = []

        inp_size = n_in
        for i in range(len(hl)):
            HL = HiddenLayer(rng, inp_size, hl[i])
            self.layers.append(HL)
            inp_size = hl[i]

        self.op = LogisticRegression(inp_size, n_out)
        self.params = []
        for l in self.layers:
            self.params = self.params + l.params

        self.params = self.params + self.op.params
        # self.params = [l.params for l in self.layers]

        #  forward pass is here

    def forward(self, x):
        act = [x]

        for i, l in enumerate(self.layers):
            act.append(l.output(act[i]))

        return act

    def cost(self, x, y):
        act = self.forward(x)
        estimate = act[-1]
        return self.op.cost(estimate, y)

    def calcAccuracy(self, x, y):
        act = self.forward(x)
        ll = act[-1]
        return self.op.calcAccuracy(ll, y)
Developer: adhaka, Project: summers, Lines: 39, Source: nn.py

Example 9: __init__

    def __init__(self, numpy_rng, theano_rng = None, n_ins=784,
                hidden_layers_sizes = [500, 500], n_outs = 10, mode = 'dA'):
        self.sigmoid_layers = []
        self.ae_layers = []
        self.params = []
        self.n_layers = len(hidden_layers_sizes)

        self.x = T.matrix('x')
        self.y = T.ivector('y')  # the labels are presented as 1D vector of
                                 # [int] labels

        assert self.n_layers > 0

        if not theano_rng:
            theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
        # allocate symbolic variables for the data

        for i in range(self.n_layers):
            if i==0:
                input_size = n_ins
                layer_input = self.x
            else:
                input_size = hidden_layers_sizes[i-1]
                layer_input = self.sigmoid_layers[-1].output

            sigmoid_layer = HiddenLayer(rng = numpy_rng, input = layer_input,
                                        n_in = input_size,
                                        n_out = hidden_layers_sizes[i],
                                        activ = T.nnet.sigmoid)
            self.sigmoid_layers.append(sigmoid_layer)
            self.params.extend(sigmoid_layer.params)

            #initialize dA or sA
            if mode == 'sA':
                ae_layer = SparseAE(numpy_rng = numpy_rng, theano_rng = theano_rng,
                                    input = layer_input, n_visible = input_size,
                                    n_hidden = hidden_layers_sizes[i],
                                    W = sigmoid_layer.W, bhid = sigmoid_layer.b)
            else:
                ae_layer = DenoiseAE(numpy_rng = numpy_rng, theano_rng = theano_rng,
                                    input = layer_input, n_visible = input_size,
                                    n_hidden = hidden_layers_sizes[i],
                                    W = sigmoid_layer.W, bhid = sigmoid_layer.b)
            self.ae_layers.append(ae_layer)

        self.logLayer = LogisticRegression(input = self.sigmoid_layers[-1].output,
                                            n_in = hidden_layers_sizes[-1],
                                            n_out = n_outs)
        self.params.extend(self.logLayer.params)
        self.finetune_cost = self.logLayer.negative_log_likelihood(self.y)
        self.errors = self.logLayer.errors(self.y)
Developer: bomboom, Project: MNIST_DeepLearning, Lines: 51, Source: StackedAE.py

Example 10: __init__

    def __init__(self, input, label,\
                 n_in, hidden_layer_sizes, n_out,\
                 rng=None, activation=ReLU):

        self.x = input
        self.y = label

        self.hidden_layers = []
        self.n_layers = len(hidden_layer_sizes)
        
        if rng is None:
            rng = numpy.random.RandomState(1234)

        assert self.n_layers > 0


        # construct multi-layer 
        for i in xrange(self.n_layers):

            # layer_size
            if i == 0:
                input_size = n_in
            else:
                input_size = hidden_layer_sizes[i-1]

            # layer_input
            if i == 0:
                layer_input = self.x

            else:
                layer_input = self.hidden_layers[-1].output()

            # construct hidden_layer
            hidden_layer = HiddenLayer(input=layer_input,
                                       n_in=input_size,
                                       n_out=hidden_layer_sizes[i],
                                       rng=rng,
                                       activation=activation)
            
            self.hidden_layers.append(hidden_layer)


        # layer for output using Logistic Regression (softmax)
        self.log_layer = LogisticRegression(input=self.hidden_layers[-1].output(),
                                            label=self.y,
                                            n_in=hidden_layer_sizes[-1],
                                            n_out=n_out)
Developer: 2php, Project: DeepLearning, Lines: 47, Source: Dropout.py

Example 11: testF

    def testF(self, X, y):
        logreg = LogisticRegression(lam_2=0.5)
        logreg.train(X, y)
        print("f complete")
        print(logreg.f(logreg.w, X[0], y[0]))
        print("f for first entry")
        print(logreg.f(logreg.w, X[0], y[0]))
        print("F")
        print(logreg.F(logreg.w, X, y))
        print("g ")
        print(logreg.g(logreg.w, X[0], y[0]))
Developer: heidekrueger, Project: CaseStudiesMachineLearning, Lines: 11, Source: LogisticRegressionTest.py

Example 12: __init__

    def __init__(self, input, label, n_in, n_hidden, n_out, rng=None):

        self.x = input
        self.y = label

        if rng is None:
            rng = numpy.random.RandomState(1234)

        # construct hidden_layer (tanh or sigmoid so far)
        self.hidden_layer = HiddenLayer(input=self.x,
                                        n_in=n_in,
                                        n_out=n_hidden,
                                        rng=rng,
                                        activation=numpy.tanh)

        # construct log_layer (softmax)
        self.log_layer = LogisticRegression(input=self.hidden_layer.output,
                                            label=self.y,
                                            n_in=n_hidden,
                                            n_out=n_out)
Developer: 460130107, Project: DeepLearning, Lines: 20, Source: MLP.py

Example 13: __init__

    def __init__(self, np_rng, theano_rng=None, n_ins=784, hidden_layer_sizes=[500, 500], n_outs=10):
        
        self.sigmoid_layers = []
        self.dA_layers = []
        self.params = []
        self.n_layers = len(hidden_layer_sizes)
        
        assert self.n_layers > 0

        if not theano_rng:
            theano_rng = RandomStreams(np_rng.randint(2 ** 30))
     
        self.x = T.matrix('x') 
        self.y = T.ivector('y') 
        
        for i in xrange(self.n_layers):
            if i == 0:
                n_in = n_ins
                layer_input = self.x
            else:
                n_in = hidden_layer_sizes[i-1]
                layer_input = self.sigmoid_layers[-1].output

            n_out = hidden_layer_sizes[i]            
            
            sigmoid_layer = HiddenLayer(np_rng, layer_input, n_in, n_out, activation=T.nnet.sigmoid)
            self.sigmoid_layers.append(sigmoid_layer)
            
            self.params.extend(sigmoid_layer.params)
            
            dA_layer = AutoEncoder(np_rng, n_in, n_out, theano_rng=theano_rng, input=layer_input, 
                                   W=sigmoid_layer.W, b_hid=sigmoid_layer.b)
            self.dA_layers.append(dA_layer)
            
        self.log_layer = LogisticRegression(self.sigmoid_layers[-1].output, self.y, hidden_layer_sizes[-1], n_outs)
        self.params.extend(self.log_layer.params)

        self.finetune_cost = self.log_layer.negative_log_likelihood()
        self.errors = self.log_layer.errors()        
Developer: kmakantasis, Project: DL-Multispectral, Lines: 39, Source: StackedAutoEncoders.py

Example 14: modifyModel

    def modifyModel(self, batch_size, dataset):
        print("Start to modify the model---------------------------")
        self.dataset = dataset
        self.batch_size = batch_size

        """
            create Model
        """

        datasets = YiWenData.load_data(dataset)

        test_set_x, test_set_y = datasets[2]

        # allocate symbolic variables for the data
        index = T.lscalar()  # index to a [mini]batch

        # start-snippet-1
        x = T.matrix('x')  # the data is presented as rasterized images

        # [int] labels

        print('... building the model')

        self.layer0_input = x.reshape((self.batch_size, 1, 28, 28))

        self.layer0.modify(self.layer0_input, (self.batch_size, 1, 28, 28))

        self.layer1.modify(self.layer0.output, (self.batch_size, self.nkerns[0], 12, 12))

        self.layer2_input = self.layer1.output.flatten(2)

        # the fully-connected sigmoidal layer (layer2) is reused unchanged

        # classify the values of the fully-connected sigmoidal layer
        self.layer3 = LogisticRegression(input=self.layer2.output, n_in=500, n_out=7)

        print("-------batch_size---------batch_size---------batch_size------------",self.layer0.image_shape)
Developer: Shimion100, Project: YiWen4p27, Lines: 38, Source: CnnModel.py

Example 15: __init__

    def __init__(self, input=None, label=None,\
                 n_ins=2, hidden_layer_sizes=[3, 3], n_outs=2,\
                 numpy_rng=None):
        """
            documentation copied from:
            http://www.cse.unsw.edu.au/~cs9444/Notes13/demo/DBN.py
        This class is made to support a variable number of layers.

        :type numpy_rng: numpy.random.RandomState
        :param numpy_rng: numpy random number generator used to draw initial
                    weights

        :type theano_rng: theano.tensor.shared_randomstreams.RandomStreams
        :param theano_rng: Theano random generator; if None is given one is
                           generated based on a seed drawn from `rng`

        :type n_ins: int
        :param n_ins: dimension of the input to the DBN

        :type n_layers_sizes: list of ints
        :param n_layers_sizes: intermediate layers size, must contain
                               at least one value

        :type n_outs: int
        :param n_outs: dimension of the output of the network
        """



        self.x = input
        self.y = label

        self.sigmoid_layers = []
        self.rbm_layers = []
        self.n_layers = len(hidden_layer_sizes)  # = len(self.rbm_layers)

        if numpy_rng is None:
            numpy_rng = numpy.random.RandomState(1234)


        assert self.n_layers > 0


        # construct multi-layer
        #ORIG# for i in xrange(self.n_layers):
        for i in range(self.n_layers):
            # layer_size
            if i == 0:
                input_size = n_ins
            else:
                input_size = hidden_layer_sizes[i - 1]

            # layer_input
            if i == 0:
                layer_input = self.x
            else:
                layer_input = self.sigmoid_layers[-1].sample_h_given_v()

            # construct sigmoid_layer
            sigmoid_layer = HiddenLayer(input=layer_input,
                                        n_in=input_size,
                                        n_out=hidden_layer_sizes[i],
                                        numpy_rng=numpy_rng,
                                        activation=sigmoid)
            self.sigmoid_layers.append(sigmoid_layer)


            # construct rbm_layer
            rbm_layer = RBM(input=layer_input,
                            n_visible=input_size,
                            n_hidden=hidden_layer_sizes[i],
                            W=sigmoid_layer.W,     # W, b are shared
                            hbias=sigmoid_layer.b)
            self.rbm_layers.append(rbm_layer)


        # layer for output using Logistic Regression
        self.log_layer = LogisticRegression(input=self.sigmoid_layers[-1].sample_h_given_v(),
                                            label=self.y,
                                            n_in=hidden_layer_sizes[-1],
                                            n_out=n_outs)

        # finetune cost: the negative log likelihood of the logistic regression layer
        self.finetune_cost = self.log_layer.negative_log_likelihood()
Developer: ddofer, Project: Kaggle-HUJI-ML, Lines: 84, Source: DBN.py
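
The layer bookkeeping the docstring describes (each hidden/RBM layer consumes the previous layer's output) reduces to pairing consecutive sizes; a short, standalone illustration of the resulting shapes:

n_ins, hidden_layer_sizes, n_outs = 2, [3, 3], 2

sizes = [n_ins] + hidden_layer_sizes
for i, (n_in, n_out) in enumerate(zip(sizes[:-1], sizes[1:])):
    print("hidden/RBM layer %d: %d -> %d" % (i, n_in, n_out))
print("logistic layer: %d -> %d" % (hidden_layer_sizes[-1], n_outs))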


Note: The LogisticRegression.LogisticRegression class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. For redistribution and use, please refer to the corresponding project's license. Do not republish without permission.