当前位置: 首页>>代码示例>>Python>>正文


Python FeedForwardNetwork.activateOnDataset方法代码示例

本文整理汇总了Python中pybrain.structure.FeedForwardNetwork.activateOnDataset方法的典型用法代码示例。如果您正苦于以下问题:Python FeedForwardNetwork.activateOnDataset方法的具体用法?Python FeedForwardNetwork.activateOnDataset怎么用?Python FeedForwardNetwork.activateOnDataset使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在pybrain.structure.FeedForwardNetwork的用法示例。


在下文中一共展示了FeedForwardNetwork.activateOnDataset方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: run

# 需要导入模块: from pybrain.structure import FeedForwardNetwork [as 别名]
# 或者: from pybrain.structure.FeedForwardNetwork import activateOnDataset [as 别名]
def run():
    # Load reverse-correlation model-fit results from a pickled database,
    # build a small feed-forward network (RBF input connection -> 4 sigmoid
    # hidden units -> 1 sigmoid output) over the receptive-field pixel grid,
    # gradient-check it, and -- in the currently unreachable code after the
    # early `return` below -- train it with backprop and report stimulus
    # identification performance against the sigmoid-transformed predictions.
    import scipy
    from scipy import linalg

    # NOTE(review): file handle is never closed; acceptable for a one-off
    # analysis script, but a `with` block would be safer.
    f = open("modelfitDatabase1.dat", "rb")
    import pickle

    dd = pickle.load(f)
    # Hard-coded node index -- presumably selects one experiment's results;
    # TODO confirm index 13 is the intended dataset.
    node = dd.children[13]

    # Receptive fields estimated by reverse correlation (one 2D array per unit).
    rfs = node.children[0].data["ReversCorrelationRFs"]

    # Linear-prediction activities for training and validation stimuli.
    pred_act = numpy.array(node.children[0].data["ReversCorrelationPredictedActivities"])
    pred_val_act = numpy.array(node.children[0].data["ReversCorrelationPredictedValidationActivities"])

    training_set = node.data["training_set"]
    validation_set = node.data["validation_set"]
    training_inputs = node.data["training_inputs"]
    validation_inputs = node.data["validation_inputs"]

    # Fit per-unit sigmoid output functions to the linear predictions and
    # apply them to both training and validation predictions.
    ofs = contrib.modelfit.fit_sigmoids_to_of(numpy.mat(training_set), numpy.mat(pred_act))
    pred_act_t = contrib.modelfit.apply_sigmoid_output_function(numpy.mat(pred_act), ofs)
    pred_val_act_t = contrib.modelfit.apply_sigmoid_output_function(numpy.mat(pred_val_act), ofs)

    # Network input dimensionality equals the receptive-field pixel count.
    (sx, sy) = numpy.shape(rfs[0])
    print sx, sy
    n = FeedForwardNetwork()

    inLayer = LinearLayer(sx * sy)
    hiddenLayer = SigmoidLayer(4)
    outputLayer = SigmoidLayer(1)

    n.addInputModule(inLayer)
    n.addModule(hiddenLayer)
    n.addOutputModule(outputLayer)

    # Custom RBF connection over the (sx, sy) grid; the plain FullConnection
    # alternative is kept below for comparison.
    in_to_hidden = RBFConnection(sx, sy, inLayer, hiddenLayer)
    # in_to_hidden = FullConnection(inLayer, hiddenLayer)
    hidden_to_out = FullConnection(hiddenLayer, outputLayer)

    n.addConnection(in_to_hidden)
    n.addConnection(hidden_to_out)
    n.sortModules()
    gradientCheck(n)
    # NOTE(review): everything below this `return` is unreachable -- it looks
    # like the training/evaluation path was disabled while debugging the
    # gradient check. Remove the `return` to re-enable it.
    return

    from pybrain.datasets import SupervisedDataSet

    # Supervised datasets: flattened stimulus -> first unit's response.
    ds = SupervisedDataSet(sx * sy, 1)
    val = SupervisedDataSet(sx * sy, 1)

    for i in xrange(0, len(training_inputs)):
        ds.addSample(training_inputs[i], training_set[i, 0])

    for i in xrange(0, len(validation_inputs)):
        val.addSample(validation_inputs[i], validation_set[i, 0])

    # 10% of the training data held out as a test split.
    tstdata, trndata = ds.splitWithProportion(0.1)

    from pybrain.supervised.trainers import BackpropTrainer

    trainer = BackpropTrainer(n, trndata, momentum=0.1, verbose=True, learningrate=0.002)

    # Reduce all sets to the first unit's column for the comparisons below.
    training_set = numpy.array(numpy.mat(training_set)[:, 0])
    validation_set = numpy.array(numpy.mat(validation_set)[:, 0])
    pred_val_act_t = numpy.array(numpy.mat(pred_val_act_t)[:, 0])

    # Baseline performance of the untrained network.
    out = n.activateOnDataset(val)
    (ranks, correct, pred) = contrib.modelfit.performIdentification(validation_set, out)
    print "Correct:", correct, "Mean rank:", numpy.mean(ranks), "MSE", numpy.mean(numpy.power(validation_set - out, 2))

    print "Start training"
    for i in range(50):
        trnresult = percentError(trainer.testOnData(), trndata)
        tstresult = percentError(trainer.testOnData(dataset=tstdata), tstdata)

        print "epoch: %4d" % trainer.totalepochs, "  train error: %5.2f%%" % trnresult, "  test error: %5.2f%%" % tstresult
        trainer.trainEpochs(1)

        # Re-evaluate identification performance after each epoch.
        out = n.activateOnDataset(val)
        (ranks, correct, pred) = contrib.modelfit.performIdentification(validation_set, out)
        print "Correct:", correct, "Mean rank:", numpy.mean(ranks), "MSE", numpy.mean(
            numpy.power(validation_set - out, 2)
        )

    out = n.activateOnDataset(val)

    print numpy.shape(out)
    print numpy.shape(validation_set)

    # Final network performance ...
    (ranks, correct, pred) = contrib.modelfit.performIdentification(validation_set, out)
    print "Correct:", correct, "Mean rank:", numpy.mean(ranks), "MSE", numpy.mean(numpy.power(validation_set - out, 2))

    # ... versus the sigmoid-transformed linear-model predictions as reference.
    (ranks, correct, pred) = contrib.modelfit.performIdentification(validation_set, pred_val_act_t)
    print "Correct:", correct, "Mean rank:", numpy.mean(ranks), "MSE", numpy.mean(
        numpy.power(validation_set - pred_val_act_t, 2)
    )

    return n
开发者ID:ioam,项目名称:svn-history,代码行数:101,代码来源:pybrainstuff.py

示例2: brescia_nn

# 需要导入模块: from pybrain.structure import FeedForwardNetwork [as 别名]
# 或者: from pybrain.structure.FeedForwardNetwork import activateOnDataset [as 别名]
def brescia_nn(train, test, max_epochs=None, verbose=False):
    trainval_ds = SupervisedDataSet(5, 1)
    test_ds = SupervisedDataSet(5, 1)
    
    for datum in train:
        trainval_ds.addSample(datum[:5], (datum[5],))

    for datum in test:
        test_ds.addSample(datum[:5], (datum[5],))
    
    train_ds, val_ds = trainval_ds.splitWithProportion(0.75)
    
    if verbose:
        print "Train, validation, test:", len(train_ds), len(val_ds), len(test_ds)
    
    ns = {}
    min_error = -1
    min_h = -1
    
    # use validation to form 4-layer network with two hidden layers,
    # with (2n + 1) nodes in the first hidden layer and somewhere from
    # 1 to (n - 1) in the second hidden layer
    for h2 in range(1, 5):
        if verbose:
            start = time.time()
            print "h2 nodes:", h2
    
        # create the network
        if verbose:
            print "building network"

        n = FeedForwardNetwork()
        inLayer = LinearLayer(5)
        hiddenLayer1 = SigmoidLayer(11)
        hiddenLayer2 = SigmoidLayer(h2)
        outLayer = LinearLayer(1)
    
        n.addInputModule(inLayer)
        n.addModule(hiddenLayer1)
        n.addModule(hiddenLayer2)
        n.addOutputModule(outLayer)
    
        in_to_hidden = FullConnection(inLayer, hiddenLayer1)
        hidden_to_hidden = FullConnection(hiddenLayer1, hiddenLayer2)
        hidden_to_out = FullConnection(hiddenLayer2, outLayer)
    
        n.addConnection(in_to_hidden)
        n.addConnection(hidden_to_hidden)
        n.addConnection(hidden_to_out)
    
        n.sortModules()
    
        # training
        if verbose:
            print "beginning training"
        trainer = BackpropTrainer(n, train_ds)
        trainer.trainUntilConvergence(maxEpochs=max_epochs)

        ns[h2] = n
    
        # validation
        if verbose:
            print "beginning validation"

        out = n.activateOnDataset(val_ds)
        actual = val_ds['target']
        error = np.sqrt(np.sum((out - actual)**2) / len(val_ds))
        if verbose:
            print "RMSE:", error
    
        if min_error == -1 or error < min_error:
            min_error = error
            min_h = h2
    
        if verbose:
            stop = time.time()
            print "Time:", stop - start
    
    # iterate through
    if verbose:
        print "best number of h2 nodes:", min_h
    out_test = ns[min_h].activateOnDataset(test_ds)

    return ns[h2], out_test
开发者ID:HIPS,项目名称:DESI-MCMC,代码行数:86,代码来源:brescia_nn.py

示例3: SupervisedDataSet

# 需要导入模块: from pybrain.structure import FeedForwardNetwork [as 别名]
# 或者: from pybrain.structure.FeedForwardNetwork import activateOnDataset [as 别名]
  # Wrap the pre-built x/y columns in a supervised dataset without resampling.
  dataset = SupervisedDataSet(1, 1)
  dataset.setField('input', x)
  dataset.setField('target', y)

  # train the network
  trainer = RPropMinusTrainerMix(n, dataset=dataset, verbose=True, 
                                 weightdecay=0.05)
  trainer.trainEpochs(200)
 
  # plot the density and other stuff
  p.subplot(2, 2, 3)
  dens = []
  # Dense grid over [0, 1) reused both as network input and as plot axes.
  newx = np.arange(0.0, 1.0, 0.01)
  newx = newx.reshape(newx.size, 1)
  dataset.setField('input', newx)
  out = n.activateOnDataset(dataset)
  # Each output row holds mixture parameters; assumed layout is
  # [0:N) weights, [N:2N) stds, [2N:3N) means -- TODO confirm against the
  # mixture-density network definition elsewhere in this file.
  for pars in out:
      stds = pars[N_GAUSSIANS:N_GAUSSIANS*2]
      means = pars[N_GAUSSIANS*2:N_GAUSSIANS*3]
      line = multigaussian(newx, means, stds)
      # Weighted sum of the component densities -> conditional density p(y|x).
      density = line[:,0] * pars[0]
      for gaussian in range(1, N_GAUSSIANS):
          density += line[:, gaussian] * pars[gaussian]
      dens.append(density)

  newx = newx.flatten()
  dens = np.array(dens).transpose()
  p.contourf(newx, newx, dens, 30)
  p.title("cond. probab. dens.")

  p.subplot(221)
开发者ID:Angeliqe,项目名称:pybrain,代码行数:33,代码来源:example_mixturedensity.py

示例4: BackpropTrainer

# 需要导入模块: from pybrain.structure import FeedForwardNetwork [as 别名]
# 或者: from pybrain.structure.FeedForwardNetwork import activateOnDataset [as 别名]
# #### Build and use the Trainer

# In[ ]:

# Learning rate shouldn't be bigger than 0.01 for this problem; lrdecay
# slowly anneals it while the high momentum (0.99) smooths the updates.
t = BackpropTrainer(fnn, learningrate = 0.01, momentum = 0.99, verbose = True,lrdecay=0.9999)

# Train on the dataset for 1500 epochs
t.trainOnDataset(DS, 1500)


# # Model Evaluation

# In[ ]:

y_pred = fnn.activateOnDataset(DS)
# "De-normalize" again to turn the predictions back into the original
# monetary value (wy is the scaling factor used during normalization).
y_pred = y_pred * wy


# In[ ]:

# Create the dataset for a regression line

x_pred = np.arange(5,870, 10)



# NOTE(review): target size 0 -- input-only dataset used just for prediction.
DS_Eval = SupervisedDataSet( 1, 0 )
# Append linked: x, y
for i in range(len(x_pred)):
开发者ID:Herka,项目名称:NeuralNetworkRegression,代码行数:33,代码来源:Neuralnetwork_Regression.py

示例5: fit

# 需要导入模块: from pybrain.structure import FeedForwardNetwork [as 别名]
# 或者: from pybrain.structure.FeedForwardNetwork import activateOnDataset [as 别名]
 def fit(self, ds, 
         epochs=100,
         hiddenSize=100, 
         initialLearningrate=0.002,
         decay=0.9999,
         myWeightdecay=0.8,
         plot=False,
         testDs=None,
         momentum=0): 
     firstSample = ds.getSample(0)
     print firstSample
     inputSize, hiddenSize, outputSize = len(firstSample[0]), hiddenSize, len(firstSample[1])
     inLayer = LinearLayer(inputSize)
     hiddenLayer =SigmoidLayer(hiddenSize)
     outLayer = LinearLayer(outputSize)
     n = FeedForwardNetwork()
     n.addInputModule(inLayer)
     n.addModule(hiddenLayer)
     b = BiasUnit()
     n.addModule(b)
     n.addOutputModule(outLayer)
     in_to_hidden = FullConnection(inLayer, hiddenLayer)
     hidden_to_out = FullConnection(hiddenLayer, outLayer)
     b_to_hidden = FullConnection(b, hiddenLayer)
     b_to_out = FullConnection(b, outLayer)
     n.addConnection(in_to_hidden)
     n.addConnection(hidden_to_out)
     n.addConnection(b_to_hidden)
     n.addConnection(b_to_out)
     n.sortModules()
     self.supervisedNet = n
     n = self.supervisedNet
     self.supervisedTrainer = BackpropTrainer(n, ds,  
                         learningrate=initialLearningrate,
                         lrdecay=decay, 
                         verbose=True, 
                         weightdecay=myWeightdecay,
                         batchlearning=True,
                         momentum=momentum)
     """
     #supervisedTrainer.trainEpochs(epochs)
     def eval(representationNet, output, target):
         output = [1 if o>0.5 else 0 for o in output]
         output = np.array(output)
         target = np.array(target)
         assert len(output) == len(target)
         n_correct = sum( output == target )
         return float(n_correct) / float(len(output))
     """
     #cv = CrossValidator(self.supervisedTrainer, ds,n_folds=int(len(ds)/5)) #valfunc=eval)
     
     Y = np.array([y for x,y in ds])
     if plot:
         cvResults = []
         trainResults = []
         totalF1s=[]
         totalTestf1 = []
         sums=[]
         lastTrainVec = np.zeros(Y.shape)
         for epochNum in range(epochs):
             self.supervisedTrainer.train()
             """
             pred = n.activateOnDataset(ds)
             f1s = []
             for col in range(pred.shape[1]):
                 _, bestF1 = labanUtil.getSplitThreshold(pred[:, col], Y[:, col])
                 f1s.append(bestF1)
             cvResults.append(np.mean(f1s))
             """
             #cvResults.append(cv.validate())
             trainVec = n.activateOnDataset(ds)
             #print trainVec[0]
             trainDif = np.abs(np.subtract(Y, trainVec))
             difdif = np.abs(np.subtract(lastTrainVec, trainVec))
             lastTrainVec = copy.deepcopy(trainVec)
             #print trainDif
             trainRes = float(sum(sum(trainDif)))/Y.shape[0]/Y.shape[1]
             print 'epoch num:', epochNum
             print 'trainDif sum: ', sum(sum(trainDif))
             print 'trainVec sum: ', sum(sum(np.abs(trainVec)))
             print 'difdif sum: ', sum(sum(difdif))
             print 'hiddenSize: ', hiddenSize
             print 'initialLearningrate', initialLearningrate
             print 'decay', decay
             print 'myWeightdecay', myWeightdecay
             print 'momentum', momentum
             s = sum(np.abs(trainVec[0]))
             s2 = sum(Y[0])
             print 'sum(trainVec[0])', s
             print 'sum(Y[0])', sum(Y[0])
             trainResults.append(trainRes)
             sums.append(s2)
             splits = []
             for col in range(trainVec.shape[1]):
                 bestSplit, bestF1 = labanUtil.getSplitThreshold(trainVec[:, col], Y[:, col])
                 splits.append(bestSplit)
             if not testDs is None: 
                 testPred = np.array(n.activateOnDataset(testDs))
                 Y_test = np.array([y for x,y in testDs])
             for col in range(trainVec.shape[1]):
#.........这里部分代码省略.........
开发者ID:ranBernstein,项目名称:Laban,代码行数:103,代码来源:autoencoder.py

示例6: buildNetwork

# 需要导入模块: from pybrain.structure import FeedForwardNetwork [as 别名]
# 或者: from pybrain.structure.FeedForwardNetwork import activateOnDataset [as 别名]
        feed_forward_net.addConnection(bias_to_hidden)
        feed_forward_net.addConnection(bias_to_out)
        """

    # Finalize the network topology before training.
    feed_forward_net.sortModules()

    # fnn = buildNetwork( trndata.indim, 20, trndata.outdim, bias = True, outclass=SigmoidLayer, hiddenclass=TanhLayer)

    trainer = BackpropTrainer(feed_forward_net, dataset=trndata, momentum=0.1, verbose=True, weightdecay=0.01)
    # Early-stopping training: 25% of trndata held out internally as validation.
    trainer.trainUntilConvergence(validationProportion=0.25, maxEpochs=100, continueEpochs=5)
    trnresult = percentError(trainer.testOnClassData(), trndata["class"])
    tstresult = percentError(trainer.testOnClassData(dataset=tstdata), tstdata["class"])

    print "epoch: %4d" % trainer.totalepochs, "  train error: %5.2f%%" % trnresult, "  test error: %5.2f%%" % tstresult

    out = feed_forward_net.activateOnDataset(tstdata)
    print out.shape
    # NOTE(review): sur_test is declared but never appended to -- the loop
    # below only hardens `out` rows in place to one-hot 0/1 values.
    sur_test = []
    for row in out:

        # Threshold the two-class soft outputs at 0.5 into a one-hot row.
        if row[0] >= 0.5:
            row[0] = 1
            row[1] = 0
        else:
            row[0] = 0
            row[1] = 1

    # print out
    # print sur_test
    # print sum(sur_test)
开发者ID:swanderingf,项目名称:Titanic,代码行数:32,代码来源:NN_pybrain.py

示例7: open

# 需要导入模块: from pybrain.structure import FeedForwardNetwork [as 别名]
# 或者: from pybrain.structure.FeedForwardNetwork import activateOnDataset [as 别名]
# Restore the network weights from disk if a saved file exists; otherwise
# dump the current (random) weights so the next run is reproducible.
# NOTE(review): the existence check uses `filetoopen` but the reads/writes use
# the hard-coded 'myparam2.txt' -- presumably these are meant to be the same
# path; verify against where filetoopen is assigned.
if os.path.isfile(filetoopen):
  myfile = open('myparam2.txt','r')
  c=[]
  # One float per line, in network parameter order.
  for line in myfile:
    c.append(float(line))
  n._setParameters(c)
else:
  myfile = open('myparam2.txt','w')
  for i in n.params:
    myfile.write(str(i)+'\n')
myfile.close()

# Activate the neural network on a single smoke-test sample.
act = SupervisedDataSet(1,1)
act.addSample((0.2,),(0.880422606518061,))
n.activateOnDataset(act)
# Create the test dataset: a sampled sine curve on [0, 1].
x = numpy.arange(0.0, 1.0+0.01, 0.01)
s = 0.5+0.4*numpy.sin(2*numpy.pi*x)
tsts = SupervisedDataSet(1,1)
tsts.setField('input',x.reshape(len(x),1))
tsts.setField('target',s.reshape(len(s),1))

# Read the train dataset from file (pybrain's native serialized format).
trndata = SupervisedDataSet.loadFromFile(os.path.join(os.getcwd(),'trndata'))

# Create the trainer

t = BackpropTrainer(n, learningrate = 0.01 ,
                    momentum = mom)
# Train the neural network from the train dataset
开发者ID:Boblogic07,项目名称:pybrain,代码行数:33,代码来源:jpq2layersWriter.py

示例8: NeuralNetworkClassification

# 需要导入模块: from pybrain.structure import FeedForwardNetwork [as 别名]
# 或者: from pybrain.structure.FeedForwardNetwork import activateOnDataset [as 别名]
class  NeuralNetworkClassification(algorithmbase):
	"""Feed-forward neural-network classifier built on pybrain.

	Topology: linear input layer -> `hiddenlayerscount` sigmoid hidden
	layers of `hiddenlayernodescount` nodes each -> softmax output layer
	with one unit per class. Training/prediction data and preprocessing
	settings come from the `algorithmbase` parent.
	"""

	def ExtraParams(self, hiddenlayerscount, hiddenlayernodescount):
		# Record the network topology parameters; returns self for chaining.
		self.hiddenlayerscount = hiddenlayerscount
		self.hiddenlayernodescount = hiddenlayernodescount
		return self

	def PreProcessTrainData(self):
		# Apply the configured missing-value handling and preprocessing
		# pipeline to the training data.
		self.traindata = preprocess_apply(self.traindata, self.missingvaluemethod, self.preprocessingmethods)

	def PrepareModel(self, savedmodel = None):
		# Build and train the network, or adopt a previously trained model.
		if savedmodel is not None:
			self.trainer = savedmodel
		else:
			attributescount = len(self.traindata[0])
			nrclass = len(set(self.trainlabel))
			self.ds = ClassificationDataSet(attributescount, target=nrclass, nb_classes=nrclass, class_labels=list(set(self.trainlabel)))

			for i in range(len(self.traindata)):
				self.ds.appendLinked(self.traindata[i], [self.trainlabel[i]])
			# One-hot encode the class targets for the softmax output.
			self.ds._convertToOneOfMany()

			self.net = FeedForwardNetwork()
			inLayer = LinearLayer(len(self.traindata[0]))
			self.net.addInputModule(inLayer)
			hiddenLayers = []
			for i in range(self.hiddenlayerscount):
				hiddenLayer = SigmoidLayer(self.hiddenlayernodescount)
				hiddenLayers.append(hiddenLayer)
				self.net.addModule(hiddenLayer)
			outLayer = SoftmaxLayer(nrclass)
			self.net.addOutputModule(outLayer)

			layers_connections = []
			layers_connections.append(FullConnection(inLayer, hiddenLayers[0]))
			# BUG FIX: the original loop `for i in range(self.hiddenlayerscount-1)`
			# connected hiddenLayers[i-1] -> hiddenLayers[i]; at i == 0 that wired
			# the *last* hidden layer back to the first and left the final
			# consecutive pair unconnected. Chain consecutive layers instead.
			for i in range(1, self.hiddenlayerscount):
				layers_connections.append(FullConnection(hiddenLayers[i-1], hiddenLayers[i]))
			layers_connections.append(FullConnection(hiddenLayers[-1], outLayer))

			for layers_connection in layers_connections:
				self.net.addConnection(layers_connection)
			self.net.sortModules()

			# training the network (a single backprop epoch)
			self.trainer = BackpropTrainer(self.net, self.ds)
			self.trainer.train()

	def PreProcessTestDate(self):
		# (name kept for interface compatibility -- "Date" is a typo for "Data")
		# Apply the same preprocessing pipeline to the test data.
		self.testdata = preprocess_apply(self.testdata, self.missingvaluemethod, self.preprocessingmethods)

	def Predict(self):
		# Classify the test data and store [true_labels, predictions] in
		# self.result. Labels are wrapped in a list to match the format used
		# when building the training dataset in PrepareModel.
		attributescount = len(self.testdata[0])
		nrclass = len(set(self.testlabel))
		dstraindata = ClassificationDataSet(attributescount, target=nrclass, nb_classes=nrclass, class_labels=list(set(self.testlabel)))
		for i in range(len(self.testdata)):
			dstraindata.appendLinked(self.testdata[i], [self.testlabel[i]])
		dstraindata._convertToOneOfMany()
		out = self.net.activateOnDataset(dstraindata)
		# Most probable class per sample (index of the max softmax output).
		prediction = out.argmax(axis=1)

		self.result = [self.testlabel, prediction]

	def GetModel(self):
		# The trained trainer doubles as the persisted model (see PrepareModel).
		return self.trainer

示例9: NeuralNet

# 需要导入模块: from pybrain.structure import FeedForwardNetwork [as 别名]
# 或者: from pybrain.structure.FeedForwardNetwork import activateOnDataset [as 别名]

#.........这里部分代码省略.........
                tmp=layersDict[layer[0]](layer[1])
                self.net.addModule(tmp)
                hiddenLayers.append(tmp)
             
            #connection between input and first hidden Layer  
            in_to_hidden=FullConnection(inLayer,hiddenLayers[0])
            self.net.addConnection(in_to_hidden)
            
            #connection between hidden Layers
            i=0
            for i in range(1,len(hiddenLayers)):
                hidden_to_hidden=FullConnection(hiddenLayers[i-1],hiddenLayers[i])
                self.net.addConnection(hidden_to_hidden)
            
            #connection between last hidden Layer and output Layer   
            hidden_to_out= FullConnection(hiddenLayers[i],outLayer)
            self.net.addConnection(hidden_to_out)     
            
            if(rs.bias==True):
                bias=BiasUnit('bias')
                self.net.addModule(bias)
                for layer in hiddenLayers :
                    bias_to_hidden = FullConnection(bias, layer)
                    self.net.addConnection(bias_to_hidden)
                
                bias_to_out = FullConnection(bias, outLayer)
                self.net.addConnection(bias_to_out)
                

        
        #initilisation of weight
        self.net.sortModules()
        self.shape=self.net.params.shape
        self.net._setParameters(np.random.normal(0.0,0.1,self.shape))
            
        
        self.ds = SupervisedDataSet(self.inputDimension, self.outputDimension)
        #print(self.net)
            
    def setTheta(self, theta):
        """Install *theta* as the network weights, reshaped to the stored shape."""
        new_params = theta.reshape(self.shape)
        self.net._setParameters(new_params)

    def getTheta(self):
        """Return the network's current flat parameter vector."""
        current = self.net.params
        return current

    def load(self, thetaFile):
        '''
        Load the network weights from "<thetaFile>.theta" (numpy text
        format), install them, and return the resulting parameter vector.
        '''
        weights = np.loadtxt(thetaFile + ".theta")
        self.net._setParameters(weights)
        return self.net.params

    def getTrainingData(self, inputData, outputData):
        '''
        Validate the given input and output data (delegated to the
        regression base class) and load every sample pair into the
        training dataset. Data should be organized by columns.

        Input:      -inputData, numpy N-D array
                    -outputData, numpy N-D array
        '''
        regression.getTrainingData(self, inputData, outputData)

        # numberOfSamples is set by the base-class validation call above.
        for sampleIndex in range(self.numberOfSamples):
            self.ds.addSample(inputData[sampleIndex], outputData[sampleIndex])

    def train(self):
        '''
        Perform batch regression: repeatedly run backprop epochs over
        self.ds, checkpointing the weights to disk whenever the training
        error improves on the best seen so far.

        NOTE(review): this loop never terminates on its own (no break or
        convergence test) -- the process must be stopped externally.
        '''
        trainer = BackpropTrainer(self.net, self.ds, learningrate=self.learningRate, momentum=self.momentum)

        # Initial threshold: only starts checkpointing once error drops below 10.
        minError=10
        while(True):
            error=trainer.train()
            print(self.meanSquareError())
            if(error<minError):
                minError=error
                # Persist the best weights so far (restored via load()).
                self.saveTheta(self.rs.path+self.rs.thetaFile+".theta")

        
        #trainer.trainUntilConvergence(maxEpochs=10, verbose=True)
        #trainer.trainEpochs(10)
    def computeOutput(self, inputVal):
        '''
        Activate the network on the given input and return its output.

        Input:      -inputVal: numpy N-D array whose first dimension must
                     equal self.inputDimension

        Output:     -numpy N-D array, the approximated output
        '''
        assert(inputVal.shape[0]==self.inputDimension), "NeuralNet: Bad input format : " + str(inputVal.shape[0])+"/"+str(self.inputDimension)
        return self.net.activate(inputVal)

    def meanSquareError(self):
        """Mean squared error of the network over the stored dataset."""
        predicted = self.net.activateOnDataset(self.ds)
        residuals = self.outputData - predicted
        return np.mean(residuals ** 2)
开发者ID:osigaud,项目名称:ArmModelPython,代码行数:104,代码来源:NeuralNet.py

示例10: FullConnection

# 需要导入模块: from pybrain.structure import FeedForwardNetwork [as 别名]
# 或者: from pybrain.structure.FeedForwardNetwork import activateOnDataset [as 别名]
net.addOutputModule(outLayer)
#
#	do the plumbing: fully connect input -> hidden1 -> hidden2 -> output
#
in_to_hidden1      = FullConnection(inLayer,hiddenLayer1)
hidden1_to_hidden2 = FullConnection(hiddenLayer1,hiddenLayer2)
hidden2_to_out     = FullConnection(hiddenLayer2,outLayer)
#
net.addConnection(in_to_hidden1)
net.addConnection(hidden1_to_hidden2)
net.addConnection(hidden2_to_out)
net.sortModules()
#
#	activate on the training data set
#	NOTE(review): the result is discarded -- presumably a sanity/warm-up
#	pass to confirm the topology is wired correctly; confirm intent.
#
net.activateOnDataset(trndata)
#
#	build a backpropagation trainer
#
trainer = BackpropTrainer(net,			\
			  dataset=trndata,	\
			  momentum=0.1,		\
			  verbose=True,		\
			  weightdecay=0.01)

#
#	Generate a square grid of data points and put it into
#	a dataset, which we can then classify to get a nice
#	contour field for visualization...so the target values
#	for this data set aren't going to be used...
#
开发者ID:Milstein,项目名称:ml,代码行数:33,代码来源:cffnb.py


注:本文中的pybrain.structure.FeedForwardNetwork.activateOnDataset方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。