当前位置: 首页>>代码示例>>Python>>正文


Python BackpropTrainer.trainOnDataset方法代码示例

本文整理汇总了Python中pybrain.supervised.trainers.BackpropTrainer.trainOnDataset方法的典型用法代码示例。如果您正苦于以下问题:Python BackpropTrainer.trainOnDataset方法的具体用法?Python BackpropTrainer.trainOnDataset怎么用?Python BackpropTrainer.trainOnDataset使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在pybrain.supervised.trainers.BackpropTrainer的用法示例。


在下文中一共展示了BackpropTrainer.trainOnDataset方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: anntrain

# 需要导入模块: from pybrain.supervised.trainers import BackpropTrainer [as 别名]
# 或者: from pybrain.supervised.trainers.BackpropTrainer import trainOnDataset [as 别名]
def anntrain(xdata, ydata):
    """Train a feed-forward net (n inputs -> 1 sigmoid unit -> 1 linear
    output) on the given samples for 40 backprop epochs and return it."""
    n_features = len(xdata[0])

    # Collect every (features, label) pair into a supervised dataset.
    dataset = SupervisedDataSet(n_features, 1)
    for i, sample in enumerate(xdata):
        dataset.addSample(sample, ydata[i])

    # Assemble the network explicitly rather than via buildNetwork.
    network = FeedForwardNetwork()
    in_layer = LinearLayer(n_features)
    hidden = SigmoidLayer(1)
    out_layer = LinearLayer(1)
    network.addInputModule(in_layer)
    network.addModule(hidden)
    network.addOutputModule(out_layer)
    network.addConnection(FullConnection(in_layer, hidden))
    network.addConnection(FullConnection(hidden, out_layer))
    network.sortModules()

    # Backpropagation: 40 epochs over the full dataset, then report error.
    trainer = BackpropTrainer(network, dataset)
    trainer.trainOnDataset(dataset, 40)
    trainer.testOnData()
    return network
开发者ID:gibranfp,项目名称:authorid,代码行数:30,代码来源:ML.py

示例2: computeModel

# 需要导入模块: from pybrain.supervised.trainers import BackpropTrainer [as 别名]
# 或者: from pybrain.supervised.trainers.BackpropTrainer import trainOnDataset [as 别名]
	def computeModel(self, path, user):
		"""Train and persist a per-user LSTM recognizer.

		Builds a 24-input / 50-LSTM-cell / 1-output recurrent network,
		trains it for 2000 backprop epochs on variations 0-6 of every
		digit, prints the MSE on held-out variations 7-9, saves the net,
		and returns it.

		NOTE(review): `path` is accepted but never used here -- confirm.
		"""
		# Create a supervised dataset for training (24 features -> user key).
		trndata = SupervisedDataSet(24, 1)
		tstdata = SupervisedDataSet(24, 1)
		
		# Fill the datasets: first 7 variations of each digit go to the
		# training set, the remaining 3 to the test set.
		for number in range(0,10):
			for variation in range(0,7):
				# Pass all the features as inputs.
				trndata.addSample(self.getSample(user, number, variation),(user.key,))
				
			for variation in range(7,10):
				# Pass all the features as inputs.
				tstdata.addSample(self.getSample(user, number, variation),(user.key,))
				
		# Build the LSTM.
		n = buildNetwork(24, 50, 1, hiddenclass=LSTMLayer, recurrent=True, bias=True)

		# Backprop trainer with heavy momentum and a very small learning rate.
		trainer = BackpropTrainer(n, dataset = trndata, momentum=0.99, learningrate=0.00002)

		# Carry out the training, then measure error on the held-out data.
		trainer.trainOnDataset(trndata, 2000)
		valueA = trainer.testOnData(tstdata)
		print '\tMSE -> {0:.2f}'.format(valueA)
		# NOTE(review): the save path uses unescaped backslashes; this parses
		# under Python 2, but '\N' is a syntax error in a Python 3 str literal.
		self.saveModel(n, '.\NeuralNets\SavedNet_%d' %(user.key))
		
		return n
开发者ID:ThomasRouvinez,项目名称:UserRecognizer,代码行数:30,代码来源:PyBrain.py

示例3: simpleNeuralNetworkTrain

# 需要导入模块: from pybrain.supervised.trainers import BackpropTrainer [as 别名]
# 或者: from pybrain.supervised.trainers.BackpropTrainer import trainOnDataset [as 别名]
def simpleNeuralNetworkTrain(fileName, numFeatures, numClasses, possibleOutputs, numHiddenNodes, numTrainingEpochs):
    """Train a softmax feed-forward classifier on a numeric text file.

    Loads `fileName` with numpy, takes columns 0-4 as features and column 6
    as the integer class label, normalizes the features, one-hot encodes the
    targets, and trains a numFeatures-numHiddenNodes-numClasses network with
    backprop for numTrainingEpochs epochs.

    NOTE(review): the column slices are hard-coded (0:5 and 6) and ignore
    `numFeatures`; column 5 is skipped entirely -- confirm the data layout.
    NOTE(review): the trained network is neither returned nor saved; the
    commented NetworkReader line suggests persistence happens elsewhere.
    """

    data = np.genfromtxt(fileName)
    trnIn = data[:, 0:5]
    trnOut = data[:, 6]
    trnOut = [int(val) for val in trnOut]

    normalizeData(trnIn, numFeatures)
    # Target dimension is `possibleOutputs`; class count is `numClasses`.
    trndata = ClassificationDataSet(numFeatures, possibleOutputs, nb_classes=numClasses)
    for row in range(0, len(trnIn)):
        tempListOut = []
        tempListIn = []
        tempListOut.append(int(trnOut[row]))
        for i in range(0, numFeatures):
            tempListIn.append(trnIn[row][i])
        trndata.addSample(tempListIn, tempListOut)

    # One output neuron per class (required for the softmax output layer).
    trndata._convertToOneOfMany()

    #  When running for the first time
    myNetwork = buildNetwork(numFeatures, numHiddenNodes, numClasses, outclass=SoftmaxLayer, bias=True, recurrent=False)

    # Read from file after the first try.
    #  myNetwork = NetworkReader.readFrom('firstTime.xml')    # Use saved results.
    trainer = BackpropTrainer(myNetwork, dataset=trndata, momentum=0.0, verbose=True, weightdecay=0.0)
    for i in range(numTrainingEpochs):
        trainer.trainOnDataset(dataset=trndata)
开发者ID:abdullah2891,项目名称:NeuralNet,代码行数:29,代码来源:trainNeuralNetwork.py

示例4: neuralnetworktrain

# 需要导入模块: from pybrain.supervised.trainers import BackpropTrainer [as 别名]
# 或者: from pybrain.supervised.trainers.BackpropTrainer import trainOnDataset [as 别名]
    def neuralnetworktrain(self):
        """Build a 9-20-10-3 feed-forward net, train it with backprop on
        the stored dataset for 5 epochs, and persist the result to file."""
        dataset = self.getdata()

        # Layer modules: 9 linear inputs, two hidden layers (sigmoid then
        # tanh), 3 linear outputs.
        input_layer = LinearLayer(9)
        hidden_one = SigmoidLayer(20)
        hidden_two = TanhLayer(10)
        output_layer = LinearLayer(3)

        # Wire the architecture together.
        network = FeedForwardNetwork()
        network.addInputModule(input_layer)
        network.addModule(hidden_one)
        network.addModule(hidden_two)
        network.addOutputModule(output_layer)
        network.addConnection(FullConnection(input_layer, hidden_one))
        network.addConnection(FullConnection(hidden_one, hidden_two))
        network.addConnection(FullConnection(hidden_two, output_layer))
        network.sortModules()

        # Backpropagation: 5 epochs, then a quiet evaluation pass.
        backprop = BackpropTrainer(network, learningrate=0.01, momentum=0.5, verbose=True)
        backprop.trainOnDataset(dataset, 5)
        backprop.testOnData(verbose=False)

        # Saving the trained neural network information to file
        self.writetrainedinfo(network)
开发者ID:casyazmon,项目名称:mars_city,代码行数:33,代码来源:neuraltraining.py

示例5: train_neural_network

# 需要导入模块: from pybrain.supervised.trainers import BackpropTrainer [as 别名]
# 或者: from pybrain.supervised.trainers.BackpropTrainer import trainOnDataset [as 别名]
def train_neural_network():
    """Train a 10-3-1 network on the dataset from get_ds(), plot the
    convergence error curves, then print accuracy and elapsed time."""
    start = time.clock()  # NOTE(review): time.clock() was removed in Python 3.8
    ds = get_ds()

    # split main data to train and test parts (75% train / 25% test)
    train, test = ds.splitWithProportion(0.75)

    # build nn with 10 inputs, one hidden layer of 3 neurons, 1 output neuron
    net = buildNetwork(10,3,1, bias=True)

    # use backpropagation algorithm
    trainer = BackpropTrainer(net, train, momentum = 0.1, weightdecay = 0.01)

    # plot training/validation error curves from the convergence run
    trnError, valError = trainer.trainUntilConvergence(dataset = train, maxEpochs = 50)

    plot_error(trnError, valError)

    # NOTE(review): the net is trained a second time here (500 more epochs)
    # after trainUntilConvergence -- confirm this is intentional.
    print "train the model..."
    trainer.trainOnDataset(train, 500)
    print "Total epochs: %s" % trainer.totalepochs

    print "activate..."
    # NOTE(review): with a single output neuron, argmax(axis=1) is always 0;
    # presumably a threshold/round was intended -- verify against targets.
    out = net.activateOnDataset(test).argmax(axis = 1)
    percent = 100 - percentError(out, test['target'])
    print "%s" % percent

    end = time.clock()
    print "Time: %s" % str(end-start)
开发者ID:zhzhussupovkz,项目名称:forest-cover-type-prediction,代码行数:31,代码来源:forest_cover_type_neural_network.py

示例6: nn

# 需要导入模块: from pybrain.supervised.trainers import BackpropTrainer [as 别名]
# 或者: from pybrain.supervised.trainers.BackpropTrainer import trainOnDataset [as 别名]
def nn(tx, ty, rx, ry, iterations):
    """Train a 14-5-5-1 network on (tx, ty) for `iterations` epochs, save
    it to network.xml, and return the MSE of rounded outputs on (rx, ry)."""
    net = buildNetwork(14, 5, 5, 1)
    dataset = ClassificationDataSet(14, 1, class_labels=["<50K", ">=50K"])
    for idx in xrange(len(tx)):
        dataset.addSample(tx[idx], [ty[idx]])
    BackpropTrainer(net, dataset).trainOnDataset(dataset, iterations)
    NetworkWriter.writeToFile(net, "network.xml")
    # Mean squared error of the rounded activations against the labels.
    predictions = np.array([round(net.activate(sample)) for sample in rx])
    return sum((predictions - ry) ** 2) / float(len(ry))
开发者ID:iRapha,项目名称:Machine-Learning,代码行数:12,代码来源:hills.py

示例7: estimateNot

# 需要导入模块: from pybrain.supervised.trainers import BackpropTrainer [as 别名]
# 或者: from pybrain.supervised.trainers.BackpropTrainer import trainOnDataset [as 别名]
def estimateNot():
    """Train a 1-100-1 net on the two-row NOT truth table (3000 backprop
    epochs) and print its prediction for both inputs."""
    ds_not = SupervisedDataSet(1, 1)
    ds_not.addSample( (0,) , (1,))
    ds_not.addSample( (1,) , (0,))
    net = buildNetwork(1, 100, 1, bias=True)
    trainer = BackpropTrainer(net, learningrate = 0.01, momentum = 0.99)
    trainer.trainOnDataset(ds_not, 3000)
    trainer.testOnData() 
    print '\nthe prediction for NOT value:'
    print 'NOT 0  = ', net.activate((0,))
    print 'NOT 1  = ', net.activate((1,))
开发者ID:andreas-koukorinis,项目名称:Applied-Data-Science,代码行数:13,代码来源:problem1b2.py

示例8: RunNet

# 需要导入模块: from pybrain.supervised.trainers import BackpropTrainer [as 别名]
# 或者: from pybrain.supervised.trainers.BackpropTrainer import trainOnDataset [as 别名]
def RunNet(net, dataset, train_epochs):
    """Train `net` on `dataset` with backprop for `train_epochs` epochs,
    then run a verbose evaluation pass over the same data."""
    backprop = BackpropTrainer(net, learningrate=0.01, momentum=0.5, verbose=True)
    backprop.trainOnDataset(dataset, train_epochs)
    backprop.testOnData(verbose=True)
开发者ID:ddemarco5,项目名称:Neural-Network-AI,代码行数:15,代码来源:base.py

示例9: buildAndTrain

# 需要导入模块: from pybrain.supervised.trainers import BackpropTrainer [as 别名]
# 或者: from pybrain.supervised.trainers.BackpropTrainer import trainOnDataset [as 别名]
def buildAndTrain(ds):
    """Train a fresh 2-4-1 biased network on `ds` for 1000 backprop
    epochs, report the training error, and return the network."""
    net = buildNetwork(2, 4, 1, bias=True)
    backprop = BackpropTrainer(net, learningrate=0.01, momentum=0.99)
    backprop.trainOnDataset(ds, 1000)
    backprop.testOnData()
    return net
开发者ID:pvarsh,项目名称:applied_data_science,代码行数:15,代码来源:and.py

示例10: xtrain

# 需要导入模块: from pybrain.supervised.trainers import BackpropTrainer [as 别名]
# 或者: from pybrain.supervised.trainers.BackpropTrainer import trainOnDataset [as 别名]
    def xtrain(self):
        """Train a recurrent 9-15-5-1 network on the stored dataset for
        10 backprop epochs and save the trained weights to file."""
        dataset = self.getdata()

        # Two hidden layers (15 and 5 units), recurrent connections enabled.
        net = buildNetwork(9, 15, 5, 1, recurrent=True)

        # Backprop with weight decay; verbose training, quiet evaluation.
        backprop = BackpropTrainer(net, learningrate=0.01, momentum=0.75,
                                   weightdecay=0.02, verbose=True)
        backprop.trainOnDataset(dataset, 10)
        backprop.testOnData(verbose=False)

        # Persist the trained network.
        self.writetrainedinfo(net)
开发者ID:casyazmon,项目名称:mars_city,代码行数:16,代码来源:xtraining.py

示例11: __init__

# 需要导入模块: from pybrain.supervised.trainers import BackpropTrainer [as 别名]
# 或者: from pybrain.supervised.trainers.BackpropTrainer import trainOnDataset [as 别名]
class BackpropNet:
    """Thin wrapper around a PyBrain feed-forward net that is trained
    online, one sample at a time, via backpropagation."""

    def __init__(self, input_size, hidden_layer_size, output_size):
        self._input_size = input_size
        self._output_size = output_size
        self._net = buildNetwork(input_size, hidden_layer_size, output_size)
        self._trainer = BackpropTrainer(self._net, learningrate=0.001)

    def process(self, inp):
        """Feed `inp` forward through the network and return the activation."""
        return self._net.activate(inp)

    def train(self, inp, output):
        """Run one backprop pass on the single (inp, output) pair."""
        sample = SupervisedDataSet(self._input_size, self._output_size)
        sample.addSample(inp, output)
        self._trainer.trainOnDataset(sample)
开发者ID:gaborpapp,项目名称:AIam,代码行数:16,代码来源:backprop_net.py

示例12: estimateAnd

# 需要导入模块: from pybrain.supervised.trainers import BackpropTrainer [as 别名]
# 或者: from pybrain.supervised.trainers.BackpropTrainer import trainOnDataset [as 别名]
def estimateAnd():
    """Train a 2-4-1 net on the four-row AND truth table (3000 backprop
    epochs) and print its prediction for every input combination."""
    ds_and = SupervisedDataSet(2, 1)
    ds_and.addSample( (0,0) , (0,))
    ds_and.addSample( (0,1) , (0,))
    ds_and.addSample( (1,0) , (0,))
    ds_and.addSample( (1,1) , (1,))
    net = buildNetwork(2, 4, 1, bias=True)
    trainer = BackpropTrainer(net, learningrate = 0.01, momentum = 0.99)
    trainer.trainOnDataset(ds_and, 3000)
    trainer.testOnData() 
    print '\nthe prediction for AND value:'
    print '1 AND 1 = ', net.activate((1,1))
    print '1 AND 0 = ', net.activate((1,0))
    print '0 AND 1 = ', net.activate((0,1))
    print '0 AND 0 = ', net.activate((0,0))
开发者ID:andreas-koukorinis,项目名称:Applied-Data-Science,代码行数:17,代码来源:problem1a.py

示例13: estimateNor

# 需要导入模块: from pybrain.supervised.trainers import BackpropTrainer [as 别名]
# 或者: from pybrain.supervised.trainers.BackpropTrainer import trainOnDataset [as 别名]
def estimateNor():
    """Train a 2-100-1 net on the four-row NOR truth table (3000 backprop
    epochs) and print its prediction for every input combination."""
    ds_nor = SupervisedDataSet(2, 1)
    ds_nor.addSample( (0,0) , (1,))
    ds_nor.addSample( (0,1) , (0,))
    ds_nor.addSample( (1,0) , (0,))
    ds_nor.addSample( (1,1) , (0,))
    net = buildNetwork(2, 100, 1, bias=True)
    trainer = BackpropTrainer(net, learningrate = 0.01, momentum = 0.99)
    trainer.trainOnDataset(ds_nor, 3000)
    trainer.testOnData() 
    print '\nthe prediction for NOR value:'
    print '1 NOR 1 = ', net.activate((1,1))
    print '1 NOR 0 = ', net.activate((1,0))
    print '0 NOR 1 = ', net.activate((0,1))
    print '0 NOR 0 = ', net.activate((0,0))
开发者ID:andreas-koukorinis,项目名称:Applied-Data-Science,代码行数:17,代码来源:problem1b2.py

示例14: build_network

# 需要导入模块: from pybrain.supervised.trainers import BackpropTrainer [as 别名]
# 或者: from pybrain.supervised.trainers.BackpropTrainer import trainOnDataset [as 别名]
def build_network():
    """Train an INPUT-HIDDEN-CLASSES softmax classifier on the iris data.

    Loads the iris dataset (presumably scikit-learn's `datasets` -- confirm
    against the file's imports), wraps it in a classification dataset,
    splits 70/30, one-hot encodes the targets, trains with backprop for
    500 epochs, prints the percent error on the test split, and returns
    the trained network.
    """

    # get iris data
    iris = datasets.load_iris()
    d,t = iris.data, iris.target

    # build dataset
    ds = _get_classification_dataset()
    for i in range(len(d)):
        ds.addSample(d[i],t[i])

    print "Dataset input: {}".format(ds['input'])
    print "Dataset output: {}".format(ds['target'])
    print "Dataset input length: {}".format(len(ds['input']))
    print "Dataset output length: {}".format(len(ds['target']))
    print "Dataset length: {}".format(len(ds))
    print "Dataset input|output dimensions are {}|{}".format(ds.indim, ds.outdim)

    # split dataset (70% train / 30% test)
    train_data,test_data = _split_with_proportion(ds, 0.70)
    
    print "Train Data length: {}".format(len(train_data))
    print "Test Data length: {}".format(len(test_data))

    # encode with one output neuron per class
    train_data._convertToOneOfMany()
    test_data._convertToOneOfMany()

    print "Train Data input|output dimensions are {}|{}".format(train_data.indim, train_data.outdim)
    print "Test Data input|output dimensions are {}|{}".format(test_data.indim, test_data.outdim)

    # build network (INPUT/HIDDEN/CLASSES are module-level constants)
    network = buildNetwork(INPUT,HIDDEN,CLASSES,outclass=SoftmaxLayer)

    # train network for 500 backprop epochs
    trainer = BackpropTrainer(network,dataset=train_data,momentum=0.1,verbose=True,weightdecay=0.01)
    trainer.trainOnDataset(train_data, 500)

    print "Total epochs: {}".format(trainer.totalepochs)

    # test network: predicted class is the index of the max output neuron
    output = network.activateOnDataset(test_data).argmax(axis=1)
    
    print "Percent error: {}".format(percentError(output, test_data['class']))

    # return network
    return network
开发者ID:drat,项目名称:SaltwashAR,代码行数:49,代码来源:neuralnetwork.py

示例15: PHC_NN

# 需要导入模块: from pybrain.supervised.trainers import BackpropTrainer [as 别名]
# 或者: from pybrain.supervised.trainers.BackpropTrainer import trainOnDataset [as 别名]
class PHC_NN(PHC_FA):
    '''PHC (policy hill-climbing) with neural function approximation.

    Uses one feed-forward net for the Q-function and a second one for the
    policy, both updated online with backprop after each transition.
    '''
    delta=0.1  # learning rate for the policy-net updates
    maxNumberofAverage=30  # NOTE(review): not referenced in this block -- confirm use
    weightdecay=0.001  # L2 weight decay applied to both approximators
    trainingEpochPerUpdateWight=2  # backprop epochs per policy update
    
    def __init__(self, num_features, num_actions, indexOfAgent=None):    
        PHC_FA.__init__(self, num_features, num_actions, indexOfAgent)
        # Q approximator: (state features + one-hot action) -> scalar value.
        self.linQ = buildNetwork(num_features + num_actions, (num_features + num_actions), 1, hiddenclass = SigmoidLayer, outclass = LinearLayer)
        # Policy approximator: state features -> per-action score in (0,1).
        self.linPolicy = buildNetwork(num_features, (num_features + num_actions), num_actions, hiddenclass = SigmoidLayer,outclass = SigmoidLayer)
        self.trainer4LinQ=BackpropTrainer(self.linQ,weightdecay=self.weightdecay)
        self.trainer4LinPolicy=BackpropTrainer(self.linPolicy,weightdecay=self.weightdecay)

    def _pi(self, state):
        """Given state, compute probabilities for each action."""
        # Sigmoid outputs are positive, so dividing by their sum yields a
        # valid probability distribution.
        values = np.array(self.linPolicy.activate(r_[state]))
        z=np.sum(values)
        return (values/z).flatten()
    
    def _qValues(self, state):
        """ Return vector of q-values for all actions, 
        given the state(-features). """
        # One forward pass per action, with the action one-hot appended.
        values = np.array([self.linQ.activate(r_[state, one_to_n(i, self.num_actions)]) for i in range(self.num_actions)])
        return values.flatten()

            
    def _updateWeights(self, state, action, reward, next_state):
        """ state and next_state are vectors, action is an integer. """
        # Update the Q-value approximator toward the one-step TD target.
        target=reward + self.rewardDiscount * max(self._qValues(next_state))
        inp=r_[asarray(state), one_to_n(action, self.num_actions)]
        # NOTE(review): a fresh trainer is constructed on every update,
        # presumably to reset trainer state -- confirm this is intended.
        self.trainer4LinQ=BackpropTrainer(self.linQ,weightdecay=self.weightdecay)
        ds = SupervisedDataSet(self.num_features+self.num_actions,1)
        ds.addSample(inp, target)
        self.trainer4LinQ.trainOnDataset(ds)
        # Update the policy toward the greedy action (one-hot target).
        bestAction=r_argmax(self._qValues(state))
        target= one_to_n(bestAction, self.num_actions)
        inp=r_[asarray(state)]
        ds = SupervisedDataSet(self.num_features,self.num_actions)
        ds.addSample(inp, target)
        self.trainer4LinPolicy=BackpropTrainer(self.linPolicy,
                                               learningrate=self.delta,
                                               weightdecay=self.weightdecay)
        self.trainer4LinPolicy.setData(ds)
        self.trainer4LinPolicy.trainEpochs(epochs=self.trainingEpochPerUpdateWight)
开发者ID:Snazz2001,项目名称:Multi-Agent-Reinforcement-Learning-in-Stochastic-Games,代码行数:49,代码来源:phc.py


注:本文中的pybrain.supervised.trainers.BackpropTrainer.trainOnDataset方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。