

Python BackpropTrainer.trainEpochs Method Code Examples

This article collects typical usage examples of the Python method pybrain.supervised.BackpropTrainer.trainEpochs. If you are unsure what BackpropTrainer.trainEpochs does, how to call it, or what it looks like in real code, the curated examples below should help. You can also browse further usage examples for the containing class, pybrain.supervised.BackpropTrainer.


Nine code examples of BackpropTrainer.trainEpochs are shown below, sorted by popularity by default. You can upvote the examples you find useful; your ratings help the system recommend better Python code examples.
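Before diving into the examples, here is a minimal, self-contained sketch of the typical call pattern. The XOR dataset, 2-3-1 network shape, and epoch count below are illustrative assumptions, not taken from any of the examples:

# Minimal sketch (assumed toy setup): build a dataset and a net, then call trainEpochs.
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised import BackpropTrainer
from pybrain.tools.shortcuts import buildNetwork

ds = SupervisedDataSet(2, 1)                     # 2 inputs, 1 target value
for inp, target in [((0, 0), (0,)), ((0, 1), (1,)),
                    ((1, 0), (1,)), ((1, 1), (0,))]:
    ds.addSample(inp, target)                    # XOR truth table

net = buildNetwork(2, 3, 1)                      # 2-3-1 feed-forward network
trainer = BackpropTrainer(net, dataset=ds, learningrate=0.01, momentum=0.9)
trainer.trainEpochs(1000)                        # run exactly 1000 passes over the dataset
print(net.activate((1, 0)))                      # inspect the (hopefully) learned output

trainEpochs() always runs the requested number of epochs and performs no early stopping of its own; stopping criteria, error tracking, and evaluation are left to the calling code, as the examples below show.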

Example 1: EightBitBrain

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import trainEpochs [as alias]
from pybrain.datasets import ClassificationDataSet
from pybrain.supervised import BackpropTrainer
from pybrain.tools.shortcuts import buildNetwork

class EightBitBrain(object):
    
    def __init__(self, dataset, inNodes, outNodes, hiddenNodes, classes):
        self.__dataset = ClassificationDataSet(inNodes, classes-1)
        for element in dataset:
            self.addDatasetSample(self._binaryList(element[0]), element[1])
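        # expand the scalar class labels into one-of-many (one-hot) target vectors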
        self.__dataset._convertToOneOfMany()
        self.__network = buildNetwork(inNodes, hiddenNodes, self.__dataset.outdim, recurrent=True)
        self.__trainer = BackpropTrainer(self.__network, learningrate = 0.01, momentum = 0.99, verbose = True)
        self.__trainer.setData(self.__dataset)

    def _binaryList(self, n):
        return [int(c) for c in "{0:08b}".format(n)]
    
    def addDatasetSample(self, argument, target):
        self.__dataset.addSample(argument, target)

    def train(self, epochs):
        self.__trainer.trainEpochs(epochs)
    
    def activate(self, information):
        result = self.__network.activate(self._binaryList(information))
        highest = (0,0)
        for resultClass in range(len(result)):
            if result[resultClass] > highest[0]:
                highest = (result[resultClass], resultClass)
        return highest[1]
Author: oskanberg, Project: pyconomy, Lines: 29, Source: brains.py

Example 2: train

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import trainEpochs [as alias]
  def train(self, params):
    """
    Train TDNN network on buffered dataset history
    :param params:
    :return:
    """
    # self.net = buildNetwork(params['encoding_num'] * params['num_lags'],
    #                         params['num_cells'],
    #                         params['encoding_num'],
    #                         bias=True,
    #                         outputbias=True)

    ds = SupervisedDataSet(params['encoding_num'] * params['num_lags'],
                           params['encoding_num'])
    history = self.window(self.history, params['learning_window'])

    n = params['encoding_num']
    for i in xrange(params['num_lags'], len(history)):
      targets = numpy.zeros((1, n))
      targets[0, :] = self.encoder.encode(history[i])

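      # input vector: the encodings of the previous num_lags values, concatenated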
      features = numpy.zeros((1, n * params['num_lags']))
      for lags in xrange(params['num_lags']):
        features[0, lags * n:(lags + 1) * n] = self.encoder.encode(
          history[i - (lags + 1)])
      ds.addSample(features, targets)

    trainer = BackpropTrainer(self.net,
                              dataset=ds,
                              verbose=params['verbosity'] > 0)

    if len(history) > 1:
      trainer.trainEpochs(params['num_epochs'])
Author: andrewmalta13, Project: nupic.research, Lines: 35, Source: suite.py

Example 3: train

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import trainEpochs [as alias]
 def train(self, epochs=None):
     trainer = BackpropTrainer(
         self.net,
         self.training_data
     )
     if epochs:
         trainer.trainEpochs(epochs)
     else:
         trainer.trainUntilConvergence()
Author: jo-soft, Project: footballResultEstimation, Lines: 11, Source: pyBrainNeuronalNet.py
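A note on the fallback branch above: trainUntilConvergence() holds out part of the dataset for validation (25% by default) and stops once the validation error stops improving, whereas trainEpochs(epochs) runs exactly the requested number of passes. A hedged one-liner, with an illustrative epoch cap:

    trainer.trainUntilConvergence(maxEpochs=1000, validationProportion=0.25)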

Example 4: PerceptronPyBrainFilter

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import trainEpochs [as alias]
class PerceptronPyBrainFilter(LinearPerceptron): # PYBRAIN
    def __init__(self, *args, **kwargs):    
        super(PerceptronPyBrainFilter, self).__init__(*args, **kwargs)
        
        # input, hidden_layers, output
        self.perceptron = buildNetwork(self.num_last_measures, 0, 1, \
                                       hiddenclass=pybrain.structure.modules.SigmoidLayer, #@UndefinedVariable \
                                       outclass=pybrain.structure.modules.SigmoidLayer) #@UndefinedVariable
        
        # input dimension, target dimension
        self.pointer = 0
        self.data = SupervisedDataSet(self.num_last_measures, 1)
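        # pre-fill the fixed-size dataset with zero samples; apply() overwrites entries in place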
        for _i in xrange(self.dataset_size):
            self.data.addSample([0] * self.num_last_measures, 0)     
        self.trainer = BackpropTrainer(self.perceptron, self.data, learningrate=self.learning_rate)
        
        # This call does some internal initialization which is necessary before the net can finally
        # be used: for example, the modules are sorted topologically.
        self.perceptron.sortModules()
        

    def train(self):
        self.trainer.trainEpochs(1)
    
    
    def guess(self, x):
        return self.perceptron.activate(x)
    

    def apply(self, x):                
        if len(self.lag_buffer) < self.lag - 1:
            if len(self.last_measures) < self.num_last_measures:
                self.last_measures.append(x)
            else:
                self.lag_buffer.append(x)                  
            return x
        
        self.lag_buffer.append(x)
        #self.data.addSample(tuple(self.last_measures), self.lag_buffer[-1])
        self.data['input'][self.pointer] = np.array(self.last_measures)
                                                
        self.train()
        
        if len(self.data) == self.dataset_size:        
            #del self.data[0]
            #self.data.removeSample
            #self.data.removeSample
            pass
            
        del self.last_measures[0]
        self.last_measures.append(self.lag_buffer[0])
        
        del self.lag_buffer[0]
                        
        return self.guess(self.last_measures)
Author: labtempo, Project: TMON, Lines: 57, Source: filters.py

Example 5: network

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import trainEpochs [as alias]
from pybrain.structure import RecurrentNetwork, LinearLayer, TanhLayer, SoftmaxLayer, FullConnection
from pybrain.supervised import BackpropTrainer
from pybrain.utilities import percentError

def network(dataset, input_list):
    num_words = len(input_list)
    #dividing the dataset into training and testing data
    tstdata, trndata = dataset.splitWithProportion(0.25)

    #building the network
    net = RecurrentNetwork()
    input_layer1 = LinearLayer(num_words, name='input_layer1')
    input_layer2 = LinearLayer(num_words, name='input_layer2')
    hidden_layer = TanhLayer(num_words, name='hidden_layer')
    output_layer = SoftmaxLayer(num_words, name='output_layer')
    net.addInputModule(input_layer1)
    net.addInputModule(input_layer2)
    net.addModule(hidden_layer)
    net.addOutputModule(output_layer)
    net.addConnection(FullConnection(input_layer1,
                                     hidden_layer,
                                     name='in1_to_hidden'))
    net.addConnection(FullConnection(input_layer2, hidden_layer,
                                     name='in2_to_hidden'))
    net.addConnection(FullConnection(hidden_layer,
                                     output_layer,
                                     name='hidden_to_output'))
    net.addConnection(FullConnection(input_layer1,
                                     output_layer,
                                     name='in1_to_out'))
    net.addConnection(FullConnection(input_layer2,
                                     output_layer,
                                     name='in2_to_out'))
    net.sortModules()
    #backpropagation
    trainer = BackpropTrainer(net, dataset=trndata,
                              momentum=0.1,
                              verbose=True,
                              weightdecay=0.01)
    #error checking part
    for i in range(10):
        trainer.trainEpochs(1)
        trnresult = percentError(trainer.testOnClassData(), trndata['target'])
        tstresult = percentError(trainer.testOnClassData(dataset=tstdata),
                                 tstdata['target'])
        print "epoch: %4d" % trainer.totalepochs
        print "  train error: %5.10f%%" % trnresult
        print "  test error: %5.10f%%" % tstresult
    return net
Author: reetesh11, Project: trigram, Lines: 47, Source: neuro_probb.py

Example 6: training_and_testing

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import trainEpochs [as alias]
def training_and_testing():
    nn= init_neural_network()

    training = learning.get_labeled_data('%strain-images-idx3-ubyte.gz'%(database_folder),
                                '%strain-labels-idx1-ubyte.gz'%(database_folder)
                                ,'%strainig'%(database_folder))
    test = learning.get_labeled_data('%st10k-images-idx3-ubyte.gz'%(database_folder),
                                 '%st10k-labels-idx1-ubyte.gz'%(database_folder),
                                 '%stest'%(database_folder))

    FEATURES = N_INPUT_LAYER
    print("Caracteristicas a analizar: %i"%FEATURES)
    testdata = ClassificationDataSet(FEATURES,1,nb_classes=OUTPUT_LAYER)
    trainingdata = ClassificationDataSet(FEATURES,1,nb_classes=OUTPUT_LAYER)


    for i in range(len(test['data'])):
        testdata.addSample(test['data'][i],test['label'][i])
    for j in range(len(training['data'])):
        trainingdata.addSample(training['data'][j],training['label'][j])

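    # expand the scalar class labels into one-of-many (one-hot) target vectors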
    trainingdata._convertToOneOfMany()
    testdata._convertToOneOfMany()

    trainer = BackpropTrainer(nn,dataset=trainingdata,momentum=MOMENTUM,verbose=True,
                         weightdecay=W_DECAY,learningrate=L_RATE,lrdecay=L_DECAY)

    for i in range(EPOCHS):
        trainer.trainEpochs(1)
        trnresult = percentError(trainer.testOnClassData(),
                                 trainingdata['class'])
        tstresult = percentError(trainer.testOnClassData(
                                 dataset=testdata), testdata['class'])

        print("epoch: %4d" % trainer.totalepochs,
                     "  train error: %5.2f%%" % trnresult,
                     "  test error: %5.2f%%" % tstresult)
    return nn
Author: rbalda, Project: neural_ocr, Lines: 40, Source: neural_ocr.py

Example 7: exists

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import trainEpochs [as alias]
    trndata._convertToOneOfMany( bounds=[0.,1.] )
    tstdata._convertToOneOfMany( bounds=[0.,1.] )

    if exists("params.xml"):
        rnn = NetworkReader.readFrom('params.xml')
    else:
        # construct LSTM network - note the missing output bias
        rnn = buildNetwork( trndata.indim, 5, trndata.outdim, hiddenclass=LSTMLayer, outclass=SoftmaxLayer, outputbias=False, recurrent=True)

    # define a training method
    trainer = BackpropTrainer( rnn, dataset=trndata, momentum=0.1, weightdecay=0.01)

    # start training
    for i in range(100):
        # train for 2 epochs per outer iteration
        trainer.trainEpochs( 2 )
        # calculating the error
        trnresult = (1.0-testOnSequenceData(rnn, trndata))
        tstresult = (1.0-testOnSequenceData(rnn, tstdata))
        #print("train error: %5.2f%%" % trnresult, ",  test error: %5.2f%%" % tstresult)

        # activate the net on a training sample; take the most active softmax output
        out = rnn.activate(X_train[0])
        out = out.argmax(axis=0)

    
    index=0

    # evaluate the net in test data
    result = []
    for x in X_test:
Author: PierreHao, Project: BoVW-LSTM, Lines: 33, Source: lstm.py

Example 8: initializeTDNNnet

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import trainEpochs [as alias]
  random.seed(6)
  net = initializeTDNNnet(nDimInput=X.shape[1],
                         nDimOutput=1, numNeurons=200)

  predictedInput = np.zeros((len(sequence),))
  targetInput = np.zeros((len(sequence),))
  trueData = np.zeros((len(sequence),))
  for i in xrange(nTrain, len(sequence)-predictionStep):
    Y = net.activate(X[i])

    if i % 336 == 0 and i > numLags:
      ds = SupervisedDataSet(X.shape[1], 1)
      # use a separate loop variable so the outer index i is not clobbered
      for j in xrange(i - nTrain, i):
        ds.addSample(X[j], T[j])
      trainer = BackpropTrainer(net, dataset=ds, verbose=1)
      trainer.trainEpochs(30)

    predictedInput[i] = Y[-1]
    targetInput[i] = sequence['data'][i+predictionStep]
    trueData[i] = sequence['data'][i]
    print "Iteration {} target input {:2.2f} predicted Input {:2.2f} ".format(
      i, targetInput[i], predictedInput[i])

  predictedInput = (predictedInput * stdSeq) + meanSeq
  targetInput = (targetInput * stdSeq) + meanSeq
  trueData = (trueData * stdSeq) + meanSeq

  saveResultToFile(dataSet, predictedInput, 'tdnn')

  plt.figure()
  plt.plot(targetInput)
Author: mewbak, Project: nupic.research, Lines: 33, Source: run_tdnn.py

Example 9: BackpropTrainer

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import trainEpochs [as alias]
net.addOutputModule(output_layer)
net.addConnection(FullConnection(input_layer,
                                 hidden_layer,
                                 name='in_to_hidden'))
net.addConnection(FullConnection(hidden_layer,
                                 output_layer,
                                 name='hidden_to_out'))
net.sortModules()

#backpropagation
trainer = BackpropTrainer(net, dataset=trndata,
                          momentum=0.1,
                          verbose=True,
                          weightdecay=0.01)
#error checking part
for i in range(10):
    trainer.trainEpochs(1)
    trnresult = percentError(trainer.testOnClassData(), trndata['target'])
    tstresult = percentError(trainer.testOnClassData(dataset=tstdata),
                             tstdata['target'])

trigram_file = open('trigram.txt', 'w')
trigram_file.writelines(["%s\n" % item for item in sorted_list])

word_file = open('word_list', 'w')
word_file.writelines(["%s\n" % item for item in input_list])

word_file.close()
trigram_file.close()
text_file.close()
Author: reetesh11, Project: trigram, Lines: 32, Source: trigram.py


Note: The pybrain.supervised.BackpropTrainer.trainEpochs method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright in the source code remains with the original authors. Please follow the corresponding project's license when distributing or using the code, and do not reproduce this article without permission.