This article collects typical usage examples of the Python method pybrain.supervised.trainers.RPropMinusTrainer.testOnData. If you are wondering what RPropMinusTrainer.testOnData does, or how to call it in practice, the selected code examples below may help. You can also look further into usage examples for the class this method belongs to, pybrain.supervised.trainers.RPropMinusTrainer.
Three code examples of RPropMinusTrainer.testOnData are shown below, sorted by popularity by default.
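Before the excerpts, here is a minimal self-contained sketch of how testOnData is typically called. The toy XOR dataset, network size, and epoch count are arbitrary illustrative choices, not taken from the examples below:

from pybrain.datasets import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import RPropMinusTrainer

# Toy XOR dataset: 2 inputs, 1 target value per sample
ds = SupervisedDataSet(2, 1)
for inp, target in [((0, 0), (0,)), ((0, 1), (1,)), ((1, 0), (1,)), ((1, 1), (0,))]:
    ds.addSample(inp, target)

net = buildNetwork(2, 4, 1, bias=True)
trainer = RPropMinusTrainer(net, dataset=ds, verbose=False)
trainer.trainEpochs(50)

# testOnData returns the average error of the network on the given dataset;
# without a dataset argument it evaluates on the trainer's own dataset.
avg_error = trainer.testOnData(dataset=ds, verbose=True)
print "average error:", avg_error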
Example 1: ClassificationDataSet
# Required import: from pybrain.supervised.trainers import RPropMinusTrainer
# Or: from pybrain.supervised.trainers.RPropMinusTrainer import testOnData
# 1-of-N output encoding, N = 10 classes
trndata = ClassificationDataSet(np.shape(train)[1], 10, nb_classes=10)
for i in xrange(np.shape(train)[0]):
    trndata.addSample(train[i], traint[i])
validata = ClassificationDataSet(np.shape(valid)[1], 10, nb_classes=10)
for i in xrange(np.shape(valid)[0]):
    validata.addSample(valid[i], validt[i])
testdata = ClassificationDataSet(np.shape(test)[1], 10, nb_classes=10)
for i in xrange(np.shape(test)[0]):
    testdata.addSample(test[i], testt[i])
# Build the network (one or two hidden layers, softmax output)
if nlayers > 1:
    net = buildNetwork(trndata.indim, nhidden, nhiddeno, trndata.outdim, outclass=SoftmaxLayer)
else:
    net = buildNetwork(trndata.indim, nhidden, trndata.outdim, outclass=SoftmaxLayer)
# Construct the trainer object.
# Plain backprop can be trained with the same arguments: trainer = BackpropTrainer(...)
trainer = RPropMinusTrainer(net, dataset=trndata, momentum=0.9, verbose=True, weightdecay=0.01, learningrate=0.1)
# Train and test
trainer.trainUntilConvergence(maxEpochs=percent_dataset_usage*300)  # ,trainingData=trndata, validationData=validata)
trainer.testOnData(verbose=True, dataset=testdata)
print_NN_params()  # remind us what architecture was tested
print_time_elapsed(start)  # print training time
filename = 'instances/NN_' + str(percent_dataset_usage) + 'perc_' + str(nhidden) + '_' + str(nhiddeno) + '.save'
save_NN_instance(filename)  # save trained object to disk
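Since testOnData also returns the average error over the supplied dataset, the test call above could capture the value for later comparison or logging instead of relying only on verbose output, for example:

test_error = trainer.testOnData(verbose=True, dataset=testdata)  # average error on the held-out test set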
Example 2: len
# Required import: from pybrain.supervised.trainers import RPropMinusTrainer
# Or: from pybrain.supervised.trainers.RPropMinusTrainer import testOnData
networkPath = '20LSTMCell/TrainUntilConv.xml'
figPath = '20LSTMCell/ErrorGraph'
#####################
#####################
print "Training Data Length: ", len(trndata)
print "Num of Training Seq: ", trndata.getNumSequences()
print "Validation Data Length: ", len(tstdata)
print "Num of Validation Seq: ", tstdata.getNumSequences()
print 'Start Training'
time_start = time.time()
while tstErrorCount < 100:
    print "********** Classification with 20LSTMCell with RP- **********"
    trnError = trainer.train()
    tstError = trainer.testOnData(dataset=tstdata)
    trnAccu = 100 - percentError(trainer.testOnClassData(), trndata['class'])
    tstAccu = 100 - percentError(trainer.testOnClassData(dataset=tstdata), tstdata['class'])
    trn_class_accu.append(trnAccu)
    tst_class_accu.append(tstAccu)
    trn_error.append(trnError)
    tst_error.append(tstError)
    np.savetxt(trnErrorPath, trn_error)
    np.savetxt(tstErrorPath, tst_error)
    np.savetxt(trnClassErrorPath, trn_class_accu)
    np.savetxt(tstClassErrorPath, tst_class_accu)
    if oldtstError == 0:
        oldtstError = tstError
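The excerpt ends inside the while loop. The tstErrorCount and oldtstError variables suggest a patience-style early-stopping check; a hedged sketch of how such bookkeeping is commonly written after the initialization check above (an assumption for illustration, not the original author's code):

# hypothetical continuation of the loop body -- not part of the original excerpt
if tstError < oldtstError:
    oldtstError = tstError   # test error improved: remember the new best
    tstErrorCount = 0        # reset the patience counter (the net could be checkpointed to networkPath here)
else:
    tstErrorCount += 1       # another epoch without improvement; the while condition stops after 100
    print "No improvement for %d epoch(s)" % tstErrorCount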
Example 3: createDataset3
# Required import: from pybrain.supervised.trainers import RPropMinusTrainer
# Or: from pybrain.supervised.trainers.RPropMinusTrainer import testOnData
test_set_num = 10  # int(math.floor(len_pList*0.15))
epochs = 35
hiddenNodes = 8
print "======== Settings ========"
print "input_interval: %d, input_vector_size: %d, data_set: %d, test_set_num: %d, epochs: %d" % (interval, inputSize, len_pList, test_set_num, epochs)
limit = len_pList - test_set_num
ds = createDataset3(pList[0:int(limit)], limit, inputSize, 1)
#net = buildNetwork(1,6,1,bias=True,recurrent=True)
#trainer = BackpropTrainer(net,ds,batchlearning=False,lrdecay=0.0,momentum=0.0,learningrate=0.01)
net = buildNetwork(inputSize, hiddenNodes, 1, bias=True)
trainer = RPropMinusTrainer(net, verbose=True)
#trainer = BackpropTrainer(net,ds,batchlearning=False,lrdecay=0.0,momentum=0.0,learningrate=0.01, verbose=True)
trainer.trainOnDataset(ds, epochs)
trainer.testOnData(verbose=True)  # no dataset given, so this reports the average error on the training data
i = len_pList - test_set_num
last_value = normalize(pList[i-1][1])       # most recent observed value before the test window
last_last_value = normalize(pList[i-2][1])  # the value before that
out_data = []
print "======== Testing ========"
for i in range(len_pList-test_set_num+1, len_pList):
    value = denormalize(net.activate([last_last_value, last_value]))
    out_datum = (i, pList[i][1], value)
    out_data.append(out_datum)
    print "Index: %d Actual: %f Prediction: %f" % out_datum
    # slide the two-step window: feed the new prediction back in as the latest value
    last_last_value = last_value
    last_value = normalize(value)