This article collects typical usage examples of the Python method pybrain.supervised.BackpropTrainer.testOnClassData. If you have been wondering what exactly BackpropTrainer.testOnClassData does, how to call it, and what real uses look like, the curated examples below may help. You can also read further about the containing class, pybrain.supervised.BackpropTrainer.
Five code examples of BackpropTrainer.testOnClassData are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
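Before the collected examples, here is a minimal self-contained sketch of the method on a toy dataset (the dataset and network are invented for illustration and do not come from any example below): testOnClassData runs the network over a ClassificationDataSet and returns one predicted class index per sample, which percentError can compare against the labels kept in the dataset's 'class' field.

# Minimal sketch (toy data, for illustration only)
from pybrain.datasets import ClassificationDataSet
from pybrain.structure import SoftmaxLayer
from pybrain.supervised import BackpropTrainer
from pybrain.tools.shortcuts import buildNetwork
from pybrain.utilities import percentError

ds = ClassificationDataSet(2, 1, nb_classes=2)
for xy, label in [((0, 0), 0), ((0, 1), 1), ((1, 0), 1), ((1, 1), 0)]:
    ds.addSample(xy, [label])
ds._convertToOneOfMany()  # one-hot targets; integer labels move to ds['class']

net = buildNetwork(ds.indim, 4, ds.outdim, outclass=SoftmaxLayer)
trainer = BackpropTrainer(net, dataset=ds, learningrate=0.05)
trainer.trainEpochs(50)
# testOnClassData returns the predicted class index for every sample
print(percentError(trainer.testOnClassData(), ds['class']))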
Example 1: network
# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import testOnClassData [as alias]
# Assumed imports for this example (the snippet itself omits them):
from pybrain.structure import (FullConnection, LinearLayer, RecurrentNetwork,
                               SoftmaxLayer, TanhLayer)
from pybrain.supervised import BackpropTrainer
from pybrain.utilities import percentError

def network(dataset, input_list):
    num_words = len(input_list)
    # divide the dataset into training and testing data
    tstdata, trndata = dataset.splitWithProportion(0.25)
    # build the network
    net = RecurrentNetwork()
    input_layer1 = LinearLayer(num_words, name='input_layer1')
    input_layer2 = LinearLayer(num_words, name='input_layer2')
    hidden_layer = TanhLayer(num_words, name='hidden_layer')
    output_layer = SoftmaxLayer(num_words, name='output_layer')
    net.addInputModule(input_layer1)
    net.addInputModule(input_layer2)
    net.addModule(hidden_layer)
    net.addOutputModule(output_layer)
    net.addConnection(FullConnection(input_layer1, hidden_layer, name='in1_to_hidden'))
    net.addConnection(FullConnection(input_layer2, hidden_layer, name='in2_to_hidden'))
    net.addConnection(FullConnection(hidden_layer, output_layer, name='hidden_to_output'))
    # direct input->output skip connections alongside the hidden path
    net.addConnection(FullConnection(input_layer1, output_layer, name='in1_to_out'))
    net.addConnection(FullConnection(input_layer2, output_layer, name='in2_to_out'))
    net.sortModules()
    # backpropagation
    trainer = BackpropTrainer(net, dataset=trndata, momentum=0.1,
                              verbose=True, weightdecay=0.01)
    # track train/test error after every epoch
    for i in range(10):
        trainer.trainEpochs(1)
        trnresult = percentError(trainer.testOnClassData(), trndata['target'])
        tstresult = percentError(trainer.testOnClassData(dataset=tstdata),
                                 tstdata['target'])
        print("epoch: %4d" % trainer.totalepochs)
        print(" train error: %5.10f%%" % trnresult)
        print(" test error: %5.10f%%" % tstresult)
    return net
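The snippet does not show how network() is called. Below is a hedged sketch of a driver, assuming the dataset is a ClassificationDataSet (an assumption; only splitWithProportion and the 'target' field are actually used) and noting that the two input modules make the network's total input width 2 * num_words.

# Hypothetical driver for network() above (assumed, not from the source)
from pybrain.datasets import ClassificationDataSet

input_list = ['the', 'cat', 'sat']  # toy vocabulary
num_words = len(input_list)
# two input layers of num_words units each -> 2 * num_words input columns
ds = ClassificationDataSet(2 * num_words, 1, nb_classes=num_words)
# ... ds.addSample(two_concatenated_word_vectors, [next_word_index]) per pair ...
net = network(ds, input_list)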
Example 2: training_and_testing
# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import testOnClassData [as alias]
def training_and_testing():
    nn = init_neural_network()
    training = learning.get_labeled_data('%strain-images-idx3-ubyte.gz' % (database_folder),
                                         '%strain-labels-idx1-ubyte.gz' % (database_folder),
                                         '%strainig' % (database_folder))
    test = learning.get_labeled_data('%st10k-images-idx3-ubyte.gz' % (database_folder),
                                     '%st10k-labels-idx1-ubyte.gz' % (database_folder),
                                     '%stest' % (database_folder))
    FEATURES = N_INPUT_LAYER
    print("Features to analyze: %i" % FEATURES)
    testdata = ClassificationDataSet(FEATURES, 1, nb_classes=OUTPUT_LAYER)
    trainingdata = ClassificationDataSet(FEATURES, 1, nb_classes=OUTPUT_LAYER)
    for i in range(len(test['data'])):
        testdata.addSample(test['data'][i], test['label'][i])
    for j in range(len(training['data'])):
        trainingdata.addSample(training['data'][j], training['label'][j])
    # one-hot encode the targets; the integer labels move to the 'class' field
    trainingdata._convertToOneOfMany()
    testdata._convertToOneOfMany()
    trainer = BackpropTrainer(nn, dataset=trainingdata, momentum=MOMENTUM, verbose=True,
                              weightdecay=W_DECAY, learningrate=L_RATE, lrdecay=L_DECAY)
    for i in range(EPOCHS):
        trainer.trainEpochs(1)
        trnresult = percentError(trainer.testOnClassData(),
                                 trainingdata['class'])
        tstresult = percentError(trainer.testOnClassData(dataset=testdata),
                                 testdata['class'])
        print("epoch: %4d" % trainer.totalepochs,
              " train error: %5.2f%%" % trnresult,
              " test error: %5.2f%%" % tstresult)
    return nn
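init_neural_network() and the upper-case constants (database_folder, N_INPUT_LAYER, OUTPUT_LAYER, MOMENTUM, W_DECAY, L_RATE, L_DECAY, EPOCHS) are defined elsewhere in that project. A plausible stand-in for the network factory is sketched below, with an assumed hidden-layer constant N_HIDDEN_LAYER; both the body and that constant are guesses, not from the source.

# Hypothetical stand-in for init_neural_network() (assumed, not from the source)
from pybrain.structure import SoftmaxLayer
from pybrain.tools.shortcuts import buildNetwork

def init_neural_network():
    # N_INPUT_LAYER, N_HIDDEN_LAYER, OUTPUT_LAYER: project-level constants (assumed)
    return buildNetwork(N_INPUT_LAYER, N_HIDDEN_LAYER, OUTPUT_LAYER,
                        outclass=SoftmaxLayer, bias=True)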
Example 3: __init__
# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import testOnClassData [as alias]
# Assumed imports for this example (the snippet itself omits them; `classifier`,
# `mpi`, and the *_atwv / get_predictions_nn helpers are project-local):
import numpy as np
from pybrain.datasets import ClassificationDataSet
from pybrain.structure import SoftmaxLayer
from pybrain.supervised import BackpropTrainer
from pybrain.tools.shortcuts import buildNetwork

class Classifier:
    def __init__(self, Xtrain, Ytrain):
        self._Xtrain = Xtrain
        self._Ytrain = Ytrain
        self.features = Xtrain.keys()

    def Train(self, feat_list=None, type='logreg', gamma=0.0, domeanstd=True,
              special_bias=None, add_bias=True, weight=None, class_instance=None,
              method='sigmoid', factor=10.0, arch=[10],
              cv_feats=None, cv_special_bias=None, cv_class_instance=None):
        if feat_list is None:
            feat_list = self.features
        self.feat_list = feat_list
        self._gamma = gamma
        self._type = type
        self._special_bias = special_bias
        self._add_bias = add_bias
        Xtrain_feats = np.ascontiguousarray(np.hstack([self._Xtrain[feat] for feat in feat_list]))
        self.m, self.std = classifier.feature_meanstd(Xtrain_feats)
        if not domeanstd:  # hacky: overwrite the statistics we just computed
            self.m[:] = 0
            self.std[:] = 1
        Xtrain_feats -= self.m
        Xtrain_feats /= self.std
        if special_bias is not None:
            Xtrain_feats = np.ascontiguousarray(np.hstack((Xtrain_feats, special_bias)))
        # cross-validation features
        if cv_feats is not None:
            cv_feats = np.ascontiguousarray(np.hstack([cv_feats[feat] for feat in feat_list]))
            cv_feats -= self.m
            cv_feats /= self.std
            if special_bias is not None:
                cv_feats = np.ascontiguousarray(np.hstack((cv_feats, cv_special_bias)))
        # classifier stage
        if type == 'linsvm':
            self.w, self.b = classifier.svm_onevsall(Xtrain_feats, self._Ytrain, self._gamma,
                                                     weight=weight, special_bias=special_bias,
                                                     add_bias=add_bias)
            return (self.w, self.b)
        elif type == 'logreg':
            self.w, self.b = l2logreg_onevsall(Xtrain_feats, self._Ytrain, self._gamma,
                                               weight=weight, special_bias=special_bias,
                                               add_bias=add_bias)
            return (self.w, self.b)
        elif type == 'logreg_atwv':
            self.w, self.b = Train_atwv(Xtrain_feats, class_instance=class_instance, weight=weight,
                                        special_bias=special_bias, add_bias=add_bias, method=method,
                                        factor=factor, gamma=self._gamma,
                                        cv_class_instance=cv_class_instance, cv_feats=cv_feats)
        elif type == 'nn_atwv':
            self._arch = arch
            self._weights_nn = Train_atwv_nn(Xtrain_feats, class_instance=class_instance,
                                             weight=weight, special_bias=special_bias,
                                             add_bias=add_bias, arch=self._arch, method=method,
                                             factor=factor, gamma=self._gamma,
                                             cv_class_instance=cv_class_instance, cv_feats=cv_feats)
            # self._weights_nn = Train_atwv_nn(Xtrain_feats, class_instance=class_instance,
            #                                  weight=self._weights_nn, special_bias=special_bias,
            #                                  add_bias=add_bias, arch=self._arch, method=method,
            #                                  factor=factor * 10.0)
        elif type == 'nn_debug':
            if mpi.COMM.Get_size() > 1:
                print('Warning!!! Running NN training with MPI with more than one node!')
            # FIXME: collect X and Y at the root to avoid this
            # prob = mpi.COMM.gather(prob)
            # if mpi.is_root():
            #     np.vstack(prob)
            #     # train
            # mpi.COMM.Bcast(self._nn)
            # mpi.distribute(prob)
            DS = ClassificationDataSet(Xtrain_feats.shape[1], 1, nb_classes=2)
            # bulk-load the arrays instead of looping over addSample:
            # for i in range(Xtrain_feats.shape[0]):
            #     DS.addSample(Xtrain_feats[i, :], [self._Ytrain[i]])
            DS.setField('input', Xtrain_feats)
            DS.setField('target', self._Ytrain[:, np.newaxis])
            DS._convertToOneOfMany()
            self._nn = buildNetwork(DS.indim, 10, DS.outdim, outclass=SoftmaxLayer, fast=True)
            # anneal the learning rate by hand: 0.01 -> 0.001 -> 0.0001
            self._nn_trainer = BackpropTrainer(self._nn, dataset=DS, momentum=0.1, verbose=True,
                                               weightdecay=gamma, learningrate=0.01, lrdecay=1.0)
            self._nn_trainer.trainOnDataset(DS, epochs=8)
            self._nn_trainer = BackpropTrainer(self._nn, dataset=DS, momentum=0.1, verbose=True,
                                               weightdecay=gamma, learningrate=0.001, lrdecay=1.0)
            self._nn_trainer.trainOnDataset(DS, epochs=8)
            self._nn_trainer = BackpropTrainer(self._nn, dataset=DS, momentum=0.1, verbose=True,
                                               weightdecay=gamma, learningrate=0.0001, lrdecay=1.0)
            self._nn_trainer.trainOnDataset(DS, epochs=5)
            return self._nn

    def Accuracy(self, X, Y, special_bias=None):
        X_feats = np.ascontiguousarray(np.hstack([X[feat] for feat in self.feat_list]))
        X_feats -= self.m
        X_feats /= self.std
        if special_bias is not None:
            X_feats = np.ascontiguousarray(np.hstack((X_feats, special_bias)))
        if self._type in ('linsvm', 'logreg', 'logreg_atwv'):
            self.test_accu = classifier.Evaluator.accuracy(Y, np.dot(X_feats, self.w) + self.b)
        elif self._type == 'nn_atwv':
            pred = get_predictions_nn(X_feats, self._weights_nn, arch=[10])[0]
            pred[:, 0] = 0.5
            self.test_accu = classifier.Evaluator.accuracy(Y, pred)
        else:
            DS = ClassificationDataSet(X_feats.shape[1], 1, nb_classes=2)
            # for i in range(X_feats.shape[0]):
            #     DS.addSample(X_feats[i, :], [Y[i]])
            DS.setField('input', X_feats)
            DS.setField('target', Y[:, np.newaxis])
            DS._convertToOneOfMany()
            predict, targts = self._nn_trainer.testOnClassData(DS, verbose=True,
                                                               return_targets=True)
            self.test_accu = np.sum(np.array(predict) == np.array(targts)) / float(len(targts))
        return self.test_accu

    def loss_multiclass_logreg(self, X, Y, special_bias=None):
        X_feats = np.hstack([X[feat] for feat in self.feat_list])
        X_feats -= self.m
        X_feats /= self.std
        if special_bias is not None:
            # ......... the rest of this code is omitted .........
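Two details of the nn_debug branch above deserve a note. DS.setField binds entire arrays in one call, which is far faster than the commented-out addSample loop for large feature matrices (the arrays must already have the right 2-D shapes). And return_targets=True makes testOnClassData return the true labels alongside the predictions, which is how Accuracy computes its score; a minimal sketch of that pattern (trainer and DS standing in for the objects built above):

# Sketch: accuracy via testOnClassData(..., return_targets=True) (illustrative)
predictions, targets = trainer.testOnClassData(DS, return_targets=True)
accuracy = np.mean(np.array(predictions) == np.array(targets))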
Example 4: BackpropTrainer
# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import testOnClassData [as alias]
# (fragment: net, output_layer, input_layer, hidden_layer, trndata, tstdata,
# sorted_list, input_list and text_file are all defined earlier in the script)
net.addOutputModule(output_layer)
net.addConnection(FullConnection(input_layer, hidden_layer, name='in_to_hidden'))
net.addConnection(FullConnection(hidden_layer, output_layer, name='hidden_to_out'))
net.sortModules()
# backpropagation
trainer = BackpropTrainer(net, dataset=trndata, momentum=0.1,
                          verbose=True, weightdecay=0.01)
# track train/test error after every epoch
for i in range(10):
    trainer.trainEpochs(1)
    trnresult = percentError(trainer.testOnClassData(), trndata['target'])
    tstresult = percentError(trainer.testOnClassData(dataset=tstdata),
                             tstdata['target'])
# dump the trigram and word lists to disk
trigram_file = open('trigram.txt', 'w')
trigram_file.writelines(["%s\n" % item for item in sorted_list])
word_file = open('word_list', 'w')
word_file.writelines(["%s\n" % item for item in input_list])
word_file.close()
trigram_file.close()
text_file.close()
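A side note on the file handling at the end of this fragment: the paired open/close calls can be written more robustly with context managers, which close the files even if a write fails. A sketch with the same file names:

# Equivalent file writing with context managers (sketch)
with open('trigram.txt', 'w') as trigram_file:
    trigram_file.writelines("%s\n" % item for item in sorted_list)
with open('word_list', 'w') as word_file:
    word_file.writelines("%s\n" % item for item in input_list)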
Example 5: ClassificationDataSet
# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import testOnClassData [as alias]
# (fragment: plt, np, sys, training_data, training_target, running_tests and
# accuracy_arr are defined earlier in the original script; the plotting lines
# just below sit inside a triple-quoted, i.e. disabled, block that the """ closes)
    leg = plt.legend([dot1, dot2], ['0', '1'])
    ax = plt.gca().add_artist(leg)
    plt.show()
    """
elif sys.argv[1] == 'nn':
    DS = ClassificationDataSet(165)
    training_data = np.array(training_data)
    training_target = np.vstack(np.array(training_target))
    print(len(training_data[0]))
    # print(len(training_target[0]))
    assert training_data.shape[0] == training_target.shape[0]
    DS.setField('input', training_data)
    DS.setField('target', training_target)
    tstdata, trndata = DS.splitWithProportion(0.15)
    hidden_layer_neurons = (DS.indim + DS.outdim) // 2
    rnn = buildNetwork(DS.indim, hidden_layer_neurons, DS.outdim, hiddenclass=LSTMLayer,
                       outclass=SigmoidLayer, outputbias=False, recurrent=True)
    # print(hidden_layer_neurons)
    # define a training method
    trainer = BackpropTrainer(rnn, dataset=trndata, verbose=True)
    trainer.trainUntilConvergence(verbose=True, validationProportion=0.3,
                                  maxEpochs=1000, continueEpochs=10)
    print('Percent Error on Test dataset: ',
          percentError(trainer.testOnClassData(tstdata, verbose=True), tstdata['target']))
    # print('Percent Error on Training dataset: ',
    #       percentError(trainer.testOnClassData(trndata), trndata['target']))
else:
    print("Current classifier algorithms available: svm, knn, dt, kmeans, nn")
    sys.exit(1)
if running_tests:
    print("\nMax Accuracy: " + str(max(accuracy_arr)))