This article collects typical usage examples of the Python method pybrain.supervised.BackpropTrainer.trainOnDataset. If you have been wondering how exactly to use BackpropTrainer.trainOnDataset, or are looking for real-world examples of it, the selected code examples below may help. You can also explore further usage examples of its containing class, pybrain.supervised.BackpropTrainer.
The following shows 15 code examples of the BackpropTrainer.trainOnDataset method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
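Before the examples, here is a minimal sketch of the basic call pattern. It is an illustrative toy (a small XOR-style dataset and an arbitrary 2-4-1 network), not taken from any of the examples below, and it assumes PyBrain is installed:

from pybrain.datasets import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised import BackpropTrainer

# toy XOR-style dataset: 2 inputs, 1 output
ds = SupervisedDataSet(2, 1)
ds.addSample((0, 0), (0,))
ds.addSample((0, 1), (1,))
ds.addSample((1, 0), (1,))
ds.addSample((1, 1), (0,))

net = buildNetwork(ds.indim, 4, ds.outdim)               # 2-4-1 feedforward network
trainer = BackpropTrainer(net, learningrate=0.01, momentum=0.9)
trainer.trainOnDataset(ds, 1000)                         # attach ds and train for 1000 epochs
trainer.testOnData(ds, verbose=True)                     # print per-sample error on ds

trainOnDataset(dataset, N) simply sets the trainer's dataset and then trains for N epochs; the examples below all follow this pattern with their own networks and datasets.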
Example 1: initializeNetwork

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import trainOnDataset [as alias]
def initializeNetwork(self):
    can1 = NNTrainData.NNTrainData(cv2.imread('NNTrain/can1.png'), self.encodingDict["can"])
    can2 = NNTrainData.NNTrainData(cv2.imread('NNTrain/can2.png'), self.encodingDict["can"])
    can3 = NNTrainData.NNTrainData(cv2.imread('NNTrain/can3.png'), self.encodingDict["can"])
    stain1 = NNTrainData.NNTrainData(cv2.imread('NNTrain/stain1.png'), self.encodingDict["stain"])
    stain2 = NNTrainData.NNTrainData(cv2.imread('NNTrain/stain2.png'), self.encodingDict["stain"])
    stain3 = NNTrainData.NNTrainData(cv2.imread('NNTrain/stain3.png'), self.encodingDict["stain"])
    dirt1 = NNTrainData.NNTrainData(cv2.imread('NNTrain/dirt1.png'), self.encodingDict["dirt"])
    dirt2 = NNTrainData.NNTrainData(cv2.imread('NNTrain/dirt2.png'), self.encodingDict["dirt"])
    dirt3 = NNTrainData.NNTrainData(cv2.imread('NNTrain/dirt3.png'), self.encodingDict["dirt"])

    self.trainData.append(can1)
    self.trainData.append(can2)
    self.trainData.append(can3)
    self.trainData.append(stain1)
    self.trainData.append(stain2)
    self.trainData.append(stain3)
    self.trainData.append(dirt1)
    self.trainData.append(dirt2)
    self.trainData.append(dirt3)

    for x in self.trainData:
        x.prepareTrainData()

    self.net = buildNetwork(4, 3, 3, hiddenclass=TanhLayer, outclass=SoftmaxLayer)

    ds = SupervisedDataSet(4, 3)
    for x in self.trainData:
        ds.addSample((x.contours/100.0, x.color[0]/1000.0, x.color[1]/1000.0, x.color[2]/1000.0), x.output)

    trainer = BackpropTrainer(self.net, momentum=0.1, verbose=True, weightdecay=0.01)
    trainer.trainOnDataset(ds, 1000)
    trainer.testOnData(verbose=True)

    print "\nSiec nauczona\n"  # Polish: "network trained"
Example 2: trainedRNN

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import trainOnDataset [as alias]
def trainedRNN():
    n = RecurrentNetwork()

    n.addInputModule(LinearLayer(4, name='in'))
    n.addModule(SigmoidLayer(6, name='hidden'))
    n.addOutputModule(LinearLayer(2, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
    n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
    n.addRecurrentConnection(NMConnection(n['out'], n['out'], name='nmc'))
    # n.addRecurrentConnection(FullConnection(n['out'], n['hidden'], inSliceFrom=0, inSliceTo=1, outSliceFrom=0, outSliceTo=3))
    n.sortModules()

    draw_connections(n)
    d = getDatasetFromFile(root.path()+"/res/dataSet")
    t = BackpropTrainer(n, d, learningrate=0.001, momentum=0.75)
    t.trainOnDataset(d)

    count = 0
    while True:
        globErr = t.train()
        print globErr
        if globErr < 0.01:
            break
        count += 1
        if count == 50:
            return trainedRNN()

    # exportRNN(n)
    draw_connections(n)

    return n
Example 3: generate_and_test_nn

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import trainOnDataset [as alias]
def generate_and_test_nn():
    d = load_training_set()
    n = buildNetwork(d.indim, 13, d.outdim, hiddenclass=LSTMLayer, outclass=SoftmaxLayer, outputbias=False, recurrent=True)
    t = BackpropTrainer(n, learningrate=0.01, momentum=0.99, verbose=True)
    t.trainOnDataset(d, 1000)
    t.testOnData(verbose=True)
    return (n, d)
Example 4: testOldTraining

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import trainOnDataset [as alias]
def testOldTraining(hidden=15, n=None):
    d = XORDataSet()
    if n is None:
        n = buildNetwork(d.indim, hidden, d.outdim, recurrent=False)
    t = BackpropTrainer(n, learningrate=0.01, momentum=0., verbose=False)
    t.trainOnDataset(d, 250)
    t.testOnData(verbose=True)
Example 5: trainedANN

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import trainOnDataset [as alias]
def trainedANN():
    n = FeedForwardNetwork()

    n.addInputModule(LinearLayer(4, name='in'))
    n.addModule(SigmoidLayer(6, name='hidden'))
    n.addOutputModule(LinearLayer(2, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
    n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
    n.sortModules()

    draw_connections(n)
    # d = generateTrainingData()
    d = getDatasetFromFile(root.path()+"/res/dataSet")
    t = BackpropTrainer(n, d, learningrate=0.001, momentum=0.75)
    t.trainOnDataset(d)
    # FIXME: I'm not sure the recurrent ANN is going to converge
    # so just training for fixed number of epochs

    count = 0
    while True:
        globErr = t.train()
        print globErr
        if globErr < 0.01:
            break
        count += 1
        if count == 20:
            return trainedANN()

    exportANN(n)
    draw_connections(n)

    return n
Example 6: main

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import trainOnDataset [as alias]
def main():
    print '----- loading train/test datasets -----'
    train_ds, test_ds = create_datasets()
    print '----- building the network -----'
    net = ann_network()
    trainer = BackpropTrainer(net, learningrate=0.1, momentum=0.1, verbose=True)
    print '----- training the model -----'
    trainer.trainOnDataset(train_ds)
Example 7: execute

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import trainOnDataset [as alias]
def execute(self):
    network = self.networkFactoryMethod()
    trainer = BackpropTrainer(network, learningrate=self.learningrate, momentum=self.momentum)
    trainer.trainOnDataset(self.datasetForTraining, self.epochs)
    averageError = trainer.testOnData(self.datasetForTest)
    self.collectedErrors.append(averageError)
    return averageError
Example 8: testTraining

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import trainOnDataset [as alias]
def testTraining():
    d = PrimesDataSet()
    d._convertToOneOfMany()
    n = buildNetwork(d.indim, 8, d.outdim, recurrent=True)
    t = BackpropTrainer(n, learningrate=0.01, momentum=0.99, verbose=True)
    t.trainOnDataset(d, 1000)
    t.testOnData(verbose=True)
    for i in range(15):
        print "Guess: %s || Real: %s" % (str(n.activate(i)), str(i in d.generatePrimes(10)))
    print d
Example 9: testTraining

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import trainOnDataset [as alias]
def testTraining():
    print "Reading data"
    d = XORDataSet()
    traind, testd = d.splitWithProportion(0.8)
    print "Building network"
    n = buildNetwork(traind.indim, 4, traind.outdim, recurrent=True)
    print "Training"
    t = BackpropTrainer(n, learningrate=0.01, momentum=0.99, verbose=True)
    t.trainOnDataset(traind, 100)
    testd = XORDataSet(begin=60000, end=80000)
    print t.module.params
    t.testOnData(testd, verbose=True)
Example 10: __init__

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import trainOnDataset [as alias]
def __init__(self, stock_to_predict, days_of_prediction=10, days_of_training=450):
    self.number_of_days_before = 8
    self.days_of_prediction = days_of_prediction
    self.downloader = StockDownloader()
    stock_training_data = self.downloader.download_stock(stock_to_predict, days_of_training, days_of_prediction)
    self.stock_prediction_data = self.downloader.download_stock(stock_to_predict, days_of_prediction)
    self.starting_price = self.stock_prediction_data[0]

    self.dataset = StockSupervisedDataSet(self.number_of_days_before, stock_training_data)
    self.network = buildNetwork(self.dataset.indim, 10, self.dataset.outdim, recurrent=True)
    t = BackpropTrainer(self.network, learningrate=0.00005, momentum=0., verbose=True)
    t.trainOnDataset(self.dataset, 200)
    t.testOnData(verbose=True)
    self.starting_prices = self.dataset['input'][-1]
Example 11: __init__

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import trainOnDataset [as alias]
def __init__(self):
    self.code = {
        'cat': [1, 0, 0],
        'dust': [0, 1, 0],
        'water': [0, 0, 1]
    }
    pack = 'media.images_train'
    train_data = [
        (Neuron(load(file_path(pack, 'cat1.png'))), self.code['cat']),
        (Neuron(load(file_path(pack, 'cat2.png'))), self.code['cat']),
        (Neuron(load(file_path(pack, 'cat3.png'))), self.code['cat']),
        (Neuron(load(file_path(pack, 'dust1.png'))), self.code['dust']),
        (Neuron(load(file_path(pack, 'dust2.png'))), self.code['dust']),
        (Neuron(load(file_path(pack, 'dust3.png'))), self.code['dust']),
        (Neuron(load(file_path(pack, 'water1.png'))), self.code['water']),
        (Neuron(load(file_path(pack, 'water2.png'))), self.code['water']),
        (Neuron(load(file_path(pack, 'water3.png'))), self.code['water']),
    ]

    for x, output in train_data:
        x.prepare()

    self.net = buildNetwork(
        4, 3, 3, hiddenclass=TanhLayer, outclass=SoftmaxLayer
    )

    data = SupervisedDataSet(4, 3)
    for x, output in train_data:
        data.addSample(
            (
                x.contours / 100.0, x.color[0] / 1000.0,
                x.color[1] / 1000.0, x.color[2] / 1000.0,
            ),
            output
        )

    trainer = BackpropTrainer(
        self.net, momentum=0.1, verbose=True, weightdecay=0.01
    )
    trainer.trainOnDataset(data, 1000)  # 1000 epochs
    trainer.testOnData(verbose=True)
Example 12: testTraining

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import trainOnDataset [as alias]
def testTraining():
    # the AnBnCn dataset (sequential)
    d = AnBnCnDataSet()

    # build a recurrent network to be trained
    hsize = 2
    n = RecurrentNetwork()
    n.addModule(TanhLayer(hsize, name='h'))
    n.addModule(BiasUnit(name='bias'))
    n.addOutputModule(LinearLayer(1, name='out'))
    n.addConnection(FullConnection(n['bias'], n['h']))
    n.addConnection(FullConnection(n['h'], n['out']))
    n.addRecurrentConnection(FullConnection(n['h'], n['h']))
    n.sortModules()

    # initialize the backprop trainer and train
    t = BackpropTrainer(n, learningrate=0.1, momentum=0.0, verbose=True)
    t.trainOnDataset(d, 200)

    # the resulting weights are in the network:
    print 'Final weights:', n.params
Example 13: buildNetwork

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import trainOnDataset [as alias]
trainIn = []
for x in row[:numberOfInputs]:
    trainIn.append(x)
trainOut = []
for x in row[numberOfInputs:]:
    trainOut.append(x)
d.appendLinked(trainIn, trainOut)

# build a neural network; the second argument (3) is the number of units in the single hidden layer
n = buildNetwork(d.indim, 3, d.outdim, recurrent=True)
# configure the trainer
t = BackpropTrainer(n, learningrate=0.01, momentum=0.99, verbose=True)
# split the data randomly into 75% training - 25% testing
train, test = d.splitWithProportion(0.75)
print "{} - {}".format(len(train), len(test))
# train on the training split for a fixed number of epochs (10)
t.trainOnDataset(train, 10)
# evaluate on the held-out test split
t.testOnData(test, verbose=True)

# try the same task but with a different training method
net = buildNetwork(d.indim, 3, d.outdim, bias=True, hiddenclass=TanhLayer)
trainer = BackpropTrainer(net, d)
trainer.trainUntilConvergence(verbose=True)
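Note the contrast in Example 13: trainOnDataset(train, 10) runs a fixed number of epochs over the dataset you pass in, whereas trainUntilConvergence holds out part of its dataset for validation and keeps training until the validation error stops improving.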
Example 14: testTraining

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import trainOnDataset [as alias]
def testTraining():
    d = SequentialXORDataSet()
    n = buildNetwork(d.indim, 4, d.outdim, recurrent=True)
    t = BackpropTrainer(n, learningrate=0.01, momentum=0.99, verbose=True)
    t.trainOnDataset(d, 1000)
    t.testOnData(verbose=True)
Example 15: __init__

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import trainOnDataset [as alias]
class Classifier:
    def __init__(self, Xtrain, Ytrain):
        self._Xtrain = Xtrain
        self._Ytrain = Ytrain
        self.features = Xtrain.keys()

    def Train(self, feat_list=None, type='logreg', gamma=0.0, domeanstd=True, special_bias=None, add_bias=True, weight=None, class_instance=None, method='sigmoid', factor=10.0, arch=[10],
              cv_feats=None, cv_special_bias=None, cv_class_instance=None):
        if feat_list == None:
            feat_list = self.features
        self.feat_list = feat_list
        self._gamma = gamma
        self._type = type
        self._special_bias = special_bias
        self._add_bias = add_bias
        Xtrain_feats = np.ascontiguousarray(np.hstack((self._Xtrain[feat] for feat in feat_list)))
        self.m, self.std = classifier.feature_meanstd(Xtrain_feats)
        if domeanstd == False:  # hacky, overwrite the things we computed
            self.m[:] = 0
            self.std[:] = 1
        Xtrain_feats -= self.m
        Xtrain_feats /= self.std
        if special_bias != None:
            Xtrain_feats = np.ascontiguousarray(np.hstack((Xtrain_feats, special_bias)))
        # CV
        if cv_feats != None:
            cv_feats = np.ascontiguousarray(np.hstack((cv_feats[feat] for feat in feat_list)))
            cv_feats -= self.m
            cv_feats /= self.std
            if special_bias != None:
                cv_feats = np.ascontiguousarray(np.hstack((cv_feats, cv_special_bias)))
        '''Classifier stage'''
        if type == 'linsvm':
            self.w, self.b = classifier.svm_onevsall(Xtrain_feats, self._Ytrain, self._gamma, weight=weight, special_bias=special_bias, add_bias=add_bias)
            return (self.w, self.b)
        elif type == 'logreg':
            self.w, self.b = l2logreg_onevsall(Xtrain_feats, self._Ytrain, self._gamma, weight=weight, special_bias=special_bias, add_bias=add_bias)
            return (self.w, self.b)
        elif type == 'logreg_atwv':
            self.w, self.b = Train_atwv(Xtrain_feats, class_instance=class_instance, weight=weight, special_bias=special_bias, add_bias=add_bias, method=method,
                                        factor=factor, gamma=self._gamma, cv_class_instance=cv_class_instance, cv_feats=cv_feats)
        elif type == 'nn_atwv':
            self._arch = arch
            self._weights_nn = Train_atwv_nn(Xtrain_feats, class_instance=class_instance, weight=weight, special_bias=special_bias, add_bias=add_bias,
                                             arch=self._arch, method=method, factor=factor, gamma=self._gamma, cv_class_instance=cv_class_instance, cv_feats=cv_feats)
            # self._weights_nn = Train_atwv_nn(Xtrain_feats, class_instance=class_instance, weight=self._weights_nn, special_bias=special_bias, add_bias=add_bias,
            #                                  arch=self._arch, method=method, factor=factor*10.0)
        elif type == 'nn_debug':
            if mpi.COMM.Get_size() > 1:
                print 'Warning!!! Running NN training with MPI with more than one Node!'
            # FIXME: Collect X and Y at root to avoid this
            # prob = mpi.COMM.gather(prob)
            # if mpi.is_root():
            #     np.vstack(prob)
            #     # Train
            #     mpi.COMM.Bcast(self._nn)
            #     mpi.distribute(prob)
            DS = ClassificationDataSet(Xtrain_feats.shape[1], 1, nb_classes=2)
            # for i in range(Xtrain_feats.shape[0]):
            #     DS.addSample(Xtrain_feats[i, :], [self._Ytrain[i]])
            DS.setField('input', Xtrain_feats)
            DS.setField('target', self._Ytrain[:, np.newaxis])
            DS._convertToOneOfMany()
            self._nn = buildNetwork(DS.indim, 10, DS.outdim, outclass=SoftmaxLayer, fast=True)
            self._nn_trainer = BackpropTrainer(self._nn, dataset=DS, momentum=0.1, verbose=True, weightdecay=gamma, learningrate=0.01, lrdecay=1.0)
            self._nn_trainer.trainOnDataset(DS, epochs=8)
            self._nn_trainer = BackpropTrainer(self._nn, dataset=DS, momentum=0.1, verbose=True, weightdecay=gamma, learningrate=0.001, lrdecay=1.0)
            self._nn_trainer.trainOnDataset(DS, epochs=8)
            self._nn_trainer = BackpropTrainer(self._nn, dataset=DS, momentum=0.1, verbose=True, weightdecay=gamma, learningrate=0.0001, lrdecay=1.0)
            self._nn_trainer.trainOnDataset(DS, epochs=5)
            return self._nn
    def Accuracy(self, X, Y, special_bias=None):
        X_feats = np.ascontiguousarray(np.hstack((X[self.feat_list[i]] for i in range(len(self.feat_list)))))
        X_feats -= self.m
        X_feats /= self.std
        if special_bias != None:
            X_feats = np.ascontiguousarray(np.hstack((X_feats, special_bias)))
        if self._type == 'linsvm' or self._type == 'logreg' or self._type == 'logreg_atwv':
            self.test_accu = classifier.Evaluator.accuracy(Y, np.dot(X_feats, self.w) + self.b)
        elif self._type == 'nn_atwv':
            pred = get_predictions_nn(X_feats, self._weights_nn, arch=[10])[0]
            pred[:, 0] = 0.5
            self.test_accu = classifier.Evaluator.accuracy(Y, pred)
        else:
            DS = ClassificationDataSet(X_feats.shape[1], 1, nb_classes=2)
            # for i in range(X_feats.shape[0]):
            #     DS.addSample(X_feats[i, :], [Y[i]])
            DS.setField('input', X_feats)
            DS.setField('target', Y[:, np.newaxis])
            DS._convertToOneOfMany()
            predict, targts = self._nn_trainer.testOnClassData(DS, verbose=True, return_targets=True)
            self.test_accu = np.sum(np.array(predict) == np.array(targts)) / float(len(targts))
        return self.test_accu
    def loss_multiclass_logreg(self, X, Y, special_bias=None):
        X_feats = np.hstack((X[self.feat_list[i]] for i in range(len(self.feat_list))))
        X_feats -= self.m
        X_feats /= self.std
        if special_bias != None:
            # ......... the rest of this code is omitted here .........