This article collects typical usage examples of the Python method pybrain.supervised.trainers.RPropMinusTrainer.trainEpochs. If you are unsure what RPropMinusTrainer.trainEpochs does or how to call it, the curated code samples below may help. You can also read more about its containing class, pybrain.supervised.trainers.RPropMinusTrainer.
Eleven code examples of RPropMinusTrainer.trainEpochs are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
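Before the examples, here is a minimal sketch of the basic call pattern. It is illustrative only: the XOR toy dataset, the 2-4-1 network size, and the epoch count are assumptions, not taken from any of the examples below.

# Minimal usage sketch (assumptions: XOR toy data, a 2-4-1 network, 50 epochs).
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import RPropMinusTrainer

# Build a tiny supervised dataset: 2 inputs, 1 target (XOR).
ds = SupervisedDataSet(2, 1)
for inp, target in [((0, 0), (0,)), ((0, 1), (1,)), ((1, 0), (1,)), ((1, 1), (0,))]:
    ds.addSample(inp, target)

net = buildNetwork(2, 4, 1)
trainer = RPropMinusTrainer(net, dataset=ds, verbose=True)
trainer.trainEpochs(50)        # run exactly 50 training epochs
print net.activate((1, 0))     # inspect one prediction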
Example 1: partial_fit
# Required import: from pybrain.supervised.trainers import RPropMinusTrainer [as alias]
# Or: from pybrain.supervised.trainers.RPropMinusTrainer import trainEpochs [as alias]
def partial_fit(self, X, y):
    """
    Additional training of the estimator.

    :param pandas.DataFrame X: data of shape [n_samples, n_features]
    :param y: labels of events, array-like of shape [n_samples]
    :return: self
    """
    dataset = self._prepare_dataset(X, y, self._model_type)
    if not self.is_fitted():
        self._prepare_net(dataset=dataset, model_type=self._model_type)
    if self.use_rprop:
        trainer = RPropMinusTrainer(self.net,
                                    etaminus=self.etaminus,
                                    etaplus=self.etaplus,
                                    deltamin=self.deltamin,
                                    deltamax=self.deltamax,
                                    delta0=self.delta0,
                                    dataset=dataset,
                                    learningrate=self.learningrate,
                                    lrdecay=self.lrdecay,
                                    momentum=self.momentum,
                                    verbose=self.verbose,
                                    batchlearning=self.batchlearning,
                                    weightdecay=self.weightdecay)
    else:
        trainer = BackpropTrainer(self.net,
                                  dataset,
                                  learningrate=self.learningrate,
                                  lrdecay=self.lrdecay,
                                  momentum=self.momentum,
                                  verbose=self.verbose,
                                  batchlearning=self.batchlearning,
                                  weightdecay=self.weightdecay)
    if self.epochs < 0:
        trainer.trainUntilConvergence(maxEpochs=self.max_epochs,
                                      continueEpochs=self.continue_epochs,
                                      verbose=self.verbose,
                                      validationProportion=self.validation_proportion)
    else:
        trainer.trainEpochs(epochs=self.epochs)
    return self
Example 2: train_net
# Required import: from pybrain.supervised.trainers import RPropMinusTrainer [as alias]
# Or: from pybrain.supervised.trainers.RPropMinusTrainer import trainEpochs [as alias]
def train_net():
    fnn = buildNetwork(len(input_args), 3, 2)
    ds = ClassificationDataSet(len(input_args), 2, nb_classes=2)
    ds = generate_data(ds, hour_to_use_app=10)
    trainer = RPropMinusTrainer(fnn, dataset=ds, verbose=True)
    trainer.train()
    trainer.trainEpochs(15)
    test = ClassificationDataSet(4, 2)
    test.addSample((12, 6, 10, 6), [1, 0])
    test.addSample((12, 1, 7, 2), [0, 1])
    test.addSample((12, 3, 11, 1), [0, 1])
    fnn.activateOnDataset(test)
    return fnn, trainer, ds, test
Example 3: generate_data
# Required import: from pybrain.supervised.trainers import RPropMinusTrainer [as alias]
# Or: from pybrain.supervised.trainers.RPropMinusTrainer import trainEpochs [as alias]
training_dataset._convertToOneOfMany(bounds=[0, 1])
# same for the independent test data set
testing_dataset = generate_data(test=True)
testing_dataset._convertToOneOfMany(bounds=[0, 1])
# build a recurrent network with 15 LSTM hidden units, plus a corresponding trainer
# fnn = buildNetwork(training_dataset.indim, 15, 15, training_dataset.outdim, outclass=SoftmaxLayer)
fnn = buildNetwork(training_dataset.indim, 15, training_dataset.outdim,
                   hiddenclass=LSTMLayer, outclass=SoftmaxLayer,
                   outputbias=False, recurrent=True)
trainer = RPropMinusTrainer(fnn, dataset=training_dataset, verbose=True)
# trainer = BackpropTrainer(fnn, dataset=training_dataset, verbose=True)
for i in range(500):
    # train the network for 15 epochs per iteration
    trainer.trainEpochs(15)
    # evaluate the result on the training and test data
    trnresult = percentError(trainer.testOnClassData(),
                             training_dataset['class'])
    tstresult = percentError(trainer.testOnClassData(dataset=testing_dataset),
                             testing_dataset['class'])
    # print the result
    print "epoch: %4d" % trainer.totalepochs, \
        " train error: %5.2f%%" % trnresult, \
        " test error: %5.2f%%" % tstresult
    if tstresult <= 0.5:
        print 'Bingo !!!!!!!!!!!!!!!!!!!!!!'
        break
Example 4: load_snd
# Required import: from pybrain.supervised.trainers import RPropMinusTrainer [as alias]
# Or: from pybrain.supervised.trainers.RPropMinusTrainer import trainEpochs [as alias]
# clock
snd_20043 = load_snd(20043)
append2DS(DS, snd_20043, 2, nClasses)
# fnn = buildNetwork(1, 15, 5, hiddenclass=LSTMLayer, outclass=SoftmaxLayer, outputbias=False, recurrent=True)
fnn = buildNetwork(1, 1, nClasses, hiddenclass=LSTMLayer, outclass=TanhLayer,
                   outputbias=False, recurrent=True)
# Create a trainer for backprop and train the net.
# trainer = BackpropTrainer(fnn, DStrain, learningrate=0.005)
trainer = RPropMinusTrainer(fnn, dataset=DS, verbose=True)
for i in range(4):
    # train the network for 1 epoch
    trainer.trainEpochs(1)
    print trainer.train()

fnn.reset()
summed = numpy.zeros(nClasses)
for sample in snd_18768:
    summed += fnn.activate([sample])
print summed / len(snd_18768)

fnn.reset()
summed = numpy.zeros(nClasses)
for sample in snd_21649:
    summed += fnn.activate([sample])
print summed / len(snd_21649)

fnn.reset()
Example 5: train
# Required import: from pybrain.supervised.trainers import RPropMinusTrainer [as alias]
# Or: from pybrain.supervised.trainers.RPropMinusTrainer import trainEpochs [as alias]
def train(self, epoch):
    self.ds._convertToOneOfMany()
    trainer = RPropMinusTrainer(self.net, dataset=self.ds, momentum=0.1,
                                verbose=True, weightdecay=0.01)
    trainer.trainEpochs(epoch)
Example 6: BnTNN
# Required import: from pybrain.supervised.trainers import RPropMinusTrainer [as alias]
# Or: from pybrain.supervised.trainers.RPropMinusTrainer import trainEpochs [as alias]
def BnTNN(ds, hiddensize, epoches):
    net = buildNetwork(ds.indim, hiddensize, ds.outdim, outclass=SoftmaxLayer)
    st = time()
    trainer = RPropMinusTrainer(net, dataset=ds, momentum=0.1, verbose=True,
                                weightdecay=0.01)
    trainer.trainEpochs(epoches)
    return net
Example 7: RPropMinusTrainer
# Required import: from pybrain.supervised.trainers import RPropMinusTrainer [as alias]
# Or: from pybrain.supervised.trainers.RPropMinusTrainer import trainEpochs [as alias]
    nHidden,
    segments,
    hiddenclass=LSTMLayer,
    outclass=TanhLayer,
    outputbias=True,
    recurrent=True,
    peepholes=False,
    fast=False,
)
trainer = RPropMinusTrainer(fnn, dataset=DS, verbose=True)
print "Begin training..."
# Train the network
trainer.trainEpochs(nEpoch)
# Store the encoder
file = open("autoencoder-%i.xml" % (nHidden), "w")
pickle.dump(fnn, file)
file.close()
# Show an example
id = 86464
cochleogram = data.load_cochleogram(id)
fnn.reset()
haha = numpy.zeros(cochleogram.shape, dtype=numpy.float32)
for i in range(cochleogram.shape[1]):
    output = fnn.activate(cochleogram[:, i].T.tolist()[0])
    haha[:, i] = output
Example 8: train_network
# Required import: from pybrain.supervised.trainers import RPropMinusTrainer [as alias]
# Or: from pybrain.supervised.trainers.RPropMinusTrainer import trainEpochs [as alias]
def train_network(options_file_location, training_data_location, output_location):
    training_file_handle = open(training_data_location, "r")
    training_reader = csv.reader(training_file_handle)

    stdout_file = output_location + 'training_console_output.txt'
    stderr_file = output_location + 'training_console_errput.txt'
    sys.stdout = open(stdout_file, "w")
    sys.stderr = open(stderr_file, "w")

    options_file_handle = open(options_file_location, 'r')
    options_dictionary = {}
    for option in options_file_handle.readlines():
        key, val = option.split('=')
        print key
        print val
        options_dictionary[key] = val

    num_predictors = int(options_dictionary['num_predictors'])
    num_outputs = int(options_dictionary['num_outputs'])
    num_training_epochs = int(options_dictionary['num_training_epochs'])
    num_hidden_neurons = int(options_dictionary['num_hidden_neurons'])
    num_classes = int(options_dictionary['num_classes'])
    hidden_neuron_type_str = options_dictionary['hidden_neuron_type']
    output_neuron_type_str = options_dictionary['output_neuron_type']
    hidden_layer_type, output_layer_type = net_topol.get_layer_types(options_dictionary)

    training_dataset = SequenceClassificationDataSet(num_predictors, 1, num_classes)
    previous_sequence_number = 1

    # read data into dataset objects
    print 'reading in training data...'
    for row in training_reader:
        # convert list of strings to list of floats
        list = [float(s) for s in row]
        # split input line
        predictors = list[0:num_predictors]
        # +1 is to skip over the sequence column
        outputs = list[num_predictors + 1:num_predictors + 1 + num_outputs]
        # convert from python list to numpy array
        predictors = np.array(predictors)
        outputs = np.array(outputs)
        sequence_number = math.trunc(list[num_predictors])
        if not sequence_number == previous_sequence_number:
            training_dataset.newSequence()
            previous_sequence_number = sequence_number
        # add to dataset
        training_dataset.appendLinked(predictors, outputs)

    network = shortcuts.buildNetwork(num_predictors, num_hidden_neurons, num_outputs,
                                     hiddenclass=LSTMLayer, outclass=SoftmaxLayer)
    network.sortModules()
    training_dataset._convertToOneOfMany()
    print str(network)
    print str(training_dataset)

    trainer = RPropMinusTrainer(module=network, dataset=training_dataset)
    for i in range(num_training_epochs):
        print 'Starting training epoch: ' + str(i)
        trainer.trainEpochs(1)
        sys.stdout.flush()

    network_file_location = output_location + 'trained_network.xml'
    NetworkWriter.writeToFile(network, network_file_location)

    done_file_handle = open(output_location + 'training_done.txt', "w")
    done_file_handle.write('%s' % 'done!')
    done_file_handle.close()
Example 9: open
# Required import: from pybrain.supervised.trainers import RPropMinusTrainer [as alias]
# Or: from pybrain.supervised.trainers.RPropMinusTrainer import trainEpochs [as alias]
f = open('___learned3', 'r')
net = pickle.load(f)
f.close()
print 'loaded from file', time() - st
except:
'''
# exit()
if True:
    st = time()
    trainer = RPropMinusTrainer(net, dataset=ds, momentum=0.1, verbose=True, weightdecay=0.01)
    # trainer = BackpropTrainer(net, dataset=ds, momentum=0.1, verbose=True, weightdecay=0.01)
    # trainer.trainOnDataset(ds, 10)
    # trainer.testOnData()
    print 'start train'
    trainer.trainEpochs(100)
    # trainer.trainUntilConvergence()
    print 'Learning time:', time() - st
    f = open('___learned3', 'w')
    pickle.dump(net, f)
    f.close()

# print net.activate(FeaturesFromFile('pos/5.jpg'))
counter = 0
st = time()
for i in xrange(40000, 50000):
    image = cv.LoadImage(posnames[i], cv.CV_LOAD_IMAGE_GRAYSCALE)
    res = net.activate(FeaturesFromImg(image, size)).argmax()
    # print i, 'pos', res
    if res == 1:
Example 10: sequence_classifier
# Required import: from pybrain.supervised.trainers import RPropMinusTrainer [as alias]
# Or: from pybrain.supervised.trainers.RPropMinusTrainer import trainEpochs [as alias]
class sequence_classifier():
    num_time_steps = 5

    def __init__(self, ds):
        num_inputs = ds.num_features * sequence_classifier.num_time_steps
        self.alldata = SequenceClassificationDataSet(num_inputs,
                                                     target=1,
                                                     nb_classes=ds.get_num_classes(),
                                                     class_labels=ds.get_classes())
        for Idx in range(len(ds.all_moves)):
            if not (Idx + sequence_classifier.num_time_steps < len(ds.all_moves)):
                continue
            class_first = ds.all_moves[Idx].class_
            features = []
            for i in range(sequence_classifier.num_time_steps):
                features = features + ds.all_moves[Idx + i].get_features()
            class_last = ds.all_moves[Idx + sequence_classifier.num_time_steps].class_
            if class_first == class_last:
                self.alldata.appendLinked(features, [ds.get_classes().index(ds.all_moves[Idx].class_)])
                self.alldata.newSequence()

        self.tstdata, self.trndata = self.alldata.splitWithProportion(0.25)
        self.trndata._convertToOneOfMany()
        self.tstdata._convertToOneOfMany()

        # bookkeeping used by evaluate()
        self.correct_perc = []
        self.max_ratio = 0.0

        self.seq_rnn = None  # buildNetwork(num_inputs, 2, self.trndata.outdim, hiddenclass=LSTMLayer, recurrent=True, outclass=SoftmaxLayer)
        self.create_network(num_inputs)
        self.trainer = RPropMinusTrainer(module=self.seq_rnn, dataset=self.trndata)

    def create_network(self, num_inputs):
        self.seq_rnn = RecurrentNetwork()
        in_layer = LinearLayer(num_inputs)
        hidden_LSTM = LSTMLayer(24)
        hidden_layer_0 = LinearLayer(12)
        hidden_layer_1 = SigmoidLayer(12)
        output_layer = LinearLayer(self.trndata.outdim)

        self.seq_rnn.addInputModule(in_layer)
        self.seq_rnn.addModule(hidden_LSTM)
        self.seq_rnn.addModule(hidden_layer_0)
        self.seq_rnn.addModule(hidden_layer_1)
        self.seq_rnn.addOutputModule(output_layer)

        # Now add the connections:
        in_to_LSTM = FullConnection(in_layer, hidden_LSTM)
        LSTM_to_h0 = FullConnection(hidden_LSTM, hidden_layer_0)
        in_to_h0 = FullConnection(in_layer, hidden_layer_0)
        h0_to_h1 = FullConnection(hidden_layer_0, hidden_layer_1)
        h1_to_out = FullConnection(hidden_layer_1, output_layer)

        self.seq_rnn.addConnection(in_to_LSTM)
        self.seq_rnn.addConnection(LSTM_to_h0)
        self.seq_rnn.addConnection(in_to_h0)
        self.seq_rnn.addConnection(h0_to_h1)
        self.seq_rnn.addConnection(h1_to_out)
        self.seq_rnn.sortModules()

    def start_training(self):
        f = open("./results/seq_rnn_perf.txt", "w")
        for i in range(200):
            print "training step: ", i
            self.trainer.trainEpochs(1)
            err = self.evaluate()
            f.write(str(err) + ",")
            f.flush()
        f.close()

    def evaluate(self):
        print "epoch:", self.trainer.totalepochs
        correct = 0
        wrong = 0
        self.seq_rnn.sortModules()
        for Idx in range(len(self.tstdata)):
            out = self.seq_rnn.activate(self.tstdata['input'][Idx])
            if argmax(out) == argmax(self.tstdata['target'][Idx]):
                correct += 1
            else:
                wrong += 1

        correct_ratio = correct * 1.0 / (wrong + correct)
        self.correct_perc.append(correct_ratio)

        print "Wrong Predictions: ", wrong, "Ratio = ", wrong * 100.0 / (wrong + correct), "%"
        print "Correct Predictions: ", correct, "Ratio = ", correct * 100.0 / (wrong + correct), "%"

        if self.max_ratio < correct_ratio:
            print "Found new max, saving network"
            self.write_out("best_perfrming_")
            self.max_ratio = correct_ratio
        return 1 - correct_ratio
#......... (part of the code is omitted here) .........
Example 11: work
# Required import: from pybrain.supervised.trainers import RPropMinusTrainer [as alias]
# Or: from pybrain.supervised.trainers.RPropMinusTrainer import trainEpochs [as alias]
def work(self, image):
    width = self.pt2[0] - self.pt1[0]
    height = self.pt2[1] - self.pt1[1]
    currentrect = (self.pt1[0], self.pt1[1], width, height)  # self.currentrect
    # print currentrect
    image_size = cv.GetSize(image)

    # create grayscale version
    grayscale = cv.CreateImage(image_size, 8, 1)
    cv.CvtColor(image, grayscale, cv.CV_BGR2GRAY)
    storage = cv.CreateMemStorage(0)
    cv.EqualizeHist(grayscale, grayscale)

    frame = np.asarray(image[:, :])
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    mask = np.zeros_like(frame_gray)
    mask[self.pt1[1]:self.pt2[1], self.pt1[0]:self.pt2[0]] = 255

    st = time.time()
    tracks = self.detector.detect(frame_gray, mask)
    print 'detect time', time.time() - st

    cv.Rectangle(image, self.pt1, self.pt2, (0, 255, 0), 1)
    cv.SetImageROI(image, currentrect)

    st = time.time()
    out = self.ClassifyWindow(grayscale, currentrect)
    print out, '|time', time.time() - st
    cv.ResetImageROI(image)

    if out == 1:
        cv.Rectangle(image, (currentrect[0], currentrect[1]),
                     (currentrect[2] + currentrect[0], currentrect[3] + currentrect[1]),
                     (0, 255, 255), 5)
        self.pt1, self.pt2 = PatchBoundary(tracks, self.pt1, self.pt2)
        tmp = self.ImageFromRect(grayscale, currentrect)
        features = FeaturesFromImg(grayscale, self.ninsize)
        self.additionslds.addSample(features, [1])
        for i in xrange(2):
            badwin = self.ImageFromRect(grayscale, RandomRect(currentrect))
            features = FeaturesFromImg(badwin, self.ninsize)
            self.additionslds.addSample(features, [0])
        print 'len of new dataset', len(self.additionslds)
    '''
    else:
        rect = self.SearchObject(grayscale, currentrect)
        if rect:
            self.pt1, self.pt2 = RectToPoints(rect)
    '''

    if self.stage > 0:
        tmp = self.ImageFromRect(grayscale, currentrect)
        features = FeaturesFromImg(grayscale, self.ninsize)
        self.additionslds.addSample(features, [1])
        for i in xrange(2):
            badwin = self.ImageFromRect(grayscale, RandomRect(currentrect))
            features = FeaturesFromImg(badwin, self.ninsize)
            self.additionslds.addSample(features, [0])
        print 'len of new dataset', len(self.additionslds)
        if len(self.additionslds) > 20:
            self.additionslds._convertToOneOfMany()
            self.additionslds.outdim = self.net.outdim
            # net = buildNetwork(self.ninsize[0] * self.ninsize[1], 96, self.additionslds.outdim, outclass=SoftmaxLayer)
            trainer = RPropMinusTrainer(
                self.net, dataset=self.additionslds,
                momentum=0.1, verbose=True, weightdecay=0.01)
            '''trainer = BackpropTrainer(
                self.net, dataset=self.additionslds,
                momentum=0.1, verbose=True, weightdecay=0.01)'''
            trainer.trainEpochs(3)
            self.additionslds.clear()
            self.numoflearning += 1

    if self.key == 113:
        cv.SaveImage('img.bmp', image)
        self.key = 255
    if self.key == 119:
        self.stage = 1
        self.key = 255
    if self.stage == 1 and self.numoflearning > 2:
        self.stage = 2
    '''
    for item in tracks:
        cv.Circle(image, (item[-1][0], item[-1][1]), 2, (0, 255, 0), -1)
    '''
    return image