本文整理汇总了Python中pybrain.supervised.BackpropTrainer.trainUntilConvergence方法的典型用法代码示例。如果您正苦于以下问题:Python BackpropTrainer.trainUntilConvergence方法的具体用法?Python BackpropTrainer.trainUntilConvergence怎么用?Python BackpropTrainer.trainUntilConvergence使用的例子?那么,恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类pybrain.supervised.BackpropTrainer的用法示例。
在下文中一共展示了BackpropTrainer.trainUntilConvergence方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: training
# 需要导入模块: from pybrain.supervised import BackpropTrainer [as 别名]
# 或者: from pybrain.supervised.BackpropTrainer import trainUntilConvergence [as 别名]
def training(d):
# net = buildNetwork(d.indim, 55, d.outdim, bias=True,recurrent=False, hiddenclass =SigmoidLayer , outclass = SoftmaxLayer)
net = FeedForwardNetwork()
inLayer = SigmoidLayer(d.indim)
hiddenLayer1 = SigmoidLayer(d.outdim)
hiddenLayer2 = SigmoidLayer(d.outdim)
outLayer = SigmoidLayer(d.outdim)
net.addInputModule(inLayer)
net.addModule(hiddenLayer1)
net.addModule(hiddenLayer2)
net.addOutputModule(outLayer)
in_to_hidden = FullConnection(inLayer, hiddenLayer1)
hidden_to_hidden = FullConnection(hiddenLayer1, hiddenLayer2)
hidden_to_out = FullConnection(hiddenLayer2, outLayer)
net.addConnection(in_to_hidden)
net.addConnection(hidden_to_hidden)
net.addConnection(hidden_to_out)
net.sortModules()
print net
t = BackpropTrainer(net, d, learningrate = 0.9,momentum=0.9, weightdecay=0.01, verbose = True)
t.trainUntilConvergence(continueEpochs=1200, maxEpochs=1000)
NetworkWriter.writeToFile(net, 'myNetwork'+str(time.time())+'.xml')
return t
示例2: train
# 需要导入模块: from pybrain.supervised import BackpropTrainer [as 别名]
# 或者: from pybrain.supervised.BackpropTrainer import trainUntilConvergence [as 别名]
def train(self, epochs=None):
    """Train self.net on self.training_data.

    A truthy *epochs* trains for exactly that many epochs; otherwise
    training runs until convergence.
    """
    backprop = BackpropTrainer(self.net, self.training_data)
    if not epochs:
        backprop.trainUntilConvergence()
    else:
        backprop.trainEpochs(epochs)
示例3: initializeNetwork
# 需要导入模块: from pybrain.supervised import BackpropTrainer [as 别名]
# 或者: from pybrain.supervised.BackpropTrainer import trainUntilConvergence [as 别名]
def initializeNetwork(self):
    """Build a 26->15->5 softmax classifier and train it on self.train.

    Each training item contributes its letter-frequency vector as input
    and its language (via self.encodingDict) as the class label.
    """
    self.net = buildNetwork(26, 15, 5, hiddenclass=TanhLayer, outclass=SoftmaxLayer)  # 15 is just a mean
    dataset = ClassificationDataSet(26, nb_classes=5)
    for sample in self.train:
        dataset.addSample(sample.frequency, self.encodingDict[sample.lang])
    dataset._convertToOneOfMany()
    trainer = BackpropTrainer(self.net, dataset=dataset,
                              weightdecay=0.01, momentum=0.1, verbose=True)
    trainer.trainUntilConvergence(maxEpochs=100)
示例4: training
# 需要导入模块: from pybrain.supervised import BackpropTrainer [as 别名]
# 或者: from pybrain.supervised.BackpropTrainer import trainUntilConvergence [as 别名]
def training(d):
"""
Builds a network and trains it.
"""
n = buildNetwork(d.indim, INPUTS-3,INPUTS-4, d.outdim,recurrent=True)
print n;
t = BackpropTrainer(n, d, learningrate = 0.02, momentum = 0.88)
#for epoch in range(0,700):
t.trainUntilConvergence(d, 1190)
return t
示例5: train
# 需要导入模块: from pybrain.supervised import BackpropTrainer [as 别名]
# 或者: from pybrain.supervised.BackpropTrainer import trainUntilConvergence [as 别名]
def train(training_data):
    """Train a fresh classifier on *training_data*.

    Builds a ClassificationDataSet from the instances' feature vectors,
    stores a newly built network in net_placeholder[0], and trains it
    until convergence (capped at 100 epochs).
    """
    dataset = ClassificationDataSet(len(feats), nb_classes=len(classes))
    for instance in training_data:
        dataset.appendLinked(instance.features(), [instance.class_idx()])
    dataset._convertToOneOfMany([0, 1])
    # Hidden layer sized halfway between input and output dimensions.
    hidden_size = int((dataset.indim + dataset.outdim) / 2)
    net_placeholder[0] = buildNetwork(
        dataset.indim, hidden_size, dataset.outdim,
        bias=True, hiddenclass=TanhLayer, outclass=SoftmaxLayer,
    )
    trainer = BackpropTrainer(
        net_placeholder[0], dataset,
        momentum=0.75, verbose=False, learningrate=0.05,
    )
    trainer.trainUntilConvergence(maxEpochs=100, validationProportion=0.1)
示例6: build_net
# 需要导入模块: from pybrain.supervised import BackpropTrainer [as 别名]
# 或者: from pybrain.supervised.BackpropTrainer import trainUntilConvergence [as 别名]
def build_net(self):
if os.path.exists(self.NET_FILE):
return NetworkReader.readFrom(self.NET_FILE)
ds = ClassificationDataSet(len(feats), nb_classes=len(classes))
for c in classes:
print c
with codecs.open(os.path.join(self.data_root, c+".txt"), 'r', 'utf8') as f:
for line in f:
r = Record("11", line, c, "")
ds.appendLinked(r.features(), [r.class_idx()])
ds._convertToOneOfMany([0, 1])
net = buildNetwork(ds.indim, int((ds.indim + ds.outdim)/2), ds.outdim, bias=True, hiddenclass=TanhLayer, outclass=SoftmaxLayer)
trainer = BackpropTrainer(net, ds, momentum=0.75, verbose=True)
trainer.trainUntilConvergence(maxEpochs=300)
NetworkWriter.writeToFile(net, self.NET_FILE)
return net
示例7: train
# 需要导入模块: from pybrain.supervised import BackpropTrainer [as 别名]
# 或者: from pybrain.supervised.BackpropTrainer import trainUntilConvergence [as 别名]
def train(self, **kwargs):
    """Train self.nn on self.trndata, validating against self.tstdata.

    Keyword Args:
        verbose (bool): forwarded to trainUntilConvergence (default False).

    Returns:
        tuple: (training_errors, validation_errors) as produced by
        trainUntilConvergence.
    """
    # Replaces the manual `if "verbose" in kwargs` lookup with dict.get.
    verbose = kwargs.get("verbose", False)
    # Validate the datasets *before* constructing a trainer that uses them.
    assert self.trndata is not None
    assert self.tstdata is not None
    trainer = BackpropTrainer(self.nn, self.trndata,
                              learningrate=0.0005, momentum=0.99)
    b1, b2 = trainer.trainUntilConvergence(verbose=verbose,
                                           trainingData=self.trndata,
                                           validationData=self.tstdata,
                                           maxEpochs=10)
    return b1, b2
示例8: learn_until_convergence
# 需要导入模块: from pybrain.supervised import BackpropTrainer [as 别名]
# 或者: from pybrain.supervised.BackpropTrainer import trainUntilConvergence [as 别名]
def learn_until_convergence(self, learning_rate, momentum, max_epochs, continue_epochs, verbose=True):
if verbose:
print "Training neural network..."
trainer = BackpropTrainer(self.network, self.learn_data, learningrate=learning_rate, momentum=momentum)
training_errors, validation_errors = trainer.trainUntilConvergence(continueEpochs=continue_epochs,
maxEpochs=max_epochs)
self.x = range(1, len(training_errors) + 1)
self.err = training_errors
return self.network
示例9: create_network
# 需要导入模块: from pybrain.supervised import BackpropTrainer [as 别名]
# 或者: from pybrain.supervised.BackpropTrainer import trainUntilConvergence [as 别名]
def create_network(timesteps):
    """Build and train an LSTM sequence classifier on the MNIST data.

    Trains with backprop-through-time until convergence (capped at 1000
    epochs) and prints the train/test sequence error rates.
    """
    trndata, validdata, tstdata = read_data_MNIST(timesteps)
    rnn = buildNetwork(
        trndata.indim, 20, trndata.outdim, hiddenclass=LSTMLayer, outclass=SoftmaxLayer, outputbias=True, recurrent=True
    )
    # 20 is the number of LSTM blocks in the hidden layer
    # we use the BPTT algo to train
    trainer = BackpropTrainer(rnn, dataset=trndata, verbose=True, momentum=0.9, learningrate=0.00001)
    print "Training started ..."
    # NOTE(review): time.clock() is deprecated and removed in Python 3.8, and
    # its meaning is platform-dependent — consider time.time() instead.
    t1 = time.clock()
    # trainer.trainEpochs(10)
    trainer.trainUntilConvergence(maxEpochs=1000)
    t2 = time.clock()
    print "Training 1000 epochs took : ", (t2 - t1) / 60.0, "minutes "
    # train for 1000 epochs
    # Error rate = 100% minus classification accuracy on each split.
    trnresult = 100.0 * (1.0 - testOnSequenceData(rnn, trndata))
    tstresult = 100.0 * (1.0 - testOnSequenceData(rnn, tstdata))
    print "Train Error : %5.2f%%" % trnresult, " , test error :%5.2f%%" % tstresult
示例10: main
# 需要导入模块: from pybrain.supervised import BackpropTrainer [as 别名]
# 或者: from pybrain.supervised.BackpropTrainer import trainUntilConvergence [as 别名]
def main(args=[__file__]):
trnDs, tstDs = getSeparateDataSets()
net = buildNetwork(trnDs.indim, int((trnDs.indim + trnDs.outdim)/2), trnDs.outdim, bias=True, hiddenclass=TanhLayer, outclass=SoftmaxLayer)
trainer = BackpropTrainer(net, trnDs, momentum=0.75, verbose=True, learningrate=0.05)
trainer.trainUntilConvergence(maxEpochs=100, validationProportion=0.1)
eval = evaluate(net, tstDs)
print "accuracy:", eval.getWeightedAccuracy()
print "recall:", eval.getWeightedRecall()
print "precision:", eval.getWeightedPrecision()
print "F-measure:", eval.getWeightedFMeasure()
if detailed:
for evalRes in eval.evals:
print "Class:", evalRes.clazz
print "Accuracy:", evalRes.getAccuracy()
print "Recall:", evalRes.getRecall()
print "Precision:", evalRes.getPrecision()
print "F-measure:", evalRes.getFMeasure()
print '-'*35
print '-'*70
示例11: train
# 需要导入模块: from pybrain.supervised import BackpropTrainer [as 别名]
# 或者: from pybrain.supervised.BackpropTrainer import trainUntilConvergence [as 别名]
def train(self):
"""t = BackpropTrainer(self.rnn, dataset=self.trndata, learningrate = 0.1, momentum = 0.0, verbose = True)
for i in range(1000):
t.trainEpochs(5)
"""
print self.nn.outdim, " nn | ", self.trndata.outdim, " trndata "
trainer = BackpropTrainer(self.nn, self.trndata, learningrate = 0.0005, momentum = 0.99)
b1, b2 = trainer.trainUntilConvergence(verbose=True,
trainingData=self.trndata,
validationData=self.tstdata,
maxEpochs=10)
print b1, b2
print "new parameters are: "
self.print_connections()
示例12: trainNetworkBackprop
# 需要导入模块: from pybrain.supervised import BackpropTrainer [as 别名]
# 或者: from pybrain.supervised.BackpropTrainer import trainUntilConvergence [as 别名]
def trainNetworkBackprop(self, dataset,maxIter):
trainer = BackpropTrainer(self.net, dataset)
print "\tInitialised backpropogation traininer. Now execute until convergence::"
trainer.trainUntilConvergence(verbose=True,maxEpochs=maxIter)
print "\tConvergence achieved."
示例13: RWR
# 需要导入模块: from pybrain.supervised import BackpropTrainer [as 别名]
# 或者: from pybrain.supervised.BackpropTrainer import trainUntilConvergence [as 别名]
#.........这里部分代码省略.........
y[chosen] = 1
acts.append(y)
rewards.append(reward)
avgReward += sum(rewards) / float(len(rewards))
# compute the returns from the list of rewards
current = 0
returns = []
for r in reversed(rewards):
current *= self.task.discount
current += r
returns.append(current)
returns.reverse()
for i in range(len(obss)):
self.rawDs.addSample(obss[i], acts[i], returns[i])
self.valueDs.addSample(obss[i], returns[i])
r0s.append(returns[0])
lens.append(len(returns))
r0s = array(r0s)
self.totalSteps += sum(lens)
avgLen = sum(lens) / float(self.batchSize)
avgR0 = mean(r0s)
avgReward /= self.batchSize
if self.verbose:
print '***', round(avgLen, 3), '***', '(avg init exp. return:', round(avgR0, 5), ')',
print 'avg reward', round(avgReward, 5), '(tau:', round(self.tau, 3), ')'
print lens
# storage:
self.rewardAvg.append(avgReward)
self.lengthAvg.append(avgLen)
self.initr0Avg.append(avgR0)
# if self.vnet == None:
# # case 1: no value estimator:
# prepare the dataset for training the acting network
shaped = self.shapingFunction(r0s)
self.updateTau(r0s, shaped)
shaped /= max(shaped)
for i, seq in enumerate(self.rawDs):
self.weightedDs.newSequence()
for sample in seq:
obs, act, dummy = sample
self.weightedDs.addSample(obs, act, shaped[i])
# else:
# # case 2: value estimator:
#
#
# # train the value estimating network
# if self.verbose: print 'Old value error: ', self.vbp.testOnData()
# self.vbp.trainEpochs(self.valueTrainEpochs)
# if self.verbose: print 'New value error: ', self.vbp.testOnData()
#
# # produce the values and analyze
# rminusvs = []
# sizes = []
# for i, seq in enumerate(self.valueDs):
# self.vnet.reset()
# seq = list(seq)
# for sample in seq:
# obs, ret = sample
# val = self.vnet.activate(obs)
# rminusvs.append(ret-val)
# sizes.append(len(seq))
#
# rminusvs = array(rminusvs)
# shapedRminusv = self.shapingFunction(rminusvs)
# # CHECKME: here?
# self.updateTau(rminusvs, shapedRminusv)
# shapedRminusv /= array(sizes)
# shapedRminusv /= max(shapedRminusv)
#
# # prepare the dataset for training the acting network
# rvindex = 0
# for i, seq in enumerate(self.rawDs):
# self.weightedDs.newSequence()
# self.vnet.reset()
# for sample in seq:
# obs, act, ret = sample
# self.weightedDs.addSample(obs, act, shapedRminusv[rvindex])
# rvindex += 1
# train the acting network
tmp1, tmp2 = self.bp.trainUntilConvergence(maxEpochs=self.maxEpochs,
validationProportion=self.validationProportion,
continueEpochs=self.continueEpochs,
verbose=self.verbose)
if self.supervisedPlotting:
from pylab import plot, legend, figure, clf, draw
figure(1)
clf()
plot(tmp1, label='train')
plot(tmp2, label='valid')
legend()
draw()
return avgLen, avgR0
示例14: main
# 需要导入模块: from pybrain.supervised import BackpropTrainer [as 别名]
# 或者: from pybrain.supervised.BackpropTrainer import trainUntilConvergence [as 别名]
def main(f_samples):
    """Train a linear network to forecast a series read from *f_samples*.

    Each input line must hold at least two whitespace-separated numbers;
    the first and last columns are kept as a (time, value) pair. Prints a
    comparison between the trained network and a naive forecaster.
    """
    f_reading = open(f_samples, 'r')
    global data
    data = []
    for line in f_reading:
        line = line.split()
        data.append( (float(line[0]), float(line[-1])) )
    #function
    # Partition the samples into 5 interleaved strata (every 5th element).
    data_module = lambda x: map( lambda z: data[z], filter( lambda y: y% 5 == x, xrange(len(data)) ) )
    global data1
    data1 = [data_module(0), data_module(1), data_module(2), data_module(3), data_module(4)]
    global data_transformed
    data_transformed = take(data, rate = 60)
    # Randomly keep ~70% of the transformed samples for training.
    global data_transformed_training
    data_transformed_training = map( lambda x: data_transformed[x], filter( lambda x: uniform(0, 1) > 0.3, xrange(len(data_transformed)) ))
    #Learning process-----------------------------------------------------------------
    global net, samples, trainer
    net = FeedForwardNetwork()
    inLayer = LinearLayer(3)
    hiddenLayer0 = SigmoidLayer(1)
    hiddenLayer1 = SigmoidLayer(3)
    outLayer = LinearLayer(1)
    net.addInputModule(inLayer)
    # net.addModule(hiddenLayer0)
    # net.addModule(hiddenLayer1)
    net.addOutputModule(outLayer)
    # net.addConnection(FullConnection(inLayer, hiddenLayer0))
    # NOTE(review): only the direct input->output connection is active, so the
    # trained model is purely linear; the hidden layers above are built but unused.
    net.addConnection(FullConnection(inLayer, outLayer))
    # net.addConnection(FullConnection(hiddenLayer0, outLayer))
    # net.addConnection(FullConnection(hiddenLayer0, hiddenLayer1))
    # net.addConnection(FullConnection(hiddenLayer1, outLayer))
    net.sortModules()
    print net
    ##Net with 3 inputs, 8 hidden neurons in a layer and 8 in another, and 1 out.
    #net = buildNetwork(3,8,8,1)
    ##Set with 2 inputs and one output for each sample
    samples = SupervisedDataSet(3,1)
    # Target is the deviation of the next value from the window average
    # (assumes take() yields dicts with 'past', 'next', 'average' — verify).
    for i in data_transformed_training:
        samples.addSample(i['past'], i['next'] - i['average'])
    trainer = BackpropTrainer(net, samples)
    print 'Training'
    trainer.trainUntilConvergence(maxEpochs= 10)
    #Comparing step-------------------------------------------------------------------
    print 'Naive1'
    # Naive baseline: predict using the previous sample's observed deviation.
    aux = map(lambda y: y['past'], data_transformed)
    aux2 = map(lambda y: y['next']-y['average'], data_transformed)
    compare_forecast_samples(Forecaster(predict_function = lambda x: aux2[aux.index(x)-1]), data_transformed)
    print 'Network'
    compare_forecast_samples(Forecaster(predict_function = net.activate), data_transformed)
    print "Number of samples %d for training." %len(data_transformed_training)
示例15: read_synergy_data
# 需要导入模块: from pybrain.supervised import BackpropTrainer [as 别名]
# 或者: from pybrain.supervised.BackpropTrainer import trainUntilConvergence [as 别名]
# Build a supervised training set pairing drug-combination feature vectors
# with measured synergy scores, train a small network on it, and dump a
# prediction for every (d1, d2) drug pair to JSON.
synergy_dict = read_synergy_data(synergy)
# dump_drug_dict_as_flat(pca_dict, out)
training_input,input_len = build_training_input(pca_dict, synergy_dict)
# input_len = training_input[list(training_input.keys())[0]]['INPUT']
target_len = 1  # one synergy value per sample
ds = SupervisedDataSet(input_len, target_len)
for t1 in training_input:
    for t2 in training_input[t1]:
        print("Input Vector", training_input[t1][t2]['INPUT'], training_input[t1][t2]['OUTPUT'])
        ds.addSample(training_input[t1][t2]['INPUT'], training_input[t1][t2]['OUTPUT'])
n = buildNetwork(ds.indim, 3, ds.outdim, bias=True)
# NOTE(review): the dataset is not given to the trainer constructor; it is
# passed as the first positional argument of trainUntilConvergence instead.
t = BackpropTrainer(n, learningrate=0.001, momentum=0.05, verbose=True)
print("Training")
t.trainUntilConvergence(ds,
    verbose=True)
NetworkWriter.writeToFile(n, 'trainedNetwork.xml')
# n = NetworkReader.readFrom('trainedNetwork_2.xml')
predictions = {}
for d1 in pca_dict:
    if not predictions.get(d1, None):
        predictions[d1]={}
    for d2 in pca_dict:
        # assumes pca_dict values support `+` concatenation (e.g. lists) — verify
        predictions[d1][d2] = n.activate(pca_dict[d1] + pca_dict[d2])[0]
with open('predictions_4.json', 'w') as outfile:
    json.dump(predictions, outfile)