This article collects and summarizes typical usage examples of the Python method pybrain.structure.RecurrentNetwork.addOutputModule. If you are wondering exactly what RecurrentNetwork.addOutputModule does and how to use it, the curated examples below may help. You can also explore other usage examples of its parent class, pybrain.structure.RecurrentNetwork.
The following presents 15 code examples of RecurrentNetwork.addOutputModule, sorted by popularity by default.
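Before the individual examples, here is a minimal sketch of the typical call pattern (the layer sizes and input values here are arbitrary): addOutputModule registers a module as the network's output layer, and sortModules() must be called before the network can be activated.

from pybrain.structure import (RecurrentNetwork, LinearLayer,
                               SigmoidLayer, FullConnection)

n = RecurrentNetwork()
n.addInputModule(LinearLayer(2, name='in'))
n.addModule(SigmoidLayer(3, name='hidden'))
n.addOutputModule(LinearLayer(1, name='out'))   # register the output layer
n.addConnection(FullConnection(n['in'], n['hidden']))
n.addConnection(FullConnection(n['hidden'], n['out']))
n.addRecurrentConnection(FullConnection(n['hidden'], n['hidden']))
n.sortModules()                                 # finalize the topology
print(n.activate([0.5, -0.2]))                  # one output value per step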
Example 1: getNetwork
# Required module: from pybrain.structure import RecurrentNetwork [as alias]
# Or: from pybrain.structure.RecurrentNetwork import addOutputModule [as alias]
def getNetwork(trndata):
    n = RecurrentNetwork()
    n.addInputModule(LinearLayer(trndata.indim, name='in'))
    n.addModule(SigmoidLayer(100, name='hidden'))
    n.addOutputModule(LinearLayer(trndata.outdim, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
    n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
    n.addRecurrentConnection(FullConnection(n['hidden'], n['hidden'], name='c3'))
    n.sortModules()
    # fnn = buildNetwork(trndata.indim, 5, trndata.outdim, outclass=SoftmaxLayer)
    trainer = BackpropTrainer(n, dataset=trndata, momentum=0.1, verbose=True, weightdecay=0.01)
    # TODO: return network and trainer here. Make another function for training
    # for i in range(20):
    #     trainer.trainEpochs(1)
    #     trainer.trainUntilConvergence(maxEpochs=100)
    #     trnresult = percentError(trainer.testOnClassData(), trndata['class'])
    #     tstresult = percentError(trainer.testOnClassData(dataset=tstdata), tstdata['class'])
    #     print "epoch: %4d" % trainer.totalepochs, \
    #         " train error: %5.2f%%" % trnresult
    # out = fnn.activateOnDataset(tstdata)
    # out = out.argmax(axis=1)  # the highest output activation gives the class
    return (n, trainer)
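The TODO above asks for a separate training routine. A minimal sketch of what that hypothetical trainNetwork companion might look like, assuming the commented-out percentError/testOnClassData logic is what was intended (the epoch cap is an arbitrary choice):

from pybrain.utilities import percentError

def trainNetwork(n, trainer, trndata, maxEpochs=100):
    # Train the network returned by getNetwork() until convergence,
    # then report the classification error on the training set.
    trainer.trainUntilConvergence(maxEpochs=maxEpochs)
    trnresult = percentError(trainer.testOnClassData(), trndata['class'])
    print("train error: %5.2f%%" % trnresult)
    return n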
Example 2: buildNonGravityNet
# Required module: from pybrain.structure import RecurrentNetwork [as alias]
# Or: from pybrain.structure.RecurrentNetwork import addOutputModule [as alias]
def buildNonGravityNet(recurrent=False):
    if recurrent:
        net = RecurrentNetwork()
    else:
        net = FeedForwardNetwork()
    l1 = LinearLayer(2)
    l2 = LinearLayer(3)
    s1 = SigmoidLayer(2)
    l3 = LinearLayer(1)
    net.addInputModule(l1)
    net.addModule(l2)
    net.addModule(s1)
    net.addOutputModule(l3)
    net.addConnection(IdentityConnection(l1, l2, outSliceFrom=1))
    net.addConnection(IdentityConnection(l1, l2, outSliceTo=2))
    net.addConnection(IdentityConnection(l2, l3, inSliceFrom=2))
    net.addConnection(IdentityConnection(l2, l3, inSliceTo=1))
    net.addConnection(IdentityConnection(l1, s1))
    net.addConnection(IdentityConnection(l2, s1, inSliceFrom=1))
    net.addConnection(IdentityConnection(s1, l3, inSliceFrom=1))
    if recurrent:
        net.addRecurrentConnection(IdentityConnection(s1, l1))
        net.addRecurrentConnection(IdentityConnection(l2, l2, inSliceFrom=1, outSliceTo=2))
    net.sortModules()
    return net
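The inSliceFrom/inSliceTo and outSliceFrom/outSliceTo keywords restrict an IdentityConnection to a sub-range of the source or destination buffer, which is how this example wires partial overlaps between layers. A quick way to exercise the recurrent variant (the input values are arbitrary):

net = buildNonGravityNet(recurrent=True)
net.reset()                       # clear the recurrent state buffers
print(net.activate([1.0, 2.0]))   # two inputs in, one output out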
Example 3: trained_cat_dog_RFCNN
# Required module: from pybrain.structure import RecurrentNetwork [as alias]
# Or: from pybrain.structure.RecurrentNetwork import addOutputModule [as alias]
def trained_cat_dog_RFCNN():
    n = RecurrentNetwork()
    d = get_cat_dog_trainset()
    input_size = d.getDimension('input')
    n.addInputModule(LinearLayer(input_size, name='in'))
    n.addModule(SigmoidLayer(input_size + 1500, name='hidden'))
    n.addOutputModule(LinearLayer(2, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
    n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
    n.addRecurrentConnection(FullConnection(n['out'], n['hidden'], name='nmc'))
    n.sortModules()
    t = BackpropTrainer(n, d, learningrate=0.0001)  # , momentum=0.75
    count = 0
    while True:
        globErr = t.train()
        print globErr
        count += 1
        if globErr < 0.01:
            break
        if count == 30:
            break
    exportCatDogRFCNN(n)
    return n
Example 4: __init__
# Required module: from pybrain.structure import RecurrentNetwork [as alias]
# Or: from pybrain.structure.RecurrentNetwork import addOutputModule [as alias]
class MoveBrain:
    def __init__(self):
        self.n = RecurrentNetwork()
        inLayer = LinearLayer(8)
        hiddenLayer = SigmoidLayer(4)
        self.numInputs = 8
        outLayer = LinearLayer(4)
        self.n.addInputModule(inLayer)
        self.n.addModule(hiddenLayer)
        self.n.addOutputModule(outLayer)
        in_to_hidden = FullConnection(inLayer, hiddenLayer)
        hidden_to_out = FullConnection(hiddenLayer, outLayer)
        self.n.addConnection(in_to_hidden)
        self.n.addConnection(hidden_to_out)
        self.n.sortModules()
        self.ds = SupervisedDataSet(8, 4)
        self.trainer = BackpropTrainer(self.n, self.ds)

    def run(self, inputs):
        if inputs.size() == self.numInputs:
            self.n.activate(inputs)
        else:
            print "number of inputs does not match"

    def addRule(self, rule):
        # rule is assumed to be an (input, target) pair
        self.ds.addSample(*rule)

    def saveNetwork(self):
        fileObject = open('networks/avoidandfindv1', 'w')
        pickle.dump(self.n, fileObject)
        fileObject.close()
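saveNetwork pickles the trained network to disk; a matching loader (loadNetwork here is a hypothetical helper, not part of the original example) might look like this sketch, with the path mirroring the one hard-coded above:

def loadNetwork(path='networks/avoidandfindv1'):
    # Restore a network previously written by MoveBrain.saveNetwork()
    fileObject = open(path)
    net = pickle.load(fileObject)
    fileObject.close()
    return net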
Example 5: trainedRNN
# Required module: from pybrain.structure import RecurrentNetwork [as alias]
# Or: from pybrain.structure.RecurrentNetwork import addOutputModule [as alias]
def trainedRNN():
    n = RecurrentNetwork()
    n.addInputModule(LinearLayer(4, name='in'))
    n.addModule(SigmoidLayer(6, name='hidden'))
    n.addOutputModule(LinearLayer(2, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
    n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
    n.addRecurrentConnection(NMConnection(n['out'], n['out'], name='nmc'))
    # n.addRecurrentConnection(FullConnection(n['out'], n['hidden'], inSliceFrom=0, inSliceTo=1, outSliceFrom=0, outSliceTo=3))
    n.sortModules()
    draw_connections(n)
    d = getDatasetFromFile(root.path() + "/res/dataSet")
    t = BackpropTrainer(n, d, learningrate=0.001, momentum=0.75)
    t.trainOnDataset(d)
    count = 0
    while True:
        globErr = t.train()
        print globErr
        if globErr < 0.01:
            break
        count += 1
        if count == 50:
            return trainedRNN()
    # exportRNN(n)
    draw_connections(n)
    return n
Example 6: main
# Required module: from pybrain.structure import RecurrentNetwork [as alias]
# Or: from pybrain.structure.RecurrentNetwork import addOutputModule [as alias]
def main():
    inData = createDataset()
    env = MarketEnvironment(inData)
    task = MaximizeReturnTask(env)
    numIn = min(env.worldState.shape)

    net = RecurrentNetwork()
    net.addInputModule(BiasUnit(name='bias'))
    # net.addOutputModule(TanhLayer(1, name='out'))
    net.addOutputModule(SignLayer(1, name='out'))
    net.addRecurrentConnection(FullConnection(net['out'], net['out'], name='c3'))
    net.addInputModule(LinearLayer(numIn, name='in'))
    net.addConnection(FullConnection(net['in'], net['out'], name='c1'))
    net.addConnection(FullConnection(net['bias'], net['out'], name='c2'))
    net.sortModules()

    # remove bias (set weight to 0)
    # initialParams = append(array([0.0]), net._params[1:])
    # net._setParameters(initialParams)
    # net._setParameters([0.0, -0.05861005, 1.64281513, 0.98302613])
    # net._setParameters([0., 1.77132063, 1.3843613, 4.73725269])
    # net._setParameters([0.0, -0.95173719, 1.92989266, 0.06837472])
    net._setParameters([0.0, 1.29560957, -1.14727503, -1.80005888, 0.66351325, 1.19240189])

    ts = env.ts
    learner = RRL(numIn + 2, ts)  # ENAC() #Q_LinFA(2,1)
    agent = LearningAgent(net, learner)
    exp = ContinuousExperiment(task, agent)

    print(net._params)
    exp.doInteractionsAndLearn(len(ts) - 1)
    print(net._params)

    outData = DataFrame(inData['RETURNS'] / 100)
    outData['ts'] = [i / 100 for i in ts]
    outData['cum_log_ts'] = cumsum([log(1 + i) for i in outData['ts']])
    outData['Action_Hist'] = env.actionHistory
    outData['trading rets'] = pE.calculateTradingReturn(outData['Action_Hist'], outData['ts'])
    outData['cum_log_rets'] = cumsum([log(1 + x) for x in outData['trading rets']])

    paramHist = learner.paramHistory
    plt.figure(0)
    for i in range(len(net._params)):
        plt.plot(paramHist[i])
    plt.draw()

    print(pE.percentOfOutperformedMonths(outData['trading rets'], outData['ts']))
    # ax1.plot(sign(actionHist), 'r')
    plt.figure(1)
    outData['cum_log_ts'].plot(secondary_y=True)
    outData['cum_log_rets'].plot(secondary_y=True)
    outData['Action_Hist'].plot()
    plt.draw()
    plt.show()
Example 7: runNeuralLearningCurveSimulation
# Required module: from pybrain.structure import RecurrentNetwork [as alias]
# Or: from pybrain.structure.RecurrentNetwork import addOutputModule [as alias]
def runNeuralLearningCurveSimulation(dataTrain, dataTest, train_tfidf, test_tfidf, outFile):
    print 'running neural learning curve'
    outFile.write('-------------------------------------\n')
    outFile.write('train==> %d, %d \n' % (train_tfidf.shape[0], train_tfidf.shape[1]))
    outFile.write('test==> %d, %d \n' % (test_tfidf.shape[0], test_tfidf.shape[1]))
    trainDS = getDataSetFromTfidf(train_tfidf, dataTrain.target)
    testDS = getDataSetFromTfidf(test_tfidf, dataTest.target)
    print "Number of training patterns: ", len(trainDS)
    print "Input and output dimensions: ", trainDS.indim, trainDS.outdim
    print "First sample (input, target, class):"
    print len(trainDS['input'][0]), trainDS['target'][0], trainDS['class'][0]
    '''
    with SimpleTimer('time to train', outFile):
        net = buildNetwork(trainDS.indim, trainDS.indim/2, trainDS.indim/4, trainDS.indim/8, trainDS.indim/16, 2, hiddenclass=TanhLayer, outclass=SoftmaxLayer)
        trainer = BackpropTrainer(net, dataset=trainDS, momentum=0.1, verbose=True, weightdecay=0.01, batchlearning=True)
    '''
    net = RecurrentNetwork()
    net.addInputModule(LinearLayer(trainDS.indim, name='in'))
    net.addModule(SigmoidLayer(trainDS.indim/2, name='hidden'))
    net.addModule(SigmoidLayer(trainDS.indim/4, name='hidden2'))
    net.addOutputModule(SoftmaxLayer(2, name='out'))
    net.addConnection(FullConnection(net['in'], net['hidden'], name='c1'))
    net.addConnection(FullConnection(net['hidden'], net['out'], name='c2'))
    net.addRecurrentConnection(FullConnection(net['hidden'], net['hidden'], name='c3'))
    net.addRecurrentConnection(FullConnection(net['hidden2'], net['hidden'], name='c4'))
    net.sortModules()
    trainer = BackpropTrainer(net, dataset=trainDS, momentum=0.01, verbose=True, weightdecay=0.01)
    outFile.write('%s \n' % (net.__str__()))
    epochs = 200
    with SimpleTimer('time to train %d epochs' % epochs, outFile):
        for i in range(epochs):
            trainer.trainEpochs(1)
            trnresult = percentError(trainer.testOnClassData(), trainDS['class'])
            tstresult = percentError(trainer.testOnClassData(dataset=testDS), testDS['class'])
            print "epoch: %4d" % trainer.totalepochs, \
                " train error: %5.2f%%" % trnresult, \
                " test error: %5.2f%%" % tstresult
            outFile.write('%5.2f , %5.2f \n' % (100.0 - trnresult, 100.0 - tstresult))
    predicted = trainer.testOnClassData(dataset=testDS)
    results = predicted == testDS['class'].flatten()
    wrong = []
    for i in range(len(results)):
        if not results[i]:
            wrong.append(i)
    print 'classifier got these wrong:'
    for i in wrong[:10]:
        print dataTest.data[i], dataTest.target[i]
        outFile.write('%s %d \n' % (dataTest.data[i], dataTest.target[i]))
Example 8: createRecurrent
# Required module: from pybrain.structure import RecurrentNetwork [as alias]
# Or: from pybrain.structure.RecurrentNetwork import addOutputModule [as alias]
def createRecurrent(inputSize, nHidden):
    n = RecurrentNetwork()
    n.addInputModule(LinearLayer(inputSize, name='in'))
    n.addModule(SigmoidLayer(nHidden, name='hidden'))
    n.addOutputModule(LinearLayer(1, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
    n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
    n.addRecurrentConnection(FullConnection(n['hidden'], n['hidden'], name='c3'))
    n.sortModules()
    return n
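createRecurrent only builds the topology; training it on sequential data would typically use a SequentialDataSet, roughly as in this sketch (the sample values are placeholders):

from pybrain.datasets import SequentialDataSet
from pybrain.supervised.trainers import BackpropTrainer

net = createRecurrent(inputSize=2, nHidden=5)
ds = SequentialDataSet(2, 1)      # 2 inputs, 1 target per time step
ds.newSequence()
ds.addSample((0.1, 0.2), (0.3,))
ds.addSample((0.2, 0.3), (0.5,))
trainer = BackpropTrainer(net, ds)
print(trainer.train())            # error after one training epoch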
Example 9: buildMinimalLSTMNetwork
# Required module: from pybrain.structure import RecurrentNetwork [as alias]
# Or: from pybrain.structure.RecurrentNetwork import addOutputModule [as alias]
def buildMinimalLSTMNetwork():
    N = RecurrentNetwork('simpleLstmNet')
    i = LinearLayer(4, name='i')
    h = LSTMLayer(1, peepholes=True, name='lstm')
    o = LinearLayer(1, name='o')
    N.addInputModule(i)
    N.addModule(h)
    N.addOutputModule(o)
    N.addConnection(IdentityConnection(i, h))
    N.addConnection(IdentityConnection(h, o))
    N.sortModules()
    return N
Example 10: build_rec
# Required module: from pybrain.structure import RecurrentNetwork [as alias]
# Or: from pybrain.structure.RecurrentNetwork import addOutputModule [as alias]
def build_rec(inp, hid, out):
    n = RecurrentNetwork()
    n.addInputModule(LinearLayer(inp, name='in'))
    n.addModule(TanhLayer(hid, name='hidden'))
    n.addOutputModule(SoftmaxLayer(out, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
    n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
    n.addRecurrentConnection(FullConnection(n['hidden'], n['hidden'], name='c3'))
    n.sortModules()
    # n.randomize()
    return n
Example 11: buildMinimalMDLSTMNetwork
# Required module: from pybrain.structure import RecurrentNetwork [as alias]
# Or: from pybrain.structure.RecurrentNetwork import addOutputModule [as alias]
def buildMinimalMDLSTMNetwork():
    N = RecurrentNetwork('simpleMdLstmNet')
    i = LinearLayer(4, name='i')
    h = MDLSTMLayer(1, peepholes=True, name='mdlstm')
    o = LinearLayer(1, name='o')
    N.addInputModule(i)
    N.addModule(h)
    N.addOutputModule(o)
    N.addConnection(IdentityConnection(i, h, outSliceTo=4))
    N.addRecurrentConnection(IdentityConnection(h, h, outSliceFrom=4, inSliceFrom=1))
    N.addConnection(IdentityConnection(h, o, inSliceTo=1))
    N.sortModules()
    return N
Example 12: build_rnn
# Required module: from pybrain.structure import RecurrentNetwork [as alias]
# Or: from pybrain.structure.RecurrentNetwork import addOutputModule [as alias]
def build_rnn(input_size, output_size, layers):
    net = RecurrentNetwork()
    layers_list = ["in"]
    net.addInputModule(LinearLayer(input_size, name="in"))
    for i in range(0, layers):
        net.addModule(ReluLayer(input_size, name="hidden" + str(i)))
        layers_list.append("hidden" + str(i))
    net.addOutputModule(TanhLayer(output_size, name="out"))
    layers_list.append("out")
    for i in range(0, len(layers_list) - 1):
        net.addConnection(FullConnection(net[layers_list[i]], net[layers_list[i + 1]]))
    net.sortModules()
    return net
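Note that build_rnn never calls addRecurrentConnection, so despite being declared as a RecurrentNetwork the resulting net behaves as a plain feed-forward chain. A quick sanity check (the sizes are arbitrary):

net = build_rnn(input_size=3, output_size=2, layers=2)
print(net.activate([0.1, 0.2, 0.3]))   # two Tanh outputs in (-1, 1)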
Example 13: buildMixedNestedNetwork
# Required module: from pybrain.structure import RecurrentNetwork [as alias]
# Or: from pybrain.structure.RecurrentNetwork import addOutputModule [as alias]
def buildMixedNestedNetwork():
    """ build a nested network with the inner one being a ffn and the outer one being recurrent. """
    N = RecurrentNetwork('outer')
    a = LinearLayer(1, name='a')
    b = LinearLayer(2, name='b')
    c = buildNetwork(2, 3, 1)
    c.name = 'inner'
    N.addInputModule(a)
    N.addModule(c)
    N.addOutputModule(b)
    N.addConnection(FullConnection(a, b))
    N.addConnection(FullConnection(b, c))
    N.addRecurrentConnection(FullConnection(c, c))
    N.sortModules()
    return N
Example 14: _CreateRecurentNN
# Required module: from pybrain.structure import RecurrentNetwork [as alias]
# Or: from pybrain.structure.RecurrentNetwork import addOutputModule [as alias]
def _CreateRecurentNN():
    net = RecurrentNetwork()
    net.addInputModule(LinearLayer(4, name='in'))
    net.addModule(BiasUnit(name='hidden_bias'))
    net.addModule(TanhLayer(13, name='hidden'))
    # net.addModule(BiasUnit(name='out_bias'))
    net.addOutputModule(SoftmaxLayer(2, name='out_class'))
    # net.addOutputModule(LinearLayer(1, name='out_predict'))
    # net.addConnection(FullConnection(net['out_bias'], net['out_predict']))
    net.addConnection(FullConnection(net['hidden_bias'], net['hidden']))
    net.addConnection(FullConnection(net['in'], net['hidden'], name='fc1'))
    net.addConnection(FullConnection(net['hidden'], net['out_class'], name='fc2'))
    # net.addConnection(FullConnection(net['hidden'], net['out_predict'], name='fc3'))
    net.addRecurrentConnection(FullConnection(net['hidden'], net['hidden'], name='rc3'))
    net.sortModules()
    return net
Example 15: buildToddNetwork
# Required module: from pybrain.structure import RecurrentNetwork [as alias]
# Or: from pybrain.structure.RecurrentNetwork import addOutputModule [as alias]
def buildToddNetwork(hiddenSize):
    net = RecurrentNetwork()
    inLayer = LinearLayer(sampleSize())
    hiddenLayer = SigmoidLayer(hiddenSize)
    outLayer = SigmoidLayer(outputSize())
    net.addInputModule(inLayer)
    net.addModule(hiddenLayer)
    net.addOutputModule(outLayer)
    inRecursive = WeightedPartialIdentityConnection(0.8, pitchCount + 1, inLayer, inLayer)
    inToHidden = FullConnection(inLayer, hiddenLayer)
    hiddenToOut = FullConnection(hiddenLayer, outLayer)
    net.addRecurrentConnection(inRecursive)
    net.addConnection(inToHidden)
    net.addConnection(hiddenToOut)
    net.sortModules()
    return net