This article collects typical usage examples of the Python method pybrain.structure.FeedForwardNetwork.addOutputModule. If you are wondering what FeedForwardNetwork.addOutputModule does, how to call it, or what it looks like in real code, the curated examples below should help. You can also read further about the class it belongs to, pybrain.structure.FeedForwardNetwork.
The following shows 15 code examples of FeedForwardNetwork.addOutputModule, sorted by popularity by default.
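As a quick orientation before the examples: addOutputModule registers a module as the network's output layer, whose activations are what activate() returns once sortModules() has been called. A minimal sketch of that build sequence (layer sizes and names here are purely illustrative):

from pybrain.structure import FeedForwardNetwork, LinearLayer, SigmoidLayer, FullConnection

net = FeedForwardNetwork()
net.addInputModule(LinearLayer(2, name='in'))      # input layer
net.addModule(SigmoidLayer(3, name='hidden'))      # hidden layer
net.addOutputModule(LinearLayer(1, name='out'))    # output layer: activate() returns its values
net.addConnection(FullConnection(net['in'], net['hidden']))
net.addConnection(FullConnection(net['hidden'], net['out']))
net.sortModules()                                  # finalize the topology
print(net.activate([0.5, -0.2]))                   # one output value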
Example 1: crearRN
# Module to import: from pybrain.structure import FeedForwardNetwork [as alias]
# Or: from pybrain.structure.FeedForwardNetwork import addOutputModule [as alias]
def crearRN():
    # Create the neural network
    n = FeedForwardNetwork()
    # Declare the input, hidden and output layers of the network
    inLayer = LinearLayer(4096)
    hiddenLayer = SigmoidLayer(3)
    outLayer = LinearLayer(1)
    # Add the layers to the network
    n.addInputModule(inLayer)
    n.addModule(hiddenLayer)
    n.addOutputModule(outLayer)
    # Declare the connections between the nodes
    in_to_hidden = FullConnection(inLayer, hiddenLayer)
    hidden_to_out = FullConnection(hiddenLayer, outLayer)
    # Register the connections in the network
    n.addConnection(in_to_hidden)
    n.addConnection(hidden_to_out)
    # The network is ready to use
    n.sortModules()
    return n
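A quick smoke test for the network returned by crearRN; the 4096 inputs suggest a flattened 64x64 image, but the vector used here is random and purely illustrative:

import numpy as np

net = crearRN()
pixels = np.random.rand(4096)      # stand-in for a flattened 64x64 image
print(net.activate(pixels))        # single output value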
Example 2: trainedANN
# Module to import: from pybrain.structure import FeedForwardNetwork [as alias]
# Or: from pybrain.structure.FeedForwardNetwork import addOutputModule [as alias]
def trainedANN():
    n = FeedForwardNetwork()
    n.addInputModule(LinearLayer(4, name='in'))
    n.addModule(SigmoidLayer(6, name='hidden'))
    n.addOutputModule(LinearLayer(2, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
    n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
    n.sortModules()
    draw_connections(n)
    # d = generateTrainingData()
    d = getDatasetFromFile(root.path()+"/res/dataSet")
    t = BackpropTrainer(n, d, learningrate=0.001, momentum=0.75)
    t.trainOnDataset(d)
    # FIXME: I'm not sure the recurrent ANN is going to converge
    # so just training for fixed number of epochs
    count = 0
    while True:
        globErr = t.train()
        print(globErr)
        if globErr < 0.01:
            break
        count += 1
        if count == 20:
            return trainedANN()  # restart from fresh random weights
    exportANN(n)
    draw_connections(n)
    return n
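draw_connections, getDatasetFromFile, root and exportANN are helpers from the surrounding project, not part of PyBrain. For plain persistence of a trained network, PyBrain's XML writer is the usual route; a minimal sketch, with buildNetwork standing in for the trained net and an arbitrary file name:

from pybrain.tools.shortcuts import buildNetwork
from pybrain.tools.customxml.networkwriter import NetworkWriter
from pybrain.tools.customxml.networkreader import NetworkReader

net = buildNetwork(4, 6, 2)                         # stand-in for the trained 4-6-2 network
NetworkWriter.writeToFile(net, 'trained_net.xml')   # serialize to XML
restored = NetworkReader.readFrom('trained_net.xml')
print(restored.activate([0, 0, 0, 0]))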
Example 3: build_network
# Module to import: from pybrain.structure import FeedForwardNetwork [as alias]
# Or: from pybrain.structure.FeedForwardNetwork import addOutputModule [as alias]
def build_network(self, layers=None, end=1):
    # Build layer objects from the spec: ("sig", n) becomes SigmoidLayer(n),
    # a plain integer becomes LinearLayer(int)
    layerobjects = []
    for item in layers:
        try:
            t, n = item
            if t == "sig":
                if n == 0:
                    continue
                layerobjects.append(SigmoidLayer(n))
        except TypeError:
            layerobjects.append(LinearLayer(item))
    n = FeedForwardNetwork()
    n.addInputModule(layerobjects[0])
    for i, layer in enumerate(layerobjects[1:-1]):
        n.addModule(layer)
        connection = FullConnection(layerobjects[i], layerobjects[i+1])
        n.addConnection(connection)
    n.addOutputModule(layerobjects[-1])
    connection = FullConnection(layerobjects[-2], layerobjects[-1])
    n.addConnection(connection)
    n.sortModules()
    return n
Example 4: ann_network
# Module to import: from pybrain.structure import FeedForwardNetwork [as alias]
# Or: from pybrain.structure.FeedForwardNetwork import addOutputModule [as alias]
def ann_network():
    nn = FeedForwardNetwork()
    # define the activation function and number of nodes per layer
    in_layer = LinearLayer(13)
    hidden_layer = SigmoidLayer(5)
    bias_unit = BiasUnit(name='bias')
    out_layer = LinearLayer(1)
    # add modules to the network
    nn.addInputModule(in_layer)
    nn.addModule(hidden_layer)
    nn.addModule(bias_unit)
    nn.addOutputModule(out_layer)
    # define connections between the nodes (the bias unit feeds into the hidden layer)
    hidden_with_bias = FullConnection(bias_unit, hidden_layer)
    in_to_hidden = FullConnection(in_layer, hidden_layer)
    hidden_to_out = FullConnection(hidden_layer, out_layer)
    # add connections to the network
    nn.addConnection(in_to_hidden)
    nn.addConnection(hidden_with_bias)
    nn.addConnection(hidden_to_out)
    # perform network internal initialization
    nn.sortModules()
    return nn
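A hedged training sketch for ann_network; the 13-feature, single-target layout matches a typical regression setup, but the samples below are random placeholders:

import numpy as np
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer

net = ann_network()
ds = SupervisedDataSet(13, 1)
for _ in range(50):
    ds.addSample(np.random.rand(13), [np.random.rand()])   # placeholder rows
trainer = BackpropTrainer(net, ds, learningrate=0.01, momentum=0.5)
for epoch in range(10):
    print(trainer.train())                                 # mean squared error per epoch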
Example 5: __init__
# Module to import: from pybrain.structure import FeedForwardNetwork [as alias]
# Or: from pybrain.structure.FeedForwardNetwork import addOutputModule [as alias]
def __init__(self, index, name, params):
    self.name = name
    self.index = index
    self.liste = []  # ClassificationDataSet(17, 1, nb_classes=4)
    self.status_good = True
    self.number_of_moves = 0
    self.number_of_sound_moves = 0
    n = FeedForwardNetwork()
    self.inLayer = LinearLayer(5)
    self.hiddenLayer1 = SigmoidLayer(15)
    self.hiddenLayer2 = SigmoidLayer(15)
    self.hiddenLayer3 = SigmoidLayer(15)
    self.outLayer = LinearLayer(4)
    n.addInputModule(self.inLayer)
    n.addModule(self.hiddenLayer1)
    n.addModule(self.hiddenLayer2)
    n.addModule(self.hiddenLayer3)
    n.addOutputModule(self.outLayer)
    from pybrain.structure import FullConnection
    in_to_hidden = FullConnection(self.inLayer, self.hiddenLayer1)
    hidden_to_hidden1 = FullConnection(self.hiddenLayer1, self.hiddenLayer2)
    hidden_to_hidden2 = FullConnection(self.hiddenLayer2, self.hiddenLayer3)
    hidden_to_out = FullConnection(self.hiddenLayer3, self.outLayer)
    n.addConnection(in_to_hidden)
    n.addConnection(hidden_to_hidden1)
    n.addConnection(hidden_to_hidden2)
    n.addConnection(hidden_to_out)
    n.sortModules()
Example 6: encoderdecoder
# Module to import: from pybrain.structure import FeedForwardNetwork [as alias]
# Or: from pybrain.structure.FeedForwardNetwork import addOutputModule [as alias]
def encoderdecoder(outersize, innersize, indata, fname):
    # create network
    n = FeedForwardNetwork()
    inLayer = LinearLayer(outersize)
    hiddenLayer = SigmoidLayer(innersize)
    outLayer = LinearLayer(outersize)
    n.addInputModule(inLayer)
    n.addModule(hiddenLayer)
    n.addOutputModule(outLayer)
    in_to_hidden = FullConnection(inLayer, hiddenLayer)
    hidden_to_out = FullConnection(hiddenLayer, outLayer)
    n.addConnection(in_to_hidden)
    n.addConnection(hidden_to_out)
    n.sortModules()
    # create dataset (autoencoder: target equals input)
    ds = SupervisedDataSet(outersize, outersize)
    for x, y in zip(indata, indata):
        ds.addSample(x, y)
    # train network
    trainer = BackpropTrainer(n, ds)
    trainer.trainUntilConvergence()
    # persist the trained network as XML
    from pybrain.tools.customxml.networkwriter import NetworkWriter
    NetworkWriter.writeToFile(n, fname)
    return [[in_to_hidden, hidden_to_out],
            [inLayer, hiddenLayer, outLayer],
            n]
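A hedged usage sketch for encoderdecoder, compressing random 8-dimensional vectors through a 3-unit bottleneck; the data and file name are invented for illustration, and trainUntilConvergence may take a while on real data:

import numpy as np

data = [np.random.rand(8) for _ in range(20)]     # toy input vectors
conns, layers, net = encoderdecoder(8, 3, data, 'autoencoder.xml')
sample = data[0]
print(sample)
print(net.activate(sample))                       # reconstruction of the sample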
Example 7: _constructNetwork
# Module to import: from pybrain.structure import FeedForwardNetwork [as alias]
# Or: from pybrain.structure.FeedForwardNetwork import addOutputModule [as alias]
def _constructNetwork(self, nIn, nOut, params):
    ''' Construct the network '''
    nHidden = params.setdefault('nHidden', 2)
    hiddenSize = np.empty(nHidden, dtype=int)  # layer sizes must be integers
    for i in range(nHidden):
        pstr = 'hiddenSize[' + str(i) + ']'
        hiddenSize[i] = params.setdefault(pstr, nIn + nOut)
    # Construct network
    ann = FeedForwardNetwork()
    # Add layers
    layers = []
    layers.append(LinearLayer(nIn))
    for nHid in hiddenSize:
        layers.append(SoftmaxLayer(nHid))
    layers.append(LinearLayer(nOut))
    ann.addOutputModule(layers[-1])
    ann.addInputModule(layers[0])
    for mod in layers[1:-1]:
        ann.addModule(mod)
    # Connections
    for i, mod in enumerate(layers):
        if i < len(layers) - 1:
            conn = FullConnection(mod, layers[i+1])
            ann.addConnection(conn)
    # Sort the modules
    ann.sortModules()
    return ann
Example 8: buildMLP
# Module to import: from pybrain.structure import FeedForwardNetwork [as alias]
# Or: from pybrain.structure.FeedForwardNetwork import addOutputModule [as alias]
def buildMLP(dataSet, num_hidden):
    '''
    Builds a feed-forward network based on the dataset passed in.
    The hidden layer has num_hidden nodes.
    '''
    # make the network
    network = FeedForwardNetwork()
    # make network layers
    inputLayer = LinearLayer(dataSet.indim)
    hiddenLayer = SigmoidLayer(num_hidden)
    outputLayer = LinearLayer(dataSet.outdim)
    # add the layers to the network
    network.addInputModule(inputLayer)
    network.addModule(hiddenLayer)
    network.addOutputModule(outputLayer)
    # add bias
    network.addModule(BiasUnit(name='bias'))
    # create connections between layers
    inToHidden = FullConnection(inputLayer, hiddenLayer)
    hiddenToOut = FullConnection(hiddenLayer, outputLayer)
    # connect the bias unit to the hidden and output layers
    network.addConnection(FullConnection(network['bias'], outputLayer))
    network.addConnection(FullConnection(network['bias'], hiddenLayer))
    # add connections to the network
    network.addConnection(inToHidden)
    network.addConnection(hiddenToOut)
    network.sortModules()
    return network
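Because buildMLP reads the layer sizes from the dataset's indim and outdim, any SupervisedDataSet works; a hedged sketch with a toy XOR-style dataset:

from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer

ds = SupervisedDataSet(2, 1)                  # 2 inputs, 1 target -> a 2-num_hidden-1 network
for inp, target in [((0, 0), 0), ((0, 1), 1), ((1, 0), 1), ((1, 1), 0)]:
    ds.addSample(inp, [target])
net = buildMLP(ds, num_hidden=4)
trainer = BackpropTrainer(net, ds, learningrate=0.1)
trainer.trainEpochs(100)
print(net.activate((1, 0)))                   # should move toward 1 as training progresses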
Example 9: trained_cat_dog_ANN
# Module to import: from pybrain.structure import FeedForwardNetwork [as alias]
# Or: from pybrain.structure.FeedForwardNetwork import addOutputModule [as alias]
def trained_cat_dog_ANN():
    n = FeedForwardNetwork()
    d = get_cat_dog_trainset()
    input_size = d.getDimension('input')
    n.addInputModule(LinearLayer(input_size, name='in'))
    n.addModule(SigmoidLayer(input_size+1500, name='hidden'))
    n.addOutputModule(LinearLayer(2, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
    n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
    n.sortModules()
    n.convertToFastNetwork()
    print('successfully converted to fast network')
    t = BackpropTrainer(n, d, learningrate=0.0001)  # , momentum=0.75)
    count = 0
    while True:
        globErr = t.train()
        print(globErr)
        count += 1
        if globErr < 0.01:
            break
        if count == 30:
            break
    exportCatDogANN(n)
    return n
Example 10: BackupNetwork
# Module to import: from pybrain.structure import FeedForwardNetwork [as alias]
# Or: from pybrain.structure.FeedForwardNetwork import addOutputModule [as alias]
def BackupNetwork(genome=None):
    # Build a [12, 12, 4] network whose initial weights reproduce the baseline policy
    from pybrain.structure import FeedForwardNetwork, LinearLayer, TanhLayer, FullConnection
    network = FeedForwardNetwork()
    inLayer = LinearLayer(12)
    hiddenLayer = LinearLayer(12)
    outLayer = TanhLayer(4)
    network.addInputModule(inLayer)
    network.addModule(hiddenLayer)
    network.addOutputModule(outLayer)
    weights = []
    if genome is None:
        import pickle
        weights = pickle.load(open("seed", "rb"))
    else:
        weights = genome
    in_to_hidden = FullConnection(inLayer, hiddenLayer)
    hidden_to_out = FullConnection(hiddenLayer, outLayer)
    for i in range(0, 144):              # 12 x 12 input-to-hidden weights
        in_to_hidden.params[i] = weights[i]
    for j in range(0, 48):               # 12 x 4 hidden-to-output weights
        hidden_to_out.params[j] = weights[j + 144]
    network.addConnection(in_to_hidden)
    network.addConnection(hidden_to_out)
    network.sortModules()
    return network
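A quick usage check, seeding the network from an explicit genome instead of the pickled baseline; the all-zero genome is only for illustration:

genome = [0.0] * (144 + 48)          # 12*12 input-to-hidden plus 12*4 hidden-to-output weights
net = BackupNetwork(genome)
print(len(net.params))               # 192 parameters in total
print(net.activate([0.5] * 12))      # four outputs in (-1, 1) from the TanhLayer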
Example 11: buildNet
# Module to import: from pybrain.structure import FeedForwardNetwork [as alias]
# Or: from pybrain.structure.FeedForwardNetwork import addOutputModule [as alias]
def buildNet(input_size, hidden_size):
    n = FeedForwardNetwork()
    in1Layer = LinearLayer(input_size)
    in2Layer = LinearLayer(input_size)
    hidden1Layer = SigmoidLayer(hidden_size)
    hidden2Layer = SigmoidLayer(hidden_size)
    hidden3Layer = SigmoidLayer(2)
    outLayer = LinearLayer(1)
    n.addInputModule(in1Layer)
    n.addInputModule(in2Layer)
    n.addModule(hidden1Layer)
    n.addModule(hidden2Layer)
    n.addModule(hidden3Layer)
    n.addOutputModule(outLayer)
    in1_to_hidden1 = FullConnection(in1Layer, hidden1Layer)
    in2_to_hidden2 = FullConnection(in2Layer, hidden2Layer)
    hidden1_to_hidden3 = FullConnection(hidden1Layer, hidden3Layer)
    hidden2_to_hidden3 = FullConnection(hidden2Layer, hidden3Layer)
    hidden3_to_out = FullConnection(hidden3Layer, outLayer)
    n.addConnection(in1_to_hidden1)
    n.addConnection(in2_to_hidden2)
    n.addConnection(hidden1_to_hidden3)
    n.addConnection(hidden2_to_hidden3)
    n.addConnection(hidden3_to_out)
    n.sortModules()
    return n
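With two input modules the network's total input dimension is 2 * input_size, and activate() takes the two input vectors concatenated in the order the input modules were added (an assumption worth checking against net.indim); a small sketch:

net = buildNet(3, 4)
print(net.indim)                  # -> 6: both 3-unit input layers combined
x1 = [0.1, 0.2, 0.3]              # intended for in1Layer
x2 = [0.4, 0.5, 0.6]              # intended for in2Layer
print(net.activate(x1 + x2))      # single output value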
Example 12: main
# Module to import: from pybrain.structure import FeedForwardNetwork [as alias]
# Or: from pybrain.structure.FeedForwardNetwork import addOutputModule [as alias]
def main():
    n = FeedForwardNetwork()
    in_layer = LinearLayer(2)
    hidden_layer = SigmoidLayer(3)
    out_layer = LinearLayer(1)
    n.addInputModule(in_layer)
    n.addModule(hidden_layer)
    n.addOutputModule(out_layer)
    in_to_hidden = FullConnection(in_layer, hidden_layer)
    hidden_to_out = FullConnection(hidden_layer, out_layer)
    n.addConnection(in_to_hidden)
    n.addConnection(hidden_to_out)
    n.sortModules()
    print(">>> print n")
    print(n)
    print(">>> n.activate([1, 2])")
    print(n.activate([1, 2]))
    print(">>> in_to_hidden.params")
    print(in_to_hidden.params)
    print(">>> hidden_to_out.params")
    print(hidden_to_out.params)
    print(">>> n.params")
    print(n.params)
Example 13: create_ff_network
# Module to import: from pybrain.structure import FeedForwardNetwork [as alias]
# Or: from pybrain.structure.FeedForwardNetwork import addOutputModule [as alias]
def create_ff_network(options):
    """Create the FeedForward network.
    :param options: The input options.
    :return: The assembled network.
    """
    # Create FF network
    net = FeedForwardNetwork()
    # Create each Layer instance
    in_layer = LinearLayer(options['inUnitCount'])
    hidden_layer = SigmoidLayer(options['hiddenUnitCount'])
    out_layer = LinearLayer(options['outUnitCount'])
    # Build network layer topology
    net.addInputModule(in_layer)
    net.addModule(hidden_layer)
    net.addOutputModule(out_layer)
    in_to_hidden = FullConnection(in_layer, hidden_layer)
    hidden_to_out = FullConnection(hidden_layer, out_layer)
    net.addConnection(in_to_hidden)
    net.addConnection(hidden_to_out)
    # Finalize the network structure
    net.sortModules()
    return net
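The options argument is simply a mapping with the three unit-count keys used above; a hedged call sketch with illustrative sizes:

options = {'inUnitCount': 4, 'hiddenUnitCount': 6, 'outUnitCount': 2}
net = create_ff_network(options)
print(net.activate([0.1, 0.2, 0.3, 0.4]))     # two output values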
Example 14: create_network
# Module to import: from pybrain.structure import FeedForwardNetwork [as alias]
# Or: from pybrain.structure.FeedForwardNetwork import addOutputModule [as alias]
def create_network():
    # Create the network itself
    network = FeedForwardNetwork()
    # Create layers
    NUMBER_OF_INPUT_BYTES = 1600     # because the input is a 40x40 picture
    NUMBER_OF_HIDDEN_LAYERS = 10     # number of neurons in the (single) hidden layer
    NUMBER_OF_OUTPUT_CLASSES = 8     # because the output has 8 classes
    inLayer = LinearLayer(NUMBER_OF_INPUT_BYTES)
    hiddenLayer = SigmoidLayer(NUMBER_OF_HIDDEN_LAYERS)
    outLayer = LinearLayer(NUMBER_OF_OUTPUT_CLASSES)
    # Create connections between layers
    # We create FullConnection - each neuron of one layer is connected to each neuron of the other layer
    in_to_hidden = FullConnection(inLayer, hiddenLayer)
    hidden_to_out = FullConnection(hiddenLayer, outLayer)
    # Add layers to our network
    network.addInputModule(inLayer)
    network.addModule(hiddenLayer)
    network.addOutputModule(outLayer)
    # Add connections to the network
    network.addConnection(in_to_hidden)
    network.addConnection(hidden_to_out)
    # Sort modules to make the multilayer perceptron usable
    network.sortModules()
    # prepare an array to activate the network with
    d_letter_array = read_array("d")
    # activate the network
    network.activate(d_letter_array)
    return network
Example 15: _createRBF
# Module to import: from pybrain.structure import FeedForwardNetwork [as alias]
# Or: from pybrain.structure.FeedForwardNetwork import addOutputModule [as alias]
def _createRBF(self):
    # choose random centers on map
    for i in range(self.numCenters):
        self.centers.append(self.env._randomInitPose())
    # create an RBF network
    params = FeedForwardNetwork()
    inLayer = LinearLayer(self.task.outdim)
    hiddenLayer = RBFLayer(self.numCenters, self.centers)
    # inLayer = RBFLayer(self.numCenters, self.centers)
    outLayer = LinearLayer(self.task.indim)
    params.addInputModule(inLayer)
    params.addModule(hiddenLayer)
    params.addOutputModule(outLayer)
    in_to_hidden = FullConnection(inLayer, hiddenLayer)
    hidden_to_out = FullConnection(hiddenLayer, outLayer)
    params.addConnection(in_to_hidden)
    params.addConnection(hidden_to_out)
    params.sortModules()
    return params