本文整理汇总了Python中pybrain.structure.FeedForwardNetwork.reset方法的典型用法代码示例。如果您正苦于以下问题:Python FeedForwardNetwork.reset方法的具体用法?Python FeedForwardNetwork.reset怎么用?Python FeedForwardNetwork.reset使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类pybrain.structure.FeedForwardNetwork的用法示例。
在下文中一共展示了FeedForwardNetwork.reset方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: LinearLayer
# 需要导入模块: from pybrain.structure import FeedForwardNetwork [as 别名]
# 或者: from pybrain.structure.FeedForwardNetwork import reset [as 别名]
# NOTE(review): excerpt from a larger script (Python 2 print syntax) —
# 'in_to_hidden', 'hidden_to_out' and the initial feed-forward 'n' are
# defined in code elided before this fragment.
print in_to_hidden.params  # weights of the input->hidden connection
print hidden_to_out.params  # weights of the hidden->output connection
print n.params  # all network weights, concatenated
print n.activate([1, 2])  # one forward pass through the feed-forward net
# Naming your NN: layers get an auto-generated name unless one is given.
print LinearLayer(2).name
LinearLayer(2, name='foo')  # explicitly named layer (object is discarded here)
print LinearLayer(2).name  # a NEW layer — prints another auto name, not 'foo'
# Using Recurrent NN
n = RecurrentNetwork()
n.addInputModule(LinearLayer(2, name='in'))
n.addModule(SigmoidLayer(3, name='hidden'))
n.addOutputModule(LinearLayer(1, name='out'))
n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
# Looks back in time one timestep
n.addRecurrentConnection(FullConnection(n['hidden'], n['hidden'], name='c3'))
# With the recurrent connection c3, repeating the same input yields a
# different output each step: the hidden layer feeds back its previous state.
n.sortModules()  # required before the network can be activated
print n.activate([2, 2])
print n.activate([2, 2])
print n.activate([2, 2])
n.reset() # clears the network's history buffers so the sequence starts over
print n.activate([2, 2])  # sequence restarted — compare with the first activation
print n.activate([2, 2])
示例2: FullConnection
# 需要导入模块: from pybrain.structure import FeedForwardNetwork [as 别名]
# 或者: from pybrain.structure.FeedForwardNetwork import reset [as 别名]
# NOTE(review): excerpt — 'n' (a feed-forward network), 'inLayer',
# 'hiddenLayer' and 'outLayer' are created in code elided above.
n.addModule(hiddenLayer)
n.addOutputModule(outLayer)
in_to_hidden = FullConnection(inLayer, hiddenLayer)
hidden_to_out = FullConnection(hiddenLayer, outLayer)
n.addConnection(in_to_hidden)
n.addConnection(hidden_to_out)
# this is required to make the MLP usable
n.sortModules()
print n.activate((2,2)) # forward pass
print 'n.params\n', n.params # all weights
# same but for recurrent network
n = RecurrentNetwork()
n.addInputModule(LinearLayer(2, name='in'))
n.addModule(SigmoidLayer(3, name='hidden'))
n.addOutputModule(LinearLayer(1, name='out'))
n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
# recurrent connection: hidden layer feeds back into itself, delayed one timestep
n.addRecurrentConnection(FullConnection(n['hidden'], n['hidden'], name='c3'))
n.sortModules()
print n.activate((2,2)) # forward pass
print n.activate((2,2)) # forward pass — differs: hidden state carries over
print n.activate((2,2)) # forward pass
print n.reset(), '\nafter reset'  # NOTE(review): presumably reset() returns None, so this prints 'None' — verify
print n.activate((2,2)) # forward pass — sequence restarted after reset
示例3: ANN_edge_analysis
# 需要导入模块: from pybrain.structure import FeedForwardNetwork [as 别名]
# 或者: from pybrain.structure.FeedForwardNetwork import reset [as 别名]
# NOTE(review): the indentation of this example was lost in extraction —
# all loop/if bodies below are flattened to column 0, so it does NOT parse
# as-is. The function's tail is also elided (see the "部分代码省略" marker
# after this block). Comments below describe the apparent intended nesting.
def ANN_edge_analysis(a_network, a_gene, a_dataset, boot_val):
"""Build a single linear input->output ANN mirroring the hypothesized
regulatory network around `a_gene`, train it `boot_val` times on
`a_dataset`, and collect the learned edge weights via get_nn_details().

Parameters (inferred from use — confirm against callers):
  a_network  -- hypothesized network passed to get_sub_list_from_network()
  a_gene     -- target gene; excluded from the input (TF) nodes
  a_dataset  -- iterable of per-experiment mappings keyed by node name
  boot_val   -- number of bootstrap training rounds
"""
regulatory_network = FeedForwardNetwork()
# retrieving needed parameters from the input network
data_node_list = get_sub_list_from_network(a_network, a_gene, "gene,TF", 1)
# Need to add +1 node to the input layer that represents the "other" control variables
# describing network modules to be used
# One input per TF node; the target gene itself is excluded (hence the -1).
inLayer = LinearLayer(len(data_node_list)-1)
#hiddenLayer = LinearLayer(len(data_node_list)-1))
outLayer = LinearLayer(1)
# Adding layers to network
regulatory_network.addInputModule(inLayer)
#regulatory_network.addModule(hiddenLayer)
regulatory_network.addOutputModule(outLayer)
# Adding connections between layers
#in_to_hidden = LinearConnection(inLayer,hiddenLayer)
#hidden_to_out = FullConnection(hiddenLayer, outLayer)
in_to_out = FullConnection(inLayer, outLayer)
#regulatory_network.addConnection(in_to_hidden)
#regulatory_network.addConnection(hidden_to_out)
regulatory_network.addConnection(in_to_out)
get_nn_details(regulatory_network)
# Other stuff added
regulatory_network.sortModules()
# Formatting the dataset
input_dimention = len(data_node_list)-1
print "in_dimention = ", input_dimention
DS = SupervisedDataSet( input_dimention, 1 )
# Adding data, there may be a problem with order here where tfs are not always the same... seems ok though
# NOTE(review): tf_labels is re-initialized every experiment, so after the
# loop it holds only the labels from the LAST experiment.
for experiment in a_dataset:
tf_list = []
gene_list = []
tf_labels = []
for TF in data_node_list:
if TF != a_gene:
#print TF, "<---"
tf_list.append(experiment[TF])
tf_labels.append(TF)
else:
#print TF, "<---gene"
gene_list.append(experiment[TF])
print tf_list
print gene_list
# Only experiments with complete TF and gene measurements are added to DS.
if (check_missing_experiments(tf_list) == True) and (check_missing_experiments(gene_list) == True):
float_tf_list = [float(i) for i in tf_list]
float_gene_list = [float(i) for i in gene_list]
DS.appendLinked( float_tf_list, float_gene_list )
print "......"
print DS
# Training
trainer = BackpropTrainer(regulatory_network, momentum=0.1, verbose=True, weightdecay=0.01)
trainer.setData(DS)
result_list = []
boot_count = 0
# Bootstrap loop: train to convergence, snapshot the weights, reset, repeat.
while boot_count < boot_val:
#trainer.trainEpochs(1000)
trainer.trainUntilConvergence(validationProportion=0.25)
print regulatory_network
this = get_nn_details(regulatory_network)
result_list.append(this)
# NOTE(review): reset() clears activation buffers; weights are NOT
# re-randomized here (unlike example 5 below) — confirm this is intended.
regulatory_network.reset()
boot_count += 1
print tf_labels
print regulatory_network.params
print in_to_out.params
print inLayer
pesos_conexiones(regulatory_network)
NetworkWriter.writeToFile(regulatory_network, 'trained_net.xml')
#.........这里部分代码省略.........
示例4: FullConnection
# 需要导入模块: from pybrain.structure import FeedForwardNetwork [as 别名]
# 或者: from pybrain.structure.FeedForwardNetwork import reset [as 别名]
# NOTE(review): excerpt — 'n', 'inLayer', 'hiddenLayer', 'biasinUnit',
# 'biasoutUnit' and 'outLayer' are constructed in code elided above.
# Indentation of the if/else and for bodies below was lost in extraction
# (flattened to column 0); restore nesting before running.
n.addInputModule(inLayer)
n.addModule(hiddenLayer)
n.addModule(biasinUnit)
n.addModule(biasoutUnit)
n.addOutputModule(outLayer)
in_to_hidden = FullConnection(inLayer,hiddenLayer)
bias_to_hidden = FullConnection(biasinUnit,hiddenLayer)
bias_to_out = FullConnection(biasoutUnit,outLayer)
hidden_to_out = FullConnection(hiddenLayer,outLayer)
n.addConnection(in_to_hidden)
n.addConnection(bias_to_hidden)
n.addConnection(bias_to_out)
n.addConnection(hidden_to_out)
n.sortModules()
n.reset()
#read the initail weight values from myparam2.txt
filetoopen = os.path.join(os.getcwd(),'myparam2.txt')
# If the weight file exists, load one float per line as the parameters;
# otherwise write the current (random) parameters out as the seed file.
if os.path.isfile(filetoopen):
myfile = open('myparam2.txt','r')
c=[]
for line in myfile:
c.append(float(line))
n._setParameters(c)
else:
myfile = open('myparam2.txt','w')
for i in n.params:
myfile.write(str(i)+'\n')
myfile.close()
# NOTE(review): the read branch never closes myfile (only the write branch
# does) — prefer a 'with open(...)' block for both when restoring this code.
示例5: ANN_blind_analysis
# 需要导入模块: from pybrain.structure import FeedForwardNetwork [as 别名]
# 或者: from pybrain.structure.FeedForwardNetwork import reset [as 别名]
#.........这里部分代码省略.........
# NOTE(review): this is the TAIL of ANN_blind_analysis — the def line,
# docstring, and the code creating 'regulatory_network', 'DS' and
# 'data_node_list' are elided (see the "部分代码省略" marker above).
# Indentation was also lost in extraction (bodies flattened to column 0),
# so this fragment does not parse as-is.
# This is where the ordered dict needs to be used to link the input name to the input node.
# Build the training set: one (TF expression values -> gene value) pair per experiment.
for experiment in a_dataset:
tf_list = []
gene_list = []
tf_labels = []
first_round = True
for TF in data_node_list:
if TF != a_gene:
#print TF, "<---"
tf_list.append(experiment[TF])
if first_round == True:
tf_labels.append(TF)
else:
#print TF, "<---gene"
gene_list.append(experiment[TF])
# NOTE(review): with the flattened indentation it is ambiguous whether
# 'first_round = False' sat inside the inner loop or after it; either way
# tf_labels is re-initialized per experiment, which makes the flag suspect.
first_round = False
# View the input data sets
print tf_labels
print tf_list
print gene_list
# Only experiments with complete TF and gene measurements are kept.
if (check_missing_experiments(tf_list) == True) and (check_missing_experiments(gene_list) == True):
float_tf_list = [float(i) for i in tf_list]
float_gene_list = [float(i) for i in gene_list]
DS.appendLinked( float_tf_list, float_gene_list )
print "......"
print 'Network before training'
print regulatory_network
pesos_conexiones(regulatory_network)
print regulatory_network.outputerror
#print DS
# Training
# NOTE(review): RPropMinusTrainer_Evolved is a project-local trainer subclass
# (exposes total_error) — semantics not visible from this excerpt.
trainer = RPropMinusTrainer_Evolved(regulatory_network, verbose=False)
trainer.setData(DS)
result_list = []
best_run_error = 1000  # sentinel: any real run error is assumed smaller
boot_count = 0
# Bootstrap loop: train, keep the network with the lowest |total_error|,
# then reset + re-randomize the weights and rebuild the trainer for the next round.
while boot_count < boot_val:
print '\n'
print 'Bootstrap round ' + str(boot_count + 1)
trainer.trainEpochs(500)
this = get_nn_details(regulatory_network)
# Corrected error
print trainer.total_error
current_run_error = trainer.total_error
print 'Bootstrap round ' + str(boot_count + 1) + ' error: ' + str(current_run_error)
# Best-so-far run: persist the trained net and export it as GML.
if abs(current_run_error) < abs(best_run_error):
best_run_error = current_run_error
trained_net_filename = a_gene + '_trained_net.xml'
NetworkWriter.writeToFile(regulatory_network, trained_net_filename)
export_to_gml(regulatory_network, tf_labels, a_gene)
#result_list.append(this)
regulatory_network.reset()
regulatory_network.randomize()
trainer = RPropMinusTrainer_Evolved(regulatory_network, verbose=False)
trainer.setData(DS)
boot_count += 1
#print "TF Labels"
#print tf_labels
#print regulatory_network.params
#print inLayer
#print "Pesos Conexiones"
#pesos_conexiones(regulatory_network)
#print dir(regulatory_network)
#print dir(trainer)
#print 'look here'
#print regulatory_network.outputerror
#print '<><><><><>'
#print dir(regulatory_network['SigmoidLayer-7'])
#print '\n'
#print vars(regulatory_network['SigmoidLayer-7'])
#print '\n'
#print regulatory_network['SigmoidLayer-7'].forward
#print regulatory_network['SigmoidLayer-7'].bufferlist
# Return value: [gene name, best run error, number of TF inputs].
# NOTE(review): tf_list here holds values from the LAST experiment only —
# its length is the TF count, assuming every experiment covers all TFs.
result_list.append(a_gene)
result_list.append(best_run_error)
result_list.append(len(tf_list))
return result_list