This article collects typical usage examples of the Python method pybrain.structure.FeedForwardNetwork.randomize. If you are wondering how FeedForwardNetwork.randomize is used in practice, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also look further into usage examples of the class the method belongs to, pybrain.structure.FeedForwardNetwork.
The following shows 4 code examples of the FeedForwardNetwork.randomize method, sorted by popularity by default.
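Before the examples, here is a minimal self-contained sketch (not one of the collected examples below) of what randomize() does: once the topology has been finalized with sortModules(), it reinitializes the network's parameters in place.

# Minimal sketch: build a tiny network, then call randomize() to draw fresh weights.
from pybrain.structure import FeedForwardNetwork, LinearLayer, SigmoidLayer, FullConnection

net = FeedForwardNetwork()
inLayer = LinearLayer(2)
hidden = SigmoidLayer(3)
outLayer = LinearLayer(1)
net.addInputModule(inLayer)
net.addModule(hidden)
net.addOutputModule(outLayer)
net.addConnection(FullConnection(inLayer, hidden))
net.addConnection(FullConnection(hidden, outLayer))
net.sortModules()           # finalize the topology; this also initializes the parameters

before = net.params.copy()  # flat numpy array of all connection weights
net.randomize()             # reinitialize the parameters in place
assert (net.params != before).any()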
Example 1: build_2ffnn
# Required import: from pybrain.structure import FeedForwardNetwork [as alias]
# Or: from pybrain.structure.FeedForwardNetwork import randomize [as alias]
from pybrain.structure import FeedForwardNetwork, LinearLayer, TanhLayer, FullConnection

def build_2ffnn(inp, h1, h2, out):
    # Feed-forward network with two tanh hidden layers; randomized before returning.
    n = FeedForwardNetwork()
    inLayer = LinearLayer(inp)
    hiddenLayer1 = TanhLayer(h1)
    hiddenLayer2 = TanhLayer(h2)
    outLayer = LinearLayer(out)
    #outLayer = SoftmaxLayer(out)
    n.addInputModule(inLayer)
    n.addModule(hiddenLayer1)
    n.addModule(hiddenLayer2)
    n.addOutputModule(outLayer)
    in_to_hidden1 = FullConnection(inLayer, hiddenLayer1)
    hidden1_to_hidden2 = FullConnection(hiddenLayer1, hiddenLayer2)
    hidden2_to_out = FullConnection(hiddenLayer2, outLayer)
    n.addConnection(in_to_hidden1)
    n.addConnection(hidden1_to_hidden2)
    n.addConnection(hidden2_to_out)
    n.sortModules()
    n.randomize()
    return n
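As a quick usage note (not part of the original example; the layer sizes and inputs below are arbitrary), the returned network can be re-randomized at any time:

net = build_2ffnn(4, 8, 6, 2)
print(net.activate([0.1, 0.2, 0.3, 0.4]))  # forward pass with the current weights
net.randomize()                            # draw a new random set of weights
print(net.activate([0.1, 0.2, 0.3, 0.4]))  # generally produces a different output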
Example 2: MP_Pybrain
# Required import: from pybrain.structure import FeedForwardNetwork [as alias]
# Or: from pybrain.structure.FeedForwardNetwork import randomize [as alias]
class MP_Pybrain(Regression):
    """
    Fully connected multilayer perceptron using the pybrain library.
    """
    def __init__(self, train_data, hyper, n_targets=None, label_targets=None):
        """
        ------------
        train_data: pandas DataFrame
            Contains columns for features and for target variables. The names of the target variables end
            with the suffix "_tau".
        hyper: dictionary
            It contains the hyperparameters needed to run all the functionalities of the model.
            They are the following:
            "structure" is a list of integers determining the number of neurons in each hidden layer
            "epochs" an integer specifying the maximum number of epochs to run during every training session
            "learning_rate" a float giving the learning rate of the gradient descent
            "momentum" a float giving the value of the momentum for the algorithm
            "batch" a bool. If True the method performs full batch learning, i.e. the weights are updated
            using all the instances of the training set. Otherwise, standard online learning is performed.
        Other parameters regarding cross-validation are explained in the base class.
        """
        Regression.__init__(self, train_data, hyper, n_targets=n_targets, label_targets=label_targets)
        self.N = FeedForwardNetwork()
        self.structure = [self.n_feature] + hyper['structure'] + [self.n_target]
        self._build_net(self.structure)
        self.res_params = [self.N.params[i] for i in range(len(self.N.params))]
        self.train_fraction = hyper['train_fraction']
        self.seed = hyper['seed']
        self.epochs = hyper['epochs']
        self.learning_rate = hyper['learning_rate']
        self.momentum = hyper['momentum']
        self.batch = bool(hyper['batch'])

    def learn(self, train_data=None, seed=None):
        """
        Performs a single training run; designed to be called after network instantiation.
        ----------
        train_data: pandas DataFrame
            It needs to contain datetime objects on its index, and both features and target variables.
            The target variables need to end with the suffix "_tau". If None, the self.train_set
            variable passed at the moment of instantiation will be used.
        Returns: tuple(MP_Pybrain object, float, float)
            The model with the lowest training error, its training error, and the corresponding validation error.
        """
        if train_data is not None:
            self.train_set = train_data
        self.randomize()
        ds_train, ds_valid = self._build_dataset(self.train_set)
        trainer = BackpropTrainer(self.N, ds_train, learningrate=self.learning_rate,
                                  momentum=self.momentum, batchlearning=self.batch)
        trainer.train()
        e_train = [self._error(ds_train)]
        e_valid = [self._error(ds_valid)]
        final_model = copy(self)
        fin_error_train = e_train[0]
        fin_error_valid = e_valid[0]
        for i in range(1, self.epochs):
            if i % 10 == 0:
                print "epoch: ", i
            trainer.train()
            e_train.append(self._error(ds_train))
            e_valid.append(self._error(ds_valid))
            if e_train[-1] < fin_error_train:
                final_model = deepcopy(self)
                fin_error_train = e_train[-1]
                fin_error_valid = e_valid[-1]
        return final_model, fin_error_train, fin_error_valid

    def xvalidate(self, train_data=None, folds=None):
        """
        Performs n-fold cross-validation on a data set. The method is designed to reset the network
        to an initial configuration (decided at the moment of instantiation) every time a new training is
        started. The purpose is to allow model comparison and to return an average error for a given
        data set and collection of hyperparameters. At the moment, training and validation sets are chosen
        based on the input sequence of the data, i.e. there is no random shuffling of the instances of the data set.
        ----------
        train_data: pandas DataFrame
            It needs to contain datetime objects on its index, and both features and target variables.
            The target variables need to end with the suffix "_tau". If None, the self.train_set
            variable passed at the moment of instantiation will be used.
        folds: integer
            The number of training/validation partitions used in the method. If None, it needs to have been
            passed in the constructor when instantiating the object for the first time. If it was never
            passed, the method cannot work and an exception is raised.
        Returns: list, float, float
            A list of all the models trained for each fold, the mean training error and the cross-validation error,
            i.e. the average NRMSE over all the training/validation partitions created.
#......... part of the code omitted here .........
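The constructor above expects a hyper dictionary. A hypothetical value, assembled only from the keys that __init__ actually reads (the concrete numbers are illustrative, and the Regression base class and the training DataFrame are not shown here), might look like this:

# Hypothetical hyperparameter dictionary for MP_Pybrain; values are illustrative only.
hyper = {
    'structure': [20, 10],     # two hidden layers with 20 and 10 neurons
    'epochs': 100,             # maximum number of training epochs per run
    'learning_rate': 0.01,     # learning rate for gradient descent
    'momentum': 0.9,           # momentum term
    'batch': False,            # False -> online (per-instance) weight updates
    'train_fraction': 0.8,     # share of data used for training vs. validation
    'seed': 42,                # random seed
}
# model = MP_Pybrain(train_data, hyper)   # train_data: pandas DataFrame with "_tau" targets
# best_model, train_err, valid_err = model.learn()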
Example 3: ANN_blind_analysis
# Required import: from pybrain.structure import FeedForwardNetwork [as alias]
# Or: from pybrain.structure.FeedForwardNetwork import randomize [as alias]
#......... part of the code omitted here .........
    # This is where the ordered dict needs to be used to link the input name to the input node.
    for experiment in a_dataset:
        tf_list = []
        gene_list = []
        tf_labels = []
        first_round = True
        for TF in data_node_list:
            if TF != a_gene:
                #print TF, "<---"
                tf_list.append(experiment[TF])
                if first_round == True:
                    tf_labels.append(TF)
            else:
                #print TF, "<---gene"
                gene_list.append(experiment[TF])
        first_round = False
        # View the input data sets
        print tf_labels
        print tf_list
        print gene_list
        if (check_missing_experiments(tf_list) == True) and (check_missing_experiments(gene_list) == True):
            float_tf_list = [float(i) for i in tf_list]
            float_gene_list = [float(i) for i in gene_list]
            DS.appendLinked( float_tf_list, float_gene_list )
    print "......"
    print 'Network before training'
    print regulatory_network
    pesos_conexiones(regulatory_network)
    print regulatory_network.outputerror
    #print DS
    # Training
    trainer = RPropMinusTrainer_Evolved(regulatory_network, verbose=False)
    trainer.setData(DS)
    result_list = []
    best_run_error = 1000
    boot_count = 0
    while boot_count < boot_val:
        print '\n'
        print 'Bootstrap round ' + str(boot_count + 1)
        trainer.trainEpochs(500)
        this = get_nn_details(regulatory_network)
        # Corrected error
        print trainer.total_error
        current_run_error = trainer.total_error
        print 'Bootstrap round ' + str(boot_count + 1) + ' error: ' + str(current_run_error)
        if abs(current_run_error) < abs(best_run_error):
            best_run_error = current_run_error
            trained_net_filename = a_gene + '_trained_net.xml'
            NetworkWriter.writeToFile(regulatory_network, trained_net_filename)
            export_to_gml(regulatory_network, tf_labels, a_gene)
        #result_list.append(this)
        # Start the next bootstrap round from scratch: reset() clears the activation
        # buffers, randomize() draws a fresh set of parameters.
        regulatory_network.reset()
        regulatory_network.randomize()
        trainer = RPropMinusTrainer_Evolved(regulatory_network, verbose=False)
        trainer.setData(DS)
        boot_count += 1
    #print "TF Labels"
    #print tf_labels
    #print regulatory_network.params
    #print inLayer
    #print "Pesos Conexiones"
    #pesos_conexiones(regulatory_network)
    #print dir(regulatory_network)
    #print dir(trainer)
    #print 'look here'
    #print regulatory_network.outputerror
    #print '<><><><><>'
    #print dir(regulatory_network['SigmoidLayer-7'])
    #print '\n'
    #print vars(regulatory_network['SigmoidLayer-7'])
    #print '\n'
    #print regulatory_network['SigmoidLayer-7'].forward
    #print regulatory_network['SigmoidLayer-7'].bufferlist
    result_list.append(a_gene)
    result_list.append(best_run_error)
    result_list.append(len(tf_list))
    return result_list
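The pattern worth noting for this page is the end of each bootstrap round: reset() clears the network's activation buffers, while randomize() draws a fresh set of parameters, so every round restarts training from a new random initialization. A stripped-down sketch of that pattern on a generic network (not the regulatory_network above, and using the buildNetwork shortcut rather than the manual construction from the example) would be:

# Minimal sketch of the reset-and-randomize pattern used between bootstrap rounds.
from pybrain.tools.shortcuts import buildNetwork

net = buildNetwork(3, 5, 1)   # convenience builder; returns a sorted feed-forward network
for round_no in range(3):
    # ... train `net` here ...
    net.reset()       # clear input/output buffers left over from training
    net.randomize()   # reinitialize the parameters for the next round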
Example 4: __init__
# Required import: from pybrain.structure import FeedForwardNetwork [as alias]
# Or: from pybrain.structure.FeedForwardNetwork import randomize [as alias]
class BlondieBrain:
    def __init__(self, datadir, insize=None, outsize=None, paramfile=None):
        self.datadir = datadir
        if insize == None:
            g = runner.Game()
            ip = self._game2input(g)
            self.insize = len(ip)
        else:
            self.insize = insize
        if outsize == None:
            self.outsize = 1
        else:
            self.outsize = outsize
        if paramfile:
            f = os.path.join(self.datadir, paramfile)
            self.nn = NetworkReader.readFrom(f)
            try:
                self.name = re.search("(.*)-bestof-(.*)", paramfile).group(1)
            except AttributeError:
                self.name = "blondie-%s" % (datetime.datetime.now())
        else:
            self.nn = FeedForwardNetwork()
            tmpname = "blondie-%s" % (datetime.datetime.now())
            self.name = re.sub("[.: ]", "-", tmpname)
            inLayer = LinearLayer(self.insize)
            hiddenLayer1 = SigmoidLayer(self.insize)
            hiddenLayer2 = SigmoidLayer(self.insize)
            outLayer = LinearLayer(self.outsize)
            self.nn.addInputModule(inLayer)
            self.nn.addModule(hiddenLayer1)
            self.nn.addModule(hiddenLayer2)
            self.nn.addOutputModule(outLayer)
            in_to_hidden1 = FullConnection(inLayer, hiddenLayer1)
            hidden1_to_hidden2 = FullConnection(hiddenLayer1, hiddenLayer2)
            hidden2_to_out = FullConnection(hiddenLayer2, outLayer)
            self.nn.addConnection(in_to_hidden1)
            self.nn.addConnection(hidden1_to_hidden2)
            self.nn.addConnection(hidden2_to_out)
            self.nn.sortModules()

    def nextmove(self, game):
        inputdata = self._game2input(game)
        if self.outsize == 1:
            op = int(self.nn.activate(inputdata))
        else:
            r = self.nn.activate(inputdata)
            op = r.argmax()
        return op

    def save(self, suffix=""):
        f = os.path.join(self.datadir, self.name + suffix + ".xml")
        NetworkWriter.writeToFile(self.nn, f)

    def mutate(self):
        self.nn.mutate()

    def randomize(self):
        self.nn.randomize()

    def copy(self):
        x = copy.deepcopy(self)
        x.nn = x.nn.copy()
        return x

    @classmethod
    def _game2input(cls, game):
        mysymbol = len(game.moves) % 2
        cells = [cls._trsymb(x, mysymbol) for x in itertools.chain.from_iterable(game.grid_columns)]
        cols = [cls._trsum(c, mysymbol) for c in game.grid_columns]
        rows = [cls._trsum(r, mysymbol) for r in game.grid_rows]
        diags = [cls._trsum(r, mysymbol) for r in game.diags]
        l = itertools.chain.from_iterable([cells, cols, rows, diags])
        return list(l)

    @classmethod
    def _trsymb(cls, piece, mysymbol):
        # Transform symbol
        if piece == None:
            return 0
        elif piece == mysymbol:
            return 1
        else:
            return -1

    @classmethod
    def _trsum(cls, l, mysymbol):
        # Transform symbol and sum list
        s = 0
        for ll in l:
            s += cls._trsymb(ll, mysymbol)
        return s
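Finally, a hypothetical sketch of how the thin randomize(), mutate() and copy() wrappers of BlondieBrain could seed a small population; the data directory, input size and population size below are made up for illustration, and passing insize/outsize explicitly avoids needing the project's runner.Game():

# Hypothetical population seeding with BlondieBrain (paths and sizes are made up).
base = BlondieBrain('data', insize=42, outsize=1)
population = []
for _ in range(10):
    b = base.copy()   # deep copy, including a copy of the underlying network
    b.randomize()     # give each individual its own random weights
    population.append(b)
# b.mutate() could then perturb an individual's weights between generations.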