This article collects typical usage examples of the Python method pybrain.structure.RecurrentNetwork.activateOnDataset. If you have been wondering what RecurrentNetwork.activateOnDataset does, or how to use it, the curated examples below should help. You can also look further into the containing class, pybrain.structure.RecurrentNetwork.
The following shows 5 code examples of RecurrentNetwork.activateOnDataset, sorted by popularity by default.
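Before the examples, here is a minimal, self-contained sketch (not taken from the examples below, layer sizes are arbitrary) of what activateOnDataset does: it runs the network once over every sample in a dataset and returns one output row per sample.

from pybrain.datasets import SupervisedDataSet
from pybrain.structure import (RecurrentNetwork, LinearLayer, SigmoidLayer,
                               FullConnection)

net = RecurrentNetwork()
net.addInputModule(LinearLayer(2, name='in'))
net.addModule(SigmoidLayer(3, name='hidden'))
net.addOutputModule(LinearLayer(1, name='out'))
net.addConnection(FullConnection(net['in'], net['hidden']))
net.addConnection(FullConnection(net['hidden'], net['out']))
net.addRecurrentConnection(FullConnection(net['hidden'], net['hidden']))
net.sortModules()  # must be called before the network can be activated

ds = SupervisedDataSet(2, 1)
ds.addSample((0, 0), (0,))
ds.addSample((1, 1), (1,))

predictions = net.activateOnDataset(ds)  # shape (2, 1): one row per sample
print(predictions)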
Example 1: trainFunc
# Required import: from pybrain.structure import RecurrentNetwork [as alias]
# Or: from pybrain.structure.RecurrentNetwork import activateOnDataset [as alias]
def trainFunc(params):
    # Unpack the hyper-parameter tuple for this run.
    it, trainds, validds, input_size, hidden, func, eta, lmda, epochs = params
    print('Iter:', it, 'Epochs:', epochs, 'Hidden_size:', hidden, 'Eta:', eta,
          'Lambda:', lmda, 'Activation:', func)
    # Build the network: input -> hidden -> output, plus a recurrent
    # connection copying the hidden state into a context layer.
    n = RecurrentNetwork()
    n.addInputModule(LinearLayer(input_size, name='in'))
    n.addModule(func(hidden, name='hidden'))
    n.addModule(LinearLayer(hidden, name='context'))
    n.addOutputModule(LinearLayer(1, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='in_to_hidden'))
    n.addConnection(FullConnection(n['hidden'], n['out'], name='hidden_to_out'))
    n.addRecurrentConnection(FullConnection(n['hidden'], n['context']))
    n.sortModules()
    trainer = BackpropTrainer(n, trainds, learningrate=eta, weightdecay=lmda,
                              momentum=0.1, shuffle=False)
    trainer.trainEpochs(epochs)
    # Evaluate on the validation set; NaN outputs are mapped to zero first.
    pred = np.nan_to_num(n.activateOnDataset(validds))
    validerr = eval.calc_RMSE(validds['target'], pred)  # eval is a project-local module, not the builtin
    varscore = explained_variance_score(validds['target'], pred)
    return validerr, varscore, n
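trainFunc packs all of its hyper-parameters into a single tuple, which makes it convenient to fan the function out over a parameter grid (for example with multiprocessing.Pool.map). A hedged driver sketch follows; trainds, validds and the grid values are assumptions, not part of the original repository.

from itertools import product
from pybrain.structure import SigmoidLayer, TanhLayer

# trainds / validds are assumed to be prebuilt SupervisedDataSet objects.
grid = product([20, 40], [SigmoidLayer, TanhLayer], [0.01, 0.001])
results = []
for i, (hidden, func, eta) in enumerate(grid):
    params = (i, trainds, validds, trainds.indim, hidden, func, eta, 0.0001, 50)
    results.append(trainFunc(params))
best = min(results, key=lambda r: r[0])  # pick the run with the lowest RMSE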
Example 2: len
# Required import: from pybrain.structure import RecurrentNetwork [as alias]
# Or: from pybrain.structure.RecurrentNetwork import activateOnDataset [as alias]
# print(len(tstdata))
# print(len(trndata))
trainer = BackpropTrainer(n, DS, learningrate=0.1, momentum=0.5, weightdecay=0.0001)
trainer.trainUntilConvergence(verbose=True, maxEpochs=100)
# print(trainer.trainUntilConvergence())
# trainer.trainOnDataset(trndata, 100)
# print(n.activate((2, 1, 3, 0)))
# print(n.activate((2, 1, 3, 90)))
## ----------------------- Results & performance measurements ---------------------------- ##
yhat = n.activateOnDataset(tstdata)
# print(yhat)
# print(tstdata['target'])
def vect_se(X, y):
    # Element-wise squared errors between predictions y and the dataset targets.
    return [(float(y[i]) - float(X['target'][i])) ** 2 for i in range(len(X))]
def mse(X, y):
    # Mean of the squared errors; vect_se() is computed only once.
    se = vect_se(X, y)
    return float(sum(se)) / float(len(se))
print(mse(tstdata, yhat))
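The hand-rolled vect_se/mse helpers can be collapsed into one vectorized numpy expression. A sketch of an equivalent (assuming yhat and the dataset targets have matching lengths):

import numpy as np

def mse_np(dataset, yhat):
    # Same quantity as mse(tstdata, yhat) above, computed in one pass.
    diff = np.asarray(yhat).ravel() - np.asarray(dataset['target']).ravel()
    return float(np.mean(diff ** 2))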
Example 3: __init__
# Required import: from pybrain.structure import RecurrentNetwork [as alias]
# Or: from pybrain.structure.RecurrentNetwork import activateOnDataset [as alias]
class RecurrentNeuralNetwork:
    """
    Recurrent neural network wrapping a hand-built pybrain RecurrentNetwork.
    """
    def __init__(self, nin, nout):
        singleton.append(self)  # module-level registry of instances
        self.inn = nin
        self.outn = nout
        # Build the network explicitly: two hidden layers, a bias unit,
        # an LSTM memory layer, and recurrent wiring back into hidden1.
        self.n = RecurrentNetwork()
        self.n.addInputModule(LinearLayer(nin, name='in'))
        self.n.addOutputModule(LinearLayer(nout, name='out'))
        self.n.addModule(SigmoidLayer(8, name='hidden2'))
        self.n.addModule(TanhLayer(nin + nout // 2, name='hidden1'))
        self.n.addModule(BiasUnit(name='bias'))
        self.n.addModule(LSTMLayer(5, name='memory'))
        self.n.addConnection(FullConnection(self.n['in'], self.n['hidden1']))
        self.n.addConnection(FullConnection(self.n['bias'], self.n['hidden1']))
        self.n.addConnection(FullConnection(self.n['hidden1'], self.n['hidden2']))
        self.n.addConnection(FullConnection(self.n['hidden2'], self.n['out']))
        self.n.addConnection(FullConnection(self.n['hidden1'], self.n['memory']))
        self.n.addConnection(FullConnection(self.n['memory'], self.n['hidden2']))
        self.n.addConnection(FullConnection(self.n['in'], self.n['hidden2']))
        self.n.addRecurrentConnection(FullConnection(self.n['hidden1'], self.n['hidden1']))
        self.n.addRecurrentConnection(FullConnection(self.n['memory'], self.n['hidden1']))
        self.n.sortModules()
    def set_learning_data(self, dataset):
        """
        Set the dataset used to train the network.
        """
        self.ds_learn = dataset
    def train(self, epochs=100):
        """
        Train the network with backpropagation for the given number of epochs.
        """
        trainer = BackpropTrainer(self.n, self.ds_learn, verbose=True)
        return trainer.trainEpochs(epochs=epochs)
    def validate_error(self, dataset):
        """
        Return the MSE of the network on the given dataset.
        """
        v = Validator()  # from pybrain.tools.validation
        return v.MSE(self.n, dataset)
    def calculate(self, dataset):
        """
        Return the network's response for each sample in the dataset.
        """
        return self.n.activateOnDataset(dataset)
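A hedged usage sketch of the class follows; the dataset shapes are arbitrary, and singleton is assumed to be a module-level list, as the constructor's first line implies.

from pybrain.datasets import SupervisedDataSet

singleton = []  # assumed: the module-level registry the constructor appends to

rnn = RecurrentNeuralNetwork(nin=4, nout=2)
ds = SupervisedDataSet(4, 2)
ds.addSample((0, 1, 0, 1), (1, 0))
ds.addSample((1, 0, 1, 0), (0, 1))
rnn.set_learning_data(ds)
rnn.train(epochs=10)
print(rnn.validate_error(ds))  # MSE on the dataset
print(rnn.calculate(ds))       # per-sample network outputs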
Example 4: ClassificationDataSet
# Required import: from pybrain.structure import RecurrentNetwork [as alias]
# Or: from pybrain.structure.RecurrentNetwork import activateOnDataset [as alias]
har_train_test_data = har_data.iloc[0:2000, 0:17]
har_train_test_label = har_data.iloc[0:2000, 17:18]
alldata = ClassificationDataSet(17, nb_classes=5)
for i in range(len(har_train_test_data)):
    t = int(har_train_test_label.iloc[i]) - 1  # labels are 1-based in the file
    alldata.addSample(har_train_test_data.iloc[i], [t])
alldata._convertToOneOfMany(bounds=[0, 1])
tstdata, trndata = alldata.splitWithProportion(0.5)
# RecurrentNetwork() does not accept layer sizes, so buildNetwork() with
# recurrent=True is used to get a layered RecurrentNetwork.
rnn = buildNetwork(17, 20, 30, 14, 13, 12, 11, 7, 5, recurrent=True)
# fnn = buildNetwork(17, 11, 8, 6, 5, outclass=SoftmaxLayer)
# fnn = buildNetwork(17, 11, 8, 6, 5, bias=True)
neural_trainer = BackpropTrainer(rnn, trndata, verbose=True, learningrate=0.01)
neural_trainer.trainUntilConvergence(maxEpochs=100)
out = rnn.activateOnDataset(tstdata)
out = out.argmax(axis=1)  # predicted class per sample
out2 = tstdata.getField('target').argmax(axis=1)  # true class per sample
length = len(out)
count = 0
for i in range(len(out)):
    if out[i] != out2[i]:
        count += 1
errorrate = float(count) / float(length)
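The counting loop above reduces to a single vectorized comparison; a small equivalent sketch:

import numpy as np

errorrate = float(np.mean(out != out2))  # fraction of misclassified samples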
Example 5: FitNeuralNetworkDeptAnimate
# Required import: from pybrain.structure import RecurrentNetwork [as alias]
# Or: from pybrain.structure.RecurrentNetwork import activateOnDataset [as alias]
def FitNeuralNetworkDeptAnimate(dept=1, num=1000):
    train_file = input_file_path + train_file_name[0] + str(dept) + train_file_name[1]
    test_file = input_file_path + test_file_name[0] + str(dept) + test_file_name[1]
    train = np.loadtxt(train_file, delimiter=' ')
    test = np.loadtxt(test_file, delimiter=' ')
    print(len(train))
    x_train = train[0:num, 0:-1]
    y_train = train[0:num, -1]
    # Min-max normalise the targets to [0, 1] for training.
    y_max = max(y_train)
    y_min = min(y_train)
    y_train = (y_train - y_min) / (y_max - y_min)
    y_train = y_train.reshape(-1, 1)
    input_size = x_train.shape[1]
    target_size = y_train.shape[1]
    x_test = test[0:num // 4, 0:-1]
    y_test = test[0:num // 4, -1]
    y_test = y_test.reshape(-1, 1)
    ds_test = SDS(input_size, target_size)
    ds_test.setField('input', x_test)
    ds_test.setField('target', y_test)
    ds = SDS(input_size, target_size)
    ds.setField('input', x_train)
    ds.setField('target', y_train)
    hidden_size = input_size * hidden_size_ratio
    # Stack num_hidden_layer + 1 sigmoid layers between input and output.
    n = RecurrentNetwork()
    n.addInputModule(LinearLayer(input_size, name='in'))
    n.addModule(BiasUnit('bias'))
    for i in range(0, num_hidden_layer + 1):
        n.addModule(SigmoidLayer(hidden_size, name='hidden' + str(i)))
    n.addOutputModule(LinearLayer(target_size, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden0'], name='c1'))
    next_hidden = 'hidden0'
    for i in range(0, num_hidden_layer):
        current_hidden = 'hidden' + str(i)
        next_hidden = 'hidden' + str(i + 1)
        n.addConnection(FullConnection(n[current_hidden], n[next_hidden], name='c' + str(i + 2)))
    n.addConnection(FullConnection(n[next_hidden], n['out'], name='c' + str(num_hidden_layer + 2)))
    n.addConnection(FullConnection(n['bias'], n['hidden0'], name='c' + str(num_hidden_layer + 7)))
    n.sortModules()
    print(n)
    trainer = BackpropTrainer(n, ds, weightdecay=weightdecay, learningrate=learningrate,
                              lrdecay=1.0, momentum=momentum)
    # Interactive plot, refreshed after every epoch.
    plt.ion()
    fig = plt.figure()
    ax = fig.add_subplot(111)
    plt.annotate("Dept1", (10, -15000))
    plt.annotate("Dept2", (180, -30000))
    plt.annotate("Dept3", (300, -15000))
    plt.annotate("Dept4", (450, -30000))
    plt.annotate("Dept5", (600, -15000))
    plt.annotate("Dept6", (700, -30000))
    plt.annotate("Dept7", (900, -15000))
    line1, = ax.plot([], [], '-b', label='train')
    line2, = ax.plot([], [], '-r', label='test')
    ax.legend()
    dummy = input("Plot the graph?")  # raw_input() in Python 2
    for i in range(epochs):
        error = trainer.train()
        print("Epoch: %d, Error: %7.4f" % (i, error))
        p_train = n.activateOnDataset(ds)
        p_test = n.activateOnDataset(ds_test)
        # Un-normalise the predictions back to the original target scale.
        plot_result = np.vstack((p_train * (y_max - y_min) + y_min,
                                 p_test * (y_max - y_min) + y_min))
        p_test_print = p_test.reshape(-1, len(p_test))
        p_test_print = p_test_print * (y_max - y_min) + y_min
        line1.set_ydata(y_train * (y_max - y_min) + y_min)
        line1.set_xdata(range(len(y_train)))
        line2.set_ydata(plot_result)
#......... the rest of this example is omitted here .........
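The omitted tail is not recoverable, but an interactive-mode loop like this one typically refreshes the canvas after the set_data calls. A hypothetical sketch of that step (assumed, not the original code):

        line2.set_xdata(range(len(plot_result)))  # hypothetical x-coordinates for line2
        fig.canvas.draw()
        plt.pause(0.01)  # let the GUI event loop repaint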