This article collects and summarizes typical usage examples of the Python class pybrain.tools.customxml.networkwriter.NetworkWriter. If you are wondering what the NetworkWriter class is for or how to use it, the curated class code examples below may help.
The following shows 15 code examples of the NetworkWriter class, sorted by popularity by default.
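Before the examples, here is a minimal sketch of the round trip the class is used for: NetworkWriter.writeToFile serializes a network (including its connection weights) to an XML file, and NetworkReader.readFrom restores it later. The file name 'net.xml' and the layer sizes below are illustrative only.

from pybrain.tools.shortcuts import buildNetwork
from pybrain.tools.customxml.networkwriter import NetworkWriter
from pybrain.tools.customxml.networkreader import NetworkReader

# Build a small feed-forward network (2 inputs, 3 hidden units, 1 output).
net = buildNetwork(2, 3, 1)

# Serialize the network, weights included, to an XML file.
NetworkWriter.writeToFile(net, 'net.xml')

# Later (or in another process), restore it and use it directly.
restored = NetworkReader.readFrom('net.xml')
print(restored.activate([0.5, -0.2]))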
Example 1: train
def train(self):
    print "Enter the number of times to train, -1 means train until convergence:"
    t = int(raw_input())
    print "Training the Neural Net"
    print "self.net.indim = " + str(self.net.indim)
    print "self.train_data.indim = " + str(self.train_data.indim)
    trainer = BackpropTrainer(self.net, dataset=self.train_data, momentum=0.1, verbose=True, weightdecay=0.01)
    if t == -1:
        trainer.trainUntilConvergence()
    else:
        for i in range(t):
            trainer.trainEpochs(1)
            trnresult = percentError(trainer.testOnClassData(), self.train_data['class'])
            # print self.test_data
            tstresult = percentError(trainer.testOnClassData(dataset=self.test_data), self.test_data['class'])
            print "epoch: %4d" % trainer.totalepochs, \
                  " train error: %5.2f%%" % trnresult, \
                  " test error: %5.2f%%" % tstresult
            if i % 10 == 0 and i > 1:
                print "Saving Progress... Writing to a file"
                NetworkWriter.writeToFile(self.net, self.path)
    print "Done training... Writing to a file"
    NetworkWriter.writeToFile(self.net, self.path)
    return trainer
Example 2: move_function
def move_function(board):
    global net
    best_max_move = None
    max_value = -1000
    best_min_move = None
    min_value = 1000
    # value is the chance of black winning
    for m in board.get_moves():
        nextboard = board.peek_move(m)
        value = net.activate(board_to_input(nextboard))
        if value > max_value:
            max_value = value
            best_max_move = m
        if value < min_value:
            min_value = value
            best_min_move = m
    ds = SupervisedDataSet(97, 1)
    best_move = None
    # active player
    if board.active == BLACK:
        ds.addSample(board_to_input(board), max_value)
        best_move = best_max_move
    elif board.active == WHITE:
        ds.addSample(board_to_input(board), min_value)
        best_move = best_min_move
    trainer = BackpropTrainer(net, ds)
    trainer.train()
    NetworkWriter.writeToFile(net, 'CheckersMini/synapsemon_random_black_mini_140.xml')
    NetworkWriter.writeToFile(net, 'SynapsemonPie/synapsemon_random_black_mini_140_copy.xml')
    return best_move
Example 3: save
def save(self, filename, desc=None):
    NetworkWriter.writeToFile(self.net, filename + '.xml')
    params = {'labels': self.labels,
              'mean': self.mean.tolist(),
              'std': self.std.tolist()}
    with open(filename + '.yaml', 'w') as f:
        f.write(yaml.dump(params, default_flow_style=False))
Example 4: trainData
def trainData(data, filename):
    net = buildNetwork(data.indim, 40, data.outdim, hiddenclass=TanhLayer, outclass=SigmoidLayer)
    trainer = BackpropTrainer(net, dataset=data, verbose=True, momentum=0.1, weightdecay=0.01)
    _, valid_errors = trainer.trainUntilConvergence(continueEpochs=2)
    NetworkWriter.writeToFile(net, filename)
    print "Valid error: ", min(valid_errors)
    return net
Example 5: main
def main():
    train_file = 'data/train.csv'
    # validation_file = 'data/validation.csv'
    output_model_file = 'model.xml'
    # hidden_size = 4
    epochs = 500

    # load data
    # def loadData():
    train = np.loadtxt(train_file, delimiter=' ')
    Input = train[0:, 0:3]
    Output = train[0:, 3:5]
    # validation = np.loadtxt(validation_file, delimiter=',')
    # train = np.vstack((train, validation))
    # x_train = train[:, 0:-1]
    # y_train = train[:, -1]
    # y_train = y_train.reshape(-1, 1)
    # input_size = x_train.shape[1]
    # target_size = y_train.shape[1]

    # prepare dataset
    # def prepareDataset(input_size, target_size):
    ds = SDS(Input, Output)
    # ds.addSample(input_size)
    # ds.setField('input', x_train)
    # ds.setField('target', y_train)

    # init and train
    # def initTrain(input_size, hidden_size, input, output):
    # net = buildNetwork(input_size, hidden_size, target_size, bias=True)
    net = buildNetwork(3,  # input layer
                       4,  # hidden0
                       2,  # output
                       hiddenclass=SigmoidLayer,
                       outclass=SigmoidLayer,
                       bias=True
                       )
    net = NetworkReader.readFrom('model.xml')
    for i, o in zip(Input, Output):
        ds.addSample(i, o)
        print i, o
    trainer = BackpropTrainer(net, ds)
    print "training for {} epochs...".format(epochs)
    for i in range(epochs):
        mse = trainer.train()
        rmse = sqrt(mse)
        print "training RMSE, epoch {}: {}".format(i + 1, rmse)
        if os.path.isfile("../stopfile.txt"):
            break
    NetworkWriter.writeToFile(net, output_model_file)
Example 6: writetrainedinfo
def writetrainedinfo(self, neuralnetwork):
    """
    # Using the Python pickle
    fileObject = open('traininfo', 'w')
    pickle.dump(neuralnetwork, fileObject)
    fileObject.close()
    """
    # Writing file using the NetworkWriter
    NetworkWriter.writeToFile(neuralnetwork, 'xtrainedinfo.xml')
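For comparison, here is a minimal sketch of the pickle route left commented out in the docstring above, next to the NetworkWriter call. The file names and layer sizes are illustrative, and it assumes the network object is picklable (which is the usual case for standard PyBrain feed-forward networks).

import pickle

from pybrain.tools.shortcuts import buildNetwork
from pybrain.tools.customxml.networkwriter import NetworkWriter

net = buildNetwork(4, 8, 1)

# XML route: a portable, human-readable file.
NetworkWriter.writeToFile(net, 'xtrainedinfo.xml')

# Pickle route: a Python-only binary dump (note the binary file modes).
with open('traininfo.pkl', 'wb') as f:
    pickle.dump(net, f)
with open('traininfo.pkl', 'rb') as f:
    restored = pickle.load(f)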
Example 7: SaveNetwork
def SaveNetwork(self):
    """
    Creating dump of network.
    """
    FCLogger.debug('Saving network to PyBrain xml-formatted file...')
    NetworkWriter.writeToFile(self.network, self.networkFile)
    FCLogger.info('Network saved to file: {}'.format(os.path.abspath(self.networkFile)))
Example 8: save
def save(history, net):
    """
    This function gets called after each training/testing block or when the
    script gets closed. It saves the neural net and RL history of the agent so
    that it can be restored or reused in another model.
    """
    base = os.path.splitext(sys.argv[2])[0]
    print 'Saving network to: ' + base + '.xml'
    NetworkWriter.writeToFile(net, base + '.xml')
    fileObject = open(base + '.history', 'w')
    pickle.dump(history, fileObject)
    fileObject.close()
Example 9: save_network_to_file
def save_network_to_file(self, filename):
    """Save Network to File

    Saves the neural network, including all connection weights, into a
    NetworkWriter-format xml file for future loading.

    Arguments:
        filename: The filename into which the network should be saved.
    """
    NetworkWriter.writeToFile(self._network, filename)
    return
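The natural counterpart to this method is a loader. The example does not include one, but a hypothetical sketch might look like the following, assuming the same class keeps its network in self._network and the file was produced by save_network_to_file:

from pybrain.tools.customxml.networkreader import NetworkReader

def load_network_from_file(self, filename):
    """Restore a network previously written by save_network_to_file (hypothetical counterpart)."""
    self._network = NetworkReader.readFrom(filename)
    return self._network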
Example 10: trainNetwork
def trainNetwork():
    print "[Training] Network has Started..."
    inputSize = 0
    with open('file1.txt', 'r') as f:  # automatically closes the file at the end of the block
        # first_line = f.readline()
        # inputSize = len(first_line)
        dataset = SupervisedDataSet(4, 1)  # specify size of data and target
        f.seek(0)  # move back to the beginning of the file
        # iterate through the file, one picture per line
        for line in f:
            mylist = json.loads(line)  # list object
            target = mylist[-1]  # retrieve and then delete the target classification
            del mylist[-2:]
            # print target
            dataset.addSample(tuple(mylist), (target,))
            # print json.loads(line)
    if os.path.isfile('annModel.xml'):
        skynet = NetworkReader.readFrom('annModel.xml')  # for use if individual sample files are used
    else:
        skynet = buildNetwork(dataset.indim, 8, dataset.outdim, bias=True, hiddenclass=TanhLayer)  # input, hidden, output
        # SoftmaxLayer, SigmoidLayer, LinearLayer, GaussianLayer
        # Note: the hidden neuron count is arbitrary; try 1, 3, 4 or 5 if this method doesn't work out
    trainer = BackpropTrainer(skynet, dataset, learningrate=0.3, weightdecay=0.01, momentum=0.9)
    # trainer.trainUntilConvergence()
    for i in xrange(1000):
        trainer.train()
    # trainer.trainEpochs(1000)
    # Save the now trained neural network
    NetworkWriter.writeToFile(skynet, 'annModel.xml')
    print "[Network] has been Written"

    ################## SVM Method #######################
    # Change append method in write method for target persistence
    dataX = []
    datay = []
    with open(writeFile, 'r') as f:
        for line in f:
            mylist = json.loads(line)
            target2 = mylist[-1]
            dataX.append(mylist[:-2])
            datay.append(target2)
    # datay = [target2] * len(dataX)  # Targets, size is n_samples, for use with individual sample files with the same target
    print [target2]
    print dataX
    print datay
    clf = svm.LinearSVC()
    clf.fit(dataX, datay)
    # Persist the trained model
    joblib.dump(clf, 'svmModel.pkl')
Example 11: Treinar
def Treinar():
    print 'Inicializando o treinamento da Rede......Aguarde'
    ds = SupervisedDataSet(50, 1)
    with open('trainning.txt') as f:
        for line in f:
            if line[0] != '#':
                line = line.replace('\n', '')
                line = line.split(',')
                exemplo = []
                for x in line:
                    exemplo.append(x)
                # exemplo[:1] is the target (the first value); exemplo[1:] are the 50 inputs
                ds.addSample(exemplo[1:], exemplo[:1])
    ## Dataset
    # trainer = BackpropTrainer(net, learningrate=0.04, momentum=0.07, verbose=False)
    trainer = BackpropTrainer(net, learningrate=0.04, momentum=0.07, verbose=False)
    trainer.trainOnDataset(ds, 10000)
    NetworkWriter.writeToFile(net, 'filename.xml')
    print 'Treinado e Pronto'
Example 12: perceptron
def perceptron(hidden_neurons=20, weightdecay=0.01, momentum=0.1):
    INPUT_FEATURES = 200
    CLASSES = 9
    HIDDEN_NEURONS = hidden_neurons
    WEIGHTDECAY = weightdecay
    MOMENTUM = momentum
    g = generate_data()
    alldata = g['d']
    testdata = generate_Testdata(g['index'])['d']
    # tstdata, trndata = alldata.splitWithProportion(0.25)
    # print type(tstdata)
    trndata = _convert_supervised_to_classification(alldata, CLASSES)
    tstdata = _convert_supervised_to_classification(testdata, CLASSES)
    trndata._convertToOneOfMany()
    tstdata._convertToOneOfMany()
    # fnn = NetworkReader.readFrom('ncibig(500+83.85).xml')
    fnn = buildNetwork(trndata.indim, HIDDEN_NEURONS, trndata.outdim, outclass=SoftmaxLayer)
    trainer = BackpropTrainer(fnn, dataset=trndata, momentum=MOMENTUM, verbose=True, weightdecay=WEIGHTDECAY, learningrate=0.01)
    result = 0
    ssss = 0
    for i in range(200):
        trainer.trainEpochs(1)
        trnresult = percentError(trainer.testOnClassData(), trndata['class'])
        tstresult = percentError(trainer.testOnClassData(dataset=tstdata), tstdata['class'])
        out = fnn.activateOnDataset(tstdata)
        ssss = out
        out = out.argmax(axis=1)
        result = out
    df = pd.DataFrame(ssss)
    df.to_excel("ncibigout.xls")
    df = pd.DataFrame(result)
    df.insert(1, '1', tstdata['class'])
    df.to_excel("ncibig.xls")
    error = 0
    for i in range(len(tstdata['class'])):
        if tstdata['class'][i] != result[i]:
            error = error + 1
    # print (len(tstdata['class'])-error)*1.0/len(tstdata['class'])*100
    print AAC(result, tstdata['class'])
    print AUC(np.transpose(tstdata['class'])[0], result.transpose())
    print Fscore(np.transpose(tstdata['class'])[0], result.transpose())
    NetworkWriter.writeToFile(fnn, 'ncibig.xml')
Example 13: end_function
def end_function(board, lose):
    global net
    ds = SupervisedDataSet(97, 1)
    if lose:
        if board.active == BLACK:
            ds.addSample(board_to_input(board), 0)
            whiteboard = board_to_input(board)
            whiteboard[96] = 0
            ds.addSample(whiteboard, 1)
        elif board.active == WHITE:
            ds.addSample(board_to_input(board), 1)
            blackboard = board_to_input(board)
            blackboard[96] = 1
            ds.addSample(blackboard, 0)
    else:
        # black loses
        if board.active == BLACK:
            ds.addSample(board_to_input(board), 0)
            whiteboard = board_to_input(board)
            whiteboard[96] = 0
            ds.addSample(whiteboard, 0)
        # black wins
        elif board.active == WHITE:
            ds.addSample(board_to_input(board), 1)
            blackboard = board_to_input(board)
            blackboard[96] = 1
            ds.addSample(blackboard, 1)
    trainer = BackpropTrainer(net, ds)
    trainer.train()
    NetworkWriter.writeToFile(net, 'CheckersMini/synapsemon_random_black_mini_140.xml')
    NetworkWriter.writeToFile(net, 'SynapsemonPie/synapsemon_random_black_mini_140_copy.xml')
Example 14: move_function
def move_function(board):
    global net
    # active player
    # if board.active == BLACK:
    #     ds.addSample(board_to_input(board), max_value)
    #     best_move = best_max_move
    # elif board.active == WHITE:
    #     ds.addSample(board_to_input(board), min_value)
    #     best_move = best_min_move
    boardString = board_to_input(board)
    black_material = 0
    white_material = 0
    for i in range(32):
        isKing = boardString[i + 64] == '1'
        if boardString[i] == '1':
            if isKing:
                black_material = black_material + 2
            else:
                black_material = black_material + 1
        if boardString[i + 32] == '1':
            if isKing:
                white_material = white_material + 2
            else:
                white_material = white_material + 1
    # use float division so the value stays in [0, 1] (plain / truncates to 0 or 1 on Python 2 ints)
    board_val = float(black_material) / (black_material + white_material)
    # create a new dataset and add a sample with the board as input, value as output
    ds = SupervisedDataSet(97, 1)
    ds.addSample(boardString, board_val)
    trainer = BackpropTrainer(net, ds)
    trainer.train()
    NetworkWriter.writeToFile(net, 'SynapsemonPie/synapsemon_primer1.xml')
    NetworkWriter.writeToFile(net, 'SynapsemonPie/synapsemon_primer1_copy.xml')
    return random.choice(board.get_moves())
Example 15: plotLearningCurve
if oldtstError == 0:
    oldtstError = tstError
if oldtstError < tstError:
    tstErrorCount = tstErrorCount + 1
    print 'No Improvement, count=%d' % tstErrorCount
    print '     Old Validation Error:', oldtstError
    print ' Current Validation Error:', tstError
if oldtstError > tstError:
    print 'Improvement made!'
    print '     Old Validation Error:', oldtstError
    print ' Current Validation Error:', tstError
    tstErrorCount = 0
    oldtstError = tstError
    NetworkWriter.writeToFile(TDNNClassificationNet, networkPath)
    plotLearningCurve()

trainingTime = time.time() - time_start
trainingTime = np.reshape(trainingTime, (1))
np.savetxt("25sigmoid/Trainingtime.txt", trainingTime)

####################
# Manual OFFLINE Test
####################
# TDNNClassificationNet = NetworkReader.readFrom('25sigmoid/TrainUntilConv.xml')
# print 'Loaded Trained Network!'
#
# print TDNNClassificationNet.paramdim
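A hedged sketch of how that commented-out offline test might continue, assuming a held-out classification dataset named tstdata exists in the script (the dataset name and the error metric are assumptions, not part of the original):

from pybrain.tools.customxml.networkreader import NetworkReader
from pybrain.utilities import percentError

# Reload the checkpointed network written by NetworkWriter above.
TDNNClassificationNet = NetworkReader.readFrom('25sigmoid/TrainUntilConv.xml')
print('Loaded trained network with %d parameters' % TDNNClassificationNet.paramdim)

# Evaluate it offline; tstdata is an assumed test dataset with a 'class' field.
out = TDNNClassificationNet.activateOnDataset(tstdata)
predicted = out.argmax(axis=1)
print('Offline test error: %.2f%%' % percentError(predicted, tstdata['class']))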