This page collects representative code examples of the Python method pybrain.datasets.SupervisedDataSet.clear. If you are asking yourself what SupervisedDataSet.clear does, how to call it, or where to find usage examples, the curated examples below should help. You can also explore further usage of the class it belongs to, pybrain.datasets.SupervisedDataSet.
The listing shows 14 code examples of the SupervisedDataSet.clear method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
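Before the examples, here is a minimal, self-contained sketch of the pattern most of them share: fill a SupervisedDataSet, train on it with a BackpropTrainer, then call clear() so the same dataset object can be refilled for the next batch or fold. This sketch is not taken from the listing below; the network shape and the toy samples are invented for illustration.

from pybrain.datasets import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer

net = buildNetwork(2, 3, 1)       # 2 inputs, 3 hidden units, 1 output
ds = SupervisedDataSet(2, 1)      # 2-dimensional inputs, 1-dimensional targets

# First batch of samples
ds.addSample((0, 0), (0,))
ds.addSample((1, 1), (0,))
BackpropTrainer(net, ds).train()  # one backprop pass over the current samples

ds.clear()                        # drop the stored samples, keep the input/target dimensions
print(len(ds))                    # expected to print 0: the dataset is empty again

# The same dataset object is refilled with the next batch
ds.addSample((0, 1), (1,))
ds.addSample((1, 0), (1,))
BackpropTrainer(net, ds).train()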
Example 1: neural_network
# Required module: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import clear [as alias]
def neural_network(data, target, network):
    DS = SupervisedDataSet(len(data[0]), 1)
    nn = buildNetwork(len(data[0]), 7, 1, bias=True)
    kf = KFold(len(target), 10, shuffle=True)
    RMSE_NN = []
    for train_index, test_index in kf:
        data_train, data_test = data[train_index], data[test_index]
        target_train, target_test = target[train_index], target[test_index]
        for d, t in zip(data_train, target_train):
            DS.addSample(d, t)
        bpTrain = BackpropTrainer(nn, DS, verbose=True)
        #bpTrain.train()
        bpTrain.trainUntilConvergence(maxEpochs=10)
        p = []
        for d_test in data_test:
            p.append(nn.activate(d_test))
        # flatten the per-sample predictions so their shape matches target_test
        rmse_nn = sqrt(np.mean((np.ravel(p) - target_test)**2))
        RMSE_NN.append(rmse_nn)
        DS.clear()
    time = range(1, 11)
    plt.figure()
    plt.plot(time, RMSE_NN)
    plt.xlabel('cross-validation time')
    plt.ylabel('RMSE')
    plt.show()
    print(np.mean(RMSE_NN))
Example 2: train
# Required module: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import clear [as alias]
def train(nn, data, N, predictionLength, iterations, validationSize):
    loss = 0.
    lossSize = 1.
    for n in range(iterations):
        dataSet = SupervisedDataSet(5 * N, 1)
        start = 1. * (len(data) - validationSize - 1 - N - predictionLength) / iterations * n
        end = 1. * (len(data) - validationSize - 1 - N - predictionLength) / iterations * (n + 1) - validationSize
        validation = end + validationSize
        start = int(start)
        end = int(end)
        validation = int(validation)
        for i in range(start, end):
            sample, mainValue = data.contiguousArray(i, i + N)
            output = data.normalizedMax(i + N + 1, i + N + predictionLength + 1, mainValue)
            dataSet.addSample(sample, (output,))
        print "iteration: ", n, " start: ", start, " end: ", end
        trainer = BackpropTrainer(nn, dataSet)
        trainer.train()
        dataSet.clear()
        for i in range(end, validation):
            sample, mainValue = data.contiguousArray(i, i + N)
            realOutput = data.max(i + N + 1, i + N + predictionLength + 1)
            nnOutputValue = nn.activate(sample)[0] + mainValue
            dt = data.date(i + N + 1)
            currentLoss = nnOutputValue - realOutput
            loss += currentLoss * currentLoss
            print '============================'
            print dt
            print "NN: ", "{0:.10f}".format(nnOutputValue), " Real: ", "{0:.10f}".format(realOutput)
            print "LOSS: ", "{0:.10f}".format(currentLoss)
            print "LOSS TOTAL: ", "{0:.10f}".format(sqrt(loss / lossSize))
            print '============================'
            lossSize += 1.
Example 3: bpNetController
# Required module: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import clear [as alias]
class bpNetController(object):
    def __init__(self, *args):
        self.debug = False
        self.setup(*args)
    def setup(self, depth=4, refLen=5):
        self.inCnt = refLen + 1
        self.net = buildNetwork(self.inCnt, depth, 1, bias=True, hiddenclass=TanhLayer)
        self.ds = SupervisedDataSet(self.inCnt, 1)
        self.trainer = BackpropTrainer(self.net, self.ds)
        self.clear()
    def enableDebug(self):
        self.debug = True
    def sample(self, refs, inp, expectedOut):
        if self.debug: print "added {}".format([refs, inp, expectedOut])
        self.ds.addSample(refs + [inp], expectedOut)
    def train(self, epochs=100):
        self.trainer.trainEpochs(epochs)
    def clear(self):
        self.ds.clear()
    def act(self, refs, inp):
        return self.net.activate(refs + [inp])
    @property
    def curEpoch(self):
        return self.trainer.epoch
Example 4: trainUntilConvergence
# Required module: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import clear [as alias]
def trainUntilConvergence(nn, data, N, predictionLength):
    dataSet = SupervisedDataSet(5 * N, 1)
    start = 0
    end = len(data) + 1 - N - predictionLength
    for i in range(start, end):
        sample, mainValue = data.contiguousArray(i, i + N)
        output = data.normalizedMax(i + N + 1, i + N + predictionLength + 1, mainValue)
        dataSet.addSample(sample, (output,))
    trainer = BackpropTrainer(nn, dataSet)
    trainer.trainUntilConvergence()
    dataSet.clear()
Example 5: updateNN
# Required module: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import clear [as alias]
def updateNN(self, state, action, reward, state_new):
    # learning target
    if reward == REWARD_WIN or reward == REWARD_LOSS:  # terminal states
        yi = reward
    else:  # transition states
        yi = reward + GAMMA * max(self.nn.activate(state_new))
    dataSet = SupervisedDataSet(NODE_INPUT, NODE_OUTPUT)
    learn_target = self.nn.activate(state)
    learn_target[action] = yi
    dataSet.addSample(state, learn_target)
    trainer = BackpropTrainer(self.nn, dataSet)
    trainer.train()
    dataSet.clear()
Example 6: StateNetwork
# Required module: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import clear [as alias]
class StateNetwork():
    '''
    Stores the state-transition function: given an input, it returns the state
    at the next time step. The next state does not include the action.
    '''
    def __init__(self, name='deep_state', inputNum=192, hidden1Num=192, hidden2Num=192, hidden3Num=192, outNum=144):
        self.net = buildNetwork(inputNum, hidden1Num, hidden2Num, hidden3Num, outNum)
        self.ds = SupervisedDataSet(inputNum, outNum)
        self.name = name
        self.turn = 0
    def train(self, input, output):
        self.ds.clear()
        self.ds.addSample(input, output)
        trainer = BackpropTrainer(self.net, self.ds)
        trainer.train()
    def saveNet(self):
        if not os.path.isdir(self.name):
            os.mkdir(self.name)
        print self.name + '/' + str(self.turn), ' has saved'
        with open(self.name + '/' + str(self.turn), 'w') as f:
            pickle.dump(self.net, f)
    def loadNet(self, turn=0):
        print 'loading ', self.name + '/' + str(turn)
        time.sleep(1)
        if os.path.isfile(self.name + '/' + str(turn)):
            with open(self.name + '/' + str(turn), 'r') as f:
                self.net = pickle.load(f)
    def getValue(self, input):
        output = self.net.activate(input)
        for i, v in enumerate(output):
            if v > 0.5:
                output[i] = 1
            else:
                output[i] = 0
        return output
    def getInput(self, state, action, type=1):
        return RunFastAgent.getInput(state, action, type=type)
    def getOutput(self, state):
        input = RunFastAgent.getInput(state, [])
        return input[:144]
Example 7: RunFastNetwork
# Required module: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import clear [as alias]
class RunFastNetwork():
    '''
    Stores the Q-values of an agent in the RunFast game.
    '''
    def __init__(self, name='', inputNum=192, hiddenNum=192, outNum=1):
        self.net = buildNetwork(inputNum, hiddenNum, outNum)
        self.ds = SupervisedDataSet(inputNum, outNum)
        self.name = name
        self.turn = 0
    def train(self, input, output):
        self.ds.clear()
        self.ds.addSample(input, output)
        trainer = BackpropTrainer(self.net, self.ds)
        trainer.train()
    def addLearner(self, learner):
        self.learner = learner
    def saveNet(self, filename=''):
        with open(self.name + '/' + str(self.turn), 'w') as f:
            print self.name + '/' + str(self.turn), ' has saved'
            pickle.dump(self, f)
    def loadNet(self, playName, turn=0):
        if os.path.isfile(playName + '/' + str(turn)):
            with open(self.name + '/' + str(turn), 'r') as f:
                print 'loading ', playName + '/' + str(turn)
                time.sleep(0.5)
                obj = pickle.load(f)
                print obj.turn
                self.turn = obj.turn
                self.net = obj.net
                self.name = obj.name
    def getValue(self, input):
        return self.net.activate(input)
Example 8: linspace
# Required module: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import clear [as alias]
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
# We will vary the training set so that we have 10 different sizes
sizes = linspace(10, len(X_train), 10)
train_err = zeros(len(sizes))
test_err = zeros(len(sizes))
# Build a network with 3 hidden layers
net = buildNetwork(13, 9, 7, 5, 1)
# The dataset will have 13 input features and 1 output
ds = SupervisedDataSet(13, 1)
for i, s in enumerate(sizes):
    # Populate the dataset for training
    ds.clear()
    for j in range(1, int(s)):
        ds.addSample(X_train[j], y_train[j])
    # Set up a backprop trainer
    trainer = BackpropTrainer(net, ds)
    # Train the NN for 50 epochs
    # The .train() function returns MSE over the training set
    for e in range(0, 50):
        train_err[i] = trainer.train()
    # Find labels for the test set
    y = zeros(len(X_test))
    for j in range(0, len(X_test)):
        y[j] = net.activate(X_test[j])
Example 9: Agent
# Required module: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import clear [as alias]
class Agent(object):
    def __init__(self, use_brain):
        self.price_belief_high = random.uniform(PRICE_LOW, PRICE_HIGH)
        self.price_belief_low = random.uniform(PRICE_LOW, self.price_belief_high)
        self.price = random.uniform(self.price_belief_low, self.price_belief_high)
        self.consumption_value_low = random.randint(15, 60)  # Kilowatts used per day
        self.consumption_value_high = random.randint(self.consumption_value_low, 60)
        self.production_value = random.randint(2, 15)  # Square meters of solar panels
        self.no_trades = 0
        self.wealth = 0
        self.supply = 0
        self.demand = 0
        self.weather = 1.0
        self.power = 0.0
        self.reserve_power = 0.0
        self.observed_prices = []  # Prices at which the agent successfully traded.
        self.use_brain = use_brain
        self.price_history = []
        self.wealth_history = []
        if use_brain:
            self.brain = buildNetwork(3, 40, 1)
            self.memory = SupervisedDataSet(3, 1)
            self.trainer = BackpropTrainer(self.brain)
    def sell(self, units, price):
        self.observed_prices.append(price)
        self.power -= units
        self.no_trades += 1
        self.wealth += (units * price)
    def buy(self, units, price):
        self.observed_prices.append(price)
        self.power += units
        self.no_trades += 1
        self.wealth -= (units * price)
    def day_begin(self, weather, market):
        self.price_history.append(self.price)
        self.wealth_history.append(self.wealth)
        self.weather = weather
        self.consumption_value = random.randint(self.consumption_value_low, self.consumption_value_high)
        self.power = ((self.production_value * self.weather) - self.consumption_value)
        # Use any reserve power if we have it.
        if self.reserve_power > 0:
            self.power += self.reserve_power
            self.reserve_power = 0
        # Update supply and demand unless this is a "smart agent"
        if not self.use_brain or self.power <= 0 or len(market.price_history) < 3:
            self.update_supply_demand(market)
            return
        # Predict the price
        buyers = [agent for agent in market.agents if agent.demand > 0]
        sellers = [agent for agent in market.agents if agent.supply > 0]
        supply = sum(seller.supply for seller in sellers)
        demand = sum(buyer.demand for buyer in buyers)
        weather = self.weather
        predicted_price = self.brain.activate((weather, supply, demand))[0]
        # Store power instead of selling it if the price is going to be low.
        threshold = statistics.median(market.price_history)  # (PRICE_LOW + PRICE_HIGH) * 0.5
        if predicted_price < threshold:
            self.reserve_power += self.power
            self.power = 0
        self.update_supply_demand(market)
    def day_end(self, market):
        if not self.use_brain:
            return
        supply = market.asks[-1]
        demand = market.bids[-1]
        weather = self.weather
        price = market.price_history[-1]
        self.price_belief_low = self.brain.activate((weather, supply, demand))[0]
        self.price_belief_high = self.brain.activate((weather, supply, demand))[0]
        self.price = random.uniform(self.price_belief_low, self.price_belief_high)
        self.price_history[-1] = self.price
        self.memory.clear()
        self.memory.addSample((weather, supply, demand), (price,))
        self.trainer.trainOnDataset(self.memory)
    def update_price_belief(self, market, did_sell, success):
        public_mean_price = market.average_price()
        mean = (self.price_belief_low + self.price_belief_high) / 2
        confidence_sigma = 0.05
        #......... (rest of the code omitted) .........
Example 10: print
# Required module: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import clear [as alias]
batch_iter = 0
while True:
    print("batch_iter: " + str(batch_iter))
    training_set_x, training_set_y = loadData(training_file, 1)
    print(len(training_set_x))
    if len(training_set_x) == 0:
        break
    print("there")
    for i in range(len(training_set_x)):
        dataset.addSample(training_set_x[i], training_set_y[i])
    print("here")
    trainer.train()
    print("now")
    dataset.clear()
    batch_iter += 1
    # Clear references to these so the garbage collector can clean them
    # once the garbage collector chooses to.
    del training_set_x
    del training_set_y

correct = 0
total = 0
while True:
    print("Testing validation set")
    validation_set_x, validation_set_y = loadData(validation_file, 1)
    if len(validation_set_x) == 0:
Example 11: __init__
# Required module: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import clear [as alias]
class Image2Text:
    # The size parameter describes the size of each segmented character to be
    # recognized; the neural network requires all inputs to have the same vector
    # length, so it is usually set just large enough to contain the largest
    # character produced by the segmentation.
    # The types parameter is the number of classes the network must distinguish,
    # i.e. the total number of distinct character types that can occur.
    def __init__(self, size=(8, 12), types=12):
        self.imgsize = size
        self.types = types
        self.ds = SupervisedDataSet(size[0] * size[1], types)
        self.net = buildNetwork(self.imgsize[0] * self.imgsize[1],
                                100,
                                types,
                                bias=True)
    def cutting(self, im):
        w, h = im.size
        data = im.getdata()
        cut_imgs = []
        vlast_sum = 0
        vbegin = 0
        vend = 0
        for i in xrange(h):
            vsum = 0
            for j in xrange(w):
                vsum += data[i * w + j]
            if vsum > 0 and vlast_sum == 0:
                vbegin = i
            if vsum == 0 and vlast_sum > 0:
                vend = i
                begin = 0
                end = 0
                last_sum = 0
                for j in xrange(w):
                    sum = 0
                    for i in xrange(vbegin, vend):
                        sum += data[i * w + j]
                    if sum > 0 and last_sum == 0:
                        begin = j
                    if sum == 0 and last_sum > 0:
                        end = j
                        cut_imgs.append(im.crop((begin, vbegin, end, vend)))
                        # print begin, vbegin, end, vend
                    last_sum = sum
            vlast_sum = vsum
        return cut_imgs
    def resize(self, im):
        img = Image.new('1', self.imgsize, 0)
        img.paste(im, (0, 0))
        return img
    def ann_addsample(self, input, output):
        myoutput = [0 for i in xrange(self.types)]
        myoutput[output] = 1
        self.ds.addSample(input, myoutput)
    def ann_clear(self):
        self.ds.clear()
    def ann_train(self):
        trainer = BackpropTrainer(self.net, self.ds,
                                  momentum=0.1,
                                  verbose=True,
                                  weightdecay=0.0001)
        trainer.trainUntilConvergence(maxEpochs=50, validationProportion=0.01)
    def ann_sim(self, input):
        output = self.net.activate(input)
        maxoutput = 0
        maxi = 0
        for i in range(len(output)):
            if maxoutput < output[i]:
                maxoutput = output[i]
                maxi = i
        return maxi
    def ann_save(self, path='ann.db'):
        fileObject = open(path, 'w')
        pickle.dump(self.net, fileObject)
        fileObject.close()
    def ann_load(self, path='ann.db'):
        try:
            with open(path, 'r') as data:
                self.net = pickle.load(data)
            return True
        except IOError as err:
            print("File Error:" + str(err))  # str() converts the object to a string
            return False
    def open_file(self, path):
        fp = open(path, "rb")
        im = Image.open(fp)
        return self.open(im)
    #......... (rest of the code omitted) .........
Example 12: PyImpNetwork
# Required module: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import clear [as alias]
#......... (earlier code omitted) .........
        self.num_hidden = n_hidden
    def setReccurentFlag(self, flag):
        if (flag == "R"):
            self.recurrent_flag = True
        elif (flag == "F"):
            self.recurrent_flag = False
    def load_dataset(self, open_filename):
        self.ds = SupervisedDataSet.loadFromFile(open_filename)
        #print self.ds
    def save_dataset(self, filename):
        if str(filename[0]) != '':
            csv_file = open(filename[0] + ".csv", "w")
            csv_file.write("[inputs][outputs]\r\n")
            for inpt, tgt in self.ds:
                new_str = str("{" + repr(inpt) + "," + repr(tgt) + "}")
                new_str = new_str.strip('\n')
                new_str = new_str.strip('\r')
                new_str = new_str + "\r"
                csv_file.write(new_str)
            if len(new_str) > 1:
                csv_file.close()
    def save_net(self, save_filename):
        networkwriter.NetworkWriter.writeToFile(net, save_filename)
    def load_net(self, open_filename):
        from pybrain.tools.customxml import networkreader
        self.net = networkreader.NetworkReader.readFrom(open_filename)
    def clear_dataset(self):
        if self.ds != 0:
            self.ds.clear()
    def clear_network(self):
        # resets the module buffers but doesn't reinitialise the connection weights
        # TODO: reinitialise network here or make a new option for it.
        self.net.reset()
    def learn_callback(self):
        if self.learning == 0:
            print ("learning is", self.learning)
            self.learning = 1
        elif self.learning == 1:
            print ("learning is", self.learning)
            self.learning = 0
    def compute_callback(self):
        if self.compute == 1:
            self.compute = 0
            print ("Compute network output is now OFF!")
        elif self.compute == 0:
            self.compute = 1
            print ("Compute network output is now ON!")
    def train_callback(self):
        self.trainer = BackpropTrainer(self.net, learningrate=0.01, lrdecay=1, momentum=0.0, verbose=True)
        print 'MSE before', self.trainer.testOnData(self.ds, verbose=True)
        epoch_count = 0
        while epoch_count < 1000:
            epoch_count += 10
            self.trainer.trainUntilConvergence(dataset=self.ds, maxEpochs=10)
            networkwriter.NetworkWriter.writeToFile(self.net, 'autosave.network')
        print 'MSE after', self.trainer.testOnData(self.ds, verbose=True)
        print ("\n")
        print 'Total epochs:', self.trainer.totalepochs
    def main_loop(self):
        self.learnMapperDevice.poll(1)
        if ((self.learning == 1) and (self.compute == 0)):
            # Query output values upon change in GUI
            for index in range(self.num_outputs):
                self.data_output[index] = self.l_outputs[index].query_remote()
                print self.data_output[index]
            print ("Inputs: ")
            print (tuple(self.data_input.values()))
            print ("Outputs: ")
            print (tuple(self.data_output.values()))
            self.ds.addSample(tuple(self.data_input.values()), tuple(self.data_output.values()))
        if ((self.compute == 1) and (self.learning == 0)):
            activated_out = self.net.activate(tuple(self.data_input.values()))
            for out_index in range(self.num_outputs):
                self.data_output[out_index] = activated_out[out_index]
                self.l_outputs[out_index].update(self.data_output[out_index])
Example 13: Slave
# Required module: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import clear [as alias]
#......... (earlier code omitted) .........
                self.net.addConnection(FullConnection(self.net['in'], self.hiddenLayers[0]))
                for h1, h2 in zip(self.hiddenLayers[:-1], self.hiddenLayers[1:]):
                    self.net.addConnection(FullConnection(self.net['networkBias'], h1))
                    self.net.addConnection(FullConnection(h1, h2))
                if outPutBias:
                    self.net.addConnection(FullConnection(self.net['networkBias'], self.net['out']))
                self.net.addConnection(FullConnection(self.hiddenLayers[-1], self.net['out']))
            else:
                if outPutBias:
                    self.net.addConnection(FullConnection(self.net['networkBias'], self.net['out']))
                self.net.addConnection(FullConnection(self.net['in'], self.net['out']))
        else:
            # Define the input layer
            if inLType == 0:
                self.net.addInputModule(LinearLayer(inLayer, name='in'))
            elif inLType == 1:
                self.net.addInputModule(SigmoidLayer(inLayer, name='in'))
            elif inLType == 2:
                self.net.addInputModule(TanhLayer(inLayer, name='in'))
            elif inLType == 3:
                self.net.addInputModule(SoftmaxLayer(inLayer, name='in'))
            elif inLType == 4:
                self.net.addInputModule(GaussianLayer(inLayer, name='in'))
            # Define the hidden layers
            self.hiddenLayers = []
            if hLayersType == 0:
                for i in range(0, hLayerNum):
                    self.hiddenLayers.append(LinearLayer(hiddenLayers[i]))
                    self.net.addModule(self.hiddenLayers[i])
            elif hLayersType == 1:
                for i in range(0, hLayerNum):
                    self.hiddenLayers.append(SigmoidLayer(hiddenLayers[i]))
                    self.net.addModule(self.hiddenLayers[i])
            elif hLayersType == 2:
                for i in range(0, hLayerNum):
                    self.hiddenLayers.append(TanhLayer(hiddenLayers[i]))
                    self.net.addModule(self.hiddenLayers[i])
            elif hLayersType == 3:
                for i in range(0, hLayerNum):
                    self.hiddenLayers.append(SoftmaxLayer(hiddenLayers[i]))
                    self.net.addModule(self.hiddenLayers[i])
            elif hLayersType == 4:
                for i in range(0, hLayerNum):
                    self.hiddenLayers.append(GaussianLayer(hiddenLayers[i]))
                    self.net.addModule(self.hiddenLayers[i])
            # Define the output layer
            if outLType == 0:
                self.net.addOutputModule(LinearLayer(outLayer, name='out'))
            elif outLType == 1:
                self.net.addOutputModule(SigmoidLayer(outLayer, name='out'))
            elif outLType == 2:
                self.net.addOutputModule(TanhLayer(outLayer, name='out'))
            elif outLType == 3:
                self.net.addOutputModule(SoftmaxLayer(inLayer, name='out'))
            elif outLType == 4:
                self.net.addOutputModule(GaussianLayer(outLayer, name='out'))
            if self.hiddenLayers:
                self.net.addConnection(FullConnection(self.net['in'], self.hiddenLayers[:1]))
                for h1, h2 in zip(self.hiddenLayers[:-1], self.hiddenLayers[1:]):
                    self.net.addConnection(FullConnection(h1, h2))
                self.net.addConnection(FullConnection(self.hiddenLayers[-1:], self.net['out']))
            else:
                self.net.addConnection(FullConnection(self.net['in'], self.net['out']))
        # Finish building the network and assemble it properly
        self.net.sortModules()
    def setParameters(self, parameters):
        self.net._setParameters(parameters)
    def getParameters(self):
        return self.net.params.tolist()
    def createDataSet(self, ds):
        inp = ds.indim
        targ = ds.outdim
        self.ds = SupervisedDataSet(inp, targ)
        for i, t in ds:
            self.ds.addSample(i, t)
    def updateDataSet(self, ds):
        self.ds.clear(True)
        for i, t in ds:
            self.ds.addSample(i, t)
        self.trainer.setData(self.ds)
    def createTrainer(self, learnrate=0.01, ldecay=1.0, momentum=0.0, batchlearn=False, wdecay=0.0):
        self.trainer = BackpropTrainer(self.net, self.ds, learningrate=learnrate, lrdecay=ldecay, momentum=momentum, batchlearning=batchlearn, weightdecay=wdecay)
    def trainNetwork(self):
        self.trainer.train()
    def loadNetwork(self, net):
        del self.net
        self.net = net
Example 14: train_network
# Required module: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import clear [as alias]
def train_network(net,
                  best_fraction,
                  trainer=default_trainer,
                  transit=default_transit):
    """
    Author: Xander
    This function performs the common grunt-work of
    both build_network() and improve_network().
    """
    print "Building dataset..."
    ds = SupervisedDataSet(2 * transit.generate_stars * transit.generate_points,
                           transit.generate_stars)
    for i in xrange(trainer.interval_count):
        print "Generating exoplanet transits..."
        ds.clear()
        for k in xrange(trainer.data_size):
            inpt, output = generate(transit=transit)
            ds.addSample(inpt, output)
        print "Building trainer..."
        network_trainer = BackpropTrainer(net, ds)
        print "Training..."
        for j in xrange(trainer.interval_size):
            msg = "Iteration"
            msg += " " * (len(str(trainer.interval_count * trainer.interval_size))
                          - len(str(trainer.interval_size * i + j + 1)) + 1)
            msg += str(trainer.interval_size * i + j + 1)
            msg += " of " + str(trainer.interval_count * trainer.interval_size)
            msg += ": error = "
            msg += str(network_trainer.train())
            print msg
        if i != trainer.interval_count - 1:
            print "Creating interval report..."
            report = message(net,
                             trainer.interval_check,
                             trainer=trainer,
                             transit=transit)
            print report[0][:-1]
            if report[1] > best_fraction:
                best_fraction = report[1]
                print "This interval was helpful and will be saved."
                print "Saving..."
                NetworkWriter.writeToFile(net, "../network.xml")
                print "Writing info..."
                f = open("../network_info.txt", "w")
                for line in report[0]:
                    f.write(line)
                f.close()
            else:
                print "This interval was not helpful and will be discarded."
                print "Retrieving older version..."
                net = NetworkReader.readFrom("../network.xml")
    print "Creating program report..."
    report = message(net,
                     trainer.check_size,
                     trainer=trainer,
                     transit=transit)
    print report[0][:-1]
    if report[1] > best_fraction:
        best_fraction = report[1]
        print "This interval was helpful and will be saved."
        print "Saving..."
        NetworkWriter.writeToFile(net, "../network.xml")
        print "Writing info..."
        f = open("../network_info.txt", "w")
        for line in report[0]:
            f.write(line)
        f.close()
    else:
        print "This interval was not helpful and will be discarded."
        print "Retrieving older version..."
        net = NetworkReader.readFrom("../network.xml")
        print "Improving older report..."
        better_report = message(net=net,
                                size=trainer.check_size,
                                trainer=trainer,
                                transit=transit)
        print "Writing info..."
        f = open("../network_info.txt", "w")
        for line in better_report[0]:
            f.write(line)
        f.close()