

Python BackpropTrainer.train method code examples

This article collects typical usage examples of the Python method pybrain.supervised.trainers.BackpropTrainer.train. If you are unsure what BackpropTrainer.train does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the containing class, pybrain.supervised.trainers.BackpropTrainer.


Below are 15 code examples of the BackpropTrainer.train method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
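
Before the collected examples, here is a minimal sketch of the typical call pattern (the XOR data and hyperparameters are purely illustrative and not taken from any of the projects below): build a network, fill a SupervisedDataSet, wrap both in a BackpropTrainer, and call train(), which runs one epoch over the dataset and returns that epoch's average error.

from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer

# illustrative XOR sketch: train() performs one pass over the dataset per call
net = buildNetwork(2, 3, 1, bias=True)
ds = SupervisedDataSet(2, 1)
for sample, target in [((0, 0), (0,)), ((0, 1), (1,)), ((1, 0), (1,)), ((1, 1), (0,))]:
    ds.addSample(sample, target)
trainer = BackpropTrainer(net, ds, learningrate=0.05, momentum=0.9)
for epoch in range(100):
    error = trainer.train()  # returns the average error for this epoch
print net.activate((1, 0))  # should approach 1.0 after training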

Example 1: train

# Required module import: from pybrain.supervised.trainers import BackpropTrainer [as alias]
# Or: from pybrain.supervised.trainers.BackpropTrainer import train [as alias]
def train(nn, data, N, predictionLength, iterations, validationSize):
    loss = 0.
    lossSize = 1.
    for n in range(iterations):
        dataSet = SupervisedDataSet(5 * N, 1)
        start = 1. * (len(data) - validationSize - 1 - N - predictionLength) / iterations * n
        end = 1. * (len(data) - validationSize - 1 - N - predictionLength) / iterations * (n + 1) - validationSize
        validation = end + validationSize
        start = int(start)
        end = int(end)
        validation = int(validation)
        for i in range(start, end):
            sample, mainValue = data.contiguousArray(i, i + N)
            output = data.normalizedMax(i + N + 1, i + N + predictionLength + 1, mainValue)
            dataSet.addSample(sample, (output,))
        print "iteration: ", n, " start: ", start, " end: ", end
        trainer = BackpropTrainer(nn, dataSet)
        trainer.train()
        dataSet.clear()
        for i in range(end, validation):
            sample, mainValue = data.contiguousArray(i, i + N)
            realOutput = data.max(i + N + 1, i + N + predictionLength + 1)
            nnOutputValue = nn.activate(sample)[0] + mainValue
            dt = data.date(i + N + 1)
            currentLoss = nnOutputValue - realOutput
            loss += currentLoss * currentLoss
            print '============================'
            print dt
            print "NN: ", "{0:.10f}".format(nnOutputValue), " Real: ", "{0:.10f}".format(realOutput)
            print "LOSS: ", "{0:.10f}".format(currentLoss)
            print "LOSS TOTAL: ", "{0:.10f}".format(sqrt(loss / lossSize))
            print '============================'
            lossSize += 1.
Developer: mistler, Project: nn, Lines: 35, Source: nn.py

Example 2: move_function

# Required module import: from pybrain.supervised.trainers import BackpropTrainer [as alias]
# Or: from pybrain.supervised.trainers.BackpropTrainer import train [as alias]
def move_function(board):
    global net  
    best_max_move = None 
    max_value = -1000
    best_min_move = None
    min_value = 1000

    #value is the chance of black winning
    for m in board.get_moves():
        nextboard = board.peek_move(m)
        value = net.activate(board_to_input(nextboard))
        if value > max_value: 
            max_value = value
            best_max_move = m 
        if value < min_value:
            min_value = value
            best_min_move = m

    ds = SupervisedDataSet(97, 1)
    best_move = None 

    #active player
    if board.active == BLACK:
        ds.addSample(board_to_input(board), max_value)
        best_move = best_max_move
    elif board.active == WHITE: 
        ds.addSample(board_to_input(board), min_value)
        best_move = best_min_move

    trainer = BackpropTrainer(net, ds)
    trainer.train()
    NetworkWriter.writeToFile(net, 'CheckersMini/synapsemon_random_black_mini_140.xml')
    NetworkWriter.writeToFile(net, 'SynapsemonPie/synapsemon_random_black_mini_140_copy.xml') 
    return best_move 
Developer: johnny-zheng, Project: SynapsemonPy, Lines: 36, Source: synapsemon_random_black_mini_140.py

Example 3: ANN

# Required module import: from pybrain.supervised.trainers import BackpropTrainer [as alias]
# Or: from pybrain.supervised.trainers.BackpropTrainer import train [as alias]
def ANN(name, attr):
	x = getSamplesL(name, attr)[0]
	y = getSamplesL(name, attr)[1]
	t = getSamplesL(name, attr)[2]
	net = buildNetwork(40, 250, 5)
	ds = SupervisedDataSet(40, 5)
	for e in range(len(x)):
		ds.addSample(x[e], y[e])
	trainer = BackpropTrainer(net, ds)
	for i in range(20):
		trainer.train()
	error = 0
	count = 0
	for i in range(len(x))[::10]:
		count = count + 1
		tresult = net.activate(x[i])
		ans = y[i]
		for j in range(len(ans)):
			error = error + abs(tresult[j] - ans[j]) / ans[j]

	error = error / (count * 5) / 4
	print error
#	tresults = x[50:100]

	result = net.activate(t)
	print result
	return (result, error)
Developer: Weilin1992, Project: Web-Final-Project, Lines: 29, Source: machinel.py

Example 4: prepare_neural_models

# Required module import: from pybrain.supervised.trainers import BackpropTrainer [as alias]
# Or: from pybrain.supervised.trainers.BackpropTrainer import train [as alias]
def prepare_neural_models():
	train_df = pd.read_csv("train_set.csv")
	prod_options = ['a','b','c','d','e','f','g']
	
	neural_models = []
	for opt in prod_options:
		if opt == "a":
			train_cols =  [train_df.columns[3],train_df.columns[4],train_df.columns[15]]
		elif opt == "b":
			train_cols =  [train_df.columns[3],train_df.columns[4],train_df.columns[16]]
		elif opt == "c":
			train_cols =  [train_df.columns[3],train_df.columns[4],train_df.columns[17]]
		elif opt == "d":
			train_cols =  [train_df.columns[3],train_df.columns[4],train_df.columns[18]]
		elif opt == "e":
			train_cols =  [train_df.columns[3],train_df.columns[4],train_df.columns[19]]
		elif opt == "f":
			train_cols =  [train_df.columns[3],train_df.columns[4],train_df.columns[20]]
		elif opt == "g":
			train_cols =  [train_df.columns[3],train_df.columns[4],train_df.columns[21]]
			
		dataset = SupervisedDataSet(3,1)
		for df in train_df.iterrows():  # iterate rows as (index, Series) pairs
			dataset.addSample((df[1][train_cols[0]],df[1][train_cols[1]],df[1][train_cols[2]]),(df[1][opt],))
		#neural_ds.append(dataset)
	
		net = buildNetwork(3, 3, 1, bias=True, hiddenclass=TanhLayer)
		neural_trainer = BackpropTrainer(net,dataset)
		neural_trainer.train()
		neural_models.append(neural_trainer)	
		
	return neural_models
Developer: mblaauw, Project: kaggle-allstate-purchase-prediction, Lines: 34, Source: experiment_01.py

Example 5: reinforce

# Required module import: from pybrain.supervised.trainers import BackpropTrainer [as alias]
# Or: from pybrain.supervised.trainers.BackpropTrainer import train [as alias]
    def reinforce(self, feedback, state=None, replace_last=False, end=None):
        """
        Processes a feedback signal (e.g. reward or punishment).
        
        Applies a feedback to every state in the episode decayed proportionally to how
        long ago the action happened.
        
        The network will be taught to associate the state right before the feedback
        with the feedback most strongly, whereas a state at the beginning of the episode
        will have a very weak association.
        """
        
        if not end:
            return
        
        self.rewards.append(feedback)
        
        # Build training set.
        #ds = SupervisedDataSet(inp=9, target=1)
        ds = SupervisedDataSet(inp=self.lengths[0], target=self.lengths[-1])
        
        r0 = feedback
        for normalized_state in reversed(self.history):
#            print 'sample:',normalized_state,r0
            ds.addSample(normalized_state, (r0,))
            r0 *= self.lambda_decay
        
        trainer = BackpropTrainer(
            self.network, ds, learningrate=self.alpha)
        trainer.train()
Developer: chrisspen, Project: reinforce, Lines: 32, Source: rl.py

Example 6: __init__

# Required module import: from pybrain.supervised.trainers import BackpropTrainer [as alias]
# Or: from pybrain.supervised.trainers.BackpropTrainer import train [as alias]
class Brain:
	def __init__(self, hiddenNodes = 30):
		# construct neural network 
		self.myClassifierNet = buildNetwork(12, hiddenNodes, 1, bias=True, hiddenclass=TanhLayer) #parameters to buildNetwork are inputs, hidden, output
		# set up dataset
		self.myDataset = SupervisedDataSet(12, 1)
		self.myClassifierTrainer = BackpropTrainer(self.myClassifierNet, self.myDataset)

	def addSampleImageFromFile(self, imageFile, groupId):
		"adds a data sample from an image file, including needed processing"
		myImage = Image.open(imageFile)
		self.myDataset.addSample(twelveToneParallel(myImage), (groupId,))

	def train(self):
		#myClassifierTrainer.trainUntilConvergence() #this will take forever (possibly literally in the pathological case)
		for i in range(0, 15):
			self.myClassifierTrainer.train() #this may result in an inferior network, but in practice seems to work fine

	def save(self, saveFileName="recognizernet.brain"):
		saveFile = open(saveFileName, 'w')
		pickle.dump(self.myClassifierNet, saveFile)
		saveFile.close()

	def load(self, saveFileName="recognizernet.brain"):
		saveFile = open(saveFileName, 'r')
		self.myClassifierNet = pickle.load(saveFile)
		saveFile.close()

	def classify(self, fileName):
		myImage = Image.open(fileName)
		if self.myClassifierNet.activate(twelveToneParallel(myImage)) < 0.5:
			return 0
		else:
			return 1
Developer: anurive, Project: pybrain-picture-sort, Lines: 36, Source: recognizer.py

Example 7: handle

# Required module import: from pybrain.supervised.trainers import BackpropTrainer [as alias]
# Or: from pybrain.supervised.trainers.BackpropTrainer import train [as alias]
    def handle(self, *args, **options):
        better_thans = BetterThan.objects.all() #.filter(pk__lte=50)

        ds = SupervisedDataSet(204960, 1)
        for better_than in better_thans:
            bt = imread(better_than.better_than.image.file)
            wt = imread(better_than.worse_than.image.file)
            better_than.better_than.image.file.close()
            better_than.worse_than.image.file.close()

            # bt = filters.sobel(bt)
            # wt = filters.sobel(wt)

            bt_input_array = np.reshape(bt, (bt.shape[0] * bt.shape[1]))
            wt_input_array = np.reshape(wt, (wt.shape[0] * wt.shape[1]))
            input_1 = np.append(bt_input_array, wt_input_array)
            input_2 = np.append(wt_input_array, bt_input_array)
            ds.addSample(np.append(bt_input_array, wt_input_array), [-1])
            ds.addSample(np.append(wt_input_array, bt_input_array), [1])
        
        net = buildNetwork(204960, 2, 1)

        train_ds, test_ds = ds.splitWithProportion(options['train_test_split'])
        _, test_ds = ds.splitWithProportion(options['test_split'])

        trainer = BackpropTrainer(net, ds)

        print 'Looking for -1: {0}'.format(net.activate(np.append(bt_input_array, wt_input_array)))
        print 'Looking for 1: {0}'.format(net.activate(np.append(wt_input_array, bt_input_array)))

        trainer.train()

        print 'Looking for -1: {0}'.format(net.activate(np.append(bt_input_array, wt_input_array)))
        print 'Looking for 1: {0}'.format(net.activate(np.append(wt_input_array, bt_input_array)))
Developer: 722C, Project: nn-art-critic, Lines: 36, Source: nn2.py

Example 8: nnTest

# Required module import: from pybrain.supervised.trainers import BackpropTrainer [as alias]
# Or: from pybrain.supervised.trainers.BackpropTrainer import train [as alias]
def nnTest(tx, ty, rx, ry, iterations):
    print "NN start"
    print strftime("%a, %d %b %Y %H:%M:%S", localtime())

    resultst = []
    resultsr = []
    positions = range(iterations)
    network = buildNetwork(16, 16, 1, bias=True)
    ds = ClassificationDataSet(16, 1, class_labels=["1", "0"])
    for i in xrange(len(tx)):
        ds.addSample(tx[i], [ty[i]])
    trainer = BackpropTrainer(network, ds, learningrate=0.05)
    validator = CrossValidator(trainer, ds, n_folds=10)
    print validator.validate()
    for i in positions:
        print trainer.train()
        resultst.append(sum((np.array([round(network.activate(test)) for test in tx]) - ty)**2)/float(len(ty)))
        resultsr.append(sum((np.array([round(network.activate(test)) for test in rx]) - ry)**2)/float(len(ry)))
        print i, resultst[i], resultsr[i]
    plt.plot(positions, resultst, 'g-', positions, resultsr, 'r-')
    plt.axis([0, iterations, 0, 1])
    plt.ylabel("Percent Error")
    plt.xlabel("Network Epoch")
    plt.title("Neural Network Error")
    plt.savefig('nn.png', dpi=500)
    print "NN end"
    print strftime("%a, %d %b %Y %H:%M:%S", localtime())
Developer: mmanguno, Project: machine-learning, Lines: 29, Source: bank.py

Example 9: nn_predict

# Required module import: from pybrain.supervised.trainers import BackpropTrainer [as alias]
# Or: from pybrain.supervised.trainers.BackpropTrainer import train [as alias]
def nn_predict(train, test, prediction_cols, to_predict,
               n_nodes,
               hiddenclass,
               learningrate,
               num_epochs,
               verbose = True):
                   
    ds = make_pybrain_ds(train, prediction_cols, to_predict)
    ds_test = make_pybrain_ds(test, prediction_cols, to_predict)

    net = buildNetwork( ds.indim, n_nodes, ds.outdim, bias = True, hiddenclass = eval(hiddenclass))
    trainer = BackpropTrainer(net, dataset=ds, learningrate= learningrate, lrdecay=1.0, momentum=0.0, verbose=False, batchlearning=False, weightdecay=0.0)   
    
    if to_predict == 'place_geny':
        train = train[train.is_place]
        
    if verbose:
        print 'XXXXXXXXXXXXXXXXXXXXXXXXXX'
        print 'Predicting :', to_predict
        print 'n_nodes :', n_nodes
        print 'Layer :', hiddenclass
        print 'learningrate :', learningrate


    for epoch in range(num_epochs):
        trainer.train()
        a = pd.DataFrame(net.activateOnDataset(ds_test))
        a.columns = [to_predict + '_predict']
        a.index = test.index
        test[to_predict + '_predict'] = a[to_predict + '_predict']
        
    return (trainer, test)
Developer: Leobouloc, Project: twitter_analytics, Lines: 35, Source: pandas_pybrain.py

Example 10: getErrorPercent

# Required module import: from pybrain.supervised.trainers import BackpropTrainer [as alias]
# Or: from pybrain.supervised.trainers.BackpropTrainer import train [as alias]
def getErrorPercent(training_dataset, eval_dataset_list, num_hidden, num_epochs):
  num_datapoints = len(training_dataset)
  num_inputs = len(training_dataset[0][0])
  num_outputs = len(training_dataset[0][1])

  # print "Num Inputs:", num_inputs
  # print "Num Outputs:", num_outputs
  # print "Num Hidden Nodes:", num_hidden

  NN = buildNetwork(num_inputs, num_hidden, num_outputs, bias=True, hiddenclass=SigmoidLayer, outclass=SigmoidLayer)

  dataset = SupervisedDataSet(num_inputs, num_outputs)
  for datapoint in training_dataset:
    dataset.addSample(datapoint[0], datapoint[1])


  trainer = BackpropTrainer(NN, dataset=dataset, momentum=0.0, verbose=False, weightdecay=0.0)

  for epoch in range(0, num_epochs):
    #print epoch 
    trainer.train()

  errors = []
  for eval_set in eval_dataset_list:
    total_percent_errors = [0]*num_outputs
    for jj in range(0, len(eval_set)):
      nn_out = NN.activate(eval_set[jj][0])
      percent_error = computeError(eval_set[jj][1], nn_out)
      #print percent_error
      total_percent_errors = map(operator.add, percent_error, total_percent_errors)
    #print total_percent_errors
    errors.append(map(operator.div, total_percent_errors, [len(dataset)]*num_outputs))
  #print errors
  return errors
Developer: sethmccammon, Project: rob537, Lines: 36, Source: evaluation.py

Example 11: pybrain_high

# Required module import: from pybrain.supervised.trainers import BackpropTrainer [as alias]
# Or: from pybrain.supervised.trainers.BackpropTrainer import train [as alias]
def pybrain_high():
	back=[]
	alldate=New_stock.objects.filter().exclude(name='CIHKY')[0:100]
	wholelen=len(alldate)
	test=New_stock.objects.filter(name__contains="CIHKY")
	testlen=len(test)
	# test dataset
	testdata= SupervisedDataSet(5, 1)
	testwhole=newalldate(test,testlen)
	for i in testwhole:
		testdata.addSample((i[0],i[2],i[3],i[4],i[5]), (0,))	
	# training dataset
	data= SupervisedDataSet(5, 1)
	wholedate=newalldate(alldate,wholelen)
	for i in wholedate:
		data.addSample((i[0],i[2],i[3],i[4],i[5]), (i[1]))	
	#print testwhole
	# build the BP neural network
	net = buildNetwork(5, 3, 1,bias=True,hiddenclass=TanhLayer, outclass=SoftmaxLayer)
	
	trainer = BackpropTrainer(net,data)
	trainer.trainEpochs(epochs=100)
	# train and test the network
#	print trainer.train()
	trainer.train()
	print 'ok'
	out=net.activateOnDataset(testdata)
	for j in  test:
                back.append((j.high))
	print back
	print out
	backout=backnormal(back,out)
	print 'okokokoko'
	print backout # output the de-normalized predictions for the 22-sample test set
	return out 
Developer: lanlanzky, Project: stock_project, Lines: 37, Source: views.py

Example 12: Predict

# Required module import: from pybrain.supervised.trainers import BackpropTrainer [as alias]
# Or: from pybrain.supervised.trainers.BackpropTrainer import train [as alias]
	def Predict(self, ticker, day):
		endDay = day-datetime.timedelta(1)
		startDay = endDay - datetime.timedelta(self.trainingPeriod)
		try:
			stockData = data.DataReader(ticker, 'yahoo', startDay, endDay)
		except:
			return [0]

		rawTrainFeatures = []
		rawTrainResponses = []
		for currentDay in range(self.windowLength, len(stockData)):
			window = stockData[currentDay-self.windowLength:currentDay]
			currentPrice = stockData.iloc[currentDay]['Open']
			response = stockData.iloc[currentDay]['Close']
			rawTrainFeatures.append(self.GetFeature(window))
			rawTrainResponses.append(response)

		rawTestFeatures = self.GetFeature(stockData[len(stockData)-self.windowLength:len(stockData)])

		# normalTrainFeatures, normalTestFeatures = self.NormalizeFeatures(rawTrainFeatures, rawTestFeatures)
		alldata = SupervisedDataSet(len(rawTrainFeatures[0]), 1)
		for index in range(0, len(rawTrainFeatures)):
			alldata.addSample(rawTrainFeatures[index],[rawTrainResponses[index]])

		self.network = buildNetwork(alldata.indim, (alldata.indim+alldata.outdim)/2, alldata.outdim, hiddenclass=SigmoidLayer, outclass=LinearLayer)
		trainer = BackpropTrainer(self.network, dataset=alldata)
		activations = []
		for i in range(50):
			for x in range(5):
				trainer.train()
		return float(self.network.activate(rawTestFeatures))
Developer: DerekHunter, Project: Algo, Lines: 33, Source: algo.py

Example 13: fit

# Required module import: from pybrain.supervised.trainers import BackpropTrainer [as alias]
# Or: from pybrain.supervised.trainers.BackpropTrainer import train [as alias]
    def fit(self, X, y):
        """
        Train the regressor model.

        :param X: pandas.DataFrame of shape [n_samples, n_features]
        :param y: values - array-like of shape [n_samples]

        :return: self
        """

        dataset = self._prepare_net_and_dataset(X, y, 'regression')

        trainer = BackpropTrainer(self.net,
                                  dataset,
                                  learningrate=self.learningrate,
                                  lrdecay=self.lrdecay,
                                  momentum=self.momentum,
                                  verbose=self.verbose,
                                  batchlearning=self.batchlearning,
                                  weightdecay=self.weightdecay)
        if self.epochs < 0:
            trainer.trainUntilConvergence(maxEpochs=self.max_epochs,
                                          continueEpochs=self.continue_epochs,
                                          verbose=self.verbose,
                                          validationProportion=self.validation_proportion)
        else:
            for i in range(self.epochs):
                trainer.train()
        self.__fitted = True

        return self
Developer: tyamana, Project: rep, Lines: 33, Source: pybrain.py

Example 14: main

# Required module import: from pybrain.supervised.trainers import BackpropTrainer [as alias]
# Or: from pybrain.supervised.trainers.BackpropTrainer import train [as alias]
def main():
    start_time = time.time()
    novice = ArtificialNovice()
    genius = ArtificialGenius()
    game = HangmanGame(genius, novice)

    if __debug__:
        print "------------------- EVALUATION ------------------------"
        network = NetworkReader.readFrom("../IA/network_weight_1000.xml")
        j = 0
        while j < 1:
            game.launch(False, None, network)
            j += 1

        print ("--- %s total seconds ---" % (time.time() - start_time))
    else:
        print "------------------- LEARNING ------------------------"
        network = buildNetwork(3, 4, 1, hiddenclass=SigmoidLayer)
        ds = SupervisedDataSet(3, 1)
        i = 0
        while i < 100:
            game.launch(True, ds)
            i += 1

        print " INITIATE trainer : "
        trainer = BackpropTrainer(network, ds)
        print " START trainer : "
        start_time_trainer = time.time()
        trainer.train()
        print ("---  END trainer in % seconds ---" % (time.time() - start_time_trainer))
        print " START EXPORT network : "
        NetworkWriter.writeToFile(network, "../IA/network_weight_test_learning.xml")
        print " END EXPORT network : "
Developer: CelyaRousseau, Project: NaoHangman, Lines: 35, Source: main.py

Example 15: nntester

# Required module import: from pybrain.supervised.trainers import BackpropTrainer [as alias]
# Or: from pybrain.supervised.trainers.BackpropTrainer import train [as alias]
def nntester(tx, ty, rx, ry, iterations):
    """
    builds, tests, and graphs a neural network over a series of trials as it is
    constructed
    """
    resultst = []
    resultsr = []
    positions = range(iterations)
    network = buildNetwork(100, 50, 1, bias=True)
    ds = ClassificationDataSet(100,1, class_labels=["valley", "hill"])
    for i in xrange(len(tx)):
        ds.addSample(tx[i], [ty[i]])
    trainer = BackpropTrainer(network, ds, learningrate=0.01)
    for i in positions:
        print trainer.train()
        resultst.append(sum((np.array([round(network.activate(test)) for test in tx]) - ty)**2)/float(len(ty)))
        resultsr.append(sum((np.array([round(network.activate(test)) for test in rx]) - ry)**2)/float(len(ry)))
        print i, resultst[i], resultsr[i]
    NetworkWriter.writeToFile(network, "network.xml")
    plt.plot(positions, resultst, 'ro', positions, resultsr, 'bo')
    plt.axis([0, iterations, 0, 1])
    plt.ylabel("Percent Error")
    plt.xlabel("Network Epoch")
    plt.title("Neural Network Error")
    plt.savefig('3Lnn.png', dpi=300)
Developer: iRapha, Project: Machine-Learning, Lines: 27, Source: hills.py


Note: the pybrain.supervised.trainers.BackpropTrainer.train examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use should follow each project's license. Do not reproduce without permission.