

Python SupervisedDataSet.saveToFile Method Code Examples

This article collects typical usage examples of the Python method pybrain.datasets.SupervisedDataSet.saveToFile. If you are wondering how SupervisedDataSet.saveToFile is used in practice, the selected code examples below should help. You can also explore further usage examples of the containing class, pybrain.datasets.SupervisedDataSet.


The following shows 11 code examples of the SupervisedDataSet.saveToFile method, sorted by popularity by default.
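Before the individual examples, here is a minimal sketch (not taken from any of the projects below) of the typical pattern: build a SupervisedDataSet, add samples, persist it with saveToFile, and read it back. It assumes PyBrain's matching SupervisedDataSet.loadFromFile class method as the loading counterpart, and the file name 'xor.ds' is only a placeholder.

from pybrain.datasets import SupervisedDataSet

# dataset with 2 input dimensions and 1 target dimension
ds = SupervisedDataSet(2, 1)

# add the four XOR samples
for inp, target in [((0, 0), (0,)), ((0, 1), (1,)),
                    ((1, 0), (1,)), ((1, 1), (0,))]:
    ds.addSample(inp, target)

# persist the dataset to disk
ds.saveToFile('xor.ds')

# later (or in another process), read it back
# (loadFromFile is assumed to be the matching load method)
restored = SupervisedDataSet.loadFromFile('xor.ds')
print(len(restored))  # 4 samples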

Example 1: buildDataSet

# Required module: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import saveToFile [as alias]
	def buildDataSet(self, filename, dateList):
		ds = SupervisedDataSet(self.historySize*2, 1)

		# Hack because for some absurd reason the stocks close on weekends
		for date in dateList:
			# inputs - the last historySize of score and stock data
			ds.addSample(self.getInputs(date), (self.targetTs.getVal(date),))

		ds.saveToFile(filename)

		return ds
Developer: ncvc, Project: Sentiment, Lines of code: 13, Source: NeuralNet.py

Example 2: create1OrderDataSet

# Required module: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import saveToFile [as alias]
def create1OrderDataSet():
    lab_images = get_train_set(instance=False, number_of_instances=10)
    ds = SupervisedDataSet(100, 1)
    for i in range(len(lab_images)):
        data = np.zeros((100))
        for j in range(100):
            data[j] = lab_images[i][0][j]
        ds.addSample(data, lab_images[i][1])
        print "creating dataset, iteration:",i,"of",len(lab_images)
    ds.saveToFile(root.path() + '/res/dataset1')
    return ds
Developer: kamilsa, Project: KAIProject, Lines of code: 13, Source: honn.py

Example 3: constructDataset

# Required module: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import saveToFile [as alias]
def constructDataset():
    ds = SupervisedDataSet(50, 50)
    for line in open(r"C:\Users\maxence\Documents\data.txt"):
        input = normalizedDataset(line)
        cipher = ceas.encipher(line)
        #print cipher
        output = normalizedDataset(cipher)
        #print input
        #print output

        ds.addSample(input, output)
    # save once, after all samples have been added
    ds.saveToFile('C:\\Users\\maxence\\Documents\\ds.xml')
    return ds
Developer: maxence-schmitt, Project: NeuralNetwork, Lines of code: 15, Source: pybrainTest.py

Example 4: create2OrderDataSet

# Required module: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import saveToFile [as alias]
def create2OrderDataSet():
    lab_images = get_train_set(instance=True)
    ds = SupervisedDataSet(5150, 1)
    for i in range(len(lab_images)):
        data = np.zeros((5150))
        for j in range(100):
            data[j] = lab_images[i][0][j]
        count = 100
        for x1 in range(100):
            for x2 in range(x1, 100):
                # print count
                data[count] = lab_images[i][0][x1]*lab_images[i][0][x2]
                count += 1
        ds.addSample(data, lab_images[i][1])
        print "creating dataset, iteration:",i,"of",len(lab_images)
    ds.saveToFile(root.path() + '/res/dataset2')
    return ds
Developer: kamilsa, Project: KAIProject, Lines of code: 19, Source: honn.py

Example 5: generateTrainingData

# Required module: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import saveToFile [as alias]
def generateTrainingData(size=10000, saveAfter = False):
    """
    Creates a set of training data with 4-dimensional input and 2-dimensional output
    with `size` samples
    """
    np.random.seed()
    data = SupervisedDataSet(4,2)
    for i in xrange(1, int(size/2)):
        [a, b] = np.random.random_integers(1, 100, 2)
        [c, d] = np.random.random_integers(100, 500, 2)
        data.addSample((a, b, c, d), (-1, 1))

    for i in xrange(1, int(size/2)):
        [a, b] = np.random.random_integers(100, 500, 2)
        [c, d] = np.random.random_integers(1, 100, 2)
        data.addSample((a, b, c, d), (1, -1))

    if saveAfter:
        data.saveToFile(root.path()+"/res/dataSet")
    return data
Developer: DianaShatunova, Project: NEUCOGAR, Lines of code: 22, Source: main.py

Example 6: save_data

# Required module: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import saveToFile [as alias]
    def save_data(self, fName="./data/mydata"):
        # equivalent to self.ds.saveToFile(fName)
        SupervisedDataSet.saveToFile(self.ds, fName)
Developer: huangzhixin, Project: Mutifunction_Auto, Lines of code: 4, Source: network.py

Example 7: storeBoards

# Required module: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import saveToFile [as alias]
def storeBoards():
    ds = SupervisedDataSet(97,1)
    for i in range(1000):
        boardList=makeBoard()
        ds.addSample(boardList, boardVal(boardList))
    ds.saveToFile('SynapsemonPie/boards')
Developer: johnny-zheng, Project: SynapsemonPy, Lines of code: 8, Source: primer_evaluation.py

Example 8: SupervisedDataSet

# Required module: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import saveToFile [as alias]
    plt.ylim([0,5000])
    plt.show()





######## Build training set and save to file ############
print "Saving to file..."
#PyBrain has some nice classes to do all this.
from pybrain.datasets import SupervisedDataSet
import numpy as np

DS = SupervisedDataSet(dict_size,1)

for m_list,target in [[spamlist,1],[hamlist,0]]:
    for mail in m_list:
        #each data point is a list (or vector) the size of the dictionary
        wordvector=np.zeros(dict_size)
        #now go through the email and put the occurrences of each word
        #in its respective spot (i.e. word_dict[word]) in the vector
        for word in mail:
            if word in word_dict:
                wordvector[word_dict[word]] += 1
        DS.appendLinked(np.log(wordvector + 1), [target])  # put word occurrences on a log scale

#TODO: use MySQL instead of csv
DS.saveToFile('dataset.csv')
print "Done."

Developer: antonvh, Project: PySpamfilters, Lines of code: 31, Source: spam_create_dataset.py

Example 9: print(

# Required module: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import saveToFile [as alias]
DataSetCompleteWhitenClass = np.load("Data/DataSetCompleteWhitenClass.npy")
DataSetCompleteNorm = np.load("Data/DataSetCompleteNorm.npy")
DataSetCompleteNormClass = np.load("Data/DataSetCompleteNormClass.npy")

for data in DataSetCompleteRaw:
	DSSuperRaw.appendLinked(data[0],data[1])
for data in DataSetCompleteRawClass:
	DSClassRaw.addSample(data[0],data[1])
for data in DataSetCompleteWhiten:
	DSSuperWhiten.appendLinked(data[0],data[1])
for data in DataSetCompleteWhitenClass:
	DSClassWhiten.addSample(data[0],data[1])
for data in DataSetCompleteNorm:
	DSSuperNorm.appendLinked(data[0],data[1])
for data in DataSetCompleteNormClass:
	DSClassNorm.addSample(data[0],data[1])

DSSuperRaw.saveToFile("Data/DSSuperRaw")
DSClassRaw.saveToFile("Data/DSClassRaw")
DSSuperWhiten.saveToFile("Data/DSSuperWhiten")
DSClassWhiten.saveToFile("Data/DSClassWhiten")
DSSuperNorm.saveToFile("Data/DSSuperNorm")
DSClassNorm.saveToFile("Data/DSClassNorm")


# np.save("Data/DataSetCompleteWhiten.npy", DataSetCompleteRaw)
# print(np.argmin(tdata, axis=0))

# np.save("Data/DataSetCompleteWhitenClass.npy", DataSetCompleteWhiten)

# #DS.saveToFile("DataSetComplete")
Developer: audioocelot, Project: Website, Lines of code: 33, Source: DataManip.py

Example 10: ProcessImage

# Required module: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import saveToFile [as alias]
		print filename
		image_file='Images(Training)/A/'+ filename
		colordata = ProcessImage(image_file, partition_size)
		#webbrowser.open("pixels.png")
		#raw_input()
		dataset.addSample(colordata, (1, 0))
		
	for filename in os.listdir("Images(Training)/B"):
		print filename
		image_file='Images(Training)/B/'+ filename
		colordata = ProcessImage(image_file, partition_size)
		#webbrowser.open("pixels.png")
		#raw_input()
		dataset.addSample(colordata, (0, 1))
	
	dataset.saveToFile("dataset")



net = buildNetwork(partition_size*partition_size, 35, 8, 2)

epochs = int(raw_input("How many epochs do you want to train the network for?: "))

RunNet(net, dataset, epochs)

prompt = raw_input("Do you want to choose specific files?: ")

if (prompt == 'y'):
	while 1 == 1:
		file = raw_input("Filename: ")
		weights = ActivateNet(ProcessImage("Images(Unclassified)/" + file, partition_size))
Developer: ddemarco5, Project: Neural-Network-AI, Lines of code: 33, Source: base.py

Example 11: range(

# Required module: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import saveToFile [as alias]
		train_count += 100
		print "Trains:", train_count
		print "Error:", trainer.train()
		print " "
	
	print "Running movement loop..."
	while True:
		try:
			for i in range(9):
				trainer.train()
				train_count += 1
			print "Error:", trainer.train()
			train_count += 1
		except:
			raise Exception
		cm = round(Lobsang.sensors.distance(), -1) / 10
		right_speed = net.activate([cm])
		print "CM: %i, LS: %f, RS: %f" %(cm * 10, 0.0, right_speed)
		#left_speed = round(left_speed)
		right_speed = round(right_speed)
		print "Speeds to motors (L, R): (", 0, ",", right_speed, ")"
		print " "
		Lobsang.wheels.both(right_speed)
except Exception as e:
	Lobsang.quit()
	print e
	print "Halted after", loop_count, "loops and", train_count, "trainings."
	ds.saveToFile("nndist.ds")
else:
	Lobsang.quit()
Developer: welshgeekboy, Project: Lobsang, Lines of code: 32, Source: nndist.py


Note: The pybrain.datasets.SupervisedDataSet.saveToFile examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their authors, and the source code copyright remains with the original authors; please consult each project's License before distributing or using the code. Do not reproduce without permission.