当前位置: 首页>>代码示例>>Python>>正文


Python ClassificationDataSet.addSample方法代码示例

本文整理汇总了Python中pybrain.datasets.ClassificationDataSet.addSample方法的典型用法代码示例。如果您正苦于以下问题:Python ClassificationDataSet.addSample方法的具体用法?Python ClassificationDataSet.addSample怎么用?Python ClassificationDataSet.addSample使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在pybrain.datasets.ClassificationDataSet的用法示例。


在下文中一共展示了ClassificationDataSet.addSample方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: EightBitBrain

# 需要导入模块: from pybrain.datasets import ClassificationDataSet [as 别名]
# 或者: from pybrain.datasets.ClassificationDataSet import addSample [as 别名]
class EightBitBrain(object):
    """Recurrent PyBrain classifier over 8-bit integers.

    Each training element is a pair (integer, class); the integer is
    expanded to a fixed 8-bit binary feature vector before training.
    """
    
    def __init__(self, dataset, inNodes, outNodes, hiddenNodes, classes):
        # NOTE(review): ClassificationDataSet's second positional argument is
        # the target dimension, not a class count -- passing classes-1 here is
        # unusual (typically 1 with nb_classes=classes); confirm intent.
        self.__dataset = ClassificationDataSet(inNodes, classes-1)
        for element in dataset:
            self.addDatasetSample(self._binaryList(element[0]), element[1])
        # Expand integer class targets to one-of-many (one-hot) encoding.
        self.__dataset._convertToOneOfMany()
        self.__network = buildNetwork(inNodes, hiddenNodes, self.__dataset.outdim, recurrent=True)
        self.__trainer = BackpropTrainer(self.__network, learningrate = 0.01, momentum = 0.99, verbose = True)
        self.__trainer.setData(self.__dataset)

    def _binaryList(self, n):
        # Zero-padded 8-bit binary expansion, e.g. 5 -> [0,0,0,0,0,1,0,1].
        return [int(c) for c in "{0:08b}".format(n)]
    
    def addDatasetSample(self, argument, target):
        # Append one (input vector, class) pair to the training set.
        self.__dataset.addSample(argument, target)

    def train(self, epochs):
        # Run backprop for a fixed number of epochs over the stored dataset.
        self.__trainer.trainEpochs(epochs)
    
    def activate(self, information):
        """Return the index of the output unit with the highest activation."""
        result = self.__network.activate(self._binaryList(information))
        # Track (best activation, its class index); ties/all-nonpositive
        # activations fall back to class 0 because of this initial value.
        highest = (0,0)
        for resultClass in range(len(result)):
            if result[resultClass] > highest[0]:
                highest = (result[resultClass], resultClass)
        return highest[1]
开发者ID:oskanberg,项目名称:pyconomy,代码行数:29,代码来源:brains.py

示例2: generate_data

# 需要导入模块: from pybrain.datasets import ClassificationDataSet [as 别名]
# 或者: from pybrain.datasets.ClassificationDataSet import addSample [as 别名]
def generate_data(n=400):
    """Build a 2-feature, 3-class PyBrain dataset holding the XOR truth table.

    The ``n`` parameter is kept for interface compatibility but is unused:
    the dataset always contains exactly the four fixed samples below.

    Returns a dict with the input bounding box ('minX', 'maxX', 'minY',
    'maxY') and the dataset under key 'd'.
    """
    INPUT_FEATURES = 2
    CLASSES = 3
    alldata = ClassificationDataSet(INPUT_FEATURES, 1, nb_classes=CLASSES)
    # XOR labelling: class 0 for equal inputs, class 1 for differing inputs
    # (class 2 is declared but never used by these samples).
    alldata.addSample([0, 0], [0])
    alldata.addSample([0, 1], [1])
    alldata.addSample([1, 0], [1])
    alldata.addSample([1, 1], [0])

    return {'minX': 0, 'maxX': 1,
            'minY': 0, 'maxY': 1, 'd': alldata}
开发者ID:Guosmilesmile,项目名称:pythonstudy,代码行数:29,代码来源:brain.py

示例3: __init__

# 需要导入模块: from pybrain.datasets import ClassificationDataSet [as 别名]
# 或者: from pybrain.datasets.ClassificationDataSet import addSample [as 别名]
class NeuralNetLearner:
    """Neural-net learner over sklearn's digits dataset using PyBrain.

    Loads the 8x8 digit images, scales features into [0, 1], wraps them in a
    ClassificationDataSet and builds a small 64-10-1 network.
    """

    def __init__(self):
        self.bunch = load_digits()
        self.X = np.asarray(self.bunch.data, 'float32')
        self.Y = np.asarray(self.bunch.target, 'float32')
        # Per-feature 0-1 scaling; the epsilon avoids division by zero for
        # constant columns.
        self.X = (self.X - np.min(self.X, 0)) / (np.max(self.X, 0) + 0.0001)

        self.ds = ClassificationDataSet(64, nb_classes=10, class_labels=self.bunch.target_names)
        for (x, y) in zip(self.X, self.Y):
            self.ds.addSample(x, y)

        # NOTE(review): splitWithProportion(0.3) puts 30% in the first split,
        # so test_data gets 30% and train_data 70% -- confirm this is intended.
        self.test_data, self.train_data = self.ds.splitWithProportion(0.3)

        self.network = buildNetwork(64, 10, 1)

    def get_datasets(self):
        """Return (train_data, test_data)."""
        return self.train_data, self.test_data

    def activate(self, x):
        # BUG FIX: the original discarded the network output; return it so
        # fitness_func (and any other caller) receives the activation.
        return self.network.activate(x.tolist())

    def fitness_func(self, x):
        """Mean squared error of the network over the whole dataset.

        ``x`` is only validated for size (a 64-feature vector); the error is
        computed over every stored sample.
        """
        if not (x.size == 64):
            print("Bad input vector: ", x)
            return
        sum_of_squared_error = 0
        for (input, target) in self.ds:
            # BUG FIX: pass the raw sample -- activate() already calls
            # .tolist(), so the original's input.tolist() here would raise
            # AttributeError on the resulting plain list.
            # BUG FIX: square the residual; the name promises squared error
            # and unsquared residuals of mixed sign would cancel out.
            error = target - self.activate(input)
            sum_of_squared_error += error ** 2
        # BUG FIX: DataSet has no .length attribute; use len().
        return (sum_of_squared_error / len(self.ds))

    def get_weights(self):
        # Not implemented yet; returns None.
        return
开发者ID:camsmith293,项目名称:CS4641_HW2,代码行数:35,代码来源:NeuralNetLearner.py

示例4: generateDataSet

# 需要导入模块: from pybrain.datasets import ClassificationDataSet [as 别名]
# 或者: from pybrain.datasets.ClassificationDataSet import addSample [as 别名]
def generateDataSet():

    inFile = open("data/input.txt")
    inData = inFile.readlines()
    inFile.close()
    
    outFile = open("data/output.txt")
    outData = outFile.readlines()
    outFile.close()


    inputs = 120 #you will want to update this based on the state you have... ###I don't understand this comment. How do we update if we haven't calculated the state yet?
    classes= 11 #11 #Not much reson to change this one, there are only 11 destinations.
    allData = ClassificationDataSet(inputs,1,nb_classes=classes)
    start = time.clock()
    for i in range(len(inData)):
        b = loadBrain(inData[i].strip())
        #inputs = len(b.g.heroes) - 1 + len(b.g.taverns_locs) + 4
        #calls functions inside of the ai object.  you will want to write these fcns. 
        ins = b.createInputs(inputs)
        klass = b.determineClass(classes,eval(outData[i].strip()))
        expectedKlass = b.classInverse(klass)
        #if expectedKlass != eval(outData[i].strip()):
        #    print expectedKlass, eval(outData[i].strip())
        allData.addSample(ins,[klass])
        #if(i > 1000): break
        if(i%100==0): print i,len(inData), "elapsed between sets", time.clock() - start
    
    return allData    
开发者ID:DanSGraham,项目名称:code,代码行数:31,代码来源:Classifier.py

示例5: __init__

# 需要导入模块: from pybrain.datasets import ClassificationDataSet [as 别名]
# 或者: from pybrain.datasets.ClassificationDataSet import addSample [as 别名]
class NNetwork:
    """Feed-forward gesture classifier: 7 input features, 8 gesture classes."""

    def __init__(self):
        # 7 input features, 1 target column, 8 gesture classes.
        self.ds = ClassificationDataSet(7, 1, nb_classes=8)

    def add_data(self, training_data):
        """Append samples; each item is (class, feature_vector)."""
        for gesture in training_data:
            self.ds.addSample(gesture[1], gesture[0])

    def newData(self, training_data):
        """Discard the current dataset and reload it from training_data."""
        self.ds = ClassificationDataSet(7, 1, nb_classes=8)
        # Reuse add_data instead of duplicating its sample loop.
        self.add_data(training_data)

    def train(self, shouldPrint):
        """Train with backprop and early stopping; optionally print errors."""
        # Hold out 20% of the data for testing.
        tstdata, trndata = self.ds.splitWithProportion(0.2)
        trndata._convertToOneOfMany()
        tstdata._convertToOneOfMany()
        # One hidden layer of 64 neurons; softmax output for classification.
        self.fnn = buildNetwork(trndata.indim, 64, trndata.outdim, outclass=SoftmaxLayer)
        self.trainer = BackpropTrainer(self.fnn, dataset=trndata, momentum=0.1, learningrate=0.01, verbose=True, weightdecay=0.1)
        # Early stopping: a further 20% of the training split validates.
        self.trainer.trainUntilConvergence(dataset=trndata, maxEpochs=100, verbose=True, continueEpochs=10, validationProportion=0.20)
        trnresult = percentError(self.trainer.testOnClassData(), trndata['class'])
        tstresult = percentError(self.trainer.testOnClassData(dataset=tstdata), tstdata['class'])

        if shouldPrint:
            print "epoch: %4d" % self.trainer.totalepochs, "  train error: %5.2f%%" % trnresult, "  test error: %5.2f%%" % tstresult

    def activate(self, data):
        """Run one feature vector through the trained network."""
        return self.fnn.activate(data)

示例6: main

# 需要导入模块: from pybrain.datasets import ClassificationDataSet [as 别名]
# 或者: from pybrain.datasets.ClassificationDataSet import addSample [as 别名]
def main():
    """For each tracked ticker: fetch history, train a net, predict today."""
    for stock in STOCK_TICKS:
        # Download Data
        get_data(stock)

        # Import Data; the most recent day is held out as the prediction input.
        days = extract_data(stock)
        today = days.pop(0)

        # Make DataSet with a binary target: 1 if the day closed up, else 0.
        data_set = ClassificationDataSet(INPUT_NUM, 1, nb_classes=2)
        for day in days:
            target = 0
            if day.change > 0:
                target = 1
            data_set.addSample(day.return_metrics(), [target])

        # Make Network: two hidden layers of MIDDLE_NUM units each.
        network = buildNetwork(INPUT_NUM, MIDDLE_NUM, MIDDLE_NUM, OUTPUT_NUM)

        # Train Network until convergence (capped at EPOCHS_MAX epochs).
        trainer = BackpropTrainer(network)
        trainer.setData(data_set)
        trainer.trainUntilConvergence(maxEpochs=EPOCHS_MAX)

        # Activate Network on today's metrics and report the raw output.
        prediction = network.activate(today.return_metrics())
        print prediction

示例7: make_data_set

# 需要导入模块: from pybrain.datasets import ClassificationDataSet [as 别名]
# 或者: from pybrain.datasets.ClassificationDataSet import addSample [as 别名]
def make_data_set(beg,end):
    """Build a None/Buy/Sell dataset from rawData rows between beg and end.

    Each sample is HISTORY days of (DIFF, DEA) pairs plus the current CDIS
    value.  The label looks HOLD days ahead: Buy (1) if the short EMA rises
    more than 5% above today's close, Sell (2) if it falls more than 5%
    below, otherwise None (0).
    """
    ds = ClassificationDataSet(HISTORY*2+1, class_labels=['None', 'Buy' , 'Sell']) #SupervisedDataSet(HISTORY*3, 1) 
    # Restrict to the requested trade-date window.
    trainQ = rawData[(rawData.tradeDate <= end) & ( rawData.tradeDate >= beg)]
    

    for idx in range(1, len(trainQ) - HISTORY - 1 - HOLD-1):
        # Index of the "current" day at the end of the HISTORY window.
        cur = idx + HISTORY - 1  
        # Skip rows with |MACD| > 0.5 -- presumably filters unstable periods;
        # TODO confirm the threshold's rationale.
        if( abs( trainQ.iloc[cur]['MACD'] ) > 0.5 ):
            continue        
        sample = []
        for i in range(HISTORY):
            #sample.append( trainQ.iloc[idx+i]['EMAL'] )#  [['EMAL','DIFF','DEA','CDIS']] ) )
            sample.append( trainQ.iloc[idx+i]['DIFF'] )
            sample.append( trainQ.iloc[idx+i]['DEA'] )
                   
        sample.append( trainQ.iloc[cur]['CDIS'] )
        # Label from the next HOLD days of EMAS relative to today's close.
        if max( trainQ.iloc[cur+1:cur+HOLD+1]['EMAS'] ) / trainQ.iloc[cur]['closeIndex'] > 1.05 : 
            answer = 1
        elif min( trainQ.iloc[cur+1:cur+HOLD+1]['EMAS'] ) / trainQ.iloc[cur]['closeIndex'] < 0.95:
            answer = 2
        else:
            answer = 0
#        print(sample)    
        ds.addSample(sample, answer)
    return ds
开发者ID:ZiqiuZang,项目名称:PythonTraining,代码行数:27,代码来源:000300.py

示例8: generate_Testdata

# 需要导入模块: from pybrain.datasets import ClassificationDataSet [as 别名]
# 或者: from pybrain.datasets.ClassificationDataSet import addSample [as 别名]
def generate_Testdata(index):
    """Load Breast_test.data, keep the features selected by ``index``, and
    build a 5-class PyBrain dataset.

    Samples with a label outside the five known subtypes are skipped, which
    matches the original if/elif chain that had no else branch.

    Returns a dict with the input bounding box, the dataset ('d'), and the
    feature index list ('index').
    """
    INPUT_FEATURES = 200
    CLASSES = 5
    # Map subtype label -> class id; replaces a repetitive if/elif chain.
    label_to_class = {
        "lumina": 0,
        "ERBB2": 1,
        "basal": 2,
        "normal": 3,
        "cell_lines": 4,
    }
    train_text, train_classfi_number, train_classfi, train_feature_name = getTargetData("Breast_test.data")

    # Project each sample onto the selected feature indices.
    train_text = getIndexData(train_text, index)

    alldata = ClassificationDataSet(INPUT_FEATURES, 1, nb_classes=CLASSES)
    for i in range(len(train_text)):
        label = train_classfi[i]
        if label in label_to_class:
            alldata.addSample(train_text[i], label_to_class[label])
    return {'minX': 0, 'maxX': 1,
            'minY': 0, 'maxY': 1, 'd': alldata, 'index': index}
开发者ID:Guosmilesmile,项目名称:pythonstudy,代码行数:29,代码来源:breastrf.py

示例9: conductGeneration

# 需要导入模块: from pybrain.datasets import ClassificationDataSet [as 别名]
# 或者: from pybrain.datasets.ClassificationDataSet import addSample [as 别名]
def conductGeneration(generation, corpus):
        '''
        Build the dataset for one generation of learning and testing.
                generation (int) --- the number of the generation
                corpus (object) --- corpus object containing info needed
        Returns the full ClassificationDataSet (four fixed samples); the
        generation and corpus arguments are currently unused.
        '''
        # Set up the dataset skeleton: 2 features, 1 target, 3 labeled classes.
        alldata = ClassificationDataSet(2, 1, nb_classes=3, class_labels=['a', 'b', 'c'])

        # Note: (1) etc. are plain ints, not 1-tuples; pybrain accepts scalars.
        alldata.addSample((0, 1), (1))
        alldata.addSample((1, 0), (0))
        alldata.addSample((0, 0), (2))
        alldata.addSample((1, 1), (0))

        # The split halves are computed but unused; the call is kept because
        # splitWithProportion draws random numbers, so removing it would
        # change the RNG sequence for later callers.
        trndata, partdata = alldata.splitWithProportion(0.5)

        return alldata
开发者ID:tylerlau07,项目名称:romance_nominal_change,代码行数:29,代码来源:main.py

示例10: run_nn_fold

# 需要导入模块: from pybrain.datasets import ClassificationDataSet [as 别名]
# 或者: from pybrain.datasets.ClassificationDataSet import addSample [as 别名]
def run_nn_fold(training_data, test_data):
    """Train a small softmax net on one fold and print its test error.

    Features are selected by mutual information over training_data; test_data
    tweets are re-encoded against that same feature map before evaluation.
    """
    # NOTE(review): despite the name, test_features here comes from
    # training_data (per fs.mutualinfo's return) -- confirm upstream contract.
    test_features, ignore, featureMap, labels, labelMap = fs.mutualinfo(training_data)

    input_len = len(test_features[0])
    num_classes = len(labelMap.keys())
    train_ds = ClassificationDataSet(input_len, 1,nb_classes=num_classes)
    for i in range(len(test_features)):
        train_ds.addSample(tuple(test_features[i]), (labels[i]))
    # One-hot encode targets for the softmax output layer.
    train_ds._convertToOneOfMany()
    # 2 hidden tanh units; softmax output over the classes.
    net = buildNetwork(train_ds.indim, 2, train_ds.outdim, bias=True, hiddenclass=TanhLayer, outclass=SoftmaxLayer)
    trainer = BackpropTrainer(net, train_ds, verbose=True)
    print "training until convergence..."
    trainer.trainUntilConvergence(maxEpochs=100)
    print "done. testing..."


    test_ds = ClassificationDataSet(input_len, 1,nb_classes=num_classes)  

    labels = []
    for tweetinfo in test_data:
        featuresFound = tweetinfo["Features"]
        label = tweetinfo["Answer"]
        labels.append(label)
        # Binary bag-of-features vector over the training-time feature map;
        # features unseen in training are silently dropped.
        features = [0]*len(featureMap.keys())
        for feat in featuresFound:
            if feat in featureMap:
                features[ featureMap[feat] ] = 1
        test_ds.addSample(tuple(features), (labelMap[label]))

    test_ds._convertToOneOfMany()
    # Percent of misclassified test samples.
    tstresult = percentError( trainer.testOnClassData(
            dataset=test_ds ), test_ds['class'] )
    print tstresult

示例11: generate_data

# 需要导入模块: from pybrain.datasets import ClassificationDataSet [as 别名]
# 或者: from pybrain.datasets.ClassificationDataSet import addSample [as 别名]
def generate_data():
    """Load Breast_train.data projected onto a fixed feature-index list and
    build a 5-class PyBrain dataset.

    Samples with a label outside the five known subtypes are skipped, which
    matches the original if/elif chain that had no else branch.
    """
    # Pre-selected feature indices (kept verbatim from the original).
    index = [8673,1646,116,2191,4326,6718,7796,8531,8763,5646,3626,5451,2004,8079,4044,6471,675,3746,6338,3149,4880,4869,6213,5316,3544,1046,7739,8309,4147,5526,5555,1504,1625,2680,5814,1305,3998,794,4355,6788,3343,867,343,3706,6902,4250,9014,5478,788,5323,677,9215,9214,9213,9212,9211,9210,9209,9208,9207,9206,9205,9204,9203,9202,9201,9200,9199,9198,9197,9196,9195,9194,9193,9192,9191,9190,9189,9188,9187,9186,9185,9184,9183,9182,9181,9180,9179,9178,9177,9176,9175,9174,9173,9172,9171,9170,9169,9168,9167,9166,9165,9164,9163,9162,9161,9160,9159,9158,9157,9156,9155,9154,9153,9152,9151,9150,9149,9148,9147,9146,9145,9144,9143,9142,9141,9140,9139,9138,9137,9136,9135,9134,9133,9132,9131,9130,9129,9128,9127,9126,9125,9124,9123,9122,9121,9120,9119,9118,9117,9116,9115,9114,9113,9112,9111,9110,9109,9108,9107,9106,9105,9104,9103,9102,9101,9100,9099,9098,9097,9096,9095,9094,9093,9092,9091,9090,9089,9088,9087,9086,9085,9084,9083,9082,9081,9080,9079,9078,9077,9076,9075,9074,9073,9072,9071,9070,9069,9068,9067]

    INPUT_FEATURES = 200
    CLASSES = 5
    # Map subtype label -> class id; replaces a repetitive if/elif chain.
    label_to_class = {
        "lumina": 0,
        "ERBB2": 1,
        "basal": 2,
        "normal": 3,
        "cell_lines": 4,
    }
    train_text, train_classfi_number, train_classfi, train_feature_name = getTargetData("Breast_train.data")

    # Project each sample onto the selected feature indices.
    train_text = getIndexData(train_text, index)

    alldata = ClassificationDataSet(INPUT_FEATURES, 1, nb_classes=CLASSES)
    for i in range(len(train_text)):
        label = train_classfi[i]
        if label in label_to_class:
            alldata.addSample(train_text[i], label_to_class[label])
    return {'minX': 0, 'maxX': 1,
            'minY': 0, 'maxY': 1, 'd': alldata, 'index': index}
开发者ID:Guosmilesmile,项目名称:pythonstudy,代码行数:31,代码来源:breastrf.py

示例12: createnetwork

# 需要导入模块: from pybrain.datasets import ClassificationDataSet [as 别名]
# 或者: from pybrain.datasets.ClassificationDataSet import addSample [as 别名]
def createnetwork(n_hoglist,n_classlist,n_classnum,n_hiddensize=100):
    """Train a softmax classifier on HOG feature vectors.

    n_hoglist    -- list of HOG feature vectors (all the same length)
    n_classlist  -- parallel list of integer class labels
    n_classnum   -- number of distinct classes
    n_hiddensize -- hidden-layer width (default 100)
    """
    n_inputdim=len(n_hoglist[0])
    n_alldata = ClassificationDataSet(n_inputdim,1, nb_classes=n_classnum)
    for i in range(len(n_hoglist)):
        n_input = n_hoglist[i]
        n_class = n_classlist[i]
        n_alldata.addSample(n_input, [n_class])
    # 25% test / 75% train split, then one-hot targets for the softmax output.
    n_tstdata, n_trndata = n_alldata.splitWithProportion( 0.25 )
    n_trndata._convertToOneOfMany( )
    n_tstdata._convertToOneOfMany( )

    print "Number of training patterns: ", len(n_trndata)
    print "Input and output dimensions: ", n_trndata.indim, n_trndata.outdim
    print "First sample (input, target, class):"
    print n_trndata['input'][0], n_trndata['target'][0], n_trndata['class'][0]

    n_fnn = buildNetwork(n_trndata.indim,n_hiddensize, n_trndata.outdim, outclass=SoftmaxLayer)
    n_trainer = BackpropTrainer(n_fnn, dataset=n_trndata, momentum=0.1, verbose=True, weightdecay=0.01)

    # Train one epoch at a time until the test error drops to 0.1 or below.
    # NOTE(review): percentError returns a percentage, so this target is an
    # extremely strict 0.1% -- and the loop never terminates if it is not
    # reached; a maximum epoch count may be worth adding.
    n_result = 1
    while n_result > 0.1:
        print n_result
        n_trainer.trainEpochs(1)
        n_trnresult = percentError(n_trainer.testOnClassData(),
                                 n_trndata['class'])
        n_tstresult = percentError(n_trainer.testOnClassData(
            dataset=n_tstdata), n_tstdata['class'])

        print "epoch: %4d" % n_trainer.totalepochs, \
            "  train error: %5.2f%%" % n_trnresult, \
            "  test error: %5.2f%%" % n_tstresult
        n_result = n_tstresult

示例13: batch_classify

# 需要导入模块: from pybrain.datasets import ClassificationDataSet [as 别名]
# 或者: from pybrain.datasets.ClassificationDataSet import addSample [as 别名]
 def batch_classify(self, samples):
   """Classify many feature dicts at once; return decoded label names."""
   # Build an unlabeled dataset in the learner's fixed feature order.
   batch = ClassificationDataSet(len(self._fx))
   for item in samples:
     batch.addSample([item[key] for key in self._fx], [0])
   # Decode the trainer's numeric class ids back to label names.
   predictions = self._trainer.testOnClassData(batch)
   return [self._rmap[p] for p in predictions]

示例14: gen_data

# 需要导入模块: from pybrain.datasets import ClassificationDataSet [as 别名]
# 或者: from pybrain.datasets.ClassificationDataSet import addSample [as 别名]
def gen_data(csv_file, db):
    """Build an image-classification dataset from a CSV of (filename,keyword).

    Each line of csv_file is "filename,keyword".  Keywords get dense integer
    class ids in order of first appearance; images are loaded from the db
    directory.  Returns (dataset, n_keywords, keywords dict).
    """
    keywords = {}
    img_list = []

    # The with-block closes the file; the original's extra f.close() after it
    # was redundant.
    with open(csv_file) as f:
        content = f.readlines()

    for line in content:
        fields = line.replace('\n', '').split(',')
        # First occurrence of a keyword gets the next free class id
        # (len(keywords) equals the old running counter at insertion time).
        if fields[1] not in keywords:
            keywords[fields[1]] = len(keywords)
        img_list.append(fields)

    n = len(keywords)
    # Target dimension equals the class count; get_keyword_class presumably
    # returns a one-hot vector of length n -- TODO confirm.
    data = ClassificationDataSet(768, n, nb_classes=n)

    for img in img_list:
        path = db + '/' + img[0]
        im = Image.open(path).convert('RGB')
        data.addSample(get_img_feats(im), get_keyword_class(keywords[img[1]], n))

    return data, n, keywords
开发者ID:rgoomes,项目名称:mini-google,代码行数:27,代码来源:features.py

示例15: generate_data

# 需要导入模块: from pybrain.datasets import ClassificationDataSet [as 别名]
# 或者: from pybrain.datasets.ClassificationDataSet import addSample [as 别名]
def generate_data():
    """Load the full 9216-feature Breast_train.data and build a 5-class
    PyBrain dataset.

    Samples with a label outside the five known subtypes are skipped, which
    matches the original if/elif chain that had no else branch.
    """
    INPUT_FEATURES = 9216
    CLASSES = 5
    # Map subtype label -> class id; replaces a repetitive if/elif chain.
    label_to_class = {
        "lumina": 0,
        "ERBB2": 1,
        "basal": 2,
        "normal": 3,
        "cell_lines": 4,
    }

    train_text, train_classfi = getTargetData("Breast_train.data")

    alldata = ClassificationDataSet(INPUT_FEATURES, 1, nb_classes=CLASSES)
    for i in range(len(train_text)):
        label = train_classfi[i]
        if label in label_to_class:
            alldata.addSample(train_text[i], label_to_class[label])
    return {'minX': 0, 'maxX': 1,
            'minY': 0, 'maxY': 1, 'd': alldata}
开发者ID:Guosmilesmile,项目名称:pythonstudy,代码行数:28,代码来源:brainStudy.py


注:本文中的pybrain.datasets.ClassificationDataSet.addSample方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。