当前位置: 首页>>代码示例>>Python>>正文


Python supervised.SupervisedDataSet类代码示例

本文整理汇总了Python中pybrain.datasets.supervised.SupervisedDataSet的典型用法代码示例。如果您正苦于以下问题:Python SupervisedDataSet类的具体用法?Python SupervisedDataSet怎么用?Python SupervisedDataSet使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


在下文中一共展示了SupervisedDataSet类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: ANN

def ANN(
    trainFeature, trainLabel, testFeature, testLabel, netStructure, para_rate, para_momentum
):  # netStructure is a list [in, hidden1, hidden2, out]; momentum is a parameter in SGD
    """Train a 4-layer sigmoid feed-forward net with backprop until the
    dataset error stops changing by more than 1e-4 between epochs.

    trainFeature/trainLabel: training arrays (testFeature/testLabel unused here).
    netStructure: [input, hidden1, hidden2, output] layer sizes.
    para_rate/para_momentum: learning rate and momentum for BackpropTrainer.
    Returns the list of recorded per-epoch dataset errors.
    """
    sampleNum = trainFeature.shape[0]
    featureNum = trainFeature.shape[1]
    Dataset = SupervisedDataSet(featureNum, 1)
    for i in range(sampleNum):
        print(i)
        Dataset.addSample(list(trainFeature[i]), [trainLabel[i]])
    Network = buildNetwork(
        netStructure[0],
        netStructure[1],
        netStructure[2],
        netStructure[3],
        hiddenclass=SigmoidLayer,
        outclass=SigmoidLayer,
    )
    T = BackpropTrainer(Network, Dataset, learningrate=para_rate, momentum=para_momentum, verbose=True)
    # Record the untrained error, then train until the error converges.
    # BUG FIX: the original recomputed testOnData() in the while condition
    # immediately after appending the very same value, so |diff| was always 0
    # and the loop exited after one iteration. Compare consecutive recorded
    # errors instead, and evaluate the dataset only once per epoch.
    errorList = [T.testOnData(Dataset)]
    while True:
        T.trainOnDataset(Dataset)
        errorList.append(T.testOnData(Dataset))
        if abs(errorList[-1] - errorList[-2]) <= 0.0001:
            break
    # Show predictions on the training set for a quick sanity check.
    print(np.array([Network.activate(x) for x in trainFeature]))
    print(Network.activate([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]))
    return errorList
开发者ID:Codelegant92,项目名称:CreditScoring,代码行数:34,代码来源:ANN.py

示例2: test_train

	def test_train(self, epochs=1):
		"""Train a 300-300-1 net on 70% of the samples for `epochs` epochs,
		then print and count misclassifications on the first 100 held-out samples.
		Returns the number of misclassified test samples."""
		print("Training...")

		cutoff = int(len(self.samples) * 0.7)
		train_samples = self.samples[:cutoff]
		train_labels = self.labels[:cutoff]
		test_samples = self.samples[cutoff:]
		test_labels = self.labels[cutoff:]

		net = buildNetwork(300, 300, 1)
		ds = SupervisedDataSet(300, 1)
		for sample, label in zip(train_samples, train_labels):
			ds.addSample(tuple(np.array(sample, dtype='float64')), (label,))

		trainer = BackpropTrainer(net, ds, verbose=True)
		trainer.trainEpochs(epochs)
		self.totalEpochs = epochs

		error = 0
		counter = 0
		for idx in range(100):
			output = net.activate(tuple(np.array(test_samples[idx], dtype='float64')))
			counter += 1
			# Same line is printed for hits and misses; only the error count differs.
			print(counter, " : output : ", output[0], " real answer : ", test_labels[idx])
			if round(output[0]) != test_labels[idx]:
				error += 1

		print("Trained with " + str(epochs) + " epochs; Total: " + str(self.totalEpochs) + ";")
		return error
开发者ID:skrustev,项目名称:traffic-sign-recognition,代码行数:33,代码来源:neural_network.py

示例3: NN_data

def NN_data(ts, max_lag):
    '''Function for creating a normalized dataset suitable for training
    PyBrain's neural networks from pandas Series object.

    Each sample's input is the previous `max_lag` normalized values and its
    target is the current normalized value.

    Returns: dataset suitable for neural net training, max value of
    dataset for denormalization purposes'''
    ds = SupervisedDataSet(max_lag, 1)
    prices = list(normalize(ts.values)[0])

    # Build one lagged column per lag, front-padded with 'nan' so every column
    # lines up with `prices`. BUG FIX: the original rebuilt all columns once
    # per price inside the loop over prices (accidental O(n^2 * lag)); the
    # construction is loop-invariant and is hoisted out here.
    input_cols = []
    for lag in range(1, max_lag + 1):
        input_cols.append(['nan'] * lag + prices[:-lag])

    # Transpose columns into rows. BUG FIX: on Python 3, zip() returns an
    # iterator that does not support slicing, so materialize it with list().
    input_rows = list(zip(*input_cols))
    # Drop the leading rows that still contain 'nan' padding.
    input_rows = input_rows[max_lag:]
    target = prices[max_lag:]
    for row, tgt in zip(input_rows, target):
        ds.appendLinked(row, tgt)

    return ds, normalize(ts.values)[1]
开发者ID:martin1,项目名称:thesis,代码行数:27,代码来源:neural_networks.py

示例4: anntrain

def anntrain(xdata, ydata):
    """Build a minimal feed-forward net (linear in -> 1 sigmoid unit -> linear out),
    train it with backprop for 40 passes over (xdata, ydata), and return it."""
    ds = SupervisedDataSet(len(xdata[0]), 1)
    for i, sample in enumerate(xdata):
        ds.addSample(sample, ydata[i])

    net = FeedForwardNetwork()
    in_layer = LinearLayer(len(xdata[0]))
    hidden = SigmoidLayer(1)
    out_layer = LinearLayer(1)
    net.addInputModule(in_layer)
    net.addModule(hidden)
    net.addOutputModule(out_layer)
    net.addConnection(FullConnection(in_layer, hidden))
    net.addConnection(FullConnection(hidden, out_layer))
    # Required by PyBrain before the network can be used.
    net.sortModules()

    trainer = BackpropTrainer(net, ds)
    trainer.trainOnDataset(ds, 40)
    trainer.testOnData()
    return net
开发者ID:gibranfp,项目名称:authorid,代码行数:28,代码来源:ML.py

示例5: create_dataset

def create_dataset():
    """Sample sin(x) over [0, 4*pi) in pi/30 steps into a 1-input/1-output dataset."""
    ds = SupervisedDataSet(1, 1)
    for x in arange(0, 4 * pi, pi / 30):
        ds.addSample(x, sin(x))
    return ds
开发者ID:slnowak,项目名称:msi_byrski,代码行数:7,代码来源:zad2.py

示例6: readFromExcel

def readFromExcel(inCols, targetCols, numRows, fileName, offset=0, sheet=0, dataSet=None, conversionFun=None):
    """Populates a given dataset or creates a new SupervisedDataSet from an Excel file.

       inCols = list of column numbers containing the input data columns, indexed from 0
       targetCols = list of column numbers containing the target data columns, indexed from 0
       numRows = the number of rows of data
       fileName = the name of the Excel file
       offset = the row the valid data starts at
       sheet = the sheet of the workbook the data is on, indexed from 0 as it is in xlrd
       dataSet = the dataset to be populated; a SupervisedDataSet is created if it is None
       conversionFun = optional callable used to preprocess each cell value
    """
    book = open_workbook(fileName)
    sheet = book.sheet_by_index(sheet)

    if dataSet is None:
        dataSet = SupervisedDataSet(len(inCols), len(targetCols))
    for r in range(offset, offset + numRows):
        input = [sheet.cell_value(r, c) for c in inCols]
        target = [sheet.cell_value(r, c) for c in targetCols]
        try:
            if conversionFun:
                input = [conversionFun(i) for i in input]
                target = [conversionFun(t) for t in target]
                # Python 3 print call (the original used Python 2 print statements).
                print(input, target)

            dataSet.addSample(input, target)
        except Exception:
            # Best-effort loading: skip rows whose values cannot be converted or added.
            print('rejected row {}'.format(r))
    return dataSet
开发者ID:PatrickHunter,项目名称:PyBrainExcel,代码行数:35,代码来源:ExcelTools.py

示例7: Predict

	def Predict(self, ticker, day):
		"""Predict the closing price of `ticker` on `day` with a backprop net
		trained on the preceding self.trainingPeriod days of Yahoo data.

		Features are sliding windows of self.windowLength days (self.GetFeature);
		the response is each day's 'Close'. Returns [0] if the historical data
		cannot be fetched.
		"""
		endDay = day - datetime.timedelta(1)
		startDay = endDay - datetime.timedelta(self.trainingPeriod)
		try:
			stockData = data.DataReader(ticker, 'yahoo', startDay, endDay)
		except Exception:  # was a bare except: don't swallow SystemExit/KeyboardInterrupt
			return [0]

		rawTrainFeatures = []
		rawTrainResponses = []
		for currentDay in range(self.windowLength, len(stockData)):
			window = stockData[currentDay - self.windowLength:currentDay]
			rawTrainFeatures.append(self.GetFeature(window))
			rawTrainResponses.append(stockData.iloc[currentDay]['Close'])

		# Feature for the most recent window — the day we are predicting.
		rawTestFeatures = self.GetFeature(stockData[len(stockData) - self.windowLength:len(stockData)])

		alldata = SupervisedDataSet(len(rawTrainFeatures[0]), 1)
		for index in range(len(rawTrainFeatures)):
			alldata.addSample(rawTrainFeatures[index], [rawTrainResponses[index]])

		# // keeps the hidden-layer size an int on Python 3 (same value as / on Python 2).
		self.network = buildNetwork(alldata.indim, (alldata.indim + alldata.outdim) // 2, alldata.outdim, hiddenclass=SigmoidLayer, outclass=LinearLayer)
		trainer = BackpropTrainer(self.network, dataset=alldata)
		for i in range(250):  # 50 x 5 training passes, as in the original nested loops
			trainer.train()
		return float(self.network.activate(rawTestFeatures))
开发者ID:DerekHunter,项目名称:Algo,代码行数:31,代码来源:algo.py

示例8: retrain

def retrain(N, dataset, net):
    """Run N additional backprop training passes of `net` over `dataset`
    (pairs of 20-dim input/target vectors) and return the net."""
    ds = SupervisedDataSet(20, 20)
    for pair in dataset:
        ds.addSample(pair[0], pair[1])
    trainer = BackpropTrainer(net, ds)
    for _ in range(N):
        trainer.train()
    return net
开发者ID:shoz,项目名称:predlife,代码行数:8,代码来源:trainer.py

示例9: train

def train(N, dataset):
    """Build a 20-20-20 tanh network, train it for N passes over `dataset`
    (pairs of 20-dim input/target vectors) with progress output, and return it."""
    ds = SupervisedDataSet(20, 20)
    for pair in dataset:
        ds.addSample(pair[0], pair[1])
    net = buildNetwork(20, 20, 20, bias=True, hiddenclass=TanhLayer)
    trainer = BackpropTrainer(net, ds)
    for epoch in range(N):
        # \r keeps the progress line in place on the terminal.
        sys.stdout.write("Progress: %d/%d \r" % (epoch, N))
        sys.stdout.flush()
        trainer.train()
    return net
开发者ID:shoz,项目名称:predlife,代码行数:11,代码来源:trainer.py

示例10: update_neural_network

 def update_neural_network(self, old_state, old_value, new_state,action, reward):
    """One Q-learning style update of the value network.

    Computes the TD target
        desired = old_value + lr * (reward + gamma * best_next - old_value)
    where best_next = self.get_best_action(new_state)[1] — presumably the
    value of the best action in the new state (TODO confirm against
    get_best_action's contract) — then trains the shared network on the
    single sample (old_state + action) -> desired.
    """
    desired_value = old_value + self.learning_rate * (reward + self.discount_factor * self.get_best_action(new_state)[1] - old_value)
    # One-sample dataset: the input is the concatenated state+action vector.
    ds = SupervisedDataSet(self.states_and_actions_num,1)
    ds.addSample(old_state + action, desired_value)
    trainer = BackpropTrainer(self.neural_network,ds)
    trainer.train()
    
     
     
     
     
开发者ID:lastkuku,项目名称:HearthstoneAI,代码行数:6,代码来源:q_learner.py

示例11: absorb

    def absorb(self, winner, **kwargs):
        """Fold one finished game into the value network's training data.

        winner: the side that won the game just observed.
        Builds a dataset of (input_vec -> (wins, plays)) targets from this
        game's observations of Black's moves and trains self.trainer on it.
        """
        self.total_sim += 1

        ds = SupervisedDataSet(self.features_num, 2)
        for who, s0, s1 in self.observation:
            # Only learn from Black's positions.
            if who != Board.STONE_BLACK:
                continue
            input_vec = self.get_input_values(s0, s1, who)
            # Current net estimate; val[0]/val[1] appear to be win/play rates
            # normalized by the simulation count — TODO confirm net semantics.
            val = self.net.activate(input_vec)
            # Un-normalize, credit this game (+1 play, +1 win if Black won).
            plays = val[1] * self.total_sim + 1
            wins = val[0] * self.total_sim
            if who == winner:
                wins += 1
            ds.addSample(input_vec, (wins, plays))
        self.trainer.trainOnDataset(ds)
开发者ID:splendor-kill,项目名称:ml-five,代码行数:15,代码来源:mcts.py

示例12: buildDataset

def buildDataset(filenames,
                 history=2, # how many snapshots into the past?
                 ):
    """Assemble a SupervisedDataSet from saved run files.

    Each sample's input combines the run settings with `history` consecutive
    snapshots; the target is half the relative change between the previous
    and next snapshot target values ("percentage gain").
    """
    dataset = SupervisedDataSet(set_feats + history * snap_feats, num_targ)
    for filename in filenames:
        run = quickload(filename)
        snaps = run['snapshots']
        config = run['setting']
        for start in range(len(snaps) - history - 1):
            features = parseFeatures(config, snaps[start:start + history])
            prev_val = parseTarget(snaps[start + history - 1])
            next_val = parseTarget(snaps[start + history])
            # percentage gain
            gain = (prev_val - next_val) / (next_val + prev_val) / 2.
            dataset.addSample(features, [gain])
    return dataset
开发者ID:schaul,项目名称:nnsandbox,代码行数:16,代码来源:paresdata1.py

示例13: createXORData

    def createXORData(self,inputdim,outputdim):
        """Fill self.data with the four canonical XOR input/target pairs."""
        self.data = SupervisedDataSet(inputdim, outputdim)
        xor_table = (
            ([1, 1], [0]),
            ([1, 0], [1]),
            ([0, 1], [1]),
            ([0, 0], [0]),
        )
        for inputs, target in xor_table:
            self.data.addSample(inputs, target)
开发者ID:ansrivas,项目名称:Thesis,代码行数:7,代码来源:neuralnet.py

示例14: NetworkTrain

def NetworkTrain(trainDataSet, mnetwork=NetworkBuild(), file='NetworkDump.pkl',maxEpochs=100):
    """Train a freshly built network on `trainDataSet` with backprop until
    convergence, pickle the result to `file`, and return it.

    trainDataSet: sequence of (input_vector, output_vector) pairs.
    maxEpochs: cap on the convergence-training iterations.

    NOTE(review): the `mnetwork` argument is immediately replaced by
    NetworkBuild(new=True) below, so any network passed in is ignored.
    """
    mnetwork = NetworkBuild(new = True)
    assert len(mnetwork[0].inmodules) == len(mnetwork[1].keys())
    print('DEBUG')
    print("lens " + str(len(trainDataSet[0][0])) + " " + str(len(mnetwork[0].inmodules)))

    # Declare the dataset layout from the first sample's dimensions.
    DS = SupervisedDataSet(len(trainDataSet[0][0]), len(trainDataSet[0][1]))
    for sample in trainDataSet:
        DS.addSample(sample[0], sample[1])

    mnetwork[0].sortModules()
    # Backprop trainer; verbose=True prints the total error each epoch, and
    # PyBrain splits training/validation 4:1 by default.
    trainer = BackpropTrainer(mnetwork[0], DS, verbose=True, learningrate=0.01)
    # Train until convergence, capped at maxEpochs iterations.
    trainer.trainUntilConvergence(maxEpochs=maxEpochs)

    # Persist the trained network for later reuse.
    pickle.dump(mnetwork, open(file, 'wb'))
    return mnetwork
开发者ID:nickisverygood,项目名称:MindMapMain,代码行数:47,代码来源:RNN.py

示例15: train

def train(
    train,
    label,
    custom_net=None,
    training_mse_threshold=0.40,
    testing_mse_threshold=0.60,
    epoch_threshold=10,
    epochs=100,
    hidden_size=20,
):
    """Train a network with RProp- on a train/test split of (train, label).

    Uses the module-level `split_at` for the split and `model_file` for
    checkpoint names. After every epoch the current net is saved to
    'current_run'; the best-so-far (lowest test RMSE) net is saved to
    'current_min_epoch_' + model_file; the final net to model_file.
    The *_threshold parameters are currently unused (kept for interface
    compatibility). Returns the trained network.
    """
    # Test Set.
    x_train = train[0:split_at, :]
    # BUG FIX: label.__getslice__ was removed in Python 3 — use slicing.
    y_train = label[0:split_at].reshape(-1, 1)
    x_test = train[split_at:, :]
    y_test = label[split_at:label.shape[0]].reshape(-1, 1)

    # Shape.
    input_size = x_train.shape[1]
    target_size = y_train.shape[1]

    # prepare dataset
    ds = SDS(input_size, target_size)
    ds.setField("input", x_train)
    ds.setField("target", y_train)

    # prepare dataset
    ds_test = SDS(input_size, target_size)
    ds_test.setField("input", x_test)
    ds_test.setField("target", y_test)

    min_mse = 1000000

    # init and train (`is None`, not `== None`)
    if custom_net is None:
        net = buildNetwork(input_size, hidden_size, target_size, bias=True)
    else:
        print("Picking up the custom network")
        net = custom_net

    trainer = RPropMinusTrainer(net, dataset=ds, verbose=False, weightdecay=0.01, batchlearning=True)
    print("training for {} epochs...".format(epochs))

    for i in range(epochs):
        mse = trainer.train()
        print("training mse, epoch {}: {}".format(i + 1, math.sqrt(mse)))

        p = net.activateOnDataset(ds_test)
        mse = math.sqrt(MSE(y_test, p))
        print("-- testing mse, epoch {}: {}".format(i + 1, mse))
        # `with` closes the checkpoint files (the original leaked handles).
        with open("current_run", "wb") as f:
            pickle.dump(net, f)

        if min_mse > mse:
            print("Current minimum found at ", i)
            with open("current_min_epoch_" + model_file, "wb") as f:
                pickle.dump(net, f)
            min_mse = mse

    with open(model_file, "wb") as f:
        pickle.dump(net, f)
    return net
开发者ID:korkam,项目名称:beta_learning-matlab-through-case-studies,代码行数:60,代码来源:PyBrainWithCV.py


注:本文中的pybrain.datasets.supervised.SupervisedDataSet类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。