

Python RPropMinusTrainer.trainUntilConvergence Method Code Examples

This article collects and summarizes typical usage examples of the Python method pybrain.supervised.trainers.RPropMinusTrainer.trainUntilConvergence. If you have been wondering how to use RPropMinusTrainer.trainUntilConvergence in practice, or what calling it looks like in real code, the curated examples below should help. You can also browse further usage examples of the containing class, pybrain.supervised.trainers.RPropMinusTrainer.


The following shows 11 code examples of RPropMinusTrainer.trainUntilConvergence, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
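
Before working through the examples, here is a minimal, self-contained sketch of the pattern they all share: build a network, fill a SupervisedDataSet, wrap both in an RPropMinusTrainer, and call trainUntilConvergence. The toy dataset and every parameter value below are illustrative only.

from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import RPropMinusTrainer

# Toy XOR dataset: 2 inputs, 1 output.
ds = SupervisedDataSet(2, 1)
for inp, target in [((0, 0), (0,)), ((0, 1), (1,)), ((1, 0), (1,)), ((1, 1), (0,))]:
    ds.addSample(inp, target)

net = buildNetwork(2, 4, 1, bias=True)
trainer = RPropMinusTrainer(net, dataset=ds, verbose=False)

# trainUntilConvergence holds out validationProportion of the samples, trains on the rest,
# and stops once the validation error has not improved for continueEpochs epochs
# (or maxEpochs is reached). It returns the per-epoch training and validation error lists.
train_errors, val_errors = trainer.trainUntilConvergence(
    maxEpochs=100, continueEpochs=10, validationProportion=0.25)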

Example 1: exam

# Required import: from pybrain.supervised.trainers import RPropMinusTrainer [as alias]
# Or: from pybrain.supervised.trainers.RPropMinusTrainer import trainUntilConvergence [as alias]
    def exam(self, dc, train_com, train_path):
        """
        Here you can train your networks.

        Parameters
        ----------
        :param dc: dict
            Dict of commands with values.
        :param train_com:
            Command that you want the ANN to learn to recognize.
        :param train_path:
            Paths to the folders with training examples.

        Returns
        -------
        :return:
            File with the trained network.

        """
        num_hid = 1
        put, out = [], []

        ds = SupervisedDataSet(420, 1)
        nt = buildNetwork(420, 3, 1, bias=True, hiddenclass=SigmoidLayer, outclass=SigmoidLayer)

        for way in train_path:
            for i in os.listdir(way):
                lk = self.link(i)
                if lk:
                    self.logger.debug(u'File was added to training list %s' % i)
                    result = self.ext_t(way+i)
                    ds.addSample(result, (dc[lk],))
                    put.append(result)
                    out.append([dc[lk]])

        net = nl.net.newff([[np.min(put), np.max(put)]]*420, [num_hid, 1], [nl.trans.LogSig(), nl.trans.SatLinPrm()])
        net.trainf = nl.train.train_rprop
        trainer = RPropMinusTrainer(nt, dataset=ds, verbose=False)
        self.logger.info(u'Training brain...')
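        # Note: a validationProportion this small leaves only a single held-out sample,
        # so the early-stopping check is nearly meaningless and maxEpochs effectively bounds training.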
        trainer.trainUntilConvergence(maxEpochs=100, verbose=False, continueEpochs=100, validationProportion=1e-7)
        self.logger.info(u'Training neural...')
        error = net.train(put, out, epochs=500, show=500, goal=1e-4, lr=1e-10)

        while error[-1] > 1e-3:
            self.logger.info(u'Running one more training pass, because the MSE is not low enough yet!')
            net = nl.net.newff([[np.min(put), np.max(put)]]*420, [num_hid, 1], [nl.trans.LogSig(), nl.trans.SatLinPrm()])
            net.trainf = nl.train.train_rprop
            self.logger.info(u'Training neural...')
            error = net.train(put, out, epochs=500, show=500, goal=1e-4, lr=1e-10)
            num_hid += 1

        try:
            net.save(u'networks/%s_neurolab' % train_com)
            fl = open(u'networks/%s_brain' % train_com, 'w')
            pickle.dump(nt, fl)
            fl.close()

        except IOError:
            os.mkdir(u'networks')
            net.save(u'networks/%s_neurolab' % train_com)
Author: noob-saibot, Project: Recognition-with-ANN, Lines: 62, Source file: genann.py

Example 2: trainNetwork

# Required import: from pybrain.supervised.trainers import RPropMinusTrainer [as alias]
# Or: from pybrain.supervised.trainers.RPropMinusTrainer import trainUntilConvergence [as alias]
def trainNetwork(net, sample_list, validate_list, net_filename, max_epochs=5500, min_epochs=300):
    count_input_samples = len(sample_list)
    count_outputs = len(validate_list)
    ds = SupervisedDataSet(count_input_samples, count_outputs)
    ds.addSample(sample_list, validate_list)
    trainer = RPropMinusTrainer(net, verbose=True)
    trainer.setData(ds)
    trainer.trainUntilConvergence(maxEpochs=max_epochs, continueEpochs=min_epochs)
    NetworkWriter.writeToFile(net, net_filename)
    return net
Author: ivalykhin, Project: neirohome, Lines: 12, Source file: netManagement.py

Example 3: fit

# Required import: from pybrain.supervised.trainers import RPropMinusTrainer [as alias]
# Or: from pybrain.supervised.trainers.RPropMinusTrainer import trainUntilConvergence [as alias]
    def fit(self, X, y):
        """
        Trains the classifier

        :param pandas.DataFrame X: data shape [n_samples, n_features]
        :param y: labels of events - array-like of shape [n_samples]
        .. note::
            doesn't support sample weights
        """

        dataset = self._prepare_net_and_dataset(X, y, 'classification')

        if self.use_rprop:
            trainer = RPropMinusTrainer(self.net,
                                        etaminus=self.etaminus,
                                        etaplus=self.etaplus,
                                        deltamin=self.deltamin,
                                        deltamax=self.deltamax,
                                        delta0=self.delta0,
                                        dataset=dataset,
                                        learningrate=self.learningrate,
                                        lrdecay=self.lrdecay,
                                        momentum=self.momentum,
                                        verbose=self.verbose,
                                        batchlearning=self.batchlearning,
                                        weightdecay=self.weightdecay)
        else:
            trainer = BackpropTrainer(self.net,
                                      dataset,
                                      learningrate=self.learningrate,
                                      lrdecay=self.lrdecay,
                                      momentum=self.momentum,
                                      verbose=self.verbose,
                                      batchlearning=self.batchlearning,
                                      weightdecay=self.weightdecay)

        if self.epochs < 0:
            trainer.trainUntilConvergence(maxEpochs=self.max_epochs,
                                          continueEpochs=self.continue_epochs,
                                          verbose=self.verbose,
                                          validationProportion=self.validation_proportion)
        else:
            for i in range(self.epochs):
                trainer.train()
        self.__fitted = True

        return self
Author: tyamana, Project: rep, Lines: 49, Source file: pybrain.py
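
The etaminus/etaplus/delta* arguments used above are the standard Rprop step-size controls. As a reference, here is a small standalone sketch (toy data, illustrative values) of how they map onto the RPropMinusTrainer constructor:

from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import RPropMinusTrainer

# Illustrative regression data: 3 inputs, 1 output.
ds = SupervisedDataSet(3, 1)
for row in [(0.1, 0.2, 0.3, 0.5), (0.4, 0.5, 0.6, 0.9),
            (0.7, 0.8, 0.9, 0.2), (0.2, 0.4, 0.6, 0.7)]:
    ds.addSample(row[:3], (row[3],))

net = buildNetwork(3, 5, 1, bias=True)
trainer = RPropMinusTrainer(net, dataset=ds,
                            etaminus=0.5,   # shrink a weight's step after its gradient changes sign
                            etaplus=1.2,    # grow the step while the gradient keeps its sign
                            delta0=0.1,     # initial per-weight step size
                            deltamin=1e-6,  # lower bound on the step size
                            deltamax=5.0,   # upper bound on the step size
                            verbose=False)
trainer.trainUntilConvergence(maxEpochs=50, continueEpochs=5, validationProportion=0.25)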

Example 4: partial_fit

# Required import: from pybrain.supervised.trainers import RPropMinusTrainer [as alias]
# Or: from pybrain.supervised.trainers.RPropMinusTrainer import trainUntilConvergence [as alias]
    def partial_fit(self, X, y):
        """
        Additional training of the estimator

        :param pandas.DataFrame X: data shape [n_samples, n_features]
        :param y: labels of events - array-like of shape [n_samples]

        :return: self
        """
        dataset = self._prepare_dataset(X, y, self._model_type)

        if not self.is_fitted():
            self._prepare_net(dataset=dataset, model_type=self._model_type)

        if self.use_rprop:
            trainer = RPropMinusTrainer(self.net,
                                        etaminus=self.etaminus,
                                        etaplus=self.etaplus,
                                        deltamin=self.deltamin,
                                        deltamax=self.deltamax,
                                        delta0=self.delta0,
                                        dataset=dataset,
                                        learningrate=self.learningrate,
                                        lrdecay=self.lrdecay,
                                        momentum=self.momentum,
                                        verbose=self.verbose,
                                        batchlearning=self.batchlearning,
                                        weightdecay=self.weightdecay)
        else:
            trainer = BackpropTrainer(self.net,
                                      dataset,
                                      learningrate=self.learningrate,
                                      lrdecay=self.lrdecay,
                                      momentum=self.momentum,
                                      verbose=self.verbose,
                                      batchlearning=self.batchlearning,
                                      weightdecay=self.weightdecay)

        if self.epochs < 0:
            trainer.trainUntilConvergence(maxEpochs=self.max_epochs,
                                          continueEpochs=self.continue_epochs,
                                          verbose=self.verbose,
                                          validationProportion=self.validation_proportion)
        else:
            trainer.trainEpochs(epochs=self.epochs)
        return self
Author: AlexanderTek, Project: rep, Lines: 48, Source file: pybrain.py

Example 5: createAndTrainNetworkFromList

# Required import: from pybrain.supervised.trainers import RPropMinusTrainer [as alias]
# Or: from pybrain.supervised.trainers.RPropMinusTrainer import trainUntilConvergence [as alias]
def createAndTrainNetworkFromList(train_list, count_input_samples, net_filename, count_layers=33,
                          count_outputs=1, max_epochs=15000, min_epochs=300):
    net = buildNetwork(count_input_samples, count_layers, count_outputs)
    ds = SupervisedDataSet(count_input_samples, count_outputs)
    count_samples = len(train_list)
    for i in range(0, count_samples):
        ds.addSample(train_list[i][:-count_outputs], train_list[i][-count_outputs])
    trainer = RPropMinusTrainer(net, verbose=True)
    trainer.setData(ds)
    a = trainer.trainUntilConvergence(maxEpochs=max_epochs, continueEpochs=min_epochs, validationProportion=0.15)
    net_filename = net_filename[:-4]+str(a[0][-1])+'.xml'
    NetworkWriter.writeToFile(net, net_filename)
    result_list = [a, net_filename]
    return result_list
Author: ivalykhin, Project: neirohome, Lines: 16, Source file: netManagement.py
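
For context on the a[0][-1] used above: trainUntilConvergence returns a pair of lists (per-epoch training errors, per-epoch validation errors), so a[0][-1] is the final training error, which the function folds into the XML filename. A minimal sketch of unpacking the return value with descriptive names (toy data, illustrative parameters):

from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import RPropMinusTrainer

ds = SupervisedDataSet(2, 1)
for inp, target in [((0, 0), (0,)), ((0, 1), (1,)), ((1, 0), (1,)), ((1, 1), (0,))]:
    ds.addSample(inp, target)
trainer = RPropMinusTrainer(buildNetwork(2, 3, 1), dataset=ds)

train_errors, validation_errors = trainer.trainUntilConvergence(
    maxEpochs=500, continueEpochs=30, validationProportion=0.15)
final_train_error = train_errors[-1]  # the value example 5 embeds in the network filename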

Example 6: createAndTrainNetworkFromFile

# Required import: from pybrain.supervised.trainers import RPropMinusTrainer [as alias]
# Or: from pybrain.supervised.trainers.RPropMinusTrainer import trainUntilConvergence [as alias]
def createAndTrainNetworkFromFile(curs_filename, count_input_samples, count_samples, net_filename, count_layers=33,
                          count_outputs=1, max_epochs=15000, min_epochs=300):
    net = buildNetwork(count_input_samples, count_layers, count_outputs)
    ds = SupervisedDataSet(count_input_samples, count_outputs)
    wb = load_workbook(filename=curs_filename)
    ws = wb.active
    for i in range(0, count_samples):
        loaded_data = []
        for j in range(0, count_input_samples + 1):
            loaded_data.append(round(float(ws.cell(row=i+1, column=j+1).value), 4))
            #ds.addSample(loaded_data[:-1], loaded_data[-1])
        #print loaded_data[:-1], loaded_data[-1]
        ds.addSample(loaded_data[:-1], loaded_data[-1])
    trainer = RPropMinusTrainer(net, verbose=True)
    trainer.setData(ds)
    a = trainer.trainUntilConvergence(maxEpochs=max_epochs, continueEpochs=min_epochs, validationProportion=0.15)
    net_filename = net_filename[:-4]+str(a[0][-1])+'.xml'
    NetworkWriter.writeToFile(net, net_filename)
    result_list = [a, net_filename]
    return result_list
Author: ivalykhin, Project: neirohome, Lines: 22, Source file: netManagement.py

Example 7: train

# Required import: from pybrain.supervised.trainers import RPropMinusTrainer [as alias]
# Or: from pybrain.supervised.trainers.RPropMinusTrainer import trainUntilConvergence [as alias]
 def train(self, input_row, output_row):
     """
     Train the network with Rprop.
     PARTITION_OF_EDUCATION_VERIFICATION_SET - training/validation split ratio
     MAX_EPOCHS - maximum number of training epochs
     OUTCASTING_EPOCHS - number of epochs to keep going when training is stuck in a local minimum; after that, training stops
     """
     self._form_set(input_row, output_row)
     trainer = RPropMinusTrainer(module=self.network, dataset=self.data_set)
     self.training_errors, self.validation_errors = trainer.trainUntilConvergence(
         validationProportion=self.settings.training_part_fraction,
         maxEpochs=self.settings.maximum_training_epochs,
         continueEpochs=self.settings.quit_epochs)
     len_validate = int(len(output_row[0]['data']) * (1 - self.settings.training_part_fraction))
     results_of = [list(self.network.activate(x))[0] for x in self.inputs_for_validation[len_validate:]]
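     # Note: despite the name, this averages absolute errors (MAE) over the held-out slice, not squared errors.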
     self.mse = sum(map(lambda result, target: fabs(result - target), list(results_of),
                        list(output_row[0]['data'][len_validate:]))) / len(results_of)
     print 'DUMB-dd'
     for it in results_of:
         print it
     print 'DUMB-pp'
     for it in list(output_row[0]['data'][len_validate:]):
         print it
     print '| | |-MSE = ', self.mse
Author: UIR-workigteam, Project: UIR, Lines: 26, Source file: OwnNeuro.py

Example 8: normalize

# Required import: from pybrain.supervised.trainers import RPropMinusTrainer [as alias]
# Or: from pybrain.supervised.trainers.RPropMinusTrainer import trainUntilConvergence [as alias]
    window[9] = normalize(window[9], max_volume, min_volume)
    output = n.activate(window)

    for j in range(0, 5):
        prediction = denormalize(output[j], max_price, min_price)
        print prediction, ticks_future[j][0]
        writer.writerow([ticks_future[j][2], prediction, ticks_future[j][0]])

last_five = []

for day in range(0, 100):
    ticks = map(lambda x: data.next(), range(0, 5))
    last_five = map(lambda x: data.next(), range(0, 5))

    DS.appendLinked(*ticks_to_inputs_outputs(ticks, last_five))

with open('predictions.csv', 'wb') as output_file:
    writer = csv.writer(output_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)
    
    for i in range(0, 18):
        trainer.trainUntilConvergence(validationProportion=0.55, maxEpochs=1000, verbose=False)

        ticks = map(lambda x: data.next(), range(0, 5))

        predict_next_five(last_five, ticks, writer)

        DS.appendLinked(*ticks_to_inputs_outputs(last_five, ticks))

        last_five = ticks

Author: vladignatyev, Project: coin, Lines: 31, Source file: test_per_prediction.py

Example 9: FFNetwork

# Required import: from pybrain.supervised.trainers import RPropMinusTrainer [as alias]
# Or: from pybrain.supervised.trainers.RPropMinusTrainer import trainUntilConvergence [as alias]
class FFNetwork(Network):
    def __init__(self, sensor_ids, action_ids, n_hidden, bias=True):
        super(FFNetwork, self).__init__(sensor_ids=sensor_ids, action_ids=action_ids)
        self.net = buildNetwork(SensorModel.array_length(sensor_ids), n_hidden, 1,
                                hiddenclass=TanhLayer,
                                #outclass=TanhLayer,
                                bias=bias)
        self.scaler_input = None
        self.trainer = None

    def save(self, filename):
        with open(filename, 'wb') as f:
            pickle.dump(self, f)

    def load(self, filename):
        with open(filename, 'rb') as f:
            ffn = pickle.load(f)
        self.net = ffn.net
        self.sensor_ids = ffn.sensor_ids
        self.action_ids = ffn.action_ids
        self.scaler_input = ffn.scaler_input
        del ffn

    def get_action(self, sensors):
        x = sensors.get_array(self.sensor_ids)
        if self.scaler_input is not None:
            x = self.scaler_input.transform(x)
        return self.net.activate(x)[0]

    def get_params(self):
        pass

    def train(self, training_files, learningrate=0.01, scaling=True, noise=False, verbose=True):
        print "building dataset..."
        ds = SupervisedDataSet(SensorModel.array_length(self.sensor_ids), 1)
        # read training file line, create sensormodel object, do backprop
        a = None
        s = None
        for logfile in training_files:
            print "loading file", logfile
            with open(logfile) as f:
                for line in f:
                    if line.startswith("Received:"):
                        s = SensorModel(string=line.split(' ', 1)[1])
                    elif line.startswith("Sending:"):
                        a = Actions.from_string(string=line.split(' ', 1)[1])
                    if s is not None and a is not None:
                        ds.addSample(inp=s.get_array(self.sensor_ids), target=a[self.action_ids[0]])
                        if noise:
                            # add the same training sample again but with noise in the sensors
                            s.add_noise()
                            ds.addSample(inp=s.get_array(self.sensor_ids), target=a[self.action_ids[0]])
                        s = None
                        a = None
        print "dataset size:", len(ds)
        if scaling:
            print "scaling dataset"
            self.scaler_input = StandardScaler(with_mean=True, with_std=False).fit(ds.data['input'])
            ds.data['input'] = self.scaler_input.transform(ds.data['input'])
            ds.data['target'] = ds.data['target']
        #self.trainer = BackpropTrainer(self.net, learningrate=learningrate, verbose=verbose)
        self.trainer = RPropMinusTrainer(self.net, verbose=verbose, batchlearning=True)
        print "training network..."
        self.trainer.trainUntilConvergence(dataset=ds, validationProportion=0.25, maxEpochs=10, continueEpochs=2)
Author: lqrz, Project: computational_intelligence, Lines: 66, Source file: network.py
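
Examples 1 and 9 both persist the trained pybrain network with pickle. A minimal round-trip sketch of that idea (the file name is made up):

import pickle

from pybrain.tools.shortcuts import buildNetwork

net = buildNetwork(4, 6, 1, bias=True)
# ... training with an RPropMinusTrainer would happen here ...

# Save the network object, then restore it later for inference.
with open('ffnetwork.pkl', 'wb') as f:
    pickle.dump(net, f)

with open('ffnetwork.pkl', 'rb') as f:
    restored = pickle.load(f)

print(restored.activate([0.1, 0.2, 0.3, 0.4]))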

Example 10: RPropMinusTrainer

# Required import: from pybrain.supervised.trainers import RPropMinusTrainer [as alias]
# Or: from pybrain.supervised.trainers.RPropMinusTrainer import trainUntilConvergence [as alias]
    inputs[1] = window_volumes[0]
    inputs[3] = window_volumes[1]
    inputs[5] = window_volumes[2]
    inputs[7] = window_volumes[3]
    inputs[9] = window_volumes[4]

    DS.appendLinked(inputs, forecast)

    day += 1


# training
trainer = RPropMinusTrainer(n, verbose=True, batchlearning=True, learningrate=0.01, lrdecay=0.0, momentum=0.0,
                            weightdecay=0.0)
trainer.setData(DS)
trainer.trainUntilConvergence(validationProportion=0.25, maxEpochs=100)

# validating
valid_data = days('btceUSD.days.csv')
for i in range(0, 105):
    valid_data.next()

window = []

max_price = 0.0
min_price = float('inf')

max_volume = 0.0
min_volume = float('inf')

print "Test window: "
Author: vladignatyev, Project: coin, Lines: 33, Source file: test.py

Example 11: ClassificationDataSet

# Required import: from pybrain.supervised.trainers import RPropMinusTrainer [as alias]
# Or: from pybrain.supervised.trainers.RPropMinusTrainer import trainUntilConvergence [as alias]
        # 1-of-N output encoding, N=10
        trndata = ClassificationDataSet(np.shape(train)[1], 10, nb_classes=10) 
        for i in xrange(np.shape(train)[0]):
            trndata.addSample(train[i], traint[i])
        validata = ClassificationDataSet(np.shape(valid)[1], 10, nb_classes=10) 
        for i in xrange(np.shape(valid)[0]):
            validata.addSample(valid[i], validt[i])
        testdata = ClassificationDataSet(np.shape(test)[1], 10, nb_classes=10)
        for i in xrange(np.shape(test)[0]):
            testdata.addSample(test[i], testt[i])
        
        #Build the network 
        if nlayers > 1:
            net = buildNetwork(trndata.indim, nhidden, nhiddeno, trndata.outdim, outclass=SoftmaxLayer )
        else:
            net = buildNetwork(trndata.indim, nhidden, trndata.outdim, outclass=SoftmaxLayer )
        # Construct the trainer object.
        # We could also train with plain backprop in pybrain using the same arguments: trainer = BackpropTrainer(...)
        trainer = RPropMinusTrainer(net, dataset=trndata, momentum=0.9, verbose=True, weightdecay=0.01, learningrate=0.1)
        #train and test
        trainer.trainUntilConvergence(maxEpochs=percent_dataset_usage*300)#,trainingData=trndata,validationData = validata)
        trainer.testOnData(verbose=True, dataset=testdata)
        
        
    print_NN_params() #remind us what architecture was tested
    print_time_elapsed(start)  #print training time
    filename = 'instances/NN_' +str(percent_dataset_usage) +'perc_'+ str(nhidden) + '_' +str(nhiddeno) +'.save'
    save_NN_instance(filename) #save trained object to disk
    
    
Author: JavierCrisostomo, Project: ocr_mnist, Lines: 30, Source file: run_MLP.py
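
Example 11 constructs its 1-of-10 target vectors up front. When the labels are stored as plain integers instead, the usual pybrain idiom is ClassificationDataSet together with _convertToOneOfMany(); a small sketch with made-up data (4 features, 3 classes):

from pybrain.datasets import ClassificationDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure.modules import SoftmaxLayer
from pybrain.supervised.trainers import RPropMinusTrainer

# Toy 3-class problem; feature values and labels are illustrative.
trndata = ClassificationDataSet(4, nb_classes=3)
for features, label in [([0.1, 0.2, 0.3, 0.4], 0),
                        ([0.9, 0.8, 0.7, 0.6], 1),
                        ([0.5, 0.5, 0.5, 0.5], 2),
                        ([0.2, 0.1, 0.4, 0.3], 0)]:
    trndata.addSample(features, [label])

# Expand the integer labels into 1-of-N target vectors so outdim matches the softmax output.
trndata._convertToOneOfMany()

net = buildNetwork(trndata.indim, 8, trndata.outdim, outclass=SoftmaxLayer)
trainer = RPropMinusTrainer(net, dataset=trndata, verbose=False)
trainer.trainUntilConvergence(maxEpochs=50, continueEpochs=5, validationProportion=0.25)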


Note: The pybrain.supervised.trainers.RPropMinusTrainer.trainUntilConvergence examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code; do not reproduce this article without permission.