

Python BackpropTrainer.train Method Code Examples

This article collects and summarizes typical usage examples of the pybrain.supervised.BackpropTrainer.train method in Python. If you are wondering exactly what BackpropTrainer.train does, how to call it, or want to see it used in context, the curated code examples below may help. You can also explore further usage examples of pybrain.supervised.BackpropTrainer, the class this method belongs to.


Fifteen code examples of the BackpropTrainer.train method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
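Before the individual examples, here is a minimal end-to-end sketch of the pattern they all share: build a dataset, build a network, construct a BackpropTrainer, and call train() once per epoch (each call runs one pass over the dataset and returns its average error). The XOR data, layer sizes, and hyperparameters below are illustrative assumptions rather than values taken from any of the examples that follow.

# Minimal sketch of the BackpropTrainer.train workflow (assumed XOR data and hyperparameters).
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised import BackpropTrainer
from pybrain.tools.shortcuts import buildNetwork

# Supervised dataset with 2 inputs and 1 output: the XOR truth table.
ds = SupervisedDataSet(2, 1)
ds.addSample((0, 0), (0,))
ds.addSample((0, 1), (1,))
ds.addSample((1, 0), (1,))
ds.addSample((1, 1), (0,))

# Feed-forward network: 2 input, 3 hidden, 1 output units.
net = buildNetwork(2, 3, 1, bias=True)

# Each call to train() performs one epoch of backpropagation over the
# dataset and returns the average error for that epoch.
trainer = BackpropTrainer(net, ds, learningrate=0.01, momentum=0.9)
for epoch in range(1000):
    error = trainer.train()

print("final training error:", error)
print("output for (1, 0):", net.activate((1, 0)))

In the collected examples below, the same loop appears either for a fixed number of epochs or inside a while loop that stops once the returned error drops below a threshold.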

Example 1: training

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import train [as alias]
def training(d):
    """
    Builds a network and trains it.
    """
    n = buildNetwork(d.indim, 4, d.outdim, recurrent=True)
    t = BackpropTrainer(n, d, learningrate=0.01, momentum=0.99, verbose=True)
    for epoch in range(500):
        t.train()
    return t
Developer: NealSchneier, Project: finance, Lines of code: 11, Source file: xorNetwork.py

Example 2: train

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import train [as alias]
    def train(self, train_data_set, test_data_set, epoch=100):
        trainer = BackpropTrainer(self.network, train_data_set)

        progress_bar = ProgressBar(epoch)

        for i in range(epoch):
            progress_bar.update(i+1)
            time.sleep(0.01)
            trainer.train()

        return trainer.testOnData(test_data_set, verbose=True)
Developer: piruty-joy, Project: voice_actor_recog, Lines of code: 13, Source file: NN.py

Example 3: train_network

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import train [as alias]
def train_network(d, iterations):
    print("Training")
    n = buildNetwork(d.indim, 4, d.outdim, bias=True)
    t = BackpropTrainer(
        n,
        d,
        learningrate=0.01,
        momentum=0.99,
        verbose=False)
    for epoch in range(iterations):
        t.train()
    return n
Developer: di, Project: astro-pybrain, Lines of code: 14, Source file: brain.py

Example 4: trained_cat_dog_RFCNN

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import train [as alias]
def trained_cat_dog_RFCNN():
    n = RecurrentNetwork()

    d = get_cat_dog_trainset()
    input_size = d.getDimension('input')
    n.addInputModule(LinearLayer(input_size, name='in'))
    n.addModule(SigmoidLayer(input_size+1500, name='hidden'))
    n.addOutputModule(LinearLayer(2, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
    n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
    n.addRecurrentConnection(FullConnection(n['out'], n['hidden'], name='nmc'))
    n.sortModules()

    t = BackpropTrainer(n, d, learningrate=0.0001)#, momentum=0.75)

    count = 0
    while True:
        globErr = t.train()
        print(globErr)
        count += 1
        if globErr < 0.01:
            break
        if count == 30:
            break

    exportCatDogRFCNN(n)
    return n
Developer: DianaShatunova, Project: NEUCOGAR, Lines of code: 29, Source file: main.py

Example 5: trainedRNN

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import train [as alias]
def trainedRNN():
    n = RecurrentNetwork()

    n.addInputModule(LinearLayer(4, name='in'))
    n.addModule(SigmoidLayer(6, name='hidden'))
    n.addOutputModule(LinearLayer(2, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
    n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))

    n.addRecurrentConnection(NMConnection(n['out'], n['out'], name='nmc'))
    # n.addRecurrentConnection(FullConnection(n['out'], n['hidden'], inSliceFrom = 0, inSliceTo = 1, outSliceFrom = 0, outSliceTo = 3))
    n.sortModules()

    draw_connections(n)
    d = getDatasetFromFile(root.path()+"/res/dataSet")
    t = BackpropTrainer(n, d, learningrate=0.001, momentum=0.75)
    t.trainOnDataset(d)

    count = 0
    while True:
        globErr = t.train()
        print(globErr)
        if globErr < 0.01:
            break
        count += 1
        if count == 50:
            return trainedRNN()
    # exportRNN(n)
    draw_connections(n)

    return n
Developer: DianaShatunova, Project: NEUCOGAR, Lines of code: 33, Source file: main.py

Example 6: trainedANN

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import train [as alias]
def trainedANN():
    n = FeedForwardNetwork()

    n.addInputModule(LinearLayer(4, name='in'))
    n.addModule(SigmoidLayer(6, name='hidden'))
    n.addOutputModule(LinearLayer(2, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
    n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))

    n.sortModules()

    draw_connections(n)
    # d = generateTrainingData()
    d = getDatasetFromFile(root.path()+"/res/dataSet")
    t = BackpropTrainer(n, d, learningrate=0.001, momentum=0.75)
    t.trainOnDataset(d)
    # FIXME: I'm not sure the recurrent ANN is going to converge
    # so just training for fixed number of epochs

    count = 0
    while True:
        globErr = t.train()
        print(globErr)
        if globErr < 0.01:
            break
        count += 1
        if count == 20:
            return trainedANN()

    exportANN(n)
    draw_connections(n)

    return n
Developer: DianaShatunova, Project: NEUCOGAR, Lines of code: 35, Source file: main.py

Example 7: trained_cat_dog_ANN

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import train [as alias]
def trained_cat_dog_ANN():
    n = FeedForwardNetwork()
    d = get_cat_dog_trainset()
    input_size = d.getDimension('input')
    n.addInputModule(LinearLayer(input_size, name='in'))
    n.addModule(SigmoidLayer(input_size+1500, name='hidden'))
    n.addOutputModule(LinearLayer(2, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
    n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
    n.sortModules()
    n.convertToFastNetwork()
    print('successfully converted to fast network')
    t = BackpropTrainer(n, d, learningrate=0.0001)#, momentum=0.75)

    count = 0
    while True:
        globErr = t.train()
        print(globErr)
        count += 1
        if globErr < 0.01:
            break
        if count == 30:
            break


    exportCatDogANN(n)
    return n
Developer: DianaShatunova, Project: NEUCOGAR, Lines of code: 29, Source file: main.py

Example 8: train

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import train [as alias]
def train():
    f = open('train.csv', 'r')

    csv_reader = csv.reader(f)

    dataset = SupervisedDataSet(64, 1)
    for d in csv_reader:
        dataset.addSample(d[0:64], d[64])

    network = buildNetwork(64, 19, 1)
    trainer = BackpropTrainer(network, dataset)
    for i in range(100):
        trainer.train()

    NetworkWriter.writeToFile(network, "model.xml")

    f.close()
Developer: piruty-joy, Project: imagedetecter, Lines of code: 19, Source file: face-image-detector.py

Example 9: __init__

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import train [as alias]
class PredictorTrainer:
    def __init__(self, euro_predictor):
        self.euro_predictor = euro_predictor
        self.trainer = BackpropTrainer(euro_predictor.net, euro_predictor.ds)
        self.errors = []

    def train(self, error):
        self.trainer.train()
        e = self.trainer.train()
        errors = []
        while e > error:
            e = self.trainer.train()
            errors.append(e)
            print(e)
        self.errors = errors
        return errors

    def determined_train(self, iterations):
        self.trainer.train()
        self.trainer.train()
        errors = []
        for i in range(iterations):
            e = self.trainer.train()
            errors.append(e)
            print(e)
        self.errors = errors
        return errors

    def plot_errors(self):
        xs = [i for i in range(len(self.errors))]
        ys = self.errors

        plt.plot(xs, ys)
        plt.show()
Developer: goodot, Project: euro-prediction-georgia, Lines of code: 36, Source file: euro_pred.py

Example 10: train

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import train [as alias]
 def train(self, data, iterations=NETWORK_ITERATIONS):
     for item in data:
         self.dataset.addSample(item[0], item[1])
     trainer = BackpropTrainer(self.network, self.dataset, learningrate=NETWORK_LEARNING_RATE,
                               momentum=NETWORK_MOMENTUM)
     error = 0
     for i in range(iterations):
         error = trainer.train()
         print(i + 1, error)
     return error
Developer: zacharyliu, Project: CarDetect, Lines of code: 12, Source file: analyzer.py

Example 11: train

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import train [as alias]
def train():
    f = open('train_tower.csv', 'r')

    csvreader = csv.reader(f)

    dataset = SupervisedDataSet(64, 2)
    for d in csvreader:
        if d[64] == '0':
            dataset.addSample(d[0:64], [1, 0])
        else:
            dataset.addSample(d[0:64], [0, 1])

    network = buildNetwork(64, 19, 2)
    trainer = BackpropTrainer(network, dataset)
    for i in range(100):
        trainer.train()
    trainer.testOnData(dataset, verbose=True)

    NetworkWriter.writeToFile(network, "tower.xml")

    f.close()
Developer: piruty-joy, Project: imagedetecter, Lines of code: 23, Source file: tower_detector.py

Example 12: _train

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import train [as alias]
def _train(X, Y, filename, epochs=50):
    global nn
    nn = buildNetwork(INPUT_SIZE, HIDDEN_LAYERS, OUTPUT_LAYER, bias=True, outclass=SoftmaxLayer)
    ds = ClassificationDataSet(INPUT_SIZE, OUTPUT_LAYER)
    for x, y in zip(X, Y):
        ds.addSample(x, y)
    trainer = BackpropTrainer(nn, ds)
    for i in range(epochs):
        error = trainer.train()
        print("Epoch: %d, Error: %7.4f" % (i + 1, error))
    # trainer.trainUntilConvergence(verbose=True, maxEpochs=epochs, continueEpochs=10)
    if filename:
        NetworkWriter.writeToFile(nn, 'data/' + filename + '.nn')
Developer: igorcoding, Project: digrecog, Lines of code: 15, Source file: views.py

Example 13: training

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import train [as alias]
    def training(self, d):
        """
        Builds a network, trains it, and returns it.
        """

        self.net = FeedForwardNetwork()

        inLayer = LinearLayer(4)  # 4 inputs
        hiddenLayer = SigmoidLayer(3)  # 3 neurons in the hidden layer with a sigmoid activation
        outLayer = LinearLayer(2)  # 2 neurons in the output layer

        # add layers to the network
        self.net.addInputModule(inLayer)
        self.net.addModule(hiddenLayer)
        self.net.addOutputModule(outLayer)

        "create connections"
        in_to_hidden = FullConnection(inLayer, hiddenLayer)
        hidden_to_out = FullConnection(hiddenLayer, outLayer)

        "add connections"
        self.net.addConnection(in_to_hidden)
        self.net.addConnection(hidden_to_out)

        "some unknown but necessary function :)"
        self.net.sortModules()

        print self.net

        "generate big sized training set"
        trainingSet = SupervisedDataSet(4,2)

        trainArr = self.generate_training_set()
        for ri in range(2000):
            input = ((trainArr[0][ri][0],trainArr[0][ri][1],trainArr[0][ri][2],trainArr[0][ri][3]))
            target = ((trainArr[1][ri][0],trainArr[1][ri][1]))
            trainingSet.addSample(input, target)

        "create backpropogation trainer"
        t = BackpropTrainer(self.net,d,learningrate=0.00001, momentum=0.99)
        while True:
            globErr = t.train()
            print "global error:", globErr
            if globErr < 0.0001:
                break

        return self.net
Developer: MFarida, Project: NEUCOGAR, Lines of code: 50, Source file: Main.py

Example 14: trainedLSTMNN

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import train [as alias]
def trainedLSTMNN():
    """
    n = RecurrentNetwork()

    inp = LinearLayer(100, name = 'input')
    hid = LSTMLayer(30, name='hidden')
    out = LinearLayer(1, name='output')

    #add modules
    n.addOutputModule(out)
    n.addInputModule(inp)
    n.addModule(hid)

    #add connections
    n.addConnection(FullConnection(inp, hid))
    n.addConnection(FullConnection(hid, out))

    n.addRecurrentConnection(FullConnection(hid, hid))
    n.sortModules()
    """
    n = buildNetwork(100, 50, 1, hiddenclass=LSTMLayer, outputbias=False, recurrent=True)

    print("Network created")
    d = load1OrderDataSet()
    print("Data loaded")
    t = BackpropTrainer(n, d, learningrate=0.001, momentum=0.75)
    # FIXME: I'm not sure the recurrent ANN is going to converge
    # so just training for fixed number of epochs
    print("Learning started")
    count = 0
    while True:
        globErr = t.train()
        print("iteration #", count, " error = ", globErr)
        if globErr < 0.1:
            break
        count = count + 1
        # if (count == 60):
        #     break

    # for i in range(100):
    #     print t.train()


    exportANN(n)

    return n
Developer: kamilsa, Project: KAIProject, Lines of code: 48, Source file: honn.py

Example 15: trained3ONN

# Required import: from pybrain.supervised import BackpropTrainer [as alias]
# Or: from pybrain.supervised.BackpropTrainer import train [as alias]
def trained3ONN():
    n = FeedForwardNetwork()

    inp = LinearLayer(176850, name = 'input')
    hid = LinearLayer(3, name='hidden')
    out = LinearLayer(1, name='output')

    #add modules
    n.addOutputModule(out)
    n.addInputModule(inp)
    n.addModule(hid)

    #add connections
    n.addConnection(FullConnection(inp, hid, inSliceTo = 100, outSliceTo = 1))
    n.addConnection(FullConnection(inp, hid, inSliceFrom = 100, inSliceTo = 5150, outSliceFrom = 1, outSliceTo = 2))
    n.addConnection(FullConnection(inp, hid, inSliceFrom = 5150, outSliceFrom = 2))
    n.addConnection(FullConnection(hid, out))

    n.sortModules()
    print "Network created"
    d = load3OrderDataSet()
    print "Data loaded"
    t = BackpropTrainer(n, d, learningrate=0.001, momentum=0.75)
    # FIXME: I'm not sure the recurrent ANN is going to converge
    # so just training for fixed number of epochs
    print "Learning started"
    count = 0
    while True:
        globErr = t.train()
        print "iteration #", count," error = ", globErr
        if globErr < 0.01:
            break
        count = count + 1
        # if (count == 100):
        #     break

    # for i in range(100):
    #     print t.train()


    exportANN(n)

    return n
Developer: kamilsa, Project: KAIProject, Lines of code: 45, Source file: honn.py


Note: The pybrain.supervised.BackpropTrainer.train examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please refer to each project's license before distributing or using the code; do not reproduce this article without permission.