当前位置: 首页>>代码示例>>Python>>正文


Python supervised.RPropMinusTrainer类代码示例

本文整理汇总了Python中pybrain.supervised.RPropMinusTrainer的典型用法代码示例。如果您正苦于以下问题:Python RPropMinusTrainer类的具体用法?Python RPropMinusTrainer怎么用?Python RPropMinusTrainer使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


在下文中一共展示了RPropMinusTrainer类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: trainedLSTMNN2

def trainedLSTMNN2():
    """
    n = RecurrentNetwork()

    inp = LinearLayer(100, name = 'input')
    hid = LSTMLayer(30, name='hidden')
    out = LinearLayer(1, name='output')

    #add modules
    n.addOutputModule(out)
    n.addInputModule(inp)
    n.addModule(hid)

    #add connections
    n.addConnection(FullConnection(inp, hid))
    n.addConnection(FullConnection(hid, out))

    n.addRecurrentConnection(FullConnection(hid, hid))
    n.sortModules()
    """
    n = buildSimpleLSTMNetwork()

    print "Network created"
    d = load1OrderDataSet()
    print "Data loaded"
    t = RPropMinusTrainer(n, dataset=d, verbose=True)
    t.trainUntilConvergence()

    exportANN(n)

    return n
开发者ID:kamilsa,项目名称:KAIProject,代码行数:31,代码来源:honn.py

示例2: train

  def train(self, params):
    """Train the LSTM on the windowed history, then replay the training
    inputs so the recurrent state matches the end of the stream.

    :param params: dict with at least 'prediction_nstep' (input/target
        offset in steps) and 'num_epochs' keys
    """
    self.net.reset()

    # Build the sequential training set from the buffered history.
    ds = SequentialDataSet(self.nDimInput, self.nDimOutput)
    trainer = RPropMinusTrainer(self.net, dataset=ds, verbose=False)

    history = self.window(self.history, params)
    resets = self.window(self.resets, params)

    nstep = params['prediction_nstep']
    for i in xrange(nstep, len(history)):
      # Skip pairs that straddle a sequence reset.
      if not resets[i-1]:
        ds.addSample(self.inputEncoder.encode(history[i-nstep]),
                     self.outputEncoder.encode(history[i][0]))
      if resets[i]:
        ds.newSequence()

    if len(history) > 1:
      trainer.trainEpochs(params['num_epochs'])

    # Replay the training inputs; outputs are discarded on purpose --
    # only the recurrent state matters here.
    self.net.reset()
    for i in xrange(len(history) - nstep):
      self.net.activate(ds.getSample(i)[0])

      if resets[i]:
        self.net.reset()
开发者ID:chanceraine,项目名称:nupic.research,代码行数:33,代码来源:suite.py

示例3: main

def main():
	"""Train an LSTM on violin audio samples and synthesize new audio to a wav file."""
	N_GENERATE = 500000  # number of samples synthesized below
	# BUG FIX: the buffer must cover the whole generation loop; the original
	# allocated only 10000 entries and raised IndexError once i reached 10000.
	generated_data = [0] * N_GENERATE
	rate, data = get_data_from_wav("../../data/natabhairavi_violin.wav")
	data = data[1000:190000]
	print("Got wav")
	ds = SequentialDataSet(1, 1)
	# Each sample predicts its successor (cycle pads the final pair).
	for sample, next_sample in zip(data, cycle(data[1:])):
	    ds.addSample(sample, next_sample)

	net = buildNetwork(1, 5, 1,
                   hiddenclass=LSTMLayer, outputbias=False, recurrent=True)

	trainer = RPropMinusTrainer(net, dataset=ds)
	train_errors = [] # save errors for plotting later
	EPOCHS_PER_CYCLE = 5
	CYCLES = 10
	EPOCHS = EPOCHS_PER_CYCLE * CYCLES
	for i in xrange(CYCLES):
	    trainer.trainEpochs(EPOCHS_PER_CYCLE)
	    train_errors.append(trainer.testOnData())
	    epoch = (i+1) * EPOCHS_PER_CYCLE
	    print("\r epoch {}/{}".format(epoch, EPOCHS), end="")
	    stdout.flush()

	# Generate: feed each output back in as the next input.
	old_sample = [100]

	for i in xrange(N_GENERATE):
		new_sample = net.activate(old_sample)
		old_sample = new_sample
		generated_data[i] = new_sample[0]
		print(new_sample)

	wavfile.write("../../output/test.wav", rate, np.array(generated_data))
开发者ID:cy94,项目名称:ml2,代码行数:34,代码来源:rnn.py

示例4: train

  def train(self, params):
    """Build and train a fresh LSTM on the windowed history, replay the
    history to set its recurrent state, and return the trained network.

    :param params: dict with 'encoding_num', 'num_cells', 'output_bias',
        'num_epochs' and 'num_predictions' keys
    :return: the trained pybrain network
    """
    n = params['encoding_num']
    net = buildNetwork(n, params['num_cells'], n,
                       hiddenclass=LSTMLayer,
                       bias=True,
                       outputbias=params['output_bias'],
                       recurrent=True)
    net.reset()

    ds = SequentialDataSet(n, n)
    trainer = RPropMinusTrainer(net, dataset=ds)

    history = self.window(self.history, params)
    resets = self.window(self.resets, params)

    for i in xrange(1, len(history)):
      # Consecutive symbols form a sample unless a reset separates them.
      if not resets[i-1]:
        ds.addSample(self.encoder.encode(history[i-1]),
                     self.encoder.encode(history[i]))
      if resets[i]:
        ds.newSequence()

    if len(history) > 1:
      trainer.trainEpochs(params['num_epochs'])
      net.reset()

    # Replay history so the recurrent state reflects the stream.
    for i in xrange(len(history) - 1):
      # BUG FIX: activate the freshly trained local `net`, not `self.net`,
      # which this method never assigns.
      output = net.activate(self.encoder.encode(history[i]))
      self.encoder.classify(output, num=params['num_predictions'])

      if resets[i]:
        net.reset()

    return net
开发者ID:chanceraine,项目名称:nupic.research,代码行数:35,代码来源:suite.py

示例5: train

  def train(self, params):
    """
    Train LSTM network on buffered dataset history
    After training, run LSTM on history[:-1] to get the state correct
    :param params:
    :return:
    """
    if params['reset_every_training']:
      n = params['encoding_num']
      self.net = buildNetwork(n, params['num_cells'], n,
                               hiddenclass=LSTMLayer,
                               bias=True,
                               outputbias=params['output_bias'],
                               recurrent=True)
      self.net.reset()

    # prepare training dataset
    ds = SequentialDataSet(params['encoding_num'], params['encoding_num'])
    history = self.window(self.history, params)
    resets = self.window(self.resets, params)

    for i in xrange(1, len(history)):
      if not resets[i - 1]:
        ds.addSample(self.encoder.encode(history[i - 1]),
                     self.encoder.encode(history[i]))
      if resets[i]:
        ds.newSequence()

    print "Train LSTM network on buffered dataset of length ", len(history)
    if params['num_epochs'] > 1:
      trainer = RPropMinusTrainer(self.net,
                                  dataset=ds,
                                  verbose=params['verbosity'] > 0)

      if len(history) > 1:
        trainer.trainEpochs(params['num_epochs'])

      # run network on buffered dataset after training to get the state right
      self.net.reset()
      for i in xrange(len(history) - 1):
        symbol = history[i]
        output = self.net.activate(self.encoder.encode(symbol))
        self.encoder.classify(output, num=params['num_predictions'])

        if resets[i]:
          self.net.reset()
    else:
      self.trainer.setData(ds)
      self.trainer.train()

      # run network on buffered dataset after training to get the state right
      self.net.reset()
      for i in xrange(len(history) - 1):
        symbol = history[i]
        output = self.net.activate(self.encoder.encode(symbol))
        self.encoder.classify(output, num=params['num_predictions'])

        if resets[i]:
          self.net.reset()
开发者ID:rhyolight,项目名称:nupic.research,代码行数:59,代码来源:suite.py

示例6: trainLSTMnet

def trainLSTMnet(net, numTrainSequence, seedSeq=1):
  """Train *net* on freshly generated Reber-grammar sequences.

  A new dataset is drawn per sequence; the global rptPerSeq controls how
  many epochs each one is trained for. Returns the (mutated) network.
  """
  np.random.seed(seedSeq)  # reproducible sequence generation
  for seqIdx in xrange(numTrainSequence):
    (ds, in_seq, out_seq) = getReberDS(maxLength)
    print("train seq", seqIdx, sequenceToWord(in_seq))
    RPropMinusTrainer(net, dataset=ds).trainEpochs(rptPerSeq)

  return net
开发者ID:Starcounter-Jack,项目名称:nupic.research,代码行数:9,代码来源:reberSequencePrediction_LSTM.py

示例7: train

def train(d, cycles=100, epochs_per_cycle=7):
    """Train a 5-cell LSTM-layer network on series *d*.

    :param d: sequence of scalar samples
    :param cycles: number of train/test cycles
    :param epochs_per_cycle: epochs trained per cycle
    :return: (network, list of per-cycle test errors)
    """
    ds = SequentialDataSet(1, 1)
    net = buildNetwork(1, 5, 1, hiddenclass=LSTMLayer, outputbias=False, recurrent=False)

    # Each value predicts its successor (cycle closes the final pair).
    for current, successor in zip(d, cycle(d[1:])):
        ds.addSample(current, successor)

    trainer = RPropMinusTrainer(net, dataset=ds)
    train_errors = []  # one test error recorded per cycle
    for _ in xrange(cycles):
        trainer.trainEpochs(epochs_per_cycle)
        train_errors.append(trainer.testOnData())
        stdout.flush()

    return net, train_errors
开发者ID:Morgaroth,项目名称:msi_lab2,代码行数:15,代码来源:zadanie.py

示例8: train

def train(ds, net):
	"""Train *net* on *ds* with RProp- for a fixed budget of cycles.

	:param ds: pybrain dataset
	:param net: network to train (mutated in place)
	:return: (per-cycle test errors, EPOCHS, EPOCHS_PER_CYCLE)
	"""
	EPOCHS_PER_CYCLE = 5
	CYCLES = 100
	EPOCHS = EPOCHS_PER_CYCLE * CYCLES

	trainer = RPropMinusTrainer(net, dataset=ds)
	train_errors = []  # test error after every cycle, for plotting
	for cycle_idx in xrange(CYCLES):
	    trainer.trainEpochs(EPOCHS_PER_CYCLE)
	    train_errors.append(trainer.testOnData())
	    print("\r epoch {}/{}".format((cycle_idx + 1) * EPOCHS_PER_CYCLE, EPOCHS))
	    stdout.flush()

	return train_errors, EPOCHS, EPOCHS_PER_CYCLE
开发者ID:DUTANGx,项目名称:GI15-Group-Project-Time-Series,代码行数:18,代码来源:timeseries.py

示例9: train

  def train(self, params, verbose=False):

    if params['reset_every_training']:
      if verbose:
        print 'create lstm network'

      random.seed(6)
      if params['output_encoding'] == None:
        self.net = buildNetwork(self.nDimInput, params['num_cells'], self.nDimOutput,
                           hiddenclass=LSTMLayer, bias=True, outputbias=True, recurrent=True)
      elif params['output_encoding'] == 'likelihood':
        self.net = buildNetwork(self.nDimInput, params['num_cells'], self.nDimOutput,
                           hiddenclass=LSTMLayer, bias=True, outclass=SigmoidLayer, recurrent=True)

    self.net.reset()

    ds = SequentialDataSet(self.nDimInput, self.nDimOutput)
    networkInput = self.window(self.networkInput, params)
    targetPrediction = self.window(self.targetPrediction, params)

    # prepare a training data-set using the history
    for i in xrange(len(networkInput)):
      ds.addSample(self.inputEncoder.encode(networkInput[i]),
                   self.outputEncoder.encode(targetPrediction[i]))

    if params['num_epochs'] > 1:
      trainer = RPropMinusTrainer(self.net, dataset=ds, verbose=verbose)

      if verbose:
        print " train LSTM on ", len(ds), " records for ", params['num_epochs'], " epochs "

      if len(networkInput) > 1:
        trainer.trainEpochs(params['num_epochs'])

    else:
      self.trainer.setData(ds)
      self.trainer.train()

    # run through the training dataset to get the lstm network state right
    self.net.reset()
    for i in xrange(len(networkInput)):
      self.net.activate(ds.getSample(i)[0])
开发者ID:andrewmalta13,项目名称:nupic.research,代码行数:42,代码来源:run_lstm_suite.py

示例10: handle

    def handle(self, *args, **options):
        """Train an LSTM on recent prices for the given ticker and print
        in-sample predictions versus actuals.

        :param args: args[0] is the ticker symbol to predict
        """
        ticker = args[0]
        print("****** STARTING PREDICTOR " + ticker + " ******* ")
        prices = Price.objects.filter(symbol=ticker).order_by('-created_on').values_list('price',flat=True)
        # BUG FIX: list.reverse() mutates in place and returns None, so the
        # original passed None to normalization(). Slice with [::-1] to get
        # the prices oldest-first.
        data = normalization(list(prices[0:NUM_MINUTES_BACK])[::-1])
        data = [ int(x * MULT_FACTOR) for x in data]
        print(data)

        ds = SupervisedDataSet(5, 1)
        # BUG FIX: samples were added to an undefined name `DS` and the
        # resulting error silently swallowed, leaving the dataset empty.
        # Iterate only the valid 5-input/1-target windows instead.
        for i in xrange(len(data) - 5):
            ds.addSample((data[i], data[i+1], data[i+2], data[i+3], data[i+4]), (data[i+5],))

        net = buildNetwork(5, 40, 1, 
                           hiddenclass=LSTMLayer, outputbias=False, recurrent=True)

        trainer = RPropMinusTrainer(net, dataset=ds)
        train_errors = [] # save errors for plotting later
        EPOCHS_PER_CYCLE = 5
        CYCLES = 100
        EPOCHS = EPOCHS_PER_CYCLE * CYCLES
        for i in xrange(CYCLES):
            trainer.trainEpochs(EPOCHS_PER_CYCLE)
            train_errors.append(trainer.testOnData())
            epoch = (i+1) * EPOCHS_PER_CYCLE
            print("\r epoch {}/{}".format(epoch, EPOCHS), end="")
            stdout.flush()

        print()
        print("final error =", train_errors[-1])

        # Undo the MULT_FACTOR scaling before displaying predictions.
        for sample, target in ds.getSequenceIterator(0):
            show_pred_sample = net.activate(sample) / MULT_FACTOR
            show_sample = sample / MULT_FACTOR
            show_target = target / MULT_FACTOR
            show_diff = show_pred_sample - show_target
            show_diff_pct = 100 * show_diff / show_pred_sample
            print("{} => {}, act {}. ({}%)".format(show_sample[0],round(show_pred_sample[0],3),show_target[0],int(round(show_diff_pct[0],0))))
开发者ID:AnthonyNystrom,项目名称:pytrader,代码行数:40,代码来源:predict_price_v1a.py

示例11: say_hello_text

def say_hello_text(username = "World",text="You are good"):

    object_data_new = pd.read_csv('/Users/ruiyun_zhou/Documents/cmpe-274/data/data.csv')
    data_area_new = object_data_new[object_data_new.Area==username]
    data_area_new_1=data_area_new[data_area_new.Disease== text]
    data_list_new = data_area_new_1['Count'].values.tolist()
    print data_list_new.__len__()
    data_list=data_list_new
    ds = SequentialDataSet(1,1)
    isZero=0;
    for sample,next_sample in zip(data_list,cycle(data_list[1:])):
        ds.addSample(sample, next_sample)
        if sample:
            isZero=1

    if(isZero==0):
        return '[0, 0]'

    net = buildNetwork(1,5,1,hiddenclass=LSTMLayer,outputbias=False,recurrent=True)
    trainer = RPropMinusTrainer(net, dataset=ds)
    train_errors = [] # save errors for plotting later
    EPOCHS_PER_CYCLE = 5
    CYCLES = 10
    EPOCHS = EPOCHS_PER_CYCLE * CYCLES
    for i in xrange(CYCLES):
        print "Doing epoch %d" %i
        trainer.trainEpochs(EPOCHS_PER_CYCLE)
        train_errors.append(trainer.testOnData())
        epoch = (i+1) * EPOCHS_PER_CYCLE
#    return '<p>%d</p>\n' % (data_list_new.__len__())
#        print("final error =", train_errors[-1])
#    print "Value for last week is %4.1d" % abs(data_list[-1])
#    print "Value for next week is %4.1d" % abs(net.activate(data_list[-1]))
#    result = (abs(data_list[-1]))
    result = (abs(net.activate(data_list[-1])))
    result_1 = (abs(net.activate(result)))
    return '[%d, %d]' % (result,result_1)
开发者ID:farcryzry,项目名称:cmpe-274,代码行数:37,代码来源:application.py

示例12: Train

    def Train(self, dataset, error_observer, logger, dump_file):
        gradientCheck(self.m_net)

        net_dataset = SequenceClassificationDataSet(4, 2)
        for record in dataset:
            net_dataset.newSequence()

            gl_raises = record.GetGlRises()
            gl_min = record.GetNocturnalMinimum()

            if DayFeatureExpert.IsHypoglycemia(record):
                out_class = [1, 0]
            else:
                out_class = [0, 1]

            for gl_raise in gl_raises:
                net_dataset.addSample([gl_raise[0][0].total_seconds() / (24*3600), gl_raise[0][1] / 300, gl_raise[1][0].total_seconds() / (24*3600), gl_raise[1][1] / 300] , out_class)

        train_dataset, test_dataset = net_dataset.splitWithProportion(0.8)

        trainer = RPropMinusTrainer(self.m_net, dataset=train_dataset, momentum=0.8, learningrate=0.3, lrdecay=0.9, weightdecay=0.01, verbose=True)
        validator = ModuleValidator()

        train_error = []
        test_error = []
        for i in range(0, 80):
            trainer.trainEpochs(1)
            train_error.append(validator.MSE(self.m_net, train_dataset)) # here is validate func, think it may be parametrised by custom core function
            test_error.append(validator.MSE(self.m_net, test_dataset))
            print train_error
            print test_error
            error_observer(train_error, test_error)
            gradientCheck(self.m_net)

        dump_file = open(dump_file, 'wb')
        pickle.dump(self.m_net, dump_file)
开发者ID:sersajur,项目名称:NeuralPredictor,代码行数:36,代码来源:RNNPredictor.py

示例13: train

def train(data,name):
    ds = SequentialDataSet(1, 1)
    for sample, next_sample in zip(data, cycle(data[1:])):
        ds.addSample(sample, next_sample)
    net = buildNetwork(1, 200, 1, hiddenclass=LSTMLayer, outputbias=False, recurrent=True)

    trainer = RPropMinusTrainer(net, dataset=ds)
    train_errors = [] # save errors for plotting later
    EPOCHS_PER_CYCLE = 5
    CYCLES = 20
    EPOCHS = EPOCHS_PER_CYCLE * CYCLES
    store=[]
    for i in xrange(CYCLES):
        trainer.trainEpochs(EPOCHS_PER_CYCLE)
        train_errors.append(trainer.testOnData())
        epoch = (i+1) * EPOCHS_PER_CYCLE
        print("\r epoch {}/{}".format(epoch, EPOCHS))
        print tm.time()-atm
        stdout.flush() 
    for sample, target in ds.getSequenceIterator(0):
        store.append(net.activate(sample))
    abcd=pd.DataFrame(store)
    abcd.to_csv(pwd+"lstmdata/"+name+".csv",encoding='utf-8')
    print "result printed to file"
开发者ID:elishaROBINSON,项目名称:stock_Prediction_Neural_net,代码行数:24,代码来源:neural_net_train&store_data.py

示例14: RecurrentNetwork


# Stack ten 20-unit LSTM layers between a 10-unit input and a 10-unit output.
layerCount = 10

net = RecurrentNetwork()
net.addInputModule(LinearLayer(10, name='in'))
for x in range(layerCount):
    net.addModule(LSTMLayer(20, name='hidden' + str(x)))
net.addOutputModule(LinearLayer(10, name='out'))
# NOTE(review): the input feeds 'hidden1', yet the chain below starts at
# 'hidden0' -- so 'hidden0' receives no input connection and 'hidden1' gets
# two. This looks like it should be net['hidden0']; confirm against the
# original project before changing.
net.addConnection(FullConnection(net['in'], net['hidden1'], name='cIn'))
# Chain each hidden layer to the next one.
for x in range(layerCount - 1):
    net.addConnection(FullConnection(net[('hidden' + str(x))], net['hidden' + str(x + 1)], name=('c' + str(x + 1))))
net.addConnection(FullConnection(net['hidden' + str(layerCount - 1)], net['out'], name='cOut'))
net.sortModules()
from pybrain.supervised import RPropMinusTrainer
# `ds` is defined earlier in the original file (not visible in this excerpt).
trainer = RPropMinusTrainer(net, dataset=ds)

epochcount = 0
# Endless generation loop: seed a two-voice [note, duration, ...] vector and
# feed the network's output back as its next input to grow a 50-step song.
while True:
    startingnote = random.choice(range(1, 17))
    startingnote2 = random.choice(range(1, 17))
    startingduration = random.choice(range(1,17))
    startingduration2 = random.choice(range(1, 17))
    song = [[startingnote, startingduration, 1, 1, 0, startingnote2, startingduration2, 1, 1, 0]]
    length = 50
    while len(song) < length:
        song.append(net.activate(song[-1]).tolist())
    newsong = []
    # The excerpt is truncated here; processing of newx/newy continues
    # beyond this view in the original file.
    for x in song:
        newx = []
        newy = []
开发者ID:ml-lab,项目名称:Bach_AI,代码行数:29,代码来源:musicnetwork.py

示例15: SequentialDataSet

# Recurrent wiring for a hand-built LSTM-style module `h` feeding output
# layer `o`; `h`, `o` and `dim` are defined earlier in the original file,
# outside this excerpt.
net.addRecurrentConnection(FullConnection(h, h, inSliceTo = dim, outSliceTo = 4*dim, name = 'r1'))
# Identity connection carries the state slice forward unchanged each step.
net.addRecurrentConnection(IdentityConnection(h, h, inSliceFrom = dim, outSliceFrom = 4*dim, name = 'rstate'))
net.addConnection(FullConnection(h, o, inSliceTo = dim, name = 'f3'))
net.sortModules()

print net

# 15 input features per row, 1 target value.
ds = SequentialDataSet(15, 1)
ds.newSequence()

# Load training rows from the CSV given as argv[1].
# NOTE(review): inputs are columns 0-14 but the target is column 16, so
# column 15 is skipped entirely -- confirm this matches the data format.
input = open(sys.argv[1], 'r')
for line in input.readlines():
    row = np.array(line.split(','))
    ds.addSample([float(x) for x in row[:15]], float(row[16]))
print ds

# Optional second CSV (argv[2]) provides a held-out test set; otherwise
# evaluation reuses the training data.
if len(sys.argv) > 2:
    test = SequentialDataSet(15, 1)
    test.newSequence()
    input = open(sys.argv[2], 'r')
    for line in input.readlines():
        row = np.array(line.split(','))
        test.addSample([float(x) for x in row[:15]], float(row[16]))
else:
    test = ds
print test

net.reset()
trainer = RPropMinusTrainer( net, dataset=ds, verbose=True)
trainer.trainEpochs(1000)
evalRnnOnSeqDataset(net, test, verbose = True)
开发者ID:babsher,项目名称:ann-indicator,代码行数:31,代码来源:rann.py


注:本文中的pybrain.supervised.RPropMinusTrainer类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。