This article collects and summarizes typical usage examples of the Python method pybrain.datasets.SupervisedDataSet.getSequenceIterator. If you are wondering what exactly SupervisedDataSet.getSequenceIterator does, or how to call it, the curated code example below may help; you can also look further into other usages of its class, pybrain.datasets.SupervisedDataSet.
One code example of the SupervisedDataSet.getSequenceIterator method is shown below.
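Before the full example, here is a minimal sketch of the basic pattern: build a dataset, add (input, target) samples, then walk through one sequence with getSequenceIterator. The sketch is an assumption-laden outline rather than code taken from the example below: it uses PyBrain's SequentialDataSet (the sequence-aware subclass of SupervisedDataSet) and toy values chosen purely for illustration.

from pybrain.datasets import SequentialDataSet

# 1 input value and 1 target value per sample (toy dimensions for illustration)
ds = SequentialDataSet(1, 1)

# each sample pairs a value with the value that follows it in the series
series = [1.0, 2.0, 3.0, 4.0, 5.0]
for current, following in zip(series, series[1:]):
    ds.addSample(current, following)

# getSequenceIterator(0) yields the (input, target) pairs of the first sequence
for sample, target in ds.getSequenceIterator(0):
    print(sample, target)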
Example 1: handle
# Required import: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import getSequenceIterator [as alias]
def handle(self, *args, **options):
    ticker = args[0]
    print("****** STARTING PREDICTOR " + ticker + " ******* ")
    prices = Price.objects.filter(symbol=ticker).order_by('-created_on').values_list('price', flat=True)
    # take the most recent NUM_MINUTES_BACK prices and put them back into chronological order
    data = normalization(list(reversed(prices[0:NUM_MINUTES_BACK])))
    data = [int(x * MULT_FACTOR) for x in data]
    print(data)
    ds = SupervisedDataSet(5, 1)
    # sliding window: 5 consecutive values as input, the following value as target;
    # the IndexError raised once the window runs past the end of data ends the loop
    try:
        for i, val in enumerate(data):
            ds.addSample((data[i], data[i+1], data[i+2], data[i+3], data[i+4]), (data[i+5],))
    except Exception:
        pass
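    # build a recurrent network: 5 inputs -> 40 LSTM hidden units -> 1 output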
    net = buildNetwork(5, 40, 1,
                       hiddenclass=LSTMLayer, outputbias=False, recurrent=True)
    trainer = RPropMinusTrainer(net, dataset=ds)
    train_errors = []  # save errors for plotting later
    EPOCHS_PER_CYCLE = 5
    CYCLES = 100
    EPOCHS = EPOCHS_PER_CYCLE * CYCLES
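    # train in CYCLES rounds of EPOCHS_PER_CYCLE epochs, recording the dataset error after each round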
    for i in range(CYCLES):
        trainer.trainEpochs(EPOCHS_PER_CYCLE)
        train_errors.append(trainer.testOnData())
        epoch = (i + 1) * EPOCHS_PER_CYCLE
        print("\r epoch {}/{}".format(epoch, EPOCHS), end="")
        stdout.flush()
    print()
    print("final error =", train_errors[-1])
    for sample, target in ds.getSequenceIterator(0):
        show_pred_sample = net.activate(sample) / MULT_FACTOR
        show_sample = sample / MULT_FACTOR
        show_target = target / MULT_FACTOR
        show_diff = show_pred_sample - show_target
        show_diff_pct = 100 * show_diff / show_pred_sample
        print("{} => {}, act {}. ({}%)".format(show_sample[0], round(show_pred_sample[0], 3), show_target[0], int(round(show_diff_pct[0], 0))))