This article collects typical usage examples of the pybrain.datasets.SupervisedDataSet.getSample method in Python. If you are wondering what exactly SupervisedDataSet.getSample does, how to call it, or what example code looks like, the curated examples below may help. You can also look further into usage examples of the containing class, pybrain.datasets.SupervisedDataSet.
Three code examples of the SupervisedDataSet.getSample method are shown below, sorted by popularity by default.
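Before the examples, here is a minimal sketch of what getSample returns, written in the same Python 2 style as the snippets below; the dataset shape and values are made up purely for illustration.

from pybrain.datasets import SupervisedDataSet

# toy dataset: 2 inputs and 1 target per sample (values are arbitrary)
ds = SupervisedDataSet(2, 1)
ds.addSample((0, 0), (0,))
ds.addSample((0, 1), (1,))

# getSample(i) returns the linked fields of sample i as [input_array, target_array]
inp, target = ds.getSample(1)
print inp, target          # -> [ 0.  1.] [ 1.]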
Example 1: tuple
# Required import: from pybrain.datasets import SupervisedDataSet [as alias]
# Or alternatively: from pybrain.datasets.SupervisedDataSet import getSample [as alias]
indata = tuple(data[:20])              # first 20 values are the inputs
outdata = tuple(data[20:])             # the rest are the targets
testDataSet.addSample(indata, outdata)
print "NORMALIZING DATASET"
# per-feature mean of the 20 inputs
mx = numpy.zeros(20, dtype=numpy.float64)
for inp, out in dataSet:
    for i in range(0, len(inp)):
        mx[i] += inp[i]
for i in range(0, len(mx)):
    mx[i] = mx[i] / len(dataSet)
# per-feature standard deviation, read sample by sample via getSample(i)
nfactor = numpy.zeros(20, dtype=numpy.float64)
for i in range(0, len(dataSet)):
    for j in range(0, len(nfactor)):
        t = dataSet.getSample(i)[0][j]          # j-th input value of sample i
        nfactor[j] += (t - mx[j]) * (t - mx[j])
for i in range(0, len(nfactor)):
    nfactor[i] = nfactor[i] / len(dataSet)
    nfactor[i] = math.sqrt(nfactor[i])
print nfactor
# standardize every input in place: (x - mean) / std, skipping constant features
for inp, out in dataSet:
    for i in range(0, len(inp)):
        if nfactor[i] != 0:
            inp[i] = (inp[i] - mx[i]) / nfactor[i]
    #print inp
print "TRAINING OUR NEURAL NET"
neuralNet = buildNetwork(20, 6, 1)              # 20 inputs, 6 hidden units, 1 output
trainer = BackpropTrainer(neuralNet, dataSet)
t = ""
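As a side note, the loop-based standardization above can be collapsed into a few numpy array operations on the dataset's 'input' field. The sketch below is not part of the original example; it assumes numpy is imported as above and that the field lookup returns a writable view of the underlying data, as it does in stock pybrain.

inputs = dataSet['input']                     # (n_samples, 20) view of the input field
mx = inputs.mean(axis=0)                      # per-feature mean
nfactor = inputs.std(axis=0)                  # per-feature (population) standard deviation
nz = nfactor != 0                             # skip constant features to avoid division by zero
inputs[:, nz] = (inputs[:, nz] - mx[nz]) / nfactor[nz]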
Example 2: range
# Required import: from pybrain.datasets import SupervisedDataSet [as alias]
# Or alternatively: from pybrain.datasets.SupervisedDataSet import getSample [as alias]
outputMin = output
trains = []            # training error for each hidden-layer size
tests = []             # test error for each hidden-layer size
epochsNums = []
parameters = range(1, 200)          # candidate hidden-layer sizes
for i in parameters:
    tstdata, trndata = ds.splitWithProportion(0.25)
    hidden_size = i
    numOfEpocs = 10
    # shortcut alternative, left disabled in the original:
    """
    n = buildNetwork( 1, hidden_size, 1, bias = True )
    """
    # build the network by hand; layer sizes are taken from the first sample of ds
    inLayer = LinearLayer(len(ds.getSample(0)[0]))
    hiddenLayer = SigmoidLayer(hidden_size)
    outLayer = LinearLayer(len(ds.getSample(0)[1]))
    n = FeedForwardNetwork()
    n.addInputModule(inLayer)
    n.addModule(hiddenLayer)
    b = BiasUnit()
    n.addModule(b)
    n.addOutputModule(outLayer)
    in_to_hidden = FullConnection(inLayer, hiddenLayer)
    hidden_to_out = FullConnection(hiddenLayer, outLayer)
    b_to_hidden = FullConnection(b, hiddenLayer)
    b_to_out = FullConnection(b, outLayer)
    n.addConnection(in_to_hidden)
    n.addConnection(hidden_to_out)
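    # --- Note: the excerpt above is truncated before the remaining connections are
    # registered. A plausible continuation, sketched from the variables already defined
    # above (b_to_hidden, b_to_out, trains, tests, numOfEpocs); this is a guess at the
    # omitted code, not the original.
    n.addConnection(b_to_hidden)
    n.addConnection(b_to_out)
    n.sortModules()                                # finalize the network topology
    trainer = BackpropTrainer(n, trndata)
    for _ in range(numOfEpocs):
        trainer.train()                            # one epoch of backpropagation
    trains.append(trainer.testOnData(trndata))     # MSE on the training split
    tests.append(trainer.testOnData(tstdata))      # MSE on the held-out split
    epochsNums.append(numOfEpocs)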
Example 3: ClassificationDataSet
# Required import: from pybrain.datasets import SupervisedDataSet [as alias]
# Or alternatively: from pybrain.datasets.SupervisedDataSet import getSample [as alias]
# split the data into testing and training sets
tstdata_temp, trndata_temp = alldata.splitWithProportion(0.15)
# splitWithProportion can return plain SupervisedDataSets, which breaks _convertToOneOfMany;
# rebuilding the splits as ClassificationDataSets works around that
tstdata = ClassificationDataSet(num_features, 1, nb_classes=2)
for n in xrange(0, tstdata_temp.getLength()):
    tstdata.addSample(tstdata_temp.getSample(n)[0], tstdata_temp.getSample(n)[1])
trndata = ClassificationDataSet(num_features, 1, nb_classes=2)
for n in xrange(0, trndata_temp.getLength()):
    trndata.addSample(trndata_temp.getSample(n)[0], trndata_temp.getSample(n)[1])
valdata = ClassificationDataSet(num_features, 1, nb_classes=2)
for n in xrange(0, stimalldata.getLength()):
    valdata.addSample(stimalldata.getSample(n)[0], stimalldata.getSample(n)[1])
# convert targets to one-of-many (one-hot) encoding, as pybrain classifiers expect
trndata._convertToOneOfMany()
tstdata._convertToOneOfMany()
valdata._convertToOneOfMany()
# sample printouts before running the classifier
print "Number of training patterns: ", len(trndata)
print "Input and output dimensions: ", trndata.indim, trndata.outdim
print "First sample (input, target, class):"
print trndata['input'][0], trndata['target'][0], trndata['class'][0]
# build the ANN: 2 hidden layers (4 layers total)
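The snippet stops at the comment announcing the network build. A sketch of how that two-hidden-layer classifier could be built and trained with standard pybrain calls follows; the hidden-layer sizes, training parameters, and epoch count are illustrative assumptions, not taken from the original.

from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure.modules import SoftmaxLayer
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.utilities import percentError

# 2 hidden layers; the sizes (10, 10) are assumptions for illustration
fnn = buildNetwork(trndata.indim, 10, 10, trndata.outdim, outclass=SoftmaxLayer)
trainer = BackpropTrainer(fnn, dataset=trndata, learningrate=0.01, momentum=0.1, verbose=True)
trainer.trainEpochs(20)                      # epoch count is an assumption
print "test error: %5.2f%%" % percentError(
    trainer.testOnClassData(dataset=tstdata), tstdata['class'])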