This article collects typical code examples of the Python method pybrain.datasets.SupervisedDataSet.getField. If you are wondering what exactly SupervisedDataSet.getField does and how to use it, the hand-picked code examples below may help. You can also look further into the class it belongs to, pybrain.datasets.SupervisedDataSet, for more usage examples.
The following shows 3 code examples of SupervisedDataSet.getField, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
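Before the examples, here is a minimal self-contained sketch of what getField returns; it is not taken from any of the examples below, and the toy XOR data is purely illustrative:
from pybrain.datasets import SupervisedDataSet

# toy dataset: 2-dimensional inputs, 1-dimensional targets (XOR)
ds = SupervisedDataSet(2, 1)
ds.addSample((0, 0), (0,))
ds.addSample((0, 1), (1,))
ds.addSample((1, 0), (1,))
ds.addSample((1, 1), (0,))

# getField returns the underlying numpy array of the named field
inp = ds.getField("input")   # shape (4, 2)
tar = ds.getField("target")  # shape (4, 1)
print(inp.shape, tar.shape)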
Example 1: predict
# Required import: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import getField [as alias]
# (snippet truncated above: actualA and predictedA were computed on the training data earlier)
error = metrics.rmse(actualA, predictedA)
predictedA, actualA = predict(n, tstdata['input'], tstdata['target'])
error2 = metrics.rmse(actualA, predictedA)
graph.append((i, error, error2))
with open('results/graphs/' + filename, 'w') as fp:
    a = csv.writer(fp, delimiter=',')
    a.writerows(graph)
#
# Write the output of the final network
#
# assumes numpy helpers: array_split, concatenate and numpy.random.permutation
n_folds = 5
inp = DS.getField("input")
tar = DS.getField("target")
perms = array_split(permutation(DS.getLength()), n_folds)
performances = 0
for i in range(n_folds):
    # determine train indices
    train_perms_idxs = list(range(n_folds))
    train_perms_idxs.pop(i)
    temp_list = []
    for train_perms_idx in train_perms_idxs:
        temp_list.append(perms[train_perms_idx])
    train_idxs = concatenate(temp_list)
    # determine test indices
    test_idxs = perms[i]
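The snippet ends here. Purely as an illustration of the usual next step (the trainer, epoch count and error metric below are assumptions, not part of the original example), each fold is then turned into its own SupervisedDataSet by calling setField on the arrays that getField returned:
    # build this fold's train and test sets from the getField arrays
    train_ds = SupervisedDataSet(DS.indim, DS.outdim)
    train_ds.setField("input", inp[train_idxs])
    train_ds.setField("target", tar[train_idxs])
    test_ds = SupervisedDataSet(DS.indim, DS.outdim)
    test_ds.setField("input", inp[test_idxs])
    test_ds.setField("target", tar[test_idxs])
    # train on this fold and accumulate an error score (BackpropTrainer import assumed)
    trainer = BackpropTrainer(n, dataset=train_ds)
    trainer.trainEpochs(10)
    predictedA, actualA = predict(n, test_ds['input'], test_ds['target'])
    performances += metrics.rmse(actualA, predictedA)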
Example 2: Network
# Required import: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import getField [as alias]
class Network(object):
    def __init__(self, input_size, output_size, number_of_layers=3, size_of_hidden_layers=3, type_of_hidden_layer='sigmoid', net_bias=False, epochs=100):
        self.net = FeedForwardNetwork()
        self.num_epochs = epochs
        # set up layers of the network
        layers = []
        for i in range(number_of_layers):
            if i == 0:
                layers.append(LinearLayer(input_size))
                self.net.addInputModule(layers[i])
            elif i == (number_of_layers - 1):
                layers.append(LinearLayer(output_size))
                self.net.addOutputModule(layers[i])
                self.net.addConnection(FullConnection(layers[i-1], layers[i]))
            else:
                # hidden layer size is the mean of input and output sizes
                # (integer division so the layer dimension stays an int)
                if type_of_hidden_layer == 'linear':
                    layers.append(LinearLayer((input_size + output_size) // 2))
                elif type_of_hidden_layer == 'sigmoid':
                    layers.append(SigmoidLayer((input_size + output_size) // 2))
                elif type_of_hidden_layer == 'tanh':
                    layers.append(TanhLayer((input_size + output_size) // 2))
                self.net.addModule(layers[i])
                self.net.addConnection(FullConnection(layers[i-1], layers[i]))
        self.net.sortModules()
        self.input_size = input_size
        self.output_size = output_size

    def load(self, filedir):
        self.net = NetworkReader.readFrom(filedir)

    def save(self, filedir):
        NetworkWriter.writeToFile(self.net, filedir)

    def prepare_trainer(self, filedir):
        # initialize the data set
        self.ds = SupervisedDataSet(self.input_size, self.output_size)
        # load the training data
        with open(filedir, 'rt') as csvfile:
            reader = csv.reader(csvfile, delimiter=',')
            for row in reader:
                # format data: skip the first column, then read inputs, then targets
                input_data = tuple(map(float, row[1:(self.input_size + 1)]))
                output_data = tuple(map(float, row[(self.input_size + 1):(self.input_size + 1 + self.output_size)]))
                # print (output_data)
                # add to dataset
                self.ds.addSample(input_data, output_data)
        # use backpropagation to create a trainer
        self.trainer = BackpropTrainer(self.net, self.ds)

    def train(self, convergance):
        if convergance:
            self.trainer.trainUntilConvergence()
        else:
            self.trainer.trainEpochs(self.num_epochs)

    def query(self, input_data):
        return self.net.activate(input_data)

    def cross_vaildate(self):
        n_folds = 5
        max_epochs = self.num_epochs
        l = self.ds.getLength()
        inp = self.ds.getField("input")
        tar = self.ds.getField("target")
        indim = self.ds.indim
        outdim = self.ds.outdim
        assert l > n_folds
        perms = array_split(permutation(l), n_folds)
        perf = 0.
        for i in range(n_folds):
            # determine train indices
            train_perms_idxs = list(range(n_folds))
            train_perms_idxs.pop(i)
            temp_list = []
            for train_perms_idx in train_perms_idxs:
                temp_list.append(perms[train_perms_idx])
            train_idxs = concatenate(temp_list)
            # determine test indices
            test_idxs = perms[i]
            # train
            train_ds = SupervisedDataSet(indim, outdim)
            train_ds.setField("input", inp[train_idxs])
            train_ds.setField("target", tar[train_idxs])
            temp_trainer = copy.deepcopy(self.trainer)
            temp_trainer.setData(train_ds)
            if not max_epochs:
#......... part of the code omitted here .........
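For context, a hypothetical way to use this class might look like the following; the file paths and dimensions are made up for illustration and are not part of the original example:
net = Network(input_size=4, output_size=1, type_of_hidden_layer='tanh')
net.prepare_trainer('data/train.csv')    # CSV rows: id column, then 4 inputs, then 1 target
net.train(convergance=False)             # train for the configured number of epochs
print(net.query([0.1, 0.2, 0.3, 0.4]))   # activate the trained network on one sample
net.save('models/net.xml')               # persist via NetworkWriter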
Example 3: open
# Required import: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import getField [as alias]
with open('data/' + i) as f:
    next(f)
    for counter, line in enumerate(f):
        line = line.replace('\n', '')
        line = line.strip()
        line = line.split(',')
        values = line[1:]
        values = line[-20:]
        for j, value in enumerate(values):
            if value == '':
                values[j] = 0.0
        values = np.array(values, dtype='float32')
        ds.addSample(values[0:-1], values[-1])
#%%
l = len(ds.getField('input'))
w = len(ds.getField('input').transpose())
nonzero = {}
zero = {}
for i in xrange(w):
    nonzero[i] = np.count_nonzero(ds['input'].transpose()[i])
    zero[i] = len(ds.getField('input')) - np.count_nonzero(ds['input'].transpose()[i])
print nonzero
print zero
print labels
d_view = [(v, k) for k, v in zero.iteritems()]
d_view.sort(reverse=True)  # natively sort tuples by first element
i = 0
for v, k in d_view:
    i += 1
    print
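Since getField already returns a numpy array, the per-column zero counts built up in the loop above can also be computed in one vectorized step. This is an equivalent alternative, not part of the original example:
inp = ds.getField('input')
zero_counts = (inp == 0).sum(axis=0)      # zeros per input column
nonzero_counts = (inp != 0).sum(axis=0)   # non-zeros per input column
# columns with the most zeros first, matching the sorted d_view above
worst_first = np.argsort(zero_counts)[::-1]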