This page collects typical code examples of the Python method pybrain.datasets.SupervisedDataSet.appendLinked. If you are wondering what SupervisedDataSet.appendLinked does, how to call it, or what real-world usage looks like, the curated examples below should help. For more context you can also look at the containing class, pybrain.datasets.SupervisedDataSet.
The 15 code examples of SupervisedDataSet.appendLinked shown below are sorted by popularity by default.
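As a quick orientation before the collected examples: appendLinked(inp, target) appends one linked (input, target) sample to a SupervisedDataSet, so each argument's length must match the dimensions the dataset was created with. Here is a minimal, self-contained sketch; the 2-input/1-output XOR-style data is an illustrative assumption, not taken from any example below.

from pybrain.datasets import SupervisedDataSet

# Dataset with 2-dimensional inputs and 1-dimensional targets.
ds = SupervisedDataSet(2, 1)

# Each call appends one (input, target) pair; the lengths must
# match the (2, 1) dimensions declared above.
ds.appendLinked([0, 0], [0])
ds.appendLinked([0, 1], [1])
ds.appendLinked([1, 0], [1])

print len(ds)  # -> 3 samples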
Example 1: createDataset2
# Required imports: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import appendLinked [as alias]
def createDataset2(nInputs, inputSize, nOutputs):
    index = 1
    ds = SupervisedDataSet(inputSize, nOutputs)
    i = 0
    j = 0
    pList = candleGen()
    print len(pList)
    input = []
    for sub in pList:
        if nInputs == j:
            break
        elif i < inputSize:
            # Fill the sliding window until it holds inputSize values
            input.append(sub[index])
            i = i + 1
        elif i == inputSize:
            # Window full: emit one sample, then slide the window forward
            ds.appendLinked(input, sub[index])
            input.pop(0)
            input.append(sub[index])
            j = j + 1
            i = i + 1
        else:
            ds.appendLinked(input, sub[index])
            input.pop(0)
            input.append(sub[index])
            j = j + 1
    return ds
Example 2: entrenarSomnolencia
# Required imports: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import appendLinked [as alias]
def entrenarSomnolencia(red):
    # Initialize the dataset
    ds = SupervisedDataSet(4096, 1)
    # Build the dataset: process each image to extract the face region,
    # then assign the desired target value for the network's output.
    print "Drowsiness - face"
    for i, c in enumerate(os.listdir(os.path.dirname('/home/taberu/Imágenes/img_tesis/somnoliento/'))):
        try:
            im = cv2.imread('/home/taberu/Imágenes/img_tesis/somnoliento/' + c)
            pim = pi.procesarImagen(im)
            cara = d.deteccionFacial(pim)
            if cara is None:  # "is None" avoids elementwise comparison on arrays
                print "No face detected"
            else:
                print i
                ds.appendLinked(cara.flatten(), 10)
        except:
            # Skip images that fail to load or process
            pass
    trainer = BackpropTrainer(red, ds)
    print "Training until convergence"
    trainer.trainUntilConvergence()
    NetworkWriter.writeToFile(red, 'rna_somnolencia.xml')
Example 3: get_supervised_dataset
# Required imports: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import appendLinked [as alias]
def get_supervised_dataset(race_data, race_factors):
    race_bins = get_bins(race_data)
    race_bin_groups = pd.DataFrame.from_dict(race_bins).groupby('race_id')
    # Input and output dimensions
    data_set = SupervisedDataSet(6, 15)
    for race_id, race_bin in race_bin_groups:
        # Skip races where 10 or fewer bins have a non-zero population share
        if not np.count_nonzero(race_bin.population_pct) > 10:
            continue
        race_factor = race_factors[race_factors.race_id == race_id]
        # Skip races with missing factor data
        if race_factor.empty:
            continue
        # Scale each factor roughly into [0, 1]
        input_factors = [first(race_factor.high_temp) / 100.0,
                         first(race_factor.low_temp) / 100.0,
                         first(race_factor.high_humidity) / 100.0,
                         first(race_factor.low_humidity) / 100.0,
                         first(race_factor.starting_elevation) / 10000.0,
                         first(race_factor.gross_elevation_gain) / 10000.0]
        output_factors = race_bin.population_pct.tolist()
        data_set.appendLinked(input_factors, output_factors)
    return data_set
Example 4: neuralNetwork_eval_func
# Required imports: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import appendLinked [as alias]
def neuralNetwork_eval_func(self, chromosome):
    node_num, learning_rate, window_size = self.decode_chromosome(chromosome)
    if self.check_log(node_num, learning_rate, window_size):
        return self.get_means_from_log(node_num, learning_rate, window_size)[0]
    folded_dataset = self.create_folded_dataset(window_size)
    indim = 21 * (2 * window_size + 1)
    mean_AUC = 0
    mean_decision_value = 0
    mean_mcc = 0
    sample_size_over_thousand_flag = False
    for test_fold in xrange(self.fold):
        test_labels, test_dataset, train_labels, train_dataset = folded_dataset.get_test_and_training_dataset(test_fold)
        if len(test_labels) + len(train_labels) > 1000:
            sample_size_over_thousand_flag = True
        ds = SupervisedDataSet(indim, 1)
        for i in xrange(len(train_labels)):
            ds.appendLinked(train_dataset[i], [train_labels[i]])
        net = buildNetwork(indim, node_num, 1, outclass=SigmoidLayer, bias=True)
        trainer = BackpropTrainer(net, ds, learningrate=learning_rate)
        trainer.trainUntilConvergence(maxEpochs=self.maxEpochs_for_trainer)
        decision_values = [net.activate(test_dataset[i]) for i in xrange(len(test_labels))]
        decision_values = map(lambda x: x[0], decision_values)
        AUC, decision_value_and_max_mcc = validate_performance.calculate_AUC(decision_values, test_labels)
        mean_AUC += AUC
        mean_decision_value += decision_value_and_max_mcc[0]
        mean_mcc += decision_value_and_max_mcc[1]
        if sample_size_over_thousand_flag:
            break
    if not sample_size_over_thousand_flag:
        mean_AUC /= self.fold
        mean_decision_value /= self.fold
        mean_mcc /= self.fold
    self.write_log(node_num, learning_rate, window_size, mean_AUC, mean_decision_value, mean_mcc)
    self.add_log(node_num, learning_rate, window_size, mean_AUC, mean_decision_value, mean_mcc)
    return mean_AUC
Example 5: __init__
# Required imports: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import appendLinked [as alias]
class dataset:
    # Initialize the dataset with input and label size
    def __init__(self, inputsize, labelsize):
        self.inputsize = inputsize
        self.labelsize = labelsize
        self.DS = SupervisedDataSet(self.inputsize, self.labelsize)

    # Add one sample to the existing training dataset
    def addTrainingData(self, inputdata, labeldata):
        try:
            if inputdata.size == self.inputsize and labeldata.size == self.labelsize:
                self.DS.appendLinked(inputdata, labeldata)
                return 1
        except AttributeError:
            print "Input error."
        # Reached on a size mismatch as well as on an AttributeError
        return 0

    def getTrainingDataset(self):
        return self.DS

    def generateDataSet(self):
        for line in fileinput.input(['data/inputdata3.txt']):
            x = line.split(':')
            # print ft.feature.getImageFeatureVector(x[0]), np.array([int(x[1])])
            self.addTrainingData(ft.feature.getImageFeatureVector(x[0]), np.array([int(x[1])]))
        return 1
Example 6: fit
# Required imports: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import appendLinked [as alias]
def fit(self):
    trainds = SupervisedDataSet(self.INPUT_SIZE, 1)
    for i in range(self.str_train, self.end_train):
        trainds.appendLinked(self.data[i - self.INPUT_SIZE:i], self.data[i])
    trainer = BackpropTrainer(self.net, trainds, learningrate=self.eta, weightdecay=self.lmda, momentum=0.1, shuffle=False)
    trainer.trainEpochs(self.epochs)
    trainer = None
Example 7: buildDataSet
# Required imports: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import appendLinked [as alias]
def buildDataSet(fTrainSet):
    ds = SupervisedDataSet(8, 1)
    for row in fTrainSet:
        inVec = row[2:10]
        tarVec = row[10]
        ds.appendLinked(inVec, tarVec)
    return ds
Example 8: main
# Required imports: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import appendLinked [as alias]
def main(T=10, load_brain=False, save_brain=False):
    singles = [room for room in rooms.allRooms if room.capacity == "Single"]
    preprocessed = preprocess_rooms(singles)
    all_vectors = [room_to_feature_vector(room, preprocessed) for room in singles]
    training_sequences = getLabeledRoomsFeaturesAndLabels(getRoomsMap(singles, all_vectors))
    input_units = len(all_vectors[0])

    if load_brain and "net" in brain_shelf:
        net = brain_shelf["net"]
        net.sorted = False
        net.sortModules()
    else:
        # Build and train a fresh two-hidden-layer network when no saved brain is loaded
        net = FeedForwardNetwork()
        layer_in = LinearLayer(input_units)
        layer_hidden = SigmoidLayer(1000)
        layer_hidden2 = SigmoidLayer(100)
        layer_out = LinearLayer(1)
        net.addInputModule(layer_in)
        net.addModule(layer_hidden)
        net.addModule(layer_hidden2)
        net.addOutputModule(layer_out)
        in_to_hidden = FullConnection(layer_in, layer_hidden)
        hidden_to_hidden = FullConnection(layer_hidden, layer_hidden2)
        hidden_to_out = FullConnection(layer_hidden2, layer_out)
        net.addConnection(in_to_hidden)
        net.addConnection(hidden_to_hidden)
        net.addConnection(hidden_to_out)
        net.sortModules()

        training_data = SupervisedDataSet(len(all_vectors[0]), 1)
        for training_seq in training_sequences:
            training_data.appendLinked(training_seq[1], training_seq[2])

        trainer = BackpropTrainer(net, training_data)
        for i in xrange(T):
            error = trainer.train()
            print "Training iteration %d. Error: %f" % (i + 1, error)

        if save_brain:
            brain_shelf["net"] = net

    labeled_rooms = []
    for i, vector in enumerate(all_vectors):
        labeled_rooms.append((singles[i], net.activate(vector)))

    available_rooms = available.get_available_rooms()
    labeled_rooms.sort(key=lambda x: -x[1])
    for room, label in labeled_rooms:
        if room.num in available_rooms:
            print "%16.12f: %s" % (label, room)
Example 9: load_from_file
# Required imports: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import appendLinked [as alias]
def load_from_file(filename):
    input_size = 9
    output_size = 1
    dataset = SupervisedDataSet(input_size, output_size)
    with open(filename, 'r') as datafile:
        for line in datafile:
            data = line.strip().split(' ')
            dataset.appendLinked(
                tuple(data[:input_size]),
                tuple(data[-output_size:]))
    return dataset
Example 10: do_evaluate
# Required imports: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import appendLinked [as alias]
def do_evaluate(eval_data, folds_number, iter_number):
    eval_set = SupervisedDataSet(len(feats), 1)
    for inst in eval_data:
        eval_set.appendLinked(inst.features(), [inst.class_idx()])
    res = evaluate(net_placeholder[0], eval_set)
    with open(os.path.join("results", str(folds_number) + ".net." + str(iter_number) + ".obj"), "w") as f:
        pickle.dump(res, f)
    res = evaluate_base(eval_set)
    with open(os.path.join("results", str(folds_number) + ".base." + str(iter_number) + ".obj"), 'w') as f:
        pickle.dump(res, f)
    print res
Example 11: gettraining
# Required imports: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import appendLinked [as alias]
def gettraining(self):
    DS = SupervisedDataSet(self.datainput, 8)
    for trn in self.training:
        inf = open(trn, 'r')
        for line in inf:
            val = line.split(' ', 2)
            index = self.fileindex[val[0]]
            if index >= 10:
                input = self.fftfile(val[0])
                output = self.tobit(int(val[1]))
                DS.appendLinked(input, output)
        inf.close()
    return DS
Example 12: create_NN_classifier
# Required imports: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import appendLinked [as alias]
def create_NN_classifier(genes, positive_dataset, negative_dataset):
    maxEpochs_for_trainer = 60
    node_num, learning_rate, window_size = genes
    node_num, learning_rate, window_size = int(node_num), float(learning_rate), int(window_size)
    train_labels, train_dataset = create_train_labels_and_dataset(positive_dataset, negative_dataset)
    indim = 21 * (2 * window_size + 1)
    ds = SupervisedDataSet(indim, 1)
    for i in xrange(len(train_labels)):
        ds.appendLinked(train_dataset[i], [train_labels[i]])
    net = buildNetwork(indim, node_num, 1, outclass=SigmoidLayer, bias=True)
    trainer = BackpropTrainer(net, ds, learningrate=learning_rate)
    trainer.trainUntilConvergence(maxEpochs=maxEpochs_for_trainer)
    return net
Example 13: buildDataset
# Required imports: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import appendLinked [as alias]
def buildDataset(path, indexes):
    f = open(path)
    ds = SupervisedDataSet(len(indexes[0]), len(indexes[1]))
    indexin, indexout = indexes
    for line in f.readlines():
        outline = [float(x) for x in line.split('\t')[:-1]]
        inpt, outpt = [], []
        for i in indexin:
            inpt.append(outline[i])
        for i in indexout:
            outpt.append(outline[i])
        ds.appendLinked(inpt, outpt)
    return ds
Example 14: _generate_Pybrain_DS
# Required imports: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import appendLinked [as alias]
def _generate_Pybrain_DS(self):
    vect_stream = []
    for word in self.sent_stream:
        vect_stream.append(self._word_to_vec(word))
    to_conv = zip(vect_stream, vect_stream[1:])
    to_conv.append((vect_stream[-1], vect_stream[0]))  # add wrap-around pair
    DS = SupervisedDataSet(29, 29)
    for inp, targ in to_conv:
        DS.appendLinked(inp, targ)
    return DS
Example 15: getSeparateDataSets
# Required imports: from pybrain.datasets import SupervisedDataSet [as alias]
# Or: from pybrain.datasets.SupervisedDataSet import appendLinked [as alias]
def getSeparateDataSets(testSize=0.2):
    trnDs = ClassificationDataSet(len(feats), nb_classes=len(classes))
    tstDs = SupervisedDataSet(len(feats), 1)
    for c in classes:
        with codecs.open(os.path.join(data_root, c + ".txt"), 'r', 'utf8') as f:
            lines = f.readlines()
            breakpoint = (1.0 - testSize) * len(lines)
            for i in range(len(lines)):
                r = Record("11", lines[i], c, "")
                if i < breakpoint:
                    trnDs.appendLinked(r.features(), [r.class_idx()])
                else:
                    tstDs.appendLinked(r.features(), [r.class_idx()])
    trnDs._convertToOneOfMany([0, 1])
    return trnDs, tstDs