This article collects typical code examples of the Python method pybrain.datasets.ClassificationDataSet.appendLinked. If you are wondering what ClassificationDataSet.appendLinked does, how to use it, or what it looks like in practice, the curated examples below should help. You can also explore further usage of the class it belongs to, pybrain.datasets.ClassificationDataSet.
The following presents 15 code examples of ClassificationDataSet.appendLinked, sorted by popularity by default.
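Before the examples, here is a minimal self-contained sketch of the method; the feature values, class count, and labels below are invented for illustration. appendLinked(inp, target) appends a single linked sample, where inp must match the dataset's input dimension and target is typically the class index.

from pybrain.datasets import ClassificationDataSet

# Hypothetical dataset: 2 input features, 1 target column, 3 classes
ds = ClassificationDataSet(2, 1, nb_classes=3)
ds.appendLinked([0.2, 0.7], [0])  # one sample: input vector, class index
ds.appendLinked([0.9, 0.1], [2])
print(len(ds), ds.indim, ds.outdim)  # 2 samples, indim=2, outdim=1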
Example 1: makeMnistDataSets
# Required import: from pybrain.datasets import ClassificationDataSet [as alias]
# Alternatively: from pybrain.datasets.ClassificationDataSet import appendLinked [as alias]
def makeMnistDataSets(path):
"""Return a pair consisting of two datasets, the first being the training
and the second being the test dataset."""
# test = SupervisedDataSet(28 * 28, 10)
test = ClassificationDataSet(28*28, 10)
test_image_file = os.path.join(path, 't10k-images-idx3-ubyte')
test_label_file = os.path.join(path, 't10k-labels-idx1-ubyte')
test_images = images(test_image_file)
test_labels = (flaggedArrayByIndex(l, 10) for l in labels(test_label_file))
for image, label in zip(test_images, test_labels):
test.appendLinked(image, label)
# test.addSample(image, label)
# train = SupervisedDataSet(28 * 28, 10)
train = ClassificationDataSet(28*28, 10)
train_image_file = os.path.join(path, 'train-images-idx3-ubyte')
train_label_file = os.path.join(path, 'train-labels-idx1-ubyte')
train_images = images(train_image_file)
train_labels = (flaggedArrayByIndex(l, 10) for l in labels(train_label_file))
for image, label in zip(train_images, train_labels):
train.appendLinked(image, label)
# train.addSample(image, label)
return train, test
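A hedged usage sketch for Example 1, assuming the four uncompressed MNIST idx files sit in a hypothetical ./mnist directory and the helpers images, labels, and flaggedArrayByIndex are defined in the same module:

train, test = makeMnistDataSets('./mnist')
print(len(train), len(test))  # expected: 60000 training and 10000 test samples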
Example 2: getPybrainDataSet
# Required import: from pybrain.datasets import ClassificationDataSet [as alias]
# Alternatively: from pybrain.datasets.ClassificationDataSet import appendLinked [as alias]
def getPybrainDataSet(source='Rachelle'):
    first = False  # True
    qualities, combinations = cp.getCombinations()
    moods = combinations.keys()
    ds = None
    l = 0  # running count of labels added
    for mood in moods:
        if mood == 'neutral':
            continue
        for typeNum in range(1, 21):
            for take in range(1, 10):
                fileName = 'recordings/' + source + '/' + mood + '/' + \
                    str(typeNum) + '_' + str(take) + '.skl'
                try:
                    data, featuresNames = ge.getFeatureVec(fileName, first)
                    first = False
                except IOError:
                    continue
                if ds is None:  # initialization
                    ds = ClassificationDataSet(len(data), len(qualities))
                output = np.zeros(len(qualities))
                for q in combinations[mood][typeNum]:
                    output[qualities.index(q)] = 1
                ds.appendLinked(data, output)
                l += sum(output)
    return ds, featuresNames
Example 3: classifer
# Required import: from pybrain.datasets import ClassificationDataSet [as alias]
# Alternatively: from pybrain.datasets.ClassificationDataSet import appendLinked [as alias]
def classifer(labels, data):
    """ data in format (value, label)
    """
    clsff = ClassificationDataSet(2, class_labels=labels)
    for d in data:
        clsff.appendLinked(d[0], d[1])
    clsff.calculateStatistics()
    return clsff  # return the populated dataset so callers can use it
Example 4: build_dataset
# Required import: from pybrain.datasets import ClassificationDataSet [as alias]
# Alternatively: from pybrain.datasets.ClassificationDataSet import appendLinked [as alias]
def build_dataset(data_pair):
inputs, classes = data_pair
ds = ClassificationDataSet(256)
data = zip(inputs, classes)
for (inp, c) in data:
ds.appendLinked(inp, [c])
return ds
Example 5: getBoardImage
# Required import: from pybrain.datasets import ClassificationDataSet [as alias]
# Alternatively: from pybrain.datasets.ClassificationDataSet import appendLinked [as alias]
def getBoardImage(img):
'''
Runs an image through processing and neural network to decode digits
img: an openCV image object
returns:
pil_im: a PIL image object with the puzzle isolated, cropped and straightened
boardString: string representing the digits and spaces of a Sudoku board (left to right, top to bottom)
'''
# Process image and extract digits
pil_im, numbers, parsed, missed = process(img, False)
    if pil_im is None:
return None, None
net = NetworkReader.readFrom(os.path.dirname(os.path.abspath(__file__))+'/network.xml')
boardString = ''
for number in numbers:
if number is None:
boardString += ' '
else:
            data = ClassificationDataSet(400, nb_classes=9, class_labels=['1', '2', '3', '4', '5', '6', '7', '8', '9'])
            data.appendLinked(number.ravel(), [0])
            boardString += str(net.activateOnDataset(data).argmax(axis=1)[0] + 1)
return pil_im, boardString
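A hedged usage sketch for Example 5, assuming OpenCV is installed, a trained network.xml sits next to the module, and sudoku_photo.jpg is an invented file name:

import cv2

img = cv2.imread('sudoku_photo.jpg')  # hypothetical input photo
pil_im, boardString = getBoardImage(img)
if boardString is not None:
    print(boardString)  # digits and spaces, read left to right, top to bottom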
Example 6: import_dataset
# Required import: from pybrain.datasets import ClassificationDataSet [as alias]
# Alternatively: from pybrain.datasets.ClassificationDataSet import appendLinked [as alias]
def import_dataset(path, shapes, used_for, samples_nbr):
ds = ClassificationDataSet(4, nb_classes=3)
for shape in sorted(shapes):
for i in range(samples_nbr):
image = imread(path + used_for + "/" + shape + str(i + 1) + ".png", as_grey=True, plugin=None, flatten=None)
image_inputs = image_to_inputs(image)
ds.appendLinked(image_inputs, shapes[shape])
return ds
Example 7: create_data_set
# Required import: from pybrain.datasets import ClassificationDataSet [as alias]
# Alternatively: from pybrain.datasets.ClassificationDataSet import appendLinked [as alias]
def create_data_set(file_name):
raw_data = open(file_name).readlines()
data_set = ClassificationDataSet(64, nb_classes=10, class_labels=['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'])
for line in raw_data:
# Get raw line into a list of integers
        line = [int(x) for x in line.strip().split(',')]
data_set.appendLinked(line[:-1], line[-1])
return data_set
Example 8: conv2DS
# Required import: from pybrain.datasets import ClassificationDataSet [as alias]
# Alternatively: from pybrain.datasets.ClassificationDataSet import appendLinked [as alias]
def conv2DS(Xv, yv=None):
    # M, classNames and unique are module-level names in the original source
    # (M is the input dimension; unique presumably comes from numpy/pylab)
    if yv is None:
        yv = np.asmatrix(np.ones((Xv.shape[0], 1)))
        for j in range(len(classNames)):
            yv[j] = j
    C = len(unique(yv.flatten().tolist()[0]))
    DS = ClassificationDataSet(M, 1, nb_classes=C)
    for i in range(Xv.shape[0]):
        DS.appendLinked(Xv[i, :].tolist()[0], [yv[i].A[0][0]])
    DS._convertToOneOfMany()
    return DS
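Several of these examples call the underscore method _convertToOneOfMany. A minimal sketch of what it does, on an invented 2-feature, 3-class dataset: the single integer target column is replaced by a one-of-K (one-hot) vector, and the original labels are kept in the dataset's 'class' field.

from pybrain.datasets import ClassificationDataSet

ds = ClassificationDataSet(2, 1, nb_classes=3)
ds.appendLinked([0.1, 0.2], [0])
ds.appendLinked([0.3, 0.4], [2])
ds._convertToOneOfMany()  # targets become one-hot rows such as [1 0 0]
print(ds['target'])  # one-of-K encoded targets
print(ds['class'])   # the original integer labels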
Example 9: getSeparateDataSets
# Required import: from pybrain.datasets import ClassificationDataSet [as alias]
# Alternatively: from pybrain.datasets.ClassificationDataSet import appendLinked [as alias]
def getSeparateDataSets(testSize = 0.2):
trnDs = ClassificationDataSet(len(feats), nb_classes=len(classes))
tstDs = SupervisedDataSet(len(feats), 1)
for c in classes:
with codecs.open(os.path.join(data_root, c+".txt"), 'r', 'utf8') as f:
lines = f.readlines()
breakpoint = (1.0 - testSize) * len(lines)
for i in range(len(lines)):
r = Record("11", lines[i], c, "")
if i < breakpoint:
trnDs.appendLinked(r.features(), [r.class_idx()])
else:
tstDs.appendLinked(r.features(), [r.class_idx()])
trnDs._convertToOneOfMany([0, 1])
return trnDs, tstDs
Example 10: __prepareTrainingData
# Required import: from pybrain.datasets import ClassificationDataSet [as alias]
# Alternatively: from pybrain.datasets.ClassificationDataSet import appendLinked [as alias]
def __prepareTrainingData(self, places, num_of_places):
    alldata = ClassificationDataSet(2, 1, nb_classes=self.num_of_places)
    previous_feature_vector = None
    previous_place = None
    counter = 0
    for location_event in places:
        if location_event.place is not None:
            current_timestamp = location_event.timestamp
            new_feature_vector = self.__prepare_features(location_event.place, current_timestamp)
            new_place = self.__prepare_place(location_event.place)
            # if previous_feature_vector is not None and previous_place is not None and location_event.place.name != previous_place.name:
            if previous_feature_vector is not None:
                counter += 1
                if location_event.place.name == "2":  # debug output
                    print(previous_feature_vector)
                    print(location_event.place.name)
                for i in range(1):  # duplication factor; currently adds each sample once
                    alldata.appendLinked(previous_feature_vector, [new_place])
            previous_feature_vector = new_feature_vector
            previous_place = location_event.place
            self.last_visit_map[location_event.place] = current_timestamp
    previous_feature_vector = None
    previous_place = None
    probability_of_static = float(counter) / float(len(places))
    probability_of_static = 0.5  # overrides the ratio computed above
    for location_event in places:
        if location_event.place is not None:
            current_timestamp = location_event.timestamp
            new_feature_vector = self.__prepare_features(location_event.place, current_timestamp)
            new_place = self.__prepare_place(location_event.place)
            rand = random.random()
            if previous_feature_vector is not None and rand <= probability_of_static:
                counter += 1
                if location_event.place.name == "1":  # debug output
                    print(new_feature_vector)
                    print(location_event.place.name)
                for i in range(1):
                    alldata.appendLinked(previous_feature_vector, [new_place])
            previous_feature_vector = new_feature_vector
            previous_place = new_place
            self.last_visit_map[location_event.place] = current_timestamp
    return alldata
Example 11: fnn
# Required import: from pybrain.datasets import ClassificationDataSet [as alias]
# Alternatively: from pybrain.datasets.ClassificationDataSet import appendLinked [as alias]
def fnn():
    data = orange.ExampleTable("D:\\Back-up-THICK_on_Vista\\Orange\\W1BIN.tab")  # input_dict['data']
    addMetaID(data)
    n_attrs = len(data.domain.attributes)
    classes = list(data.domain.classVar.values)
    pbdata = ClassificationDataSet(n_attrs, class_labels=classes)
    for ex in data:
        pbdata.appendLinked([x.value for x in list(ex)[:n_attrs]], [classes.index(ex.getclass().value)])
    tstdata, trndata = pbdata.splitWithProportion(0.25)
    trndata._convertToOneOfMany()
    tstdata._convertToOneOfMany()
    print("Number of training patterns: ", len(trndata))
    print("Input and output dimensions: ", trndata.indim, trndata.outdim)
    print("First sample (input, target, class):")
    print(trndata['input'][0], trndata['target'][0], trndata['class'][0])
Example 12: train
# Required import: from pybrain.datasets import ClassificationDataSet [as alias]
# Alternatively: from pybrain.datasets.ClassificationDataSet import appendLinked [as alias]
def train(training_data):
training_set = ClassificationDataSet(len(feats), nb_classes=len(classes))
for inst in training_data:
training_set.appendLinked(inst.features(), [inst.class_idx()])
training_set._convertToOneOfMany([0, 1])
net_placeholder[0] = buildNetwork(
training_set.indim,
int((training_set.indim + training_set.outdim)/2),
training_set.outdim, bias=True,
hiddenclass=TanhLayer,
outclass=SoftmaxLayer
)
trainer = BackpropTrainer(
net_placeholder[0], training_set, momentum=0.75, verbose=False, learningrate=0.05
)
trainer.trainUntilConvergence(maxEpochs=100, validationProportion=0.1)
Example 13: build_net
# Required import: from pybrain.datasets import ClassificationDataSet [as alias]
# Alternatively: from pybrain.datasets.ClassificationDataSet import appendLinked [as alias]
def build_net(self):
if os.path.exists(self.NET_FILE):
return NetworkReader.readFrom(self.NET_FILE)
ds = ClassificationDataSet(len(feats), nb_classes=len(classes))
for c in classes:
        print(c)
with codecs.open(os.path.join(self.data_root, c+".txt"), 'r', 'utf8') as f:
for line in f:
r = Record("11", line, c, "")
ds.appendLinked(r.features(), [r.class_idx()])
ds._convertToOneOfMany([0, 1])
net = buildNetwork(ds.indim, int((ds.indim + ds.outdim)/2), ds.outdim, bias=True, hiddenclass=TanhLayer, outclass=SoftmaxLayer)
trainer = BackpropTrainer(net, ds, momentum=0.75, verbose=True)
trainer.trainUntilConvergence(maxEpochs=300)
NetworkWriter.writeToFile(net, self.NET_FILE)
return net
Example 14: init_classifier
# Required import: from pybrain.datasets import ClassificationDataSet [as alias]
# Alternatively: from pybrain.datasets.ClassificationDataSet import appendLinked [as alias]
def init_classifier(self, hidden_units = 20):
data = ClassificationDataSet(len(self.channels), nb_classes=5)
# Prepare the dataset
for i in range(len(self.classification_proc)):
data.appendLinked(self.y_proc[i], self.classification_proc[i])
# Make global for test purposes
self.data = data
# Prepare training and test data, 75% - 25% proportion
self.testdata, self.traindata = data.splitWithProportion(0.25)
#self.traindata._convertToOneOfMany()
#self.testdata._convertToOneOfMany()
# CHECK the number of hidden units
fnn = buildNetwork(self.traindata.indim, hidden_units, self.traindata.outdim)
# CHECK meaning of the parameters
trainer = BackpropTrainer(fnn, dataset=self.traindata, momentum=0, verbose=True, weightdecay=0.01)
return fnn, trainer, data
Example 15: bagging_classifier
# Required import: from pybrain.datasets import ClassificationDataSet [as alias]
# Alternatively: from pybrain.datasets.ClassificationDataSet import appendLinked [as alias]
def bagging_classifier(self, trainInstances, testInstances, L):
"""Train and test bagging classifier for the neural network.
(1) generate self.m new training sets each with L instances
from trainInstances using replacement;
(2) train self.m neural networks on the self.m training sets;
(3) majority vote
Precondition: dimensions of trainInstances,testInstances must match self.fnn
:param trainInstances: collection of training examples
:type trainInstances: ClassificationDataSet
:param testInstances: collection of test examples
:type testInstances: ClassificationDataSet
:param L: number of items in each training set
:type L: int
:returns: accuracy of predictions
:rtype: float
"""
ensemble = []
for j in range(self.m):
# generate random sample of indices
tset = random.sample(range(0, len(trainInstances["input"])), L)
c = ClassificationDataSet(self.fnn.indim, 1, nb_classes=self.fnn.outdim)
for index in tset:
c.appendLinked(trainInstances['input'][index], trainInstances['target'][index])
c._convertToOneOfMany(bounds=[0,1]) # 1 of k binary representation
net = buildNetwork(24, 18, 16, 8, hiddenclass=TanhLayer, outclass=SoftmaxLayer) # define neural net
trainer = BackpropTrainer(net, dataset=c, learningrate=0.01, momentum=0.1, verbose=True, weightdecay=0.01)
trainer.trainEpochs(20) # train
ensemble.append(net)
        print(percentError(trainer.testOnClassData(
            dataset=testInstances), testInstances['class']))
# key is test example, value is list of labels from each model
d = dict.fromkeys(np.arange(len(testInstances['input'])))
for model in ensemble:
# get label with highest probability for each test example
result = model.activateOnDataset(testInstances).argmax(axis=1)
for k in range(len(result)):
            if d[k] is None:
d[k] = [result[k]]
else:
d[k].append(result[k])
predictions = []
for ex in d.keys():
predictions.append(max(set(d[ex]), key=d[ex].count)) # majority voting
actual = [int(row[0]) for row in testInstances['class']]
return accuracy_score(actual, predictions) # traditional accuracy calc
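A hedged usage sketch for Example 15, assuming a wrapper instance clf that exposes this method along with self.m (the number of bagged networks) and self.fnn, plus two prepared ClassificationDataSet splits; the variable names are invented:

acc = clf.bagging_classifier(trnInstances, tstInstances, L=500)
print('bagging accuracy:', acc)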