This article collects typical usage examples of the Python method nolearn.dbn.DBN.score. If you are wondering exactly what DBN.score does, how to call it, or what it looks like in real code, the curated examples below may help. You can also look at further usage examples of the class it belongs to, nolearn.dbn.DBN.
The following shows 6 code examples of the DBN.score method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
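Before diving into the examples, here is a minimal sketch of the typical fit/score workflow. The random placeholder data and the layer sizes are illustrative assumptions only; as the examples below also show, DBN.score reports the fraction of correctly classified samples.

from nolearn.dbn import DBN
from sklearn.cross_validation import train_test_split  # older scikit-learn API, matching the examples below
import numpy as np

# placeholder data: 1000 samples, 64 features, 10 classes (assumed for illustration)
X = np.random.rand(1000, 64).astype('float32')
y = np.random.randint(0, 10, size=1000)
trainX, testX, trainY, testY = train_test_split(X, y, test_size=0.2)

# the first layer size must match the feature count, the last one the number of classes
dbn = DBN(
    [trainX.shape[1], 300, 10],
    learn_rates=0.3,
    learn_rate_decays=0.9,
    epochs=10,
    verbose=0)
dbn.fit(trainX, trainY)
# score() reports classification accuracy on held-out data
print 'accuracy:', dbn.score(testX, testY)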
Example 1: train_test_split
# Required import: from nolearn.dbn import DBN [as alias]
# Or: from nolearn.dbn.DBN import score [as alias]
from sklearn.cross_validation import train_test_split  # split helper used below (older scikit-learn API, as in the other examples)
(train_x, vali_x, train_y, vali_y) = train_test_split(train_x, train_y, test_size=0.2)
dbn = DBN(
    [300, 1024, 120000],
    learn_rates=0.025,
    learn_rate_decays=0.98,
    l2_costs=0.0001,
    minibatch_size=256,
    epochs=5,
    momentum=0.9,
    #dropouts=0.22,
    verbose=2)
dbn.fit(train_x, train_y)
print 'validation score is:', dbn.score(vali_x, vali_y)
result = dbn.predict(test_x)
with open('data/result', 'w') as f:
    for el in result:
        f.write(str(el) + '\n')  # predictions are numpy integers, so convert to str before writing
#predicted_y_proba = dbn.predict_proba(test_x)
#if __name__ == "__main__":
#p_proba_str = cPickle.dumps(predicted_y_proba)
'''import sys
file_name = sys.argv[1]
with open(file_name, 'w') as a:
a.write(p_proba_str)'''
Example 2: _score
# Required import: from nolearn.dbn import DBN [as alias]
# Or: from nolearn.dbn.DBN import score [as alias]
    verbose=1)
##### Below is a trick for replacing the score function so that it evaluates accuracy the way you want.
# The stock score() only reports the plain percentage of correct outputs, with no other options;
# by rebinding it on the instance you can plug in a metric of your own.
import new  # Python 2 only; see the types.MethodType note after this example
def _score(self, X, y):
    outputs = self.predict_proba(X)
    targets = self._onehot(y)  # built-in helper that converts the labels into one-hot vectors
    # count classification mistakes by comparing the predicted class against the true class
    mistakes = np.sum(np.not_equal(np.argmax(outputs, axis=1), np.argmax(targets, axis=1)))
    #return - float(mistakes) / len(y) + 1
    return 1 - 1.0 * mistakes / len(y)
dbn.score = new.instancemethod(_score, dbn, dbn.__class__)  # rebind score() on this instance only
###########
dbn.fit(trainX, trainY)
# compute the predictions for the test data and show a classification
# report
preds = dbn.predict(testX)
print classification_report(testY, preds)
print 'The accuracy on testing data is:', accuracy_score(testY, preds)
# randomly select a few of the test instances
for i in np.random.choice(np.arange(0, len(testY)), size=(10,)):
    # classify the digit
    pred = dbn.predict(np.atleast_2d(testX[i]))
    # reshape the feature vector to be a 28x28 pixel image, then change
    # the data type to be an unsigned 8-bit integer
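The `new` module used in Example 2 exists only in Python 2. If you need the same per-instance override on Python 3, `types.MethodType` is the standard replacement; a minimal sketch reusing the `_score` function defined above:

import types
# Python 3 equivalent of new.instancemethod: bind _score to this one DBN instance
dbn.score = types.MethodType(_score, dbn)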
Example 3: range
# Required import: from nolearn.dbn import DBN [as alias]
# Or: from nolearn.dbn.DBN import score [as alias]
    max_epochs=10,
    verbose=1)
classifiers.append(('nolearn.lasagne', clf))
RUNS = 1
for name, orig in classifiers:
    times = []
    accuracies = []
    for i in range(RUNS):
        start = time.time()
        clf = clone(orig)
        clf.random_state = int(time.time())
        clf.fit(X_train, y_train)
        y_pred = clf.predict(X_test)
        accuracies.append(clf.score(X_test, y_test))
        times.append(time.time() - start)
    a_t = np.array(times)
    a_s = np.array(accuracies)
    print("\n" + name)
    print("\tAccuracy: %5.2f%% ±%4.2f" % (100.0 * a_s.mean(), 100.0 * a_s.std()))
    print("\tTimes: %5.2fs ±%4.2f" % (a_t.mean(), a_t.std()))
    print("\tReport:")
    print(classification_report(y_test, y_pred))
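The snippet in Example 3 starts after the classifiers list and the train/test arrays have already been built. A hedged sketch of a minimal setup that would let the benchmark loop run; the digits dataset and the nolearn.dbn entry (with its layer sizes) are assumptions for illustration, not taken from the original:

import time
import numpy as np
from sklearn.base import clone
from sklearn.cross_validation import train_test_split
from sklearn.datasets import load_digits
from sklearn.metrics import classification_report
from nolearn.dbn import DBN

# small real dataset so the benchmark loop has something to run on
digits = load_digits()
X_train, X_test, y_train, y_test = train_test_split(
    digits.data.astype('float32') / 16.0, digits.target, test_size=0.25)

classifiers = []
# hypothetical nolearn.dbn entry to compare against the nolearn.lasagne model above
classifiers.append(('nolearn.dbn', DBN([X_train.shape[1], 300, 10],
                                       learn_rates=0.3,
                                       epochs=10,
                                       verbose=0)))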
Example 4: main
# Required import: from nolearn.dbn import DBN [as alias]
# Or: from nolearn.dbn.DBN import score [as alias]
def main():
    """Train a DBN on NER vectors and pick a learning rate by cross-validation."""
    from sklearn.cross_validation import KFold
    set_verbosity(3)
    overlap_df = get_data("./vectors/google_overlap.csv")
    #overlap_df = get_data("./vectors/freebase_overlap.csv")
    overlap_df = overlap_df[overlap_df.NER != 'O']
    overlap_df = overlap_df[overlap_df.NER != 'I-FAC']
    overlap_df = overlap_df[overlap_df.NER != 'B-FAC']
    overlap_df = overlap_df[overlap_df.NER != 'I-LOC']
    overlap_df = overlap_df[overlap_df.NER != 'B-LOC']
    overlap_df = overlap_df[overlap_df.NER != 'I-WEA']
    overlap_df = overlap_df[overlap_df.NER != 'B-WEA']
    overlap_df = overlap_df[overlap_df.NER != 'I-VEH']
    overlap_df = overlap_df[overlap_df.NER != 'B-VEH']
    overlap_df = overlap_df[overlap_df.NER != 'I-TTL']
    overlap_df = overlap_df[overlap_df.NER != 'B-TTL']
    #overlap_df = overlap_df.groupby("NER").filter(lambda x: len(x) > 50)
    label_map, labels = map_labels(overlap_df)
    X, y = parse_data(overlap_df, label_map)
    trainX, testX, trainY, testY = train_test_split(X, y, test_size=0.10)
    count, n_folds, scores = 0, 20, []
    logging.info("Beginning Cross Validation with " + str(n_folds) + " folds")
    kf = KFold(len(trainX), n_folds=n_folds)
    lrs = list(np.linspace(0.1, 0.4, num=n_folds))
    for train, test in kf:
        logging.debug("TRAIN:" + str(len(train)) + " TEST:" + str(len(test)))
        trainX_fold, validX_fold = trainX[train], trainX[test]
        trainY_fold, validY_fold = trainY[train], trainY[test]
        google_topology = [trainX_fold.shape[1], 300, 200, 100, len(labels)]
        #freebase_topology = [trainX_fold.shape[1], 750, 500, 250, len(labels)]
        dbn = DBN(
            #freebase_topology,
            google_topology,
            learn_rates=float(lrs[count]),
            learn_rate_decays=0.9,
            epochs=50,
            verbose=0)
        dbn.fit(trainX_fold, trainY_fold)
        score = dbn.score(validX_fold, validY_fold)
        scores.append((score, float(lrs[count])))
        count += 1
        logging.info(
            "Learning rate: " + str(float(lrs[count-1])) + " score:" +
            str(score) + " " + str(float(count)/float(n_folds) * 100) + "% done")
    best_lr = max(scores, key=lambda x: x[0])[1]
    logging.info("Best CV learning rate: " + str(best_lr))
    google_topology = [trainX.shape[1], 300, 200, 100, len(labels)]
    #freebase_topology = [trainX.shape[1], 750, 500, 250, len(labels)]
    dbn = DBN(
        #freebase_topology,
        google_topology,
        learn_rates=best_lr,
        learn_rate_decays=0.9,
        epochs=100,
        verbose=1)
    dbn.fit(trainX, trainY)
    preds = dbn.predict(testX)
    print classification_report(testY, preds)
    #model_and_data = (dbn, label_map)
    #dump_model(model_and_data, './google_model.pkl')
#'''
'''
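Example 4 relies on the old sklearn.cross_validation.KFold signature, KFold(n, n_folds=...), which was removed in scikit-learn 0.20. A minimal sketch of the equivalent fold loop with the current sklearn.model_selection API, assuming the same trainX, trainY and n_folds as above:

from sklearn.model_selection import KFold

kf = KFold(n_splits=n_folds)
for train, test in kf.split(trainX):
    trainX_fold, validX_fold = trainX[train], trainX[test]
    trainY_fold, validY_fold = trainY[train], trainY[test]
    # ...build, fit and score the DBN for this fold exactly as in the example above...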
Example 5: DBN
# Required import: from nolearn.dbn import DBN [as alias]
# Or: from nolearn.dbn.DBN import score [as alias]
datareader2 = csv.reader(f2)
for row in datareader2:
    labellist = [float(x) for x in row]
    train_labels.extend(labellist)
X_train, X_test, y_train, y_test = cross_validation.train_test_split(train, train_labels, test_size=0.2, random_state=0)
print "Applying a learning algorithm..."
from nolearn.dbn import DBN
X_train = np.array(X_train)
X_test = np.array(X_test)
y_train = np.array(y_train)
y_test = np.array(y_test)
clf = DBN([X_train.shape[1], 300, 10], learn_rates=0.3, learn_rate_decays=0.9, epochs=15, verbose=1)
clf.fit(X_train, y_train)
acc_nn = clf.score(X_test, y_test)
print "neural network accuracy: ", acc_nn
y_pred = clf.predict(X_test)
print "Classification report:"
print classification_report(y_test, y_pred)
print ("Confusion matrix:\n%s" % metrics.confusion_matrix(y_test, y_pred))
Example 6: classification_report
# Required import: from nolearn.dbn import DBN [as alias]
# Or: from nolearn.dbn.DBN import score [as alias]
    output_num_units=10,
    output_nonlinearity=softmax,
    eval_size=0.0,
    more_params=dict(
        hidden1_num_units=300,
    ),
    update=nesterov_momentum,
    update_learning_rate=0.02,
    update_momentum=0.9,
    max_epochs=10,
    verbose=1
)
classifiers.append(('nolearn.lasagne', clf))
for name, clf in classifiers:
    start = time.time()
    clf.fit(X_train, y_train)
    from sklearn.metrics import classification_report
    y_pred = clf.predict(X_test)
    print name
    print "\tAccuracy:", clf.score(X_test, y_test)
    print "\tTime:", time.time() - start
    print "\tReport:"
    print classification_report(y_test, y_pred)