本文整理汇总了Python中sklearn.neural_network.BernoulliRBM.n_iter方法的典型用法代码示例。如果您正苦于以下问题:Python BernoulliRBM.n_iter方法的具体用法?Python BernoulliRBM.n_iter怎么用?Python BernoulliRBM.n_iter使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类sklearn.neural_network.BernoulliRBM
的用法示例。
在下文中一共展示了BernoulliRBM.n_iter方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: RBM_SVM
# 需要导入模块: from sklearn.neural_network import BernoulliRBM [as 别名]
# 或者: from sklearn.neural_network.BernoulliRBM import n_iter [as 别名]
def RBM_SVM(trainfeatures, testfeatures, trainlabels, testlabels):
# ******************* Scikit-learning RBM + SVM *******************
print "train RBM+SVM model"
## trainfeatures = (trainfeatures - np.min(trainfeatures, 0)) / (np.max(trainfeatures, 0) + 0.0001) # 0-1 scaling
min_max_scaler = preprocessing.MinMaxScaler()
trainfeatures_fs = min_max_scaler.fit_transform(trainfeatures)
testfeatures_fs = min_max_scaler.transform(testfeatures)
# SVM parameters
clf = svm.SVC(C=5.0, kernel='sigmoid', degree=3, gamma=0.5, coef0=10.0,
shrinking=True, probability=False, tol=0.001, cache_size=200,
class_weight=None, verbose=False, max_iter=-1, random_state=None)
# RBM parameters
rbm = BernoulliRBM(random_state=0, verbose=True)
rbm.learning_rate = 0.06
rbm.n_iter = 20
# Machine learning pipeline
classifier = Pipeline(steps=[('rbm', rbm), ('svm', clf)])
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 400
classifier.fit(trainfeatures_fs, trainlabels)
results = classifier.predict(testfeatures_fs)
results = results.ravel()
testerror = float(len(testlabels)
- np.sum(testlabels == results))/float(len(testlabels))
# print"error rate with SVM is %.4f" %testerror
return testerror
示例2: Logistic
# 需要导入模块: from sklearn.neural_network import BernoulliRBM [as 别名]
# 或者: from sklearn.neural_network.BernoulliRBM import n_iter [as 别名]
def Logistic():
    """Compare logistic regression on RBM features vs. raw pixels.

    Trains both models on the module-level (data_train, target_train) split,
    prints classification reports, and saves the two confusion matrices as
    confusion_matrix3.jpg / confusion_matrix4.jpg.
    """
    logistic = linear_model.LogisticRegression()
    rbm = BernoulliRBM(random_state=0, verbose=True)
    classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
    # RBM parameters obtained after cross-validation
    rbm.learning_rate = 0.01
    rbm.n_iter = 121
    rbm.n_components = 700
    logistic.C = 1.0
    # Training RBM-Logistic Pipeline
    classifier.fit(data_train, target_train)
    # Training Logistic regression baseline on the raw features
    logistic_classifier = linear_model.LogisticRegression(C=1.0)
    logistic_classifier.fit(data_train, target_train)
    print("printing_results")
    rbm_pred = classifier.predict(data_test)
    print("Logistic regression using RBM features:\n%s\n" % (metrics.classification_report(target_test, rbm_pred)))
    cm_rbm = confusion_matrix(target_test, rbm_pred)
    plt.matshow(cm_rbm)
    plt.title('Confusion Matrix Logistic Regression with RBM Features')
    plt.colorbar()
    plt.ylabel('True Label')
    plt.xlabel('Predicted Label')
    plt.savefig('confusion_matrix3.jpg')
    raw_pred = logistic_classifier.predict(data_test)
    print("Logistic regression using raw pixel features:\n%s\n" % (metrics.classification_report(target_test, raw_pred)))
    cm_raw = confusion_matrix(target_test, raw_pred)
    plt.matshow(cm_raw)
    plt.title('Confusion Matrix Logistic Regression')
    plt.colorbar()
    plt.ylabel('True Label')
    plt.xlabel('Predicted Label')
    plt.savefig('confusion_matrix4.jpg')
#Logistic()
示例3: SGD
# 需要导入模块: from sklearn.neural_network import BernoulliRBM [as 别名]
# 或者: from sklearn.neural_network.BernoulliRBM import n_iter [as 别名]
def SGD():
    """Compare a hinge-loss SGD classifier on RBM features vs. raw pixels.

    Uses the module-level train/test splits, prints classification reports,
    and saves confusion matrices as confusion_matrix1.jpg / confusion_matrix2.jpg.
    """
    sgd_clf = linear_model.SGDClassifier(loss='hinge', penalty='l2', random_state=42, n_jobs=-1, epsilon=0.001)
    rbm = BernoulliRBM(random_state=0, verbose=True)
    classifier = Pipeline(steps=[('rbm', rbm), ('SGD', sgd_clf)])
    # RBM parameters obtained after cross-validation
    rbm.learning_rate = 0.01
    rbm.n_iter = 15
    rbm.n_components = 50
    sgd_clf.alpha = 0.0001
    sgd_clf.C = 1
    # Training the raw-feature SGD baseline
    sgd_baseline = linear_model.SGDClassifier(loss='hinge', penalty='l2', random_state=42, n_jobs=-1, alpha=0.0001, epsilon=0.001)
    sgd_baseline.fit(data_train, target_train)
    # Training RBM-SGD Pipeline
    classifier.fit(data_train, target_train)
    print("printing_results")
    rbm_pred = classifier.predict(data_test)
    print("SGD using RBM features:\n%s\n" % (metrics.classification_report(target_test, rbm_pred)))
    cm_rbm = confusion_matrix(target_test, rbm_pred)
    plt.matshow(cm_rbm)
    plt.title('Confusion Matrix SVM with SDG with RBM Features')
    plt.colorbar()
    plt.ylabel('True Label')
    plt.xlabel('Predicted Label')
    plt.savefig('confusion_matrix1.jpg')
    raw_pred = sgd_baseline.predict(data_test)
    print("SGD using raw pixel features:\n%s\n" % (metrics.classification_report(target_test, raw_pred)))
    cm_raw = confusion_matrix(target_test, raw_pred)
    plt.matshow(cm_raw)
    plt.title('Confusion Matrix SVM with SDG Raw Features')
    plt.colorbar()
    plt.ylabel('True Label')
    plt.xlabel('Predicted Label')
    plt.savefig('confusion_matrix2.jpg')
示例4: build_classifier
# 需要导入模块: from sklearn.neural_network import BernoulliRBM [as 别名]
# 或者: from sklearn.neural_network.BernoulliRBM import n_iter [as 别名]
def build_classifier(clf_name):
    """Return (estimator, parameter grid) for the classifier named clf_name.

    Unknown names yield (None, {}). The parameter grid is currently always
    empty; the grid for the RBM pipeline is left commented out as in the
    original.
    """
    def _build_svm():
        return svm.SVC(kernel='linear', C=10)

    def _build_knn():
        return neighbors.KNeighborsClassifier(n_neighbors=5, weights='uniform',
                                              algorithm='brute', leaf_size=30,
                                              metric='cosine', metric_params=None)

    def _build_rbm_logistic():
        logistic = linear_model.LogisticRegression()
        rbm = BernoulliRBM(random_state=0, verbose=True)
        rbm.learning_rate = 0.01
        rbm.n_iter = 20
        rbm.n_components = 100
        logistic.C = 6000
        return Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
        #parameters = {'clf__C': (1, 10)}

    def _build_tsne():
        return TSNE(n_components=2, init='random', metric='cosine')

    # NOTE(review): the RBM pipeline key is spelled "rmb" (presumably a typo
    # for "rbm"), but callers match that spelling, so it is kept.
    builders = {
        "svm": _build_svm,
        "knn": _build_knn,
        "rmb": _build_rbm_logistic,
        "tsne": _build_tsne,
    }
    clf = builders[clf_name]() if clf_name in builders else None
    return clf, {}
示例5: runRBM
# 需要导入模块: from sklearn.neural_network import BernoulliRBM [as 别名]
# 或者: from sklearn.neural_network.BernoulliRBM import n_iter [as 别名]
def runRBM(arr, clsfr):#iters, lrn_rate, logistic_c_val, logistic_c_val2, n_comp, filename):
global file_dir, nEvents, solutionFile
iters = int(arr[0]*10)
lrn_rate = arr[1]
logistic_c_val = arr[2]*1000.0
logistic_c_val2 = arr[3]*100.0
n_comp = int(arr[4]*100)
filename = 'rbm_iter'+str(iters)+'_logc'+str(log_c_val)+'_logcc'+str(log_c_val2)+'_lrn'+str(learn_rate)+'_nc'+str(n_comp)# low
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = lrn_rate #0.10#0.06
rbm.n_iter = iters #20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = n_comp # 250
logistic.C = logistic_c_val #6000.0
# Training RBM-Logistic Pipeline
classifier.fit(sigtr[train_input].values, sigtr['Label'].values)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=logistic_c_val2)#100.0
logistic_classifier.fit(sigtr[train_input].values, sigtr['Label'].values)
###############################################################################
# Evaluation
if clsfr == 0:
clsnn_pred=classifier.predict(sigtest[train_input].values)
solnFile('clsnn_'+filename,clsnn_pred,sigtest['EventId'].values)#,bkgtest)
ams_score = ams.AMS_metric(solutionFile, file_dir+filename+'.out', nEvents)
print ams_score
logfile.write(filename+': ' + str(ams_score)+'\n')
elif clsfr == 1:
log_cls_pred = logistic_classifier.predict(sigtest[train_input].values)
solnFile('lognn_'+filename,log_cls_pred,sigtest['EventId'].values)#,bkgtest)
ams_score = ams.AMS_metric(solutionFile, file_dir+'lognn_'+filename+'.out', nEvents)
print ams_score
logfile.write('lognn ' + filename+': ' + str(ams_score)+'\n')
else:
logistic_classifier_tx = linear_model.LogisticRegression(C=logistic_c_val2)
logistic_classifier_tx.fit_transform(sigtr[train_input].values, sigtr['Label'].values)
log_cls_tx_pred = logistic_classifier_tx.predict(sigtest[train_input].values)
solnFile('lognntx_'+filename,log_cls_tx_pred,sigtest['EventId'].values)#,bkgtest)
ams_score = ams.AMS_metric(solutionFile, file_dir+filename+'.out', nEvents)
print ams_score
logfile.write('lognntx '+ filename+': ' + str(ams_score)+'\n')
return -1.0*float(ams_score)
示例6: train_rbm
# 需要导入模块: from sklearn.neural_network import BernoulliRBM [as 别名]
# 或者: from sklearn.neural_network.BernoulliRBM import n_iter [as 别名]
def train_rbm(X, n_components=100, n_iter=10):
    """Fit a BernoulliRBM on column-wise min-max scaled data and return it."""
    features = X.astype(np.float64)
    col_min = np.min(features, 0)
    col_max = np.max(features, 0)
    features = (features - col_min) / (col_max + 0.0001)  # scale to [0..1]
    rbm = BernoulliRBM(random_state=0, verbose=True)
    rbm.learning_rate = 0.06
    rbm.n_iter = n_iter
    rbm.n_components = n_components
    rbm.fit(features)
    return rbm
示例7: rbm_dbn_train_and_predict
# 需要导入模块: from sklearn.neural_network import BernoulliRBM [as 别名]
# 或者: from sklearn.neural_network.BernoulliRBM import n_iter [as 别名]
def rbm_dbn_train_and_predict(train_set_x, train_set_y, test_set_x, test_set_y):
    """Fit an RBM -> DBN pipeline on the training split and return test predictions."""
    dbn = DBN(epochs=200, learn_rates=0.01)
    rbm = BernoulliRBM(random_state=0, verbose=True)
    rbm.learning_rate = 0.06
    rbm.n_iter = 20
    rbm.n_components = 100
    model = Pipeline(steps=[('rbm', rbm), ('dbn', dbn)])
    model.fit(train_set_x, train_set_y)
    return model.predict(test_set_x)
示例8: rbm_logistic_train_and_predict
# 需要导入模块: from sklearn.neural_network import BernoulliRBM [as 别名]
# 或者: from sklearn.neural_network.BernoulliRBM import n_iter [as 别名]
def rbm_logistic_train_and_predict(train_set_x, train_set_y, test_set_x, test_set_y):
    """Fit an RBM -> logistic-regression pipeline and return test predictions."""
    logistic = linear_model.LogisticRegression(C=6000)
    rbm = BernoulliRBM(random_state=0, verbose=True)
    rbm.learning_rate = 0.06
    rbm.n_iter = 20
    rbm.n_components = 100
    model = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
    model.fit(train_set_x, train_set_y)
    return model.predict(test_set_x)
示例9: rbm_knn_train_and_predict
# 需要导入模块: from sklearn.neural_network import BernoulliRBM [as 别名]
# 或者: from sklearn.neural_network.BernoulliRBM import n_iter [as 别名]
def rbm_knn_train_and_predict(train_set_x, train_set_y, test_set_x, test_set_y):
    """Fit an RBM -> 5-nearest-neighbors pipeline and return test predictions."""
    knn = KNeighborsClassifier(n_neighbors=5)
    rbm = BernoulliRBM(random_state=0, verbose=True)
    rbm.learning_rate = 0.06
    rbm.n_iter = 20
    rbm.n_components = 100
    model = Pipeline(steps=[('rbm', rbm), ('knn', knn)])
    model.fit(train_set_x, train_set_y)
    return model.predict(test_set_x)
示例10: run_auto
# 需要导入模块: from sklearn.neural_network import BernoulliRBM [as 别名]
# 或者: from sklearn.neural_network.BernoulliRBM import n_iter [as 别名]
def run_auto():
    """Fit a 2000-component RBM on the 'gender/male' images and show learned filters.

    Each learned component is reshaped to 100x100 (so the input images are
    presumably 100x100 pixels); the first 12 are displayed via smartshow.
    Returns the fitted RBM.
    """
    X = load_data('gender/male')
    X = X.astype(np.float32) / 256
    rbm = BernoulliRBM(random_state=0, verbose=True)
    rbm.learning_rate = 0.06
    rbm.n_iter = 20
    rbm.n_components = 2000
    rbm.fit(X)
    component_images = [weights.reshape(100, 100) for weights in rbm.components_]
    smartshow(component_images[:12])
    return rbm
示例11: getNeuralModel
# 需要导入模块: from sklearn.neural_network import BernoulliRBM [as 别名]
# 或者: from sklearn.neural_network.BernoulliRBM import n_iter [as 别名]
def getNeuralModel(self, X, Y):
    """Fit and return a plain L2 logistic-regression classifier on (X, Y).

    NOTE(review): the RBM and its settings below are dead code -- the
    pipeline that would use them is commented out, so only the plain
    logistic regression is trained. Kept to preserve behavior exactly.
    """
    logistic = linear_model.LogisticRegression()
    rbm = BernoulliRBM(verbose=True)
    rbm.learning_rate = 0.0001
    rbm.n_iter = 1000
    rbm.n_components = 1000
    classifier = linear_model.LogisticRegression(penalty='l2', tol=.0001)  #Pipeline(steps = [('rbm', rbm),('logistic',logistic)])
    classifier.fit(X, Y)
    return classifier
示例12: Logistic_cross_vaildation
# 需要导入模块: from sklearn.neural_network import BernoulliRBM [as 别名]
# 或者: from sklearn.neural_network.BernoulliRBM import n_iter [as 别名]
def Logistic_cross_vaildation():
logistic = linear_model.LogisticRegression()
#cross-validation for logistic regression with RBM
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
rbm.n_iter=100
cv = cross_validation.StratifiedKFold(output, 3)
score_func = metrics.f1_score
parameters = { "rbm__learning_rate": [0.1, 0.01, 0.001,0.0001],"rbm__n_components":[100,200,300,400,500,600,700,800],"logistic__C":[1,100,1000,5000]}
grid_search = GridSearchCV(classifier,parameters,score_func=score_func,cv=cv)
grid_search.fit(input,output)
print "Best %s: %0.3f" % (score_func.__name__, grid_search.best_score_)
print "Best parameters set:"
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print "\t%s: %r" % (param_name, best_parameters[param_name])
示例13: SGD_cross_validation
# 需要导入模块: from sklearn.neural_network import BernoulliRBM [as 别名]
# 或者: from sklearn.neural_network.BernoulliRBM import n_iter [as 别名]
def SGD_cross_validation():
SGD = linear_model.SGDClassifier(loss='hinge',penalty='l2',random_state=42,n_jobs=-1,epsilon=0.001)
# cross-validaiotn for SGD classifier
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('SGD', SGD)])
rbm.n_iter=100
cv = cross_validation.StratifiedKFold(output, 3)
score_func = metrics.f1_score
parameters = { "rbm__learning_rate": [0.1, 0.01, 0.001,0.0001],"rbm__n_components":[100,200,300,400,500,600,700,800],"SGD__alpha":[0.1,0.01,0.001,0.0001], "SGD__C":[1,100,1000,10000]}
grid_search = GridSearchCV(classifier,parameters,score_func=score_func,cv=cv)
grid_search.fit(input,output)
print "Best %s: %0.3f" % (score_func.__name__, grid_search.best_score_)
print "Best parameters set:"
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print "\t%s: %r" % (param_name, best_parameters[param_name])
示例14: neural_net
# 需要导入模块: from sklearn.neural_network import BernoulliRBM [as 别名]
# 或者: from sklearn.neural_network.BernoulliRBM import n_iter [as 别名]
def neural_net():
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
sidelength = int(np.sqrt(X.shape[1]))
X,Y = nudge_dataset(X,digits.target,dimen=(sidelength,sidelength))
#Scale the data to be between zero and 1 at all pixels:
X = (X - np.min(X,axis=0))/(np.max(X,axis=0)+0.0001)
#Split the data set into a training and testing set:
X_train,X_test,Y_train,Y_test = train_test_split(X,Y,test_size=0.2,random_state=0)
#Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
#The classifier
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
#logistic_classifier = linear_model.LogisticRegression(C=100.0)
#logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print ""
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
#Predict a few individual cases:
print classifier.predict(X_test[:5,:]),Y_test[:5]
示例15: estimate_n_components
# 需要导入模块: from sklearn.neural_network import BernoulliRBM [as 别名]
# 或者: from sklearn.neural_network.BernoulliRBM import n_iter [as 别名]
def estimate_n_components():
    """Sweep RBM hidden-layer sizes and plot the mean pseudo-likelihood for each.

    Trains one 50-iteration RBM per candidate size on the 'gender/male'
    images, records the mean score_samples value, plots score vs. size,
    and returns (n_comp_list, scores).
    """
    X = load_data('gender/male')
    X = X.astype(np.float32) / 256
    n_comp_list = [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200]
    scores = []
    for n_comps in n_comp_list:
        rbm = BernoulliRBM(random_state=0, verbose=True)
        rbm.learning_rate = 0.06
        rbm.n_iter = 50
        # BUG FIX: the original hard-coded n_components = 100, so every sweep
        # point trained an identical model; use the loop variable instead.
        rbm.n_components = n_comps
        rbm.fit(X)
        scores.append(rbm.score_samples(X).mean())
    plt.figure()
    plt.plot(n_comp_list, scores)
    plt.show()
    return n_comp_list, scores