

Python DBN.predict_proba Method Code Examples

This article collects typical usage examples of the Python method nolearn.dbn.DBN.predict_proba. If you are unsure what DBN.predict_proba does, how to call it, or what realistic uses look like, the hand-picked examples below should help. You can also explore further examples of nolearn.dbn.DBN, the class that provides this method.


The following presents 4 code examples of the DBN.predict_proba method, sorted by popularity by default.
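Before the examples, here is a minimal sketch of the typical DBN.fit / DBN.predict_proba workflow on synthetic data. It assumes nolearn 0.5.x with its gdbn backend installed and an sklearn version old enough to still ship sklearn.cross_validation (which the examples below also use); the dataset, layer widths, and hyperparameters are placeholders, not recommendations.

import numpy as np
from nolearn.dbn import DBN
from sklearn.cross_validation import train_test_split
from sklearn.datasets import make_classification
from sklearn.metrics import log_loss

# Synthetic multi-class data standing in for a real feature matrix.
X, y = make_classification(n_samples=500, n_features=20, n_classes=3,
                           n_informative=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)

# Layer sizes: input width, one hidden layer, number of classes.
dbn = DBN([X_train.shape[1], 50, len(np.unique(y))],
          learn_rates=0.3, learn_rate_decays=0.9, epochs=10, verbose=0)
dbn.fit(X_train, y_train)

# predict_proba returns an (n_samples, n_classes) array of class probabilities.
proba = dbn.predict_proba(X_test)
print('Log loss = {}'.format(log_loss(y_test, proba)))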

Example 1: dbn_clf

# Required import: from nolearn.dbn import DBN [as alias]
# Or: from nolearn.dbn.DBN import predict_proba [as alias]
def dbn_clf(X, y, hidden_sizes=[300], num_epochs=10):
    """ deep belief network """
    Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.25, random_state=0)
    output_categories = np.load(os.path.join(loaddir,'submit_col_name.npy'))

    print('Start training Neural Network...')

    dbn = DBN(
        [Xtrain.shape[1]] + hidden_sizes + [len(output_categories)],
        learn_rates = 0.3,
        learn_rate_decays = 0.9,
        epochs = num_epochs,
        verbose = 1)
    dbn.fit(Xtrain, ytrain)
    
    ypred = dbn.predict_proba(Xtest)
    score = log_loss(ytest, ypred)
    print('Log loss = {}'.format(score))

    return dbn, score
Developer: wkvictor, Project: Kaggle-TalkingData, Lines of code: 22, Source file: train_models.py

Example 2: test

# Required import: from nolearn.dbn import DBN [as alias]
# Or: from nolearn.dbn.DBN import predict_proba [as alias]
def test(self):
    #iris = datasets.load_iris()
    #X, y = iris.data, iris.target
    X, y = self.dataMat, self.labelMat
    X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.6, random_state=12)
    #clf = RandomForestClassifier(max_depth=6, min_samples_split=9, min_samples_leaf=15, n_estimators=5)
    #clf = DBN([X.shape[1], 24, 2], scales=0.5, learn_rates=0.02, learn_rate_decays=0.95, learn_rate_minimums=0.001, epochs=500, l2_costs=0.02*0.031, dropouts=0.2, verbose=0)
    #cvnum = ShuffleSplit(2013, n_iter=10, test_size=0.6, train_size=0.4, random_state=0)
    for scal in arange(4.5, 5.0, 0.5):
        print "**************************************************************"
        print "DBN scal=", scal
        clf = DBN([X.shape[1], 24, 48, 2], scales=0.5, learn_rates=0.01, learn_rate_decays=0.95, learn_rate_minimums=0.001, epochs=50, l2_costs=0.02*0.001, dropouts=0.0, verbose=0)
        clf.fit(X_train, y_train)
        scores = cross_val_score(clf, X, y, cv=3, scoring='roc_auc')
        y_pred = clf.predict(X_test)
        y_predprob = clf.predict_proba(X_test)
        prf = precision_recall_fscore_support(y_test, y_pred, average='binary')
        print "Accuracy: %0.5f (+/- %0.5f)" % (scores.mean(), scores.std() * 2)
        print classification_report(y_test, y_pred)
        print 'The accuracy is: ', accuracy_score(y_test, y_pred)
        print 'The log loss is:', log_loss(y_test, y_predprob)
        print 'The ROC score is:', roc_auc_score(y_test, y_predprob[:, 1])
Developer: kevinmtian, Project: Kaggle-Contests, Lines of code: 24, Source file: cross_valid_NN.py

Example 3: PolynomialFeatures

# Required import: from nolearn.dbn import DBN [as alias]
# Or: from nolearn.dbn.DBN import predict_proba [as alias]
# generate polynomial features
poly = PolynomialFeatures()
train = poly.fit_transform(train)
test = poly.transform(test)
#train = np.hstack((train, poly_train))
#test = np.hstack((test, poly_test))

# encode labels
lbl_enc = LabelEncoder()
labels = lbl_enc.fit_transform(labels)

# set up datasets for cross eval
x_train, x_test, y_train, y_test = train_test_split(train, labels)

# train a DBN classifier
clf = DBN([train.shape[1], 8000, 9], learn_rates=0.3,
          learn_rate_decays=0.9, epochs=50, verbose=1)  # l2_costs = 0.0001,

clf.fit(x_train, y_train)

# predict on test set
preds = clf.predict_proba(x_test)

# ----------------------  cross eval  -----------------------------------------

#y_test = label_binary.inverse_transform(y_test)
#y_test = LabelEncoder().fit_transform(y_test)

print("Multiclass Log Loss: ", MultiLogLoss(y_test, preds))
Developer: Thru-Echoes, Project: structure-gene-discovery, Lines of code: 31, Source file: dbm_logloss.py

Example 4: load_iris

# Required import: from nolearn.dbn import DBN [as alias]
# Or: from nolearn.dbn.DBN import predict_proba [as alias]
from nolearn.dbn import DBN
from sklearn import metrics
from sklearn import cross_validation
from sklearn.cross_validation import cross_val_score
from sklearn.datasets import load_iris
from sklearn.preprocessing import scale
from sklearn.metrics import zero_one_loss, classification_report, accuracy_score, log_loss

iris = load_iris()
X_train, X_test, y_train, y_test = cross_validation.train_test_split(iris.data, iris.target, test_size=0.2, random_state=0)
X_train_in, X_train_test, y_train_in, y_train_test = cross_validation.train_test_split(X_train, y_train, test_size=0.4, random_state=0)
clf = DBN([X_train.shape[1], 4, 3],learn_rates=0.05,epochs=200)

print 'Cross Validation'
clf.fit(X_train_in, y_train_in)
y_pred = clf.predict(X_train_test)
y_predprob = clf.predict_proba(X_train_test)
print classification_report(y_train_test,y_pred)
print 'The accuracy is: ', accuracy_score(y_train_test,y_pred)
print 'The log loss is:', log_loss(y_train_test, y_predprob)

print 'Train VS Test'
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
y_predprob = clf.predict_proba(X_test)
print classification_report(y_test,y_pred)
print 'The accuracy is: ', accuracy_score(y_test,y_pred)
print 'The log loss is:', log_loss(y_test, y_predprob)

Developer: kevinmtian, Project: Kaggle-Contests, Lines of code: 30, Source file: DBN_iris.py


Note: The nolearn.dbn.DBN.predict_proba examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors; consult each project's license before redistributing or using the code. Please do not reproduce this article without permission.