当前位置: 首页>>代码示例>>Python>>正文


Python NeuralNet.predict_proba方法代码示例

本文整理汇总了Python中neuralnet.NeuralNet.predict_proba方法的典型用法代码示例。如果您正苦于以下问题:Python NeuralNet.predict_proba方法的具体用法?Python NeuralNet.predict_proba怎么用?Python NeuralNet.predict_proba使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在neuralnet.NeuralNet的用法示例。


在下文中一共展示了NeuralNet.predict_proba方法的1个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: BlendedModel

# Required import: from neuralnet import NeuralNet [as alias]
# Alternatively: from neuralnet.NeuralNet import predict_proba [as alias]
class BlendedModel(BaseEnsemble):
    def __init__(self, models=None, blending='average', nbFeatures=4):
        """Ensemble that blends the probability outputs of several base models.

        Parameters
        ----------
        models : list, optional
            Base models exposing ``fit``/``predict_proba``. Defaults to a
            fresh empty list per instance (the original used the mutable
            default ``models=[]``, shared across every instance built
            without the argument).
        blending : str
            'average' (mean of base probabilities) or 'most_confident'
            (weight the lowest-entropy prediction).
        nbFeatures : int
            Input dimension forwarded to the NeuralNet stacker.

        Raises
        ------
        ValueError
            If ``blending`` is not a supported method.
        """
        # Avoid the shared-mutable-default pitfall: new list on each call.
        self.models = [] if models is None else models
        self.blending = blending
        # Stacking classifiers; the commented kwargs were an abandoned experiment.
        self.logR = LogisticRegression(C=10)  # ,multi_class='multinomial',solver='lbfgs', max_iter=10000)
        self.logRT = LogisticRegression(C=10)  # ,multi_class='multinomial',solver='lbfgs', max_iter=10000)
        self.nn = NeuralNet(nbFeatures)
        self.XGB = ModifiedXGBClassifier()
        if self.blending not in ('average', 'most_confident'):
            # ValueError is more precise than the original bare Exception and
            # remains backward compatible with callers catching Exception.
            raise ValueError('Wrong blending method')
    
    ##fit every base model of the ensemble on the training data
    def fit(self, X, y):
        """Fit each base model on (X, y); return self for chaining.

        Logs each model's parameters before fitting. Uses print() call
        syntax, which behaves identically on Python 2 and 3, instead of
        the original Python-2-only print statement.
        """
        for model in self.models:
            print('Training model :')
            print(model.get_params())
            model.fit(X, y)
        return self
    
    ##blend the base models' probability outputs (mean, or an entropy-based
    ##"most confident wins" scheme that the author notes does not work well)
    def predict_proba(self, X):
        """Return blended class probabilities for X.

        'average': elementwise mean of every base model's predict_proba.
        'most_confident': per sample, put all the weight on the model(s)
        whose prediction has minimal Shannon entropy (ties share equally).
        """
        # stacked has shape (n_models, n_samples, n_classes)
        stacked = np.array([m.predict_proba(X) for m in self.models])
        if self.blending == 'average':
            return np.mean(stacked, axis=0)
        elif self.blending == 'most_confident':
            def row_entropy(row):
                # Clip probabilities away from 0/1, renormalize, then
                # compute the Shannon entropy of the row.
                clipped = [min(max(1e-5, p), 1 - 1e-5) for p in row]
                normed = np.array(clipped) / sum(clipped)
                return sum([-p * math.log(p) for p in normed])

            def min_entropy_weights(col):
                # 1.0 on the minimal-entropy model(s), 0 elsewhere; normalized.
                mask = (col == np.min(col)).astype(float)
                return mask / np.sum(mask)

            # entropies: (n_models, n_samples) — one entropy per prediction row.
            entropies = np.array(
                [np.apply_along_axis(row_entropy, 1, pred) for pred in stacked]
            )
            # weights: (n_models, n_samples), a dirac over models per sample.
            weights = np.apply_along_axis(min_entropy_weights, 0, entropies)
            return np.sum(np.multiply(weights.T, stacked.T).T, axis=0)
 
    
    ##fit the logistic-regression stacking classifier
    def fitLog(self, X, y, mod=0):
        """Fit a logistic-regression stacker on the base models' outputs.

        mod=0: raw predicted probabilities as features (fits self.logR).
        mod=1: log-odds-transformed probabilities (fits self.logRT).
        Any other ``mod`` fits nothing (matching the original). Returns self.

        The original built the feature matrix with a Python triple loop and
        hard-coded 4 classes in the mod=1 branch; this version vectorizes
        the construction and works for any number of classes.
        """
        if mod not in (0, 1):
            return self
        # preds has shape (n_models, n_samples, n_classes).
        preds = np.array([model.predict_proba(X) for model in self.models])
        # One row per sample: all models' outputs concatenated, model-major
        # (same ordering as the original nested-loop flatten).
        features = preds.transpose(1, 0, 2).reshape(len(X), -1)
        if mod == 0:
            self.logR.fit(features, y)
        else:
            # Log-odds ("logit") transform of each probability.
            self.logRT.fit(np.log(features / (1 - features)), y)
        return self
    
  
    ##predict the outputs of the logistic-regression stack
    def predict_Logproba(self, X, mod=0):
        """Predict class probabilities via the logistic-regression stacker.

        mod=0: features are the raw base-model probabilities (self.logR).
        mod=1: features are log-odds-transformed probabilities (self.logRT).
        Any other ``mod`` returns None (matching the original implicit fall-through).

        Vectorizes the original Python triple loop and generalizes the
        hard-coded 4-class log-odds transform to any class count; the
        feature layout matches what fitLog produces.
        """
        if mod not in (0, 1):
            return None
        # preds has shape (n_models, n_samples, n_classes).
        preds = np.array([model.predict_proba(X) for model in self.models])
        # Model-major concatenation per sample, identical ordering to fitLog.
        features = preds.transpose(1, 0, 2).reshape(len(X), -1)
        if mod == 0:
            return self.logR.predict_proba(features)
        return self.logRT.predict_proba(np.log(features / (1 - features)))
        
    ##gradient-boosting stacking classifier (author notes it does not work well)
    def fitXGB(self, X, y):
        """Fit the XGB stacker on log-odds of the base models' probabilities.

        Vectorizes the original Python triple loop and generalizes the
        hard-coded 4-class transform to any class count. Returns self.
        """
        # preds has shape (n_models, n_samples, n_classes).
        preds = np.array([model.predict_proba(X) for model in self.models])
        raw = preds.transpose(1, 0, 2).reshape(len(X), -1)
        # Log-odds transform of every probability, model-major per sample.
        features = np.log(raw / (1 - raw))
        # NOTE(review): an experiment appending X itself to the features was
        # abandoned in the original (commented out); not reproduced here.
        self.XGB.fit(features, y)
        return self
    
    def predict_XGBproba(self, X):
        """Predict class probabilities via the XGB stacker.

        Builds the same log-odds feature matrix as fitXGB (vectorized,
        any class count instead of the original hard-coded 4) and returns
        self.XGB.predict_proba on it.
        """
        # preds has shape (n_models, n_samples, n_classes).
        preds = np.array([model.predict_proba(X) for model in self.models])
        raw = preds.transpose(1, 0, 2).reshape(len(X), -1)
        features = np.log(raw / (1 - raw))
        return self.XGB.predict_proba(features)
  
    ##neural network stack classifier
    def fitNN(self,X,y,lambda1=0.00000001,lambda2=0.00005,new=0,teX=[],teY=[],lr=0.001):
        
        preds = np.array(
                        [model.predict_proba(X) for model in self.models]
                    )
        features=np.array([np.array([preds[j][i] for j in range(len(self.models))]).flatten() for i in range(len(X))]) 
        features= np.append(features, X, axis=1)
#.........这里部分代码省略.........
开发者ID:alexrame,项目名称:snips,代码行数:103,代码来源:stacking.py


注:本文中的neuralnet.NeuralNet.predict_proba方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。