

Python LogisticRegressionCV.predict_proba Method Code Examples

This article collects typical usage examples of the Python method sklearn.linear_model.LogisticRegressionCV.predict_proba. If you are unsure what LogisticRegressionCV.predict_proba does or how to use it, the curated code examples below should help. You can also explore further usage examples of sklearn.linear_model.LogisticRegressionCV.


Below are 15 code examples of the LogisticRegressionCV.predict_proba method, sorted by popularity by default.
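Before the collected examples, the following minimal, self-contained sketch shows the typical fit / predict_proba workflow; the synthetic dataset and the parameter choices (Cs=10, cv=5, scoring='roc_auc') are illustrative assumptions, not drawn from any of the projects below.

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegressionCV
from sklearn.model_selection import train_test_split

# Synthetic binary-classification data, purely for illustration
X, y = make_classification(n_samples=500, n_features=10, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# Cross-validate 10 C values on a log scale with 5-fold CV
clf = LogisticRegressionCV(Cs=10, cv=5, scoring='roc_auc')
clf.fit(X_train, y_train)

proba = clf.predict_proba(X_test)  # shape (n_samples, n_classes); each row sums to 1
print(proba[:3])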

Example 1: make_predictions

# Required import: from sklearn.linear_model import LogisticRegressionCV [as alias]
# Or: from sklearn.linear_model.LogisticRegressionCV import predict_proba [as alias]
def make_predictions():
    # Fit Logistic Regression Model
    logreg = LogisticRegressionCV(scoring='neg_log_loss', n_jobs=-1, verbose=1, random_state=6156)  # 'neg_log_loss' replaces the old 'log_loss' scorer name
    logreg.fit(X=trainX, y=train['y'].values)
    
    # Validate
    pred_pr = logreg.predict_proba(valX)
    loss = log_loss(y_true=val['y'].values, y_pred=pred_pr)
    print "Validation log loss:", loss
    
    # Get Test predictions
    img_files = [os.path.join(IMG_DIR, f) for f in os.listdir(IMG_DIR)]
        
    if os.path.isfile('test_pca.csv'):
        test_pca = pd.read_csv('test_pca.csv', dtype={'id' : str})
    else:
        test_pca = prepare_test_data(img_files, STD_SIZE)
        
    test_predictions = logreg.predict_proba(test_pca.values[:, 1:])
    id_s = [re.sub(r'\D', '', f) for f in img_files]
    df_id = pd.DataFrame({'id' : id_s})
    col_names = ['col'+str(i) for i in range(1, 9)]
    df_yhat = pd.DataFrame(data=test_predictions, columns=col_names)
    df_id_yhat = pd.concat([test_pca['id'], df_yhat], axis=1)
    yhat = df_id.merge(df_id_yhat, on='id', how='left')
    yhat.fillna(1./8, inplace=True)
    yhat.to_csv('kaggle_430_2pm.csv', index=False)
Developer: keithgw | Project: ML_Competition | Lines: 29 | Source: build_model.py

Example 2: compute_roc_auc

# Required import: from sklearn.linear_model import LogisticRegressionCV [as alias]
# Or: from sklearn.linear_model.LogisticRegressionCV import predict_proba [as alias]
def compute_roc_auc(test_sa, adv_sa, split=1000):
    tr_test_sa = np.array(test_sa[:split])
    tr_adv_sa = np.array(adv_sa[:split])

    tr_values = np.concatenate(
        (tr_test_sa.reshape(-1, 1), tr_adv_sa.reshape(-1, 1)), axis=0
    )
    tr_labels = np.concatenate(
        (np.zeros_like(tr_test_sa), np.ones_like(tr_adv_sa)), axis=0
    )

    lr = LogisticRegressionCV(cv=5, n_jobs=-1).fit(tr_values, tr_labels)

    ts_test_sa = np.array(test_sa[split:])
    ts_adv_sa = np.array(adv_sa[split:])
    values = np.concatenate(
        (ts_test_sa.reshape(-1, 1), ts_adv_sa.reshape(-1, 1)), axis=0
    )
    labels = np.concatenate(
        (np.zeros_like(ts_test_sa), np.ones_like(ts_adv_sa)), axis=0
    )

    probs = lr.predict_proba(values)[:, 1]

    _, _, auc_score = compute_roc(
        probs_neg=probs[: (len(test_sa) - split)],
        probs_pos=probs[(len(test_sa) - split) :],
    )

    return auc_score
Developer: coinse | Project: sadl | Lines: 32 | Source: utils.py

Example 3: mdl_1d_cat

# Required import: from sklearn.linear_model import LogisticRegressionCV [as alias]
# Or: from sklearn.linear_model.LogisticRegressionCV import predict_proba [as alias]
def mdl_1d_cat(x, y):
    """builds univariate model to calculate AUC"""
    if x.nunique() > 10 and com.is_numeric_dtype(x):
        x = sb_cutz(x)

    series = pd.get_dummies(x, dummy_na=True)
    lr = LogisticRegressionCV(scoring='roc_auc')

    lr.fit(series, y)

    try:
        preds = (lr.predict_proba(series)[:, -1])
        #preds = (preds > preds.mean()).astype(int)
    except ValueError:
        Tracer()()

    plot = plot_cat(x, y)

    imgdata = BytesIO()
    plot.savefig(imgdata)
    imgdata.seek(0)

    aucz = roc_auc_score(y, preds)
    cmatrix = 'data:image/png;base64,' + \
        quote(base64.b64encode(imgdata.getvalue()))
    plt.close()
    return aucz, cmatrix
Developer: bartlesy | Project: pandas-profiling | Lines: 29 | Source: sb_univar.py

Example 4: Second_layer_ensembling

# Required import: from sklearn.linear_model import LogisticRegressionCV [as alias]
# Or: from sklearn.linear_model.LogisticRegressionCV import predict_proba [as alias]
def Second_layer_ensembling(p_valid, p_test, y_valid, y_test, p_ttest_t):
    print('')
    print('Performance of optimization based ensemblers (2nd layer) on X_test')
    print('------------------------------------------------------------------')
    
    #Creating the data for the 2nd layer
    XV = np.hstack(p_valid)
    XT = np.hstack(p_test)

    XTTT = np.hstack(p_ttest_t)
    
    clf = LogisticRegressionCV(scoring='neg_log_loss', random_state=random_state)  # 'neg_log_loss' replaces the old 'log_loss' scorer name
    clf = clf.fit(XV, y_valid)
    
    yT = clf.predict_proba(XT)
    yt_out = clf.predict_proba(XTTT)
    print('{:20s} {:2s} {:1.7f}'.format('Ensemble of Classifiers', 'logloss_ensembled  =>', log_loss(y_test, yT[:,1])))
    
    return yt_out
Developer: AnatolyPavlov | Project: bnpcompetition | Lines: 21 | Source: two_layer_training.py

Example 5: compute_classifier

# Required import: from sklearn.linear_model import LogisticRegressionCV [as alias]
# Or: from sklearn.linear_model.LogisticRegressionCV import predict_proba [as alias]
def compute_classifier(pow_mat, recalls):
    print('Computing logistic regression:', pow_mat.shape[0], 'samples', pow_mat.shape[1], 'features')

    lr_classifier = LogisticRegressionCV(penalty='l1', solver='liblinear')
    lr_classifier.fit(pow_mat, recalls)
    probs = lr_classifier.predict_proba(pow_mat)[:,1]
    auc = roc_auc_score(recalls, probs)

    print('AUC =', auc)

    return lr_classifier
Developer: busygin | Project: ram_utils | Lines: 13 | Source: FR1_events.py

Example 6: LogisticModelCombination

# Required import: from sklearn.linear_model import LogisticRegressionCV [as alias]
# Or: from sklearn.linear_model.LogisticRegressionCV import predict_proba [as alias]
class LogisticModelCombination(ClassifierMixin):

    """
        Combine multiple models using a Logistic Regression
    """

    def __init__(self, classifiers, cv_folds=1, use_original_features=False, random_state=None, verbose=0):
        self.classifiers = classifiers
        self.cv_folds = cv_folds
        self.use_original_features = use_original_features
        self.logistic = LogisticRegressionCV(
            Cs=[10, 1, 0.1, 0.01, 0.001], refit=True)

        if random_state is None:
            self.random_state = random.randint(0, 10000)
        else:
            self.random_state = random_state

    def fit(self, X, y):
        # Old (pre-0.18) sklearn passed y and n_iter directly; the modern class
        # takes n_splits and exposes a split(X, y) generator.
        sss = StratifiedShuffleSplit(
            n_splits=self.cv_folds, random_state=self.random_state)
        for train_index, test_index in sss.split(X, y):
            train_x = X[train_index]
            train_y = y[train_index]

            test_x = X[test_index]
            test_y = y[test_index]

            self._fit_logistic(train_x, train_y)

    def _fit_logistic(self, X, y):
        pred_X = self.convert_data(X)
        self.logistic.fit(pred_X, y)
        return self

    def convert_data(self, X):
        preds = []
        for i, clf in enumerate(self.classifiers):
            class_pred = clf.predict(X)  # hard class labels, not probabilities, are used as meta-features
            preds.append(class_pred)
        pred_X = np.vstack(preds).T

        if self.use_original_features:
            pred_X = np.concatenate([X, pred_X], axis=1)
        return pred_X

    def predict_proba(self, X):
        pred_X = self.convert_data(X)
        return self.logistic.predict_proba(pred_X)
Developer: erolbiberoglu | Project: machine-learning | Lines: 51 | Source: EnsembleClassifiers.py
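
Since the example above is only a class definition, here is a hypothetical usage sketch; the random data and the two base classifiers are illustrative assumptions, not part of the original project.

# Hypothetical usage of LogisticModelCombination (data and base models are assumed)
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier

rng = np.random.RandomState(0)
X = rng.rand(200, 5)
y = (X[:, 0] > 0.5).astype(int)

# Pre-fit base classifiers whose predictions become the meta-features
base = [RandomForestClassifier(random_state=0).fit(X, y),
        GradientBoostingClassifier(random_state=0).fit(X, y)]

combo = LogisticModelCombination(base, cv_folds=2, random_state=42)
combo.fit(X, y)
print(combo.predict_proba(X)[:3])  # per-class probabilities from the combiner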

Example 7: mdl_1d

# Required import: from sklearn.linear_model import LogisticRegressionCV [as alias]
# Or: from sklearn.linear_model.LogisticRegressionCV import predict_proba [as alias]
def mdl_1d(x, y):
    """builds univariate model to calculate AUC"""
    lr = LogisticRegressionCV(scoring='roc_auc')
    lars = LassoLarsIC(criterion='aic')

    if x.nunique() > 10 and com.is_numeric_dtype(x):
        x2 = sb_cutz(x)
        series = pd.get_dummies(x2, dummy_na=True)
    else:
        series = pd.get_dummies(x, dummy_na=True)

    lr.fit(series, y)
    lars.fit(series, y)

    try:
        preds = (lr.predict_proba(series)[:, -1])
        #preds = (preds > preds.mean()).astype(int)
    except ValueError:
        Tracer()()

    # try:
    #    cm = confusion_matrix(y, (preds > y.mean()).astype(int))
    # except ValueError:
    #    Tracer()()

    aucz = roc_auc_score(y, preds)

    ns = num_bin_stats(x, y)

    nplot = plot_num(ns)
    #plot = plot_confusion_matrix(cm, y)

    imgdata = BytesIO()
    nplot.savefig(imgdata)
    imgdata.seek(0)
    nplot = 'data:image/png;base64,' + \
        quote(base64.b64encode(imgdata.getvalue()))
    plt.close()

    bplot = plot_bubble(ns)
    imgdatab = BytesIO()
    bplot.savefig(imgdatab)
    imgdatab.seek(0)
    bplot = 'data:image/png;base64,' + \
        quote(base64.b64encode(imgdatab.getvalue()))
    plt.close()

    return aucz, nplot, bplot
Developer: bartlesy | Project: pandas-profiling | Lines: 50 | Source: sb_univar.py

Example 8: LogisticRegressionCV

# Required import: from sklearn.linear_model import LogisticRegressionCV [as alias]
# Or: from sklearn.linear_model.LogisticRegressionCV import predict_proba [as alias]
#Make what should be categorical variables into categorical variables
for col in CATEGORICAL_COLUMNS:
    df_agg[col] = df_agg[col].astype('category')

#Convert all categoricals into dummy/indicator variables
X_a = pd.get_dummies(df_agg)

matchlabel = np.concatenate((np.zeros([n_tr,1]),np.ones([X_a.shape[0]-n_tr,1])))

matchreg = LogisticRegressionCV()
matchreg.fit(X_a.drop('Response', axis=1), matchlabel.ravel())  # ravel() passes a 1-D label array, as sklearn expects

#Chop up training data to build our own evaluation scheme  

estim = matchreg.predict_proba(X_a.drop('Response', axis=1))[:, 1]
plt.plot(estim, '.')
print("Converted dataset....")

X_a['match']=estim
truetest = X_a[n_tr:]
training = X_a[:n_tr].sort_values('match', ascending=False)[:50000]  # .sort() was removed from pandas; use sort_values
truetest = truetest.drop('Response', axis=1)  # assign the result; the original call was a no-op


#Stitch training data back together and write them out to CSV


training.to_csv('matchtrain.csv')
truetest.to_csv('matchtest.csv')
Developer: lots-of-things | Project: ml-brute | Lines: 31 | Source: split_data_match.py

Example 9: range

# Required import: from sklearn.linear_model import LogisticRegressionCV [as alias]
# Or: from sklearn.linear_model.LogisticRegressionCV import predict_proba [as alias]
	for i in range(num_new_features):
		col = str(i)
		df[col] = X_new[:,i]

	# get frequencies
	#for col in  df.columns.values:
		#df['f_'+col] = df.groupby(col)[col].transform('count')
	del all_data, dp1, dt1

	# one hot encoding (note: categorical_features was removed in scikit-learn 0.22;
	# current code would select columns with a ColumnTransformer instead)
	enc = OneHotEncoder(categorical_features=range(num_new_features + 9))
	enc.fit(df.iloc[:, 1:])

	df_train = df.drop(df.index[range(num_train, num_train+num_test)])
	df_test = df.drop(df.index[range(num_train)])
	del df
	y = df_train.iloc[:, 0]; X = df_train.iloc[:, 1:]
	y_test = df_test.iloc[:, 0]; X_test = df_test.iloc[:, 1:]
	X = enc.transform(X)
	X_test = enc.transform(X_test)
	
	# CV and training
	classifier = LogisticRegressionCV(Cs = np.logspace(-4, 4, 10, base=2), penalty='l1', scoring = 'roc_auc', solver = 'liblinear')
	classifier.fit(X, y)
	
	# testing
	y_test_pred = classifier.predict_proba(X_test)
	ans = pd.DataFrame({'Id': y_test, 'Action' : y_test_pred[:,1]})
	ans.to_csv('hw3p1.csv', index=False, columns=['Id', 'Action'])	

Developer: abhishekcs | Project: LML_HW3 | Lines: 31 | Source: hw3p1.py

Example 10: recall

# Required import: from sklearn.linear_model import LogisticRegressionCV [as alias]
# Or: from sklearn.linear_model.LogisticRegressionCV import predict_proba [as alias]
    plt.legend(loc=7)
    plt.xlabel(param_name + " values")
    if metric == 'accuracy': plt.ylabel("Mean cross validation accuracy")
    if metric == 'recall': plt.ylabel("Mean cross validation recall (Sensitivity) for label 1")
    plt.show()

    # return the training and testing scores on each parameter value
    return train_scores, test_scores


########FOCUSING ON LOGISTIC REGRESSION AND LDA TEST DATA########################
from sklearn.linear_model import LogisticRegressionCV

logregCV = LogisticRegressionCV(cv= 10, solver = 'lbfgs', penalty = 'l2').fit(train_standardized, target)
logCV_acc = logregCV.scores_
y_pred = logregCV.predict_proba(test_standardized)


ldaC = LDA().fit(train_standardized, target)
y_pred = ldaC.predict_proba(test_standardized)

ad_fit = ad(n_estimators = 10).fit(train_standardized, target)
y_pred = ad_fit.predict_proba(test_standardized)


rf_fit = rf(random_state=99).fit(train_standardized, target)

splitSizes = list(range(1,10,1))
train_scores, test_scores = calc_params(train_standardized, target, rf_fit, splitSizes, 'min_samples_leaf', 5, metric = 'accuracy')
pd.DataFrame(np.array([test_scores, splitSizes]).T, columns = ['Test Recall', 'Minimum Split Size'])
Developer: ysriram1 | Project: Kaggle-Shelter-Animal-Outcomes | Lines: 32 | Source: script.py

Example 11: get_Onest_Amplitude_Duration_of_spindles

# Required import: from sklearn.linear_model import LogisticRegressionCV [as alias]
# Or: from sklearn.linear_model.LogisticRegressionCV import predict_proba [as alias]

#......... part of the code is omitted here .........
    pass_ =RMS_mean > mph['mean']
    up = np.where(np.diff(pass_.astype(int))>0)
    down= np.where(np.diff(pass_.astype(int))<0)
    up = up[0]
    down = down[0]
    ###############################
    #print(down[0],up[0])
    if down[0] < up[0]:
        down = down[1:]
    #print(down[0],up[0])
    #############################
    if (up.shape > down.shape) or (up.shape < down.shape):
        size = np.min([up.shape,down.shape])
        up = up[:size]
        down = down[:size]
    C = np.vstack((up,down))
    for pairs in C.T:
        
        if l_bound < (time[pairs[1]] - time[pairs[0]]) < h_bound:
            SegmentForPeakSearching = RMS_mean[pairs[0]:pairs[1],]
            if np.max(SegmentForPeakSearching)< mpl['mean']:
                temp_time = time[pairs[0]:pairs[1]]
                ints_temp = np.argmax(SegmentForPeakSearching)
                peak_time['mean'].append(temp_time[ints_temp])
                peak_at.append(SegmentForPeakSearching[ints_temp])
                duration_temp = time[pairs[1]] - time[pairs[0]]
                duration.append(duration_temp) 
    time_find=[];mean_peak_power=[];Duration=[];
    for item,PEAK,duration_time in zip(peak_time['mean'],peak_at,duration):
        temp_timePoint=[]
        for ii, names in enumerate(channelList):
            try:
                temp_timePoint.append(min(enumerate(peak_time[names]), key=lambda x: abs(x[1]-item))[1])
            except:
                temp_timePoint.append(item + 2)
        try:
            if np.sum((abs(np.array(temp_timePoint) - item)<tol).astype(int))>=syn_channels:
                time_find.append(float(item))
                mean_peak_power.append(PEAK)
                Duration.append(duration_time)
        except:
            pass
    ############ the end of the processing in which no other inputs ##
    #### update the spindles we found if we want to add information of sleep stages ######
    if sleep_stage:
        
        temp_time_find=[];temp_mean_peak_power=[];temp_duration=[];
        # separate out stage 2
        stages = annotations[annotations.Annotation.apply(stage_check)]
        On = stages[::2];Off = stages[1::2]
        stage_on_off = list(zip(On.Onset.values, Off.Onset.values))
        if abs(np.diff(stage_on_off[0]) - 30) < 2:
            pass
        else:
            On = stages[1::2];Off = stages[::2]
            stage_on_off = list(zip(On.Onset.values[1:], Off.Onset.values[2:]))
        for single_time_find, single_mean_peak_power, single_duration in zip(time_find,mean_peak_power,Duration):
            for on_time,off_time in stage_on_off:
                if intervalCheck([on_time,off_time],single_time_find,tol=tol):
                    temp_time_find.append(single_time_find)
                    temp_mean_peak_power.append(single_mean_peak_power)
                    temp_duration.append(single_duration)
        time_find=temp_time_find;mean_peak_power=temp_mean_peak_power;Duration=temp_duration
    
    ####### decision function based on spindles we have just found ####
    """
    A single floating representation is computed based on the validation window size (say 3 seconds), and information like peak power densities and peak frequencies are added to the feature space.
    We fit the standandardized features with the labels (spindles found by the automated pipeline)
    A prediction probability is computed using scikit-learn::logisticregression
    """
    decision_features=None;auto_proba=None;auto_label=None
    if proba:
        result = pd.DataFrame({'Onset':time_find,'Duration':Duration,'Annotation':['spindle']*len(Duration)})     
        auto_label,_ = discritized_onset_label_auto(raw,result,validation_windowsize)
        events = mne.make_fixed_length_events(raw,id=1,start=front,stop=raw.times[-1]-back,duration=validation_windowsize)
        epochs = mne.Epochs(raw,events,event_id=1,tmin=0,tmax=validation_windowsize,preload=True)
        data = epochs.get_data()[:,:,:-1]
        full_prop=[]        
        for d in data:    
            temp_p=[]
            #fig,ax = plt.subplots(nrows=2,ncols=3,figsize=(8,8))
            for ii,(name) in enumerate(zip(channelList)):#,ax.flatten())):
                rms = window_rms(d[ii,:],500)
                l = trim_mean(rms,0.05) + lower_threshold * trimmed_std(rms,0.05)
                h = trim_mean(rms,0.05) + higher_threshold * trimmed_std(rms,0.05)
                prop = (sum(rms>l)+sum(rms<h))/(sum(rms<h) - sum(rms<l))
                if np.isinf(prop):
                    prop = (sum(rms>l)+sum(rms<h))
                temp_p.append(prop)
                
            
            full_prop.append(temp_p)
        psds,freq = mne.time_frequency.psd_multitaper(epochs,fmin=l_freq,fmax=h_freq,tmin=0,tmax=3,low_bias=True,)
        psds = 10* np.log10(psds)
        features = pd.DataFrame(np.concatenate((np.array(full_prop),psds.max(2),freq[np.argmax(psds,2)]),1))
        decision_features = StandardScaler().fit_transform(features.values,auto_label)
        clf = LogisticRegressionCV(Cs=np.logspace(-4,6,11),cv=5,tol=1e-7,max_iter=int(1e7))
        clf.fit(decision_features,auto_label)
        auto_proba=clf.predict_proba(decision_features)[:,-1]
    return time_find,mean_peak_power,Duration,mph,mpl,auto_proba,auto_label
Developer: adowaconan | Project: modification-pipelines | Lines: 104 | Source: osf_test.py

Example 12: LogisticRegressionCV

# Required import: from sklearn.linear_model import LogisticRegressionCV [as alias]
# Or: from sklearn.linear_model.LogisticRegressionCV import predict_proba [as alias]
# Comparing our ensemble results with sklearn LogisticRegression-based stacking of classifiers.
# Both techniques, EN_optA and EN_optB, optimize an objective function. In this experiment the multi-class logloss is used as the objective, so the two proposed methods essentially become implementations of LogisticRegression. The following code compares the results of sklearn's LogisticRegression with the proposed ensembles.


#By default the best C parameter is obtained with a cross-validation approach, doing grid search with
#10 values defined in a logarithmic scale between 1e-4 and 1e4.
#Change parameters to see how they affect the final results.
lr = LogisticRegressionCV(Cs=10, dual=False, fit_intercept=True,
                  intercept_scaling=1.0, max_iter=100,
                  multi_class='ovr', n_jobs=1, penalty='l2',
                  solver='lbfgs', tol=0.0001)

lr.fit(XV, y_valid)
y_lr = lr.predict_proba(XT)
print('{:20s} {:2s} {:1.7f}'.format('Log_Reg:', 'logloss  =>', log_loss(y_test, y_lr)))


print(len(p_valid), len(p_valid[0]))
print(len(np.hstack(p_valid)))

# for i in range(500):
#
#     dummy = random.randint(1,10000)
#     x = True
Developer: pmk2109 | Project: Kaggle | Lines: 32 | Source: OptimizationAndCrossValidationUsingSparkDF.toPandas.py

Example 13: LogisticRegressionCV

# Required import: from sklearn.linear_model import LogisticRegressionCV [as alias]
# Or: from sklearn.linear_model.LogisticRegressionCV import predict_proba [as alias]
# 8        source_Direct     0.000000
# 9           source_Seo     0.000000
feat_importances.sort_values(by="importances",inplace=True,ascending=False)


############## use LR to detect feature importances
lrcv = LogisticRegressionCV(Cs = np.logspace(-3,3,7),
                            dual=False,
                            scoring='roc_auc',
                            max_iter=1000,
                            n_jobs=-1,
                            verbose=1)
lrcv.fit(Xtrain,ytrain)
lrcv.C_ # 10

ytest_predict = lrcv.predict(Xtest)
print(classification_report(y_true=ytest, y_pred=ytest_predict))

ytest_proba = lrcv.predict_proba(Xtest)

feat_importances = pd.DataFrame({"name":featnames,"coef":lrcv.coef_[0]})
feat_importances = feat_importances[['name','coef']]# reorder the columns
feat_importances['importances'] = np.abs( feat_importances['coef'] )

simple_dump('lr.pkl',lrcv)





Developer: hbcbh1999 | Project: TakeHomeDataChallenges | Lines: 27 | Source: script1.py

Example 14: len

# Required import: from sklearn.linear_model import LogisticRegressionCV [as alias]
# Or: from sklearn.linear_model.LogisticRegressionCV import predict_proba [as alias]
dataset_blend_test = np.zeros((X_submission.shape[0], len(clfs)))

for j, clf in enumerate(clfs):
    print(j, clf)
    dataset_blend_test_j = np.zeros((X_submission.shape[0], len(skf)))
    for i, (train, test) in enumerate(skf):
        print "Fold", i
        X_train = X[train]
        y_train = y[train]
        X_test = X[test]
        y_test = y[test]
        clf.fit(X_train, y_train)
        y_submission = clf.predict_proba(X_test)[:, 1]
        dataset_blend_train[test, j] = y_submission
        dataset_blend_test_j[:, i] = clf.predict_proba(X_submission)[:, 1]
    dataset_blend_test[:, j] = dataset_blend_test_j.mean(1)

print()
print("Blending.")
clf = LogisticRegressionCV(class_weight="balanced", cv=5, scoring="roc_auc", n_jobs=-1, random_state=42)
clf.fit(dataset_blend_train, y)
y_submission = clf.predict_proba(dataset_blend_test)[:, 1]

print "Linear stretch of predictions to [0,1]"
y_submission = (y_submission - y_submission.min()) / (y_submission.max() - y_submission.min())

# print "Saving Results."
# np.savetxt(fname='test.csv', X=y_submission, fmt='%0.9f')

print(time() - tt)
Developer: JaredChung | Project: DataScienceNotes | Lines: 32 | Source: Blending+Ensemble.py

Example 15: thresholding_filterbased_spindle_searching

# Required import: from sklearn.linear_model import LogisticRegressionCV [as alias]
# Or: from sklearn.linear_model.LogisticRegressionCV import predict_proba [as alias]

#......... part of the code is omitted here .........
        

    peak_time['mean']=[];peak_at=[];duration=[]
    RMS_mean=hmean(RMS)
    
    mph['mean'] = trim_mean(RMS_mean[int(front*sfreq):-int(back*sfreq)],0.05) + lower_threshold * trimmed_std(RMS_mean,0.05)
    mpl['mean'] = trim_mean(RMS_mean[int(front*sfreq):-int(back*sfreq)],0.05) + higher_threshold * trimmed_std(RMS_mean,0.05)
    pass_ =RMS_mean > mph['mean']
    up = np.where(np.diff(pass_.astype(int))>0)
    down= np.where(np.diff(pass_.astype(int))<0)
    up = up[0]
    down = down[0]
    ###############################
    #print(down[0],up[0])
    if down[0] < up[0]:
        down = down[1:]
    #print(down[0],up[0])
    #############################
    if (up.shape > down.shape) or (up.shape < down.shape):
        size = np.min([up.shape,down.shape])
        up = up[:size]
        down = down[:size]
    C = np.vstack((up,down))
    for pairs in C.T:
        
        if l_bound < (time[pairs[1]] - time[pairs[0]]) < h_bound:
            SegmentForPeakSearching = RMS_mean[pairs[0]:pairs[1],]
            if np.max(SegmentForPeakSearching)< mpl['mean']:
                temp_time = time[pairs[0]:pairs[1]]
                ints_temp = np.argmax(SegmentForPeakSearching)
                peak_time['mean'].append(temp_time[ints_temp])
                peak_at.append(SegmentForPeakSearching[ints_temp])
                duration_temp = time[pairs[1]] - time[pairs[0]]
                duration.append(duration_temp) 
            
        
    time_find=[];mean_peak_power=[];Duration=[];
    for item,PEAK,duration_time in zip(peak_time['mean'],peak_at,duration):
        temp_timePoint=[]
        for ii, names in enumerate(channelList):
            try:
                temp_timePoint.append(min(enumerate(peak_time[names]), key=lambda x: abs(x[1]-item))[1])
            except:
                temp_timePoint.append(item + 2)
        try:
            if np.sum((abs(np.array(temp_timePoint) - item)<tol).astype(int))>=syn_channels:
                time_find.append(float(item))
                mean_peak_power.append(PEAK)
                Duration.append(duration_time)
        except:
            pass
    if sleep_stage:
        
        temp_time_find=[];temp_mean_peak_power=[];temp_duration=[];
        # separate out stage 2
        stages = annotations[annotations.Annotation.apply(stage_check)]
        On = stages[::2];Off = stages[1::2]
        stage_on_off = list(zip(On.Onset.values, Off.Onset.values))
        if abs(np.diff(stage_on_off[0]) - 30) < 2:
            pass
        else:
            On = stages[1::2];Off = stages[::2]
            stage_on_off = list(zip(On.Onset.values[1:], Off.Onset.values[2:]))
        for single_time_find, single_mean_peak_power, single_duration in zip(time_find,mean_peak_power,Duration):
            for on_time,off_time in stage_on_off:
                if intervalCheck([on_time,off_time],single_time_find,tol=tol):
                    temp_time_find.append(single_time_find)
                    temp_mean_peak_power.append(single_mean_peak_power)
                    temp_duration.append(single_duration)
        time_find=temp_time_find;mean_peak_power=temp_mean_peak_power;Duration=temp_duration
    
    result = pd.DataFrame({'Onset':time_find,'Duration':Duration,'Annotation':['spindle']*len(Duration)})     
    auto_label,_ = discritized_onset_label_auto(raw,result,validation_windowsize)
    decision_features=None
    if proba:
        events = mne.make_fixed_length_events(raw,id=1,start=0,duration=validation_windowsize)
        epochs = mne.Epochs(raw,events,event_id=1,tmin=0,tmax=validation_windowsize,preload=True)
        data = epochs.get_data()[:,:,:-1]
        full_prop=[]        
        for d in data:    
            temp_p=[]
            #fig,ax = plt.subplots(nrows=2,ncols=3,figsize=(8,8))
            for ii,(name) in enumerate(zip(channelList)):#,ax.flatten())):
                rms = window_rms(d[ii,:],500)
                l = trim_mean(rms,0.05) + lower_threshold * trimmed_std(rms,0.05)
                h = trim_mean(rms,0.05) + higher_threshold * trimmed_std(rms,0.05)
                prop = (sum(rms>l)+sum(rms<h))/(sum(rms<h) - sum(rms<l))
                temp_p.append(prop)
                
            
            full_prop.append(temp_p)
        psds,freq = mne.time_frequency.psd_multitaper(epochs,fmin=11,fmax=16,tmin=0,tmax=3,low_bias=True,)
        psds = 10* np.log10(psds)
        features = pd.DataFrame(np.concatenate((np.array(full_prop),psds.max(2),freq[np.argmax(psds,2)]),1))
        decision_features = StandardScaler().fit_transform(features.values,auto_label)
        clf = LogisticRegressionCV(Cs=np.logspace(-4,6,11),cv=5,tol=1e-7,max_iter=int(1e7))
        clf.fit(decision_features,auto_label)
        auto_proba=clf.predict_proba(decision_features)[:,-1]
            
    return time_find,mean_peak_power,Duration,mph,mpl,auto_proba,auto_label
Developer: adowaconan | Project: modification-pipelines | Lines: 104 | Source: Add+probability+to+pipeline+-+single+subject+try.py


Note: The sklearn.linear_model.LogisticRegressionCV.predict_proba examples in this article were compiled by 纯净天空 from open source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open source projects contributed by many developers; copyright of the source code belongs to the original authors. Please consult each project's license before distributing or using the code, and do not reproduce this compilation without permission.