本文整理匯總了Python中sklearn.preprocessing.robust_scale方法的典型用法代碼示例。如果您正苦於以下問題:Python preprocessing.robust_scale方法的具體用法?Python preprocessing.robust_scale怎麽用?Python preprocessing.robust_scale使用的例子?那麽,這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在模塊sklearn.preprocessing的其他用法示例。
在下文中一共展示了preprocessing.robust_scale方法的4個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: test_robust_standardize_to_sklearn
# 需要導入模塊: from sklearn import preprocessing [as 別名]
# 或者: from sklearn.preprocessing import robust_scale [as 別名]
def test_robust_standardize_to_sklearn(args):
    """Check that ``stats.robust_standardize`` agrees with sklearn.

    ``args`` is an (X, q_level) pair. The project's robust standardizer is
    compared against ``sklearn.preprocessing.robust_scale`` applied to the
    same data over the symmetric quantile range implied by ``q_level``,
    rescaled by the normal-quantile width so both use the same units.
    """
    X, q_level = args

    # Symmetric lower/upper quantiles around the median; their gap is q_level.
    lower = 0.5 * (1.0 - q_level)
    upper = 0.5 * (1.0 + q_level)
    assert close_enough(upper - lower, q_level)

    # Project implementation under test.
    ours = stats.robust_standardize(X, q_level=q_level)

    # sklearn expects 2-D input; promote the vector to a single column.
    column = X[:, None]
    scaled = robust_scale(
        column,
        axis=0,
        with_centering=True,
        with_scaling=True,
        quantile_range=[100.0 * lower, 100.0 * upper],
    )
    # Undo sklearn's IQR scaling convention: multiply by the width of the
    # standard-normal quantile interval so the two results are comparable.
    reference = scaled[:, 0] * (sst.norm.ppf(upper) - sst.norm.ppf(lower))

    assert close_enough(ours, reference, equal_nan=True)
示例2: sk_robust
# 需要導入模塊: from sklearn import preprocessing [as 別名]
# 或者: from sklearn.preprocessing import robust_scale [as 別名]
def sk_robust(X):
    """Scale ``X`` with sklearn's median/IQR-based ``robust_scale``."""
    result = robust_scale(X)
    return result
示例3: loopCalculate
# 需要導入模塊: from sklearn import preprocessing [as 別名]
# 或者: from sklearn.preprocessing import robust_scale [as 別名]
def loopCalculate(xyzArray, eps, fn):
    """Cluster the point cloud once per distance in ``eps`` and collect stats.

    For each distance: run affinity-propagation clustering, write the labeled
    points to a shapefile, robust-scale the cluster-size frequency vector, and
    compute contingency-table / partial-correlation summaries.

    Returns a 4-tuple, one entry per distance:
      1. robust-scaled cluster-label frequency arrays,
      2. number of predicted labels,
      3. dict of contingency tables keyed by loop index,
      4. partial-correlation results.

    NOTE(review): relies on module-level ``dataBunch`` and ``class_mapping``
    rather than parameters — confirm they are defined at call time.
    """
    scaled_frequencies = []
    label_counts = []
    contingency_tables = {}
    partial_corrs = []

    # One clustering pass per candidate distance.
    for step, distance in enumerate(eps):
        # Cluster; returns predictions and the set of cluster labels.
        pred, predLable = affinityPropagationForPoints(xyzArray, distance)

        # Output layer name is derived from the distance value.
        pt_lyrName_w = r'%s_POI' % distance
        point2Shp(dataBunch, pred, fn, pt_lyrName_w)
        print("%s has been written to disk" % distance)

        # Frequency of each cluster label, as floats.
        frequencies = np.array(list(Counter(pred).values())).astype(float)
        # robust_scale is used (instead of mean/std scaling) because the
        # frequency vector may contain outliers.
        scaled = preprocessing.robust_scale(frequencies.reshape(-1, 1))
        # ravel() flattens the single column back to 1-D (a view where
        # possible, unlike flatten() which always copies).
        scaled_frequencies.append(scaled.ravel())

        label_counts.append(len(predLable))

        # Contingency table and partial-correlation analysis for this pass.
        CTable, partial_correlations = contingencyTableChi2andPOISpaceStructure(
            dataBunch, pred, class_mapping, predLable, pt_lyrName_w
        )
        contingency_tables[step] = CTable
        partial_corrs.append(partial_correlations)

    return scaled_frequencies, label_counts, contingency_tables, partial_corrs
示例4: loopCalculate
# 需要導入模塊: from sklearn import preprocessing [as 別名]
# 或者: from sklearn.preprocessing import robust_scale [as 別名]
def loopCalculate(df_osm, epsDegree, fn, eps):
    """Cluster OSM points once per distance and collect frequency statistics.

    ``epsDegree`` holds the clustering distances in degrees; ``eps`` holds the
    matching human-readable distance values used only to name the output
    layers. For each pair: run affinity propagation on the lon/lat array,
    write the labeled points to a shapefile, and robust-scale the
    cluster-size frequency vector.

    Returns (robust-scaled frequency arrays, predicted-label counts), one
    entry per distance.

    NOTE(review): assumes ``df_osm`` has 'lon' and 'lat' columns — confirm
    against the caller.
    """
    coords = pd.DataFrame({"lon": df_osm['lon'], "lat": df_osm['lat']}).to_numpy()

    scaled_frequencies = []
    label_counts = []

    # One clustering pass per candidate distance.
    for idx, degree in enumerate(epsDegree):
        # Cluster; returns predictions and the set of cluster labels.
        pred, predLable = affinityPropagationForPoints(coords, degree)

        # Output layer is named after the readable distance, not the degree.
        pt_lyrName_w = r'%s_POI' % eps[idx]
        point2Shp(df_osm, pred, fn, pt_lyrName_w)
        print("\n%s has been written to disk" % idx)

        # Frequency of each cluster label, as floats.
        frequencies = np.array(list(Counter(pred).values())).astype(float)
        # robust_scale is used (instead of mean/std scaling) because the
        # frequency vector may contain outliers.
        scaled = preprocessing.robust_scale(frequencies.reshape(-1, 1))
        # ravel() flattens the single column back to 1-D (a view where
        # possible, unlike flatten() which always copies).
        scaled_frequencies.append(scaled.ravel())

        label_counts.append(len(predLable))

    return scaled_frequencies, label_counts