Python preprocessing.minmax_scale method code examples

This article collects typical code examples of the sklearn.preprocessing.minmax_scale method in Python. If you are wondering what exactly preprocessing.minmax_scale does, how to call it, or how it is used in real projects, the selected code examples below may help. You can also explore further usage examples from the sklearn.preprocessing module.


The following presents 11 code examples of the preprocessing.minmax_scale method, sorted by popularity by default.
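
For quick orientation before the collected examples, here is a minimal sketch of the basic call: minmax_scale rescales each feature to a target range, [0, 1] by default, which can be changed via feature_range. The sample array and its values are invented for illustration.

import numpy as np
from sklearn.preprocessing import minmax_scale

# hypothetical sample data: a single feature with an arbitrary value range
x = np.array([3.0, 7.0, 11.0, 15.0])

print(minmax_scale(x))                         # -> [0.  0.3333  0.6667  1.] (approximately)
print(minmax_scale(x, feature_range=(-1, 1)))  # -> [-1.  -0.3333  0.3333  1.] (approximately)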

Example 1: min_max_scaling

# Required module import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import minmax_scale [as alias]
def min_max_scaling(data, lowerbound_zero=False):
    import numpy as np
    from sklearn.preprocessing import minmax_scale  # used by the commented-out per-channel variant below
    size = data.shape
    # scale 8-bit image data from [0, 255] down to [0, 1]
    data = data / 255.0
    if not lowerbound_zero:
        # shift to [-1, 1] when a zero lower bound is not required
        data = (data * 2.0) - 1.0
    data[np.isnan(data)] = 0
    # Per-channel minmax_scale variant kept from the original source
    # (it references a feature_range (s, t) that is not defined in this snippet):
    # if len(size) == 4:
    #     for i in range(size[3]):
    #         tmp = minmax_scale(data[:, :, :, i].reshape(-1, size[1]*size[2]),
    #                            feature_range=(s, t), axis=1)
    #         data[:, :, :, i] = tmp.reshape(-1, size[1], size[2])
    # elif len(size) == 3:
    #     data = minmax_scale(data.reshape(-1, size[1]*size[2]), axis=1)
    #     data = data.reshape(-1, size[1], size[2])
    return data
Developer: bbdamodaran, Project: deepJDOT, Lines of code: 20, Source: preprocess.py

Example 2: fusion

# Required module import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import minmax_scale [as alias]
def fusion(*args):
    import numpy as np
    from scipy.stats import rankdata
    from sklearn.preprocessing import minmax_scale

    max_rk = [None] * len(args)
    masks = [None] * len(args)
    for j, a in enumerate(args):
        # rank only the non-zero entries of each matrix
        m = masks[j] = a != 0
        a[m] = rankdata(a[m])
        max_rk[j] = a[m].max()

    # rescale every matrix onto the smallest maximum rank so they are comparable
    max_rk = min(max_rk)
    for j, a in enumerate(args):
        m = masks[j]
        a[m] = minmax_scale(a[m], feature_range=(1, max_rk))

    return np.hstack(args)


# fuse the matrices 
Developer: MICA-MNI, Project: BrainSpace, Lines of code: 22, Source: plot_tutorial2.py

Example 3: preprocess

# Required module import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import minmax_scale [as alias]
def preprocess(self):
        """
        Normalize the data to the [0, 1] range and subtract the nuggets.

        Returns:
            pandas.core.frame.DataFrame: Dataframe containing the transformed data
            pandas.core.series.Series: The subtracted nuggets (first row of the scaled data)

        """
        import pandas as pn
        import sklearn.preprocessing as skp

        # Normalization
        scaled_data = pn.DataFrame(skp.minmax_scale(self.exp_var_raw[self.properties]), columns=self.properties)

        # Nuggets
        nuggets = scaled_data[self.properties].iloc[0]
        processed_data = scaled_data - nuggets
        return processed_data, nuggets
Developer: cgre-aachen, Project: gempy, Lines of code: 19, Source: coKriging.py

Example 4: features_transformer

# Required module import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import minmax_scale [as alias]
def features_transformer(df_text):
    import numpy as np
    from sklearn.preprocessing import minmax_scale
    from nlp import meta_features_transformer
    from nlp import topic_features_transformer
    # get features
    meta_features = meta_features_transformer(df_text).values
    topic_features = topic_features_transformer(df_text).values
    # concatenate and rescale every feature column to [0, 1]
    joined_features = np.hstack([meta_features, topic_features])
    return minmax_scale(joined_features)
Developer: KevinLiao159, Project: Quora, Lines of code: 11, Source: model_v40_BAK.py

Example 5: b_fit_score

# Required module import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import minmax_scale [as alias]
def b_fit_score(self, x, y):
        """ Compute the RECI fit score

        Args:
            x (numpy.ndarray): Variable 1
            y (numpy.ndarray): Variable 2

        Returns:
            float: RECI fit score

        """
        # assumes the module-level imports of the original source: numpy as np,
        # minmax_scale, PolynomialFeatures, LinearRegression and mean_squared_error
        x = np.reshape(minmax_scale(x), (-1, 1))
        y = np.reshape(minmax_scale(y), (-1, 1))
        poly = PolynomialFeatures(degree=self.degree)
        poly_x = poly.fit_transform(x)

        # zero out the first- and second-order polynomial columns
        poly_x[:, 1] = 0
        poly_x[:, 2] = 0

        regressor = LinearRegression()
        regressor.fit(poly_x, y)

        # the fit score is the mean squared error of the polynomial regression
        y_predict = regressor.predict(poly_x)
        error = mean_squared_error(y_predict, y)

        return error
Developer: FenTechSolutions, Project: CausalDiscoveryToolbox, Lines of code: 28, Source: RECI.py

Example 6: weightWithDropslop

# Required module import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import minmax_scale [as alias]
def weightWithDropslop(self, weighted, scale):
        """Weight the adjacency matrix with the sudden drop of the time series for each column."""
        if weighted:
            colWeights = np.multiply(self.tspim.dropslops, self.tspim.dropfalls)
        else:
            colWeights = self.tspim.dropslops
        if scale == 'logistic':
            from scipy.stats import logistic
            from sklearn import preprocessing
            # zero-mean scale, then squash through the logistic CDF
            colWeights = preprocessing.scale(colWeights)
            colWeights = logistic.cdf(colWeights)
        elif scale == 'linear':
            from sklearn import preprocessing
            # add a base suspiciousness of 1 for each edge
            colWeights = preprocessing.minmax_scale(colWeights) + 1
        elif scale == 'plusone':
            colWeights += 1
        elif scale == 'log1p':
            colWeights = np.log1p(colWeights) + 1
        else:
            print('[Warning] no scale for the prior weight')

        # build a diagonal matrix of the column weights and apply it to the graph
        from scipy.sparse import lil_matrix
        n = self.nV
        colDiag = lil_matrix((n, n))
        colDiag.setdiag(colWeights)
        self.graphr = self.graphr * colDiag.tocsr()
        self.graph = self.graphr.tocoo(copy=False)
        self.graphc = self.graph.tocsc(copy=False)
        print("finished computing weight matrix")
Developer: shenghua-liu, Project: HoloScope, Lines of code: 32, Source: holoscopeFraudDect.py

Example 7: evalsusp4rate

# Required module import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import minmax_scale [as alias]
def evalsusp4rate(self, suspusers, neutral=False, scale='max'):
        susprates = self.ratepim.suspratedivergence(neutral, delta=True)
        if scale == 'max':
            assert self.ratepim.maxratediv > 0
            nsusprates = susprates / self.ratepim.maxratediv
        elif scale == 'minmax':
            # need a copy: do not change susprates' values, which are reused for delta
            nsusprates = preprocessing.minmax_scale(susprates, copy=True)
        else:
            # no scaling
            nsusprates = susprates
        return nsusprates
Developer: shenghua-liu, Project: HoloScope, Lines of code: 14, Source: holoscopeFraudDect.py

Example 8: process_image

# Required module import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import minmax_scale [as alias]
def process_image(path):
    """Load and resize the image and return it as a flattened numpy array."""
    # relies on the module-level imports of the original source
    # (the image loader, scipy's imresize, numpy as np and sklearn's preprocessing module)
    img = image.load(path, dtype=np.float32)
    resized_img = imresize(img.array, size=(100, 100), mode='F').flatten()
    # rescale pixel values to the default [0, 1] range
    rescaled_img = preprocessing.minmax_scale(resized_img)
    return rescaled_img
Developer: jrkerns, Project: pylinac, Lines of code: 8, Source: tools.py

Example 9: _transform

# Required module import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import minmax_scale [as alias]
def _transform(self, series):
        series_ = series
        if series.min() != series.max():
            if self.bc:
                with np.errstate(all='raise'):
                    # Box-Cox needs strictly positive input, so shift the series just above zero
                    shift = 1e-10
                    tmp = series - series.min() + shift
                    try:
                        series_, _ = boxcox(tmp)
                    except FloatingPointError:
                        series_ = series
        # finally rescale to [0, 1]
        series_ = minmax_scale(series_)
        return series_
Developer: yoshida-lab, Project: XenonPy, Lines of code: 15, Source: heatmap.py

Example 10: getImgAsMatFromFile

# Required module import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import minmax_scale [as alias]
def getImgAsMatFromFile(filename, width=28, height=28, scale_min=0, scale_max=1):
    import numpy as np
    from PIL import Image
    from sklearn import preprocessing
    #img = io.imread(filename, as_grey=True)
    img = Image.open(filename)
    img = img.resize((width, height), Image.BILINEAR)
    imgArr_2d = np.array(img.convert('L'))
    imgArr_2d = np.float64(1 - imgArr_2d)
    shape_2d = imgArr_2d.shape
    # scale each pixel into [scale_min, scale_max]
    imgArr_1d_scale = preprocessing.minmax_scale(imgArr_2d.flatten(), feature_range=(scale_min, scale_max))
    return imgArr_1d_scale.reshape(shape_2d)
Developer: zlxy9892, Project: ml_code, Lines of code: 11, Source: img_proc.py

Example 11: G_display

# Required module import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import minmax_scale [as alias]
def G_display(G):
    # make a new graph that keeps only the strongest edges of G
    H = nx.Graph()
    for v in G:
        H.add_node(v)
    weightValue = list(nx.get_edge_attributes(G, 'weight').values())  # extract the edge weights
    # weightsForWidth = [G[u][v]['weight'] for u, v in G.edges()]  # another way
    import pysal.viz.mapclassify as mc
    q = mc.Quantiles(weightValue, k=30).bins  # compute quantiles, used to select which edges to display

    for (u, v, d) in tqdm(G.edges(data=True)):
        # keep only edges whose weight lies above the 29th of the 30 quantile bins
        if d['weight'] > q[28]:
            H.add_edge(u, v)

    print("H_digraph has %d nodes with %d edges" % (nx.number_of_nodes(H), nx.number_of_edges(H)))
    # draw with matplotlib/pylab
    plt.figure(figsize=(18, 18))
    # nodes are colored by degree and sized by shape area
    node_color = [float(H.degree(v)) for v in H]
    # nx.draw(H, G.position, node_size=[G.perimeter[v] for v in H], node_color=node_color, with_labels=True)

    weightsForWidthScale = np.interp(weightValue, (min(weightValue), max(weightValue)), (1, 3000))  # set the edge widths
    scaleNode = 1

    # sklearn.preprocessing.minmax_scale(X, feature_range=(0, 1), axis=0, copy=True)
    nx.draw(H, G.position,
            node_size=minmax_scale([G.shape_area[v] * scaleNode for v in H], feature_range=(10, 2200)),
            node_color=node_color, with_labels=True,
            edge_cmap=plt.cm.Blues, width=weightsForWidthScale)
    # scale the axes equally
    # plt.xlim(-5000, 500)
    # plt.ylim(-2000, 3500)

    plt.show()


# Convert a CSV file to .shp format and return the key information, implemented with the geopandas library
Developer: richieBao, Project: python-urbanPlanning, Lines of code: 45, Source: vectorSpatialAnalysis.py


Note: The sklearn.preprocessing.minmax_scale examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers, and the copyright of the source code remains with the original authors. Please refer to the corresponding project's License before distributing or using the code, and do not reproduce this article without permission.