This article collects typical usage examples of the sklearn.preprocessing.minmax_scale method in Python. If you have been wondering how exactly preprocessing.minmax_scale is used, what it is for, or what real code calling it looks like, the curated examples below may help. You can also explore further usage examples from the module it belongs to, sklearn.preprocessing.
The following shows 11 code examples of preprocessing.minmax_scale, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
Example 1: min_max_scaling
# Required import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import minmax_scale [as alias]
def min_max_scaling(data, lowerbound_zero=False):
    import numpy as np
    from sklearn.preprocessing import minmax_scale
    size = data.shape
    # map 8-bit pixel values into [0, 1]
    data = data / 255.0
    if not lowerbound_zero:
        # shift to [-1, 1] when a zero lower bound is not required
        data = (data * 2.0) - 1.0
    data[np.isnan(data)] = 0
    # alternative per-sample scaling with sklearn's minmax_scale:
    # if len(size) == 4:
    #     for i in range(size[3]):
    #         tmp = minmax_scale(data[:, :, :, i].reshape(-1, size[1]*size[2]),
    #                            feature_range=(s, t), axis=1)
    #         data[:, :, :, i] = tmp.reshape(-1, size[1], size[2])
    # elif len(size) == 3:
    #     data = minmax_scale(data.reshape(-1, size[1]*size[2]), axis=1)
    #     data = data.reshape(-1, size[1], size[2])
    return data
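A minimal usage sketch; the input batch below is hypothetical, not from the original project:

import numpy as np

img_batch = np.random.randint(0, 256, size=(4, 28, 28)).astype(np.float64)  # hypothetical 8-bit images
scaled = min_max_scaling(img_batch)                           # values in [-1, 1]
scaled01 = min_max_scaling(img_batch, lowerbound_zero=True)   # values in [0, 1]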
Example 2: fusion
# Required import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import minmax_scale [as alias]
def fusion(*args):
    import numpy as np
    from scipy.stats import rankdata
    from sklearn.preprocessing import minmax_scale
    max_rk = [None] * len(args)
    masks = [None] * len(args)
    # rank the non-zero entries of each matrix in place
    for j, a in enumerate(args):
        m = masks[j] = a != 0
        a[m] = rankdata(a[m])
        max_rk[j] = a[m].max()
    # rescale every matrix's ranks to a common range [1, smallest max rank]
    max_rk = min(max_rk)
    for j, a in enumerate(args):
        m = masks[j]
        a[m] = minmax_scale(a[m], feature_range=(1, max_rk))
    # fuse the matrices
    return np.hstack(args)
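A short usage sketch with two hypothetical score matrices (note the function mutates its arguments in place):

import numpy as np

A = np.array([[0.0, 2.5], [1.2, 0.0]])  # hypothetical score matrices
B = np.array([[3.0, 0.0], [0.0, 0.7]])
fused = fusion(A, B)  # shape (2, 4): ranked, rescaled, stacked side by side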
Example 3: preprocess
# Required import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import minmax_scale [as alias]
def preprocess(self):
    """
    Normalize the data between 0 and 1 and subtract the nuggets.

    Returns:
        pandas.core.frame.DataFrame: Dataframe containing the transformed data
        pandas.core.frame.DataFrame: Dataframe containing the subtracted nuggets
    """
    import sklearn.preprocessing as skp
    import pandas as pn
    # Normalization
    scaled_data = pn.DataFrame(skp.minmax_scale(self.exp_var_raw[self.properties]),
                               columns=self.properties)
    # Nuggets: the first row of the scaled data
    nuggets = scaled_data[self.properties].iloc[0]
    processed_data = scaled_data - nuggets
    return processed_data, nuggets
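The core of this method as a standalone sketch; the column names and values are hypothetical:

import pandas as pn
import sklearn.preprocessing as skp

raw = pn.DataFrame({'var1': [10.0, 12.0, 15.0], 'var2': [0.2, 0.8, 0.5]})
scaled = pn.DataFrame(skp.minmax_scale(raw), columns=raw.columns)
nuggets = scaled.iloc[0]       # first row becomes the "nugget" offset
processed = scaled - nuggets   # every column now starts at 0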
Example 4: features_transformer
# Required import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import minmax_scale [as alias]
def features_transformer(df_text):
    import numpy as np
    from sklearn.preprocessing import minmax_scale
    from nlp import meta_features_transformer
    from nlp import topic_features_transformer
    # get features
    meta_features = meta_features_transformer(df_text).values
    topic_features = topic_features_transformer(df_text).values
    # concatenate the two feature blocks column-wise
    joined_features = np.hstack([meta_features, topic_features])
    # scale every feature into [0, 1]
    return minmax_scale(joined_features)
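The nlp module here is project-specific; a sketch of the same pattern with hypothetical stand-in feature arrays:

import numpy as np
from sklearn.preprocessing import minmax_scale

meta = np.array([[3, 120], [7, 45]])        # hypothetical meta features
topic = np.array([[0.1, 0.9], [0.6, 0.4]])  # hypothetical topic features
features = minmax_scale(np.hstack([meta, topic]))  # each column scaled to [0, 1]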
Example 5: b_fit_score
# Required import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import minmax_scale [as alias]
def b_fit_score(self, x, y):
    """Compute the RECI fit score.

    Args:
        x (numpy.ndarray): Variable 1
        y (numpy.ndarray): Variable 2

    Returns:
        float: RECI fit score
    """
    # assumes module-level imports: numpy as np, minmax_scale,
    # PolynomialFeatures, LinearRegression, mean_squared_error
    x = np.reshape(minmax_scale(x), (-1, 1))
    y = np.reshape(minmax_scale(y), (-1, 1))
    poly = PolynomialFeatures(degree=self.degree)
    poly_x = poly.fit_transform(x)
    # zero out selected polynomial feature columns, as in the original code
    poly_x[:, 1] = 0
    poly_x[:, 2] = 0
    regressor = LinearRegression()
    regressor.fit(poly_x, y)
    y_predict = regressor.predict(poly_x)
    error = mean_squared_error(y_predict, y)
    return error
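The same fit score outside the class, as a self-contained sketch; the degree and the synthetic cause-effect pair are hypothetical:

import numpy as np
from sklearn.preprocessing import minmax_scale, PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error

rng = np.random.RandomState(0)
x = rng.uniform(size=200)
y = x ** 2 + 0.05 * rng.normal(size=200)   # hypothetical cause-effect pair
x_ = minmax_scale(x).reshape(-1, 1)
y_ = minmax_scale(y).reshape(-1, 1)
poly_x = PolynomialFeatures(degree=3).fit_transform(x_)
poly_x[:, 1] = 0
poly_x[:, 2] = 0
reg = LinearRegression().fit(poly_x, y_)
score = mean_squared_error(reg.predict(poly_x), y_)  # lower error suggests x causes y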
Example 6: weightWithDropslop
# Required import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import minmax_scale [as alias]
def weightWithDropslop(self, weighted, scale):
    """Weight the adjacency matrix with the sudden drop of ts for each col."""
    import numpy as np
    from scipy.sparse import lil_matrix
    if weighted:
        colWeights = np.multiply(self.tspim.dropslops, self.tspim.dropfalls)
    else:
        colWeights = self.tspim.dropslops
    if scale == 'logistic':
        from scipy.stats import logistic
        from sklearn import preprocessing
        # zero-mean scale, then squash through the logistic CDF
        colWeights = preprocessing.scale(colWeights)
        colWeights = logistic.cdf(colWeights)
    elif scale == 'linear':
        from sklearn import preprocessing
        # add a base of suspiciousness for each edge
        colWeights = preprocessing.minmax_scale(colWeights) + 1
    elif scale == 'plusone':
        colWeights += 1
    elif scale == 'log1p':
        colWeights = np.log1p(colWeights) + 1
    else:
        print('[Warning] no scale for the prior weight')
    n = self.nV
    # build a diagonal matrix of column weights and apply it to the graph
    colDiag = lil_matrix((n, n))
    colDiag.setdiag(colWeights)
    self.graphr = self.graphr * colDiag.tocsr()
    self.graph = self.graphr.tocoo(copy=False)
    self.graphc = self.graph.tocsc(copy=False)
    print('finished computing weight matrix')
Example 7: evalsusp4rate
# Required import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import minmax_scale [as alias]
def evalsusp4rate(self, suspusers, neutral=False, scale='max'):
    from sklearn import preprocessing
    susprates = self.ratepim.suspratedivergence(neutral, delta=True)
    if scale == 'max':
        assert self.ratepim.maxratediv > 0
        nsusprates = susprates / self.ratepim.maxratediv
    elif scale == 'minmax':
        # need a copy; do not change susprates' values for delta
        nsusprates = preprocessing.minmax_scale(susprates, copy=True)
    else:
        # no scaling
        nsusprates = susprates
    return nsusprates
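The difference between the two scaling modes, sketched on hypothetical divergence scores:

import numpy as np
from sklearn import preprocessing

susprates = np.array([0.2, 0.5, 0.9])                          # hypothetical scores
by_max = susprates / susprates.max()                           # 'max': preserves zero
by_minmax = preprocessing.minmax_scale(susprates, copy=True)   # 'minmax': maps min to 0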
Example 8: process_image
# Required import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import minmax_scale [as alias]
def process_image(path):
    """Load and resize the image and return it as a flattened numpy array."""
    # assumes module-level imports: numpy as np, the project's `image` loader
    # module, scipy.misc.imresize (removed in SciPy >= 1.3), and
    # sklearn's preprocessing
    img = image.load(path, dtype=np.float32)
    resized_img = imresize(img.array, size=(100, 100), mode='F').flatten()
    # scale the flattened pixel values into [0, 1]
    rescaled_img = preprocessing.minmax_scale(resized_img)
    return rescaled_img
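Since scipy.misc.imresize has been removed from recent SciPy releases, here is an equivalent sketch using Pillow instead; the file path is hypothetical:

import numpy as np
from PIL import Image
from sklearn import preprocessing

img = Image.open('photo.jpg').convert('F')   # hypothetical path; 32-bit float grayscale
resized = np.asarray(img.resize((100, 100), Image.BILINEAR)).flatten()
rescaled = preprocessing.minmax_scale(resized)  # flattened image in [0, 1]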
Example 9: _transform
# Required import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import minmax_scale [as alias]
def _transform(self, series):
    # assumes module-level imports: numpy as np, scipy.stats.boxcox,
    # and sklearn.preprocessing.minmax_scale
    series_ = series
    if series.min() != series.max():
        if self.bc:
            with np.errstate(all='raise'):
                # shift the series to be strictly positive for Box-Cox
                shift = 1e-10
                tmp = series - series.min() + shift
                try:
                    series_, _ = boxcox(tmp)
                except FloatingPointError:
                    series_ = series
        series_ = minmax_scale(series_)
    return series_
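The Box-Cox-then-minmax pipeline in isolation, on a hypothetical skewed series:

import numpy as np
from scipy.stats import boxcox
from sklearn.preprocessing import minmax_scale

series = np.array([1.0, 4.0, 9.0, 16.0])              # hypothetical skewed series
transformed, _ = boxcox(series - series.min() + 1e-10)
scaled = minmax_scale(transformed)                     # Box-Cox'd, then into [0, 1]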
Example 10: getImgAsMatFromFile
# Required import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import minmax_scale [as alias]
def getImgAsMatFromFile(filename, width=28, height=28, scale_min=0, scale_max=1):
    import numpy as np
    from PIL import Image
    from sklearn import preprocessing
    # img = io.imread(filename, as_grey=True)
    img = Image.open(filename)
    img = img.resize((width, height), Image.BILINEAR)
    # convert to grayscale and invert, so dark strokes become high values
    imgArr_2d = np.array(img.convert('L'))
    imgArr_2d = np.float64(1 - imgArr_2d)
    shape_2d = imgArr_2d.shape
    # scale the flattened image into [scale_min, scale_max], then restore shape
    imgArr_1d_scale = preprocessing.minmax_scale(imgArr_2d.flatten(),
                                                 feature_range=(scale_min, scale_max))
    return imgArr_1d_scale.reshape(shape_2d)
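A hypothetical usage note; the file names are placeholders:

digit = getImgAsMatFromFile('digit.png')                      # 28x28 matrix in [0, 1]
wide = getImgAsMatFromFile('digit.png', width=64, height=64)  # resized to 64x64 first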
Example 11: G_display
# Required import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import minmax_scale [as alias]
def G_display(G):
    import networkx as nx
    import numpy as np
    import matplotlib.pyplot as plt
    from tqdm import tqdm
    from sklearn.preprocessing import minmax_scale
    import pysal.viz.mapclassify as mc
    # make a new graph with the same nodes
    H = nx.Graph()
    for v in G:
        H.add_node(v)
    # extract the edge weights
    weightValue = list(nx.get_edge_attributes(G, 'weight').values())
    # weightsForWidth = [G[u][v]['weight'] for u, v in G.edges()]  # another way
    # compute quantiles, used to select which edges to display
    q = mc.Quantiles(weightValue, k=30).bins
    # keep only edges above the 29th of 30 quantile bins
    for (u, v, d) in tqdm(G.edges(data=True)):
        if d['weight'] > q[28]:
            H.add_edge(u, v)
    print("H_digraph has %d nodes with %d edges"
          % (nx.number_of_nodes(H), nx.number_of_edges(H)))
    # draw with matplotlib/pylab
    plt.figure(figsize=(18, 18))
    # nodes colored by degree, sized by scaled shape area
    node_color = [float(H.degree(v)) for v in H]
    # set the edge widths by rescaling the weights into [1, 3000]
    weightsForWidthScale = np.interp(weightValue,
                                     (min(weightValue), max(weightValue)), (1, 3000))
    scaleNode = 1
    # sklearn.preprocessing.minmax_scale(X, feature_range=(0, 1), axis=0, copy=True)
    nx.draw(H, G.position,
            node_size=minmax_scale([G.shape_area[v] * scaleNode for v in H],
                                   feature_range=(10, 2200)),
            node_color=node_color, with_labels=True,
            edge_cmap=plt.cm.Blues, width=weightsForWidthScale)
    # plt.xlim(-5000, 500)   # scale the axes equally if needed
    # plt.ylim(-2000, 3500)
    plt.show()
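The node-sizing trick in isolation, sketched on a toy graph with hypothetical shape areas:

import networkx as nx
import matplotlib.pyplot as plt
from sklearn.preprocessing import minmax_scale

H = nx.path_graph(5)
areas = [3.0, 50.0, 7.5, 120.0, 1.0]                   # hypothetical shape areas
sizes = minmax_scale(areas, feature_range=(10, 2200))  # node sizes for drawing
nx.draw(H, node_size=sizes, with_labels=True)
plt.show()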
# Convert a CSV file to .shp format and return the key information; implemented with the geopandas library