本文整理汇总了Python中sklearn.preprocessing.label.LabelEncoder.fit_transform方法的典型用法代码示例。如果您正苦于以下问题:Python LabelEncoder.fit_transform方法的具体用法?Python LabelEncoder.fit_transform怎么用?Python LabelEncoder.fit_transform使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类sklearn.preprocessing.label.LabelEncoder的用法示例。(注意: sklearn.preprocessing.label 是私有模块, 在新版 scikit-learn 中已被移除; 请使用公开路径 from sklearn.preprocessing import LabelEncoder。)
在下文中一共展示了LabelEncoder.fit_transform方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: normalize_data
# 需要导入模块: from sklearn.preprocessing import LabelEncoder [as 别名]
# 注意: 原文的 sklearn.preprocessing.label 是私有模块, 新版 scikit-learn 已移除, 请从 sklearn.preprocessing 导入
def normalize_data(data, target):
    """Clean and label-encode the benchmark feature table.

    Parameters
    ----------
    data : pandas.DataFrame
        Raw feature table; the literal string 'None' marks missing values.
    target : object
        Target values; passed through unchanged.

    Returns
    -------
    tuple
        ``(data, target, encoders)`` where ``encoders`` maps each encoded
        column name ('os', 'cpuFull', 'cpuArch', 'memType') to its fitted
        LabelEncoder so predictions can be mapped back to original labels.
    """
    # The raw export uses the string 'None' for missing values.
    data.replace({'None': np.nan}, inplace=True)

    # Cast every column to the dtype recorded in the datatypes manifest.
    types = pd.read_csv('data/datatypes.csv')
    for _, row in types.iterrows():
        data[row['feature']] = data[row['feature']].astype(row['type'])

    # Impute missing memory specs with 0. Assign the result instead of
    # calling fillna(inplace=True) on a column selection: that chained
    # inplace pattern is deprecated in modern pandas and silently
    # no-ops under copy-on-write.
    data['memFreq'] = data['memFreq'].fillna(0)
    data['memtRFC'] = data['memtRFC'].fillna(0)

    # Label-encode the categorical hardware columns, keeping each fitted
    # encoder so callers can invert the transform later.
    encoders = {}
    for column in ('os', 'cpuFull', 'cpuArch', 'memType'):
        encoder = LabelEncoder()
        data[column] = encoder.fit_transform(data[column])
        encoders[column] = encoder

    # Drop columns that hold a single value (no predictive signal).
    data = data.drop(['cacheL3IsShared', 'BMI', 'CLF_._Cache_Line_Flush',
                      'CMOV_._Conditionnal_Move_Inst.', 'CX8_._CMPXCHG8B',
                      'FXSR.FXSAVE.FXRSTOR', 'IA.64_Technology',
                      'MMX_Technology', 'SSE', 'SSE2', 'SSE4a', 'SSE5',
                      'TBM', 'X3DNow_Pro_Technology'], axis=1)

    # Collapse the problem-size dimensions into one log-scale feature.
    data['C0'] = np.log(data['n'] * data['m'] * data['k'])
    data = data.drop(['m', 'n', 'k'], axis=1)

    return data, target, encoders
示例2: test_label_encoder_fit_transform
# 需要导入模块: from sklearn.preprocessing import LabelEncoder [as 别名]
# 注意: 原文的 sklearn.preprocessing.label 是私有模块, 新版 scikit-learn 已移除, 请从 sklearn.preprocessing 导入
def test_label_encoder_fit_transform():
    """Check that fit_transform assigns codes in sorted-label order."""
    # Integers are coded by ascending numeric value...
    encoder = LabelEncoder()
    encoded = encoder.fit_transform([1, 1, 4, 5, -1, 0])
    assert_array_equal(encoded, [2, 2, 3, 4, 0, 1])
    # ...and strings alphabetically.
    encoder = LabelEncoder()
    encoded = encoder.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
    assert_array_equal(encoded, [1, 1, 2, 0])
示例3: test_label_encoder
# 需要导入模块: from sklearn.preprocessing import LabelEncoder [as 别名]
# 注意: 原文的 sklearn.preprocessing.label 是私有模块, 新版 scikit-learn 已移除, 请从 sklearn.preprocessing 导入
def test_label_encoder(values, classes, unknown):
    """Round-trip transform/inverse_transform and reject unseen labels.

    Parametrized: `values` has three distinct classes appearing in the
    pattern that encodes to [1, 0, 2, 0, 2]; `unknown` holds labels
    absent from `values`.
    """
    encoder = LabelEncoder()
    encoder.fit(values)
    assert_array_equal(encoder.classes_, classes)
    encoded = encoder.transform(values)
    assert_array_equal(encoded, [1, 0, 2, 0, 2])
    # inverse_transform must recover the original labels exactly.
    assert_array_equal(encoder.inverse_transform([1, 0, 2, 0, 2]), values)
    # fit_transform must agree with fit followed by transform.
    encoder = LabelEncoder()
    assert_array_equal(encoder.fit_transform(values), [1, 0, 2, 0, 2])
    # Labels not seen during fit are rejected.
    with pytest.raises(ValueError, match="unseen labels"):
        encoder.transform(unknown)
示例4: design_matrix
# 需要导入模块: from sklearn.preprocessing import LabelEncoder [as 别名]
# 注意: 原文的 sklearn.preprocessing.label 是私有模块, 新版 scikit-learn 已移除, 请从 sklearn.preprocessing 导入
def design_matrix(sample_labels, interaction_indices=None):
    """Build a one-hot (dummy-coded) design matrix from condition labels.

    Parameters
    ----------
    sample_labels : numpy 2-D array
        One row per sample, one column per modeled condition. E.g. for
        a 2x3 design the rows enumerate
        [[0,0],[0,1],[0,2],[1,0],[1,1],[1,2]].
    interaction_indices : pair of ints, optional
        Column indices of the two conditions whose pairwise interaction
        terms should be appended to the design matrix.

    Returns
    -------
    X : numpy 2-D array
        The design matrix (main effects, plus interactions if requested).
    factor_labels : numpy array
        Labels of the design-matrix columns.
    factor_num : list of int
        Number of factor levels per condition (0 for constant columns).
    """
    n_samples = sample_labels.shape[0]

    # Count the levels of each condition; constant columns (one unique
    # value) contribute no design-matrix columns at all.
    factor_num = []
    n_factors = 0
    for i in range(sample_labels.shape[1]):
        unique_labels = np.unique(sample_labels[:, i])
        label_factors = 0 if len(unique_labels) == 1 else len(unique_labels)
        n_factors += label_factors
        factor_num.append(label_factors)

    # 'is not None' instead of '!= None': identity test is the correct
    # idiom and avoids ambiguous elementwise comparison with array-likes.
    n_interactions = 0
    if interaction_indices is not None:
        # Index with a plain list so the result is a 1-D vector of level
        # counts. The original nested [[interaction_indices]] was a
        # deprecated non-tuple multidimensional fancy index that yields
        # a 2-D array and breaks the per-factor iteration below.
        interaction_factors = np.asarray(factor_num)[list(interaction_indices)]
        n_interactions = np.prod(interaction_factors)
    Xint = np.zeros((n_samples, n_interactions))

    # Main-effect block: one indicator column per factor level.
    X = np.zeros((n_samples, n_factors))
    lb = LabelEncoder()
    factor_labels = []
    offset = 0
    for i, factor in enumerate(factor_num):
        if factor == 0:
            continue  # constant condition: nothing to encode
        index = lb.fit_transform(sample_labels.T[i])
        # Vectorized one-hot assignment (replaces the per-sample loop).
        X[np.arange(n_samples), index + offset] = 1
        factor_labels.append(lb.classes_)
        offset += factor

    if interaction_indices is not None:
        # Enumerate every pairwise combination of the two factors' levels.
        interaction_product = [np.arange(v).tolist()
                               for v in interaction_factors]
        interaction_gen = cartesian(interaction_product)
        Xint = np.zeros((n_samples, n_interactions))
        # NOTE(review): this offset formula was flagged "buggy" in the
        # original and looks suspect (a column index multiplied by a sum
        # of level counts); preserved as-is pending verification against
        # callers. np.int() was removed from NumPy, so the builtin int()
        # is used for the cast.
        offset = int(interaction_indices[0]
                     * np.sum(factor_num[:interaction_indices[0]]))
        for i, int_indices in enumerate(interaction_gen):
            index1 = offset + int_indices[0]
            index2 = (offset + int_indices[1]
                      + factor_num[interaction_indices[0]])
            # Interaction column = elementwise product of the two
            # main-effect indicator columns.
            Xint[:, i] = X[:, index1] * X[:, index2]
            factor1 = interaction_indices[0]
            factor2 = interaction_indices[1]
            new_label = factor_labels[factor1][int_indices[0]] + "_" + \
                factor_labels[factor2][int_indices[1]]
            factor_labels.append(new_label)

    # With no interactions Xint has zero columns, so this hstack is a no-op.
    X = np.hstack((X, Xint))
    return X, np.hstack(factor_labels), factor_num