本文整理汇总了Python中sklearn.preprocessing.LabelBinarizer.inverse_transform方法的典型用法代码示例。如果您正苦于以下问题:Python LabelBinarizer.inverse_transform方法的具体用法?Python LabelBinarizer.inverse_transform怎么用?Python LabelBinarizer.inverse_transform使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类sklearn.preprocessing.LabelBinarizer的用法示例。
在下文中一共展示了LabelBinarizer.inverse_transform方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_label_binarizer_multilabel
# 需要导入模块: from sklearn.preprocessing import LabelBinarizer [as 别名]
# 或者: from sklearn.preprocessing.LabelBinarizer import inverse_transform [as 别名]
def test_label_binarizer_multilabel():
    """LabelBinarizer round-trips multilabel input given as tuples or as an indicator matrix."""
    binarizer = LabelBinarizer()

    # Sequence-of-tuples input: each tuple lists the labels of one sample.
    labels = [(2, 3), (1,), (1, 2)]
    indicator = np.array([[0, 1, 1],
                          [1, 0, 0],
                          [1, 1, 0]])
    binarized = binarizer.fit_transform(labels)
    assert_array_equal(indicator, binarized)
    assert_equal(binarizer.inverse_transform(binarized), labels)

    # Label-indicator-matrix input must round-trip unchanged.
    binarizer.fit(indicator)
    assert_array_equal(indicator,
                       binarizer.inverse_transform(indicator))

    # Regression test for the two-class multilabel case.
    binarizer = LabelBinarizer()
    labels = [[1, 0], [0], [1], [0, 1]]
    expected = np.array([[1, 1],
                         [1, 0],
                         [0, 1],
                         [1, 1]])
    binarized = binarizer.fit_transform(labels)
    assert_array_equal(expected, binarized)
    # Compare as sets: inverse_transform does not guarantee label order.
    assert_equal([set(sample) for sample in binarizer.inverse_transform(binarized)],
                 [set(sample) for sample in labels])
示例2: partb
# 需要导入模块: from sklearn.preprocessing import LabelBinarizer [as 别名]
# 或者: from sklearn.preprocessing.LabelBinarizer import inverse_transform [as 别名]
def partb():
    """Run a 14-fold train/evaluate loop of a one-hidden-layer neural net on the POFA splits.

    Loads ``pofa{i}.npz`` for i in 0..13, trains a ``NeuralNetwork`` on each
    split, and prints per-split and aggregate train/test accuracies.
    Depends on module-level ``NeuralNetwork``, ``sigmoid``, ``softmax`` and
    ``derivative_sigmoid`` defined elsewhere in this project.
    """
    def load(file_name):
        # Load one .npz split; features are stored sample-per-row and
        # transposed here to the (n_features, n_samples) layout the network expects.
        # NOTE(review): ``file`` shadows the builtin; the handle is never closed.
        file = np.load(file_name)
        X_train =file['X_train'].T
        y_train =file['y_train']
        X_test =file['X_test'].T
        y_test =file['y_test']
        X_cv =file['X_cv'].T
        y_cv =file['y_cv']
        return X_train,y_train,X_cv,y_cv,X_test,y_test
    # Running [correct, total] counters across all 14 splits.
    train_ = [0,0]
    test_ = [0,0]
    overall = []  # per-split test accuracies
    for i in range(14):
        X_train,y_train,X_cv,y_cv,X_test,y_test = load('pofa{}.npz'.format(i))
        from sklearn.preprocessing import LabelBinarizer
        # One-hot encode labels; transposed to (n_classes, n_samples) to match X.
        binarizer = LabelBinarizer()
        binarizer.fit(y_train)
        Y_train = binarizer.transform(y_train).T
        Y_cv = binarizer.transform(y_cv).T
        #nn.forward(X)
        #nn.backprop(X,Y,graient_check=True)
        print(X_train.shape[0], Y_train.shape[0])
        # Architecture: input -> 30 hidden (sigmoid) -> softmax output.
        nn = NeuralNetwork([X_train.shape[0],30,Y_train.shape[0]], functions=[sigmoid,softmax], derivatives=[derivative_sigmoid])
        nn.fit(X_train,Y_train,eta=0.01,momentum=0.5,minibatch=16,regularizer=0.15,max_iter=200,gradient_check=False,cv = (X_cv,Y_cv),graphs=False, lbfgs=False)
        # Decode network outputs back to label space for accuracy computation.
        output = nn.forward(X_train)
        y_train_output = binarizer.inverse_transform(output.T)
        y_test_output = binarizer.inverse_transform(nn.forward(X_test).T)
        print("Iteration: ",i)
        print((y_train_output==y_train).mean())
        print((y_test_output ==y_test).mean())
        overall.append((y_test == y_test_output).mean())
        # Accumulate correct/total counts for the pooled accuracies below.
        train_[0] += (y_train_output==y_train).sum()
        train_[1] += y_train.shape[0]
        test_[0] += (y_test_output==y_test).sum()
        test_[1] += y_test.shape[0]
    print("Average train accuracy: ", train_[0]/train_[1],"Average test accuracy: ",test_[0]/test_[1])
    print(train_,test_)
    overall = np.array(overall)
    print(overall.mean())
示例3: display_image_predictions
# 需要导入模块: from sklearn.preprocessing import LabelBinarizer [as 别名]
# 或者: from sklearn.preprocessing.LabelBinarizer import inverse_transform [as 别名]
def display_image_predictions(features, labels, predictions):
    """Plot images next to horizontal bars of their top softmax predictions.

    Parameters: ``features`` are images (values apparently in [0, 1], rescaled
    by 255 for display); ``labels`` are one-hot encoded true labels;
    ``predictions`` carries ``.indices``/``.values`` per sample — presumably a
    TensorFlow ``top_k`` result; verify against the caller.
    """
    n_classes = 10
    label_names = _load_label_names()
    # Recover integer class ids from the one-hot ``labels``.
    label_binarizer = LabelBinarizer()
    label_binarizer.fit(range(n_classes))
    label_ids = label_binarizer.inverse_transform(np.array(labels))
    fig, axies = plt.subplots(nrows=4, ncols=2)
    fig.tight_layout()
    fig.suptitle('Softmax Predictions', fontsize=20, y=1.1)
    n_predictions = 3
    margin = 0.05
    ind = np.arange(n_predictions)
    width = (1. - 2. * margin) / n_predictions
    # One row per sample: image on the left, prediction bars on the right.
    for image_i, (feature, label_id, pred_indicies, pred_values) in enumerate(zip(features, label_ids, predictions.indices, predictions.values)):
        pred_names = [label_names[pred_i] for pred_i in pred_indicies]
        correct_name = label_names[label_id]
        axies[image_i][0].imshow(feature*255)
        axies[image_i][0].set_title(correct_name)
        axies[image_i][0].set_axis_off()
        # Reverse so the highest-probability prediction is drawn on top.
        axies[image_i][1].barh(ind + margin, pred_values[::-1], width)
        axies[image_i][1].set_yticks(ind + margin)
        axies[image_i][1].set_yticklabels(pred_names[::-1])
        axies[image_i][1].set_xticks([0, 0.5, 1.0])
示例4: GBClassifier
# 需要导入模块: from sklearn.preprocessing import LabelBinarizer [as 别名]
# 或者: from sklearn.preprocessing.LabelBinarizer import inverse_transform [as 别名]
class GBClassifier(_BaseGB, ClassifierMixin):
    """Gradient-boosting classifier.

    Labels are binarized to a -1/+1 indicator matrix and boosting is run by
    the ``_BaseGB`` machinery; predictions map decision scores back to the
    original labels.
    """

    def __init__(self, estimator, n_estimators=100,
                 step_size="line_search", learning_rate=0.1,
                 loss="squared_hinge", subsample=1.0,
                 callback=None, random_state=None):
        self.estimator = estimator
        self.n_estimators = n_estimators
        self.step_size = step_size
        self.learning_rate = learning_rate
        self.loss = loss
        self.subsample = subsample
        self.callback = callback
        self.random_state = random_state

    def _get_loss(self):
        # Select the loss implementation by name; unknown names raise KeyError.
        losses = {
            "squared_hinge": _SquaredHingeLoss(),
            "log": _LogLoss(),
        }
        return losses[self.loss]

    def fit(self, X, y):
        # Encode labels as a -1/+1 indicator matrix before boosting.
        self._lb = LabelBinarizer(neg_label=-1)
        indicator = self._lb.fit_transform(y)
        return super(GBClassifier, self).fit(X, indicator)

    def predict(self, X):
        # Decision scores -> original label space.
        scores = self.decision_function(X)
        return self._lb.inverse_transform(scores)
示例5: BinaryRelevanceClassifier
# 需要导入模块: from sklearn.preprocessing import LabelBinarizer [as 别名]
# 或者: from sklearn.preprocessing.LabelBinarizer import inverse_transform [as 别名]
class BinaryRelevanceClassifier(BaseEstimator, ClassifierMixin):
    """Binary-relevance multilabel classifier.

    Fits one independent clone of ``estimator`` per label column of the
    binarized target and predicts by stacking the per-label predictions.
    """

    def __init__(self, estimator):
        self.estimator = estimator

    def fit(self, X, Y):
        """Fit one binary estimator per label; returns self."""
        # binarize labels
        self.bl = LabelBinarizer()
        Y = self.bl.fit_transform(Y)
        self.classes_ = self.bl.classes_
        # create an estimator for each label
        self.estimators_ = []
        # FIX: ``xrange`` is Python 2 only (NameError on Python 3).
        for i in range(self.bl.classes_.shape[0]):
            estimator = clone(self.estimator)
            estimator.fit(X, Y[:, i])
            self.estimators_.append(estimator)
        # FIX: fit() must return self per the scikit-learn estimator API,
        # otherwise chaining and Pipeline/clone usage break.
        return self

    def predict(self, X):
        """Predict the multilabel set for each row of X."""
        self._check_is_fitted()
        X = np.atleast_2d(X)
        # One column of predictions per fitted per-label estimator.
        Y = np.empty((X.shape[0], self.classes_.shape[0]))
        for i, estimator in enumerate(self.estimators_):
            Y[:, i] = estimator.predict(X).T
        return self.bl.inverse_transform(Y)

    def _check_is_fitted(self):
        # Raise if predict() is called before fit().
        if not hasattr(self, "estimators_"):
            raise ValueError("The object hasn't been fitted yet!")
示例6: test_label_binarizer_set_label_encoding
# 需要导入模块: from sklearn.preprocessing import LabelBinarizer [as 别名]
# 或者: from sklearn.preprocessing.LabelBinarizer import inverse_transform [as 别名]
def test_label_binarizer_set_label_encoding():
    """Custom neg_label/pos_label values survive a fit_transform round-trip."""
    binarizer = LabelBinarizer(neg_label=-2, pos_label=2)

    # Two-class case: a single output column coded as -2 / +2.
    labels = np.array([0, 1, 1, 0])
    expected = np.array([[-2, 2, 2, -2]]).T
    encoded = binarizer.fit_transform(labels)
    assert_array_equal(expected, encoded)
    assert_array_equal(binarizer.inverse_transform(encoded), labels)

    # Multi-class case: one column per class, same -2 / +2 coding.
    labels = np.array([3, 2, 1, 2, 0])
    expected = np.array([[-2, -2, -2, +2],
                         [-2, -2, +2, -2],
                         [-2, +2, -2, -2],
                         [-2, -2, +2, -2],
                         [+2, -2, -2, -2]])
    encoded = binarizer.fit_transform(labels)
    assert_array_equal(expected, encoded)
    assert_array_equal(binarizer.inverse_transform(encoded), labels)
示例7: test_label_binarizer
# 需要导入模块: from sklearn.preprocessing import LabelBinarizer [as 别名]
# 或者: from sklearn.preprocessing.LabelBinarizer import inverse_transform [as 别名]
def test_label_binarizer():
    """Default LabelBinarizer round-trips binary and multiclass string labels."""
    binarizer = LabelBinarizer()

    # Two-class case: a single 0/1 output column.
    labels = ["neg", "pos", "pos", "neg"]
    expected = np.array([[0, 1, 1, 0]]).T
    encoded = binarizer.fit_transform(labels)
    assert_array_equal(expected, encoded)
    assert_array_equal(binarizer.inverse_transform(encoded), labels)

    # Multi-class case: one indicator column per class (sorted order).
    labels = ["spam", "ham", "eggs", "ham", "0"]
    expected = np.array([[0, 0, 0, 1],
                         [0, 0, 1, 0],
                         [0, 1, 0, 0],
                         [0, 0, 1, 0],
                         [1, 0, 0, 0]])
    encoded = binarizer.fit_transform(labels)
    assert_array_equal(expected, encoded)
    assert_array_equal(binarizer.inverse_transform(encoded), labels)
示例8: MLPClassifier
# 需要导入模块: from sklearn.preprocessing import LabelBinarizer [as 别名]
# 或者: from sklearn.preprocessing.LabelBinarizer import inverse_transform [as 别名]
class MLPClassifier(BaseMLP, ClassifierMixin):
    """ Multilayer Perceptron Classifier.
    Uses a neural network with one hidden layer, trained by the ``BaseMLP``
    machinery; labels are one-hot encoded with a LabelBinarizer so the
    (default softmax) output layer can be trained against them.
    Parameters
    ----------
    n_hidden : int, number of hidden units (default 200).
    lr : float, learning rate (default 0.1).
    l2decay : float, L2 weight decay (default 0).
    loss : str, training loss name (default "cross_entropy").
    output_layer : str, output nonlinearity (default "softmax").
    batch_size : int, minibatch size (default 100).
    verbose : int, verbosity level (default 0).
    Attributes
    ----------
    lb : LabelBinarizer fitted on the training labels; used to decode predictions.
    Notes
    -----
    References
    ----------"""
    def __init__(
        self, n_hidden=200, lr=0.1, l2decay=0, loss="cross_entropy", output_layer="softmax", batch_size=100, verbose=0
    ):
        super(MLPClassifier, self).__init__(n_hidden, lr, l2decay, loss, output_layer, batch_size, verbose)
    def fit(self, X, y, max_epochs=10, shuffle_data=False):
        # One-hot encode labels, then delegate the actual training to BaseMLP.
        self.lb = LabelBinarizer()
        one_hot_labels = self.lb.fit_transform(y)
        super(MLPClassifier, self).fit(X, one_hot_labels, max_epochs, shuffle_data)
        return self
    def predict(self, X):
        # Map the network's output back to the original label space.
        prediction = super(MLPClassifier, self).predict(X)
        return self.lb.inverse_transform(prediction)
示例9: test_fit_reg_squared_multiple_outputs
# 需要导入模块: from sklearn.preprocessing import LabelBinarizer [as 别名]
# 或者: from sklearn.preprocessing.LabelBinarizer import inverse_transform [as 别名]
def test_fit_reg_squared_multiple_outputs():
    """CDRegressor with l1/l2 penalty fits a multi-output (one-hot) target.

    Uses module-level fixtures ``mult_target`` / ``mult_dense``; checks the
    decoded accuracy and the expected sparsity level.
    """
    reg = CDRegressor(C=0.05, random_state=0, penalty="l1/l2",
                      loss="squared", max_iter=100)
    # Turn the multiclass target into one regression output per class.
    lb = LabelBinarizer()
    Y = lb.fit_transform(mult_target)
    reg.fit(mult_dense, Y)
    # Decode per-class scores back to labels via inverse_transform.
    y_pred = lb.inverse_transform(reg.predict(mult_dense))
    assert_almost_equal(np.mean(y_pred == mult_target), 0.797, 3)
    # l1/l2 penalty should zero out half of the coefficient groups.
    assert_almost_equal(reg.n_nonzero(percentage=True), 0.5)
示例10: BaseClassifier
# 需要导入模块: from sklearn.preprocessing import LabelBinarizer [as 别名]
# 或者: from sklearn.preprocessing.LabelBinarizer import inverse_transform [as 别名]
class BaseClassifier(BaseEstimator):
    """Shared classifier plumbing: probability estimates, label encoding,
    decision function and decision-to-label prediction."""

    def predict_proba(self, X):
        """Return (n_samples, 2) class probabilities; binary problems only."""
        if len(self.classes_) != 2:
            raise NotImplementedError("predict_(log_)proba only supported"
                                      " for binary classification")
        if self.loss == "log":
            # Logistic sigmoid of the decision values.
            df = self.decision_function(X).ravel()
            prob = 1.0 / (1.0 + np.exp(-df))
        elif self.loss == "modified_huber":
            # Clip decision values to [-1, 1] and rescale to [0, 1].
            df = self.decision_function(X).ravel()
            prob = np.minimum(1, np.maximum(-1, df))
            prob += 1
            prob /= 2
        else:
            raise NotImplementedError("predict_(log_)proba only supported when"
                                      " loss='log' or loss='modified_huber' "
                                      "(%s given)" % self.loss)
        out = np.zeros((X.shape[0], 2), dtype=np.float64)
        out[:, 1] = prob
        out[:, 0] = 1 - prob
        return out

    def _set_label_transformers(self, y, reencode=False, neg_label=-1):
        """Set up label encoder/binarizer; return (y, n_classes, n_vectors).

        With ``reencode`` the labels are first mapped to 0..n_classes-1 via a
        LabelEncoder; otherwise they are assumed to already be integers.
        ``n_vectors`` is 1 for binary problems, n_classes otherwise.
        """
        if reencode:
            self.label_encoder_ = LabelEncoder()
            y = self.label_encoder_.fit_transform(y).astype(np.int32)
        else:
            y = y.astype(np.int32)
        self.label_binarizer_ = LabelBinarizer(neg_label=neg_label,
                                               pos_label=1)
        self.label_binarizer_.fit(y)
        self.classes_ = self.label_binarizer_.classes_.astype(np.int32)
        n_classes = len(self.label_binarizer_.classes_)
        n_vectors = 1 if n_classes <= 2 else n_classes
        return y, n_classes, n_vectors

    def decision_function(self, X):
        """Raw decision scores: X @ coef_.T (+ intercept_ when present)."""
        pred = safe_sparse_dot(X, self.coef_.T)
        if hasattr(self, "intercept_"):
            pred += self.intercept_
        return pred

    def predict(self, X):
        """Decision scores -> binarizer labels -> (optionally) original labels."""
        pred = self.decision_function(X)
        out = self.label_binarizer_.inverse_transform(pred)
        # Undo the LabelEncoder re-encoding if it was applied during fit.
        if hasattr(self, "label_encoder_"):
            out = self.label_encoder_.inverse_transform(out)
        return out
示例11: test_label_binarizer_multilabel
# 需要导入模块: from sklearn.preprocessing import LabelBinarizer [as 别名]
# 或者: from sklearn.preprocessing.LabelBinarizer import inverse_transform [as 别名]
def test_label_binarizer_multilabel():
lb = LabelBinarizer()
inp = [(2, 3), (1,), (1, 2)]
expected = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_equal(lb.inverse_transform(got), inp)
示例12: test_label_binarizer_iris
# 需要导入模块: from sklearn.preprocessing import LabelBinarizer [as 别名]
# 或者: from sklearn.preprocessing.LabelBinarizer import inverse_transform [as 别名]
def test_label_binarizer_iris():
lb = LabelBinarizer()
Y = lb.fit_transform(iris.target)
clfs = [SGDClassifier().fit(iris.data, Y[:, k]) for k in range(len(lb.classes_))]
Y_pred = np.array([clf.decision_function(iris.data) for clf in clfs]).T
y_pred = lb.inverse_transform(Y_pred)
accuracy = np.mean(iris.target == y_pred)
y_pred2 = SGDClassifier().fit(iris.data, iris.target).predict(iris.data)
accuracy2 = np.mean(iris.target == y_pred2)
assert_almost_equal(accuracy, accuracy2)
示例13: __init__
# 需要导入模块: from sklearn.preprocessing import LabelBinarizer [as 别名]
# 或者: from sklearn.preprocessing.LabelBinarizer import inverse_transform [as 别名]
class _CategoricalEncoder:
    """One-hot encoder for arbitrary (hashable) category values."""

    def __init__(self):
        """Create an unfitted encoder backed by a LabelBinarizer."""
        self._lb = LabelBinarizer()

    def fit(self, X):
        """Learn the category set from a list or array of categories.

        Parameters
        ----------
        * `X` [array-like, shape=(n_categories,)]:
            List of categories.
        """
        # Map each category to a stable integer code, and keep the reverse map.
        self.mapping_ = {category: code for code, category in enumerate(X)}
        self.inverse_mapping_ = {
            code: category for category, code in self.mapping_.items()
        }
        self._lb.fit([self.mapping_[category] for category in X])
        self.n_classes = len(self._lb.classes_)
        return self

    def transform(self, X):
        """One-hot encode an array of categories.

        Parameters
        ----------
        * `X` [array-like, shape=(n_samples,)]:
            List of categories.

        Returns
        -------
        * `Xt` [array-like, shape=(n_samples, n_categories)]:
            The one-hot encoded categories.
        """
        codes = [self.mapping_[category] for category in X]
        return self._lb.transform(codes)

    def inverse_transform(self, Xt):
        """Decode one-hot rows back to the original category values.

        Parameters
        ----------
        * `Xt` [array-like, shape=(n_samples, n_categories)]:
            One-hot encoded categories.

        Returns
        -------
        * `X` [array-like, shape=(n_samples,)]:
            The original categories.
        """
        codes = self._lb.inverse_transform(np.asarray(Xt))
        return [self.inverse_mapping_[code] for code in codes]
示例14: AdaBoostClassifier
# 需要导入模块: from sklearn.preprocessing import LabelBinarizer [as 别名]
# 或者: from sklearn.preprocessing.LabelBinarizer import inverse_transform [as 别名]
class AdaBoostClassifier(BaseEstimator, ClassifierMixin):
    """Binary AdaBoost-style ensemble built from clones of ``estimator``.

    Labels are encoded to -1/+1 with a LabelBinarizer; prediction is the sign
    of the alpha-weighted sum of the base estimators' -1/+1 predictions.
    The base estimator must accept ``sample_weight`` in ``fit``.
    """

    def __init__(self, estimator, n_estimators=10):
        self.estimator = estimator
        self.n_estimators = n_estimators

    def fit(self, X, y):
        """Fit up to ``n_estimators`` weighted rounds on (X, y); returns self."""
        n_samples = X.shape[0]
        # Start from uniform sample weights.
        weights = np.ones(n_samples, dtype=np.float64) / n_samples
        # Encode the two classes as -1/+1 so np.sign maps scores to classes.
        self._lb = LabelBinarizer(neg_label=-1)
        y = self._lb.fit_transform(y).ravel()
        # FIX: ``np.object`` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin ``object`` is the supported spelling of the object dtype.
        self.estimators_ = np.zeros(self.n_estimators, dtype=object)
        self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float64)
        y_pred_ = np.zeros(n_samples, dtype=np.float64)
        # FIX: ``xrange`` is Python 2 only; ``range`` works on both.
        for it in range(self.n_estimators):
            est = clone(self.estimator)
            est = est.fit(X, y, sample_weight=weights)
            y_pred = est.predict(X)
            # Weighted training error of this round's estimator.
            err = 1 - accuracy_score(y, y_pred, sample_weight=weights)
            if err == 0:
                # Perfect round: keep it with full weight and stop boosting.
                self.estimator_weights_[it] = 1
                self.estimators_[it] = est
                break
            alpha = 0.5 * np.log((1 - err) / err)
            #weights *= np.exp(- alpha * y * y_pred)
            #weights /= weights.sum()
            # Re-weight from the accumulated margin rather than the classic
            # per-round multiplicative update (kept above for reference).
            y_pred_ += alpha * y_pred
            weights = np.exp(-y * y_pred_)
            #weights = 1.0 / (1 + np.exp(y * y_pred_)) # logit boost
            weights /= weights.sum()
            self.estimator_weights_[it] = alpha
            self.estimators_[it] = est
        return self

    def predict(self, X):
        """Predict labels as the sign of the weighted ensemble score."""
        y_pred = np.zeros(X.shape[0], dtype=np.float64)
        for it in range(self.n_estimators):
            # Unfitted slots (after early break) have weight 0 and are skipped.
            if self.estimator_weights_[it] != 0:
                pred = self.estimators_[it].predict(X)
                y_pred += self.estimator_weights_[it] * pred
        y_pred = np.sign(y_pred)
        return self._lb.inverse_transform(y_pred.reshape(-1, 1))
示例15: NN_Classifier
# 需要导入模块: from sklearn.preprocessing import LabelBinarizer [as 别名]
# 或者: from sklearn.preprocessing.LabelBinarizer import inverse_transform [as 别名]
class NN_Classifier(NNBase):
    """Neural-network classifier on top of NNBase (Python 2 code: uses
    ``print`` statements and ``xrange``).

    Labels are one-hot encoded with a LabelBinarizer; predictions decode the
    network output back to label space. Relies on module-level helpers
    ``row`` and ``coalesce`` and on NNBase's ``_predict``/``_update``/
    gradient methods.
    """
    def __init__(self,layers = [], lr=0.01, epochs=None, noisy=None, verbose=False):
        # NOTE(review): mutable default for ``layers`` is shared across calls;
        # harmless only if NNBase never mutates it — confirm.
        super(NN_Classifier, self).__init__(layers=layers, lr=lr, epochs=epochs, noisy=noisy, verbose=verbose)
        self.type = 'C'  # marks this network as a classifier
        self.error_func = CrossEntropyError
        self.accuracy_score = AccuracyScore
        self.label_binarizer = LabelBinarizer()
    def predict(self, X):
        """Predict class labels: per-sample forward pass, then decode."""
        predictions = []
        for el in X:
            current_prediction = NNBase._predict(self, row(el))
            predictions.append(current_prediction)
        predictions = np.vstack(predictions)
        # ``coalesce`` presumably converts raw outputs to a one-hot/argmax
        # form the binarizer can invert — verify against its definition.
        current_results = coalesce(predictions)
        return self.label_binarizer.inverse_transform(current_results)
    def predict_proba(self, X):
        """Return the raw network outputs (one row per sample)."""
        predictions = []
        for el in X:
            current_prediction = NNBase._predict(self, row(el))
            predictions.append(current_prediction)
        predictions = np.vstack(predictions)
        return predictions
    def fit(self, X, T):
        """Train by per-sample updates for ``epochs`` passes over (X, T)."""
        # One-hot encode the targets for the network's output layer.
        T_impl = self.label_binarizer.fit_transform(T)
        if not self.epochs:
            self.epochs = 1
        for num in xrange(self.epochs):
            if self.verbose:
                print "Epoch: %d" % num
            for i in xrange(len(X)):
                NNBase._update(self, row(X[i]), row(T_impl[i]))
    def error(self, X, T):
        """Cross-entropy error of the network outputs against one-hot T."""
        T_impl = self.label_binarizer.transform(T)
        Y = self.predict_proba(X)
        return self.error_func.func(Y, T_impl)
    def score(self, X, T):
        """Classification accuracy of predict(X) against T."""
        Y = self.predict(X)
        return self.accuracy_score.func(Y,T)
    def analytical_gradient(self, X, T):
        # Delegates to NNBase with one-hot targets.
        T_impl = self.label_binarizer.transform(T)
        return NNBase._analytical_gradient(self, X, T_impl)
    def numerical_gradient(self, X, T):
        # Finite-difference counterpart, for gradient checking.
        T_impl = self.label_binarizer.transform(T)
        return NNBase._numerical_gradient(self, X, T_impl)