This article collects typical usage examples of the Python method sklearn.neighbors.KNeighborsClassifier. If you have been wondering what exactly neighbors.KNeighborsClassifier does in Python, how to use it, or where to find examples of it, the curated code samples below may help. You can also explore further usage of its containing module, sklearn.neighbors.
Below are 15 code examples of the neighbors.KNeighborsClassifier method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
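Before diving into the collected examples, here is a minimal, self-contained sketch of the typical fit/predict cycle of KNeighborsClassifier. The dataset and parameter values are illustrative choices, not drawn from the examples below:

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

# Split a toy dataset, fit a 5-NN classifier, and report test accuracy
X_train, X_test, y_train, y_test = train_test_split(
    *load_iris(return_X_y=True), test_size=0.25, random_state=0)
clf = KNeighborsClassifier(n_neighbors=5)
clf.fit(X_train, y_train)
print('Test accuracy: {:.4f}'.format(clf.score(X_test, y_test)))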
Example 1: classify_1nn
# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KNeighborsClassifier [as alias]
def classify_1nn(data_train, data_test):
    '''
    Classification using 1NN.
    Inputs: data_train, data_test: train and test csv file paths
    Outputs: predictions and accuracy
    '''
    import numpy as np
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.metrics import accuracy_score
    from sklearn.preprocessing import StandardScaler

    data = {'src': np.loadtxt(data_train, delimiter=','),
            'tar': np.loadtxt(data_test, delimiter=',')}
    # The last column holds the label; the rest are features
    Xs, Ys = data['src'][:, :-1], data['src'][:, -1]
    Xt, Yt = data['tar'][:, :-1], data['tar'][:, -1]
    # with_mean=0 evaluates to False, so only variance scaling is applied
    Xs = StandardScaler(with_mean=False, with_std=True).fit_transform(Xs)
    Xt = StandardScaler(with_mean=False, with_std=True).fit_transform(Xt)
    clf = KNeighborsClassifier(n_neighbors=1)
    clf.fit(Xs, Ys)
    ypred = clf.predict(Xt)
    acc = accuracy_score(y_true=Yt, y_pred=ypred)
    print('Acc: {:.4f}'.format(acc))
    return ypred, acc
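A hypothetical call for reference; the file names are placeholders for CSVs whose last column holds the label:

# Hypothetical usage with placeholder paths
ypred, acc = classify_1nn('train_domain.csv', 'test_domain.csv')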
Example 2: buildModel
# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KNeighborsClassifier [as alias]
def buildModel(dataset, method, parameters):
    """
    Build final model for predicting real testing data
    """
    features = dataset.columns[0:-1]
    if method == 'RNN':
        clf = performRNNlass(dataset[features], dataset['UpDown'])
        return clf
    elif method == 'RF':
        clf = RandomForestClassifier(n_estimators=1000, n_jobs=-1)
    elif method == 'KNN':
        clf = neighbors.KNeighborsClassifier()
    elif method == 'SVM':
        c = parameters[0]
        g = parameters[1]
        clf = SVC(C=c, gamma=g)
    elif method == 'ADA':
        clf = AdaBoostClassifier()
    return clf.fit(dataset[features], dataset['UpDown'])
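For illustration, a hedged usage sketch: it assumes dataset is a pandas DataFrame whose last column is the binary 'UpDown' label, and that from sklearn import neighbors is in scope, as the import header above suggests. The toy data below is my own:

import numpy as np
import pandas as pd

# Toy frame: two feature columns plus the 'UpDown' label column
rng = np.random.RandomState(0)
dataset = pd.DataFrame({'f1': rng.rand(50), 'f2': rng.rand(50),
                        'UpDown': rng.randint(0, 2, 50)})
fitted = buildModel(dataset, method='KNN', parameters=[])
print(fitted.predict(dataset[dataset.columns[0:-1]][:5]))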
Example 3: knn_masked_data
# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KNeighborsClassifier [as alias]
def knn_masked_data(trX, trY, missing_data_dir, input_shape, k):
    raw_im_data = np.loadtxt(join(script_dir, missing_data_dir, 'index.txt'),
                             delimiter=' ', dtype=str)
    raw_mask_data = np.loadtxt(join(script_dir, missing_data_dir, 'index_mask.txt'),
                               delimiter=' ', dtype=str)
    # Use the 'brute' method since we only do one query per classifier,
    # so this is quicker as it avoids the overhead of building a search tree
    knn_m = KNeighborsClassifier(algorithm='brute', n_neighbors=k)
    prob_Y_hat = np.zeros((raw_im_data.shape[0], int(np.max(trY) + 1)))
    total_images = raw_im_data.shape[0]
    pbar = progressbar.ProgressBar(
        widgets=[progressbar.FormatLabel('\rProcessed %(value)d of %(max)d Images '),
                 progressbar.Bar()],
        maxval=total_images, term_width=50).start()
    for i in range(total_images):
        mask_im = load_image(join(script_dir, missing_data_dir, raw_mask_data[i][0]),
                             input_shape, 1).reshape(np.prod(input_shape))
        mask = np.logical_not(mask_im > eps)  # since the mask is 1 at missing locations
        v_im = load_image(join(script_dir, missing_data_dir, raw_im_data[i][0]),
                          input_shape, 255).reshape(np.prod(input_shape))
        rep_mask = np.tile(mask, (trX.shape[0], 1))
        # Corrupt the whole training set according to the current mask
        corr_trX = np.multiply(trX, rep_mask)
        knn_m.fit(corr_trX, trY)
        prob_Y_hat[i, :] = knn_m.predict_proba(v_im.reshape(1, -1))
        pbar.update(i)
    pbar.finish()
    return prob_Y_hat
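The core trick above (refitting k-NN on a masked copy of the training set, then reading class probabilities for a single query) can be reproduced in isolation. A minimal sketch with synthetic data; all shapes and values are illustrative:

import numpy as np
from sklearn.neighbors import KNeighborsClassifier

rng = np.random.RandomState(0)
trX = rng.rand(100, 16)                       # 100 training "images" of 16 pixels
trY = rng.randint(0, 3, 100)                  # 3 classes
mask = rng.rand(16) > 0.3                     # keep roughly 70% of the pixels
corr_trX = trX * mask                         # zero out the "missing" pixels everywhere
knn = KNeighborsClassifier(algorithm='brute', n_neighbors=5)
knn.fit(corr_trX, trY)
query = (rng.rand(16) * mask).reshape(1, -1)  # a query corrupted the same way
print(knn.predict_proba(query))               # one row of class probabilities, shape (1, 3)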
Example 4: test_experiment_sklearn_multiclass
# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KNeighborsClassifier [as alias]
def test_experiment_sklearn_multiclass(tmpdir_name):
    X, y = make_classification_df(n_samples=1024, n_num_features=10, n_cat_features=0,
                                  n_classes=5, random_state=0, id_column='user_id')
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)

    params = {
        'n_neighbors': 10
    }

    result = run_experiment(params, X_train, y_train, X_test, tmpdir_name,
                            algorithm_type=KNeighborsClassifier, with_auto_prep=False)

    assert len(np.unique(result.oof_prediction[:, 0])) > 5  # making sure prediction is not binarized
    assert len(np.unique(result.test_prediction[:, 0])) > 5
    assert result.oof_prediction.shape == (len(y_train), 5)
    assert result.test_prediction.shape == (len(y_test), 5)

    _check_file_exists(tmpdir_name)
Example 5: test_classification
# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KNeighborsClassifier [as alias]
def test_classification():
    # Check classification for various parameter settings.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        random_state=rng)
    grid = ParameterGrid({"max_samples": [0.5, 1.0],
                          "max_features": [1, 2, 4],
                          "bootstrap": [True, False],
                          "bootstrap_features": [True, False]})

    for base_estimator in [None,
                           DummyClassifier(),
                           Perceptron(tol=1e-3),
                           DecisionTreeClassifier(),
                           KNeighborsClassifier(),
                           SVC(gamma="scale")]:
        for params in grid:
            BaggingClassifier(base_estimator=base_estimator,
                              random_state=rng,
                              **params).fit(X_train, y_train).predict(X_test)
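The pattern under test can also be run standalone: bagging over k-NN base estimators. A minimal sketch with illustrative parameters (note that recent scikit-learn versions rename base_estimator to estimator; passing the estimator positionally works in both):

from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.neighbors import KNeighborsClassifier

X, y = load_iris(return_X_y=True)
bag = BaggingClassifier(KNeighborsClassifier(), max_samples=0.5,
                        max_features=2, random_state=0)
print(bag.fit(X, y).score(X, y))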
Example 6: test_kneighbors_classifier_sparse
# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KNeighborsClassifier [as alias]
def test_kneighbors_classifier_sparse(n_samples=40,
                                      n_features=5,
                                      n_test_pts=10,
                                      n_neighbors=5,
                                      random_state=0):
    # Test k-NN classifier on sparse matrices
    # Like the above, but with various types of sparse matrices
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    X *= X > .2
    # Use the builtin int: the np.int alias is removed in recent NumPy
    y = ((X ** 2).sum(axis=1) < .5).astype(int)

    for sparsemat in SPARSE_TYPES:
        knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
                                             algorithm='auto')
        knn.fit(sparsemat(X), y)
        epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
        for sparsev in SPARSE_TYPES + (np.asarray,):
            X_eps = sparsev(X[:n_test_pts] + epsilon)
            y_pred = knn.predict(X_eps)
            assert_array_equal(y_pred, y[:n_test_pts])
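KNeighborsClassifier accepts SciPy sparse inputs directly, which is what this test exercises across matrix types. A minimal standalone sketch with made-up data:

import numpy as np
from scipy.sparse import csr_matrix
from sklearn.neighbors import KNeighborsClassifier

rng = np.random.RandomState(0)
X = rng.rand(40, 5)
X[X < 0.7] = 0                        # make the matrix mostly zeros
y = (X.sum(axis=1) > 1).astype(int)
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(csr_matrix(X), y)             # fit directly on a CSR matrix
print(knn.predict(csr_matrix(X[:5])))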
Example 7: test_neighbors_iris
# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KNeighborsClassifier [as alias]
def test_neighbors_iris():
    # Sanity checks on the iris dataset
    # Puts three points of each label in the plane and performs a
    # nearest neighbor query on points near the decision boundary.
    for algorithm in ALGORITHMS:
        clf = neighbors.KNeighborsClassifier(n_neighbors=1,
                                             algorithm=algorithm)
        clf.fit(iris.data, iris.target)
        assert_array_equal(clf.predict(iris.data), iris.target)

        clf.set_params(n_neighbors=9, algorithm=algorithm)
        clf.fit(iris.data, iris.target)
        assert np.mean(clf.predict(iris.data) == iris.target) > 0.95

        rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
        rgs.fit(iris.data, iris.target)
        assert_greater(np.mean(rgs.predict(iris.data).round() == iris.target),
                       0.95)
Example 8: test_neighbors_digits
# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KNeighborsClassifier [as alias]
def test_neighbors_digits():
    # Sanity check on the digits dataset
    # the 'brute' algorithm has been observed to fail if the input
    # dtype is uint8 due to overflow in distance calculations.
    X = digits.data.astype('uint8')
    Y = digits.target
    (n_samples, n_features) = X.shape
    train_test_boundary = int(n_samples * 0.8)
    train = np.arange(0, train_test_boundary)
    test = np.arange(train_test_boundary, n_samples)
    (X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]

    clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
    score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
    score_float = clf.fit(X_train.astype(float, copy=False), Y_train).score(
        X_test.astype(float, copy=False), Y_test)
    assert_equal(score_uint8, score_float)
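The defensive cast being verified here matters in practice: squared-distance arithmetic can overflow small integer dtypes, so casting features to float before fitting is a safe habit. A short sketch on synthetic pixel-like data (values are illustrative):

import numpy as np
from sklearn.neighbors import KNeighborsClassifier

rng = np.random.RandomState(0)
X = rng.randint(0, 256, size=(100, 64)).astype('uint8')  # pixel-like features
y = rng.randint(0, 10, size=100)
knn = KNeighborsClassifier(n_neighbors=1, algorithm='brute')
# Cast to float so intermediate distance arithmetic cannot wrap around
knn.fit(X.astype(float), y)
print(knn.score(X.astype(float), y))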
Example 9: test_same_knn_parallel
# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KNeighborsClassifier [as alias]
def test_same_knn_parallel(algorithm):
    X, y = datasets.make_classification(n_samples=30, n_features=5,
                                        n_redundant=0, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y)

    clf = neighbors.KNeighborsClassifier(n_neighbors=3,
                                         algorithm=algorithm)
    clf.fit(X_train, y_train)
    y = clf.predict(X_test)
    dist, ind = clf.kneighbors(X_test)
    graph = clf.kneighbors_graph(X_test, mode='distance').toarray()

    clf.set_params(n_jobs=3)
    clf.fit(X_train, y_train)
    y_parallel = clf.predict(X_test)
    dist_parallel, ind_parallel = clf.kneighbors(X_test)
    graph_parallel = \
        clf.kneighbors_graph(X_test, mode='distance').toarray()

    assert_array_equal(y, y_parallel)
    assert_array_almost_equal(dist, dist_parallel)
    assert_array_equal(ind, ind_parallel)
    assert_array_almost_equal(graph, graph_parallel)
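Outside of tests, the takeaway is simply that n_jobs parallelizes the neighbor search without changing results; an illustrative configuration:

from sklearn.neighbors import KNeighborsClassifier

# n_jobs=-1 uses all available cores for neighbor queries
clf = KNeighborsClassifier(n_neighbors=3, n_jobs=-1)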
Example 10: tune_params
# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KNeighborsClassifier [as alias]
def tune_params(self):
    """
    Tune specified (and default) parameters.
    """
    self._start_time = time.time()
    self.default_params()  # set default parameters
    self.score_init()      # set initial score
    if self.dim_reduction is not None:
        # Note the comma between the two pipeline steps; without it,
        # Python would try to call the first tuple and raise a TypeError
        knn = Pipeline([('dimred', self.dim_reduction_method()),
                        ('knn', KNeighborsClassifier(**self._params))])
        self._pipeline = True
    else:
        knn = KNeighborsClassifier(**self._params)
    self.apply_gridsearch(knn)
    self.print_progress(self._start_time)
    return self
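A concrete, self-contained version of the dimensionality-reduction branch. PCA, the dataset, and all parameter values are my assumptions; the class above leaves dim_reduction_method and _params configurable:

from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline

X, y = load_digits(return_X_y=True)
knn = Pipeline([('dimred', PCA(n_components=20)),        # assumed reduction step
                ('knn', KNeighborsClassifier(n_neighbors=5))])
knn.fit(X, y)
print(knn.score(X, y))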
Example 11: test_31_knn_classifier
# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KNeighborsClassifier [as alias]
def test_31_knn_classifier(self):
    print("\ntest 31 (knn classifier without preprocessing) [binary-class]\n")
    X, X_test, y, features, target, test_file = self.data_utility.get_data_for_binary_classification()

    model = KNeighborsClassifier()
    pipeline_obj = Pipeline([
        ("model", model)
    ])
    pipeline_obj.fit(X, y)

    file_name = 'test31sklearn.pmml'
    skl_to_pmml(pipeline_obj, features, target, file_name)
    model_name = self.adapa_utility.upload_to_zserver(file_name)
    predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
    model_pred = pipeline_obj.predict(X_test)
    model_prob = pipeline_obj.predict_proba(X_test)
    self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
    self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True)
Example 12: test_32_knn_classifier
# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KNeighborsClassifier [as alias]
def test_32_knn_classifier(self):
    print("\ntest 32 (knn classifier without preprocessing) [multi-class]\n")
    X, X_test, y, features, target, test_file = self.data_utility.get_data_for_multi_class_classification()

    model = KNeighborsClassifier()
    pipeline_obj = Pipeline([
        ("model", model)
    ])
    pipeline_obj.fit(X, y)

    file_name = 'test32sklearn.pmml'
    skl_to_pmml(pipeline_obj, features, target, file_name)
    model_name = self.adapa_utility.upload_to_zserver(file_name)
    predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
    model_pred = pipeline_obj.predict(X_test)
    model_prob = pipeline_obj.predict_proba(X_test)
    self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
    self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True)
Example 13: __init__
# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KNeighborsClassifier [as alias]
def __init__(self, classifier=FaceClassifierModels.DEFAULT):
    self._clf = None
    if classifier == FaceClassifierModels.LINEAR_SVM:
        self._clf = SVC(C=1.0, kernel="linear", probability=True)
    elif classifier == FaceClassifierModels.NAIVE_BAYES:
        self._clf = GaussianNB()
    elif classifier == FaceClassifierModels.RBF_SVM:
        self._clf = SVC(C=1, kernel='rbf', probability=True, gamma=2)
    elif classifier == FaceClassifierModels.NEAREST_NEIGHBORS:
        self._clf = KNeighborsClassifier(n_neighbors=1)
    elif classifier == FaceClassifierModels.DECISION_TREE:
        self._clf = DecisionTreeClassifier(max_depth=5)
    elif classifier == FaceClassifierModels.RANDOM_FOREST:
        self._clf = RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1)
    elif classifier == FaceClassifierModels.NEURAL_NET:
        self._clf = MLPClassifier(alpha=1)
    elif classifier == FaceClassifierModels.ADABOOST:
        self._clf = AdaBoostClassifier()
    elif classifier == FaceClassifierModels.QDA:
        self._clf = QuadraticDiscriminantAnalysis()
    print("classifier={}".format(FaceClassifierModels(classifier)))
Example 14: getModels
# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KNeighborsClassifier [as alias]
def getModels():
    # Names of the supported regressors and classifiers
    return [
        "LinearRegression",
        "BayesianRidge",
        "ARDRegression",
        "ElasticNet",
        "HuberRegressor",
        "Lasso",
        "LassoLars",
        "Rigid",  # sic; presumably meant "Ridge"
        "SGDRegressor",
        "SVR",
        "MLPClassifier",
        "KNeighborsClassifier",
        "SVC",
        "GaussianProcessClassifier",
        "DecisionTreeClassifier",
        "RandomForestClassifier",
        "AdaBoostClassifier",
        "GaussianNB",
        "LogisticRegression",
        "QuadraticDiscriminantAnalysis",
    ]
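One hedged sketch of how such a name list is often consumed, resolving a string to an estimator class; the registry and helper below are hypothetical, not part of the original code:

from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC

# Hypothetical registry; only two of the listed names shown
MODEL_REGISTRY = {'KNeighborsClassifier': KNeighborsClassifier, 'SVC': SVC}

def makeModel(name, **kwargs):
    return MODEL_REGISTRY[name](**kwargs)

clf = makeModel('KNeighborsClassifier', n_neighbors=3)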
Example 15: define_classification_model
# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KNeighborsClassifier [as alias]
def define_classification_model():
    """ Select and define the model you will use for the classifier.
    """
    if config['model_type'] == 'linearSVM':  # linearSVM can be faster than SVM
        return LinearSVC(C=1)
    elif config['model_type'] == 'SVM':  # non-linear SVM, we can use the kernel trick
        return SVC(C=1, kernel='rbf', gamma='scale')
    elif config['model_type'] == 'kNN':  # k-nearest neighbour
        return KNeighborsClassifier(n_neighbors=1, metric='cosine')
    elif config['model_type'] == 'perceptron':  # optimizes log-loss (cross-entropy) with SGD
        # newer scikit-learn spells this loss 'log_loss'
        return SGDClassifier(max_iter=600, verbose=0.5, loss='log', learning_rate='optimal')
    elif config['model_type'] == 'MLP':  # optimizes log-loss (cross-entropy) with SGD
        return MLPClassifier(hidden_layer_sizes=(20,), max_iter=600, verbose=10,
                             solver='sgd', learning_rate='constant', learning_rate_init=0.001)
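A hypothetical invocation: the function reads a module-level config mapping, so setting the assumed 'model_type' key selects a branch. Here the cosine-distance 1-NN branch:

# Hypothetical: config is assumed to be a plain dict in the same module
config = {'model_type': 'kNN'}
model = define_classification_model()
print(model)  # KNeighborsClassifier(metric='cosine', n_neighbors=1)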