This article collects typical usage examples of the ClusterCentroids class from Python's imblearn.under_sampling module. If you are wondering what ClusterCentroids is for, or how to use it, the curated class code examples below may help.
The following presents 15 code examples of the ClusterCentroids class, sorted by popularity by default.
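Before the excerpts, here is a minimal, self-contained sketch of the typical ClusterCentroids workflow. It assumes a recent imbalanced-learn release, where the resampling method is fit_resample and the balance target is given via sampling_strategy (older releases, visible in several examples below, use ratio and fit_sample instead); the synthetic dataset is purely illustrative.

from collections import Counter

from sklearn.datasets import make_classification
from imblearn.under_sampling import ClusterCentroids

# Build an imbalanced two-class toy dataset (roughly 10% / 90%).
X, y = make_classification(n_classes=2, weights=[0.1, 0.9],
                           n_features=20, n_samples=5000,
                           random_state=10)
print("Original class counts:", Counter(y))

# Under-sample the majority class by replacing it with KMeans centroids.
cc = ClusterCentroids(sampling_strategy='auto', random_state=0)
X_resampled, y_resampled = cc.fit_resample(X, y)
print("Resampled class counts:", Counter(y_resampled))

The examples that follow are excerpts from the library's test suite and example gallery.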
Example 1: test_sample_wrong_X
def test_sample_wrong_X():
    """Test that an error is raised when the X passed to sample differs
    from the X used for fitting."""
    # Create the object
    cc = ClusterCentroids(random_state=RND_SEED)
    cc.fit(X, Y)
    assert_raises(RuntimeError, cc.sample, np.random.random((100, 40)),
                  np.array([0] * 50 + [1] * 50))
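The test excerpts in this article rely on module-level fixtures (X, Y, RND_SEED, R_TOL) and assertion helpers defined elsewhere in the imbalanced-learn test module; some older excerpts (Examples 6 and 7) use a larger fixture with 500/4500 samples per class. A stand-in along the following lines makes the smaller snippets runnable in isolation; the data values are illustrative placeholders, not the library's actual fixture, so the hard-coded ground-truth arrays in the tests will not match them.

from collections import Counter

import numpy as np
from numpy.testing import (assert_allclose, assert_array_almost_equal,
                           assert_array_equal, assert_equal, assert_raises)
from pytest import raises
from scipy import sparse
from sklearn.cluster import KMeans

from imblearn.under_sampling import ClusterCentroids

RND_SEED = 0
R_TOL = 1e-4

# Illustrative imbalanced data: 3 minority (class 0) and 12 majority (class 1) samples.
rng = np.random.RandomState(RND_SEED)
X = rng.uniform(size=(15, 2))
Y = np.array([0] * 3 + [1] * 12)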
Example 2: test_multiclass_fit_sample
def test_multiclass_fit_sample():
    y = Y.copy()
    y[5] = 2
    y[6] = 2
    cc = ClusterCentroids(random_state=RND_SEED)
    X_resampled, y_resampled = cc.fit_sample(X, y)
    count_y_res = Counter(y_resampled)
    assert count_y_res[0] == 2
    assert count_y_res[1] == 2
    assert count_y_res[2] == 2
Example 3: test_fit_resample_auto
def test_fit_resample_auto():
    sampling_strategy = 'auto'
    cc = ClusterCentroids(
        sampling_strategy=sampling_strategy, random_state=RND_SEED)
    X_resampled, y_resampled = cc.fit_resample(X, Y)
    X_gt = np.array([[0.92923648, 0.76103773], [0.47104475, 0.44386323],
                     [0.13347175, 0.12167502], [0.06738818, -0.529627],
                     [0.17901516, 0.69860992], [0.094035, -2.55298982]])
    y_gt = np.array([0, 0, 0, 1, 1, 1])
    assert_allclose(X_resampled, X_gt, rtol=R_TOL)
    assert_array_equal(y_resampled, y_gt)
Example 4: test_fit_resample_half
def test_fit_resample_half():
    sampling_strategy = {0: 3, 1: 6}
    cc = ClusterCentroids(
        sampling_strategy=sampling_strategy, random_state=RND_SEED)
    X_resampled, y_resampled = cc.fit_resample(X, Y)
    X_gt = np.array([[0.92923648, 0.76103773], [0.13347175, 0.12167502],
                     [0.47104475, 0.44386323], [0.09125309, -0.85409574],
                     [0.19220316, 0.32337101], [0.094035, -2.55298982],
                     [0.20792588, 1.49407907], [0.04352327, -0.20515826],
                     [0.12372842, 0.6536186]])
    y_gt = np.array([0, 0, 0, 1, 1, 1, 1, 1, 1])
    print(X_resampled)
    assert_allclose(X_resampled, X_gt, rtol=R_TOL)
    assert_array_equal(y_resampled, y_gt)
Example 5: test_fit_sample_object
def test_fit_sample_object():
    ratio = 'auto'
    cluster = KMeans(random_state=RND_SEED)
    cc = ClusterCentroids(
        ratio=ratio, random_state=RND_SEED, estimator=cluster)
    X_resampled, y_resampled = cc.fit_sample(X, Y)
    X_gt = np.array([[0.92923648, 0.76103773],
                     [0.47104475, 0.44386323],
                     [0.13347175, 0.12167502],
                     [0.06738818, -0.529627],
                     [0.17901516, 0.69860992],
                     [0.094035, -2.55298982]])
    y_gt = np.array([0, 0, 0, 1, 1, 1])
    assert_allclose(X_resampled, X_gt, rtol=R_TOL)
    assert_array_equal(y_resampled, y_gt)
Example 6: test_cc_fit
def test_cc_fit():
    """Test the fitting method"""
    # Define the parameter for the under-sampling
    ratio = 'auto'
    # Create the object
    cc = ClusterCentroids(ratio=ratio, random_state=RND_SEED)
    # Fit the data
    cc.fit(X, Y)
    # Check that the dataset statistics have been computed
    assert_equal(cc.min_c_, 0)
    assert_equal(cc.maj_c_, 1)
    assert_equal(cc.stats_c_[0], 500)
    assert_equal(cc.stats_c_[1], 4500)
Example 7: test_multiclass_fit_sample
def test_multiclass_fit_sample():
    """Test the fit_sample method with a multiclass target"""
    # Make y multiclass
    y = Y.copy()
    y[0:1000] = 2
    # Resample the data
    cc = ClusterCentroids(random_state=RND_SEED)
    X_resampled, y_resampled = cc.fit_sample(X, y)
    # Check the class counts of the resampled target
    count_y_res = Counter(y_resampled)
    assert_equal(count_y_res[0], 400)
    assert_equal(count_y_res[1], 400)
    assert_equal(count_y_res[2], 400)
Example 8: test_fit_sample_half
def test_fit_sample_half():
    """Test fit and sample routines with a ratio of 0.5"""
    # Define the parameter for the under-sampling
    ratio = .5
    # Create the object
    cc = ClusterCentroids(ratio=ratio, random_state=RND_SEED)
    # Fit and sample
    X_resampled, y_resampled = cc.fit_sample(X, Y)
    currdir = os.path.dirname(os.path.abspath(__file__))
    X_gt = np.load(os.path.join(currdir, 'data', 'cc_x_05.npy'))
    y_gt = np.load(os.path.join(currdir, 'data', 'cc_y_05.npy'))
    assert_array_equal(X_resampled, X_gt)
    assert_array_equal(y_resampled, y_gt)
Example 9: test_fit_sample_auto
def test_fit_sample_auto():
    """Test fit and sample routines with the 'auto' ratio"""
    # Define the parameter for the under-sampling
    ratio = 'auto'
    # Create the object
    cc = ClusterCentroids(ratio=ratio, random_state=RND_SEED)
    # Fit and sample
    X_resampled, y_resampled = cc.fit_sample(X, Y)
    X_gt = np.array([[0.92923648, 0.76103773], [0.47104475, 0.44386323],
                     [0.13347175, 0.12167502], [0.06738818, -0.529627],
                     [0.17901516, 0.69860992], [0.094035, -2.55298982]])
    y_gt = np.array([0, 0, 0, 1, 1, 1])
    assert_array_almost_equal(X_resampled, X_gt)
    assert_array_equal(y_resampled, y_gt)
Example 10: test_fit_hard_voting
def test_fit_hard_voting():
    sampling_strategy = 'auto'
    voting = 'hard'
    cluster = KMeans(random_state=RND_SEED)
    cc = ClusterCentroids(
        sampling_strategy=sampling_strategy,
        random_state=RND_SEED,
        estimator=cluster,
        voting=voting)
    X_resampled, y_resampled = cc.fit_resample(X, Y)
    X_gt = np.array([[0.92923648, 0.76103773], [0.47104475, 0.44386323],
                     [0.13347175, 0.12167502], [0.09125309, -0.85409574],
                     [0.12372842, 0.6536186], [0.094035, -2.55298982]])
    y_gt = np.array([0, 0, 0, 1, 1, 1])
    assert_allclose(X_resampled, X_gt, rtol=R_TOL)
    assert_array_equal(y_resampled, y_gt)
    # With hard voting, every resampled point must be one of the original samples
    for x in X_resampled:
        assert np.any(np.all(x == X, axis=1))
Example 11: test_fit_sample_half
def test_fit_sample_half():
    """Test fit and sample routines with a ratio of 0.5"""
    # Define the parameter for the under-sampling
    ratio = .5
    # Create the object
    cc = ClusterCentroids(ratio=ratio, random_state=RND_SEED)
    # Fit and sample
    X_resampled, y_resampled = cc.fit_sample(X, Y)
    X_gt = np.array([[0.92923648, 0.76103773], [0.47104475, 0.44386323],
                     [0.13347175, 0.12167502], [0.09125309, -0.85409574],
                     [0.19220316, 0.32337101], [0.094035, -2.55298982],
                     [0.20792588, 1.49407907], [0.04352327, -0.20515826],
                     [0.12372842, 0.6536186]])
    y_gt = np.array([0, 0, 0, 1, 1, 1, 1, 1, 1])
    assert_array_almost_equal(X_resampled, X_gt)
    assert_array_equal(y_resampled, y_gt)
Example 12: test_fit_sample_check_voting
def test_fit_sample_check_voting():
    # The default 'auto' voting resolves to 'soft' for dense input
    # and to 'hard' for sparse input.
    cc = ClusterCentroids(random_state=RND_SEED)
    cc.fit_sample(X, Y)
    assert cc.voting_ == 'soft'
    cc = ClusterCentroids(random_state=RND_SEED)
    cc.fit_sample(sparse.csr_matrix(X), Y)
    assert cc.voting_ == 'hard'
Example 13: test_fit_sample_error
def test_fit_sample_error():
    ratio = 'auto'
    cluster = 'rnd'
    cc = ClusterCentroids(
        ratio=ratio, random_state=RND_SEED, estimator=cluster)
    with raises(ValueError, match="has to be a KMeans clustering"):
        cc.fit_sample(X, Y)
    voting = 'unknown'
    cc = ClusterCentroids(ratio=ratio, voting=voting, random_state=RND_SEED)
    with raises(ValueError, match="needs to be one of"):
        cc.fit_sample(X, Y)
Example 14: make_classification
# Imports assumed by this excerpt (not shown in the original listing):
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.decomposition import PCA
from imblearn.under_sampling import ClusterCentroids

# Plotting colours used below; placeholder values for this excerpt
almost_black = '#262626'
palette = ['#1f77b4', '#ff7f0e', '#2ca02c']

# Generate the dataset
X, y = make_classification(n_classes=2, class_sep=2, weights=[0.1, 0.9],
                           n_informative=3, n_redundant=1, flip_y=0,
                           n_features=20, n_clusters_per_class=1,
                           n_samples=5000, random_state=10)
# Instantiate a PCA object for the sake of easy visualisation
pca = PCA(n_components=2)
# Fit and transform X to visualise inside a 2D feature space
X_vis = pca.fit_transform(X)
# Apply Cluster Centroids
cc = ClusterCentroids()
X_resampled, y_resampled = cc.fit_sample(X, y)
X_res_vis = pca.transform(X_resampled)
# Two subplots, unpack the axes array immediately
f, (ax1, ax2) = plt.subplots(1, 2)
ax1.scatter(X_vis[y == 0, 0], X_vis[y == 0, 1], label="Class #0", alpha=0.5,
            edgecolor=almost_black, facecolor=palette[0], linewidth=0.15)
ax1.scatter(X_vis[y == 1, 0], X_vis[y == 1, 1], label="Class #1", alpha=0.5,
            edgecolor=almost_black, facecolor=palette[2], linewidth=0.15)
ax1.set_title('Original set')
ax2.scatter(X_res_vis[y_resampled == 0, 0], X_res_vis[y_resampled == 0, 1],
            label="Class #0", alpha=.5, edgecolor=almost_black,
            facecolor=palette[0], linewidth=0.15)
Example 15: range
# Imports used by this excerpt; `data` (a pandas DataFrame), `cm` and `thresh`
# are defined earlier in the original script and are not shown here.
import itertools
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
from imblearn.under_sampling import ClusterCentroids

for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
    plt.text(j, i, cm[i, j],
             horizontalalignment="center",
             color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')

# Define X and y
X, y = data.loc[:, data.columns != 'state'].values, data.loc[:, data.columns == 'state'].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

# ClusterCentroids under-sampling of the training set
cc = ClusterCentroids(random_state=0)
os_X, os_y = cc.fit_sample(X_train, y_train)

# XGBoost classifier trained on the resampled data
clf_XG = XGBClassifier(learning_rate=0.3, min_child_weight=1,
                       max_depth=6, gamma=0, subsample=1, max_delta_step=0,
                       colsample_bytree=1, reg_lambda=1, n_estimators=100,
                       seed=1000, scale_pos_weight=1000)
clf_XG.fit(os_X, os_y, eval_set=[(os_X, os_y), (X_test, y_test)],
           eval_metric='auc', verbose=False)
evals_result = clf_XG.evals_result()
y_true, y_pred = y_test, clf_XG.predict(X_test)

# F1 score, precision, recall, specificity, G score
print("F1_score : %.4g" % metrics.f1_score(y_true, y_pred))
print("Recall : %.4g" % metrics.recall_score(y_true, y_pred))
recall = metrics.recall_score(y_true, y_pred)
print("Precision : %.4g" % metrics.precision_score(y_true, y_pred))