This article collects typical usage examples of the sklearn.datasets.make_moons method in Python. If you are unsure what datasets.make_moons does, how to call it, or what real uses look like, the curated code examples below may help. You can also explore further usage examples from the sklearn.datasets module, where this method lives.
The following presents 15 code examples of datasets.make_moons, sorted by popularity by default.
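As a primer before the examples, here is a minimal sketch of the call itself; the parameter values are illustrative only and do not come from any example below.

from sklearn.datasets import make_moons

# Two interleaving half-circles in 2-D; noise is the standard deviation of
# Gaussian jitter added to each point, random_state fixes the draw.
X, y = make_moons(n_samples=100, noise=0.1, random_state=0)
print(X.shape)  # (100, 2) -- point coordinates
print(y.shape)  # (100,)   -- class labels in {0, 1}, one per moon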
Example 1: test_classifier_comparison
# Required import: from sklearn import datasets [as alias]
# Or: from sklearn.datasets import make_moons [as alias]
def test_classifier_comparison():
    """Test that the classifier comparison example works."""
    X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
                               random_state=1, n_clusters_per_class=1)
    rng = np.random.RandomState(2)
    X += 2 * rng.uniform(size=X.shape)
    linearly_separable = (X, y)
    datasets = [make_moons(noise=0.3, random_state=0),
                make_circles(noise=0.2, factor=0.5, random_state=1),
                linearly_separable]
    scores = []
    for ds in datasets:
        X, y = ds
        X = StandardScaler().fit_transform(X)
        X_train, X_test, y_train, y_test = \
            train_test_split(X, y, test_size=.4, random_state=42)
        clf = SymbolicClassifier(random_state=0)
        clf.fit(X_train, y_train)
        score = clf.score(X_test, y_test)
        scores.append(('%.2f' % score).lstrip('0'))
    assert_equal(scores, ['.95', '.93', '.95'])
Example 2: load_mini
# Required import: from sklearn import datasets [as alias]
# Or: from sklearn.datasets import make_moons [as alias]
def load_mini(N=1000):
    # Two moons (classes 0/1) plus two circles shifted upward (classes 2/3)
    X, y = make_moons(N, noise=0.035, random_state=20)
    x_, y_ = make_circles(N, noise=0.02, random_state=20)
    x_[:, 1] += 2.0
    y_ += 2
    X = np.concatenate([X, x_], axis=0)
    y = np.concatenate([y, y_])
    # Center and scale the inputs
    X -= X.mean(0, keepdims=True)
    X /= X.max(0, keepdims=True)
    X = X.astype("float32")
    y = y.astype("int32")
    dict_init = [
        ("datum_shape", (2,)),
        ("n_classes", 4),
        ("name", "mini"),
        ("classes", [str(u) for u in range(4)]),
    ]
    dataset = Dataset(**dict(dict_init))
    dataset["inputs/train_set"] = X
    dataset["outputs/train_set"] = y
    return dataset
Example 3: generateData
# Required import: from sklearn import datasets [as alias]
# Or: from sklearn.datasets import make_moons [as alias]
def generateData(n):
    """
    Generate four 2-D toy datasets: blobs, circles, moons, and XOR-style blocks.
    """
    np.random.seed(12046)
    blobs = make_blobs(n_samples=n, centers=[[-2, -2], [2, 2]])
    circles = make_circles(n_samples=n, factor=.4, noise=.05)
    moons = make_moons(n_samples=n, noise=.05)
    blocks = np.random.rand(n, 2) - 0.5
    y = (blocks[:, 0] * blocks[:, 1] < 0) + 0
    blocks = (blocks, y)
    # Neural networks are not robust to linear transformations of the data,
    # so standardize each dataset
    scaler = StandardScaler()
    blobs = (scaler.fit_transform(blobs[0]), blobs[1])
    circles = (scaler.fit_transform(circles[0]), circles[1])
    moons = (scaler.fit_transform(moons[0]), moons[1])
    blocks = (scaler.fit_transform(blocks[0]), blocks[1])
    return blobs, circles, moons, blocks
Example 4: runKernelPCA
# Required import: from sklearn import datasets [as alias]
# Or: from sklearn.datasets import make_moons [as alias]
def runKernelPCA():
    """
    Reduce the data's dimensionality with kernel PCA
    """
    data, labels = make_moons(n_samples=100, noise=0.05)
    fig = plt.figure(figsize=(10, 10), dpi=80)
    # Visualize the raw data
    ax = fig.add_subplot(2, 2, 1)
    visualizeKernelPCA(ax, data, labels)
    # Reduce the data with plain PCA and visualize the result
    ax = fig.add_subplot(2, 2, 2)
    model = trainPCA(data)
    x = model.transform(data)[:, 0]
    visualizeKernelPCA(ax, np.c_[x, [0] * len(x)], labels)
    # Reduce the data with kernel PCA and visualize the result
    ax = fig.add_subplot(2, 2, 3)
    model = trainKernelPCA(data)
    x = model.transform(data)[:, 0]
    visualizeKernelPCA(ax, np.c_[x, [0] * len(x)], labels)
    # Show the projection onto the first and second kernel PCA components
    ax = fig.add_subplot(2, 2, 4)
    visualizeKernelPCA(ax, model.transform(data), labels)
    plt.show()
Example 5: num_observations
# Required import: from sklearn import datasets [as alias]
# Or: from sklearn.datasets import make_moons [as alias]
def num_observations():
    obs_values = [10, 100, 1000]
    nn_input_dim = 2      # input layer dimensionality
    nn_output_dim = 2     # output layer dimensionality
    learning_rate = 0.01  # learning rate for gradient descent
    reg_lambda = 0.01     # regularization strength
    losses_store = []
    for i in obs_values:
        X, y = datasets.make_moons(i, noise=0.1)
        num_examples = len(X)  # training set size
        model = build_model(X, 32, 2)
        model, losses = train(model, X, y, reg_lambda=reg_lambda,
                              learning_rate=learning_rate)
        losses_store.append(losses)
        print(losses)
    x = np.linspace(0, 145, 30)
    for i in range(len(losses_store)):
        lab = 'n_observations = ' + str(obs_values[i])
        plt.plot(x, losses_store[i], label=lab)
    plt.legend()
    plt.show()
Example 6: noise
# Required import: from sklearn import datasets [as alias]
# Or: from sklearn.datasets import make_moons [as alias]
def noise():
    noise_values = [0.01, 0.1, 0.2, 0.3, 0.4]
    nn_input_dim = 2      # input layer dimensionality
    nn_output_dim = 2     # output layer dimensionality
    learning_rate = 0.01  # learning rate for gradient descent
    reg_lambda = 0.01     # regularization strength
    losses_store = []
    for i in noise_values:
        X, y = datasets.make_moons(200, noise=i)
        num_examples = len(X)  # training set size
        model = build_model(X, 32, 2)
        model, losses = train(model, X, y, reg_lambda=reg_lambda,
                              learning_rate=learning_rate)
        losses_store.append(losses)
        print(losses)
    x = np.linspace(0, 145, 30)
    for i in range(len(losses_store)):
        lab = 'noise_value = ' + str(noise_values[i])
        plt.plot(x, losses_store[i], label=lab)
    plt.legend()
    plt.show()
Example 7: reg
# Required import: from sklearn import datasets [as alias]
# Or: from sklearn.datasets import make_moons [as alias]
def reg():
    reg_values = [0.00, 0.01, 0.1, 0.2, 0.3]
    nn_input_dim = 2      # input layer dimensionality
    nn_output_dim = 2     # output layer dimensionality
    learning_rate = 0.01  # learning rate for gradient descent
    losses_store = []
    for i in reg_values:
        reg_lambda = i  # regularization strength
        X, y = datasets.make_moons(200, noise=0.2)
        num_examples = len(X)  # training set size
        model = build_model(X, 32, 2)
        model, losses = train(model, X, y, reg_lambda=reg_lambda,
                              learning_rate=learning_rate)
        losses_store.append(losses)
        print(losses)
    x = np.linspace(0, 145, 30)
    for i in range(len(losses_store)):
        lab = 'regularization_value = ' + str(reg_values[i])
        plt.plot(x, losses_store[i], label=lab)
    plt.legend()
    plt.show()
Example 8: test_num_nodes
# Required import: from sklearn import datasets [as alias]
# Or: from sklearn.datasets import make_moons [as alias]
def test_num_nodes():
    X, y = datasets.make_moons(400, noise=0.2)
    num_examples = len(X)  # training set size
    nn_input_dim = 2      # input layer dimensionality
    nn_output_dim = 2     # output layer dimensionality
    learning_rate = 0.01  # learning rate for gradient descent
    reg_lambda = 0.01     # regularization strength
    node_vals = [4, 8, 16, 32, 64, 128]
    losses_store = []
    for val in node_vals:
        model = build_model(X, val, 2)
        model, losses = train(model, X, y, reg_lambda=reg_lambda,
                              learning_rate=learning_rate)
        losses_store.append(losses)
        print(losses)
    x = np.linspace(0, 145, 30)
    for i in range(len(losses_store)):
        lab = 'n_nodes = ' + str(node_vals[i])
        plt.plot(x, losses_store[i], label=lab)
    plt.legend()
    plt.show()
Example 9: generate_data
# Required import: from sklearn import datasets [as alias]
# Or: from sklearn.datasets import make_moons [as alias]
def generate_data(n_samples=300, noise=0.05):
    noisy_moons = datasets.make_moons(n_samples=n_samples, noise=noise)
    X = noisy_moons[0]  # keep only the coordinates, discard the labels
    return X
Example 10: load_data
# Required import: from sklearn import datasets [as alias]
# Or: from sklearn.datasets import make_moons [as alias]
def load_data():
    x = ds.make_moons(n_samples=30000, shuffle=True, noise=0.05)[0]
    # Split into three contiguous chunks of 24000/3000/3000 samples
    return x[:24000], x[24000:27000], x[27000:]
Example 11: test_single_linkage_clustering
# Required import: from sklearn import datasets [as alias]
# Or: from sklearn.datasets import make_moons [as alias]
def test_single_linkage_clustering():
    # Check that we get the correct result in two emblematic cases
    moons, moon_labels = make_moons(noise=0.05, random_state=42)
    clustering = AgglomerativeClustering(n_clusters=2, linkage='single')
    clustering.fit(moons)
    assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
                                                     moon_labels), 1)
    circles, circle_labels = make_circles(factor=0.5, noise=0.025,
                                          random_state=42)
    clustering = AgglomerativeClustering(n_clusters=2, linkage='single')
    clustering.fit(circles)
    assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
                                                     circle_labels), 1)
Example 12: test_make_moons
# Required import: from sklearn import datasets [as alias]
# Or: from sklearn.datasets import make_moons [as alias]
def test_make_moons():
    X, y = make_moons(3, shuffle=False)
    for x, label in zip(X, y):
        # Class 0 lies on the unit circle centered at the origin;
        # class 1 on the unit circle centered at (1, 0.5)
        center = [0.0, 0.0] if label == 0 else [1.0, 0.5]
        dist_sqr = ((x - center) ** 2).sum()
        assert_almost_equal(dist_sqr, 1.0,
                            err_msg="Point is not on expected unit circle")
Example 13: test_as_classifier
# Required import: from sklearn import datasets [as alias]
# Or: from sklearn.datasets import make_moons [as alias]
def test_as_classifier():
    X, y = make_moons(n_samples=100, random_state=1)
    y = 2 * y - 1  # use -1/+1 labels
    clf = as_classifier(DecisionTreeRegressor())
    clf.fit(X, y)
    probas = clf.predict_proba(X)
    predictions = clf.predict(X)
    assert_array_equal(probas.shape, (len(X), 2))
    assert_array_equal(predictions, y)
    y[-1] = 2  # introducing a third class label should raise an error
    clf = as_classifier(DecisionTreeRegressor())
    assert_raises(ValueError, clf.fit, X, y)
Example 14: _download
# Required import: from sklearn import datasets [as alias]
# Or: from sklearn.datasets import make_moons [as alias]
def _download():
    # Note: all three splits use the same seed, so they contain identical samples
    train_x, train_t = make_moons(n_samples=10000, shuffle=True, noise=0.2,
                                  random_state=1234)
    test_x, test_t = make_moons(n_samples=10000, shuffle=True, noise=0.2,
                                random_state=1234)
    valid_x, valid_t = make_moons(n_samples=10000, shuffle=True, noise=0.2,
                                  random_state=1234)
    # Shift the inputs so all coordinates are non-negative
    train_x += np.abs(train_x.min())
    test_x += np.abs(test_x.min())
    valid_x += np.abs(valid_x.min())
    train_set = (train_x, train_t)
    test_set = (test_x, test_t)
    valid_set = (valid_x, valid_t)
    return train_set, test_set, valid_set
Example 15: generate_data
# Required import: from sklearn import datasets [as alias]
# Or: from sklearn.datasets import make_moons [as alias]
def generate_data(n_samples, dataset, noise):
    if dataset == 'moons':
        return datasets.make_moons(
            n_samples=n_samples,
            noise=noise,
            random_state=0
        )
    elif dataset == 'circles':
        return datasets.make_circles(
            n_samples=n_samples,
            noise=noise,
            factor=0.5,
            random_state=1
        )
    elif dataset == 'linear':
        X, y = datasets.make_classification(
            n_samples=n_samples,
            n_features=2,
            n_redundant=0,
            n_informative=2,
            random_state=2,
            n_clusters_per_class=1
        )
        rng = np.random.RandomState(2)
        X += noise * rng.uniform(size=X.shape)
        linearly_separable = (X, y)
        return linearly_separable
    else:
        raise ValueError(
            'Data type incorrectly specified. Please choose an existing '
            'dataset.')
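For illustration, a call such as the following (the argument values are hypothetical, not taken from the example) returns the (X, y) pair for the chosen generator:

# 200 noisy samples from the 'moons' branch of the dispatcher above
X, y = generate_data(n_samples=200, dataset='moons', noise=0.3)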