本文整理汇总了Python中sklearn.neural_network.BernoulliRBM.score_samples方法的典型用法代码示例。如果您正苦于以下问题:Python BernoulliRBM.score_samples方法的具体用法?Python BernoulliRBM.score_samples怎么用?Python BernoulliRBM.score_samples使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类sklearn.neural_network.BernoulliRBM的用法示例。
在下文中一共展示了BernoulliRBM.score_samples方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: testRBM
# 需要导入模块: from sklearn.neural_network import BernoulliRBM [as 别名]
# 或者: from sklearn.neural_network.BernoulliRBM import score_samples [as 别名]
def testRBM():
    """Smoke-test BernoulliRBM on a tiny binary dataset.

    Fits a 2-component RBM on four 3-bit samples, then prints the model's
    attributes, the transformed (hidden-unit) representation, and the
    per-sample pseudo-likelihood scores.
    """
    X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
    # Single-argument print() is identical on Python 2 and 3; the original
    # used Python-2-only print statements.
    print(X)
    model = BernoulliRBM(n_components=2)
    model.fit(X)
    print(dir(model))
    print(model.transform(X))
    print(model.score_samples(X))
    # prints the bound method object itself, not a call result
    print(model.gibbs)
示例2: test_small_sparse_partial_fit
# 需要导入模块: from sklearn.neural_network import BernoulliRBM [as 别名]
# 或者: from sklearn.neural_network.BernoulliRBM import score_samples [as 别名]
def test_small_sparse_partial_fit():
    """partial_fit must behave the same for sparse and dense input."""
    for make_sparse in (csc_matrix, csr_matrix):
        sparse_data = make_sparse(Xdigits[:100])
        dense_data = Xdigits[:100].copy()
        params = dict(n_components=64, learning_rate=0.1,
                      batch_size=10, random_state=9)
        rbm_sparse = BernoulliRBM(**params)
        rbm_dense = BernoulliRBM(**params)
        rbm_sparse.partial_fit(sparse_data)
        rbm_dense.partial_fit(dense_data)
        # Mean pseudo-likelihoods should agree regardless of input format.
        assert_almost_equal(rbm_sparse.score_samples(dense_data).mean(),
                            rbm_dense.score_samples(dense_data).mean(),
                            decimal=0)
示例3: run_test
# 需要导入模块: from sklearn.neural_network import BernoulliRBM [as 别名]
# 或者: from sklearn.neural_network.BernoulliRBM import score_samples [as 别名]
def run_test(params, model):
    """Fit the requested estimator on the module-level X/y and return its
    accuracy error.

    Parameters
    ----------
    params : tuple or scalar
        Hyper-parameters for the chosen model:
        "rf"            -> (n_tree, mtry)
        "svm"           -> (C, kernel)
        "knn"           -> k
        "NeuralNetwork" -> (n_components, learning_rate, batch_size, n_iter)
    model : str
        One of "rf", "svm", "knn", "NeuralNetwork".

    Returns
    -------
    The result of AccuracyErrorCalc(y, modelPred).

    Raises
    ------
    ValueError
        For an unrecognised `model` name (previously this fell through and
        crashed with a NameError on the undefined `modelPred`).
    """
    if model == "rf":
        n_tree, mtry = params
        print("# Trees: %s" % n_tree)
        print("mtry: %s" % mtry)
        rf = RandomForestClassifier(n_estimators=int(n_tree), verbose=True,
                                    n_jobs=-1, max_features=int(mtry))
        rf.fit(X, y)
        modelPred = rf.predict(X)
    elif model == "svm":
        C, kernel = params
        print("# Cost: %s" % C)
        print("kernel: %s" % kernel)
        svmod = SVC(int(C), kernel)
        svmod.fit(X, y)
        modelPred = svmod.predict(X)
    elif model == "knn":
        k = params
        print("# k: %s" % k)
        knnmod = KNeighborsClassifier(int(k))
        knnmod.fit(X, y)
        modelPred = knnmod.predict(X)
    elif model == "NeuralNetwork":
        n_components, learning_rate, batch_size, n_iter = params
        print("# n_components: %s" % n_components)
        print("# learning_rate: %s" % learning_rate)
        print("# batch_size: %s" % batch_size)
        print("# n_iter: %s" % n_iter)
        nnmod = BernoulliRBM(int(n_components), learning_rate,
                             int(batch_size), int(n_iter))
        # BernoulliRBM is unsupervised; fit() accepts y but ignores it.
        nnmod.fit(X, y)
        # NOTE(review): score_samples returns pseudo-likelihoods, not class
        # predictions — feeding them to AccuracyErrorCalc looks suspect;
        # confirm against the caller's intent.
        modelPred = nnmod.score_samples(X)
    else:
        raise ValueError("unknown model: %r" % (model,))
    accuError = AccuracyErrorCalc(y, modelPred)
    return accuError
示例4: test_score_samples
# 需要导入模块: from sklearn.neural_network import BernoulliRBM [as 别名]
# 或者: from sklearn.neural_network.BernoulliRBM import score_samples [as 别名]
def test_score_samples():
    """Test score_samples (pseudo-likelihood) method."""
    # The pseudo-likelihood must be computed without clipping; see
    # http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression
    seed = np.random.RandomState(42)
    samples = np.vstack([np.zeros(1000), np.ones(1000)])
    estimator = BernoulliRBM(n_components=10, batch_size=2,
                             n_iter=10, random_state=seed)
    estimator.fit(samples)
    assert_true(np.all(estimator.score_samples(samples) < -300))
    # Sparse and dense input must yield identical scores; this also
    # exercises sparse input validation.
    estimator.random_state = 42
    dense_scores = estimator.score_samples(samples)
    estimator.random_state = 42
    sparse_scores = estimator.score_samples(lil_matrix(samples))
    assert_almost_equal(dense_scores, sparse_scores)
示例5: test_fit
# 需要导入模块: from sklearn.neural_network import BernoulliRBM [as 别名]
# 或者: from sklearn.neural_network.BernoulliRBM import score_samples [as 别名]
def test_fit():
    """Fitting on digits reaches the known mean pseudo-likelihood and does
    not modify the caller's data in place."""
    data = Xdigits.copy()
    estimator = BernoulliRBM(n_components=64, learning_rate=0.1,
                             batch_size=10, n_iter=7, random_state=9)
    estimator.fit(data)
    mean_score = estimator.score_samples(data).mean()
    assert_almost_equal(mean_score, -21.0, decimal=0)
    # in-place tricks inside fit() must not have touched the input array
    assert_array_equal(data, Xdigits)
示例6: test_score_samples
# 需要导入模块: from sklearn.neural_network import BernoulliRBM [as 别名]
# 或者: from sklearn.neural_network.BernoulliRBM import score_samples [as 别名]
def test_score_samples():
    """Check that the pseudo likelihood is computed without clipping.
    http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
    """
    seed = np.random.RandomState(42)
    data = np.vstack([np.zeros(1000), np.ones(1000)])
    model = BernoulliRBM(n_components=10, batch_size=2,
                         n_iter=10, random_state=seed)
    model.fit(data)
    # every per-sample score must stay far below the clipping region
    assert np.all(model.score_samples(data) < -300)
示例7: test_partial_fit
# 需要导入模块: from sklearn.neural_network import BernoulliRBM [as 别名]
# 或者: from sklearn.neural_network.BernoulliRBM import score_samples [as 别名]
def test_partial_fit():
    """Seven epochs of partial_fit reach the known mean score and leave
    the input data untouched."""
    data = Xdigits.copy()
    model = BernoulliRBM(n_components=64, learning_rate=0.1,
                         batch_size=20, random_state=9)
    n_batches = int(np.ceil(float(data.shape[0]) / model.batch_size))
    minibatches = np.array_split(data, n_batches)
    for _epoch in range(7):
        for minibatch in minibatches:
            model.partial_fit(minibatch)
    assert_almost_equal(model.score_samples(data).mean(), -21.0, decimal=0)
    # partial_fit must not have modified the caller's array in place
    assert_array_equal(data, Xdigits)
示例8: test_score_samples
# 需要导入模块: from sklearn.neural_network import BernoulliRBM [as 别名]
# 或者: from sklearn.neural_network.BernoulliRBM import score_samples [as 别名]
def test_score_samples():
    """Test score_samples (pseudo-likelihood) method."""
    # Pseudo-likelihood must be computed without clipping.
    # See Fabian's blog, http://bit.ly/1iYefRk
    seed = np.random.RandomState(42)
    data = np.vstack([np.zeros(1000), np.ones(1000)])
    model = BernoulliRBM(n_components=10, batch_size=2, n_iter=10,
                         random_state=seed)
    model.fit(data)
    assert_true(np.all(model.score_samples(data) < -300))
    # Sparse and dense input must yield identical scores; this also
    # exercises sparse input validation.
    model.random_state = 42
    dense_scores = model.score_samples(data)
    model.random_state = 42
    sparse_scores = model.score_samples(lil_matrix(data))
    assert_almost_equal(dense_scores, sparse_scores)
    # Numerical stability (#2785): this input used to generate infinities
    # and crash with an exception.
    with np.errstate(under="ignore"):
        model.score_samples(np.arange(1000) * 100)
示例9: estimate_n_components
# 需要导入模块: from sklearn.neural_network import BernoulliRBM [as 别名]
# 或者: from sklearn.neural_network.BernoulliRBM import score_samples [as 别名]
def estimate_n_components():
    """Score a range of RBM sizes on the male-gender dataset.

    Fits one BernoulliRBM per candidate hidden-layer size, records the mean
    pseudo-likelihood of the training data under each model, and plots
    score versus size.

    Returns
    -------
    (n_comp_list, scores)
        The candidate sizes and their corresponding mean scores.
    """
    X = load_data('gender/male')
    # scale raw pixel values into [0, 1) for the Bernoulli visible units
    X = X.astype(np.float32) / 256
    n_comp_list = [100, 200, 300, 400, 500, 600,
                   700, 800, 900, 1000, 1100, 1200]
    scores = []
    for n_comps in n_comp_list:
        rbm = BernoulliRBM(random_state=0, verbose=True)
        rbm.learning_rate = 0.06
        rbm.n_iter = 50
        # BUG FIX: n_components was hard-coded to 100, so the loop variable
        # was ignored and every iteration trained an identical model.
        rbm.n_components = n_comps
        rbm.fit(X)
        scores.append(rbm.score_samples(X).mean())
    plt.figure()
    plt.plot(n_comp_list, scores)
    plt.show()
    return n_comp_list, scores
示例10: open
# 需要导入模块: from sklearn.neural_network import BernoulliRBM [as 别名]
# 或者: from sklearn.neural_network.BernoulliRBM import score_samples [as 别名]
import numpy as np
from sklearn.neural_network import BernoulliRBM
from sklearn import cross_validation

# Read the whitespace-separated dataset: column 0 is the target,
# columns 1-3 are the features.
with open("dataset") as textFile:
    lines = [line.split() for line in textFile]
a = np.array(lines, dtype=float)
dataPoints = np.array(a[:, [1, 2, 3]])
target = np.array(a[:, 0])

model = BernoulliRBM()
# Pseudo-likelihood scores are negative, so start below any possible value.
# (The original started at 0, which no score could ever exceed, leaving
# last_partition at 0 and crashing the final split with test_size=0.)
last_score = float("-inf")
last_partition = 0
for i in range(2, 10):
    x_train, x_test, y_train, y_test = cross_validation.train_test_split(
        dataPoints, target, test_size=float(i) / 10.0, random_state=0)
    # BernoulliRBM is unsupervised; fit() accepts y but ignores it.
    model.fit(x_train, y_train)
    # BUG FIX: score_samples() takes only X (passing y_test raised a
    # TypeError) and returns one score per sample, so reduce to the mean
    # before comparing.
    score = model.score_samples(x_test).mean()
    if score > last_score:
        last_score = score
        # BUG FIX: store the fraction actually tested; the original used
        # (i+1)/10, which is both off by one and integer division
        # (always 0 under Python 2).
        last_partition = float(i) / 10.0

# Refit on the best-scoring partition and report its score.
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
    dataPoints, target, test_size=last_partition, random_state=0)
model.fit(x_train, y_train)
print(model.score_samples(x_test).mean())
print(last_score)