本文整理汇总了Python中mlxtend.tf_classifier.TfMultiLayerPerceptron类的典型用法代码示例。如果您正苦于以下问题:Python TfMultiLayerPerceptron类的具体用法?Python TfMultiLayerPerceptron怎么用?Python TfMultiLayerPerceptron使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了TfMultiLayerPerceptron类的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _clf_mlp
def _clf_mlp(trX,teX,trY,teY):
print "MLP"
print trX.shape,"trX shape"
print "Enter Layer for MLP"
layer=input()
# print "enter delIdx"
# delIdx=input()
# while(delIdx):
# trX=np.delete(trX,-1,axis=0)
# trY=np.delete(trY,-1,axis=0)
# delIdx=delIdx-1
print "factors",factors(trX.shape[0])
teY=teY.astype(np.int32)
trY=trY.astype(np.int32)
print trX.shape,"trX shape"
print "enter no of mini batch"
mini_batch=int(input())
mlp = TfMultiLayerPerceptron(eta=0.01,
epochs=100,
hidden_layers=layer,
activations=['relu' for i in range(len(layer))],
print_progress=3,
minibatches=mini_batch,
optimizer='adam',
random_seed=1)
mlp.fit(trX,trY)
pred=mlp.predict(teX)
print _f_count(teY),"test f count"
pred=pred.astype(np.int32)
print _f_count(pred),"pred f count"
conf_mat=confusion_matrix(teY, pred)
process_cm(conf_mat, to_print=True)
print precision_score(teY,pred),"Precision Score"
print recall_score(teY,pred),"Recall Score"
print roc_auc_score(teY,pred), "ROC_AUC"
示例2: test_fail_minibatches
def test_fail_minibatches():
    """Gradient-descent MLP trained with 13 minibatches fits the data.

    NOTE(review): despite the name, this asserts a successful fit --
    presumably checking that an awkward minibatch count still works.
    """
    params = dict(epochs=100,
                  eta=0.5,
                  hidden_layers=[5],
                  activations=['logistic'],
                  optimizer='gradientdescent',
                  minibatches=13,
                  random_seed=1)
    model = MLP(**params)
    model.fit(X, y)
    predictions = model.predict(X)
    assert (predictions == y).all()
示例3: test_binary_sgd
def test_binary_sgd():
    """Binary task trained with one-sample minibatches (pure SGD)."""
    model = MLP(eta=0.5,
                epochs=10,
                hidden_layers=[5],
                activations=['logistic'],
                optimizer='gradientdescent',
                minibatches=len(y_bin),  # one minibatch per sample
                random_seed=1)
    model.fit(X_bin, y_bin)
    assert (model.predict(X_bin) == y_bin).all()
示例4: test_valid_acc
def test_valid_acc():
    """valid_acc_ records one validation-accuracy entry per epoch."""
    n_epochs = 3
    model = MLP(eta=0.5,
                epochs=n_epochs,
                hidden_layers=[5],
                activations=['logistic'],
                optimizer='gradientdescent',
                minibatches=1,
                random_seed=1)
    model.fit(X, y, X_valid=X[:100], y_valid=y[:100])
    assert len(model.valid_acc_) == n_epochs
示例5: test_train_acc
def test_train_acc():
    """train_acc_ records one training-accuracy entry per epoch."""
    n_epochs = 3
    model = MLP(eta=0.5,
                epochs=n_epochs,
                hidden_layers=[5],
                activations=['logistic'],
                optimizer='gradientdescent',
                minibatches=1,
                random_seed=1)
    model.fit(X, y)
    assert len(model.train_acc_) == n_epochs
示例6: test_score_function_adagrad
def test_score_function_adagrad():
    """An adagrad-trained MLP reaches perfect training accuracy via score()."""
    model = MLP(eta=0.5,
                epochs=100,
                hidden_layers=[5],
                activations=['logistic'],
                optimizer='adagrad',
                minibatches=1,
                random_seed=1)
    model.fit(X, y)
    score = model.score(X, y)
    assert score == 1.0, score
示例7: test_multiclass_gd_learningdecay
def test_multiclass_gd_learningdecay():
    """Learning-rate decay reproduces the expected per-epoch cost curve."""
    model = MLP(epochs=5,
                eta=0.5,
                hidden_layers=[15],
                activations=['logistic'],
                optimizer='gradientdescent',
                minibatches=1,
                decay=[0.5, 1.0],  # decay schedule parameters
                random_seed=1)
    model.fit(X, y)
    expected_costs = [3.107878, 2.124671, 1.786916, 1.65095, 1.590468]
    np.testing.assert_almost_equal(expected_costs, model.cost_, decimal=2)
示例8: test_multiclass_gd_dropout
def test_multiclass_gd_dropout():
    """With dropout=0.05 the net is heavily regularized; accuracy drops to 0.67."""
    model = MLP(dropout=0.05,
                eta=0.5,
                epochs=100,
                hidden_layers=[5],
                activations=['logistic'],
                optimizer='gradientdescent',
                minibatches=1,
                random_seed=1)
    model.fit(X, y)
    accuracy = round(model.score(X, y), 2)
    assert accuracy == 0.67, accuracy
示例9: test_multiclass_probas
def test_multiclass_probas():
    """predict_proba is near-one-hot for one representative sample per class."""
    model = MLP(eta=0.5,
                epochs=500,
                hidden_layers=[10],
                activations=['logistic'],
                optimizer='gradientdescent',
                minibatches=1,
                random_seed=1)
    model.fit(X, y)
    sample_idx = [0, 50, 149]  # sample labels: 0, 1, 2
    probas = model.predict_proba(X[sample_idx])
    expected = np.array([[1.0, 0.0, 0.0],
                        [0.0, 0.9, 0.1],
                        [0.0, 0.1, 0.9]])
    np.testing.assert_almost_equal(probas, expected, 1)
示例10: test_continue_learning
def test_continue_learning():
    """Refitting with init_params=False resumes from the learned weights.

    A first 25-epoch fit gets 144/150 correct; continuing training (without
    re-initializing the parameters) improves that to 150/150.
    """
    model = MLP(eta=0.5,
                epochs=25,
                hidden_layers=[5],
                activations=['logistic'],
                optimizer='gradientdescent',
                minibatches=1,
                random_seed=1)
    model.fit(X, y)
    n_correct = np.sum(y == model.predict(X))
    assert n_correct == 144, n_correct
    model.fit(X, y, init_params=False)  # continue from current weights
    n_correct = np.sum(y == model.predict(X))
    assert n_correct == 150, n_correct
示例11: test_mapping
def test_mapping():
    """_layermapping reports shape/label pairs for every weight and bias tensor."""
    w, b = MLP()._layermapping(n_features=10,
                               n_classes=11,
                               hidden_layers=[8, 7, 6])
    expected_biases = {1: [[8], 'n_hidden_1'],
                       2: [[7], 'n_hidden_2'],
                       3: [[6], 'n_hidden_3'],
                       'out': [[11], 'n_classes']}
    expected_weights = {1: [[10, 8], 'n_features, n_hidden_1'],
                        2: [[8, 7], 'n_hidden_1, n_hidden_2'],
                        3: [[7, 6], 'n_hidden_2, n_hidden_3'],
                        'out': [[6, 11], 'n_hidden_3, n_classes']}
    assert b == expected_biases, b
    assert w == expected_weights, w