This article collects typical usage examples of the TfMultiLayerPerceptron.fit method from the Python module mlxtend.tf_classifier. If you are wondering what exactly TfMultiLayerPerceptron.fit does and how to use it, the curated code examples below may help. You can also explore the usage of the class it belongs to, mlxtend.tf_classifier.TfMultiLayerPerceptron.
Below are 10 code examples of the TfMultiLayerPerceptron.fit method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
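Examples 2 through 10 come from test code and assume a shared setup that the snippets do not show: MLP is used as an alias for TfMultiLayerPerceptron, and X, y (plus the two-class subset X_bin, y_bin) refer to the Iris dataset. The following is a minimal sketch of that assumed setup so the snippets can be run on their own; the exact preprocessing in the original test module may differ, and these examples target older mlxtend releases that still ship the TensorFlow-based tf_classifier subpackage.

import numpy as np
from mlxtend.data import iris_data
from mlxtend.tf_classifier import TfMultiLayerPerceptron as MLP

# Standardized Iris features and integer labels, roughly mirroring mlxtend's test fixtures.
X, y = iris_data()
X = X.astype(np.float64)
X = (X - X.mean(axis=0)) / X.std(axis=0)

# Two-class subset (classes 0 and 1) used by the binary-classification example.
mask = y < 2
X_bin, y_bin = X[mask], y[mask]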
Example 1: _clf_mlp
# Required import: from mlxtend.tf_classifier import TfMultiLayerPerceptron [as alias]
# Or: from mlxtend.tf_classifier.TfMultiLayerPerceptron import fit [as alias]
def _clf_mlp(trX, teX, trY, teY):
    print("MLP")
    print(trX.shape, "trX shape")
    print("Enter hidden layer sizes for MLP, e.g. [100, 50]")
    layer = eval(input())  # Python 3: input() returns a string, so evaluate it into a list
    # print("enter delIdx")
    # delIdx = int(input())
    # while delIdx:
    #     trX = np.delete(trX, -1, axis=0)
    #     trY = np.delete(trY, -1, axis=0)
    #     delIdx = delIdx - 1
    print("factors", factors(trX.shape[0]))
    teY = teY.astype(np.int32)
    trY = trY.astype(np.int32)
    print(trX.shape, "trX shape")
    print("Enter number of minibatches")
    mini_batch = int(input())
    mlp = TfMultiLayerPerceptron(eta=0.01,
                                 epochs=100,
                                 hidden_layers=layer,
                                 activations=['relu' for _ in range(len(layer))],
                                 print_progress=3,
                                 minibatches=mini_batch,
                                 optimizer='adam',
                                 random_seed=1)
    mlp.fit(trX, trY)
    pred = mlp.predict(teX)
    print(_f_count(teY), "test f count")
    pred = pred.astype(np.int32)
    print(_f_count(pred), "pred f count")
    conf_mat = confusion_matrix(teY, pred)
    process_cm(conf_mat, to_print=True)
    print(precision_score(teY, pred), "Precision Score")
    print(recall_score(teY, pred), "Recall Score")
    print(roc_auc_score(teY, pred), "ROC_AUC")
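Example 1 reads its hyperparameters interactively and relies on project-specific helpers (factors, _f_count, process_cm) that are not part of mlxtend or scikit-learn and are not shown here. As a point of reference, here is a non-interactive sketch of the same train-and-evaluate flow with fixed, hypothetical hyperparameters, reporting the metrics directly via sklearn.metrics.

import numpy as np
from mlxtend.tf_classifier import TfMultiLayerPerceptron
from sklearn.metrics import confusion_matrix, precision_score, recall_score, roc_auc_score

def clf_mlp_simple(trX, teX, trY, teY, hidden_layers=(50, 20), minibatches=10):
    """Train a TfMultiLayerPerceptron and report binary-classification metrics."""
    trY = trY.astype(np.int32)
    teY = teY.astype(np.int32)
    mlp = TfMultiLayerPerceptron(eta=0.01,
                                 epochs=100,
                                 hidden_layers=list(hidden_layers),
                                 activations=['relu'] * len(hidden_layers),
                                 minibatches=minibatches,
                                 optimizer='adam',
                                 random_seed=1)
    mlp.fit(trX, trY)
    pred = mlp.predict(teX).astype(np.int32)
    print(confusion_matrix(teY, pred))
    # assumes binary labels, as in Example 1
    print("Precision:", precision_score(teY, pred))
    print("Recall:   ", recall_score(teY, pred))
    print("ROC AUC:  ", roc_auc_score(teY, pred))
    return mlp, pred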
Example 2: test_fail_minibatches
# Required import: from mlxtend.tf_classifier import TfMultiLayerPerceptron [as alias]
# Or: from mlxtend.tf_classifier.TfMultiLayerPerceptron import fit [as alias]
def test_fail_minibatches():
    # a minibatch count that does not evenly divide the training set;
    # fitting should still reach a perfect training-set fit
    mlp = MLP(epochs=100,
              eta=0.5,
              hidden_layers=[5],
              optimizer='gradientdescent',
              activations=['logistic'],
              minibatches=13,
              random_seed=1)
    mlp.fit(X, y)
    assert (y == mlp.predict(X)).all()
Example 3: test_binary_sgd
# Required import: from mlxtend.tf_classifier import TfMultiLayerPerceptron [as alias]
# Or: from mlxtend.tf_classifier.TfMultiLayerPerceptron import fit [as alias]
def test_binary_sgd():
    # binary classification trained with one sample per minibatch (SGD)
    mlp = MLP(epochs=10,
              eta=0.5,
              hidden_layers=[5],
              optimizer='gradientdescent',
              activations=['logistic'],
              minibatches=len(y_bin),
              random_seed=1)
    mlp.fit(X_bin, y_bin)
    assert (y_bin == mlp.predict(X_bin)).all()
Example 4: test_valid_acc
# Required import: from mlxtend.tf_classifier import TfMultiLayerPerceptron [as alias]
# Or: from mlxtend.tf_classifier.TfMultiLayerPerceptron import fit [as alias]
def test_valid_acc():
    # validation accuracy is logged once per epoch
    mlp = MLP(epochs=3,
              eta=0.5,
              hidden_layers=[5],
              optimizer='gradientdescent',
              activations=['logistic'],
              minibatches=1,
              random_seed=1)
    mlp.fit(X, y, X_valid=X[:100], y_valid=y[:100])
    assert len(mlp.valid_acc_) == 3
Example 5: test_train_acc
# Required import: from mlxtend.tf_classifier import TfMultiLayerPerceptron [as alias]
# Or: from mlxtend.tf_classifier.TfMultiLayerPerceptron import fit [as alias]
def test_train_acc():
    # training accuracy is logged once per epoch
    mlp = MLP(epochs=3,
              eta=0.5,
              hidden_layers=[5],
              optimizer='gradientdescent',
              activations=['logistic'],
              minibatches=1,
              random_seed=1)
    mlp.fit(X, y)
    assert len(mlp.train_acc_) == 3
Example 6: test_score_function_adagrad
# Required import: from mlxtend.tf_classifier import TfMultiLayerPerceptron [as alias]
# Or: from mlxtend.tf_classifier.TfMultiLayerPerceptron import fit [as alias]
def test_score_function_adagrad():
    # the adagrad optimizer should reach a perfect training-set score
    mlp = MLP(epochs=100,
              eta=0.5,
              hidden_layers=[5],
              optimizer='adagrad',
              activations=['logistic'],
              minibatches=1,
              random_seed=1)
    mlp.fit(X, y)
    acc = mlp.score(X, y)
    assert acc == 1.0, acc
Example 7: test_multiclass_gd_learningdecay
# Required import: from mlxtend.tf_classifier import TfMultiLayerPerceptron [as alias]
# Or: from mlxtend.tf_classifier.TfMultiLayerPerceptron import fit [as alias]
def test_multiclass_gd_learningdecay():
    # gradient descent with a learning-rate decay schedule;
    # the per-epoch cost should follow the expected trajectory
    mlp = MLP(epochs=5,
              eta=0.5,
              hidden_layers=[15],
              optimizer='gradientdescent',
              activations=['logistic'],
              minibatches=1,
              decay=[0.5, 1.0],
              random_seed=1)
    mlp.fit(X, y)
    expect = [3.107878, 2.124671, 1.786916, 1.65095, 1.590468]
    np.testing.assert_almost_equal(expect, mlp.cost_, decimal=2)
Example 8: test_multiclass_gd_dropout
# Required import: from mlxtend.tf_classifier import TfMultiLayerPerceptron [as alias]
# Or: from mlxtend.tf_classifier.TfMultiLayerPerceptron import fit [as alias]
def test_multiclass_gd_dropout():
    # with dropout applied, training accuracy settles around 0.67 on this data
    mlp = MLP(epochs=100,
              eta=0.5,
              hidden_layers=[5],
              optimizer='gradientdescent',
              activations=['logistic'],
              minibatches=1,
              random_seed=1,
              dropout=0.05)
    mlp.fit(X, y)
    acc = round(mlp.score(X, y), 2)
    assert acc == 0.67, acc
Example 9: test_continue_learning
# Required import: from mlxtend.tf_classifier import TfMultiLayerPerceptron [as alias]
# Or: from mlxtend.tf_classifier.TfMultiLayerPerceptron import fit [as alias]
def test_continue_learning():
    mlp = MLP(epochs=25,
              eta=0.5,
              hidden_layers=[5],
              optimizer='gradientdescent',
              activations=['logistic'],
              minibatches=1,
              random_seed=1)
    mlp.fit(X, y)
    assert np.sum(y == mlp.predict(X)) == 144, np.sum(y == mlp.predict(X))
    # a second fit with init_params=False continues training from the current weights
    mlp.fit(X, y, init_params=False)
    assert np.sum(y == mlp.predict(X)) == 150, np.sum(y == mlp.predict(X))
Example 10: test_multiclass_probas
# Required import: from mlxtend.tf_classifier import TfMultiLayerPerceptron [as alias]
# Or: from mlxtend.tf_classifier.TfMultiLayerPerceptron import fit [as alias]
def test_multiclass_probas():
    mlp = MLP(epochs=500,
              eta=0.5,
              hidden_layers=[10],
              optimizer='gradientdescent',
              activations=['logistic'],
              minibatches=1,
              random_seed=1)
    mlp.fit(X, y)
    idx = [0, 50, 149]  # sample labels: 0, 1, 2
    y_pred = mlp.predict_proba(X[idx])
    exp = np.array([[1.0, 0.0, 0.0],
                    [0.0, 0.9, 0.1],
                    [0.0, 0.1, 0.9]])
    np.testing.assert_almost_equal(y_pred, exp, 1)
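Since predict_proba returns one probability column per class, taking the argmax along axis 1 recovers hard class labels, which should agree with predict for the same inputs. A short usage sketch, assuming the fitted mlp, X, and idx from Example 10:

import numpy as np

proba = mlp.predict_proba(X[idx])   # shape (3, 3): one row per sample, one column per class
labels = np.argmax(proba, axis=1)   # most probable class per sample
print(labels)                       # expected to match mlp.predict(X[idx]) -> [0, 1, 2]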