This article collects typical usage examples of the NeuralNetwork.NeuralNetwork.fit method in Python. If you have been wondering how exactly Python's NeuralNetwork.fit is used or what it looks like in practice, the hand-picked code examples below may help. You can also read further about the class this method belongs to, NeuralNetwork.NeuralNetwork.
A total of 8 code examples of NeuralNetwork.fit are shown below, ordered by popularity by default.
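Note that the NeuralNetwork class used in every example is not a library class: each snippet imports it from a local NeuralNetwork.py, typically a small NumPy-only multilayer perceptron trained with backpropagation. The implementations differ between repositories, so the following is only a rough sketch of an interface compatible with the constructor, fit(X, y, epochs=...) and predict(x) calls seen below; it is an illustration, not the code these examples actually import.

import numpy as np

def tanh(x):
    return np.tanh(x)

def tanh_deriv(x):
    return 1.0 - np.tanh(x) ** 2

def logistic(x):
    return 1.0 / (1.0 + np.exp(-x))

def logistic_deriv(x):
    s = logistic(x)
    return s * (1.0 - s)

class NeuralNetwork:
    def __init__(self, layers, activation='tanh'):
        # layers: list of layer sizes, e.g. [64, 100, 10]; activation: 'tanh' or 'logistic'
        if activation == 'logistic':
            self.act, self.act_deriv = logistic, logistic_deriv
        else:
            self.act, self.act_deriv = tanh, tanh_deriv
        # weights[l] maps layer l to layer l + 1; one bias unit is appended to the input layer
        sizes = [layers[0] + 1] + list(layers[1:])
        self.weights = [np.random.uniform(-0.25, 0.25, (sizes[l], sizes[l + 1]))
                        for l in range(len(sizes) - 1)]

    def fit(self, X, y, learning_rate=0.2, epochs=10000):
        X = np.atleast_2d(X)
        X = np.hstack([X, np.ones((X.shape[0], 1))])      # bias column for the input layer
        y = np.atleast_2d(np.asarray(y, dtype=float))
        if y.shape[0] == 1:                                # accept y given as a flat vector
            y = y.T
        for _ in range(epochs):
            i = np.random.randint(X.shape[0])              # stochastic: one random sample per step
            a = [X[i]]
            for w in self.weights:                         # forward pass
                a.append(self.act(a[-1].dot(w)))
            delta = (y[i] - a[-1]) * self.act_deriv(a[-2].dot(self.weights[-1]))
            deltas = [delta]
            for l in range(len(self.weights) - 1, 0, -1):  # backpropagate the error
                delta = delta.dot(self.weights[l].T) * self.act_deriv(a[l - 1].dot(self.weights[l - 1]))
                deltas.insert(0, delta)
            for l, w in enumerate(self.weights):           # gradient step on each weight matrix
                w += learning_rate * np.outer(a[l], deltas[l])

    def predict(self, x):
        a = np.append(np.asarray(x, dtype=float), 1.0)     # bias term for the input
        for w in self.weights:
            a = self.act(a.dot(w))
        return a

With this sketch, nn = NeuralNetwork([2, 2, 1], 'tanh') followed by nn.fit(X, y) and nn.predict([0, 1]) matches the way the class is called in Examples 2 and 5.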
Example 1: load_digits
# Required import: from NeuralNetwork import NeuralNetwork [as alias]
# Alternatively: from NeuralNetwork.NeuralNetwork import fit [as alias]
# each image is 8x8; recognize the digits 0,1,2,3,4,5,6,7,8,9
import numpy as np
from sklearn.datasets import load_digits
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.preprocessing import LabelBinarizer
from NeuralNetwork import NeuralNetwork
from sklearn.cross_validation import train_test_split
digits = load_digits()
X = digits.data
y = digits.target
X -= X.min() # normalize the values to bring them into the range 0-1
X /= X.max()
nn = NeuralNetwork([64, 100, 10], 'logistic')
X_train, X_test, y_train, y_test = train_test_split(X, y)
labels_train = LabelBinarizer().fit_transform(y_train)
labels_test = LabelBinarizer().fit_transform(y_test)
print "start fitting"
nn.fit(X_train, labels_train, epochs=3000)
predictions = []
for i in range(X_test.shape[0]):
    o = nn.predict(X_test[i])
    predictions.append(np.argmax(o))
print confusion_matrix(y_test, predictions)
print classification_report(y_test, predictions)
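This snippet targets Python 2 and an old scikit-learn. On Python 3 with a current scikit-learn, the two version-specific pieces would roughly become:

from sklearn.model_selection import train_test_split   # sklearn.cross_validation has been removed
print(confusion_matrix(y_test, predictions))
print(classification_report(y_test, predictions))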
Example 2: NeuralNetwork
# Required import: from NeuralNetwork import NeuralNetwork [as alias]
# Alternatively: from NeuralNetwork.NeuralNetwork import fit [as alias]
import numpy as np
from NeuralNetwork import NeuralNetwork
nn = NeuralNetwork([2, 2, 1], 'tanh')
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([0, 1, 1, 0])
nn.fit(X, y)
for i in [[0, 0], [0, 1], [1, 0], [1, 1]]:
    print(i, nn.predict(i))
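Given a correct implementation, the four predictions should end up close to 0, 1, 1, 0 (the XOR truth table); the exact values depend on the random weight initialization and the implementation's default epoch count.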
Example 3: load_digits
# Required import: from NeuralNetwork import NeuralNetwork [as alias]
# Alternatively: from NeuralNetwork.NeuralNetwork import fit [as alias]
# -*- coding:utf-8 -*-
# each image is 8x8; recognize the digits 0,1,2,3,4,5,6,7,8,9
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.datasets import load_digits
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.preprocessing import LabelBinarizer
from NeuralNetwork import NeuralNetwork
digits = load_digits()
X = digits.data
y = digits.target
X -= X.min() # normalize the values to bring them into the range 0-1
X /= X.max()
nn = NeuralNetwork([64, 1024, 10], 'logistic')
X_train, X_test, y_train, y_test = train_test_split(X, y)
labels_train = LabelBinarizer().fit_transform(y_train)
labels_test = LabelBinarizer().fit_transform(y_test)
print "start fitting"
nn.fit(X_train, labels_train, epochs=1024*10)
predictions = []
for i in range(X_test.shape[0]):
    o = nn.predict(X_test[i])
    predictions.append(np.argmax(o))
print confusion_matrix(y_test, predictions)
print classification_report(y_test, predictions)
Example 4: load_digits
# Required import: from NeuralNetwork import NeuralNetwork [as alias]
# Alternatively: from NeuralNetwork.NeuralNetwork import fit [as alias]
# import pylab as pl
# pl.gray()
# pl.matshow(digits.images[0])
# pl.show()
from sklearn.preprocessing import LabelBinarizer
from NeuralNetwork import NeuralNetwork
from sklearn.cross_validation import train_test_split
from sklearn.datasets import load_digits
import numpy as np
from sklearn.metrics import confusion_matrix,classification_report
digits = load_digits()
x= digits.data
y = digits.target
x -= x.min()
x /= x.max()
nn = NeuralNetwork([64,100,10],"logistic")
x_train,x_test,y_train,y_test = train_test_split(x,y)
label_train = LabelBinarizer().fit_transform(y_train)
label_test = LabelBinarizer().fit_transform(y_test)
print("start fitting..")
predictions = []
nn.fit(x_train, label_train, epochs=10000)
for i in range(x_test.shape[0]):
    o = nn.predict(x_test[i])
    predictions.append(np.argmax(o))  # highest output activation = predicted digit
print confusion_matrix(y_test,predictions)
print classification_report(y_test,predictions)
Example 5: NeuralNetwork
# Required import: from NeuralNetwork import NeuralNetwork [as alias]
# Alternatively: from NeuralNetwork.NeuralNetwork import fit [as alias]
from NeuralNetwork import NeuralNetwork
import numpy as np
nn = NeuralNetwork([2, 2, 1], 'tanh')
x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([0, 1, 1, 0])
nn.fit(x, y)
for i in [[0, 0], [0, 1], [1, 0], [1, 1]]:
    print(i, nn.predict(i))
Example 6: NeuralNetwork
# Required import: from NeuralNetwork import NeuralNetwork [as alias]
# Alternatively: from NeuralNetwork.NeuralNetwork import fit [as alias]
# Note: this snippet is an excerpt; the beginning of the Networklayer_dict definition
# (layer 0) and the surrounding setup (rbm0-rbm2, step_iterator, the training data and
# the pickle import) are omitted in the source and come from the rest of that project.
            'link2target': None
        }, 1: {
            'n_neuron': y_train.shape[1],
            'incoming_layer_list': [0, ],
            'incoming_weight_list': [],
            'bias': None,
            'loss': 'cross_entropy',
            'act_func_name': 'softmax',
            'value': None,
            'layer_type': 'output',
            'back_error': 0,
            'link2input': None,
            'link2target': y_train}}
network = NeuralNetwork(n_layers=2, layer_dict=Networklayer_dict)
network.fit(batch_size=1000, learning_rate=step_iterator(0.1, 0.01, -0.02),
            weight_decay=step_iterator(0, 0, 0), momentum=step_iterator(0.1, 0.9, 0.1),
            n_iter=100, switch_point=10)
y_pred = network.transform(rbm2.transform(rbm1.transform(rbm0.transform(X_test))))[0]
correct = np.sum(y_pred.argmax(axis=1) == y_test.argmax(axis=1))
print('correct = %d in %d' % (correct, X_test.shape[0]))
network.transform(rbm2.transform(rbm1.transform(rbm0.transform(X_train_copy))))[0]
error = network.empirical_error(target=y_train)
print('initial error: %f' % error)
with open(r"C:\Users\daredavil\Documents\Python Scripts\RBMver2\rbms.pkl", 'wb') as file_:
    pickle.dump((rbm0.hidden_layer.dimension, rbm0.weight_list[0], rbm0.hidden_layer.bias,
                 rbm1.hidden_layer.dimension, rbm1.weight_list[0], rbm1.hidden_layer.bias,
                 rbm2.hidden_layer.dimension, rbm2.weight_list[0], rbm2.hidden_layer.bias,
                 network.output_layer_list[0].incoming_weight_list[0],
                 network.output_layer_list[0].bias), file_)
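step_iterator is another project-local helper that is not shown in the source. Purely as an illustration of the step_iterator(start, stop, step) call signature used above (the real helper may be implemented differently), a schedule generator could look like this:

def step_iterator(start, stop, step):
    # Hypothetical sketch only: walk from start towards stop in increments of step,
    # then keep yielding stop so the schedule can be consumed for any number of epochs.
    value = start
    while (step > 0 and value < stop) or (step < 0 and value > stop):
        yield value
        value += step
    while True:
        yield stop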
Example 7: test_classification
# Required import: from NeuralNetwork import NeuralNetwork [as alias]
# Alternatively: from NeuralNetwork.NeuralNetwork import fit [as alias]
def test_classification():
    # Note: excerpt from a larger test module; np (NumPy), MLPClassifier, NeuralNetwork
    # and the local dataset helper `dt` are assumed to be imported at module level
    # in the original file.
    from sklearn.datasets import load_digits
    from sklearn.datasets import load_iris
    from sklearn.cross_validation import train_test_split
    from sklearn.preprocessing import LabelBinarizer
    from sklearn.metrics import confusion_matrix, classification_report, accuracy_score

    digits = load_digits()
    iris = load_iris()
    breast = dt.load_breast_cancer()
    ocr = dt.load_ocr_train()
    ocr1 = dt.load_ocr_test()

    X = digits.data
    y = digits.target
    X -= X.min()  # normalize the values to bring them into the range 0-1
    X /= X.max()
    X_train, X_test, y_train, y_test = train_test_split(X, y)
    labels_train = LabelBinarizer().fit_transform(y_train)
    labels_test = LabelBinarizer().fit_transform(y_test)

    print 'digits dataset'
    print 'MLP performance:'
    mlp = MLPClassifier()
    mlp.fit(X_train, labels_train)
    predictions = []
    for i in range(X_test.shape[0]):
        o = mlp.predict(X_test[i])
        predictions.append(np.argmax(o))
    print confusion_matrix(y_test, predictions)
    print classification_report(y_test, predictions)

    print 'Perceptron performance'
    nn = NeuralNetwork([64, 100, 10], 'tanh')
    nn.fit(X_train, labels_train, epochs=100)
    predictions = []
    for i in range(X_test.shape[0]):
        o = nn.predict(X_test[i])
        predictions.append(np.argmax(o))
    print confusion_matrix(y_test, predictions)
    print classification_report(y_test, predictions)

    #################################################
    X = iris.data
    y = iris.target
    # X -= X.min()  # normalize the values to bring them into the range 0-1
    # X /= X.max()
    X_train, X_test, y_train, y_test = train_test_split(X, y)
    labels_train = LabelBinarizer().fit_transform(y_train)
    labels_test = LabelBinarizer().fit_transform(y_test)

    print 'Iris dataset'
    print 'MLP performance'
    mlp = MLPClassifier()
    mlp.fit(X_train, labels_train)
    predictions = []
    for i in range(X_test.shape[0]):
        o = mlp.predict(X_test[i])
        predictions.append(np.argmax(o))
    print confusion_matrix(y_test, predictions)
    print classification_report(y_test, predictions)

    print 'Perceptron performance'
    nn = NeuralNetwork([64, 100, 10], 'tanh')
    nn.fit(X_train, labels_train, epochs=100)
    predictions = []
    for i in range(X_test.shape[0]):
        o = nn.predict(X_test[i])
        predictions.append(np.argmax(o))
    print confusion_matrix(y_test, predictions)
    print classification_report(y_test, predictions)

    ####################################################
    X_train = breast['x_train']
    y_train = breast['y_train']
    X_test = breast['x_test']
    y_test = breast['y_test']
    X_train -= X_train.min()  # normalize the values to bring them into the range 0-1
    X_train /= X_train.max()
    labels_train = LabelBinarizer().fit_transform(y_train)
    labels_test = LabelBinarizer().fit_transform(y_test)

    print 'Breast cancer dataset'
    print 'MLP performance'
    mlp = MLPClassifier()
    mlp.fit(X_train, labels_train)
    predictions = []
    for i in range(X_test.shape[0]):
        o = mlp.predict(X_test[i])
        predictions.append(np.argmax(o))
    print accuracy_score(labels_test, predictions)
    # print confusion_matrix(labels_test, predictions)
    print classification_report(labels_test, predictions)

    print 'Perceptron performance'
    nn = NeuralNetwork([64, 100, 10], 'tanh')
    nn.fit(X_train, labels_train, epochs=100)
    predictions = []
    for i in range(X_test.shape[0]):
        o = nn.predict(X_test[i])
        predictions.append(np.argmax(o))
    print confusion_matrix(labels_test, predictions)
    print classification_report(labels_test, predictions)

    ####################################################
    # ... (the remainder of this example is truncated in the source)
Example 8: NeuralNetwork
# Required import: from NeuralNetwork import NeuralNetwork [as alias]
# Alternatively: from NeuralNetwork.NeuralNetwork import fit [as alias]
"incoming_weight_list": rbm_list[0].weight_list[0],
"bias": rbm_list[0].input_layer_list[0].bias,
"loss": "mse",
"act_func_name": "linear",
"value": None,
"layer_type": "output",
"random_state": random_state,
"back_error": 0,
"link2target": X_train,
},
}
network = NeuralNetwork(n_layers=11, layer_dict=Networklayer_dict)
network.fit(
batch_size=1000,
learning_rate=step_iterator(0.1, 0.01, -0.02),
weight_decay=step_iterator(0, 0, 0),
momentum=step_iterator(0, 0, 0),
n_iter=5,
switch_point=None,
)
network.transform([X_train])[0]
hidd_rep_train = network.layer_list[5].value
network.transform([X_test])[0]
hidd_rep_test = network.layer_list[5].value
save_dict["hrtrain"] = hidd_rep_train
save_dict["hrtest"] = hidd_rep_test
with open("data.pkl", "wb") as f_:
cPickle.dump(save_dict, f_)