This page collects typical usage examples of the Python method NeuralNetwork.NeuralNetwork.train. If you have been wondering how to use Python's NeuralNetwork.train, what it does, or where to find examples of it, the hand-picked code samples below may help. You can also read more about the class it belongs to, NeuralNetwork.NeuralNetwork.
The following 11 code examples of NeuralNetwork.train are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
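Note that each project below defines its own NeuralNetwork class, so the constructor and the exact train signature differ from example to example. As a rough orientation only, here is a minimal hypothetical sketch of the most common pattern seen in these examples; the three-argument constructor and the (X, y, max_iterations) call are assumptions drawn from Examples 1, 4 and 8, not a single canonical API.
# Minimal sketch -- assumed API; see the individual examples for each project's real signature
from NeuralNetwork import NeuralNetwork

X_train = [[30, 20, 40, 50], [25, 15, 35, 45]]   # toy feature rows
y_train = [0, 1]                                  # toy class labels

nn = NeuralNetwork(len(X_train[0]), 2, 2)   # assumed (n_inputs, n_hidden, n_outputs) constructor
nn.train(X_train, y_train, 10)              # several examples pass (X, y, max_iterations);
                                            # others call train() with no arguments or with a prebuilt training set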
Example 1: exo67
# Required import: from NeuralNetwork import NeuralNetwork [as alias]
# Or: from NeuralNetwork.NeuralNetwork import train [as alias]
def exo67():
    print("\n\n>>EXERCICE 6 et 7 : Calcul matriciel")
    print(" --- K=1 ---")
    # Xtrain, ytrain, Xvalid, yvalid, Xtest, ytest = utils.readMoonFile()
    Xtrain = [[30, 20, 40, 50], [25, 15, 35, 45]]
    ytrain = [0, 0]
    default_h = 2
    nn = NeuralNetwork(len(Xtrain[0]), default_h, utils.getClassCount(ytrain), K=1, wd=0)
    nne = NeuralNetworkEfficient(len(Xtrain[0]), default_h, utils.getClassCount(ytrain), K=1, wd=0)
    nne._w1 = nn._w1  # trick so both networks start from the same random weights
    nne._w2 = nn._w2
    nn.train(Xtrain, ytrain, 1)
    nne.train(Xtrain, ytrain, 1)
    utils.compareNN(nn, nne)
    print(" --- K=10 ---")
    Xtrain = [[30, 20, 40, 50], [25, 15, 35, 45], [30, 76, 45, 44], [89, 27, 42, 52], [30, 24, 44, 53], [89, 25, 45, 50], [30, 20, 40, 50], [30, 65, 47, 50], [30, 34, 40, 50], [39, 20, 29, 58]]
    ytrain = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    default_h = 2
    nn = NeuralNetwork(len(Xtrain[0]), default_h, utils.getClassCount(ytrain), K=10, wd=0)
    nne = NeuralNetworkEfficient(len(Xtrain[0]), default_h, utils.getClassCount(ytrain), K=10, wd=0)
    nne._w1 = nn._w1  # trick so both networks start from the same random weights
    nne._w2 = nn._w2
    nn.train(Xtrain, ytrain, 1)
    nne.train(Xtrain, ytrain, 1)
    utils.compareNN(nn, nne, 10)
Example 2: accuracy
# Required import: from NeuralNetwork import NeuralNetwork [as alias]
# Or: from NeuralNetwork.NeuralNetwork import train [as alias]
def accuracy(self, number_layers, numbers_neurons, learning_rate):
    """Return the accuracy of the neural network associated with an Individual."""
    net = NeuralNetwork(number_layers, numbers_neurons, learning_rate, X_train=self.dataset.X_train, Y_train=self.dataset.Y_train, X_test=self.dataset.X_test, Y_test=self.dataset.Y_test)
    # train the neural network
    net.train()
    # compute the accuracy
    acc = net.classify()
    # store the AUC
    self.__auc = net.get_auc()
    return acc
Example 3: test
# Required import: from NeuralNetwork import NeuralNetwork [as alias]
# Or: from NeuralNetwork.NeuralNetwork import train [as alias]
def test(base_directory, ignore_word_file, filtered, nb_hidden_neurons, nb_max_iteration):
    print("post reading...")
    pr = PostReader(base_directory, ignore_word_file, filtered)
    print("creating neural network...")
    nn = NeuralNetwork(pr.get_word_set(), nb_hidden_neurons, nb_max_iteration)
    print("training...")
    training_set = pr.get_training_set()
    t0 = time.clock()
    nb_iteration = nn.train(training_set)
    training_time = time.clock() - t0
    print("verification...")
    t0 = time.clock()
    verification_set = pr.get_verification_set()
    verification_time = time.clock() - t0
    nb_correct = 0
    for msg in verification_set:
        final = NeuralNetwork.threshold(nn.classify(msg[0]))
        if final == msg[1]:
            nb_correct += 1
    print("=======================")
    print("training set length    : %s" % len(training_set))
    print("nb hidden neurons      : %s" % nb_hidden_neurons)
    print("nb max iterations      : %s" % nb_max_iteration)
    print("nb iterations          : %s" % nb_iteration)
    print("verification set length: %s posts" % len(verification_set))
    print("nb correct classified  : %s posts" % nb_correct)
    print("rate                   : %i %%" % (nb_correct / len(verification_set) * 100))
    print("training time          : %i s" % training_time)
    print("verification time      : %i s" % verification_time)
    print("=======================")
    print("")
Example 4: exo8
# Required import: from NeuralNetwork import NeuralNetwork [as alias]
# Or: from NeuralNetwork.NeuralNetwork import train [as alias]
def exo8():
    print("\n\n>>EXERCICE 8 MNIST")
    Xtrain, ytrain, Xvalid, yvalid, Xtest, ytest = utils.readMNISTfile()
    default_h = 30
    maxIter = 1
    neuralNetwork = NeuralNetwork(len(Xtrain[0]), default_h, utils.getClassCount(ytrain), K=100)
    neuralNetworkEfficient = NeuralNetworkEfficient(len(Xtrain[0]), default_h, utils.getClassCount(ytrain), K=100)
    neuralNetworkEfficient._w1 = neuralNetwork._w1
    neuralNetworkEfficient._w2 = neuralNetwork._w2
    print("--- Reseau de depart ---")
    t1 = datetime.now()
    neuralNetwork.train(Xtrain, ytrain, maxIter)
    t2 = datetime.now()
    delta = t2 - t1
    print("Cela a mis : " + str(delta.total_seconds()) + " secondes")
    print("--- Reseau optimise ---")
    t1 = datetime.now()
    neuralNetworkEfficient.train(Xtrain, ytrain, maxIter)
    t2 = datetime.now()
    delta = t2 - t1
    print("Cela a mis : " + str(delta.total_seconds()) + " secondes")
Example 5: NeuralNetworkXORTestcase
# Required import: from NeuralNetwork import NeuralNetwork [as alias]
# Or: from NeuralNetwork.NeuralNetwork import train [as alias]
class NeuralNetworkXORTestcase(unittest.TestCase):

    def setUp(self):
        self.nn = NeuralNetwork(['a', 'b'], 2)
        self.nn.hidden_neurons[0].input_weights['a'] = 1.0
        self.nn.hidden_neurons[0].input_weights['b'] = 1.0
        self.nn.hidden_neurons[0].bias = 0.0
        self.nn.hidden_neurons[1].input_weights['a'] = 1.0
        self.nn.hidden_neurons[1].input_weights['b'] = 1.0
        self.nn.hidden_neurons[1].bias = 0.0
        self.nn.final_neuron.input_weights[0] = -1
        self.nn.final_neuron.input_weights[1] = 1
        self.nn.final_neuron.bias = 0.0

    def test_classifiy(self):
        self.assertAlmostEquals(self.nn.classify({'a': 1.0, 'b': 0.0}), 1.0, 5)
        self.assertAlmostEquals(self.nn.classify({'a': 0.0, 'b': 1.0}), 1.0, 5)
        self.assertAlmostEquals(self.nn.classify({'a': 1.0, 'b': 1.0}), 0.0, 5)
        self.assertAlmostEquals(self.nn.classify({'a': 0.0, 'b': 0.0}), 0.0, 5)

    def test_train(self):
        self.nn = NeuralNetwork(['a', 'b'], 2)
        self.nn.train([[{'a': 1.0, 'b': 0.0}, 1.0]])
        self.nn.train([[{'a': 0.0, 'b': 1.0}, 1.0]])
        self.nn.train([[{'a': 1.0, 'b': 0.0}, 1.0]])
        self.nn.train([[{'a': 0.0, 'b': 1.0}, 1.0]])
        self.nn.train([[{'a': 1.0, 'b': 0.0}, 1.0]])
        self.nn.train([[{'a': 0.0, 'b': 1.0}, 1.0]])
        self.nn.train([[{'a': 1.0, 'b': 0.0}, 1.0]])
        self.nn.train([[{'a': 0.0, 'b': 1.0}, 1.0]])
        self.assertAlmostEquals(self.nn.classify({'a': 1.0, 'b': 0.0}), 1.0, 5)
        self.assertAlmostEquals(self.nn.classify({'a': 0.0, 'b': 1.0}), 1.0, 5)
        self.assertAlmostEquals(self.nn.classify({'a': 1.0, 'b': 1.0}), 0.0, 5)
        self.assertAlmostEquals(self.nn.classify({'a': 0.0, 'b': 0.0}), 0.0, 5)
        self.nn.hidden_neurons[0].input_weights
Example 6: Normalize
# Required import: from NeuralNetwork import NeuralNetwork [as alias]
# Or: from NeuralNetwork.NeuralNetwork import train [as alias]
        dataColumn = Normalize(dataColumn)
        length = len(dataColumn)
        InputLayer.append(dataColumn)
        dataColumn = []
    return InputLayer, length

def Normalize(dataColumn):
    # print dataColumn
    newDataColumn = []
    maximum = max(dataColumn)
    minimum = min(dataColumn)
    for each in dataColumn:
        norm = (each - minimum) / (maximum - minimum)
        newDataColumn.append(norm)
        # print norm
    return newDataColumn
InputArray,length = getData()
OutputLayer = np.array([InputArray[1]])
ones = [1]*length
#InputLayer = np.array([InputArray[0],InputArray[2],InputArray[3],ones])
InputLayer = np.array([InputArray[0],ones])
InputLayer = InputLayer.T
y = OutputLayer.T
nn = NeuralNetwork(4,4)
nn.declareInput(InputLayer)
nn.declareTarget(y)
nn.train(1000000)
Example 7:
# Required import: from NeuralNetwork import NeuralNetwork [as alias]
# Or: from NeuralNetwork.NeuralNetwork import train [as alias]
#     n_kerns=256,
#     height=5,
#     width=5
# )
# Add fc layer
nn.add(
    'Convolution',
    n_kerns=115,
    height=12,
    width=12
)
nn.add(
    'Pool',
    shape=(2, 2)
)
nn.add(
    'FCLayer',
    n_out=500
)
nn.compile()
nn.set_ttv_data(dataset)
nn.train()
Example 8: DigitClassifier
# Required import: from NeuralNetwork import NeuralNetwork [as alias]
# Or: from NeuralNetwork.NeuralNetwork import train [as alias]
class DigitClassifier(tkinter.Tk):

    def __init__(self):
        tkinter.Tk.__init__(self)
        self.nn = NeuralNetwork(784, 300, 10)
        self.background = tkinter.Canvas(self, width=308, height=308)
        self.background.config(background="black")
        self.input_canvas = InputCanvas(self, width=300, height=300)
        self.result_label = tkinter.Label(self, text='')
        self.recog_button = tkinter.Button(self, text='Recognize', command=self.recognize)
        self.clear_button = tkinter.Button(self, text='Clear', command=self.input_canvas.clear)
        self.background.pack()
        self.input_canvas.place(x=4, y=4)
        self.result_label.pack()
        self.recog_button.pack()
        self.clear_button.pack()

    def train_nn(self, epochs=100000, edit_image=False):
        """Train the neural network."""
        import Mnist
        labels = Mnist.trainLabels
        images = Mnist.trainImages
        inputs, targets = [], []
        for _ in range(epochs):
            i = int(random.random() * len(labels))
            target = np.zeros(10)
            if edit_image:
                # augment the training image (random rotation and offset)
                img = Image.fromarray(images[i])
                new_img = Image.new('L', (28, 28))
                new_img.paste(img.rotate(random.uniform(-45.0, 45.0)),
                              (random.randint(-5, 5), random.randint(-5, 5)))
                image = np.asarray(new_img).ravel()
            else:
                # no augmentation
                image = images[i].ravel()
            inputs.append(image / 255.0)
            target[labels[i]] = 1.0
            targets.append(target)
        print("start training...")
        self.nn.train(np.array(inputs), np.array(targets), n=0.01)
        labels = Mnist.testLabels
        images = Mnist.testImages
        inputs, targets = [], []
        for i in range(len(labels)):
            target = np.zeros(10)
            inputs.append(images[i].ravel() / 255.0)
            target[labels[i]] = 1.0
            targets.append(target)
        print("start testing...")
        results = self.nn.test(np.array(inputs), np.array(targets))
        # print(results)
        overall = np.zeros((10, 10), dtype=int)
        correct = 0
        for result, target in zip(results, targets):
            ri = max(enumerate(result), key=lambda x: x[1])[0]
            ti = max(enumerate(target), key=lambda x: x[1])[0]
            overall[ti, ri] += 1
            if ti == ri:
                correct += 1
        print(overall)
        print(float(correct) / len(labels))
        # save the trained parameters
        np.save('parameters/w1_2.npy', self.nn.w1_2)
        np.save('parameters/w2_3.npy', self.nn.w2_3)

    def load_nn_parameters(self):
        # load the saved parameters
        self.nn.w1_2 = np.load('parameters/w1_2.npy')
        self.nn.w2_3 = np.load('parameters/w2_3.npy')

    def recognize(self):
        # recognize the digit drawn on the canvas
        img = self.input_canvas.getImage().filter(ImageFilter.BLUR).convert('L')
        img.thumbnail((28, 28), getattr(Image, 'ANTIALIAS'))
        img = img.point(lambda x: 255 - x)
        input = np.asarray(img).ravel()
        result = self.nn.test([input / 255.0], np.zeros(10))[0]
        num = max(enumerate(result), key=lambda x: x[1])[0]
        self.result_label.configure(text=str(num))
        print(num, result)
Example 9: YourFaceSoundsFamiliar
# Required import: from NeuralNetwork import NeuralNetwork [as alias]
# Or: from NeuralNetwork.NeuralNetwork import train [as alias]
class YourFaceSoundsFamiliar(BaseWidget):

    def __init__(self):
        super(YourFaceSoundsFamiliar, self).__init__('Your Face Sounds Familiar')
        # Predict Tab
        self._imagepath = ControlText('Path')
        self._browsebuttonpredict = ControlButton('Browse')
        self._nametopred = ControlText('Name')
        self._selectfile = ControlFile()
        self._selectfile.changed = self.__change_path
        self._predictimage = ControlImage()
        self._predictbutton = ControlButton('Predict')
        self._predicteddetails = ControlLabel('Details')
        self._name = ControlLabel('Recognized Name: ')
        self._fscore = ControlLabel('FScore: ')
        self._predictbutton.value = self.__predictbAction
        # Train Tab
        self._pername = ControlText('Name')
        self._selectdir = ControlDir()
        self._selectdir.changed = self.__change_path_dir
        self._imagetotrain = ControlImage()
        # self._imagetotest = ControlImage()
        self._totrainlist = ControlList("To Train", defaultValue=[])
        self.traininglist = self._totrainlist.value
        self._addtolistbutton = ControlButton('Add')
        self._addtolistbutton.value = self.__addtolistbAction
        self._trainbutton = ControlButton('Train')
        self._trainbutton.value = self.__trainbAction
        # Formsets
        self._formset = [{
            'Predict': ['_selectfile', '=', '_nametopred', '=', '_predictimage',
                        '=', '_predictbutton', '=',
                        '_predicteddetails', '=', '_name',
                        '=', '_fscore'],
            'Train': ['_pername', '=', '_selectdir',
                      '=', '_imagetotrain', '=', '_addtolistbutton', '=',
                      '_totrainlist', '=', '_trainbutton']
        }]
        self.trainingsetall = []
        self.nn = self.__init_nn()
        self.learned = {}
        self._k = 4
        self._trainingPercent = 0.8
        self.learned = self.__load_learned()
        self.cross_validation_set = [np.empty((0, 0))] * self._k
        self.cross_validation_set_y = [np.empty((0, 0))] * self._k
        self.test_set = np.empty((0, 0))
        self.testing_y = np.empty((0, 0))
        self.training_X = [np.empty((0, 900))] * self._k
        self.training_y = [np.empty((0, 1))] * self._k
        self.X = np.empty((0, 0))

    def __load_learned(self):
        try:
            with open('learned.json') as learned_file:
                for line in learned_file:
                    learned = json.loads(line)
                    for key in learned.keys():
                        self._totrainlist.__add__([key])
        except IOError:
            learned = {}
        config = {'input_size': 30 * 30, 'hidden_size': 30 * 30, 'lambda': 1, 'num_labels': (len(learned))}
        self.nn = NeuralNetwork(config=config)
        return learned

    def __predictbAction(self):
        predictset_filename = 'predictset.csv'
        np.savetxt(predictset_filename, self.predictset, delimiter=',')
        prediction = np.argmax(self.nn.predict(self.predictset)) + 1
        for k, v in self.learned.iteritems():
            if prediction == v:
                self._name.value = k

    def __init_nn(self):
        nn = NeuralNetwork()
        return nn

    def __change_path(self):
        image = cv2.imread(self._selectfile.value)
        self._predictimage.value = []
        self._predictimage.value = FaceDetection().drawrectangle(image)
        resizedimage = FaceDetection().resizeimageb(self._predictimage.value)
        croppedimage = FaceDetection().cropface(resizedimage)
        resizedcroppedimage = FaceDetection().resizeimagea(croppedimage)
        self.predictset = np.array(resizedcroppedimage[1]).flatten()

    def __change_path_dir(self):
        name = self._selectdir.value
        name = name.split('/')
        self._pername.value = name.pop(len(name) - 1)
        self._imagetotrain.value = []
        # self._imagetotest.value = []
        listofimages = os.listdir(self._selectdir.value)
        listofimages = sorted(listofimages)
# ......... (remaining code omitted) .........
Example 10: contrast_normalize
# Required import: from NeuralNetwork import NeuralNetwork [as alias]
# Or: from NeuralNetwork.NeuralNetwork import train [as alias]
# Assumes "train.mat" is the training set from MNIST
train_data = sc.io.loadmat('dataset/train.mat')
train_images = train_data['train_images']
train_labels = train_data['train_labels']
side_length = train_images.shape[0]
preprocessed_images = np.transpose(train_images.reshape((side_length*side_length,-1)))
preprocessed_images = contrast_normalize(preprocessed_images)
training_features, training_labels, validation_features, validation_labels = split_data(preprocessed_images, train_labels, 1/6.0)
test_data = sc.io.loadmat('dataset/test.mat')
test_images = test_data['test_images']
preprocessed_test_images = test_images.reshape((10000, 784))
preprocessed_test_images = contrast_normalize(preprocessed_test_images)
# This actually isn't a great setup for MNIST; multiple hidden layers aren't useful unless you're doing convolutions.
example_net = NeuralNetwork(cost_func=cross_entropy, cost_deriv=cross_entropy_deriv,
                            activation_func=ReLU, activ_deriv=ReLU_deriv,
                            output_func=softmax, output_deriv=softmax_deriv,
                            hid_layer_sizes=[200, 200], num_inputs=784, num_outputs=10, learning_rate=1e-2, stopping_threshold=-1,
                            momentum_rate=0.9, batch_size=50, decay_rate=0.5, decay_frequency=20,
                            cost_calc_freq=1000, snapshot_frequency=-1,
                            snapshot_name="./snapshots/multilayer_softmax_ReLU", max_iterations=1e6, relax_targets=False)
example_net.train(training_features, training_labels)
validation_predictions = example_net.predict(validation_features)
benchmark(validation_predictions,validation_labels)
final_predictions = example_net.predict(preprocessed_test_images)
Example 11: __init__
# Required import: from NeuralNetwork import NeuralNetwork [as alias]
# Or: from NeuralNetwork.NeuralNetwork import train [as alias]
class Classifier:

    def __init__(self, classifier_type, **kwargs):
        """
        Initialize a classifier for managing a learning model.
        Args:
            classifier_type: the type of learning model, e.g. neural_network
            **kwargs: parameters stored in a dictionary
        """
        self.classifier_type = classifier_type
        self.params = kwargs
        self.clf = None
        self.file = open('result/trial_' + str(datetime.datetime.today()).replace("/", "_", -1) + ".txt", 'w', 0)

    def train(self, training_data, testData, classNum, batchSize):
        """
        Create a learning model and train it with the training data. Print the training accuracy every certain number of iterations.
        If the learning rate is not chosen appropriately, let the user enter a new one.
        """
        # find the numbers of features and labels
        featureNum = training_data.shape[1] - 1
        # # this would find all the unique labels automatically, but has problems when the training data is missing some labels
        # labelNum = len(np.unique(training_data[:, :1]))
        labelNum = classNum
        # get the number of nodes for each layer
        if "hidden_layer" in self.params and self.params["hidden_layer"] is not None:
            nodeNum = [featureNum] + self.params["hidden_layer"] + [labelNum]
        else:
            nodeNum = [featureNum, featureNum * 2, labelNum]
        # get the mode for initializing the weights
        if "weightInitMode" in self.params and self.params["weightInitMode"] is not None:
            weightInitMode = self.params["weightInitMode"]
        else:
            weightInitMode = None
        # get the momentum factor
        if "momentumFactor" in self.params:
            momentumFactor = self.params["momentumFactor"]
        else:
            momentumFactor = 0.0
        self.clf = NeuralNetwork(training_data, nodeNum, weightInitMode, momentumFactor)
        iteration = 5
        totalIter = 0
        testSize = 100000
        while iteration > 0:
            if iteration < 10:
                self.clf.train(iteration, batchSize)
                totalIter += iteration
                print "---------- Settings ----------"
                print "Examples                 :", training_data.shape[0]
                print "Batch size               :", batchSize
                print "Alpha                    :", self.clf.getAlpha()
                print "Momentum factor          :", momentumFactor
                print "# of Nodes in all layers :", nodeNum
                print "Training iteration so far:", totalIter
                self.file.write("\n")
                self.file.write("---------- Settings ----------" + "\n")
                self.file.write("Examples                 : " + str(training_data.shape[0]) + "\n")
                self.file.write("Batch size               : " + str(batchSize) + "\n")
                self.file.write("Alpha                    : " + str(self.clf.getAlpha()) + "\n")
                self.file.write("Momentum factor          : " + str(momentumFactor) + "\n")
                self.file.write("# of Nodes in all layers : " + str(nodeNum) + "\n")
                self.file.write("Training iteration so far: " + str(totalIter) + "\n")
                self.test(training_data, "training")
                self.test(testData, "testing")
                iteration = 0
            while iteration >= testSize:
                self.clf.train(testSize, batchSize)
                totalIter += testSize
                print "---------- Settings ----------"
                print "Examples                 :", training_data.shape[0]
                print "Batch size               :", batchSize
                print "Alpha                    :", self.clf.getAlpha()
                print "Momentum factor          :", momentumFactor
                print "# of Nodes in all layers :", nodeNum
                print "Training iteration so far:", totalIter
                self.file.write("\n")
                self.file.write("---------- Settings ----------" + "\n")
                self.file.write("Examples                 : " + str(training_data.shape[0]) + "\n")
                self.file.write("Batch size               : " + str(batchSize) + "\n")
                self.file.write("Alpha                    : " + str(self.clf.getAlpha()) + "\n")
                self.file.write("Momentum factor          : " + str(momentumFactor) + "\n")
                self.file.write("# of Nodes in all layers : " + str(nodeNum) + "\n")
                self.file.write("Training iteration so far: " + str(totalIter) + "\n")
                self.test(training_data, "training")
                self.test(testData, "testing")
                iteration -= testSize
            if iteration > 0:
                self.clf.train(iteration, batchSize)
                totalIter += iteration
                print "---------- Settings ----------"
                print "Examples                 :", training_data.shape[0]
# ......... (remaining code omitted) .........