This article collects typical usage examples of the MLP.train method from the Python module mlp. If you have been wondering what Python's MLP.train does, how to use it, or where to find examples, the curated code samples here may help. You can also explore further usage examples of the containing class, mlp.MLP.
Nine code examples of MLP.train are shown below, sorted by popularity by default. Upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
Example 1: main

# Required import: from mlp import MLP [as alias]
# Or: from mlp.MLP import train [as alias]
# Also used below: import numpy as np
def main():
    dataset = [((0, 0), (0, 1)), ((0, 1), (1, 0)), ((1, 0), (1, 0)), ((1, 1), (0, 1))]
    # dtanh = lambda o: 1 - o ** 2
    dsigm = lambda o: o * (1 - o)  # derivative of the sigmoid, expressed in its output o
    activation_functions = (np.vectorize(sigmoid), np.vectorize(sigmoid))
    # activation_functions = (np.tanh, np.tanh)
    derivation_functions = (np.vectorize(dsigm), np.vectorize(dsigm))
    # derivation_functions = (np.vectorize(dtanh), np.vectorize(dtanh))
    m = MLP((2, 3, 2), activation_functions, derivation_functions)
    m.train(dataset, epsilon=0, alpha=0.9, eta=.25, epochs=2500)
    for i in range(len(dataset)):
        o = m.feedForward(dataset[i][0])
        print(i, dataset[i][0], encode(o.argmax(), len(o)), ' (expected ', dataset[i][1], ')')
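This snippet assumes two helpers, sigmoid and encode, that the excerpt does not show. A minimal sketch consistent with how they are called (the bodies are assumptions, not the original author's code):

import numpy as np

def sigmoid(x):
    # logistic activation; dsigm above is its derivative expressed in the output o
    return 1.0 / (1.0 + np.exp(-x))

def encode(index, length):
    # one-hot tuple for the winning output unit, matching the (0, 1)/(1, 0) targets
    return tuple(1 if i == index else 0 for i in range(length))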
Example 2: test_xor

# Required import: from mlp import MLP [as alias]
# Or: from mlp.MLP import train [as alias]
def test_xor(self):
    xor = MLP()
    xor.add_layer(Layer(2))  # input layer
    xor.add_layer(Layer(2))  # hidden layer
    xor.add_layer(Layer(1))  # output layer
    xor.init_network()
    xor_patterns = [
        ([0, 0], [0]),
        ([0, 1], [1]),
        ([1, 0], [1]),
        ([1, 1], [0]),
    ]
    xor.train(xor_patterns)
    for inp, outp in xor_patterns:
        self.assertEqual(xor.run(inp), outp)
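assertEqual on the raw return value of run only passes if the network thresholds its outputs to exact 0/1. If run returns raw activations instead, a tolerance-based check is safer; a hedged variant of the final loop:

for inp, outp in xor_patterns:
    for got, want in zip(xor.run(inp), outp):
        self.assertAlmostEqual(got, want, delta=0.1)  # tolerate small residual error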
Example 3: main

# Required import: from mlp import MLP [as alias]
# Or: from mlp.MLP import train [as alias]
def main():
    xor = MLP()
    cnf = lambda: 0
    xor.add_layer(Layer(2))
    xor.add_layer(Layer(2, cnf))
    xor.add_layer(Layer(1))
    xor.add_bias()
    xor.init_network()
    xor.patterns = [
        ([0, 0], [0]),
        ([0, 1], [1]),
        ([1, 0], [1]),
        ([1, 1], [0]),
    ]
    print xor.train(xor.patterns)
    tolerance = 0.1  # acceptable absolute error (defined but never checked in the original)
    for inp, target in xor.patterns:
        computed = xor.forward(inp)
        error = abs(computed[0] - target[0])
        print 'input: %s target: %s, output: %s, error: %.4f' % (inp,
            target, computed, error)
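The loop prints per-pattern errors but never compares them against tolerance. If a hard convergence check is wanted, one could append the following at the end of main(); a sketch reusing the snippet's names:

# hypothetical convergence assertion, placed after the print loop
worst = max(abs(xor.forward(inp)[0] - target[0]) for inp, target in xor.patterns)
assert worst < tolerance, 'network failed to converge on XOR'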
Example 4: testMLP

# Required import: from mlp import MLP [as alias]
# Or: from mlp.MLP import train [as alias]
# Also used below: time, numpy as np, pprint.pprint, MLPConfiger
def testMLP(self):
    '''
    Uses an MLP with one hidden layer and one softmax layer.
    '''
    conf_filename = './snippet_mlp.conf'
    start_time = time.time()
    configer = MLPConfiger(conf_filename)
    mlpnet = MLP(configer, verbose=True)
    end_time = time.time()
    pprint('Time used to build the architecture of MLP: %f seconds' % (end_time-start_time))
    # Training
    start_time = time.time()
    for i in xrange(configer.nepoch):
        cost, accuracy = mlpnet.train(self.snippet_train_set, self.snippet_train_label)
        pprint('epoch %d, cost = %f, accuracy = %f' % (i, cost, accuracy))
    end_time = time.time()
    pprint('Time used for training MLP network on the Snippet task: %f minutes' % ((end_time-start_time)/60))
    # Test
    test_size = self.snippet_test_label.shape[0]
    prediction = mlpnet.predict(self.snippet_test_set)
    accuracy = np.sum(prediction == self.snippet_test_label) / float(test_size)
    pprint('Test accuracy: %f' % accuracy)
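The float(test_size) cast guards against Python 2 integer division (the xrange above marks this as Python 2 code). np.mean expresses the same accuracy computation in one step:

accuracy = np.mean(prediction == self.snippet_test_label)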
Example 5: testMLP

# Required import: from mlp import MLP [as alias]
# Or: from mlp.MLP import train [as alias]
# Also used below: time, numpy as np, pprint.pprint, MLPConfiger
def testMLP(self):
    '''
    Sentiment-analysis task on sentence representations, using an MLP
    with one hidden layer and one softmax layer.
    '''
    conf_filename = './sentiment_mlp.conf'
    start_time = time.time()
    configer = MLPConfiger(conf_filename)
    mlpnet = MLP(configer, verbose=True)
    end_time = time.time()
    pprint('Time used to build the architecture of MLP: %f seconds.' % (end_time-start_time))
    # Training
    start_time = time.time()
    for i in xrange(configer.nepoch):
        rate = 2.0 / ((1.0 + i/500) ** 2)  # decaying learning rate; see the note below
        cost, accuracy = mlpnet.train(self.senti_train_set, self.senti_train_label, rate)
        pprint('epoch %d, cost = %f, accuracy = %f' % (i, cost, accuracy))
    end_time = time.time()
    pprint('Time used for training MLP network on Sentiment analysis task: %f minutes.' % ((end_time-start_time)/60))
    # Test
    prediction = mlpnet.predict(self.senti_test_set)
    accuracy = np.sum(prediction == self.senti_test_label) / float(self.test_size)
    pprint('Test accuracy: %f' % accuracy)
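The schedule rate = 2.0 / ((1.0 + i/500) ** 2) decays the learning rate over epochs, but because this is Python 2 code, i/500 is integer division: the rate stays at 2.0 for epochs 0-499, drops to 0.5 at epoch 500, to about 0.22 at epoch 1000, and so on in steps. A small sketch contrasting the two readings:

def step_rate(i):
    # integer division, as in the snippet: piecewise-constant decay
    return 2.0 / ((1.0 + i // 500) ** 2)

def smooth_rate(i):
    # true division: smooth polynomial decay
    return 2.0 / ((1.0 + i / 500.0) ** 2)

for epoch in (0, 499, 500, 1000):
    print(epoch, step_rate(epoch), smooth_rate(epoch))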
Example 6: MLP

# Required import: from mlp import MLP [as alias]
# Or: from mlp.MLP import train [as alias]
project = DB.getProject('mlpnew')  # alternatively: 'evalmlp'
model = MLP(
    id=project.id,
    rng=rng,
    input=x,
    momentum=0.0,
    offline=True,
    n_in=project.patchSize**2,
    n_hidden=project.hiddenUnits,
    n_out=len(project.labels),
    train_time=project.trainTime,
    # batch_size=project.batchSize,
    batch_size=50,
    patch_size=project.patchSize,
    path=project.path_offline)
data = Data(project, offline=True, n_train_samples=700000, n_valid_samples=5000)
# model.train(offline=True, data=data, mean=project.mean, std=project.std)
# data.load(project)
# print data.get_pixel_count(project)
# exit(1)
n_iterations = 5000
for iteration in xrange(n_iterations):
    print 'iteration:', iteration
    model.train(data=data, offline=True, mean=project.mean, std=project.std)
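The fragment references rng and x without defining them. The keyword arguments (input=x, n_in, n_hidden, n_out) suggest a Theano-style MLP, where they would typically be set up as below; this is an assumption, since the excerpt does not show it:

import numpy as np
import theano.tensor as T

rng = np.random.RandomState(1234)  # seeded RNG for reproducible weight initialization
x = T.matrix('x')                  # symbolic input: a batch of flattened patches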
Example 7: mlp

# Required import: from mlp import MLP [as alias]
# Or: from mlp.MLP import train [as alias]
def mlp(fn, X, y, _):
    mlp = MLP(fn, [50, 50, 50, 50], batchsize=BATCHSIZE)  # four hidden layers of 50 units each
    losses = mlp.train(ITERATIONS, X, y, plot=PLOT)
    return losses
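One readability nit: the local variable mlp shadows the enclosing function's own name, which blocks recursion and confuses tracebacks. A cleaner spelling of the same wrapper, under the same assumption that BATCHSIZE, ITERATIONS, and PLOT are module-level constants:

def mlp(fn, X, y, _):
    # avoid shadowing the function's own name with the instance
    net = MLP(fn, [50, 50, 50, 50], batchsize=BATCHSIZE)
    return net.train(ITERATIONS, X, y, plot=PLOT)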
Example 8: map

# Required import: from mlp import MLP [as alias]
# Or: from mlp.MLP import train [as alias]
rbmstack.run(0, cfg.weight_updates, mbp)
if cfg.finetune:
    # clean up RBM parts which are not needed anymore
    map(lambda x: x.deallocPChain(), rbmstack.layers)
    map(lambda x: x.dealloc(), rbmstack.layers)
    weights = map(lambda x: x.mat, rbmstack.weights)
    biases = map(lambda x: x.bias_hi, rbmstack.weights)
    from mlp import MLP
    pymlp = MLP(cfg, weights, biases)
    # evaluate on the test set every 10th epoch
    pymlp.preEpochHook = lambda mlp, epoch: epoch % 10 == 0 and mlp.runMLP(mbp_test, cfg.test_batchsize, epoch)
    try:
        pymlp.train(mbp, cfg.finetune_epochs, cfg.finetune_batch_size, cfg.finetune_rprop)
    except KeyboardInterrupt:
        pass
    map(lambda x: x.alloc(), rbmstack.layers)
    map(lambda x: x.allocPChain(), rbmstack.layers)
    rbmstack.saveAllLayers("-finetune")
    pymlp.saveLastLayer()
if cfg.headless:
    cp.exitCUDA()
    sys.exit(0)

PLT_NUM = 1
import matplotlib.pyplot as plt
#### calculate maps_bottom into py. yeah it's a dirty hack, i know
px = cfg.px
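Note that this fragment relies on Python 2's eager map: the map(lambda x: x.dealloc(), ...) calls run their side effects immediately, and weights/biases come back as plain lists. Under Python 3, map is lazy, so those lines would silently do nothing; explicit loops and comprehensions are the portable spelling:

# Python 3-safe equivalent of the eager Python 2 map calls
for layer in rbmstack.layers:
    layer.deallocPChain()
    layer.dealloc()
weights = [w.mat for w in rbmstack.weights]
biases = [w.bias_hi for w in rbmstack.weights]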
Example 9: __init__

# Required import: from mlp import MLP [as alias]
# Or: from mlp.MLP import train [as alias]
# Also used below: scipy as sp, matplotlib.pyplot as plt, Error
class TrainerValidator:

    def __init__(self, k, nb_epochs, H1, H2, nu, mu, batchsize, data):
        self.k = k
        self.data = data
        self.H1 = H1
        self.H2 = H2
        self.mu = mu
        self.nu = nu
        self.batchsize = batchsize
        self.mlp = MLP(H1, H2, 576, nu, mu, batchsize, self.k)
        self.error = Error()
        self.NUM_EPOCH = nb_epochs
        self.validation_error = sp.zeros(self.NUM_EPOCH+1)
        self.misclassified_val = sp.zeros(self.NUM_EPOCH+1)
        self.training_error = sp.zeros(self.NUM_EPOCH+1)
        self.misclassified_train = sp.zeros(self.NUM_EPOCH+1)

    def trainAndClassify(self):
        converge = 0
        a = 4                # patience: stop once converge reaches this count
        var_thresh = 0.005   # minimum relative improvement of the validation error
        early_stopping = 0   # early stopping is disabled by default
        for i in range(self.NUM_EPOCH+1):
            self.data.shuffleData()
            self.mlp.train(self.data.train_left, self.data.train_right, self.data.train_cat)
            _, _, _, _, _, results_train, _, _, _, _, _, _ = self.mlp.forward_pass(self.data.train_left, self.data.train_right)
            results_val, results_classif = self.mlp.classify(self.data.val_left, self.data.val_right)
            self.training_error[i], self.misclassified_train[i] = self.error.norm_total_error(results_train, self.data.train_cat, self.k)
            self.validation_error[i], self.misclassified_val[i] = self.error.norm_total_error(results_val, self.data.val_cat, self.k)
            print "Epoch #" + str(i) + " Ratio of misclassified: " + str(self.misclassified_val[i]) + " - Error: " + str(self.validation_error[i])
            # Early stopping
            if early_stopping:
                if i > 0:
                    if self.validation_error[i] > (self.validation_error[i-1] * (1 - var_thresh)):
                        converge += 1
                    else:
                        if converge > 0:
                            converge -= 0.5  # the original `converge -= 1/2` is a no-op under Python 2 integer division
                    if converge >= a:
                        print "Triggering early stopping: the validation error is increasing (overfitting) or has converged"
                        break
        # self.mlp.test_gradient(self.data.val_left, self.data.val_right, self.data.val_cat)

    def plotResults(self):
        error_fig = plt.figure()
        ax1 = error_fig.add_subplot(111)
        ax1.plot(self.validation_error, label='validation error')
        ax1.plot(self.training_error, label='training error')
        ax1.set_ylabel('error')
        ax1.set_xlabel('epoch')
        title = "k=%d H1=%d H2=%d mu=%f nu=%f batchsize=%d std(val)=%f std(err)=%f" % (self.k, self.H1, self.H2, self.mu, self.nu, self.batchsize, sp.std(self.validation_error), sp.std(self.training_error))
        error_fig.suptitle(title)
        plt.legend()
        filename = "k=%d-H1=%d-H2=%d-mu=%f-nu=%f-batchsize=%d-nb_epoch=%d" % (self.k, self.H1, self.H2, self.mu, self.nu, self.batchsize, self.NUM_EPOCH)
        plt.savefig('results/' + filename + "-error.png")

        mis_fig = plt.figure()
        ax2 = mis_fig.add_subplot(111)
        ax2.plot(self.misclassified_val, label='misclassified ratio (validation)')
        ax2.plot(self.misclassified_train, label='misclassified ratio (training)')
        title = "k=%d H1=%d H2=%d mu=%f nu=%f batchsize=%d std(val)=%f std(err)=%f" % (self.k, self.H1, self.H2, self.mu, self.nu, self.batchsize, sp.std(self.misclassified_val), sp.std(self.misclassified_train))
        mis_fig.suptitle(title)
        # ax2.set_xlim([1, self.NUM_EPOCH])
        ax2.set_ylabel('misclassified')
        ax2.set_xlabel('epoch')
        plt.legend()
        plt.savefig('results/' + filename + "-misclassified.png")
        # plt.show()

    def getMLP(self):
        return self.mlp
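A hypothetical driver for the class, assuming a data object that exposes the attributes it touches (train_left, train_right, train_cat, val_left, val_right, val_cat, and shuffleData()):

data = load_dataset()  # hypothetical loader returning the data object described above
tv = TrainerValidator(k=6, nb_epochs=100, H1=60, H2=30,
                      nu=0.001, mu=0.2, batchsize=128, data=data)
tv.trainAndClassify()
tv.plotResults()           # writes the error and misclassification plots under results/
trained_mlp = tv.getMLP()  # retrieve the underlying network for further use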