本文整理匯總了Python中mlp.MLP.forward方法的典型用法代碼示例。如果您正苦於以下問題:Python MLP.forward方法的具體用法?Python MLP.forward怎麼用?Python MLP.forward使用的例子?那麼,這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類mlp.MLP的用法示例。
在下文中一共展示了MLP.forward方法的2個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Python代碼示例。
示例1: main
# 需要導入模塊: from mlp import MLP [as 別名]
# 或者: from mlp.MLP import forward [as 別名]
def main():
xor = MLP()
cnf = lambda: 0
xor.add_layer(Layer(2))
xor.add_layer(Layer(2, cnf))
xor.add_layer(Layer(1))
xor.add_bias()
xor.init_network()
xor.patterns = [
([0, 0], [0]),
([0, 1], [1]),
([1, 0], [1]),
([1, 1], [0]),
]
print xor.train(xor.patterns)
for inp, target in xor.patterns:
tolerance = 0.1
computed = xor.forward(inp)
error = abs(computed[0] - target[0])
print 'input: %s target: %s, output: %s, error: %.4f' % (inp,
target, computed, error)
示例2: xrange
# 需要導入模塊: from mlp import MLP [as 別名]
# 或者: from mlp.MLP import forward [as 別名]
# NOTE(review): scrape-flattened fragment — the enclosing training loop's
# header is not visible and the original indentation was lost, so the block
# structure below is inferred from syntax, not shown by the source.
# Write the updated parameter vector back into the network.
mlp.packParam(param)
# Ratio of the actual cost change to the predicted change for this step;
# used to adapt the damping factor `_lambda` (looks like a
# Levenberg-Marquardt-style trust-region heuristic — TODO confirm).
tune_lambda = (after_cost - before_cost)/delta_cost
if tune_lambda < 0.25:
# Poor agreement between model and reality: increase damping.
mlp._lambda = mlp._lambda*1.5
elif tune_lambda > 0.75:
# Good agreement: relax damping.
mlp._lambda = mlp._lambda/1.5
print "Training NNL: %f, Error: %f"%(train_nll,train_error)
# Evaluate on the validation set one mini-batch at a time, accumulating
# per-batch negative log-likelihood and classification error.
nll=[]
error=[]
for batch_index in xrange(n_valid_batches):
# Slice the current mini-batch; valid_X is 2-D (samples x features) and
# valid_Y is 1-D — presumably labels aligned row-wise; verify upstream.
X=valid_X[batch_index*batch_size:(batch_index+1)*batch_size,:]
Y=valid_Y[batch_index*batch_size:(batch_index+1)*batch_size]
# Forward pass populates the network state that Cost/error read.
mlp.forward(X)
nll.append(mlp.Cost(Y))
error.append(mlp.error(Y))
print "Validation NNL: %f, Error: %f"%(numpy.mean(nll),numpy.mean(error))
"""
LR = Logisticlayer(784,10)
iters=1000
batch_size = 256
n_train_batches = train_X.shape[0]/batch_size
n_valid_batches = valid_X.shape[0]/batch_size
for i in xrange(iters):
nll=[]
error=[]
print "Iter: %d ...\n"%(i)
for batch_index in xrange(n_train_batches):