This page collects typical usage examples of the Python method mlp.MLP.loss. If you have been wondering what MLP.loss does, how to call it, or how it is used in practice, the curated code examples below may help. You can also read further about the containing class, mlp.MLP.
Two code examples of the MLP.loss method are shown below, sorted by popularity by default.
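Both examples assume an mlp.MLP class in the style of the Theano deep learning tutorial, where loss(y) is the mean negative log-likelihood of the targets and L1/L2_sq are regularization terms over the weights. That class is not shown on this page; the following is a minimal sketch of the assumed interface (the layer layout and initialization are illustrative assumptions, not this page's actual source):

import numpy
import theano
import theano.tensor as T

class MLP(object):
    """Sketch of the assumed interface: one hidden tanh layer
    followed by a softmax output layer."""

    def __init__(self, rng, input, n_in, n_hidden, n_out):
        # Hidden layer, initialized as in the Theano tutorial
        bound = numpy.sqrt(6. / (n_in + n_hidden))
        W_h = theano.shared(numpy.asarray(
            rng.uniform(low=-bound, high=bound, size=(n_in, n_hidden)),
            dtype=theano.config.floatX), name='W_h', borrow=True)
        b_h = theano.shared(numpy.zeros(n_hidden, dtype=theano.config.floatX),
                            name='b_h', borrow=True)
        hidden = T.tanh(T.dot(input, W_h) + b_h)
        # Softmax output layer
        W_o = theano.shared(numpy.zeros((n_hidden, n_out),
                                        dtype=theano.config.floatX),
                            name='W_o', borrow=True)
        b_o = theano.shared(numpy.zeros(n_out, dtype=theano.config.floatX),
                            name='b_o', borrow=True)
        self.p_y_given_x = T.nnet.softmax(T.dot(hidden, W_o) + b_o)
        self.y_pred = T.argmax(self.p_y_given_x, axis=1)
        # Regularization terms used in the cost of example 1
        self.L1 = abs(W_h).sum() + abs(W_o).sum()
        self.L2_sq = (W_h ** 2).sum() + (W_o ** 2).sum()
        self.params = [W_h, b_h, W_o, b_o]

    def loss(self, y):
        # Mean negative log-likelihood of the targets y
        return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])

    def errors(self, y):
        # Mean 0-1 loss on the minibatch
        return T.mean(T.neq(self.y_pred, y))

    def accuracy(self, y):
        # Fraction of correctly classified examples (used in example 2)
        return T.mean(T.eq(self.y_pred, y))

This trio covers every attribute the two examples touch: loss(y) for the training cost, errors(y) for the 0-1 loss, and accuracy(y) for the accuracy functions of example 2.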
Example 1: sgd_optimization_mnist_mlp
# Required import: from mlp import MLP [as alias]
# Or: from mlp.MLP import loss [as alias]
import numpy
import timeit
import theano
import theano.tensor as T
from mlp import MLP
# load_data and simple_sgd are assumed to be provided by the
# surrounding project (Theano-tutorial-style helpers)

def sgd_optimization_mnist_mlp(learning_rate=0.01, L1_reg=0.0, L2_reg=0.0001,
                               n_epochs=1000, dataset='mnist.pkl.gz',
                               batch_size=20, n_hidden=500):
    datasets = load_data(dataset)
    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]
    # get_value is called with borrow=True so that a deep copy of the
    # input is not created
    n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] // batch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] // batch_size
print("... Building the model")
index = T.lscalar() # index to a mini-batch
# Symbolic variables for input and output for a batch
x = T.matrix('x')
y = T.ivector('y')
rng = numpy.random.RandomState(1234)
# Build the logistic regression class
# Images in MNIST are 28*28, there are 10 output classes
classifier = MLP(
rng=rng,
input=x,
n_in=28*28,
n_hidden=n_hidden,
n_out=10)
# Cost to minimize
cost = (
classifier.loss(y)
+ L1_reg * classifier.L1
+ L2_reg * classifier.L2_sq
)
    # Compile functions that measure test/validation performance
    # with respect to the 0-1 loss
    test_model = theano.function(
        inputs=[index],
        outputs=classifier.errors(y),
        givens=[
            (x, test_set_x[index * batch_size: (index + 1) * batch_size]),
            (y, test_set_y[index * batch_size: (index + 1) * batch_size])
        ]
    )
    validate_model = theano.function(
        inputs=[index],
        outputs=classifier.errors(y),
        givens=[
            (x, valid_set_x[index * batch_size: (index + 1) * batch_size]),
            (y, valid_set_y[index * batch_size: (index + 1) * batch_size])
        ]
    )
    # Stochastic gradient descent updates
    updates = simple_sgd(cost, classifier.params, learning_rate)
    train_model = theano.function(
        inputs=[index],
        outputs=cost,
        updates=updates,
        givens=[
            (x, train_set_x[index * batch_size: (index + 1) * batch_size]),
            (y, train_set_y[index * batch_size: (index + 1) * batch_size])
        ]
    )
    ###############
    # TRAIN MODEL #
    ###############
    print("... Training the model")
    # Early stopping parameters
    patience = 10000  # look at this many minibatches regardless
    # Increase patience by this factor when a new best score is achieved
    patience_increase = 2
    improvement_threshold = 0.995  # minimum significant improvement
    validation_frequency = min(n_train_batches, patience // 2)
    best_validation_loss = numpy.inf
    test_score = 0.
    start_time = timeit.default_timer()
    done_looping = False
    epoch = 0
    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in range(n_train_batches):
            minibatch_avg_cost = train_model(minibatch_index)
            # Iteration number
            iter = (epoch - 1) * n_train_batches + minibatch_index
            # Check whether validation needs to be performed
            if (iter + 1) % validation_frequency == 0:
                # Compute the average 0-1 loss on the validation set
                validation_losses = [validate_model(i)
                                     for i in range(n_valid_batches)]
                this_validation_loss = numpy.mean(validation_losses)
# ......... some code omitted here .........
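Example 1 delegates the parameter updates to a simple_sgd helper that the page does not show. A minimal sketch, assuming it returns a Theano updates list of plain gradient-descent steps:

import theano.tensor as T

def simple_sgd(cost, params, learning_rate):
    # One vanilla SGD step per parameter: p <- p - learning_rate * dcost/dp
    gparams = [T.grad(cost, param) for param in params]
    return [(param, param - learning_rate * gparam)
            for param, gparam in zip(params, gparams)]

The omitted tail of sgd_optimization_mnist_mlp presumably follows the usual patience-based early-stopping pattern: update best_validation_loss when beaten, extend patience on a significant (improvement_threshold) improvement, evaluate test_model to get test_score, and set done_looping once iter exceeds patience.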
Example 2: main
# Required import: from mlp import MLP [as alias]
# Or: from mlp.MLP import loss [as alias]
# ......... some code omitted here .........
    # end-snippet-4
    # compile Theano functions that compute the mistakes the model
    # makes on a minibatch
    test_model = theano.function(
        inputs=[index],
        outputs=classifier.errors(y),
        givens={
            x: test_set_x[index * batch_size:(index + 1) * batch_size],
            y: test_set_y[index * batch_size:(index + 1) * batch_size]
        }
    )
    validate_model = theano.function(
        inputs=[index],
        outputs=classifier.errors(y),
        givens={
            x: valid_set_x[index * batch_size:(index + 1) * batch_size],
            y: valid_set_y[index * batch_size:(index + 1) * batch_size]
        }
    )
    training_error_model = theano.function(
        inputs=[index],
        outputs=classifier.errors(y),
        givens={
            x: train_set_x[index * batch_size:(index + 1) * batch_size],
            y: train_set_y[index * batch_size:(index + 1) * batch_size]
        }
    )
    # compile Theano functions that compute the loss of the model
    # on a minibatch
    test_loss_model = theano.function(
        inputs=[index],
        outputs=classifier.loss(y),
        givens={
            x: test_set_x[index * batch_size:(index + 1) * batch_size],
            y: test_set_y[index * batch_size:(index + 1) * batch_size]
        }
    )
    validation_loss_model = theano.function(
        inputs=[index],
        outputs=classifier.loss(y),
        givens={
            x: valid_set_x[index * batch_size:(index + 1) * batch_size],
            y: valid_set_y[index * batch_size:(index + 1) * batch_size]
        }
    )
    training_loss_model = theano.function(
        inputs=[index],
        outputs=classifier.loss(y),
        givens={
            x: train_set_x[index * batch_size:(index + 1) * batch_size],
            y: train_set_y[index * batch_size:(index + 1) * batch_size]
        }
    )
    # compile a Theano function that computes the accuracy of the model
    # on a minibatch (the givens block is completed here following the
    # same pattern as the functions above)
    test_accuracy_model = theano.function(
        inputs=[index],
        outputs=classifier.accuracy(y),
        givens={
            x: test_set_x[index * batch_size:(index + 1) * batch_size],
            y: test_set_y[index * batch_size:(index + 1) * batch_size]
        }
    )
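Each of the compiled functions above evaluates a single minibatch; whole-set figures come from averaging over all batch indices. A hypothetical usage sketch, with n_test_batches computed as in example 1:

import numpy

# Average the per-minibatch metrics over the whole test set
test_losses = [test_loss_model(i) for i in range(n_test_batches)]
test_accs = [test_accuracy_model(i) for i in range(n_test_batches)]
print('mean test loss %f, mean test accuracy %f'
      % (numpy.mean(test_losses), numpy.mean(test_accs)))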