This article collects typical usage examples of the Python method blocks.extensions.Timing. If you are wondering what extensions.Timing does, how to call it, or want concrete examples of it in use, the curated code samples below may help. You can also explore other usage examples from the module it belongs to, blocks.extensions.
Four code examples of extensions.Timing are shown below, ordered by popularity by default.
Example 1: test_timing
# Required module: from blocks import extensions [as alias]
# or: from blocks.extensions import Timing [as alias]
from blocks.extensions import FinishAfter, Timing

def test_timing():
    # MockMainLoop is a stub main loop used in the Blocks test suite.
    main_loop = MockMainLoop(extensions=[Timing(),
                                         FinishAfter(after_n_epochs=2)])
    main_loop.run()
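Timing takes no arguments; it measures how long the main loop spends reading data and training, and writes those figures into the main loop's log. A quick way to look at them after a run is sketched below. The 'time_' record-name prefix (e.g. 'time_train_this_epoch') is an assumption based on Blocks' Timing extension and may differ between versions.

# Sketch: inspect the timing records that Timing wrote into the log.
# Assumption: Timing's record names start with 'time_'; verify against
# your Blocks version.
def print_timing_records(main_loop):
    # The training log maps iteration numbers to dicts of records.
    for iteration, records in sorted(main_loop.log.items()):
        for name, value in records.items():
            if name.startswith('time_'):
                print(iteration, name, value)

# Usage, continuing Example 1:
# main_loop.run()
# print_timing_records(main_loop)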
Example 2: main
# Required module: from blocks import extensions [as alias]
# or: from blocks.extensions import Timing [as alias]
from theano import tensor

from blocks.algorithms import GradientDescent, Scale
from blocks.bricks import MLP, Softmax, Tanh
from blocks.bricks.cost import CategoricalCrossEntropy, MisclassificationRate
from blocks.extensions import FinishAfter, Printing, Timing
from blocks.extensions.monitoring import (DataStreamMonitoring,
                                          TrainingDataMonitoring)
from blocks.extensions.saveload import Checkpoint
from blocks.filter import VariableFilter
from blocks.graph import ComputationGraph
from blocks.initialization import Constant, IsotropicGaussian
from blocks.main_loop import MainLoop
from blocks.model import Model
from blocks.monitoring import aggregation
from blocks.roles import WEIGHT
from fuel.datasets import MNIST
from fuel.schemes import SequentialScheme
from fuel.streams import DataStream
from fuel.transformers import Flatten

# Plot lives in the optional blocks-extras package.
try:
    from blocks_extras.extensions.plot import Plot
    BLOCKS_EXTRAS_AVAILABLE = True
except ImportError:
    BLOCKS_EXTRAS_AVAILABLE = False

def main(save_to, num_epochs):
    # Build and initialize a 784-100-10 MLP for MNIST digit classification.
    mlp = MLP([Tanh(), Softmax()], [784, 100, 10],
              weights_init=IsotropicGaussian(0.01),
              biases_init=Constant(0))
    mlp.initialize()
    x = tensor.matrix('features')
    y = tensor.lmatrix('targets')
    probs = mlp.apply(x)
    cost = CategoricalCrossEntropy().apply(y.flatten(), probs)
    error_rate = MisclassificationRate().apply(y.flatten(), probs)

    # Add L2 weight decay on the two weight matrices.
    cg = ComputationGraph([cost])
    W1, W2 = VariableFilter(roles=[WEIGHT])(cg.variables)
    cost = cost + .00005 * (W1 ** 2).sum() + .00005 * (W2 ** 2).sum()
    cost.name = 'final_cost'

    mnist_train = MNIST(("train",))
    mnist_test = MNIST(("test",))

    algorithm = GradientDescent(
        cost=cost, parameters=cg.parameters,
        step_rule=Scale(learning_rate=0.1))
    extensions = [Timing(),
                  FinishAfter(after_n_epochs=num_epochs),
                  DataStreamMonitoring(
                      [cost, error_rate],
                      Flatten(
                          DataStream.default_stream(
                              mnist_test,
                              iteration_scheme=SequentialScheme(
                                  mnist_test.num_examples, 500)),
                          which_sources=('features',)),
                      prefix="test"),
                  TrainingDataMonitoring(
                      [cost, error_rate,
                       aggregation.mean(algorithm.total_gradient_norm)],
                      prefix="train",
                      after_epoch=True),
                  Checkpoint(save_to),
                  Printing()]

    if BLOCKS_EXTRAS_AVAILABLE:
        extensions.append(Plot(
            'MNIST example',
            channels=[
                ['test_final_cost',
                 'test_misclassificationrate_apply_error_rate'],
                ['train_total_gradient_norm']]))

    main_loop = MainLoop(
        algorithm,
        Flatten(
            DataStream.default_stream(
                mnist_train,
                iteration_scheme=SequentialScheme(
                    mnist_train.num_examples, 50)),
            which_sources=('features',)),
        model=Model(cost),
        extensions=extensions)

    main_loop.run()
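One detail worth noting in this example: the L2 penalty is built by filtering the computation graph for WEIGHT-role variables and unpacking exactly two of them, which breaks as soon as the network gains a layer. Below is a small sketch of the same idea that works for any number of weight matrices, using only the APIs the example already imports; add_weight_decay is a hypothetical helper name, not part of Blocks or the original code.

# Hypothetical helper generalizing the weight-decay step from Example 2:
# penalize every WEIGHT-role variable instead of unpacking exactly two.
from blocks.filter import VariableFilter
from blocks.graph import ComputationGraph
from blocks.roles import WEIGHT

def add_weight_decay(cost, coefficient=5e-5):
    cg = ComputationGraph([cost])
    weights = VariableFilter(roles=[WEIGHT])(cg.variables)
    # Sum of squared entries of every weight matrix in the graph.
    cost = cost + coefficient * sum((W ** 2).sum() for W in weights)
    cost.name = 'final_cost'
    return cost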
Example 3: run
# Required module: from blocks import extensions [as alias]
# or: from blocks.extensions import Timing [as alias]
from blocks.algorithms import Adam, GradientDescent
from blocks.extensions import FinishAfter, Printing, ProgressBar, Timing
from blocks.extensions.monitoring import DataStreamMonitoring
from blocks.extensions.saveload import Checkpoint
from blocks.graph import get_batch_normalization_updates
from blocks.main_loop import MainLoop

# create_celeba_streams and create_training_computation_graphs are helpers
# defined by the project this example comes from; they are not part of Blocks.

def run():
    streams = create_celeba_streams(training_batch_size=100,
                                    monitoring_batch_size=500,
                                    include_targets=True)
    main_loop_stream = streams[0]
    train_monitor_stream = streams[1]
    valid_monitor_stream = streams[2]

    cg, bn_dropout_cg = create_training_computation_graphs()

    # Compute parameter updates for the batch normalization population
    # statistics. They are updated following an exponential moving average.
    pop_updates = get_batch_normalization_updates(bn_dropout_cg)
    decay_rate = 0.05
    extra_updates = [(p, m * decay_rate + p * (1 - decay_rate))
                     for p, m in pop_updates]

    # Prepare algorithm
    step_rule = Adam()
    algorithm = GradientDescent(cost=bn_dropout_cg.outputs[0],
                                parameters=bn_dropout_cg.parameters,
                                step_rule=step_rule)
    algorithm.add_updates(extra_updates)

    # Prepare monitoring
    cost = bn_dropout_cg.outputs[0]
    cost.name = 'cost'
    train_monitoring = DataStreamMonitoring(
        [cost], train_monitor_stream, prefix="train",
        before_first_epoch=False, after_epoch=False, after_training=True,
        updates=extra_updates)

    cost, accuracy = cg.outputs
    cost.name = 'cost'
    accuracy.name = 'accuracy'
    monitored_quantities = [cost, accuracy]
    valid_monitoring = DataStreamMonitoring(
        monitored_quantities, valid_monitor_stream, prefix="valid",
        before_first_epoch=False, after_epoch=False, every_n_epochs=5)

    # Prepare checkpoint
    checkpoint = Checkpoint(
        'celeba_classifier.zip', every_n_epochs=5, use_cpickle=True)

    extensions = [Timing(), FinishAfter(after_n_epochs=50), train_monitoring,
                  valid_monitoring, checkpoint, Printing(), ProgressBar()]
    main_loop = MainLoop(data_stream=main_loop_stream, algorithm=algorithm,
                         extensions=extensions)
    main_loop.run()
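The extra_updates list above implements an exponential moving average: each population statistic p is nudged 5% of the way toward the latest minibatch estimate m on every step. A plain-NumPy illustration of that recurrence follows; the function name and random data are illustrative, not from the example.

import numpy as np

def ema_update(population, minibatch, decay_rate=0.05):
    # Mirrors the Blocks update pair: p <- m * decay_rate + p * (1 - decay_rate)
    return minibatch * decay_rate + population * (1 - decay_rate)

population_mean = 0.0
for minibatch_mean in np.random.randn(1000):
    population_mean = ema_update(population_mean, minibatch_mean)
# population_mean is now a smoothed estimate of the recent minibatch means.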
Example 4: run
# Required module: from blocks import extensions [as alias]
# or: from blocks.extensions import Timing [as alias]
from blocks.algorithms import Adam, GradientDescent
from blocks.extensions import FinishAfter, Printing, ProgressBar, Timing
from blocks.extensions.monitoring import DataStreamMonitoring
from blocks.extensions.saveload import Checkpoint
from blocks.graph import get_batch_normalization_updates
from blocks.main_loop import MainLoop
from blocks.model import Model
from blocks.select import Selector
from blocks.utils import find_bricks

# create_celeba_streams and create_training_computation_graphs are helpers
# defined by the project this example comes from; they are not part of Blocks.

def run(discriminative_regularization=True):
    streams = create_celeba_streams(training_batch_size=100,
                                    monitoring_batch_size=500,
                                    include_targets=False)
    main_loop_stream, train_monitor_stream, valid_monitor_stream = streams[:3]

    rval = create_training_computation_graphs(discriminative_regularization)
    cg, bn_cg, variance_parameters = rval

    # Compute parameter updates for the batch normalization population
    # statistics. They are updated following an exponential moving average.
    pop_updates = list(
        set(get_batch_normalization_updates(bn_cg, allow_duplicates=True)))
    decay_rate = 0.05
    extra_updates = [(p, m * decay_rate + p * (1 - decay_rate))
                     for p, m in pop_updates]

    # Train only the encoder/decoder bricks, plus the variance parameters.
    model = Model(bn_cg.outputs[0])
    selector = Selector(
        find_bricks(
            model.top_bricks,
            lambda brick: brick.name in ('encoder_convnet', 'encoder_mlp',
                                         'decoder_convnet', 'decoder_mlp')))
    parameters = list(selector.get_parameters().values()) + variance_parameters

    # Prepare algorithm
    step_rule = Adam()
    algorithm = GradientDescent(cost=bn_cg.outputs[0],
                                parameters=parameters,
                                step_rule=step_rule)
    algorithm.add_updates(extra_updates)

    # Prepare monitoring
    monitored_quantities_list = []
    for graph in [bn_cg, cg]:
        cost, kl_term, reconstruction_term = graph.outputs
        cost.name = 'nll_upper_bound'
        avg_kl_term = kl_term.mean(axis=0)
        avg_kl_term.name = 'avg_kl_term'
        avg_reconstruction_term = -reconstruction_term.mean(axis=0)
        avg_reconstruction_term.name = 'avg_reconstruction_term'
        monitored_quantities_list.append(
            [cost, avg_kl_term, avg_reconstruction_term])
    train_monitoring = DataStreamMonitoring(
        monitored_quantities_list[0], train_monitor_stream, prefix="train",
        updates=extra_updates, after_epoch=False, before_first_epoch=False,
        every_n_epochs=5)
    valid_monitoring = DataStreamMonitoring(
        monitored_quantities_list[1], valid_monitor_stream, prefix="valid",
        after_epoch=False, before_first_epoch=False, every_n_epochs=5)

    # Prepare checkpoint
    save_path = 'celeba_vae_{}regularization.zip'.format(
        '' if discriminative_regularization else 'no_')
    checkpoint = Checkpoint(save_path, every_n_epochs=5, use_cpickle=True)

    extensions = [Timing(), FinishAfter(after_n_epochs=75), train_monitoring,
                  valid_monitoring, checkpoint, Printing(), ProgressBar()]
    main_loop = MainLoop(data_stream=main_loop_stream,
                         algorithm=algorithm, extensions=extensions)
    main_loop.run()
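All four examples follow the same pattern: Timing sits first in the extensions list, so its records are written before later extensions such as Printing read the log at the end of each epoch. Below is a minimal, self-contained sketch of that pattern; the toy linear model and random dataset are placeholders and not part of the original examples.

# Minimal sketch: Timing in a toy Blocks main loop. Only the extension
# stack mirrors the examples above; the model and data are placeholders.
import numpy
import theano
from theano import tensor

from blocks.algorithms import GradientDescent, Scale
from blocks.bricks import Linear
from blocks.extensions import FinishAfter, Printing, Timing
from blocks.graph import ComputationGraph
from blocks.initialization import Constant, IsotropicGaussian
from blocks.main_loop import MainLoop
from fuel.datasets import IterableDataset
from fuel.streams import DataStream

x = tensor.matrix('features')
linear = Linear(input_dim=2, output_dim=1,
                weights_init=IsotropicGaussian(0.01),
                biases_init=Constant(0))
linear.initialize()
cost = (linear.apply(x) ** 2).mean()
cost.name = 'cost'

# Ten minibatches of random 'features', re-iterated every epoch.
dataset = IterableDataset(
    {'features': [numpy.random.rand(5, 2).astype(theano.config.floatX)
                  for _ in range(10)]})
main_loop = MainLoop(
    algorithm=GradientDescent(cost=cost,
                              parameters=ComputationGraph(cost).parameters,
                              step_rule=Scale(learning_rate=0.1)),
    data_stream=DataStream(dataset),
    extensions=[Timing(), FinishAfter(after_n_epochs=2), Printing()])
main_loop.run()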