当前位置: 首页>>代码示例>>Python>>正文


Python algorithms.Adam方法代码示例

本文整理汇总了Python中blocks.algorithms.Adam方法的典型用法代码示例。如果您正苦于以下问题:Python algorithms.Adam方法的具体用法?Python algorithms.Adam怎么用?Python algorithms.Adam使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块blocks.algorithms的更多用法示例。


在下文中一共展示了algorithms.Adam方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: run

# 需要导入模块: from blocks import algorithms [as 别名]
# 或者: from blocks.algorithms import Adam [as 别名]
def run():
    """Train a classifier on CelebA and checkpoint it every five epochs."""
    # Data streams: training, train-monitoring, valid-monitoring.
    train_stream, train_monitor, valid_monitor = create_celeba_streams(
        training_batch_size=100,
        monitoring_batch_size=500,
        include_targets=True)[:3]

    cg, bn_dropout_cg = create_training_computation_graphs()

    # Exponential-moving-average updates for the batch-normalization
    # population statistics.
    ema_decay = 0.05
    extra_updates = [(param, stat * ema_decay + param * (1 - ema_decay))
                     for param, stat in
                     get_batch_normalization_updates(bn_dropout_cg)]

    # Gradient descent with Adam on the batch-norm/dropout graph.
    algorithm = GradientDescent(cost=bn_dropout_cg.outputs[0],
                                parameters=bn_dropout_cg.parameters,
                                step_rule=Adam())
    algorithm.add_updates(extra_updates)

    # Training-set monitoring: runs once, after training finishes, and
    # applies the EMA updates while iterating the monitoring stream.
    train_cost = bn_dropout_cg.outputs[0]
    train_cost.name = 'cost'
    train_monitoring = DataStreamMonitoring(
        [train_cost], train_monitor, prefix="train",
        before_first_epoch=False, after_epoch=False, after_training=True,
        updates=extra_updates)

    # Validation-set monitoring on the inference graph, every five epochs.
    valid_cost, valid_accuracy = cg.outputs
    valid_cost.name = 'cost'
    valid_accuracy.name = 'accuracy'
    valid_monitoring = DataStreamMonitoring(
        [valid_cost, valid_accuracy], valid_monitor, prefix="valid",
        before_first_epoch=False, after_epoch=False, every_n_epochs=5)

    checkpoint = Checkpoint(
        'celeba_classifier.zip', every_n_epochs=5, use_cpickle=True)

    main_loop = MainLoop(
        data_stream=train_stream,
        algorithm=algorithm,
        extensions=[Timing(), FinishAfter(after_n_epochs=50),
                    train_monitoring, valid_monitoring, checkpoint,
                    Printing(), ProgressBar()])
    main_loop.run()
开发者ID:vdumoulin,项目名称:discgen,代码行数:51,代码来源:train_celeba_classifier.py

示例2: run

# 需要导入模块: from blocks import algorithms [as 别名]
# 或者: from blocks.algorithms import Adam [as 别名]
def run(discriminative_regularization=True):
    """Train a CelebA VAE, optionally with discriminative regularization.

    Parameters
    ----------
    discriminative_regularization : bool
        Whether the training graph includes the discriminative
        regularization terms; also selects the checkpoint filename.
    """
    # Data streams: training, train-monitoring, valid-monitoring.
    main_loop_stream, train_monitor, valid_monitor = create_celeba_streams(
        training_batch_size=100,
        monitoring_batch_size=500,
        include_targets=False)[:3]

    # Build the computation graphs and collect the batch-normalization
    # population-statistic updates (deduplicated), maintained as an
    # exponential moving average.
    cg, bn_cg, variance_parameters = create_training_computation_graphs(
        discriminative_regularization)
    pop_updates = list(
        set(get_batch_normalization_updates(bn_cg, allow_duplicates=True)))
    ema_decay = 0.05
    extra_updates = [(param, stat * ema_decay + param * (1 - ema_decay))
                     for param, stat in pop_updates]

    # Train only the encoder/decoder bricks plus the variance parameters.
    model = Model(bn_cg.outputs[0])
    selector = Selector(
        find_bricks(
            model.top_bricks,
            lambda brick: brick.name in ('encoder_convnet', 'encoder_mlp',
                                         'decoder_convnet', 'decoder_mlp')))
    parameters = list(selector.get_parameters().values()) + variance_parameters

    # Gradient descent with Adam on the batch-norm graph.
    algorithm = GradientDescent(cost=bn_cg.outputs[0],
                                parameters=parameters,
                                step_rule=Adam())
    algorithm.add_updates(extra_updates)

    # Annotate and collect monitored quantities: index 0 comes from the
    # batch-norm graph (training), index 1 from the plain graph (valid).
    monitored_quantities_list = []
    for graph in (bn_cg, cg):
        cost, kl_term, reconstruction_term = graph.outputs
        cost.name = 'nll_upper_bound'
        avg_kl_term = kl_term.mean(axis=0)
        avg_kl_term.name = 'avg_kl_term'
        avg_reconstruction_term = -reconstruction_term.mean(axis=0)
        avg_reconstruction_term.name = 'avg_reconstruction_term'
        monitored_quantities_list.append(
            [cost, avg_kl_term, avg_reconstruction_term])

    train_monitoring = DataStreamMonitoring(
        monitored_quantities_list[0], train_monitor, prefix="train",
        updates=extra_updates, after_epoch=False, before_first_epoch=False,
        every_n_epochs=5)
    valid_monitoring = DataStreamMonitoring(
        monitored_quantities_list[1], valid_monitor, prefix="valid",
        after_epoch=False, before_first_epoch=False, every_n_epochs=5)

    # Checkpoint filename records whether regularization was used.
    save_path = 'celeba_vae_{}regularization.zip'.format(
        '' if discriminative_regularization else 'no_')
    checkpoint = Checkpoint(save_path, every_n_epochs=5, use_cpickle=True)

    main_loop = MainLoop(
        data_stream=main_loop_stream,
        algorithm=algorithm,
        extensions=[Timing(), FinishAfter(after_n_epochs=75),
                    train_monitoring, valid_monitoring, checkpoint,
                    Printing(), ProgressBar()])
    main_loop.run()
开发者ID:vdumoulin,项目名称:discgen,代码行数:62,代码来源:train_celeba_vae.py

示例3: create_main_loop

# 需要导入模块: from blocks import algorithms [as 别名]
# 或者: from blocks.algorithms import Adam [as 别名]
def create_main_loop(self):
    """Assemble and return the PacGAN training main loop.

    Builds the models, the ALI-style adversarial training algorithm with
    a shared Adam step rule, the packed Gaussian-mixture data streams,
    and the monitoring/checkpoint/logging extensions, all driven by
    ``self._config``.
    """
    config = self._config
    work_dir = self._work_dir

    # Models plus the batch-norm population-statistic updates.
    model, bn_model, bn_updates = self.create_models()
    gan, = bn_model.top_bricks
    d_loss, g_loss = bn_model.outputs

    # One Adam step rule shared by discriminator and generator.
    step_rule = Adam(learning_rate=config["learning_rate"],
                     beta1=config["beta1"])
    algorithm = ali_algorithm(d_loss, gan.discriminator_parameters,
                              step_rule, g_loss,
                              gan.generator_parameters, step_rule)
    algorithm.add_updates(bn_updates)

    # Packed Gaussian-mixture streams: training, train-monitoring,
    # valid-monitoring.
    main_loop_stream, train_stream, valid_stream = \
        create_packing_gaussian_mixture_data_streams(
            num_packings=config["num_packing"],
            batch_size=config["batch_size"],
            monitoring_batch_size=config["monitoring_batch_size"],
            means=config["x_mode_means"],
            variances=config["x_mode_variances"],
            priors=config["x_mode_priors"],
            num_examples=config["num_sample"])

    def monitored(a_model):
        # Every auxiliary variable except norm bookkeeping, plus outputs.
        return [v for v in a_model.auxiliary_variables
                if 'norm' not in v.name] + a_model.outputs

    extensions = [
        Timing(),
        FinishAfter(after_n_epochs=config["num_epoch"]),
        DataStreamMonitoring(monitored(bn_model), train_stream,
                             prefix="train", updates=bn_updates),
        DataStreamMonitoring(monitored(model), valid_stream,
                             prefix="valid"),
        Checkpoint(os.path.join(work_dir, config["main_loop_file"]),
                   after_epoch=True, after_training=True,
                   use_cpickle=True),
        ProgressBar(),
        Printing(),
    ]
    # Optional logging extensions, each gated by a config flag.
    if config["log_models"]:
        extensions.append(ModelLogger(folder=work_dir, after_epoch=True))
    if config["log_figures"]:
        extensions.append(GraphLogger(
            num_modes=config["num_zmode"],
            num_samples=config["num_log_figure_sample"],
            dimension=config["num_zdim"],
            r=config["z_mode_r"],
            std=config["z_mode_std"],
            folder=work_dir,
            after_epoch=True,
            after_training=True))
    if config["log_metrics"]:
        extensions.append(MetricLogger(
            means=config["x_mode_means"],
            variances=config["x_mode_variances"],
            folder=work_dir,
            after_epoch=True))

    return MainLoop(model=bn_model, data_stream=main_loop_stream,
                    algorithm=algorithm, extensions=extensions)
开发者ID:fjxmlzn,项目名称:PacGAN,代码行数:44,代码来源:pacgan_task.py


注:本文中的blocks.algorithms.Adam方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。