

Python GradientDescent.initialize Method Code Examples

This article collects typical usage examples of the Python method blocks.algorithms.GradientDescent.initialize. If you are unsure what GradientDescent.initialize does or how to call it, the examples selected below should help; they also serve as a starting point for exploring other uses of blocks.algorithms.GradientDescent.


The following presents 6 code examples of the GradientDescent.initialize method, sorted by popularity by default.
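Before the examples, here is a minimal sketch of the calling pattern that the tests below exercise. It assumes Theano and Blocks are installed; the toy parameter W, the quadratic cost, and the learning rate are placeholders chosen for illustration, not taken from any of the projects cited below.

import numpy
from theano import tensor

from blocks.algorithms import GradientDescent, Scale
from blocks.utils import shared_floatx

# A toy shared parameter and a quadratic cost defined on it.
W = shared_floatx(numpy.array([[1.0, 2.0], [3.0, 4.0]]))
cost = tensor.sum(W ** 2)

# Build the algorithm, compile its update function with initialize(),
# then apply a single gradient step.
algorithm = GradientDescent(cost=cost, parameters=[W],
                            step_rule=Scale(learning_rate=0.1))
algorithm.initialize()           # compiles the Theano update function
algorithm.process_batch(dict())  # the cost has no external inputs, so an empty batch suffices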

Example 1: test_gradient_descent_finds_inputs_additional_updates

# Shared imports for the test examples below (Examples 1-5):
from collections import OrderedDict

import numpy
from numpy.testing import assert_allclose, assert_raises
from theano import tensor

from blocks.algorithms import GradientDescent
from blocks.utils import shared_floatx
def test_gradient_descent_finds_inputs_additional_updates():
    W = shared_floatx(numpy.array([[1, 2], [3, 4]]))
    n = shared_floatx(1)
    m = tensor.scalar('m')
    algorithm = GradientDescent(gradients=OrderedDict([(W, W + 1)]))
    algorithm.add_updates([(n, n + m)])
    algorithm.initialize()
    assert m in algorithm.inputs
Developer: Beronx86, Project: blocks, Lines of code: 10, Source: test_algorithms.py

Example 2: test_gradient_descent

# Required import: from blocks.algorithms import GradientDescent
# (see the shared imports listed in Example 1)
def test_gradient_descent():
    W = shared_floatx(numpy.array([[1, 2], [3, 4]]))
    W_start_value = W.get_value()
    cost = tensor.sum(W ** 2)

    algorithm = GradientDescent(cost=cost, parameters=[W])
    algorithm.step_rule.learning_rate.set_value(0.75)
    algorithm.initialize()
    algorithm.process_batch(dict())
    assert_allclose(W.get_value(), -0.5 * W_start_value)
Developer: Beronx86, Project: blocks, Lines of code: 12, Source: test_algorithms.py

Example 3: _test

# Required import: from blocks.algorithms import GradientDescent
# (see the shared imports listed in Example 1)
def _test(f):
    W = shared_floatx(numpy.array([[1, 2], [3, 4]]))
    W_start_value = W.get_value()
    cost = tensor.sum(W ** 2)
    gradients = OrderedDict()
    gradients[W] = tensor.grad(cost, W)
    algorithm = GradientDescent(gradients=f(gradients))
    algorithm.step_rule.learning_rate.set_value(0.75)
    algorithm.initialize()
    algorithm.process_batch(dict())
    assert_allclose(W.get_value(), -0.5 * W_start_value)
Developer: leomauro, Project: blocks, Lines of code: 13, Source: test_algorithms.py

Example 4: test_theano_profile_for_sgd_function

# Required import: from blocks.algorithms import GradientDescent
# (see the shared imports listed in Example 1); the profile assertion
# below additionally needs:
from theano.compile.profiling import ProfileStats
def test_theano_profile_for_sgd_function():
    W = shared_floatx(numpy.array([[1, 2], [3, 4]]))
    W_start_value = W.get_value()
    cost = tensor.sum(W ** 2)

    algorithm = GradientDescent(
        cost=cost, parameters=[W], theano_func_kwargs={'profile': True})
    algorithm.step_rule.learning_rate.set_value(0.75)
    algorithm.initialize()
    algorithm.process_batch(dict())
    assert_allclose(W.get_value(), -0.5 * W_start_value)
    assert isinstance(algorithm._function.profile, ProfileStats)
Developer: Beronx86, Project: blocks, Lines of code: 14, Source: test_algorithms.py

Example 5: test_gradient_descent_spurious_sources

# Required import: from blocks.algorithms import GradientDescent
# (see the shared imports listed in Example 1)
def test_gradient_descent_spurious_sources():
    W = shared_floatx(numpy.array([[1, 2], [3, 4]]))
    W_start_value = W.get_value()
    cost = tensor.sum(W ** 2)

    algorithm = GradientDescent(cost=cost, parameters=[W])
    algorithm.step_rule.learning_rate.set_value(0.75)
    algorithm.initialize()
    # With the default on_unused_sources='raise', process_batch fails when the
    # batch contains sources the cost does not use.
    assert_raises(Exception, algorithm.process_batch,
                  dict(example_id='test'))

    algorithm = GradientDescent(cost=cost, parameters=[W],
                                on_unused_sources='ignore')
    algorithm.step_rule.learning_rate.set_value(0.75)
    algorithm.initialize()
    algorithm.process_batch(dict(example_id='test'))
    assert_allclose(W.get_value(), -0.5 * W_start_value)
Developer: Beronx86, Project: blocks, Lines of code: 19, Source: test_algorithms.py

Example 6: construct_main_loop

# Required import: from blocks.algorithms import GradientDescent (imported
# inside the function below). Project-local modules (tasks, construct_graphs,
# construct_monitors, dump) and theano are assumed to be imported at module
# level in the original source file; print statements use Python 2 syntax.
def construct_main_loop(name, task_name, batch_size, max_epochs,
                        patience_epochs, learning_rate,
                        hyperparameters, **kwargs):
    task = tasks.get_task(**hyperparameters)
    hyperparameters["n_channels"] = task.n_channels

    extensions = []

    print "constructing graphs..."
    graphs, outputs, updates = construct_graphs(task=task, **hyperparameters)

    print "setting up main loop..."

    from blocks.model import Model
    model = Model(outputs["train"]["cost"])

    from blocks.algorithms import GradientDescent, CompositeRule, StepClipping, Adam
    algorithm = GradientDescent(
        cost=outputs["train"]["cost"],
        parameters=graphs["train"].parameters,
        step_rule=CompositeRule([Adam(learning_rate=learning_rate),
                                 StepClipping(1e3)]),
        on_unused_sources="warn")
    algorithm.add_updates(updates["train"])

    extensions.extend(construct_monitors(
        algorithm=algorithm, task=task, model=model, graphs=graphs,
        outputs=outputs, updates=updates, **hyperparameters))

    from blocks.extensions import FinishAfter, Printing, ProgressBar, Timing
    from blocks.extensions.stopping import FinishIfNoImprovementAfter
    from blocks.extensions.training import TrackTheBest
    from blocks.extensions.saveload import Checkpoint
    from dump import DumpBest, LightCheckpoint, PrintingTo
    extensions.extend([
        TrackTheBest("valid_error_rate", "best_valid_error_rate"),
        FinishIfNoImprovementAfter("best_valid_error_rate", epochs=patience_epochs),
        FinishAfter(after_n_epochs=max_epochs),
        DumpBest("best_valid_error_rate", name+"_best.zip"),
        Checkpoint(hyperparameters["checkpoint_save_path"],
                   on_interrupt=False, every_n_epochs=5,
                   before_training=True, use_cpickle=True),
        ProgressBar(), Timing(), Printing(), PrintingTo(name+"_log")])

    from blocks.main_loop import MainLoop
    main_loop = MainLoop(data_stream=task.get_stream("train"),
                         algorithm=algorithm,
                         extensions=extensions,
                         model=model)

    # note blocks will crash and burn because it cannot deal with an
    # already-initialized Algorithm, so this should be enabled only for
    # debugging
    if False:
        with open("graph", "w") as graphfile:
            algorithm.initialize()
            theano.printing.debugprint(algorithm._function, file=graphfile)

    from tabulate import tabulate
    print "parameter sizes:"
    print tabulate((key, "x".join(map(str, value.get_value().shape)), value.get_value().size)
                   for key, value in main_loop.model.get_parameter_dict().items())

    return main_loop
Developer: cooijmanstim, Project: tsa-rnn, Lines of code: 66, Source: convnet.py


Note: The blocks.algorithms.GradientDescent.initialize examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their respective authors, who retain copyright; consult each project's license before redistributing or reusing the code. Do not republish without permission.