本文整理汇总了Python中blocks.algorithms.GradientDescent.process_batch方法的典型用法代码示例。如果您正苦于以下问题:Python GradientDescent.process_batch方法的具体用法?Python GradientDescent.process_batch怎么用?Python GradientDescent.process_batch使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类blocks.algorithms.GradientDescent的用法示例。
在下文中一共展示了GradientDescent.process_batch方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_gradient_descent
# 需要导入模块: from blocks.algorithms import GradientDescent [as 别名]
# 或者: from blocks.algorithms.GradientDescent import process_batch [as 别名]
def test_gradient_descent():
    """One SGD step on a quadratic cost scales the parameter by -0.5.

    With cost = sum(W**2) the gradient is 2*W, so a single step with
    learning rate 0.75 yields W - 0.75 * 2W = -0.5 * W.
    """
    param = shared_floatx(numpy.array([[1, 2], [3, 4]]))
    initial = param.get_value()
    algorithm = GradientDescent(cost=tensor.sum(param ** 2),
                                parameters=[param])
    algorithm.step_rule.learning_rate.set_value(0.75)
    algorithm.initialize()
    # An empty batch: the cost depends only on the shared parameter.
    algorithm.process_batch({})
    assert_allclose(param.get_value(), -0.5 * initial)
示例2: _test
# 需要导入模块: from blocks.algorithms import GradientDescent [as 别名]
# 或者: from blocks.algorithms.GradientDescent import process_batch [as 别名]
def _test(f):
    """Run one SGD step with explicitly supplied gradients transformed by *f*.

    *f* receives an ``OrderedDict`` mapping parameter -> gradient and must
    return something ``GradientDescent`` accepts as its ``gradients``
    argument.  The numerical outcome is the same -0.5 scaling as in
    ``test_gradient_descent``.
    """
    param = shared_floatx(numpy.array([[1, 2], [3, 4]]))
    initial = param.get_value()
    cost = tensor.sum(param ** 2)
    # Hand the algorithm a precomputed gradient mapping instead of letting
    # it differentiate the cost itself.
    grads = OrderedDict([(param, tensor.grad(cost, param))])
    algorithm = GradientDescent(gradients=f(grads))
    algorithm.step_rule.learning_rate.set_value(0.75)
    algorithm.initialize()
    algorithm.process_batch({})
    assert_allclose(param.get_value(), -0.5 * initial)
示例3: test_theano_profile_for_sgd_function
# 需要导入模块: from blocks.algorithms import GradientDescent [as 别名]
# 或者: from blocks.algorithms.GradientDescent import process_batch [as 别名]
def test_theano_profile_for_sgd_function():
    """``theano_func_kwargs`` is forwarded to the compiled update function.

    Passing ``profile=True`` must attach a ``ProfileStats`` object to the
    compiled function, and the step itself must still behave normally
    (the usual -0.5 scaling of the parameter).
    """
    param = shared_floatx(numpy.array([[1, 2], [3, 4]]))
    initial = param.get_value()
    algorithm = GradientDescent(cost=tensor.sum(param ** 2),
                                parameters=[param],
                                theano_func_kwargs={'profile': True})
    algorithm.step_rule.learning_rate.set_value(0.75)
    algorithm.initialize()
    algorithm.process_batch({})
    assert_allclose(param.get_value(), -0.5 * initial)
    # The kwarg reached theano.function: profiling data was collected.
    assert isinstance(algorithm._function.profile, ProfileStats)
示例4: test_gradient_descent_spurious_sources
# 需要导入模块: from blocks.algorithms import GradientDescent [as 别名]
# 或者: from blocks.algorithms.GradientDescent import process_batch [as 别名]
def test_gradient_descent_spurious_sources():
    """Unknown batch sources raise by default and pass with 'ignore'.

    A batch entry ('example_id') that no input variable consumes must make
    ``process_batch`` raise unless the algorithm was constructed with
    ``on_unused_sources='ignore'``.
    """
    W = shared_floatx(numpy.array([[1, 2], [3, 4]]))
    W_start_value = W.get_value()
    cost = tensor.sum(W ** 2)

    algorithm = GradientDescent(cost=cost, parameters=[W])
    algorithm.step_rule.learning_rate.set_value(0.75)
    algorithm.initialize()
    # BUG FIX: the original called ``assert_raises(lambda: ...)``, which
    # passes the lambda as the *expected exception class* and never invokes
    # it — the returned context manager was discarded, so nothing was
    # asserted.  assert_raises takes the exception type followed by the
    # callable and its arguments.
    # NOTE(review): ValueError is what Blocks raises for unused sources —
    # confirm against the installed Blocks version.
    assert_raises(ValueError,
                  algorithm.process_batch, dict(example_id='test'))

    algorithm = GradientDescent(cost=cost, parameters=[W],
                                on_unused_sources='ignore')
    algorithm.step_rule.learning_rate.set_value(0.75)
    algorithm.initialize()
    # With 'ignore', the spurious source is tolerated and the step runs.
    algorithm.process_batch(dict(example_id='test'))
    assert_allclose(W.get_value(), -0.5 * W_start_value)