This article collects typical usage examples of the Python method pylearn2.optimization.batch_gradient_descent.BatchGradientDescent._compute_grad. If you have been wondering what exactly BatchGradientDescent._compute_grad does, how to call it, or what working code that uses it looks like, the selected example below should help. You can also read more about the class it belongs to, pylearn2.optimization.batch_gradient_descent.BatchGradientDescent.
The text below shows 1 code example of BatchGradientDescent._compute_grad; examples are sorted by popularity by default.
Example 1: test_batch_gradient_descent
# Required import: from pylearn2.optimization.batch_gradient_descent import BatchGradientDescent
# Method used: BatchGradientDescent._compute_grad
# Full set of imports needed to run this example:
from __future__ import print_function

import numpy as np
import theano.tensor as T
from theano import config

from pylearn2.optimization.batch_gradient_descent import BatchGradientDescent
from pylearn2.utils import sharedX
def test_batch_gradient_descent():
    """ Verify that batch gradient descent works by checking that
    it minimizes a quadratic function f(x) = x^T A x + b^T x + c
    correctly for several sampled values of A, b, and c.
    The ground truth minimizer is x = np.linalg.solve(A, -b)."""
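    # For f(x) = 0.5 * x^T A x + b^T x + c with symmetric positive definite A,
    # the gradient is A x + b, so setting it to zero gives the unique
    # minimizer x = np.linalg.solve(A, -b).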
    n = 3

    A = T.matrix(name='A')
    b = T.vector(name='b')
    c = T.scalar(name='c')

    x = sharedX(np.zeros((n,)), name='x')

    half = np.cast[config.floatX](0.5)

    obj = half * T.dot(T.dot(x, A), x) + T.dot(b, x) + c

    minimizer = BatchGradientDescent(
            objective=obj,
            params=[x],
            inputs=[A, b, c])

    num_samples = 3

    rng = np.random.RandomState([1, 2, 3])

    for i in range(num_samples):
        # Sample a random symmetric positive definite A (A^T A plus a small
        # ridge), a random b and c, and a random starting point for x.
        A = np.cast[config.floatX](rng.randn(int(1.5 * n), n))
        A = np.cast[config.floatX](np.dot(A.T, A))
        A += np.cast[config.floatX](np.identity(n) * .02)
        b = np.cast[config.floatX](rng.randn(n))
        c = np.cast[config.floatX](rng.randn())
        x.set_value(np.cast[config.floatX](rng.randn(n)))

        analytical_x = np.linalg.solve(A, -b)

        actual_obj = minimizer.minimize(A, b, c)
        actual_x = x.get_value()

        # Check that the value returned by the minimize method
        # is the objective function value at the parameters
        # chosen by the minimize method.
        cur_obj = minimizer.obj(A, b, c)
        assert np.allclose(actual_obj, cur_obj)

        x.set_value(analytical_x)
        analytical_obj = minimizer.obj(A, b, c)

        # Make sure the objective function is accurate to the first 4 digits.
        condition1 = not np.allclose(analytical_obj, actual_obj)
        condition2 = np.abs(analytical_obj - actual_obj) >= \
                1e-4 * np.abs(analytical_obj)

        if (config.floatX == 'float64' and condition1) \
                or (config.floatX == 'float32' and condition2):
            print('objective function value came out wrong on sample', i)
            print('analytical obj', analytical_obj)
            print('actual obj', actual_obj)
"""
The following section of code was used to verify that numerical
error can make the objective function look non-convex
print 'Checking for numerically induced non-convex behavior'
def f(x):
return 0.5 * np.dot(x,np.dot(A,x)) + np.dot(b,x) + c
x.set_value(actual_x)
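            # _compute_grad evaluates the gradient of the objective at the
            # current value of x and stores it in the shared variables in
            # param_to_grad_shared; _normalize_grad then rescales the stored
            # gradient to unit norm before it is read out as the direction d.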
            minimizer._compute_grad(A, b, c)
            minimizer._normalize_grad()
            d = minimizer.param_to_grad_shared[x].get_value()

            x = actual_x.copy()
            prev = f(x)
            print(prev)
            step_size = 1e-4
            x += step_size * d
            cur = f(x)
            print(cur)
            cur_sgn = np.sign(cur - prev)
            flip_cnt = 0
            for i in range(10000):
                x += step_size * d
                prev = cur
                cur = f(x)
                print(cur)
                prev_sgn = cur_sgn
                cur_sgn = np.sign(cur - prev)
                if cur_sgn != prev_sgn:
                    print('flip')
                    flip_cnt += 1
                    if flip_cnt > 1:
                        print("Non-convex!")

                        from matplotlib import pyplot as plt
                        y = []

                        x = actual_x.copy()
                        for j in range(10000):
                            y.append(f(x))
# ......... the rest of this example is omitted here .........
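Supplementary note (not part of the original test): since this page is specifically about _compute_grad, the following is a minimal, self-contained sketch of calling it directly on the same kind of quadratic objective and comparing the stored gradient against the analytic gradient A x + b. It only relies on attributes that already appear in the example above (_compute_grad and param_to_grad_shared, both private attributes of BatchGradientDescent), and it assumes _compute_grad stores the raw, unnormalized gradient, as the commented-out diagnostic suggests. The value names (A_val, b_val, c_val) are introduced here purely for illustration.

# Imports repeated so this snippet runs on its own.
from __future__ import print_function

import numpy as np
import theano.tensor as T
from theano import config

from pylearn2.optimization.batch_gradient_descent import BatchGradientDescent
from pylearn2.utils import sharedX

n = 3
A_sym = T.matrix(name='A')
b_sym = T.vector(name='b')
c_sym = T.scalar(name='c')
x = sharedX(np.zeros((n,)), name='x')
half = np.cast[config.floatX](0.5)
obj = half * T.dot(T.dot(x, A_sym), x) + T.dot(b_sym, x) + c_sym

minimizer = BatchGradientDescent(objective=obj, params=[x],
                                 inputs=[A_sym, b_sym, c_sym])

# Concrete values: a fixed symmetric positive definite A, random b, c, and x.
rng = np.random.RandomState(0)
A_val = np.cast[config.floatX](np.identity(n) * 2.)
b_val = np.cast[config.floatX](rng.randn(n))
c_val = np.cast[config.floatX](rng.randn())
x.set_value(np.cast[config.floatX](rng.randn(n)))

# _compute_grad fills param_to_grad_shared with the gradient at the current x.
minimizer._compute_grad(A_val, b_val, c_val)
grad = minimizer.param_to_grad_shared[x].get_value()

# For f(x) = 0.5 x^T A x + b^T x + c with symmetric A the gradient is A x + b.
expected = np.dot(A_val, x.get_value()) + b_val
print('computed grad:', grad)
print('analytic grad:', expected)
assert np.allclose(grad, expected, atol=1e-4)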