This page collects typical usage examples of the Python method pylearn2.optimization.batch_gradient_descent.BatchGradientDescent.obj. If you are wondering what exactly BatchGradientDescent.obj does, how to use it, or want to see it in real code, the hand-picked samples below may help. You can also read more about the class it belongs to, pylearn2.optimization.batch_gradient_descent.BatchGradientDescent.
Two code examples of BatchGradientDescent.obj are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
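Before the full examples, here is a minimal sketch of the pattern both of them follow: build a Theano objective expression over shared parameters, wrap it in a BatchGradientDescent instance, then call obj(...) to evaluate the objective at the current parameter values and minimize(...) to optimize them. The toy quadratic, its dimensions, and the variable names below are illustrative assumptions, not taken from the examples:

import numpy as np
from theano import config
from theano import tensor as T
from pylearn2.optimization.batch_gradient_descent import BatchGradientDescent
from pylearn2.utils import sharedX

# Toy objective f(w) = sum((w - t)^2); t is fed as an input, w is optimized
w = sharedX(np.zeros(2), name='w')
t = T.vector(name='t')
objective = T.sqr(w - t).sum()

minimizer = BatchGradientDescent(objective=objective,
                                 params=[w],
                                 inputs=[t])

target = np.cast[config.floatX](np.ones(2))
print minimizer.obj(target)   # objective at the current value of w
minimizer.minimize(target)    # run batch gradient descent on w
print minimizer.obj(target)   # objective after optimization, near 0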
Example 1: test_batch_gradient_descent
# Required import: from pylearn2.optimization.batch_gradient_descent import BatchGradientDescent [as alias]
# Or: from pylearn2.optimization.batch_gradient_descent.BatchGradientDescent import obj [as alias]
# Full imports needed to run this test:
import numpy as np
from theano import config
from theano import tensor as T
from pylearn2.optimization.batch_gradient_descent import BatchGradientDescent
from pylearn2.utils import sharedX
def test_batch_gradient_descent():
    """ Verify that batch gradient descent works by checking that
    it minimizes a quadratic function f(x) = 0.5 x^T A x + b^T x + c
    correctly for several sampled values of A, b, and c.
    The ground truth minimizer is x = np.linalg.solve(A, -b)"""

    n = 3

    A = T.matrix(name='A')
    b = T.vector(name='b')
    c = T.scalar(name='c')

    x = sharedX(np.zeros((n,)), name='x')

    half = np.cast[config.floatX](0.5)

    obj = half * T.dot(T.dot(x, A), x) + T.dot(b, x) + c

    minimizer = BatchGradientDescent(
        objective=obj,
        params=[x],
        inputs=[A, b, c])

    num_samples = 3

    rng = np.random.RandomState([1, 2, 3])

    for i in xrange(num_samples):
        # Sample a random positive definite A (the int cast is needed
        # because 1.5 * n is a float and randn expects integer sizes)
        A = np.cast[config.floatX](rng.randn(int(1.5 * n), n))
        A = np.cast[config.floatX](np.dot(A.T, A))
        A += np.cast[config.floatX](np.identity(n) * .02)

        b = np.cast[config.floatX](rng.randn(n))
        c = np.cast[config.floatX](rng.randn())

        x.set_value(np.cast[config.floatX](rng.randn(n)))

        analytical_x = np.linalg.solve(A, -b)

        actual_obj = minimizer.minimize(A, b, c)
        actual_x = x.get_value()

        # Check that the value returned by the minimize method
        # is the objective function value at the parameters
        # chosen by the minimize method
        cur_obj = minimizer.obj(A, b, c)
        assert np.allclose(actual_obj, cur_obj)

        x.set_value(analytical_x)
        analytical_obj = minimizer.obj(A, b, c)

        # Make sure the objective function is accurate to the first 4 digits
        condition1 = not np.allclose(analytical_obj, actual_obj)
        condition2 = np.abs(analytical_obj - actual_obj) >= \
            1e-4 * np.abs(analytical_obj)

        if (config.floatX == 'float64' and condition1) \
                or (config.floatX == 'float32' and condition2):
            print 'objective function value came out wrong on sample ', i
            print 'analytical obj', analytical_obj
            print 'actual obj', actual_obj
"""
The following section of code was used to verify that numerical
error can make the objective function look non-convex
print 'Checking for numerically induced non-convex behavior'
def f(x):
return 0.5 * np.dot(x,np.dot(A,x)) + np.dot(b,x) + c
x.set_value(actual_x)
minimizer._compute_grad(A,b,c)
minimizer._normalize_grad()
d = minimizer.param_to_grad_shared[x].get_value()
x = actual_x.copy()
prev = f(x)
print prev
step_size = 1e-4
x += step_size * d
cur = f(x)
print cur
cur_sgn = np.sign(cur-prev)
flip_cnt = 0
for i in xrange(10000):
x += step_size * d
prev = cur
cur = f(x)
print cur
prev_sgn = cur_sgn
cur_sgn = np.sign(cur-prev)
if cur_sgn != prev_sgn:
print 'flip'
flip_cnt += 1
if flip_cnt > 1:
print "Non-convex!"
from matplotlib import pyplot as plt
y = []
x = actual_x.copy()
for j in xrange(10000):
y.append(f(x))
#......... some of the code here has been omitted .........
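As a side note, the ground truth the test compares against follows from basic calculus: for f(x) = 0.5 x^T A x + b^T x + c with A symmetric positive definite, the gradient is ∇f(x) = Ax + b, which vanishes at x* = -A^{-1} b, i.e. np.linalg.solve(A, -b). Here is a quick pure-NumPy sketch of that check (no Theano required; the seed and sizes are arbitrary):

import numpy as np

rng = np.random.RandomState(0)
n = 3
A = rng.randn(2 * n, n)
A = np.dot(A.T, A) + 0.02 * np.identity(n)   # symmetric positive definite
b = rng.randn(n)

x_star = np.linalg.solve(A, -b)              # closed-form minimizer

def f(x):
    return 0.5 * np.dot(x, np.dot(A, x)) + np.dot(b, x)

grad = np.dot(A, x_star) + b                 # gradient at the minimizer
assert np.allclose(grad, 0.0)
# Any perturbation increases the objective, since A is positive definite
assert f(x_star + 1e-3 * rng.randn(n)) > f(x_star)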
Example 2: sum
# Required import: from pylearn2.optimization.batch_gradient_descent import BatchGradientDescent [as alias]
# Or: from pylearn2.optimization.batch_gradient_descent.BatchGradientDescent import obj [as alias]
# Excerpt from a longer script: model, X, Y, J, function (theano.function),
# kl_divergence, and true are defined in code the site has omitted.
accs = []
for Y_i in Y:
    pos_prob = 1. / (1. + T.exp(model.free_energy(X) - model.free_energy(Y_i)))
    acc = (pos_prob > .5).mean()
    accs.append(acc)
acc = sum(accs) / float(len(accs))

print '\tinit accuracy ', function([], acc)()

# Minimize the objective function with batch gradient descent
minimizer = BatchGradientDescent(objective=J,
                                 params=model.get_params(),
                                 param_constrainers=[model.censor_updates])

print '\tinit obj:', minimizer.obj()
# minimizer.verbose = True
minimizer.minimize()
print '\tfinal obj:', minimizer.obj()

recovered_beta = model.beta.get_value()
recovered_mu = model.mu.get_value()

print '\trecovered beta:', recovered_beta
print '\trecovered mu:', recovered_mu

kl = kl_divergence(true, model)
kl = function([], kl)()
assert kl >= 0.0
print '\tkl was ', kl
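The accuracy computed at the top of this example is worth unpacking: pos_prob = 1 / (1 + exp(F(X) - F(Y_i))) is the sigmoid of the free-energy gap F(Y_i) - F(X), so it exceeds .5 exactly when the model assigns lower free energy (and hence higher probability) to the positive batch X than to the negative batch Y_i. Here is a standalone NumPy sketch of that scoring, where free_energy is a hypothetical stand-in for model.free_energy:

import numpy as np

def pairwise_accuracy(free_energy, X, Ys):
    """Mean fraction of examples where F(Y_i) > F(X).

    free_energy: callable returning per-example free energies for a
    batch (a hypothetical stand-in for model.free_energy).
    """
    accs = []
    for Y_i in Ys:
        # sigmoid of the free-energy gap; > .5 exactly when F(Y_i) > F(X)
        pos_prob = 1. / (1. + np.exp(free_energy(X) - free_energy(Y_i)))
        accs.append((pos_prob > .5).mean())
    return sum(accs) / float(len(accs))

# Toy usage: a "model" whose free energy is the squared norm of each row
free_energy = lambda batch: np.sum(batch ** 2, axis=1)
X = np.zeros((5, 3))                          # positives sit at the minimum
Ys = [np.ones((5, 3)), 2 * np.ones((5, 3))]
print pairwise_accuracy(free_energy, X, Ys)   # prints 1.0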