This article collects typical usage examples of the Python method hypergrad.nn_utils.VectorParser.add_shape. If you are wondering how VectorParser.add_shape is used in practice, or what calling it looks like, the curated examples here should help. You can also look further into usage examples of its containing class, hypergrad.nn_utils.VectorParser.
Four code examples of the VectorParser.add_shape method are shown below, sorted by popularity by default.
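Before the collected examples, here is a minimal sketch of the idea behind add_shape. The TinyVectorParser class below is a hypothetical stand-in written only for illustration, not hypergrad's actual implementation: each add_shape call registers a named block with a given shape and records where that block lives inside one flat parameter vector, so the named array can later be sliced back out.

import numpy as np

class TinyVectorParser:
    """Hypothetical stand-in illustrating what add_shape provides."""
    def __init__(self):
        self.idxs_and_shapes = {}
        self.N = 0  # total length of the flat vector registered so far

    def add_shape(self, name, shape):
        size = int(np.prod(shape))
        self.idxs_and_shapes[name] = (slice(self.N, self.N + size), shape)
        self.N += size

    def get(self, vect, name):
        idxs, shape = self.idxs_and_shapes[name]
        return np.reshape(vect[idxs], shape)

parser = TinyVectorParser()
parser.add_shape('first', [2])
parser.add_shape('second', [1])
parser.add_shape('third', [3])
vect = np.arange(parser.N)          # flat vector of length 2 + 1 + 3 = 6
print(parser.get(vect, 'third'))    # -> [3 4 5]

This is presumably why Example 1 below can pass per-block hyperparameters (alphas, betas) alongside the parser: the parser tells the optimizer which slice of the flat weight vector each named block occupies.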
Example 1: test_sgd_parser
# Required module import: from hypergrad.nn_utils import VectorParser [as alias]
# Or: from hypergrad.nn_utils.VectorParser import add_shape [as alias]
def test_sgd_parser():
    N_weights = 6
    W0 = 0.1 * npr.randn(N_weights)
    N_data = 12
    batch_size = 4
    num_epochs = 4
    batch_idxs = BatchList(N_data, batch_size)

    # Partition the flat weight vector into three named blocks (2 + 1 + 3 = 6 weights).
    parser = VectorParser()
    parser.add_shape('first', [2,])
    parser.add_shape('second', [1,])
    parser.add_shape('third', [3,])
    N_weight_types = 3

    # One learning rate (alpha) and momentum (beta) per weight type and per SGD step.
    alphas = 0.1 * npr.rand(len(batch_idxs) * num_epochs, N_weight_types)
    betas = 0.5 + 0.2 * npr.rand(len(batch_idxs) * num_epochs, N_weight_types)
    meta = 0.1 * npr.randn(N_weights * 2)
    A = npr.randn(N_data, N_weights)

    def loss_fun(W, meta, i=None):
        idxs = batch_idxs.all_idxs if i is None else batch_idxs[i % len(batch_idxs)]
        sub_A = A[idxs, :]
        return np.dot(np.dot(W + meta[:N_weights] + meta[N_weights:], np.dot(sub_A.T, sub_A)), W)

    def full_loss(params):
        (W0, alphas, betas, meta) = params
        result = sgd_parsed(grad(loss_fun), kylist(W0, alphas, betas, meta), parser)
        return loss_fun(result, meta)

    # Compare the analytic gradient of the meta-objective against numerical differences.
    d_num = nd(full_loss, (W0, alphas, betas, meta))
    d_an_fun = grad(full_loss)
    d_an = d_an_fun([W0, alphas, betas, meta])
    for i, (an, num) in enumerate(zip(d_an, d_num[0])):
        assert np.allclose(an, num, rtol=1e-3, atol=1e-4), \
            "Type {0}, diffs are: {1}".format(i, an - num)
Example 2: make_parabola
# Required module import: from hypergrad.nn_utils import VectorParser [as alias]
# Or: from hypergrad.nn_utils.VectorParser import add_shape [as alias]
def make_parabola(d):
    parser = VectorParser()
    parser.add_shape('weights', d)
    dimscale = np.exp(np.linspace(-3, 3, d))
    offset = npr.randn(d)
    def loss(w, X=0.0, T=0.0, L2_reg=0.0):
        return np.dot((w - offset) * dimscale, (w - offset))
    return parser, loss
Example 3: make_toy_funs
# Required module import: from hypergrad.nn_utils import VectorParser [as alias]
# Or: from hypergrad.nn_utils.VectorParser import add_shape [as alias]
def make_toy_funs():
    parser = VectorParser()
    parser.add_shape('weights', 2)
    def rosenbrock(x):
        return sum(100.0 * (x[1:] - x[:-1] ** 2.0) ** 2.0 + (1 - x[:-1]) ** 2.0)
    def loss(W_vect, X=0.0, T=0.0, L2_reg=0.0):
        return 500 * logit(rosenbrock(W_vect) / 500)
    return parser, loss
Example 4: make_toy_funs
# Required module import: from hypergrad.nn_utils import VectorParser [as alias]
# Or: from hypergrad.nn_utils.VectorParser import add_shape [as alias]
def make_toy_funs():
    parser = VectorParser()
    parser.add_shape("weights", 2)
    def rosenbrock(w):
        x = w[1:]
        y = w[:-1]
        return sum(100.0 * (x - y ** 2.0) ** 2.0 + (1 - y) ** 2.0 + 200.0 * y)
    def loss(W_vect, X=0.0, T=0.0, L2_reg=0.0):
        return 800 * logit(rosenbrock(W_vect) / 500)
    return parser, loss