

Python nn_utils.VectorParser class code examples

This article collects typical usage examples of the Python class hypergrad.nn_utils.VectorParser. If you are wondering what the VectorParser class does, how it is used, or what real uses of it look like, the hand-picked code examples below may help.


Seven code examples of the VectorParser class are shown below, ordered by popularity by default.
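Before walking through the examples, here is a minimal usage sketch of VectorParser, inferred purely from how the class is used in the examples below. The calls to add_shape, item assignment, names, vect and new_vect all appear in those examples; everything else (exact shapes, return types, allocation behavior) is an assumption rather than documented API:

import numpy as np
from hypergrad.nn_utils import VectorParser

# Declare named parameter groups, as test_sgd_parser does; the parser keeps
# one flat vector and remembers how to slice it back apart per group.
parser = VectorParser()
parser.add_shape('first',  [2,])
parser.add_shape('second', [1,])
parser.add_shape('third',  [3,])
print(len(parser.names))                     # 3 named groups

# The run() examples instead build a parser by direct assignment:
hyperparams = VectorParser()
hyperparams['log_alphas']      = np.zeros((5, 3))
hyperparams['log_param_scale'] = np.zeros(3)

flat  = hyperparams.vect                     # single flat 1-D view of all groups
again = hyperparams.new_vect(flat.copy())    # same layout wrapped around a new vector
print(again['log_alphas'].shape)             # expected: (5, 3)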

Example 1: test_sgd_parser

def test_sgd_parser():
    N_weights = 6
    W0 = 0.1 * npr.randn(N_weights)
    N_data = 12
    batch_size = 4
    num_epochs = 4
    batch_idxs = BatchList(N_data, batch_size)

    parser = VectorParser()
    parser.add_shape('first',  [2,])
    parser.add_shape('second', [1,])
    parser.add_shape('third',  [3,])
    N_weight_types = 3

    alphas = 0.1 * npr.rand(len(batch_idxs) * num_epochs, N_weight_types)
    betas = 0.5 + 0.2 * npr.rand(len(batch_idxs) * num_epochs, N_weight_types)
    meta = 0.1 * npr.randn(N_weights*2)

    A = npr.randn(N_data, N_weights)
    def loss_fun(W, meta, i=None):
        idxs = batch_idxs.all_idxs if i is None else batch_idxs[i % len(batch_idxs)]
        sub_A = A[idxs, :]
        return np.dot(np.dot(W + meta[:N_weights] + meta[N_weights:], np.dot(sub_A.T, sub_A)), W)

    def full_loss(params):
        (W0, alphas, betas, meta) = params
        result = sgd_parsed(grad(loss_fun), kylist(W0, alphas, betas, meta), parser)
        return loss_fun(result, meta)

    d_num = nd(full_loss, (W0, alphas, betas, meta))
    d_an_fun = grad(full_loss)
    d_an = d_an_fun([W0, alphas, betas, meta])
    for i, (an, num) in enumerate(zip(d_an, d_num[0])):
        assert np.allclose(an, num, rtol=1e-3, atol=1e-4), \
            "Type {0}, diffs are: {1}".format(i, an - num)
Developer: ChinJY, Project: hypergrad, Lines: 35, Source: test_grads.py
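Example 1 compares the analytic hypergradient from grad(full_loss) against a numerical estimate from nd. As a self-contained illustration of that pattern (nd and the other hypergrad test helpers are not reproduced here; the helper below is a generic central-difference check, not hypergrad's nd):

import numpy as np

def central_diff_grad(f, x, eps=1e-5):
    """Estimate df/dx by central differences, one coordinate at a time."""
    g = np.zeros_like(x)
    for i in range(x.size):
        step = np.zeros_like(x)
        step[i] = eps
        g[i] = (f(x + step) - f(x - step)) / (2 * eps)
    return g

f  = lambda x: np.sum(x ** 2)                # analytic gradient is 2*x
x0 = np.array([1.0, -2.0, 3.0])
assert np.allclose(central_diff_grad(f, x0), 2 * x0, rtol=1e-3, atol=1e-4)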

Example 2: make_parabola

def make_parabola(d):
    parser = VectorParser()
    parser.add_shape('weights', d)
    dimscale = np.exp(np.linspace(-3, 3, d))
    offset = npr.randn(d)

    def loss(w, X=0.0, T=0.0, L2_reg=0.0):
        return np.dot((w - offset) * dimscale, (w - offset))

    return parser, loss
Developer: yinyumeng, Project: HyperParameterTuning, Lines: 10, Source: experiment.py
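A hypothetical way to exercise the returned pair (make_parabola itself is defined above; the rest, including the assumption that parser.vect holds one entry per declared weight, is illustrative only):

import numpy as np
import numpy.random as npr

parser, loss = make_parabola(5)

# The loss is an axis-aligned quadratic bowl centred at a hidden random offset,
# with per-dimension curvature ranging over exp(-3)..exp(3).
w = npr.randn(5)
print(loss(w))              # strictly positive away from the hidden offset
print(len(parser.vect))     # 5 entries in the single 'weights' group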

Example 3: make_toy_funs

def make_toy_funs():
    parser = VectorParser()
    parser.add_shape('weights', 2)

    def rosenbrock(x):
        return sum(100.0*(x[1:]-x[:-1]**2.0)**2.0 + (1-x[:-1])**2.0)

    def loss(W_vect, X=0.0, T=0.0, L2_reg=0.0):
        return 500 * logit(rosenbrock(W_vect) / 500)

    return parser, loss
Developer: ChinJY, Project: hypergrad, Lines: 11, Source: experiment.py

Example 4: make_toy_funs

def make_toy_funs():
    parser = VectorParser()
    parser.add_shape("weights", 2)

    def rosenbrock(w):
        x = w[1:]
        y = w[:-1]
        return sum(100.0 * (x - y ** 2.0) ** 2.0 + (1 - y) ** 2.0 + 200.0 * y)

    def loss(W_vect, X=0.0, T=0.0, L2_reg=0.0):
        return 800 * logit(rosenbrock(W_vect) / 500)

    return parser, loss
Developer: lizhangzhan, Project: hypergrad, Lines: 13, Source: experiment.py

Example 5: run

def run():
    train_data, valid_data, tests_data = load_data_dicts(N_train, N_valid, N_tests)
    parser, pred_fun, loss_fun, frac_err = make_nn_funs(layer_sizes)
    N_weight_types = len(parser.names)
    hyperparams = VectorParser()
    hyperparams['log_param_scale'] = np.full(N_weight_types, init_log_param_scale)
    hyperparams['log_alphas']      = np.full((N_iters, N_weight_types), init_log_alphas)
    hyperparams['invlogit_betas']  = np.full((N_iters, N_weight_types), init_invlogit_betas)
    fixed_hyperparams = VectorParser()
    fixed_hyperparams['log_L2_reg'] = np.full(N_weight_types, init_log_L2_reg)

    def primal_optimizer(hyperparam_vect, i_hyper):
        def indexed_loss_fun(w, L2_vect, i_iter):
            rs = RandomState((seed, i_hyper, i_iter))  # Deterministic seed needed for backwards pass.
            idxs = rs.randint(N_train, size=batch_size)
            return loss_fun(w, train_data['X'][idxs], train_data['T'][idxs], L2_vect)

        learning_curve_dict = defaultdict(list)
        def callback(x, v, g, i_iter):
            if i_iter % thin == 0:
                learning_curve_dict['learning_curve'].append(loss_fun(x, **train_data))
                learning_curve_dict['grad_norm'].append(np.linalg.norm(g))
                learning_curve_dict['weight_norm'].append(np.linalg.norm(x))
                learning_curve_dict['velocity_norm'].append(np.linalg.norm(v))

        cur_hyperparams = hyperparams.new_vect(hyperparam_vect)
        rs = RandomState((seed, i_hyper))
        W0 = fill_parser(parser, np.exp(cur_hyperparams['log_param_scale']))
        W0 *= rs.randn(W0.size)
        alphas = np.exp(cur_hyperparams['log_alphas'])
        betas  = logit(cur_hyperparams['invlogit_betas'])
        L2_reg = fill_parser(parser, np.exp(fixed_hyperparams['log_L2_reg']))
        W_opt = sgd_parsed(grad(indexed_loss_fun), kylist(W0, alphas, betas, L2_reg),
                           parser, callback=callback)
        return W_opt, learning_curve_dict

    def hyperloss(hyperparam_vect, i_hyper):
        W_opt, _ = primal_optimizer(hyperparam_vect, i_hyper)
        return loss_fun(W_opt, **train_data)
    hyperloss_grad = grad(hyperloss)

    initial_hypergrad = hyperloss_grad( hyperparams.vect, 0)
    parsed_init_hypergrad = hyperparams.new_vect(initial_hypergrad.copy())
    avg_hypergrad = initial_hypergrad.copy()
    for i in xrange(1, N_meta_iter):
        avg_hypergrad += hyperloss_grad( hyperparams.vect, i)
        print i
    parsed_avg_hypergrad = hyperparams.new_vect(avg_hypergrad)

    parser.vect = None # No need to pickle zeros
    return parser, parsed_init_hypergrad, parsed_avg_hypergrad
Developer: yinyumeng, Project: HyperParameterTuning, Lines: 51, Source: experiment.py
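The run() examples on this page lean on fill_parser, which is defined elsewhere in hypergrad and not shown here. From its usage (one scalar per named weight group, expanded into a full-length weight vector that is then multiplied elementwise by rs.randn(W0.size)), a plausible sketch is the following; the real helper may differ in its details:

import numpy as np

def fill_parser(parser, values):
    # Broadcast one value per named group into a flat vector covering every
    # entry of that group, in the parser's own group order (sketch only).
    pieces = []
    for value, name in zip(values, parser.names):
        group_size = parser[name].size      # number of scalar entries in this group
        pieces.append(np.full(group_size, value))
    return np.concatenate(pieces)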

Example 6: run

def run():
    train_data, valid_data, tests_data = load_data_dicts(N_train, N_valid, N_tests)
    parser, pred_fun, loss_fun, frac_err = make_nn_funs(layer_sizes)
    N_weight_types = len(parser.names)
    hyperparams = VectorParser()
    hyperparams['log_param_scale'] = np.full(N_weight_types, init_log_param_scale)
    hyperparams['log_alphas']      = np.full((N_iters, N_weight_types), init_log_alphas)
    hyperparams['invlogit_betas']  = np.full((N_iters, N_weight_types), init_invlogit_betas)
    fixed_hyperparams = VectorParser()
    fixed_hyperparams['log_L2_reg'] = np.full(N_weight_types, init_log_L2_reg)

    cur_primal_results = {}

    def primal_optimizer(hyperparam_vect, i_hyper):
        def indexed_loss_fun(w, L2_vect, i_iter):
            rs = RandomState((seed, i_hyper, i_iter))  # Deterministic seed needed for backwards pass.
            idxs = rs.randint(N_train, size=batch_size)
            return loss_fun(w, train_data['X'][idxs], train_data['T'][idxs], L2_vect)

        learning_curve_dict = defaultdict(list)
        def callback(x, v, g, i_iter):
            if i_iter % thin == 0:
                learning_curve_dict['learning_curve'].append(loss_fun(x, **train_data))
                learning_curve_dict['grad_norm'].append(np.linalg.norm(g))
                learning_curve_dict['weight_norm'].append(np.linalg.norm(x))
                learning_curve_dict['velocity_norm'].append(np.linalg.norm(v))

        cur_hyperparams = hyperparams.new_vect(hyperparam_vect)
        rs = RandomState((seed, i_hyper))
        W0 = fill_parser(parser, np.exp(cur_hyperparams['log_param_scale']))
        W0 *= rs.randn(W0.size)
        alphas = np.exp(cur_hyperparams['log_alphas'])
        betas  = logit(cur_hyperparams['invlogit_betas'])
        L2_reg = fill_parser(parser, np.exp(fixed_hyperparams['log_L2_reg']))
        W_opt = sgd_parsed(grad(indexed_loss_fun), kylist(W0, alphas, betas, L2_reg),
                           parser, callback=callback)
        cur_primal_results['weights'] = getval(W_opt).copy()
        cur_primal_results['learning_curve'] = getval(learning_curve_dict)
        return W_opt, learning_curve_dict

    def hyperloss(hyperparam_vect, i_hyper):
        W_opt, _ = primal_optimizer(hyperparam_vect, i_hyper)
        return loss_fun(W_opt, **train_data)
    hyperloss_grad = grad(hyperloss)

    meta_results = defaultdict(list)
    old_metagrad = [np.ones(hyperparams.vect.size)]
    def meta_callback(hyperparam_vect, i_hyper, metagrad=None):
        x, learning_curve_dict = cur_primal_results['weights'], cur_primal_results['learning_curve']
        cur_hyperparams = hyperparams.new_vect(hyperparam_vect.copy())
        for field in cur_hyperparams.names:
            meta_results[field].append(cur_hyperparams[field])
        meta_results['train_loss'].append(loss_fun(x, **train_data))
        meta_results['valid_loss'].append(loss_fun(x, **valid_data))
        meta_results['tests_loss'].append(loss_fun(x, **tests_data))
        meta_results['test_err'].append(frac_err(x, **tests_data))
        meta_results['learning_curves'].append(learning_curve_dict)
        meta_results['example_weights'] = x
        if metagrad is not None:
            meta_results['meta_grad_magnitude'].append(np.linalg.norm(metagrad))
            meta_results['meta_grad_angle'].append(np.dot(old_metagrad[0], metagrad) \
                                                   / (np.linalg.norm(metagrad)*
                                                      np.linalg.norm(old_metagrad[0])))
        old_metagrad[0] = metagrad
        print "Meta Epoch {0} Train loss {1:2.4f} Valid Loss {2:2.4f}" \
              " Test Loss {3:2.4f} Test Err {4:2.4f}".format(
            i_hyper, meta_results['train_loss'][-1], meta_results['valid_loss'][-1],
            meta_results['train_loss'][-1], meta_results['test_err'][-1])


    initial_hypergrad = hyperloss_grad( hyperparams.vect, 0)
    hypergrads = np.zeros((N_meta_iter, len(initial_hypergrad)))
    for i in xrange(N_meta_iter):
        hypergrads[i] = hyperloss_grad( hyperparams.vect, i)
        print i
    avg_hypergrad = np.mean(hypergrads, axis=0)
    parsed_avg_hypergrad = hyperparams.new_vect(avg_hypergrad)

    parser.vect = None # No need to pickle zeros
    return parser, parsed_avg_hypergrad
Developer: ChinJY, Project: hypergrad, Lines: 80, Source: experiment.py

Example 7: run

def run():
    train_data, valid_data, tests_data = load_data_dicts(N_train, N_valid, N_tests)
    parser, pred_fun, loss_fun, frac_err = make_nn_funs(layer_sizes)
    N_weight_types = len(parser.names)
    hyperparams = VectorParser()
    hyperparams['log_param_scale'] = np.full(N_weight_types, init_log_param_scale)
    hyperparams['log_alphas']      = np.full((N_iters, N_weight_types), init_log_alphas)
    hyperparams['invlogit_betas']  = np.full((N_iters, N_weight_types), init_invlogit_betas)
    fixed_hyperparams = VectorParser()
    fixed_hyperparams['log_L2_reg'] = np.full(N_weight_types, init_log_L2_reg)

    def primal_optimizer(hyperparam_vect, i_hyper):
        def indexed_loss_fun(w, L2_vect, i_iter):
            rs = RandomState((seed, i_hyper, i_iter))  # Deterministic seed needed for backwards pass.
            idxs = rs.randint(N_train, size=batch_size)
            return loss_fun(w, train_data['X'][idxs], train_data['T'][idxs], L2_vect)

        learning_curve_dict = defaultdict(list)
        def callback(x, v, g, i_iter):
            if i_iter % thin == 0:
                learning_curve_dict['learning_curve'].append(loss_fun(x, **train_data))
                learning_curve_dict['grad_norm'].append(np.linalg.norm(g))
                learning_curve_dict['weight_norm'].append(np.linalg.norm(x))
                learning_curve_dict['velocity_norm'].append(np.linalg.norm(v))

        init_hyperparams = hyperparams.new_vect(hyperparam_vect)
        rs = RandomState((seed, i_hyper))
        W0 = fill_parser(parser, np.exp(init_hyperparams['log_param_scale']))
        W0 *= rs.randn(W0.size)
        alphas = np.exp(init_hyperparams['log_alphas'])
        betas  = logit(init_hyperparams['invlogit_betas'])
        L2_reg = fill_parser(parser, np.exp(fixed_hyperparams['log_L2_reg']))
        W_opt = sgd_parsed(grad(indexed_loss_fun), kylist(W0, alphas, betas, L2_reg),
                           parser, callback=callback)
        return W_opt, learning_curve_dict

    def hyperloss(hyperparam_vect, i_hyper):
        W_opt, _ = primal_optimizer(hyperparam_vect, i_hyper)
        return loss_fun(W_opt, **train_data)
    hyperloss_grad = grad(hyperloss)

    meta_results = defaultdict(list)
    old_metagrad = [np.ones(hyperparams.vect.size)]
    def meta_callback(hyperparam_vect, i_hyper, metagrad=None):
        x, learning_curve_dict = primal_optimizer(hyperparam_vect, i_hyper)
        cur_hyperparams = hyperparams.new_vect(hyperparam_vect.copy())
        for field in cur_hyperparams.names:
            meta_results[field].append(cur_hyperparams[field])
        meta_results['train_loss'].append(loss_fun(x, **train_data))
        meta_results['valid_loss'].append(loss_fun(x, **valid_data))
        meta_results['tests_loss'].append(loss_fun(x, **tests_data))
        meta_results['test_err'].append(frac_err(x, **tests_data))
        meta_results['learning_curves'].append(learning_curve_dict)
        if metagrad is not None:
            meta_results['meta_grad_magnitude'].append(np.linalg.norm(metagrad))
            meta_results['meta_grad_angle'].append(np.dot(old_metagrad[0], metagrad) \
                                                   / (np.linalg.norm(metagrad)*
                                                      np.linalg.norm(old_metagrad[0])))
        old_metagrad[0] = metagrad
        print "Meta Epoch {0} Train loss {1:2.4f} Valid Loss {2:2.4f}" \
              " Test Loss {3:2.4f} Test Err {4:2.4f}".format(
            i_hyper, meta_results['train_loss'][-1], meta_results['valid_loss'][-1],
            meta_results['train_loss'][-1], meta_results['test_err'][-1])

    # Average many gradient evaluations at the initial point.
    hypergrads = np.zeros((N_gradients_in_average, hyperparams.vect.size))
    for i in xrange(N_gradients_in_average):
        hypergrads[i] = hyperloss_grad(hyperparams.vect, i)
        print i
    first_gradient = hypergrads[0]
    avg_gradient = np.mean(hypergrads, axis=0)

    # Now do a line search along that direction.
    parsed_avg_grad = hyperparams.new_vect(avg_gradient)
    stepsize_scale = stepsize_search_rescale/np.max(np.exp(parsed_avg_grad['log_alphas'].ravel()))
    stepsizes = np.linspace(-stepsize_scale, stepsize_scale, N_points_in_line_search)
    for i, stepsize in enumerate(stepsizes):
        cur_hypervect = hyperparams.vect - stepsize * avg_gradient
        meta_callback(cur_hypervect, 0)   # Use the same random seed every time.

    parser.vect = None # No need to pickle zeros
    return meta_results, parser, first_gradient, parsed_avg_grad, stepsizes
Developer: ChinJY, Project: hypergrad, Lines: 82, Source: experiment.py


Note: The hypergrad.nn_utils.VectorParser class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their authors, and copyright in the source code remains with the original authors. Please consult each project's license before redistributing or using the code; do not reproduce this article without permission.