

Python moves.izip function code examples

This article collects typical usage examples of the Python function theano.compat.six.moves.izip. If you are wondering what izip does, how to call it, or want to see it used in real code, the curated examples below should help.


The following 15 code examples of the izip function are shown, sorted by popularity by default.
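Before the examples, a quick note on the function itself: izip in theano.compat.six.moves is re-exported from six and behaves like Python 2's itertools.izip, pairing up several iterables lazily, element by element (on Python 3 it is simply the built-in zip). A minimal sketch of that behaviour, assuming a Theano version that still ships the theano.compat.six shim; the lists here are made up for illustration:

from theano.compat.six.moves import izip

params = ['W', 'b']
grads = [0.1, -0.2]

# izip yields one (param, grad) tuple per position and stops at the
# shortest input, without building an intermediate list.
for param, grad in izip(params, grads):
    print('%s %s' % (param, grad))
# W 0.1
# b -0.2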

Example 1: get_adadelta_update

def get_adadelta_update(params, grads, rho, eps):
    # E[g^2]_{t-1}
    E_g_square = []
    for p in params:
        tmp = theano.shared(np.zeros(p.get_value().shape, dtype=theano.config.floatX), borrow=True)
        E_g_square.append(tmp)
    # E[g^2]_t = rho * E[g^2]_{t-1} + (1 - rho) * g_t^2
    E_g_square_next = []
    for e, g in izip(E_g_square, grads):
        tmp = rho * e + (1.0 - rho) * (g**2)
        E_g_square_next.append(tmp)
    # E[dW^2]_{t-1}
    E_dW_square = []
    for p in params:
        tmp = theano.shared(np.zeros(p.get_value().shape, dtype=theano.config.floatX), borrow=True)
        E_dW_square.append(tmp)
    # dW_t = - {sqrt(E[dW^2]_{t-1} + eps) / sqrt(E[g^2]_t + eps)} * g_t
    # (the denominator uses the freshly accumulated E[g^2]_t, as in Zeiler's
    # AdaDelta paper)
    dW = []
    for ew, eg, g in izip(E_dW_square, E_g_square_next, grads):
        tmp = - (T.sqrt(ew + eps) / T.sqrt(eg + eps)) * g
        dW.append(tmp)
    # E[dW^2]_t = rho * E[dW^2]_{t-1} + (1 - rho) * dW_t^2
    E_dW_square_next = []
    for ew, d in izip(E_dW_square, dW):
        tmp = rho * ew + (1.0 - rho) * (d**2)
        E_dW_square_next.append(tmp)

    E_g_square_updates = zip(E_g_square, E_g_square_next)
    E_dW_square_updates = zip(E_dW_square, E_dW_square_next)
    params_updates = []
    for p, d in izip(params, dW):
        # W_t = W_{t-1} + dW
        params_updates.append((p, p + d))
    return E_g_square_updates + E_dW_square_updates + params_updates
Developer: proboscis, Project: GradProject, Lines: 34, Source: optimization.py
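get_adadelta_update only assembles the (shared variable, new value) pairs; compiling the training step is left to the caller. A hedged sketch of how its return value might be consumed, assuming the function above is importable; the toy linear model (x, y, W, b, the 28x10 shapes) is invented for illustration:

import numpy as np
import theano
import theano.tensor as T

# Toy model: a single softmax layer with parameters W and b.
x = T.matrix('x')
y = T.ivector('y')
W = theano.shared(np.zeros((28, 10), dtype=theano.config.floatX), name='W')
b = theano.shared(np.zeros(10, dtype=theano.config.floatX), name='b')
params = [W, b]

cost = T.nnet.categorical_crossentropy(
    T.nnet.softmax(T.dot(x, W) + b), y).mean()
grads = T.grad(cost, params)

# The returned list of (shared, update expression) pairs is exactly the
# form expected by the `updates` argument of theano.function.
updates = get_adadelta_update(params, grads, rho=0.95, eps=1e-6)
train_fn = theano.function([x, y], cost, updates=updates)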

Example 2: test_adadelta

def test_adadelta():
    """
    Make sure that learning_rule.AdaDelta obtains the same parameter values as
    with a hand-crafted AdaDelta implementation, given a dummy model and
    learning rate scaler for each parameter.

    Reference:
    "AdaDelta: An Adaptive Learning Rate Method", Matthew D. Zeiler.
    """

    # We include a cost other than SumOfParams so that data is actually
    # queried from the training set, and the expected number of updates
    # are applied.
    cost = SumOfCosts([SumOfOneHalfParamsSquared(), (0., DummyCost())])
    model = DummyModel(shapes, lr_scalers=scales)
    dataset = ArangeDataset(1)
    decay = 0.95

    sgd = SGD(cost=cost,
              learning_rate=learning_rate,
              learning_rule=AdaDelta(decay),
              batch_size=1)

    sgd.setup(model=model, dataset=dataset)

    state = {}
    for param in model.get_params():
        param_shape = param.get_value().shape
        state[param] = {}
        state[param]['g2'] = np.zeros(param_shape)
        state[param]['dx2'] = np.zeros(param_shape)

    def adadelta_manual(model, state):
        inc = []
        rval = []
        for scale, param in izip(scales, model.get_params()):
            pstate = state[param]
            param_val = param.get_value()
            # begin adadelta
            pstate['g2'] = decay * pstate['g2'] + (1 - decay) * param_val ** 2
            rms_g_t = np.sqrt(pstate['g2'] + scale * learning_rate)
            rms_dx_tm1 = np.sqrt(pstate['dx2'] + scale * learning_rate)
            dx_t = -rms_dx_tm1 / rms_g_t * param_val
            pstate['dx2'] = decay * pstate['dx2'] + (1 - decay) * dx_t ** 2
            rval += [param_val + dx_t]
        return rval

    manual = adadelta_manual(model, state)
    sgd.train(dataset=dataset)
    assert all(np.allclose(manual_param, sgd_param.get_value())
               for manual_param, sgd_param
               in izip(manual, model.get_params()))

    manual = adadelta_manual(model, state)
    sgd.train(dataset=dataset)
    assert all(np.allclose(manual_param, sgd_param.get_value())
               for manual_param, sgd_param in
               izip(manual, model.get_params()))
Developer: AlexArgus, Project: pylearn2, Lines: 58, Source: test_learning_rule.py

Example 3: test_adagrad

def test_adagrad():
    """
    Make sure that learning_rule.AdaGrad obtains the same parameter values as
    with a hand-crafted AdaGrad implementation, given a dummy model and
    learning rate scaler for each parameter.

    Reference:
    "Adaptive subgradient methods for online learning and
    stochastic optimization", Duchi J, Hazan E, Singer Y.
    """

    # We include a cost other than SumOfParams so that data is actually
    # queried from the training set, and the expected number of updates
    # are applied.
    cost = SumOfCosts([SumOfOneHalfParamsSquared(), (0., DummyCost())])
    model = DummyModel(shapes, lr_scalers=scales)
    dataset = ArangeDataset(1)

    sgd = SGD(cost=cost,
              learning_rate=learning_rate,
              learning_rule=AdaGrad(),
              batch_size=1)

    sgd.setup(model=model, dataset=dataset)

    state = {}
    for param in model.get_params():
        param_shape = param.get_value().shape
        state[param] = {}
        state[param]['sg2'] = np.zeros(param_shape)

    def adagrad_manual(model, state):
        rval = []
        for scale, param in izip(scales, model.get_params()):
            pstate = state[param]
            param_val = param.get_value()
            # begin adagrad
            pstate['sg2'] += param_val ** 2
            dx_t = - (scale * learning_rate
                      / np.sqrt(pstate['sg2'])
                      * param_val)
            rval += [param_val + dx_t]
        return rval

    manual = adagrad_manual(model, state)
    sgd.train(dataset=dataset)
    assert all(np.allclose(manual_param, sgd_param.get_value())
               for manual_param, sgd_param
               in izip(manual, model.get_params()))

    manual = adagrad_manual(model, state)
    sgd.train(dataset=dataset)
    assert all(np.allclose(manual_param, sgd_param.get_value())
               for manual_param, sgd_param in
               izip(manual, model.get_params()))
Developer: ASAPPinc, Project: pylearn2, Lines: 55, Source: test_learning_rule.py

Example 4: set_input_space

    def set_input_space(self, space):
        """ Note: this function will reset the parameters! """

        self.input_space = space

        if not isinstance(space, Conv2DSpace):
            raise BadInputSpaceError(self.__class__.__name__ +
                                     ".set_input_space "
                                     "expected a Conv2DSpace, got " +
                                     str(space) + " of type " +
                                     str(type(space)))

        rng = self.get_mlp().rng


        if self.pad != (0,0):
            output_shape = \
                [int(np.ceil((i_sh + 2. * k_pad - k_sh) / float(k_st))) + 1
                 for i_sh, k_sh, k_st, k_pad in izip(self.input_space.shape,
                                                     self.kernel_shape,
                                                     self.kernel_stride,
                                                     self.pad)]

        elif self.border_mode == 'valid':
            output_shape = [(self.input_space.shape[0] - self.kernel_shape[0])
                            / self.kernel_stride[0] + 1,
                            (self.input_space.shape[1] - self.kernel_shape[1])
                            / self.kernel_stride[1] + 1]
        elif self.border_mode == 'full':
            output_shape = [(self.input_space.shape[0] + self.kernel_shape[0])
                            / self.kernel_stride[0] - 1,
                            (self.input_space.shape[1] + self.kernel_shape[1])
                            / self.kernel_stride[1] - 1]

        print "In:", self.layer_name, self.input_space.shape, self.kernel_shape, self.kernel_stride, self.pad
        print "Out:", self.layer_name, output_shape


        self.detector_space = Conv2DSpace(shape=output_shape,
                                          num_channels=self.output_channels,
                                          axes=('b', 'c', 0, 1))

        self.initialize_transformer(rng)

        W, = self.transformer.get_params()
        W.name = self.layer_name + '_W'

        assert self.tied_b
        if self.tied_b:
            self.b = sharedX(np.zeros((self.detector_space.num_channels)) +
                             self.init_bias)
        else:
            self.b = sharedX(self.detector_space.get_origin() + self.init_bias)

        self.b.name = self.layer_name + '_b'

        logger.info('Input shape: {0}'.format(self.input_space.shape))
        logger.info('Detector space: {0}'.format(self.detector_space.shape))

        self.initialize_output_space()
Developer: kastnerkyle, Project: facedet, Lines: 60, Source: corrVariable.py
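A quick numerical check of the padded branch of the output-shape computation above, using made-up sizes (32x32 input, 5x5 kernel, stride 1, padding 2); the built-in zip stands in for izip so the snippet runs on its own:

import numpy as np

input_shape = (32, 32)
kernel_shape = (5, 5)
kernel_stride = (1, 1)
pad = (2, 2)

# Same formula as the padded case in set_input_space:
# out = ceil((i + 2 * pad - k) / stride) + 1
output_shape = [int(np.ceil((i_sh + 2. * k_pad - k_sh) / float(k_st))) + 1
                for i_sh, k_sh, k_st, k_pad in zip(input_shape, kernel_shape,
                                                   kernel_stride, pad)]
print(output_shape)  # [32, 32] -- padding of 2 preserves the spatial size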

Example 5: get_gradients

    def get_gradients(self, model, data, ** kwargs):
        """
        Provides the gradients of the cost function with respect to the model
        parameters.

        These are not necessarily those obtained by theano.tensor.grad;
        you may wish to use approximate or even intentionally incorrect
        gradients in some cases.

        Parameters
        ----------
        model : a pylearn2 Model instance
        data : a batch in cost.get_data_specs() form
        kwargs : dict
            Optional extra arguments, not used by the base class.

        Returns
        -------
        gradients : OrderedDict
            a dictionary mapping from the model's parameters
            to their gradients
            The default implementation is to compute the gradients
            using T.grad applied to the value returned by expr.
            However, subclasses may return other values for the gradient.
            For example, an intractable cost may return a sampling-based
            approximation to its gradient.
        updates : OrderedDict
            a dictionary mapping shared variables to updates that must
            be applied to them each time these gradients are computed.
            This is to facilitate computation of sampling-based approximate
            gradients.
            The parameters should never appear in the updates dictionary.
            This would imply that computing their gradient changes
            their value, thus making the gradient value outdated.
        """

        try:
            cost, mask = self.expr(model=model, data=data, **kwargs)
        except TypeError:
            # If anybody knows how to add type(self) to the exception message
            # but still preserve the stack trace, please do so
            # The current code does neither
            message = "Error while calling " + str(type(self)) + ".expr"
            reraise_as(TypeError(message))

        if cost is None:
            raise NotImplementedError(str(type(self)) +
                                      " represents an intractable cost and "
                                      "does not provide a gradient "
                                      "approximation scheme.")

        params = list(model.get_params())

        grads = T.grad(cost, params, disconnected_inputs='ignore')

        gradients = OrderedDict(izip(params, grads))

        updates = OrderedDict()

        return gradients, updates
Developer: nitbix, Project: pylearn2, Lines: 60, Source: cost.py
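The docstring above fixes the contract: gradients maps every model parameter to its gradient expression, while updates carries only auxiliary shared-variable updates (never the parameters themselves). A minimal sketch of how a caller might turn that pair into SGD updates; the helper name sgd_updates and the learning rate are hypothetical, not part of pylearn2's API:

from collections import OrderedDict

def sgd_updates(params, gradients, extra_updates, lr=0.01):
    # Start from the cost's auxiliary updates, then add one parameter
    # update per gradient; parameters never appear in extra_updates.
    updates = OrderedDict(extra_updates)
    for p in params:
        updates[p] = p - lr * gradients[p]
    return updates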

Example 6: test_momentum

def test_momentum():
    """
    Make sure that learning_rule.Momentum obtains the same parameter values as
    with a hand-crafted sgd w/ momentum implementation, given a dummy model and
    learning rate scaler for each parameter.
    """
    # We include a cost other than SumOfParams so that data is actually
    # queried from the training set, and the expected number of updates
    # are applied.
    cost = SumOfCosts([SumOfParams(), (0., DummyCost())])

    scales = [.01, .02, .05, 1., 5.]
    shapes = [(1,), (9,), (8, 7), (6, 5, 4), (3, 2, 2, 2)]

    model = DummyModel(shapes, lr_scalers=scales)
    dataset = ArangeDataset(1)
    learning_rate = .001
    momentum = 0.5

    sgd = SGD(cost=cost,
              learning_rate=learning_rate,
              learning_rule=Momentum(momentum),
              batch_size=1)

    sgd.setup(model=model, dataset=dataset)

    manual = [param.get_value() for param in model.get_params()]
    inc = [-learning_rate * scale for scale in scales]
    manual = [param + i for param, i in izip(manual, inc)]

    sgd.train(dataset=dataset)

    assert all(np.allclose(manual_param, sgd_param.get_value())
               for manual_param, sgd_param in
               izip(manual, model.get_params()))

    manual = [param - learning_rate * scale + i * momentum
              for param, scale, i in izip(manual, scales, inc)]

    sgd.train(dataset=dataset)

    assert all(np.allclose(manual_param, sgd_param.get_value())
               for manual_param, sgd_param in
               izip(manual, model.get_params()))
Developer: JackyRen, Project: pylearn2, Lines: 44, Source: test_learning_rule.py

Example 7: __init__

    def __init__(self, autoencoders):
        super(DeepComposedAutoencoder, self).__init__()
        self.fn = None
        self.cpu_only = False

        assert all(pre.get_output_space().dim == post.get_input_space().dim
                   for pre, post in izip(autoencoders[:-1], autoencoders[1:]))

        self.autoencoders = list(autoencoders)
        self.input_space = autoencoders[0].get_input_space()
        self.output_space = autoencoders[-1].get_output_space()
Developer: FelixLMU, Project: pylearn2, Lines: 11, Source: autoencoder.py
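The izip(autoencoders[:-1], autoencoders[1:]) idiom above pairs every layer with its successor, which is how the constructor checks that adjacent autoencoders fit together. A standalone illustration of the same pairwise pattern, with plain (in_dim, out_dim) tuples standing in for the autoencoder spaces and the built-in zip standing in for izip:

# Each tuple is (input dim, output dim) of one hypothetical autoencoder.
layer_dims = [(784, 500), (500, 200), (200, 50)]

compatible = all(pre_out == post_in
                 for (_, pre_out), (post_in, _)
                 in zip(layer_dims[:-1], layer_dims[1:]))
print(compatible)  # True: each layer's output size matches the next input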

Example 8: test_rmsprop

def test_rmsprop():
    """
    Make sure that learning_rule.RMSProp obtains the same parameter values as
    with a hand-crafted RMSProp implementation, given a dummy model and
    learning rate scaler for each parameter.
    """

    # We include a cost other than SumOfParams so that data is actually
    # queried from the training set, and the expected number of updates
    # are applied.
    cost = SumOfCosts([SumOfOneHalfParamsSquared(), (0., DummyCost())])

    scales = [.01, .02, .05, 1., 5.]
    shapes = [(1,), (9,), (8, 7), (6, 5, 4), (3, 2, 2, 2)]

    model = DummyModel(shapes, lr_scalers=scales)
    dataset = ArangeDataset(1)
    learning_rate = .001
    decay = 0.90
    max_scaling = 1e5

    sgd = SGD(cost=cost,
              learning_rate=learning_rate,
              learning_rule=RMSProp(decay),
              batch_size=1)

    sgd.setup(model=model, dataset=dataset)

    state = {}
    for param in model.get_params():
        param_shape = param.get_value().shape
        state[param] = {}
        state[param]['g2'] = np.zeros(param_shape)

    def rmsprop_manual(model, state):
        inc = []
        rval = []
        epsilon = 1. / max_scaling
        for scale, param in izip(scales, model.get_params()):
            pstate = state[param]
            param_val = param.get_value()
            # begin rmsprop
            pstate['g2'] = decay * pstate['g2'] + (1 - decay) * param_val ** 2
            rms_g_t = np.maximum(np.sqrt(pstate['g2']), epsilon)
            dx_t = - scale * learning_rate / rms_g_t * param_val
            rval += [param_val + dx_t]
        return rval

    manual = rmsprop_manual(model, state)
    sgd.train(dataset=dataset)
    assert all(np.allclose(manual_param, sgd_param.get_value())
               for manual_param, sgd_param
               in izip(manual, model.get_params()))
Developer: AlexArgus, Project: pylearn2, Lines: 53, Source: test_learning_rule.py

Example 9: adagrad_manual

 def adagrad_manual(model, state):
     rval = []
     for scale, param in izip(scales, model.get_params()):
         pstate = state[param]
         param_val = param.get_value()
         # begin adagrad
         pstate['sg2'] += param_val ** 2
         dx_t = - (scale * learning_rate
                   / np.sqrt(pstate['sg2'])
                   * param_val)
         rval += [param_val + dx_t]
     return rval
Developer: 123fengye741, Project: pylearn2, Lines: 12, Source: test_learning_rule.py

Example 10: get_gradients

 def get_gradients(self, model, data, **kwargs):
     """
     Overrides Cost.get_gradients so we can inject our theano.Op.
     This does a separate callback for each model param;
     consider rewriting your model to have a single param.
     """
     srng = RandomStreams(seed=232)
     params = list(model.get_params())
     grads = [OverwriteOp(self.grad, model)(
                  srng.uniform(size=i.shape, dtype=theano.config.floatX),
                  data)
              for i in params]
     gradients = OrderedDict(izip(params, grads))
     updates = OrderedDict()
     return gradients, updates
Developer: KennethPierce, Project: MLResearch, Lines: 12, Source: notheano.py

Example 11: rmsprop_manual

 def rmsprop_manual(model, state):
     inc = []
     rval = []
     epsilon = 1. / max_scaling
     for scale, param in izip(scales, model.get_params()):
         pstate = state[param]
         param_val = param.get_value()
         # begin rmsprop
         pstate['g2'] = decay * pstate['g2'] + (1 - decay) * param_val ** 2
         rms_g_t = np.maximum(np.sqrt(pstate['g2']), epsilon)
         dx_t = - scale * learning_rate / rms_g_t * param_val
         rval += [param_val + dx_t]
     return rval
Developer: AlexArgus, Project: pylearn2, Lines: 13, Source: test_learning_rule.py

Example 12: get_gradients

    def get_gradients(self, model, data, **kwargs):
        cost, neg_v = self._cost(model, data, **kwargs)

        params = list(model.get_params())

        grads = T.grad(cost, params, disconnected_inputs='ignore',
                       consider_constant=[neg_v])

        gradients = OrderedDict(izip(params, grads))

        updates = OrderedDict()

        return gradients, updates
Developer: ASAPPinc, Project: pylearn2, Lines: 13, Source: ebm_estimation.py

Example 13: adadelta_manual

 def adadelta_manual(model, state):
     rval = []
     for scale, param in izip(scales, model.get_params()):
         pstate = state[param]
         param_val = param.get_value()
         # begin adadelta
         pstate['g2'] = decay * pstate['g2'] + (1 - decay) * param_val ** 2
         rms_g_t = np.sqrt(pstate['g2'] + scale * learning_rate)
         rms_dx_tm1 = np.sqrt(pstate['dx2'] + scale * learning_rate)
         dx_t = -rms_dx_tm1 / rms_g_t * param_val
         pstate['dx2'] = decay * pstate['dx2'] + (1 - decay) * dx_t ** 2
         rval += [param_val + dx_t]
     return rval
Developer: 123fengye741, Project: pylearn2, Lines: 13, Source: test_learning_rule.py

Example 14: build_stacked_ae

def build_stacked_ae(nvis, nhids, act_enc, act_dec,
                     tied_weights=False, irange=1e-3, rng=None,
                     corruptor=None, contracting=False):
    """
    .. todo::

        WRITEME properly

    Allocate a stack of autoencoders.
    """
    rng = make_np_rng(rng, which_method='randn')
    layers = []
    final = {}
    # "Broadcast" arguments if they are singular, or accept sequences if
    # they are the same length as nhids
    for c in ['corruptor', 'contracting', 'act_enc', 'act_dec',
              'tied_weights', 'irange']:
        if type(locals()[c]) is not str and hasattr(locals()[c], '__len__'):
            assert len(nhids) == len(locals()[c])
            final[c] = locals()[c]
        else:
            final[c] = [locals()[c]] * len(nhids)
    # The number of visible units in each layer is the initial input
    # size and the first k-1 hidden unit sizes.
    nviss = [nvis] + nhids[:-1]
    seq = izip(nhids, nviss,
        final['act_enc'],
        final['act_dec'],
        final['corruptor'],
        final['contracting'],
        final['tied_weights'],
        final['irange'],
    )
    # Create each layer.
    for (nhid, nvis, act_enc, act_dec, corr, cae, tied, ir) in seq:
        args = (nvis, nhid, act_enc, act_dec, tied, ir, rng)
        if cae and corr is not None:
            raise ValueError("Can't specify denoising and contracting "
                             "objectives simultaneously")
        elif cae:
            autoenc = ContractiveAutoencoder(*args)
        elif corr is not None:
            autoenc = DenoisingAutoencoder(corr, *args)
        else:
            autoenc = Autoencoder(*args)
        layers.append(autoenc)

    # Create the stack
    return StackedBlocks(layers)
Developer: AlexArgus, Project: pylearn2, Lines: 49, Source: autoencoder.py
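In build_stacked_ae the loop over locals() "broadcasts" any argument passed as a single value to every layer, while an argument passed as a sequence must already have one entry per element of nhids. A hedged call sketch; the sizes and activation names below are illustrative, not taken from the original project:

stack = build_stacked_ae(nvis=784,
                         nhids=[500, 200, 50],
                         act_enc='sigmoid',          # single value, broadcast
                         act_dec='sigmoid',          # single value, broadcast
                         tied_weights=True,          # single value, broadcast
                         irange=[1e-3, 1e-3, 1e-4])  # one value per layer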

Example 15: test_adagrad

def test_adagrad():
    """
    Make sure that learning_rule.AdaGrad obtains the same parameter values as
    with a hand-crafted AdaGrad implementation, given a dummy model and
    learning rate scaler for each parameter.
    Reference:
    "Adaptive subgradient methods for online learning and
    stochastic optimization", Duchi J, Hazan E, Singer Y.
    """

    cost, model, dataset, sgd, state = prepare_adagrad_test()

    def adagrad_manual(model, state):
        rval = []
        for scale, param in izip(scales, model.get_params()):
            pstate = state[param]
            param_val = param.get_value()
            # begin adagrad
            pstate['sg2'] += param_val ** 2
            dx_t = - (scale * learning_rate
                      / np.sqrt(pstate['sg2'])
                      * param_val)
            rval += [param_val + dx_t]
        return rval

    manual = adagrad_manual(model, state)
    sgd.train(dataset=dataset)
    assert all(np.allclose(manual_param, sgd_param.get_value())
               for manual_param, sgd_param
               in izip(manual, model.get_params()))

    manual = adagrad_manual(model, state)
    sgd.train(dataset=dataset)
    assert all(np.allclose(manual_param, sgd_param.get_value())
               for manual_param, sgd_param in
               izip(manual, model.get_params()))
Developer: 123fengye741, Project: pylearn2, Lines: 36, Source: test_learning_rule.py


Note: the theano.compat.six.moves.izip examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from community-contributed open-source projects; copyright remains with the original authors, and distribution and use are subject to each project's License. Do not reproduce without permission.