

Python python2x.OrderedDict Method Code Examples

This article collects typical usage examples of the theano.compat.python2x.OrderedDict method in Python. If you are wondering what python2x.OrderedDict does, how to call it, or what real-world usage looks like, the curated snippets below should help. You can also explore other usage examples from the theano.compat.python2x module.


The following presents 15 code examples of the python2x.OrderedDict method, ordered by popularity.
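
A note on the import itself before the examples: theano.compat.python2x existed mainly to backport collections.OrderedDict to Python 2.6, and in later Theano releases the module was removed with its contents folded into theano.compat. A minimal, hedged import sketch (the fallback chain below is illustrative and not taken from any of the projects that follow):

try:
    # Older Theano releases shipped this Python 2.6 compatibility module.
    from theano.compat.python2x import OrderedDict
except ImportError:
    # On modern Theano/Python the standard-library class is equivalent.
    from collections import OrderedDict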

Example 1: get_updates

# Required import: from theano.compat import python2x [as alias]
# Alternatively: from theano.compat.python2x import OrderedDict [as alias]
def get_updates(self, gradients):
        """
        This returns the parameter updates to use during training. By default it applies only the (annealed) learning rate.

        Parameters
        ----------
        gradients : dict
            A dictionary mapping from the model's parameters to their gradients.

        Returns
        -------
        updates : OrderedDict
            A dictionary mapping from the old model parameters, to their new
            values after a single iteration of the learning rule.
        """
        log.debug('Setting up Stochastic Gradient Descent for optimizer...')
        updates = OrderedDict()
        for (param, gradient) in iteritems(gradients):
            scaled_lr = self.learning_rate * self.lr_scalers.get(param, 1.)
            updates[param] = param - scaled_lr * gradient
        return updates 
Developer: vitruvianscience | Project: OpenDeep | Lines: 23 | Source: optimizer.py
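
As a complement to the snippet above, the following self-contained sketch (the model, cost, and variable names are illustrative and not from OpenDeep) shows how an updates dictionary of this shape is typically compiled into a Theano training function:

import numpy as np
import theano
import theano.tensor as T
from collections import OrderedDict

# A tiny linear model: scalar prediction from a 3-dimensional input.
x = T.vector('x')
y = T.scalar('y')
w = theano.shared(np.zeros(3, dtype=theano.config.floatX), name='w')
cost = T.sqr(T.dot(w, x) - y)

# Plain SGD updates, mirroring the loop in get_updates above.
learning_rate = 0.01
updates = OrderedDict()
for param, gradient in [(w, T.grad(cost, w))]:
    updates[param] = param - learning_rate * gradient

train = theano.function([x, y], cost, updates=updates)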

Example 2: gradient_descent

# Required import: from theano.compat import python2x [as alias]
# Alternatively: from theano.compat.python2x import OrderedDict [as alias]
def gradient_descent(self, loss):
        """Momentum GD with gradient clipping."""
        grads = T.grad(loss, self.params)
        self.momentum_velocity_ = [0.] * len(grads)
        # Global L2 norm over all parameter gradients, used for clipping.
        grad_norm = T.sqrt(sum(T.sqr(g).sum() for g in grads))
        updates = OrderedDict()
        not_finite = T.or_(T.isnan(grad_norm), T.isinf(grad_norm))
        scaling_den = T.maximum(5.0, grad_norm)
        for n, (param, grad) in enumerate(zip(self.params, grads)):
            # If the norm is not finite, fall back to a gradient proportional
            # to the parameter (shrinking it toward zero); otherwise rescale
            # so the global norm is capped at 5.0.
            grad = T.switch(not_finite, 0.1 * param,
                            grad * (5.0 / scaling_den))
            velocity = self.momentum_velocity_[n]
            update_step = self.momentum * velocity - self.learning_rate * grad
            self.momentum_velocity_[n] = update_step
            updates[param] = param + update_step
        return updates
Developer: majingCUHK | Project: Rumor_RvNN | Lines: 18 | Source: TD_RvNN.py
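
The clipping in gradient_descent rescales the whole gradient so its global L2 norm never exceeds 5.0: multiplying by 5.0 / max(5.0, norm) is a no-op when the norm is already below the threshold. A NumPy restatement of just that rule (the threshold and values are illustrative):

import numpy as np

threshold = 5.0
grads = [np.array([3.0, 4.0]), np.array([12.0])]       # global norm = 13.0
global_norm = np.sqrt(sum((g ** 2).sum() for g in grads))
scale = threshold / max(threshold, global_norm)
clipped = [g * scale for g in grads]
# The clipped norm is min(global_norm, threshold), i.e. 5.0 here.
print(np.sqrt(sum((g ** 2).sum() for g in clipped)))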

Example 3: __init__

# Required import: from theano.compat import python2x [as alias]
# Alternatively: from theano.compat.python2x import OrderedDict [as alias]
def __init__(self, control_port, batch_port):
        self._worker = Worker(control_port=control_port, data_port=batch_port)

        data_shape = self._worker.send_req('get_data_shape')

        self._computed_sum = theano.shared(
            value=np.zeros(data_shape, dtype=theano.config.floatX),
            name='sum', borrow=True)

        self._worker.init_shared_params(params=[self._computed_sum],
                                        param_sync_rule=SUMSync())

        input = T.matrix(dtype=theano.config.floatX)
        batch_sum = T.sum(input, axis=0, dtype=theano.config.floatX)

        updates = OrderedDict()
        updates[self._computed_sum] = (self._computed_sum + batch_sum)

        self._update_sum = theano.function(name='learn',
                                           inputs=[input],
                                           updates=updates) 
Developer: mila-iqia | Project: platoon | Lines: 23 | Source: batched_pixel_sum_worker.py

Example 4: __init__

# Required import: from theano.compat import python2x [as alias]
# Alternatively: from theano.compat.python2x import OrderedDict [as alias]
def __init__(self, model):
        avg_updates = OrderedDict()
        t = sharedX(1.)
        self.param_to_mean = OrderedDict()
        for param in model.get_params():
            mean = sharedX(param.get_value())
            assert type(mean) == type(param)
            self.param_to_mean[param] = mean
            avg_updates[mean] = mean - (mean - param) / t
            avg_updates[t] = t + 1.
        self.avg = function([], updates = avg_updates) 
Developer: goodfeli | Project: adversarial | Lines: 13 | Source: sgd_alt.py
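
The recurrence mean <- mean - (mean - param) / t, with t starting at 1 and incremented each call, is the incremental form of the arithmetic mean of the parameter trajectory; the first call simply overwrites mean with param. A quick NumPy check of that identity (plain floats, values illustrative):

import numpy as np

params = [1.0, 3.0, 5.0, 7.0]     # parameter value seen at each update
mean, t = 0.0, 1.0                # the initial mean is irrelevant
history = []
for p in params:
    mean = mean - (mean - p) / t  # same recurrence as avg_updates above
    t += 1.0
    history.append(p)
    assert np.isclose(mean, np.mean(history))
print(mean)                       # 4.0, the arithmetic mean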

Example 5: on_monitor

# Required import: from theano.compat import python2x [as alias]
# Alternatively: from theano.compat.python2x import OrderedDict [as alias]
def on_monitor(self, model, dataset, algorithm):
        """
        Make sure Polyak-averaged model gets monitored.
        Save the model if necessary.

        Parameters
        ----------
        model : a Model instance
        dataset : Dataset
        algorithm : WRITEME
        """
        if self._count == self.start:
            self._worker = _PolyakWorker(model)
            algorithm.update_callbacks.append(self._worker)
            #HACK
            try:
                model.add_polyak_channels(self._worker.param_to_mean,
                                          algorithm.monitoring_dataset)
            except AttributeError:
                pass
        elif self.save_path is not None and self._count > self.start and \
                self._count % self.save_freq == 0:
            saved_params = OrderedDict()
            for param in model.get_params():
                saved_params[param] = param.get_value()
                param.set_value(self._worker.param_to_mean[param].get_value())
            serial.save(self.save_path, model)
            for param in model.get_params():
                param.set_value(saved_params[param])
        self._count += 1 
Developer: goodfeli | Project: adversarial | Lines: 32 | Source: sgd_alt.py

Example 6: get_lr_scalers

# Required import: from theano.compat import python2x [as alias]
# Alternatively: from theano.compat.python2x import OrderedDict [as alias]
def get_lr_scalers(self):

    rval = OrderedDict()

    if not hasattr(self, 'b_lr_scale'):
        self.b_lr_scale = None

    if self.b_lr_scale is not None:
        assert isinstance(self.b_lr_scale, float)
        rval[self.b] = self.b_lr_scale

    return rval 
Developer: alumae | Project: kaldi-nnet-dur-model | Lines: 14 | Source: durmodel_elements.py

Example 7: get_layer_monitoring_channels

# Required import: from theano.compat import python2x [as alias]
# Alternatively: from theano.compat.python2x import OrderedDict [as alias]
def get_layer_monitoring_channels(self, state_below=None, state=None,
                                    target=None):
    b = self.b
    rval = OrderedDict([('bias_min', b.min()),
                        ('bias_mean', b.mean()),
                        ('bias_max', b.max()),])
    return rval 
Developer: alumae | Project: kaldi-nnet-dur-model | Lines: 9 | Source: durmodel_elements.py

Example 8: get_params

# Required import: from theano.compat import python2x [as alias]
# Alternatively: from theano.compat.python2x import OrderedDict [as alias]
def get_params(self):
        """
        This returns the dictionary of theano shared variables that will be trained by the :class:`Optimizer`.
        These parameters are used in the gradient.

        This includes all of the parameters in every model in the Prototype, without duplication.

        Returns
        -------
        dict(str: SharedVariable)
            Dictionary of {string_name: theano shared variables} to be trained with an :class:`Optimizer`.
            These are the parameters to be trained.
        """
        params = OrderedDict()
        model_index = 0
        for model in self.models:
            if isinstance(model, Model):
                model_params = model.get_params()
                # append the parameters only if they aren't already in the list!
                for name, param in model_params.items():
                    if param not in list(params.values()):
                        name = model._classname + '_%d_' % model_index + name
                        params[name] = param
                        param.name = name
                model_index += 1
        return params 
Developer: vitruvianscience | Project: OpenDeep | Lines: 28 | Source: prototype.py

Example 9: two_step_backprop

# Required import: from theano.compat import python2x [as alias]
# Alternatively: from theano.compat.python2x import OrderedDict [as alias]
def two_step_backprop(mlp):
    """
    mlp: A SimpleMLP instance
    Returns:
        f1: a theano function
            Takes two arguments: a minibatch of examples and a minibatch of
            targets.
            Returns two values:
                1) The gradient of the loss on mlp.w_out
                2)  An auxiliary value of your choosing
        f2: Takes two arguments: a minibatch of examples, and the auxiliary
            value returned by f1.
            Returns the gradient of the loss on mlp.W_hid
            Should not make use of mlp.w_out at all!
    """

    # Run fprop
    X = T.matrix()
    y = T.vector()
    H, y_hat = mlp.fprop(X)
    l = loss(y_hat, y)

    g_w, g_H = T.grad(l, [mlp.w_out, H])

    f1 = function([X, y], [g_w, g_H])

    known_grads = OrderedDict()
    known_grads[H] = g_H
    g_W = T.grad(None, mlp.W_hid, known_grads=known_grads)

    f2 = function([X, g_H], g_W)

    return f1, f2 
Developer: goodfeli | Project: theano_exercises | Lines: 35 | Source: 04_two_step_backprop_soln.py
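
The trick that makes f2 independent of mlp.w_out is T.grad(None, wrt, known_grads=...), which starts backpropagation from a caller-supplied gradient rather than from a scalar cost. A minimal self-contained demo of just that mechanism (the names and toy expression are illustrative):

import numpy as np
import theano
import theano.tensor as T

x = T.vector('x')
h = x ** 2                                     # intermediate value
g_h = T.vector('g_h')                          # externally supplied dL/dh
g_x = T.grad(None, x, known_grads={h: g_h})    # chain rule: dL/dx = g_h * 2x
f = theano.function([x, g_h], g_x)
ones = np.ones(3, dtype=theano.config.floatX)
print(f(ones, ones))                           # [2. 2. 2.]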

Example 10: make_exchange_func

# Required import: from theano.compat import python2x [as alias]
# Alternatively: from theano.compat.python2x import OrderedDict [as alias]
def make_exchange_func(a, b):
    """
    a: a theano shared variable
    b: a theano shared variable
    Returns f
    where f is a theano function, that, when called, swaps the
    values in a and b
    f should not return anything
    """

    updates = OrderedDict()
    updates[a] = b
    updates[b] = a
    f = function([], updates=updates)
    return f 
Developer: goodfeli | Project: theano_exercises | Lines: 17 | Source: 02_shared_soln.py
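
The swap works because theano.function evaluates every update expression from the old values before writing any of them, so the two reads do not observe each other's assignments. A quick check using the function defined above (values illustrative):

import numpy as np
import theano

a = theano.shared(np.float32(1.0), name='a')
b = theano.shared(np.float32(2.0), name='b')
swap = make_exchange_func(a, b)
swap()
print(a.get_value(), b.get_value())  # 2.0 1.0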

Example 11: get_monitoring_channels

# Required import: from theano.compat import python2x [as alias]
# Alternatively: from theano.compat.python2x import OrderedDict [as alias]
def get_monitoring_channels(self, data):
        rval = OrderedDict()
        if self.encoder is not None:
            rval = self.encoder.get_layer_monitoring_channels(
                state_below=data
            )
        return rval 
Developer: laurent-dinh | Project: nice | Lines: 9 | Source: nice.py

Example 12: get_lr_scalers

# Required import: from theano.compat import python2x [as alias]
# Alternatively: from theano.compat.python2x import OrderedDict [as alias]
def get_lr_scalers(self):
        rval = OrderedDict()
        if self.encoder is not None:
            safe_update(rval, self.encoder.get_lr_scalers())
        return rval 
Developer: laurent-dinh | Project: nice | Lines: 7 | Source: nice.py

Example 13: get_updates

# Required import: from theano.compat import python2x [as alias]
# Alternatively: from theano.compat.python2x import OrderedDict [as alias]
def get_updates(self, learning_rate, grads, lr_scalers=None):

        updates = OrderedDict()
        for param in grads.keys():

            avg_grad_sqr = sharedX(np.zeros_like(param.get_value()))
            momentum = sharedX(np.zeros_like(param.get_value()))

            if param.name is not None:
                avg_grad_sqr.name = 'avg_grad_sqr_' + param.name

            new_avg_grad_sqr = self.averaging_coeff * avg_grad_sqr \
                + (1 - self.averaging_coeff) \
                * T.sqr(grads[param])

            rms_grad_t = T.sqrt(new_avg_grad_sqr)
            rms_grad_t = T.maximum(rms_grad_t, self.stabilizer)
            normalized_grad = grads[param] / rms_grad_t
            new_momentum = self.momentum * momentum \
                - learning_rate * normalized_grad

            updates[avg_grad_sqr] = new_avg_grad_sqr
            updates[momentum] = new_momentum
            updates[param] = param + new_momentum

        return updates 
Developer: laurent-dinh | Project: nice | Lines: 28 | Source: learning_rule.py
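
Numerically, each step of the rule above keeps an exponential moving average of the squared gradient, divides the gradient by its RMS (floored at stabilizer), and folds the result into a momentum term, an RMSProp-with-momentum scheme. A scalar NumPy restatement of a single step (all constants are illustrative):

import numpy as np

averaging_coeff, stabilizer, momentum_coeff, lr = 0.95, 1e-4, 0.9, 0.01
grad, avg_grad_sqr, momentum, param = 0.5, 0.0, 0.0, 1.0

avg_grad_sqr = (averaging_coeff * avg_grad_sqr
                + (1 - averaging_coeff) * grad ** 2)
rms_grad = max(np.sqrt(avg_grad_sqr), stabilizer)      # RMS, floored
momentum = momentum_coeff * momentum - lr * grad / rms_grad
param += momentum                                      # one parameter step
print(param)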

Example 14: get_updates

# Required import: from theano.compat import python2x [as alias]
# Alternatively: from theano.compat.python2x import OrderedDict [as alias]
def get_updates(self, learning_rate, grads, lr_scalers=None):
        """
        Provides the symbolic (theano) description of the updates needed to
        perform this learning rule.

        Parameters
        ----------
        learning_rate : float
            Learning rate coefficient.
        grads : dict
            A dictionary mapping from the model's parameters to their
            gradients.
        lr_scalers : dict
            A dictionary mapping from the model's parameters to a learning
            rate multiplier.

        Returns
        -------
        updates : OrderedDict
            A dictionary mapping from the old model parameters, to their new
            values after a single iteration of the learning rule.

        Notes
        -----
        e.g. for standard SGD, one would return `sgd_rule_updates` defined
        below. Note that such a `LearningRule` object is not implemented, as
        these updates are implemented by default when the `learning_rule`
        parameter of sgd.SGD.__init__ is None.

        .. code-block:: python

            sgd_rule_updates = OrderedDict()
            for (param, grad) in grads.iteritems():
            sgd_rule_updates[param] = (param - learning_rate *
                                       lr_scalers.get(param, 1.) * grad)
        """
        raise NotImplementedError(str(type(self)) + " does not implement "
                                  "get_updates.") 
Developer: caglar | Project: Attentive_reader | Lines: 40 | Source: learning_rule.py
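
For concreteness, a minimal subclass implementing exactly the SGD rule sketched in the docstring might look like the following (assuming the enclosing abstract class is named LearningRule, as in pylearn2-style code; this is a sketch, not part of the original project):

from collections import OrderedDict

class SGDLearningRule(LearningRule):
    def get_updates(self, learning_rate, grads, lr_scalers=None):
        lr_scalers = lr_scalers or {}
        updates = OrderedDict()
        for param, grad in grads.items():
            updates[param] = (param - learning_rate *
                              lr_scalers.get(param, 1.) * grad)
        return updates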

Example 15: __init__

# Required import: from theano.compat import python2x [as alias]
# Alternatively: from theano.compat.python2x import OrderedDict [as alias]
def __init__(self, dataset, batch_size):
        self._batch_size = batch_size
        self._dataset = dataset

        self._computed_sum = theano.shared(value=np.zeros(dataset.shape[1], dtype=theano.config.floatX), name='sum', borrow=True)

        input = T.matrix(dtype=theano.config.floatX)
        batch_sum = T.sum(input, axis=0, dtype=theano.config.floatX)

        updates = OrderedDict()
        updates[self._computed_sum] = (self._computed_sum + batch_sum)

        self._update_sum = theano.function(name='learn',
                                           inputs=[input],
                                           updates=updates) 
Developer: mila-iqia | Project: platoon | Lines: 17 | Source: batched_pixel_sum.py
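
A hedged driver for the accumulator above (the class name BatchedPixelSum is an assumption, since the snippet only shows __init__; the name is taken from the source file batched_pixel_sum.py):

import numpy as np

dataset = np.arange(12, dtype='float32').reshape(6, 2)
acc = BatchedPixelSum(dataset, batch_size=3)           # hypothetical class name
for start in range(0, len(dataset), acc._batch_size):
    acc._update_sum(dataset[start:start + acc._batch_size])
print(acc._computed_sum.get_value())                   # column sums: [30. 36.]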


Note: The theano.compat.python2x.OrderedDict examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects, and copyright remains with the original authors; consult each project's License before distributing or using the code. Do not reproduce without permission.