

Python OrderedDict.values Method Code Examples

This article collects typical usage examples of the Python method pylearn2.compat.OrderedDict.values. If you are wondering what OrderedDict.values does, how to call it, or what it looks like in practice, the curated code examples below may help. You can also read more about the containing class, pylearn2.compat.OrderedDict, and its other usage examples.


Six code examples of OrderedDict.values are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
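Before the examples, a minimal self-contained sketch of the method itself may be useful (this snippet is not taken from any of the examples below): pylearn2.compat.OrderedDict behaves like the standard collections.OrderedDict, so values() returns the stored values in insertion order, which is exactly what the examples rely on for deterministic iteration over parameters and monitoring channels.

# Minimal sketch (not from the examples below): values() preserves insertion order.
from pylearn2.compat import OrderedDict

scalers = OrderedDict()
scalers['W'] = 0.5
scalers['b'] = 1.0
print(list(scalers.values()))  # prints [0.5, 1.0]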

Example 1: get_lr_scalers

# Required module import: from pylearn2.compat import OrderedDict [as alias]
# Or: from pylearn2.compat.OrderedDict import values [as alias]
    def get_lr_scalers(self):
        """
        .. todo::

            WRITEME
        """
        rval = OrderedDict()

        params = self.get_params()

        for layer in self.hidden_layers + [self.visible_layer]:
            contrib = layer.get_lr_scalers()

            # No two layers can contend to scale a parameter
            assert not any([key in rval for key in contrib])
            # Don't try to scale anything that's not a parameter
            assert all([key in params for key in contrib])

            rval.update(contrib)
        assert all([isinstance(val, float) for val in rval.values()])

        return rval
Developer ID: HBadertscher, Project: pylearn2, Lines of code: 24, Source file: dbm.py
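For context, the OrderedDict returned by get_lr_scalers maps parameters to per-parameter learning-rate multipliers. The sketch below is a hypothetical illustration of how such a mapping is typically consumed, not pylearn2's actual training-algorithm code; the helper name and the default multiplier of 1.0 are assumptions.

# Hypothetical sketch of consuming the result of get_lr_scalers().
def effective_learning_rates(params, base_lr, lr_scalers):
    return {p: base_lr * lr_scalers.get(p, 1.0) for p in params}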

Example 2: DRPROP

# Required module import: from pylearn2.compat import OrderedDict [as alias]
# Or: from pylearn2.compat.OrderedDict import values [as alias]
class DRPROP(LearningRule):
    def __init__(
        self,
        decrease_rate=0.5,
        increase_rate=1.2,
        min_rate=1e-6,
        max_rate=50,
        switching_threshold=1e-6
    ):
        assert increase_rate > 1.
        assert decrease_rate < 1.
        self.decrease_rate = sharedX(decrease_rate, 'decrease_rate')
        self.increase_rate = sharedX(increase_rate, 'increase_rate')
        self.min_rate = min_rate
        self.max_rate = max_rate
        self.switching_threshold = switching_threshold
        self.epsilons = OrderedDict()
        self.gt_epsilons = OrderedDict()
        self.lt_epsilons = OrderedDict()
        self.eq_epsilons = OrderedDict()

    def add_channels_to_monitor(self, monitor, monitoring_dataset):
        monitor.add_channel(
            'rprop_decrease_rate',
            ipt=None,
            val=self.decrease_rate,
            dataset=monitoring_dataset,
            data_specs=(NullSpace(), '')
        )
        monitor.add_channel(
            'rprop_increase_rate',
            ipt=None,
            val=self.increase_rate,
            dataset=monitoring_dataset,
            data_specs=(NullSpace(), '')
        )
        #for gt_epsilon in self.gt_epsilons.values():
        #    monitor.add_channel(
        #        gt_epsilon.name,
        #        ipt=None,
        #        val=T.sum(gt_epsilon),
        #        dataset=monitoring_dataset,
        #        data_specs=(NullSpace(), '')
        #    )
        #for lt_epsilon in self.lt_epsilons.values():
        #    monitor.add_channel(
        #        lt_epsilon.name,
        #        ipt=None,
        #        val=T.sum(lt_epsilon),
        #        dataset=monitoring_dataset,
        #        data_specs=(NullSpace(), '')
        #    )
        #for eq_epsilon in self.eq_epsilons.values():
        #    monitor.add_channel(
        #        eq_epsilon.name,
        #        ipt=None,
        #        val=T.sum(eq_epsilon),
        #        dataset=monitoring_dataset,
        #        data_specs=(NullSpace(), '')
        #    )
        for epsilon in self.epsilons.values():
            monitor.add_channel(
                epsilon.name + '_sum',
                ipt=None,
                val=T.sum(epsilon),
                dataset=monitoring_dataset,
                data_specs=(NullSpace(), '')
            )
            monitor.add_channel(
                epsilon.name + '_min',
                ipt=None,
                val=T.min(epsilon),
                dataset=monitoring_dataset,
                data_specs=(NullSpace(), '')
            )
            monitor.add_channel(
                epsilon.name + '_max',
                ipt=None,
                val=T.max(epsilon),
                dataset=monitoring_dataset,
                data_specs=(NullSpace(), '')
            )

    def get_updates(self, learning_rate, grads, lr_scalers=None,
            global_error=None,dropout_mask=None):
        updates = OrderedDict()

        for param, grad in grads.iteritems():
            # Create the required shared variables
            lr = lr_scalers.get(param, learning_rate.get_value())
            delta = sharedX(
                np.zeros_like(param.get_value()) + lr,
                borrow=True
            )
            previous_grad = sharedX(
                np.zeros_like(param.get_value()),
                borrow=True
            )
            epsilons = sharedX(
                np.zeros_like(param.get_value()),
#......... remaining code omitted .........
Developer ID: nitbix, Project: pylearn2, Lines of code: 103, Source file: learning_rule.py
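The nested T.switch expressions built by the (truncated) get_updates body implement the classic Rprop step-size rule: grow the step when the gradient keeps its sign, shrink it when the sign flips, and clip the result to [min_rate, max_rate]. The NumPy function below is an illustrative restatement of that rule under those assumptions, not the symbolic Theano graph the class actually constructs.

# Illustrative NumPy version of the Rprop step-size adaptation (an assumption
# based on the visible parameters; the original Theano code is truncated).
import numpy as np

def rprop_delta(grad, prev_grad, delta,
                increase_rate=1.2, decrease_rate=0.5,
                min_rate=1e-6, max_rate=50.0):
    sign = grad * prev_grad
    delta = np.where(sign > 0, delta * increase_rate,
                     np.where(sign < 0, delta * decrease_rate, delta))
    return np.clip(delta, min_rate, max_rate)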

Example 3: DROP_RPROP

# Required module import: from pylearn2.compat import OrderedDict [as alias]
# Or: from pylearn2.compat.OrderedDict import values [as alias]
class DROP_RPROP(LearningRule):
    def __init__(
        self,
        decrease_rate=0.5,
        increase_rate=1.2,
        min_rate=1e-6,
        max_rate=50
    ):
        assert increase_rate > 1.
        assert decrease_rate < 1.
        self.decrease_rate = sharedX(decrease_rate, 'decrease_rate')
        self.increase_rate = sharedX(increase_rate, 'increase_rate')
        self.min_rate = min_rate
        self.max_rate = max_rate
        self.zeros = OrderedDict()

    def add_channels_to_monitor(self, monitor, monitoring_dataset):
        monitor.add_channel(
            'rprop_decrease_rate',
            ipt=None,
            val=self.decrease_rate,
            dataset=monitoring_dataset,
            data_specs=(NullSpace(), '')
        )
        monitor.add_channel(
            'rprop_increase_rate',
            ipt=None,
            val=self.increase_rate,
            dataset=monitoring_dataset,
            data_specs=(NullSpace(), '')
        )
        for zero in self.zeros.values():
            monitor.add_channel(
                zero.name,
                ipt=None,
                val=T.sum(zero),
                dataset=monitoring_dataset,
                data_specs=(NullSpace(), '')
            )

    def get_updates(self, learning_rate, grads, lr_scalers=None,
            global_error=None,masks=None):
        updates = OrderedDict()

        for param, grad in grads.iteritems():
            # Create required shared variables
            lr = lr_scalers.get(param, learning_rate.get_value())
            delta = sharedX(
                np.zeros_like(param.get_value()) + lr,
                borrow=True
            )
            previous_grad = sharedX(
                np.zeros_like(param.get_value()),
                borrow=True
            )
            zeros = sharedX(
                np.zeros_like(param.get_value()),
                borrow=True
            )
            

            layer_name = re.sub('_W$', '', param.name)
            if re.match(r'.*_W$', param.name) and layer_name in masks:
                mask = masks[layer_name]
                masked_grad = T.gt(T.dot(mask.T, T.dot(mask, grad)), 0.)
            else:
                masked_grad = 1.  # T.ones_like(grad)

            # Name variables according to the parameter name
            if param.name is not None:
                delta.name = 'delta_'+param.name
                zeros.name = 'zeros_' + param.name
                previous_grad.name = 'previous_grad_' + param.name

            self.zeros[param] = zeros
            temp = grad * previous_grad
            delta_inc = T.switch(
                    T.neq(grad,0.),
                    T.clip(
                        T.switch(
                            T.eq(temp, 0.),
                            delta,
                            T.switch(
                                T.lt(temp, 0.),
                                delta*self.decrease_rate,
                                delta*self.increase_rate
                            )
                        ),
                        self.min_rate,
                        self.max_rate
                    ),
                    delta
            )


            previous_grad_inc = T.switch(
                T.gt(masked_grad,0.),
                T.switch(
                    T.gt(temp,0.),
                    grad,
#......... remaining code omitted .........
Developer ID: nitbix, Project: pylearn2, Lines of code: 103, Source file: learning_rule.py
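The distinguishing detail of this variant is the masked_grad computed above: for weight matrices whose layer has a dropout mask, the gradient is projected through the mask and thresholded, so entries belonging to dropped units come out as 0 and are left untouched by the subsequent T.switch updates. The NumPy helper below restates that check; the assumed mask shape (kept_units, total_units) is an inference from the visible code, since the rest of get_updates is truncated.

# Illustrative NumPy equivalent of the masking check above (mask shape is an
# assumption; only gradient entries reachable through the mask stay active).
import numpy as np

def dropout_masked_grad(mask, grad):
    return np.dot(mask.T, np.dot(mask, grad)) > 0.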

Example 4: RMSProp

# Required module import: from pylearn2.compat import OrderedDict [as alias]
# Or: from pylearn2.compat.OrderedDict import values [as alias]
class RMSProp(LearningRule):
    """
    Implements the RMSProp learning rule.

    The RMSProp learning rule is described by Hinton in `lecture 6
    <http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_
    of the Coursera Neural Networks for Machine Learning course.

    In short, Hinton suggests "[the] magnitude of the gradient can be very
    different for different weights and can change during learning.  This
    makes it hard to choose a global learning rate." RMSProp solves this
    problem by "[dividing] the learning rate for a weight by a running
    average of the magnitudes of recent gradients for that weight."


    Parameters
    ----------
    decay : float, optional
        Decay constant similar to that used in AdaDelta and Momentum methods.
    max_scaling : float, optional
        Restrict the RMSProp gradient scaling coefficient to values
        below `max_scaling`.

    Notes
    -----
    An instance of this LearningRule should only be used with one
    TrainingAlgorithm, and its get_updates method should be called
    only once. This is required in order to make the monitoring
    channels correctly report the moving averages.
    """

    def __init__(self, decay=0.9, max_scaling=1e5):
        assert 0. <= decay < 1.
        assert max_scaling > 0
        self.decay = sharedX(decay, 'decay')
        self.epsilon = 1. / max_scaling
        self.mean_square_grads = OrderedDict()

    @wraps(LearningRule.add_channels_to_monitor)
    def add_channels_to_monitor(self, monitor, monitoring_dataset):
        """
        The channels added are the min, mean, and max of the
        mean_square_grad of each parameter.
        """

        channel_mapping = {
            '_min': T.min,
            '_max': T.max,
            '_mean': T.mean
        }

        for mean_square_grad in self.mean_square_grads.values():
            for suffix, op in channel_mapping.items():
                monitor.add_channel(
                    name=(mean_square_grad.name + suffix),
                    ipt=None,
                    val=op(mean_square_grad),
                    data_specs=(NullSpace(), ''),
                    dataset=monitoring_dataset)
        return

    def get_updates(self, learning_rate, grads, lr_scalers=None):
        """
        Provides the symbolic (theano) description of the updates needed to
        perform this learning rule. See Notes for side-effects.

        Parameters
        ----------
        learning_rate : float
            Learning rate coefficient.
        grads : dict
            A dictionary mapping from the model's parameters to their
            gradients.
        lr_scalers : dict
            A dictionary mapping from the model's parameters to a learning
            rate multiplier.

        Returns
        -------
        updates : OrderedDict
            A dictionary mapping from the old model parameters, to their new
            values after a single iteration of the learning rule.

        Notes
        -----
        This method has the side effect of storing the moving average
        of the square gradient in `self.mean_square_grads`. This is
        necessary in order for the monitoring channels to be able
        to track the value of these moving averages.
        Therefore, this method should only get called once for each
        instance of RMSProp.
        """

        updates = OrderedDict()
        for param in grads:

            # mean_squared_grad := E[g^2]_{t-1}
            mean_square_grad = sharedX(param.get_value() * 0.)

            if param.name is None:
#......... remaining code omitted .........
Developer ID: nitbix, Project: pylearn2, Lines of code: 103, Source file: learning_rule.py
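The omitted part of get_updates builds the symbolic update described in the docstring: keep a decayed running mean of squared gradients per parameter and divide the learning rate by its square root, floored at epsilon = 1 / max_scaling. The NumPy function below restates that arithmetic for a single step, as an illustration rather than the omitted Theano code.

# NumPy sketch of one RMSProp step; names mirror the class attributes above,
# but this is an illustration, not the truncated Theano implementation.
import numpy as np

def rmsprop_step(param, grad, mean_square_grad, lr, decay=0.9, epsilon=1e-5):
    mean_square_grad = decay * mean_square_grad + (1. - decay) * grad ** 2
    rms = np.maximum(np.sqrt(mean_square_grad), epsilon)
    return param - lr * grad / rms, mean_square_grad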

Example 5: Monitor

# Required module import: from pylearn2.compat import OrderedDict [as alias]
# Or: from pylearn2.compat.OrderedDict import values [as alias]
class Monitor(object):
    """
    A class for monitoring Models while they are being trained.

    A monitor object records the number of minibatches and number of
    examples the model has trained, as well as any number of "channels"
    that track quantities of interest (examples: the objective
    function, measures of hidden unit activity, reconstruction error,
    sum of squared second derivatives, average norm of the weight
    vectors, etc.)

    Parameters
    ----------
    model : `pylearn2.models.model.Model`

    Attributes
    ----------
    on_channel_conflict : string
        `error` : raise an error when there is a conflict,
            i.e. a channel with the same name is created twice
        `copy_history` : create the new channel and transfer the
            history from the old monitor
        `overwrite` : create the new channel without taking the
            old monitor into account
    """

    def __init__(self, model):
        self.training_succeeded = False
        self.model = model
        self.channels = OrderedDict()
        self._num_batches_seen = 0
        self._examples_seen = 0
        self._epochs_seen = 0
        self._datasets = []
        self._iteration_mode = []
        self._batch_size = []
        self._num_batches = []
        self._dirty = True
        self._rng_seed = []
        self.names_to_del = ['theano_function_mode']
        self.t0 = time.time()
        self.theano_function_mode = None
        self.on_channel_conflict = 'error'

        # Initialize self._nested_data_specs, self._data_specs_mapping,
        # and self._flat_data_specs
        self._build_data_specs()

    def _build_data_specs(self):
        """
        Computes a nested data_specs for input and all channels

        Also computes the mapping to flatten it. This function is
        called from redo_theano.
        """
        # Ask the model what it needs
        m_space, m_source = self.model.get_monitoring_data_specs()
        input_spaces = [m_space]
        input_sources = [m_source]
        for channel in self.channels.values():
            space = channel.data_specs[0]
            assert isinstance(space, Space)
            input_spaces.append(space)
            input_sources.append(channel.data_specs[1])

        nested_space = CompositeSpace(input_spaces)
        nested_source = tuple(input_sources)

        self._nested_data_specs = (nested_space, nested_source)
        self._data_specs_mapping = DataSpecsMapping(self._nested_data_specs)

        flat_space = self._data_specs_mapping.flatten(nested_space,
                                                      return_tuple=True)
        flat_source = self._data_specs_mapping.flatten(nested_source,
                                                       return_tuple=True)
        self._flat_data_specs = (CompositeSpace(flat_space), flat_source)

    def set_theano_function_mode(self, mode):
        """
        .. todo::

            WRITEME

        Parameters
        ----------
        mode : theano.compile.Mode
            Theano functions for the monitoring channels will be
            compiled and run using this mode.
        """
        if self.theano_function_mode != mode:
            self._dirty = True
            self.theano_function_mode = mode

    def add_dataset(self, dataset, mode='sequential', batch_size=None,
                    num_batches=None, seed=None):
        """
        Determines the data used to calculate the values of each channel.

        Parameters
        ----------
#......... remaining code omitted .........
Developer ID: 123fengye741, Project: pylearn2, Lines of code: 103, Source file: monitor.py
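A brief usage note: in pylearn2 a Monitor is normally obtained through Monitor.get_monitor(model) rather than constructed directly, and the channels OrderedDict shown above is what values() iterates over when building the data specs. The sketch below shows one way to inspect recorded channel values after training; channel.val_record is assumed to hold the channel's recorded values, so verify the attribute names against your pylearn2 version.

# Hedged usage sketch: iterate over the monitor's channels OrderedDict.
from pylearn2.monitor import Monitor

def report_last_values(model):
    monitor = Monitor.get_monitor(model)
    for name, channel in monitor.channels.items():
        if channel.val_record:
            print(name, channel.val_record[-1])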

Example 6: UpdateNormMonitorLearningRule

# Required module import: from pylearn2.compat import OrderedDict [as alias]
# Or: from pylearn2.compat.OrderedDict import values [as alias]
class UpdateNormMonitorLearningRule(LearningRule):

    """ Wraps an existing pylearn2 learning rule and adds monitor channels
        for the norms of the gradient based updates calculated during
        learning.
    """
    
    def __init__(self, base_learning_rule, decay=0.9):
        self.base = base_learning_rule
        # hack to allow MomentumAdjustor to access momentum value
        if hasattr(self.base, 'momentum'):
            self.momentum = self.base.momentum
        self.decay = decay
        self.mean_updates = OrderedDict()
         
    def add_channels_to_monitor(self, monitor, monitoring_dataset):
    
        channel_mapping = {
            '_min': T.min,
            '_max': T.max,
            '_mean': T.mean
        }
        
        for mean_update in self.mean_updates.values():
            if mean_update.ndim == 4:
                # rank-4 tensor (assuming stack of rank-3 convolutional kernels)
                knl_norm_vals = T.sqrt(T.sum(T.sqr(mean_update), axis=(1,2,3)))
                for suffix, op in channel_mapping.items():
                    monitor.add_channel(
                        name=(mean_update.name + "_kernel_norm" + suffix),
                        ipt=None,
                        val=op(knl_norm_vals),
                        data_specs=(NullSpace(), ''),
                        dataset=monitoring_dataset)
            elif mean_update.ndim == 3:
                # rank-3 tensor (assuming stack of rank-2 conv layer biases)
                knl_norm_vals = T.sqrt(T.sum(T.sqr(mean_update), axis=(1,2)))
                for suffix, op in channel_mapping.items():
                    monitor.add_channel(
                        name=(mean_update.name + "_norm" + suffix),
                        ipt=None,
                        val=op(knl_norm_vals),
                        data_specs=(NullSpace(), ''),
                        dataset=monitoring_dataset)
            elif mean_update.ndim == 2:
                # rank-2 tensor (matrix)
                col_norm_vals = T.sqrt(T.sum(T.sqr(mean_update), axis=0))
                row_norm_vals = T.sqrt(T.sum(T.sqr(mean_update), axis=1))
                mtx_norm_val = T.sqrt(T.sum(T.sqr(mean_update)))        
                for suffix, op in channel_mapping.items():
                    monitor.add_channel(
                        name=(mean_update.name + "_col_norm" + suffix),
                        ipt=None,
                        val=op(col_norm_vals),
                        data_specs=(NullSpace(), ''),
                        dataset=monitoring_dataset)
                    monitor.add_channel(
                        name=(mean_update.name + "_row_norm" + suffix),
                        ipt=None,
                        val=op(row_norm_vals),
                        data_specs=(NullSpace(), ''),
                        dataset=monitoring_dataset)
                monitor.add_channel(
                    name=(mean_update.name + "_norm"),
                    ipt=None,
                    val=mtx_norm_val,
                    data_specs=(NullSpace(), ''),
                    dataset=monitoring_dataset)
            elif mean_update.ndim == 1:
                # rank-1 tensor (vector)
                norm_val = T.sqrt(T.sum(T.sqr(mean_update), axis=0))
                monitor.add_channel(
                    name=(mean_update.name + "_norm"),
                    ipt=None,
                    val=norm_val,
                    data_specs=(NullSpace(), ''),
                    dataset=monitoring_dataset)
            elif mean_update.ndim == 0:
                # rank-0 tensor (scalar)
                monitor.add_channel(
                    name=(mean_update.name + "_norm"),
                    ipt=None,
                    val=mean_update,
                    data_specs=(NullSpace(), ''),
                    dataset=monitoring_dataset)                
            else:
                # not sure which axes to sum over in this case
                raise ValueError(
                    'Mean update {0} has unexpected number of dimensions {1} ({2})'
                    .format(mean_update, mean_update.ndim, mean_update.shape))
                    
        self.base.add_channels_to_monitor(monitor, monitoring_dataset)
        
        return  

    def get_updates(self, learning_rate, grads, lr_scalers=None):
    
        updates = self.base.get_updates(learning_rate, grads, lr_scalers)
    
        for (param, grad) in six.iteritems(grads):
#......... remaining code omitted .........
Developer ID: Neuroglycerin, Project: neukrill-net-tools, Lines of code: 103, Source file: update_norm_monitor.py
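The get_updates body is truncated here, but given the decay attribute and the mean_updates OrderedDict, the wrapper most likely keeps an exponential moving average of each parameter's update before delegating the real update computation to the wrapped rule. The one-liner below is an assumption about that bookkeeping, not the omitted code.

# Assumed bookkeeping for the truncated get_updates: an exponential moving
# average of each parameter's update, kept only for the monitoring channels.
def update_moving_average(mean_update, new_update, decay=0.9):
    return decay * mean_update + (1. - decay) * new_update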


Note: The pylearn2.compat.OrderedDict.values method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please follow each project's license when distributing or using the code, and do not reproduce this article without permission.