

Python OrderedDict.items Method Code Examples

This article collects typical usage examples of the Python method theano.compat.python2x.OrderedDict.items. If you are wondering what exactly OrderedDict.items does, how to use it, or where to find examples of it, the curated code samples below should help. You can also explore further usage examples of the containing class, theano.compat.python2x.OrderedDict.


The following presents 9 code examples of the OrderedDict.items method, sorted by popularity by default.
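Before the examples, a minimal sketch of the method itself may help. theano.compat.python2x.OrderedDict was a compatibility re-export of an insertion-ordered dictionary for older Python 2.x installations; the fallback import below is an assumption for environments where that module is no longer available.

# Minimal sketch: items() yields (key, value) pairs in insertion order.
# The try/except fallback is an assumption for newer Theano/Python setups.
try:
    from theano.compat.python2x import OrderedDict
except ImportError:
    from collections import OrderedDict

stats = OrderedDict([('mean', 0.0), ('std', 1.0), ('l1', 3.2)])
for name, value in stats.items():
    print(name, value)  # prints keys in the order they were inserted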

Example 1: main

# Required module import: from theano.compat.python2x import OrderedDict [as alias]
# Or: from theano.compat.python2x.OrderedDict import items [as alias]
def main():
    var = theano.shared(T.zeros(shape=(88, 100), dtype=theano.config.floatX).eval(), name='W')
    updates = [(var, add_uniform(input=var, noise_level=.02))]

    stats = get_stats(var)
    l1 = stats.pop('l1')
    l2 = stats.pop('l2')
    min = stats.pop('min')
    max = stats.pop('max')
    var = stats.pop('var')
    std = stats.pop('std')
    mean = stats.pop('mean')

    mean_monitor = Monitor('mean', mean, train=True, valid=True, out_service=FileService('outs/mean.txt'))
    var_monitor = Monitor('var', var, out_service=FileService('outs/var.txt'))

    w_channel = MonitorsChannel('W', monitors=mean_monitor)

    stat_channel = MonitorsChannel('stats', monitors=[var_monitor])

    monitors = [w_channel, stat_channel]

    train_collapsed_raw = collapse_channels(monitors, train=True)
    train_collapsed = OrderedDict([(item[0], item[1]) for item in train_collapsed_raw])
    train_services = OrderedDict([(item[0], item[2]) for item in train_collapsed_raw])
    valid_collapsed_raw = collapse_channels(monitors, valid=True)
    valid_collapsed = OrderedDict([(item[0], item[1]) for item in valid_collapsed_raw])
    valid_services = OrderedDict([(item[0], item[2]) for item in valid_collapsed_raw])

    log.debug('compiling...')
    # theano.function expects a list of outputs; wrapping .values() in list()
    # keeps this working on Python 3, where .values() returns a view
    f = theano.function(inputs=[], outputs=list(train_collapsed.values()), updates=updates)
    f2 = theano.function(inputs=[], outputs=list(valid_collapsed.values()), updates=updates)
    log.debug('done')

    t1 = time.time()

    for epoch in range(10):
        t = time.time()
        log.debug(epoch)
        vals = f()
        m = OrderedDict(zip(train_collapsed.keys(), vals))
        for name, service in train_services.items():
            if name in m:
                service.write(m[name], TRAIN)
        log.debug('----- '+make_time_units_string(time.time()-t))

    for epoch in range(10):
        t = time.time()
        log.debug(epoch)
        vals = f2()
        m = OrderedDict(zip(valid_collapsed.keys(), vals))
        for name, service in valid_services.items():
            if name in m:
                service.write(m[name], VALID)
        log.debug('----- ' + make_time_units_string(time.time() - t))

    log.debug("TOTAL TIME "+make_time_units_string(time.time()-t1))
Developer: 52nlp, Project: OpenDeep, Lines: 59, Source: test_fileservice.py
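Example 1 leans on the fact that OrderedDict preserves key order, so monitor names stay aligned with the values returned by the compiled function. A stripped-down sketch of that pairing pattern, with made-up names standing in for the monitor machinery above:

# Sketch: re-pairing ordered names with computed values.
from collections import OrderedDict  # assumed stand-in for the compat import

collapsed = OrderedDict([('train_mean', None), ('train_var', None)])
vals = [0.12, 0.98]  # e.g. what f() might return, in the same order
m = OrderedDict(zip(collapsed.keys(), vals))
assert list(m.items()) == [('train_mean', 0.12), ('train_var', 0.98)]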

Example 2: get_gradients

# Required module import: from theano.compat.python2x import OrderedDict [as alias]
# Or: from theano.compat.python2x.OrderedDict import items [as alias]
    def get_gradients(self, model, data, **kwargs):

        cost = self.expr(model=model, data=data, **kwargs)

        params = list(model.get_params())

        grads = T.grad(cost, params, disconnected_inputs='ignore')

        gradients = OrderedDict(izip(params, grads))

        if self.gradient_clipping:
            norm_gs = 0.
            for grad in gradients.values():
                norm_gs += (grad ** 2).sum()
            not_finite = T.or_(T.isnan(norm_gs), T.isinf(norm_gs))
            norm_gs = T.sqrt(norm_gs)
            norm_gs = T.switch(T.ge(norm_gs, self.max_magnitude),
                               self.max_magnitude / norm_gs,
                               1.)

            for param, grad in gradients.items():
                gradients[param] = T.switch(not_finite,
                                            .1 * param,
                                            grad * norm_gs)

        updates = OrderedDict()

        return gradients, updates
Developer: Sandy4321, Project: librnn, Lines: 30, Source: rnn.py
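The clipping branch above rescales all gradients when their global L2 norm exceeds max_magnitude, and falls back to 0.1 * param when the norm is NaN or Inf. A plain NumPy sketch of the same rescaling arithmetic (an illustration, not the Theano graph the class builds):

# Sketch: global-norm gradient clipping in NumPy.
# max_magnitude mirrors self.max_magnitude; the value is an assumption.
import numpy as np

def clip_by_global_norm(grads, max_magnitude=5.0):
    norm = np.sqrt(sum((g ** 2).sum() for g in grads))
    scale = max_magnitude / norm if norm >= max_magnitude else 1.0
    return [g * scale for g in grads]

grads = [np.array([3.0, 4.0]), np.array([12.0])]  # global norm = 13
clipped = clip_by_global_norm(grads)
# new global norm: sqrt(sum of squared entries) == 5.0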

Example 3: orderings

# Required module import: from theano.compat.python2x import OrderedDict [as alias]
# Or: from theano.compat.python2x.OrderedDict import items [as alias]
    def orderings(self):
        """
        Return dict d s.t. d[node] is a list of nodes that must be evaluated
        before node itself can be evaluated.

        This is used primarily by the destroy_handler feature to ensure that
        all clients of any destroyed inputs have already computed their
        outputs.

        :note: This only calls the orderings() function on each feature. It
               does not take care of computing the dependencies by itself.

        """
        ords = OrderedDict()
        assert isinstance(self._features, list)
        for feature in self._features:
            if hasattr(feature, 'orderings'):
                orderings = feature.orderings(self)
                if not isinstance(orderings, OrderedDict):
                    raise TypeError("Non-deterministic return value from " +
                                    str(feature.orderings) +
                                    ". Nondeterministic object is " +
                                    str(orderings))
                for node, prereqs in orderings.items():
                    if not isinstance(prereqs, (list, OrderedSet)):
                        raise TypeError(
                            "prereqs must be a type with a "
                            "deterministic iteration order, or toposort "
                            " will be non-deterministic.")
                    ords.setdefault(node, []).extend(prereqs)
        # eliminate duplicate prereqs
        for (node, prereqs) in ords.items():
            ords[node] = list(OrderedSet(prereqs))
        return ords
Developer: Jackwangyang, Project: Theano, Lines: 36, Source: fg.py
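The orderings() method merges per-feature prerequisite dicts into one deterministic map. A self-contained sketch of that merge step, using plain node names (the nodes and features here are hypothetical):

# Sketch: deterministically merging prerequisite dicts.
from collections import OrderedDict

def merge_orderings(per_feature):
    ords = OrderedDict()
    for orderings in per_feature:
        for node, prereqs in orderings.items():
            ords.setdefault(node, []).extend(prereqs)
    for node, prereqs in ords.items():
        # drop duplicates while keeping first-seen order
        ords[node] = list(OrderedDict.fromkeys(prereqs))
    return ords

a = OrderedDict([('c', ['a', 'b'])])
b = OrderedDict([('c', ['b']), ('d', ['c'])])
print(merge_orderings([a, b]))  # c -> ['a', 'b'], d -> ['c']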

Example 4: StemCell

# Required module import: from theano.compat.python2x import OrderedDict [as alias]
# Or: from theano.compat.python2x.OrderedDict import items [as alias]
class StemCell(NonlinCell):
    """
    WRITEME

    Parameters
    ----------
    .. todo::
    """
    def __init__(self,
                 name,
                 parent=[],
                 parent_dim=[],
                 nout=None,
                 init_W=InitCell('randn'),
                 init_b=InitCell('zeros'),
                 cons=0.,
                 use_bias=1,
                 lr_scaler=None,
                 **kwargs):
        super(StemCell, self).__init__(**kwargs)
        if name is None:
            name = self.__class__.__name__.lower()
        self.name = name
        self.nout = nout
        self.init_W = init_W
        self.init_b = init_b
        self.cons = cons
        self.parent = OrderedDict()
        parent_dim = tolist(parent_dim)
        for i, par in enumerate(tolist(parent)):
            if len(parent_dim) != 0 and len(parent) != 0:
                if len(parent) != len(parent_dim):
                    raise AssertionError("You probably had a mistake providing,\
                                          write number of values. It will end,\
                                          up with a model containing a bug.")
                self.parent[par] = parent_dim[i]
            else:
                self.parent[par] = None
        self.lr_scaler = lr_scaler
        self.use_bias = use_bias

    def fprop(self):
        raise NotImplementedError(
            str(type(self)) + " does not implement Layer.fprop.")

    def initialize(self):

        params = OrderedDict()

        for parname, parout in self.parent.items():
            W_shape = (parout, self.nout)
            W_name = 'W_' + parname + '__' + self.name
            params[W_name] = self.init_W.get(W_shape)

        if self.use_bias:
            params['b_'+self.name] = self.init_b.get(self.nout)

        return params
Developer: taesupkim, Project: cle, Lines: 60, Source: __init__.py

Example 5: RecurrentLayer

# Required module import: from theano.compat.python2x import OrderedDict [as alias]
# Or: from theano.compat.python2x.OrderedDict import items [as alias]
class RecurrentLayer(StemCell):
    """
    Abstract class for recurrent layers

    Parameters
    ----------
    .. todo::
    """
    def __init__(self,
                 recurrent=[],
                 recurrent_dim=[],
                 self_recurrent=1,
                 clip_gradient=True,
                 clip_bound=5,
                 init_U=InitCell('ortho'),
                 **kwargs):

        super(RecurrentLayer, self).__init__(**kwargs)
        self.recurrent = OrderedDict()

        if self_recurrent:
            self.recurrent[self.name] = self.nout

        recurrent_dim = tolist(recurrent_dim)

        for i, rec in enumerate(tolist(recurrent)):
            if len(recurrent_dim) != 0:
                self.recurrent[rec] = recurrent_dim[i]
            else:
                self.recurrent[rec] = None

        self.clip_gradient = clip_gradient
        self.clip_bound = clip_bound
        self.init_U = init_U

    def get_init_state(self, batch_size):

        state = T.zeros((batch_size, self.nout), dtype=theano.config.floatX)
        state = T.unbroadcast(state, *range(state.ndim))

        return state

    def initialize(self):

        self.params = super(RecurrentLayer, self).initialize()

        for recname, recout in self.recurrent.items():
            U_shape = (recout, self.nout)
            U_name = 'U_'+recname+'__'+self.name
            self.alloc(self.init_U.get(U_shape, U_name))

        return self.params
Developer: kratarth1203, Project: cle, Lines: 54, Source: recurrent.py

Example 6: RecurrentLayer

# Required module import: from theano.compat.python2x import OrderedDict [as alias]
# Or: from theano.compat.python2x.OrderedDict import items [as alias]
class RecurrentLayer(StemCell):
    """
    Abstract class for recurrent layers

    Parameters
    ----------
    .. todo::
    """

    def __init__(self,
                 batch_size,
                 recurrent=[],
                 recurrent_dim=[],
                 self_recurrent=1,
                 init_state_cons=0.,
                 init_U=InitCell('ortho'),
                 **kwargs):
        super(RecurrentLayer, self).__init__(**kwargs)
        self.recurrent = OrderedDict()
        if self_recurrent:
            self.recurrent[self.name] = self.nout
        recurrent_dim = tolist(recurrent_dim)
        for i, rec in enumerate(tolist(recurrent)):
            if len(recurrent_dim) != 0:
                self.recurrent[rec] = recurrent_dim[i]
            else:
                self.recurrent[rec] = None
        self.batch_size = batch_size
        self.init_U = init_U
        self.init_states = OrderedDict()
        self.init_state_cons = init_state_cons

    def get_init_state(self, batch_size=None):
        if batch_size is None:
            batch_size = self.batch_size
        state = T.zeros((batch_size, self.nout)) + self.init_state_cons
        state = T.unbroadcast(state, *range(state.ndim))
        return state

    def initialize(self):
        super(RecurrentLayer, self).initialize()
        for recname, recout in self.recurrent.items():
            U_shape = (recout, self.nout)
            U_name = 'U_' + recname + '__' + self.name
            self.alloc(self.init_U.get(U_shape, U_name))
Developer: anirudh9119, Project: cle, Lines: 47, Source: recurrent.py

Example 7: get_updates

# Required module import: from theano.compat.python2x import OrderedDict [as alias]
# Or: from theano.compat.python2x.OrderedDict import items [as alias]
    def get_updates(self, grads):
        """
        .. todo::

            WRITEME
        """
        updates = OrderedDict()
        g_tt = OrderedDict()
        cnt = sharedX(0, 'counter')
        for p, g in grads.items():
            lr_scaler = self.lr_scalers.get(str(p), 1.)
            m = sharedX(p.get_value() * 0.)
            v = sharedX(p.get_value() * 0.)
            b1 = self.b1 * self.lambd**cnt
            m_t = b1 * m + (1 - b1) * g
            v_t = self.b2 * v + (1 - self.b2) * g**2
            m_t_hat = m_t / (1. - self.b1**(cnt + 1))
            v_t_hat = v_t / (1. - self.b2**(cnt + 1))
            g_t = m_t_hat / (T.sqrt(v_t_hat) + self.e)
            p_t = p - lr_scaler * self.lr * g_t
            g_tt[p] = g_t
            updates[m] = m_t
            updates[v] = v_t
            updates[p] = p_t
        if self.post_clip:
            g_norm = sum([T.sqr(x/self.batch_size).sum()
                          for x in g_tt.values()])
            not_finite = T.or_(T.isnan(g_norm), T.isinf(g_norm))
            g_norm = T.sqrt(g_norm)
            scaler = self.scaler / T.maximum(self.scaler, g_norm)
            for p, g in g_tt.items():
                lr_scaler = self.lr_scalers.get(str(p), 1.)
                p_t = p - lr_scaler * self.lr * g * scaler
                updates[p] = p_t
        updates[cnt] = cnt + 1
        return updates
Developer: lipengyu, Project: cle, Lines: 38, Source: opt.py
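Example 7 is a Theano implementation of the Adam update rule, including the b1 decay factor lambda from the original Adam paper. A NumPy sketch of a single bias-corrected step for one parameter (the hyperparameter defaults mirror self.lr, self.b1, self.b2, self.e, but the concrete values are assumptions):

# Sketch: one bias-corrected Adam step in NumPy.
import numpy as np

def adam_step(p, g, m, v, t, lr=1e-3, b1=0.9, b2=0.999, e=1e-8):
    m_t = b1 * m + (1 - b1) * g          # first-moment estimate
    v_t = b2 * v + (1 - b2) * g ** 2     # second-moment estimate
    m_hat = m_t / (1. - b1 ** (t + 1))   # bias correction
    v_hat = v_t / (1. - b2 ** (t + 1))
    return p - lr * m_hat / (np.sqrt(v_hat) + e), m_t, v_t

p, m, v = np.array([1.0]), np.zeros(1), np.zeros(1)
p, m, v = adam_step(p, np.array([0.5]), m, v, t=0)
print(p)  # first step moves by ~lr regardless of gradient scale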

Example 8: Optimizer

# Required module import: from theano.compat.python2x import OrderedDict [as alias]
# Or: from theano.compat.python2x.OrderedDict import items [as alias]

#......... part of the code omitted here .........
                                                          self.learning_rate,
                                                          learning_rate,
                                                          lr_decay_factor)
        else:
            self.learning_rate_decay = False

        # rest of initial parameters needed for training.
        self.batch_size = batch_size
        self.min_batch_size = min_batch_size
        self.n_epoch = epochs
        self.save_frequency = save_freq
        self.early_stop_threshold = stop_threshold
        self.early_stop_length = stop_patience
        self.grad_clip = grad_clip
        self.hard_clip = hard_clip

    def get_updates(self, gradients):
        """
        This returns the parameter updates to use during training. It defaults to only using (annealed) learning rate.

        Parameters
        ----------
        gradients : dict
            A dictionary mapping from the model's parameters to their gradients.

        Returns
        -------
        updates : OrderedDict
            A dictionary mapping from the old model parameters, to their new
            values after a single iteration of the learning rule.
        """
        log.debug('Setting up Stochastic Gradient Descent for optimizer...')
        updates = OrderedDict()
        for (param, gradient) in six.iteritems(gradients):
            scaled_lr = self.learning_rate * self.lr_scalers.get(param, 1.)
            updates[param] = param - scaled_lr * gradient
        return updates

    def train(self, monitor_channels=None, train_outservice=None, plot=None):
        """
        This method performs the training!!!
        It is an online training method that goes over minibatches from the dataset for a number of epochs,
        updating parameters after each minibatch.

        You can disrupt training with a KeyboardInterrupt and it should exit/save parameters gracefully.

        Parameters
        ----------
        monitor_channels : list(MonitorsChannel or Monitor), optional
            The list of channels or monitors containing monitor expressions/variables to compile and evaluate
            on the data.
        train_outservice : OutService, optional
            The OutService to use for the automatically created train_cost monitor. Default of None just outputs
            to logs.
        plot : Plot, optional
            The Plot object to use if we want to graph the outputs (uses bokeh server).
        """
        if not self.model:
            log.error("No self.model for the Optimizer!")
            raise AssertionError("Needs to be initialized with a Model! (Or something went wrong if train() "
                                 "was called from the Model. Try initializing the Optimizer with the model param "
                                 "and calling optimizer.train().")

        #########################
        # gradients and updates #
        #########################
Developer: adammenges, Project: OpenDeep, Lines: 70, Source: optimizer.py
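The get_updates method above implements plain stochastic gradient descent: each parameter moves against its gradient, scaled by a (possibly annealed, per-parameter-scaled) learning rate. A NumPy sketch of the same rule outside Theano (the parameter names here are illustrative):

# Sketch: the vanilla SGD rule p <- p - scaled_lr * g, in NumPy.
import numpy as np

def sgd_updates(params, grads, learning_rate=0.01, lr_scalers=None):
    lr_scalers = lr_scalers or {}
    return {name: params[name]
                  - learning_rate * lr_scalers.get(name, 1.0) * grads[name]
            for name in params}

params = {'W': np.array([1.0, -2.0])}
grads = {'W': np.array([0.5, 0.5])}
print(sgd_updates(params, grads))  # W -> [0.995, -2.005]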

Example 9: Optimizer

# Required module import: from theano.compat.python2x import OrderedDict [as alias]
# Or: from theano.compat.python2x.OrderedDict import items [as alias]

#......... part of the code omitted here .........
            # grab the data and labels
            data, labels = self.dataset.getSubset(subset)
            # create the givens for the input function as pairs of (input_variable: sliced_data)
            givens = OrderedDict(zip(model_inputs, [data[batch_slice]]))
            # include labels as well if they are required by the model
            if model_targets is not None and len(model_targets) > 0:
                if labels is None:
                    log.error("No labels in the dataset!")
                    raise AssertionError, "No lables in the dataset!"
                givens.update(OrderedDict(zip(model_targets, [labels[batch_slice]])))
        else:
            log.warning("Dataset doesn't have subset %s" % get_subset_strings(subset))

        return givens

    def get_updates(self, gradients):
        """
        This returns the parameter updates to use during training. It defaults to only using (annealed) learning rate.

        Parameters
        ----------
        gradients : dict
            A dictionary mapping from the model's parameters to their
            gradients.

        Returns
        -------
        updates : OrderedDict
            A dictionary mapping from the old model parameters, to their new
            values after a single iteration of the learning rule.
        """
        log.debug('Setting up Stochastic Gradient Descent for optimizer...')
        updates = OrderedDict()
        for (param, gradient) in six.iteritems(gradients):
            scaled_lr = self.learning_rate * self.lr_scalers.get(param, 1.)
            updates[param] = param - scaled_lr * gradient
        return updates

    def get_lr_monitor(self):
        """
        Returns a monitor dictionary to the Optimizer's learning rate.

        Returns
        -------
        dict
            Mapping 'learning_rate' to `self.learning_rate` shared variable.
        """
        return {'learning_rate': self.learning_rate}

    def train(self, monitor_channels=None, train_outservice=None, plot=None, continue_training=False):
        """
        This method performs the training!!!
        It is an online training method that goes over minibatches from the dataset for a number of epochs,
        updating parameters after each minibatch.

        You can disrupt training with a KeyboardInterrupt and it should exit/save parameters gracefully.

        Parameters
        ----------
        monitor_channels : list(MonitorsChannel or Monitor), optional
            The list of channels or monitors containing monitor expressions/variables to compile and evaluate
            on the data.
        train_outservice : OutService, optional
            The OutService to use for the automatically created train_cost monitor. Default of None just outputs
            to logs.
        plot : Plot, optional
Developer: 52nlp, Project: OpenDeep, Lines: 70, Source: optimizer.py
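The truncated snippet above builds a Theano givens mapping so the compiled function reads minibatches directly from a shared dataset instead of taking explicit inputs. A minimal runnable sketch of that pattern (x, data, and the slice are illustrative stand-ins for the model inputs and dataset subset):

# Sketch: feeding a compiled function via givens instead of inputs.
from collections import OrderedDict
import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
data = theano.shared(np.random.randn(100, 10).astype(theano.config.floatX))
batch_slice = slice(0, 32)

givens = OrderedDict(zip([x], [data[batch_slice]]))
f = theano.function(inputs=[], outputs=x.sum(), givens=givens)
print(f())  # sums the first 32 rows without transferring data each call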


Note: The theano.compat.python2x.OrderedDict.items method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their authors; copyright of the source code belongs to the original authors, and distribution and use should follow the corresponding project's License. Do not reproduce without permission.