

Python DataSpecsMapping.flatten Method Code Examples

This article collects typical usage examples of the Python method pylearn2.utils.data_specs.DataSpecsMapping.flatten. If you are wondering what DataSpecsMapping.flatten does, how to use it, or where to find examples of it, the curated examples below should help. You can also explore further usage examples of the containing class, pylearn2.utils.data_specs.DataSpecsMapping.


The following presents 15 code examples of the DataSpecsMapping.flatten method, sorted by popularity by default.
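Before diving into the examples, here is a minimal sketch of the flatten/nest round trip. It assumes only that pylearn2 is installed; the variable names are illustrative and not taken from any of the projects below.

from pylearn2.space import CompositeSpace, VectorSpace
from pylearn2.utils.data_specs import DataSpecsMapping

# A nested (space, source) pair: a feature vector plus a nested
# (target, features) sub-pair.
space = CompositeSpace([VectorSpace(dim=3),
                        CompositeSpace([VectorSpace(dim=10),
                                        VectorSpace(dim=7)])])
source = ('features', ('target', 'features'))
mapping = DataSpecsMapping((space, source))

# flatten() turns the nested structure into a flat tuple ...
flat_source = mapping.flatten(source)
print(flat_source)  # expected: ('features', 'target', 'features')

# ... and nest() restores the original nesting.
assert mapping.nest(flat_source) == source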

Example 1: test_nest_specs

# Required import: from pylearn2.utils.data_specs import DataSpecsMapping [as alias]
# Or: from pylearn2.utils.data_specs.DataSpecsMapping import flatten [as alias]
def test_nest_specs():
    x1 = TT.matrix('x1')
    x2 = TT.matrix('x2')
    x3 = TT.matrix('x3')
    x4 = TT.matrix('x4')

    for nested_space, nested_source, nested_data in [
            (VectorSpace(dim=10), 'target', x2),
            (CompositeSpace([VectorSpace(dim=3), VectorSpace(dim=9)]),
                ('features', 'features'),
                (x1, x4)),
            (CompositeSpace([VectorSpace(dim=3),
                             CompositeSpace([VectorSpace(dim=10),
                                             VectorSpace(dim=7)])]),
                ('features', ('target', 'features')),
                (x1, (x2, x3))),
            ]:

        mapping = DataSpecsMapping((nested_space, nested_source))
        flat_space = mapping.flatten(nested_space)
        flat_source = mapping.flatten(nested_source)
        flat_data = mapping.flatten(nested_data)

        renested_space = mapping.nest(flat_space)
        renested_source = mapping.nest(flat_source)
        renested_data = mapping.nest(flat_data)

        assert_equal(renested_space, nested_space)
        assert_equal(renested_source, nested_source)
        assert_equal(renested_data, nested_data)
Developer: 123fengye741, Project: pylearn2, Lines: 32, Source: test_data_specs.py

Example 2: test_nest_specs

# Required import: from pylearn2.utils.data_specs import DataSpecsMapping [as alias]
# Or: from pylearn2.utils.data_specs.DataSpecsMapping import flatten [as alias]
def test_nest_specs():
    x1 = TT.matrix("x1")
    x2 = TT.matrix("x2")
    x3 = TT.matrix("x3")
    x4 = TT.matrix("x4")

    for nested_space, nested_source, nested_data in [
        (VectorSpace(dim=10), "target", x2),
        (CompositeSpace([VectorSpace(dim=3), VectorSpace(dim=9)]), ("features", "features"), (x1, x4)),
        (
            CompositeSpace([VectorSpace(dim=3), CompositeSpace([VectorSpace(dim=10), VectorSpace(dim=7)])]),
            ("features", ("target", "features")),
            (x1, (x2, x3)),
        ),
    ]:

        mapping = DataSpecsMapping((nested_space, nested_source))
        flat_space = mapping.flatten(nested_space)
        flat_source = mapping.flatten(nested_source)
        flat_data = mapping.flatten(nested_data)

        renested_space = mapping.nest(flat_space)
        renested_source = mapping.nest(flat_source)
        renested_data = mapping.nest(flat_data)

        assert_equal(renested_space, nested_space)
        assert_equal(renested_source, nested_source)
        assert_equal(renested_data, nested_data)
Developer: Bowen-C, Project: pylearn2, Lines: 30, Source: test_data_specs.py

Example 3: test_variational_cd

# Required import: from pylearn2.utils.data_specs import DataSpecsMapping [as alias]
# Or: from pylearn2.utils.data_specs.DataSpecsMapping import flatten [as alias]
def test_variational_cd():

    # Verifies that VariationalCD works well with make_layer_to_symbolic_state
    visible_layer = BinaryVector(nvis=100)
    hidden_layer = BinaryVectorMaxPool(detector_layer_dim=500,
                                       pool_size=1,
                                       layer_name='h',
                                       irange=0.05,
                                       init_bias=-2.0)
    model = DBM(visible_layer=visible_layer,
                hidden_layers=[hidden_layer],
                batch_size=100,
                niter=1)

    cost = VariationalCD(num_chains=100, num_gibbs_steps=2)

    data_specs = cost.get_data_specs(model)
    mapping = DataSpecsMapping(data_specs)
    space_tuple = mapping.flatten(data_specs[0], return_tuple=True)
    source_tuple = mapping.flatten(data_specs[1], return_tuple=True)

    theano_args = []
    for space, source in safe_zip(space_tuple, source_tuple):
        name = '%s' % (source)
        arg = space.make_theano_batch(name=name)
        theano_args.append(arg)
    theano_args = tuple(theano_args)
    nested_args = mapping.nest(theano_args)

    grads, updates = cost.get_gradients(model, nested_args)
Developer: BloodNg, Project: pylearn2, Lines: 32, Source: test_dbm.py
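This example, like Examples 4, 7, 11 and 14 below, repeats one idiom: flatten the cost's data specs, build one Theano variable per flat (space, source) pair, then nest the variables back into the structure the cost expects. The following is a hedged sketch of that idiom factored into a helper; the name make_nested_theano_args is ours, and we assume safe_zip is importable from pylearn2.utils.

from pylearn2.utils import safe_zip
from pylearn2.utils.data_specs import DataSpecsMapping

def make_nested_theano_args(cost, model, batch_size=None):
    # Flatten the (space, source) pair so that a combination appearing
    # more than once in data_specs yields only one Theano variable.
    data_specs = cost.get_data_specs(model)
    mapping = DataSpecsMapping(data_specs)
    space_tuple = mapping.flatten(data_specs[0], return_tuple=True)
    source_tuple = mapping.flatten(data_specs[1], return_tuple=True)

    # One symbolic batch per flat (space, source) pair.
    theano_args = tuple(
        space.make_theano_batch(name=source, batch_size=batch_size)
        for space, source in safe_zip(space_tuple, source_tuple))

    # Re-nest so the structure matches what methods of the cost expect.
    return mapping, theano_args, mapping.nest(theano_args)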

Example 4: setup

# Required import: from pylearn2.utils.data_specs import DataSpecsMapping [as alias]
# Or: from pylearn2.utils.data_specs.DataSpecsMapping import flatten [as alias]
    def setup(self, model, dataset):
        """
        Allows the training algorithm to do some preliminary configuration
        *before* we actually start training the model. The dataset is provided
        in case derived training algorithms need to modify the model based on
        the dataset.

        Parameters
        ----------
        model: a Python object representing the model to train, loosely
            implementing the interface of models.model.Model.

        dataset: a pylearn2.datasets.dataset.Dataset object used to draw
            training data.
        """
        self.model = model

        self.monitor = Monitor.get_monitor(model)

        if self.monitoring_dataset is not None:
            # Get the data specifications needed by the model
            space, source = model.get_monitoring_data_specs()

            # Create Theano variables for each of the individual components
            # of that data. Usually, it will be X for inputs and Y for targets.
            # First, we need to find these components, and put them in a tuple
            mapping = DataSpecsMapping((space, source))
            space_tuple = mapping.flatten(space, return_tuple=True)
            source_tuple = mapping.flatten(source, return_tuple=True)
            # Then, build a flat tuple of these Theano variables
            ipt = tuple(sp.make_theano_batch(name='monitor_%s' % src)
                    for (sp, src) in safe_zip(space_tuple, source_tuple))
            # Finally, organize them back into a structure expected by the
            # monitoring channels of the model
            nested_ipt = mapping.nest(ipt)

            self.monitor.add_dataset(dataset=self.monitoring_dataset,
                                mode="sequential",
                                batch_size=self.batch_size,
                                num_batches=self.monitoring_batches)

            channels = model.get_monitoring_channels(nested_ipt)
            if not isinstance(channels, dict):
                raise TypeError("model.get_monitoring_channels must return a "
                                "dictionary, but it returned " + str(channels))
            for name in channels:
                J = channels[name]
                if isinstance(J, tuple):
                    assert len(J) == 2
                    J, prereqs = J
                else:
                    prereqs = None

                self.monitor.add_channel(name=name,
                                         ipt=nested_ipt,
                                         val=J,
                                         prereqs=prereqs,
                                         data_specs=(space, source))
        self.first = True
        self.bSetup = True
Developer: Alienfeel, Project: pylearn2, Lines: 62, Source: default.py

Example 5: train

# Required import: from pylearn2.utils.data_specs import DataSpecsMapping [as alias]
# Or: from pylearn2.utils.data_specs.DataSpecsMapping import flatten [as alias]
    def train(self, dataset):
        if not hasattr(self, 'sgd_update'):
            raise Exception("train called without first calling setup")

        # Make sure none of the parameters have bad values
        for param in self.params:
            value = param.get_value(borrow=True)
            if np.any(np.isnan(value)) or np.any(np.isinf(value)):
                raise Exception("NaN in " + param.name)

        self.first = False
        rng = self.rng
        if not is_stochastic(self.train_iteration_mode):
            rng = None

        data_specs = self.cost.get_data_specs(self.model)

        # The iterator should be built from flat data specs, so it returns
        # flat, non-redundant tuples of data.
        mapping = DataSpecsMapping(data_specs)
        space_tuple = mapping.flatten(data_specs[0], return_tuple=True)
        source_tuple = mapping.flatten(data_specs[1], return_tuple=True)
        if len(space_tuple) == 0:
            # No data will be returned by the iterator, and it is impossible
            # to know the size of the actual batch.
            # It is not decided yet what the right thing to do should be.
            raise NotImplementedError("Unable to train with SGD, because "
                    "the cost does not actually use data from the data set. "
                    "data_specs: %s" % str(data_specs))
        flat_data_specs = (CompositeSpace(space_tuple), source_tuple)

        iterator = dataset.iterator(mode=self.train_iteration_mode,
                batch_size=self.batch_size,
                data_specs=flat_data_specs, return_tuple=True,
                rng = rng, num_batches = self.batches_per_iter)

        on_load_batch = self.on_load_batch
        for batch in iterator:
            for callback in on_load_batch:
                callback(mapping.nest(batch))
            self.sgd_update(*batch)
            # iterator might return a smaller batch if dataset size
            # isn't divisible by batch_size
            # Note: if data_specs[0] is a NullSpace, there is no way to know
            # how many examples would actually have been in the batch,
            # since it was empty, so actual_batch_size would be reported as 0.
            actual_batch_size = flat_data_specs[0].np_batch_size(batch)
            self.monitor.report_batch(actual_batch_size)
            for callback in self.update_callbacks:
                callback(self)

        # Make sure none of the parameters have bad values
        for param in self.params:
            value = param.get_value(borrow=True)
            if np.any(np.isnan(value)) or np.any(np.isinf(value)):
                raise Exception("NaN in " + param.name)
Developer: ahmed26, Project: pylearn2, Lines: 58, Source: sgd.py
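Examples 5, 6 and 13 share the data-side half of the same pattern: build flat data specs, ask the dataset for an iterator that yields flat tuples, and nest a batch back only for consumers that expect the nested layout. A condensed sketch, assuming dataset, cost and model are already-constructed pylearn2 objects and the batch size is arbitrary:

from pylearn2.space import CompositeSpace
from pylearn2.utils.data_specs import DataSpecsMapping

data_specs = cost.get_data_specs(model)
mapping = DataSpecsMapping(data_specs)
space_tuple = mapping.flatten(data_specs[0], return_tuple=True)
source_tuple = mapping.flatten(data_specs[1], return_tuple=True)
flat_data_specs = (CompositeSpace(space_tuple), source_tuple)

# The iterator yields flat, non-redundant tuples matching flat_data_specs.
iterator = dataset.iterator(mode='sequential',
                            batch_size=100,
                            data_specs=flat_data_specs,
                            return_tuple=True)
for batch in iterator:
    # Nest each flat batch back only for callbacks that expect the
    # original nested structure.
    nested_batch = mapping.nest(batch)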

Example 6: train

# Required import: from pylearn2.utils.data_specs import DataSpecsMapping [as alias]
# Or: from pylearn2.utils.data_specs.DataSpecsMapping import flatten [as alias]
    def train(self, dataset):
        """
        .. todo::

            WRITEME
        """
        assert self.bSetup
        model = self.model

        rng = self.rng
        train_iteration_mode = "shuffled_sequential"
        if not is_stochastic(train_iteration_mode):
            rng = None

        data_specs = self.cost.get_data_specs(self.model)
        # The iterator should be built from flat data specs, so it returns
        # flat, non-redundant tuples of data.
        mapping = DataSpecsMapping(data_specs)
        space_tuple = mapping.flatten(data_specs[0], return_tuple=True)
        source_tuple = mapping.flatten(data_specs[1], return_tuple=True)
        if len(space_tuple) == 0:
            # No data will be returned by the iterator, and it is impossible
            # to know the size of the actual batch.
            # It is not decided yet what the right thing to do should be.
            raise NotImplementedError(
                "Unable to train with BGD, because "
                "the cost does not actually use data from the data set. "
                "data_specs: %s" % str(data_specs)
            )
        flat_data_specs = (CompositeSpace(space_tuple), source_tuple)

        iterator = dataset.iterator(
            mode=train_iteration_mode,
            batch_size=self.batch_size,
            num_batches=self.batches_per_iter,
            data_specs=flat_data_specs,
            return_tuple=True,
            rng=rng,
        )

        mode = self.theano_function_mode
        for data in iterator:
            if "targets" in source_tuple and mode is not None and hasattr(mode, "record"):
                Y = data[source_tuple.index("targets")]
                stry = str(Y).replace("\n", " ")
                mode.record.handle_line("data Y " + stry + "\n")

            for on_load_batch in self.on_load_batch:
                on_load_batch(mapping.nest(data))

            self.before_step(model)
            self.optimizer.minimize(*data)
            self.after_step(model)
            actual_batch_size = flat_data_specs[0].np_batch_size(data)
            model.monitor.report_batch(actual_batch_size)
Developer: pangyuteng, Project: chalearn2014, Lines: 57, Source: bgd.py

Example 7: setup

# Required import: from pylearn2.utils.data_specs import DataSpecsMapping [as alias]
# Or: from pylearn2.utils.data_specs.DataSpecsMapping import flatten [as alias]
    def setup(self):
        self.X = T.matrix('X')
        self.Y = T.matrix('Y')

        # Taken from pylearn2/training_algorithms/sgd.py


        data_specs = self.cost.get_data_specs(self.model)
        mapping = DataSpecsMapping(data_specs)
        space_tuple = mapping.flatten(data_specs[0], return_tuple=True)
        source_tuple = mapping.flatten(data_specs[1], return_tuple=True)

        # Build a flat tuple of Theano Variables, one for each space.
        # We want that so that if the same space/source is specified
        # more than once in data_specs, only one Theano Variable
        # is generated for it, and the corresponding value is passed
        # only once to the compiled Theano function.
        theano_args = []
        for space, source in safe_zip(space_tuple, source_tuple):
            name = '%s[%s]' % (self.__class__.__name__, source)
            arg = space.make_theano_batch(name=name, batch_size = self.batch_size)
            theano_args.append(arg)
        print 'BATCH SIZE=',self.batch_size
        theano_args = tuple(theano_args)

        # Methods of `self.cost` need args to be passed in a format compatible
        # with data_specs
        nested_args = mapping.nest(theano_args)
        print self.cost
        fixed_var_descr = self.cost.get_fixed_var_descr(self.model, nested_args)
        print self.cost
        self.on_load_batch = fixed_var_descr.on_load_batch
        params = list(self.model.get_params())
        self.X = nested_args[0]
        self.Y = nested_args[1]
        init_grads, updates = self.cost.get_gradients(self.model, nested_args)

        params = self.model.get_params()
        # We need to replace parameters with purely symbolic variables in case some are shared
        # Create gradient and cost functions
        self.params = params
        symbolic_params = [self._convert_variable(param) for param in params]
        givens = dict(zip(params, symbolic_params))
        costfn = self.model.cost_from_X((self.X, self.Y))
        gradfns = [init_grads[param] for param in params]
        print 'Compiling function...'
        self.theano_f_df = theano.function(inputs=symbolic_params + [self.X, self.Y], outputs=[costfn] + gradfns, givens=givens)
        print 'done'
Developer: NuelASRB, Project: Sum-of-Functions-Optimizer, Lines: 53, Source: model_gradient.py

Example 8: CallbackCost

# Required import: from pylearn2.utils.data_specs import DataSpecsMapping [as alias]
# Or: from pylearn2.utils.data_specs.DataSpecsMapping import flatten [as alias]
class CallbackCost(Cost):
    """
    A Cost that runs callbacks on the data.
    Returns the sum of the data multiplied by the
    sum of all model parameters as the cost.
    The callback is run via the CallbackOp
    so the cost must be used to compute one
    of the outputs of your theano graph if you
    want the callback to get called.
    This cost is designed so that the SGD algorithm
    will result in the CallbackOp getting
    evaluated.
    """

    def __init__(self, data_callbacks, data_specs):
        """
        data_callbacks: optional, callbacks to run on the data.
            It is either a Python callable, or a tuple (possibly nested),
            in the same format as data_specs.
        data_specs: (space, source) pair specifying the format
            and label associated with the data.
        """
        self.data_callbacks = data_callbacks
        self.data_specs = data_specs
        self._mapping = DataSpecsMapping(data_specs)

    def get_data_specs(self, model):
        return self.data_specs

    def expr(self, model, data):
        self.get_data_specs(model)[0].validate(data)
        callbacks = self.data_callbacks

        cb_tuple = self._mapping.flatten(callbacks, return_tuple=True)
        data_tuple = self._mapping.flatten(data, return_tuple=True)

        costs = []
        for (callback, data_var) in safe_zip(cb_tuple, data_tuple):
            orig_var = data_var
            data_var = CallbackOp(callback)(data_var)
            assert len(data_var.owner.inputs) == 1
            assert orig_var is data_var.owner.inputs[0]

            costs.append(data_var.sum())

        # sum() will call theano.add on the symbolic variables
        cost = sum(costs)
        model_terms = sum([param.sum() for param in model.get_params()])
        cost = cost * model_terms
        return cost
Developer: sonu5623, Project: pylearn2, Lines: 52, Source: cost.py
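A hedged usage sketch for the class above; the recording callback and the (space, source) pair are illustrative, not from pylearn2's test suite.

from pylearn2.space import VectorSpace

seen = []  # the callback just records every batch that flows through the graph
cost = CallbackCost(data_callbacks=seen.append,
                    data_specs=(VectorSpace(dim=5), 'features'))

# cost.expr(model, data) wraps each data variable in a CallbackOp, so any
# compiled Theano function whose outputs depend on the returned cost value
# will invoke seen.append on the numeric batch when it runs.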

Example 9: get_fixed_var_descr

# Required import: from pylearn2.utils.data_specs import DataSpecsMapping [as alias]
# Or: from pylearn2.utils.data_specs.DataSpecsMapping import flatten [as alias]
        def get_fixed_var_descr(self, model, data):
            data_specs = self.get_data_specs(model)
            data_specs[0].validate(data)
            rval = FixedVarDescr()
            rval.fixed_vars = {'sup_aux_var': sup_counter}
            rval.data_specs = data_specs

            # data has to be flattened into a tuple before being passed
            # to `function`.
            mapping = DataSpecsMapping(data_specs)
            flat_data = mapping.flatten(data, return_tuple=True)
            theano_func = function(flat_data,
                                 updates=[(sup_counter, sup_counter + 1)])
            # the on_load_batch function will take numerical data formatted
            # as rval.data_specs, so we have to flatten it inside the
            # returned function too.
            # Using default argument binds the variables used in the lambda
            # function to the value they have when the lambda is defined.
            on_load = (lambda batch, mapping=mapping, theano_func=theano_func:
                    theano_func(*mapping.flatten(batch, return_tuple=True)))
            rval.on_load_batch = [on_load]
            return rval
Developer: Alienfeel, Project: pylearn2, Lines: 24, Source: test_bgd.py
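The default-argument trick in the lambda above is a general Python idiom worth isolating: without it, a closure sees whatever the variable holds when the closure is finally called, not when it was defined. A minimal illustration, independent of pylearn2:

funcs = [lambda: i for i in range(3)]
print([f() for f in funcs])      # [2, 2, 2] -- late binding bites

funcs = [lambda i=i: i for i in range(3)]
print([f() for f in funcs])      # [0, 1, 2] -- bound at definition time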

Example 10: test_flatten_specs

# Required import: from pylearn2.utils.data_specs import DataSpecsMapping [as alias]
# Or: from pylearn2.utils.data_specs.DataSpecsMapping import flatten [as alias]
def test_flatten_specs():
    for space, source, flat_space, flat_source in [
        # (None, None),
        (VectorSpace(dim=5), "features", VectorSpace(dim=5), "features"),
        (
            CompositeSpace([VectorSpace(dim=5), VectorSpace(dim=2)]),
            ("features", "features"),
            CompositeSpace([VectorSpace(dim=5), VectorSpace(dim=2)]),
            ("features", "features"),
        ),
        (
            CompositeSpace([VectorSpace(dim=5), VectorSpace(dim=5)]),
            ("features", "targets"),
            CompositeSpace([VectorSpace(dim=5), VectorSpace(dim=5)]),
            ("features", "targets"),
        ),
        (
            CompositeSpace([VectorSpace(dim=5), VectorSpace(dim=5)]),
            ("features", "features"),
            VectorSpace(dim=5),
            "features",
        ),
        (
            CompositeSpace([VectorSpace(dim=5), CompositeSpace([VectorSpace(dim=9), VectorSpace(dim=12)])]),
            ("features", ("features", "targets")),
            CompositeSpace([VectorSpace(dim=5), VectorSpace(dim=9), VectorSpace(dim=12)]),
            ("features", "features", "targets"),
        ),
        (
            CompositeSpace([VectorSpace(dim=5), VectorSpace(dim=9), VectorSpace(dim=12)]),
            ("features", "features", "targets"),
            CompositeSpace([VectorSpace(dim=5), VectorSpace(dim=9), VectorSpace(dim=12)]),
            ("features", "features", "targets"),
        ),
    ]:

        mapping = DataSpecsMapping((space, source))
        rval = (mapping.flatten(space), mapping.flatten(source))
        assert_equal((flat_space, flat_source), rval)
Developer: Bowen-C, Project: pylearn2, Lines: 41, Source: test_data_specs.py

Example 11: setup

# Required import: from pylearn2.utils.data_specs import DataSpecsMapping [as alias]
# Or: from pylearn2.utils.data_specs.DataSpecsMapping import flatten [as alias]
    def setup(self, model, dataset, algorithm):
        self.origin = model.get_param_vector()

        cost = algorithm.cost
        # Boilerplate needed to symbolically evaluate the cost function
        # =======================================
        data_specs = cost.get_data_specs(model)
        mapping = DataSpecsMapping(data_specs)
        space_tuple = mapping.flatten(data_specs[0], return_tuple=True)
        source_tuple = mapping.flatten(data_specs[1], return_tuple=True)

        # Build a flat tuple of Theano Variables, one for each space.
        # We want that so that if the same space/source is specified
        # more than once in data_specs, only one Theano Variable
        # is generated for it, and the corresponding value is passed
        # only once to the compiled Theano function.
        theano_args = []
        for space, source in safe_zip(space_tuple, source_tuple):
            name = '%s[%s]' % (self.__class__.__name__, source)
            arg = space.make_theano_batch(name=name,
                                          batch_size=self.batch_size)
            theano_args.append(arg)
        theano_args = tuple(theano_args)

        # Methods of `cost` need args to be passed in a format compatible
        # with data_specs
        nested_args = mapping.nest(theano_args)
        fixed_var_descr = cost.get_fixed_var_descr(model, nested_args)
        self.on_load_batch = fixed_var_descr.on_load_batch

        cost_value = cost.expr(model, nested_args,
                                    ** fixed_var_descr.fixed_vars)
        # End cargo culting
        # ======================

        print "Compiling cost function..."
        cost_fn = function(theano_args, cost_value)
        self.cost_fn = cost_fn
Developer: cc13ny, Project: galatea, Lines: 40, Source: __init__.py

Example 12: get_fixed_var_descr

# Required import: from pylearn2.utils.data_specs import DataSpecsMapping [as alias]
# Or: from pylearn2.utils.data_specs.DataSpecsMapping import flatten [as alias]
        def get_fixed_var_descr(self, model, data, **kwargs):
            data_specs = self.get_data_specs(model)
            data_specs[0].validate(data)
            rval = FixedVarDescr()
            rval.fixed_vars = {'unsup_aux_var': unsup_counter}

            # The input to `function` should be a flat, non-redundant tuple
            mapping = DataSpecsMapping(data_specs)
            data_tuple = mapping.flatten(data, return_tuple=True)
            theano_func = function([],
                    updates=[(unsup_counter, unsup_counter + 1)])
            def on_load(batch, mapping=mapping, theano_func=theano_func):
                return theano_func()
            rval.on_load_batch = [on_load]

            return rval
Developer: 123fengye741, Project: pylearn2, Lines: 18, Source: test_bgd.py

Example 13: agent_train

# Required import: from pylearn2.utils.data_specs import DataSpecsMapping [as alias]
# Or: from pylearn2.utils.data_specs.DataSpecsMapping import flatten [as alias]
    def agent_train(self, terminal):
        """
        Training function.

        terminal: boolean
            Whether current state is a terminal state.
        """
        # Wait until we have enough data to train
        if self.action_count >= ((self.train.algorithm.batch_size+1)*self.k+1):
            tic = time()
            if self.train_setup == 0:
                self.train.main_loop()

                data_specs = self.train.algorithm.cost.get_data_specs(
                    self.model)

                # The iterator should be built from flat data specs, so it
                # returns flat, non-redundant tuples of data.
                mapping = DataSpecsMapping(data_specs)
                space_tuple = mapping.flatten(data_specs[0], return_tuple=True)
                source_tuple = mapping.flatten(
                    data_specs[1],
                    return_tuple=True
                )
                if len(space_tuple) == 0:
                    # No data will be returned by the iterator, and it is
                    # impossible to know the size of the actual batch. It
                    # is not decided yet what the right thing to do should be.
                    raise NotImplementedError(
                        "Unable to train with SGD, because the cost does not"
                        " actually use data from the data set. "
                        "data_specs: %s" % str(data_specs)
                    )
                flat_data_specs = (CompositeSpace(space_tuple), source_tuple)
                self.flat_data_specs = flat_data_specs
                self.train_setup = 1

            else:
                tic_iter = time()
                temp_iter = self.train.dataset.iterator(
                    mode=self.train.algorithm.train_iteration_mode,
                    batch_size=self.train.algorithm.batch_size,
                    data_specs=self.flat_data_specs,
                    return_tuple=True,
                    rng=self.train.algorithm.rng,
                    num_batches=self.train.algorithm.batches_per_iter
                )
                toc_iter = time()
                log.debug('Iter creation time: %0.2f' % (toc_iter - tic_iter))

                tic_next = time()
                batch = temp_iter.next()
                toc_next = time()
                log.debug('Iter next time: %0.2f' % (toc_next - tic_next))

                tic_sgd = time()
                self.train.algorithm.sgd_update(*batch)
                toc_sgd = time()
                log.debug('SGD time: %0.2f' % (toc_sgd - tic_sgd))

                log.info('Frames seen: %d' % self.all_time_total_frames)
                log.info('Epsilon: %0.10f' % self.epsilon)

            toc = time()
            self.episode_training_time += toc-tic
            log.debug('Real train time: %0.2f' % (toc-tic))
Developer: UncleYu, Project: hedgehog, Lines: 68, Source: basic.py

Example 14: setup

# Required import: from pylearn2.utils.data_specs import DataSpecsMapping [as alias]
# Or: from pylearn2.utils.data_specs.DataSpecsMapping import flatten [as alias]
    def setup(self, model, dataset):
        """
        Compiles the theano functions needed for the train method.

        Parameters
        ----------
        model : a Model instance
        dataset : Dataset
        """
        if self.cost is None:
            self.cost = model.get_default_cost()

        inf_params = [param for param in model.get_params()
                      if np.any(np.isinf(param.get_value()))]
        if len(inf_params) > 0:
            raise ValueError("These params are Inf: "+str(inf_params))
        if any([np.any(np.isnan(param.get_value()))
                for param in model.get_params()]):
            nan_params = [param for param in model.get_params()
                          if np.any(np.isnan(param.get_value()))]
            raise ValueError("These params are NaN: "+str(nan_params))
        self.model = model

        self._synchronize_batch_size(model)
        model._test_batch_size = self.batch_size
        self.monitor = Monitor.get_monitor(model)
        self.monitor._sanity_check()

        # Check that a forced batch size is compatible with the monitoring
        # datasets and the monitor iteration mode
        if getattr(model, "force_batch_size", False) and \
           any(dataset.get_design_matrix().shape[0] % self.batch_size != 0 for
               dataset in self.monitoring_dataset.values()) and \
           not has_uniform_batch_size(self.monitor_iteration_mode):

            raise ValueError("Dataset size is not a multiple of batch size."
                             "You should set monitor_iteration_mode to "
                             "even_sequential, even_shuffled_sequential or "
                             "even_batchwise_shuffled_sequential")

        data_specs = self.cost.get_data_specs(self.model)
        mapping = DataSpecsMapping(data_specs)
        space_tuple = mapping.flatten(data_specs[0], return_tuple=True)
        source_tuple = mapping.flatten(data_specs[1], return_tuple=True)

        # Build a flat tuple of Theano Variables, one for each space.
        # We want that so that if the same space/source is specified
        # more than once in data_specs, only one Theano Variable
        # is generated for it, and the corresponding value is passed
        # only once to the compiled Theano function.
        theano_args = []
        for space, source in safe_zip(space_tuple, source_tuple):
            name = '%s[%s]' % (self.__class__.__name__, source)
            arg = space.make_theano_batch(name=name,
                                          batch_size=self.batch_size)
            theano_args.append(arg)
        theano_args = tuple(theano_args)

        # Methods of `self.cost` need args to be passed in a format compatible
        # with data_specs
        nested_args = mapping.nest(theano_args)
        fixed_var_descr = self.cost.get_fixed_var_descr(model, nested_args)
        self.on_load_batch = fixed_var_descr.on_load_batch

        cost_value = self.cost.expr(model, nested_args,
                                    ** fixed_var_descr.fixed_vars)

        if cost_value is not None and cost_value.name is None:
            # Concatenate the name of all tensors in theano_args !?
            cost_value.name = 'objective'

        # Set up monitor to model the objective value, learning rate,
        # momentum (if applicable), and extra channels defined by
        # the cost
        learning_rate = self.learning_rate
        if self.monitoring_dataset is not None:
            if (self.monitoring_batch_size is None and
                    self.monitoring_batches is None):
                self.monitoring_batch_size = self.batch_size
                self.monitoring_batches = self.batches_per_iter
            self.monitor.setup(dataset=self.monitoring_dataset,
                               cost=self.cost,
                               batch_size=self.monitoring_batch_size,
                               num_batches=self.monitoring_batches,
                               extra_costs=self.monitoring_costs,
                               mode=self.monitor_iteration_mode)
            dataset_name = self.monitoring_dataset.keys()[0]
            monitoring_dataset = self.monitoring_dataset[dataset_name]
            #TODO: have Monitor support non-data-dependent channels
            self.monitor.add_channel(name='learning_rate',
                                     ipt=None,
                                     val=learning_rate,
                                     data_specs=(NullSpace(), ''),
                                     dataset=monitoring_dataset)

            if self.learning_rule:
                self.learning_rule.add_channels_to_monitor(
                        self.monitor,
                        monitoring_dataset)

        params = list(model.get_params())
#......... (rest of this code omitted) .........
Developer: AdityoSanjaya, Project: adversarial, Lines: 103, Source: sgd_alt.py

Example 15: run

# Required import: from pylearn2.utils.data_specs import DataSpecsMapping [as alias]
# Or: from pylearn2.utils.data_specs.DataSpecsMapping import flatten [as alias]
    def run(self):
        mm = self.monitor        

        updates = OrderedDict()
        for channel in mm.channels.values():
            updates[channel.val_shared] = np.cast[config.floatX](0.0)        
        mm.begin_record_entry = function(inputs=[], updates=updates, mode=mm.theano_function_mode,
                    name = 'Monitor.begin_record_entry')


        updates = OrderedDict()
        givens = OrderedDict()
        theano_args = mm._flat_data_specs[0].make_theano_batch(
                ['monitoring_%s' % s for s in mm._flat_data_specs[1]])

        # Get a symbolic expression of the batch size
        # We do it here, rather than for each channel, because channels with an
        # empty data_specs do not use data, and are unable to extract the batch
        # size. The case where the whole data specs is empty is not supported.
        batch_size = mm._flat_data_specs[0].batch_size(theano_args)
        nested_theano_args = mm._data_specs_mapping.nest(theano_args)
        if not isinstance(nested_theano_args, tuple):
            nested_theano_args = (nested_theano_args,)        
        
        assert len(nested_theano_args) == (len(mm.channels) + 1)

        for key in sorted(mm.channels.keys()):
            mode = mm.theano_function_mode
            if mode is not None and hasattr(mode, 'record'):
                mode.record.handle_line('compiling monitor including channel '+key+'\n')
            #log.info('\t%s' % key)
        it = [d.iterator(mode=i, num_batches=n, batch_size=b,
                         data_specs=mm._flat_data_specs,
                         return_tuple=True) \
              for d, i, n, b in safe_izip(mm._datasets, mm._iteration_mode,
                                    mm._num_batches, mm._batch_size)]
        mm.num_examples = [np.cast[config.floatX](float(i.num_examples)) for i in it]


        givens = [OrderedDict() for d in mm._datasets]
        updates = [OrderedDict() for d in mm._datasets]

        #for i, channel in enumerate(mm.channels.values()):
        for i, dw_name in enumerate(mm.channels.keys()):            
            if dw_name in self.p_channel:
                channel = mm.channels[dw_name]
                
                index = mm._datasets.index(channel.dataset)
                d = mm._datasets[index]
                g = givens[index]
                cur_num_examples = mm.num_examples[index]
                u = updates[index]

                # Flatten channel.graph_input and the appropriate part of
                # nested_theano_args, to iterate jointly over them.
                c_mapping = DataSpecsMapping(channel.data_specs)
                channel_inputs = c_mapping.flatten(channel.graph_input,
                                                   return_tuple=True)                
                inputs = c_mapping.flatten(nested_theano_args[i + 1],
                                           return_tuple=True)

                for (channel_X, X) in safe_izip(channel_inputs, inputs):
                    assert channel_X not in g or g[channel_X] is X
                    #print channel_X.type , X.type
                    assert channel_X.type == X.type
                    g[channel_X] = X

                if batch_size == 0:
                    # No channel needs any data, so there is no need to
                    # average results, and we will call the accum functions only
                    # once.
                    # TODO: better handling of channels not needing data when
                    # some other channels need data.
                    assert len(mm._flat_data_specs[1]) == 0
                    val = channel.val
                else:
                    if n == 0:
                        raise ValueError("Iterating over 0 examples results in divide by 0")
                    val = (channel.val * T.cast(batch_size, config.floatX)
                            / cur_num_examples)
                u[channel.val_shared] = channel.val_shared + val
            
        mm.accum = []
        for idx, packed in enumerate(safe_izip(givens, updates)):
            g, u = packed
            mode = mm.theano_function_mode
            if mode is not None and hasattr(mode, 'record'):
                for elem in g:
                    mode.record.handle_line('g key '+var_descriptor(elem)+'\n')
                    mode.record.handle_line('g val '+var_descriptor(g[elem])+'\n')
                for elem in u:
                    mode.record.handle_line('u key '+var_descriptor(elem)+'\n')
                    mode.record.handle_line('u val '+var_descriptor(u[elem])+'\n')
            function_name = 'Monitor.accum[%d]' % idx
            if mode is not None and hasattr(mode, 'record'):
                mode.record.handle_line('compiling supervised accum\n')
            # Some channels may not depend on the data, i.e., they might just monitor the model
            # parameters, or some shared variable updated by the training algorithm, so we
            # need to ignore the unused input error
            mm.accum.append(function(theano_args,
#......... (rest of this code omitted) .........
Developer: caomw, Project: Deep_wrapper, Lines: 103, Source: DBL_util.py


Note: The pylearn2.utils.data_specs.DataSpecsMapping.flatten method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and any distribution or use should follow the corresponding project's License. Do not reproduce without permission.