

Python monitor.Monitor Class Code Examples

This article collects typical usage examples of the Python class pylearn2.monitor.Monitor. If you are wondering how exactly to use the Monitor class, or are looking for concrete usage examples, the curated code samples below may help.


Below are 15 code examples of the Monitor class, ordered by popularity.
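
The same pattern recurs across the examples below: obtain the model's Monitor, register one or more monitoring datasets, attach channels built from Theano expressions, and then call the monitor object to record the channel values. The following minimal sketch condenses that pattern; it is only an illustration distilled from the test code in Examples 1 and 14 (the function name minimal_monitor_sketch is ours, and the model and dataset arguments stand in for any objects implementing pylearn2's Model and Dataset interfaces), not a definitive recipe.

import theano.tensor as T
from pylearn2.monitor import Monitor

def minimal_monitor_sketch(model, dataset, batch_size=2, num_batches=1):
    # There is one Monitor per model; get_monitor creates it on first use
    # and returns the existing one afterwards.
    monitor = Monitor.get_monitor(model)

    # Register the dataset the channels will be evaluated on.
    monitor.add_dataset(dataset=dataset, mode='sequential',
                        batch_size=batch_size, num_batches=num_batches)

    # Build a symbolic expression over an input batch and expose it
    # as a monitoring channel.
    vis_batch = T.matrix()
    data_specs = (model.get_input_space(), model.get_input_source())
    monitor.add_channel(name='mean', ipt=vis_batch, val=vis_batch.mean(),
                        dataset=dataset, data_specs=data_specs)

    # Calling the monitor evaluates every channel over its dataset and
    # appends the result to the channel's val_record.
    monitor()
    return monitor.channels['mean'].val_record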

Example 1: channel_scaling_checker

 def channel_scaling_checker(num_examples, mode, num_batches, batch_size):
     num_features = 2
     monitor = Monitor(DummyModel(num_features))
     dataset = DummyDataset(num_examples, num_features)
     monitor.add_dataset(dataset=dataset, mode=mode,
                             num_batches=num_batches, batch_size=batch_size)
     vis_batch = T.matrix()
     mean = vis_batch.mean()
     data_specs = (monitor.model.get_input_space(),
                   monitor.model.get_input_source())
     monitor.add_channel(name='mean', ipt=vis_batch, val=mean, dataset=dataset,
                         data_specs=data_specs)
     monitor()
     assert 'mean' in monitor.channels
     mean = monitor.channels['mean']
     assert len(mean.val_record) == 1
     actual = mean.val_record[0]
     X = dataset.get_design_matrix()
     if batch_size is not None and num_batches is not None:
         total = min(num_examples, num_batches * batch_size)
     else:
         total = num_examples
     expected = X[:total].mean()
     if not np.allclose(expected, actual):
         raise AssertionError("Expected monitor to contain %f but it has "
                              "%f" % (expected, actual))
Developer: julius506, Project: pylearn2, Lines: 26, Source: test_monitor.py

Example 2: test_serialization_guard

def test_serialization_guard():

    # tests that Train refuses to serialize the dataset

    dim = 2
    m = 11

    rng = np.random.RandomState([28,9,2012])
    X = rng.randn(m, dim)
    dataset = DenseDesignMatrix(X=X)

    model = DummyModel(dim)
    # make the dataset part of the model, so it will get
    # serialized
    model.dataset = dataset

    Monitor.get_monitor(model)

    algorithm = DummyAlgorithm()

    train = Train(dataset, model, algorithm, save_path='_tmp_unit_test.pkl',
                 save_freq=1, extensions=None)

    try:
        train.main_loop()
    except RuntimeError:
        return
    assert False # train did not complain, this is a bug
Developer: BloodNg, Project: pylearn2, Lines: 28, Source: test_train.py

Example 3: channel_scaling_checker

 def channel_scaling_checker(num_examples, mode, num_batches, batch_size):
     num_features = 2
     monitor = Monitor(DummyModel(num_features))
     dataset = DummyDataset(num_examples, num_features)
     try:
         monitor.add_dataset(dataset=dataset, mode=mode,
                             num_batches=num_batches, batch_size=batch_size)
     except NotImplementedError:
         # make sure this was due to the unimplemented batch_size case
         if num_batches is None:
             assert num_examples % batch_size != 0
         else:
             assert num_examples % num_batches != 0
         raise SkipTest()
     vis_batch = T.matrix()
     mean = vis_batch.mean()
     monitor.add_channel(name='mean', ipt=vis_batch, val=mean, dataset=dataset)
     monitor()
     assert 'mean' in monitor.channels
     mean = monitor.channels['mean']
     assert len(mean.val_record) == 1
     actual = mean.val_record[0]
     X = dataset.get_design_matrix()
     if batch_size is not None and num_batches is not None:
         total = min(num_examples, num_batches * batch_size)
     else:
         total = num_examples
     expected = X[:total].mean()
     if not np.allclose(expected, actual):
         raise AssertionError("Expected monitor to contain %f but it has "
                              "%f" % (expected, actual))
Developer: deigen, Project: pylearn, Lines: 31, Source: test_monitor.py

Example 4: prep_valtest_monitor

 def prep_valtest_monitor(self, model, batch_size):
     minibatch = T.as_tensor_variable(
                     self.valid_ddm.get_batch_topo(batch_size), 
                     name='minibatch'
                 )
     target = T.matrix('target')
     Accuracy = self.get_classification_accuracy(model, minibatch, target)           
     monitor = Monitor.get_monitor(model)
     
     monitor.add_dataset(self.valid_ddm, 'sequential', batch_size)
     monitor.add_channel("Validation Classification Accuracy",
                         (minibatch, target),
                         Accuracy,
                         self.valid_ddm)
     monitor.add_channel("Validation Missclassification",
                         (minibatch, target),
                         1.0-Accuracy,
                         self.valid_ddm)
                         
     if self.test_ddm is not None:
         monitor.add_dataset(self.test_ddm, 'sequential', batch_size)
         monitor.add_channel("Test Classification Accuracy",
                             (minibatch, target),
                             Accuracy,
                             self.test_ddm)
Developer: nicholas-leonard, Project: ift6266, Lines: 25, Source: hps.py

Example 5: test_reject_empty

def test_reject_empty():

    # Test that Monitor raises an error if asked to iterate over 0 batches

    BATCH_SIZE = 2
    num_examples = BATCH_SIZE
    NUM_FEATURES = 3

    model = DummyModel(NUM_FEATURES)
    monitor = Monitor.get_monitor(model)

    monitoring_dataset = DummyDataset(num_examples = num_examples,
            num_features = NUM_FEATURES)

    monitor.add_dataset(monitoring_dataset, 'sequential', batch_size=BATCH_SIZE,
            num_batches = 0)

    name = 'z'

    monitor.add_channel(name = name,
            ipt = model.input_space.make_theano_batch(),
            val = 0.,
            data_specs=(model.get_input_space(), model.get_input_source()))

    try:
        monitor()
    except ValueError:
        return
    assert False
Developer: julius506, Project: pylearn2, Lines: 29, Source: test_monitor.py

Example 6: main_loop

    def main_loop(self):
        """
        Repeatedly runs an epoch of the training algorithm, runs any
        epoch-level callbacks, and saves the model.
        """
        if self.algorithm is None:
            self.model.monitor = Monitor.get_monitor(self.model)
            self.run_callbacks_and_monitoring()
            while self.model.train(dataset=self.dataset):
                self.run_callbacks_and_monitoring()
                if self.save_freq > 0 and self.epochs % self.save_freq == 0:
                    self.save()
                self.epochs += 1
            self.run_callbacks_and_monitoring()
            if self.save_freq > 0:
                self.save()
        else:
            self.algorithm.setup(model=self.model, dataset=self.dataset)
            self.run_callbacks_and_monitoring()
            epoch_start = datetime.datetime.now()
            while self.algorithm.train(dataset=self.dataset):
                epoch_end = datetime.datetime.now()
                print 'Time this epoch:', str(epoch_end - epoch_start)
                epoch_start = datetime.datetime.now()
                self.run_callbacks_and_monitoring()
                if self.save_freq > 0 and self.epochs % self.save_freq == 0:
                    self.save()
                self.epochs += 1
            self.run_callbacks_and_monitoring()

            if self.save_freq > 0:
                self.save()
Developer: JasonBens, Project: pylearn, Lines: 32, Source: train.py

Example 7: setup_monitor

 def setup_monitor(self):
     if self.topo_view:
         print "topo view"
         self.minibatch = T.as_tensor_variable(
                     self.valid_ddm.get_batch_topo(self.batch_size), 
                     name='minibatch'
                 )
     else:
         print "design view"
         batch = self.valid_ddm.get_batch_design(self.batch_size)
         if isinstance(batch, spp.csr_matrix):
             print "sparse2"
             self.minibatch = self.model.get_input_space().make_batch_theano()
             print type(self.minibatch)
         else:
             self.minibatch = T.as_tensor_variable(
                     self.valid_ddm.get_batch_design(self.batch_size), 
                     name='minibatch'
                 )
                     
     self.target = T.matrix('target')  
     
     self.monitor = Monitor.get_monitor(self.model)
     self.log_channel_names = []
     self.log_channel_names.extend(self.base_channel_names)
     
     self.monitor.add_dataset(self.valid_ddm, 'sequential', 
                                 self.batch_size)
     if self.test_ddm is not None:
         self.monitor.add_dataset(self.test_ddm, 'sequential', 
                                     self.batch_size)
Developer: nicholas-leonard, Project: hps, Lines: 31, Source: hps3.py

Example 8: test_ambig_data

def test_ambig_data():

    # test that the right error is raised if you
    # add a channel to a monitor that has multiple datasets
    # and don't specify the dataset

    BATCH_SIZE = 2
    num_examples = BATCH_SIZE
    NUM_FEATURES = 3

    model = DummyModel(NUM_FEATURES)
    monitor = Monitor.get_monitor(model)

    first = DummyDataset(num_examples = num_examples,
            num_features = NUM_FEATURES)
    second = DummyDataset(num_examples = num_examples,
            num_features = NUM_FEATURES)

    monitor.add_dataset(first, 'sequential', batch_size=BATCH_SIZE)
    monitor.add_dataset(second, 'sequential', batch_size=BATCH_SIZE)


    name = 'num_prereq_calls'

    try:
        monitor.add_channel(name = name,
            ipt = model.input_space.make_theano_batch(),
            val = 0.,
            data_specs=(model.get_input_space(), model.get_input_source()))
    except ValueError as e:
        assert exc_message(e) == _err_ambig_data
        return
    assert False
Developer: julius506, Project: pylearn2, Lines: 33, Source: test_monitor.py

Example 9: test_prereqs_batch

def test_prereqs_batch():

    # Test that prereqs get run before each monitoring batch

    BATCH_SIZE = 2
    num_examples = 2 * BATCH_SIZE
    NUM_FEATURES = 3

    model = DummyModel(NUM_FEATURES)
    monitor = Monitor.get_monitor(model)

    monitoring_dataset = DummyDataset(num_examples=num_examples, num_features=NUM_FEATURES)

    monitor.add_dataset(monitoring_dataset, "sequential", batch_size=BATCH_SIZE)

    sign = sharedX(1.0)

    def prereq(X, y):
        sign.set_value(-sign.get_value())

    name = "batches_should_cancel_to_0"

    monitor.add_channel(name=name, ipt=model.input_space.make_theano_batch(), val=sign, prereqs=[prereq])

    channel = monitor.channels[name]

    assert len(channel.val_record) == 0
    monitor()
    assert channel.val_record == [0]
    monitor()
    assert channel.val_record == [0, 0]
Developer: jiangfeng1124, Project: pylearn2, Lines: 31, Source: test_monitor.py

Example 10: prep_valtest_monitor

 def prep_valtest_monitor(self, model, batch_size):
     if self.topo_view:
         print "topo view"
         minibatch = T.as_tensor_variable(
                         self.valid_ddm.get_batch_topo(batch_size), 
                         name='minibatch'
                     )
     else:
         print "design view"
         minibatch = T.as_tensor_variable(
                         self.valid_ddm.get_batch_design(batch_size), 
                         name='minibatch'
                     )
     target = T.matrix('target')
     valMSE = MissingTargetCost()(model, minibatch, target)
     monitor = Monitor.get_monitor(model)
     
     monitor.add_dataset(self.valid_ddm, 'sequential', batch_size)
     monitor.add_channel("Validation MSE",
                         (minibatch, target),
                         valMSE,
                         self.valid_ddm)
                         
     if self.test_ddm is not None:
         monitor.add_dataset(self.test_ddm, 'sequential', batch_size)
         monitor.add_channel("Test MSE",
                             (minibatch, target),
                             valMSE,
                             self.test_ddm)
Developer: nicholas-leonard, Project: hps, Lines: 29, Source: main.py

Example 11: setup

    def setup(self, model, dataset):
        """
        Allows the training algorithm to do some preliminary configuration
        *before* we actually start training the model. The dataset is provided
        in case other derived training algorithms need to modify model based on
        the dataset.

        Parameters
        ----------
        model: a Python object representing the model to train loosely
        implementing the interface of models.model.Model.

        dataset: a pylearn2.datasets.dataset.Dataset object used to draw
        training data
        """
        self.model = model

        self.monitor = Monitor.get_monitor(model)

        if self.monitoring_dataset is not None:
            # Get the data specifications needed by the model
            space, source = model.get_monitoring_data_specs()

            # Create Theano variables for each of the individual components
            # of that data. Usually, it will be X for inputs and Y for targets.
            # First, we need to find these components, and put them in a tuple
            mapping = DataSpecsMapping((space, source))
            space_tuple = mapping.flatten(space, return_tuple=True)
            source_tuple = mapping.flatten(source, return_tuple=True)
            # Then, build a flat tuple of these Theano variables
            ipt = tuple(sp.make_theano_batch(name='monitor_%s' % src)
                    for (sp, src) in safe_zip(space_tuple, source_tuple))
            # Finally, organize them back into a structure expected by the
            # monitoring channels of the model
            nested_ipt = mapping.nest(ipt)

            self.monitor.add_dataset(dataset=self.monitoring_dataset,
                                mode="sequential",
                                batch_size=self.batch_size,
                                num_batches=self.monitoring_batches)

            channels = model.get_monitoring_channels(nested_ipt)
            if not isinstance(channels, dict):
                raise TypeError("model.get_monitoring_channels must return a "
                                "dictionary, but it returned " + str(channels))
            for name in channels:
                J = channels[name]
                if isinstance(J, tuple):
                    assert len(J) == 2
                    J, prereqs = J
                else:
                    prereqs = None

                self.monitor.add_channel(name=name,
                                         ipt=nested_ipt,
                                         val=J,
                                         prereqs=prereqs,
                                         data_specs=(space, source))
        self.first = True
        self.bSetup = True
Developer: Alienfeel, Project: pylearn2, Lines: 60, Source: default.py

Example 12: setup_monitor

 def setup_monitor(self):
     if self.topo_view:
         print "topo view"
         self.minibatch = T.as_tensor_variable(
                     self.valid_ddm.get_batch_topo(self.batch_size), 
                     name='minibatch'
                 )
     else:
         print "design view"
         self.minibatch = T.as_tensor_variable(
                     self.valid_ddm.get_batch_design(self.batch_size), 
                     name='minibatch'
                 )
                     
     self.target = T.tensor3('target')  
     
     self.monitor = Monitor.get_monitor(self.model)
     self.log_channel_names = []
     self.log_channel_names.extend(self.base_channel_names)
     
     self.monitor.add_dataset(self.valid_ddm, 'sequential', 
                                 self.batch_size)
     if self.test_ddm is not None:
         self.monitor.add_dataset(self.test_ddm, 'sequential', 
                                     self.batch_size)
Developer: nicholas-leonard, Project: hps, Lines: 25, Source: test1.py

Example 13: test_prereqs

def test_prereqs():

    # Test that prereqs get run before the monitoring channels are computed

    BATCH_SIZE = 2
    num_examples = BATCH_SIZE
    NUM_FEATURES = 3

    model = DummyModel(NUM_FEATURES)
    monitor = Monitor.get_monitor(model)

    monitoring_dataset = DummyDataset(num_examples = num_examples,
            num_features = NUM_FEATURES)

    monitor.add_dataset(monitoring_dataset, 'sequential', batch_size=BATCH_SIZE)

    prereq_counter = sharedX(0.)
    def prereq(*data):
        prereq_counter.set_value(prereq_counter.get_value() + 1.)

    name = 'num_prereq_calls'

    monitor.add_channel(name = name,
            ipt = model.input_space.make_theano_batch(),
            val = prereq_counter,
            prereqs = [ prereq ],
            data_specs=(model.get_input_space(), model.get_input_source()))

    channel = monitor.channels[name]

    assert len(channel.val_record) == 0
    monitor()
    assert channel.val_record == [1]
    monitor()
    assert channel.val_record == [1,2]
Developer: julius506, Project: pylearn2, Lines: 35, Source: test_monitor.py

Example 14: test_save_load_save

def test_save_load_save():

    """
    Test that a monitor can be saved, then loaded, and then the loaded
    copy can be saved again.
    This only tests that the serialization and deserialization processes
    don't raise an exception. It doesn't test for correctness at all.
    """

    model = DummyModel(1)
    monitor = Monitor.get_monitor(model)

    num_examples = 2
    num_features = 3
    num_batches = 1
    batch_size = 2

    dataset = DummyDataset(num_examples, num_features)
    monitor.add_dataset(dataset=dataset,
                            num_batches=num_batches, batch_size=batch_size)
    vis_batch = T.matrix()
    mean = vis_batch.mean()
    data_specs = (monitor.model.get_input_space(),
                  monitor.model.get_input_source())
    monitor.add_channel(name='mean', ipt=vis_batch, val=mean, dataset=dataset,
                        data_specs=data_specs)

    saved = to_string(monitor)
    monitor = from_string(saved)
    saved_again = to_string(monitor)
Developer: julius506, Project: pylearn2, Lines: 30, Source: test_monitor.py

Example 15: setup

    def setup(self, model, dataset):
        """
        Initialize the training algorithm. Should be called
        once before calls to train.

        Parameters
        ----------
        model : object
            Model to be trained.  Object implementing the pylearn2 Model
            interface.
        dataset : object
            Dataset on which to train.  Object implementing the
            pylearn2 Dataset interface.
        """

        self.model = model

        self.monitor = Monitor.get_monitor(model)
        self.monitor.set_dataset(dataset=self.monitoring_dataset,
                                 batches=self.monitoring_batches,
                                 batch_size=self.batch_size)

        X = T.matrix(name='sgd_X')
        J = self.cost(model, X)
        if J.name is None:
            J.name = 'sgd_cost(' + X.name + ')'
        self.monitor.add_channel(name=J.name, ipt=X, val=J)
        params = model.get_params()

        for i, param in enumerate(params):
            if param.name is None:
                param.name = 'sgd_params[%d]' % i

        grads = dict(zip(params, T.grad(J, params)))

        for param in grads:
            if grads[param].name is None:
                grads[param].name = ('grad(%(costname)s, %(paramname)s)' %
                                     {'costname': J.name,
                                      'paramname': param.name})

        learning_rate = T.scalar('sgd_learning_rate')

        updates = dict(zip(params, [param - learning_rate * grads[param]
                                    for param in params]))

        for param in updates:
            if updates[param].name is None:
                updates[param].name = 'sgd_update(' + param.name + ')'

        model.censor_updates(updates)
        for param in updates:
            if updates[param].name is None:
                updates[param].name = 'censor(sgd_update(' + param.name + '))'

        self.sgd_update = function([X, learning_rate], updates=updates,
                                   name='sgd_update')
        self.params = params
        self.bSetup = True
Developer: davyfeng, Project: pylearn, Lines: 59, Source: sgd.py


Note: The pylearn2.monitor.Monitor class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. For distribution and use, please refer to each project's license. Do not republish without permission.