

Python Train.main_loop Method Code Examples

This article collects typical usage examples of the Python method pylearn2.train.Train.main_loop. If you are wondering what Train.main_loop does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of pylearn2.train.Train, the class this method belongs to.


The sections below show 15 code examples of Train.main_loop, sorted by popularity by default.
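Before diving into the individual examples, here is a minimal sketch of the pattern they all share: build a dataset, a model, and a training algorithm, hand them to Train, and call main_loop(). This sketch is illustrative only and is not taken from any of the projects below; the import paths assume the standard pylearn2 module layout, and the toy data and hyperparameters are arbitrary placeholders.

# Minimal sketch of the Train.main_loop pattern (illustrative, not from the projects below).
import numpy as np
from pylearn2.train import Train
from pylearn2.models.mlp import MLP, Softmax
from pylearn2.training_algorithms.sgd import SGD
from pylearn2.termination_criteria import EpochCounter
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix

# Toy supervised dataset: 6 examples, 3 features, 2 (soft) target columns.
rng = np.random.RandomState(0)
dataset = DenseDesignMatrix(X=rng.randn(6, 3), y=rng.randn(6, 2))

# A one-layer softmax MLP matching the dataset's 3 inputs and 2 outputs.
model = MLP(layers=[Softmax(layer_name='y', n_classes=2, irange=0.05)], nvis=3)

# Plain SGD using the model's default cost, stopping after one epoch.
algorithm = SGD(learning_rate=0.1, batch_size=2,
                termination_criterion=EpochCounter(max_epochs=1))

# Train wires dataset, model, and algorithm together; main_loop() runs the
# train/monitor/save cycle until the termination criterion is met.
train = Train(dataset=dataset, model=model, algorithm=algorithm,
              save_path=None, save_freq=0)
train.main_loop()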

Example 1: my_train

# Required import: from pylearn2.train import Train [as alias]
# Or: from pylearn2.train.Train import main_loop [as alias]
def my_train():
    trainset = CIN_FEATURE2(which_set='train')
    validset = CIN_FEATURE2(which_set='valid')
    layers = []
    layers1 = []
    h1 = Linear(layer_name='h1', dim=850, irange=0.05)
    h2 = Linear(layer_name='h2', dim=556, irange=0.05)
    layers1.append(h1)
    layers1.append(h2)
    l1 = CompositeLayerWithSource(layer_name='c', layers=layers1)
    l2 = Linear(layer_name='o', dim=2, irange=0.05)
    layers.append(l1)
    layers.append(l2)

    input_space = CompositeSpace(components=[VectorSpace(dim=850), VectorSpace(dim=556)])
    input_source = ['feature850', 'feature556']
    model = MLPWithSource(batch_size=1140, layers=layers,
                          input_space=input_space, input_source=input_source)

    algorithm = BGD(conjugate=1,
                    # batch_size=1140,
                    line_search_mode='exhaustive',
                    cost=Default(),
                    termination_criterion=EpochCounter(max_epochs=MAX_EPOCHS))

    train = Train(dataset=trainset, model=model, algorithm=algorithm)
    train.main_loop()
Author: jackal092927, Project: pylearn2_med, Lines: 29, Source: mlp_composite_train.py

Example 2: test_train_ae

# Required import: from pylearn2.train import Train [as alias]
# Or: from pylearn2.train.Train import main_loop [as alias]
def test_train_ae():
    ds = MNIST(which_set='train', one_hot=True, all_labelled=ALL_LABELLED, supervised=SUPERVISED)

    gsn = GSN.new(
        layer_sizes=[ds.X.shape[1], HIDDEN_SIZE, ds.X.shape[1]],
        activation_funcs=["sigmoid", "tanh", rescaled_softmax],
        pre_corruptors=[GaussianCorruptor(GAUSSIAN_NOISE)] * 3,
        post_corruptors=[SaltPepperCorruptor(SALT_PEPPER_NOISE), None, SmoothOneHotCorruptor(GAUSSIAN_NOISE)],
        layer_samplers=[BinomialSampler(), None, MultinomialSampler()],
        tied=False
    )

    _mbce = MeanBinaryCrossEntropy()
    reconstruction_cost = lambda a, b: _mbce.cost(a, b) / ds.X.shape[1]

    c = GSNCost([(0, 1.0, reconstruction_cost)], walkback=WALKBACK)

    alg = SGD(
        LEARNING_RATE,
        init_momentum=MOMENTUM,
        cost=c,
        termination_criterion=EpochCounter(MAX_EPOCHS),
        batches_per_iter=BATCHES_PER_EPOCH,
        batch_size=BATCH_SIZE,
        monitoring_dataset=ds,
        monitoring_batches=MONITORING_BATCHES
    )

    trainer = Train(ds, gsn, algorithm=alg, save_path="./results/gsn_ae_trained.pkl",
                    save_freq=5, extensions=[MonitorBasedLRAdjuster()])
    trainer.main_loop()
    print "done training"
Author: GarfieldEr007, Project: imageClassification, Lines: 34, Source: gsn_wrapper.py

Example 3: test_train_ae

# Required import: from pylearn2.train import Train [as alias]
# Or: from pylearn2.train.Train import main_loop [as alias]
def test_train_ae():
    GC = GaussianCorruptor

    gsn = GSN.new(
        layer_sizes=[ds.X.shape[1], 1000],
        activation_funcs=["sigmoid", "tanh"],
        pre_corruptors=[None, GC(1.0)],
        post_corruptors=[SaltPepperCorruptor(0.5), GC(1.0)],
        layer_samplers=[BinomialSampler(), None],
        tied=False
    )

    # average MBCE over example rather than sum it
    _mbce = MeanBinaryCrossEntropy()
    reconstruction_cost = lambda a, b: _mbce.cost(a, b) / ds.X.shape[1]

    c = GSNCost([(0, 1.0, reconstruction_cost)], walkback=WALKBACK)

    alg = SGD(
        LEARNING_RATE,
        init_momentum=MOMENTUM,
        cost=c,
        termination_criterion=EpochCounter(MAX_EPOCHS),
        batches_per_iter=BATCHES_PER_EPOCH,
        batch_size=BATCH_SIZE,
        monitoring_dataset=ds,
        monitoring_batches=10
    )

    trainer = Train(ds, gsn, algorithm=alg, save_path="gsn_ae_example.pkl",
                    save_freq=5)
    trainer.main_loop()
    print "done training"
Author: EderSantana, Project: pylearn2, Lines: 35, Source: gsn_example.py

Example 4: train_layer5

# Required import: from pylearn2.train import Train [as alias]
# Or: from pylearn2.train.Train import main_loop [as alias]
def train_layer5(supervised=True):
    global unsup_dataset, sup_dataset
    
    # Process unsupervised layer 5
    unsup_dataset = TransformerDataset(raw=unsup_dataset, transformer=serial.load(layer4_unsup_model))
    model = DenoisingAutoencoder(BinomialCorruptor(corruption_level=0.002), nvis=nhid4, nhid=nhid5, act_enc='tanh', act_dec=None,  irange=0.5)
    training_alg = SGD(cost=MeanSquaredReconstructionError(), learning_rate=1e-4, batch_size= batch_size, monitoring_dataset=unsup_dataset, termination_criterion=EpochCounter(max_epochs=max_epochs))
    extensions = [MonitorBasedLRAdjuster()]
    experiment = Train(dataset=unsup_dataset, model=model, algorithm=training_alg, save_path=layer5_unsup_model, save_freq=50, allow_overwrite=True, extensions=extensions)
    experiment.main_loop()
    
    if supervised:
        # Process supervised layer 5, this will be the final classifier
        layers = [PretrainedLayer(layer_name='h1', layer_content=serial.load(layer1_unsup_model), freeze_params=False),
                  PretrainedLayer(layer_name='h2', layer_content=serial.load(layer2_unsup_model), freeze_params=False),
                  PretrainedLayer(layer_name='h3', layer_content=serial.load(layer3_unsup_model), freeze_params=False),
                  PretrainedLayer(layer_name='h4', layer_content=serial.load(layer4_unsup_model), freeze_params=False), 
                  PretrainedLayer(layer_name='h5', layer_content=serial.load(layer5_unsup_model), freeze_params=False),
                  Softmax(n_classes=class_number, layer_name='y', irange=0.5)]
        model = MLP(layers=layers, batch_size=sup_dataset.y.shape[0], nvis=nvis, layer_name=None)
        training_alg = SGD(learning_rate=1e-3, monitoring_dataset=sup_dataset, termination_criterion=EpochCounter(max_epochs=10000))
        experiment = Train(dataset=sup_dataset, model=model, algorithm=training_alg, save_path=mlp_model, save_freq=50, allow_overwrite=True, extensions=extensions)
        experiment.main_loop()
        serial.save(layer1_unsup_model, model.layers[0].layer_content)
        serial.save(layer2_unsup_model, model.layers[1].layer_content)
        serial.save(layer3_unsup_model, model.layers[2].layer_content)
    serial.save(layer4_unsup_model, model.layers[3].layer_content)
Author: lluiscastrejonsubira, Project: Network-Oracle, Lines: 29, Source: trainer_v2.py

Example 5: test_batch_size_specialization

# Required import: from pylearn2.train import Train [as alias]
# Or: from pylearn2.train.Train import main_loop [as alias]
def test_batch_size_specialization():

    # Tests that using a batch size of 1 for training and a batch size
    # other than 1 for monitoring does not result in a crash.
    # This catches a bug reported in the pylearn-dev mailing list
    # e-mail "[pylearn-dev] monitor assertion error: channel_X.type != X.type"
    # The training data was specialized to a row matrix (theano tensor with
    # first dim broadcastable) and the monitor ended up with expressions
    # mixing the specialized and non-specialized version of the expression.

    m = 2
    rng = np.random.RandomState([25,9,2012])
    X = np.zeros((m,1))
    dataset = DenseDesignMatrix(X=X)

    model = SoftmaxModel(1)

    learning_rate = 1e-3

    cost = DummyCost()

    algorithm = SGD(learning_rate, cost, batch_size=1,
                    monitoring_batches=1, monitoring_dataset=dataset,
                    termination_criterion=EpochCounter(max_epochs=1),
                    update_callbacks=None,
                    set_batch_size=False)

    train = Train(dataset, model, algorithm, save_path=None,
                  save_freq=0, extensions=None)

    train.main_loop()
Author: fancyspeed, Project: pylearn2, Lines: 33, Source: test_sgd.py

Example 6: train

# Required import: from pylearn2.train import Train [as alias]
# Or: from pylearn2.train.Train import main_loop [as alias]
def train():
    LEARNING_RATE = 1e-4
    MOMENTUM = 0.25

    MAX_EPOCHS = 500
    BATCHES_PER_EPOCH = 100
    BATCH_SIZE = 1000

    dataset = FunnelDistribution()
    cost = FunnelGSNCost([(0, 1.0, MSR())], walkback=1)

    gc = GaussianCorruptor(0.75)
    dc = DropoutCorruptor(.5)
    gsn = GSN.new([10, 200, 10],
                  [None, "tanh", "tanh"], # activation
                  [None] * 3, # pre corruption
                  [None] * 3, # post corruption
                  [None] * 3, # layer samplers
                  tied=False)
    gsn._bias_switch = False

    alg = SGD(LEARNING_RATE, init_momentum=MOMENTUM, cost=cost,
              termination_criterion=EpochCounter(MAX_EPOCHS),
              batches_per_iter=BATCHES_PER_EPOCH, batch_size=BATCH_SIZE,
              monitoring_batches=100,
              monitoring_dataset=dataset)

    trainer = Train(dataset, gsn, algorithm=alg, save_path="funnel_gsn.pkl",
                    extensions=[MonitorBasedLRAdjuster()],
                    save_freq=50)

    trainer.main_loop()
    print "done training"
Author: lightcatcher, Project: funnel_gsn, Lines: 35, Source: learner.py

Example 7: test_training_a_model

# Required import: from pylearn2.train import Train [as alias]
# Or: from pylearn2.train.Train import main_loop [as alias]
def test_training_a_model():
    """
    Tests whether SparseDataset can be trained
    with a dummy model.
    """

    dim = 3
    m = 10
    rng = np.random.RandomState([22, 4, 2014])

    X = rng.randn(m, dim)
    ds = csr_matrix(X)
    dataset = SparseDataset(from_scipy_sparse_dataset=ds)

    model = SoftmaxModel(dim)
    learning_rate = 1e-1
    batch_size = 5

    epoch_num = 2
    termination_criterion = EpochCounter(epoch_num)

    cost = DummyCost()

    algorithm = SGD(learning_rate, cost, batch_size=batch_size,
                    termination_criterion=termination_criterion,
                    update_callbacks=None,
                    init_momentum=None,
                    set_batch_size=False)

    train = Train(dataset, model, algorithm, save_path=None,
                  save_freq=0, extensions=None)

    train.main_loop()
Author: Deathmonster, Project: pylearn2, Lines: 35, Source: test_sparse_dataset.py

Example 8: test_execution_order

# Required import: from pylearn2.train import Train [as alias]
# Or: from pylearn2.train.Train import main_loop [as alias]
def test_execution_order():

    # ensure save is called directly after monitoring by checking 
    # parameter values in `on_monitor` and `on_save`.

    model = MLP(layers=[Softmax(layer_name='y',
                                n_classes=2,
                                irange=0.)],
                nvis=3)

    dataset = DenseDesignMatrix(X=np.random.normal(size=(6, 3)),
                                y=np.random.normal(size=(6, 2)))

    epoch_counter = EpochCounter(max_epochs=1)

    algorithm = SGD(batch_size=2, learning_rate=0.1,
                    termination_criterion=epoch_counter)

    extension = ParamMonitor()

    train = Train(dataset=dataset,
                  model=model,
                  algorithm=algorithm,
                  extensions=[extension],
                  save_freq=1,
                  save_path="save.pkl")

    # mock save
    train.save = MethodType(only_run_extensions, train)

    train.main_loop()
Author: 123fengye741, Project: pylearn2, Lines: 33, Source: test_train.py

Example 9: test_empty_monitoring_datasets

# Required import: from pylearn2.train import Train [as alias]
# Or: from pylearn2.train.Train import main_loop [as alias]
def test_empty_monitoring_datasets():
    """
    Test that handling of the monitoring datasets dictionary
    does not fail when it is empty.
    """

    learning_rate = 1e-3
    batch_size = 5

    dim = 3

    rng = np.random.RandomState([25, 9, 2012])

    train_dataset = DenseDesignMatrix(X=rng.randn(10, dim))

    model = SoftmaxModel(dim)

    cost = DummyCost()

    algorithm = SGD(learning_rate, cost,
                    batch_size=batch_size,
                    monitoring_dataset={},
                    termination_criterion=EpochCounter(2))

    train = Train(train_dataset,
                  model,
                  algorithm,
                  save_path=None,
                  save_freq=0,
                  extensions=None)

    train.main_loop()
Author: allansp84, Project: pylearn2, Lines: 34, Source: test_sgd.py

Example 10: finish_one_layer

# Required import: from pylearn2.train import Train [as alias]
# Or: from pylearn2.train.Train import main_loop [as alias]
def finish_one_layer(X_train, y_train, X_test, y_test, img_units, txt_units, h_units, epochs, lr=0.1, model_type='FullModal', alpha=0.5, beta=0.5, layer_num='1', prefix='', suffix='', save_path=''):
    """
    预备+训练+测试完整的一层
    暂时假定单模态是图像,将图像平均分为两半
    """
    #0.参数检查
    print 'img_units=', img_units
    print 'txt_units=', txt_units
    print 'X_train.shape[1]=', X_train.shape[1]
    assert img_units + txt_units == X_train.shape[1]
    assert img_units + txt_units == X_test.shape[1]
    # 1. Build the datasets
    dsit_train, dsit_test = make_dataset_single_modal(X_train=X_train, y_train=y_train, X_test=X_test, y_test=y_test)

    # 2. Train the single-layer model
    monitoring_dataset = {'train': dsit_train, 'test': dsit_test}
    print 'in finish_one_layer, alpha=%f, beta=%f' % (alpha, beta)
    ae_model = AdjustableMultimodalAutoEncoder(model_type=model_type, alpha=alpha, beta=beta, n_vis_img=img_units, n_vis_txt=txt_units, n_hid_img=h_units, n_hid_txt=h_units, dec_f_img=True, dec_f_txt=True)
    alg = SGD(learning_rate=lr, cost=None, batch_size=20, init_momentum=None, monitoring_dataset=monitoring_dataset, termination_criterion=EpochCounter(max_epochs=epochs))  # cost=None so the cost returned by the model's own get_default_cost() is used
    
    train = Train(dataset=dsit_train, model=ae_model, algorithm=alg, save_path='multi_ae_save_layer' + layer_num + '.pkl', save_freq=10)
    
    t0 = time.clock()
    train.main_loop()
    print 'training time for layer%s: %f' % (layer_num, time.clock() - t0)
    
    # 3. Compute the design matrices propagated through the trained model
    X_img_propup_train, X_txt_propup_train, X_img_propup_test, X_txt_propup_test, X_propup_train, X_propup_test = propup_design_matrix(X_train=dsit_train.X, X_test=dsit_test.X, ae_model=ae_model)
    
    # 4. Evaluate the trained model's classification performance
    print '!!!evaluate model on dataset+++++++++++++++++++++++++++++++++++++++++++++++++++++++'
    model_evaluate(X_img_train=X_img_propup_train, X_txt_train=X_txt_propup_train, y_train=y_train, X_img_test= X_img_propup_test, X_txt_test=X_txt_propup_test, y_test=y_test, layer_num=layer_num, prefix=prefix, suffix=suffix, save_path=save_path)
    
    return X_propup_train, X_propup_test
Author: zanghu, Project: MyDNNmodule, Lines: 36, Source: corre_ae_tools_single.py

Example 11: test_multiple_inputs

# Required import: from pylearn2.train import Train [as alias]
# Or: from pylearn2.train.Train import main_loop [as alias]
def test_multiple_inputs():
    """
    Create a VectorSpacesDataset with two inputs (features0 and features1)
    and train an MLP which takes both inputs for 1 epoch.
    """
    mlp = MLP(
        layers=[
            FlattenerLayer(
                CompositeLayer(
                    'composite',
                    [Linear(10, 'h0', 0.1),
                     Linear(10, 'h1', 0.1)],
                    {
                        0: [1],
                        1: [0]
                    }
                )
            ),
            Softmax(5, 'softmax', 0.1)
        ],
        input_space=CompositeSpace([VectorSpace(15), VectorSpace(20)]),
        input_source=('features0', 'features1')
    )
    dataset = VectorSpacesDataset(
        (np.random.rand(20, 20).astype(theano.config.floatX),
         np.random.rand(20, 15).astype(theano.config.floatX),
         np.random.rand(20, 5).astype(theano.config.floatX)),
        (CompositeSpace([
            VectorSpace(20),
            VectorSpace(15),
            VectorSpace(5)]),
         ('features1', 'features0', 'targets')))
    train = Train(dataset, mlp, SGD(0.1, batch_size=5))
    train.algorithm.termination_criterion = EpochCounter(1)
    train.main_loop()
Author: lamblin, Project: pylearn2, Lines: 37, Source: test_mlp.py

Example 12: train_with_monitoring_datasets

# Required import: from pylearn2.train import Train [as alias]
# Or: from pylearn2.train.Train import main_loop [as alias]
    def train_with_monitoring_datasets(train_dataset,
                                       monitoring_datasets,
                                       model_force_batch_size,
                                       train_iteration_mode,
                                       monitor_iteration_mode):

        model = SoftmaxModel(dim)
        if model_force_batch_size:
            model.force_batch_size = model_force_batch_size

        cost = DummyCost()

        algorithm = SGD(learning_rate, cost,
                        batch_size=batch_size,
                        train_iteration_mode=train_iteration_mode,
                        monitor_iteration_mode=monitor_iteration_mode,
                        monitoring_dataset=monitoring_datasets,
                        termination_criterion=EpochCounter(2))

        train = Train(train_dataset,
                      model,
                      algorithm,
                      save_path=None,
                      save_freq=0,
                      extensions=None)

        train.main_loop()
Author: AlexArgus, Project: pylearn2, Lines: 29, Source: test_sgd.py

Example 13: test_sgd_sup

# Required import: from pylearn2.train import Train [as alias]
# Or: from pylearn2.train.Train import main_loop [as alias]
def test_sgd_sup():

    # tests that we can run the sgd algorithm
    # on a supervised cost.
    # does not test for correctness at all, just
    # that the algorithm runs without dying

    dim = 3
    m = 10

    rng = np.random.RandomState([25, 9, 2012])

    X = rng.randn(m, dim)

    idx = rng.randint(0, dim, (m, ))
    Y = np.zeros((m, dim))
    for i in xrange(m):
        Y[i, idx[i]] = 1

    dataset = DenseDesignMatrix(X=X, y=Y)

    m = 15
    X = rng.randn(m, dim)

    idx = rng.randint(0, dim, (m,))
    Y = np.zeros((m, dim))
    for i in xrange(m):
        Y[i, idx[i]] = 1

    # Including a monitoring dataset lets us test that
    # the monitor works with supervised data
    monitoring_dataset = DenseDesignMatrix(X=X, y=Y)

    model = SoftmaxModel(dim)

    learning_rate = 1e-3
    batch_size = 5

    cost = SupervisedDummyCost()

    # We need to include this so the test actually stops running at some point
    termination_criterion = EpochCounter(5)

    algorithm = SGD(learning_rate, cost,
                    batch_size=batch_size,
                    monitoring_batches=3,
                    monitoring_dataset=monitoring_dataset,
                    termination_criterion=termination_criterion,
                    update_callbacks=None,
                    init_momentum=None,
                    set_batch_size=False)

    train = Train(dataset,
                  model,
                  algorithm,
                  save_path=None,
                  save_freq=0,
                  extensions=None)

    train.main_loop()
Author: AlexArgus, Project: pylearn2, Lines: 62, Source: test_sgd.py

Example 14: __init__

# Required import: from pylearn2.train import Train [as alias]
# Or: from pylearn2.train.Train import main_loop [as alias]
class RBMTraining:
	def __init__(self, data_path="./datasets/", save_path="training.pkl", simulation_data = None, identifier = 0):
		self.id = identifier
		self.data_path = data_path
		self.save_path = save_path
		if simulation_data != None:
			self.sim_data = simulation_data
			self.save_data_loaded()
		else:
			self.sim_data = SimulationData(data_path)
			self.load_data()
		
	def load_data(self):
		self.sim_data.load_data()
		self.sim_data.preprocessor() 

		tmp = self.sim_data.split_train_test()
		self.datasets = {'train' : tmp[0], 'test' : tmp[1]}

		self.num_simulations = self.sim_data.num_simulations
		self.input_values = self.sim_data.input_values
		self.output_values = self.sim_data.output_values

	def set_structure(self, num_layers = 4, shape = 'linear'):
		self.vis = self.input_values
		self.hid = self.output_values
		return [self.vis, self.hid]
		
		   
	def get_model(self):
		self.model = RBM(nvis=self.vis, nhid=self.hid, irange=.05)
		return self.model
	   
	def set_training_criteria(self, 
							learning_rate=0.05,
							batch_size=10, 
							max_epochs=10):
		
		self.training_alg = DefaultTrainingAlgorithm(batch_size = batch_size, 
													monitoring_dataset = self.datasets, 
													termination_criterion = EpochCounter(max_epochs))
	
	def set_extensions(self, extensions=None):
		self.extensions = None #[MonitorBasedSaveBest(channel_name='objective',
												#save_path = './training/training_monitor_best.pkl')]
		
	def set_attributes(self, attributes):
		self.attributes = attributes

	def define_training_experiment(self, save_freq = 10):
		self.experiment = Train(dataset=self.datasets['train'], 
								model=self.model, 
								algorithm=self.training_alg, 
								save_path=self.save_path , 
								save_freq=save_freq, 
								allow_overwrite=True, 
								extensions=self.extensions)

	def train_experiment(self):
		self.experiment.main_loop()
Author: albertomontesg, Project: deeplearn, Lines: 62, Source: RBMTraining.py

Example 15: test_serialization_guard

# Required import: from pylearn2.train import Train [as alias]
# Or: from pylearn2.train.Train import main_loop [as alias]
def test_serialization_guard():

    # tests that Train refuses to serialize the dataset

    dim = 2
    m = 11

    rng = np.random.RandomState([28,9,2012])
    X = rng.randn(m, dim)
    dataset = DenseDesignMatrix(X=X)

    model = DummyModel(dim)
    # make the dataset part of the model, so it will get
    # serialized
    model.dataset = dataset

    Monitor.get_monitor(model)

    algorithm = DummyAlgorithm()

    train = Train(dataset, model, algorithm, save_path='_tmp_unit_test.pkl',
                 save_freq=1, extensions=None)

    try:
        train.main_loop()
    except RuntimeError:
        return
    assert False # train did not complain, this is a bug
Author: BloodNg, Project: pylearn2, Lines: 30, Source: test_train.py


Note: The pylearn2.train.Train.main_loop examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright remains with the original authors; please follow the license of the corresponding project before distributing or using the code. Do not reproduce without permission.