

Python Trainer.__init__ Method Code Examples

This article collects typical usage examples of the Python trainer.Trainer.__init__ method. If you are wondering how Trainer.__init__ is used in practice, the curated code examples below may help. You can also explore further usage examples of the containing class, trainer.Trainer.


Four code examples of the Trainer.__init__ method are shown below, sorted by popularity by default.

Example 1: __init__

# Required import: from trainer import Trainer [as alias]
# Or: from trainer.Trainer import __init__ [as alias]
    def __init__(self, module, dataset=None, learningrate=0.01, lrdecay=1.0,
                 momentum=0., verbose=False, batchlearning=False,
                 weightdecay=0.):
        """Create a BackpropTrainer to train the specified `module` on the
        specified `dataset`.

        The learning rate gives the ratio by which parameters are changed in
        the direction of the gradient. The learning rate is multiplied by
        `lrdecay` after each training step. The parameters are also adjusted
        with respect to `momentum`, the ratio by which the gradient of the
        last timestep is used.

        If `batchlearning` is set, the parameters are updated only at the end
        of each epoch. Default is False.

        `weightdecay` is the weight decay rate, where 0 means no weight decay
        at all.
        """
        Trainer.__init__(self, module)
        self.setData(dataset)
        self.verbose = verbose
        self.batchlearning = batchlearning
        self.weightdecay = weightdecay
        self.epoch = 0
        self.totalepochs = 0
        # set up gradient descender
        self.descent = GradientDescent()
        self.descent.alpha = learningrate
        self.descent.momentum = momentum
        self.descent.alphadecay = lrdecay
        self.descent.init(module.params)
Developer ID: kaeufl, Project: pybrain, Lines of code: 33, Source file: backprop.py
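
For context, a minimal usage sketch of this constructor follows. It assumes the standard PyBrain package layout (pybrain.supervised.trainers.BackpropTrainer) rather than the local `trainer` module used in the snippet above; the small XOR-style dataset is only illustrative.

# Minimal sketch, assuming the standard PyBrain package layout; the snippet
# above comes from a fork where the base Trainer lives in a local `trainer` module.
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer

# XOR-style dataset: 2 inputs, 1 target
ds = SupervisedDataSet(2, 1)
for inp, target in [((0, 0), (0,)), ((0, 1), (1,)), ((1, 0), (1,)), ((1, 1), (0,))]:
    ds.addSample(inp, target)

net = buildNetwork(2, 3, 1)  # 2-3-1 feed-forward network

# The keyword arguments map directly onto the constructor shown above.
trainer = BackpropTrainer(net, dataset=ds, learningrate=0.01, lrdecay=1.0,
                          momentum=0.9, verbose=True, weightdecay=0.0)
trainer.trainEpochs(10)  # run ten epochs of backpropagation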

Example 2: __init__

# Required import: from trainer import Trainer [as alias]
# Or: from trainer.Trainer import __init__ [as alias]
# Note: `inf` in the signature below comes from numpy/scipy (e.g. `from numpy import inf`).
 def __init__(self, module, ds_train=None, ds_val=None, gtol=1e-05, norm=inf,
              verbose=False, **kwargs):
     """
     Create a BFGSTrainer to train the specified `module` on the
     specified `dataset`.
     """
     Trainer.__init__(self, module)
     self.setData(ds_train)
     self.ds_val = ds_val
     self.verbose = verbose
     self.epoch = 0
     self.totalepochs = 0
     self.train_errors = []
     self.test_errors = []
     self.optimal_params = None
     self.optimal_epoch = 0
     
     self.module = module
Developer ID: kaeufl, Project: pybrain, Lines of code: 20, Source file: bfgs.py
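
A hedged construction sketch for this trainer follows. The import path (bfgs.BFGSTrainer) is only an assumption inferred from the source file name listed above, and the network/dataset helpers come from standard PyBrain; treat it as illustrative, not as the fork's documented API.

# Sketch only: the `bfgs` module name and the BFGSTrainer class location are
# assumptions inferred from the source file (bfgs.py) of the fork above.
from numpy import inf
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from bfgs import BFGSTrainer  # hypothetical import path

ds_train = SupervisedDataSet(2, 1)
ds_train.addSample((0, 1), (1,))
ds_val = SupervisedDataSet(2, 1)
ds_val.addSample((1, 1), (0,))

net = buildNetwork(2, 3, 1)
# Keyword arguments mirror the constructor signature shown above.
trainer = BFGSTrainer(net, ds_train=ds_train, ds_val=ds_val,
                      gtol=1e-05, norm=inf, verbose=True)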

Example 3: __init__

# Required import: from trainer import Trainer [as alias]
# Or: from trainer.Trainer import __init__ [as alias]
# Note: `finfo` in the signature below comes from numpy (`from numpy import finfo`).
 def __init__(self, module, dataset, totalIterations=100,
              xPrecision=finfo(float).eps, fPrecision=finfo(float).eps,
              init_scg=True, **kwargs):
     """Create a SCGTrainer to train the specified `module` on the
     specified `dataset`.
     """
     Trainer.__init__(self, module)
     self.setData(dataset)
     self.input_sequences = self.ds.getField('input')
     self.epoch = 0
     self.totalepochs = 0
     self.module = module
     #self.tmp_module = module.copy()
     if init_scg:
         self.scg = SCG(self.module.params, self.f, self.df, self,
                        totalIterations, xPrecision, fPrecision,
                        evalFunc=lambda x: str(x / self.ds.getLength()))
     else:
         print "Warning: SCG trainer not initialized!"
Developer ID: kaeufl, Project: pybrain, Lines of code: 21, Source file: scg.py
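
Similarly, here is a hedged construction sketch for the SCG trainer; the `scg` module path is an assumption based on the source file name, and the dataset only needs to expose an 'input' field (which SupervisedDataSet does).

# Sketch only: the `scg` module name is inferred from the source file (scg.py).
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from scg import SCGTrainer  # hypothetical import path

ds = SupervisedDataSet(2, 1)
ds.addSample((0, 1), (1,))
ds.addSample((1, 0), (1,))

net = buildNetwork(2, 3, 1)
# xPrecision/fPrecision fall back to numpy machine epsilon, as in the signature above.
trainer = SCGTrainer(net, ds, totalIterations=100)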

Example 4: __init__

# Required import: from trainer import Trainer [as alias]
# Or: from trainer.Trainer import __init__ [as alias]
    def __init__(self, evolino_network, dataset, **kwargs):
        """
            :key subPopulationSize: Size of the subpopulations.
            :key nCombinations: Number of times each chromosome is built into an individual. default=4
            :key nParents: Number of individuals left in a subpopulation after selection.
            :key initialWeightRange: Range of the weights of the RNN after initialization. default=(-0.1,0.1)
            :key weightInitializer: Initializer object for the weights of the RNN. default=Randomization(...)
            :key mutationAlpha: The mutation's intensity. default=0.01
            :key mutationVariate: The variate used for mutation. default=CauchyVariate(...)
            :key wtRatio: The quotient: washout-time/training-time. Needed to
                            split the sequences into washout phase and training phase.
            :key nBurstMutationEpochs: Number of consecutive epochs without fitness improvement
                                         before burst mutation is applied. default=Infinity
            :key backprojectionFactor: Weight of the backprojection. Usually
                                         supplied through evolino_network.
            :key selection: Selection object for evolino
            :key reproduction: Reproduction object for evolino
            :key burstMutation: BurstMutation object for evolino
            :key evaluation: Evaluation object for evolino
            :key verbosity: verbosity level
        """
        Trainer.__init__(self, evolino_network)

        self.network = evolino_network
        self.setData(dataset)

        ap = KWArgsProcessor(self, kwargs)

        # misc
        ap.add('verbosity', default=0)

        # population
        ap.add('subPopulationSize', private=True, default=8)
        ap.add('nCombinations', private=True, default=4)
        ap.add('nParents', private=True, default=None)
        ap.add('initialWeightRange', private=True, default=(-0.1, 0.1))
        ap.add('weightInitializer', private=True, default=Randomization(self._initialWeightRange[0], self._initialWeightRange[1]))

        # mutation
        ap.add('mutationAlpha', private=True, default=0.01)
        ap.add('mutationVariate', private=True, default=CauchyVariate(0, self._mutationAlpha))

        # evaluation
        ap.add('wtRatio', private=True, default=(1, 3))

        # burst mutation
        ap.add('nBurstMutationEpochs', default=Infinity)

        # network
        ap.add('backprojectionFactor', private=True, default=float(evolino_network.backprojectionFactor))
        evolino_network.backprojectionFactor = self._backprojectionFactor

        # aggregated objects
        ap.add('selection', default=EvolinoSelection())
        ap.add('reproduction', default=EvolinoReproduction(mutationVariate=self.mutationVariate))
        ap.add('burstMutation', default=EvolinoBurstMutation())
        ap.add('evaluation', default=EvolinoEvaluation(evolino_network, self.ds, **kwargs))

        self.selection.nParents = self.nParents

        self._population = EvolinoPopulation(
            EvolinoSubIndividual(evolino_network.getGenome()),
            self._subPopulationSize,
            self._nCombinations,
            self._weightInitializer
            )

        filters = []
        filters.append(self.evaluation)
        filters.append(self.selection)
        filters.append(self.reproduction)

        self._filters = filters

        self.totalepochs = 0
        self._max_fitness = self.evaluation.max_fitness
        self._max_fitness_epoch = self.totalepochs
Developer ID: DanSGraham, Project: School-Projects, Lines of code: 79, Source file: evolino.py
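
Because this constructor is driven almost entirely by keyword arguments, a construction sketch may help. It is heavily hedged: the EvolinoNetwork/EvolinoTrainer import paths and the EvolinoNetwork constructor signature are assumptions, and only keyword arguments documented in the docstring above are used.

# Heavily hedged sketch: the EvolinoNetwork/EvolinoTrainer import paths and the
# EvolinoNetwork constructor signature are assumptions, not taken from the project above.
from pybrain.datasets import SequentialDataSet
from pybrain.structure.modules.evolinonetwork import EvolinoNetwork  # assumed path
from evolino import EvolinoTrainer  # assumed path (source file: evolino.py)

# Toy one-dimensional sequence task.
ds = SequentialDataSet(1, 1)
ds.newSequence()
for t in range(20):
    ds.addSample((t % 2,), ((t + 1) % 2,))

net = EvolinoNetwork(ds.outdim, 20)  # assumed signature: (output dim, hidden size)

# Keyword arguments below are taken from the docstring above.
trainer = EvolinoTrainer(net, ds,
                         subPopulationSize=8,
                         nCombinations=4,
                         initialWeightRange=(-0.1, 0.1),
                         mutationAlpha=0.01,
                         wtRatio=(1, 3),
                         nBurstMutationEpochs=float('inf'),
                         verbosity=1)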


Note: The trainer.Trainer.__init__ method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code belongs to the original authors. For distribution and use, please follow the corresponding project's license. Do not reproduce without permission.