當前位置: 首頁>>代碼示例>>Python>>正文


Python optimizers.MomentumSGD方法代碼示例

本文整理匯總了Python中chainer.optimizers.MomentumSGD方法的典型用法代碼示例。如果您正苦於以下問題:Python optimizers.MomentumSGD方法的具體用法?Python optimizers.MomentumSGD怎麽用?Python optimizers.MomentumSGD使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在chainer.optimizers的用法示例。


在下文中一共展示了optimizers.MomentumSGD方法的9個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: __init__

# 需要導入模塊: from chainer import optimizers [as 別名]
# 或者: from chainer.optimizers import MomentumSGD [as 別名]
def __init__(self, model=None, lr=0.1, momentum=0.9, schedule=(250, 375), weight_decay=1.0e-4):
        """Attach one MomentumSGD optimizer (with a WeightDecay hook) to every link of *model*.

        Args:
            model: network whose links are discovered via
                ``OptimizerStochasticDepth._find`` -- each entry's first item
                is expected to be a chainer link (TODO confirm against _find).
            lr: initial learning rate shared by all per-link optimizers.
            momentum: SGD momentum coefficient.
            schedule: epochs at which the learning rate presumably drops
                (consumed elsewhere; not used in this constructor).
            weight_decay: L2 regularization coefficient for every optimizer.
        """
        super(OptimizerStochasticDepth, self).__init__(model)
        self.lr = lr
        self.momentum = momentum
        self.schedule = schedule
        self.weight_decay = weight_decay
        # One dedicated optimizer per discovered link, each with its own
        # weight-decay hook, kept in the same order as the links.
        self.all_links = OptimizerStochasticDepth._find(model)
        self.optimizer_set = []
        for entry in self.all_links:
            link_optimizer = optimizers.MomentumSGD(lr, momentum)
            link_optimizer.setup(entry[0])
            link_optimizer.add_hook(chainer.optimizer.WeightDecay(self.weight_decay))
            self.optimizer_set.append(link_optimizer)
開發者ID:nutszebra,項目名稱:neural_architecture_search_with_reinforcement_learning_appendix_a,代碼行數:18,代碼來源:nutszebra_optimizer.py

示例2: test_remove_link

# 需要導入模塊: from chainer import optimizers [as 別名]
# 或者: from chainer.optimizers import MomentumSGD [as 別名]
def test_remove_link(self):
        """Check that ``remove_link`` strips every link not belonging to the trained depth.

        For each depth, builds a SegNet wrapped in SegNetLoss, registers it on
        a MomentumSGD optimizer, removes all links that are not part of the
        current depth, and asserts that the remaining named parameters belong
        only to ``encdec{depth}`` (plus ``conv_cls`` when depth == 1).
        """
        opt = optimizers.MomentumSGD(lr=0.01)
        # Update each depth
        for depth in six.moves.range(1, self.n_encdec + 1):
            model = segnet.SegNet(self.n_encdec, self.n_classes,
                                  self.x_shape[1], self.n_mid)
            model = segnet.SegNetLoss(
                model, class_weight=None, train_depth=depth)
            opt.setup(model)

            # Deregister non-target links from opt
            if depth > 1:
                # The classifier link only trains together with depth 1.
                model.predictor.remove_link('conv_cls')
            for d in range(1, self.n_encdec + 1):
                if d != depth:
                    model.predictor.remove_link('encdec{}'.format(d))

            # After removal, every surviving parameter must come from the
            # encoder/decoder under training (or the classifier at depth 1).
            for name, link in model.namedparams():
                if depth > 1:
                    self.assertTrue(
                        'encdec{}'.format(depth) in name)
                else:
                    self.assertTrue(
                        'encdec{}'.format(depth) in name or 'conv_cls' in name) 
開發者ID:pfnet-research,項目名稱:chainer-segnet,代碼行數:26,代碼來源:test_segnet.py

示例3: get_optimizer

# 需要導入模塊: from chainer import optimizers [as 別名]
# 或者: from chainer.optimizers import MomentumSGD [as 別名]
def get_optimizer(opt, lr=None, adam_alpha=None, adam_beta1=None,
                  adam_beta2=None, adam_eps=None, weight_decay=None):
    """Instantiate a chainer optimizer selected by the *opt* name.

    Supported names: 'MomentumSGD', 'Adam', 'AdaGrad', 'RMSprop'.

    Raises:
        Exception: when *opt* names no supported optimizer.
    """
    if opt == 'MomentumSGD':
        optimizer = optimizers.MomentumSGD(lr=lr, momentum=0.9)
        # NOTE(review): this only stashes the coefficient on the optimizer
        # object; no WeightDecay hook is installed here -- presumably the
        # caller turns it into a hook after setup(). Confirm at call sites.
        optimizer.decay = weight_decay
    elif opt == 'Adam':
        optimizer = optimizers.Adam(
            alpha=adam_alpha, beta1=adam_beta1,
            beta2=adam_beta2, eps=adam_eps)
    elif opt == 'AdaGrad':
        optimizer = optimizers.AdaGrad(lr=lr)
    elif opt == 'RMSprop':
        optimizer = optimizers.RMSprop(lr=lr)
    else:
        raise Exception('No optimizer is selected')
    return optimizer
開發者ID:pfnet-research,項目名稱:chainer-segnet,代碼行數:22,代碼來源:train_utils.py

示例4: train

# 需要導入模塊: from chainer import optimizers [as 別名]
# 或者: from chainer.optimizers import MomentumSGD [as 別名]
def train(epoch=10, batch_size=32, gpu=False):
    """Train the Alex model on the labeled image set.

    Args:
        epoch: number of passes over the training data.
        batch_size: minibatch size fed to the optimizer per update.
        gpu: when True, run on CUDA (requires a usable GPU).

    Side effects:
        Creates the mean-image file when missing, prints progress, and saves
        the model to MODEL_FILE after every epoch.
    """
    if gpu:
        cuda.check_cuda_available()
    # xp abstracts over numpy / cupy so the batch code below is device-agnostic.
    xp = cuda.cupy if gpu else np

    td = TrainingData(LABEL_FILE, img_root=IMAGES_ROOT, image_property=IMAGE_PROP)

    # make mean image
    if not os.path.isfile(MEAN_IMAGE_FILE):
        print("make mean image...")
        td.make_mean_image(MEAN_IMAGE_FILE)
    else:
        td.mean_image_file = MEAN_IMAGE_FILE

    # train model
    label_def = LabelingMachine.read_label_def(LABEL_DEF_FILE)
    model = alex.Alex(len(label_def))
    optimizer = optimizers.MomentumSGD(lr=0.01, momentum=0.9)
    optimizer.setup(model)
    # (removed the redundant `epoch = epoch` / `batch_size = batch_size`
    # self-assignments from the original -- they were no-ops.)

    print("Now our model is {0} classification task.".format(len(label_def)))
    print("begin training the model. epoch:{0} batch size:{1}.".format(epoch, batch_size))

    if gpu:
        model.to_gpu()

    for i in range(epoch):
        print("epoch {0}/{1}: (learning rate={2})".format(i + 1, epoch, optimizer.lr))
        td.shuffle(overwrite=True)

        for x_batch, y_batch in td.generate_batches(batch_size):
            x = chainer.Variable(xp.asarray(x_batch))
            t = chainer.Variable(xp.asarray(y_batch))

            optimizer.update(model, x, t)
            print("loss: {0}, accuracy: {1}".format(float(model.loss.data), float(model.accuracy.data)))

        # Checkpoint after every epoch, then decay the learning rate by 3%.
        serializers.save_npz(MODEL_FILE, model)
        optimizer.lr *= 0.97 
開發者ID:icoxfog417,項目名稱:mlimages,代碼行數:43,代碼來源:chainer_alex.py

示例5: prepare_optimizer

# 需要導入模塊: from chainer import optimizers [as 別名]
# 或者: from chainer.optimizers import MomentumSGD [as 別名]
def prepare_optimizer(self):
        """Instantiate the optimizer named by ``self.args.opt`` and attach it to ``self.x_link``.

        Raises:
            ValueError: if ``self.args.opt`` is neither 'MomentumSGD' nor 'Adam'.
        """
        chosen = self.args.opt
        # Guard clause: reject unknown optimizer names before building anything.
        if chosen not in ('MomentumSGD', 'Adam'):
            raise ValueError('Opt should be MomentumSGD or Adam.')
        if chosen == 'MomentumSGD':
            self.opt = optimizers.MomentumSGD(momentum=0.9)
        else:
            self.opt = optimizers.Adam(alpha=self.args.adam_alpha)
            print('Adam alpha=', self.args.adam_alpha)
        self.opt.setup(self.x_link) 
開發者ID:mitmul,項目名稱:ssai-cnn,代碼行數:11,代碼來源:invert.py

示例6: get_model_optimizer

# 需要導入模塊: from chainer import optimizers [as 別名]
# 或者: from chainer.optimizers import MomentumSGD [as 別名]
def get_model_optimizer(args):
    """Build the model and, when ``args`` carries an ``opt`` field, its optimizer.

    Returns:
        ``(model, optimizer)`` when an optimizer is requested, otherwise just
        the model.

    Raises:
        Exception: when ``args.opt`` names no supported optimizer.
    """
    model = get_model(args)

    # No optimizer requested -- hand back the bare model.
    if 'opt' not in args:
        print('No optimizer generated.')
        return model

    if args.opt == 'MomentumSGD':
        optimizer = optimizers.MomentumSGD(lr=args.lr, momentum=0.9)
    elif args.opt == 'Adam':
        optimizer = optimizers.Adam(alpha=args.alpha)
    elif args.opt == 'AdaGrad':
        optimizer = optimizers.AdaGrad(lr=args.lr)
    else:
        raise Exception('No optimizer is selected')

    optimizer.setup(model)

    # Weight decay is applied as a hook only for MomentumSGD.
    if args.opt == 'MomentumSGD':
        optimizer.add_hook(
            chainer.optimizer.WeightDecay(args.weight_decay))

    # When resuming, restore the optimizer state and recover the epoch
    # offset embedded in the snapshot file name (e.g. '...epoch-42...').
    if args.resume_opt is not None:
        serializers.load_hdf5(args.resume_opt, optimizer)
        args.epoch_offset = int(
            re.search('epoch-([0-9]+)', args.resume_opt).groups()[0])

    return model, optimizer
開發者ID:mitmul,項目名稱:ssai-cnn,代碼行數:31,代碼來源:train.py

示例7: __init__

# 需要導入模塊: from chainer import optimizers [as 別名]
# 或者: from chainer.optimizers import MomentumSGD [as 別名]
def __init__(self, model=None, schedule=(int(32000. / (50000. / 128)), int(48000. / (50000. / 128))), lr=0.1, momentum=0.9, weight_decay=1.0e-4, warm_up_lr=0.01):
        """Wrap *model* in a MomentumSGD optimizer that starts at a warm-up learning rate.

        The optimizer is created with ``warm_up_lr``; ``lr`` is stored for the
        schedule to apply later. The ``schedule`` default converts iteration
        milestones 32000/48000 into epoch counts assuming 50000 samples and
        batch size 128 (roughly epochs 81 and 122) -- presumably the CIFAR
        ResNet schedule; confirm against the training loop.
        """
        super(OptimizerResnet, self).__init__(model)
        optimizer = optimizers.MomentumSGD(warm_up_lr, momentum)
        # NOTE(review): this rebinds the float parameter to the hook object,
        # so self.weight_decay below stores the WeightDecay hook rather than
        # the coefficient (unlike sibling optimizers in this file) -- verify
        # that downstream readers of self.weight_decay expect the hook.
        weight_decay = chainer.optimizer.WeightDecay(weight_decay)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay)
        self.optimizer = optimizer
        self.schedule = schedule
        self.lr = lr
        self.warmup_lr = warm_up_lr
        self.momentum = momentum
        self.weight_decay = weight_decay 
開發者ID:nutszebra,項目名稱:googlenet,代碼行數:14,代碼來源:nutszebra_optimizer.py

示例8: create

# 需要導入模塊: from chainer import optimizers [as 別名]
# 或者: from chainer.optimizers import MomentumSGD [as 別名]
def create(self):
        """Build and return a fresh MomentumSGD optimizer with learning rate 0.1."""
        return optimizers.MomentumSGD(lr=0.1)
開發者ID:chainer,項目名稱:chainer,代碼行數:4,代碼來源:test_optimizers_by_linear_model.py

示例9: get_optimizer

# 需要導入模塊: from chainer import optimizers [as 別名]
# 或者: from chainer.optimizers import MomentumSGD [as 別名]
def get_optimizer(model, opt, lr, adam_alpha=None, adam_beta1=None,
                  adam_beta2=None, adam_eps=None, weight_decay=None,
                  resume_opt=None):
    """Create the optimizer named *opt*, attach it to *model*, and optionally resume it.

    Supported names: 'MomentumSGD', 'Adam', 'AdaGrad', 'RMSprop'.

    Raises:
        Exception: when *opt* names no supported optimizer.
    """
    # Lazy builders keep the name->constructor mapping in one place; only
    # the selected entry is ever evaluated.
    builders = {
        'MomentumSGD': lambda: optimizers.MomentumSGD(lr=lr, momentum=0.9),
        'Adam': lambda: optimizers.Adam(alpha=adam_alpha, beta1=adam_beta1,
                                        beta2=adam_beta2, eps=adam_eps),
        'AdaGrad': lambda: optimizers.AdaGrad(lr=lr),
        'RMSprop': lambda: optimizers.RMSprop(lr=lr),
    }
    if opt not in builders:
        raise Exception('No optimizer is selected')
    optimizer = builders[opt]()

    # The first model as the master model
    optimizer.setup(model)

    # Weight decay is applied as a hook only for MomentumSGD.
    if opt == 'MomentumSGD':
        optimizer.add_hook(
            chainer.optimizer.WeightDecay(weight_decay))

    if resume_opt is not None:
        serializers.load_npz(resume_opt, optimizer)

    return optimizer 
開發者ID:mitmul,項目名稱:deeppose,代碼行數:29,代碼來源:train.py


注:本文中的chainer.optimizers.MomentumSGD方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。