

Python config.learning_rate Method Code Examples

This article collects typical usage examples of the config.learning_rate method in Python. If you are wondering what exactly config.learning_rate does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the config module that this method belongs to.


Five code examples of the config.learning_rate method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
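All five examples read a learning_rate setting from a project-level config module. As a rough orientation only, a minimal, hypothetical config.py for the attribute-style usage (Examples 1, 3, 4 and 5) might look like the sketch below; the attribute names are taken from the examples, but the values are purely illustrative assumptions, not taken from the cited projects:

# config.py -- hypothetical, minimal configuration module (illustrative values only)
learning_rate = 1e-4    # initial learning rate, read as config.learning_rate
decay_circles = 10000   # steps between decays, used with tf.train.exponential_decay in Examples 1 and 3
lr_decay = 0.87         # multiplicative decay factor
weight_decay = 5e-4     # L2 regularization strength
clip_gradients = 5.0    # global-norm gradient clipping threshold

Example 2 is different: there, learning_rate is a function of the initial rate and the current epoch; a sketch of that variant follows that example.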

Example 1: get_train_op

# Required module: import config [as alias]
# Or: from config import learning_rate [as alias]
# This snippet also assumes: import tensorflow as tf (and a ModeKeys enum, e.g. tf.estimator.ModeKeys)
def get_train_op(loss, mode):

    if mode != ModeKeys.TRAIN:
        return None

    global_step = tf.train.get_or_create_global_step()
    learning_rate = tf.train.exponential_decay(config.learning_rate, global_step, config.decay_circles, config.lr_decay, staircase=True)
    tf.summary.scalar('learning_rate', learning_rate)

    tvars = tf.trainable_variables()
    regularizer = tf.contrib.layers.l2_regularizer(config.weight_decay)
    regularizer_loss = tf.contrib.layers.apply_regularization(regularizer, tvars)
    loss += regularizer_loss
    grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), config.clip_gradients)
    # optimizer = tf.train.GradientDescentOptimizer(self.lr)
    optimizer = tf.train.AdamOptimizer(learning_rate)

    batchnorm_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(batchnorm_update_ops):
        train_op = optimizer.apply_gradients(zip(grads, tvars), global_step)

    return train_op 
Developer: skyoung, Project: MemTrack, Lines: 24, Source: model.py

Example 2: train

# Required module: import config [as alias]
# Or: from config import learning_rate [as alias]
# This snippet also assumes: import sys, torch, torch.optim as optim, from torch.autograd import Variable, and that config is imported as cf
def train(epoch):
    net.train()
    net.training = True
    train_loss = 0
    correct = 0
    total = 0
    optimizer = optim.SGD(net.parameters(), lr=cf.learning_rate(args.lr, epoch), momentum=0.9, weight_decay=5e-4)

    print('\n=> Training Epoch #%d, LR=%.4f' %(epoch, cf.learning_rate(args.lr, epoch)))
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda() # GPU settings
        optimizer.zero_grad()
        inputs, targets = Variable(inputs), Variable(targets)
        outputs = net(inputs)               # Forward Propagation
        loss = criterion(outputs, targets)  # Loss
        loss.backward()  # Backward Propagation
        optimizer.step() # Optimizer update

        train_loss += loss.item()
        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()

        sys.stdout.write('\r')
        sys.stdout.write('| Epoch [%3d/%3d] Iter[%3d/%3d]\t\tLoss: %.4f Acc@1: %.3f%%'
                %(epoch, num_epochs, batch_idx+1,
                    (len(trainset)//batch_size)+1, loss.item(), 100.*correct/total))
        sys.stdout.flush() 
Developer: meliketoy, Project: wide-resnet.pytorch, Lines: 31, Source: main.py
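Unlike the other examples, Example 2 calls cf.learning_rate(args.lr, epoch) as a function, i.e. the project's config module defines learning_rate as an epoch-based schedule rather than a constant. A minimal, hypothetical sketch of such a function is shown below; the milestone epochs and decay factor are assumptions, not the actual values from wide-resnet.pytorch:

# config.py -- hypothetical epoch-based schedule matching the cf.learning_rate(init, epoch) call
def learning_rate(init, epoch):
    """Return the learning rate for the given epoch, starting from init."""
    optim_factor = 0
    if epoch > 160:
        optim_factor = 3
    elif epoch > 120:
        optim_factor = 2
    elif epoch > 60:
        optim_factor = 1
    return init * (0.2 ** optim_factor)  # multiply by 0.2 at each milestone

Because the schedule is recomputed from the epoch number, Example 2 simply rebuilds the SGD optimizer at the start of each epoch with the freshly computed rate.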

Example 3: __init__

# Required module: import config [as alias]
# Or: from config import learning_rate [as alias]
# This snippet also assumes: import numpy as np and import tensorflow as tf
def __init__(self, is_train, z_examplar=None, x_crops=None, y_crops=None, init_z_exemplar=None):

        self._is_train = is_train
        input_shape = z_examplar.get_shape().as_list()

        self._batch_size = input_shape[0]
        self._time_steps = input_shape[1]
        x_shape = x_crops.get_shape().as_list()
        self._z_examplar = tf.reshape(z_examplar, [-1, config.z_exemplar_size, config.z_exemplar_size, 3])
        self._x_crops = tf.reshape(x_crops, [-1]+ x_shape[2:])
        self._y_crops = y_crops
        self._response_size = config.response_size-int(2*8/config.stride) if config.is_augment and is_train else config.response_size
        self._gt_pos = tf.convert_to_tensor(np.floor([self._response_size/2, self._response_size/2]), tf.float32)
        if init_z_exemplar is not None:
            self.init_z_exemplar = tf.reshape(init_z_exemplar, [-1, config.z_exemplar_size, config.z_exemplar_size, 3])

        # The bare attribute accesses below presumably trigger lazily-built properties
        # defined elsewhere in the class; touching them here constructs the corresponding graph ops.
        self.filter
        self.response
        if y_crops is not None:
            self.loss
            self.dist_error
        else:
            self.init_state_filter
        if is_train:

            self._global_step = tf.get_variable('global_step', [], tf.int64, initializer=tf.constant_initializer(0),
                                          trainable=False)
            self._lr = tf.train.exponential_decay(config.learning_rate, self._global_step, config.decay_circles,
                                                  config.lr_decay, staircase=True)
            tf.summary.scalar('learning_rate', self._lr)
            self.optimize

        self._summary = tf.summary.merge_all()
        self._saver = tf.train.Saver(tf.global_variables()) 
Developer: skyoung, Project: RFL, Lines: 36, Source: rfl_net.py

Example 4: train

# Required module: import config [as alias]
# Or: from config import learning_rate [as alias]
# This snippet also assumes: import os, torch, torch.nn as nn, plus project helpers (NetsTorch, ImageFolder, saveClasses, Logging, test)
def train(config):
	# prepare
	if not os.path.exists(config.save_dir):
		os.mkdir(config.save_dir)
	use_cuda = torch.cuda.is_available()
	# define the model
	model = NetsTorch(net_name=config.net_name, pretrained=config.load_pretrained, num_classes=config.num_classes)
	if use_cuda:
		os.environ['CUDA_VISIBLE_DEVICES'] = config.gpus
		if config.ngpus > 1:
			model = nn.DataParallel(model).cuda()
		else:
			model = model.cuda()
	model.train()
	# dataset
	dataset_train = ImageFolder(data_dir=config.traindata_dir, image_size=config.image_size, is_train=True)
	saveClasses(dataset_train.classes, config.clsnamespath)
	dataset_test = ImageFolder(data_dir=config.testdata_dir, image_size=config.image_size, is_train=False)
	dataloader_train = torch.utils.data.DataLoader(dataset_train, batch_size=config.batch_size, shuffle=False, num_workers=config.num_workers)
	dataloader_test = torch.utils.data.DataLoader(dataset_test, batch_size=config.batch_size, shuffle=False, num_workers=config.num_workers)
	Logging('Train dataset size: %d...' % len(dataset_train), config.logfile)
	Logging('Test dataset size: %d...' % len(dataset_test), config.logfile)
	# optimizer
	optimizer = torch.optim.Adam(model.parameters(), lr=config.learning_rate)
	criterion = nn.CrossEntropyLoss()
	# train
	FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
	for epoch in range(1, config.num_epochs+1):
		Logging('[INFO]: epoch now is %d...' % epoch, config.logfile)
		for batch_i, (imgs, labels) in enumerate(dataloader_train):
			imgs = imgs.type(FloatTensor)
			labels = labels.type(FloatTensor)
			optimizer.zero_grad()
			preds = model(imgs)
			loss = criterion(preds, labels.long())
			if config.ngpus > 1:
				loss = loss.mean()
			Logging('[INFO]: batch%d of epoch%d, loss is %.2f...' % (batch_i, epoch, loss.item()), config.logfile)
			loss.backward()
			optimizer.step()
		if ((epoch % config.save_interval == 0) and (epoch > 0)) or (epoch == config.num_epochs):
			pklpath = os.path.join(config.save_dir, 'epoch_%s.pkl' % str(epoch))
			if config.ngpus > 1:
				cur_model = model.module
			else:
				cur_model = model
			torch.save(cur_model.state_dict(), pklpath)
			acc = test(model, dataloader_test)
			Logging('[INFO]: Accuracy of epoch %d is %.2f...' % (epoch, acc), config.logfile) 
Developer: CharlesPikachu, Project: garbageClassifier, Lines: 51, Source: train.py

Example 5: train_entry

# Required module: import config [as alias]
# Or: from config import learning_rate [as alias]
# This snippet also assumes: import os, json, numpy as np, torch, torch.optim as optim, from math import log2, plus project helpers (SQuADDataset, EMA, train, valid, test) and a device object
def train_entry():
    from models import QANet

    with open(config.word_emb_file, "r") as fh:
        word_mat = np.array(json.load(fh), dtype=np.float32)
    with open(config.char_emb_file, "r") as fh:
        char_mat = np.array(json.load(fh), dtype=np.float32)
    with open(config.train_eval_file, "r") as fh:
        train_eval_file = json.load(fh)
    with open(config.dev_eval_file, "r") as fh:
        dev_eval_file = json.load(fh)

    print("Building model...")

    train_dataset = SQuADDataset(config.train_record_file, config.num_steps, config.batch_size)
    dev_dataset = SQuADDataset(config.dev_record_file, config.test_num_batches, config.batch_size)

    lr = config.learning_rate
    base_lr = 1.0
    warm_up = config.lr_warm_up_num

    model = QANet(word_mat, char_mat).to(device)
    ema = EMA(config.ema_decay)
    for name, p in model.named_parameters():
        if p.requires_grad: ema.set(name, p)
    params = filter(lambda param: param.requires_grad, model.parameters())
    # base_lr is 1.0, so the LambdaLR multiplier below is effectively the learning rate itself:
    # it warms up logarithmically to config.learning_rate over lr_warm_up_num steps, then stays constant.
    optimizer = optim.Adam(lr=base_lr, betas=(config.beta1, config.beta2), eps=1e-7, weight_decay=3e-7, params=params)
    cr = lr / log2(warm_up)
    scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda ee: cr * log2(ee + 1) if ee < warm_up else lr)
    L = config.checkpoint
    N = config.num_steps
    best_f1 = best_em = patience = 0
    for iter in range(0, N, L):
        train(model, optimizer, scheduler, ema, train_dataset, iter, L)
        valid(model, train_dataset, train_eval_file)
        metrics = test(model, dev_dataset, dev_eval_file)
        print("Learning rate: {}".format(scheduler.get_lr()))
        dev_f1 = metrics["f1"]
        dev_em = metrics["exact_match"]
        if dev_f1 < best_f1 and dev_em < best_em:
            patience += 1
            if patience > config.early_stop: break
        else:
            patience = 0
            best_f1 = max(best_f1, dev_f1)
            best_em = max(best_em, dev_em)

        fn = os.path.join(config.save_dir, "model.pt")
        torch.save(model, fn) 
Developer: setoidz, Project: QANet-pytorch, Lines: 51, Source: main.py


Note: The config.learning_rate method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the License of the corresponding project; do not republish without permission.