

Python config.num_epochs method code examples

This article collects typical usage examples of the Python config.num_epochs method from open-source code. If you are unsure what config.num_epochs does, how to call it, or what it looks like in real code, the hand-picked examples below should help. You can also explore further usage examples from the config module in which this attribute lives.


Five code examples of config.num_epochs are listed below, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
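
For context, config in these examples is simply a project-level Python module whose attributes (such as num_epochs) are read, and sometimes overwritten, at runtime. A minimal sketch of such a module is shown below; the default values are hypothetical and each project defines its own.

# config.py -- a plain module used as a mutable, project-wide configuration object.
# The values below are illustrative defaults, not taken from any of the projects cited here.
num_epochs = 100                          # number of passes over the training set
batch_size = 128
min_lr = 1e-5
max_lr = 1e-3
IMAGE_SIZE = 32
plot_name = "training_curve.png"
save_dir_path = "./model_checkpoints"

# Callers typically do `import config` and then read or overwrite attributes:
#   import config
#   config.num_epochs = 50
#   for epoch in range(1, config.num_epochs + 1): ...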

Example 1: main

# Required module import: import config [as alias]
# Or: from config import num_epochs [as alias]
def main():
    args = parse_args()
    print('\nCalled with args:')
    for key in args:
        print(f"{key:<10}:  {args[key]}")
    print("="*78)

    # get the command line args
    config.max_lr = args["max_lr"]
    config.min_lr = args["min_lr"]
    config.batch_size = args["batch_size"]
    config.num_epochs = args["epochs"]
    config.IMAGE_SIZE = args["img_size"]
    config.plot_name = args["plot_name"]
    
    if args["save_dir_path"] == "":
        config.save_dir_path = './model_checkpoints'
    else:
        config.save_dir_path = args["save_dir_path"]

    # get the data
    print("\nLoading data now.", end=" ")
    x_train, y_train, x_test, y_test, y_train_cat, y_test_cat = get_cifar_data()
    training_data = [x_train, y_train, y_train_cat]
    validation_data = [x_test, y_test, y_test_cat]
    print("Data loading complete. \n")

    # pass the arguments to the trainer
    train(training_data=training_data,
            validation_data=validation_data,
            batch_size=config.batch_size, 
            nb_epochs=config.num_epochs,
            min_lr=config.min_lr,
            max_lr=config.max_lr,
            save_dir_path=config.save_dir_path)
    

########################################################################### 
Author: AakashKumarNain, Project: AugMix_TF2, Lines of code: 42, Source: main.py
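
The parse_args helper called at the top of main is not part of the snippet. A plausible sketch using argparse is given below; the option names are assumptions chosen only to match the dictionary keys that main() accesses, not the project's actual code.

import argparse

def parse_args():
    """Parse command-line options and return them as a plain dict."""
    parser = argparse.ArgumentParser(description="Training entry point")
    parser.add_argument("--max_lr", type=float, default=1e-3)
    parser.add_argument("--min_lr", type=float, default=1e-5)
    parser.add_argument("--batch_size", type=int, default=128)
    parser.add_argument("--epochs", type=int, default=100)
    parser.add_argument("--img_size", type=int, default=32)
    parser.add_argument("--plot_name", type=str, default="training_curve.png")
    parser.add_argument("--save_dir_path", type=str, default="")
    # vars() turns the argparse Namespace into a dict so main() can iterate over its keys
    return vars(parser.parse_args())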

Example 2: train

# Required module import: import config [as alias]
# Or: from config import num_epochs [as alias]
def train(epoch):
    net.train()
    net.training = True
    train_loss = 0
    correct = 0
    total = 0
    optimizer = optim.SGD(net.parameters(), lr=cf.learning_rate(args.lr, epoch), momentum=0.9, weight_decay=5e-4)

    print('\n=> Training Epoch #%d, LR=%.4f' %(epoch, cf.learning_rate(args.lr, epoch)))
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda() # GPU settings
        optimizer.zero_grad()
        inputs, targets = Variable(inputs), Variable(targets)
        outputs = net(inputs)               # Forward Propagation
        loss = criterion(outputs, targets)  # Loss
        loss.backward()  # Backward Propagation
        optimizer.step() # Optimizer update

        train_loss += loss.item()
        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()

        sys.stdout.write('\r')
        sys.stdout.write('| Epoch [%3d/%3d] Iter[%3d/%3d]\t\tLoss: %.4f Acc@1: %.3f%%'
                %(epoch, num_epochs, batch_idx+1,
                    (len(trainset)//batch_size)+1, loss.item(), 100.*correct/total))
        sys.stdout.flush() 
Author: meliketoy, Project: wide-resnet.pytorch, Lines of code: 31, Source: main.py
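
The helper cf.learning_rate is defined elsewhere in that project; the snippet only shows that it maps (initial LR, current epoch) to the learning rate for this epoch, and that a fresh SGD optimizer is built each epoch with that value. A hedged sketch of a typical step-decay schedule follows; the decay points and factor are assumptions, not the project's exact numbers.

def learning_rate(init, epoch):
    """Step-decay schedule: shrink the base learning rate as training progresses."""
    optim_factor = 0
    if epoch > 160:
        optim_factor = 3
    elif epoch > 120:
        optim_factor = 2
    elif epoch > 60:
        optim_factor = 1
    return init * (0.2 ** optim_factor)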

Example 3: test

# Required module import: import config [as alias]
# Or: from config import num_epochs [as alias]
def test(epoch):
    global best_acc, best_model
    net.eval()
    test_loss = 0
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(validloader):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        inputs, targets = Variable(inputs, volatile=True), Variable(targets)  # volatile=True: legacy pre-0.4 way to disable autograd
        outputs = net(inputs)
        loss = criterion(outputs, targets)

        test_loss += loss.item()
        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()

    # Save checkpoint when best model
    acc = 100.*float(correct)/total
    print("\n| Validation Epoch #%d\t\t\tLoss: %.4f Acc@1: %.2f%%" %(epoch, loss.item(), acc))

    if acc > best_acc:
        #print('| Saving Best model...\t\t\tTop1 = %.2f%%' %(acc))
        best_model = save(acc, num_epochs, net, best=True)
        best_acc = acc 
Author: eth-sri, Project: dl2, Lines of code: 28, Source: main.py
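
Variable(..., volatile=True) is the pre-0.4 PyTorch idiom for turning off gradient tracking during evaluation. On current PyTorch versions the equivalent is to wrap the loop in torch.no_grad(); the sketch below shows the modern form and is not part of the original snippet.

import torch

@torch.no_grad()           # disables gradient tracking for the whole function
def evaluate(net, loader, criterion, device="cuda"):
    """Return top-1 accuracy (%) of `net` over `loader`."""
    net.eval()
    correct, total = 0, 0
    for inputs, targets in loader:
        inputs, targets = inputs.to(device), targets.to(device)
        outputs = net(inputs)
        _, predicted = outputs.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()
    return 100.0 * correct / total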

Example 4: train

# Required module import: import config [as alias]
# Or: from config import num_epochs [as alias]
def train(self):
        batch_num = len(self.train_loader)
        best_loss = 1e10
        for epoch in range(1, config.num_epochs + 1):
            self.model.train()
            print("epoch {}/{} :".format(epoch, config.num_epochs), end="\r")
            start = time.time()
            # halve the learning rate every two epochs, starting at epoch 8
            if epoch >= 8 and epoch % 2 == 0:
                self.lr *= 0.5
                state_dict = self.optim.state_dict()
                for param_group in state_dict["param_groups"]:
                    param_group["lr"] = self.lr
                self.optim.load_state_dict(state_dict)

            for batch_idx, train_data in enumerate(self.train_loader, start=1):
                batch_loss = self.step(train_data)

                self.model.zero_grad()
                batch_loss.backward()
                # gradient clipping
                nn.utils.clip_grad_norm_(self.model.parameters(),
                                         config.max_grad_norm)

                self.optim.step()
                batch_loss = batch_loss.detach().item()
                msg = "{}/{} {} - ETA : {} - loss : {:.4f}" \
                    .format(batch_idx, batch_num, progress_bar(batch_idx, batch_num),
                            eta(start, batch_idx, batch_num), batch_loss)
                print(msg, end="\r")

            val_loss = self.evaluate(msg)
            if val_loss <= best_loss:
                best_loss = val_loss
                self.save_model(val_loss, epoch)

            print("Epoch {} took {} - final loss : {:.4f} - val loss :{:.4f}"
                  .format(epoch, user_friendly_time(time_since(start)), batch_loss, val_loss)) 
Author: seanie12, Project: neural-question-generation, Lines of code: 40, Source: trainer.py
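
Round-tripping the learning rate through optim.state_dict() works, but PyTorch optimizers also expose their parameter groups directly. An equivalent, more common way to apply the same halving schedule is sketched below; it is not the author's code.

def halve_lr_every_two_epochs(optimizer, epoch, start_epoch=8):
    """Multiply every parameter group's LR by 0.5 on even-numbered epochs >= start_epoch."""
    if epoch >= start_epoch and epoch % 2 == 0:
        for param_group in optimizer.param_groups:
            param_group["lr"] *= 0.5

# Inside the epoch loop above, this would replace the state_dict round trip:
#   halve_lr_every_two_epochs(self.optim, epoch)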

Example 5: train

# Required module import: import config [as alias]
# Or: from config import num_epochs [as alias]
def train(config):
	# prepare
	if not os.path.exists(config.save_dir):
		os.mkdir(config.save_dir)
	use_cuda = torch.cuda.is_available()
	# define the model
	model = NetsTorch(net_name=config.net_name, pretrained=config.load_pretrained, num_classes=config.num_classes)
	if use_cuda:
		os.environ['CUDA_VISIBLE_DEVICES'] = config.gpus
		if config.ngpus > 1:
			model = nn.DataParallel(model).cuda()
		else:
			model = model.cuda()
	model.train()
	# dataset
	dataset_train = ImageFolder(data_dir=config.traindata_dir, image_size=config.image_size, is_train=True)
	saveClasses(dataset_train.classes, config.clsnamespath)
	dataset_test = ImageFolder(data_dir=config.testdata_dir, image_size=config.image_size, is_train=False)
	dataloader_train = torch.utils.data.DataLoader(dataset_train, batch_size=config.batch_size, shuffle=False, num_workers=config.num_workers)
	dataloader_test = torch.utils.data.DataLoader(dataset_test, batch_size=config.batch_size, shuffle=False, num_workers=config.num_workers)
	Logging('Train dataset size: %d...' % len(dataset_train), config.logfile)
	Logging('Test dataset size: %d...' % len(dataset_test), config.logfile)
	# optimizer
	optimizer = torch.optim.Adam(model.parameters(), lr=config.learning_rate)
	criterion = nn.CrossEntropyLoss()
	# train
	FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
	for epoch in range(1, config.num_epochs+1):
		Logging('[INFO]: epoch now is %d...' % epoch, config.logfile)
		for batch_i, (imgs, labels) in enumerate(dataloader_train):
			imgs = imgs.type(FloatTensor)
			labels = labels.type(FloatTensor)
			optimizer.zero_grad()
			preds = model(imgs)
			loss = criterion(preds, labels.long())
			if config.ngpus > 1:
				loss = loss.mean()
			Logging('[INFO]: batch%d of epoch%d, loss is %.2f...' % (batch_i, epoch, loss.item()), config.logfile)
			loss.backward()
			optimizer.step()
		if ((epoch % config.save_interval == 0) and (epoch > 0)) or (epoch == config.num_epochs):
			pklpath = os.path.join(config.save_dir, 'epoch_%s.pkl' % str(epoch))
			if config.ngpus > 1:
				cur_model = model.module
			else:
				cur_model = model
			torch.save(cur_model.state_dict(), pklpath)
			acc = test(model, dataloader_test)
			Logging('[INFO]: Accuracy of epoch %d is %.2f...' % (epoch, acc), config.logfile) 
Author: CharlesPikachu, Project: garbageClassifier, Lines of code: 51, Source: train.py
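
The test helper called at each checkpointing step is not shown; it presumably runs the model over dataloader_test and returns top-1 accuracy. A minimal sketch under that assumption:

import torch

def test(model, dataloader):
    """Return top-1 accuracy (%) of `model` over `dataloader`."""
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for imgs, labels in dataloader:
            if torch.cuda.is_available():
                imgs, labels = imgs.cuda(), labels.cuda()
            preds = model(imgs).argmax(dim=1)
            correct += (preds == labels).sum().item()
            total += labels.size(0)
    model.train()  # the caller continues training after this evaluation
    return 100.0 * correct / total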


Note: the config.num_epochs examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers, and the source code remains the property of its original authors. Please follow each project's License when redistributing or using the code, and do not reproduce this article without permission.