

Python Trainer.test Method Code Examples

This article collects typical usage examples of the Python method trainer.Trainer.test. If you are wondering how exactly Trainer.test is used, what it is for, or what calls to it look like, the hand-picked code examples below should help. You can also explore further usage examples of its containing class, trainer.Trainer.


Below are 4 code examples of the Trainer.test method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
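
All four examples below follow the same calling pattern: construct a Trainer, run training, then call test() to evaluate. As a minimal sketch of that pattern (this Trainer is a hypothetical stand-in; each project defines its own constructor signature):

class Trainer:
    """Hypothetical stand-in illustrating the shared train/test pattern."""
    def __init__(self, model, train_data, test_data):
        self.model = model
        self.train_data = train_data
        self.test_data = test_data

    def train(self):
        pass  # one pass over train_data, updating model parameters

    def test(self):
        pass  # evaluate the model on test_data and report a metric

trainer = Trainer(model=None, train_data=[], test_data=[])
trainer.train()
trainer.test()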

Example 1: main

# Required import: from trainer import Trainer [as alias]
# Or: from trainer.Trainer import test [as alias]
# (this snippet also relies on numpy as np, tensorflow as tf, and the
#  project's config / prepare_dirs_and_logger / save_config helpers)
def main(_):
  prepare_dirs_and_logger(config)

  if not config.task.lower().startswith('tsp'):
    raise Exception("[!] Task should start with TSP")

  if config.max_enc_length is None:
    config.max_enc_length = config.max_data_length
  if config.max_dec_length is None:
    config.max_dec_length = config.max_data_length

  rng = np.random.RandomState(config.random_seed)
  tf.set_random_seed(config.random_seed)

  trainer = Trainer(config, rng)
  save_config(config.model_dir, config)

  if config.is_train:
    trainer.train()
  else:
    if not config.load_path:
      raise Exception("[!] You should specify `load_path` to load a pretrained model")
    trainer.test()

  tf.logging.info("Run finished.")
Developer: huyuxiang, Project: tensorflow_practice, Lines: 27, Source: main.py
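
Example 1 drives everything through a config object. For reference, a hypothetical minimal config that would reach the trainer.test() branch (the field names are taken from the snippet; every value is an illustrative assumption):

from types import SimpleNamespace

# Hypothetical config sufficient to exercise the test path; field names
# come from the snippet above, the values are illustrative assumptions.
config = SimpleNamespace(
    task='tsp_20',            # must start with 'tsp'
    max_data_length=20,
    max_enc_length=None,      # falls back to max_data_length
    max_dec_length=None,      # falls back to max_data_length
    random_seed=123,
    model_dir='logs/tsp_20',
    is_train=False,           # take the test branch instead of train()
    load_path='logs/tsp_20',  # required when is_train is False
)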

Example 2: main

# Required import: from trainer import Trainer [as alias]
# Or: from trainer.Trainer import test [as alias]
# (this snippet also relies on torch, torch.nn as nn, torch.optim as optim,
#  torchvision's datasets/transforms, and the project's BayesianModule and
#  compression utilities)
def main(FLAGS):
    # import data
    kwargs = {'num_workers': 1, 'pin_memory': True} if FLAGS.cuda else {}

    if FLAGS.dataset == "cifar10":
        proj_dst = datasets.CIFAR10
        num_classes = 10
    elif FLAGS.dataset == "cifar100":
        proj_dst = datasets.CIFAR100
        num_classes = 100
    elif FLAGS.dataset == "mnist":
        proj_dst = datasets.MNIST
        num_classes = 10

    # use the dataset class selected above (the original hardcoded MNIST here,
    # which would mismatch the CIFAR model branches below)
    train_loader = torch.utils.data.DataLoader(
        proj_dst('../data', train=True, download=True,
                 transform=transforms.Compose([
                     transforms.ToTensor(),
                     lambda x: 2 * (x - 0.5),  # rescale pixels to [-1, 1]
                 ])),
        batch_size=FLAGS.batchsize, shuffle=True, **kwargs)

    test_loader = torch.utils.data.DataLoader(
        proj_dst('../data', train=False, transform=transforms.Compose([
            transforms.ToTensor(),
            lambda x: 2 * (x - 0.5),
        ])),
        batch_size=FLAGS.batchsize, shuffle=True, **kwargs)

    if FLAGS.dataset.startswith("cifar"):
        if FLAGS.nettype == "lenet":
            model = BayesianModule.LeNet_Cifar(num_classes)
        elif FLAGS.nettype == "mlp":
            model = BayesianModule.MLP_Cifar(num_classes)
    elif FLAGS.dataset == "mnist":
        if FLAGS.nettype == "lenet":
            model = BayesianModule.LeNet_MNIST(num_classes)
        elif FLAGS.nettype == "mlp":
            model = BayesianModule.MLP_MNIST(num_classes)

    print(FLAGS.dataset, FLAGS.nettype)
    if FLAGS.cuda:
        model.cuda()

    # init optimizer
    optimizer = optim.Adam(model.parameters())

    # we optimize the variational lower bound scaled by the number of data
    # points (so we can keep our intuitions about hyper-params such as the learning rate)

    class objection(object):
        def __init__(self, N, use_cuda=True):
            self.d_loss = nn.functional.cross_entropy
            self.N = N
            self.use_cuda = use_cuda

        def __call__(self, output, target, kl_divergence):
            d_error = self.d_loss(output, target)
            # the KL term is divided by N so the bound is per data point,
            # keeping its scale comparable to minibatch cross-entropy
            variational_bound = d_error + kl_divergence / self.N
            if self.use_cuda:
                variational_bound = variational_bound.cuda()
            return variational_bound

    objective = objection(len(train_loader.dataset), use_cuda=FLAGS.cuda)

    from trainer import Trainer
    trainer = Trainer(model, train_loader, test_loader, optimizer, objective)
    # train the model and save some visualisations on the way
    for epoch in range(1, FLAGS.epochs + 1):
        trainer.train(epoch)
        trainer.test()

    # compute compression rate and new model accuracy
    layers = model.layers
    thresholds = FLAGS.thresholds
    compute_compression_rate(layers, model.get_masks(thresholds))

    print("Test error after with reduced bit precision:")

    weights = compute_reduced_weights(layers, model.get_masks(thresholds))
    for layer, weight in zip(layers, weights):
        if FLAGS.cuda:
            layer.post_weight_mu.data = torch.Tensor(weight).cuda()
        else:
            layer.post_weight_mu.data = torch.Tensor(weight)

    for layer in layers:
        layer.deterministic = True
    trainer.test()
Developer: ScalarZhou, Project: Bayesian-Compression-for-Deep-Learning, Lines: 92, Source: example.py
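
The objective in this example divides the KL term by N: the variational bound over the whole dataset is averaged per data point, so the data term has the same scale as ordinary minibatch cross-entropy. Spelled out as a formula (a standard identity, not specific to this project):

% L is the per-example negative ELBO minimized by the objective above:
% the first term is estimated by d_error, the second is kl_divergence/N.
\[
  \mathcal{L}
  = \frac{1}{N}\sum_{i=1}^{N} \mathbb{E}_{q(w)}\bigl[-\log p(y_i \mid x_i, w)\bigr]
  + \frac{1}{N}\,\mathrm{KL}\bigl(q(w)\,\|\,p(w)\bigr)
\]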

Example 3: main

# Required import: from trainer import Trainer [as alias]
# Or: from trainer.Trainer import test [as alias]
# (this snippet also relies on os, logging, random, torch, torch.nn as nn,
#  torch.optim as optim, and the project's Vocab, Constants, SICKDataset,
#  SimilarityTreeLSTM, Metrics, build_vocab, and load_word_vectors helpers)
def main():
    global args
    args = parse_args()
    # global logger
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter("[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
    # file logger
    fh = logging.FileHandler(os.path.join(args.save, args.expname)+'.log', mode='w')
    fh.setLevel(logging.INFO)
    fh.setFormatter(formatter)
    logger.addHandler(fh)
    # console logger
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    # argument validation
    args.cuda = args.cuda and torch.cuda.is_available()
    if args.sparse and args.wd != 0:
        logger.error('Sparsity and weight decay are incompatible, pick one!')
        exit()
    logger.debug(args)
    torch.manual_seed(args.seed)
    random.seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)
        torch.backends.cudnn.benchmark = True
    if not os.path.exists(args.save):
        os.makedirs(args.save)

    train_dir = os.path.join(args.data, 'train/')
    dev_dir = os.path.join(args.data, 'dev/')
    test_dir = os.path.join(args.data, 'test/')

    # write unique words from all token files
    sick_vocab_file = os.path.join(args.data, 'sick.vocab')
    if not os.path.isfile(sick_vocab_file):
        token_files_b = [os.path.join(split, 'b.toks') for split in [train_dir, dev_dir, test_dir]]
        token_files_a = [os.path.join(split, 'a.toks') for split in [train_dir, dev_dir, test_dir]]
        token_files = token_files_a + token_files_b
        build_vocab(token_files, sick_vocab_file)

    # get vocab object from vocab file previously written
    vocab = Vocab(filename=sick_vocab_file, data=[Constants.PAD_WORD, Constants.UNK_WORD, Constants.BOS_WORD, Constants.EOS_WORD])
    logger.debug('==> SICK vocabulary size : %d ' % vocab.size())

    # load SICK dataset splits
    train_file = os.path.join(args.data, 'sick_train.pth')
    if os.path.isfile(train_file):
        train_dataset = torch.load(train_file)
    else:
        train_dataset = SICKDataset(train_dir, vocab, args.num_classes)
        torch.save(train_dataset, train_file)
    logger.debug('==> Size of train data   : %d ' % len(train_dataset))
    dev_file = os.path.join(args.data, 'sick_dev.pth')
    if os.path.isfile(dev_file):
        dev_dataset = torch.load(dev_file)
    else:
        dev_dataset = SICKDataset(dev_dir, vocab, args.num_classes)
        torch.save(dev_dataset, dev_file)
    logger.debug('==> Size of dev data     : %d ' % len(dev_dataset))
    test_file = os.path.join(args.data, 'sick_test.pth')
    if os.path.isfile(test_file):
        test_dataset = torch.load(test_file)
    else:
        test_dataset = SICKDataset(test_dir, vocab, args.num_classes)
        torch.save(test_dataset, test_file)
    logger.debug('==> Size of test data    : %d ' % len(test_dataset))

    # initialize model, criterion/loss_function, optimizer
    model = SimilarityTreeLSTM(
                vocab.size(),
                args.input_dim,
                args.mem_dim,
                args.hidden_dim,
                args.num_classes,
                args.sparse,
                args.freeze_embed)
    criterion = nn.KLDivLoss()
    if args.cuda:
        model.cuda()
        criterion.cuda()
    if args.optim == 'adam':
        optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, weight_decay=args.wd)
    elif args.optim == 'adagrad':
        optimizer = optim.Adagrad(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, weight_decay=args.wd)
    elif args.optim == 'sgd':
        optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, weight_decay=args.wd)
    metrics = Metrics(args.num_classes)

    # for words common to dataset vocab and GLOVE, use GLOVE vectors
    # for other words in dataset vocab, use random normal vectors
    emb_file = os.path.join(args.data, 'sick_embed.pth')
    if os.path.isfile(emb_file):
        emb = torch.load(emb_file)
    else:
        # load glove embeddings and vocab
        glove_vocab, glove_emb = load_word_vectors(os.path.join(args.glove, 'glove.840B.300d'))
        logger.debug('==> GLOVE vocabulary size: %d ' % glove_vocab.size())
#......... remainder of this example omitted .........
Developer: shubhampachori12110095, Project: treelstm.pytorch, Lines: 103, Source: main.py
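
The comment at the top of the embedding block describes the initialization strategy: GLOVE vectors for words present in the pretrained vocabulary, random normal vectors for the rest. The implementing code falls in the omitted region, but a sketch of that strategy, continuing from the snippet's vocab/glove_vocab/glove_emb variables, could look like this (the Vocab accessor names and the 0.05 std are assumptions, not taken from the project):

import torch

# start every row as a random normal vector, then overwrite the rows of
# words that exist in the GLOVE vocabulary with their pretrained vectors
emb = torch.zeros(vocab.size(), glove_emb.size(1)).normal_(0, 0.05)
for idx in range(vocab.size()):
    word = vocab.getLabel(idx)              # hypothetical accessor
    glove_idx = glove_vocab.getIndex(word)  # hypothetical accessor
    if glove_idx is not None:
        emb[idx] = glove_emb[glove_idx]
torch.save(emb, emb_file)  # cache, matching the load branch above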

Example 4: main

# Required import: from trainer import Trainer [as alias]
# Or: from trainer.Trainer import test [as alias]
# (this snippet also relies on os, torch, torch.nn as nn, torch.optim as optim,
#  and the project's Vocab, WebKbbDataset, and DomTreeLSTM classes)
def main():
    args = parse_args()
    print(args)

    num_classes = 7

    data_dir = args.data_dir
    train_file = os.path.join(data_dir, 'train_data.pth')
    val_file = os.path.join(data_dir, 'val_data.pth')

    vocab_file = "../data/vocab.txt"
    vocab = Vocab(filename=vocab_file)

    if os.path.isfile(train_file):
        train_dataset = torch.load(train_file)
    else:
        train_dataset = WebKbbDataset(vocab, num_classes,
                                      os.path.join(data_dir, 'train_texts.blk'),
                                      os.path.join(data_dir, 'train_labels.blk'))
        torch.save(train_dataset, train_file)

    if os.path.isfile(val_file):
        val_dataset = torch.load(val_file)
    else:
        val_dataset = WebKbbDataset(vocab, num_classes,
                                    os.path.join(data_dir, 'val_texts.blk'),
                                    os.path.join(data_dir, 'val_labels.blk'))
        torch.save(val_dataset, val_file)

    vocab_size = vocab.size()
    in_dim = 200
    mem_dim = 200
    hidden_dim = 200
    sparsity = True
    freeze = args.freeze_emb
    epochs = args.epochs
    lr = args.lr
    pretrain = args.pretrain

    cuda_flag = torch.cuda.is_available()

    model = DomTreeLSTM(vocab_size,in_dim, mem_dim, hidden_dim, num_classes, sparsity, freeze)
    criterion = nn.CrossEntropyLoss()

    if pretrain:
        emb_file = os.path.join('../data', 'emb.pth')
        if os.path.isfile(emb_file):
            emb = torch.load(emb_file)
            print(emb.size())
            print("Embedding weights loaded")
            # copy the pretrained weights only when the file was actually found
            model.emb.weight.data.copy_(emb)
        else:
            print("Embedding file not found")

    optimizer = optim.Adagrad(filter(lambda p: p.requires_grad, model.parameters()), lr=lr)
    
    trainer = Trainer(model, criterion, optimizer, train_dataset, val_dataset, cuda_flag=cuda_flag)

    for epoch in range(epochs):
        trainer.train()
        trainer.test()
Developer: Jarvx, Project: web-classification, Lines: 86, Source: main.py
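
Examples 3 and 4 both use the same load-or-build caching idiom for datasets and embeddings: load the serialized object if it exists, otherwise build it and save it. That idiom can be factored into a small helper; a sketch (the helper name and signature are illustrative, not from either project):

import os
import torch

def load_or_build(cache_path, build_fn):
    """Return the cached object at cache_path, or build and cache it.

    Hypothetical helper illustrating the torch.load/torch.save caching
    idiom used in Examples 3 and 4; not part of either project.
    """
    if os.path.isfile(cache_path):
        return torch.load(cache_path)
    obj = build_fn()
    torch.save(obj, cache_path)
    return obj

# usage, mirroring Example 4:
# train_dataset = load_or_build(train_file, lambda: WebKbbDataset(
#     vocab, num_classes,
#     os.path.join(data_dir, 'train_texts.blk'),
#     os.path.join(data_dir, 'train_labels.blk')))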


Note: The trainer.Trainer.test method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code; do not reproduce without permission.