

Python model.eval Method Code Examples

This article collects typical usage examples of the Python model.eval method. If you are wondering what model.eval does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples of model, the object this method belongs to.


The following presents 15 code examples of the model.eval method, ordered by popularity by default.
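Before looking at the individual snippets, here is a minimal, generic sketch of the pattern that nearly all of them follow: switch the model into evaluation mode, score data without tracking gradients, then switch back to training mode. The names model, criterion and val_loader are assumptions for illustration only, not taken from any of the quoted projects.

import torch

def evaluate(model, criterion, val_loader):
    model.eval()                          # dropout off; batch norm uses running statistics
    total_loss, n = 0.0, 0
    with torch.no_grad():                 # no autograd graph is built while scoring
        for inputs, targets in val_loader:
            output = model(inputs)
            total_loss += criterion(output, targets).item() * len(inputs)
            n += len(inputs)
    model.train()                         # restore training mode before the next epoch
    return total_loss / max(n, 1)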

Example 1: evaluate

# Required module: import model [as alias]
# Or: from model import eval [as alias]
def evaluate(data_source, source_sampler, target_sampler, batch_size=10):
    # Turn on evaluation mode which disables dropout.
    model.eval()
    if args.model == 'QRNN':
        model.reset()
    total_loss = 0
    hidden = model.init_hidden(batch_size)

    for source_sample, target_sample in zip(source_sampler, target_sampler):
        data = torch.stack([data_source[i] for i in source_sample])
        targets = torch.stack([data_source[i] for i in target_sample]).view(-1)
        with torch.no_grad():
            output, hidden = model(data, hidden)
        total_loss += len(data) * criterion(model.decoder.weight, model.decoder.bias, output,
                                            targets).item()
        hidden = repackage_hidden(hidden)
    return total_loss / len(data_source) 
Developer: PetrochukM, Project: PyTorch-NLP, Lines: 20, Source: main.py
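
A helper that appears in almost every example on this page, repackage_hidden, is not shown in the snippets themselves. In the standard PyTorch word-language-model scripts it detaches the recurrent hidden state from the previous graph so that backpropagation (and memory usage) does not grow across batches. A minimal sketch of that helper, assuming the hidden state is a Tensor or a nested tuple of Tensors:

import torch

def repackage_hidden(h):
    """Detach hidden states from the graph they were produced in."""
    if isinstance(h, torch.Tensor):
        return h.detach()
    return tuple(repackage_hidden(v) for v in h)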

Example 2: evaluate

# Required module: import model [as alias]
# Or: from model import eval [as alias]
def evaluate(data_source, batch_size=10):
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    with torch.no_grad():
        for i in range(0, data_source.size(0) - 1, args.bptt):
            data, targets = get_batch(data_source, i, args)
            targets = targets.view(-1)
            
            log_prob, hidden = parallel_model(data, hidden)
            loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), targets).data

            total_loss += len(data) * loss
            hidden = repackage_hidden(hidden)
    return total_loss.item() / len(data_source) 
Developer: zihangdai, Project: mos, Lines: 19, Source: finetune.py

Example 3: evaluate

# Required module: import model [as alias]
# Or: from model import eval [as alias]
def evaluate(data_source, batch_size=10):
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    with torch.no_grad():
        for i in range(0, data_source.size(0) - 1, args.bptt):
            data, targets = get_batch(data_source, i, args)
            targets = targets.view(-1)

            log_prob, hidden = parallel_model(data, hidden)
            loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), targets).data

            total_loss += loss * len(data)

            hidden = repackage_hidden(hidden)
    return total_loss.item() / len(data_source) 
Developer: zihangdai, Project: mos, Lines: 20, Source: main.py

Example 4: dev

# Required module: import model [as alias]
# Or: from model import eval [as alias]
def dev(i):
    model.eval()
    total_loss = 0
    for chars, words, position, sub_sidx, sub_eidx, obj_sidx, obj_eidx, sub_slidx, sub_elidx in tqdm(validation_data, mininterval=1, desc='dev Processing', leave=False):
        with torch.no_grad():
            p_sub_sidx, p_sub_eidx, p_obj_sidx, p_obj_eidx, mask = model(
                chars, words, position, sub_slidx, sub_elidx)

            ss_loss = mask_binary_cross_entropy(p_sub_sidx, sub_sidx, mask)
            se_loss = mask_binary_cross_entropy(p_sub_eidx, sub_eidx, mask)
            os_loss = mask_binary_cross_entropy(p_obj_sidx, obj_sidx, mask)
            oe_loss = mask_binary_cross_entropy(p_obj_eidx, obj_eidx, mask)

            loss = ss_loss+se_loss+os_loss+oe_loss
            total_loss += loss.data.item()

    print(
        f"dev epoch {i+1}/{args.epochs} loss: {total_loss/training_data.stop_step:.4f}") 
Developer: ne7ermore, Project: torch-light, Lines: 20, Source: train.py
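
The mask_binary_cross_entropy function called above is project-specific and not shown on this page. As a rough guide only (an assumption, not the torch-light implementation), a common way to write a masked binary cross-entropy is to compute the element-wise loss, zero it at padded positions, and normalise by the number of real positions:

import torch
import torch.nn.functional as F

def mask_binary_cross_entropy(pred, target, mask):
    # pred, target, mask shaped (batch, seq_len); mask is 1 for real tokens, 0 for padding
    loss = F.binary_cross_entropy(pred, target.float(), reduction="none")
    return (loss * mask).sum() / mask.sum().clamp(min=1)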

Example 5: test

# Required module: import model [as alias]
# Or: from model import eval [as alias]
def test(i, predict):
    model.eval()
    t = pre = groud = 0
    inf = open("data/dev_data.json", encoding="utf8")
    for line in inf:
        line = json.loads(line)
        text = line["text"]
        g_triples = set()
        for trip in line["spo_list"]:
            g_triples.add((trip["subject"], trip["predicate"], trip["object"]))

        p_triples = predict.predict(text)
        pre += len(p_triples)
        groud += len(g_triples)
        t += len(p_triples.intersection(g_triples))

    print(
        f"test epoch {i+1}/{args.epochs} precision: {t/(pre+0.001):.4f} recall: {t/groud:.4f} f1: {2*t/(pre+groud):.4f}")
    return 2*t/(pre+groud) 
Developer: ne7ermore, Project: torch-light, Lines: 21, Source: train.py
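
The metric printed above is micro precision/recall/F1 over (subject, predicate, object) triples: t counts correctly predicted triples, pre counts all predicted triples, groud counts gold triples, and 2*t/(pre+groud) equals the usual 2PR/(P+R). A toy illustration (not project code):

gold = {("A", "born_in", "B"), ("A", "works_at", "C")}
pred = {("A", "born_in", "B"), ("A", "works_at", "D")}

t = len(pred & gold)                  # correctly predicted triples
precision = t / len(pred)             # t / pre
recall = t / len(gold)                # t / groud
f1 = 2 * t / (len(pred) + len(gold))  # same as 2 * precision * recall / (precision + recall)
print(precision, recall, f1)          # 0.5 0.5 0.5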

Example 6: evaluate

# Required module: import model [as alias]
# Or: from model import eval [as alias]
def evaluate(data_source):
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0.
    ntokens = len(corpus.dictionary)
    if args.model != 'Transformer':
        hidden = model.init_hidden(eval_batch_size)
    with torch.no_grad():
        for i in range(0, data_source.size(0) - 1, args.bptt):
            data, targets = get_batch(data_source, i)
            if args.model == 'Transformer':
                output = model(data)
                output = output.view(-1, ntokens)
            else:
                output, hidden = model(data, hidden)
                hidden = repackage_hidden(hidden)
            total_loss += len(data) * criterion(output, targets).item()
    return total_loss / (len(data_source) - 1) 
Developer: pytorch, Project: examples, Lines: 20, Source: main.py
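
As the comment at the top of each example says, model.eval() is what switches modules such as nn.Dropout (and batch normalisation) into inference behaviour; it does not disable gradient tracking, which is why most examples also wrap the loop in torch.no_grad(). A tiny standalone demonstration of the mode switch, unrelated to the quoted projects:

import torch
import torch.nn as nn

drop = nn.Dropout(p=0.5)
x = torch.ones(4)

drop.train()     # training mode: roughly half the entries are zeroed,
print(drop(x))   # the survivors are scaled by 1 / (1 - p) = 2.0

drop.eval()      # evaluation mode: dropout becomes the identity
print(drop(x))   # tensor([1., 1., 1., 1.])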

Example 7: evaluate

# Required module: import model [as alias]
# Or: from model import eval [as alias]
def evaluate(data_source, batch_size=10):
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    for i in range(0, data_source.size(0) - 1, args.bptt):
        print(i, data_source.size(0)-1)
        data, targets = get_batch(data_source, i, args, evaluation=True)
        targets = targets.view(-1)

        log_prob, hidden = parallel_model(data, hidden)
        loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), targets).data

        total_loss += loss * len(data)

        hidden = repackage_hidden(hidden)
    return total_loss[0] / len(data_source)

# Load the best saved model. 
Developer: quark0, Project: darts, Lines: 22, Source: test.py

Example 8: evaluate

# Required module: import model [as alias]
# Or: from model import eval [as alias]
def evaluate(data_source, batch_size=10):
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    for i in range(0, data_source.size(0) - 1, args.bptt):
        data, targets = get_batch(data_source, i, args, evaluation=True)
        targets = targets.view(-1)

        log_prob, hidden = parallel_model(data, hidden)
        loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), targets).data

        total_loss += loss * len(data)

        hidden = repackage_hidden(hidden)
    return total_loss[0] / len(data_source) 
Developer: quark0, Project: darts, Lines: 19, Source: train.py

Example 9: evaluate

# Required module: import model [as alias]
# Or: from model import eval [as alias]
def evaluate(data_source, batch_size=10):
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    for i in range(0, data_source.size(0) - 1, args.bptt):
        data, targets = get_batch(data_source, i, args.bptt, evaluation=True)
        targets = targets.view(-1)

        log_prob, hidden = parallel_model(data, hidden)
        loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), targets).data

        total_loss += loss * len(data)

        hidden = repackage_hidden(hidden)
    return total_loss[0] / len(data_source) 
Developer: renqianluo, Project: NAO, Lines: 19, Source: train.py

Example 10: evaluate

# Required module: import model [as alias]
# Or: from model import eval [as alias]
def evaluate(data_source):
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0.
    ntokens = len(corpus.dictionary)
    if args.model != 'Transformer':
        hidden = model.init_hidden(eval_batch_size)
    with torch.no_grad():
        for i in range(0, data_source.size(0) - 1, args.bptt):
            data, targets = get_batch(data_source, i)
            if args.model == 'Transformer':
                output = model(data)
            else:
                output, hidden = model(data, hidden)
                hidden = repackage_hidden(hidden)
            output_flat = output.view(-1, ntokens)
            total_loss += len(data) * criterion(output_flat, targets).item()
    return total_loss / (len(data_source) - 1) 
Developer: Lornatang, Project: PyTorch, Lines: 20, Source: main.py

Example 11: evaluate

# Required module: import model [as alias]
# Or: from model import eval [as alias]
def evaluate(lm_data_source, ccg_data_source):
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    if (not args.single) and (torch.cuda.device_count() > 1):
        #"module" is necessary when using DataParallel
        hidden = model.module.init_hidden(eval_batch_size)
    else:
        hidden = model.init_hidden(eval_batch_size)
    for i in range(0, lm_data_source.size(0) + ccg_data_source.size(0) - 1, args.bptt):
        # TAG
        if i > lm_data_source.size(0):
            data, targets = get_batch(ccg_data_source, i - lm_data_source.size(0), evaluation=True)
        # LM
        else:
            data, targets = get_batch(lm_data_source, i, evaluation=True)
        output, hidden = model(data, hidden)
        output_flat = output.view(-1, ntokens)
        curr_loss = len(data) * criterion(output_flat, targets).data
        total_loss += curr_loss
        hidden = repackage_hidden(hidden)
    if len(ccg_data_source) == 0:
        return total_loss / len(lm_data_source)
    return total_loss[0] / (len(lm_data_source)+len(ccg_data_source)) 
Developer: BeckyMarvin, Project: LM_syneval, Lines: 27, Source: main.py
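
Example 11 highlights a DataParallel detail: the wrapper only exposes forward(), so custom methods such as init_hidden have to be reached through .module, while train()/eval() themselves do propagate to the wrapped module. A small self-contained sketch of the same pattern, using a hypothetical TinyRNN rather than the project's model:

import torch
import torch.nn as nn

class TinyRNN(nn.Module):
    def __init__(self, nhid=8):
        super().__init__()
        self.nhid = nhid
        self.rnn = nn.RNN(4, nhid)

    def init_hidden(self, bsz):
        return torch.zeros(1, bsz, self.nhid)

    def forward(self, x, hidden):
        return self.rnn(x, hidden)

model = nn.DataParallel(TinyRNN())
model.eval()                           # propagates to the wrapped module
hidden = model.module.init_hidden(10)  # custom methods live on .module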

Example 12: evaluate

# Required module: import model [as alias]
# Or: from model import eval [as alias]
def evaluate(data_source, batch_size=10):
    # Turn on evaluation mode which disables dropout.
    if args.model == 'QRNN': model.reset()
    model.eval()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    for i in range(0, data_source.size(0) - 1, args.bptt):
        data, targets = get_batch(data_source, i, args, evaluation=True)
        output, hidden = model(data, hidden)
        output_flat = output.view(-1, ntokens)
        total_loss += len(data) * criterion(output_flat, targets).data
        hidden = repackage_hidden(hidden)
    return total_loss[0] / len(data_source) 
Developer: urvashik, Project: lm-context-analysis, Lines: 16, Source: finetune.py

Example 13: evaluate

# Required module: import model [as alias]
# Or: from model import eval [as alias]
def evaluate(data_source, batch_size=10):
    # Turn on evaluation mode which disables dropout.
    model.eval()
    if args.model == 'QRNN': model.reset()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    for i in range(0, data_source.size(0) - 1, args.bptt):
        data, targets = get_batch(data_source, i, args, evaluation=True)
        output, hidden = model(data, hidden)
        output_flat = output.view(-1, ntokens)
        total_loss += len(data) * criterion(output_flat, targets).data
        hidden = repackage_hidden(hidden)
    return total_loss[0] / len(data_source) 
Developer: urvashik, Project: lm-context-analysis, Lines: 16, Source: main.py

Example 14: evaluate

# Required module: import model [as alias]
# Or: from model import eval [as alias]
def evaluate(data_source, batch_size, seq_len):
    # Turn on evaluation mode which disables dropout.
    model.eval()

    total_loss = 0
    tokens = 0
    n = 0
    save_all_losses = []

    ntokens = len(corpus.dictionary)

    hidden = model.init_hidden(batch_size)

    for i in range(0, data_source.size(0) - 1, seq_len):
        tokens += seq_len
        data, targets = get_batch(data_source, i, args, evaluation=True, seq_len=seq_len)
        output, hidden = model(data, hidden)
        output = nn.functional.log_softmax(output.permute(2,1,0)).permute(2,1,0)
        targets = targets.view(data.data.shape[0], batch_size, -1)
        CELoss = torch.gather(output.data, dim=2, index=targets.data).squeeze()
        CELoss = -1*CELoss
        if tokens < args.start_token: continue # We are not ready to accumulate error yet
        elif tokens >= args.start_token and tokens-seq_len < args.start_token:
            data.data = data.data[-(tokens-args.start_token+1):]
            CELoss = CELoss[-(tokens-args.start_token+1):]
            print('First word: %s' % (corpus.dictionary.idx2word[data.data[-(tokens-args.start_token+1),0]]))
        total_loss += torch.sum(CELoss)
        n += data.size(0)
        save_all_losses += CELoss.tolist()
        hidden = repackage_hidden(hidden)
    print('total: %d' % n)
    print('Last word: %s' % (corpus.dictionary.idx2word[data.data[-1,0]]))
    return total_loss / float(n), save_all_losses 
Developer: urvashik, Project: lm-context-analysis, Lines: 35, Source: eval.py
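
Example 14 collects a per-token loss instead of a single average: torch.gather picks, at every position, the log-probability the model assigned to the observed token, and negating it gives that token's cross-entropy. A toy illustration of just that step (shapes and names are illustrative):

import torch

log_prob = torch.log_softmax(torch.randn(3, 2, 5), dim=2)  # (seq_len, batch, vocab)
targets = torch.randint(0, 5, (3, 2, 1))                    # (seq_len, batch, 1)
token_nll = -torch.gather(log_prob, dim=2, index=targets).squeeze(2)
print(token_nll.shape)  # torch.Size([3, 2]) -- one loss value per token per sequence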

Example 15: evaluate

# Required module: import model [as alias]
# Or: from model import eval [as alias]
def evaluate(data_source):
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(eval_batch_size)
    for i in range(0, data_source.size(0) - 1, args.bptt):
        data, targets = get_batch(data_source, i, evaluation=True)
        output, hidden = model(data, hidden)
        output_flat = output.view(-1, ntokens)
        total_loss += len(data) * criterion(output_flat, targets).data
        hidden = repackage_hidden(hidden)
    return total_loss[0] / len(data_source) 
Developer: jiacheng-xu, Project: vmf_vae_nlp, Lines: 15, Source: main.py


Note: The model.eval examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors, and copyright remains with the original authors; please consult the corresponding project's license before redistributing or reusing the code. Do not reproduce this compilation without permission.