

Python utils.get_batch Method Code Examples

This article collects typical usage examples of the Python method utils.get_batch. If you are wondering what utils.get_batch does, how to call it, or what real uses of it look like, the curated code examples below may help. You can also explore further usage examples from the utils module in which the method is defined.


The following presents 15 code examples of the utils.get_batch method, sorted by popularity by default.
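Most of the language-model examples below (mos, darts, NAO, lm-context-analysis, fraternal-dropout, awd-lstm-lm) call get_batch(data_source, i, ...) on a pre-batchified tensor of shape [length, batch_size], slicing out one bptt-sized window of inputs together with the next-step targets. As background, here is a minimal sketch of such a helper, modeled on the AWD-LSTM-style utils.py; the exact signature and whether targets are flattened vary from project to project, so treat it only as an illustration:

def get_batch(source, i, args, seq_len=None, evaluation=False):
    # source: pre-batchified LongTensor of shape [length, batch_size]
    # Take up to args.bptt time steps starting at position i.
    seq_len = min(seq_len if seq_len else args.bptt, len(source) - 1 - i)
    data = source[i:i + seq_len]            # inputs: positions i .. i+seq_len-1
    target = source[i + 1:i + 1 + seq_len]  # targets: the same window shifted by one step
    return data, target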

Example 1: evaluate

# Required module: import utils [as alias]
# Or: from utils import get_batch [as alias]
def evaluate(data_source, batch_size=10):
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    with torch.no_grad():
        for i in range(0, data_source.size(0) - 1, args.bptt):
            data, targets = get_batch(data_source, i, args)
            targets = targets.view(-1)
            
            log_prob, hidden = parallel_model(data, hidden)
            loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), targets).data

            total_loss += len(data) * loss
            hidden = repackage_hidden(hidden)
    return total_loss.item() / len(data_source) 
Developer: zihangdai, Project: mos, Lines of code: 19, Source file: finetune.py

Example 2: evaluate

# Required module: import utils [as alias]
# Or: from utils import get_batch [as alias]
def evaluate(self, sess, set_type):
        if set_type == 'val':
            num_images = len(self.obj.val_list)
            generator = self.obj.val_generator()
        else:
            num_images = len(self.obj.train_list)
            generator = self.obj.train_generator()
        
        true_positives = 0
        val_loss = 0
        num_batches = num_images//self.batch_size if num_images%self.batch_size == 0 else num_images//self.batch_size + 1 
        for i in range(num_batches):
            x_batch, y_batch = get_batch(generator, set_type, height=self.model.height, width=self.model.width)

            predicted = sess.run([self.model.pred], feed_dict={self.model.x:x_batch, self.model.y:y_batch})
            
            true_positives = true_positives + np.sum(predicted[0] == np.argmax(y_batch,1))

        print('set_type:',set_type, 'accuracy = ', true_positives*100.0/num_images)
        

    #predict the labels for test dataset 
Developer: halwai, Project: bird_classification, Lines of code: 24, Source file: main.py

Example 3: predict

# Required module: import utils [as alias]
# Or: from utils import get_batch [as alias]
def predict(self, sess, set_type):
        if set_type == 'val':
            num_images = len(self.obj.val_list)
            generator = self.obj.val_generator()
        elif  set_type == 'test':
            num_images = len(self.obj.test_list)
            generator = self.obj.test_generator()
        else:
            num_images = len(self.obj.train_list)
            generator = self.obj.train_generator()
        
        true_positives = 0
        num_batches = num_images//self.batch_size if num_images%self.batch_size == 0 else num_images//self.batch_size + 1 
        model_predictions = []
        for i in range(num_batches):
            x_batch, _ = get_batch(generator, set_type , height=self.model.height, width=self.model.width)
            predicted = sess.run([ self.model.pred], feed_dict={self.model.x:x_batch})
            model_predictions.extend(predicted[0])
        return model_predictions 
Developer: halwai, Project: bird_classification, Lines of code: 21, Source file: main.py
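Examples 2 and 3 (bird_classification) use a different, image-oriented get_batch: it draws samples from a Python generator and resizes them to the model's input resolution before stacking them into a batch. The project's actual helper is not shown on this page, so the following is only a hypothetical sketch under assumed preprocessing (cv2 resizing, a fixed batch_size, labels coming from the generator already one-hot encoded):

import cv2
import numpy as np

def get_batch(generator, set_type, height, width, batch_size=32):
    # Hypothetical sketch, not the project's code: collect up to batch_size
    # (image, label) pairs from the generator and resize images to (height, width).
    # set_type ('train'/'val'/'test') would typically toggle augmentation; omitted here.
    images, labels = [], []
    for _ in range(batch_size):
        try:
            image, label = next(generator)
        except StopIteration:
            break  # smaller final batch at the end of an epoch
        images.append(cv2.resize(image, (width, height)))
        labels.append(label)
    return np.asarray(images), np.asarray(labels)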

Example 4: evaluate

# Required module: import utils [as alias]
# Or: from utils import get_batch [as alias]
def evaluate(data_source, batch_size=10):
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    with torch.no_grad():
        for i in range(0, data_source.size(0) - 1, args.bptt):
            data, targets = get_batch(data_source, i, args)
            targets = targets.view(-1)

            log_prob, hidden = parallel_model(data, hidden)
            loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), targets).data

            total_loss += loss * len(data)

            hidden = repackage_hidden(hidden)
    return total_loss.item() / len(data_source) 
Developer: zihangdai, Project: mos, Lines of code: 20, Source file: main.py

Example 5: evaluate

# Required module: import utils [as alias]
# Or: from utils import get_batch [as alias]
def evaluate(data_source, batch_size=10):
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    for i in range(0, data_source.size(0) - 1, args.bptt):
        data, targets = get_batch(data_source, i, args, evaluation=True)
        targets = targets.view(-1)

        log_prob, hidden = parallel_model(data, hidden)
        loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), targets).data

        total_loss += loss * len(data)

        hidden = repackage_hidden(hidden)
    return total_loss[0] / len(data_source) 
Developer: quark0, Project: darts, Lines of code: 19, Source file: train_search.py

Example 6: evaluate

# Required module: import utils [as alias]
# Or: from utils import get_batch [as alias]
def evaluate(data_source, batch_size=10):
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    for i in range(0, data_source.size(0) - 1, args.bptt):
        print(i, data_source.size(0)-1)
        data, targets = get_batch(data_source, i, args, evaluation=True)
        targets = targets.view(-1)

        log_prob, hidden = parallel_model(data, hidden)
        loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), targets).data

        total_loss += loss * len(data)

        hidden = repackage_hidden(hidden)
    return total_loss[0] / len(data_source)

# Load the best saved model. 
Developer: quark0, Project: darts, Lines of code: 22, Source file: test.py

Example 7: evaluate

# Required module: import utils [as alias]
# Or: from utils import get_batch [as alias]
def evaluate(data_source, model, parallel_model, params, batch_size=10):
  # Turn on evaluation mode which disables dropout.
  arch_pool = params['arch_pool']
  logging.info('Evaluating on {} archs'.format(len(arch_pool)))
  start_time = time.time()
  valid_score_list = []
  for arch in arch_pool:
    model.eval()
    hidden = model.init_hidden(batch_size)
    #whether use random batch ?
    # data_source is in the format of [length, bs, ...]
    #for i in range(0, data_source.size(0) - 1, params['bptt']):
    #for i in range(1):
    batch = np.random.randint(0, data_source.size(0)//params['bptt'])
    data, targets = get_batch(data_source, batch, params['bptt'], evaluation=True)
    targets = targets.view(-1)
    log_prob, hidden = parallel_model(data, hidden, arch)
    loss = F.nll_loss(log_prob.view(-1, log_prob.size(2)), targets).data[0]
    valid_score_list.append(loss)
  eval_time = time.time() - start_time
  mean_valid_score = np.mean(valid_score_list)
  logging.info('Mean loss {:5.2f} | mean ppl {:8.2f} | time {:5.2f} secs'.format(mean_valid_score, np.exp(mean_valid_score), eval_time))
  return valid_score_list 
Developer: renqianluo, Project: NAO, Lines of code: 25, Source file: model_search.py

Example 8: evaluate

# Required module: import utils [as alias]
# Or: from utils import get_batch [as alias]
def evaluate(data_source, batch_size=10):
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    for i in range(0, data_source.size(0) - 1, args.bptt):
        print(i, data_source.size(0)-1)
        data, targets = get_batch(data_source, i, args, evaluation=True)
        targets = targets.view(-1)

        log_prob, hidden = parallel_model(data, hidden, args.arc)
        loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), targets).data

        total_loss += loss * len(data)

        hidden = repackage_hidden(hidden)
    return total_loss[0] / len(data_source)

# Load the best saved model. 
Developer: renqianluo, Project: NAO, Lines of code: 22, Source file: test.py

Example 9: evaluate

# Required module: import utils [as alias]
# Or: from utils import get_batch [as alias]
def evaluate(data_source, batch_size=10):
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    for i in range(0, data_source.size(0) - 1, args.bptt):
        data, targets = get_batch(data_source, i, args.bptt, evaluation=True)
        targets = targets.view(-1)

        log_prob, hidden = parallel_model(data, hidden)
        loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), targets).data

        total_loss += loss * len(data)

        hidden = repackage_hidden(hidden)
    return total_loss[0] / len(data_source) 
Developer: renqianluo, Project: NAO, Lines of code: 19, Source file: train.py

Example 10: evaluate

# Required module: import utils [as alias]
# Or: from utils import get_batch [as alias]
def evaluate(data_source, batch_size=10):
    # Turn on evaluation mode which disables dropout.
    if args.model == 'QRNN': model.reset()
    model.eval()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    for i in range(0, data_source.size(0) - 1, args.bptt):
        data, targets = get_batch(data_source, i, args, evaluation=True)
        output, hidden = model(data, hidden)
        output_flat = output.view(-1, ntokens)
        total_loss += len(data) * criterion(output_flat, targets).data
        hidden = repackage_hidden(hidden)
    return total_loss[0] / len(data_source) 
Developer: urvashik, Project: lm-context-analysis, Lines of code: 16, Source file: finetune.py

Example 11: evaluate

# Required module: import utils [as alias]
# Or: from utils import get_batch [as alias]
def evaluate(data_source, batch_size=10):
    # Turn on evaluation mode which disables dropout.
    model.eval()
    if args.model == 'QRNN': model.reset()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    for i in range(0, data_source.size(0) - 1, args.bptt):
        data, targets = get_batch(data_source, i, args, evaluation=True)
        output, hidden = model(data, hidden)
        output_flat = output.view(-1, ntokens)
        total_loss += len(data) * criterion(output_flat, targets).data
        hidden = repackage_hidden(hidden)
    return total_loss[0] / len(data_source) 
Developer: urvashik, Project: lm-context-analysis, Lines of code: 16, Source file: main.py

Example 12: evaluate

# Required module: import utils [as alias]
# Or: from utils import get_batch [as alias]
def evaluate(data_source, batch_size, seq_len):
    # Turn on evaluation mode which disables dropout.
    model.eval()

    total_loss = 0
    tokens = 0
    n = 0
    save_all_losses = []

    ntokens = len(corpus.dictionary)

    hidden = model.init_hidden(batch_size)

    for i in range(0, data_source.size(0) - 1, seq_len):
        tokens += seq_len
        data, targets = get_batch(data_source, i, args, evaluation=True, seq_len=seq_len)
        output, hidden = model(data, hidden)
        output = nn.functional.log_softmax(output.permute(2,1,0)).permute(2,1,0)
        targets = targets.view(data.data.shape[0], batch_size, -1)
        CELoss = torch.gather(output.data, dim=2, index=targets.data).squeeze()
        CELoss = -1*CELoss
        if tokens < args.start_token: continue # We are not ready to accumulate error yet
        elif tokens >= args.start_token and tokens-seq_len < args.start_token:
            data.data = data.data[-(tokens-args.start_token+1):]
            CELoss = CELoss[-(tokens-args.start_token+1):]
            print('First word: %s' % (corpus.dictionary.idx2word[data.data[-(tokens-args.start_token+1),0]]))
        total_loss += torch.sum(CELoss)
        n += data.size(0)
        save_all_losses += CELoss.tolist()
        hidden = repackage_hidden(hidden)
    print('total: %d' % n)
    print('Last word: %s' % (corpus.dictionary.idx2word[data.data[-1,0]]))
    return total_loss / float(n), save_all_losses 
Developer: urvashik, Project: lm-context-analysis, Lines of code: 35, Source file: eval.py

Example 13: evaluate

# Required module: import utils [as alias]
# Or: from utils import get_batch [as alias]
def evaluate(data_source, batch_size=10):
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    for i in range(0, data_source.size(0) - 1, args.bptt):
        data, targets = get_batch(data_source, i, args, evaluation=True)
        output, hidden = model(data, hidden)
        output_flat = output.view(-1, ntokens)
        total_loss += len(data) * criterion(output_flat, targets).data
        hidden = repackage_hidden(hidden)
    return total_loss[0] / len(data_source) 
Developer: kondiz, Project: fraternal-dropout, Lines of code: 15, Source file: main.py

Example 14: evaluate

# Required module: import utils [as alias]
# Or: from utils import get_batch [as alias]
def evaluate(data_source, batch_size=10):
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    for i in range(0, data_source.size(0) - 1, args.bptt):
        data, targets = get_batch(data_source, i, args, evaluation=True)
        output, hidden = model(data, hidden)
        output_flat = output.view(-1, ntokens)
        total_loss += len(data) * criterion(output_flat, targets).data
        hidden = repackage_hidden(hidden)
    return total_loss[0] / len(data_source)

# Load the best saved model. 
Developer: kondiz, Project: fraternal-dropout, Lines of code: 17, Source file: eval.py

Example 15: evaluate

# Required module: import utils [as alias]
# Or: from utils import get_batch [as alias]
def evaluate(data_source, batch_size=10):
    # Turn on evaluation mode which disables dropout.
    model.eval()
    if args.model == 'QRNN': model.reset()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    for i in range(0, data_source.size(0) - 1, args.bptt):
        data, targets = get_batch(data_source, i, args, evaluation=True)
        output, hidden = model(data, hidden)
        total_loss += len(data) * criterion(model.decoder.weight, model.decoder.bias, output, targets).data
        hidden = repackage_hidden(hidden)
    return total_loss.item() / len(data_source) 
Developer: salesforce, Project: awd-lstm-lm, Lines of code: 15, Source file: main.py
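In each of these language-model scripts, evaluate returns the average negative log-likelihood per token over data_source, which the surrounding training loop typically reports as perplexity via the exponential (as in Example 7's logging). A minimal usage sketch, where the names val_data and eval_batch_size are assumptions rather than taken from this page:

import math

val_loss = evaluate(val_data, eval_batch_size)
print('valid loss {:5.2f} | valid ppl {:8.2f}'.format(val_loss, math.exp(val_loss)))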


Note: The utils.get_batch method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors; for distribution and use, please refer to the License of the corresponding project. Do not reproduce without permission.