

Python data_load.get_batch Method Code Examples

This article collects typical usage examples of the Python method data_load.get_batch. If you are wondering what exactly data_load.get_batch does or how to use it, the curated code examples below may help. You can also explore other usage examples from the data_load module these methods come from.


Five code examples of data_load.get_batch are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
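
Note that get_batch is not part of a shared library: each project below defines its own version in its data_load module, so the signatures and return values differ between examples. Purely to illustrate the contract assumed by Examples 1 and 4 (a PyTorch-style batch sampler), a minimal stand-in might look like the sketch below; the shapes and random sampling are placeholders, not the original implementation.

import torch

def get_batch(max_span, batch_size, n_classes, training):
    """Illustrative stand-in for data_load.get_batch as used in the msg_reply examples.

    The real function reads the project's own dataset; here we only sample
    random data with plausible shapes so the surrounding loops can be exercised.
    The training flag would select the train/eval split in the real function.
    """
    x = torch.randint(0, 30000, (batch_size, max_span))  # token ids (e.g. a BERT-sized vocabulary)
    y = torch.randint(0, n_classes, (batch_size,))       # one target class per sample
    return x, y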

Example 1: train_and_eval

# Required import: import data_load [as alias]
# Or: from data_load import get_batch [as alias]
def train_and_eval(model, optimizer, criterion, ids2tokens, idx2phr):
    model.train()
    for step in tqdm(range(hp.n_train_steps+1)):
        x, y = get_batch(hp.max_span, hp.batch_size, hp.n_classes, True)
        x = x.cuda()
        y = y.cuda()

        optimizer.zero_grad()

        logits, y_hat, _ = model(x) # logits: (N, classes), y_hat: (N,)

        loss = criterion(logits, y)
        loss.backward()

        optimizer.step()

        # evaluation
        if step and step%500==0: # monitoring
            eval(model, f'{hp.logdir}/{step}', ids2tokens, idx2phr)
            print(f"step: {step}, loss: {loss.item()}")
            model.train() 
Developer: Kyubyong, Project: msg_reply, Lines of code: 23, Source file: train.py

Example 2: __init__

# Required import: import data_load [as alias]
# Or: from data_load import get_batch [as alias]
def __init__(self, is_training=True):
        self.graph = tf.Graph()
        with self.graph.as_default():
            if is_training:
                self.x, self.y, self.num_batch = get_batch()
            else: # Evaluation
                self.x = tf.placeholder(tf.int32, shape=(None, hp.max_len,))
                self.y = tf.placeholder(tf.int32, shape=(None, hp.max_len,))
            
            # Character Embedding for x
            self.enc = embed(self.x, len(roma2idx), hp.embed_size, scope="emb_x")
                
            # Encoder
            self.memory = encode(self.enc, is_training=is_training)
            
            # Character Embedding for decoder_inputs
            self.decoder_inputs = shift_by_one(self.y)
            self.dec = embed(self.decoder_inputs, len(surf2idx), hp.embed_size, scope="emb_decoder_inputs")
            
            # Decoder
            self.outputs = decode(self.dec, self.memory, len(surf2idx), is_training=is_training) # (N, T', hp.n_mels*hp.r)
            self.logprobs = tf.log(tf.nn.softmax(self.outputs)+1e-10) 
            self.preds = tf.arg_max(self.outputs, dimension=-1)
                
            if is_training: 
                self.loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.y, logits=self.outputs) 
                self.istarget = tf.to_float(tf.not_equal(self.y, tf.zeros_like(self.y))) # masking
                self.mean_loss = tf.reduce_sum(self.loss * self.istarget) / (tf.reduce_sum(self.istarget))
               
                # Training Scheme
                self.global_step = tf.Variable(0, name='global_step', trainable=False)
                self.optimizer = tf.train.AdamOptimizer(learning_rate=hp.lr)
                self.train_op = self.optimizer.minimize(self.mean_loss, global_step=self.global_step)
                   
                # Summary 
                tf.summary.scalar('mean_loss', self.mean_loss)
                self.merged = tf.summary.merge_all() 
Developer: Kyubyong, Project: neural_japanese_transliterator, Lines of code: 39, Source file: train.py
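
In this project, get_batch returns symbolic tensors from a TensorFlow 1.x input queue together with the number of batches per epoch, so the graph is driven by repeatedly running the training op. A minimal sketch of that driver loop follows, assuming the constructor above belongs to a class named Graph and TF 1.x is in use; the Supervisor-based session handling and hp.num_epochs are assumptions, not necessarily the repository's exact code.

g = Graph(is_training=True)
with g.graph.as_default():
    sv = tf.train.Supervisor(logdir=hp.logdir, save_model_secs=0)
    with sv.managed_session() as sess:
        for epoch in range(1, hp.num_epochs + 1):
            for _ in range(g.num_batch):   # num_batch comes from get_batch()
                sess.run(g.train_op)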

Example 3: __init__

# Required import: import data_load [as alias]
# Or: from data_load import get_batch [as alias]
def __init__(self, is_training=True):
        self.graph = tf.Graph()
        self.is_training=is_training
        with self.graph.as_default():
            if is_training:
                self.x, self.y, self.num_batch = get_batch() 
            else: # Evaluation
                self.x = tf.placeholder(tf.float32, shape=(None, None, hp.n_mels*hp.r))
                self.y = tf.placeholder(tf.int32, shape=(None, hp.max_len))
            
            self.decoder_inputs = embed(shift_by_one(self.y), len(char2idx), hp.embed_size) # (N, T', E)
            
            with tf.variable_scope('net'):
                # Encoder
                self.memory = encode(self.x, is_training=is_training) # (N, T, hp.n_mels*hp.r)
                
                # Decoder
                self.outputs = decode(self.decoder_inputs, self.memory, is_training=is_training) # (N, T', E)
                self.logprobs = tf.log(tf.nn.softmax(self.outputs)+1e-10) 
                self.preds = tf.arg_max(self.outputs, dimension=-1)
                
            if is_training:  
                # Loss
                self.loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.y, logits=self.outputs)
                
                # Target masking
                self.istarget = tf.to_float(tf.not_equal(self.y, 0))
                self.mean_loss = tf.reduce_sum(self.loss*self.istarget) / (tf.reduce_sum(self.istarget) + 1e-7)
                
                # Training Scheme
                self.global_step = tf.Variable(0, name='global_step', trainable=False)
                self.optimizer = tf.train.AdamOptimizer(learning_rate=hp.lr)
                self.train_op = self.optimizer.minimize(self.mean_loss, global_step=self.global_step)
                   
                # Summary 
                tf.summary.scalar('mean_loss', self.mean_loss)
                self.merged = tf.summary.merge_all() 
Developer: Kyubyong, Project: tacotron_asr, Lines of code: 39, Source file: train.py
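
At evaluation time the constructor above is called with is_training=False, so x and y become placeholders that are fed directly instead of coming from get_batch. The sketch below shows one plausible greedy, step-by-step decoding path through those placeholders; the checkpoint restoration, the mel_batch feature array, and the autoregressive loop are assumptions about how the rest of the project uses this graph, again assuming a wrapping class named Graph.

import numpy as np

g = Graph(is_training=False)
with g.graph.as_default():
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, tf.train.latest_checkpoint(hp.logdir))  # assumed checkpoint dir
        preds = np.zeros((len(mel_batch), hp.max_len), np.int32)    # mel_batch: (N, T, n_mels*r) features
        for t in range(hp.max_len):
            _preds = sess.run(g.preds, {g.x: mel_batch, g.y: preds})
            preds[:, t] = _preds[:, t]   # feed predictions back as the next decoder inputs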

Example 4: eval

# Required import: import data_load [as alias]
# Or: from data_load import get_batch [as alias]
def eval(model, f, ids2tokens, idx2phr):
    model.eval()

    Y, Y_hat = [], []
    with torch.no_grad():
        x, y  = get_batch(hp.max_span, hp.batch_size, hp.n_classes, False)
        x = x.cuda()

        _, y_hat, _ = model(x)  # y_hat: (N, n_candidates)

        x = x.cpu().numpy().tolist()
        y = y.cpu().numpy().tolist()
        y_hat = y_hat.cpu().numpy().tolist()

        Y.extend(y)
        Y_hat.extend(y_hat)

        # monitoring
        pointer = random.randint(0, len(x)-1)
        xx, yy, yy_hat = x[pointer], y[pointer], y_hat[pointer] # one sample

        tokens = ids2tokens(xx) # this is a function.
        ctx = " ".join(tokens).replace(" ##", "").split("[PAD]")[0] # bert detokenization
        gt = idx2phr[yy] # this is a dict.
        ht = " | ".join(idx2phr[each] for each in yy_hat)

        print(f"context: {ctx}")
        print(f"ground truth: {gt}")
        print(f"predictions: {ht}")

    # calc acc.
    n_samples = len(Y)
    n_correct = 0
    for y, y_hat in zip(Y, Y_hat):
        if y in y_hat:
            n_correct += 1
    acc = n_correct / n_samples
    print(f"acc@{hp.n_candidates}: %.2f"%acc)

    acc = str(round(acc, 2))

    torch.save(model.state_dict(), f"{f}_ACC{acc}.pt") 
Developer: Kyubyong, Project: msg_reply, Lines of code: 44, Source file: train.py
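
The ids2tokens callable and idx2phr dict passed into eval map BERT token ids back to tokens and class indices back to reply phrases, respectively. A minimal stand-in for these helpers is sketched below purely for illustration; the use of the transformers tokenizer and the toy phrase table are assumptions, since the real project builds these objects from its own vocabulary files.

from transformers import BertTokenizer  # assumes a BERT vocabulary is used for tokenization

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
ids2tokens = tokenizer.convert_ids_to_tokens   # maps a list of ids to a list of tokens
idx2phr = {0: "ok, sounds good", 1: "thank you!", 2: "see you soon"}  # toy class-index -> phrase table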

Example 5: __init__

# Required import: import data_load [as alias]
# Or: from data_load import get_batch [as alias]
def __init__(self,is_training = True, demo = False):
        # Build the computational graph when initializing
        self.is_training = is_training
        self.graph = tf.Graph()
        with self.graph.as_default():
            self.global_step = tf.Variable(0, name='global_step', trainable=False)
            if demo:
                self.passage_w = tf.placeholder(tf.int32,
                                        [1, Params.max_p_len,],"passage_w")
                self.question_w = tf.placeholder(tf.int32,
                                        [1, Params.max_q_len,],"passage_q")
                self.passage_c = tf.placeholder(tf.int32,
                                        [1, Params.max_p_len,Params.max_char_len],"passage_pc")
                self.question_c = tf.placeholder(tf.int32,
                                        [1, Params.max_q_len,Params.max_char_len],"passage_qc")
                self.passage_w_len_ = tf.placeholder(tf.int32,
                                        [1,1],"passage_w_len_")
                self.question_w_len_ = tf.placeholder(tf.int32,
                                        [1,1],"question_w_len_")
                self.passage_c_len = tf.placeholder(tf.int32,
                                        [1, Params.max_p_len],"passage_c_len")
                self.question_c_len = tf.placeholder(tf.int32,
                                        [1, Params.max_q_len],"question_c_len")
                self.data = (self.passage_w,
                            self.question_w,
                            self.passage_c,
                            self.question_c,
                            self.passage_w_len_,
                            self.question_w_len_,
                            self.passage_c_len,
                            self.question_c_len)
            else:
                self.data, self.num_batch = get_batch(is_training = is_training)
                (self.passage_w,
                self.question_w,
                self.passage_c,
                self.question_c,
                self.passage_w_len_,
                self.question_w_len_,
                self.passage_c_len,
                self.question_c_len,
                self.indices) = self.data
                
            self.passage_w_len = tf.squeeze(self.passage_w_len_, -1)
            self.question_w_len = tf.squeeze(self.question_w_len_, -1)

            self.encode_ids()
            self.params = get_attn_params(Params.attn_size, initializer = tf.contrib.layers.xavier_initializer)
            self.attention_match_rnn()
            self.bidirectional_readout()
            self.pointer_network()
            self.outputs()

            if is_training:
                self.loss_function()
                self.summary()
                self.init_op = tf.global_variables_initializer()
            total_params() 
Developer: NLPLearn, Project: R-net, Lines of code: 60, Source file: model.py
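
In training mode, get_batch(is_training=True) supplies the nine data tensors from a queue-based input pipeline, and num_batch tells the caller how many batches make up one epoch. A minimal sketch of building the graph and initializing its variables is given below, assuming the constructor above belongs to a class named Model; only attributes defined in the constructor are used, and how the session then iterates depends on the rest of the R-net repository.

model = Model(is_training=True)
with model.graph.as_default():
    with tf.Session() as sess:
        sess.run(model.init_op)  # variables created while building the graph
        # training would then repeatedly run the op(s) set up in loss_function()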


Note: The data_load.get_batch examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult each project's license before redistributing or using the code; do not republish without permission.