This article collects typical usage examples of Python's model.RNNModel. If you are wondering what model.RNNModel does, how to use it, or where to find examples of it, the curated code samples below may help. You can also explore further usage examples of the enclosing module, model.
Three code examples of model.RNNModel are shown below, ordered by popularity by default.
Example 1: generate_flow
# Required module: import model [as alias]
# Or: from model import RNNModel [as alias]
def generate_flow(epoch=3):
    """Load a saved model checkpoint and generate new text."""
    corpus = Corpus(train_dir)
    config = Config()
    config.vocab_size = len(corpus.dictionary)

    model = RNNModel(config)
    model_file = os.path.join(save_dir, model_name.format(epoch))
    assert os.path.exists(model_file), 'File %s does not exist.' % model_file
    # map_location keeps CPU-only machines able to load GPU-trained weights.
    model.load_state_dict(torch.load(model_file, map_location=lambda storage, loc: storage))

    word_list = generate(model, corpus.dictionary.idx2word, word_len=50)
    print(''.join(word_list))
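The generate helper itself is not shown on this page. As a rough illustration, here is a minimal sketch of what such a sampler typically does, inferred from the call sites above (the model is assumed to return logits over the vocabulary plus a hidden state, as in Example 2; the temperature parameter is an added assumption, not part of the original signature):

import torch

def generate(model, idx2word, word_len=50, temperature=1.0):
    """Sample word_len tokens from a trained RNN language model (sketch)."""
    model.eval()
    hidden = model.init_hidden(1)  # batch size of 1 for generation
    # Seed with a random vocabulary index; shape (seq_len=1, batch=1).
    inp = torch.randint(len(idx2word), (1, 1), dtype=torch.long)
    words = []
    with torch.no_grad():
        for _ in range(word_len):
            output, hidden = model(inp, hidden)  # logits over the vocabulary
            # Temperature-scaled sampling from the predicted distribution.
            probs = torch.softmax(output.view(-1) / temperature, dim=0)
            word_idx = torch.multinomial(probs, num_samples=1).item()
            inp.fill_(word_idx)  # feed the sampled word back in
            words.append(idx2word[word_idx])
    return words

Sampling with torch.multinomial (rather than always taking the argmax) keeps the generated text varied; lowering the temperature makes the output more conservative.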
Example 2: train
# Required module: import model [as alias]
# Or: from model import RNNModel [as alias]
def train():
    # Load the data and configure the model
    print("Loading data...")
    corpus = Corpus(train_dir)
    print(corpus)
    config = Config()
    config.vocab_size = len(corpus.dictionary)

    train_data = batchify(corpus.train, config.batch_size)
    train_len = train_data.size(0)
    seq_len = config.seq_len

    print("Configuring model...")
    model = RNNModel(config)
    if use_cuda:
        model.cuda()
    print(model)

    criterion = nn.CrossEntropyLoss()
    lr = config.learning_rate  # initial learning rate
    start_time = time.time()

    print("Training and generating...")
    for epoch in range(1, config.num_epochs + 1):  # train for multiple epochs
        total_loss = 0.0
        model.train()  # dropout is only active in training mode
        hidden = model.init_hidden(config.batch_size)  # initialize the hidden state

        for ibatch, i in enumerate(range(0, train_len - 1, seq_len)):
            data, targets = get_batch(train_data, i, seq_len)  # fetch one batch
            # Detach the hidden state from how it was previously produced.
            # Otherwise, the model would try to backpropagate all the way
            # to the start of the dataset.
            hidden = repackage_hidden(hidden)
            model.zero_grad()

            output, hidden = model(data, hidden)
            loss = criterion(output.view(-1, config.vocab_size), targets)
            loss.backward()  # backpropagation

            # clip_grad_norm_ helps prevent the exploding-gradient problem in
            # RNNs/LSTMs (clip_grad_norm without the underscore is the
            # deprecated spelling from older PyTorch versions).
            torch.nn.utils.clip_grad_norm_(model.parameters(), config.clip)
            for p in model.parameters():  # manual SGD parameter update
                p.data.add_(p.grad.data, alpha=-lr)

            total_loss += loss.item()  # accumulate the loss
            if ibatch % config.log_interval == 0 and ibatch > 0:  # report every log_interval batches
                cur_loss = total_loss / config.log_interval
                elapsed = get_time_dif(start_time)
                print("Epoch {:3d}, {:5d}/{:5d} batches, lr {:2.3f}, loss {:5.2f}, ppl {:8.2f}, time {}".format(
                    epoch, ibatch, train_len // seq_len, lr, cur_loss, math.exp(cur_loss), elapsed))
                total_loss = 0.0

        lr /= 4.0  # anneal the learning rate after each epoch

        # Save the model parameters every save_interval epochs
        if epoch % config.save_interval == 0:
            torch.save(model.state_dict(), os.path.join(save_dir, model_name.format(epoch)))
        print(''.join(generate(model, corpus.dictionary.idx2word)))
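The helpers batchify, get_batch, and repackage_hidden are assumed by this snippet but not shown. The sketches below follow the style of PyTorch's word_language_model example, which this code closely mirrors; they are a reasonable guess at the missing definitions, not the original source:

import torch

def batchify(data, bsz):
    """Trim a 1-D token tensor and reshape it into bsz parallel columns."""
    nbatch = data.size(0) // bsz
    data = data.narrow(0, 0, nbatch * bsz)  # drop the remainder tokens
    return data.view(bsz, -1).t().contiguous()  # shape: (nbatch, bsz)

def get_batch(source, i, seq_len):
    """Slice inputs and their shifted-by-one targets starting at row i."""
    seq_len = min(seq_len, len(source) - 1 - i)
    data = source[i:i + seq_len]
    target = source[i + 1:i + 1 + seq_len].view(-1)
    return data, target

def repackage_hidden(h):
    """Detach hidden states from their history so backprop stops here."""
    if isinstance(h, torch.Tensor):
        return h.detach()
    return tuple(repackage_hidden(v) for v in h)  # handle LSTM (h, c) tuples

The recursion in repackage_hidden matters because an LSTM's hidden state is a tuple of two tensors, while a vanilla RNN or GRU returns a single tensor.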
Example 3: build_model
# Required module: import model [as alias]
# Or: from model import RNNModel [as alias]
def build_model():
    """Build the model according to CLI arguments.

    Global dependencies:
        - corpus
        - args
    """
    # noise distribution for noise sampling in NCE
    noise = build_unigram_noise(
        torch.FloatTensor(corpus.vocab.idx2count)
    )

    norm_term = 'auto' if args.norm_term == -1 else args.norm_term
    # set up the NCELoss modules
    if args.index_module == 'linear':
        criterion = IndexLinear(
            args.emsize,
            ntoken,
            noise=noise,
            noise_ratio=args.noise_ratio,
            norm_term=norm_term,
            loss_type=args.loss,
            reduction='none',
        )
        model = RNNModel(
            ntoken, args.emsize, args.nhid, args.nlayers,
            criterion=criterion, dropout=args.dropout,
        )
    elif args.index_module == 'gru':
        if args.nlayers != 1:
            logger.warning('Falling back to a one-layer GRU because IndexGRU only supports a single layer')
        nce_criterion = IndexGRU(
            ntoken, args.emsize, args.nhid,
            args.dropout,
            noise=noise,
            noise_ratio=args.noise_ratio,
            norm_term=norm_term,
        )
        model = GenModel(
            criterion=nce_criterion,
        )
    else:
        logger.error('The index module [%s] is not supported yet' % args.index_module)
        raise NotImplementedError('index module not supported')

    if args.cuda:
        model.cuda()

    logger.info('model definition:\n %s', model)
    return model
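Here, build_unigram_noise turns per-token occurrence counts (corpus.vocab.idx2count) into the unigram distribution that NCE draws its negative samples from, so frequent words are proposed as noise more often. A minimal sketch, assuming the input is a FloatTensor of raw counts (the actual helper in the source repository may differ in detail):

def build_unigram_noise(freq):
    """Normalize raw token counts into a unigram noise distribution (sketch)."""
    noise = freq / freq.sum()
    assert abs(noise.sum().item() - 1.0) < 1e-3  # sanity check: sums to 1
    return noise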