本文整理汇总了Python中misc.utils.RewardCriterion方法的典型用法代码示例。如果您正苦于以下问题：Python utils.RewardCriterion方法的具体用法？Python utils.RewardCriterion怎么用？Python utils.RewardCriterion使用的例子？那么，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类misc.utils的用法示例。
在下文中一共展示了utils.RewardCriterion方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
# 需要导入模块: from misc import utils [as 别名]
# 或者: from misc.utils import RewardCriterion [as 别名]
def __init__(self, model, opt):
    """Wrap a captioning model together with its training criterions.

    Args:
        model: the captioning model whose outputs will be scored.
        opt: options namespace; fields used here are label_smoothing,
            vse_model, retrieval_reward_weight and initialize_retrieval.
    """
    super(LossWrapper, self).__init__()
    self.opt = opt
    self.model = model
    # Word-level criterion: label-smoothed XE when requested, plain XE otherwise.
    if opt.label_smoothing > 0:
        self.crit = utils.LabelSmoothing(smoothing=opt.label_smoothing)
    else:
        self.crit = utils.LanguageModelCriterion()
    self.rl_crit = utils.RewardCriterion()
    self.struc_crit = utils.StructureLosses(opt)
    if opt.vse_model != 'None':
        # Frozen retrieval (VSE) model, used only to provide a reward signal.
        self.vse = VSEFCModel(opt)
        for p in self.vse.parameters():
            p.requires_grad = False
        self.retrieval_reward_weight = opt.retrieval_reward_weight
        # Checkpoint keys are prefixed with 'vse.'; strip the prefix on load.
        self.vse.load_state_dict({k[4:]: v for k, v in torch.load(opt.initialize_retrieval).items() if 'vse.' in k})
    else:
        # BUG FIX: this assignment previously ran unconditionally after the
        # if-block, zeroing the weight even when a VSE model was configured.
        # It belongs to the no-VSE branch.
        self.retrieval_reward_weight = 0
示例2: __init__
# 需要导入模块: from misc import utils [as 别名]
# 或者: from misc.utils import RewardCriterion [as 别名]
def __init__(self, model, opt):
    """Store the model and options, and build the training criterions."""
    super(LossWrapper, self).__init__()
    self.opt = opt
    self.model = model
    # Word-level loss: smoothed cross-entropy when label_smoothing > 0,
    # otherwise the plain language-model criterion.
    smoothing = opt.label_smoothing
    self.crit = (utils.LabelSmoothing(smoothing=smoothing)
                 if smoothing > 0
                 else utils.LanguageModelCriterion())
    # Criterion used for self-critical (reward-based) training.
    self.rl_crit = utils.RewardCriterion()
示例3: main
# 需要导入模块: from misc import utils [as 别名]
# 或者: from misc.utils import RewardCriterion [as 别名]
def main(opt):
    """Assemble dataset, model, criterions and optimizer, then run training.

    Args:
        opt: dict of run options (model choice, sizes, dropout rates,
            learning-rate schedule, ...).
    """
    dataset = VideoDataset(opt, 'train')
    dataloader = DataLoader(dataset, batch_size=opt["batch_size"], shuffle=True)
    # The vocabulary size is only known once the dataset has been built.
    opt["vocab_size"] = dataset.get_vocab_size()

    chosen = opt["model"]
    if chosen == 'S2VTModel':
        # Single-RNN sequence-to-sequence model, moved straight to GPU.
        model = S2VTModel(
            opt["vocab_size"],
            opt["max_len"],
            opt["dim_hidden"],
            opt["dim_word"],
            opt['dim_vid'],
            rnn_cell=opt['rnn_type'],
            n_layers=opt['num_layers'],
            bidirectional=opt["bidirectional"],
            rnn_dropout_p=opt["rnn_dropout_p"]).cuda()
    elif chosen == "S2VTAttModel":
        # Encoder/decoder pair with attention.
        enc = EncoderRNN(
            opt["dim_vid"],
            opt["dim_hidden"],
            n_layers=opt['num_layers'],
            bidirectional=opt["bidirectional"],
            input_dropout_p=opt["input_dropout_p"],
            rnn_cell=opt['rnn_type'],
            rnn_dropout_p=opt["rnn_dropout_p"])
        dec = DecoderRNN(
            opt["vocab_size"],
            opt["max_len"],
            opt["dim_hidden"],
            opt["dim_word"],
            n_layers=opt['num_layers'],
            input_dropout_p=opt["input_dropout_p"],
            rnn_cell=opt['rnn_type'],
            rnn_dropout_p=opt["rnn_dropout_p"],
            bidirectional=opt["bidirectional"])
        model = S2VTAttModel(enc, dec).cuda()

    # Cross-entropy loss plus the self-critical reward criterion.
    crit = utils.LanguageModelCriterion()
    rl_crit = utils.RewardCriterion()
    optimizer = optim.Adam(
        model.parameters(),
        lr=opt["learning_rate"],
        weight_decay=opt["weight_decay"])
    # Step decay of the learning rate every fixed number of epochs.
    exp_lr_scheduler = optim.lr_scheduler.StepLR(
        optimizer,
        step_size=opt["learning_rate_decay_every"],
        gamma=opt["learning_rate_decay_rate"])
    train(dataloader, model, crit, optimizer, exp_lr_scheduler, opt, rl_crit)
示例4: main
# 需要导入模块: from misc import utils [as 别名]
# 或者: from misc.utils import RewardCriterion [as 别名]
def main(opt):
    """Build the data pipeline and model, then launch training.

    Args:
        opt: dict of run options (model choice, sizes, dropout rates,
            learning-rate schedule, ...).
    """
    dataset = VideoDataset(opt, 'train')
    dataloader = DataLoader(dataset, batch_size=opt["batch_size"], shuffle=True)
    # Vocabulary size comes from the dataset and is recorded back into opt.
    opt["vocab_size"] = dataset.get_vocab_size()

    which = opt["model"]
    if which == 'S2VTModel':
        model = S2VTModel(
            opt["vocab_size"],
            opt["max_len"],
            opt["dim_hidden"],
            opt["dim_word"],
            opt['dim_vid'],
            rnn_cell=opt['rnn_type'],
            n_layers=opt['num_layers'],
            rnn_dropout_p=opt["rnn_dropout_p"])
    elif which == "S2VTAttModel":
        enc = EncoderRNN(
            opt["dim_vid"],
            opt["dim_hidden"],
            bidirectional=opt["bidirectional"],
            input_dropout_p=opt["input_dropout_p"],
            rnn_cell=opt['rnn_type'],
            rnn_dropout_p=opt["rnn_dropout_p"])
        dec = DecoderRNN(
            opt["vocab_size"],
            opt["max_len"],
            opt["dim_hidden"],
            opt["dim_word"],
            input_dropout_p=opt["input_dropout_p"],
            rnn_cell=opt['rnn_type'],
            rnn_dropout_p=opt["rnn_dropout_p"],
            bidirectional=opt["bidirectional"])
        model = S2VTAttModel(enc, dec)

    # Move whichever model was built onto the GPU.
    model = model.cuda()
    crit = utils.LanguageModelCriterion()
    rl_crit = utils.RewardCriterion()
    optimizer = optim.Adam(
        model.parameters(),
        lr=opt["learning_rate"],
        weight_decay=opt["weight_decay"])
    # Decay the learning rate on a fixed epoch schedule.
    exp_lr_scheduler = optim.lr_scheduler.StepLR(
        optimizer,
        step_size=opt["learning_rate_decay_every"],
        gamma=opt["learning_rate_decay_rate"])
    train(dataloader, model, crit, optimizer, exp_lr_scheduler, opt, rl_crit)