This article collects and summarizes typical usage examples of torch.nn.MultiMarginLoss in Python. If you are unsure what nn.MultiMarginLoss does or how to use it, the curated code examples below may help; you can also explore the containing module, torch.nn, for further usage examples.
The following shows 8 code examples of nn.MultiMarginLoss, sorted by popularity by default. Upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
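Before the repository-specific examples, here is a minimal, self-contained sketch of the basic nn.MultiMarginLoss call (a multi-class hinge loss over raw class scores). The tensor shapes and the margin value are illustrative assumptions, not taken from any example below.

import torch
from torch import nn

# Scores for a batch of 4 samples over 3 classes, plus each sample's true class index.
scores = torch.randn(4, 3)
targets = torch.tensor([0, 2, 1, 0])

# Multi-class hinge (margin) loss; the default margin is 1.0.
criterion = nn.MultiMarginLoss(margin=0.5)
loss = criterion(scores, targets)
print(loss.item())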
Example 1: test_load_parameters
# Required import: from torch import nn [as alias]
# or: from torch.nn import MultiMarginLoss [as alias]
def test_load_parameters():
    encoder = ConvWordsEncoder(*wordembeddings.shape)
    encoder.load_word_embeddings_from_numpy(wordembeddings)
    net = GNNModel(encoder, hp_dropout=0.2)
    criterion = nn.MultiMarginLoss(margin=0.5)

    container = fackel.TorchContainer(
        torch_model=net,
        batch_size=8,
        max_epochs=5,
        model_checkpoint=False,
        save_to_dir="../trainedmodels/",
        early_stopping=5,
        criterion=criterion,
        init_model_weights=True,
        lr_decay=2
    )

    container.save_model()
    container.reload_from_saved()
    assert container._model._gnn._prop_model._dropout.p == 0.2
Example 2: test_ggnn
# Required import: from torch import nn [as alias]
# or: from torch.nn import MultiMarginLoss [as alias]
def test_ggnn():
    encoder = ConvWordsEncoder(*wordembeddings.shape)
    encoder.load_word_embeddings_from_numpy(wordembeddings)
    net = GNNModel(encoder)
    criterion = nn.MultiMarginLoss(margin=0.5)

    container = fackel.TorchContainer(
        torch_model=net,
        batch_size=8,
        max_epochs=5,
        model_checkpoint=False,
        early_stopping=5,
        criterion=criterion,
        init_model_weights=True,
        lr_decay=2
    )

    train_questions = V.encode_batch_questions(training_dataset, word2idx)[..., 0, :]
    train_graphs = V.encode_batch_graph_structure(training_dataset, word2idx)
    targets = np.zeros(len(training_dataset), dtype=np.int32)

    container.train(train=(train_questions, *train_graphs), train_targets=targets)
Example 3: test_gnn
# Required import: from torch import nn [as alias]
# or: from torch.nn import MultiMarginLoss [as alias]
def test_gnn():
    encoder = ConvWordsEncoder(*wordembeddings.shape)
    encoder.load_word_embeddings_from_numpy(wordembeddings)
    net = GNNModel(encoder, hp_gated=False)
    criterion = nn.MultiMarginLoss(margin=0.5)

    container = fackel.TorchContainer(
        torch_model=net,
        batch_size=8,
        max_epochs=5,
        model_checkpoint=False,
        early_stopping=5,
        criterion=criterion,
        init_model_weights=True,
        lr_decay=2
    )

    train_questions = V.encode_batch_questions(training_dataset, word2idx)[..., 0, :]
    train_graphs = V.encode_batch_graph_structure(training_dataset, word2idx)
    targets = np.zeros(len(training_dataset), dtype=np.int32)

    container.train(train=(train_questions, *train_graphs), train_targets=targets)
Example 4: test_pool_edges_model
# Required import: from torch import nn [as alias]
# or: from torch.nn import MultiMarginLoss [as alias]
def test_pool_edges_model():
    encoder = ConvWordsEncoder(*wordembeddings.shape)
    encoder.load_word_embeddings_from_numpy(wordembeddings)
    net = PooledEdgesModel(encoder)
    criterion = nn.MultiMarginLoss()

    container = fackel.TorchContainer(
        torch_model=net,
        batch_size=8,
        max_epochs=5,
        model_checkpoint=False,
        early_stopping=5,
        criterion=criterion
    )

    # Keep only questions that have at least one candidate graph with a positive score.
    selected_questions = [s for s in training_dataset if any(scores[2] > 0.0 for g, scores in s.graphs)]
    targets = np.zeros((len(selected_questions)), dtype=np.int32)
    for qi, q in enumerate(selected_questions):
        random.shuffle(q.graphs)
        # The target is the index of the highest-scoring graph after shuffling.
        targets[qi] = np.argsort([g.scores[2] for g in q.graphs])[::-1][0]

    train_questions = V.encode_batch_questions(selected_questions, word2idx)[..., 0, :]
    train_edges = V.encode_batch_graphs(selected_questions, word2idx)[..., 0, :]

    container.train(train=(train_questions, train_edges), train_targets=targets)
Example 5: set_loss
# Required import: from torch import nn [as alias]
# or: from torch.nn import MultiMarginLoss [as alias]
def set_loss(self, loss_fn):
    assert loss_fn in ['multinomial', 'hinge', 'squared', 'huber']
    if loss_fn == 'hinge':
        l = nn.MultiMarginLoss(size_average=False)
        self.loss_fn = lambda p, t, _: l(p, Varng(torch.LongTensor([t])))
    elif loss_fn == 'multinomial':
        l = nn.NLLLoss(size_average=False)
        self.loss_fn = lambda p, t, _: l(F.log_softmax(p.unsqueeze(0), dim=1), Varng(torch.LongTensor([t])))
    elif loss_fn in ['squared', 'huber']:
        l = (nn.MSELoss if loss_fn == 'squared' else nn.SmoothL1Loss)(size_average=False)
        self.loss_fn = lambda p, t, sa: self._compute_loss(l, p, 1 - truth_to_vec(t, torch.zeros(self.n_actions)), sa)
Example 6: forward
# Required import: from torch import nn [as alias]
# or: from torch.nn import MultiMarginLoss [as alias]
def forward(self, sentences, neg_samples, diora, info):
    batch_size, length = sentences.shape
    input_size = self.embeddings.weight.shape[1]
    size = diora.outside_h.shape[-1]
    k = self.k_neg

    emb_pos = self.embeddings(sentences)
    emb_neg = self.embeddings(neg_samples)

    # Calculate scores.
    ## The predicted vector.
    cell = diora.outside_h[:, :length].view(batch_size, length, 1, -1)
    ## The projected samples.
    proj_pos = torch.matmul(emb_pos, torch.t(self.mat))
    proj_neg = torch.matmul(emb_neg, torch.t(self.mat))
    ## The score: column 0 holds the positive sample, columns 1..k the negatives.
    xp = torch.einsum('abc,abxc->abx', proj_pos, cell)
    xn = torch.einsum('ec,abxc->abe', proj_neg, cell)
    score = torch.cat([xp, xn], 2)

    # Calculate loss. Since the positive score sits in column 0, every target index is 0.
    lossfn = nn.MultiMarginLoss(margin=self.margin)
    inputs = score.view(batch_size * length, k + 1)
    device = torch.cuda.current_device() if self._cuda else None
    outputs = torch.full((inputs.shape[0],), 0, dtype=torch.int64, device=device)
    self.loss_hook(sentences, neg_samples, inputs)
    loss = lossfn(inputs, outputs)

    ret = dict(reconstruction_loss=loss)
    return loss, ret
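The pattern used above, concatenating one positive score with k negative scores and pointing MultiMarginLoss at column 0, can be reproduced in isolation. The shapes and the margin below are illustrative assumptions, not values from the example.

import torch
from torch import nn

batch, k = 6, 5
# Column 0 is the true (positive) sample's score, columns 1..k are negatives.
scores = torch.cat([torch.randn(batch, 1) + 1.0, torch.randn(batch, k)], dim=1)
targets = torch.zeros(batch, dtype=torch.int64)  # the correct class is always index 0

lossfn = nn.MultiMarginLoss(margin=0.1)
loss = lossfn(scores, targets)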
Example 7: __init__
# Required import: from torch import nn [as alias]
# or: from torch.nn import MultiMarginLoss [as alias]
def __init__(self, weight):
    super(CustomCombinedLoss, self).__init__()
    self._weight = weight
    self._criterion_choice = nn.MultiMarginLoss(size_average=False, margin=0.5)
Example 8: test_metrics
# Required import: from torch import nn [as alias]
# or: from torch.nn import MultiMarginLoss [as alias]
def test_metrics():
    encoder = ConvWordsEncoder(*wordembeddings.shape)
    encoder.load_word_embeddings_from_numpy(wordembeddings)
    net = PooledEdgesModel(encoder)
    criterion = nn.MultiMarginLoss()

    def metrics(targets, predictions, validation=False):
        _, predicted_targets = torch.topk(predictions, 1, dim=-1)
        # _, targets = torch.topk(targets, 1, dim=-1)
        predicted_targets = predicted_targets.squeeze(1)
        cur_acc = torch.sum(predicted_targets == targets).float()
        cur_acc /= predicted_targets.size(0)
        cur_f1 = 0.0
        if validation:
            for i, q in enumerate(training_dataset):
                if i < predicted_targets.size(0):
                    idx = predicted_targets.data[i]
                    if idx < len(q.graphs):
                        cur_f1 += q.graphs[idx].scores[2]
            cur_f1 /= targets.size(0)
        return {'acc': cur_acc.data[0], 'f1': cur_f1}

    container = fackel.TorchContainer(
        torch_model=net,
        batch_size=8,
        max_epochs=5,
        model_checkpoint=False,
        early_stopping=5,
        criterion=criterion,
        metrics=metrics
    )

    selected_questions = [s for s in training_dataset if any(scores[2] > 0.0 for g, scores in s.graphs)]
    targets = np.zeros((len(selected_questions)), dtype=np.int32)
    for qi, q in enumerate(selected_questions):
        random.shuffle(q.graphs)
        targets[qi] = np.argsort([g.scores[2] for g in q.graphs])[::-1][0]

    train_questions = V.encode_batch_questions(selected_questions, word2idx)[..., 0, :]
    train_edges = V.encode_batch_graphs(selected_questions, word2idx)[..., 0, :]

    container.train(train=(train_questions, train_edges), train_targets=targets,
                    dev=(train_questions, train_edges), dev_targets=targets)