This article collects typical usage examples of the torch.eq method in Python. If you are wondering what torch.eq does, how to call it, or where it appears in real code, the curated examples below should help. You can also explore further usage examples from the torch module itself.
The following 15 code examples of torch.eq are shown, sorted by popularity by default.
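Before the project examples, here is a minimal standalone sketch (not taken from any project below) of what torch.eq does: it compares two tensors elementwise and returns a boolean tensor (uint8 in older PyTorch versions), broadcasting shapes where necessary.

import torch

a = torch.tensor([1, 2, 3, 4])
b = torch.tensor([1, 0, 3, 0])
print(torch.eq(a, b))                  # tensor([ True, False,  True, False])

# Broadcasting: compare a column of labels against each row of candidates.
labels = torch.tensor([[0], [2]])            # shape (2, 1)
topk = torch.tensor([[0, 1, 2], [1, 2, 0]])  # shape (2, 3)
print(torch.eq(topk, labels))                # shape (2, 3) boolean result

# A pattern that recurs in the examples below: count matches to get accuracy.
print(torch.eq(a, b).float().mean())   # tensor(0.5000)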
Example 1: forward
# Required import: import torch [as alias]
# Or: from torch import eq [as alias]
def forward(self, feed_dict):
    feed_dict = GView(feed_dict)
    feature_f = self._extract_sent_feature(feed_dict.sent_f, feed_dict.sent_f_length, self.gru_f)
    feature_b = self._extract_sent_feature(feed_dict.sent_b, feed_dict.sent_b_length, self.gru_b)
    feature_img = feed_dict.image
    feature = torch.cat([feature_f, feature_b, feature_img], dim=1)
    predict = self.predict(feature)
    if self.training:
        label = self.embedding(feed_dict.label)
        loss = cosine_loss(predict, label).mean()
        return loss, {}, {}
    else:
        output_dict = dict(pred=predict)
        if 'label' in feed_dict:
            dis = cosine_distance(predict, self.embedding.weight)
            _, topk = dis.topk(1000, dim=1, sorted=True)
            # top-k recall: does the ground-truth label appear among the first k retrieved indices?
            for k in [1, 10, 100, 1000]:
                output_dict['top{}'.format(k)] = torch.eq(topk, feed_dict.label.unsqueeze(-1))[:, :k].float().sum(dim=1).mean()
        return output_dict
Example 2: _compute_xi
# Required import: import torch [as alias]
# Or: from torch import eq [as alias]
def _compute_xi(self, s, aug, y):
    # find argmax of augmented scores
    _, y_star = torch.max(aug, 1)
    # xi_max: one-hot encoding of maximal indices
    xi_max = torch.eq(y_star[:, None], self._range).float()
    if MultiClassHingeLoss.smooth:
        # find smooth argmax of scores
        xi_smooth = nn.functional.softmax(s, dim=1)
        # compute for each sample whether it has a positive contribution to the loss
        losses = torch.sum(xi_smooth * aug, 1)
        mask_smooth = torch.ge(losses, 0).float()[:, None]
        # keep only smoothing for positive contributions
        xi = mask_smooth * xi_smooth + (1 - mask_smooth) * xi_max
    else:
        xi = xi_max
    return xi
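The torch.eq call above turns argmax indices into a one-hot matrix by broadcasting each index against a range of class indices. A minimal sketch of that pattern in isolation (the tensor values and names here are illustrative, not from the source):

import torch

num_classes = 4
scores = torch.tensor([[0.1, 2.0, 0.3, 0.0],
                       [1.5, 0.2, 0.1, 0.4]])
_, y_star = torch.max(scores, 1)            # per-row argmax -> tensor([1, 0])
class_range = torch.arange(num_classes)     # tensor([0, 1, 2, 3])
one_hot = torch.eq(y_star[:, None], class_range).float()
print(one_hot)
# tensor([[0., 1., 0., 0.],
#         [1., 0., 0., 0.]])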
Example 3: update
# Required import: import torch [as alias]
# Or: from torch import eq [as alias]
def update(self, output):
    y_pred, y = output
    num_classes = y_pred.size(1)
    indices = torch.max(y_pred, 1)[1]
    # elementwise match between predicted and target class indices
    correct = torch.eq(indices, y)
    actual_onehot = to_onehot(y, num_classes)
    actual = actual_onehot.sum(dim=0)
    if correct.sum() == 0:
        true_positives = torch.zeros_like(actual)
    else:
        correct_onehot = to_onehot(indices[correct], num_classes)
        true_positives = correct_onehot.sum(dim=0)
    if self._actual is None:
        self._actual = actual
        self._true_positives = true_positives
    else:
        self._actual += actual
        self._true_positives += true_positives
Example 4: update
# Required import: import torch [as alias]
# Or: from torch import eq [as alias]
def update(self, output):
    y_pred, y = output
    num_classes = y_pred.size(1)
    indices = torch.max(y_pred, 1)[1]
    correct = torch.eq(indices, y)
    pred_onehot = to_onehot(indices, num_classes)
    all_positives = pred_onehot.sum(dim=0)
    if correct.sum() == 0:
        true_positives = torch.zeros_like(all_positives)
    else:
        correct_onehot = to_onehot(indices[correct], num_classes)
        true_positives = correct_onehot.sum(dim=0)
    if self._all_positives is None:
        self._all_positives = all_positives
        self._true_positives = true_positives
    else:
        self._all_positives += all_positives
        self._true_positives += true_positives
Example 5: test_train
# Required import: import torch [as alias]
# Or: from torch import eq [as alias]
def test_train(self):
    self._metric.train()
    calls = [[torch.FloatTensor([0.0]), torch.LongTensor([0])],
             [torch.FloatTensor([0.0, 0.1, 0.2, 0.3]), torch.LongTensor([0, 1, 2, 3])]]
    for i in range(len(self._states)):
        self._metric.process(self._states[i])
    self.assertEqual(2, len(self._metric_function.call_args_list))
    for i in range(len(self._metric_function.call_args_list)):
        # note: .all() must be called; a bare .all is a bound method and is always truthy
        self.assertTrue(torch.eq(self._metric_function.call_args_list[i][0][0], calls[i][0]).all())
        self.assertTrue(torch.lt(torch.abs(torch.add(self._metric_function.call_args_list[i][0][1], -calls[i][1])), 1e-12).all())
    self._metric_function.reset_mock()
    self._metric.process_final({})
    self.assertEqual(self._metric_function.call_count, 1)
    self.assertTrue(torch.eq(self._metric_function.call_args_list[0][0][1], torch.LongTensor([0, 1, 2, 3, 4])).all())
    self.assertTrue(torch.lt(torch.abs(torch.add(self._metric_function.call_args_list[0][0][0], -torch.FloatTensor([0.0, 0.1, 0.2, 0.3, 0.4]))), 1e-12).all())
Example 6: test_forward
# Required import: import torch [as alias]
# Or: from torch import eq [as alias]
def test_forward(self, hidden_sizes):
    env_spec = GarageEnv(DummyBoxEnv())
    obs_dim = env_spec.observation_space.flat_dim
    act_dim = env_spec.action_space.flat_dim
    obs = torch.ones(obs_dim, dtype=torch.float32).unsqueeze(0)
    act = torch.ones(act_dim, dtype=torch.float32).unsqueeze(0)
    qf = ContinuousMLPQFunction(env_spec=env_spec,
                                hidden_nonlinearity=None,
                                hidden_sizes=hidden_sizes,
                                hidden_w_init=nn.init.ones_,
                                output_w_init=nn.init.ones_)
    output = qf(obs, act)
    expected_output = torch.full([1, 1],
                                 fill_value=(obs_dim + act_dim) *
                                 np.prod(hidden_sizes),
                                 dtype=torch.float32)
    assert torch.eq(output, expected_output)

# yapf: disable
Example 7: test_is_pickleable
# Required import: import torch [as alias]
# Or: from torch import eq [as alias]
def test_is_pickleable(self, hidden_sizes):
    env_spec = GarageEnv(DummyBoxEnv())
    obs_dim = env_spec.observation_space.flat_dim
    act_dim = env_spec.action_space.flat_dim
    obs = torch.ones(obs_dim, dtype=torch.float32).unsqueeze(0)
    act = torch.ones(act_dim, dtype=torch.float32).unsqueeze(0)
    qf = ContinuousMLPQFunction(env_spec=env_spec,
                                hidden_nonlinearity=None,
                                hidden_sizes=hidden_sizes,
                                hidden_w_init=nn.init.ones_,
                                output_w_init=nn.init.ones_)
    output1 = qf(obs, act)
    p = pickle.dumps(qf)
    qf_pickled = pickle.loads(p)
    output2 = qf_pickled(obs, act)
    assert torch.eq(output1, output2)
Example 8: test_update_context
# Required import: import torch [as alias]
# Or: from torch import eq [as alias]
def test_update_context(self):
    """Test update_context."""
    s = TimeStep(env_spec=self.env_spec,
                 observation=np.ones(self.obs_dim),
                 next_observation=np.ones(self.obs_dim),
                 action=np.ones(self.action_dim),
                 reward=1.0,
                 terminal=False,
                 env_info={},
                 agent_info={})
    updates = 10
    for _ in range(updates):
        self.module.update_context(s)
    assert torch.all(
        torch.eq(self.module.context,
                 torch.ones(updates, self.encoder_input_dim)))
Example 9: test_encode_param
# Required import: import torch [as alias]
# Or: from torch import eq [as alias]
def test_encode_param():
    param = torch.rand(256, 128, 3, 3)
    prune_vanilla_elementwise(sparsity=0.7, param=param)
    quantize_linear_fix_zeros(param, k=16)
    huffman = EncodedParam(param=param, method='huffman',
                           encode_indices=True, bit_length_zero_run_length=4)
    stats = huffman.stats
    print(stats)
    # encoding followed by decoding must reproduce the parameter exactly
    assert torch.eq(param, huffman.data).all()
    state_dict = huffman.state_dict()
    huffman = EncodedParam()
    huffman.load_state_dict(state_dict)
    assert torch.eq(param, huffman.data).all()
    vanilla = EncodedParam(param=param, method='vanilla',
                           encode_indices=True, bit_length_zero_run_length=4)
    stats = vanilla.stats
    print(stats)
    assert torch.eq(param, vanilla.data).all()
    quantize_fixed_point(param=param, bit_length=4, bit_length_integer=0)
    fixed_point = EncodedParam(param=param, method='fixed_point',
                               bit_length=4, bit_length_integer=0,
                               encode_indices=True, bit_length_zero_run_length=4)
    stats = fixed_point.stats
    print(stats)
    assert torch.eq(param, fixed_point.data).all()
Example 10: test_data_pickle_correctness
# Required import: import torch [as alias]
# Or: from torch import eq [as alias]
def test_data_pickle_correctness(self):
    # this will create new pickle files for train, valid, test
    dataset = Dataset.create(
        config=self.config, folder=self.dataset_folder, preload_data=True
    )
    # create a new dataset which loads the triples from the stored pickle files
    dataset_load_by_pickle = Dataset.create(
        config=self.config, folder=self.dataset_folder, preload_data=True
    )
    for split in dataset._triples.keys():
        self.assertTrue(
            torch.all(
                torch.eq(dataset_load_by_pickle.split(split), dataset.split(split))
            )
        )
    self.assertEqual(dataset._meta, dataset_load_by_pickle._meta)
Example 11: decode
# Required import: import torch [as alias]
# Or: from torch import eq [as alias]
def decode(self, input_word_orig, input_word, input_char, _, target=None, mask=None, length=None, hx=None,
           leading_symbolic=0):
    if len(input_word.size()) == 3:
        # input_word is the packed sents [n_sent, sent_len]
        input_word, input_char, target, sent_mask, length, doc_n_sent = self._doc2sent(
            input_word, input_char, target)
    # output from rnn [batch, length, tag_space]
    output, _, mask, length = self._get_rnn_output(input_word_orig, input_word, input_char, mask=mask,
                                                   length=length, hx=hx)
    if target is None:
        return self.crf.decode(output, mask=mask, leading_symbolic=leading_symbolic), None
    if length is not None:
        max_len = length.max()
        target = target[:, :max_len]
    preds = self.crf.decode(output, mask=mask, leading_symbolic=leading_symbolic)
    # count correctly decoded tags; the mask, when given, zeroes out padding positions
    if mask is None:
        return preds, torch.eq(preds, target).float().sum()
    else:
        return preds, (torch.eq(preds, target).float() * mask).sum()
Example 12: query
# Required import: import torch [as alias]
# Or: from torch import eq [as alias]
def query(self, x):
    '''
    :param x: input data CxN tensor
    :return: mask: Nxnode_num
    '''
    # expand as CxNxnode_num
    node = self.node.unsqueeze(1).expand(x.size(0), x.size(1), self.rows * self.cols)
    x_expanded = x.unsqueeze(2).expand_as(node)
    # calculate difference between x and each node
    diff = x_expanded - node  # CxNxnode_num
    diff_norm = (diff ** 2).sum(dim=0)  # Nxnode_num
    # find the nearest neighbor
    _, min_idx = torch.min(diff_norm, dim=1)  # N
    min_idx_expanded = min_idx.unsqueeze(1).expand(min_idx.size()[0], self.rows * self.cols).float()  # Nxnode_num
    node_idx_list = self.node_idx_list.unsqueeze(0).expand_as(min_idx_expanded)  # Nxnode_num
    mask = torch.eq(min_idx_expanded, node_idx_list).float()  # Nxnode_num
    mask_row_max, _ = torch.max(mask, dim=0)  # node_num, indicates whether each node has a nearby x
    return mask, mask_row_max
Example 13: update
# Required import: import torch [as alias]
# Or: from torch import eq [as alias]
def update(self, output):
    y_pred, y = self._check_shape(output)
    self._check_type((y_pred, y))

    if self._type == "binary":
        correct = torch.eq(y_pred.type(y.type()), y).view(-1)
    elif self._type == "multiclass":
        indices = torch.max(y_pred, dim=1)[1]
        correct = torch.eq(indices, y).view(-1)
    elif self._type == "multilabel":
        # if y, y_pred shape is (N, C, ...) -> (N x ..., C)
        num_classes = y_pred.size(1)
        last_dim = y_pred.ndimension()
        y_pred = torch.transpose(y_pred, 1, last_dim - 1).reshape(-1, num_classes)
        y = torch.transpose(y, 1, last_dim - 1).reshape(-1, num_classes)
        correct = torch.all(y == y_pred.type_as(y), dim=-1)

    self._num_correct += torch.sum(correct).item()
    self._num_examples += correct.shape[0]
Example 14: get_accuracy_bin
# Required import: import torch [as alias]
# Or: from torch import eq [as alias]
def get_accuracy_bin(scores, labels):
    # a score >= 0 counts as the positive class
    preds = torch.ge(scores, 0).long()
    acc = torch.eq(preds, labels).float()
    return torch.sum(acc) / labels.nelement()
Example 15: get_accuracy
# Required import: import torch [as alias]
# Or: from torch import eq [as alias]
def get_accuracy(scores, labels):
    # predictions are the argmax over the last dimension
    _, preds = torch.max(scores, dim=-1)
    acc = torch.eq(preds, labels).float()
    return torch.sum(acc) / labels.nelement()
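A quick usage sketch for the two helpers above, with dummy tensors chosen purely for illustration (the tensor values are not from the source):

import torch

# Multi-class case: get_accuracy takes raw scores per class and integer labels.
scores = torch.tensor([[2.0, 0.1, 0.3],
                       [0.2, 0.1, 1.5]])
labels = torch.tensor([0, 2])
print(get_accuracy(scores, labels))              # tensor(1.)

# Binary case: get_accuracy_bin treats a score >= 0 as the positive class.
bin_scores = torch.tensor([0.7, -1.2, 0.3])
bin_labels = torch.tensor([1, 0, 0])
print(get_accuracy_bin(bin_scores, bin_labels))  # tensor(0.6667)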