

Python torch.argmax Method Code Examples

This article collects typical usage examples of the torch.argmax method in Python. If you are wondering what torch.argmax does, how to call it, or what real-world code using it looks like, the curated examples below should help. You can also explore further usage examples from the torch module.


The following presents 15 code examples of the torch.argmax method, sorted by popularity by default.
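
Before diving into the examples, here is a minimal standalone illustration of the method itself (the tensor values below are made up for demonstration):

import torch

x = torch.tensor([[0.1, 2.5, 0.3],
                  [1.2, 0.4, 0.9]])

# Without dim, argmax indexes into the flattened tensor.
print(torch.argmax(x))                              # tensor(1)
# With dim=1, it returns the index of the maximum in each row.
print(torch.argmax(x, dim=1))                       # tensor([1, 0])
# keepdim=True preserves the reduced dimension with size 1.
print(torch.argmax(x, dim=1, keepdim=True).shape)   # torch.Size([2, 1])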

Example 1: evaluate_accuracy

# Required import: import torch [as alias]
# Or: from torch import argmax [as alias]
def evaluate_accuracy(data_iter, net,
                      device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')):
    acc_sum, n = 0.0, 0
    with torch.no_grad():
        for X, y in data_iter:
            if isinstance(net, torch.nn.Module):
                net.eval()  # evaluation mode: disables dropout
                acc_sum += (net(X.to(device)).argmax(dim=1) == y.to(device)).float().sum().cpu().item()
                net.train()  # switch back to training mode
            else:
                # custom model (not an nn.Module)
                if 'is_training' in net.__code__.co_varnames:
                    acc_sum += (net(X, is_training=False).argmax(dim=1) == y).float().sum().item()
                else:
                    acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
            n += y.shape[0]
    return acc_sum / n 
Author: wdxtub, Project: deep-learning-note, Lines: 19, Source: utils.py

Example 2: train_cnn

# Required import: import torch [as alias]
# Or: from torch import argmax [as alias]
def train_cnn(net, train_iter, test_iter, batch_size, optimizer, device, num_epochs):
    net = net.to(device)
    print('training on', device)
    loss = nn.CrossEntropyLoss()
    batch_count = 0
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n, start = 0.0, 0.0, 0, time.time()
        for X, y in train_iter:
            X = X.to(device)
            y = y.to(device)
            y_hat = net(X)
            l = loss(y_hat, y)
            optimizer.zero_grad()
            l.backward()
            optimizer.step()

            train_l_sum += l.cpu().item()
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().cpu().item()
            n += y.shape[0]
            batch_count += 1
        test_acc = evaluate_accuracy(test_iter, net)
        # l is a per-batch mean, so average the accumulated loss over batch_count rather than n
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, time %.1f sec' %
              (epoch + 1, train_l_sum / batch_count, train_acc_sum / n, test_acc, time.time() - start)) 
Author: wdxtub, Project: deep-learning-note, Lines: 25, Source: utils.py

Example 3: predict_rnn_pytorch

# Required import: import torch [as alias]
# Or: from torch import argmax [as alias]
def predict_rnn_pytorch(prefix, num_chars, model, vocab_size, device, idx_to_char,
                        char_to_idx):
    state = None
    output = [char_to_idx[prefix[0]]]  # output records the prefix plus the generated characters
    for t in range(num_chars + len(prefix) - 1):
        X = torch.tensor([output[-1]], device=device).view(1, 1)
        if state is not None:
            if isinstance(state, tuple):  # LSTM, state:(h, c)
                state = (state[0].to(device), state[1].to(device))
            else:
                state = state.to(device)

        (Y, state) = model(X, state)  # the forward pass does not need explicit model parameters
        if t < len(prefix) - 1:
            output.append(char_to_idx[prefix[t + 1]])
        else:
            output.append(int(Y.argmax(dim=1).item()))
    return ''.join([idx_to_char[i] for i in output]) 
Author: wdxtub, Project: deep-learning-note, Lines: 20, Source: utils.py

Example 4: calculate_outputs_and_gradients

# Required import: import torch [as alias]
# Or: from torch import argmax [as alias]
def calculate_outputs_and_gradients(inputs, model, target_label_idx, cuda=False):
    # do the pre-processing
    predict_idx = None
    gradients = []
    for input in inputs:
        input = pre_processing(input, cuda)
        output = model(input)
        output = F.softmax(output, dim=1)
        if target_label_idx is None:
            target_label_idx = torch.argmax(output, 1).item()
        index = np.ones((output.size()[0], 1)) * target_label_idx
        index = torch.tensor(index, dtype=torch.int64)
        if cuda:
            index = index.cuda()
        output = output.gather(1, index)
        # clear grad
        model.zero_grad()
        output.backward()
        gradient = input.grad.detach().cpu().numpy()[0]
        gradients.append(gradient)
    gradients = np.array(gradients)
    return gradients, target_label_idx 
Author: TianhongDai, Project: integrated-gradient-pytorch, Lines: 24, Source: utils.py

Example 5: classwise_f1

# Required import: import torch [as alias]
# Or: from torch import argmax [as alias]
def classwise_f1(output, gt):
    """
    Args:
        output: torch.Tensor of shape (n_batch, n_classes, image.shape)
        gt: torch.LongTensor of shape (n_batch, image.shape)
    """

    epsilon = 1e-20
    n_classes = output.shape[1]

    output = torch.argmax(output, dim=1)
    true_positives = torch.tensor([((output == i) * (gt == i)).sum() for i in range(n_classes)]).float()
    selected = torch.tensor([(output == i).sum() for i in range(n_classes)]).float()
    relevant = torch.tensor([(gt == i).sum() for i in range(n_classes)]).float()

    precision = (true_positives + epsilon) / (selected + epsilon)
    recall = (true_positives + epsilon) / (relevant + epsilon)
    classwise_f1 = 2 * (precision * recall) / (precision + recall)

    return classwise_f1 
Author: cosmic-cortex, Project: pytorch-UNet, Lines: 22, Source: metrics.py
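
A hypothetical call to classwise_f1, assuming random tensors with the documented (n_batch, n_classes, H, W) and (n_batch, H, W) layouts; the shapes and values below are made up for illustration:

import torch

n_batch, n_classes, H, W = 2, 3, 4, 4
output = torch.randn(n_batch, n_classes, H, W)     # raw per-class scores
gt = torch.randint(0, n_classes, (n_batch, H, W))  # integer ground-truth labels

scores = classwise_f1(output, gt)                  # one F1 value per class
print(scores.shape)                                # torch.Size([3])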

Example 6: batch_intersection_union

# Required import: import torch [as alias]
# Or: from torch import argmax [as alias]
def batch_intersection_union(output, target, nclass):
    """mIoU"""
    # inputs are torch tensors: output is 4D (N, C, H, W), target is 3D (N, H, W)
    # category -1 is the ignored class, typically background / boundary
    mini = 1
    maxi = nclass
    nbins = nclass
    predict = torch.argmax(output, 1) + 1
    target = target.float() + 1

    predict = predict.float() * (target > 0).float()
    intersection = predict * (predict == target).float()
    # areas of intersection and union
    area_inter = torch.histc(intersection, bins=nbins, min=mini, max=maxi)
    area_pred = torch.histc(predict, bins=nbins, min=mini, max=maxi)
    area_lab = torch.histc(target, bins=nbins, min=mini, max=maxi)
    area_union = area_pred + area_lab - area_inter
    assert torch.sum(area_inter > area_union).item() == 0, \
        "Intersection area should be smaller than Union area"
    return area_inter.float(), area_union.float() 
Author: AceCoooool, Project: LEDNet, Lines: 22, Source: metric_seg.py
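
The function only returns per-class intersection and union areas; a sketch of how they are typically accumulated into mIoU follows (the accumulation loop and the dummy validation data are assumptions for illustration, not code from the LEDNet repository):

import torch

nclass = 3
# hypothetical validation batches: (logits, labels) pairs with made-up shapes
val_batches = [(torch.randn(2, nclass, 8, 8), torch.randint(0, nclass, (2, 8, 8)))
               for _ in range(4)]

total_inter = torch.zeros(nclass)
total_union = torch.zeros(nclass)
for output, target in val_batches:
    area_inter, area_union = batch_intersection_union(output, target, nclass)
    total_inter += area_inter
    total_union += area_union

iou = total_inter / (total_union + 1e-10)  # per-class IoU; epsilon guards against empty classes
print('mIoU: %.4f' % iou.mean().item())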

Example 7: _sample_action

# Required import: import torch [as alias]
# Or: from torch import argmax [as alias]
def _sample_action(self, cat_distr, mask, relaxed, tau_weights, straight_through, gumbel_noise):
        if self.training:
            if relaxed:
                N = mask.sum(dim=-1, keepdim=True)
                tau = tau_weights[0] + tau_weights[1].exp() * torch.log(N + 1) + tau_weights[2].exp() * N
                actions, gumbel_noise = cat_distr.rsample(temperature=tau, gumbel_noise=gumbel_noise)
                if straight_through:
                    actions_hard = torch.zeros_like(actions)
                    actions_hard.scatter_(-1, actions.argmax(dim=-1, keepdim=True), 1.0)
                    actions = (actions_hard - actions).detach() + actions
                actions = clamp_grad(actions, -0.5, 0.5)
            else:
                actions, gumbel_noise = cat_distr.rsample(gumbel_noise=gumbel_noise)
        else:
            actions = torch.zeros_like(cat_distr.probs)
            actions.scatter_(-1, torch.argmax(cat_distr.probs, dim=-1, keepdim=True), 1.0)
            gumbel_noise = None
        return actions, gumbel_noise 
Author: facebookresearch, Project: latent-treelstm, Lines: 20, Source: BottomUpTreeLstmParser.py

Example 8: batch_intersection_union

# Required import: import torch [as alias]
# Or: from torch import argmax [as alias]
def batch_intersection_union(output, target, nclass):
    """mIoU"""
    # inputs are torch tensors: output is 4D, target is 3D
    mini = 1
    maxi = nclass
    nbins = nclass
    predict = torch.argmax(output, 1) + 1
    target = target.float() + 1

    predict = predict.float() * (target > 0).float()
    intersection = predict * (predict == target).float()
    # areas of intersection and union
    # element 0 in intersection marks the main difference from np.bincount; setting the boundary to -1 is necessary.
    area_inter = torch.histc(intersection.cpu(), bins=nbins, min=mini, max=maxi)
    area_pred = torch.histc(predict.cpu(), bins=nbins, min=mini, max=maxi)
    area_lab = torch.histc(target.cpu(), bins=nbins, min=mini, max=maxi)
    area_union = area_pred + area_lab - area_inter
    assert torch.sum(area_inter > area_union).item() == 0, "Intersection area should be smaller than Union area"
    return area_inter.float(), area_union.float() 
Author: LikeLy-Journey, Project: SegmenTron, Lines: 21, Source: score.py

Example 9: test_softmax

# Required import: import torch [as alias]
# Or: from torch import argmax [as alias]
def test_softmax(self):
        em = LogisticRegression(seed=1, input_dim=2, output_dim=3, verbose=False)
        Xs, _ = self.single_problem
        Ys = []
        for X in Xs:
            class1 = X[:, 0] < X[:, 1]
            class2 = X[:, 0] > X[:, 1] + 0.5
            class3 = X[:, 0] > X[:, 1]
            Y = torch.argmax(torch.stack([class1, class2, class3], dim=1), dim=1) + 1
            Ys.append(Y)
        em.train_model(
            (Xs[0], Ys[0]),
            valid_data=(Xs[1], Ys[1]),
            lr=0.1,
            n_epochs=10,
            checkpoint=False,
        )
        score = em.score((Xs[2], Ys[2]), verbose=False)
        self.assertGreater(score, 0.95) 
Author: HazyResearch, Project: metal, Lines: 21, Source: test_end_model.py

Example 10: evaluate

# Required import: import torch [as alias]
# Or: from torch import argmax [as alias]
def evaluate(epoch, args, model, feats, labels, train, val, test):
    with torch.no_grad():
        batch_size = args.eval_batch_size
        if batch_size <= 0:
            pred = model(feats)
        else:
            pred = []
            num_nodes = labels.shape[0]
            n_batch = (num_nodes + batch_size - 1) // batch_size
            for i in range(n_batch):
                batch_start = i * batch_size
                batch_end = min((i + 1) * batch_size, num_nodes)
                batch_feats = [feat[batch_start: batch_end] for feat in feats]
                pred.append(model(batch_feats))
            pred = torch.cat(pred)

        pred = torch.argmax(pred, dim=1)
        correct = (pred == labels).float()
        train_acc = correct[train].sum() / len(train)
        val_acc = correct[val].sum() / len(val)
        test_acc = correct[test].sum() / len(test)
        return train_acc, val_acc, test_acc 
Author: dmlc, Project: dgl, Lines: 24, Source: sign.py

Example 11: evaluate

# Required import: import torch [as alias]
# Or: from torch import argmax [as alias]
def evaluate(dataloader, model, prog_args, logger=None):
    '''
    evaluate function
    '''
    if logger is not None and prog_args.save_dir is not None:
        model.load_state_dict(torch.load(prog_args.save_dir + "/" + prog_args.dataset
                                         + "/model.iter-" + str(logger['best_epoch'])))
    model.eval()
    correct_label = 0
    with torch.no_grad():
        for batch_idx, (batch_graph, graph_labels) in enumerate(dataloader):
            if torch.cuda.is_available():
                for (key, value) in batch_graph.ndata.items():
                    batch_graph.ndata[key] = value.cuda()
                graph_labels = graph_labels.cuda()
            ypred = model(batch_graph)
            indi = torch.argmax(ypred, dim=1)
            correct = torch.sum(indi == graph_labels)
            correct_label += correct.item()
    result = correct_label / (len(dataloader) * prog_args.batch_size)
    return result 
Author: dmlc, Project: dgl, Lines: 23, Source: train.py

Example 12: _graph_fn_sample_deterministic

# Required import: import torch [as alias]
# Or: from torch import argmax [as alias]
def _graph_fn_sample_deterministic(self, distribution):
        """
        Returns the argmax (int) of a relaxed one-hot vector. See `_graph_fn_sample_stochastic` for details.
        """
        if get_backend() == "tf":
            # Cast to float again because this is called from a tf.cond where the other option calls a stochastic
            # sample returning a float.
            argmax = tf.argmax(input=distribution._distribution.probs, axis=-1, output_type=tf.int32)
            sample = tf.cast(argmax, dtype=tf.float32)
            # Argmax turns (?, n) into (?,), not (?, 1)
            # TODO: What if we have a time rank as well?
            if len(sample.shape) == 1:
                sample = tf.expand_dims(sample, -1)
            return sample
        elif get_backend() == "pytorch":
            # TODO: keepdims?
            return torch.argmax(distribution.probs, dim=-1).int() 
Author: rlgraph, Project: rlgraph, Lines: 19, Source: gumbel_softmax.py

Example 13: _graph_fn_get_action_components

# Required import: import torch [as alias]
# Or: from torch import argmax [as alias]
def _graph_fn_get_action_components(self, flat_key, logits, parameters, deterministic):
        action_space_component = self.flat_action_space[flat_key]

        # Skip our distribution, iff discrete action-space and deterministic acting (greedy).
        # In that case, one does not need to create a distribution in the graph each act (only to get the argmax
        # over the logits, which is the same as the argmax over the probabilities (or log-probabilities)).
        if isinstance(action_space_component, IntBox) and \
                (deterministic is True or (isinstance(deterministic, np.ndarray) and deterministic)):
            return self._graph_fn_get_deterministic_action_wo_distribution(logits)
        # Bernoulli: Sigmoid-derived p must be larger than 0.5.
        elif isinstance(action_space_component, BoolBox) and \
                (deterministic is True or (isinstance(deterministic, np.ndarray) and deterministic)):
            # Note: Change 0.5 to 1.0, once parameters are logits, not probs anymore (so far, parameters for
            # Bernoulli distributions are still probs).
            if get_backend() == "tf":
                return tf.greater(parameters, 0.5)
            elif get_backend() == "pytorch":
                return torch.gt(parameters, 0.5)
        # Deterministic is tensor or False. Pass through graph.
        else:
            return self.distributions[flat_key].draw(parameters, deterministic) 
Author: rlgraph, Project: rlgraph, Lines: 23, Source: policy.py

Example 14: BPDA_attack

# Required import: import torch [as alias]
# Or: from torch import argmax [as alias]
def BPDA_attack(image,target, model, step_size = 1., iterations = 10, linf=False, transform_func=identity_transform):
    target = label2tensor(target)
    adv = image.detach().numpy()
    adv = torch.from_numpy(adv)
    adv.requires_grad_()
    for _ in range(iterations):
        adv_def = transform_func(adv)
        adv_def.requires_grad_()
        l2 = nn.MSELoss()
        # MSELoss needs a tensor target, so compare against zeros to build a graph through the transform
        loss = l2(torch.zeros_like(adv_def), adv_def)
        loss.backward()
        g = get_cw_grad(adv_def, image, target, model)
        if linf:
            g = torch.sign(g)
        print(g.numpy().sum())
        adv = adv.detach().numpy() - step_size * g.numpy()
        adv = clip_bound(adv)
        adv = torch.from_numpy(adv)
        adv.requires_grad_()
        if linf:
            print('label', torch.argmax(model(adv)), 'linf', torch.max(torch.abs(adv - image)).detach().numpy())
        else:
            print('label', torch.argmax(model(adv)), 'l2', l2_norm(adv, image))
    return adv.detach().numpy() 
Author: DSE-MSU, Project: DeepRobust, Lines: 26, Source: BPDA.py

Example 15: val_gzsl

# Required import: import torch [as alias]
# Or: from torch import argmax [as alias]
def val_gzsl(self, test_X, test_label, target_classes):

        with torch.no_grad():
            start = 0
            ntest = test_X.size()[0]
            predicted_label = torch.LongTensor(test_label.size())
            for i in range(0, ntest, self.batch_size):

                end = min(ntest, start+self.batch_size)

                output = self.model(test_X[start:end]) #.to(self.device)

                #_, predicted_label[start:end] = torch.max(output.data, 1)
                predicted_label[start:end] = torch.argmax(output.data, 1)

                start = end

            #print(str(predicted_label[:3]).ljust(40,'.'), end= ' '     )
            acc = self.compute_per_class_acc_gzsl(test_label, predicted_label, target_classes)
            return acc 
Author: edgarschnfld, Project: CADA-VAE-PyTorch, Lines: 22, Source: final_classifier.py


Note: The torch.argmax examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and the copyright of the source code remains with its original authors. Please follow each project's license when redistributing or using the code, and do not repost without permission.