

Python torch.gt Method Code Examples

This article collects and summarizes typical code examples of the torch.gt method in Python. If you are wondering how exactly to use torch.gt, how it behaves, or what real-world usage looks like, the curated code examples here may help. You can also explore further usage examples from the torch module that the method belongs to.


The following presents 15 code examples of the torch.gt method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
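
Before turning to the examples, here is a minimal sketch of what torch.gt itself does; the tensor values are made up purely for illustration:

import torch

a = torch.tensor([0.2, 0.7, 0.5])
b = torch.tensor([0.5, 0.5, 0.5])

# Element-wise "greater than": returns a BoolTensor of the same shape.
print(torch.gt(a, b))    # tensor([False,  True, False])
# A scalar is broadcast against the tensor.
print(torch.gt(a, 0.5))  # tensor([False,  True, False])
# Operator form, equivalent to torch.gt.
print(a > 0.5)           # tensor([False,  True, False])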

Example 1: _graph_fn_get_action_components

# Required module: import torch [as alias]
# Or: from torch import gt [as alias]
def _graph_fn_get_action_components(self, flat_key, logits, parameters, deterministic):
        action_space_component = self.flat_action_space[flat_key]

        # Skip our distribution, iff discrete action-space and deterministic acting (greedy).
        # In that case, one does not need to create a distribution in the graph each act (only to get the argmax
        # over the logits, which is the same as the argmax over the probabilities (or log-probabilities)).
        if isinstance(action_space_component, IntBox) and \
                (deterministic is True or (isinstance(deterministic, np.ndarray) and deterministic)):
            return self._graph_fn_get_deterministic_action_wo_distribution(logits)
        # Bernoulli: Sigmoid-derived p must be larger than 0.5.
        elif isinstance(action_space_component, BoolBox) and \
                (deterministic is True or (isinstance(deterministic, np.ndarray) and deterministic)):
            # Note: Change 0.5 to 1.0, once parameters are logits, not probs anymore (so far, parameters for
            # Bernoulli distributions are still probs).
            if get_backend() == "tf":
                return tf.greater(parameters, 0.5)
            elif get_backend() == "pytorch":
                return torch.gt(parameters, 0.5)
        # Deterministic is tensor or False. Pass through graph.
        else:
            return self.distributions[flat_key].draw(parameters, deterministic) 
Developer: rlgraph, Project: rlgraph, Lines: 23, Source: policy.py

Example 2: score_msk_ent

# Required module: import torch [as alias]
# Or: from torch import gt [as alias]
def score_msk_ent(ner_mat, ner_dict):
    seq_len, batch_sz = ner_mat.size()
    assert ner_dict.fword2idx('O') == 0
    # msk = torch.zeros((batch_sz, seq_len))
    ner = ner_mat.transpose(1, 0)
    indicator = torch.gt(ner, 0).int()
    global_bag = []
    for bid in range(batch_sz):
        tmp_bag = []
        for t in range(seq_len):
            if indicator[bid][t] != -1:

                if indicator[bid][t] == 0:
                    indicator, l = rec(indicator, bid, t, indicator[bid][t], seq_len)
                    tmp_bag.append([0, l])
                else:
                    indicator, l = rec(indicator, bid, t, indicator[bid][t], seq_len)
                    tmp_bag.append([1, l])
        global_bag.append(tmp_bag)
    return global_bag 
Developer: jiacheng-xu, Project: vmf_vae_nlp, Lines: 22, Source: helper.py

Example 3: get_score

# Required module: import torch [as alias]
# Or: from torch import gt [as alias]
def get_score(self, model, texta, textb, labels, score_type='f1'):
        metrics_map = {
            'f1': f1_score,
            'p': precision_score,
            'r': recall_score,
            'acc': accuracy_score
        }
        metric_func = metrics_map[score_type] if score_type in metrics_map else metrics_map['f1']
        assert texta.size(1) == textb.size(1) == len(labels)
        predict_prob = model(texta, textb)
        # print('predict', predict_prob)
        # print('labels', labels)
        predict_labels = torch.gt(predict_prob, 0.5)
        predict_labels = predict_labels.view(-1).cpu().data.numpy()
        labels = labels.view(-1).cpu().data.numpy()
        return metric_func(predict_labels, labels, average='micro') 
Developer: smilelight, Project: lightNLP, Lines: 18, Source: tool.py
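
The thresholding step in this example can be sketched in isolation; the probabilities below are made up, and note that scikit-learn metric functions expect (y_true, y_pred) as their first two positional arguments:

import torch

predict_prob = torch.tensor([0.1, 0.8, 0.6, 0.3])
predict_labels = torch.gt(predict_prob, 0.5)              # tensor([False,  True,  True, False])
# Convert to a NumPy array of 0/1 labels for scikit-learn metrics.
print(predict_labels.view(-1).cpu().numpy().astype(int))  # [0 1 1 0]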

Example 4: sample_sigmoid

# Required module: import torch [as alias]
# Or: from torch import gt [as alias]
def sample_sigmoid(args, y, sample=False):
    r"""
    Sample from scores between 0 and 1 as means of a Bernoulli distribution, or threshold at 0.5
    
    :param args: parsed arguments
    :param y: values to threshold
    :param sample: if True, sample, otherwise, threshold
    :return: sampled/thresholded values, in {0., 1.}
    """
    thresh = 0.5
    if sample:
        y_thresh = torch.rand(y.size(0), y.size(1), y.size(2)).to(args.device)
        y_result = torch.gt(y, y_thresh).float()
    else:
        y_thresh = (torch.ones(y.size(0), y.size(1), y.size(2)) * thresh).to(args.device)
        y_result = torch.gt(y, y_thresh).float()
    return y_result 
Developer: davide-belli, Project: generative-graph-transformer, Lines: 19, Source: utils.py
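
A self-contained sketch of the two branches above, without the args object (hypothetical shapes, CPU only):

import torch

y = torch.rand(2, 3, 4)              # scores in [0, 1]

# Deterministic branch: hard threshold at 0.5.
hard = torch.gt(y, 0.5).float()

# Sampling branch: treat each score as a Bernoulli probability.
noise = torch.rand_like(y)
sampled = torch.gt(y, noise).float()

print(hard.shape, sampled.shape)     # torch.Size([2, 3, 4]) torch.Size([2, 3, 4])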

Example 5: train_generator

# Required module: import torch [as alias]
# Or: from torch import gt [as alias]
def train_generator(self, train_data=None, generated_data=None, generator=None, discriminator=None, **kwargs):
        for entry in train_data:
            qid, batch_ranking, batch_label = entry[0], entry[1], entry[2]
            if gpu: batch_ranking = batch_ranking.to(device)

            pos_inds = torch.gt(torch.squeeze(batch_label), 0).nonzero()

            g_preds = generator.predict(batch_ranking, train=True)
            g_probs = torch.sigmoid(torch.squeeze(g_preds))

            neg_inds = torch.multinomial(g_probs, pos_inds.size(0), replacement=True)

            pos_docs = batch_ranking[:, pos_inds[:, 0], :]
            neg_docs = batch_ranking[:, neg_inds, :]

            reward = discriminator.get_reward(pos_docs=pos_docs, neg_docs=neg_docs, loss_type=self.loss_type)

            g_loss = -torch.mean((torch.log(g_probs[neg_inds]) * reward))

            generator.optimizer.zero_grad()
            g_loss.backward()
            generator.optimizer.step() 
Developer: pt-ranking, Project: pt-ranking.github.io, Lines: 24, Source: pair_ir_gan.py
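
The selection of positive documents above boils down to torch.gt followed by nonzero; a small sketch with a made-up label vector:

import torch

batch_label = torch.tensor([[0., 2., 0., 1.]])
pos_inds = torch.gt(torch.squeeze(batch_label), 0).nonzero()
print(pos_inds)   # tensor([[1], [3]])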

Example 6: check_mask_rele

# Required module: import torch [as alias]
# Or: from torch import gt [as alias]
def check_mask_rele(mask_ratio=0.4):
    mat = torch.randint(size=(1, 20), low=-2, high=3)

    mat = torch.squeeze(mat, dim=0)
    print('mat', mat.size(), mat)

    # torch_zero is a zero tensor defined at module level in the original test file.
    all_rele_inds = torch.gt(mat, torch_zero).nonzero()
    print('all_rele_inds', all_rele_inds.size(), all_rele_inds)
    num_rele = all_rele_inds.size()[0]
    print('num_rele', num_rele)

    num_to_mask = int(num_rele*mask_ratio)
    mask_inds = np.random.choice(num_rele, size=num_to_mask, replace=False)
    print('mask_inds', mask_inds)

    rele_inds_to_mask = all_rele_inds[mask_inds, 0]
    print('rele_inds_to_mask', rele_inds_to_mask) 
Developer: pt-ranking, Project: pt-ranking.github.io, Lines: 19, Source: test_data_utils.py

Example 7: calc_mask

# Required module: import torch [as alias]
# Or: from torch import gt [as alias]
def calc_mask(self, sparsity, wrapper, wrapper_idx=None):
        assert wrapper.type == 'BatchNorm2d', 'SlimPruner only supports 2d batch normalization layer pruning'
        weight = wrapper.module.weight.data.clone()
        if wrapper.weight_mask is not None:
            # apply base mask for iterative pruning
            weight = weight * wrapper.weight_mask

        base_mask = torch.ones(weight.size()).type_as(weight).detach()
        mask = {'weight_mask': base_mask.detach(), 'bias_mask': base_mask.clone().detach()}
        filters = weight.size(0)
        num_prune = int(filters * sparsity)
        if filters >= 2 and num_prune >= 1:
            w_abs = weight.abs()
            mask_weight = torch.gt(w_abs, self.global_threshold).type_as(weight)
            mask_bias = mask_weight.clone()
            mask = {'weight_mask': mask_weight.detach(), 'bias_mask': mask_bias.detach()}
        return mask 
Developer: microsoft, Project: nni, Lines: 19, Source: structured_pruning.py
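
The core masking step can be sketched on its own; the threshold below is arbitrary and only stands in for the pruner's global_threshold:

import torch

weight = torch.tensor([0.05, -0.40, 0.90, -0.02])
threshold = 0.1

# Keep channels whose absolute weight exceeds the threshold.
mask_weight = torch.gt(weight.abs(), threshold).type_as(weight)
print(mask_weight)   # tensor([0., 1., 1., 0.])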

Example 8: check_monitor_top_k

# Required module: import torch [as alias]
# Or: from torch import gt [as alias]
def check_monitor_top_k(self, current):
        less_than_k_models = len(self.best_k_models) < self.save_top_k
        if less_than_k_models:
            return True

        if not isinstance(current, torch.Tensor):
            rank_zero_warn(
                f'{current} is supposed to be a `torch.Tensor`. Saving checkpoint may not work correctly.'
                f' HINT: check the value of {self.monitor} in your validation loop', RuntimeWarning
            )
            current = torch.tensor(current)

        monitor_op = {
            "min": torch.lt,
            "max": torch.gt,
        }[self.mode]

        return monitor_op(current, self.best_k_models[self.kth_best_model_path]) 
Developer: PyTorchLightning, Project: pytorch-lightning, Lines: 20, Source: model_checkpoint.py
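
A minimal sketch of the dispatch pattern above, outside of PyTorch Lightning (the metric values are invented):

import torch

monitor_op = {"min": torch.lt, "max": torch.gt}["max"]

current, best = torch.tensor(0.92), torch.tensor(0.90)
print(monitor_op(current, best))   # tensor(True): the new value counts as an improvement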

Example 9: loss_per_level

# Required module: import torch [as alias]
# Or: from torch import gt [as alias]
def loss_per_level(self, estDisp, gtDisp, label):
        N, C, H, W = estDisp.shape
        scaled_gtDisp = gtDisp
        scale = 1.0
        if gtDisp.shape[-2] != H or gtDisp.shape[-1] != W:
            # compute scale per level and scale gtDisp
            scale = gtDisp.shape[-1] / (W * 1.0)
            scaled_gtDisp = gtDisp / scale
            scaled_gtDisp = self.scale_func(scaled_gtDisp, (H, W))

        # mask for valid disparity
        # (start disparity, max disparity / scale)
        # Attention: the invalid disparity of KITTI is set as 0, be sure to mask it out
        mask = (scaled_gtDisp > self.start_disp) & (scaled_gtDisp < (self.max_disp / scale))
        if mask.sum() < 1.0:
            print('Relative loss: no point has a disparity in ({},{})!'.format(self.start_disp,
                                                                               self.max_disp / scale))
            loss = (torch.abs(estDisp - scaled_gtDisp) * mask.float()).mean()
            return loss

        # relative loss
        valid_pixel_number = mask.float().sum()
        diff = scaled_gtDisp[mask] - estDisp[mask]
        label = label[mask]
        # values too large for torch.exp() are not suitable for soft margin loss
        # get entries whose absolute value is greater than 66
        over_large_mask = torch.gt(torch.abs(diff), 66)
        over_large_diff = diff[over_large_mask]
        # get entries whose absolute value is at most 66
        proper_mask = torch.le(torch.abs(diff), 66)
        proper_diff = diff[proper_mask]
        # generate label for soft margin loss
        label = label[proper_mask]
        loss = F.soft_margin_loss(proper_diff, label, reduction='sum') + torch.abs(over_large_diff).sum()
        loss = loss / valid_pixel_number

        return loss 
Developer: DeepMotionAIResearch, Project: DenseMatchingBenchmark, Lines: 39, Source: relative_loss.py
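
The magnitude split used for the soft margin term, sketched with made-up disparity differences:

import torch

diff = torch.tensor([-100.0, 3.0, 70.0, -5.0])

over_large_mask = torch.gt(torch.abs(diff), 66)   # tensor([ True, False,  True, False])
proper_mask = torch.le(torch.abs(diff), 66)       # complement of the mask above

print(diff[over_large_mask])   # tensor([-100.,   70.])
print(diff[proper_mask])       # tensor([ 3., -5.])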

Example 10: _graph_fn_get_action_and_log_likelihood

# Required module: import torch [as alias]
# Or: from torch import gt [as alias]
def _graph_fn_get_action_and_log_likelihood(self, flat_key, parameters, deterministic):
        # TODO: Utilize same logic in _graph_fn_get_action_components.
        # TODO: Not working right now, because we would split twice (here and in _graph_fn_get_action_components).
        action = None
        log_prob_or_likelihood = None

        action_space_component = self.flat_action_space[flat_key]

        # Categorical: Argmax over raw logits.
        if isinstance(action_space_component, IntBox) and \
                (deterministic is True or (isinstance(deterministic, np.ndarray) and deterministic)):
            action = self._graph_fn_get_deterministic_action_wo_distribution(parameters)
            if get_backend() == "tf":
                log_prob_or_likelihood = tf.log(tf.reduce_max(tf.nn.softmax(parameters, axis=-1), axis=-1))
            elif get_backend() == "pytorch":
                log_prob_or_likelihood = torch.log(torch.max(torch.softmax(parameters, dim=-1), dim=-1)[0])
        # Bernoulli: Sigmoid-derived p must be larger than 0.5.
        elif isinstance(action_space_component, BoolBox) and \
                (deterministic is True or (isinstance(deterministic, np.ndarray) and deterministic)):
            # Note: Change 0.5 to 1.0, once parameters are logits, not probs anymore (so far, parameters for
            # Bernoulli distributions are still probs).
            if get_backend() == "tf":
                action = tf.greater(parameters, 0.5)
                log_prob_or_likelihood = tf.log(tf.where(parameters > 0.5, parameters, 1.0 - parameters))
            elif get_backend() == "pytorch":
                action = torch.gt(parameters, 0.5)
                log_prob_or_likelihood = torch.log(torch.where(parameters > 0.5, parameters, 1.0 - parameters))
        # Deterministic is tensor or False. Pass through graph.
        else:
            action, log_prob_or_likelihood = self.distributions[flat_key].sample_and_log_prob(
                parameters, deterministic
            )

        return action, log_prob_or_likelihood 
Developer: rlgraph, Project: rlgraph, Lines: 36, Source: policy.py
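
The Bernoulli branch above picks the log-probability of whichever action was chosen; a small sketch with plain tensors (probabilities invented):

import torch

p = torch.tensor([0.9, 0.2, 0.6])

action = torch.gt(p, 0.5)                               # tensor([ True, False,  True])
log_prob = torch.log(torch.where(p > 0.5, p, 1.0 - p))  # log p where action is True, log (1 - p) otherwise
print(action, log_prob)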

Example 11: sent_level_feat

# Required module: import torch [as alias]
# Or: from torch import gt [as alias]
def sent_level_feat(self, ner_mat, sent_split):
        seq_len, batch_sz = ner_mat.size()
        meta_feat = []

        for bidx in range(batch_sz):
            feat = []
            split = sent_split[bidx]
            ner = ner_mat[:, bidx]
            _cursor = 0
            for sidx, s in enumerate(split):
                position_in_doc = float(sidx / len(split))
                first_three = 1 if sidx < 3 else 0
                sent_len_0 = 1 if s < 4 else 0
                sent_len_1 = 1 if s < 8 else 0
                sent_len_2 = 1 if s < 16 else 0
                sent_len_3 = 1 if s < 32 else 0
                sent_len_4 = 1 if s < 64 else 0
                ner_num = torch.sum(torch.gt(ner[_cursor:_cursor + s], 0).int())
                ner_num_0 = 1 if ner_num < 1 else 0
                ner_num_1 = 1 if ner_num < 2 else 0
                ner_num_2 = 1 if ner_num < 4 else 0
                ner_num_3 = 1 if ner_num < 8 else 0
                ner_rate = float(ner_num / s)
                tmp = [position_in_doc, first_three, sent_len_0, sent_len_1, sent_len_2, sent_len_3, sent_len_4,
                       ner_num_0, ner_num_1, ner_num_2, ner_num_3, ner_rate]
                # feat = feat + tmp * s
                feat.extend([tmp for i in range(s)])
                # for k in range(s):
                #     feat[_cursor+k] = tmp
                _cursor += s
            meta_feat.append(feat)
        return torch.FloatTensor(np.asarray(meta_feat)), len(meta_feat[0][0]) 
Developer: jiacheng-xu, Project: vmf_vae_nlp, Lines: 34, Source: feat.py
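
The per-sentence entity count above reduces to this pattern (toy tag ids, with 0 standing for the 'O' tag):

import torch

ner = torch.tensor([0, 3, 0, 1, 2, 0])
ner_num = torch.sum(torch.gt(ner, 0).int())
print(ner_num)   # tensor(3)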

Example 12: score_msk_word

# Required module: import torch [as alias]
# Or: from torch import gt [as alias]
def score_msk_word(mat):
    mat = mat.transpose(1, 0)
    batch_sz, seq_len = mat.size()
    msk = torch.gt(mat, 0).float()

    return msk 
Developer: jiacheng-xu, Project: vmf_vae_nlp, Lines: 8, Source: helper.py

Example 13: step

# Required module: import torch [as alias]
# Or: from torch import gt [as alias]
def step(x, b):
    """ 
    The step function for ideal quantization function in test stage.
    """
    y = torch.zeros_like(x)
    mask = torch.gt(x - b,  0.0)
    y[mask] = 1.0
    return y 
Developer: aliyun, Project: alibabacloud-quantization-networks, Lines: 10, Source: quantization.py
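
A quick usage check, assuming the step function defined above is in scope (inputs are illustrative):

import torch

x = torch.tensor([-1.0, 0.2, 0.8])
print(step(x, 0.5))   # tensor([0., 0., 1.])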

Example 14: step

# Required module: import torch [as alias]
# Or: from torch import gt [as alias]
def step(x, bias):
    """ 
    The step function for ideal quantization function in test stage.
    """
    y = torch.zeros_like(x) 
    mask = torch.gt(x - bias,  0.0)
    y[mask] = 1.0
    return y 
Developer: aliyun, Project: alibabacloud-quantization-networks, Lines: 10, Source: anybit.py

Example 15: df

# Required module: import torch [as alias]
# Or: from torch import gt [as alias]
def df(self, module, g_inp, g_out):
        return gt(module.input0, 0).float() 
Developer: f-dangel, Project: backpack, Lines: 4, Source: relu.py
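
The returned tensor is the element-wise ReLU derivative; a minimal sketch using plain torch.gt, independent of BackPACK:

import torch

x = torch.tensor([-2.0, 0.0, 3.0])
relu_grad = torch.gt(x, 0).float()   # 1 where the ReLU input is positive, 0 elsewhere
print(relu_grad)                     # tensor([0., 0., 1.])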


Note: The torch.gt method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers, and the copyright of the source code belongs to the original authors. Please follow the corresponding project's license when distributing or using the code; do not reproduce without permission.