

Python torch.median Method Code Examples

This article collects and summarizes typical code examples of the torch.median method in Python. If you are wondering what exactly torch.median does, how to call it, or what its use looks like in real projects, the curated examples below should help. You can also explore other usage examples from the torch module that this method belongs to.


The following shows 15 code examples of the torch.median method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
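
Before the examples, here is a minimal sketch (not taken from any of the projects below) of the two call forms of torch.median: reducing over all elements, and reducing along a dimension. The tensor values are made up for illustration.

import torch

x = torch.tensor([[1.0, 7.0, 3.0],
                  [4.0, 2.0, 9.0]])

# Median over all elements; for an even element count PyTorch returns
# the lower of the two middle values, not their average.
overall = torch.median(x)                   # tensor(3.)

# Median along a dimension returns both the values and their indices.
values, indices = torch.median(x, dim=1)    # values: tensor([3., 4.]), indices: tensor([2, 0])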

Example 1: _median_smoothing

# Required import: import torch [as alias]
# Or: from torch import median [as alias]
def _median_smoothing(
        indices: Tensor,
        win_length: int
) -> Tensor:
    r"""
    Apply median smoothing to the 1D tensor over the given window.
    """

    # Centered window
    pad_length = (win_length - 1) // 2

    # "replicate" padding in any dimension
    indices = torch.nn.functional.pad(
        indices, (pad_length, 0), mode="constant", value=0.
    )

    indices[..., :pad_length] = torch.cat(pad_length * [indices[..., pad_length].unsqueeze(-1)], dim=-1)
    roll = indices.unfold(-1, win_length, 1)

    values, _ = torch.median(roll, -1)
    return values 
Developer: pytorch, Project: audio, Lines of code: 23, Source file: functional.py
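
A minimal usage sketch of the helper above; the input indices are invented for illustration, and the call assumes _median_smoothing and Tensor are imported as in the source file.

import torch

indices = torch.tensor([[3., 3., 9., 3., 3., 3., 3.]])
smoothed = _median_smoothing(indices, win_length=3)
# The isolated outlier (9.) is removed by the centered median window;
# every output value here is 3.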

Example 2: bounds

# Required import: import torch [as alias]
# Or: from torch import median [as alias]
def bounds(self, network=None): 
        if network is None: 
            nu_u = self.nu_u[-1]
            nu_one_u = self.nu_one_u[-1]
            nu_l = self.nu_l[-1]
            nu_one_l = self.nu_one_l[-1]
        else: 
            nu_u = network(self.nu_u[0])
            nu_one_u = network(self.nu_one_u[0])
            nu_l = network(self.nu_l[0])
            nu_one_l = network(self.nu_one_l[0])

        nu_l1_u = torch.median(nu_u.abs(),1)[0]
        nu_pos_u = (nu_l1_u + nu_one_u)/2
        nu_neg_u = (-nu_l1_u + nu_one_u)/2

        nu_l1_l = torch.median(nu_l.abs(),1)[0]
        nu_pos_l = (nu_l1_l + nu_one_l)/2
        nu_neg_l = (-nu_l1_l + nu_one_l)/2

        zu = nu_pos_u + nu_neg_l
        zl = nu_neg_u + nu_pos_l
        return zl,zu

Developer: locuslab, Project: convex_adversarial, Lines of code: 27, Source file: dual_inputs.py
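
The central torch.median call in this bound computation reduces |nu| along dim=1 and keeps only the values. A standalone sketch with assumed shapes (the real dual variables take whatever shape the network produces):

import torch

nu_u = torch.randn(4, 100, 10)               # assumed (batch, dual dim, outputs)
nu_l1_u = torch.median(nu_u.abs(), 1)[0]     # shape (4, 10): median of |nu| over dim 1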

Example 3: bounds

# Required import: import torch [as alias]
# Or: from torch import median [as alias]
def bounds(self, network=None): 
        if self.I_empty: 
            return 0,0

        if network is None: 
            nu = self.nus[-1]
            no = self.nu_ones[-1]
        else: 
            nu = network(self.nus[0])
            no = network(self.nu_ones[0])

        n = torch.median(nu.abs(), 1)[0]

        # From notes: 
        # \sum_i l_i[nu_i]_+ \approx (-n + no)/2
        # which is the negative of the term for the upper bound
        # for the lower bound, use -nu and negate the output, so 
        # (n - no)/2 since the no term flips twice and the l1 term
        # flips only once. 
        zl = (-n - no)/2
        zu = (n - no)/2

        return zl,zu 
Developer: locuslab, Project: convex_adversarial, Lines of code: 25, Source file: dual_layers.py

Example 4: analyze

# Required import: import torch [as alias]
# Or: from torch import median [as alias]
def analyze(self, batch, pred, true, pred_label=None, paths=None):
        pred = torch.sigmoid(pred)
        for_label = torch.median((pred * true + (1 - pred) * (1 - true)).reshape(pred.shape[0], -1), -1)[0]
        if pred_label is None:
            pred_label = (pred > 0.5).int()
        if paths is None:
            paths = [None] * len(batch)
        this_data = list(zip(
            batch,
            for_label,
            true.mean(tuple(range(1, true.dim()))) > 0.5,
            (pred_label == true).float().mean(tuple(range(1, pred_label.dim()))) > 0.5,
            paths))

        self.best += this_data
        self.best.sort(key=lambda x: -x[1])
        self.best = self.best[:self.topk]

        self.worst += this_data
        self.worst.sort(key=lambda x: x[1])
        self.worst = self.worst[:self.topk]

        self.confused += this_data
        self.confused.sort(key=lambda x: abs(self.center_value - x[1]))
        self.confused = self.confused[:self.topk] 
Developer: Vermeille, Project: Torchelie, Lines of code: 23, Source file: inspector.py
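
A standalone sketch of the per-sample scoring line above, assuming the predictions are (batch, H, W) probability maps; the agreement score is the per-pixel probability assigned to the correct class, summarized by its median:

import torch

pred = torch.rand(2, 4, 4)                               # assumed sigmoid outputs
true = (torch.rand(2, 4, 4) > 0.5).float()               # assumed binary targets
agreement = pred * true + (1 - pred) * (1 - true)        # per-pixel prob. of the correct label
score = torch.median(agreement.reshape(agreement.shape[0], -1), -1)[0]   # shape (2,)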

Example 5: forward

# Required import: import torch [as alias]
# Or: from torch import median [as alias]
def forward(self, input, label):
        # normalize features
        x = F.normalize(input)
        # normalize weights
        W = F.normalize(self.weight)
        # dot product
        logits = F.linear(x, W)
        # add margin
        theta = torch.acos(torch.clamp(logits, -1.0 + 1e-7, 1.0 - 1e-7))
        target_logits = torch.cos(theta + self.m)
        one_hot = torch.zeros_like(logits)
        one_hot.scatter_(1, label.view(-1, 1).long(), 1)
        if self.ls_eps > 0:
            one_hot = (1 - self.ls_eps) * one_hot + self.ls_eps / self.out_features
        output = logits * (1 - one_hot) + target_logits * one_hot
        # feature re-scale
        with torch.no_grad():
            B_avg = torch.where(one_hot < 1, torch.exp(self.s * logits), torch.zeros_like(logits))
            B_avg = torch.sum(B_avg) / input.size(0)
            theta_med = torch.median(theta)
            self.s = torch.log(B_avg) / torch.cos(torch.min(self.theta_zero * torch.ones_like(theta_med), theta_med))
        output *= self.s

        return output 
Developer: lyakaap, Project: Landmark2019-1st-and-3rd-Place-Solution, Lines of code: 26, Source file: metric_learning.py
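
The adaptive-scale update above relies on the full-tensor form of torch.median (no dim argument), which returns a single scalar. A minimal sketch of just that step, with made-up cosine logits:

import torch

logits = torch.empty(8, 100).uniform_(-1, 1)             # assumed cosine similarities
theta = torch.acos(torch.clamp(logits, -1.0 + 1e-7, 1.0 - 1e-7))
theta_med = torch.median(theta)                           # scalar: median angle over the batch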

Example 6: bounds

# Required import: import torch [as alias]
# Or: from torch import median [as alias]
def bounds(self, network=None): 
        if network is None: 
            nu_u = self.nu_u[-1]
            nu_one_u = self.nu_one_u[-1]
            nu_l = self.nu_l[-1]
            nu_one_l = self.nu_one_l[-1]
        else: 
            nu_u = network(self.nu_u[0])
            nu_one_u = network(self.nu_one_u[0])
            nu_l = network(self.nu_l[0])
            nu_one_l = network(self.nu_one_l[0])

        nu_l1_u = torch.median(nu_u.abs(),1)[0]
        nu_pos_u = (nu_l1_u + nu_one_u)/2
        nu_neg_u = (-nu_l1_u + nu_one_u)/2

        nu_l1_l = torch.median(nu_l.abs(),1)[0]
        nu_pos_l = (nu_l1_l + nu_one_l)/2
        nu_neg_l = (-nu_l1_l + nu_one_l)/2

        zu = nu_pos_u + nu_neg_l
        zl = nu_neg_u + nu_pos_l
        return zl,zu 
Developer: max-andr, Project: provable-robustness-max-linear-regions, Lines of code: 25, Source file: dual_inputs.py

Example 7: bounds

# Required import: import torch [as alias]
# Or: from torch import median [as alias]
def bounds(self, network=None):
        if self.I_empty:
            return 0, 0

        if network is None:
            nu = self.nus[-1]
            no = self.nu_ones[-1]
        else:
            nu = network(self.nus[0])
            no = network(self.nu_ones[0])

        n = torch.median(nu.abs(), 1)[0]

        # From notes: 
        # \sum_i l_i[nu_i]_+ \approx (-n + no)/2
        # which is the negative of the term for the upper bound
        # for the lower bound, use -nu and negate the output, so 
        # (n - no)/2 since the no term flips twice and the l1 term
        # flips only once. 
        zl = (-n - no) / 2
        zu = (n - no) / 2

        return zl, zu 
Developer: max-andr, Project: provable-robustness-max-linear-regions, Lines of code: 25, Source file: dual_layers.py

Example 8: forward

# Required import: import torch [as alias]
# Or: from torch import median [as alias]
def forward(self, input, label=None):
        # normalize features
        x = F.normalize(input)
        # normalize weights
        W = F.normalize(self.W)
        # dot product
        logits = F.linear(x, W)
        if label is None:
            return logits
        # feature re-scale
        theta = torch.acos(torch.clamp(logits, -1.0 + 1e-7, 1.0 - 1e-7))
        one_hot = torch.zeros_like(logits)
        one_hot.scatter_(1, label.view(-1, 1).long(), 1)
        with torch.no_grad():
            B_avg = torch.where(one_hot < 1, torch.exp(self.s * logits), torch.zeros_like(logits))
            B_avg = torch.sum(B_avg) / input.size(0)
            # print(B_avg)
            theta_med = torch.median(theta[one_hot == 1])
            self.s = torch.log(B_avg) / torch.cos(torch.min(math.pi/4 * torch.ones_like(theta_med), theta_med))
        output = self.s * logits

        return output 
Developer: 4uiiurz1, Project: pytorch-adacos, Lines of code: 24, Source file: metrics.py
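
Unlike Example 5, the median here is taken only over the target-class angles: boolean indexing with one_hot == 1 flattens the selected elements into a 1-D tensor before the reduction. A minimal sketch with made-up labels:

import torch

theta = torch.rand(4, 10)
one_hot = torch.zeros_like(theta)
one_hot[torch.arange(4), torch.tensor([1, 3, 5, 7])] = 1
theta_med = torch.median(theta[one_hot == 1])   # scalar median of the 4 selected angles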

Example 9: __init__

# Required import: import torch [as alias]
# Or: from torch import median [as alias]
def __init__(self, *args, **kargs):
        super(PeakResponseMapping, self).__init__(*args)

        self.inferencing = False
        # use global average pooling to aggregate responses if peak stimulation is disabled
        self.enable_peak_stimulation = kargs.get('enable_peak_stimulation', True)
        # return only the class response maps in inference mode if peak backpropagation is disabled
        self.enable_peak_backprop = kargs.get('enable_peak_backprop', True)
        # window size for peak finding
        self.win_size = kargs.get('win_size', 3)
        # sub-pixel peak finding
        self.sub_pixel_locating_factor = kargs.get('sub_pixel_locating_factor', 1)
        # peak filtering
        self.filter_type = kargs.get('filter_type', 'median')
        if self.filter_type == 'median':
            self.peak_filter = self._median_filter
        elif self.filter_type == 'mean':
            self.peak_filter = self._mean_filter
        elif self.filter_type == 'max':
            self.peak_filter = self._max_filter
        elif isinstance(self.filter_type, (int, float)):
            self.peak_filter = lambda x: self.filter_type
        else:
            self.peak_filter = None 
Developer: chuchienshu, Project: ultra-thin-PRM, Lines of code: 26, Source file: peak_response_mapping.py

Example 10: Confidence_Loss

# Required import: import torch [as alias]
# Or: from torch import median [as alias]
def Confidence_Loss(self, pred_confidence, mask, pred_d, gt_d):
        # using least square to find scaling factor
        N = torch.sum(mask) + EPSILON
        N = N.item()

        if N > 0.5:
            scale_factor = torch.median(
                gt_d.data[mask.data > 0.1] /
                (pred_d.data[mask.data > 0.1] + EPSILON)).item()
            pred_d_aligned = pred_d * scale_factor

            error = torch.abs(pred_d_aligned.data -
                              gt_d.data) / (gt_d.data + EPSILON)
            error = torch.exp(-error * 2.0)

            error_var = autograd.Variable(error, requires_grad=False)
            u_loss = mask * torch.abs(pred_confidence - error_var)
            confidence_term = torch.sum(u_loss) / N
        else:
            confidence_term = 0.0

        return confidence_term 
Developer: google, Project: mannequinchallenge, Lines of code: 24, Source file: networks.py
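
The scale factor above is a median of per-pixel depth ratios over the valid mask, which makes the alignment robust to outliers. A standalone sketch with hypothetical depth maps (EPSILON is assumed to be a small module-level constant in the original source):

import torch

EPSILON = 1e-8                                   # assumed value
pred_d = torch.rand(1, 64, 64) + 0.5
gt_d = 2.0 * pred_d                              # ground truth is exactly 2x the prediction here
mask = torch.ones_like(gt_d)
scale = torch.median(gt_d[mask > 0.1] / (pred_d[mask > 0.1] + EPSILON))   # ~2.0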

Example 11: forward

# Required import: import torch [as alias]
# Or: from torch import median [as alias]
def forward(self, x):
        db7_decomp_high = self.db7_decomp_high
        if x.shape[1] > 1:
            db7_decomp_high = torch.cat([self.db7_decomp_high]*x.shape[1], dim=0)

        if x.is_cuda:
            db7_decomp_high = db7_decomp_high.cuda()

        diagonal = F.pad(x, (0,0,self.db7_decomp_high.shape[2]//2,self.db7_decomp_high.shape[2]//2), mode='reflect')
        diagonal = F.conv2d(diagonal, db7_decomp_high, stride=(2,1), groups=x.shape[1])
        diagonal = F.pad(diagonal, (self.db7_decomp_high.shape[2]//2,self.db7_decomp_high.shape[2]//2,0,0), mode='reflect')
        diagonal = F.conv2d(diagonal.transpose(2,3), db7_decomp_high, stride=(2,1), groups=x.shape[1])
        #diagonal = diagonal.transpose(2,3)
        sigma = 0
        diagonal = diagonal.view(diagonal.shape[0],diagonal.shape[1],-1)
        for c in range(diagonal.shape[1]):
            d = diagonal[:,c]
            sigma += torch.median(torch.abs(d), dim=1)[0] / 0.6745
        sigma = sigma / diagonal.shape[1]
        sigma = sigma.detach()
        del db7_decomp_high
        return sigma 
Developer: cig-skoltech, Project: deep_demosaick, Lines of code: 24, Source file: wmad_estimator.py
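
The final reduction above is the classic wavelet-MAD noise estimate, sigma ≈ median(|d|) / 0.6745, taken per image over the diagonal detail coefficients. A standalone sketch of just that step:

import torch

d = 0.1 * torch.randn(2, 4096)                            # assumed diagonal coefficients per image
sigma = torch.median(torch.abs(d), dim=1)[0] / 0.6745     # per-image noise estimate, close to 0.1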

Example 12: template_calculation_parallel

# Required import: import torch [as alias]
# Or: from torch import median [as alias]
# Note: this example also requires numpy (import numpy as np) and actually calls np.median rather than torch.median
def template_calculation_parallel(unit, ids, spike_array, temps, data, snipit):

    idx = np.where(ids==unit)[0]
    times = spike_array[idx]
    
    # get indexes of spikes that are not too close to end; 
    #       note: spiketimes are relative to current chunk; i.e. start at 0
    idx2 = np.where(times<(data.shape[1]-temps.shape[1]))[0]

    # grab waveforms; 
    if idx2.shape[0]>0:
        wfs = np.median(data[:,times[idx2][:,None]+
                                snipit-temps.shape[1]+1].
                                transpose(1,2,0),0)
        return (wfs, idx2.shape[0])
    else:
        return (wfs_empty, 0)  # wfs_empty is assumed to be defined at module level in the original source
Developer: paninski-lab, Project: yass, Lines of code: 19, Source file: run_original.py

Example 13: forward

# Required import: import torch [as alias]
# Or: from torch import median [as alias]
# Note: this example also requires numpy (import numpy as np)
def forward(self, x):
        # x should be of shape batchSize x dimNodeSignals x N
        batchSize = x.shape[0]
        dimNodeSignals = x.shape[1]
        assert x.shape[2] == self.N
        xK = x # xK is a tensor aggregating the 0-hop (x), 1-hop, ..., K-hop
        # max's
        # It is initialized with the 0-hop neigh. (x itself)
        xK = xK.unsqueeze(3) # extra dimension added for concatenation ahead
        #x = x.unsqueeze(3) # B x F x N x 1
        for k in range(1,self.K+1):
            kHopNeighborhood = self.neighborhood[k-1] 
            # Fetching k-hop neighborhoods of all nodes
            kHopMedian = torch.empty(0).to(x.device)
            # Initializing the vector that will contain the k-hop median for
            # every node
            for n in range(self.N):
                # Iterating over the nodes
                # This step is necessary because here the neighborhoods are
                # lists of lists. It is impossible to pad them and feed them as
                # a matrix, as this would impact the outcome of the median
                # operation
                nodeNeighborhood = torch.tensor(np.array(kHopNeighborhood[n]))
                neighborhoodLen = len(nodeNeighborhood)
                gatherNode = nodeNeighborhood.reshape([1, 1, neighborhoodLen])
                gatherNode = gatherNode.repeat([batchSize, dimNodeSignals, 1])
                # Reshaping the node neighborhood for the gather operation
                xNodeNeighbors=torch.gather(x,2,gatherNode.long().to(x.device))
                # Gathering signal values in the node neighborhood
                nodeMedian,_ = torch.median(xNodeNeighbors, dim = 2,
                                            keepdim=True)
                # Computing the median in the neighborhood
                kHopMedian = torch.cat([kHopMedian,nodeMedian],2)
                # Concatenating k-hop medians node by node
            kHopMedian = kHopMedian.unsqueeze(3) # Extra dimension for
            # concatenation with the previous (k-1)-hop median tensor 
            xK = torch.cat([xK,kHopMedian],3)
        out = torch.matmul(xK,self.weight.unsqueeze(2))
        # Multiplying each k-hop median by corresponding trainable weight
        out = out.reshape([batchSize,dimNodeSignals,self.N])
        return out 
Developer: alelab-upenn, Project: graph-neural-networks, Lines of code: 43, Source file: graphML.py
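
The per-node reduction above uses the dim/keepdim form of torch.median. A minimal sketch of gathering one hypothetical neighborhood and taking its median across the node dimension:

import torch

x = torch.randn(8, 3, 10)                                 # assumed (batch, features, nodes)
neighbors = torch.tensor([0, 2, 5])                       # hypothetical neighborhood of one node
node_median, _ = torch.median(x[:, :, neighbors], dim=2, keepdim=True)   # shape (8, 3, 1)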

Example 14: _find_max_per_frame

# Required import: import torch [as alias]
# Or: from torch import median [as alias]
def _find_max_per_frame(
        nccf: Tensor,
        sample_rate: int,
        freq_high: int
) -> Tensor:
    r"""
    For each frame, take the highest value of NCCF,
    apply centered median smoothing, and convert to frequency.

    Note: If the max among all the lags is very close
    to the first half of lags, then the latter is taken.
    """

    lag_min = int(math.ceil(sample_rate / freq_high))

    # Find near enough max that is smallest

    best = torch.max(nccf[..., lag_min:], -1)

    half_size = nccf.shape[-1] // 2
    half = torch.max(nccf[..., lag_min:half_size], -1)

    best = _combine_max(half, best)
    indices = best[1]

    # Add back minimal lag
    indices += lag_min
    # Add 1 empirical calibration offset
    indices += 1

    return indices 
Developer: pytorch, Project: audio, Lines of code: 33, Source file: functional.py

Example 15: detect_pitch_frequency

# Required import: import torch [as alias]
# Or: from torch import median [as alias]
def detect_pitch_frequency(
        waveform: Tensor,
        sample_rate: int,
        frame_time: float = 10 ** (-2),
        win_length: int = 30,
        freq_low: int = 85,
        freq_high: int = 3400,
) -> Tensor:
    r"""Detect pitch frequency.

    It is implemented using normalized cross-correlation function and median smoothing.

    Args:
        waveform (Tensor): Tensor of audio of dimension (..., time)
        sample_rate (int): The sample rate of the waveform (Hz)
        frame_time (float, optional): Duration of a frame (Default: ``10 ** (-2)``).
        win_length (int, optional): The window length for median smoothing (in number of frames) (Default: ``30``).
        freq_low (int, optional): Lowest frequency that can be detected (Hz) (Default: ``85``).
        freq_high (int, optional): Highest frequency that can be detected (Hz) (Default: ``3400``).

    Returns:
        Tensor: Tensor of freq of dimension (..., frame)
    """
    # pack batch
    shape = list(waveform.size())
    waveform = waveform.reshape([-1] + shape[-1:])

    nccf = _compute_nccf(waveform, sample_rate, frame_time, freq_low)
    indices = _find_max_per_frame(nccf, sample_rate, freq_high)
    indices = _median_smoothing(indices, win_length)

    # Convert indices to frequency
    EPSILON = 10 ** (-9)
    freq = sample_rate / (EPSILON + indices.to(torch.float))

    # unpack batch
    freq = freq.reshape(shape[:-1] + list(freq.shape[-1:]))

    return freq 
Developer: pytorch, Project: audio, Lines of code: 41, Source file: functional.py
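
A hedged usage sketch: in torchaudio this function is exposed as torchaudio.functional.detect_pitch_frequency, and the waveform below is a synthetic 440 Hz tone, so the detected frequencies should sit near 440.

import math
import torch

sample_rate = 16000
t = torch.arange(0, 1, 1 / sample_rate)
waveform = torch.sin(2 * math.pi * 440 * t).unsqueeze(0)   # (1, time), one second of audio
freq = detect_pitch_frequency(waveform, sample_rate)        # values close to 440 Hz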


Note: The torch.median examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors; please consult each project's license before redistributing or using the code. Do not reproduce this article without permission.