

Python functional.threshold Method Code Examples

This article collects typical usage examples of the torch.nn.functional.threshold method in Python. If you are wondering what functional.threshold does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also browse further usage examples from the containing module, torch.nn.functional.


The following 15 code examples of functional.threshold are sorted by popularity by default. You can upvote the ones you like or find useful; your feedback helps the system recommend better Python code examples.
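
As a quick orientation before the examples, here is a minimal sketch of what F.threshold computes (the tensor values are arbitrary and not taken from any of the projects below): every element of the input greater than threshold is kept, and every other element is replaced by value.

# Minimal usage sketch (illustrative values only)
import torch
import torch.nn.functional as F

x = torch.tensor([-1.0, 0.5, 2.0, 3.0])
y = F.threshold(x, threshold=1.0, value=0.0)
# y is tensor([0., 0., 2., 3.]): elements <= 1.0 are replaced by 0.0.
# With threshold=0 and value=0 the call reduces to ReLU, which is why several
# of the examples below use F.threshold(x, 0, 0) as a drop-in ReLU.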

Example 1: _threshold

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import threshold [as alias]
def _threshold(raw, input, threshold, value, inplace=False):
    # for threshold or relu
    if threshold == 0 and value == 0:
        x = raw(input, threshold, value, inplace)
        bottom_blobs = [log.blobs(input)]
        name = log.add_layer(name='relu')
        log.add_blobs([x], name='relu_blob')
        layer = caffe_net.Layer_param(name=name, type='ReLU',
                                      bottom=bottom_blobs, top=[log.blobs(x)])
        log.cnet.add_layer(layer)
        return x
    if value != 0:
        raise NotImplementedError("value != 0 is not implemented in caffe")
    x = raw(input, threshold, value, inplace)
    bottom_blobs = [log.blobs(input)]
    layer_name = log.add_layer(name='threshold')
    top_blobs = log.add_blobs([x], name='threshold_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Threshold',
                                  bottom=bottom_blobs, top=top_blobs)
    layer.param.threshold_param.threshold = threshold
    log.cnet.add_layer(layer)
    return x
Developer: xxradon, Project: PytorchToCaffe, Lines: 24, Source: pytorch_to_caffe.py

Example 2: _threshold

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import threshold [as alias]
def _threshold(raw, input, threshold, value, inplace=False):
    # for threshold or relu
    if threshold == 0 and value == 0:
        x = raw(input, threshold, value, inplace)
        bottom_blobs = [log.blobs(input)]
        name = log.add_layer(name='relu')
        log.add_blobs([x], name='relu_blob')
        layer = caffe_net.Layer_param(name=name, type='ReLU',
                                      bottom=bottom_blobs, top=[log.blobs(x)])
        log.cnet.add_layer(layer)
        return x
    if value != 0:
        raise NotImplementedError("value != 0 is not implemented in caffe")
    x = raw(input, threshold, value, inplace)
    bottom_blobs = [log.blobs(input)]
    layer_name = log.add_layer(name='threshold')
    top_blobs = log.add_blobs([x], name='threshold_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Threshold',
                                  bottom=bottom_blobs, top=top_blobs)
    layer.param.threshold_param.threshold = threshold
    log.cnet.add_layer(layer)
    return x 
Developer: JDAI-CV, Project: fast-reid, Lines: 24, Source: pytorch_to_caffe.py

Example 3: hard_sigmoid

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import threshold [as alias]
def hard_sigmoid(x):
    """
    Computes element-wise hard sigmoid of x.
    See e.g. https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/sigm.py#L279
    """
    x = (0.2 * x) + 0.5
    x = F.threshold(-x, -1, -1)
    x = F.threshold(-x, 0, 0)
    return x 
Developer: chenyangh, Project: SemEval2019Task3, Lines: 11, Source: lstm_hard_sigmoid.py
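
The two chained F.threshold calls in the example above implement torch.clamp(0.2 * x + 0.5, 0, 1) via double negation: the first call enforces the upper bound of 1, the second the lower bound of 0. A small sanity check (not part of the original project; the values are made up):

import torch
import torch.nn.functional as F

x = torch.tensor([-10.0, 0.0, 10.0])
h = 0.2 * x + 0.5            # tensor([-1.5000,  0.5000,  2.5000])
h = F.threshold(-h, -1, -1)  # negate and cap: tensor([ 1.5000, -0.5000, -1.0000])
h = F.threshold(-h, 0, 0)    # negate back and floor at 0: tensor([0.0000, 0.5000, 1.0000])
# h equals torch.clamp(0.2 * x + 0.5, 0, 1)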

Example 4: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import threshold [as alias]
def forward(self, x, encoder_padding_mask):
        residual = x

        x = self.maybe_layer_norm(0, x, before=True)
        x, _ = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask)
        if self.fuse_dropout_add and self.training:
            x = fused_dropout_add(x, residual, self.dropout)
        else:
            x = F.dropout(x, p=self.dropout, training=self.training)
            x = residual + x
        x = self.maybe_layer_norm(0, x, after=True)

        residual = x
        x = self.maybe_layer_norm(1, x, before=True)

        if self.fuse_relu_dropout:
            x = fused_relu_dropout(self.fc1(x), self.relu_dropout)
        else:
            x = F.threshold(self.fc1(x), 0, 0)
            x = F.dropout(x, p=self.relu_dropout, training=self.training)
        x = self.fc2(x)

        if self.fuse_dropout_add and self.training:
            x = fused_dropout_add(x, residual, self.dropout)
        else:
            x = F.dropout(x, p=self.dropout, training=self.training)
            x = residual + x
        x = self.maybe_layer_norm(1, x, after=True)
        return x 
Developer: mlperf, Project: training_results_v0.5, Lines: 31, Source: transformer.py

Example 5: _relu

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import threshold [as alias]
def _relu(raw, input, inplace=False):
    # for relu
    x = raw(input, False)
    name = log.add_layer(name='relu')
    log.add_blobs([x], name='relu_blob')
    layer = caffe_net.Layer_param(name=name, type='ReLU',
                                  bottom=[log.blobs(input)], top=[log.blobs(x)])
    log.cnet.add_layer(layer)
    return x 
Developer: xxradon, Project: PytorchToCaffe, Lines: 11, Source: pytorch_to_caffe.py

Example 6: _prelu

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import threshold [as alias]
def _prelu(raw, input, weight):
    # for prelu
    x = raw(input, weight)
    bottom_blobs = [log.blobs(input)]
    name = log.add_layer(name='prelu')
    log.add_blobs([x], name='prelu_blob')
    layer = caffe_net.Layer_param(name=name, type='PReLU',
                                  bottom=bottom_blobs, top=[log.blobs(x)])
    if weight.size()[0] == 1:
        layer.param.prelu_param.channel_shared = True
        layer.add_data(weight.cpu().data.numpy()[0])
    else:
        layer.add_data(weight.cpu().data.numpy())
    log.cnet.add_layer(layer)
    return x
Developer: xxradon, Project: PytorchToCaffe, Lines: 17, Source: pytorch_to_caffe.py

Example 7: relu

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import threshold [as alias]
def relu(inputs):
    return F.threshold(inputs, 0, 0, inplace=True) 
Developer: HAHA-DL, Project: MLDG, Lines: 4, Source: ops.py

Example 8: find_tensor_peak_batch

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import threshold [as alias]
def find_tensor_peak_batch(heatmap, radius, downsample, threshold = 0.000001):
  assert heatmap.dim() == 3, 'The dimension of the heatmap is wrong : {}'.format(heatmap.size())
  assert radius > 0 and isinstance(radius, numbers.Number), 'The radius is not ok : {}'.format(radius)
  num_pts, H, W = heatmap.size(0), heatmap.size(1), heatmap.size(2)
  assert W > 1 and H > 1, 'To avoid the normalization function divide zero'
  # find the approximate location:
  score, index = torch.max(heatmap.view(num_pts, -1), 1)
  index_w = (index % W).float()
  index_h = (index // W).float()
  
  def normalize(x, L):
    return -1. + 2. * x.data / (L-1)
  boxes = [index_w - radius, index_h - radius, index_w + radius, index_h + radius]
  boxes[0] = normalize(boxes[0], W)
  boxes[1] = normalize(boxes[1], H)
  boxes[2] = normalize(boxes[2], W)
  boxes[3] = normalize(boxes[3], H)

  affine_parameter = torch.zeros((num_pts, 2, 3))
  affine_parameter[:,0,0] = (boxes[2]-boxes[0])/2
  affine_parameter[:,0,2] = (boxes[2]+boxes[0])/2
  affine_parameter[:,1,1] = (boxes[3]-boxes[1])/2
  affine_parameter[:,1,2] = (boxes[3]+boxes[1])/2
  # extract the sub-region heatmap
  theta = MU.np2variable(affine_parameter, heatmap.is_cuda, False)
  grid_size = torch.Size([num_pts, 1, radius*2+1, radius*2+1])
  grid = F.affine_grid(theta, grid_size)
  sub_feature = F.grid_sample(heatmap.unsqueeze(1), grid).squeeze(1)
  sub_feature = F.threshold(sub_feature, threshold, np.finfo(float).eps)

  X = MU.np2variable(torch.arange(-radius, radius+1), heatmap.is_cuda, False).view(1, 1, radius*2+1)
  Y = MU.np2variable(torch.arange(-radius, radius+1), heatmap.is_cuda, False).view(1, radius*2+1, 1)
  
  sum_region = torch.sum(sub_feature.view(num_pts,-1),1)
  x = torch.sum((sub_feature*X).view(num_pts,-1),1) / sum_region + index_w
  y = torch.sum((sub_feature*Y).view(num_pts,-1),1) / sum_region + index_h
     
  x = x * downsample + downsample / 2.0 - 0.5
  y = y * downsample + downsample / 2.0 - 0.5
  return torch.stack([x, y],1), score 
Developer: D-X-Y, Project: landmark-detection, Lines: 42, Source: basic_batch.py

Example 9: _prelu

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import threshold [as alias]
def _prelu(raw, input, weight):
    # for prelu
    x = raw(input, weight)
    bottom_blobs = [log.blobs(input)]
    name = log.add_layer(name='prelu')
    log.add_blobs([x], name='prelu_blob')
    layer = caffe_net.Layer_param(name=name, type='PReLU',
                                  bottom=bottom_blobs, top=[log.blobs(x)])
    if weight.size()[0] == 1:
        layer.param.prelu_param.channel_shared = True
        layer.add_data(weight.cpu().data.numpy()[0])
    else:
        layer.add_data(weight.cpu().data.numpy())
    log.cnet.add_layer(layer)
    return x 
Developer: JDAI-CV, Project: fast-reid, Lines: 17, Source: pytorch_to_caffe.py

Example 10: test_threshold

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import threshold [as alias]
def test_threshold(self):
        inp = torch.randn(1, 8, 32, 32, device='cuda', dtype=self.dtype)
        output = F.threshold(inp, 6, 6, inplace=False) 
Developer: NVIDIA, Project: apex, Lines: 5, Source: test_pyprof_nvtx.py

Example 11: test_softplus

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import threshold [as alias]
def test_softplus(self):
        inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
        output = F.softplus(inp, beta=1, threshold=20) 
Developer: NVIDIA, Project: apex, Lines: 5, Source: test_pyprof_nvtx.py

Example 12: relu

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import threshold [as alias]
def relu(input):
    return F.threshold(input, 0, 0, inplace=True) 
Developer: FerranAlet, Project: modular-metalearning, Lines: 4, Source: layers.py

Example 13: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import threshold [as alias]
def forward(self, x):
        x = (self.slope * x) + self.offset
        x = F.threshold(-x, -1, -1)
        x = F.threshold(-x, 0, 0)
        return x 
Developer: WenmuZhou, Project: DBNet.pytorch, Lines: 7, Source: MobilenetV3.py

Example 14: __init__

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import threshold [as alias]
def __init__(self):
        super(LayerThresholdTest, self).__init__()
        self.threshold = random.random()
        self.value = self.threshold + random.random()
        self.thresh = nn.Threshold(self.threshold, self.value) 
Developer: nerox8664, Project: onnx2keras, Lines: 7, Source: threshold.py

Example 15: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import threshold [as alias]
def forward(self, x):
        from torch.nn import functional as F
        return F.threshold(x, threshold=self.threshold, value=self.value) 
Developer: nerox8664, Project: onnx2keras, Lines: 5, Source: threshold.py


Note: The torch.nn.functional.threshold examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors; the source code remains copyrighted by the original authors, and its distribution and use are governed by each project's license. Do not reproduce without permission.