

Python functional.hardtanh Method Code Examples

This article collects typical usage examples of the torch.nn.functional.hardtanh method in Python. If you are unsure what functional.hardtanh does, how to call it, or where it is used in practice, the curated code examples below should help. You can also explore the other usage examples for torch.nn.functional.


Below are 15 code examples of functional.hardtanh, ordered by popularity.
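
As a baseline for reading the examples, here is a minimal sketch of what F.hardtanh computes: it clamps each element of the input to [min_val, max_val] (default [-1, 1]), making it equivalent to torch.clamp.

import torch
import torch.nn.functional as F

x = torch.tensor([-3.0, -0.5, 0.0, 0.5, 3.0])

# hardtanh clamps every element to [min_val, max_val]; the defaults are [-1, 1].
print(F.hardtanh(x))                          # tensor([-1.0000, -0.5000,  0.0000,  0.5000,  1.0000])
print(F.hardtanh(x, min_val=0., max_val=1.))  # tensor([0.0000, 0.0000, 0.0000, 0.5000, 1.0000])

# Equivalent formulation with clamp:
assert torch.equal(F.hardtanh(x, 0., 1.), torch.clamp(x, 0., 1.))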

Example 1: forward

# Required module: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import hardtanh [as alias]
def forward(self, input, lengths=None):
        "See :obj:`onmt.modules.EncoderBase.forward()`"
        # (batch_size, 1, nfft, t)
        # layer 1
        input = self.batch_norm1(self.layer1(input))

        # (batch_size, 32, nfft/2, t/2)
        input = F.hardtanh(input, 0, 20, inplace=True)

        # (batch_size, 32, nfft/2/2, t/2)
        # layer 2
        input = self.batch_norm2(self.layer2(input))

        # (batch_size, 32, nfft/2/2, t/2)
        input = F.hardtanh(input, 0, 20, inplace=True)

        batch_size = input.size(0)
        length = input.size(3)
        input = input.view(batch_size, -1, length)
        input = input.transpose(0, 2).transpose(1, 2)  # (t, batch_size, feat): time-major for the RNN

        output, hidden = self.rnn(input)

        return hidden, output 
Author: xiadingZ | Project: video-caption-openNMT.pytorch | Lines: 26 | Source: AudioEncoder.py
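
The hardtanh(·, 0, 20) calls above act as a clipped ReLU, the activation used in Deep Speech-style acoustic encoders. A quick standalone check of that equivalence (my own sketch, not part of the project above):

import torch
import torch.nn.functional as F

x = torch.randn(4, 32, 20, 50) * 30  # values well outside [0, 20]

# F.hardtanh(x, 0, 20) == min(relu(x), 20): negatives go to 0, large values saturate at 20.
assert torch.equal(F.hardtanh(x, 0, 20), torch.clamp(F.relu(x), max=20.0))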

Example 2: add_mask_transformer

# Required module: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import hardtanh [as alias]
def add_mask_transformer(self, temperature=.66, hard_sigmoid=(-.1, 1.1)):
    """
    hard_sigmoid:
        False:  use sigmoid only
        True:   hard thresholding
        (a, b): hard thresholding on rescaled sigmoid
    """
    self.temperature = temperature
    self.hard_sigmoid = hard_sigmoid

    view = -1, 1, 28, 28
    if hard_sigmoid is False:
        self.transform = lambda x: torch.sigmoid(x / temperature).view(*view)
    elif hard_sigmoid is True:
        self.transform = lambda x: F.hardtanh(
            x / temperature, 0, 1).view(*view)
    else:
        a, b = hard_sigmoid
        self.transform = lambda x: F.hardtanh(
            torch.sigmoid(x / temperature) * (b - a) + a, 0, 1).view(*view)


# Must sub-class ConvGenerator to provide transform() 
Author: steveli | Project: misgan | Lines: 25 | Source: mnist_generator.py

Example 3: add_mask_transformer

# Required module: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import hardtanh [as alias]
def add_mask_transformer(self, temperature=.66, hard_sigmoid=(-.1, 1.1)):
    """
    hard_sigmoid:
        False:  use sigmoid only
        True:   hard thresholding
        (a, b): hard thresholding on rescaled sigmoid
    """
    self.temperature = temperature
    self.hard_sigmoid = hard_sigmoid

    if hard_sigmoid is False:
        self.transform = lambda x: torch.sigmoid(x / temperature)
    elif hard_sigmoid is True:
        self.transform = lambda x: F.hardtanh(
            x / temperature, 0, 1)
    else:
        a, b = hard_sigmoid
        self.transform = lambda x: F.hardtanh(
            torch.sigmoid(x / temperature) * (b - a) + a, 0, 1) 
Author: steveli | Project: misgan | Lines: 21 | Source: celeba_generator.py
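
A hedged usage sketch of the three hard_sigmoid branches shared by Examples 2 and 3, applied to a hypothetical tensor of mask logits (standalone; not taken from the misgan code):

import torch
import torch.nn.functional as F

temperature = .66
a, b = -.1, 1.1            # the (a, b) rescaling from the default argument
x = torch.randn(8, 128)    # hypothetical mask logits

soft = torch.sigmoid(x / temperature)              # hard_sigmoid=False
hard = F.hardtanh(x / temperature, 0, 1)           # hard_sigmoid=True
rescaled = F.hardtanh(soft * (b - a) + a, 0, 1)    # hard_sigmoid=(a, b)

# The rescaled variant can reach exactly 0 and exactly 1 while keeping
# nonzero gradients over most of the input range.
print(rescaled.min().item(), rescaled.max().item())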

Example 4: aten_hardtanh

# Required module: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import hardtanh [as alias]
def aten_hardtanh(inputs, attributes, scope):
    inp, min_val, max_val = inputs[:3]
    ctx = current_context()
    net = ctx.network
    if ctx.is_tensorrt and has_trt_tensor(inputs):
        # hardtanh with min_val == 0 reduces to min(relu(x), max_val) (ReLU6 when
        # max_val == 6); it is built here as a RELU activation followed by an
        # elementwise MIN, which is faster in TensorRT than a hard-sigmoid formulation.
        assert min_val == 0, "only support relu6"
        layer = net.add_activation(inp, trt.ActivationType.RELU)
        output = layer.get_output(0)
        layer.name = scope + "/relu"
        tensor = np.full([1] * len(inp.shape), max_val, dtype=np.float32)
        trt_6 = net.add_constant([1] * len(inp.shape), tensor)
        layer = net.add_elementwise(output, trt_6.get_output(0), trt.ElementWiseOperation.MIN)
        output = layer.get_output(0)
        layer.name = scope + "/elem_min"
        output.name = scope + "/relu6"
        return [output]
    elif ctx.is_tvm and has_tvm_tensor(inputs):
        raise NotImplementedError
    return [F.hardtanh(inp, min_val, max_val)] 
Author: traveller59 | Project: torch2trt | Lines: 23 | Source: activation.py
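
The TensorRT branch emits min(relu(x), max_val) rather than the relu(x) - relu(x - 6) form the original comment mentioned; a quick PyTorch check (my own sketch, not part of torch2trt) that this matches hardtanh when min_val == 0:

import torch
import torch.nn.functional as F

x = torch.randn(2, 3, 8, 8) * 10
max_val = 6.0

# RELU followed by an elementwise MIN with a constant, as the TensorRT branch builds it.
trt_style = torch.minimum(F.relu(x), torch.tensor(max_val))

assert torch.equal(F.hardtanh(x, 0.0, max_val), trt_style)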

Example 5: aten_hardtanh_

# Required module: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import hardtanh [as alias]
def aten_hardtanh_(inputs, attributes, scope):
    inp, min_val, max_val = inputs[:3]
    ctx = current_context()
    net = ctx.network
    if ctx.is_tensorrt and has_trt_tensor(inputs):
        # hardtanh_ with min_val == 0 reduces to min(relu(x), max_val), i.e. ReLU6 when max_val == 6
        assert min_val == 0, "only support relu6"
        layer = net.add_activation(inp, trt.ActivationType.RELU)
        output = layer.get_output(0)
        layer.name = scope + "/relu"
        tensor = np.full([1] * len(inp.shape), max_val, dtype=np.float32)
        trt_6 = net.add_constant([1] * len(inp.shape), tensor)
        layer = net.add_elementwise(output, trt_6.get_output(0), trt.ElementWiseOperation.MIN)
        output = layer.get_output(0)
        layer.name = scope + "/elem_min"
        output.name = scope + "/relu6"
        return [output]
    elif ctx.is_tvm and has_tvm_tensor(inputs):
        raise NotImplementedError
    return [F.hardtanh_(inp, min_val, max_val)] 
Author: traveller59 | Project: torch2trt | Lines: 22 | Source: activation.py

Example 6: forward

# Required module: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import hardtanh [as alias]
def forward(self, input, lengths=None):
        # (batch_size, 1, nfft, t)
        # layer 1
        input = self.batch_norm1(self.layer1(input))

        # (batch_size, 32, nfft/2, t/2)
        input = F.hardtanh(input, 0, 20, inplace=True)

        # (batch_size, 32, nfft/2/2, t/2)
        # layer 2
        input = self.batch_norm2(self.layer2(input))

        # (batch_size, 32, nfft/2/2, t/2)
        input = F.hardtanh(input, 0, 20, inplace=True)

        batch_size = input.size(0)
        length = input.size(3)
        input = input.view(batch_size, -1, length)
        input = input.transpose(0, 2).transpose(1, 2)  # (t, batch_size, feat): time-major for the RNN

        output, hidden = self.rnn(input)

        return hidden, output 
Author: moonlightlane | Project: QG-Net | Lines: 25 | Source: AudioEncoder.py

Example 7: forward

# Required module: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import hardtanh [as alias]
def forward(self, src, lengths=None):
        "See :obj:`onmt.encoders.encoder.EncoderBase.forward()`"
        # (batch_size, 1, nfft, t)
        # layer 1
        src = self.batch_norm1(self.layer1(src))

        # (batch_size, 32, nfft/2, t/2)
        src = F.hardtanh(src, 0, 20, inplace=True)

        # (batch_size, 32, nfft/2/2, t/2)
        # layer 2
        src = self.batch_norm2(self.layer2(src))

        # (batch_size, 32, nfft/2/2, t/2)
        src = F.hardtanh(src, 0, 20, inplace=True)

        batch_size = src.size(0)
        length = src.size(3)
        src = src.view(batch_size, -1, length)
        src = src.transpose(0, 2).transpose(1, 2)  # (t, batch_size, feat): time-major for the RNN

        output, hidden = self.rnn(src)

        return hidden, output 
Author: InitialBug | Project: BiSET | Lines: 26 | Source: audio_encoder.py

Example 8: makeSegMask

# Required module: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import hardtanh [as alias]
def makeSegMask(img):
	frame_data = torch.FloatTensor(img) / 255.0

	input_tensor = preprocess(frame_data.permute(2, 0, 1))
	input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model

	# move the input to the GPU for speed if available (the model is assumed to be there already)
	if torch.cuda.is_available():
		input_batch = input_batch.to('cuda')


	with torch.no_grad():
		output = model(input_batch)['out'][0]

	segmentation = output.argmax(0)

	bgOut = output[0:1]  # background-class logits
	a = (1.0 - F.relu(torch.tanh(bgOut * 0.30 - 1.0))).pow(0.5) * 2.0  # soft non-background weight

	people = segmentation.eq(torch.ones_like(segmentation).long().fill_(people_class)).float()

	people.unsqueeze_(0).unsqueeze_(0)
	
	for i in range(3):
		people = F.conv2d(people, blur, stride=1, padding=1)

	# rectify the blended person mask into [0, 1]
	combined_mask = F.relu(F.hardtanh(a * people.squeeze().pow(1.5)))
	combined_mask = combined_mask.expand(1, 3, -1, -1)

	res = (combined_mask * 255.0).cpu().squeeze().byte().permute(1, 2, 0).numpy()

	return res 
Author: WhiteNoise | Project: deep-bgremove | Lines: 35 | Source: createmask.py

Example 9: forward

# Required module: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import hardtanh [as alias]
def forward(self, x):
        from torch.nn import functional as F
        return F.hardtanh(x, min_val=self.min_val, max_val=self.max_val) 
Author: nerox8664 | Project: pytorch2keras | Lines: 5 | Source: hard_tanh.py
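
For context, a minimal sketch of the wrapper module this forward plausibly belongs to (the attribute names min_val and max_val are inferred from the method body; this is not copied from pytorch2keras):

import torch
from torch.nn import functional as F

class HardTanh(torch.nn.Module):
    """Thin nn.Module wrapper exposing F.hardtanh with fixed bounds."""

    def __init__(self, min_val=-1.0, max_val=1.0):
        super().__init__()
        self.min_val = min_val
        self.max_val = max_val

    def forward(self, x):
        return F.hardtanh(x, min_val=self.min_val, max_val=self.max_val)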

Example 10: forward

# Required module: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import hardtanh [as alias]
def forward(self, emb, parser_state):
        emb_last, cum_gate = parser_state
        ntimestep = emb.size(0)

        emb_last = torch.cat([emb_last, emb], dim=0)
        emb = emb_last.transpose(0, 1).transpose(1, 2)  # bsz, ninp, ntimestep + nlookback

        gates = self.gate(emb)  # bsz, 2, ntimestep
        gate = gates[:, 0, :]
        gate_next = gates[:, 1, :]
        cum_gate = torch.cat([cum_gate, gate], dim=1)
        gate_hat = torch.stack([cum_gate[:, i:i + ntimestep] for i in range(self.nslots, 0, -1)],
                               dim=2)  # bsz, ntimestep, nslots

        if self.hard:
            memory_gate = (F.hardtanh((gate[:, :, None] - gate_hat) / self.resolution * 2 + 1) + 1) / 2
        else:
            memory_gate = torch.sigmoid(
                (gate[:, :, None] - gate_hat) / self.resolution * 10 + 5)  # bsz, ntimestep, nslots
        memory_gate = torch.cumprod(memory_gate, dim=2)  # bsz, ntimestep, nslots
        memory_gate = torch.unbind(memory_gate, dim=1)

        if self.hard:
            memory_gate_next = (F.hardtanh((gate_next[:, :, None] - gate_hat) / self.resolution * 2 + 1) + 1) / 2
        else:
            memory_gate_next = torch.sigmoid(
                (gate_next[:, :, None] - gate_hat) / self.resolution * 10 + 5)  # bsz, ntimestep, nslots
        memory_gate_next = torch.cumprod(memory_gate_next, dim=2)  # bsz, ntimestep, nslots
        memory_gate_next = torch.unbind(memory_gate_next, dim=1)

        return (memory_gate, memory_gate_next), gate, (emb_last[-self.nlookback:], cum_gate[:, -self.nslots:]) 
Author: nyu-mll | Project: PRPN-Analysis | Lines: 33 | Source: ParsingNetwork.py
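
A small numeric sketch of the hard branch above (the resolution value here is hypothetical): (hardtanh(d / resolution * 2 + 1) + 1) / 2 is a piecewise-linear surrogate for a step function of d = gate - gate_hat, exactly 0 for d <= -resolution and exactly 1 for d >= 0:

import torch
import torch.nn.functional as F

resolution = 0.1                   # hypothetical value for self.resolution
d = torch.linspace(-0.3, 0.3, 7)   # stand-in for gate - gate_hat

hard_gate = (F.hardtanh(d / resolution * 2 + 1) + 1) / 2
# 0 for d <= -resolution, 1 for d >= 0, linear ramp in between.
print(list(zip(d.tolist(), hard_gate.tolist())))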

Example 11: sample_z

# Required module: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import hardtanh [as alias]
def sample_z(self, batch_size, sample=True):
        """Sample the hard-concrete gates for training and use a deterministic value for testing"""
        if sample:
            eps = self.get_eps(self.floatTensor(batch_size, self.in_features))
            z = self.quantile_concrete(eps)
            return F.hardtanh(z, min_val=0, max_val=1)
        else:  # mode
            pi = torch.sigmoid(self.qz_loga).view(1, self.in_features).expand(batch_size, self.in_features)
            return F.hardtanh(pi * (limit_b - limit_a) + limit_a, min_val=0, max_val=1) 
Author: AMLab-Amsterdam | Project: L0_regularization | Lines: 11 | Source: l0_layers.py
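
quantile_concrete and the (limit_a, limit_b) constants come from the hard-concrete distribution of Louizos et al. (the L0 regularization paper). A self-contained, hedged sketch of the sampling step; the parameter values are illustrative, not taken from the repository:

import torch
import torch.nn.functional as F

limit_a, limit_b = -0.1, 1.1   # stretch interval from the hard-concrete paper
beta = 2.0 / 3.0               # concrete temperature
qz_loga = torch.zeros(10)      # hypothetical location parameters

def quantile_concrete(eps):
    # Inverse CDF of the binary concrete, stretched to (limit_a, limit_b).
    y = torch.sigmoid((torch.log(eps) - torch.log(1 - eps) + qz_loga) / beta)
    return y * (limit_b - limit_a) + limit_a

eps = torch.rand(10).clamp(1e-6, 1 - 1e-6)
z = F.hardtanh(quantile_concrete(eps), min_val=0, max_val=1)  # hard-concrete gates in [0, 1]
print(z)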

Example 12: sample_weights

# Required module: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import hardtanh [as alias]
def sample_weights(self):
        z = self.quantile_concrete(self.get_eps(self.floatTensor(self.in_features)))
        mask = F.hardtanh(z, min_val=0, max_val=1)
        return mask.view(self.in_features, 1) * self.weights 
Author: AMLab-Amsterdam | Project: L0_regularization | Lines: 6 | Source: l0_layers.py

Example 13: test_hardtanh

# Required module: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import hardtanh [as alias]
def test_hardtanh(self):
        inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
        output = F.hardtanh(inp, min_val=-1., max_val=1., inplace=False) 
Author: NVIDIA | Project: apex | Lines: 5 | Source: test_pyprof_nvtx.py

Example 14: backward

# Required module: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import hardtanh [as alias]
def backward(ctx,
                 grad_output: torch.Tensor) -> torch.Tensor:
        return F.hardtanh(grad_output) 
Author: moskomule | Project: homura | Lines: 5 | Source: discretizations.py
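
This backward implements a straight-through estimator: the gradient of a non-differentiable discretization is replaced by the incoming gradient, clipped by hardtanh. A hedged, self-contained sketch of the full autograd.Function such a method could belong to (the sign forward is my assumption, not homura's code):

import torch
import torch.nn.functional as F

class BinarySign(torch.autograd.Function):
    """Straight-through estimator: sign() forward, clipped-identity backward."""

    @staticmethod
    def forward(ctx, x: torch.Tensor) -> torch.Tensor:
        return torch.sign(x)

    @staticmethod
    def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor:
        # Pass the gradient straight through, clipped to [-1, 1].
        return F.hardtanh(grad_output)

x = torch.randn(5, requires_grad=True)
BinarySign.apply(x).sum().backward()
print(x.grad)  # all ones: the incoming gradient of 1 passes through unclipped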

Example 15: sample

# Required module: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import hardtanh [as alias]
def sample(self, size=None):
        # sample a stretched variable and rectify it
        x_ = self._dist.sample(size=size)
        return F.hardtanh(x_, min_val=0., max_val=1.) 
Author: bastings | Project: interpretable_predictions | Lines: 6 | Source: kuma.py
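
The pattern here is stretch-and-rectify: sample from a base distribution whose support extends slightly past [0, 1], then clamp, which puts point mass at exactly 0 and 1. A hedged illustration with a uniform base standing in for the repository's stretched Kumaraswamy distribution:

import torch
import torch.nn.functional as F

low, high = -0.1, 1.1                       # stretched support (illustrative values)
x_ = torch.rand(1000) * (high - low) + low  # stand-in for self._dist.sample()
sample = F.hardtanh(x_, min_val=0., max_val=1.)

# Rectification converts the probability of landing outside [0, 1]
# into point mass at exactly 0 and exactly 1.
print((sample == 0).float().mean().item(), (sample == 1).float().mean().item())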


Note: The torch.nn.functional.hardtanh examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors, who retain copyright; distribution and use are subject to each project's License. Do not repost without permission.