This article collects typical usage examples of the torch.nn.functional.hardtanh method in Python. If you have been wondering what functional.hardtanh does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the module that contains it, torch.nn.functional.
Shown below are 15 code examples of functional.hardtanh, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
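For orientation before diving in: F.hardtanh(x, min_val, max_val) clamps its input elementwise, so it is equivalent to x.clamp(min_val, max_val); with bounds (0, 6) it reproduces relu6. A minimal standalone sketch:

import torch
import torch.nn.functional as F

x = torch.tensor([-3.0, -0.5, 0.5, 3.0])
print(F.hardtanh(x))                            # default bounds -1..1: tensor([-1.0, -0.5, 0.5, 1.0])
print(F.hardtanh(x, min_val=0.0, max_val=1.0))  # clipped ReLU: tensor([0.0, 0.0, 0.5, 1.0])
print(torch.allclose(F.hardtanh(x, 0.0, 6.0), x.clamp(0.0, 6.0)))  # True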
Example 1: forward
# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import hardtanh [as alias]
def forward(self, input, lengths=None):
"See :obj:`onmt.modules.EncoderBase.forward()`"
# (batch_size, 1, nfft, t)
# layer 1
input = self.batch_norm1(self.layer1(input[:, :, :, :]))
# (batch_size, 32, nfft/2, t/2)
input = F.hardtanh(input, 0, 20, inplace=True)
# (batch_size, 32, nfft/2/2, t/2)
# layer 2
input = self.batch_norm2(self.layer2(input))
# (batch_size, 32, nfft/2/2, t/2)
input = F.hardtanh(input, 0, 20, inplace=True)
batch_size = input.size(0)
length = input.size(3)
input = input.view(batch_size, -1, length)
input = input.transpose(0, 2).transpose(1, 2)
output, hidden = self.rnn(input)
return hidden, output
Example 2: add_mask_transformer
# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import hardtanh [as alias]
def add_mask_transformer(self, temperature=.66, hard_sigmoid=(-.1, 1.1)):
"""
hard_sigmoid:
False: use sigmoid only
True: hard thresholding
(a, b): hard thresholding on rescaled sigmoid
"""
self.temperature = temperature
self.hard_sigmoid = hard_sigmoid
view = -1, 1, 28, 28
if hard_sigmoid is False:
self.transform = lambda x: torch.sigmoid(x / temperature).view(*view)
elif hard_sigmoid is True:
self.transform = lambda x: F.hardtanh(
x / temperature, 0, 1).view(*view)
else:
a, b = hard_sigmoid
self.transform = lambda x: F.hardtanh(
torch.sigmoid(x / temperature) * (b - a) + a, 0, 1).view(*view)
# Must sub-class ConvGenerator to provide transform()
Example 3: add_mask_transformer
# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import hardtanh [as alias]
def add_mask_transformer(self, temperature=.66, hard_sigmoid=(-.1, 1.1)):
"""
hard_sigmoid:
False: use sigmoid only
True: hard thresholding
(a, b): hard thresholding on rescaled sigmoid
"""
self.temperature = temperature
self.hard_sigmoid = hard_sigmoid
if hard_sigmoid is False:
self.transform = lambda x: torch.sigmoid(x / temperature)
elif hard_sigmoid is True:
self.transform = lambda x: F.hardtanh(
x / temperature, 0, 1)
else:
a, b = hard_sigmoid
self.transform = lambda x: F.hardtanh(
torch.sigmoid(x / temperature) * (b - a) + a, 0, 1)
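Examples 2 and 3 implement the same three mask transforms (Example 2 additionally reshapes the result to 28x28 image form). The (a, b) branch is the classic hard sigmoid: the sigmoid output in (0, 1) is stretched to (a, b) and then clipped back to [0, 1], so the mask can saturate at exactly 0 or 1, which a plain sigmoid never does. A small sketch of the difference (values are illustrative):

import torch
import torch.nn.functional as F

temperature, (a, b) = 0.66, (-0.1, 1.1)
x = torch.tensor([-5.0, 0.0, 5.0])
soft = torch.sigmoid(x / temperature)  # strictly inside (0, 1)
hard = F.hardtanh(torch.sigmoid(x / temperature) * (b - a) + a, 0, 1)
print(soft)  # ~tensor([5.1e-04, 5.0e-01, 9.995e-01])
print(hard)  # tensor([0.0000, 0.5000, 1.0000])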
Example 4: aten_hardtanh
# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import hardtanh [as alias]
def aten_hardtanh(inputs, attributes, scope):
inp, min_val, max_val = inputs[:3]
    ctx = current_context()
    net = ctx.network
if ctx.is_tensorrt and has_trt_tensor(inputs):
        # clamp to [0, max_val]: apply ReLU, then take an element-wise MIN with max_val
        # (equivalent to relu6 when max_val == 6); both ops map onto native TensorRT layers
assert min_val == 0, "only support relu6"
layer = net.add_activation(inp, trt.ActivationType.RELU)
output = layer.get_output(0)
layer.name = scope + "/relu"
tensor = np.full([1] * len(inp.shape), max_val, dtype=np.float32)
trt_6 = ctx.network.add_constant([1] * len(inp.shape), tensor)
layer = ctx.network.add_elementwise(output, trt_6.get_output(0), trt.ElementWiseOperation.MIN)
output = layer.get_output(0)
layer.name = scope + "/elem_min"
output.name = scope + "/relu6"
return [output]
elif ctx.is_tvm and has_tvm_tensor(inputs):
raise NotImplementedError
return [F.hardtanh(inp, min_val, max_val)]
Example 5: aten_hardtanh_
# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import hardtanh [as alias]
def aten_hardtanh_(inputs, attributes, scope):
inp, min_val, max_val = inputs[:3]
    ctx = current_context()
    net = ctx.network
if ctx.is_tensorrt and has_trt_tensor(inputs):
        # clamp to [0, max_val]: apply ReLU, then element-wise MIN with max_val (relu6 when max_val == 6)
assert min_val == 0, "only support relu6"
layer = net.add_activation(inp, trt.ActivationType.RELU)
output = layer.get_output(0)
layer.name = scope + "/relu"
tensor = np.full([1] * len(inp.shape), max_val, dtype=np.float32)
trt_6 = ctx.network.add_constant([1] * len(inp.shape), tensor)
layer = ctx.network.add_elementwise(output, trt_6.get_output(0), trt.ElementWiseOperation.MIN)
output = layer.get_output(0)
layer.name = scope + "/elem_min"
output.name = scope + "/relu6"
return [output]
elif ctx.is_tvm and has_tvm_tensor(inputs):
raise NotImplementedError
return [F.hardtanh_(inp, min_val, max_val)]
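Both converters rely on the same decomposition: hardtanh with min_val == 0 equals min(relu(x), max_val), which maps onto TensorRT's native RELU activation and element-wise MIN layers. The equivalence is easy to sanity-check in plain PyTorch, independent of TensorRT:

import torch
import torch.nn.functional as F

x = torch.randn(4, 8) * 10
max_val = 6.0
decomposed = torch.minimum(F.relu(x), torch.tensor(max_val))
print(torch.allclose(decomposed, F.hardtanh(x, 0.0, max_val)))  # True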
Example 6: forward
# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import hardtanh [as alias]
def forward(self, input, lengths=None):
# (batch_size, 1, nfft, t)
# layer 1
input = self.batch_norm1(self.layer1(input[:, :, :, :]))
# (batch_size, 32, nfft/2, t/2)
input = F.hardtanh(input, 0, 20, inplace=True)
# (batch_size, 32, nfft/2/2, t/2)
# layer 2
input = self.batch_norm2(self.layer2(input))
# (batch_size, 32, nfft/2/2, t/2)
input = F.hardtanh(input, 0, 20, inplace=True)
batch_size = input.size(0)
length = input.size(3)
input = input.view(batch_size, -1, length)
input = input.transpose(0, 2).transpose(1, 2)
output, hidden = self.rnn(input)
return hidden, output
Example 7: forward
# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import hardtanh [as alias]
def forward(self, src, lengths=None):
"See :obj:`onmt.encoders.encoder.EncoderBase.forward()`"
# (batch_size, 1, nfft, t)
# layer 1
src = self.batch_norm1(self.layer1(src[:, :, :, :]))
# (batch_size, 32, nfft/2, t/2)
src = F.hardtanh(src, 0, 20, inplace=True)
# (batch_size, 32, nfft/2/2, t/2)
# layer 2
src = self.batch_norm2(self.layer2(src))
# (batch_size, 32, nfft/2/2, t/2)
src = F.hardtanh(src, 0, 20, inplace=True)
batch_size = src.size(0)
length = src.size(3)
src = src.view(batch_size, -1, length)
src = src.transpose(0, 2).transpose(1, 2)
output, hidden = self.rnn(src)
return hidden, output
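Examples 1, 6 and 7 are revisions of the same OpenNMT audio encoder. The view/transpose sequence at the end flattens the convolutional feature map (batch, channels, freq, time) into (time, batch, features), the layout torch.nn RNNs expect with batch_first=False. A shape-only sketch of that conversion (sizes are made up):

import torch

batch_size, channels, freq, t = 2, 32, 20, 50
x = torch.randn(batch_size, channels, freq, t)
x = x.view(batch_size, -1, t)           # (batch, channels * freq, time)
x = x.transpose(0, 2).transpose(1, 2)   # (time, batch, channels * freq)
print(x.shape)                          # torch.Size([50, 2, 640])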
Example 8: makeSegMask
# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import hardtanh [as alias]
def makeSegMask(img):
    frame_data = torch.FloatTensor(img) / 255.0
input_tensor = preprocess(frame_data.permute(2, 0, 1))
input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
# move the input and model to GPU for speed if available
if torch.cuda.is_available():
input_batch = input_batch.to('cuda')
with torch.no_grad():
output = model(input_batch)['out'][0]
segmentation = output.argmax(0)
    bgOut = output[0:1]  # background class channel
a = (1.0 - F.relu(torch.tanh(bgOut * 0.30 - 1.0))).pow(0.5) * 2.0
    people = segmentation.eq(people_class).float()
people.unsqueeze_(0).unsqueeze_(0)
for i in range(3):
people = F.conv2d(people, blur, stride=1, padding=1)
# combined_mask = F.hardtanh(a * b)
combined_mask = F.relu(F.hardtanh(a * (people.squeeze().pow(1.5)) ))
combined_mask = combined_mask.expand(1, 3, -1, -1)
res = (combined_mask * 255.0).cpu().squeeze().byte().permute(1, 2, 0).numpy()
return res
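makeSegMask depends on several globals defined elsewhere in its script: model, preprocess, people_class and blur. One plausible setup, assuming a torchvision DeepLabV3 backbone whose PASCAL VOC label set puts 'person' at index 15; every name and value below is an assumption, not taken from the original source:

import torch
import torchvision
from torchvision import transforms

# assumed setup; the original script may differ
model = torchvision.models.segmentation.deeplabv3_resnet101(pretrained=True).eval()
if torch.cuda.is_available():
    model = model.to('cuda')
preprocess = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225])
people_class = 15                    # 'person' in the PASCAL VOC classes used by torchvision
blur = torch.ones(1, 1, 3, 3) / 9.0  # 3x3 box blur used to feather the mask edges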
Example 9: forward
# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import hardtanh [as alias]
def forward(self, x):
from torch.nn import functional as F
return F.hardtanh(x, min_val=self.min_val, max_val=self.max_val)
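This one-line forward presumably lives in a small module wrapper around F.hardtanh that stores its clipping bounds at construction time. A minimal sketch of such a wrapper (the class name and constructor are assumptions):

import torch
from torch import nn
from torch.nn import functional as F

class HardtanhWrapper(nn.Module):
    # hypothetical module form of F.hardtanh with configurable bounds
    def __init__(self, min_val=-1.0, max_val=1.0):
        super().__init__()
        self.min_val = min_val
        self.max_val = max_val

    def forward(self, x):
        return F.hardtanh(x, min_val=self.min_val, max_val=self.max_val)

print(HardtanhWrapper(0.0, 6.0)(torch.randn(3)))  # behaves like relu6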
Example 10: forward
# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import hardtanh [as alias]
def forward(self, emb, parser_state):
emb_last, cum_gate = parser_state
ntimestep = emb.size(0)
emb_last = torch.cat([emb_last, emb], dim=0)
emb = emb_last.transpose(0, 1).transpose(1, 2) # bsz, ninp, ntimestep + nlookback
gates = self.gate(emb) # bsz, 2, ntimestep
gate = gates[:, 0, :]
gate_next = gates[:, 1, :]
cum_gate = torch.cat([cum_gate, gate], dim=1)
gate_hat = torch.stack([cum_gate[:, i:i + ntimestep] for i in range(self.nslots, 0, -1)],
dim=2) # bsz, ntimestep, nslots
if self.hard:
memory_gate = (F.hardtanh((gate[:, :, None] - gate_hat) / self.resolution * 2 + 1) + 1) / 2
else:
        memory_gate = torch.sigmoid(
            (gate[:, :, None] - gate_hat) / self.resolution * 10 + 5)  # bsz, ntimestep, nslots
memory_gate = torch.cumprod(memory_gate, dim=2) # bsz, ntimestep, nlookback+1
memory_gate = torch.unbind(memory_gate, dim=1)
if self.hard:
memory_gate_next = (F.hardtanh((gate_next[:, :, None] - gate_hat) / self.resolution * 2 + 1) + 1) / 2
else:
        memory_gate_next = torch.sigmoid(
            (gate_next[:, :, None] - gate_hat) / self.resolution * 10 + 5)  # bsz, ntimestep, nslots
memory_gate_next = torch.cumprod(memory_gate_next, dim=2) # bsz, ntimestep, nlookback+1
memory_gate_next = torch.unbind(memory_gate_next, dim=1)
return (memory_gate, memory_gate_next), gate, (emb_last[-self.nlookback:], cum_gate[:, -self.nslots:])
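The self.hard branch swaps the steep sigmoid for a piecewise-linear gate: (hardtanh(2d / resolution + 1) + 1) / 2 is exactly 0 for d <= -resolution, exactly 1 for d >= 0, and linear in between, so the gate can fully open or close while keeping exact gradients inside the transition band. A scalar sketch of the two variants (the resolution value is illustrative):

import torch
import torch.nn.functional as F

resolution = 0.1
d = torch.linspace(-0.2, 0.1, steps=7)  # stand-in for gate - gate_hat
hard_gate = (F.hardtanh(d / resolution * 2 + 1) + 1) / 2
soft_gate = torch.sigmoid(d / resolution * 10 + 5)
print(hard_gate)  # 0 for d <= -0.1, 1 for d >= 0, linear in between
print(soft_gate)  # smooth approximation of the same transition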
Example 11: sample_z
# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import hardtanh [as alias]
def sample_z(self, batch_size, sample=True):
"""Sample the hard-concrete gates for training and use a deterministic value for testing"""
if sample:
eps = self.get_eps(self.floatTensor(batch_size, self.in_features))
z = self.quantile_concrete(eps)
return F.hardtanh(z, min_val=0, max_val=1)
else: # mode
        pi = torch.sigmoid(self.qz_loga).view(1, self.in_features).expand(batch_size, self.in_features)
return F.hardtanh(pi * (limit_b - limit_a) + limit_a, min_val=0, max_val=1)
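Example 11, like Examples 12 and 15 below, rectifies a "stretched" binary-concrete sample into [0, 1]; this is the hard-concrete gate from Louizos et al.'s L0-regularization paper. A sketch of the pieces the class assumes, using the stretch limits commonly quoted for that method (limit_a = -0.1, limit_b = 1.1, temperature beta = 2/3; qz_loga is the learned log-alpha parameter):

import torch
import torch.nn.functional as F

limit_a, limit_b, beta = -0.1, 1.1, 2.0 / 3.0

def quantile_concrete(eps, qz_loga):
    # inverse CDF of the binary concrete, stretched to (limit_a, limit_b)
    y = torch.sigmoid((torch.log(eps) - torch.log(1 - eps) + qz_loga) / beta)
    return y * (limit_b - limit_a) + limit_a

qz_loga = torch.zeros(5)
eps = torch.rand(5).clamp(1e-6, 1 - 1e-6)  # uniform noise kept away from {0, 1}
z = F.hardtanh(quantile_concrete(eps, qz_loga), min_val=0, max_val=1)
print(z)  # gates in [0, 1] with point mass at exactly 0 and 1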
Example 12: sample_weights
# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import hardtanh [as alias]
def sample_weights(self):
z = self.quantile_concrete(self.get_eps(self.floatTensor(self.in_features)))
mask = F.hardtanh(z, min_val=0, max_val=1)
return mask.view(self.in_features, 1) * self.weights
Example 13: test_hardtanh
# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import hardtanh [as alias]
def test_hardtanh(self):
inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
output = F.hardtanh(inp, min_val=-1., max_val=1., inplace=False)
Example 14: backward
# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import hardtanh [as alias]
def backward(ctx,
grad_output: torch.Tensor) -> torch.Tensor:
return F.hardtanh(grad_output)
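Applying hardtanh to the incoming gradient is the classic straight-through estimator used in binarized networks: the forward pass runs a non-differentiable quantizer such as sign(), and the backward pass pretends it was the identity, clipped to [-1, 1]. A sketch of the full autograd.Function this backward likely belongs to (the sign() forward is an assumption; the source only shows backward):

import torch
import torch.nn.functional as F

class BinarizeSTE(torch.autograd.Function):
    # hypothetical straight-through estimator: sign() forward, clipped identity backward

    @staticmethod
    def forward(ctx, x: torch.Tensor) -> torch.Tensor:
        return torch.sign(x)

    @staticmethod
    def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor:
        return F.hardtanh(grad_output)  # clip the straight-through gradient to [-1, 1]

x = torch.randn(4, requires_grad=True)
BinarizeSTE.apply(x).sum().backward()
print(x.grad)  # every entry clipped to [-1, 1]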
Example 15: sample
# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import hardtanh [as alias]
def sample(self, size=None):
# sample a stretched variable and rectify it
x_ = self._dist.sample(size=size)
return F.hardtanh(x_, min_val=0., max_val=1.)