This article collects typical usage examples of Python's torch.nn.Sigmoid. If you are wondering what nn.Sigmoid does, how to use it, or want to see it in real code, the curated examples below may help; you can also explore the other members of the torch.nn module it belongs to.
The following 15 code examples of nn.Sigmoid are listed, sorted by popularity by default.
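As a quick reminder before the examples: nn.Sigmoid is a stateless module that applies the element-wise logistic function sigmoid(x) = 1 / (1 + exp(-x)), squashing any real input into (0, 1). A minimal self-contained demonstration:

import torch
from torch import nn

sigmoid = nn.Sigmoid()              # module form, usable inside nn.Sequential
x = torch.tensor([-2.0, 0.0, 2.0])
print(sigmoid(x))                   # tensor([0.1192, 0.5000, 0.8808])
print(torch.sigmoid(x))             # functional form, identical result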
Example 1: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import Sigmoid [as alias]
def __init__(self, input_size, n_channels, ngf, n_layers, activation='tanh'):
    super(ImageDecoder, self).__init__()
    ngf = ngf * (2 ** (n_layers - 2))
    # First block: project the input vector to a 4x4 feature map.
    layers = [nn.ConvTranspose2d(input_size, ngf, 4, 1, 0, bias=False),
              nn.BatchNorm2d(ngf),
              nn.ReLU(True)]
    # Each intermediate block doubles the spatial size and halves the channels.
    for i in range(1, n_layers - 1):
        layers += [nn.ConvTranspose2d(ngf, ngf // 2, 4, 2, 1, bias=False),
                   nn.BatchNorm2d(ngf // 2),
                   nn.ReLU(True)]
        ngf = ngf // 2
    layers += [nn.ConvTranspose2d(ngf, n_channels, 4, 2, 1, bias=False)]
    # Output activation: tanh for images in [-1, 1], sigmoid for images in [0, 1].
    if activation == 'tanh':
        layers += [nn.Tanh()]
    elif activation == 'sigmoid':
        layers += [nn.Sigmoid()]
    else:
        raise NotImplementedError
    self.main = nn.Sequential(*layers)
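The snippet shows only __init__; assuming the class's forward simply applies self.main (the usual pattern for such decoders), a hedged usage sketch with hypothetical sizes:

import torch

decoder = ImageDecoder(input_size=128, n_channels=3, ngf=64, n_layers=4,
                       activation='sigmoid')
z = torch.randn(8, 128, 1, 1)   # batch of latent vectors as 1x1 feature maps
images = decoder.main(z)        # -> (8, 3, 32, 32); each upsampling block doubles H and W
assert images.min() >= 0 and images.max() <= 1   # sigmoid output range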
Example 2: get_model
# Required import: from torch import nn [as alias]
# Or: from torch.nn import Sigmoid [as alias]
def get_model(load_weights=True):
    deepsea_cpu = nn.Sequential(  # Sequential
        nn.Conv2d(4, 320, (1, 8), (1, 1)),
        nn.Threshold(0, 1e-06),            # ReLU-like thresholding used by the converted model
        nn.MaxPool2d((1, 4), (1, 4)),
        nn.Dropout(0.2),
        nn.Conv2d(320, 480, (1, 8), (1, 1)),
        nn.Threshold(0, 1e-06),
        nn.MaxPool2d((1, 4), (1, 4)),
        nn.Dropout(0.2),
        nn.Conv2d(480, 960, (1, 8), (1, 1)),
        nn.Threshold(0, 1e-06),
        nn.Dropout(0.5),
        Lambda(lambda x: x.view(x.size(0), -1)),  # Reshape
        nn.Sequential(Lambda(lambda x: x.view(1, -1) if 1 == len(x.size()) else x), nn.Linear(50880, 925)),  # Linear
        nn.Threshold(0, 1e-06),
        nn.Sequential(Lambda(lambda x: x.view(1, -1) if 1 == len(x.size()) else x), nn.Linear(925, 919)),  # Linear
        nn.Sigmoid(),  # 919 independent binary predictions
    )
    if load_weights:
        deepsea_cpu.load_state_dict(torch.load('model_files/deepsea_cpu.pth'))
    return nn.Sequential(ReCodeAlphabet(), deepsea_cpu)
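Lambda and ReCodeAlphabet are project-specific helpers not shown here. Lambda is presumably a thin module that applies a stored function in forward; a minimal stand-in under that assumption (the layer sizes imply one-hot sequence inputs of shape (N, 4, 1, 1000), since the conv/pool stack leaves 960 channels x 53 positions = 50880 features):

from torch import nn

class Lambda(nn.Module):
    """Minimal stand-in (assumption) for the Lambda wrapper used above:
    it simply applies a stored function in forward."""
    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x):
        return self.fn(x)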
Example 3: get_seqpred_model
# Required import: from torch import nn [as alias]
# Or: from torch.nn import Sigmoid [as alias]
def get_seqpred_model(load_weights=True):
    deepsea_cpu = nn.Sequential(  # Sequential
        nn.Conv2d(4, 320, (1, 8), (1, 1)),
        nn.Threshold(0, 1e-06),
        nn.MaxPool2d((1, 4), (1, 4)),
        nn.Dropout(0.2),
        nn.Conv2d(320, 480, (1, 8), (1, 1)),
        nn.Threshold(0, 1e-06),
        nn.MaxPool2d((1, 4), (1, 4)),
        nn.Dropout(0.2),
        nn.Conv2d(480, 960, (1, 8), (1, 1)),
        nn.Threshold(0, 1e-06),
        nn.Dropout(0.5),
        Lambda(lambda x: x.view(x.size(0), -1)),  # Reshape
        nn.Sequential(Lambda(lambda x: x.view(1, -1) if 1 == len(x.size()) else x), nn.Linear(50880, 925)),  # Linear
        nn.Threshold(0, 1e-06),
        nn.Sequential(Lambda(lambda x: x.view(1, -1) if 1 == len(x.size()) else x), nn.Linear(925, 919)),  # Linear
        nn.Sigmoid(),
    )
    if load_weights:
        deepsea_cpu.load_state_dict(torch.load('model_files/deepsea_cpu.pth'))
    # Same network as Example 2, wrapped to average predictions over the
    # forward strand and its reverse complement.
    return nn.Sequential(ReCodeAlphabet(), ConcatenateRC(), deepsea_cpu, AverageRC())
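The only difference from Example 2 is the reverse-complement wrappers, whose implementations are not shown. A hedged sketch of their usual behaviour (assumption: ConcatenateRC stacks each sequence with its reverse complement along the batch axis; AverageRC averages the two halves of the predictions):

import torch
from torch import nn

class ConcatenateRC(nn.Module):
    # Assumption: flipping the channel axis complements an ACGT one-hot
    # encoding, and flipping the length axis reverses the sequence.
    def forward(self, x):                           # x: (N, 4, 1, L)
        return torch.cat([x, x.flip(1).flip(3)], dim=0)

class AverageRC(nn.Module):
    # Assumption: the first and second halves of the batch hold the
    # forward and reverse-complement predictions, respectively.
    def forward(self, x):
        n = x.size(0) // 2
        return 0.5 * (x[:n] + x[n:])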
Example 4: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import Sigmoid [as alias]
def __init__(self):
    super(LeNet, self).__init__()
    # Convolutional layers
    self.conv = nn.Sequential(
        nn.Conv2d(1, 6, 5),   # in_channels, out_channels, kernel_size
        nn.Sigmoid(),
        nn.MaxPool2d(2, 2),   # kernel_size, stride
        nn.Conv2d(6, 16, 5),
        nn.Sigmoid(),
        nn.MaxPool2d(2, 2)
    )
    # Fully connected layers
    self.fc = nn.Sequential(
        nn.Linear(16 * 4 * 4, 120),
        nn.Sigmoid(),
        nn.Linear(120, 84),
        nn.Sigmoid(),
        nn.Linear(84, 10)
    )
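This is the classic LeNet, using nn.Sigmoid as the hidden activation rather than as an output layer. Only __init__ is shown; assuming the usual forward (apply self.conv, flatten, apply self.fc), this sketch checks the shapes on a 28x28 input:

import torch

net = LeNet()
img = torch.randn(1, 1, 28, 28)                 # e.g. a Fashion-MNIST image
feat = net.conv(img)                            # -> (1, 16, 4, 4)
logits = net.fc(feat.view(img.shape[0], -1))    # -> (1, 10)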
Example 5: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import Sigmoid [as alias]
def __init__(self, config):
    super().__init__()
    self.config = config
    self.relu = nn.LeakyReLU(self.config.relu_slope, inplace=True)
    # DCGAN-style discriminator: each stride-2 conv halves the spatial size.
    self.conv1 = nn.Conv2d(in_channels=self.config.input_channels, out_channels=self.config.num_filt_d, kernel_size=4, stride=2, padding=1, bias=False)
    self.conv2 = nn.Conv2d(in_channels=self.config.num_filt_d, out_channels=self.config.num_filt_d * 2, kernel_size=4, stride=2, padding=1, bias=False)
    self.batch_norm1 = nn.BatchNorm2d(self.config.num_filt_d * 2)
    self.conv3 = nn.Conv2d(in_channels=self.config.num_filt_d * 2, out_channels=self.config.num_filt_d * 4, kernel_size=4, stride=2, padding=1, bias=False)
    self.batch_norm2 = nn.BatchNorm2d(self.config.num_filt_d * 4)
    self.conv4 = nn.Conv2d(in_channels=self.config.num_filt_d * 4, out_channels=self.config.num_filt_d * 8, kernel_size=4, stride=2, padding=1, bias=False)
    self.batch_norm3 = nn.BatchNorm2d(self.config.num_filt_d * 8)
    self.conv5 = nn.Conv2d(in_channels=self.config.num_filt_d * 8, out_channels=1, kernel_size=4, stride=1, padding=0, bias=False)
    self.out = nn.Sigmoid()  # maps the final score to a real/fake probability
    self.apply(weights_init)
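The forward pass is not part of the snippet; for 64x64 inputs, the standard DCGAN discriminator pattern would be the following (a sketch, not the project's confirmed code):

def forward(self, x):                                   # x: (N, C, 64, 64)
    out = self.relu(self.conv1(x))                      # -> 32x32
    out = self.relu(self.batch_norm1(self.conv2(out)))  # -> 16x16
    out = self.relu(self.batch_norm2(self.conv3(out)))  # -> 8x8
    out = self.relu(self.batch_norm3(self.conv4(out)))  # -> 4x4
    return self.out(self.conv5(out)).view(-1)           # probability per image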
Example 6: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import Sigmoid [as alias]
def __init__(self, cf, conv):
    super(Mask, self).__init__()
    self.pool_size = cf.mask_pool_size
    self.pyramid_levels = cf.pyramid_levels
    self.dim = conv.dim
    self.conv1 = conv(cf.end_filts, cf.end_filts, ks=3, stride=1, pad=1, norm=cf.norm, relu=cf.relu)
    self.conv2 = conv(cf.end_filts, cf.end_filts, ks=3, stride=1, pad=1, norm=cf.norm, relu=cf.relu)
    self.conv3 = conv(cf.end_filts, cf.end_filts, ks=3, stride=1, pad=1, norm=cf.norm, relu=cf.relu)
    self.conv4 = conv(cf.end_filts, cf.end_filts, ks=3, stride=1, pad=1, norm=cf.norm, relu=cf.relu)
    # 2D or 3D transposed conv, depending on the conv factory's dimensionality.
    if conv.dim == 2:
        self.deconv = nn.ConvTranspose2d(cf.end_filts, cf.end_filts, kernel_size=2, stride=2)
    else:
        self.deconv = nn.ConvTranspose3d(cf.end_filts, cf.end_filts, kernel_size=2, stride=2)
    self.relu = nn.ReLU(inplace=True) if cf.relu == 'relu' else nn.LeakyReLU(inplace=True)
    self.conv5 = conv(cf.end_filts, cf.head_classes, ks=1, stride=1, relu=None)
    self.sigmoid = nn.Sigmoid()
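A hedged sketch of the matching forward pass, assumed from the layer layout (four convs, an upsampling deconv, then a 1x1 conv squashed by the sigmoid into per-pixel mask probabilities):

def forward(self, x):
    x = self.conv1(x)
    x = self.conv2(x)
    x = self.conv3(x)
    x = self.conv4(x)
    x = self.relu(self.deconv(x))       # upsample ROI features 2x
    return self.sigmoid(self.conv5(x))  # per-class mask probabilities in (0, 1)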
Example 7: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import Sigmoid [as alias]
def __init__(self, lang, shared_emb, vocab_size, hidden_size, dropout, slots, nb_gate):
    super(Generator, self).__init__()
    self.vocab_size = vocab_size
    self.lang = lang
    self.embedding = shared_emb
    self.dropout_layer = nn.Dropout(dropout)
    self.gru = nn.GRU(hidden_size, hidden_size, dropout=dropout)
    self.nb_gate = nb_gate
    self.hidden_size = hidden_size
    self.W_ratio = nn.Linear(3 * hidden_size, 1)
    self.softmax = nn.Softmax(dim=1)
    self.sigmoid = nn.Sigmoid()
    self.slots = slots
    self.W_gate = nn.Linear(hidden_size, nb_gate)
    # Create independent slot embeddings: each "domain-slot" string is split
    # into a domain token and a slot-name token, each with its own index.
    self.slot_w2i = {}
    for slot in self.slots:
        if slot.split("-")[0] not in self.slot_w2i.keys():
            self.slot_w2i[slot.split("-")[0]] = len(self.slot_w2i)
        if slot.split("-")[1] not in self.slot_w2i.keys():
            self.slot_w2i[slot.split("-")[1]] = len(self.slot_w2i)
    self.Slot_emb = nn.Embedding(len(self.slot_w2i), hidden_size)
    self.Slot_emb.weight.data.normal_(0, 0.1)
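A hedged sketch of how these independent slot embeddings are typically combined at decode time (assumption: the domain part and the slot-name part are embedded separately and summed; `generator` is a hypothetical instance):

import torch

domain, slot_name = "hotel-area".split("-")        # hypothetical slot
d_idx = torch.tensor([generator.slot_w2i[domain]])
s_idx = torch.tensor([generator.slot_w2i[slot_name]])
slot_vec = generator.Slot_emb(d_idx) + generator.Slot_emb(s_idx)  # (1, hidden_size)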
Example 8: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import Sigmoid [as alias]
def __init__(self, input_size, inner_linear, inner_groups=1, layer_norm=True, weight_norm=False, dropout=0, batch_first=True):
    super(AverageNetwork, self).__init__()
    wn_func = wn if weight_norm else lambda x: x
    self.input_size = input_size
    self.time_step = 0
    self.batch_dim, self.time_dim = (0, 1) if batch_first else (1, 0)
    # Sigmoid gates over the concatenation of the input and the running average.
    self.gates = nn.Sequential(
        wn_func(nn.Linear(2 * input_size, 2 * input_size)),
        nn.Sigmoid()
    )
    if layer_norm:
        self.lnorm = nn.LayerNorm(input_size)
    # Note: Linear (capitalized) is the project's grouped linear layer, not nn.Linear.
    self.fc = nn.Sequential(wn_func(Linear(input_size, inner_linear, groups=inner_groups)),
                            nn.ReLU(inplace=True),
                            nn.Dropout(dropout),
                            wn_func(Linear(inner_linear, input_size, groups=inner_groups)))
Author: nadavbh12 | Project: Character-Level-Language-Modeling-with-Deeper-Self-Attention-pytorch | Lines: 18 | Source: transformer_blocks.py
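The sigmoid gates match the gating layer of an average-attention block (Zhang et al.'s Average Attention Network); a hedged sketch of one step under that reading (the running average and the gated mix are assumptions, not the project's confirmed code):

import torch

def aan_gate(self, x, cum_avg):
    # x, cum_avg: (batch, input_size); cum_avg is the running mean of past inputs.
    i, f = self.gates(torch.cat([x, cum_avg], dim=-1)).chunk(2, dim=-1)
    return i * x + f * cum_avg      # gated combination; both gates lie in (0, 1)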
Example 9: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import Sigmoid [as alias]
def __init__(self, in_planes, out_planes,
             reduction=1, norm_layer=nn.BatchNorm2d):
    super(FeatureFusion, self).__init__()
    self.conv_1x1 = ConvBnRelu(in_planes, out_planes, 1, 1, 0,
                               has_bn=True, norm_layer=norm_layer,
                               has_relu=True, has_bias=False)
    # Squeeze-and-excitation-style channel attention ending in a sigmoid.
    self.channel_attention = nn.Sequential(
        nn.AdaptiveAvgPool2d(1),
        ConvBnRelu(out_planes, out_planes // reduction, 1, 1, 0,
                   has_bn=False, norm_layer=norm_layer,
                   has_relu=True, has_bias=False),
        ConvBnRelu(out_planes // reduction, out_planes, 1, 1, 0,
                   has_bn=False, norm_layer=norm_layer,
                   has_relu=False, has_bias=False),
        nn.Sigmoid()
    )
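A hedged sketch of the usual feature-fusion forward (concatenate the two input branches, fuse with the 1x1 conv, then re-weight channels with the sigmoid attention while keeping a residual path):

import torch

def forward(self, x1, x2):
    fm = self.conv_1x1(torch.cat([x1, x2], dim=1))  # fused feature map
    se = self.channel_attention(fm)                 # per-channel weights in (0, 1)
    return fm + fm * se                             # attention-reweighted residual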
Example 10: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import Sigmoid [as alias]
def __init__(self, inplanes, kernel=3, reduction=16, with_padding=False):
    super(GDWSe2d, self).__init__()
    if with_padding:
        padding = kernel // 2
    else:
        padding = 0
    # Global depthwise convolution (groups == channels).
    self.globle_dw = nn.Conv2d(inplanes, inplanes, kernel_size=kernel, padding=padding, stride=1,
                               groups=inplanes, bias=False)
    self.bn = nn.BatchNorm2d(inplanes)
    self.relu = nn.ReLU(inplace=True)
    self.avg_pool = nn.AdaptiveAvgPool2d(1)
    # Squeeze-and-excitation bottleneck ending in a sigmoid gate.
    self.fc = nn.Sequential(
        nn.Linear(inplanes, inplanes // reduction),
        nn.ReLU(inplace=True),
        nn.Linear(inplanes // reduction, inplanes),
        nn.Sigmoid()
    )
    self._init_weights()
self._init_weights()
示例11: __init__
# 需要導入模塊: from torch import nn [as 別名]
# 或者: from torch.nn import Sigmoid [as 別名]
def __init__(self, d_in, d_hid, dropout=0.0, num_layers=1):
super().__init__()
self.d_hid = d_hid
self.d_in = d_in
self.num_layers = num_layers
self.dropout = nn.Dropout(dropout)
self.input_feed = True
if self.input_feed:
d_in += 1 * d_hid
self.rnn = LSTMDecoder(self.num_layers, d_in, d_hid, dropout)
self.context_attn = LSTMDecoderAttention(d_hid, dot=True)
self.question_attn = LSTMDecoderAttention(d_hid, dot=True)
self.vocab_pointer_switch = nn.Sequential(Feedforward(2 * self.d_hid + d_in, 1), nn.Sigmoid())
self.context_question_switch = nn.Sequential(Feedforward(2 * self.d_hid + d_in, 1), nn.Sigmoid())
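Because each switch ends in nn.Sigmoid, it emits a single scalar in (0, 1). A hedged sketch of how such switches typically blend distributions in pointer-generator decoders (all names here are illustrative, not the project's):

import torch

switch = self.vocab_pointer_switch(
    torch.cat([context_state, rnn_state, decoder_input], dim=-1))
p_final = switch * p_vocab + (1 - switch) * p_pointer   # convex mixture of distributions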
Example 12: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import Sigmoid [as alias]
def __init__(self, latent_size, output_size):
    super().__init__()
    self.fc1 = nn.Linear(latent_size, 100)
    self.relu1 = nn.LeakyReLU(0.2)
    self.fc2 = nn.Linear(100, 50)
    self.relu2 = nn.LeakyReLU(0.2)
    self.fc3 = nn.Linear(50, output_size)
    self.sigmoid = nn.Sigmoid()
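Only __init__ is shown; the natural forward for this stack would be (a sketch):

def forward(self, z):
    h = self.relu1(self.fc1(z))
    h = self.relu2(self.fc2(h))
    return self.sigmoid(self.fc3(h))   # squashes the output into (0, 1)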
Example 13: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import Sigmoid [as alias]
def __init__(self, encoder, decoder, src_embed, tgt_embed, generator, position_layer, model_size, latent_size):
    super(EncoderDecoder, self).__init__()
    self.encoder = encoder
    self.decoder = decoder
    self.src_embed = src_embed
    self.tgt_embed = tgt_embed
    self.generator = generator
    self.position_layer = position_layer
    self.model_size = model_size
    self.latent_size = latent_size
    self.sigmoid = nn.Sigmoid()
    # self.memory2latent = nn.Linear(self.model_size, self.latent_size)
    # self.latent2memory = nn.Linear(self.latent_size, self.model_size)
Example 14: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import Sigmoid [as alias]
def __init__(self, input_size, hidden_size, depth):
    super(LSTM, self).__init__()
    self.hidden_size = hidden_size
    self.input_size = input_size
    self.depth = depth
    # Input, output, and forget gates: each a linear layer plus a sigmoid.
    self.W_i = nn.Sequential(nn.Linear(input_size + hidden_size, hidden_size), nn.Sigmoid())
    self.W_o = nn.Sequential(nn.Linear(input_size + hidden_size, hidden_size), nn.Sigmoid())
    self.W_f = nn.Sequential(nn.Linear(input_size + hidden_size, hidden_size), nn.Sigmoid())
    # Candidate cell state uses tanh rather than sigmoid.
    self.W = nn.Sequential(nn.Linear(input_size + hidden_size, hidden_size), nn.Tanh())
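A hedged sketch of one recurrent step built from these gate networks (assumption: a standard LSTM update):

import torch

def step(self, x, h, c):
    xh = torch.cat([x, h], dim=-1)
    i, o, f = self.W_i(xh), self.W_o(xh), self.W_f(xh)  # sigmoid gates in (0, 1)
    c = f * c + i * self.W(xh)                          # gated tanh candidate
    h = o * torch.tanh(c)
    return h, c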
Example 15: r_ggnn
# Required import: from torch import nn [as alias]
# Or: from torch.nn import Sigmoid [as alias]
def r_ggnn(self, h):
    # h is a list of per-layer node states; h[0] holds the initial states and
    # h[-1] the final ones, each of shape (num_graphs, num_nodes, features).
    # Variable is the legacy torch.autograd wrapper; a plain tensor suffices in modern PyTorch.
    aux = Variable(torch.Tensor(h[0].size(0), self.args['out']).type_as(h[0].data).zero_())
    # For each graph in the batch
    for i in range(h[0].size(0)):
        nn_res = nn.Sigmoid()(self.learn_modules[0](torch.cat([h[0][i, :, :], h[-1][i, :, :]], 1))) * \
            self.learn_modules[1](h[-1][i, :, :])
        # Mask out virtual (zero-padded) nodes
        nn_res = (torch.sum(h[0][i, :, :], 1).expand_as(nn_res) > 0).type_as(nn_res) * nn_res
        aux[i, :] = torch.sum(nn_res, 0)
    return aux
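For reference, this loop computes the graph-level readout of gated graph neural networks (Li et al., 2016), with nn.Sigmoid acting as a soft node-selection gate. Writing h_v^(0) for a node's initial state and h_v^(T) for its final state, and letting i and j stand for self.learn_modules[0] and self.learn_modules[1]:

R = \sum_{v \in V} \sigma\left( i\left( [h_v^{(T)}, h_v^{(0)}] \right) \right) \odot j\left( h_v^{(T)} \right)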