This article collects typical usage examples of torch.nn.LogSoftmax in Python. If you are wondering what nn.LogSoftmax does, how to use it, or what it looks like in practice, the curated code examples below may help. You can also explore usage examples of other members of the torch.nn module.
Below are 15 code examples of nn.LogSoftmax, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
Example 1: __init__
# Required imports: from torch import nn [as alias]
# Or: from torch.nn import LogSoftmax [as alias]
def __init__(self, n_classes=20, sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend='resnet34',
             pretrained=True):
    super().__init__()
    self.feats = getattr(extractors, backend)(pretrained)
    self.psp = PSPModule(psp_size, 1024, sizes)
    self.drop_1 = nn.Dropout2d(p=0.3)
    self.up_1 = PSPUpsample(1024, 256)
    self.up_2 = PSPUpsample(256, 64)
    self.up_3 = PSPUpsample(64, 64)
    self.drop_2 = nn.Dropout2d(p=0.15)
    self.final = nn.Sequential(
        nn.Conv2d(64, n_classes, kernel_size=1),
        nn.LogSoftmax(dim=1)  # per-pixel log-probabilities over the class axis
    )
    self.classifier = nn.Sequential(
        nn.Linear(deep_features_size, 256),
        nn.ReLU(),
        nn.Linear(256, n_classes)
    )
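The part relevant to LogSoftmax here is the segmentation head: a 1x1 convolution maps the 64 decoder channels to n_classes score maps, and nn.LogSoftmax(dim=1) normalizes across the channel axis, so each pixel carries log-probabilities ready for nn.NLLLoss. A minimal, self-contained sketch of just that head, with a fabricated feature tensor:

import torch
from torch import nn

features = torch.randn(2, 64, 48, 48)   # fabricated decoder output: (N, C, h, w)
head = nn.Sequential(
    nn.Conv2d(64, 20, kernel_size=1),   # 20 classes, matching n_classes above
    nn.LogSoftmax(dim=1),               # normalize over the class (channel) axis
)
log_probs = head(features)              # shape: (2, 20, 48, 48)
print(log_probs.exp().sum(dim=1)[0, 0, 0])  # ~1.0: a proper distribution per pixel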
Example 2: act_fun
# Required imports: from torch import nn [as alias]
# Or: from torch.nn import LogSoftmax [as alias]
def act_fun(act_type):
    if act_type == "relu":
        return nn.ReLU()
    if act_type == "tanh":
        return nn.Tanh()
    if act_type == "sigmoid":
        return nn.Sigmoid()
    if act_type == "leaky_relu":
        return nn.LeakyReLU(0.2)
    if act_type == "elu":
        return nn.ELU()
    if act_type == "softmax":
        return nn.LogSoftmax(dim=1)
    if act_type == "linear":
        return nn.LeakyReLU(1)  # initialized like this, but not used in forward!
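Note that the "softmax" key returns nn.LogSoftmax rather than nn.Softmax, presumably because the downstream loss expects log-probabilities, and "linear" is nn.LeakyReLU(1), which is the identity since a slope of 1 leaves negative values unchanged. A quick sketch of how such a factory is consumed, assuming act_fun from this example is in scope:

import torch

act = act_fun("softmax")            # actually nn.LogSoftmax(dim=1)
scores = torch.randn(4, 10)         # batch of 4, 10 classes
log_probs = act(scores)
print(log_probs.exp().sum(dim=1))   # each row sums to ~1.0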
Example 3: __init__
# Required imports: from torch import nn [as alias]
# Or: from torch.nn import LogSoftmax [as alias]
def __init__(self, num_inputs=None,
             num_spks=None,
             hidden_size=2048,
             z_bnorm=False,
             name='MLP'):
    super().__init__(name=name, max_ckpts=1000)
    if num_spks is None:
        raise ValueError('Please specify a number of spks.')
    if z_bnorm:
        # apply z-norm to the input
        self.z_bnorm = nn.BatchNorm1d(frontend.emb_dim, affine=False)
    self.model = nn.Sequential(
        nn.Conv1d(num_inputs, hidden_size, 1),
        nn.LeakyReLU(),
        nn.BatchNorm1d(hidden_size),
        nn.Conv1d(hidden_size, num_spks, 1),
        nn.LogSoftmax(dim=1)  # normalize over the speaker (channel) axis
    )
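Because the head ends in nn.Conv1d followed by nn.LogSoftmax(dim=1), the model emits per-frame speaker log-probabilities: dim=1 is the channel (speaker) axis of a (batch, num_spks, time) tensor, so normalization happens independently at every time step. A sketch with fabricated sizes:

import torch
from torch import nn

frames = torch.randn(8, 256, 100)   # fabricated (batch, features, time) input
head = nn.Sequential(
    nn.Conv1d(256, 10, 1),          # per-frame linear map to 10 speakers
    nn.LogSoftmax(dim=1),           # normalize over the speaker axis
)
out = head(frames)                  # (8, 10, 100)
print(out.exp().sum(dim=1)[0, :3])  # ~1.0 at each frame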
Example 4: __init__
# Required imports: from torch import nn [as alias]
# Or: from torch.nn import LogSoftmax [as alias]
def __init__(self,
             n_kernels,
             in_feats,
             hiddens,
             out_feats):
    super(MoNet, self).__init__()
    self.pool = nn.MaxPool1d(2)
    self.layers = nn.ModuleList()
    self.readout = MaxPooling()
    # Input layer
    self.layers.append(
        GMMConv(in_feats, hiddens[0], 2, n_kernels))
    # Hidden layers
    for i in range(1, len(hiddens)):
        self.layers.append(GMMConv(hiddens[i - 1], hiddens[i], 2, n_kernels))
    self.cls = nn.Sequential(
        nn.Linear(hiddens[-1], out_feats),
        nn.LogSoftmax(dim=-1)  # explicit dim; the implicit-dim form is deprecated
    )
Example 5: log_prob
# Required imports: from torch import nn [as alias]
# Or: from torch.nn import LogSoftmax [as alias]
def log_prob(self, w_in):
    lsm = nn.LogSoftmax(dim=1).cuda()
    head_out = self.head(w_in)
    batch_size = head_out.size(0)
    prob = torch.zeros(batch_size, self.cutoff[-1]).cuda()
    lsm_head = lsm(head_out)
    # Copy the head's log-probabilities for the shortlist.
    prob.narrow(1, 0, self.output_size).add_(lsm_head.narrow(1, 0, self.output_size).data)
    # Tail clusters: offset each cluster's log-probs by the head's cluster score.
    for i in range(len(self.tail)):
        pos = self.cutoff[i]
        i_size = self.cutoff[i + 1] - pos
        buffer = lsm_head.narrow(1, self.cutoff[0] + i, 1)
        buffer = buffer.expand(batch_size, i_size)
        lsm_tail = lsm(self.tail[i](w_in))
        prob.narrow(1, pos, i_size).copy_(buffer.data).add_(lsm_tail.data)
    return prob
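This hand-rolled adaptive softmax predates the built-in module; current PyTorch ships nn.AdaptiveLogSoftmaxWithLoss, whose log_prob method likewise returns the full (batch, n_classes) matrix of log-probabilities. A minimal sketch with fabricated sizes:

import torch
from torch import nn

asm = nn.AdaptiveLogSoftmaxWithLoss(in_features=64, n_classes=1000,
                                    cutoffs=[100, 500])  # fabricated sizes
hidden = torch.randn(16, 64)
log_probs = asm.log_prob(hidden)         # (16, 1000)
print(log_probs.exp().sum(dim=1)[:3])    # each row sums to ~1.0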
Example 6: __init__
# Required imports: from torch import nn [as alias]
# Or: from torch.nn import LogSoftmax [as alias]
def __init__(self, config, vocabulary):
    super().__init__()
    self.config = config
    self.word_embed = nn.Embedding(
        len(vocabulary),
        config["word_embedding_size"],
        padding_idx=vocabulary.PAD_INDEX,
    )
    self.answer_rnn = nn.LSTM(
        config["word_embedding_size"],
        config["lstm_hidden_size"],
        config["lstm_num_layers"],
        batch_first=True,
        dropout=config["dropout"],
    )
    self.lstm_to_words = nn.Linear(
        self.config["lstm_hidden_size"], len(vocabulary)
    )
    self.dropout = nn.Dropout(p=config["dropout"])
    self.logsoftmax = nn.LogSoftmax(dim=-1)
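Keeping nn.LogSoftmax(dim=-1) as a module attribute and normalizing over the last axis is convenient for decoders, because the vocabulary is always the trailing dimension regardless of how many leading dimensions (batch, rounds, sequence length) the logits carry. A small sketch with fabricated shapes:

import torch
from torch import nn

logsoftmax = nn.LogSoftmax(dim=-1)
logits = torch.randn(2, 10, 20, 500)    # fabricated (batch, rounds, seq_len, vocab)
word_log_probs = logsoftmax(logits)     # normalized over the vocabulary axis
print(word_log_probs.exp().sum(dim=-1)[0, 0, 0])  # ~1.0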
Example 7: __init__
# Required imports: from torch import nn [as alias]
# Or: from torch.nn import LogSoftmax [as alias]
def __init__(self, input_size, hidden_size, output_size):
    super(RNN, self).__init__()
    self.hidden_size = hidden_size
    # n_categories is assumed to be defined at module scope.
    self.i2h = nn.Linear(n_categories + input_size + hidden_size, hidden_size)
    self.i2o = nn.Linear(n_categories + input_size + hidden_size, output_size)
    self.o2o = nn.Linear(hidden_size + output_size, output_size)
    self.dropout = nn.Dropout(0.1)
    self.softmax = nn.LogSoftmax(dim=1)
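This __init__ matches the classic character-level RNN from the PyTorch name-generation tutorial; a sketch of the forward step consistent with the layers above (not necessarily the original) concatenates the category, input, and hidden vectors before projecting and applying LogSoftmax:

import torch

def forward(self, category, input, hidden):
    # Concatenate conditioning category, current input, and previous hidden state.
    combined = torch.cat((category, input, hidden), 1)
    hidden = self.i2h(combined)
    output = self.i2o(combined)
    output = self.o2o(torch.cat((hidden, output), 1))
    output = self.dropout(output)
    output = self.softmax(output)  # log-probabilities over the output classes
    return output, hidden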
Example 8: __init__
# Required imports: from torch import nn [as alias]
# Or: from torch.nn import LogSoftmax [as alias]
def __init__(self, name, **params):
    super().__init__()
    if name is None or name == 'identity':
        self.activation = nn.Identity(**params)
    elif name == 'sigmoid':
        self.activation = nn.Sigmoid()
    elif name == 'softmax2d':
        self.activation = nn.Softmax(dim=1, **params)
    elif name == 'softmax':
        self.activation = nn.Softmax(**params)
    elif name == 'logsoftmax':
        self.activation = nn.LogSoftmax(**params)
    elif name == 'argmax':
        self.activation = ArgMax(**params)
    elif name == 'argmax2d':
        self.activation = ArgMax(dim=1, **params)
    elif callable(name):
        self.activation = name(**params)
    else:
        raise ValueError(
            'Activation should be callable or one of: identity/sigmoid/softmax/'
            'softmax2d/logsoftmax/argmax/argmax2d/None; got {}'.format(name)
        )
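A sketch of how the selector is used, assuming the Activation class from this example is in scope; the 'logsoftmax' branch forwards its keyword arguments straight to nn.LogSoftmax:

import torch

act = Activation('logsoftmax', dim=1)  # params are passed through to nn.LogSoftmax
x = torch.randn(4, 3, 8, 8)            # e.g. per-pixel class scores
y = act.activation(x)                  # log-probabilities along dim 1

(The module's forward, not shown in this excerpt, presumably just applies self.activation.)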
Example 9: __init__
# Required imports: from torch import nn [as alias]
# Or: from torch.nn import LogSoftmax [as alias]
def __init__(self, n_classes=21, sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend='resnet18',
             pretrained=False):
    super(PSPNet, self).__init__()
    self.feats = getattr(extractors, backend)(pretrained)
    self.psp = PSPModule(psp_size, 1024, sizes)
    self.drop_1 = nn.Dropout2d(p=0.3)
    self.up_1 = PSPUpsample(1024, 256)
    self.up_2 = PSPUpsample(256, 64)
    self.up_3 = PSPUpsample(64, 64)
    self.drop_2 = nn.Dropout2d(p=0.15)
    self.final = nn.Sequential(
        nn.Conv2d(64, 32, kernel_size=1),  # note: this variant maps to 32 channels, not n_classes
        nn.LogSoftmax(dim=1)  # explicit dim; the implicit-dim form is deprecated
    )
    self.classifier = nn.Sequential(
        nn.Linear(deep_features_size, 256),
        nn.ReLU(),
        nn.Linear(256, n_classes)
    )
Example 10: __init__
# Required imports: from torch import nn [as alias]
# Or: from torch.nn import LogSoftmax [as alias]
def __init__(self, in_features, out_features, num_units=2048):
    super(BinMNIST, self).__init__()
    self.linear1 = LinearBin(in_features, num_units)
    self.norm1 = nn.BatchNorm1d(num_units, eps=1e-4, momentum=0.15)
    self.linear2 = LinearBin(num_units, num_units)
    self.norm2 = nn.BatchNorm1d(num_units, eps=1e-4, momentum=0.15)
    self.linear3 = LinearBin(num_units, num_units)
    self.norm3 = nn.BatchNorm1d(num_units, eps=1e-4, momentum=0.15)
    self.linear4 = LinearBin(num_units, out_features)
    self.norm4 = nn.BatchNorm1d(out_features, eps=1e-4, momentum=0.15)
    self.activation = nn.ReLU()
    self.act_end = nn.LogSoftmax(dim=1)  # explicit dim; the implicit-dim form is deprecated
Example 11: forward
# Required imports: from torch import nn [as alias]
# Or: from torch.nn import LogSoftmax [as alias]
def forward(self, g, h_in, e):
    h = []
    # Pad node states up to the larger hidden dimension d.
    h_t = torch.cat([h_in, Variable(torch.Tensor(h_in.size(0), h_in.size(1), self.args['out'] - h_in.size(2)).type_as(h_in.data).zero_())], 2)
    h.append(h_t.clone())
    # Layers
    for t in range(0, self.n_layers):
        h_t = Variable(torch.zeros(h[0].size(0), h[0].size(1), h[0].size(2)).type_as(h_in.data))
        # Apply one layer pass (Message + Update)
        for v in range(0, h_in.size(1)):
            m = self.m[0].forward(h[t][:, v, :], h[t], e[:, v, :])
            # Nodes without an edge set their message to 0.
            m = g[:, v, :, None].expand_as(m) * m
            m = torch.sum(m, 1)
            # Update
            h_t[:, v, :] = self.u[0].forward(h[t][:, v, :], m)
        # Delete virtual nodes
        h_t = (torch.sum(torch.abs(h_in), 2).expand_as(h_t) > 0).type_as(h_t) * h_t
        h.append(h_t.clone())
    # Readout
    res = self.r.forward(h)
    if self.type == 'classification':
        res = nn.LogSoftmax(dim=1)(res)  # explicit dim; the implicit-dim form is deprecated
    return res
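Constructing an nn.LogSoftmax module inside forward works, but for a one-off application the functional form is the more idiomatic choice. A sketch of the equivalent call:

import torch
import torch.nn.functional as F

res = torch.randn(4, 10)               # fabricated readout output
log_probs = F.log_softmax(res, dim=1)  # same result as nn.LogSoftmax(dim=1)(res)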
Example 12: forward
# Required imports: from torch import nn [as alias]
# Or: from torch.nn import LogSoftmax [as alias]
def forward(self, g, h_in, e):
    h = []
    # Pad node states up to the larger hidden dimension d.
    h_t = torch.cat([h_in, Variable(
        torch.zeros(h_in.size(0), h_in.size(1), self.args['out'] - h_in.size(2)).type_as(h_in.data))], 2)
    h.append(h_t.clone())
    # Layers
    for t in range(0, self.n_layers):
        e_aux = e.view(-1, e.size(3))
        h_aux = h[t].view(-1, h[t].size(2))
        m = self.m[0].forward(h[t], h_aux, e_aux)
        m = m.view(h[0].size(0), h[0].size(1), -1, m.size(1))
        # Nodes without an edge set their message to 0.
        m = torch.unsqueeze(g, 3).expand_as(m) * m
        m = torch.squeeze(torch.sum(m, 1))
        h_t = self.u[0].forward(h[t], m)
        # Delete virtual nodes
        h_t = (torch.sum(h_in, 2).expand_as(h_t) > 0).type_as(h_t) * h_t
        h.append(h_t)
    # Readout
    res = self.r.forward(h)
    if self.type == 'classification':
        res = nn.LogSoftmax(dim=1)(res)  # explicit dim; the implicit-dim form is deprecated
    return res
Example 13: forward
# Required imports: from torch import nn [as alias]
# Or: from torch.nn import LogSoftmax [as alias]
def forward(self, g, h_in, e):
    h = []
    h.append(h_in)
    # Layers
    for t in range(0, len(self.m)):
        u_args = self.u[t].get_args()
        h_t = Variable(torch.zeros(h_in.size(0), h_in.size(1), u_args['out']).type_as(h[t].data))
        # Apply one layer pass (Message + Update)
        for v in range(0, h_in.size(1)):
            m = self.m[t].forward(h[t][:, v, :], h[t], e[:, v, :, :])
            # Nodes without an edge set their message to 0.
            m = g[:, v, :, None].expand_as(m) * m
            m = torch.sum(m, 1)
            # Interaction Net update
            opt = {}
            opt['x_v'] = Variable(torch.Tensor([]).type_as(m.data))
            h_t[:, v, :] = self.u[t].forward(h[t][:, v, :], m, opt)
        h.append(h_t.clone())
    # Readout
    res = self.r.forward(h)
    if self.type == 'classification':
        res = nn.LogSoftmax(dim=1)(res)  # explicit dim; the implicit-dim form is deprecated
    return res
Example 14: loss_calc
# Required imports: from torch import nn [as alias]
# Or: from torch.nn import LogSoftmax [as alias]
def loss_calc(out, label, gpu0):
    """
    Returns the cross-entropy loss for semantic segmentation.
    """
    # out shape: batch_size x channels x h x w
    # label shape: h x w x 1 x batch_size -> batch_size x h x w
    label = label[:, :, 0, :].transpose(2, 0, 1)
    label = torch.from_numpy(label).long()
    if useGPU:
        label = Variable(label).cuda(gpu0)
        if onlyLesions:
            criterion = nn.NLLLoss2d(weight=torch.cuda.FloatTensor([1, 100000]))
        else:
            criterion = nn.NLLLoss2d(weight=torch.cuda.FloatTensor([1, 100000, 100000]))
    else:
        label = Variable(label)
        if onlyLesions:
            criterion = nn.NLLLoss2d(weight=torch.FloatTensor([1, 100000]))
        else:
            criterion = nn.NLLLoss2d(weight=torch.FloatTensor([1, 100000, 100000]))
    m = nn.LogSoftmax(dim=1)  # explicit dim; the implicit-dim form is deprecated
    out = m(out)
    return criterion(out, label)
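The LogSoftmax-plus-NLLLoss pairing above is exactly what nn.CrossEntropyLoss fuses into one call. A sketch demonstrating the equivalence on fabricated segmentation tensors (class weights omitted for brevity):

import torch
from torch import nn

out = torch.randn(2, 3, 16, 16)            # (N, C, h, w) raw scores
label = torch.randint(0, 3, (2, 16, 16))   # (N, h, w) class indices

nll = nn.NLLLoss()(nn.LogSoftmax(dim=1)(out), label)
ce = nn.CrossEntropyLoss()(out, label)
print(torch.allclose(nll, ce))             # True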
Example 15: cross_entropy_loss
# Required imports: from torch import nn [as alias]
# Or: from torch.nn import LogSoftmax [as alias]
def cross_entropy_loss(output, labels):
    """According to the PyTorch documentation, nn.CrossEntropyLoss combines nn.LogSoftmax and nn.NLLLoss.
    The first argument should be class scores with shape (N, C, h, w);
    the second argument should be class labels with shape (N, h, w).
    Assumes labels are binary.
    """
    ce_loss = nn.CrossEntropyLoss()
    images, channels, height, width = output.data.shape
    loss = ce_loss(output, labels.long().view(images, height, width))
    return loss
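A quick check of the function on fabricated tensors, assuming cross_entropy_loss from this example is in scope; any label layout that flattens to (N, h, w) is accepted by the view call:

import torch

output = torch.randn(2, 2, 32, 32)                    # (N, C, h, w), binary: C == 2
labels = torch.randint(0, 2, (2, 1, 32, 32)).float()  # (N, 1, h, w) mask
print(cross_entropy_loss(output, labels).item())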