This page collects typical usage examples of torch.nn.LogSigmoid in Python. If you are unsure what nn.LogSigmoid does, how to call it, or what working code that uses it looks like, the curated examples below may help. You can also browse further usage examples from the torch.nn module that contains it.
The following presents 11 code examples of nn.LogSigmoid, ordered by popularity by default.
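Before the examples, a minimal standalone sketch of what the module computes: nn.LogSigmoid applies log(sigmoid(x)) = -log(1 + exp(-x)) elementwise. It is numerically safer than composing torch.log with torch.sigmoid, which underflows to -inf for very negative inputs; torch.nn.functional.logsigmoid is the equivalent functional form.

import torch
from torch import nn

log_sigmoid = nn.LogSigmoid()
x = torch.tensor([-200.0, -1.0, 0.0, 1.0])

print(log_sigmoid(x))               # tensor([-200.0000, -1.3133, -0.6931, -0.3133])
print(torch.log(torch.sigmoid(x)))  # the naive composition underflows to -inf at x = -200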
Example 1: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LogSigmoid [as alias]
def __init__(self,
             embed_size,
             counter,
             n_negatives,
             power,
             device,
             ignore_index):
    super(NegativeSampling, self).__init__()
    self.counter = counter
    self.n_negatives = n_negatives
    self.power = power
    self.device = device
    # Output (context) embedding table, initialised to zeros; padding_idx keeps
    # the ignored index out of gradient updates.
    self.W = nn.Embedding(num_embeddings=len(counter),
                          embedding_dim=embed_size,
                          padding_idx=ignore_index)
    self.W.weight.data.zero_()
    self.logsigmoid = nn.LogSigmoid()
    # Alias sampler over the unigram counts raised to `power`.
    self.sampler = WalkerAlias(np.power(counter, power))
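The original snippet shows only the constructor. As a rough, hypothetical sketch of how these attributes are usually combined, a negative-sampling forward pass scores the positive pair with logsigmoid and the sampled negatives with logsigmoid of the negated score; the tensor shapes and the WalkerAlias.sample signature below are assumptions, not code from the source repository.

import torch

def forward(self, embeddings, targets):
    # embeddings: (batch, embed_size) hidden vectors; targets: (batch,) true word indices.
    batch_size = targets.size(0)

    # Positive term: log sigmoid of the dot product with the true output vector.
    pos_vectors = self.W(targets)                                  # (batch, embed_size)
    pos_score = (embeddings * pos_vectors).sum(dim=1)              # (batch,)
    loss = -self.logsigmoid(pos_score).sum()

    # Negative term: n_negatives samples per example from the unigram**power table.
    neg_idx = self.sampler.sample((batch_size, self.n_negatives))  # assumed API
    neg_idx = torch.as_tensor(neg_idx, dtype=torch.long, device=self.device)
    neg_vectors = self.W(neg_idx)                                  # (batch, n_negatives, embed_size)
    neg_score = torch.bmm(neg_vectors, embeddings.unsqueeze(2)).squeeze(2)
    loss = loss - self.logsigmoid(-neg_score).sum()

    return loss / batch_size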
Example 2: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LogSigmoid [as alias]
def __init__(self):
    nn.Module.__init__(self)
    self.m = nn.LogSigmoid()
Example 3: create_str_to_activations_converter
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LogSigmoid [as alias]
def create_str_to_activations_converter(self):
    """Creates a dictionary which converts strings to activations"""
    str_to_activations_converter = {"elu": nn.ELU(), "hardshrink": nn.Hardshrink(), "hardtanh": nn.Hardtanh(),
                                    "leakyrelu": nn.LeakyReLU(), "logsigmoid": nn.LogSigmoid(), "prelu": nn.PReLU(),
                                    "relu": nn.ReLU(), "relu6": nn.ReLU6(), "rrelu": nn.RReLU(), "selu": nn.SELU(),
                                    "sigmoid": nn.Sigmoid(), "softplus": nn.Softplus(), "logsoftmax": nn.LogSoftmax(),
                                    "softshrink": nn.Softshrink(), "softsign": nn.Softsign(), "tanh": nn.Tanh(),
                                    "tanhshrink": nn.Tanhshrink(), "softmin": nn.Softmin(), "softmax": nn.Softmax(dim=1),
                                    "none": None}
    return str_to_activations_converter
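A quick, hypothetical usage sketch; `builder` stands in for an instance of whatever class defines the method above and is not part of the original snippet.

import torch

converter = builder.create_str_to_activations_converter()
activation = converter["logsigmoid"]   # an nn.LogSigmoid() instance

x = torch.randn(4, 8)
y = activation(x)                      # elementwise log(sigmoid(x)), same shape as x, all values <= 0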
Example 4: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LogSigmoid [as alias]
def __init__(self):
    super(NegativeSampling, self).__init__()
    self._log_sigmoid = nn.LogSigmoid()
Example 5: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LogSigmoid [as alias]
def __init__(self, vocab_size, original_hidden_size, num_layers, tau=1):
    super().__init__()
    self.bert_layer = BertLayer(BertConfig(
        vocab_size_or_config_json_file=vocab_size,
        hidden_size=original_hidden_size * num_layers,
    ))
    self.linear_layer = nn.Linear(original_hidden_size * num_layers, 1)
    self.log_sigmoid = nn.LogSigmoid()
    self.tau = tau
Example 6: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LogSigmoid [as alias]
def __init__(self, weights=None):
    self.weights = weights
    self.logsigmoid = nn.LogSigmoid()
Example 7: get_activation
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LogSigmoid [as alias]
def get_activation(name: str = "relu") -> nn.Module:
    """Get back an `nn.Module` by string name of the activation operator.

    :param name: A string name of the operation
    :return: A module associated with that string
    """
    if name is None or name == "ident":
        return nn.Identity()
    if name == "tanh":
        return nn.Tanh()
    if name == "gelu":
        return GeLU()
    if name == "hardtanh":
        return nn.Hardtanh()
    if name == "leaky_relu":
        return nn.LeakyReLU()
    if name == "prelu":
        return nn.PReLU()
    if name == "sigmoid":
        return nn.Sigmoid()
    if name == "log_sigmoid":
        return nn.LogSigmoid()
    if name == "log_softmax":
        return nn.LogSoftmax(dim=-1)
    if name == "softmax":
        return nn.Softmax(dim=-1)
    return nn.ReLU()
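Because get_activation returns an instantiated nn.Module, it can be dropped directly into a container. A small usage sketch with arbitrary layer sizes:

import torch
from torch import nn

head = nn.Sequential(
    nn.Linear(128, 1),
    get_activation("log_sigmoid"),    # log-probability-style output for a binary score
)
scores = head(torch.randn(4, 128))    # shape (4, 1), all values <= 0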
Example 8: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LogSigmoid [as alias]
def __init__(self, vocab_size, emb_dim):
    super(SkipGramNeg, self).__init__()
    self.input_emb = nn.Embedding(vocab_size, emb_dim)
    self.output_emb = nn.Embedding(vocab_size, emb_dim)
    self.log_sigmoid = nn.LogSigmoid()

    initrange = (2.0 / (vocab_size + emb_dim)) ** 0.5  # Xavier init
    self.input_emb.weight.data.uniform_(-initrange, initrange)
    self.output_emb.weight.data.uniform_(-0, 0)        # output vectors start at zero
Example 9: get_activation_fn
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LogSigmoid [as alias]
def get_activation_fn(name):
    """PyTorch built-in activation functions."""
    activation_functions = {
        "linear": lambda: lambda x: x,
        "relu": nn.ReLU,
        "relu6": nn.ReLU6,
        "elu": nn.ELU,
        "prelu": nn.PReLU,
        "leaky_relu": nn.LeakyReLU,
        "threshold": nn.Threshold,
        "hardtanh": nn.Hardtanh,
        "sigmoid": nn.Sigmoid,
        "tanh": nn.Tanh,
        "log_sigmoid": nn.LogSigmoid,
        "softplus": nn.Softplus,
        "softshrink": nn.Softshrink,
        "softsign": nn.Softsign,
        "tanhshrink": nn.Tanhshrink,
    }
    if name not in activation_functions:
        raise ValueError(
            f"'{name}' is not in activation_functions. Use one of the following:\n{activation_functions.keys()}"
        )
    return activation_functions[name]
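Note the contrast with Example 7: this helper returns the constructor (or a factory, for the "linear" entry), not an instance, so the result has to be called once more before use. A short, hypothetical usage sketch:

import torch

act_cls = get_activation_fn("log_sigmoid")   # the nn.LogSigmoid class itself
act = act_cls()                              # instantiate before use
y = act(torch.randn(3))                      # elementwise log(sigmoid(x))

identity = get_activation_fn("linear")()     # the "linear" entry builds a plain pass-through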
Example 10: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LogSigmoid [as alias]
def __init__(self):
    super(LayerLogSigmoidTest, self).__init__()
    self.sig = nn.LogSigmoid()
Example 11: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LogSigmoid [as alias]
def __init__(self, num_input, num_hidden, num_context):
    super().__init__()
    self.made = MADE(num_input=num_input, num_output=num_input * 2,
                     num_hidden=num_hidden, num_context=num_context)
    # init such that sigmoid(s) is close to 1 for stability
    self.sigmoid_arg_bias = nn.Parameter(torch.ones(num_input) * 2)
    self.sigmoid = nn.Sigmoid()
    self.log_sigmoid = nn.LogSigmoid()
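The source again shows only the constructor; these pieces match the standard inverse autoregressive flow update (Kingma et al., 2016), where log_sigmoid supplies the log-determinant of the sigmoid gate in a numerically stable way. A hypothetical forward pass along those lines (the real one in the original repository may differ):

import torch

def forward(self, input, context=None):
    # MADE produces the shift m and the pre-activation gate s autoregressively.
    m, s = torch.chunk(self.made(input, context), chunks=2, dim=-1)
    s = s + self.sigmoid_arg_bias          # bias of +2 keeps sigmoid(s) near 1 at init
    gate = self.sigmoid(s)
    output = gate * input + (1 - gate) * m
    # The Jacobian is triangular with sigmoid(s) on the diagonal, so
    # log|det J| = sum(log sigmoid(s)); LogSigmoid avoids log(0) underflow.
    logdet = self.log_sigmoid(s).sum(dim=-1)
    return output, logdet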