This page collects typical usage examples of Python's torch.tanh method. If you are unsure what torch.tanh does, how to call it, or how it is used in practice, the curated code examples below may help. You can also explore further usage examples from torch, the module this method belongs to.
The following 15 code examples of torch.tanh are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
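Before the examples, a minimal standalone sketch (not taken from any of the projects below) showing what torch.tanh does: it applies the hyperbolic tangent element-wise and maps every value into (-1, 1).

import torch

x = torch.linspace(-3.0, 3.0, steps=7)
y = torch.tanh(x)   # element-wise, every output lies in (-1, 1)
print(y)
# the same operation is also available as a tensor method: x.tanh()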
Example 1: forward
# Required import: import torch [as alias]
# Or: from torch import tanh [as alias]
def forward(self, word, char, sentence_length):
    """
    :param char: character-level input indices
    :param word: word-level input indices, shape (N, W)
    :param sentence_length: length of each sentence in the batch
    :return: logits from the final linear layer
    """
    char_conv = self._char_forward(char)
    char_conv = self.dropout(char_conv)
    word = self.embed(word)  # (N,W,D)
    x = torch.cat((word, char_conv), -1)
    x = self.dropout_embed(x)
    x, _ = self.bilstm(x)
    x = self.dropout(x)
    x = torch.tanh(x)
    logit = self.linear(x)
    return logit
Example 2: forward
# Required import: import torch [as alias]
# Or: from torch import tanh [as alias]
def forward(self, word, sentence_length):
    """
    :param word: word-level input indices
    :param sentence_length: length of each sentence in the batch
    :return: logits from the final linear layer
    """
    word, sentence_length, desorted_indices = prepare_pack_padded_sequence(word, sentence_length, device=self.device)
    x = self.embed(word)  # (N,W,D)
    x = self.dropout_embed(x)
    packed_embed = pack_padded_sequence(x, sentence_length, batch_first=True)
    x, _ = self.bilstm(packed_embed)
    x, _ = pad_packed_sequence(x, batch_first=True)
    x = x[desorted_indices]
    x = self.dropout(x)
    x = torch.tanh(x)
    logit = self.linear(x)
    return logit
Example 3: __init__
# Required import: import torch [as alias]
# Or: from torch import tanh [as alias]
def __init__(self, F, H, K, nonlinearity=torch.tanh, E=1, bias=True):
    # Initialize parent:
    super().__init__()
    # Store the values (using the notation in the paper):
    self.F = F  # Input Features
    self.H = H  # Hidden Features
    self.K = K  # Filter taps
    self.E = E  # Number of edge features
    self.S = None
    self.bias = bias  # Boolean
    self.sigma = nonlinearity  # torch.nn.functional
    # Create parameters:
    self.aWeights = nn.parameter.Parameter(torch.Tensor(H, E, K, F))
    self.bWeights = nn.parameter.Parameter(torch.Tensor(H, E, K, H))
    if self.bias:
        self.xBias = nn.parameter.Parameter(torch.Tensor(H, 1))
        self.zBias = nn.parameter.Parameter(torch.Tensor(H, 1))
    else:
        self.register_parameter('xBias', None)
        self.register_parameter('zBias', None)
    # Initialize parameters
    self.reset_parameters()
Example 4: forward
# Required import: import torch [as alias]
# Or: from torch import tanh [as alias]
def forward(self, q, k, v):
    bs_nh, ts, _ = k.shape
    bs = bs_nh // self.num_head
    # Uniformly init prev_att
    if self.prev_att is None:
        self.prev_att = torch.zeros((bs, self.num_head, ts)).to(k.device)
        for idx, sl in enumerate(self.k_len):
            self.prev_att[idx, :, :sl] = 1.0 / sl
    # Calculate location context
    loc_context = torch.tanh(self.loc_proj(self.loc_conv(
        self.prev_att).transpose(1, 2)))  # BxNxT -> BxTxD
    loc_context = loc_context.unsqueeze(1).repeat(
        1, self.num_head, 1, 1).view(-1, ts, self.dim)  # BxNxTxD -> BNxTxD
    q = q.unsqueeze(1)  # BNx1xD
    # Compute energy and context
    energy = self.gen_energy(torch.tanh(
        k + q + loc_context)).squeeze(2)  # BNxTxD -> BNxT
    output, attn = self._attend(energy, v)
    attn = attn.view(bs, self.num_head, ts)  # BNxT -> BxNxT
    self.prev_att = attn
    return output, attn
Example 5: forward
# Required import: import torch [as alias]
# Or: from torch import tanh [as alias]
def forward(self, query_embed, in_memory_embed, atten_mask=None):
    if self.atten_type == 'simple':  # simple attention
        attention = torch.bmm(in_memory_embed, query_embed.unsqueeze(2)).squeeze(2)
    elif self.atten_type == 'mul':  # multiplicative attention
        attention = torch.bmm(in_memory_embed, torch.mm(query_embed, self.W).unsqueeze(2)).squeeze(2)
    elif self.atten_type == 'add':  # additive attention
        attention = torch.tanh(torch.mm(in_memory_embed.view(-1, in_memory_embed.size(-1)), self.W2)
                               .view(in_memory_embed.size(0), -1, self.W2.size(-1))
                               + torch.mm(query_embed, self.W).unsqueeze(1))
        attention = torch.mm(attention.view(-1, attention.size(-1)), self.W3).view(attention.size(0), -1)
    else:
        raise RuntimeError('Unknown atten_type: {}'.format(self.atten_type))
    if atten_mask is not None:
        # Exclude masked elements from the softmax
        attention = atten_mask * attention - (1 - atten_mask) * INF
    return attention
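A quick aside on the masking line above: with a 0/1 mask, masked positions are pushed to a very large negative value so that a later softmax gives them essentially zero weight. A minimal standalone sketch (the tensors and the INF constant here are made up for illustration):

import torch

INF = 1e20                                     # large constant, as assumed in the snippet
scores = torch.tensor([[2.0, 1.0, 0.5]])
atten_mask = torch.tensor([[1.0, 1.0, 0.0]])   # last position is padding
masked = atten_mask * scores - (1 - atten_mask) * INF
print(torch.softmax(masked, dim=-1))           # the masked position gets ~0 probability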
Example 6: forward
# Required import: import torch [as alias]
# Or: from torch import tanh [as alias]
def forward(self, data):
    x, edge_index, batch = data.x, data.edge_index, data.batch
    # Note: edge_type is used below but was never assigned in the original snippet;
    # reading it from data (when present) is an assumption made here so that the
    # dropout_adj call is well-formed.
    edge_type = getattr(data, 'edge_type', None)
    if self.adj_dropout > 0:
        edge_index, edge_type = dropout_adj(
            edge_index, edge_type, p=self.adj_dropout,
            force_undirected=self.force_undirected, num_nodes=len(x),
            training=self.training
        )
    concat_states = []
    for conv in self.convs:
        x = torch.tanh(conv(x, edge_index))
        concat_states.append(x)
    concat_states = torch.cat(concat_states, 1)
    x = global_add_pool(concat_states, batch)
    x = F.relu(self.lin1(x))
    x = F.dropout(x, p=0.5, training=self.training)
    x = self.lin2(x)
    if self.regression:
        return x[:, 0]
    else:
        return F.log_softmax(x, dim=-1)
Example 7: forward
# Required import: import torch [as alias]
# Or: from torch import tanh [as alias]
def forward(self, hidden, memory):
    """
    Args:
        hidden: shape N X 2D
        memory: shape N X D
    Returns:
        hidden: shape N X D
        memory: shape N X D
    """
    ft = self.forget_gate(hidden)
    it = self.input_gate(hidden)
    ot = self.output_gate(hidden)
    ct = self.memory_gate(hidden)
    memory = ft * memory + it * ct
    hidden = ot * torch.tanh(memory)
    return hidden, memory
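The cell above follows the LSTM-style update memory = f*memory + i*c, hidden = o*tanh(memory). A self-contained sketch of the same pattern, where the gate layers are hypothetical stand-ins (sigmoid/tanh-wrapped linears) for the snippet's forget_gate, input_gate, output_gate and memory_gate:

import torch
import torch.nn as nn

class TinyMemoryCell(nn.Module):
    def __init__(self, hidden_dim, mem_dim):
        super().__init__()
        self.forget_gate = nn.Sequential(nn.Linear(hidden_dim, mem_dim), nn.Sigmoid())
        self.input_gate = nn.Sequential(nn.Linear(hidden_dim, mem_dim), nn.Sigmoid())
        self.output_gate = nn.Sequential(nn.Linear(hidden_dim, mem_dim), nn.Sigmoid())
        self.memory_gate = nn.Sequential(nn.Linear(hidden_dim, mem_dim), nn.Tanh())

    def forward(self, hidden, memory):
        ft, it = self.forget_gate(hidden), self.input_gate(hidden)
        ot, ct = self.output_gate(hidden), self.memory_gate(hidden)
        memory = ft * memory + it * ct        # keep part of the old memory, write new content
        hidden = ot * torch.tanh(memory)      # expose a bounded view of the memory
        return hidden, memory

cell = TinyMemoryCell(hidden_dim=8, mem_dim=4)  # hidden is N x 2D, memory is N x D in the snippet
h, m = cell(torch.randn(2, 8), torch.randn(2, 4))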
Example 8: forward
# Required import: import torch [as alias]
# Or: from torch import tanh [as alias]
def forward(self, obs, detach=False):
    h = self.forward_conv(obs)
    if detach:
        h = h.detach()
    h_fc = self.fc(h)
    self.outputs['fc'] = h_fc
    h_norm = self.ln(h_fc)
    self.outputs['ln'] = h_norm
    out = torch.tanh(h_norm)
    self.outputs['tanh'] = out
    return out
Example 9: forward
# Required import: import torch [as alias]
# Or: from torch import tanh [as alias]
def forward(self, inp):
    x = inp[0]
    adj = inp[1]
    for i in range(self.n_layers):
        x = self.graph_convolutions[i](x, adj)
        x = torch.tanh(x)
    n = adj.size(1)
    d = x.size()[-1]
    adj_new = adj.unsqueeze(3)
    adj_new = adj_new.expand(-1, n, n, d)
    x_new = x.repeat(1, n, 1).view(-1, n, n, d)
    res = x_new * adj_new
    x = res.max(dim=2)[0]
    x = torch.tanh(self.dense(x))
    x = torch.tanh(x.sum(dim=1))
    return x
Example 10: forward
# Required import: import torch [as alias]
# Or: from torch import tanh [as alias]
def forward(self, inputs, context=None):
    mask_right = (inputs > self.cut_point)
    mask_left = (inputs < -self.cut_point)
    mask_middle = ~(mask_right | mask_left)
    outputs = torch.zeros_like(inputs)
    outputs[mask_middle] = torch.tanh(inputs[mask_middle])
    outputs[mask_right] = self.alpha * torch.log(self.beta * inputs[mask_right])
    outputs[mask_left] = self.alpha * -torch.log(-self.beta * inputs[mask_left])
    logabsdet = torch.zeros_like(inputs)
    logabsdet[mask_middle] = torch.log(1 - outputs[mask_middle] ** 2)
    logabsdet[mask_right] = torch.log(self.alpha / inputs[mask_right])
    logabsdet[mask_left] = torch.log(-self.alpha / inputs[mask_left])
    logabsdet = utils.sum_except_batch(logabsdet, num_batch_dims=1)
    return outputs, logabsdet
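On the middle branch, logabsdet relies on the identity d/dx tanh(x) = 1 - tanh(x)^2, which is why log(1 - outputs**2) appears there. A quick autograd sanity check of that identity (standalone, not part of the transform class):

import torch

x = torch.linspace(-0.9, 0.9, 5, requires_grad=True)
y = torch.tanh(x)
grad, = torch.autograd.grad(y.sum(), x)          # element-wise derivative of tanh
assert torch.allclose(grad, 1 - y.detach() ** 2)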
Example 11: forward
# Required import: import torch [as alias]
# Or: from torch import tanh [as alias]
def forward(self, context, question, context_padding, question_padding):
    context_padding = torch.cat([context.new_zeros((context.size(0), 1), dtype=torch.long) == 1, context_padding], 1)
    question_padding = torch.cat([question.new_zeros((question.size(0), 1), dtype=torch.long) == 1, question_padding], 1)
    context_sentinel = self.embed_sentinel(context.new_zeros((context.size(0), 1), dtype=torch.long))
    context = torch.cat([context_sentinel, self.dropout(context)], 1)  # batch_size x (context_length + 1) x features
    question_sentinel = self.embed_sentinel(question.new_ones((question.size(0), 1), dtype=torch.long))
    question = torch.cat([question_sentinel, question], 1)  # batch_size x (question_length + 1) x features
    question = torch.tanh(self.proj(question))  # batch_size x (question_length + 1) x features
    affinity = context.bmm(question.transpose(1, 2))  # batch_size x (context_length + 1) x (question_length + 1)
    attn_over_context = self.normalize(affinity, context_padding)  # batch_size x (context_length + 1) x 1
    attn_over_question = self.normalize(affinity.transpose(1, 2), question_padding)  # batch_size x (question_length + 1) x 1
    sum_of_context = self.attn(attn_over_context, context)  # batch_size x (question_length + 1) x features
    sum_of_question = self.attn(attn_over_question, question)  # batch_size x (context_length + 1) x features
    coattn_context = self.attn(attn_over_question, sum_of_context)  # batch_size x (context_length + 1) x features
    coattn_question = self.attn(attn_over_context, sum_of_question)  # batch_size x (question_length + 1) x features
    return torch.cat([coattn_context, sum_of_question], 2)[:, 1:], torch.cat([coattn_question, sum_of_context], 2)[:, 1:]
Example 12: poincare_case
# Required import: import torch [as alias]
# Or: from torch import tanh [as alias]
def poincare_case():
    torch.manual_seed(42)
    shape = manifold_shapes[geoopt.manifolds.PoincareBall]
    ex = torch.randn(*shape, dtype=torch.float64) / 3
    ev = torch.randn(*shape, dtype=torch.float64) / 3
    x = torch.tanh(torch.norm(ex)) * ex / torch.norm(ex)
    ex = x.clone()
    v = ev.clone()
    manifold = geoopt.PoincareBall().to(dtype=torch.float64)
    x = geoopt.ManifoldTensor(x, manifold=manifold)
    case = UnaryCase(shape, x, ex, v, ev, manifold)
    yield case
    manifold = geoopt.PoincareBallExact().to(dtype=torch.float64)
    x = geoopt.ManifoldTensor(x, manifold=manifold)
    case = UnaryCase(shape, x, ex, v, ev, manifold)
    yield case
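The rescaling x = tanh(||ex||) * ex / ||ex|| keeps the direction of ex but shrinks its norm to tanh(||ex||) < 1, so the point lands strictly inside the unit ball that the Poincaré manifold expects. A small standalone check (the shape here is made up):

import torch

ex = torch.randn(10, dtype=torch.float64) / 3
x = torch.tanh(torch.norm(ex)) * ex / torch.norm(ex)
assert torch.norm(x) < 1                                          # strictly inside the unit ball
assert torch.allclose(torch.norm(x), torch.tanh(torch.norm(ex)))  # new norm is tanh of the old one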
Example 13: forward
# Required import: import torch [as alias]
# Or: from torch import tanh [as alias]
def forward(self, positive_edges, negative_edges, target):
    """
    Model forward propagation pass. Can fit deep and single layer SGCN models.
    :param positive_edges: Positive edges.
    :param negative_edges: Negative edges.
    :param target: Target vectors.
    :return loss: Loss value.
    :return self.z: Hidden vertex representations.
    """
    self.h_pos, self.h_neg = [], []
    self.h_pos.append(torch.tanh(self.positive_base_aggregator(self.X, positive_edges)))
    self.h_neg.append(torch.tanh(self.negative_base_aggregator(self.X, negative_edges)))
    for i in range(1, self.layers):
        self.h_pos.append(torch.tanh(self.positive_aggregators[i-1](self.h_pos[i-1], self.h_neg[i-1], positive_edges, negative_edges)))
        self.h_neg.append(torch.tanh(self.negative_aggregators[i-1](self.h_neg[i-1], self.h_pos[i-1], positive_edges, negative_edges)))
    self.z = torch.cat((self.h_pos[-1], self.h_neg[-1]), 1)
    loss = self.calculate_loss_function(self.z, positive_edges, negative_edges, target)
    return loss, self.z
Example 14: forward
# Required import: import torch [as alias]
# Or: from torch import tanh [as alias]
def forward(self, input, hx, att_score):
    """
    References
    ----------
    https://github.com/pytorch/pytorch/blob/v0.4.1/torch/nn/_functions/rnn.py#L49
    """
    gi = F.linear(input, self.weight_ih, self.bias_ih)
    gh = F.linear(hx, self.weight_hh, self.bias_hh)
    i_r, i_z, i_n = gi.chunk(3, 1)
    h_r, h_z, h_n = gh.chunk(3, 1)
    resetgate = torch.sigmoid(i_r + h_r)
    # updategate = torch.sigmoid(i_z + h_z)
    newgate = torch.tanh(i_n + resetgate * h_n)
    # hy = newgate + updategate * (hx - newgate)
    att_score = att_score.view(-1, 1)
    hy = (1. - att_score) * hx + att_score * newgate
    return hy
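The final line replaces the usual GRU update gate with the attention score: hy = (1 - a) * hx + a * newgate is a convex combination, so a = 0 keeps the old state and a = 1 takes the tanh candidate. A tiny standalone check with made-up tensors:

import torch

hx = torch.randn(3, 4)
newgate = torch.tanh(torch.randn(3, 4))
a = torch.tensor([[0.0], [0.5], [1.0]])   # one score per row, as in att_score.view(-1, 1)
hy = (1. - a) * hx + a * newgate
assert torch.allclose(hy[0], hx[0]) and torch.allclose(hy[2], newgate[2])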
Example 15: forward
# Required import: import torch [as alias]
# Or: from torch import tanh [as alias]
def forward(self, data):
    # Implement Equation 4.2 of the paper, i.e. concat all layers' graph representations and apply a linear model
    # note: this can be decomposed into one smaller linear model per layer
    x, edge_index, batch = data.x, data.edge_index, data.batch
    hidden_repres = []
    for conv in self.convs:
        x = torch.tanh(conv(x, edge_index))
        hidden_repres.append(x)
    # apply sortpool
    x_to_sortpool = torch.cat(hidden_repres, dim=1)
    x_1d = global_sort_pool(x_to_sortpool, batch, self.k)  # in the code the authors sort the last channel only
    # apply 1D convolutional layers
    x_1d = torch.unsqueeze(x_1d, dim=1)
    conv1d_res = F.relu(self.conv1d_params1(x_1d))
    conv1d_res = self.maxpool1d(conv1d_res)
    conv1d_res = F.relu(self.conv1d_params2(conv1d_res))
    conv1d_res = conv1d_res.reshape(conv1d_res.shape[0], -1)
    # apply dense layer
    out_dense = self.dense_layer(conv1d_res)
    return out_dense