This article collects typical usage examples of the Python torch.nn module. If you have been wondering what torch.nn is for, how to use it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples from the torch package to which the module belongs.
The following presents 14 code examples of torch.nn, ordered by popularity by default.
Example 1: __init__
# Required import: import torch [as alias]
# Or: from torch import nn [as alias]
def __init__(self):
    super(Model, self).__init__()
    self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
    self.bn1 = nn.BatchNorm2d(16)
    self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
    self.bn2 = nn.BatchNorm2d(32)
    self.conv3 = nn.Conv2d(32, 64, 3, padding=1)
    self.bn3 = nn.BatchNorm2d(64)
    self.conv4 = nn.Conv2d(64, 128, 3, padding=1)
    self.bn4 = nn.BatchNorm2d(128)
    # Dilated convolutions enlarge the receptive field without downsampling
    self.conv5 = nn.Conv2d(128, 128, 3, dilation=2, padding=2)
    self.bn5 = nn.BatchNorm2d(128)
    self.conv6 = nn.Conv2d(128, 128, 3, dilation=4, padding=4)
    self.bn6 = nn.BatchNorm2d(128)
    self.conv7 = nn.Conv2d(128, 1 + 9, 3, padding=1)  # 10 output channels
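The snippet above omits the model's forward method. A minimal sketch of a plausible forward pass, assuming the conventional conv -> BN -> ReLU ordering (an assumption, not the original code):

import torch.nn.functional as F

def forward(self, x):
    x = F.relu(self.bn1(self.conv1(x)))
    x = F.relu(self.bn2(self.conv2(x)))
    x = F.relu(self.bn3(self.conv3(x)))
    x = F.relu(self.bn4(self.conv4(x)))
    x = F.relu(self.bn5(self.conv5(x)))
    x = F.relu(self.bn6(self.conv6(x)))
    return self.conv7(x)  # (N, 10, H, W) score map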
Example 2: _init_modules
# Required import: import torch [as alias]
# Or: from torch import nn [as alias]
def _init_modules(self):
    self._init_head_tail()
    # rpn
    self.rpn_net = nn.Conv2d(self._net_conv_channels, cfg.RPN_CHANNELS, [3, 3], padding=1)
    self.rpn_cls_score_net = nn.Conv2d(cfg.RPN_CHANNELS, self._num_anchors * 2, [1, 1])
    self.rpn_bbox_pred_net = nn.Conv2d(cfg.RPN_CHANNELS, self._num_anchors * 4, [1, 1])
    self.cls_score_net_fast = nn.Linear(self._fc7_channels, self._num_classes + 1)
    self.bbox_pred_net_fast = nn.Linear(self._fc7_channels, (self._num_classes + 1) * 4)
    self.cls_score_net = nn.Linear(self._fc7_channels, self._num_classes)  # between classes
    self.bbox_pred_net = nn.Linear(self._fc7_channels, self._num_classes)  # between boxes
    self.init_weights()
Author: Sunarker | Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection | Lines: 20 | Source: network.py
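For orientation: the RPN head emits 2 objectness scores and 4 box-regression deltas per anchor, the _fast detection heads include a background class (hence num_classes + 1), and the last two heads output one value per foreground class, in line with the project's weakly supervised setting.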
Example 3: __init__
# Required import: import torch [as alias]
# Or: from torch import nn [as alias]
def __init__(self, block, layers, num_classes=1000):
    self.inplanes = 64
    super(ResNet, self).__init__()
    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                           bias=False)
    self.bn1 = nn.BatchNorm2d(64)
    self.relu = nn.ReLU(inplace=True)
    # maxpool different from pytorch-resnet, to match tf-faster-rcnn
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    self.layer1 = self._make_layer(block, 64, layers[0])
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
    self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
    # use stride 1 for the last conv4 layer (same as tf-faster-rcnn)
    self.layer4 = self._make_layer(block, 512, layers[3], stride=1)
    # He initialization for conv layers; unit weight and zero bias for batch norm
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
Author: Sunarker | Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection | Lines: 24 | Source: resnet_v1.py
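The hand-written loop above is He (Kaiming) initialization. A sketch of the equivalent one-liner using the built-in initializer, whose fan_out mode computes exactly n = k_h * k_w * out_channels:

nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')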
Example 4: _make_layer
# Required import: import torch [as alias]
# Or: from torch import nn [as alias]
def _make_layer(self, block, planes, blocks, stride=1):
    downsample = None
    # A 1x1 projection is needed when the residual branch changes resolution or width
    if stride != 1 or self.inplanes != planes * block.expansion:
        downsample = nn.Sequential(
            nn.Conv2d(self.inplanes, planes * block.expansion,
                      kernel_size=1, stride=stride, bias=False),
            nn.BatchNorm2d(planes * block.expansion),
        )
    layers = []
    layers.append(block(self.inplanes, planes, stride, downsample))
    self.inplanes = planes * block.expansion
    for i in range(1, blocks):
        layers.append(block(self.inplanes, planes))
    return nn.Sequential(*layers)
Author: Sunarker | Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection | Lines: 18 | Source: resnet_v1.py
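Hypothetical usage (a sketch; it assumes a Bottleneck block with expansion = 4 is defined alongside this class, as in torchvision's ResNet):

net = ResNet(Bottleneck, [3, 4, 6, 3], num_classes=1000)  # ResNet-50-style backbone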
Example 5: __init__
# Required import: import torch [as alias]
# Or: from torch import nn [as alias]
def __init__(self, input_size, hidden_size, correlation_func=1, do_similarity=False):
    super(AttentionScore, self).__init__()
    self.correlation_func = correlation_func
    self.hidden_size = hidden_size
    if correlation_func == 2 or correlation_func == 3:
        self.linear = nn.Linear(input_size, hidden_size, bias=False)
        # Parameter here is torch.nn.Parameter
        if do_similarity:
            self.diagonal = Parameter(torch.ones(1, 1, 1) / (hidden_size ** 0.5), requires_grad=False)
        else:
            self.diagonal = Parameter(torch.ones(1, 1, hidden_size), requires_grad=True)
    if correlation_func == 4:
        self.linear = nn.Linear(input_size, input_size, bias=False)
    if correlation_func == 5:
        self.linear = nn.Linear(input_size, hidden_size, bias=False)
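Hypothetical usage (a sketch; the sizes are illustrative only):

score_fn = AttentionScore(input_size=300, hidden_size=300, correlation_func=3)
# The forward (not shown above) would map two (batch, len, input_size) inputs
# to a (batch, len1, len2) score tensor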
Example 6: make_model
# Required import: import torch [as alias]
# Or: from torch import nn [as alias]
def make_model(d_vocab, N, d_model, d_ff=1024, h=4, dropout=0.1):
    """Helper: Construct a model from hyperparameters."""
    c = copy.deepcopy
    attn = MultiHeadedAttention(h, d_model)
    ff = PositionwiseFeedForward(d_model, d_ff, dropout)
    position = PositionalEncoding(d_model, dropout)
    model = EncoderDecoder(
        Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N),
        Decoder(DecoderLayer(d_model, c(attn), c(attn), c(ff), dropout), N),
        nn.GRU(d_model, d_model, 1),
        nn.Sequential(Embeddings(d_model, d_vocab), c(position)),
        nn.Sequential(Embeddings(d_model, d_vocab), c(position)),
        Generator(d_model, d_vocab),
        d_model
    )
    # This was important from their code.
    # Initialize parameters with Glorot / fan_avg.
    for p in model.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p)
    return model
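Hypothetical usage (a sketch; MultiHeadedAttention, EncoderDecoder, and the other building blocks follow the Annotated Transformer naming and are assumed to be defined in the same module):

model = make_model(d_vocab=32000, N=6, d_model=512)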
Example 7: make_model
# Required import: import torch [as alias]
# Or: from torch import nn [as alias]
def make_model(d_vocab, N, d_model, latent_size, d_ff=1024, h=4, dropout=0.1):
    """Helper: Construct a model from hyperparameters."""
    c = copy.deepcopy
    attn = MultiHeadedAttention(h, d_model)
    ff = PositionwiseFeedForward(d_model, d_ff, dropout)
    position = PositionalEncoding(d_model, dropout)
    share_embedding = Embeddings(d_model, d_vocab)
    model = EncoderDecoder(
        Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N),
        Decoder(DecoderLayer(d_model, c(attn), c(attn), c(ff), dropout), N),
        # nn.Sequential(Embeddings(d_model, d_vocab), c(position)),
        # nn.Sequential(Embeddings(d_model, d_vocab), c(position)),
        nn.Sequential(share_embedding, c(position)),
        nn.Sequential(share_embedding, c(position)),
        Generator(d_model, d_vocab),
        c(position),
        d_model,
        latent_size,
    )
    # This was important from their code.
    # Initialize parameters with Glorot / fan_avg.
    for p in model.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p)
    return model
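Compared with Example 6, this variant shares one embedding table between the encoder and decoder inputs (share_embedding) and threads an extra positional-encoding copy and a latent_size through to EncoderDecoder, which suggests a latent-variable (VAE-style) sequence-to-sequence model.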
Example 8: __init__
# Required import: import torch [as alias]
# Or: from torch import nn [as alias]
def __init__(self):
    super(Net, self).__init__()
    self.conv1 = nn.Conv2d(1, 32, 3, 1)
    self.conv2 = nn.Conv2d(32, 64, 3, 1)
    self.fc1 = nn.Linear(9216, 128)
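The snippet appears to be cut off mid-constructor. A typical completion for an MNIST-scale classifier adds a final fully connected layer along these lines (an illustrative assumption, not text recovered from the source):

    self.fc2 = nn.Linear(128, 10)  # hypothetical output layer, e.g. 10 MNIST classes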
Example 9: train
# Required import: import torch [as alias]
# Or: from torch import nn [as alias]
def train(model_q, model_k, device, train_loader, queue, optimizer, epoch, temp=0.07):
    model_q.train()
    total_loss = 0
    for batch_idx, (data, target) in enumerate(train_loader):
        x_q = data[0]  # query view of each image
        x_k = data[1]  # key view of each image
        x_q, x_k = x_q.to(device), x_k.to(device)
        q = model_q(x_q)
        k = model_k(x_k)
        k = k.detach()  # no gradients flow into the key encoder
        N = data[0].shape[0]
        K = queue.shape[0]
        # One positive logit per sample, one negative logit per queue entry
        l_pos = torch.bmm(q.view(N, 1, -1), k.view(N, -1, 1))
        l_neg = torch.mm(q.view(N, -1), queue.T.view(-1, K))
        logits = torch.cat([l_pos.view(N, 1), l_neg], dim=1)
        labels = torch.zeros(N, dtype=torch.long)  # the positive is always class 0
        labels = labels.to(device)
        cross_entropy_loss = nn.CrossEntropyLoss()
        loss = cross_entropy_loss(logits / temp, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
        momentum_update(model_q, model_k)
        queue = queue_data(queue, k)
        queue = dequeue_data(queue)
    total_loss /= len(train_loader.dataset)
    print('Train Epoch: {} \tLoss: {:.6f}'.format(epoch, total_loss))
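The three helpers called at the end of the loop (momentum_update, queue_data, dequeue_data) are not shown in this snippet. Minimal sketches consistent with MoCo-style training, offered as assumptions rather than the original code:

def momentum_update(model_q, model_k, beta=0.999):
    # Key encoder tracks the query encoder as an exponential moving average.
    for param_q, param_k in zip(model_q.parameters(), model_k.parameters()):
        param_k.data = param_k.data * beta + param_q.data * (1.0 - beta)

def queue_data(queue, k):
    # Enqueue the newest keys.
    return torch.cat([queue, k], dim=0)

def dequeue_data(queue, K=4096):
    # Drop the oldest keys once the queue exceeds its capacity K.
    return queue[-K:] if len(queue) > K else queue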
Example 10: conv3x3
# Required import: import torch [as alias]
# Or: from torch import nn [as alias]
def conv3x3(in_planes, out_planes, stride=1, dilation=1):
    "3x3 convolution with padding"
    # here with dilation
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1 + (dilation - 1) * (3 - 1), dilation=dilation, bias=False)
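A quick check of the padding arithmetic (a sketch): for a 3x3 kernel, the size-preserving padding is exactly dilation, whereas the formula above evaluates to 2*dilation - 1, so at stride 1 the output grows by 2*(dilation - 1) pixels per spatial dimension.

import torch
conv = conv3x3(8, 8, stride=1, dilation=2)  # padding = 3
out = conv(torch.randn(1, 8, 32, 32))
assert out.shape == (1, 8, 34, 34)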
Example 11: __init__
# Required import: import torch [as alias]
# Or: from torch import nn [as alias]
def __init__(self, hidden_size, eps=1e-12):
    """Construct a layernorm module in the TF style (epsilon inside the square root)."""
    super(BertLayerNorm, self).__init__()
    self.weight = nn.Parameter(torch.ones(hidden_size))
    self.bias = nn.Parameter(torch.zeros(hidden_size))
    self.variance_epsilon = eps
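For context, the forward pass that pairs with this constructor in pytorch-pretrained-bert-style code normalizes over the last dimension and keeps the epsilon inside the square root, as the docstring says (a sketch of that widely used implementation):

def forward(self, x):
    u = x.mean(-1, keepdim=True)
    s = (x - u).pow(2).mean(-1, keepdim=True)
    x = (x - u) / torch.sqrt(s + self.variance_epsilon)
    return self.weight * x + self.bias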
Example 12: forward
# Required import: import torch [as alias]
# Or: from torch import nn [as alias]
def forward(self, hidden_states, attention_mask):
    mixed_query_layer = self.query(hidden_states)
    mixed_key_layer = self.key(hidden_states)
    mixed_value_layer = self.value(hidden_states)
    query_layer = self.transpose_for_scores(mixed_query_layer)
    key_layer = self.transpose_for_scores(mixed_key_layer)
    value_layer = self.transpose_for_scores(mixed_value_layer)
    # Take the dot product between "query" and "key" to get the raw attention scores.
    attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
    attention_scores = attention_scores / math.sqrt(self.attention_head_size)
    # Apply the attention mask (precomputed for all layers in the BertModel forward() function).
    attention_scores = attention_scores + attention_mask
    # Normalize the attention scores to probabilities.
    attention_probs = nn.Softmax(dim=-1)(attention_scores)
    # This is actually dropping out entire tokens to attend to, which might
    # seem a bit unusual, but is taken from the original Transformer paper.
    attention_probs = self.dropout(attention_probs)
    context_layer = torch.matmul(attention_probs, value_layer)
    context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
    new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
    context_layer = context_layer.view(*new_context_layer_shape)
    return context_layer
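The helper self.transpose_for_scores is not shown here; in standard BERT implementations it reshapes (batch, seq, hidden) into per-head form (batch, heads, seq, head_size), along these lines (a sketch of the usual definition):

def transpose_for_scores(self, x):
    new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
    x = x.view(*new_x_shape)
    return x.permute(0, 2, 1, 3)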
Example 13: init_bert_weights
# Required import: import torch [as alias]
# Or: from torch import nn [as alias]
def init_bert_weights(self, module):
    """Initialize the weights."""
    if isinstance(module, (nn.Linear, nn.Embedding)):
        # Slightly different from the TF version, which uses truncated_normal for initialization
        # cf https://github.com/pytorch/pytorch/pull/5617
        module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
    elif isinstance(module, BertLayerNorm):
        module.bias.data.normal_(mean=0.0, std=self.config.initializer_range)
        module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
    if isinstance(module, nn.Linear) and module.bias is not None:
        module.bias.data.zero_()
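For comparison, the reference pytorch-pretrained-bert implementation initializes BertLayerNorm with module.bias.data.zero_() and module.weight.data.fill_(1.0); drawing both from a normal distribution, as the elif branch above does, appears to be this repository's deliberate variant.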
Example 14: __init__
# Required import: import torch [as alias]
# Or: from torch import nn [as alias]
def __init__(self, config):
    super(BertForQuestionAnswering, self).__init__(config)
    self.bert = BertModel(config)
    self.qa_outputs = nn.Linear(config.hidden_size, 1)
    self.apply(self.init_bert_weights)
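One detail worth flagging: the stock BertForQuestionAnswering head maps hidden_size to 2 logits per token (span start and end), while this variant emits a single logit per token, so the surrounding repository presumably derives start and end scores some other way.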