This article collects typical usage examples of the Python method torch.nn.AdaptiveMaxPool1d. If you are wondering what nn.AdaptiveMaxPool1d does, how to use it, or where to find worked examples, the curated code samples below should help. You can also read further about its containing module, torch.nn.
Below are 15 code examples of nn.AdaptiveMaxPool1d, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
Example 1: __init__
# Module to import: from torch import nn [as alias]
# Alternatively: from torch.nn import AdaptiveMaxPool1d [as alias]
def __init__(self, c):
super(STN3D, self).__init__()
self.c = c
self.conv1 = nn.Conv1d(self.c, 64, 1)
self.conv2 = nn.Conv1d(64, 128, 1)
self.conv3 = nn.Conv1d(128, 1024, 1)
self.mp = nn.AdaptiveMaxPool1d(1)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, self.c*self.c)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.bn4 = nn.BatchNorm1d(512)
self.bn5 = nn.BatchNorm1d(256)
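As a quick, self-contained illustration of the pooling step above: nn.AdaptiveMaxPool1d(1) collapses the point dimension to a single value per channel, whatever the number of points. A minimal sketch with illustrative shapes (not part of the original class):

import torch
from torch import nn

mp = nn.AdaptiveMaxPool1d(1)
features = torch.randn(4, 1024, 500)   # (batch, channels, points)
pooled = mp(features)                  # torch.Size([4, 1024, 1])
flat = pooled.view(-1, 1024)           # torch.Size([4, 1024]), ready for fc1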
Example 2: __init__
# Module to import: from torch import nn [as alias]
# Alternatively: from torch.nn import AdaptiveMaxPool1d [as alias]
def __init__(
self, *, feat_in, num_classes, init_mode="xavier_uniform", return_logits=True, pooling_type='avg', **kwargs
):
TrainableNM.__init__(self, **kwargs)
self._feat_in = feat_in
self._return_logits = return_logits
self._num_classes = num_classes
if pooling_type == 'avg':
self.pooling = nn.AdaptiveAvgPool1d(1)
elif pooling_type == 'max':
self.pooling = nn.AdaptiveMaxPool1d(1)
else:
raise ValueError('Pooling type chosen is not valid. Must be either `avg` or `max`')
self.decoder_layers = nn.Sequential(nn.Linear(self._feat_in, self._num_classes, bias=True))
self.apply(lambda x: init_weights(x, mode=init_mode))
self.to(self._device)
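Stripped of the NeMo wrapper, the pooling-then-linear decode above reduces to a few lines of plain PyTorch. A hedged sketch with illustrative sizes (feat_in, num_classes, and the sequence length are assumptions for the demo):

import torch
from torch import nn

feat_in, num_classes = 256, 10
pooling = nn.AdaptiveMaxPool1d(1)        # pooling_type='max'
decoder = nn.Linear(feat_in, num_classes, bias=True)

encoded = torch.randn(8, feat_in, 120)   # (batch, features, time)
logits = decoder(pooling(encoded).squeeze(-1))  # torch.Size([8, 10])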
Example 3: __init__
# Module to import: from torch import nn [as alias]
# Alternatively: from torch.nn import AdaptiveMaxPool1d [as alias]
def __init__(self, opt):
    super(BasicCNN1D, self).__init__(opt)
    self.content_dim = opt.__dict__.get("content_dim", 256)
    self.kernel_size = opt.__dict__.get("kernel_size", 3)
    self.encoder = nn.Embedding(opt.vocab_size, opt.embedding_dim)
    if opt.__dict__.get("embeddings", None) is not None:
        self.encoder.weight = nn.Parameter(opt.embeddings, requires_grad=opt.embedding_training)
    self.content_conv = nn.Sequential(
        nn.Conv1d(in_channels=opt.embedding_dim,
                  out_channels=self.content_dim,  # 256
                  kernel_size=self.kernel_size),  # 3
        nn.ReLU(),
        nn.MaxPool1d(kernel_size=(opt.max_seq_len - self.kernel_size + 1))
        # nn.AdaptiveMaxPool1d()
    )
    self.fc = nn.Linear(self.content_dim, opt.label_size)
    self.properties.update(
        {"content_dim": self.content_dim,
         "kernel_size": self.kernel_size,
         })
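The commented-out nn.AdaptiveMaxPool1d() hints at a length-independent variant: pooling adaptively to one value per channel removes the dependence on opt.max_seq_len. A sketch under that assumption (dimensions are illustrative):

import torch
from torch import nn

embedding_dim, content_dim, kernel_size = 128, 256, 3
content_conv = nn.Sequential(
    nn.Conv1d(embedding_dim, content_dim, kernel_size),
    nn.ReLU(),
    nn.AdaptiveMaxPool1d(1),
)
x = torch.randn(2, embedding_dim, 50)   # any sequence length works now
print(content_conv(x).shape)            # torch.Size([2, 256, 1])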
Example 4: __init__
# Module to import: from torch import nn [as alias]
# Alternatively: from torch.nn import AdaptiveMaxPool1d [as alias]
def __init__(self, config):
super(HBMP, self).__init__()
self.config = config
self.max_pool = nn.AdaptiveMaxPool1d(1)
self.cells = config.cells
self.hidden_dim = config.hidden_dim
self.rnn1 = nn.LSTM(input_size=config.embed_dim,
hidden_size=config.hidden_dim,
num_layers=config.layers,
dropout=config.dropout,
bidirectional=True)
self.rnn2 = nn.LSTM(input_size=config.embed_dim,
hidden_size=config.hidden_dim,
num_layers=config.layers,
dropout=config.dropout,
bidirectional=True)
self.rnn3 = nn.LSTM(input_size=config.embed_dim,
hidden_size=config.hidden_dim,
num_layers=config.layers,
dropout=config.dropout,
bidirectional=True)
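nn.AdaptiveMaxPool1d pools over the last dimension, so LSTM outputs (which arrive time-first here) need a permute before pooling. A minimal sketch of that pattern with illustrative sizes (not the original forward):

import torch
from torch import nn

embed_dim, hidden_dim = 300, 600
rnn = nn.LSTM(input_size=embed_dim, hidden_size=hidden_dim, bidirectional=True)
max_pool = nn.AdaptiveMaxPool1d(1)

x = torch.randn(35, 16, embed_dim)                 # (seq_len, batch, embed_dim)
out, _ = rnn(x)                                    # (35, 16, 2 * hidden_dim)
emb = max_pool(out.permute(1, 2, 0)).squeeze(-1)   # (16, 2 * hidden_dim)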
Example 5: __init__
# Module to import: from torch import nn [as alias]
# Alternatively: from torch.nn import AdaptiveMaxPool1d [as alias]
def __init__(self,
tokens_encoder=None,
**kwargs
):
super(GNNModel, self).__init__()
if tokens_encoder is None:
tokens_encoder = modules.ConvWordsEncoder(**kwargs)
self._tokens_encoder: nn.Module = tokens_encoder
self.output_vector_size = tokens_encoder.output_vector_size // 2
self._gnn: nn.Module = GNN(self._tokens_encoder._word_embedding.embedding_dim,
tokens_encoder.output_vector_size,
hp_dropout=kwargs.get("hp_dropout", 0.1),
hp_gated=kwargs.get("hp_gated", True)
)
# self._pool = nn.AdaptiveMaxPool1d(1)
self._question_layer = nn.Sequential(nn.Linear(in_features=tokens_encoder.output_vector_size,
out_features=self.output_vector_size),
nn.ReLU()
)
self._graph_layer = nn.Sequential(nn.Linear(in_features=tokens_encoder.output_vector_size,
out_features=self.output_vector_size),
nn.ReLU()
)
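The pooling layer here is commented out; if it were enabled, it would collapse the token axis of the encoder output to one vector per input, e.g. (shapes are illustrative):

import torch
from torch import nn

pool = nn.AdaptiveMaxPool1d(1)
token_vectors = torch.randn(4, 512, 20)            # (batch, encoder dim, tokens)
question_vector = pool(token_vectors).squeeze(-1)  # torch.Size([4, 512])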
Example 6: __init__
# Module to import: from torch import nn [as alias]
# Alternatively: from torch.nn import AdaptiveMaxPool1d [as alias]
def __init__(self):
    super(MixA_Module, self).__init__()
    self.softmax = nn.Softmax(dim=-1)
    self.AVGpool = nn.AdaptiveAvgPool1d(1)
    self.MAXpool = nn.AdaptiveMaxPool1d(1)
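Only the layers are registered above; how they combine lives in the module's forward, which is not shown. As a purely illustrative note, the two pooling layers produce the same output shape and can be concatenated as per-channel statistics (an assumption, not the original logic):

import torch
from torch import nn

avg_pool, max_pool = nn.AdaptiveAvgPool1d(1), nn.AdaptiveMaxPool1d(1)
x = torch.randn(2, 64, 100)                           # (batch, channels, positions)
stats = torch.cat([avg_pool(x), max_pool(x)], dim=1)  # torch.Size([2, 128, 1])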
Example 7: _create_base_network
# Module to import: from torch import nn [as alias]
# Alternatively: from torch.nn import AdaptiveMaxPool1d [as alias]
def _create_base_network(self) -> nn.Module:
"""
Apply conv and maxpooling operation towards to each letter-ngram.
The input shape is `fixed_text_length`*`number of letter-ngram`,
as described in the paper, `n` is 3, `number of letter-trigram`
is about 30,000 according to their observation.
:return: A :class:`nn.Module` of CDSSM network, tensor in tensor out.
"""
pad = nn.ConstantPad1d((0, self._params['kernel_size'] - 1), 0)
conv = nn.Conv1d(
in_channels=self._params['vocab_size'],
out_channels=self._params['filters'],
kernel_size=self._params['kernel_size']
)
activation = parse_activation(
self._params['conv_activation_func']
)
dropout = nn.Dropout(p=self._params['dropout_rate'])
pool = nn.AdaptiveMaxPool1d(1)
squeeze = Squeeze()
mlp = self._make_multi_layer_perceptron_layer(
self._params['filters']
)
return nn.Sequential(
pad, conv, activation, dropout, pool, squeeze, mlp
)
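To trace shapes through the returned Sequential, here is a trimmed standalone version with the final MLP omitted; the Squeeze stand-in, vocabulary size, and hyperparameters are assumptions for the demo (MatchZoo's own Squeeze is expected to drop the trailing size-1 dimension the same way):

import torch
from torch import nn

class Squeeze(nn.Module):          # stand-in for matchzoo's Squeeze
    def forward(self, x):
        return x.squeeze(-1)

vocab_size, filters, kernel_size = 3000, 64, 3   # vocab reduced for the demo
net = nn.Sequential(
    nn.ConstantPad1d((0, kernel_size - 1), 0),
    nn.Conv1d(vocab_size, filters, kernel_size),
    nn.ReLU(),
    nn.Dropout(p=0.5),
    nn.AdaptiveMaxPool1d(1),
    Squeeze(),
)
x = torch.randn(2, vocab_size, 10)   # (batch, letter-trigrams, text length)
print(net(x).shape)                  # torch.Size([2, 64])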
Example 8: build
# Module to import: from torch import nn [as alias]
# Alternatively: from torch.nn import AdaptiveMaxPool1d [as alias]
def build(self):
"""
Build model structure.
HBMP use Siamese arthitecture.
"""
self.embedding = self._make_default_embedding_layer()
encoder_layer = nn.LSTM(
input_size=self._params['embedding_output_dim'],
hidden_size=self._params['lstm_hidden_size'],
num_layers=self._params['num_layers'],
dropout=self._params['dropout_rate'],
batch_first=True,
bidirectional=True)
self.encoder = nn.ModuleList(
[copy.deepcopy(encoder_layer)
for _ in range(self._params['lstm_num'])])
self.max_pool = nn.AdaptiveMaxPool1d(1)
self.mlp = self._make_multi_layer_perceptron_layer(
self._params['lstm_hidden_size'] * 24
)
self.out = self._make_output_layer(
self._params['mlp_num_fan_out']
)
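The factor of 24 in the MLP input is not explained above. A plausible reading, assuming lstm_num is 3: each BiLSTM contributes 2 directions, so one sentence max-pools to 6 * hidden, and the usual [u; v; |u - v|; u * v] matching of two sentences quadruples that to 24 * hidden. A sketch of that arithmetic (an assumption, not taken from the source):

import torch

hidden = 100
u = torch.randn(4, 6 * hidden)   # one sentence: 3 BiLSTMs x 2 directions, max-pooled
v = torch.randn(4, 6 * hidden)
match = torch.cat([u, v, (u - v).abs(), u * v], dim=-1)
print(match.shape)               # torch.Size([4, 2400]) == (4, 24 * hidden)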
Example 9: adaptive_maxpooling_factory
# Module to import: from torch import nn [as alias]
# Alternatively: from torch.nn import AdaptiveMaxPool1d [as alias]
def adaptive_maxpooling_factory(dim):
types = [nn.AdaptiveMaxPool1d, nn.AdaptiveMaxPool2d, nn.AdaptiveMaxPool3d]
return types[dim - 1]
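Usage is straightforward: the factory returns the class for the requested dimensionality, which you then instantiate (assuming the factory above is in scope):

from torch import nn

pool_cls = adaptive_maxpooling_factory(1)                    # -> nn.AdaptiveMaxPool1d
pool = pool_cls(output_size=1)
pool2d = adaptive_maxpooling_factory(2)(output_size=(7, 7))  # nn.AdaptiveMaxPool2d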
Example 10: __init__
# Module to import: from torch import nn [as alias]
# Alternatively: from torch.nn import AdaptiveMaxPool1d [as alias]
def __init__(self, pretrained=False, in_channel=1, out_channel=10):
    super(CNN, self).__init__()
    if pretrained:
        warnings.warn("Pretrained model is not available")
    self.layer1 = nn.Sequential(
        nn.Conv1d(in_channel, 16, kernel_size=15),  # (B, 16, L - 14)
        nn.BatchNorm1d(16),
        nn.ReLU(inplace=True))
    self.layer2 = nn.Sequential(
        nn.Conv1d(16, 32, kernel_size=3),           # (B, 32, L - 16)
        nn.BatchNorm1d(32),
        nn.ReLU(inplace=True),
        nn.MaxPool1d(kernel_size=2, stride=2),      # halves the length
    )
    self.layer3 = nn.Sequential(
        nn.Conv1d(32, 64, kernel_size=3),
        nn.BatchNorm1d(64),
        nn.ReLU(inplace=True))
    self.layer4 = nn.Sequential(
        nn.Conv1d(64, 128, kernel_size=3),
        nn.BatchNorm1d(128),
        nn.ReLU(inplace=True),
        nn.AdaptiveMaxPool1d(4))                    # (B, 128, 4) for any length
    self.layer5 = nn.Sequential(
        nn.Linear(128 * 4, 256),
        nn.ReLU(inplace=True),
        nn.Dropout())
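The AdaptiveMaxPool1d(4) in layer4 is what lets layer5's Linear(128 * 4, 256) work for any input signal length; a quick standalone check:

import torch
from torch import nn

pool = nn.AdaptiveMaxPool1d(4)
for length in (8, 50, 333):
    feats = torch.randn(2, 128, length)   # whatever layer4's convs produce
    print(pool(feats).shape)              # always torch.Size([2, 128, 4])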
Example 11: __init__
# Module to import: from torch import nn [as alias]
# Alternatively: from torch.nn import AdaptiveMaxPool1d [as alias]
def __init__(self, pretrained=False, in_channel=1, out_channel=10):
    super(CNN, self).__init__()
    if pretrained:
        warnings.warn("Pretrained model is not available")
    self.layer1 = nn.Sequential(
        nn.Conv1d(in_channel, 16, kernel_size=15),  # (B, 16, L - 14)
        nn.BatchNorm1d(16),
        nn.ReLU(inplace=True))
    self.layer2 = nn.Sequential(
        nn.Conv1d(16, 32, kernel_size=3),           # (B, 32, L - 16)
        nn.BatchNorm1d(32),
        nn.ReLU(inplace=True),
        nn.MaxPool1d(kernel_size=2, stride=2),      # halves the length
    )
    self.layer3 = nn.Sequential(
        nn.Conv1d(32, 64, kernel_size=3),
        nn.BatchNorm1d(64),
        nn.ReLU(inplace=True))
    self.layer4 = nn.Sequential(
        nn.Conv1d(64, 128, kernel_size=3),
        nn.BatchNorm1d(128),
        nn.ReLU(inplace=True),
        nn.AdaptiveMaxPool1d(4))                    # (B, 128, 4) for any length
    self.layer5 = nn.Sequential(
        nn.Linear(128 * 4, 256),
        nn.ReLU(inplace=True),
        nn.Dropout(),
        nn.Linear(256, 256),
        nn.ReLU(inplace=True),
        nn.Dropout(),
    )
    self.fc = nn.Linear(256, out_channel)
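The class body stops at the layers; a forward pass would presumably flatten the pooled map before layer5 and finish with self.fc. An illustrative sketch, not the original forward (assumes the CNN class above and its imports are in scope):

import torch

model = CNN()                          # the class defined above
signal = torch.randn(2, 1, 128)        # (batch, in_channel, length)
h = model.layer3(model.layer2(model.layer1(signal)))
h = model.layer4(h).flatten(1)         # (2, 128 * 4)
logits = model.fc(model.layer5(h))     # (2, out_channel)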
Example 12: __init__
# Module to import: from torch import nn [as alias]
# Alternatively: from torch.nn import AdaptiveMaxPool1d [as alias]
def __init__(self, out_size=2, channels=128, window_size=512, embd_size=8):
super(MalConv, self).__init__()
self.embd = nn.Embedding(257, embd_size, padding_idx=0)
self.window_size = window_size
self.conv_1 = nn.Conv1d(embd_size, channels, window_size, stride=window_size, bias=True)
self.conv_2 = nn.Conv1d(embd_size, channels, window_size, stride=window_size, bias=True)
self.pooling = nn.AdaptiveMaxPool1d(1)
self.fc_1 = nn.Linear(channels, channels)
self.fc_2 = nn.Linear(channels, out_size)
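MalConv is the byte-level malware detector of Raff et al.; the two parallel convolutions implement a gated convolution. A hedged sketch of the forward pass following the published architecture (the repo's actual forward may differ in detail; assumes the class above is in scope):

import torch
import torch.nn.functional as F

model = MalConv()
raw_bytes = torch.randint(0, 257, (2, 4096))       # two files, 4096 bytes each
x = model.embd(raw_bytes).transpose(1, 2)          # (2, embd_size, 4096)
gated = model.conv_1(x) * torch.sigmoid(model.conv_2(x))
pooled = model.pooling(gated).squeeze(-1)          # (2, channels)
logits = model.fc_2(F.relu(model.fc_1(pooled)))    # (2, out_size)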
Example 13: conv_leaky_max
# Module to import: from torch import nn [as alias]
# Alternatively: from torch.nn import AdaptiveMaxPool1d [as alias]
def conv_leaky_max(self, embedding_dim, num_hidden, kernel_size, slope=0.25):
    conv_leaky_unit = nn.Sequential(
        nn.Conv1d(embedding_dim, num_hidden, kernel_size=kernel_size),
        nn.ELU(alpha=slope, inplace=True),  # ELU here, despite the "leaky" name
        nn.AdaptiveMaxPool1d(1)
    )
    return conv_leaky_unit
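Rewritten as a free function for a standalone check (same layers as above):

import torch
from torch import nn

def conv_leaky_max(embedding_dim, num_hidden, kernel_size, slope=0.25):
    return nn.Sequential(
        nn.Conv1d(embedding_dim, num_hidden, kernel_size=kernel_size),
        nn.ELU(alpha=slope, inplace=True),
        nn.AdaptiveMaxPool1d(1),
    )

unit = conv_leaky_max(300, 128, 3)
print(unit(torch.randn(8, 300, 25)).shape)   # torch.Size([8, 128, 1])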
Example 14: __init__
# Module to import: from torch import nn [as alias]
# Alternatively: from torch.nn import AdaptiveMaxPool1d [as alias]
def __init__(self, config):
super().__init__()
dataset = config.dataset
self.output_channel = config.output_channel
target_class = config.target_class
words_num = config.words_num
words_dim = config.words_dim
self.mode = config.mode
self.num_bottleneck_hidden = config.num_bottleneck_hidden
self.dynamic_pool_length = config.dynamic_pool_length
self.ks = 3 # There are three conv nets here
input_channel = 1
if config.mode == 'rand':
rand_embed_init = torch.Tensor(words_num, words_dim).uniform_(-0.25, 0.25)
self.embed = nn.Embedding.from_pretrained(rand_embed_init, freeze=False)
elif config.mode == 'static':
self.static_embed = nn.Embedding.from_pretrained(dataset.TEXT_FIELD.vocab.vectors, freeze=True)
elif config.mode == 'non-static':
self.non_static_embed = nn.Embedding.from_pretrained(dataset.TEXT_FIELD.vocab.vectors, freeze=False)
elif config.mode == 'multichannel':
self.static_embed = nn.Embedding.from_pretrained(dataset.TEXT_FIELD.vocab.vectors, freeze=True)
self.non_static_embed = nn.Embedding.from_pretrained(dataset.TEXT_FIELD.vocab.vectors, freeze=False)
input_channel = 2
else:
print("Unsupported Mode")
exit()
## Different filter sizes in xml_cnn than kim_cnn
self.conv1 = nn.Conv2d(input_channel, self.output_channel, (2, words_dim), padding=(1,0))
self.conv2 = nn.Conv2d(input_channel, self.output_channel, (4, words_dim), padding=(3,0))
self.conv3 = nn.Conv2d(input_channel, self.output_channel, (8, words_dim), padding=(7,0))
self.dropout = nn.Dropout(config.dropout)
self.bottleneck = nn.Linear(self.ks * self.output_channel * self.dynamic_pool_length, self.num_bottleneck_hidden)
self.fc1 = nn.Linear(self.num_bottleneck_hidden, target_class)
self.pool = nn.AdaptiveMaxPool1d(self.dynamic_pool_length) #Adaptive pooling
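Here AdaptiveMaxPool1d serves as dynamic max pooling: each conv branch is pooled to exactly dynamic_pool_length values per channel, so the bottleneck's input size self.ks * self.output_channel * self.dynamic_pool_length holds for any document length. A shape check with illustrative sizes:

import torch
from torch import nn

output_channel, dynamic_pool_length = 32, 8
pool = nn.AdaptiveMaxPool1d(dynamic_pool_length)

conv_out = torch.randn(4, output_channel, 61)   # one branch, any length
pooled = pool(conv_out)                         # torch.Size([4, 32, 8])
flat = pooled.flatten(1)                        # feeds the bottleneck Linear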
Example 15: __init__
# Module to import: from torch import nn [as alias]
# Alternatively: from torch.nn import AdaptiveMaxPool1d [as alias]
def __init__(self, out_rotation_mode="Quaternion", regress_t=False):
    super(Model, self).__init__()
    self.out_rotation_mode = out_rotation_mode
    self.regress_t = regress_t
    if out_rotation_mode == "Quaternion":
        self.out_channel = 4
    elif out_rotation_mode == "ortho6d":
        self.out_channel = 6
    elif out_rotation_mode == "ortho5d":
        self.out_channel = 5
    elif out_rotation_mode == "rmat":
        self.out_channel = 9
    elif out_rotation_mode == "axisAngle":
        self.out_channel = 4
    elif out_rotation_mode == "euler":
        self.out_channel = 3
    if regress_t:
        self.out_channel = self.out_channel + 3
    # in: (B, 3, point_num)   out: (B, 1024, 1)
    self.feature_extracter = nn.Sequential(
        nn.Conv1d(3, 64, kernel_size=1),
        nn.LeakyReLU(),
        nn.Conv1d(64, 128, kernel_size=1),
        nn.LeakyReLU(),
        nn.Conv1d(128, 1024, kernel_size=1),
        nn.AdaptiveMaxPool1d(output_size=1)
    )
    # in: (B, 2048)   out: (B, out_channel)
    self.mlp = nn.Sequential(
        nn.Linear(2048, 512),
        nn.LeakyReLU(),
        nn.Linear(512, self.out_channel))
    # pt: b * point_num * 3
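The extractor emits 1024 features per cloud while the MLP expects 2048, which suggests two point clouds (e.g. source and target) are each encoded and then concatenated before regression. An illustrative sketch under that assumption, not the original forward (assumes the Model class above is in scope):

import torch

model = Model(out_rotation_mode="ortho6d")
pts1 = torch.randn(4, 3, 1024)                    # (batch, 3, point_num)
pts2 = torch.randn(4, 3, 1024)
f1 = model.feature_extracter(pts1).squeeze(-1)    # (4, 1024)
f2 = model.feature_extracter(pts2).squeeze(-1)
out = model.mlp(torch.cat([f1, f2], dim=1))       # (4, 6) for ortho6d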