This article collects and summarizes typical usage examples of the torch.nn.AvgPool1d method in Python. If you are wondering how nn.AvgPool1d is used in practice, or are looking for concrete nn.AvgPool1d code examples, the curated samples here may help. You can also explore further usage examples for the containing module, torch.nn.
A total of 15 code examples of nn.AvgPool1d are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
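Before diving into the examples, here is a minimal standalone sketch (not taken from any of the projects below; shapes are arbitrary) of the basic semantics: nn.AvgPool1d expects a [N, C, L] input and slides an averaging window along the last dimension.

import torch
from torch import nn

pool = nn.AvgPool1d(kernel_size=3, stride=2)   # window of 3, step of 2
x = torch.randn(8, 16, 50)                     # [batch, channels, length]
y = pool(x)
print(y.shape)                                 # torch.Size([8, 16, 24])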
Example 1: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import AvgPool1d [as alias]
def __init__(self, norm_layer, nf0, conv_res, nclasses, input_res, pool_res, fc_n,
             nresblocks=3):
    super(MeshConvNet, self).__init__()
    self.k = [nf0] + conv_res
    self.res = [input_res] + pool_res
    norm_args = get_norm_args(norm_layer, self.k[1:])
    for i, ki in enumerate(self.k[:-1]):
        setattr(self, 'conv{}'.format(i), MResConv(ki, self.k[i + 1], nresblocks))
        setattr(self, 'norm{}'.format(i), norm_layer(**norm_args[i]))
        setattr(self, 'pool{}'.format(i), MeshPool(self.res[i + 1]))
    # global average pooling over the final resolution dimension
    self.gp = torch.nn.AvgPool1d(self.res[-1])
    # self.gp = torch.nn.MaxPool1d(self.res[-1])
    self.fc1 = nn.Linear(self.k[-1], fc_n)
    self.fc2 = nn.Linear(fc_n, nclasses)
Example 2: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import AvgPool1d [as alias]
def __init__(self, ninp, fmaps, din=0, dout=0, context=1,
             tie_context_weights=False, name='MLPBlock',
             ratio_fixed=None, range_fixed=None,
             dropin_mode='std', drop_channels=False, emb_size=100):
    super().__init__(name=name)
    self.ninp = ninp
    self.fmaps = fmaps
    self.tie_context_weights = tie_context_weights
    assert context % 2 != 0, context  # context window must have an odd width
    if tie_context_weights:
        # share one 1x1 conv across the context and average over the window
        self.W = nn.Conv1d(ninp, fmaps, 1)
        self.pool = nn.AvgPool1d(kernel_size=context, stride=1,
                                 padding=context // 2, count_include_pad=False)
    else:
        self.W = nn.Conv1d(ninp, fmaps, context, padding=context // 2)
    self.din = PatternedDropout(emb_size=emb_size, p=din,
                                dropout_mode=dropin_mode,
                                range_fixed=range_fixed,
                                ratio_fixed=ratio_fixed,
                                drop_whole_channels=drop_channels)
    self.act = nn.PReLU(fmaps)
    self.dout = nn.Dropout(dout)
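As a side note on the tie_context_weights branch above, count_include_pad=False keeps the border positions from being diluted by zero padding: each output is averaged only over the frames that actually exist in the window. A small standalone demonstration (arbitrary shapes, not code from the project):

import torch
from torch import nn

x = torch.ones(1, 1, 5)  # [N, C, L]
with_pad = nn.AvgPool1d(3, stride=1, padding=1, count_include_pad=True)
without_pad = nn.AvgPool1d(3, stride=1, padding=1, count_include_pad=False)
print(with_pad(x))     # tensor([[[0.6667, 1., 1., 1., 0.6667]]]) -- borders diluted by the zero pad
print(without_pad(x))  # tensor([[[1., 1., 1., 1., 1.]]])          -- borders averaged over real values only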
Example 3: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import AvgPool1d [as alias]
def __init__(self, no_vocabs, embedding_dim=16, window_size=1):
    super(Model, self).__init__()
    self.input_size = 2 * window_size + 1
    self.embeddings = nn.ModuleList([
        nn.Embedding(
            no_vocabs,
            embedding_dim,
            padding_idx=0
        ) for i in range(self.input_size)
    ])
    self.pooling = nn.AvgPool1d(embedding_dim)
    self.linear1 = nn.Linear(embedding_dim, 8)
    self.linear2 = nn.Linear(8, 1)
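The forward pass is not shown here, but note that nn.AvgPool1d always pools over the last axis of a [N, C, L] tensor, so AvgPool1d(embedding_dim) collapses a trailing embedding dimension to a single averaged value per position. A minimal shape sketch under that assumption (batch and window sizes are made up):

import torch
from torch import nn

embedding_dim = 16
emb = torch.randn(4, 3, embedding_dim)  # [batch, window positions, embedding_dim]
pool = nn.AvgPool1d(embedding_dim)
print(pool(emb).shape)                  # torch.Size([4, 3, 1]) -- each embedding averaged to one scalar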
Example 4: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import AvgPool1d [as alias]
def __init__(self, __C, atten=False):
    super(BC, self).__init__()
    self.__C = __C
    self.v_net = MLP([__C.IMG_FEAT_SIZE,
                      __C.BA_HIDDEN_SIZE], dropout_r=__C.DROPOUT_R)
    self.q_net = MLP([__C.HIDDEN_SIZE,
                      __C.BA_HIDDEN_SIZE], dropout_r=__C.DROPOUT_R)
    if not atten:
        self.p_net = nn.AvgPool1d(__C.K_TIMES, stride=__C.K_TIMES)
    else:
        self.dropout = nn.Dropout(__C.CLASSIFER_DROPOUT_R)  # attention
        self.h_mat = nn.Parameter(torch.Tensor(
            1, __C.GLIMPSE, 1, __C.BA_HIDDEN_SIZE).normal_())
        self.h_bias = nn.Parameter(
            torch.Tensor(1, __C.GLIMPSE, 1, 1).normal_())
Example 5: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import AvgPool1d [as alias]
def __init__(self, v_dim, q_dim, h_dim, h_out, act='ReLU', dropout=[.2, .5], k=3):
    super(BCNet, self).__init__()
    self.c = 32
    self.k = k
    self.v_dim = v_dim; self.q_dim = q_dim
    self.h_dim = h_dim; self.h_out = h_out
    self.v_net = FCNet([v_dim, h_dim * self.k], act=act, dropout=dropout[0])
    self.q_net = FCNet([q_dim, h_dim * self.k], act=act, dropout=dropout[0])
    self.dropout = nn.Dropout(dropout[1])  # attention
    if 1 < k:
        self.p_net = nn.AvgPool1d(self.k, stride=self.k)
    if h_out is None:
        pass
    elif h_out <= self.c:
        self.h_mat = nn.Parameter(torch.Tensor(1, h_out, 1, h_dim * self.k).normal_())
        self.h_bias = nn.Parameter(torch.Tensor(1, h_out, 1, 1).normal_())
    else:
        self.h_net = weight_norm(nn.Linear(h_dim * self.k, h_out), dim=None)
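p_net here averages non-overlapping groups of self.k features (kernel_size == stride == k). In low-rank bilinear pooling this is commonly followed by multiplying the pooled result by k in the forward pass, which turns the per-group average into a per-group sum; the forward code is not shown above, so treat this as an illustration of the pattern rather than the project's exact usage:

import torch
from torch import nn

k = 3
p_net = nn.AvgPool1d(k, stride=k)
x = torch.randn(2, 1, 4 * k)             # [N, C, h_dim * k] with h_dim = 4
sum_pooled = p_net(x) * k                # average over each group of k, rescaled to a sum
assert torch.allclose(sum_pooled, x.view(2, 1, 4, k).sum(dim=-1))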
Example 6: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import AvgPool1d [as alias]
# (this snippet also uses numpy as np and torch)
def __init__(self, kernel_size, segment_num=None):
    """
    Args:
        kernel_size: kernel size of the average pooling
        segment_num: number of segments for piecewise pooling (None to disable)
    """
    super().__init__()
    self.segment_num = segment_num
    if self.segment_num is not None:
        # fixed one-hot-style mask embedding: row 0 is all zeros (padding),
        # rows 1..segment_num form an identity matrix
        self.mask_embedding = nn.Embedding(segment_num + 1, segment_num)
        self.mask_embedding.weight.data.copy_(
            torch.FloatTensor(np.concatenate([np.zeros((1, segment_num)),
                                              np.identity(segment_num)], axis=0)))
        self.mask_embedding.weight.requires_grad = False
    self.pool = nn.AvgPool1d(kernel_size)
Example 7: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import AvgPool1d [as alias]
def __init__(self, mot_en_channels, body_en_channels, view_en_channels, de_channels):
    super(AutoEncoder3x, self).__init__()
    assert mot_en_channels[0] == de_channels[-1] and \
           mot_en_channels[-1] + body_en_channels[-1] + view_en_channels[-1] == de_channels[0]
    self.mot_encoder = Encoder(mot_en_channels)
    self.body_encoder = Encoder(body_en_channels, kernel_size=7,
                                global_pool=F.max_pool1d, convpool=nn.MaxPool1d, compress=True)
    self.view_encoder = Encoder(view_en_channels, kernel_size=7,
                                global_pool=F.avg_pool1d, convpool=nn.AvgPool1d, compress=True)
    self.decoder = Decoder(de_channels)
Example 8: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import AvgPool1d [as alias]
def __init__(self, __C, img_feat_size, ques_feat_size, is_first):
    super(MFB, self).__init__()
    self.__C = __C
    self.is_first = is_first
    self.proj_i = nn.Linear(img_feat_size, __C.MFB_K * __C.MFB_O)
    self.proj_q = nn.Linear(ques_feat_size, __C.MFB_K * __C.MFB_O)
    self.dropout = nn.Dropout(__C.DROPOUT_R)
    self.pool = nn.AvgPool1d(__C.MFB_K, stride=__C.MFB_K)
Example 9: forward
# Required import: from torch import nn [as alias]
# Or: from torch.nn import AvgPool1d [as alias]
def forward(self, x):
    r"""
    :param torch.Tensor x: input tensor of shape [N, C, L]
    :return: torch.Tensor x: result of shape [N, C] after average pooling
    """
    # [N, C, L] -> [N, C]
    kernel_size = x.size(2)
    pooling = nn.AvgPool1d(
        kernel_size=kernel_size,
        stride=self.stride,
        padding=self.padding)
    x = pooling(x)
    return x.squeeze(dim=-1)
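Building the pooling module inside forward works, but the same global average (assuming self.padding is 0) can also be written with the functional API or a plain mean; a hedged equivalent sketch:

import torch
import torch.nn.functional as F

x = torch.randn(4, 8, 20)                                      # [N, C, L]
y1 = F.avg_pool1d(x, kernel_size=x.size(2)).squeeze(dim=-1)    # global average pooling
y2 = x.mean(dim=-1)
assert torch.allclose(y1, y2)                                  # both yield the [N, C] global average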
Example 10: avgpooling_factory
# Required import: from torch import nn [as alias]
# Or: from torch.nn import AvgPool1d [as alias]
def avgpooling_factory(dim):
    # map a spatial dimensionality (1, 2 or 3) to the matching pooling class
    types = [nn.AvgPool1d, nn.AvgPool2d, nn.AvgPool3d]
    return types[dim - 1]
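A brief usage note (the calls below are hypothetical, not from the source project): the factory maps a spatial dimensionality to the matching pooling class, which can then be instantiated like any other module.

from torch import nn

def avgpooling_factory(dim):
    types = [nn.AvgPool1d, nn.AvgPool2d, nn.AvgPool3d]
    return types[dim - 1]

pool1d = avgpooling_factory(1)(kernel_size=2)   # same as nn.AvgPool1d(kernel_size=2)
pool3d = avgpooling_factory(3)(kernel_size=2)   # same as nn.AvgPool3d(kernel_size=2)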
Example 11: test_avg_pool1d
# Required import: from torch import nn [as alias]
# Or: from torch.nn import AvgPool1d [as alias]
def test_avg_pool1d(self, input_shape, kernel_size, stride, pad, include_pad):
    if pad > kernel_size / 2:
        # Because this test is xfail, we have to fail rather than
        # just return here, otherwise these test cases unexpectedly pass.
        # This can be changed to `return` once the above radar
        # is fixed and the test is no longer xfail.
        raise ValueError("pad must be less than half the kernel size")
    model = nn.AvgPool1d(kernel_size, stride, pad, False, include_pad)
    run_numerical_test(input_shape, model)
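The guard in this test mirrors a constraint enforced by PyTorch itself: AvgPool1d requires the padding to be at most half of the kernel size, and, to the best of my knowledge, violating it raises a RuntimeError when the module is applied. A small illustration, with the failing call left commented out:

import torch
from torch import nn

pool = nn.AvgPool1d(kernel_size=3, stride=1, padding=2)   # padding > kernel_size / 2
x = torch.randn(1, 1, 10)
# pool(x)  # expected to raise a RuntimeError about the pad exceeding half the kernel size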
Example 12: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import AvgPool1d [as alias]
def __init__(self, seq_len, hidden_size, output_size):
    super(Wavelet_LSTM, self).__init__()
    self.seq_len = seq_len
    self.hidden_size = hidden_size
    self.output_size = output_size
    self.mWDN1_H = nn.Linear(seq_len, seq_len)
    self.mWDN1_L = nn.Linear(seq_len, seq_len)
    self.mWDN2_H = nn.Linear(int(seq_len / 2), int(seq_len / 2))
    self.mWDN2_L = nn.Linear(int(seq_len / 2), int(seq_len / 2))
    self.a_to_x = nn.AvgPool1d(2)  # downsample by a factor of 2 between decomposition levels
    self.sigmoid = nn.Sigmoid()
    self.lstm_xh1 = nn.LSTM(1, hidden_size, batch_first=True)
    self.lstm_xh2 = nn.LSTM(1, hidden_size, batch_first=True)
    self.lstm_xl2 = nn.LSTM(1, hidden_size, batch_first=True)
    self.output = nn.Linear(hidden_size, output_size)
    self.l_filter = [-0.0106, 0.0329, 0.0308, -0.187, -0.028, 0.6309, 0.7148, 0.2304]
    self.h_filter = [-0.2304, 0.7148, -0.6309, -0.028, 0.187, 0.0308, -0.0329, -0.0106]
    self.cmp_mWDN1_H = ToVariable(self.create_W(seq_len, False, is_comp=True))
    self.cmp_mWDN1_L = ToVariable(self.create_W(seq_len, True, is_comp=True))
    self.cmp_mWDN2_H = ToVariable(self.create_W(int(seq_len / 2), False, is_comp=True))
    self.cmp_mWDN2_L = ToVariable(self.create_W(int(seq_len / 2), True, is_comp=True))
    self.mWDN1_H.weight = torch.nn.Parameter(ToVariable(self.create_W(seq_len, False)))
    self.mWDN1_L.weight = torch.nn.Parameter(ToVariable(self.create_W(seq_len, True)))
    self.mWDN2_H.weight = torch.nn.Parameter(ToVariable(self.create_W(int(seq_len / 2), False)))
    self.mWDN2_L.weight = torch.nn.Parameter(ToVariable(self.create_W(int(seq_len / 2), True)))
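A short note on a_to_x above: nn.AvgPool1d(2) uses a stride equal to its kernel size by default, so it halves the sequence length, which is why the second decomposition level operates on int(seq_len / 2) features. A minimal shape check under that assumption:

import torch
from torch import nn

seq_len = 16
a_to_x = nn.AvgPool1d(2)          # kernel_size=2, stride defaults to the kernel size
x = torch.randn(1, 1, seq_len)
print(a_to_x(x).shape)            # torch.Size([1, 1, 8]) -- half the original length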
Example 13: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import AvgPool1d [as alias]
def __init__(self, v_dim, q_dim, h_dim, h_out, act='ReLU',
             dropout=[.2, .5], k=3):
    super(BCNet, self).__init__()
    self.c = 32
    self.k = k
    self.v_dim = v_dim
    self.q_dim = q_dim
    self.h_dim = h_dim
    self.h_out = h_out
    self.v_net = FCNet([v_dim, h_dim * self.k], act=act,
                       dropout=dropout[0])
    self.q_net = FCNet([q_dim, h_dim * self.k], act=act,
                       dropout=dropout[0])
    self.dropout = nn.Dropout(dropout[1])  # attention
    if 1 < k:
        self.p_net = nn.AvgPool1d(self.k, stride=self.k)
    if h_out is None:
        pass
    elif h_out <= self.c:
        self.h_mat = nn.Parameter(
            torch.Tensor(1, h_out, 1, h_dim * self.k).normal_())
        self.h_bias = nn.Parameter(
            torch.Tensor(1, h_out, 1, 1).normal_())
    else:
        self.h_net = weight_norm(
            nn.Linear(h_dim * self.k, h_out), dim=None)
Example 14: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import AvgPool1d [as alias]
def __init__(self, kernal_size, pre_mask, post_mask):
    super(AvgPool, self).__init__()
    # stride 1 with symmetric padding keeps the sequence length unchanged for odd kernel sizes
    self.avg_pool = nn.AvgPool1d(kernal_size, 1, padding=(kernal_size - 1) // 2)
    self.pre_mask = pre_mask
    self.post_mask = post_mask
    self.mask_opt = Mask()
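With stride 1 and padding=(kernal_size - 1) // 2, this pooling acts as a sliding-window smoother that preserves the sequence length for odd kernel sizes; the pre/post masks presumably keep padded positions from leaking into the averages. A small shape sketch under those assumptions:

import torch
from torch import nn

kernal_size = 3                   # odd window, matching the snippet's parameter name
avg_pool = nn.AvgPool1d(kernal_size, 1, padding=(kernal_size - 1) // 2)
x = torch.randn(2, 8, 30)         # [N, C, L]
print(avg_pool(x).shape)          # torch.Size([2, 8, 30]) -- length preserved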
Example 15: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import AvgPool1d [as alias]
def __init__(self, layer_conf):
    super(Pooling1D, self).__init__(layer_conf)
    self.pool = None
    if layer_conf.pool_type == "max":
        self.pool = nn.MaxPool1d(kernel_size=layer_conf.window_size, stride=layer_conf.stride,
                                 padding=layer_conf.padding)
    elif layer_conf.pool_type == "mean":
        self.pool = nn.AvgPool1d(kernel_size=layer_conf.window_size, stride=layer_conf.stride,
                                 padding=layer_conf.padding)