

Python nn.Flatten Method Code Examples

This article collects typical usage examples of the torch.nn.Flatten method in Python. If you are stuck on questions such as what exactly nn.Flatten does, how to use it, or what real nn.Flatten code looks like, the curated examples below may help. You can also explore further usage examples for the containing module, torch.nn.


The following presents 15 code examples of the nn.Flatten method, sorted by popularity by default.
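As a quick orientation before the collected examples, here is a minimal sketch of what nn.Flatten does (input shape chosen purely for illustration):

import torch
from torch import nn

# By default nn.Flatten keeps the batch dimension and flattens the rest
# (start_dim=1, end_dim=-1):
flatten = nn.Flatten()
x = torch.rand(32, 3, 28, 28)
print(flatten(x).shape)  # torch.Size([32, 2352]), since 3 * 28 * 28 = 2352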

Example 1: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Flatten [as alias]
def __init__(self,block,block_list):
        super(ResNet,self).__init__()
        self.head_conv = nn.Sequential(
            nn.Conv2d(3,64,7,2,3,bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),)
        self.maxpool_1 = nn.MaxPool2d(3,2,1)
        b_ = block.expansion
        self.layer_1 = self._make_layer(block,64,64*b_,block_list[0],1)
        self.layer_2 = self._make_layer(block,64*b_,128*b_,block_list[1],2)
        self.layer_3 = self._make_layer(block,128*b_,256*b_,block_list[2],2)
        self.layer_4 = self._make_layer(block,256*b_,512*b_,block_list[3],2)
        self.avgpool_1 = nn.AdaptiveAvgPool2d((1,1))
        self.fc_1 = nn.Sequential(
            nn.Flatten(),
            nn.Linear(512*b_,1000),
            nn.Softmax(dim=1),)
        self._initialization() 
Developer: HaiyangLiu1997 | Project: Pytorch-Networks | Lines: 20 | Source: ResNetV2.py
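The AdaptiveAvgPool2d -> Flatten -> Linear head used above recurs throughout this collection. A minimal shape walk-through, assuming an expansion factor b_ of 1 (i.e. 512 channels entering the head):

import torch
from torch import nn

head = nn.Sequential(
    nn.AdaptiveAvgPool2d((1, 1)),  # (N, 512, H, W) -> (N, 512, 1, 1)
    nn.Flatten(),                  # (N, 512, 1, 1) -> (N, 512)
    nn.Linear(512, 1000),          # (N, 512)       -> (N, 1000)
    nn.Softmax(dim=1),             # per-sample class probabilities
)
print(head(torch.rand(2, 512, 7, 7)).shape)  # torch.Size([2, 1000])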

Example 2: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Flatten [as alias]
def __init__(self,block,block_list,cardinality):
        super(ResNet,self).__init__()
        self.head_conv = nn.Sequential(
            nn.Conv2d(3,64,7,2,3,bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),)
        self.maxpool_1 = nn.MaxPool2d(3,2,1)
        b_ = block.expansion
        self.layer_1 = self._make_layer(block,64,128*b_,block_list[0],1,cardinality)
        self.layer_2 = self._make_layer(block,128*b_,256*b_,block_list[1],2,cardinality)
        self.layer_3 = self._make_layer(block,256*b_,512*b_,block_list[2],2,cardinality)
        self.layer_4 = self._make_layer(block,512*b_,1024*b_,block_list[3],2,cardinality)
        self.avgpool_1 = nn.AdaptiveAvgPool2d((1,1))
        self.fc_1 = nn.Sequential(
            nn.Flatten(),
            nn.Linear(1024*b_,1000),
            nn.Softmax(dim=1),)
        self._initialization() 
Developer: HaiyangLiu1997 | Project: Pytorch-Networks | Lines: 20 | Source: ResNeXt2016.py

Example 3: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Flatten [as alias]
# Also requires: import torch; Scale, device, and env are defined elsewhere in the source file
def __init__(self, frames=4, n_atoms=51, v_min=-10, v_max=10):
        super(QNetwork, self).__init__()
        self.n_atoms = n_atoms
        self.atoms = torch.linspace(v_min, v_max, steps=n_atoms).to(device)
        self.network = nn.Sequential(
            Scale(1/255),
            nn.Conv2d(frames, 32, 8, stride=4),
            nn.ReLU(),
            nn.Conv2d(32, 64, 4, stride=2),
            nn.ReLU(),
            nn.Conv2d(64, 64, 3, stride=1),
            nn.ReLU(),
            nn.Flatten(),
            nn.Linear(3136, 512),
            nn.ReLU(),
            nn.Linear(512, env.action_space.n * n_atoms)
        ) 
Developer: vwxyzjn | Project: cleanrl | Lines: 19 | Source: c51_atari.py

Example 4: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Flatten [as alias]
def __init__(self,k,block_list,num_init_features=64, bn_size=4, 
                 drop_rate=0, memory_efficient=False):
        super(DenseNet,self).__init__()
        self.head_conv = nn.Sequential(
            nn.Conv2d(3,num_init_features,7,2,3,bias=False),
            nn.BatchNorm2d(num_init_features),
            nn.ReLU(inplace=True),)
        self.maxpool_1 = nn.MaxPool2d(3,2,1)
        self.dense_body, self.final_channels = self._make_layers(num_init_features,
                                  bn_size,block_list,k,drop_rate, memory_efficient)
        self.avgpool_1 = nn.AdaptiveAvgPool2d((1,1))
        self.fc_1 = nn.Sequential(
            nn.Flatten(),
            nn.Linear(self.final_channels,1000),
            nn.Softmax(dim=1),)
        self._initialization() 
Developer: HaiyangLiu1997 | Project: Pytorch-Networks | Lines: 18 | Source: DenseNet2016.py

Example 5: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Flatten [as alias]
# (BasicConv and DPConv are defined elsewhere in the source file)
def __init__(self,):
        super(MobileNet_V1,self).__init__()
        self.conv = nn.Sequential(BasicConv(3,32,3,2,1),
             DPConv(32,64,1),
             DPConv(64,128,2),
             DPConv(128,128,1),
             DPConv(128,256,2),
             DPConv(256,256,1),
             DPConv(256,512,2),

             DPConv(512,512,1),
             DPConv(512,512,1),
             DPConv(512,512,1),
             DPConv(512,512,1),
             DPConv(512,512,1),

             DPConv(512,1024,2),
             DPConv(1024,1024,1),)
        
        self.final = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Flatten(),
            nn.Linear(1024,1000),
            nn.Softmax(dim=1)
        ) 
Developer: HaiyangLiu1997 | Project: Pytorch-Networks | Lines: 27 | Source: MobileNet.py

Example 6: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Flatten [as alias]
# (Scale, layer_init, and envs are defined elsewhere in the source file)
def __init__(self, frames=4):
        super(Agent, self).__init__()
        self.network = nn.Sequential(
            Scale(1/255),
            layer_init(nn.Conv2d(frames, 32, 8, stride=4)),
            nn.ReLU(),
            layer_init(nn.Conv2d(32, 64, 4, stride=2)),
            nn.ReLU(),
            layer_init(nn.Conv2d(64, 64, 3, stride=1)),
            nn.ReLU(),
            nn.Flatten(),
            layer_init(nn.Linear(3136, 512)),
            nn.ReLU()
        )
        self.actor = layer_init(nn.Linear(512, envs.action_space.n), std=0.01)
        self.critic = layer_init(nn.Linear(512, 1), std=1) 
Developer: vwxyzjn | Project: cleanrl | Lines: 18 | Source: ppo_atari.py

Example 7: build_model

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Flatten [as alias]
# Also requires: import torch (for torch.nn.Linear)
def build_model(self, num_channel, num_sample, num_neighbor, num_class):
        rep1, stride1 = 4, 4
        num_filter1, num_filter2 = 16, 8
        self.conv1 = nn.Conv1d(num_channel, num_filter1, rep1, stride=stride1, groups=1)
        self.conv2 = nn.Conv1d(num_filter1, num_filter2, num_neighbor, stride=1, groups=1)
        
        num_lin = (int(num_sample * num_neighbor/ stride1 ) - num_neighbor + 1)  * num_filter2
        self.lin1 = torch.nn.Linear(num_lin, 128)
        self.lin2 = torch.nn.Linear(128, num_class)
        
        self.nn = nn.Sequential(
            self.conv1,
            nn.ReLU(),
            self.conv2,
            nn.ReLU(),
            nn.Flatten(),
            self.lin1,
            nn.ReLU(),
            nn.Dropout(0.2),
            self.lin2,
            nn.Softmax(dim=1),  # explicit dim; the implicit default is deprecated
        )
        
        self.criterion = nn.CrossEntropyLoss()
        # self.criterion = nn.NLLLoss()
        # note: CrossEntropyLoss applies log-softmax internally, so the
        # Softmax layer above effectively gets applied twice during training
Developer: THUDM | Project: cogdl | Lines: 27 | Source: patchy_san.py

Example 8: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Flatten [as alias]
# Also requires: import numpy as np (for np.prod)
def __init__(self, action_num=2, state_shape=None, mlp_layers=None):
        ''' Initialize the Q network

        Args:
            action_num (int): number of legal actions
            state_shape (list): shape of state tensor
            mlp_layers (list): output size of each fc layer
        '''
        super(EstimatorNetwork, self).__init__()

        self.action_num = action_num
        self.state_shape = state_shape
        self.mlp_layers = mlp_layers

        # build the Q network
        layer_dims = [np.prod(self.state_shape)] + self.mlp_layers
        fc = [nn.Flatten()]
        fc.append(nn.BatchNorm1d(layer_dims[0]))
        for i in range(len(layer_dims)-1):
            fc.append(nn.Linear(layer_dims[i], layer_dims[i+1], bias=True))
            fc.append(nn.Tanh())
        fc.append(nn.Linear(layer_dims[-1], self.action_num, bias=True))
        self.fc_layers = nn.Sequential(*fc) 
Developer: datamllab | Project: rlcard | Lines: 25 | Source: dqn_agent_pytorch.py
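For concreteness, a hypothetical instantiation of the network above (the argument values are illustrative, not taken from the rlcard source):

import torch

net = EstimatorNetwork(action_num=4, state_shape=[6, 9], mlp_layers=[64, 64])
# fc_layers is then:
#   Flatten -> BatchNorm1d(54) -> Linear(54, 64) -> Tanh
#           -> Linear(64, 64) -> Tanh -> Linear(64, 4)
net.eval()  # eval mode lets BatchNorm1d accept a single-sample batch
q_values = net.fc_layers(torch.rand(1, 6, 9))  # shape: (1, 4)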

Example 9: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Flatten [as alias]
# Also requires: import numpy as np (for np.prod)
def __init__(self, action_num=2, state_shape=None, mlp_layers=None):
        ''' Initialize the policy network.  It's just a bunch of ReLU
        layers with no activation on the final one, initialized with
        Xavier (sonnet.nets.MLP and tensorflow defaults)

        Args:
            action_num (int): number of output actions
            state_shape (list): shape of state tensor for each sample
            mlp_layers (list): output size of each mlp layer including final
        '''
        super(AveragePolicyNetwork, self).__init__()

        self.action_num = action_num
        self.state_shape = state_shape
        self.mlp_layers = mlp_layers

        # set up mlp w/ relu activations
        layer_dims = [np.prod(self.state_shape)] + self.mlp_layers
        mlp = [nn.Flatten()]
        mlp.append(nn.BatchNorm1d(layer_dims[0]))
        for i in range(len(layer_dims)-1):
            mlp.append(nn.Linear(layer_dims[i], layer_dims[i+1]))
            if i != len(layer_dims) - 2: # all but final have relu
                mlp.append(nn.ReLU())
        self.mlp = nn.Sequential(*mlp) 
Developer: datamllab | Project: rlcard | Lines: 27 | Source: nfsp_agent_pytorch.py

Example 10: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Flatten [as alias]
# (Tuple comes from typing; conv2d_size_out is defined elsewhere in the class)
def __init__(self, input_shape: Tuple[int]):
        """
        Args:
            input_shape (Tuple[int]): Shape of input tensor.
        """
        super().__init__()
        assert len(input_shape) == 3
        c, h, w = input_shape
        self.conv1 = nn.Conv2d(in_channels=c, out_channels=64, kernel_size=3)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=2)
        self.flatten = nn.Flatten()

        for conv in [self.conv1, self.conv2]:
            h_kernel, w_kernel = conv.kernel_size
            h_stride, w_stride = conv.stride
            c = conv.out_channels
            h, w = self.conv2d_size_out(
                size=(h, w),
                kernel_size=(h_kernel, w_kernel),
                stride=(h_stride, w_stride),
            )

        self.fc1 = nn.Linear(in_features=c * h * w, out_features=10) 
Developer: catalyst-team | Project: catalyst | Lines: 26 | Source: test_tracer_callback.py
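The snippet above calls self.conv2d_size_out, which is not shown in this excerpt. A plausible sketch of such a helper, assuming the standard output-size formula for an unpadded, dilation-1 convolution, out = (in - kernel) // stride + 1:

def conv2d_size_out(size, kernel_size, stride):
    """Output (h, w) of a Conv2d with no padding and dilation 1."""
    (h, w), (h_k, w_k), (h_s, w_s) = size, kernel_size, stride
    return (h - h_k) // h_s + 1, (w - w_k) // w_s + 1

print(conv2d_size_out((28, 28), (3, 3), (1, 1)))  # (26, 26)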

Example 11: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Flatten [as alias]
# (Tuple comes from typing; conv2d_size_out is defined elsewhere in the class)
def __init__(self, input_shape: Tuple[int]):
        super().__init__()
        assert len(input_shape) == 3
        c, h, w = input_shape
        self.conv1 = nn.Conv2d(in_channels=c, out_channels=64, kernel_size=3)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=2)
        self.flatten = nn.Flatten()

        for conv in [self.conv1, self.conv2]:
            h_kernel, w_kernel = conv.kernel_size
            h_stride, w_stride = conv.stride
            c = conv.out_channels
            h, w = self.conv2d_size_out(
                size=(h, w),
                kernel_size=(h_kernel, w_kernel),
                stride=(h_stride, w_stride),
            )

        self.fc1 = nn.Linear(in_features=c * h * w, out_features=10) 
Developer: catalyst-team | Project: catalyst | Lines: 22 | Source: test_gradnorm_logger.py

Example 12: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Flatten [as alias]
# (get_same_padding_conv2d is defined elsewhere in the source file)
def __init__(self, features, blocks, dropout_rate=0.2, num_classes=1000, input_size=224):
        super().__init__()
        self.features = features
        # blocks
        self.blocks = blocks
        # head
        Conv2D = get_same_padding_conv2d(input_size//32)
        self.conv_head = nn.Sequential(
            Conv2D(320, 1280, kernel_size=3, stride=2),
            nn.BatchNorm2d(1280),
            nn.ReLU(True),
        )
        # pool + fc
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.flatten = nn.Flatten()
        self._dropout = nn.Dropout(dropout_rate) if dropout_rate > 0 else None
        self.fc = nn.Linear(1280, num_classes) 
Developer: awslabs | Project: autogluon | Lines: 19 | Source: train_enas_imagenet.py

Example 13: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Flatten [as alias]
# (BasicConv is defined elsewhere in the source file)
def __init__(self,block_config):
        super(_DarkNet,self).__init__()
        self.headconv = nn.Sequential(BasicConv(3,32,3,1,1))
        self.in_dim = 32     
        self.layers = self._make_layers(block_config)
        self.final = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Flatten(),
            nn.Linear(1024,1000),
            nn.Softmax(dim=1)
        ) 
Developer: HaiyangLiu1997 | Project: Pytorch-Networks | Lines: 13 | Source: Darknet2016.py

Example 14: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Flatten [as alias]
def __init__(self,block_config,groups):
        super(_ShuffleNet,self).__init__()
        self.head_conv = nn.Sequential(
            nn.Conv2d(3,24,3,2,1,bias=False),
            nn.BatchNorm2d(24),
            nn.ReLU(inplace=True),)
        self.maxpool_1 = nn.MaxPool2d(3,2,1)
        self.layer_1 = self._make_layer(24,block_config[0][1],block_config[0][0],groups)
        self.layer_2 = self._make_layer(block_config[0][1],block_config[1][1],block_config[1][0],groups)
        self.layer_3 = self._make_layer(block_config[1][1],block_config[2][1],block_config[2][0],groups)
        self.avgpool_1 = nn.AdaptiveAvgPool2d((1,1))
        self.fc_1 = nn.Sequential(
            nn.Flatten(),
            nn.Linear(1536,1000),
            nn.Softmax(dim=1),)
Developer: HaiyangLiu1997 | Project: Pytorch-Networks | Lines: 17 | Source: ShuffleNet.py

Example 15: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import Flatten [as alias]
# (Swish is defined elsewhere in the source file)
def __init__(self,in_dim,ratio):
        super(_SElayer,self).__init__()
        self.gap = nn.AdaptiveAvgPool2d((1,1))
        reduced_dim = max(1, in_dim//ratio)
        self.fc1 = nn.Sequential(nn.Flatten(),
                   nn.Linear(in_dim, reduced_dim),
                   Swish(),
                   nn.Linear(reduced_dim, in_dim),
                   nn.Softmax(dim=1),) 
Developer: HaiyangLiu1997 | Project: Pytorch-Networks | Lines: 11 | Source: EfficientNet2019.py


Note: The torch.nn.Flatten examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright in the source code remains with the original authors. Consult each project's license before distributing or using the code; please do not republish without permission.