Python torch.flatten Method Code Examples

This article collects typical usage examples of the torch.flatten method in Python. If you are wondering how to call torch.flatten, what its arguments do, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the torch module to which the method belongs.


The following presents 15 code examples of the torch.flatten method, ordered by popularity by default.
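Before the individual examples, here is a minimal standalone sketch of how torch.flatten behaves with the start_dim argument; the tensor shape is chosen purely for illustration:

import torch

x = torch.randn(2, 64, 1, 1)                 # e.g. the output of a global average pool
print(torch.flatten(x).shape)                # torch.Size([128])   - flattens all dimensions
print(torch.flatten(x, 1).shape)             # torch.Size([2, 64]) - keeps the batch dimension
print(torch.flatten(x, start_dim=1).shape)   # torch.Size([2, 64]) - same call, keyword form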

Example 1: forward

# Required import: import torch [as alias]
# Or: from torch import flatten [as alias]
def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        #x = x.view(x.size(0), -1)
        x = torch.flatten(x, 1)
        if self.drop:
            x = self.drop(x)
        x = self.fc(x)

        return x 
Developer: zhanghang1989, Project: PyTorch-Encoding, Lines: 21, Source: resnet.py

Example 2: forward

# Required import: import torch [as alias]
# Or: from torch import flatten [as alias]
def forward(self, x):
        x = self.model(x)

        x = torch.flatten(x, start_dim=1)  # Flattens layers without losing batches

        x = self.full_conn1(x)
        x = self.norm1(x)
        x = F.relu(x)
        x = F.dropout(x)

        x = self.full_conn2(x)
        x = F.relu(x)
        x = F.dropout(x)

        x = self.full_conn3(x)

        return x 
Developer: CMU-CREATE-Lab, Project: deep-smoke-machine, Lines: 19, Source: pytorch_ts.py

Example 3: _forward_impl

# Required import: import torch [as alias]
# Or: from torch import flatten [as alias]
def _forward_impl(self, x):
        # See note [TorchScript super()]
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        # EDIT(momohatt): Add 'start_dim='
        x = torch.flatten(x, start_dim=1)
        x = self.fc(x)

        return x 
Developer: pfnet-research, Project: chainer-compiler, Lines: 20, Source: resnet.py

Example 4: forward

# Required import: import torch [as alias]
# Or: from torch import flatten [as alias]
def forward(self, x):
        x = self.conv1(x)
        x = F.relu(x)
        x = self.conv2(x)
        x = F.max_pool2d(x, 2)
        x = self.dropout1(x)
        x = torch.flatten(x, start_dim=1) # EDIT(momohatt): Add 'start_dim='
        x = self.fc1(x)
        x = F.relu(x)
        x = self.dropout2(x)
        x = self.fc2(x)
        output = F.log_softmax(x, dim=1)
        return output


# Example input 
Developer: pfnet-research, Project: chainer-compiler, Lines: 18, Source: mnist.py

Example 5: protobuf_tensor_serializer

# Required import: import torch [as alias]
# Or: from torch import flatten [as alias]
def protobuf_tensor_serializer(worker: AbstractWorker, tensor: torch.Tensor) -> TensorDataPB:
    """Strategy to serialize a tensor using Protobuf"""
    dtype = TORCH_DTYPE_STR[tensor.dtype]

    protobuf_tensor = TensorDataPB()

    if tensor.is_quantized:
        protobuf_tensor.is_quantized = True
        protobuf_tensor.scale = tensor.q_scale()
        protobuf_tensor.zero_point = tensor.q_zero_point()
        data = torch.flatten(tensor).int_repr().tolist()
    else:
        data = torch.flatten(tensor).tolist()

    protobuf_tensor.dtype = dtype
    protobuf_tensor.shape.dims.extend(tensor.size())
    getattr(protobuf_tensor, "contents_" + dtype).extend(data)

    return protobuf_tensor 
Developer: OpenMined, Project: PySyft, Lines: 21, Source: torch_serde.py
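The serializer above records three things: the tensor's dtype, its flattened contents, and its original shape. The following is only a sketch of why flattening is lossless here for an ordinary (non-quantized) float32 tensor; it is not PySyft's actual deserializer:

import torch

t = torch.randn(3, 4)
data = torch.flatten(t).tolist()     # what ends up in the "contents_<dtype>" field
dims = list(t.size())                # what ends up in protobuf_tensor.shape.dims
restored = torch.tensor(data).reshape(dims)
assert torch.equal(t, restored)      # flatten + recorded shape reproduce the tensor exactly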

Example 6: forward

# Required import: import torch [as alias]
# Or: from torch import flatten [as alias]
def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x, layer1_sum = self.layer1(x)
        x, layer2_sum = self.layer2(x)
        x, layer3_sum = self.layer3(x)
        x, layer4_sum = self.layer4(x)

        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)

        return x, layer1_sum + layer2_sum + layer3_sum + layer4_sum 
Developer: d-li14, Project: dgconv.pytorch, Lines: 18, Source: g_resnext.py

Example 7: _forward_impl

# Required import: import torch [as alias]
# Or: from torch import flatten [as alias]
def _forward_impl(self, x):
        # See note [TorchScript super()]
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.bn5(x)
        x = self.relu(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        #x = self.fc(x)

        return x 
Developer: legolas123, Project: cv-tricks.com, Lines: 20, Source: resnet_preact_bin.py

Example 8: _forward_impl

# Required import: import torch [as alias]
# Or: from torch import flatten [as alias]
def _forward_impl(self, x):
        # See note [TorchScript super()]
        x = self.conv1(x)
        # x = self.bn1(x)
        # x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.bn5(x)
        x = self.relu(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        #x = self.fc(x)

        return x 
Developer: legolas123, Project: cv-tricks.com, Lines: 20, Source: resnet_preact.py

Example 9: forward

# Required import: import torch [as alias]
# Or: from torch import flatten [as alias]
def forward(self, x):
    x = x.reshape(280, 280, 4)
    x = torch.narrow(x, dim=2, start=3, length=1)
    x = x.reshape(1, 1, 280, 280)
    x = F.avg_pool2d(x, 10, stride=10)
    x = x / 255
    x = (x - MEAN) / STANDARD_DEVIATION

    x = self.conv1(x)
    x = F.relu(x)
    x = self.conv2(x)
    x = F.max_pool2d(x, 2)
    x = self.dropout1(x)
    x = torch.flatten(x, 1)
    x = self.fc1(x)
    x = F.relu(x)
    x = self.dropout2(x)
    x = self.fc2(x)
    output = F.softmax(x, dim=1)
    return output 
Developer: elliotwaite, Project: pytorch-to-javascript-with-onnx-js, Lines: 22, Source: inference_mnist_model.py

Example 10: forward

# Required import: import torch [as alias]
# Or: from torch import flatten [as alias]
def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        #x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)

        return x 
Developer: facebookresearch, Project: fastMRI, Lines: 18, Source: unpooled_resnet.py

Example 11: forward

# Required import: import torch [as alias]
# Or: from torch import flatten [as alias]
def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)

        return x 
Developer: facebookresearch, Project: fastMRI, Lines: 18, Source: torchvision_resnet.py

Example 12: forward

# Required import: import torch [as alias]
# Or: from torch import flatten [as alias]
def forward(self, x):
        x = self.conv1(x)       # batch*32*20*20
        x = self.bn1(x)
        x = F.relu(x)
        x = F.max_pool2d(x, 2)  # batch*32*10*10
        x = self.conv2(x)       # batch*64*8*8
        x = self.bn2(x)
        x = F.relu(x)
        x = F.max_pool2d(x, 2)  # batch*64*4*4
        x = self.conv3(x)       # batch*128*2*2
        x = self.bn3(x)
        x = F.relu(x)
        x = torch.flatten(x, 1) # batch*512
        x = self.fc1(x)         # batch*128
        x = F.relu(x)
        x = self.fc2(x)         # batch*55
        x = F.log_softmax(x, dim=1)
        return x 
Developer: zhongxinghong, Project: PKUAutoElective, Lines: 20, Source: cnn.py

Example 13: findwordlist

# Required import: import torch [as alias]
# Or: from torch import flatten [as alias]
def findwordlist(template, closewordind, vocab, numwords=10, addeos=False):
    """
    Based on a template sentence, find the candidate word list.
    
    Input:
        template: source sentence.
        closewordind: precalculated 100 closest word indices (using character embeddings). torch.LongTensor.
        vocab: full vocabulary.
        numwords: number of closest words per word in the template.
        addeos: whether to include '<eos>' in the candidate word list.
    """
    if isinstance(template, str):
        template = template.split()
    templateind = closewordind.new_tensor([vocab.stoi[w] for w in template])
    # subvocab = closewordind[templateind, :numwords].flatten().cpu()  # torch.flatten() only exists from PyTorch 0.4.1
    subvocab = closewordind[templateind, :numwords].view(-1).cpu()
    if addeos:
        subvocab = torch.cat([subvocab, torch.LongTensor([vocab.stoi['<eos>']])])
    subvocab = subvocab.unique(sorted=True)
    word_list = [vocab.itos[i] for i in subvocab]
    
    return word_list, subvocab 
Developer: jzhou316, Project: Unsupervised-Sentence-Summarization, Lines: 24, Source: pre_word_list.py
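The commented-out line in this example notes that .view(-1) was used in place of flatten for compatibility with PyTorch versions before 0.4.1. On the result of advanced indexing, which is a fresh, contiguous tensor, the two calls are interchangeable; a small check with hypothetical index values standing in for closewordind and templateind:

import torch

closewordind = torch.arange(12).view(3, 4)    # toy stand-in for the precomputed closest-word indices
templateind = torch.tensor([0, 2])            # toy stand-in for the template word indices
sub = closewordind[templateind, :2]           # advanced indexing returns a new, contiguous tensor
assert torch.equal(torch.flatten(sub), sub.view(-1))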

Example 14: _forward_impl

# Required import: import torch [as alias]
# Or: from torch import flatten [as alias]
def _forward_impl(self, x):
        # See note [TorchScript super()]
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)

        return x 
Developer: intel, Project: optimized-models, Lines: 19, Source: resnet.py

Example 15: forward

# Required import: import torch [as alias]
# Or: from torch import flatten [as alias]
def forward(self, x):
        # N x 768 x 17 x 17
        x = F.avg_pool2d(x, kernel_size=5, stride=3)
        # N x 768 x 5 x 5
        x = self.conv0(x)
        # N x 128 x 5 x 5
        x = self.conv1(x)
        # N x 768 x 1 x 1
        # Adaptive average pooling
        x = F.adaptive_avg_pool2d(x, (1, 1))
        # N x 768 x 1 x 1
        x = torch.flatten(x, 1)
        # N x 768
        x = self.fc(x)
        # N x 1000
        return x 
Developer: rwightman, Project: pytorch-image-models, Lines: 18, Source: inception_v3.py


Note: The torch.flatten examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license; do not reproduce without permission.