當前位置: 首頁>>代碼示例>>Python>>正文


Python thop.profile方法代碼示例

本文整理匯總了Python中thop.profile方法的典型用法代碼示例。如果您正苦於以下問題:Python thop.profile方法的具體用法?Python thop.profile怎麽用?Python thop.profile使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在thop的用法示例。


在下文中一共展示了thop.profile方法的12個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Python代碼示例。

示例1: main

# 需要導入模塊: import thop [as 別名]
# 或者: from thop import profile [as 別名]
def main():
    """Build the requested encoding model and report its MACs and parameter count."""
    args = get_args()

    # Forward optional rectified-convolution settings to the model factory.
    extra_kwargs = {}
    if args.rectify:
        extra_kwargs['rectified_conv'] = True
        extra_kwargs['rectify_avg'] = args.rectify_avg

    model = encoding.models.get_model(args.model, **extra_kwargs)
    print(model)

    # A single dummy batch sized to the configured crop.
    sample = torch.rand(1, 3, args.crop_size, args.crop_size)

    raw = profile(model, inputs=(sample,))
    macs, params = clever_format(list(raw), "%.3f")

    print(f"macs: {macs}, params: {params}")
開發者ID:zhanghang1989,項目名稱:PyTorch-Encoding,代碼行數:20,代碼來源:test_flops.py

示例2: __init__

# 需要導入模塊: import thop [as 別名]
# 或者: from thop import profile [as 別名]
def __init__(self, loadweights=True, downsample=4, model_path='pretrained_model/mobilenetv2_1.0-0c6065bc.pth'):
        """Split a MobileNetV2 backbone into three sequential feature stages.

        ``loadweights`` restores pretrained weights from ``model_path``.
        ``downsample`` is accepted for interface compatibility but is not
        used by this constructor.
        """
        super(mobilenetv2_base, self).__init__()

        backbone = MobileNetV2(width_mult=1.0)
        if loadweights:
            backbone.load_state_dict(torch.load(model_path))

        # Stage boundaries within MobileNetV2's feature stack:
        # features[:7], features[7:14], features[14:].
        self.feature3 = nn.Sequential(backbone.features[:7])
        self.feature4 = nn.Sequential(backbone.features[7:14])
        self.feature5 = nn.Sequential(backbone.features[14:])
開發者ID:HuiZeng,項目名稱:Grid-Anchor-based-Image-Cropping-Pytorch,代碼行數:20,代碼來源:croppingModel.py

示例3: print_flops

# 需要導入模塊: import thop [as 別名]
# 或者: from thop import profile [as 別名]
def print_flops(model):
    """Log the FLOP count of ``model`` for a known dataset input size.

    Prints a message and returns early when the configured dataset has an
    unknown input size or when the optional ``thop`` dependency is missing.
    """
    shape = None
    if config["dataset"] in ["Cifar10", "Cifar100"]:
        shape = (1, 3, 32, 32)
    else:
        print(f"Unknown dataset {config['dataset']} input size to compute # FLOPS")
        return

    try:
        from thop import profile
    except ImportError:
        # Narrowed from a bare `except:` — only a missing package should
        # trigger the install hint; anything else should propagate.
        print("Please `pip install thop` to compute # FLOPS")
        return

    model = model.train()
    input_data = torch.rand(*shape)
    num_flops, num_params = profile(model, inputs=(input_data, ))
    print("Number of FLOPS:", human_format(num_flops))
開發者ID:epfml,項目名稱:attention-cnn,代碼行數:20,代碼來源:train.py

示例4: show_flops_params

# 需要導入模塊: import thop [as 別名]
# 或者: from thop import profile [as 別名]
def show_flops_params(model, device, input_shape=None):
    """Profile ``model`` on a random input and log FLOPs (G) and params (M).

    ``input_shape`` defaults to ``[1, 3, 1024, 2048]``; the mutable-list
    default argument was replaced with a ``None`` sentinel so no list object
    is shared across calls.
    """
    if input_shape is None:
        input_shape = [1, 3, 1024, 2048]
    #summary(model, tuple(input_shape[1:]), device=device)
    input = torch.randn(*input_shape).to(torch.device(device))
    flops, params = profile(model, inputs=(input,), verbose=False)

    logging.info('{} flops: {:.3f}G input shape is {}, params: {:.3f}M'.format(
        model.__class__.__name__, flops / 1000000000, input_shape[1:], params / 1000000))
開發者ID:LikeLy-Journey,項目名稱:SegmenTron,代碼行數:9,代碼來源:visualize.py

示例5: calcFlops

# 需要導入模塊: import thop [as 別名]
# 或者: from thop import profile [as 別名]
def calcFlops(model, input):
    """Profile ``model`` on ``input``; print and return human-readable
    FLOPs and parameter counts."""
    raw_flops, raw_params = profile(model, inputs=(input, ))
    flops, params = clever_format([raw_flops, raw_params], "%.3f")
    print('flops: {} \nparameters: {}'.format(flops, params))
    return flops, params
開發者ID:DeepMotionAIResearch,項目名稱:DenseMatchingBenchmark,代碼行數:7,代碼來源:test_model.py

示例6: _flops

# 需要導入模塊: import thop [as 別名]
# 或者: from thop import profile [as 別名]
def _flops(h, w, C_in, C_out, kernel_size=3, stride=1, padding=None, dilation=1, groups=1, bias=False):
        """Return the FLOPs of a single non-slimmable ConvNorm layer
        applied to a 1 x C_in x h x w input."""
        layer = ConvNorm(C_in, C_out, kernel_size, stride, padding, dilation, groups, bias, slimmable=False)
        measured_flops, _unused_params = profile(layer, inputs=(torch.randn(1, C_in, h, w),), verbose=False)
        return measured_flops
開發者ID:TAMU-VITA,項目名稱:FasterSeg,代碼行數:6,代碼來源:operations.py

示例7: objective

# 需要導入模塊: import thop [as 別名]
# 或者: from thop import profile [as 別名]
def objective(trial):
    """Optuna objective: train a sampled MNIST model, then return
    ``(flops, accuracy)`` for multi-objective optimization."""

    # Build the candidate network for this trial.
    model = define_model(trial).to(DEVICE)

    # Sample the optimizer family and learning rate.
    optimizer_name = trial.suggest_categorical("optimizer", ["Adam", "RMSprop", "SGD"])
    lr = trial.suggest_uniform("lr", 1e-5, 1e-1)
    optimizer = getattr(optim, optimizer_name)(model.parameters(), lr=lr)

    train_loader, val_loader = get_mnist()

    # Training: images are flattened to 784-dim vectors.
    model.train()
    for _epoch in range(EPOCHS):
        for data, target in train_loader:
            data = data.view(-1, 28 * 28).to(DEVICE)
            target = target.to(DEVICE)

            optimizer.zero_grad()
            loss = F.nll_loss(model(data), target)
            loss.backward()
            optimizer.step()

    # Validation: count correct argmax predictions.
    model.eval()
    correct = 0
    with torch.no_grad():
        for data, target in val_loader:
            data = data.view(-1, 28 * 28).to(DEVICE)
            target = target.to(DEVICE)
            pred = model(data).argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()

    accuracy = correct / N_VAL_EXAMPLES

    flops, _params = thop.profile(model, inputs=(torch.randn(1, 28 * 28),), verbose=False)
    return flops, accuracy
開發者ID:optuna,項目名稱:optuna,代碼行數:41,代碼來源:pytorch_simple.py

示例8: main

# 需要導入模塊: import thop [as 別名]
# 或者: from thop import profile [as 別名]
def main():
    """Instantiate the requested architecture and print its FLOPs and
    parameter count, first raw and then human-readable."""
    global args
    args = parser.parse_args()

    model = models.__dict__[args.arch]()
    print(model)

    # `dummy` avoids shadowing the built-in `input`.
    dummy = torch.randn(1, 3, 224, 224)
    model.train()
    # model.eval()

    flops, params = profile(model, inputs=(dummy, ))
    print("flops = ", flops)
    print("params = ", params)

    flops, params = clever_format([flops, params], "%.3f")
    print("flops = ", flops)
    print("params = ", params)
開發者ID:BangguWu,項目名稱:ECANet,代碼行數:16,代碼來源:paras_flops.py

示例9: main

# 需要導入模塊: import thop [as 別名]
# 或者: from thop import profile [as 別名]
def main():
    """Rebuild the searched FasterSeg networks described by the module-level
    ``config``, plot their operator/width layouts, and report FLOPs, params
    and measured FPS. All outputs (plots, log.txt) go to ``config.save``.
    """
    create_exp_dir(config.save, scripts_to_save=glob.glob('*.py')+glob.glob('*.sh'))

    # Mirror logging to stdout and to <config.save>/log.txt.
    log_format = '%(asctime)s %(message)s'
    logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=log_format, datefmt='%m/%d %I:%M:%S %p')
    fh = logging.FileHandler(os.path.join(config.save, 'log.txt'))
    fh.setFormatter(logging.Formatter(log_format))
    logging.getLogger().addHandler(fh)
    logging.info("args = %s", str(config))
    # preparation ################
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    # Seed every RNG source for reproducibility.
    seed = config.seed
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)

    # Model #######################################
    lasts = []
    for idx, arch_idx in enumerate(config.arch_idx):
        # Restore the searched architecture parameters (alphas/betas/ratios).
        if config.load_epoch == "last":
            state = torch.load(os.path.join(config.load_path, "arch_%d.pt"%arch_idx))
        else:
            state = torch.load(os.path.join(config.load_path, "arch_%d_%d.pt"%(arch_idx, int(config.load_epoch))))

        model = Network(
            [state["alpha_%d_0"%arch_idx].detach(), state["alpha_%d_1"%arch_idx].detach(), state["alpha_%d_2"%arch_idx].detach()],
            [None, state["beta_%d_1"%arch_idx].detach(), state["beta_%d_2"%arch_idx].detach()],
            [state["ratio_%d_0"%arch_idx].detach(), state["ratio_%d_1"%arch_idx].detach(), state["ratio_%d_2"%arch_idx].detach()],
            num_classes=config.num_classes, layers=config.layers, Fch=config.Fch, width_mult_list=config.width_mult_list, stem_head_width=config.stem_head_width[idx], ignore_skip=arch_idx==0)

        # Choose the branch pair whose stored accuracy/latency trade-off
        # scores higher under objective_acc_lat.
        mIoU02 = state["mIoU02"]; latency02 = state["latency02"]; obj02 = objective_acc_lat(mIoU02, latency02)
        mIoU12 = state["mIoU12"]; latency12 = state["latency12"]; obj12 = objective_acc_lat(mIoU12, latency12)
        if obj02 > obj12: last = [2, 0]
        else: last = [2, 1]
        lasts.append(last)
        model.build_structure(last)
        logging.info("net: " + str(model))
        # Plot the chosen operators (widths too when more than one width
        # multiplier is configured) for each selected branch.
        for b in last:
            if len(config.width_mult_list) > 1:
                plot_op(getattr(model, "ops%d"%b), getattr(model, "path%d"%b), width=getattr(model, "widths%d"%b), head_width=config.stem_head_width[idx][1], F_base=config.Fch).savefig(os.path.join(config.save, "ops_%d_%d.png"%(arch_idx,b)), bbox_inches="tight")
            else:
                plot_op(getattr(model, "ops%d"%b), getattr(model, "path%d"%b), F_base=config.Fch).savefig(os.path.join(config.save, "ops_%d_%d.png"%(arch_idx,b)), bbox_inches="tight")
        plot_path_width(model.lasts, model.paths, model.widths).savefig(os.path.join(config.save, "path_width%d.png"%arch_idx))
        plot_path_width([2, 1, 0], [model.path2, model.path1, model.path0], [model.widths2, model.widths1, model.widths0]).savefig(os.path.join(config.save, "path_width_all%d.png"%arch_idx))
        # Profile FLOPs/params on a 1024x2048 input (presumably Cityscapes
        # resolution — confirm) while the model is still on CPU.
        flops, params = profile(model, inputs=(torch.randn(1, 3, 1024, 2048),), verbose=False)
        logging.info("params = %fMB, FLOPs = %fGB", params / 1e6, flops / 1e9)
        logging.info("ops:" + str(model.ops))
        logging.info("path:" + str(model.paths))
        model = model.cuda()
        #####################################################
        print(config.save)
        # Measure end-to-end latency (ms) and report frames per second.
        latency = compute_latency(model, (1, 3, config.image_height, config.image_width))
        logging.info("FPS:" + str(1000./latency))
開發者ID:TAMU-VITA,項目名稱:FasterSeg,代碼行數:57,代碼來源:run_latency.py

示例10: count_flops_params

# 需要導入模塊: import thop [as 別名]
# 或者: from thop import profile [as 別名]
def count_flops_params(model: nn.Module, input_size, verbose=True):
    """
    Count FLOPs and Params of the given model.
    This function would identify the mask on the module
    and take the pruned shape into consideration.
    Note that, for structured pruning, we only identify
    the remained filters according to its mask, without
    taking the pruned input channels into consideration,
    so the calculated FLOPs will be larger than real number.

    Parameters
    ----------
    model : nn.Module
        target model.
    input_size: list, tuple
        the input shape of data
    verbose : bool
        whether ``thop.profile`` prints per-module details

    Returns
    -------
    flops: float
        total flops of the model
    params:
        total params of the model
    """

    assert input_size is not None

    # Run the dummy input on the same device as the model's parameters.
    device = next(model.parameters()).device
    inputs = torch.randn(input_size).to(device)

    # Temporarily attach each pruner-produced weight mask to its masked
    # module as a buffer, so the custom counting ops can read it during
    # profiling.
    hook_module_list = []
    prev_m = None
    for m in model.modules():
        weight_mask = None
        m_type = type(m)
        if m_type in custom_ops:
            # The mask lives on the wrapper that precedes the wrapped module
            # in modules() order (hence the check on the previous module).
            if isinstance(prev_m, PrunerModuleWrapper):
                weight_mask = prev_m.weight_mask

            m.register_buffer('weight_mask', weight_mask)
            hook_module_list.append(m)
        prev_m = m

    flops, params = profile(model, inputs=(inputs, ), custom_ops=custom_ops, verbose=verbose)

    # Remove the temporary buffers so the model is left unmodified.
    for m in hook_module_list:
        m._buffers.pop("weight_mask")

    return flops, params
開發者ID:microsoft,項目名稱:nni,代碼行數:52,代碼來源:counter.py

示例11: evaluate_detector

# 需要導入模塊: import thop [as 別名]
# 或者: from thop import profile [as 別名]
def evaluate_detector(args):
    """Evaluate directional point detector.

    Iterates the parking-slot dataset once, accumulating the weighted
    squared-error loss, position/direction errors and precision/recall,
    then profiles the detector's FLOPs/params and logs all metrics.
    """
    args.cuda = not args.disable_cuda and torch.cuda.is_available()
    device = torch.device('cuda:' + str(args.gpu_id) if args.cuda else 'cpu')
    # Whole run is inference-only; disable autograd globally.
    torch.set_grad_enabled(False)

    # Build the detector and optionally restore trained weights.
    dp_detector = DirectionalPointDetector(
        3, args.depth_factor, config.NUM_FEATURE_MAP_CHANNEL).to(device)
    if args.detector_weights:
        dp_detector.load_state_dict(torch.load(args.detector_weights))
    dp_detector.eval()

    psdataset = ParkingSlotDataset(args.dataset_directory)
    logger = util.Logger(enable_visdom=args.enable_visdom)

    total_loss = 0
    position_errors = []
    direction_errors = []
    ground_truths_list = []
    predictions_list = []
    for iter_idx, (image, marking_points) in enumerate(psdataset):
        ground_truths_list.append(marking_points)

        # Batch dimension of 1 — the dataset yields single images.
        image = torch.unsqueeze(image, 0).to(device)
        prediction = dp_detector(image)
        # `gradient` weights each element of the squared error.
        objective, gradient = generate_objective([marking_points], device)
        loss = (prediction - objective) ** 2
        total_loss += torch.sum(loss*gradient).item()

        # NOTE(review): 0.01 confidence threshold — presumably kept low so
        # the precision/recall sweep sees low-confidence points; confirm.
        pred_points = get_predicted_points(prediction[0], 0.01)
        predictions_list.append(pred_points)

        dists, angles = collect_error(marking_points, pred_points,
                                      config.CONFID_THRESH_FOR_POINT)
        position_errors += dists
        direction_errors += angles

        logger.log(iter=iter_idx, total_loss=total_loss)

    precisions, recalls = util.calc_precision_recall(
        ground_truths_list, predictions_list, match_marking_points)
    average_precision = util.calc_average_precision(precisions, recalls)
    if args.enable_visdom:
        logger.plot_curve(precisions, recalls)

    # Profile FLOPs/params on one dummy input of the configured size.
    sample = torch.randn(1, 3, config.INPUT_IMAGE_SIZE,
                         config.INPUT_IMAGE_SIZE)
    flops, params = profile(dp_detector, inputs=(sample.to(device), ))
    logger.log(average_loss=total_loss / len(psdataset),
               average_precision=average_precision,
               flops=flops,
               params=params)
開發者ID:Teoge,項目名稱:DMPR-PS,代碼行數:54,代碼來源:evaluate.py

示例12: bulid_up_network

# 需要導入模塊: import thop [as 別名]
# 或者: from thop import profile [as 別名]
def bulid_up_network(config, criterion):
    """Construct the full pose network: a backbone selected by
    ``config.model.backbone_net_name`` plus the body-part representation
    head defined by ``config.model.subnetwork_config``.

    The function name keeps its historical misspelling ("bulid") for
    backward compatibility with existing callers.

    Raises
    ------
    ValueError
        if ``config.model.backbone_net_name`` is not one of the supported
        backbones (previously this fell through to a confusing NameError).
    """
    backbone_name = config.model.backbone_net_name

    if backbone_name == "resnet":
        backbone = BackBone_ResNet(config, is_train=True)
    elif backbone_name == "mobilenet_v2":
        backbone = BackBone_MobileNet(config, is_train=True)
    elif backbone_name == "meta_arch":
        logger.info("backbone:{}".format(config.model.backbone))
        backbone = Backbone_Arch(criterion, **config.model.backbone)
    elif backbone_name == "hrnet":
        backbone = BackBone_HRNet(config, is_train=True)
    else:
        raise ValueError("unknown backbone_net_name: {}".format(backbone_name))

    Arch = Body_Part_Representation(config.model.keypoints_num, criterion, backbone, **config.model.subnetwork_config)

    if config.model.use_pretrained:
        Arch.load_pretrained(config.model.pretrained)

    logger.info("\n\nbackbone: params and flops")
    logger.info(get_model_summary(backbone, torch.randn(1, 3, config.model.input_size.h, config.model.input_size.w)))

    logger.info("\n\nwhole architecture: params and flops")
    logger.info(get_model_summary(Arch, torch.randn(1, 3, config.model.input_size.h, config.model.input_size.w)))

    # thop-based statistics on a single dummy input shared by both profiles.
    logger.info("=========== thop statistics ==========")
    dump = torch.randn(1, 3, config.model.input_size.h, config.model.input_size.w)
    flops, params = profile(backbone, inputs=(dump,))
    logger.info(">>> total params of BackBone: {:.2f}M\n>>> total FLOPS of Backbone: {:.3f} G\n".format(
                    (params / 1000000.0), (flops / 1000000000.0)))
    flops, params = profile(Arch, inputs=(dump,))
    logger.info(">>> total params of Whole Model: {:.2f}M\n>>> total FLOPS of Model: {:.3f} G\n".format(
                        (params / 1000000.0), (flops / 1000000000.0)))

    return Arch
開發者ID:yangsenius,項目名稱:PoseNFS,代碼行數:44,代碼來源:build_your_net.py


注:本文中的thop.profile方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。