本文整理汇总了Python中thop.profile方法的典型用法代码示例。如果您正苦于以下问题:Python thop.profile方法的具体用法?Python thop.profile怎么用?Python thop.profile使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类thop
的用法示例。
在下文中一共展示了thop.profile方法的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: main
# 需要导入模块: import thop [as 别名]
# 或者: from thop import profile [as 别名]
def main():
    """Build the requested model and print its MACs / parameter counts via thop."""
    args = get_args()

    # Forward rectified-convolution options to the model factory only when enabled.
    model_kwargs = (
        {'rectified_conv': True, 'rectify_avg': args.rectify_avg}
        if args.rectify else {}
    )

    model = encoding.models.get_model(args.model, **model_kwargs)
    print(model)

    # One dummy batch at the configured crop size is enough for profiling.
    dummy_images = torch.rand(1, 3, args.crop_size, args.crop_size)
    macs, params = profile(model, inputs=(dummy_images,))
    macs, params = clever_format([macs, params], "%.3f")
    print(f"macs: {macs}, params: {params}")
示例2: __init__
# 需要导入模块: import thop [as 别名]
# 或者: from thop import profile [as 别名]
def __init__(self, loadweights=True, downsample=4, model_path='pretrained_model/mobilenetv2_1.0-0c6065bc.pth'):
    """Expose a MobileNetV2 feature extractor as three consecutive stages.

    Args:
        loadweights: when True, load pretrained weights from ``model_path``.
        downsample: kept for interface compatibility; not read here —
            presumably selected a feature cut-off in an earlier revision.
        model_path: path to the pretrained MobileNetV2 state dict.
    """
    super(mobilenetv2_base, self).__init__()
    backbone = MobileNetV2(width_mult=1.0)
    if loadweights:
        backbone.load_state_dict(torch.load(model_path))
    # Split ``features`` into three sequential chunks so callers can tap
    # intermediate activations at different depths.
    stages = backbone.features
    self.feature3 = nn.Sequential(stages[:7])
    self.feature4 = nn.Sequential(stages[7:14])
    self.feature5 = nn.Sequential(stages[14:])
示例3: print_flops
# 需要导入模块: import thop [as 别名]
# 或者: from thop import profile [as 别名]
def print_flops(model):
    """Print the FLOPs of one forward pass of ``model``.

    The input shape is derived from ``config["dataset"]``; for unknown
    datasets (or when ``thop`` is not installed) a message is printed and
    the function returns without raising.
    """
    if config["dataset"] in ["Cifar10", "Cifar100"]:
        shape = (1, 3, 32, 32)
    else:
        print(f"Unknown dataset {config['dataset']} input size to compute # FLOPS")
        return
    try:
        from thop import profile
    except ImportError:
        # Was a bare ``except:``, which also swallowed KeyboardInterrupt /
        # SystemExit; only a missing package should be handled here.
        print("Please `pip install thop` to compute # FLOPS")
        return
    model = model.train()
    input_data = torch.rand(*shape)
    num_flops, num_params = profile(model, inputs=(input_data, ))
    print("Number of FLOPS:", human_format(num_flops))
示例4: show_flops_params
# 需要导入模块: import thop [as 别名]
# 或者: from thop import profile [as 别名]
def show_flops_params(model, device, input_shape=[1, 3, 1024, 2048]):
    """Profile ``model`` with thop and log GFLOPs and parameter count.

    NOTE: the list default is never mutated here, so sharing it across
    calls is safe.
    """
    #summary(model, tuple(input_shape[1:]), device=device)
    dummy = torch.randn(*input_shape).to(torch.device(device))
    flops, params = profile(model, inputs=(dummy,), verbose=False)
    logging.info('{} flops: {:.3f}G input shape is {}, params: {:.3f}M'.format(
        model.__class__.__name__, flops / 1000000000, input_shape[1:], params / 1000000))
示例5: calcFlops
# 需要导入模块: import thop [as 别名]
# 或者: from thop import profile [as 别名]
def calcFlops(model, input):
    """Return human-readable (flops, params) strings for ``model`` on ``input``."""
    raw_flops, raw_params = profile(model, inputs=(input, ))
    flops, params = clever_format([raw_flops, raw_params], "%.3f")
    print('flops: {} \nparameters: {}'.format(flops, params))
    return flops, params
示例6: _flops
# 需要导入模块: import thop [as 别名]
# 或者: from thop import profile [as 别名]
def _flops(h, w, C_in, C_out, kernel_size=3, stride=1, padding=None, dilation=1, groups=1, bias=False):
    """FLOPs of one non-slimmable ConvNorm layer on a 1 x C_in x h x w input."""
    layer = ConvNorm(C_in, C_out, kernel_size, stride, padding, dilation, groups, bias, slimmable=False)
    dummy = torch.randn(1, C_in, h, w)
    flops, _ = profile(layer, inputs=(dummy,), verbose=False)
    return flops
示例7: objective
# 需要导入模块: import thop [as 别名]
# 或者: from thop import profile [as 别名]
def objective(trial):
    """Optuna multi-objective: train an MNIST model, return (flops, accuracy)."""
    # Generate the model.
    model = define_model(trial).to(DEVICE)

    # Sample the optimizer and learning rate for this trial.
    optimizer_name = trial.suggest_categorical("optimizer", ["Adam", "RMSprop", "SGD"])
    lr = trial.suggest_uniform("lr", 1e-5, 1e-1)
    optimizer = getattr(optim, optimizer_name)(model.parameters(), lr=lr)

    # Get the MNIST dataset.
    train_loader, val_loader = get_mnist()

    # --- training ---
    model.train()
    for _epoch in range(EPOCHS):
        for data, target in train_loader:
            data = data.view(-1, 28 * 28).to(DEVICE)
            target = target.to(DEVICE)
            optimizer.zero_grad()
            loss = F.nll_loss(model(data), target)
            loss.backward()
            optimizer.step()

    # --- validation ---
    model.eval()
    correct = 0
    with torch.no_grad():
        for data, target in val_loader:
            data = data.view(-1, 28 * 28).to(DEVICE)
            target = target.to(DEVICE)
            # Index of the max log-probability is the predicted class.
            pred = model(data).argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
    accuracy = correct / N_VAL_EXAMPLES

    flops, _params = thop.profile(model, inputs=(torch.randn(1, 28 * 28),), verbose=False)
    return flops, accuracy
示例8: main
# 需要导入模块: import thop [as 别名]
# 或者: from thop import profile [as 别名]
def main():
    """Instantiate the architecture named on the command line and profile it."""
    global args
    args = parser.parse_args()

    model = models.__dict__[args.arch]()
    print(model)

    dummy = torch.randn(1, 3, 224, 224)
    model.train()
    # model.eval()
    flops, params = profile(model, inputs=(dummy, ))

    # Raw counts first, then the human-readable formatting.
    print("flops = ", flops)
    print("params = ", params)
    flops, params = clever_format([flops, params], "%.3f")
    print("flops = ", flops)
    print("params = ", params)
示例9: main
# 需要导入模块: import thop [as 别名]
# 或者: from thop import profile [as 别名]
def main():
    # Experiment directory plus mirrored stdout/file logging.
    create_exp_dir(config.save, scripts_to_save=glob.glob('*.py')+glob.glob('*.sh'))
    log_format = '%(asctime)s %(message)s'
    logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=log_format, datefmt='%m/%d %I:%M:%S %p')
    fh = logging.FileHandler(os.path.join(config.save, 'log.txt'))
    fh.setFormatter(logging.Formatter(log_format))
    logging.getLogger().addHandler(fh)
    logging.info("args = %s", str(config))
    # preparation ################
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    seed = config.seed
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
    # Model #######################################
    lasts = []
    for idx, arch_idx in enumerate(config.arch_idx):
        # Load the searched-architecture checkpoint: either the final one or
        # the one saved at a specific epoch.
        if config.load_epoch == "last":
            state = torch.load(os.path.join(config.load_path, "arch_%d.pt"%arch_idx))
        else:
            state = torch.load(os.path.join(config.load_path, "arch_%d_%d.pt"%(arch_idx, int(config.load_epoch))))
        # Rebuild the network from the saved alpha/beta/ratio architecture tensors.
        model = Network(
            [state["alpha_%d_0"%arch_idx].detach(), state["alpha_%d_1"%arch_idx].detach(), state["alpha_%d_2"%arch_idx].detach()],
            [None, state["beta_%d_1"%arch_idx].detach(), state["beta_%d_2"%arch_idx].detach()],
            [state["ratio_%d_0"%arch_idx].detach(), state["ratio_%d_1"%arch_idx].detach(), state["ratio_%d_2"%arch_idx].detach()],
            num_classes=config.num_classes, layers=config.layers, Fch=config.Fch, width_mult_list=config.width_mult_list, stem_head_width=config.stem_head_width[idx], ignore_skip=arch_idx==0)
        # Choose the final branch pair with the better accuracy/latency objective.
        mIoU02 = state["mIoU02"]; latency02 = state["latency02"]; obj02 = objective_acc_lat(mIoU02, latency02)
        mIoU12 = state["mIoU12"]; latency12 = state["latency12"]; obj12 = objective_acc_lat(mIoU12, latency12)
        if obj02 > obj12: last = [2, 0]
        else: last = [2, 1]
        lasts.append(last)
        model.build_structure(last)
        logging.info("net: " + str(model))
        # Save per-branch operation plots (with widths when slimmable).
        for b in last:
            if len(config.width_mult_list) > 1:
                plot_op(getattr(model, "ops%d"%b), getattr(model, "path%d"%b), width=getattr(model, "widths%d"%b), head_width=config.stem_head_width[idx][1], F_base=config.Fch).savefig(os.path.join(config.save, "ops_%d_%d.png"%(arch_idx,b)), bbox_inches="tight")
            else:
                plot_op(getattr(model, "ops%d"%b), getattr(model, "path%d"%b), F_base=config.Fch).savefig(os.path.join(config.save, "ops_%d_%d.png"%(arch_idx,b)), bbox_inches="tight")
        plot_path_width(model.lasts, model.paths, model.widths).savefig(os.path.join(config.save, "path_width%d.png"%arch_idx))
        plot_path_width([2, 1, 0], [model.path2, model.path1, model.path0], [model.widths2, model.widths1, model.widths0]).savefig(os.path.join(config.save, "path_width_all%d.png"%arch_idx))
        # Profile at a fixed 1024x2048 input (Cityscapes-sized — TODO confirm).
        flops, params = profile(model, inputs=(torch.randn(1, 3, 1024, 2048),), verbose=False)
        logging.info("params = %fMB, FLOPs = %fGB", params / 1e6, flops / 1e9)
        logging.info("ops:" + str(model.ops))
        logging.info("path:" + str(model.paths))
        model = model.cuda()
    #####################################################
    # Latency is measured only for the last model built in the loop above.
    print(config.save)
    latency = compute_latency(model, (1, 3, config.image_height, config.image_width))
    logging.info("FPS:" + str(1000./latency))
示例10: count_flops_params
# 需要导入模块: import thop [as 别名]
# 或者: from thop import profile [as 别名]
def count_flops_params(model: nn.Module, input_size, verbose=True):
    """
    Count FLOPs and Params of the given model.
    This function would identify the mask on the module
    and take the pruned shape into consideration.
    Note that, for structured pruning, we only identify
    the remained filters according to their masks, without
    taking the pruned input channels into consideration,
    so the calculated FLOPs will be larger than the real number.

    Parameters
    ---------
    model : nn.Module
        target model.
    input_size: list, tuple
        the input shape of data
    verbose : bool
        forwarded to ``thop.profile``

    Returns
    -------
    flops: float
        total flops of the model
    params:
        total params of the model
    """
    assert input_size is not None
    # Run the dummy input on the same device as the model's parameters.
    device = next(model.parameters()).device
    inputs = torch.randn(input_size).to(device)
    hook_module_list = []
    prev_m = None
    for m in model.modules():
        weight_mask = None
        m_type = type(m)
        if m_type in custom_ops:
            # When the previous module is a pruner wrapper, expose its weight
            # mask on this module so the custom thop op can account for pruning.
            if isinstance(prev_m, PrunerModuleWrapper):
                weight_mask = prev_m.weight_mask
            m.register_buffer('weight_mask', weight_mask)
            hook_module_list.append(m)
        prev_m = m
    flops, params = profile(model, inputs=(inputs, ), custom_ops=custom_ops, verbose=verbose)
    # Remove the temporary mask buffers so the model is left unchanged.
    for m in hook_module_list:
        m._buffers.pop("weight_mask")
    return flops, params
示例11: evaluate_detector
# 需要导入模块: import thop [as 别名]
# 或者: from thop import profile [as 别名]
def evaluate_detector(args):
    """Evaluate directional point detector."""
    args.cuda = not args.disable_cuda and torch.cuda.is_available()
    device = torch.device('cuda:' + str(args.gpu_id) if args.cuda else 'cpu')
    # Inference only — no gradients anywhere in this function.
    torch.set_grad_enabled(False)
    dp_detector = DirectionalPointDetector(
        3, args.depth_factor, config.NUM_FEATURE_MAP_CHANNEL).to(device)
    if args.detector_weights:
        dp_detector.load_state_dict(torch.load(args.detector_weights))
    dp_detector.eval()
    psdataset = ParkingSlotDataset(args.dataset_directory)
    logger = util.Logger(enable_visdom=args.enable_visdom)
    total_loss = 0
    position_errors = []
    direction_errors = []
    ground_truths_list = []
    predictions_list = []
    for iter_idx, (image, marking_points) in enumerate(psdataset):
        ground_truths_list.append(marking_points)
        # Add a batch dimension for a single-image forward pass.
        image = torch.unsqueeze(image, 0).to(device)
        prediction = dp_detector(image)
        # Squared error against the objective map, masked by ``gradient``.
        objective, gradient = generate_objective([marking_points], device)
        loss = (prediction - objective) ** 2
        total_loss += torch.sum(loss*gradient).item()
        # Collect predicted points above a low confidence floor (0.01).
        pred_points = get_predicted_points(prediction[0], 0.01)
        predictions_list.append(pred_points)
        dists, angles = collect_error(marking_points, pred_points,
                                      config.CONFID_THRESH_FOR_POINT)
        position_errors += dists
        direction_errors += angles
        logger.log(iter=iter_idx, total_loss=total_loss)
    # Precision/recall over all images, then average precision.
    precisions, recalls = util.calc_precision_recall(
        ground_truths_list, predictions_list, match_marking_points)
    average_precision = util.calc_average_precision(precisions, recalls)
    if args.enable_visdom:
        logger.plot_curve(precisions, recalls)
    # Profile the detector's FLOPs/params on a dummy input of the working size.
    sample = torch.randn(1, 3, config.INPUT_IMAGE_SIZE,
                         config.INPUT_IMAGE_SIZE)
    flops, params = profile(dp_detector, inputs=(sample.to(device), ))
    logger.log(average_loss=total_loss / len(psdataset),
               average_precision=average_precision,
               flops=flops,
               params=params)
示例12: bulid_up_network
# 需要导入模块: import thop [as 别名]
# 或者: from thop import profile [as 别名]
def bulid_up_network(config, criterion):
    """Build the backbone and the full body-part-representation architecture.

    The backbone type comes from ``config.model.backbone_net_name``; the
    whole model optionally loads pretrained weights, and both networks are
    summarized (params / FLOPs) via ``get_model_summary`` and ``thop``.

    Returns:
        The assembled ``Body_Part_Representation`` architecture.

    Raises:
        ValueError: if ``backbone_net_name`` is not one of the known backbones.
    """
    name = config.model.backbone_net_name
    if name == "resnet":
        backbone = BackBone_ResNet(config, is_train=True)
    elif name == "mobilenet_v2":
        backbone = BackBone_MobileNet(config, is_train=True)
    elif name == "meta_arch":
        logger.info("backbone:{}".format(config.model.backbone))
        backbone = Backbone_Arch(criterion, **config.model.backbone)
    elif name == "hrnet":
        backbone = BackBone_HRNet(config, is_train=True)
    else:
        # Previously an unknown name left ``backbone`` undefined and crashed
        # later with an opaque NameError; fail fast with a clear message.
        raise ValueError("unknown backbone_net_name: {}".format(name))

    Arch = Body_Part_Representation(config.model.keypoints_num, criterion, backbone, **config.model.subnetwork_config)
    if config.model.use_pretrained:
        Arch.load_pretrained(config.model.pretrained)

    logger.info("\n\nbackbone: params and flops")
    logger.info(get_model_summary(backbone, torch.randn(1, 3, config.model.input_size.h, config.model.input_size.w)))
    logger.info("\n\nwhole architecture: params and flops")
    logger.info(get_model_summary(Arch, torch.randn(1, 3, config.model.input_size.h, config.model.input_size.w)))

    logger.info("=========== thop statistics ==========")
    # One shared dummy input for both thop profiles.
    dump = torch.randn(1, 3, config.model.input_size.h, config.model.input_size.w)
    flops, params = profile(backbone, inputs=(dump,))
    logger.info(">>> total params of BackBone: {:.2f}M\n>>> total FLOPS of Backbone: {:.3f} G\n".format(
        (params / 1000000.0), (flops / 1000000000.0)))
    flops, params = profile(Arch, inputs=(dump,))
    logger.info(">>> total params of Whole Model: {:.2f}M\n>>> total FLOPS of Model: {:.3f} G\n".format(
        (params / 1000000.0), (flops / 1000000000.0)))
    return Arch