

Python gluoncv.data Method Code Examples

This article collects typical usage examples of the Python gluoncv.data method. If you are wondering what exactly gluoncv.data does, how to call it, or how it is used in practice, the curated code examples below should help. You can also explore further usage examples of this method elsewhere in gluoncv.


The 15 code examples of gluoncv.data shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
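Before the individual examples, here is a minimal sketch of the call pattern most of the snippets below share: import gluoncv, pull in data, preprocess an image with one of the preset transforms, and feed the result to a model from the model zoo. The image path 'street.jpg' and the choice of network are placeholder assumptions for illustration only, not taken from any single example.

# Minimal sketch, assuming a local image 'street.jpg' and pretrained weights
# that can be downloaded on first use.
import gluoncv
from gluoncv import data

# Preprocess one image with the YOLO preset (the same family of helpers as
# data.transforms.presets.yolo.transform_test in Example 10 below).
x, img = data.transforms.presets.yolo.load_test('street.jpg', short=416)

# Run a pretrained detector from the GluonCV model zoo on the transformed tensor.
net = gluoncv.model_zoo.get_model('yolo3_darknet53_voc', pretrained=True)
class_ids, scores, bboxes = net(x)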

Example 1: parse_args

# Required module: import gluoncv [as alias]
# Or: from gluoncv import data [as alias]
def parse_args():
    parser = argparse.ArgumentParser(description='Train YOLO networks with random input shape.')
    parser.add_argument('--network', type=str, default='yolo3_darknet53_voc',
                        #use yolo3_darknet53_voc, yolo3_mobilenet1.0_voc, yolo3_mobilenet0.25_voc 
                        help="Base network name which serves as feature extraction base.")
    parser.add_argument('--short', type=int, default=416,
                        help='Input data shape for evaluation; use 320, 416, 512 or 608. '
                             'A larger size helps with dense objects and large input images.')
    parser.add_argument('--threshold', type=float, default=0.4,
                        help='confidence threshold for object detection')

    # Note: store_false means args.gpu defaults to True (use GPU); passing --gpu runs on CPU.
    parser.add_argument('--gpu', action='store_false',
                        help='run on the CPU instead of the GPU when this flag is passed.')
    
    args = parser.parse_args()
    return args 
Developer ID: njvisionpower, Project: Safety-Helmet-Wearing-Dataset, Lines of code: 18, Source file: test_yolo.py
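A hedged sketch of how the arguments parsed above would typically feed a detection pipeline follows; the image path 'demo.jpg' and the downstream calls are illustrative assumptions, not code from test_yolo.py.

# Hypothetical follow-up to parse_args(); 'demo.jpg' is a placeholder path.
import mxnet as mx
import gluoncv
from gluoncv import data

args = parse_args()
ctx = mx.gpu(0) if args.gpu else mx.cpu()

net = gluoncv.model_zoo.get_model(args.network, pretrained=True, ctx=ctx)
x, img = data.transforms.presets.yolo.load_test('demo.jpg', short=args.short)
class_ids, scores, bboxes = net(x.as_in_context(ctx))

# Keep only detections above the confidence threshold.
keep = scores[0, :, 0] > args.threshold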

Example 2: benchmarking

# Required module: import gluoncv [as alias]
# Or: from gluoncv import data [as alias]
def benchmarking(model, args):
    bs = args.batch_size
    num_iterations = args.num_iterations
    input_shape = (bs, 3, args.image_shape, args.image_shape)
    size = num_iterations * bs
    data = mx.random.uniform(-1.0, 1.0, shape=input_shape, ctx=args.ctx[0], dtype='float32')
    dry_run = 5

    with tqdm(total=size+dry_run*bs) as pbar:
        for n in range(dry_run + num_iterations):
            if n == dry_run:
                tic = time.time()
            outputs = model(data)
            for output in outputs:
                output.wait_to_read()
            pbar.update(bs)
    speed = size / (time.time() - tic)
    print('With batch size %d, %d batches, throughput is %f imgs/sec' % (bs, num_iterations, speed))
Developer ID: dmlc, Project: gluon-cv, Lines of code: 20, Source file: test.py
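A hedged usage sketch for benchmarking(): the argparse.Namespace fields below simply mirror the attributes the function reads and are assumptions about the real test.py command line, as is the choice of model.

# Assumed harness around benchmarking(); the model name and settings are illustrative.
import argparse
import mxnet as mx
import gluoncv

args = argparse.Namespace(batch_size=8, num_iterations=100,
                          image_shape=224, ctx=[mx.cpu(0)])
model = gluoncv.model_zoo.get_model('resnet50_v1', pretrained=True, ctx=args.ctx[0])
model.hybridize()
benchmarking(model, args)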

Example 3: training

# Required module: import gluoncv [as alias]
# Or: from gluoncv import data [as alias]
def training(self, epoch):
        tbar = tqdm(self.train_data)
        train_loss = 0.0
        for i, (data, target) in enumerate(tbar):
            with autograd.record(True):
                outputs = self.net(data.astype(args.dtype, copy=False))
                losses = self.criterion(outputs, target)
                mx.nd.waitall()
                autograd.backward(losses)
            self.optimizer.step(self.args.batch_size)
            for loss in losses:
                train_loss += np.mean(loss.asnumpy()) / len(losses)
            tbar.set_description('Epoch %d, training loss %.3f' % \
                (epoch, train_loss/(i+1)))
            if i != 0 and i % self.args.log_interval == 0:
                self.logger.info('Epoch %d iteration %04d/%04d: training loss %.3f' % \
                    (epoch, i, len(self.train_data), train_loss/(i+1)))
            mx.nd.waitall()

        # save every epoch
        if self.args.no_val:
            save_checkpoint(self.net.module, self.args, epoch, 0, False) 
Developer ID: dmlc, Project: gluon-cv, Lines of code: 24, Source file: train.py

Example 4: training

# Required module: import gluoncv [as alias]
# Or: from gluoncv import data [as alias]
def training(self, epoch):
        tbar = tqdm(self.train_data)
        train_loss = 0.0
        for i, (data, target) in enumerate(tbar):
            with autograd.record(True):
                outputs = self.net(data.astype(args.dtype, copy=False))
                losses = self.criterion(outputs, target)
                mx.nd.waitall()
                autograd.backward(losses)
            self.optimizer.step(self.args.batch_size)
            for loss in losses:
                train_loss += loss.asnumpy()[0] / len(losses)
            tbar.set_description('Epoch {}, training loss {}'.format(epoch, train_loss / (i + 1)))
            mx.nd.waitall()

        # save every epoch
        save_checkpoint(self.net.module, self.args, False) 
Developer ID: osmr, Project: imgclsmob, Lines of code: 19, Source file: train_gl_seg.py

Example 5: training

# Required module: import gluoncv [as alias]
# Or: from gluoncv import data [as alias]
def training(self, epoch):
        tbar = tqdm(self.train_data)
        train_loss = 0.0
        alpha = 0.2
        for i, (data, target) in enumerate(tbar):
            with autograd.record(True):
                outputs = self.net(data.astype(args.dtype, copy=False))
                losses = self.criterion(outputs, target)
                mx.nd.waitall()
                autograd.backward(losses)
            self.optimizer.step(self.args.batch_size)
            for loss in losses:
                train_loss += loss.asnumpy()[0] / len(losses)
            tbar.set_description('Epoch %d, training loss %.3f'%\
                (epoch, train_loss/(i+1)))
            mx.nd.waitall()

        # save every epoch
        save_checkpoint(self.net.module, self.args, False) 
Developer ID: Angzz, Project: panoptic-fpn-gluon, Lines of code: 21, Source file: train.py

Example 6: test_quantization

# Required module: import gluoncv [as alias]
# Or: from gluoncv import data [as alias]
def test_quantization(model, args, test_data, size, num_class, pred_offset):
    # output folder
    outdir = 'outdir_int8'
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    print(model)
    metric = gluoncv.utils.metrics.SegmentationMetric(num_class)

    tbar = tqdm(test_data)
    metric.reset()
    tic = time.time()
    for i, (batch, dsts) in enumerate(tbar):
        if args.eval:
            targets = mx.gluon.utils.split_and_load(dsts, ctx_list=args.ctx, even_split=False)
            data = mx.gluon.utils.split_and_load(batch, ctx_list=args.ctx, batch_axis=0, even_split=False)
            outputs = None
            for x in data:
                output = model(x)
                outputs = output if outputs is None else nd.concat(outputs, output, axis=0)
            metric.update(targets, outputs)
            pixAcc, mIoU = metric.get()
            tbar.set_description('pixAcc: %.4f, mIoU: %.4f' % (pixAcc, mIoU))
        else:
            for data, impath in zip(batch, dsts):
                data = data.as_in_context(args.ctx[0])
                if len(data.shape) < 4:
                    data = nd.expand_dims(data, axis=0)
                predict = model(data)[0]
                predict = mx.nd.squeeze(mx.nd.argmax(predict, 1)).asnumpy() + pred_offset
                mask = get_color_pallete(predict, args.dataset)
                outname = os.path.splitext(impath)[0] + '.png'
                mask.save(os.path.join(outdir, outname))
    speed = size / (time.time() - tic)
    print('Inference speed with batchsize %d is %.2f img/sec' % (args.batch_size, speed)) 
Developer ID: dmlc, Project: gluon-cv, Lines of code: 36, Source file: test.py

Example 7: validation

# Required module: import gluoncv [as alias]
# Or: from gluoncv import data [as alias]
def validation(self, epoch):
        self.metric.reset()
        tbar = tqdm(self.eval_data)
        for i, (data, target) in enumerate(tbar):
            outputs = self.evaluator(data.astype(args.dtype, copy=False))
            outputs = [x[0] for x in outputs]
            targets = mx.gluon.utils.split_and_load(target, args.ctx, even_split=False)
            self.metric.update(targets, outputs)
            pixAcc, mIoU = self.metric.get()
            tbar.set_description('Epoch %d, validation pixAcc: %.3f, mIoU: %.3f' % \
                (epoch, pixAcc, mIoU))
            mx.nd.waitall()
        self.logger.info('Epoch %d validation pixAcc: %.3f, mIoU: %.3f' % (epoch, pixAcc, mIoU))
        save_checkpoint(self.net.module, self.args, epoch, mIoU, False) 
Developer ID: dmlc, Project: gluon-cv, Lines of code: 16, Source file: train.py

Example 8: validation

# Required module: import gluoncv [as alias]
# Or: from gluoncv import data [as alias]
def validation(self, epoch):
        self.metric.reset()
        tbar = tqdm(self.eval_data)
        for i, (data, target) in enumerate(tbar):
            outputs = self.evaluator(data.astype(args.dtype, copy=False))
            outputs = [x[0] for x in outputs]
            targets = mx.gluon.utils.split_and_load(target, args.ctx, even_split=False)
            self.metric.update(targets, outputs)
            pixAcc, mIoU = self.metric.get()
            tbar.set_description('Epoch {}, validation pixAcc: {}, mIoU: {}'.format(epoch, pixAcc, mIoU))
            mx.nd.waitall() 
Developer ID: osmr, Project: imgclsmob, Lines of code: 13, Source file: train_gl_seg.py

Example 9: validation

# Required module: import gluoncv [as alias]
# Or: from gluoncv import data [as alias]
def validation(self, epoch):
        #total_inter, total_union, total_correct, total_label = 0, 0, 0, 0
        self.metric.reset()
        tbar = tqdm(self.eval_data)
        for i, (data, target) in enumerate(tbar):
            outputs = self.evaluator(data.astype(args.dtype, copy=False))
            outputs = [x[0] for x in outputs]
            targets = mx.gluon.utils.split_and_load(target, args.ctx, even_split=False)
            self.metric.update(targets, outputs)
            pixAcc, mIoU = self.metric.get()
            tbar.set_description('Epoch %d, validation pixAcc: %.3f, mIoU: %.3f'%\
                (epoch, pixAcc, mIoU))
            mx.nd.waitall() 
Developer ID: Angzz, Project: panoptic-fpn-gluon, Lines of code: 15, Source file: train.py

Example 10: _detect_vehicles

# Required module: import gluoncv [as alias]
# Or: from gluoncv import data [as alias]
def _detect_vehicles(self, raw):
        x, _ = data.transforms.presets.yolo.transform_test(raw, short=512)
        classes, scores, bboxes = self.yolo(x)
        bboxes[0, :, 0::2] = bboxes[0, :, 0::2] / x.shape[3] * raw.shape[1]
        bboxes[0, :, 1::2] = bboxes[0, :, 1::2] / x.shape[2] * raw.shape[0]
        return [
            fixed_crop(raw, bboxes[0, i]) for i in range(classes.shape[1])
                if (self.yolo.classes[int(classes[0, i].asscalar())] == 'car' or
                    self.yolo.classes[int(classes[0, i].asscalar())] == 'bus') and
                    scores[0, i].asscalar() > 0.5
        ] 
Developer ID: ufownl, Project: alpr_utils, Lines of code: 13, Source file: server.py
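The snippet above assumes a detector is already stored in self.yolo; one plausible setup is sketched below. The exact model name used in alpr_utils is not shown here, so 'yolo3_darknet53_coco' is an assumption ('car' and 'bus' are valid class names in both the VOC and COCO variants).

# Assumed construction of the detector referenced as self.yolo above.
import gluoncv

yolo = gluoncv.model_zoo.get_model('yolo3_darknet53_coco', pretrained=True)
print(yolo.classes[:5])  # class names indexed by the detections in _detect_vehicles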

Example 11: png_encode

# Required module: import gluoncv [as alias]
# Or: from gluoncv import data [as alias]
def png_encode(img):
    height = img.shape[0]
    width = img.shape[1]
    img = img.astype("uint8").reshape((-1, width * 3))
    f = io.BytesIO()
    w = png.Writer(width, height, greyscale=False)
    w.write(f, img.asnumpy())
    return "data:image/png;base64, " + base64.b64encode(f.getvalue()).decode() 
Developer ID: ufownl, Project: alpr_utils, Lines of code: 10, Source file: server.py

Example 12: build

# Required module: import gluoncv [as alias]
# Or: from gluoncv import data [as alias]
def build(target):
    mod, params = relay.frontend.from_mxnet(block, {"data": dshape})
    with tvm.transform.PassContext(opt_level=3):
        graph, lib, params = relay.build(mod, target, params=params)
    return graph, lib, params

######################################################################
# Create TVM runtime and do inference 
Developer ID: apache, Project: incubator-tvm, Lines of code: 10, Source file: deploy_ssd_gluoncv.py

Example 13: run

# Required module: import gluoncv [as alias]
# Or: from gluoncv import data [as alias]
def run(graph, lib, params, ctx):
    # Build TVM runtime
    m = graph_runtime.create(graph, lib, ctx)
    tvm_input = tvm.nd.array(x.asnumpy(), ctx=ctx)
    m.set_input('data', tvm_input)
    m.set_input(**params)
    # execute
    m.run()
    # get outputs
    class_IDs, scores, bounding_boxs = m.get_output(0), m.get_output(1), m.get_output(2)
    return class_IDs, scores, bounding_boxs 
Developer ID: apache, Project: incubator-tvm, Lines of code: 13, Source file: deploy_ssd_gluoncv.py
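In the GluonCV SSD deployment tutorial this function is followed by a visualization step; a hedged sketch of that follow-up is shown below. Here img and block are assumed to be defined earlier in deploy_ssd_gluoncv.py (the preset load_test call and the model-zoo block used in Example 12).

# Hedged follow-up: plot the detections returned by run() with GluonCV's helper.
import matplotlib.pyplot as plt
from gluoncv.utils import viz

class_IDs, scores, bounding_boxs = run(graph, lib, params, ctx)
ax = viz.plot_bbox(img, bounding_boxs.asnumpy()[0], scores.asnumpy()[0],
                   class_IDs.asnumpy()[0], class_names=block.classes)
plt.show()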

Example 14: test_segmentation_utils

# Required module: import gluoncv [as alias]
# Or: from gluoncv import data [as alias]
def test_segmentation_utils():
    ctx = mx.context.current_context()
    import os
    if not os.path.isdir(os.path.expanduser('~/.mxnet/datasets/voc')):
        return

    transform_fn = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([.485, .456, .406], [.229, .224, .225])
    ])
    # get the dataset
    # TODO FIXME: change it to ADE20K dataset and pretrained model
    dataset = ADE20KSegmentation(split='val')
    # load pretrained net
    net = gluoncv.model_zoo.get_model('fcn_resnet50_ade', pretrained=True, ctx=ctx)
    # count for pixAcc and mIoU
    total_inter, total_union, total_correct, total_label = 0, 0, 0, 0
    np_inter, np_union, np_correct, np_label = 0, 0, 0, 0
    tbar = tqdm(range(10))
    for i in tbar:
        img, mask = dataset[i]
        # prepare data and make prediction
        img = transform_fn(img)
        img = img.expand_dims(0).as_in_context(ctx)
        mask = mask.expand_dims(0)
        pred = net.evaluate(img).as_in_context(mx.cpu(0))
        # gcv prediction
        correct1, labeled1 = batch_pix_accuracy(pred, mask)
        inter1, union1 = batch_intersection_union(pred, mask, dataset.num_class)
        total_correct += correct1
        total_label += labeled1
        total_inter += inter1
        total_union += union1
        pixAcc = 1.0 * total_correct / (np.spacing(1) + total_label)
        IoU = 1.0 * total_inter / (np.spacing(1) + total_union)
        mIoU = IoU.mean()

        # np prediction
        pred2 = np.argmax(pred.asnumpy().astype('int64'), 1) + 1
        mask2 = mask.squeeze().asnumpy().astype('int64') + 1
        _, correct2, labeled2 = pixelAccuracy(pred2, mask2)
        inter2, union2 = intersectionAndUnion(pred2, mask2, dataset.num_class)
        np_correct += correct2
        np_label += labeled2
        np_inter += inter2
        np_union += union2
        np_pixAcc = 1.0 * np_correct / (np.spacing(1) + np_label)
        np_IoU = 1.0 * np_inter / (np.spacing(1) + np_union)
        np_mIoU = np_IoU.mean()
        tbar.set_description('pixAcc: %.3f, np_pixAcc: %.3f, mIoU: %.3f, np_mIoU: %.3f'%\
            (pixAcc, np_pixAcc, mIoU, np_mIoU))

    np.testing.assert_allclose(total_inter, np_inter)
    np.testing.assert_allclose(total_union, np_union)
    np.testing.assert_allclose(total_correct, np_correct)
    np.testing.assert_allclose(total_label, np_label) 
Developer ID: dmlc, Project: gluon-cv, Lines of code: 58, Source file: test_utils_segmentation.py

Example 15: parse_args

# Required module: import gluoncv [as alias]
# Or: from gluoncv import data [as alias]
def parse_args():
    """Training Options for Segmentation Experiments"""
    parser = argparse.ArgumentParser(description='MXNet Gluon Segmentation')

    parser.add_argument('--model', type=str, default='fcn', help='model name (default: fcn)')
    parser.add_argument('--backbone', type=str, default='resnet50', help='backbone name (default: resnet50)')
    parser.add_argument('--dataset', type=str, default='pascalaug', help='dataset name (default: pascalaug)')
    parser.add_argument('--dataset-dir', type=str, default='../imgclsmob_data/voc', help='dataset path')
    parser.add_argument('--workers', type=int, default=16, metavar='N', help='dataloader threads')
    parser.add_argument('--base-size', type=int, default=520, help='base image size')
    parser.add_argument('--crop-size', type=int, default=480, help='crop image size')
    parser.add_argument('--train-split', type=str, default='train', help='dataset train split (default: train)')

    parser.add_argument('--aux', action='store_true', default=False, help='Auxiliary loss')
    parser.add_argument('--aux-weight', type=float, default=0.5, help='auxiliary loss weight')
    parser.add_argument('--epochs', type=int, default=50, metavar='N', help='number of epochs to train (default: 50)')
    parser.add_argument('--start_epoch', type=int, default=0, metavar='N', help='start epochs (default:0)')
    parser.add_argument('--batch-size', type=int, default=16, metavar='N',
                        help='input batch size for training (default: 16)')
    parser.add_argument('--test-batch-size', type=int, default=16, metavar='N',
                        help='input batch size for testing (default: 16)')
    parser.add_argument('--lr', type=float, default=1e-3, metavar='LR', help='learning rate (default: 1e-3)')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='momentum (default: 0.9)')
    parser.add_argument('--weight-decay', type=float, default=1e-4, metavar='M', help='w-decay (default: 1e-4)')
    parser.add_argument('--no-wd', action='store_true',
                        help='whether to remove weight decay on bias, and beta/gamma for batchnorm layers.')

    parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training')
    parser.add_argument('--ngpus', type=int, default=len(mx.test_utils.list_gpus()), help='number of GPUs (default: all available GPUs)')
    parser.add_argument('--kvstore', type=str, default='device', help='kvstore to use for trainer/module.')
    parser.add_argument('--dtype', type=str, default='float32', help='data type for training. default is float32')

    # checking point
    parser.add_argument('--resume', type=str, default=None, help='put the path to resuming file if needed')
    parser.add_argument('--checkname', type=str, default='default', help='set the checkpoint name')
    parser.add_argument('--model-zoo', type=str, default=None, help='evaluating on model zoo model')

    # evaluation only
    parser.add_argument('--eval', action='store_true', default=False, help='evaluation only')
    parser.add_argument('--no-val', action='store_true', default=False, help='skip validation during training')

    # synchronized Batch Normalization
    parser.add_argument('--syncbn', action='store_true', default=False, help='using Synchronized Cross-GPU BatchNorm')

    # the parser
    args = parser.parse_args()
    # handle contexts
    if args.no_cuda:
        print('Using CPU')
        args.kvstore = 'local'
        args.ctx = [mx.cpu(0)]
    else:
        print('Number of GPUs:', args.ngpus)
        args.ctx = [mx.gpu(i) for i in range(args.ngpus)]
    # Synchronized BatchNorm
    args.norm_layer = mx.gluon.contrib.nn.SyncBatchNorm if args.syncbn else mx.gluon.nn.BatchNorm
    args.norm_kwargs = {'num_devices': args.ngpus} if args.syncbn else {}
    print(args)
    return args 
Developer ID: osmr, Project: imgclsmob, Lines of code: 61, Source file: train_gl_seg.py
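The gluon-cv segmentation scripts typically consume these arguments by building the network from the model zoo; a hedged sketch under that assumption follows (get_segmentation_model comes from gluoncv.model_zoo.segbase; the exact wiring inside train_gl_seg.py may differ).

# Hedged sketch of consuming the parsed arguments; mirrors the gluon-cv
# segmentation train.py pattern rather than train_gl_seg.py line by line.
from gluoncv.model_zoo.segbase import get_segmentation_model

args = parse_args()
net = get_segmentation_model(model=args.model,
                             dataset='pascal_aug',  # gluon-cv key matching the 'pascalaug' default above
                             backbone=args.backbone, aux=args.aux,
                             norm_layer=args.norm_layer, norm_kwargs=args.norm_kwargs,
                             crop_size=args.crop_size)
net.cast(args.dtype)
net.collect_params().reset_ctx(ctx=args.ctx)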


Note: The gluoncv.data method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and the source code copyright belongs to the original authors; for distribution and use, please refer to each project's License. Do not reproduce without permission.