

Python mxnet.image Method Code Examples

This article collects typical usage examples of the mxnet.image method in Python. If you are wondering how to use mxnet.image, or want to see what it looks like in practice, the curated code examples below may help. You can also explore further usage examples from the mxnet package.


The following presents 7 code examples of the mxnet.image method, sorted by popularity by default.

Example 1: test

# Required module import: import mxnet [as alias]
# Or: from mxnet import image [as alias]
def test(network, ctx, val_data, mode='image'):
    acc_top1 = mx.metric.Accuracy()
    acc_top5 = mx.metric.TopKAccuracy(5)
    acc_top1.reset()
    acc_top5.reset()
    if not opt.rec_dir:
        num_batch = len(val_data)
    num = 0
    start = time.time()
    for i, batch in enumerate(val_data):
        if mode == 'image':
            data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
            label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
        else:
            data = gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
            label = gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
        outputs = [network(X.astype(opt.dtype, copy=False)) for X in data]
        acc_top1.update(label, outputs)
        acc_top5.update(label, outputs)

        _, top1 = acc_top1.get()
        _, top5 = acc_top5.get()
        if not opt.rec_dir:
            print('%d / %d : %.8f, %.8f'%(i, num_batch, 1-top1, 1-top5))
        else:
            print('%d : %.8f, %.8f'%(i, 1-top1, 1-top5))
        num += batch_size
    end = time.time()
    speed = num / (end - start)
    print('Throughput is %f img/sec.'% speed)

    _, top1 = acc_top1.get()
    _, top5 = acc_top5.get()
    return (1-top1, 1-top5) 
Developer ID: dmlc, Project: gluon-cv, Lines of code: 36, Source file: verify_pretrained.py
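Usage note: test() reads the module-level globals opt and batch_size from verify_pretrained.py. Below is a minimal, hedged usage sketch that stubs those globals and builds an ImageNet-style validation loader; it assumes gluoncv is installed, and the dataset path is a placeholder.

import time
import types

import mxnet as mx
from mxnet import gluon
from mxnet.gluon.data.vision import transforms
from gluoncv.model_zoo import get_model

# opt and batch_size are module-level globals in the original
# verify_pretrained.py; stub them so test() can run standalone.
opt = types.SimpleNamespace(rec_dir='', dtype='float32')
batch_size = 32

ctx = [mx.cpu()]
net = get_model('resnet50_v1', pretrained=True, ctx=ctx)

# Standard ImageNet validation preprocessing.
transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
val_dataset = gluon.data.vision.ImageFolderDataset(
    '/path/to/imagenet/val').transform_first(transform)
val_data = gluon.data.DataLoader(val_dataset, batch_size=batch_size,
                                 shuffle=False, num_workers=4)

top1_err, top5_err = test(net, ctx, val_data, mode='image')
print('top-1 error: %.4f, top-5 error: %.4f' % (top1_err, top5_err))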

Example 2: get_attribute

# Required module import: import mxnet [as alias]
# Or: from mxnet import image [as alias]
def get_attribute(self, image): 
        """Face attribute predictor.
        Parameters
        ----------
        image: NDArray.
            The NDArray data format for MXNet to process, such as (H, W, C).
        Returns
        -------
        type: tuple
            Results of Face Attribute Predict:
            (str(gender), int(age), str(expression)).
        """     
        img = transform_eval(image, resize_short=self._image_size, crop_size=self._image_size)
        img = img.as_in_context(self.ctx[0])   
        tic = time.time()
        pred = self.net(img)
        toc = time.time() - tic
        print('Attribute inference time: %fms' % (toc*1000))

        topK = 1
        topK_age = 6
        topK_exp = 2
        age = 0
        ind_1 = nd.topk(pred[0], k=topK)[0].astype('int')
        ind_2 = nd.topk(pred[1], k=topK_age)[0].astype('int')
        ind_3 = nd.topk(pred[2], k=topK_exp)[0].astype('int')
        for i in range(topK_age):
            age += int(nd.softmax(pred[1])[0][ind_2[i]].asscalar() * self.attribute_map2[1][ind_2[i].asscalar()])
        gender = self.attribute_map2[0][ind_1[0].asscalar()]
        if nd.softmax(pred[2])[0][ind_3[0]].asscalar() < 0.45:
            # Low confidence: fall back to the default label (index 7) for both slots.
            expression_1 = expression_2 = self.attribute_map2[2][7]
        else:
            expression_1 = self.attribute_map2[2][ind_3[0].asscalar()]
            expression_2 = self.attribute_map2[2][ind_3[1].asscalar()]

        return (gender, age, (expression_1, expression_2)) 
Developer ID: becauseofAI, Project: MobileFace, Lines of code: 38, Source file: mobileface_attribute_predictor.py
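A hypothetical call sketch for the method above. The enclosing class name and constructor arguments are assumptions for illustration, not taken from the MobileFace repo; only get_attribute and its NDArray input format come from the code shown.

import mxnet as mx

# Read an image as an HWC uint8 NDArray, the input format the
# docstring above expects.
img = mx.image.imread('face.jpg')

# Hypothetical constructor: the real class and its arguments live in
# mobileface_attribute_predictor.py of the MobileFace project.
predictor = MobileFaceAttributePredictor(model='mobileface_attribute_v1',
                                         gpus='-1')
gender, age, (expr_top1, expr_top2) = predictor.get_attribute(img)
print(gender, age, expr_top1, expr_top2)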

Example 3: parse_args

# Required module import: import mxnet [as alias]
# Or: from mxnet import image [as alias]
def parse_args():
    parser = argparse.ArgumentParser(description='Train a model for image classification.')
    parser.add_argument('--data-dir', type=str, default='~/.mxnet/datasets/imagenet',
                        help='Imagenet directory for validation.')
    parser.add_argument('--rec-dir', type=str, default='',
                        help='RecordIO directory for validation.')
    parser.add_argument('--batch-size', type=int, default=32,
                        help='training batch size per device (CPU/GPU).')
    parser.add_argument('--num-gpus', type=int, default=0,
                        help='number of gpus to use.')
    parser.add_argument('-j', '--num-data-workers', dest='num_workers', default=4, type=int,
                        help='number of preprocessing workers')
    parser.add_argument('--model', type=str, required=True,
                        help='type of model to use. see vision_model for options.')
    parser.add_argument('--quantized', action='store_true',
                        help='use int8 pretrained model')
    parser.add_argument('--input-size', type=int, default=224,
                        help='input shape of the image, default is 224.')
    parser.add_argument('--num-batches', type=int, default=100,
                        help='run specified number of batches for inference')
    parser.add_argument('--benchmark', action='store_true',
                        help='use synthetic data to evaluate the benchmark')
    parser.add_argument('--crop-ratio', type=float, default=0.875,
                        help='The ratio for crop and input size, for validation dataset only')
    parser.add_argument('--params-file', type=str,
                        help='local parameter file to load, instead of pre-trained weight.')
    parser.add_argument('--dtype', type=str,
                        help='training data type')
    parser.add_argument('--use_se', action='store_true',
                        help='use SE layers or not in resnext. default is false.')
    opt = parser.parse_args()
    return opt 
Developer ID: Angzz, Project: panoptic-fpn-gluon, Lines of code: 34, Source file: verify_pretrained.py
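A quick, hedged sanity-check sketch for parse_args: simulate a command line in-process and inspect the parsed options (the argument values are illustrative).

import sys

# Simulate a command line, then parse it with the function above.
sys.argv = ['verify_pretrained.py',
            '--model', 'resnet50_v1',
            '--num-gpus', '1',
            '--batch-size', '64',
            '--dtype', 'float32']
opt = parse_args()
print(opt.model, opt.batch_size, opt.input_size, opt.crop_ratio)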

Example 4: test

# Required module import: import mxnet [as alias]
# Or: from mxnet import image [as alias]
def test(ctx, val_data, mode='image'):
        acc_top1.reset()
        acc_top5.reset()
        if not opt.rec_dir:
            num_batch = len(val_data)
        num = 0
        start = time.time()
        for i, batch in enumerate(val_data):
            if mode == 'image':
                data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
                label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
            else:
                data = gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
                label = gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
            outputs = [net(X.astype(opt.dtype, copy=False)) for X in data]
            acc_top1.update(label, outputs)
            acc_top5.update(label, outputs)

            _, top1 = acc_top1.get()
            _, top5 = acc_top5.get()
            if not opt.rec_dir:
                print('%d / %d : %.8f, %.8f'%(i, num_batch, 1-top1, 1-top5))
            else:
                print('%d : %.8f, %.8f'%(i, 1-top1, 1-top5))
            num += batch_size
        end = time.time()
        speed = num / (end - start)
        print('Throughput is %f img/sec.'% speed)

        _, top1 = acc_top1.get()
        _, top5 = acc_top5.get()
        return (1-top1, 1-top5) 
Developer ID: Angzz, Project: panoptic-fpn-gluon, Lines of code: 34, Source file: verify_pretrained.py

Example 5: test

# Required module import: import mxnet [as alias]
# Or: from mxnet import image [as alias]
def test(ctx, val_data, mode='image'):
    acc_top1.reset()
    acc_top5.reset()
    if not opt.rec_dir:
        num_batch = len(val_data)
    for i, batch in enumerate(val_data):
        if mode == 'image':
            data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
            label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
        else:
            data = gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
            label = gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
        outputs = [net(X.astype(opt.dtype, copy=False)) for X in data]
        acc_top1.update(label, outputs)
        acc_top5.update(label, outputs)

        _, top1 = acc_top1.get()
        _, top5 = acc_top5.get()
        if not opt.rec_dir:
            print('%d / %d : %.8f, %.8f'%(i, num_batch, 1-top1, 1-top5))
        else:
            print('%d : %.8f, %.8f'%(i, 1-top1, 1-top5))

    _, top1 = acc_top1.get()
    _, top5 = acc_top5.get()
    return (1-top1, 1-top5) 
Developer ID: zzdang, Project: cascade_rcnn_gluon, Lines of code: 28, Source file: verify_pretrained.py

Example 6: parse_args

# Required module import: import mxnet [as alias]
# Or: from mxnet import image [as alias]
def parse_args():
    parser = argparse.ArgumentParser(description='Train a model for image classification.')
    parser.add_argument('--data-dir', type=str, default='~/.mxnet/datasets/imagenet',
                        help='Imagenet directory for validation.')
    parser.add_argument('--rec-dir', type=str, default='',
                        help='RecordIO directory for validation.')
    parser.add_argument('--batch-size', type=int, default=32,
                        help='training batch size per device (CPU/GPU).')
    parser.add_argument('--num-gpus', type=int, default=0,
                        help='number of gpus to use.')
    parser.add_argument('-j', '--num-data-workers', dest='num_workers', default=4, type=int,
                        help='number of preprocessing workers')
    parser.add_argument('--model', type=str, default='model', required=False,
                        help='type of model to use. see vision_model for options.')
    parser.add_argument('--deploy', action='store_true',
                        help='whether load static model for deployment')
    parser.add_argument('--model-prefix', type=str, required=False,
                        help='load static model as hybridblock.')
    parser.add_argument('--quantized', action='store_true',
                        help='use int8 pretrained model')
    parser.add_argument('--input-size', type=int, default=224,
                        help='input shape of the image, default is 224.')
    parser.add_argument('--num-batches', type=int, default=100,
                        help='run specified number of batches for inference')
    parser.add_argument('--benchmark', action='store_true',
                        help='use synthetic data to evaluate the benchmark')
    parser.add_argument('--crop-ratio', type=float, default=0.875,
                        help='The ratio for crop and input size, for validation dataset only')
    parser.add_argument('--params-file', type=str,
                        help='local parameter file to load, instead of pre-trained weight.')
    parser.add_argument('--dtype', type=str,
                        help='training data type')
    parser.add_argument('--use_se', action='store_true',
                        help='use SE layers or not in resnext. default is false.')
    parser.add_argument('--calibration', action='store_true',
                        help='quantize model')
    parser.add_argument('--num-calib-batches', type=int, default=5,
                        help='number of batches for calibration')
    parser.add_argument('--quantized-dtype', type=str, default='auto',
                        choices=['auto', 'int8', 'uint8'],
                        help='quantization destination data type for input data')
    parser.add_argument('--calib-mode', type=str, default='naive',
                        help='calibration mode used for generating calibration table for the quantized symbol; supports'
                             ' 1. none: no calibration will be used. The thresholds for quantization will be calculated'
                             ' on the fly. This will result in inference speed slowdown and loss of accuracy'
                             ' in general.'
                             ' 2. naive: simply take min and max values of layer outputs as thresholds for'
                             ' quantization. In general, the inference accuracy worsens with more examples used in'
                             ' calibration. It is recommended to use `entropy` mode as it produces more accurate'
                             ' inference results.'
                             ' 3. entropy: calculate KL divergence of the fp32 output and quantized output for optimal'
                             ' thresholds. This mode is expected to produce the best inference accuracy of all three'
                             ' kinds of quantized models if the calibration dataset is representative enough of the'
                             ' inference dataset.')
    opt = parser.parse_args()
    return opt 
Developer ID: dmlc, Project: gluon-cv, Lines of code: 58, Source file: verify_pretrained.py
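The --calib-mode help above corresponds to MXNet 1.x's symbolic quantization API. Below is a hedged sketch using mxnet.contrib.quantization.quantize_model; the checkpoint prefix and record-file path are placeholders, and the exact keyword set may vary across MXNet versions.

import mxnet as mx
from mxnet.contrib.quantization import quantize_model

# Load a trained symbolic checkpoint (placeholder prefix and epoch).
sym, arg_params, aux_params = mx.model.load_checkpoint('resnet50_v1', 0)

# Calibration data iterator over a RecordIO file (placeholder path).
calib_data = mx.io.ImageRecordIter(path_imgrec='val.rec',
                                   batch_size=32,
                                   data_shape=(3, 224, 224))

# calib_mode accepts 'none', 'naive', or 'entropy', exactly as the
# help text above describes; 'entropy' usually preserves accuracy best.
qsym, qarg_params, qaux_params = quantize_model(
    sym, arg_params, aux_params,
    ctx=mx.cpu(),
    calib_mode='entropy',
    calib_data=calib_data,
    num_calib_examples=32 * 5,   # mirrors --num-calib-batches=5
    quantized_dtype='auto')      # mirrors --quantized-dtype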

Example 7: main

# Required module import: import mxnet [as alias]
# Or: from mxnet import image [as alias]
def main():
    args = parse_args() 
    landmark_num = 5 # version v1 currently supports 5 or 3 landmarks
    # align_size = (96, 96) # face image size after alignment
    align_size = (112, 112) # face image size after alignment
    bboxes_predictor = MobileFaceDetection(args.model_detect, args.gpus)
    landmark_predictor = dlib.shape_predictor(args.model_landmark)
    align_tool = MobileFaceAlign(args.model_align)
    image_list = [x.strip() for x in args.images.split(',') if x.strip()]
    for img_dir in image_list:
        img_mat = cv2.imread(img_dir)
        results = bboxes_predictor.mobileface_detector(img_dir, img_mat)
        if results is None or len(results) < 1:
            continue
            
        for i, result in enumerate(results):
            xmin, ymin, xmax, ymax, score, classname = result
            # The landmark predictor was not trained jointly with the
            # mobilefacedet detector above, so the raw detection boxes
            # need some adjustment before they suit the landmark predictor.
            size_scale = 0.75
            center_scale = 0.1
            center_shift = (ymax - ymin) * center_scale
            w_new = (ymax - ymin) * size_scale
            h_new = (ymax - ymin) * size_scale
            x_center = xmin + (xmax - xmin) / 2
            y_center = ymin + (ymax - ymin) / 2 + center_shift
            x_min = int(x_center - w_new / 2)
            y_min = int(y_center - h_new / 2)
            x_max = int(x_center + w_new / 2)
            y_max = int(y_center + h_new / 2)

            dlib_box = dlib.rectangle(x_min, y_min, x_max, y_max)

            tic = time.time()
            shape = landmark_predictor(img_mat, dlib_box)
            toc = time.time() - tic
            print('Landmark predict time: %fms' % (toc*1000))

            points = []
            for k in range(landmark_num):
                points.append([shape.part(k).x, shape.part(k).y])

            align_points = []
            align_points.append(points)
            tic = time.time()
            align_result = align_tool.get_align(img_mat, align_points, align_size)
            toc = time.time() - tic
            print('Face align time: %fms' % (toc*1000))
            save_aligned = './align_result_112/' + str(i) + '.jpg'
            cv2.imwrite(save_aligned, align_result[0]) 
Developer ID: becauseofAI, Project: MobileFace, Lines of code: 53, Source file: get_face_align.py
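A hypothetical invocation sketch for main(). The flag names are inferred from the attributes the function reads (args.model_detect, args.model_landmark, args.model_align, args.gpus, args.images) because the script's own parse_args is not shown here; all file paths are placeholders.

import sys

# Inferred flags; get_face_align.py's real parse_args() is not shown above.
sys.argv = ['get_face_align.py',
            '--model-detect', './model/mobilefacedet_v1_gluoncv.params',
            '--model-landmark', './model/mobileface_landmark_v1.dat',
            '--model-align', './model/mobileface_align_v1.npy',
            '--gpus', '-1',
            '--images', 'friends1.jpg,friends2.jpg']
main()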


Note: The mxnet.image method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers, and copyright of the source code belongs to the original authors. Please refer to each project's license before distributing or using the code; do not reproduce without permission.