

Python utils.tensor_load_rgbimage method code examples

This article collects typical usage examples of the Python method utils.tensor_load_rgbimage. If you are wondering what utils.tensor_load_rgbimage does, how to call it, or where to find working examples, the hand-picked code samples below may help. You can also explore further usage examples from the utils module that the method belongs to.


The sections below show 5 code examples of utils.tensor_load_rgbimage, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
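For orientation, here is a minimal sketch of what a loader like tensor_load_rgbimage typically does in the MXNet examples below. The exact implementation lives in each project's utils.py; the PIL-based resizing and the keep_asp handling shown here are assumptions inferred from how the call sites use the function, not the authors' code.

# Hypothetical sketch of an MXNet-style RGB image loader; not the projects' exact code.
import numpy as np
import mxnet as mx
from PIL import Image

def tensor_load_rgbimage_sketch(filename, ctx, size=None, scale=None, keep_asp=False):
    img = Image.open(filename).convert('RGB')
    if size is not None:
        if keep_asp:
            # keep the aspect ratio: derive the height from the requested width
            size2 = int(size * 1.0 / img.size[0] * img.size[1])
            img = img.resize((size, size2), Image.LANCZOS)
        else:
            img = img.resize((size, size), Image.LANCZOS)
    elif scale is not None:
        img = img.resize((int(img.size[0] / scale), int(img.size[1] / scale)), Image.LANCZOS)
    # HWC uint8 -> CHW float, then add a batch dimension and place on the target device
    arr = np.array(img).transpose(2, 0, 1).astype(float)
    return mx.nd.expand_dims(mx.nd.array(arr, ctx=ctx), 0)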

Example 1: evaluate

# Required import: import utils [as alias]
# Or: from utils import tensor_load_rgbimage [as alias]
def evaluate(args):
    if args.cuda:
        ctx = mx.gpu(0)
    else:
        ctx = mx.cpu(0)
    # images
    content_image = utils.tensor_load_rgbimage(args.content_image, ctx, size=args.content_size, keep_asp=True)
    style_image = utils.tensor_load_rgbimage(args.style_image, ctx, size=args.style_size)
    style_image = utils.preprocess_batch(style_image)
    # model
    style_model = net.Net(ngf=args.ngf)
    style_model.load_parameters(args.model, ctx=ctx)
    # forward
    style_model.set_target(style_image)
    output = style_model(content_image)
    utils.tensor_save_bgrimage(output[0], args.output_image, args.cuda) 
Developer ID: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 18, Source file: main.py
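As a usage note, evaluate expects an argparse-style namespace. A hypothetical invocation could look like the following; the argument names are inferred from how args is accessed above, and the file paths and ngf value are placeholders, not the project's actual defaults.

# Hypothetical driver for the evaluate() shown above; all values are assumptions.
from argparse import Namespace

args = Namespace(
    cuda=False,                       # False -> mx.cpu(0), True -> mx.gpu(0)
    content_image='content.jpg',      # placeholder path
    style_image='style.jpg',          # placeholder path
    content_size=512,
    style_size=512,
    ngf=128,                          # number of generator filters in net.Net (assumed)
    model='models/style.params',      # placeholder path to pretrained weights
    output_image='output.jpg',
)
evaluate(args)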

Example 2: evaluate

# Required import: import utils [as alias]
# Or: from utils import tensor_load_rgbimage [as alias]
def evaluate(args):
    if args.cuda:
        ctx = mx.gpu(0)
    else:
        ctx = mx.cpu(0)
    # images
    content_image = utils.tensor_load_rgbimage(args.content_image, ctx, size=args.content_size, keep_asp=True)
    style_image = utils.tensor_load_rgbimage(args.style_image, ctx, size=args.style_size)
    style_image = utils.preprocess_batch(style_image)
    # model
    style_model = net.Net(ngf=args.ngf)
    style_model.load_params(args.model, ctx=ctx)
    # forward
    style_model.set_target(style_image)
    output = style_model(content_image)
    utils.tensor_save_bgrimage(output[0], args.output_image, args.cuda) 
Developer ID: StacyYang, Project: MXNet-Gluon-Style-Transfer, Lines of code: 18, Source file: main.py

Example 3: evaluate

# Required import: import utils [as alias]
# Or: from utils import tensor_load_rgbimage [as alias]
def evaluate(args):
    if args.cuda:
        ctx = mx.gpu(0)
    else:
        ctx = mx.cpu(0)
    # images
    content_image = utils.tensor_load_rgbimage(args.content_image, ctx, size=args.content_size, keep_asp=True)
    style_image = utils.tensor_load_rgbimage(args.style_image, ctx, size=args.style_size)
    style_image = utils.preprocess_batch(style_image)
    # model
    style_model = net.Net(ngf=args.ngf)
    style_model.load_params(args.model, ctx=ctx)
    # forward
    style_model.setTarget(style_image)
    output = style_model(content_image)
    utils.tensor_save_bgrimage(output[0], args.output_image, args.cuda) 
Developer ID: mahyarnajibi, Project: SNIPER-mxnet, Lines of code: 18, Source file: main.py

Example 4: stylize

# Required import: import utils [as alias]
# Or: from utils import tensor_load_rgbimage [as alias]
def stylize(args):
    content_image = utils.tensor_load_rgbimage(args.content_image, scale=args.content_scale)
    content_image = content_image.unsqueeze(0)

    if args.cuda:
        content_image = content_image.cuda()
    content_image = Variable(utils.preprocess_batch(content_image), volatile=True)
    style_model = TransformerNet()
    style_model.load_state_dict(torch.load(args.model))

    if args.cuda:
        style_model.cuda()

    output = style_model(content_image)
    utils.tensor_save_bgrimage(output.data[0], args.output_image, args.cuda) 
Developer ID: abhiskk, Project: fast-neural-style, Lines of code: 17, Source file: neural_style.py
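Note that in this PyTorch project the loader takes no ctx argument and returns a 3-D CHW tensor, which is why the caller adds the batch dimension with unsqueeze(0). A minimal sketch of that variant, with the PIL-based resizing and the meaning of the scale parameter assumed from the call site:

# Hypothetical sketch of the PyTorch-flavored loader; not the project's exact code.
import numpy as np
import torch
from PIL import Image

def tensor_load_rgbimage_sketch(filename, size=None, scale=None):
    img = Image.open(filename).convert('RGB')
    if size is not None:
        img = img.resize((size, size), Image.LANCZOS)
    elif scale is not None:
        img = img.resize((int(img.size[0] / scale), int(img.size[1] / scale)), Image.LANCZOS)
    # HWC uint8 -> CHW float tensor in the 0-255 range (no batch dimension)
    arr = np.array(img).transpose(2, 0, 1)
    return torch.from_numpy(arr).float()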

Example 5: optimize

# Required import: import utils [as alias]
# Or: from utils import tensor_load_rgbimage [as alias]
def optimize(args):
    """    Gatys et al. CVPR 2017
    ref: Image Style Transfer Using Convolutional Neural Networks
    """
    if args.cuda:
        ctx = mx.gpu(0)
    else:
        ctx = mx.cpu(0)
    # load the content and style target
    content_image = utils.tensor_load_rgbimage(args.content_image, ctx, size=args.content_size, keep_asp=True)
    content_image = utils.subtract_imagenet_mean_preprocess_batch(content_image)
    style_image = utils.tensor_load_rgbimage(args.style_image, ctx, size=args.style_size)
    style_image = utils.subtract_imagenet_mean_preprocess_batch(style_image)
    # load the pre-trained vgg-16 and extract features
    vgg = net.Vgg16()
    utils.init_vgg_params(vgg, 'models', ctx=ctx)
    # content feature
    f_xc_c = vgg(content_image)[1]
    # style feature
    features_style = vgg(style_image)
    gram_style = [net.gram_matrix(y) for y in features_style]
    # output
    output = Parameter('output', shape=content_image.shape)
    output.initialize(ctx=ctx)
    output.set_data(content_image)
    # optimizer
    trainer = gluon.Trainer([output], 'adam',
                            {'learning_rate': args.lr})
    mse_loss = gluon.loss.L2Loss()

    # optimizing the images
    for e in range(args.iters):
        utils.imagenet_clamp_batch(output.data(), 0, 255)
        # fix BN for pre-trained vgg
        with autograd.record():
            features_y = vgg(output.data())
            content_loss = 2 * args.content_weight * mse_loss(features_y[1], f_xc_c)
            style_loss = 0.
            for m in range(len(features_y)):
                gram_y = net.gram_matrix(features_y[m])
                gram_s = gram_style[m]
                style_loss = style_loss + 2 * args.style_weight * mse_loss(gram_y, gram_s)
            total_loss = content_loss + style_loss
            total_loss.backward()

        trainer.step(1)
        if (e + 1) % args.log_interval == 0:
            print('loss:{:.2f}'.format(total_loss.asnumpy()[0]))

    # save the image
    output = utils.add_imagenet_mean_batch(output.data())
    utils.tensor_save_bgrimage(output[0], args.output_image, args.cuda) 
Developer ID: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 54, Source file: main.py
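The style loss in this example compares Gram matrices of the VGG feature maps. A minimal sketch of what a helper like net.gram_matrix computes, assuming (batch, channels, height, width) feature maps and a normalization by the feature size (the exact normalization convention is an assumption; the projects ship their own net.gram_matrix):

# Hypothetical Gram-matrix helper in Gluon; shown only to illustrate the style loss above.
from mxnet import nd

def gram_matrix_sketch(y):
    b, ch, h, w = y.shape
    features = y.reshape((b, ch, h * w))
    # (b, ch, hw) x (b, hw, ch) -> (b, ch, ch), normalized by the feature size
    return nd.batch_dot(features, features, transpose_b=True) / (ch * h * w)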


Note: the utils.tensor_load_rgbimage examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright in the source code remains with those authors. Please refer to each project's License before distributing or reusing the code, and do not reproduce this article without permission.