

Python mxnet.cpu Method Code Examples

This article collects typical usage examples of the mxnet.cpu method in Python. If you are wondering how to use mxnet.cpu, what it does, or what it looks like in practice, the curated code samples below should help. You can also explore further usage examples from the mxnet package, where this method is defined.


The following presents 15 code examples of the mxnet.cpu method, sorted by popularity by default.
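All of the examples share one basic pattern: mx.cpu() returns a CPU Context object, which is passed to NDArray constructors, bind() calls, or Module/Gluon APIs to place data and computation on the CPU. As a minimal sketch (mx.cpu() takes an optional device id, defaulting to 0, which is largely cosmetic on CPU):

import mxnet as mx

# Create a CPU context and allocate an NDArray on it.
ctx = mx.cpu()
x = mx.nd.ones((2, 3), ctx=ctx)

# The computation runs on the CPU; asnumpy() copies the result to host memory.
y = (x * 2).asnumpy()
print(y)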

Example 1: main

# Required import: import mxnet [as alias]
# Or: from mxnet import cpu [as alias]
def main():
    """Module main execution"""
    # Initialization variables - update to change your model and execution context
    model_prefix = "FCN8s_VGG16"
    epoch = 19

    # By default, MXNet will run on the CPU. Change to ctx = mx.gpu() to run on GPU.
    ctx = mx.cpu()

    fcnxs, fcnxs_args, fcnxs_auxs = mx.model.load_checkpoint(model_prefix, epoch)
    fcnxs_args["data"] = mx.nd.array(get_data(args.input), ctx)
    data_shape = fcnxs_args["data"].shape
    label_shape = (1, data_shape[2]*data_shape[3])
    fcnxs_args["softmax_label"] = mx.nd.empty(label_shape, ctx)
    # Bind with gradients disabled for inference; aux states come from the checkpoint.
    executor = fcnxs.bind(ctx, fcnxs_args, args_grad=None, grad_req="null", aux_states=fcnxs_auxs)
    executor.forward(is_train=False)
    output = executor.outputs[0]
    out_img = np.uint8(np.squeeze(output.asnumpy().argmax(axis=1)))
    out_img = Image.fromarray(out_img)
    out_img.putpalette(get_palette())
    out_img.save(args.output) 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 23, Source: image_segmentaion.py

Example 2: extract_feature

# Required import: import mxnet [as alias]
# Or: from mxnet import cpu [as alias]
def extract_feature(sym, args, auxs, data_iter, N, xpu=mx.cpu()):
    input_buffs = [mx.nd.empty(shape, ctx=xpu) for k, shape in data_iter.provide_data]
    input_names = [k for k, shape in data_iter.provide_data]
    args = dict(args, **dict(zip(input_names, input_buffs)))
    exe = sym.bind(xpu, args=args, aux_states=auxs)
    outputs = [[] for _ in exe.outputs]
    output_buffs = None

    data_iter.hard_reset()
    for batch in data_iter:
        for data, buff in zip(batch.data, input_buffs):
            data.copyto(buff)
        exe.forward(is_train=False)
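        # Double-buffering: on the first batch, just allocate host-side buffers;
        # on later batches, flush the previous batch's buffered outputs to numpy
        # before overwriting the buffers with the current batch.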
        if output_buffs is None:
            output_buffs = [mx.nd.empty(i.shape, ctx=mx.cpu()) for i in exe.outputs]
        else:
            for out, buff in zip(outputs, output_buffs):
                out.append(buff.asnumpy())
        for out, buff in zip(exe.outputs, output_buffs):
            out.copyto(buff)
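    # Flush the outputs buffered for the final batch.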
    for out, buff in zip(outputs, output_buffs):
        out.append(buff.asnumpy())
    outputs = [np.concatenate(i, axis=0)[:N] for i in outputs]
    return dict(zip(sym.list_outputs(), outputs)) 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 26, Source: model.py

Example 3: load_module

# Required import: import mxnet [as alias]
# Or: from mxnet import cpu [as alias]
def load_module(prefix, epoch, data_names, data_shapes):
    """
    Loads the model from checkpoint specified by prefix and epoch, binds it
    to an executor, and sets its parameters and returns a mx.mod.Module
    """
    sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)

    # We don't need CTC loss for prediction, just a simple softmax will suffice.
    # We get the output of the layer just before the loss layer ('pred_fc') and add softmax on top
    pred_fc = sym.get_internals()['pred_fc_output']
    sym = mx.sym.softmax(data=pred_fc)

    mod = mx.mod.Module(symbol=sym, context=mx.cpu(), data_names=data_names, label_names=None)
    mod.bind(for_training=False, data_shapes=data_shapes)
    mod.set_params(arg_params, aux_params, allow_missing=False)
    return mod 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 18, Source: lstm_ocr_infer.py

Example 4: evaluate

# Required import: import mxnet [as alias]
# Or: from mxnet import cpu [as alias]
def evaluate(args):
    if args.cuda:
        ctx = mx.gpu(0)
    else:
        ctx = mx.cpu(0)
    # images
    content_image = utils.tensor_load_rgbimage(args.content_image, ctx, size=args.content_size, keep_asp=True)
    style_image = utils.tensor_load_rgbimage(args.style_image, ctx, size=args.style_size)
    style_image = utils.preprocess_batch(style_image)
    # model
    style_model = net.Net(ngf=args.ngf)
    style_model.load_parameters(args.model, ctx=ctx)
    # forward
    style_model.set_target(style_image)
    output = style_model(content_image)
    utils.tensor_save_bgrimage(output[0], args.output_image, args.cuda) 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 18, Source: main.py

Example 5: __init__

# Required import: import mxnet [as alias]
# Or: from mxnet import cpu [as alias]
def __init__(self, args):
        # Default training settings
        self.ctx = mx.gpu(0) if args.gpu else mx.cpu()
        self.init_func = mx.init.Xavier(rnd_type='uniform', factor_type="in",
                                        magnitude=1)
        self.learning_rate = 1e-3
        self.update_rule = "adam"
        self.grad_clip = True
        self.clip_magnitude = 40

        # Default model settings
        self.hidden_size = 200
        self.gamma = 0.99
        self.lambda_ = 1.0
        self.vf_wt = 0.5        # Weight of value function term in the loss
        self.entropy_wt = 0.01  # Weight of entropy term in the loss

        self.num_envs = 16
        self.t_max = 50

        # Override defaults with values from `args`.
        for arg in self.__dict__:
            if arg in args.__dict__:
                self.__setattr__(arg, args.__dict__[arg]) 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 26, Source: config.py

Example 6: main

# Required import: import mxnet [as alias]
# Or: from mxnet import cpu [as alias]
def main():
    # testing configurations
    cell_types = [gluon.rnn.RNNCell,
                  gluon.rnn.GRUCell,
                  gluon.rnn.LSTMCell]
    ctxs = [mx.cpu(0)]
    if args.gpu:
        ctxs = ctxs + [mx.gpu(i) for i in _get_gpus()]
    seq_lens = [100]
    batch_sizes = [1, 32]
    hidden_dims = [512]
    print("--------------------------------------")
    print("Benchmarking", args.benchmark)
    for cell_type, ctx, seq_len, batch_size, hidden_dim in product(
            cell_types, ctxs, seq_lens, batch_sizes, hidden_dims):
        print("--------------------------------------")
        print("cell: %s  ctx: %s  length: %d  batch size: %d  dim: %d" %
              (cell_type.__name__, str(ctx), seq_len, batch_size, hidden_dim))
        run_benchmark(cell_type, ctx, seq_len, batch_size, hidden_dim) 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 21, Source: rnn.py

Example 7: load_params

# Required import: import mxnet [as alias]
# Or: from mxnet import cpu [as alias]
def load_params(self, filename, ctx=None, allow_missing=False,
                    ignore_extra=False):
        """[Deprecated] Please use load_parameters.

        Load parameters from file.

        filename : str
            Path to parameter file.
        ctx : Context or list of Context, default cpu()
            Context(s) to initialize loaded parameters on.
        allow_missing : bool, default False
            Whether to silently skip loading parameters not represented in the file.
        ignore_extra : bool, default False
            Whether to silently ignore parameters from the file that are not
            present in this Block.
        """
        warnings.warn("load_params is deprecated. Please use load_parameters.")
        self.load_parameters(filename, ctx, allow_missing, ignore_extra) 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 20, Source: block.py

Example 8: test_gluon_trainer_type

# Required import: import mxnet [as alias]
# Or: from mxnet import cpu [as alias]
def test_gluon_trainer_type():
    def check_trainer_kv_update(update_on_kv):
        params = mx.gluon.ParameterDict()
        x = params.get('x', shape=(10,1), lr_mult=1.0)
        params.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
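        # With a distributed async kvstore, updates must run on the kvstore, so
        # requesting update_on_kvstore=False is expected to raise ValueError.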
        try:
            trainer = mx.gluon.Trainer(params, 'sgd', {'learning_rate': 0.1}, kvstore=kv, update_on_kvstore=update_on_kv)
            trainer._init_kvstore()
            assert trainer._kv_initialized
            assert trainer._update_on_kvstore is True
        except ValueError:
            assert update_on_kv is False

    check_trainer_kv_update(False)
    check_trainer_kv_update(True)
    check_trainer_kv_update(None)
    print('worker ' + str(my_rank) + ' passed test_gluon_trainer_type') 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 19, Source: dist_async_kvstore.py

Example 9: test_sync_init

# Required import: import mxnet [as alias]
# Or: from mxnet import cpu [as alias]
def test_sync_init(gpu_tests=False):
    def get_dtype(idx, cur_keys):
        if idx < len(cur_keys)/2:
            dtype = 'float32'
        else:
            dtype = 'float16'
        return dtype

    def check_init(kv, cur_keys, cur_shape, device=False):
        ctx = mx.gpu(0) if device else mx.cpu()
        val = [mx.nd.zeros(cur_shape, ctx=ctx, dtype=get_dtype(i, cur_keys)) for i in range(len(cur_keys))]
        for i in range(len(cur_keys)):
            expected = i
            kv.init(cur_keys[i], [mx.nd.ones(cur_shape, ctx=ctx, dtype=get_dtype(i, cur_keys)) * i])
            kv.pull(cur_keys[i], out=val[i])
            check_diff(val[i], expected)
    check_init(kv, init_test_keys, shape)
    check_init(kv, init_test_keys_big, big_shape)
    if gpu_tests:
        check_init(kv, init_test_keys_device, shape, device=True)
        check_init(kv, init_test_keys_device_big, big_shape, device=True)
    print('worker ' + str(kv.rank) + ' is initialized') 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 24, Source: dist_sync_kvstore.py

Example 10: test_gluon_trainer_step

# Required import: import mxnet [as alias]
# Or: from mxnet import cpu [as alias]
def test_gluon_trainer_step():
    def check_trainer_step():
        ctx = mx.cpu(0)
        shape = (10, 1)
        x = mx.gluon.Parameter('x', shape=shape)
        x.initialize(ctx=ctx, init='ones')
        trainer = mx.gluon.Trainer([x], 'sgd', {'learning_rate': 1.0, 'multi_precision': False}, kvstore=kv)
        with mx.autograd.record():
            w = x.data(ctx)
            y = (my_rank + 1) * w
            y.backward()
        trainer.step(1)
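        # Each worker pushes a gradient of (rank + 1); the kvstore sums them, so
        # SGD with lr=1 yields 1 - sum of 1..nworker = 1 - nworker*(nworker+1)/2.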
        expected = 1 - (1 + nworker) * nworker / 2
        assert_almost_equal(x.data(ctx).asnumpy(), np.full(shape, expected))
    check_trainer_step()
    print('worker ' + str(my_rank) + ' passed test_gluon_trainer_step') 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 18, Source: dist_sync_kvstore.py

Example 11: test_gluon_trainer_sparse_step

# Required import: import mxnet [as alias]
# Or: from mxnet import cpu [as alias]
def test_gluon_trainer_sparse_step():
    def check_trainer_sparse_step():
        ctx = mx.cpu(0)
        shape = (2, 10)
        all_rows = mx.nd.arange(0, shape[0], ctx=ctx)
        x = mx.gluon.Parameter('x', shape=shape, stype='row_sparse', grad_stype='row_sparse')
        x.initialize(ctx=ctx, init='ones')
        trainer = mx.gluon.Trainer([x], 'sgd', {'learning_rate': 1.0}, kvstore=kv)
        with mx.autograd.record():
            w = x.row_sparse_data(all_rows)
            y = (my_rank + 1) * w
            y.backward()
        trainer.step(1)
        expected = 1 - (1 + nworker) * nworker / 2
        assert_almost_equal(x.row_sparse_data(all_rows).asnumpy(), np.full(shape, expected))
    check_trainer_sparse_step()
    print('worker ' + str(my_rank) + ' passed test_gluon_trainer_sparse_step') 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 19, Source: dist_sync_kvstore.py

Example 12: test_lstm_forget_bias

# Required import: import mxnet [as alias]
# Or: from mxnet import cpu [as alias]
def test_lstm_forget_bias():
    forget_bias = 2.0
    stack = gluon.rnn.SequentialRNNCell()
    stack.add(gluon.rnn.LSTMCell(100, i2h_bias_initializer=mx.init.LSTMBias(forget_bias), prefix='l0_'))
    stack.add(gluon.rnn.LSTMCell(100, i2h_bias_initializer=mx.init.LSTMBias(forget_bias), prefix='l1_'))

    dshape = (32, 1, 200)
    data = mx.sym.Variable('data')

    sym, _ = stack.unroll(1, data, merge_outputs=True)
    mod = mx.mod.Module(sym, label_names=None, context=mx.cpu(0))
    mod.bind(data_shapes=[('data', dshape)], label_shapes=None)

    mod.init_params()

    bias_argument = next(x for x in sym.list_arguments() if x.endswith('i2h_bias'))
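    # LSTMCell stacks gate biases in [input, forget, cell, output] order, so
    # only the second 100-element slice should carry the custom forget bias.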
    expected_bias = np.hstack([np.zeros((100,)),
                               forget_bias * np.ones(100, ), np.zeros((2 * 100,))])
    assert_allclose(mod.get_params()[0][bias_argument].asnumpy(), expected_bias) 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 21, Source: test_gluon_rnn.py

Example 13: test_module_dtype

# Required import: import mxnet [as alias]
# Or: from mxnet import cpu [as alias]
def test_module_dtype():
    dtype = np.float16
    dshape = (3, 8, 7)

    sym = mx.sym.Variable('data')
    sym = mx.sym.Activation(data=sym, act_type='relu', __layout__='TNC')

    mod = mx.mod.Module(sym, ('data',), None, context=[mx.cpu(0), mx.cpu(1)])
    mod.bind(data_shapes=[mx.io.DataDesc('data', dshape, dtype, layout='TNC')])
    mod.init_params()
    mod.forward(mx.io.DataBatch(data=[mx.nd.ones(dshape, dtype=dtype)],
                                label=None))
    mod.backward([mx.nd.ones(dshape, dtype=dtype)])

    for x in mod.get_outputs():
        assert x.dtype == dtype
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 18, Source: test_module.py

Example 14: test_module_input_grads

# Required import: import mxnet [as alias]
# Or: from mxnet import cpu [as alias]
def test_module_input_grads():
    a = mx.sym.Variable('a', __layout__='NC')
    b = mx.sym.Variable('b', __layout__='NC')
    c = mx.sym.Variable('c', __layout__='NC')

    c = a + 2 * b + 3 * c
    net = mx.mod.Module(c, data_names=['b', 'c', 'a'], label_names=None,
                        context=[mx.cpu(0), mx.cpu(1)])
    net.bind(data_shapes=[['b', (5, 5)], ['c', (5, 5)], ['a', (5, 5)]],
             label_shapes=None, inputs_need_grad=True)
    net.init_params()

    net.forward(data_batch=mx.io.DataBatch(data=[nd.ones((5, 5)),
                                                 nd.ones((5, 5)),
                                                 nd.ones((5, 5))]))
    net.backward(out_grads=[nd.ones((5, 5))])
    input_grads = net.get_input_grads()
    b_grad = input_grads[0].asnumpy()
    c_grad = input_grads[1].asnumpy()
    a_grad = input_grads[2].asnumpy()
    assert np.all(a_grad == 1), a_grad
    assert np.all(b_grad == 2), b_grad
    assert np.all(c_grad == 3), c_grad 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 25, Source: test_module.py

Example 15: test_module_layout

# Required import: import mxnet [as alias]
# Or: from mxnet import cpu [as alias]
def test_module_layout():
    sym = mx.sym.Variable('data')
    sym = mx.sym.Activation(data=sym, act_type='relu', __layout__='TNC')

    dshape = (3, 8, 7)
    mod = mx.mod.Module(sym, ('data',), None, context=[mx.cpu(0), mx.cpu(1)])
    mod.bind(data_shapes=[mx.io.DataDesc('data', dshape, layout='TNC')])
    mod.init_params()
    mod.forward(mx.io.DataBatch(data=[mx.nd.ones(dshape)],
                                label=None))
    mod.backward([mx.nd.ones(dshape)])
    assert mod.get_outputs()[0].shape == dshape

    hdshape = (3, 4, 7)
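    # With two contexts, the batch axis (N in the 'TNC' layout, size 8) is
    # split across devices, so each per-context output is (3, 4, 7).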
    for x in mod.get_outputs(merge_multi_context=False)[0]:
        assert x.shape == hdshape 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 18, Source: test_module.py


Note: The mxnet.cpu method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors. Consult each project's license before using or redistributing the code, and do not reproduce this article without permission.