Python mxnet.cpu Function Code Examples

This article collects typical usage examples of the mxnet.cpu function in Python. If you are looking for how mx.cpu is called in practice, what arguments it takes, or what real-world usage looks like, the curated examples below should help.


The 15 code examples below demonstrate the cpu function; by default they are ordered by popularity.
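Before the examples, a minimal sketch of what mx.cpu does may be useful: it returns a CPU Context object that tells MXNet where to place arrays and run computation, and its optional device id defaults to 0. The snippet below is illustrative only and is not taken from any of the projects listed in this article.

import mxnet as mx

ctx = mx.cpu()                     # CPU context; mx.cpu() is shorthand for mx.cpu(0)
a = mx.nd.ones((2, 3), ctx=ctx)    # allocate an NDArray on the CPU
b = a * 2                          # computation runs on the array's context
print(b.context)                   # cpu(0)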

Example 1: test_convolution_grouping

def test_convolution_grouping():
    num_filter = 4
    num_group = 2
    kernel = (3, 3)
    shape = (1, 4, 9, 9)

    x = mx.sym.Variable('x')
    w = mx.sym.Variable('w')
    b = mx.sym.Variable('b')
    y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group, kernel=kernel)
    xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
    wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
    bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
    y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
                                            num_filter=num_filter//num_group, kernel=kernel)
                       for i in range(num_group)])

    exe1 = y1.simple_bind(mx.cpu(), x=shape)
    exe2 = y2.simple_bind(mx.cpu(), x=shape, w=(num_filter, shape[1]//num_group, kernel[0], kernel[1]), b=(num_filter,))
    for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
        arr1[:] = np.random.normal(size=arr1.shape)
        arr2[:] = arr1
    exe1.forward(is_train=True)
    exe1.backward(exe1.outputs[0])
    exe2.forward(is_train=True)
    exe2.backward(exe2.outputs[0])

    for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
        np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3)
Developer: 1132520084, Project: mxnet, Lines: 29, Source: test_operator.py

Example 2: test_paramdict

def test_paramdict():
    params = gluon.ParameterDict('net_')
    params.get('weight', shape=(10, 10))
    assert list(params.keys()) == ['net_weight']
    params.initialize(ctx=mx.cpu())
    params.save('test.params')
    params.load('test.params', mx.cpu())
Developer: yzhang87, Project: mxnet, Lines: 7, Source: test_nn.py

Example 3: test_elementwisesum_with_type

def test_elementwisesum_with_type():
    sym = mx.sym.ElementWiseSum(name="ews", num_args=2)
    ctx_list = [
        {
            "ctx": mx.gpu(0),
            "ews_arg1": (2, 10),
            "ews_arg0": (2, 10),
            "type_dict": {"ews_arg0": np.float64, "ews_arg1": np.float64},
        },
        {
            "ctx": mx.gpu(0),
            "ews_arg1": (2, 10),
            "ews_arg0": (2, 10),
            "type_dict": {"ews_arg0": np.float32, "ews_arg1": np.float32},
        },
        {
            "ctx": mx.gpu(0),
            "ews_arg1": (2, 10),
            "ews_arg0": (2, 10),
            "type_dict": {"ews_arg0": np.float16, "ews_arg1": np.float16},
        },
        {
            "ctx": mx.cpu(0),
            "ews_arg1": (2, 10),
            "ews_arg0": (2, 10),
            "type_dict": {"ews_arg0": np.float64, "ews_arg1": np.float64},
        },
        {
            "ctx": mx.cpu(0),
            "ews_arg1": (2, 10),
            "ews_arg0": (2, 10),
            "type_dict": {"ews_arg0": np.float32, "ews_arg1": np.float32},
        },
    ]
    check_consistency(sym, ctx_list)
Developer: alextnewman, Project: mxnet, Lines: 35, Source: test_operator_gpu.py

Example 4: test_concat_with_type

def test_concat_with_type():
    sym = mx.sym.Concat(name="concat", num_args=2)
    ctx_list = [
        {
            "ctx": mx.gpu(0),
            "concat_arg1": (2, 10),
            "concat_arg0": (2, 10),
            "type_dict": {"concat_arg0": np.float64, "concat_arg1": np.float64},
        },
        {
            "ctx": mx.gpu(0),
            "concat_arg1": (2, 10),
            "concat_arg0": (2, 10),
            "type_dict": {"concat_arg0": np.float32, "concat_arg1": np.float32},
        },
        {
            "ctx": mx.gpu(0),
            "concat_arg1": (2, 10),
            "concat_arg0": (2, 10),
            "type_dict": {"concat_arg0": np.float16, "concat_arg1": np.float16},
        },
        {
            "ctx": mx.cpu(0),
            "concat_arg1": (2, 10),
            "concat_arg0": (2, 10),
            "type_dict": {"concat_arg0": np.float64, "concat_arg1": np.float64},
        },
        {
            "ctx": mx.cpu(0),
            "concat_arg1": (2, 10),
            "concat_arg0": (2, 10),
            "type_dict": {"concat_arg0": np.float32, "concat_arg1": np.float32},
        },
    ]
    check_consistency(sym, ctx_list)
Developer: alextnewman, Project: mxnet, Lines: 35, Source: test_operator_gpu.py

Example 5: run

	def run(self):
		data_validate = mx.io.CSVIter(data_csv="../validate-64x64-data.csv", data_shape=(30, 64, 64), batch_size=1)
		network = get_lenet()
		batch_size = 32
		devs = [mx.cpu(0), mx.cpu(0), mx.cpu(0), mx.cpu(0)]  # distribute the workload across multiple CPU cores
		data_train = mx.io.CSVIter(data_csv=self.input()['data'].path, data_shape=(30, 64, 64),
				label_csv=self.input()['label'].path, label_shape=(600,), batch_size=batch_size)


		print "\n%d epochs\n" % self.tune_epoch()
		model = mx.model.FeedForward(ctx=devs,
				symbol             = network,
				num_epoch          = self.tune_epoch(),
				learning_rate      = 0.001,
				wd                 = 0.00001,
				momentum           = 0.9)

		model.fit(X=data_train, eval_metric = mx.metric.np(CRPS))
		prob = model.predict(data_validate)
		prob_fname = "%s_prob" % self.name
		try:
			np.save(prob_fname, prob)
		except Exception:
			# fall back to pickle if numpy cannot serialize the predictions
			with open(prob_fname + '.p', 'wb') as f:
				pickle.dump(prob, f)

		pickle.dump(model, open(self.output().path, 'wb'))
Developer: polhooper, Project: KDSB_2015, Lines: 26, Source: Train.py

Example 6: test_load_000800

def test_load_000800():
    with mx.AttrScope(ctx_group='stage1'):
        data = mx.symbol.Variable('data', lr_mult=0.2)
        weight = mx.sym.Variable(name='fc1_weight', lr_mult=1.2)
        fc1  = mx.symbol.FullyConnected(data = data, weight=weight, name='fc1', num_hidden=128, wd_mult=0.3)
        act1 = mx.symbol.Activation(data = fc1, name='relu1', act_type="relu")

    set_stage1 = set(act1.list_arguments())
    with mx.AttrScope(ctx_group='stage2'):
        fc2  = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 64, lr_mult=0.01)
        act2 = mx.symbol.Activation(data = fc2, name='relu2', act_type="relu")
        fc3  = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=10)
        fc3 = mx.symbol.BatchNorm(fc3, name='batchnorm0')
        sym1  = mx.symbol.SoftmaxOutput(data = fc3, name = 'softmax')

    curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
    sym2 = mx.sym.load(os.path.join(curr_path, 'save_000800.json'))

    attr1 = sym1.attr_dict()
    attr2 = sym2.attr_dict()
    for k, v1 in attr1.items():
        assert k in attr2, k
        v2 = attr2[k]
        for kk, vv1 in v1.items():
            if kk.startswith('__') and kk.endswith('__'):
                assert kk in v2 and v2[kk] == vv1, k + str(v1) + str(v2)

    check_symbol_consistency(sym1, sym2,
        {'ctx': mx.cpu(0), 'group2ctx': {'stage1' : mx.cpu(1), 'stage2' : mx.cpu(2)}, 'data': (1,200)})
Developer: csgcmai, Project: mxnet, Lines: 29, Source: test_symbol.py

Example 7: validate

def validate(val_data, val_dataset, net, ctx):
    if isinstance(ctx, mx.Context):
        ctx = [ctx]

    val_metric.reset()

    from tqdm import tqdm
    for batch in tqdm(val_data):
        data, scale, center, score, imgid = val_batch_fn(batch, ctx)

        outputs = [net(X) for X in data]
        if opt.flip_test:
            data_flip = [nd.flip(X, axis=3) for X in data]
            outputs_flip = [net(X) for X in data_flip]
            outputs_flipback = [flip_heatmap(o, val_dataset.joint_pairs, shift=True) for o in outputs_flip]
            outputs = [(o + o_flip)/2 for o, o_flip in zip(outputs, outputs_flipback)]

        if len(outputs) > 1:
            outputs_stack = nd.concat(*[o.as_in_context(mx.cpu()) for o in outputs], dim=0)
        else:
            outputs_stack = outputs[0].as_in_context(mx.cpu())

        preds, maxvals = get_final_preds(outputs_stack, center.asnumpy(), scale.asnumpy())
        val_metric.update(preds, maxvals, score, imgid)

    res = val_metric.get()
    return res
Developer: xiayongtao, Project: gluon-cv, Lines: 27, Source: validate.py

Example 8: test_convolution_with_type

def test_convolution_with_type():
    np.random.seed(1234)
    sym1 = mx.sym.Convolution(num_filter=3, kernel=(3,3), name='conv')

    data = mx.sym.Variable('conv_data')
    w = mx.sym.Variable('conv_weight')
    b = mx.sym.Variable('conv_bias')
    w = mx.sym.transpose(w, axes=(0,2,3,1))
    sym2 = mx.sym.transpose(data, axes=(0,2,3,1))
    sym2 = mx.sym.Convolution(sym2, w, b, layout='NHWC', num_filter=3, kernel=(3,3))
    sym2 = mx.sym.transpose(sym2, axes=(0,3,1,2), name='conv')

    sym = [sym1, sym1, sym1, sym1, sym1, sym2, sym2]
    ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float16}},
                {'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
                {'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
                # NHWC
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
                 'type_dict': {'conv_data': np.float32, 'conv_weight': np.float32}},
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
                 'type_dict': {'conv_data': np.float16, 'conv_weight': np.float16}}
                ]
    # wider tolerance needed for true-fp16 NCHW test above
    tol = {np.dtype(np.float16): 0.5,
           np.dtype(np.float32): 1e-3,
           np.dtype(np.float64): 1e-5,
           np.dtype(np.uint8): 0,
           np.dtype(np.int32): 0}
    check_consistency(sym, ctx_list, tol=tol)
    # test ability to turn off training on bias
    check_consistency(sym, ctx_list, grad_req={'conv_data': 'write', 'conv_weight': 'write', 'conv_bias': 'null'}, tol=tol)
Developer: moveforever, Project: mxnet, Lines: 33, Source: test_operator_gpu.py

Example 9: test_save_load

def test_save_load():
    net = mx.gluon.model_zoo.vision.get_resnet(1, 18, pretrained=True)
    net.save_parameters('test_save_load.params')

    net = mx.gluon.model_zoo.vision.get_resnet(1, 18)
    net.output = mx.gluon.nn.Dense(1000)

    net.load_parameters('test_save_load.params')

    class Network(gluon.Block):
        def __init__(self, **kwargs):
            super(Network, self).__init__(**kwargs)
            with self.name_scope():
                self.encoders = gluon.nn.Sequential()
                with self.encoders.name_scope():
                    for _ in range(2):
                        lstm = mx.gluon.rnn.LSTM(200, 1, bidirectional=True)
                        self.encoders.add(lstm)

        def forward(self, x):
            for i in range(2):
                x = self.encoders[i](x)
            return x
    net = Network()
    net.initialize(mx.init.Xavier(), ctx=mx.cpu())
    net.hybridize()
    x = np.random.rand(32, 10, 10)
    x = mx.nd.array(x).as_in_context(mx.cpu())
    net(x)
    net.save_parameters('tmp.params')
    net2 = Network()
    net2.load_parameters('tmp.params')
Developer: zwz173131329, Project: incubator-mxnet, Lines: 32, Source: test_gluon.py

Example 10: test_parameter_sharing

def test_parameter_sharing():
    class Net(gluon.Block):
        def __init__(self, in_units=0, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.dense0 = nn.Dense(5, in_units=in_units)
                self.dense1 = nn.Dense(5, in_units=in_units)

        def forward(self, x):
            return self.dense1(self.dense0(x))

    net1 = Net(prefix='net1_', in_units=5)
    net2 = Net(prefix='net2_', params=net1.collect_params())
    net1.collect_params().initialize()
    net2(mx.nd.zeros((3, 5)))

    net1.save_parameters('net1.params')

    net3 = Net(prefix='net3_')
    net3.load_parameters('net1.params', mx.cpu())

    net4 = Net(prefix='net4_')
    net5 = Net(prefix='net5_', in_units=5, params=net4.collect_params())
    net4.collect_params().initialize()
    net5(mx.nd.zeros((3, 5)))

    net4.save_parameters('net4.params')

    net6 = Net(prefix='net6_')
    net6.load_parameters('net4.params', mx.cpu())
Developer: zwz173131329, Project: incubator-mxnet, Lines: 30, Source: test_gluon.py

Example 11: test_module_ctx_group

def test_module_ctx_group():
    with mx.AttrScope(ctx_group='dev1'):
        a = mx.symbol.Variable('a')
        a = a * 2
    with mx.AttrScope(ctx_group='dev2'):
        b = mx.symbol.Variable('b')
        c = a + b
    shape = (2, 5)
    mod1 = mx.mod.Module(c, context=[mx.cpu(0)], data_names=['a', 'b'], label_names=None,
                         group2ctxs=[{'dev1':mx.cpu(1),'dev2':mx.cpu(2)}])
    mod1.bind(data_shapes=[['a', shape], ['b', shape]], inputs_need_grad=True)
    mod1.init_params()
    mod1.forward(data_batch=mx.io.DataBatch(data=[mx.nd.ones(shape), mx.nd.ones(shape)]), is_train=True)
    mod1.backward([mx.nd.ones(shape)])
    mod1_input_grads = mod1.get_input_grads()

    mod2 = mx.mod.Module(c, data_names=['a', 'b'], label_names=None)
    mod2.bind(data_shapes=[['a', shape], ['b', shape]], inputs_need_grad=True)
    mod2.init_params()
    mod2.forward(data_batch=mx.io.DataBatch(data=[mx.nd.ones(shape), mx.nd.ones(shape)]), is_train=True)
    mod2.backward([mx.nd.ones(shape)])
    mod2_input_grads = mod2.get_input_grads()

    assert np.all(mod1_input_grads[0].asnumpy() == mod2_input_grads[0].asnumpy())
    assert np.all(mod1_input_grads[1].asnumpy() == mod2_input_grads[1].asnumpy())
Developer: GrassSunFlower, Project: mxnet, Lines: 25, Source: test_module.py

Example 12: test_activation_with_type

def test_activation_with_type():
    sym = mx.sym.Activation(name='act', act_type='sigmoid')
    ctx_list = [{'ctx': mx.gpu(0), 'act_data': (2, 2, 10, 10), 'type_dict': {'act_data': np.float64}},
                {'ctx': mx.gpu(0), 'act_data': (2, 2, 10, 10), 'type_dict': {'act_data': np.float32}},
                {'ctx': mx.cpu(0), 'act_data': (2, 2, 10, 10), 'type_dict': {'act_data': np.float64}},
                {'ctx': mx.cpu(0), 'act_data': (2, 2, 10, 10), 'type_dict': {'act_data': np.float32}}]
    check_consistency(sym, ctx_list)
Developer: adavanisanti, Project: mxnet, Lines: 7, Source: test_operator_gpu.py

Example 13: test_fullyconnected_with_type

def test_fullyconnected_with_type():
    sym = mx.sym.FullyConnected(num_hidden=3, name='inner')
    ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},
                {'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}},
                {'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},
                {'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}}]
    check_consistency(sym, ctx_list)
Developer: adavanisanti, Project: mxnet, Lines: 7, Source: test_operator_gpu.py

Example 14: test_convolution_with_type

def test_convolution_with_type():
    sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), name='conv')
    ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
                {'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
                {'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}}]
    check_consistency(sym, ctx_list)
Developer: adavanisanti, Project: mxnet, Lines: 7, Source: test_operator_gpu.py

Example 15: test_ctx_group

def test_ctx_group():
    with mx.AttrScope(ctx_group='stage1'):
        data = mx.symbol.Variable('data')
        fc1  = mx.symbol.FullyConnected(data = data, name='fc1', num_hidden=128)
        act1 = mx.symbol.Activation(data = fc1, name='relu1', act_type="relu")

    set_stage1 = set(act1.list_arguments())
    with mx.AttrScope(ctx_group='stage2'):
        fc2  = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 64)
        act2 = mx.symbol.Activation(data = fc2, name='relu2', act_type="relu")
        fc3  = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=10)
        fc3 = mx.symbol.BatchNorm(fc3)
        mlp  = mx.symbol.SoftmaxOutput(data = fc3, name = 'softmax')

    set_stage2 = set(mlp.list_arguments()) - set_stage1

    group2ctx = {
        'stage1' : mx.cpu(1),
        'stage2' : mx.cpu(2)
    }

    texec = mlp.simple_bind(mx.cpu(0),
                            group2ctx=group2ctx,
                            data=(1,200))

    for arr, name in zip(texec.arg_arrays, mlp.list_arguments()):
        if name in set_stage1:
            assert arr.context == group2ctx['stage1']
        else:
            assert arr.context == group2ctx['stage2']
Developer: green-dalii, Project: incubator-mxnet, Lines: 30, Source: test_multi_device_exec.py


Note: The mxnet.cpu examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and distribution and use should follow each project's license. Please do not republish without permission.