This page collects typical usage examples of the Python method mxnet.current_context. If you have been wondering what mxnet.current_context does and how to call it, the curated code samples below may help. You can also explore further usage examples from the mxnet module in which this method is defined.
The following shows 15 code examples of mxnet.current_context, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
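For orientation, mxnet.current_context() (also available as mx.context.current_context()) returns the Context that is currently in effect, which is cpu(0) unless a Context block has been entered; the examples below rely on this to pick up whatever device the test run is configured for. A minimal usage sketch (assuming MXNet 1.x and, for the GPU branch, a CUDA build with at least one GPU available):

import mxnet as mx

# Outside any Context block the default device context is returned.
print(mx.current_context())        # cpu(0)

# Inside a Context block, current_context() reflects that device instead.
with mx.Context('gpu', 0):
    print(mx.current_context())    # gpu(0)

# Arrays created with ctx=mx.current_context() land on the active device.
x = mx.nd.ones((2, 2), ctx=mx.current_context())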
Example 1: test_random_seed_setting
# Required import: import mxnet [as alias]
# Or: from mxnet import current_context [as alias]
def test_random_seed_setting():
    ctx = mx.context.current_context()
    seed_to_test = 1234
    num_temp_seeds = 25
    probs = [0.125, 0.25, 0.25, 0.0625, 0.125, 0.1875]
    num_samples = 100000
    for dtype in ['float16', 'float32', 'float64']:
        seed = set_seed_variously(1, num_temp_seeds, seed_to_test)
        samples1 = mx.nd.random.multinomial(data=mx.nd.array(probs, ctx=ctx, dtype=dtype),
                                            shape=num_samples)
        seed = set_seed_variously(seed, num_temp_seeds, seed_to_test)
        samples2 = mx.nd.random.multinomial(data=mx.nd.array(probs, ctx=ctx, dtype=dtype),
                                            shape=num_samples)
        samples1np = samples1.asnumpy()
        set_seed_variously(seed, num_temp_seeds, seed_to_test + 1)
        samples2np = samples2.asnumpy()
        assert same(samples1np, samples2np), \
            "seed-setting test: `multinomial` should give the same result with the same seed"
# Tests that seed setting of parallel rng is synchronous w.r.t. rng use before and after.
Example 2: test_uniform_generator
# Required import: import mxnet [as alias]
# Or: from mxnet import current_context [as alias]
def test_uniform_generator():
    ctx = mx.context.current_context()
    for dtype in ['float16', 'float32', 'float64']:
        for low, high in [(-1.0, 1.0), (1.0, 3.0)]:
            print("ctx=%s, dtype=%s, Low=%g, High=%g:" % (ctx, dtype, low, high))
            scale = high - low
            buckets, probs = gen_buckets_probs_with_ppf(lambda x: ss.uniform.ppf(x, loc=low, scale=scale), 5)
            # Quantize bucket boundaries to reflect the actual dtype and adjust probs accordingly
            buckets = np.array(buckets, dtype=dtype).tolist()
            probs = [(buckets[i][1] - buckets[i][0]) / scale for i in range(5)]
            generator_mx = lambda x: mx.nd.random.uniform(low, high, shape=x, ctx=ctx, dtype=dtype).asnumpy()
            verify_generator(generator=generator_mx, buckets=buckets, probs=probs)
            generator_mx_same_seed = \
                lambda x: np.concatenate(
                    [mx.nd.random.uniform(low, high, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()
                     for _ in range(10)])
            verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs)
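This example and the later generator tests (Examples 9 through 12) depend on two helpers that are not shown on this page: gen_buckets_probs_with_ppf, which splits a distribution into equal-probability buckets via its quantile (ppf) function, and verify_generator, which draws samples from the given generator and runs a chi-square goodness-of-fit test against those buckets. A minimal sketch of what the bucket construction presumably looks like, assuming equal-probability buckets as the call above suggests:

import numpy as np

def gen_buckets_probs_with_ppf(ppf, nbuckets):
    # Bucket edges are the quantiles at 1/nbuckets, 2/nbuckets, ...,
    # so each bucket carries the same probability mass.
    edges = [-np.inf] + [ppf((i + 1) / nbuckets) for i in range(nbuckets - 1)] + [np.inf]
    buckets = list(zip(edges[:-1], edges[1:]))
    probs = [1.0 / nbuckets] * nbuckets
    return buckets, probs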
Example 3: check_fusion
# Required import: import mxnet [as alias]
# Or: from mxnet import current_context [as alias]
def check_fusion(sym, data_shape, attrs_op):
    sym_sg = sym.get_backend_symbol("MKLDNN")
    assert ''.join(sym_sg.get_internals().list_outputs()).find('sg_mkldnn_conv') != -1
    for k, v in sym_sg.attr_dict().items():
        if k.find('sg_mkldnn_conv') != -1:
            for attr_op in attrs_op:
                assert v[attr_op] == 'true'

    arg_shapes, _, aux_shapes = sym.infer_shape()
    arg_array = [mx.nd.random.uniform(-1, 1, shape=shape) for shape in arg_shapes]
    aux_array = [mx.nd.random.uniform(shape=shape) for shape in aux_shapes]
    exe = sym.bind(ctx=mx.current_context(), args=arg_array, aux_states=aux_array, grad_req='null')
    exe.forward()
    os.environ['MXNET_SUBGRAPH_BACKEND'] = 'MKLDNN'
    exe_sg = sym.bind(ctx=mx.current_context(), args=arg_array, aux_states=aux_array, grad_req='null')
    exe_sg.forward()
    del os.environ['MXNET_SUBGRAPH_BACKEND']
    for i in range(len(exe.outputs)):
        assert_almost_equal(exe.outputs[i].asnumpy(), exe_sg.outputs[i].asnumpy(), rtol=1e-3, atol=1e-3)

    # fp32 to uint8
    check_quantize(sym, data_shape)
Example 4: test_mxnet_module_wrapper
# Required import: import mxnet [as alias]
# Or: from mxnet import current_context [as alias]
def test_mxnet_module_wrapper(data_frame):
    from datawig.imputer import _MXNetModule
    import mxnet as mx
    from datawig.iterators import ImputerIterDf

    feature_col, label_col = "feature", "label"
    df = data_frame(n_samples=100, feature_col=feature_col, label_col=label_col)
    label_encoders = [CategoricalEncoder(label_col)]
    data_encoders = [BowEncoder(feature_col)]
    data_featurizers = [BowFeaturizer(feature_col, max_tokens=100)]
    iter_train = ImputerIterDf(df, data_encoders, label_encoders)

    mod = _MXNetModule(mx.current_context(), label_encoders, data_featurizers, final_fc_hidden_units=[])(iter_train)

    assert mod._label_names == [label_col]
    assert sorted(mod.data_names) == sorted([feature_col] + [INSTANCE_WEIGHT_COLUMN])
    # weights and biases
    assert len(mod._arg_params) == 2
Example 5: test_random
# Required import: import mxnet [as alias]
# Or: from mxnet import current_context [as alias]
def test_random():
    check_with_device(mx.context.current_context(), 'float16')
    check_with_device(mx.context.current_context(), 'float32')
    check_with_device(mx.context.current_context(), 'float64')
# Set seed variously based on `start_seed` and `num_init_seeds`, then set seed finally to `final_seed`
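The set_seed_variously helper described by the comment above is used in Examples 1 and 6 but is not included in this listing. A minimal sketch consistent with that description and with how the tests feed its return value back in as the next start seed (an assumption, since the helper itself is not shown here):

def set_seed_variously(init_seed, num_init_seeds, final_seed):
    # Churn the global rng seed a number of times, then settle on final_seed.
    end_seed = init_seed + num_init_seeds
    for seed in range(init_seed, end_seed):
        mx.random.seed(seed)
    mx.random.seed(final_seed)
    return end_seed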
Example 6: test_parallel_random_seed_setting
# Required import: import mxnet [as alias]
# Or: from mxnet import current_context [as alias]
def test_parallel_random_seed_setting():
    ctx = mx.context.current_context()
    seed_to_test = 1234
    for dtype in ['float16', 'float32', 'float64']:
        # Avoid excessive test cpu runtimes
        num_temp_seeds = 25 if ctx.device_type == 'gpu' else 1
        # To flush out a possible race condition, run multiple times
        for _ in range(20):
            # Create enough samples such that we get a meaningful distribution.
            shape = (200, 200)
            params = {'low': -1.5, 'high': 3.0}
            params.update(shape=shape, dtype=dtype, ctx=ctx)

            # check directly
            seed = set_seed_variously(1, num_temp_seeds, seed_to_test)
            ret1 = mx.nd.random.uniform(**params)
            seed = set_seed_variously(seed, num_temp_seeds, seed_to_test)
            ret2 = mx.nd.random.uniform(**params)
            seed = set_seed_variously(seed, num_temp_seeds, seed_to_test)
            assert same(ret1.asnumpy(), ret2.asnumpy()), \
                "ndarray seed-setting test: `uniform` should give the same result with the same seed"

            # check symbolic
            X = mx.sym.Variable("X")
            Y = mx.sym.random.uniform(**params) + X
            x = mx.nd.zeros(shape, dtype=dtype, ctx=ctx)
            xgrad = mx.nd.zeros(shape, dtype=dtype, ctx=ctx)
            yexec = Y.bind(ctx, {'X': x}, {'X': xgrad})
            seed = set_seed_variously(seed, num_temp_seeds, seed_to_test)
            yexec.forward(is_train=True)
            yexec.backward(yexec.outputs[0])
            un1 = (yexec.outputs[0] - x).copyto(ctx)
            seed = set_seed_variously(seed, num_temp_seeds, seed_to_test)
            yexec.forward()
            set_seed_variously(seed, num_temp_seeds, seed_to_test)
            un2 = (yexec.outputs[0] - x).copyto(ctx)
            assert same(un1.asnumpy(), un2.asnumpy()), \
                "symbolic seed-setting test: `uniform` should give the same result with the same seed"
# Set seed for the context variously based on `start_seed` and `num_init_seeds`, then set seed finally to `final_seed`
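The per-context variant used in Examples 7 and 8 is likewise not shown on this page. A minimal sketch, under the assumption that it mirrors set_seed_variously but seeds only the given device (mx.random.seed accepts a ctx argument for this):

def set_seed_variously_for_context(ctx, init_seed, num_init_seeds, final_seed):
    # Churn the rng seed of this context a number of times, then settle on final_seed.
    end_seed = init_seed + num_init_seeds
    for seed in range(init_seed, end_seed):
        mx.random.seed(seed, ctx=ctx)
    mx.random.seed(final_seed, ctx=ctx)
    return end_seed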
Example 7: test_random_seed_setting_for_context
# Required import: import mxnet [as alias]
# Or: from mxnet import current_context [as alias]
def test_random_seed_setting_for_context():
    seed_to_test = 1234
    num_temp_seeds = 25
    probs = [0.125, 0.25, 0.25, 0.0625, 0.125, 0.1875]
    num_samples = 100000
    dev_type = mx.context.current_context().device_type
    for dtype in ['float16', 'float32', 'float64']:
        samples_imp = []
        samples_sym = []
        # Collect random number samples from the generators of all devices, each seeded with the same number.
        for dev_id in range(0, mx.context.num_gpus() if dev_type == 'gpu' else 1):
            with mx.Context(dev_type, dev_id):
                ctx = mx.context.current_context()
                seed = set_seed_variously_for_context(ctx, 1, num_temp_seeds, seed_to_test)

                # Check imperative. `multinomial` uses non-parallel rng.
                rnds = mx.nd.random.multinomial(data=mx.nd.array(probs, dtype=dtype), shape=num_samples)
                samples_imp.append(rnds.asnumpy())

                # Check symbolic. `multinomial` uses non-parallel rng.
                P = mx.sym.Variable("P")
                X = mx.sym.random.multinomial(data=P, shape=num_samples, get_prob=False)
                exe = X.bind(ctx, {"P": mx.nd.array(probs, dtype=dtype)})
                set_seed_variously_for_context(ctx, seed, num_temp_seeds, seed_to_test)
                exe.forward()
                samples_sym.append(exe.outputs[0].asnumpy())
        # The samples should be identical across different gpu devices.
        for i in range(1, len(samples_imp)):
            assert same(samples_imp[i - 1], samples_imp[i])
        for i in range(1, len(samples_sym)):
            assert same(samples_sym[i - 1], samples_sym[i])
# Tests that seed setting of parallel rng for specific context is synchronous w.r.t. rng use before and after.
Example 8: test_parallel_random_seed_setting_for_context
# Required import: import mxnet [as alias]
# Or: from mxnet import current_context [as alias]
def test_parallel_random_seed_setting_for_context():
    seed_to_test = 1234
    dev_type = mx.context.current_context().device_type
    for dtype in ['float16', 'float32', 'float64']:
        samples_imp = []
        samples_sym = []
        # Collect random number samples from the generators of all devices, each seeded with the same number.
        for dev_id in range(0, mx.context.num_gpus() if dev_type == 'gpu' else 1):
            with mx.Context(dev_type, dev_id):
                ctx = mx.context.current_context()
                # Avoid excessive test cpu runtimes.
                num_temp_seeds = 25 if dev_type == 'gpu' else 1
                # To flush out a possible race condition, run multiple times.
                for _ in range(20):
                    # Create enough samples such that we get a meaningful distribution.
                    shape = (200, 200)
                    params = {'low': -1.5, 'high': 3.0}
                    params.update(shape=shape, dtype=dtype)

                    # Check imperative. `uniform` uses parallel rng.
                    seed = set_seed_variously_for_context(ctx, 1, num_temp_seeds, seed_to_test)
                    rnds = mx.nd.random.uniform(**params)
                    samples_imp.append(rnds.asnumpy())

                    # Check symbolic. `uniform` uses parallel rng.
                    X = mx.sym.Variable("X")
                    Y = mx.sym.random.uniform(**params) + X
                    x = mx.nd.zeros(shape, dtype=dtype)
                    xgrad = mx.nd.zeros(shape, dtype=dtype)
                    yexec = Y.bind(ctx, {'X': x}, {'X': xgrad})
                    set_seed_variously_for_context(ctx, seed, num_temp_seeds, seed_to_test)
                    yexec.forward(is_train=True)
                    yexec.backward(yexec.outputs[0])
                    samples_sym.append(yexec.outputs[0].asnumpy())
        # The samples should be identical across different gpu devices.
        for i in range(1, len(samples_imp)):
            assert same(samples_imp[i - 1], samples_imp[i])
        for i in range(1, len(samples_sym)):
            assert same(samples_sym[i - 1], samples_sym[i])
Example 9: test_gamma_generator
# Required import: import mxnet [as alias]
# Or: from mxnet import current_context [as alias]
def test_gamma_generator():
    success_rate = 0.05
    ctx = mx.context.current_context()
    for dtype in ['float16', 'float32', 'float64']:
        for kappa, theta in [(0.5, 1.0), (1.0, 5.0)]:
            print("ctx=%s, dtype=%s, Shape=%g, Scale=%g:" % (ctx, dtype, kappa, theta))
            buckets, probs = gen_buckets_probs_with_ppf(lambda x: ss.gamma.ppf(x, a=kappa, loc=0, scale=theta), 5)
            generator_mx = lambda x: mx.nd.random.gamma(kappa, theta, shape=x, ctx=ctx, dtype=dtype).asnumpy()
            verify_generator(generator=generator_mx, buckets=buckets, probs=probs, success_rate=success_rate)
            generator_mx_same_seed = \
                lambda x: np.concatenate(
                    [mx.nd.random.gamma(kappa, theta, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()
                     for _ in range(10)])
            verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs, success_rate=success_rate)
Example 10: test_exponential_generator
# Required import: import mxnet [as alias]
# Or: from mxnet import current_context [as alias]
def test_exponential_generator():
    ctx = mx.context.current_context()
    for dtype in ['float16', 'float32', 'float64']:
        for scale in [0.1, 1.0]:
            print("ctx=%s, dtype=%s, Scale=%g:" % (ctx, dtype, scale))
            buckets, probs = gen_buckets_probs_with_ppf(lambda x: ss.expon.ppf(x, loc=0, scale=scale), 5)
            generator_mx = lambda x: mx.nd.random.exponential(scale, shape=x, ctx=ctx, dtype=dtype).asnumpy()
            verify_generator(generator=generator_mx, buckets=buckets, probs=probs)
            generator_mx_same_seed = \
                lambda x: np.concatenate(
                    [mx.nd.random.exponential(scale, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()
                     for _ in range(10)])
            verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs)
Example 11: test_poisson_generator
# Required import: import mxnet [as alias]
# Or: from mxnet import current_context [as alias]
def test_poisson_generator():
    ctx = mx.context.current_context()
    for dtype in ['float16', 'float32', 'float64']:
        for lam in [1, 10]:
            print("ctx=%s, dtype=%s, Lambda=%d:" % (ctx, dtype, lam))
            buckets = [(-1.0, lam - 0.5), (lam - 0.5, 2 * lam + 0.5), (2 * lam + 0.5, np.inf)]
            probs = [ss.poisson.cdf(bucket[1], lam) - ss.poisson.cdf(bucket[0], lam) for bucket in buckets]
            generator_mx = lambda x: mx.nd.random.poisson(lam, shape=x, ctx=ctx, dtype=dtype).asnumpy()
            verify_generator(generator=generator_mx, buckets=buckets, probs=probs)
            generator_mx_same_seed = \
                lambda x: np.concatenate(
                    [mx.nd.random.poisson(lam, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()
                     for _ in range(10)])
            verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs)
Example 12: test_negative_binomial_generator
# Required import: import mxnet [as alias]
# Or: from mxnet import current_context [as alias]
def test_negative_binomial_generator():
    ctx = mx.context.current_context()
    for dtype in ['float16', 'float32', 'float64']:
        success_num = 2
        success_prob = 0.2
        print("ctx=%s, dtype=%s, Success Num=%d, Success Prob=%g:" % (ctx, dtype, success_num, success_prob))
        buckets = [(-1.0, 2.5), (2.5, 5.5), (5.5, 8.5), (8.5, np.inf)]
        probs = [ss.nbinom.cdf(bucket[1], success_num, success_prob) -
                 ss.nbinom.cdf(bucket[0], success_num, success_prob) for bucket in buckets]
        generator_mx = lambda x: mx.nd.random.negative_binomial(success_num, success_prob,
                                                                shape=x, ctx=ctx, dtype=dtype).asnumpy()
        verify_generator(generator=generator_mx, buckets=buckets, probs=probs)
        generator_mx_same_seed = \
            lambda x: np.concatenate(
                [mx.nd.random.negative_binomial(success_num, success_prob, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()
                 for _ in range(10)])
        verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs)

        # Also test the Gamma-Poisson mixture parameterization of the same distribution.
        print('Gamma-Poisson Mixture Test:')
        alpha = 1.0 / success_num
        mu = (1.0 - success_prob) / success_prob / alpha
        generator_mx = lambda x: mx.nd.random.generalized_negative_binomial(mu, alpha,
                                                                            shape=x, ctx=ctx, dtype=dtype).asnumpy()
        verify_generator(generator=generator_mx, buckets=buckets, probs=probs)
        generator_mx_same_seed = \
            lambda x: np.concatenate(
                [mx.nd.random.generalized_negative_binomial(mu, alpha, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()
                 for _ in range(10)])
        verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs)
Example 13: test_unique_zipfian_generator
# Required import: import mxnet [as alias]
# Or: from mxnet import current_context [as alias]
def test_unique_zipfian_generator():
    ctx = mx.context.current_context()
    if ctx.device_type == 'cpu':
        num_sampled = 8192
        range_max = 793472
        batch_size = 4
        op = mx.nd._internal._sample_unique_zipfian
        classes, num_trials = op(range_max, shape=(batch_size, num_sampled))
        for i in range(batch_size):
            num_trial = num_trials[i].asscalar()
            # test uniqueness
            assert np.unique(classes[i].asnumpy()).size == num_sampled
            # test num trials; reference counts obtained from the PyTorch implementation
            assert num_trial > 14500
            assert num_trial < 17000
Example 14: test_zipfian_generator
# Required import: import mxnet [as alias]
# Or: from mxnet import current_context [as alias]
def test_zipfian_generator():
    # dummy true classes
    num_true = 5
    num_sampled = 1000
    range_max = 20

    def compute_expected_prob():
        # P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)
        classes = mx.nd.arange(0, range_max)
        expected_counts = ((classes + 2).log() - (classes + 1).log()) / np.log(range_max + 1)
        return expected_counts

    exp_cnt = compute_expected_prob() * num_sampled

    # test ndarray
    true_classes = mx.nd.random.uniform(0, range_max, shape=(num_true,)).astype('int32')
    sampled_classes, exp_cnt_true, exp_cnt_sampled = mx.nd.contrib.rand_zipfian(true_classes, num_sampled, range_max)
    mx.test_utils.assert_almost_equal(exp_cnt_sampled.asnumpy(), exp_cnt[sampled_classes].asnumpy(), rtol=1e-1, atol=1e-2)
    mx.test_utils.assert_almost_equal(exp_cnt_true.asnumpy(), exp_cnt[true_classes].asnumpy(), rtol=1e-1, atol=1e-2)

    # test symbol
    true_classes_var = mx.sym.var('true_classes')
    outputs = mx.sym.contrib.rand_zipfian(true_classes_var, num_sampled, range_max)
    outputs = mx.sym.Group(outputs)
    executor = outputs.bind(mx.context.current_context(), {'true_classes': true_classes})
    executor.forward()
    sampled_classes, exp_cnt_true, exp_cnt_sampled = executor.outputs
    mx.test_utils.assert_almost_equal(exp_cnt_sampled.asnumpy(), exp_cnt[sampled_classes].asnumpy(), rtol=1e-1, atol=1e-2)
    mx.test_utils.assert_almost_equal(exp_cnt_true.asnumpy(), exp_cnt[true_classes].asnumpy(), rtol=1e-1, atol=1e-2)
# Issue #10277 (https://github.com/apache/incubator-mxnet/issues/10277) discusses this test.
Example 15: is_test_for_gpu
# Required import: import mxnet [as alias]
# Or: from mxnet import current_context [as alias]
def is_test_for_gpu():
    return mx.current_context().device_type == 'gpu'