This article collects typical usage examples of the Python method mxnet.nd. If you are wondering what mxnet.nd does or how it is used in practice, the curated code samples below may help. You can also explore further usage examples of the containing module, mxnet.
The following shows 15 code examples of the mxnet.nd method, ordered by popularity.
Example 1: __call__
# Required import: import mxnet as mx
# Or: from mxnet import nd
def __call__(self, *args, **kwargs):
    inputs, pars = get_in_data(op=self.op, *args, **kwargs)
    op_type = self.name
    name = pars[1].pop('name', None)
    input_type = pars[1].pop('__input_type__', None)
    if input_type is None:
        input_type = type(inputs[0])
    if op_type not in self.cache:
        # register operator
        self.cache[op_type] = True
        self.register()
    if input_type is mx.sym.Symbol:
        return mx.sym.Custom(*inputs, mobula_pars=pars_encode(pars),
                             op_type=op_type)
    if hasattr(mx, 'numpy'):
        inputs = [x.as_nd_ndarray() if isinstance(x, mx.np.ndarray) else x
                  for x in inputs]
    return mx.nd.Custom(*inputs, mobula_pars=pars_encode(pars),
                        op_type=op_type, name=name)
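For context, a minimal self-contained sketch of how a custom operator is registered with MXNet and then dispatched through mx.nd.Custom. This is independent of the Mobula wrapper above; the operator name "identity_demo" and the pass-through behaviour are hypothetical.

import mxnet as mx

class IdentityOp(mx.operator.CustomOp):
    def forward(self, is_train, req, in_data, out_data, aux):
        # copy the input straight through to the output
        self.assign(out_data[0], req[0], in_data[0])

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        # the gradient of an identity map is the incoming gradient
        self.assign(in_grad[0], req[0], out_grad[0])

@mx.operator.register("identity_demo")
class IdentityOpProp(mx.operator.CustomOpProp):
    def __init__(self):
        super(IdentityOpProp, self).__init__(need_top_grad=True)

    def list_arguments(self):
        return ['data']

    def list_outputs(self):
        return ['output']

    def infer_shape(self, in_shape):
        return in_shape, [in_shape[0]], []

    def create_operator(self, ctx, shapes, dtypes):
        return IdentityOp()

x = mx.nd.ones((2, 3))
y = mx.nd.Custom(x, op_type="identity_demo")   # imperative dispatch of the registered op

The same registered op_type can also be used from the symbolic API via mx.sym.Custom, which is the branch the wrapper above takes when the inputs are Symbols.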
Example 2: plot_univariate
# Required import: import mxnet as mx
# Or: from mxnet import nd
def plot_univariate(samples, dist, buffer=0, **kwargs):
    """
    Visual inspection by plotting the distribution: plots a histogram of the samples along with
    the probability density function of the given distribution.

    :param samples: Samples from the distribution
    :type samples: (mx.nd.NDArray, np.ndarray)
    :param dist: Distribution that these are samples from (scipy.stats)
    :param buffer: additional range to plot the distribution over
    :param kwargs: Keyword arguments for the distribution (e.g. loc, scale)
    """
    if isinstance(samples, mx.nd.NDArray):
        samples = samples.asnumpy().ravel()
    elif isinstance(samples, np.ndarray):
        samples = samples.ravel()
    else:
        raise ValueError("Unexpected type for samples: {}, expected mx.nd.NDArray or np.ndarray".format(type(samples)))

    import matplotlib.pyplot as plt
    fig = plt.figure(figsize=(8, 8))
    ax = fig.add_subplot(111)
    ax.hist(samples, bins=301, density=True)
    x = np.linspace(samples.min() - buffer, samples.max() + buffer, num=301)
    ax.plot(x, dist.pdf(x, **kwargs).reshape(-1, 1))
    plt.show()
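A possible usage sketch, assuming scipy.stats is available; the loc/scale values below are purely illustrative:

import mxnet as mx
import scipy.stats

samples = mx.nd.random.normal(loc=0, scale=1, shape=(10000,))   # 10,000 standard-normal draws
plot_univariate(samples, scipy.stats.norm, buffer=0.5, loc=0, scale=1)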
Example 3: forward
# Required import: import mxnet as mx
# Or: from mxnet import nd
def forward(self, is_train, req, in_data, out_data, aux):
    a = in_data[0]
    n = a.shape[-1]
    if req[0] != 'null':
        if req[0] == 'write':
            b = out_data[0]
        else:
            b = mx.nd.zeros_like(out_data[0])
        index = mx.nd.arange(start=0, stop=n, step=1, dtype=np.int32)
        identity = mx.nd.one_hot(index, depth=n, dtype=a.dtype)
        dim_diff = len(b.shape) - len(identity.shape)
        if dim_diff > 0:
            res_shape = (1,) * dim_diff + identity.shape
            identity = mx.nd.reshape(identity, shape=res_shape)
        mx.nd.broadcast_to(identity, shape=out_data[0].shape, out=b)
        b *= mx.nd.expand_dims(a, axis=-1)
        if req[0] != 'write':
            self.assign(out_data[0], req[0], b)
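The core trick here is embedding a vector on the diagonal of a (batched) matrix via one_hot plus broadcasting. A minimal sketch with illustrative values:

import mxnet as mx

a = mx.nd.array([[1., 2., 3.]])                  # batch of one vector, shape (1, 3)
index = mx.nd.arange(0, 3, dtype='int32')
identity = mx.nd.one_hot(index, depth=3)         # 3x3 identity matrix
diag = identity.reshape((1, 3, 3)) * mx.nd.expand_dims(a, axis=-1)
# diag[0] is the 3x3 matrix with [1., 2., 3.] on its diagonal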
Example 4: _compute_K
# Required import: import mxnet as mx
# Or: from mxnet import nd
def _compute_K(self, F, X, lengthscale, variance, X2=None):
    """
    The internal interface for the actual covariance matrix computation.

    :param F: MXNet computation type <mx.sym, mx.nd>.
    :param X: the first set of inputs to the kernel.
    :type X: MXNet NDArray or MXNet Symbol
    :param lengthscale: the lengthscale parameter.
    :type lengthscale: MXNet NDArray or MXNet Symbol
    :param variance: the variance parameter (scalar), which scales the whole covariance matrix.
    :type variance: MXNet NDArray or MXNet Symbol
    :param X2: (optional) the second set of arguments to the kernel. If X2 is None, this computes a
        square covariance matrix of X. In other words, X2 is internally treated as X.
    :type X2: MXNet NDArray or MXNet Symbol
    :return: The covariance matrix.
    :rtype: MXNet NDArray or MXNet Symbol
    """
    R2 = self._compute_R2(F, X, lengthscale, variance, X2=X2)
    R = F.sqrt(F.clip(R2, 1e-14, np.inf))
    return F.broadcast_mul(
        (1 + np.sqrt(5) * R + 5 / 3. * R2) * F.exp(-np.sqrt(5) * R),
        F.expand_dims(variance, axis=-2))
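Reading off the returned expression, and assuming _compute_R2 yields the squared, lengthscale-scaled distance r^2 between inputs, this is the Matérn-5/2 covariance:

k(x, x') = \sigma^2 \left(1 + \sqrt{5}\, r + \tfrac{5}{3} r^2\right) \exp\!\left(-\sqrt{5}\, r\right), \qquad r^2 = \sum_d (x_d - x'_d)^2 / \ell_d^2

where \sigma^2 is variance and \ell_d is the per-dimension lengthscale; the clip before the square root guards against small negative values from round-off.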
Example 5: gen_mxfusion_model_w_mean
# Required import: import mxnet as mx
# Or: from mxnet import nd
def gen_mxfusion_model_w_mean(self, dtype, D, noise_var, lengthscale,
                              variance, rand_gen=None):
    net = nn.HybridSequential(prefix='nn_')
    with net.name_scope():
        net.add(nn.Dense(D, flatten=False, activation="tanh",
                         in_units=3, dtype=dtype))
    net.initialize(mx.init.Xavier(magnitude=3))

    m = Model()
    m.N = Variable()
    m.X = Variable(shape=(m.N, 3))
    m.noise_var = Variable(transformation=PositiveTransformation(),
                           initial_value=mx.nd.array(noise_var, dtype=dtype))
    kernel = RBF(input_dim=3, ARD=True, variance=mx.nd.array(variance, dtype=dtype),
                 lengthscale=mx.nd.array(lengthscale, dtype=dtype), dtype=dtype)
    m.mean_func = MXFusionGluonFunction(net, num_outputs=1, broadcastable=True)
    m.Y = GPRegression.define_variable(X=m.X, kernel=kernel, mean=m.mean_func(m.X),
                                       noise_var=m.noise_var, shape=(m.N, D),
                                       dtype=dtype, rand_gen=rand_gen)
    m.Y.factor.gp_log_pdf.jitter = 1e-6
    return m, net
Example 6: test_log_pdf_w_mean
# Required import: import mxnet as mx
# Or: from mxnet import nd
def test_log_pdf_w_mean(self):
    D, X, Y, noise_var, lengthscale, variance = self.gen_data()

    # MXFusion log-likelihood
    dtype = 'float64'
    m, net = self.gen_mxfusion_model_w_mean(
        dtype, D, noise_var, lengthscale, variance)
    mean = net(mx.nd.array(X, dtype=dtype)).asnumpy()

    # GPy log-likelihood
    m_gpy = GPy.models.GPRegression(
        X=X, Y=Y - mean,
        kernel=GPy.kern.RBF(3, ARD=True, lengthscale=lengthscale, variance=variance),
        noise_var=noise_var)
    l_gpy = m_gpy.log_likelihood()

    observed = [m.X, m.Y]
    infr = Inference(MAP(model=m, observed=observed), dtype=dtype)
    loss, _ = infr.run(X=mx.nd.array(X, dtype=dtype), Y=mx.nd.array(Y, dtype=dtype))
    l_mf = -loss
    assert np.allclose(l_mf.asnumpy(), l_gpy)
Example 7: test_draw_samples
# Required import: import mxnet as mx
# Or: from mxnet import nd
def test_draw_samples(self):
    D, X, Y, noise_var, lengthscale, variance = self.gen_data()
    dtype = 'float64'
    rand_gen = MockMXNetRandomGenerator(mx.nd.array(np.random.rand(20 * D), dtype=dtype))

    m = self.gen_mxfusion_model(dtype, D, noise_var, lengthscale, variance, rand_gen)
    observed = [m.X]
    infr = Inference(ForwardSamplingAlgorithm(
        m, observed, num_samples=2, target_variables=[m.Y]), dtype=dtype)
    samples = infr.run(X=mx.nd.array(X, dtype=dtype), Y=mx.nd.array(Y, dtype=dtype))[0].asnumpy()

    kern = RBF(3, True, name='rbf', dtype=dtype) + White(3, dtype=dtype)
    X_var = Variable(shape=(10, 3))
    gp = GaussianProcess.define_variable(X=X_var, kernel=kern, shape=(10, D),
                                         dtype=dtype, rand_gen=rand_gen).factor
    variables = {
        gp.X.uuid: mx.nd.expand_dims(mx.nd.array(X, dtype=dtype), axis=0),
        gp.add_rbf_lengthscale.uuid: mx.nd.expand_dims(mx.nd.array(lengthscale, dtype=dtype), axis=0),
        gp.add_rbf_variance.uuid: mx.nd.expand_dims(mx.nd.array(variance, dtype=dtype), axis=0),
        gp.add_white_variance.uuid: mx.nd.expand_dims(mx.nd.array(noise_var, dtype=dtype), axis=0),
    }
    samples_2 = gp.draw_samples(F=mx.nd, variables=variables, num_samples=2).asnumpy()

    assert np.allclose(samples, samples_2), (samples, samples_2)
Example 8: test_draw_samples_w_mean
# Required import: import mxnet as mx
# Or: from mxnet import nd
def test_draw_samples_w_mean(self):
    D, X, Y, noise_var, lengthscale, variance = self.gen_data()
    dtype = 'float64'
    rand_gen = MockMXNetRandomGenerator(mx.nd.array(np.random.rand(20 * D), dtype=dtype))

    m, net = self.gen_mxfusion_model_w_mean(dtype, D, noise_var, lengthscale, variance, rand_gen)
    observed = [m.X]
    infr = Inference(ForwardSamplingAlgorithm(
        m, observed, num_samples=2, target_variables=[m.Y]), dtype=dtype)
    samples = infr.run(X=mx.nd.array(X, dtype=dtype), Y=mx.nd.array(Y, dtype=dtype))[0].asnumpy()

    kern = RBF(3, True, name='rbf', dtype=dtype) + White(3, dtype=dtype)
    X_var = Variable(shape=(10, 3))
    mean_func = MXFusionGluonFunction(net, num_outputs=1, broadcastable=True)
    mean_var = mean_func(X_var)
    gp = GaussianProcess.define_variable(X=X_var, kernel=kern, mean=mean_var, shape=(10, D),
                                         dtype=dtype, rand_gen=rand_gen).factor
    variables = {
        gp.X.uuid: mx.nd.expand_dims(mx.nd.array(X, dtype=dtype), axis=0),
        gp.add_rbf_lengthscale.uuid: mx.nd.expand_dims(mx.nd.array(lengthscale, dtype=dtype), axis=0),
        gp.add_rbf_variance.uuid: mx.nd.expand_dims(mx.nd.array(variance, dtype=dtype), axis=0),
        gp.add_white_variance.uuid: mx.nd.expand_dims(mx.nd.array(noise_var, dtype=dtype), axis=0),
        mean_var.uuid: mx.nd.expand_dims(net(mx.nd.array(X, dtype=dtype)), axis=0),
    }
    samples_2 = gp.draw_samples(F=mx.nd, variables=variables, num_samples=2).asnumpy()

    assert np.allclose(samples, samples_2), (samples, samples_2)
Example 9: test_sampling_prediction_w_mean
# Required import: import mxnet as mx
# Or: from mxnet import nd
def test_sampling_prediction_w_mean(self):
    D, X, Y, noise_var, lengthscale, variance = self.gen_data()
    Xt = np.random.rand(20, 3)
    dtype = 'float64'
    m, net = self.gen_mxfusion_model_w_mean(
        dtype, D, noise_var, lengthscale, variance)

    observed = [m.X, m.Y]
    infr = Inference(MAP(model=m, observed=observed), dtype=dtype)
    loss, _ = infr.run(X=mx.nd.array(X, dtype=dtype), Y=mx.nd.array(Y, dtype=dtype), max_iter=1)

    infr_pred = TransferInference(
        ModulePredictionAlgorithm(model=m, observed=[m.X], target_variables=[m.Y], num_samples=5),
        infr_params=infr.params)
    gp = m.Y.factor
    gp.attach_prediction_algorithms(
        targets=gp.output_names, conditionals=gp.input_names,
        algorithm=GPRegressionSamplingPrediction(
            gp._module_graph, gp._extra_graphs[0], [gp._module_graph.X]),
        alg_name='gp_predict')
    gp.gp_predict.diagonal_variance = True
    gp.gp_predict.noise_free = False
    gp.gp_predict.jitter = 1e-6

    y_samples = infr_pred.run(X=mx.nd.array(Xt, dtype=dtype))[0].asnumpy()
Example 10: test_prediction_print
# Required import: import mxnet as mx
# Or: from mxnet import nd
def test_prediction_print(self):
    D, X, Y, noise_var, lengthscale, variance = self.gen_data()
    Xt = np.random.rand(20, 3)
    m_gpy = GPy.models.GPRegression(
        X=X, Y=Y,
        kernel=GPy.kern.RBF(3, ARD=True, lengthscale=lengthscale, variance=variance),
        noise_var=noise_var)

    dtype = 'float64'
    m = self.gen_mxfusion_model(dtype, D, noise_var, lengthscale, variance)
    observed = [m.X, m.Y]
    infr = Inference(MAP(model=m, observed=observed), dtype=dtype)
    loss, _ = infr.run(X=mx.nd.array(X, dtype=dtype), Y=mx.nd.array(Y, dtype=dtype))

    # renamed from `print` to avoid shadowing the built-in
    printed = infr.print_params()
    assert len(printed) > 1
Example 11: test_replicate_simple_model
# Required import: import mxnet as mx
# Or: from mxnet import nd
def test_replicate_simple_model(self):
    m = mf.models.Model(verbose=False)
    m.x = mfc.Variable(shape=(2,))
    m.x_mean = mfc.Variable(value=mx.nd.array([0, 1]), shape=(2,))
    m.x_var = mfc.Variable(value=mx.nd.array([1e6]))
    d = mf.components.distributions.Normal(mean=m.x_mean, variance=m.x_var)
    m.x.set_prior(d)

    m2 = m.clone()

    # compare m and m2 components and such for exactness.
    self.assertTrue(set([v.uuid for v in m.components.values()]) ==
                    set([v.uuid for v in m2.components.values()]))
    self.assertTrue(all([v in m.components for v in m2.components]),
                    (set(m2.components) - set(m.components)))
    self.assertTrue(all([v in m2.components for v in m.components]),
                    (set(m.components) - set(m2.components)))
    self.assertTrue(all([m.x.shape == m2.x.shape,
                         m.x_mean.shape == m2.x_mean.shape,
                         m.x_var.shape == m2.x_var.shape]))
Example 12: test_compute_log_prob
# Required import: import mxnet as mx
# Or: from mxnet import nd
def test_compute_log_prob(self):
    m = Model()
    v = Variable(shape=(1,))
    m.v2 = Normal.define_variable(mean=v, variance=mx.nd.array([1]))
    m.v3 = Normal.define_variable(mean=m.v2, variance=mx.nd.array([1]), shape=(10,))

    np.random.seed(0)
    v_mx = mx.nd.array(np.random.randn(1))
    v2_mx = mx.nd.array(np.random.randn(1))
    v3_mx = mx.nd.array(np.random.randn(10))
    v_rt = add_sample_dimension(mx.nd, v_mx)
    v2_rt = add_sample_dimension(mx.nd, v2_mx)
    v3_rt = add_sample_dimension(mx.nd, v3_mx)

    variance = m.v2.factor.variance
    variance2 = m.v3.factor.variance
    variance_rt = add_sample_dimension(mx.nd, variance.constant)
    variance2_rt = add_sample_dimension(mx.nd, variance2.constant)

    log_pdf = m.log_pdf(F=mx.nd, variables={
        m.v2.uuid: v2_rt, m.v3.uuid: v3_rt,
        variance.uuid: variance_rt, variance2.uuid: variance2_rt,
        v.uuid: v_rt}).asscalar()

    variables = {m.v2.factor.mean.uuid: v_rt,
                 m.v2.factor.variance.uuid: variance_rt,
                 m.v2.factor.random_variable.uuid: v2_rt}
    log_pdf_1 = mx.nd.sum(m.v2.factor.log_pdf(F=mx.nd, variables=variables))

    variables = {m.v3.factor.mean.uuid: v2_rt,
                 m.v3.factor.variance.uuid: variance2_rt,
                 m.v3.factor.random_variable.uuid: v3_rt}
    log_pdf_2 = mx.nd.sum(m.v3.factor.log_pdf(F=mx.nd, variables=variables))

    assert log_pdf == (log_pdf_1 + log_pdf_2).asscalar()
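The assertion exercises the factorization of the model's joint log-density over its two Normal factors; with unit variances,

\log p(v_2, v_3 \mid v) = \log \mathcal{N}(v_2 \mid v, 1) + \sum_{i=1}^{10} \log \mathcal{N}(v_{3,i} \mid v_2, 1),

which is exactly the sum log_pdf_1 + log_pdf_2 computed above.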
Example 13: test_draw_samples_with_broadcast
# Required import: import mxnet as mx
# Or: from mxnet import nd
def test_draw_samples_with_broadcast(self, dtype, a, a_is_samples, rv_shape, num_samples):
    a_mx = mx.nd.array(a, dtype=dtype)
    if not a_is_samples:
        a_mx = add_sample_dimension(mx.nd, a_mx)

    rand = np.random.gamma(shape=a, scale=np.ones(a.shape), size=(num_samples,) + rv_shape)
    draw_samples_np = rand / np.sum(rand)
    rand_gen = MockMXNetRandomGenerator(mx.nd.array(rand.flatten(), dtype=dtype))

    dirichlet = Dirichlet.define_variable(alpha=Variable(), shape=rv_shape,
                                          dtype=dtype, rand_gen=rand_gen).factor
    variables = {dirichlet.alpha.uuid: a_mx}
    draw_samples_rt = dirichlet.draw_samples(F=mx.nd, variables=variables, num_samples=num_samples)

    assert np.issubdtype(draw_samples_rt.dtype, dtype)
    assert draw_samples_rt.shape == (5,) + rv_shape
    assert np.allclose(draw_samples_np, draw_samples_rt.asnumpy())
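For reference, a minimal numpy sketch of the Gamma-normalization construction that underlies Dirichlet sampling (the alpha values below are purely illustrative): normalizing independent Gamma(alpha_k, 1) draws yields a point on the simplex distributed as Dirichlet(alpha).

import numpy as np

alpha = np.array([0.5, 1.0, 2.0])
g = np.random.gamma(shape=alpha)   # one Gamma(alpha_k, 1) draw per concentration parameter
sample = g / g.sum()               # lies on the simplex; distributed as Dirichlet(alpha)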
Example 14: test_log_pdf
# Required import: import mxnet as mx
# Or: from mxnet import nd
def test_log_pdf(self, dtype, location, location_is_samples, rv, rv_is_samples, num_samples):
    is_samples_any = any([location_is_samples, rv_is_samples])
    rv_shape = rv.shape[1:] if rv_is_samples else rv.shape
    n_dim = 1 + len(rv.shape) if is_samples_any and not rv_is_samples else len(rv.shape)

    var = PointMass.define_variable(location=Variable(), shape=rv_shape, dtype=dtype).factor
    location_mx = mx.nd.array(location, dtype=dtype)
    if not location_is_samples:
        location_mx = add_sample_dimension(mx.nd, location_mx)
    rv_mx = mx.nd.array(rv, dtype=dtype)
    if not rv_is_samples:
        rv_mx = add_sample_dimension(mx.nd, rv_mx)

    variables = {var.location.uuid: location_mx, var.random_variable.uuid: rv_mx}
    log_pdf_rt = var.log_pdf(F=mx.nd, variables=variables)

    if np.issubdtype(dtype, np.float64):
        rtol, atol = 1e-7, 1e-10
    else:
        rtol, atol = 1e-4, 1e-5
    assert np.allclose(0, log_pdf_rt, rtol=rtol, atol=atol)
Example 15: test_draw_samples
# Required import: import mxnet as mx
# Or: from mxnet import nd
def test_draw_samples(self, dtype, location, location_is_samples, rv_shape, num_samples):
    n_dim = 1 + len(rv_shape)
    var = PointMass.define_variable(location=Variable(), shape=rv_shape, dtype=dtype).factor
    location_mx = mx.nd.array(location, dtype=dtype)
    if not location_is_samples:
        location_mx = add_sample_dimension(mx.nd, location_mx)

    variables = {var.location.uuid: location_mx}
    rv_samples_rt = var.draw_samples(F=mx.nd, variables=variables, num_samples=num_samples)

    assert np.issubdtype(rv_samples_rt.dtype, dtype)
    assert array_has_samples(mx.nd, rv_samples_rt)
    assert get_num_samples(mx.nd, rv_samples_rt) == num_samples

    if np.issubdtype(dtype, np.float64):
        rtol, atol = 1e-7, 1e-10
    else:
        rtol, atol = 1e-4, 1e-5
    assert np.allclose(location_mx.asnumpy()[0], rv_samples_rt.asnumpy()[0], rtol=rtol, atol=atol)