This article collects typical usage examples of the Python method mxnet.nd.ones. If you have been asking yourself what exactly nd.ones does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from mxnet.nd, the module this method belongs to.
Eight code examples of nd.ones are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
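Before the examples, here is a minimal sketch of nd.ones itself; the shape, dtype, and ctx arguments below follow the standard mxnet.nd.ones signature:

import mxnet as mx
from mxnet import nd

a = nd.ones((2, 3))               # 2x3 float32 array filled with 1.0
b = nd.ones(5, dtype='int32')     # dtype can be overridden
c = nd.ones((4,), ctx=mx.cpu(0))  # ctx selects the device the array lives on
print(a.shape, b.dtype, c.context)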
Example 1: test_compute_quantile_loss
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import ones [as alias]
def test_compute_quantile_loss() -> None:
y_true = nd.ones(shape=(10, 10, 10))
y_pred = nd.zeros(shape=(10, 10, 10, 2))
quantiles = [0.5, 0.9]
loss = QuantileLoss(quantiles)
correct_qt_loss = [1.0, 1.8]
    for idx, q in enumerate(quantiles):
        # nd.ndarray is the mxnet ndarray module, passed as the `F` backend arg
        assert (
            abs(
                nd.mean(
                    loss.compute_quantile_loss(
                        nd.ndarray, y_true, y_pred[:, :, :, idx], q
                    )
                ).asscalar()
                - correct_qt_loss[idx]
            )
            < 1e-5
        ), f"computing quantile loss at quantile {q} fails!"
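The expected values [1.0, 1.8] are consistent with the pinball (quantile) loss scaled by a factor of two: with y_true = 1 and y_pred = 0, the loss per element is 2 * q * (y_true - y_pred) = 2q. Below is a minimal sketch that reproduces the same numbers with plain nd ops; the 2x scaling is an assumption read off the expected values, not taken from the library source:

from mxnet import nd

y_true = nd.ones(shape=(10, 10, 10))
y_pred = nd.zeros(shape=(10, 10, 10))
for q, expected in [(0.5, 1.0), (0.9, 1.8)]:
    # pinball loss, scaled by 2: under-prediction weighted by q, over- by (1 - q)
    loss = 2 * (q * nd.relu(y_true - y_pred) + (1 - q) * nd.relu(y_pred - y_true))
    assert abs(nd.mean(loss).asscalar() - expected) < 1e-6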
Example 2: forward
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import ones [as alias]
def forward(self, graph, feat):
r"""Compute APPNP layer.
Parameters
----------
graph : DGLGraph
The graph.
feat : mx.NDArray
        The input feature of shape :math:`(N, *)`, where :math:`N` is the
        number of nodes and :math:`*` can be any shape.
Returns
-------
mx.NDArray
        The output feature of shape :math:`(N, *)`, where :math:`*`
        is the same as the input shape.
"""
with graph.local_scope():
norm = mx.nd.power(mx.nd.clip(
graph.in_degrees().astype(feat.dtype), a_min=1, a_max=float("inf")), -0.5)
shp = norm.shape + (1,) * (feat.ndim - 1)
norm = norm.reshape(shp).as_in_context(feat.context)
feat_0 = feat
for _ in range(self._k):
# normalization by src node
feat = feat * norm
graph.ndata['h'] = feat
graph.edata['w'] = self.edge_drop(
nd.ones((graph.number_of_edges(), 1), ctx=feat.context))
graph.update_all(fn.u_mul_e('h', 'w', 'm'),
fn.sum('m', 'h'))
feat = graph.ndata.pop('h')
# normalization by dst node
feat = feat * norm
feat = (1 - self._alpha) * feat + self._alpha * feat_0
return feat
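nd.ones here assigns every edge a weight of 1.0, and self.edge_drop then randomly zeroes some of those weights, which amounts to dropping edges during message passing. A minimal sketch of that mechanism in isolation (using nn.Dropout as a stand-in for edge_drop is an assumption):

import mxnet as mx
from mxnet import nd, autograd
from mxnet.gluon import nn

edge_drop = nn.Dropout(0.5)
w = nd.ones((6, 1))
with autograd.record(train_mode=True):  # Dropout is only active in training mode
    print(edge_drop(w))                 # ~half the weights zeroed, survivors scaled to 2.0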
Example 3: validate
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import ones [as alias]
def validate(val_data, val_dataset, net, ctx):
    # `opt`, `val_metric`, `val_batch_fn` and `net_dsnt` are module-level
    # globals defined elsewhere in the training script this was taken from.
    if isinstance(ctx, mx.Context):
        ctx = [ctx]
    val_metric.reset()
    from tqdm import tqdm
for batch in tqdm(val_data):
data, scale, center, score, imgid = val_batch_fn(batch, ctx)
outputs = [net(X) for X in data]
if opt.flip_test:
data_flip = [nd.flip(X, axis=3) for X in data]
outputs_flip = [net(X) for X in data_flip]
outputs_flipback = [flip_heatmap(o, val_dataset.joint_pairs, shift=True) for o in outputs_flip]
outputs = [(o + o_flip)/2 for o, o_flip in zip(outputs, outputs_flipback)]
if opt.dsnt:
outputs = [net_dsnt(X)[0] for X in outputs]
if len(outputs) > 1:
outputs_stack = nd.concat(*[o.as_in_context(mx.cpu()) for o in outputs], dim=0)
else:
outputs_stack = outputs[0].as_in_context(mx.cpu())
if opt.dsnt:
preds = (outputs_stack - 0.5) * scale.expand_dims(axis=1) + center.expand_dims(axis=1)
maxvals = nd.ones(preds.shape[0:2]+(1, ))
else:
preds, maxvals = get_final_preds(outputs_stack, center.asnumpy(), scale.asnumpy())
val_metric.update(preds, maxvals, score, imgid)
metric_name, metric_score = val_metric.get()
print("Inference Completed! %s = %.4f" % (metric_name, metric_score))
return
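In the DSNT branch the network outputs joint coordinates directly, so there is no heatmap maximum to read a confidence score from; nd.ones fills in a placeholder confidence of 1.0 per joint. A minimal sketch of the shape arithmetic (the batch size and joint count here are illustrative, not from the original script):

from mxnet import nd

preds = nd.zeros((4, 17, 2))                # 4 images, 17 joints, (x, y) each
maxvals = nd.ones(preds.shape[0:2] + (1,))  # (4, 17, 1): confidence 1.0 per joint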
Example 4: test_data_parallel
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import ones [as alias]
def test_data_parallel():
# test gluon.contrib.parallel.DataParallelModel
net = nn.HybridSequential()
with net.name_scope():
net.add(nn.Conv2D(in_channels=1, channels=20, kernel_size=5))
net.add(nn.Activation('relu'))
net.add(nn.MaxPool2D(pool_size=2, strides=2))
net.add(nn.Conv2D(in_channels=20, channels=50, kernel_size=5))
net.add(nn.Activation('relu'))
net.add(nn.MaxPool2D(pool_size=2, strides=2))
        # The Flatten layer collapses all axes except the first into one axis.
        net.add(nn.Flatten())
        net.add(nn.Dense(512, in_units=800))
net.add(nn.Activation('relu'))
net.add(nn.Dense(10, in_units=512))
net.collect_params().initialize()
criterion = gluon.loss.SoftmaxCELoss(axis=1)
def test_net_sync(net, criterion, sync, nDevices):
ctx_list = [mx.cpu(0) for i in range(nDevices)]
net = DataParallelModel(net, ctx_list, sync=sync)
criterion = DataParallelCriterion(criterion, ctx_list, sync=sync)
iters = 100
# train mode
for i in range(iters):
x = mx.random.uniform(shape=(8, 1, 28, 28))
            t = nd.ones(shape=(8,))
with autograd.record():
y = net(x)
loss = criterion(y, t)
autograd.backward(loss)
# evaluation mode
for i in range(iters):
x = mx.random.uniform(shape=(8, 1, 28, 28))
y = net(x)
test_net_sync(net, criterion, True, 1)
test_net_sync(net, criterion, True, 2)
test_net_sync(net, criterion, False, 1)
test_net_sync(net, criterion, False, 2)
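Note the labels: SoftmaxCELoss defaults to sparse_label=True, so nd.ones(shape=(8,)) is interpreted as class indices, labeling every sample as class 1. A minimal standalone sketch of that pairing:

from mxnet import nd, gluon

criterion = gluon.loss.SoftmaxCELoss(axis=1)
logits = nd.random.uniform(shape=(8, 10))  # 8 samples, 10 classes
labels = nd.ones(shape=(8,))               # every sample labeled class 1
print(criterion(logits, labels).shape)     # (8,): one loss value per sample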
Example 5: __init__
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import ones [as alias]
def __init__(
self,
quantiles: List[float],
    quantile_weights: Optional[List[float]] = None,
weight=None,
batch_axis=0,
**kwargs,
) -> None:
"""
Represents the quantile loss used to fit decoders that learn quantiles.
Parameters
----------
quantiles
list of quantiles to compute loss over.
    quantile_weights
        weights of the quantiles.
    weight
        weighting of the loss.
    batch_axis
        indicates the axis that represents the batch.
"""
super().__init__(weight, batch_axis, **kwargs)
self.quantiles = quantiles
self.num_quantiles = len(quantiles)
self.quantile_weights = (
nd.ones(self.num_quantiles) / self.num_quantiles
if not quantile_weights
else quantile_weights
)
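When no explicit weights are passed, nd.ones(n) / n yields a uniform weighting that sums to one. For example:

from mxnet import nd

quantiles = [0.1, 0.5, 0.9]
weights = nd.ones(len(quantiles)) / len(quantiles)
print(weights)  # [0.3333 0.3333 0.3333]: equal weight per quantile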
Example 6: test_global_norm
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import ones [as alias]
def test_global_norm():
data = list()
for i in range(1, 6):
data.append(np.ones((i * 10, i * 10)) * i)
    # np.asscalar was removed in NumPy 1.23; .item() is the modern equivalent
    gnorm = np.sqrt(sum(np.sum(np.square(d)) for d in data)).item()
assert np.isclose(gnorm, global_norm([nd.array(d) for d in data]).asscalar())
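The quantity under test is the global norm: the square root of the summed squared L2 norms of all arrays. Since the implementation of global_norm is not shown on this page, here is a sketch of what it presumably computes, matching the NumPy reference above:

from mxnet import nd

def global_norm_sketch(arrays):
    # sqrt(sum_i ||a_i||^2), computed entirely in MXNet
    return nd.sqrt(sum((a ** 2).sum() for a in arrays)).asscalar()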
Example 7: test_broadcast_like
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import ones [as alias]
def test_broadcast_like():
x = nd.ones((1, 2)) * 10
y = nd.ones((100, 100, 2)) * 20
assert mx.test_utils.almost_equal(x.broadcast_like(y).asnumpy(), broadcast_like(nd, x, y).asnumpy())
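broadcast_like repeats x along its size-1 axes until the shape matches y. The closely related broadcast_to shows the same semantics with an explicit target shape:

from mxnet import nd

x = nd.ones((1, 2)) * 10
print(x.broadcast_to((4, 2)))  # four identical rows of [10. 10.]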
Example 8: test_data_parallel
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import ones [as alias]
def test_data_parallel():
# test gluon.contrib.parallel.DataParallelModel
net = nn.HybridSequential()
with net.name_scope():
net.add(nn.Conv2D(in_channels=1, channels=5, kernel_size=5))
net.add(nn.Activation('relu'))
net.add(nn.MaxPool2D(pool_size=2, strides=2))
net.add(nn.Conv2D(in_channels=5, channels=5, kernel_size=5))
net.add(nn.Activation('relu'))
net.add(nn.MaxPool2D(pool_size=2, strides=2))
        # The Flatten layer collapses all axes except the first into one axis.
        net.add(nn.Flatten())
        net.add(nn.Dense(8, in_units=80))
net.add(nn.Activation('relu'))
net.add(nn.Dense(10, in_units=8))
net.collect_params().initialize()
criterion = gluon.loss.SoftmaxCELoss(axis=1)
def test_net_sync(net, criterion, sync, nDevices):
ctx_list = [mx.cpu(0) for i in range(nDevices)]
net = DataParallelModel(net, ctx_list, sync=sync)
criterion = DataParallelCriterion(criterion, ctx_list, sync=sync)
iters = 10
bs = 2
# train mode
for i in range(iters):
x = mx.random.uniform(shape=(bs, 1, 28, 28))
            t = nd.ones(shape=(bs,))
with autograd.record():
y = net(x)
loss = criterion(y, t)
autograd.backward(loss)
# evaluation mode
for i in range(iters):
x = mx.random.uniform(shape=(bs, 1, 28, 28))
y = net(x)
nd.waitall()
# test_net_sync(net, criterion, True, 1)
test_net_sync(net, criterion, True, 2)
# test_net_sync(net, criterion, False, 1)
test_net_sync(net, criterion, False, 2)
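The nd.waitall() call matters in a test like this: MXNet executes NDArray operations asynchronously, so without it an error raised by a queued operation could surface only after the test body has returned. A minimal illustration:

from mxnet import nd

a = nd.ones((1024, 1024))
b = nd.dot(a, a)  # enqueued asynchronously; may not have executed yet
nd.waitall()      # block until all pending operations complete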