This article collects typical usage examples of the mxnet.ndarray.arange method in Python. If you are wondering how exactly to use ndarray.arange, how to call it, or what real-world code that uses it looks like, the curated examples below should help. You can also explore further usage examples of mxnet.ndarray, the module in which the method is defined.
The following presents 8 code examples of the ndarray.arange method, sorted by popularity by default.
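Before the examples, here is a minimal sketch of nd.arange itself (not taken from any of the examples below), showing the start/stop/step/dtype arguments the examples rely on:

import mxnet.ndarray as nd

a = nd.arange(5)                    # [0. 1. 2. 3. 4.], float32 by default
b = nd.arange(2, 10, step=2)        # [2. 4. 6. 8.]
c = nd.arange(0, 5, dtype='int64')  # integer indices, as used for row ids in several examples
print(a.asnumpy(), b.asnumpy(), c.asnumpy())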
Example 1: test_download_embed
# Module to import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import arange [as alias]
def test_download_embed():
    @text.embedding.register
    class Test(text.embedding._TokenEmbedding):
        # 33 bytes.
        pretrained_file_name_sha1 = \
            {'embedding_test.vec': '29b9a6511cf4b5aae293c44a9ec1365b74f2a2f8'}
        namespace = 'test'

        def __init__(self, embedding_root='embeddings', init_unknown_vec=nd.zeros, **kwargs):
            pretrained_file_name = 'embedding_test.vec'
            Test._check_pretrained_file_names(pretrained_file_name)
            super(Test, self).__init__(**kwargs)
            pretrained_file_path = Test._get_pretrained_file(embedding_root, pretrained_file_name)
            self._load_embedding(pretrained_file_path, ' ', init_unknown_vec)

    test_embed = text.embedding.create('test')
    assert test_embed.token_to_idx['hello'] == 1
    assert test_embed.token_to_idx['world'] == 2
    assert_almost_equal(test_embed.idx_to_vec[1].asnumpy(), (nd.arange(5) + 1).asnumpy())
    assert_almost_equal(test_embed.idx_to_vec[2].asnumpy(), (nd.arange(5) + 6).asnumpy())
    assert_almost_equal(test_embed.idx_to_vec[0].asnumpy(), nd.zeros((5,)).asnumpy())
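A minimal sketch of the expected-vector construction used in the assertions above; the values are the toy embedding contents the test expects, and no embedding file is downloaded here:

import mxnet.ndarray as nd

expected_hello = nd.arange(5) + 1   # [1. 2. 3. 4. 5.], the vector asserted for 'hello' (index 1)
expected_world = nd.arange(5) + 6   # [6. 7. 8. 9. 10.], the vector asserted for 'world' (index 2)
unknown_vec = nd.zeros((5,))        # init_unknown_vec fills the unknown token (index 0) with zeros
print(expected_hello.asnumpy(), expected_world.asnumpy(), unknown_vec.asnumpy())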
Example 2: unsorted_1d_segment_sum
# Module to import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import arange [as alias]
def unsorted_1d_segment_sum(input, seg_id, n_segs, dim):
    # TODO: support other dimensions
    assert dim == 0, 'MXNet only supports segment sum on first dimension'

    # Use SPMV to simulate segment sum
    ctx = input.context
    n_inputs = input.shape[0]
    input_shape_suffix = input.shape[1:]
    input = input.reshape(n_inputs, -1)
    n_range = nd.arange(n_inputs, dtype='int64').as_in_context(input.context)
    w_nnz = nd.ones(n_inputs).as_in_context(input.context)
    w_nid = nd.stack(seg_id, n_range, axis=0)
    w = nd.sparse.csr_matrix((w_nnz, (seg_id, n_range)), (n_segs, n_inputs))
    w = w.as_in_context(input.context)
    y = nd.dot(w, input)
    y = nd.reshape(y, (n_segs,) + input_shape_suffix)
    return y
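A hypothetical call to the helper above with toy inputs (assuming unsorted_1d_segment_sum is importable as defined and nd is mxnet.ndarray); nd.arange is used both to build the feature matrix here and, inside the helper, to enumerate the input rows:

import mxnet.ndarray as nd

feats = nd.arange(8).reshape(4, 2)              # 4 rows of 2 features each
seg_id = nd.array([0, 0, 1, 1], dtype='int64')  # rows 0-1 go to segment 0, rows 2-3 to segment 1
out = unsorted_1d_segment_sum(feats, seg_id, n_segs=2, dim=0)
print(out.asnumpy())                            # [[ 2.  4.]
                                                #  [10. 12.]]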
Example 3: generate_anchors
# Module to import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import arange [as alias]
def generate_anchors(base_size=16, ratios=nd.array([0.5, 1, 2]), scales=2**nd.arange(3, 6)):
    """
    Generate anchor (reference) windows by enumerating aspect ratios X
    scales wrt a reference (0, 0, 15, 15) window.
    This implementation matches the original Faster-RCNN RPN generate_anchors(),
    but all calculations are done on mxnet.ndarray.NDArray.
    Refer to
    https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/rpn/generate_anchors.py
    """
    base_anchor = nd.array([1, 1, base_size, base_size])
    ratio_anchors = _ratio_enum(base_anchor, ratios)
    anchors = nd.concatenate([_scale_enum(ratio_anchors[i, :], scales)
                              for i in range(ratio_anchors.shape[0])])
    return anchors
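A minimal sketch of the default arguments above; the _ratio_enum and _scale_enum helpers that generate_anchors calls are not shown in this example:

import mxnet.ndarray as nd

scales = 2 ** nd.arange(3, 6)    # [8. 16. 32.]: anchor scales relative to the 16-pixel base window
ratios = nd.array([0.5, 1, 2])   # aspect ratios enumerated for each scale
print(scales.asnumpy(), ratios.asnumpy())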
Example 4: map_anchors
# Module to import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import arange [as alias]
def map_anchors(ref_anchors, target_shape, scale_h, scale_w, ctx):
    ref_anchors = ref_anchors.as_in_context(ctx)
    ref_anchors = ref_anchors.reshape((1, -1, 1, 1))
    ref_anchors = ref_anchors.broadcast_to(target_shape)
    _n, _c, h, w = ref_anchors.shape
    ref_x = nd.arange(w).as_in_context(ctx).reshape((1, w)) / w
    ref_x = ref_x * scale_w
    ref_x = ref_x.broadcast_to((h, w))
    ref_y = nd.arange(h).as_in_context(ctx).reshape((h, 1)) / h
    ref_y = ref_y * scale_h
    ref_y = ref_y.broadcast_to((h, w))
    for anchor_i in range(_c // 4):
        ref_anchors[0, anchor_i * 4] += ref_x
        ref_anchors[0, anchor_i * 4 + 1] += ref_y
        ref_anchors[0, anchor_i * 4 + 2] += ref_x
        ref_anchors[0, anchor_i * 4 + 3] += ref_y
    return ref_anchors
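A hypothetical call to map_anchors above with toy shapes (CPU context, two reference anchors, a 4x6 feature map); the sizes and scale factors are illustrative only:

import mxnet as mx
import mxnet.ndarray as nd

ctx = mx.cpu()
ref_anchors = nd.array([[-8, -8, 8, 8], [-16, -16, 16, 16]])  # 2 anchors, 4 coordinates each
feat_h, feat_w = 4, 6
target_shape = (1, ref_anchors.size, feat_h, feat_w)          # (N, A*4, H, W)
mapped = map_anchors(ref_anchors, target_shape, scale_h=128, scale_w=192, ctx=ctx)
print(mapped.shape)                                           # (1, 8, 4, 6)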
Example 5: sparse_matrix
# Module to import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import arange [as alias]
def sparse_matrix(data, index, shape, force_format=False):
    fmt = index[0]
    if fmt == 'coo':
        if force_format:
            raise TypeError('MXNet backend only supports CSR format,'
                            ' but COO format is forced.')
        coord = index[1]
        # generate convert idx
        # FIXME: cannot use int64
        tmp_data = nd.arange(len(coord[0]), dtype=data.dtype, ctx=coord[0].context)
        tmp_spmat = nd.sparse.csr_matrix((tmp_data, (coord[0], coord[1])),
                                         tuple(shape), ctx=data.context)
        convert_idx = nd.cast(tmp_spmat.data, dtype='int64')
        # shuffle the data
        data = data[convert_idx]
        spmat = nd.sparse.csr_matrix((data, tmp_spmat.indices, tmp_spmat.indptr),
                                     tuple(shape), ctx=data.context)
        return spmat, convert_idx
    elif fmt == 'csr':
        indices = index[1]
        indptr = index[2]
        spmat = nd.sparse.csr_matrix((data, indices, indptr),
                                     tuple(shape), ctx=data.context)
        # No conversion is required.
        return spmat, None
    else:
        raise TypeError('Invalid format: %s.' % fmt)
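A hypothetical call to sparse_matrix above, converting a small COO-described matrix to CSR; the values and row/column indices are toy inputs:

import mxnet.ndarray as nd

data = nd.array([1., 2., 3.])
row = nd.array([0, 2, 1], dtype='int64')
col = nd.array([1, 0, 2], dtype='int64')
spmat, convert_idx = sparse_matrix(data, ('coo', (row, col)), (3, 3))
print(spmat.asnumpy())        # dense view of the resulting 3x3 CSR matrix
print(convert_idx.asnumpy())  # permutation mapping the COO order to CSR order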
Example 6: arange
# Module to import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import arange [as alias]
def arange(start, stop, dtype="int64"):
    if start >= stop:
        return nd.array([], dtype=data_type_dict()[dtype])
    else:
        return nd.arange(start, stop, dtype=data_type_dict()[dtype])
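A minimal sketch of what the wrapper above returns; data_type_dict() (not shown here) is assumed to map the string 'int64' to the matching MXNet dtype:

import mxnet.ndarray as nd

print(nd.arange(3, 7, dtype='int64').asnumpy())   # [3 4 5 6]
# For start >= stop, the wrapper short-circuits and returns an empty int64 NDArray
# instead of calling nd.arange.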
Example 7: _sync_params_from_devices
# Module to import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import arange [as alias]
def _sync_params_from_devices(self):
    """Synchronizes parameters from devices to CPU. This function should be called after
    calling `update` that updates the parameters on the devices, before one can read the
    latest parameters from ``self._arg_params`` and ``self._aux_params``.

    For row_sparse parameters on devices, they are pulled from KVStore with all row ids.
    """
    self._exec_group.get_params(self._arg_params, self._aux_params)
    if self._kvstore and self._update_on_kvstore:
        for param_name, param_val in sorted(self._arg_params.items()):
            if param_val.stype == 'row_sparse':
                row_ids = nd.arange(0, param_val.shape[0], dtype='int64')
                self._kvstore.row_sparse_pull(param_name, param_val, row_ids=row_ids)
    self._params_dirty = False
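A minimal sketch of the row-id construction above with a toy row_sparse parameter; no KVStore is involved here:

import mxnet.ndarray as nd

param_val = nd.zeros((6, 4)).tostype('row_sparse')          # stand-in for a row_sparse parameter
row_ids = nd.arange(0, param_val.shape[0], dtype='int64')   # all row ids: [0 1 2 3 4 5]
print(row_ids.asnumpy())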
Example 8: test_jitter_synthetic_gp
# Module to import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import arange [as alias]
def test_jitter_synthetic_gp(jitter_method, float_type, ctx) -> None:
    # TODO: Enable GPU tests on Jenkins
    if ctx == mx.Context("gpu") and not check_gpu_support():
        return

    # Initialize problem parameters
    batch_size = 1
    prediction_length = 50
    context_length = 5
    num_samples = 3

    # Initialize test data to generate Gaussian Process from
    lb = -5
    ub = 5
    dx = (ub - lb) / (prediction_length - 1)
    x_test = nd.arange(lb, ub + dx, dx, ctx=ctx, dtype=float_type).reshape(
        -1, 1
    )
    x_test = nd.tile(x_test, reps=(batch_size, 1, 1))

    # Define the GP hyper parameters
    amplitude = nd.ones((batch_size, 1, 1), ctx=ctx, dtype=float_type)
    length_scale = math.sqrt(0.4) * nd.ones_like(amplitude)
    sigma = math.sqrt(1e-5) * nd.ones_like(amplitude)

    # Instantiate desired kernel object and compute kernel matrix
    rbf_kernel = RBFKernel(amplitude, length_scale)

    # Generate samples from 0 mean Gaussian process with RBF Kernel and plot it
    gp = GaussianProcess(
        sigma=sigma,
        kernel=rbf_kernel,
        prediction_length=prediction_length,
        context_length=context_length,
        num_samples=num_samples,
        ctx=ctx,
        float_type=float_type,
        jitter_method=jitter_method,
        sample_noise=False,  # Returns sample without noise
    )

    # Generate training set on subset of interval using the sine function
    x_train = nd.array([-4, -3, -2, -1, 1], ctx=ctx, dtype=float_type).reshape(
        context_length, 1
    )
    x_train = nd.tile(x_train, reps=(batch_size, 1, 1))
    y_train = nd.sin(x_train.squeeze(axis=2))

    # Predict exact GP using the GP predictive mean and covariance using the same fixed hyper-parameters
    samples, predictive_mean, predictive_std = gp.exact_inference(
        x_train, y_train, x_test
    )
    assert (
        np.sum(np.isnan(samples.asnumpy())) == 0
    ), "NaNs in predictive samples!"
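A minimal sketch of the x_test grid construction above, run on CPU with float64 and the same toy settings; the GP model itself is not instantiated here:

import mxnet as mx
import mxnet.ndarray as nd

batch_size, prediction_length = 1, 50
lb, ub = -5, 5
dx = (ub - lb) / (prediction_length - 1)
x_test = nd.arange(lb, ub + dx, dx, ctx=mx.cpu(), dtype='float64').reshape(-1, 1)
x_test = nd.tile(x_test, reps=(batch_size, 1, 1))
print(x_test.shape)   # (batch_size, number of grid points, 1)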