This article collects typical usage examples of Python's builtins.sum. If you are wondering what exactly builtins.sum does, how to call it, or what real-world uses look like, the curated code examples below may help. You can also read further about other uses of the builtins module that this function belongs to.
The following shows 15 code examples of builtins.sum, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
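Before the examples, a quick reminder of what builtins.sum is: the builtins module exposes the ordinary built-in functions, so builtins.sum is exactly the sum you can normally call without any import. Importing or qualifying it explicitly matters mainly when another sum (often a NumPy-style reduction) shadows the name, a pattern that recurs in the examples below. A minimal sketch, assuming NumPy is available:

import builtins
import numpy as np

values = [1, 2, 3, 4]
print(builtins.sum(values))   # 10 -- the plain Python built-in
print(np.sum(values))         # 10 -- NumPy's reduction, with axis/dtype support

# Inside a module that defines its own sum(), builtins.sum is the
# unambiguous way to reach the Python built-in.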
Example 1: test_zeros
# Required import: import builtins [as alias]
# Or: from builtins import sum [as alias]
def test_zeros(self):
    types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
    for dt in types:
        d = np.zeros((13,), dtype=dt)
        assert_equal(np.count_nonzero(d), 0)
        # true for ieee floats
        assert_equal(d.sum(), 0)
        assert_(not d.any())

    d = np.zeros(2, dtype='(2,4)i4')
    assert_equal(np.count_nonzero(d), 0)
    assert_equal(d.sum(), 0)
    assert_(not d.any())

    d = np.zeros(2, dtype='4i4')
    assert_equal(np.count_nonzero(d), 0)
    assert_equal(d.sum(), 0)
    assert_(not d.any())

    d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
    assert_equal(np.count_nonzero(d), 0)
Example 2: _new_alloc_handle
# Required import: import builtins [as alias]
# Or: from builtins import sum [as alias]
def _new_alloc_handle(stype, shape, ctx, delay_alloc, dtype, aux_types, aux_shapes=None):
    """Return a new handle with specified storage type, shape, dtype and context.

    Empty handle is only used to hold results.

    Returns
    -------
    handle
        A new empty ndarray handle
    """
    hdl = NDArrayHandle()
    for aux_t in aux_types:
        if np.dtype(aux_t) != np.dtype("int64"):
            raise NotImplementedError("only int64 is supported for aux types")
    aux_type_ids = [int(_DTYPE_NP_TO_MX[np.dtype(aux_t).type]) for aux_t in aux_types]
    aux_shapes = [(0,) for aux_t in aux_types] if aux_shapes is None else aux_shapes
    aux_shape_lens = [len(aux_shape) for aux_shape in aux_shapes]
    aux_shapes = py_sum(aux_shapes, ())
    num_aux = mx_uint(len(aux_types))
    check_call(_LIB.MXNDArrayCreateSparseEx(
        ctypes.c_int(int(_STORAGE_TYPE_STR_TO_ID[stype])),
        c_array_buf(mx_uint, native_array('I', shape)),
        mx_uint(len(shape)),
        ctypes.c_int(ctx.device_typeid),
        ctypes.c_int(ctx.device_id),
        ctypes.c_int(int(delay_alloc)),
        ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])),
        num_aux,
        c_array_buf(ctypes.c_int, native_array('i', aux_type_ids)),
        c_array_buf(mx_uint, native_array('I', aux_shape_lens)),
        c_array_buf(mx_uint, native_array('I', aux_shapes)),
        ctypes.byref(hdl)))
    return hdl
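The py_sum(aux_shapes, ()) call above is the interesting use of sum in this snippet: py_sum appears to be the built-in sum imported under another name (presumably why this snippet is indexed under builtins.sum), and passing an empty tuple as the start value makes it concatenate a list of shape tuples into one flat tuple. A small standalone sketch of that trick, with made-up values:

import builtins

aux_shapes = [(3, 4), (5,), (2, 2)]
flat = builtins.sum(aux_shapes, ())  # () + (3, 4) + (5,) + (2, 2)
print(flat)                          # (3, 4, 5, 2, 2)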
Example 3: numpy_funcs
# Required import: import builtins [as alias]
# Or: from builtins import sum [as alias]
def numpy_funcs():
    return astroid.parse('''
    import builtins
    def sum(a, axis=None, dtype=None, out=None, keepdims=None):
        return builtins.sum(a)
    ''')
Example 4: numel
# Required import: import builtins [as alias]
# Or: from builtins import sum [as alias]
def numel(x, **kwargs):
    xp = get_array_module(x)
    return xp.sum(xp.ones_like(x), **kwargs)
Example 5: nannumel
# Required import: import builtins [as alias]
# Or: from builtins import sum [as alias]
def nannumel(x, **kwargs):
    x_size = reduce(operator.mul, x.shape)
    xp = get_array_module(x)
    return x_size - xp.sum(xp.isnan(x), **kwargs)
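numel above counts every element by summing an array of ones, while nannumel subtracts the number of NaNs from the total size. A NumPy-only illustration of the same idea, with the get_array_module dispatch omitted:

import numpy as np
from functools import reduce
import operator

x = np.array([[1.0, np.nan], [3.0, 4.0]])
total = np.sum(np.ones_like(x))          # 4.0 -- what numel computes
x_size = reduce(operator.mul, x.shape)   # 4
non_nan = x_size - np.sum(np.isnan(x))   # 3 -- what nannumel computes
print(total, non_nan)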
Example 6: _concatenate_shape
# Required import: import builtins [as alias]
# Or: from builtins import sum [as alias]
def _concatenate_shape(tensor, combine_block):
    return tuple(builtins.sum(nsplit[i] for i in cb)
                 for nsplit, cb in zip(tensor.nsplits, combine_block))
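Here builtins.sum adds up the split sizes of the chunks being merged along each axis, giving the shape of the concatenated chunk. A standalone worked example with made-up nsplits values:

import builtins

nsplits = ((3, 3, 2), (4, 4))    # chunk sizes along each axis of a tiled tensor
combine_block = ((0, 1), (1,))   # merge chunks 0 and 1 on axis 0, take chunk 1 on axis 1
shape = tuple(builtins.sum(nsplit[i] for i in cb)
              for nsplit, cb in zip(nsplits, combine_block))
print(shape)  # (6, 4)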
Example 7: _partial_reduction
# Required import: import builtins [as alias]
# Or: from builtins import sum [as alias]
def _partial_reduction(cls, tensor, axis, dtype, keepdims, combine_size, stage, kw=None):
    from ..merge.concatenate import TensorConcatenate

    kw = kw or {}
    axes = sorted(combine_size.keys())
    op_type = type(tensor.op)

    combine_blocks = [cls._combine_split(i, combine_size, tensor.chunk_shape)
                      for i in range(tensor.ndim)]
    combine_blocks_idxes = [range(len(blocks)) for blocks in combine_blocks]

    chunks = []
    for combine_block_idx, combine_block in zip(itertools.product(*combine_blocks_idxes),
                                                itertools.product(*combine_blocks)):
        chks = [tensor.cix[idx] for idx in itertools.product(*combine_block)]
        if len(chks) > 1:
            op = TensorConcatenate(axis=axes, dtype=chks[0].dtype)
            chk = op.new_chunk(chks, shape=cls._concatenate_shape(tensor, combine_block),
                               order=tensor.order)
        else:
            chk = chks[0]
        shape = tuple(s if i not in combine_size else 1
                      for i, s in enumerate(chk.shape) if keepdims or i not in combine_size)
        agg_op = op_type(stage=stage, axis=axis, dtype=dtype, keepdims=keepdims, **kw)
        chunk = agg_op.new_chunk([chk], shape=shape,
                                 index=tuple(idx for i, idx in enumerate(combine_block_idx)
                                             if keepdims or i not in combine_size),
                                 order=tensor.order)
        chunks.append(chunk)

    nsplits = [
        tuple(c.shape[i] for c in chunks
              if builtins.all(idx == 0 for j, idx in enumerate(c.index) if j != i))
        for i in range(len(chunks[0].shape))]
    shape = tuple(builtins.sum(nsplit) for nsplit in nsplits)
    agg_op = op_type(stage=stage, axis=axis, dtype=dtype, keepdims=keepdims,
                     combine_size=combine_size, **kw)
    return agg_op.new_tensors([tensor], shape, order=tensor.order,
                              chunks=chunks, nsplits=nsplits)
Example 8: _get_offset
# Required import: import builtins [as alias]
# Or: from builtins import sum [as alias]
def _get_offset(tensor, axis, chunk, ravel):
    nsplits = tensor.nsplits
    offset = tuple(builtins.sum(split[:idx]) for split, idx in zip(nsplits, chunk.index))
    if not ravel:
        offset = offset[axis[0]]
    return offset
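The builtins.sum(split[:idx]) expression turns a chunk's index into an element offset: along each axis it adds up the sizes of all chunks that come before it. A standalone sketch with made-up values:

import builtins

nsplits = ((3, 3, 2), (4, 4))  # chunk sizes along each axis
chunk_index = (2, 1)           # third chunk on axis 0, second chunk on axis 1
offset = tuple(builtins.sum(split[:idx])
               for split, idx in zip(nsplits, chunk_index))
print(offset)  # (6, 4)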
Example 9: numpy_funcs
# Required import: import builtins [as alias]
# Or: from builtins import sum [as alias]
def numpy_funcs():
    return astroid.parse(
        """
        import builtins
        def sum(a, axis=None, dtype=None, out=None, keepdims=None):
            return builtins.sum(a)
        """
    )
Example 10: _new_alloc_handle
# Required import: import builtins [as alias]
# Or: from builtins import sum [as alias]
def _new_alloc_handle(stype, shape, ctx, delay_alloc, dtype, aux_types, aux_shapes=None):
    """Return a new handle with specified storage type, shape, dtype and context.

    Empty handle is only used to hold results.

    Returns
    -------
    handle
        A new empty ndarray handle
    """
    hdl = NDArrayHandle()
    for aux_t in aux_types:
        if np.dtype(aux_t) != np.dtype("int64"):
            raise NotImplementedError("only int64 is supported for aux types")
    aux_type_ids = [int(_DTYPE_NP_TO_MX[np.dtype(aux_t).type]) for aux_t in aux_types]
    aux_shapes = [(0,) for aux_t in aux_types] if aux_shapes is None else aux_shapes
    aux_shape_lens = [len(aux_shape) for aux_shape in aux_shapes]
    aux_shapes = py_sum(aux_shapes, ())
    num_aux = mx_uint(len(aux_types))
    check_call(_LIB.MXNDArrayCreateSparseEx(
        ctypes.c_int(int(_STORAGE_TYPE_STR_TO_ID[stype])),
        c_array(mx_uint, shape),
        mx_uint(len(shape)),
        ctypes.c_int(ctx.device_typeid),
        ctypes.c_int(ctx.device_id),
        ctypes.c_int(int(delay_alloc)),
        ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])),
        num_aux,
        c_array(ctypes.c_int, aux_type_ids),
        c_array(mx_uint, aux_shape_lens),
        c_array(mx_uint, aux_shapes),
        ctypes.byref(hdl)))
    return hdl
Example 11: sum
# Required import: import builtins [as alias]
# Or: from builtins import sum [as alias]
def sum(xs):
    return builtins.sum(xs)
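Note that the wrapper delegates to builtins.sum rather than calling sum directly: inside a module that defines its own sum, a bare sum(xs) in the body would resolve to this very function and recurse forever. A minimal sketch of that point:

import builtins

def sum(xs):
    # a bare `sum(xs)` here would call this function again and recurse;
    # builtins.sum reaches the original built-in explicitly
    return builtins.sum(xs)

print(sum([1, 2, 3]))  # 6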
Example 12: test_sum
# Required import: import builtins [as alias]
# Or: from builtins import sum [as alias]
def test_sum(self):
    d = np.ones(101, dtype=bool)
    assert_equal(d.sum(), d.size)
    assert_equal(d[::2].sum(), d[::2].size)
    assert_equal(d[::-2].sum(), d[::-2].size)

    d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
    assert_equal(d.sum(), d.size)
    assert_equal(d[::2].sum(), d[::2].size)
    assert_equal(d[::-2].sum(), d[::-2].size)
Example 13: check_count_nonzero
# Required import: import builtins [as alias]
# Or: from builtins import sum [as alias]
def check_count_nonzero(self, power, length):
    powers = [2 ** i for i in range(length)]
    for i in range(2 ** power):
        l = [(i & x) != 0 for x in powers]
        a = np.array(l, dtype=bool)
        c = builtins.sum(l)
        self.assertEqual(np.count_nonzero(a), c)
        av = a.view(np.uint8)
        av *= 3
        self.assertEqual(np.count_nonzero(a), c)
        av *= 4
        self.assertEqual(np.count_nonzero(a), c)
        av[av != 0] = 0xFF
        self.assertEqual(np.count_nonzero(a), c)
Example 14: test_count_nonzero_unaligned
# Required import: import builtins [as alias]
# Or: from builtins import sum [as alias]
def test_count_nonzero_unaligned(self):
    # prevent mistakes as e.g. gh-4060
    for o in range(7):
        a = np.zeros((18,), dtype=bool)[o+1:]
        a[:o] = True
        self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
        a = np.ones((18,), dtype=bool)[o+1:]
        a[:o] = False
        self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
Example 15: test_export_record
# Required import: import builtins [as alias]
# Or: from builtins import sum [as alias]
def test_export_record(self):
    dt = [('a', 'b'),
          ('b', 'h'),
          ('c', 'i'),
          ('d', 'l'),
          ('dx', 'q'),
          ('e', 'B'),
          ('f', 'H'),
          ('g', 'I'),
          ('h', 'L'),
          ('hx', 'Q'),
          ('i', np.single),
          ('j', np.double),
          ('k', np.longdouble),
          ('ix', np.csingle),
          ('jx', np.cdouble),
          ('kx', np.clongdouble),
          ('l', 'S4'),
          ('m', 'U4'),
          ('n', 'V3'),
          ('o', '?'),
          ('p', np.half),
          ]
    x = np.array(
        [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
          asbytes('aaaa'), 'bbbb', asbytes(' '), True, 1.0)],
        dtype=dt)
    y = memoryview(x)
    assert_equal(y.shape, (1,))
    assert_equal(y.ndim, 1)
    assert_equal(y.suboffsets, EMPTY)

    sz = sum([dtype(b).itemsize for a, b in dt])
    if dtype('l').itemsize == 4:
        assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
    else:
        assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')

    # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides
    if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
        assert_equal(y.strides, (sz,))
    assert_equal(y.itemsize, sz)