This article collects typical usage examples of the C++ method ndt::type::get_data_size. If you have been wondering what type::get_data_size does, how to call it, or what real uses look like, the curated code examples below should help. You can also explore further usage examples of its containing class, ndt::type.
Fifteen code examples of type::get_data_size are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
Example 1: make_elwise_reduce_result
/**
 * Creates a result array for an elementwise
 * reduce operation.
 */
static ndarray_node_ptr make_elwise_reduce_result(const ndt::type& result_dt, uint32_t access_flags, bool keepdims,
                int ndim, const dynd_bool *reduce_axes, const intptr_t *src_shape, const int *src_axis_perm,
                char *&result_originptr, intptr_t *result_strides)
{
    dimvector result_shape(ndim);
    // Calculate the shape and strides of the reduction result
    // without removing the dimensions
    intptr_t num_elements = 1;
    intptr_t stride = result_dt.get_data_size();
    for (int i = 0; i < ndim; ++i) {
        int p = src_axis_perm[i];
        if (reduce_axes[p]) {
            result_shape[p] = 1;
            result_strides[p] = 0;
        } else {
            intptr_t size = src_shape[p];
            result_shape[p] = size;
            if (size == 1) {
                result_strides[p] = 0;
            } else {
                result_strides[p] = stride;
                stride *= size;
                num_elements *= size;
            }
        }
    }
    // Allocate the memoryblock for the data
    char *originptr = NULL;
    memory_block_ptr memblock = make_fixed_size_pod_memory_block(result_dt.get_data_size() * num_elements,
                    result_dt.get_data_alignment(), &originptr,
                    NULL, NULL);
    ndarray_node_ptr result;
    // Create the strided ndarray node, compressing the dimensions if requested
    if (!keepdims) {
        dimvector compressed_shape(ndim), compressed_strides(ndim);
        int compressed_ndim = 0;
        for (int i = 0; i < ndim; ++i) {
            if (!reduce_axes[i]) {
                compressed_shape[compressed_ndim] = result_shape[i];
                compressed_strides[compressed_ndim] = result_strides[i];
                ++compressed_ndim;
            }
        }
        result = make_strided_ndarray_node(result_dt, compressed_ndim,
                        compressed_shape.get(), compressed_strides.get(), originptr, access_flags, memblock);
    } else {
        result = make_strided_ndarray_node(result_dt, ndim,
                        result_shape.get(), result_strides, originptr, access_flags, memblock);
    }
    // Because we just allocated this buffer, we can write to it even though it
    // might be marked as readonly because the src memory block is readonly
    result_originptr = const_cast<char *>(result->get_readonly_originptr());
    return DYND_MOVE(result);
}
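A side note on the idiom above: get_data_size() of the result type seeds the stride accumulation, so each non-reduced axis receives the running byte stride in permutation order. A minimal standalone sketch of that accumulation, assuming a hypothetical 4-byte element (what get_data_size() would report for an int32 type) and a C-order permutation:

#include <cassert>
#include <cstdint>

int main()
{
    intptr_t shape[2] = {3, 5};
    intptr_t strides[2];
    intptr_t stride = 4; // stands in for result_dt.get_data_size()
    // Accumulate from the innermost axis outward, mirroring the loop above
    for (int i = 1; i >= 0; --i) {
        strides[i] = stride;
        stride *= shape[i];
    }
    assert(strides[0] == 20 && strides[1] == 4);
    return 0;
}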
Example 2: type_error
byteswap_type::byteswap_type(const ndt::type& value_type)
    : base_expr_type(byteswap_type_id, expr_kind, value_type.get_data_size(),
                     value_type.get_data_alignment(), type_flag_scalar, 0),
      m_value_type(value_type),
      m_operand_type(ndt::make_fixedbytes(value_type.get_data_size(), value_type.get_data_alignment()))
{
    if (!value_type.is_builtin()) {
        throw dynd::type_error("byteswap_type: Only built-in types are supported presently");
    }
}
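A hedged usage sketch: dynd conventionally exposes this constructor through an ndt::make_byteswap factory (the factory name is an assumption based on dynd's naming conventions, not shown in this example). The operand is a fixedbytes type of identical size and alignment, so both sides of the expression report the same get_data_size():

// Hypothetical usage; make_byteswap would wrap the constructor above
ndt::type bst = ndt::make_byteswap(ndt::make_type<int32_t>());
// value type:   int32             -> get_data_size() == 4
// operand type: fixedbytes[4, 4]  -> get_data_size() == 4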
Example 3: try_view
/**
 * Scans through the types, and tries to view data
 * for 'tp'/'arrmeta' as 'view_tp'. For this to be
 * possible, one must be able to construct
 * arrmeta for 'tp' corresponding to the same data.
 *
 * \param tp The type of the data.
 * \param arrmeta The array arrmeta of the data.
 * \param view_tp The type the data should be viewed as.
 * \param view_arrmeta The array arrmeta of the view, which should be populated.
 * \param embedded_reference The containing memory block in case the data was embedded.
 *
 * \returns If it worked, returns true, otherwise false.
 */
static bool try_view(const ndt::type &tp, const char *arrmeta, const ndt::type &view_tp, char *view_arrmeta,
                     dynd::memory_block_data *embedded_reference)
{
    switch (tp.get_type_id()) {
    case fixed_dim_type_id: {
        // All the strided dim types share the same arrmeta, so can be
        // treated uniformly here
        const ndt::base_dim_type *sdt = tp.extended<ndt::base_dim_type>();
        const fixed_dim_type_arrmeta *md = reinterpret_cast<const fixed_dim_type_arrmeta *>(arrmeta);
        switch (view_tp.get_type_id()) {
        case fixed_dim_type_id: { // strided as fixed
            const ndt::fixed_dim_type *view_fdt = view_tp.extended<ndt::fixed_dim_type>();
            // The size must match exactly in this case
            if (md->dim_size != view_fdt->get_fixed_dim_size()) {
                return false;
            }
            fixed_dim_type_arrmeta *view_md = reinterpret_cast<fixed_dim_type_arrmeta *>(view_arrmeta);
            if (try_view(sdt->get_element_type(), arrmeta + sizeof(fixed_dim_type_arrmeta),
                         view_fdt->get_element_type(), view_arrmeta + sizeof(fixed_dim_type_arrmeta),
                         embedded_reference)) {
                *view_md = *md;
                return true;
            } else {
                return false;
            }
        }
        default: // other cases cannot be handled
            return false;
        }
    }
    default:
        if (tp == view_tp) {
            // Require equal types otherwise
            if (tp.get_arrmeta_size() > 0) {
                tp.extended()->arrmeta_copy_construct(view_arrmeta, arrmeta, embedded_reference);
            }
            return true;
        } else if (tp.is_pod() && view_tp.is_pod() && tp.get_data_size() == view_tp.get_data_size() &&
                   tp.get_data_alignment() >= view_tp.get_data_alignment()) {
            // POD types with matching properties
            if (view_tp.get_arrmeta_size() > 0) {
                view_tp.extended()->arrmeta_default_construct(view_arrmeta, true);
            }
            return true;
        } else {
            return false;
        }
    }
}
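The default branch is the part that leans on get_data_size(): two distinct POD types are view-compatible when their byte sizes match exactly and the view does not demand stricter alignment. A minimal sketch of that predicate in isolation (the helper name is hypothetical):

// Hypothetical standalone version of the POD compatibility test above
static bool pod_viewable(const ndt::type &tp, const ndt::type &view_tp)
{
    return tp.is_pod() && view_tp.is_pod() &&
           tp.get_data_size() == view_tp.get_data_size() &&
           tp.get_data_alignment() >= view_tp.get_data_alignment();
}
// e.g. viewing int32 as float32 passes (both 4 bytes, same alignment),
// while viewing int32 as int64 fails (4 != 8)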
Example 4: malloc
static void array_getbuffer_pep3118_bytes(const ndt::type &tp, const char *arrmeta, char *data, Py_buffer *buffer,
                                          int flags)
{
    buffer->itemsize = 1;
    if (flags & PyBUF_FORMAT) {
        buffer->format = (char *)"c";
    } else {
        buffer->format = NULL;
    }
    buffer->ndim = 1;
#if PY_VERSION_HEX == 0x02070000
    buffer->internal = NULL;
    buffer->shape = &buffer->smalltable[0];
    buffer->strides = &buffer->smalltable[1];
#else
    buffer->internal = malloc(2 * sizeof(intptr_t));
    buffer->shape = reinterpret_cast<Py_ssize_t *>(buffer->internal);
    buffer->strides = buffer->shape + 1;
#endif
    buffer->strides[0] = 1;
    if (tp.get_id() == bytes_id) {
        // Variable-length bytes type
        buffer->buf = reinterpret_cast<bytes *>(data)->begin();
        buffer->len = reinterpret_cast<bytes *>(data)->size();
    } else {
        // Fixed-length bytes type
        buffer->len = tp.get_data_size();
    }
    buffer->shape[0] = buffer->len;
}
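Note how the two branches fill in buffer->len: a variable-length bytes value carries its own length in the data, while a fixed-size bytes type reports it statically through get_data_size(). A hedged illustration using make_fixed_bytes (the same factory that appears in Example 15):

ndt::type fb = ndt::make_fixed_bytes(16, 1); // 16-byte fixed-size bytes type
// fb.get_data_size() == 16, so buffer->len would be 16 for this type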
Example 5: strides
ndt::type dynd::ndt::make_fixed_dim(size_t ndim, const intptr_t *shape,
                                    const ndt::type& uniform_tp, const int *axis_perm)
{
    if (axis_perm == NULL) {
        // Build a C-order fixed array type
        ndt::type result = uniform_tp;
        for (ptrdiff_t i = (ptrdiff_t)ndim - 1; i >= 0; --i) {
            result = ndt::make_fixed_dim(shape[i], result);
        }
        return result;
    } else {
        // Create strides with the axis permutation
        dimvector strides(ndim);
        intptr_t stride = uniform_tp.get_data_size();
        for (size_t i = 0; i < ndim; ++i) {
            int i_perm = axis_perm[i];
            size_t dim_size = shape[i_perm];
            strides[i_perm] = dim_size > 1 ? stride : 0;
            stride *= dim_size;
        }
        // Build the fixed array type
        ndt::type result = uniform_tp;
        for (ptrdiff_t i = (ptrdiff_t)ndim - 1; i >= 0; --i) {
            result = ndt::make_fixed_dim(shape[i], result, strides[i]);
        }
        return result;
    }
}
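A hedged usage sketch of the permuted branch: passing axis_perm = {0, 1} (innermost axis listed first) yields Fortran-order strides, with the first axis receiving the raw element size from get_data_size():

int axis_perm[2] = {0, 1};  // axis 0 is innermost
intptr_t shape[2] = {3, 4};
ndt::type tp = dynd::ndt::make_fixed_dim(2, shape, ndt::make_type<float>(), axis_perm);
// float's get_data_size() is 4, so the computed strides are {4, 12}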
Example 6: runtime_error
fixed_dim_type::fixed_dim_type(size_t dimension_size, const ndt::type& element_tp, intptr_t stride)
    : base_uniform_dim_type(fixed_dim_type_id, element_tp, 0, element_tp.get_data_alignment(),
                            0, type_flag_none),
      m_stride(stride), m_dim_size(dimension_size)
{
    size_t child_element_size = element_tp.get_data_size();
    if (child_element_size == 0) {
        stringstream ss;
        ss << "Cannot create dynd fixed_dim type with element type " << element_tp;
        ss << ", as it does not have a fixed size";
        throw runtime_error(ss.str());
    }
    if (dimension_size <= 1 && stride != 0) {
        stringstream ss;
        ss << "Cannot create dynd fixed_dim type with size " << dimension_size;
        ss << " and stride " << stride << ", as the stride must be zero when the dimension size is 1";
        throw runtime_error(ss.str());
    }
    if (dimension_size > 1 && stride == 0) {
        stringstream ss;
        ss << "Cannot create dynd fixed_dim type with size " << dimension_size;
        ss << " and stride 0, as the stride must be non-zero when the dimension size is > 1";
        throw runtime_error(ss.str());
    }
    m_members.data_size = m_stride * (m_dim_size - 1) + child_element_size;
    // Propagate the zeroinit flag from the element
    m_members.flags |= (element_tp.get_flags() & type_flag_zeroinit);
    // Copy ndobject properties and functions from the first non-array dimension
    get_scalar_properties_and_functions(m_array_properties, m_array_functions);
}
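The data_size formula deserves a remark: it measures the span from the start of the first element to the end of the last one, which is smaller than stride * dim_size whenever the stride is padded. A worked instance with assumed numbers:

// Assuming int32 elements (element_tp.get_data_size() == 4),
// dimension size 5, and a padded stride of 8:
//   data_size = 8 * (5 - 1) + 4 = 36 bytes
// rather than 8 * 5 = 40, since no padding follows the last element
constexpr intptr_t stride = 8, dim_size = 5, elem_size = 4;
constexpr intptr_t data_size = stride * (dim_size - 1) + elem_size;
static_assert(data_size == 36, "span formula");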
Example 7: runtime_error
view_type::view_type(const ndt::type& value_type, const ndt::type& operand_type)
    : base_expression_type(view_type_id, expression_kind, operand_type.get_data_size(),
                           operand_type.get_data_alignment(),
                           inherited_flags(value_type.get_flags(), operand_type.get_flags()),
                           operand_type.get_metadata_size()),
      m_value_type(value_type), m_operand_type(operand_type)
{
    if (value_type.get_data_size() != operand_type.value_type().get_data_size()) {
        std::stringstream ss;
        ss << "view_type: Cannot view " << operand_type.value_type() << " as " << value_type
           << " because they have different sizes";
        throw std::runtime_error(ss.str());
    }
    if (!value_type.is_pod()) {
        throw std::runtime_error("view_type: Only POD types are supported");
    }
}
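A hedged usage sketch via ndt::make_view (the factory used in Example 15): construction succeeds only when the two sides report identical get_data_size() results:

// 4-byte view of a 4-byte type: accepted
ndt::type ok = ndt::make_view(ndt::make_type<float>(), ndt::make_type<int32_t>());
// 4-byte view of an 8-byte type: the size check above would throw
// ndt::type bad = ndt::make_view(ndt::make_type<float>(), ndt::make_type<int64_t>());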
Example 8: runtime_error
expr_type::expr_type(const ndt::type& value_type, const ndt::type& operand_type,
                     const expr_kernel_generator *kgen)
    : base_expression_type(expr_type_id, expression_kind,
                           operand_type.get_data_size(), operand_type.get_data_alignment(),
                           inherited_flags(value_type.get_flags(), operand_type.get_flags()),
                           operand_type.get_metadata_size(), value_type.get_ndim()),
      m_value_type(value_type), m_operand_type(operand_type),
      m_kgen(kgen)
{
    if (operand_type.get_type_id() != cstruct_type_id) {
        stringstream ss;
        ss << "expr_type can only be constructed with a cstruct as its operand, given ";
        ss << operand_type;
        throw runtime_error(ss.str());
    }
    const cstruct_type *fsd = static_cast<const cstruct_type *>(operand_type.extended());
    size_t field_count = fsd->get_field_count();
    if (field_count == 1) {
        throw runtime_error("expr_type is for 2 or more operands, use unary_expr_type for 1 operand");
    }
    const ndt::type *field_types = fsd->get_field_types();
    for (size_t i = 0; i != field_count; ++i) {
        if (field_types[i].get_type_id() != pointer_type_id) {
            stringstream ss;
            ss << "each field of the expr_type's operand must be a pointer, field " << i;
            ss << " is " << field_types[i];
            throw runtime_error(ss.str());
        }
    }
}
Example 9: make_cuda_to_device_builtin_type_assignment_kernel
intptr_t dynd::make_cuda_to_device_builtin_type_assignment_kernel(
    const callable_type_data *DYND_UNUSED(self),
    const ndt::callable_type *DYND_UNUSED(af_tp), char *DYND_UNUSED(data),
    void *ckb, intptr_t ckb_offset, const ndt::type &dst_tp,
    const char *DYND_UNUSED(dst_arrmeta), intptr_t DYND_UNUSED(nsrc),
    const ndt::type *src_tp, const char *const *DYND_UNUSED(src_arrmeta),
    kernel_request_t kernreq, const eval::eval_context *ectx,
    const nd::array &DYND_UNUSED(kwds),
    const std::map<std::string, ndt::type> &DYND_UNUSED(tp_vars))
{
    assign_error_mode errmode = ectx->errmode;
    if (errmode != assign_error_nocheck &&
            is_lossless_assignment(dst_tp, *src_tp)) {
        errmode = assign_error_nocheck;
    }
    if (!dst_tp.is_builtin() || !src_tp->is_builtin() ||
            errmode == assign_error_default) {
        stringstream ss;
        ss << "cannot assign to CUDA device with types " << *src_tp << " to "
           << dst_tp;
        throw runtime_error(ss.str());
    }
    nd::cuda_host_to_device_assign_ck::make(ckb, kernreq, ckb_offset,
                                            dst_tp.get_data_size());
    return make_builtin_type_assignment_kernel(
        ckb, ckb_offset, dst_tp.get_type_id(), src_tp->get_type_id(),
        kernel_request_single, errmode);
}
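The size argument passed to the ck factory is what ties this example to get_data_size(): the host-to-device stage must know how many bytes one destination element occupies. A hedged illustration of that value:

ndt::type dst_tp = ndt::make_type<double>(); // hypothetical destination type
// dst_tp.get_data_size() == 8: each single assignment stages 8 bytes
// through the host buffer before the copy to the CUDA device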
Example 10:
ndt::adapt_type::adapt_type(const ndt::type &value_tp, const ndt::type &storage_tp, const nd::callable &forward,
                            const nd::callable &inverse)
    : base_expr_type(adapt_id, storage_tp.get_data_size(), storage_tp.get_data_alignment(), type_flag_none,
                     storage_tp.get_arrmeta_size(), storage_tp.get_ndim()),
      m_value_tp(value_tp), m_storage_tp(storage_tp), m_forward(forward), m_inverse(inverse)
{
}
Example 11:
unary_expr_type::unary_expr_type(const ndt::type& value_type, const ndt::type& operand_type,
                                 const expr_kernel_generator *kgen)
    : base_expression_type(unary_expr_type_id, expression_kind,
                           operand_type.get_data_size(), operand_type.get_data_alignment(),
                           inherited_flags(value_type.get_flags(), operand_type.get_flags()),
                           operand_type.get_metadata_size(), value_type.get_ndim()),
      m_value_type(value_type), m_operand_type(operand_type),
      m_kgen(kgen)
{
}
Example 12: DYND_MOVE
ndarray_node_ptr dynd::eval::evaluate_strided_with_unary_kernel(ndarray_node *node, const eval::eval_context *DYND_UNUSED(ectx),
                bool copy, uint32_t access_flags,
                const ndt::type& dst_tp, kernel_instance<unary_operation_pair_t>& operation)
{
    const ndt::type& src_tp = node->get_type();
    ndarray_node_ptr result;
    int ndim = node->get_ndim();
    // Adjust the access flags, and force a copy if the access flags require it
    eval::process_access_flags(access_flags, node->get_access_flags(), copy);
    // For blockref result dtypes, this is the memblock
    // where the variable sized data goes
    memory_block_ptr dst_memblock;
    // Generate the axis_perm from the input strides, and use it to allocate the output
    shortvector<int> axis_perm(ndim);
    const intptr_t *node_strides = node->get_strides();
    char *result_originptr;
    strides_to_axis_perm(ndim, node_strides, axis_perm.get());
    result = initialize_dst_memblock(copy, dst_tp, ndim, node->get_shape(), axis_perm.get(),
                    access_flags, operation, node->get_data_memory_block(), dst_memblock, result_originptr);
    // Execute the kernel for all the elements
    raw_ndarray_iter<1,1> iter(node->get_ndim(), node->get_shape(),
                    result_originptr, result->get_strides(),
                    node->get_readonly_originptr(), node->get_strides());
    intptr_t innersize = iter.innersize();
    intptr_t dst_stride = iter.innerstride<0>();
    intptr_t src0_stride = iter.innerstride<1>();
    unary_specialization_t uspec = get_unary_specialization(dst_stride, dst_tp.get_data_size(),
                    src0_stride, src_tp.get_data_size());
    unary_operation_t kfunc = operation.specializations[uspec];
    if (innersize > 0) {
        do {
            kfunc(iter.data<0>(), dst_stride,
                  iter.data<1>(), src0_stride,
                  innersize, operation.auxdata);
        } while (iter.iternext());
    }
    // Finalize the destination memory block if it was a blockref dtype
    if (dst_memblock.get() != NULL) {
        memory_block_pod_allocator_api *api = get_memory_block_pod_allocator_api(dst_memblock.get());
        api->finalize(dst_memblock.get());
    }
    return DYND_MOVE(result);
}
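The specialization lookup is where get_data_size() matters here: comparing each inner stride against the corresponding element size is the standard way to detect a contiguous inner loop. A minimal sketch of that test (assumed semantics, hypothetical helper name):

// Hypothetical predicate behind the specialization choice: an axis is
// contiguous when its stride equals the element size
inline bool is_contig_axis(intptr_t stride, size_t elem_size)
{
    return stride == (intptr_t)elem_size;
}
// e.g. is_contig_axis(dst_stride, dst_tp.get_data_size())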
Example 13: make_assignment_kernel
size_t string_type::make_assignment_kernel(
                ckernel_builder *out, size_t offset_out,
                const ndt::type& dst_tp, const char *dst_metadata,
                const ndt::type& src_tp, const char *src_metadata,
                kernel_request_t kernreq, assign_error_mode errmode,
                const eval::eval_context *ectx) const
{
    if (this == dst_tp.extended()) {
        switch (src_tp.get_type_id()) {
        case string_type_id: {
            return make_blockref_string_assignment_kernel(out, offset_out,
                            dst_metadata, get_encoding(),
                            src_metadata, static_cast<const base_string_type *>(src_tp.extended())->get_encoding(),
                            kernreq, errmode, ectx);
        }
        case fixedstring_type_id: {
            return make_fixedstring_to_blockref_string_assignment_kernel(out, offset_out,
                            dst_metadata, get_encoding(),
                            src_tp.get_data_size(),
                            static_cast<const base_string_type *>(src_tp.extended())->get_encoding(),
                            kernreq, errmode, ectx);
        }
        default: {
            if (!src_tp.is_builtin()) {
                return src_tp.extended()->make_assignment_kernel(out, offset_out,
                                dst_tp, dst_metadata,
                                src_tp, src_metadata,
                                kernreq, errmode, ectx);
            } else {
                return make_builtin_to_string_assignment_kernel(out, offset_out,
                                dst_tp, dst_metadata,
                                src_tp.get_type_id(),
                                kernreq, errmode, ectx);
            }
        }
        }
    } else {
        if (dst_tp.is_builtin()) {
            return make_string_to_builtin_assignment_kernel(out, offset_out,
                            dst_tp.get_type_id(),
                            src_tp, src_metadata,
                            kernreq, errmode, ectx);
        } else {
            stringstream ss;
            ss << "Cannot assign from " << src_tp << " to " << dst_tp;
            throw dynd::type_error(ss.str());
        }
    }
}
Example 14: typed_data_copy
void dynd::typed_data_copy(const ndt::type& tp,
                           const char *dst_arrmeta, char *dst_data,
                           const char *src_arrmeta, const char *src_data)
{
    size_t data_size = tp.get_data_size();
    if (tp.is_pod()) {
        memcpy(dst_data, src_data, data_size);
    } else {
        unary_ckernel_builder k;
        make_assignment_kernel(&k, 0, tp, dst_arrmeta, tp, src_arrmeta,
                               kernel_request_single,
                               &eval::default_eval_context);
        k(dst_data, src_data);
    }
}
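A hedged usage sketch of the POD fast path: for a built-in type the call reduces to a memcpy of get_data_size() bytes, and passing NULL for both arrmeta pointers is an assumption consistent with how built-in types are handled in the other examples:

int32_t src = 123, dst = 0;
dynd::typed_data_copy(ndt::make_type<int32_t>(), NULL,
                      reinterpret_cast<char *>(&dst), NULL,
                      reinterpret_cast<const char *>(&src));
// int32 is POD with get_data_size() == 4, so this memcpys 4 bytes; dst == 123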
Example 15: make_unaligned
ndt::type ndt::make_unaligned(const ndt::type& value_type)
{
    if (value_type.get_data_alignment() > 1) {
        // Only do something if it requires alignment
        if (value_type.get_kind() != expr_kind) {
            return ndt::make_view(
                value_type, ndt::make_fixed_bytes(value_type.get_data_size(), 1));
        } else {
            const ndt::type &sdt = value_type.storage_type();
            return ndt::type(
                value_type.extended<base_expr_type>()->with_replaced_storage_type(
                    ndt::make_view(sdt,
                                   ndt::make_fixed_bytes(sdt.get_data_size(), 1))));
        }
    } else {
        return value_type;
    }
}
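A hedged usage sketch: wrapping a built-in type produces a view whose storage is a fixed_bytes type of the same get_data_size() but alignment 1, so the data size is preserved while the alignment requirement drops away:

ndt::type utp = ndt::make_unaligned(ndt::make_type<int32_t>());
// utp.get_data_size() == 4, but utp.get_data_alignment() == 1,
// so the value can live at any byte offset.
// A type that is already 1-aligned is returned unchanged:
// ndt::make_unaligned(ndt::make_type<int8_t>()) == ndt::make_type<int8_t>()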