This page collects typical usage examples of the C++ COMPUTE_DBG function. If you have been struggling to work out what exactly COMPUTE_DBG does and how to use it, the hand-picked code examples below may help.
A total of 15 COMPUTE_DBG code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ examples.
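For context before the examples: COMPUTE_DBG is not a standard library function but a debug-logging macro from the r600 Gallium compute driver in Mesa. Its signature changed between Mesa releases, which is why some examples below pass the screen as the first argument while older ones pass only a format string. A rough sketch of the later form, assuming a definition along the lines of the one in r600_pipe.h (the exact body may differ between releases):

    #define COMPUTE_DBG(rscreen, fmt, args...) \
        do { \
            if ((rscreen)->b.debug_flags & DBG_COMPUTE) \
                fprintf(stderr, fmt, ##args); \
        } while (0)

In other words, each call below prints to stderr only when the compute debug flag is enabled on the screen, and costs a single flag test otherwise.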
Example 1: assert
struct pipe_resource *r600_compute_global_buffer_create(
    struct pipe_screen *screen,
    const struct pipe_resource *templ)
{
    assert(templ->target == PIPE_BUFFER);
    assert(templ->bind & PIPE_BIND_GLOBAL);
    assert(templ->array_size == 1 || templ->array_size == 0);
    assert(templ->depth0 == 1 || templ->depth0 == 0);
    assert(templ->height0 == 1 || templ->height0 == 0);

    struct r600_resource_global* result = (struct r600_resource_global*)
        CALLOC(sizeof(struct r600_resource_global), 1);
    struct r600_screen* rscreen = (struct r600_screen*)screen;

    COMPUTE_DBG("*** r600_compute_global_buffer_create\n");
    COMPUTE_DBG("width = %u array_size = %u\n", templ->width0,
        templ->array_size);

    result->base.b.vtbl = &r600_global_buffer_vtbl;
    result->base.b.b = *templ;
    /* Assign the screen after copying the template so it isn't overwritten. */
    result->base.b.b.screen = screen;
    pipe_reference_init(&result->base.b.b.reference, 1);

    /* The compute memory pool allocates in dwords. */
    int size_in_dw = (templ->width0 + 3) / 4;
    result->chunk = compute_memory_alloc(rscreen->global_pool, size_in_dw);

    if (result->chunk == NULL) {
        free(result);
        return NULL;
    }

    return &result->base.b.b;
}
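To see how example 1 gets invoked: this function is wired up as the driver's resource-creation path for PIPE_BIND_GLOBAL buffers, so a caller fills in a pipe_resource template first. A minimal hypothetical sketch (the screen variable and the sizes are assumptions; in real Gallium code this goes through screen->resource_create):

    struct pipe_resource templ;
    memset(&templ, 0, sizeof(templ));
    templ.target = PIPE_BUFFER;        /* required by the asserts above */
    templ.bind = PIPE_BIND_GLOBAL;
    templ.width0 = 4096;               /* size in bytes, rounded up to dwords */
    templ.height0 = 1;
    templ.depth0 = 1;
    templ.array_size = 1;

    struct pipe_resource *buf =
        r600_compute_global_buffer_create(screen, &templ);
    if (!buf) {
        /* allocation from the compute memory pool failed */
    }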
Example 2: COMPUTE_DBG
void *r600_compute_global_transfer_map(
    struct pipe_context *ctx_,
    struct pipe_resource *resource,
    unsigned level,
    unsigned usage,
    const struct pipe_box *box,
    struct pipe_transfer **ptransfer)
{
    struct r600_context *rctx = (struct r600_context*)ctx_;
    struct compute_memory_pool *pool = rctx->screen->global_pool;
    struct r600_resource_global* buffer =
        (struct r600_resource_global*)resource;

    COMPUTE_DBG(rctx->screen, "* r600_compute_global_transfer_map()\n"
        "level = %u, usage = %u, box(x = %u, y = %u, z = %u "
        "width = %u, height = %u, depth = %u)\n", level, usage,
        box->x, box->y, box->z, box->width, box->height,
        box->depth);
    COMPUTE_DBG(rctx->screen, "Buffer id = %u offset = "
        "%u (box.x)\n", buffer->chunk->id, box->x);

    compute_memory_finalize_pending(pool, ctx_);

    assert(resource->target == PIPE_BUFFER);
    assert(resource->bind & PIPE_BIND_GLOBAL);
    assert(box->x >= 0);
    assert(box->y == 0);
    assert(box->z == 0);

    ///TODO: do it better, mapping is not possible if the pool is too big
    return pipe_buffer_map_range(ctx_, (struct pipe_resource*)buffer->chunk->pool->bo,
        box->x + (buffer->chunk->start_in_dw * 4),
        box->width, usage, ptransfer);
}
Example 3: util_slab_alloc
void *r600_compute_global_transfer_map(
    struct pipe_context *ctx_,
    struct pipe_resource *resource,
    unsigned level,
    unsigned usage,
    const struct pipe_box *box,
    struct pipe_transfer **ptransfer)
{
    struct r600_context *rctx = (struct r600_context*)ctx_;
    struct compute_memory_pool *pool = rctx->screen->global_pool;
    struct pipe_transfer *transfer = util_slab_alloc(&rctx->pool_transfers);
    struct r600_resource_global* buffer =
        (struct r600_resource_global*)resource;
    uint32_t* map;

    compute_memory_finalize_pending(pool, ctx_);

    assert(resource->target == PIPE_BUFFER);

    COMPUTE_DBG(rctx->screen, "* r600_compute_global_get_transfer()\n"
        "level = %u, usage = %u, box(x = %u, y = %u, z = %u "
        "width = %u, height = %u, depth = %u)\n", level, usage,
        box->x, box->y, box->z, box->width, box->height,
        box->depth);

    transfer->resource = resource;
    transfer->level = level;
    transfer->usage = usage;
    transfer->box = *box;
    transfer->stride = 0;
    transfer->layer_stride = 0;

    assert(transfer->resource->target == PIPE_BUFFER);
    assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
    assert(transfer->box.x >= 0);
    assert(transfer->box.y == 0);
    assert(transfer->box.z == 0);

    ///TODO: do it better, mapping is not possible if the pool is too big
    COMPUTE_DBG(rctx->screen, "* r600_compute_global_transfer_map()\n");

    if (!(map = r600_buffer_mmap_sync_with_rings(rctx, buffer->chunk->pool->bo, transfer->usage))) {
        util_slab_free(&rctx->pool_transfers, transfer);
        return NULL;
    }

    *ptransfer = transfer;

    COMPUTE_DBG(rctx->screen, "Buffer: %p + %u (buffer offset in global memory) "
        "+ %u (box.x)\n", map, buffer->chunk->start_in_dw, transfer->box.x);
    return ((char*)(map + buffer->chunk->start_in_dw)) + transfer->box.x;
}
Example 4: compute_memory_demote_item
void *r600_compute_global_transfer_map(
    struct pipe_context *ctx_,
    struct pipe_resource *resource,
    unsigned level,
    unsigned usage,
    const struct pipe_box *box,
    struct pipe_transfer **ptransfer)
{
    struct r600_context *rctx = (struct r600_context*)ctx_;
    struct compute_memory_pool *pool = rctx->screen->global_pool;
    struct r600_resource_global* buffer =
        (struct r600_resource_global*)resource;
    struct compute_memory_item *item = buffer->chunk;
    struct pipe_resource *dst = NULL;
    unsigned offset = box->x;

    if (is_item_in_pool(item)) {
        compute_memory_demote_item(pool, item, ctx_);
    } else {
        if (item->real_buffer == NULL) {
            item->real_buffer =
                r600_compute_buffer_alloc_vram(pool->screen, item->size_in_dw * 4);
        }
    }

    dst = (struct pipe_resource*)item->real_buffer;

    if (usage & PIPE_TRANSFER_READ)
        buffer->chunk->status |= ITEM_MAPPED_FOR_READING;

    COMPUTE_DBG(rctx->screen, "* r600_compute_global_transfer_map()\n"
        "level = %u, usage = %u, box(x = %u, y = %u, z = %u "
        "width = %u, height = %u, depth = %u)\n", level, usage,
        box->x, box->y, box->z, box->width, box->height,
        box->depth);
    COMPUTE_DBG(rctx->screen, "Buffer id = %"PRIi64" offset = "
        "%u (box.x)\n", item->id, box->x);

    assert(resource->target == PIPE_BUFFER);
    assert(resource->bind & PIPE_BIND_GLOBAL);
    assert(box->x >= 0);
    assert(box->y == 0);
    assert(box->z == 0);

    ///TODO: do it better, mapping is not possible if the pool is too big
    return pipe_buffer_map_range(ctx_, dst,
        offset, box->width, usage, ptransfer);
}
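Examples 2-4 are successive revisions of the same driver hook. A minimal sketch of how a state tracker might drive any of them, assuming ctx is a pipe_context bound to this driver and res is a PIPE_BIND_GLOBAL buffer (u_box_1d and pipe_buffer_unmap are the standard Gallium helpers):

    struct pipe_box box;
    struct pipe_transfer *xfer = NULL;
    uint32_t *data;

    u_box_1d(0, 256, &box);  /* map the first 256 bytes */
    data = r600_compute_global_transfer_map(ctx, res, 0,
            PIPE_TRANSFER_READ, &box, &xfer);
    if (data) {
        uint32_t first_dword = data[0];  /* read back a result */
        (void)first_dword;
        pipe_buffer_unmap(ctx, xfer);
    }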
Example 5: CALLOC_STRUCT
void *evergreen_create_compute_state(
    struct pipe_context *ctx_,
    const struct pipe_compute_state *cso)
{
    struct r600_context *ctx = (struct r600_context *)ctx_;
    struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);
#ifdef HAVE_OPENCL
    const struct pipe_llvm_program_header *header;
    const unsigned char *code;
    unsigned i;

    COMPUTE_DBG(ctx->screen, "*** evergreen_create_compute_state\n");

    header = cso->prog;
    code = cso->prog + sizeof(struct pipe_llvm_program_header);
#endif

    shader->ctx = (struct r600_context*)ctx;
    shader->local_size = cso->req_local_mem; ///TODO: assert it
    shader->private_size = cso->req_private_mem;
    shader->input_size = cso->req_input_mem;

#ifdef HAVE_OPENCL
    shader->num_kernels = radeon_llvm_get_num_kernels(code, header->num_bytes);
    shader->kernels = CALLOC(sizeof(struct r600_kernel), shader->num_kernels);

    for (i = 0; i < shader->num_kernels; i++) {
        struct r600_kernel *kernel = &shader->kernels[i];
        kernel->llvm_module = radeon_llvm_get_kernel_module(i, code,
            header->num_bytes);
    }
#endif
    return shader;
}
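For orientation, example 5 implements the driver side of pipe_context::create_compute_state. A hypothetical caller would wrap the LLVM binary (header plus bitcode) in a pipe_compute_state and bind the result; pipe and llvm_blob are assumptions here, and in real code the call goes through pipe->create_compute_state rather than the driver function directly:

    struct pipe_compute_state cs;
    memset(&cs, 0, sizeof(cs));
    cs.prog = llvm_blob;             /* pipe_llvm_program_header + bitcode */
    cs.req_local_mem = 0;            /* bytes of OpenCL __local memory */
    cs.req_private_mem = 0;
    cs.req_input_mem = 64;           /* bytes of explicit kernel arguments */

    void *hw_state = evergreen_create_compute_state(pipe, &cs);
    pipe->bind_compute_state(pipe, hw_state);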
Example 6: evergreen_launch_grid
static void evergreen_launch_grid(
    struct pipe_context *ctx_,
    const uint *block_layout, const uint *grid_layout,
    uint32_t pc, const void *input)
{
    struct r600_context *ctx = (struct r600_context *)ctx_;
#ifdef HAVE_OPENCL
    COMPUTE_DBG(ctx->screen, "*** evergreen_launch_grid: pc = %u\n", pc);

    struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
    if (!shader->kernels[pc].code_bo) {
        void *p;
        struct r600_kernel *kernel = &shader->kernels[pc];
        r600_compute_shader_create(ctx_, kernel->llvm_module, &kernel->bc);
        kernel->code_bo = r600_compute_buffer_alloc_vram(ctx->screen,
            kernel->bc.ndw * 4);
        p = r600_buffer_mmap_sync_with_rings(ctx, kernel->code_bo, PIPE_TRANSFER_WRITE);
        memcpy(p, kernel->bc.bytecode, kernel->bc.ndw * 4);
        ctx->ws->buffer_unmap(kernel->code_bo->cs_buf);
    }
#endif

    ctx->cs_shader_state.kernel_index = pc;
    evergreen_compute_upload_input(ctx_, block_layout, grid_layout, input);
    compute_emit_cs(ctx, block_layout, grid_layout);
}
Example 7: evergreen_delete_compute_state
void evergreen_delete_compute_state(struct pipe_context *ctx_, void *state)
{
    struct r600_context *ctx = (struct r600_context *)ctx_;
    COMPUTE_DBG(ctx->screen, "*** evergreen_delete_compute_state\n");
    struct r600_pipe_compute *shader = state;

    if (!shader)
        return;

#ifdef HAVE_OPENCL
#if HAVE_LLVM < 0x0306
    for (unsigned i = 0; i < shader->num_kernels; i++) {
        struct r600_kernel *kernel = &shader->kernels[i];
        LLVMDisposeModule(kernel->llvm_module);
    }
    FREE(shader->kernels);
    LLVMContextDispose(shader->llvm_ctx);
#else
    radeon_shader_binary_clean(&shader->binary);
    r600_destroy_shader(&shader->bc);

    /* TODO destroy shader->code_bo, shader->const_bo
     * we'll need something like r600_buffer_free */
#endif
#endif

    FREE(shader);
}
Example 8: evergreen_set_compute_resources
static void evergreen_set_compute_resources(struct pipe_context *ctx_,
    unsigned start, unsigned count,
    struct pipe_surface **surfaces)
{
    struct r600_context *ctx = (struct r600_context *)ctx_;
    struct r600_surface **resources = (struct r600_surface **)surfaces;

    COMPUTE_DBG("*** evergreen_set_compute_resources: start = %u count = %u\n",
        start, count);

    for (int i = 0; i < count; i++) {
        /* The first two vertex buffers are reserved for parameters and
         * global buffers. */
        unsigned vtx_id = 2 + i;
        if (resources[i]) {
            struct r600_resource_global *buffer =
                (struct r600_resource_global*)
                resources[i]->base.texture;
            if (resources[i]->base.writable) {
                assert(i + 1 < 12);

                evergreen_set_rat(ctx->cs_shader_state.shader, i + 1,
                    (struct r600_resource *)resources[i]->base.texture,
                    buffer->chunk->start_in_dw * 4,
                    resources[i]->base.texture->width0);
            }

            evergreen_cs_set_vertex_buffer(ctx, vtx_id,
                buffer->chunk->start_in_dw * 4,
                resources[i]->base.texture);
        }
    }
}
Example 9: evergreen_set_global_binding
static void evergreen_set_global_binding(
    struct pipe_context *ctx_, unsigned first, unsigned n,
    struct pipe_resource **resources,
    uint32_t **handles)
{
    struct r600_context *ctx = (struct r600_context *)ctx_;
    struct compute_memory_pool *pool = ctx->screen->global_pool;
    struct r600_resource_global **buffers =
        (struct r600_resource_global **)resources;

    COMPUTE_DBG("*** evergreen_set_global_binding first = %u n = %u\n",
        first, n);

    if (!resources) {
        /* XXX: Unset */
        return;
    }

    compute_memory_finalize_pending(pool, ctx_);

    for (int i = 0; i < n; i++) {
        assert(resources[i]->target == PIPE_BUFFER);
        assert(resources[i]->bind & PIPE_BIND_GLOBAL);

        *(handles[i]) = buffers[i]->chunk->start_in_dw * 4;
    }

    evergreen_set_rat(ctx->cs_shader_state.shader, 0, pool->bo, 0, pool->size_in_dw * 4);
    evergreen_cs_set_vertex_buffer(ctx, 1, 0,
        (struct pipe_resource*)pool->bo);
}
Example 10: CALLOC_STRUCT
static void *evergreen_create_compute_state(struct pipe_context *ctx,
    const struct pipe_compute_state *cso)
{
    struct r600_context *rctx = (struct r600_context *)ctx;
    struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);
#ifdef HAVE_OPENCL
    const struct pipe_llvm_program_header *header;
    const char *code;
    void *p;
    boolean use_kill;

    COMPUTE_DBG(rctx->screen, "*** evergreen_create_compute_state\n");
    header = cso->prog;
    code = cso->prog + sizeof(struct pipe_llvm_program_header);
    radeon_shader_binary_init(&shader->binary);
    radeon_elf_read(code, header->num_bytes, &shader->binary);
    r600_create_shader(&shader->bc, &shader->binary, &use_kill);

    /* Upload code + ROdata */
    shader->code_bo = r600_compute_buffer_alloc_vram(rctx->screen,
        shader->bc.ndw * 4);
    p = r600_buffer_map_sync_with_rings(&rctx->b, shader->code_bo, PIPE_TRANSFER_WRITE);
    //TODO: use util_memcpy_cpu_to_le32 ?
    memcpy(p, shader->bc.bytecode, shader->bc.ndw * 4);
    rctx->b.ws->buffer_unmap(shader->code_bo->buf);
#endif

    shader->ctx = rctx;
    shader->local_size = cso->req_local_mem;
    shader->private_size = cso->req_private_mem;
    shader->input_size = cso->req_input_mem;

    return shader;
}
Example 11: r600_compute_global_transfer_map
void *r600_compute_global_transfer_map(
    struct pipe_context *ctx_,
    struct pipe_transfer *transfer)
{
    assert(transfer->resource->target == PIPE_BUFFER);
    assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
    assert(transfer->box.x >= 0);
    assert(transfer->box.y == 0);
    assert(transfer->box.z == 0);

    struct r600_context *ctx = (struct r600_context *)ctx_;
    struct r600_resource_global *buffer =
        (struct r600_resource_global*)transfer->resource;
    uint32_t *map;

    ///TODO: do it better, mapping is not possible if the pool is too big
    if (!(map = ctx->ws->buffer_map(buffer->chunk->pool->bo->cs_buf,
            ctx->cs, transfer->usage))) {
        return NULL;
    }

    COMPUTE_DBG("buffer start: %lli\n", buffer->chunk->start_in_dw);
    return ((char*)(map + buffer->chunk->start_in_dw)) + transfer->box.x;
}
Example 12: evergreen_bind_compute_state
static void evergreen_bind_compute_state(struct pipe_context *ctx_, void *state)
{
    struct r600_context *ctx = (struct r600_context *)ctx_;

    COMPUTE_DBG("*** evergreen_bind_compute_state\n");

    ctx->cs_shader_state.shader = (struct r600_pipe_compute *)state;
}
Example 13: evergreen_compute_upload_input
/* The kernel parameters are stored in a vtx buffer (ID=0). Besides the
 * explicit kernel parameters, there are implicit parameters that need to be
 * stored in the vertex buffer as well. Here is how these parameters are
 * organized in the buffer:
 *
 * DWORDS 0-2: Number of work groups in each dimension (x,y,z)
 * DWORDS 3-5: Number of global work items in each dimension (x,y,z)
 * DWORDS 6-8: Number of work items within each work group in each dimension
 *             (x,y,z)
 * DWORDS 9+ : Kernel parameters
 */
void evergreen_compute_upload_input(
    struct pipe_context *ctx_,
    const uint *block_layout,
    const uint *grid_layout,
    const void *input)
{
    struct r600_context *ctx = (struct r600_context *)ctx_;
    struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
    int i;

    /* We need to reserve 9 dwords (36 bytes) for the implicit kernel
     * parameters.
     */
    unsigned input_size = shader->input_size + 36;
    uint32_t *num_work_groups_start;
    uint32_t *global_size_start;
    uint32_t *local_size_start;
    uint32_t *kernel_parameters_start;

    if (shader->input_size == 0) {
        return;
    }

    if (!shader->kernel_param) {
        /* Add space for the grid dimensions */
        shader->kernel_param = r600_compute_buffer_alloc_vram(
            ctx->screen, input_size);
    }

    num_work_groups_start = r600_buffer_mmap_sync_with_rings(ctx, shader->kernel_param, PIPE_TRANSFER_WRITE);
    global_size_start = num_work_groups_start + (3 * (sizeof(uint) / 4));
    local_size_start = global_size_start + (3 * (sizeof(uint) / 4));
    kernel_parameters_start = local_size_start + (3 * (sizeof(uint) / 4));

    /* Copy the work group count for each dimension */
    memcpy(num_work_groups_start, grid_layout, 3 * sizeof(uint));

    /* Compute the global size: work groups times work items per group */
    for (i = 0; i < 3; i++) {
        global_size_start[i] = grid_layout[i] * block_layout[i];
    }

    /* Copy the local dimensions */
    memcpy(local_size_start, block_layout, 3 * sizeof(uint));

    /* Copy the kernel inputs */
    memcpy(kernel_parameters_start, input, shader->input_size);

    for (i = 0; i < (input_size / 4); i++) {
        COMPUTE_DBG(ctx->screen, "input %i : %i\n", i,
            ((unsigned*)num_work_groups_start)[i]);
    }

    ctx->ws->buffer_unmap(shader->kernel_param->cs_buf);

    /* ID=0 is reserved for the parameters */
    evergreen_cs_set_constant_buffer(ctx, 0, 0, input_size,
        (struct pipe_resource*)shader->kernel_param);
}
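The dword layout documented at the top of example 13 can be pictured as a struct. This type does not exist in the driver (the code writes raw dwords through the mapped pointers above); it is only an illustration of what the kernel sees at vtx buffer ID=0:

    struct compute_input_prefix {       /* hypothetical, for illustration */
        uint32_t num_work_groups[3];    /* DWORDS 0-2: grid_layout */
        uint32_t global_size[3];        /* DWORDS 3-5: grid * block */
        uint32_t local_size[3];         /* DWORDS 6-8: block_layout */
        /* kernel parameters follow at DWORD 9 */
    };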
Example 14: evergreen_launch_grid
static void evergreen_launch_grid(
    struct pipe_context *ctx_,
    const uint *block_layout, const uint *grid_layout,
    uint32_t pc, const void *input)
{
    struct r600_context *ctx = (struct r600_context *)ctx_;
#ifdef HAVE_OPENCL
    struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
    boolean use_kill;

#if HAVE_LLVM < 0x0306
    struct r600_kernel *kernel = &shader->kernels[pc];
    (void)use_kill;
    if (!kernel->code_bo) {
        void *p;
        struct r600_bytecode *bc = &kernel->bc;
        LLVMModuleRef mod = kernel->llvm_module;
        boolean use_kill = false;
        bool dump = (ctx->screen->b.debug_flags & DBG_CS) != 0;
        unsigned use_sb = ctx->screen->b.debug_flags & DBG_SB_CS;
        unsigned sb_disasm = use_sb ||
            (ctx->screen->b.debug_flags & DBG_SB_DISASM);

        r600_bytecode_init(bc, ctx->b.chip_class, ctx->b.family,
            ctx->screen->has_compressed_msaa_texturing);
        bc->type = TGSI_PROCESSOR_COMPUTE;
        bc->isa = ctx->isa;
        r600_llvm_compile(mod, ctx->b.family, bc, &use_kill, dump, &ctx->b.debug);

        if (dump && !sb_disasm) {
            r600_bytecode_disasm(bc);
        } else if ((dump && sb_disasm) || use_sb) {
            if (r600_sb_bytecode_process(ctx, bc, NULL, dump, use_sb))
                R600_ERR("r600_sb_bytecode_process failed!\n");
        }

        kernel->code_bo = r600_compute_buffer_alloc_vram(ctx->screen,
            kernel->bc.ndw * 4);
        p = r600_buffer_map_sync_with_rings(&ctx->b, kernel->code_bo, PIPE_TRANSFER_WRITE);
        memcpy(p, kernel->bc.bytecode, kernel->bc.ndw * 4);
        ctx->b.ws->buffer_unmap(kernel->code_bo->buf);
    }
    shader->active_kernel = kernel;
    ctx->cs_shader_state.kernel_index = pc;
#else
    ctx->cs_shader_state.pc = pc;
    /* Get the config information for this kernel. */
    r600_shader_binary_read_config(&shader->binary, &shader->bc, pc, &use_kill);
#endif
#endif

    COMPUTE_DBG(ctx->screen, "*** evergreen_launch_grid: pc = %u\n", pc);

    evergreen_compute_upload_input(ctx_, block_layout, grid_layout, input);
    compute_emit_cs(ctx, block_layout, grid_layout);
}
Example 15: evergreen_set_global_binding
static void evergreen_set_global_binding(struct pipe_context *ctx,
    unsigned first, unsigned n,
    struct pipe_resource **resources,
    uint32_t **handles)
{
    struct r600_context *rctx = (struct r600_context *)ctx;
    struct compute_memory_pool *pool = rctx->screen->global_pool;
    struct r600_resource_global **buffers =
        (struct r600_resource_global **)resources;
    unsigned i;

    COMPUTE_DBG(rctx->screen, "*** evergreen_set_global_binding first = %u n = %u\n",
        first, n);

    if (!resources) {
        /* XXX: Unset */
        return;
    }

    /* We mark these items for promotion to the pool if they
     * aren't already there */
    for (i = first; i < first + n; i++) {
        struct compute_memory_item *item = buffers[i]->chunk;

        if (!is_item_in_pool(item))
            buffers[i]->chunk->status |= ITEM_FOR_PROMOTING;
    }

    if (compute_memory_finalize_pending(pool, ctx) == -1) {
        /* XXX: Unset */
        return;
    }

    for (i = first; i < first + n; i++) {
        uint32_t buffer_offset;
        uint32_t handle;

        assert(resources[i]->target == PIPE_BUFFER);
        assert(resources[i]->bind & PIPE_BIND_GLOBAL);

        buffer_offset = util_le32_to_cpu(*(handles[i]));
        handle = buffer_offset + buffers[i]->chunk->start_in_dw * 4;

        *(handles[i]) = util_cpu_to_le32(handle);
    }

    /* globals for writing */
    evergreen_set_rat(rctx->cs_shader_state.shader, 0, pool->bo, 0, pool->size_in_dw * 4);
    /* globals for reading */
    evergreen_cs_set_vertex_buffer(rctx, 1, 0,
        (struct pipe_resource*)pool->bo);

    /* constants for reading, LLVM puts them in text segment */
    evergreen_cs_set_vertex_buffer(rctx, 2, 0,
        (struct pipe_resource*)rctx->cs_shader_state.shader->code_bo);
}