This article collects typical usage examples of the C++ function drm_intel_bo_alloc, for readers wondering exactly what the function does and how to call it in practice. The 15 hand-picked code examples below, drawn from open-source projects, are ordered by popularity.
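For reference, this is the allocator's declaration as found in libdrm's intel_bufmgr.h: name is purely a debug label attached to the buffer object, size is in bytes, and alignment is a byte alignment hint (0 requests the default). The call returns NULL on failure, and the returned BO carries one reference, released with drm_intel_bo_unreference():

drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
                                 unsigned long size, unsigned int alignment);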
Example 1: main
int main(int argc, char **argv)
{
    int fd;
    int devid;

    igt_skip_on_simulation();

    if (argc != 1) {
        fprintf(stderr, "usage: %s\n", argv[0]);
        igt_fail(-1);
    }

    fd = drm_open_any();
    devid = intel_get_drm_devid(fd);

    if (!HAS_BLT_RING(devid)) {
        fprintf(stderr, "not (yet) implemented for pre-snb\n");
        return 77;
    }

    bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
    if (!bufmgr) {
        fprintf(stderr, "failed to init libdrm\n");
        igt_fail(-1);
    }
    drm_intel_bufmgr_gem_enable_reuse(bufmgr);

    batch = intel_batchbuffer_alloc(bufmgr, devid);
    if (!batch) {
        fprintf(stderr, "failed to create batch buffer\n");
        igt_fail(-1);
    }

    target_buffer = drm_intel_bo_alloc(bufmgr, "target bo", 4096, 4096);
    if (!target_buffer) {
        fprintf(stderr, "failed to alloc target buffer\n");
        igt_fail(-1);
    }

    blt_bo = drm_intel_bo_alloc(bufmgr, "blt bo", 4*4096*4096, 4096);
    if (!blt_bo) {
        fprintf(stderr, "failed to alloc blt buffer\n");
        igt_fail(-1);
    }

    dummy_reloc_loop();

    drm_intel_bo_unreference(target_buffer);
    intel_batchbuffer_free(batch);
    drm_intel_bufmgr_destroy(bufmgr);

    close(fd);
    return 0;
}
Example 2: brw_upload_vs_pull_constants
/* Creates a new VS constant buffer reflecting the current VS program's
 * constants, if needed by the VS program.
 *
 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
 * state atom.
 */
static void
brw_upload_vs_pull_constants(struct brw_context *brw)
{
    struct gl_context *ctx = &brw->intel.ctx;
    struct intel_context *intel = &brw->intel;
    /* BRW_NEW_VERTEX_PROGRAM */
    struct brw_vertex_program *vp =
        (struct brw_vertex_program *) brw->vertex_program;
    const struct gl_program_parameter_list *params = vp->program.Base.Parameters;
    int i;

    if (vp->program.IsNVProgram)
        _mesa_load_tracked_matrices(ctx);

    /* Updates the ParameterValues[i] pointers for all parameters of the
     * basic type of PROGRAM_STATE_VAR.
     */
    _mesa_load_state_parameters(&brw->intel.ctx, vp->program.Base.Parameters);

    /* CACHE_NEW_VS_PROG */
    if (!brw->vs.prog_data->nr_pull_params) {
        if (brw->vs.const_bo) {
            drm_intel_bo_unreference(brw->vs.const_bo);
            brw->vs.const_bo = NULL;
            brw->bind.surf_offset[SURF_INDEX_VERT_CONST_BUFFER] = 0;
            brw->state.dirty.brw |= BRW_NEW_VS_CONSTBUF;
        }
        return;
    }

    /* _NEW_PROGRAM_CONSTANTS */
    drm_intel_bo_unreference(brw->vs.const_bo);
    brw->vs.const_bo = drm_intel_bo_alloc(intel->bufmgr, "vp_const_buffer",
                                          brw->vs.prog_data->nr_pull_params * 4,
                                          64);

    drm_intel_gem_bo_map_gtt(brw->vs.const_bo);
    for (i = 0; i < brw->vs.prog_data->nr_pull_params; i++) {
        memcpy(brw->vs.const_bo->virtual + i * 4,
               brw->vs.prog_data->pull_param[i],
               4);
    }

    if (0) {
        for (i = 0; i < params->NumParameters; i++) {
            float *row = (float *)brw->vs.const_bo->virtual + i * 4;
            printf("vs const surface %3d: %4.3f %4.3f %4.3f %4.3f\n",
                   i, row[0], row[1], row[2], row[3]);
        }
    }
    drm_intel_gem_bo_unmap_gtt(brw->vs.const_bo);

    const int surf = SURF_INDEX_VERT_CONST_BUFFER;
    intel->vtbl.create_constant_surface(brw, brw->vs.const_bo,
                                        params->NumParameters,
                                        &brw->bind.surf_offset[surf]);
    brw->state.dirty.brw |= BRW_NEW_VS_CONSTBUF;
}
Example 3: intel_upload_space
/**
 * Interface for getting memory for uploading streamed data to the GPU
 *
 * In most cases, streamed data (for GPU state structures, for example) is
 * uploaded through brw_state_batch(), since that interface allows relocations
 * from the streamed space returned to other BOs. However, that interface has
 * the restriction that the amount of space allocated has to be "small" (see
 * estimated_max_prim_size in brw_draw.c).
 *
 * This interface, on the other hand, is able to handle arbitrary sized
 * allocation requests, though it will batch small allocations into the same
 * BO for efficiency and reduced memory footprint.
 *
 * \note The returned pointer is valid only until intel_upload_finish(), which
 * will happen at batch flush or the next
 * intel_upload_space()/intel_upload_data().
 *
 * \param out_bo Pointer to a BO, which must point to a valid BO or NULL on
 * entry, and will have a reference to the new BO containing the state on
 * return.
 *
 * \param out_offset Offset within the buffer object that the data will land.
 */
void *
intel_upload_space(struct brw_context *brw,
                   uint32_t size,
                   uint32_t alignment,
                   drm_intel_bo **out_bo,
                   uint32_t *out_offset)
{
    uint32_t offset;

    offset = ALIGN_NPOT(brw->upload.next_offset, alignment);
    if (brw->upload.bo && offset + size > brw->upload.bo->size) {
        intel_upload_finish(brw);
        offset = 0;
    }

    if (!brw->upload.bo) {
        brw->upload.bo = drm_intel_bo_alloc(brw->bufmgr, "streamed data",
                                            MAX2(INTEL_UPLOAD_SIZE, size), 4096);
        if (brw->has_llc)
            drm_intel_bo_map(brw->upload.bo, true);
        else
            drm_intel_gem_bo_map_gtt(brw->upload.bo);
    }

    brw->upload.next_offset = offset + size;

    *out_offset = offset;
    if (*out_bo != brw->upload.bo) {
        drm_intel_bo_unreference(*out_bo);
        *out_bo = brw->upload.bo;
        drm_intel_bo_reference(brw->upload.bo);
    }

    return brw->upload.bo->virtual + offset;
}
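As a usage illustration (a minimal sketch, not from the original source; brw, payload, and nbytes are hypothetical names), a caller streams data through this interface roughly as follows, which is essentially what the companion helper intel_upload_data() wraps up:

/* Sketch of a caller; "brw", "payload" and "nbytes" are illustrative. */
drm_intel_bo *bo = NULL;       /* must be NULL or a valid BO on entry */
uint32_t offset;
void *dst = intel_upload_space(brw, nbytes, 64, &bo, &offset);
memcpy(dst, payload, nbytes);  /* pointer valid until intel_upload_finish() */
/* ... emit GPU commands that read from "bo" at "offset" ... */
drm_intel_bo_unreference(bo);  /* drop the reference handed to the caller */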
Example 4: test_i915_nv_reimport_twice_check_flink_name
static void test_i915_nv_reimport_twice_check_flink_name(void)
{
    drm_intel_bo *test_intel_bo;
    int prime_fd;
    struct nouveau_bo *nvbo = NULL, *nvbo2 = NULL;
    uint32_t flink_name1, flink_name2;

    test_intel_bo = drm_intel_bo_alloc(bufmgr, "test bo", BO_SIZE, 4096);

    igt_assert(drm_intel_bo_gem_export_to_prime(test_intel_bo, &prime_fd) == 0);
    igt_assert(nouveau_bo_prime_handle_ref(ndev, prime_fd, &nvbo) == 0);
    close(prime_fd);

    /* create a new dma-buf */
    igt_assert(drm_intel_bo_gem_export_to_prime(test_intel_bo, &prime_fd) == 0);
    igt_assert(nouveau_bo_prime_handle_ref(ndev2, prime_fd, &nvbo2) == 0);
    close(prime_fd);

    igt_assert(nouveau_bo_name_get(nvbo, &flink_name1) == 0);
    igt_assert(nouveau_bo_name_get(nvbo2, &flink_name2) == 0);
    igt_assert_eq_u32(flink_name1, flink_name2);

    nouveau_bo_ref(NULL, &nvbo2);
    nouveau_bo_ref(NULL, &nvbo);
    drm_intel_bo_unreference(test_intel_bo);
}
Example 5: intel_bufferobj_flush_mapped_range
/* Ideally we'd use a BO to avoid taking up cache space for the temporary
 * data, but FlushMappedBufferRange may be followed by further writes to
 * the pointer, so we would have to re-map after emitting our blit, which
 * would defeat the point.
 */
static void
intel_bufferobj_flush_mapped_range(GLcontext *ctx, GLenum target,
                                   GLintptr offset, GLsizeiptr length,
                                   struct gl_buffer_object *obj)
{
    struct intel_context *intel = intel_context(ctx);
    struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
    drm_intel_bo *temp_bo;

    /* Unless we're in the range map using a temporary system buffer,
     * there's no work to do.
     */
    if (intel_obj->range_map_buffer == NULL)
        return;

    temp_bo = drm_intel_bo_alloc(intel->bufmgr, "range map flush", length, 64);

    drm_intel_bo_subdata(temp_bo, 0, length, intel_obj->range_map_buffer);

    intel_emit_linear_blit(intel,
                           intel_obj->buffer, obj->Offset + offset,
                           temp_bo, 0,
                           length);

    drm_intel_bo_unreference(temp_bo);
}
Example 6: intel_bufferobj_subdata
/**
 * Replace data in a subrange of buffer object. If the data range
 * specified by size + offset extends beyond the end of the buffer or
 * if data is NULL, no copy is performed.
 * Called via glBufferSubDataARB().
 */
static void
intel_bufferobj_subdata(struct gl_context * ctx,
                        GLintptrARB offset,
                        GLsizeiptrARB size,
                        const GLvoid * data, struct gl_buffer_object *obj)
{
    struct intel_context *intel = intel_context(ctx);
    struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
    bool busy;

    if (size == 0)
        return;

    assert(intel_obj);

    /* If we have a single copy in system memory, update that */
    if (intel_obj->sys_buffer) {
        if (intel_obj->source)
            release_buffer(intel_obj);

        if (intel_obj->buffer == NULL) {
            memcpy((char *)intel_obj->sys_buffer + offset, data, size);
            return;
        }

        free(intel_obj->sys_buffer);
        intel_obj->sys_buffer = NULL;
    }

    /* Otherwise we need to update the copy in video memory. */
    busy =
        drm_intel_bo_busy(intel_obj->buffer) ||
        drm_intel_bo_references(intel->batch.bo, intel_obj->buffer);

    if (busy) {
        if (size == intel_obj->Base.Size) {
            /* Replace the current busy bo with fresh data. */
            drm_intel_bo_unreference(intel_obj->buffer);
            intel_bufferobj_alloc_buffer(intel, intel_obj);
            drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
        } else {
            perf_debug("Using a blit copy to avoid stalling on %ldb "
                       "glBufferSubData() to a busy buffer object.\n",
                       (long)size);
            drm_intel_bo *temp_bo =
                drm_intel_bo_alloc(intel->bufmgr, "subdata temp", size, 64);

            drm_intel_bo_subdata(temp_bo, 0, size, data);

            intel_emit_linear_blit(intel,
                                   intel_obj->buffer, offset,
                                   temp_bo, 0,
                                   size);

            drm_intel_bo_unreference(temp_bo);
        }
    } else {
        drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
    }
}
Example 7: intel_bufferobj_alloc_buffer
/** Allocates a new drm_intel_bo to store the data for the buffer object. */
static void
intel_bufferobj_alloc_buffer(struct intel_context *intel,
                             struct intel_buffer_object *intel_obj)
{
    intel_obj->buffer = drm_intel_bo_alloc(intel->bufmgr, "bufferobj",
                                           intel_obj->Base.Size, 64);
}
Example 8: i915_drm_buffer_create
static struct i915_winsys_buffer *
i915_drm_buffer_create(struct i915_winsys *iws,
                       unsigned size,
                       enum i915_winsys_buffer_type type)
{
    struct i915_drm_buffer *buf = CALLOC_STRUCT(i915_drm_buffer);
    struct i915_drm_winsys *idws = i915_drm_winsys(iws);

    if (!buf)
        return NULL;

    buf->magic = 0xDEAD1337;
    buf->flinked = FALSE;
    buf->flink = 0;

    buf->bo = drm_intel_bo_alloc(idws->gem_manager,
                                 i915_drm_type_to_name(type), size, 0);
    if (!buf->bo)
        goto err;

    return (struct i915_winsys_buffer *)buf;

err:
    assert(0);
    FREE(buf);
    return NULL;
}
Example 9: test2
static int test2(void)
{
    drm_intel_bo *test_intel_bo;
    uint32_t fb_id;
    drmModeClip clip;
    int prime_fd;
    uint32_t udl_handle = 0;
    int ret;

    test_intel_bo = drm_intel_bo_alloc(bufmgr, "test bo", BO_SIZE, 4096);

    drm_intel_bo_gem_export_to_prime(test_intel_bo, &prime_fd);

    ret = drmPrimeFDToHandle(udl_fd, prime_fd, &udl_handle);
    if (ret)
        goto out;

    ret = drmModeAddFB(udl_fd, 640, 480, 16, 16, 640, udl_handle, &fb_id);
    if (ret)
        goto out;

    clip.x1 = 0;
    clip.y1 = 0;
    clip.x2 = 10;
    clip.y2 = 10;

    /* Fall through to the cleanup below so the handle and BO are released
     * whether or not drmModeDirtyFB succeeds. */
    ret = drmModeDirtyFB(udl_fd, fb_id, &clip, 1);
out:
    dumb_bo_destroy(udl_fd, udl_handle);
    drm_intel_bo_unreference(test_intel_bo);
    return ret;
}
Example 10: XvMCCreateSurface
/*
 * Function: XvMCCreateSurface
 */
_X_EXPORT Status XvMCCreateSurface(Display * display, XvMCContext * context,
                                   XvMCSurface * surface)
{
    Status ret;
    int priv_count;
    CARD32 *priv_data;
    intel_xvmc_surface_ptr intel_surf = NULL;
    struct intel_xvmc_context *intel_ctx;

    if (!display || !context)
        return XvMCBadContext;

    if (!surface)
        return XvMCBadSurface;

    intel_ctx = context->privData;

    if ((ret = _xvmc_create_surface(display, context, surface,
                                    &priv_count, &priv_data))) {
        XVMC_ERR("Unable to create XvMCSurface.");
        return ret;
    }

    XFree(priv_data);

    surface->privData = calloc(1, sizeof(struct intel_xvmc_surface));
    if (!(intel_surf = surface->privData)) {
        PPTHREAD_MUTEX_UNLOCK();
        return BadAlloc;
    }

    intel_surf->bo = drm_intel_bo_alloc(xvmc_driver->bufmgr,
                                        "surface",
                                        intel_ctx->surface_bo_size,
                                        GTT_PAGE_SIZE);
    if (!intel_surf->bo) {
        free(intel_surf);
        return BadAlloc;
    }

    drm_intel_bo_disable_reuse(intel_surf->bo);

    intel_surf = surface->privData;
    intel_surf->context = context;

    intel_surf->image = XvCreateImage(display, context->port,
                                      FOURCC_XVMC,
                                      (char *) &intel_surf->gem_handle,
                                      surface->width, surface->height);
    if (!intel_surf->image) {
        XVMC_ERR("Can't create XvImage for surface\n");
        free(intel_surf);
        _xvmc_destroy_surface(display, surface);
        return BadAlloc;
    }

    return Success;
}
Example 11: brw_new_transform_feedback
struct gl_transform_feedback_object *
brw_new_transform_feedback(struct gl_context *ctx, GLuint name)
{
    struct brw_context *brw = brw_context(ctx);
    struct brw_transform_feedback_object *brw_obj =
        CALLOC_STRUCT(brw_transform_feedback_object);

    if (!brw_obj)
        return NULL;

    _mesa_init_transform_feedback_object(&brw_obj->base, name);

    brw_obj->offset_bo =
        drm_intel_bo_alloc(brw->bufmgr, "transform feedback offsets", 16, 64);
    brw_obj->prim_count_bo =
        drm_intel_bo_alloc(brw->bufmgr, "xfb primitive counts", 4096, 64);

    return &brw_obj->base;
}
Example 12: wrap_buffers
static void wrap_buffers(struct intel_context *intel, GLuint size)
{
    intel_upload_finish(intel);

    if (size < INTEL_UPLOAD_SIZE)
        size = INTEL_UPLOAD_SIZE;

    intel->upload.bo = drm_intel_bo_alloc(intel->bufmgr, "upload", size, 0);
    intel->upload.offset = 0;
}
Example 13: init_buffer
static void init_buffer(drm_intel_bufmgr *bufmgr,
                        struct scratch_buf *buf,
                        uint32_t size)
{
    buf->bo = drm_intel_bo_alloc(bufmgr, "", size, 4096);
    buf->size = size;
    assert(buf->bo);
    buf->tiling = I915_TILING_NONE;
    buf->stride = 4096;
}
Example 14: main
int main(int argc, char **argv)
{
    int fd;
    int i;
    drm_intel_bo *src_bo, *dst_bo;

    fd = drm_open_any();

    bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
    drm_intel_bufmgr_gem_enable_reuse(bufmgr);
    batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));

    src_bo = drm_intel_bo_alloc(bufmgr, "src bo", size, 4096);
    dst_bo = drm_intel_bo_alloc(bufmgr, "dst bo", size, 4096);

    /* The ring we've been using is 128k, and each rendering op
     * will use at least 8 dwords:
     *
     * BATCH_START
     * BATCH_START offset
     * MI_FLUSH
     * STORE_DATA_INDEX
     * STORE_DATA_INDEX offset
     * STORE_DATA_INDEX value
     * MI_USER_INTERRUPT
     * (padding)
     *
     * So iterate just a little more than that: 128 KiB divided by 32 bytes
     * per op is 4096 ops, and 1.25x that is 5120 iterations. If we don't
     * fill the ring doing this, we aren't likely to with this test.
     */
    for (i = 0; i < 128 * 1024 / (8 * 4) * 1.25; i++) {
        intel_copy_bo(batch, dst_bo, src_bo, width, height);
        intel_batchbuffer_flush(batch);
    }

    intel_batchbuffer_free(batch);
    drm_intel_bufmgr_destroy(bufmgr);

    close(fd);
    return 0;
}
Example 15: main
int main(int argc, char **argv)
{
    int fd, i;

    fd = drm_open_any();

    bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
    if (!bufmgr) {
        fprintf(stderr, "failed to init libdrm\n");
        exit(-1);
    }
    /* don't enable buffer reuse!! */
    //drm_intel_bufmgr_gem_enable_reuse(bufmgr);

    batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));
    assert(batch);

    /* put some load onto the gpu to keep the light buffers active for long
     * enough */
    for (i = 0; i < 1000; i++) {
        load_bo = drm_intel_bo_alloc(bufmgr, "target bo", 1024*4096, 4096);
        if (!load_bo) {
            fprintf(stderr, "failed to alloc target buffer\n");
            exit(-1);
        }

        BEGIN_BATCH(8);
        OUT_BATCH(XY_SRC_COPY_BLT_CMD |
                  XY_SRC_COPY_BLT_WRITE_ALPHA |
                  XY_SRC_COPY_BLT_WRITE_RGB);
        OUT_BATCH((3 << 24) | /* 32 bits */
                  (0xcc << 16) | /* copy ROP */
                  4096);
        OUT_BATCH(0); /* dst x1,y1 */
        OUT_BATCH((1024 << 16) | 512); /* dst x2,y2 */
        OUT_RELOC(load_bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
        OUT_BATCH((0 << 16) | 512); /* src x1,y1 */
        OUT_BATCH(4096); /* src pitch */
        OUT_RELOC(load_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
        ADVANCE_BATCH();

        intel_batchbuffer_flush(batch);

        drm_intel_bo_disable_reuse(load_bo);
        drm_intel_bo_unreference(load_bo);
    }

    drm_intel_bufmgr_destroy(bufmgr);
    close(fd);
    return 0;
}