本文整理汇总了C++中drm_intel_bo_unreference函数的典型用法代码示例。如果您正苦于以下问题:C++ drm_intel_bo_unreference函数的具体用法?C++ drm_intel_bo_unreference怎么用?C++ drm_intel_bo_unreference使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了drm_intel_bo_unreference函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: test_nv_i915_reimport_twice_check_flink_name
/*
 * Export one nouveau BO through PRIME twice, import each dma-buf fd into a
 * different intel bufmgr, and verify both imports resolve to the same
 * flink (global GEM) name, i.e. alias the same underlying object.
 */
static void test_nv_i915_reimport_twice_check_flink_name(void)
{
	drm_intel_bo *import1 = NULL, *import2 = NULL;
	struct nouveau_bo *nvbo = NULL;
	uint32_t name1, name2;
	int prime_fd;

	igt_assert(nouveau_bo_new(ndev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
				  0, BO_SIZE, NULL, &nvbo) == 0);

	/* First export/import cycle. */
	igt_assert(nouveau_bo_set_prime(nvbo, &prime_fd) == 0);
	import1 = drm_intel_bo_gem_create_from_prime(bufmgr, prime_fd, BO_SIZE);
	igt_assert(import1);
	close(prime_fd);

	/* Second export/import cycle, into a second bufmgr. */
	igt_assert(nouveau_bo_set_prime(nvbo, &prime_fd) == 0);
	import2 = drm_intel_bo_gem_create_from_prime(bufmgr2, prime_fd, BO_SIZE);
	igt_assert(import2);
	close(prime_fd);

	/* Same underlying object => same flink name. */
	igt_assert(drm_intel_bo_flink(import1, &name1) == 0);
	igt_assert(drm_intel_bo_flink(import2, &name2) == 0);
	igt_assert_eq_u32(name1, name2);

	nouveau_bo_ref(NULL, &nvbo);
	drm_intel_bo_unreference(import1);
	drm_intel_bo_unreference(import2);
}
示例2: intel_region_alloc_for_fd
/*
 * Wrap a dma-buf fd in an intel_region of the given geometry.
 *
 * Imports the fd as a GEM BO, queries its tiling, and hands ownership of
 * the BO reference to the new region.  Returns NULL on any failure, in
 * which case the imported BO reference is dropped again.
 */
struct intel_region *
intel_region_alloc_for_fd(struct intel_screen *screen,
                          GLuint cpp,
                          GLuint width, GLuint height, GLuint pitch,
                          GLuint size,
                          int fd, const char *name)
{
   struct intel_region *region;
   uint32_t tiling, swizzle;
   drm_intel_bo *bo;
   int err;

   bo = drm_intel_bo_gem_create_from_prime(screen->bufmgr, fd, size);
   if (bo == NULL)
      return NULL;

   err = drm_intel_bo_get_tiling(bo, &tiling, &swizzle);
   if (err != 0) {
      fprintf(stderr, "Couldn't get tiling of buffer (%s): %s\n",
              name, strerror(-err));
      goto fail;
   }

   region = intel_region_alloc_internal(screen, cpp,
                                        width, height, pitch, tiling, bo);
   if (region != NULL)
      return region;

fail:
   /* Drop the import reference; the region never took ownership. */
   drm_intel_bo_unreference(bo);
   return NULL;
}
示例3: intelDestroyContext
/*
 * DRI2 context-destroy hook: tear down a brw_context.
 *
 * Teardown order matters here: GPU-state owners (shader-time, state,
 * draw, curbe) are destroyed before the batchbuffer, which is freed
 * before the Mesa context data and the final ralloc of brw itself.
 */
void
intelDestroyContext(__DRIcontext * driContextPriv)
{
struct brw_context *brw =
(struct brw_context *) driContextPriv->driverPrivate;
struct gl_context *ctx = &brw->ctx;
assert(brw); /* should never be null */
if (!brw)
return;
/* Dump a final BMP in case the application doesn't call SwapBuffers */
if (INTEL_DEBUG & DEBUG_AUB) {
intel_batchbuffer_flush(brw);
aub_dump_bmp(&brw->ctx);
}
_mesa_meta_free(&brw->ctx);
if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
/* Force a report. */
brw->shader_time.report_time = 0;
brw_collect_and_report_shader_time(brw);
brw_destroy_shader_time(brw);
}
brw_destroy_state(brw);
brw_draw_destroy(brw);
/* Release the constant-buffer BO and its CPU shadow copies. */
drm_intel_bo_unreference(brw->curbe.curbe_bo);
free(brw->curbe.last_buf);
free(brw->curbe.next_buf);
drm_intel_gem_context_destroy(brw->hw_ctx);
/* swrast/tnl are only present when a software fallback context exists. */
if (ctx->swrast_context) {
_swsetup_DestroyContext(&brw->ctx);
_tnl_DestroyContext(&brw->ctx);
}
_vbo_DestroyContext(&brw->ctx);
if (ctx->swrast_context)
_swrast_DestroyContext(&brw->ctx);
intel_batchbuffer_free(brw);
/* Drop the throttling reference and clear it in case anything inspects
 * the struct before ralloc_free below.
 */
drm_intel_bo_unreference(brw->first_post_swapbuffers_batch);
brw->first_post_swapbuffers_batch = NULL;
driDestroyOptionCache(&brw->optionCache);
/* free the Mesa context */
_mesa_free_context_data(&brw->ctx);
ralloc_free(brw);
driContextPriv->driverPrivate = NULL;
}
示例4: intel_batchbuffer_free
void
intel_batchbuffer_free(struct intel_context *intel)
{
drm_intel_bo_unreference(intel->batch.last_bo);
drm_intel_bo_unreference(intel->batch.bo);
drm_intel_bo_unreference(intel->batch.workaround_bo);
clear_cache(intel);
}
示例5: intelDestroyContext
/*
 * DRI2 context-destroy hook: tear down an intel_context.
 *
 * Teardown order matters: pending vertices are flushed first, the
 * hardware vtbl destroy runs before swrast/tnl/vbo teardown, and the
 * batchbuffer is freed before the Mesa context data and the final
 * ralloc of the context itself.
 */
void
intelDestroyContext(__DRIcontext * driContextPriv)
{
struct intel_context *intel =
(struct intel_context *) driContextPriv->driverPrivate;
struct gl_context *ctx = &intel->ctx;
assert(intel); /* should never be null */
if (intel) {
INTEL_FIREVERTICES(intel);
/* Dump a final BMP in case the application doesn't call SwapBuffers */
if (INTEL_DEBUG & DEBUG_AUB) {
intel_batchbuffer_flush(intel);
aub_dump_bmp(&intel->ctx);
}
_mesa_meta_free(&intel->ctx);
/* Generation-specific teardown via the hardware vtbl. */
intel->vtbl.destroy(intel);
/* swrast/tnl are only present when a software fallback context exists. */
if (ctx->swrast_context) {
_swsetup_DestroyContext(&intel->ctx);
_tnl_DestroyContext(&intel->ctx);
}
_vbo_DestroyContext(&intel->ctx);
if (ctx->swrast_context)
_swrast_DestroyContext(&intel->ctx);
intel->Fallback = 0x0; /* don't call _swrast_Flush later */
intel_batchbuffer_free(intel);
/* Release the primitive vertex buffer (CPU copy and BO). */
free(intel->prim.vb);
intel->prim.vb = NULL;
drm_intel_bo_unreference(intel->prim.vb_bo);
intel->prim.vb_bo = NULL;
/* Drop the throttling reference and clear it before the final free. */
drm_intel_bo_unreference(intel->first_post_swapbuffers_batch);
intel->first_post_swapbuffers_batch = NULL;
driDestroyOptionCache(&intel->optionCache);
/* free the Mesa context */
_mesa_free_context_data(&intel->ctx);
_math_matrix_dtr(&intel->ViewportMatrix);
ralloc_free(intel);
driContextPriv->driverPrivate = NULL;
}
}
示例6: intel_batchbuffer_free
void
intel_batchbuffer_free(struct intel_batchbuffer *batch)
{
drm_intel_bo_unreference(batch->bo);
batch->bo = NULL;
free(batch);
}
示例7: intel_upload_space
/**
 * Interface for getting memory for uploading streamed data to the GPU
 *
 * In most cases, streamed data (for GPU state structures, for example) is
 * uploaded through brw_state_batch(), since that interface allows relocations
 * from the streamed space returned to other BOs. However, that interface has
 * the restriction that the amount of space allocated has to be "small" (see
 * estimated_max_prim_size in brw_draw.c).
 *
 * This interface, on the other hand, is able to handle arbitrary sized
 * allocation requests, though it will batch small allocations into the same
 * BO for efficiency and reduced memory footprint.
 *
 * \note The returned pointer is valid only until intel_upload_finish(), which
 * will happen at batch flush or the next
 * intel_upload_space()/intel_upload_data().
 *
 * \param out_bo Pointer to a BO, which must point to a valid BO or NULL on
 * entry, and will have a reference to the new BO containing the state on
 * return.
 *
 * \param out_offset Offset within the buffer object that the data will land.
 */
void *
intel_upload_space(struct brw_context *brw,
uint32_t size,
uint32_t alignment,
drm_intel_bo **out_bo,
uint32_t *out_offset)
{
uint32_t offset;
/* Align the write cursor; NPOT because callers may pass e.g. 12-byte
 * alignments for vec3 data.
 */
offset = ALIGN_NPOT(brw->upload.next_offset, alignment);
/* If the current upload BO can't fit this request, retire it and start
 * a fresh one at offset 0.
 */
if (brw->upload.bo && offset + size > brw->upload.bo->size) {
intel_upload_finish(brw);
offset = 0;
}
if (!brw->upload.bo) {
/* New BO is at least INTEL_UPLOAD_SIZE so small requests share it. */
brw->upload.bo = drm_intel_bo_alloc(brw->bufmgr, "streamed data",
MAX2(INTEL_UPLOAD_SIZE, size), 4096);
/* Keep the BO persistently mapped; GTT mapping avoids clflushes on
 * non-LLC platforms.  NOTE(review): alloc/map results are not checked
 * here — OOM would crash at the dereference below.
 */
if (brw->has_llc)
drm_intel_bo_map(brw->upload.bo, true);
else
drm_intel_gem_bo_map_gtt(brw->upload.bo);
}
brw->upload.next_offset = offset + size;
*out_offset = offset;
/* Swap the caller's reference over to the current upload BO: drop the
 * old ref (unreference(NULL) is a no-op on first use) before taking a
 * new one.
 */
if (*out_bo != brw->upload.bo) {
drm_intel_bo_unreference(*out_bo);
*out_bo = brw->upload.bo;
drm_intel_bo_reference(brw->upload.bo);
}
return brw->upload.bo->virtual + offset;
}
示例8: brw_fence_client_wait
/**
 * Return true if the function successfully signals or has already signalled.
 * (This matches the behavior expected from __DRI2fence::client_wait_sync).
 */
static bool
brw_fence_client_wait(struct brw_context *brw, struct brw_fence *fence,
                      uint64_t timeout)
{
   if (fence->signalled)
      return true;

   assert(fence->batch_bo);

   /* DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and returns
    * immediately for timeouts <= 0. The best we can do is to clamp the
    * timeout to INT64_MAX. This limits the maximum timeout from 584 years to
    * 292 years - likely not a big deal.
    */
   uint64_t clamped = (timeout > INT64_MAX) ? INT64_MAX : timeout;

   if (drm_intel_gem_bo_wait(fence->batch_bo, clamped) != 0)
      return false;

   /* The wait completed: latch the signalled state and drop our BO ref. */
   fence->signalled = true;
   drm_intel_bo_unreference(fence->batch_bo);
   fence->batch_bo = NULL;
   return true;
}
示例9: test2
/*
 * Import an intel BO into the UDL device via PRIME, wrap it in a
 * framebuffer, and issue a dirty-rect update on it.
 *
 * Returns 0 on success, a negative/nonzero error code otherwise.
 *
 * Fixes over the original:
 *  - the early `return ret` after drmModeDirtyFB skipped all cleanup,
 *    leaking the dumb handle and the intel BO reference;
 *  - on drmPrimeFDToHandle failure, `goto out` called dumb_bo_destroy()
 *    with an *uninitialized* udl_handle;
 *  - prime_fd was never closed;
 *  - the drm_intel_bo_alloc result was never checked.
 */
static int test2(void)
{
	drm_intel_bo *test_intel_bo;
	uint32_t fb_id;
	drmModeClip clip;
	int prime_fd;
	uint32_t udl_handle;
	int ret;

	test_intel_bo = drm_intel_bo_alloc(bufmgr, "test bo", BO_SIZE, 4096);
	if (!test_intel_bo)
		return -1;

	ret = drm_intel_bo_gem_export_to_prime(test_intel_bo, &prime_fd);
	if (ret)
		goto out_bo;

	ret = drmPrimeFDToHandle(udl_fd, prime_fd, &udl_handle);
	/* The fd is no longer needed once the handle import has been tried. */
	close(prime_fd);
	if (ret)
		goto out_bo;	/* udl_handle is not valid here */

	ret = drmModeAddFB(udl_fd, 640, 480, 16, 16, 640, udl_handle, &fb_id);
	if (ret)
		goto out;

	clip.x1 = 0;
	clip.y1 = 0;
	clip.x2 = 10;
	clip.y2 = 10;
	ret = drmModeDirtyFB(udl_fd, fb_id, &clip, 1);
	/* fall through to cleanup with ret reporting the dirty result */
out:
	dumb_bo_destroy(udl_fd, udl_handle);
out_bo:
	drm_intel_bo_unreference(test_intel_bo);
	return ret;
}
示例10: test_i915_self_import_to_different_fd
/* export handle from intel driver - reimport to another intel driver bufmgr
   see if you get same object */
static void test_i915_self_import_to_different_fd(void)
{
	drm_intel_bo *exported, *imported;
	int dmabuf_fd;

	exported = drm_intel_bo_alloc(bufmgr, "test bo", BO_SIZE, 4096);
	drm_intel_bo_gem_export_to_prime(exported, &dmabuf_fd);

	/* Import into the second bufmgr, then drop the now-unneeded fd. */
	imported = drm_intel_bo_gem_create_from_prime(bufmgr2, dmabuf_fd, BO_SIZE);
	close(dmabuf_fd);
	igt_assert(imported);

	drm_intel_bo_unreference(imported);
	drm_intel_bo_unreference(exported);
}
示例11: test_i915_nv_reimport_twice_check_flink_name
/*
 * Export one intel BO through PRIME twice, import each dma-buf fd on a
 * different nouveau device, and verify both nouveau BOs report the same
 * flink name, i.e. alias the same underlying object.
 */
static void test_i915_nv_reimport_twice_check_flink_name(void)
{
	drm_intel_bo *intel_bo;
	struct nouveau_bo *import1 = NULL, *import2 = NULL;
	uint32_t name1, name2;
	int prime_fd;

	intel_bo = drm_intel_bo_alloc(bufmgr, "test bo", BO_SIZE, 4096);

	/* First export/import cycle. */
	igt_assert(drm_intel_bo_gem_export_to_prime(intel_bo, &prime_fd) == 0);
	igt_assert(nouveau_bo_prime_handle_ref(ndev, prime_fd, &import1) == 0);
	/* create a new dma-buf */
	close(prime_fd);

	/* Second export/import cycle, onto the second nouveau device. */
	igt_assert(drm_intel_bo_gem_export_to_prime(intel_bo, &prime_fd) == 0);
	igt_assert(nouveau_bo_prime_handle_ref(ndev2, prime_fd, &import2) == 0);
	close(prime_fd);

	/* Same underlying object => same flink name. */
	igt_assert(nouveau_bo_name_get(import1, &name1) == 0);
	igt_assert(nouveau_bo_name_get(import2, &name2) == 0);
	igt_assert_eq_u32(name1, name2);

	nouveau_bo_ref(NULL, &import2);
	nouveau_bo_ref(NULL, &import1);
	drm_intel_bo_unreference(intel_bo);
}
示例12: intel_winsys_alloc_texture
/*
 * Allocate a tiled texture BO of the requested geometry.
 *
 * The allocated pitch is returned through \p pitch.  If the allocator
 * comes back with a tiling mode other than the one requested, the BO is
 * released and NULL is returned.
 */
struct intel_bo *
intel_winsys_alloc_texture(struct intel_winsys *winsys,
                           const char *name,
                           int width, int height, int cpp,
                           enum intel_tiling_mode tiling,
                           uint32_t initial_domain,
                           unsigned long *pitch)
{
   unsigned long flags = 0;
   uint32_t granted_tiling = tiling;
   drm_intel_bo *bo;

   if (initial_domain & (INTEL_DOMAIN_RENDER | INTEL_DOMAIN_INSTRUCTION))
      flags = BO_ALLOC_FOR_RENDER;

   bo = drm_intel_bo_alloc_tiled(winsys->bufmgr, name,
                                 width, height, cpp,
                                 &granted_tiling, pitch, flags);
   if (!bo)
      return NULL;

   /* allocator returned a tiling other than the one requested */
   if (granted_tiling != tiling) {
      assert(!"tiling mismatch");
      drm_intel_bo_unreference(bo);
      return NULL;
   }

   return (struct intel_bo *) bo;
}
示例13: intel_bufferobj_free
/**
 * Deallocate/free a vertex/pixel buffer object.
 * Called via glDeleteBuffersARB().
 */
static void
intel_bufferobj_free(GLcontext * ctx, struct gl_buffer_object *obj)
{
   struct intel_context *ictx = intel_context(ctx);
   struct intel_buffer_object *bufobj = intel_buffer_object(obj);

   assert(bufobj);

   /* Buffer objects are automatically unmapped when deleting according
    * to the spec, but Mesa doesn't do UnmapBuffer for us at context destroy
    * (though it does if you call glDeleteBuffers)
    */
   if (obj->Pointer)
      intel_bufferobj_unmap(ctx, 0, obj);

   free(bufobj->sys_buffer);

   /* Storage lives in exactly one of: a region, or a standalone BO. */
   if (bufobj->region)
      intel_bufferobj_release_region(ictx, bufobj);
   else if (bufobj->buffer)
      drm_intel_bo_unreference(bufobj->buffer);

   free(bufobj);
}
示例14: drmmode_crtc_shadow_allocate
/*
 * Allocate the shadow framebuffer used for rotated CRTC output and
 * register it with KMS.  Returns the rotate BO on success, NULL on
 * failure.
 *
 * Fix over the original: when drmModeAddFB failed, the rotate BO was
 * unreferenced but drmmode_crtc->rotate_bo was left pointing at the
 * freed object; any later shadow-destroy path reading that field would
 * double-unreference it.  It is now cleared on the failure path.
 */
static void *
drmmode_crtc_shadow_allocate(xf86CrtcPtr crtc, int width, int height)
{
	ScrnInfoPtr scrn = crtc->scrn;
	drmmode_crtc_private_ptr drmmode_crtc = crtc->driver_private;
	drmmode_ptr drmmode = drmmode_crtc->drmmode;
	unsigned long rotate_pitch;
	uint32_t tiling;
	int ret;

	drmmode_crtc->rotate_bo = intel_allocate_framebuffer(scrn,
							     width, height,
							     drmmode->cpp,
							     &rotate_pitch,
							     &tiling);
	if (!drmmode_crtc->rotate_bo) {
		xf86DrvMsg(crtc->scrn->scrnIndex, X_ERROR,
			   "Couldn't allocate shadow memory for rotated CRTC\n");
		return NULL;
	}

	ret = drmModeAddFB(drmmode->fd, width, height, crtc->scrn->depth,
			   crtc->scrn->bitsPerPixel, rotate_pitch,
			   drmmode_crtc->rotate_bo->handle,
			   &drmmode_crtc->rotate_fb_id);
	if (ret) {
		ErrorF("failed to add rotate fb\n");
		drm_intel_bo_unreference(drmmode_crtc->rotate_bo);
		drmmode_crtc->rotate_bo = NULL;	/* don't leave a dangling pointer */
		return NULL;
	}

	drmmode_crtc->rotate_pitch = rotate_pitch;
	return drmmode_crtc->rotate_bo;
}
示例15: intel_region_alloc
/*
 * Allocate a fresh tiled BO and wrap it in an intel_region.
 *
 * \p tiling is the requested tiling mode; the allocator may adjust it.
 * Returns NULL on allocation failure, dropping the BO reference if the
 * region wrapper itself could not be created.
 */
struct intel_region *
intel_region_alloc(struct intel_screen *screen,
                   uint32_t tiling,
                   GLuint cpp, GLuint width, GLuint height,
                   bool expect_accelerated_upload)
{
   const unsigned long flags =
      expect_accelerated_upload ? BO_ALLOC_FOR_RENDER : 0;
   unsigned long aligned_pitch;
   struct intel_region *region;
   drm_intel_bo *bo;

   bo = drm_intel_bo_alloc_tiled(screen->bufmgr, "region",
                                 width, height, cpp,
                                 &tiling, &aligned_pitch, flags);
   if (bo == NULL)
      return NULL;

   region = intel_region_alloc_internal(screen, cpp, width, height,
                                        aligned_pitch, tiling, bo);
   if (region == NULL) {
      /* The region never took ownership of the BO reference. */
      drm_intel_bo_unreference(bo);
      return NULL;
   }

   return region;
}