This page collects typical usage examples of the C++ OSK_ASSERT function. If you are wondering what OSK_ASSERT does, how to use it, or want real-world examples, the curated code samples here may help.
Below are 15 code examples of the OSK_ASSERT function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
Example 1: kbase_pm_context_idle
void kbase_pm_context_idle(kbase_device *kbdev)
{
    unsigned long flags;
    int c;

    OSK_ASSERT(kbdev != NULL);

    spin_lock_irqsave(&kbdev->pm.active_count_lock, flags);
    c = --kbdev->pm.active_count;
    KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_IDLE, NULL, NULL, 0u, c);
    OSK_ASSERT(c >= 0);

    if (c == 0)
    {
        /* Last context has gone idle */
        kbase_pm_send_event(kbdev, KBASE_PM_EVENT_GPU_IDLE);
        kbasep_pm_record_gpu_idle(kbdev);
    }

    /* We must wait for the above functions to finish (in the case c == 0)
     * before releasing the lock, otherwise there is a race with another thread
     * calling kbase_pm_context_active - in that case the IDLE message could be
     * sent *after* the ACTIVE message, causing the policy and metrics systems
     * to become confused. */
    spin_unlock_irqrestore(&kbdev->pm.active_count_lock, flags);
}
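The idle call above only makes sense alongside the counterpart that raises the reference count; the race comment refers to kbase_pm_context_active, whose body is not shown in this listing. As a minimal sketch of that pairing (the event constant and the exact body are assumptions based on the idle path, not the driver's actual implementation):

/* Hypothetical counterpart sketch: mirror of kbase_pm_context_idle.
 * Assumption: the real kbase_pm_context_active takes the same lock and
 * sends a GPU_ACTIVE event on the 0 -> 1 transition. Illustrative only. */
void sketch_pm_context_active(kbase_device *kbdev)
{
    unsigned long flags;
    int c;

    OSK_ASSERT(kbdev != NULL);

    spin_lock_irqsave(&kbdev->pm.active_count_lock, flags);
    c = ++kbdev->pm.active_count;

    if (c == 1)
    {
        /* First context has become active. Sending the event while the lock
         * is held is what prevents it being reordered after a concurrent
         * thread's GPU_IDLE event - the race described above. */
        kbase_pm_send_event(kbdev, KBASE_PM_EVENT_GPU_ACTIVE);
    }
    spin_unlock_irqrestore(&kbdev->pm.active_count_lock, flags);
}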
Example 2: kbasep_js_check_and_deref_nss_job
/**
* When the context is scheduled, the caller must hold the runpool_irq lock (a spinlock).
*/
STATIC mali_bool kbasep_js_check_and_deref_nss_job( kbasep_js_device_data *js_devdata,
                                                    kbase_context *kctx,
                                                    kbase_jd_atom *atom )
{
    kbasep_js_kctx_info *js_kctx_info;
    mali_bool nss_state_changed = MALI_FALSE;

    OSK_ASSERT( kctx != NULL );
    js_kctx_info = &kctx->jctx.sched_info;

    if ( atom->atom->core_req & BASE_JD_REQ_NSS )
    {
        OSK_ASSERT( js_kctx_info->ctx.nr_nss_jobs > 0 );

        if ( js_kctx_info->ctx.is_scheduled != MALI_FALSE
             && js_kctx_info->ctx.nr_nss_jobs == 1 )
        {
            /* Only NSS deref-count a running ctx on the last nss job */
            nss_state_changed = kbasep_js_check_and_deref_nss_running_ctx( js_devdata, kctx );
        }
        --(js_kctx_info->ctx.nr_nss_jobs);
    }

    return nss_state_changed;
}
Example 3: kbase_fence_wait_callback
static void kbase_fence_wait_callback(struct sync_fence *fence, struct sync_fence_waiter *waiter)
{
    kbase_jd_atom *katom = container_of(waiter, kbase_jd_atom, sync_waiter);
    kbase_context *kctx;

    OSK_ASSERT(NULL != katom);
    kctx = katom->kctx;
    OSK_ASSERT(NULL != kctx);

    /* Propagate the fence status to the atom.
     * If negative then cancel this atom and its dependencies. */
    if (fence->status < 0)
    {
        katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
    }

    /* To prevent a potential deadlock we schedule the work onto the
     * job_done_wq workqueue.
     *
     * The issue is that we may signal the timeline while holding
     * kctx->jctx.lock, and the callbacks are run synchronously from
     * sync_timeline_signal, so we simply defer the work. */
    OSK_ASSERT(0 == object_is_on_stack(&katom->work));
    INIT_WORK(&katom->work, kbase_fence_wait_worker);
    queue_work(kctx->jctx.job_done_wq, &katom->work);
}
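The worker body, kbase_fence_wait_worker, is not part of this listing. A minimal sketch of the deferral pattern it relies on - take the context lock on a workqueue rather than inside the signalling callback - might look like the following (the body is a hypothetical stand-in, assuming kctx->jctx.lock is an ordinary kernel mutex):

/* Hypothetical worker sketch: the fence callback above only queues work;
 * the lock-taking completion logic runs here, outside sync_timeline_signal,
 * which is what avoids the self-deadlock on kctx->jctx.lock. */
static void sketch_fence_wait_worker(struct work_struct *data)
{
    kbase_jd_atom *katom = container_of(data, kbase_jd_atom, work);
    kbase_context *kctx = katom->kctx;

    mutex_lock(&kctx->jctx.lock);   /* safe here: not in the signal path */
    /* ... complete or cancel the atom based on katom->event_code ... */
    mutex_unlock(&kctx->jctx.lock);
}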
Example 4: kbasep_get_memory_performance
void kbasep_get_memory_performance(const kbase_memory_resource *resource, kbase_memory_performance *cpu_performance,
                                   kbase_memory_performance *gpu_performance)
{
    kbase_attribute *attributes;

    OSK_ASSERT(resource != NULL);
    OSK_ASSERT(cpu_performance != NULL);
    OSK_ASSERT(gpu_performance != NULL);

    attributes = resource->attributes;
    *cpu_performance = *gpu_performance = KBASE_MEM_PERF_NORMAL; /* default performance */

    if (attributes == NULL)
    {
        return;
    }

    while (attributes->id != KBASE_CONFIG_ATTR_END)
    {
        if (attributes->id == KBASE_MEM_ATTR_PERF_GPU)
        {
            *gpu_performance = (kbase_memory_performance)attributes->data;
        }
        else if (attributes->id == KBASE_MEM_ATTR_PERF_CPU)
        {
            *cpu_performance = (kbase_memory_performance)attributes->data;
        }
        attributes++;
    }
}
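The loop walks a flat attribute array until it hits the KBASE_CONFIG_ATTR_END sentinel. A sketch of how a platform configuration might populate such a resource (attribute IDs and performance values follow the names used above; the exact initializer layout of kbase_attribute is an assumption):

/* Hypothetical platform-config sketch: an attribute list terminated by the
 * KBASE_CONFIG_ATTR_END sentinel, as expected by the walker above. */
static kbase_attribute sketch_mem_attributes[] = {
    { KBASE_MEM_ATTR_PERF_CPU, KBASE_MEM_PERF_FAST },
    { KBASE_MEM_ATTR_PERF_GPU, KBASE_MEM_PERF_SLOW },
    { KBASE_CONFIG_ATTR_END,   0                   }
};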
Example 5: ukk_call_prepare
void ukk_call_prepare(ukk_call_context * const ukk_ctx, ukk_session * const session)
{
    OSK_ASSERT(NULL != ukk_ctx);
    OSK_ASSERT(NULL != session);

    ukk_ctx->ukk_session = session;
}
Example 6: kbasep_js_devdata_term
void kbasep_js_devdata_term( kbase_device *kbdev )
{
    kbasep_js_device_data *js_devdata;

    OSK_ASSERT( kbdev != NULL );
    js_devdata = &kbdev->js_data;

    if ( (js_devdata->init_status & JS_DEVDATA_INIT_CONSTANTS) )
    {
        /* The caller must de-register all contexts before calling this */
        OSK_ASSERT( js_devdata->nr_contexts_running == 0 );
        OSK_ASSERT( js_devdata->runpool_irq.nr_nss_ctxs_running == 0 );
    }
    if ( (js_devdata->init_status & JS_DEVDATA_INIT_POLICY) )
    {
        kbasep_js_policy_term( &js_devdata->policy );
    }
    if ( (js_devdata->init_status & JS_DEVDATA_INIT_RUNPOOL_IRQ_LOCK) )
    {
        osk_spinlock_irq_term( &js_devdata->runpool_irq.lock );
    }
    if ( (js_devdata->init_status & JS_DEVDATA_INIT_QUEUE_MUTEX) )
    {
        osk_mutex_term( &js_devdata->queue_mutex );
    }
    if ( (js_devdata->init_status & JS_DEVDATA_INIT_RUNPOOL_MUTEX) )
    {
        osk_mutex_term( &js_devdata->runpool_mutex );
    }

    js_devdata->init_status = JS_DEVDATA_INIT_NONE;
}
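This termination function pairs with an init function that sets one bit in init_status per successfully created resource (Example 12 below shows the same idiom on the context side), which is what makes it safe to call on a partially initialized object. A self-contained, generic sketch of the idiom using pthreads (names are illustrative, not from the driver):

#include <pthread.h>

/* Generic init-status bitmask idiom, distilled from the function above. */
enum { INIT_NONE = 0, INIT_MUTEX = 1 << 0, INIT_COND = 1 << 1,
       INIT_ALL = INIT_MUTEX | INIT_COND };

typedef struct sketch_obj {
    unsigned init_status;
    pthread_mutex_t lock;
    pthread_cond_t cond;
} sketch_obj;

static int sketch_init(sketch_obj *obj)
{
    obj->init_status = INIT_NONE;
    if (pthread_mutex_init(&obj->lock, NULL) == 0)
        obj->init_status |= INIT_MUTEX;
    if (pthread_cond_init(&obj->cond, NULL) == 0)
        obj->init_status |= INIT_COND;
    return obj->init_status == INIT_ALL ? 0 : -1;
}

static void sketch_term(sketch_obj *obj)
{
    /* Release only what was actually initialized, mirroring
     * kbasep_js_devdata_term: safe after a partial init. */
    if (obj->init_status & INIT_COND)
        pthread_cond_destroy(&obj->cond);
    if (obj->init_status & INIT_MUTEX)
        pthread_mutex_destroy(&obj->lock);
    obj->init_status = INIT_NONE;
}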
Example 7: kbase_fence_wait
static int kbase_fence_wait(kbase_jd_atom *katom)
{
    int ret;

    OSK_ASSERT(NULL != katom);
    OSK_ASSERT(NULL != katom->kctx);

    sync_fence_waiter_init(&katom->sync_waiter, kbase_fence_wait_callback);
    ret = sync_fence_wait_async(katom->fence, &katom->sync_waiter);

    if (ret == 1)
    {
        /* Already signalled */
        return 0;
    }
    else if (ret < 0)
    {
        goto cancel_atom;
    }
    return 1;

cancel_atom:
    katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
    /* We should cause the dependent jobs in the bag to be failed;
     * to do this we schedule the work queue to complete this job */
    OSK_ASSERT(0 == object_is_on_stack(&katom->work));
    INIT_WORK(&katom->work, kbase_fence_wait_worker);
    queue_work(katom->kctx->jctx.job_done_wq, &katom->work);
    return 1;
}
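Note the return convention: 0 means the fence was already signalled and the atom can be completed synchronously, while 1 means completion has been deferred, either to the fence callback of Example 3 or to the cancel path's work item. A hypothetical caller would branch on that (sketch only; this is not the driver's actual call site):

/* Hypothetical call-site sketch illustrating the 0/1 return convention. */
static void sketch_submit_fence_atom(kbase_jd_atom *katom)
{
    if (kbase_fence_wait(katom) == 0)
    {
        /* Fence already signalled: run the normal completion path inline. */
        /* ... */
    }
    /* Otherwise completion arrives later via the workqueue; nothing to do. */
}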
Example 8: OSK_ASSERT
struct kbase_va_region *kbase_pmem_alloc(struct kbase_context *kctx, u32 size,
                                         u32 flags, u16 *pmem_cookie)
{
    struct kbase_va_region *reg;
    u16 cookie;

    OSK_ASSERT(kctx != NULL);
    OSK_ASSERT(pmem_cookie != NULL);

    if (0 == size)
    {
        goto out1;
    }
    if (!kbase_check_alloc_flags(flags))
    {
        goto out1;
    }

    reg = kbase_alloc_free_region(kctx, 0, size, KBASE_REG_ZONE_PMEM);
    if (!reg)
        goto out1;

    reg->flags &= ~KBASE_REG_FREE;
    kbase_update_region_flags(reg, flags, MALI_FALSE);

    if (kbase_alloc_phy_pages(reg, size, size))
        goto out2;

    reg->nr_alloc_pages = size;
    reg->extent = 0;

    kbase_gpu_vm_lock(kctx);
    if (!kctx->osctx.cookies)
        goto out3;

    cookie = __ffs(kctx->osctx.cookies);
    kctx->osctx.cookies &= ~(1UL << cookie);
    reg->flags &= ~KBASE_REG_COOKIE_MASK;
    reg->flags |= KBASE_REG_COOKIE(cookie);

    OSK_DLIST_PUSH_FRONT(&kctx->osctx.reg_pending, reg,
                         struct kbase_va_region, link);
    *pmem_cookie = cookie;
    kbase_gpu_vm_unlock(kctx);

    return reg;

out3:
    kbase_gpu_vm_unlock(kctx);
    kbase_free_phy_pages(reg);
out2:
    osk_free(reg);
out1:
    return NULL;
}
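The cookie handling above is a classic free-slot bitmask: a set bit means the slot is free, __ffs finds the lowest free slot, and clearing the bit claims it. A self-contained userspace sketch of the allocate/release pair (generic code, not the driver's):

#include <strings.h>  /* ffs() */

/* Bitmask slot allocator sketch: bit N set => slot N is free. */
static unsigned int sketch_cookies = ~0u;

static int sketch_cookie_alloc(void)
{
    int slot;
    if (!sketch_cookies)
        return -1;                          /* all slots taken */
    slot = ffs((int)sketch_cookies) - 1;    /* lowest set bit, like __ffs above */
    sketch_cookies &= ~(1u << slot);        /* claim it */
    return slot;
}

static void sketch_cookie_free(int slot)
{
    sketch_cookies |= 1u << slot;           /* mark the slot free again */
}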
Example 9: kbase_event_post
void kbase_event_post(kbase_context *ctx, kbase_jd_atom *atom)
{
    OSK_ASSERT(ctx);
    OSK_ASSERT(atom);

    osk_workq_work_init(&atom->work, kbase_event_post_worker);
    osk_workq_submit(&ctx->event_workq, &atom->work);
}
Example 10: kbasep_8401_submit_dummy_job
/**
* Submit the 8401 workaround job.
*
* Important for BASE_HW_ISSUE_8987: This job always uses 16 RMUs
* - Therefore, on slot[1] it will always use the same number of RMUs as another
* GLES job.
* - On slot[2], no other job (GLES or otherwise) will be running on the
* cores, by virtue of it being slot[2]. Therefore, any value of RMUs is
* acceptable.
*/
void kbasep_8401_submit_dummy_job(kbase_device *kbdev, int js)
{
    u32 cfg;
    mali_addr64 jc;
    /* While this workaround is active we reserve the last address space just
     * for submitting the dummy jobs */
    int as = kbdev->nr_hw_address_spaces;

    /* Don't issue compute jobs on job slot 0 */
    OSK_ASSERT(js != 0);
    OSK_ASSERT(js < KBASE_8401_WORKAROUND_COMPUTEJOB_COUNT);

    /* Job chain GPU address: GPU physical address (see the
     * kbase_mmu_insert_pages call in kbasep_8401_workaround_init) */
    jc = (js + WORKAROUND_PAGE_OFFSET) * OSK_PAGE_SIZE;

    /* Clear the job status words which may contain values from a previous
     * job completion */
    memset(kbdev->workaround_compute_job_va[js], 0, 4 * sizeof(u32));

    /* Get the affinity of the previous job */
    dummy_job_atom[js].affinity = ((u64)kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_AFFINITY_LO), NULL)) |
                                  (((u64)kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_AFFINITY_HI), NULL)) << 32);

    /* Don't submit a compute job if the affinity was previously zero
     * (i.e. no jobs have run yet on this slot) */
    if (!dummy_job_atom[js].affinity)
    {
        return;
    }

    /* Ensure that our page tables are programmed into the MMU */
    kbase_reg_write(kbdev, MMU_AS_REG(as, ASn_TRANSTAB_LO),
                    (kbdev->workaround_kctx->pgd & ASn_TRANSTAB_ADDR_SPACE_MASK) | ASn_TRANSTAB_READ_INNER
                    | ASn_TRANSTAB_ADRMODE_TABLE, NULL);
    kbase_reg_write(kbdev, MMU_AS_REG(as, ASn_TRANSTAB_HI), (kbdev->workaround_kctx->pgd >> 32), NULL);
    kbase_reg_write(kbdev, MMU_AS_REG(as, ASn_MEMATTR_LO), ASn_MEMATTR_IMPL_DEF_CACHE_POLICY, NULL);
    kbase_reg_write(kbdev, MMU_AS_REG(as, ASn_MEMATTR_HI), ASn_MEMATTR_IMPL_DEF_CACHE_POLICY, NULL);
    kbase_reg_write(kbdev, MMU_AS_REG(as, ASn_COMMAND), ASn_COMMAND_UPDATE, NULL);

    kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_LO), jc & 0xFFFFFFFF, NULL);
    kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_HI), jc >> 32, NULL);
    kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_AFFINITY_NEXT_LO), dummy_job_atom[js].affinity & 0xFFFFFFFF, NULL);
    kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_AFFINITY_NEXT_HI), dummy_job_atom[js].affinity >> 32, NULL);

    /* Start MMU, medium priority, cache clean/flush on end, clean/flush on start */
    cfg = as | JSn_CONFIG_END_FLUSH_CLEAN_INVALIDATE | JSn_CONFIG_START_MMU
          | JSn_CONFIG_START_FLUSH_CLEAN_INVALIDATE | JSn_CONFIG_THREAD_PRI(8);
    kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_CONFIG_NEXT), cfg, NULL);

    KBASE_TRACE_ADD_SLOT(kbdev, JM_SUBMIT, NULL, 0, jc, js);
    kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_COMMAND_NEXT), JSn_COMMAND_START, NULL);

    /* Report that the job has been submitted */
    kbasep_jm_enqueue_submit_slot(&kbdev->jm_slots[js], &dummy_job_atom[js]);
}
Example 11: kbase_gpuprops_get_props
/**
* @brief Get the GPU configuration
*
* Fill the base_gpu_props structure with values from the GPU configuration registers
*
* @param gpu_props The base_gpu_props structure
* @param kbdev The kbase_device structure for the device
*/
static void kbase_gpuprops_get_props(base_gpu_props *gpu_props, kbase_device *kbdev)
{
    kbase_gpuprops_regdump regdump;
    int i;

    OSK_ASSERT(NULL != kbdev);
    OSK_ASSERT(NULL != gpu_props);

    /* Dump relevant registers */
    kbase_gpuprops_dump_registers(kbdev, &regdump);

    /* Populate the base_gpu_props structure */
    gpu_props->core_props.version_status = KBASE_UBFX32(regdump.gpu_id, 0U, 4);
    gpu_props->core_props.minor_revision = KBASE_UBFX32(regdump.gpu_id, 4U, 8);
    gpu_props->core_props.major_revision = KBASE_UBFX32(regdump.gpu_id, 12U, 4);
    gpu_props->core_props.product_id = KBASE_UBFX32(regdump.gpu_id, 16U, 16);
    gpu_props->core_props.log2_program_counter_size = KBASE_GPU_PC_SIZE_LOG2;
    gpu_props->core_props.gpu_available_memory_size = totalram_pages << PAGE_SHIFT;

    for (i = 0; i < BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS; i++)
    {
        gpu_props->core_props.texture_features[i] = regdump.texture_features[i];
    }

    gpu_props->l2_props.log2_line_size = KBASE_UBFX32(regdump.l2_features, 0U, 8);
    gpu_props->l2_props.log2_cache_size = KBASE_UBFX32(regdump.l2_features, 16U, 8);
    gpu_props->l3_props.log2_line_size = KBASE_UBFX32(regdump.l3_features, 0U, 8);
    gpu_props->l3_props.log2_cache_size = KBASE_UBFX32(regdump.l3_features, 16U, 8);
    gpu_props->tiler_props.bin_size_bytes = 1 << KBASE_UBFX32(regdump.tiler_features, 0U, 6);
    gpu_props->tiler_props.max_active_levels = KBASE_UBFX32(regdump.tiler_features, 8U, 4);

    gpu_props->raw_props.gpu_id = regdump.gpu_id;
    gpu_props->raw_props.tiler_features = regdump.tiler_features;
    gpu_props->raw_props.mem_features = regdump.mem_features;
    gpu_props->raw_props.mmu_features = regdump.mmu_features;
    gpu_props->raw_props.l2_features = regdump.l2_features;
    gpu_props->raw_props.l3_features = regdump.l3_features;
    gpu_props->raw_props.as_present = regdump.as_present;
    gpu_props->raw_props.js_present = regdump.js_present;
    gpu_props->raw_props.shader_present = ((u64)regdump.shader_present_hi << 32) + regdump.shader_present_lo;
    gpu_props->raw_props.tiler_present = ((u64)regdump.tiler_present_hi << 32) + regdump.tiler_present_lo;
    gpu_props->raw_props.l2_present = ((u64)regdump.l2_present_hi << 32) + regdump.l2_present_lo;
    gpu_props->raw_props.l3_present = ((u64)regdump.l3_present_hi << 32) + regdump.l3_present_lo;

    for (i = 0; i < MIDG_MAX_JOB_SLOTS; i++)
    {
        gpu_props->raw_props.js_features[i] = regdump.js_features[i];
    }

    /* Initialize the coherent_group structure for each group */
    kbase_gpuprops_construct_coherent_groups(gpu_props);
}
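KBASE_UBFX32(value, offset, size) extracts an unsigned bitfield: shift right by offset, then mask down to size bits. A minimal stand-alone demonstration consistent with the uses above (the driver's actual macro may differ in its casts, and the register value below is made up):

#include <stdint.h>
#include <stdio.h>

/* Unsigned bitfield extract, consistent with the uses above:
 * take `size` bits of `value` starting at bit `offset` (size < 32). */
#define SKETCH_UBFX32(value, offset, size) \
    (((uint32_t)(value) >> (offset)) & ((1u << (size)) - 1u))

int main(void)
{
    uint32_t gpu_id = 0x69560010;  /* invented register value for illustration */
    printf("product_id = 0x%x\n", SKETCH_UBFX32(gpu_id, 16, 16)); /* 0x6956 */
    printf("version_status = %u\n", SKETCH_UBFX32(gpu_id, 0, 4)); /* 0x0 */
    return 0;
}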
Example 12: kbasep_js_kctx_init
mali_error kbasep_js_kctx_init( kbase_context *kctx )
{
    kbase_device *kbdev;
    kbasep_js_kctx_info *js_kctx_info;
    mali_error err;
    osk_error osk_err;

    OSK_ASSERT( kctx != NULL );
    kbdev = kctx->kbdev;
    OSK_ASSERT( kbdev != NULL );

    js_kctx_info = &kctx->jctx.sched_info;
    OSK_ASSERT( js_kctx_info->init_status == JS_KCTX_INIT_NONE );

    js_kctx_info->ctx.nr_jobs = 0;
    js_kctx_info->ctx.nr_nss_jobs = 0;
    js_kctx_info->ctx.is_scheduled = MALI_FALSE;
    js_kctx_info->ctx.is_dying = MALI_FALSE;
    js_kctx_info->init_status |= JS_KCTX_INIT_CONSTANTS;

    /* On error we can simply continue, provided none of the resources below
     * rely on the ones above */
    osk_err = osk_mutex_init( &js_kctx_info->ctx.jsctx_mutex, OSK_LOCK_ORDER_JS_CTX );
    if ( osk_err == OSK_ERR_NONE )
    {
        js_kctx_info->init_status |= JS_KCTX_INIT_JSCTX_MUTEX;
    }

    osk_err = osk_waitq_init( &js_kctx_info->ctx.not_scheduled_waitq );
    if ( osk_err == OSK_ERR_NONE )
    {
        js_kctx_info->init_status |= JS_KCTX_INIT_JSCTX_WAITQ;
    }

    err = kbasep_js_policy_init_ctx( kbdev, kctx );
    if ( err == MALI_ERROR_NONE )
    {
        js_kctx_info->init_status |= JS_KCTX_INIT_POLICY;
    }

    /* On error, do no cleanup here; the caller(s) handle it, since this
     * resource is designed to be safe to terminate after a failed init */
    if ( js_kctx_info->init_status != JS_KCTX_INIT_ALL )
    {
        return MALI_ERROR_FUNCTION_FAILED;
    }

    /* Initially, the context is not scheduled */
    osk_waitq_set( &js_kctx_info->ctx.not_scheduled_waitq );

    return MALI_ERROR_NONE;
}
Example 13: mali_dvfs_event_proc
static void mali_dvfs_event_proc(struct work_struct *w)
{
    unsigned long flags;
    mali_dvfs_status *dvfs_status;
    struct exynos_context *platform;

    mutex_lock(&mali_enable_clock_lock);
    dvfs_status = &mali_dvfs_status_current;

    if (!kbase_platform_dvfs_get_enable_status()) {
        mutex_unlock(&mali_enable_clock_lock);
        return;
    }

    platform = (struct exynos_context *)dvfs_status->kbdev->platform_context;

#ifdef MALI_DVFS_ASV_ENABLE
    if (dvfs_status->asv_status == ASV_STATUS_DISABLE_REQ) {
        dvfs_status->asv_status = mali_dvfs_update_asv(ASV_CMD_DISABLE);
    } else if (dvfs_status->asv_status == ASV_STATUS_NOT_INIT) {
        dvfs_status->asv_status = mali_dvfs_update_asv(ASV_CMD_ENABLE);
    }
#endif

    spin_lock_irqsave(&mali_dvfs_spinlock, flags);

    if (dvfs_status->utilisation > mali_dvfs_infotbl[dvfs_status->step].max_threshold) {
        if (dvfs_status->step == kbase_platform_dvfs_get_level(450)) {
            if (platform->utilisation > mali_dvfs_infotbl[dvfs_status->step].max_threshold)
                dvfs_status->step++;
            OSK_ASSERT(dvfs_status->step < MALI_DVFS_STEP);
        } else {
            dvfs_status->step++;
            OSK_ASSERT(dvfs_status->step < MALI_DVFS_STEP);
        }
    } else if ((dvfs_status->step > 0) &&
               (platform->time_tick == MALI_DVFS_TIME_INTERVAL) &&
               (platform->utilisation < mali_dvfs_infotbl[dvfs_status->step].min_threshold)) {
        OSK_ASSERT(dvfs_status->step > 0);
        dvfs_status->step--;
    }

#ifdef CONFIG_MALI_T6XX_FREQ_LOCK
    if ((dvfs_status->upper_lock >= 0) && (dvfs_status->step > dvfs_status->upper_lock)) {
        dvfs_status->step = dvfs_status->upper_lock;
    }
    if (dvfs_status->under_lock > 0) {
        if (dvfs_status->step < dvfs_status->under_lock)
            dvfs_status->step = dvfs_status->under_lock;
    }
#endif

    spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);

    kbase_platform_dvfs_set_level(dvfs_status->kbdev, dvfs_status->step);
    mutex_unlock(&mali_enable_clock_lock);
}
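Stripped of the locking, ASV handling and platform hooks, the governor logic reduces to: step up when utilisation exceeds the current level's max threshold, step down when it stays below the min threshold, then clamp the result to any externally imposed frequency locks. A distilled sketch (the threshold table values are invented for illustration):

/* Distilled DVFS step selection, mirroring the thresholds-and-clamp logic
 * above. Table values are made up; this is not the platform's real table. */
typedef struct { int min_threshold, max_threshold; } dvfs_level;

static const dvfs_level sketch_tbl[] = {
    {  0, 70 }, { 50, 80 }, { 60, 90 }, { 70, 100 }
};
#define SKETCH_STEPS (int)(sizeof(sketch_tbl) / sizeof(sketch_tbl[0]))

static int sketch_next_step(int step, int utilisation, int upper_lock, int under_lock)
{
    if (utilisation > sketch_tbl[step].max_threshold && step < SKETCH_STEPS - 1)
        step++;                              /* busy: raise frequency */
    else if (step > 0 && utilisation < sketch_tbl[step].min_threshold)
        step--;                              /* idle: lower frequency */

    if (upper_lock >= 0 && step > upper_lock)
        step = upper_lock;                   /* thermal/user cap */
    if (under_lock > 0 && step < under_lock)
        step = under_lock;                   /* performance floor */
    return step;
}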
Example 14: kbasep_js_remove_job
void kbasep_js_remove_job( kbase_context *kctx, kbase_jd_atom *atom )
{
    kbasep_js_policy_cfs_ctx *ctx_info;
    kbasep_js_kctx_info *js_kctx_info;
    kbase_device *kbdev;
    kbasep_js_device_data *js_devdata;
    kbasep_js_policy *js_policy;
    mali_bool nss_state_changed;

    OSK_ASSERT( kctx != NULL );
    OSK_ASSERT( atom != NULL );

    kbdev = kctx->kbdev;
    js_devdata = &kbdev->js_data;
    js_policy = &kbdev->js_data.policy;
    js_kctx_info = &kctx->jctx.sched_info;

    /* De-refcount ctx.nr_jobs */
    OSK_ASSERT( js_kctx_info->ctx.nr_jobs > 0 );
    --(js_kctx_info->ctx.nr_jobs);

    ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;

    /* Adjust the context priority to no longer include the removed job */
    OSK_ASSERT( ctx_info->bag_total_nr_atoms > 0 );
    ctx_info->bag_total_nr_atoms--;
    ctx_info->bag_total_priority -= atom->nice_prio;
    OSK_ASSERT( ctx_info->bag_total_priority >= 0 );

    /* Get the average priority and convert to the NICE range -20..19 */
    if ( ctx_info->bag_total_nr_atoms )
    {
        ctx_info->bag_priority = (ctx_info->bag_total_priority / ctx_info->bag_total_nr_atoms) - 20;
    }

    osk_spinlock_irq_lock( &js_devdata->runpool_irq.lock );
    nss_state_changed = kbasep_js_check_and_deref_nss_job( js_devdata, kctx, atom );
    osk_spinlock_irq_unlock( &js_devdata->runpool_irq.lock );

    /* De-register the job from the system */
    kbasep_js_policy_term_job( js_policy, atom );

    /* A change in NSS state means we might now be able to run jobs on slots
     * that were previously empty */
    if ( nss_state_changed != MALI_FALSE )
    {
        osk_mutex_lock( &js_devdata->runpool_mutex );
        kbasep_js_try_run_next_job( kbdev );
        osk_mutex_unlock( &js_devdata->runpool_mutex );
    }
}
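The two counters above maintain a running average: bag_total_priority is the sum of each atom's nice_prio, and bag_priority is that average shifted into the NICE range (the -20 offset implies nice_prio is stored as 0..39). The add-side bookkeeping is not shown in this listing; its mirror image would look something like this sketch (a hypothetical helper, not the driver's actual function):

/* Hypothetical add-side mirror of the bookkeeping above: accumulate the
 * atom's priority, then recompute the average and shift it into -20..19.
 * Assumes nice_prio is in the 0..39 range, as the -20 offset implies. */
static void sketch_js_add_job_priority(kbasep_js_policy_cfs_ctx *ctx_info, int nice_prio)
{
    ctx_info->bag_total_nr_atoms++;
    ctx_info->bag_total_priority += nice_prio;
    ctx_info->bag_priority =
        (ctx_info->bag_total_priority / ctx_info->bag_total_nr_atoms) - 20;
}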
Example 15: assign_and_activate_kctx_addr_space
/**
 * Picks a free address space and adds the context to the Policy, then performs
 * a transaction on this AS and the RunPool IRQ lock to:
 * - set up the runpool_irq structure and the context on that AS
 * - activate the MMU on that AS
 * - allow jobs to be submitted on that AS
 *
 * Locking conditions:
 * - Caller must hold the kbasep_js_kctx_info::jsctx_mutex
 * - Caller must hold the kbase_js_device_data::runpool_mutex
 * - AS transaction mutex will be obtained
 * - Runpool IRQ lock will be obtained
 */
STATIC void assign_and_activate_kctx_addr_space( kbase_device *kbdev, kbase_context *kctx )
{
    kbasep_js_device_data *js_devdata;
    kbase_as *current_as;
    kbasep_js_per_as_data *js_per_as_data;
    long ffs_result;

    OSK_ASSERT( kbdev != NULL );
    OSK_ASSERT( kctx != NULL );

    js_devdata = &kbdev->js_data;

    /* Find a free address space */
    ffs_result = osk_find_first_set_bit( js_devdata->as_free );
    /* ASSERT that we should've found a free one */
    OSK_ASSERT( 0 <= ffs_result && ffs_result < kbdev->nr_address_spaces );
    js_devdata->as_free &= ~((u16)(1u << ffs_result));

    /*
     * Transaction on the AS and runpool_irq
     */
    current_as = &kbdev->as[ffs_result];
    js_per_as_data = &js_devdata->runpool_irq.per_as_data[ffs_result];

    osk_mutex_lock( &current_as->transaction_mutex );
    osk_spinlock_irq_lock( &js_devdata->runpool_irq.lock );

    /* NSS handling */
    kbasep_js_check_and_ref_nss_running_ctx( js_devdata, kctx );

    /* Assign the address space */
    kctx->as_nr = (int)ffs_result;

    /* Activate this address space on the MMU */
    kbase_mmu_update( kctx );

    /* Allow it to run jobs */
    kbasep_js_set_submit_allowed( js_devdata, kctx );

    /* Book-keeping */
    js_per_as_data->kctx = kctx;
    js_per_as_data->as_busy_refcount = 0;

    /* Lastly, add the context to the policy's runpool - this really allows it
     * to run jobs */
    kbasep_js_policy_runpool_add_ctx( &js_devdata->policy, kctx );

    /*
     * Transaction complete
     */
    osk_spinlock_irq_unlock( &js_devdata->runpool_irq.lock );
    osk_mutex_unlock( &current_as->transaction_mutex );
}