This article collects typical usage examples of the KBASE_DEBUG_ASSERT function in C++. If you are wrestling with questions such as: what exactly does KBASE_DEBUG_ASSERT do? How is it used? What does real code that calls it look like? Then the hand-picked samples below may be just the help you need.
Fifteen code examples of KBASE_DEBUG_ASSERT are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ code samples.
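Before the examples, a quick orientation: KBASE_DEBUG_ASSERT is the kbase driver's debug-build assertion macro. The following is only a minimal sketch of how such a macro is commonly wired up; the real definition lives in mali_kbase_debug.h and differs in detail (for instance it routes through KBASE_DEBUG_ASSERT_MSG and an assert hook), so treat this as an assumption-laden illustration, not the driver's actual code.

/* Minimal sketch only -- NOT the actual kbase definition. Assumes a
 * kernel build where pr_err() and BUG() are available. */
#ifdef CONFIG_MALI_DEBUG
#define KBASE_DEBUG_ASSERT(expr) \
    do { \
        if (!(expr)) { \
            pr_err("Mali<ASSERT>: %s failed at %s:%d\n", \
                   #expr, __FILE__, __LINE__); \
            BUG(); \
        } \
    } while (0)
#else
/* In release builds the check compiles away entirely, which is why the
 * examples below use it only for programming-error invariants. */
#define KBASE_DEBUG_ASSERT(expr) do { } while (0)
#endif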
Example 1: kbase_platform_dvfs_event
int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation)
{
    unsigned long flags;
    struct exynos_context *platform;

    KBASE_DEBUG_ASSERT(kbdev != NULL);
    platform = (struct exynos_context *) kbdev->platform_context;

    spin_lock_irqsave(&mali_dvfs_spinlock, flags);
    if (platform->time_tick < MALI_DVFS_TIME_INTERVAL) {
        platform->time_tick++;
        platform->time_busy += kbdev->pm.metrics.time_busy;
        platform->time_idle += kbdev->pm.metrics.time_idle;
    } else {
        platform->time_busy = kbdev->pm.metrics.time_busy;
        platform->time_idle = kbdev->pm.metrics.time_idle;
        platform->time_tick = 0;
    }

    /* Once a full window has accumulated, derive the busy percentage */
    if ((platform->time_tick == MALI_DVFS_TIME_INTERVAL) &&
        (platform->time_idle + platform->time_busy > 0))
        platform->utilisation = (100 * platform->time_busy) /
                (platform->time_idle + platform->time_busy);

    mali_dvfs_status_current.utilisation = utilisation;
#ifdef MALI_DEBUG
    printk(KERN_INFO "\n[mali_devfreq]utilization: %d\n", utilisation);
#endif
    spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);

    queue_work_on(0, mali_dvfs_wq, &mali_dvfs_work);

    /* TODO: add error handling here */
    return MALI_TRUE;
}
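The interesting part of this example is the windowed utilisation formula: busy and idle time accumulate for MALI_DVFS_TIME_INTERVAL ticks, then collapse into a percentage. Reduced to a self-contained sketch (hypothetical helper, not driver code):

/* Hypothetical standalone helper -- not part of the driver. */
static int window_utilisation(unsigned long time_busy, unsigned long time_idle)
{
    if (time_busy + time_idle == 0)
        return 0; /* empty window: guard the division, as the example does */
    return (int)((100UL * time_busy) / (time_busy + time_idle));
}
/* e.g. 300 busy ticks against 100 idle ticks yields 75 (percent). */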
Example 2: kbase_pm_register_vsync_callback
void kbase_pm_register_vsync_callback(struct kbase_device *kbdev)
{
    KBASE_DEBUG_ASSERT(kbdev != NULL);

    /* no VSync metrics will be available */
    kbdev->pm.metrics.platform_data = NULL;
}
Example 3: kbase_timeline_job_slot_done
void kbase_timeline_job_slot_done(kbase_device *kbdev, kbase_context *kctx,
                                  kbase_jd_atom *katom, int js,
                                  kbasep_js_atom_done_code done_code)
{
    lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);

    if (done_code & KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT) {
        KBASE_TIMELINE_JOB_START_NEXT(kctx, js, 0);
    } else {
        /* Job finished in JSn_HEAD */
        base_atom_id atom_number = kbase_jd_atom_id(kctx, katom);

        KBASE_TIMELINE_JOB_START_HEAD(kctx, js, 0);
        KBASE_TIMELINE_JOB_STOP(kctx, js, atom_number);

        /* See if we need to trace the job in JSn_NEXT moving to JSn_HEAD */
        if (kbdev->timeline.slot_atoms_submitted[js] > 1) {
            /* Tag events with next_katom's kctx */
            kbase_jm_slot *slot = &kbdev->jm_slots[js];
            kbase_jd_atom *next_katom;
            kbase_context *next_kctx;

            KBASE_DEBUG_ASSERT(kbasep_jm_nr_jobs_submitted(slot) > 0);

            /* Peek the next atom - note that the atom in JSn_HEAD will
             * already have been dequeued */
            next_katom = kbasep_jm_peek_idx_submit_slot(slot, 0);
            next_kctx = next_katom->kctx;
            KBASE_TIMELINE_JOB_START_NEXT(next_kctx, js, 0);
            KBASE_TIMELINE_JOB_START_HEAD(next_kctx, js, 1);
            KBASE_TIMELINE_JOB_START(next_kctx, js, kbase_jd_atom_id(next_kctx, next_katom));
        }
    }
    --kbdev->timeline.slot_atoms_submitted[js];
    KBASE_TIMELINE_ATOMS_SUBMITTED(kctx, js, kbdev->timeline.slot_atoms_submitted[js]);
}
Example 4: KBASE_DEBUG_ASSERT
const struct kbase_pm_ca_policy
*kbase_pm_ca_get_policy(struct kbase_device *kbdev)
{
    KBASE_DEBUG_ASSERT(kbdev != NULL);

    return kbdev->pm.backend.ca_current_policy;
}
Example 5: kbasep_jd_debugfs_ctx_add
int kbasep_jd_debugfs_ctx_add(struct kbase_context *kctx)
{
    /* Refer below for format string, %u is 10 chars max */
    char dir_name[10 * 2 + 2]; /* two %u fields, '_' separator, NUL */

    KBASE_DEBUG_ASSERT(kctx != NULL);

    /* Create per-context directory */
    scnprintf(dir_name, sizeof(dir_name), "%u_%u", kctx->pid, kctx->id);
    kctx->jd_ctx_dir = debugfs_create_dir(dir_name, kctx->kbdev->jd_directory);
    if (IS_ERR(kctx->jd_ctx_dir))
        goto err;

    /* Expose all atoms */
    if (IS_ERR(debugfs_create_file("atoms", S_IRUGO,
            kctx->jd_ctx_dir, kctx, &kbasep_jd_debugfs_atoms_fops)))
        goto err_jd_ctx_dir;

    return 0;

err_jd_ctx_dir:
    debugfs_remove_recursive(kctx->jd_ctx_dir);
err:
    return -1;
}
Example 6: kbase_context_set_create_flags
/**
 * kbase_context_set_create_flags - Set creation flags on a context
 * @kctx:  Kbase context
 * @flags: Flags to set
 *
 * Return: 0 on success, -EINVAL if an unknown flag bit is set
 */
int kbase_context_set_create_flags(struct kbase_context *kctx, u32 flags)
{
    int err = 0;
    struct kbasep_js_kctx_info *js_kctx_info;
    unsigned long irq_flags;

    KBASE_DEBUG_ASSERT(NULL != kctx);
    js_kctx_info = &kctx->jctx.sched_info;

    /* Validate flags */
    if (flags != (flags & BASE_CONTEXT_CREATE_KERNEL_FLAGS)) {
        err = -EINVAL;
        goto out;
    }

    mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
    spin_lock_irqsave(&kctx->kbdev->js_data.runpool_irq.lock, irq_flags);

    /* Translate the flags */
    if ((flags & BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED) == 0)
        js_kctx_info->ctx.flags &= ~((u32) KBASE_CTX_FLAG_SUBMIT_DISABLED);
    if ((flags & BASE_CONTEXT_HINT_ONLY_COMPUTE) != 0)
        js_kctx_info->ctx.flags |= (u32) KBASE_CTX_FLAG_HINT_ONLY_COMPUTE;

    /* Latch the initial attributes into the Job Scheduler */
    kbasep_js_ctx_attr_set_initial_attrs(kctx->kbdev, kctx);

    spin_unlock_irqrestore(&kctx->kbdev->js_data.runpool_irq.lock,
                           irq_flags);
    mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
out:
    return err;
}
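The flag validation above uses a common bitmask idiom: mask the input with the set of permitted bits and compare against the original; any difference means an unknown bit was set. A minimal standalone sketch (hypothetical mask and helper, not driver code):

/* Hypothetical sketch of the validation idiom; ALLOWED_FLAGS stands in
 * for BASE_CONTEXT_CREATE_KERNEL_FLAGS. */
#include <stdbool.h>
#include <stdint.h>

#define ALLOWED_FLAGS 0x07u /* made-up mask for illustration */

static bool flags_valid(uint32_t flags)
{
    /* Masking clears unknown bits; any difference means reject (-EINVAL). */
    return flags == (flags & ALLOWED_FLAGS);
}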
Example 7: KBASE_DEBUG_ASSERT
/* Find region enclosing given address. */
struct kbase_va_region *kbase_region_tracker_find_region_enclosing_address(struct kbase_context *kctx, u64 gpu_addr)
{
    struct rb_node *rbnode;
    struct kbase_va_region *reg;
    u64 gpu_pfn = gpu_addr >> PAGE_SHIFT;

    KBASE_DEBUG_ASSERT(NULL != kctx);
    lockdep_assert_held(&kctx->reg_lock);

    rbnode = kctx->reg_rbtree.rb_node;
    while (rbnode) {
        u64 tmp_start_pfn, tmp_end_pfn;

        reg = rb_entry(rbnode, struct kbase_va_region, rblink);
        tmp_start_pfn = reg->start_pfn;
        tmp_end_pfn = reg->start_pfn + reg->nr_pages;

        /* If start is lower than this, go left. */
        if (gpu_pfn < tmp_start_pfn)
            rbnode = rbnode->rb_left;
        /* If end is higher than this, then go right. */
        else if (gpu_pfn >= tmp_end_pfn)
            rbnode = rbnode->rb_right;
        else /* Enclosing */
            return reg;
    }

    return NULL;
}
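The containment test driving the tree walk is a half-open interval check: a region covers PFNs [start_pfn, start_pfn + nr_pages), so an address equal to the end PFN falls outside it. In isolation (hypothetical helper, not driver code):

/* Hypothetical standalone version of the enclosing-address test. */
#include <stdbool.h>
#include <stdint.h>

static bool pfn_in_region(uint64_t gpu_pfn, uint64_t start_pfn,
                          uint64_t nr_pages)
{
    /* Half-open interval: the end PFN itself is NOT enclosed. */
    return gpu_pfn >= start_pfn && gpu_pfn < start_pfn + nr_pages;
}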
Example 8: kbasep_jd_debugfs_ctx_remove
void kbasep_jd_debugfs_ctx_remove(struct kbase_context *kctx)
{
    KBASE_DEBUG_ASSERT(kctx != NULL);

    if (!IS_ERR(kctx->jd_ctx_dir))
        debugfs_remove_recursive(kctx->jd_ctx_dir);
}
Example 9: kbasep_mem_profile_seq_show
/** Show callback for the @c mem_profile debugfs file.
 *
 * This function is called to get the contents of the @c mem_profile debugfs
 * file. This is a report of current memory usage and distribution in userspace.
 *
 * @param sfile The debugfs entry
 * @param data  Data associated with the entry
 *
 * @return 0 if it successfully prints data into the debugfs entry file,
 *         -1 if it encountered an error
 */
static int kbasep_mem_profile_seq_show(struct seq_file *sfile, void *data)
{
    struct kbase_context *kctx = sfile->private;

    KBASE_DEBUG_ASSERT(kctx != NULL);

    /* MALI_SEC_INTEGRATION */
    {
        struct kbase_device *kbdev = kctx->kbdev;

        atomic_inc(&kctx->mem_profile_showing_state);

        if (kbdev->vendor_callbacks->mem_profile_check_kctx &&
            !kbdev->vendor_callbacks->mem_profile_check_kctx(kctx)) {
            atomic_dec(&kctx->mem_profile_showing_state);
            return 0;
        }
    }

    /* MALI_SEC_INTEGRATION */
    if (kctx->destroying_context) {
        atomic_dec(&kctx->mem_profile_showing_state);
        return 0;
    }

    spin_lock(&kctx->mem_profile_lock);
    /* MALI_SEC_INTEGRATION */
    if (kctx->mem_profile_data) {
        seq_write(sfile, kctx->mem_profile_data, kctx->mem_profile_size);
        seq_putc(sfile, '\n');
    }
    spin_unlock(&kctx->mem_profile_lock);

    atomic_dec(&kctx->mem_profile_showing_state);

    return 0;
}
Example 10: kbasep_cache_clean_worker
/**
 * Workqueue for handling cache cleaning
 */
void kbasep_cache_clean_worker(struct work_struct *data)
{
    struct kbase_device *kbdev;
    unsigned long flags;

    kbdev = container_of(data, struct kbase_device, hwcnt.cache_clean_work);

    mutex_lock(&kbdev->cacheclean_lock);
    kbasep_instr_hwcnt_cacheclean(kbdev);

    spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
    /* Wait for our condition, and any reset to complete */
    while (kbdev->hwcnt.state == KBASE_INSTR_STATE_RESETTING ||
           kbdev->hwcnt.state == KBASE_INSTR_STATE_CLEANING) {
        spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
        wait_event(kbdev->hwcnt.cache_clean_wait,
                   (kbdev->hwcnt.state != KBASE_INSTR_STATE_RESETTING
                    && kbdev->hwcnt.state != KBASE_INSTR_STATE_CLEANING));
        spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
    }
    KBASE_DEBUG_ASSERT(kbdev->hwcnt.state == KBASE_INSTR_STATE_CLEANED);

    /* All finished and idle */
    kbdev->hwcnt.state = KBASE_INSTR_STATE_IDLE;
    kbdev->hwcnt.triggered = 1;
    wake_up(&kbdev->hwcnt.wait);

    spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
    mutex_unlock(&kbdev->cacheclean_lock);
}
Example 11: kbase_gpuprops_set
void kbase_gpuprops_set(kbase_device *kbdev)
{
    kbase_gpu_props *gpu_props;
    struct midg_raw_gpu_props *raw;

    KBASE_DEBUG_ASSERT(NULL != kbdev);
    gpu_props = &kbdev->gpu_props;
    raw = &gpu_props->props.raw_props;

    /* Initialize the base_gpu_props structure from the hardware */
    kbase_gpuprops_get_props(&gpu_props->props, kbdev);

    /* Populate the derived properties */
    kbase_gpuprops_calculate_props(&gpu_props->props, kbdev);

    /* Populate kbase-only fields */
    gpu_props->l2_props.associativity = KBASE_UBFX32(raw->l2_features, 8U, 8);
    gpu_props->l2_props.external_bus_width = KBASE_UBFX32(raw->l2_features, 24U, 8);

    gpu_props->l3_props.associativity = KBASE_UBFX32(raw->l3_features, 8U, 8);
    gpu_props->l3_props.external_bus_width = KBASE_UBFX32(raw->l3_features, 24U, 8);

    gpu_props->mem.core_group = KBASE_UBFX32(raw->mem_features, 0U, 1);
    gpu_props->mem.supergroup = KBASE_UBFX32(raw->mem_features, 1U, 1);

    gpu_props->mmu.va_bits = KBASE_UBFX32(raw->mmu_features, 0U, 8);
    gpu_props->mmu.pa_bits = KBASE_UBFX32(raw->mmu_features, 8U, 8);

    gpu_props->num_cores = hweight64(raw->shader_present);
    gpu_props->num_core_groups = hweight64(raw->l2_present);
    gpu_props->num_supergroups = hweight64(raw->l3_present);
    gpu_props->num_address_spaces = hweight32(raw->as_present);
    gpu_props->num_job_slots = hweight32(raw->js_present);
}
Example 12: kbase_js_affinity_is_violating
STATIC INLINE mali_bool kbase_js_affinity_is_violating(struct kbase_device *kbdev, u64 *affinities)
{
    /* This implementation checks whether the two slots involved in Generic
     * thread creation have intersecting affinity. This is due to
     * micro-architectural issues where a job in slot A targeting cores used
     * by slot B could prevent the job in slot B from making progress until
     * the job in slot A has completed.
     *
     * @note It just so happens that this restriction also allows
     * BASE_HW_ISSUE_8987 to be worked around by placing on job slot 2 the
     * atoms from ctxs with the KBASE_CTX_FLAG_HINT_ONLY_COMPUTE flag set
     */
    u64 affinity_set_left;
    u64 affinity_set_right;
    u64 intersection;

    KBASE_DEBUG_ASSERT(affinities != NULL);

    affinity_set_left = affinities[1];

    if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987)) {
        /* The left set also includes those on the Fragment slot when
         * we are using the HW workaround for BASE_HW_ISSUE_8987 */
        affinity_set_left |= affinities[0];
    }

    affinity_set_right = affinities[2];

    /* A violation occurs when any bit in the left_set is also in the right_set */
    intersection = affinity_set_left & affinity_set_right;

    return (mali_bool) (intersection != (u64) 0u);
}
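Stripped of the workaround logic, the violation test is just a mask intersection: two slots conflict exactly when their core masks share a bit. A runnable sketch (hypothetical helper, not driver code):

/* Hypothetical demonstration of the intersection test. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool masks_intersect(uint64_t left, uint64_t right)
{
    return (left & right) != 0;
}

int main(void)
{
    /* Slot A on cores 0-3, slot B on core 3: shared core, violation. */
    printf("%d\n", masks_intersect(0x0FULL, 0x08ULL)); /* prints 1 */
    /* Disjoint core sets: no violation. */
    printf("%d\n", masks_intersect(0x0FULL, 0x30ULL)); /* prints 0 */
    return 0;
}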
Example 13: kbase_region_tracker_insert
/* This function inserts a region into the tree. */
static void kbase_region_tracker_insert(struct kbase_context *kctx, struct kbase_va_region *new_reg)
{
    u64 start_pfn = new_reg->start_pfn;
    struct rb_node **link = &(kctx->reg_rbtree.rb_node);
    struct rb_node *parent = NULL;

    /* Find the right place in the tree using tree search */
    while (*link) {
        struct kbase_va_region *old_reg;

        parent = *link;
        old_reg = rb_entry(parent, struct kbase_va_region, rblink);

        /* RBTree requires no duplicate entries. */
        KBASE_DEBUG_ASSERT(old_reg->start_pfn != start_pfn);

        if (old_reg->start_pfn > start_pfn)
            link = &(*link)->rb_left;
        else
            link = &(*link)->rb_right;
    }

    /* Put the new node there, and rebalance tree */
    rb_link_node(&(new_reg->rblink), parent, link);
    rb_insert_color(&(new_reg->rblink), &(kctx->reg_rbtree));
}
Example 14: kbasep_jd_debugfs_term
void kbasep_jd_debugfs_term(struct kbase_device *kbdev)
{
    KBASE_DEBUG_ASSERT(kbdev != NULL);

    if (!IS_ERR(kbdev->jd_directory))
        debugfs_remove_recursive(kbdev->jd_directory);
}
Example 15: kbase_js_affinity_is_violating
static inline bool kbase_js_affinity_is_violating(
        struct kbase_device *kbdev,
        u64 *affinities)
{
    /* This implementation checks whether the two slots involved in Generic
     * thread creation have intersecting affinity. This is due to micro-
     * architectural issues where a job in slot A targeting cores used by
     * slot B could prevent the job in slot B from making progress until the
     * job in slot A has completed.
     */
    u64 affinity_set_left;
    u64 affinity_set_right;
    u64 intersection;

    KBASE_DEBUG_ASSERT(affinities != NULL);

    affinity_set_left = affinities[1];
    affinity_set_right = affinities[2];

    /* A violation occurs when any bit in the left_set is also in the
     * right_set */
    intersection = affinity_set_left & affinity_set_right;

    return (bool) (intersection != (u64) 0u);
}