本文整理汇总了C++中pm_qos_request函数的典型用法代码示例。如果您正苦于以下问题:C++ pm_qos_request函数的具体用法?C++ pm_qos_request怎么用?C++ pm_qos_request使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了pm_qos_request函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: update_core_config
/*
 * update_core_config - bring one CPU core online or offline, honoring
 * the PM QoS min/max online-CPU constraints and the Tegra LP cluster.
 *
 * Returns 0 on success, -EINVAL when disabled/out of range/constrained,
 * -EBUSY while the low-power cluster is active.
 */
static int update_core_config(unsigned int cpunumber, bool up)
{
	unsigned int online = num_online_cpus();
	/* QoS value 0 means "no request"; fall back to a 4-core ceiling. */
	int cpus_max = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS) ? : 4;
	int cpus_min = pm_qos_request(PM_QOS_MIN_ONLINE_CPUS);

	if (cpq_state == TEGRA_CPQ_DISABLED || cpunumber >= nr_cpu_ids)
		return -EINVAL;

	if (!up) {
		/* No core removal while the LP cluster is active. */
		if (is_lp_cluster())
			return -EBUSY;
		if (online > cpus_min)
			return cpu_down(cpunumber);
		return -EINVAL;
	}

	if (is_lp_cluster()) {
		/* Record the request in cr_online_requests.
		 * NOTE(review): presumably replayed after leaving the LP
		 * cluster -- confirm against the cluster-switch path. */
		cpumask_set_cpu(cpunumber, &cr_online_requests);
		return -EBUSY;
	}

	/* Only add a core when EDP policy favors it and the cap allows. */
	if (tegra_cpu_edp_favor_up(online, mp_overhead) && online < cpus_max)
		return cpu_up(cpunumber);

	return -EINVAL;
}
示例2: gk20a_scale_qos_notify
/*
 * gk20a_scale_qos_notify - PM QoS notifier callback.
 *
 * Re-evaluates the GPU frequency target when the QoS constraint changes
 * and hands it to the platform's postscale hook. Always returns
 * NOTIFY_OK.
 */
static int gk20a_scale_qos_notify(struct notifier_block *nb,
	unsigned long n, void *p)
{
	struct gk20a_scale_profile *profile =
		container_of(nb, struct gk20a_scale_profile,
			     qos_notify_block);
	struct gk20a_platform *platform = platform_get_drvdata(profile->pdev);
	struct gk20a *g = get_gk20a(profile->pdev);
	unsigned long target;

	/* Nothing to do if the platform has no postscale hook. */
	if (!platform->postscale)
		return NOTIFY_OK;

	/* Start from the rounded QoS request; if devfreq is active, keep
	 * whichever of the two demands is higher. */
	target = gk20a_clk_round_rate(g, pm_qos_request(platform->qos_id));
	if (g->devfreq)
		target = max(g->devfreq->previous_freq, target);

	/* Refresh the GPU load sample so postscale can rescale the EMC
	 * target if the load changed. */
	gk20a_pmu_load_update(g);
	platform->postscale(profile->pdev, target);

	return NOTIFY_OK;
}
示例3: cpufreq_get_touch_boost_press
/*
 * cpufreq_get_touch_boost_press - read the touch-press boost QoS value,
 * caching it in the module-level touch_boost_press_value.
 */
unsigned int cpufreq_get_touch_boost_press(void)
{
	unsigned int val = pm_qos_request(PM_QOS_TOUCH_PRESS);

	touch_boost_press_value = val;
	return val;
}
示例4: ladder_select_state
/**
 * ladder_select_state - selects the next state to enter
 * @drv: cpuidle driver
 * @dev: the CPU
 *
 * Walks the idle-state "ladder" one rung at a time: promote to a deeper
 * state after enough consecutive long residencies, demote after short
 * ones or when the current state is disabled / violates the PM QoS
 * latency bound.
 */
static int ladder_select_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{
struct ladder_device *ldev = this_cpu_ptr(&ladder_devices);
struct ladder_device_state *last_state;
int last_residency, last_idx = ldev->last_state_idx;
/* Tightest CPU wakeup-latency bound currently requested via PM QoS. */
int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
/* Special case when user has set very strict latency requirement */
if (unlikely(latency_req == 0)) {
ladder_do_selection(ldev, last_idx, 0);
return 0;
}
last_state = &ldev->states[last_idx];
/* Residency net of exit latency: time actually spent idle. */
last_residency = cpuidle_get_last_residency(dev) - drv->states[last_idx].exit_latency;
/* consider promotion: the next state exists, is enabled, we stayed
 * idle long enough, and its exit latency fits the QoS bound */
if (last_idx < drv->state_count - 1 &&
!drv->states[last_idx + 1].disabled &&
!dev->states_usage[last_idx + 1].disable &&
last_residency > last_state->threshold.promotion_time &&
drv->states[last_idx + 1].exit_latency <= latency_req) {
last_state->stats.promotion_count++;
last_state->stats.demotion_count = 0;
if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
ladder_do_selection(ldev, last_idx, last_idx + 1);
return last_idx + 1;
}
}
/* forced demotion: the current state became unusable (disabled or its
 * exit latency now violates the QoS bound) -- immediately drop to the
 * deepest state that still fits */
if (last_idx > CPUIDLE_DRIVER_STATE_START &&
(drv->states[last_idx].disabled ||
dev->states_usage[last_idx].disable ||
drv->states[last_idx].exit_latency > latency_req)) {
int i;
for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) {
if (drv->states[i].exit_latency <= latency_req)
break;
}
ladder_do_selection(ldev, last_idx, i);
return i;
}
/* consider demotion: residency was too short for this depth */
if (last_idx > CPUIDLE_DRIVER_STATE_START &&
last_residency < last_state->threshold.demotion_time) {
last_state->stats.demotion_count++;
last_state->stats.promotion_count = 0;
if (last_state->stats.demotion_count >= last_state->threshold.demotion_count) {
ladder_do_selection(ldev, last_idx, last_idx - 1);
return last_idx - 1;
}
}
/* otherwise remain at the current state */
return last_idx;
}
示例5: show_bus_int_freq_min
static ssize_t show_bus_int_freq_min(struct kobject *kobj,
struct attribute *attr, char *buf)
{
unsigned int ret = 0;
ret = sprintf(buf, "%d\n", pm_qos_request(PM_QOS_DEVICE_THROUGHPUT));
return ret;
}
示例6: show_bimc_freq_min
static ssize_t show_bimc_freq_min(struct kobject *kobj,
struct attribute *attr, char *buf)
{
unsigned int ret = 0;
ret = sprintf(buf, "%d\n", pm_qos_request(PM_QOS_BIMC_FREQ_MIN));
return ret;
}
示例7: tegra_cpu_speed_balance
/*
 * tegra_cpu_speed_balance - classify the current per-CPU frequency
 * distribution to drive hotplug decisions:
 *   SKEWED   - enough CPUs lag far behind (or EDP/idle policy favors
 *              fewer cores); a core may be removed
 *   BIASED   - at least one CPU lags or up-scaling is unfavorable;
 *              do not add cores
 *   BALANCED - all CPUs are busy near top speed; adding a core is OK
 */
static noinline int tegra_cpu_speed_balance(void)
{
unsigned long highest_speed = tegra_cpu_highest_speed();
/* balance_level is a percentage of the fastest CPU's target. */
unsigned long balanced_speed = highest_speed * balance_level / 100;
unsigned long skewed_speed = balanced_speed / 2;
unsigned int nr_cpus = num_online_cpus();
/* QoS value 0 means "no request"; fall back to a 4-core ceiling. */
unsigned int max_cpus = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS) ? : 4;
unsigned int min_cpus = pm_qos_request(PM_QOS_MIN_ONLINE_CPUS);
/* balanced: freq targets for all CPUs are above 50% of highest speed
biased: freq target for at least one CPU is below 50% threshold
skewed: freq targets for at least 2 CPUs are below 25% threshold */
if (((tegra_count_slow_cpus(skewed_speed) >= 2) ||
tegra_cpu_edp_favor_down(nr_cpus, mp_overhead) ||
(highest_speed <= idle_bottom_freq) || (nr_cpus > max_cpus)) &&
(nr_cpus > min_cpus))
return TEGRA_CPU_SPEED_SKEWED;
if (((tegra_count_slow_cpus(balanced_speed) >= 1) ||
(!tegra_cpu_edp_favor_up(nr_cpus, mp_overhead)) ||
(highest_speed <= idle_bottom_freq) || (nr_cpus == max_cpus)) &&
(nr_cpus >= min_cpus))
return TEGRA_CPU_SPEED_BIASED;
return TEGRA_CPU_SPEED_BALANCED;
}
示例8: update_runnables_state
/*
 * update_runnables_state - recompute runnables_state (UP/DOWN/IDLE)
 * from the averaged run-queue depth versus per-core thresholds and the
 * PM QoS online-CPU bounds.
 */
static void update_runnables_state(void)
{
unsigned int nr_cpus = num_online_cpus();
/* QoS value 0 means "no request"; fall back to a 4-core ceiling. */
int max_cpus = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS) ? : 4;
int min_cpus = pm_qos_request(PM_QOS_MIN_ONLINE_CPUS);
unsigned int avg_nr_run = avg_nr_running();
unsigned int nr_run;
if (runnables_state == DISABLED)
return;
/* Find the smallest target core count whose threshold covers the
 * averaged runnable count (fixed-point comparison). */
for (nr_run = 1; nr_run < ARRAY_SIZE(nr_run_thresholds); nr_run++) {
unsigned int nr_threshold = nr_run_thresholds[nr_run - 1];
/* Hysteresis: raise the bar when we were already at or below this
 * level last time, to avoid flapping between core counts. */
if (nr_run_last <= nr_run)
nr_threshold += NR_FSHIFT / nr_run_hysteresis;
if (avg_nr_run <= (nr_threshold << (FSHIFT - NR_FSHIFT_EXP)))
break;
}
nr_run_last = nr_run;
/* Compare the desired core count against what is online, clamped by
 * the QoS bounds. */
if ((nr_cpus > max_cpus || nr_run < nr_cpus) && nr_cpus >= min_cpus) {
runnables_state = DOWN;
} else if (nr_cpus < min_cpus || nr_run > nr_cpus) {
runnables_state = UP;
} else {
runnables_state = IDLE;
}
}
示例9: show_cpu_online_min
static ssize_t show_cpu_online_min(struct kobject *kobj,
struct attribute *attr, char *buf)
{
unsigned int ret = 0;
ret = sprintf(buf, "%d\n", pm_qos_request(PM_QOS_CPU_ONLINE_MIN));
return ret;
}
示例10: devfreq_simple_ondemand_func
static int devfreq_simple_ondemand_func(struct devfreq *df,
unsigned long *freq)
{
struct devfreq_dev_status stat;
int err;
unsigned long long a, b;
unsigned int dfso_upthreshold = DFSO_UPTHRESHOLD;
unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENCTIAL;
unsigned int dfso_multiplication_weight = DFSO_WEIGHT;
struct devfreq_simple_ondemand_data *data = df->data;
unsigned long max = (df->max_freq) ? df->max_freq : UINT_MAX;
unsigned long pm_qos_min = 0;
if (data) {
pm_qos_min = pm_qos_request(data->pm_qos_class);
if (pm_qos_min >= data->cal_qos_max) {
*freq = pm_qos_min;
return 0;
}
}
if (df->profile->get_dev_status) {
err = df->profile->get_dev_status(df->dev.parent, &stat);
} else {
*freq = pm_qos_min;
return 0;
}
if (err)
return err;
if (data) {
if (data->upthreshold)
dfso_upthreshold = data->upthreshold;
if (data->downdifferential)
dfso_downdifferential = data->downdifferential;
if (data->multiplication_weight)
dfso_multiplication_weight = data->multiplication_weight;
}
if (dfso_upthreshold > 100 ||
dfso_upthreshold < dfso_downdifferential)
return -EINVAL;
if (data && data->cal_qos_max)
max = (df->max_freq) ? df->max_freq : 0;
/* Assume MAX if it is going to be divided by zero */
if (stat.total_time == 0) {
if (data && data->cal_qos_max)
max = max3(max, data->cal_qos_max, pm_qos_min);
*freq = max;
return 0;
}
/* Prevent overflow */
if (stat.busy_time >= (1 << 24) || stat.total_time >= (1 << 24)) {
stat.busy_time >>= 7;
stat.total_time >>= 7;
}
示例11: cpu_up
/*
 * cpu_up - bring a CPU online.
 *
 * Returns 0 on success or when the PM QoS online-CPU ceiling is already
 * reached (the request is silently dropped), -EINVAL for a CPU that is
 * not hot-addable, -EBUSY while hotplug is disabled, -ENOMEM for a
 * missing pgdat, or the error from memory-node onlining / _cpu_up().
 */
int __cpuinit cpu_up(unsigned int cpu)
{
int err = 0;
#ifdef CONFIG_MEMORY_HOTPLUG
int nid;
pg_data_t *pgdat;
#endif
/* NOTE(review): returning success without onlining anything when the
 * QoS cap is hit may surprise callers that expect the CPU online
 * afterwards -- confirm this is intended. */
if (num_online_cpus() >= pm_qos_request(PM_QOS_CPU_ONLINE_MAX))
return 0;
if (!cpu_possible(cpu)) {
printk(KERN_ERR "can't online cpu %d because it is not "
"configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
printk(KERN_ERR "please check additional_cpus= boot "
"parameter\n");
#endif
return -EINVAL;
}
#ifdef CONFIG_MEMORY_HOTPLUG
/* The CPU's memory node must be online before the CPU itself. */
nid = cpu_to_node(cpu);
if (!node_online(nid)) {
err = mem_online_node(nid);
if (err)
return err;
}
pgdat = NODE_DATA(nid);
if (!pgdat) {
printk(KERN_ERR
"Can't online cpu %d due to NULL pgdat\n", cpu);
return -ENOMEM;
}
/* Build this node's zonelists lazily if they are still empty. */
if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
mutex_lock(&zonelists_mutex);
build_all_zonelists(NULL);
mutex_unlock(&zonelists_mutex);
}
#endif
/* Serialize against concurrent hotplug operations. */
cpu_maps_update_begin();
if (cpu_hotplug_disabled) {
err = -EBUSY;
goto out;
}
err = _cpu_up(cpu, 0);
out:
cpu_maps_update_done();
return err;
}
示例12: show_cpufreq_max
/*
 * show_cpufreq_max - sysfs show handler for the CPU max-frequency QoS
 * cap; also refreshes the module-level max_cpu_freq cache.
 *
 * Returns the number of bytes written to @buf.
 */
static ssize_t show_cpufreq_max(struct kobject *kobj,
	struct attribute *attr, char *buf)
{
	max_cpu_freq = pm_qos_request(PM_QOS_CPU_FREQ_MAX);

	return snprintf(buf, PAGE_SIZE, "%d\n", max_cpu_freq);
}
示例13: store_cpu_online_max
/*
 * store_cpu_online_max - sysfs store handler: update the online-CPU
 * maximum QoS request and, if the new cap is already exceeded, take one
 * core down.
 */
static ssize_t store_cpu_online_max(struct kobject *a, struct attribute *b,
	const char *buf, size_t count)
{
	set_pmqos_data(cpu_online_max_qos_array, PM_QOS_CPU_ONLINE_MAX, buf);

	/* NOTE(review): cpu1 is hardwired here, and only a single core is
	 * removed per write -- confirm that is intended. */
	if (num_online_cpus() > pm_qos_request(PM_QOS_CPU_ONLINE_MAX))
		cpu_down(1);

	return count;
}
示例14: store_cpu_online_min
/*
 * store_cpu_online_min - sysfs store handler: update the online-CPU
 * minimum QoS request and, if fewer CPUs are online than the new floor,
 * bring cpu1 up toward satisfying it.
 */
static ssize_t store_cpu_online_min(struct kobject *a, struct attribute *b,
	const char *buf, size_t count)
{
	set_pmqos_data(cpu_online_min_qos_array, PM_QOS_CPU_ONLINE_MIN, buf);

	if (num_online_cpus() < pm_qos_request(PM_QOS_CPU_ONLINE_MIN)) {
		/* __func__ replaces the deprecated GNU __FUNCTION__ alias
		 * (same resulting string; checkpatch warns on the latter). */
		pr_info("%s cpu_up\n", __func__);
		cpu_up(1);
	}

	return count;
}
示例15: find_coupled_state
/*
 * find_coupled_state - Find the deepest low-power state the platform
 * can enter, given every CPU's vote within the cluster
 *
 * @index: pointer to variable which stores the resulting state index
 * @cluster: cluster number
 *
 * Must be called with mmp_lpm_lock held.
 */
static void find_coupled_state(int *index, int cluster)
{
int i;
int platform_lpm = DEFAULT_LPM_FLAG;
/* AND the per-CPU votes: a state bit survives only if every CPU in
 * the cluster has it set. */
for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++)
platform_lpm &= mmp_enter_lpm[cluster][i];
/* First zero bit marks the shallowest commonly-disallowed state;
 * cap it with the PM QoS cpuidle block request, then -1 converts the
 * count into the deepest allowed state index. */
*index = min(find_first_zero_bit((void *)&platform_lpm, LPM_NUM),
pm_qos_request(PM_QOS_CPUIDLE_BLOCK)) - 1;
}