This page collects typical usage examples of the C function read_cpuid_mpidr. If you are unsure what read_cpuid_mpidr does, how to call it, or what real callers look like, the hand-picked code samples below should help.
15 code examples of read_cpuid_mpidr are shown below, ordered by popularity by default.
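Before diving into the examples, a quick orientation: read_cpuid_mpidr() returns the raw value of the CPU's MPIDR (Multiprocessor Affinity Register), and the MPIDR_AFFINITY_LEVEL() macro extracts its affinity fields, with level 0 being the CPU number inside a cluster and level 1 the cluster number. The minimal sketch below is illustrative only (the helper name and message are not taken from any of the examples, and it uses the 32-bit ARM style most examples use); it shows the decode pattern that nearly every example repeats. Because the register describes the CPU the code happens to run on, the examples call it from CPU-local paths such as suspend finishers and power-up hooks.

#include <linux/printk.h>
#include <asm/cputype.h>        /* read_cpuid_mpidr(), MPIDR_AFFINITY_LEVEL() */

/* Illustrative helper: report which physical CPU/cluster is executing this code. */
static void my_report_cpu_position(void)
{
        unsigned int mpidr = read_cpuid_mpidr();
        unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);      /* Aff0: CPU within its cluster */
        unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);  /* Aff1: cluster id */

        pr_info("running on cluster %u, cpu %u (mpidr %#x)\n", cluster, cpu, mpidr);
}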
Example 1: exynos5420_cpu_suspend
static int exynos5420_cpu_suspend(unsigned long arg)
{
        /* MCPM works with HW CPU identifiers */
        unsigned int mpidr = read_cpuid_mpidr();
        unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

        __raw_writel(0x0, sysram_base_addr + EXYNOS5420_CPU_STATE);

        if (IS_ENABLED(CONFIG_EXYNOS5420_MCPM)) {
                mcpm_set_entry_vector(cpu, cluster, exynos_cpu_resume);

                /*
                 * Residency value passed to mcpm_cpu_suspend back-end
                 * has to be given clear semantics. Set to 0 as a
                 * temporary value.
                 */
                mcpm_cpu_suspend(0);
        }

        pr_info("Failed to suspend the system\n");

        /* return value != 0 means failure */
        return 1;
}
Example 2: sunxi_cpu_power_down_c2state
static int sunxi_cpu_power_down_c2state(struct cpuidle_device *dev,
                                        struct cpuidle_driver *drv,
                                        int index)
{
        unsigned int mpidr = read_cpuid_mpidr();
        unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

        cpu_pm_enter();
        //cpu_cluster_pm_enter();
        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
        smp_wmb();

        cpu_suspend(CPUIDLE_FLAG_C2_STATE, sunxi_powerdown_c2_finisher);

        /*
         * Since this is called with IRQs enabled, and no arch_spin_lock_irq
         * variant exists, we need to disable IRQs manually here.
         */
        local_irq_disable();
        arch_spin_lock(&sun8i_mcpm_lock);
        sun8i_cpu_use_count[cluster][cpu]++;
        sun8i_cluster_use_count[cluster]++;
        arch_spin_unlock(&sun8i_mcpm_lock);
        local_irq_enable();

        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
        //cpu_cluster_pm_exit();
        cpu_pm_exit();

        return index;
}
Example 3: mcpm_cpu_powered_up
int mcpm_cpu_powered_up(void)
{
        unsigned int mpidr, cpu, cluster;
        bool cpu_was_down, first_man;
        unsigned long flags;

        if (!platform_ops)
                return -EUNATCH;

        mpidr = read_cpuid_mpidr();
        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

        local_irq_save(flags);
        arch_spin_lock(&mcpm_lock);

        cpu_was_down = !mcpm_cpu_use_count[cluster][cpu];
        first_man = mcpm_cluster_unused(cluster);

        if (first_man && platform_ops->cluster_is_up)
                platform_ops->cluster_is_up(cluster);
        if (cpu_was_down)
                mcpm_cpu_use_count[cluster][cpu] = 1;
        if (platform_ops->cpu_is_up)
                platform_ops->cpu_is_up(cpu, cluster);

        arch_spin_unlock(&mcpm_lock);
        local_irq_restore(flags);

        return 0;
}
Example 4: mcpm_sync_init
int __init mcpm_sync_init(
        void (*power_up_setup)(unsigned int affinity_level))
{
        unsigned int i, j, mpidr, this_cluster;

        BUILD_BUG_ON(MCPM_SYNC_CLUSTER_SIZE * MAX_NR_CLUSTERS != sizeof mcpm_sync);
        BUG_ON((unsigned long)&mcpm_sync & (__CACHE_WRITEBACK_GRANULE - 1));

        /*
         * Set initial CPU and cluster states.
         * Only one cluster is assumed to be active at this point.
         */
        for (i = 0; i < MAX_NR_CLUSTERS; i++) {
                mcpm_sync.clusters[i].cluster = CLUSTER_DOWN;
                mcpm_sync.clusters[i].inbound = INBOUND_NOT_COMING_UP;
                for (j = 0; j < MAX_CPUS_PER_CLUSTER; j++)
                        mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN;
        }
        mpidr = read_cpuid_mpidr();
        this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        for_each_online_cpu(i)
                mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
        mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
        sync_cache_w(&mcpm_sync);

        if (power_up_setup) {
                mcpm_power_up_setup_phys = virt_to_phys(power_up_setup);
                sync_cache_w(&mcpm_power_up_setup_phys);
        }

        return 0;
}
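For context on Example 4: mcpm_sync_init() is normally called once from a platform back-end's init code, after mcpm_platform_register() has installed the platform's mcpm_platform_ops. A hedged sketch of that call order follows; my_power_ops and my_power_up_setup stand in for the platform-specific pieces and are assumptions, not code taken from any of the examples.

#include <linux/init.h>
#include <asm/mcpm.h>

extern void my_power_up_setup(unsigned int affinity_level);   /* assumed assembly helper */
static const struct mcpm_platform_ops my_power_ops;           /* assumed; real code fills in the callbacks */

static int __init my_mcpm_init(void)
{
        int ret;

        ret = mcpm_platform_register(&my_power_ops);
        if (!ret)
                ret = mcpm_sync_init(my_power_up_setup);
        return ret;
}
early_initcall(my_mcpm_init);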
Example 5: store_cpu_topology
void store_cpu_topology(unsigned int cpuid)
{
        struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
        u64 mpidr;

        if (cpuid_topo->cluster_id != -1)
                goto topology_populated;

        mpidr = read_cpuid_mpidr();

        /* Uniprocessor systems can rely on default topology values */
        if (mpidr & MPIDR_UP_BITMASK)
                return;

        /* Create cpu topology mapping based on MPIDR. */
        if (mpidr & MPIDR_MT_BITMASK) {
                /* Multiprocessor system : Multi-threads per core */
                cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
                cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
                cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
        } else {
                /* Multiprocessor system : Single-thread per core */
                cpuid_topo->thread_id = -1;
                cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
                cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        }

        pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
                 cpuid, cpuid_topo->cluster_id, cpuid_topo->core_id,
                 cpuid_topo->thread_id, mpidr);

topology_populated:
        update_siblings_masks(cpuid);
}
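Once store_cpu_topology() has populated cpu_topology[], the rest of the kernel reads the result back through the generic topology accessors rather than decoding MPIDR again. A small illustrative sketch (the helper name and message are assumptions):

#include <linux/printk.h>
#include <linux/topology.h>

static void my_show_package(unsigned int cpu)
{
        /* topology_physical_package_id() resolves to the cluster id stored above */
        pr_info("CPU%u belongs to physical package %d\n",
                cpu, topology_physical_package_id(cpu));
}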
Example 6: store_cpu_topology
/*
 * store_cpu_topology is called at boot when only one cpu is running
 * and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
 * which prevents simultaneous write access to cpu_topology array
 */
void store_cpu_topology(unsigned int cpuid)
{
        struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
        unsigned int mpidr;
        unsigned int cpu;

        /* If the cpu topology has been already set, just return */
        if (cpuid_topo->core_id != -1)
                return;

        mpidr = read_cpuid_mpidr();

        /* create cpu topology mapping */
        if ((mpidr & MPIDR_SMP_BITMASK) == MPIDR_SMP_VALUE) {
                /*
                 * This is a multiprocessor system
                 * multiprocessor format & multiprocessor mode field are set
                 */
                if (mpidr & MPIDR_MT_BITMASK) {
                        /* core performance interdependency */
                        cpuid_topo->thread_id = (mpidr >> MPIDR_LEVEL0_SHIFT)
                                & MPIDR_LEVEL0_MASK;
                        cpuid_topo->core_id = (mpidr >> MPIDR_LEVEL1_SHIFT)
                                & MPIDR_LEVEL1_MASK;
                        cpuid_topo->socket_id = (mpidr >> MPIDR_LEVEL2_SHIFT)
                                & MPIDR_LEVEL2_MASK;
                } else {
                        /* single thread per core: core id is at level 0 */
                        cpuid_topo->thread_id = -1;
                        cpuid_topo->core_id = (mpidr >> MPIDR_LEVEL0_SHIFT)
                                & MPIDR_LEVEL0_MASK;
                        cpuid_topo->socket_id = (mpidr >> MPIDR_LEVEL1_SHIFT)
                                & MPIDR_LEVEL1_MASK;
                }
        } else {
                /* uniprocessor format, or uniprocessor mode: default ids */
                cpuid_topo->thread_id = -1;
                cpuid_topo->core_id = 0;
                cpuid_topo->socket_id = -1;
        }

        update_siblings_masks(cpuid);
}
Example 7: main
/*
 * Entry point.
 *
 * Unpack images, setup the MMU, jump to the kernel.
 */
void main(void)
{
        int num_apps;
        int cpu_mode;

#ifdef CONFIG_SMP_ARM_MPCORE
        /* If not the boot strap processor then go to non boot main */
        if ((read_cpuid_mpidr() & 0xf) != booting_cpu_id) {
                non_boot_main();
        }
#endif

        /* Print welcome message. */
        printf("\nELF-loader started on ");
        print_cpuid();
        platform_init();
        printf(" paddr=[%p..%p]\n", _start, _end - 1);

        /* Unpack ELF images into memory. */
        load_images(&kernel_info, &user_info, 1, &num_apps);
        if (num_apps != 1) {
                printf("No user images loaded!\n");
                abort();
        }

        /* Setup MMU. */
        cpu_mode = read_cpsr() & CPSR_MODE_MASK;
        if (cpu_mode == CPSR_MODE_HYPERVISOR) {
                printf("Enabling hypervisor MMU and paging\n");
                init_lpae_boot_pd(&kernel_info);
                arm_enable_hyp_mmu();
        }
        /* If we are in HYP mode, we enable the SV MMU and paging
         * just in case the kernel does not support hyp mode. */
        printf("Enabling MMU and paging\n");
        init_boot_pd(&kernel_info);
        arm_enable_mmu();

#ifdef CONFIG_SMP_ARM_MPCORE
        /* Bring up any other CPUs */
        init_cpus();
        non_boot_lock = 1;
#endif

        /* Enter kernel. */
        if (UART_PPTR < kernel_info.virt_region_start) {
                printf("Jumping to kernel-image entry point...\n\n");
        } else {
                /* Our serial port is no longer accessible */
        }
        ((init_kernel_t)kernel_info.virt_entry)(user_info.phys_region_start,
                                                user_info.phys_region_end,
                                                user_info.phys_virt_offset,
                                                user_info.virt_entry);

        /* We should never get here. */
        printf("Kernel returned back to the elf-loader.\n");
        abort();
}
Example 8: exynos_set_core_flag
/*
 * exynos_set_core_flag - set the cluster id to IROM register
 *                        to ensure that we wake up with the
 *                        current cluster.
 */
static void exynos_set_core_flag(void)
{
        int cluster_id = (read_cpuid_mpidr() >> 8) & 0xf;

        if (cluster_id)
                __raw_writel(1, EXYNOS_IROM_DATA2);
        else
                __raw_writel(0, EXYNOS_IROM_DATA2);
}
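Examples 8 and 11 pick the cluster id out of the raw register with (mpidr >> 8) & 0xf, which hard-codes the position and width of the Aff1 field. Where the MPIDR_AFFINITY_LEVEL() macro is available, it expresses the same thing without magic numbers (the macro masks with 0xff rather than 0xf, which is equivalent on these SoCs). The rewrite below is only an illustration of that substitution, not the upstream code:

#include <linux/io.h>           /* __raw_writel() */
#include <asm/cputype.h>

static void my_set_core_flag(void)
{
        /* Aff1 is the cluster id */
        unsigned int cluster_id = MPIDR_AFFINITY_LEVEL(read_cpuid_mpidr(), 1);

        __raw_writel(cluster_id ? 1 : 0, EXYNOS_IROM_DATA2);
}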
Example 9: mcpm_cpu_die
static void mcpm_cpu_die(unsigned int cpu)
{
        unsigned int mpidr, pcpu, pcluster;

        mpidr = read_cpuid_mpidr();
        pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

        /* A NULL entry vector makes a re-awakened CPU wait in the MCPM
         * entry code instead of jumping to a stale resume address. */
        mcpm_set_entry_vector(pcpu, pcluster, NULL);
        mcpm_cpu_power_down();
}
Example 10: mcpm_powerdown_finisher
static int notrace mcpm_powerdown_finisher(unsigned long arg)
{
        u32 mpidr = read_cpuid_mpidr();
        u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        u32 this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

        mcpm_set_entry_vector(cpu, this_cluster, cpu_resume);
        mcpm_cpu_suspend(arg);
        return 1;
}
Example 11: bl_powerdown_finisher
static int notrace bl_powerdown_finisher(unsigned long arg)
{
        unsigned int mpidr = read_cpuid_mpidr();
        unsigned int cluster = (mpidr >> 8) & 0xf;
        unsigned int cpu = mpidr & 0xf;

        mcpm_set_entry_vector(cpu, cluster, cpu_resume);
        mcpm_cpu_suspend(0); /* 0 should be replaced with better value here */
        return 1;
}
Example 12: store_boot_cpu_info
static void store_boot_cpu_info(void)
{
        unsigned int mpidr = read_cpuid_mpidr();

        boot_core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        boot_cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);

        pr_info("A booting CPU: core %d cluster %d\n", boot_core_id,
                boot_cluster_id);
}
Example 13: sunxi_powerdown_c2_finisher
/*
 * notrace prevents trace shims from getting inserted where they
 * should not. Global jumps and ldrex/strex must not be inserted
 * in power down sequences where caches and MMU may be turned off.
 */
static int notrace sunxi_powerdown_c2_finisher(unsigned long flg)
{
        /* MCPM works with HW CPU identifiers */
        unsigned int mpidr = read_cpuid_mpidr();
        unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        bool last_man = false;
        struct sunxi_enter_idle_para sunxi_idle_para;

        mcpm_set_entry_vector(cpu, cluster, cpu_resume);

        arch_spin_lock(&sun8i_mcpm_lock);
        sun8i_cpu_use_count[cluster][cpu]--;
        /* check whether this CPU is the cluster's last man and set the flag */
        sun8i_cluster_use_count[cluster]--;
        if (sun8i_cluster_use_count[cluster] == 0) {
                writel(1, CLUSTER_CPUX_FLG(cluster, cpu));
                last_man = true;
        }
        arch_spin_unlock(&sun8i_mcpm_lock);

        /* ask the power-management coprocessor to power off this CPU */
        sunxi_idle_para.flags = (unsigned long)mpidr | flg;
        sunxi_idle_para.resume_addr = (void *)(virt_to_phys(mcpm_entry_point));
        arisc_enter_cpuidle(NULL, NULL, &sunxi_idle_para);

        if (last_man) {
                int t = 0;

                /*
                 * Wait for the coprocessor to acknowledge the message and
                 * confirm whether this CPU really is the last man, then
                 * clear the flag.
                 */
                while (1) {
                        udelay(2);
                        if (readl(CLUSTER_CPUS_FLG(cluster, cpu)) == 2) {
                                writel(0, CLUSTER_CPUX_FLG(cluster, cpu));
                                break; /* last_man is true */
                        } else if (readl(CLUSTER_CPUS_FLG(cluster, cpu)) == 3) {
                                writel(0, CLUSTER_CPUX_FLG(cluster, cpu));
                                goto out; /* last_man is false */
                        }
                        if (++t > 5000) {
                                printk(KERN_WARNING "cpu%d idle time out!\n",
                                       cluster * 4 + cpu);
                                t = 0;
                        }
                }
                sunxi_idle_cluster_die(cluster);
        }
out:
        sunxi_idle_cpu_die();

        /* return value != 0 means failure */
        return 1;
}
Example 14: exynos5420_powerdown_conf
static void exynos5420_powerdown_conf(enum sys_powerdown mode)
{
        u32 this_cluster;

        this_cluster = MPIDR_AFFINITY_LEVEL(read_cpuid_mpidr(), 1);

        /*
         * set the cluster id to IROM register to ensure that we wake
         * up with the current cluster.
         */
        pmu_raw_writel(this_cluster, EXYNOS_IROM_DATA2);
}
Example 15: mmp_pm_usage_count_init
static void __init mmp_pm_usage_count_init(void)
{
        unsigned int mpidr, cpu, cluster;

        mpidr = read_cpuid_mpidr();
        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        BUG_ON(cpu >= MAX_CPUS_PER_CLUSTER || cluster >= MAX_NR_CLUSTERS);

        memset(mmp_pm_use_count, 0, sizeof(mmp_pm_use_count));
        /* only the boot CPU is running at this point, so it alone counts as up */
        mmp_pm_use_count[cluster][cpu] = 1;
}
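For completeness: a use-count table such as mmp_pm_use_count is normally decremented again on the power-down path to decide whether the caller is the last CPU still up in its cluster, much as Examples 2 and 13 do with the sun8i counters. The sketch below is hypothetical; the lock and the helper name are assumptions and are not taken from the mmp code.

#include <linux/spinlock.h>
#include <asm/mcpm.h>           /* MAX_CPUS_PER_CLUSTER */

static arch_spinlock_t my_mmp_lock = __ARCH_SPIN_LOCK_UNLOCKED;  /* assumed lock */

/* Returns true when this CPU is the last one in its cluster still counted as up. */
static bool my_mmp_cpu_going_down(unsigned int cpu, unsigned int cluster)
{
        bool last_man = true;
        int i;

        arch_spin_lock(&my_mmp_lock);
        mmp_pm_use_count[cluster][cpu]--;
        for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++)
                if (mmp_pm_use_count[cluster][i])
                        last_man = false;
        arch_spin_unlock(&my_mmp_lock);

        return last_man;
}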