

C++ cpumask_of Function Code Examples

This article collects typical usage examples of the cpumask_of function in C++. If you have been wondering what exactly cpumask_of does, how to call it, or what real-world uses of it look like, the hand-picked code examples below should help.


The following presents 15 code examples of the cpumask_of function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples. A short primer on the function itself follows before Example 1.
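
Primer: cpumask_of(cpu) is a Linux kernel macro (declared in include/linux/cpumask.h) that returns a const struct cpumask * in which only the bit for cpu is set, so a single CPU id can be handed to any API that expects a CPU mask. The sketch below is a hypothetical demo module, not taken from the examples that follow; the cpumask_of_demo_* names are made up for illustration.

#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/smp.h>

static int __init cpumask_of_demo_init(void)
{
	int cpu = get_cpu();	/* disable preemption while we use our CPU id */
	const struct cpumask *mask = cpumask_of(cpu);

	/* Exactly one bit is set in the mask: the one belonging to 'cpu'. */
	pr_info("cpu %d: weight=%u, cpu in mask=%d\n",
		cpu, cpumask_weight(mask), cpumask_test_cpu(cpu, mask));
	put_cpu();
	return 0;
}

static void __exit cpumask_of_demo_exit(void)
{
}

module_init(cpumask_of_demo_init);
module_exit(cpumask_of_demo_exit);
MODULE_LICENSE("GPL");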

Example 1: arch_send_call_function_single_ipi

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}
Author: CobraJet93, Project: kernel-3.10.54, Lines: 4, Source: smp.c
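
Generic kernel code reaches this arch hook through smp_call_function_single() whenever the target is a remote CPU. A hedged sketch of a typical caller (do_work and run_on are hypothetical names, not from the example above):

#include <linux/smp.h>

static void do_work(void *info)
{
	pr_info("running on cpu %d\n", smp_processor_id());
}

/* Run do_work() on one specific CPU and wait for it to finish; for a
 * remote CPU this ends up in arch_send_call_function_single_ipi(). */
static void run_on(int cpu)
{
	smp_call_function_single(cpu, do_work, NULL, 1);
}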

Example 2: kernel_physical_mapping_init

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 *
 * This routine transitions us from using a set of compiled-in large
 * pages to using some more precise caching, including removing access
 * to code pages mapped at PAGE_OFFSET (executed only at MEM_SV_START),
 * marking read-only data as locally cacheable, striping the remaining
 * .data and .bss across all the available tiles, and removing access
 * to pages above the top of RAM (thus ensuring a page fault from a bad
 * virtual address rather than a hypervisor shoot down for accessing
 * memory outside the assigned limits).
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long long irqmask;
	unsigned long address, pfn;
	pmd_t *pmd;
	pte_t *pte;
	int pte_ofs;
	const struct cpumask *my_cpu_mask = cpumask_of(smp_processor_id());
	struct cpumask kstripe_mask;
	int rc, i;

#if CHIP_HAS_CBOX_HOME_MAP()
	if (ktext_arg_seen && ktext_hash) {
		pr_warning("warning: \"ktext\" boot argument ignored"
			   " if \"kcache_hash\" sets up text hash-for-home\n");
		ktext_small = 0;
	}

	if (kdata_arg_seen && kdata_hash) {
		pr_warning("warning: \"kdata\" boot argument ignored"
			   " if \"kcache_hash\" sets up data hash-for-home\n");
	}

	if (kdata_huge && !hash_default) {
		pr_warning("warning: disabling \"kdata=huge\"; requires"
			  " kcache_hash=all or =allbutstack\n");
		kdata_huge = 0;
	}
#endif

	/*
	 * Set up a mask for cpus to use for kernel striping.
	 * This is normally all cpus, but minus dataplane cpus if any.
	 * If the dataplane covers the whole chip, we stripe over
	 * the whole chip too.
	 */
	cpumask_copy(&kstripe_mask, cpu_possible_mask);
	if (!kdata_arg_seen)
		kdata_mask = kstripe_mask;

	/* Allocate and fill in L2 page tables */
	for (i = 0; i < MAX_NUMNODES; ++i) {
#ifdef CONFIG_HIGHMEM
		unsigned long end_pfn = node_lowmem_end_pfn[i];
#else
		unsigned long end_pfn = node_end_pfn[i];
#endif
		unsigned long end_huge_pfn = 0;

		/* Pre-shatter the last huge page to allow per-cpu pages. */
		if (kdata_huge)
			end_huge_pfn = end_pfn - (HPAGE_SIZE >> PAGE_SHIFT);

		pfn = node_start_pfn[i];

		/* Allocate enough memory to hold L2 page tables for node. */
		init_prealloc_ptes(i, end_pfn - pfn);

		address = (unsigned long) pfn_to_kaddr(pfn);
		while (pfn < end_pfn) {
			BUG_ON(address & (HPAGE_SIZE-1));
			pmd = get_pmd(pgtables, address);
			pte = get_prealloc_pte(pfn);
			if (pfn < end_huge_pfn) {
				pgprot_t prot = init_pgprot(address);
				*(pte_t *)pmd = pte_mkhuge(pfn_pte(pfn, prot));
				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
				     pfn++, pte_ofs++, address += PAGE_SIZE)
					pte[pte_ofs] = pfn_pte(pfn, prot);
			} else {
				if (kdata_huge)
					printk(KERN_DEBUG "pre-shattered huge"
					       " page at %#lx\n", address);
				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
				     pfn++, pte_ofs++, address += PAGE_SIZE) {
					pgprot_t prot = init_pgprot(address);
					pte[pte_ofs] = pfn_pte(pfn, prot);
				}
				assign_pte(pmd, pte);
			}
		}
	}

	/*
	 * Set or check ktext_map now that we have cpu_possible_mask
	 * and kstripe_mask to work with.
//......... part of the code omitted here .........
Author: AllenWeb, Project: linux, Lines: 101, Source: init.c

Example 3: xen_smp_send_call_function_single_ipi

static void xen_smp_send_call_function_single_ipi(int cpu)
{
	xen_send_IPI_mask(cpumask_of(cpu),
			  XEN_CALL_FUNCTION_SINGLE_VECTOR);
}
Author: friackazoid, Project: linux-2.6, Lines: 5, Source: smp.c

Example 4: tick_check_new_device

/*
 * Check, if the new registered device should be used.
 */
static int tick_check_new_device(struct clock_event_device *newdev)
{
	struct clock_event_device *curdev;
	struct tick_device *td;
	int cpu, ret = NOTIFY_OK;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_device_lock, flags);

	cpu = smp_processor_id();
	if (!cpumask_test_cpu(cpu, newdev->cpumask))
		goto out_bc;

	td = &per_cpu(tick_cpu_device, cpu);
	curdev = td->evtdev;

	/* cpu local device ? */
	if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu))) {

		/*
		 * If the cpu affinity of the device interrupt can not
		 * be set, ignore it.
		 */
		if (!irq_can_set_affinity(newdev->irq))
			goto out_bc;

		/*
		 * If we have a cpu local device already, do not replace it
		 * by a non cpu local device
		 */
		if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
			goto out_bc;
	}

	/*
	 * If we have an active device, then check the rating and the oneshot
	 * feature.
	 */
	if (curdev) {
		/*
		 * Prefer one shot capable devices !
		 */
		if ((curdev->features & CLOCK_EVT_FEAT_ONESHOT) &&
		    !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
			goto out_bc;
		/*
		 * Check the rating
		 */
		if (curdev->rating >= newdev->rating)
			goto out_bc;
	}

	/*
	 * Replace the eventually existing device by the new
	 * device. If the current device is the broadcast device, do
	 * not give it back to the clockevents layer !
	 */
	if (tick_is_broadcast_device(curdev)) {
		clockevents_shutdown(curdev);
		curdev = NULL;
	}
	clockevents_exchange_device(curdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();

	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
	return NOTIFY_STOP;

out_bc:
	/*
	 * Can the new device be used as a broadcast device ?
	 */
	if (tick_check_broadcast_device(newdev))
		ret = NOTIFY_STOP;

	raw_spin_unlock_irqrestore(&tick_device_lock, flags);

	return ret;
}
Author: 33d, Project: linux-2.6.21-hh20, Lines: 83, Source: tick-common.c
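
The two cpumask_of(cpu) comparisons above decide whether a clock event device is CPU-local. A minimal sketch of how those mask helpers resolve (is_cpu_local is a hypothetical name, not part of the kernel):

#include <linux/cpumask.h>
#include <linux/clockchips.h>

static bool is_cpu_local(const struct clock_event_device *dev, int cpu)
{
	/* The device must be able to fire on 'cpu' at all ... */
	if (!cpumask_test_cpu(cpu, dev->cpumask))
		return false;
	/* ... and be dedicated to exactly that one CPU. */
	return cpumask_equal(dev->cpumask, cpumask_of(cpu));
}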

Example 5: DECLARE_COMPLETION_ONSTACK


//......... part of the code omitted here .........
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		int i = 0;

		/*
		 * I got SIGKILL, but wait for 10 more seconds for completion
		 * unless chosen by the OOM killer. This delay is there as a
		 * workaround for boot failure caused by SIGKILL upon device
		 * driver initialization timeout.
		 */
		while (i++ < 10 && !test_tsk_thread_flag(current, TIF_MEMDIE))
			if (wait_for_completion_timeout(&done, HZ))
				goto ready;
		/*
		 * If I was SIGKILLed before kthreadd (or new kernel thread)
		 * calls complete(), leave the cleanup of this structure to
		 * that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-ENOMEM);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
ready:
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task, cpu_all_mask);
	}
	kfree(create);
	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}
	/* It's safe because the task is inactive. */
	do_set_cpus_allowed(p, cpumask_of(cpu));
	p->flags |= PF_NO_SETAFFINITY;
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
Author: mobilehunter, Project: trafficsqueezer-org, Lines: 101, Source: kthread.c
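
kthread_bind() above is only legal on a freshly created, still-stopped thread. A hedged sketch of the usual create/bind/wake sequence (worker_fn and start_pinned_worker are hypothetical names):

#include <linux/kthread.h>
#include <linux/sched.h>

static int worker_fn(void *data)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();	/* real code would do per-cpu work here */
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static struct task_struct *start_pinned_worker(unsigned int cpu)
{
	struct task_struct *t = kthread_create(worker_fn, NULL, "pinned/%u", cpu);

	if (!IS_ERR(t)) {
		kthread_bind(t, cpu);	/* must happen before the thread first runs */
		wake_up_process(t);
	}
	return t;
}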

Example 6: smp_prepare_cpus

/*
 * Called at the top of init() to launch all the other CPUs.
 * They run free to complete their initialization and then wait
 * until they get an IPI from the boot cpu to come online.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	long rc;
	int cpu, cpu_count;
	int boot_cpu = smp_processor_id();

	current_thread_info()->cpu = boot_cpu;

	/*
	 * Pin this task to the boot CPU while we bring up the others,
	 * just to make sure we don't uselessly migrate as they come up.
	 */
	rc = sched_setaffinity(current->pid, cpumask_of(boot_cpu));
	if (rc != 0)
		pr_err("Couldn't set init affinity to boot cpu (%ld)\n", rc);

	/* Print information about disabled and dataplane cpus. */
	print_disabled_cpus();

	/*
	 * Tell the messaging subsystem how to respond to the
	 * startup message.  We use a level of indirection to avoid
	 * confusing the linker with the fact that the messaging
	 * subsystem is calling __init code.
	 */
	start_cpu_function_addr = (unsigned long) &online_secondary;

	/* Set up thread context for all new processors. */
	cpu_count = 1;
	for (cpu = 0; cpu < NR_CPUS; ++cpu)	{
		struct task_struct *idle;

		if (cpu == boot_cpu)
			continue;

		if (!cpu_possible(cpu)) {
			/*
			 * Make this processor do nothing on boot.
			 * Note that we don't give the boot_pc function
			 * a stack, so it has to be assembly code.
			 */
			per_cpu(boot_sp, cpu) = 0;
			per_cpu(boot_pc, cpu) = (unsigned long) smp_nap;
			continue;
		}

		/* Create a new idle thread to run start_secondary() */
		idle = fork_idle(cpu);
		if (IS_ERR(idle))
			panic("failed fork for CPU %d", cpu);
		idle->thread.pc = (unsigned long) start_secondary;

		/* Make this thread the boot thread for this processor */
		per_cpu(boot_sp, cpu) = task_ksp0(idle);
		per_cpu(boot_pc, cpu) = idle->thread.pc;

		++cpu_count;
	}
	BUG_ON(cpu_count > (max_cpus ? max_cpus : 1));

	/* Fire up the other tiles, if any */
	init_cpu_present(cpu_possible_mask);
	if (cpumask_weight(cpu_present_mask) > 1) {
		mb();  /* make sure all data is visible to new processors */
		hv_start_all_tiles();
	}
}
Author: fhgwright, Project: bb-linux, Lines: 72, Source: smpboot.c

Example 7: _cpu_down

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so explicitly call both.
	 *
	 * Do sync before park smpboot threads to take care the rcu boost case.
	 */
#ifdef CONFIG_PREEMPT
	synchronize_sched();
#endif
	synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}
Author: ExtremeGTX, Project: Devkit8500_Linux_BSP, Lines: 82, Source: cpu.c

Example 8: do_suspend

static void do_suspend(void)
{
	int err;
	struct suspend_info si;

	shutting_down = SHUTDOWN_SUSPEND;

	err = freeze_processes();
	if (err) {
		pr_err("%s: freeze processes failed %d\n", __func__, err);
		goto out;
	}

	err = freeze_kernel_threads();
	if (err) {
		pr_err("%s: freeze kernel threads failed %d\n", __func__, err);
		goto out_thaw;
	}

	err = dpm_suspend_start(PMSG_FREEZE);
	if (err) {
		pr_err("%s: dpm_suspend_start %d\n", __func__, err);
		goto out_thaw;
	}

	printk(KERN_DEBUG "suspending xenstore...\n");
	xs_suspend();

	err = dpm_suspend_end(PMSG_FREEZE);
	if (err) {
		pr_err("dpm_suspend_end failed: %d\n", err);
		si.cancelled = 0;
		goto out_resume;
	}

	xen_arch_suspend();

	si.cancelled = 1;

	err = stop_machine(xen_suspend, &si, cpumask_of(0));

	/* Resume console as early as possible. */
	if (!si.cancelled)
		xen_console_resume();

	raw_notifier_call_chain(&xen_resume_notifier, 0, NULL);

	dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE);

	if (err) {
		pr_err("failed to start xen_suspend: %d\n", err);
		si.cancelled = 1;
	}

	xen_arch_resume();

out_resume:
	if (!si.cancelled)
		xs_resume();
	else
		xs_suspend_cancel();

	dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);

out_thaw:
	thaw_processes();
out:
	shutting_down = SHUTDOWN_INVALID;
}
Author: mdamt, Project: linux, Lines: 69, Source: manage.c
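
Both _cpu_down() in Example 7 and do_suspend() above use the same trick: stop_machine() with a one-CPU mask funnels a critical function onto that CPU while every other online CPU spins with interrupts off. A hedged sketch of the pattern (critical_fn and run_critical are hypothetical names):

#include <linux/stop_machine.h>
#include <linux/cpumask.h>

static int critical_fn(void *data)
{
	/* Runs on CPU 0; all other online CPUs are parked with
	 * interrupts disabled until this function returns. */
	return 0;
}

static int run_critical(void)
{
	return stop_machine(critical_fn, NULL, cpumask_of(0));
}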

Example 9: boot_secondary

int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
    unsigned long timeout;

    /*
     * Set synchronisation state between this boot processor
     * and the secondary one
     */
    spin_lock(&boot_lock);

    /*
     * This is really belt and braces; we hold unintended secondary
     * CPUs in the holding pen until we're ready for them.  However,
     * since we haven't sent them a soft interrupt, they shouldn't
     * be there.
     */
    //writew((BSYM(virt_to_phys(vexpress_secondary_startup))>>16), SECOND_START_ADDR_HI);
    //writew(BSYM(virt_to_phys(vexpress_secondary_startup)), SECOND_START_ADDR_LO);
    writel(0xbabe, SECOND_MAGIC_NUMBER_ADRESS);
    writel(BSYM(virt_to_phys(vexpress_secondary_startup)), SECOND_START_ADDR);
    __cpuc_flush_kern_all();
    pen_release = cpu;

#if defined(CONFIG_MSTAR_STR_DBGMSG)
    {
        unsigned int *ptr=&pen_release;
        printk("pen_release = 0x%08x, addr= 0x%08x, pen_release ptr = 0x%08x\n ",pen_release,&pen_release,*ptr);
    }
#endif

    __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
    outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));

    /*
     * Send the secondary CPU a soft interrupt, thereby causing
     * the boot monitor to read the system wide flags register,
     * and branch to the address found there.
     */
    smp_cross_call(cpumask_of(cpu));

    timeout = jiffies + (1 * HZ);
    while (time_before(jiffies, timeout)) {
        smp_rmb();
        if (pen_release == -1)
            break;

        udelay(10);
    }
#if defined(CONFIG_MSTAR_STR_DBGMSG)
    {
        unsigned int *ptr=&pen_release;
        printk("pen_release = 0x%08x, addr= 0x%08x, pen_release ptr = 0x%08x\n ",pen_release,&pen_release,*ptr);
    }
#endif
    /*
     * now the secondary core is starting up let it run its
     * calibrations, then wait for it to finish
     */
    spin_unlock(&boot_lock);
    return pen_release != -1 ? -ENOSYS : 0;
}
Author: nightcap79, Project: kogan-tv-gpl, Lines: 61, Source: platsmp.c

Example 10: native_send_call_func_single_ipi

void native_send_call_func_single_ipi(int cpu)
{
	apic->send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR);
}
Author: B-Rich, Project: L4Reap, Lines: 4, Source: smp.c

Example 11: smp_send_state_dump

void smp_send_state_dump(unsigned int cpu)
{
    /* We overload the spurious interrupt handler to handle the dump. */
    per_cpu(state_dump_pending, cpu) = 1;
    send_IPI_mask(cpumask_of(cpu), SPURIOUS_APIC_VECTOR);
}
Author: robhoes, Project: xen, Lines: 6, Source: apic.c

Example 12: pv_init

int pv_init(struct pv_init_t *init, struct pv_config_t **config)
{
	struct pv_dev *dev;
	int ret;

	if (!init || (init->id >= MAX_PV_INST) || !init->irq
#ifdef PV_HAS_CLK
		|| !init->apb_clk_name || !init->pix_clk_name
#endif
		|| !init->base_addr || !init->err_cb || !init->eof_cb) {
		pv_err("Invalid input parameters\n");
		ret = -EINVAL;
		goto done;
	}
	if (g_pv_init[init->id]) {
		pv_err("All instances of PV have already been initialised\n");
		ret = -EPERM;
		goto done;
	}

	dev = kzalloc(sizeof(struct pv_dev), GFP_KERNEL);
	if (!dev) {
		pv_err("couldn't allocate memory for pv_data\n");
		ret = -ENOMEM;
		goto done;
	}
	ret = request_irq(init->irq, pv_isr, IRQF_TRIGGER_HIGH, "PV", dev);
	if (ret < 0) {
		pv_err("failed to get irq\n");
		goto fail_irq;	/* the IRQ was never acquired, so skip free_irq() */
	}
	irq_set_affinity(init->irq, cpumask_of(1));
#ifdef PV_HAS_CLK
	dev->apb_clk = clk_get(NULL, init->apb_clk_name);
	if (IS_ERR(dev->apb_clk)) {
		pv_err("failed to get %s\n", init->apb_clk_name);
		ret = PTR_ERR(dev->apb_clk);
		goto fail;
	}

	dev->pix_clk = clk_get(NULL, init->pix_clk_name);
	if (IS_ERR(dev->pix_clk)) {
		pv_err("failed to get %s\n", init->pix_clk_name);
		ret = PTR_ERR(dev->pix_clk);
		goto fail;
	}
#endif
	dev->id = init->id;
	dev->irq = init->irq;
	dev->base_addr = init->base_addr;
	dev->err_cb = init->err_cb;
	dev->eof_cb = init->eof_cb;
	INIT_WORK(&dev->err_work, err_cb);
	INIT_WORK(&dev->eof_work, eof_cb);
	dev->state = PV_INIT_DONE;
	if (!g_display_enabled) {
		printk("%s:%d\n", __func__, __LINE__);
		dev->state = PV_INIT_DONE;
	} else {
		pv_clk_enable(dev);
		printk("enabling pv isr in init\n");
#ifdef INT_4_LONG_PKT
		writel(VFP_START, dev->base_addr + REG_PV_INTEN); //workaround1
#endif
		dev->state = PV_ENABLED;
	}
	g_pv_init[init->id] = true;
	*config = &dev->vid_config;
	ret = 0;
	goto done;

fail:
	free_irq(init->irq, dev);	/* dev_id must match the cookie passed to request_irq() */
fail_irq:
	kfree(dev);
done:
	return ret;
}
Author: ASAZING, Project: Android-Kernel-Gt-s7390l, Lines: 77, Source: pv_video.c
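
The irq_set_affinity(init->irq, cpumask_of(1)) call above steers every delivery of the interrupt to CPU 1. A hedged sketch of the same pattern in isolation (demo_isr and pin_irq_to_cpu are hypothetical names; a real driver would choose flags and names to match its hardware):

#include <linux/interrupt.h>
#include <linux/cpumask.h>

static irqreturn_t demo_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int pin_irq_to_cpu(unsigned int irq, unsigned int cpu, void *cookie)
{
	int ret = request_irq(irq, demo_isr, 0, "demo", cookie);

	if (ret)
		return ret;
	/* Route all deliveries of this IRQ to a single CPU. */
	return irq_set_affinity(irq, cpumask_of(cpu));
}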

Example 13: omap4_boot_secondary

static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	static struct clockdomain *cpu1_clkdm;
	static bool booted;
	static struct powerdomain *cpu1_pwrdm;
	void __iomem *base = omap_get_wakeupgen_base();

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * Update the AuxCoreBoot0 with boot state for secondary core.
	 * omap4_secondary_startup() routine will hold the secondary core till
	 * the AuxCoreBoot1 register is updated with cpu state
	 * A barrier is added to ensure that write buffer is drained
	 */
	if (omap_secure_apis_support())
		omap_modify_auxcoreboot0(0x200, 0xfffffdff);
	else
		__raw_writel(0x20, base + OMAP_AUX_CORE_BOOT_0);

	if (!cpu1_clkdm && !cpu1_pwrdm) {
		cpu1_clkdm = clkdm_lookup("mpu1_clkdm");
		cpu1_pwrdm = pwrdm_lookup("cpu1_pwrdm");
	}

	/*
	 * The SGI(Software Generated Interrupts) are not wakeup capable
	 * from low power states. This is known limitation on OMAP4 and
	 * needs to be worked around by using software forced clockdomain
	 * wake-up. To wakeup CPU1, CPU0 forces the CPU1 clockdomain to
	 * software force wakeup. The clockdomain is then put back to
	 * hardware supervised mode.
	 * More details can be found in OMAP4430 TRM - Version J
	 * Section :
	 *	4.3.4.2 Power States of CPU0 and CPU1
	 */
	if (booted && cpu1_pwrdm && cpu1_clkdm) {
		/*
		 * GIC distributor control register has changed between
		 * CortexA9 r1pX and r2pX. The Control Register secure
		 * banked version is now composed of 2 bits:
		 * bit 0 == Secure Enable
		 * bit 1 == Non-Secure Enable
		 * The Non-Secure banked register has not changed
		 * Because the ROM Code is based on the r1pX GIC, the CPU1
		 * GIC restoration will cause a problem to CPU0 Non-Secure SW.
		 * The workaround must be:
		 * 1) Before doing the CPU1 wakeup, CPU0 must disable
		 * the GIC distributor
		 * 2) CPU1 must re-enable the GIC distributor on
		 * it's wakeup path.
		 */
		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD)) {
			local_irq_disable();
			gic_dist_disable();
		}

		/*
		 * Ensure that CPU power state is set to ON to avoid CPU
		 * powerdomain transition on wfi
		 */
		clkdm_wakeup(cpu1_clkdm);
		omap_set_pwrdm_state(cpu1_pwrdm, PWRDM_POWER_ON);
		clkdm_allow_idle(cpu1_clkdm);

		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD)) {
			while (gic_dist_disabled()) {
				udelay(1);
				cpu_relax();
			}
			gic_timer_retrigger();
			local_irq_enable();
		}
	} else {
		dsb_sev();
		booted = true;
	}

	arch_send_wakeup_ipi_mask(cpumask_of(cpu));

	/*
	 * Now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return 0;
}
Author: 03199618, Project: linux, Lines: 92, Source: omap-smp.c

Example 14: acpi_cpufreq_target

static int acpi_cpufreq_target(struct cpufreq_policy *policy,
			       unsigned int index)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct acpi_processor_performance *perf;
	struct drv_cmd cmd;
	unsigned int next_perf_state = 0; /* Index into perf table */
	int result = 0;

	if (unlikely(data == NULL || data->freq_table == NULL)) {
		return -ENODEV;
	}

	perf = to_perf_data(data);
	next_perf_state = data->freq_table[index].driver_data;
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume)) {
			pr_debug("Called after resume, resetting to P%d\n",
				next_perf_state);
			data->resume = 0;
		} else {
			pr_debug("Already at target state (P%d)\n",
				next_perf_state);
			goto out;
		}
	}

	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	default:
		result = -ENODEV;
		goto out;
	}

	/* cpufreq holds the hotplug lock, so we are safe from here on */
	if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
		cmd.mask = policy->cpus;
	else
		cmd.mask = cpumask_of(policy->cpu);

	drv_write(&cmd);

	if (acpi_pstate_strict) {
		if (!check_freqs(cmd.mask, data->freq_table[index].frequency,
					data)) {
			pr_debug("acpi_cpufreq_target failed (%d)\n",
				policy->cpu);
			result = -EAGAIN;
		}
	}

	if (!result)
		perf->state = next_perf_state;

out:
	return result;
}
Author: magarto, Project: linux-rpi-grsecurity, Lines: 72, Source: acpi-cpufreq.c

Example 15: ixp4xx_clockevent_init

static void __init ixp4xx_clockevent_init(void)
{
	clockevent_ixp4xx.cpumask = cpumask_of(0);
	clockevents_config_and_register(&clockevent_ixp4xx, IXP4XX_TIMER_FREQ,
					0xf, 0xfffffffe);
}
Author: 8563, Project: millennium-sources, Lines: 6, Source: common.c
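
Example 15 gives the tick device a fixed owner, CPU 0, because the IXP4xx timer is system-wide. Per-CPU timer drivers instead derive the mask from the CPU doing the registration. A hedged sketch (the demo_* names and timer parameters are hypothetical, and a real driver must also supply callbacks such as .set_next_event):

#include <linux/clockchips.h>
#include <linux/smp.h>

static struct clock_event_device demo_evt = {
	.name		= "demo-timer",
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 300,
};

static void demo_clockevent_init(unsigned int freq_hz)
{
	/* Tie the device to the registering CPU; the tick core compares
	 * this mask against cpumask_of(cpu), as shown in Example 4. */
	demo_evt.cpumask = cpumask_of(smp_processor_id());
	clockevents_config_and_register(&demo_evt, freq_hz, 2, 0xffffffff);
}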


Note: The cpumask_of function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers, and copyright in the source code remains with the original authors. Consult the corresponding project's License before redistributing or using the code; please do not republish without permission.