This article collects typical usage examples of the C++ qemu_mutex_lock function, extracted from open-source projects. If you are wondering what exactly qemu_mutex_lock does, or how to call it in your own code, the selected examples below should help.
15 code examples of qemu_mutex_lock are shown below, sorted by popularity.
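Before the individual examples, the basic contract: a QemuMutex must be initialized with qemu_mutex_init() before qemu_mutex_lock()/qemu_mutex_unlock() can be used on it, and released with qemu_mutex_destroy() when it is no longer needed. A minimal sketch of the pattern (the Counter type and function names are placeholders invented for illustration, assuming a QEMU source tree for the qemu/thread.h header):

#include "qemu/osdep.h"
#include "qemu/thread.h"

typedef struct {
    QemuMutex lock;
    uint64_t value;
} Counter;   /* placeholder type for this sketch */

static void counter_init(Counter *c)
{
    qemu_mutex_init(&c->lock);
    c->value = 0;
}

static void counter_add(Counter *c, uint64_t n)
{
    qemu_mutex_lock(&c->lock);    /* blocks until the mutex is acquired */
    c->value += n;
    qemu_mutex_unlock(&c->lock);  /* every lock needs a matching unlock */
}

static void counter_fini(Counter *c)
{
    qemu_mutex_destroy(&c->lock);
}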
Example 1: process_queued_cpu_work
void process_queued_cpu_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    qemu_mutex_lock(&cpu->work_mutex);
    while (cpu->queued_work_first != NULL) {
        wi = cpu->queued_work_first;
        cpu->queued_work_first = wi->next;
        if (!cpu->queued_work_first) {
            cpu->queued_work_last = NULL;
        }
        qemu_mutex_unlock(&cpu->work_mutex);
        if (wi->exclusive) {
            /* Running work items outside the BQL avoids the following deadlock:
             * 1) start_exclusive() is called with the BQL taken while another
             * CPU is running; 2) cpu_exec in the other CPU tries to take the
             * BQL, so it goes to sleep; start_exclusive() is sleeping too, so
             * neither CPU can proceed.
             */
            qemu_mutex_unlock_iothread();
            start_exclusive();
            wi->func(cpu, wi->data);
            end_exclusive();
            qemu_mutex_lock_iothread();
        } else {
            wi->func(cpu, wi->data);
        }
        qemu_mutex_lock(&cpu->work_mutex);
        if (wi->free) {
            g_free(wi);
        } else {
            atomic_mb_set(&wi->done, true);
        }
    }
    qemu_mutex_unlock(&cpu->work_mutex);
    qemu_cond_broadcast(&qemu_work_cond);
}
Example 2: res_tbl_dealloc
static inline void res_tbl_dealloc(RdmaRmResTbl *tbl, uint32_t handle)
{
    pr_dbg("%s, handle=%d\n", tbl->name, handle);

    qemu_mutex_lock(&tbl->lock);

    if (handle < tbl->tbl_sz) {
        clear_bit(handle, tbl->bitmap);
    }

    qemu_mutex_unlock(&tbl->lock);
}
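The example above only shows deallocation. For context, a sketch of what the matching allocation path presumably looks like, protecting the same bitmap with the same lock (res_tbl_alloc is an assumed name modeled on the function above; find_first_zero_bit() and set_bit() are real QEMU bitmap helpers):

static inline int res_tbl_alloc(RdmaRmResTbl *tbl, uint32_t *handle)
{
    qemu_mutex_lock(&tbl->lock);

    /* Find and claim a free slot while holding the table lock, so two
     * threads can never pick the same handle. */
    *handle = find_first_zero_bit(tbl->bitmap, tbl->tbl_sz);
    if (*handle >= tbl->tbl_sz) {
        qemu_mutex_unlock(&tbl->lock);
        return -1;  /* table full */
    }
    set_bit(*handle, tbl->bitmap);

    qemu_mutex_unlock(&tbl->lock);
    return 0;
}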
Example 3: char_pty_finalize
static void char_pty_finalize(Object *obj)
{
    Chardev *chr = CHARDEV(obj);
    PtyChardev *s = PTY_CHARDEV(obj);

    qemu_mutex_lock(&chr->chr_write_lock);
    pty_chr_state(chr, 0);
    object_unref(OBJECT(s->ioc));
    pty_chr_timer_cancel(s);
    qemu_mutex_unlock(&chr->chr_write_lock);
    qemu_chr_be_event(chr, CHR_EVENT_CLOSED);
}
Example 4: sifive_plic_set_claimed
static
void sifive_plic_set_claimed(SiFivePLICState *plic, int irq, bool claimed)
{
    qemu_mutex_lock(&plic->lock);
    uint32_t word = irq >> 5;
    if (claimed) {
        plic->claimed[word] |= (1 << (irq & 31));
    } else {
        plic->claimed[word] &= ~(1 << (irq & 31));
    }
    qemu_mutex_unlock(&plic->lock);
}
Example 5: sifive_plic_set_pending
static
void sifive_plic_set_pending(SiFivePLICState *plic, int irq, bool pending)
{
    qemu_mutex_lock(&plic->lock);
    uint32_t word = irq >> 5;
    if (pending) {
        plic->pending[word] |= (1 << (irq & 31));
    } else {
        plic->pending[word] &= ~(1 << (irq & 31));
    }
    qemu_mutex_unlock(&plic->lock);
}
Example 6: qemu_wait_io_event
static void qemu_wait_io_event(CPUState *env)
{
    while (!tcg_has_work())
        qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);

    qemu_mutex_unlock(&qemu_global_mutex);

    /*
     * Users of qemu_global_mutex can be starved, having no chance
     * to acquire it since this path will get to it first.
     * So use another lock to provide fairness.
     */
    qemu_mutex_lock(&qemu_fair_mutex);
    qemu_mutex_unlock(&qemu_fair_mutex);

    qemu_mutex_lock(&qemu_global_mutex);
    if (env->stop) {
        env->stop = 0;
        env->stopped = 1;
        qemu_cond_signal(&qemu_pause_cond);
    }
}
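The two back-to-back operations on qemu_fair_mutex look like a no-op, but they are the whole point: a starved waiter holds qemu_fair_mutex while it acquires qemu_global_mutex, so this function queues behind it instead of immediately re-grabbing the global lock. The contending side, in the same era of the code, did roughly this (a sketch, not verbatim; compare Example 12 below, where a later variant uses trylock plus a CPU kick instead):

/* I/O thread side of the fairness handoff (sketch) */
qemu_mutex_lock(&qemu_fair_mutex);
qemu_mutex_lock(&qemu_global_mutex);
qemu_mutex_unlock(&qemu_fair_mutex);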
Example 7: cpu_list_add
void cpu_list_add(CPUState *cpu)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
    if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
        cpu->cpu_index = cpu_get_free_index();
        assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
    } else {
        assert(!cpu_index_auto_assigned);
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
    qemu_mutex_unlock(&qemu_cpu_list_lock);

    finish_safe_work(cpu);
}
Example 8: hostmem_listener_commit
/**
 * Install new regions list
 */
static void hostmem_listener_commit(MemoryListener *listener)
{
    HostMem *hostmem = container_of(listener, HostMem, listener);

    qemu_mutex_lock(&hostmem->current_regions_lock);
    g_free(hostmem->current_regions);
    hostmem->current_regions = hostmem->new_regions;
    hostmem->num_current_regions = hostmem->num_new_regions;
    qemu_mutex_unlock(&hostmem->current_regions_lock);

    /* Reset new regions list */
    hostmem->new_regions = NULL;
    hostmem->num_new_regions = 0;
}
Example 9: qemu_tcg_wait_io_event
static void qemu_tcg_wait_io_event(void)
{
    CPUState *env;

    while (!any_cpu_has_work())
        qemu_cond_timedwait(tcg_halt_cond, &qemu_global_mutex, 1000);

    qemu_mutex_unlock(&qemu_global_mutex);

    /*
     * Users of qemu_global_mutex can be starved, having no chance
     * to acquire it since this path will get to it first.
     * So use another lock to provide fairness.
     */
    qemu_mutex_lock(&qemu_fair_mutex);
    qemu_mutex_unlock(&qemu_fair_mutex);

    qemu_mutex_lock(&qemu_global_mutex);

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        qemu_wait_io_event_common(env);
    }
}
Example 10: iothread_complete
static void iothread_complete(UserCreatable *obj, Error **errp)
{
    Error *local_error = NULL;
    IOThread *iothread = IOTHREAD(obj);
    char *name, *thread_name;

    iothread->stopping = false;
    iothread->running = true;
    iothread->thread_id = -1;
    iothread->ctx = aio_context_new(&local_error);
    if (!iothread->ctx) {
        error_propagate(errp, local_error);
        return;
    }

    aio_context_set_poll_params(iothread->ctx,
                                iothread->poll_max_ns,
                                iothread->poll_grow,
                                iothread->poll_shrink,
                                &local_error);
    if (local_error) {
        error_propagate(errp, local_error);
        aio_context_unref(iothread->ctx);
        iothread->ctx = NULL;
        return;
    }

    qemu_mutex_init(&iothread->init_done_lock);
    qemu_cond_init(&iothread->init_done_cond);
    iothread->once = (GOnce) G_ONCE_INIT;

    /* This assumes we are called from a thread with useful CPU affinity for us
     * to inherit.
     */
    name = object_get_canonical_path_component(OBJECT(obj));
    thread_name = g_strdup_printf("IO %s", name);
    qemu_thread_create(&iothread->thread, thread_name, iothread_run,
                       iothread, QEMU_THREAD_JOINABLE);
    g_free(thread_name);
    g_free(name);

    /* Wait for initialization to complete */
    qemu_mutex_lock(&iothread->init_done_lock);
    while (iothread->thread_id == -1) {
        qemu_cond_wait(&iothread->init_done_cond,
                       &iothread->init_done_lock);
    }
    qemu_mutex_unlock(&iothread->init_done_lock);
}
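The wait loop at the end cannot return until the new thread publishes its ID and signals init_done_cond. iothread_run is not shown in this example; a simplified sketch of the handshake it presumably performs (omitting RCU registration and shutdown details):

static void *iothread_run(void *opaque)
{
    IOThread *iothread = opaque;

    /* Publish the thread ID under the same lock the creator is
     * waiting on, then wake it. */
    qemu_mutex_lock(&iothread->init_done_lock);
    iothread->thread_id = qemu_get_thread_id();
    qemu_cond_signal(&iothread->init_done_cond);
    qemu_mutex_unlock(&iothread->init_done_lock);

    /* Event loop: block in aio_poll() until told to stop. */
    while (iothread->running) {
        aio_poll(iothread->ctx, true);
    }
    return NULL;
}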
Example 11: queue_work_on_cpu
static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
{
    qemu_mutex_lock(&cpu->work_mutex);
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;
    qemu_mutex_unlock(&cpu->work_mutex);

    qemu_cpu_kick(cpu);
}
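This is the producer side of the queue that Example 1 drains; wi->done is cleared before the item becomes visible so the consumer's completion signal cannot be missed. A typical synchronous caller, sketched here with an assumed wrapper name (the run_on_cpu_func/run_on_cpu_data types follow Example 1's era of the code, and the mutex parameter is whatever lock the caller sleeps under):

static void run_work_and_wait(CPUState *cpu, run_on_cpu_func func,
                              run_on_cpu_data data, QemuMutex *mutex)
{
    struct qemu_work_item wi;

    wi.func = func;
    wi.data = data;
    wi.done = false;
    wi.free = false;       /* stack-allocated: Example 1 must not g_free() it */
    wi.exclusive = false;

    queue_work_on_cpu(cpu, &wi);

    /* Example 1 sets wi.done with atomic_mb_set() and broadcasts
     * qemu_work_cond after running the item. */
    while (!atomic_mb_read(&wi.done)) {
        qemu_cond_wait(&qemu_work_cond, mutex);
    }
}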
Example 12: qemu_mutex_lock_iothread
void qemu_mutex_lock_iothread(void)
{
    LOGD_CPUS("%s1\n", __func__);
    if (!tcg_enabled()) {
        LOGD_CPUS("%s2\n", __func__);
        qemu_mutex_lock(&qemu_global_mutex);
        LOGD_CPUS("%s3\n", __func__);
    } else {
        LOGD_CPUS("%s4\n", __func__);
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            LOGD_CPUS("%s5\n", __func__);
            qemu_cpu_kick_thread(first_cpu);
            LOGD_CPUS("%s6\n", __func__);
            qemu_mutex_lock(&qemu_global_mutex);
            LOGD_CPUS("%s7\n", __func__);
        }
        LOGD_CPUS("%s8\n", __func__);
        iothread_requesting_mutex = false;
        LOGD_CPUS("%s9\n", __func__);
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
        LOGD_CPUS("%s10\n", __func__);
    }
}
Example 13: g_new0
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    QEMUBH *bh;

    bh = g_new0(QEMUBH, 1);
    bh->ctx = ctx;
    bh->cb = cb;
    bh->opaque = opaque;

    qemu_mutex_lock(&ctx->bh_lock);
    bh->next = ctx->first_bh;
    /* Make sure that the members are ready before putting bh into list */
    smp_wmb();
    ctx->first_bh = bh;
    qemu_mutex_unlock(&ctx->bh_lock);

    return bh;
}
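Note the smp_wmb(): the list is traversed lock-free on the reader side, so the fields must be globally visible before the node is published under bh_lock. Callers only need the returned handle; a minimal usage sketch (my_bh_cb and arm_bottom_half are placeholder names, while qemu_bh_schedule() and qemu_bh_delete() are the real QEMU APIs):

static void my_bh_cb(void *opaque)
{
    /* Deferred work runs here, in the thread running ctx's event loop. */
}

static void arm_bottom_half(AioContext *ctx, void *state)
{
    QEMUBH *bh = aio_bh_new(ctx, my_bh_cb, state);

    qemu_bh_schedule(bh);  /* my_bh_cb will fire once on a later iteration */
    /* qemu_bh_delete(bh) once the bottom half is no longer needed */
}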
Example 14: pci_edu_uninit
static void pci_edu_uninit(PCIDevice *pdev)
{
    EduState *edu = DO_UPCAST(EduState, pdev, pdev);

    qemu_mutex_lock(&edu->thr_mutex);
    edu->stopping = true;
    qemu_mutex_unlock(&edu->thr_mutex);
    qemu_cond_signal(&edu->thr_cond);
    qemu_thread_join(&edu->thread);

    qemu_cond_destroy(&edu->thr_cond);
    qemu_mutex_destroy(&edu->thr_mutex);

    timer_del(&edu->dma_timer);
}
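The lock around the single store to edu->stopping is not redundant: it pairs with the worker re-checking the flag under thr_mutex before sleeping on thr_cond, so the wakeup cannot be lost. A simplified sketch of the matching worker loop (edu_worker and work_pending() are assumed names standing in for the device's real compute thread):

static void *edu_worker(void *opaque)
{
    EduState *edu = opaque;

    while (true) {
        qemu_mutex_lock(&edu->thr_mutex);
        /* Sleep until there is work to do or we are told to stop. */
        while (!edu->stopping && !work_pending(edu)) {
            qemu_cond_wait(&edu->thr_cond, &edu->thr_mutex);
        }
        if (edu->stopping) {
            qemu_mutex_unlock(&edu->thr_mutex);
            break;              /* lets qemu_thread_join() return */
        }
        qemu_mutex_unlock(&edu->thr_mutex);

        /* do the actual computation outside the lock */
    }
    return NULL;
}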
Example 15: cpu_list_remove
void cpu_list_remove(CPUState *cpu)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
    if (!QTAILQ_IN_USE(cpu, node)) {
        /* there is nothing to undo since cpu_exec_init() hasn't been called */
        qemu_mutex_unlock(&qemu_cpu_list_lock);
        return;
    }

    assert(!(cpu_index_auto_assigned && cpu != QTAILQ_LAST(&cpus, CPUTailQ)));

    QTAILQ_REMOVE(&cpus, cpu, node);
    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}