本文整理汇总了C++中release_spinlock函数的典型用法代码示例。如果您正苦于以下问题:C++ release_spinlock函数的具体用法?C++ release_spinlock怎么用?C++ release_spinlock使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了release_spinlock函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: remove_thread_safe_enlist_head
/* Pops the head element of a thread-safe list.
 * On success stores the element's payload in *data, returns the detached
 * entry to the lookaside allocator, decrements the element count and
 * returns true; returns false if the list is empty.
 * 'lock' is forwarded to acquire_spinlock/release_spinlock and appears to
 * select whether locking is actually performed.
 * NOTE(review): acquire_spinlock/release_spinlock look like macros that
 * write/read 'irql' by name -- confirm against their definitions. */
char
SORAAPI
remove_thread_safe_enlist_head(struct thread_safe_enlist* tslist,
void** data,
char lock) {
KIRQL irql; /* IRQL saved by acquire_spinlock, restored on release */
acquire_spinlock(lock,
tslist->m_sync,
irql);
if (IsListEmpty(&tslist->m_head.m_entry)) {
/* nothing to remove: drop the lock and report failure */
release_spinlock(lock,
tslist->m_sync,
irql);
return false;
}
struct LIST_ENTRY_EX* entry;
entry = (LIST_ENTRY_EX*)RemoveHeadList(&tslist->m_head.m_entry);
*data = entry->m_value;
/* hand the unlinked entry back to the non-paged lookaside list */
ExFreeToNPagedLookasideList(&tslist->m_lookaside,
entry);
InterlockedDecrement(&tslist->m_count);
release_spinlock(lock,
tslist->m_sync,
irql);
return true;
}
示例2: tx_cleanup_thread
/* Drains the driver's transmitted-packet queue: every packet that has been
 * sent gets its data buffer returned to the chunk pool and the packet
 * descriptor is recycled onto the free queue. Each queue operation is done
 * with interrupts disabled and the device spinlock held. */
int32
tx_cleanup_thread(void *us)
{
	struct be_b57_dev *dev = (struct be_b57_dev *)(us);
	PLM_DEVICE_BLOCK block = (PLM_DEVICE_BLOCK)(us);

	for (;;) {
		PLM_PACKET packet;
		struct B_UM_PACKET *umPacket;
		cpu_status state;

		/* pop one transmitted packet under the device lock */
		state = disable_interrupts();
		acquire_spinlock(&dev->lock);
		packet = (PLM_PACKET)
			QQ_PopHead(&block->TxPacketXmittedQ.Container);
		release_spinlock(&dev->lock);
		restore_interrupts(state);

		if (packet == 0)
			break;

		/* release the payload buffer outside the lock */
		umPacket = (struct B_UM_PACKET *)(packet);
		chunk_pool_put(umPacket->data);
		umPacket->data = NULL;

		/* recycle the descriptor onto the free queue */
		state = disable_interrupts();
		acquire_spinlock(&dev->lock);
		QQ_PushTail(&block->TxPacketFreeQ.Container, packet);
		release_spinlock(&dev->lock);
		restore_interrupts(state);
	}
	return LM_STATUS_SUCCESS;
}
示例3: midi_interrupt_op
/* Enables or disables MPU-401 MIDI interrupts on the CMedia card.
 * All hardware register accesses happen with interrupts disabled and the
 * card's hardware spinlock held. Unknown ops are ignored. */
void
midi_interrupt_op(
	int32 op,
	void * data)
{
	midi_dev * port = (midi_dev *)data;
	cpu_status state;

	ddprintf(("port = %p\n", port));
	switch (op) {
	case B_MPU_401_ENABLE_CARD_INT:
		ddprintf(("cmedia_pci: B_MPU_401_ENABLE_CARD_INT\n"));
		state = disable_interrupts();
		acquire_spinlock(&port->card->hardware);
		/* register our handler, then unmask the MPU interrupt bits */
		increment_interrupt_handler(port->card);
		set_direct(port->card, 0x01, 0x00, 0x80);
		set_indirect(port->card, 0x2A, 0x04, 0xff);
		release_spinlock(&port->card->hardware);
		restore_interrupts(state);
		break;
	case B_MPU_401_DISABLE_CARD_INT:
		/* turn off MPU interrupts */
		ddprintf(("cmedia_pci: B_MPU_401_DISABLE_CARD_INT\n"));
		state = disable_interrupts();
		acquire_spinlock(&port->card->hardware);
		set_direct(port->card, 0x01, 0x80, 0x80);
		/* remove interrupt handler if necessary */
		decrement_interrupt_handler(port->card);
		release_spinlock(&port->card->hardware);
		restore_interrupts(state);
		break;
	}
	ddprintf(("cmedia_pci: midi_interrupt_op() done\n"));
}
示例4: acquire_spinlock_nocheck
/* Fetches the next pending inter-processor message for curr_cpu.
 * The per-CPU mailbox is checked first; if it is empty, the broadcast
 * mailbox is scanned for a message this CPU has not yet handled.
 * *source_mailbox is set to MAILBOX_LOCAL or MAILBOX_BCAST accordingly.
 * NOTE(review): if both mailboxes are empty, *source_mailbox is left
 * unmodified and NULL is returned -- callers presumably check the return
 * value first; confirm. */
static struct smp_msg *smp_check_for_message(int curr_cpu, int *source_mailbox)
{
struct smp_msg *msg;
acquire_spinlock_nocheck(&cpu_msg_spinlock[curr_cpu]);
msg = smp_msgs[curr_cpu];
if(msg != 0) {
/* pop the head of this CPU's private mailbox */
smp_msgs[curr_cpu] = msg->next;
release_spinlock(&cpu_msg_spinlock[curr_cpu]);
// dprintf(" found msg 0x%x in cpu mailbox\n", msg);
*source_mailbox = MAILBOX_LOCAL;
} else {
// try getting one from the broadcast mailbox
release_spinlock(&cpu_msg_spinlock[curr_cpu]);
acquire_spinlock_nocheck(&broadcast_msg_spinlock);
msg = smp_broadcast_msgs;
while(msg != 0) {
if(CHECK_BIT(msg->proc_bitmap, curr_cpu) != 0) {
// we have handled this one already
msg = msg->next;
continue;
}
// mark it so we wont try to process this one again
msg->proc_bitmap = SET_BIT(msg->proc_bitmap, curr_cpu);
*source_mailbox = MAILBOX_BCAST;
break;
}
/* broadcast messages stay in the list; only the bitmap marks progress */
release_spinlock(&broadcast_msg_spinlock);
// dprintf(" found msg 0x%x in broadcast mailbox\n", msg);
}
return msg;
}
示例5: find_free_message
/*! Finds a free message and gets it.
NOTE: has side effect of disabling interrupts
return value is the former interrupt state
*/
static cpu_status
find_free_message(struct smp_msg** msg)
{
cpu_status state;
TRACE(("find_free_message: entry\n"));
retry:
while (sFreeMessageCount <= 0) {
state = disable_interrupts();
process_all_pending_ici(smp_get_current_cpu());
restore_interrupts(state);
PAUSE();
}
state = disable_interrupts();
acquire_spinlock(&sFreeMessageSpinlock);
if (sFreeMessageCount <= 0) {
// someone grabbed one while we were getting the lock,
// go back to waiting for it
release_spinlock(&sFreeMessageSpinlock);
restore_interrupts(state);
goto retry;
}
*msg = sFreeMessages;
sFreeMessages = (*msg)->next;
sFreeMessageCount--;
release_spinlock(&sFreeMessageSpinlock);
TRACE(("find_free_message: returning msg %p\n", *msg));
return state;
}
示例6: signal_local_semaphore
/* Signals a process-local semaphore: decrements its count and wakes the
 * first waiting thread, if any (removing it from the waiting list).
 * Returns 0 on success, -1 if sem_id does not name a live semaphore.
 * The whole operation runs under the process's sems_lock spinlock. */
int signal_local_semaphore( struct thread* tr, int sem_id )
{
	struct process *proc = tr->process;
	struct sem_link *waiter;

	acquire_spinlock( & (proc->sems_lock) );
	if ( proc->sems[ sem_id ].sem_id != sem_id )
	{
		/* slot is stale or was never allocated */
		release_spinlock( & (proc->sems_lock) );
		return -1;
	}

	proc->sems[ sem_id ].count -= 1;

	// wake up any waiting threads
	waiter = proc->sems[ sem_id ].waiting_list;
	if ( waiter != NULL )
	{
		struct thread *target = find_thread_with_id( tr->process, waiter->tid );
		if ( target != NULL )
			set_thread_state( target, THREAD_RUNNING );
		/* unlink and discard the satisfied wait record */
		proc->sems[ sem_id ].waiting_list = waiter->next;
		free( waiter );
	}
	release_spinlock( & ( proc->sems_lock ) );
	return 0;
}
示例7: b57_read
/* Reads one received ethernet frame into 'data'.
 * Blocks on packet_release_sem when the device is in blocking mode;
 * otherwise does a non-blocking sem decrement (a deliberate hack, per the
 * original comment). Copies at most *numBytes bytes and updates *numBytes
 * to the actual frame size. Returns B_OK on success, B_ERROR if no packet
 * is available or the packet is bad/oversized.
 * NOTE(review): on error *numBytes is set to -1, which wraps to SIZE_MAX
 * if numBytes points at an unsigned size_t -- confirm callers only look at
 * the B_ERROR return in that case. */
static status_t
b57_read(void *cookie,off_t pos,void *data,size_t *numBytes)
{
struct be_b57_dev *pUmDevice = (struct be_b57_dev *)cookie;
PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
PLM_PACKET pPacket;
struct B_UM_PACKET *pUmPacket;
cpu_status cpu;
if (pUmDevice->block)
acquire_sem(pUmDevice->packet_release_sem);
else {
/* Decrement the receive sem anyway, but don't block
this is a horrible hack, but it works. */
acquire_sem_etc(pUmDevice->packet_release_sem, 1, B_RELATIVE_TIMEOUT, 0);
}
/* pop the next received packet under the device lock */
cpu = disable_interrupts();
acquire_spinlock(&pUmDevice->lock);
pPacket = (PLM_PACKET)
QQ_PopHead(&pUmDevice->RxPacketReadQ.Container);
release_spinlock(&pUmDevice->lock);
restore_interrupts(cpu);
if (pPacket == 0) {
*numBytes = -1;
return B_ERROR;
}
pUmPacket = (struct B_UM_PACKET *) pPacket;
/* reject bad-status or oversized (>1518 byte) frames: recycle the
   descriptor back onto the free queue and report an error */
if (pPacket->PacketStatus != LM_STATUS_SUCCESS
|| pPacket->PacketSize > 1518) {
cpu = disable_interrupts();
acquire_spinlock(&pUmDevice->lock);
QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
release_spinlock(&pUmDevice->lock);
restore_interrupts(cpu);
*numBytes = -1;
return B_ERROR;
}
/* clamp the copy to the actual frame size */
if ((pPacket->PacketSize) < *numBytes)
*numBytes = pPacket->PacketSize;
memcpy(data,pUmPacket->data,*numBytes);
/* descriptor is done: return it to the free queue */
cpu = disable_interrupts();
acquire_spinlock(&pUmDevice->lock);
QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
release_spinlock(&pUmDevice->lock);
restore_interrupts(cpu);
return B_OK;
}
示例8: rhine_xmit
/* Transmit entry point for the Rhine driver.
 * NOTE(review): the ENTIRE body is wrapped in `#if 0` (from the line after
 * PANIC_UNIMPLEMENTED() down to the closing #endif), so as compiled this
 * function does nothing at all -- it is a disabled stub. The dead code
 * below sketches the intended path: serialize via tx_sem and r->lock, copy
 * the frame into the tx ring slot with interrupts off under reg_spinlock,
 * pad to ETHERNET_MIN_SIZE, kick the hardware, and advance txbn mod 4. */
void rhine_xmit(rhine *r, const char *ptr, ssize_t len)
{
#if 0
	PANIC_UNIMPLEMENTED();
#if 0
	int i;
#endif
//restart:
	sem_acquire(r->tx_sem, 1);
	mutex_lock(&r->lock);
#if 0
	dprintf("XMIT %d %x (%d)\n",r->txbn, ptr, len);
	dprintf("dumping packet:");
	for(i=0; i<len; i++) {
		if(i%8 == 0)
			dprintf("\n");
		dprintf("0x%02x ", ptr[i]);
	}
	dprintf("\n");
#endif
	int_disable_interrupts();
	acquire_spinlock(&r->reg_spinlock);
#if 0
	/* wait for clear-to-send */
	if(!(RTL_READ_32(r, RT_TXSTATUS0 + r->txbn*4) & RT_TX_HOST_OWNS)) {
		dprintf("rhine_xmit: no txbuf free\n");
		rhine_dumptxstate(r);
		release_spinlock(&r->reg_spinlock);
		int_restore_interrupts();
		mutex_unlock(&r->lock);
		sem_release(r->tx_sem, 1);
		goto restart;
	}
#endif
	memcpy((void*)(r->txbuf + r->txbn * 0x800), ptr, len);
	if(len < ETHERNET_MIN_SIZE)
		len = ETHERNET_MIN_SIZE;
	RTL_WRITE_32(r, RT_TXSTATUS0 + r->txbn*4, len | 0x80000);
	if(++r->txbn >= 4)
		r->txbn = 0;
	release_spinlock(&r->reg_spinlock);
	int_restore_interrupts();
	mutex_unlock(&r->lock);
#endif
}
示例9: resume_thread
/* Resumes a paused thread: unlinks it from the inactive list (under its
 * spinlock) and hands it to the least-loaded CPU as a CPU_TASK_RESUME task.
 * Returns ERR_NONE on success, ERR_BAD_STATE if the thread was not in
 * THREAD_STATE_PAUSED. */
err_code resume_thread(thread_id id) {
	err_code err = ERR_NONE;
	CAST_TO_THREAD(thrd, id);
	acquire_spinlock(&inactive.lock);
	if (thrd->state == THREAD_STATE_PAUSED) {
		thrd->state = THREAD_STATE_UNKNOWN;
		/* unlink from the doubly linked inactive list */
		if (inactive.tail == thrd)
			inactive.tail = thrd->next;
		if (thrd->next)
			thrd->next->prev = thrd->prev;
		if (thrd->prev)
			thrd->prev->next = thrd->next;
		inactive.total_threads--;
	}
	else
		err = ERR_BAD_STATE;
	release_spinlock(&inactive.lock);
	if (!err) {
		/* reset scheduling state and dispatch to a CPU */
		thrd->real_priority = thrd->priority;
		thrd->quantum = 0;
		update_priority_quantum(thrd);
		struct cpu_task task = { .type = CPU_TASK_RESUME, .thread = thrd };
		run_cpu_task(find_least_loaded_cpu(thrd->affinity), &task);
	}
	/* BUG FIX: the original snippet fell off the end of this non-void
	   function (the return and closing brace were missing) -- reaching the
	   end of a non-void function and using the value is undefined behavior.
	   Return the recorded status to the caller. */
	return err;
}
示例10: free_single_page
/* Returns one page to a randomly chosen global free-page list.
 * A random list is picked and its spinlock taken with try_lock; on
 * contention a new random list is picked instead of spinning, spreading
 * lock pressure across MAXLISTS lists. */
void free_single_page(region_t r, struct page *p)
/* Assumes freepages_lock held */
{
	int idx;
#ifndef NMEMDEBUG
	ASSERT_INUSE(p, r);
	set_page_region(MAPNB(p), PAGENB(p), FREEPAGE);
#endif /* ifndef NMEMDEBUG */
	list_id = get_next_random_list(MAXLISTS);
	while (try_lock(&single_pages[list_id % MAXLISTS].lock) == 1)
		list_id = get_next_random_list(MAXLISTS);
	/* BUG FIX: the lock was acquired on single_pages[list_id % MAXLISTS]
	   but the push and the release used single_pages[list_id] WITHOUT the
	   modulo -- if get_next_random_list() can ever return >= MAXLISTS this
	   mutates and unlocks a different list than the one locked. Use one
	   reduced index for lock, push and unlock. */
	idx = list_id % MAXLISTS;
	p->next = single_pages[idx].pages;
	single_pages[idx].pages = p;
	single_pages[idx].page_count++;
	release_spinlock(&single_pages[idx].lock);
}
示例11: alloc_single_page
/* Allocates one page from a randomly chosen free list, linking it to
 * 'next'. If the chosen list is empty, a fresh group of PAGE_GROUP_SIZE
 * pages is allocated and added first. The list's spinlock is taken with
 * try_lock, re-rolling the list choice on contention. */
struct page* alloc_single_page(struct page *next)
{
	struct page *p = NULL;
	int idx;
	list_id = get_next_random_list(MAXLISTS);
	while (try_lock(&single_pages[list_id % MAXLISTS].lock) == 1)
		list_id = get_next_random_list(MAXLISTS);
	idx = list_id % MAXLISTS;
	/* BUG FIX: the braces around this refill branch were lost (the
	   commented-out Hash(pt) version in the original shows them), so
	   add_single_pages(p) ran unconditionally -- including with p == NULL
	   whenever the list already had pages. Refill only when empty. */
	if (single_pages[idx].page_count == 0) {
		p = alloc_new(PAGE_GROUP_SIZE, NULL);
		add_single_pages(p);
	}
	/* pop the head of the chosen list */
	p = single_pages[idx].pages;
	single_pages[idx].pages = p->next;
	p->next = next;
	single_pages[idx].page_count--;
	release_spinlock(&single_pages[idx].lock);
	return p;
}
示例12: alloc_pages
/* Allocates n pages (n >= K) from a randomly chosen free list, returning
 * the head page linked to 'next'. If the chosen list is short, a fresh
 * group of n + PAGE_GROUP_SIZE pages is allocated and added first. The
 * list's spinlock is taken with try_lock, re-rolling the choice on
 * contention. */
struct page* alloc_pages(int n, struct page *next)
{
	struct page *ret_val;
	struct page *p = NULL;
	int idx;
	assert(n >= K);
	list_id = get_next_random_list(MAXLISTS);
	while (try_lock(&single_pages[list_id % MAXLISTS].lock) == 1)
		list_id = get_next_random_list(MAXLISTS);
	idx = list_id % MAXLISTS;
	/* BUG FIX: the braces around this refill branch were lost (the
	   commented-out Hash(pt) version shows them), so add_single_pages(p)
	   ran unconditionally -- including with p == NULL whenever the list
	   already had enough pages. Refill only when the list is short. */
	if (n > single_pages[idx].page_count) {
		p = alloc_new(n + PAGE_GROUP_SIZE, NULL);
		add_single_pages(p);
	}
	/* detach the head page and account for the n pages handed out */
	ret_val = single_pages[idx].pages;
	single_pages[idx].pages = single_pages[idx].pages->next;
	ret_val->next = next;
	single_pages[idx].page_count -= n;
	release_spinlock(&single_pages[idx].lock);
	return ret_val;
}
示例13: rtl8169_int
/* Interrupt handler for the RTL8169 NIC. Reads the interrupt status
 * register under the register spinlock, dispatches RX and TX work to the
 * respective sub-handlers, then acknowledges the handled bits by writing
 * the status back to REG_ISR. A zero status (shared/spurious interrupt)
 * is ignored. */
static void rtl8169_int(void* data)
{
	rtl8169 *nic = (rtl8169 *)data;
	int rc = INT_NO_RESCHEDULE;
	uint16 status;

	acquire_spinlock(&nic->reg_spinlock);
	status = RTL_READ_16(nic, REG_ISR);
	SHOW_FLOW(3, "rtl8169_int: istat 0x%x\n", status);
	if (status != 0) {
		if ((status & (IMR_ROK|IMR_RER|IMR_RDU|IMR_RXOVL)) != 0)
			rc |= rtl8169_rxint(nic, status);
		if ((status & (IMR_TOK|IMR_TER|IMR_TDU)) != 0)
			rc |= rtl8169_txint(nic, status);
		/* ack exactly the bits we saw */
		RTL_WRITE_16(nic, REG_ISR, status);
	}
	release_spinlock(&nic->reg_spinlock);
	// TODO req reschedule if needed?
	(void) rc;
	//return rc;
}
示例14: smp_send_broadcast_ici_interrupts_disabled
/* Sends an inter-CPU interrupt message to every other CPU.
 * Must be called with interrupts disabled (per the _interrupts_disabled
 * suffix and the use of acquire_spinlock_nocheck). Grabs a free message,
 * fills it in, queues it on the broadcast mailbox, and raises the
 * broadcast ICI. For SMP_MSG_FLAG_SYNC messages, spins (servicing pending
 * ICIs) until all target CPUs acknowledge, then returns the message to the
 * free list. No-op while sICIEnabled is false (early boot). */
void
smp_send_broadcast_ici_interrupts_disabled(int32 currentCPU, int32 message,
addr_t data, addr_t data2, addr_t data3, void *dataPointer, uint32 flags)
{
if (!sICIEnabled)
return;
TRACE(("smp_send_broadcast_ici_interrupts_disabled: cpu %ld mess 0x%lx, "
"data 0x%lx, data2 0x%lx, data3 0x%lx, ptr %p, flags 0x%lx\n",
currentCPU, message, data, data2, data3, dataPointer, flags));
struct smp_msg *msg;
find_free_message_interrupts_disabled(currentCPU, &msg);
msg->message = message;
msg->data = data;
msg->data2 = data2;
msg->data3 = data3;
msg->data_ptr = dataPointer;
/* every CPU except the sender must process (and deref) the message */
msg->ref_count = sNumCPUs - 1;
msg->flags = flags;
/* pre-mark the sender so it never handles its own broadcast */
msg->proc_bitmap = SET_BIT(0, currentCPU);
msg->done = false;
TRACE(("smp_send_broadcast_ici_interrupts_disabled %ld: inserting msg %p "
"into broadcast mbox\n", currentCPU, msg));
// stick it in the appropriate cpu's mailbox
acquire_spinlock_nocheck(&sBroadcastMessageSpinlock);
msg->next = sBroadcastMessages;
sBroadcastMessages = msg;
release_spinlock(&sBroadcastMessageSpinlock);
arch_smp_send_broadcast_ici();
TRACE(("smp_send_broadcast_ici_interrupts_disabled %ld: sent interrupt\n",
currentCPU));
if ((flags & SMP_MSG_FLAG_SYNC) != 0) {
// wait for the other cpus to finish processing it
// the interrupt handler will ref count it to <0
// if the message is sync after it has removed it from the mailbox
TRACE(("smp_send_broadcast_ici_interrupts_disabled %ld: waiting for "
"ack\n", currentCPU));
while (msg->done == false) {
/* keep servicing incoming ICIs to avoid a cross-CPU deadlock */
process_all_pending_ici(currentCPU);
PAUSE();
}
TRACE(("smp_send_broadcast_ici_interrupts_disabled %ld: returning "
"message to free list\n", currentCPU));
// for SYNC messages, it's our responsibility to put it
// back into the free list
return_free_message(msg);
}
TRACE(("smp_send_broadcast_ici_interrupts_disabled: done\n"));
}
示例15: disable_interrupts
/* Allocates one object from a slab allocator.
 * Prefers the free list of previously released objects; otherwise carves
 * the next object out of the current wilderness slab, allocating a new
 * wilderness slab when there is none or the current one is exhausted.
 * The whole operation runs with interrupts disabled and sa->lock held.
 * Returns NULL if a new slab cannot be allocated. */
void *slab_alloc(struct slab_allocator *sa)
{
	void *object = 0;
	int old_flags;

	old_flags = disable_interrupts();
	acquire_spinlock(&sa->lock);
	if (sa->free_list)
	{
		// Grab freed object; the first word of a free object links to the next
		object = sa->free_list;
		sa->free_list = *((void**) object);
	}
	else
	{
		// If there is no wilderness, or the slab is full, create a new
		// wilderness slab
		if (sa->wilderness_slab == 0
			|| sa->wilderness_offset + sa->object_size > sa->slab_size)
		{
			sa->wilderness_slab = kmalloc(sa->slab_size);
			sa->wilderness_offset = 0;
		}

		// BUG FIX: the original carved objects out of the wilderness slab
		// without checking it -- if kmalloc() can return NULL on exhaustion,
		// that handed out pointers offset from NULL. Fail the allocation
		// instead. (If kmalloc panics on OOM in this kernel, this check is
		// simply never taken.)
		if (sa->wilderness_slab != 0)
		{
			object = (void*)((char*) sa->wilderness_slab + sa->wilderness_offset);
			sa->wilderness_offset += sa->object_size;
		}
	}
	release_spinlock(&sa->lock);
	restore_interrupts(old_flags);
	return object;
}