本文整理汇总了C++中spinlock_release函数的典型用法代码示例。如果您正苦于以下问题:C++ spinlock_release函数的具体用法?C++ spinlock_release怎么用?C++ spinlock_release使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了spinlock_release函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: service_isvalid
/**
 * Check whether a service pointer refers to a live service.
 *
 * Walks the global service list under service_spin so the list cannot
 * change while we scan it.
 *
 * @param service The pointer to check
 * @return 1 if the service is in the list of all services, 0 otherwise
 */
int
service_isvalid(SERVICE *service)
{
SERVICE *cur;
int found = 0;
spinlock_acquire(&service_spin);
for (cur = allServices; cur != NULL; cur = cur->next)
{
if (cur == service)
{
found = 1;
break;
}
}
spinlock_release(&service_spin);
return found;
}
示例2: lock_do_i_hold
/*
 * Return true if the current thread holds the given lock.
 *
 * Before the CPU structures exist (very early boot) there is no
 * meaningful notion of "current thread", so we report true to keep
 * callers' KASSERTs from firing; this mirrors common OS/161 practice.
 *
 * The lock's spinlock is taken so lk_thread is read consistently
 * against concurrent lock_acquire/lock_release.
 */
bool
lock_do_i_hold(struct lock *lock)
{
if (!CURCPU_EXISTS()) {
return true;
}
bool held = false;
spinlock_acquire(&lock->lk_spinlock);
held = (lock->lk_thread == curthread);
spinlock_release(&lock->lk_spinlock);
return held;
}
示例3: gwbuf_add_hint
/**
 * Append a hint to a buffer's hint list.
 *
 * The buffer's spinlock is held while the list is modified so hints
 * added concurrently are not lost.
 *
 * @param buf The buffer to add the hint to
 * @param hint The hint itself
 * @return Non-zero on success
 */
int
gwbuf_add_hint(GWBUF *buf, HINT *hint)
{
HINT **tail;
spinlock_acquire(&buf->gwbuf_lock);
/* Walk to the list's terminating NULL link, then splice in. */
tail = &buf->hint;
while (*tail != NULL)
{
tail = &(*tail)->next;
}
*tail = hint;
spinlock_release(&buf->gwbuf_lock);
return 1;
}
示例4: rses_begin_locked_router_action
/**
 * @node Acquires lock to router client session if it is not closed.
 *
 * Parameters:
 * @param rses - in, use
 *
 * @return true if the router session was not closed; in that case the
 * session lock is held and must be released later. false if the session
 * was closed before (or while) the lock was being acquired, in which
 * case no lock is held on return.
 *
 * @details The closed flag is checked twice: once cheaply without the
 * lock, and again under the lock to close the race with a concurrent
 * session teardown.
 */
static bool rses_begin_locked_router_action(
        ROUTER_CLIENT_SES* rses)
{
CHK_CLIENT_RSES(rses);
/* Cheap unlocked pre-check: bail out early if already closed. */
if (rses->rses_closed) {
return false;
}
spinlock_acquire(&rses->rses_lock);
/* Re-check under the lock; the session may have closed meanwhile. */
if (rses->rses_closed) {
spinlock_release(&rses->rses_lock);
return false;
}
/* Session is open and we hold the lock. */
return true;
}
示例5: monitorList
/**
 * Print a table of all monitors and their running state to a DCB.
 *
 * The monitor list lock is held for the whole listing so the chain
 * cannot change mid-print.
 *
 * @param dcb DCB for printing output
 */
void
monitorList(DCB *dcb)
{
MONITOR *mon;
spinlock_acquire(&monLock);
dcb_printf(dcb, "---------------------+---------------------\n");
dcb_printf(dcb, "%-20s | Status\n", "Monitor");
dcb_printf(dcb, "---------------------+---------------------\n");
for (mon = allMonitors; mon != NULL; mon = mon->next)
{
const char *state = (mon->state & MONITOR_STATE_RUNNING)
? "Running" : "Stopped";
dcb_printf(dcb, "%-20s | %s\n", mon->name, state);
}
dcb_printf(dcb, "---------------------+---------------------\n");
spinlock_release(&monLock);
}
示例6: cputs
// `High'-level console I/O. Used by readline and cprintf.
//
// In user mode (CPL != 0 per the low bits of CS) we delegate to the
// sys_cputs system call. In kernel mode we hold the console spinlock
// while printing the entire string, so that the output of different
// cputs calls won't get mixed; the "already held" check implements
// ad hoc recursive locking for debugging convenience.
void
cputs(const char *str)
{
if (read_cs() & 3) {
// use syscall from user mode; `return sys_cputs(...)` on a
// void function is a constraint violation in strict C, so
// call and return separately.
sys_cputs(str);
return;
}
bool already = spinlock_holding(&cons_lock);
if (!already)
spinlock_acquire(&cons_lock);
while (*str)
cons_putc(*str++);
if (!already)
spinlock_release(&cons_lock);
}
示例7: cpustatus_generate_irq
/**
 * Raise an inter-processor interrupt via the CPU status device.
 *
 * Interrupts are disabled and the device spinlock held around the
 * command write so the IRQ generation is atomic with respect to other
 * users of the device.
 *
 * @param dev The CPU status device to raise the IRQ through
 */
void cpustatus_generate_irq(device_t *dev)
{
    interrupt_status_t intr_status;
    volatile cpu_io_area_t *iobase;
    cpu_real_device_t *cpu;
    /* Assert dev is non-NULL BEFORE dereferencing it; the original
       read dev->io_address and dev->real_device first, making the
       assertion useless. */
    KERNEL_ASSERT(dev != NULL);
    iobase = (cpu_io_area_t *)dev->io_address;
    cpu = (cpu_real_device_t *)dev->real_device;
    KERNEL_ASSERT(cpu != NULL);
    intr_status = _interrupt_disable();
    spinlock_acquire(&cpu->slock);
    /* If you really want to do something with inter-cpu interrupts,
       do it here.*/
    /* Generate the IRQ */
    iobase->command = CPU_COMMAND_RAISE_IRQ;
    spinlock_release(&cpu->slock);
    _interrupt_set_state(intr_status);
}
示例8: lamebus_detach_interrupt
/*
 * Unregister a function that was being called when a particular slot
 * signaled an interrupt.
 *
 * Panics if the slot was never marked in use; asserts that a handler
 * was actually registered before clearing it.
 */
void
lamebus_detach_interrupt(struct lamebus_softc *sc, int slot)
{
uint32_t slotbit;
/* Validate the slot before using it as a shift amount. */
KASSERT(slot >= 0 && slot < LB_NSLOTS);
slotbit = ((uint32_t)1) << slot;
spinlock_acquire(&sc->ls_lock);
if ((sc->ls_slotsinuse & slotbit) == 0) {
panic("lamebus_detach_interrupt: slot %d not marked in use\n",
      slot);
}
KASSERT(sc->ls_irqfuncs[slot] != NULL);
sc->ls_devdata[slot] = NULL;
sc->ls_irqfuncs[slot] = NULL;
spinlock_release(&sc->ls_lock);
}
示例9: thread_startup
/*
 * This function is where new threads start running. The arguments
 * ENTRYPOINT, DATA1, and DATA2 are passed through from thread_fork.
 *
 * Because new code comes here from inside the middle of
 * thread_switch, the beginning part of this function must match the
 * tail of thread_switch.
 *
 * NOTE(review): the statement order below is load-bearing — the
 * runqueue lock release, as_activate, exorcise, and spl0 mirror what
 * thread_switch's tail would have done; do not reorder.
 */
void
thread_startup(void (*entrypoint)(void *data1, unsigned long data2),
	       void *data1, unsigned long data2)
{
	struct thread *cur;
	cur = curthread;
	/* Clear the wait channel and set the thread state. */
	cur->t_wchan_name = NULL;
	cur->t_state = S_RUN;
	/* Release the runqueue lock acquired in thread_switch. */
	spinlock_release(&curcpu->c_runqueue_lock);
	/* Activate our address space in the MMU. */
	as_activate();
	/* Clean up dead threads. */
	exorcise();
	/* Enable interrupts. */
	spl0();
#if OPT_SYNCHPROBS
	/* Yield a random number of times to get a good mix of threads. */
	{
		int i, n;
		n = random()%161 + random()%161;
		for (i=0; i<n; i++) {
			thread_yield();
		}
	}
#endif
	/* Call the function. */
	entrypoint(data1, data2);
	/* Done. Does not return; thread_exit reclaims this thread. */
	thread_exit();
}
示例10: freeSession
/**
 * @node Unlink from backend server, unlink from router's connection list,
 * and free memory of a router client session.
 *
 * Parameters:
 * @param router_instance - the router instance (a ROUTER_INSTANCE *)
 *
 * @param router_client_ses - the client session to free
 *        (a ROUTER_CLIENT_SES *)
 *
 * @return void
 *
 * @details Decrements the backend's connection count, removes the
 * session from the router's singly-linked session list under the
 * router lock, logs the unlink, and frees the session memory.
 */
static void freeSession(
        ROUTER* router_instance,
        void* router_client_ses)
{
        ROUTER_INSTANCE* router = (ROUTER_INSTANCE *)router_instance;
        ROUTER_CLIENT_SES* router_cli_ses =
                (ROUTER_CLIENT_SES *)router_client_ses;
        ROUTER_CLIENT_SES** link;
        int prev_val;
        /* Give back our reference on the backend's connection count. */
        prev_val = atomic_add(&router_cli_ses->backend->current_connection_count, -1);
        ss_dassert(prev_val > 0);
        /* Unlink via the pointer-to-next-pointer idiom; handles the
         * head and interior cases uniformly. */
        spinlock_acquire(&router->lock);
        for (link = &router->connections; *link != NULL; link = &(*link)->next) {
                if (*link == router_cli_ses) {
                        *link = router_cli_ses->next;
                        break;
                }
        }
        spinlock_release(&router->lock);
        LOGIF(LD, (skygw_log_write_flush(
                LOGFILE_DEBUG,
                "%lu [freeSession] Unlinked router_client_session %p from "
                "router %p and from server on port %d. Connections : %d. ",
                pthread_self(),
                router_cli_ses,
                router,
                router_cli_ses->backend->server->port,
                prev_val-1)));
        free(router_cli_ses);
}
示例11: hashtable_write_lock
/**
 * Obtain an exclusive write lock for the hash table.
 *
 * We acquire the hashtable spinlock, then check for the number of
 * readers being zero. If it is not we hold the spinlock and
 * loop waiting for the n_readers to reach zero. This will prevent
 * any new readers being granted access but will not prevent current
 * readers releasing the read lock.
 *
 * Once we have no readers we increment writelock and test if we are
 * the only writelock holder, if not we repeat the process. We hold
 * the spinlock throughout the process since both read and write
 * locks do not require the spinlock to be acquired.
 *
 * NOTE(review): both waits below are busy-waits performed while the
 * spinlock is held — presumably acceptable because holders are
 * expected to be short-lived; verify against the read-lock paths.
 *
 * @param table The table to lock for updates
 */
static void
hashtable_write_lock(HASHTABLE *table)
{
    int available;
    spinlock_acquire(&table->spin);
    do
    {
        /* Spin until all active readers have drained. */
        while (table->n_readers)
        {
            ;
        }
        /* atomic_add returns the PREVIOUS value: 0 means we are the
         * sole writer; otherwise back out our increment and retry. */
        available = atomic_add(&table->writelock, 1);
        if (available != 0)
        {
            atomic_add(&table->writelock, -1);
        }
    }
    while (available != 0);
    spinlock_release(&table->spin);
}
示例12: timer_read
// Read and returns the number of 1.193182MHz ticks since kernel boot.
// This function also updates the high-order bits of our tick count,
// so it MUST be called at least once per 1/18 sec.
uint64_t
timer_read(void)
{
spinlock_acquire(&lock);
// Read the current timer counter.
outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH);
uint8_t lo = inb(IO_TIMER1);
uint8_t hi = inb(IO_TIMER1);
uint16_t ctr = hi << 8 | lo;
assert(ctr != 0);
// If the counter has wrapped, assume we're into the next tick.
if (ctr > last)
base += 65535;
last = ctr;
uint64_t ticks = base + (65535 - ctr);
spinlock_release(&lock);
return ticks;
}
示例13: thread_finish
/** Perform suicide. The calling thread will kill itself by freeing
 * its memory and other resources and marking itself as dying. The
 * scheduler will free the thread table entry when it encounters dying
 * threads.
 *
 * Never returns: ends with a software interrupt that forces a
 * reschedule, after which this thread is never run again.
 */
void thread_finish(void)
{
    TID_t my_tid;
    my_tid = thread_get_current_thread();
    /* Disable interrupts before touching the thread table; the state
       change must not be interleaved with the scheduler. */
    _interrupt_disable();
    /* Check that the page mappings have been cleared. */
    KERNEL_ASSERT(thread_table[my_tid].pagetable == NULL);
    spinlock_acquire(&thread_table_slock);
    thread_table[my_tid].state = THREAD_DYING;
    spinlock_release(&thread_table_slock);
    _interrupt_enable();
    /* Force a context switch; the scheduler reaps dying threads. */
    _interrupt_generate_sw0();
    /* not possible without a stack? alternative in assembler? */
    KERNEL_PANIC("thread_finish(): thread was not destroyed");
}
示例14: wchan_destroy
/*
 * Destroy a wait channel. Must be empty and unlocked.
 * (The corresponding cleanup functions require this.)
 *
 * Removal from allwchans[] uses swap-with-last so the array stays
 * dense without shifting elements.
 */
void
wchan_destroy(struct wchan* wc) {
	unsigned count, lastidx;
	struct wchan* moved;
	/* remove from allwchans[] */
	spinlock_acquire(&allwchans_lock);
	count = wchanarray_num(&allwchans);
	lastidx = count - 1;
	assert(wchanarray_get(&allwchans, wc->wc_index) == wc);
	if (wc->wc_index != lastidx) {
		/* move the last entry into our slot */
		moved = wchanarray_get(&allwchans, lastidx);
		wchanarray_set(&allwchans, wc->wc_index, moved);
		moved->wc_index = wc->wc_index;
	}
	wchanarray_setsize(&allwchans, lastidx);
	spinlock_release(&allwchans_lock);
	threadlist_cleanup(&wc->wc_threads);
	kfree(wc);
}
示例15: monitor_free
/**
 * Free a monitor, first stop the monitor and then remove the monitor from
 * the chain of monitors and free the memory.
 *
 * Uses the pointer-to-next-pointer idiom for the unlink: the original
 * code dereferenced a NULL head (`ptr->next`) when allMonitors was
 * empty; this version handles an empty list, the head, and interior
 * nodes uniformly.
 *
 * @param mon The monitor to free
 */
void
monitor_free(MONITOR *mon)
{
MONITOR **link;
mon->module->stopMonitor(mon->handle);
spinlock_acquire(&monLock);
for (link = &allMonitors; *link != NULL; link = &(*link)->next)
{
if (*link == mon)
{
*link = mon->next;
break;
}
}
spinlock_release(&monLock);
free(mon->name);
free(mon);
}