This article collects typical usage examples of the sched_unlock function in C++. If you are wondering what sched_unlock does, how to call it, or what real code that uses it looks like, the hand-picked examples below should help.
The following presents 15 code examples of the sched_unlock function, listed roughly in order of popularity.
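Before the examples, a quick refresher: sched_unlock() re-enables pre-emption that was previously disabled by a matching sched_lock(), and the calls nest, so pre-emption is only restored once the lock count drops back to zero. Below is a minimal sketch of the usual pattern; the shared counter g_shared_count is a hypothetical name used only for illustration.
#include <sched.h>

static volatile int g_shared_count;    /* Hypothetical shared state */

static void increment_shared(void)
{
  /* Disable pre-emption so no other task runs while we touch the
   * shared data. Note that sched_lock() does NOT disable interrupts.
   */

  sched_lock();
  g_shared_count++;
  sched_unlock();                      /* Re-enable pre-emption; calls may nest */
}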
Example 1: set_errno
FAR char *getcwd(FAR char *buf, size_t size)
{
char *pwd;
/* Verify input parameters */
#ifdef CONFIG_DEBUG
if (!buf || !size)
{
set_errno(EINVAL);
return NULL;
}
#endif
/* If no working directory is defined, then default to the home directory */
pwd = getenv("PWD");
if (!pwd)
{
pwd = CONFIG_LIB_HOMEDIR;
}
/* Verify that the cwd will fit into the user-provided buffer */
if (strlen(pwd) + 1 > size)
{
set_errno(ERANGE);
return NULL;
}
/* Copy the cwd to the user buffer */
strcpy(buf, pwd);
sched_unlock();
return buf;
}
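A minimal caller for the getcwd() shown above might look like this (standard POSIX usage; the function name show_cwd and the PATH_MAX-sized buffer are illustrative choices, not part of the example):
#include <limits.h>
#include <stdio.h>
#include <unistd.h>

static void show_cwd(void)
{
  char cwd[PATH_MAX];

  if (getcwd(cwd, sizeof(cwd)) != NULL)
    {
      printf("cwd: %s\n", cwd);
    }
  else
    {
      perror("getcwd");    /* errno was set to EINVAL or ERANGE above */
    }
}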
Example 2: px4_task_spawn_cmd
int px4_task_spawn_cmd(const char *name, int scheduler, int priority, int stack_size, main_t entry, char *const argv[])
{
int pid;
sched_lock();
/* create the task */
pid = task_create(name, priority, stack_size, entry, argv);
if (pid > 0) {
/* configure the scheduler */
struct sched_param param;
param.sched_priority = priority;
sched_setscheduler(pid, scheduler, &param);
/* XXX do any other private task accounting here before the task starts */
}
sched_unlock();
return pid;
}
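A hedged usage sketch for px4_task_spawn_cmd(). The entry point my_task_main, the 2048-byte stack, and the SCHED_DEFAULT / SCHED_PRIORITY_DEFAULT constants are assumptions for illustration only; substitute whatever policy, priority, and stack size your task actually needs.
static int my_task_main(int argc, char *argv[])
{
  /* ... task body ... */

  return 0;
}

static void spawn_example(void)
{
  int pid = px4_task_spawn_cmd("my_task",               /* Task name */
                               SCHED_DEFAULT,            /* Assumed scheduler policy */
                               SCHED_PRIORITY_DEFAULT,   /* Assumed priority */
                               2048,                     /* Stack size in bytes */
                               my_task_main,             /* Entry point (main_t) */
                               NULL);                    /* No argv */
  if (pid < 0)
    {
      /* task_create() failed; handle the error */
    }
}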
Example 3: load_absmodule
static int load_absmodule(FAR struct binary_s *bin)
{
FAR struct binfmt_s *binfmt;
int ret = -ENOENT;
bdbg("Loading %s\n", bin->filename);
/* Disabling pre-emption should be sufficient protection while accessing
* the list of registered binary format handlers.
*/
sched_lock();
/* Traverse the list of registered binary format handlers. Stop
* when either (1) a handler recognizes and loads the format, or
* (2) no handler recognizes the format.
*/
for (binfmt = g_binfmts; binfmt; binfmt = binfmt->next)
{
/* Use this handler to try to load the format */
ret = binfmt->load(bin);
if (ret == OK)
{
/* Successfully loaded -- break out with ret == 0 */
bvdbg("Successfully loaded module %s\n", bin->filename);
dump_module(bin);
break;
}
}
sched_unlock();
return ret;
}
Example 4: group_assigngid
static void group_assigngid(FAR struct task_group_s *group)
{
irqstate_t flags;
gid_t gid;
/* Pre-emption should already be enabled, but let's be careful anyway */
sched_lock();
/* Loop until we create a unique ID */
for (;;) {
/* Increment the ID counter. This is global data so be extra paranoid. */
flags = irqsave();
gid = ++g_gidcounter;
/* Check for overflow */
if (gid <= 0) {
g_gidcounter = 1;
irqrestore(flags);
} else {
/* Does a task group with this ID already exist? */
irqrestore(flags);
if (group_findbygid(gid) == NULL) {
/* Now assign this ID to the group and return */
group->tg_gid = gid;
sched_unlock();
return;
}
}
}
}
Example 5: vm_free
/*
* Deallocate memory region for specified address.
*
* The "addr" argument points to a memory region previously
* allocated through a call to vm_allocate() or vm_map(). The
* number of bytes freed is the number of bytes of the
* allocated region. If the previous or the next region is also
* free, they are combined and a larger free region is
* created.
*/
int
vm_free(task_t task, void *addr)
{
int err;
sched_lock();
if (!task_valid(task)) {
err = ESRCH;
goto out;
}
if (task != cur_task() && !task_capable(CAP_MEMORY)) {
err = EPERM;
goto out;
}
if (!user_area(addr)) {
err = EFAULT;
goto out;
}
err = do_free(task->map, addr);
out:
sched_unlock();
return err;
}
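For context, vm_free() is normally paired with vm_allocate() on the same task. The sketch below assumes the Prex-style prototypes vm_allocate(task, &addr, size, anywhere) and task_self(); treat both as assumptions rather than facts taken from the example.
static void vm_example(void)
{
  void  *buf  = NULL;
  task_t self = task_self();                    /* Assumed: handle of the current task */

  if (vm_allocate(self, &buf, 4096, 1) == 0)    /* Assumed prototype: map 4 KiB anywhere */
    {
      /* ... use the region at buf ... */

      vm_free(self, buf);                       /* Frees the entire allocated region */
    }
}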
Example 6: syslog_flush_intbuffer
int syslog_flush_intbuffer(FAR const struct syslog_channel_s *channel,
bool force)
{
syslog_putc_t putfunc;
int ch;
int ret = OK;
/* Select which putc function to use for this flush */
putfunc = force ? channel->sc_putc : channel->sc_force;
/* This logic is performed with the scheduler disabled to protect from
* concurrent modification by other tasks.
*/
sched_lock();
do
{
/* Transfer one character at a time. This is inefficient, but is
* done in this way to: (1) deal with concurrent modification of
* the interrupt buffer from interrupt activity, (2) avoid keeping
* interrupts disabled for a long time, and (3) handle wraparound
* of the circular buffer indices.
*/
ch = syslog_remove_intbuffer();
if (ch != EOF)
{
ret = putfunc(ch);
}
}
while (ch != EOF && ret >= 0);
sched_unlock();
return ret;
}
Example 7: up_wdginitialize
int up_wdginitialize(void)
{
#if (defined(CONFIG_SAM34_WDT) && !defined(CONFIG_WDT_DISABLE_ON_RESET))
int fd;
int ret;
/* Initialize and register the watchdog timer device */
wdgvdbg("Initializing Watchdog driver...\n");
sam_wdtinitialize(CONFIG_WATCHDOG_DEVPATH);
/* Open the watchdog device */
wdgvdbg("Opening.\n");
fd = open(CONFIG_WATCHDOG_DEVPATH, O_RDONLY);
if (fd < 0)
{
wdgdbg("open %s failed: %d\n", CONFIG_WATCHDOG_DEVPATH, errno);
goto errout;
}
/* Set the watchdog timeout */
wdgvdbg("Timeout = %d.\n", CONFIG_WDT_TIMEOUT);
ret = ioctl(fd, WDIOC_SETTIMEOUT, (unsigned long)CONFIG_WDT_TIMEOUT);
if (ret < 0)
{
wdgdbg("ioctl(WDIOC_SETTIMEOUT) failed: %d\n", errno);
goto errout_with_dev;
}
/* Set the watchdog minimum time */
wdgvdbg("MinTime = %d.\n", CONFIG_WDT_MINTIME);
ret = ioctl(fd, WDIOC_MINTIME, (unsigned long)CONFIG_WDT_MINTIME);
if (ret < 0)
{
wdgdbg("ioctl(WDIOC_MINTIME) failed: %d\n", errno);
goto errout_with_dev;
}
/* Start Kicker task */
#if defined(CONFIG_WDT_THREAD)
sched_lock();
int taskid = KERNEL_THREAD(CONFIG_WDT_THREAD_NAME,
CONFIG_WDT_THREAD_PRIORITY,
CONFIG_WDT_THREAD_STACKSIZE,
(main_t)wdog_daemon, (FAR char * const *)NULL);
ASSERT(taskid > 0);
sched_unlock();
#endif
return OK;
errout_with_dev:
close(fd);
errout:
return ERROR;
#else
return -ENODEV;
#endif
}
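The wdog_daemon() kicker task referenced above is not shown in this example. A plausible sketch, assuming the standard NuttX watchdog ioctls WDIOC_START and WDIOC_KEEPALIVE and a CONFIG_WDT_TIMEOUT expressed in milliseconds, would be:
static int wdog_daemon(int argc, char *argv[])
{
  int fd = open(CONFIG_WATCHDOG_DEVPATH, O_RDONLY);
  if (fd < 0)
    {
      return ERROR;
    }

  (void)ioctl(fd, WDIOC_START, 0);              /* Start the watchdog timer */

  for (;;)
    {
      /* Kick at half the timeout period (assumes CONFIG_WDT_TIMEOUT is in ms) */

      usleep((CONFIG_WDT_TIMEOUT / 2) * 1000);
      (void)ioctl(fd, WDIOC_KEEPALIVE, 0);      /* Reset the watchdog countdown */
    }

  return OK;                                    /* Never reached */
}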
Example 8: aio_cancel
int aio_cancel(int fildes, FAR struct aiocb *aiocbp)
{
FAR struct aio_container_s *aioc;
FAR struct aio_container_s *next;
int status;
int ret;
/* Check if a non-NULL aiocbp was provided */
/* Lock the scheduler so that no I/O events can complete on the worker
* thread until we complete this operation.
*/
ret = AIO_ALLDONE;
sched_lock();
aio_lock();
if (aiocbp) {
/* Check if the I/O has completed */
if (aiocbp->aio_result == -EINPROGRESS) {
/* No.. Find the container for this AIO control block */
for (aioc = (FAR struct aio_container_s *)g_aio_pending.head; aioc && aioc->aioc_aiocbp != aiocbp; aioc = (FAR struct aio_container_s *)aioc->aioc_link.flink) ;
/* Did we find a container for this AIO control block? We should; the aio_result says
* that the transfer is pending. If not we return AIO_ALLDONE.
*/
if (aioc) {
/* Yes... attempt to cancel the I/O. There are two
* possibilities: (1) the work has already been started and
* is no longer queued, or (2) the work has not been started
* and is still in the work queue. Only the second case can
* be cancelled. work_cancel() will return -ENOENT in the
* first case.
*/
status = work_cancel(LPWORK, &aioc->aioc_work);
if (status >= 0) {
aiocbp->aio_result = -ECANCELED;
ret = AIO_CANCELED;
} else {
ret = AIO_NOTCANCELED;
}
/* Remove the container from the list of pending transfers */
(void)aioc_decant(aioc);
}
}
} else {
/* No aiocbp.. cancel all outstanding I/O for the fildes */
next = (FAR struct aio_container_s *)g_aio_pending.head;
do {
/* Find the next container with this AIO control block */
for (aioc = next; aioc && aioc->aioc_aiocbp->aio_fildes != fildes; aioc = (FAR struct aio_container_s *)aioc->aioc_link.flink) ;
/* Did we find the container? We should; the aio_result says
* that the transfer is pending. If not we return AIO_ALLDONE.
*/
if (aioc) {
/* Yes... attempt to cancel the I/O. There are two
* possibilities: (1) the work has already been started and
* is no longer queued, or (2) the work has not been started
* and is still in the work queue. Only the second case can
* be cancelled. work_cancel() will return -ENOENT in the
* first case.
*/
status = work_cancel(LPWORK, &aioc->aioc_work);
/* Remove the container from the list of pending transfers */
next = (FAR struct aio_container_s *)aioc->aioc_link.flink;
aiocbp = aioc_decant(aioc);
DEBUGASSERT(aiocbp);
if (status >= 0) {
aiocbp->aio_result = -ECANCELED;
if (ret != AIO_NOTCANCELED) {
ret = AIO_CANCELED;
}
} else {
ret = AIO_NOTCANCELED;
}
}
} while (aioc);
}
aio_unlock();
sched_unlock();
return ret;
}
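A short caller sketch for aio_cancel() against a single in-flight aio_read(); the descriptor fd and the 64-byte buffer are placeholders (standard <aio.h> usage):
#include <aio.h>
#include <string.h>

static void cancel_example(int fd)
{
  static char buf[64];
  struct aiocb cb;

  memset(&cb, 0, sizeof(cb));
  cb.aio_fildes = fd;                 /* An already-open descriptor */
  cb.aio_buf    = buf;
  cb.aio_nbytes = sizeof(buf);

  if (aio_read(&cb) == OK)
    {
      /* Cancel only this request. Passing a NULL aiocbp instead would
       * cancel every request pending on fd, as in the else-branch above.
       */

      int status = aio_cancel(fd, &cb);
      (void)status;                   /* AIO_CANCELED, AIO_NOTCANCELED, or AIO_ALLDONE */
    }
}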
Example 9: uipdriver_loop
void uipdriver_loop(void)
{
/* netdev_read will return 0 on a timeout event and >0 on a data received event */
g_sim_dev.d_len = netdev_read((unsigned char*)g_sim_dev.d_buf, CONFIG_NET_BUFSIZE);
/* Disable preemption through to the following so that it behaves a little more
* like an interrupt (otherwise, the following logic gets pre-empted and behaves
* oddly).
*/
sched_lock();
if (g_sim_dev.d_len > 0)
{
/* Data received event. Check for valid Ethernet header with destination == our
* MAC address
*/
if (g_sim_dev.d_len > UIP_LLH_LEN && up_comparemac(BUF->ether_dhost, &g_sim_dev.d_mac) == 0)
{
/* We only accept IP packets of the configured type and ARP packets */
#ifdef CONFIG_NET_IPv6
if (BUF->ether_type == htons(UIP_ETHTYPE_IP6))
#else
if (BUF->ether_type == htons(UIP_ETHTYPE_IP))
#endif
{
uip_arp_ipin(&g_sim_dev);
uip_input(&g_sim_dev);
/* If the above function invocation resulted in data that
* should be sent out on the network, the global variable
* d_len is set to a value > 0.
*/
if (g_sim_dev.d_len > 0)
{
uip_arp_out(&g_sim_dev);
netdev_send(g_sim_dev.d_buf, g_sim_dev.d_len);
}
}
else if (BUF->ether_type == htons(UIP_ETHTYPE_ARP))
{
uip_arp_arpin(&g_sim_dev);
/* If the above function invocation resulted in data that
* should be sent out on the network, the global variable
* d_len is set to a value > 0.
*/
if (g_sim_dev.d_len > 0)
{
netdev_send(g_sim_dev.d_buf, g_sim_dev.d_len);
}
}
}
}
/* Otherwise, it must be a timeout event */
else if (timer_expired(&g_periodic_timer))
{
timer_reset(&g_periodic_timer);
uip_timer(&g_sim_dev, sim_uiptxpoll, 1);
}
sched_unlock();
}
Example 10: sigsuspend
int sigsuspend(FAR const sigset_t *set)
{
FAR struct tcb_s *rtcb = (FAR struct tcb_s *)g_readytorun.head;
sigset_t intersection;
sigset_t saved_sigprocmask;
FAR sigpendq_t *sigpend;
irqstate_t saved_state;
int unblocksigno;
/* Several operations must be performed below: We must determine if any
* signal is pending and, if not, wait for the signal. Since signals can
* be posted from the interrupt level, there is a race condition that
* can only be eliminated by disabling interrupts!
*/
sched_lock(); /* Not necessary */
saved_state = irqsave();
/* Check if there is a pending signal corresponding to one of the
* signals that will be unblocked by the new sigprocmask.
*/
intersection = ~(*set) & sig_pendingset(rtcb);
if (intersection != NULL_SIGNAL_SET)
{
/* One or more of the signals in intersections is sufficient to cause
* us to not wait. Pick the lowest numbered signal and mark it not
* pending.
*/
unblocksigno = sig_lowest(&intersection);
sigpend = sig_removependingsignal(rtcb, unblocksigno);
ASSERT(sigpend);
sig_releasependingsignal(sigpend);
irqrestore(saved_state);
}
else
{
/* It's time to wait. Save a copy of the old sigprocmask and install
* the new (temporary) sigprocmask
*/
saved_sigprocmask = rtcb->sigprocmask;
rtcb->sigprocmask = *set;
rtcb->sigwaitmask = NULL_SIGNAL_SET;
/* And wait until one of the unblocked signals is posted */
up_block_task(rtcb, TSTATE_WAIT_SIG);
/* We are running again, restore the original sigprocmask */
rtcb->sigprocmask = saved_sigprocmask;
irqrestore(saved_state);
/* Now, handle the (rare?) case where (a) a blocked signal was received
* while the task was suspended but (b) restoring the original
* sigprocmask will unblock the signal.
*/
sig_unmaskpendingsignal();
}
sched_unlock();
return ERROR;
}
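The usual caller pattern for sigsuspend() is to block a signal with sigprocmask(), do some work, and then atomically unblock it and wait. A standard POSIX sketch (SIGUSR1 is just an example signal):
#include <signal.h>

static void wait_for_usr1(void)
{
  sigset_t blocked;
  sigset_t waitmask;

  /* Block SIGUSR1 so it can only be delivered inside sigsuspend() */

  sigemptyset(&blocked);
  sigaddset(&blocked, SIGUSR1);
  sigprocmask(SIG_BLOCK, &blocked, &waitmask);   /* waitmask = previous mask */

  /* ... a SIGUSR1 posted here remains pending ... */

  /* Atomically install a mask with SIGUSR1 unblocked and wait. The
   * original mask is restored when sigsuspend() returns -1 with
   * errno == EINTR.
   */

  sigdelset(&waitmask, SIGUSR1);
  (void)sigsuspend(&waitmask);
}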
Example 11: stm32_idlepm
//......... portions of this example are omitted here .........
switch (newstate)
{
case PM_NORMAL:
{
/* If we just awakened from PM_STANDBY mode, then reconfigure
* clocking.
*/
if (oldstate == PM_STANDBY)
{
/* Re-enable clocking */
stm32_clockenable();
/* The system timer was disabled while in PM_STANDBY or
* PM_SLEEP modes. But the RTC has still been running: Reset
* the system time to the current RTC time.
*/
#ifdef CONFIG_RTC
clock_synchronize();
#endif
}
}
break;
case PM_IDLE:
{
}
break;
case PM_STANDBY:
{
/* Set the alarm as an EXTI Line */
#ifdef CONFIG_RTC_ALARM
stm32_rtc_alarm(CONFIG_PM_ALARM_SEC, CONFIG_PM_ALARM_NSEC, true);
#endif
/* Wait 10ms */
up_mdelay(10);
/* Enter the STM32 stop mode */
(void)stm32_pmstop(false);
/* We have been re-awakened by some event: A button press?
* An alarm? Cancel any pending alarm and resume the normal
* operation.
*/
#ifdef CONFIG_RTC_ALARM
stm32_exti_cancel();
ret = stm32_rtc_cancelalarm();
if (ret < 0)
{
lldbg("Warning: Cancel alarm failed\n");
}
#endif
/* Note: See the additional PM_STANDBY related logic at the
* beginning of this function. That logic is executed after
* this point.
*/
}
break;
case PM_SLEEP:
{
/* We should not return from standby mode. The only way out
* of standby is via the reset path.
*/
/* Configure the RTC alarm to Auto Reset the system */
#ifdef CONFIG_PM_SLEEP_WAKEUP
stm32_rtc_alarm(CONFIG_PM_SLEEP_WAKEUP_SEC, CONFIG_PM_SLEEP_WAKEUP_NSEC, false);
#endif
/* Wait 10ms */
up_mdelay(10);
/* Enter the STM32 standby mode */
(void)stm32_pmstandby();
}
break;
default:
break;
}
/* Save the new state */
oldstate = newstate;
errout:
sched_unlock();
}
}
Example 12: pthread_mutex_unlock
int pthread_mutex_unlock(FAR pthread_mutex_t *mutex)
{
int ret = OK;
sdbg("mutex=0x%p\n", mutex);
if (!mutex)
{
ret = EINVAL;
}
else
{
/* Make sure the semaphore is stable while we make the following
* checks. This all needs to be one atomic action.
*/
sched_lock();
/* Does the calling thread own the semaphore? */
if (mutex->pid != (int)getpid())
{
/* No... return an error (default behavior is like PTHREAD_MUTEX_ERRORCHECK) */
sdbg("Holder=%d returning EPERM\n", mutex->pid);
ret = EPERM;
}
/* Yes, the caller owns the semaphore.. Is this a recursive mutex? */
#ifdef CONFIG_MUTEX_TYPES
else if (mutex->type == PTHREAD_MUTEX_RECURSIVE && mutex->nlocks > 1)
{
/* This is a recursive mutex and there are multiple locks held. Retain
* the mutex lock, just decrement the count of locks held, and return
* success.
*/
mutex->nlocks--;
}
#endif
/* This is either a non-recursive mutex or is the outermost unlock of
* a recursive mutex.
*/
else
{
/* Nullify the pid and lock count then post the semaphore */
mutex->pid = -1;
#ifdef CONFIG_MUTEX_TYPES
mutex->nlocks = 0;
#endif
ret = pthread_givesemaphore((sem_t*)&mutex->sem);
}
sched_unlock();
}
sdbg("Returning %d\n", ret);
return ret;
}
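For completeness, the matching caller-side pattern (standard pthreads; the global counter g_counter is illustrative):
#include <pthread.h>

static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
static int g_counter;

static void bump_counter(void)
{
  pthread_mutex_lock(&g_lock);      /* Records the caller as holder (mutex->pid) */
  g_counter++;
  pthread_mutex_unlock(&g_lock);    /* Must be called by the same thread, as checked above */
}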
Example 13: task_restart
int task_restart(pid_t pid)
{
FAR _TCB *rtcb;
FAR _TCB *tcb;
int status;
irqstate_t state;
/* Make sure this task does not become ready-to-run while
* we are futzing with its TCB
*/
sched_lock();
/* Check if the task to restart is the calling task */
rtcb = (FAR _TCB*)g_readytorun.head;
if ((pid == 0) || (pid == rtcb->pid))
{
/* Not implemented */
return ERROR;
}
/* We are restarting some other task than ourselves */
else
{
/* Find the TCB associated with the matching pid */
tcb = sched_gettcb(pid);
if (!tcb)
{
/* There is no TCB with this pid */
return ERROR;
}
/* Remove the TCB from whatever list it is in. At this point, the
* TCB should no longer be accessible to the system
*/
state = irqsave();
dq_rem((FAR dq_entry_t*)tcb, (dq_queue_t*)g_tasklisttable[tcb->task_state].list);
tcb->task_state = TSTATE_TASK_INVALID;
irqrestore(state);
/* Deallocate anything left in the TCB's queues */
sig_cleanup(tcb); /* Deallocate Signal lists */
/* Reset the current task priority */
tcb->sched_priority = tcb->init_priority;
/* Reset the base task priority and the number of pending reprioritizations */
#ifdef CONFIG_PRIORITY_INHERITANCE
tcb->base_priority = tcb->init_priority;
# if CONFIG_SEM_NNESTPRIO > 0
tcb->npend_reprio = 0;
# endif
#endif
/* Re-initialize the processor-specific portion of the TCB
* This will reset the entry point and the start-up parameters
*/
up_initial_state(tcb);
/* Add the task to the inactive task list */
dq_addfirst((FAR dq_entry_t*)tcb, (dq_queue_t*)&g_inactivetasks);
tcb->task_state = TSTATE_TASK_INACTIVE;
/* Activate the task */
status = task_activate(tcb);
if (status != OK)
{
dq_rem((FAR dq_entry_t*)tcb, (dq_queue_t*)&g_inactivetasks);
sched_releasetcb(tcb);
return ERROR;
}
}
sched_unlock();
return OK;
}
Example 14: nxcon_kbdin
void nxcon_kbdin(NXCONSOLE handle, FAR const uint8_t *buffer, uint8_t buflen)
{
FAR struct nxcon_state_s *priv;
ssize_t nwritten;
int nexthead;
char ch;
int ret;
gvdbg("buflen=%d\n");
DEBUGASSERT(handle);
/* Get the reference to the driver structure from the handle */
priv = (FAR struct nxcon_state_s *)handle;
/* Get exclusive access to the driver structure */
ret = nxcon_semwait(priv);
if (ret < 0)
{
gdbg("ERROR: nxcon_semwait failed\n");
return;
}
/* Loop until all of the bytes have been written. This function may be
* called from an interrupt handler! Semaphores cannot be used!
*
* The write logic only needs to modify the head index. Therefore,
* there is a difference in the way that head and tail are protected:
* tail is protected with a semaphore; head is protected by disabling
* interrupts.
*/
for (nwritten = 0; nwritten < buflen; nwritten++)
{
/* Add the next character */
ch = buffer[nwritten];
/* Calculate the write index AFTER the next byte is added to the ring
* buffer
*/
nexthead = priv->head + 1;
if (nexthead >= CONFIG_NXCONSOLE_KBDBUFSIZE)
{
nexthead = 0;
}
/* Would the next write overflow the circular buffer? */
if (nexthead == priv->tail)
{
/* Yes... Return an indication that nothing was saved in the buffer. */
gdbg("ERROR: Keyboard data overrun\n");
break;
}
/* No... copy the byte */
priv->rxbuffer[priv->head] = ch;
priv->head = nexthead;
}
/* Was anything written? */
if (nwritten > 0)
{
int i;
/* Are there threads waiting for read data? */
sched_lock();
for (i = 0; i < priv->nwaiters; i++)
{
/* Yes.. Notify all of the waiting readers that more data is available */
sem_post(&priv->waitsem);
}
/* Notify all poll/select waiters that they can write to the FIFO */
#ifndef CONFIG_DISABLE_POLL
nxcon_pollnotify(priv, POLLIN);
#endif
sched_unlock();
}
nxcon_sempost(priv);
}
Example 15: nxcon_read
ssize_t nxcon_read(FAR struct file *filep, FAR char *buffer, size_t len)
{
FAR struct nxcon_state_s *priv;
ssize_t nread;
char ch;
int ret;
/* Recover our private state structure */
DEBUGASSERT(filep && filep->f_priv);
priv = (FAR struct nxcon_state_s *)filep->f_priv;
/* Get exclusive access to the driver structure */
ret = nxcon_semwait(priv);
if (ret < 0)
{
gdbg("ERROR: nxcon_semwait failed\n");
return ret;
}
/* Loop until something is read */
for (nread = 0; nread < len; )
{
/* Get the next byte from the buffer */
if (priv->head == priv->tail)
{
/* The circular buffer is empty. Did we read anything? */
if (nread > 0)
{
/* Yes.. break out to return what we have. */
break;
}
/* If the driver was opened with O_NONBLOCK option, then don't wait.
* Just return EAGAIN.
*/
if (filep->f_oflags & O_NONBLOCK)
{
nread = -EAGAIN;
break;
}
/* Otherwise, wait for something to be written to the circular
* buffer. Increment the number of waiters so that the nxcon_write()
* will note that it needs to post the semaphore to wake us up.
*/
sched_lock();
priv->nwaiters++;
nxcon_sempost(priv);
/* We may now be pre-empted! But that should be okay because we
* have already incremented nwaiters. Pre-emption is disabled
* but will be re-enabled while we are waiting.
*/
ret = sem_wait(&priv->waitsem);
/* Pre-emption will still be disabled when we return, so decrementing
* nwaiters here is safe.
*/
priv->nwaiters--;
sched_unlock();
/* Did we successfully get the waitsem? */
if (ret >= 0)
{
/* Yes... then retake the mutual exclusion semaphore */
ret = nxcon_semwait(priv);
}
/* Was the semaphore wait successful? Did we successfully re-take the
* mutual exclusion semaphore?
*/
if (ret < 0)
{
/* No.. One of the two sem_wait's failed. */
int errval = errno;
gdbg("ERROR: nxcon_semwait failed\n");
/* Were we awakened by a signal? Did we read anything before
* we received the signal?
*/
if (errval != EINTR || nread >= 0)
{
/* Yes.. return the error. */
//......... portions of this example are omitted here .........