This article collects typical usage examples of QUEUE_HEAD in C/C++. If you have been wondering what exactly QUEUE_HEAD does, how to use it, or where to find real-world examples of it, the curated code samples below should help. Note that despite the site's labeling, QUEUE_HEAD is a preprocessor macro rather than a function; in most of the examples below it is libuv's intrusive-list macro from queue.h.
The following shows 15 code examples of QUEUE_HEAD, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
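Before the examples, a note on what QUEUE_HEAD actually is. In libuv, where most of the examples below originate, QUEUE is an intrusive circular doubly linked list and QUEUE_HEAD simply returns the first link. The following is a minimal sketch of the relevant definitions from libuv's src/queue.h, reproduced from memory and abridged, so consult the real header before relying on it:

#include <stddef.h>  /* offsetof */

typedef void *QUEUE[2];  /* [0] = next, [1] = prev */

#define QUEUE_NEXT(q)  (*(QUEUE **) &((*(q))[0]))
#define QUEUE_PREV(q)  (*(QUEUE **) &((*(q))[1]))

/* An empty queue is one whose head points back at itself. */
#define QUEUE_EMPTY(q) \
  ((const QUEUE *) (q) == (const QUEUE *) QUEUE_NEXT(q))

/* QUEUE_HEAD is just the first link after the list head. */
#define QUEUE_HEAD(q)  (QUEUE_NEXT(q))

#define QUEUE_INIT(q)     \
  do {                    \
    QUEUE_NEXT(q) = (q);  \
    QUEUE_PREV(q) = (q);  \
  } while (0)

/* Recover the enclosing struct from an embedded QUEUE link
 * (the classic container_of trick). */
#define QUEUE_DATA(ptr, type, field) \
  ((type *) ((char *) (ptr) - offsetof(type, field)))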
Example 1: uv__cf_loop_cb
static void uv__cf_loop_cb(void* arg) {
  uv_loop_t* loop;
  QUEUE* item;
  QUEUE split_head;
  uv__cf_loop_signal_t* s;

  loop = arg;

  uv_mutex_lock(&loop->cf_mutex);
  QUEUE_INIT(&split_head);
  if (!QUEUE_EMPTY(&loop->cf_signals)) {
    QUEUE* split_pos = QUEUE_HEAD(&loop->cf_signals);
    QUEUE_SPLIT(&loop->cf_signals, split_pos, &split_head);
  }
  uv_mutex_unlock(&loop->cf_mutex);

  while (!QUEUE_EMPTY(&split_head)) {
    item = QUEUE_HEAD(&split_head);
    s = QUEUE_DATA(item, uv__cf_loop_signal_t, member);

    /* This was a termination signal */
    if (s->cb == NULL)
      CFRunLoopStop(loop->cf_loop);
    else
      s->cb(s->arg);

    QUEUE_REMOVE(item);
    free(s);
  }
}
Example 2: uv__cf_loop_cb
void uv__cf_loop_cb(void* arg) {
  uv_loop_t* loop;
  QUEUE* item;
  QUEUE split_head;
  uv__cf_loop_signal_t* s;

  loop = arg;

  uv_mutex_lock(&loop->cf_mutex);
  QUEUE_INIT(&split_head);
  if (!QUEUE_EMPTY(&loop->cf_signals)) {
    QUEUE* split_pos = QUEUE_HEAD(&loop->cf_signals);
    QUEUE_SPLIT(&loop->cf_signals, split_pos, &split_head);
  }
  uv_mutex_unlock(&loop->cf_mutex);

  while (!QUEUE_EMPTY(&split_head)) {
    item = QUEUE_HEAD(&split_head);
    s = QUEUE_DATA(item, uv__cf_loop_signal_t, member);
    s->cb(s->arg);
    QUEUE_REMOVE(item);
    free(s);
  }
}
Example 3: uv__work_done
void uv__work_done(uv_async_t* handle) {
  struct uv__work* w;
  uv_loop_t* loop;
  QUEUE* q;
  QUEUE wq;

  loop = container_of(handle, uv_loop_t, wq_async);
  QUEUE_INIT(&wq);

  /* The work queue must be drained under wq_mutex; worker threads
   * insert completed items concurrently. (The original snippet had
   * these lock calls commented out, which is only safe when no
   * worker threads are running.) */
  uv_mutex_lock(&loop->wq_mutex);
  if (!QUEUE_EMPTY(&loop->wq)) {
    q = QUEUE_HEAD(&loop->wq);
    QUEUE_SPLIT(&loop->wq, q, &wq);
  }
  uv_mutex_unlock(&loop->wq_mutex);

  while (!QUEUE_EMPTY(&wq)) {
    q = QUEUE_HEAD(&wq);
    QUEUE_REMOVE(q);

    w = container_of(q, struct uv__work, wq);
    /* In this fork the done callback takes two extra buffer arguments. */
    w->done(w, 0, NULL, 0);
  }
}
Example 4: uv__work_done
void uv__work_done(uv_async_t* handle) {
  struct uv__work* w;
  uv_loop_t* loop;
  QUEUE* q;
  QUEUE wq;
  int err;

  loop = container_of(handle, uv_loop_t, wq_async);
  QUEUE_INIT(&wq);

  uv_mutex_lock(&loop->wq_mutex);
  if (!QUEUE_EMPTY(&loop->wq)) {
    q = QUEUE_HEAD(&loop->wq);
    QUEUE_SPLIT(&loop->wq, q, &wq);
  }
  uv_mutex_unlock(&loop->wq_mutex);

  while (!QUEUE_EMPTY(&wq)) {
    q = QUEUE_HEAD(&wq);
    QUEUE_REMOVE(q);

    w = container_of(q, struct uv__work, wq);
    err = (w->work == uv__cancelled) ? UV_ECANCELED : 0;
    w->done(w, err);
  }
}
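Examples 1 through 4 share one pattern: take the mutex, detach the entire pending list in O(1) with QUEUE_SPLIT(&list, QUEUE_HEAD(&list), &local), release the mutex, then drain the local list with no lock held, so the callbacks cannot deadlock against producers. For reference, here is a sketch of QUEUE_SPLIT as defined in libuv's src/queue.h (again from memory, so treat it as illustrative): q and everything after it moves from list h onto the empty list n, and passing q = QUEUE_HEAD(h) moves the whole list, which is exactly how the examples above use it.

#define QUEUE_SPLIT(h, q, n)            \
  do {                                  \
    QUEUE_PREV(n) = QUEUE_PREV(h);      \
    QUEUE_NEXT(QUEUE_PREV(n)) = (n);    \
    QUEUE_NEXT(n) = (q);                \
    QUEUE_PREV(h) = QUEUE_PREV(q);      \
    QUEUE_NEXT(QUEUE_PREV(h)) = (h);    \
    QUEUE_PREV(q) = (n);                \
  } while (0)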
Example 5: ixEthDBNPEEventCallback
/**
 * @brief default NPE event processing callback
 *
 * @param npeID ID of the NPE that generated the event
 * @param msg NPE message (encapsulated event)
 *
 * Creates an event object on the Ethernet event processor queue
 * and signals the new event by incrementing the event queue semaphore.
 * Events are processed by @ref ixEthDBEventProcessorLoop() which runs
 * at user level. (Note: QUEUE_HEAD here is an array-backed ring-buffer
 * macro, not libuv's linked-list macro; see the sketch after this
 * example.)
 *
 * @see ixEthDBEventProcessorLoop()
 *
 * @warning do not call directly
 *
 * @internal
 */
IX_ETH_DB_PUBLIC
void ixEthDBNPEEventCallback(IxNpeMhNpeId npeID, IxNpeMhMessage msg)
{
    PortEvent *local_event;

    IX_ETH_DB_IRQ_EVENTS_TRACE("DB: (Events) new event received by processor callback from port %d, id 0x%X\n",
        IX_ETHNPE_NODE_AND_PORT_TO_PHYSICAL_ID(npeID, 0), NPE_MSG_ID(msg), 0, 0, 0, 0);

    if (CAN_ENQUEUE(&eventQueue))
    {
        TEST_FIXTURE_LOCK_EVENT_QUEUE;

        /* create event structure on queue: QUEUE_HEAD returns the next
         * free slot, which is filled in place */
        local_event = QUEUE_HEAD(&eventQueue);
        local_event->eventType = NPE_MSG_ID(msg);
        local_event->portID = IX_ETHNPE_NODE_AND_PORT_TO_PHYSICAL_ID(npeID, 0);

        /* update queue */
        PUSH_UPDATE_QUEUE(&eventQueue);

        TEST_FIXTURE_UNLOCK_EVENT_QUEUE;

        IX_ETH_DB_IRQ_EVENTS_TRACE("DB: (Events) Waking up main processor loop...\n", 0, 0, 0, 0, 0, 0);

        /* increment event queue semaphore */
        ixOsalSemaphorePost(&eventQueueSemaphore);
    }
    else
    {
        IX_ETH_DB_IRQ_EVENTS_TRACE("DB: (Events) Warning: could not enqueue event (overflow)\n", 0, 0, 0, 0, 0, 0);
    }
}
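Unlike the libuv examples, this one appears to come from Intel's IXP400 Ethernet database (ixEthDB) code, and its QUEUE_HEAD is a different macro entirely: it yields a pointer to the next writable slot of a fixed-size ring buffer rather than the first node of a linked list. The sketch below is a hypothetical reconstruction of that style, offered only to make the calling code readable; the names CAN_ENQUEUE, PUSH_UPDATE_QUEUE, EVENT_QUEUE_SIZE, and the struct layout are assumptions for illustration, not the vendor's actual definitions:

/* Hypothetical array-backed event ring (illustration only). */
#define EVENT_QUEUE_SIZE 32  /* assumed capacity */

typedef struct {
    unsigned int eventType;
    unsigned int portID;
} PortEvent;  /* simplified stand-in for the real PortEvent */

typedef struct {
    PortEvent base[EVENT_QUEUE_SIZE];  /* fixed storage */
    unsigned int head;                 /* index of next slot to write */
    unsigned int length;               /* number of used slots */
} PortEventQueue;

/* Room left in the ring? */
#define CAN_ENQUEUE(q)  ((q)->length < EVENT_QUEUE_SIZE)

/* Next free slot; the caller fills it in place. */
#define QUEUE_HEAD(q)   (&(q)->base[(q)->head])

/* Commit the slot written at the head. */
#define PUSH_UPDATE_QUEUE(q)                             \
    do {                                                 \
        (q)->head = ((q)->head + 1) % EVENT_QUEUE_SIZE;  \
        (q)->length++;                                   \
    } while (0)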
Example 6: uv__write_callbacks
static void uv__write_callbacks(uv_stream_t* stream) {
  uv_write_t* req;
  QUEUE* q;

  while (!QUEUE_EMPTY(&stream->write_completed_queue)) {
    /* Pop a req off write_completed_queue. */
    q = QUEUE_HEAD(&stream->write_completed_queue);
    req = QUEUE_DATA(q, uv_write_t, queue);
    QUEUE_REMOVE(q);
    uv__req_unregister(stream->loop, req);

    if (req->bufs != NULL) {
      stream->write_queue_size -= uv__write_req_size(req);
      if (req->bufs != req->bufsml)
        free(req->bufs);
      req->bufs = NULL;
    }

    /* NOTE: call callback AFTER freeing the request data. */
    if (req->cb)
      req->cb(req, req->error);
  }

  assert(QUEUE_EMPTY(&stream->write_completed_queue));
}
Example 7: uv__udp_finish_close
void uv__udp_finish_close(uv_udp_t* handle) {
  uv_udp_send_t* req;
  QUEUE* q;

  assert(!uv__io_active(&handle->io_watcher, UV__POLLIN | UV__POLLOUT));
  assert(handle->io_watcher.fd == -1);

  while (!QUEUE_EMPTY(&handle->write_queue)) {
    q = QUEUE_HEAD(&handle->write_queue);
    QUEUE_REMOVE(q);

    req = QUEUE_DATA(q, uv_udp_send_t, queue);
    req->status = -ECANCELED;
    QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
  }

  uv__udp_run_completed(handle);

  assert(handle->send_queue_size == 0);
  assert(handle->send_queue_count == 0);

  /* Now tear down the handle. */
  handle->recv_cb = NULL;
  handle->alloc_cb = NULL;
  /* but _do not_ touch close_cb */
}
Example 8: uv__udp_finish_close
void uv__udp_finish_close(uv_udp_t* handle) {
  uv_udp_send_t* req;
  QUEUE* q;

  assert(!uv__io_active(&handle->io_watcher, UV__POLLIN | UV__POLLOUT));
  assert(handle->io_watcher.fd == -1);

  uv__udp_run_completed(handle);

  while (!QUEUE_EMPTY(&handle->write_queue)) {
    q = QUEUE_HEAD(&handle->write_queue);
    QUEUE_REMOVE(q);

    req = QUEUE_DATA(q, uv_udp_send_t, queue);
    uv__req_unregister(handle->loop, req);

    if (req->bufs != req->bufsml)
      free(req->bufs);
    req->bufs = NULL;

    if (req->send_cb != NULL)
      req->send_cb(req, -ECANCELED);
  }

  /* Now tear down the handle. */
  handle->recv_cb = NULL;
  handle->alloc_cb = NULL;
  /* but _do not_ touch close_cb */
}
Example 9: uv__udp_run_completed
static void uv__udp_run_completed(uv_udp_t* handle) {
  uv_udp_send_t* req;
  QUEUE* q;

  while (!QUEUE_EMPTY(&handle->write_completed_queue)) {
    q = QUEUE_HEAD(&handle->write_completed_queue);
    QUEUE_REMOVE(q);

    req = QUEUE_DATA(q, uv_udp_send_t, queue);
    uv__req_unregister(handle->loop, req);

    if (req->bufs != req->bufsml)
      free(req->bufs);
    req->bufs = NULL;

    if (req->send_cb == NULL)
      continue;

    /* req->status >= 0 == bytes written
     * req->status <  0 == errno
     */
    if (req->status >= 0)
      req->send_cb(req, 0);
    else
      req->send_cb(req, req->status);
  }
}
Example 10: uv__inotify_read
static void uv__inotify_read(uv_loop_t* loop,
                             uv__io_t* dummy,
                             unsigned int events) {
  const struct uv__inotify_event* e;
  struct watcher_list* w;
  uv_fs_event_t* h;
  QUEUE queue;
  QUEUE* q;
  const char* path;
  ssize_t size;
  const char* p;
  /* needs to be large enough for sizeof(inotify_event) + strlen(path) */
  char buf[4096];

  while (1) {
    do
      size = read(loop->inotify_fd, buf, sizeof(buf));
    while (size == -1 && errno == EINTR);

    if (size == -1) {
      assert(errno == EAGAIN || errno == EWOULDBLOCK);
      break;
    }

    assert(size > 0); /* pre-2.6.21 thing, size=0 == read buffer too small */

    /* Now we have one or more inotify_event structs. */
    for (p = buf; p < buf + size; p += sizeof(*e) + e->len) {
      e = (const struct uv__inotify_event*) p;

      events = 0;
      if (e->mask & (UV__IN_ATTRIB | UV__IN_MODIFY))
        events |= UV_CHANGE;
      if (e->mask & ~(UV__IN_ATTRIB | UV__IN_MODIFY))
        events |= UV_RENAME;

      w = find_watcher(loop, e->wd);
      if (w == NULL)
        continue; /* Stale event, no watchers left. */

      /* inotify does not return the filename when monitoring a single file
       * for modifications. Repurpose the filename for API compatibility.
       * I'm not convinced this is a good thing, maybe it should go.
       */
      path = e->len ? (const char*) (e + 1) : uv__basename_r(w->path);

      QUEUE_MOVE(&w->watchers, &queue);
      while (!QUEUE_EMPTY(&queue)) {
        q = QUEUE_HEAD(&queue);
        h = QUEUE_DATA(q, uv_fs_event_t, watchers);

        QUEUE_REMOVE(q);
        QUEUE_INSERT_TAIL(&w->watchers, q);

        h->cb(h, path, events, 0);
      }
    }
  }
}
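A detail worth noting in Example 10: QUEUE_MOVE(&w->watchers, &queue) empties the watcher list onto a stack-local queue before iterating, and each handle is re-inserted before its callback runs, so a callback may stop any watcher without corrupting the traversal. QUEUE_MOVE itself is a thin wrapper over QUEUE_SPLIT; a sketch from memory of how libuv defines it:

#define QUEUE_MOVE(h, n)           \
  do {                             \
    if (QUEUE_EMPTY(h))            \
      QUEUE_INIT(n);               \
    else {                         \
      QUEUE* q = QUEUE_HEAD(h);    \
      QUEUE_SPLIT(h, q, n);        \
    }                              \
  } while (0)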
Example 11: stack_pop
static Token stack_pop(QUEUE *stack)
{
  /* Pop the top of an intrusive stack: take the head link, recover the
   * enclosing queue_item, unlink it, and hand back its payload. */
  QUEUE *h = QUEUE_HEAD(stack);
  queue_item *item = queue_node_data(h);
  QUEUE_REMOVE(&item->node);
  Token token = item->item;
  free(item);
  return token;
}
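Examples 11 and 14 show the generic pop idiom: QUEUE_HEAD to peek, a container_of-style accessor to recover the element, QUEUE_REMOVE to unlink, then free. Below is a minimal, self-contained program demonstrating the same idiom with the libuv-style macros sketched earlier; item_t and pop_front are invented for this illustration, not part of any real API:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

typedef void *QUEUE[2];
#define QUEUE_NEXT(q)   (*(QUEUE **) &((*(q))[0]))
#define QUEUE_PREV(q)   (*(QUEUE **) &((*(q))[1]))
#define QUEUE_HEAD(q)   (QUEUE_NEXT(q))
#define QUEUE_EMPTY(q)  ((const QUEUE *) (q) == (const QUEUE *) QUEUE_NEXT(q))
#define QUEUE_INIT(q) \
  do { QUEUE_NEXT(q) = (q); QUEUE_PREV(q) = (q); } while (0)
#define QUEUE_DATA(ptr, type, field) \
  ((type *) ((char *) (ptr) - offsetof(type, field)))
#define QUEUE_INSERT_TAIL(h, q)            \
  do {                                     \
    QUEUE_NEXT(q) = (h);                   \
    QUEUE_PREV(q) = QUEUE_PREV(h);         \
    QUEUE_NEXT(QUEUE_PREV(q)) = (q);       \
    QUEUE_PREV(h) = (q);                   \
  } while (0)
#define QUEUE_REMOVE(q)                            \
  do {                                             \
    QUEUE_NEXT(QUEUE_PREV(q)) = QUEUE_NEXT(q);     \
    QUEUE_PREV(QUEUE_NEXT(q)) = QUEUE_PREV(q);     \
  } while (0)

typedef struct {
  int value;   /* payload */
  QUEUE node;  /* intrusive link embedded in the item */
} item_t;

static int pop_front(QUEUE *head) {
  QUEUE *q = QUEUE_HEAD(head);               /* first link */
  item_t *it = QUEUE_DATA(q, item_t, node);  /* recover the item */
  QUEUE_REMOVE(q);                           /* unlink before freeing */
  int v = it->value;
  free(it);
  return v;
}

int main(void) {
  QUEUE head;
  QUEUE_INIT(&head);

  for (int i = 1; i <= 3; i++) {
    item_t *it = malloc(sizeof(*it));
    it->value = i;
    QUEUE_INSERT_TAIL(&head, &it->node);
  }

  while (!QUEUE_EMPTY(&head))
    printf("popped %d\n", pop_front(&head));  /* prints 1, 2, 3 */
  return 0;
}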
Example 12: uv__udp_run_pending
static void uv__udp_run_pending(uv_udp_t* handle) {
  uv_udp_send_t* req;
  QUEUE* q;
  struct msghdr h;
  ssize_t size;

  while (!QUEUE_EMPTY(&handle->write_queue)) {
    q = QUEUE_HEAD(&handle->write_queue);
    assert(q != NULL);

    req = QUEUE_DATA(q, uv_udp_send_t, queue);
    assert(req != NULL);

    memset(&h, 0, sizeof h);
    h.msg_name = &req->addr;
    h.msg_namelen = (req->addr.sin6_family == AF_INET6 ?
      sizeof(struct sockaddr_in6) : sizeof(struct sockaddr_in));
    h.msg_iov = (struct iovec*) req->bufs;
    h.msg_iovlen = req->bufcnt;

    do {
      size = sendmsg(handle->io_watcher.fd, &h, 0);
    }
    while (size == -1 && errno == EINTR);

    /* TODO try to write once or twice more in the
     * hope that the socket becomes writable again?
     */
    if (size == -1 && (errno == EAGAIN || errno == EWOULDBLOCK))
      break;

    req->status = (size == -1 ? -errno : size);

#ifndef NDEBUG
    /* Sanity check. */
    if (size != -1) {
      ssize_t nbytes;
      int i;

      for (nbytes = i = 0; i < req->bufcnt; i++)
        nbytes += req->bufs[i].len;

      assert(size == nbytes);
    }
#endif

    /* Sending a datagram is an atomic operation: either all data
     * is written or nothing is (and EMSGSIZE is raised). That is
     * why we don't handle partial writes. Just pop the request
     * off the write queue and onto the completed queue, done.
     */
    QUEUE_REMOVE(&req->queue);
    QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
  }
}
Example 13: loop_run_immediate
static void loop_run_immediate(evLoop *loop) {
  QUEUE *q;
  evHandle *handle;

  while (!QUEUE_EMPTY(&loop->handle_queue)) {
    q = QUEUE_HEAD(&loop->handle_queue);
    QUEUE_REMOVE(q);
    handle = QUEUE_DATA(q, evHandle, queue);
    assert(handle);
    handle->cb(handle);
  }
}
Example 14: queue_pop
static Event queue_pop(Queue *queue)
{
  QUEUE *h = QUEUE_HEAD(&queue->headtail);
  queue_item *item = queue_node_data(h);
  QUEUE_REMOVE(&item->node);
  Event e = item->item;
  free(item);
  return e;
}
Example 15: uv_chan_clear
void uv_chan_clear(uv_chan_t *chan) {
  uv__chan_item_t *item = NULL;
  QUEUE *head = NULL;

  uv_mutex_lock(&chan->mutex);
  while (!QUEUE_EMPTY(&chan->q)) {
    head = QUEUE_HEAD(&chan->q);
    item = QUEUE_DATA(head, uv__chan_item_t, active_queue);
    QUEUE_REMOVE(head);
    free(item);
  }
  uv_mutex_unlock(&chan->mutex);
}