本文整理汇总了C++中SAVE_ERRNO函数的典型用法代码示例。如果您正苦于以下问题:C++ SAVE_ERRNO函数的具体用法?C++ SAVE_ERRNO怎么用?C++ SAVE_ERRNO使用的例子?那么, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了SAVE_ERRNO函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: uv_cpu_info
/* Collect per-CPU model/speed/time information on Linux.
 *
 * On success stores a calloc'd array in *cpu_infos and its length in
 * *count (caller releases with uv_free_cpu_info()) and returns uv_ok_.
 * On failure leaves *cpu_infos == NULL, *count == 0 and returns a
 * sys error.
 */
uv_err_t uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
  long rc;
  unsigned int numcpus;
  uv_cpu_info_t* ci;

  *cpu_infos = NULL;
  *count = 0;

  /* Check sysconf() explicitly instead of relying only on assert():
   * asserts compile away under NDEBUG, and a -1 result would become
   * UINT_MAX when assigned to the unsigned counter and reach calloc(). */
  rc = sysconf(_SC_NPROCESSORS_ONLN);
  assert(rc != -1);
  assert(rc != 0);
  if (rc <= 0)
    return uv__new_sys_error(errno ? errno : EINVAL);
  numcpus = (unsigned int) rc;

  ci = calloc(numcpus, sizeof(*ci));
  if (ci == NULL)
    return uv__new_sys_error(ENOMEM);

  if (read_models(numcpus, ci)) {
    /* Free the half-built array without clobbering read_models()'s errno. */
    SAVE_ERRNO(uv_free_cpu_info(ci, numcpus));
    return uv__new_sys_error(errno);
  }

  if (read_times(numcpus, ci)) {
    SAVE_ERRNO(uv_free_cpu_info(ci, numcpus));
    return uv__new_sys_error(errno);
  }

  /* read_models() on x86 also reads the CPU speed from /proc/cpuinfo.
   * We don't check for errors here. Worst case, the field is left zero.
   */
  if (ci[0].speed == 0)
    read_speeds(numcpus, ci);

  *cpu_infos = ci;
  *count = numcpus;
  return uv_ok_;
}
示例2: uv_cpu_info
/* Collect per-CPU information via sysctl() on OpenBSD.
 *
 * On success *cpu_infos receives a malloc'd array, *count its length,
 * and 0 is returned. On failure a negative errno-style value is
 * returned and *cpu_infos / *count are left untouched or NULL/stale-free.
 */
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
  unsigned int ticks = (unsigned int) sysconf(_SC_CLK_TCK);
  unsigned int multiplier;
  unsigned int cpuspeed;
  uint64_t info[CPUSTATES];
  char model[512];
  int numcpus = 1;
  /* NOTE(review): mutable static scratch array -- not thread-safe if
   * uv_cpu_info() can run concurrently; confirm against callers. */
  static int which[] = {CTL_HW, HW_MODEL, 0};
  size_t size;
  int i;
  uv_cpu_info_t* cpu_info;

  /* Guard the division: sysconf() returning 0 or -1 would otherwise
   * divide by zero or yield a nonsense multiplier. */
  if (ticks == 0 || ticks == (unsigned int) -1)
    return -EINVAL;
  multiplier = (unsigned int) ((uint64_t) 1000L / ticks);

  size = sizeof(model);
  if (sysctl(which, 2, &model, &size, NULL, 0))
    return -errno;

  which[1] = HW_NCPU;
  size = sizeof(numcpus);
  if (sysctl(which, 2, &numcpus, &size, NULL, 0))
    return -errno;

  *cpu_infos = malloc(numcpus * sizeof(**cpu_infos));
  if (*cpu_infos == NULL)
    return -ENOMEM;

  which[1] = HW_CPUSPEED;
  size = sizeof(cpuspeed);
  if (sysctl(which, 2, &cpuspeed, &size, NULL, 0)) {
    SAVE_ERRNO(free(*cpu_infos));
    *cpu_infos = NULL; /* don't hand the caller a dangling pointer */
    return -errno;
  }

  size = sizeof(info);
  which[0] = CTL_KERN;
  which[1] = KERN_CPTIME2;
  for (i = 0; i < numcpus; i++) {
    which[2] = i;
    size = sizeof(info);
    if (sysctl(which, 3, &info, &size, NULL, 0)) {
      SAVE_ERRNO(free(*cpu_infos));
      *cpu_infos = NULL;
      return -errno;
    }
    cpu_info = &(*cpu_infos)[i];
    /* Convert scheduler ticks to milliseconds. */
    cpu_info->cpu_times.user = (uint64_t)(info[CP_USER]) * multiplier;
    cpu_info->cpu_times.nice = (uint64_t)(info[CP_NICE]) * multiplier;
    cpu_info->cpu_times.sys = (uint64_t)(info[CP_SYS]) * multiplier;
    cpu_info->cpu_times.idle = (uint64_t)(info[CP_IDLE]) * multiplier;
    cpu_info->cpu_times.irq = (uint64_t)(info[CP_INTR]) * multiplier;
    /* NOTE(review): strdup() result is unchecked; OOM leaves model NULL. */
    cpu_info->model = strdup(model);
    cpu_info->speed = cpuspeed;
  }

  /* Publish the count only after the array is fully populated, so a
   * failed call never leaves *count describing freed memory. */
  *count = numcpus;
  return 0;
}
示例3: uv_exepath
/* Resolve the absolute path of the running executable (macOS).
 *
 * buffer/size: in/out -- on entry *size is the buffer capacity, on
 * success buffer holds the NUL-terminated resolved path and *size its
 * length. Returns 0, -EINVAL on bad arguments, -ENOMEM on allocation
 * failure, a positive _NSGetExecutablePath() result if the buffer is
 * too small, or -errno if realpath() fails.
 */
int uv_exepath(char* buffer, size_t* size) {
  uint32_t usize;
  int result;
  char* path;
  char* fullpath;

  if (buffer == NULL || size == NULL || *size == 0)
    return -EINVAL;

  usize = *size;
  result = _NSGetExecutablePath(buffer, &usize);
  if (result)
    return result;

  /* Check malloc(); the original passed a potentially-NULL buffer
   * straight into realpath(). */
  path = malloc(2 * PATH_MAX);
  if (path == NULL)
    return -ENOMEM;

  fullpath = realpath(buffer, path);
  if (fullpath == NULL) {
    SAVE_ERRNO(free(path));
    return -errno;
  }

  /* strncpy() does not NUL-terminate on truncation; terminate
   * explicitly so the strlen() below cannot run off the end. */
  strncpy(buffer, fullpath, *size);
  buffer[*size - 1] = '\0';
  free(fullpath);
  *size = strlen(buffer);
  return 0;
}
示例4: invokeArray
/*
 * Call the native function described by a jffi Function context.
 *
 * ctxAddress   - a Function* packed into a jlong (unpacked via j2p()).
 * paramBuffer  - Java byte[] of packed arguments, PARAM_SIZE bytes per slot.
 * returnBuffer - storage where ffi_call() writes the native return value.
 *
 * Struct arguments are passed by reference: their slot holds a pointer
 * that is loaded and handed to libffi directly; every other slot is
 * passed by the address of the slot itself.
 */
static void
invokeArray(JNIEnv* env, jlong ctxAddress, jbyteArray paramBuffer, void* returnBuffer)
{
Function* ctx = (Function *) j2p(ctxAddress);
/* "= { NULL }" initializes the pointer itself to NULL; used as-is when nargs == 0 */
void** ffiArgs = { NULL };
jbyte *tmpBuffer = NULL;
if (ctx->cif.nargs > 0) {
unsigned int i;
/* Stack-allocate the packed parameter copy and the libffi arg vector. */
tmpBuffer = alloca(ctx->cif.nargs * PARAM_SIZE);
ffiArgs = alloca(ctx->cif.nargs * sizeof(void *));
/* Copy the packed Java parameters into native stack memory. */
(*env)->GetByteArrayRegion(env, paramBuffer, 0, ctx->cif.nargs * PARAM_SIZE, tmpBuffer);
for (i = 0; i < ctx->cif.nargs; ++i) {
if (unlikely(ctx->cif.arg_types[i]->type == FFI_TYPE_STRUCT)) {
/* Slot contains a pointer to the struct data; pass that pointer. */
ffiArgs[i] = *(void **) &tmpBuffer[i * PARAM_SIZE];
} else {
/* Primitive: pass the address of the slot. */
ffiArgs[i] = &tmpBuffer[i * PARAM_SIZE];
}
}
}
ffi_call(&ctx->cif, FFI_FN(ctx->function), returnBuffer, ffiArgs);
/* Presumably records errno into the context for later retrieval from
 * Java -- macro defined elsewhere; TODO confirm its semantics. */
SAVE_ERRNO(ctx);
}
示例5: Java_com_kenai_jffi_Foreign_invokeArrayReturnStruct
/*
* Class: com_kenai_jffi_Foreign
* Method: invokeArrayReturnStruct
* Signature: (J[B[B)V
*/
/*
 * JNI entry point: invoke a native function whose return type is a
 * struct, copying the struct result back into a Java byte[].
 *
 * paramBuffer  - packed arguments from Java.
 * returnBuffer - Java byte[] that receives the struct return value.
 * offset       - byte offset in returnBuffer at which to store it.
 */
JNIEXPORT void JNICALL
Java_com_kenai_jffi_Foreign_invokeArrayReturnStruct(JNIEnv* env, jclass self, jlong ctxAddress,
jbyteArray paramBuffer, jbyteArray returnBuffer, jint offset)
{
Function* ctx = (Function *) j2p(ctxAddress);
/* Stack buffer sized to the struct return type. */
jbyte* retval = alloca(ctx->cif.rtype->size);
jbyte* tmpBuffer;
void** ffiArgs;
int i;
//
// Due to the undocumented and somewhat strange struct-return handling when
// using ffi_raw_call(), we convert from raw to ptr array, then call via normal
// ffi_call
//
ffiArgs = alloca(ctx->cif.nargs * sizeof(void *));
#ifdef USE_RAW
/* Raw mode: arguments live at precomputed raw offsets. */
tmpBuffer = alloca(ctx->rawParameterSize);
(*env)->GetByteArrayRegion(env, paramBuffer, 0, ctx->rawParameterSize, tmpBuffer);
for (i = 0; i < (int) ctx->cif.nargs; ++i) {
ffiArgs[i] = (tmpBuffer + ctx->rawParamOffsets[i]);
}
#else
/* Non-raw mode: arguments occupy fixed PARAM_SIZE slots. */
tmpBuffer = alloca(ctx->cif.nargs * PARAM_SIZE);
(*env)->GetByteArrayRegion(env, paramBuffer, 0, ctx->cif.nargs * PARAM_SIZE, tmpBuffer);
for (i = 0; i < (int) ctx->cif.nargs; ++i) {
ffiArgs[i] = &tmpBuffer[i * PARAM_SIZE];
}
#endif
ffi_call(&ctx->cif, FFI_FN(ctx->function), retval, ffiArgs);
/* Presumably saves errno into the context -- macro defined elsewhere. */
SAVE_ERRNO(ctx);
/* Copy the native struct result back into the Java byte[]. */
(*env)->SetByteArrayRegion(env, returnBuffer, offset, ctx->cif.rtype->size, retval);
}
示例6: uv__close
/* Close fd after asserting it is not stdin/stdout/stderr.
 * On z/OS (__MVS__), epoll_file_close() -- defined elsewhere, presumably
 * tearing down per-fd epoll-emulation state -- is run first, with
 * SAVE_ERRNO keeping errno intact across the call. */
int uv__close(int fd) {
assert(fd > STDERR_FILENO); /* Catch stdio close bugs. */
#if defined(__MVS__)
SAVE_ERRNO(epoll_file_close(fd));
#endif
return uv__close_nocheckstdio(fd);
}
示例7: uv_resident_set_memory
/* Read the resident set size of the current process from
 * /proc/self/stat (field 24, "rss", counted in pages) and store it in
 * *rss in bytes. Returns uv_ok_ on success, a sys error if the file
 * cannot be opened/read, or UV_EINVAL if parsing fails.
 */
uv_err_t uv_resident_set_memory(size_t* rss) {
  char buf[1024];
  const char* s;
  ssize_t n;
  long val;
  int fd;
  int i;

  do
    fd = open("/proc/self/stat", O_RDONLY);
  while (fd == -1 && errno == EINTR);

  if (fd == -1)
    return uv__new_sys_error(errno);

  do
    n = read(fd, buf, sizeof(buf) - 1);
  while (n == -1 && errno == EINTR);

  SAVE_ERRNO(close(fd)); /* close() must not clobber read()'s errno */

  if (n == -1)
    return uv__new_sys_error(errno);
  buf[n] = '\0';

  /* Skip field 1 (pid). */
  s = strchr(buf, ' ');
  if (s == NULL)
    goto err;

  s += 1;
  if (*s != '(')
    goto err;

  /* Field 2 (comm) is parenthesized but may itself contain ')', so
   * scan for the LAST closing paren rather than the first to stay in
   * sync with the remaining fields. */
  s = strrchr(s, ')');
  if (s == NULL)
    goto err;

  /* Skip fields 3..24; after the loop s points just before rss. */
  for (i = 1; i <= 22; i++) {
    s = strchr(s + 1, ' ');
    if (s == NULL)
      goto err;
  }

  errno = 0;
  val = strtol(s, NULL, 10);
  if (errno != 0)
    goto err;
  if (val < 0)
    goto err;

  /* rss is in pages; convert to bytes. */
  *rss = val * getpagesize();
  return uv_ok_;

err:
  return uv__new_artificial_error(UV_EINVAL);
}
示例8: uv__dup
/* Duplicate a file descriptor and mark the copy close-on-exec.
 * Not execve-safe: there is a race window between dup() and the
 * fcntl(FD_CLOEXEC) performed inside uv__cloexec().
 * Returns the new descriptor, or -1 with errno set on failure.
 */
int uv__dup(int fd) {
  int dupfd = dup(fd);

  if (dupfd == -1)
    return -1;

  if (uv__cloexec(dupfd, 1) != 0) {
    /* Close the duplicate without disturbing uv__cloexec()'s errno. */
    SAVE_ERRNO(uv__close(dupfd));
    return -1;
  }

  return dupfd;
}
示例9: new_inotify_fd
/* Return an inotify descriptor with NONBLOCK and CLOEXEC set, or -1
 * on failure. Uses inotify_init1() when available; otherwise falls
 * back to a plain inotify_init() followed by manual flag twiddling. */
static int new_inotify_fd(void) {
#if HAVE_INOTIFY_INIT1
  return inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
#else
  int fd = inotify_init();

  if (fd == -1)
    return -1;

  if (uv__cloexec(fd, 1) || uv__nonblock(fd, 1)) {
    SAVE_ERRNO(uv__close(fd));
    return -1;
  }

  return fd;
#endif
}
示例10: new_inotify_fd
/* Create an inotify fd that is non-blocking and close-on-exec.
 * Prefers the syscall wrapper uv__inotify_init1(); on kernels that
 * lack it (ENOSYS) falls back to uv__inotify_init() and sets the two
 * flags by hand. Returns -1 with errno set on failure. */
static int new_inotify_fd(void) {
  int fd = uv__inotify_init1(UV__IN_NONBLOCK | UV__IN_CLOEXEC);

  if (fd == -1) {
    if (errno != ENOSYS)
      return -1;

    /* Old kernel: emulate the init1 flags manually. */
    fd = uv__inotify_init();
    if (fd == -1)
      return -1;

    if (uv__cloexec(fd, 1) || uv__nonblock(fd, 1)) {
      SAVE_ERRNO(close(fd));
      return -1;
    }
  }

  return fd;
}
示例11: uv__io_poll
/* AIX pollset backend for the libuv event loop: flush pending watcher
 * changes into the kernel pollset, then poll for I/O events.
 * NOTE(review): this example is truncated by the aggregator -- the
 * event-dispatch half of the function is not shown. */
void uv__io_poll(uv_loop_t* loop, int timeout) {
struct pollfd events[1024];
struct pollfd pqry;
struct pollfd* pe;
struct poll_ctl pc;
QUEUE* q;
uv__io_t* w;
uint64_t base;
uint64_t diff;
int nevents;
int count;
int nfds;
int i;
int rc;
int add_failed;
/* Nothing to watch: the queue of pending changes must also be empty. */
if (loop->nfds == 0) {
assert(QUEUE_EMPTY(&loop->watcher_queue));
return;
}
/* Drain the watcher queue, registering each fd with the pollset. */
while (!QUEUE_EMPTY(&loop->watcher_queue)) {
q = QUEUE_HEAD(&loop->watcher_queue);
QUEUE_REMOVE(q);
QUEUE_INIT(q);
w = QUEUE_DATA(q, uv__io_t, watcher_queue);
assert(w->pevents != 0);
assert(w->fd >= 0);
assert(w->fd < (int) loop->nwatchers);
pc.events = w->pevents;
pc.fd = w->fd;
add_failed = 0;
/* w->events == 0 means the fd is (believed) not yet in the pollset. */
if (w->events == 0) {
pc.cmd = PS_ADD;
if (pollset_ctl(loop->backend_fd, &pc, 1)) {
if (errno != EINVAL) {
assert(0 && "Failed to add file descriptor (pc.fd) to pollset");
abort();
}
/* Check if the fd is already in the pollset */
pqry.fd = pc.fd;
rc = pollset_query(loop->backend_fd, &pqry);
switch (rc) {
case -1:
assert(0 && "Failed to query pollset for file descriptor");
abort();
case 0:
assert(0 && "Pollset does not contain file descriptor");
abort();
}
/* If we got here then the pollset already contained the file descriptor even though
* we didn't think it should. This probably shouldn't happen, but we can continue. */
add_failed = 1;
}
}
if (w->events != 0 || add_failed) {
/* Modify, potentially removing events -- need to delete then add.
* Could maybe mod if we knew for sure no events are removed, but
* content of w->events is handled above as not reliable (falls back)
* so may require a pollset_query() which would have to be pretty cheap
* compared to a PS_DELETE to be worth optimizing. Alternatively, could
* lazily remove events, squelching them in the mean time. */
pc.cmd = PS_DELETE;
if (pollset_ctl(loop->backend_fd, &pc, 1)) {
assert(0 && "Failed to delete file descriptor (pc.fd) from pollset");
abort();
}
pc.cmd = PS_ADD;
if (pollset_ctl(loop->backend_fd, &pc, 1)) {
assert(0 && "Failed to add file descriptor (pc.fd) to pollset");
abort();
}
}
/* Record that the kernel now knows about exactly these events. */
w->events = w->pevents;
}
assert(timeout >= -1);
base = loop->time;
count = 48; /* Benchmarks suggest this gives the best throughput. */
for (;;) {
nfds = pollset_poll(loop->backend_fd,
events,
ARRAY_SIZE(events),
timeout);
/* Update loop->time unconditionally. It's tempting to skip the update when
* timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
* operating system didn't reschedule our process while in the syscall.
*/
SAVE_ERRNO(uv__update_time(loop));
if (nfds == 0) {
assert(timeout != -1);
return;
}
//......... (remainder of code omitted by the example aggregator) .........
示例12: uv__io_poll
/* kqueue backend (BSD/macOS) for the libuv event loop: batch pending
 * watcher changes into kevent changelists, then poll for I/O events.
 * NOTE(review): this example is truncated by the aggregator -- the
 * event-dispatch half of the function is not shown. */
void uv__io_poll(uv_loop_t* loop, int timeout) {
struct kevent events[1024];
struct kevent* ev;
struct timespec spec;
unsigned int nevents;
unsigned int revents;
QUEUE* q;
uint64_t base;
uint64_t diff;
uv__io_t* w;
int filter;
int fflags;
int count;
int nfds;
int fd;
int op;
int i;
if (loop->nfds == 0) {
assert(QUEUE_EMPTY(&loop->watcher_queue));
return;
}
nevents = 0;
/* Drain the watcher queue, turning each new interest into an EV_ADD
 * entry; flush the changelist whenever it fills up. */
while (!QUEUE_EMPTY(&loop->watcher_queue)) {
q = QUEUE_HEAD(&loop->watcher_queue);
QUEUE_REMOVE(q);
QUEUE_INIT(q);
w = QUEUE_DATA(q, uv__io_t, watcher_queue);
assert(w->pevents != 0);
assert(w->fd >= 0);
assert(w->fd < (int) loop->nwatchers);
/* Newly requested read interest (not yet registered). */
if ((w->events & UV__POLLIN) == 0 && (w->pevents & UV__POLLIN) != 0) {
filter = EVFILT_READ;
fflags = 0;
op = EV_ADD;
/* fs-event watchers use the vnode filter instead of read readiness. */
if (w->cb == uv__fs_event) {
filter = EVFILT_VNODE;
fflags = NOTE_ATTRIB | NOTE_WRITE | NOTE_RENAME
| NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE;
op = EV_ADD | EV_ONESHOT; /* Stop the event from firing repeatedly. */
}
EV_SET(events + nevents, w->fd, filter, op, fflags, 0, 0);
if (++nevents == ARRAY_SIZE(events)) {
if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
abort();
nevents = 0;
}
}
/* Newly requested write interest. */
if ((w->events & UV__POLLOUT) == 0 && (w->pevents & UV__POLLOUT) != 0) {
EV_SET(events + nevents, w->fd, EVFILT_WRITE, EV_ADD, 0, 0, 0);
if (++nevents == ARRAY_SIZE(events)) {
if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
abort();
nevents = 0;
}
}
w->events = w->pevents;
}
assert(timeout >= -1);
base = loop->time;
count = 48; /* Benchmarks suggest this gives the best throughput. */
for (;; nevents = 0) {
if (timeout != -1) {
spec.tv_sec = timeout / 1000;
spec.tv_nsec = (timeout % 1000) * 1000000;
}
/* Submit any leftover changelist entries and fetch ready events in
 * one kevent() call. */
nfds = kevent(loop->backend_fd,
events,
nevents,
events,
ARRAY_SIZE(events),
timeout == -1 ? NULL : &spec);
/* Update loop->time unconditionally. It's tempting to skip the update when
* timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
* operating system didn't reschedule our process while in the syscall.
*/
SAVE_ERRNO(uv__update_time(loop));
if (nfds == 0) {
assert(timeout != -1);
return;
}
if (nfds == -1) {
if (errno != EINTR)
abort();
//......... (remainder of code omitted by the example aggregator) .........
示例13: uv__process_child_init
/* execvp is marked __WATCHOS_PROHIBITED __TVOS_PROHIBITED, so must be
* avoided. Since this isn't called on those targets, the function
* doesn't even need to be defined for them.
*/
/* Runs in the CHILD after fork(), before exec: wires up stdio fds,
 * chdir()s, and drops privileges per `options`. On any failure it
 * writes an error code to error_fd and _exit(127)s.
 * NOTE(review): this example is truncated by the aggregator -- the
 * tail of the function (uid/gid handling, exec) is not shown. */
static void uv__process_child_init(const uv_process_options_t* options,
int stdio_count,
int (*pipes)[2],
int error_fd) {
sigset_t set;
int close_fd;
int use_fd;
int err;
int fd;
int n;
#if defined(__linux__) || defined(__FreeBSD__)
int r;
int i;
int cpumask_size;
uv__cpu_set_t cpuset;
#endif
/* Detach from the controlling terminal / process group if requested. */
if (options->flags & UV_PROCESS_DETACHED)
setsid();
/* First duplicate low numbered fds, since it's not safe to duplicate them,
* they could get replaced. Example: swapping stdout and stderr; without
* this fd 2 (stderr) would be duplicated into fd 1, thus making both
* stdout and stderr go to the same fd, which was not the intention. */
for (fd = 0; fd < stdio_count; fd++) {
use_fd = pipes[fd][1];
if (use_fd < 0 || use_fd >= fd)
continue;
/* F_DUPFD with arg stdio_count guarantees the copy lands above the
 * stdio range and cannot collide with a target slot. */
pipes[fd][1] = fcntl(use_fd, F_DUPFD, stdio_count);
if (pipes[fd][1] == -1) {
uv__write_int(error_fd, UV__ERR(errno));
_exit(127);
}
}
/* Install each stdio fd into its target slot. */
for (fd = 0; fd < stdio_count; fd++) {
close_fd = pipes[fd][0];
use_fd = pipes[fd][1];
if (use_fd < 0) {
if (fd >= 3)
continue;
else {
/* redirect stdin, stdout and stderr to /dev/null even if UV_IGNORE is
* set
*/
use_fd = open("/dev/null", fd == 0 ? O_RDONLY : O_RDWR);
close_fd = use_fd;
if (use_fd == -1) {
uv__write_int(error_fd, UV__ERR(errno));
_exit(127);
}
}
}
if (fd == use_fd)
uv__cloexec_fcntl(use_fd, 0);
else
fd = dup2(use_fd, fd); /* dup2 returns the target fd or -1 */
if (fd == -1) {
uv__write_int(error_fd, UV__ERR(errno));
_exit(127);
}
/* stdio fds must be blocking in the child. */
if (fd <= 2)
uv__nonblock_fcntl(fd, 0);
if (close_fd >= stdio_count)
uv__close(close_fd);
}
/* Close the leftover high-numbered duplicates made above. */
for (fd = 0; fd < stdio_count; fd++) {
use_fd = pipes[fd][1];
if (use_fd >= stdio_count)
uv__close(use_fd);
}
if (options->cwd != NULL && chdir(options->cwd)) {
uv__write_int(error_fd, UV__ERR(errno));
_exit(127);
}
if (options->flags & (UV_PROCESS_SETUID | UV_PROCESS_SETGID)) {
/* When dropping privileges from root, the `setgroups` call will
* remove any extraneous groups. If we don't call this, then
* even though our uid has dropped, we may still have groups
* that enable us to do super-user things. This will fail if we
* aren't root, so don't bother checking the return value, this
* is just done as an optimistic privilege dropping function.
*/
SAVE_ERRNO(setgroups(0, NULL));
}
//......... (remainder of code omitted by the example aggregator) .........
示例14: invokeArrayWithObjects_
//......... (start of function omitted by the example aggregator -- this is a
// mid-function fragment of invokeArrayWithObjects_; the enclosing loop header,
// declarations of type/object/offset/length/arrays/arrayCount, and the USE_RAW
// setup are not visible here) .........
/* Decode the parameter index encoded in the object-descriptor word. */
int idx = (type & com_kenai_jffi_ObjectBuffer_INDEX_MASK) >> com_kenai_jffi_ObjectBuffer_INDEX_SHIFT;
void* ptr;
/* Dispatch on the object kind (array / direct buffer / JNI handle). */
switch (type & com_kenai_jffi_ObjectBuffer_TYPE_MASK & ~com_kenai_jffi_ObjectBuffer_PRIM_MASK) {
case com_kenai_jffi_ObjectBuffer_ARRAY:
if (unlikely(object == NULL)) {
throwException(env, NullPointer, "null object for parameter %d", idx);
goto cleanup;
} else if (unlikely((type & com_kenai_jffi_ObjectBuffer_PINNED) != 0)) {
/* Pinned array: use GetPrimitiveArrayCritical-style access. */
ptr = jffi_getArrayCritical(env, object, offset, length, type, &arrays[arrayCount]);
if (unlikely(ptr == NULL)) {
goto cleanup;
}
} else if (true && likely(length < MAX_STACK_ARRAY)) {
/* Small array: copy into stack memory. */
ptr = alloca(jffi_arraySize(length + 1, type));
if (unlikely(jffi_getArrayBuffer(env, object, offset, length, type,
&arrays[arrayCount], ptr) == NULL)) {
goto cleanup;
}
} else {
/* Large array: copy into heap memory. */
ptr = jffi_getArrayHeap(env, object, offset, length, type, &arrays[arrayCount]);
if (unlikely(ptr == NULL)) {
goto cleanup;
}
}
++arrayCount;
break;
case com_kenai_jffi_ObjectBuffer_BUFFER:
/* Direct NIO buffer: pass its native address plus offset. */
ptr = (*env)->GetDirectBufferAddress(env, object);
if (unlikely(ptr == NULL)) {
throwException(env, NullPointer, "Could not get direct Buffer address");
goto cleanup;
}
ptr = ((char *) ptr + offset);
break;
case com_kenai_jffi_ObjectBuffer_JNI:
/* Pass raw JNI handles (the JNIEnv* or a jobject) straight through. */
switch (type & com_kenai_jffi_ObjectBuffer_TYPE_MASK) {
case com_kenai_jffi_ObjectBuffer_JNIENV:
ptr = env;
break;
case com_kenai_jffi_ObjectBuffer_JNIOBJECT:
ptr = (void *) object;
break;
default:
throwException(env, IllegalArgument, "Unsupported object type: %#x",
type & com_kenai_jffi_ObjectBuffer_TYPE_MASK);
goto cleanup;
}
break;
default:
throwException(env, IllegalArgument, "Unsupported object type: %#x",
type & com_kenai_jffi_ObjectBuffer_TYPE_MASK);
goto cleanup;
}
/* Patch the resolved native pointer into the argument slot. */
#if defined(USE_RAW)
*((void **)(tmpBuffer + ctx->rawParamOffsets[idx])) = ptr;
#else
if (unlikely(ctx->cif.arg_types[idx]->type == FFI_TYPE_STRUCT)) {
ffiArgs[idx] = ptr;
} else {
*((void **) ffiArgs[idx]) = ptr;
}
#endif
}
#if defined(USE_RAW)
//
// Special case for struct return values - unroll into a ptr array and
// use ffi_call, since ffi_raw_call with struct return values is undocumented.
//
if (unlikely(ctx->cif.rtype->type == FFI_TYPE_STRUCT)) {
ffiArgs = alloca(ctx->cif.nargs * sizeof(void *));
for (i = 0; i < ctx->cif.nargs; ++i) {
ffiArgs[i] = (tmpBuffer + ctx->rawParamOffsets[i]);
}
ffi_call(&ctx->cif, FFI_FN(ctx->function), retval, ffiArgs);
} else {
ffi_raw_call(&ctx->cif, FFI_FN(ctx->function), retval, (ffi_raw *) tmpBuffer);
}
#else
ffi_call(&ctx->cif, FFI_FN(ctx->function), retval, ffiArgs);
#endif
/* Presumably saves errno into the context -- macro defined elsewhere. */
SAVE_ERRNO(ctx);
cleanup:
/* Release any array backing memory */
for (i = 0; i < arrayCount; ++i) {
if (arrays[i].release != NULL) {
//printf("releasing array=%p\n", arrays[i].elems);
(*arrays[i].release)(env, &arrays[i]);
}
}
}
示例15: uv__io_poll
/* Solaris event-ports backend for the libuv event loop: associate
 * pending watchers with the port, then poll and dispatch callbacks.
 * NOTE(review): this example is truncated by the aggregator -- the
 * tail of the dispatch loop and `update_timeout` are not shown. */
void uv__io_poll(uv_loop_t* loop, int timeout) {
struct port_event events[1024];
struct port_event* pe;
struct timespec spec;
QUEUE* q;
uv__io_t* w;
uint64_t base;
uint64_t diff;
unsigned int nfds;
unsigned int i;
int saved_errno;
int nevents;
int count;
int fd;
if (loop->nfds == 0) {
assert(QUEUE_EMPTY(&loop->watcher_queue));
return;
}
/* Drain the watcher queue, registering each fd with the event port. */
while (!QUEUE_EMPTY(&loop->watcher_queue)) {
q = QUEUE_HEAD(&loop->watcher_queue);
QUEUE_REMOVE(q);
QUEUE_INIT(q);
w = QUEUE_DATA(q, uv__io_t, watcher_queue);
assert(w->pevents != 0);
if (port_associate(loop->backend_fd, PORT_SOURCE_FD, w->fd, w->pevents, 0))
abort();
w->events = w->pevents;
}
assert(timeout >= -1);
base = loop->time;
count = 48; /* Benchmarks suggest this gives the best throughput. */
for (;;) {
if (timeout != -1) {
spec.tv_sec = timeout / 1000;
spec.tv_nsec = (timeout % 1000) * 1000000;
}
/* Work around a kernel bug where nfds is not updated. */
events[0].portev_source = 0;
nfds = 1;
saved_errno = 0;
if (port_getn(loop->backend_fd,
events,
ARRAY_SIZE(events),
&nfds,
timeout == -1 ? NULL : &spec)) {
/* Work around another kernel bug: port_getn() may return events even
* on error.
*/
if (errno == EINTR || errno == ETIME)
saved_errno = errno;
else
abort();
}
/* Update loop->time unconditionally. It's tempting to skip the update when
* timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
* operating system didn't reschedule our process while in the syscall.
*/
SAVE_ERRNO(uv__update_time(loop));
/* portev_source still 0 => no events were delivered (see workaround). */
if (events[0].portev_source == 0) {
if (timeout == 0)
return;
if (timeout == -1)
continue;
goto update_timeout;
}
if (nfds == 0) {
assert(timeout != -1);
return;
}
nevents = 0;
/* Dispatch each delivered event to its watcher callback. */
for (i = 0; i < nfds; i++) {
pe = events + i;
fd = pe->portev_object;
assert(fd >= 0);
assert((unsigned) fd < loop->nwatchers);
w = loop->watchers[fd];
/* File descriptor that we've stopped watching, ignore. */
if (w == NULL)
continue;
w->cb(loop, w, pe->portev_events);
//......... (remainder of code omitted by the example aggregator) .........