This page collects typical usage examples of the TAILQ_FIRST macro in C/C++ (a tail-queue accessor from BSD's <sys/queue.h>). If you have been wondering what exactly TAILQ_FIRST does, how to use it, or what real-world code using it looks like, the curated examples below should help.
15 code examples of TAILQ_FIRST are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better code examples.
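Before diving into the examples, here is a minimal, self-contained sketch (written for this page, not taken from any project below) of what TAILQ_FIRST does: it evaluates to the first element of a <sys/queue.h> tail queue, or NULL when the queue is empty. Every example that follows is some variation on the three moves shown here: peek (TAILQ_FIRST), walk (TAILQ_NEXT), and drain (TAILQ_FIRST plus TAILQ_REMOVE in a loop).
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
struct node {
    int value;
    TAILQ_ENTRY(node) link;     /* embedded queue linkage */
};
TAILQ_HEAD(node_list, node);
int
main(void)
{
    struct node_list head;
    struct node *n;
    int i;
    TAILQ_INIT(&head);
    for (i = 0; i < 3; i++) {
        if ((n = malloc(sizeof(*n))) == NULL)
            return (1);
        n->value = i;
        TAILQ_INSERT_TAIL(&head, n, link);
    }
    /* Walk: TAILQ_FIRST yields the head element, TAILQ_NEXT advances. */
    for (n = TAILQ_FIRST(&head); n != NULL; n = TAILQ_NEXT(n, link))
        printf("%d\n", n->value);
    /* Drain: always remove the current first element until empty. */
    while ((n = TAILQ_FIRST(&head)) != NULL) {
        TAILQ_REMOVE(&head, n, link);
        free(n);
    }
    return (0);
}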
Example 1: viommu_dvmamap_load_raw
/*
* Load a dvmamap from an array of segs or an mlist (if the first
* "segs" entry's mlist is non-null). It calls iommu_dvmamap_load_segs()
* or iommu_dvmamap_load_mlist() for part of the 2nd pass through the
* mapping. This is ugly. A better solution would probably be to have
* function pointers for implementing the traversal. That way, there
* could be one core load routine for each of the three required algorithms
* (buffer, seg, and mlist). That would also mean that each traversal
* algorithm would need only one implementation instead of two (one for
* populating the iomap and one for populating the dvma map).
*/
int
viommu_dvmamap_load_raw(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map,
bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{
int i;
int left;
int err = 0;
bus_size_t sgsize;
bus_size_t boundary, align;
u_long dvmaddr, sgstart, sgend;
struct iommu_state *is;
struct iommu_map_state *ims = map->_dm_cookie;
#ifdef DIAGNOSTIC
if (ims == NULL)
panic("viommu_dvmamap_load_raw: null map state");
if (ims->ims_iommu == NULL)
panic("viommu_dvmamap_load_raw: null iommu");
#endif
is = ims->ims_iommu;
if (map->dm_nsegs) {
/* Already in use?? */
#ifdef DIAGNOSTIC
panic("iommu_dvmamap_load_raw: map still in use");
#endif
bus_dmamap_unload(t0, map);
}
/*
* A boundary presented to bus_dmamem_alloc() takes precedence
* over boundary in the map.
*/
if ((boundary = segs[0]._ds_boundary) == 0)
boundary = map->_dm_boundary;
align = MAX(segs[0]._ds_align, PAGE_SIZE);
/*
* Make sure that on error condition we return "no valid mappings".
*/
map->dm_nsegs = 0;
iommu_iomap_clear_pages(ims);
if (segs[0]._ds_mlist) {
struct pglist *mlist = segs[0]._ds_mlist;
struct vm_page *m;
for (m = TAILQ_FIRST(mlist); m != NULL;
m = TAILQ_NEXT(m,pageq)) {
err = iommu_iomap_insert_page(ims, VM_PAGE_TO_PHYS(m));
if (err) {
printf("iomap insert error: %d for "
"pa 0x%lx\n", err, VM_PAGE_TO_PHYS(m));
iommu_iomap_clear_pages(ims);
return (EFBIG);
}
}
} else {
/* Count up the total number of pages we need */
for (i = 0, left = size; left > 0 && i < nsegs; i++) {
bus_addr_t a, aend;
bus_size_t len = segs[i].ds_len;
bus_addr_t addr = segs[i].ds_addr;
int seg_len = MIN(left, len);
if (len < 1)
continue;
aend = round_page(addr + seg_len);
for (a = trunc_page(addr); a < aend; a += PAGE_SIZE) {
err = iommu_iomap_insert_page(ims, a);
if (err) {
printf("iomap insert error: %d for "
"pa 0x%llx\n", err, a);
iommu_iomap_clear_pages(ims);
return (EFBIG);
}
}
left -= seg_len;
}
}
sgsize = ims->ims_map.ipm_pagecnt * PAGE_SIZE;
mtx_enter(&is->is_mtx);
if (flags & BUS_DMA_24BIT) {
//......... some code omitted here .........
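The header comment at the top of this example sketches a cleaner design: one core load routine per traversal algorithm, driven by a function pointer. A hypothetical outline of that idea follows; iommu_visit_fn and iommu_traverse_mlist are invented names for illustration, not part of the real OpenBSD driver.
/* Hypothetical callback-driven traversal suggested by the comment above. */
typedef int (*iommu_visit_fn)(struct iommu_map_state *, paddr_t);
static int
iommu_traverse_mlist(struct pglist *mlist, struct iommu_map_state *ims,
    iommu_visit_fn visit)
{
    struct vm_page *m;
    int err;
    /* Same TAILQ_FIRST/TAILQ_NEXT walk as the mlist branch above. */
    for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m, pageq)) {
        if ((err = visit(ims, VM_PAGE_TO_PHYS(m))) != 0)
            return (err);
    }
    return (0);
}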
Example 2: mta_flush_task
static void
mta_flush_task(struct mta_session *s, int delivery, const char *error, size_t count,
int cache)
{
struct mta_envelope *e;
char relay[LINE_MAX];
size_t n;
struct sockaddr_storage ss;
struct sockaddr *sa;
socklen_t sa_len;
const char *domain;
(void)snprintf(relay, sizeof relay, "%s", mta_host_to_text(s->route->dst));
n = 0;
while ((e = TAILQ_FIRST(&s->task->envelopes))) {
if (count && n == count) {
stat_decrement("mta.envelope", n);
return;
}
TAILQ_REMOVE(&s->task->envelopes, e, entry);
/* we're about to log, associate session to envelope */
e->session = s->id;
e->ext = s->ext;
/* XXX */
/*
* getsockname() can only fail with ENOBUFS here;
* best effort, don't log the source ...
*/
sa = (struct sockaddr *)&ss;
sa_len = sizeof(ss);
if (getsockname(s->io.sock, sa, &sa_len) < 0)
mta_delivery_log(e, NULL, relay, delivery, error);
else
mta_delivery_log(e, sa_to_text(sa),
relay, delivery, error);
mta_delivery_notify(e);
domain = strchr(e->dest, '@');
if (domain) {
domain++;
mta_hoststat_update(domain, error);
if (cache)
mta_hoststat_cache(domain, e->id);
}
n++;
}
free(s->task->sender);
free(s->task);
s->task = NULL;
if (s->datafp) {
fclose(s->datafp);
s->datafp = NULL;
}
stat_decrement("mta.envelope", n);
stat_decrement("mta.task.running", 1);
stat_decrement("mta.task", 1);
}
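The while ((e = TAILQ_FIRST(...))) loop above is the standard idiom for draining a tail queue: re-reading the head after each TAILQ_REMOVE keeps the iteration valid even though the current element has been unlinked. A stripped-down sketch of just that idiom, with generic names:
#include <sys/queue.h>
#include <stdlib.h>
struct envelope {
    TAILQ_ENTRY(envelope) entry;
};
TAILQ_HEAD(envq, envelope);
/* Take the current first element, unlink it, dispose of it, repeat. */
static void
drain(struct envq *q)
{
    struct envelope *e;
    while ((e = TAILQ_FIRST(q)) != NULL) {
        TAILQ_REMOVE(q, e, entry);
        free(e);    /* process e here before freeing */
    }
}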
Example 3: ex_txt
/*
* ex_txt --
* Get lines from the terminal for ex.
*
* PUBLIC: int ex_txt __P((SCR *, TEXTH *, ARG_CHAR_T, u_int32_t));
*/
int
ex_txt(SCR *sp, TEXTH *tiqh, ARG_CHAR_T prompt, u_int32_t flags)
{
EVENT ev;
GS *gp;
TEXT ait, *ntp, *tp;
carat_t carat_st;
size_t cnt;
int rval;
int nochange;
rval = 0;
/*
* Get a TEXT structure with some initial buffer space, reusing the
* last one if it's big enough. (All TEXT bookkeeping fields default
* to 0 -- text_init() handles this.)
*/
if (!TAILQ_EMPTY(tiqh)) {
tp = TAILQ_FIRST(tiqh);
if (TAILQ_NEXT(tp, q) != NULL || tp->lb_len < 32) {
text_lfree(tiqh);
goto newtp;
}
tp->len = 0;
} else {
newtp: if ((tp = text_init(sp, NULL, 0, 32)) == NULL)
goto err;
TAILQ_INSERT_HEAD(tiqh, tp, q);
}
/* Set the starting line number. */
tp->lno = sp->lno + 1;
/*
* If it's a terminal, set up autoindent, put out the prompt, and
* set it up so we know we were suspended. Otherwise, turn off
* the autoindent flag, as that requires less special casing below.
*
* XXX
* Historic practice is that ^Z suspended command mode (but, because
* it ran in cooked mode, it was unaffected by the autowrite option.)
* On restart, any "current" input was discarded, whether in insert
* mode or not, and ex was in command mode. This code matches historic
* practice, but not 'cause it's easier.
*/
gp = sp->gp;
if (F_ISSET(gp, G_SCRIPTED))
LF_CLR(TXT_AUTOINDENT);
else {
if (LF_ISSET(TXT_AUTOINDENT)) {
LF_SET(TXT_EOFCHAR);
if (v_txt_auto(sp, sp->lno, NULL, 0, tp))
goto err;
}
txt_prompt(sp, tp, prompt, flags);
}
for (carat_st = C_NOTSET, nochange = 0;;) {
if (v_event_get(sp, &ev, 0, 0))
goto err;
/* Deal with all non-character events. */
switch (ev.e_event) {
case E_CHARACTER:
break;
case E_ERR:
goto err;
case E_REPAINT:
case E_WRESIZE:
continue;
case E_EOF:
rval = 1;
/* FALLTHROUGH */
case E_INTERRUPT:
/*
* Handle EOF/SIGINT events by discarding partially
* entered text and returning. EOF returns failure,
* E_INTERRUPT returns success.
*/
goto notlast;
default:
v_event_err(sp, &ev);
goto notlast;
}
/*
* Deal with character events.
*
* Check to see if the character fits into the input buffer.
* (Use tp->len, ignore overwrite and non-printable chars.)
*/
BINC_GOTOW(sp, tp->lb, tp->lb_len, tp->len + 1);
//......... some code omitted here .........
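The TAILQ_EMPTY/TAILQ_FIRST/TAILQ_NEXT test at the top of ex_txt() implements a small reuse policy: keep the queued buffer only if it is the sole element and big enough. A generic sketch of the same test follows (struct buf and get_buf are invented names; unlike the real code, this sketch does not free a rejected list):
#include <sys/queue.h>
#include <stdlib.h>
struct buf {
    size_t cap;
    TAILQ_ENTRY(buf) link;
};
TAILQ_HEAD(bufq, buf);
/* Reuse the queued buffer only if it is the single element and large
 * enough; otherwise allocate a fresh one and queue it at the head. */
static struct buf *
get_buf(struct bufq *q, size_t need)
{
    struct buf *b;
    if (!TAILQ_EMPTY(q)) {
        b = TAILQ_FIRST(q);
        if (TAILQ_NEXT(b, link) == NULL && b->cap >= need)
            return (b);    /* sole element, large enough */
    }
    if ((b = calloc(1, sizeof(*b))) == NULL)
        return (NULL);
    b->cap = need;
    TAILQ_INSERT_HEAD(q, b, link);
    return (b);
}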
Example 4: _pthread_create
//......... some code omitted here .........
/* Copy the scheduling attributes: */
new_thread->base_priority =
curthread->base_priority &
~PTHREAD_SIGNAL_PRIORITY;
new_thread->attr.prio =
curthread->base_priority &
~PTHREAD_SIGNAL_PRIORITY;
new_thread->attr.sched_policy =
curthread->attr.sched_policy;
} else {
/*
* Use just the thread priority, leaving the
* other scheduling attributes as their
* default values:
*/
new_thread->base_priority =
new_thread->attr.prio;
}
new_thread->active_priority = new_thread->base_priority;
new_thread->inherited_priority = 0;
/* Initialize joiner to NULL (no joiner): */
new_thread->joiner = NULL;
/* Initialize the mutex queue: */
TAILQ_INIT(&new_thread->mutexq);
/* Initialise hooks in the thread structure: */
new_thread->specific = NULL;
new_thread->cleanup = NULL;
new_thread->flags = 0;
new_thread->poll_data.nfds = 0;
new_thread->poll_data.fds = NULL;
new_thread->continuation = NULL;
/*
* Defer signals to protect the scheduling queues
* from access by the signal handler:
*/
_thread_kern_sig_defer();
/*
* Initialise the unique id which GDB uses to
* track threads.
*/
new_thread->uniqueid = next_uniqueid++;
/*
* Check if the garbage collector thread
* needs to be started.
*/
f_gc = (TAILQ_FIRST(&_thread_list) == _thread_initial);
/* Add the thread to the linked list of all threads: */
TAILQ_INSERT_HEAD(&_thread_list, new_thread, tle);
if (pattr->suspend == PTHREAD_CREATE_SUSPENDED) {
new_thread->flags |= PTHREAD_FLAGS_SUSPENDED;
new_thread->state = PS_SUSPENDED;
} else {
new_thread->state = PS_RUNNING;
PTHREAD_PRIOQ_INSERT_TAIL(new_thread);
}
/*
* Undefer and handle pending signals, yielding
* if necessary.
*/
_thread_kern_sig_undefer();
/* Return a pointer to the thread structure: */
(*thread) = new_thread;
if (f_gc != 0) {
/* Install the scheduling timer: */
itimer.it_interval.tv_sec = 0;
itimer.it_interval.tv_usec = _clock_res_usec;
itimer.it_value = itimer.it_interval;
if (setitimer(_ITIMER_SCHED_TIMER, &itimer,
NULL) != 0)
PANIC("Cannot set interval timer");
}
/* Schedule the new user thread: */
_thread_kern_sched(NULL);
/*
* Start a garbage collector thread
* if necessary.
*/
if (f_gc && _pthread_create(&gc_thread, NULL,
_thread_gc, NULL) != 0)
PANIC("Can't create gc thread");
}
}
/* Return the status: */
return (ret);
}
Example 5: iwm_mvm_config_umac_scan
int
iwm_mvm_config_umac_scan(struct iwm_softc *sc)
{
struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
struct iwm_scan_config *scan_config;
int ret, j, nchan;
size_t cmd_size;
struct ieee80211_channel *c;
struct iwm_host_cmd hcmd = {
.id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_ALWAYS_LONG_GROUP, 0),
.flags = IWM_CMD_SYNC,
};
static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
IWM_SCAN_CONFIG_RATE_54M);
cmd_size = sizeof(*scan_config) + sc->ucode_capa.n_scan_channels;
scan_config = malloc(cmd_size, M_DEVBUF, M_NOWAIT | M_ZERO);
if (scan_config == NULL)
return ENOMEM;
scan_config->tx_chains = htole32(iwm_mvm_get_valid_tx_ant(sc));
scan_config->rx_chains = htole32(iwm_mvm_get_valid_rx_ant(sc));
scan_config->legacy_rates = htole32(rates |
IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));
/* These timings correspond to iwlwifi's UNASSOC scan. */
scan_config->dwell_active = 10;
scan_config->dwell_passive = 110;
scan_config->dwell_fragmented = 44;
scan_config->dwell_extended = 90;
scan_config->out_of_channel_time = htole32(0);
scan_config->suspend_time = htole32(0);
IEEE80211_ADDR_COPY(scan_config->mac_addr,
vap ? vap->iv_myaddr : ic->ic_macaddr);
scan_config->bcast_sta_id = sc->sc_aux_sta.sta_id;
scan_config->channel_flags = IWM_CHANNEL_FLAG_EBS |
IWM_CHANNEL_FLAG_ACCURATE_EBS | IWM_CHANNEL_FLAG_EBS_ADD |
IWM_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
for (nchan = j = 0;
j < ic->ic_nchans && nchan < sc->ucode_capa.n_scan_channels; j++) {
c = &ic->ic_channels[j];
/* For 2GHz, only populate 11b channels */
/* For 5GHz, only populate 11a channels */
/*
* Catch other channels, in case we have 900MHz channels or
* something in the chanlist.
*/
if (iwm_mvm_scan_skip_channel(c))
continue;
scan_config->channel_array[nchan++] =
ieee80211_mhz2ieee(c->ic_freq, 0);
}
scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
hcmd.data[0] = scan_config;
hcmd.len[0] = cmd_size;
IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "Sending UMAC scan config\n");
ret = iwm_send_cmd(sc, &hcmd);
if (!ret)
IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
"UMAC scan config was sent successfully\n");
free(scan_config, M_DEVBUF);
return ret;
}
static boolean_t
iwm_mvm_scan_use_ebs(struct iwm_softc *sc)
{
const struct iwm_ucode_capabilities *capa = &sc->ucode_capa;
/* We can only use EBS if:
* 1. the feature is supported;
* 2. the last EBS was successful;
* 3. if only single scan, the single scan EBS API is supported;
* 4. it's not a p2p find operation.
//......... some code omitted here .........
Example 6: stream_move
/*
* move the pointers into the stream
*/
int stream_move(struct stream_object *so, size_t offset, int whence, int mode)
{
size_t tmp_size = 0;
size_t move = 0;
struct so_list *so_curr = NULL;
size_t po_off = 0;
/* get the values into temp variable */
switch (mode) {
case STREAM_SIDE1:
so_curr = so->side1.so_curr;
po_off = so->side1.po_off;
break;
case STREAM_SIDE2:
so_curr = so->side2.so_curr;
po_off = so->side2.po_off;
break;
}
/* no movement */
if (offset == 0)
return 0;
/*
* the offset is calculated from the beginning,
* so move to the first packet
*/
if (whence == SEEK_SET) {
so_curr = TAILQ_FIRST(&so->so_head);
po_off = 0;
}
/* the other mode is SEEK_CUR */
/* search the first packet matching the selected mode */
while (so_curr->side != mode) {
so_curr = TAILQ_NEXT(so_curr, next);
/* don't run past the end of the stream */
if (so_curr == TAILQ_END(&so->pl_head))
return 0;
}
while (offset) {
/* get the length to jump to in the current po */
tmp_size = (so_curr->po.DATA.len - po_off < offset) ? so_curr->po.DATA.len - po_off : offset;
/* update the offset */
po_off += tmp_size;
/* decrement the total offset by the packet length */
offset -= tmp_size;
/* update the total movement */
move += tmp_size;
/* we have reached the end of the packet, go to the next one */
if (po_off == so_curr->po.DATA.len) {
/* search the next packet matching the selected mode */
do {
/* don't run past the end of the stream */
if (TAILQ_NEXT(so_curr, next) != TAILQ_END(&so->pl_head))
so_curr = TAILQ_NEXT(so_curr, next);
else
goto move_end;
} while (so_curr->side != mode);
/* reset the offset for the packet */
po_off = 0;
}
}
move_end:
/* restore the value in the real stream object */
switch (mode) {
case STREAM_SIDE1:
so->side1.so_curr = so_curr;
so->side1.po_off = po_off;
break;
case STREAM_SIDE2:
so->side2.so_curr = so_curr;
so->side2.po_off = po_off;
break;
}
return move;
}
Example 7: mx_channel_netconf
mx_channel_t *
mx_channel_netconf (mx_sock_session_t *mssp, mx_sock_t *client, int xml_mode)
{
LIBSSH2_CHANNEL *channel;
mx_channel_t *mcp;
mcp = TAILQ_FIRST(&mssp->mss_released);
if (mcp) {
mx_log("S%u reusing channel C%u for client S%u",
mssp->mss_base.ms_id, mcp->mc_id, client->ms_id);
TAILQ_REMOVE(&mssp->mss_released, mcp, mc_link);
TAILQ_INSERT_HEAD(&mssp->mss_channels, mcp, mc_link);
mcp->mc_state = MSS_RPC_INITIAL;
mcp->mc_client = client;
if (mx_mti(client)->mti_set_channel)
mx_mti(client)->mti_set_channel(client, mcp->mc_session, mcp);
return mcp;
}
/* Must use blocking IO for channel creation */
libssh2_session_set_blocking(mssp->mss_session, 1);
channel = libssh2_channel_open_session(mssp->mss_session);
if (channel == NULL) {
mx_log("S%u could not open netconf channel", mssp->mss_base.ms_id);
return NULL;
}
if (!xml_mode) {
if (libssh2_channel_subsystem(channel, "netconf") != 0) {
mx_log("S%u could not open netconf subsystem",
mssp->mss_base.ms_id);
goto try_xml_mode;
}
mx_log("S%u opened netconf subsystem channel to %s",
mssp->mss_base.ms_id, mssp->mss_target);
} else {
static const char command[] = "xml-mode netconf need-trailer";
try_xml_mode:
if (libssh2_channel_process_startup(channel,
"exec", sizeof("exec") - 1,
command, strlen(command)) != 0) {
mx_log("S%u could not open netconf xml-mode",
mssp->mss_base.ms_id);
libssh2_channel_free(channel);
channel = NULL;
} else {
mx_log("S%u opened netconf xml-mode channel to %s",
mssp->mss_base.ms_id, mssp->mss_target);
}
}
libssh2_session_set_blocking(mssp->mss_session, 0);
if (channel == NULL) {
mx_log("S%u could not open netconf channel", mssp->mss_base.ms_id);
return NULL;
}
mcp = mx_channel_create(mssp, client, channel);
if (mcp == NULL) {
/* XXX fail */
return NULL;
}
mx_channel_netconf_send_hello(mcp);
mx_channel_netconf_read_hello(mcp);
return mcp;
}
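mx_channel_netconf() first tries to pop a cached channel off the mss_released list with TAILQ_FIRST before paying for a new SSH channel. Here is the recycle-or-allocate pattern in isolation, as a sketch with generic names:
#include <sys/queue.h>
#include <stdlib.h>
struct chan {
    TAILQ_ENTRY(chan) link;
};
TAILQ_HEAD(chanq, chan);
/* Prefer a recycled entry from the free list; fall back to allocating. */
static struct chan *
chan_get(struct chanq *released, struct chanq *active)
{
    struct chan *c;
    if ((c = TAILQ_FIRST(released)) != NULL)
        TAILQ_REMOVE(released, c, link);    /* reuse cached entry */
    else if ((c = calloc(1, sizeof(*c))) == NULL)
        return (NULL);
    TAILQ_INSERT_HEAD(active, c, link);
    return (c);
}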
Example 8: pf_map_addr
//......... some code omitted here .........
PF_ACPY(init_addr, naddr, af);
} else {
PF_AINC(&rpool->counter, af);
PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af);
}
break;
case PF_POOL_SRCHASH:
{
unsigned char hash[16];
pf_hash(saddr, (struct pf_addr *)&hash, &rpool->key, af);
PF_POOLMASK(naddr, raddr, rmask, (struct pf_addr *)&hash, af);
break;
}
case PF_POOL_ROUNDROBIN:
{
struct pf_pooladdr *acur = rpool->cur;
/*
* XXXGL: in the round-robin case we need to store
* the round-robin machine state in the rule, so the
* forwarding thread needs to modify the rule.
*
* This is done without locking, because performance is
* assumed more important than round-robin precision.
*
* In the simplest case we just update the "rpool->cur"
* pointer. However, if the pool contains tables or dynamic
* addresses, then "tblidx" is also used to store machine
* state. Since "tblidx" is an int, concurrent access to it
* can't lead to inconsistency, only to a loss of precision.
*
* Things get worse if the table contains prefixes rather
* than hosts. In that case the counter also stores machine
* state, and for an IPv6 address the counter can't be
* updated atomically. Using round-robin on a table of IPv6
* (or even IPv4) prefixes would therefore probably cause a panic.
*/
if (rpool->cur->addr.type == PF_ADDR_TABLE) {
if (!pfr_pool_get(rpool->cur->addr.p.tbl,
&rpool->tblidx, &rpool->counter, af))
goto get_addr;
} else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
if (!pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
&rpool->tblidx, &rpool->counter, af))
goto get_addr;
} else if (pf_match_addr(0, raddr, rmask, &rpool->counter, af))
goto get_addr;
try_next:
if (TAILQ_NEXT(rpool->cur, entries) == NULL)
rpool->cur = TAILQ_FIRST(&rpool->list);
else
rpool->cur = TAILQ_NEXT(rpool->cur, entries);
if (rpool->cur->addr.type == PF_ADDR_TABLE) {
rpool->tblidx = -1;
if (pfr_pool_get(rpool->cur->addr.p.tbl,
&rpool->tblidx, &rpool->counter, af)) {
/* table contains no address of type 'af' */
if (rpool->cur != acur)
goto try_next;
return (1);
}
} else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
rpool->tblidx = -1;
if (pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
&rpool->tblidx, &rpool->counter, af)) {
/* table contains no address of type 'af' */
if (rpool->cur != acur)
goto try_next;
return (1);
}
} else {
raddr = &rpool->cur->addr.v.a.addr;
rmask = &rpool->cur->addr.v.a.mask;
PF_ACPY(&rpool->counter, raddr, af);
}
get_addr:
PF_ACPY(naddr, &rpool->counter, af);
if (init_addr != NULL && PF_AZERO(init_addr, af))
PF_ACPY(init_addr, naddr, af);
PF_AINC(&rpool->counter, af);
break;
}
}
if (*sn != NULL)
PF_ACPY(&(*sn)->raddr, naddr, af);
if (V_pf_status.debug >= PF_DEBUG_MISC &&
(rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
printf("pf_map_addr: selected address ");
pf_print_host(naddr, 0, af);
printf("\n");
}
return (0);
}
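The try_next logic above is the canonical round-robin walk over a tail queue: advance with TAILQ_NEXT and wrap to TAILQ_FIRST once the tail is reached. Reduced to its core (generic names, a sketch only):
#include <sys/queue.h>
struct member {
    TAILQ_ENTRY(member) entries;
};
TAILQ_HEAD(memberq, member);
/* Advance a round-robin cursor, wrapping from the tail back to the head. */
static struct member *
rr_next(struct memberq *q, struct member *cur)
{
    struct member *next;
    if ((next = TAILQ_NEXT(cur, entries)) == NULL)
        next = TAILQ_FIRST(q);    /* fell off the tail: wrap around */
    return (next);
}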
Example 9: kvm_proclist
/*
* Read proc's from memory file into buffer bp, which has space to hold
* at most maxcnt procs.
*/
static int
kvm_proclist(kvm_t *kd, int what, int arg, struct proc *p,
struct kinfo_proc *bp, int maxcnt)
{
int cnt = 0;
struct kinfo_proc kinfo_proc, *kp;
struct pgrp pgrp;
struct session sess;
struct cdev t_cdev;
struct tty tty;
struct vmspace vmspace;
struct sigacts sigacts;
#if 0
struct pstats pstats;
#endif
struct ucred ucred;
struct prison pr;
struct thread mtd;
struct proc proc;
struct proc pproc;
struct sysentvec sysent;
char svname[KI_EMULNAMELEN];
kp = &kinfo_proc;
kp->ki_structsize = sizeof(kinfo_proc);
/*
* Loop over the processes. This is completely broken because we need to be
* able to loop over the threads and somehow merge the ones that belong to the same process.
*/
for (; cnt < maxcnt && p != NULL; p = LIST_NEXT(&proc, p_list)) {
memset(kp, 0, sizeof *kp);
if (KREAD(kd, (u_long)p, &proc)) {
_kvm_err(kd, kd->program, "can't read proc at %p", p);
return (-1);
}
if (proc.p_state == PRS_NEW)
continue;
if (proc.p_state != PRS_ZOMBIE) {
if (KREAD(kd, (u_long)TAILQ_FIRST(&proc.p_threads),
&mtd)) {
_kvm_err(kd, kd->program,
"can't read thread at %p",
TAILQ_FIRST(&proc.p_threads));
return (-1);
}
}
if (KREAD(kd, (u_long)proc.p_ucred, &ucred) == 0) {
kp->ki_ruid = ucred.cr_ruid;
kp->ki_svuid = ucred.cr_svuid;
kp->ki_rgid = ucred.cr_rgid;
kp->ki_svgid = ucred.cr_svgid;
kp->ki_cr_flags = ucred.cr_flags;
if (ucred.cr_ngroups > KI_NGROUPS) {
kp->ki_ngroups = KI_NGROUPS;
kp->ki_cr_flags |= KI_CRF_GRP_OVERFLOW;
} else
kp->ki_ngroups = ucred.cr_ngroups;
kvm_read(kd, (u_long)ucred.cr_groups, kp->ki_groups,
kp->ki_ngroups * sizeof(gid_t));
kp->ki_uid = ucred.cr_uid;
if (ucred.cr_prison != NULL) {
if (KREAD(kd, (u_long)ucred.cr_prison, &pr)) {
_kvm_err(kd, kd->program,
"can't read prison at %p",
ucred.cr_prison);
return (-1);
}
kp->ki_jid = pr.pr_id;
}
}
switch(what & ~KERN_PROC_INC_THREAD) {
case KERN_PROC_GID:
if (kp->ki_groups[0] != (gid_t)arg)
continue;
break;
case KERN_PROC_PID:
if (proc.p_pid != (pid_t)arg)
continue;
break;
case KERN_PROC_RGID:
if (kp->ki_rgid != (gid_t)arg)
continue;
break;
case KERN_PROC_UID:
if (kp->ki_uid != (uid_t)arg)
continue;
break;
case KERN_PROC_RUID:
if (kp->ki_ruid != (uid_t)arg)
continue;
//......... some code omitted here .........
Example 10: pf_match_translation
static struct pf_rule *
pf_match_translation(struct pf_pdesc *pd, struct mbuf *m, int off,
int direction, struct pfi_kif *kif, struct pf_addr *saddr, u_int16_t sport,
struct pf_addr *daddr, uint16_t dport, int rs_num,
struct pf_anchor_stackframe *anchor_stack)
{
struct pf_rule *r, *rm = NULL;
struct pf_ruleset *ruleset = NULL;
int tag = -1;
int rtableid = -1;
int asd = 0;
r = TAILQ_FIRST(pf_main_ruleset.rules[rs_num].active.ptr);
while (r && rm == NULL) {
struct pf_rule_addr *src = NULL, *dst = NULL;
struct pf_addr_wrap *xdst = NULL;
if (r->action == PF_BINAT && direction == PF_IN) {
src = &r->dst;
if (r->rpool.cur != NULL)
xdst = &r->rpool.cur->addr;
} else {
src = &r->src;
dst = &r->dst;
}
r->evaluations++;
if (pfi_kif_match(r->kif, kif) == r->ifnot)
r = r->skip[PF_SKIP_IFP].ptr;
else if (r->direction && r->direction != direction)
r = r->skip[PF_SKIP_DIR].ptr;
else if (r->af && r->af != pd->af)
r = r->skip[PF_SKIP_AF].ptr;
else if (r->proto && r->proto != pd->proto)
r = r->skip[PF_SKIP_PROTO].ptr;
else if (PF_MISMATCHAW(&src->addr, saddr, pd->af,
src->neg, kif, M_GETFIB(m)))
r = r->skip[src == &r->src ? PF_SKIP_SRC_ADDR :
PF_SKIP_DST_ADDR].ptr;
else if (src->port_op && !pf_match_port(src->port_op,
src->port[0], src->port[1], sport))
r = r->skip[src == &r->src ? PF_SKIP_SRC_PORT :
PF_SKIP_DST_PORT].ptr;
else if (dst != NULL &&
PF_MISMATCHAW(&dst->addr, daddr, pd->af, dst->neg, NULL,
M_GETFIB(m)))
r = r->skip[PF_SKIP_DST_ADDR].ptr;
else if (xdst != NULL && PF_MISMATCHAW(xdst, daddr, pd->af,
0, NULL, M_GETFIB(m)))
r = TAILQ_NEXT(r, entries);
else if (dst != NULL && dst->port_op &&
!pf_match_port(dst->port_op, dst->port[0],
dst->port[1], dport))
r = r->skip[PF_SKIP_DST_PORT].ptr;
else if (r->match_tag && !pf_match_tag(m, r, &tag,
pd->pf_mtag ? pd->pf_mtag->tag : 0))
r = TAILQ_NEXT(r, entries);
else if (r->os_fingerprint != PF_OSFP_ANY && (pd->proto !=
IPPROTO_TCP || !pf_osfp_match(pf_osfp_fingerprint(pd, m,
off, pd->hdr.tcp), r->os_fingerprint)))
r = TAILQ_NEXT(r, entries);
else {
if (r->tag)
tag = r->tag;
if (r->rtableid >= 0)
rtableid = r->rtableid;
if (r->anchor == NULL) {
rm = r;
} else
pf_step_into_anchor(anchor_stack, &asd,
&ruleset, rs_num, &r, NULL, NULL);
}
if (r == NULL)
pf_step_out_of_anchor(anchor_stack, &asd, &ruleset,
rs_num, &r, NULL, NULL);
}
if (tag > 0 && pf_tag_packet(m, pd, tag))
return (NULL);
if (rtableid >= 0)
M_SETFIB(m, rtableid);
if (rm != NULL && (rm->action == PF_NONAT ||
rm->action == PF_NORDR || rm->action == PF_NOBINAT))
return (NULL);
return (rm);
}
Example 11: rc_service_daemons_crashed
//......... some code omitted here .........
if (!list)
list = rc_stringlist_new();
rc_stringlist_add(list, p);
} else if (strcmp(token, "name") == 0) {
if (name)
free(name);
name = xstrdup(p);
} else if (strcmp(token, "pidfile") == 0) {
pidfile = xstrdup(p);
break;
}
}
fclose(fp);
ch_root = rc_service_value_get(basename_c(service), "chroot");
spidfile = pidfile;
if (ch_root && pidfile) {
spidfile = xmalloc(strlen(ch_root) + strlen(pidfile) + 1);
strcpy(spidfile, ch_root);
strcat(spidfile, pidfile);
free(pidfile);
pidfile = spidfile;
}
pid = 0;
if (pidfile) {
retval = true;
if ((fp = fopen(pidfile, "r"))) {
if (fscanf(fp, "%d", &pid) == 1)
retval = false;
fclose(fp);
}
free(pidfile);
pidfile = NULL;
/* We have the pid, so no need to match
on exec or name */
free(exec);
exec = NULL;
free(name);
name = NULL;
} else {
if (exec) {
if (!list)
list = rc_stringlist_new();
if (!TAILQ_FIRST(list))
rc_stringlist_add(list, exec);
free(exec);
exec = NULL;
}
if (list) {
/* We need to flatten our linked list
into an array */
i = 0;
TAILQ_FOREACH(s, list, entries)
i++;
argv = xmalloc(sizeof(char *) * (i + 1));
i = 0;
TAILQ_FOREACH(s, list, entries)
argv[i++] = s->value;
argv[i] = NULL;
}
}
if (!retval) {
if (pid != 0) {
if (kill(pid, 0) == -1 && errno == ESRCH)
retval = true;
} else if ((pids = rc_find_pids(exec,
(const char *const *)argv,
0, pid)))
{
p1 = LIST_FIRST(pids);
while (p1) {
p2 = LIST_NEXT(p1, entries);
free(p1);
p1 = p2;
}
free(pids);
} else
retval = true;
}
rc_stringlist_free(list);
list = NULL;
free(argv);
argv = NULL;
free(exec);
exec = NULL;
free(name);
name = NULL;
if (retval)
break;
}
closedir(dp);
free(line);
return retval;
}
Example 12: systimer_intr
/*
* Execute ready systimers. Called directly from the platform-specific
* one-shot timer clock interrupt (e.g. clkintr()) or via an IPI. May
* be called simultaneously on multiple cpus and always operates on
* the current cpu's queue. Systimer functions are responsible for calling
* hardclock, statclock, and other finely-timed routines.
*/
void
systimer_intr(sysclock_t *timep, int in_ipi, struct intrframe *frame)
{
globaldata_t gd = mycpu;
sysclock_t time = *timep;
systimer_t info;
if (gd->gd_syst_nest)
return;
crit_enter();
++gd->gd_syst_nest;
while ((info = TAILQ_FIRST(&gd->gd_systimerq)) != NULL) {
/*
* If we haven't reached the requested time, tell the cputimer
* how much is left and break out.
*/
if ((int)(info->time - time) > 0) {
cputimer_intr_reload(info->time - time);
break;
}
/*
* Dequeue and execute, detect a loss of the systimer. Note
* that the in-progress systimer pointer can only be used to
* detect a loss of the systimer, it is only useful within
* this code sequence and becomes stale otherwise.
*/
info->flags &= ~SYSTF_ONQUEUE;
TAILQ_REMOVE(info->queue, info, node);
gd->gd_systimer_inprog = info;
crit_exit();
info->func(info, in_ipi, frame);
crit_enter();
/*
* The caller may have deleted or even re-queued the systimer itself
* with a delete/add sequence. If the caller does not mess with
* the systimer we will requeue the periodic interval automatically.
*
* If this is a non-queued periodic interrupt, do not allow multiple
* events to build up (used for things like the callout timer to
* prevent premature timeouts due to long interrupt disablements,
* BIOS 8254 glitching, and so forth). However, we still want to
* keep things synchronized between cpus for efficient handling of
* the timer interrupt, so jump in multiples of the periodic rate.
*/
if (gd->gd_systimer_inprog == info && info->periodic) {
if (info->which != sys_cputimer) {
info->periodic = sys_cputimer->fromhz(info->freq);
info->which = sys_cputimer;
}
info->time += info->periodic;
if ((info->flags & SYSTF_NONQUEUED) &&
(int)(info->time - time) <= 0
) {
info->time += ((time - info->time + info->periodic - 1) /
info->periodic) * info->periodic;
}
systimer_add(info);
}
gd->gd_systimer_inprog = NULL;
}
--gd->gd_syst_nest;
crit_exit();
}
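Because gd_systimerq is kept sorted by expiry time, the handler above only ever inspects TAILQ_FIRST: it pops and runs entries until the head is not yet due, then reprograms the one-shot timer for the remainder. The core pattern as a sketch (invented names; the signed subtraction tolerates wraparound of the clock counter):
#include <sys/queue.h>
struct timer {
    unsigned int time;        /* absolute expiry; may wrap */
    TAILQ_ENTRY(timer) node;
};
TAILQ_HEAD(timerq, timer);
/* Run every timer due at `now`; stop at the first one still in the future. */
static void
run_due(struct timerq *q, unsigned int now)
{
    struct timer *t;
    while ((t = TAILQ_FIRST(q)) != NULL) {
        if ((int)(t->time - now) > 0)
            break;        /* head not due yet; queue is sorted */
        TAILQ_REMOVE(q, t, node);
        /* the timer's callback would fire here */
    }
}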
Example 13: cmd_swap_pane_exec
int
cmd_swap_pane_exec(struct cmd *self, struct cmd_ctx *ctx)
{
struct args *args = self->args;
struct winlink *src_wl, *dst_wl;
struct window *src_w, *dst_w;
struct window_pane *tmp_wp, *src_wp, *dst_wp;
struct layout_cell *src_lc, *dst_lc;
u_int sx, sy, xoff, yoff;
dst_wl = cmd_find_pane(ctx, args_get(args, 't'), NULL, &dst_wp);
if (dst_wl == NULL)
return (-1);
dst_w = dst_wl->window;
if (!args_has(args, 's')) {
src_w = dst_w;
if (args_has(self->args, 'D')) {
src_wp = TAILQ_NEXT(dst_wp, entry);
if (src_wp == NULL)
src_wp = TAILQ_FIRST(&dst_w->panes);
} else if (args_has(self->args, 'U')) {
src_wp = TAILQ_PREV(dst_wp, window_panes, entry);
if (src_wp == NULL)
src_wp = TAILQ_LAST(&dst_w->panes, window_panes);
} else
return (0);
} else {
src_wl = cmd_find_pane(ctx, args_get(args, 's'), NULL, &src_wp);
if (src_wl == NULL)
return (-1);
src_w = src_wl->window;
}
if (src_wp == dst_wp)
return (0);
tmp_wp = TAILQ_PREV(dst_wp, window_panes, entry);
TAILQ_REMOVE(&dst_w->panes, dst_wp, entry);
TAILQ_REPLACE(&src_w->panes, src_wp, dst_wp, entry);
if (tmp_wp == src_wp)
tmp_wp = dst_wp;
if (tmp_wp == NULL)
TAILQ_INSERT_HEAD(&dst_w->panes, src_wp, entry);
else
TAILQ_INSERT_AFTER(&dst_w->panes, tmp_wp, src_wp, entry);
src_lc = src_wp->layout_cell;
dst_lc = dst_wp->layout_cell;
src_lc->wp = dst_wp;
dst_wp->layout_cell = src_lc;
dst_lc->wp = src_wp;
src_wp->layout_cell = dst_lc;
src_wp->window = dst_w;
dst_wp->window = src_w;
sx = src_wp->sx; sy = src_wp->sy;
xoff = src_wp->xoff; yoff = src_wp->yoff;
src_wp->xoff = dst_wp->xoff; src_wp->yoff = dst_wp->yoff;
window_pane_resize(src_wp, dst_wp->sx, dst_wp->sy);
dst_wp->xoff = xoff; dst_wp->yoff = yoff;
window_pane_resize(dst_wp, sx, sy);
if (!args_has(self->args, 'd')) {
if (src_w != dst_w) {
window_set_active_pane(src_w, dst_wp);
window_set_active_pane(dst_w, src_wp);
} else {
tmp_wp = dst_wp;
if (!window_pane_visible(tmp_wp))
tmp_wp = src_wp;
window_set_active_pane(src_w, tmp_wp);
}
} else {
if (src_w->active == src_wp)
window_set_active_pane(src_w, dst_wp);
if (dst_w->active == dst_wp)
window_set_active_pane(dst_w, src_wp);
}
if (src_w != dst_w) {
if (src_w->last == src_wp)
src_w->last = NULL;
if (dst_w->last == dst_wp)
dst_w->last = NULL;
}
server_redraw_window(src_w);
server_redraw_window(dst_w);
return (0);
}
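The -D/-U handling above shows the usual way to get circular next/previous semantics out of a tail queue: when TAILQ_NEXT falls off the end, wrap to TAILQ_FIRST; when TAILQ_PREV falls off the front, wrap to TAILQ_LAST. As standalone helpers (a sketch; note that TAILQ_PREV and TAILQ_LAST require the head structure's tag name):
#include <sys/queue.h>
struct pane {
    TAILQ_ENTRY(pane) entry;
};
TAILQ_HEAD(panes, pane);
/* Circular successor: wrap from the last pane back to the first. */
static struct pane *
pane_next_circular(struct panes *list, struct pane *p)
{
    struct pane *n = TAILQ_NEXT(p, entry);
    return (n != NULL ? n : TAILQ_FIRST(list));
}
/* Circular predecessor: wrap from the first pane back to the last. */
static struct pane *
pane_prev_circular(struct panes *list, struct pane *p)
{
    struct pane *n = TAILQ_PREV(p, panes, entry);
    return (n != NULL ? n : TAILQ_LAST(list, panes));
}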
Example 14: refresh_netif_metrics
void
refresh_netif_metrics(void)
{
#if 0
int i;
int sts;
unsigned long kaddr;
struct ifnethead ifnethead;
struct ifnet ifnet;
struct ifnet *ifp;
static int warn = 0; /* warn once control */
/*
* Not sure that the order of chained netif structs is invariant,
* especially if interfaces are added to the configuration after
* initial system boot ... so mark all the instances as inactive
* and re-match based on the interface name
*/
pmdaCacheOp(indomtab[NETIF_INDOM].it_indom, PMDA_CACHE_INACTIVE);
kaddr = symbols[KERN_IFNET].n_value;
if (kvmp == NULL || kaddr == 0) {
/* no network interface metrics for us today ... */
if ((warn & WARN_INIT) == 0) {
fprintf(stderr, "refresh_netif_metrics: Warning: cannot get any network interface metrics\n");
warn |= WARN_INIT;
}
return;
}
/*
* Kernel data structures for the linked list of network interface
* information.
*
* _ifnet -> struct ifnethead {
* struct ifnet *tqh_first;
* struct ifnet **tqh_last;
* ...
* }
*
* and within an ifnet struct (declared in <net/if_var.h>) we find
* the linked list maintained in if_link, the external interface
* name in if_xname[] and if_data which is a nested if_data stuct
* (declared in <net/if.h>) that contains many of the goodies we're
* after, e.g. u_char ifi_type, u_long ifi_mtu, u_long ifi_baudrate,
* u_long ifi_ipackets, u_long ifi_opackets, u_long ifi_ibytes,
* u_long ifi_obytes, etc.
*/
if (kvm_read(kvmp, kaddr, (char *)&ifnethead, sizeof(ifnethead)) != sizeof(ifnethead)) {
if ((warn & WARN_READ_HEAD) == 0) {
fprintf(stderr, "refresh_netif_metrics: Warning: kvm_read: ifnethead: %s\n", kvm_geterr(kvmp));
warn |= WARN_READ_HEAD;
}
return;
}
for (i = 0; ; i++) {
if (i == 0)
kaddr = (unsigned long)TAILQ_FIRST(&ifnethead);
else
kaddr = (unsigned long)TAILQ_NEXT(&ifnet, if_link);
if (kaddr == 0)
break;
if (kvm_read(kvmp, kaddr, (char *)&ifnet, sizeof(ifnet)) != sizeof(ifnet)) {
fprintf(stderr, "refresh_netif_metrics: Error: kvm_read: ifnet[%d]: %s\n", i, kvm_geterr(kvmp));
return;
}
/* skip network interfaces that are not interesting ... */
if (strcmp(ifnet.if_xname, "lo0") == 0)
continue;
sts = pmdaCacheLookupName(indomtab[NETIF_INDOM].it_indom, ifnet.if_xname, NULL, (void **)&ifp);
if (sts == PMDA_CACHE_ACTIVE) {
fprintf(stderr, "refresh_netif_metrics: Warning: duplicate name (%s) in network interface indom\n", ifnet.if_xname);
continue;
}
else if (sts == PMDA_CACHE_INACTIVE) {
/* reactivate an existing entry */
pmdaCacheStore(indomtab[NETIF_INDOM].it_indom, PMDA_CACHE_ADD, ifnet.if_xname, (void *)ifp);
}
else {
/* new entry */
ifp = (struct ifnet *)malloc(sizeof(*ifp));
if (ifp == NULL) {
fprintf(stderr, "Error: struct ifnet alloc failed for network interface \"%s\"\n", ifnet.if_xname);
__pmNoMem("refresh_netif_metrics", sizeof(*ifp), PM_FATAL_ERR);
/*NOTREACHED*/
}
pmdaCacheStore(indomtab[NETIF_INDOM].it_indom, PMDA_CACHE_ADD, ifnet.if_xname, (void *)ifp);
}
memcpy((void *)ifp, (void *)&ifnet, sizeof(*ifp));
}
#endif
}
Example 15: sleepq_enqueue
//......... some code omitted here .........
#ifdef T2EX
if ( l->l_mutex != NULL ) {
mutex_exit(l->l_mutex);
}
#endif
mutex_enter(&sq_mtx);
while (l->l_wchan) {
if ( hatch ) {
error = cv_timedwait_sig( &sq_cv, &sq_mtx, timo );
}
else {
error = cv_timedwait( &sq_cv, &sq_mtx, timo );
}
if (error == EINTR) {
if (l->l_wchan) {
TAILQ_REMOVE(l->l_sleepq, l, l_sleepchain);
l->l_wchan = NULL;
l->l_sleepq = NULL;
}
}
}
mutex_exit(&sq_mtx);
#ifdef T2EX
l->l_mutex = &spc_lock;
#endif
if (timo != 0) {
/*
* Even if the callout appears to have fired, we need to
* stop it in order to synchronise with other CPUs.
*/
if (callout_halt(&l->l_timeout_ch, NULL)) {
error = EWOULDBLOCK;
}
}
return error;
}
#ifdef T2EX
lwp_t *
sleepq_wake(sleepq_t *sq, wchan_t wchan, u_int expected, kmutex_t *mp)
{
struct lwp *l;
bool found = false;
TAILQ_FOREACH(l, sq, l_sleepchain) {
if (l->l_wchan == wchan) {
found = true;
l->l_wchan = NULL;
}
}
if (found)
cv_broadcast(&sq_cv);
mutex_spin_exit(mp);
return NULL;
}
#else
/*
* sleepq_wake:
*
* Wake zero or more LWPs blocked on a single wait channel.
*/
lwp_t *
sleepq_wake(sleepq_t *sq, wchan_t wchan, u_int expected, kmutex_t *mp)
{
lwp_t *l, *next;
int swapin = 0;
KASSERT(mutex_owned(mp));
for (l = TAILQ_FIRST(sq); l != NULL; l = next) {
KASSERT(l->l_sleepq == sq);
KASSERT(l->l_mutex == mp);
next = TAILQ_NEXT(l, l_sleepchain);
if (l->l_wchan != wchan)
continue;
swapin |= sleepq_remove(sq, l);
if (--expected == 0)
break;
}
mutex_spin_exit(mp);
#if 0
/*
* If there are newly awakened threads that need to be swapped in,
* then kick the swapper into action.
*/
if (swapin)
uvm_kick_scheduler();
#endif
return l;
}
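The non-T2EX sleepq_wake() saves the next pointer before sleepq_remove() unlinks the current LWP; that by-hand lookahead is exactly what the BSD TAILQ_FOREACH_SAFE macro packages up (it exists in the FreeBSD, OpenBSD, and NetBSD <sys/queue.h>, though not in every copy of the header). A sketch with stand-in types:
#include <sys/queue.h>
#include <stdlib.h>
struct waiter {
    int wchan;
    TAILQ_ENTRY(waiter) chain;
};
TAILQ_HEAD(waitq, waiter);
/* Remove all entries matching `wchan`; the lookahead pointer keeps the
 * iteration valid even though the current element is unlinked and freed. */
static void
wake_matching(struct waitq *sq, int wchan)
{
    struct waiter *w, *next;
    TAILQ_FOREACH_SAFE(w, sq, chain, next) {
        if (w->wchan != wchan)
            continue;
        TAILQ_REMOVE(sq, w, chain);
        free(w);
    }
}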