本文整理汇总了C++中TAILQ_INSERT_TAIL函数的典型用法代码示例。如果您正苦于以下问题:C++ TAILQ_INSERT_TAIL函数的具体用法?C++ TAILQ_INSERT_TAIL怎么用?C++ TAILQ_INSERT_TAIL使用的例子?那么, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了TAILQ_INSERT_TAIL函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: lka_submit
/*
 * Create a delivery envelope for the expanded node "xn" according to the
 * matching ruleset entry "rule" and queue it on lks->deliverylist.
 *
 * On lookup or format-expansion failure, sets lks->error to
 * LKA_TEMPFAIL/LKA_PERMFAIL, frees the envelope and returns without
 * queueing anything.  Inconsistent node types abort via fatalx().
 */
static void
lka_submit(struct lka_session *lks, struct rule *rule, struct expandnode *xn)
{
	union lookup lk;
	struct envelope *ep;
	struct expandnode *xn2;
	int r;

	/* Clone the session's envelope as the starting point;
	 * xmemdup() is presumably fatal on OOM -- no NULL check here. */
	ep = xmemdup(&lks->envelope, sizeof *ep, "lka_submit");
	ep->expire = rule->r_qexpire;

	switch (rule->r_action) {
	case A_RELAY:
	case A_RELAYVIA:
		/* Remote delivery: hand the envelope to the MTA agent. */
		if (xn->type != EXPAND_ADDRESS)
			fatalx("lka_deliver: expect address");
		ep->type = D_MTA;
		ep->dest = xn->u.mailaddr;
		ep->agent.mta.relay = rule->r_value.relayhost;

		/* only rewrite if not a bounce (empty sender.user) */
		if (ep->sender.user[0] && rule->r_as && rule->r_as->user[0])
			(void)strlcpy(ep->sender.user, rule->r_as->user,
			    sizeof ep->sender.user);
		if (ep->sender.user[0] && rule->r_as && rule->r_as->domain[0])
			(void)strlcpy(ep->sender.domain, rule->r_as->domain,
			    sizeof ep->sender.domain);
		break;
	case A_NONE:
	case A_MBOX:
	case A_MAILDIR:
	case A_FILENAME:
	case A_MDA:
	case A_LMTP:
		/* Local delivery: hand the envelope to the MDA agent. */
		ep->type = D_MDA;
		ep->dest = lka_find_ancestor(xn, EXPAND_ADDRESS)->u.mailaddr;

		/* set username */
		if ((xn->type == EXPAND_FILTER || xn->type == EXPAND_FILENAME)
		    && xn->alias) {
			/* Aliased filter/file targets run as the smtpd user. */
			(void)strlcpy(ep->agent.mda.username, SMTPD_USER,
			    sizeof(ep->agent.mda.username));
		}
		else {
			/* Otherwise run as the nearest username ancestor. */
			xn2 = lka_find_ancestor(xn, EXPAND_USERNAME);
			(void)strlcpy(ep->agent.mda.username, xn2->u.user,
			    sizeof(ep->agent.mda.username));
		}

		/*
		 * Resolve the username against the rule's user table:
		 * r == -1 is a lookup error (temporary failure),
		 * r == 0 means the user does not exist (permanent).
		 */
		r = table_lookup(rule->r_userbase, ep->agent.mda.username,
		    K_USERINFO, &lk);
		if (r <= 0) {
			lks->error = (r == -1) ? LKA_TEMPFAIL : LKA_PERMFAIL;
			free(ep);
			return;
		}
		(void)strlcpy(ep->agent.mda.usertable, rule->r_userbase->t_name,
		    sizeof ep->agent.mda.usertable);
		/* Canonicalize to the username returned by the lookup. */
		(void)strlcpy(ep->agent.mda.username, lk.userinfo.username,
		    sizeof ep->agent.mda.username);

		/* Pick the MDA method and command/path buffer per node type. */
		if (xn->type == EXPAND_FILENAME) {
			ep->agent.mda.method = A_FILENAME;
			(void)strlcpy(ep->agent.mda.buffer, xn->u.buffer,
			    sizeof ep->agent.mda.buffer);
		}
		else if (xn->type == EXPAND_FILTER) {
			ep->agent.mda.method = A_MDA;
			(void)strlcpy(ep->agent.mda.buffer, xn->u.buffer,
			    sizeof ep->agent.mda.buffer);
		}
		else if (xn->type == EXPAND_USERNAME) {
			ep->agent.mda.method = rule->r_action;
			(void)strlcpy(ep->agent.mda.buffer, rule->r_value.buffer,
			    sizeof ep->agent.mda.buffer);
		}
		else
			fatalx("lka_deliver: bad node type");

		/* Expand %-style sequences in the buffer; 0 means error. */
		r = lka_expand_format(ep->agent.mda.buffer,
		    sizeof(ep->agent.mda.buffer), ep, &lk.userinfo);
		if (!r) {
			lks->error = LKA_TEMPFAIL;
			log_warnx("warn: format string error while"
			    " expanding for user %s", ep->agent.mda.username);
			free(ep);
			return;
		}
		break;
	default:
		fatalx("lka_submit: bad rule action");
	}

	/* Success: ownership of ep passes to the delivery list. */
	TAILQ_INSERT_TAIL(&lks->deliverylist, ep, entry);
}
示例2: perror
//.........这里部分代码省略.........
else {
script_execute(ScriptActionClientBlockLogin, rv->pyobj, NULL);
}
#endif
switch(version) {
case CLIENT_VERSION_DCV1:
case CLIENT_VERSION_DCV2:
case CLIENT_VERSION_PC:
/* Generate the encryption keys for the client and server. */
client_seed_dc = mt19937_genrand_int32(rng);
server_seed_dc = mt19937_genrand_int32(rng);
CRYPT_CreateKeys(&rv->skey, &server_seed_dc, CRYPT_PC);
CRYPT_CreateKeys(&rv->ckey, &client_seed_dc, CRYPT_PC);
/* Send the client the welcome packet, or die trying. */
if(send_dc_welcome(rv, server_seed_dc, client_seed_dc)) {
goto err;
}
break;
case CLIENT_VERSION_GC:
case CLIENT_VERSION_EP3:
/* Generate the encryption keys for the client and server. */
client_seed_dc = mt19937_genrand_int32(rng);
server_seed_dc = mt19937_genrand_int32(rng);
CRYPT_CreateKeys(&rv->skey, &server_seed_dc, CRYPT_GAMECUBE);
CRYPT_CreateKeys(&rv->ckey, &client_seed_dc, CRYPT_GAMECUBE);
/* Send the client the welcome packet, or die trying. */
if(send_dc_welcome(rv, server_seed_dc, client_seed_dc)) {
goto err;
}
break;
case CLIENT_VERSION_BB:
/* Generate the encryption keys for the client and server. */
for(i = 0; i < 48; i += 4) {
client_seed_dc = mt19937_genrand_int32(rng);
server_seed_dc = mt19937_genrand_int32(rng);
client_seed_bb[i + 0] = (uint8_t)(client_seed_dc >> 0);
client_seed_bb[i + 1] = (uint8_t)(client_seed_dc >> 8);
client_seed_bb[i + 2] = (uint8_t)(client_seed_dc >> 16);
client_seed_bb[i + 3] = (uint8_t)(client_seed_dc >> 24);
server_seed_bb[i + 0] = (uint8_t)(server_seed_dc >> 0);
server_seed_bb[i + 1] = (uint8_t)(server_seed_dc >> 8);
server_seed_bb[i + 2] = (uint8_t)(server_seed_dc >> 16);
server_seed_bb[i + 3] = (uint8_t)(server_seed_dc >> 24);
}
CRYPT_CreateKeys(&rv->skey, server_seed_bb, CRYPT_BLUEBURST);
CRYPT_CreateKeys(&rv->ckey, client_seed_bb, CRYPT_BLUEBURST);
rv->hdr_size = 8;
/* Send the client the welcome packet, or die trying. */
if(send_bb_welcome(rv, server_seed_bb, client_seed_bb)) {
goto err;
}
break;
}
/* Insert it at the end of our list, and we're done. */
if(type == CLIENT_TYPE_BLOCK) {
pthread_rwlock_wrlock(&block->lock);
TAILQ_INSERT_TAIL(clients, rv, qentry);
++block->num_clients;
pthread_rwlock_unlock(&block->lock);
}
else {
TAILQ_INSERT_TAIL(clients, rv, qentry);
}
ship_inc_clients(ship);
return rv;
err:
close(sock);
if(type == CLIENT_TYPE_BLOCK) {
free(rv->enemy_kills);
free(rv->pl);
}
#ifdef HAVE_PYTHON
client_pyobj_invalidate(rv);
Py_XDECREF(rv->pyobj);
#endif
pthread_mutex_destroy(&rv->mutex);
free(rv);
return NULL;
}
示例3: cluster_rbuild
//.........这里部分代码省略.........
tsize = size;
VM_OBJECT_LOCK(tbp->b_bufobj->bo_object);
for (j = 0; tsize > 0; j++) {
toff = off & PAGE_MASK;
tinc = tsize;
if (toff + tinc > PAGE_SIZE)
tinc = PAGE_SIZE - toff;
VM_OBJECT_LOCK_ASSERT(tbp->b_pages[j]->object,
MA_OWNED);
if ((tbp->b_pages[j]->valid &
vm_page_bits(toff, tinc)) != 0)
break;
off += tinc;
tsize -= tinc;
}
VM_OBJECT_UNLOCK(tbp->b_bufobj->bo_object);
if (tsize > 0) {
bqrelse(tbp);
break;
}
/*
* Set a read-ahead mark as appropriate
*/
if ((fbp && (i == 1)) || (i == (run - 1)))
tbp->b_flags |= B_RAM;
/*
* Set the buffer up for an async read (XXX should
* we do this only if we do not wind up brelse()ing?).
* Set the block number if it isn't set, otherwise
* if it is make sure it matches the block number we
* expect.
*/
tbp->b_flags |= B_ASYNC;
tbp->b_iocmd = BIO_READ;
if (tbp->b_blkno == tbp->b_lblkno) {
tbp->b_blkno = bn;
} else if (tbp->b_blkno != bn) {
brelse(tbp);
break;
}
}
/*
* XXX fbp from caller may not be B_ASYNC, but we are going
* to biodone() it in cluster_callback() anyway
*/
BUF_KERNPROC(tbp);
TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
tbp, b_cluster.cluster_entry);
VM_OBJECT_LOCK(tbp->b_bufobj->bo_object);
for (j = 0; j < tbp->b_npages; j += 1) {
vm_page_t m;
m = tbp->b_pages[j];
vm_page_io_start(m);
vm_object_pip_add(m->object, 1);
if ((bp->b_npages == 0) ||
(bp->b_pages[bp->b_npages-1] != m)) {
bp->b_pages[bp->b_npages] = m;
bp->b_npages++;
}
if (m->valid == VM_PAGE_BITS_ALL)
tbp->b_pages[j] = bogus_page;
}
VM_OBJECT_UNLOCK(tbp->b_bufobj->bo_object);
/*
* Don't inherit tbp->b_bufsize as it may be larger due to
* a non-page-aligned size. Instead just aggregate using
* 'size'.
*/
if (tbp->b_bcount != size)
printf("warning: tbp->b_bcount wrong %ld vs %ld\n", tbp->b_bcount, size);
if (tbp->b_bufsize != size)
printf("warning: tbp->b_bufsize wrong %ld vs %ld\n", tbp->b_bufsize, size);
bp->b_bcount += size;
bp->b_bufsize += size;
}
/*
* Fully valid pages in the cluster are already good and do not need
* to be re-read from disk. Replace the page with bogus_page
*/
VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
for (j = 0; j < bp->b_npages; j++) {
VM_OBJECT_LOCK_ASSERT(bp->b_pages[j]->object, MA_OWNED);
if (bp->b_pages[j]->valid == VM_PAGE_BITS_ALL)
bp->b_pages[j] = bogus_page;
}
VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
if (bp->b_bufsize > bp->b_kvasize)
panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
bp->b_bufsize, bp->b_kvasize);
bp->b_kvasize = bp->b_bufsize;
if ((bp->b_flags & B_UNMAPPED) == 0) {
pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
(vm_page_t *)bp->b_pages, bp->b_npages);
}
return (bp);
}
示例4: main
//.........这里部分代码省略.........
case 'm':
OverrideMachine = optarg;
break;
case 'n':
Fake = TRUE;
Verbose = TRUE;
break;
case 'p':
Prefix = optarg;
break;
case 'U':
ReplaceSame = 1;
Replace = 1;
break;
case 'u':
Replace = 1;
break;
case 'V':
show_version();
/* NOTREACHED */
case 'v':
Verbose = TRUE;
break;
case 'W':
Viewbase = optarg;
break;
case 'w':
View = optarg;
break;
case 'h':
case '?':
default:
usage();
break;
}
}
argc -= optind;
argv += optind;
pkg_install_config();
if (Destdir != NULL) {
char *pkgdbdir;
pkgdbdir = xasprintf("%s/%s", Destdir, config_pkg_dbdir);
pkgdb_set_dir(pkgdbdir, 4);
free(pkgdbdir);
}
process_pkg_path();
TAILQ_INIT(&pkgs);
if (argc == 0) {
/* If no packages, yelp */
warnx("missing package name(s)");
usage();
}
if (strcasecmp(do_license_check, "no") == 0)
LicenseCheck = 0;
else if (strcasecmp(do_license_check, "yes") == 0)
LicenseCheck = 1;
else if (strcasecmp(do_license_check, "always") == 0)
LicenseCheck = 2;
else
errx(1, "Unknown value of the configuration variable"
"CHECK_LICENSE");
if (LicenseCheck)
load_license_lists();
/* Get all the remaining package names, if any */
for (; argc > 0; --argc, ++argv) {
lpkg_t *lpp;
if (IS_STDIN(*argv))
lpp = alloc_lpkg("-");
else
lpp = alloc_lpkg(*argv);
TAILQ_INSERT_TAIL(&pkgs, lpp, lp_link);
}
error += pkg_perform(&pkgs);
if (error != 0) {
warnx("%d package addition%s failed", error, error == 1 ? "" : "s");
exit(1);
}
exit(0);
}
示例5: usdf_msg_sendmsg
/*
 * fi_sendmsg() implementation for the usdf MSG endpoint.
 *
 * Validates flags, optionally copies the payload into the work-queue
 * entry's inject buffer (FI_INJECT), fills in a TX wqe describing the
 * iov, posts it on the endpoint and kicks domain progress.
 *
 * Returns 0 on success, -FI_EOPNOTSUPP for unsupported flags,
 * -FI_EINVAL when an FI_INJECT payload exceeds
 * USDF_MSG_MAX_INJECT_SIZE, or -FI_EAGAIN when no free wqe is
 * available.
 */
ssize_t
usdf_msg_sendmsg(struct fid_ep *fep, const struct fi_msg *msg, uint64_t flags)
{
	size_t i;
	struct usdf_ep *ep;
	struct usdf_tx *tx;
	struct usdf_msg_qe *wqe;
	struct usdf_domain *udp;
	size_t tot_len;
	const struct iovec *iov;

	ep = ep_ftou(fep);
	tx = ep->ep_tx;
	udp = ep->ep_domain;
	iov = msg->msg_iov;

	/* Reject any flag outside the supported set. */
	if (flags & ~USDF_MSG_SUPP_SENDMSG_FLAGS) {
		USDF_DBG_SYS(EP_DATA,
			"one or more flags in %#" PRIx64 " not supported\n",
			flags);
		return -FI_EOPNOTSUPP;
	}

	/* check for inject overrun before acquiring lock and allocating wqe,
	 * easier to unwind this way */
	if (flags & FI_INJECT) {
		iov = msg->msg_iov;
		tot_len = 0;
		for (i = 0; i < msg->iov_count; ++i) {
			tot_len += iov[i].iov_len;
			if (tot_len > USDF_MSG_MAX_INJECT_SIZE) {
				USDF_DBG_SYS(EP_DATA, "max inject len exceeded (%zu)\n",
						tot_len);
				return -FI_EINVAL;
			}
		}
	}

	/* NOTE(review): this emptiness check runs before dom_progress_lock is
	 * taken; presumably only this path consumes free wqes, so a stale
	 * "non-empty" read cannot occur -- confirm against the progress code. */
	if (TAILQ_EMPTY(&tx->t.msg.tx_free_wqe)) {
		return -FI_EAGAIN;
	}

	pthread_spin_lock(&udp->dom_progress_lock);

	wqe = usdf_msg_get_tx_wqe(tx);

	wqe->ms_context = msg->context;
	if (flags & FI_INJECT) {
		/* Copy the user data into the wqe so the caller may reuse its
		 * buffers immediately; describe it as a single iov. */
		tot_len = 0;
		for (i = 0; i < msg->iov_count; ++i) {
			assert(tot_len + iov[i].iov_len <= USDF_MSG_MAX_INJECT_SIZE);
			memcpy(&wqe->ms_inject_buf[tot_len], iov[i].iov_base,
				iov[i].iov_len);
			tot_len += iov[i].iov_len;
		}
		wqe->ms_iov[0].iov_base = wqe->ms_inject_buf;
		wqe->ms_iov[0].iov_len = tot_len;
		wqe->ms_last_iov = 0;
	} else {
		/* Reference the caller's iovs directly (zero copy). */
		tot_len = 0;
		for (i = 0; i < msg->iov_count; ++i) {
			wqe->ms_iov[i].iov_base = (void *)iov[i].iov_base;
			wqe->ms_iov[i].iov_len = iov[i].iov_len;
			tot_len += iov[i].iov_len;
		}
		wqe->ms_last_iov = msg->iov_count - 1;
	}

	/* Initialize the transmit progress cursors.
	 * NOTE(review): for FI_INJECT, ms_cur_ptr points at the caller's
	 * iov[0], not ms_inject_buf -- confirm the progress path re-reads
	 * ms_iov before transmitting. */
	wqe->ms_cur_iov = 0;
	wqe->ms_resid = tot_len;
	wqe->ms_length = tot_len;
	wqe->ms_cur_ptr = iov[0].iov_base;
	wqe->ms_iov_resid = iov[0].iov_len;

	/* "||" binds tighter than "?:", so this is (a || b) ? 1 : 0. */
	wqe->ms_signal_comp = ep->ep_tx_dflt_signal_comp ||
		(flags & FI_COMPLETION) ? 1 : 0;

	/* add send to EP, and add EP to TX list if not present */
	TAILQ_INSERT_TAIL(&ep->e.msg.ep_posted_wqe, wqe, ms_link);
	usdf_msg_ep_ready(ep);

	pthread_spin_unlock(&udp->dom_progress_lock);

	/* Give progress a chance to start the send immediately. */
	usdf_domain_progress(udp);

	return 0;
}
示例6: nvmed_get_cache
/* Get CACHE from free list or evict */
NVMED_CACHE* nvmed_get_cache(NVMED_HANDLE* nvmed_handle) {
NVMED* nvmed = HtoD(nvmed_handle);
NVMED_CACHE *cache = nvmed->free_head.tqh_first;
NVMED_CACHE *__cache;
NVMED_CACHE *ret_cache;
int i;
unsigned int start_lpaddr, end_lpaddr;
TAILQ_HEAD(cache_list, nvmed_cache) temp_head;
pthread_rwlock_wrlock(&nvmed->cache_radix_lock);
pthread_spin_lock(&nvmed->cache_list_lock);
if(cache==NULL) {
//HEAD -> LRU, //TAIL -> MRU
//EVICT - LRU
cache = nvmed->lru_head.tqh_first;
if(!FLAG_ISSET(cache, CACHE_DIRTY)) {
TAILQ_REMOVE(&nvmed->lru_head, cache, cache_list);
LIST_REMOVE(cache, handle_cache_list);
radix_tree_delete(&nvmed->cache_root, cache->lpaddr);
FLAG_SET_FORCE(cache, 0);
ret_cache = cache;
}
else {
TAILQ_INIT(&temp_head);
while(FLAG_ISSET_SYNC(cache, CACHE_LOCKED) || cache->ref != 0) {
usleep(1);
}
start_lpaddr = cache->lpaddr;
end_lpaddr = cache->lpaddr;
__cache = cache->cache_list.tqe_next;
TAILQ_REMOVE(&nvmed->lru_head, cache, cache_list);
radix_tree_delete(&nvmed->cache_root, cache->lpaddr);
TAILQ_INSERT_HEAD(&temp_head, cache, cache_list);
for(i=1; i<NVMED_CACHE_FORCE_EVICT_MAX; i++) {
cache = __cache;
if(FLAG_ISSET_SYNC(cache, CACHE_LOCKED)) break;
if(!FLAG_ISSET(cache, CACHE_DIRTY)) break;
if(start_lpaddr != 0 && cache->lpaddr == start_lpaddr-1 ) {
//front_merge
start_lpaddr--;
__cache = cache->cache_list.tqe_next;
TAILQ_REMOVE(&nvmed->lru_head, cache, cache_list);
radix_tree_delete(&nvmed->cache_root, cache->lpaddr);
TAILQ_INSERT_HEAD(&temp_head, cache, cache_list);
continue;
}
else if(cache->lpaddr == end_lpaddr+1) {
//back_merge
end_lpaddr++;
__cache = cache->cache_list.tqe_next;
TAILQ_REMOVE(&nvmed->lru_head, cache, cache_list);
radix_tree_delete(&nvmed->cache_root, cache->lpaddr);
TAILQ_INSERT_TAIL(&temp_head, cache, cache_list);
continue;
}
else {
break;
}
}
if(FLAG_ISSET(cache, CACHE_DIRTY))
nvmed_cache_io_rw(nvmed_handle, nvme_cmd_write, temp_head.tqh_first,
start_lpaddr * PAGE_SIZE, (end_lpaddr - start_lpaddr) * PAGE_SIZE,
HANDLE_SYNC_IO);
cache = temp_head.tqh_first;
FLAG_SET_FORCE(cache, 0);
ret_cache = cache;
TAILQ_REMOVE(&temp_head, cache, cache_list);
LIST_REMOVE(cache, handle_cache_list);
while(temp_head.tqh_first != NULL) {
TAILQ_REMOVE(&temp_head, temp_head.tqh_first, cache_list);
TAILQ_INSERT_HEAD(&nvmed->free_head, temp_head.tqh_first, cache_list);
nvmed->num_cache_usage--;
}
}
}
else {
// Remove From Free Queue
TAILQ_REMOVE(&nvmed->free_head, cache, cache_list);
FLAG_UNSET_SYNC(cache, CACHE_FREE);
if(FLAG_ISSET(cache, CACHE_UNINIT)) {
memset(cache->ptr, 0, PAGE_SIZE);
virt_to_phys(nvmed, cache->ptr, &cache->paddr, 4096);
FLAG_UNSET_SYNC(cache, CACHE_UNINIT);
}
ret_cache = cache;
}
//.........这里部分代码省略.........
示例7: cvs_trigger_getlines
struct trigger_list *
cvs_trigger_getlines(char * file, char * repo)
{
FILE *fp;
int allow_all, lineno, match = 0;
size_t len;
regex_t preg;
struct trigger_list *list;
struct trigger_line *tline;
char fpath[PATH_MAX];
char *currentline, *defaultline = NULL, *nline, *p, *q, *regex;
if (strcmp(file, CVS_PATH_EDITINFO) == 0 ||
strcmp(file, CVS_PATH_VERIFYMSG) == 0)
allow_all = 0;
else
allow_all = 1;
(void)xsnprintf(fpath, PATH_MAX, "%s/%s", current_cvsroot->cr_dir,
file);
if ((fp = fopen(fpath, "r")) == NULL) {
if (errno != ENOENT)
cvs_log(LP_ERRNO, "cvs_trigger_getlines: %s", file);
return (NULL);
}
list = xmalloc(sizeof(*list));
TAILQ_INIT(list);
lineno = 0;
nline = NULL;
while ((currentline = fgetln(fp, &len)) != NULL) {
if (currentline[len - 1] == '\n') {
currentline[len - 1] = '\0';
} else {
nline = xmalloc(len + 1);
memcpy(nline, currentline, len);
nline[len] = '\0';
currentline = nline;
}
lineno++;
for (p = currentline; isspace((unsigned char)*p); p++)
;
if (*p == '\0' || *p == '#')
continue;
for (q = p; !isspace((unsigned char)*q) && *q != '\0'; q++)
;
if (*q == '\0')
goto bad;
*q++ = '\0';
regex = p;
for (; isspace((unsigned char)*q); q++)
;
if (*q == '\0')
goto bad;
if (strcmp(regex, "ALL") == 0 && allow_all) {
tline = xmalloc(sizeof(*tline));
tline->line = xstrdup(q);
TAILQ_INSERT_TAIL(list, tline, flist);
} else if (defaultline == NULL && !match &&
strcmp(regex, "DEFAULT") == 0) {
defaultline = xstrdup(q);
} else if (!match) {
if (regcomp(&preg, regex, REG_NOSUB|REG_EXTENDED))
goto bad;
if (regexec(&preg, repo, 0, NULL, 0) != REG_NOMATCH) {
match = 1;
tline = xmalloc(sizeof(*tline));
tline->line = xstrdup(q);
TAILQ_INSERT_HEAD(list, tline, flist);
}
regfree(&preg);
}
}
free(nline);
if (defaultline != NULL) {
if (!match) {
tline = xmalloc(sizeof(*tline));
tline->line = defaultline;
TAILQ_INSERT_HEAD(list, tline, flist);
} else
free(defaultline);
}
(void)fclose(fp);
//.........这里部分代码省略.........
示例8: tvhlogv
/*
 * Core logging entry point: format one log message and either queue it
 * for the background logger thread or, when the thread is not running,
 * write it out synchronously.
 *
 * Messages above the configured level, or debug/trace messages for
 * subsystems that are not enabled in tvhlog_trace/tvhlog_debug, are
 * silently dropped.  Once the queue overflows, further messages are
 * discarded until the queue drains (tvhlog_queue_full gate).
 */
void tvhlogv ( const char *file, int line,
               int notify, int severity,
               const char *subsys, const char *fmt, va_list *args )
{
	int ok, options;
	size_t l;
	char buf[1024];

	pthread_mutex_lock(&tvhlog_mutex);

	/* Check for full */
	if (tvhlog_queue_full || !tvhlog_run) {
		pthread_mutex_unlock(&tvhlog_mutex);
		return;
	}

	/* Check debug enabled (and cache config) */
	options = tvhlog_options;
	if (severity >= LOG_DEBUG) {
		ok = 0;
		if (severity <= tvhlog_level) {
			/* Trace map first: "all" provides a default that a
			 * per-subsystem entry can override. */
			if (tvhlog_trace) {
				ok = htsmsg_get_u32_or_default(tvhlog_trace, "all", 0);
				ok = htsmsg_get_u32_or_default(tvhlog_trace, subsys, ok);
			}
			/* Fall back to the debug map for plain LOG_DEBUG. */
			if (!ok && severity == LOG_DEBUG && tvhlog_debug) {
				ok = htsmsg_get_u32_or_default(tvhlog_debug, "all", 0);
				ok = htsmsg_get_u32_or_default(tvhlog_debug, subsys, ok);
			}
		}
	} else {
		ok = 1;
	}

	/* Ignore */
	if (!ok) {
		pthread_mutex_unlock(&tvhlog_mutex);
		return;
	}

	/* FULL: queue just hit its cap -- replace this message with an
	 * overflow marker and set the gate that drops later messages. */
	if (tvhlog_queue_size == TVHLOG_QUEUE_MAXSIZE) {
		tvhlog_queue_full = 1;
		fmt = "log buffer full";
		args = NULL;
		severity = LOG_ERR;
	}

	/* Basic message */
	l = 0;
	if (options & TVHLOG_OPT_THREAD) {
		l += snprintf(buf + l, sizeof(buf) - l, "tid %ld: ", (long)pthread_self());
	}
	l += snprintf(buf + l, sizeof(buf) - l, "%s: ", subsys);
	/* "&" binds tighter than "&&": (options & OPT) && (severity >= DEBUG). */
	if (options & TVHLOG_OPT_FILELINE && severity >= LOG_DEBUG)
		l += snprintf(buf + l, sizeof(buf) - l, "(%s:%d) ", file, line);
	if (args)
		l += vsnprintf(buf + l, sizeof(buf) - l, fmt, *args);
	else
		l += snprintf(buf + l, sizeof(buf) - l, "%s", fmt);

	/* Store */
	/* NOTE(review): calloc()/strdup() results are unchecked; an OOM here
	 * would crash on the dereference below. */
	tvhlog_msg_t *msg = calloc(1, sizeof(tvhlog_msg_t));
	gettimeofday(&msg->time, NULL);
	msg->msg = strdup(buf);
	msg->severity = severity;
	msg->notify = notify;
#if TVHLOG_THREAD
	if (tvhlog_run) {
		/* Hand off to the logger thread. */
		TAILQ_INSERT_TAIL(&tvhlog_queue, msg, link);
		tvhlog_queue_size++;
		pthread_cond_signal(&tvhlog_cond);
	} else {
#endif
		/* Synchronous fallback: write the message out right here. */
		FILE *fp = NULL;
		tvhlog_process(msg, tvhlog_options, &fp, tvhlog_path);
		if (fp) fclose(fp);
#if TVHLOG_THREAD
	}
#endif
	pthread_mutex_unlock(&tvhlog_mutex);
}
示例9: hyfi_aggr_queue_pkt
/*
 * Queue an out-of-order aggregation packet until its 14-bit sequence
 * number (seq & 0x3FFF) can be delivered in order.
 *
 * Each hash-table entry keeps one queue per receiving interface
 * (ha->aggr_rx_entry->hyfi_iface_info[]).  The packet is appended to
 * its interface's queue, claiming a free slot on first use.  On
 * success *skb is consumed (set to NULL) and 0 is returned.  Returns
 * -1 when no queue slot is available, or when a full queue had to be
 * flushed and the caller should forward the current packet itself.
 */
static int hyfi_aggr_queue_pkt(struct net_hatbl_entry *ha, struct sk_buff **skb,
	u_int16_t seq)
{
	u_int32_t i, idx = HYFI_AGGR_MAX_IFACES;

	/* Find the queue for that interface */
	for (i = 0; i < ha->aggr_rx_entry->num_ifs; i++) {
		if (ha->aggr_rx_entry->hyfi_iface_info[i].ifindex
			== (*skb)->dev->ifindex) {
			/* Found */
			idx = i;
			break;
		}
	}

	/* If no queue exists, assign an empty queue to that interface */
	if (idx == HYFI_AGGR_MAX_IFACES) {
		for (i = 0; i < ha->aggr_rx_entry->num_ifs; i++) {
			if (!ha->aggr_rx_entry->hyfi_iface_info[i].ifindex) {
				/* Assign to the interface */
				ha->aggr_rx_entry->hyfi_iface_info[i].ifindex =
					(*skb)->dev->ifindex;
				ha->aggr_rx_entry->hyfi_iface_info[i].pkt_cnt = 0;
				ha->aggr_rx_entry->hyfi_iface_info[i].seq_valid = 0;
				idx = i;
				break;
			}
		}
	}

	if (unlikely(idx >= HYFI_AGGR_MAX_IFACES)) {
		/* Defensive check, in case there is no available queue (unlikely)
		 * we should just finish here.
		 */
		return -1;
	} else {
		struct hyfi_skb_aggr_q *skb_aggr_q;
		/* Per-packet queue bookkeeping lives in the skb's headroom
		 * ((*skb)->head), so no extra allocation is needed here. */
		struct hyfi_aggr_skb_buffer *hyfi_aggr_skb_buffer =
			(struct hyfi_aggr_skb_buffer *) (*skb)->head;

#ifdef HYFI_AGGR_NOISY_DEBUG /* Be careful, noisy and damages high rate flows */
		DPRINTK( "%s: Queue future packet, ha = %p, iface = %s, seq = %d, next_seq = %d\n",
			__func__, ha, (*skb)->dev->name, (seq & 0x3FFF), ha->aggr_rx_entry->aggr_next_seq );
#endif

		/* Push the skb in the queue */
		skb_aggr_q = &ha->aggr_rx_entry->hyfi_iface_info[idx].skb_aggr_q;
		hyfi_aggr_skb_buffer->pkt_seq = (seq & 0x3FFF);
		hyfi_aggr_skb_buffer->skb = *skb;

		TAILQ_INSERT_TAIL( skb_aggr_q, hyfi_aggr_skb_buffer, skb_aggr_qelem);
		ha->aggr_rx_entry->hyfi_iface_info[idx].pkt_cnt++;

		/* First packet on this queue defines its head sequence. */
		if (ha->aggr_rx_entry->hyfi_iface_info[idx].pkt_cnt == 1) {
			ha->aggr_rx_entry->hyfi_iface_info[idx].seq = (seq & 0x3FFF);
			ha->aggr_rx_entry->hyfi_iface_info[idx].seq_valid = 1;
		}

		/* Ownership of the skb has moved to the queue. */
		*skb = NULL;
	}

	/* Sanity check: if we reached the maximum packets quota per queue, then
	 * something is really off. flush everything and restart (may be blunt, could
	 * be revisited in the future).
	 */
	if (unlikely(
		ha->aggr_rx_entry->hyfi_iface_info[idx].pkt_cnt
			> HYFI_AGGR_MAX_QUEUE_LEN)) {
		u_int16_t next_seq;

		DPRINTK( "%s: Queue %d is full, flush and recover\n", __func__, idx);
		hyfi_aggr_reset_queues(ha, seq);
		ha->aggr_rx_entry->time_stamp = jiffies;

		/* Handle the gap, dequeue the next available packets */
		next_seq = hyfi_aggr_find_next_seq(ha);

		if (likely(next_seq != (u_int16_t) ~0)) {
			/* Update our next sequence variable to the closest
			 * sequence number we have in the queues.
			 */
			ha->aggr_rx_entry->aggr_next_seq = next_seq;
			DPRINTK( "%s: Next sequence to dequeue: %d\n", __func__, next_seq);
		} else {
			ha->aggr_rx_entry->next_seq_valid = 0;
			DPRINTK(
				"%s: Next sequence is unavailable, forward current packet\n",
				__func__);
			return -1;
		}
	}

	return 0;
}
示例10: pft_refresh
/*
 * Refresh the cached snapshot of pf table statistics (pft_table) from
 * the kernel via DIOCRGETTSTATS, unless the cache is already current
 * for this tick.  Returns 0 on success, -1 on allocation/ioctl failure
 * (in which case the cache is left empty).
 */
static int
pft_refresh(void)
{
	struct pfioc_table io;
	struct pfr_tstats *stats = NULL;
	struct pft_entry *entry;
	int idx, count = 1;

	/* Cache still valid for this tick: nothing to do. */
	if (started && this_tick <= pf_tick)
		return (0);

	/* Throw away the previous snapshot. */
	while ((entry = TAILQ_FIRST(&pft_table)) != NULL) {
		TAILQ_REMOVE(&pft_table, entry, link);
		free(entry);
	}

	memset(&io, 0, sizeof(io));
	io.pfrio_esize = sizeof(struct pfr_tstats);

	/* Keep enlarging the buffer until the kernel's count fits. */
	for (;;) {
		stats = reallocf(stats, count * sizeof(struct pfr_tstats));
		if (stats == NULL) {
			syslog(LOG_ERR, "pft_refresh(): reallocf() numtbls=%d: %s",
			    count, strerror(errno));
			goto fail;
		}
		io.pfrio_size = count;
		io.pfrio_buffer = stats;

		if (ioctl(dev, DIOCRGETTSTATS, &io)) {
			syslog(LOG_ERR, "pft_refresh(): ioctl(): %s",
			    strerror(errno));
			goto fail;
		}

		if (count >= io.pfrio_size)
			break;
		count = io.pfrio_size;
	}

	/* Rebuild the SNMP table from the kernel snapshot. */
	for (idx = 0; idx < count; idx++) {
		entry = malloc(sizeof(struct pft_entry));
		if (entry == NULL)
			goto fail_list;
		entry->index = idx + 1;
		memcpy(&entry->pft, stats + idx, sizeof(struct pfr_tstats));
		TAILQ_INSERT_TAIL(&pft_table, entry, link);
	}

	pft_table_age = time(NULL);
	pft_table_count = count;
	pf_tick = this_tick;

	free(stats);
	return (0);

fail_list:
	/* Partial rebuild: drop whatever made it onto the list. */
	while ((entry = TAILQ_FIRST(&pft_table)) != NULL) {
		TAILQ_REMOVE(&pft_table, entry, link);
		free(entry);
	}
fail:
	free(stats);
	return (-1);
}
示例11: pfa_table_addrs
/*
 * Fetch the per-address statistics of pf table "pt" via DIOCRGETASTATS
 * and append one pfa_entry per address to the global pfa_table,
 * assigning SNMP indices starting at "sidx".
 *
 * Returns the number of addresses appended, or -1 on allocation/ioctl
 * failure.  The scan stops early (without error) at the first address
 * that is neither AF_INET nor AF_INET6; entries appended before a
 * mid-scan failure remain on the list.
 */
static int
pfa_table_addrs(u_int sidx, struct pfr_table *pt)
{
	struct pfioc_table io;
	struct pfr_astats *t = NULL;
	struct pfa_entry *e;
	int i, numaddrs = 1;

	if (pt == NULL)
		return (-1);

	memset(&io, 0, sizeof(io));
	strlcpy(io.pfrio_table.pfrt_name, pt->pfrt_name,
	    sizeof(io.pfrio_table.pfrt_name));

	/* Grow the stats buffer until the kernel reports it all fit. */
	for (;;) {
		t = reallocf(t, numaddrs * sizeof(struct pfr_astats));
		if (t == NULL) {
			syslog(LOG_ERR, "pfa_table_addrs(): reallocf(): %s",
			    strerror(errno));
			numaddrs = -1;
			goto error;
		}

		/*
		 * BUGFIX: zero the whole buffer, not just the first element.
		 * The previous memset(t, 0, sizeof(*t)) cleared only one
		 * struct pfr_astats, leaving the remainder of a grown
		 * buffer uninitialized before the ioctl.
		 */
		memset(t, 0, numaddrs * sizeof(struct pfr_astats));

		io.pfrio_size = numaddrs;
		io.pfrio_buffer = t;
		io.pfrio_esize = sizeof(struct pfr_astats);

		if (ioctl(dev, DIOCRGETASTATS, &io)) {
			syslog(LOG_ERR, "pfa_table_addrs(): ioctl() on %s: %s",
			    pt->pfrt_name, strerror(errno));
			numaddrs = -1;
			break;
		}

		if (numaddrs >= io.pfrio_size)
			break;

		numaddrs = io.pfrio_size;
	}

	/* Convert each returned address into a pfa_entry on the list. */
	for (i = 0; i < numaddrs; i++) {
		/* Stop at the first entry that is neither IPv4 nor IPv6. */
		if ((t + i)->pfras_a.pfra_af != AF_INET &&
		    (t + i)->pfras_a.pfra_af != AF_INET6) {
			numaddrs = i;
			break;
		}

		e = (struct pfa_entry *)malloc(sizeof(struct pfa_entry));
		if (e == NULL) {
			syslog(LOG_ERR, "pfa_table_addrs(): malloc(): %s",
			    strerror(errno));
			numaddrs = -1;
			break;
		}
		e->index = sidx + i;
		memcpy(&e->pfas, t + i, sizeof(struct pfr_astats));
		TAILQ_INSERT_TAIL(&pfa_table, e, link);
	}

	free(t);

error:
	return (numaddrs);
}
示例12: pfi_refresh
/*
 * Refresh the cached snapshot of pf interfaces (pfi_table) from the
 * kernel via DIOCIGETIFACES, unless the cache is already current for
 * this tick.  Returns 0 on success, -1 on allocation/ioctl failure
 * (in which case the cache is left empty).
 */
static int
pfi_refresh(void)
{
	struct pfioc_iface io;
	struct pfi_kif *kifs = NULL;
	struct pfi_entry *entry;
	int idx, count = 1;

	/* Cache still valid for this tick: nothing to do. */
	if (started && this_tick <= pf_tick)
		return (0);

	/* Throw away the previous snapshot. */
	while ((entry = TAILQ_FIRST(&pfi_table)) != NULL) {
		TAILQ_REMOVE(&pfi_table, entry, link);
		free(entry);
	}

	memset(&io, 0, sizeof(io));
	io.pfiio_esize = sizeof(struct pfi_kif);

	/* Keep enlarging the buffer until the kernel's count fits. */
	for (;;) {
		kifs = reallocf(kifs, count * sizeof(struct pfi_kif));
		if (kifs == NULL) {
			syslog(LOG_ERR, "pfi_refresh(): reallocf() numifs=%d: %s",
			    count, strerror(errno));
			goto fail;
		}
		io.pfiio_size = count;
		io.pfiio_buffer = kifs;

		if (ioctl(dev, DIOCIGETIFACES, &io)) {
			syslog(LOG_ERR, "pfi_refresh(): ioctl(): %s",
			    strerror(errno));
			goto fail;
		}

		if (count >= io.pfiio_size)
			break;
		count = io.pfiio_size;
	}

	/* Rebuild the SNMP table from the kernel snapshot. */
	for (idx = 0; idx < count; idx++) {
		entry = malloc(sizeof(struct pfi_entry));
		if (entry == NULL)
			goto fail_list;
		entry->index = idx + 1;
		memcpy(&entry->pfi, kifs + idx, sizeof(struct pfi_kif));
		TAILQ_INSERT_TAIL(&pfi_table, entry, link);
	}

	pfi_table_age = time(NULL);
	pfi_table_count = count;
	pf_tick = this_tick;

	free(kifs);
	return (0);

fail_list:
	/* Partial rebuild: drop whatever made it onto the list. */
	while ((entry = TAILQ_FIRST(&pfi_table)) != NULL) {
		TAILQ_REMOVE(&pfi_table, entry, link);
		free(entry);
	}
fail:
	free(kifs);
	return (-1);
}
示例13: parse_schedule
/*
 * Parse a schedule specification into the global `schedule' list.
 *
 * `string' is a '/'-separated sequence of items, each handed to
 * parse_schedule_item().  With no '/' present, a default two-step
 * schedule is built instead: one SC_SIGNAL item carrying the
 * caller-supplied `timeout' value, then one SC_TIMEOUT item whose
 * delay is parsed from `string' (or 5 if `string' is NULL).
 * NOTE(review): for SC_SIGNAL, `timeout' presumably holds a signal
 * number despite its name -- confirm with the callers.
 *
 * A `forever' item (SC_FOREVER) may appear at most once; if present, a
 * trailing SC_GOTO item is appended so execution loops back to it.
 * Exits the program via eerrorx() on malformed input.
 */
static void
parse_schedule(const char *string, int timeout)
{
	char buffer[20];
	const char *slash;
	int count = 0;
	SCHEDULEITEM *repeatat = NULL;
	size_t len;
	SCHEDULEITEM *item;

	/* Count the separators to detect the "single timeout" shorthand. */
	if (string)
		for (slash = string; *slash; slash++)
			if (*slash == '/')
				count++;

	/* Rebuild the list from scratch. */
	free_schedulelist();

	if (count == 0) {
		/* Shorthand form: send signal, then wait. */
		item = xmalloc(sizeof(*item));
		item->type = SC_SIGNAL;
		item->value = timeout;
		item->gotoitem = NULL;
		TAILQ_INSERT_TAIL(&schedule, item, entries);

		item = xmalloc(sizeof(*item));
		item->type = SC_TIMEOUT;
		item->gotoitem = NULL;
		TAILQ_INSERT_TAIL(&schedule, item, entries);
		/* The timeout value comes from `string' when given. */
		if (string) {
			if (sscanf(string, "%d", &item->value) != 1)
				eerrorx("%s: invalid timeout in schedule",
				    applet);
		} else
			item->value = 5;

		return;
	}

	/* General form: walk the '/'-separated items left to right. */
	while (string != NULL) {
		if ((slash = strchr(string, '/')))
			len = slash - string;
		else
			len = strlen(string);

		if (len >= (ptrdiff_t)sizeof(buffer))
			eerrorx("%s: invalid schedule item, far too long",
			    applet);

		memcpy(buffer, string, len);
		buffer[len] = 0;
		string = slash ? slash + 1 : NULL;

		item = parse_schedule_item(buffer);
		TAILQ_INSERT_TAIL(&schedule, item, entries);
		/* Remember the (single) `forever' marker for the loop-back. */
		if (item->type == SC_FOREVER) {
			if (repeatat)
				eerrorx("%s: invalid schedule, `forever' "
				    "appears more than once", applet);
			repeatat = item;
			continue;
		}
	}

	/* Close the loop back to the `forever' marker, if any. */
	if (repeatat) {
		item = xmalloc(sizeof(*item));
		item->type = SC_GOTO;
		item->value = 0;
		item->gotoitem = repeatat;
		TAILQ_INSERT_TAIL(&schedule, item, entries);
	}

	return;
}
示例14: __wt_open
//.........这里部分代码省略.........
/* Disable read-ahead on trees: it slows down random read workloads. */
if (dio_type == WT_FILE_TYPE_DATA ||
dio_type == WT_FILE_TYPE_CHECKPOINT)
f |= FILE_FLAG_RANDOM_ACCESS;
filehandle = CreateFileA(path,
(GENERIC_READ | GENERIC_WRITE),
share_mode,
NULL,
dwCreationDisposition,
f,
NULL);
if (filehandle == INVALID_HANDLE_VALUE) {
if (GetLastError() == ERROR_FILE_EXISTS && ok_create)
filehandle = CreateFileA(path,
(GENERIC_READ | GENERIC_WRITE),
share_mode,
NULL,
OPEN_EXISTING,
f,
NULL);
if (filehandle == INVALID_HANDLE_VALUE)
WT_ERR_MSG(session, __wt_errno(),
direct_io ?
"%s: open failed with direct I/O configured, some "
"filesystem types do not support direct I/O" :
"%s", path);
}
/*
* Open a second handle to file to support allocation/truncation
* concurrently with reads on the file. Writes would also move the file
* pointer.
*/
filehandle_secondary = CreateFileA(path,
(GENERIC_READ | GENERIC_WRITE),
share_mode,
NULL,
OPEN_EXISTING,
f,
NULL);
if (filehandle == INVALID_HANDLE_VALUE)
WT_ERR_MSG(session, __wt_errno(),
"open failed for secondary handle: %s", path);
setupfh:
WT_ERR(__wt_calloc(session, 1, sizeof(WT_FH), &fh));
WT_ERR(__wt_strdup(session, name, &fh->name));
fh->filehandle = filehandle;
fh->filehandle_secondary = filehandle_secondary;
fh->ref = 1;
fh->direct_io = direct_io;
/* Set the file's size. */
if (dio_type != WT_FILE_TYPE_DIRECTORY)
WT_ERR(__wt_filesize(session, fh, &fh->size));
/* Configure file extension. */
if (dio_type == WT_FILE_TYPE_DATA ||
dio_type == WT_FILE_TYPE_CHECKPOINT)
fh->extend_len = conn->data_extend_len;
/* Configure fallocate/posix_fallocate calls. */
__wt_fallocate_config(session, fh);
/*
* Repeat the check for a match, but then link onto the database's list
* of files.
*/
matched = 0;
__wt_spin_lock(session, &conn->fh_lock);
TAILQ_FOREACH(tfh, &conn->fhqh, q)
if (strcmp(name, tfh->name) == 0) {
++tfh->ref;
*fhp = tfh;
matched = 1;
break;
}
if (!matched) {
TAILQ_INSERT_TAIL(&conn->fhqh, fh, q);
WT_STAT_FAST_CONN_INCR(session, file_open);
*fhp = fh;
}
__wt_spin_unlock(session, &conn->fh_lock);
if (matched) {
err: if (fh != NULL) {
__wt_free(session, fh->name);
__wt_free(session, fh);
}
if (filehandle != INVALID_HANDLE_VALUE)
(void)CloseHandle(filehandle);
if (filehandle_secondary != INVALID_HANDLE_VALUE)
(void)CloseHandle(filehandle_secondary);
}
__wt_free(session, path);
return (ret);
}
示例15: nullfs_mount
//.........这里部分代码省略.........
/*
* Find lower node
*/
NDINIT(ndp, LOOKUP, FOLLOW|LOCKLEAF, UIO_SYSSPACE, target, curthread);
error = namei(ndp);
/*
* Re-lock vnode.
* XXXKIB This is deadlock-prone as well.
*/
if (isvnunlocked)
vn_lock(mp->mnt_vnodecovered, LK_EXCLUSIVE | LK_RETRY);
if (error)
return (error);
NDFREE(ndp, NDF_ONLY_PNBUF);
/*
* Sanity check on lower vnode
*/
lowerrootvp = ndp->ni_vp;
/*
* Check multi null mount to avoid `lock against myself' panic.
*/
if (lowerrootvp == VTONULL(mp->mnt_vnodecovered)->null_lowervp) {
NULLFSDEBUG("nullfs_mount: multi null mount?\n");
vput(lowerrootvp);
return (EDEADLK);
}
xmp = (struct null_mount *) malloc(sizeof(struct null_mount),
M_NULLFSMNT, M_WAITOK | M_ZERO);
/*
* Save reference to underlying FS
*/
xmp->nullm_vfs = lowerrootvp->v_mount;
/*
* Save reference. Each mount also holds
* a reference on the root vnode.
*/
error = null_nodeget(mp, lowerrootvp, &vp);
/*
* Make sure the node alias worked
*/
if (error) {
free(xmp, M_NULLFSMNT);
return (error);
}
/*
* Keep a held reference to the root vnode.
* It is vrele'd in nullfs_unmount.
*/
nullm_rootvp = vp;
nullm_rootvp->v_vflag |= VV_ROOT;
xmp->nullm_rootvp = nullm_rootvp;
/*
* Unlock the node (either the lower or the alias)
*/
VOP_UNLOCK(vp, 0);
if (NULLVPTOLOWERVP(nullm_rootvp)->v_mount->mnt_flag & MNT_LOCAL) {
MNT_ILOCK(mp);
mp->mnt_flag |= MNT_LOCAL;
MNT_IUNLOCK(mp);
}
xmp->nullm_flags |= NULLM_CACHE;
if (vfs_getopt(mp->mnt_optnew, "nocache", NULL, NULL) == 0)
xmp->nullm_flags &= ~NULLM_CACHE;
MNT_ILOCK(mp);
if ((xmp->nullm_flags & NULLM_CACHE) != 0) {
mp->mnt_kern_flag |= lowerrootvp->v_mount->mnt_kern_flag &
(MNTK_SHARED_WRITES | MNTK_LOOKUP_SHARED |
MNTK_EXTENDED_SHARED);
}
mp->mnt_kern_flag |= MNTK_LOOKUP_EXCL_DOTDOT;
mp->mnt_kern_flag |= lowerrootvp->v_mount->mnt_kern_flag &
(MNTK_SUSPENDABLE | MNTK_USES_BCACHE);
MNT_IUNLOCK(mp);
mp->mnt_data = xmp;
vfs_getnewfsid(mp);
if ((xmp->nullm_flags & NULLM_CACHE) != 0) {
MNT_ILOCK(xmp->nullm_vfs);
TAILQ_INSERT_TAIL(&xmp->nullm_vfs->mnt_uppers, mp,
mnt_upper_link);
MNT_IUNLOCK(xmp->nullm_vfs);
}
vfs_mountedfrom(mp, target);
NULLFSDEBUG("nullfs_mount: lower %s, alias at %s\n",
mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname);
return (0);
}