This article collects typical usage examples of the STAILQ_FIRST macro from the BSD <sys/queue.h> singly-linked tail queue API, as used in C/C++ code. If you are wondering what exactly STAILQ_FIRST does, how it is used, or what real-world calls look like, the hand-picked code examples below should help.
Fifteen code examples of STAILQ_FIRST are presented below, ordered by popularity.
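Before diving into the examples, here is a minimal, self-contained sketch of the basic idiom: STAILQ_FIRST() peeks at the head of a singly-linked tail queue and is typically paired with STAILQ_REMOVE_HEAD() to consume it. This assumes a BSD-style <sys/queue.h> that provides the STAILQ_* macros (FreeBSD, DragonFly, NetBSD); the struct and variable names are illustrative only.

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
    int                 value;
    STAILQ_ENTRY(item)  link;   /* linkage used by the queue macros */
};
STAILQ_HEAD(itemq, item);

int
main(void)
{
    struct itemq q = STAILQ_HEAD_INITIALIZER(q);
    struct item *it;
    int i;

    for (i = 0; i < 3; i++) {
        if ((it = malloc(sizeof(*it))) == NULL)
            abort();
        it->value = i;
        STAILQ_INSERT_TAIL(&q, it, link);
    }
    /* Consume the queue: STAILQ_FIRST peeks, STAILQ_REMOVE_HEAD pops. */
    while ((it = STAILQ_FIRST(&q)) != NULL) {
        STAILQ_REMOVE_HEAD(&q, link);
        printf("%d\n", it->value);
        free(it);
    }
    return (0);
}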
Example 1: pccard_check_cis_quirks
void
pccard_check_cis_quirks(device_t dev)
{
struct pccard_softc *sc = PCCARD_SOFTC(dev);
int wiped = 0;
int i, j;
struct pccard_function *pf, *pf_next, *pf_last;
struct pccard_config_entry *cfe, *cfe_next;
struct pccard_cis_quirk *q;
pf = NULL;
pf_last = NULL;
for (i=0; i<n_pccard_cis_quirks; i++) {
q = &pccard_cis_quirks[i];
if (!pccard_cis_quirk_match(sc, q))
continue;
if (!wiped) {
if (bootverbose) {
device_printf(dev, "using CIS quirks for ");
for (j = 0; j < 4; j++) {
if (sc->card.cis1_info[j] == NULL)
break;
if (j)
kprintf(", ");
kprintf("%s", sc->card.cis1_info[j]);
}
kprintf("\n");
}
for (pf = STAILQ_FIRST(&sc->card.pf_head); pf != NULL;
pf = pf_next) {
for (cfe = STAILQ_FIRST(&pf->cfe_head); cfe != NULL;
cfe = cfe_next) {
cfe_next = STAILQ_NEXT(cfe, cfe_list);
kfree(cfe, M_DEVBUF);
}
pf_next = STAILQ_NEXT(pf, pf_list);
kfree(pf, M_DEVBUF);
}
STAILQ_INIT(&sc->card.pf_head);
wiped = 1;
}
if (pf_last == q->pf) {
cfe = kmalloc(sizeof(*cfe), M_DEVBUF, M_NOWAIT);
if (cfe == NULL) {
device_printf(dev, "no memory for quirk (1)\n");
continue;
}
*cfe = *q->cfe;
STAILQ_INSERT_TAIL(&pf->cfe_head, cfe, cfe_list);
} else {
pf = kmalloc(sizeof(*pf), M_DEVBUF, M_NOWAIT);
if (pf == NULL) {
device_printf(dev,
"no memory for pccard function\n");
continue;
}
*pf = *q->pf;
STAILQ_INIT(&pf->cfe_head);
cfe = kmalloc(sizeof(*cfe), M_DEVBUF, M_NOWAIT);
if (cfe == NULL) {
kfree(pf, M_DEVBUF);
device_printf(dev, "no memory for quirk (2)\n");
continue;
}
*cfe = *q->cfe;
STAILQ_INSERT_TAIL(&pf->cfe_head, cfe, cfe_list);
STAILQ_INSERT_TAIL(&sc->card.pf_head, pf, pf_list);
pf_last = q->pf;
}
}
}
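Example 1 frees every element while it walks the list, so it caches the successor with STAILQ_NEXT() in pf_next/cfe_next before each kfree(). Below is a minimal user-space sketch of that save-next-then-free idiom with illustrative names (it is not the pccard code); on queue.h implementations that provide STAILQ_FOREACH_SAFE (FreeBSD) or STAILQ_FOREACH_MUTABLE (DragonFly), the same traversal can be expressed with that macro instead.

#include <sys/queue.h>
#include <stdlib.h>

struct entry {
    STAILQ_ENTRY(entry) link;
};
STAILQ_HEAD(entryq, entry);

static void
wipe(struct entryq *q)
{
    struct entry *e, *e_next;

    for (e = STAILQ_FIRST(q); e != NULL; e = e_next) {
        e_next = STAILQ_NEXT(e, link);  /* save the successor before freeing */
        free(e);
    }
    STAILQ_INIT(q);                     /* head still points at freed memory; reset it */
}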
Example 2: cuda_send_inbound
static void
cuda_send_inbound(struct cuda_softc *sc)
{
device_t dev;
struct cuda_packet *pkt;
dev = sc->sc_dev;
mtx_lock(&sc->sc_mutex);
while ((pkt = STAILQ_FIRST(&sc->sc_inq)) != NULL) {
STAILQ_REMOVE_HEAD(&sc->sc_inq, pkt_q);
mtx_unlock(&sc->sc_mutex);
/* check if we have a handler for this message */
switch (pkt->type) {
case CUDA_ADB:
if (pkt->len > 2) {
adb_receive_raw_packet(sc->adb_bus,
pkt->data[0],pkt->data[1],
pkt->len - 2,&pkt->data[2]);
} else {
adb_receive_raw_packet(sc->adb_bus,
pkt->data[0],pkt->data[1],0,NULL);
}
break;
case CUDA_PSEUDO:
mtx_lock(&sc->sc_mutex);
switch (pkt->data[1]) {
case CMD_AUTOPOLL:
sc->sc_autopoll = 1;
break;
case CMD_READ_RTC:
memcpy(&sc->sc_rtc, &pkt->data[2],
sizeof(sc->sc_rtc));
wakeup(&sc->sc_rtc);
break;
case CMD_WRITE_RTC:
break;
}
mtx_unlock(&sc->sc_mutex);
break;
case CUDA_ERROR:
/*
* CUDA will throw errors if we miss a race between
* sending and receiving packets. This is already
* handled when we abort packet output to handle
* this packet in cuda_intr(). Thus, we ignore
* these messages.
*/
break;
default:
device_printf(dev,"unknown CUDA command %d\n",
pkt->type);
break;
}
mtx_lock(&sc->sc_mutex);
STAILQ_INSERT_TAIL(&sc->sc_freeq, pkt, pkt_q);
}
mtx_unlock(&sc->sc_mutex);
}
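Example 2 drains an input queue with the classic pattern: pop under the lock, drop the lock while the handler runs, then retake it to recycle the packet onto a free list. A rough user-space sketch of the same structure follows, assuming a POSIX mutex in place of the kernel mtx(9) API; "workq", "freeq" and "job" are illustrative names only.

#include <sys/queue.h>
#include <pthread.h>

struct job {
    int                 payload;
    STAILQ_ENTRY(job)   q;
};
STAILQ_HEAD(jobq, job);

static struct jobq workq = STAILQ_HEAD_INITIALIZER(workq);
static struct jobq freeq = STAILQ_HEAD_INITIALIZER(freeq);
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void process(struct job *j) { (void)j; /* handler runs without the lock */ }

static void
drain(void)
{
    struct job *j;

    pthread_mutex_lock(&lock);
    while ((j = STAILQ_FIRST(&workq)) != NULL) {
        STAILQ_REMOVE_HEAD(&workq, q);
        pthread_mutex_unlock(&lock);        /* never hold the lock in the handler */

        process(j);

        pthread_mutex_lock(&lock);
        STAILQ_INSERT_TAIL(&freeq, j, q);   /* recycle the buffer */
    }
    pthread_mutex_unlock(&lock);
}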
Example 3: _libelf_load_scn
/*
* Load an ELF section table and create a list of Elf_Scn structures.
*/
int
_libelf_load_scn(Elf *e, void *ehdr)
{
int ec, swapbytes;
size_t fsz, i, shnum;
uint64_t shoff;
char *src;
Elf32_Ehdr *eh32;
Elf64_Ehdr *eh64;
Elf_Scn *scn;
int (*xlator)(char *_d, size_t _dsz, char *_s, size_t _c, int _swap);
assert(e != NULL);
assert(ehdr != NULL);
assert((e->e_flags & LIBELF_F_SHDRS_LOADED) == 0);
#define CHECK_EHDR(E,EH) do { \
if (fsz != (EH)->e_shentsize || \
shoff + fsz * shnum > e->e_rawsize) { \
LIBELF_SET_ERROR(HEADER, 0); \
return (0); \
} \
} while (0)
ec = e->e_class;
fsz = _libelf_fsize(ELF_T_SHDR, ec, e->e_version, (size_t) 1);
assert(fsz > 0);
shnum = e->e_u.e_elf.e_nscn;
if (ec == ELFCLASS32) {
eh32 = (Elf32_Ehdr *) ehdr;
shoff = (uint64_t) eh32->e_shoff;
CHECK_EHDR(e, eh32);
} else {
eh64 = (Elf64_Ehdr *) ehdr;
shoff = eh64->e_shoff;
CHECK_EHDR(e, eh64);
}
xlator = _libelf_get_translator(ELF_T_SHDR, ELF_TOMEMORY, ec);
swapbytes = e->e_byteorder != LIBELF_PRIVATE(byteorder);
src = e->e_rawfile + shoff;
/*
* If the file is using extended numbering then section #0
* would have already been read in.
*/
i = 0;
if (!STAILQ_EMPTY(&e->e_u.e_elf.e_scn)) {
assert(STAILQ_FIRST(&e->e_u.e_elf.e_scn) ==
STAILQ_LAST(&e->e_u.e_elf.e_scn, _Elf_Scn, s_next));
i = 1;
src += fsz;
}
for (; i < shnum; i++, src += fsz) {
if ((scn = _libelf_allocate_scn(e, i)) == NULL)
return (0);
(*xlator)((char *) &scn->s_shdr, sizeof(scn->s_shdr), src,
(size_t) 1, swapbytes);
if (ec == ELFCLASS32) {
scn->s_offset = scn->s_rawoff =
scn->s_shdr.s_shdr32.sh_offset;
scn->s_size = scn->s_shdr.s_shdr32.sh_size;
} else {
scn->s_offset = scn->s_rawoff =
scn->s_shdr.s_shdr64.sh_offset;
scn->s_size = scn->s_shdr.s_shdr64.sh_size;
}
}
e->e_flags |= LIBELF_F_SHDRS_LOADED;
return (1);
}
Example 4: memcache_pre_coalesce
/*
* Pre-coalesce handler is invoked when the message is a response to
* a fragmented multi-vector request - 'get' or 'gets' - and all the
* responses to the fragmented request vector haven't been received yet
*/
void
memcache_pre_coalesce(struct msg *r)
{
struct msg *pr = r->peer; /* peer request */
struct mbuf *mbuf;
ASSERT(!r->request);
ASSERT(pr->request);
if (pr->frag_id == 0) {
/* do nothing, if not a response to a fragmented request */
return;
}
switch (r->type) {
case MSG_RSP_MC_VALUE:
case MSG_RSP_MC_END:
/*
* Readjust responses of the fragmented message vector by not
* including the end marker for all but the last response
*/
if (pr->last_fragment) {
break;
}
ASSERT(r->end != NULL);
for (;;) {
mbuf = STAILQ_LAST(&r->mhdr, mbuf, next);
ASSERT(mbuf != NULL);
/*
* We cannot assert that end marker points to the last mbuf
* Consider a scenario where end marker points to the
* penultimate mbuf and the last mbuf only contains spaces
* and CRLF: mhdr -> [...END] -> [\r\n]
*/
if (r->end >= mbuf->pos && r->end < mbuf->last) {
/* end marker is within this mbuf */
r->mlen -= (uint32_t)(mbuf->last - r->end);
mbuf->last = r->end;
break;
}
/* end marker is not in this mbuf */
r->mlen -= mbuf_length(mbuf);
mbuf_remove(&r->mhdr, mbuf);
mbuf_put(mbuf);
}
break;
default:
/*
* Valid responses for a fragmented request are MSG_RSP_MC_VALUE or
* MSG_RSP_MC_END. For an invalid response, we send out SERVER_ERROR
* with EINVAL errno
*/
mbuf = STAILQ_FIRST(&r->mhdr);
log_hexdump(LOG_ERR, mbuf->pos, mbuf_length(mbuf), "rsp fragment "
"with unknown type %d", r->type);
pr->error = 1;
pr->err = EINVAL;
break;
}
}
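Example 4 trims the response from the tail: it repeatedly looks at the last mbuf with STAILQ_LAST() and drops whole buffers until it reaches the one containing the end marker. Below is a sketch of that tail-trimming loop, with STAILQ_REMOVE() standing in for mbuf_remove(); the types and names are illustrative, not nutcracker's, and the real code also adjusts the message length and recycles the dropped buffer.

#include <sys/queue.h>
#include <stdint.h>

struct buf {
    uint8_t            *pos;    /* first valid byte */
    uint8_t            *last;   /* one past the last valid byte */
    STAILQ_ENTRY(buf)   next;
};
STAILQ_HEAD(bufq, buf);

static void
trim_after_marker(struct bufq *q, uint8_t *marker)
{
    struct buf *b;

    for (;;) {
        b = STAILQ_LAST(q, buf, next);
        if (b == NULL)
            break;
        if (marker >= b->pos && marker < b->last) {
            b->last = marker;               /* marker lives inside this buffer */
            break;
        }
        STAILQ_REMOVE(q, b, buf, next);     /* drop a wholly-trailing buffer */
        /* a real implementation would also account for and recycle b here */
    }
}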
Example 5: output_listing
void
output_listing(char *ifilename)
{
char buf[1024];
FILE *ifile;
struct instruction *cur_instr;
patch_t *cur_patch;
symbol_node_t *cur_func;
int *func_values;
int instrcount;
int instrptr;
int line;
int func_count;
int skip_addr;
instrcount = 0;
instrptr = 0;
line = 1;
skip_addr = 0;
if ((ifile = fopen(ifilename, "r")) == NULL) {
perror(ifilename);
stop(NULL, EX_DATAERR);
}
/*
* Determine which options to apply to this listing.
*/
for (func_count = 0, cur_func = SLIST_FIRST(&patch_functions);
cur_func != NULL;
cur_func = SLIST_NEXT(cur_func, links))
func_count++;
func_values = NULL;
if (func_count != 0) {
func_values = (int *)malloc(func_count * sizeof(int));
if (func_values == NULL)
stop("Could not malloc", EX_OSERR);
func_values[0] = 0; /* FALSE func */
func_count--;
/*
* Ask the user to fill in the return values for
* the rest of the functions.
*/
for (cur_func = SLIST_FIRST(&patch_functions);
cur_func != NULL && SLIST_NEXT(cur_func, links) != NULL;
cur_func = SLIST_NEXT(cur_func, links), func_count--) {
int input;
fprintf(stdout, "\n(%s)\n", cur_func->symbol->name);
fprintf(stdout,
"Enter the return value for "
"this expression[T/F]:");
while (1) {
input = getchar();
input = toupper(input);
if (input == 'T') {
func_values[func_count] = 1;
break;
} else if (input == 'F') {
func_values[func_count] = 0;
break;
}
}
if (isatty(fileno(stdin)) == 0)
putchar(input);
}
fprintf(stdout, "\nThanks!\n");
}
/* Now output the listing */
cur_patch = STAILQ_FIRST(&patches);
for (cur_instr = STAILQ_FIRST(&seq_program);
cur_instr != NULL;
cur_instr = STAILQ_NEXT(cur_instr, links), instrcount++) {
if (check_patch(&cur_patch, instrcount,
&skip_addr, func_values) == 0) {
/* Don't count this instruction as it is in a patch
* that was removed.
*/
continue;
}
while (line < cur_instr->srcline) {
fgets(buf, sizeof(buf), ifile);
fprintf(listfile, " \t%s", buf);
line++;
}
fprintf(listfile, "%04x %02x%02x%02x%02x", instrptr,
#ifdef __LITTLE_ENDIAN
cur_instr->format.bytes[0],
cur_instr->format.bytes[1],
//......... part of the code omitted here .........
Example 6: mbaintr
/*
* We got an interrupt. Check type of interrupt and call the specific
* device interrupt handling routine.
*/
void
mbaintr(void *mba)
{
struct mba_softc * const sc = mba;
struct mba_device *md;
struct buf *bp;
int itype, attn, anr;
itype = MBA_RCSR(MBA_SR);
MBA_WCSR(MBA_SR, itype);
attn = MBA_RCSR(MUREG(0, MU_AS)) & 0xff;
MBA_WCSR(MUREG(0, MU_AS), attn);
if (sc->sc_state == SC_AUTOCONF)
return; /* During autoconfig */
md = STAILQ_FIRST(&sc->sc_xfers);
bp = bufq_peek(md->md_q);
/*
* A data-transfer interrupt. Current operation is finished,
* call that device's finish routine to see what to do next.
*/
if (sc->sc_state == SC_ACTIVE) {
sc->sc_state = SC_IDLE;
switch ((*md->md_finish)(md, itype, &attn)) {
case XFER_FINISH:
/*
* Transfer is finished. Take the buffer off the drive
* queue, and take the drive off the adapter queue.
* If more to transfer, start the adapter again
* by calling mbastart().
*/
(void)bufq_get(md->md_q);
STAILQ_REMOVE_HEAD(&sc->sc_xfers, md_link);
if (bufq_peek(md->md_q) != NULL) {
STAILQ_INSERT_TAIL(&sc->sc_xfers, md, md_link);
}
bp->b_resid = 0;
biodone(bp);
if (!STAILQ_EMPTY(&sc->sc_xfers))
mbastart(sc);
break;
case XFER_RESTART:
/*
* Something went wrong with the transfer. Try again.
*/
mbastart(sc);
break;
}
}
while (attn) {
anr = ffs(attn) - 1;
attn &= ~(1 << anr);
if (sc->sc_md[anr]->md_attn == 0)
panic("Should check for new MBA device %d", anr);
(*sc->sc_md[anr]->md_attn)(sc->sc_md[anr]);
}
}
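Example 6 rotates the transfer queue: the finished device is taken off the head with STAILQ_REMOVE_HEAD() and, if its buffer queue still has work, appended at the tail so the other drives get a turn first. A short sketch of that round-robin idiom follows; the names are illustrative and this is not the MBA driver itself.

#include <sys/queue.h>
#include <stdbool.h>

struct unit {
    STAILQ_ENTRY(unit)  link;
};
STAILQ_HEAD(unitq, unit);

static void
rotate(struct unitq *q, bool has_more_work)
{
    struct unit *u = STAILQ_FIRST(q);

    if (u == NULL)
        return;
    STAILQ_REMOVE_HEAD(q, link);
    if (has_more_work)
        STAILQ_INSERT_TAIL(q, u, link);     /* back of the line */
}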
Example 7: dfs_process_radarevent
int
dfs_process_radarevent(struct ath_softc *sc, HAL_CHANNEL *chan)
{
struct ath_dfs *dfs=sc->sc_dfs;
struct ath_hal *ah=sc->sc_ah;
struct dfs_event re,*event;
struct dfs_state *rs=NULL;
struct dfs_filtertype *ft;
struct dfs_filter *rf;
int found, retval=0,p, empty;
int events_processed=0;
u_int32_t tabledepth,rfilt, index;
u_int64_t deltafull_ts = 0,this_ts, deltaT;
HAL_CHANNEL *thischan;
HAL_PHYERR_PARAM pe;
struct dfs_pulseline *pl;
static u_int32_t test_ts = 0;
static u_int32_t diff_ts = 0;
int ext_chan_event_flag=0;
if (dfs == NULL) {
DFS_DPRINTK(sc, ATH_DEBUG_DFS, "%s: sc_dfs is NULL\n",
__func__);
return 0;
}
if ( ! (sc->sc_curchan.privFlags & CHANNEL_DFS)) {
DFS_DPRINTK(sc, ATH_DEBUG_DFS2, "%s: radar event on non-DFS chan\n",
__func__);
dfs_reset_radarq(sc);
dfs_reset_alldelaylines(sc);
return 0;
}
pl = dfs->pulses;
/* TEST : Simulate radar bang, make sure we add the channel to NOL (bug 29968) */
if (dfs->dfs_bangradar) {
/* bangradar will always simulate radar found on the primary channel */
rs = &dfs->dfs_radar[dfs->dfs_curchan_radindex];
dfs->dfs_bangradar = 0; /* reset */
DFS_DPRINTK(sc, ATH_DEBUG_DFS, "%s: bangradar\n", __func__);
retval = 1;
goto dfsfound;
}
ATH_DFSQ_LOCK(dfs);
empty = STAILQ_EMPTY(&(dfs->dfs_radarq));
ATH_DFSQ_UNLOCK(dfs);
while ((!empty) && (!retval) && (events_processed < MAX_EVENTS)) {
ATH_DFSQ_LOCK(dfs);
event = STAILQ_FIRST(&(dfs->dfs_radarq));
if (event != NULL)
STAILQ_REMOVE_HEAD(&(dfs->dfs_radarq), re_list);
ATH_DFSQ_UNLOCK(dfs);
if (event == NULL) {
empty = 1;
break;
}
events_processed++;
re = *event;
OS_MEMZERO(event, sizeof(struct dfs_event));
ATH_DFSEVENTQ_LOCK(dfs);
STAILQ_INSERT_TAIL(&(dfs->dfs_eventq), event, re_list);
ATH_DFSEVENTQ_UNLOCK(dfs);
found = 0;
if (re.re_chanindex < DFS_NUM_RADAR_STATES)
rs = &dfs->dfs_radar[re.re_chanindex];
else {
ATH_DFSQ_LOCK(dfs);
empty = STAILQ_EMPTY(&(dfs->dfs_radarq));
ATH_DFSQ_UNLOCK(dfs);
continue;
}
if (rs->rs_chan.privFlags & CHANNEL_INTERFERENCE) {
ATH_DFSQ_LOCK(dfs);
empty = STAILQ_EMPTY(&(dfs->dfs_radarq));
ATH_DFSQ_UNLOCK(dfs);
continue;
}
if (dfs->dfs_rinfo.rn_lastfull_ts == 0) {
/*
* Either not started, or 64-bit rollover exactly to zero
* Just prepend zeros to the 15-bit ts
*/
dfs->dfs_rinfo.rn_ts_prefix = 0;
this_ts = (u_int64_t) re.re_ts;
} else {
/* WAR 23031- patch duplicate ts on very short pulses */
/* This patch has two problems in a Linux environment.
* 1) The time stamp created, and hence the PRI, depends entirely on the latency.
* If the latency is high, it can split two consecutive
* pulses in the same burst so far apart (by the same amount of latency)
* that they look like they are from different bursts. It is
//......... part of the code omitted here .........
Example 8: get_bs
struct block_space *
get_bs(struct chip_swap *swap, uint32_t block, uint8_t writing)
{
struct block_state *blk_state, *old_blk_state = NULL;
struct block_space *blk_space;
if (swap == NULL || (block >= swap->nof_blks))
return (NULL);
blk_state = &swap->blk_state[block];
nand_debug(NDBG_SIM,"blk_state %x\n", blk_state->status);
if (blk_state->status & BLOCK_ALLOCATED) {
blk_space = blk_state->blk_sp;
} else {
blk_space = SLIST_FIRST(&swap->free_bs);
if (blk_space) {
SLIST_REMOVE_HEAD(&swap->free_bs, free_link);
STAILQ_INSERT_TAIL(&swap->used_bs, blk_space,
used_link);
} else {
blk_space = STAILQ_FIRST(&swap->used_bs);
old_blk_state = blk_space->blk_state;
STAILQ_REMOVE_HEAD(&swap->used_bs, used_link);
STAILQ_INSERT_TAIL(&swap->used_bs, blk_space,
used_link);
if (old_blk_state->status & BLOCK_DIRTY) {
swap_file_write(swap, old_blk_state);
old_blk_state->status &= ~BLOCK_DIRTY;
old_blk_state->status |= BLOCK_SWAPPED;
}
}
}
if (blk_space == NULL)
return (NULL);
if (old_blk_state != NULL) {
old_blk_state->status &= ~BLOCK_ALLOCATED;
old_blk_state->blk_sp = NULL;
}
blk_state->blk_sp = blk_space;
blk_space->blk_state = blk_state;
if (!(blk_state->status & BLOCK_ALLOCATED)) {
if (blk_state->status & BLOCK_SWAPPED)
swap_file_read(swap, blk_state);
else
memset(blk_space->blk_ptr, 0xff, swap->blk_size);
blk_state->status |= BLOCK_ALLOCATED;
}
if (writing)
blk_state->status |= BLOCK_DIRTY;
nand_debug(NDBG_SIM,"get_bs returned %p[%p] state %x\n", blk_space,
blk_space->blk_ptr, blk_state->status);
return (blk_space);
}
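Example 8 keeps a free list (SLIST) and a used queue (STAILQ): a block space is taken from the free list when possible, otherwise the oldest entry at the head of the used queue is evicted and reappended at the tail as the newest. Here is a simplified sketch of that recycling idiom, without the swap-file and dirty-flag handling; the structure and names are illustrative, not the nandsim code.

#include <sys/queue.h>

struct bufsp {
    SLIST_ENTRY(bufsp)  free_link;
    STAILQ_ENTRY(bufsp) used_link;
};
SLIST_HEAD(freelist, bufsp);
STAILQ_HEAD(usedlist, bufsp);

static struct bufsp *
acquire(struct freelist *freeq, struct usedlist *usedq)
{
    struct bufsp *b = SLIST_FIRST(freeq);

    if (b != NULL) {
        SLIST_REMOVE_HEAD(freeq, free_link);
    } else {
        b = STAILQ_FIRST(usedq);            /* oldest used buffer */
        if (b == NULL)
            return (NULL);
        STAILQ_REMOVE_HEAD(usedq, used_link);
    }
    STAILQ_INSERT_TAIL(usedq, b, used_link); /* now the most recently used */
    return (b);
}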
Example 9: nc_allocate_memory_block_mu
static nc_thread_memory_block_t *nc_allocate_memory_block_mu(
nc_thread_memory_block_type_t type,
int required_size) {
struct tailhead *head;
nc_thread_memory_block_t *node;
/* Assume the lock is held!!! */
if (type >= MAX_MEMORY_TYPE)
return NULL;
head = &__nc_thread_memory_blocks[type];
/* We need to know the size even if we find a free node - to memset it to 0 */
switch (type) {
case THREAD_STACK_MEMORY:
required_size = required_size + kStackAlignment - 1;
break;
case TLS_AND_TDB_MEMORY:
break;
case MAX_MEMORY_TYPE:
default:
return NULL;
}
if (!STAILQ_EMPTY(head)) {
/* Try to get one from queue. */
nc_thread_memory_block_t *node = STAILQ_FIRST(head);
/*
* On average the memory blocks will be marked as not used in the same order
* as they are added to the queue, therefore there is no need to check the
* next queue entries if the first one is still in use.
*/
if (0 == node->is_used && node->size >= required_size) {
/*
* This will only re-use the first node possibly, and could be
* improved to provide the stack with a best-fit algorithm if needed.
* TODO: we should scan all nodes to see if there is one that fits
* before allocating another.
* http://code.google.com/p/nativeclient/issues/detail?id=1569
*/
int size = node->size;
STAILQ_REMOVE_HEAD(head, entries);
--__nc_memory_block_counter[type];
memset(node, 0,sizeof(*node));
node->size = size;
node->is_used = 1;
return node;
}
while (__nc_memory_block_counter[type] > __nc_kMaxCachedMemoryBlocks) {
/*
* We have too many blocks in the queue - try to release some.
* The maximum number of memory blocks to keep in the queue
* is almost arbitrary and can be tuned.
* The main limitation is that if we keep too many
* blocks in the queue, the NaCl app will run out of memory,
* since the default thread stack size is 512K.
* TODO(gregoryd): we might give up reusing stack entries once we
* support variable stack size.
*/
nc_thread_memory_block_t *tmp = STAILQ_FIRST(head);
if (0 == tmp->is_used) {
STAILQ_REMOVE_HEAD(head, entries);
--__nc_memory_block_counter[type];
free(tmp);
} else {
/*
* Stop once we find a block that is still in use,
* since probably there is no point to continue.
*/
break;
}
}
}
/* No available blocks of the required type/size - allocate one. */
node = malloc(MEMORY_BLOCK_ALLOCATION_SIZE(required_size));
if (NULL != node) {
memset(node, 0, sizeof(*node));
node->size = required_size;
node->is_used = 1;
}
return node;
}
Example 10: elf_getdata
Elf_Data *
elf_getdata(Elf_Scn *s, Elf_Data *ed)
{
Elf *e;
unsigned int sh_type;
int elfclass, elftype;
size_t count, fsz, msz;
struct _Libelf_Data *d;
uint64_t sh_align, sh_offset, sh_size;
int (*xlate)(unsigned char *_d, size_t _dsz, unsigned char *_s,
size_t _c, int _swap);
d = (struct _Libelf_Data *) ed;
if (s == NULL || (e = s->s_elf) == NULL ||
(d != NULL && s != d->d_scn)) {
LIBELF_SET_ERROR(ARGUMENT, 0);
return (NULL);
}
assert(e->e_kind == ELF_K_ELF);
if (d == NULL && (d = STAILQ_FIRST(&s->s_data)) != NULL)
return (&d->d_data);
if (d != NULL)
return (&STAILQ_NEXT(d, d_next)->d_data);
if (e->e_rawfile == NULL) {
/*
* In the ELF_C_WRITE case, there is no source that
* can provide data for the section.
*/
LIBELF_SET_ERROR(ARGUMENT, 0);
return (NULL);
}
elfclass = e->e_class;
assert(elfclass == ELFCLASS32 || elfclass == ELFCLASS64);
if (elfclass == ELFCLASS32) {
sh_type = s->s_shdr.s_shdr32.sh_type;
sh_offset = (uint64_t) s->s_shdr.s_shdr32.sh_offset;
sh_size = (uint64_t) s->s_shdr.s_shdr32.sh_size;
sh_align = (uint64_t) s->s_shdr.s_shdr32.sh_addralign;
} else {
sh_type = s->s_shdr.s_shdr64.sh_type;
sh_offset = s->s_shdr.s_shdr64.sh_offset;
sh_size = s->s_shdr.s_shdr64.sh_size;
sh_align = s->s_shdr.s_shdr64.sh_addralign;
}
if (sh_type == SHT_NULL) {
LIBELF_SET_ERROR(SECTION, 0);
return (NULL);
}
if ((elftype = _libelf_xlate_shtype(sh_type)) < ELF_T_FIRST ||
elftype > ELF_T_LAST || (sh_type != SHT_NOBITS &&
sh_offset + sh_size > (uint64_t) e->e_rawsize)) {
LIBELF_SET_ERROR(SECTION, 0);
return (NULL);
}
if ((fsz = (elfclass == ELFCLASS32 ? elf32_fsize : elf64_fsize)
(elftype, (size_t) 1, e->e_version)) == 0) {
LIBELF_SET_ERROR(UNIMPL, 0);
return (NULL);
}
if (sh_size % fsz) {
LIBELF_SET_ERROR(SECTION, 0);
return (NULL);
}
if (sh_size / fsz > SIZE_MAX) {
LIBELF_SET_ERROR(RANGE, 0);
return (NULL);
}
count = (size_t) (sh_size / fsz);
msz = _libelf_msize(elftype, elfclass, e->e_version);
if (count > 0 && msz > SIZE_MAX / count) {
LIBELF_SET_ERROR(RANGE, 0);
return (NULL);
}
assert(msz > 0);
assert(count <= SIZE_MAX);
assert(msz * count <= SIZE_MAX);
if ((d = _libelf_allocate_data(s)) == NULL)
return (NULL);
d->d_data.d_buf = NULL;
d->d_data.d_off = 0;
d->d_data.d_align = sh_align;
//......... part of the code omitted here .........
Example 11: memcache_copy_bulk
/*
* Copy one response from src to dst and return bytes copied
*/
static rstatus_t
memcache_copy_bulk(struct msg *dst, struct msg *src)
{
struct mbuf *mbuf, *nbuf;
uint8_t *p;
uint32_t len = 0;
uint32_t bytes = 0;
uint32_t i = 0;
for (mbuf = STAILQ_FIRST(&src->mhdr);
mbuf && mbuf_empty(mbuf);
mbuf = STAILQ_FIRST(&src->mhdr)) {
mbuf_remove(&src->mhdr, mbuf);
mbuf_put(mbuf);
}
mbuf = STAILQ_FIRST(&src->mhdr);
if (mbuf == NULL) {
return NC_OK; /* key not exists */
}
p = mbuf->pos;
/*
* get : VALUE key 0 len\r\nval\r\n
* gets: VALUE key 0 len cas\r\nval\r\n
*/
ASSERT(*p == 'V');
for (i = 0; i < 3; i++) { /* eat 'VALUE key 0 ' */
for (; *p != ' ';) {
p++;
}
p++;
}
len = 0;
for (; p < mbuf->last && isdigit(*p); p++) {
len = len * 10 + (uint32_t)(*p - '0');
}
for (; p < mbuf->last && ('\r' != *p); p++) { /* eat cas for gets */
;
}
len += CRLF_LEN * 2;
len += (p - mbuf->pos);
bytes = len;
/* copy len bytes to dst */
for (; mbuf;) {
if (mbuf_length(mbuf) <= len) { /* steal this mbuf from src to dst */
nbuf = STAILQ_NEXT(mbuf, next);
mbuf_remove(&src->mhdr, mbuf);
mbuf_insert(&dst->mhdr, mbuf);
len -= mbuf_length(mbuf);
mbuf = nbuf;
} else { /* split it */
nbuf = mbuf_get();
if (nbuf == NULL) {
return NC_ENOMEM;
}
mbuf_copy(nbuf, mbuf->pos, len);
mbuf_insert(&dst->mhdr, nbuf);
mbuf->pos += len;
break;
}
}
dst->mlen += bytes;
src->mlen -= bytes;
log_debug(LOG_VVERB, "memcache_copy_bulk copy bytes: %d", bytes);
return NC_OK;
}
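Example 11 opens by discarding empty leading mbufs with a for-loop that re-reads STAILQ_FIRST() after every pop. A sketch of that skip-empty-buffers pattern is below; "buf_empty", "buf_put" and the types are illustrative only.

#include <sys/queue.h>
#include <stdbool.h>
#include <stddef.h>

struct buf {
    size_t              len;
    STAILQ_ENTRY(buf)   next;
};
STAILQ_HEAD(bufq, buf);

static bool buf_empty(const struct buf *b) { return (b->len == 0); }
static void buf_put(struct buf *b) { (void)b; /* return the buffer to a pool */ }

static void
drop_leading_empty(struct bufq *q)
{
    struct buf *b;

    /* re-read the head each time; stop at the first non-empty buffer */
    for (b = STAILQ_FIRST(q); b != NULL && buf_empty(b); b = STAILQ_FIRST(q)) {
        STAILQ_REMOVE_HEAD(q, next);
        buf_put(b);
    }
}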
Example 12: memcache_fragment_retrieval
/*
* read the comment in proto/nc_redis.c
*/
static rstatus_t
memcache_fragment_retrieval(struct msg *r, uint32_t ncontinuum,
struct msg_tqh *frag_msgq,
uint32_t key_step)
{
struct mbuf *mbuf;
struct msg **sub_msgs;
uint32_t i;
rstatus_t status;
sub_msgs = nc_zalloc(ncontinuum * sizeof(*sub_msgs));
if (sub_msgs == NULL) {
return NC_ENOMEM;
}
ASSERT(r->frag_seq == NULL);
r->frag_seq = nc_alloc(array_n(r->keys) * sizeof(*r->frag_seq));
if (r->frag_seq == NULL) {
nc_free(sub_msgs);
return NC_ENOMEM;
}
mbuf = STAILQ_FIRST(&r->mhdr);
mbuf->pos = mbuf->start;
/*
* This code is based on the assumption that 'gets ' is located
* in a contiguous location.
* This is always true because we have capped our MBUF_MIN_SIZE at 512 and
* whenever we have multiple messages, we copy the tail message into a new mbuf
*/
for (; *(mbuf->pos) != ' ';) { /* eat get/gets */
mbuf->pos++;
}
mbuf->pos++;
r->frag_id = msg_gen_frag_id();
r->nfrag = 0;
r->frag_owner = r;
for (i = 0; i < array_n(r->keys); i++) { /* for each key */
struct msg *sub_msg;
struct keypos *kpos = array_get(r->keys, i);
uint32_t idx = msg_backend_idx(r, kpos->start, kpos->end - kpos->start);
if (sub_msgs[idx] == NULL) {
sub_msgs[idx] = msg_get(r->owner, r->request, r->redis);
if (sub_msgs[idx] == NULL) {
nc_free(sub_msgs);
return NC_ENOMEM;
}
}
r->frag_seq[i] = sub_msg = sub_msgs[idx];
sub_msg->narg++;
status = memcache_append_key(sub_msg, kpos->start, kpos->end - kpos->start);
if (status != NC_OK) {
nc_free(sub_msgs);
return status;
}
}
for (i = 0; i < ncontinuum; i++) { /* prepend mget header, and forward it */
struct msg *sub_msg = sub_msgs[i];
if (sub_msg == NULL) {
continue;
}
/* prepend get/gets */
if (r->type == MSG_REQ_MC_GET) {
status = msg_prepend(sub_msg, (uint8_t *)"get ", 4);
} else if (r->type == MSG_REQ_MC_GETS) {
status = msg_prepend(sub_msg, (uint8_t *)"gets ", 5);
}
if (status != NC_OK) {
nc_free(sub_msgs);
return status;
}
/* append \r\n */
status = msg_append(sub_msg, (uint8_t *)CRLF, CRLF_LEN);
if (status != NC_OK) {
nc_free(sub_msgs);
return status;
}
sub_msg->type = r->type;
sub_msg->frag_id = r->frag_id;
sub_msg->frag_owner = r->frag_owner;
TAILQ_INSERT_TAIL(frag_msgq, sub_msg, m_tqe);
r->nfrag++;
}
nc_free(sub_msgs);
return NC_OK;
}
Example 13: pci_ioctl
//......... part of the code omitted here .........
if (ap->a_cmd == PCIOCGETCONF_OLD) {
pattern_buf_old = kmalloc(cio->pat_buf_len,
M_TEMP, M_WAITOK);
error = copyin(cio->patterns,
pattern_buf_old, cio->pat_buf_len);
} else
#endif
{
pattern_buf = kmalloc(cio->pat_buf_len, M_TEMP,
M_WAITOK);
error = copyin(cio->patterns, pattern_buf,
cio->pat_buf_len);
}
if (error != 0) {
error = EINVAL;
goto getconfexit;
}
num_patterns = cio->num_patterns;
} else if ((cio->num_patterns > 0)
|| (cio->pat_buf_len > 0)) {
/*
* The user made a mistake, spit out an error.
*/
cio->status = PCI_GETCONF_ERROR;
error = EINVAL;
break;
}
/*
* Go through the list of devices and copy out the devices
* that match the user's criteria.
*/
for (cio->num_matches = 0, error = 0, i = 0,
dinfo = STAILQ_FIRST(devlist_head);
(dinfo != NULL) && (cio->num_matches < ionum)
&& (error == 0) && (i < pci_numdevs) && (dinfo != NULL);
dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
if (i < cio->offset)
continue;
/* Populate pd_name and pd_unit */
name = NULL;
if (dinfo->cfg.dev)
name = device_get_name(dinfo->cfg.dev);
if (name) {
strncpy(dinfo->conf.pd_name, name,
sizeof(dinfo->conf.pd_name));
dinfo->conf.pd_name[PCI_MAXNAMELEN] = 0;
dinfo->conf.pd_unit =
device_get_unit(dinfo->cfg.dev);
} else {
dinfo->conf.pd_name[0] = '\0';
dinfo->conf.pd_unit = 0;
}
#ifdef PRE7_COMPAT
if ((ap->a_cmd == PCIOCGETCONF_OLD &&
(pattern_buf_old == NULL ||
pci_conf_match_old(pattern_buf_old, num_patterns,
&dinfo->conf) == 0)) ||
(ap->a_cmd == PCIOCGETCONF &&
(pattern_buf == NULL ||
pci_conf_match(pattern_buf, num_patterns,
&dinfo->conf) == 0))) {
#else
Example 14: dnode_req_forward
static void
dnode_req_forward(struct context *ctx, struct conn *conn, struct msg *msg)
{
struct server_pool *pool;
uint8_t *key;
uint32_t keylen;
if (log_loggable(LOG_DEBUG)) {
log_debug(LOG_DEBUG, "dnode_req_forward entering ");
}
log_debug(LOG_DEBUG, "DNODE REQ RECEIVED %s %d dmsg->id %u",
conn_get_type_string(conn), conn->sd, msg->dmsg->id);
ASSERT(conn->type == CONN_DNODE_PEER_CLIENT);
pool = conn->owner;
key = NULL;
keylen = 0;
log_debug(LOG_DEBUG, "conn %p adding message %d:%d", conn, msg->id, msg->parent_id);
dictAdd(conn->outstanding_msgs_dict, &msg->id, msg);
if (!string_empty(&pool->hash_tag)) {
struct string *tag = &pool->hash_tag;
uint8_t *tag_start, *tag_end;
tag_start = dn_strchr(msg->key_start, msg->key_end, tag->data[0]);
if (tag_start != NULL) {
tag_end = dn_strchr(tag_start + 1, msg->key_end, tag->data[1]);
if (tag_end != NULL) {
key = tag_start + 1;
keylen = (uint32_t)(tag_end - key);
}
}
}
if (keylen == 0) {
key = msg->key_start;
keylen = (uint32_t)(msg->key_end - msg->key_start);
}
ASSERT(msg->dmsg != NULL);
if (msg->dmsg->type == DMSG_REQ) {
local_req_forward(ctx, conn, msg, key, keylen);
} else if (msg->dmsg->type == DMSG_REQ_FORWARD) {
struct mbuf *orig_mbuf = STAILQ_FIRST(&msg->mhdr);
struct datacenter *dc = server_get_dc(pool, &pool->dc);
uint32_t rack_cnt = array_n(&dc->racks);
uint32_t rack_index;
for(rack_index = 0; rack_index < rack_cnt; rack_index++) {
struct rack *rack = array_get(&dc->racks, rack_index);
//log_debug(LOG_DEBUG, "forwarding to rack '%.*s'",
// rack->name->len, rack->name->data);
struct msg *rack_msg;
if (string_compare(rack->name, &pool->rack) == 0 ) {
rack_msg = msg;
} else {
rack_msg = msg_get(conn, msg->request, __FUNCTION__);
if (rack_msg == NULL) {
log_debug(LOG_VERB, "whelp, looks like yer screwed now, buddy. no inter-rack messages for you!");
continue;
}
if (msg_clone(msg, orig_mbuf, rack_msg) != DN_OK) {
msg_put(rack_msg);
continue;
}
rack_msg->swallow = true;
}
if (log_loggable(LOG_DEBUG)) {
log_debug(LOG_DEBUG, "forwarding request from conn '%s' to rack '%.*s' dc '%.*s' ",
dn_unresolve_peer_desc(conn->sd), rack->name->len, rack->name->data, rack->dc->len, rack->dc->data);
}
remote_req_forward(ctx, conn, rack_msg, rack, key, keylen);
}
}
}
Example 15: pkg_create_matches
static int
pkg_create_matches(int argc, char **argv, match_t match, pkg_formats fmt,
const char * const outdir, const char * const rootdir, bool overwrite)
{
int i, ret = EPKG_OK, retcode = EPKG_OK;
struct pkgdb *db = NULL;
struct pkgdb_it *it = NULL;
struct pkg *pkg = NULL;
struct pkg_head head = STAILQ_HEAD_INITIALIZER(head);
struct pkg_entry *e = NULL;
const char *name, *version;
char pkgpath[MAXPATHLEN];
int query_flags = PKG_LOAD_DEPS | PKG_LOAD_FILES |
PKG_LOAD_CATEGORIES | PKG_LOAD_DIRS | PKG_LOAD_SCRIPTS |
PKG_LOAD_OPTIONS | PKG_LOAD_MTREE | PKG_LOAD_LICENSES |
PKG_LOAD_USERS | PKG_LOAD_GROUPS | PKG_LOAD_SHLIBS;
const char *format;
bool foundone;
if (pkgdb_open(&db, PKGDB_DEFAULT) != EPKG_OK) {
pkgdb_close(db);
return (EX_IOERR);
}
switch (fmt) {
case TXZ:
format = "txz";
break;
case TBZ:
format = "tbz";
break;
case TGZ:
format = "tgz";
break;
case TAR:
format = "tar";
break;
}
for (i = 0; i < argc || match == MATCH_ALL; i++) {
if (match == MATCH_ALL) {
printf("Loading package list...\n");
if ((it = pkgdb_query(db, NULL, match)) == NULL)
goto cleanup;
match = !MATCH_ALL;
} else
if ((it = pkgdb_query(db, argv[i], match)) == NULL)
goto cleanup;
foundone = false;
while ((ret = pkgdb_it_next(it, &pkg, query_flags)) == EPKG_OK) {
if ((e = malloc(sizeof(struct pkg_entry))) == NULL)
err(1, "malloc(pkg_entry)");
e->pkg = pkg;
pkg = NULL;
STAILQ_INSERT_TAIL(&head, e, next);
foundone = true;
}
if (!foundone)
warnx("No installed package matching \"%s\" found\n",
argv[i]);
pkgdb_it_free(it);
if (ret != EPKG_END)
retcode++;
}
while (!STAILQ_EMPTY(&head)) {
e = STAILQ_FIRST(&head);
STAILQ_REMOVE_HEAD(&head, next);
pkg_get(e->pkg, PKG_NAME, &name, PKG_VERSION, &version);
if (!overwrite) {
snprintf(pkgpath, MAXPATHLEN, "%s/%s-%s.%s", outdir,
name, version, format);
if (access(pkgpath, F_OK) == 0) {
printf("%s-%s already packaged skipping...\n",
name, version);
pkg_free(e->pkg);
free(e);
continue;
}
}
printf("Creating package for %s-%s\n", name, version);
if (pkg_create_installed(outdir, fmt, rootdir, e->pkg) !=
EPKG_OK)
retcode++;
pkg_free(e->pkg);
free(e);
}
cleanup:
pkgdb_close(db);
return (retcode);
}