This article collects typical usage examples of the KASSERT function from C/C++ kernel code (strictly speaking, KASSERT is a preprocessor macro rather than a function). If you have been wondering what KASSERT does, how to use it, or what real-world uses look like, the curated examples below may help.
The following presents 15 code examples of KASSERT, sorted by popularity by default.
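Before the examples, a note on shape: KASSERT differs between kernels. FreeBSD's form is KASSERT(expression, (format, ...)) and is compiled in only under the INVARIANTS kernel option, while NetBSD uses KASSERT(expression) and KASSERTMSG(expression, format, ...) under DIAGNOSTIC. The following minimal userspace sketch imitates the FreeBSD form; the panic() stand-in and the conn structure are assumptions made purely for illustration, not kernel API.

/*
 * Userspace approximation of the FreeBSD KASSERT pattern. A failed
 * kernel KASSERT calls panic(); here that is emulated with
 * fprintf(3) + abort(3).
 */
#include <stdio.h>
#include <stdlib.h>

#define panic(...) \
	do { fprintf(stderr, __VA_ARGS__); fputc('\n', stderr); abort(); } while (0)
#define KASSERT(exp, msg) do { if (!(exp)) panic msg; } while (0)

struct conn {
	int refcount;
};

static void
conn_release(struct conn *c)
{
	/* The invariants: the pointer is valid and a reference is held. */
	KASSERT(c != NULL, ("conn_release: NULL connection"));
	KASSERT(c->refcount > 0, ("conn_release: refcount %d <= 0", c->refcount));
	c->refcount--;
}

int
main(void)
{
	struct conn c = { .refcount = 1 };
	conn_release(&c);	/* fine */
	conn_release(&c);	/* trips the refcount assertion and aborts */
	return 0;
}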
Example 1: ether_output
//......... part of the code omitted here .........
type = eh->ether_type;
break;
}
default:
if_printf(ifp, "can't handle af%d\n", dst->sa_family);
senderr(EAFNOSUPPORT);
}
if (lle != NULL && (lle->la_flags & LLE_IFADDR)) {
update_mbuf_csumflags(m, m);
return (if_simloop(ifp, m, dst->sa_family, 0));
}
/*
* Add local net header. If no space in first mbuf,
* allocate another.
*/
M_PREPEND(m, ETHER_HDR_LEN, M_NOWAIT);
if (m == NULL)
senderr(ENOBUFS);
eh = mtod(m, struct ether_header *);
(void)memcpy(&eh->ether_type, &type,
sizeof(eh->ether_type));
(void)memcpy(eh->ether_dhost, edst, sizeof (edst));
if (hdrcmplt)
(void)memcpy(eh->ether_shost, esrc,
sizeof(eh->ether_shost));
else
(void)memcpy(eh->ether_shost, IF_LLADDR(ifp),
sizeof(eh->ether_shost));
/*
* If a simplex interface, and the packet is being sent to our
* Ethernet address or a broadcast address, loopback a copy.
* XXX To make a simplex device behave exactly like a duplex
* device, we should copy in the case of sending to our own
* ethernet address (thus letting the original actually appear
* on the wire). However, we don't do that here for security
* reasons and compatibility with the original behavior.
*/
if ((ifp->if_flags & IFF_SIMPLEX) && loop_copy &&
((t = pf_find_mtag(m)) == NULL || !t->routed)) {
if (m->m_flags & M_BCAST) {
struct mbuf *n;
/*
* Because if_simloop() modifies the packet, we need a
* writable copy through m_dup() instead of a readonly
* one as m_copy[m] would give us. The alternative would
* be to modify if_simloop() to handle the readonly mbuf,
* but performancewise it is mostly equivalent (trading
* extra data copying vs. extra locking).
*
* XXX This is a local workaround. A number of less
* often used kernel parts suffer from the same bug.
* See PR kern/105943 for a proposed general solution.
*/
if ((n = m_dup(m, M_NOWAIT)) != NULL) {
update_mbuf_csumflags(m, n);
(void)if_simloop(ifp, n, dst->sa_family, hlen);
} else
ifp->if_iqdrops++;
} else if (bcmp(eh->ether_dhost, eh->ether_shost,
ETHER_ADDR_LEN) == 0) {
update_mbuf_csumflags(m, m);
(void) if_simloop(ifp, m, dst->sa_family, hlen);
return (0); /* XXX */
}
}
/*
* Bridges require special output handling.
*/
if (ifp->if_bridge) {
BRIDGE_OUTPUT(ifp, m, error);
return (error);
}
#if defined(INET) || defined(INET6)
if (ifp->if_carp &&
(error = (*carp_output_p)(ifp, m, dst)))
goto bad;
#endif
/* Handle ng_ether(4) processing, if any */
if (IFP2AC(ifp)->ac_netgraph != NULL) {
KASSERT(ng_ether_output_p != NULL,
("ng_ether_output_p is NULL"));
if ((error = (*ng_ether_output_p)(ifp, &m)) != 0) {
bad: if (m != NULL)
m_freem(m);
return (error);
}
if (m == NULL)
return (0);
}
/* Continue with link-layer output */
return ether_output_frame(ifp, m);
}
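A recurring pattern in Example 1: the fast path checks a cheap per-interface flag (IFP2AC(ifp)->ac_netgraph) and the KASSERT documents, and under INVARIANTS enforces, the implied pairing that the ng_ether(4) hook pointer is valid whenever that flag is set. Below is a hedged userspace sketch of that flag-implies-hook invariant; every name in it (hook_output_p, ifnet_stub) is hypothetical.

/*
 * Sketch: a loadable module registers a hook at load time; the hot
 * path checks only a per-object flag and asserts the pairing.
 */
#include <assert.h>
#include <stddef.h>

#define KASSERT(exp, msg) assert(exp)	/* userspace stand-in */

static int (*hook_output_p)(int unit);	/* set by the module on load */

struct ifnet_stub {
	void *ac_netgraph;		/* non-NULL once the module attached */
	int unit;
};

static int
ether_output_stub(struct ifnet_stub *ifp)
{
	if (ifp->ac_netgraph != NULL) {
		/* Flag set => hook registered; anything else is a bug. */
		KASSERT(hook_output_p != NULL, ("hook_output_p is NULL"));
		return (*hook_output_p)(ifp->unit);
	}
	return 0;
}

static int
module_hook(int unit)
{
	return unit;
}

int
main(void)
{
	struct ifnet_stub ifp = { .ac_netgraph = &ifp, .unit = 3 };
	hook_output_p = module_hook;	/* simulate module load */
	return ether_output_stub(&ifp) == 3 ? 0 : 1;
}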
Example 2: udp6_send
static int
udp6_send(struct socket *so, int flags, struct mbuf *m,
struct sockaddr *addr, struct mbuf *control, struct thread *td)
{
struct inpcb *inp;
struct inpcbinfo *pcbinfo;
int error = 0;
pcbinfo = get_inpcbinfo(so->so_proto->pr_protocol);
inp = sotoinpcb(so);
KASSERT(inp != NULL, ("udp6_send: inp == NULL"));
INP_WLOCK(inp);
if (addr) {
if (addr->sa_len != sizeof(struct sockaddr_in6)) {
error = EINVAL;
goto bad;
}
if (addr->sa_family != AF_INET6) {
error = EAFNOSUPPORT;
goto bad;
}
}
#ifdef INET
if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0) {
int hasv4addr;
struct sockaddr_in6 *sin6 = 0;
if (addr == 0)
hasv4addr = (inp->inp_vflag & INP_IPV4);
else {
sin6 = (struct sockaddr_in6 *)addr;
hasv4addr = IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)
? 1 : 0;
}
if (hasv4addr) {
struct pr_usrreqs *pru;
/*
* XXXRW: We release UDP-layer locks before calling
* udp_send() in order to avoid recursion. However,
* this does mean there is a short window where inp's
* fields are unstable. Could this lead to a
* potential race in which the factors causing us to
* select the UDPv4 output routine are invalidated?
*/
INP_WUNLOCK(inp);
if (sin6)
in6_sin6_2_sin_in_sock(addr);
pru = inetsw[ip_protox[IPPROTO_UDP]].pr_usrreqs;
/* addr will just be freed in sendit(). */
return ((*pru->pru_send)(so, flags, m, addr, control,
td));
}
}
#endif
#ifdef MAC
mac_inpcb_create_mbuf(inp, m);
#endif
INP_HASH_WLOCK(pcbinfo);
error = udp6_output(inp, m, addr, control, td);
INP_HASH_WUNLOCK(pcbinfo);
#ifdef INET
#endif
INP_WUNLOCK(inp);
return (error);
bad:
INP_WUNLOCK(inp);
m_freem(m);
return (error);
}
Example 3: drmfb_attach
int
drmfb_attach(struct drmfb_softc *sc, const struct drmfb_attach_args *da)
{
const struct drm_fb_helper_surface_size *const sizes = da->da_fb_sizes;
const prop_dictionary_t dict = device_properties(da->da_dev);
#if NVGA > 0
struct drm_device *const dev = da->da_fb_helper->dev;
#endif
static const struct genfb_ops zero_genfb_ops;
struct genfb_ops genfb_ops = zero_genfb_ops;
enum { CONS_VGA, CONS_GENFB, CONS_NONE } what_was_cons;
int error;
/* genfb requires this. */
KASSERTMSG((void *)&sc->sc_genfb == device_private(da->da_dev),
"drmfb_softc must be first member of device softc");
sc->sc_da = *da;
prop_dictionary_set_uint32(dict, "width", sizes->surface_width);
prop_dictionary_set_uint32(dict, "height", sizes->surface_height);
prop_dictionary_set_uint8(dict, "depth", sizes->surface_bpp);
prop_dictionary_set_uint16(dict, "linebytes",
roundup2((sizes->surface_width * howmany(sizes->surface_bpp, 8)),
64));
prop_dictionary_set_uint32(dict, "address", 0); /* XXX >32-bit */
CTASSERT(sizeof(uintptr_t) <= sizeof(uint64_t));
prop_dictionary_set_uint64(dict, "virtual_address",
(uint64_t)(uintptr_t)da->da_fb_vaddr);
prop_dictionary_set_uint64(dict, "mode_callback",
(uint64_t)(uintptr_t)&drmfb_genfb_mode_callback);
/* XXX Whattakludge! */
#if NVGA > 0
if ((da->da_params->dp_is_vga_console != NULL) &&
(*da->da_params->dp_is_vga_console)(dev)) {
what_was_cons = CONS_VGA;
prop_dictionary_set_bool(dict, "is_console", true);
vga_cndetach();
if (da->da_params->dp_disable_vga)
(*da->da_params->dp_disable_vga)(dev);
} else
#endif
if (genfb_is_console() && genfb_is_enabled()) {
what_was_cons = CONS_GENFB;
prop_dictionary_set_bool(dict, "is_console", true);
} else {
what_was_cons = CONS_NONE;
prop_dictionary_set_bool(dict, "is_console", false);
}
sc->sc_genfb.sc_dev = sc->sc_da.da_dev;
genfb_init(&sc->sc_genfb);
genfb_ops.genfb_ioctl = drmfb_genfb_ioctl;
genfb_ops.genfb_mmap = drmfb_genfb_mmap;
genfb_ops.genfb_enable_polling = drmfb_genfb_enable_polling;
genfb_ops.genfb_disable_polling = drmfb_genfb_disable_polling;
error = genfb_attach(&sc->sc_genfb, &genfb_ops);
if (error) {
aprint_error_dev(sc->sc_da.da_dev,
"failed to attach genfb: %d\n", error);
goto fail0;
}
/* Success! */
return 0;
fail0: KASSERT(error);
/* XXX Restore console... */
switch (what_was_cons) {
case CONS_VGA:
break;
case CONS_GENFB:
break;
case CONS_NONE:
break;
default:
break;
}
return error;
}
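Example 3 is NetBSD code and uses KASSERTMSG, which takes the format string and its arguments directly rather than FreeBSD's parenthesized message tuple. A minimal userspace approximation of the layout check it performs follows; the macro definition below is a stand-in for illustration, not the kernel's.

/*
 * NetBSD-flavored sketch: KASSERTMSG(expr, fmt, ...).
 */
#include <stdio.h>
#include <stdlib.h>

#define KASSERTMSG(exp, ...) \
	do { if (!(exp)) { fprintf(stderr, __VA_ARGS__); \
	    fputc('\n', stderr); abort(); } } while (0)

struct softc { int first; };
struct device { struct softc sc; };

int
main(void)
{
	struct device dev;
	/* Mirrors drmfb_attach's check: the softc member handed to the
	 * framebuffer layer must be the first member of the device softc,
	 * so both addresses coincide. */
	KASSERTMSG((void *)&dev.sc == (void *)&dev,
	    "softc must be first member of device softc");
	return 0;
}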
Example 4: srat_parse_entry
static void
srat_parse_entry(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
ACPI_SRAT_CPU_AFFINITY *cpu;
ACPI_SRAT_X2APIC_CPU_AFFINITY *x2apic;
ACPI_SRAT_MEM_AFFINITY *mem;
int domain, i, slot;
switch (entry->Type) {
case ACPI_SRAT_TYPE_CPU_AFFINITY:
cpu = (ACPI_SRAT_CPU_AFFINITY *)entry;
domain = cpu->ProximityDomainLo |
cpu->ProximityDomainHi[0] << 8 |
cpu->ProximityDomainHi[1] << 16 |
cpu->ProximityDomainHi[2] << 24;
if (bootverbose)
printf("SRAT: Found CPU APIC ID %u domain %d: %s\n",
cpu->ApicId, domain,
(cpu->Flags & ACPI_SRAT_CPU_ENABLED) ?
"enabled" : "disabled");
if (!(cpu->Flags & ACPI_SRAT_CPU_ENABLED))
break;
KASSERT(!cpus[cpu->ApicId].enabled,
("Duplicate local APIC ID %u", cpu->ApicId));
cpus[cpu->ApicId].domain = domain;
cpus[cpu->ApicId].enabled = 1;
break;
case ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY:
x2apic = (ACPI_SRAT_X2APIC_CPU_AFFINITY *)entry;
if (bootverbose)
printf("SRAT: Found CPU APIC ID %u domain %d: %s\n",
x2apic->ApicId, x2apic->ProximityDomain,
(x2apic->Flags & ACPI_SRAT_CPU_ENABLED) ?
"enabled" : "disabled");
if (!(x2apic->Flags & ACPI_SRAT_CPU_ENABLED))
break;
KASSERT(!cpus[x2apic->ApicId].enabled,
("Duplicate local APIC ID %u", x2apic->ApicId));
cpus[x2apic->ApicId].domain = x2apic->ProximityDomain;
cpus[x2apic->ApicId].enabled = 1;
break;
case ACPI_SRAT_TYPE_MEMORY_AFFINITY:
mem = (ACPI_SRAT_MEM_AFFINITY *)entry;
if (bootverbose)
printf(
"SRAT: Found memory domain %d addr %jx len %jx: %s\n",
mem->ProximityDomain, (uintmax_t)mem->BaseAddress,
(uintmax_t)mem->Length,
(mem->Flags & ACPI_SRAT_MEM_ENABLED) ?
"enabled" : "disabled");
if (!(mem->Flags & ACPI_SRAT_MEM_ENABLED))
break;
if (num_mem == VM_PHYSSEG_MAX) {
printf("SRAT: Too many memory regions\n");
*(int *)arg = ENXIO;
break;
}
slot = num_mem;
for (i = 0; i < num_mem; i++) {
if (mem_info[i].end <= mem->BaseAddress)
continue;
if (mem_info[i].start <
(mem->BaseAddress + mem->Length)) {
printf("SRAT: Overlapping memory entries\n");
*(int *)arg = ENXIO;
return;
}
slot = i;
}
for (i = num_mem; i > slot; i--)
mem_info[i] = mem_info[i - 1];
mem_info[slot].start = mem->BaseAddress;
mem_info[slot].end = mem->BaseAddress + mem->Length;
mem_info[slot].domain = mem->ProximityDomain;
num_mem++;
break;
}
}
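The KASSERTs in Example 4 catch duplicate table entries: a second SRAT record claiming an already-enabled APIC ID indicates corrupt firmware tables, and an INVARIANTS kernel panics immediately rather than silently overwriting state. A sketch of that claim-once pattern, with assert(3) standing in for KASSERT and a hypothetical table size:

#include <assert.h>

#define MAX_ID 8

static struct { int enabled; int domain; } cpus[MAX_ID];

static void
claim_cpu(int apic_id, int domain)
{
	assert(apic_id >= 0 && apic_id < MAX_ID);
	/* KASSERT(!cpus[apic_id].enabled,
	 *     ("Duplicate local APIC ID %u", apic_id)); */
	assert(!cpus[apic_id].enabled);
	cpus[apic_id].domain = domain;
	cpus[apic_id].enabled = 1;
}

int
main(void)
{
	claim_cpu(0, 0);
	claim_cpu(1, 0);
	/* claim_cpu(0, 1); would trip the duplicate assertion */
	return 0;
}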
Example 5: pci_list_vpd
static int
pci_list_vpd(device_t dev, struct pci_list_vpd_io *lvio)
{
struct pci_vpd_element vpd_element, *vpd_user;
struct pcicfg_vpd *vpd;
size_t len;
int error, i;
vpd = pci_fetch_vpd_list(dev);
if (vpd->vpd_reg == 0 || vpd->vpd_ident == NULL)
return (ENXIO);
/*
* Calculate the amount of space needed in the data buffer. An
* identifier element is always present followed by the read-only
* and read-write keywords.
*/
len = sizeof(struct pci_vpd_element) + strlen(vpd->vpd_ident);
for (i = 0; i < vpd->vpd_rocnt; i++)
len += sizeof(struct pci_vpd_element) + vpd->vpd_ros[i].len;
for (i = 0; i < vpd->vpd_wcnt; i++)
len += sizeof(struct pci_vpd_element) + vpd->vpd_w[i].len;
if (lvio->plvi_len == 0) {
lvio->plvi_len = len;
return (0);
}
if (lvio->plvi_len < len) {
lvio->plvi_len = len;
return (ENOMEM);
}
/*
* Copyout the identifier string followed by each keyword and
* value.
*/
vpd_user = lvio->plvi_data;
vpd_element.pve_keyword[0] = '\0';
vpd_element.pve_keyword[1] = '\0';
vpd_element.pve_flags = PVE_FLAG_IDENT;
vpd_element.pve_datalen = strlen(vpd->vpd_ident);
error = copyout(&vpd_element, vpd_user, sizeof(vpd_element));
if (error)
return (error);
error = copyout(vpd->vpd_ident, vpd_user->pve_data,
strlen(vpd->vpd_ident));
if (error)
return (error);
vpd_user = PVE_NEXT(vpd_user);
vpd_element.pve_flags = 0;
for (i = 0; i < vpd->vpd_rocnt; i++) {
vpd_element.pve_keyword[0] = vpd->vpd_ros[i].keyword[0];
vpd_element.pve_keyword[1] = vpd->vpd_ros[i].keyword[1];
vpd_element.pve_datalen = vpd->vpd_ros[i].len;
error = copyout(&vpd_element, vpd_user, sizeof(vpd_element));
if (error)
return (error);
error = copyout(vpd->vpd_ros[i].value, vpd_user->pve_data,
vpd->vpd_ros[i].len);
if (error)
return (error);
vpd_user = PVE_NEXT(vpd_user);
}
vpd_element.pve_flags = PVE_FLAG_RW;
for (i = 0; i < vpd->vpd_wcnt; i++) {
vpd_element.pve_keyword[0] = vpd->vpd_w[i].keyword[0];
vpd_element.pve_keyword[1] = vpd->vpd_w[i].keyword[1];
vpd_element.pve_datalen = vpd->vpd_w[i].len;
error = copyout(&vpd_element, vpd_user, sizeof(vpd_element));
if (error)
return (error);
error = copyout(vpd->vpd_w[i].value, vpd_user->pve_data,
vpd->vpd_w[i].len);
if (error)
return (error);
vpd_user = PVE_NEXT(vpd_user);
}
KASSERT((char *)vpd_user - (char *)lvio->plvi_data == len,
("length mismatch"));
lvio->plvi_len = len;
return (0);
}
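The closing KASSERT in Example 5 cross-checks two independent computations: len is sized in a first pass, and the copyout loop must advance vpd_user by exactly that many bytes. A small userspace sketch of the same two-pass agreement check, with a hypothetical item list and assert(3) in place of KASSERT:

#include <assert.h>
#include <string.h>

static size_t
emit(char *dst, const char **items, int n)
{
	char *p = dst;
	for (int i = 0; i < n; i++) {
		size_t l = strlen(items[i]);
		memcpy(p, items[i], l);
		p += l;
	}
	return (size_t)(p - dst);
}

int
main(void)
{
	const char *items[] = { "id", "ro", "rw" };
	char buf[64];
	size_t len = 0;

	/* Pass 1: size the output. */
	for (int i = 0; i < 3; i++)
		len += strlen(items[i]);

	/* Pass 2: produce it, then assert both passes agree. */
	size_t written = emit(buf, items, 3);
	assert(written == len);	/* KASSERT(... == len, ("length mismatch")) */
	return 0;
}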
Example 6: send_flowc_wr
void
send_flowc_wr(struct toepcb *toep, struct flowc_tx_params *ftxp)
{
struct wrqe *wr;
struct fw_flowc_wr *flowc;
unsigned int nparams = ftxp ? 8 : 6, flowclen;
struct port_info *pi = toep->port;
struct adapter *sc = pi->adapter;
unsigned int pfvf = G_FW_VIID_PFN(pi->viid) << S_FW_VIID_PFN;
struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];
KASSERT(!(toep->flags & TPF_FLOWC_WR_SENT),
("%s: flowc for tid %u sent already", __func__, toep->tid));
flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);
wr = alloc_wrqe(roundup2(flowclen, 16), toep->ofld_txq);
if (wr == NULL) {
/* XXX */
panic("%s: allocation failure.", __func__);
}
flowc = wrtod(wr);
memset(flowc, 0, wr->wr_len);
flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
V_FW_FLOWC_WR_NPARAMS(nparams));
flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) |
V_FW_WR_FLOWID(toep->tid));
flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
flowc->mnemval[0].val = htobe32(pfvf);
flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
flowc->mnemval[1].val = htobe32(pi->tx_chan);
flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
flowc->mnemval[2].val = htobe32(pi->tx_chan);
flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
flowc->mnemval[3].val = htobe32(toep->ofld_rxq->iq.abs_id);
if (ftxp) {
uint32_t sndbuf = min(ftxp->snd_space, sc->tt.sndbuf);
flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
flowc->mnemval[4].val = htobe32(ftxp->snd_nxt);
flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
flowc->mnemval[5].val = htobe32(ftxp->rcv_nxt);
flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
flowc->mnemval[6].val = htobe32(sndbuf);
flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
flowc->mnemval[7].val = htobe32(ftxp->mss);
CTR6(KTR_CXGBE,
"%s: tid %u, mss %u, sndbuf %u, snd_nxt 0x%x, rcv_nxt 0x%x",
__func__, toep->tid, ftxp->mss, sndbuf, ftxp->snd_nxt,
ftxp->rcv_nxt);
} else {
flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDBUF;
flowc->mnemval[4].val = htobe32(512);
flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_MSS;
flowc->mnemval[5].val = htobe32(512);
CTR2(KTR_CXGBE, "%s: tid %u", __func__, toep->tid);
}
txsd->tx_credits = howmany(flowclen, 16);
txsd->plen = 0;
KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
("%s: not enough credits (%d)", __func__, toep->tx_credits));
toep->tx_credits -= txsd->tx_credits;
if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
toep->txsd_pidx = 0;
toep->txsd_avail--;
toep->flags |= TPF_FLOWC_WR_SENT;
t4_wrq_tx(sc, wr);
}
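Example 6 asserts a resource-accounting invariant just before spending the resource: the caller is responsible for flow control, so running out of tx credits or send descriptors at this point is a programming error, not a runtime condition to handle. A minimal sketch of that assert-then-deduct discipline; the fields are hypothetical.

#include <assert.h>

struct tx_state {
	int credits;	/* work-request credits remaining */
	int avail;	/* free send descriptors */
};

static void
consume(struct tx_state *t, int cost)
{
	/* KASSERT(t->credits >= cost && t->avail > 0,
	 *     ("not enough credits (%d)", t->credits)); */
	assert(t->credits >= cost && t->avail > 0);
	t->credits -= cost;
	t->avail--;
}

int
main(void)
{
	struct tx_state t = { .credits = 4, .avail = 2 };
	consume(&t, 3);
	/* consume(&t, 3); would trip the credit assertion */
	return 0;
}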
Example 7: sys_execv
int sys_execv(userptr_t progname, userptr_t *arguments) {
struct proc *proc = curproc;
struct addrspace *as;
struct vnode *v;
vaddr_t entrypoint, stackptr;
int result;
//kprintf("****EXECV]***** process-%d trying to exec", proc->pid);
//lock_acquire(execlock);
//kprintf("****EXECV]***** process-%d acquired exec lock", proc->pid);
if(progname == NULL || progname == (void *)0x80000000 || progname == (void *)0x40000000) {
return EFAULT;
}
if(arguments == NULL || arguments == (void *)0x80000000 || arguments == (void *)0x40000000) {
return EFAULT;
}
/* This process should have an address space copied during fork */
KASSERT(proc != NULL);
char *_progname;
size_t size;
int i=0, count=0;
_progname = (char *) kmalloc(sizeof(char)*PATH_MAX);
result = copyinstr(progname, _progname, PATH_MAX, &size);
if(result) {
kfree(_progname);
return EFAULT;
}
if(strlen(_progname) == 0) {
kfree(_progname);
return EINVAL;
}
if(swapping_started == true) {
}
kfree(_progname);
char *args = (char *) kmalloc(sizeof(char)*ARG_MAX);
result = copyinstr((const_userptr_t)arguments, args, ARG_MAX, &size);
if(result) {
kfree(args);
return EFAULT;
}
/* Copy the user arguments on to the kernel */
int offset = 0;
while((char *) arguments[count] != NULL) {
result = copyinstr((const_userptr_t) arguments[count], args+offset, ARG_MAX, &size);
if(result) {
kfree(args);
return EFAULT;
}
offset += size;
count++;
}
/* Open the file */
result = vfs_open((char *)progname, O_RDONLY, 0, &v);
if(result) {
kfree(args);
return result;
}
/* Destroy the current address space and Create a new address space */
as_destroy(proc->p_addrspace);
proc->p_addrspace = NULL;
KASSERT(proc_getas() == NULL);
as = as_create();
if(as == NULL) {
kfree(args);
vfs_close(v);
return ENOMEM;
}
/* Switch to it and activate it */
proc_setas(as);
as_activate();
/* Load the executable. */
// kprintf("free pages available before load_elf : %d \n", coremap_free_bytes()/4096);
result = load_elf(v, &entrypoint);
if(result) {
kfree(args);
vfs_close(v);
return result;
}
/* Done with the file now */
vfs_close(v);
/* Define the user stack in the address space */
result = as_define_stack(as, &stackptr);
if(result) {
kfree(args);
return result;
}
i = 0;
//......... part of the code omitted here .........
Example 8: testa
static
void
testa(struct array *a)
{
int testarray[TESTSIZE];
int i, j, n, r, *p;
for (i=0; i<TESTSIZE; i++) {
testarray[i]=i;
}
n = array_num(a);
KASSERT(n==0);
for (i=0; i<TESTSIZE; i++) {
r = array_add(a, &testarray[i], NULL);
KASSERT(r==0);
n = array_num(a);
KASSERT(n==i+1);
}
n = array_num(a);
KASSERT(n==TESTSIZE);
for (i=0; i<TESTSIZE; i++) {
p = array_get(a, i);
KASSERT(*p == i);
}
n = array_num(a);
KASSERT(n==TESTSIZE);
for (j=0; j<TESTSIZE*4; j++) {
i = random()%TESTSIZE;
p = array_get(a, i);
KASSERT(*p == i);
}
n = array_num(a);
KASSERT(n==TESTSIZE);
for (i=0; i<TESTSIZE; i++) {
array_set(a, i, &testarray[TESTSIZE-i-1]);
}
for (i=0; i<TESTSIZE; i++) {
p = array_get(a, i);
KASSERT(*p == TESTSIZE-i-1);
}
r = array_setsize(a, TESTSIZE/2);
KASSERT(r==0);
for (i=0; i<TESTSIZE/2; i++) {
p = array_get(a, i);
KASSERT(*p == TESTSIZE-i-1);
}
array_remove(a, 1);
for (i=1; i<TESTSIZE/2 - 1; i++) {
p = array_get(a, i);
KASSERT(*p == TESTSIZE-i-2);
}
p = array_get(a, 0);
KASSERT(*p == TESTSIZE-1);
array_setsize(a, 2);
p = array_get(a, 0);
KASSERT(*p == TESTSIZE-1);
p = array_get(a, 1);
KASSERT(*p == TESTSIZE-3);
array_set(a, 1, NULL);
array_setsize(a, 2);
p = array_get(a, 0);
KASSERT(*p == TESTSIZE-1);
p = array_get(a, 1);
KASSERT(p==NULL);
array_setsize(a, TESTSIZE*10);
p = array_get(a, 0);
KASSERT(*p == TESTSIZE-1);
p = array_get(a, 1);
KASSERT(p==NULL);
}
Example 9: ofw_bus_search_intrmap
/*
* Map an interrupt using the firmware reg, interrupt-map and
* interrupt-map-mask properties.
* The interrupt property to be mapped must be of size intrsz, and pointed to
* by intr. The regs property of the node for which the mapping is done must
* be passed as regs. This property is an array of register specifications;
* the size of the address part of such a specification must be passed as
* physsz. Only the first element of the property is used.
* imap and imapsz hold the interrupt map and its size.
* imapmsk is a pointer to the interrupt-map-mask property, which must have
* a size of physsz + intrsz; it may be NULL, in which case a full mask is
* assumed.
* maskbuf must point to a buffer of length physsz + intrsz.
* The interrupt is returned in result, which must point to a buffer of length
* rintrsz (which gives the expected size of the mapped interrupt).
* Returns number of cells in the interrupt if a mapping was found, 0 otherwise.
*/
int
ofw_bus_search_intrmap(void *intr, int intrsz, void *regs, int physsz,
void *imap, int imapsz, void *imapmsk, void *maskbuf, void *result,
int rintrsz, phandle_t *iparent)
{
phandle_t parent;
uint8_t *ref = maskbuf;
uint8_t *uiintr = intr;
uint8_t *uiregs = regs;
uint8_t *uiimapmsk = imapmsk;
uint8_t *mptr;
pcell_t paddrsz;
pcell_t pintrsz;
int i, tsz;
if (imapmsk != NULL) {
for (i = 0; i < physsz; i++)
ref[i] = uiregs[i] & uiimapmsk[i];
for (i = 0; i < intrsz; i++)
ref[physsz + i] = uiintr[i] & uiimapmsk[physsz + i];
} else {
bcopy(regs, ref, physsz);
bcopy(intr, ref + physsz, intrsz);
}
mptr = imap;
i = imapsz;
paddrsz = 0;
while (i > 0) {
bcopy(mptr + physsz + intrsz, &parent, sizeof(parent));
#ifndef OFW_IMAP_NO_IPARENT_ADDR_CELLS
/*
* Find if we need to read the parent address data.
* CHRP-derived OF bindings, including ePAPR-compliant FDTs,
* use this as an optional part of the specifier.
*/
if (OF_getencprop(OF_node_from_xref(parent),
"#address-cells", &paddrsz, sizeof(paddrsz)) == -1)
paddrsz = 0; /* default */
paddrsz *= sizeof(pcell_t);
#endif
if (OF_searchencprop(OF_node_from_xref(parent),
"#interrupt-cells", &pintrsz, sizeof(pintrsz)) == -1)
pintrsz = 1; /* default */
pintrsz *= sizeof(pcell_t);
/* Compute the map stride size. */
tsz = physsz + intrsz + sizeof(phandle_t) + paddrsz + pintrsz;
KASSERT(i >= tsz, ("ofw_bus_search_intrmap: truncated map"));
if (bcmp(ref, mptr, physsz + intrsz) == 0) {
bcopy(mptr + physsz + intrsz + sizeof(parent) + paddrsz,
result, MIN(rintrsz, pintrsz));
if (iparent != NULL)
*iparent = parent;
return (pintrsz/sizeof(pcell_t));
}
mptr += tsz;
i -= tsz;
}
return (0);
}
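The KASSERT inside Example 9's loop guards against a truncated interrupt map: before one entry of tsz bytes is consumed, at least tsz bytes must remain in the buffer. A generic sketch of that record-walking guard, with a hypothetical fixed-size record and assert(3) in place of KASSERT:

#include <assert.h>
#include <string.h>

struct rec { int key; int val; };

static int
find(const unsigned char *map, int mapsz, int key)
{
	const int tsz = (int)sizeof(struct rec);

	while (mapsz > 0) {
		struct rec r;

		/* KASSERT(i >= tsz, ("...: truncated map")); */
		assert(mapsz >= tsz);
		memcpy(&r, map, (size_t)tsz);
		if (r.key == key)
			return r.val;
		map += tsz;
		mapsz -= tsz;
	}
	return -1;
}

int
main(void)
{
	struct rec map[] = { { 1, 10 }, { 2, 20 } };

	return find((const unsigned char *)map, (int)sizeof(map), 2) == 20 ?
	    0 : 1;
}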
Example 10: bcma_erom_parse_corecfg
/**
* Parse the next core entry from the EROM table and produce a bcma_corecfg
* to be owned by the caller.
*
* @param erom EROM read state.
* @param[out] result On success, the core's device info. The caller inherits
* ownership of this allocation.
*
* @return If successful, returns 0. If the end of the EROM table is hit,
* ENOENT will be returned. On error, returns a non-zero error value.
*/
int
bcma_erom_parse_corecfg(struct bcma_erom *erom, struct bcma_corecfg **result)
{
struct bcma_corecfg *cfg;
struct bcma_erom_core core;
uint8_t first_region_type;
bus_size_t initial_offset;
u_int core_index;
int core_unit;
int error;
cfg = NULL;
initial_offset = bcma_erom_tell(erom);
/* Parse the next core entry */
if ((error = bcma_erom_parse_core(erom, &core)))
return (error);
/* Determine the core's index and unit numbers */
bcma_erom_reset(erom);
core_unit = 0;
core_index = 0;
for (; bcma_erom_tell(erom) != initial_offset; core_index++) {
struct bcma_erom_core prev_core;
/* Parse next core */
if ((error = erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE)))
return (error);
if ((error = bcma_erom_parse_core(erom, &prev_core)))
return (error);
/* Is earlier unit? */
if (core.vendor == prev_core.vendor &&
core.device == prev_core.device)
{
core_unit++;
}
/* Seek to next core */
if ((error = erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE)))
return (error);
}
/* We already parsed the core descriptor */
if ((error = erom_skip_core(erom)))
return (error);
/* Allocate our corecfg */
cfg = bcma_alloc_corecfg(core_index, core_unit, core.vendor,
core.device, core.rev);
if (cfg == NULL)
return (ENOMEM);
/* These are 5-bit values in the EROM table, and should never be able
* to overflow BCMA_PID_MAX. */
KASSERT(core.num_mport <= BCMA_PID_MAX, ("unsupported mport count"));
KASSERT(core.num_dport <= BCMA_PID_MAX, ("unsupported dport count"));
KASSERT(core.num_mwrap + core.num_swrap <= BCMA_PID_MAX,
("unsupported wport count"));
if (bootverbose) {
EROM_LOG(erom,
"core%u: %s %s (cid=%hx, rev=%hu, unit=%d)\n",
core_index,
bhnd_vendor_name(core.vendor),
bhnd_find_core_name(core.vendor, core.device),
core.device, core.rev, core_unit);
}
cfg->num_master_ports = core.num_mport;
cfg->num_dev_ports = 0; /* determined below */
cfg->num_bridge_ports = 0; /* determined below */
cfg->num_wrapper_ports = core.num_mwrap + core.num_swrap;
/* Parse Master Port Descriptors */
for (uint8_t i = 0; i < core.num_mport; i++) {
struct bcma_mport *mport;
struct bcma_erom_mport mpd;
/* Parse the master port descriptor */
error = bcma_erom_parse_mport(erom, &mpd);
if (error)
goto failed;
/* Initialize a new bus mport structure */
mport = malloc(sizeof(struct bcma_mport), M_BHND, M_NOWAIT);
if (mport == NULL) {
error = ENOMEM;
//......... part of the code omitted here .........
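Example 10's KASSERTs document a property of the wire format rather than a runtime error case: the port counts come from 5-bit EROM fields, so they can never exceed BCMA_PID_MAX, and the assertions exist to catch decoder bugs. A sketch of asserting decoded bit-field bounds; the field layout and PID_MAX value below are hypothetical.

#include <assert.h>
#include <stdint.h>

#define PID_MAX 0x1f	/* maximum value of a 5-bit field (assumed) */

struct core_desc {
	unsigned num_mport;
	unsigned num_dport;
};

static struct core_desc
decode_core(uint32_t word)
{
	struct core_desc c;

	c.num_mport = (word >> 4) & 0x1f;	/* 5-bit field */
	c.num_dport = (word >> 9) & 0x1f;	/* 5-bit field */
	/* By construction these cannot overflow PID_MAX; the assert
	 * encodes that reasoning and catches decoder bugs. */
	assert(c.num_mport <= PID_MAX && c.num_dport <= PID_MAX);
	return c;
}

int
main(void)
{
	struct core_desc c = decode_core(0x1234);

	return c.num_mport <= PID_MAX ? 0 : 1;
}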
Example 11: trap
//......... part of the code omitted here .........
goto userout;
#else /* !POWERFAIL_NMI */
/* machine/parity/power fail/"kitchen sink" faults */
if (isa_nmi(code) == 0) {
#ifdef KDB
/*
* NMI can be hooked up to a pushbutton
* for debugging.
*/
if (kdb_on_nmi) {
printf ("NMI ... going to debugger\n");
kdb_trap(type, 0, frame);
}
#endif /* KDB */
goto userout;
} else if (panic_on_nmi)
panic("NMI indicates hardware failure");
break;
#endif /* POWERFAIL_NMI */
#endif /* DEV_ISA */
case T_OFLOW: /* integer overflow fault */
ucode = FPE_INTOVF;
i = SIGFPE;
break;
case T_BOUND: /* bounds check fault */
ucode = FPE_FLTSUB;
i = SIGFPE;
break;
case T_DNA:
#ifdef DEV_NPX
KASSERT(PCB_USER_FPU(td->td_pcb),
("kernel FPU ctx has leaked"));
/* transparent fault (due to context switch "late") */
if (npxdna())
goto userout;
#endif
uprintf("pid %d killed due to lack of floating point\n",
p->p_pid);
i = SIGKILL;
ucode = 0;
break;
case T_FPOPFLT: /* FPU operand fetch fault */
ucode = ILL_COPROC;
i = SIGILL;
break;
case T_XMMFLT: /* SIMD floating-point exception */
#if defined(DEV_NPX) && !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
ucode = npxtrap_sse();
if (ucode == -1)
goto userout;
#else
ucode = 0;
#endif
i = SIGFPE;
break;
#ifdef KDTRACE_HOOKS
case T_DTRACE_RET:
enable_intr();
fill_frame_regs(frame, &regs);
if (dtrace_return_probe_ptr != NULL &&
dtrace_return_probe_ptr(&regs) == 0)
Example 12: cv_broadcast
void
cv_broadcast(struct cv *cv, struct lock *lock)
{
KASSERT( lock_do_i_hold(lock) );
wchan_wakeall(cv->cv_wchan);
}
Example 13: cv_signal
void
cv_signal(struct cv *cv, struct lock *lock)
{
KASSERT( lock_do_i_hold(lock) );
wchan_wakeone(cv->cv_wchan);
}
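Examples 12 and 13 assert the classic condition-variable contract: the caller must hold the associated lock when waking waiters. The sketch below is a pthreads analogue; since pthread mutexes expose no lock_do_i_hold(), ownership is tracked with an explicit flag, which is an assumption of this sketch rather than OS/161 or pthreads API.

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

struct lock {
	pthread_mutex_t mtx;
	pthread_t owner;
	bool held;
};

static bool
lock_do_i_hold(struct lock *lk)
{
	return lk->held && pthread_equal(lk->owner, pthread_self());
}

static void
lock_acquire(struct lock *lk)
{
	pthread_mutex_lock(&lk->mtx);
	lk->owner = pthread_self();
	lk->held = true;
}

static void
lock_release(struct lock *lk)
{
	lk->held = false;
	pthread_mutex_unlock(&lk->mtx);
}

static void
cv_signal_checked(pthread_cond_t *cv, struct lock *lk)
{
	/* KASSERT( lock_do_i_hold(lock) ); */
	assert(lock_do_i_hold(lk));
	pthread_cond_signal(cv);
}

int
main(void)
{
	struct lock lk = { .mtx = PTHREAD_MUTEX_INITIALIZER, .held = false };
	pthread_cond_t cv = PTHREAD_COND_INITIALIZER;

	lock_acquire(&lk);
	cv_signal_checked(&cv, &lk);	/* fine: the lock is held */
	lock_release(&lk);
	return 0;
}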
Example 14: ether_input_internal
//......... part of the code omitted here .........
CURVNET_SET_QUIET(ifp->if_vnet);
if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
if (ETHER_IS_BROADCAST(eh->ether_dhost))
m->m_flags |= M_BCAST;
else
m->m_flags |= M_MCAST;
ifp->if_imcasts++;
}
#ifdef MAC
/*
* Tag the mbuf with an appropriate MAC label before any other
* consumers can get to it.
*/
mac_ifnet_create_mbuf(ifp, m);
#endif
/*
* Give bpf a chance at the packet.
*/
ETHER_BPF_MTAP(ifp, m);
/*
* If the CRC is still on the packet, trim it off. We do this once
* and once only in case we are re-entered. Nothing else on the
* Ethernet receive path expects to see the FCS.
*/
if (m->m_flags & M_HASFCS) {
m_adj(m, -ETHER_CRC_LEN);
m->m_flags &= ~M_HASFCS;
}
if (!(ifp->if_capenable & IFCAP_HWSTATS))
ifp->if_ibytes += m->m_pkthdr.len;
/* Allow monitor mode to claim this frame, after stats are updated. */
if (ifp->if_flags & IFF_MONITOR) {
m_freem(m);
CURVNET_RESTORE();
return;
}
/* Handle input from a lagg(4) port */
if (ifp->if_type == IFT_IEEE8023ADLAG) {
KASSERT(lagg_input_p != NULL,
("%s: if_lagg not loaded!", __func__));
m = (*lagg_input_p)(ifp, m);
if (m != NULL)
ifp = m->m_pkthdr.rcvif;
else {
CURVNET_RESTORE();
return;
}
}
/*
* If the hardware did not process an 802.1Q tag, do this now,
* to allow 802.1P priority frames to be passed to the main input
* path correctly.
* TODO: Deal with Q-in-Q frames, but not arbitrary nesting levels.
*/
if ((m->m_flags & M_VLANTAG) == 0 && etype == ETHERTYPE_VLAN) {
struct ether_vlan_header *evl;
if (m->m_len < sizeof(*evl) &&
(m = m_pullup(m, sizeof(*evl))) == NULL) {
#ifdef DIAGNOSTIC
if_printf(ifp, "cannot pullup VLAN header\n");
#endif
ifp->if_ierrors++;
m_freem(m);
CURVNET_RESTORE();
return;
}
evl = mtod(m, struct ether_vlan_header *);
m->m_pkthdr.ether_vtag = ntohs(evl->evl_tag);
m->m_flags |= M_VLANTAG;
bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN,
ETHER_HDR_LEN - ETHER_TYPE_LEN);
m_adj(m, ETHER_VLAN_ENCAP_LEN);
eh = mtod(m, struct ether_header *);
}
M_SETFIB(m, ifp->if_fib);
/* Allow ng_ether(4) to claim this frame. */
if (IFP2AC(ifp)->ac_netgraph != NULL) {
KASSERT(ng_ether_input_p != NULL,
("%s: ng_ether_input_p is NULL", __func__));
m->m_flags &= ~M_PROMISC;
(*ng_ether_input_p)(ifp, &m);
if (m == NULL) {
CURVNET_RESTORE();
return;
}
eh = mtod(m, struct ether_header *);
}
Example 15: uvm_unloanpage
static void
uvm_unloanpage(struct vm_page **ploans, int npages)
{
struct vm_page *pg;
kmutex_t *slock;
mutex_enter(&uvm_pageqlock);
while (npages-- > 0) {
pg = *ploans++;
/*
* do a little dance to acquire the object or anon lock
* as appropriate. we are locking in the wrong order,
* so we have to do a try-lock here.
*/
slock = NULL;
while (pg->uobject != NULL || pg->uanon != NULL) {
if (pg->uobject != NULL) {
slock = pg->uobject->vmobjlock;
} else {
slock = pg->uanon->an_lock;
}
if (mutex_tryenter(slock)) {
break;
}
/* XXX Better than yielding but inadequate. */
kpause("livelock", false, 1, &uvm_pageqlock);
slock = NULL;
}
/*
* drop our loan. if page is owned by an anon but
* PQ_ANON is not set, the page was loaned to the anon
* from an object which dropped ownership, so resolve
* this by turning the anon's loan into real ownership
* (ie. decrement loan_count again and set PQ_ANON).
* after all this, if there are no loans left, put the
* page back a paging queue (if the page is owned by
* an anon) or free it (if the page is now unowned).
*/
KASSERT(pg->loan_count > 0);
pg->loan_count--;
if (pg->uobject == NULL && pg->uanon != NULL &&
(pg->pqflags & PQ_ANON) == 0) {
KASSERT(pg->loan_count > 0);
pg->loan_count--;
pg->pqflags |= PQ_ANON;
}
if (pg->loan_count == 0 && pg->uobject == NULL &&
pg->uanon == NULL) {
KASSERT((pg->flags & PG_BUSY) == 0);
uvm_pagefree(pg);
}
if (slock != NULL) {
mutex_exit(slock);
}
}
mutex_exit(&uvm_pageqlock);
}
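The comment in Example 15 describes a lock-order workaround worth spelling out: the code already holds uvm_pageqlock but needs the page owner's lock, which by the lock order should be taken first, so it uses a try-lock loop and kpause() (which drops uvm_pageqlock while sleeping) to back off on contention. A hedged pthreads sketch of that pattern; both locks and the sleep interval are assumptions for illustration.

#include <pthread.h>
#include <time.h>

static pthread_mutex_t pageq_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t owner_lock = PTHREAD_MUTEX_INITIALIZER;

static void
lock_both_wrong_order(void)
{
	pthread_mutex_lock(&pageq_lock);
	/* Correct order is owner_lock before pageq_lock, so we must not
	 * block on owner_lock here: try it, and back off on failure. */
	while (pthread_mutex_trylock(&owner_lock) != 0) {
		struct timespec ts = { 0, 1000000 };	/* ~1 ms */

		/* kpause() drops pageq_lock while sleeping; mirror that,
		 * or the owner_lock holder could never make progress if
		 * it also needs pageq_lock. */
		pthread_mutex_unlock(&pageq_lock);
		nanosleep(&ts, NULL);
		pthread_mutex_lock(&pageq_lock);
	}
	/* ... both locks held; do the work ... */
	pthread_mutex_unlock(&owner_lock);
	pthread_mutex_unlock(&pageq_lock);
}

int
main(void)
{
	lock_both_wrong_order();
	return 0;
}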