本文整理汇总了C++中bus_dmamem_alloc函数的典型用法代码示例。如果您正苦于以下问题:C++ bus_dmamem_alloc函数的具体用法?C++ bus_dmamem_alloc怎么用?C++ bus_dmamem_alloc使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了bus_dmamem_alloc函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: advisaprobe
//.........这里部分代码省略.........
error =
bus_dma_tag_create(/*parent*/NULL,
/*alignemnt*/0,
/*boundary*/0,
lowaddr,
/*highaddr*/BUS_SPACE_MAXADDR,
/*filter*/NULL,
/*filterarg*/NULL,
maxsize,
/*nsegs*/BUS_SPACE_UNRESTRICTED,
maxsegsz,
/*flags*/0,
&adv->parent_dmat);
if (error != 0) {
printf("%s: Could not allocate DMA tag - error %d\n",
adv_name(adv), error);
adv_free(adv);
return (0);
}
adv->init_level++;
if (overrun_buf == NULL) {
/* Need to allocate our overrun buffer */
if (bus_dma_tag_create(adv->parent_dmat,
/*alignment*/8,
/*boundary*/0,
ADV_ISA_MAX_DMA_ADDR,
BUS_SPACE_MAXADDR,
/*filter*/NULL,
/*filterarg*/NULL,
ADV_OVERRUN_BSIZE,
/*nsegments*/1,
BUS_SPACE_MAXSIZE_32BIT,
/*flags*/0,
&overrun_dmat) != 0) {
adv_free(adv);
return (0);
}
if (bus_dmamem_alloc(overrun_dmat,
(void **)&overrun_buf,
BUS_DMA_NOWAIT,
&overrun_dmamap) != 0) {
bus_dma_tag_destroy(overrun_dmat);
adv_free(adv);
return (0);
}
/* And permanently map it in */
bus_dmamap_load(overrun_dmat, overrun_dmamap,
overrun_buf, ADV_OVERRUN_BSIZE,
adv_map, &overrun_physbase,
/*flags*/0);
}
adv->overrun_physbase = overrun_physbase;
if (adv_init(adv) != 0) {
adv_free(adv);
return (0);
}
switch (adv->type) {
case ADV_ISAPNP:
if (adv->chip_version == ADV_CHIP_VER_ASYN_BUG){
adv->bug_fix_control
|= ADV_BUG_FIX_ASYN_USE_SYN;
adv->fix_asyn_xfer = ~0;
}
/* Fall Through */
case ADV_ISA:
adv->max_dma_count = ADV_ISA_MAX_DMA_COUNT;
adv->max_dma_addr = ADV_ISA_MAX_DMA_ADDR;
adv_set_isa_dma_settings(adv);
break;
case ADV_VL:
adv->max_dma_count = ADV_VL_MAX_DMA_COUNT;
adv->max_dma_addr = ADV_VL_MAX_DMA_ADDR;
break;
default:
panic("advisaprobe: Invalid card type\n");
}
/* Determine our IRQ */
if (id->id_irq == 0 /* irq ? */)
id->id_irq = 1 << adv_get_chip_irq(adv);
else
adv_set_chip_irq(adv, ffs(id->id_irq) - 1);
id->id_intr = adv_isa_intr;
/* Mark as probed */
adv_isa_ioports[port_index] = 0;
return 1;
}
}
return 0;
}
示例2: iavc_pci_attach
//.........这里部分代码省略.........
if (pci_intr_map(pa, &ih)) {
aprint_error_dev(&sc->sc_dev, "couldn't map interrupt\n");
return;
}
intrstr = pci_intr_string(pc, ih);
psc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, iavc_pci_intr, psc);
if (psc->sc_ih == NULL) {
aprint_error_dev(&sc->sc_dev, "couldn't establish interrupt");
if (intrstr != NULL)
aprint_normal(" at %s", intrstr);
aprint_normal("\n");
return;
}
psc->sc_pc = pc;
aprint_normal("%s: interrupting at %s\n", device_xname(&sc->sc_dev), intrstr);
memset(&sc->sc_txq, 0, sizeof(struct ifqueue));
sc->sc_txq.ifq_maxlen = sc->sc_capi.sc_nbch * 4;
sc->sc_intr = 0;
sc->sc_state = IAVC_DOWN;
sc->sc_blocked = 0;
/* setup capi link */
sc->sc_capi.load = iavc_load;
sc->sc_capi.reg_appl = iavc_register;
sc->sc_capi.rel_appl = iavc_release;
sc->sc_capi.send = iavc_send;
sc->sc_capi.ctx = (void *) sc;
/* lock & load DMA for TX */
if ((ret = bus_dmamem_alloc(sc->dmat, IAVC_DMA_SIZE, PAGE_SIZE, 0,
&sc->txseg, 1, &sc->ntxsegs, BUS_DMA_ALLOCNOW)) != 0) {
aprint_error_dev(&sc->sc_dev, "can't allocate tx DMA memory, error = %d\n",
ret);
goto fail1;
}
if ((ret = bus_dmamem_map(sc->dmat, &sc->txseg, sc->ntxsegs,
IAVC_DMA_SIZE, &sc->sc_sendbuf, BUS_DMA_NOWAIT)) != 0) {
aprint_error_dev(&sc->sc_dev, "can't map tx DMA memory, error = %d\n",
ret);
goto fail2;
}
if ((ret = bus_dmamap_create(sc->dmat, IAVC_DMA_SIZE, 1,
IAVC_DMA_SIZE, 0, BUS_DMA_ALLOCNOW | BUS_DMA_NOWAIT,
&sc->tx_map)) != 0) {
aprint_error_dev(&sc->sc_dev, "can't create tx DMA map, error = %d\n",
ret);
goto fail3;
}
if ((ret = bus_dmamap_load(sc->dmat, sc->tx_map, sc->sc_sendbuf,
IAVC_DMA_SIZE, NULL, BUS_DMA_WRITE | BUS_DMA_NOWAIT)) != 0) {
aprint_error_dev(&sc->sc_dev, "can't load tx DMA map, error = %d\n",
ret);
goto fail4;
}
/* do the same for RX */
if ((ret = bus_dmamem_alloc(sc->dmat, IAVC_DMA_SIZE, PAGE_SIZE, 0,
&sc->rxseg, 1, &sc->nrxsegs, BUS_DMA_ALLOCNOW)) != 0) {
aprint_error_dev(&sc->sc_dev, "can't allocate rx DMA memory, error = %d\n",
示例3: tws_init
static int
tws_init(struct tws_softc *sc)
{
u_int32_t max_sg_elements;
u_int32_t dma_mem_size;
int error;
u_int32_t reg;
sc->seq_id = 0;
if ( tws_queue_depth > TWS_MAX_REQS )
tws_queue_depth = TWS_MAX_REQS;
if (tws_queue_depth < TWS_RESERVED_REQS+1)
tws_queue_depth = TWS_RESERVED_REQS+1;
sc->is64bit = (sizeof(bus_addr_t) == 8) ? true : false;
max_sg_elements = (sc->is64bit && !tws_use_32bit_sgls) ?
TWS_MAX_64BIT_SG_ELEMENTS :
TWS_MAX_32BIT_SG_ELEMENTS;
dma_mem_size = (sizeof(struct tws_command_packet) * tws_queue_depth) +
(TWS_SECTOR_SIZE) ;
if ( bus_dma_tag_create(bus_get_dma_tag(sc->tws_dev), /* PCI parent */
TWS_ALIGNMENT, /* alignment */
0, /* boundary */
BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
BUS_SPACE_MAXSIZE, /* maxsize */
max_sg_elements, /* numsegs */
BUS_SPACE_MAXSIZE, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockfuncarg */
&sc->parent_tag /* tag */
)) {
TWS_TRACE_DEBUG(sc, "DMA parent tag Create fail", max_sg_elements,
sc->is64bit);
return(ENOMEM);
}
/* In bound message frame requires 16byte alignment.
* Outbound MF's can live with 4byte alignment - for now just
* use 16 for both.
*/
if ( bus_dma_tag_create(sc->parent_tag, /* parent */
TWS_IN_MF_ALIGNMENT, /* alignment */
0, /* boundary */
BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
dma_mem_size, /* maxsize */
1, /* numsegs */
BUS_SPACE_MAXSIZE, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockfuncarg */
&sc->cmd_tag /* tag */
)) {
TWS_TRACE_DEBUG(sc, "DMA cmd tag Create fail", max_sg_elements, sc->is64bit);
return(ENOMEM);
}
if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
BUS_DMA_NOWAIT, &sc->cmd_map)) {
TWS_TRACE_DEBUG(sc, "DMA mem alloc fail", max_sg_elements, sc->is64bit);
return(ENOMEM);
}
/* if bus_dmamem_alloc succeeds then bus_dmamap_load will succeed */
sc->dma_mem_phys=0;
error = bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem,
dma_mem_size, tws_dmamap_cmds_load_cbfn,
&sc->dma_mem_phys, 0);
/*
* Create a dma tag for data buffers; size will be the maximum
* possible I/O size (128kB).
*/
if (bus_dma_tag_create(sc->parent_tag, /* parent */
TWS_ALIGNMENT, /* alignment */
0, /* boundary */
BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
TWS_MAX_IO_SIZE, /* maxsize */
max_sg_elements, /* nsegments */
TWS_MAX_IO_SIZE, /* maxsegsize */
BUS_DMA_ALLOCNOW, /* flags */
busdma_lock_mutex, /* lockfunc */
&sc->io_lock, /* lockfuncarg */
&sc->data_tag /* tag */)) {
TWS_TRACE_DEBUG(sc, "DMA cmd tag Create fail", max_sg_elements, sc->is64bit);
return(ENOMEM);
}
sc->reqs = malloc(sizeof(struct tws_request) * tws_queue_depth, M_TWS,
M_WAITOK | M_ZERO);
if ( sc->reqs == NULL ) {
TWS_TRACE_DEBUG(sc, "malloc failed", 0, sc->is64bit);
return(ENOMEM);
}
sc->sense_bufs = malloc(sizeof(struct tws_sense) * tws_queue_depth, M_TWS,
M_WAITOK | M_ZERO);
if ( sc->sense_bufs == NULL ) {
//.........这里部分代码省略.........
示例4: bce_attach
//.........这里部分代码省略.........
"waking up from power state D%d\n", pmode);
pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
}
}
if (pci_intr_map(pa, &ih)) {
aprint_error_dev(self, "couldn't map interrupt\n");
return;
}
intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
sc->bce_intrhand = pci_intr_establish(pc, ih, IPL_NET, bce_intr, sc);
if (sc->bce_intrhand == NULL) {
aprint_error_dev(self, "couldn't establish interrupt\n");
if (intrstr != NULL)
aprint_error(" at %s", intrstr);
aprint_error("\n");
return;
}
aprint_normal_dev(self, "interrupting at %s\n", intrstr);
/* reset the chip */
bce_reset(sc);
/*
* Allocate DMA-safe memory for ring descriptors.
* The receive, and transmit rings can not share the same
* 4k space, however both are allocated at once here.
*/
/*
* XXX PAGE_SIZE is wasteful; we only need 1KB + 1KB, but
* due to the limition above. ??
*/
if ((error = bus_dmamem_alloc(sc->bce_dmatag,
2 * PAGE_SIZE, PAGE_SIZE, 2 * PAGE_SIZE,
&seg, 1, &rseg, BUS_DMA_NOWAIT))) {
aprint_error_dev(self,
"unable to alloc space for ring descriptors, error = %d\n",
error);
return;
}
/* map ring space to kernel */
if ((error = bus_dmamem_map(sc->bce_dmatag, &seg, rseg,
2 * PAGE_SIZE, &kva, BUS_DMA_NOWAIT))) {
aprint_error_dev(self,
"unable to map DMA buffers, error = %d\n", error);
bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
return;
}
/* create a dma map for the ring */
if ((error = bus_dmamap_create(sc->bce_dmatag,
2 * PAGE_SIZE, 1, 2 * PAGE_SIZE, 0, BUS_DMA_NOWAIT,
&sc->bce_ring_map))) {
aprint_error_dev(self,
"unable to create ring DMA map, error = %d\n", error);
bus_dmamem_unmap(sc->bce_dmatag, kva, 2 * PAGE_SIZE);
bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
return;
}
/* connect the ring space to the dma map */
if (bus_dmamap_load(sc->bce_dmatag, sc->bce_ring_map, kva,
2 * PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
bus_dmamap_destroy(sc->bce_dmatag, sc->bce_ring_map);
bus_dmamem_unmap(sc->bce_dmatag, kva, 2 * PAGE_SIZE);
bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
return;
示例5: ae_attach
/*
* ae_attach:
*
* Attach an ae interface to the system.
*/
void
ae_attach(device_t parent, device_t self, void *aux)
{
const uint8_t *enaddr;
prop_data_t ea;
struct ae_softc *sc = device_private(self);
struct arbus_attach_args *aa = aux;
struct ifnet *ifp = &sc->sc_ethercom.ec_if;
int i, error;
sc->sc_dev = self;
callout_init(&sc->sc_tick_callout, 0);
printf(": Atheros AR531X 10/100 Ethernet\n");
/*
* Try to get MAC address.
*/
ea = prop_dictionary_get(device_properties(sc->sc_dev), "mac-address");
if (ea == NULL) {
printf("%s: unable to get mac-addr property\n",
device_xname(sc->sc_dev));
return;
}
KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
enaddr = prop_data_data_nocopy(ea);
/* Announce ourselves. */
printf("%s: Ethernet address %s\n", device_xname(sc->sc_dev),
ether_sprintf(enaddr));
sc->sc_cirq = aa->aa_cirq;
sc->sc_mirq = aa->aa_mirq;
sc->sc_st = aa->aa_bst;
sc->sc_dmat = aa->aa_dmat;
SIMPLEQ_INIT(&sc->sc_txfreeq);
SIMPLEQ_INIT(&sc->sc_txdirtyq);
/*
* Map registers.
*/
sc->sc_size = aa->aa_size;
if ((error = bus_space_map(sc->sc_st, aa->aa_addr, sc->sc_size, 0,
&sc->sc_sh)) != 0) {
printf("%s: unable to map registers, error = %d\n",
device_xname(sc->sc_dev), error);
goto fail_0;
}
/*
* Allocate the control data structures, and create and load the
* DMA map for it.
*/
if ((error = bus_dmamem_alloc(sc->sc_dmat,
sizeof(struct ae_control_data), PAGE_SIZE, 0, &sc->sc_cdseg,
1, &sc->sc_cdnseg, 0)) != 0) {
printf("%s: unable to allocate control data, error = %d\n",
device_xname(sc->sc_dev), error);
goto fail_1;
}
if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg,
sizeof(struct ae_control_data), (void **)&sc->sc_control_data,
BUS_DMA_COHERENT)) != 0) {
printf("%s: unable to map control data, error = %d\n",
device_xname(sc->sc_dev), error);
goto fail_2;
}
if ((error = bus_dmamap_create(sc->sc_dmat,
sizeof(struct ae_control_data), 1,
sizeof(struct ae_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
printf("%s: unable to create control data DMA map, "
"error = %d\n", device_xname(sc->sc_dev), error);
goto fail_3;
}
if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
sc->sc_control_data, sizeof(struct ae_control_data), NULL,
0)) != 0) {
printf("%s: unable to load control data DMA map, error = %d\n",
device_xname(sc->sc_dev), error);
goto fail_4;
}
/*
* Create the transmit buffer DMA maps.
*/
for (i = 0; i < AE_TXQUEUELEN; i++) {
if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
AE_NTXSEGS, MCLBYTES, 0, 0,
&sc->sc_txsoft[i].txs_dmamap)) != 0) {
//.........这里部分代码省略.........
示例6: pcscp_attach
//.........这里部分代码省略.........
/*
* This is the value used to start sync negotiations
* Note that the NCR register "SYNCTP" is programmed
* in "clocks per byte", and has a minimum value of 4.
* The SCSI period used in negotiation is one-fourth
* of the time (in nanoseconds) needed to transfer one byte.
* Since the chip's clock is given in MHz, we have the following
* formula: 4 * period = (1000 / freq) * 4
*/
sc->sc_minsync = 1000 / sc->sc_freq;
/* Really no limit, but since we want to fit into the TCR... */
sc->sc_maxxfer = 16 * 1024 * 1024;
/*
* Create the DMA maps for the data transfers.
*/
#define MDL_SEG_SIZE 0x1000 /* 4kbyte per segment */
#define MDL_SEG_OFFSET 0x0FFF
#define MDL_SIZE (MAXPHYS / MDL_SEG_SIZE + 1) /* no hardware limit? */
if (bus_dmamap_create(esc->sc_dmat, MAXPHYS, MDL_SIZE, MDL_SEG_SIZE,
MDL_SEG_SIZE, BUS_DMA_NOWAIT, &esc->sc_xfermap)) {
aprint_error(": can't create DMA maps\n");
return;
}
/*
* Allocate and map memory for the MDL.
*/
if ((error = bus_dmamem_alloc(esc->sc_dmat,
sizeof(uint32_t) * MDL_SIZE, PAGE_SIZE, 0, &seg, 1, &rseg,
BUS_DMA_NOWAIT)) != 0) {
aprint_error(": unable to allocate memory for the MDL,"
" error = %d\n", error);
goto fail_0;
}
if ((error = bus_dmamem_map(esc->sc_dmat, &seg, rseg,
sizeof(uint32_t) * MDL_SIZE , (void **)&esc->sc_mdladdr,
BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
aprint_error(": unable to map the MDL memory, error = %d\n",
error);
goto fail_1;
}
if ((error = bus_dmamap_create(esc->sc_dmat,
sizeof(uint32_t) * MDL_SIZE, 1, sizeof(uint32_t) * MDL_SIZE,
0, BUS_DMA_NOWAIT, &esc->sc_mdldmap)) != 0) {
aprint_error(": unable to map_create for the MDL, error = %d\n",
error);
goto fail_2;
}
if ((error = bus_dmamap_load(esc->sc_dmat, esc->sc_mdldmap,
esc->sc_mdladdr, sizeof(uint32_t) * MDL_SIZE,
NULL, BUS_DMA_NOWAIT)) != 0) {
aprint_error(": unable to load for the MDL, error = %d\n",
error);
goto fail_3;
}
/* map and establish interrupt */
if (pci_intr_map(pa, &ih)) {
aprint_error(": couldn't map interrupt\n");
goto fail_4;
示例7: zy7_devcfg_write
/*
 * zy7_devcfg_write:
 *
 * Character-device write handler that streams an FPGA bitstream from
 * userland into the Zynq-7000 programmable logic (PL) through the
 * devcfg PCAP DMA engine, one PAGE_SIZE chunk at a time.
 *
 * Returns 0 on success or an errno value (EIO once the PL signals
 * PCFG_DONE and further writes are refused, or any error from the
 * reset/alloc/uiomove/sleep steps).
 */
static int
zy7_devcfg_write(struct cdev *dev, struct uio *uio, int ioflag)
{
struct zy7_devcfg_softc *sc = dev->si_drv1;
void *dma_mem;
bus_addr_t dma_physaddr;
int segsz, err;
DEVCFG_SC_LOCK(sc);
/* First write (offset 0 with data pending)? Reset PL. */
if (uio->uio_offset == 0 && uio->uio_resid > 0) {
zy7_devcfg_init_hw(sc);
zy7_slcr_preload_pl();
err = zy7_devcfg_reset_pl(sc);
if (err != 0) {
DEVCFG_SC_UNLOCK(sc);
return (err);
}
}
/* Allocate one page of DMA staging memory and load it into the map. */
err = bus_dmamem_alloc(sc->dma_tag, &dma_mem, BUS_DMA_NOWAIT,
&sc->dma_map);
if (err != 0) {
DEVCFG_SC_UNLOCK(sc);
return (err);
}
err = bus_dmamap_load(sc->dma_tag, sc->dma_map, dma_mem, PAGE_SIZE,
zy7_dma_cb2, &dma_physaddr, 0);
if (err != 0) {
bus_dmamem_free(sc->dma_tag, dma_mem, sc->dma_map);
DEVCFG_SC_UNLOCK(sc);
return (err);
}
while (uio->uio_resid > 0) {
/* If DONE signal has been set, we shouldn't write anymore. */
if ((RD4(sc, ZY7_DEVCFG_INT_STATUS) &
ZY7_DEVCFG_INT_PCFG_DONE) != 0) {
err = EIO;
break;
}
/* uiomove the data from user buffer to our dma map.
 * The lock is dropped around uiomove since it may fault/sleep.
 */
segsz = MIN(PAGE_SIZE, uio->uio_resid);
DEVCFG_SC_UNLOCK(sc);
err = uiomove(dma_mem, segsz, uio);
DEVCFG_SC_LOCK(sc);
if (err != 0)
break;
/* Flush the cache to memory. */
bus_dmamap_sync(sc->dma_tag, sc->dma_map,
BUS_DMASYNC_PREWRITE);
/* Program devcfg's DMA engine. The ordering of these
 * register writes is critical.  The WAIT_PCAP flag is set
 * only on the final chunk so the transfer waits for PCAP
 * to finish configuration.
 */
if (uio->uio_resid > segsz)
WR4(sc, ZY7_DEVCFG_DMA_SRC_ADDR,
(uint32_t) dma_physaddr);
else
WR4(sc, ZY7_DEVCFG_DMA_SRC_ADDR,
(uint32_t) dma_physaddr |
ZY7_DEVCFG_DMA_ADDR_WAIT_PCAP);
WR4(sc, ZY7_DEVCFG_DMA_DST_ADDR, ZY7_DEVCFG_DMA_ADDR_ILLEGAL);
WR4(sc, ZY7_DEVCFG_DMA_SRC_LEN, (segsz+3)/4);
WR4(sc, ZY7_DEVCFG_DMA_DST_LEN, 0);
/* Now clear done bit and set up DMA done interrupt. */
WR4(sc, ZY7_DEVCFG_INT_STATUS, ZY7_DEVCFG_INT_ALL);
WR4(sc, ZY7_DEVCFG_INT_MASK, ~ZY7_DEVCFG_INT_DMA_DONE);
/* Wait for DMA done interrupt (interruptible, 1 second timeout). */
err = mtx_sleep(sc->dma_map, &sc->sc_mtx, PCATCH,
"zy7dma", hz);
if (err != 0)
break;
bus_dmamap_sync(sc->dma_tag, sc->dma_map,
BUS_DMASYNC_POSTWRITE);
/* Check DONE signal; if configuration completed, re-enable
 * the PL level shifters via the SLCR.
 */
if ((RD4(sc, ZY7_DEVCFG_INT_STATUS) &
ZY7_DEVCFG_INT_PCFG_DONE) != 0)
zy7_slcr_postload_pl(zy7_en_level_shifters);
}
/* Common exit path: release the staging buffer whether or not
 * the loop completed successfully.
 */
bus_dmamap_unload(sc->dma_tag, sc->dma_map);
bus_dmamem_free(sc->dma_tag, dma_mem, sc->dma_map);
DEVCFG_SC_UNLOCK(sc);
return (err);
}
示例8: malo_hal_attach
/*
 * Setup for communication with the device. We allocate
 * a command buffer and map it for bus dma use. The pci
 * device id is used to identify whether the device has
 * SRAM on it (in which case f/w download must include a
 * memory controller reset). All bus i/o operations happen
 * in BAR 1; the driver passes in the tag and handle we need.
 *
 * Returns the newly allocated hal state on success, or NULL on
 * failure (all partially acquired resources are released first).
 * NOTE(review): the devid parameter is not referenced in this
 * function body — presumably consumed elsewhere; confirm.
 */
struct malo_hal *
malo_hal_attach(device_t dev, uint16_t devid,
bus_space_handle_t ioh, bus_space_tag_t iot, bus_dma_tag_t tag)
{
int error;
struct malo_hal *mh;
/* M_NOWAIT: attach context must not sleep on allocation here. */
mh = malloc(sizeof(struct malo_hal), M_DEVBUF, M_NOWAIT | M_ZERO);
if (mh == NULL)
return NULL;
mh->mh_dev = dev;
mh->mh_ioh = ioh;
mh->mh_iot = iot;
snprintf(mh->mh_mtxname, sizeof(mh->mh_mtxname),
"%s_hal", device_get_nameunit(dev));
mtx_init(&mh->mh_mtx, mh->mh_mtxname, NULL, MTX_DEF);
/*
 * Allocate the command buffer and map into the address
 * space of the h/w. We request "coherent" memory which
 * will be uncached on some architectures.
 */
error = bus_dma_tag_create(tag, /* parent */
PAGE_SIZE, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
MALO_CMDBUF_SIZE, /* maxsize */
1, /* nsegments */
MALO_CMDBUF_SIZE, /* maxsegsize */
BUS_DMA_ALLOCNOW, /* flags */
NULL, /* lockfunc */
NULL, /* lockarg */
&mh->mh_dmat);
if (error != 0) {
device_printf(dev, "unable to allocate memory for cmd tag, "
"error %u\n", error);
goto fail;
}
/* allocate descriptors */
error = bus_dmamap_create(mh->mh_dmat, BUS_DMA_NOWAIT, &mh->mh_dmamap);
if (error != 0) {
device_printf(dev, "unable to create dmamap for cmd buffers, "
"error %u\n", error);
goto fail;
}
/* Allocate the DMA-safe command buffer itself. */
error = bus_dmamem_alloc(mh->mh_dmat, (void**) &mh->mh_cmdbuf,
BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
&mh->mh_dmamap);
if (error != 0) {
device_printf(dev, "unable to allocate memory for cmd buffer, "
"error %u\n", error);
goto fail;
}
/* Load the buffer; malo_hal_load_cb records the bus address
 * in mh_cmdaddr.
 */
error = bus_dmamap_load(mh->mh_dmat, mh->mh_dmamap,
mh->mh_cmdbuf, MALO_CMDBUF_SIZE,
malo_hal_load_cb, &mh->mh_cmdaddr,
BUS_DMA_NOWAIT);
if (error != 0) {
device_printf(dev, "unable to load cmd buffer, error %u\n",
error);
goto fail;
}
return (mh);
fail:
/* Unwind in reverse order; each step checks how far we got. */
if (mh->mh_dmamap != NULL) {
bus_dmamap_unload(mh->mh_dmat, mh->mh_dmamap);
if (mh->mh_cmdbuf != NULL)
bus_dmamem_free(mh->mh_dmat, mh->mh_cmdbuf,
mh->mh_dmamap);
bus_dmamap_destroy(mh->mh_dmat, mh->mh_dmamap);
}
if (mh->mh_dmat)
bus_dma_tag_destroy(mh->mh_dmat);
free(mh, M_DEVBUF);
return (NULL);
}
示例9: le_isa_attach
//.........这里部分代码省略.........
BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
0, /* nsegments */
BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&lesc->sc_pdmat);
if (error != 0) {
device_printf(dev, "cannot allocate parent DMA tag\n");
goto fail_ires;
}
sc->sc_memsize = LE_ISA_MEMSIZE;
/*
* For Am79C90, Am79C961 and Am79C961A the init block must be 2-byte
* aligned and the ring descriptors must be 8-byte aligned.
*/
error = bus_dma_tag_create(
lesc->sc_pdmat, /* parent */
8, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR_24BIT, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
sc->sc_memsize, /* maxsize */
1, /* nsegments */
sc->sc_memsize, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&lesc->sc_dmat);
if (error != 0) {
device_printf(dev, "cannot allocate buffer DMA tag\n");
goto fail_pdtag;
}
error = bus_dmamem_alloc(lesc->sc_dmat, (void **)&sc->sc_mem,
BUS_DMA_WAITOK | BUS_DMA_COHERENT, &lesc->sc_dmam);
if (error != 0) {
device_printf(dev, "cannot allocate DMA buffer memory\n");
goto fail_dtag;
}
sc->sc_addr = 0;
error = bus_dmamap_load(lesc->sc_dmat, lesc->sc_dmam, sc->sc_mem,
sc->sc_memsize, le_isa_dma_callback, sc, 0);
if (error != 0 || sc->sc_addr == 0) {
device_printf(dev, "cannot load DMA buffer map\n");
goto fail_dmem;
}
isa_dmacascade(rman_get_start(lesc->sc_dres));
sc->sc_flags = 0;
sc->sc_conf3 = 0;
/*
* Extract the physical MAC address from the ROM.
*/
for (i = 0; i < sizeof(sc->sc_enaddr); i++)
sc->sc_enaddr[i] = bus_read_1(lesc->sc_rres,
macstart + i * macstride);
sc->sc_copytodesc = lance_copytobuf_contig;
sc->sc_copyfromdesc = lance_copyfrombuf_contig;
sc->sc_copytobuf = lance_copytobuf_contig;
sc->sc_copyfrombuf = lance_copyfrombuf_contig;
sc->sc_zerobuf = lance_zerobuf_contig;
示例10: adv_pci_attach
static int
adv_pci_attach(device_t dev)
{
struct adv_softc *adv;
u_int32_t id;
u_int32_t command;
int error, rid, irqrid;
void *ih;
struct resource *iores, *irqres;
/*
* Determine the chip version.
*/
id = pci_read_config(dev, PCIR_DEVVENDOR, /*bytes*/4);
command = pci_read_config(dev, PCIR_COMMAND, /*bytes*/1);
/*
* These cards do not allow memory mapped accesses, so we must
* ensure that I/O accesses are available or we won't be able
* to talk to them.
*/
if ((command & (PCIM_CMD_PORTEN|PCIM_CMD_BUSMASTEREN))
!= (PCIM_CMD_PORTEN|PCIM_CMD_BUSMASTEREN)) {
command |= PCIM_CMD_PORTEN|PCIM_CMD_BUSMASTEREN;
pci_write_config(dev, PCIR_COMMAND, command, /*bytes*/1);
}
/*
* Early chips can't handle non-zero latency timer settings.
*/
if (id == PCI_DEVICE_ID_ADVANSYS_1200A
|| id == PCI_DEVICE_ID_ADVANSYS_1200B) {
pci_write_config(dev, PCIR_LATTIMER, /*value*/0, /*bytes*/1);
}
rid = PCI_BASEADR0;
iores = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0, ~0, 1,
RF_ACTIVE);
if (iores == NULL)
return ENXIO;
if (adv_find_signature(rman_get_bustag(iores),
rman_get_bushandle(iores)) == 0) {
bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
return ENXIO;
}
adv = adv_alloc(dev, rman_get_bustag(iores), rman_get_bushandle(iores));
if (adv == NULL) {
bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
return ENXIO;
}
/* Allocate a dmatag for our transfer DMA maps */
/* XXX Should be a child of the PCI bus dma tag */
error = bus_dma_tag_create(/*parent*/NULL, /*alignment*/1,
/*boundary*/0,
/*lowaddr*/ADV_PCI_MAX_DMA_ADDR,
/*highaddr*/BUS_SPACE_MAXADDR,
/*filter*/NULL, /*filterarg*/NULL,
/*maxsize*/BUS_SPACE_MAXSIZE_32BIT,
/*nsegments*/BUS_SPACE_UNRESTRICTED,
/*maxsegsz*/ADV_PCI_MAX_DMA_COUNT,
/*flags*/0,
&adv->parent_dmat);
if (error != 0) {
kprintf("%s: Could not allocate DMA tag - error %d\n",
adv_name(adv), error);
adv_free(adv);
bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
return ENXIO;
}
adv->init_level++;
if (overrun_buf == NULL) {
/* Need to allocate our overrun buffer */
if (bus_dma_tag_create(adv->parent_dmat,
/*alignment*/8, /*boundary*/0,
ADV_PCI_MAX_DMA_ADDR, BUS_SPACE_MAXADDR,
/*filter*/NULL, /*filterarg*/NULL,
ADV_OVERRUN_BSIZE, /*nsegments*/1,
BUS_SPACE_MAXSIZE_32BIT, /*flags*/0,
&overrun_dmat) != 0) {
bus_dma_tag_destroy(adv->parent_dmat);
adv_free(adv);
bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
return ENXIO;
}
if (bus_dmamem_alloc(overrun_dmat,
(void *)&overrun_buf,
BUS_DMA_NOWAIT,
&overrun_dmamap) != 0) {
bus_dma_tag_destroy(overrun_dmat);
bus_dma_tag_destroy(adv->parent_dmat);
adv_free(adv);
bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
return ENXIO;
}
//.........这里部分代码省略.........
示例11: ie_pcctwo_attach
/* ARGSUSED */
/*
 * ie_pcctwo_attach:
 *
 * Attach an i82586 Ethernet interface hanging off a PCCchip2
 * (MVME on-board) bus.  Maps the MPU registers, allocates a
 * contiguous DMA-able buffer for the chip's shared memory, lays out
 * the SCP/ISCP/SCB control structures inside it, and hands off to
 * the machine-independent i82586 back-end.  On failure it prints an
 * error and returns without attaching.
 */
void
ie_pcctwo_attach(device_t parent, device_t self, void *aux)
{
struct pcctwo_attach_args *pa;
struct ie_pcctwo_softc *ps;
struct ie_softc *sc;
bus_dma_segment_t seg;
int rseg;
pa = aux;
ps = device_private(self);
sc = &ps->ps_ie;
sc->sc_dev = self;
/* Map the MPU controller registers in PCCTWO space */
ps->ps_bust = pa->pa_bust;
bus_space_map(pa->pa_bust, pa->pa_offset, IE_MPUREG_SIZE,
0, &ps->ps_bush);
/* Get contiguous DMA-able memory for the IE chip (the 82586 can
 * only address 24 bits, hence BUS_DMA_24BIT).
 */
if (bus_dmamem_alloc(pa->pa_dmat, ether_data_buff_size, PAGE_SIZE, 0,
&seg, 1, &rseg,
BUS_DMA_NOWAIT | BUS_DMA_ONBOARD_RAM | BUS_DMA_24BIT) != 0) {
aprint_error_dev(self, "Failed to allocate ether buffer\n");
return;
}
if (bus_dmamem_map(pa->pa_dmat, &seg, rseg, ether_data_buff_size,
(void **) & sc->sc_maddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) {
aprint_error_dev(self, "Failed to map ether buffer\n");
bus_dmamem_free(pa->pa_dmat, &seg, rseg);
return;
}
sc->bt = pa->pa_bust;
sc->bh = (bus_space_handle_t) sc->sc_maddr; /* XXXSCW Better way? */
sc->sc_iobase = (void *) seg.ds_addr;
sc->sc_msize = ether_data_buff_size;
memset(sc->sc_maddr, 0, ether_data_buff_size);
/* Hook up the board-specific access/reset routines used by the
 * MI i82586 driver.
 */
sc->hwreset = ie_reset;
sc->hwinit = ie_hwinit;
sc->chan_attn = ie_atten;
sc->intrhook = ie_intrhook;
sc->memcopyin = ie_copyin;
sc->memcopyout = ie_copyout;
sc->ie_bus_barrier = NULL;
sc->ie_bus_read16 = ie_read_16;
sc->ie_bus_write16 = ie_write_16;
sc->ie_bus_write24 = ie_write_24;
sc->sc_mediachange = NULL;
sc->sc_mediastatus = NULL;
/* Lay out SCP, ISCP, SCB and the buffer area within the shared
 * memory; the ISCP is aligned to a 16-byte boundary after the SCP.
 */
sc->scp = 0;
sc->iscp = sc->scp + ((IE_SCP_SZ + 15) & ~15);
sc->scb = sc->iscp + IE_ISCP_SZ;
sc->buf_area = sc->scb + IE_SCB_SZ;
sc->buf_area_sz = sc->sc_msize - (sc->buf_area - sc->scp);
/*
 * BUS_USE -> Interrupt Active High (edge-triggered),
 * Lock function enabled,
 * Internal bus throttle timer triggering,
 * 82586 operating mode.
 */
ie_write_16(sc, IE_SCP_BUS_USE(sc->scp), IE_BUS_USE);
ie_write_24(sc, IE_SCP_ISCP(sc->scp), sc->iscp);
ie_write_16(sc, IE_ISCP_SCB(sc->iscp), sc->scb);
ie_write_24(sc, IE_ISCP_BASE(sc->iscp), sc->scp);
/* This has the side-effect of resetting the chip */
i82586_proberam(sc);
/* Attach the MI back-end */
i82586_attach(sc, "onboard", mvme_ea, NULL, 0, 0);
/* Register the event counter */
evcnt_attach_dynamic(&ps->ps_evcnt, EVCNT_TYPE_INTR,
pcctwointr_evcnt(pa->pa_ipl), "ether", device_xname(self));
/* Finally, hook the hardware interrupt */
pcctwointr_establish(PCCTWOV_LANC_IRQ, i82586_intr, pa->pa_ipl, sc,
&ps->ps_evcnt);
}
示例12: drm_pci_alloc
/*
 * Allocate a drm dma handle, allocate memory fit for DMA, and map it.
 *
 * XXX This is called drm_pci_alloc for hysterical raisins; it is not
 * specific to PCI.
 *
 * XXX For now, we use non-blocking allocations because this is called
 * by ioctls with the drm global mutex held.
 *
 * XXX Error information is lost because this returns NULL on failure,
 * not even an error embedded in a pointer.
 *
 * On success returns a handle whose vaddr/busaddr describe a single
 * contiguous, zeroed, uncached DMA segment of `size' bytes aligned
 * to `align'.  Caller owns the handle and must release it with the
 * matching free routine.
 */
struct drm_dma_handle *
drm_pci_alloc(struct drm_device *dev, size_t size, size_t align)
{
int nsegs;
int error;
/*
 * Allocate a drm_dma_handle record.
 */
struct drm_dma_handle *const dmah = kmem_alloc(sizeof(*dmah),
KM_NOSLEEP);
if (dmah == NULL) {
error = -ENOMEM;
goto out;
}
dmah->dmah_tag = dev->dmat;
/*
 * Allocate the requested amount of DMA-safe memory.
 */
/* XXX errno NetBSD->Linux */
error = -bus_dmamem_alloc(dmah->dmah_tag, size, align, 0,
&dmah->dmah_seg, 1, &nsegs, BUS_DMA_NOWAIT);
if (error)
goto fail0;
KASSERT(nsegs == 1);
/*
 * XXX Old drm passed BUS_DMA_NOWAIT below but BUS_DMA_WAITOK
 * above. WTF?
 */
/*
 * Map the DMA-safe memory into kernel virtual address space.
 */
/* XXX errno NetBSD->Linux */
error = -bus_dmamem_map(dmah->dmah_tag, &dmah->dmah_seg, 1, size,
&dmah->vaddr,
(BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_NOCACHE));
if (error)
goto fail1;
dmah->size = size;
/*
 * Create a map for DMA transfers.
 */
/* XXX errno NetBSD->Linux */
error = -bus_dmamap_create(dmah->dmah_tag, size, 1, size, 0,
BUS_DMA_NOWAIT, &dmah->dmah_map);
if (error)
goto fail2;
/*
 * Load the kva buffer into the map for DMA transfers.
 */
/* XXX errno NetBSD->Linux */
error = -bus_dmamap_load(dmah->dmah_tag, dmah->dmah_map, dmah->vaddr,
size, NULL, (BUS_DMA_NOWAIT | BUS_DMA_NOCACHE));
if (error)
goto fail3;
/* Record the bus address for convenient reference. */
dmah->busaddr = dmah->dmah_map->dm_segs[0].ds_addr;
/* Zero the DMA buffer. XXX Yikes! Is this necessary? */
memset(dmah->vaddr, 0, size);
/* Success! */
return dmah;
/* Unwind the acquisition chain in strict reverse order. */
fail3: bus_dmamap_destroy(dmah->dmah_tag, dmah->dmah_map);
fail2: bus_dmamem_unmap(dmah->dmah_tag, dmah->vaddr, dmah->size);
fail1: bus_dmamem_free(dmah->dmah_tag, &dmah->dmah_seg, 1);
fail0: dmah->dmah_tag = NULL; /* XXX paranoia */
kmem_free(dmah, sizeof(*dmah));
out: DRM_DEBUG("drm_pci_alloc failed: %d\n", error);
return NULL;
}
示例13: atiixp_pci_attach
static int
atiixp_pci_attach(device_t dev)
{
struct atiixp_info *sc;
int i;
sc = kmalloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
sc->lock = snd_mtxcreate(device_get_nameunit(dev), "sound softc");
sc->dev = dev;
/*
* Default DMA segments per playback / recording channel
*/
sc->dma_segs = ATI_IXP_DMA_CHSEGS;
pci_set_powerstate(dev, PCI_POWERSTATE_D0);
pci_enable_busmaster(dev);
sc->regid = PCIR_BAR(0);
sc->regtype = SYS_RES_MEMORY;
sc->reg = bus_alloc_resource_any(dev, sc->regtype, &sc->regid,
RF_ACTIVE);
if (!sc->reg) {
device_printf(dev, "unable to allocate register space\n");
goto bad;
}
sc->st = rman_get_bustag(sc->reg);
sc->sh = rman_get_bushandle(sc->reg);
sc->bufsz = pcm_getbuffersize(dev, 4096, ATI_IXP_DEFAULT_BUFSZ, 65536);
sc->irqid = 0;
sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid,
RF_ACTIVE | RF_SHAREABLE);
if (!sc->irq ||
snd_setup_intr(dev, sc->irq, INTR_MPSAFE,
atiixp_intr, sc, &sc->ih)) {
device_printf(dev, "unable to map interrupt\n");
goto bad;
}
/*
* Let the user choose the best DMA segments.
*/
if (resource_int_value(device_get_name(dev),
device_get_unit(dev), "dma_segs",
&i) == 0) {
if (i < ATI_IXP_DMA_CHSEGS_MIN)
i = ATI_IXP_DMA_CHSEGS_MIN;
if (i > ATI_IXP_DMA_CHSEGS_MAX)
i = ATI_IXP_DMA_CHSEGS_MAX;
sc->dma_segs = i;
}
/*
* round the value to the nearest ^2
*/
i = 0;
while (sc->dma_segs >> i)
i++;
sc->dma_segs = 1 << (i - 1);
if (sc->dma_segs < ATI_IXP_DMA_CHSEGS_MIN)
sc->dma_segs = ATI_IXP_DMA_CHSEGS_MIN;
else if (sc->dma_segs > ATI_IXP_DMA_CHSEGS_MAX)
sc->dma_segs = ATI_IXP_DMA_CHSEGS_MAX;
/*
* DMA tag for scatter-gather buffers and link pointers
*/
if (bus_dma_tag_create(/*parent*/NULL, /*alignment*/2, /*boundary*/0,
/*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
/*highaddr*/BUS_SPACE_MAXADDR,
/*filter*/NULL, /*filterarg*/NULL,
/*maxsize*/sc->bufsz, /*nsegments*/1, /*maxsegz*/0x3ffff,
/*flags*/0,
&sc->parent_dmat) != 0) {
device_printf(dev, "unable to create dma tag\n");
goto bad;
}
if (bus_dma_tag_create(/*parent*/NULL, /*alignment*/2, /*boundary*/0,
/*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
/*highaddr*/BUS_SPACE_MAXADDR,
/*filter*/NULL, /*filterarg*/NULL,
/*maxsize*/sc->dma_segs * ATI_IXP_NCHANS *
sizeof(struct atiixp_dma_op),
/*nsegments*/1, /*maxsegz*/0x3ffff,
/*flags*/0,
&sc->sgd_dmat) != 0) {
device_printf(dev, "unable to create dma tag\n");
goto bad;
}
if (bus_dmamem_alloc(sc->sgd_dmat, (void **)&sc->sgd_table,
BUS_DMA_NOWAIT, &sc->sgd_dmamap) == -1)
goto bad;
if (bus_dmamap_load(sc->sgd_dmat, sc->sgd_dmamap, sc->sgd_table,
sc->dma_segs * ATI_IXP_NCHANS *
//.........这里部分代码省略.........
示例14: at91_mci_attach
static int
at91_mci_attach(device_t dev)
{
struct at91_mci_softc *sc = device_get_softc(dev);
struct sysctl_ctx_list *sctx;
struct sysctl_oid *soid;
device_t child;
int err, i;
sctx = device_get_sysctl_ctx(dev);
soid = device_get_sysctl_tree(dev);
sc->dev = dev;
sc->sc_cap = 0;
if (at91_is_rm92())
sc->sc_cap |= CAP_NEEDS_BYTESWAP;
/*
* MCI1 Rev 2 controllers need some workarounds, flag if so.
*/
if (at91_mci_is_mci1rev2xx())
sc->sc_cap |= CAP_MCI1_REV2XX;
err = at91_mci_activate(dev);
if (err)
goto out;
AT91_MCI_LOCK_INIT(sc);
at91_mci_fini(dev);
at91_mci_init(dev);
/*
* Allocate DMA tags and maps and bounce buffers.
*
* The parms in the tag_create call cause the dmamem_alloc call to
* create each bounce buffer as a single contiguous buffer of BBSIZE
* bytes aligned to a 4096 byte boundary.
*
* Do not use DMA_COHERENT for these buffers because that maps the
* memory as non-cachable, which prevents cache line burst fills/writes,
* which is something we need since we're trying to overlap the
* byte-swapping with the DMA operations.
*/
err = bus_dma_tag_create(bus_get_dma_tag(dev), 4096, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
BBSIZE, 1, BBSIZE, 0, NULL, NULL, &sc->dmatag);
if (err != 0)
goto out;
for (i = 0; i < BBCOUNT; ++i) {
err = bus_dmamem_alloc(sc->dmatag, (void **)&sc->bbuf_vaddr[i],
BUS_DMA_NOWAIT, &sc->bbuf_map[i]);
if (err != 0)
goto out;
}
/*
* Activate the interrupt
*/
err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
NULL, at91_mci_intr, sc, &sc->intrhand);
if (err) {
AT91_MCI_LOCK_DESTROY(sc);
goto out;
}
/*
* Allow 4-wire to be initially set via #define.
* Allow a device hint to override that.
* Allow a sysctl to override that.
*/
#if defined(AT91_MCI_HAS_4WIRE) && AT91_MCI_HAS_4WIRE != 0
sc->has_4wire = 1;
#endif
resource_int_value(device_get_name(dev), device_get_unit(dev),
"4wire", &sc->has_4wire);
SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "4wire",
CTLFLAG_RW, &sc->has_4wire, 0, "has 4 wire SD Card bus");
if (sc->has_4wire)
sc->sc_cap |= CAP_HAS_4WIRE;
sc->allow_overclock = AT91_MCI_ALLOW_OVERCLOCK;
resource_int_value(device_get_name(dev), device_get_unit(dev),
"allow_overclock", &sc->allow_overclock);
SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "allow_overclock",
CTLFLAG_RW, &sc->allow_overclock, 0,
"Allow up to 30MHz clock for 25MHz request when next highest speed 15MHz or less.");
/*
* Our real min freq is master_clock/512, but upper driver layers are
* going to set the min speed during card discovery, and the right speed
* for that is 400kHz, so advertise a safe value just under that.
*
* For max speed, while the rm9200 manual says the max is 50mhz, it also
* says it supports only the SD v1.0 spec, which means the real limit is
* 25mhz. On the other hand, historical use has been to slightly violate
* the standard by running the bus at 30MHz. For more information on
* that, see the comments at the top of this file.
*/
sc->host.f_min = 375000;
//.........这里部分代码省略.........
示例15: mpt_dma_mem_alloc
/*
 * mpt_dma_mem_alloc:
 *
 * Allocate the request pool and the DMA-accessible reply memory for
 * an MPT instance: a zeroed request_t pool, a parent DMA tag, a
 * 32-bit child tag for replies, two pages of reply memory, and a
 * loaded/locked bus mapping whose physical address is recorded in
 * mpt->reply_phys.
 *
 * Returns 0 on success (or if the reply memory was already set up),
 * 1 on any failure.  NOTE(review): partially acquired resources are
 * not released on the failure paths here — presumably torn down by
 * the caller; confirm.
 */
static int
mpt_dma_mem_alloc(struct mpt_softc *mpt)
{
size_t len;
struct mpt_map_info mi;
/* Check if we alreay have allocated the reply memory */
if (mpt->reply_phys != 0) {
return 0;
}
len = sizeof (request_t) * MPT_MAX_REQUESTS(mpt);
#ifdef RELENG_4
/* FreeBSD 4.x has no M_ZERO; allocate then clear by hand. */
mpt->request_pool = (request_t *)malloc(len, M_DEVBUF, M_WAITOK);
if (mpt->request_pool == NULL) {
mpt_prt(mpt, "cannot allocate request pool\n");
return (1);
}
memset(mpt->request_pool, 0, len);
#else
mpt->request_pool = (request_t *)malloc(len, M_DEVBUF, M_WAITOK|M_ZERO);
if (mpt->request_pool == NULL) {
mpt_prt(mpt, "cannot allocate request pool\n");
return (1);
}
#endif
/*
 * Create a parent dma tag for this device.
 *
 * Align at byte boundaries,
 * Limit to 32-bit addressing for request/reply queues.
 */
if (mpt_dma_tag_create(mpt, /*parent*/bus_get_dma_tag(mpt->dev),
/*alignment*/1, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR,
/*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL,
/*maxsize*/BUS_SPACE_MAXSIZE_32BIT,
/*nsegments*/BUS_SPACE_UNRESTRICTED,
/*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0,
&mpt->parent_dmat) != 0) {
mpt_prt(mpt, "cannot create parent dma tag\n");
return (1);
}
/* Create a child tag for reply buffers: one page-aligned,
 * 32-bit addressable segment of two pages.
 */
if (mpt_dma_tag_create(mpt, mpt->parent_dmat, PAGE_SIZE, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
NULL, NULL, 2 * PAGE_SIZE, 1, BUS_SPACE_MAXSIZE_32BIT, 0,
&mpt->reply_dmat) != 0) {
mpt_prt(mpt, "cannot create a dma tag for replies\n");
return (1);
}
/* Allocate some DMA accessible memory for replies */
if (bus_dmamem_alloc(mpt->reply_dmat, (void **)&mpt->reply,
BUS_DMA_NOWAIT, &mpt->reply_dmap) != 0) {
mpt_prt(mpt, "cannot allocate %lu bytes of reply memory\n",
(u_long) (2 * PAGE_SIZE));
return (1);
}
mi.mpt = mpt;
mi.error = 0;
/* Load and lock it into "bus space"; mpt_map_rquest records the
 * physical address (and any error) in mi.
 */
bus_dmamap_load(mpt->reply_dmat, mpt->reply_dmap, mpt->reply,
2 * PAGE_SIZE, mpt_map_rquest, &mi, 0);
if (mi.error) {
mpt_prt(mpt, "error %d loading dma map for DMA reply queue\n",
mi.error);
return (1);
}
mpt->reply_phys = mi.phys;
return (0);
}