This article collects typical usage examples of the wmb function in C++. If you have been wondering how exactly to use wmb in C++, what wmb does, or what wmb looks like in real code, the hand-picked examples here may help.
The following presents 15 code examples of the wmb function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
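Although this page files wmb under C++, the function itself is the Linux kernel's write memory barrier: it orders every store issued before it ahead of every store issued after it. All 15 examples below use it the same way — fill in a buffer, descriptor, or flag first; call wmb(); only then publish the work to another CPU or to a device. Before the real drivers, here is a minimal sketch of that producer/consumer pattern; it is not taken from any example below, and the struct ring_slot layout and both helpers are hypothetical:

#include <linux/types.h>
#include <linux/errno.h>
#include <asm/barrier.h> /* wmb()/rmb(); older kernels get them via <asm/system.h> */

/* Hypothetical one-slot mailbox: data is the payload, owner publishes it. */
struct ring_slot {
    u32 data;
    u32 owner; /* 0 = writable by producer, 1 = handed to consumer */
};

static void publish_slot(struct ring_slot *slot, u32 payload)
{
    slot->data = payload;  /* 1. fill in the payload */
    wmb();                 /* 2. make the payload store visible first... */
    slot->owner = 1;       /* 3. ...then transfer ownership */
}

static int consume_slot(struct ring_slot *slot, u32 *out)
{
    if (slot->owner != 1)
        return -EAGAIN;    /* not published yet */
    rmb();                 /* pairs with the producer's wmb() */
    *out = slot->data;     /* sees the payload, never stale data */
    return 0;
}

The consumer varies across the examples: another CPU (Examples 7 and 11), DMA hardware fetching descriptors (Examples 1, 4, 5, 12, 15), a device register that must not be written until earlier stores have landed (Examples 2, 3, 6, 8, 10, 14), or a URB completion path racing with a busy flag (Examples 9 and 13).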
Example 1: eth_close
static int eth_close(struct net_device *dev)
{
    struct port *port = netdev_priv(dev);
    struct msg msg;
    int buffs = RX_DESCS; /* allocated RX buffers */
    int i;

    ports_open--;
    qmgr_disable_irq(port->plat->rxq);
    napi_disable(&port->napi);
    netif_stop_queue(dev);

    while (queue_get_desc(RXFREE_QUEUE(port->id), port, 0) >= 0)
        buffs--;

    memset(&msg, 0, sizeof(msg));
    msg.cmd = NPE_SETLOOPBACK_MODE;
    msg.eth_id = port->id;
    msg.byte3 = 1;
    if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK"))
        printk(KERN_CRIT "%s: unable to enable loopback\n", dev->name);

    i = 0;
    do { /* drain RX buffers */
        while (queue_get_desc(port->plat->rxq, port, 0) >= 0)
            buffs--;
        if (!buffs)
            break;
        if (qmgr_stat_empty(TX_QUEUE(port->id))) {
            /* we have to inject some packet */
            struct desc *desc;
            u32 phys;
            int n = queue_get_desc(port->plat->txreadyq, port, 1);
            BUG_ON(n < 0);
            desc = tx_desc_ptr(port, n);
            phys = tx_desc_phys(port, n);
            desc->buf_len = desc->pkt_len = 1;
            wmb();
            queue_put_desc(TX_QUEUE(port->id), phys, desc);
        }
        udelay(1);
    } while (++i < MAX_CLOSE_WAIT);

    if (buffs)
        printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)"
               " left in NPE\n", dev->name, buffs);
#if DEBUG_CLOSE
    if (!buffs)
        printk(KERN_DEBUG "Draining RX queue took %i cycles\n", i);
#endif

    buffs = TX_DESCS;
    while (queue_get_desc(TX_QUEUE(port->id), port, 1) >= 0)
        buffs--; /* cancel TX */

    i = 0;
    do {
        while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
            buffs--;
        if (!buffs)
            break;
    } while (++i < MAX_CLOSE_WAIT);

    if (buffs)
        printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) "
               "left in NPE\n", dev->name, buffs);
#if DEBUG_CLOSE
    if (!buffs)
        printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
#endif

    msg.byte3 = 0;
    if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK"))
        printk(KERN_CRIT "%s: unable to disable loopback\n",
               dev->name);

    phy_stop(port->phydev);

    if (!ports_open)
        qmgr_disable_irq(TXDONE_QUEUE);
    destroy_queues(port);
    release_queues(port);
    return 0;
}
Example 2: mdp4_overlay_update_dsi_cmd
static void mdp4_overlay_update_dsi_cmd(struct msm_fb_data_type *mfd)
{
    int ptype;
    struct mdp4_overlay_pipe *pipe;
    int ret;
    int cndx = 0;
    struct vsycn_ctrl *vctrl;

    if (mfd->key != MFD_KEY)
        return;

    vctrl = &vsync_ctrl_db[cndx];

    if (vctrl->base_pipe == NULL) {
        ptype = mdp4_overlay_format2type(mfd->fb_imgType);
        if (ptype < 0)
            printk(KERN_INFO "%s: format2type failed\n", __func__);
        pipe = mdp4_overlay_pipe_alloc(ptype, MDP4_MIXER0);
        if (pipe == NULL) {
            printk(KERN_INFO "%s: pipe_alloc failed\n", __func__);
            return;
        }
        pipe->pipe_used++;
        pipe->mixer_stage = MDP4_MIXER_STAGE_BASE;
        pipe->mixer_num = MDP4_MIXER0;
        pipe->src_format = mfd->fb_imgType;
        mdp4_overlay_panel_mode(pipe->mixer_num, MDP4_PANEL_DSI_CMD);
        ret = mdp4_overlay_format2pipe(pipe);
        if (ret < 0)
            printk(KERN_INFO "%s: format2type failed\n", __func__);

        vctrl->base_pipe = pipe; /* keep it */
        mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
        pipe->ov_blt_addr = 0;
        pipe->dma_blt_addr = 0;
    } else {
        pipe = vctrl->base_pipe;
    }

    MDP_OUTP(MDP_BASE + 0x021c, 10); /* read pointer */

    /*
     * configure dsi stream id
     * dma_p = 0, dma_s = 1
     */
    MDP_OUTP(MDP_BASE + 0x000a0, 0x10);
    /* disable dsi trigger */
    MDP_OUTP(MDP_BASE + 0x000a4, 0x00);

    mdp4_overlay_setup_pipe_addr(mfd, pipe);
    mdp4_overlay_rgb_setup(pipe);
    mdp4_overlay_reg_flush(pipe, 1);
    mdp4_mixer_stage_up(pipe, 0);
    mdp4_overlayproc_cfg(pipe);
    mdp4_overlay_dmap_xy(pipe);
    mdp4_overlay_dmap_cfg(mfd, 0);

    wmb();
}
Example 3: prom_init
//......... some code omitted here .........
            GT_WRITE(GT_PCI0IOREMAP_OFS, map);
        }

        set_io_port_base(MALTA_GT_PORT_BASE);
        break;

    case MIPS_REVISION_SCON_BONITO:
        _pcictrl_bonito_pcicfg = (unsigned long)ioremap(BONITO_PCICFG_BASE, BONITO_PCICFG_SIZE);

        /*
         * Disable Bonito IOBC.
         */
        BONITO_PCIMEMBASECFG = BONITO_PCIMEMBASECFG &
            ~(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
              BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);

        /*
         * Setup the North bridge to do Master byte-lane swapping
         * when running in bigendian.
         */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
        BONITO_BONGENCFG = BONITO_BONGENCFG &
            ~(BONITO_BONGENCFG_MSTRBYTESWAP |
              BONITO_BONGENCFG_BYTESWAP);
#else
        BONITO_BONGENCFG = BONITO_BONGENCFG |
            BONITO_BONGENCFG_MSTRBYTESWAP |
            BONITO_BONGENCFG_BYTESWAP;
#endif

        set_io_port_base(MALTA_BONITO_PORT_BASE);
        break;

    case MIPS_REVISION_SCON_SOCIT:
    case MIPS_REVISION_SCON_ROCIT:
        _pcictrl_msc = (unsigned long)ioremap(MIPS_MSC01_PCI_REG_BASE, 0x2000);
mips_pci_controller:
        mb();
        MSC_READ(MSC01_PCI_CFG, data);
        MSC_WRITE(MSC01_PCI_CFG, data & ~MSC01_PCI_CFG_EN_BIT);
        wmb();

        /* Fix up lane swapping. */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
        MSC_WRITE(MSC01_PCI_SWAP, MSC01_PCI_SWAP_NOSWAP);
#else
        MSC_WRITE(MSC01_PCI_SWAP,
                  MSC01_PCI_SWAP_BYTESWAP << MSC01_PCI_SWAP_IO_SHF |
                  MSC01_PCI_SWAP_BYTESWAP << MSC01_PCI_SWAP_MEM_SHF |
                  MSC01_PCI_SWAP_BYTESWAP << MSC01_PCI_SWAP_BAR0_SHF);
#endif
        /* Fix up target memory mapping. */
        MSC_READ(MSC01_PCI_BAR0, mask);
        MSC_WRITE(MSC01_PCI_P2SCMSKL, mask & MSC01_PCI_BAR0_SIZE_MSK);

        /* Don't handle target retries indefinitely. */
        if ((data & MSC01_PCI_CFG_MAXRTRY_MSK) ==
            MSC01_PCI_CFG_MAXRTRY_MSK)
            data = (data & ~(MSC01_PCI_CFG_MAXRTRY_MSK <<
                             MSC01_PCI_CFG_MAXRTRY_SHF)) |
                   ((MSC01_PCI_CFG_MAXRTRY_MSK - 1) <<
                    MSC01_PCI_CFG_MAXRTRY_SHF);

        wmb();
        MSC_WRITE(MSC01_PCI_CFG, data);
        mb();

        set_io_port_base(MALTA_MSC_PORT_BASE);
        break;

    case MIPS_REVISION_SCON_SOCITSC:
    case MIPS_REVISION_SCON_SOCITSCP:
        _pcictrl_msc = (unsigned long)ioremap(MIPS_SOCITSC_PCI_REG_BASE, 0x2000);
        goto mips_pci_controller;

    default:
        /* Unknown system controller */
        mips_display_message("SC Error");
        while (1); /* We die here... */
    }

    board_nmi_handler_setup = mips_nmi_setup;
    board_ejtag_handler_setup = mips_ejtag_setup;

    prom_init_cmdline();
    prom_meminit();
#ifdef CONFIG_SERIAL_8250_CONSOLE
    console_config();
#endif
    /* Early detection of CMP support */
    if (gcmp_probe(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ))
        if (!register_cmp_smp_ops())
            return;

    if (!register_vsmp_smp_ops())
        return;

#ifdef CONFIG_MIPS_MT_SMTC
    register_smp_ops(&msmtc_smp_ops);
#endif
}
Example 4: greth_start_xmit_gbit
static netdev_tx_t
greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
{
    struct greth_private *greth = netdev_priv(dev);
    struct greth_bd *bdp;
    u32 status = 0, dma_addr, ctrl;
    int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
    unsigned long flags;

    nr_frags = skb_shinfo(skb)->nr_frags;

    /* Clean TX Ring */
    greth_clean_tx_gbit(dev);

    if (greth->tx_free < nr_frags + 1) {
        spin_lock_irqsave(&greth->devlock, flags); /* save from poll/irq */
        ctrl = GRETH_REGLOAD(greth->regs->control);
        /* Enable TX IRQ only if not already in poll() routine */
        if (ctrl & GRETH_RXI)
            GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
        netif_stop_queue(dev);
        spin_unlock_irqrestore(&greth->devlock, flags);
        err = NETDEV_TX_BUSY;
        goto out;
    }

    if (netif_msg_pktdata(greth))
        greth_print_tx_packet(skb);

    if (unlikely(skb->len > MAX_FRAME_SIZE)) {
        dev->stats.tx_errors++;
        goto out;
    }

    /* Save skb pointer. */
    greth->tx_skbuff[greth->tx_next] = skb;

    /* Linear buf */
    if (nr_frags != 0)
        status = GRETH_TXBD_MORE;

    if (skb->ip_summed == CHECKSUM_PARTIAL)
        status |= GRETH_TXBD_CSALL;
    status |= skb_headlen(skb) & GRETH_BD_LEN;
    if (greth->tx_next == GRETH_TXBD_NUM_MASK)
        status |= GRETH_BD_WR;

    bdp = greth->tx_bd_base + greth->tx_next;
    greth_write_bd(&bdp->stat, status);
    dma_addr = dma_map_single(greth->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);

    if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
        goto map_error;

    greth_write_bd(&bdp->addr, dma_addr);

    curr_tx = NEXT_TX(greth->tx_next);

    /* Frags */
    for (i = 0; i < nr_frags; i++) {
        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
        greth->tx_skbuff[curr_tx] = NULL;
        bdp = greth->tx_bd_base + curr_tx;

        status = GRETH_BD_EN;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
            status |= GRETH_TXBD_CSALL;
        status |= skb_frag_size(frag) & GRETH_BD_LEN;

        /* Wrap around descriptor ring */
        if (curr_tx == GRETH_TXBD_NUM_MASK)
            status |= GRETH_BD_WR;

        /* More fragments left */
        if (i < nr_frags - 1)
            status |= GRETH_TXBD_MORE;
        else
            status |= GRETH_BD_IE; /* enable IRQ on last fragment */

        greth_write_bd(&bdp->stat, status);

        dma_addr = skb_frag_dma_map(greth->dev, frag, 0, skb_frag_size(frag),
                                    DMA_TO_DEVICE);

        if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
            goto frag_map_error;

        greth_write_bd(&bdp->addr, dma_addr);

        curr_tx = NEXT_TX(curr_tx);
    }

    wmb();

    /* Enable the descriptor chain by enabling the first descriptor */
    bdp = greth->tx_bd_base + greth->tx_next;
    greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
    greth->tx_next = curr_tx;
    greth->tx_free -= nr_frags + 1;
//......... some code omitted here .........
Example 5: greth_rx_gbit
//......... some code omitted here .........
        skb = greth->rx_skbuff[greth->rx_cur];
        GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
        mb();
        status = greth_read_bd(&bdp->stat);
        bad = 0;

        if (status & GRETH_BD_EN)
            break;

        /* Check status for errors. */
        if (unlikely(status & GRETH_RXBD_STATUS)) {
            if (status & GRETH_RXBD_ERR_FT) {
                dev->stats.rx_length_errors++;
                bad = 1;
            } else if (status &
                       (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE | GRETH_RXBD_ERR_LE)) {
                dev->stats.rx_frame_errors++;
                bad = 1;
            } else if (status & GRETH_RXBD_ERR_CRC) {
                dev->stats.rx_crc_errors++;
                bad = 1;
            }
        }

        /* Allocate new skb to replace current, not needed if the
         * current skb can be reused */
        if (!bad && (newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) {
            skb_reserve(newskb, NET_IP_ALIGN);

            dma_addr = dma_map_single(greth->dev,
                                      newskb->data,
                                      MAX_FRAME_SIZE + NET_IP_ALIGN,
                                      DMA_FROM_DEVICE);

            if (!dma_mapping_error(greth->dev, dma_addr)) {
                /* Process the incoming frame. */
                pkt_len = status & GRETH_BD_LEN;

                dma_unmap_single(greth->dev,
                                 greth_read_bd(&bdp->addr),
                                 MAX_FRAME_SIZE + NET_IP_ALIGN,
                                 DMA_FROM_DEVICE);

                if (netif_msg_pktdata(greth))
                    greth_print_rx_packet(phys_to_virt(greth_read_bd(&bdp->addr)), pkt_len);

                skb_put(skb, pkt_len);

                if (dev->features & NETIF_F_RXCSUM && hw_checksummed(status))
                    skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                    skb_checksum_none_assert(skb);

                skb->protocol = eth_type_trans(skb, dev);
                dev->stats.rx_packets++;
                dev->stats.rx_bytes += pkt_len;
                netif_receive_skb(skb);

                greth->rx_skbuff[greth->rx_cur] = newskb;
                greth_write_bd(&bdp->addr, dma_addr);
            } else {
                if (net_ratelimit())
                    dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
                dev_kfree_skb(newskb);
                /* reusing current skb, so it is a drop */
                dev->stats.rx_dropped++;
            }
        } else if (bad) {
            /* Bad Frame transfer, the skb is reused */
            dev->stats.rx_dropped++;
        } else {
            /* Failed Allocating a new skb. This is rather stupid
             * but the current "filled" skb is reused, as if
             * transfer failure. One could argue that RX descriptor
             * table handling should be divided into cleaning and
             * filling as the TX part of the driver
             */
            if (net_ratelimit())
                dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
            /* reusing current skb, so it is a drop */
            dev->stats.rx_dropped++;
        }

        status = GRETH_BD_EN | GRETH_BD_IE;
        if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
            status |= GRETH_BD_WR;
        }

        wmb();
        greth_write_bd(&bdp->stat, status);

        spin_lock_irqsave(&greth->devlock, flags);
        greth_enable_rx(greth);
        spin_unlock_irqrestore(&greth->devlock, flags);

        greth->rx_cur = NEXT_RX(greth->rx_cur);
    }

    return count;
}
Example 6: mipi_dsi_on
//......... some code omitted here .........
            MIPI_OUTP(MIPI_DSI_BASE + 0x00ac, mipi->dlane_swap);

            MIPI_OUTP(MIPI_DSI_BASE + 0x20,
                ((hbp + width + dummy_xres) << 16 | (hbp)));
            MIPI_OUTP(MIPI_DSI_BASE + 0x24,
                ((vbp + height + dummy_yres) << 16 | (vbp)));
            MIPI_OUTP(MIPI_DSI_BASE + 0x28,
                (vbp + height + dummy_yres + vfp) << 16 |
                (hbp + width + dummy_xres + hfp));
        }

        MIPI_OUTP(MIPI_DSI_BASE + 0x2c, (hspw << 16));
        MIPI_OUTP(MIPI_DSI_BASE + 0x30, 0);
        MIPI_OUTP(MIPI_DSI_BASE + 0x34, (vspw << 16));
    } else { /* command mode */
        if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB888)
            bpp = 3;
        else if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB666)
            bpp = 3;
        else if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB565)
            bpp = 2;
        else
            bpp = 3; /* Default format set to RGB888 */

        ystride = width * bpp + 1;

        /* DSI_COMMAND_MODE_MDP_STREAM_CTRL */
        data = (ystride << 16) | (mipi->vc << 8) | DTYPE_DCS_LWRITE;
        MIPI_OUTP(MIPI_DSI_BASE + 0x5c, data);
        MIPI_OUTP(MIPI_DSI_BASE + 0x54, data);

        /* DSI_COMMAND_MODE_MDP_STREAM_TOTAL */
        data = height << 16 | width;
        MIPI_OUTP(MIPI_DSI_BASE + 0x60, data);
        MIPI_OUTP(MIPI_DSI_BASE + 0x58, data);
    }

    mipi_dsi_host_init(mipi);

    if (mipi->force_clk_lane_hs) {
        u32 tmp;

        tmp = MIPI_INP(MIPI_DSI_BASE + 0xA8);
        tmp |= (1 << 28);
        MIPI_OUTP(MIPI_DSI_BASE + 0xA8, tmp);
        wmb();
    }

    if (mdp_rev >= MDP_REV_41)
        mutex_lock(&mfd->dma->ov_mutex);
    else
        down(&mfd->dma->mutex);

    ret = panel_next_on(pdev);

    mipi_dsi_op_mode_config(mipi->mode);

    if (mfd->panel_info.type == MIPI_CMD_PANEL) {
        if (pinfo->lcd.vsync_enable) {
            if (pinfo->lcd.hw_vsync_mode && vsync_gpio >= 0) {
                if (mdp_rev >= MDP_REV_41) {
                    if (gpio_request(vsync_gpio, "MDP_VSYNC") == 0)
                        gpio_direction_input(vsync_gpio);
                    else
                        pr_err("%s: unable to request gpio=%d\n",
                               __func__, vsync_gpio);
                } else if (mdp_rev == MDP_REV_303) {
                    if (!tlmm_settings && gpio_request(
                            vsync_gpio, "MDP_VSYNC") == 0) {
                        ret = gpio_tlmm_config(
                                GPIO_CFG(vsync_gpio, 1,
                                         GPIO_CFG_INPUT,
                                         GPIO_CFG_PULL_DOWN,
                                         GPIO_CFG_2MA),
                                GPIO_CFG_ENABLE);
                        if (ret) {
                            pr_err("%s: unable to config tlmm = %d\n",
                                   __func__, vsync_gpio);
                        }
                        tlmm_settings = TRUE;

                        gpio_direction_input(vsync_gpio);
                    } else {
                        if (!tlmm_settings) {
                            pr_err("%s: unable to request gpio=%d\n",
                                   __func__, vsync_gpio);
                        }
                    }
                }
//......... some code omitted here .........
Example 7: ibmveth_poll
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
    struct ibmveth_adapter *adapter =
        container_of(napi, struct ibmveth_adapter, napi);
    struct net_device *netdev = adapter->netdev;
    int frames_processed = 0;
    unsigned long lpar_rc;

restart_poll:
    do {
        if (!ibmveth_rxq_pending_buffer(adapter))
            break;

        smp_rmb();
        if (!ibmveth_rxq_buffer_valid(adapter)) {
            wmb(); /* suggested by larson1 */
            adapter->rx_invalid_buffer++;
            netdev_dbg(netdev, "recycling invalid buffer\n");
            ibmveth_rxq_recycle_buffer(adapter);
        } else {
            struct sk_buff *skb, *new_skb;
            int length = ibmveth_rxq_frame_length(adapter);
            int offset = ibmveth_rxq_frame_offset(adapter);
            int csum_good = ibmveth_rxq_csum_good(adapter);

            skb = ibmveth_rxq_get_buffer(adapter);

            new_skb = NULL;
            if (length < rx_copybreak)
                new_skb = netdev_alloc_skb(netdev, length);

            if (new_skb) {
                skb_copy_to_linear_data(new_skb,
                                        skb->data + offset,
                                        length);
                if (rx_flush)
                    ibmveth_flush_buffer(skb->data,
                                         length + offset);
                if (!ibmveth_rxq_recycle_buffer(adapter))
                    kfree_skb(skb);
                skb = new_skb;
            } else {
                ibmveth_rxq_harvest_buffer(adapter);
                skb_reserve(skb, offset);
            }

            skb_put(skb, length);
            skb->protocol = eth_type_trans(skb, netdev);

            if (csum_good)
                skb->ip_summed = CHECKSUM_UNNECESSARY;

            netif_receive_skb(skb); /* send it up */

            netdev->stats.rx_packets++;
            netdev->stats.rx_bytes += length;
            frames_processed++;
        }
    } while (frames_processed < budget);

    ibmveth_replenish_task(adapter);

    if (frames_processed < budget) {
        /* We think we are done - reenable interrupts,
         * then check once more to make sure we are done.
         */
        lpar_rc = h_vio_signal(adapter->vdev->unit_address,
                               VIO_IRQ_ENABLE);

        BUG_ON(lpar_rc != H_SUCCESS);

        napi_complete(napi);

        if (ibmveth_rxq_pending_buffer(adapter) &&
            napi_reschedule(napi)) {
            lpar_rc = h_vio_signal(adapter->vdev->unit_address,
                                   VIO_IRQ_DISABLE);
            goto restart_poll;
        }
    }

    return frames_processed;
}
Example 8: mdp3_dmap_histo_get
static int mdp3_dmap_histo_get(struct mdp3_dma *dma)
{
    int i, state, timeout, ret;
    u32 addr;
    unsigned long flag;

    spin_lock_irqsave(&dma->histo_lock, flag);
    state = dma->histo_state;
    spin_unlock_irqrestore(&dma->histo_lock, flag);

    if (state != MDP3_DMA_HISTO_STATE_START &&
        state != MDP3_DMA_HISTO_STATE_READY) {
        pr_err("mdp3_dmap_histo_get invalid state %d\n", state);
        return -EINVAL;
    }

    timeout = HIST_WAIT_TIMEOUT(dma->histogram_config.frame_count);
    ret = wait_for_completion_killable_timeout(&dma->histo_comp, timeout);

    if (ret == 0) {
        pr_debug("mdp3_dmap_histo_get time out\n");
        ret = -ETIMEDOUT;
    } else if (ret < 0) {
        pr_err("mdp3_dmap_histo_get interrupted\n");
    }

    if (ret < 0)
        return ret;

    if (dma->histo_state != MDP3_DMA_HISTO_STATE_READY) {
        pr_debug("mdp3_dmap_histo_get after dma shut down\n");
        return -EPERM;
    }

    addr = MDP3_REG_DMA_P_HIST_R_DATA;
    for (i = 0; i < MDP_HISTOGRAM_BIN_NUM; i++) {
        dma->histo_data.r_data[i] = MDP3_REG_READ(addr);
        addr += 4;
    }

    addr = MDP3_REG_DMA_P_HIST_G_DATA;
    for (i = 0; i < MDP_HISTOGRAM_BIN_NUM; i++) {
        dma->histo_data.g_data[i] = MDP3_REG_READ(addr);
        addr += 4;
    }

    addr = MDP3_REG_DMA_P_HIST_B_DATA;
    for (i = 0; i < MDP_HISTOGRAM_BIN_NUM; i++) {
        dma->histo_data.b_data[i] = MDP3_REG_READ(addr);
        addr += 4;
    }

    dma->histo_data.extra[0] =
        MDP3_REG_READ(MDP3_REG_DMA_P_HIST_EXTRA_INFO_0);
    dma->histo_data.extra[1] =
        MDP3_REG_READ(MDP3_REG_DMA_P_HIST_EXTRA_INFO_1);

    spin_lock_irqsave(&dma->histo_lock, flag);
    init_completion(&dma->histo_comp);
    MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_START, 1);
    wmb();
    dma->histo_state = MDP3_DMA_HISTO_STATE_START;
    spin_unlock_irqrestore(&dma->histo_lock, flag);

    return 0;
}
Example 9: usb_tranzport_write
/**
 * usb_tranzport_write
 */
static ssize_t usb_tranzport_write(struct file *file,
                                   const char __user *buffer, size_t count,
                                   loff_t *ppos)
{
    struct usb_tranzport *dev;
    size_t bytes_to_write;
    int retval = 0;

    dev = file->private_data;

    /* verify that we actually have some data to write */
    if (count == 0)
        goto exit;

    /* lock this object */
    if (mutex_lock_interruptible(&dev->mtx)) {
        retval = -ERESTARTSYS;
        goto exit;
    }

    /* verify that the device wasn't unplugged */
    if (dev->intf == NULL) {
        retval = -ENODEV;
        err("No device or device unplugged %d\n", retval);
        goto unlock_exit;
    }

    /* wait until previous transfer is finished */
    if (dev->interrupt_out_busy) {
        if (file->f_flags & O_NONBLOCK) {
            retval = -EAGAIN;
            goto unlock_exit;
        }
        retval = wait_event_interruptible(dev->write_wait,
                                          !dev->interrupt_out_busy);
        if (retval < 0)
            goto unlock_exit;
    }

    /* write the data into interrupt_out_buffer from userspace */
    bytes_to_write = min(count,
                         write_buffer_size *
                         dev->interrupt_out_endpoint_size);
    if (bytes_to_write < count)
        dev_warn(&dev->intf->dev,
                 "Write buffer overflow, %zd bytes dropped\n",
                 count - bytes_to_write);

    dbg_info(&dev->intf->dev,
             "%s: count = %zd, bytes_to_write = %zd\n", __func__,
             count, bytes_to_write);

    if (copy_from_user(dev->interrupt_out_buffer, buffer, bytes_to_write)) {
        retval = -EFAULT;
        goto unlock_exit;
    }

    if (dev->interrupt_out_endpoint == NULL) {
        err("Endpoint should not be null!\n");
        goto unlock_exit;
    }

    /* send off the urb */
    usb_fill_int_urb(dev->interrupt_out_urb,
                     interface_to_usbdev(dev->intf),
                     usb_sndintpipe(interface_to_usbdev(dev->intf),
                                    dev->interrupt_out_endpoint->
                                    bEndpointAddress),
                     dev->interrupt_out_buffer, bytes_to_write,
                     usb_tranzport_interrupt_out_callback, dev,
                     dev->interrupt_out_interval);
    dev->interrupt_out_busy = 1;
    wmb();

    retval = usb_submit_urb(dev->interrupt_out_urb, GFP_KERNEL);
    if (retval) {
        dev->interrupt_out_busy = 0;
        err("Couldn't submit interrupt_out_urb %d\n", retval);
        goto unlock_exit;
    }
    retval = bytes_to_write;

unlock_exit:
    /* unlock the device */
    mutex_unlock(&dev->mtx);

exit:
    return retval;
}
Example 10: mlxsw_pci_queue_doorbell_producer_ring
static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
                                                   struct mlxsw_pci_queue *q)
{
    wmb(); /* ensure all writes are done before we ring a bell */
    __mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
}
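The comment above states the doorbell rule for DMA-capable devices: every queue-element store must be globally visible before the device sees the new producer counter, or it may DMA stale memory. A hedged sketch of a typical caller — queue_post() and next_free_elem() are hypothetical stand-ins; only the doorbell helper above comes from the mlxsw driver:

static void queue_post(struct mlxsw_pci *mlxsw_pci, struct mlxsw_pci_queue *q,
                       const void *elem, size_t len)
{
    memcpy(next_free_elem(q), elem, len); /* hypothetical helper */
    q->producer_counter++;
    /* the wmb() inside the doorbell helper orders the stores above
     * before the MMIO write that the device observes */
    mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
}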
Example 11: nvhdcp_set_plugged
static inline bool nvhdcp_set_plugged(struct tegra_nvhdcp *nvhdcp, bool plugged)
{
    nvhdcp->plugged = plugged;
    wmb();
    return plugged;
}
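Setting the flag and then calling wmb() is only half of the contract; a reader on another CPU has to pair it with a read barrier. A hedged sketch of the matching reader — the tegra driver does ship an nvhdcp_is_plugged() counterpart, though its exact body may differ:

static inline bool nvhdcp_is_plugged(struct tegra_nvhdcp *nvhdcp)
{
    rmb(); /* pairs with the wmb() in nvhdcp_set_plugged() */
    return nvhdcp->plugged;
}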
Example 12: eth_xmit
static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct port *port = netdev_priv(dev);
    unsigned int txreadyq = port->plat->txreadyq;
    int len, offset, bytes, n;
    void *mem;
    u32 phys;
    struct desc *desc;

#if DEBUG_TX
    printk(KERN_DEBUG "%s: eth_xmit\n", dev->name);
#endif

    if (unlikely(skb->len > MAX_MRU)) {
        dev_kfree_skb(skb);
        dev->stats.tx_errors++;
        return NETDEV_TX_OK;
    }

    debug_pkt(dev, "eth_xmit", skb->data, skb->len);

    len = skb->len;
#ifdef __ARMEB__
    offset = 0; /* no need to keep alignment */
    bytes = len;
    mem = skb->data;
#else
    offset = (int)skb->data & 3; /* keep 32-bit alignment */
    bytes = ALIGN(offset + len, 4);
    if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
        dev_kfree_skb(skb);
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
    }
    memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
    dev_kfree_skb(skb);
#endif

    phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
    if (dma_mapping_error(&dev->dev, phys)) {
#ifdef __ARMEB__
        dev_kfree_skb(skb);
#else
        kfree(mem);
#endif
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
    }

    n = queue_get_desc(txreadyq, port, 1);
    BUG_ON(n < 0);
    desc = tx_desc_ptr(port, n);

#ifdef __ARMEB__
    port->tx_buff_tab[n] = skb;
#else
    port->tx_buff_tab[n] = mem;
#endif
    desc->data = phys + offset;
    desc->buf_len = desc->pkt_len = len;

    /* NPE firmware pads short frames with zeros internally */
    wmb();
    queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);
    dev->trans_start = jiffies;

    if (qmgr_stat_empty(txreadyq)) {
#if DEBUG_TX
        printk(KERN_DEBUG "%s: eth_xmit queue full\n", dev->name);
#endif
        netif_stop_queue(dev);
        /* we could miss TX ready interrupt */
        if (!qmgr_stat_empty(txreadyq)) {
#if DEBUG_TX
            printk(KERN_DEBUG "%s: eth_xmit ready again\n",
                   dev->name);
#endif
            netif_wake_queue(dev);
        }
    }

#if DEBUG_TX
    printk(KERN_DEBUG "%s: eth_xmit end\n", dev->name);
#endif
    return NETDEV_TX_OK;
}
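Beyond the wmb() that orders the descriptor stores before queue_put_desc(), note the stop/re-check sequence at the end: after netif_stop_queue() the driver tests the TX-ready queue again, because the "ready" interrupt may have fired between the first emptiness check and the stop; without the re-check and netif_wake_queue(), the queue could stay stopped forever.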
Example 13: ld_usb_write
/**
 * ld_usb_write
 */
static ssize_t ld_usb_write(struct file *file, const char __user *buffer,
                            size_t count, loff_t *ppos)
{
    struct ld_usb *dev;
    size_t bytes_to_write;
    int retval = 0;

    dev = file->private_data;

    /* verify that we actually have some data to write */
    if (count == 0)
        goto exit;

    /* lock this object */
    if (mutex_lock_interruptible(&dev->mutex)) {
        retval = -ERESTARTSYS;
        goto exit;
    }

    /* verify that the device wasn't unplugged */
    if (dev->intf == NULL) {
        retval = -ENODEV;
        printk(KERN_ERR "ldusb: No device or device unplugged %d\n", retval);
        goto unlock_exit;
    }

    /* wait until previous transfer is finished */
    if (dev->interrupt_out_busy) {
        if (file->f_flags & O_NONBLOCK) {
            retval = -EAGAIN;
            goto unlock_exit;
        }
        retval = wait_event_interruptible(dev->write_wait, !dev->interrupt_out_busy);
        if (retval < 0)
            goto unlock_exit;
    }

    /* write the data into interrupt_out_buffer from userspace */
    bytes_to_write = min(count, write_buffer_size * dev->interrupt_out_endpoint_size);
    if (bytes_to_write < count)
        dev_warn(&dev->intf->dev, "Write buffer overflow, %zd bytes dropped\n",
                 count - bytes_to_write);
    dev_dbg(&dev->intf->dev, "%s: count = %zd, bytes_to_write = %zd\n",
            __func__, count, bytes_to_write);

    if (copy_from_user(dev->interrupt_out_buffer, buffer, bytes_to_write)) {
        retval = -EFAULT;
        goto unlock_exit;
    }

    if (dev->interrupt_out_endpoint == NULL) {
        /* try HID_REQ_SET_REPORT=9 on control_endpoint instead of interrupt_out_endpoint */
        retval = usb_control_msg(interface_to_usbdev(dev->intf),
                                 usb_sndctrlpipe(interface_to_usbdev(dev->intf), 0),
                                 9,
                                 USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
                                 1 << 8, 0,
                                 dev->interrupt_out_buffer,
                                 bytes_to_write,
                                 USB_CTRL_SET_TIMEOUT * HZ);
        if (retval < 0)
            dev_err(&dev->intf->dev,
                    "Couldn't submit HID_REQ_SET_REPORT %d\n",
                    retval);
        goto unlock_exit;
    }

    /* send off the urb */
    usb_fill_int_urb(dev->interrupt_out_urb,
                     interface_to_usbdev(dev->intf),
                     usb_sndintpipe(interface_to_usbdev(dev->intf),
                                    dev->interrupt_out_endpoint->bEndpointAddress),
                     dev->interrupt_out_buffer,
                     bytes_to_write,
                     ld_usb_interrupt_out_callback,
                     dev,
                     dev->interrupt_out_interval);
    dev->interrupt_out_busy = 1;
    wmb();

    retval = usb_submit_urb(dev->interrupt_out_urb, GFP_KERNEL);
    if (retval) {
        dev->interrupt_out_busy = 0;
        dev_err(&dev->intf->dev,
                "Couldn't submit interrupt_out_urb %d\n", retval);
        goto unlock_exit;
    }
    retval = bytes_to_write;

unlock_exit:
    /* unlock the device */
    mutex_unlock(&dev->mutex);

exit:
    return retval;
}
Example 14: msm_bus_noc_set_qos_bw
static void msm_bus_noc_set_qos_bw(struct msm_bus_noc_info *ninfo,
    uint32_t mport, uint8_t perm_mode, struct msm_bus_noc_qos_bw *qbw)
{
    uint32_t reg_val, val, mode;

    if (!ninfo->qos_freq) {
        MSM_BUS_DBG("Zero QoS Freq\n");
        return;
    }

    /* If Limiter or Regulator modes are not supported, bw is not available */
    if (perm_mode & (NOC_QOS_PERM_MODE_LIMITER |
        NOC_QOS_PERM_MODE_REGULATOR)) {
        uint32_t bw_val = noc_bw_field(qbw->bw, ninfo->qos_freq);
        uint32_t sat_val = noc_sat_field(qbw->bw, qbw->ws,
            ninfo->qos_freq);

        MSM_BUS_DBG("NOC: BW: perm_mode: %d bw_val: %d, sat_val: %d\n",
            perm_mode, bw_val, sat_val);
        /*
         * If in Limiter/Regulator mode, first go to fixed mode.
         * Clear QoS accumulator
         */
        mode = readl_relaxed(NOC_QOS_MODEn_ADDR(ninfo->base,
            mport)) & NOC_QOS_MODEn_MODE_BMSK;
        if (mode == NOC_QOS_MODE_REGULATOR || mode ==
            NOC_QOS_MODE_LIMITER) {
            reg_val = readl_relaxed(NOC_QOS_MODEn_ADDR(ninfo->
                base, mport));
            val = NOC_QOS_MODE_FIXED;
            writel_relaxed((reg_val & (~(NOC_QOS_MODEn_MODE_BMSK)))
                | (val & NOC_QOS_MODEn_MODE_BMSK),
                NOC_QOS_MODEn_ADDR(ninfo->base, mport));
        }

        reg_val = readl_relaxed(NOC_QOS_BWn_ADDR(ninfo->base, mport));
        val = bw_val << NOC_QOS_BWn_BW_SHFT;
        writel_relaxed(((reg_val & (~(NOC_QOS_BWn_BW_BMSK))) |
            (val & NOC_QOS_BWn_BW_BMSK)),
            NOC_QOS_BWn_ADDR(ninfo->base, mport));
        MSM_BUS_DBG("NOC: BW: Wrote value: 0x%x\n", ((reg_val &
            (~NOC_QOS_BWn_BW_BMSK)) | (val &
            NOC_QOS_BWn_BW_BMSK)));

        reg_val = readl_relaxed(NOC_QOS_SATn_ADDR(ninfo->base,
            mport));
        val = sat_val << NOC_QOS_SATn_SAT_SHFT;
        writel_relaxed(((reg_val & (~(NOC_QOS_SATn_SAT_BMSK))) |
            (val & NOC_QOS_SATn_SAT_BMSK)),
            NOC_QOS_SATn_ADDR(ninfo->base, mport));
        MSM_BUS_DBG("NOC: SAT: Wrote value: 0x%x\n", ((reg_val &
            (~NOC_QOS_SATn_SAT_BMSK)) | (val &
            NOC_QOS_SATn_SAT_BMSK)));

        /* Set mode back to what it was initially */
        reg_val = readl_relaxed(NOC_QOS_MODEn_ADDR(ninfo->base,
            mport));
        writel_relaxed((reg_val & (~(NOC_QOS_MODEn_MODE_BMSK)))
            | (mode & NOC_QOS_MODEn_MODE_BMSK),
            NOC_QOS_MODEn_ADDR(ninfo->base, mport));
        /*
         * Ensure that all writes for bandwidth registers have
         * completed before returning
         */
        wmb();
    }
}
Example 15: greth_enable_rx
static inline void greth_enable_rx(struct greth_private *greth)
{
    wmb();
    GRETH_REGORIN(greth->regs->control, GRETH_RXEN);
}
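This helper is the tail of the RX path shown in Example 5: greth_write_bd() refills the descriptor, and the wmb() here makes those stores visible before the control-register write re-enables the receiver, so the hardware can never fetch a half-updated descriptor.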