This article collects and summarizes typical usage examples of the pci_write_config_byte function in C++. If you have been wondering how pci_write_config_byte is used in practice, how to call it, or what real-world code that uses it looks like, the curated examples here should help.
A total of 15 code examples of pci_write_config_byte are shown below, sorted by popularity by default.
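Before the examples, a minimal sketch of the most common call pattern may be useful: a read-modify-write of a byte-wide PCI configuration register. This fragment is illustrative only; the register offset 0x40, the bit mask, and the function name are hypothetical placeholders and are not taken from any of the drivers below.
#include <linux/pci.h>
/* Hypothetical sketch: set bit 6 of the byte-wide config register at
 * offset 0x40 of a PCI device. Offset, mask and function name are
 * placeholders chosen for illustration only. */
static int example_enable_feature(struct pci_dev *pdev)
{
	u8 val;
	int rc;
	/* Read the current register value. */
	rc = pci_read_config_byte(pdev, 0x40, &val);
	if (rc)
		return rc;
	/* Write it back only if the bit still needs to be set. */
	if (!(val & 0x40)) {
		rc = pci_write_config_byte(pdev, 0x40, val | 0x40);
		if (rc)
			return rc;
	}
	return 0;
}
Both pci_read_config_byte and pci_write_config_byte return 0 (PCIBIOS_SUCCESSFUL) on success, so checking the return value as above is the usual defensive pattern; many of the in-tree examples below skip the check for registers that are known to be accessible.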
Example 1: cs5520_init_one
static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const unsigned int cmd_port[] = { 0x1F0, 0x170 };
static const unsigned int ctl_port[] = { 0x3F6, 0x376 };
struct ata_port_info pi = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.port_ops = &cs5520_port_ops,
};
const struct ata_port_info *ppi[2];
u8 pcicfg;
void __iomem *iomap[5];
struct ata_host *host;
struct ata_ioports *ioaddr;
int i, rc;
rc = pcim_enable_device(pdev);
if (rc)
return rc;
/* IDE port enable bits */
pci_read_config_byte(pdev, 0x60, &pcicfg);
/* Check if the ATA ports are enabled */
if ((pcicfg & 3) == 0)
return -ENODEV;
ppi[0] = ppi[1] = &ata_dummy_port_info;
if (pcicfg & 1)
ppi[0] = &pi;
if (pcicfg & 2)
ppi[1] = &pi;
if ((pcicfg & 0x40) == 0) {
dev_printk(KERN_WARNING, &pdev->dev,
"DMA mode disabled. Enabling.\n");
pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);
}
pi.mwdma_mask = id->driver_data;
host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
if (!host)
return -ENOMEM;
/* Perform set up for DMA */
if (pci_enable_device_io(pdev)) {
printk(KERN_ERR DRV_NAME ": unable to configure BAR2.\n");
return -ENODEV;
}
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n");
return -ENODEV;
}
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
printk(KERN_ERR DRV_NAME ": unable to configure consistent DMA mask.\n");
return -ENODEV;
}
/* Map IO ports and initialize host accordingly */
iomap[0] = devm_ioport_map(&pdev->dev, cmd_port[0], 8);
iomap[1] = devm_ioport_map(&pdev->dev, ctl_port[0], 1);
iomap[2] = devm_ioport_map(&pdev->dev, cmd_port[1], 8);
iomap[3] = devm_ioport_map(&pdev->dev, ctl_port[1], 1);
iomap[4] = pcim_iomap(pdev, 2, 0);
if (!iomap[0] || !iomap[1] || !iomap[2] || !iomap[3] || !iomap[4])
return -ENOMEM;
ioaddr = &host->ports[0]->ioaddr;
ioaddr->cmd_addr = iomap[0];
ioaddr->ctl_addr = iomap[1];
ioaddr->altstatus_addr = iomap[1];
ioaddr->bmdma_addr = iomap[4];
ata_sff_std_ports(ioaddr);
ata_port_desc(host->ports[0],
"cmd 0x%x ctl 0x%x", cmd_port[0], ctl_port[0]);
ata_port_pbar_desc(host->ports[0], 4, 0, "bmdma");
ioaddr = &host->ports[1]->ioaddr;
ioaddr->cmd_addr = iomap[2];
ioaddr->ctl_addr = iomap[3];
ioaddr->altstatus_addr = iomap[3];
ioaddr->bmdma_addr = iomap[4] + 8;
ata_sff_std_ports(ioaddr);
ata_port_desc(host->ports[1],
"cmd 0x%x ctl 0x%x", cmd_port[1], ctl_port[1]);
ata_port_pbar_desc(host->ports[1], 4, 8, "bmdma");
/* activate the host */
pci_set_master(pdev);
rc = ata_host_start(host);
if (rc)
return rc;
for (i = 0; i < 2; i++) {
static const int irq[] = { 14, 15 };
//......... part of the code is omitted here .........
Example 2: pirq_piix_set
static int pirq_piix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
{
pci_write_config_byte(router, pirq, irq);
return 1;
}
Example 3: i915_restore_state
int i915_restore_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
I915_WRITE(DSPARB, dev_priv->saveDSPARB);
/* Pipe & plane A info */
/* Prime the clock */
if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
~DPLL_VCO_ENABLE);
DRM_UDELAY(150);
}
I915_WRITE(FPA0, dev_priv->saveFPA0);
I915_WRITE(FPA1, dev_priv->saveFPA1);
/* Actually enable it */
I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
DRM_UDELAY(150);
if (IS_I965G(dev))
I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
DRM_UDELAY(150);
/* Restore mode */
I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
/* Restore plane info */
I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
if (IS_I965G(dev)) {
I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
}
I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
i915_restore_palette(dev, PIPE_A);
/* Enable the plane */
I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
I915_WRITE(DSPAADDR, I915_READ(DSPAADDR));
/* Pipe & plane B info */
if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
~DPLL_VCO_ENABLE);
DRM_UDELAY(150);
}
I915_WRITE(FPB0, dev_priv->saveFPB0);
I915_WRITE(FPB1, dev_priv->saveFPB1);
/* Actually enable it */
I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
DRM_UDELAY(150);
if (IS_I965G(dev))
I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
DRM_UDELAY(150);
/* Restore mode */
I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
/* Restore plane info */
I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
if (IS_I965G(dev)) {
I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
}
I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
i915_restore_palette(dev, PIPE_B);
/* Enable the plane */
I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
/* CRT state */
I915_WRITE(ADPA, dev_priv->saveADPA);
/* LVDS state */
if (IS_I965G(dev))
I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
//......... part of the code is omitted here .........
Example 4: pcibios_update_irq
void __init pcibios_update_irq(struct pci_dev *dev, int irq)
{
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
}
Example 5: ehci_pci_setup
//......... part of the code is omitted here .........
switch (pdev->device) {
/* Some NForce2 chips have problems with selective suspend;
* fixed in newer silicon.
*/
case 0x0068:
if (pdev->revision < 0xa4)
ehci->no_selective_suspend = 1;
break;
/* MCP89 chips on the MacBookAir3,1 give EPROTO when
* fetching device descriptors unless LPM is disabled.
* There are also intermittent problems enumerating
* devices with PPCD enabled.
*/
case 0x0d9d:
ehci_info(ehci, "disable lpm/ppcd for nvidia mcp89");
ehci->has_lpm = 0;
ehci->has_ppcd = 0;
ehci->command &= ~CMD_PPCEE;
break;
}
break;
case PCI_VENDOR_ID_VIA:
if (pdev->device == 0x3104 && (pdev->revision & 0xf0) == 0x60) {
u8 tmp;
/* The VT6212 defaults to a 1 usec EHCI sleep time which
* hogs the PCI bus *badly*. Setting bit 5 of 0x4B makes
* that sleep time use the conventional 10 usec.
*/
pci_read_config_byte(pdev, 0x4b, &tmp);
if (tmp & 0x20)
break;
pci_write_config_byte(pdev, 0x4b, tmp | 0x20);
}
break;
case PCI_VENDOR_ID_ATI:
/* AMD PLL quirk */
if (usb_amd_find_chipset_info())
ehci->amd_pll_fix = 1;
/* SB600 and old version of SB700 have a bug in EHCI controller,
* which causes usb devices lose response in some cases.
*/
if ((pdev->device == 0x4386) || (pdev->device == 0x4396)) {
p_smbus = pci_get_device(PCI_VENDOR_ID_ATI,
PCI_DEVICE_ID_ATI_SBX00_SMBUS,
NULL);
if (!p_smbus)
break;
rev = p_smbus->revision;
if ((pdev->device == 0x4386) || (rev == 0x3a)
|| (rev == 0x3b)) {
u8 tmp;
ehci_info(ehci, "applying AMD SB600/SB700 USB "
"freeze workaround\n");
pci_read_config_byte(pdev, 0x53, &tmp);
pci_write_config_byte(pdev, 0x53, tmp | (1<<3));
}
pci_dev_put(p_smbus);
}
break;
case PCI_VENDOR_ID_NETMOS:
/* MosChip frame-index-register bug */
ehci_info(ehci, "applying MosChip frame-index workaround\n");
ehci->frame_index_bug = 1;
break;
Example 6: os_pci_writeb
void os_pci_writeb(void *osext, HPT_U8 offset, HPT_U8 value)
{
pci_write_config_byte(((PHBA)osext)->pcidev, offset, value);
}
Example 7: pci_mpc85xx_init
void
pci_mpc85xx_init(struct pci_controller *hose)
{
volatile immap_t *immap = (immap_t *)CFG_CCSRBAR;
volatile ccsr_pcix_t *pcix = &immap->im_pcix;
u16 reg16;
hose->first_busno = 0;
hose->last_busno = 0xff;
pci_set_region(hose->regions + 0,
CFG_PCI1_MEM_BASE,
CFG_PCI1_MEM_PHYS,
CFG_PCI1_MEM_SIZE,
PCI_REGION_MEM);
pci_set_region(hose->regions + 1,
CFG_PCI1_IO_BASE,
CFG_PCI1_IO_PHYS,
CFG_PCI1_IO_SIZE,
PCI_REGION_IO);
hose->region_count = 2;
pci_setup_indirect(hose,
(CFG_IMMR+0x8000),
(CFG_IMMR+0x8004));
pcix->potar1 = (CFG_PCI1_MEM_BASE >> 12) & 0x000fffff;
pcix->potear1 = 0x00000000;
pcix->powbar1 = (CFG_PCI1_MEM_BASE >> 12) & 0x000fffff;
pcix->powbear1 = 0x00000000;
pcix->powar1 = 0x8004401c; /* 512M MEM space */
pcix->potar2 = 0x00000000;
pcix->potear2 = 0x00000000;
pcix->powbar2 = (CFG_PCI1_IO_BASE >> 12) & 0x000fffff;
pcix->powbear2 = 0x00000000;
pcix->powar2 = 0x80088017; /* 16M IO space */
pcix->pitar1 = 0x00000000;
pcix->piwbar1 = 0x00000000;
pcix->piwar1 = 0xa0f5501e; /* Enable, Prefetch, Local Mem,
* Snoop R/W, 2G */
/*
* Hose scan.
*/
pci_register_hose(hose);
pci_read_config_word (PCI_BDF(0,0,0), PCI_COMMAND, &reg16);
reg16 |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
pci_write_config_word(PCI_BDF(0,0,0), PCI_COMMAND, reg16);
/*
* Clear non-reserved bits in status register.
*/
pci_write_config_word(PCI_BDF(0,0,0), PCI_STATUS, 0xffff);
pci_write_config_byte(PCI_BDF(0,0,0), PCI_LATENCY_TIMER,0x80);
#if defined(CONFIG_MPC8555CDS) || defined(CONFIG_MPC8541CDS)
/*
* This is a SW workaround for an apparent HW problem
* in the PCI controller on the MPC8555/41 CDS boards.
* The first config cycle must be to a valid, known
* device on the PCI bus in order to trick the PCI
* controller state machine into a known valid state.
* Without this, the first config cycle has the chance
* of hanging the controller permanently, just leaving
* it in a semi-working state, or leaving it working.
*
* Pick on the Tundra, Device 17, to get it right.
*/
{
u8 header_type;
pci_hose_read_config_byte(hose,
PCI_BDF(0,17,0),
PCI_HEADER_TYPE,
&header_type);
}
#endif
hose->last_busno = pci_hose_scan(hose);
}
Example 8: ocxl_config_set_TL
int ocxl_config_set_TL(struct pci_dev *dev, int tl_dvsec)
{
u32 val;
__be32 *be32ptr;
u8 timers;
int i, rc;
long recv_cap;
char *recv_rate;
/*
* Skip on function != 0, as the TL can only be defined on 0
*/
if (PCI_FUNC(dev->devfn) != 0)
return 0;
recv_rate = kzalloc(PNV_OCXL_TL_RATE_BUF_SIZE, GFP_KERNEL);
if (!recv_rate)
return -ENOMEM;
/*
* The spec defines 64 templates for messages in the
* Transaction Layer (TL).
*
* The host and device each support a subset, so we need to
* configure the transmitters on each side to send only
* templates the receiver understands, at a rate the receiver
* can process. Per the spec, template 0 must be supported by
* everybody. That's the template which has been used by the
* host and device so far.
*
* The sending rate limit must be set before the template is
* enabled.
*/
/*
* Device -> host
*/
rc = pnv_ocxl_get_tl_cap(dev, &recv_cap, recv_rate,
PNV_OCXL_TL_RATE_BUF_SIZE);
if (rc)
goto out;
for (i = 0; i < PNV_OCXL_TL_RATE_BUF_SIZE; i += 4) {
be32ptr = (__be32 *) &recv_rate[i];
pci_write_config_dword(dev,
tl_dvsec + OCXL_DVSEC_TL_SEND_RATE + i,
be32_to_cpu(*be32ptr));
}
val = recv_cap >> 32;
pci_write_config_dword(dev, tl_dvsec + OCXL_DVSEC_TL_SEND_CAP, val);
val = recv_cap & GENMASK(31, 0);
pci_write_config_dword(dev, tl_dvsec + OCXL_DVSEC_TL_SEND_CAP + 4, val);
/*
* Host -> device
*/
for (i = 0; i < PNV_OCXL_TL_RATE_BUF_SIZE; i += 4) {
pci_read_config_dword(dev,
tl_dvsec + OCXL_DVSEC_TL_RECV_RATE + i,
&val);
be32ptr = (__be32 *) &recv_rate[i];
*be32ptr = cpu_to_be32(val);
}
pci_read_config_dword(dev, tl_dvsec + OCXL_DVSEC_TL_RECV_CAP, &val);
recv_cap = (long) val << 32;
pci_read_config_dword(dev, tl_dvsec + OCXL_DVSEC_TL_RECV_CAP + 4, &val);
recv_cap |= val;
rc = pnv_ocxl_set_tl_conf(dev, recv_cap, __pa(recv_rate),
PNV_OCXL_TL_RATE_BUF_SIZE);
if (rc)
goto out;
/*
* Opencapi commands needing to be retried are classified per
* the TL in 2 groups: short and long commands.
*
* The short back off timer it not used for now. It will be
* for opencapi 4.0.
*
* The long back off timer is typically used when an AFU hits
* a page fault but the NPU is already processing one. So the
* AFU needs to wait before it can resubmit. Having a value
* too low doesn't break anything, but can generate extra
* traffic on the link.
* We set it to 1.6 us for now. It's shorter than, but in the
* same order of magnitude as the time spent to process a page
* fault.
*/
timers = 0x2 << 4; /* long timer = 1.6 us */
pci_write_config_byte(dev, tl_dvsec + OCXL_DVSEC_TL_BACKOFF_TIMERS,
timers);
rc = 0;
out:
kfree(recv_rate);
return rc;
}
Example 9: esb_ioctl
//......... part of the code is omitted here .........
err_release:
pci_release_region(pdev, 0);
err_disable:
pci_disable_device(pdev);
err_devput:
return 0;
}
static void __devinit esb_initdevice(void)
{
u8 val1;
u16 val2;
/*
* Config register:
* Bit 5 : 0 = Enable WDT_OUTPUT
* Bit 2 : 0 = set the timer frequency to the PCI clock
* divided by 2^15 (approx 1KHz).
* Bits 1:0 : 11 = WDT_INT_TYPE Disabled.
* The watchdog has two timers, it can be setup so that the
* expiry of timer1 results in an interrupt and the expiry of
* timer2 results in a reboot. We set it to not generate
* any interrupts as there is not much we can do with it
* right now.
*/
pci_write_config_word(esb_pci, ESB_CONFIG_REG, 0x0003);
/* Check that the WDT isn't already locked */
pci_read_config_byte(esb_pci, ESB_LOCK_REG, &val1);
if (val1 & ESB_WDT_LOCK)
printk(KERN_WARNING PFX "nowayout already set\n");
/* Set the timer to watchdog mode and disable it for now */
pci_write_config_byte(esb_pci, ESB_LOCK_REG, 0x00);
/* Check if the watchdog was previously triggered */
esb_unlock_registers();
val2 = readw(ESB_RELOAD_REG);
if (val2 & ESB_WDT_TIMEOUT)
triggered = WDIOF_CARDRESET;
/* Reset WDT_TIMEOUT flag and timers */
esb_unlock_registers();
writew((ESB_WDT_TIMEOUT | ESB_WDT_RELOAD), ESB_RELOAD_REG);
/* And set the correct timeout value */
esb_timer_set_heartbeat(heartbeat);
}
static int __devinit esb_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int ret;
cards_found++;
if (cards_found == 1)
printk(KERN_INFO PFX "Intel 6300ESB WatchDog Timer Driver v%s\n",
ESB_VERSION);
if (cards_found > 1) {
printk(KERN_ERR PFX "This driver only supports 1 device\n");
return -ENODEV;
}
/* Check whether or not the hardware watchdog is there */
if (!esb_getdevice(pdev) || esb_pci == NULL)
Example 10: solo_pci_probe
static int solo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct solo_dev *solo_dev;
int ret;
u8 chip_id;
solo_dev = kzalloc(sizeof(*solo_dev), GFP_KERNEL);
if (solo_dev == NULL)
return -ENOMEM;
if (id->driver_data == SOLO_DEV_6010)
dev_info(&pdev->dev, "Probing Softlogic 6010\n");
else
dev_info(&pdev->dev, "Probing Softlogic 6110\n");
solo_dev->type = id->driver_data;
solo_dev->pdev = pdev;
spin_lock_init(&solo_dev->reg_io_lock);
ret = v4l2_device_register(&pdev->dev, &solo_dev->v4l2_dev);
if (ret)
goto fail_probe;
/* Only for during init */
solo_dev->p2m_jiffies = msecs_to_jiffies(100);
ret = pci_enable_device(pdev);
if (ret)
goto fail_probe;
pci_set_master(pdev);
/* RETRY/TRDY Timeout disabled */
pci_write_config_byte(pdev, 0x40, 0x00);
pci_write_config_byte(pdev, 0x41, 0x00);
ret = pci_request_regions(pdev, SOLO6X10_NAME);
if (ret)
goto fail_probe;
solo_dev->reg_base = pci_ioremap_bar(pdev, 0);
if (solo_dev->reg_base == NULL) {
ret = -ENOMEM;
goto fail_probe;
}
chip_id = solo_reg_read(solo_dev, SOLO_CHIP_OPTION) &
SOLO_CHIP_ID_MASK;
switch (chip_id) {
case 7:
solo_dev->nr_chans = 16;
solo_dev->nr_ext = 5;
break;
case 6:
solo_dev->nr_chans = 8;
solo_dev->nr_ext = 2;
break;
default:
dev_warn(&pdev->dev, "Invalid chip_id 0x%02x, assuming 4 ch\n",
chip_id);
case 5:
solo_dev->nr_chans = 4;
solo_dev->nr_ext = 1;
}
/* Disable all interrupts to start */
solo_irq_off(solo_dev, ~0);
/* Initial global settings */
if (solo_dev->type == SOLO_DEV_6010) {
solo_dev->clock_mhz = 108;
solo_dev->sys_config = SOLO_SYS_CFG_SDRAM64BIT
| SOLO_SYS_CFG_INPUTDIV(25)
| SOLO_SYS_CFG_FEEDBACKDIV(solo_dev->clock_mhz * 2 - 2)
| SOLO_SYS_CFG_OUTDIV(3);
solo_reg_write(solo_dev, SOLO_SYS_CFG, solo_dev->sys_config);
} else {
u32 divq, divf;
solo_dev->clock_mhz = 135;
if (solo_dev->clock_mhz < 125) {
divq = 3;
divf = (solo_dev->clock_mhz * 4) / 3 - 1;
} else {
divq = 2;
divf = (solo_dev->clock_mhz * 2) / 3 - 1;
}
solo_reg_write(solo_dev, SOLO_PLL_CONFIG,
(1 << 20) | /* PLL_RANGE */
(8 << 15) | /* PLL_DIVR */
(divq << 12) |
(divf << 4) |
(1 << 1) /* PLL_FSEN */);
solo_dev->sys_config = SOLO_SYS_CFG_SDRAM64BIT;
}
solo_reg_write(solo_dev, SOLO_SYS_CFG, solo_dev->sys_config);
solo_reg_write(solo_dev, SOLO_TIMER_CLOCK_NUM,
//......... part of the code is omitted here .........
Example 11: ocxl_config_read_afu
int ocxl_config_read_afu(struct pci_dev *dev, struct ocxl_fn_config *fn,
struct ocxl_afu_config *afu, u8 afu_idx)
{
int rc;
u32 val32;
/*
* First, we need to write the AFU idx for the AFU we want to
* access.
*/
WARN_ON((afu_idx & OCXL_DVSEC_AFU_IDX_MASK) != afu_idx);
afu->idx = afu_idx;
pci_write_config_byte(dev,
fn->dvsec_afu_info_pos + OCXL_DVSEC_AFU_INFO_AFU_IDX,
afu->idx);
rc = read_afu_name(dev, fn, afu);
if (rc)
return rc;
rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_AFU_VERSION, &val32);
if (rc)
return rc;
afu->version_major = EXTRACT_BITS(val32, 24, 31);
afu->version_minor = EXTRACT_BITS(val32, 16, 23);
afu->afuc_type = EXTRACT_BITS(val32, 14, 15);
afu->afum_type = EXTRACT_BITS(val32, 12, 13);
afu->profile = EXTRACT_BITS(val32, 0, 7);
rc = read_afu_mmio(dev, fn, afu);
if (rc)
return rc;
rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_MEM_SZ, &val32);
if (rc)
return rc;
afu->log_mem_size = EXTRACT_BITS(val32, 0, 7);
rc = read_afu_control(dev, afu);
if (rc)
return rc;
dev_dbg(&dev->dev, "AFU configuration:\n");
dev_dbg(&dev->dev, " name = %s\n", afu->name);
dev_dbg(&dev->dev, " version = %d.%d\n", afu->version_major,
afu->version_minor);
dev_dbg(&dev->dev, " global mmio bar = %hhu\n", afu->global_mmio_bar);
dev_dbg(&dev->dev, " global mmio offset = %#llx\n",
afu->global_mmio_offset);
dev_dbg(&dev->dev, " global mmio size = %#x\n", afu->global_mmio_size);
dev_dbg(&dev->dev, " pp mmio bar = %hhu\n", afu->pp_mmio_bar);
dev_dbg(&dev->dev, " pp mmio offset = %#llx\n", afu->pp_mmio_offset);
dev_dbg(&dev->dev, " pp mmio stride = %#x\n", afu->pp_mmio_stride);
dev_dbg(&dev->dev, " mem size (log) = %hhu\n", afu->log_mem_size);
dev_dbg(&dev->dev, " pasid supported (log) = %u\n",
afu->pasid_supported_log);
dev_dbg(&dev->dev, " actag supported = %u\n",
afu->actag_supported);
rc = validate_afu(dev, afu);
return rc;
}
Example 12: et131x_find_adapter
//......... part of the code is omitted here .........
* eeprom or that the eeprom doesn't exist. We will treat
* each case the same and not try to gather additional
* information that normally would come from the eeprom, like
* MAC Address
*/
adapter->has_eeprom = 0;
return -EIO;
} else
adapter->has_eeprom = 1;
/* Read the EEPROM for information regarding LED behavior. Refer to
* ET1310_phy.c, et131x_xcvr_init(), for its use.
*/
EepromReadByte(adapter, 0x70, &adapter->eepromData[0]);
EepromReadByte(adapter, 0x71, &adapter->eepromData[1]);
if (adapter->eepromData[0] != 0xcd)
/* Disable all optional features */
adapter->eepromData[1] = 0x00;
/* Let's set up the PORT LOGIC Register. First we need to know what
* the max_payload_size is
*/
result = pci_read_config_byte(pdev, ET1310_PCI_MAX_PYLD, &maxPayload);
if (result != PCIBIOS_SUCCESSFUL) {
dev_err(&pdev->dev,
"Could not read PCI config space for Max Payload Size\n");
return -EIO;
}
/* Program the Ack/Nak latency and replay timers */
maxPayload &= 0x07; /* Only the lower 3 bits are valid */
if (maxPayload < 2) {
const uint16_t AckNak[2] = { 0x76, 0xD0 };
const uint16_t Replay[2] = { 0x1E0, 0x2ED };
result = pci_write_config_word(pdev, ET1310_PCI_ACK_NACK,
AckNak[maxPayload]);
if (result != PCIBIOS_SUCCESSFUL) {
dev_err(&pdev->dev,
"Could not write PCI config space for ACK/NAK\n");
return -EIO;
}
result = pci_write_config_word(pdev, ET1310_PCI_REPLAY,
Replay[maxPayload]);
if (result != PCIBIOS_SUCCESSFUL) {
dev_err(&pdev->dev,
"Could not write PCI config space for Replay Timer\n");
return -EIO;
}
}
/* l0s and l1 latency timers. We are using default values.
* Representing 001 for L0s and 010 for L1
*/
result = pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11);
if (result != PCIBIOS_SUCCESSFUL) {
dev_err(&pdev->dev,
"Could not write PCI config space for Latency Timers\n");
return -EIO;
}
/* Change the max read size to 2k */
result = pci_read_config_byte(pdev, 0x51, &read_size_reg);
if (result != PCIBIOS_SUCCESSFUL) {
dev_err(&pdev->dev,
"Could not read PCI config space for Max read size\n");
return -EIO;
}
read_size_reg &= 0x8f;
read_size_reg |= 0x40;
result = pci_write_config_byte(pdev, 0x51, read_size_reg);
if (result != PCIBIOS_SUCCESSFUL) {
dev_err(&pdev->dev,
"Could not write PCI config space for Max read size\n");
return -EIO;
}
/* Get MAC address from config space if an eeprom exists, otherwise
* the MAC address there will not be valid
*/
if (adapter->has_eeprom) {
int i;
for (i = 0; i < ETH_ALEN; i++) {
result = pci_read_config_byte(
pdev, ET1310_PCI_MAC_ADDRESS + i,
adapter->PermanentAddress + i);
if (result != PCIBIOS_SUCCESSFUL) {
dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
return -EIO;
}
}
}
return 0;
}
Example 13: i915_restore_state
int i915_restore_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
mutex_lock(&dev->struct_mutex);
/* Hardware status page */
I915_WRITE(HWS_PGA, dev_priv->saveHWS);
i915_restore_display(dev);
/* Interrupt state */
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(DEIER, dev_priv->saveDEIER);
I915_WRITE(DEIMR, dev_priv->saveDEIMR);
I915_WRITE(GTIER, dev_priv->saveGTIER);
I915_WRITE(GTIMR, dev_priv->saveGTIMR);
I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->savePCH_PORT_HOTPLUG);
} else {
I915_WRITE(IER, dev_priv->saveIER);
I915_WRITE(IMR, dev_priv->saveIMR);
}
mutex_unlock(&dev->struct_mutex);
if (drm_core_check_feature(dev, DRIVER_MODESET))
intel_init_clock_gating(dev);
if (IS_IRONLAKE_M(dev)) {
ironlake_enable_drps(dev);
intel_init_emon(dev);
}
if (IS_GEN6(dev)) {
gen6_enable_rps(dev_priv);
gen6_update_ring_freq(dev_priv);
}
mutex_lock(&dev->struct_mutex);
/* Cache mode state */
I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
/* Memory arbitration state */
I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
for (i = 0; i < 16; i++) {
I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]);
}
for (i = 0; i < 3; i++)
I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
mutex_unlock(&dev->struct_mutex);
intel_i2c_reset(dev);
return 0;
}
Example 14: cs5520_init_one
static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct ata_port_info pi = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = 0x1f,
.port_ops = &cs5520_port_ops,
};
const struct ata_port_info *ppi[2];
u8 pcicfg;
void *iomap[5];
struct ata_host *host;
struct ata_ioports *ioaddr;
int i, rc;
/* IDE port enable bits */
pci_read_config_byte(pdev, 0x60, &pcicfg);
/* Check if the ATA ports are enabled */
if ((pcicfg & 3) == 0)
return -ENODEV;
ppi[0] = ppi[1] = &ata_dummy_port_info;
if (pcicfg & 1)
ppi[0] = &pi;
if (pcicfg & 2)
ppi[1] = &pi;
if ((pcicfg & 0x40) == 0) {
dev_printk(KERN_WARNING, &pdev->dev,
"DMA mode disabled. Enabling.\n");
pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);
}
pi.mwdma_mask = id->driver_data;
host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
if (!host)
return -ENOMEM;
/* Perform set up for DMA */
if (pci_enable_device_bars(pdev, 1<<2)) {
printk(KERN_ERR DRV_NAME ": unable to configure BAR2.\n");
return -ENODEV;
}
if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n");
return -ENODEV;
}
if (pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
printk(KERN_ERR DRV_NAME ": unable to configure consistent DMA mask.\n");
return -ENODEV;
}
/* Map IO ports and initialize host accordingly */
iomap[0] = devm_ioport_map(&pdev->dev, 0x1F0, 8);
iomap[1] = devm_ioport_map(&pdev->dev, 0x3F6, 1);
iomap[2] = devm_ioport_map(&pdev->dev, 0x170, 8);
iomap[3] = devm_ioport_map(&pdev->dev, 0x376, 1);
iomap[4] = pcim_iomap(pdev, 2, 0);
if (!iomap[0] || !iomap[1] || !iomap[2] || !iomap[3] || !iomap[4])
return -ENOMEM;
ioaddr = &host->ports[0]->ioaddr;
ioaddr->cmd_addr = iomap[0];
ioaddr->ctl_addr = iomap[1];
ioaddr->altstatus_addr = iomap[1];
ioaddr->bmdma_addr = iomap[4];
ata_std_ports(ioaddr);
ioaddr = &host->ports[1]->ioaddr;
ioaddr->cmd_addr = iomap[2];
ioaddr->ctl_addr = iomap[3];
ioaddr->altstatus_addr = iomap[3];
ioaddr->bmdma_addr = iomap[4] + 8;
ata_std_ports(ioaddr);
/* activate the host */
pci_set_master(pdev);
rc = ata_host_start(host);
if (rc)
return rc;
for (i = 0; i < 2; i++) {
static const int irq[] = { 14, 15 };
struct ata_port *ap = host->ports[i];
if (ata_port_is_dummy(ap))
continue;
rc = devm_request_irq(&pdev->dev, irq[ap->port_no],
ata_interrupt, 0, DRV_NAME, host);
if (rc)
return rc;
if (i == 0)
host->irq = irq[0];
else
host->irq2 = irq[1];
//......... part of the code is omitted here .........
Example 15: aec62xx_config_drive_xfer_rate
static int aec62xx_config_drive_xfer_rate (ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
struct hd_driveid *id = drive->id;
#ifndef CONFIG_BCM947XX
if ((id->capability & 1) && drive->autodma) {
#else
if (1) {
#endif
/* Consult the list of known "bad" drives */
if (hwif->ide_dma_bad_drive(drive))
goto fast_ata_pio;
if (id->field_valid & 4) {
if (id->dma_ultra & hwif->ultra_mask) {
/* Force if Capable UltraDMA */
int dma = config_chipset_for_dma(drive);
if ((id->field_valid & 2) && !dma)
goto try_dma_modes;
}
} else if (id->field_valid & 2) {
try_dma_modes:
if ((id->dma_mword & hwif->mwdma_mask) ||
(id->dma_1word & hwif->swdma_mask)) {
/* Force if Capable regular DMA modes */
if (!config_chipset_for_dma(drive))
goto no_dma_set;
}
} else if (hwif->ide_dma_good_drive(drive) &&
(id->eide_dma_time < 150)) {
/* Consult the list of known "good" drives */
if (!config_chipset_for_dma(drive))
goto no_dma_set;
} else {
goto fast_ata_pio;
}
return hwif->ide_dma_on(drive);
} else if ((id->capability & 8) || (id->field_valid & 2)) {
fast_ata_pio:
no_dma_set:
aec62xx_tune_drive(drive, 5);
return hwif->ide_dma_off_quietly(drive);
}
/* IORDY not supported */
return 0;
}
static int aec62xx_irq_timeout (ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
struct pci_dev *dev = hwif->pci_dev;
switch(dev->device) {
case PCI_DEVICE_ID_ARTOP_ATP860:
case PCI_DEVICE_ID_ARTOP_ATP860R:
case PCI_DEVICE_ID_ARTOP_ATP865:
case PCI_DEVICE_ID_ARTOP_ATP865R:
printk(" AEC62XX time out ");
#if 0
{
int i = 0;
u8 reg49h = 0;
pci_read_config_byte(HWIF(drive)->pci_dev, 0x49, &reg49h);
for (i=0;i<256;i++)
pci_write_config_byte(HWIF(drive)->pci_dev, 0x49, reg49h|0x10);
pci_write_config_byte(HWIF(drive)->pci_dev, 0x49, reg49h & ~0x10);
}
return 0;
#endif
default:
break;
}
#if 0
{
ide_hwif_t *hwif = HWIF(drive);
struct pci_dev *dev = hwif->pci_dev;
u8 tmp1 = 0, tmp2 = 0, mode6 = 0;
pci_read_config_byte(dev, 0x44, &tmp1);
pci_read_config_byte(dev, 0x45, &tmp2);
printk(" AEC6280 r44=%x r45=%x ",tmp1,tmp2);
mode6 = HWIF(drive)->INB(((hwif->channel) ?
hwif->mate->dma_status :
hwif->dma_status));
printk(" AEC6280 133=%x ", (mode6 & 0x10));
}
#endif
return 0;
}