This article collects typical usage examples of the pci_find_capability function in C/C++ (as used in the Linux kernel). If you have been wondering what pci_find_capability does, how to call it, or what real-world uses look like, the hand-picked code samples below should help.
A total of 15 code examples of pci_find_capability are shown below, sorted by popularity by default.
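Before diving into the examples, here is a minimal sketch of the typical call pattern. It is illustrative only and not taken from any of the drivers below; the probe function name and the choice of the power-management capability are assumptions. pci_find_capability() returns the configuration-space offset of the requested capability, or 0 if the device does not implement it, so callers check the return value before addressing registers relative to that offset.
#include <linux/pci.h>

/* Minimal illustrative probe: locate the power-management capability
 * and read its control/status register. */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int pos;
	u16 pmcsr;

	/* Returns the config-space offset of the capability, or 0 if absent. */
	pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (!pos)
		return -ENODEV;

	/* Capability registers are addressed relative to the returned offset. */
	pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
	dev_info(&pdev->dev, "PM capability at 0x%x, PMCSR=0x%04x\n", pos, pmcsr);

	return 0;
}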
Example 1: agp_intel_i460_probe
static int __devinit agp_intel_i460_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct agp_bridge_data *bridge;
u8 cap_ptr;
cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
if (!cap_ptr)
return -ENODEV;
bridge = agp_alloc_bridge();
if (!bridge)
return -ENOMEM;
bridge->driver = &intel_i460_driver;
bridge->dev = pdev;
bridge->capndx = cap_ptr;
printk(KERN_INFO PFX "Detected Intel 460GX chipset\n");
pci_set_drvdata(pdev, bridge);
return agp_add_bridge(bridge);
}
Example 2: sis_delayed_enable
static void sis_delayed_enable(struct agp_bridge_data *bridge, u32 mode)
{
struct pci_dev *device = NULL;
u32 command;
int rate;
printk(KERN_INFO PFX "Found an AGP %d.%d compliant device at %s.\n",
agp_bridge->major_version,
agp_bridge->minor_version,
pci_name(agp_bridge->dev));
pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx + PCI_AGP_STATUS, &command);
command = agp_collect_device_status(bridge, mode, command);
command |= AGPSTAT_AGP_ENABLE;
rate = (command & 0x7) << 2;
for_each_pci_dev(device) {
u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
if (!agp)
continue;
printk(KERN_INFO PFX "Putting AGP V3 device at %s into %dx mode\n",
pci_name(device), rate);
pci_write_config_dword(device, agp + PCI_AGP_COMMAND, command);
/*
* Weird: on some sis chipsets any rate change in the target
* command register triggers a 5ms screwup during which the master
* cannot be configured
*/
if (device->device == bridge->dev->device) {
printk(KERN_INFO PFX "SiS delay workaround: giving bridge time to recover.\n");
msleep(10);
}
}
}
Example 3: pci_save_msi_state
int pci_save_msi_state(struct pci_dev *dev)
{
int pos, i = 0;
u16 control;
struct pci_cap_saved_state *save_state;
u32 *cap;
pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
if (pos <= 0 || dev->no_msi)
return 0;
pci_read_config_word(dev, msi_control_reg(pos), &control);
if (!(control & PCI_MSI_FLAGS_ENABLE))
return 0;
save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5,
GFP_KERNEL);
if (!save_state) {
printk(KERN_ERR "Out of memory in pci_save_msi_state\n");
return -ENOMEM;
}
cap = &save_state->data[0];
pci_read_config_dword(dev, pos, &cap[i++]);
control = cap[0] >> 16;
pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, &cap[i++]);
if (control & PCI_MSI_FLAGS_64BIT) {
pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, &cap[i++]);
pci_read_config_dword(dev, pos + PCI_MSI_DATA_64, &cap[i++]);
} else
pci_read_config_dword(dev, pos + PCI_MSI_DATA_32, &cap[i++]);
if (control & PCI_MSI_FLAGS_MASKBIT)
pci_read_config_dword(dev, pos + PCI_MSI_MASK_BIT, &cap[i++]);
save_state->cap_nr = PCI_CAP_ID_MSI;
pci_add_saved_cap(dev, save_state);
return 0;
}
Example 4: agp_intel_probe
static int __devinit agp_intel_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct agp_bridge_data *bridge;
u8 cap_ptr = 0;
struct resource *r;
int i, err;
cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
bridge = agp_alloc_bridge();
if (!bridge)
return -ENOMEM;
bridge->capndx = cap_ptr;
if (intel_gmch_probe(pdev, bridge))
goto found_gmch;
for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
/* In case that multiple models of gfx chip may
stand on same host bridge type, this can be
sure we detect the right IGD. */
if (pdev->device == intel_agp_chipsets[i].chip_id) {
bridge->driver = intel_agp_chipsets[i].driver;
break;
}
}
if (intel_agp_chipsets[i].name == NULL) {
if (cap_ptr)
dev_warn(&pdev->dev, "unsupported Intel chipset [%04x/%04x]\n",
pdev->vendor, pdev->device);
agp_put_bridge(bridge);
return -ENODEV;
}
if (!bridge->driver) {
if (cap_ptr)
dev_warn(&pdev->dev, "can't find bridge device (chip_id: %04x)\n",
intel_agp_chipsets[i].gmch_chip_id);
agp_put_bridge(bridge);
return -ENODEV;
}
bridge->dev = pdev;
bridge->dev_private_data = NULL;
dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
/*
* If the device has not been properly setup, the following will catch
* the problem and should stop the system from crashing.
* 20030610 - [email protected]
*/
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev, "can't enable PCI device\n");
agp_put_bridge(bridge);
return -ENODEV;
}
/*
* The following fixes the case where the BIOS has "forgotten" to
* provide an address range for the GART.
* 20030610 - [email protected]
*/
r = &pdev->resource[0];
if (!r->start && r->end) {
if (pci_assign_resource(pdev, 0)) {
dev_err(&pdev->dev, "can't assign resource 0\n");
agp_put_bridge(bridge);
return -ENODEV;
}
}
/* Fill in the mode register */
if (cap_ptr) {
pci_read_config_dword(pdev,
bridge->capndx+PCI_AGP_STATUS,
&bridge->mode);
}
found_gmch:
pci_set_drvdata(pdev, bridge);
err = agp_add_bridge(bridge);
if (!err)
intel_agp_enabled = 1;
return err;
}
Example 5: agp_ali_probe
static int __devinit agp_ali_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct agp_device_ids *devs = ali_agp_device_ids;
struct agp_bridge_data *bridge;
u8 hidden_1621_id, cap_ptr;
int j;
cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
if (!cap_ptr)
return -ENODEV;
/* probe for known chipsets */
for (j = 0; devs[j].chipset_name; j++) {
if (pdev->device == devs[j].device_id)
goto found;
}
dev_err(&pdev->dev, "unsupported ALi chipset [%04x/%04x])\n",
pdev->vendor, pdev->device);
return -ENODEV;
found:
bridge = agp_alloc_bridge();
if (!bridge)
return -ENOMEM;
bridge->dev = pdev;
bridge->capndx = cap_ptr;
switch (pdev->device) {
case PCI_DEVICE_ID_AL_M1541:
bridge->driver = &ali_m1541_bridge;
break;
case PCI_DEVICE_ID_AL_M1621:
pci_read_config_byte(pdev, 0xFB, &hidden_1621_id);
switch (hidden_1621_id) {
case 0x31:
devs[j].chipset_name = "M1631";
break;
case 0x32:
devs[j].chipset_name = "M1632";
break;
case 0x41:
devs[j].chipset_name = "M1641";
break;
case 0x43:
devs[j].chipset_name = "M1621";
break;
case 0x47:
devs[j].chipset_name = "M1647";
break;
case 0x51:
devs[j].chipset_name = "M1651";
break;
default:
break;
}
/*FALLTHROUGH*/
default:
bridge->driver = &ali_generic_bridge;
}
dev_info(&pdev->dev, "ALi %s chipset\n", devs[j].chipset_name);
/* Fill in the mode register */
pci_read_config_dword(pdev,
bridge->capndx+PCI_AGP_STATUS,
&bridge->mode);
pci_set_drvdata(pdev, bridge);
return agp_add_bridge(bridge);
}
Example 6: RtmpOsPciFindCapability
/*
========================================================================
Routine Description:
Query for devices' capabilities.
Arguments:
pDev - PCI device
Cap - Capability code
Return Value:
None
Note:
========================================================================
*/
int RtmpOsPciFindCapability(VOID *pDev, INT Cap)
{
return pci_find_capability(pDev, Cap);
}
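As a usage note, a hypothetical caller of this wrapper might look like the sketch below. The function name rtmp_example_check_pcie and the choice of PCI_CAP_ID_EXP are illustrative assumptions, not taken from the Ralink driver; the sketch only assumes the wrapper above is visible in the same translation unit.
/* Hypothetical caller: probe the adapter for the PCI Express capability
 * through the OS-abstraction wrapper above. */
static void rtmp_example_check_pcie(struct pci_dev *pci_dev)
{
	int pos = RtmpOsPciFindCapability(pci_dev, PCI_CAP_ID_EXP);

	if (pos)
		printk(KERN_INFO "PCIe capability found at offset 0x%x\n", pos);
	else
		printk(KERN_INFO "no PCIe capability; legacy PCI device\n");
}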
Example 7: usb_hcd_pci_suspend
/**
* usb_hcd_pci_suspend - power management suspend of a PCI-based HCD
* @dev: USB Host Controller being suspended
* @message: semantics in flux
*
* Store this function in the HCD's struct pci_driver as suspend().
*/
int usb_hcd_pci_suspend (struct pci_dev *dev, pm_message_t message)
{
struct usb_hcd *hcd;
int retval = 0;
int has_pci_pm;
hcd = pci_get_drvdata(dev);
/* Root hub suspend should have stopped all downstream traffic,
* and all bus master traffic. And done so for both the interface
* and the stub usb_device (which we check here). But maybe it
* didn't; writing sysfs power/state files ignores such rules...
*
* We must ignore the FREEZE vs SUSPEND distinction here, because
* otherwise the swsusp will save (and restore) garbage state.
*/
if (!(hcd->state == HC_STATE_SUSPENDED ||
hcd->state == HC_STATE_HALT))
return -EBUSY;
if (hcd->driver->suspend) {
retval = hcd->driver->suspend(hcd, message);
suspend_report_result(hcd->driver->suspend, retval);
if (retval)
goto done;
}
synchronize_irq(dev->irq);
/* FIXME until the generic PM interfaces change a lot more, this
* can't use PCI D1 and D2 states. For example, the confusion
* between messages and states will need to vanish, and messages
* will need to provide a target system state again.
*
* It'll be important to learn characteristics of the target state,
* especially on embedded hardware where the HCD will often be in
* charge of an external VBUS power supply and one or more clocks.
* Some target system states will leave them active; others won't.
* (With PCI, that's often handled by platform BIOS code.)
*/
/* even when the PCI layer rejects some of the PCI calls
* below, HCs can try global suspend and reduce DMA traffic.
* PM-sensitive HCDs may already have done this.
*/
has_pci_pm = pci_find_capability(dev, PCI_CAP_ID_PM);
/* Downstream ports from this root hub should already be quiesced, so
* there will be no DMA activity. Now we can shut down the upstream
* link (except maybe for PME# resume signaling) and enter some PCI
* low power state, if the hardware allows.
*/
if (hcd->state == HC_STATE_SUSPENDED) {
/* no DMA or IRQs except when HC is active */
if (dev->current_state == PCI_D0) {
pci_save_state (dev);
pci_disable_device (dev);
}
if (!has_pci_pm) {
dev_dbg (hcd->self.controller, "--> PCI D0/legacy\n");
goto done;
}
/* NOTE: dev->current_state becomes nonzero only here, and
* only for devices that support PCI PM. Also, exiting
* PCI_D3 (but not PCI_D1 or PCI_D2) is allowed to reset
* some device state (e.g. as part of clock reinit).
*/
retval = pci_set_power_state (dev, PCI_D3hot);
suspend_report_result(pci_set_power_state, retval);
if (retval == 0) {
int wake = device_can_wakeup(&hcd->self.root_hub->dev);
wake = wake && device_may_wakeup(hcd->self.controller);
dev_dbg (hcd->self.controller, "--> PCI D3%s\n",
wake ? "/wakeup" : "");
/* Ignore these return values. We rely on pci code to
* reject requests the hardware can't implement, rather
* than coding the same thing.
*/
(void) pci_enable_wake (dev, PCI_D3hot, wake);
(void) pci_enable_wake (dev, PCI_D3cold, wake);
} else {
dev_dbg (&dev->dev, "PCI D3 suspend fail, %d\n",
retval);
(void) usb_hcd_pci_resume (dev);
}
} else if (hcd->state != HC_STATE_HALT) {
dev_dbg (hcd->self.controller, "hcd state %d; not suspended\n",
//......... part of the code omitted here .........
Example 8: usb_hcd_pci_resume
/**
* usb_hcd_pci_resume - power management resume of a PCI-based HCD
* @dev: USB Host Controller being resumed
*
* Store this function in the HCD's struct pci_driver as resume().
*/
int usb_hcd_pci_resume (struct pci_dev *dev)
{
struct usb_hcd *hcd;
int retval;
hcd = pci_get_drvdata(dev);
if (hcd->state != HC_STATE_SUSPENDED) {
dev_dbg (hcd->self.controller,
"can't resume, not suspended!\n");
return 0;
}
#ifdef CONFIG_PPC_PMAC
/* Reenable ASIC clocks for USB */
if (machine_is(powermac)) {
struct device_node *of_node;
of_node = pci_device_to_OF_node (dev);
if (of_node)
pmac_call_feature (PMAC_FTR_USB_ENABLE,
of_node, 0, 1);
}
#endif
/* NOTE: chip docs cover clean "real suspend" cases (what Linux
* calls "standby", "suspend to RAM", and so on). There are also
* dirty cases when swsusp fakes a suspend in "shutdown" mode.
*/
if (dev->current_state != PCI_D0) {
#ifdef DEBUG
int pci_pm;
u16 pmcr;
pci_pm = pci_find_capability(dev, PCI_CAP_ID_PM);
pci_read_config_word(dev, pci_pm + PCI_PM_CTRL, &pmcr);
pmcr &= PCI_PM_CTRL_STATE_MASK;
if (pmcr) {
/* Clean case: power to USB and to HC registers was
* maintained; remote wakeup is easy.
*/
dev_dbg(hcd->self.controller, "resume from PCI D%d\n",
pmcr);
} else {
/* Clean: HC lost Vcc power, D0 uninitialized
* + Vaux may have preserved port and transceiver
* state ... for remote wakeup from D3cold
* + or not; HCD must reinit + re-enumerate
*
* Dirty: D0 semi-initialized cases with swsusp
* + after BIOS init
* + after Linux init (HCD statically linked)
*/
dev_dbg(hcd->self.controller,
"PCI D0, from previous PCI D%d\n",
dev->current_state);
}
#endif
/* yes, ignore these results too... */
(void) pci_enable_wake (dev, dev->current_state, 0);
(void) pci_enable_wake (dev, PCI_D3cold, 0);
} else {
/* Same basic cases: clean (powered/not), dirty */
dev_dbg(hcd->self.controller, "PCI legacy resume\n");
}
/* NOTE: the PCI API itself is asymmetric here. We don't need to
* pci_set_power_state(PCI_D0) since that's part of re-enabling;
* but that won't re-enable bus mastering. Yet pci_disable_device()
* explicitly disables bus mastering...
*/
retval = pci_enable_device (dev);
if (retval < 0) {
dev_err (hcd->self.controller,
"can't re-enable after resume, %d!\n", retval);
return retval;
}
pci_set_master (dev);
pci_restore_state (dev);
dev->dev.power.power_state = PMSG_ON;
clear_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
if (hcd->driver->resume) {
retval = hcd->driver->resume(hcd);
if (retval) {
dev_err (hcd->self.controller,
"PCI post-resume error %d!\n", retval);
usb_hc_died (hcd);
}
}
return retval;
}
Example 9: ehci_pci_setup
//......... part of the code omitted here .........
pci_write_config_byte(pdev, 0x4b, tmp | 0x20);
}
break;
case PCI_VENDOR_ID_ATI:
if (usb_amd_find_chipset_info())
ehci->amd_pll_fix = 1;
if ((pdev->device == 0x4386) || (pdev->device == 0x4396)) {
p_smbus = pci_get_device(PCI_VENDOR_ID_ATI,
PCI_DEVICE_ID_ATI_SBX00_SMBUS,
NULL);
if (!p_smbus)
break;
rev = p_smbus->revision;
if ((pdev->device == 0x4386) || (rev == 0x3a)
|| (rev == 0x3b)) {
u8 tmp;
ehci_info(ehci, "applying AMD SB600/SB700 USB "
"freeze workaround\n");
pci_read_config_byte(pdev, 0x53, &tmp);
pci_write_config_byte(pdev, 0x53, tmp | (1<<3));
}
pci_dev_put(p_smbus);
}
break;
case PCI_VENDOR_ID_NETMOS:
ehci_info(ehci, "applying MosChip frame-index workaround\n");
ehci->frame_index_bug = 1;
break;
}
temp = pci_find_capability(pdev, 0x0a);
if (temp) {
pci_read_config_dword(pdev, temp, &temp);
temp >>= 16;
if ((temp & (3 << 13)) == (1 << 13)) {
temp &= 0x1fff;
ehci->debug = ehci_to_hcd(ehci)->regs + temp;
temp = ehci_readl(ehci, &ehci->debug->control);
ehci_info(ehci, "debug port %d%s\n",
HCS_DEBUG_PORT(ehci->hcs_params),
(temp & DBGP_ENABLED)
? " IN USE"
: "");
if (!(temp & DBGP_ENABLED))
ehci->debug = NULL;
}
}
ehci_reset(ehci);
temp = HCS_N_CC(ehci->hcs_params) * HCS_N_PCC(ehci->hcs_params);
temp &= 0x0f;
if (temp && HCS_N_PORTS(ehci->hcs_params) > temp) {
ehci_dbg(ehci, "bogus port configuration: "
"cc=%d x pcc=%d < ports=%d\n",
HCS_N_CC(ehci->hcs_params),
HCS_N_PCC(ehci->hcs_params),
HCS_N_PORTS(ehci->hcs_params));
switch (pdev->vendor) {
case 0x17a0:
//......... rest of the code omitted here .........
Example 10: set_msi_affinity
static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
{
struct msi_desc *entry;
u32 address_hi, address_lo;
unsigned int irq = vector;
unsigned int dest_cpu = first_cpu(cpu_mask);
unsigned long flags;
spin_lock_irqsave(&msi_lock, flags);
entry = (struct msi_desc *)msi_desc[vector];
if (!entry || !entry->dev)
goto out_unlock;
if (entry->msi_attrib.state == 0)
goto out_unlock;
switch (entry->msi_attrib.type) {
case PCI_CAP_ID_MSI:
{
int pos = pci_find_capability(entry->dev, PCI_CAP_ID_MSI);
if (!pos)
goto out_unlock;
pci_read_config_dword(entry->dev, msi_upper_address_reg(pos),
&address_hi);
pci_read_config_dword(entry->dev, msi_lower_address_reg(pos),
&address_lo);
msi_ops->target(vector, dest_cpu, &address_hi, &address_lo);
pci_write_config_dword(entry->dev, msi_upper_address_reg(pos),
address_hi);
pci_write_config_dword(entry->dev, msi_lower_address_reg(pos),
address_lo);
set_native_irq_info(irq, cpu_mask);
break;
}
case PCI_CAP_ID_MSIX:
{
int offset_hi =
entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET;
int offset_lo =
entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET;
address_hi = readl(entry->mask_base + offset_hi);
address_lo = readl(entry->mask_base + offset_lo);
msi_ops->target(vector, dest_cpu, &address_hi, &address_lo);
writel(address_hi, entry->mask_base + offset_hi);
writel(address_lo, entry->mask_base + offset_lo);
set_native_irq_info(irq, cpu_mask);
break;
}
default:
break;
}
out_unlock:
spin_unlock_irqrestore(&msi_lock, flags);
}
Example 11: msix_capability_init
/**
* msix_capability_init - configure device's MSI-X capability
* @dev: pointer to the pci_dev data structure of MSI-X device function
* @entries: pointer to an array of struct msix_entry entries
* @nvec: number of @entries
*
* Setup the MSI-X capability structure of device function with a
* single MSI-X vector. A return of zero indicates the successful setup of
* requested MSI-X entries with allocated vectors or non-zero for otherwise.
**/
static int msix_capability_init(struct pci_dev *dev,
struct msix_entry *entries, int nvec)
{
struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
u32 address_hi;
u32 address_lo;
u32 data;
int status;
int vector, pos, i, j, nr_entries, temp = 0;
unsigned long phys_addr;
u32 table_offset;
u16 control;
u8 bir;
void __iomem *base;
pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
/* Request & Map MSI-X table region */
pci_read_config_word(dev, msi_control_reg(pos), &control);
nr_entries = multi_msix_capable(control);
pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
phys_addr = pci_resource_start (dev, bir) + table_offset;
base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
if (base == NULL)
return -ENOMEM;
/* MSI-X Table Initialization */
for (i = 0; i < nvec; i++) {
entry = alloc_msi_entry();
if (!entry)
break;
vector = get_msi_vector(dev);
if (vector < 0) {
kmem_cache_free(msi_cachep, entry);
break;
}
j = entries[i].entry;
entries[i].vector = vector;
entry->msi_attrib.type = PCI_CAP_ID_MSIX;
entry->msi_attrib.state = 0; /* Mark it not active */
entry->msi_attrib.entry_nr = j;
entry->msi_attrib.maskbit = 1;
entry->msi_attrib.default_vector = dev->irq;
entry->dev = dev;
entry->mask_base = base;
if (!head) {
entry->link.head = vector;
entry->link.tail = vector;
head = entry;
} else {
entry->link.head = temp;
entry->link.tail = tail->link.tail;
tail->link.tail = vector;
head->link.head = vector;
}
temp = vector;
tail = entry;
/* Replace with MSI-X handler */
irq_handler_init(PCI_CAP_ID_MSIX, vector, 1);
/* Configure MSI-X capability structure */
status = msi_ops->setup(dev, vector,
&address_hi,
&address_lo,
&data);
if (status < 0)
break;
writel(address_lo,
base + j * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
writel(address_hi,
base + j * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
writel(data,
base + j * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_DATA_OFFSET);
attach_msi_entry(entry, vector);
}
if (i != nvec) {
i--;
for (; i >= 0; i--) {
vector = (entries + i)->vector;
msi_free_vector(dev, vector, 0);
(entries + i)->vector = 0;
}
return -EBUSY;
}
//......... part of the code omitted here .........
Example 12: agp_amdk7_probe
static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct agp_bridge_data *bridge;
u8 cap_ptr;
int j;
cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
if (!cap_ptr)
return -ENODEV;
j = ent - agp_amdk7_pci_table;
printk(KERN_INFO PFX "Detected AMD %s chipset\n",
amd_agp_device_ids[j].chipset_name);
bridge = agp_alloc_bridge();
if (!bridge)
return -ENOMEM;
bridge->driver = &amd_irongate_driver;
bridge->dev_private_data = &amd_irongate_private,
bridge->dev = pdev;
bridge->capndx = cap_ptr;
/* 751 Errata (22564_B-1.PDF)
erratum 20: strobe glitch with Nvidia NV10 GeForce cards.
system controller may experience noise due to strong drive strengths
*/
if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_7006) {
struct pci_dev *gfxcard=NULL;
cap_ptr = 0;
while (!cap_ptr) {
gfxcard = pci_get_class(PCI_CLASS_DISPLAY_VGA<<8, gfxcard);
if (!gfxcard) {
printk (KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
return -ENODEV;
}
cap_ptr = pci_find_capability(gfxcard, PCI_CAP_ID_AGP);
}
/* With so many variants of NVidia cards, it's simpler just
to blacklist them all, and then whitelist them as needed
(if necessary at all). */
if (gfxcard->vendor == PCI_VENDOR_ID_NVIDIA) {
agp_bridge->flags |= AGP_ERRATA_1X;
printk (KERN_INFO PFX "AMD 751 chipset with NVidia GeForce detected. Forcing to 1X due to errata.\n");
}
pci_dev_put(gfxcard);
}
/* 761 Errata (23613_F.pdf)
* Revisions B0/B1 were a disaster.
* erratum 44: SYSCLK/AGPCLK skew causes 2X failures -- Force mode to 1X
* erratum 45: Timing problem prevents fast writes -- Disable fast write.
* erratum 46: Setup violation on AGP SBA pins - Disable side band addressing.
* With this lot disabled, we should prevent lockups. */
if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_700E) {
if (pdev->revision == 0x10 || pdev->revision == 0x11) {
agp_bridge->flags = AGP_ERRATA_FASTWRITES;
agp_bridge->flags |= AGP_ERRATA_SBA;
agp_bridge->flags |= AGP_ERRATA_1X;
printk (KERN_INFO PFX "AMD 761 chipset with errata detected - disabling AGP fast writes & SBA and forcing to 1X.\n");
}
}
/* Fill in the mode register */
pci_read_config_dword(pdev,
bridge->capndx+PCI_AGP_STATUS,
&bridge->mode);
pci_set_drvdata(pdev, bridge);
return agp_add_bridge(bridge);
}
Example 13: usb_hcd_pci_suspend
/**
* usb_hcd_pci_suspend - power management suspend of a PCI-based HCD
* @dev: USB Host Controller being suspended
* @message: semantics in flux
*
* Store this function in the HCD's struct pci_driver as suspend().
*/
int usb_hcd_pci_suspend (struct pci_dev *dev, pm_message_t message)
{
struct usb_hcd *hcd;
int retval = 0;
int has_pci_pm;
hcd = pci_get_drvdata(dev);
/* FIXME until the generic PM interfaces change a lot more, this
* can't use PCI D1 and D2 states. For example, the confusion
* between messages and states will need to vanish, and messages
* will need to provide a target system state again.
*
* It'll be important to learn characteristics of the target state,
* especially on embedded hardware where the HCD will often be in
* charge of an external VBUS power supply and one or more clocks.
* Some target system states will leave them active; others won't.
* (With PCI, that's often handled by platform BIOS code.)
*/
/* even when the PCI layer rejects some of the PCI calls
* below, HCs can try global suspend and reduce DMA traffic.
* PM-sensitive HCDs may already have done this.
*/
has_pci_pm = pci_find_capability(dev, PCI_CAP_ID_PM);
switch (hcd->state) {
/* entry if root hub wasn't yet suspended ... from sysfs,
* without autosuspend, or if USB_SUSPEND isn't configured.
*/
case HC_STATE_RUNNING:
hcd->state = HC_STATE_QUIESCING;
retval = hcd->driver->suspend (hcd, message);
if (retval) {
dev_dbg (hcd->self.controller,
"suspend fail, retval %d\n",
retval);
break;
}
hcd->state = HC_STATE_SUSPENDED;
/* FALLTHROUGH */
/* entry with CONFIG_USB_SUSPEND, or hcds that autosuspend: the
* controller and/or root hub will already have been suspended,
* but it won't be ready for a PCI resume call.
*
* FIXME only CONFIG_USB_SUSPEND guarantees hub_suspend() will
* have been called, otherwise root hub timers still run ...
*/
case HC_STATE_SUSPENDED:
/* no DMA or IRQs except when HC is active */
if (dev->current_state == PCI_D0) {
free_irq (hcd->irq, hcd);
pci_save_state (dev);
pci_disable_device (dev);
}
if (!has_pci_pm) {
dev_dbg (hcd->self.controller, "--> PCI D0/legacy\n");
break;
}
/* NOTE: dev->current_state becomes nonzero only here, and
* only for devices that support PCI PM. Also, exiting
* PCI_D3 (but not PCI_D1 or PCI_D2) is allowed to reset
* some device state (e.g. as part of clock reinit).
*/
retval = pci_set_power_state (dev, PCI_D3hot);
if (retval == 0) {
dev_dbg (hcd->self.controller, "--> PCI D3\n");
pci_enable_wake (dev, PCI_D3hot, hcd->remote_wakeup);
pci_enable_wake (dev, PCI_D3cold, hcd->remote_wakeup);
} else if (retval < 0) {
dev_dbg (&dev->dev, "PCI D3 suspend fail, %d\n",
retval);
(void) usb_hcd_pci_resume (dev);
break;
}
break;
default:
dev_dbg (hcd->self.controller, "hcd state %d; not suspended\n",
hcd->state);
WARN_ON(1);
retval = -EINVAL;
break;
}
/* update power_state **ONLY** to make sysfs happier */
if (retval == 0)
dev->dev.power.power_state = message;
return retval;
}
Example 14: pci_enable_msix
/**
* pci_enable_msix - configure device's MSI-X capability structure
* @dev: pointer to the pci_dev data structure of MSI-X device function
* @entries: pointer to an array of MSI-X entries
* @nvec: number of MSI-X vectors requested for allocation by device driver
*
* Setup the MSI-X capability structure of device function with the number
* of requested vectors upon its software driver call to request for
* MSI-X mode enabled on its hardware device function. A return of zero
* indicates the successful configuration of MSI-X capability structure
* with new allocated MSI-X vectors. A return of < 0 indicates a failure.
* Or a return of > 0 indicates that driver request is exceeding the number
* of vectors available. Driver should use the returned value to re-send
* its request.
**/
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
{
int status, pos, nr_entries, free_vectors;
int i, j, temp;
u16 control;
unsigned long flags;
if (!entries || pci_msi_supported(dev) < 0)
return -EINVAL;
status = msi_init();
if (status < 0)
return status;
pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
if (!pos)
return -EINVAL;
pci_read_config_word(dev, msi_control_reg(pos), &control);
nr_entries = multi_msix_capable(control);
if (nvec > nr_entries)
return -EINVAL;
/* Check for any invalid entries */
for (i = 0; i < nvec; i++) {
if (entries[i].entry >= nr_entries)
return -EINVAL; /* invalid entry */
for (j = i + 1; j < nvec; j++) {
if (entries[i].entry == entries[j].entry)
return -EINVAL; /* duplicate entry */
}
}
temp = dev->irq;
WARN_ON(!msi_lookup_vector(dev, PCI_CAP_ID_MSIX));
/* Check whether driver already requested for MSI vector */
if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 &&
!msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
printk(KERN_INFO "PCI: %s: Can't enable MSI-X. "
"Device already has an MSI vector assigned\n",
pci_name(dev));
dev->irq = temp;
return -EINVAL;
}
spin_lock_irqsave(&msi_lock, flags);
/*
* msi_lock is provided to ensure that enough vectors resources are
* available before granting.
*/
free_vectors = pci_vector_resources(last_alloc_vector,
nr_released_vectors);
/* Ensure that each MSI/MSI-X device has one vector reserved by
default to avoid any MSI-X driver to take all available
resources */
free_vectors -= nr_reserved_vectors;
spin_unlock_irqrestore(&msi_lock, flags);
if (nvec > free_vectors) {
if (free_vectors > 0)
return free_vectors;
else
return -EBUSY;
}
status = msix_capability_init(dev, entries, nvec);
return status;
}
Example 15: agp_serverworks_probe
static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct agp_bridge_data *bridge;
struct pci_dev *bridge_dev;
u32 temp, temp2;
u8 cap_ptr = 0;
cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
switch (pdev->device) {
case 0x0006:
dev_err(&pdev->dev, "ServerWorks CNB20HE is unsupported due to lack of documentation\n");
return -ENODEV;
case PCI_DEVICE_ID_SERVERWORKS_HE:
case PCI_DEVICE_ID_SERVERWORKS_LE:
case 0x0007:
break;
default:
if (cap_ptr)
dev_err(&pdev->dev, "unsupported Serverworks chipset "
"[%04x/%04x]\n", pdev->vendor, pdev->device);
return -ENODEV;
}
/* Everything is on func 1 here so we are hardcoding function one */
bridge_dev = pci_get_bus_and_slot((unsigned int)pdev->bus->number,
PCI_DEVFN(0, 1));
if (!bridge_dev) {
dev_info(&pdev->dev, "can't find secondary device\n");
return -ENODEV;
}
serverworks_private.svrwrks_dev = bridge_dev;
serverworks_private.gart_addr_ofs = 0x10;
pci_read_config_dword(pdev, SVWRKS_APSIZE, &temp);
if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
pci_read_config_dword(pdev, SVWRKS_APSIZE + 4, &temp2);
if (temp2 != 0) {
dev_info(&pdev->dev, "64 bit aperture address, "
"but top bits are not zero; disabling AGP\n");
return -ENODEV;
}
serverworks_private.mm_addr_ofs = 0x18;
} else
serverworks_private.mm_addr_ofs = 0x14;
pci_read_config_dword(pdev, serverworks_private.mm_addr_ofs, &temp);
if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
pci_read_config_dword(pdev,
serverworks_private.mm_addr_ofs + 4, &temp2);
if (temp2 != 0) {
dev_info(&pdev->dev, "64 bit MMIO address, but top "
"bits are not zero; disabling AGP\n");
return -ENODEV;
}
}
bridge = agp_alloc_bridge();
if (!bridge)
return -ENOMEM;
bridge->driver = &sworks_driver;
bridge->dev_private_data = &serverworks_private,
bridge->dev = pci_dev_get(pdev);
pci_set_drvdata(pdev, bridge);
return agp_add_bridge(bridge);
}