This page collects typical usage examples of the Linux kernel's dev_printk function. If you are wondering what dev_printk does, how it is called, or where to find real examples of it in use, the hand-picked code examples below may help.
The following presents 15 code examples of the dev_printk function, sorted by popularity by default.
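All 15 examples follow the same calling pattern: dev_printk(level, dev, fmt, ...), where level is one of the KERN_* log-level constants and dev points to the struct device the message refers to. As a minimal sketch, assuming a PCI driver (the probe function name and message below are illustrative, not taken from any of the examples):

#include <linux/device.h>
#include <linux/pci.h>

/* Illustrative probe routine: dev_printk() prefixes the output with the
 * driver name and device name, so the format string only carries the
 * device-specific details. */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	dev_printk(KERN_DEBUG, &pdev->dev, "probing device %04x:%04x\n",
		   pdev->vendor, pdev->device);
	return 0;
}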
Example 1: optidma_init_one
static int optidma_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
static const struct ata_port_info info_82c700 = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.port_ops = &optidma_port_ops
};
static const struct ata_port_info info_82c700_udma = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA2,
.port_ops = &optiplus_port_ops
};
const struct ata_port_info *ppi[] = { &info_82c700, NULL };
static int printed_version;
int rc;
if (!printed_version++)
dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
rc = pcim_enable_device(dev);
if (rc)
return rc;
/* Fixed location chipset magic */
inw(0x1F1);
inw(0x1F1);
pci_clock = inb(0x1F5) & 1; /* 0 = 33MHz, 1 = 25MHz */
if (optiplus_with_udma(dev))
ppi[0] = &info_82c700_udma;
return ata_pci_sff_init_one(dev, ppi, &optidma_sht, NULL);
}
Example 2: find_source_device
/**
* find_source_device - search through device hierarchy for source device
* @parent: pointer to Root Port pci_dev data structure
* @e_info: error info, including details such as the error source ID
*
* Return true if found.
*
* Invoked by DPC when error is detected at the Root Port.
* Caller of this function must set id, severity, and multi_error_valid of
* struct aer_err_info pointed to by @e_info properly. This function must fill
* e_info->error_dev_num and e_info->dev[], based on the given information.
*/
static bool find_source_device(struct pci_dev *parent,
struct aer_err_info *e_info)
{
struct pci_dev *dev = parent;
int result;
/* Must reset in this function */
e_info->error_dev_num = 0;
/* Is Root Port an agent that sends error message? */
result = find_device_iter(dev, e_info);
if (result)
return true;
pci_walk_bus(parent->subordinate, find_device_iter, e_info);
if (!e_info->error_dev_num) {
dev_printk(KERN_DEBUG, &parent->dev,
"can't find device of ID%04x\n",
e_info->id);
return false;
}
return true;
}
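The kernel-doc above spells out find_source_device()'s calling contract; a minimal caller sketch follows, assuming it runs inside the AER core itself (the handler name is made up, and AER_NONFATAL plus the aer_err_info field names are assumed from the mainline AER headers):

#include <linux/aer.h>
#include <linux/pci.h>

/* Hypothetical caller sketch: fill in the fields the kernel-doc requires,
 * then let find_source_device() populate error_dev_num and dev[]. */
static void example_handle_root_error(struct pci_dev *root_port, u16 source_id)
{
	struct aer_err_info e_info = { 0 };

	e_info.id = source_id;
	e_info.severity = AER_NONFATAL;	/* assumed constant from <linux/aer.h> */
	e_info.multi_error_valid = 0;

	if (find_source_device(root_port, &e_info)) {
		/* e_info.error_dev_num and e_info.dev[] now describe the sources. */
	}
}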
Example 3: platform_get_resource_byname
void __iomem *etnaviv_ioremap(struct platform_device *pdev, const char *name,
const char *dbgname)
{
struct resource *res;
void __iomem *ptr;
if (name)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
else
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ptr = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(ptr)) {
dev_err(&pdev->dev, "failed to ioremap %s: %ld\n", name,
PTR_ERR(ptr));
return ptr;
}
if (reglog)
dev_printk(KERN_DEBUG, &pdev->dev, "IO:region %s 0x%p %08zx\n",
dbgname, ptr, (size_t)resource_size(res));
return ptr;
}
Example 4: ns87415_init_one
static int ns87415_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int printed_version;
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.port_ops = &ns87415_pata_ops,
};
const struct ata_port_info *ppi[] = { &info, NULL };
int rc;
#if defined(CONFIG_SUPERIO)
static const struct ata_port_info info87560 = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.port_ops = &ns87560_pata_ops,
};
if (PCI_SLOT(pdev->devfn) == 0x0E)
ppi[0] = &info87560;
#endif
if (!printed_version++)
dev_printk(KERN_DEBUG, &pdev->dev,
"version " DRV_VERSION "\n");
rc = pcim_enable_device(pdev);
if (rc)
return rc;
/* Select 512 byte sectors */
pci_write_config_byte(pdev, 0x55, 0xEE);
/* Select PIO0 8bit clocking */
pci_write_config_byte(pdev, 0x54, 0xB7);
return ata_pci_sff_init_one(pdev, ppi, &ns87415_sht, NULL);
}
Example 5: svia_configure
static void svia_configure(struct pci_dev *pdev, int board_id)
{
u8 tmp8;
pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8);
dev_printk(KERN_INFO, &pdev->dev, "routed to hard irq line %d\n",
(int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f);
/* make sure SATA channels are enabled */
pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8);
if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
dev_printk(KERN_DEBUG, &pdev->dev,
"enabling SATA channels (0x%x)\n",
(int) tmp8);
tmp8 |= ALL_PORTS;
pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8);
}
/* make sure interrupts for each channel sent to us */
pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8);
if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
dev_printk(KERN_DEBUG, &pdev->dev,
"enabling SATA channel interrupts (0x%x)\n",
(int) tmp8);
tmp8 |= ALL_PORTS;
pci_write_config_byte(pdev, SATA_INT_GATE, tmp8);
}
/* make sure native mode is enabled */
pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8);
if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) {
dev_printk(KERN_DEBUG, &pdev->dev,
"enabling SATA channel native mode (0x%x)\n",
(int) tmp8);
tmp8 |= NATIVE_MODE_ALL;
pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
}
/*
* vt6420/1 has problems talking to some drives. The following
* is the fix from Joseph Chan <[email protected]>.
*
* When host issues HOLD, device may send up to 20DW of data
* before acknowledging it with HOLDA and the host should be
* able to buffer them in FIFO. Unfortunately, some WD drives
* send up to 40DW before acknowledging HOLD and, in the
* default configuration, this ends up overflowing vt6421's
* FIFO, making the controller abort the transaction with
* R_ERR.
*
* Rx52[2] is the internal 128DW FIFO Flow control watermark
* adjusting mechanism enable bit and the default value 0
* means host will issue HOLD to device when the left FIFO
* size goes below 32DW. Setting it to 1 makes the watermark
* 64DW.
*
* https://bugzilla.kernel.org/show_bug.cgi?id=15173
* http://article.gmane.org/gmane.linux.ide/46352
* http://thread.gmane.org/gmane.linux.kernel/1062139
*/
if (board_id == vt6420 || board_id == vt6421) {
pci_read_config_byte(pdev, 0x52, &tmp8);
tmp8 |= 1 << 2;
pci_write_config_byte(pdev, 0x52, tmp8);
}
}
Example 6: piix4_setup
static int __devinit piix4_setup(struct pci_dev *PIIX4_dev,
const struct pci_device_id *id)
{
unsigned char temp;
if ((PIIX4_dev->vendor == PCI_VENDOR_ID_SERVERWORKS) &&
(PIIX4_dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5))
srvrworks_csb5_delay = 1;
/* On some motherboards, it was reported that accessing the SMBus
caused severe hardware problems */
if (dmi_check_system(piix4_dmi_blacklist)) {
dev_err(&PIIX4_dev->dev,
"Accessing the SMBus on this system is unsafe!\n");
return -EPERM;
}
/* Don't access SMBus on IBM systems which get corrupted eeproms */
if (dmi_check_system(piix4_dmi_ibm) &&
PIIX4_dev->vendor == PCI_VENDOR_ID_INTEL) {
dev_err(&PIIX4_dev->dev, "IBM system detected; this module "
"may corrupt your serial eeprom! Refusing to load "
"module!\n");
return -EPERM;
}
/* Determine the address of the SMBus areas */
if (force_addr) {
piix4_smba = force_addr & 0xfff0;
force = 0;
} else {
pci_read_config_word(PIIX4_dev, SMBBA, &piix4_smba);
piix4_smba &= 0xfff0;
if(piix4_smba == 0) {
dev_err(&PIIX4_dev->dev, "SMBus base address "
"uninitialized - upgrade BIOS or use "
"force_addr=0xaddr\n");
return -ENODEV;
}
}
if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name))
return -ENODEV;
if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) {
dev_err(&PIIX4_dev->dev, "SMBus region 0x%x already in use!\n",
piix4_smba);
return -EBUSY;
}
pci_read_config_byte(PIIX4_dev, SMBHSTCFG, &temp);
/* If force_addr is set, we program the new address here. Just to make
sure, we disable the PIIX4 first. */
if (force_addr) {
pci_write_config_byte(PIIX4_dev, SMBHSTCFG, temp & 0xfe);
pci_write_config_word(PIIX4_dev, SMBBA, piix4_smba);
pci_write_config_byte(PIIX4_dev, SMBHSTCFG, temp | 0x01);
dev_info(&PIIX4_dev->dev, "WARNING: SMBus interface set to "
"new address %04x!\n", piix4_smba);
} else if ((temp & 1) == 0) {
if (force) {
/* This should never need to be done, but has been
* noted that many Dell machines have the SMBus
* interface on the PIIX4 disabled!? NOTE: This assumes
* I/O space and other allocations WERE done by the
* Bios! Don't complain if your hardware does weird
* things after enabling this. :') Check for Bios
* updates before resorting to this.
*/
pci_write_config_byte(PIIX4_dev, SMBHSTCFG,
temp | 1);
dev_printk(KERN_NOTICE, &PIIX4_dev->dev,
"WARNING: SMBus interface has been "
"FORCEFULLY ENABLED!\n");
} else {
dev_err(&PIIX4_dev->dev,
"Host SMBus controller not enabled!\n");
release_region(piix4_smba, SMBIOSIZE);
piix4_smba = 0;
return -ENODEV;
}
}
if (((temp & 0x0E) == 8) || ((temp & 0x0E) == 2))
dev_dbg(&PIIX4_dev->dev, "Using Interrupt 9 for SMBus.\n");
else if ((temp & 0x0E) == 0)
dev_dbg(&PIIX4_dev->dev, "Using Interrupt SMI# for SMBus.\n");
else
dev_err(&PIIX4_dev->dev, "Illegal Interrupt configuration "
"(or code out of date)!\n");
pci_read_config_byte(PIIX4_dev, SMBREV, &temp);
dev_info(&PIIX4_dev->dev,
"SMBus Host Controller at 0x%x, revision %d\n",
piix4_smba, temp);
return 0;
}
Example 7: pm8001_task_exec
//......... part of the code is omitted here .........
pm8001_printk("device %d not ready.\n",
pm8001_dev->device_id));
} else {
PM8001_IO_DBG(pm8001_ha,
pm8001_printk("device %016llx not "
"ready.\n", SAS_ADDR(dev->sas_addr)));
}
rc = SAS_PHY_DOWN;
goto out_done;
}
port = &pm8001_ha->port[sas_find_local_port_id(dev)];
if (!port->port_attached) {
if (sas_protocol_ata(t->task_proto)) {
struct task_status_struct *ts = &t->task_status;
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_PHY_DOWN;
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
spin_unlock_irqrestore(dev->sata_dev.ap->lock,
flags_libsas);
t->task_done(t);
spin_lock_irqsave(dev->sata_dev.ap->lock,
flags_libsas);
spin_lock_irqsave(&pm8001_ha->lock, flags);
if (n > 1)
t = list_entry(t->list.next,
struct sas_task, list);
continue;
} else {
struct task_status_struct *ts = &t->task_status;
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_PHY_DOWN;
t->task_done(t);
if (n > 1)
t = list_entry(t->list.next,
struct sas_task, list);
continue;
}
}
rc = pm8001_tag_alloc(pm8001_ha, &tag);
if (rc)
goto err_out;
ccb = &pm8001_ha->ccb_info[tag];
if (!sas_protocol_ata(t->task_proto)) {
if (t->num_scatter) {
n_elem = dma_map_sg(pm8001_ha->dev,
t->scatter,
t->num_scatter,
t->data_dir);
if (!n_elem) {
rc = -ENOMEM;
goto err_out_tag;
}
}
} else {
n_elem = t->num_scatter;
}
t->lldd_task = ccb;
ccb->n_elem = n_elem;
ccb->ccb_tag = tag;
ccb->task = t;
switch (t->task_proto) {
case SAS_PROTOCOL_SMP:
rc = pm8001_task_prep_smp(pm8001_ha, ccb);
break;
case SAS_PROTOCOL_SSP:
if (is_tmf)
rc = pm8001_task_prep_ssp_tm(pm8001_ha,
ccb, tmf);
else
rc = pm8001_task_prep_ssp(pm8001_ha, ccb);
break;
case SAS_PROTOCOL_SATA:
case SAS_PROTOCOL_STP:
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
rc = pm8001_task_prep_ata(pm8001_ha, ccb);
break;
default:
dev_printk(KERN_ERR, pm8001_ha->dev,
"unknown sas_task proto: 0x%x\n",
t->task_proto);
rc = -EINVAL;
break;
}
if (rc) {
PM8001_IO_DBG(pm8001_ha,
pm8001_printk("rc is %x\n", rc));
goto err_out_tag;
}
/* TODO: select normal or high priority */
spin_lock(&t->task_state_lock);
t->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock(&t->task_state_lock);
pm8001_dev->running_req++;
if (n > 1)
t = list_entry(t->list.next, struct sas_task, list);
} while (--n);
Example 8: asihpi_adapter_probe
int asihpi_adapter_probe(struct pci_dev *pci_dev,
const struct pci_device_id *pci_id)
{
int idx, nm;
int adapter_index;
unsigned int memlen;
struct hpi_message hm;
struct hpi_response hr;
struct hpi_adapter adapter;
struct hpi_pci pci;
memset(&adapter, 0, sizeof(adapter));
dev_printk(KERN_DEBUG, &pci_dev->dev,
"probe %04x:%04x,%04x:%04x,%04x\n", pci_dev->vendor,
pci_dev->device, pci_dev->subsystem_vendor,
pci_dev->subsystem_device, pci_dev->devfn);
if (pci_enable_device(pci_dev) < 0) {
dev_err(&pci_dev->dev,
"pci_enable_device failed, disabling device\n");
return -EIO;
}
pci_set_master(pci_dev); /* also sets latency timer if < 16 */
hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
HPI_SUBSYS_CREATE_ADAPTER);
hpi_init_response(&hr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_CREATE_ADAPTER,
HPI_ERROR_PROCESSING_MESSAGE);
hm.adapter_index = HPI_ADAPTER_INDEX_INVALID;
nm = HPI_MAX_ADAPTER_MEM_SPACES;
for (idx = 0; idx < nm; idx++) {
HPI_DEBUG_LOG(INFO, "resource %d %pR\n", idx,
&pci_dev->resource[idx]);
if (pci_resource_flags(pci_dev, idx) & IORESOURCE_MEM) {
memlen = pci_resource_len(pci_dev, idx);
pci.ap_mem_base[idx] =
ioremap(pci_resource_start(pci_dev, idx),
memlen);
if (!pci.ap_mem_base[idx]) {
HPI_DEBUG_LOG(ERROR,
"ioremap failed, aborting\n");
/* unmap previously mapped pci mem space */
goto err;
}
}
}
pci.pci_dev = pci_dev;
hm.u.s.resource.bus_type = HPI_BUS_PCI;
hm.u.s.resource.r.pci = &pci;
/* call CreateAdapterObject on the relevant hpi module */
hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
if (hr.error)
goto err;
adapter_index = hr.u.s.adapter_index;
adapter.adapter = hpi_find_adapter(adapter_index);
if (prealloc_stream_buf) {
adapter.p_buffer = vmalloc(prealloc_stream_buf);
if (!adapter.p_buffer) {
HPI_DEBUG_LOG(ERROR,
"HPI could not allocate "
"kernel buffer size %d\n",
prealloc_stream_buf);
goto err;
}
}
hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
HPI_ADAPTER_OPEN);
hm.adapter_index = adapter.adapter->index;
hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
if (hr.error)
goto err;
/* WARNING can't init mutex in 'adapter'
* and then copy it to adapters[] ?!?!
*/
adapters[adapter_index] = adapter;
mutex_init(&adapters[adapter_index].mutex);
pci_set_drvdata(pci_dev, &adapters[adapter_index]);
dev_info(&pci_dev->dev, "probe succeeded for ASI%04X HPI index %d\n",
adapter.adapter->type, adapter_index);
return 0;
err:
for (idx = 0; idx < HPI_MAX_ADAPTER_MEM_SPACES; idx++) {
if (pci.ap_mem_base[idx]) {
iounmap(pci.ap_mem_base[idx]);
//......... part of the code is omitted here .........
Example 9: mpcore_wdt_probe
static int __devinit mpcore_wdt_probe(struct platform_device *dev)
{
struct mpcore_wdt *wdt;
struct resource *res;
int ret;
/* We only accept one device, and it must have an id of -1 */
if (dev->id != -1)
return -ENODEV;
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!res) {
ret = -ENODEV;
goto err_out;
}
wdt = kmalloc(sizeof(struct mpcore_wdt), GFP_KERNEL);
if (!wdt) {
ret = -ENOMEM;
goto err_out;
}
memset(wdt, 0, sizeof(struct mpcore_wdt));
wdt->dev = &dev->dev;
wdt->irq = platform_get_irq(dev, 0);
if (wdt->irq < 0) {
ret = -ENXIO;
goto err_free;
}
wdt->base = ioremap(res->start, res->end - res->start + 1);
if (!wdt->base) {
ret = -ENOMEM;
goto err_free;
}
mpcore_wdt_miscdev.dev = &dev->dev;
ret = misc_register(&mpcore_wdt_miscdev);
if (ret) {
dev_printk(KERN_ERR, _dev, "cannot register miscdev on minor=%d (err=%d)\n",
WATCHDOG_MINOR, ret);
goto err_misc;
}
ret = request_irq(wdt->irq, mpcore_wdt_fire, SA_INTERRUPT, "mpcore_wdt", wdt);
if (ret) {
dev_printk(KERN_ERR, _dev, "cannot register IRQ%d for watchdog\n", wdt->irq);
goto err_irq;
}
mpcore_wdt_stop(wdt);
platform_set_drvdata(dev, wdt);
mpcore_wdt_dev = dev;
return 0;
err_irq:
misc_deregister(&mpcore_wdt_miscdev);
err_misc:
iounmap(wdt->base);
err_free:
kfree(wdt);
err_out:
return ret;
}
Example 10: cs5520_init_one
static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const unsigned int cmd_port[] = { 0x1F0, 0x170 };
static const unsigned int ctl_port[] = { 0x3F6, 0x376 };
struct ata_port_info pi = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = 0x1f,
.port_ops = &cs5520_port_ops,
};
const struct ata_port_info *ppi[2];
u8 pcicfg;
void __iomem *iomap[5];
struct ata_host *host;
struct ata_ioports *ioaddr;
int i, rc;
rc = pcim_enable_device(pdev);
if (rc)
return rc;
/* IDE port enable bits */
pci_read_config_byte(pdev, 0x60, &pcicfg);
/* Check if the ATA ports are enabled */
if ((pcicfg & 3) == 0)
return -ENODEV;
ppi[0] = ppi[1] = &ata_dummy_port_info;
if (pcicfg & 1)
ppi[0] = &pi;
if (pcicfg & 2)
ppi[1] = &pi;
if ((pcicfg & 0x40) == 0) {
dev_printk(KERN_WARNING, &pdev->dev,
"DMA mode disabled. Enabling.\n");
pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);
}
pi.mwdma_mask = id->driver_data;
host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
if (!host)
return -ENOMEM;
/* Perform set up for DMA */
if (pci_enable_device_bars(pdev, 1<<2)) {
printk(KERN_ERR DRV_NAME ": unable to configure BAR2.\n");
return -ENODEV;
}
if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n");
return -ENODEV;
}
if (pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
printk(KERN_ERR DRV_NAME ": unable to configure consistent DMA mask.\n");
return -ENODEV;
}
/* Map IO ports and initialize host accordingly */
iomap[0] = devm_ioport_map(&pdev->dev, cmd_port[0], 8);
iomap[1] = devm_ioport_map(&pdev->dev, ctl_port[0], 1);
iomap[2] = devm_ioport_map(&pdev->dev, cmd_port[1], 8);
iomap[3] = devm_ioport_map(&pdev->dev, ctl_port[1], 1);
iomap[4] = pcim_iomap(pdev, 2, 0);
if (!iomap[0] || !iomap[1] || !iomap[2] || !iomap[3] || !iomap[4])
return -ENOMEM;
ioaddr = &host->ports[0]->ioaddr;
ioaddr->cmd_addr = iomap[0];
ioaddr->ctl_addr = iomap[1];
ioaddr->altstatus_addr = iomap[1];
ioaddr->bmdma_addr = iomap[4];
ata_sff_std_ports(ioaddr);
ata_port_desc(host->ports[0],
"cmd 0x%x ctl 0x%x", cmd_port[0], ctl_port[0]);
ata_port_pbar_desc(host->ports[0], 4, 0, "bmdma");
ioaddr = &host->ports[1]->ioaddr;
ioaddr->cmd_addr = iomap[2];
ioaddr->ctl_addr = iomap[3];
ioaddr->altstatus_addr = iomap[3];
ioaddr->bmdma_addr = iomap[4] + 8;
ata_sff_std_ports(ioaddr);
ata_port_desc(host->ports[1],
"cmd 0x%x ctl 0x%x", cmd_port[1], ctl_port[1]);
ata_port_pbar_desc(host->ports[1], 4, 8, "bmdma");
/* activate the host */
pci_set_master(pdev);
rc = ata_host_start(host);
if (rc)
return rc;
for (i = 0; i < 2; i++) {
static const int irq[] = { 14, 15 };
//......... part of the code is omitted here .........
Example 11: setup_resource
static acpi_status
setup_resource(struct acpi_resource *acpi_res, void *data)
{
struct pci_root_info *info = data;
struct resource *res;
struct acpi_resource_address64 addr;
acpi_status status;
unsigned long flags;
u64 start, orig_end, end;
status = resource_to_addr(acpi_res, &addr);
if (!ACPI_SUCCESS(status))
return AE_OK;
if (addr.resource_type == ACPI_MEMORY_RANGE) {
flags = IORESOURCE_MEM;
if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
flags |= IORESOURCE_PREFETCH;
} else if (addr.resource_type == ACPI_IO_RANGE) {
flags = IORESOURCE_IO;
} else
return AE_OK;
start = addr.minimum + addr.translation_offset;
orig_end = end = addr.maximum + addr.translation_offset;
/* Exclude non-addressable range or non-addressable portion of range */
end = min(end, (u64)iomem_resource.end);
if (end <= start) {
dev_info(&info->bridge->dev,
"host bridge window [%#llx-%#llx] "
"(ignored, not CPU addressable)\n", start, orig_end);
return AE_OK;
} else if (orig_end != end) {
dev_info(&info->bridge->dev,
"host bridge window [%#llx-%#llx] "
"([%#llx-%#llx] ignored, not CPU addressable)\n",
start, orig_end, end + 1, orig_end);
}
res = &info->res[info->res_num];
res->name = info->name;
res->flags = flags;
res->start = start;
res->end = end;
res->child = NULL;
if (!pci_use_crs) {
dev_printk(KERN_DEBUG, &info->bridge->dev,
"host bridge window %pR (ignored)\n", res);
return AE_OK;
}
info->res_num++;
if (addr.translation_offset)
dev_info(&info->bridge->dev, "host bridge window %pR "
"(PCI address [%#llx-%#llx])\n",
res, res->start - addr.translation_offset,
res->end - addr.translation_offset);
else
dev_info(&info->bridge->dev, "host bridge window %pR\n", res);
return AE_OK;
}
Example 12: hpt3x3_init_one
static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
static int printed_version;
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
#if defined(CONFIG_PATA_HPT3X3_DMA)
/* Further debug needed */
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA2,
#endif
.port_ops = &hpt3x3_port_ops
};
/* Register offsets of taskfiles in BAR4 area */
static const u8 offset_cmd[2] = { 0x20, 0x28 };
static const u8 offset_ctl[2] = { 0x36, 0x3E };
const struct ata_port_info *ppi[] = { &info, NULL };
struct ata_host *host;
int i, rc;
void __iomem *base;
hpt3x3_init_chipset(pdev);
if (!printed_version++)
dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
if (!host)
return -ENOMEM;
/* acquire resources and fill host */
rc = pcim_enable_device(pdev);
if (rc)
return rc;
/* Everything is relative to BAR4 if we set up this way */
rc = pcim_iomap_regions(pdev, 1 << 4, DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(pdev);
if (rc)
return rc;
host->iomap = pcim_iomap_table(pdev);
rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
if (rc)
return rc;
rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
if (rc)
return rc;
base = host->iomap[4]; /* Bus mastering base */
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
struct ata_ioports *ioaddr = &ap->ioaddr;
ioaddr->cmd_addr = base + offset_cmd[i];
ioaddr->altstatus_addr =
ioaddr->ctl_addr = base + offset_ctl[i];
ioaddr->scr_addr = NULL;
ata_sff_std_ports(ioaddr);
ioaddr->bmdma_addr = base + 8 * i;
ata_port_pbar_desc(ap, 4, -1, "ioport");
ata_port_pbar_desc(ap, 4, offset_cmd[i], "cmd");
}
pci_set_master(pdev);
return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
IRQF_SHARED, &hpt3x3_sht);
}
Example 13: pcmcia_modify_configuration
/** pcmcia_modify_configuration
*
* Modify a locked socket configuration
*/
int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
modconf_t *mod)
{
struct pcmcia_socket *s;
config_t *c;
int ret;
s = p_dev->socket;
mutex_lock(&s->ops_mutex);
c = p_dev->function_config;
if (!(s->state & SOCKET_PRESENT)) {
dev_dbg(&s->dev, "No card present\n");
ret = -ENODEV;
goto unlock;
}
if (!(c->state & CONFIG_LOCKED)) {
dev_dbg(&s->dev, "Configuration isnt't locked\n");
ret = -EACCES;
goto unlock;
}
if (mod->Attributes & (CONF_IRQ_CHANGE_VALID | CONF_VCC_CHANGE_VALID)) {
dev_dbg(&s->dev,
"changing Vcc or IRQ is not allowed at this time\n");
ret = -EINVAL;
goto unlock;
}
/* We only allow changing Vpp1 and Vpp2 to the same value */
if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) &&
(mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
if (mod->Vpp1 != mod->Vpp2) {
dev_dbg(&s->dev, "Vpp1 and Vpp2 must be the same\n");
ret = -EINVAL;
goto unlock;
}
s->socket.Vpp = mod->Vpp1;
if (s->ops->set_socket(s, &s->socket)) {
dev_printk(KERN_WARNING, &s->dev,
"Unable to set VPP\n");
ret = -EIO;
goto unlock;
}
} else if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) ||
(mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
dev_dbg(&s->dev, "changing Vcc is not allowed at this time\n");
ret = -EINVAL;
goto unlock;
}
if (mod->Attributes & CONF_IO_CHANGE_WIDTH) {
pccard_io_map io_off = { 0, 0, 0, 0, 1 };
pccard_io_map io_on;
int i;
io_on.speed = io_speed;
for (i = 0; i < MAX_IO_WIN; i++) {
if (!s->io[i].res)
continue;
io_off.map = i;
io_on.map = i;
io_on.flags = MAP_ACTIVE | IO_DATA_PATH_WIDTH_8;
io_on.start = s->io[i].res->start;
io_on.stop = s->io[i].res->end;
s->ops->set_io_map(s, &io_off);
mdelay(40);
s->ops->set_io_map(s, &io_on);
}
}
ret = 0;
unlock:
mutex_unlock(&s->ops_mutex);
return ret;
} /* modify_configuration */
Example 14: sis_init_one
static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int printed_version;
struct ata_port_info pi = sis_port_info;
const struct ata_port_info *ppi[] = { &pi, &pi };
struct ata_host *host;
u32 genctl, val;
u8 pmr;
u8 port2_start = 0x20;
int i, rc;
if (!printed_version++)
dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
rc = pcim_enable_device(pdev);
if (rc)
return rc;
/* check and see if the SCRs are in IO space or PCI cfg space */
pci_read_config_dword(pdev, SIS_GENCTL, &genctl);
if ((genctl & GENCTL_IOMAPPED_SCR) == 0)
pi.flags |= SIS_FLAG_CFGSCR;
/* if hardware thinks SCRs are in IO space, but there are
* no IO resources assigned, change to PCI cfg space.
*/
if ((!(pi.flags & SIS_FLAG_CFGSCR)) &&
((pci_resource_start(pdev, SIS_SCR_PCI_BAR) == 0) ||
(pci_resource_len(pdev, SIS_SCR_PCI_BAR) < 128))) {
genctl &= ~GENCTL_IOMAPPED_SCR;
pci_write_config_dword(pdev, SIS_GENCTL, genctl);
pi.flags |= SIS_FLAG_CFGSCR;
}
pci_read_config_byte(pdev, SIS_PMR, &pmr);
switch (ent->device) {
case 0x0180:
case 0x0181:
/* The PATA-handling is provided by pata_sis */
switch (pmr & 0x30) {
case 0x10:
ppi[1] = &sis_info133_for_sata;
break;
case 0x30:
ppi[0] = &sis_info133_for_sata;
break;
}
if ((pmr & SIS_PMR_COMBINED) == 0) {
dev_printk(KERN_INFO, &pdev->dev,
"Detected SiS 180/181/964 chipset in SATA mode\n");
port2_start = 64;
} else {
dev_printk(KERN_INFO, &pdev->dev,
"Detected SiS 180/181 chipset in combined mode\n");
port2_start = 0;
pi.flags |= ATA_FLAG_SLAVE_POSS;
}
break;
case 0x0182:
case 0x0183:
pci_read_config_dword(pdev, 0x6C, &val);
if (val & (1L << 31)) {
dev_printk(KERN_INFO, &pdev->dev,
"Detected SiS 182/965 chipset\n");
pi.flags |= ATA_FLAG_SLAVE_POSS;
} else {
dev_printk(KERN_INFO, &pdev->dev,
"Detected SiS 182/965L chipset\n");
}
break;
case 0x1182:
dev_printk(KERN_INFO, &pdev->dev,
"Detected SiS 1182/966/680 SATA controller\n");
pi.flags |= ATA_FLAG_SLAVE_POSS;
break;
case 0x1183:
dev_printk(KERN_INFO, &pdev->dev,
"Detected SiS 1183/966/966L/968/680 controller in PATA mode\n");
ppi[0] = &sis_info133_for_sata;
ppi[1] = &sis_info133_for_sata;
break;
}
rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
if (rc)
return rc;
for (i = 0; i < 2; i++) {
struct ata_port *ap = host->ports[i];
if (ap->flags & ATA_FLAG_SATA &&
ap->flags & ATA_FLAG_SLAVE_POSS) {
rc = ata_slave_link_init(ap);
if (rc)
return rc;
//......... part of the code is omitted here .........
Example 15: sil680_init_one
static int __devinit sil680_init_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &sil680_port_ops
};
static const struct ata_port_info info_slow = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
.port_ops = &sil680_port_ops
};
const struct ata_port_info *ppi[] = { &info, NULL };
static int printed_version;
struct ata_host *host;
void __iomem *mmio_base;
int rc, try_mmio;
if (!printed_version++)
dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
rc = pcim_enable_device(pdev);
if (rc)
return rc;
switch (sil680_init_chip(pdev, &try_mmio)) {
case 0:
ppi[0] = &info_slow;
break;
case 0x30:
return -ENODEV;
}
if (!try_mmio)
goto use_ioports;
/* Try to acquire MMIO resources and fallback to PIO if
* that fails
*/
rc = pcim_iomap_regions(pdev, 1 << SIL680_MMIO_BAR, DRV_NAME);
if (rc)
goto use_ioports;
/* Allocate host and set it up */
host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
if (!host)
return -ENOMEM;
host->iomap = pcim_iomap_table(pdev);
/* Setup DMA masks */
rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
if (rc)
return rc;
rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
if (rc)
return rc;
pci_set_master(pdev);
/* Get MMIO base and initialize port addresses */
mmio_base = host->iomap[SIL680_MMIO_BAR];
host->ports[0]->ioaddr.bmdma_addr = mmio_base + 0x00;
host->ports[0]->ioaddr.cmd_addr = mmio_base + 0x80;
host->ports[0]->ioaddr.ctl_addr = mmio_base + 0x8a;
host->ports[0]->ioaddr.altstatus_addr = mmio_base + 0x8a;
ata_sff_std_ports(&host->ports[0]->ioaddr);
host->ports[1]->ioaddr.bmdma_addr = mmio_base + 0x08;
host->ports[1]->ioaddr.cmd_addr = mmio_base + 0xc0;
host->ports[1]->ioaddr.ctl_addr = mmio_base + 0xca;
host->ports[1]->ioaddr.altstatus_addr = mmio_base + 0xca;
ata_sff_std_ports(&host->ports[1]->ioaddr);
/* Register & activate */
return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &sil680_sht);
use_ioports:
return ata_pci_bmdma_init_one(pdev, ppi, &sil680_sht, NULL, 0);
}