Diffstat (limited to 'drivers/pci')
 drivers/pci/dmar.c                 |  73
 drivers/pci/hotplug/pciehp.h       |   2
 drivers/pci/hotplug/pciehp_core.c  |   7
 drivers/pci/hotplug/pciehp_hpc.c   |  15
 drivers/pci/intel-iommu.c          |  30
 drivers/pci/intr_remapping.c       |  21
 drivers/pci/msi.c                  |  10
 drivers/pci/pci-driver.c           | 164
 drivers/pci/pci-sysfs.c            |   4
 drivers/pci/pci.c                  |  17
 drivers/pci/pci.h                  |  20
 drivers/pci/pcie/aer/aerdrv_core.c |  48
 drivers/pci/pcie/aspm.c            |   4
 drivers/pci/pcie/portdrv_pci.c     |  18
 drivers/pci/quirks.c               | 122
 drivers/pci/rom.c                  |   9
16 files changed, 396 insertions, 168 deletions
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index f5a662a50ac..26c536b51c5 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -330,6 +330,14 @@ parse_dmar_table(void)
 	entry_header = (struct acpi_dmar_header *)(dmar + 1);
 	while (((unsigned long)entry_header) <
 			(((unsigned long)dmar) + dmar_tbl->length)) {
+		/* Avoid looping forever on bad ACPI tables */
+		if (entry_header->length == 0) {
+			printk(KERN_WARNING PREFIX
+				"Invalid 0-length structure\n");
+			ret = -EINVAL;
+			break;
+		}
+
 		dmar_table_print_dmar_entry(entry_header);
 
 		switch (entry_header->type) {
@@ -491,7 +499,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	int map_size;
 	u32 ver;
 	static int iommu_allocated = 0;
-	int agaw;
+	int agaw = 0;
 
 	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
 	if (!iommu)
@@ -507,6 +515,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
 	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
 
+#ifdef CONFIG_DMAR
 	agaw = iommu_calculate_agaw(iommu);
 	if (agaw < 0) {
 		printk(KERN_ERR
@@ -514,6 +523,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 			iommu->seq_id);
 		goto error;
 	}
+#endif
 	iommu->agaw = agaw;
 
 	/* the registers might be more than one page */
@@ -571,19 +581,49 @@ static inline void reclaim_free_desc(struct q_inval *qi)
 	}
 }
 
+static int qi_check_fault(struct intel_iommu *iommu, int index)
+{
+	u32 fault;
+	int head;
+	struct q_inval *qi = iommu->qi;
+	int wait_index = (index + 1) % QI_LENGTH;
+
+	fault = readl(iommu->reg + DMAR_FSTS_REG);
+
+	/*
+	 * If IQE happens, the head points to the descriptor associated
+	 * with the error. No new descriptors are fetched until the IQE
+	 * is cleared.
+	 */
+	if (fault & DMA_FSTS_IQE) {
+		head = readl(iommu->reg + DMAR_IQH_REG);
+		if ((head >> 4) == index) {
+			memcpy(&qi->desc[index], &qi->desc[wait_index],
+					sizeof(struct qi_desc));
+			__iommu_flush_cache(iommu, &qi->desc[index],
+					sizeof(struct qi_desc));
+			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
 /*
  * Submit the queued invalidation descriptor to the remapping
  * hardware unit and wait for its completion.
  */
-void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
+int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 {
+	int rc = 0;
 	struct q_inval *qi = iommu->qi;
 	struct qi_desc *hw, wait_desc;
 	int wait_index, index;
 	unsigned long flags;
 
 	if (!qi)
-		return;
+		return 0;
 
 	hw = qi->desc;
@@ -601,7 +641,8 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 
 	hw[index] = *desc;
 
-	wait_desc.low = QI_IWD_STATUS_DATA(2) | QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
+	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
+			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
 	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
 
 	hw[wait_index] = wait_desc;
@@ -612,13 +653,11 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
 	qi->free_cnt -= 2;
 
-	spin_lock(&iommu->register_lock);
 	/*
 	 * update the HW tail register indicating the presence of
 	 * new descriptors.
 	 */
 	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
-	spin_unlock(&iommu->register_lock);
 
 	while (qi->desc_status[wait_index] != QI_DONE) {
 		/*
@@ -628,15 +667,21 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 		 * a deadlock where the interrupt context can wait indefinitely
 		 * for free slots in the queue.
 		 */
+		rc = qi_check_fault(iommu, index);
+		if (rc)
+			goto out;
+
 		spin_unlock(&qi->q_lock);
 		cpu_relax();
 		spin_lock(&qi->q_lock);
 	}
-
-	qi->desc_status[index] = QI_DONE;
+out:
+	qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;
 
 	reclaim_free_desc(qi);
 	spin_unlock_irqrestore(&qi->q_lock, flags);
+
+	return rc;
 }
 
 /*
@@ -649,13 +694,13 @@ void qi_global_iec(struct intel_iommu *iommu)
 	desc.low = QI_IEC_TYPE;
 	desc.high = 0;
 
+	/* should never fail */
 	qi_submit_sync(&desc, iommu);
 }
 
 int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
 		     u64 type, int non_present_entry_flush)
 {
-
 	struct qi_desc desc;
 
 	if (non_present_entry_flush) {
@@ -669,10 +714,7 @@ int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
 		| QI_CC_GRAN(type) | QI_CC_TYPE;
 	desc.high = 0;
 
-	qi_submit_sync(&desc, iommu);
-
-	return 0;
-
+	return qi_submit_sync(&desc, iommu);
 }
 
 int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
@@ -702,10 +744,7 @@ int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
 		| QI_IOTLB_AM(size_order);
 
-	qi_submit_sync(&desc, iommu);
-
-	return 0;
-
+	return qi_submit_sync(&desc, iommu);
 }
 
 /*
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index db85284ffb6..39ae37589fd 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -111,6 +111,7 @@ struct controller {
 	int cmd_busy;
 	unsigned int no_cmd_complete:1;
 	unsigned int link_active_reporting:1;
+	unsigned int notification_enabled:1;
 };
 
 #define INT_BUTTON_IGNORE	0
@@ -170,6 +171,7 @@ extern int pciehp_configure_device(struct slot *p_slot);
 extern int pciehp_unconfigure_device(struct slot *p_slot);
 extern void pciehp_queue_pushbutton_work(struct work_struct *work);
 struct controller *pcie_init(struct pcie_device *dev);
+int pcie_init_notification(struct controller *ctrl);
 int pciehp_enable_slot(struct slot *p_slot);
 int pciehp_disable_slot(struct slot *p_slot);
 int pcie_enable_notification(struct controller *ctrl);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index c2485542f54..681e3912b82 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -434,6 +434,13 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
 		goto err_out_release_ctlr;
 	}
 
+	/* Enable events after we have setup the data structures */
+	rc = pcie_init_notification(ctrl);
+	if (rc) {
+		ctrl_err(ctrl, "Notification initialization failed\n");
+		goto err_out_release_ctlr;
+	}
+
 	/* Check if slot is occupied */
 	t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset);
 	t_slot->hpc_ops->get_adapter_status(t_slot, &value);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 71a8012886b..7a16c6897bb 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -934,7 +934,7 @@ static void pcie_disable_notification(struct controller *ctrl)
 		ctrl_warn(ctrl, "Cannot disable software notification\n");
 }
 
-static int pcie_init_notification(struct controller *ctrl)
+int pcie_init_notification(struct controller *ctrl)
 {
 	if (pciehp_request_irq(ctrl))
 		return -1;
@@ -942,13 +942,17 @@ static int pcie_init_notification(struct controller *ctrl)
 		pciehp_free_irq(ctrl);
 		return -1;
 	}
+	ctrl->notification_enabled = 1;
 	return 0;
 }
 
 static void pcie_shutdown_notification(struct controller *ctrl)
 {
-	pcie_disable_notification(ctrl);
-	pciehp_free_irq(ctrl);
+	if (ctrl->notification_enabled) {
+		pcie_disable_notification(ctrl);
+		pciehp_free_irq(ctrl);
+		ctrl->notification_enabled = 0;
+	}
 }
 
 static int pcie_init_slot(struct controller *ctrl)
@@ -1110,13 +1114,8 @@ struct controller *pcie_init(struct pcie_device *dev)
 	if (pcie_init_slot(ctrl))
 		goto abort_ctrl;
 
-	if (pcie_init_notification(ctrl))
-		goto abort_slot;
-
 	return ctrl;
 
-abort_slot:
-	pcie_cleanup_slot(ctrl);
 abort_ctrl:
 	kfree(ctrl);
 abort:
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 3dfecb20d5e..f3f686581a9 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -61,6 +61,8 @@
 /* global iommu list, set NULL for ignored DMAR units */
 static struct intel_iommu **g_iommus;
 
+static int rwbf_quirk;
+
 /*
  * 0: Present
  * 1-11: Reserved
@@ -268,7 +270,12 @@ static long list_size;
 
 static void domain_remove_dev_info(struct dmar_domain *domain);
 
-int dmar_disabled;
+#ifdef CONFIG_DMAR_DEFAULT_ON
+int dmar_disabled = 0;
+#else
+int dmar_disabled = 1;
+#endif /*CONFIG_DMAR_DEFAULT_ON*/
+
 static int __initdata dmar_map_gfx = 1;
 static int dmar_forcedac;
 static int intel_iommu_strict;
@@ -284,9 +291,12 @@ static int __init intel_iommu_setup(char *str)
 	if (!str)
 		return -EINVAL;
 	while (*str) {
-		if (!strncmp(str, "off", 3)) {
+		if (!strncmp(str, "on", 2)) {
+			dmar_disabled = 0;
+			printk(KERN_INFO "Intel-IOMMU: enabled\n");
+		} else if (!strncmp(str, "off", 3)) {
 			dmar_disabled = 1;
-			printk(KERN_INFO"Intel-IOMMU: disabled\n");
+			printk(KERN_INFO "Intel-IOMMU: disabled\n");
 		} else if (!strncmp(str, "igfx_off", 8)) {
 			dmar_map_gfx = 0;
 			printk(KERN_INFO
@@ -777,7 +787,7 @@ static void iommu_flush_write_buffer(struct intel_iommu *iommu)
 	u32 val;
 	unsigned long flag;
 
-	if (!cap_rwbf(iommu->cap))
+	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
 		return;
 
 	val = iommu->gcmd | DMA_GCMD_WBF;
@@ -3129,3 +3139,15 @@ static struct iommu_ops intel_iommu_ops = {
 	.unmap		= intel_iommu_unmap_range,
 	.iova_to_phys	= intel_iommu_iova_to_phys,
 };
+
+static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
+{
+	/*
+	 * Mobile 4 Series Chipset neglects to set RWBF capability,
+	 * but needs it:
+	 */
+	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
+	rwbf_quirk = 1;
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index f78371b2252..45effc5726c 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -207,7 +207,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	return index;
 }
 
-static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
+static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
 {
 	struct qi_desc desc;
 
@@ -215,7 +215,7 @@
 		   | QI_IEC_SELECTIVE;
 	desc.high = 0;
 
-	qi_submit_sync(&desc, iommu);
+	return qi_submit_sync(&desc, iommu);
 }
 
 int map_irq_to_irte_handle(int irq, u16 *sub_handle)
@@ -283,6 +283,7 @@ int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
 
 int modify_irte(int irq, struct irte *irte_modified)
 {
+	int rc;
 	int index;
 	struct irte *irte;
 	struct intel_iommu *iommu;
@@ -303,14 +304,15 @@ int modify_irte(int irq, struct irte *irte_modified)
 	set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
 	__iommu_flush_cache(iommu, irte, sizeof(*irte));
 
-	qi_flush_iec(iommu, index, 0);
-
+	rc = qi_flush_iec(iommu, index, 0);
 	spin_unlock(&irq_2_ir_lock);
-	return 0;
+
+	return rc;
 }
 
 int flush_irte(int irq)
 {
+	int rc;
 	int index;
 	struct intel_iommu *iommu;
 	struct irq_2_iommu *irq_iommu;
@@ -326,10 +328,10 @@ int flush_irte(int irq)
 
 	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 
-	qi_flush_iec(iommu, index, irq_iommu->irte_mask);
+	rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 	spin_unlock(&irq_2_ir_lock);
 
-	return 0;
+	return rc;
 }
 
 struct intel_iommu *map_ioapic_to_ir(int apic)
@@ -355,6 +357,7 @@ struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
 
 int free_irte(int irq)
 {
+	int rc = 0;
 	int index, i;
 	struct irte *irte;
 	struct intel_iommu *iommu;
@@ -375,7 +378,7 @@ int free_irte(int irq)
 	if (!irq_iommu->sub_handle) {
 		for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
 			set_64bit((unsigned long *)irte, 0);
-		qi_flush_iec(iommu, index, irq_iommu->irte_mask);
+		rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 	}
 
 	irq_iommu->iommu = NULL;
@@ -385,7 +388,7 @@ int free_irte(int irq)
 
 	spin_unlock(&irq_2_ir_lock);
 
-	return 0;
+	return rc;
 }
 
 static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 44f15ff70c1..baba2eb5367 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -103,14 +103,12 @@ static void msix_set_enable(struct pci_dev *dev, int enable)
 	}
 }
 
-/*
- * Essentially, this is ((1 << (1 << x)) - 1), but without the
- * undefinedness of a << 32.
- */
 static inline __attribute_const__ u32 msi_mask(unsigned x)
 {
-	static const u32 mask[] = { 1, 2, 4, 0xf, 0xff, 0xffff, 0xffffffff };
-	return mask[x];
+	/* Don't shift by >= width of type */
+	if (x >= 5)
+		return 0xffffffff;
+	return (1 << (1 << x)) - 1;
 }
 
 static void msix_flush_writes(struct irq_desc *desc)
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index ab1d615425a..93eac142358 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -355,6 +355,8 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state)
 	int i = 0;
 
 	if (drv && drv->suspend) {
+		pci_power_t prev = pci_dev->current_state;
+
 		pci_dev->state_saved = false;
 
 		i = drv->suspend(pci_dev, state);
@@ -365,12 +367,16 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state)
 		if (pci_dev->state_saved)
 			goto Fixup;
 
-		if (WARN_ON_ONCE(pci_dev->current_state != PCI_D0))
+		if (pci_dev->current_state != PCI_D0
+		    && pci_dev->current_state != PCI_UNKNOWN) {
+			WARN_ONCE(pci_dev->current_state != prev,
+				"PCI PM: Device state not saved by %pF\n",
+				drv->suspend);
 			goto Fixup;
+		}
 	}
 
 	pci_save_state(pci_dev);
-	pci_dev->state_saved = true;
 	/*
 	 * This is for compatibility with existing code with legacy PM support.
 	 */
@@ -424,35 +430,20 @@ static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev)
 	pci_fixup_device(pci_fixup_resume_early, pci_dev);
 }
 
-static int pci_pm_default_resume(struct pci_dev *pci_dev)
+static void pci_pm_default_resume(struct pci_dev *pci_dev)
 {
 	pci_fixup_device(pci_fixup_resume, pci_dev);
 
 	if (!pci_is_bridge(pci_dev))
 		pci_enable_wake(pci_dev, PCI_D0, false);
-
-	return pci_pm_reenable_device(pci_dev);
-}
-
-static void pci_pm_default_suspend_generic(struct pci_dev *pci_dev)
-{
-	/* If device is enabled at this point, disable it */
-	pci_disable_enabled_device(pci_dev);
-	/*
-	 * Save state with interrupts enabled, because in principle the bus the
-	 * device is on may be put into a low power state after this code runs.
-	 */
-	pci_save_state(pci_dev);
 }
 
 static void pci_pm_default_suspend(struct pci_dev *pci_dev)
 {
-	pci_pm_default_suspend_generic(pci_dev);
-
+	/* Disable non-bridge devices without PM support */
 	if (!pci_is_bridge(pci_dev))
-		pci_prepare_to_sleep(pci_dev);
-
-	pci_fixup_device(pci_fixup_suspend, pci_dev);
+		pci_disable_enabled_device(pci_dev);
+	pci_save_state(pci_dev);
 }
 
 static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
@@ -497,21 +488,49 @@ static void pci_pm_complete(struct device *dev)
 static int pci_pm_suspend(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
-	struct device_driver *drv = dev->driver;
-	int error = 0;
+	struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_suspend(dev, PMSG_SUSPEND);
 
-	if (drv && drv->pm && drv->pm->suspend) {
-		error = drv->pm->suspend(dev);
-		suspend_report_result(drv->pm->suspend, error);
+	if (!pm) {
+		pci_pm_default_suspend(pci_dev);
+		goto Fixup;
 	}
 
-	if (!error)
-		pci_pm_default_suspend(pci_dev);
+	pci_dev->state_saved = false;
 
-	return error;
+	if (pm->suspend) {
+		pci_power_t prev = pci_dev->current_state;
+		int error;
+
+		error = pm->suspend(dev);
+		suspend_report_result(pm->suspend, error);
+		if (error)
+			return error;
+
+		if (pci_dev->state_saved)
+			goto Fixup;
+
+		if (pci_dev->current_state != PCI_D0
+		    && pci_dev->current_state != PCI_UNKNOWN) {
+			WARN_ONCE(pci_dev->current_state != prev,
+				"PCI PM: State of device not saved by %pF\n",
+				pm->suspend);
+			goto Fixup;
+		}
+	}
+
+	if (!pci_dev->state_saved) {
+		pci_save_state(pci_dev);
+		if (!pci_is_bridge(pci_dev))
+			pci_prepare_to_sleep(pci_dev);
+	}
+
+ Fixup:
+	pci_fixup_device(pci_fixup_suspend, pci_dev);
+
+	return 0;
 }
 
 static int pci_pm_suspend_noirq(struct device *dev)
@@ -554,7 +573,7 @@ static int pci_pm_resume_noirq(struct device *dev)
 static int pci_pm_resume(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
-	struct device_driver *drv = dev->driver;
+	struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 	int error = 0;
 
 	/*
@@ -567,12 +586,16 @@ static int pci_pm_resume(struct device *dev)
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_resume(dev);
 
-	error = pci_pm_default_resume(pci_dev);
+	pci_pm_default_resume(pci_dev);
 
-	if (!error && drv && drv->pm && drv->pm->resume)
-		error = drv->pm->resume(dev);
+	if (pm) {
+		if (pm->resume)
+			error = pm->resume(dev);
+	} else {
+		pci_pm_reenable_device(pci_dev);
+	}
 
-	return error;
+	return 0;
 }
 
 #else /* !CONFIG_SUSPEND */
@@ -589,21 +612,31 @@ static int pci_pm_resume(struct device *dev)
 static int pci_pm_freeze(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
-	struct device_driver *drv = dev->driver;
-	int error = 0;
+	struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_suspend(dev, PMSG_FREEZE);
 
-	if (drv && drv->pm && drv->pm->freeze) {
-		error = drv->pm->freeze(dev);
-		suspend_report_result(drv->pm->freeze, error);
+	if (!pm) {
+		pci_pm_default_suspend(pci_dev);
+		return 0;
 	}
 
-	if (!error)
-		pci_pm_default_suspend_generic(pci_dev);
+	pci_dev->state_saved = false;
 
-	return error;
+	if (pm->freeze) {
+		int error;
+
+		error = pm->freeze(dev);
+		suspend_report_result(pm->freeze, error);
+		if (error)
+			return error;
+	}
+
+	if (!pci_dev->state_saved)
+		pci_save_state(pci_dev);
+
+	return 0;
 }
 
 static int pci_pm_freeze_noirq(struct device *dev)
@@ -646,16 +679,18 @@ static int pci_pm_thaw_noirq(struct device *dev)
 static int pci_pm_thaw(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
-	struct device_driver *drv = dev->driver;
+	struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 	int error = 0;
 
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_resume(dev);
 
-	pci_pm_reenable_device(pci_dev);
-
-	if (drv && drv->pm && drv->pm->thaw)
-		error = drv->pm->thaw(dev);
+	if (pm) {
+		if (pm->thaw)
+			error = pm->thaw(dev);
+	} else {
+		pci_pm_reenable_device(pci_dev);
+	}
 
 	return error;
 }
@@ -663,22 +698,29 @@ static int pci_pm_thaw(struct device *dev)
 static int pci_pm_poweroff(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
-	struct device_driver *drv = dev->driver;
+	struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 	int error = 0;
 
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_suspend(dev, PMSG_HIBERNATE);
 
-	if (!drv || !drv->pm)
-		return 0;
+	if (!pm) {
+		pci_pm_default_suspend(pci_dev);
+		goto Fixup;
+	}
+
+	pci_dev->state_saved = false;
 
-	if (drv->pm->poweroff) {
-		error = drv->pm->poweroff(dev);
-		suspend_report_result(drv->pm->poweroff, error);
+	if (pm->poweroff) {
+		error = pm->poweroff(dev);
+		suspend_report_result(pm->poweroff, error);
 	}
 
-	if (!error)
-		pci_pm_default_suspend(pci_dev);
+	if (!pci_dev->state_saved && !pci_is_bridge(pci_dev))
+		pci_prepare_to_sleep(pci_dev);
+
+ Fixup:
+	pci_fixup_device(pci_fixup_suspend, pci_dev);
 
 	return error;
 }
@@ -719,7 +761,7 @@ static int pci_pm_restore_noirq(struct device *dev)
 static int pci_pm_restore(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
-	struct device_driver *drv = dev->driver;
+	struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 	int error = 0;
 
 	/*
@@ -732,10 +774,14 @@ static int pci_pm_restore(struct device *dev)
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_resume(dev);
 
-	error = pci_pm_default_resume(pci_dev);
+	pci_pm_default_resume(pci_dev);
 
-	if (!error && drv && drv->pm && drv->pm->restore)
-		error = drv->pm->restore(dev);
+	if (pm) {
+		if (pm->restore)
+			error = pm->restore(dev);
+	} else {
+		pci_pm_reenable_device(pci_dev);
+	}
 
 	return error;
 }
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index db7ec14fa71..dfc4e0ddf24 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -768,8 +768,8 @@ pci_read_rom(struct kobject *kobj, struct bin_attribute *bin_attr,
 		return -EINVAL;
 
 	rom = pci_map_rom(pdev, &size);	/* size starts out as PCI window size */
-	if (!rom)
-		return 0;
+	if (!rom || !size)
+		return -EIO;
 
 	if (off >= size)
 		count = 0;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 48807556b47..6d6120007af 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1418,10 +1418,10 @@ int pci_restore_standard_config(struct pci_dev *dev)
 		break;
 	}
 
-	dev->current_state = PCI_D0;
+	pci_update_current_state(dev, PCI_D0);
 
  Restore:
-	return pci_restore_state(dev);
+	return dev->state_saved ? pci_restore_state(dev) : 0;
 }
 
 /**
@@ -1540,16 +1540,21 @@ void pci_release_region(struct pci_dev *pdev, int bar)
 }
 
 /**
- *	pci_request_region - Reserved PCI I/O and memory resource
+ *	__pci_request_region - Reserved PCI I/O and memory resource
 *	@pdev: PCI device whose resources are to be reserved
 *	@bar: BAR to be reserved
 *	@res_name: Name to be associated with resource.
+ *	@exclusive: whether the region access is exclusive or not
 *
 *	Mark the PCI region associated with PCI device @pdev BR @bar as
 *	being reserved by owner @res_name. Do not access any
 *	address inside the PCI regions unless this call returns
 *	successfully.
 *
+ *	If @exclusive is set, then the region is marked so that userspace
+ *	is explicitly not allowed to map the resource via /dev/mem or
+ *	sysfs MMIO access.
+ *
 *	Returns 0 on success, or %EBUSY on error. A warning
 *	message is also printed on failure.
 */
@@ -1588,12 +1593,12 @@ err_out:
 }
 
 /**
- *	pci_request_region - Reserved PCI I/O and memory resource
+ *	pci_request_region - Reserve PCI I/O and memory resource
 *	@pdev: PCI device whose resources are to be reserved
 *	@bar: BAR to be reserved
- *	@res_name: Name to be associated with resource.
+ *	@res_name: Name to be associated with resource
 *
- *	Mark the PCI region associated with PCI device @pdev BR @bar as
+ *	Mark the PCI region associated with PCI device @pdev BAR @bar as
 *	being reserved by owner @res_name. Do not access any
 *	address inside the PCI regions unless this call returns
 *	successfully.
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 26ddf78ac30..07c0aa5275e 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -16,21 +16,21 @@ extern int pci_mmap_fits(struct pci_dev *pdev, int resno,
 #endif
 
 /**
- * Firmware PM callbacks
+ * struct pci_platform_pm_ops - Firmware PM callbacks
 *
- * @is_manageable - returns 'true' if given device is power manageable by the
- *                  platform firmware
+ * @is_manageable: returns 'true' if given device is power manageable by the
+ *                 platform firmware
 *
- * @set_state - invokes the platform firmware to set the device's power state
+ * @set_state: invokes the platform firmware to set the device's power state
 *
- * @choose_state - returns PCI power state of given device preferred by the
- *                 platform; to be used during system-wide transitions from a
- *                 sleeping state to the working state and vice versa
+ * @choose_state: returns PCI power state of given device preferred by the
+ *                platform; to be used during system-wide transitions from a
+ *                sleeping state to the working state and vice versa
 *
- * @can_wakeup - returns 'true' if given device is capable of waking up the
- *               system from a sleeping state
+ * @can_wakeup: returns 'true' if given device is capable of waking up the
+ *              system from a sleeping state
 *
- * @sleep_wake - enables/disables the system wake up capability of given device
+ * @sleep_wake: enables/disables the system wake up capability of given device
 *
 * If given platform is generally capable of power managing PCI devices, all of
 * these callbacks are mandatory.
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index aac7006949f..d0c97368586 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -108,6 +108,34 @@ int pci_cleanup_aer_correct_error_status(struct pci_dev *dev)
 }
 #endif /* 0 */
+
+static void set_device_error_reporting(struct pci_dev *dev, void *data)
+{
+	bool enable = *((bool *)data);
+
+	if (dev->pcie_type != PCIE_RC_PORT &&
+	    dev->pcie_type != PCIE_SW_UPSTREAM_PORT &&
+	    dev->pcie_type != PCIE_SW_DOWNSTREAM_PORT)
+		return;
+
+	if (enable)
+		pci_enable_pcie_error_reporting(dev);
+	else
+		pci_disable_pcie_error_reporting(dev);
+}
+
+/**
+ * set_downstream_devices_error_reporting - enable/disable the error reporting bits on the root port and its downstream ports.
+ * @dev: pointer to root port's pci_dev data structure
+ * @enable: true = enable error reporting, false = disable error reporting.
+ */
+static void set_downstream_devices_error_reporting(struct pci_dev *dev,
+						   bool enable)
+{
+	set_device_error_reporting(dev, &enable);
+	pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable);
+}
+
 static int find_device_iter(struct device *device, void *data)
 {
 	struct pci_dev *dev;
@@ -525,15 +553,11 @@ void aer_enable_rootport(struct aer_rpc *rpc)
 	pci_read_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, &reg32);
 	pci_write_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, reg32);
 
-	/* Enable Root Port device reporting error itself */
-	pci_read_config_word(pdev, pos+PCI_EXP_DEVCTL, &reg16);
-	reg16 = reg16 |
-		PCI_EXP_DEVCTL_CERE |
-		PCI_EXP_DEVCTL_NFERE |
-		PCI_EXP_DEVCTL_FERE |
-		PCI_EXP_DEVCTL_URRE;
-	pci_write_config_word(pdev, pos+PCI_EXP_DEVCTL,
-		reg16);
+	/*
+	 * Enable error reporting for the root port device and downstream port
+	 * devices.
+	 */
+	set_downstream_devices_error_reporting(pdev, true);
 
 	/* Enable Root Port's interrupt in response to error messages */
 	pci_write_config_dword(pdev,
@@ -553,6 +577,12 @@ static void disable_root_aer(struct aer_rpc *rpc)
 	u32 reg32;
 	int pos;
 
+	/*
+	 * Disable error reporting for the root port device and downstream port
+	 * devices.
+	 */
+	set_downstream_devices_error_reporting(pdev, false);
+
 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
 	/* Disable Root's interrupt in response to error messages */
 	pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, 0);
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 586b6f75910..b0367f168af 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -718,9 +718,9 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
 
 	/*
 	 * All PCIe functions are in one slot, remove one function will remove
-	 * the the whole slot, so just wait
+	 * the whole slot, so just wait until we are the last function left.
 	 */
-	if (!list_empty(&parent->subordinate->devices))
+	if (!list_is_last(&pdev->bus_list, &parent->subordinate->devices))
 		goto out;
 
 	/* All functions are removed, so just disable ASPM for the link */
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 99a914a027f..248b4db9155 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -55,25 +55,13 @@ static int pcie_portdrv_suspend(struct pci_dev *dev, pm_message_t state)
 
 }
 
-static int pcie_portdrv_suspend_late(struct pci_dev *dev, pm_message_t state)
-{
-	return pci_save_state(dev);
-}
-
-static int pcie_portdrv_resume_early(struct pci_dev *dev)
-{
-	return pci_restore_state(dev);
-}
-
 static int pcie_portdrv_resume(struct pci_dev *dev)
 {
-	pcie_portdrv_restore_config(dev);
+	pci_set_master(dev);
 	return pcie_port_device_resume(dev);
 }
 #else
 #define pcie_portdrv_suspend NULL
-#define pcie_portdrv_suspend_late NULL
-#define pcie_portdrv_resume_early NULL
 #define pcie_portdrv_resume NULL
 #endif
 
@@ -109,8 +97,6 @@ static int __devinit pcie_portdrv_probe (struct pci_dev *dev,
 
 	pcie_portdrv_save_config(dev);
 
-	pci_enable_pcie_error_reporting(dev);
-
 	return 0;
 }
 
@@ -292,8 +278,6 @@ static struct pci_driver pcie_portdriver = {
 	.remove		= pcie_portdrv_remove,
 
 	.suspend	= pcie_portdrv_suspend,
-	.suspend_late	= pcie_portdrv_suspend_late,
-	.resume_early	= pcie_portdrv_resume_early,
 	.resume		= pcie_portdrv_resume,
 
 	.err_handler	= &pcie_portdrv_err_handler,
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index baad093aafe..f20d55368ed 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1584,6 +1584,7 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_
 */
 #define AMD_813X_MISC		0x40
 #define AMD_813X_NOIOAMODE	(1<<0)
+#define AMD_813X_REV_B2		0x13
 
 static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
 {
@@ -1591,6 +1592,8 @@ static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
 
 	if (noioapicquirk)
 		return;
+	if (dev->revision == AMD_813X_REV_B2)
+		return;
 
 	pci_read_config_dword(dev, AMD_813X_MISC, &pci_config_dword);
 	pci_config_dword &= ~AMD_813X_NOIOAMODE;
@@ -1981,7 +1984,6 @@ static void __devinit quirk_msi_ht_cap(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE,
 			quirk_msi_ht_cap);
 
-
 /* The nVidia CK804 chipset may have 2 HT MSI mappings.
 * MSI are supported if the MSI capability set in any of these mappings.
 */
@@ -2032,6 +2034,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB,
 			ht_enable_msi_mapping);
 
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE,
+			 ht_enable_msi_mapping);
+
 /* The P5N32-SLI Premium motherboard from Asus has a problem with msi
 * for the MCP55 NIC. It is not yet determined whether the msi problem
 * also affects other devices. As for now, turn off msi for this device.
@@ -2048,10 +2053,100 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15,
 			nvenet_msi_disable);
 
-static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
+static void __devinit nv_ht_enable_msi_mapping(struct pci_dev *dev)
 {
 	struct pci_dev *host_bridge;
+	int pos;
+	int i, dev_no;
+	int found = 0;
+
+	dev_no = dev->devfn >> 3;
+	for (i = dev_no; i >= 0; i--) {
+		host_bridge = pci_get_slot(dev->bus, PCI_DEVFN(i, 0));
+		if (!host_bridge)
+			continue;
+
+		pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE);
+		if (pos != 0) {
+			found = 1;
+			break;
+		}
+		pci_dev_put(host_bridge);
+	}
+
+	if (!found)
+		return;
+
+	/* root did that ! */
+	if (msi_ht_cap_enabled(host_bridge))
+		goto out;
+
+	ht_enable_msi_mapping(dev);
+
+out:
+	pci_dev_put(host_bridge);
+}
+
+static void __devinit ht_disable_msi_mapping(struct pci_dev *dev)
+{
+	int pos, ttl = 48;
+
+	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
+	while (pos && ttl--) {
+		u8 flags;
+
+		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
+					 &flags) == 0) {
+			dev_info(&dev->dev, "Enabling HT MSI Mapping\n");
+
+			pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
+					      flags & ~HT_MSI_FLAGS_ENABLE);
+		}
+		pos = pci_find_next_ht_capability(dev, pos,
+						  HT_CAPTYPE_MSI_MAPPING);
+	}
+}
+
+static int __devinit ht_check_msi_mapping(struct pci_dev *dev)
+{
 	int pos, ttl = 48;
+	int found = 0;
+
+	/* check if there is HT MSI cap or enabled on this device */
+	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
+	while (pos && ttl--) {
+		u8 flags;
+
+		if (found < 1)
+			found = 1;
+		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
+					 &flags) == 0) {
+			if (flags & HT_MSI_FLAGS_ENABLE) {
+				if (found < 2) {
+					found = 2;
+					break;
+				}
+			}
+		}
+		pos = pci_find_next_ht_capability(dev, pos,
+						  HT_CAPTYPE_MSI_MAPPING);
+	}
+
+	return found;
+}
+
+static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
+{
+	struct pci_dev *host_bridge;
+	int pos;
+	int found;
+
+	/* check if there is HT MSI cap or enabled on this device */
+	found = ht_check_msi_mapping(dev);
+
+	/* no HT MSI CAP */
+	if (found == 0)
+		return;
 
 	/*
 	 * HT MSI mapping should be disabled on devices that are below
@@ -2067,24 +2162,19 @@ static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
 	pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE);
 	if (pos != 0) {
 		/* Host bridge is to HT */
-		ht_enable_msi_mapping(dev);
+		if (found == 1) {
+			/* it is not enabled, try to enable it */
+			nv_ht_enable_msi_mapping(dev);
+		}
 		return;
 	}
 
-	/* Host bridge is not to HT, disable HT MSI mapping on this device */
-	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
-	while (pos && ttl--) {
-		u8 flags;
+	/* HT MSI is not enabled */
+	if (found == 1)
+		return;
 
-		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
-					 &flags) == 0) {
-			dev_info(&dev->dev, "Disabling HT MSI mapping");
-			pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
-					      flags & ~HT_MSI_FLAGS_ENABLE);
-		}
-		pos = pci_find_next_ht_capability(dev, pos,
-						  HT_CAPTYPE_MSI_MAPPING);
-	}
+	/* Host bridge is not to HT, disable HT MSI mapping on this device */
+	ht_disable_msi_mapping(dev);
 }
 
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk);
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index 132a78159b6..36864a935d6 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -55,6 +55,7 @@ void pci_disable_rom(struct pci_dev *pdev)
 
 /**
 * pci_get_rom_size - obtain the actual size of the ROM image
+ * @pdev: target PCI device
 * @rom: kernel virtual pointer to image of ROM
 * @size: size of PCI window
 * return: size of actual ROM image
@@ -63,7 +64,7 @@ void pci_disable_rom(struct pci_dev *pdev)
 * The PCI window size could be much larger than the
 * actual image size.
 */
-size_t pci_get_rom_size(void __iomem *rom, size_t size)
+size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
 {
 	void __iomem *image;
 	int last_image;
@@ -72,8 +73,10 @@ size_t pci_get_rom_size(void __iomem *rom, size_t size)
 	do {
 		void __iomem *pds;
 		/* Standard PCI ROMs start out with these bytes 55 AA */
-		if (readb(image) != 0x55)
+		if (readb(image) != 0x55) {
+			dev_err(&pdev->dev, "Invalid ROM contents\n");
 			break;
+		}
 		if (readb(image + 1) != 0xAA)
 			break;
 		/* get the PCI data structure and check its signature */
@@ -159,7 +162,7 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
 	 * size is much larger than the actual size of the ROM.
 	 * True size is important if the ROM is going to be copied.
 	 */
-	*size = pci_get_rom_size(rom, *size);
+	*size = pci_get_rom_size(pdev, rom, *size);
 
 	return rom;
 }
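
The drivers/pci/msi.c hunk above replaces a lookup table with a guarded shift. The following is a minimal user-space sketch of that same computation, not kernel code: the function name and the printing loop are illustrative only. The guard exists because shifting a 32-bit value by 32 (the x >= 5 case, i.e. 32 MSI vectors) is undefined behaviour in C.

#include <stdio.h>
#include <stdint.h>

/* Illustrative re-implementation of the msi_mask() logic from the diff above. */
static uint32_t msi_mask(unsigned int x)
{
	/* Don't shift by >= width of type */
	if (x >= 5)
		return 0xffffffff;
	/* 1 << x vectors, one mask bit per vector */
	return (1U << (1U << x)) - 1;
}

int main(void)
{
	for (unsigned int x = 0; x <= 6; x++)
		printf("msi_mask(%u) = 0x%08x\n", x, msi_mask(x));
	return 0;
}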
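The parse_dmar_table() hunk in drivers/pci/dmar.c adds a zero-length check before advancing to the next ACPI sub-table. The sketch below shows the same guard pattern for a generic variable-length record walk; it is a self-contained, user-space illustration under assumed names (subtable_header, walk_subtables), not the kernel implementation.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct subtable_header {
	uint16_t type;
	uint16_t length;	/* length of this sub-table, header included */
};

static int walk_subtables(const uint8_t *buf, size_t size)
{
	const uint8_t *p = buf;
	const uint8_t *end = buf + size;

	while (p + sizeof(struct subtable_header) <= end) {
		const struct subtable_header *h = (const void *)p;

		/* Avoid looping forever on a malformed table */
		if (h->length == 0) {
			fprintf(stderr, "invalid 0-length structure\n");
			return -1;
		}
		printf("sub-table type %u, length %u\n", h->type, h->length);
		p += h->length;
	}
	return 0;
}

int main(void)
{
	struct subtable_header table[2] = {
		{ .type = 0, .length = sizeof(struct subtable_header) },
		{ .type = 1, .length = sizeof(struct subtable_header) },
	};

	return walk_subtables((const uint8_t *)table, sizeof(table)) ? 1 : 0;
}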