Diffstat (limited to 'drivers/pci')

 drivers/pci/dmar.c                | 106
 drivers/pci/hotplug/acpiphp_ibm.c |  11
 drivers/pci/hotplug/cpqphp.h      |   1
 drivers/pci/hotplug/pciehp.h      | 107
 drivers/pci/hotplug/pciehp_acpi.c |  17
 drivers/pci/hotplug/pciehp_core.c | 136
 drivers/pci/hotplug/pciehp_ctrl.c | 109
 drivers/pci/hotplug/pciehp_hpc.c  | 109
 drivers/pci/hotplug/pciehp_pci.c  |  23
 drivers/pci/intel-iommu.c         | 438
 drivers/pci/intr_remapping.c      |   8
 drivers/pci/iova.c                |  16
 drivers/pci/pci.c                 |  27
 drivers/pci/pcie/aer/aerdrv.c     |   5
 drivers/pci/pcie/aspm.c           |   9
 drivers/pci/pcie/portdrv_pci.c    |   3
 drivers/pci/quirks.c              |  38
 drivers/pci/setup-res.c           |  37

 18 files changed, 618 insertions(+), 582 deletions(-)
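The largest theme in the pciehp hunks below is the removal of the `struct hpc_ops` function-pointer table and of the per-controller slot list: since a PCI Express port exposes exactly one slot, calls such as `p_slot->hpc_ops->get_power_status(p_slot, &v)` become direct `pciehp_get_power_status(p_slot, &v)` calls, and `ctrl->slot_list` / `pciehp_find_slot()` collapse into a single `ctrl->slot` pointer. The stand-alone C sketch that follows only illustrates the before/after shape of that refactor; the `demo_*` names are invented for illustration and are not part of the patch.

	#include <stdio.h>

	/* Invented demo types -- only the *shape* mirrors the pciehp change. */
	struct demo_slot;

	/* Before: behaviour reached through a per-slot ops table. */
	struct demo_ops {
		int (*get_power_status)(struct demo_slot *slot, unsigned char *status);
	};

	struct demo_slot {
		const struct demo_ops *ops;	/* this indirection is what the patch removes */
		unsigned char powered;
	};

	static int demo_get_power_status(struct demo_slot *slot, unsigned char *status)
	{
		*status = slot->powered;
		return 0;
	}

	static const struct demo_ops demo_hpc_ops = {
		.get_power_status = demo_get_power_status,
	};

	int main(void)
	{
		struct demo_slot slot = { .ops = &demo_hpc_ops, .powered = 1 };
		unsigned char v;

		/* Before: one level of indirection per call. */
		slot.ops->get_power_status(&slot, &v);
		printf("via ops table: %u\n", v);

		/* After: the table is gone; call the function directly. */
		demo_get_power_status(&slot, &v);
		printf("direct call:   %u\n", v);
		return 0;
	}

Dropping the table is also why the pciehp.h hunk adds plain `extern`/global prototypes for `pciehp_power_on_slot()` and friends, and why the corresponding `hpc_*` helpers in pciehp_hpc.c lose their `static` qualifiers.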
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c index ab99783dcce..b952ebc7a78 100644 --- a/drivers/pci/dmar.c +++ b/drivers/pci/dmar.c @@ -34,9 +34,9 @@ #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/tboot.h> +#include <linux/dmi.h> -#undef PREFIX -#define PREFIX "DMAR:" +#define PREFIX "DMAR: " /* No locks are needed as DMA remapping hardware unit * list is constructed at boot time and hotplug of @@ -175,15 +175,6 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header) int ret = 0; drhd = (struct acpi_dmar_hardware_unit *)header; - if (!drhd->address) { - /* Promote an attitude of violence to a BIOS engineer today */ - WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n" - "BIOS vendor: %s; Ver: %s; Product Version: %s\n", - dmi_get_system_info(DMI_BIOS_VENDOR), - dmi_get_system_info(DMI_BIOS_VERSION), - dmi_get_system_info(DMI_PRODUCT_VERSION)); - return -ENODEV; - } dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL); if (!dmaru) return -ENOMEM; @@ -354,6 +345,7 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header) struct acpi_dmar_hardware_unit *drhd; struct acpi_dmar_reserved_memory *rmrr; struct acpi_dmar_atsr *atsr; + struct acpi_dmar_rhsa *rhsa; switch (header->type) { case ACPI_DMAR_TYPE_HARDWARE_UNIT: @@ -375,6 +367,12 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header) atsr = container_of(header, struct acpi_dmar_atsr, header); printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags); break; + case ACPI_DMAR_HARDWARE_AFFINITY: + rhsa = container_of(header, struct acpi_dmar_rhsa, header); + printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n", + (unsigned long long)rhsa->base_address, + rhsa->proximity_domain); + break; } } @@ -459,9 +457,13 @@ parse_dmar_table(void) ret = dmar_parse_one_atsr(entry_header); #endif break; + case ACPI_DMAR_HARDWARE_AFFINITY: + /* We don't do anything with RHSA (yet?) 
*/ + break; default: printk(KERN_WARNING PREFIX - "Unknown DMAR structure type\n"); + "Unknown DMAR structure type %d\n", + entry_header->type); ret = 0; /* for forward compatibility */ break; } @@ -577,18 +579,56 @@ int __init dmar_table_init(void) printk(KERN_INFO PREFIX "No ATSR found\n"); #endif -#ifdef CONFIG_INTR_REMAP - parse_ioapics_under_ir(); -#endif return 0; } +int __init check_zero_address(void) +{ + struct acpi_table_dmar *dmar; + struct acpi_dmar_header *entry_header; + struct acpi_dmar_hardware_unit *drhd; + + dmar = (struct acpi_table_dmar *)dmar_tbl; + entry_header = (struct acpi_dmar_header *)(dmar + 1); + + while (((unsigned long)entry_header) < + (((unsigned long)dmar) + dmar_tbl->length)) { + /* Avoid looping forever on bad ACPI tables */ + if (entry_header->length == 0) { + printk(KERN_WARNING PREFIX + "Invalid 0-length structure\n"); + return 0; + } + + if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) { + drhd = (void *)entry_header; + if (!drhd->address) { + /* Promote an attitude of violence to a BIOS engineer today */ + WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n" + "BIOS vendor: %s; Ver: %s; Product Version: %s\n", + dmi_get_system_info(DMI_BIOS_VENDOR), + dmi_get_system_info(DMI_BIOS_VERSION), + dmi_get_system_info(DMI_PRODUCT_VERSION)); +#ifdef CONFIG_DMAR + dmar_disabled = 1; +#endif + return 0; + } + break; + } + + entry_header = ((void *)entry_header + entry_header->length); + } + return 1; +} + void __init detect_intel_iommu(void) { int ret; ret = dmar_table_detect(); - + if (ret) + ret = check_zero_address(); { #ifdef CONFIG_INTR_REMAP struct acpi_table_dmar *dmar; @@ -639,20 +679,31 @@ int alloc_iommu(struct dmar_drhd_unit *drhd) iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG); iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG); + if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) { + /* Promote an attitude of violence to a BIOS engineer today */ + WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n" + "BIOS vendor: %s; Ver: %s; Product Version: %s\n", + drhd->reg_base_addr, + dmi_get_system_info(DMI_BIOS_VENDOR), + dmi_get_system_info(DMI_BIOS_VERSION), + dmi_get_system_info(DMI_PRODUCT_VERSION)); + goto err_unmap; + } + #ifdef CONFIG_DMAR agaw = iommu_calculate_agaw(iommu); if (agaw < 0) { printk(KERN_ERR "Cannot get a valid agaw for iommu (seq_id = %d)\n", iommu->seq_id); - goto error; + goto err_unmap; } msagaw = iommu_calculate_max_sagaw(iommu); if (msagaw < 0) { printk(KERN_ERR "Cannot get a valid max agaw for iommu (seq_id = %d)\n", iommu->seq_id); - goto error; + goto err_unmap; } #endif iommu->agaw = agaw; @@ -672,7 +723,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd) } ver = readl(iommu->reg + DMAR_VER_REG); - pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n", + pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n", (unsigned long long)drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver), (unsigned long long)iommu->cap, @@ -682,7 +733,10 @@ int alloc_iommu(struct dmar_drhd_unit *drhd) drhd->iommu = iommu; return 0; -error: + + err_unmap: + iounmap(iommu->reg); + error: kfree(iommu); return -1; } @@ -1219,7 +1273,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id) source_id, guest_addr); fault_index++; - if (fault_index > cap_num_fault_regs(iommu->cap)) + if (fault_index >= cap_num_fault_regs(iommu->cap)) fault_index = 0; spin_lock_irqsave(&iommu->register_lock, flag); } @@ -1312,3 +1366,13 @@ int dmar_reenable_qi(struct intel_iommu *iommu) return 0; } + +/* + * 
Check interrupt remapping support in DMAR table description. + */ +int dmar_ir_support(void) +{ + struct acpi_table_dmar *dmar; + dmar = (struct acpi_table_dmar *)dmar_tbl; + return dmar->flags & 0x1; +} diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c index 5befa7e379b..e7be66dbac2 100644 --- a/drivers/pci/hotplug/acpiphp_ibm.c +++ b/drivers/pci/hotplug/acpiphp_ibm.c @@ -398,23 +398,20 @@ static acpi_status __init ibm_find_acpi_device(acpi_handle handle, acpi_handle *phandle = (acpi_handle *)context; acpi_status status; struct acpi_device_info *info; - struct acpi_buffer info_buffer = { ACPI_ALLOCATE_BUFFER, NULL }; int retval = 0; - status = acpi_get_object_info(handle, &info_buffer); + status = acpi_get_object_info(handle, &info); if (ACPI_FAILURE(status)) { err("%s: Failed to get device information status=0x%x\n", __func__, status); return retval; } - info = info_buffer.pointer; - info->hardware_id.value[sizeof(info->hardware_id.value) - 1] = '\0'; if (info->current_status && (info->valid & ACPI_VALID_HID) && - (!strcmp(info->hardware_id.value, IBM_HARDWARE_ID1) || - !strcmp(info->hardware_id.value, IBM_HARDWARE_ID2))) { + (!strcmp(info->hardware_id.string, IBM_HARDWARE_ID1) || + !strcmp(info->hardware_id.string, IBM_HARDWARE_ID2))) { dbg("found hardware: %s, handle: %p\n", - info->hardware_id.value, handle); + info->hardware_id.string, handle); *phandle = handle; /* returning non-zero causes the search to stop * and returns this value to the caller of diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h index 53836001d51..9c6a9fd2681 100644 --- a/drivers/pci/hotplug/cpqphp.h +++ b/drivers/pci/hotplug/cpqphp.h @@ -32,6 +32,7 @@ #include <asm/io.h> /* for read? and write? functions */ #include <linux/delay.h> /* for delays */ #include <linux/mutex.h> +#include <linux/sched.h> /* for signal_pending() */ #define MY_NAME "cpqphp" diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 36faa9a8e18..3070f77eb56 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -72,15 +72,9 @@ do { \ #define SLOT_NAME_SIZE 10 struct slot { - u8 bus; - u8 device; u8 state; - u8 hp_slot; - u32 number; struct controller *ctrl; - struct hpc_ops *hpc_ops; struct hotplug_slot *hotplug_slot; - struct list_head slot_list; struct delayed_work work; /* work for button event */ struct mutex lock; }; @@ -92,18 +86,10 @@ struct event_info { }; struct controller { - struct mutex crit_sect; /* critical section mutex */ struct mutex ctrl_lock; /* controller lock */ - int num_slots; /* Number of slots on ctlr */ - int slot_num_inc; /* 1 or -1 */ - struct pci_dev *pci_dev; struct pcie_device *pcie; /* PCI Express port service */ - struct list_head slot_list; - struct hpc_ops *hpc_ops; + struct slot *slot; wait_queue_head_t queue; /* sleep & wake process */ - u8 slot_device_offset; - u32 first_slot; /* First physical slot number */ /* PCIE only has 1 slot */ - u8 slot_bus; /* Bus where the slots handled by this controller sit */ u32 slot_cap; u8 cap_base; struct timer_list poll_timer; @@ -131,40 +117,20 @@ struct controller { #define POWERON_STATE 3 #define POWEROFF_STATE 4 -/* Error messages */ -#define INTERLOCK_OPEN 0x00000002 -#define ADD_NOT_SUPPORTED 0x00000003 -#define CARD_FUNCTIONING 0x00000005 -#define ADAPTER_NOT_SAME 0x00000006 -#define NO_ADAPTER_PRESENT 0x00000009 -#define NOT_ENOUGH_RESOURCES 0x0000000B -#define DEVICE_TYPE_NOT_SUPPORTED 0x0000000C -#define WRONG_BUS_FREQUENCY 0x0000000D -#define 
POWER_FAILURE 0x0000000E - -/* Field definitions in Slot Capabilities Register */ -#define ATTN_BUTTN_PRSN 0x00000001 -#define PWR_CTRL_PRSN 0x00000002 -#define MRL_SENS_PRSN 0x00000004 -#define ATTN_LED_PRSN 0x00000008 -#define PWR_LED_PRSN 0x00000010 -#define HP_SUPR_RM_SUP 0x00000020 -#define EMI_PRSN 0x00020000 -#define NO_CMD_CMPL_SUP 0x00040000 - -#define ATTN_BUTTN(ctrl) ((ctrl)->slot_cap & ATTN_BUTTN_PRSN) -#define POWER_CTRL(ctrl) ((ctrl)->slot_cap & PWR_CTRL_PRSN) -#define MRL_SENS(ctrl) ((ctrl)->slot_cap & MRL_SENS_PRSN) -#define ATTN_LED(ctrl) ((ctrl)->slot_cap & ATTN_LED_PRSN) -#define PWR_LED(ctrl) ((ctrl)->slot_cap & PWR_LED_PRSN) -#define HP_SUPR_RM(ctrl) ((ctrl)->slot_cap & HP_SUPR_RM_SUP) -#define EMI(ctrl) ((ctrl)->slot_cap & EMI_PRSN) -#define NO_CMD_CMPL(ctrl) ((ctrl)->slot_cap & NO_CMD_CMPL_SUP) +#define ATTN_BUTTN(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_ABP) +#define POWER_CTRL(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_PCP) +#define MRL_SENS(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_MRLSP) +#define ATTN_LED(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_AIP) +#define PWR_LED(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_PIP) +#define HP_SUPR_RM(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_HPS) +#define EMI(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_EIP) +#define NO_CMD_CMPL(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_NCCS) +#define PSN(ctrl) ((ctrl)->slot_cap >> 19) extern int pciehp_sysfs_enable_slot(struct slot *slot); extern int pciehp_sysfs_disable_slot(struct slot *slot); extern u8 pciehp_handle_attention_button(struct slot *p_slot); - extern u8 pciehp_handle_switch_change(struct slot *p_slot); +extern u8 pciehp_handle_switch_change(struct slot *p_slot); extern u8 pciehp_handle_presence_change(struct slot *p_slot); extern u8 pciehp_handle_power_fault(struct slot *p_slot); extern int pciehp_configure_device(struct slot *p_slot); @@ -175,45 +141,30 @@ int pcie_init_notification(struct controller *ctrl); int pciehp_enable_slot(struct slot *p_slot); int pciehp_disable_slot(struct slot *p_slot); int pcie_enable_notification(struct controller *ctrl); +int pciehp_power_on_slot(struct slot *slot); +int pciehp_power_off_slot(struct slot *slot); +int pciehp_get_power_status(struct slot *slot, u8 *status); +int pciehp_get_attention_status(struct slot *slot, u8 *status); + +int pciehp_set_attention_status(struct slot *slot, u8 status); +int pciehp_get_latch_status(struct slot *slot, u8 *status); +int pciehp_get_adapter_status(struct slot *slot, u8 *status); +int pciehp_get_max_link_speed(struct slot *slot, enum pci_bus_speed *speed); +int pciehp_get_max_link_width(struct slot *slot, enum pcie_link_width *val); +int pciehp_get_cur_link_speed(struct slot *slot, enum pci_bus_speed *speed); +int pciehp_get_cur_link_width(struct slot *slot, enum pcie_link_width *val); +int pciehp_query_power_fault(struct slot *slot); +void pciehp_green_led_on(struct slot *slot); +void pciehp_green_led_off(struct slot *slot); +void pciehp_green_led_blink(struct slot *slot); +int pciehp_check_link_status(struct controller *ctrl); +void pciehp_release_ctrl(struct controller *ctrl); static inline const char *slot_name(struct slot *slot) { return hotplug_slot_name(slot->hotplug_slot); } -static inline struct slot *pciehp_find_slot(struct controller *ctrl, u8 device) -{ - struct slot *slot; - - list_for_each_entry(slot, &ctrl->slot_list, slot_list) { - if (slot->device == device) - return slot; - } - - ctrl_err(ctrl, "Slot (device=0x%02x) not found\n", device); - return NULL; -} - -struct hpc_ops { - int 
(*power_on_slot)(struct slot *slot); - int (*power_off_slot)(struct slot *slot); - int (*get_power_status)(struct slot *slot, u8 *status); - int (*get_attention_status)(struct slot *slot, u8 *status); - int (*set_attention_status)(struct slot *slot, u8 status); - int (*get_latch_status)(struct slot *slot, u8 *status); - int (*get_adapter_status)(struct slot *slot, u8 *status); - int (*get_max_bus_speed)(struct slot *slot, enum pci_bus_speed *speed); - int (*get_cur_bus_speed)(struct slot *slot, enum pci_bus_speed *speed); - int (*get_max_lnk_width)(struct slot *slot, enum pcie_link_width *val); - int (*get_cur_lnk_width)(struct slot *slot, enum pcie_link_width *val); - int (*query_power_fault)(struct slot *slot); - void (*green_led_on)(struct slot *slot); - void (*green_led_off)(struct slot *slot); - void (*green_led_blink)(struct slot *slot); - void (*release_ctlr)(struct controller *ctrl); - int (*check_lnk_status)(struct controller *ctrl); -}; - #ifdef CONFIG_ACPI #include <acpi/acpi.h> #include <acpi/acpi_bus.h> diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c index 7163e6a6cfa..37c8d3d0323 100644 --- a/drivers/pci/hotplug/pciehp_acpi.c +++ b/drivers/pci/hotplug/pciehp_acpi.c @@ -33,6 +33,11 @@ #define PCIEHP_DETECT_AUTO (2) #define PCIEHP_DETECT_DEFAULT PCIEHP_DETECT_AUTO +struct dummy_slot { + u32 number; + struct list_head list; +}; + static int slot_detection_mode; static char *pciehp_detect_mode; module_param(pciehp_detect_mode, charp, 0444); @@ -77,7 +82,7 @@ static int __init dummy_probe(struct pcie_device *dev) int pos; u32 slot_cap; acpi_handle handle; - struct slot *slot, *tmp; + struct dummy_slot *slot, *tmp; struct pci_dev *pdev = dev->port; /* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */ if (pciehp_get_hp_hw_control_from_firmware(pdev)) @@ -89,11 +94,11 @@ static int __init dummy_probe(struct pcie_device *dev) if (!slot) return -ENOMEM; slot->number = slot_cap >> 19; - list_for_each_entry(tmp, &dummy_slots, slot_list) { + list_for_each_entry(tmp, &dummy_slots, list) { if (tmp->number == slot->number) dup_slot_id++; } - list_add_tail(&slot->slot_list, &dummy_slots); + list_add_tail(&slot->list, &dummy_slots); handle = DEVICE_ACPI_HANDLE(&pdev->dev); if (!acpi_slot_detected && acpi_pci_detect_ejectable(handle)) acpi_slot_detected = 1; @@ -109,11 +114,11 @@ static struct pcie_port_service_driver __initdata dummy_driver = { static int __init select_detection_mode(void) { - struct slot *slot, *tmp; + struct dummy_slot *slot, *tmp; pcie_port_service_register(&dummy_driver); pcie_port_service_unregister(&dummy_driver); - list_for_each_entry_safe(slot, tmp, &dummy_slots, slot_list) { - list_del(&slot->slot_list); + list_for_each_entry_safe(slot, tmp, &dummy_slots, list) { + list_del(&slot->list); kfree(slot); } if (acpi_slot_detected && dup_slot_id) diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 2317557fdee..bc234719b1d 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -99,65 +99,55 @@ static void release_slot(struct hotplug_slot *hotplug_slot) kfree(hotplug_slot); } -static int init_slots(struct controller *ctrl) +static int init_slot(struct controller *ctrl) { - struct slot *slot; - struct hotplug_slot *hotplug_slot; - struct hotplug_slot_info *info; + struct slot *slot = ctrl->slot; + struct hotplug_slot *hotplug = NULL; + struct hotplug_slot_info *info = NULL; char name[SLOT_NAME_SIZE]; int retval = -ENOMEM; - list_for_each_entry(slot, &ctrl->slot_list, 
slot_list) { - hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL); - if (!hotplug_slot) - goto error; - - info = kzalloc(sizeof(*info), GFP_KERNEL); - if (!info) - goto error_hpslot; - - /* register this slot with the hotplug pci core */ - hotplug_slot->info = info; - hotplug_slot->private = slot; - hotplug_slot->release = &release_slot; - hotplug_slot->ops = &pciehp_hotplug_slot_ops; - slot->hotplug_slot = hotplug_slot; - snprintf(name, SLOT_NAME_SIZE, "%u", slot->number); - - ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:%02x " - "hp_slot=%x sun=%x slot_device_offset=%x\n", - pci_domain_nr(ctrl->pci_dev->subordinate), - slot->bus, slot->device, slot->hp_slot, slot->number, - ctrl->slot_device_offset); - retval = pci_hp_register(hotplug_slot, - ctrl->pci_dev->subordinate, - slot->device, - name); - if (retval) { - ctrl_err(ctrl, "pci_hp_register failed with error %d\n", - retval); - goto error_info; - } - get_power_status(hotplug_slot, &info->power_status); - get_attention_status(hotplug_slot, &info->attention_status); - get_latch_status(hotplug_slot, &info->latch_status); - get_adapter_status(hotplug_slot, &info->adapter_status); + hotplug = kzalloc(sizeof(*hotplug), GFP_KERNEL); + if (!hotplug) + goto out; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + goto out; + + /* register this slot with the hotplug pci core */ + hotplug->info = info; + hotplug->private = slot; + hotplug->release = &release_slot; + hotplug->ops = &pciehp_hotplug_slot_ops; + slot->hotplug_slot = hotplug; + snprintf(name, SLOT_NAME_SIZE, "%u", PSN(ctrl)); + + ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:00 sun=%x\n", + pci_domain_nr(ctrl->pcie->port->subordinate), + ctrl->pcie->port->subordinate->number, PSN(ctrl)); + retval = pci_hp_register(hotplug, + ctrl->pcie->port->subordinate, 0, name); + if (retval) { + ctrl_err(ctrl, + "pci_hp_register failed with error %d\n", retval); + goto out; + } + get_power_status(hotplug, &info->power_status); + get_attention_status(hotplug, &info->attention_status); + get_latch_status(hotplug, &info->latch_status); + get_adapter_status(hotplug, &info->adapter_status); +out: + if (retval) { + kfree(info); + kfree(hotplug); } - - return 0; -error_info: - kfree(info); -error_hpslot: - kfree(hotplug_slot); -error: return retval; } -static void cleanup_slots(struct controller *ctrl) +static void cleanup_slot(struct controller *ctrl) { - struct slot *slot; - list_for_each_entry(slot, &ctrl->slot_list, slot_list) - pci_hp_deregister(slot->hotplug_slot); + pci_hp_deregister(ctrl->slot->hotplug_slot); } /* @@ -173,7 +163,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status) hotplug_slot->info->attention_status = status; if (ATTN_LED(slot->ctrl)) - slot->hpc_ops->set_attention_status(slot, status); + pciehp_set_attention_status(slot, status); return 0; } @@ -208,7 +198,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value) ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); - retval = slot->hpc_ops->get_power_status(slot, value); + retval = pciehp_get_power_status(slot, value); if (retval < 0) *value = hotplug_slot->info->power_status; @@ -223,7 +213,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value) ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); - retval = slot->hpc_ops->get_attention_status(slot, value); + retval = pciehp_get_attention_status(slot, value); if (retval < 0) *value = hotplug_slot->info->attention_status; 
@@ -238,7 +228,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value) ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); - retval = slot->hpc_ops->get_latch_status(slot, value); + retval = pciehp_get_latch_status(slot, value); if (retval < 0) *value = hotplug_slot->info->latch_status; @@ -253,7 +243,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); - retval = slot->hpc_ops->get_adapter_status(slot, value); + retval = pciehp_get_adapter_status(slot, value); if (retval < 0) *value = hotplug_slot->info->adapter_status; @@ -269,7 +259,7 @@ static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); - retval = slot->hpc_ops->get_max_bus_speed(slot, value); + retval = pciehp_get_max_link_speed(slot, value); if (retval < 0) *value = PCI_SPEED_UNKNOWN; @@ -284,7 +274,7 @@ static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_spe ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); - retval = slot->hpc_ops->get_cur_bus_speed(slot, value); + retval = pciehp_get_cur_link_speed(slot, value); if (retval < 0) *value = PCI_SPEED_UNKNOWN; @@ -295,7 +285,7 @@ static int pciehp_probe(struct pcie_device *dev) { int rc; struct controller *ctrl; - struct slot *t_slot; + struct slot *slot; u8 value; struct pci_dev *pdev = dev->port; @@ -314,7 +304,7 @@ static int pciehp_probe(struct pcie_device *dev) set_service_data(dev, ctrl); /* Setup the slot information structures */ - rc = init_slots(ctrl); + rc = init_slot(ctrl); if (rc) { if (rc == -EBUSY) ctrl_warn(ctrl, "Slot already registered by another " @@ -332,15 +322,15 @@ static int pciehp_probe(struct pcie_device *dev) } /* Check if slot is occupied */ - t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset); - t_slot->hpc_ops->get_adapter_status(t_slot, &value); + slot = ctrl->slot; + pciehp_get_adapter_status(slot, &value); if (value) { if (pciehp_force) - pciehp_enable_slot(t_slot); + pciehp_enable_slot(slot); } else { /* Power off slot if not occupied */ if (POWER_CTRL(ctrl)) { - rc = t_slot->hpc_ops->power_off_slot(t_slot); + rc = pciehp_power_off_slot(slot); if (rc) goto err_out_free_ctrl_slot; } @@ -349,19 +339,19 @@ static int pciehp_probe(struct pcie_device *dev) return 0; err_out_free_ctrl_slot: - cleanup_slots(ctrl); + cleanup_slot(ctrl); err_out_release_ctlr: - ctrl->hpc_ops->release_ctlr(ctrl); + pciehp_release_ctrl(ctrl); err_out_none: return -ENODEV; } -static void pciehp_remove (struct pcie_device *dev) +static void pciehp_remove(struct pcie_device *dev) { struct controller *ctrl = get_service_data(dev); - cleanup_slots(ctrl); - ctrl->hpc_ops->release_ctlr(ctrl); + cleanup_slot(ctrl); + pciehp_release_ctrl(ctrl); } #ifdef CONFIG_PM @@ -376,20 +366,20 @@ static int pciehp_resume (struct pcie_device *dev) dev_info(&dev->device, "%s ENTRY\n", __func__); if (pciehp_force) { struct controller *ctrl = get_service_data(dev); - struct slot *t_slot; + struct slot *slot; u8 status; /* reinitialize the chipset's event detection logic */ pcie_enable_notification(ctrl); - t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset); + slot = ctrl->slot; /* Check if slot is occupied */ - t_slot->hpc_ops->get_adapter_status(t_slot, &status); + pciehp_get_adapter_status(slot, &status); if (status) - pciehp_enable_slot(t_slot); + pciehp_enable_slot(slot); else - 
pciehp_disable_slot(t_slot); + pciehp_disable_slot(slot); } return 0; } diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index b97cb4c3e0f..84487d126e4 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c @@ -82,7 +82,7 @@ u8 pciehp_handle_switch_change(struct slot *p_slot) /* Switch Change */ ctrl_dbg(ctrl, "Switch interrupt received\n"); - p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); + pciehp_get_latch_status(p_slot, &getstatus); if (getstatus) { /* * Switch opened @@ -114,7 +114,7 @@ u8 pciehp_handle_presence_change(struct slot *p_slot) /* Switch is open, assume a presence change * Save the presence state */ - p_slot->hpc_ops->get_adapter_status(p_slot, &presence_save); + pciehp_get_adapter_status(p_slot, &presence_save); if (presence_save) { /* * Card Present @@ -143,7 +143,7 @@ u8 pciehp_handle_power_fault(struct slot *p_slot) /* power fault */ ctrl_dbg(ctrl, "Power fault interrupt received\n"); - if ( !(p_slot->hpc_ops->query_power_fault(p_slot))) { + if (!pciehp_query_power_fault(p_slot)) { /* * power fault Cleared */ @@ -172,7 +172,7 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot) { /* turn off slot, turn on Amber LED, turn off Green LED if supported*/ if (POWER_CTRL(ctrl)) { - if (pslot->hpc_ops->power_off_slot(pslot)) { + if (pciehp_power_off_slot(pslot)) { ctrl_err(ctrl, "Issue of Slot Power Off command failed\n"); return; @@ -186,10 +186,10 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot) } if (PWR_LED(ctrl)) - pslot->hpc_ops->green_led_off(pslot); + pciehp_green_led_off(pslot); if (ATTN_LED(ctrl)) { - if (pslot->hpc_ops->set_attention_status(pslot, 1)) { + if (pciehp_set_attention_status(pslot, 1)) { ctrl_err(ctrl, "Issue of Set Attention Led command failed\n"); return; @@ -208,24 +208,20 @@ static int board_added(struct slot *p_slot) { int retval = 0; struct controller *ctrl = p_slot->ctrl; - struct pci_bus *parent = ctrl->pci_dev->subordinate; - - ctrl_dbg(ctrl, "%s: slot device, slot offset, hp slot = %d, %d, %d\n", - __func__, p_slot->device, ctrl->slot_device_offset, - p_slot->hp_slot); + struct pci_bus *parent = ctrl->pcie->port->subordinate; if (POWER_CTRL(ctrl)) { /* Power on slot */ - retval = p_slot->hpc_ops->power_on_slot(p_slot); + retval = pciehp_power_on_slot(p_slot); if (retval) return retval; } if (PWR_LED(ctrl)) - p_slot->hpc_ops->green_led_blink(p_slot); + pciehp_green_led_blink(p_slot); /* Check link training status */ - retval = p_slot->hpc_ops->check_lnk_status(ctrl); + retval = pciehp_check_link_status(ctrl); if (retval) { ctrl_err(ctrl, "Failed to check link status\n"); set_slot_off(ctrl, p_slot); @@ -233,21 +229,21 @@ static int board_added(struct slot *p_slot) } /* Check for a power fault */ - if (p_slot->hpc_ops->query_power_fault(p_slot)) { + if (pciehp_query_power_fault(p_slot)) { ctrl_dbg(ctrl, "Power fault detected\n"); - retval = POWER_FAILURE; + retval = -EIO; goto err_exit; } retval = pciehp_configure_device(p_slot); if (retval) { - ctrl_err(ctrl, "Cannot add device at %04x:%02x:%02x\n", - pci_domain_nr(parent), p_slot->bus, p_slot->device); + ctrl_err(ctrl, "Cannot add device at %04x:%02x:00\n", + pci_domain_nr(parent), parent->number); goto err_exit; } if (PWR_LED(ctrl)) - p_slot->hpc_ops->green_led_on(p_slot); + pciehp_green_led_on(p_slot); return 0; @@ -269,11 +265,9 @@ static int remove_board(struct slot *p_slot) if (retval) return retval; - ctrl_dbg(ctrl, "%s: hp_slot = %d\n", __func__, p_slot->hp_slot); - if 
(POWER_CTRL(ctrl)) { /* power off slot */ - retval = p_slot->hpc_ops->power_off_slot(p_slot); + retval = pciehp_power_off_slot(p_slot); if (retval) { ctrl_err(ctrl, "Issue of Slot Disable command failed\n"); @@ -287,9 +281,9 @@ static int remove_board(struct slot *p_slot) msleep(1000); } + /* turn off Green LED */ if (PWR_LED(ctrl)) - /* turn off Green LED */ - p_slot->hpc_ops->green_led_off(p_slot); + pciehp_green_led_off(p_slot); return 0; } @@ -317,18 +311,17 @@ static void pciehp_power_thread(struct work_struct *work) case POWEROFF_STATE: mutex_unlock(&p_slot->lock); ctrl_dbg(p_slot->ctrl, - "Disabling domain:bus:device=%04x:%02x:%02x\n", - pci_domain_nr(p_slot->ctrl->pci_dev->subordinate), - p_slot->bus, p_slot->device); + "Disabling domain:bus:device=%04x:%02x:00\n", + pci_domain_nr(p_slot->ctrl->pcie->port->subordinate), + p_slot->ctrl->pcie->port->subordinate->number); pciehp_disable_slot(p_slot); mutex_lock(&p_slot->lock); p_slot->state = STATIC_STATE; break; case POWERON_STATE: mutex_unlock(&p_slot->lock); - if (pciehp_enable_slot(p_slot) && - PWR_LED(p_slot->ctrl)) - p_slot->hpc_ops->green_led_off(p_slot); + if (pciehp_enable_slot(p_slot) && PWR_LED(p_slot->ctrl)) + pciehp_green_led_off(p_slot); mutex_lock(&p_slot->lock); p_slot->state = STATIC_STATE; break; @@ -379,10 +372,10 @@ static int update_slot_info(struct slot *slot) if (!info) return -ENOMEM; - slot->hpc_ops->get_power_status(slot, &(info->power_status)); - slot->hpc_ops->get_attention_status(slot, &(info->attention_status)); - slot->hpc_ops->get_latch_status(slot, &(info->latch_status)); - slot->hpc_ops->get_adapter_status(slot, &(info->adapter_status)); + pciehp_get_power_status(slot, &info->power_status); + pciehp_get_attention_status(slot, &info->attention_status); + pciehp_get_latch_status(slot, &info->latch_status); + pciehp_get_adapter_status(slot, &info->adapter_status); result = pci_hp_change_slot_info(slot->hotplug_slot, info); kfree (info); @@ -399,7 +392,7 @@ static void handle_button_press_event(struct slot *p_slot) switch (p_slot->state) { case STATIC_STATE: - p_slot->hpc_ops->get_power_status(p_slot, &getstatus); + pciehp_get_power_status(p_slot, &getstatus); if (getstatus) { p_slot->state = BLINKINGOFF_STATE; ctrl_info(ctrl, @@ -413,9 +406,9 @@ static void handle_button_press_event(struct slot *p_slot) } /* blink green LED and turn off amber */ if (PWR_LED(ctrl)) - p_slot->hpc_ops->green_led_blink(p_slot); + pciehp_green_led_blink(p_slot); if (ATTN_LED(ctrl)) - p_slot->hpc_ops->set_attention_status(p_slot, 0); + pciehp_set_attention_status(p_slot, 0); schedule_delayed_work(&p_slot->work, 5*HZ); break; @@ -430,13 +423,13 @@ static void handle_button_press_event(struct slot *p_slot) cancel_delayed_work(&p_slot->work); if (p_slot->state == BLINKINGOFF_STATE) { if (PWR_LED(ctrl)) - p_slot->hpc_ops->green_led_on(p_slot); + pciehp_green_led_on(p_slot); } else { if (PWR_LED(ctrl)) - p_slot->hpc_ops->green_led_off(p_slot); + pciehp_green_led_off(p_slot); } if (ATTN_LED(ctrl)) - p_slot->hpc_ops->set_attention_status(p_slot, 0); + pciehp_set_attention_status(p_slot, 0); ctrl_info(ctrl, "PCI slot #%s - action canceled " "due to button press\n", slot_name(p_slot)); p_slot->state = STATIC_STATE; @@ -474,7 +467,7 @@ static void handle_surprise_event(struct slot *p_slot) info->p_slot = p_slot; INIT_WORK(&info->work, pciehp_power_thread); - p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); + pciehp_get_adapter_status(p_slot, &getstatus); if (!getstatus) p_slot->state = POWEROFF_STATE; else @@ -498,9 +491,9 @@ 
static void interrupt_event_handler(struct work_struct *work) if (!POWER_CTRL(ctrl)) break; if (ATTN_LED(ctrl)) - p_slot->hpc_ops->set_attention_status(p_slot, 1); + pciehp_set_attention_status(p_slot, 1); if (PWR_LED(ctrl)) - p_slot->hpc_ops->green_led_off(p_slot); + pciehp_green_led_off(p_slot); break; case INT_PRESENCE_ON: case INT_PRESENCE_OFF: @@ -525,45 +518,38 @@ int pciehp_enable_slot(struct slot *p_slot) int rc; struct controller *ctrl = p_slot->ctrl; - /* Check to see if (latch closed, card present, power off) */ - mutex_lock(&p_slot->ctrl->crit_sect); - - rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); + rc = pciehp_get_adapter_status(p_slot, &getstatus); if (rc || !getstatus) { ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot)); - mutex_unlock(&p_slot->ctrl->crit_sect); return -ENODEV; } if (MRL_SENS(p_slot->ctrl)) { - rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); + rc = pciehp_get_latch_status(p_slot, &getstatus); if (rc || getstatus) { ctrl_info(ctrl, "Latch open on slot(%s)\n", slot_name(p_slot)); - mutex_unlock(&p_slot->ctrl->crit_sect); return -ENODEV; } } if (POWER_CTRL(p_slot->ctrl)) { - rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); + rc = pciehp_get_power_status(p_slot, &getstatus); if (rc || getstatus) { ctrl_info(ctrl, "Already enabled on slot(%s)\n", slot_name(p_slot)); - mutex_unlock(&p_slot->ctrl->crit_sect); return -EINVAL; } } - p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); + pciehp_get_latch_status(p_slot, &getstatus); rc = board_added(p_slot); if (rc) { - p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); + pciehp_get_latch_status(p_slot, &getstatus); } update_slot_info(p_slot); - mutex_unlock(&p_slot->ctrl->crit_sect); return rc; } @@ -577,35 +563,29 @@ int pciehp_disable_slot(struct slot *p_slot) if (!p_slot->ctrl) return 1; - /* Check to see if (latch closed, card present, power on) */ - mutex_lock(&p_slot->ctrl->crit_sect); - if (!HP_SUPR_RM(p_slot->ctrl)) { - ret = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); + ret = pciehp_get_adapter_status(p_slot, &getstatus); if (ret || !getstatus) { ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot)); - mutex_unlock(&p_slot->ctrl->crit_sect); return -ENODEV; } } if (MRL_SENS(p_slot->ctrl)) { - ret = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); + ret = pciehp_get_latch_status(p_slot, &getstatus); if (ret || getstatus) { ctrl_info(ctrl, "Latch open on slot(%s)\n", slot_name(p_slot)); - mutex_unlock(&p_slot->ctrl->crit_sect); return -ENODEV; } } if (POWER_CTRL(p_slot->ctrl)) { - ret = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); + ret = pciehp_get_power_status(p_slot, &getstatus); if (ret || !getstatus) { ctrl_info(ctrl, "Already disabled on slot(%s)\n", slot_name(p_slot)); - mutex_unlock(&p_slot->ctrl->crit_sect); return -EINVAL; } } @@ -613,7 +593,6 @@ int pciehp_disable_slot(struct slot *p_slot) ret = remove_board(p_slot); update_slot_info(p_slot); - mutex_unlock(&p_slot->ctrl->crit_sect); return ret; } diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 271f917b6f2..9ef4605c1ef 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -44,25 +44,25 @@ static atomic_t pciehp_num_controllers = ATOMIC_INIT(0); static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value) { - struct pci_dev *dev = ctrl->pci_dev; + struct pci_dev *dev = ctrl->pcie->port; return pci_read_config_word(dev, ctrl->cap_base + reg, value); } static inline int 
pciehp_readl(struct controller *ctrl, int reg, u32 *value) { - struct pci_dev *dev = ctrl->pci_dev; + struct pci_dev *dev = ctrl->pcie->port; return pci_read_config_dword(dev, ctrl->cap_base + reg, value); } static inline int pciehp_writew(struct controller *ctrl, int reg, u16 value) { - struct pci_dev *dev = ctrl->pci_dev; + struct pci_dev *dev = ctrl->pcie->port; return pci_write_config_word(dev, ctrl->cap_base + reg, value); } static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value) { - struct pci_dev *dev = ctrl->pci_dev; + struct pci_dev *dev = ctrl->pcie->port; return pci_write_config_dword(dev, ctrl->cap_base + reg, value); } @@ -266,7 +266,7 @@ static void pcie_wait_link_active(struct controller *ctrl) ctrl_dbg(ctrl, "Data Link Layer Link Active not set in 1000 msec\n"); } -static int hpc_check_lnk_status(struct controller *ctrl) +int pciehp_check_link_status(struct controller *ctrl) { u16 lnk_status; int retval = 0; @@ -305,7 +305,7 @@ static int hpc_check_lnk_status(struct controller *ctrl) return retval; } -static int hpc_get_attention_status(struct slot *slot, u8 *status) +int pciehp_get_attention_status(struct slot *slot, u8 *status) { struct controller *ctrl = slot->ctrl; u16 slot_ctrl; @@ -344,7 +344,7 @@ static int hpc_get_attention_status(struct slot *slot, u8 *status) return 0; } -static int hpc_get_power_status(struct slot *slot, u8 *status) +int pciehp_get_power_status(struct slot *slot, u8 *status) { struct controller *ctrl = slot->ctrl; u16 slot_ctrl; @@ -376,7 +376,7 @@ static int hpc_get_power_status(struct slot *slot, u8 *status) return retval; } -static int hpc_get_latch_status(struct slot *slot, u8 *status) +int pciehp_get_latch_status(struct slot *slot, u8 *status) { struct controller *ctrl = slot->ctrl; u16 slot_status; @@ -392,7 +392,7 @@ static int hpc_get_latch_status(struct slot *slot, u8 *status) return 0; } -static int hpc_get_adapter_status(struct slot *slot, u8 *status) +int pciehp_get_adapter_status(struct slot *slot, u8 *status) { struct controller *ctrl = slot->ctrl; u16 slot_status; @@ -408,7 +408,7 @@ static int hpc_get_adapter_status(struct slot *slot, u8 *status) return 0; } -static int hpc_query_power_fault(struct slot *slot) +int pciehp_query_power_fault(struct slot *slot) { struct controller *ctrl = slot->ctrl; u16 slot_status; @@ -422,7 +422,7 @@ static int hpc_query_power_fault(struct slot *slot) return !!(slot_status & PCI_EXP_SLTSTA_PFD); } -static int hpc_set_attention_status(struct slot *slot, u8 value) +int pciehp_set_attention_status(struct slot *slot, u8 value) { struct controller *ctrl = slot->ctrl; u16 slot_cmd; @@ -450,7 +450,7 @@ static int hpc_set_attention_status(struct slot *slot, u8 value) return rc; } -static void hpc_set_green_led_on(struct slot *slot) +void pciehp_green_led_on(struct slot *slot) { struct controller *ctrl = slot->ctrl; u16 slot_cmd; @@ -463,7 +463,7 @@ static void hpc_set_green_led_on(struct slot *slot) __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); } -static void hpc_set_green_led_off(struct slot *slot) +void pciehp_green_led_off(struct slot *slot) { struct controller *ctrl = slot->ctrl; u16 slot_cmd; @@ -476,7 +476,7 @@ static void hpc_set_green_led_off(struct slot *slot) __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); } -static void hpc_set_green_led_blink(struct slot *slot) +void pciehp_green_led_blink(struct slot *slot) { struct controller *ctrl = slot->ctrl; u16 slot_cmd; @@ -489,7 +489,7 @@ static void hpc_set_green_led_blink(struct slot *slot) __func__, ctrl->cap_base 
+ PCI_EXP_SLTCTL, slot_cmd); } -static int hpc_power_on_slot(struct slot * slot) +int pciehp_power_on_slot(struct slot * slot) { struct controller *ctrl = slot->ctrl; u16 slot_cmd; @@ -497,8 +497,6 @@ static int hpc_power_on_slot(struct slot * slot) u16 slot_status; int retval = 0; - ctrl_dbg(ctrl, "%s: slot->hp_slot %x\n", __func__, slot->hp_slot); - /* Clear sticky power-fault bit from previous power failures */ retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); if (retval) { @@ -539,7 +537,7 @@ static int hpc_power_on_slot(struct slot * slot) static inline int pcie_mask_bad_dllp(struct controller *ctrl) { - struct pci_dev *dev = ctrl->pci_dev; + struct pci_dev *dev = ctrl->pcie->port; int pos; u32 reg; @@ -556,7 +554,7 @@ static inline int pcie_mask_bad_dllp(struct controller *ctrl) static inline void pcie_unmask_bad_dllp(struct controller *ctrl) { - struct pci_dev *dev = ctrl->pci_dev; + struct pci_dev *dev = ctrl->pcie->port; u32 reg; int pos; @@ -570,7 +568,7 @@ static inline void pcie_unmask_bad_dllp(struct controller *ctrl) pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg); } -static int hpc_power_off_slot(struct slot * slot) +int pciehp_power_off_slot(struct slot * slot) { struct controller *ctrl = slot->ctrl; u16 slot_cmd; @@ -578,8 +576,6 @@ static int hpc_power_off_slot(struct slot * slot) int retval = 0; int changed; - ctrl_dbg(ctrl, "%s: slot->hp_slot %x\n", __func__, slot->hp_slot); - /* * Set Bad DLLP Mask bit in Correctable Error Mask * Register. This is the workaround against Bad DLLP error @@ -614,8 +610,8 @@ static int hpc_power_off_slot(struct slot * slot) static irqreturn_t pcie_isr(int irq, void *dev_id) { struct controller *ctrl = (struct controller *)dev_id; + struct slot *slot = ctrl->slot; u16 detected, intr_loc; - struct slot *p_slot; /* * In order to guarantee that all interrupt events are @@ -656,29 +652,27 @@ static irqreturn_t pcie_isr(int irq, void *dev_id) if (!(intr_loc & ~PCI_EXP_SLTSTA_CC)) return IRQ_HANDLED; - p_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset); - /* Check MRL Sensor Changed */ if (intr_loc & PCI_EXP_SLTSTA_MRLSC) - pciehp_handle_switch_change(p_slot); + pciehp_handle_switch_change(slot); /* Check Attention Button Pressed */ if (intr_loc & PCI_EXP_SLTSTA_ABP) - pciehp_handle_attention_button(p_slot); + pciehp_handle_attention_button(slot); /* Check Presence Detect Changed */ if (intr_loc & PCI_EXP_SLTSTA_PDC) - pciehp_handle_presence_change(p_slot); + pciehp_handle_presence_change(slot); /* Check Power Fault Detected */ if ((intr_loc & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) { ctrl->power_fault_detected = 1; - pciehp_handle_power_fault(p_slot); + pciehp_handle_power_fault(slot); } return IRQ_HANDLED; } -static int hpc_get_max_lnk_speed(struct slot *slot, enum pci_bus_speed *value) +int pciehp_get_max_link_speed(struct slot *slot, enum pci_bus_speed *value) { struct controller *ctrl = slot->ctrl; enum pcie_link_speed lnk_speed; @@ -709,7 +703,7 @@ static int hpc_get_max_lnk_speed(struct slot *slot, enum pci_bus_speed *value) return retval; } -static int hpc_get_max_lnk_width(struct slot *slot, +int pciehp_get_max_lnk_width(struct slot *slot, enum pcie_link_width *value) { struct controller *ctrl = slot->ctrl; @@ -759,7 +753,7 @@ static int hpc_get_max_lnk_width(struct slot *slot, return retval; } -static int hpc_get_cur_lnk_speed(struct slot *slot, enum pci_bus_speed *value) +int pciehp_get_cur_link_speed(struct slot *slot, enum pci_bus_speed *value) { struct controller *ctrl = slot->ctrl; enum 
pcie_link_speed lnk_speed = PCI_SPEED_UNKNOWN; @@ -791,7 +785,7 @@ static int hpc_get_cur_lnk_speed(struct slot *slot, enum pci_bus_speed *value) return retval; } -static int hpc_get_cur_lnk_width(struct slot *slot, +int pciehp_get_cur_lnk_width(struct slot *slot, enum pcie_link_width *value) { struct controller *ctrl = slot->ctrl; @@ -842,30 +836,6 @@ static int hpc_get_cur_lnk_width(struct slot *slot, return retval; } -static void pcie_release_ctrl(struct controller *ctrl); -static struct hpc_ops pciehp_hpc_ops = { - .power_on_slot = hpc_power_on_slot, - .power_off_slot = hpc_power_off_slot, - .set_attention_status = hpc_set_attention_status, - .get_power_status = hpc_get_power_status, - .get_attention_status = hpc_get_attention_status, - .get_latch_status = hpc_get_latch_status, - .get_adapter_status = hpc_get_adapter_status, - - .get_max_bus_speed = hpc_get_max_lnk_speed, - .get_cur_bus_speed = hpc_get_cur_lnk_speed, - .get_max_lnk_width = hpc_get_max_lnk_width, - .get_cur_lnk_width = hpc_get_cur_lnk_width, - - .query_power_fault = hpc_query_power_fault, - .green_led_on = hpc_set_green_led_on, - .green_led_off = hpc_set_green_led_off, - .green_led_blink = hpc_set_green_led_blink, - - .release_ctlr = pcie_release_ctrl, - .check_lnk_status = hpc_check_lnk_status, -}; - int pcie_enable_notification(struct controller *ctrl) { u16 cmd, mask; @@ -930,23 +900,16 @@ static int pcie_init_slot(struct controller *ctrl) if (!slot) return -ENOMEM; - slot->hp_slot = 0; slot->ctrl = ctrl; - slot->bus = ctrl->pci_dev->subordinate->number; - slot->device = ctrl->slot_device_offset + slot->hp_slot; - slot->hpc_ops = ctrl->hpc_ops; - slot->number = ctrl->first_slot; mutex_init(&slot->lock); INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work); - list_add(&slot->slot_list, &ctrl->slot_list); + ctrl->slot = slot; return 0; } static void pcie_cleanup_slot(struct controller *ctrl) { - struct slot *slot; - slot = list_first_entry(&ctrl->slot_list, struct slot, slot_list); - list_del(&slot->slot_list); + struct slot *slot = ctrl->slot; cancel_delayed_work(&slot->work); flush_scheduled_work(); flush_workqueue(pciehp_wq); @@ -957,7 +920,7 @@ static inline void dbg_ctrl(struct controller *ctrl) { int i; u16 reg16; - struct pci_dev *pdev = ctrl->pci_dev; + struct pci_dev *pdev = ctrl->pcie->port; if (!pciehp_debug) return; @@ -980,7 +943,7 @@ static inline void dbg_ctrl(struct controller *ctrl) (unsigned long long)pci_resource_start(pdev, i)); } ctrl_info(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap); - ctrl_info(ctrl, " Physical Slot Number : %d\n", ctrl->first_slot); + ctrl_info(ctrl, " Physical Slot Number : %d\n", PSN(ctrl)); ctrl_info(ctrl, " Attention Button : %3s\n", ATTN_BUTTN(ctrl) ? 
"yes" : "no"); ctrl_info(ctrl, " Power Controller : %3s\n", @@ -1014,10 +977,7 @@ struct controller *pcie_init(struct pcie_device *dev) dev_err(&dev->device, "%s: Out of memory\n", __func__); goto abort; } - INIT_LIST_HEAD(&ctrl->slot_list); - ctrl->pcie = dev; - ctrl->pci_dev = pdev; ctrl->cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP); if (!ctrl->cap_base) { ctrl_err(ctrl, "Cannot find PCI Express capability\n"); @@ -1029,11 +989,6 @@ struct controller *pcie_init(struct pcie_device *dev) } ctrl->slot_cap = slot_cap; - ctrl->first_slot = slot_cap >> 19; - ctrl->slot_device_offset = 0; - ctrl->num_slots = 1; - ctrl->hpc_ops = &pciehp_hpc_ops; - mutex_init(&ctrl->crit_sect); mutex_init(&ctrl->ctrl_lock); init_waitqueue_head(&ctrl->queue); dbg_ctrl(ctrl); @@ -1089,7 +1044,7 @@ abort: return NULL; } -void pcie_release_ctrl(struct controller *ctrl) +void pciehp_release_ctrl(struct controller *ctrl) { pcie_shutdown_notification(ctrl); pcie_cleanup_slot(ctrl); diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c index 02e24d63b3e..21733108add 100644 --- a/drivers/pci/hotplug/pciehp_pci.c +++ b/drivers/pci/hotplug/pciehp_pci.c @@ -63,27 +63,27 @@ static int __ref pciehp_add_bridge(struct pci_dev *dev) int pciehp_configure_device(struct slot *p_slot) { struct pci_dev *dev; - struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; + struct pci_bus *parent = p_slot->ctrl->pcie->port->subordinate; int num, fn; struct controller *ctrl = p_slot->ctrl; - dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, 0)); + dev = pci_get_slot(parent, PCI_DEVFN(0, 0)); if (dev) { ctrl_err(ctrl, "Device %s already exists " - "at %04x:%02x:%02x, cannot hot-add\n", pci_name(dev), - pci_domain_nr(parent), p_slot->bus, p_slot->device); + "at %04x:%02x:00, cannot hot-add\n", pci_name(dev), + pci_domain_nr(parent), parent->number); pci_dev_put(dev); return -EINVAL; } - num = pci_scan_slot(parent, PCI_DEVFN(p_slot->device, 0)); + num = pci_scan_slot(parent, PCI_DEVFN(0, 0)); if (num == 0) { ctrl_err(ctrl, "No new device found\n"); return -ENODEV; } for (fn = 0; fn < 8; fn++) { - dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, fn)); + dev = pci_get_slot(parent, PCI_DEVFN(0, fn)); if (!dev) continue; if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) { @@ -111,19 +111,18 @@ int pciehp_unconfigure_device(struct slot *p_slot) int j; u8 bctl = 0; u8 presence = 0; - struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; + struct pci_bus *parent = p_slot->ctrl->pcie->port->subordinate; u16 command; struct controller *ctrl = p_slot->ctrl; - ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:%02x\n", - __func__, pci_domain_nr(parent), p_slot->bus, p_slot->device); - ret = p_slot->hpc_ops->get_adapter_status(p_slot, &presence); + ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:00\n", + __func__, pci_domain_nr(parent), parent->number); + ret = pciehp_get_adapter_status(p_slot, &presence); if (ret) presence = 0; for (j = 0; j < 8; j++) { - struct pci_dev* temp = pci_get_slot(parent, - (p_slot->device << 3) | j); + struct pci_dev* temp = pci_get_slot(parent, PCI_DEVFN(0, j)); if (!temp) continue; if ((temp->class >> 16) == PCI_BASE_CLASS_DISPLAY) { diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 562221e1191..1840a0578a4 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -38,6 +38,7 @@ #include <linux/intel-iommu.h> #include <linux/sysdev.h> #include <linux/tboot.h> +#include <linux/dmi.h> #include <asm/cacheflush.h> #include <asm/iommu.h> 
#include "pci.h" @@ -47,6 +48,7 @@ #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) +#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e) #define IOAPIC_RANGE_START (0xfee00000) #define IOAPIC_RANGE_END (0xfeefffff) @@ -56,8 +58,14 @@ #define MAX_AGAW_WIDTH 64 -#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1) -#define DOMAIN_MAX_PFN(gaw) ((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1) +#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1) +#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1) + +/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR + to match. That way, we can use 'unsigned long' for PFNs with impunity. */ +#define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \ + __DOMAIN_MAX_PFN(gaw), (unsigned long)-1)) +#define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT) #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT) #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32)) @@ -87,6 +95,7 @@ static inline unsigned long virt_to_dma_pfn(void *p) /* global iommu list, set NULL for ignored DMAR units */ static struct intel_iommu **g_iommus; +static void __init check_tylersburg_isoch(void); static int rwbf_quirk; /* @@ -252,7 +261,8 @@ static inline int first_pte_in_page(struct dma_pte *pte) * 2. It maps to each iommu if successful. * 3. Each iommu mapps to this domain if successful. */ -struct dmar_domain *si_domain; +static struct dmar_domain *si_domain; +static int hw_pass_through = 1; /* devices under the same p2p bridge are owned in one domain */ #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0) @@ -728,7 +738,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, return NULL; domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE); - pteval = (virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE; + pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE; if (cmpxchg64(&pte->val, 0ULL, pteval)) { /* Someone else set it while we were thinking; use theirs. */ free_pgtable_page(tmp_page); @@ -778,9 +788,10 @@ static void dma_pte_clear_range(struct dmar_domain *domain, BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); + BUG_ON(start_pfn > last_pfn); /* we don't need lock here; nobody else touches the iova range */ - while (start_pfn <= last_pfn) { + do { first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1); if (!pte) { start_pfn = align_to_level(start_pfn + 1, 2); @@ -794,7 +805,8 @@ static void dma_pte_clear_range(struct dmar_domain *domain, domain_flush_cache(domain, first_pte, (void *)pte - (void *)first_pte); - } + + } while (start_pfn && start_pfn <= last_pfn); } /* free page table pages. 
last level pte should already be cleared */ @@ -810,6 +822,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain, BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); + BUG_ON(start_pfn > last_pfn); /* We don't need lock here; nobody else touches the iova range */ level = 2; @@ -820,7 +833,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain, if (tmp + level_size(level) - 1 > last_pfn) return; - while (tmp + level_size(level) - 1 <= last_pfn) { + do { first_pte = pte = dma_pfn_level_pte(domain, tmp, level); if (!pte) { tmp = align_to_level(tmp + 1, level + 1); @@ -839,7 +852,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain, domain_flush_cache(domain, first_pte, (void *)pte - (void *)first_pte); - } + } while (tmp && tmp + level_size(level) - 1 <= last_pfn); level++; } /* free pgd */ @@ -1158,6 +1171,8 @@ static int iommu_init_domains(struct intel_iommu *iommu) pr_debug("Number of Domains supportd <%ld>\n", ndomains); nlongs = BITS_TO_LONGS(ndomains); + spin_lock_init(&iommu->lock); + /* TBD: there might be 64K domains, * consider other allocation for future chip */ @@ -1170,12 +1185,9 @@ static int iommu_init_domains(struct intel_iommu *iommu) GFP_KERNEL); if (!iommu->domains) { printk(KERN_ERR "Allocating domain array failed\n"); - kfree(iommu->domain_ids); return -ENOMEM; } - spin_lock_init(&iommu->lock); - /* * if Caching mode is set, then invalid translations are tagged * with domainid 0. Hence we need to pre-allocate it. @@ -1195,22 +1207,24 @@ void free_dmar_iommu(struct intel_iommu *iommu) int i; unsigned long flags; - i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap)); - for (; i < cap_ndoms(iommu->cap); ) { - domain = iommu->domains[i]; - clear_bit(i, iommu->domain_ids); + if ((iommu->domains) && (iommu->domain_ids)) { + i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap)); + for (; i < cap_ndoms(iommu->cap); ) { + domain = iommu->domains[i]; + clear_bit(i, iommu->domain_ids); + + spin_lock_irqsave(&domain->iommu_lock, flags); + if (--domain->iommu_count == 0) { + if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) + vm_domain_exit(domain); + else + domain_exit(domain); + } + spin_unlock_irqrestore(&domain->iommu_lock, flags); - spin_lock_irqsave(&domain->iommu_lock, flags); - if (--domain->iommu_count == 0) { - if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) - vm_domain_exit(domain); - else - domain_exit(domain); + i = find_next_bit(iommu->domain_ids, + cap_ndoms(iommu->cap), i+1); } - spin_unlock_irqrestore(&domain->iommu_lock, flags); - - i = find_next_bit(iommu->domain_ids, - cap_ndoms(iommu->cap), i+1); } if (iommu->gcmd & DMA_GCMD_TE) @@ -1310,7 +1324,6 @@ static void iommu_detach_domain(struct dmar_domain *domain, } static struct iova_domain reserved_iova_list; -static struct lock_class_key reserved_alloc_key; static struct lock_class_key reserved_rbtree_key; static void dmar_init_reserved_ranges(void) @@ -1321,8 +1334,6 @@ static void dmar_init_reserved_ranges(void) init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN); - lockdep_set_class(&reserved_iova_list.iova_alloc_lock, - &reserved_alloc_key); lockdep_set_class(&reserved_iova_list.iova_rbtree_lock, &reserved_rbtree_key); @@ -1925,6 +1936,9 @@ error: } static int iommu_identity_mapping; +#define IDENTMAP_ALL 1 +#define IDENTMAP_GFX 2 +#define IDENTMAP_AZALIA 4 static int iommu_domain_identity_map(struct dmar_domain *domain, unsigned long long start, @@ -1959,14 +1973,35 @@ static int 
iommu_prepare_identity_map(struct pci_dev *pdev, struct dmar_domain *domain; int ret; - printk(KERN_INFO - "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", - pci_name(pdev), start, end); - domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH); if (!domain) return -ENOMEM; + /* For _hardware_ passthrough, don't bother. But for software + passthrough, we do it anyway -- it may indicate a memory + range which is reserved in E820, so which didn't get set + up to start with in si_domain */ + if (domain == si_domain && hw_pass_through) { + printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n", + pci_name(pdev), start, end); + return 0; + } + + printk(KERN_INFO + "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", + pci_name(pdev), start, end); + + if (end >> agaw_to_width(domain->agaw)) { + WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n" + "BIOS vendor: %s; Ver: %s; Product Version: %s\n", + agaw_to_width(domain->agaw), + dmi_get_system_info(DMI_BIOS_VENDOR), + dmi_get_system_info(DMI_BIOS_VERSION), + dmi_get_system_info(DMI_PRODUCT_VERSION)); + ret = -EIO; + goto error; + } + ret = iommu_domain_identity_map(domain, start, end); if (ret) goto error; @@ -2017,23 +2052,6 @@ static inline void iommu_prepare_isa(void) } #endif /* !CONFIG_DMAR_FLPY_WA */ -/* Initialize each context entry as pass through.*/ -static int __init init_context_pass_through(void) -{ - struct pci_dev *pdev = NULL; - struct dmar_domain *domain; - int ret; - - for_each_pci_dev(pdev) { - domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH); - ret = domain_context_mapping(domain, pdev, - CONTEXT_TT_PASS_THROUGH); - if (ret) - return ret; - } - return 0; -} - static int md_domain_init(struct dmar_domain *domain, int guest_width); static int __init si_domain_work_fn(unsigned long start_pfn, @@ -2048,7 +2066,7 @@ static int __init si_domain_work_fn(unsigned long start_pfn, } -static int si_domain_init(void) +static int __init si_domain_init(int hw) { struct dmar_drhd_unit *drhd; struct intel_iommu *iommu; @@ -2075,6 +2093,9 @@ static int si_domain_init(void) si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY; + if (hw) + return 0; + for_each_online_node(nid) { work_with_active_regions(nid, si_domain_work_fn, &ret); if (ret) @@ -2101,15 +2122,23 @@ static int identity_mapping(struct pci_dev *pdev) } static int domain_add_dev_info(struct dmar_domain *domain, - struct pci_dev *pdev) + struct pci_dev *pdev, + int translation) { struct device_domain_info *info; unsigned long flags; + int ret; info = alloc_devinfo_mem(); if (!info) return -ENOMEM; + ret = domain_context_mapping(domain, pdev, translation); + if (ret) { + free_devinfo_mem(info); + return ret; + } + info->segment = pci_domain_nr(pdev->bus); info->bus = pdev->bus->number; info->devfn = pdev->devfn; @@ -2127,8 +2156,14 @@ static int domain_add_dev_info(struct dmar_domain *domain, static int iommu_should_identity_map(struct pci_dev *pdev, int startup) { - if (iommu_identity_mapping == 2) - return IS_GFX_DEVICE(pdev); + if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev)) + return 1; + + if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev)) + return 1; + + if (!(iommu_identity_mapping & IDENTMAP_ALL)) + return 0; /* * We want to start off with all devices in the 1:1 domain, and @@ -2166,27 +2201,25 @@ static int iommu_should_identity_map(struct pci_dev *pdev, int startup) return 1; } -static int iommu_prepare_static_identity_mapping(void) +static int 
__init iommu_prepare_static_identity_mapping(int hw) { struct pci_dev *pdev = NULL; int ret; - ret = si_domain_init(); + ret = si_domain_init(hw); if (ret) return -EFAULT; for_each_pci_dev(pdev) { if (iommu_should_identity_map(pdev, 1)) { - printk(KERN_INFO "IOMMU: identity mapping for device %s\n", - pci_name(pdev)); + printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n", + hw ? "hardware" : "software", pci_name(pdev)); - ret = domain_context_mapping(si_domain, pdev, + ret = domain_add_dev_info(si_domain, pdev, + hw ? CONTEXT_TT_PASS_THROUGH : CONTEXT_TT_MULTI_LEVEL); if (ret) return ret; - ret = domain_add_dev_info(si_domain, pdev); - if (ret) - return ret; } } @@ -2200,14 +2233,6 @@ int __init init_dmars(void) struct pci_dev *pdev; struct intel_iommu *iommu; int i, ret; - int pass_through = 1; - - /* - * In case pass through can not be enabled, iommu tries to use identity - * mapping. - */ - if (iommu_pass_through) - iommu_identity_mapping = 1; /* * for each drhd @@ -2235,7 +2260,6 @@ int __init init_dmars(void) deferred_flush = kzalloc(g_num_of_iommus * sizeof(struct deferred_flush_tables), GFP_KERNEL); if (!deferred_flush) { - kfree(g_iommus); ret = -ENOMEM; goto error; } @@ -2262,14 +2286,8 @@ int __init init_dmars(void) goto error; } if (!ecap_pass_through(iommu->ecap)) - pass_through = 0; + hw_pass_through = 0; } - if (iommu_pass_through) - if (!pass_through) { - printk(KERN_INFO - "Pass Through is not supported by hardware.\n"); - iommu_pass_through = 0; - } /* * Start from the sane iommu hardware state. @@ -2324,64 +2342,60 @@ int __init init_dmars(void) } } + if (iommu_pass_through) + iommu_identity_mapping |= IDENTMAP_ALL; + +#ifdef CONFIG_DMAR_BROKEN_GFX_WA + iommu_identity_mapping |= IDENTMAP_GFX; +#endif + + check_tylersburg_isoch(); + /* - * If pass through is set and enabled, context entries of all pci - * devices are intialized by pass through translation type. + * If pass through is not set or not enabled, setup context entries for + * identity mappings for rmrr, gfx, and isa and may fall back to static + * identity mapping if iommu_identity_mapping is set. */ - if (iommu_pass_through) { - ret = init_context_pass_through(); + if (iommu_identity_mapping) { + ret = iommu_prepare_static_identity_mapping(hw_pass_through); if (ret) { - printk(KERN_ERR "IOMMU: Pass through init failed.\n"); - iommu_pass_through = 0; + printk(KERN_CRIT "Failed to setup IOMMU pass-through\n"); + goto error; } } - /* - * If pass through is not set or not enabled, setup context entries for - * identity mappings for rmrr, gfx, and isa and may fall back to static - * identity mapping if iommu_identity_mapping is set. 
+ * For each rmrr + * for each dev attached to rmrr + * do + * locate drhd for dev, alloc domain for dev + * allocate free domain + * allocate page table entries for rmrr + * if context not allocated for bus + * allocate and init context + * set present in root table for this bus + * init context with domain, translation etc + * endfor + * endfor */ - if (!iommu_pass_through) { -#ifdef CONFIG_DMAR_BROKEN_GFX_WA - if (!iommu_identity_mapping) - iommu_identity_mapping = 2; -#endif - if (iommu_identity_mapping) - iommu_prepare_static_identity_mapping(); - /* - * For each rmrr - * for each dev attached to rmrr - * do - * locate drhd for dev, alloc domain for dev - * allocate free domain - * allocate page table entries for rmrr - * if context not allocated for bus - * allocate and init context - * set present in root table for this bus - * init context with domain, translation etc - * endfor - * endfor - */ - printk(KERN_INFO "IOMMU: Setting RMRR:\n"); - for_each_rmrr_units(rmrr) { - for (i = 0; i < rmrr->devices_cnt; i++) { - pdev = rmrr->devices[i]; - /* - * some BIOS lists non-exist devices in DMAR - * table. - */ - if (!pdev) - continue; - ret = iommu_prepare_rmrr_dev(rmrr, pdev); - if (ret) - printk(KERN_ERR - "IOMMU: mapping reserved region failed\n"); - } + printk(KERN_INFO "IOMMU: Setting RMRR:\n"); + for_each_rmrr_units(rmrr) { + for (i = 0; i < rmrr->devices_cnt; i++) { + pdev = rmrr->devices[i]; + /* + * some BIOS lists non-exist devices in DMAR + * table. + */ + if (!pdev) + continue; + ret = iommu_prepare_rmrr_dev(rmrr, pdev); + if (ret) + printk(KERN_ERR + "IOMMU: mapping reserved region failed\n"); } - - iommu_prepare_isa(); } + iommu_prepare_isa(); + /* * for each drhd * enable fault log @@ -2404,11 +2418,12 @@ int __init init_dmars(void) iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); - iommu_disable_protect_mem_regions(iommu); ret = iommu_enable_translation(iommu); if (ret) goto error; + + iommu_disable_protect_mem_regions(iommu); } return 0; @@ -2455,8 +2470,7 @@ static struct iova *intel_alloc_iova(struct device *dev, return iova; } -static struct dmar_domain * -get_valid_domain_for_dev(struct pci_dev *pdev) +static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev) { struct dmar_domain *domain; int ret; @@ -2484,6 +2498,18 @@ get_valid_domain_for_dev(struct pci_dev *pdev) return domain; } +static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev) +{ + struct device_domain_info *info; + + /* No lock here, assumes no domain exit in normal case */ + info = dev->dev.archdata.iommu; + if (likely(info)) + return info->domain; + + return __get_valid_domain_for_dev(dev); +} + static int iommu_dummy(struct pci_dev *pdev) { return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO; @@ -2526,10 +2552,10 @@ static int iommu_no_mapping(struct device *dev) */ if (iommu_should_identity_map(pdev, 0)) { int ret; - ret = domain_add_dev_info(si_domain, pdev); - if (ret) - return 0; - ret = domain_context_mapping(si_domain, pdev, CONTEXT_TT_MULTI_LEVEL); + ret = domain_add_dev_info(si_domain, pdev, + hw_pass_through ? 
+ CONTEXT_TT_PASS_THROUGH : + CONTEXT_TT_MULTI_LEVEL); if (!ret) { printk(KERN_INFO "64bit %s uses identity mapping\n", pci_name(pdev)); @@ -2638,10 +2664,9 @@ static void flush_unmaps(void) unsigned long mask; struct iova *iova = deferred_flush[i].iova[j]; - mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT; - mask = ilog2(mask >> VTD_PAGE_SHIFT); + mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1)); iommu_flush_dev_iotlb(deferred_flush[i].domain[j], - iova->pfn_lo << PAGE_SHIFT, mask); + (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask); __free_iova(&deferred_flush[i].domain[j]->iovad, iova); } deferred_flush[i].next = 0; @@ -2734,12 +2759,6 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, } } -static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size, - int dir) -{ - intel_unmap_page(dev, dev_addr, size, dir, NULL); -} - static void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags) { @@ -2748,7 +2767,15 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size, size = PAGE_ALIGN(size); order = get_order(size); - flags &= ~(GFP_DMA | GFP_DMA32); + + if (!iommu_no_mapping(hwdev)) + flags &= ~(GFP_DMA | GFP_DMA32); + else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) { + if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32)) + flags |= GFP_DMA; + else + flags |= GFP_DMA32; + } vaddr = (void *)__get_free_pages(flags, order); if (!vaddr) @@ -2772,7 +2799,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, size = PAGE_ALIGN(size); order = get_order(size); - intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL); + intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL); free_pages((unsigned long)vaddr, order); } @@ -2808,11 +2835,18 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, /* free page tables */ dma_pte_free_pagetable(domain, start_pfn, last_pfn); - iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, - (last_pfn - start_pfn + 1)); - - /* free iova */ - __free_iova(&domain->iovad, iova); + if (intel_iommu_strict) { + iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, + last_pfn - start_pfn + 1); + /* free iova */ + __free_iova(&domain->iovad, iova); + } else { + add_unmap(domain, iova); + /* + * queue up the release of the unmap to save the 1/6th of the + * cpu used up by the iotlb flush operation... + */ + } } static int intel_nontranslate_map_sg(struct device *hddev, @@ -3056,8 +3090,8 @@ static int init_iommu_hw(void) DMA_CCMD_GLOBAL_INVL); iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); - iommu_disable_protect_mem_regions(iommu); iommu_enable_translation(iommu); + iommu_disable_protect_mem_regions(iommu); } return 0; @@ -3181,6 +3215,33 @@ static int __init init_iommu_sysfs(void) } #endif /* CONFIG_PM */ +/* + * Here we only respond to action of unbound device from driver. + * + * Added device is not attached to its DMAR domain here yet. That will happen + * when mapping the device to iova. 
+ */ +static int device_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct device *dev = data; + struct pci_dev *pdev = to_pci_dev(dev); + struct dmar_domain *domain; + + domain = find_domain(pdev); + if (!domain) + return 0; + + if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) + domain_remove_one_dev_info(domain, pdev); + + return 0; +} + +static struct notifier_block device_nb = { + .notifier_call = device_notifier, +}; + int __init intel_iommu_init(void) { int ret = 0; @@ -3205,7 +3266,7 @@ int __init intel_iommu_init(void) * Check the need for DMA-remapping initialization now. * Above initialization will also be used by Interrupt-remapping. */ - if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled) + if (no_iommu || swiotlb || dmar_disabled) return -ENODEV; iommu_init_mempool(); @@ -3227,19 +3288,14 @@ int __init intel_iommu_init(void) init_timer(&unmap_timer); force_iommu = 1; - - if (!iommu_pass_through) { - printk(KERN_INFO - "Multi-level page-table translation for DMAR.\n"); - dma_ops = &intel_dma_ops; - } else - printk(KERN_INFO - "DMAR: Pass through translation for DMAR.\n"); + dma_ops = &intel_dma_ops; init_iommu_sysfs(); register_iommu(&intel_iommu_ops); + bus_register_notifier(&pci_bus_type, &device_nb); + return 0; } @@ -3517,7 +3573,6 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, struct intel_iommu *iommu; int addr_width; u64 end; - int ret; /* normally pdev is not mapped */ if (unlikely(domain_context_mapped(pdev))) { @@ -3549,12 +3604,7 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, return -EFAULT; } - ret = domain_add_dev_info(dmar_domain, pdev); - if (ret) - return ret; - - ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL); - return ret; + return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL); } static void intel_iommu_detach_device(struct iommu_domain *domain, @@ -3671,3 +3721,61 @@ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev) } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf); + +/* On Tylersburg chipsets, some BIOSes have been known to enable the + ISOCH DMAR unit for the Azalia sound device, but not give it any + TLB entries, which causes it to deadlock. Check for that. We do + this in a function called from init_dmars(), instead of in a PCI + quirk, because we don't want to print the obnoxious "BIOS broken" + message if VT-d is actually disabled. +*/ +static void __init check_tylersburg_isoch(void) +{ + struct pci_dev *pdev; + uint32_t vtisochctrl; + + /* If there's no Azalia in the system anyway, forget it. */ + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL); + if (!pdev) + return; + pci_dev_put(pdev); + + /* System Management Registers. Might be hidden, in which case + we can't do the sanity check. But that's OK, because the + known-broken BIOSes _don't_ actually hide it, so far. */ + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL); + if (!pdev) + return; + + if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) { + pci_dev_put(pdev); + return; + } + + pci_dev_put(pdev); + + /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */ + if (vtisochctrl & 1) + return; + + /* Drop all bits other than the number of TLB entries */ + vtisochctrl &= 0x1c; + + /* If we have the recommended number of TLB entries (16), fine. */ + if (vtisochctrl == 0x10) + return; + + /* Zero TLB entries? You get to ride the short bus to school. 
*/ + if (!vtisochctrl) { + WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n" + "BIOS vendor: %s; Ver: %s; Product Version: %s\n", + dmi_get_system_info(DMI_BIOS_VENDOR), + dmi_get_system_info(DMI_BIOS_VERSION), + dmi_get_system_info(DMI_PRODUCT_VERSION)); + iommu_identity_mapping |= IDENTMAP_AZALIA; + return; + } + + printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n", + vtisochctrl); +} diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c index 44803644ca0..0ed78a764de 100644 --- a/drivers/pci/intr_remapping.c +++ b/drivers/pci/intr_remapping.c @@ -603,6 +603,9 @@ int __init intr_remapping_supported(void) if (disable_intremap) return 0; + if (!dmar_ir_support()) + return 0; + for_each_drhd_unit(drhd) { struct intel_iommu *iommu = drhd->iommu; @@ -618,6 +621,11 @@ int __init enable_intr_remapping(int eim) struct dmar_drhd_unit *drhd; int setup = 0; + if (parse_ioapics_under_ir() != 1) { + printk(KERN_INFO "Not enable interrupt remapping\n"); + return -1; + } + for_each_drhd_unit(drhd) { struct intel_iommu *iommu = drhd->iommu; diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c index 46dd440e231..7914951ef29 100644 --- a/drivers/pci/iova.c +++ b/drivers/pci/iova.c @@ -22,7 +22,6 @@ void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit) { - spin_lock_init(&iovad->iova_alloc_lock); spin_lock_init(&iovad->iova_rbtree_lock); iovad->rbroot = RB_ROOT; iovad->cached32_node = NULL; @@ -205,7 +204,6 @@ alloc_iova(struct iova_domain *iovad, unsigned long size, unsigned long limit_pfn, bool size_aligned) { - unsigned long flags; struct iova *new_iova; int ret; @@ -219,11 +217,9 @@ alloc_iova(struct iova_domain *iovad, unsigned long size, if (size_aligned) size = __roundup_pow_of_two(size); - spin_lock_irqsave(&iovad->iova_alloc_lock, flags); ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn, new_iova, size_aligned); - spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags); if (ret) { free_iova_mem(new_iova); return NULL; @@ -381,8 +377,7 @@ reserve_iova(struct iova_domain *iovad, struct iova *iova; unsigned int overlap = 0; - spin_lock_irqsave(&iovad->iova_alloc_lock, flags); - spin_lock(&iovad->iova_rbtree_lock); + spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) { if (__is_range_overlap(node, pfn_lo, pfn_hi)) { iova = container_of(node, struct iova, node); @@ -402,8 +397,7 @@ reserve_iova(struct iova_domain *iovad, iova = __insert_new_range(iovad, pfn_lo, pfn_hi); finish: - spin_unlock(&iovad->iova_rbtree_lock); - spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags); + spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); return iova; } @@ -420,8 +414,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to) unsigned long flags; struct rb_node *node; - spin_lock_irqsave(&from->iova_alloc_lock, flags); - spin_lock(&from->iova_rbtree_lock); + spin_lock_irqsave(&from->iova_rbtree_lock, flags); for (node = rb_first(&from->rbroot); node; node = rb_next(node)) { struct iova *iova = container_of(node, struct iova, node); struct iova *new_iova; @@ -430,6 +423,5 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to) printk(KERN_ERR "Reserve iova range %lx@%lx failed\n", iova->pfn_lo, iova->pfn_lo); } - spin_unlock(&from->iova_rbtree_lock); - spin_unlock_irqrestore(&from->iova_alloc_lock, flags); + spin_unlock_irqrestore(&from->iova_rbtree_lock, flags); } diff --git 
a/drivers/pci/pci.c b/drivers/pci/pci.c index 6edecff0b41..4e4c295a049 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -513,7 +513,11 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state) else if (state == PCI_D2 || dev->current_state == PCI_D2) udelay(PCI_PM_D2_DELAY); - dev->current_state = state; + pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); + dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); + if (dev->current_state != state && printk_ratelimit()) + dev_info(&dev->dev, "Refused to change power state, " + "currently in D%d\n", dev->current_state); /* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning @@ -2542,10 +2546,10 @@ int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type) /** * pci_set_vga_state - set VGA decode state on device and parents if requested - * @dev the PCI device - * @decode - true = enable decoding, false = disable decoding - * @command_bits PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY - * @change_bridge - traverse ancestors and change bridges + * @dev: the PCI device + * @decode: true = enable decoding, false = disable decoding + * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY + * @change_bridge: traverse ancestors and change bridges */ int pci_set_vga_state(struct pci_dev *dev, bool decode, unsigned int command_bits, bool change_bridge) @@ -2719,17 +2723,6 @@ int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev) return 1; } -static int __devinit pci_init(void) -{ - struct pci_dev *dev = NULL; - - while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { - pci_fixup_device(pci_fixup_final, dev); - } - - return 0; -} - static int __init pci_setup(char *str) { while (str) { @@ -2767,8 +2760,6 @@ static int __init pci_setup(char *str) } early_param("pci", pci_setup); -device_initcall(pci_init); - EXPORT_SYMBOL(pci_reenable_device); EXPORT_SYMBOL(pci_enable_device_io); EXPORT_SYMBOL(pci_enable_device_mem); diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c index 10c0e62bd5a..40c3cc5d1ca 100644 --- a/drivers/pci/pcie/aer/aerdrv.c +++ b/drivers/pci/pcie/aer/aerdrv.c @@ -17,6 +17,7 @@ #include <linux/module.h> #include <linux/pci.h> +#include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/pm.h> @@ -52,7 +53,7 @@ static struct pci_error_handlers aer_error_handlers = { static struct pcie_port_service_driver aerdriver = { .name = "aer", - .port_type = PCIE_ANY_PORT, + .port_type = PCIE_RC_PORT, .service = PCIE_PORT_SERVICE_AER, .probe = aer_probe, @@ -318,6 +319,8 @@ static int __init aer_service_init(void) { if (pcie_aer_disable) return -ENXIO; + if (!pci_msi_enabled()) + return -ENXIO; return pcie_port_service_register(&aerdriver); } diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index f289ca9bf18..5b7056cec00 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -303,9 +303,6 @@ static void pcie_get_aspm_reg(struct pci_dev *pdev, pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, ®32); info->support = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10; - /* 00b and 10b are defined as "Reserved". 
*/ - if (info->support == PCIE_LINK_STATE_L1) - info->support = 0; info->latency_encoding_l0s = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12; info->latency_encoding_l1 = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15; pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, ®16); @@ -659,8 +656,10 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev) free_link_state(link); /* Recheck latencies and configure upstream links */ - pcie_update_aspm_capable(root); - pcie_config_aspm_path(parent_link); + if (parent_link) { + pcie_update_aspm_capable(root); + pcie_config_aspm_path(parent_link); + } out: mutex_unlock(&aspm_lock); up_read(&pci_bus_sem); diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 6df5c984a79..f635e476d63 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -30,7 +30,6 @@ MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); /* global data */ -static const char device_name[] = "pcieport-driver"; static int pcie_portdrv_restore_config(struct pci_dev *dev) { @@ -262,7 +261,7 @@ static struct pci_error_handlers pcie_portdrv_err_handler = { }; static struct pci_driver pcie_portdriver = { - .name = (char *)device_name, + .name = "pcieport", .id_table = &port_pci_ids[0], .probe = pcie_portdrv_probe, diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 6099facecd7..245d2cdb476 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -670,6 +670,25 @@ static void __devinit quirk_vt8235_acpi(struct pci_dev *dev) } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_vt8235_acpi); +/* + * TI XIO2000a PCIe-PCI Bridge erroneously reports it supports fast back-to-back: + * Disable fast back-to-back on the secondary bus segment + */ +static void __devinit quirk_xio2000a(struct pci_dev *dev) +{ + struct pci_dev *pdev; + u16 command; + + dev_warn(&dev->dev, "TI XIO2000a quirk detected; " + "secondary bus fast back-to-back transfers disabled\n"); + list_for_each_entry(pdev, &dev->subordinate->devices, bus_list) { + pci_read_config_word(pdev, PCI_COMMAND, &command); + if (command & PCI_COMMAND_FAST_BACK) + pci_write_config_word(pdev, PCI_COMMAND, command & ~PCI_COMMAND_FAST_BACK); + } +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A, + quirk_xio2000a); #ifdef CONFIG_X86_IO_APIC @@ -990,7 +1009,7 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, static void __devinit quirk_amd_ide_mode(struct pci_dev *pdev) { - /* set SBX00 SATA in IDE mode to AHCI mode */ + /* set SBX00/Hudson-2 SATA in IDE mode to AHCI mode */ u8 tmp; pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &tmp); @@ -1009,8 +1028,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SB900_SATA_IDE, quirk_amd_ide_mode); -DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SB900_SATA_IDE, quirk_amd_ide_mode); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode); /* * Serverworks CSB5 IDE does not fully support native mode @@ -2572,6 +2591,19 @@ void 
pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) } pci_do_fixups(dev, start, end); } + +static int __init pci_apply_final_quirks(void) +{ + struct pci_dev *dev = NULL; + + while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { + pci_fixup_device(pci_fixup_final, dev); + } + + return 0; +} + +fs_initcall_sync(pci_apply_final_quirks); #else void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {} #endif diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 706f82d8111..c54526b206b 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -205,43 +205,6 @@ int pci_assign_resource(struct pci_dev *dev, int resno) return ret; } -#if 0 -int pci_assign_resource_fixed(struct pci_dev *dev, int resno) -{ - struct pci_bus *bus = dev->bus; - struct resource *res = dev->resource + resno; - unsigned int type_mask; - int i, ret = -EBUSY; - - type_mask = IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH; - - for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { - struct resource *r = bus->resource[i]; - if (!r) - continue; - - /* type_mask must match */ - if ((res->flags ^ r->flags) & type_mask) - continue; - - ret = request_resource(r, res); - - if (ret == 0) - break; - } - - if (ret) { - dev_err(&dev->dev, "BAR %d: can't allocate %s resource %pR\n", - resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res); - } else if (resno < PCI_BRIDGE_RESOURCES) { - pci_update_resource(dev, resno); - } - - return ret; -} -EXPORT_SYMBOL_GPL(pci_assign_resource_fixed); -#endif - /* Sort resources by alignment */ void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head) { |
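
Notes appended after the diff (its hunks are long, so the sketches are collected here rather than inline). Each one models, in plain standalone C, one change shown above; none of them is kernel code, and helper names, constants and sample values are illustrative unless they appear in the diff itself.

The dma_pte_free_pagetable() hunk turns the freeing loop into a do/while and adds a "tmp &&" test. The apparent reason is that aligning the cursor up to the next higher-level boundary can wrap it to zero at the very top of the address space, after which the old condition would restart the walk from the bottom. A minimal sketch of that wraparound, with an illustrative align_up() standing in for the driver's align_to_level()/level_size() helpers:

#include <stdint.h>
#include <stdio.h>

/* Round pfn up to the next boundary of a power-of-two region size. */
static uint64_t align_up(uint64_t pfn, uint64_t size)
{
        return (pfn + size - 1) & ~(size - 1);
}

int main(void)
{
        uint64_t level_size = 1ULL << 18;       /* pages covered by one upper-level entry (illustrative) */
        uint64_t last_pfn = UINT64_MAX - 10;    /* a range ending at the very top of the space */
        uint64_t tmp = align_up(last_pfn + 1, level_size);

        printf("cursor after alignment: %#llx\n", (unsigned long long)tmp);
        if (!tmp)
                printf("cursor wrapped to zero; the new 'tmp &&' test stops the loop here\n");
        return 0;
}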
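
The IDENTMAP_ALL / IDENTMAP_GFX / IDENTMAP_AZALIA defines replace the old magic value (iommu_identity_mapping == 2 meant "graphics only") with a bitmask that iommu_should_identity_map() tests. A simplified standalone model of that policy check; is_gfx/is_azalia are stand-ins for IS_GFX_DEVICE()/IS_AZALIA(), and the real function's extra per-device heuristics are omitted:

#include <stdio.h>

#define IDENTMAP_ALL    1
#define IDENTMAP_GFX    2
#define IDENTMAP_AZALIA 4

struct fake_dev {
        const char *name;
        int is_gfx;
        int is_azalia;
};

static int should_identity_map(unsigned int policy, const struct fake_dev *dev)
{
        if ((policy & IDENTMAP_AZALIA) && dev->is_azalia)
                return 1;
        if ((policy & IDENTMAP_GFX) && dev->is_gfx)
                return 1;
        /* the real function applies further per-device checks before saying yes */
        return !!(policy & IDENTMAP_ALL);
}

int main(void)
{
        const struct fake_dev gfx = { "gfx", 1, 0 };
        const struct fake_dev nic = { "nic", 0, 0 };
        unsigned int policy = IDENTMAP_GFX;     /* e.g. CONFIG_DMAR_BROKEN_GFX_WA */

        printf("%s -> %d\n", gfx.name, should_identity_map(policy, &gfx));
        printf("%s -> %d\n", nic.name, should_identity_map(policy, &nic));
        return 0;
}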
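
iommu_prepare_identity_map() now refuses an RMRR whose end address has bits above what the domain's AGAW can map, warning about the BIOS instead of silently building an unusable mapping. The test is just a right shift by the address width; a tiny standalone check, using 39 bits (a three-level table) as an example width:

#include <stdint.h>
#include <stdio.h>

/* Mirrors "if (end >> agaw_to_width(domain->agaw))" from the patch. */
static int rmrr_fits(uint64_t end, unsigned int addr_width_bits)
{
        return (end >> addr_width_bits) == 0;
}

int main(void)
{
        printf("%d\n", rmrr_fits(0xffffffffULL, 39));       /* 1: below 2^39 */
        printf("%d\n", rmrr_fits(0x1000000000000ULL, 39));  /* 0: needs 49 bits */
        return 0;
}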
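
In the flush_unmaps() hunk the flush address becomes (uint64_t)iova->pfn_lo << PAGE_SHIFT. On a 32-bit kernel unsigned long is 32 bits wide, so shifting a page frame number above the 4 GiB boundary without the cast silently drops the high bits. A small demonstration, assuming PAGE_SHIFT is 12:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        uint32_t pfn_lo = 0x123456;                      /* a page frame above 4 GiB */
        uint64_t full = (uint64_t)pfn_lo << PAGE_SHIFT;  /* what the patch computes */
        uint32_t truncated = (uint32_t)full;             /* what 32-bit arithmetic would keep */

        printf("full address:      %#llx\n", (unsigned long long)full);
        printf("truncated address: %#x\n", truncated);
        return 0;
}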
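
intel_alloc_coherent() now only strips GFP_DMA/GFP_DMA32 when the device is actually translated; an identity-mapped device gets physical addresses back, so the allocation has to respect its coherent DMA mask. A standalone model of that decision; the FAKE_GFP_* values and the local DMA_BIT_MASK() definition are illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>

#define FAKE_GFP_DMA    0x1u
#define FAKE_GFP_DMA32  0x2u
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

static unsigned int zone_flags(int translated, uint64_t coherent_mask,
                               uint64_t required_mask)
{
        if (translated)
                return 0;               /* the IOMMU can remap any page */
        if (coherent_mask >= required_mask)
                return 0;               /* mask already covers all memory */
        if (coherent_mask < DMA_BIT_MASK(32))
                return FAKE_GFP_DMA;    /* mask is below 4 GiB: need the low DMA zone */
        return FAKE_GFP_DMA32;
}

int main(void)
{
        printf("%#x\n", zone_flags(0, DMA_BIT_MASK(24), DMA_BIT_MASK(36)));
        printf("%#x\n", zone_flags(1, DMA_BIT_MASK(24), DMA_BIT_MASK(36)));
        return 0;
}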
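
check_tylersburg_isoch() reads the chipset register at config offset 0x188: bit 0 set means Azalia DMA is routed to the non-isoch DMAR unit, the 0x1c field is the number of TLB entries handed to the isoch unit, and 0x10 (16 entries) is the value the warning text calls recommended. A standalone decoder for a few sample register values, mirroring the patch's classification:

#include <stdint.h>
#include <stdio.h>

static const char *classify_isoch(uint32_t vtisochctrl)
{
        if (vtisochctrl & 1)
                return "Azalia on non-isoch unit: nothing to do";
        vtisochctrl &= 0x1c;            /* keep only the TLB-entry field */
        if (vtisochctrl == 0x10)
                return "16 TLB entries: recommended configuration";
        if (!vtisochctrl)
                return "no TLB entries: force identity map for Azalia";
        return "unusual TLB allocation: warn";
}

int main(void)
{
        uint32_t samples[] = { 0x1, 0x10, 0x0, 0x8 };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("%#x -> %s\n", samples[i], classify_isoch(samples[i]));
        return 0;
}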
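
The pci.c hunk stops pci_raw_set_power_state() from assuming a D-state write succeeded: it reads PMCSR back, records the state the device is really in, and logs when the device refused the transition. A userspace model of that read-back; fake_dev, write_pmcsr() and the accepts_d3 flag are stand-ins for real config-space behaviour, and only PCI_PM_CTRL_STATE_MASK is taken from the real header:

#include <stdint.h>
#include <stdio.h>

#define PCI_PM_CTRL_STATE_MASK 0x3

struct fake_dev {
        uint16_t pmcsr;         /* pretend PM control/status register */
        int accepts_d3;         /* does the device honour a D3hot request? */
        int current_state;
};

static void write_pmcsr(struct fake_dev *dev, int state)
{
        if (state == 3 && !dev->accepts_d3)
                state = 0;      /* device silently stays in D0 */
        dev->pmcsr = (uint16_t)((dev->pmcsr & ~PCI_PM_CTRL_STATE_MASK) | state);
}

static void set_power_state(struct fake_dev *dev, int state)
{
        write_pmcsr(dev, state);
        /* read back instead of trusting the request, as the patch does */
        dev->current_state = dev->pmcsr & PCI_PM_CTRL_STATE_MASK;
        if (dev->current_state != state)
                printf("Refused to change power state, currently in D%d\n",
                       dev->current_state);
}

int main(void)
{
        struct fake_dev stubborn = { 0, 0, 0 };

        set_power_state(&stubborn, 3);  /* prints the refusal message */
        return 0;
}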
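
The XIO2000a quirk walks every device on the bridge's secondary bus and clears the fast back-to-back enable bit from its COMMAND register, since the bridge advertises a capability it does not handle correctly. A trivial standalone version of that sweep over a fake bus; only PCI_COMMAND_FAST_BACK (bit 9, 0x200) is taken from the real header:

#include <stdint.h>
#include <stdio.h>

#define PCI_COMMAND_FAST_BACK 0x200

int main(void)
{
        uint16_t command_regs[] = { 0x0107, 0x0307, 0x0007 };   /* fake devices on the secondary bus */
        unsigned int i;

        for (i = 0; i < sizeof(command_regs) / sizeof(command_regs[0]); i++) {
                if (command_regs[i] & PCI_COMMAND_FAST_BACK) {
                        command_regs[i] &= (uint16_t)~PCI_COMMAND_FAST_BACK;
                        printf("device %u: fast back-to-back disabled\n", i);
                }
        }
        return 0;
}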