Diffstat (limited to 'drivers/pci')
33 files changed, 2389 insertions, 1869 deletions
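The largest change below splits the old drivers/pci/host/pcie-designware.c (which previously carried both the Exynos-specific glue and the Synopsys core) into a Samsung front end, pci-exynos.c, and a shared DesignWare core, pcie-designware.c/.h, connected through the struct pcie_host_ops callback table and dw_pcie_host_init(). As a reading aid, here is a minimal, hedged sketch of how an SoC driver plugs into that interface, modeled on add_pcie_port() and exynos_pcie_host_ops in this diff; the my_pcie_* names are hypothetical, and only pcie_port, pcie_host_ops and dw_pcie_host_init() come from the patch itself.

/*
 * Hypothetical SoC glue driver built on the pcie_host_ops interface
 * introduced in pcie-designware.h below.  Callbacks left NULL fall
 * back to the generic readl()/cfg_read() paths in pcie-designware.c.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include "pcie-designware.h"

static int my_pcie_link_up(struct pcie_port *pp)
{
	/* SoC-specific link-state check; compare exynos_pcie_link_up() */
	return 0;
}

static void my_pcie_host_init(struct pcie_port *pp)
{
	/*
	 * PHY setup, link training and IRQ enabling would go here,
	 * as exynos_pcie_host_init() does for the EXYNOS5440 port.
	 */
}

static struct pcie_host_ops my_pcie_host_ops = {
	.link_up	= my_pcie_link_up,
	.host_init	= my_pcie_host_init,
};

static int my_pcie_probe(struct platform_device *pdev)
{
	struct pcie_port *pp;

	pp = devm_kzalloc(&pdev->dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	pp->dev = &pdev->dev;	/* dw_pcie_host_init() parses dev->of_node */
	pp->root_bus_nr = -1;
	pp->ops = &my_pcie_host_ops;

	spin_lock_init(&pp->conf_lock);

	/*
	 * Reads the "ranges" and "num-lanes" DT properties, maps the
	 * DBI/config windows and registers the root bus via
	 * pci_common_init().
	 */
	return dw_pcie_host_init(pp);
}

The sketch assumes the usual platform_driver boilerplate around my_pcie_probe() and a device tree node providing the ranges and num-lanes properties that dw_pcie_host_init() requires; a real consumer, such as the new pci-exynos.c below, also requests its IRQ and clocks before calling into the core.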
diff --git a/drivers/pci/access.c b/drivers/pci/access.c index 1cc23661f79..0857ca981fa 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -475,37 +475,33 @@ static inline int pcie_cap_version(const struct pci_dev *dev) return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS; } -static inline bool pcie_cap_has_devctl(const struct pci_dev *dev) -{ - return true; -} - static inline bool pcie_cap_has_lnkctl(const struct pci_dev *dev) { int type = pci_pcie_type(dev); - return pcie_cap_version(dev) > 1 || + return type == PCI_EXP_TYPE_ENDPOINT || + type == PCI_EXP_TYPE_LEG_END || type == PCI_EXP_TYPE_ROOT_PORT || - type == PCI_EXP_TYPE_ENDPOINT || - type == PCI_EXP_TYPE_LEG_END; + type == PCI_EXP_TYPE_UPSTREAM || + type == PCI_EXP_TYPE_DOWNSTREAM || + type == PCI_EXP_TYPE_PCI_BRIDGE || + type == PCI_EXP_TYPE_PCIE_BRIDGE; } static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev) { int type = pci_pcie_type(dev); - return pcie_cap_version(dev) > 1 || - type == PCI_EXP_TYPE_ROOT_PORT || - (type == PCI_EXP_TYPE_DOWNSTREAM && - pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT); + return (type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_DOWNSTREAM) && + pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT; } static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev) { int type = pci_pcie_type(dev); - return pcie_cap_version(dev) > 1 || - type == PCI_EXP_TYPE_ROOT_PORT || + return type == PCI_EXP_TYPE_ROOT_PORT || type == PCI_EXP_TYPE_RC_EC; } @@ -520,7 +516,7 @@ static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos) case PCI_EXP_DEVCAP: case PCI_EXP_DEVCTL: case PCI_EXP_DEVSTA: - return pcie_cap_has_devctl(dev); + return true; case PCI_EXP_LNKCAP: case PCI_EXP_LNKCTL: case PCI_EXP_LNKSTA: diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index b1ff02ab4f1..fc1b7401374 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -216,24 +216,6 @@ void pci_bus_add_devices(const struct pci_bus *bus) } } -void pci_enable_bridges(struct pci_bus *bus) -{ - struct pci_dev *dev; - int retval; - - list_for_each_entry(dev, &bus->devices, bus_list) { - if (dev->subordinate) { - if (!pci_is_enabled(dev)) { - retval = pci_enable_device(dev); - if (retval) - dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n", retval); - pci_set_master(dev); - } - pci_enable_bridges(dev->subordinate); - } - } -} - /** pci_walk_bus - walk devices on/under bus, calling callback. 
* @top bus whose devices should be walked * @cb callback to be called for each device found @@ -301,4 +283,3 @@ EXPORT_SYMBOL(pci_bus_put); EXPORT_SYMBOL(pci_bus_alloc_resource); EXPORT_SYMBOL_GPL(pci_bus_add_device); EXPORT_SYMBOL(pci_bus_add_devices); -EXPORT_SYMBOL(pci_enable_bridges); diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig index 1184ff6fe86..e5ba4eb4e5b 100644 --- a/drivers/pci/host/Kconfig +++ b/drivers/pci/host/Kconfig @@ -4,6 +4,7 @@ menu "PCI host controller drivers" config PCI_MVEBU bool "Marvell EBU PCIe controller" depends on ARCH_MVEBU || ARCH_KIRKWOOD + depends on OF config PCIE_DW bool diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile index 086d8500e84..ab79ccb5bbf 100644 --- a/drivers/pci/host/Makefile +++ b/drivers/pci/host/Makefile @@ -1,2 +1,3 @@ -obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o obj-$(CONFIG_PCIE_DW) += pcie-designware.o +obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o +obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c new file mode 100644 index 00000000000..94e096bb2d0 --- /dev/null +++ b/drivers/pci/host/pci-exynos.c @@ -0,0 +1,552 @@ +/* + * PCIe host controller driver for Samsung EXYNOS SoCs + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Jingoo Han <jg1.han@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/gpio.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_gpio.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/resource.h> +#include <linux/signal.h> +#include <linux/types.h> + +#include "pcie-designware.h" + +#define to_exynos_pcie(x) container_of(x, struct exynos_pcie, pp) + +struct exynos_pcie { + void __iomem *elbi_base; + void __iomem *phy_base; + void __iomem *block_base; + int reset_gpio; + struct clk *clk; + struct clk *bus_clk; + struct pcie_port pp; +}; + +/* PCIe ELBI registers */ +#define PCIE_IRQ_PULSE 0x000 +#define IRQ_INTA_ASSERT (0x1 << 0) +#define IRQ_INTB_ASSERT (0x1 << 2) +#define IRQ_INTC_ASSERT (0x1 << 4) +#define IRQ_INTD_ASSERT (0x1 << 6) +#define PCIE_IRQ_LEVEL 0x004 +#define PCIE_IRQ_SPECIAL 0x008 +#define PCIE_IRQ_EN_PULSE 0x00c +#define PCIE_IRQ_EN_LEVEL 0x010 +#define PCIE_IRQ_EN_SPECIAL 0x014 +#define PCIE_PWR_RESET 0x018 +#define PCIE_CORE_RESET 0x01c +#define PCIE_CORE_RESET_ENABLE (0x1 << 0) +#define PCIE_STICKY_RESET 0x020 +#define PCIE_NONSTICKY_RESET 0x024 +#define PCIE_APP_INIT_RESET 0x028 +#define PCIE_APP_LTSSM_ENABLE 0x02c +#define PCIE_ELBI_RDLH_LINKUP 0x064 +#define PCIE_ELBI_LTSSM_ENABLE 0x1 +#define PCIE_ELBI_SLV_AWMISC 0x11c +#define PCIE_ELBI_SLV_ARMISC 0x120 +#define PCIE_ELBI_SLV_DBI_ENABLE (0x1 << 21) + +/* PCIe Purple registers */ +#define PCIE_PHY_GLOBAL_RESET 0x000 +#define PCIE_PHY_COMMON_RESET 0x004 +#define PCIE_PHY_CMN_REG 0x008 +#define PCIE_PHY_MAC_RESET 0x00c +#define PCIE_PHY_PLL_LOCKED 0x010 +#define PCIE_PHY_TRSVREG_RESET 0x020 +#define PCIE_PHY_TRSV_RESET 0x024 + +/* PCIe PHY registers */ +#define PCIE_PHY_IMPEDANCE 0x004 +#define PCIE_PHY_PLL_DIV_0 0x008 +#define PCIE_PHY_PLL_BIAS 0x00c +#define PCIE_PHY_DCC_FEEDBACK 0x014 +#define PCIE_PHY_PLL_DIV_1 0x05c +#define PCIE_PHY_TRSV0_EMP_LVL 0x084 +#define PCIE_PHY_TRSV0_DRV_LVL 0x088 
+#define PCIE_PHY_TRSV0_RXCDR 0x0ac +#define PCIE_PHY_TRSV0_LVCC 0x0dc +#define PCIE_PHY_TRSV1_EMP_LVL 0x144 +#define PCIE_PHY_TRSV1_RXCDR 0x16c +#define PCIE_PHY_TRSV1_LVCC 0x19c +#define PCIE_PHY_TRSV2_EMP_LVL 0x204 +#define PCIE_PHY_TRSV2_RXCDR 0x22c +#define PCIE_PHY_TRSV2_LVCC 0x25c +#define PCIE_PHY_TRSV3_EMP_LVL 0x2c4 +#define PCIE_PHY_TRSV3_RXCDR 0x2ec +#define PCIE_PHY_TRSV3_LVCC 0x31c + +static inline void exynos_elb_writel(struct exynos_pcie *pcie, u32 val, u32 reg) +{ + writel(val, pcie->elbi_base + reg); +} + +static inline u32 exynos_elb_readl(struct exynos_pcie *pcie, u32 reg) +{ + return readl(pcie->elbi_base + reg); +} + +static inline void exynos_phy_writel(struct exynos_pcie *pcie, u32 val, u32 reg) +{ + writel(val, pcie->phy_base + reg); +} + +static inline u32 exynos_phy_readl(struct exynos_pcie *pcie, u32 reg) +{ + return readl(pcie->phy_base + reg); +} + +static inline void exynos_blk_writel(struct exynos_pcie *pcie, u32 val, u32 reg) +{ + writel(val, pcie->block_base + reg); +} + +static inline u32 exynos_blk_readl(struct exynos_pcie *pcie, u32 reg) +{ + return readl(pcie->block_base + reg); +} + +static void exynos_pcie_sideband_dbi_w_mode(struct pcie_port *pp, bool on) +{ + u32 val; + struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); + + if (on) { + val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_AWMISC); + val |= PCIE_ELBI_SLV_DBI_ENABLE; + exynos_elb_writel(exynos_pcie, val, PCIE_ELBI_SLV_AWMISC); + } else { + val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_AWMISC); + val &= ~PCIE_ELBI_SLV_DBI_ENABLE; + exynos_elb_writel(exynos_pcie, val, PCIE_ELBI_SLV_AWMISC); + } +} + +static void exynos_pcie_sideband_dbi_r_mode(struct pcie_port *pp, bool on) +{ + u32 val; + struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); + + if (on) { + val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_ARMISC); + val |= PCIE_ELBI_SLV_DBI_ENABLE; + exynos_elb_writel(exynos_pcie, val, PCIE_ELBI_SLV_ARMISC); + } else { + val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_ARMISC); + val &= ~PCIE_ELBI_SLV_DBI_ENABLE; + exynos_elb_writel(exynos_pcie, val, PCIE_ELBI_SLV_ARMISC); + } +} + +static void exynos_pcie_assert_core_reset(struct pcie_port *pp) +{ + u32 val; + struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); + + val = exynos_elb_readl(exynos_pcie, PCIE_CORE_RESET); + val &= ~PCIE_CORE_RESET_ENABLE; + exynos_elb_writel(exynos_pcie, val, PCIE_CORE_RESET); + exynos_elb_writel(exynos_pcie, 0, PCIE_PWR_RESET); + exynos_elb_writel(exynos_pcie, 0, PCIE_STICKY_RESET); + exynos_elb_writel(exynos_pcie, 0, PCIE_NONSTICKY_RESET); +} + +static void exynos_pcie_deassert_core_reset(struct pcie_port *pp) +{ + u32 val; + struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); + + val = exynos_elb_readl(exynos_pcie, PCIE_CORE_RESET); + val |= PCIE_CORE_RESET_ENABLE; + + exynos_elb_writel(exynos_pcie, val, PCIE_CORE_RESET); + exynos_elb_writel(exynos_pcie, 1, PCIE_STICKY_RESET); + exynos_elb_writel(exynos_pcie, 1, PCIE_NONSTICKY_RESET); + exynos_elb_writel(exynos_pcie, 1, PCIE_APP_INIT_RESET); + exynos_elb_writel(exynos_pcie, 0, PCIE_APP_INIT_RESET); + exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_MAC_RESET); +} + +static void exynos_pcie_assert_phy_reset(struct pcie_port *pp) +{ + struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); + + exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_MAC_RESET); + exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_GLOBAL_RESET); +} + +static void exynos_pcie_deassert_phy_reset(struct pcie_port *pp) +{ + struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); + + 
exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_GLOBAL_RESET); + exynos_elb_writel(exynos_pcie, 1, PCIE_PWR_RESET); + exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_COMMON_RESET); + exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_CMN_REG); + exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_TRSVREG_RESET); + exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_TRSV_RESET); +} + +static void exynos_pcie_init_phy(struct pcie_port *pp) +{ + struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); + + /* DCC feedback control off */ + exynos_phy_writel(exynos_pcie, 0x29, PCIE_PHY_DCC_FEEDBACK); + + /* set TX/RX impedance */ + exynos_phy_writel(exynos_pcie, 0xd5, PCIE_PHY_IMPEDANCE); + + /* set 50Mhz PHY clock */ + exynos_phy_writel(exynos_pcie, 0x14, PCIE_PHY_PLL_DIV_0); + exynos_phy_writel(exynos_pcie, 0x12, PCIE_PHY_PLL_DIV_1); + + /* set TX Differential output for lane 0 */ + exynos_phy_writel(exynos_pcie, 0x7f, PCIE_PHY_TRSV0_DRV_LVL); + + /* set TX Pre-emphasis Level Control for lane 0 to minimum */ + exynos_phy_writel(exynos_pcie, 0x0, PCIE_PHY_TRSV0_EMP_LVL); + + /* set RX clock and data recovery bandwidth */ + exynos_phy_writel(exynos_pcie, 0xe7, PCIE_PHY_PLL_BIAS); + exynos_phy_writel(exynos_pcie, 0x82, PCIE_PHY_TRSV0_RXCDR); + exynos_phy_writel(exynos_pcie, 0x82, PCIE_PHY_TRSV1_RXCDR); + exynos_phy_writel(exynos_pcie, 0x82, PCIE_PHY_TRSV2_RXCDR); + exynos_phy_writel(exynos_pcie, 0x82, PCIE_PHY_TRSV3_RXCDR); + + /* change TX Pre-emphasis Level Control for lanes */ + exynos_phy_writel(exynos_pcie, 0x39, PCIE_PHY_TRSV0_EMP_LVL); + exynos_phy_writel(exynos_pcie, 0x39, PCIE_PHY_TRSV1_EMP_LVL); + exynos_phy_writel(exynos_pcie, 0x39, PCIE_PHY_TRSV2_EMP_LVL); + exynos_phy_writel(exynos_pcie, 0x39, PCIE_PHY_TRSV3_EMP_LVL); + + /* set LVCC */ + exynos_phy_writel(exynos_pcie, 0x20, PCIE_PHY_TRSV0_LVCC); + exynos_phy_writel(exynos_pcie, 0xa0, PCIE_PHY_TRSV1_LVCC); + exynos_phy_writel(exynos_pcie, 0xa0, PCIE_PHY_TRSV2_LVCC); + exynos_phy_writel(exynos_pcie, 0xa0, PCIE_PHY_TRSV3_LVCC); +} + +static void exynos_pcie_assert_reset(struct pcie_port *pp) +{ + struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); + + if (exynos_pcie->reset_gpio >= 0) + devm_gpio_request_one(pp->dev, exynos_pcie->reset_gpio, + GPIOF_OUT_INIT_HIGH, "RESET"); + return; +} + +static int exynos_pcie_establish_link(struct pcie_port *pp) +{ + u32 val; + int count = 0; + struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); + + if (dw_pcie_link_up(pp)) { + dev_err(pp->dev, "Link already up\n"); + return 0; + } + + /* assert reset signals */ + exynos_pcie_assert_core_reset(pp); + exynos_pcie_assert_phy_reset(pp); + + /* de-assert phy reset */ + exynos_pcie_deassert_phy_reset(pp); + + /* initialize phy */ + exynos_pcie_init_phy(pp); + + /* pulse for common reset */ + exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_COMMON_RESET); + udelay(500); + exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_COMMON_RESET); + + /* de-assert core reset */ + exynos_pcie_deassert_core_reset(pp); + + /* setup root complex */ + dw_pcie_setup_rc(pp); + + /* assert reset signal */ + exynos_pcie_assert_reset(pp); + + /* assert LTSSM enable */ + exynos_elb_writel(exynos_pcie, PCIE_ELBI_LTSSM_ENABLE, + PCIE_APP_LTSSM_ENABLE); + + /* check if the link is up or not */ + while (!dw_pcie_link_up(pp)) { + mdelay(100); + count++; + if (count == 10) { + while (exynos_phy_readl(exynos_pcie, + PCIE_PHY_PLL_LOCKED) == 0) { + val = exynos_blk_readl(exynos_pcie, + PCIE_PHY_PLL_LOCKED); + dev_info(pp->dev, "PLL Locked: 0x%x\n", val); + } + dev_err(pp->dev, "PCIe Link Fail\n"); + return -EINVAL; + } + } + + 
dev_info(pp->dev, "Link up\n"); + + return 0; +} + +static void exynos_pcie_clear_irq_pulse(struct pcie_port *pp) +{ + u32 val; + struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); + + val = exynos_elb_readl(exynos_pcie, PCIE_IRQ_PULSE); + exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_PULSE); + return; +} + +static void exynos_pcie_enable_irq_pulse(struct pcie_port *pp) +{ + u32 val; + struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); + + /* enable INTX interrupt */ + val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT | + IRQ_INTC_ASSERT | IRQ_INTD_ASSERT, + exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_EN_PULSE); + return; +} + +static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg) +{ + struct pcie_port *pp = arg; + + exynos_pcie_clear_irq_pulse(pp); + return IRQ_HANDLED; +} + +static void exynos_pcie_enable_interrupts(struct pcie_port *pp) +{ + exynos_pcie_enable_irq_pulse(pp); + return; +} + +static inline void exynos_pcie_readl_rc(struct pcie_port *pp, + void __iomem *dbi_base, u32 *val) +{ + exynos_pcie_sideband_dbi_r_mode(pp, true); + *val = readl(dbi_base); + exynos_pcie_sideband_dbi_r_mode(pp, false); + return; +} + +static inline void exynos_pcie_writel_rc(struct pcie_port *pp, + u32 val, void __iomem *dbi_base) +{ + exynos_pcie_sideband_dbi_w_mode(pp, true); + writel(val, dbi_base); + exynos_pcie_sideband_dbi_w_mode(pp, false); + return; +} + +static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, + u32 *val) +{ + int ret; + + exynos_pcie_sideband_dbi_r_mode(pp, true); + ret = cfg_read(pp->dbi_base + (where & ~0x3), where, size, val); + exynos_pcie_sideband_dbi_r_mode(pp, false); + return ret; +} + +static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, + u32 val) +{ + int ret; + + exynos_pcie_sideband_dbi_w_mode(pp, true); + ret = cfg_write(pp->dbi_base + (where & ~0x3), where, size, val); + exynos_pcie_sideband_dbi_w_mode(pp, false); + return ret; +} + +static int exynos_pcie_link_up(struct pcie_port *pp) +{ + struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); + u32 val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_RDLH_LINKUP); + + if (val == PCIE_ELBI_LTSSM_ENABLE) + return 1; + + return 0; +} + +static void exynos_pcie_host_init(struct pcie_port *pp) +{ + exynos_pcie_establish_link(pp); + exynos_pcie_enable_interrupts(pp); +} + +static struct pcie_host_ops exynos_pcie_host_ops = { + .readl_rc = exynos_pcie_readl_rc, + .writel_rc = exynos_pcie_writel_rc, + .rd_own_conf = exynos_pcie_rd_own_conf, + .wr_own_conf = exynos_pcie_wr_own_conf, + .link_up = exynos_pcie_link_up, + .host_init = exynos_pcie_host_init, +}; + +static int add_pcie_port(struct pcie_port *pp, struct platform_device *pdev) +{ + int ret; + + pp->irq = platform_get_irq(pdev, 1); + if (!pp->irq) { + dev_err(&pdev->dev, "failed to get irq\n"); + return -ENODEV; + } + ret = devm_request_irq(&pdev->dev, pp->irq, exynos_pcie_irq_handler, + IRQF_SHARED, "exynos-pcie", pp); + if (ret) { + dev_err(&pdev->dev, "failed to request irq\n"); + return ret; + } + + pp->root_bus_nr = -1; + pp->ops = &exynos_pcie_host_ops; + + spin_lock_init(&pp->conf_lock); + ret = dw_pcie_host_init(pp); + if (ret) { + dev_err(&pdev->dev, "failed to initialize host\n"); + return ret; + } + + return 0; +} + +static int __init exynos_pcie_probe(struct platform_device *pdev) +{ + struct exynos_pcie *exynos_pcie; + struct pcie_port *pp; + struct device_node *np = pdev->dev.of_node; + struct resource *elbi_base; + struct resource *phy_base; + struct resource *block_base; + int ret; + + exynos_pcie = 
devm_kzalloc(&pdev->dev, sizeof(*exynos_pcie), + GFP_KERNEL); + if (!exynos_pcie) { + dev_err(&pdev->dev, "no memory for exynos pcie\n"); + return -ENOMEM; + } + + pp = &exynos_pcie->pp; + + pp->dev = &pdev->dev; + + exynos_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0); + + exynos_pcie->clk = devm_clk_get(&pdev->dev, "pcie"); + if (IS_ERR(exynos_pcie->clk)) { + dev_err(&pdev->dev, "Failed to get pcie rc clock\n"); + return PTR_ERR(exynos_pcie->clk); + } + ret = clk_prepare_enable(exynos_pcie->clk); + if (ret) + return ret; + + exynos_pcie->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus"); + if (IS_ERR(exynos_pcie->bus_clk)) { + dev_err(&pdev->dev, "Failed to get pcie bus clock\n"); + ret = PTR_ERR(exynos_pcie->bus_clk); + goto fail_clk; + } + ret = clk_prepare_enable(exynos_pcie->bus_clk); + if (ret) + goto fail_clk; + + elbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); + exynos_pcie->elbi_base = devm_ioremap_resource(&pdev->dev, elbi_base); + if (IS_ERR(exynos_pcie->elbi_base)) + return PTR_ERR(exynos_pcie->elbi_base); + + phy_base = platform_get_resource(pdev, IORESOURCE_MEM, 1); + exynos_pcie->phy_base = devm_ioremap_resource(&pdev->dev, phy_base); + if (IS_ERR(exynos_pcie->phy_base)) + return PTR_ERR(exynos_pcie->phy_base); + + block_base = platform_get_resource(pdev, IORESOURCE_MEM, 2); + exynos_pcie->block_base = devm_ioremap_resource(&pdev->dev, block_base); + if (IS_ERR(exynos_pcie->block_base)) + return PTR_ERR(exynos_pcie->block_base); + + ret = add_pcie_port(pp, pdev); + if (ret < 0) + goto fail_bus_clk; + + platform_set_drvdata(pdev, exynos_pcie); + return 0; + +fail_bus_clk: + clk_disable_unprepare(exynos_pcie->bus_clk); +fail_clk: + clk_disable_unprepare(exynos_pcie->clk); + return ret; +} + +static int __exit exynos_pcie_remove(struct platform_device *pdev) +{ + struct exynos_pcie *exynos_pcie = platform_get_drvdata(pdev); + + clk_disable_unprepare(exynos_pcie->bus_clk); + clk_disable_unprepare(exynos_pcie->clk); + + return 0; +} + +static const struct of_device_id exynos_pcie_of_match[] = { + { .compatible = "samsung,exynos5440-pcie", }, + {}, +}; +MODULE_DEVICE_TABLE(of, exynos_pcie_of_match); + +static struct platform_driver exynos_pcie_driver = { + .remove = __exit_p(exynos_pcie_remove), + .driver = { + .name = "exynos-pcie", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(exynos_pcie_of_match), + }, +}; + +/* Exynos PCIe driver does not allow module unload */ + +static int __init pcie_init(void) +{ + return platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe); +} +subsys_initcall(pcie_init); + +MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>"); +MODULE_DESCRIPTION("Samsung PCIe host controller driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c index 13a633b1612..ce1543a584a 100644 --- a/drivers/pci/host/pci-mvebu.c +++ b/drivers/pci/host/pci-mvebu.c @@ -86,10 +86,6 @@ struct mvebu_sw_pci_bridge { u16 secondary_status; u16 membase; u16 memlimit; - u16 prefmembase; - u16 prefmemlimit; - u32 prefbaseupper; - u32 preflimitupper; u16 iobaseupper; u16 iolimitupper; u8 cappointer; @@ -419,15 +415,7 @@ static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port, break; case PCI_PREF_MEMORY_BASE: - *value = (bridge->prefmemlimit << 16 | bridge->prefmembase); - break; - - case PCI_PREF_BASE_UPPER32: - *value = bridge->prefbaseupper; - break; - - case PCI_PREF_LIMIT_UPPER32: - *value = bridge->preflimitupper; + *value = 0; break; case PCI_IO_BASE_UPPER16: @@ -501,19 +489,6 @@ 
static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port, mvebu_pcie_handle_membase_change(port); break; - case PCI_PREF_MEMORY_BASE: - bridge->prefmembase = value & 0xffff; - bridge->prefmemlimit = value >> 16; - break; - - case PCI_PREF_BASE_UPPER32: - bridge->prefbaseupper = value; - break; - - case PCI_PREF_LIMIT_UPPER32: - bridge->preflimitupper = value; - break; - case PCI_IO_BASE_UPPER16: bridge->iobaseupper = value & 0xffff; bridge->iolimitupper = value >> 16; @@ -750,9 +725,9 @@ mvebu_pcie_map_registers(struct platform_device *pdev, ret = of_address_to_resource(np, 0, ®s); if (ret) - return NULL; + return ERR_PTR(ret); - return devm_request_and_ioremap(&pdev->dev, ®s); + return devm_ioremap_resource(&pdev->dev, ®s); } static int __init mvebu_pcie_probe(struct platform_device *pdev) @@ -842,9 +817,10 @@ static int __init mvebu_pcie_probe(struct platform_device *pdev) continue; port->base = mvebu_pcie_map_registers(pdev, child, port); - if (!port->base) { + if (IS_ERR(port->base)) { dev_err(&pdev->dev, "PCIe%d.%d: cannot map registers\n", port->port, port->lane); + port->base = NULL; continue; } diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c index 26bdbda8ff9..c10e9ac9bbb 100644 --- a/drivers/pci/host/pcie-designware.c +++ b/drivers/pci/host/pcie-designware.c @@ -1,5 +1,5 @@ /* - * PCIe host controller driver for Samsung EXYNOS SoCs + * Synopsys Designware PCIe host controller driver * * Copyright (C) 2013 Samsung Electronics Co., Ltd. * http://www.samsung.com @@ -11,74 +11,28 @@ * published by the Free Software Foundation. */ -#include <linux/clk.h> -#include <linux/delay.h> -#include <linux/gpio.h> -#include <linux/interrupt.h> #include <linux/kernel.h> -#include <linux/list.h> #include <linux/module.h> -#include <linux/of.h> #include <linux/of_address.h> -#include <linux/of_gpio.h> -#include <linux/of_pci.h> #include <linux/pci.h> #include <linux/pci_regs.h> -#include <linux/platform_device.h> -#include <linux/resource.h> -#include <linux/signal.h> -#include <linux/slab.h> #include <linux/types.h> -struct pcie_port_info { - u32 cfg0_size; - u32 cfg1_size; - u32 io_size; - u32 mem_size; - phys_addr_t io_bus_addr; - phys_addr_t mem_bus_addr; -}; - -struct pcie_port { - struct device *dev; - u8 controller; - u8 root_bus_nr; - void __iomem *dbi_base; - void __iomem *elbi_base; - void __iomem *phy_base; - void __iomem *purple_base; - u64 cfg0_base; - void __iomem *va_cfg0_base; - u64 cfg1_base; - void __iomem *va_cfg1_base; - u64 io_base; - u64 mem_base; - spinlock_t conf_lock; - struct resource cfg; - struct resource io; - struct resource mem; - struct pcie_port_info config; - struct clk *clk; - struct clk *bus_clk; - int irq; - int reset_gpio; -}; - -/* - * Exynos PCIe IP consists of Synopsys specific part and Exynos - * specific part. Only core block is a Synopsys designware part; - * other parts are Exynos specific. 
- */ +#include "pcie-designware.h" /* Synopsis specific PCIE configuration registers */ #define PCIE_PORT_LINK_CONTROL 0x710 #define PORT_LINK_MODE_MASK (0x3f << 16) +#define PORT_LINK_MODE_1_LANES (0x1 << 16) +#define PORT_LINK_MODE_2_LANES (0x3 << 16) #define PORT_LINK_MODE_4_LANES (0x7 << 16) #define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C #define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) #define PORT_LOGIC_LINK_WIDTH_MASK (0x1ff << 8) -#define PORT_LOGIC_LINK_WIDTH_4_LANES (0x7 << 8) +#define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8) +#define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8) +#define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8) #define PCIE_MSI_ADDR_LO 0x820 #define PCIE_MSI_ADDR_HI 0x824 @@ -108,69 +62,16 @@ struct pcie_port { #define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16) #define PCIE_ATU_UPPER_TARGET 0x91C -/* Exynos specific PCIE configuration registers */ - -/* PCIe ELBI registers */ -#define PCIE_IRQ_PULSE 0x000 -#define IRQ_INTA_ASSERT (0x1 << 0) -#define IRQ_INTB_ASSERT (0x1 << 2) -#define IRQ_INTC_ASSERT (0x1 << 4) -#define IRQ_INTD_ASSERT (0x1 << 6) -#define PCIE_IRQ_LEVEL 0x004 -#define PCIE_IRQ_SPECIAL 0x008 -#define PCIE_IRQ_EN_PULSE 0x00c -#define PCIE_IRQ_EN_LEVEL 0x010 -#define PCIE_IRQ_EN_SPECIAL 0x014 -#define PCIE_PWR_RESET 0x018 -#define PCIE_CORE_RESET 0x01c -#define PCIE_CORE_RESET_ENABLE (0x1 << 0) -#define PCIE_STICKY_RESET 0x020 -#define PCIE_NONSTICKY_RESET 0x024 -#define PCIE_APP_INIT_RESET 0x028 -#define PCIE_APP_LTSSM_ENABLE 0x02c -#define PCIE_ELBI_RDLH_LINKUP 0x064 -#define PCIE_ELBI_LTSSM_ENABLE 0x1 -#define PCIE_ELBI_SLV_AWMISC 0x11c -#define PCIE_ELBI_SLV_ARMISC 0x120 -#define PCIE_ELBI_SLV_DBI_ENABLE (0x1 << 21) - -/* PCIe Purple registers */ -#define PCIE_PHY_GLOBAL_RESET 0x000 -#define PCIE_PHY_COMMON_RESET 0x004 -#define PCIE_PHY_CMN_REG 0x008 -#define PCIE_PHY_MAC_RESET 0x00c -#define PCIE_PHY_PLL_LOCKED 0x010 -#define PCIE_PHY_TRSVREG_RESET 0x020 -#define PCIE_PHY_TRSV_RESET 0x024 - -/* PCIe PHY registers */ -#define PCIE_PHY_IMPEDANCE 0x004 -#define PCIE_PHY_PLL_DIV_0 0x008 -#define PCIE_PHY_PLL_BIAS 0x00c -#define PCIE_PHY_DCC_FEEDBACK 0x014 -#define PCIE_PHY_PLL_DIV_1 0x05c -#define PCIE_PHY_TRSV0_EMP_LVL 0x084 -#define PCIE_PHY_TRSV0_DRV_LVL 0x088 -#define PCIE_PHY_TRSV0_RXCDR 0x0ac -#define PCIE_PHY_TRSV0_LVCC 0x0dc -#define PCIE_PHY_TRSV1_EMP_LVL 0x144 -#define PCIE_PHY_TRSV1_RXCDR 0x16c -#define PCIE_PHY_TRSV1_LVCC 0x19c -#define PCIE_PHY_TRSV2_EMP_LVL 0x204 -#define PCIE_PHY_TRSV2_RXCDR 0x22c -#define PCIE_PHY_TRSV2_LVCC 0x25c -#define PCIE_PHY_TRSV3_EMP_LVL 0x2c4 -#define PCIE_PHY_TRSV3_RXCDR 0x2ec -#define PCIE_PHY_TRSV3_LVCC 0x31c - -static struct hw_pci exynos_pci; +static struct hw_pci dw_pci; + +unsigned long global_io_offset; static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys) { return sys->private_data; } -static inline int cfg_read(void *addr, int where, int size, u32 *val) +int cfg_read(void __iomem *addr, int where, int size, u32 *val) { *val = readl(addr); @@ -184,7 +85,7 @@ static inline int cfg_read(void *addr, int where, int size, u32 *val) return PCIBIOS_SUCCESSFUL; } -static inline int cfg_write(void *addr, int where, int size, u32 val) +int cfg_write(void __iomem *addr, int where, int size, u32 val) { if (size == 4) writel(val, addr); @@ -198,155 +99,217 @@ static inline int cfg_write(void *addr, int where, int size, u32 val) return PCIBIOS_SUCCESSFUL; } -static void exynos_pcie_sideband_dbi_w_mode(struct pcie_port *pp, bool on) +static inline void dw_pcie_readl_rc(struct pcie_port *pp, u32 reg, u32 *val) { - 
u32 val; - - if (on) { - val = readl(pp->elbi_base + PCIE_ELBI_SLV_AWMISC); - val |= PCIE_ELBI_SLV_DBI_ENABLE; - writel(val, pp->elbi_base + PCIE_ELBI_SLV_AWMISC); - } else { - val = readl(pp->elbi_base + PCIE_ELBI_SLV_AWMISC); - val &= ~PCIE_ELBI_SLV_DBI_ENABLE; - writel(val, pp->elbi_base + PCIE_ELBI_SLV_AWMISC); - } -} - -static void exynos_pcie_sideband_dbi_r_mode(struct pcie_port *pp, bool on) -{ - u32 val; - - if (on) { - val = readl(pp->elbi_base + PCIE_ELBI_SLV_ARMISC); - val |= PCIE_ELBI_SLV_DBI_ENABLE; - writel(val, pp->elbi_base + PCIE_ELBI_SLV_ARMISC); - } else { - val = readl(pp->elbi_base + PCIE_ELBI_SLV_ARMISC); - val &= ~PCIE_ELBI_SLV_DBI_ENABLE; - writel(val, pp->elbi_base + PCIE_ELBI_SLV_ARMISC); - } -} - -static inline void readl_rc(struct pcie_port *pp, void *dbi_base, u32 *val) -{ - exynos_pcie_sideband_dbi_r_mode(pp, true); - *val = readl(dbi_base); - exynos_pcie_sideband_dbi_r_mode(pp, false); - return; + if (pp->ops->readl_rc) + pp->ops->readl_rc(pp, pp->dbi_base + reg, val); + else + *val = readl(pp->dbi_base + reg); } -static inline void writel_rc(struct pcie_port *pp, u32 val, void *dbi_base) +static inline void dw_pcie_writel_rc(struct pcie_port *pp, u32 val, u32 reg) { - exynos_pcie_sideband_dbi_w_mode(pp, true); - writel(val, dbi_base); - exynos_pcie_sideband_dbi_w_mode(pp, false); - return; + if (pp->ops->writel_rc) + pp->ops->writel_rc(pp, val, pp->dbi_base + reg); + else + writel(val, pp->dbi_base + reg); } -static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, +int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, u32 *val) { int ret; - exynos_pcie_sideband_dbi_r_mode(pp, true); - ret = cfg_read(pp->dbi_base + (where & ~0x3), where, size, val); - exynos_pcie_sideband_dbi_r_mode(pp, false); + if (pp->ops->rd_own_conf) + ret = pp->ops->rd_own_conf(pp, where, size, val); + else + ret = cfg_read(pp->dbi_base + (where & ~0x3), where, size, val); + return ret; } -static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, +int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, u32 val) { int ret; - exynos_pcie_sideband_dbi_w_mode(pp, true); - ret = cfg_write(pp->dbi_base + (where & ~0x3), where, size, val); - exynos_pcie_sideband_dbi_w_mode(pp, false); + if (pp->ops->wr_own_conf) + ret = pp->ops->wr_own_conf(pp, where, size, val); + else + ret = cfg_write(pp->dbi_base + (where & ~0x3), where, size, + val); + return ret; } -static void exynos_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev) +int dw_pcie_link_up(struct pcie_port *pp) +{ + if (pp->ops->link_up) + return pp->ops->link_up(pp); + else + return 0; +} + +int __init dw_pcie_host_init(struct pcie_port *pp) { + struct device_node *np = pp->dev->of_node; + struct of_pci_range range; + struct of_pci_range_parser parser; u32 val; - void __iomem *dbi_base = pp->dbi_base; - /* Program viewport 0 : OUTBOUND : CFG0 */ - val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0; - writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT); - writel_rc(pp, pp->cfg0_base, dbi_base + PCIE_ATU_LOWER_BASE); - writel_rc(pp, (pp->cfg0_base >> 32), dbi_base + PCIE_ATU_UPPER_BASE); - writel_rc(pp, pp->cfg0_base + pp->config.cfg0_size - 1, - dbi_base + PCIE_ATU_LIMIT); - writel_rc(pp, busdev, dbi_base + PCIE_ATU_LOWER_TARGET); - writel_rc(pp, 0, dbi_base + PCIE_ATU_UPPER_TARGET); - writel_rc(pp, PCIE_ATU_TYPE_CFG0, dbi_base + PCIE_ATU_CR1); - val = PCIE_ATU_ENABLE; - writel_rc(pp, val, dbi_base + PCIE_ATU_CR2); + if (of_pci_range_parser_init(&parser, np)) { + 
dev_err(pp->dev, "missing ranges property\n"); + return -EINVAL; + } + + /* Get the I/O and memory ranges from DT */ + for_each_of_pci_range(&parser, &range) { + unsigned long restype = range.flags & IORESOURCE_TYPE_BITS; + if (restype == IORESOURCE_IO) { + of_pci_range_to_resource(&range, np, &pp->io); + pp->io.name = "I/O"; + pp->io.start = max_t(resource_size_t, + PCIBIOS_MIN_IO, + range.pci_addr + global_io_offset); + pp->io.end = min_t(resource_size_t, + IO_SPACE_LIMIT, + range.pci_addr + range.size + + global_io_offset); + pp->config.io_size = resource_size(&pp->io); + pp->config.io_bus_addr = range.pci_addr; + } + if (restype == IORESOURCE_MEM) { + of_pci_range_to_resource(&range, np, &pp->mem); + pp->mem.name = "MEM"; + pp->config.mem_size = resource_size(&pp->mem); + pp->config.mem_bus_addr = range.pci_addr; + } + if (restype == 0) { + of_pci_range_to_resource(&range, np, &pp->cfg); + pp->config.cfg0_size = resource_size(&pp->cfg)/2; + pp->config.cfg1_size = resource_size(&pp->cfg)/2; + } + } + + if (!pp->dbi_base) { + pp->dbi_base = devm_ioremap(pp->dev, pp->cfg.start, + resource_size(&pp->cfg)); + if (!pp->dbi_base) { + dev_err(pp->dev, "error with ioremap\n"); + return -ENOMEM; + } + } + + pp->cfg0_base = pp->cfg.start; + pp->cfg1_base = pp->cfg.start + pp->config.cfg0_size; + pp->io_base = pp->io.start; + pp->mem_base = pp->mem.start; + + pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base, + pp->config.cfg0_size); + if (!pp->va_cfg0_base) { + dev_err(pp->dev, "error with ioremap in function\n"); + return -ENOMEM; + } + pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base, + pp->config.cfg1_size); + if (!pp->va_cfg1_base) { + dev_err(pp->dev, "error with ioremap\n"); + return -ENOMEM; + } + + if (of_property_read_u32(np, "num-lanes", &pp->lanes)) { + dev_err(pp->dev, "Failed to parse the number of lanes\n"); + return -EINVAL; + } + + if (pp->ops->host_init) + pp->ops->host_init(pp); + + dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0); + + /* program correct class for RC */ + dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI); + + dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val); + val |= PORT_LOGIC_SPEED_CHANGE; + dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val); + + dw_pci.nr_controllers = 1; + dw_pci.private_data = (void **)&pp; + + pci_common_init(&dw_pci); + pci_assign_unassigned_resources(); +#ifdef CONFIG_PCI_DOMAINS + dw_pci.domain++; +#endif + + return 0; } -static void exynos_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev) +static void dw_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev) { - u32 val; - void __iomem *dbi_base = pp->dbi_base; + /* Program viewport 0 : OUTBOUND : CFG0 */ + dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0, + PCIE_ATU_VIEWPORT); + dw_pcie_writel_rc(pp, pp->cfg0_base, PCIE_ATU_LOWER_BASE); + dw_pcie_writel_rc(pp, (pp->cfg0_base >> 32), PCIE_ATU_UPPER_BASE); + dw_pcie_writel_rc(pp, pp->cfg0_base + pp->config.cfg0_size - 1, + PCIE_ATU_LIMIT); + dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET); + dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET); + dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG0, PCIE_ATU_CR1); + dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); +} +static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev) +{ /* Program viewport 1 : OUTBOUND : CFG1 */ - val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1; - writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT); - writel_rc(pp, PCIE_ATU_TYPE_CFG1, dbi_base + PCIE_ATU_CR1); 
- val = PCIE_ATU_ENABLE; - writel_rc(pp, val, dbi_base + PCIE_ATU_CR2); - writel_rc(pp, pp->cfg1_base, dbi_base + PCIE_ATU_LOWER_BASE); - writel_rc(pp, (pp->cfg1_base >> 32), dbi_base + PCIE_ATU_UPPER_BASE); - writel_rc(pp, pp->cfg1_base + pp->config.cfg1_size - 1, - dbi_base + PCIE_ATU_LIMIT); - writel_rc(pp, busdev, dbi_base + PCIE_ATU_LOWER_TARGET); - writel_rc(pp, 0, dbi_base + PCIE_ATU_UPPER_TARGET); + dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1, + PCIE_ATU_VIEWPORT); + dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1); + dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); + dw_pcie_writel_rc(pp, pp->cfg1_base, PCIE_ATU_LOWER_BASE); + dw_pcie_writel_rc(pp, (pp->cfg1_base >> 32), PCIE_ATU_UPPER_BASE); + dw_pcie_writel_rc(pp, pp->cfg1_base + pp->config.cfg1_size - 1, + PCIE_ATU_LIMIT); + dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET); + dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET); } -static void exynos_pcie_prog_viewport_mem_outbound(struct pcie_port *pp) +static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp) { - u32 val; - void __iomem *dbi_base = pp->dbi_base; - /* Program viewport 0 : OUTBOUND : MEM */ - val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0; - writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT); - writel_rc(pp, PCIE_ATU_TYPE_MEM, dbi_base + PCIE_ATU_CR1); - val = PCIE_ATU_ENABLE; - writel_rc(pp, val, dbi_base + PCIE_ATU_CR2); - writel_rc(pp, pp->mem_base, dbi_base + PCIE_ATU_LOWER_BASE); - writel_rc(pp, (pp->mem_base >> 32), dbi_base + PCIE_ATU_UPPER_BASE); - writel_rc(pp, pp->mem_base + pp->config.mem_size - 1, - dbi_base + PCIE_ATU_LIMIT); - writel_rc(pp, pp->config.mem_bus_addr, - dbi_base + PCIE_ATU_LOWER_TARGET); - writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr), - dbi_base + PCIE_ATU_UPPER_TARGET); + dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0, + PCIE_ATU_VIEWPORT); + dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1); + dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); + dw_pcie_writel_rc(pp, pp->mem_base, PCIE_ATU_LOWER_BASE); + dw_pcie_writel_rc(pp, (pp->mem_base >> 32), PCIE_ATU_UPPER_BASE); + dw_pcie_writel_rc(pp, pp->mem_base + pp->config.mem_size - 1, + PCIE_ATU_LIMIT); + dw_pcie_writel_rc(pp, pp->config.mem_bus_addr, PCIE_ATU_LOWER_TARGET); + dw_pcie_writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr), + PCIE_ATU_UPPER_TARGET); } -static void exynos_pcie_prog_viewport_io_outbound(struct pcie_port *pp) +static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp) { - u32 val; - void __iomem *dbi_base = pp->dbi_base; - /* Program viewport 1 : OUTBOUND : IO */ - val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1; - writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT); - writel_rc(pp, PCIE_ATU_TYPE_IO, dbi_base + PCIE_ATU_CR1); - val = PCIE_ATU_ENABLE; - writel_rc(pp, val, dbi_base + PCIE_ATU_CR2); - writel_rc(pp, pp->io_base, dbi_base + PCIE_ATU_LOWER_BASE); - writel_rc(pp, (pp->io_base >> 32), dbi_base + PCIE_ATU_UPPER_BASE); - writel_rc(pp, pp->io_base + pp->config.io_size - 1, - dbi_base + PCIE_ATU_LIMIT); - writel_rc(pp, pp->config.io_bus_addr, - dbi_base + PCIE_ATU_LOWER_TARGET); - writel_rc(pp, upper_32_bits(pp->config.io_bus_addr), - dbi_base + PCIE_ATU_UPPER_TARGET); -} - -static int exynos_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, + dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1, + PCIE_ATU_VIEWPORT); + dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1); + dw_pcie_writel_rc(pp, 
PCIE_ATU_ENABLE, PCIE_ATU_CR2); + dw_pcie_writel_rc(pp, pp->io_base, PCIE_ATU_LOWER_BASE); + dw_pcie_writel_rc(pp, (pp->io_base >> 32), PCIE_ATU_UPPER_BASE); + dw_pcie_writel_rc(pp, pp->io_base + pp->config.io_size - 1, + PCIE_ATU_LIMIT); + dw_pcie_writel_rc(pp, pp->config.io_bus_addr, PCIE_ATU_LOWER_TARGET); + dw_pcie_writel_rc(pp, upper_32_bits(pp->config.io_bus_addr), + PCIE_ATU_UPPER_TARGET); +} + +static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, u32 devfn, int where, int size, u32 *val) { int ret = PCIBIOS_SUCCESSFUL; @@ -357,19 +320,19 @@ static int exynos_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, address = where & ~0x3; if (bus->parent->number == pp->root_bus_nr) { - exynos_pcie_prog_viewport_cfg0(pp, busdev); + dw_pcie_prog_viewport_cfg0(pp, busdev); ret = cfg_read(pp->va_cfg0_base + address, where, size, val); - exynos_pcie_prog_viewport_mem_outbound(pp); + dw_pcie_prog_viewport_mem_outbound(pp); } else { - exynos_pcie_prog_viewport_cfg1(pp, busdev); + dw_pcie_prog_viewport_cfg1(pp, busdev); ret = cfg_read(pp->va_cfg1_base + address, where, size, val); - exynos_pcie_prog_viewport_io_outbound(pp); + dw_pcie_prog_viewport_io_outbound(pp); } return ret; } -static int exynos_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, +static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, u32 devfn, int where, int size, u32 val) { int ret = PCIBIOS_SUCCESSFUL; @@ -380,59 +343,25 @@ static int exynos_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, address = where & ~0x3; if (bus->parent->number == pp->root_bus_nr) { - exynos_pcie_prog_viewport_cfg0(pp, busdev); + dw_pcie_prog_viewport_cfg0(pp, busdev); ret = cfg_write(pp->va_cfg0_base + address, where, size, val); - exynos_pcie_prog_viewport_mem_outbound(pp); + dw_pcie_prog_viewport_mem_outbound(pp); } else { - exynos_pcie_prog_viewport_cfg1(pp, busdev); + dw_pcie_prog_viewport_cfg1(pp, busdev); ret = cfg_write(pp->va_cfg1_base + address, where, size, val); - exynos_pcie_prog_viewport_io_outbound(pp); + dw_pcie_prog_viewport_io_outbound(pp); } return ret; } -static unsigned long global_io_offset; - -static int exynos_pcie_setup(int nr, struct pci_sys_data *sys) -{ - struct pcie_port *pp; - - pp = sys_to_pcie(sys); - - if (!pp) - return 0; - - if (global_io_offset < SZ_1M && pp->config.io_size > 0) { - sys->io_offset = global_io_offset - pp->config.io_bus_addr; - pci_ioremap_io(sys->io_offset, pp->io.start); - global_io_offset += SZ_64K; - pci_add_resource_offset(&sys->resources, &pp->io, - sys->io_offset); - } - - sys->mem_offset = pp->mem.start - pp->config.mem_bus_addr; - pci_add_resource_offset(&sys->resources, &pp->mem, sys->mem_offset); - - return 1; -} - -static int exynos_pcie_link_up(struct pcie_port *pp) -{ - u32 val = readl(pp->elbi_base + PCIE_ELBI_RDLH_LINKUP); - - if (val == PCIE_ELBI_LTSSM_ENABLE) - return 1; - return 0; -} - -static int exynos_pcie_valid_config(struct pcie_port *pp, +static int dw_pcie_valid_config(struct pcie_port *pp, struct pci_bus *bus, int dev) { /* If there is no link, then there is no device */ if (bus->number != pp->root_bus_nr) { - if (!exynos_pcie_link_up(pp)) + if (!dw_pcie_link_up(pp)) return 0; } @@ -450,7 +379,7 @@ static int exynos_pcie_valid_config(struct pcie_port *pp, return 1; } -static int exynos_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, +static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, int size, u32 *val) { struct pcie_port *pp = sys_to_pcie(bus->sysdata); @@ -462,23 
+391,23 @@ static int exynos_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, return -EINVAL; } - if (exynos_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) { + if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) { *val = 0xffffffff; return PCIBIOS_DEVICE_NOT_FOUND; } spin_lock_irqsave(&pp->conf_lock, flags); if (bus->number != pp->root_bus_nr) - ret = exynos_pcie_rd_other_conf(pp, bus, devfn, + ret = dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val); else - ret = exynos_pcie_rd_own_conf(pp, where, size, val); + ret = dw_pcie_rd_own_conf(pp, where, size, val); spin_unlock_irqrestore(&pp->conf_lock, flags); return ret; } -static int exynos_pcie_wr_conf(struct pci_bus *bus, u32 devfn, +static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn, int where, int size, u32 val) { struct pcie_port *pp = sys_to_pcie(bus->sysdata); @@ -490,34 +419,56 @@ static int exynos_pcie_wr_conf(struct pci_bus *bus, u32 devfn, return -EINVAL; } - if (exynos_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) + if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) return PCIBIOS_DEVICE_NOT_FOUND; spin_lock_irqsave(&pp->conf_lock, flags); if (bus->number != pp->root_bus_nr) - ret = exynos_pcie_wr_other_conf(pp, bus, devfn, + ret = dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val); else - ret = exynos_pcie_wr_own_conf(pp, where, size, val); + ret = dw_pcie_wr_own_conf(pp, where, size, val); spin_unlock_irqrestore(&pp->conf_lock, flags); return ret; } -static struct pci_ops exynos_pcie_ops = { - .read = exynos_pcie_rd_conf, - .write = exynos_pcie_wr_conf, +static struct pci_ops dw_pcie_ops = { + .read = dw_pcie_rd_conf, + .write = dw_pcie_wr_conf, }; -static struct pci_bus *exynos_pcie_scan_bus(int nr, - struct pci_sys_data *sys) +int dw_pcie_setup(int nr, struct pci_sys_data *sys) +{ + struct pcie_port *pp; + + pp = sys_to_pcie(sys); + + if (!pp) + return 0; + + if (global_io_offset < SZ_1M && pp->config.io_size > 0) { + sys->io_offset = global_io_offset - pp->config.io_bus_addr; + pci_ioremap_io(sys->io_offset, pp->io.start); + global_io_offset += SZ_64K; + pci_add_resource_offset(&sys->resources, &pp->io, + sys->io_offset); + } + + sys->mem_offset = pp->mem.start - pp->config.mem_bus_addr; + pci_add_resource_offset(&sys->resources, &pp->mem, sys->mem_offset); + + return 1; +} + +struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys) { struct pci_bus *bus; struct pcie_port *pp = sys_to_pcie(sys); if (pp) { pp->root_bus_nr = sys->busnr; - bus = pci_scan_root_bus(NULL, sys->busnr, &exynos_pcie_ops, + bus = pci_scan_root_bus(NULL, sys->busnr, &dw_pcie_ops, sys, &sys->resources); } else { bus = NULL; @@ -527,531 +478,88 @@ static struct pci_bus *exynos_pcie_scan_bus(int nr, return bus; } -static int exynos_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { struct pcie_port *pp = sys_to_pcie(dev->bus->sysdata); return pp->irq; } -static struct hw_pci exynos_pci = { - .setup = exynos_pcie_setup, - .scan = exynos_pcie_scan_bus, - .map_irq = exynos_pcie_map_irq, +static struct hw_pci dw_pci = { + .setup = dw_pcie_setup, + .scan = dw_pcie_scan_bus, + .map_irq = dw_pcie_map_irq, }; -static void exynos_pcie_setup_rc(struct pcie_port *pp) +void dw_pcie_setup_rc(struct pcie_port *pp) { struct pcie_port_info *config = &pp->config; - void __iomem *dbi_base = pp->dbi_base; u32 val; u32 membase; u32 memlimit; /* set the number of lines as 4 */ - readl_rc(pp, dbi_base + PCIE_PORT_LINK_CONTROL, &val); + 
dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL, &val); val &= ~PORT_LINK_MODE_MASK; - val |= PORT_LINK_MODE_4_LANES; - writel_rc(pp, val, dbi_base + PCIE_PORT_LINK_CONTROL); + switch (pp->lanes) { + case 1: + val |= PORT_LINK_MODE_1_LANES; + break; + case 2: + val |= PORT_LINK_MODE_2_LANES; + break; + case 4: + val |= PORT_LINK_MODE_4_LANES; + break; + } + dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL); /* set link width speed control register */ - readl_rc(pp, dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL, &val); + dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, &val); val &= ~PORT_LOGIC_LINK_WIDTH_MASK; - val |= PORT_LOGIC_LINK_WIDTH_4_LANES; - writel_rc(pp, val, dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); + switch (pp->lanes) { + case 1: + val |= PORT_LOGIC_LINK_WIDTH_1_LANES; + break; + case 2: + val |= PORT_LOGIC_LINK_WIDTH_2_LANES; + break; + case 4: + val |= PORT_LOGIC_LINK_WIDTH_4_LANES; + break; + } + dw_pcie_writel_rc(pp, val, PCIE_LINK_WIDTH_SPEED_CONTROL); /* setup RC BARs */ - writel_rc(pp, 0x00000004, dbi_base + PCI_BASE_ADDRESS_0); - writel_rc(pp, 0x00000004, dbi_base + PCI_BASE_ADDRESS_1); + dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_0); + dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_1); /* setup interrupt pins */ - readl_rc(pp, dbi_base + PCI_INTERRUPT_LINE, &val); + dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE, &val); val &= 0xffff00ff; val |= 0x00000100; - writel_rc(pp, val, dbi_base + PCI_INTERRUPT_LINE); + dw_pcie_writel_rc(pp, val, PCI_INTERRUPT_LINE); /* setup bus numbers */ - readl_rc(pp, dbi_base + PCI_PRIMARY_BUS, &val); + dw_pcie_readl_rc(pp, PCI_PRIMARY_BUS, &val); val &= 0xff000000; val |= 0x00010100; - writel_rc(pp, val, dbi_base + PCI_PRIMARY_BUS); + dw_pcie_writel_rc(pp, val, PCI_PRIMARY_BUS); /* setup memory base, memory limit */ membase = ((u32)pp->mem_base & 0xfff00000) >> 16; memlimit = (config->mem_size + (u32)pp->mem_base) & 0xfff00000; val = memlimit | membase; - writel_rc(pp, val, dbi_base + PCI_MEMORY_BASE); + dw_pcie_writel_rc(pp, val, PCI_MEMORY_BASE); /* setup command register */ - readl_rc(pp, dbi_base + PCI_COMMAND, &val); + dw_pcie_readl_rc(pp, PCI_COMMAND, &val); val &= 0xffff0000; val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_SERR; - writel_rc(pp, val, dbi_base + PCI_COMMAND); -} - -static void exynos_pcie_assert_core_reset(struct pcie_port *pp) -{ - u32 val; - void __iomem *elbi_base = pp->elbi_base; - - val = readl(elbi_base + PCIE_CORE_RESET); - val &= ~PCIE_CORE_RESET_ENABLE; - writel(val, elbi_base + PCIE_CORE_RESET); - writel(0, elbi_base + PCIE_PWR_RESET); - writel(0, elbi_base + PCIE_STICKY_RESET); - writel(0, elbi_base + PCIE_NONSTICKY_RESET); -} - -static void exynos_pcie_deassert_core_reset(struct pcie_port *pp) -{ - u32 val; - void __iomem *elbi_base = pp->elbi_base; - void __iomem *purple_base = pp->purple_base; - - val = readl(elbi_base + PCIE_CORE_RESET); - val |= PCIE_CORE_RESET_ENABLE; - writel(val, elbi_base + PCIE_CORE_RESET); - writel(1, elbi_base + PCIE_STICKY_RESET); - writel(1, elbi_base + PCIE_NONSTICKY_RESET); - writel(1, elbi_base + PCIE_APP_INIT_RESET); - writel(0, elbi_base + PCIE_APP_INIT_RESET); - writel(1, purple_base + PCIE_PHY_MAC_RESET); -} - -static void exynos_pcie_assert_phy_reset(struct pcie_port *pp) -{ - void __iomem *purple_base = pp->purple_base; - - writel(0, purple_base + PCIE_PHY_MAC_RESET); - writel(1, purple_base + PCIE_PHY_GLOBAL_RESET); -} - -static void exynos_pcie_deassert_phy_reset(struct pcie_port *pp) -{ - void __iomem *elbi_base = pp->elbi_base; - 
void __iomem *purple_base = pp->purple_base; - - writel(0, purple_base + PCIE_PHY_GLOBAL_RESET); - writel(1, elbi_base + PCIE_PWR_RESET); - writel(0, purple_base + PCIE_PHY_COMMON_RESET); - writel(0, purple_base + PCIE_PHY_CMN_REG); - writel(0, purple_base + PCIE_PHY_TRSVREG_RESET); - writel(0, purple_base + PCIE_PHY_TRSV_RESET); -} - -static void exynos_pcie_init_phy(struct pcie_port *pp) -{ - void __iomem *phy_base = pp->phy_base; - - /* DCC feedback control off */ - writel(0x29, phy_base + PCIE_PHY_DCC_FEEDBACK); - - /* set TX/RX impedance */ - writel(0xd5, phy_base + PCIE_PHY_IMPEDANCE); - - /* set 50Mhz PHY clock */ - writel(0x14, phy_base + PCIE_PHY_PLL_DIV_0); - writel(0x12, phy_base + PCIE_PHY_PLL_DIV_1); - - /* set TX Differential output for lane 0 */ - writel(0x7f, phy_base + PCIE_PHY_TRSV0_DRV_LVL); - - /* set TX Pre-emphasis Level Control for lane 0 to minimum */ - writel(0x0, phy_base + PCIE_PHY_TRSV0_EMP_LVL); - - /* set RX clock and data recovery bandwidth */ - writel(0xe7, phy_base + PCIE_PHY_PLL_BIAS); - writel(0x82, phy_base + PCIE_PHY_TRSV0_RXCDR); - writel(0x82, phy_base + PCIE_PHY_TRSV1_RXCDR); - writel(0x82, phy_base + PCIE_PHY_TRSV2_RXCDR); - writel(0x82, phy_base + PCIE_PHY_TRSV3_RXCDR); - - /* change TX Pre-emphasis Level Control for lanes */ - writel(0x39, phy_base + PCIE_PHY_TRSV0_EMP_LVL); - writel(0x39, phy_base + PCIE_PHY_TRSV1_EMP_LVL); - writel(0x39, phy_base + PCIE_PHY_TRSV2_EMP_LVL); - writel(0x39, phy_base + PCIE_PHY_TRSV3_EMP_LVL); - - /* set LVCC */ - writel(0x20, phy_base + PCIE_PHY_TRSV0_LVCC); - writel(0xa0, phy_base + PCIE_PHY_TRSV1_LVCC); - writel(0xa0, phy_base + PCIE_PHY_TRSV2_LVCC); - writel(0xa0, phy_base + PCIE_PHY_TRSV3_LVCC); -} - -static void exynos_pcie_assert_reset(struct pcie_port *pp) -{ - if (pp->reset_gpio >= 0) - devm_gpio_request_one(pp->dev, pp->reset_gpio, - GPIOF_OUT_INIT_HIGH, "RESET"); - return; -} - -static int exynos_pcie_establish_link(struct pcie_port *pp) -{ - u32 val; - int count = 0; - void __iomem *elbi_base = pp->elbi_base; - void __iomem *purple_base = pp->purple_base; - void __iomem *phy_base = pp->phy_base; - - if (exynos_pcie_link_up(pp)) { - dev_err(pp->dev, "Link already up\n"); - return 0; - } - - /* assert reset signals */ - exynos_pcie_assert_core_reset(pp); - exynos_pcie_assert_phy_reset(pp); - - /* de-assert phy reset */ - exynos_pcie_deassert_phy_reset(pp); - - /* initialize phy */ - exynos_pcie_init_phy(pp); - - /* pulse for common reset */ - writel(1, purple_base + PCIE_PHY_COMMON_RESET); - udelay(500); - writel(0, purple_base + PCIE_PHY_COMMON_RESET); - - /* de-assert core reset */ - exynos_pcie_deassert_core_reset(pp); - - /* setup root complex */ - exynos_pcie_setup_rc(pp); - - /* assert reset signal */ - exynos_pcie_assert_reset(pp); - - /* assert LTSSM enable */ - writel(PCIE_ELBI_LTSSM_ENABLE, elbi_base + PCIE_APP_LTSSM_ENABLE); - - /* check if the link is up or not */ - while (!exynos_pcie_link_up(pp)) { - mdelay(100); - count++; - if (count == 10) { - while (readl(phy_base + PCIE_PHY_PLL_LOCKED) == 0) { - val = readl(purple_base + PCIE_PHY_PLL_LOCKED); - dev_info(pp->dev, "PLL Locked: 0x%x\n", val); - } - dev_err(pp->dev, "PCIe Link Fail\n"); - return -EINVAL; - } - } - - dev_info(pp->dev, "Link up\n"); - - return 0; -} - -static void exynos_pcie_clear_irq_pulse(struct pcie_port *pp) -{ - u32 val; - void __iomem *elbi_base = pp->elbi_base; - - val = readl(elbi_base + PCIE_IRQ_PULSE); - writel(val, elbi_base + PCIE_IRQ_PULSE); - return; -} - -static void exynos_pcie_enable_irq_pulse(struct 
pcie_port *pp) -{ - u32 val; - void __iomem *elbi_base = pp->elbi_base; - - /* enable INTX interrupt */ - val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT | - IRQ_INTC_ASSERT | IRQ_INTD_ASSERT, - writel(val, elbi_base + PCIE_IRQ_EN_PULSE); - return; -} - -static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg) -{ - struct pcie_port *pp = arg; - - exynos_pcie_clear_irq_pulse(pp); - return IRQ_HANDLED; -} - -static void exynos_pcie_enable_interrupts(struct pcie_port *pp) -{ - exynos_pcie_enable_irq_pulse(pp); - return; -} - -static void exynos_pcie_host_init(struct pcie_port *pp) -{ - struct pcie_port_info *config = &pp->config; - u32 val; - - /* Keep first 64K for IO */ - pp->cfg0_base = pp->cfg.start; - pp->cfg1_base = pp->cfg.start + config->cfg0_size; - pp->io_base = pp->io.start; - pp->mem_base = pp->mem.start; - - /* enable link */ - exynos_pcie_establish_link(pp); - - exynos_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0); - - /* program correct class for RC */ - exynos_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI); - - exynos_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val); - val |= PORT_LOGIC_SPEED_CHANGE; - exynos_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val); - - exynos_pcie_enable_interrupts(pp); -} - -static int add_pcie_port(struct pcie_port *pp, struct platform_device *pdev) -{ - struct resource *elbi_base; - struct resource *phy_base; - struct resource *purple_base; - int ret; - - elbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!elbi_base) { - dev_err(&pdev->dev, "couldn't get elbi base resource\n"); - return -EINVAL; - } - pp->elbi_base = devm_ioremap_resource(&pdev->dev, elbi_base); - if (IS_ERR(pp->elbi_base)) - return PTR_ERR(pp->elbi_base); - - phy_base = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!phy_base) { - dev_err(&pdev->dev, "couldn't get phy base resource\n"); - return -EINVAL; - } - pp->phy_base = devm_ioremap_resource(&pdev->dev, phy_base); - if (IS_ERR(pp->phy_base)) - return PTR_ERR(pp->phy_base); - - purple_base = platform_get_resource(pdev, IORESOURCE_MEM, 2); - if (!purple_base) { - dev_err(&pdev->dev, "couldn't get purple base resource\n"); - return -EINVAL; - } - pp->purple_base = devm_ioremap_resource(&pdev->dev, purple_base); - if (IS_ERR(pp->purple_base)) - return PTR_ERR(pp->purple_base); - - pp->irq = platform_get_irq(pdev, 1); - if (!pp->irq) { - dev_err(&pdev->dev, "failed to get irq\n"); - return -ENODEV; - } - ret = devm_request_irq(&pdev->dev, pp->irq, exynos_pcie_irq_handler, - IRQF_SHARED, "exynos-pcie", pp); - if (ret) { - dev_err(&pdev->dev, "failed to request irq\n"); - return ret; - } - - pp->dbi_base = devm_ioremap(&pdev->dev, pp->cfg.start, - resource_size(&pp->cfg)); - if (!pp->dbi_base) { - dev_err(&pdev->dev, "error with ioremap\n"); - return -ENOMEM; - } - - pp->root_bus_nr = -1; - - spin_lock_init(&pp->conf_lock); - exynos_pcie_host_init(pp); - pp->va_cfg0_base = devm_ioremap(&pdev->dev, pp->cfg0_base, - pp->config.cfg0_size); - if (!pp->va_cfg0_base) { - dev_err(pp->dev, "error with ioremap in function\n"); - return -ENOMEM; - } - pp->va_cfg1_base = devm_ioremap(&pdev->dev, pp->cfg1_base, - pp->config.cfg1_size); - if (!pp->va_cfg1_base) { - dev_err(pp->dev, "error with ioremap\n"); - return -ENOMEM; - } - - return 0; -} - -static int __init exynos_pcie_probe(struct platform_device *pdev) -{ - struct pcie_port *pp; - struct device_node *np = pdev->dev.of_node; - struct of_pci_range range; - struct of_pci_range_parser parser; - int ret; - - pp = 
devm_kzalloc(&pdev->dev, sizeof(*pp), GFP_KERNEL); - if (!pp) { - dev_err(&pdev->dev, "no memory for pcie port\n"); - return -ENOMEM; - } - - pp->dev = &pdev->dev; - - if (of_pci_range_parser_init(&parser, np)) { - dev_err(&pdev->dev, "missing ranges property\n"); - return -EINVAL; - } - - /* Get the I/O and memory ranges from DT */ - for_each_of_pci_range(&parser, &range) { - unsigned long restype = range.flags & IORESOURCE_TYPE_BITS; - if (restype == IORESOURCE_IO) { - of_pci_range_to_resource(&range, np, &pp->io); - pp->io.name = "I/O"; - pp->io.start = max_t(resource_size_t, - PCIBIOS_MIN_IO, - range.pci_addr + global_io_offset); - pp->io.end = min_t(resource_size_t, - IO_SPACE_LIMIT, - range.pci_addr + range.size - + global_io_offset); - pp->config.io_size = resource_size(&pp->io); - pp->config.io_bus_addr = range.pci_addr; - } - if (restype == IORESOURCE_MEM) { - of_pci_range_to_resource(&range, np, &pp->mem); - pp->mem.name = "MEM"; - pp->config.mem_size = resource_size(&pp->mem); - pp->config.mem_bus_addr = range.pci_addr; - } - if (restype == 0) { - of_pci_range_to_resource(&range, np, &pp->cfg); - pp->config.cfg0_size = resource_size(&pp->cfg)/2; - pp->config.cfg1_size = resource_size(&pp->cfg)/2; - } - } - - pp->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0); - - pp->clk = devm_clk_get(&pdev->dev, "pcie"); - if (IS_ERR(pp->clk)) { - dev_err(&pdev->dev, "Failed to get pcie rc clock\n"); - return PTR_ERR(pp->clk); - } - ret = clk_prepare_enable(pp->clk); - if (ret) - return ret; - - pp->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus"); - if (IS_ERR(pp->bus_clk)) { - dev_err(&pdev->dev, "Failed to get pcie bus clock\n"); - ret = PTR_ERR(pp->bus_clk); - goto fail_clk; - } - ret = clk_prepare_enable(pp->bus_clk); - if (ret) - goto fail_clk; - - ret = add_pcie_port(pp, pdev); - if (ret < 0) - goto fail_bus_clk; - - pp->controller = exynos_pci.nr_controllers; - exynos_pci.nr_controllers = 1; - exynos_pci.private_data = (void **)&pp; - - pci_common_init(&exynos_pci); - pci_assign_unassigned_resources(); -#ifdef CONFIG_PCI_DOMAINS - exynos_pci.domain++; -#endif - - platform_set_drvdata(pdev, pp); - return 0; - -fail_bus_clk: - clk_disable_unprepare(pp->bus_clk); -fail_clk: - clk_disable_unprepare(pp->clk); - return ret; -} - -static int __exit exynos_pcie_remove(struct platform_device *pdev) -{ - struct pcie_port *pp = platform_get_drvdata(pdev); - - clk_disable_unprepare(pp->bus_clk); - clk_disable_unprepare(pp->clk); - - return 0; -} - -static const struct of_device_id exynos_pcie_of_match[] = { - { .compatible = "samsung,exynos5440-pcie", }, - {}, -}; -MODULE_DEVICE_TABLE(of, exynos_pcie_of_match); - -static struct platform_driver exynos_pcie_driver = { - .remove = __exit_p(exynos_pcie_remove), - .driver = { - .name = "exynos-pcie", - .owner = THIS_MODULE, - .of_match_table = of_match_ptr(exynos_pcie_of_match), - }, -}; - -static int exynos_pcie_abort(unsigned long addr, unsigned int fsr, - struct pt_regs *regs) -{ - unsigned long pc = instruction_pointer(regs); - unsigned long instr = *(unsigned long *)pc; - - WARN_ONCE(1, "pcie abort\n"); - - /* - * If the instruction being executed was a read, - * make it look like it read all-ones. 
- */ - if ((instr & 0x0c100000) == 0x04100000) { - int reg = (instr >> 12) & 15; - unsigned long val; - - if (instr & 0x00400000) - val = 255; - else - val = -1; - - regs->uregs[reg] = val; - regs->ARM_pc += 4; - return 0; - } - - if ((instr & 0x0e100090) == 0x00100090) { - int reg = (instr >> 12) & 15; - - regs->uregs[reg] = -1; - regs->ARM_pc += 4; - return 0; - } - - return 1; -} - -/* Exynos PCIe driver does not allow module unload */ - -static int __init pcie_init(void) -{ - hook_fault_code(16 + 6, exynos_pcie_abort, SIGBUS, 0, - "imprecise external abort"); - - platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe); - - return 0; + dw_pcie_writel_rc(pp, val, PCI_COMMAND); } -subsys_initcall(pcie_init); MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>"); -MODULE_DESCRIPTION("Samsung PCIe host controller driver"); +MODULE_DESCRIPTION("Designware PCIe host controller driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h new file mode 100644 index 00000000000..133820f1da9 --- /dev/null +++ b/drivers/pci/host/pcie-designware.h @@ -0,0 +1,65 @@ +/* + * Synopsys Designware PCIe host controller driver + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Jingoo Han <jg1.han@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +struct pcie_port_info { + u32 cfg0_size; + u32 cfg1_size; + u32 io_size; + u32 mem_size; + phys_addr_t io_bus_addr; + phys_addr_t mem_bus_addr; +}; + +struct pcie_port { + struct device *dev; + u8 root_bus_nr; + void __iomem *dbi_base; + u64 cfg0_base; + void __iomem *va_cfg0_base; + u64 cfg1_base; + void __iomem *va_cfg1_base; + u64 io_base; + u64 mem_base; + spinlock_t conf_lock; + struct resource cfg; + struct resource io; + struct resource mem; + struct pcie_port_info config; + int irq; + u32 lanes; + struct pcie_host_ops *ops; +}; + +struct pcie_host_ops { + void (*readl_rc)(struct pcie_port *pp, + void __iomem *dbi_base, u32 *val); + void (*writel_rc)(struct pcie_port *pp, + u32 val, void __iomem *dbi_base); + int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val); + int (*wr_own_conf)(struct pcie_port *pp, int where, int size, u32 val); + int (*link_up)(struct pcie_port *pp); + void (*host_init)(struct pcie_port *pp); +}; + +extern unsigned long global_io_offset; + +int cfg_read(void __iomem *addr, int where, int size, u32 *val); +int cfg_write(void __iomem *addr, int where, int size, u32 val); +int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, u32 val); +int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, u32 *val); +int dw_pcie_link_up(struct pcie_port *pp); +void dw_pcie_setup_rc(struct pcie_port *pp); +int dw_pcie_host_init(struct pcie_port *pp); +int dw_pcie_setup(int nr, struct pci_sys_data *sys); +struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys); +int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig index bb7ebb22db0..0a648af8953 100644 --- a/drivers/pci/hotplug/Kconfig +++ b/drivers/pci/hotplug/Kconfig @@ -3,16 +3,13 @@ # menuconfig HOTPLUG_PCI - tristate "Support for PCI Hotplug" + bool "Support for PCI Hotplug" depends on PCI && SYSFS ---help--- Say Y here if you have a motherboard with a PCI Hotplug controller. 
This allows you to add and remove PCI cards while the machine is powered up and running. - To compile this driver as a module, choose M here: the - module will be called pci_hotplug. - When in doubt, say N. if HOTPLUG_PCI @@ -149,7 +146,7 @@ config HOTPLUG_PCI_SGI When in doubt, say N. config HOTPLUG_PCI_S390 - tristate "System z PCI Hotplug Support" + bool "System z PCI Hotplug Support" depends on S390 && 64BIT help Say Y here if you want to use the System z PCI Hotplug diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h index 6fdd49c6f0b..f4e02892466 100644 --- a/drivers/pci/hotplug/acpiphp.h +++ b/drivers/pci/hotplug/acpiphp.h @@ -49,6 +49,7 @@ #define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg) #define warn(format, arg...) printk(KERN_WARNING "%s: " format, MY_NAME , ## arg) +struct acpiphp_context; struct acpiphp_bridge; struct acpiphp_slot; @@ -59,6 +60,7 @@ struct slot { struct hotplug_slot *hotplug_slot; struct acpiphp_slot *acpi_slot; struct hotplug_slot_info info; + unsigned int sun; /* ACPI _SUN (Slot User Number) value */ }; static inline const char *slot_name(struct slot *slot) @@ -75,15 +77,11 @@ struct acpiphp_bridge { struct list_head list; struct list_head slots; struct kref ref; - acpi_handle handle; - /* Ejectable PCI-to-PCI bridge (PCI bridge and PCI function) */ - struct acpiphp_func *func; + struct acpiphp_context *context; int nr_slots; - u32 flags; - /* This bus (host bridge) or Secondary bus (PCI-to-PCI bridge) */ struct pci_bus *pci_bus; @@ -99,15 +97,13 @@ struct acpiphp_bridge { */ struct acpiphp_slot { struct list_head node; - struct acpiphp_bridge *bridge; /* parent */ + struct pci_bus *bus; struct list_head funcs; /* one slot may have different objects (i.e. for each function) */ struct slot *slot; struct mutex crit_sect; u8 device; /* pci device# */ - - unsigned long long sun; /* ACPI _SUN (slot unique number) */ u32 flags; /* see below */ }; @@ -119,16 +115,32 @@ struct acpiphp_slot { * typically 8 objects per slot (i.e. 
for each PCI function) */ struct acpiphp_func { - struct acpiphp_slot *slot; /* parent */ + struct acpiphp_bridge *parent; + struct acpiphp_slot *slot; struct list_head sibling; - struct notifier_block nb; - acpi_handle handle; u8 function; /* pci function# */ u32 flags; /* see below */ }; +struct acpiphp_context { + acpi_handle handle; + struct acpiphp_func func; + struct acpiphp_bridge *bridge; + unsigned int refcount; +}; + +static inline struct acpiphp_context *func_to_context(struct acpiphp_func *func) +{ + return container_of(func, struct acpiphp_context, func); +} + +static inline acpi_handle func_to_handle(struct acpiphp_func *func) +{ + return func_to_context(func)->handle; +} + /* * struct acpiphp_attention_info - device specific attention registration * @@ -142,45 +154,32 @@ struct acpiphp_attention_info struct module *owner; }; -/* PCI bus bridge HID */ -#define ACPI_PCI_HOST_HID "PNP0A03" - /* ACPI _STA method value (ignore bit 4; battery present) */ #define ACPI_STA_ALL (0x0000000f) -/* bridge flags */ -#define BRIDGE_HAS_EJ0 (0x00000001) - /* slot flags */ -#define SLOT_POWEREDON (0x00000001) -#define SLOT_ENABLED (0x00000002) -#define SLOT_MULTIFUNCTION (0x00000004) +#define SLOT_ENABLED (0x00000001) /* function flags */ #define FUNC_HAS_STA (0x00000001) #define FUNC_HAS_EJ0 (0x00000002) -#define FUNC_HAS_PS0 (0x00000010) -#define FUNC_HAS_PS1 (0x00000020) -#define FUNC_HAS_PS2 (0x00000040) -#define FUNC_HAS_PS3 (0x00000080) -#define FUNC_HAS_DCK (0x00000100) +#define FUNC_HAS_DCK (0x00000004) /* function prototypes */ /* acpiphp_core.c */ int acpiphp_register_attention(struct acpiphp_attention_info*info); int acpiphp_unregister_attention(struct acpiphp_attention_info *info); -int acpiphp_register_hotplug_slot(struct acpiphp_slot *slot); +int acpiphp_register_hotplug_slot(struct acpiphp_slot *slot, unsigned int sun); void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *slot); /* acpiphp_glue.c */ typedef int (*acpiphp_callback)(struct acpiphp_slot *slot, void *data); int acpiphp_enable_slot(struct acpiphp_slot *slot); -int acpiphp_disable_slot(struct acpiphp_slot *slot); -int acpiphp_eject_slot(struct acpiphp_slot *slot); +int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot); u8 acpiphp_get_power_status(struct acpiphp_slot *slot); u8 acpiphp_get_attention_status(struct acpiphp_slot *slot); u8 acpiphp_get_latch_status(struct acpiphp_slot *slot); diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c index ca8127950fc..bf2203ef130 100644 --- a/drivers/pci/hotplug/acpiphp_core.c +++ b/drivers/pci/hotplug/acpiphp_core.c @@ -155,15 +155,11 @@ static int enable_slot(struct hotplug_slot *hotplug_slot) static int disable_slot(struct hotplug_slot *hotplug_slot) { struct slot *slot = hotplug_slot->private; - int retval; dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); /* disable the specified slot */ - retval = acpiphp_disable_slot(slot->acpi_slot); - if (!retval) - retval = acpiphp_eject_slot(slot->acpi_slot); - return retval; + return acpiphp_disable_and_eject_slot(slot->acpi_slot); } @@ -290,7 +286,8 @@ static void release_slot(struct hotplug_slot *hotplug_slot) } /* callback routine to initialize 'struct slot' for each slot */ -int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot) +int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot, + unsigned int sun) { struct slot *slot; int retval = -ENOMEM; @@ -317,12 +314,11 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot) 
slot->hotplug_slot->info->adapter_status = acpiphp_get_adapter_status(slot->acpi_slot); acpiphp_slot->slot = slot; - snprintf(name, SLOT_NAME_SIZE, "%llu", slot->acpi_slot->sun); + slot->sun = sun; + snprintf(name, SLOT_NAME_SIZE, "%u", sun); - retval = pci_hp_register(slot->hotplug_slot, - acpiphp_slot->bridge->pci_bus, - acpiphp_slot->device, - name); + retval = pci_hp_register(slot->hotplug_slot, acpiphp_slot->bus, + acpiphp_slot->device, name); if (retval == -EBUSY) goto error_hpslot; if (retval) { diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 59df8575a48..f6488adf3af 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -46,6 +46,7 @@ #include <linux/pci.h> #include <linux/pci_hotplug.h> #include <linux/pci-acpi.h> +#include <linux/pm_runtime.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/acpi.h> @@ -55,28 +56,82 @@ static LIST_HEAD(bridge_list); static DEFINE_MUTEX(bridge_mutex); +static DEFINE_MUTEX(acpiphp_context_lock); #define MY_NAME "acpiphp_glue" -static void handle_hotplug_event_bridge (acpi_handle, u32, void *); +static void handle_hotplug_event(acpi_handle handle, u32 type, void *data); static void acpiphp_sanitize_bus(struct pci_bus *bus); static void acpiphp_set_hpp_values(struct pci_bus *bus); -static void hotplug_event_func(acpi_handle handle, u32 type, void *context); -static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context); +static void hotplug_event(acpi_handle handle, u32 type, void *data); static void free_bridge(struct kref *kref); -/* callback routine to check for the existence of a pci dock device */ -static acpi_status -is_pci_dock_device(acpi_handle handle, u32 lvl, void *context, void **rv) +static void acpiphp_context_handler(acpi_handle handle, void *context) { - int *count = (int *)context; + /* Intentionally empty. */ +} - if (is_dock_device(handle)) { - (*count)++; - return AE_CTRL_TERMINATE; - } else { - return AE_OK; +/** + * acpiphp_init_context - Create hotplug context and grab a reference to it. + * @handle: ACPI object handle to create the context for. + * + * Call under acpiphp_context_lock. + */ +static struct acpiphp_context *acpiphp_init_context(acpi_handle handle) +{ + struct acpiphp_context *context; + acpi_status status; + + context = kzalloc(sizeof(*context), GFP_KERNEL); + if (!context) + return NULL; + + context->handle = handle; + context->refcount = 1; + status = acpi_attach_data(handle, acpiphp_context_handler, context); + if (ACPI_FAILURE(status)) { + kfree(context); + return NULL; } + return context; +} + +/** + * acpiphp_get_context - Get hotplug context and grab a reference to it. + * @handle: ACPI object handle to get the context for. + * + * Call under acpiphp_context_lock. + */ +static struct acpiphp_context *acpiphp_get_context(acpi_handle handle) +{ + struct acpiphp_context *context = NULL; + acpi_status status; + void *data; + + status = acpi_get_data(handle, acpiphp_context_handler, &data); + if (ACPI_SUCCESS(status)) { + context = data; + context->refcount++; + } + return context; +} + +/** + * acpiphp_put_context - Drop a reference to ACPI hotplug context. + * @handle: ACPI object handle to put the context for. + * + * The context object is removed if there are no more references to it. + * + * Call under acpiphp_context_lock. 
+ */ +static void acpiphp_put_context(struct acpiphp_context *context) +{ + if (--context->refcount) + return; + + WARN_ON(context->bridge); + acpi_detach_data(context->handle, acpiphp_context_handler); + kfree(context); } static inline void get_bridge(struct acpiphp_bridge *bridge) @@ -91,25 +146,36 @@ static inline void put_bridge(struct acpiphp_bridge *bridge) static void free_bridge(struct kref *kref) { + struct acpiphp_context *context; struct acpiphp_bridge *bridge; struct acpiphp_slot *slot, *next; struct acpiphp_func *func, *tmp; + mutex_lock(&acpiphp_context_lock); + bridge = container_of(kref, struct acpiphp_bridge, ref); list_for_each_entry_safe(slot, next, &bridge->slots, node) { - list_for_each_entry_safe(func, tmp, &slot->funcs, sibling) { - kfree(func); - } + list_for_each_entry_safe(func, tmp, &slot->funcs, sibling) + acpiphp_put_context(func_to_context(func)); + kfree(slot); } - /* Release reference acquired by acpiphp_bridge_handle_to_function() */ - if ((bridge->flags & BRIDGE_HAS_EJ0) && bridge->func) - put_bridge(bridge->func->slot->bridge); + context = bridge->context; + /* Root bridges will not have hotplug context. */ + if (context) { + /* Release the reference taken by acpiphp_enumerate_slots(). */ + put_bridge(context->func.parent); + context->bridge = NULL; + acpiphp_put_context(context); + } + put_device(&bridge->pci_bus->dev); pci_dev_put(bridge->pci_dev); kfree(bridge); + + mutex_unlock(&acpiphp_context_lock); } /* @@ -119,15 +185,14 @@ static void free_bridge(struct kref *kref) * TBD - figure out a way to only call fixups for * systems that require them. */ -static int post_dock_fixups(struct notifier_block *nb, unsigned long val, - void *v) +static void post_dock_fixups(acpi_handle not_used, u32 event, void *data) { - struct acpiphp_func *func = container_of(nb, struct acpiphp_func, nb); - struct pci_bus *bus = func->slot->bridge->pci_bus; + struct acpiphp_context *context = data; + struct pci_bus *bus = context->func.slot->bus; u32 buses; if (!bus->self) - return NOTIFY_OK; + return; /* fixup bad _DCK function that rewrites * secondary bridge on slot @@ -143,12 +208,12 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val, | ((unsigned int)(bus->busn_res.end) << 16); pci_write_config_dword(bus->self, PCI_PRIMARY_BUS, buses); } - return NOTIFY_OK; } static const struct acpi_dock_ops acpiphp_dock_ops = { - .handler = hotplug_event_func, + .fixup = post_dock_fixups, + .handler = hotplug_event, }; /* Check whether the PCI device is managed by native PCIe hotplug driver */ @@ -182,129 +247,118 @@ static bool device_is_managed_by_native_pciehp(struct pci_dev *pdev) static void acpiphp_dock_init(void *data) { - struct acpiphp_func *func = data; + struct acpiphp_context *context = data; - get_bridge(func->slot->bridge); + get_bridge(context->func.parent); } static void acpiphp_dock_release(void *data) { - struct acpiphp_func *func = data; + struct acpiphp_context *context = data; - put_bridge(func->slot->bridge); + put_bridge(context->func.parent); } /* callback routine to register each ACPI PCI slot object */ -static acpi_status -register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) +static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data, + void **rv) { - struct acpiphp_bridge *bridge = (struct acpiphp_bridge *)context; + struct acpiphp_bridge *bridge = data; + struct acpiphp_context *context; struct acpiphp_slot *slot; struct acpiphp_func *newfunc; - acpi_handle tmp; acpi_status status = AE_OK; - unsigned 
long long adr, sun; - int device, function, retval, found = 0; + unsigned long long adr; + int device, function; struct pci_bus *pbus = bridge->pci_bus; - struct pci_dev *pdev; + struct pci_dev *pdev = bridge->pci_dev; u32 val; - if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle)) + if (pdev && device_is_managed_by_native_pciehp(pdev)) return AE_OK; status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr); if (ACPI_FAILURE(status)) { - warn("can't evaluate _ADR (%#x)\n", status); + acpi_handle_warn(handle, "can't evaluate _ADR (%#x)\n", status); return AE_OK; } device = (adr >> 16) & 0xffff; function = adr & 0xffff; - pdev = bridge->pci_dev; - if (pdev && device_is_managed_by_native_pciehp(pdev)) - return AE_OK; - - newfunc = kzalloc(sizeof(struct acpiphp_func), GFP_KERNEL); - if (!newfunc) - return AE_NO_MEMORY; - - newfunc->handle = handle; + mutex_lock(&acpiphp_context_lock); + context = acpiphp_init_context(handle); + if (!context) { + mutex_unlock(&acpiphp_context_lock); + acpi_handle_err(handle, "No hotplug context\n"); + return AE_NOT_EXIST; + } + newfunc = &context->func; newfunc->function = function; + newfunc->parent = bridge; + mutex_unlock(&acpiphp_context_lock); - if (ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp))) + if (acpi_has_method(handle, "_EJ0")) newfunc->flags = FUNC_HAS_EJ0; - if (ACPI_SUCCESS(acpi_get_handle(handle, "_STA", &tmp))) + if (acpi_has_method(handle, "_STA")) newfunc->flags |= FUNC_HAS_STA; - if (ACPI_SUCCESS(acpi_get_handle(handle, "_PS0", &tmp))) - newfunc->flags |= FUNC_HAS_PS0; - - if (ACPI_SUCCESS(acpi_get_handle(handle, "_PS3", &tmp))) - newfunc->flags |= FUNC_HAS_PS3; - - if (ACPI_SUCCESS(acpi_get_handle(handle, "_DCK", &tmp))) + if (acpi_has_method(handle, "_DCK")) newfunc->flags |= FUNC_HAS_DCK; - status = acpi_evaluate_integer(handle, "_SUN", NULL, &sun); - if (ACPI_FAILURE(status)) { - /* - * use the count of the number of slots we've found - * for the number of the slot - */ - sun = bridge->nr_slots+1; - } - /* search for objects that share the same slot */ list_for_each_entry(slot, &bridge->slots, node) - if (slot->device == device) { - if (slot->sun != sun) - warn("sibling found, but _SUN doesn't match!\n"); - found = 1; - break; - } + if (slot->device == device) + goto slot_found; - if (!found) { - slot = kzalloc(sizeof(struct acpiphp_slot), GFP_KERNEL); - if (!slot) { - kfree(newfunc); - return AE_NO_MEMORY; - } + slot = kzalloc(sizeof(struct acpiphp_slot), GFP_KERNEL); + if (!slot) { + status = AE_NO_MEMORY; + goto err; + } - slot->bridge = bridge; - slot->device = device; - slot->sun = sun; - INIT_LIST_HEAD(&slot->funcs); - mutex_init(&slot->crit_sect); + slot->bus = bridge->pci_bus; + slot->device = device; + INIT_LIST_HEAD(&slot->funcs); + mutex_init(&slot->crit_sect); + + list_add_tail(&slot->node, &bridge->slots); + + /* Register slots for ejectable funtions only. 
*/ + if (acpi_pci_check_ejectable(pbus, handle) || is_dock_device(handle)) { + unsigned long long sun; + int retval; - mutex_lock(&bridge_mutex); - list_add_tail(&slot->node, &bridge->slots); - mutex_unlock(&bridge_mutex); bridge->nr_slots++; + status = acpi_evaluate_integer(handle, "_SUN", NULL, &sun); + if (ACPI_FAILURE(status)) + sun = bridge->nr_slots; dbg("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n", - slot->sun, pci_domain_nr(pbus), pbus->number, device); - retval = acpiphp_register_hotplug_slot(slot); + sun, pci_domain_nr(pbus), pbus->number, device); + + retval = acpiphp_register_hotplug_slot(slot, sun); if (retval) { + slot->slot = NULL; + bridge->nr_slots--; if (retval == -EBUSY) warn("Slot %llu already registered by another " - "hotplug driver\n", slot->sun); + "hotplug driver\n", sun); else warn("acpiphp_register_hotplug_slot failed " "(err code = 0x%x)\n", retval); - goto err_exit; } + /* Even if the slot registration fails, we can still use it. */ } + slot_found: newfunc->slot = slot; - mutex_lock(&bridge_mutex); list_add_tail(&newfunc->sibling, &slot->funcs); - mutex_unlock(&bridge_mutex); if (pci_bus_read_dev_vendor_id(pbus, PCI_DEVFN(device, function), &val, 60*1000)) - slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON); + slot->flags |= SLOT_ENABLED; if (is_dock_device(handle)) { /* we don't want to call this device's _EJ0 @@ -313,136 +367,46 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) */ newfunc->flags &= ~FUNC_HAS_EJ0; if (register_hotplug_dock_device(handle, - &acpiphp_dock_ops, newfunc, + &acpiphp_dock_ops, context, acpiphp_dock_init, acpiphp_dock_release)) dbg("failed to register dock device\n"); - - /* we need to be notified when dock events happen - * outside of the hotplug operation, since we may - * need to do fixups before we can hotplug. 
- */ - newfunc->nb.notifier_call = post_dock_fixups; - if (register_dock_notifier(&newfunc->nb)) - dbg("failed to register a dock notifier"); } /* install notify handler */ if (!(newfunc->flags & FUNC_HAS_DCK)) { - status = acpi_install_notify_handler(handle, - ACPI_SYSTEM_NOTIFY, - handle_hotplug_event_func, - newfunc); - + status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY, + handle_hotplug_event, + context); if (ACPI_FAILURE(status)) - err("failed to register interrupt notify handler\n"); - } else - status = AE_OK; - - return status; - - err_exit: - bridge->nr_slots--; - mutex_lock(&bridge_mutex); - list_del(&slot->node); - mutex_unlock(&bridge_mutex); - kfree(slot); - kfree(newfunc); - - return AE_OK; -} - - -/* see if it's worth looking at this bridge */ -static int detect_ejectable_slots(acpi_handle handle) -{ - int found = acpi_pci_detect_ejectable(handle); - if (!found) { - acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, - is_pci_dock_device, NULL, (void *)&found, NULL); - } - return found; -} - -/* initialize miscellaneous stuff for both root and PCI-to-PCI bridge */ -static void init_bridge_misc(struct acpiphp_bridge *bridge) -{ - acpi_status status; - - /* must be added to the list prior to calling register_slot */ - mutex_lock(&bridge_mutex); - list_add(&bridge->list, &bridge_list); - mutex_unlock(&bridge_mutex); - - /* register all slot objects under this bridge */ - status = acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge->handle, (u32)1, - register_slot, NULL, bridge, NULL); - if (ACPI_FAILURE(status)) { - mutex_lock(&bridge_mutex); - list_del(&bridge->list); - mutex_unlock(&bridge_mutex); - return; + acpi_handle_err(handle, + "failed to install notify handler\n"); } - /* install notify handler for P2P bridges */ - if (!pci_is_root_bus(bridge->pci_bus)) { - if ((bridge->flags & BRIDGE_HAS_EJ0) && bridge->func) { - status = acpi_remove_notify_handler(bridge->func->handle, - ACPI_SYSTEM_NOTIFY, - handle_hotplug_event_func); - if (ACPI_FAILURE(status)) - err("failed to remove notify handler\n"); - } - status = acpi_install_notify_handler(bridge->handle, - ACPI_SYSTEM_NOTIFY, - handle_hotplug_event_bridge, - bridge); - - if (ACPI_FAILURE(status)) { - err("failed to register interrupt notify handler\n"); - } - } -} - - -/* find acpiphp_func from acpiphp_bridge */ -static struct acpiphp_func *acpiphp_bridge_handle_to_function(acpi_handle handle) -{ - struct acpiphp_bridge *bridge; - struct acpiphp_slot *slot; - struct acpiphp_func *func = NULL; - - mutex_lock(&bridge_mutex); - list_for_each_entry(bridge, &bridge_list, list) { - list_for_each_entry(slot, &bridge->slots, node) { - list_for_each_entry(func, &slot->funcs, sibling) { - if (func->handle == handle) { - get_bridge(func->slot->bridge); - mutex_unlock(&bridge_mutex); - return func; - } - } - } - } - mutex_unlock(&bridge_mutex); + return AE_OK; - return NULL; + err: + mutex_lock(&acpiphp_context_lock); + acpiphp_put_context(context); + mutex_unlock(&acpiphp_context_lock); + return status; } - static struct acpiphp_bridge *acpiphp_handle_to_bridge(acpi_handle handle) { - struct acpiphp_bridge *bridge; - - mutex_lock(&bridge_mutex); - list_for_each_entry(bridge, &bridge_list, list) - if (bridge->handle == handle) { + struct acpiphp_context *context; + struct acpiphp_bridge *bridge = NULL; + + mutex_lock(&acpiphp_context_lock); + context = acpiphp_get_context(handle); + if (context) { + bridge = context->bridge; + if (bridge) get_bridge(bridge); - mutex_unlock(&bridge_mutex); - return bridge; - } - 
mutex_unlock(&bridge_mutex); - return NULL; + acpiphp_put_context(context); + } + mutex_unlock(&acpiphp_context_lock); + return bridge; } static void cleanup_bridge(struct acpiphp_bridge *bridge) @@ -450,40 +414,24 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge) struct acpiphp_slot *slot; struct acpiphp_func *func; acpi_status status; - acpi_handle handle = bridge->handle; - - if (!pci_is_root_bus(bridge->pci_bus)) { - status = acpi_remove_notify_handler(handle, - ACPI_SYSTEM_NOTIFY, - handle_hotplug_event_bridge); - if (ACPI_FAILURE(status)) - err("failed to remove notify handler\n"); - } - - if ((bridge->flags & BRIDGE_HAS_EJ0) && bridge->func) { - status = acpi_install_notify_handler(bridge->func->handle, - ACPI_SYSTEM_NOTIFY, - handle_hotplug_event_func, - bridge->func); - if (ACPI_FAILURE(status)) - err("failed to install interrupt notify handler\n"); - } list_for_each_entry(slot, &bridge->slots, node) { list_for_each_entry(func, &slot->funcs, sibling) { - if (is_dock_device(func->handle)) { - unregister_hotplug_dock_device(func->handle); - unregister_dock_notifier(&func->nb); - } + acpi_handle handle = func_to_handle(func); + + if (is_dock_device(handle)) + unregister_hotplug_dock_device(handle); + if (!(func->flags & FUNC_HAS_DCK)) { - status = acpi_remove_notify_handler(func->handle, - ACPI_SYSTEM_NOTIFY, - handle_hotplug_event_func); + status = acpi_remove_notify_handler(handle, + ACPI_SYSTEM_NOTIFY, + handle_hotplug_event); if (ACPI_FAILURE(status)) err("failed to remove notify handler\n"); } } - acpiphp_unregister_hotplug_slot(slot); + if (slot->slot) + acpiphp_unregister_hotplug_slot(slot); } mutex_lock(&bridge_mutex); @@ -491,71 +439,6 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge) mutex_unlock(&bridge_mutex); } -static int power_on_slot(struct acpiphp_slot *slot) -{ - acpi_status status; - struct acpiphp_func *func; - int retval = 0; - - /* if already enabled, just skip */ - if (slot->flags & SLOT_POWEREDON) - goto err_exit; - - list_for_each_entry(func, &slot->funcs, sibling) { - if (func->flags & FUNC_HAS_PS0) { - dbg("%s: executing _PS0\n", __func__); - status = acpi_evaluate_object(func->handle, "_PS0", NULL, NULL); - if (ACPI_FAILURE(status)) { - warn("%s: _PS0 failed\n", __func__); - retval = -1; - goto err_exit; - } else - break; - } - } - - /* TBD: evaluate _STA to check if the slot is enabled */ - - slot->flags |= SLOT_POWEREDON; - - err_exit: - return retval; -} - - -static int power_off_slot(struct acpiphp_slot *slot) -{ - acpi_status status; - struct acpiphp_func *func; - - int retval = 0; - - /* if already disabled, just skip */ - if ((slot->flags & SLOT_POWEREDON) == 0) - goto err_exit; - - list_for_each_entry(func, &slot->funcs, sibling) { - if (func->flags & FUNC_HAS_PS3) { - status = acpi_evaluate_object(func->handle, "_PS3", NULL, NULL); - if (ACPI_FAILURE(status)) { - warn("%s: _PS3 failed\n", __func__); - retval = -1; - goto err_exit; - } else - break; - } - } - - /* TBD: evaluate _STA to check if the slot is disabled */ - - slot->flags &= (~SLOT_POWEREDON); - - err_exit: - return retval; -} - - - /** * acpiphp_max_busnr - return the highest reserved bus number under the given bus. * @bus: bus to start search with @@ -583,52 +466,32 @@ static unsigned char acpiphp_max_busnr(struct pci_bus *bus) return max; } - /** - * acpiphp_bus_add - add a new bus to acpi subsystem - * @func: acpiphp_func of the bridge + * acpiphp_bus_trim - Trim device objects in an ACPI namespace subtree. + * @handle: ACPI device object handle to start from. 
*/ -static int acpiphp_bus_add(struct acpiphp_func *func) +static void acpiphp_bus_trim(acpi_handle handle) { - struct acpi_device *device; - int ret_val; - - if (!acpi_bus_get_device(func->handle, &device)) { - dbg("bus exists... trim\n"); - /* this shouldn't be in here, so remove - * the bus then re-add it... - */ - acpi_bus_trim(device); - } - - ret_val = acpi_bus_scan(func->handle); - if (!ret_val) - ret_val = acpi_bus_get_device(func->handle, &device); - - if (ret_val) - dbg("error adding bus, %x\n", -ret_val); + struct acpi_device *adev = NULL; - return ret_val; + acpi_bus_get_device(handle, &adev); + if (adev) + acpi_bus_trim(adev); } - /** - * acpiphp_bus_trim - trim a bus from acpi subsystem - * @handle: handle to acpi namespace + * acpiphp_bus_add - Scan ACPI namespace subtree. + * @handle: ACPI object handle to start the scan from. */ -static int acpiphp_bus_trim(acpi_handle handle) +static void acpiphp_bus_add(acpi_handle handle) { - struct acpi_device *device; - int retval; - - retval = acpi_bus_get_device(handle, &device); - if (retval) { - dbg("acpi_device not found\n"); - return retval; - } + struct acpi_device *adev = NULL; - acpi_bus_trim(device); - return 0; + acpiphp_bus_trim(handle); + acpi_bus_scan(handle); + acpi_bus_get_device(handle, &adev); + if (adev) + acpi_device_set_power(adev, ACPI_STATE_D0); } static void acpiphp_set_acpi_region(struct acpiphp_slot *slot) @@ -645,7 +508,8 @@ static void acpiphp_set_acpi_region(struct acpiphp_slot *slot) params[1].type = ACPI_TYPE_INTEGER; params[1].integer.value = 1; /* _REG is optional, we don't care about if there is failure */ - acpi_evaluate_object(func->handle, "_REG", &arg_list, NULL); + acpi_evaluate_object(func_to_handle(func), "_REG", &arg_list, + NULL); } } @@ -653,59 +517,44 @@ static void check_hotplug_bridge(struct acpiphp_slot *slot, struct pci_dev *dev) { struct acpiphp_func *func; - if (!dev->subordinate) - return; - /* quirk, or pcie could set it already */ if (dev->is_hotplug_bridge) return; - if (PCI_SLOT(dev->devfn) != slot->device) - return; - list_for_each_entry(func, &slot->funcs, sibling) { if (PCI_FUNC(dev->devfn) == func->function) { - /* check if this bridge has ejectable slots */ - if ((detect_ejectable_slots(func->handle) > 0)) - dev->is_hotplug_bridge = 1; + dev->is_hotplug_bridge = 1; break; } } } /** - * enable_device - enable, configure a slot + * enable_slot - enable, configure a slot * @slot: slot to be enabled * * This function should be called per *physical slot*, * not per each slot object in ACPI namespace. */ -static int __ref enable_device(struct acpiphp_slot *slot) +static void __ref enable_slot(struct acpiphp_slot *slot) { struct pci_dev *dev; - struct pci_bus *bus = slot->bridge->pci_bus; + struct pci_bus *bus = slot->bus; struct acpiphp_func *func; - int num, max, pass; + int max, pass; LIST_HEAD(add_list); - if (slot->flags & SLOT_ENABLED) - goto err_exit; - list_for_each_entry(func, &slot->funcs, sibling) - acpiphp_bus_add(func); + acpiphp_bus_add(func_to_handle(func)); - num = pci_scan_slot(bus, PCI_DEVFN(slot->device, 0)); - if (num == 0) { - /* Maybe only part of funcs are added. 
*/ - dbg("No new device found\n"); - goto err_exit; - } + pci_scan_slot(bus, PCI_DEVFN(slot->device, 0)); max = acpiphp_max_busnr(bus); for (pass = 0; pass < 2; pass++) { list_for_each_entry(dev, &bus->devices, bus_list) { if (PCI_SLOT(dev->devfn) != slot->device) continue; + if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) { max = pci_scan_bridge(bus, dev, max, pass); @@ -723,7 +572,6 @@ static int __ref enable_device(struct acpiphp_slot *slot) acpiphp_sanitize_bus(bus); acpiphp_set_hpp_values(bus); acpiphp_set_acpi_region(slot); - pci_enable_bridges(bus); list_for_each_entry(dev, &bus->devices, bus_list) { /* Assume that newly added devices are powered on already. */ @@ -744,16 +592,12 @@ static int __ref enable_device(struct acpiphp_slot *slot) continue; } } - - - err_exit: - return 0; } /* return first device in slot, acquiring a reference on it */ static struct pci_dev *dev_in_slot(struct acpiphp_slot *slot) { - struct pci_bus *bus = slot->bridge->pci_bus; + struct pci_bus *bus = slot->bus; struct pci_dev *dev; struct pci_dev *ret = NULL; @@ -769,16 +613,16 @@ static struct pci_dev *dev_in_slot(struct acpiphp_slot *slot) } /** - * disable_device - disable a slot + * disable_slot - disable a slot * @slot: ACPI PHP slot */ -static int disable_device(struct acpiphp_slot *slot) +static void disable_slot(struct acpiphp_slot *slot) { struct acpiphp_func *func; struct pci_dev *pdev; /* - * enable_device() enumerates all functions in this device via + * enable_slot() enumerates all functions in this device via * pci_scan_slot(), whether they have associated ACPI hotplug * methods (_EJ0, etc.) or not. Therefore, we remove all functions * here. @@ -788,13 +632,10 @@ static int disable_device(struct acpiphp_slot *slot) pci_dev_put(pdev); } - list_for_each_entry(func, &slot->funcs, sibling) { - acpiphp_bus_trim(func->handle); - } + list_for_each_entry(func, &slot->funcs, sibling) + acpiphp_bus_trim(func_to_handle(func)); slot->flags &= (~SLOT_ENABLED); - - return 0; } @@ -812,18 +653,21 @@ static int disable_device(struct acpiphp_slot *slot) */ static unsigned int get_slot_status(struct acpiphp_slot *slot) { - acpi_status status; unsigned long long sta = 0; - u32 dvid; struct acpiphp_func *func; list_for_each_entry(func, &slot->funcs, sibling) { if (func->flags & FUNC_HAS_STA) { - status = acpi_evaluate_integer(func->handle, "_STA", NULL, &sta); + acpi_status status; + + status = acpi_evaluate_integer(func_to_handle(func), + "_STA", NULL, &sta); if (ACPI_SUCCESS(status) && sta) break; } else { - pci_bus_read_config_dword(slot->bridge->pci_bus, + u32 dvid; + + pci_bus_read_config_dword(slot->bus, PCI_DEVFN(slot->device, func->function), PCI_VENDOR_ID, &dvid); @@ -838,34 +682,42 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot) } /** - * acpiphp_eject_slot - physically eject the slot - * @slot: ACPI PHP slot + * trim_stale_devices - remove PCI devices that are not responding. + * @dev: PCI device to start walking the hierarchy from. */ -int acpiphp_eject_slot(struct acpiphp_slot *slot) +static void trim_stale_devices(struct pci_dev *dev) { - acpi_status status; - struct acpiphp_func *func; - struct acpi_object_list arg_list; - union acpi_object arg; + acpi_handle handle = ACPI_HANDLE(&dev->dev); + struct pci_bus *bus = dev->subordinate; + bool alive = false; - list_for_each_entry(func, &slot->funcs, sibling) { - /* We don't want to call _EJ0 on non-existing functions. 
*/ - if ((func->flags & FUNC_HAS_EJ0)) { - /* _EJ0 method take one argument */ - arg_list.count = 1; - arg_list.pointer = &arg; - arg.type = ACPI_TYPE_INTEGER; - arg.integer.value = 1; - - status = acpi_evaluate_object(func->handle, "_EJ0", &arg_list, NULL); - if (ACPI_FAILURE(status)) { - warn("%s: _EJ0 failed\n", __func__); - return -1; - } else - break; - } + if (handle) { + acpi_status status; + unsigned long long sta; + + status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); + alive = ACPI_SUCCESS(status) && sta == ACPI_STA_ALL; + } + if (!alive) { + u32 v; + + /* Check if the device responds. */ + alive = pci_bus_read_dev_vendor_id(dev->bus, dev->devfn, &v, 0); + } + if (!alive) { + pci_stop_and_remove_bus_device(dev); + if (handle) + acpiphp_bus_trim(handle); + } else if (bus) { + struct pci_dev *child, *tmp; + + /* The device is a bridge. so check the bus below it. */ + pm_runtime_get_sync(&dev->dev); + list_for_each_entry_safe(child, tmp, &bus->devices, bus_list) + trim_stale_devices(child); + + pm_runtime_put(&dev->dev); } - return 0; } /** @@ -875,43 +727,30 @@ int acpiphp_eject_slot(struct acpiphp_slot *slot) * Iterate over all slots under this bridge and make sure that if a * card is present they are enabled, and if not they are disabled. */ -static int acpiphp_check_bridge(struct acpiphp_bridge *bridge) +static void acpiphp_check_bridge(struct acpiphp_bridge *bridge) { struct acpiphp_slot *slot; - int retval = 0; - int enabled, disabled; - - enabled = disabled = 0; list_for_each_entry(slot, &bridge->slots, node) { - unsigned int status = get_slot_status(slot); - if (slot->flags & SLOT_ENABLED) { - if (status == ACPI_STA_ALL) - continue; - retval = acpiphp_disable_slot(slot); - if (retval) { - err("Error occurred in disabling\n"); - goto err_exit; - } else { - acpiphp_eject_slot(slot); - } - disabled++; + struct pci_bus *bus = slot->bus; + struct pci_dev *dev, *tmp; + + mutex_lock(&slot->crit_sect); + /* wake up all functions */ + if (get_slot_status(slot) == ACPI_STA_ALL) { + /* remove stale devices if any */ + list_for_each_entry_safe(dev, tmp, &bus->devices, + bus_list) + if (PCI_SLOT(dev->devfn) == slot->device) + trim_stale_devices(dev); + + /* configure all functions */ + enable_slot(slot); } else { - if (status != ACPI_STA_ALL) - continue; - retval = acpiphp_enable_slot(slot); - if (retval) { - err("Error occurred in enabling\n"); - goto err_exit; - } - enabled++; + disable_slot(slot); } + mutex_unlock(&slot->crit_sect); } - - dbg("%s: %d enabled, %d disabled\n", __func__, enabled, disabled); - - err_exit: - return retval; } static void acpiphp_set_hpp_values(struct pci_bus *bus) @@ -950,25 +789,6 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus) * ACPI event handlers */ -static acpi_status -check_sub_bridges(acpi_handle handle, u32 lvl, void *context, void **rv) -{ - struct acpiphp_bridge *bridge; - char objname[64]; - struct acpi_buffer buffer = { .length = sizeof(objname), - .pointer = objname }; - - bridge = acpiphp_handle_to_bridge(handle); - if (bridge) { - acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); - dbg("%s: re-enumerating slots under %s\n", - __func__, objname); - acpiphp_check_bridge(bridge); - put_bridge(bridge); - } - return AE_OK ; -} - void acpiphp_check_host_bridge(acpi_handle handle) { struct acpiphp_bridge *bridge; @@ -978,27 +798,23 @@ void acpiphp_check_host_bridge(acpi_handle handle) acpiphp_check_bridge(bridge); put_bridge(bridge); } - - acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, - ACPI_UINT32_MAX, check_sub_bridges, NULL, 
NULL, NULL); } -static void _handle_hotplug_event_bridge(struct work_struct *work) +static void hotplug_event(acpi_handle handle, u32 type, void *data) { + struct acpiphp_context *context = data; + struct acpiphp_func *func = &context->func; struct acpiphp_bridge *bridge; char objname[64]; struct acpi_buffer buffer = { .length = sizeof(objname), .pointer = objname }; - struct acpi_hp_work *hp_work; - acpi_handle handle; - u32 type; - hp_work = container_of(work, struct acpi_hp_work, work); - handle = hp_work->handle; - type = hp_work->type; - bridge = (struct acpiphp_bridge *)hp_work->context; + mutex_lock(&acpiphp_context_lock); + bridge = context->bridge; + if (bridge) + get_bridge(bridge); - acpi_scan_lock_acquire(); + mutex_unlock(&acpiphp_context_lock); acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); @@ -1007,188 +823,129 @@ static void _handle_hotplug_event_bridge(struct work_struct *work) /* bus re-enumerate */ dbg("%s: Bus check notify on %s\n", __func__, objname); dbg("%s: re-enumerating slots under %s\n", __func__, objname); - acpiphp_check_bridge(bridge); - acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, - ACPI_UINT32_MAX, check_sub_bridges, NULL, NULL, NULL); + if (bridge) { + acpiphp_check_bridge(bridge); + } else { + struct acpiphp_slot *slot = func->slot; + + mutex_lock(&slot->crit_sect); + enable_slot(slot); + mutex_unlock(&slot->crit_sect); + } break; case ACPI_NOTIFY_DEVICE_CHECK: /* device check */ dbg("%s: Device check notify on %s\n", __func__, objname); - acpiphp_check_bridge(bridge); - break; + if (bridge) + acpiphp_check_bridge(bridge); + else + acpiphp_check_bridge(func->parent); - case ACPI_NOTIFY_DEVICE_WAKE: - /* wake event */ - dbg("%s: Device wake notify on %s\n", __func__, objname); break; case ACPI_NOTIFY_EJECT_REQUEST: /* request device eject */ dbg("%s: Device eject notify on %s\n", __func__, objname); - if ((bridge->flags & BRIDGE_HAS_EJ0) && bridge->func) { - struct acpiphp_slot *slot; - slot = bridge->func->slot; - if (!acpiphp_disable_slot(slot)) - acpiphp_eject_slot(slot); - } + acpiphp_disable_and_eject_slot(func->slot); break; + } - case ACPI_NOTIFY_FREQUENCY_MISMATCH: - printk(KERN_ERR "Device %s cannot be configured due" - " to a frequency mismatch\n", objname); - break; + if (bridge) + put_bridge(bridge); +} - case ACPI_NOTIFY_BUS_MODE_MISMATCH: - printk(KERN_ERR "Device %s cannot be configured due" - " to a bus mode mismatch\n", objname); - break; +static void hotplug_event_work(struct work_struct *work) +{ + struct acpiphp_context *context; + struct acpi_hp_work *hp_work; - case ACPI_NOTIFY_POWER_FAULT: - printk(KERN_ERR "Device %s has suffered a power fault\n", - objname); - break; + hp_work = container_of(work, struct acpi_hp_work, work); + context = hp_work->context; + acpi_scan_lock_acquire(); - default: - warn("notify_handler: unknown event type 0x%x for %s\n", type, objname); - break; - } + hotplug_event(hp_work->handle, hp_work->type, context); acpi_scan_lock_release(); - kfree(hp_work); /* allocated in handle_hotplug_event_bridge */ - put_bridge(bridge); + kfree(hp_work); /* allocated in handle_hotplug_event() */ + put_bridge(context->func.parent); } /** - * handle_hotplug_event_bridge - handle ACPI event on bridges + * handle_hotplug_event - handle ACPI hotplug event * @handle: Notify()'ed acpi_handle * @type: Notify code - * @context: pointer to acpiphp_bridge structure + * @data: pointer to acpiphp_context structure * - * Handles ACPI event notification on {host,p2p} bridges. + * Handles ACPI event notification on slots. 
*/ -static void handle_hotplug_event_bridge(acpi_handle handle, u32 type, - void *context) +static void handle_hotplug_event(acpi_handle handle, u32 type, void *data) { - struct acpiphp_bridge *bridge = context; - - /* - * Currently the code adds all hotplug events to the kacpid_wq - * queue when it should add hotplug events to the kacpi_hotplug_wq. - * The proper way to fix this is to reorganize the code so that - * drivers (dock, etc.) do not call acpi_os_execute(), etc. - * For now just re-add this work to the kacpi_hotplug_wq so we - * don't deadlock on hotplug actions. - */ - get_bridge(bridge); - alloc_acpi_hp_work(handle, type, context, _handle_hotplug_event_bridge); -} - -static void hotplug_event_func(acpi_handle handle, u32 type, void *context) -{ - struct acpiphp_func *func = context; - char objname[64]; - struct acpi_buffer buffer = { .length = sizeof(objname), - .pointer = objname }; - - acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); + struct acpiphp_context *context; switch (type) { case ACPI_NOTIFY_BUS_CHECK: - /* bus re-enumerate */ - dbg("%s: Bus check notify on %s\n", __func__, objname); - acpiphp_enable_slot(func->slot); - break; - case ACPI_NOTIFY_DEVICE_CHECK: - /* device check : re-enumerate from parent bus */ - dbg("%s: Device check notify on %s\n", __func__, objname); - acpiphp_check_bridge(func->slot->bridge); - break; - - case ACPI_NOTIFY_DEVICE_WAKE: - /* wake event */ - dbg("%s: Device wake notify on %s\n", __func__, objname); - break; - case ACPI_NOTIFY_EJECT_REQUEST: - /* request device eject */ - dbg("%s: Device eject notify on %s\n", __func__, objname); - if (!(acpiphp_disable_slot(func->slot))) - acpiphp_eject_slot(func->slot); break; - default: - warn("notify_handler: unknown event type 0x%x for %s\n", type, objname); - break; - } -} - -static void _handle_hotplug_event_func(struct work_struct *work) -{ - struct acpi_hp_work *hp_work; - struct acpiphp_func *func; + case ACPI_NOTIFY_DEVICE_WAKE: + return; - hp_work = container_of(work, struct acpi_hp_work, work); - func = hp_work->context; - acpi_scan_lock_acquire(); + case ACPI_NOTIFY_FREQUENCY_MISMATCH: + acpi_handle_err(handle, "Device cannot be configured due " + "to a frequency mismatch\n"); + return; - hotplug_event_func(hp_work->handle, hp_work->type, func); + case ACPI_NOTIFY_BUS_MODE_MISMATCH: + acpi_handle_err(handle, "Device cannot be configured due " + "to a bus mode mismatch\n"); + return; - acpi_scan_lock_release(); - kfree(hp_work); /* allocated in handle_hotplug_event_func */ - put_bridge(func->slot->bridge); -} + case ACPI_NOTIFY_POWER_FAULT: + acpi_handle_err(handle, "Device has suffered a power fault\n"); + return; -/** - * handle_hotplug_event_func - handle ACPI event on functions (i.e. slots) - * @handle: Notify()'ed acpi_handle - * @type: Notify code - * @context: pointer to acpiphp_func structure - * - * Handles ACPI event notification on slots. - */ -static void handle_hotplug_event_func(acpi_handle handle, u32 type, - void *context) -{ - struct acpiphp_func *func = context; + default: + acpi_handle_warn(handle, "Unsupported event type 0x%x\n", type); + return; + } - /* - * Currently the code adds all hotplug events to the kacpid_wq - * queue when it should add hotplug events to the kacpi_hotplug_wq. - * The proper way to fix this is to reorganize the code so that - * drivers (dock, etc.) do not call acpi_os_execute(), etc. - * For now just re-add this work to the kacpi_hotplug_wq so we - * don't deadlock on hotplug actions. 
- */ - get_bridge(func->slot->bridge); - alloc_acpi_hp_work(handle, type, context, _handle_hotplug_event_func); + mutex_lock(&acpiphp_context_lock); + context = acpiphp_get_context(handle); + if (context) { + get_bridge(context->func.parent); + acpiphp_put_context(context); + alloc_acpi_hp_work(handle, type, context, hotplug_event_work); + } + mutex_unlock(&acpiphp_context_lock); } /* * Create hotplug slots for the PCI bus. * It should always return 0 to avoid skipping following notifiers. */ -void acpiphp_enumerate_slots(struct pci_bus *bus, acpi_handle handle) +void acpiphp_enumerate_slots(struct pci_bus *bus) { - acpi_handle dummy_handle; struct acpiphp_bridge *bridge; + acpi_handle handle; + acpi_status status; if (acpiphp_disabled) return; - if (detect_ejectable_slots(handle) <= 0) + handle = ACPI_HANDLE(bus->bridge); + if (!handle) return; bridge = kzalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL); - if (bridge == NULL) { - err("out of memory\n"); + if (!bridge) { + acpi_handle_err(handle, "No memory for bridge object\n"); return; } INIT_LIST_HEAD(&bridge->slots); kref_init(&bridge->ref); - bridge->handle = handle; bridge->pci_dev = pci_dev_get(bus->self); bridge->pci_bus = bus; @@ -1199,31 +956,62 @@ void acpiphp_enumerate_slots(struct pci_bus *bus, acpi_handle handle) */ get_device(&bus->dev); - if (!pci_is_root_bus(bridge->pci_bus) && - ACPI_SUCCESS(acpi_get_handle(bridge->handle, - "_EJ0", &dummy_handle))) { - dbg("found ejectable p2p bridge\n"); - bridge->flags |= BRIDGE_HAS_EJ0; - bridge->func = acpiphp_bridge_handle_to_function(handle); + if (!pci_is_root_bus(bridge->pci_bus)) { + struct acpiphp_context *context; + + /* + * This bridge should have been registered as a hotplug function + * under its parent, so the context has to be there. If not, we + * are in deep goo. + */ + mutex_lock(&acpiphp_context_lock); + context = acpiphp_get_context(handle); + if (WARN_ON(!context)) { + mutex_unlock(&acpiphp_context_lock); + put_device(&bus->dev); + kfree(bridge); + return; + } + bridge->context = context; + context->bridge = bridge; + /* Get a reference to the parent bridge. 
*/ + get_bridge(context->func.parent); + mutex_unlock(&acpiphp_context_lock); } - init_bridge_misc(bridge); + /* must be added to the list prior to calling register_slot */ + mutex_lock(&bridge_mutex); + list_add(&bridge->list, &bridge_list); + mutex_unlock(&bridge_mutex); + + /* register all slot objects under this bridge */ + status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, + register_slot, NULL, bridge, NULL); + if (ACPI_FAILURE(status)) { + acpi_handle_err(handle, "failed to register slots\n"); + cleanup_bridge(bridge); + put_bridge(bridge); + } } /* Destroy hotplug slots associated with the PCI bus */ void acpiphp_remove_slots(struct pci_bus *bus) { - struct acpiphp_bridge *bridge, *tmp; + struct acpiphp_bridge *bridge; if (acpiphp_disabled) return; - list_for_each_entry_safe(bridge, tmp, &bridge_list, list) + mutex_lock(&bridge_mutex); + list_for_each_entry(bridge, &bridge_list, list) if (bridge->pci_bus == bus) { + mutex_unlock(&bridge_mutex); cleanup_bridge(bridge); put_bridge(bridge); - break; + return; } + + mutex_unlock(&bridge_mutex); } /** @@ -1232,51 +1020,39 @@ void acpiphp_remove_slots(struct pci_bus *bus) */ int acpiphp_enable_slot(struct acpiphp_slot *slot) { - int retval; - mutex_lock(&slot->crit_sect); + /* configure all functions */ + if (!(slot->flags & SLOT_ENABLED)) + enable_slot(slot); - /* wake up all functions */ - retval = power_on_slot(slot); - if (retval) - goto err_exit; - - if (get_slot_status(slot) == ACPI_STA_ALL) { - /* configure all functions */ - retval = enable_device(slot); - if (retval) - power_off_slot(slot); - } else { - dbg("%s: Slot status is not ACPI_STA_ALL\n", __func__); - power_off_slot(slot); - } - - err_exit: mutex_unlock(&slot->crit_sect); - return retval; + return 0; } /** - * acpiphp_disable_slot - power off slot + * acpiphp_disable_and_eject_slot - power off and eject slot * @slot: ACPI PHP slot */ -int acpiphp_disable_slot(struct acpiphp_slot *slot) +int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot) { + struct acpiphp_func *func; int retval = 0; mutex_lock(&slot->crit_sect); /* unconfigure all functions */ - retval = disable_device(slot); - if (retval) - goto err_exit; + disable_slot(slot); + + list_for_each_entry(func, &slot->funcs, sibling) + if (func->flags & FUNC_HAS_EJ0) { + acpi_handle handle = func_to_handle(func); - /* power off all functions */ - retval = power_off_slot(slot); - if (retval) - goto err_exit; + if (ACPI_FAILURE(acpi_evaluate_ej0(handle))) + acpi_handle_err(handle, "_EJ0 failed\n"); + + break; + } - err_exit: mutex_unlock(&slot->crit_sect); return retval; } @@ -1288,7 +1064,7 @@ int acpiphp_disable_slot(struct acpiphp_slot *slot) */ u8 acpiphp_get_power_status(struct acpiphp_slot *slot) { - return (slot->flags & SLOT_POWEREDON); + return (slot->flags & SLOT_ENABLED); } @@ -1298,11 +1074,7 @@ u8 acpiphp_get_power_status(struct acpiphp_slot *slot) */ u8 acpiphp_get_latch_status(struct acpiphp_slot *slot) { - unsigned int sta; - - sta = get_slot_status(slot); - - return (sta & ACPI_STA_DEVICE_UI) ? 0 : 1; + return !(get_slot_status(slot) & ACPI_STA_DEVICE_UI); } @@ -1312,9 +1084,5 @@ u8 acpiphp_get_latch_status(struct acpiphp_slot *slot) */ u8 acpiphp_get_adapter_status(struct acpiphp_slot *slot) { - unsigned int sta; - - sta = get_slot_status(slot); - - return (sta == 0) ? 
0 : 1; + return !!get_slot_status(slot); } diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c index c35e8ad6db0..2f5786c8522 100644 --- a/drivers/pci/hotplug/acpiphp_ibm.c +++ b/drivers/pci/hotplug/acpiphp_ibm.c @@ -66,7 +66,7 @@ do { \ #define IBM_HARDWARE_ID1 "IBM37D0" #define IBM_HARDWARE_ID2 "IBM37D4" -#define hpslot_to_sun(A) (((struct slot *)((A)->private))->acpi_slot->sun) +#define hpslot_to_sun(A) (((struct slot *)((A)->private))->sun) /* union apci_descriptor - allows access to the * various device descriptors that are embedded in the @@ -270,7 +270,6 @@ static void ibm_handle_events(acpi_handle handle, u32 event, void *context) if (subevent == 0x80) { dbg("%s: generationg bus event\n", __func__); - acpi_bus_generate_proc_event(note->device, note->event, detail); acpi_bus_generate_netlink_event(note->device->pnp.device_class, dev_name(¬e->device->dev), note->event, detail); diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 7fb326983ed..541bbe6d534 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -155,6 +155,7 @@ void pciehp_green_led_off(struct slot *slot); void pciehp_green_led_blink(struct slot *slot); int pciehp_check_link_status(struct controller *ctrl); void pciehp_release_ctrl(struct controller *ctrl); +int pciehp_reset_slot(struct slot *slot, int probe); static inline const char *slot_name(struct slot *slot) { diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 7d72c5e2eba..f4a18f51a29 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -69,6 +69,7 @@ static int get_power_status (struct hotplug_slot *slot, u8 *value); static int get_attention_status (struct hotplug_slot *slot, u8 *value); static int get_latch_status (struct hotplug_slot *slot, u8 *value); static int get_adapter_status (struct hotplug_slot *slot, u8 *value); +static int reset_slot (struct hotplug_slot *slot, int probe); /** * release_slot - free up the memory used by a slot @@ -111,6 +112,7 @@ static int init_slot(struct controller *ctrl) ops->disable_slot = disable_slot; ops->get_power_status = get_power_status; ops->get_adapter_status = get_adapter_status; + ops->reset_slot = reset_slot; if (MRL_SENS(ctrl)) ops->get_latch_status = get_latch_status; if (ATTN_LED(ctrl)) { @@ -223,6 +225,16 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) return pciehp_get_adapter_status(slot, value); } +static int reset_slot(struct hotplug_slot *hotplug_slot, int probe) +{ + struct slot *slot = hotplug_slot->private; + + ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", + __func__, slot_name(slot)); + + return pciehp_reset_slot(slot, probe); +} + static int pciehp_probe(struct pcie_device *dev) { int rc; diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index b2255736ac8..51f56ef4ab6 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -749,6 +749,37 @@ static void pcie_disable_notification(struct controller *ctrl) ctrl_warn(ctrl, "Cannot disable software notification\n"); } +/* + * pciehp has a 1:1 bus:slot relationship so we ultimately want a secondary + * bus reset of the bridge, but if the slot supports surprise removal we need + * to disable presence detection around the bus reset and clear any spurious + * events after. 
+ */ +int pciehp_reset_slot(struct slot *slot, int probe) +{ + struct controller *ctrl = slot->ctrl; + + if (probe) + return 0; + + if (HP_SUPR_RM(ctrl)) { + pcie_write_cmd(ctrl, 0, PCI_EXP_SLTCTL_PDCE); + if (pciehp_poll_mode) + del_timer_sync(&ctrl->poll_timer); + } + + pci_reset_bridge_secondary_bus(ctrl->pcie->port); + + if (HP_SUPR_RM(ctrl)) { + pciehp_writew(ctrl, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_PDC); + pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PDCE, PCI_EXP_SLTCTL_PDCE); + if (pciehp_poll_mode) + int_poll_timeout(ctrl->poll_timer.data); + } + + return 0; +} + int pcie_init_notification(struct controller *ctrl) { if (pciehp_request_irq(ctrl)) diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c index aac7a40e4a4..0e0d0f7f63f 100644 --- a/drivers/pci/hotplug/pciehp_pci.c +++ b/drivers/pci/hotplug/pciehp_pci.c @@ -92,7 +92,14 @@ int pciehp_unconfigure_device(struct slot *p_slot) if (ret) presence = 0; - list_for_each_entry_safe(dev, temp, &parent->devices, bus_list) { + /* + * Stopping an SR-IOV PF device removes all the associated VFs, + * which will update the bus->devices list and confuse the + * iterator. Therefore, iterate in reverse so we remove the VFs + * first, then the PF. We do the same in pci_stop_bus_device(). + */ + list_for_each_entry_safe_reverse(dev, temp, &parent->devices, + bus_list) { pci_dev_get(dev); if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE && presence) { pci_read_config_byte(dev, PCI_BRIDGE_CONTROL, &bctl); diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c index fec2d5b7544..16f92035231 100644 --- a/drivers/pci/hotplug/pcihp_slot.c +++ b/drivers/pci/hotplug/pcihp_slot.c @@ -160,9 +160,8 @@ void pci_configure_slot(struct pci_dev *dev) (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI))) return; - if (dev->bus && dev->bus->self) - pcie_bus_configure_settings(dev->bus, - dev->bus->self->pcie_mpss); + if (dev->bus) + pcie_bus_configure_settings(dev->bus); memset(&hpp, 0, sizeof(hpp)); ret = pci_get_hp_params(dev, &hpp); diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c index b29e20b7862..bb7af78e4ee 100644 --- a/drivers/pci/hotplug/rpadlpar_core.c +++ b/drivers/pci/hotplug/rpadlpar_core.c @@ -388,7 +388,6 @@ int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn) /* Remove the EADS bridge device itself */ BUG_ON(!bus->self); pr_debug("PCI: Now removing bridge device %s\n", pci_name(bus->self)); - eeh_remove_bus_device(bus->self, true); pci_stop_and_remove_bus_device(bus->self); return 0; diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c index ea3fa90d020..66e505ca24e 100644 --- a/drivers/pci/hotplug/s390_pci_hpc.c +++ b/drivers/pci/hotplug/s390_pci_hpc.c @@ -79,8 +79,6 @@ static int enable_slot(struct hotplug_slot *hotplug_slot) if (rc) goto out_deconfigure; - slot->zdev->state = ZPCI_FN_STATE_ONLINE; - pci_scan_slot(slot->zdev->bus, ZPCI_DEVFN); pci_bus_add_devices(slot->zdev->bus); @@ -148,7 +146,7 @@ static struct hotplug_slot_ops s390_hotplug_slot_ops = { .get_adapter_status = get_adapter_status, }; -static int init_pci_slot(struct zpci_dev *zdev) +int zpci_init_slot(struct zpci_dev *zdev) { struct hotplug_slot *hotplug_slot; struct hotplug_slot_info *info; @@ -202,7 +200,7 @@ error: return -ENOMEM; } -static void exit_pci_slot(struct zpci_dev *zdev) +void zpci_exit_slot(struct zpci_dev *zdev) { struct list_head *tmp, *n; struct slot *slot; @@ -215,60 +213,3 @@ static void exit_pci_slot(struct zpci_dev *zdev) 
pci_hp_deregister(slot->hotplug_slot); } } - -static struct pci_hp_callback_ops hp_ops = { - .create_slot = init_pci_slot, - .remove_slot = exit_pci_slot, -}; - -static void __init init_pci_slots(void) -{ - struct zpci_dev *zdev; - - /* - * Create a structure for each slot, and register that slot - * with the pci_hotplug subsystem. - */ - mutex_lock(&zpci_list_lock); - list_for_each_entry(zdev, &zpci_list, entry) { - init_pci_slot(zdev); - } - mutex_unlock(&zpci_list_lock); -} - -static void __exit exit_pci_slots(void) -{ - struct list_head *tmp, *n; - struct slot *slot; - - /* - * Unregister all of our slots with the pci_hotplug subsystem. - * Memory will be freed in release_slot() callback after slot's - * lifespan is finished. - */ - list_for_each_safe(tmp, n, &s390_hotplug_slot_list) { - slot = list_entry(tmp, struct slot, slot_list); - list_del(&slot->slot_list); - pci_hp_deregister(slot->hotplug_slot); - } -} - -static int __init pci_hotplug_s390_init(void) -{ - if (!s390_pci_probe) - return -EOPNOTSUPP; - - zpci_register_hp_ops(&hp_ops); - init_pci_slots(); - - return 0; -} - -static void __exit pci_hotplug_s390_exit(void) -{ - exit_pci_slots(); - zpci_deregister_hp_ops(); -} - -module_init(pci_hotplug_s390_init); -module_exit(pci_hotplug_s390_exit); diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index de8ffacf9c9..21a7182dccd 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -286,7 +286,6 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn) (!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial))) return -EINVAL; - pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn); pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &offset); pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &stride); if (!offset || (nr_virtfn > 1 && !stride)) @@ -324,7 +323,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn) if (!pdev->is_physfn) { pci_dev_put(pdev); - return -ENODEV; + return -ENOSYS; } rc = sysfs_create_link(&dev->dev.kobj, @@ -334,6 +333,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn) return rc; } + pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn); iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE; pci_cfg_access_lock(dev); pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl); @@ -368,6 +368,7 @@ failed: iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE); pci_cfg_access_lock(dev); pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl); + pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, 0); ssleep(1); pci_cfg_access_unlock(dev); @@ -401,6 +402,7 @@ static void sriov_disable(struct pci_dev *dev) sysfs_remove_link(&dev->dev.kobj, "dep_link"); iov->num_VFs = 0; + pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, 0); } static int sriov_init(struct pci_dev *dev, int pos) @@ -662,7 +664,7 @@ int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn) might_sleep(); if (!dev->is_physfn) - return -ENODEV; + return -ENOSYS; return sriov_enable(dev, nr_virtfn); } @@ -722,7 +724,7 @@ EXPORT_SYMBOL_GPL(pci_num_vf); * @dev: the PCI device * * Returns number of VFs belonging to this device that are assigned to a guest. - * If device is not a physical function returns -ENODEV. + * If device is not a physical function returns 0. */ int pci_vfs_assigned(struct pci_dev *dev) { @@ -767,12 +769,15 @@ EXPORT_SYMBOL_GPL(pci_vfs_assigned); * device's mutex held. * * Returns 0 if PF is an SRIOV-capable device and - * value of numvfs valid. 
If not a PF with VFS, return -EINVAL; + * value of numvfs valid. If not a PF return -ENOSYS; + * if numvfs is invalid return -EINVAL; * if VFs already enabled, return -EBUSY. */ int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs) { - if (!dev->is_physfn || (numvfs > dev->sriov->total_VFs)) + if (!dev->is_physfn) + return -ENOSYS; + if (numvfs > dev->sriov->total_VFs) return -EINVAL; /* Shouldn't change if VFs already enabled */ @@ -786,17 +791,17 @@ int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs) EXPORT_SYMBOL_GPL(pci_sriov_set_totalvfs); /** - * pci_sriov_get_totalvfs -- get total VFs supported on this devic3 + * pci_sriov_get_totalvfs -- get total VFs supported on this device * @dev: the PCI PF device * * For a PCIe device with SRIOV support, return the PCIe * SRIOV capability value of TotalVFs or the value of driver_max_VFs - * if the driver reduced it. Otherwise, -EINVAL. + * if the driver reduced it. Otherwise 0. */ int pci_sriov_get_totalvfs(struct pci_dev *dev) { if (!dev->is_physfn) - return -EINVAL; + return 0; if (dev->sriov->driver_max_VFs) return dev->sriov->driver_max_VFs; diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index dbdc5f7e2b2..7c29ee4ed0a 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -210,7 +210,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) } if (!error) - dev_info(&dev->dev, "power state changed by ACPI to %s\n", + dev_dbg(&dev->dev, "power state changed by ACPI to %s\n", acpi_power_state_string(state_conv[state])); return error; @@ -290,24 +290,16 @@ static struct pci_platform_pm_ops acpi_pci_platform_pm = { void acpi_pci_add_bus(struct pci_bus *bus) { - acpi_handle handle = NULL; - - if (bus->bridge) - handle = ACPI_HANDLE(bus->bridge); - if (acpi_pci_disabled || handle == NULL) + if (acpi_pci_disabled || !bus->bridge) return; - acpi_pci_slot_enumerate(bus, handle); - acpiphp_enumerate_slots(bus, handle); + acpi_pci_slot_enumerate(bus); + acpiphp_enumerate_slots(bus); } void acpi_pci_remove_bus(struct pci_bus *bus) { - /* - * bus->bridge->acpi_node.handle has already been reset to NULL - * when acpi_pci_remove_bus() is called, so don't check ACPI handle. - */ - if (acpi_pci_disabled) + if (acpi_pci_disabled || !bus->bridge) return; acpiphp_remove_slots(bus); @@ -317,13 +309,20 @@ void acpi_pci_remove_bus(struct pci_bus *bus) /* ACPI bus type */ static int acpi_pci_find_device(struct device *dev, acpi_handle *handle) { - struct pci_dev * pci_dev; - u64 addr; + struct pci_dev *pci_dev = to_pci_dev(dev); + bool is_bridge; + u64 addr; - pci_dev = to_pci_dev(dev); + /* + * pci_is_bridge() is not suitable here, because pci_dev->subordinate + * is set only after acpi_pci_find_device() has been called for the + * given device. 
+ */ + is_bridge = pci_dev->hdr_type == PCI_HEADER_TYPE_BRIDGE + || pci_dev->hdr_type == PCI_HEADER_TYPE_CARDBUS; /* Please ref to ACPI spec for the syntax of _ADR */ addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn); - *handle = acpi_get_child(DEVICE_ACPI_HANDLE(dev->parent), addr); + *handle = acpi_find_child(ACPI_HANDLE(dev->parent), addr, is_bridge); if (!*handle) return -ENODEV; return 0; diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index e6515e21afa..98f7b9b8950 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -763,6 +763,13 @@ static int pci_pm_resume(struct device *dev) #ifdef CONFIG_HIBERNATE_CALLBACKS + +/* + * pcibios_pm_ops - provide arch-specific hooks when a PCI device is doing + * a hibernate transition + */ +struct dev_pm_ops __weak pcibios_pm_ops; + static int pci_pm_freeze(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); @@ -786,6 +793,9 @@ static int pci_pm_freeze(struct device *dev) return error; } + if (pcibios_pm_ops.freeze) + return pcibios_pm_ops.freeze(dev); + return 0; } @@ -811,6 +821,9 @@ static int pci_pm_freeze_noirq(struct device *dev) pci_pm_set_unknown_state(pci_dev); + if (pcibios_pm_ops.freeze_noirq) + return pcibios_pm_ops.freeze_noirq(dev); + return 0; } @@ -820,6 +833,12 @@ static int pci_pm_thaw_noirq(struct device *dev) struct device_driver *drv = dev->driver; int error = 0; + if (pcibios_pm_ops.thaw_noirq) { + error = pcibios_pm_ops.thaw_noirq(dev); + if (error) + return error; + } + if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_resume_early(dev); @@ -837,6 +856,12 @@ static int pci_pm_thaw(struct device *dev) const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; int error = 0; + if (pcibios_pm_ops.thaw) { + error = pcibios_pm_ops.thaw(dev); + if (error) + return error; + } + if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_resume(dev); @@ -878,6 +903,9 @@ static int pci_pm_poweroff(struct device *dev) Fixup: pci_fixup_device(pci_fixup_suspend, pci_dev); + if (pcibios_pm_ops.poweroff) + return pcibios_pm_ops.poweroff(dev); + return 0; } @@ -911,6 +939,9 @@ static int pci_pm_poweroff_noirq(struct device *dev) if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI) pci_write_config_word(pci_dev, PCI_COMMAND, 0); + if (pcibios_pm_ops.poweroff_noirq) + return pcibios_pm_ops.poweroff_noirq(dev); + return 0; } @@ -920,6 +951,12 @@ static int pci_pm_restore_noirq(struct device *dev) struct device_driver *drv = dev->driver; int error = 0; + if (pcibios_pm_ops.restore_noirq) { + error = pcibios_pm_ops.restore_noirq(dev); + if (error) + return error; + } + pci_pm_default_resume_early(pci_dev); if (pci_has_legacy_pm_support(pci_dev)) @@ -937,6 +974,12 @@ static int pci_pm_restore(struct device *dev) const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; int error = 0; + if (pcibios_pm_ops.restore) { + error = pcibios_pm_ops.restore(dev); + if (error) + return error; + } + /* * This is necessary for the hibernation error path in which restore is * called without restoring the standard config registers of the device. 
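The hibernate path above simply calls through a weak pcibios_pm_ops object, so an architecture that needs extra work around freeze/thaw/poweroff/restore only has to provide a strong definition (or fill in the fields it cares about) to be called for every PCI device. A minimal sketch of such an override follows; the my_arch_* helpers are hypothetical, and the extern declaration is assumed to be visible via <linux/pci.h>.

/*
 * Hypothetical arch code overriding the __weak pcibios_pm_ops added above.
 * Only the hooks that are filled in are called; the rest are skipped by
 * the pci_pm_* wrappers in drivers/pci/pci-driver.c.
 */
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/pm.h>

static int my_arch_pci_freeze(struct device *dev)
{
	/* arch-specific bookkeeping before the hibernation image is created */
	dev_dbg(dev, "freeze hook for %s\n", pci_name(to_pci_dev(dev)));
	return 0;
}

static int my_arch_pci_restore(struct device *dev)
{
	/* re-establish arch state; a non-zero return aborts the restore */
	return 0;
}

/* A strong definition here replaces the weak one in pci-driver.c */
struct dev_pm_ops pcibios_pm_ops = {
	.freeze  = my_arch_pci_freeze,
	.restore = my_arch_pci_restore,
};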
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index c0dbe1f6136..7128cfdd64a 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -131,19 +131,19 @@ static ssize_t pci_bus_show_cpuaffinity(struct device *dev, return ret; } -static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev, - struct device_attribute *attr, - char *buf) +static ssize_t cpuaffinity_show(struct device *dev, + struct device_attribute *attr, char *buf) { return pci_bus_show_cpuaffinity(dev, 0, attr, buf); } +static DEVICE_ATTR_RO(cpuaffinity); -static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev, - struct device_attribute *attr, - char *buf) +static ssize_t cpulistaffinity_show(struct device *dev, + struct device_attribute *attr, char *buf) { return pci_bus_show_cpuaffinity(dev, 1, attr, buf); } +static DEVICE_ATTR_RO(cpulistaffinity); /* show resources */ static ssize_t @@ -379,6 +379,7 @@ dev_bus_rescan_store(struct device *dev, struct device_attribute *attr, } return count; } +static DEVICE_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, dev_bus_rescan_store); #if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI) static ssize_t d3cold_allowed_store(struct device *dev, @@ -514,11 +515,20 @@ struct device_attribute pci_dev_attrs[] = { __ATTR_NULL, }; -struct device_attribute pcibus_dev_attrs[] = { - __ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, dev_bus_rescan_store), - __ATTR(cpuaffinity, S_IRUGO, pci_bus_show_cpumaskaffinity, NULL), - __ATTR(cpulistaffinity, S_IRUGO, pci_bus_show_cpulistaffinity, NULL), - __ATTR_NULL, +static struct attribute *pcibus_attrs[] = { + &dev_attr_rescan.attr, + &dev_attr_cpuaffinity.attr, + &dev_attr_cpulistaffinity.attr, + NULL, +}; + +static const struct attribute_group pcibus_group = { + .attrs = pcibus_attrs, +}; + +const struct attribute_group *pcibus_groups[] = { + &pcibus_group, + NULL, }; static ssize_t diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index e37fea6e178..e8ccf6c0f08 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -22,6 +22,7 @@ #include <linux/interrupt.h> #include <linux/device.h> #include <linux/pm_runtime.h> +#include <linux/pci_hotplug.h> #include <asm-generic/pci-bridge.h> #include <asm/setup.h> #include "pci.h" @@ -1145,6 +1146,24 @@ int pci_reenable_device(struct pci_dev *dev) return 0; } +static void pci_enable_bridge(struct pci_dev *dev) +{ + int retval; + + if (!dev) + return; + + pci_enable_bridge(dev->bus->self); + + if (pci_is_enabled(dev)) + return; + retval = pci_enable_device(dev); + if (retval) + dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n", + retval); + pci_set_master(dev); +} + static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags) { int err; @@ -1165,6 +1184,8 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags) if (atomic_inc_return(&dev->enable_cnt) > 1) return 0; /* already enabled */ + pci_enable_bridge(dev->bus->self); + /* only skip sriov related */ for (i = 0; i <= PCI_ROM_RESOURCE; i++) if (dev->resource[i].flags & flags) @@ -1992,7 +2013,7 @@ static void pci_add_saved_cap(struct pci_dev *pci_dev, } /** - * pci_add_save_buffer - allocate buffer for saving given capability registers + * pci_add_cap_save_buffer - allocate buffer for saving given capability registers * @dev: the PCI device * @cap: the capability to allocate the buffer for * @size: requested size of the buffer @@ -2095,9 +2116,9 @@ void pci_enable_ido(struct pci_dev *dev, unsigned long type) u16 ctrl = 0; if (type & PCI_EXP_IDO_REQUEST) - ctrl |= 
PCI_EXP_IDO_REQ_EN; + ctrl |= PCI_EXP_DEVCTL2_IDO_REQ_EN; if (type & PCI_EXP_IDO_COMPLETION) - ctrl |= PCI_EXP_IDO_CMP_EN; + ctrl |= PCI_EXP_DEVCTL2_IDO_CMP_EN; if (ctrl) pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, ctrl); } @@ -2113,9 +2134,9 @@ void pci_disable_ido(struct pci_dev *dev, unsigned long type) u16 ctrl = 0; if (type & PCI_EXP_IDO_REQUEST) - ctrl |= PCI_EXP_IDO_REQ_EN; + ctrl |= PCI_EXP_DEVCTL2_IDO_REQ_EN; if (type & PCI_EXP_IDO_COMPLETION) - ctrl |= PCI_EXP_IDO_CMP_EN; + ctrl |= PCI_EXP_DEVCTL2_IDO_CMP_EN; if (ctrl) pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, ctrl); } @@ -2147,7 +2168,7 @@ int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type) int ret; pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap); - if (!(cap & PCI_EXP_OBFF_MASK)) + if (!(cap & PCI_EXP_DEVCAP2_OBFF_MASK)) return -ENOTSUPP; /* no OBFF support at all */ /* Make sure the topology supports OBFF as well */ @@ -2158,17 +2179,17 @@ int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type) } pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctrl); - if (cap & PCI_EXP_OBFF_WAKE) - ctrl |= PCI_EXP_OBFF_WAKE_EN; + if (cap & PCI_EXP_DEVCAP2_OBFF_WAKE) + ctrl |= PCI_EXP_DEVCTL2_OBFF_WAKE_EN; else { switch (type) { case PCI_EXP_OBFF_SIGNAL_L0: - if (!(ctrl & PCI_EXP_OBFF_WAKE_EN)) - ctrl |= PCI_EXP_OBFF_MSGA_EN; + if (!(ctrl & PCI_EXP_DEVCTL2_OBFF_WAKE_EN)) + ctrl |= PCI_EXP_DEVCTL2_OBFF_MSGA_EN; break; case PCI_EXP_OBFF_SIGNAL_ALWAYS: - ctrl &= ~PCI_EXP_OBFF_WAKE_EN; - ctrl |= PCI_EXP_OBFF_MSGB_EN; + ctrl &= ~PCI_EXP_DEVCTL2_OBFF_WAKE_EN; + ctrl |= PCI_EXP_DEVCTL2_OBFF_MSGB_EN; break; default: WARN(1, "bad OBFF signal type\n"); @@ -2189,7 +2210,8 @@ EXPORT_SYMBOL(pci_enable_obff); */ void pci_disable_obff(struct pci_dev *dev) { - pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_OBFF_WAKE_EN); + pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, + PCI_EXP_DEVCTL2_OBFF_WAKE_EN); } EXPORT_SYMBOL(pci_disable_obff); @@ -2237,7 +2259,8 @@ int pci_enable_ltr(struct pci_dev *dev) return ret; } - return pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_LTR_EN); + return pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, + PCI_EXP_DEVCTL2_LTR_EN); } EXPORT_SYMBOL(pci_enable_ltr); @@ -2254,7 +2277,8 @@ void pci_disable_ltr(struct pci_dev *dev) if (!pci_ltr_supported(dev)) return; - pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_LTR_EN); + pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, + PCI_EXP_DEVCTL2_LTR_EN); } EXPORT_SYMBOL(pci_disable_ltr); @@ -2359,6 +2383,27 @@ void pci_enable_acs(struct pci_dev *dev) pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl); } +static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags) +{ + int pos; + u16 cap, ctrl; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS); + if (!pos) + return false; + + /* + * Except for egress control, capabilities are either required + * or only required if controllable. Features missing from the + * capability field can therefore be assumed as hard-wired enabled. + */ + pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap); + acs_flags &= (cap | PCI_ACS_EC); + + pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl); + return (ctrl & acs_flags) == acs_flags; +} + /** * pci_acs_enabled - test ACS against required flags for a given device * @pdev: device to test @@ -2366,36 +2411,76 @@ void pci_enable_acs(struct pci_dev *dev) * * Return true if the device supports the provided flags. Automatically * filters out flags that are not implemented on multifunction devices. 
+ * + * Note that this interface checks the effective ACS capabilities of the + * device rather than the actual capabilities. For instance, most single + * function endpoints are not required to support ACS because they have no + * opportunity for peer-to-peer access. We therefore return 'true' + * regardless of whether the device exposes an ACS capability. This makes + * it much easier for callers of this function to ignore the actual type + * or topology of the device when testing ACS support. */ bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags) { - int pos, ret; - u16 ctrl; + int ret; ret = pci_dev_specific_acs_enabled(pdev, acs_flags); if (ret >= 0) return ret > 0; + /* + * Conventional PCI and PCI-X devices never support ACS, either + * effectively or actually. The shared bus topology implies that + * any device on the bus can receive or snoop DMA. + */ if (!pci_is_pcie(pdev)) return false; - /* Filter out flags not applicable to multifunction */ - if (pdev->multifunction) - acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | - PCI_ACS_EC | PCI_ACS_DT); - - if (pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM || - pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT || - pdev->multifunction) { - pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS); - if (!pos) - return false; + switch (pci_pcie_type(pdev)) { + /* + * PCI/X-to-PCIe bridges are not specifically mentioned by the spec, + * but since their primary interface is PCI/X, we conservatively + * handle them as we would a non-PCIe device. + */ + case PCI_EXP_TYPE_PCIE_BRIDGE: + /* + * PCIe 3.0, 6.12.1 excludes ACS on these devices. "ACS is never + * applicable... must never implement an ACS Extended Capability...". + * This seems arbitrary, but we take a conservative interpretation + * of this statement. + */ + case PCI_EXP_TYPE_PCI_BRIDGE: + case PCI_EXP_TYPE_RC_EC: + return false; + /* + * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should + * implement ACS in order to indicate their peer-to-peer capabilities, + * regardless of whether they are single- or multi-function devices. + */ + case PCI_EXP_TYPE_DOWNSTREAM: + case PCI_EXP_TYPE_ROOT_PORT: + return pci_acs_flags_enabled(pdev, acs_flags); + /* + * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be + * implemented by the remaining PCIe types to indicate peer-to-peer + * capabilities, but only when they are part of a multifunction + * device. The footnote for section 6.12 indicates the specific + * PCIe types included here. + */ + case PCI_EXP_TYPE_ENDPOINT: + case PCI_EXP_TYPE_UPSTREAM: + case PCI_EXP_TYPE_LEG_END: + case PCI_EXP_TYPE_RC_END: + if (!pdev->multifunction) + break; - pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl); - if ((ctrl & acs_flags) != acs_flags) - return false; + return pci_acs_flags_enabled(pdev, acs_flags); } + /* + * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable + * to single function devices with the exception of downstream ports. + */ return true; } @@ -3059,18 +3144,23 @@ bool pci_check_and_unmask_intx(struct pci_dev *dev) EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx); /** - * pci_msi_off - disables any msi or msix capabilities + * pci_msi_off - disables any MSI or MSI-X capabilities * @dev: the PCI device to operate on * - * If you want to use msi see pci_enable_msi and friends. - * This is a lower level primitive that allows us to disable - * msi operation at the device level. + * If you want to use MSI, see pci_enable_msi() and friends. 
+ * This is a lower-level primitive that allows us to disable + * MSI operation at the device level. */ void pci_msi_off(struct pci_dev *dev) { int pos; u16 control; + /* + * This looks like it could go in msi.c, but we need it even when + * CONFIG_PCI_MSI=n. For the same reason, we can't use + * dev->msi_cap or dev->msix_cap here. + */ pos = pci_find_capability(dev, PCI_CAP_ID_MSI); if (pos) { pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); @@ -3098,19 +3188,17 @@ int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask) } EXPORT_SYMBOL(pci_set_dma_seg_boundary); -static int pcie_flr(struct pci_dev *dev, int probe) +/** + * pci_wait_for_pending_transaction - waits for pending transaction + * @dev: the PCI device to operate on + * + * Return 0 if the transaction is still pending, 1 otherwise. + */ +int pci_wait_for_pending_transaction(struct pci_dev *dev) { int i; - u32 cap; u16 status; - pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap); - if (!(cap & PCI_EXP_DEVCAP_FLR)) - return -ENOTTY; - - if (probe) - return 0; - /* Wait for Transaction Pending bit clean */ for (i = 0; i < 4; i++) { if (i) @@ -3118,13 +3206,27 @@ static int pcie_flr(struct pci_dev *dev, int probe) pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status); if (!(status & PCI_EXP_DEVSTA_TRPND)) - goto clear; + return 1; } - dev_err(&dev->dev, "transaction is not cleared; " - "proceeding with reset anyway\n"); + return 0; +} +EXPORT_SYMBOL(pci_wait_for_pending_transaction); + +static int pcie_flr(struct pci_dev *dev, int probe) +{ + u32 cap; + + pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap); + if (!(cap & PCI_EXP_DEVCAP_FLR)) + return -ENOTTY; + + if (probe) + return 0; + + if (!pci_wait_for_pending_transaction(dev)) + dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n"); -clear: pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); msleep(100); @@ -3215,9 +3317,42 @@ static int pci_pm_reset(struct pci_dev *dev, int probe) return 0; } -static int pci_parent_bus_reset(struct pci_dev *dev, int probe) +/** + * pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge. + * @dev: Bridge device + * + * Use the bridge control register to assert reset on the secondary bus. + * Devices on the secondary bus are left in power-on state. + */ +void pci_reset_bridge_secondary_bus(struct pci_dev *dev) { u16 ctrl; + + pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl); + ctrl |= PCI_BRIDGE_CTL_BUS_RESET; + pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl); + /* + * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double + * this to 2ms to ensure that we meet the minimum requirement. + */ + msleep(2); + + ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET; + pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl); + + /* + * Trhfa for conventional PCI is 2^25 clock cycles. + * Assuming a minimum 33MHz clock this results in a 1s + * delay before we can consider subordinate devices to + * be re-initialized. PCIe has some ways to shorten this, + * but we don't make use of them yet. 
+ */ + ssleep(1); +} +EXPORT_SYMBOL_GPL(pci_reset_bridge_secondary_bus); + +static int pci_parent_bus_reset(struct pci_dev *dev, int probe) +{ struct pci_dev *pdev; if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self) @@ -3230,18 +3365,40 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe) if (probe) return 0; - pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl); - ctrl |= PCI_BRIDGE_CTL_BUS_RESET; - pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl); - msleep(100); - - ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET; - pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl); - msleep(100); + pci_reset_bridge_secondary_bus(dev->bus->self); return 0; } +static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe) +{ + int rc = -ENOTTY; + + if (!hotplug || !try_module_get(hotplug->ops->owner)) + return rc; + + if (hotplug->ops->reset_slot) + rc = hotplug->ops->reset_slot(hotplug, probe); + + module_put(hotplug->ops->owner); + + return rc; +} + +static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe) +{ + struct pci_dev *pdev; + + if (dev->subordinate || !dev->slot) + return -ENOTTY; + + list_for_each_entry(pdev, &dev->bus->devices, bus_list) + if (pdev != dev && pdev->slot == dev->slot) + return -ENOTTY; + + return pci_reset_hotplug_slot(dev->slot->hotplug, probe); +} + static int __pci_dev_reset(struct pci_dev *dev, int probe) { int rc; @@ -3264,27 +3421,65 @@ static int __pci_dev_reset(struct pci_dev *dev, int probe) if (rc != -ENOTTY) goto done; + rc = pci_dev_reset_slot_function(dev, probe); + if (rc != -ENOTTY) + goto done; + rc = pci_parent_bus_reset(dev, probe); done: return rc; } +static void pci_dev_lock(struct pci_dev *dev) +{ + pci_cfg_access_lock(dev); + /* block PM suspend, driver probe, etc. */ + device_lock(&dev->dev); +} + +static void pci_dev_unlock(struct pci_dev *dev) +{ + device_unlock(&dev->dev); + pci_cfg_access_unlock(dev); +} + +static void pci_dev_save_and_disable(struct pci_dev *dev) +{ + /* + * Wake-up device prior to save. PM registers default to D0 after + * reset and a simple register restore doesn't reliably return + * to a non-D0 state anyway. + */ + pci_set_power_state(dev, PCI_D0); + + pci_save_state(dev); + /* + * Disable the device by clearing the Command register, except for + * INTx-disable which is set. This not only disables MMIO and I/O port + * BARs, but also prevents the device from being Bus Master, preventing + * DMA from the device including MSI/MSI-X interrupts. For PCI 2.3 + * compliant devices, INTx-disable prevents legacy interrupts. + */ + pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE); +} + +static void pci_dev_restore(struct pci_dev *dev) +{ + pci_restore_state(dev); +} + static int pci_dev_reset(struct pci_dev *dev, int probe) { int rc; - if (!probe) { - pci_cfg_access_lock(dev); - /* block PM suspend, driver probe, etc. */ - device_lock(&dev->dev); - } + if (!probe) + pci_dev_lock(dev); rc = __pci_dev_reset(dev, probe); - if (!probe) { - device_unlock(&dev->dev); - pci_cfg_access_unlock(dev); - } + if (!probe) + pci_dev_unlock(dev); + return rc; } /** @@ -3375,22 +3570,249 @@ int pci_reset_function(struct pci_dev *dev) if (rc) return rc; - pci_save_state(dev); - - /* - * both INTx and MSI are disabled after the Interrupt Disable bit - * is set and the Bus Master bit is cleared. 
- */ - pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE); + pci_dev_save_and_disable(dev); rc = pci_dev_reset(dev, 0); - pci_restore_state(dev); + pci_dev_restore(dev); return rc; } EXPORT_SYMBOL_GPL(pci_reset_function); +/* Lock devices from the top of the tree down */ +static void pci_bus_lock(struct pci_bus *bus) +{ + struct pci_dev *dev; + + list_for_each_entry(dev, &bus->devices, bus_list) { + pci_dev_lock(dev); + if (dev->subordinate) + pci_bus_lock(dev->subordinate); + } +} + +/* Unlock devices from the bottom of the tree up */ +static void pci_bus_unlock(struct pci_bus *bus) +{ + struct pci_dev *dev; + + list_for_each_entry(dev, &bus->devices, bus_list) { + if (dev->subordinate) + pci_bus_unlock(dev->subordinate); + pci_dev_unlock(dev); + } +} + +/* Lock devices from the top of the tree down */ +static void pci_slot_lock(struct pci_slot *slot) +{ + struct pci_dev *dev; + + list_for_each_entry(dev, &slot->bus->devices, bus_list) { + if (!dev->slot || dev->slot != slot) + continue; + pci_dev_lock(dev); + if (dev->subordinate) + pci_bus_lock(dev->subordinate); + } +} + +/* Unlock devices from the bottom of the tree up */ +static void pci_slot_unlock(struct pci_slot *slot) +{ + struct pci_dev *dev; + + list_for_each_entry(dev, &slot->bus->devices, bus_list) { + if (!dev->slot || dev->slot != slot) + continue; + if (dev->subordinate) + pci_bus_unlock(dev->subordinate); + pci_dev_unlock(dev); + } +} + +/* Save and disable devices from the top of the tree down */ +static void pci_bus_save_and_disable(struct pci_bus *bus) +{ + struct pci_dev *dev; + + list_for_each_entry(dev, &bus->devices, bus_list) { + pci_dev_save_and_disable(dev); + if (dev->subordinate) + pci_bus_save_and_disable(dev->subordinate); + } +} + +/* + * Restore devices from top of the tree down - parent bridges need to be + * restored before we can get to subordinate devices. + */ +static void pci_bus_restore(struct pci_bus *bus) +{ + struct pci_dev *dev; + + list_for_each_entry(dev, &bus->devices, bus_list) { + pci_dev_restore(dev); + if (dev->subordinate) + pci_bus_restore(dev->subordinate); + } +} + +/* Save and disable devices from the top of the tree down */ +static void pci_slot_save_and_disable(struct pci_slot *slot) +{ + struct pci_dev *dev; + + list_for_each_entry(dev, &slot->bus->devices, bus_list) { + if (!dev->slot || dev->slot != slot) + continue; + pci_dev_save_and_disable(dev); + if (dev->subordinate) + pci_bus_save_and_disable(dev->subordinate); + } +} + +/* + * Restore devices from top of the tree down - parent bridges need to be + * restored before we can get to subordinate devices. + */ +static void pci_slot_restore(struct pci_slot *slot) +{ + struct pci_dev *dev; + + list_for_each_entry(dev, &slot->bus->devices, bus_list) { + if (!dev->slot || dev->slot != slot) + continue; + pci_dev_restore(dev); + if (dev->subordinate) + pci_bus_restore(dev->subordinate); + } +} + +static int pci_slot_reset(struct pci_slot *slot, int probe) +{ + int rc; + + if (!slot) + return -ENOTTY; + + if (!probe) + pci_slot_lock(slot); + + might_sleep(); + + rc = pci_reset_hotplug_slot(slot->hotplug, probe); + + if (!probe) + pci_slot_unlock(slot); + + return rc; +} + +/** + * pci_probe_reset_slot - probe whether a PCI slot can be reset + * @slot: PCI slot to probe + * + * Return 0 if slot can be reset, negative if a slot reset is not supported. 
+ */ +int pci_probe_reset_slot(struct pci_slot *slot) +{ + return pci_slot_reset(slot, 1); +} +EXPORT_SYMBOL_GPL(pci_probe_reset_slot); + +/** + * pci_reset_slot - reset a PCI slot + * @slot: PCI slot to reset + * + * A PCI bus may host multiple slots; each slot may support a reset mechanism + * independent of other slots. For instance, some slots may support slot power + * control. In the case of a 1:1 bus to slot architecture, this function may + * wrap the bus reset to avoid spurious slot related events such as hotplug. + * Generally a slot reset should be attempted before a bus reset. All of the + * functions of the slot and any subordinate buses behind the slot are reset + * through this function. PCI config space of all devices in the slot and + * behind the slot is saved before and restored after reset. + * + * Return 0 on success, non-zero on error. + */ +int pci_reset_slot(struct pci_slot *slot) +{ + int rc; + + rc = pci_slot_reset(slot, 1); + if (rc) + return rc; + + pci_slot_save_and_disable(slot); + + rc = pci_slot_reset(slot, 0); + + pci_slot_restore(slot); + + return rc; +} +EXPORT_SYMBOL_GPL(pci_reset_slot); + +static int pci_bus_reset(struct pci_bus *bus, int probe) +{ + if (!bus->self) + return -ENOTTY; + + if (probe) + return 0; + + pci_bus_lock(bus); + + might_sleep(); + + pci_reset_bridge_secondary_bus(bus->self); + + pci_bus_unlock(bus); + + return 0; +} + +/** + * pci_probe_reset_bus - probe whether a PCI bus can be reset + * @bus: PCI bus to probe + * + * Return 0 if bus can be reset, negative if a bus reset is not supported. + */ +int pci_probe_reset_bus(struct pci_bus *bus) +{ + return pci_bus_reset(bus, 1); +} +EXPORT_SYMBOL_GPL(pci_probe_reset_bus); + +/** + * pci_reset_bus - reset a PCI bus + * @bus: top level PCI bus to reset + * + * Do a bus reset on the given bus and any subordinate buses, saving + * and restoring state of all devices. + * + * Return 0 on success, non-zero on error. + */ +int pci_reset_bus(struct pci_bus *bus) +{ + int rc; + + rc = pci_bus_reset(bus, 1); + if (rc) + return rc; + + pci_bus_save_and_disable(bus); + + rc = pci_bus_reset(bus, 0); + + pci_bus_restore(bus); + + return rc; +} +EXPORT_SYMBOL_GPL(pci_reset_bus); + /** * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count * @dev: PCI device to query @@ -3525,8 +3947,6 @@ int pcie_set_readrq(struct pci_dev *dev, int rq) if (pcie_bus_config == PCIE_BUS_PERFORMANCE) { int mps = pcie_get_mps(dev); - if (mps < 0) - return mps; if (mps < rq) rq = mps; } @@ -3543,7 +3963,6 @@ EXPORT_SYMBOL(pcie_set_readrq); * @dev: PCI device to query * * Returns maximum payload size in bytes - * or appropriate error value. */ int pcie_get_mps(struct pci_dev *dev) { @@ -3579,6 +3998,49 @@ int pcie_set_mps(struct pci_dev *dev, int mps) } /** + * pcie_get_minimum_link - determine minimum link settings of a PCI device + * @dev: PCI device to query + * @speed: storage for minimum speed + * @width: storage for minimum width + * + * This function will walk up the PCI device chain and determine the minimum + * link width and speed of the device. 
+ */ +int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + int ret; + + *speed = PCI_SPEED_UNKNOWN; + *width = PCIE_LNK_WIDTH_UNKNOWN; + + while (dev) { + u16 lnksta; + enum pci_bus_speed next_speed; + enum pcie_link_width next_width; + + ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); + if (ret) + return ret; + + next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; + next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> + PCI_EXP_LNKSTA_NLW_SHIFT; + + if (next_speed < *speed) + *speed = next_speed; + + if (next_width < *width) + *width = next_width; + + dev = dev->bus->self; + } + + return 0; +} +EXPORT_SYMBOL(pcie_get_minimum_link); + +/** * pci_select_bars - Make BAR mask from the type of resource * @dev: the PCI device for which BAR mask is made * @flags: resource type mask to be selected diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index d1182c4a754..8a00c063d7b 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -6,6 +6,9 @@ #define PCI_CFG_SPACE_SIZE 256 #define PCI_CFG_SPACE_EXP_SIZE 4096 +extern const unsigned char pcix_bus_speed[]; +extern const unsigned char pcie_link_speed[]; + /* Functions internal to the PCI core code */ int pci_create_sysfs_dev_files(struct pci_dev *pdev); @@ -151,7 +154,7 @@ static inline int pci_no_d1d2(struct pci_dev *dev) } extern struct device_attribute pci_dev_attrs[]; -extern struct device_attribute pcibus_dev_attrs[]; +extern const struct attribute_group *pcibus_groups[]; extern struct device_type pci_dev_type; extern struct bus_attribute pci_bus_attrs[]; diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig index 569f82fc9e2..7958e59d607 100644 --- a/drivers/pci/pcie/Kconfig +++ b/drivers/pci/pcie/Kconfig @@ -2,7 +2,7 @@ # PCI Express Port Bus Configuration # config PCIEPORTBUS - bool "PCI Express support" + bool "PCI Express Port Bus support" depends on PCI help This automatically enables PCI Express Port Bus support. Users can @@ -14,15 +14,12 @@ config PCIEPORTBUS # Include service Kconfig here # config HOTPLUG_PCI_PCIE - tristate "PCI Express Hotplug driver" + bool "PCI Express Hotplug driver" depends on HOTPLUG_PCI && PCIEPORTBUS help Say Y here if you have a motherboard that supports PCI Express Native Hotplug - To compile this driver as a module, choose M here: the - module will be called pciehp. - When in doubt, say N. 
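The pcie_get_minimum_link() helper exported from drivers/pci/pci.c above is intended for drivers that want to warn when their device sits behind a constrained upstream link. A rough usage sketch follows, under the assumption that the prototype and the pci_bus_speed/pcie_link_width enums are visible via <linux/pci.h>; the example_check_link() wrapper and the 8 GT/s x8 thresholds are made up for illustration.

/*
 * Illustrative caller of pcie_get_minimum_link(); the thresholds below are
 * arbitrary, a real driver would compare against its own requirements.
 */
#include <linux/pci.h>

static void example_check_link(struct pci_dev *pdev)
{
	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;

	if (pcie_get_minimum_link(pdev, &speed, &width))
		return;	/* a config read failed somewhere up the chain */

	if (speed < PCIE_SPEED_8_0GT || width < PCIE_LNK_X8)
		dev_warn(&pdev->dev,
			 "PCIe link is narrower or slower than the device can use\n");
}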
source "drivers/pci/pcie/aer/Kconfig" diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c index 76ef634caf6..0bf82a20a0f 100644 --- a/drivers/pci/pcie/aer/aerdrv.c +++ b/drivers/pci/pcie/aer/aerdrv.c @@ -352,7 +352,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev) reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK; pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32); - aer_do_secondary_bus_reset(dev); + pci_reset_bridge_secondary_bus(dev); dev_printk(KERN_DEBUG, &dev->dev, "Root Port link has been reset\n"); /* Clear Root Error Status */ diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h index 90ea3e88041..84420b7c945 100644 --- a/drivers/pci/pcie/aer/aerdrv.h +++ b/drivers/pci/pcie/aer/aerdrv.h @@ -106,7 +106,6 @@ static inline pci_ers_result_t merge_result(enum pci_ers_result orig, } extern struct bus_type pcie_port_bus_type; -void aer_do_secondary_bus_reset(struct pci_dev *dev); int aer_init(struct pcie_device *dev); void aer_isr(struct work_struct *work); void aer_print_error(struct pci_dev *dev, struct aer_err_info *info); diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index 8b68ae59b7b..85ca36f2136 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c @@ -367,39 +367,6 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev, } /** - * aer_do_secondary_bus_reset - perform secondary bus reset - * @dev: pointer to bridge's pci_dev data structure - * - * Invoked when performing link reset at Root Port or Downstream Port. - */ -void aer_do_secondary_bus_reset(struct pci_dev *dev) -{ - u16 p2p_ctrl; - - /* Assert Secondary Bus Reset */ - pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &p2p_ctrl); - p2p_ctrl |= PCI_BRIDGE_CTL_BUS_RESET; - pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl); - - /* - * we should send hot reset message for 2ms to allow it time to - * propagate to all downstream ports - */ - msleep(2); - - /* De-assert Secondary Bus Reset */ - p2p_ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET; - pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl); - - /* - * System software must wait for at least 100ms from the end - * of a reset of one or more device before it is permitted - * to issue Configuration Requests to those devices. - */ - msleep(200); -} - -/** * default_reset_link - default reset function * @dev: pointer to pci_dev data structure * @@ -408,7 +375,7 @@ void aer_do_secondary_bus_reset(struct pci_dev *dev) */ static pci_ers_result_t default_reset_link(struct pci_dev *dev) { - aer_do_secondary_bus_reset(dev); + pci_reset_bridge_secondary_bus(dev); dev_printk(KERN_DEBUG, &dev->dev, "downstream link has been reset\n"); return PCI_ERS_RESULT_RECOVERED; } diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 46ada5c098e..4f9cc93c3b5 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -96,7 +96,7 @@ static void release_pcibus_dev(struct device *dev) static struct class pcibus_class = { .name = "pci_bus", .dev_release = &release_pcibus_dev, - .dev_attrs = pcibus_dev_attrs, + .dev_groups = pcibus_groups, }; static int __init pcibus_class_init(void) @@ -156,6 +156,8 @@ static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar) return flags; } +#define PCI_COMMAND_DECODE_ENABLE (PCI_COMMAND_MEMORY | PCI_COMMAND_IO) + /** * pci_read_base - read a PCI BAR * @dev: the PCI device @@ -178,8 +180,10 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, /* No printks while decoding is disabled! 
*/ if (!dev->mmio_always_on) { pci_read_config_word(dev, PCI_COMMAND, &orig_cmd); - pci_write_config_word(dev, PCI_COMMAND, - orig_cmd & ~(PCI_COMMAND_MEMORY | PCI_COMMAND_IO)); + if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) { + pci_write_config_word(dev, PCI_COMMAND, + orig_cmd & ~PCI_COMMAND_DECODE_ENABLE); + } } res->name = pci_name(dev); @@ -293,7 +297,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, fail: res->flags = 0; out: - if (!dev->mmio_always_on) + if (!dev->mmio_always_on && + (orig_cmd & PCI_COMMAND_DECODE_ENABLE)) pci_write_config_word(dev, PCI_COMMAND, orig_cmd); if (bar_too_big) @@ -513,7 +518,7 @@ static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b) return bridge; } -static unsigned char pcix_bus_speed[] = { +const unsigned char pcix_bus_speed[] = { PCI_SPEED_UNKNOWN, /* 0 */ PCI_SPEED_66MHz_PCIX, /* 1 */ PCI_SPEED_100MHz_PCIX, /* 2 */ @@ -532,7 +537,7 @@ static unsigned char pcix_bus_speed[] = { PCI_SPEED_133MHz_PCIX_533 /* F */ }; -static unsigned char pcie_link_speed[] = { +const unsigned char pcie_link_speed[] = { PCI_SPEED_UNKNOWN, /* 0 */ PCIE_SPEED_2_5GT, /* 1 */ PCIE_SPEED_5_0GT, /* 2 */ @@ -1491,24 +1496,23 @@ static int pcie_find_smpss(struct pci_dev *dev, void *data) if (!pci_is_pcie(dev)) return 0; - /* For PCIE hotplug enabled slots not connected directly to a - * PCI-E root port, there can be problems when hotplugging - * devices. This is due to the possibility of hotplugging a - * device into the fabric with a smaller MPS that the devices - * currently running have configured. Modifying the MPS on the - * running devices could cause a fatal bus error due to an - * incoming frame being larger than the newly configured MPS. - * To work around this, the MPS for the entire fabric must be - * set to the minimum size. Any devices hotplugged into this - * fabric will have the minimum MPS set. If the PCI hotplug - * slot is directly connected to the root port and there are not - * other devices on the fabric (which seems to be the most - * common case), then this is not an issue and MPS discovery - * will occur as normal. + /* + * We don't have a way to change MPS settings on devices that have + * drivers attached. A hot-added device might support only the minimum + * MPS setting (MPS=128). Therefore, if the fabric contains a bridge + * where devices may be hot-added, we limit the fabric MPS to 128 so + * hot-added devices will work correctly. + * + * However, if we hot-add a device to a slot directly below a Root + * Port, it's impossible for there to be other existing devices below + * the port. We don't limit the MPS in this case because we can + * reconfigure MPS on both the Root Port and the hot-added device, + * and there are no other devices involved. + * + * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA. 
*/ - if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) || - (dev->bus->self && - pci_pcie_type(dev->bus->self) != PCI_EXP_TYPE_ROOT_PORT))) + if (dev->is_hotplug_bridge && + pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) *smpss = 0; if (*smpss > dev->pcie_mpss) @@ -1583,6 +1587,22 @@ static void pcie_write_mrrs(struct pci_dev *dev) "with pci=pcie_bus_safe.\n"); } +static void pcie_bus_detect_mps(struct pci_dev *dev) +{ + struct pci_dev *bridge = dev->bus->self; + int mps, p_mps; + + if (!bridge) + return; + + mps = pcie_get_mps(dev); + p_mps = pcie_get_mps(bridge); + + if (mps != p_mps) + dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n", + mps, pci_name(bridge), p_mps); +} + static int pcie_bus_configure_set(struct pci_dev *dev, void *data) { int mps, orig_mps; @@ -1590,13 +1610,18 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data) if (!pci_is_pcie(dev)) return 0; + if (pcie_bus_config == PCIE_BUS_TUNE_OFF) { + pcie_bus_detect_mps(dev); + return 0; + } + mps = 128 << *(u8 *)data; orig_mps = pcie_get_mps(dev); pcie_write_mps(dev, mps); pcie_write_mrrs(dev); - dev_info(&dev->dev, "PCI-E Max Payload Size set to %4d/%4d (was %4d), " + dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), " "Max Read Rq %4d\n", pcie_get_mps(dev), 128 << dev->pcie_mpss, orig_mps, pcie_get_readrq(dev)); @@ -1607,25 +1632,25 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data) * parents then children fashion. If this changes, then this code will not * work as designed. */ -void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss) +void pcie_bus_configure_settings(struct pci_bus *bus) { u8 smpss; - if (!pci_is_pcie(bus->self)) + if (!bus->self) return; - if (pcie_bus_config == PCIE_BUS_TUNE_OFF) + if (!pci_is_pcie(bus->self)) return; /* FIXME - Peer to peer DMA is possible, though the endpoint would need - * to be aware to the MPS of the destination. To work around this, + * to be aware of the MPS of the destination. To work around this, * simply force the MPS of the entire system to the smallest possible. 
*/ if (pcie_bus_config == PCIE_BUS_PEER2PEER) smpss = 0; if (pcie_bus_config == PCIE_BUS_SAFE) { - smpss = mpss; + smpss = bus->self->pcie_mpss; pcie_find_smpss(bus->self, &smpss); pci_walk_bus(bus, pcie_find_smpss, &smpss); @@ -1979,7 +2004,6 @@ unsigned int __ref pci_rescan_bus(struct pci_bus *bus) max = pci_scan_child_bus(bus); pci_assign_unassigned_bus_resources(bus); - pci_enable_bridges(bus); pci_bus_add_devices(bus); return max; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index e85d23044ae..f6c31fabf3a 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -3126,9 +3126,6 @@ static int reset_intel_generic_dev(struct pci_dev *dev, int probe) static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe) { - int i; - u16 status; - /* * http://www.intel.com/content/dam/doc/datasheet/82599-10-gbe-controller-datasheet.pdf * @@ -3140,20 +3137,9 @@ static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe) if (probe) return 0; - /* Wait for Transaction Pending bit clean */ - for (i = 0; i < 4; i++) { - if (i) - msleep((1 << (i - 1)) * 100); - - pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status); - if (!(status & PCI_EXP_DEVSTA_TRPND)) - goto clear; - } - - dev_err(&dev->dev, "transaction is not cleared; " - "proceeding with reset anyway\n"); + if (!pci_wait_for_pending_transaction(dev)) + dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n"); -clear: pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); msleep(100); @@ -3208,6 +3194,83 @@ reset_complete: return 0; } +/* + * Device-specific reset method for Chelsio T4-based adapters. + */ +static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe) +{ + u16 old_command; + u16 msix_flags; + + /* + * If this isn't a Chelsio T4-based device, return -ENOTTY indicating + * that we have no device-specific reset method. + */ + if ((dev->device & 0xf000) != 0x4000) + return -ENOTTY; + + /* + * If this is the "probe" phase, return 0 indicating that we can + * reset this device. + */ + if (probe) + return 0; + + /* + * T4 can wedge if there are DMAs in flight within the chip and Bus + * Master has been disabled. We need to have it on till the Function + * Level Reset completes. (BUS_MASTER is disabled in + * pci_reset_function()). + */ + pci_read_config_word(dev, PCI_COMMAND, &old_command); + pci_write_config_word(dev, PCI_COMMAND, + old_command | PCI_COMMAND_MASTER); + + /* + * Perform the actual device function reset, saving and restoring + * configuration information around the reset. + */ + pci_save_state(dev); + + /* + * T4 also suffers a Head-Of-Line blocking problem if MSI-X interrupts + * are disabled when an MSI-X interrupt message needs to be delivered. + * So we briefly re-enable MSI-X interrupts for the duration of the + * FLR. The pci_restore_state() below will restore the original + * MSI-X state. + */ + pci_read_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS, &msix_flags); + if ((msix_flags & PCI_MSIX_FLAGS_ENABLE) == 0) + pci_write_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS, + msix_flags | + PCI_MSIX_FLAGS_ENABLE | + PCI_MSIX_FLAGS_MASKALL); + + /* + * Start of pcie_flr() code sequence. This reset code is a copy of + * the guts of pcie_flr() because that's not an exported function. 
+ */ + + if (!pci_wait_for_pending_transaction(dev)) + dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n"); + + pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); + msleep(100); + + /* + * End of pcie_flr() code sequence. + */ + + /* + * Restore the configuration information (BAR values, etc.) including + * the original PCI Configuration Space Command word, and return + * success. + */ + pci_restore_state(dev); + pci_write_config_word(dev, PCI_COMMAND, old_command); + return 0; +} + #define PCI_DEVICE_ID_INTEL_82599_SFP_VF 0x10ed #define PCI_DEVICE_ID_INTEL_IVB_M_VGA 0x0156 #define PCI_DEVICE_ID_INTEL_IVB_M2_VGA 0x0166 @@ -3221,6 +3284,8 @@ static const struct pci_dev_reset_methods pci_dev_reset_methods[] = { reset_ivb_igd }, { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, reset_intel_generic_dev }, + { PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID, + reset_chelsio_generic_dev }, { 0 } }; @@ -3295,11 +3360,61 @@ struct pci_dev *pci_get_dma_source(struct pci_dev *dev) return pci_dev_get(dev); } +/* + * AMD has indicated that the devices below do not support peer-to-peer + * in any system where they are found in the southbridge with an AMD + * IOMMU in the system. Multifunction devices that do not support + * peer-to-peer between functions can claim to support a subset of ACS. + * Such devices effectively enable request redirect (RR) and completion + * redirect (CR) since all transactions are redirected to the upstream + * root complex. + * + * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/94086 + * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/94102 + * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/99402 + * + * 1002:4385 SBx00 SMBus Controller + * 1002:439c SB7x0/SB8x0/SB9x0 IDE Controller + * 1002:4383 SBx00 Azalia (Intel HDA) + * 1002:439d SB7x0/SB8x0/SB9x0 LPC host controller + * 1002:4384 SBx00 PCI to PCI Bridge + * 1002:4399 SB7x0/SB8x0/SB9x0 USB OHCI2 Controller + */ +static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags) +{ +#ifdef CONFIG_ACPI + struct acpi_table_header *header = NULL; + acpi_status status; + + /* Targeting multifunction devices on the SB (appears on root bus) */ + if (!dev->multifunction || !pci_is_root_bus(dev->bus)) + return -ENODEV; + + /* The IVRS table describes the AMD IOMMU */ + status = acpi_get_table("IVRS", 0, &header); + if (ACPI_FAILURE(status)) + return -ENODEV; + + /* Filter out flags not applicable to multifunction */ + acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT); + + return acs_flags & ~(PCI_ACS_RR | PCI_ACS_CR) ? 
0 : 1; +#else + return -ENODEV; +#endif +} + static const struct pci_dev_acs_enabled { u16 vendor; u16 device; int (*acs_enabled)(struct pci_dev *dev, u16 acs_flags); } pci_dev_acs_enabled[] = { + { PCI_VENDOR_ID_ATI, 0x4385, pci_quirk_amd_sb_acs }, + { PCI_VENDOR_ID_ATI, 0x439c, pci_quirk_amd_sb_acs }, + { PCI_VENDOR_ID_ATI, 0x4383, pci_quirk_amd_sb_acs }, + { PCI_VENDOR_ID_ATI, 0x439d, pci_quirk_amd_sb_acs }, + { PCI_VENDOR_ID_ATI, 0x4384, pci_quirk_amd_sb_acs }, + { PCI_VENDOR_ID_ATI, 0x4399, pci_quirk_amd_sb_acs }, { 0 } }; diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index d254e237953..bc26d7990cc 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -300,6 +300,47 @@ static void assign_requested_resources_sorted(struct list_head *head, } } +static unsigned long pci_fail_res_type_mask(struct list_head *fail_head) +{ + struct pci_dev_resource *fail_res; + unsigned long mask = 0; + + /* check failed type */ + list_for_each_entry(fail_res, fail_head, list) + mask |= fail_res->flags; + + /* + * A failed pref resource also sets IORESOURCE_MEM, because pref + * can be allocated from a non-pref range. All assigned non-pref + * sibling resources will then be released according to that bit. + */ + return mask & (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH); +} + +static bool pci_need_to_release(unsigned long mask, struct resource *res) +{ + if (res->flags & IORESOURCE_IO) + return !!(mask & IORESOURCE_IO); + + /* check pref at first */ + if (res->flags & IORESOURCE_PREFETCH) { + if (mask & IORESOURCE_PREFETCH) + return true; + /* count pref if its parent is non-pref */ + else if ((mask & IORESOURCE_MEM) && + !(res->parent->flags & IORESOURCE_PREFETCH)) + return true; + else + return false; + } + + if (res->flags & IORESOURCE_MEM) + return !!(mask & IORESOURCE_MEM); + + return false; /* should not get here */ +} + static void __assign_resources_sorted(struct list_head *head, struct list_head *realloc_head, struct list_head *fail_head) @@ -312,11 +353,24 @@ static void __assign_resources_sorted(struct list_head *head, * if could do that, could get out early. * if could not do that, we still try to assign requested at first, * then try to reassign add_size for some resources. + * + * Check three resource types separately when deciding whether to + * release assigned resources after the requested + add_size try: + * 1. if the io port assignment failed, release the assigned + * io ports. + * 2. if the pref mmio assignment failed, release the assigned + * pref mmio. + * if an assigned pref mmio's parent is non-pref mmio and the + * non-pref mmio assignment failed, release that assigned + * pref mmio as well. + * 3. if the non-pref mmio assignment or the pref mmio assignment + * failed, release the assigned non-pref mmio. 
*/ LIST_HEAD(save_head); LIST_HEAD(local_fail_head); struct pci_dev_resource *save_res; - struct pci_dev_resource *dev_res; + struct pci_dev_resource *dev_res, *tmp_res; + unsigned long fail_type; /* Check if optional add_size is there */ if (!realloc_head || list_empty(realloc_head)) @@ -348,6 +402,19 @@ static void __assign_resources_sorted(struct list_head *head, return; } + /* check failed type */ + fail_type = pci_fail_res_type_mask(&local_fail_head); + /* remove not need to be released assigned res from head list etc */ + list_for_each_entry_safe(dev_res, tmp_res, head, list) + if (dev_res->res->parent && + !pci_need_to_release(fail_type, dev_res->res)) { + /* remove it from realloc_head list */ + remove_from_list(realloc_head, dev_res->res); + remove_from_list(&save_head, dev_res->res); + list_del(&dev_res->list); + kfree(dev_res); + } + free_list(&local_fail_head); /* Release assigned resource */ list_for_each_entry(dev_res, head, list) @@ -747,14 +814,14 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, { struct pci_dev *dev; struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO); - unsigned long size = 0, size0 = 0, size1 = 0; + resource_size_t size = 0, size0 = 0, size1 = 0; resource_size_t children_add_size = 0; - resource_size_t min_align, io_align, align; + resource_size_t min_align, align; if (!b_res) return; - io_align = min_align = window_alignment(bus, IORESOURCE_IO); + min_align = window_alignment(bus, IORESOURCE_IO); list_for_each_entry(dev, &bus->devices, bus_list) { int i; @@ -781,9 +848,6 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, } } - if (min_align > io_align) - min_align = io_align; - size0 = calculate_iosize(size, min_size, size1, resource_size(b_res), min_align); if (children_add_size > add_size) @@ -807,8 +871,9 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, add_to_list(realloc_head, bus->self, b_res, size1-size0, min_align); dev_printk(KERN_DEBUG, &bus->self->dev, "bridge window " - "%pR to %pR add_size %lx\n", b_res, - &bus->busn_res, size1-size0); + "%pR to %pR add_size %llx\n", b_res, + &bus->busn_res, + (unsigned long long)size1-size0); } } @@ -838,6 +903,8 @@ static inline resource_size_t calculate_mem_align(resource_size_t *aligns, * pbus_size_mem() - size the memory window of a given bus * * @bus : the bus + * @mask: mask the resource flag, then compare it with type + * @type: the type of free resource from bridge * @min_size : the minimum memory window that must to be allocated * @add_size : additional optional memory window * @realloc_head : track the additional memory window on this list @@ -1297,39 +1364,21 @@ static void pci_bus_dump_resources(struct pci_bus *bus) } } -static int __init pci_bus_get_depth(struct pci_bus *bus) +static int pci_bus_get_depth(struct pci_bus *bus) { int depth = 0; - struct pci_dev *dev; + struct pci_bus *child_bus; - list_for_each_entry(dev, &bus->devices, bus_list) { + list_for_each_entry(child_bus, &bus->children, node){ int ret; - struct pci_bus *b = dev->subordinate; - if (!b) - continue; - ret = pci_bus_get_depth(b); + ret = pci_bus_get_depth(child_bus); if (ret + 1 > depth) depth = ret + 1; } return depth; } -static int __init pci_get_max_depth(void) -{ - int depth = 0; - struct pci_bus *bus; - - list_for_each_entry(bus, &pci_root_buses, node) { - int ret; - - ret = pci_bus_get_depth(bus); - if (ret > depth) - depth = ret; - } - - return depth; -} /* * -1: undefined, will auto detect later @@ -1346,7 +1395,7 @@ enum 
enable_type { auto_enabled, }; -static enum enable_type pci_realloc_enable __initdata = undefined; +static enum enable_type pci_realloc_enable = undefined; void __init pci_realloc_get_opt(char *str) { if (!strncmp(str, "off", 3)) @@ -1354,45 +1403,64 @@ void __init pci_realloc_get_opt(char *str) else if (!strncmp(str, "on", 2)) pci_realloc_enable = user_enabled; } -static bool __init pci_realloc_enabled(void) +static bool pci_realloc_enabled(enum enable_type enable) { - return pci_realloc_enable >= user_enabled; + return enable >= user_enabled; } -static void __init pci_realloc_detect(void) -{ #if defined(CONFIG_PCI_IOV) && defined(CONFIG_PCI_REALLOC_ENABLE_AUTO) - struct pci_dev *dev = NULL; - - if (pci_realloc_enable != undefined) - return; - - for_each_pci_dev(dev) { - int i; +static int iov_resources_unassigned(struct pci_dev *dev, void *data) +{ + int i; + bool *unassigned = data; - for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++) { - struct resource *r = &dev->resource[i]; + for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++) { + struct resource *r = &dev->resource[i]; + struct pci_bus_region region; - /* Not assigned, or rejected by kernel ? */ - if (r->flags && !r->start) { - pci_realloc_enable = auto_enabled; + /* Not assigned or rejected by kernel? */ + if (!r->flags) + continue; - return; - } + pcibios_resource_to_bus(dev, ®ion, r); + if (!region.start) { + *unassigned = true; + return 1; /* return early from pci_walk_bus() */ } } -#endif + + return 0; } +static enum enable_type pci_realloc_detect(struct pci_bus *bus, + enum enable_type enable_local) +{ + bool unassigned = false; + + if (enable_local != undefined) + return enable_local; + + pci_walk_bus(bus, iov_resources_unassigned, &unassigned); + if (unassigned) + return auto_enabled; + + return enable_local; +} +#else +static enum enable_type pci_realloc_detect(struct pci_bus *bus, + enum enable_type enable_local) +{ + return enable_local; +} +#endif + /* * first try will not touch pci bridge res * second and later try will clear small leaf bridge res * will stop till to the max deepth if can not find good one */ -void __init -pci_assign_unassigned_resources(void) +void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus) { - struct pci_bus *bus; LIST_HEAD(realloc_head); /* list of resources that want additional resources */ struct list_head *add_list = NULL; @@ -1403,15 +1471,17 @@ pci_assign_unassigned_resources(void) unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH; int pci_try_num = 1; + enum enable_type enable_local; /* don't realloc if asked to do so */ - pci_realloc_detect(); - if (pci_realloc_enabled()) { - int max_depth = pci_get_max_depth(); + enable_local = pci_realloc_detect(bus, pci_realloc_enable); + if (pci_realloc_enabled(enable_local)) { + int max_depth = pci_bus_get_depth(bus); pci_try_num = max_depth + 1; - printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n", - max_depth, pci_try_num); + dev_printk(KERN_DEBUG, &bus->dev, + "max bus depth: %d pci_try_num: %d\n", + max_depth, pci_try_num); } again: @@ -1423,32 +1493,30 @@ again: add_list = &realloc_head; /* Depth first, calculate sizes and alignments of all subordinate buses. */ - list_for_each_entry(bus, &pci_root_buses, node) - __pci_bus_size_bridges(bus, add_list); + __pci_bus_size_bridges(bus, add_list); /* Depth last, allocate resources and update the hardware. 
*/ - list_for_each_entry(bus, &pci_root_buses, node) - __pci_bus_assign_resources(bus, add_list, &fail_head); + __pci_bus_assign_resources(bus, add_list, &fail_head); if (add_list) BUG_ON(!list_empty(add_list)); tried_times++; /* any device complain? */ if (list_empty(&fail_head)) - goto enable_and_dump; + goto dump; if (tried_times >= pci_try_num) { - if (pci_realloc_enable == undefined) - printk(KERN_INFO "Some PCI device resources are unassigned, try booting with pci=realloc\n"); - else if (pci_realloc_enable == auto_enabled) - printk(KERN_INFO "Automatically enabled pci realloc, if you have problem, try booting with pci=realloc=off\n"); + if (enable_local == undefined) + dev_info(&bus->dev, "Some PCI device resources are unassigned, try booting with pci=realloc\n"); + else if (enable_local == auto_enabled) + dev_info(&bus->dev, "Automatically enabled pci realloc, if you have problem, try booting with pci=realloc=off\n"); free_list(&fail_head); - goto enable_and_dump; + goto dump; } - printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n", - tried_times + 1); + dev_printk(KERN_DEBUG, &bus->dev, + "No. %d try to assign unassigned res\n", tried_times + 1); /* third times and later will not check if it is leaf */ if ((tried_times + 1) > 2) @@ -1458,12 +1526,11 @@ again: * Try to release leaf bridge's resources that doesn't fit resource of * child device under that bridge */ - list_for_each_entry(fail_res, &fail_head, list) { - bus = fail_res->dev->bus; - pci_bus_release_bridge_resources(bus, + list_for_each_entry(fail_res, &fail_head, list) + pci_bus_release_bridge_resources(fail_res->dev->bus, fail_res->flags & type_mask, rel_type); - } + /* restore size and flags */ list_for_each_entry(fail_res, &fail_head, list) { struct resource *res = fail_res->res; @@ -1478,14 +1545,17 @@ again: goto again; -enable_and_dump: - /* Depth last, update the hardware. */ - list_for_each_entry(bus, &pci_root_buses, node) - pci_enable_bridges(bus); - +dump: /* dump the resource on buses */ - list_for_each_entry(bus, &pci_root_buses, node) - pci_bus_dump_resources(bus); + pci_bus_dump_resources(bus); +} + +void __init pci_assign_unassigned_resources(void) +{ + struct pci_bus *root_bus; + + list_for_each_entry(root_bus, &pci_root_buses, node) + pci_assign_unassigned_root_bus_resources(root_bus); } void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge) @@ -1522,13 +1592,11 @@ again: * Try to release leaf bridge's resources that doesn't fit resource of * child device under that bridge */ - list_for_each_entry(fail_res, &fail_head, list) { - struct pci_bus *bus = fail_res->dev->bus; - unsigned long flags = fail_res->flags; - - pci_bus_release_bridge_resources(bus, flags & type_mask, + list_for_each_entry(fail_res, &fail_head, list) + pci_bus_release_bridge_resources(fail_res->dev->bus, + fail_res->flags & type_mask, whole_subtree); - } + /* restore size and flags */ list_for_each_entry(fail_res, &fail_head, list) { struct resource *res = fail_res->res; @@ -1548,7 +1616,6 @@ enable_all: if (retval) dev_err(&bridge->dev, "Error reenabling bridge (%d)\n", retval); pci_set_master(bridge); - pci_enable_bridges(parent); } EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources); |
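Taken together, the reset additions above are meant to be tried from least to most disruptive: a function-level reset, then a slot reset, then a secondary bus reset. Below is a sketch of the slot-then-bus fallback a caller (a device-assignment driver, for example) might use; example_reset_below_bridge() is a hypothetical helper, not code from this series.

/*
 * Hypothetical caller of the new slot/bus reset API. The probe variants
 * return 0 when the corresponding reset is available; the non-probe
 * variants save, reset and restore the affected devices themselves.
 */
#include <linux/errno.h>
#include <linux/pci.h>

static int example_reset_below_bridge(struct pci_dev *pdev)
{
	/* Prefer a slot reset: it can suppress spurious hotplug events */
	if (pdev->slot && !pci_probe_reset_slot(pdev->slot))
		return pci_reset_slot(pdev->slot);

	/* Otherwise fall back to resetting the whole secondary bus */
	if (!pci_probe_reset_bus(pdev->bus))
		return pci_reset_bus(pdev->bus);

	return -ENOTTY;
}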