Diffstat (limited to 'arch/powerpc/kernel/pci_64.c')
-rw-r--r--   arch/powerpc/kernel/pci_64.c   750
1 file changed, 126 insertions(+), 624 deletions(-)
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 249cca27a9b..93b2920effc 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -22,6 +22,7 @@
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/irq.h>
+#include <linux/vmalloc.h>
#include <asm/processor.h>
#include <asm/io.h>
@@ -41,35 +42,23 @@
unsigned long pci_probe_only = 1;
int pci_assign_all_buses = 0;
-static int pci_initial_scan_done;
static void fixup_resource(struct resource *res, struct pci_dev *dev);
static void do_bus_setup(struct pci_bus *bus);
-static void phbs_remap_io(void);
/* pci_io_base -- the base address from which io bars are offsets.
* This is the lowest I/O base address (so bar values are always positive),
* and it *must* be the start of ISA space if an ISA bus exists because
- * ISA drivers use hard coded offsets. If no ISA bus exists a dummy
- * page is mapped and isa_io_limit prevents access to it.
+ * ISA drivers use hard coded offsets. If no ISA bus exists nothing
+ * is mapped on the first 64K of IO space
*/
-unsigned long isa_io_base; /* NULL if no ISA bus */
-EXPORT_SYMBOL(isa_io_base);
-unsigned long pci_io_base;
+unsigned long pci_io_base = ISA_IO_BASE;
EXPORT_SYMBOL(pci_io_base);
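With pci_io_base now fixed at ISA_IO_BASE, the first 64K of the remapped I/O window belongs to ISA-style port numbers, so a legacy port access conceptually resolves to _IO_BASE plus the port. A minimal sketch of that relationship (the accessor name is illustrative only; the real inb()/readb() helpers live in asm/io.h):

    /* Sketch only, not part of this patch: how a legacy port number lands
     * inside the kernel I/O window once pci_io_base == ISA_IO_BASE. */
    static inline u8 sketch_inb(unsigned long port)
    {
            /* _IO_BASE is the virtual base of the remapped PCI/ISA I/O space */
            return in_8((volatile u8 __iomem *)(_IO_BASE + port));
    }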
-void iSeries_pcibios_init(void);
-
LIST_HEAD(hose_list);
static struct dma_mapping_ops *pci_dma_ops;
-int global_phb_number; /* Global phb counter */
-
-/* Cached ISA bridge dev. */
-struct pci_dev *ppc64_isabridge_dev = NULL;
-EXPORT_SYMBOL_GPL(ppc64_isabridge_dev);
-
void set_pci_dma_ops(struct dma_mapping_ops *dma_ops)
{
pci_dma_ops = dma_ops;
@@ -100,7 +89,7 @@ void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region
return;
if (res->flags & IORESOURCE_IO)
- offset = (unsigned long)hose->io_base_virt - pci_io_base;
+ offset = (unsigned long)hose->io_base_virt - _IO_BASE;
if (res->flags & IORESOURCE_MEM)
offset = hose->pci_mem_offset;
@@ -119,7 +108,7 @@ void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
return;
if (res->flags & IORESOURCE_IO)
- offset = (unsigned long)hose->io_base_virt - pci_io_base;
+ offset = (unsigned long)hose->io_base_virt - _IO_BASE;
if (res->flags & IORESOURCE_MEM)
offset = hose->pci_mem_offset;
@@ -156,7 +145,7 @@ void pcibios_align_resource(void *data, struct resource *res,
if (res->flags & IORESOURCE_IO) {
unsigned long offset = (unsigned long)hose->io_base_virt -
- pci_io_base;
+ _IO_BASE;
/* Make sure we start at our min on all hoses */
if (start - offset < PCIBIOS_MIN_IO)
start = PCIBIOS_MIN_IO + offset;
@@ -180,55 +169,6 @@ void pcibios_align_resource(void *data, struct resource *res,
res->start = start;
}
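The recurring io_base_virt - _IO_BASE offset in the hunks above is what converts between bus-relative I/O addresses and resource addresses inside the kernel's single I/O region. A hedged worked example with made-up values:

    /* Hypothetical numbers, for illustration only (not from this patch). */
    static unsigned long sketch_io_offset(void)
    {
            unsigned long io_base_virt = 0xd000080000010000ul; /* hose->io_base_virt */
            unsigned long io_base      = 0xd000080000000000ul; /* stands in for _IO_BASE */

            /* A BAR at bus I/O address 0x100 on this hose ends up at resource
             * address 0x100 + 0x10000, i.e. port 0x10100 off _IO_BASE. */
            return io_base_virt - io_base;  /* offset = 0x10000 in this example */
    }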
-static DEFINE_SPINLOCK(hose_spinlock);
-
-/*
- * pci_controller(phb) initialized common variables.
- */
-static void __devinit pci_setup_pci_controller(struct pci_controller *hose)
-{
- memset(hose, 0, sizeof(struct pci_controller));
-
- spin_lock(&hose_spinlock);
- hose->global_number = global_phb_number++;
- list_add_tail(&hose->list_node, &hose_list);
- spin_unlock(&hose_spinlock);
-}
-
-struct pci_controller * pcibios_alloc_controller(struct device_node *dev)
-{
- struct pci_controller *phb;
-
- if (mem_init_done)
- phb = kmalloc(sizeof(struct pci_controller), GFP_KERNEL);
- else
- phb = alloc_bootmem(sizeof (struct pci_controller));
- if (phb == NULL)
- return NULL;
- pci_setup_pci_controller(phb);
- phb->arch_data = dev;
- phb->is_dynamic = mem_init_done;
- if (dev) {
- int nid = of_node_to_nid(dev);
-
- if (nid < 0 || !node_online(nid))
- nid = -1;
-
- PHB_SET_NODE(phb, nid);
- }
- return phb;
-}
-
-void pcibios_free_controller(struct pci_controller *phb)
-{
- spin_lock(&hose_spinlock);
- list_del(&phb->list_node);
- spin_unlock(&hose_spinlock);
-
- if (phb->is_dynamic)
- kfree(phb);
-}
-
void __devinit pcibios_claim_one_bus(struct pci_bus *b)
{
struct pci_dev *dev;
@@ -291,7 +231,6 @@ static unsigned int pci_parse_of_flags(u32 addr0)
return flags;
}
-#define GET_64BIT(prop, i) ((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1])
static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev)
{
@@ -310,8 +249,8 @@ static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev)
flags = pci_parse_of_flags(addrs[0]);
if (!flags)
continue;
- base = GET_64BIT(addrs, 1);
- size = GET_64BIT(addrs, 3);
+ base = of_read_number(&addrs[1], 2);
+ size = of_read_number(&addrs[3], 2);
if (!size)
continue;
i = addrs[0] & 0xff;
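of_read_number() replaces the open-coded GET_64BIT macro: it assembles size 32-bit device-tree cells, most significant cell first, into a u64. A rough equivalent of the helper, for reference (the real definition lives in asm-powerpc/prom.h):

    /* Rough equivalent of of_read_number(cell, size); reference sketch only. */
    static inline u64 sketch_read_number(const u32 *cell, int size)
    {
            u64 r = 0;

            while (size--)
                    r = (r << 32) | *(cell++);
            return r;
    }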
@@ -477,7 +416,7 @@ void __devinit of_scan_pci_bridge(struct device_node *node,
i = 1;
for (; len >= 32; len -= 32, ranges += 8) {
flags = pci_parse_of_flags(ranges[0]);
- size = GET_64BIT(ranges, 6);
+ size = of_read_number(&ranges[6], 2);
if (flags == 0 || size == 0)
continue;
if (flags & IORESOURCE_IO) {
@@ -496,7 +435,7 @@ void __devinit of_scan_pci_bridge(struct device_node *node,
res = bus->resource[i];
++i;
}
- res->start = GET_64BIT(ranges, 1);
+ res->start = of_read_number(&ranges[1], 2);
res->end = res->start + size - 1;
res->flags = flags;
fixup_resource(res, dev);
@@ -535,10 +474,16 @@ void __devinit scan_phb(struct pci_controller *hose)
bus->secondary = hose->first_busno;
hose->bus = bus;
+ if (!firmware_has_feature(FW_FEATURE_ISERIES))
+ pcibios_map_io_space(bus);
+
bus->resource[0] = res = &hose->io_resource;
- if (res->flags && request_resource(&ioport_resource, res))
+ if (res->flags && request_resource(&ioport_resource, res)) {
printk(KERN_ERR "Failed to request PCI IO region "
"on PCI domain %04x\n", hose->global_number);
+ DBG("res->start = 0x%016lx, res->end = 0x%016lx\n",
+ res->start, res->end);
+ }
for (i = 0; i < 3; ++i) {
res = &hose->mem_resources[i];
@@ -596,17 +541,6 @@ static int __init pcibios_init(void)
if (ppc_md.pcibios_fixup)
ppc_md.pcibios_fixup();
- /* Cache the location of the ISA bridge (if we have one) */
- ppc64_isabridge_dev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
- if (ppc64_isabridge_dev != NULL)
- printk(KERN_DEBUG "ISA bridge at %s\n", pci_name(ppc64_isabridge_dev));
-
- if (!firmware_has_feature(FW_FEATURE_ISERIES))
- /* map in PCI I/O space */
- phbs_remap_io();
-
- pci_initial_scan_done = 1;
-
printk(KERN_DEBUG "PCI: Probing PCI hardware done\n");
return 0;
@@ -614,11 +548,6 @@ static int __init pcibios_init(void)
subsys_initcall(pcibios_init);
-char __init *pcibios_setup(char *str)
-{
- return str;
-}
-
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
u16 cmd, oldcmd;
@@ -649,22 +578,6 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
return 0;
}
-/*
- * Return the domain number for this bus.
- */
-int pci_domain_nr(struct pci_bus *bus)
-{
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- return 0;
- else {
- struct pci_controller *hose = pci_bus_to_host(bus);
-
- return hose->global_number;
- }
-}
-
-EXPORT_SYMBOL(pci_domain_nr);
-
/* Decide whether to display the domain number in /proc */
int pci_proc_domain(struct pci_bus *bus)
{
@@ -676,281 +589,6 @@ int pci_proc_domain(struct pci_bus *bus)
}
}
-/*
- * Platform support for /proc/bus/pci/X/Y mmap()s,
- * modelled on the sparc64 implementation by Dave Miller.
- * -- paulus.
- */
-
-/*
- * Adjust vm_pgoff of VMA such that it is the physical page offset
- * corresponding to the 32-bit pci bus offset for DEV requested by the user.
- *
- * Basically, the user finds the base address for his device which he wishes
- * to mmap. They read the 32-bit value from the config space base register,
- * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
- * offset parameter of mmap on /proc/bus/pci/XXX for that device.
- *
- * Returns negative error code on failure, zero on success.
- */
-static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
- resource_size_t *offset,
- enum pci_mmap_state mmap_state)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- unsigned long io_offset = 0;
- int i, res_bit;
-
- if (hose == 0)
- return NULL; /* should never happen */
-
- /* If memory, add on the PCI bridge address offset */
- if (mmap_state == pci_mmap_mem) {
-#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
- *offset += hose->pci_mem_offset;
-#endif
- res_bit = IORESOURCE_MEM;
- } else {
- io_offset = (unsigned long)hose->io_base_virt - pci_io_base;
- *offset += io_offset;
- res_bit = IORESOURCE_IO;
- }
-
- /*
- * Check that the offset requested corresponds to one of the
- * resources of the device.
- */
- for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
- struct resource *rp = &dev->resource[i];
- int flags = rp->flags;
-
- /* treat ROM as memory (should be already) */
- if (i == PCI_ROM_RESOURCE)
- flags |= IORESOURCE_MEM;
-
- /* Active and same type? */
- if ((flags & res_bit) == 0)
- continue;
-
- /* In the range of this resource? */
- if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
- continue;
-
- /* found it! construct the final physical address */
- if (mmap_state == pci_mmap_io)
- *offset += hose->io_base_phys - io_offset;
- return rp;
- }
-
- return NULL;
-}
-
-/*
- * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
- * device mapping.
- */
-static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
- pgprot_t protection,
- enum pci_mmap_state mmap_state,
- int write_combine)
-{
- unsigned long prot = pgprot_val(protection);
-
- /* Write combine is always 0 on non-memory space mappings. On
- * memory space, if the user didn't pass 1, we check for a
- * "prefetchable" resource. This is a bit hackish, but we use
- * this to workaround the inability of /sysfs to provide a write
- * combine bit
- */
- if (mmap_state != pci_mmap_mem)
- write_combine = 0;
- else if (write_combine == 0) {
- if (rp->flags & IORESOURCE_PREFETCH)
- write_combine = 1;
- }
-
- /* XXX would be nice to have a way to ask for write-through */
- prot |= _PAGE_NO_CACHE;
- if (write_combine)
- prot &= ~_PAGE_GUARDED;
- else
- prot |= _PAGE_GUARDED;
-
- return __pgprot(prot);
-}
-
-/*
- * This one is used by /dev/mem and fbdev who have no clue about the
- * PCI device, it tries to find the PCI device first and calls the
- * above routine
- */
-pgprot_t pci_phys_mem_access_prot(struct file *file,
- unsigned long pfn,
- unsigned long size,
- pgprot_t protection)
-{
- struct pci_dev *pdev = NULL;
- struct resource *found = NULL;
- unsigned long prot = pgprot_val(protection);
- unsigned long offset = pfn << PAGE_SHIFT;
- int i;
-
- if (page_is_ram(pfn))
- return __pgprot(prot);
-
- prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
-
- for_each_pci_dev(pdev) {
- for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
- struct resource *rp = &pdev->resource[i];
- int flags = rp->flags;
-
- /* Active and same type? */
- if ((flags & IORESOURCE_MEM) == 0)
- continue;
- /* In the range of this resource? */
- if (offset < (rp->start & PAGE_MASK) ||
- offset > rp->end)
- continue;
- found = rp;
- break;
- }
- if (found)
- break;
- }
- if (found) {
- if (found->flags & IORESOURCE_PREFETCH)
- prot &= ~_PAGE_GUARDED;
- pci_dev_put(pdev);
- }
-
- DBG("non-PCI map for %lx, prot: %lx\n", offset, prot);
-
- return __pgprot(prot);
-}
-
-
-/*
- * Perform the actual remap of the pages for a PCI device mapping, as
- * appropriate for this architecture. The region in the process to map
- * is described by vm_start and vm_end members of VMA, the base physical
- * address is found in vm_pgoff.
- * The pci device structure is provided so that architectures may make mapping
- * decisions on a per-device or per-bus basis.
- *
- * Returns a negative error code on failure, zero on success.
- */
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine)
-{
- resource_size_t offset = vma->vm_pgoff << PAGE_SHIFT;
- struct resource *rp;
- int ret;
-
- rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
- if (rp == NULL)
- return -EINVAL;
-
- vma->vm_pgoff = offset >> PAGE_SHIFT;
- vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
- vma->vm_page_prot,
- mmap_state, write_combine);
-
- ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- vma->vm_end - vma->vm_start, vma->vm_page_prot);
-
- return ret;
-}
-
-static ssize_t pci_show_devspec(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pci_dev *pdev;
- struct device_node *np;
-
- pdev = to_pci_dev (dev);
- np = pci_device_to_OF_node(pdev);
- if (np == NULL || np->full_name == NULL)
- return 0;
- return sprintf(buf, "%s", np->full_name);
-}
-static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
-
-void pcibios_add_platform_entries(struct pci_dev *pdev)
-{
- device_create_file(&pdev->dev, &dev_attr_devspec);
-}
-
-#define ISA_SPACE_MASK 0x1
-#define ISA_SPACE_IO 0x1
-
-static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node,
- unsigned long phb_io_base_phys,
- void __iomem * phb_io_base_virt)
-{
- /* Remove these asap */
-
- struct pci_address {
- u32 a_hi;
- u32 a_mid;
- u32 a_lo;
- };
-
- struct isa_address {
- u32 a_hi;
- u32 a_lo;
- };
-
- struct isa_range {
- struct isa_address isa_addr;
- struct pci_address pci_addr;
- unsigned int size;
- };
-
- const struct isa_range *range;
- unsigned long pci_addr;
- unsigned int isa_addr;
- unsigned int size;
- int rlen = 0;
-
- range = of_get_property(isa_node, "ranges", &rlen);
- if (range == NULL || (rlen < sizeof(struct isa_range))) {
- printk(KERN_ERR "no ISA ranges or unexpected isa range size,"
- "mapping 64k\n");
- __ioremap_explicit(phb_io_base_phys,
- (unsigned long)phb_io_base_virt,
- 0x10000, _PAGE_NO_CACHE | _PAGE_GUARDED);
- return;
- }
-
- /* From "ISA Binding to 1275"
- * The ranges property is laid out as an array of elements,
- * each of which comprises:
- * cells 0 - 1: an ISA address
- * cells 2 - 4: a PCI address
- * (size depending on dev->n_addr_cells)
- * cell 5: the size of the range
- */
- if ((range->isa_addr.a_hi && ISA_SPACE_MASK) == ISA_SPACE_IO) {
- isa_addr = range->isa_addr.a_lo;
- pci_addr = (unsigned long) range->pci_addr.a_mid << 32 |
- range->pci_addr.a_lo;
-
- /* Assume these are both zero */
- if ((pci_addr != 0) || (isa_addr != 0)) {
- printk(KERN_ERR "unexpected isa to pci mapping: %s\n",
- __FUNCTION__);
- return;
- }
-
- size = PAGE_ALIGN(range->size);
-
- __ioremap_explicit(phb_io_base_phys,
- (unsigned long) phb_io_base_virt,
- size, _PAGE_NO_CACHE | _PAGE_GUARDED);
- }
-}
-
void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
struct device_node *dev, int prim)
{
@@ -1045,155 +683,122 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
}
}
-void __devinit pci_setup_phb_io(struct pci_controller *hose, int primary)
+#ifdef CONFIG_HOTPLUG
+
+int pcibios_unmap_io_space(struct pci_bus *bus)
{
- unsigned long size = hose->pci_io_size;
- unsigned long io_virt_offset;
- struct resource *res;
- struct device_node *isa_dn;
+ struct pci_controller *hose;
- if (size == 0)
- return;
+ WARN_ON(bus == NULL);
- hose->io_base_virt = reserve_phb_iospace(size);
- DBG("phb%d io_base_phys 0x%lx io_base_virt 0x%lx\n",
- hose->global_number, hose->io_base_phys,
- (unsigned long) hose->io_base_virt);
-
- if (primary) {
- pci_io_base = (unsigned long)hose->io_base_virt;
- isa_dn = of_find_node_by_type(NULL, "isa");
- if (isa_dn) {
- isa_io_base = pci_io_base;
- pci_process_ISA_OF_ranges(isa_dn, hose->io_base_phys,
- hose->io_base_virt);
- of_node_put(isa_dn);
- }
- }
+ /* If this is not a PHB, we only flush the hash table over
+ * the area mapped by this bridge. We don't play with the PTE
+ * mappings since we might have to deal with sub-page alignments
+ * so flushing the hash table is the only sane way to make sure
+ * that no hash entries are covering that removed bridge area
+ * while still allowing other busses overlapping those pages
+ */
+ if (bus->self) {
+ struct resource *res = bus->resource[0];
- io_virt_offset = (unsigned long)hose->io_base_virt - pci_io_base;
- res = &hose->io_resource;
- res->start += io_virt_offset;
- res->end += io_virt_offset;
+ DBG("IO unmapping for PCI-PCI bridge %s\n",
+ pci_name(bus->self));
- /* If this is called after the initial PCI scan, then we need to
- * proceed to IO mappings now
- */
- if (pci_initial_scan_done)
- __ioremap_explicit(hose->io_base_phys,
- (unsigned long)hose->io_base_virt,
- hose->pci_io_size,
- _PAGE_NO_CACHE | _PAGE_GUARDED);
-}
+ __flush_hash_table_range(&init_mm, res->start + _IO_BASE,
+ res->end - res->start + 1);
+ return 0;
+ }
-void __devinit pci_setup_phb_io_dynamic(struct pci_controller *hose,
- int primary)
-{
- unsigned long size = hose->pci_io_size;
- unsigned long io_virt_offset;
- struct resource *res;
+ /* Get the host bridge */
+ hose = pci_bus_to_host(bus);
- if (size == 0)
- return;
+ /* Check if we have IOs allocated */
+ if (hose->io_base_alloc == 0)
+ return 0;
- hose->io_base_virt = __ioremap(hose->io_base_phys, size,
- _PAGE_NO_CACHE | _PAGE_GUARDED);
- DBG("phb%d io_base_phys 0x%lx io_base_virt 0x%lx\n",
- hose->global_number, hose->io_base_phys,
- (unsigned long) hose->io_base_virt);
+ DBG("IO unmapping for PHB %s\n",
+ ((struct device_node *)hose->arch_data)->full_name);
+ DBG(" alloc=0x%p\n", hose->io_base_alloc);
- if (primary)
- pci_io_base = (unsigned long)hose->io_base_virt;
+ /* This is a PHB, we fully unmap the IO area */
+ vunmap(hose->io_base_alloc);
- io_virt_offset = (unsigned long)hose->io_base_virt - pci_io_base;
- res = &hose->io_resource;
- res->start += io_virt_offset;
- res->end += io_virt_offset;
+ return 0;
}
+EXPORT_SYMBOL_GPL(pcibios_unmap_io_space);
+#endif /* CONFIG_HOTPLUG */
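pcibios_unmap_io_space() is exported for PHB hotplug; a hypothetical remove path (the sequencing here is an assumption, not something dictated by this patch) would drop the I/O window before freeing the controller:

    /* Hypothetical PHB hot-remove sketch; the ordering is an assumption. */
    static void sketch_remove_phb(struct pci_controller *hose)
    {
            /* ... devices on the bus are assumed already torn down ... */

            /* release the PHB's virtual I/O window (vunmap of io_base_alloc) */
            pcibios_unmap_io_space(hose->bus);

            /* the controller itself can now go away */
            pcibios_free_controller(hose);
    }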
-static int get_bus_io_range(struct pci_bus *bus, unsigned long *start_phys,
- unsigned long *start_virt, unsigned long *size)
+int __devinit pcibios_map_io_space(struct pci_bus *bus)
{
- struct pci_controller *hose = pci_bus_to_host(bus);
- struct resource *res;
-
- if (bus->self)
- res = bus->resource[0];
- else
- /* Root Bus */
- res = &hose->io_resource;
-
- if (res->end == 0 && res->start == 0)
- return 1;
+ struct vm_struct *area;
+ unsigned long phys_page;
+ unsigned long size_page;
+ unsigned long io_virt_offset;
+ struct pci_controller *hose;
- *start_virt = pci_io_base + res->start;
- *start_phys = *start_virt + hose->io_base_phys
- - (unsigned long) hose->io_base_virt;
+ WARN_ON(bus == NULL);
- if (res->end > res->start)
- *size = res->end - res->start + 1;
- else {
- printk("%s(): unexpected region 0x%lx->0x%lx\n",
- __FUNCTION__, res->start, res->end);
- return 1;
+ /* If this is not a PHB, nothing to do, page tables still exist and
+ * thus HPTEs will be faulted in when needed
+ */
+ if (bus->self) {
+ DBG("IO mapping for PCI-PCI bridge %s\n",
+ pci_name(bus->self));
+ DBG(" virt=0x%016lx...0x%016lx\n",
+ bus->resource[0]->start + _IO_BASE,
+ bus->resource[0]->end + _IO_BASE);
+ return 0;
}
- return 0;
-}
-
-int unmap_bus_range(struct pci_bus *bus)
-{
- unsigned long start_phys;
- unsigned long start_virt;
- unsigned long size;
+ /* Get the host bridge */
+ hose = pci_bus_to_host(bus);
+ phys_page = _ALIGN_DOWN(hose->io_base_phys, PAGE_SIZE);
+ size_page = _ALIGN_UP(hose->pci_io_size, PAGE_SIZE);
- if (!bus) {
- printk(KERN_ERR "%s() expected bus\n", __FUNCTION__);
- return 1;
- }
-
- if (get_bus_io_range(bus, &start_phys, &start_virt, &size))
- return 1;
- if (__iounmap_explicit((void __iomem *) start_virt, size))
- return 1;
-
- return 0;
-}
-EXPORT_SYMBOL(unmap_bus_range);
+ /* Make sure IO area address is clear */
+ hose->io_base_alloc = NULL;
-int remap_bus_range(struct pci_bus *bus)
-{
- unsigned long start_phys;
- unsigned long start_virt;
- unsigned long size;
+ /* If there's no IO to map on that bus, get away too */
+ if (hose->pci_io_size == 0 || hose->io_base_phys == 0)
+ return 0;
- if (!bus) {
- printk(KERN_ERR "%s() expected bus\n", __FUNCTION__);
- return 1;
- }
-
-
- if (get_bus_io_range(bus, &start_phys, &start_virt, &size))
- return 1;
- if (start_phys == 0)
- return 1;
- printk(KERN_DEBUG "mapping IO %lx -> %lx, size: %lx\n", start_phys, start_virt, size);
- if (__ioremap_explicit(start_phys, start_virt, size,
- _PAGE_NO_CACHE | _PAGE_GUARDED))
- return 1;
+ /* Let's allocate some IO space for that guy. We don't pass
+ * VM_IOREMAP because we don't care about alignment tricks that
+ * the core does in that case. Maybe we should, to cope with stupid
+ * cards that have incomplete address decoding, but I'd rather not
+ * deal with those outside of the reserved 64K legacy region.
+ */
+ area = __get_vm_area(size_page, 0, PHB_IO_BASE, PHB_IO_END);
+ if (area == NULL)
+ return -ENOMEM;
+ hose->io_base_alloc = area->addr;
+ hose->io_base_virt = (void __iomem *)(area->addr +
+ hose->io_base_phys - phys_page);
+
+ DBG("IO mapping for PHB %s\n",
+ ((struct device_node *)hose->arch_data)->full_name);
+ DBG(" phys=0x%016lx, virt=0x%p (alloc=0x%p)\n",
+ hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc);
+ DBG(" size=0x%016lx (alloc=0x%016lx)\n",
+ hose->pci_io_size, size_page);
+
+ /* Establish the mapping */
+ if (__ioremap_at(phys_page, area->addr, size_page,
+ _PAGE_NO_CACHE | _PAGE_GUARDED) == NULL)
+ return -ENOMEM;
+
+ /* Fixup hose IO resource */
+ io_virt_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
+ hose->io_resource.start += io_virt_offset;
+ hose->io_resource.end += io_virt_offset;
+
+ DBG(" hose->io_resource=0x%016lx...0x%016lx\n",
+ hose->io_resource.start, hose->io_resource.end);
return 0;
}
-EXPORT_SYMBOL(remap_bus_range);
-
-static void phbs_remap_io(void)
-{
- struct pci_controller *hose, *tmp;
-
- list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
- remap_bus_range(hose->bus);
-}
+EXPORT_SYMBOL_GPL(pcibios_map_io_space);
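The _ALIGN_DOWN/_ALIGN_UP pair above matters when a PHB's I/O window does not start on a page boundary: the mapping itself is page aligned while io_base_virt keeps the sub-page offset. A worked example with hypothetical numbers (4K pages assumed):

    /* Hypothetical values illustrating the alignment logic above. */
    static void sketch_io_window_alignment(void)
    {
            unsigned long io_base_phys = 0x3fe00008100ul;   /* not page aligned */
            unsigned long pci_io_size  = 0x10000;           /* 64K of I/O space */

            unsigned long phys_page = io_base_phys & ~0xffful;            /* _ALIGN_DOWN: ...08000 */
            unsigned long size_page = (pci_io_size + 0xffful) & ~0xffful; /* _ALIGN_UP: 0x10000 */

            /* io_base_virt = area->addr + (io_base_phys - phys_page)
             *              = area->addr + 0x100 in this example */
            (void)phys_page;
            (void)size_page;
    }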
static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
{
@@ -1201,8 +806,7 @@ static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
unsigned long offset;
if (res->flags & IORESOURCE_IO) {
- offset = (unsigned long)hose->io_base_virt - pci_io_base;
-
+ offset = (unsigned long)hose->io_base_virt - _IO_BASE;
res->start += offset;
res->end += offset;
} else if (res->flags & IORESOURCE_MEM) {
@@ -1217,9 +821,20 @@ void __devinit pcibios_fixup_device_resources(struct pci_dev *dev,
/* Update device resources. */
int i;
- for (i = 0; i < PCI_NUM_RESOURCES; i++)
- if (dev->resource[i].flags)
- fixup_resource(&dev->resource[i], dev);
+ DBG("%s: Fixup resources:\n", pci_name(dev));
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ struct resource *res = &dev->resource[i];
+ if (!res->flags)
+ continue;
+
+ DBG(" 0x%02x < %08lx:0x%016lx...0x%016lx\n",
+ i, res->flags, res->start, res->end);
+
+ fixup_resource(res, dev);
+
+ DBG(" > %08lx:0x%016lx...0x%016lx\n",
+ res->flags, res->start, res->end);
+ }
}
EXPORT_SYMBOL(pcibios_fixup_device_resources);
@@ -1289,119 +904,6 @@ void __devinit pcibios_fixup_bus(struct pci_bus *bus)
}
EXPORT_SYMBOL(pcibios_fixup_bus);
-/*
- * Reads the interrupt pin to determine if interrupt is use by card.
- * If the interrupt is used, then gets the interrupt line from the
- * openfirmware and sets it in the pci_dev and pci_config line.
- */
-int pci_read_irq_line(struct pci_dev *pci_dev)
-{
- struct of_irq oirq;
- unsigned int virq;
-
- DBG("Try to map irq for %s...\n", pci_name(pci_dev));
-
-#ifdef DEBUG
- memset(&oirq, 0xff, sizeof(oirq));
-#endif
- /* Try to get a mapping from the device-tree */
- if (of_irq_map_pci(pci_dev, &oirq)) {
- u8 line, pin;
-
- /* If that fails, lets fallback to what is in the config
- * space and map that through the default controller. We
- * also set the type to level low since that's what PCI
- * interrupts are. If your platform does differently, then
- * either provide a proper interrupt tree or don't use this
- * function.
- */
- if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
- return -1;
- if (pin == 0)
- return -1;
- if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
- line == 0xff) {
- return -1;
- }
- DBG(" -> no map ! Using irq line %d from PCI config\n", line);
-
- virq = irq_create_mapping(NULL, line);
- if (virq != NO_IRQ)
- set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
- } else {
- DBG(" -> got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
- oirq.size, oirq.specifier[0], oirq.specifier[1],
- oirq.controller->full_name);
-
- virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
- oirq.size);
- }
- if(virq == NO_IRQ) {
- DBG(" -> failed to map !\n");
- return -1;
- }
-
- DBG(" -> mapped to linux irq %d\n", virq);
-
- pci_dev->irq = virq;
-
- return 0;
-}
-EXPORT_SYMBOL(pci_read_irq_line);
-
-void pci_resource_to_user(const struct pci_dev *dev, int bar,
- const struct resource *rsrc,
- resource_size_t *start, resource_size_t *end)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- resource_size_t offset = 0;
-
- if (hose == NULL)
- return;
-
- if (rsrc->flags & IORESOURCE_IO)
- offset = (unsigned long)hose->io_base_virt - pci_io_base;
-
- /* We pass a fully fixed up address to userland for MMIO instead of
- * a BAR value because X is lame and expects to be able to use that
- * to pass to /dev/mem !
- *
- * That means that we'll have potentially 64 bits values where some
- * userland apps only expect 32 (like X itself since it thinks only
- * Sparc has 64 bits MMIO) but if we don't do that, we break it on
- * 32 bits CHRPs :-(
- *
- * Hopefully, the sysfs insterface is immune to that gunk. Once X
- * has been fixed (and the fix spread enough), we can re-enable the
- * 2 lines below and pass down a BAR value to userland. In that case
- * we'll also have to re-enable the matching code in
- * __pci_mmap_make_offset().
- *
- * BenH.
- */
-#if 0
- else if (rsrc->flags & IORESOURCE_MEM)
- offset = hose->pci_mem_offset;
-#endif
-
- *start = rsrc->start - offset;
- *end = rsrc->end - offset;
-}
-
-struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
-{
- if (!have_of)
- return NULL;
- while(node) {
- struct pci_controller *hose, *tmp;
- list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
- if (hose->arch_data == node)
- return hose;
- node = node->parent;
- }
- return NULL;
-}
-
unsigned long pci_address_to_pio(phys_addr_t address)
{
struct pci_controller *hose, *tmp;
@@ -1410,7 +912,7 @@ unsigned long pci_address_to_pio(phys_addr_t address)
if (address >= hose->io_base_phys &&
address < (hose->io_base_phys + hose->pci_io_size)) {
unsigned long base =
- (unsigned long)hose->io_base_virt - pci_io_base;
+ (unsigned long)hose->io_base_virt - _IO_BASE;
return base + (address - hose->io_base_phys);
}
}
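pci_address_to_pio() performs the inverse translation: a CPU-physical address inside some hose's I/O window becomes a port number relative to _IO_BASE. A hedged sketch of the arithmetic for a single hose, again with made-up values:

    /* Hypothetical values for one hose, illustrating the translation above. */
    static unsigned long sketch_address_to_pio(unsigned long address)
    {
            unsigned long io_base_phys = 0x3fe00008000ul;       /* hose->io_base_phys */
            unsigned long io_base_virt = 0xd000080000010000ul;  /* hose->io_base_virt */
            unsigned long io_base      = 0xd000080000000000ul;  /* stands in for _IO_BASE */

            unsigned long base = io_base_virt - io_base;  /* first port of this hose */

            /* e.g. address == io_base_phys + 0x3f8 yields port base + 0x3f8 */
            return base + (address - io_base_phys);
    }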