author     Linus Torvalds <torvalds@linux-foundation.org>   2009-09-24 17:22:31 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-09-24 17:22:31 -0700
commit     8e44e4347735229b518cc02938c351428bcd7492 (patch)
tree       e2d7b5998fa8ad76948b2e11591f4534f06f7958 /arch/powerpc/kernel
parent     06aab5a3084e1d825384fa353e6df4c7949c8683 (diff)
parent     09dd3fc19c09f79115267361ecd7d5c5d2c27a3a (diff)
Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
Fix build of cpm_uart due to core changes
powerpc/8xx: Fix regression introduced by cache coherency rewrite
powerpc/4xx: Fix erroneous xmon warning on PowerPC 4xx
powerpc/mm: Fix 40x and 8xx vs. _PAGE_SPECIAL
powerpc: Cleanup linker script using new linker script macros.
powerpc: Fix ibm,client-architecture-support printout
powerpc: Increase NODES_SHIFT on 64bit from 4 to 8
powerpc/perf_counter: Fix vdso detection
powerpc: Move 64bit heap above 1TB on machines with 1TB segments
powerpc: Change archdata dma_data to a union
powerpc: Rename get_dma_direct_offset get_dma_offset
powerpc/mm: Remove duplicated #include
powerpc/book3e-64: Remove duplicated #include
powerpc: Check for unsupported relocs when using CONFIG_RELOCATABLE
powerpc/pmc: Don't access lppaca on Book3E
powerpc: kmalloc failure ignored in vio_build_iommu_table()
hvc_console: Provide (un)locked version for hvc_resize()
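
Note on the DMA accessor changes: two of the commits pulled in above, "powerpc: Change archdata dma_data to a union" and "powerpc: Rename get_dma_direct_offset get_dma_offset", are why the hunks below replace direct dev->archdata.dma_data accesses with small helpers. The helper definitions live in the powerpc headers (asm/device.h and asm/dma-mapping.h), which are outside arch/powerpc/kernel and therefore not shown in this diffstat. The sketch below only illustrates the shape implied by the callers (get_dma_offset/set_dma_offset for the direct-DMA offset, get_iommu_table_base/set_iommu_table_base for the iommu table pointer); it is an inference from the call sites, not the verbatim header code.

/* Illustrative sketch only: field names and fallbacks are inferred from the
 * callers in the hunks below, not copied from the headers. Inside struct
 * dev_archdata, dma_data becomes a union, so a device carries either a plain
 * DMA offset (direct ops) or an iommu_table pointer (iommu ops), never both:
 *
 *        union {
 *                dma_addr_t      dma_offset;
 *                void            *iommu_table_base;
 *        } dma_data;
 */

static inline dma_addr_t get_dma_offset(struct device *dev)
{
        if (dev)
                return dev->archdata.dma_data.dma_offset;
        return PCI_DRAM_OFFSET;         /* same default the removed helper used */
}

static inline void set_dma_offset(struct device *dev, dma_addr_t off)
{
        if (dev)
                dev->archdata.dma_data.dma_offset = off;
}

static inline void *get_iommu_table_base(struct device *dev)
{
        return dev->archdata.dma_data.iommu_table_base;
}

static inline void set_iommu_table_base(struct device *dev, void *base)
{
        dev->archdata.dma_data.iommu_table_base = base;
}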
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/dma-iommu.c       | 16
-rw-r--r--  arch/powerpc/kernel/dma.c             | 15
-rw-r--r--  arch/powerpc/kernel/exceptions-64e.S  |  1
-rw-r--r--  arch/powerpc/kernel/pci-common.c      |  2
-rw-r--r--  arch/powerpc/kernel/process.c         | 17
-rw-r--r--  arch/powerpc/kernel/prom_init.c       |  3
-rw-r--r--  arch/powerpc/kernel/vdso.c            | 14
-rw-r--r--  arch/powerpc/kernel/vio.c             |  4
8 files changed, 44 insertions(+), 28 deletions(-)
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 87ddb3fb948..37771a51811 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -18,7 +18,7 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t flag)
 {
-        return iommu_alloc_coherent(dev, dev->archdata.dma_data, size,
+        return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
                                     dma_handle, device_to_mask(dev), flag,
                                     dev_to_node(dev));
 }
@@ -26,7 +26,7 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
 static void dma_iommu_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t dma_handle)
 {
-        iommu_free_coherent(dev->archdata.dma_data, size, vaddr, dma_handle);
+        iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
 }
 
 /* Creates TCEs for a user provided buffer. The user buffer must be
@@ -39,8 +39,8 @@ static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
                                      enum dma_data_direction direction,
                                      struct dma_attrs *attrs)
 {
-        return iommu_map_page(dev, dev->archdata.dma_data, page, offset, size,
-                              device_to_mask(dev), direction, attrs);
+        return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
+                              size, device_to_mask(dev), direction, attrs);
 }
 
 
@@ -48,7 +48,7 @@ static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
                                  size_t size, enum dma_data_direction direction,
                                  struct dma_attrs *attrs)
 {
-        iommu_unmap_page(dev->archdata.dma_data, dma_handle, size, direction,
+        iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction,
                          attrs);
 }
 
@@ -57,7 +57,7 @@ static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
                             int nelems, enum dma_data_direction direction,
                             struct dma_attrs *attrs)
 {
-        return iommu_map_sg(dev, dev->archdata.dma_data, sglist, nelems,
+        return iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
                             device_to_mask(dev), direction, attrs);
 }
 
@@ -65,14 +65,14 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
                                int nelems, enum dma_data_direction direction,
                                struct dma_attrs *attrs)
 {
-        iommu_unmap_sg(dev->archdata.dma_data, sglist, nelems, direction,
+        iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems, direction,
                        attrs);
 }
 
 /* We support DMA to/from any memory page via the iommu */
 static int dma_iommu_dma_supported(struct device *dev, u64 mask)
 {
-        struct iommu_table *tbl = dev->archdata.dma_data;
+        struct iommu_table *tbl = get_iommu_table_base(dev);
 
         if (!tbl || tbl->it_offset > mask) {
                 printk(KERN_INFO
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 21b784d7e7d..6215062caf8 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -21,13 +21,6 @@
  * default the offset is PCI_DRAM_OFFSET.
  */
 
-unsigned long get_dma_direct_offset(struct device *dev)
-{
-        if (dev)
-                return (unsigned long)dev->archdata.dma_data;
-
-        return PCI_DRAM_OFFSET;
-}
 
 void *dma_direct_alloc_coherent(struct device *dev, size_t size,
                                 dma_addr_t *dma_handle, gfp_t flag)
@@ -37,7 +30,7 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
         ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
         if (ret == NULL)
                 return NULL;
-        *dma_handle += get_dma_direct_offset(dev);
+        *dma_handle += get_dma_offset(dev);
         return ret;
 #else
         struct page *page;
@@ -51,7 +44,7 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
                 return NULL;
         ret = page_address(page);
         memset(ret, 0, size);
-        *dma_handle = virt_to_abs(ret) + get_dma_direct_offset(dev);
+        *dma_handle = virt_to_abs(ret) + get_dma_offset(dev);
         return ret;
 #endif
 
@@ -75,7 +68,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
         int i;
 
         for_each_sg(sgl, sg, nents, i) {
-                sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
+                sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
                 sg->dma_length = sg->length;
                 __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
         }
@@ -110,7 +103,7 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
 {
         BUG_ON(dir == DMA_NONE);
         __dma_sync_page(page, offset, size, dir);
-        return page_to_phys(page) + offset + get_dma_direct_offset(dev);
+        return page_to_phys(page) + offset + get_dma_offset(dev);
 }
 
 static inline void dma_direct_unmap_page(struct device *dev,
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 9048f96237f..24dcc0ecf24 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -17,7 +17,6 @@
 #include <asm/cputable.h>
 #include <asm/setup.h>
 #include <asm/thread_info.h>
-#include <asm/reg.h>
 #include <asm/exception-64e.h>
 #include <asm/bug.h>
 #include <asm/irqflags.h>
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index e9f4840096b..bb8209e3493 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1117,7 +1117,7 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
 
                 /* Hook up default DMA ops */
                 sd->dma_ops = pci_dma_ops;
-                sd->dma_data = (void *)PCI_DRAM_OFFSET;
+                set_dma_offset(&dev->dev, PCI_DRAM_OFFSET);
 
                 /* Additional platform DMA/iommu setup */
                 if (ppc_md.pci_dma_dev_setup)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 0a321643305..1168c5f440a 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1165,7 +1165,22 @@ static inline unsigned long brk_rnd(void)
 
 unsigned long arch_randomize_brk(struct mm_struct *mm)
 {
-        unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
+        unsigned long base = mm->brk;
+        unsigned long ret;
+
+#ifdef CONFIG_PPC64
+        /*
+         * If we are using 1TB segments and we are allowed to randomise
+         * the heap, we can put it above 1TB so it is backed by a 1TB
+         * segment. Otherwise the heap will be in the bottom 1TB
+         * which always uses 256MB segments and this may result in a
+         * performance penalty.
+         */
+        if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
+                base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
+#endif
+
+        ret = PAGE_ALIGN(base + brk_rnd());
 
         if (ret < mm->brk)
                 return mm->brk;
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 864334b337a..bafac2e41ae 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -800,7 +800,7 @@ static void __init prom_send_capabilities(void)
         root = call_prom("open", 1, 1, ADDR("/"));
         if (root != 0) {
                 /* try calling the ibm,client-architecture-support method */
-                prom_printf("Calling ibm,client-architecture...");
+                prom_printf("Calling ibm,client-architecture-support...");
                 if (call_prom_ret("call-method", 3, 2, &ret,
                                   ADDR("ibm,client-architecture-support"),
                                   root,
@@ -814,6 +814,7 @@ static void __init prom_send_capabilities(void)
                         return;
                 }
                 call_prom("close", 1, 0, root);
+                prom_printf(" not implemented\n");
         }
 
         /* no ibm,client-architecture-support call, try the old way */
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 3faaf29bdb2..94e2df3cae0 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -241,6 +241,13 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
         }
 
         /*
+         * Put vDSO base into mm struct. We need to do this before calling
+         * install_special_mapping or the perf counter mmap tracking code
+         * will fail to recognise it as a vDSO (since arch_vma_name fails).
+         */
+        current->mm->context.vdso_base = vdso_base;
+
+        /*
          * our vma flags don't have VM_WRITE so by default, the process isn't
          * allowed to write those pages.
          * gdb can break that with ptrace interface, and thus trigger COW on
@@ -260,11 +267,10 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
                              VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
                              VM_ALWAYSDUMP,
                              vdso_pagelist);
-        if (rc)
+        if (rc) {
+                current->mm->context.vdso_base = 0;
                 goto fail_mmapsem;
-
-        /* Put vDSO base into mm struct */
-        current->mm->context.vdso_base = vdso_base;
+        }
 
         up_write(&mm->mmap_sem);
         return 0;
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index bc7b41edbdf..77f64218abf 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -1054,6 +1054,8 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
                 return NULL;
 
         tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
+        if (tbl == NULL)
+                return NULL;
 
         of_parse_dma_window(dev->dev.archdata.of_node, dma_window,
                             &tbl->it_index, &offset, &size);
@@ -1233,7 +1235,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
                 vio_cmo_set_dma_ops(viodev);
         else
                 viodev->dev.archdata.dma_ops = &dma_iommu_ops;
-        viodev->dev.archdata.dma_data = vio_build_iommu_table(viodev);
+        set_iommu_table_base(&viodev->dev, vio_build_iommu_table(viodev));
         set_dev_node(&viodev->dev, of_node_to_nid(of_node));
 
         /* init generic 'struct device' fields: */
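
A note on the process.c hunk above: SID_SHIFT_1T is the 1TB segment shift (40) in the powerpc hash-MMU headers, so 1UL << SID_SHIFT_1T is the 1TB boundary, and max_t() simply lifts the randomisation base to that boundary whenever the current brk sits below it. The snippet below is a stand-alone userspace illustration of that arithmetic with a made-up brk value; it mirrors the names used in the hunk but is not kernel code.

#include <inttypes.h>
#include <stdio.h>

#define SID_SHIFT_1T    40      /* 1TB segment shift, as used in the hunk above */

int main(void)
{
        uint64_t brk = 0x20000000ULL;                   /* hypothetical brk, 512MB */
        uint64_t one_tb = 1ULL << SID_SHIFT_1T;         /* 0x10000000000 = 1TB */
        uint64_t base = brk > one_tb ? brk : one_tb;    /* what max_t() picks */

        printf("brk  = 0x%" PRIx64 "\n", brk);
        printf("1TB  = 0x%" PRIx64 "\n", one_tb);
        printf("base = 0x%" PRIx64 " (heap is randomised above this)\n", base);
        return 0;
}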