From 0b59e38ffaf7b201ff6afe5b736365d16848c7e3 Mon Sep 17 00:00:00 2001
From: Paul Mundt <lethal@linux-sh.org>
Date: Mon, 18 Jan 2010 21:21:32 +0900
Subject: sh: Merge _32/_64 ioremap implementations.

There is nothing of interest in the _64 version anymore, so the _32 one
can be renamed and used unconditionally.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
---
 arch/sh/mm/ioremap.c | 171 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 171 insertions(+)
 create mode 100644 arch/sh/mm/ioremap.c
(limited to 'arch/sh/mm/ioremap.c')

diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
new file mode 100644
index 00000000000..24f6ba6bff7
--- /dev/null
+++ b/arch/sh/mm/ioremap.c
@@ -0,0 +1,171 @@
+/*
+ * arch/sh/mm/ioremap.c
+ *
+ * (C) Copyright 1995 1996 Linus Torvalds
+ * (C) Copyright 2005 - 2010  Paul Mundt
+ *
+ * Re-map IO memory to kernel address space so that we can access it.
+ * This is needed for high PCI addresses that aren't mapped in the
+ * 640k-1MB IO memory area on PC's
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ */
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/addrspace.h>
+#include <asm/cache.h>
+#include <asm/pgalloc.h>
+#include <asm/mmu_context.h>
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space. Needed when the kernel wants to access high addresses
+ * directly.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ */
+void __iomem *__ioremap_caller(unsigned long phys_addr, unsigned long size,
+			       unsigned long flags, void *caller)
+{
+	struct vm_struct *area;
+	unsigned long offset, last_addr, addr, orig_addr;
+	pgprot_t pgprot;
+
+	/* Don't allow wraparound or zero size */
+	last_addr = phys_addr + size - 1;
+	if (!size || last_addr < phys_addr)
+		return NULL;
+
+	/*
+	 * If we're in the fixed PCI memory range, mapping through page
+	 * tables is not only pointless, but also fundamentally broken.
+	 * Just return the physical address instead.
+	 *
+	 * For boards that map a small PCI memory aperture somewhere in
+	 * P1/P2 space, ioremap() will already do the right thing,
+	 * and we'll never get this far.
+	 */
+	if (is_pci_memory_fixed_range(phys_addr, size))
+		return (void __iomem *)phys_addr;
+
+	/*
+	 * Mappings have to be page-aligned
+	 */
+	offset = phys_addr & ~PAGE_MASK;
+	phys_addr &= PAGE_MASK;
+	size = PAGE_ALIGN(last_addr+1) - phys_addr;
+
+	/*
+	 * If we can't yet use the regular approach, go the fixmap route.
+	 */
+	if (!mem_init_done)
+		return ioremap_fixed(phys_addr, size, __pgprot(flags));
+
+	/*
+	 * Ok, go for it..
+	 */
+	area = get_vm_area_caller(size, VM_IOREMAP, caller);
+	if (!area)
+		return NULL;
+	area->phys_addr = phys_addr;
+	orig_addr = addr = (unsigned long)area->addr;
+
+#ifdef CONFIG_PMB
+	/*
+	 * First try to remap through the PMB once a valid VMA has been
+	 * established. Smaller allocations (or the rest of the size
+	 * remaining after a PMB mapping due to the size not being
+	 * perfectly aligned on a PMB size boundary) are then mapped
+	 * through the UTLB using conventional page tables.
+	 *
+	 * PMB entries are all pre-faulted.
+	 */
+	if (unlikely(phys_addr >= P1SEG)) {
+		unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);
+
+		if (likely(mapped)) {
+			addr		+= mapped;
+			phys_addr	+= mapped;
+			size		-= mapped;
+		}
+	}
+#endif
+
+	pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
+	if (likely(size))
+		if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
+			vunmap((void *)orig_addr);
+			return NULL;
+		}
+
+	return (void __iomem *)(offset + (char *)orig_addr);
+}
+EXPORT_SYMBOL(__ioremap_caller);
+
+/*
+ * Simple checks for non-translatable mappings.
+ */
+static inline int iomapping_nontranslatable(unsigned long offset)
+{
+#ifdef CONFIG_29BIT
+	/*
+	 * In 29-bit mode this includes the fixed P1/P2 areas, as well as
+	 * parts of P3.
+	 */
+	if (PXSEG(offset) < P3SEG || offset >= P3_ADDR_MAX)
+		return 1;
+#endif
+
+	if (is_pci_memory_fixed_range(offset, 0))
+		return 1;
+
+	return 0;
+}
+
+void __iounmap(void __iomem *addr)
+{
+	unsigned long vaddr = (unsigned long __force)addr;
+	struct vm_struct *p;
+
+	/*
+	 * Nothing to do if there is no translatable mapping.
+	 */
+	if (iomapping_nontranslatable(vaddr))
+		return;
+
+#ifdef CONFIG_PMB
+	/*
+	 * Purge any PMB entries that may have been established for this
+	 * mapping, then proceed with conventional VMA teardown.
+	 *
+	 * XXX: Note that due to the way that remove_vm_area() does
+	 * matching of the resultant VMA, we aren't able to fast-forward
+	 * the address past the PMB space until the end of the VMA where
+	 * the page tables reside. As such, unmap_vm_area() will be
+	 * forced to linearly scan over the area until it finds the page
+	 * tables where PTEs that need to be unmapped actually reside,
+	 * which is far from optimal. Perhaps we need to use a separate
+	 * VMA for the PMB mappings?
+	 *
+	 *					-- PFM.
+	 */
+	pmb_unmap(vaddr);
+#endif
+
+	p = remove_vm_area((void *)(vaddr & PAGE_MASK));
+	if (!p) {
+		printk(KERN_ERR "%s: bad address %p\n", __func__, addr);
+		return;
+	}
+
+	kfree(p);
+}
+EXPORT_SYMBOL(__iounmap);
--
cgit v1.2.3-70-g09d2
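For context, this is the path every sh ioremap request now takes. A minimal consumer looks like the sketch below; the device base address and probe function are invented for illustration, and only ioremap()/iounmap()/readl() are real kernel API. A request above P1SEG is first offered to pmb_remap(), and whatever remainder the PMB cannot cover falls back to conventional page tables via ioremap_page_range():

	#include <linux/io.h>

	/* Hypothetical driver fragment: 0xfe200000 is a made-up device base. */
	static int example_probe(void)
	{
		void __iomem *regs;
		u32 id;

		regs = ioremap(0xfe200000, PAGE_SIZE);	/* -> __ioremap_caller() */
		if (!regs)
			return -ENOMEM;

		id = readl(regs);	/* access through PMB and/or PTE mapping */
		iounmap(regs);		/* -> __iounmap() */
		return id ? 0 : -ENODEV;
	}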
From 12b6b01cb47dc3eefbef866592193661dad7afb9 Mon Sep 17 00:00:00 2001
From: Paul Mundt <lethal@linux-sh.org>
Date: Mon, 18 Jan 2010 21:33:08 +0900
Subject: sh: Handle unmapping of fixed slots transparently in iounmap().

iounmap() should balance whatever is done by ioremap(). Presently
ioremap() can do any of fixed mappings, PMB mappings, or page table
mappings. Presently only the latter two are handled through the
standard unmap path, so tie in the fixed unmapping, too.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
---
 arch/sh/mm/ioremap.c | 6 ++++++
 1 file changed, 6 insertions(+)
(limited to 'arch/sh/mm/ioremap.c')

diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index 24f6ba6bff7..e8b65f645ae 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -142,6 +142,12 @@ void __iounmap(void __iomem *addr)
 	if (iomapping_nontranslatable(vaddr))
 		return;
 
+	/*
+	 * There's no VMA if it's from an early fixed mapping.
+	 */
+	if (iounmap_fixed(addr) == 0)
+		return;
+
 #ifdef CONFIG_PMB
 	/*
 	 * Purge any PMB entries that may have been established for this
--
cgit v1.2.3-70-g09d2
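The dispatch added here relies on iounmap_fixed() signalling whether it claimed the address. A sketch of that contract (the real body lives in arch/sh/mm/ioremap_fixed.c; the range-check helper below is hypothetical):

	/*
	 * Returns 0 if addr was an early fixed mapping and has been released
	 * here; a negative errno means "not mine", and __iounmap() falls
	 * through to the PMB/VMA teardown.
	 */
	int iounmap_fixed(void __iomem *addr)
	{
		if (!addr_is_in_fixmap(addr))	/* hypothetical check */
			return -EINVAL;
		/* ... clear the wired fixmap PTEs and mark the slot free ... */
		return 0;
	}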
From af1415314a4190b8ea06e53808d392fcf91555af Mon Sep 17 00:00:00 2001
From: Paul Mundt <lethal@linux-sh.org>
Date: Mon, 18 Jan 2010 21:45:00 +0900
Subject: sh: Flag __ioremap_caller() __init_refok.

The mem_init_done test makes sure that this path is only entered in
__init cases, so leaving ioremap_fixed() as __init and flagging the
caller __init_refok is sufficient.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
---
 arch/sh/mm/ioremap.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
(limited to 'arch/sh/mm/ioremap.c')

diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index e8b65f645ae..a130b2278e9 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -33,8 +33,9 @@
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-void __iomem *__ioremap_caller(unsigned long phys_addr, unsigned long size,
-			       unsigned long flags, void *caller)
+void __iomem * __init_refok
+__ioremap_caller(unsigned long phys_addr, unsigned long size,
+		 unsigned long flags, void *caller)
 {
 	struct vm_struct *area;
 	unsigned long offset, last_addr, addr, orig_addr;
--
cgit v1.2.3-70-g09d2
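__init_refok exists for exactly this shape of code: a .text function whose reference into .init.text is guarded at run time. In miniature (mem_init_done is the real guard used above; the functions are illustrative):

	static void __init early_only_setup(void)
	{
		/* uses early fixmaps; this text is discarded after boot */
	}

	void __init_refok maybe_setup(void)
	{
		if (!mem_init_done)		/* false once mem_init() has run */
			early_only_setup();	/* unreachable after init is freed */
	}

Without the annotation, modpost would flag the call as a section mismatch even though the guard makes it safe.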
From d57d64080ddc0ff13fcffc898b6251074a482ba1 Mon Sep 17 00:00:00 2001
From: Paul Mundt <lethal@linux-sh.org>
Date: Tue, 19 Jan 2010 13:34:38 +0900
Subject: sh: Prevent 64-bit pgprot clobbering across ioremap implementations.

Presently 'flags' gets passed around a lot between the various ioremap
helpers and implementations, which is only 32-bits. In the X2TLB case
we use 64-bit pgprots which presently results in the upper 32bits being
chopped off (which handily include our read/write/exec permissions).

As such, we convert everything internally to using pgprot_t directly and
simply convert over with pgprot_val() where needed. With this in place,
transparent fixmap utilization for early ioremap works as expected.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
---
 arch/sh/boards/board-sh7785lcr.c    |  6 ++---
 arch/sh/boards/mach-landisk/setup.c |  2 +-
 arch/sh/boards/mach-lboxre2/setup.c |  2 +-
 arch/sh/boards/mach-sh03/setup.c    |  2 +-
 arch/sh/include/asm/io.h            | 53 ++++++++++++++++++++++---------------
 arch/sh/mm/ioremap.c                |  9 +++----
 6 files changed, 41 insertions(+), 33 deletions(-)
(limited to 'arch/sh/mm/ioremap.c')

diff --git a/arch/sh/boards/board-sh7785lcr.c b/arch/sh/boards/board-sh7785lcr.c
index 511de38d204..fe7e686c94a 100644
--- a/arch/sh/boards/board-sh7785lcr.c
+++ b/arch/sh/boards/board-sh7785lcr.c
@@ -21,6 +21,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -332,15 +333,14 @@ static void __init sh7785lcr_setup(char **cmdline_p)
 	pm_power_off = sh7785lcr_power_off;
 
 	/* sm501 DRAM configuration */
-	sm501_reg = ioremap_fixed(SM107_REG_ADDR, SM501_DRAM_CONTROL,
-				  PAGE_KERNEL);
+	sm501_reg = ioremap_nocache(SM107_REG_ADDR, SM501_DRAM_CONTROL);
 	if (!sm501_reg) {
 		printk(KERN_ERR "%s: ioremap error.\n", __func__);
 		return;
 	}
 
 	writel(0x000307c2, sm501_reg + SM501_DRAM_CONTROL);
-	iounmap_fixed(sm501_reg);
+	iounmap(sm501_reg);
 }
 
 /* Return the board specific boot mode pin configuration */
diff --git a/arch/sh/boards/mach-landisk/setup.c b/arch/sh/boards/mach-landisk/setup.c
index db22ea2e6d4..59816355d19 100644
--- a/arch/sh/boards/mach-landisk/setup.c
+++ b/arch/sh/boards/mach-landisk/setup.c
@@ -63,7 +63,7 @@ static int __init landisk_devices_setup(void)
 	/* open I/O area window */
 	paddrbase = virt_to_phys((void *)PA_AREA5_IO);
 	prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16);
-	cf_ide_base = p3_ioremap(paddrbase, PAGE_SIZE, prot.pgprot);
+	cf_ide_base = p3_ioremap(paddrbase, PAGE_SIZE, prot);
 	if (!cf_ide_base) {
 		printk("allocate_cf_area : can't open CF I/O window!\n");
 		return -ENOMEM;
diff --git a/arch/sh/boards/mach-lboxre2/setup.c b/arch/sh/boards/mach-lboxre2/setup.c
index 2b0b5818e1e..408dd5df7d4 100644
--- a/arch/sh/boards/mach-lboxre2/setup.c
+++ b/arch/sh/boards/mach-lboxre2/setup.c
@@ -57,7 +57,7 @@ static int __init lboxre2_devices_setup(void)
 	paddrbase = virt_to_phys((void*)PA_AREA5_IO);
 	psize = PAGE_SIZE;
 	prot = PAGE_KERNEL_PCC( 1 , _PAGE_PCC_IO16);
-	cf0_io_base = (u32)p3_ioremap(paddrbase, psize, prot.pgprot);
+	cf0_io_base = (u32)p3_ioremap(paddrbase, psize, prot);
 	if (!cf0_io_base) {
 		printk(KERN_ERR "%s : can't open CF I/O window!\n" , __func__ );
 		return -ENOMEM;
diff --git a/arch/sh/boards/mach-sh03/setup.c b/arch/sh/boards/mach-sh03/setup.c
index 74cfb4b8b03..f14ba0fa950 100644
--- a/arch/sh/boards/mach-sh03/setup.c
+++ b/arch/sh/boards/mach-sh03/setup.c
@@ -82,7 +82,7 @@ static int __init sh03_devices_setup(void)
 	/* open I/O area window */
 	paddrbase = virt_to_phys((void *)PA_AREA5_IO);
 	prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16);
-	cf_ide_base = p3_ioremap(paddrbase, PAGE_SIZE, prot.pgprot);
+	cf_ide_base = p3_ioremap(paddrbase, PAGE_SIZE, prot);
 	if (!cf_ide_base) {
 		printk("allocate_cf_area : can't open CF I/O window!\n");
 		return -ENOMEM;
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 6a0dd8c1e0a..13696dfccc1 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -235,7 +235,7 @@ unsigned long long poke_real_address_q(unsigned long long addr,
  */
 #ifdef CONFIG_MMU
 void __iomem *__ioremap_caller(unsigned long offset, unsigned long size,
-			       unsigned long flags, void *caller);
+			       pgprot_t prot, void *caller);
 void __iounmap(void __iomem *addr);
 
 #ifdef CONFIG_IOREMAP_FIXED
@@ -254,13 +254,13 @@ static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
 #endif
 
 static inline void __iomem *
-__ioremap(unsigned long offset, unsigned long size, unsigned long flags)
+__ioremap(unsigned long offset, unsigned long size, pgprot_t prot)
 {
-	return __ioremap_caller(offset, size, flags, __builtin_return_address(0));
+	return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
 }
 
 static inline void __iomem *
-__ioremap_29bit(unsigned long offset, unsigned long size, unsigned long flags)
+__ioremap_29bit(unsigned long offset, unsigned long size, pgprot_t prot)
 {
 #ifdef CONFIG_29BIT
 	unsigned long last_addr = offset + size - 1;
@@ -272,7 +272,7 @@ __ioremap_29bit(unsigned long offset, unsigned long size, unsigned long flags)
 	 * mapping must be done by the PMB or by using page tables.
 	 */
 	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
-		if (unlikely(flags & _PAGE_CACHABLE))
+		if (unlikely(pgprot_val(prot) & _PAGE_CACHABLE))
 			return (void __iomem *)P1SEGADDR(offset);
 
 		return (void __iomem *)P2SEGADDR(offset);
@@ -287,7 +287,7 @@
 }
 
 static inline void __iomem *
-__ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
+__ioremap_mode(unsigned long offset, unsigned long size, pgprot_t prot)
 {
 	void __iomem *ret;
 
@@ -295,30 +295,39 @@
 	if (ret)
 		return ret;
 
-	ret = __ioremap_29bit(offset, size, flags);
+	ret = __ioremap_29bit(offset, size, prot);
 	if (ret)
 		return ret;
 
-	return __ioremap(offset, size, flags);
+	return __ioremap(offset, size, prot);
 }
 #else
-#define __ioremap(offset, size, flags)		((void __iomem *)(offset))
-#define __ioremap_mode(offset, size, flags)	((void __iomem *)(offset))
+#define __ioremap(offset, size, prot)		((void __iomem *)(offset))
+#define __ioremap_mode(offset, size, prot)	((void __iomem *)(offset))
 #define __iounmap(addr)				do { } while (0)
 #endif /* CONFIG_MMU */
 
-#define ioremap(offset, size)				\
-	__ioremap_mode((offset), (size), 0)
-#define ioremap_nocache(offset, size)			\
-	__ioremap_mode((offset), (size), 0)
-#define ioremap_cache(offset, size)			\
-	__ioremap_mode((offset), (size), _PAGE_CACHABLE)
-#define p3_ioremap(offset, size, flags)			\
-	__ioremap((offset), (size), (flags))
-#define ioremap_prot(offset, size, flags)		\
-	__ioremap_mode((offset), (size), (flags))
-#define iounmap(addr)					\
-	__iounmap((addr))
+static inline void __iomem *
+ioremap(unsigned long offset, unsigned long size)
+{
+	return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
+}
+
+static inline void __iomem *
+ioremap_cache(unsigned long offset, unsigned long size)
+{
+	return __ioremap_mode(offset, size, PAGE_KERNEL);
+}
+
+static inline void __iomem *
+ioremap_prot(resource_size_t offset, unsigned long size, unsigned long flags)
+{
+	return __ioremap_mode(offset, size, __pgprot(flags));
+}
+
+#define ioremap_nocache	ioremap
+#define p3_ioremap	__ioremap
+#define iounmap		__iounmap
 
 #define maybebadio(port) \
 	printk(KERN_ERR "bad PC-like io %s:%u for port 0x%lx at 0x%08x\n", \
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index a130b2278e9..85b420d0062 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -35,11 +35,10 @@
  */
 void __iomem * __init_refok
 __ioremap_caller(unsigned long phys_addr, unsigned long size,
-		 unsigned long flags, void *caller)
+		 pgprot_t pgprot, void *caller)
 {
 	struct vm_struct *area;
 	unsigned long offset, last_addr, addr, orig_addr;
-	pgprot_t pgprot;
 
 	/* Don't allow wraparound or zero size */
 	last_addr = phys_addr + size - 1;
@@ -69,7 +68,7 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
 	 * If we can't yet use the regular approach, go the fixmap route.
 	 */
 	if (!mem_init_done)
-		return ioremap_fixed(phys_addr, size, __pgprot(flags));
+		return ioremap_fixed(phys_addr, size, pgprot);
 
 	/*
 	 * Ok, go for it..
@@ -91,8 +90,9 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
 	 * PMB entries are all pre-faulted.
 	 */
 	if (unlikely(phys_addr >= P1SEG)) {
-		unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);
+		unsigned long mapped;
 
+		mapped = pmb_remap(addr, phys_addr, size, pgprot_val(pgprot));
 		if (likely(mapped)) {
 			addr		+= mapped;
 			phys_addr	+= mapped;
@@ -101,7 +101,6 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
 	}
 #endif
 
-	pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
 	if (likely(size))
 		if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
 			vunmap((void *)orig_addr);
--
cgit v1.2.3-70-g09d2
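The failure mode being fixed here is easy to reproduce outside the kernel. A standalone C illustration, with invented bit values: on X2TLB the extended read/write/exec bits live above bit 31, and 'unsigned long' is 32 bits on sh, so a detour through it silently drops them:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t pgprot = (1ULL << 32) | 0x1ffULL; /* ext. prot + low bits */
		uint32_t flags = (uint32_t)pgprot;	/* the old 'flags' detour */

		assert((uint64_t)flags != pgprot);	/* upper half chopped off */
		return 0;
	}

Carrying pgprot_t end to end and only dropping to pgprot_val() at the final consumer avoids the narrowing entirely.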
From acf2c9685fb8295cb62a623d7358a1cfde8b07ea Mon Sep 17 00:00:00 2001
From: Paul Mundt <lethal@linux-sh.org>
Date: Tue, 19 Jan 2010 13:49:19 +0900
Subject: sh: Kill off duplicate address alignment in ioremap_fixed().

This is already taken care of in the top-level ioremap, and now that
no one should be calling ioremap_fixed() directly we can simply throw
the mapping displacement in as an additional argument.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
---
 arch/sh/include/asm/io.h   |  6 ++++--
 arch/sh/mm/ioremap.c       |  2 +-
 arch/sh/mm/ioremap_fixed.c | 22 +++-------------------
 3 files changed, 8 insertions(+), 22 deletions(-)
(limited to 'arch/sh/mm/ioremap.c')

diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 13696dfccc1..70269813cef 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -239,12 +239,14 @@ void __iomem *__ioremap_caller(unsigned long offset, unsigned long size,
 void __iounmap(void __iomem *addr);
 
 #ifdef CONFIG_IOREMAP_FIXED
-extern void __iomem *ioremap_fixed(resource_size_t, unsigned long, pgprot_t);
+extern void __iomem *ioremap_fixed(resource_size_t, unsigned long,
+				   unsigned long, pgprot_t);
 extern int iounmap_fixed(void __iomem *);
 extern void ioremap_fixed_init(void);
 #else
 static inline void __iomem *
-ioremap_fixed(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
+ioremap_fixed(resource_size_t phys_addr, unsigned long offset,
+	      unsigned long size, pgprot_t prot)
 {
 	BUG();
 }
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index 85b420d0062..bb03308e840 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -68,7 +68,7 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
 	 * If we can't yet use the regular approach, go the fixmap route.
 	 */
 	if (!mem_init_done)
-		return ioremap_fixed(phys_addr, size, pgprot);
+		return ioremap_fixed(phys_addr, offset, size, pgprot);
 
 	/*
 	 * Ok, go for it..
diff --git a/arch/sh/mm/ioremap_fixed.c b/arch/sh/mm/ioremap_fixed.c
index 425f6c6bf25..551b513e8fc 100644
--- a/arch/sh/mm/ioremap_fixed.c
+++ b/arch/sh/mm/ioremap_fixed.c
@@ -45,12 +45,11 @@ void __init ioremap_fixed_init(void)
 }
 
 void __init __iomem *
-ioremap_fixed(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
+ioremap_fixed(resource_size_t phys_addr, unsigned long offset,
+	      unsigned long size, pgprot_t prot)
 {
 	enum fixed_addresses idx0, idx;
-	resource_size_t last_addr;
 	struct ioremap_map *map;
-	unsigned long offset;
 	unsigned int nrpages;
 	int i, slot;
 
@@ -67,18 +66,6 @@ ioremap_fixed(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
 	if (slot < 0)
 		return NULL;
 
-	/* Don't allow wraparound or zero size */
-	last_addr = phys_addr + size - 1;
-	if (!size || last_addr < phys_addr)
-		return NULL;
-
-	/*
-	 * Fixmap mappings have to be page-aligned
-	 */
-	offset = phys_addr & ~PAGE_MASK;
-	phys_addr &= PAGE_MASK;
-	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
-
 	/*
 	 * Mappings have to fit in the FIX_IOREMAP area.
 	 */
@@ -111,7 +98,6 @@ int iounmap_fixed(void __iomem *addr)
 	unsigned long offset;
 	unsigned int nrpages;
 	int i, slot;
-	pgprot_t prot;
 
 	slot = -1;
 	for (i = 0; i < FIX_N_IOREMAPS; i++) {
@@ -133,11 +119,9 @@ int iounmap_fixed(void __iomem *addr)
 	offset = virt_addr & ~PAGE_MASK;
 	nrpages = PAGE_ALIGN(offset + map->size - 1) >> PAGE_SHIFT;
 
-	pgprot_val(prot) = _PAGE_WIRED;
-
 	idx = FIX_IOREMAP_BEGIN + slot + nrpages;
 	while (nrpages > 0) {
-		__clear_fixmap(idx, prot);
+		__clear_fixmap(idx, __pgprot(_PAGE_WIRED));
 		--idx;
 		--nrpages;
 	}
--
cgit v1.2.3-70-g09d2
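Since ioremap_fixed() no longer re-derives the displacement, all of the alignment happens once in __ioremap_caller(). A standalone sketch of that arithmetic with a made-up address (PAGE_SIZE assumed to be 4 KiB):

	#include <stdio.h>

	#define PAGE_SIZE	4096UL
	#define PAGE_MASK	(~(PAGE_SIZE - 1))
	#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

	int main(void)
	{
		unsigned long phys_addr = 0x1f000123UL, size = 0x10UL;
		unsigned long last_addr = phys_addr + size - 1;
		unsigned long offset = phys_addr & ~PAGE_MASK;	/* 0x123 */

		phys_addr &= PAGE_MASK;				/* 0x1f000000 */
		size = PAGE_ALIGN(last_addr + 1) - phys_addr;	/* 0x1000 */
		printf("offset=%#lx base=%#lx size=%#lx\n",
		       offset, phys_addr, size);
		return 0;
	}

The helper maps the aligned base and hands back base + offset, so the caller never sees the fix-up.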
From 9762528f37ddc7071509dddb10e7b4b3b957fd01 Mon Sep 17 00:00:00 2001
From: Paul Mundt <lethal@linux-sh.org>
Date: Fri, 29 Jan 2010 16:14:29 +0900
Subject: sh: Kill off deprecated fixed PCI memory window accessors.

This kills off the deprecated fixed memory range accessors for the
cases of non-translatable ioremapping.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
---
 arch/sh/include/asm/pci.h | 14 --------------
 arch/sh/mm/ioremap.c      | 15 ---------------
 2 files changed, 29 deletions(-)
(limited to 'arch/sh/mm/ioremap.c')

diff --git a/arch/sh/include/asm/pci.h b/arch/sh/include/asm/pci.h
index 67f3999b544..f362d8a045e 100644
--- a/arch/sh/include/asm/pci.h
+++ b/arch/sh/include/asm/pci.h
@@ -99,20 +99,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
 }
 #endif
 
-#ifdef CONFIG_SUPERH32
-/*
- * If we're on an SH7751 or SH7780 PCI controller, PCI memory is mapped
- * at the end of the address space in a special non-translatable area.
- */
-#define PCI_MEM_FIXED_START	0xfd000000
-#define PCI_MEM_FIXED_END	(PCI_MEM_FIXED_START + 0x01000000)
-
-#define is_pci_memory_fixed_range(s, e)	\
-	((s) >= PCI_MEM_FIXED_START && (e) < PCI_MEM_FIXED_END)
-#else
-#define is_pci_memory_fixed_range(s, e)	(0)
-#endif
-
 /* Board-specific fixup routines. */
 int pcibios_map_platform_irq(struct pci_dev *dev, u8 slot, u8 pin);
 
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index bb03308e840..94583c5da85 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -45,18 +45,6 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
 	if (!size || last_addr < phys_addr)
 		return NULL;
 
-	/*
-	 * If we're in the fixed PCI memory range, mapping through page
-	 * tables is not only pointless, but also fundamentally broken.
-	 * Just return the physical address instead.
-	 *
-	 * For boards that map a small PCI memory aperture somewhere in
-	 * P1/P2 space, ioremap() will already do the right thing,
-	 * and we'll never get this far.
-	 */
-	if (is_pci_memory_fixed_range(phys_addr, size))
-		return (void __iomem *)phys_addr;
-
 	/*
 	 * Mappings have to be page-aligned
 	 */
@@ -125,9 +113,6 @@ static inline int iomapping_nontranslatable(unsigned long offset)
 		return 1;
 #endif
 
-	if (is_pci_memory_fixed_range(offset, 0))
-		return 1;
-
 	return 0;
 }
 
--
cgit v1.2.3-70-g09d2
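The removal leans on the sh segment layout: in 29-bit mode, P1 (cached) and P2 (uncached) are fixed, untranslated windows onto physical memory, so "mapping" into them is pure arithmetic, which is what __ioremap_29bit() does and why the PCI special case adds nothing. A standalone sketch, assuming the conventional segment bases and 29-bit mask:

	#include <stdio.h>

	#define P1SEG	0x80000000UL	/* cached, untranslated   */
	#define P2SEG	0xa0000000UL	/* uncached, untranslated */

	/* roughly what P2SEGADDR() boils down to */
	static unsigned long p2segaddr(unsigned long phys)
	{
		return (phys & 0x1fffffffUL) | P2SEG;
	}

	int main(void)
	{
		printf("%#lx\n", p2segaddr(0x1f000000UL));	/* 0xbf000000 */
		return 0;
	}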
From 7bdda6209f224aa784a036df54b22cb338d2e859 Mon Sep 17 00:00:00 2001
From: Paul Mundt <lethal@linux-sh.org>
Date: Wed, 17 Feb 2010 13:23:00 +0900
Subject: sh: Fix up more 64-bit pgprot truncation on SH-X2 TLB.

Both the store queue API and the PMB remapping take unsigned long for
their pgprot flags, which cuts off the extended protection bits. In the
case of the PMB this isn't really a problem since the cache attribute
bits that we care about are all in the lower 32-bits, but we do it just
to be safe. The store queue remapping on the other hand depends on the
extended prot bits for enabling userspace access to the mappings.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
---
 arch/sh/include/asm/mmu.h        |  5 +++--
 arch/sh/include/cpu-sh4/cpu/sq.h |  3 ++-
 arch/sh/kernel/cpu/sh4/sq.c      | 13 ++++++-------
 arch/sh/mm/ioremap.c             |  2 +-
 arch/sh/mm/pmb.c                 |  6 +++++-
 drivers/video/pvr2fb.c           |  2 +-
 6 files changed, 18 insertions(+), 13 deletions(-)
(limited to 'arch/sh/mm/ioremap.c')

diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h
index 2fcbedb5500..151bc922701 100644
--- a/arch/sh/include/asm/mmu.h
+++ b/arch/sh/include/asm/mmu.h
@@ -33,6 +33,7 @@
 #ifndef __ASSEMBLY__
 #include <linux/errno.h>
 #include <linux/threads.h>
+#include <asm/page.h>
 
 /* Default "unsigned long" context */
 typedef unsigned long mm_context_id_t[NR_CPUS];
@@ -71,13 +72,13 @@ struct pmb_entry {
 #ifdef CONFIG_PMB
 /* arch/sh/mm/pmb.c */
 long pmb_remap(unsigned long virt, unsigned long phys,
-	       unsigned long size, unsigned long flags);
+	       unsigned long size, pgprot_t prot);
 void pmb_unmap(unsigned long addr);
 int pmb_init(void);
 bool __in_29bit_mode(void);
 #else
 static inline long pmb_remap(unsigned long virt, unsigned long phys,
-			     unsigned long size, unsigned long flags)
+			     unsigned long size, pgprot_t prot)
 {
 	return -EINVAL;
 }
diff --git a/arch/sh/include/cpu-sh4/cpu/sq.h b/arch/sh/include/cpu-sh4/cpu/sq.h
index 586d6491816..74716ba2dc3 100644
--- a/arch/sh/include/cpu-sh4/cpu/sq.h
+++ b/arch/sh/include/cpu-sh4/cpu/sq.h
@@ -12,6 +12,7 @@
 #define __ASM_CPU_SH4_SQ_H
 
 #include <asm/addrspace.h>
+#include <asm/page.h>
 
 /*
  * Store queues range from e0000000-e3fffffc, allowing approx. 64MB to be
@@ -28,7 +29,7 @@
 
 /* arch/sh/kernel/cpu/sh4/sq.c */
 unsigned long sq_remap(unsigned long phys, unsigned int size,
-		       const char *name, unsigned long flags);
+		       const char *name, pgprot_t prot);
 void sq_unmap(unsigned long vaddr);
 void sq_flush_range(unsigned long start, unsigned int len);
 
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
index 97aea9d69b0..fc065f9da6e 100644
--- a/arch/sh/kernel/cpu/sh4/sq.c
+++ b/arch/sh/kernel/cpu/sh4/sq.c
@@ -100,7 +100,7 @@ static inline void sq_mapping_list_del(struct sq_mapping *map)
 	spin_unlock_irq(&sq_mapping_lock);
 }
 
-static int __sq_remap(struct sq_mapping *map, unsigned long flags)
+static int __sq_remap(struct sq_mapping *map, pgprot_t prot)
 {
 #if defined(CONFIG_MMU)
 	struct vm_struct *vma;
@@ -113,7 +113,7 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags)
 
 	if (ioremap_page_range((unsigned long)vma->addr,
 			       (unsigned long)vma->addr + map->size,
-			       vma->phys_addr, __pgprot(flags))) {
+			       vma->phys_addr, prot)) {
 		vunmap(vma->addr);
 		return -EAGAIN;
 	}
@@ -135,14 +135,14 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags)
  * @phys: Physical address of mapping.
 * @size: Length of mapping.
 * @name: User invoking mapping.
- * @flags: Protection flags.
+ * @prot: Protection bits.
 *
 * Remaps the physical address @phys through the next available store queue
 * address of @size length. @name is logged at boot time as well as through
 * the sysfs interface.
 */
 unsigned long sq_remap(unsigned long phys, unsigned int size,
-		       const char *name, unsigned long flags)
+		       const char *name, pgprot_t prot)
 {
 	struct sq_mapping *map;
 	unsigned long end;
@@ -177,7 +177,7 @@ unsigned long sq_remap(unsigned long phys, unsigned int size,
 
 	map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT);
 
-	ret = __sq_remap(map, pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
+	ret = __sq_remap(map, prot);
 	if (unlikely(ret != 0))
 		goto out;
 
@@ -309,8 +309,7 @@ static ssize_t mapping_store(const char *buf, size_t count)
 		return -EIO;
 
 	if (likely(len)) {
-		int ret = sq_remap(base, len, "Userspace",
-				   pgprot_val(PAGE_SHARED));
+		int ret = sq_remap(base, len, "Userspace", PAGE_SHARED);
 		if (ret < 0)
 			return ret;
 	} else
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index 94583c5da85..c68d2d7d00a 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -80,7 +80,7 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
 	if (unlikely(phys_addr >= P1SEG)) {
 		unsigned long mapped;
 
-		mapped = pmb_remap(addr, phys_addr, size, pgprot_val(pgprot));
+		mapped = pmb_remap(addr, phys_addr, size, pgprot);
 		if (likely(mapped)) {
 			addr		+= mapped;
 			phys_addr	+= mapped;
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index f822f83418e..509a444a30a 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -24,6 +24,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -166,12 +167,15 @@ static struct {
 };
 
 long pmb_remap(unsigned long vaddr, unsigned long phys,
-	       unsigned long size, unsigned long flags)
+	       unsigned long size, pgprot_t prot)
 {
 	struct pmb_entry *pmbp, *pmbe;
 	unsigned long wanted;
 	int pmb_flags, i;
 	long err;
+	u64 flags;
+
+	flags = pgprot_val(prot);
 
 	/* Convert typical pgprot value to the PMB equivalent */
 	if (flags & _PAGE_CACHABLE) {
diff --git a/drivers/video/pvr2fb.c b/drivers/video/pvr2fb.c
index 53f8f1100e8..f9975100d56 100644
--- a/drivers/video/pvr2fb.c
+++ b/drivers/video/pvr2fb.c
@@ -831,7 +831,7 @@ static int __devinit pvr2fb_common_init(void)
 	printk(KERN_NOTICE "fb%d: registering with SQ API\n", fb_info->node);
 
 	pvr2fb_map = sq_remap(fb_info->fix.smem_start, fb_info->fix.smem_len,
-			      fb_info->fix.id, pgprot_val(PAGE_SHARED));
+			      fb_info->fix.id, PAGE_SHARED);
 
 	printk(KERN_NOTICE "fb%d: Mapped video memory to SQ addr 0x%lx\n",
 	       fb_info->node, pvr2fb_map);
--
cgit v1.2.3-70-g09d2
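After this series, protection bits travel as pgprot_t from the outermost caller all the way down to ioremap_page_range()/pmb_remap(), and sq_remap() follows suit, as the pvr2fb conversion above shows. A hypothetical store-queue user now looks like the sketch below; the function name and arguments are invented, the header paths are assumed from this era's tree, and PAGE_SHARED is the same pgprot the userspace-facing mapping in sq.c itself passes:

	#include <linux/mm.h>
	#include <cpu/sq.h>

	static unsigned long example_sq_map(unsigned long phys, unsigned int len)
	{
		/* extended prot bits in PAGE_SHARED now survive the call */
		return sq_remap(phys, len, "example", PAGE_SHARED);
	}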