| author | David S. Miller <davem@davemloft.net> | 2010-02-28 19:23:06 -0800 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2010-02-28 19:23:06 -0800 |
| commit | 47871889c601d8199c51a4086f77eebd77c29b0b (patch) | |
| tree | 40cdcac3bff0ee40cc33dcca61d0577cdf965f77 /arch/sh/mm/pmb.c | |
| parent | c16cc0b464b8876cfd57ce1c1dbcb6f9a6a0bce3 (diff) | |
| parent | 30ff056c42c665b9ea535d8515890857ae382540 (diff) | |
Merge branch 'master' of /home/davem/src/GIT/linux-2.6/
Conflicts:
drivers/firmware/iscsi_ibft.c
Diffstat (limited to 'arch/sh/mm/pmb.c')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | arch/sh/mm/pmb.c | 586 |
1 files changed, 440 insertions, 146 deletions
```diff
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 280f6a16603..198bcff5e96 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -3,11 +3,8 @@
  *
  * Privileged Space Mapping Buffer (PMB) Support.
  *
- * Copyright (C) 2005, 2006, 2007 Paul Mundt
- *
- * P1/P2 Section mapping definitions from map32.h, which was:
- *
- * Copyright 2003 (c) Lineo Solutions,Inc.
+ * Copyright (C) 2005 - 2010 Paul Mundt
+ * Copyright (C) 2010 Matt Fleming
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
@@ -24,47 +21,67 @@
 #include <linux/fs.h>
 #include <linux/seq_file.h>
 #include <linux/err.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/rwlock.h>
+#include <asm/sizes.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
+#include <asm/page.h>
 #include <asm/mmu.h>
-#include <asm/io.h>
 #include <asm/mmu_context.h>
 
-#define NR_PMB_ENTRIES	16
+struct pmb_entry;
+
+struct pmb_entry {
+	unsigned long vpn;
+	unsigned long ppn;
+	unsigned long flags;
+	unsigned long size;
 
-static void __pmb_unmap(struct pmb_entry *);
+	spinlock_t lock;
+
+	/*
+	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
+	 * PMB_NO_ENTRY to search for a free one
+	 */
+	int entry;
+	/* Adjacent entry link for contiguous multi-entry mappings */
+	struct pmb_entry *link;
+};
+
+static void pmb_unmap_entry(struct pmb_entry *, int depth);
+
+static DEFINE_RWLOCK(pmb_rwlock);
 static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
-static unsigned long pmb_map;
+static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);
 
-static inline unsigned long mk_pmb_entry(unsigned int entry)
+static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
 {
 	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
 }
 
-static inline unsigned long mk_pmb_addr(unsigned int entry)
+static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
 {
 	return mk_pmb_entry(entry) | PMB_ADDR;
 }
 
-static inline unsigned long mk_pmb_data(unsigned int entry)
+static __always_inline unsigned long mk_pmb_data(unsigned int entry)
 {
 	return mk_pmb_entry(entry) | PMB_DATA;
 }
 
 static int pmb_alloc_entry(void)
 {
-	unsigned int pos;
-
-repeat:
-	pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);
-
-	if (unlikely(pos > NR_PMB_ENTRIES))
-		return -ENOSPC;
+	int pos;
 
-	if (test_and_set_bit(pos, &pmb_map))
-		goto repeat;
+	pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
+	if (pos >= 0 && pos < NR_PMB_ENTRIES)
+		__set_bit(pos, pmb_map);
+	else
+		pos = -ENOSPC;
 
 	return pos;
 }
@@ -73,21 +90,34 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
 			    unsigned long flags, int entry)
 {
 	struct pmb_entry *pmbe;
+	unsigned long irqflags;
+	void *ret = NULL;
 	int pos;
 
+	write_lock_irqsave(&pmb_rwlock, irqflags);
+
 	if (entry == PMB_NO_ENTRY) {
 		pos = pmb_alloc_entry();
-		if (pos < 0)
-			return ERR_PTR(pos);
+		if (unlikely(pos < 0)) {
+			ret = ERR_PTR(pos);
+			goto out;
+		}
 	} else {
-		if (test_bit(entry, &pmb_map))
-			return ERR_PTR(-ENOSPC);
+		if (__test_and_set_bit(entry, pmb_map)) {
+			ret = ERR_PTR(-ENOSPC);
+			goto out;
+		}
+
 		pos = entry;
 	}
 
+	write_unlock_irqrestore(&pmb_rwlock, irqflags);
+
 	pmbe = &pmb_entry_list[pos];
-	if (!pmbe)
-		return ERR_PTR(-ENOMEM);
+
+	memset(pmbe, 0, sizeof(struct pmb_entry));
+
+	spin_lock_init(&pmbe->lock);
 
 	pmbe->vpn = vpn;
 	pmbe->ppn = ppn;
@@ -95,101 +125,113 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
 	pmbe->entry = pos;
 
 	return pmbe;
+
+out:
+	write_unlock_irqrestore(&pmb_rwlock, irqflags);
+	return ret;
 }
 
 static void pmb_free(struct pmb_entry *pmbe)
 {
-	int pos = pmbe->entry;
-
-	pmbe->vpn = 0;
-	pmbe->ppn = 0;
-	pmbe->flags = 0;
-	pmbe->entry = 0;
+	__clear_bit(pmbe->entry, pmb_map);
 
-	clear_bit(pos, &pmb_map);
+	pmbe->entry = PMB_NO_ENTRY;
+	pmbe->link = NULL;
 }
 
 /*
- * Must be in P2 for __set_pmb_entry()
+ * Ensure that the PMB entries match our cache configuration.
+ *
+ * When we are in 32-bit address extended mode, CCR.CB becomes
+ * invalid, so care must be taken to manually adjust cacheable
+ * translations.
  */
-static void __set_pmb_entry(unsigned long vpn, unsigned long ppn,
-			    unsigned long flags, int pos)
+static __always_inline unsigned long pmb_cache_flags(void)
 {
-	ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos));
+	unsigned long flags = 0;
 
-#ifdef CONFIG_CACHE_WRITETHROUGH
-	/*
-	 * When we are in 32-bit address extended mode, CCR.CB becomes
-	 * invalid, so care must be taken to manually adjust cacheable
-	 * translations.
-	 */
-	if (likely(flags & PMB_C))
-		flags |= PMB_WT;
+#if defined(CONFIG_CACHE_WRITETHROUGH)
+	flags |= PMB_C | PMB_WT | PMB_UB;
+#elif defined(CONFIG_CACHE_WRITEBACK)
+	flags |= PMB_C;
 #endif
 
-	ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos));
+	return flags;
 }
 
-static void __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe)
+/*
+ * Must be run uncached.
+ */
+static void __set_pmb_entry(struct pmb_entry *pmbe)
 {
-	jump_to_uncached();
-	__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, pmbe->entry);
-	back_to_cached();
+	writel_uncached(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
+	writel_uncached(pmbe->ppn | pmbe->flags | PMB_V,
+			mk_pmb_data(pmbe->entry));
 }
 
-static void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
+static void __clear_pmb_entry(struct pmb_entry *pmbe)
 {
-	unsigned int entry = pmbe->entry;
-	unsigned long addr;
+	unsigned long addr, data;
+	unsigned long addr_val, data_val;
 
-	if (unlikely(entry >= NR_PMB_ENTRIES))
-		return;
+	addr = mk_pmb_addr(pmbe->entry);
+	data = mk_pmb_data(pmbe->entry);
 
-	jump_to_uncached();
+	addr_val = __raw_readl(addr);
+	data_val = __raw_readl(data);
 
 	/* Clear V-bit */
-	addr = mk_pmb_addr(entry);
-	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);
+	writel_uncached(addr_val & ~PMB_V, addr);
+	writel_uncached(data_val & ~PMB_V, data);
+}
 
-	addr = mk_pmb_data(entry);
-	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);
+static void set_pmb_entry(struct pmb_entry *pmbe)
+{
+	unsigned long flags;
 
-	back_to_cached();
+	spin_lock_irqsave(&pmbe->lock, flags);
+	__set_pmb_entry(pmbe);
+	spin_unlock_irqrestore(&pmbe->lock, flags);
 }
 
-
 static struct {
 	unsigned long size;
 	int flag;
 } pmb_sizes[] = {
-	{ .size = 0x20000000, .flag = PMB_SZ_512M, },
-	{ .size = 0x08000000, .flag = PMB_SZ_128M, },
-	{ .size = 0x04000000, .flag = PMB_SZ_64M, },
-	{ .size = 0x01000000, .flag = PMB_SZ_16M, },
+	{ .size = SZ_512M, .flag = PMB_SZ_512M, },
+	{ .size = SZ_128M, .flag = PMB_SZ_128M, },
+	{ .size = SZ_64M, .flag = PMB_SZ_64M, },
+	{ .size = SZ_16M, .flag = PMB_SZ_16M, },
 };
 
 long pmb_remap(unsigned long vaddr, unsigned long phys,
-	       unsigned long size, unsigned long flags)
+	       unsigned long size, pgprot_t prot)
 {
 	struct pmb_entry *pmbp, *pmbe;
 	unsigned long wanted;
 	int pmb_flags, i;
 	long err;
+	u64 flags;
+
+	flags = pgprot_val(prot);
+
+	pmb_flags = PMB_WT | PMB_UB;
 
 	/* Convert typical pgprot value to the PMB equivalent */
 	if (flags & _PAGE_CACHABLE) {
-		if (flags & _PAGE_WT)
-			pmb_flags = PMB_WT;
-		else
-			pmb_flags = PMB_C;
-	} else
-		pmb_flags = PMB_WT | PMB_UB;
+		pmb_flags |= PMB_C;
+
+		if ((flags & _PAGE_WT) == 0)
+			pmb_flags &= ~(PMB_WT | PMB_UB);
+	}
 
 	pmbp = NULL;
 	wanted = size;
 
 again:
 	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
+		unsigned long flags;
+
 		if (size < pmb_sizes[i].size)
 			continue;
 
@@ -200,18 +242,25 @@ again:
 			goto out;
 		}
 
-		set_pmb_entry(pmbe);
+		spin_lock_irqsave(&pmbe->lock, flags);
+
+		__set_pmb_entry(pmbe);
 
 		phys += pmb_sizes[i].size;
 		vaddr += pmb_sizes[i].size;
 		size -= pmb_sizes[i].size;
 
+		pmbe->size = pmb_sizes[i].size;
+
 		/*
 		 * Link adjacent entries that span multiple PMB entries
 		 * for easier tear-down.
 		 */
-		if (likely(pmbp))
+		if (likely(pmbp)) {
+			spin_lock(&pmbp->lock);
 			pmbp->link = pmbe;
+			spin_unlock(&pmbp->lock);
+		}
 
 		pmbp = pmbe;
 
@@ -221,16 +270,17 @@
 		 * pmb_sizes[i].size again.
 		 */
 		i--;
+
+		spin_unlock_irqrestore(&pmbe->lock, flags);
 	}
 
-	if (size >= 0x1000000)
+	if (size >= SZ_16M)
 		goto again;
 
 	return wanted - size;
 
 out:
-	if (pmbp)
-		__pmb_unmap(pmbp);
+	pmb_unmap_entry(pmbp, NR_PMB_ENTRIES);
 
 	return err;
 }
@@ -240,24 +290,52 @@ void pmb_unmap(unsigned long addr)
 	struct pmb_entry *pmbe = NULL;
 	int i;
 
+	read_lock(&pmb_rwlock);
+
 	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
-		if (test_bit(i, &pmb_map)) {
+		if (test_bit(i, pmb_map)) {
 			pmbe = &pmb_entry_list[i];
 			if (pmbe->vpn == addr)
 				break;
 		}
 	}
 
-	if (unlikely(!pmbe))
-		return;
+	read_unlock(&pmb_rwlock);
 
-	__pmb_unmap(pmbe);
+	pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
 }
 
-static void __pmb_unmap(struct pmb_entry *pmbe)
+static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
 {
-	BUG_ON(!test_bit(pmbe->entry, &pmb_map));
+	return (b->vpn == (a->vpn + a->size)) &&
+	       (b->ppn == (a->ppn + a->size)) &&
+	       (b->flags == a->flags);
+}
 
+static bool pmb_size_valid(unsigned long size)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+		if (pmb_sizes[i].size == size)
+			return true;
+
+	return false;
+}
+
+static int pmb_size_to_flags(unsigned long size)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+		if (pmb_sizes[i].size == size)
+			return pmb_sizes[i].flag;
+
+	return 0;
+}
+
+static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
+{
 	do {
 		struct pmb_entry *pmblink = pmbe;
 
@@ -268,102 +346,312 @@ static void __pmb_unmap(struct pmb_entry *pmbe)
 		 * this entry in pmb_alloc() (even if we haven't filled
 		 * it yet).
 		 *
-		 * Therefore, calling clear_pmb_entry() is safe as no
+		 * Therefore, calling __clear_pmb_entry() is safe as no
 		 * other mapping can be using that slot.
 		 */
-		clear_pmb_entry(pmbe);
+		__clear_pmb_entry(pmbe);
 
 		pmbe = pmblink->link;
 
 		pmb_free(pmblink);
-	} while (pmbe);
+	} while (pmbe && --depth);
+}
+
+static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
+{
+	unsigned long flags;
+
+	if (unlikely(!pmbe))
+		return;
+
+	write_lock_irqsave(&pmb_rwlock, flags);
+	__pmb_unmap_entry(pmbe, depth);
+	write_unlock_irqrestore(&pmb_rwlock, flags);
+}
+
+static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
+{
+	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
 }
 
-#ifdef CONFIG_PMB
-int __uses_jump_to_uncached pmb_init(void)
+static void __init pmb_notify(void)
 {
-	unsigned int i;
-	long size, ret;
+	int i;
 
-	jump_to_uncached();
+	pr_info("PMB: boot mappings:\n");
+
+	read_lock(&pmb_rwlock);
+
+	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+		struct pmb_entry *pmbe;
+
+		if (!test_bit(i, pmb_map))
+			continue;
+
+		pmbe = &pmb_entry_list[i];
+
+		pr_info(" 0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
+			pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
+			pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
+	}
+
+	read_unlock(&pmb_rwlock);
+}
+
+/*
+ * Sync our software copy of the PMB mappings with those in hardware. The
+ * mappings in the hardware PMB were either set up by the bootloader or
+ * very early on by the kernel.
+ */
+static void __init pmb_synchronize(void)
+{
+	struct pmb_entry *pmbp = NULL;
+	int i, j;
 
 	/*
-	 * Insert PMB entries for the P1 and P2 areas so that, after
-	 * we've switched the MMU to 32-bit mode, the semantics of P1
-	 * and P2 are the same as in 29-bit mode, e.g.
+	 * Run through the initial boot mappings, log the established
+	 * ones, and blow away anything that falls outside of the valid
+	 * PPN range. Specifically, we only care about existing mappings
+	 * that impact the cached/uncached sections.
 	 *
-	 *	P1 - provides a cached window onto physical memory
-	 *	P2 - provides an uncached window onto physical memory
+	 * Note that touching these can be a bit of a minefield; the boot
+	 * loader can establish multi-page mappings with the same caching
+	 * attributes, so we need to ensure that we aren't modifying a
+	 * mapping that we're presently executing from, or may execute
+	 * from in the case of straddling page boundaries.
+	 *
+	 * In the future we will have to tidy up after the boot loader by
+	 * jumping between the cached and uncached mappings and tearing
+	 * down alternating mappings while executing from the other.
 	 */
-	size = __MEMORY_START + __MEMORY_SIZE;
+	for (i = 0; i < NR_PMB_ENTRIES; i++) {
+		unsigned long addr, data;
+		unsigned long addr_val, data_val;
+		unsigned long ppn, vpn, flags;
+		unsigned long irqflags;
+		unsigned int size;
+		struct pmb_entry *pmbe;
 
-	ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C);
-	BUG_ON(ret != size);
+		addr = mk_pmb_addr(i);
+		data = mk_pmb_data(i);
 
-	ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB);
-	BUG_ON(ret != size);
+		addr_val = __raw_readl(addr);
+		data_val = __raw_readl(data);
 
-	ctrl_outl(0, PMB_IRMCR);
+		/*
+		 * Skip over any bogus entries
+		 */
+		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
+			continue;
 
-	/* PMB.SE and UB[7] */
-	ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR);
+		ppn = data_val & PMB_PFN_MASK;
+		vpn = addr_val & PMB_PFN_MASK;
 
-	/* Flush out the TLB */
-	i = ctrl_inl(MMUCR);
-	i |= MMUCR_TI;
-	ctrl_outl(i, MMUCR);
+		/*
+		 * Only preserve in-range mappings.
+		 */
+		if (!pmb_ppn_in_range(ppn)) {
+			/*
+			 * Invalidate anything out of bounds.
+			 */
+			writel_uncached(addr_val & ~PMB_V, addr);
+			writel_uncached(data_val & ~PMB_V, data);
+			continue;
+		}
 
-	back_to_cached();
+		/*
+		 * Update the caching attributes if necessary
+		 */
+		if (data_val & PMB_C) {
+			data_val &= ~PMB_CACHE_MASK;
+			data_val |= pmb_cache_flags();
 
-	return 0;
+			writel_uncached(data_val, data);
+		}
+
+		size = data_val & PMB_SZ_MASK;
+		flags = size | (data_val & PMB_CACHE_MASK);
+
+		pmbe = pmb_alloc(vpn, ppn, flags, i);
+		if (IS_ERR(pmbe)) {
+			WARN_ON_ONCE(1);
+			continue;
+		}
+
+		spin_lock_irqsave(&pmbe->lock, irqflags);
+
+		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
+			if (pmb_sizes[j].flag == size)
+				pmbe->size = pmb_sizes[j].size;
+
+		if (pmbp) {
+			spin_lock(&pmbp->lock);
+
+			/*
+			 * Compare the previous entry against the current one to
+			 * see if the entries span a contiguous mapping. If so,
+			 * setup the entry links accordingly. Compound mappings
+			 * are later coalesced.
+			 */
+			if (pmb_can_merge(pmbp, pmbe))
+				pmbp->link = pmbe;
+
+			spin_unlock(&pmbp->lock);
+		}
+
+		pmbp = pmbe;
+
+		spin_unlock_irqrestore(&pmbe->lock, irqflags);
+	}
+}
+
+static void __init pmb_merge(struct pmb_entry *head)
+{
+	unsigned long span, newsize;
+	struct pmb_entry *tail;
+	int i = 1, depth = 0;
+
+	span = newsize = head->size;
+
+	tail = head->link;
+	while (tail) {
+		span += tail->size;
+
+		if (pmb_size_valid(span)) {
+			newsize = span;
+			depth = i;
+		}
+
+		/* This is the end of the line.. */
+		if (!tail->link)
+			break;
+
+		tail = tail->link;
+		i++;
+	}
+
+	/*
+	 * The merged page size must be valid.
+	 */
+	if (!pmb_size_valid(newsize))
+		return;
+
+	head->flags &= ~PMB_SZ_MASK;
+	head->flags |= pmb_size_to_flags(newsize);
+
+	head->size = newsize;
+
+	__pmb_unmap_entry(head->link, depth);
+	__set_pmb_entry(head);
 }
-#else
-int __uses_jump_to_uncached pmb_init(void)
+
+static void __init pmb_coalesce(void)
 {
+	unsigned long flags;
 	int i;
-	unsigned long addr, data;
 
-	jump_to_uncached();
+	write_lock_irqsave(&pmb_rwlock, flags);
 
-	for (i = 0; i < PMB_ENTRY_MAX; i++) {
+	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
 		struct pmb_entry *pmbe;
-		unsigned long vpn, ppn, flags;
 
-		addr = PMB_DATA + (i << PMB_E_SHIFT);
-		data = ctrl_inl(addr);
-		if (!(data & PMB_V))
+		if (!test_bit(i, pmb_map))
 			continue;
 
-		if (data & PMB_C) {
-#if defined(CONFIG_CACHE_WRITETHROUGH)
-			data |= PMB_WT;
-#elif defined(CONFIG_CACHE_WRITEBACK)
-			data &= ~PMB_WT;
-#else
-			data &= ~(PMB_C | PMB_WT);
-#endif
-		}
-		ctrl_outl(data, addr);
+		pmbe = &pmb_entry_list[i];
 
-		ppn = data & PMB_PFN_MASK;
+		/*
+		 * We're only interested in compound mappings
+		 */
+		if (!pmbe->link)
+			continue;
 
-		flags = data & (PMB_C | PMB_WT | PMB_UB);
-		flags |= data & PMB_SZ_MASK;
+		/*
+		 * Nothing to do if it already uses the largest possible
+		 * page size.
+		 */
+		if (pmbe->size == SZ_512M)
+			continue;
 
-		addr = PMB_ADDR + (i << PMB_E_SHIFT);
-		data = ctrl_inl(addr);
+		pmb_merge(pmbe);
 	}
 
-		vpn = data & PMB_PFN_MASK;
+	write_unlock_irqrestore(&pmb_rwlock, flags);
+}
 
-		pmbe = pmb_alloc(vpn, ppn, flags, i);
-		WARN_ON(IS_ERR(pmbe));
+#ifdef CONFIG_UNCACHED_MAPPING
+static void __init pmb_resize(void)
+{
+	int i;
+
+	/*
+	 * If the uncached mapping was constructed by the kernel, it will
+	 * already be a reasonable size.
+	 */
+	if (uncached_size == SZ_16M)
+		return;
+
+	read_lock(&pmb_rwlock);
+
+	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+		struct pmb_entry *pmbe;
+		unsigned long flags;
+
+		if (!test_bit(i, pmb_map))
+			continue;
+
+		pmbe = &pmb_entry_list[i];
+
+		if (pmbe->vpn != uncached_start)
+			continue;
+
+		/*
+		 * Found it, now resize it.
+		 */
+		spin_lock_irqsave(&pmbe->lock, flags);
+
+		pmbe->size = SZ_16M;
+		pmbe->flags &= ~PMB_SZ_MASK;
+		pmbe->flags |= pmb_size_to_flags(pmbe->size);
+
+		uncached_resize(pmbe->size);
+
+		__set_pmb_entry(pmbe);
+
+		spin_unlock_irqrestore(&pmbe->lock, flags);
 	}
 
-	back_to_cached();
+	read_lock(&pmb_rwlock);
+}
+#endif
+
+void __init pmb_init(void)
+{
+	/* Synchronize software state */
+	pmb_synchronize();
 
-	return 0;
+	/* Attempt to combine compound mappings */
+	pmb_coalesce();
+
+#ifdef CONFIG_UNCACHED_MAPPING
+	/* Resize initial mappings, if necessary */
+	pmb_resize();
+#endif
+
+	/* Log them */
+	pmb_notify();
+
+	writel_uncached(0, PMB_IRMCR);
+
+	/* Flush out the TLB */
+	__raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
+	ctrl_barrier();
+}
+
+bool __in_29bit_mode(void)
+{
+	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
 }
-#endif /* CONFIG_PMB */
 
 static int pmb_seq_show(struct seq_file *file, void *iter)
 {
@@ -378,8 +666,8 @@ static int pmb_seq_show(struct seq_file *file, void *iter)
 		unsigned int size;
 		char *sz_str = NULL;
 
-		addr = ctrl_inl(mk_pmb_addr(i));
-		data = ctrl_inl(mk_pmb_data(i));
+		addr = __raw_readl(mk_pmb_addr(i));
+		data = __raw_readl(mk_pmb_data(i));
 
 		size = data & PMB_SZ_MASK;
 		sz_str = (size == PMB_SZ_16M) ? " 16MB":
@@ -437,14 +725,21 @@ static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
 	if (state.event == PM_EVENT_ON &&
 	    prev_state.event == PM_EVENT_FREEZE) {
 		struct pmb_entry *pmbe;
+
+		read_lock(&pmb_rwlock);
+
 		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
-			if (test_bit(i, &pmb_map)) {
+			if (test_bit(i, pmb_map)) {
 				pmbe = &pmb_entry_list[i];
 				set_pmb_entry(pmbe);
 			}
 		}
+
+		read_unlock(&pmb_rwlock);
 	}
+
 	prev_state = state;
+
 	return 0;
 }
 
@@ -462,6 +757,5 @@ static int __init pmb_sysdev_init(void)
 {
 	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
 }
-
 subsys_initcall(pmb_sysdev_init);
 #endif
```
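The entry allocator in this diff replaces the old test_and_set_bit() retry loop with a DECLARE_BITMAP()/find_first_zero_bit() search performed while pmb_rwlock is held for writing. Below is a minimal, standalone userspace sketch of that allocation policy only; the names (slot_alloc, slot_map, NR_SLOTS) are illustrative and are not part of the kernel code, and locking is omitted.

```c
#include <stdio.h>

#define NR_SLOTS	16
#define SLOT_NO_ENTRY	(-1)

static unsigned int slot_map;	/* bit i set => entry i is in use */

/* Return the index of the first clear bit, or NR_SLOTS if the map is full. */
static int find_first_zero(unsigned int map)
{
	for (int i = 0; i < NR_SLOTS; i++)
		if (!(map & (1u << i)))
			return i;
	return NR_SLOTS;
}

/*
 * Either search for any free slot (SLOT_NO_ENTRY) or claim a specific one,
 * failing if it is already taken; this models the policy pmb_alloc() applies
 * to its entry bitmap while holding the writer lock.
 */
static int slot_alloc(int wanted)
{
	int pos;

	if (wanted == SLOT_NO_ENTRY) {
		pos = find_first_zero(slot_map);
		if (pos >= NR_SLOTS)
			return -1;	/* no space left */
	} else {
		if (slot_map & (1u << wanted))
			return -1;	/* already claimed */
		pos = wanted;
	}

	slot_map |= 1u << pos;
	return pos;
}

int main(void)
{
	printf("first free: %d\n", slot_alloc(SLOT_NO_ENTRY));	/* 0 */
	printf("claim 5:    %d\n", slot_alloc(5));		/* 5 */
	printf("claim 5:    %d\n", slot_alloc(5));		/* -1, taken */
	return 0;
}
```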
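pmb_synchronize() links hardware entries that form one contiguous mapping (the pmb_can_merge() check), and pmb_coalesce()/pmb_merge() then fold such a chain into the largest page size the PMB supports. The sketch below is a userspace model of that folding step under the assumption of the four sizes used in the patch; struct mapping and the function names are invented for illustration, and the kernel version additionally records how many entries were folded so it can tear them down and reprogram the head entry.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define SZ_16M	0x01000000UL
#define SZ_64M	0x04000000UL
#define SZ_128M	0x08000000UL
#define SZ_512M	0x20000000UL

/* Illustrative stand-in for the kernel's pmb_entry; not the real struct. */
struct mapping {
	unsigned long vpn, ppn, size;
	struct mapping *link;	/* next contiguous entry, if any */
};

static const unsigned long valid_sizes[] = { SZ_512M, SZ_128M, SZ_64M, SZ_16M };

/* Same idea as pmb_size_valid(): only hardware-supported sizes count. */
static bool size_valid(unsigned long size)
{
	for (size_t i = 0; i < sizeof(valid_sizes) / sizeof(valid_sizes[0]); i++)
		if (valid_sizes[i] == size)
			return true;
	return false;
}

/* Adjacency test in the spirit of pmb_can_merge(). */
static bool can_merge(const struct mapping *a, const struct mapping *b)
{
	return b->vpn == a->vpn + a->size && b->ppn == a->ppn + a->size;
}

/*
 * Fold a chain of linked mappings into the largest span that is still a
 * valid size, mirroring the walk in pmb_merge().
 */
static void merge(struct mapping *head)
{
	unsigned long span = head->size, newsize = head->size;

	for (struct mapping *t = head->link; t; t = t->link) {
		span += t->size;
		if (size_valid(span))
			newsize = span;
	}

	head->size = newsize;
	head->link = NULL;
}

int main(void)
{
	struct mapping m[4];

	/* Four contiguous 16MB mappings. */
	for (int i = 0; i < 4; i++) {
		m[i].vpn  = 0x80000000UL + i * SZ_16M;
		m[i].ppn  = 0x40000000UL + i * SZ_16M;
		m[i].size = SZ_16M;
		m[i].link = NULL;
	}
	for (int i = 0; i < 3; i++)
		m[i].link = can_merge(&m[i], &m[i + 1]) ? &m[i + 1] : NULL;

	merge(&m[0]);
	printf("merged size: %luMB\n", m[0].size >> 20);	/* 64MB */
	return 0;
}
```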
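pmb_remap() now derives the PMB data-array flags from a pgprot_t: it starts from an uncached write-through/unbuffered default, sets PMB_C for cacheable requests, and drops PMB_WT | PMB_UB again unless write-through was explicitly asked for. The short sketch below models just that decision; the bit values are placeholders for the sake of a compilable example, not the real SH hardware encoding.

```c
#include <stdbool.h>
#include <stdio.h>

#define PMB_C	(1u << 0)	/* cacheable (placeholder value) */
#define PMB_WT	(1u << 1)	/* write-through (placeholder value) */
#define PMB_UB	(1u << 2)	/* unbuffered (placeholder value) */

static unsigned int to_pmb_flags(bool cachable, bool write_through)
{
	/* Default to an uncached, unbuffered-write mapping. */
	unsigned int flags = PMB_WT | PMB_UB;

	if (cachable) {
		flags |= PMB_C;
		/* Write-back requested: drop the write-through/unbuffered bits. */
		if (!write_through)
			flags &= ~(PMB_WT | PMB_UB);
	}

	return flags;
}

int main(void)
{
	printf("uncached:              %#x\n", to_pmb_flags(false, false));
	printf("cached, write-through: %#x\n", to_pmb_flags(true, true));
	printf("cached, write-back:    %#x\n", to_pmb_flags(true, false));
	return 0;
}
```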