From 067784f6239e08a084b4d8d597e14435331eae51 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Tue, 6 Oct 2009 21:22:23 +0000 Subject: sh: Allocate PMB entry slot earlier Simplify set_pmb_entry() by removing the possibility of not finding a free slot in the PMB. Instead we now allocate a slot in pmb_alloc() so that if there are no free slots we fail at allocation time, rather than in set_pmb_entry(). Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/mm/pmb.c | 80 +++++++++++++++++++++++++++----------------------------- 1 file changed, 39 insertions(+), 41 deletions(-) (limited to 'arch/sh/mm/pmb.c') diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index aade3110211..b8a33949296 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -99,10 +99,31 @@ static inline void pmb_list_del(struct pmb_entry *pmbe) } } +static int pmb_alloc_entry(void) +{ + unsigned int pos; + +repeat: + pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES); + + if (unlikely(pos > NR_PMB_ENTRIES)) + return -ENOSPC; + + if (test_and_set_bit(pos, &pmb_map)) + goto repeat; + + return pos; +} + struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, unsigned long flags) { struct pmb_entry *pmbe; + int pos; + + pos = pmb_alloc_entry(); + if (pos < 0) + return ERR_PTR(pos); pmbe = kmem_cache_alloc(pmb_cache, GFP_KERNEL); if (!pmbe) @@ -111,6 +132,7 @@ struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, pmbe->vpn = vpn; pmbe->ppn = ppn; pmbe->flags = flags; + pmbe->entry = pos; spin_lock_irq(&pmb_list_lock); pmb_list_add(pmbe); @@ -131,23 +153,9 @@ void pmb_free(struct pmb_entry *pmbe) /* * Must be in P2 for __set_pmb_entry() */ -int __set_pmb_entry(unsigned long vpn, unsigned long ppn, - unsigned long flags, int *entry) +void __set_pmb_entry(unsigned long vpn, unsigned long ppn, + unsigned long flags, int pos) { - unsigned int pos = *entry; - - if (unlikely(pos == PMB_NO_ENTRY)) - pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES); - -repeat: - if (unlikely(pos > NR_PMB_ENTRIES)) - return -ENOSPC; - - if (test_and_set_bit(pos, &pmb_map)) { - pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES); - goto repeat; - } - ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos)); #ifdef CONFIG_CACHE_WRITETHROUGH @@ -161,21 +169,13 @@ repeat: #endif ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos)); - - *entry = pos; - - return 0; } -int __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe) +void __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe) { - int ret; - jump_to_uncached(); - ret = __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &pmbe->entry); + __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, pmbe->entry); back_to_cached(); - - return ret; } void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe) @@ -239,8 +239,6 @@ long pmb_remap(unsigned long vaddr, unsigned long phys, again: for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) { - int ret; - if (size < pmb_sizes[i].size) continue; @@ -250,12 +248,7 @@ again: goto out; } - ret = set_pmb_entry(pmbe); - if (ret != 0) { - pmb_free(pmbe); - err = -EBUSY; - goto out; - } + set_pmb_entry(pmbe); phys += pmb_sizes[i].size; vaddr += pmb_sizes[i].size; @@ -311,8 +304,17 @@ static void __pmb_unmap(struct pmb_entry *pmbe) do { struct pmb_entry *pmblink = pmbe; - if (pmbe->entry != PMB_NO_ENTRY) - clear_pmb_entry(pmbe); + /* + * We may be called before this pmb_entry has been + * entered into the PMB table via set_pmb_entry(), but + * that's OK because we've allocated a unique slot for + * this entry in pmb_alloc() (even if we haven't 
filled + * it yet). + * + * Therefore, calling clear_pmb_entry() is safe as no + * other mapping can be using that slot. + */ + clear_pmb_entry(pmbe); pmbe = pmblink->link; @@ -322,11 +324,7 @@ static void __pmb_unmap(struct pmb_entry *pmbe) static void pmb_cache_ctor(void *pmb) { - struct pmb_entry *pmbe = pmb; - memset(pmb, 0, sizeof(struct pmb_entry)); - - pmbe->entry = PMB_NO_ENTRY; } static int __uses_jump_to_uncached pmb_init(void) @@ -349,7 +347,7 @@ static int __uses_jump_to_uncached pmb_init(void) for (entry = 0; entry < nr_entries; entry++) { struct pmb_entry *pmbe = pmb_init_map + entry; - __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &entry); + __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, entry); } ctrl_outl(0, PMB_IRMCR); -- cgit v1.2.3-70-g09d2 From 8386aebb9e15a94137693ea4f4df84207f71cc75 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Tue, 6 Oct 2009 21:22:28 +0000 Subject: sh: Make most PMB functions static There's no need to export the internal PMB functions for allocating, freeing and modifying PMB entries, etc. This way we can restrict the interface for PMB. Also remove the static from pmb_init() so that we have more freedom in setting up the initial PMB entries and turning on MMU 32bit mode. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/include/asm/mmu.h | 8 +------- arch/sh/kernel/setup.c | 4 ++++ arch/sh/mm/pmb.c | 17 ++++++++--------- 3 files changed, 13 insertions(+), 16 deletions(-) (limited to 'arch/sh/mm/pmb.c') diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h index 5025e12b786..9c84b4546c8 100644 --- a/arch/sh/include/asm/mmu.h +++ b/arch/sh/include/asm/mmu.h @@ -64,16 +64,10 @@ struct pmb_entry { }; /* arch/sh/mm/pmb.c */ -int __set_pmb_entry(unsigned long vpn, unsigned long ppn, - unsigned long flags, int *entry); -int set_pmb_entry(struct pmb_entry *pmbe); -void clear_pmb_entry(struct pmb_entry *pmbe); -struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, - unsigned long flags); -void pmb_free(struct pmb_entry *pmbe); long pmb_remap(unsigned long virt, unsigned long phys, unsigned long size, unsigned long flags); void pmb_unmap(unsigned long addr); +int pmb_init(void); #endif /* __ASSEMBLY__ */ #endif /* __MMU_H */ diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index f9d44f8e0df..8fdd03a6768 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -453,6 +453,10 @@ void __init setup_arch(char **cmdline_p) paging_init(); +#ifdef CONFIG_PMB + pmb_init(); +#endif + #ifdef CONFIG_SMP plat_smp_setup(); #endif diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index b8a33949296..f01c8191144 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -115,8 +115,8 @@ repeat: return pos; } -struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, - unsigned long flags) +static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, + unsigned long flags) { struct pmb_entry *pmbe; int pos; @@ -141,7 +141,7 @@ struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, return pmbe; } -void pmb_free(struct pmb_entry *pmbe) +static void pmb_free(struct pmb_entry *pmbe) { spin_lock_irq(&pmb_list_lock); pmb_list_del(pmbe); @@ -153,8 +153,8 @@ void pmb_free(struct pmb_entry *pmbe) /* * Must be in P2 for __set_pmb_entry() */ -void __set_pmb_entry(unsigned long vpn, unsigned long ppn, - unsigned long flags, int pos) +static void __set_pmb_entry(unsigned long vpn, unsigned long ppn, + unsigned long flags, int pos) { ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos)); @@ -171,14 
+171,14 @@ void __set_pmb_entry(unsigned long vpn, unsigned long ppn, ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos)); } -void __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe) +static void __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe) { jump_to_uncached(); __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, pmbe->entry); back_to_cached(); } -void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe) +static void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe) { unsigned int entry = pmbe->entry; unsigned long addr; @@ -327,7 +327,7 @@ static void pmb_cache_ctor(void *pmb) memset(pmb, 0, sizeof(struct pmb_entry)); } -static int __uses_jump_to_uncached pmb_init(void) +int __uses_jump_to_uncached pmb_init(void) { unsigned int nr_entries = ARRAY_SIZE(pmb_init_map); unsigned int entry, i; @@ -364,7 +364,6 @@ static int __uses_jump_to_uncached pmb_init(void) return 0; } -arch_initcall(pmb_init); static int pmb_seq_show(struct seq_file *file, void *iter) { -- cgit v1.2.3-70-g09d2 From edd7de803c79c7df117bf3f0e22ffdba1b1ef256 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Tue, 6 Oct 2009 21:22:29 +0000 Subject: sh: Get rid of the kmem cache code Unfortunately, at the time during in boot when we want to be setting up the PMB entries, the kmem subsystem hasn't been initialised. We now match pmb_map slots with pmb_entry_list slots. When we find an empty slot in pmb_map, we set the bit, thereby acquiring the corresponding pmb_entry_list entry. There is a benefit in using this static array of struct pmb_entry's; we don't need to acquire any locks in order to traverse the list of struct pmb_entry's. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/mm/pmb.c | 81 ++++++++++++++++++-------------------------------------- 1 file changed, 26 insertions(+), 55 deletions(-) (limited to 'arch/sh/mm/pmb.c') diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index f01c8191144..baf365fcdb4 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -35,7 +35,7 @@ static void __pmb_unmap(struct pmb_entry *); -static struct kmem_cache *pmb_cache; +static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES]; static unsigned long pmb_map; static struct pmb_entry pmb_init_map[] = { @@ -73,32 +73,6 @@ static inline unsigned long mk_pmb_data(unsigned int entry) return mk_pmb_entry(entry) | PMB_DATA; } -static DEFINE_SPINLOCK(pmb_list_lock); -static struct pmb_entry *pmb_list; - -static inline void pmb_list_add(struct pmb_entry *pmbe) -{ - struct pmb_entry **p, *tmp; - - p = &pmb_list; - while ((tmp = *p) != NULL) - p = &tmp->next; - - pmbe->next = tmp; - *p = pmbe; -} - -static inline void pmb_list_del(struct pmb_entry *pmbe) -{ - struct pmb_entry **p, *tmp; - - for (p = &pmb_list; (tmp = *p); p = &tmp->next) - if (tmp == pmbe) { - *p = tmp->next; - return; - } -} - static int pmb_alloc_entry(void) { unsigned int pos; @@ -125,7 +99,7 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, if (pos < 0) return ERR_PTR(pos); - pmbe = kmem_cache_alloc(pmb_cache, GFP_KERNEL); + pmbe = &pmb_entry_list[pos]; if (!pmbe) return ERR_PTR(-ENOMEM); @@ -134,20 +108,19 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, pmbe->flags = flags; pmbe->entry = pos; - spin_lock_irq(&pmb_list_lock); - pmb_list_add(pmbe); - spin_unlock_irq(&pmb_list_lock); - return pmbe; } static void pmb_free(struct pmb_entry *pmbe) { - spin_lock_irq(&pmb_list_lock); - pmb_list_del(pmbe); - spin_unlock_irq(&pmb_list_lock); + int pos = pmbe->entry; - 
kmem_cache_free(pmb_cache, pmbe); + pmbe->vpn = 0; + pmbe->ppn = 0; + pmbe->flags = 0; + pmbe->entry = 0; + + clear_bit(pos, &pmb_map); } /* @@ -202,8 +175,6 @@ static void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe) ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr); back_to_cached(); - - clear_bit(entry, &pmb_map); } @@ -285,11 +256,16 @@ out: void pmb_unmap(unsigned long addr) { - struct pmb_entry **p, *pmbe; + struct pmb_entry *pmbe = NULL; + int i; - for (p = &pmb_list; (pmbe = *p); p = &pmbe->next) - if (pmbe->vpn == addr) - break; + for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { + if (test_bit(i, &pmb_map)) { + pmbe = &pmb_entry_list[i]; + if (pmbe->vpn == addr) + break; + } + } if (unlikely(!pmbe)) return; @@ -299,7 +275,7 @@ void pmb_unmap(unsigned long addr) static void __pmb_unmap(struct pmb_entry *pmbe) { - WARN_ON(!test_bit(pmbe->entry, &pmb_map)); + BUG_ON(!test_bit(pmbe->entry, &pmb_map)); do { struct pmb_entry *pmblink = pmbe; @@ -322,11 +298,6 @@ static void __pmb_unmap(struct pmb_entry *pmbe) } while (pmbe); } -static void pmb_cache_ctor(void *pmb) -{ - memset(pmb, 0, sizeof(struct pmb_entry)); -} - int __uses_jump_to_uncached pmb_init(void) { unsigned int nr_entries = ARRAY_SIZE(pmb_init_map); @@ -334,9 +305,6 @@ int __uses_jump_to_uncached pmb_init(void) BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES)); - pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0, - SLAB_PANIC, pmb_cache_ctor); - jump_to_uncached(); /* @@ -431,15 +399,18 @@ postcore_initcall(pmb_debugfs_init); static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state) { static pm_message_t prev_state; + int i; /* Restore the PMB after a resume from hibernation */ if (state.event == PM_EVENT_ON && prev_state.event == PM_EVENT_FREEZE) { struct pmb_entry *pmbe; - spin_lock_irq(&pmb_list_lock); - for (pmbe = pmb_list; pmbe; pmbe = pmbe->next) - set_pmb_entry(pmbe); - spin_unlock_irq(&pmb_list_lock); + for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { + if (test_bit(i, &pmb_map)) { + pmbe = &pmb_entry_list[i]; + set_pmb_entry(pmbe); + } + } } prev_state = state; return 0; -- cgit v1.2.3-70-g09d2 From 3105121949b609964f370d42d1b90fe7fc01d6b1 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Tue, 6 Oct 2009 21:22:30 +0000 Subject: sh: Remap physical memory into P1 and P2 in pmb_init() Eventually we'll have complete control over what physical memory gets mapped where and we can probably do other interesting things. For now though, when the MMU is in 32-bit mode, we map physical memory into the P1 and P2 virtual address ranges with the same semantics as they have in 29-bit mode. 
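Concretely, "the same semantics as 29-bit mode" means a physical address keeps a cached alias in P1 and an uncached alias in P2 once these windows are in place. A minimal sketch, assuming the usual sh segment bases (P1SEG = 0x80000000, P2SEG = 0xa0000000) and mirroring what the existing P1SEGADDR()/P2SEGADDR() helpers already express; the helper names below are illustrative only:

	/*
	 * Sketch only: with the P1/P2 windows set up by pmb_init(),
	 * physical memory below 512MB has two fixed virtual aliases.
	 */
	static inline unsigned long phys_to_p1_alias(unsigned long phys)
	{
		return phys | P1SEG;	/* cached view */
	}

	static inline unsigned long phys_to_p2_alias(unsigned long phys)
	{
		return phys | P2SEG;	/* uncached view */
	}
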
Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/include/asm/io.h | 4 ++-- arch/sh/mm/consistent.c | 2 +- arch/sh/mm/pmb.c | 54 ++++++++++++++---------------------------------- 3 files changed, 18 insertions(+), 42 deletions(-) (limited to 'arch/sh/mm/pmb.c') diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index 5be45ea4dfe..0cf2a5708e2 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h @@ -246,7 +246,7 @@ void __iounmap(void __iomem *addr); static inline void __iomem * __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) { -#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) +#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) && !defined(CONFIG_PMB) unsigned long last_addr = offset + size - 1; #endif void __iomem *ret; @@ -255,7 +255,7 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) if (ret) return ret; -#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) +#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) && !defined(CONFIG_PMB) /* * For P1 and P2 space this is trivial, as everything is already * mapped. Uncached access for P1 addresses are done through P2. diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c index e098ec158dd..9a8403d9344 100644 --- a/arch/sh/mm/consistent.c +++ b/arch/sh/mm/consistent.c @@ -85,7 +85,7 @@ EXPORT_SYMBOL(dma_free_coherent); void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction) { -#ifdef CONFIG_CPU_SH5 +#if defined(CONFIG_CPU_SH5) || defined(CONFIG_PMB) void *p1addr = vaddr; #else void *p1addr = (void*) P1SEGADDR((unsigned long)vaddr); diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index baf365fcdb4..2d009bdcf90 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -38,26 +38,6 @@ static void __pmb_unmap(struct pmb_entry *); static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES]; static unsigned long pmb_map; -static struct pmb_entry pmb_init_map[] = { - /* vpn ppn flags (ub/sz/c/wt) */ - - /* P1 Section Mappings */ - { 0x80000000, 0x00000000, PMB_SZ_64M | PMB_C, }, - { 0x84000000, 0x04000000, PMB_SZ_64M | PMB_C, }, - { 0x88000000, 0x08000000, PMB_SZ_128M | PMB_C, }, - { 0x90000000, 0x10000000, PMB_SZ_64M | PMB_C, }, - { 0x94000000, 0x14000000, PMB_SZ_64M | PMB_C, }, - { 0x98000000, 0x18000000, PMB_SZ_64M | PMB_C, }, - - /* P2 Section Mappings */ - { 0xa0000000, 0x00000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, - { 0xa4000000, 0x04000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, - { 0xa8000000, 0x08000000, PMB_UB | PMB_SZ_128M | PMB_WT, }, - { 0xb0000000, 0x10000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, - { 0xb4000000, 0x14000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, - { 0xb8000000, 0x18000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, -}; - static inline unsigned long mk_pmb_entry(unsigned int entry) { return (entry & PMB_E_MASK) << PMB_E_SHIFT; @@ -156,13 +136,7 @@ static void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe) unsigned int entry = pmbe->entry; unsigned long addr; - /* - * Don't allow clearing of wired init entries, P1 or P2 access - * without a corresponding mapping in the PMB will lead to reset - * by the TLB. 
- */ - if (unlikely(entry < ARRAY_SIZE(pmb_init_map) || - entry >= NR_PMB_ENTRIES)) + if (unlikely(entry >= NR_PMB_ENTRIES)) return; jump_to_uncached(); @@ -300,28 +274,30 @@ static void __pmb_unmap(struct pmb_entry *pmbe) int __uses_jump_to_uncached pmb_init(void) { - unsigned int nr_entries = ARRAY_SIZE(pmb_init_map); - unsigned int entry, i; - - BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES)); + unsigned int i; + long size; jump_to_uncached(); /* - * Ordering is important, P2 must be mapped in the PMB before we - * can set PMB.SE, and P1 must be mapped before we jump back to - * P1 space. + * Insert PMB entries for the P1 and P2 areas so that, after + * we've switched the MMU to 32-bit mode, the semantics of P1 + * and P2 are the same as in 29-bit mode, e.g. + * + * P1 - provides a cached window onto physical memory + * P2 - provides an uncached window onto physical memory */ - for (entry = 0; entry < nr_entries; entry++) { - struct pmb_entry *pmbe = pmb_init_map + entry; + size = pmb_remap(P2SEG, __MEMORY_START, __MEMORY_SIZE, + PMB_WT | PMB_UB); + BUG_ON(size != __MEMORY_SIZE); - __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, entry); - } + size = pmb_remap(P1SEG, __MEMORY_START, __MEMORY_SIZE, PMB_C); + BUG_ON(size != __MEMORY_SIZE); ctrl_outl(0, PMB_IRMCR); /* PMB.SE and UB[7] */ - ctrl_outl((1 << 31) | (1 << 7), PMB_PASCR); + ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR); /* Flush out the TLB */ i = ctrl_inl(MMUCR); -- cgit v1.2.3-70-g09d2 From ef269b32763b22100eda9c0bf99d462c6cd65377 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Tue, 6 Oct 2009 21:22:32 +0000 Subject: sh: Fix the offset from P1SEG/P2SEG where we map RAM We need to map the gap between 0x00000000 and __MEMORY_START in the PMB, as well as RAM. With this change my 7785LCR board can switch to 32bit MMU mode at runtime. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/mm/pmb.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'arch/sh/mm/pmb.c') diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index 2d009bdcf90..7e64f6d960c 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -275,7 +275,7 @@ static void __pmb_unmap(struct pmb_entry *pmbe) int __uses_jump_to_uncached pmb_init(void) { unsigned int i; - long size; + long size, ret; jump_to_uncached(); @@ -287,12 +287,13 @@ int __uses_jump_to_uncached pmb_init(void) * P1 - provides a cached window onto physical memory * P2 - provides an uncached window onto physical memory */ - size = pmb_remap(P2SEG, __MEMORY_START, __MEMORY_SIZE, - PMB_WT | PMB_UB); - BUG_ON(size != __MEMORY_SIZE); + size = __MEMORY_START + __MEMORY_SIZE; - size = pmb_remap(P1SEG, __MEMORY_START, __MEMORY_SIZE, PMB_C); - BUG_ON(size != __MEMORY_SIZE); + ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C); + BUG_ON(ret != size); + + ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB); + BUG_ON(ret != size); ctrl_outl(0, PMB_IRMCR); -- cgit v1.2.3-70-g09d2 From 20b5014b3e5fe7b874a3f6a1dc03b0c21cb222cd Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Tue, 6 Oct 2009 21:22:33 +0000 Subject: sh: Fold fixed-PMB support into dynamic PMB support The initialisation process differs for CONFIG_PMB and for CONFIG_PMB_FIXED. For CONFIG_PMB_FIXED we need to register the PMB entries that were allocated by the bootloader. 
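In outline, that path walks the hardware slots, skips invalid ones, and claims the matching pmb_entry_list slot by passing the slot index straight to pmb_alloc(). A condensed sketch of the per-slot work done in the loop below (the helper name is hypothetical, and the cache-flag fixup is omitted here):

	/* Condensed sketch: register one bootloader-programmed slot. */
	static void __init register_fixed_slot(unsigned int i)
	{
		struct pmb_entry *pmbe;
		unsigned long data, vpn, ppn, flags;

		data = ctrl_inl(PMB_DATA + (i << PMB_E_SHIFT));
		if (!(data & PMB_V))
			return;				/* slot not in use */

		ppn   = data & PMB_PFN_MASK;
		flags = data & (PMB_C | PMB_WT | PMB_UB | PMB_SZ_MASK);
		vpn   = ctrl_inl(PMB_ADDR + (i << PMB_E_SHIFT)) & PMB_PFN_MASK;

		pmbe = pmb_alloc(vpn, ppn, flags, i);	/* claim slot i */
		WARN_ON(IS_ERR(pmbe));
	}
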
Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/include/asm/mmu.h | 2 ++ arch/sh/kernel/setup.c | 2 +- arch/sh/mm/Makefile | 3 +-- arch/sh/mm/pmb-fixed.c | 45 -------------------------------- arch/sh/mm/pmb.c | 65 +++++++++++++++++++++++++++++++++++++++++++---- 5 files changed, 64 insertions(+), 53 deletions(-) delete mode 100644 arch/sh/mm/pmb-fixed.c (limited to 'arch/sh/mm/pmb.c') diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h index 9c84b4546c8..c7426ad9926 100644 --- a/arch/sh/include/asm/mmu.h +++ b/arch/sh/include/asm/mmu.h @@ -15,6 +15,8 @@ #define PMB_E_MASK 0x0000000f #define PMB_E_SHIFT 8 +#define PMB_PFN_MASK 0xff000000 + #define PMB_SZ_16M 0x00000000 #define PMB_SZ_64M 0x00000010 #define PMB_SZ_128M 0x00000080 diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index 8fdd03a6768..df65fe2d43b 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -453,7 +453,7 @@ void __init setup_arch(char **cmdline_p) paging_init(); -#ifdef CONFIG_PMB +#ifdef CONFIG_PMB_ENABLE pmb_init(); #endif diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile index 3759bf85329..8a70535fa7c 100644 --- a/arch/sh/mm/Makefile +++ b/arch/sh/mm/Makefile @@ -33,8 +33,7 @@ obj-y += $(tlb-y) endif obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o -obj-$(CONFIG_PMB) += pmb.o -obj-$(CONFIG_PMB_FIXED) += pmb-fixed.o +obj-$(CONFIG_PMB_ENABLE) += pmb.o obj-$(CONFIG_NUMA) += numa.o # Special flags for fault_64.o. This puts restrictions on the number of diff --git a/arch/sh/mm/pmb-fixed.c b/arch/sh/mm/pmb-fixed.c deleted file mode 100644 index 43c8eac4d8a..00000000000 --- a/arch/sh/mm/pmb-fixed.c +++ /dev/null @@ -1,45 +0,0 @@ -/* - * arch/sh/mm/fixed_pmb.c - * - * Copyright (C) 2009 Renesas Solutions Corp. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- */ -#include -#include -#include -#include -#include - -static int __uses_jump_to_uncached fixed_pmb_init(void) -{ - int i; - unsigned long addr, data; - - jump_to_uncached(); - - for (i = 0; i < PMB_ENTRY_MAX; i++) { - addr = PMB_DATA + (i << PMB_E_SHIFT); - data = ctrl_inl(addr); - if (!(data & PMB_V)) - continue; - - if (data & PMB_C) { -#if defined(CONFIG_CACHE_WRITETHROUGH) - data |= PMB_WT; -#elif defined(CONFIG_CACHE_WRITEBACK) - data &= ~PMB_WT; -#else - data &= ~(PMB_C | PMB_WT); -#endif - } - ctrl_outl(data, addr); - } - - back_to_cached(); - - return 0; -} -arch_initcall(fixed_pmb_init); diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index 7e64f6d960c..280f6a16603 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -70,14 +70,20 @@ repeat: } static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, - unsigned long flags) + unsigned long flags, int entry) { struct pmb_entry *pmbe; int pos; - pos = pmb_alloc_entry(); - if (pos < 0) - return ERR_PTR(pos); + if (entry == PMB_NO_ENTRY) { + pos = pmb_alloc_entry(); + if (pos < 0) + return ERR_PTR(pos); + } else { + if (test_bit(entry, &pmb_map)) + return ERR_PTR(-ENOSPC); + pos = entry; + } pmbe = &pmb_entry_list[pos]; if (!pmbe) @@ -187,7 +193,8 @@ again: if (size < pmb_sizes[i].size) continue; - pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag); + pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag, + PMB_NO_ENTRY); if (IS_ERR(pmbe)) { err = PTR_ERR(pmbe); goto out; @@ -272,6 +279,7 @@ static void __pmb_unmap(struct pmb_entry *pmbe) } while (pmbe); } +#ifdef CONFIG_PMB int __uses_jump_to_uncached pmb_init(void) { unsigned int i; @@ -309,6 +317,53 @@ int __uses_jump_to_uncached pmb_init(void) return 0; } +#else +int __uses_jump_to_uncached pmb_init(void) +{ + int i; + unsigned long addr, data; + + jump_to_uncached(); + + for (i = 0; i < PMB_ENTRY_MAX; i++) { + struct pmb_entry *pmbe; + unsigned long vpn, ppn, flags; + + addr = PMB_DATA + (i << PMB_E_SHIFT); + data = ctrl_inl(addr); + if (!(data & PMB_V)) + continue; + + if (data & PMB_C) { +#if defined(CONFIG_CACHE_WRITETHROUGH) + data |= PMB_WT; +#elif defined(CONFIG_CACHE_WRITEBACK) + data &= ~PMB_WT; +#else + data &= ~(PMB_C | PMB_WT); +#endif + } + ctrl_outl(data, addr); + + ppn = data & PMB_PFN_MASK; + + flags = data & (PMB_C | PMB_WT | PMB_UB); + flags |= data & PMB_SZ_MASK; + + addr = PMB_ADDR + (i << PMB_E_SHIFT); + data = ctrl_inl(addr); + + vpn = data & PMB_PFN_MASK; + + pmbe = pmb_alloc(vpn, ppn, flags, i); + WARN_ON(IS_ERR(pmbe)); + } + + back_to_cached(); + + return 0; +} +#endif /* CONFIG_PMB */ static int pmb_seq_show(struct seq_file *file, void *iter) { -- cgit v1.2.3-70-g09d2
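
Taken together, the series leaves pmb_remap() and pmb_unmap() (declared in arch/sh/include/asm/mmu.h) as the only PMB interface visible to the rest of the kernel, with pmb_init() called directly from setup_arch(). A rough usage sketch; the addresses, size and error handling here are hypothetical, not taken from the tree:

	#include <linux/errno.h>
	#include <asm/mmu.h>

	/* Map a made-up 64MB region uncached/write-through, then drop it. */
	static long pmb_usage_sketch(void)
	{
		long mapped;

		mapped = pmb_remap(0xb0000000, 0x10000000, 0x04000000,
				   PMB_WT | PMB_UB);
		if (mapped < 0)
			return mapped;		/* e.g. no free PMB slots */

		/*
		 * pmb_remap() can map less than asked for (it only deals
		 * in 16MB/64MB/128MB/512MB sections), so check the size.
		 */
		if (mapped < 0x04000000) {
			pmb_unmap(0xb0000000);
			return -ENOSPC;
		}

		/* ... use the window at 0xb0000000 ... */

		pmb_unmap(0xb0000000);
		return 0;
	}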