path: root/arch/mn10300/mm
author     Linus Torvalds <torvalds@linux-foundation.org>  2010-10-27 18:53:26 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-10-27 18:53:26 -0700
commit     bdab225015fbbb45ccd8913f5d7c01b2bf67d8b2 (patch)
tree       5ef62301face958977a084bf2b6c5300296a25f2 /arch/mn10300/mm
parent     7c5814c7199851c5fe9395d08fc1ab3c8c1531ea (diff)
parent     7c7fcf762e405eb040ee10d22d656a791f616122 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-mn10300
* git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-mn10300: (44 commits)
  MN10300: Save frame pointer in thread_info struct rather than global var
  MN10300: Change "Matsushita" to "Panasonic".
  MN10300: Create a defconfig for the ASB2364 board
  MN10300: Update the ASB2303 defconfig
  MN10300: ASB2364: Add support for SMSC911X and SMC911X
  MN10300: ASB2364: Handle the IRQ multiplexer in the FPGA
  MN10300: Generic time support
  MN10300: Specify an ELF HWCAP flag for MN10300 Atomic Operations Unit support
  MN10300: Map userspace atomic op regs as a vmalloc page
  MN10300: And Panasonic AM34 subarch and implement SMP
  MN10300: Delete idle_timestamp from irq_cpustat_t
  MN10300: Make various interrupt priority settings configurable
  MN10300: Optimise do_csum()
  MN10300: Implement atomic ops using atomic ops unit
  MN10300: Make the FPU operate in non-lazy mode under SMP
  MN10300: SMP TLB flushing
  MN10300: Use the [ID]PTEL2 registers rather than [ID]PTEL for TLB control
  MN10300: Make the use of PIDR to mark TLB entries controllable
  MN10300: Rename __flush_tlb*() to local_flush_tlb*()
  MN10300: AM34 erratum requires MMUCTR read and write on exception entry
  ...
Diffstat (limited to 'arch/mn10300/mm')
-rw-r--r--  arch/mn10300/mm/Kconfig.cache          101
-rw-r--r--  arch/mn10300/mm/Makefile                14
-rw-r--r--  arch/mn10300/mm/cache-flush-by-reg.S   308
-rw-r--r--  arch/mn10300/mm/cache-flush-by-tag.S   251
-rw-r--r--  arch/mn10300/mm/cache-flush-icache.c   155
-rw-r--r--  arch/mn10300/mm/cache-flush-mn10300.S  192
-rw-r--r--  arch/mn10300/mm/cache-inv-by-reg.S     356
-rw-r--r--  arch/mn10300/mm/cache-inv-by-tag.S     348
-rw-r--r--  arch/mn10300/mm/cache-inv-icache.c     129
-rw-r--r--  arch/mn10300/mm/cache-mn10300.S        289
-rw-r--r--  arch/mn10300/mm/cache-smp-flush.c      156
-rw-r--r--  arch/mn10300/mm/cache-smp-inv.c        153
-rw-r--r--  arch/mn10300/mm/cache-smp.c            105
-rw-r--r--  arch/mn10300/mm/cache-smp.h             69
-rw-r--r--  arch/mn10300/mm/cache.c                 95
-rw-r--r--  arch/mn10300/mm/fault.c                 17
-rw-r--r--  arch/mn10300/mm/init.c                  26
-rw-r--r--  arch/mn10300/mm/misalignment.c           3
-rw-r--r--  arch/mn10300/mm/mmu-context.c           41
-rw-r--r--  arch/mn10300/mm/pgtable.c                2
-rw-r--r--  arch/mn10300/mm/tlb-mn10300.S           59
-rw-r--r--  arch/mn10300/mm/tlb-smp.c              214
22 files changed, 2447 insertions, 636 deletions
diff --git a/arch/mn10300/mm/Kconfig.cache b/arch/mn10300/mm/Kconfig.cache
new file mode 100644
index 00000000000..c4fd923a55a
--- /dev/null
+++ b/arch/mn10300/mm/Kconfig.cache
@@ -0,0 +1,101 @@
+#
+# MN10300 CPU cache options
+#
+
+choice
+ prompt "CPU Caching mode"
+ default MN10300_CACHE_WBACK
+ help
+ This option determines the caching mode for the kernel.
+
+ Write-Back caching mode involves all reads and writes causing
+ the affected cacheline to be read into the cache first before being
+ operated upon. Memory is not then updated by a write until the cache
+ is filled and a cacheline needs to be displaced from the cache to
+ make room. Only at that point is it written back.
+
+ Write-Through caching only fetches cachelines from memory on a
+ read. Writes always get written directly to memory. If the affected
+ cacheline is also in cache, it will be updated too.
+
+ The final option is to turn off caching entirely.
+
+config MN10300_CACHE_WBACK
+ bool "Write-Back"
+ help
+ The dcache operates in delayed write-back mode. It must be manually
+ flushed if writes are made that subsequently need to be executed or
+ to be DMA'd by a device.
+
+config MN10300_CACHE_WTHRU
+ bool "Write-Through"
+ help
+ The dcache operates in immediate write-through mode. Writes are
+ committed to RAM immediately in addition to being stored in the
+ cache. This means that the written data is immediately available for
+ execution or DMA.
+
+ This is not available for use with an SMP kernel if cache flushing
+ and invalidation by automatic purge register is not selected.
+
+config MN10300_CACHE_DISABLED
+ bool "Disabled"
+ help
+ The icache and dcache are disabled.
+
+endchoice
+
+config MN10300_CACHE_ENABLED
+ def_bool y if !MN10300_CACHE_DISABLED
+
+
+choice
+ prompt "CPU cache flush/invalidate method"
+ default MN10300_CACHE_MANAGE_BY_TAG if !AM34_2
+ default MN10300_CACHE_MANAGE_BY_REG if AM34_2
+ depends on MN10300_CACHE_ENABLED
+ help
+ This determines the method by which CPU cache flushing and
+ invalidation is performed.
+
+config MN10300_CACHE_MANAGE_BY_TAG
+ bool "Use the cache tag registers directly"
+ depends on !(SMP && MN10300_CACHE_WTHRU)
+
+config MN10300_CACHE_MANAGE_BY_REG
+ bool "Flush areas by way of automatic purge registers (AM34 only)"
+ depends on AM34_2
+
+endchoice
+
+config MN10300_CACHE_INV_BY_TAG
+ def_bool y if MN10300_CACHE_MANAGE_BY_TAG && MN10300_CACHE_ENABLED
+
+config MN10300_CACHE_INV_BY_REG
+ def_bool y if MN10300_CACHE_MANAGE_BY_REG && MN10300_CACHE_ENABLED
+
+config MN10300_CACHE_FLUSH_BY_TAG
+ def_bool y if MN10300_CACHE_MANAGE_BY_TAG && MN10300_CACHE_WBACK
+
+config MN10300_CACHE_FLUSH_BY_REG
+ def_bool y if MN10300_CACHE_MANAGE_BY_REG && MN10300_CACHE_WBACK
+
+
+config MN10300_HAS_CACHE_SNOOP
+ def_bool n
+
+config MN10300_CACHE_SNOOP
+ bool "Use CPU Cache Snooping"
+ depends on MN10300_CACHE_ENABLED && MN10300_HAS_CACHE_SNOOP
+ default y
+
+config MN10300_CACHE_FLUSH_ICACHE
+ def_bool y if MN10300_CACHE_WBACK && !MN10300_CACHE_SNOOP
+ help
+ Set if we need the dcache to be flushed before the icache is invalidated.
+
+config MN10300_CACHE_INV_ICACHE
+ def_bool y if MN10300_CACHE_WTHRU && !MN10300_CACHE_SNOOP
+ help
+ Set if we need the icache to be invalidated, even if the dcache is in
+ write-through mode and doesn't need flushing.
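
The flush/invalidate def_bool symbols above are pure derivations of the two choices, so, for instance, a flush implementation is only ever built when write-back caching is selected. A minimal C-preprocessor sketch of that invariant (not part of the patch; it only restates the CONFIG_* names defined in this file):

/* Sketch only: CONFIG_MN10300_CACHE_FLUSH_BY_* are derived from
 * MANAGE_BY_* && WBACK, so selecting a flush implementation implies
 * write-back caching. */
#if defined(CONFIG_MN10300_CACHE_FLUSH_BY_TAG) && !defined(CONFIG_MN10300_CACHE_WBACK)
#error "MN10300_CACHE_FLUSH_BY_TAG should imply MN10300_CACHE_WBACK"
#endif
#if defined(CONFIG_MN10300_CACHE_FLUSH_BY_REG) && !defined(CONFIG_MN10300_CACHE_WBACK)
#error "MN10300_CACHE_FLUSH_BY_REG should imply MN10300_CACHE_WBACK"
#endif
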
diff --git a/arch/mn10300/mm/Makefile b/arch/mn10300/mm/Makefile
index 1557277fbc5..203fee23f7d 100644
--- a/arch/mn10300/mm/Makefile
+++ b/arch/mn10300/mm/Makefile
@@ -2,11 +2,21 @@
# Makefile for the MN10300-specific memory management code
#
-cacheflush-y := cache.o cache-mn10300.o
-cacheflush-$(CONFIG_MN10300_CACHE_WBACK) += cache-flush-mn10300.o
+cache-smp-wback-$(CONFIG_MN10300_CACHE_WBACK) := cache-smp-flush.o
+
+cacheflush-y := cache.o
+cacheflush-$(CONFIG_SMP) += cache-smp.o cache-smp-inv.o $(cache-smp-wback-y)
+cacheflush-$(CONFIG_MN10300_CACHE_INV_ICACHE) += cache-inv-icache.o
+cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_ICACHE) += cache-flush-icache.o
+cacheflush-$(CONFIG_MN10300_CACHE_INV_BY_TAG) += cache-inv-by-tag.o
+cacheflush-$(CONFIG_MN10300_CACHE_INV_BY_REG) += cache-inv-by-reg.o
+cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_BY_TAG) += cache-flush-by-tag.o
+cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_BY_REG) += cache-flush-by-reg.o
cacheflush-$(CONFIG_MN10300_CACHE_DISABLED) := cache-disabled.o
obj-y := \
init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \
misalignment.o dma-alloc.o $(cacheflush-y)
+
+obj-$(CONFIG_SMP) += tlb-smp.o
diff --git a/arch/mn10300/mm/cache-flush-by-reg.S b/arch/mn10300/mm/cache-flush-by-reg.S
new file mode 100644
index 00000000000..1dcae021167
--- /dev/null
+++ b/arch/mn10300/mm/cache-flush-by-reg.S
@@ -0,0 +1,308 @@
+/* MN10300 CPU core caching routines, using indirect regs on cache controller
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/irqflags.h>
+
+ .am33_2
+
+#ifndef CONFIG_SMP
+ .globl mn10300_dcache_flush
+ .globl mn10300_dcache_flush_page
+ .globl mn10300_dcache_flush_range
+ .globl mn10300_dcache_flush_range2
+ .globl mn10300_dcache_flush_inv
+ .globl mn10300_dcache_flush_inv_page
+ .globl mn10300_dcache_flush_inv_range
+ .globl mn10300_dcache_flush_inv_range2
+
+mn10300_dcache_flush = mn10300_local_dcache_flush
+mn10300_dcache_flush_page = mn10300_local_dcache_flush_page
+mn10300_dcache_flush_range = mn10300_local_dcache_flush_range
+mn10300_dcache_flush_range2 = mn10300_local_dcache_flush_range2
+mn10300_dcache_flush_inv = mn10300_local_dcache_flush_inv
+mn10300_dcache_flush_inv_page = mn10300_local_dcache_flush_inv_page
+mn10300_dcache_flush_inv_range = mn10300_local_dcache_flush_inv_range
+mn10300_dcache_flush_inv_range2 = mn10300_local_dcache_flush_inv_range2
+
+#endif /* !CONFIG_SMP */
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush(void)
+# Flush the entire data cache back to RAM
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_flush
+ .type mn10300_local_dcache_flush,@function
+mn10300_local_dcache_flush:
+ movhu (CHCTR),d0
+ btst CHCTR_DCEN,d0
+ beq mn10300_local_dcache_flush_end
+
+ mov DCPGCR,a0
+
+ LOCAL_CLI_SAVE(d1)
+
+ # wait for busy bit of area purge
+ setlb
+ mov (a0),d0
+ btst DCPGCR_DCPGBSY,d0
+ lne
+
+ # set mask
+ clr d0
+ mov d0,(DCPGMR)
+
+ # area purge
+ #
+ # DCPGCR = DCPGCR_DCP
+ #
+ mov DCPGCR_DCP,d0
+ mov d0,(a0)
+
+ # wait for busy bit of area purge
+ setlb
+ mov (a0),d0
+ btst DCPGCR_DCPGBSY,d0
+ lne
+
+ LOCAL_IRQ_RESTORE(d1)
+
+mn10300_local_dcache_flush_end:
+ ret [],0
+ .size mn10300_local_dcache_flush,.-mn10300_local_dcache_flush
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_page(unsigned long start)
+# void mn10300_local_dcache_flush_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_flush_range2(unsigned long start, unsigned long size)
+# Flush a range of addresses on a page in the dcache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_flush_page
+ .globl mn10300_local_dcache_flush_range
+ .globl mn10300_local_dcache_flush_range2
+ .type mn10300_local_dcache_flush_page,@function
+ .type mn10300_local_dcache_flush_range,@function
+ .type mn10300_local_dcache_flush_range2,@function
+mn10300_local_dcache_flush_page:
+ and ~(PAGE_SIZE-1),d0
+ mov PAGE_SIZE,d1
+mn10300_local_dcache_flush_range2:
+ add d0,d1
+mn10300_local_dcache_flush_range:
+ movm [d2,d3,a2],(sp)
+
+ movhu (CHCTR),d2
+ btst CHCTR_DCEN,d2
+ beq mn10300_local_dcache_flush_range_end
+
+ # calculate alignsize
+ #
+ # alignsize = L1_CACHE_BYTES;
+ # for (i = (end - start - 1) / L1_CACHE_BYTES ; i > 0; i >>= 1)
+ # alignsize <<= 1;
+ # d2 = alignsize;
+ #
+ mov L1_CACHE_BYTES,d2
+ sub d0,d1,d3
+ add -1,d3
+ lsr L1_CACHE_SHIFT,d3
+ beq 2f
+1:
+ add d2,d2
+ lsr 1,d3
+ bne 1b
+2:
+ mov d1,a1 # a1 = end
+
+ LOCAL_CLI_SAVE(d3)
+ mov DCPGCR,a0
+
+ # wait for busy bit of area purge
+ setlb
+ mov (a0),d1
+ btst DCPGCR_DCPGBSY,d1
+ lne
+
+ # determine the mask
+ mov d2,d1
+ add -1,d1
+ not d1 # d1 = mask = ~(alignsize-1)
+ mov d1,(DCPGMR)
+
+ and d1,d0,a2 # a2 = mask & start
+
+dcpgloop:
+ # area purge
+ mov a2,d0
+ or DCPGCR_DCP,d0
+ mov d0,(a0) # DCPGCR = (mask & start) | DCPGCR_DCP
+
+ # wait for busy bit of area purge
+ setlb
+ mov (a0),d1
+ btst DCPGCR_DCPGBSY,d1
+ lne
+
+ # check purge of end address
+ add d2,a2 # a2 += alignsize
+ cmp a1,a2 # if (a2 < end) goto dcpgloop
+ bns dcpgloop
+
+ LOCAL_IRQ_RESTORE(d3)
+
+mn10300_local_dcache_flush_range_end:
+ ret [d2,d3,a2],12
+
+ .size mn10300_local_dcache_flush_page,.-mn10300_local_dcache_flush_page
+ .size mn10300_local_dcache_flush_range,.-mn10300_local_dcache_flush_range
+ .size mn10300_local_dcache_flush_range2,.-mn10300_local_dcache_flush_range2
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_inv(void)
+# Flush the entire data cache and invalidate all entries
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_flush_inv
+ .type mn10300_local_dcache_flush_inv,@function
+mn10300_local_dcache_flush_inv:
+ movhu (CHCTR),d0
+ btst CHCTR_DCEN,d0
+ beq mn10300_local_dcache_flush_inv_end
+
+ mov DCPGCR,a0
+
+ LOCAL_CLI_SAVE(d1)
+
+ # wait for busy bit of area purge & invalidate
+ setlb
+ mov (a0),d0
+ btst DCPGCR_DCPGBSY,d0
+ lne
+
+ # set the mask to cover everything
+ clr d0
+ mov d0,(DCPGMR)
+
+ # area purge & invalidate
+ mov DCPGCR_DCP|DCPGCR_DCI,d0
+ mov d0,(a0)
+
+ # wait for busy bit of area purge & invalidate
+ setlb
+ mov (a0),d0
+ btst DCPGCR_DCPGBSY,d0
+ lne
+
+ LOCAL_IRQ_RESTORE(d1)
+
+mn10300_local_dcache_flush_inv_end:
+ ret [],0
+ .size mn10300_local_dcache_flush_inv,.-mn10300_local_dcache_flush_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_inv_page(unsigned long start)
+# void mn10300_local_dcache_flush_inv_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_flush_inv_range2(unsigned long start, unsigned long size)
+# Flush and invalidate a range of addresses on a page in the dcache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_flush_inv_page
+ .globl mn10300_local_dcache_flush_inv_range
+ .globl mn10300_local_dcache_flush_inv_range2
+ .type mn10300_local_dcache_flush_inv_page,@function
+ .type mn10300_local_dcache_flush_inv_range,@function
+ .type mn10300_local_dcache_flush_inv_range2,@function
+mn10300_local_dcache_flush_inv_page:
+ and ~(PAGE_SIZE-1),d0
+ mov PAGE_SIZE,d1
+mn10300_local_dcache_flush_inv_range2:
+ add d0,d1
+mn10300_local_dcache_flush_inv_range:
+ movm [d2,d3,a2],(sp)
+
+ movhu (CHCTR),d2
+ btst CHCTR_DCEN,d2
+ beq mn10300_local_dcache_flush_inv_range_end
+
+ # calculate alignsize
+ #
+ # alignsize = L1_CACHE_BYTES;
+ # for (i = (end - start - 1) / L1_CACHE_BYTES; i > 0; i >>= 1)
+ # alignsize <<= 1;
+ # d2 = alignsize
+ #
+ mov L1_CACHE_BYTES,d2
+ sub d0,d1,d3
+ add -1,d3
+ lsr L1_CACHE_SHIFT,d3
+ beq 2f
+1:
+ add d2,d2
+ lsr 1,d3
+ bne 1b
+2:
+ mov d1,a1 # a1 = end
+
+ LOCAL_CLI_SAVE(d3)
+ mov DCPGCR,a0
+
+ # wait for busy bit of area purge & invalidate
+ setlb
+ mov (a0),d1
+ btst DCPGCR_DCPGBSY,d1
+ lne
+
+ # set the mask
+ mov d2,d1
+ add -1,d1
+ not d1 # d1 = mask = ~(alignsize-1)
+ mov d1,(DCPGMR)
+
+ and d1,d0,a2 # a2 = mask & start
+
+dcpgivloop:
+ # area purge & invalidate
+ mov a2,d0
+ or DCPGCR_DCP|DCPGCR_DCI,d0
+ mov d0,(a0) # DCPGCR = (mask & start)|DCPGCR_DCP|DCPGCR_DCI
+
+ # wait for busy bit of area purge & invalidate
+ setlb
+ mov (a0),d1
+ btst DCPGCR_DCPGBSY,d1
+ lne
+
+ # check purge & invalidate of end address
+ add d2,a2 # a2 += alignsize
+ cmp a1,a2 # if (a2 < end) goto dcpgivloop
+ bns dcpgivloop
+
+ LOCAL_IRQ_RESTORE(d3)
+
+mn10300_local_dcache_flush_inv_range_end:
+ ret [d2,d3,a2],12
+ .size mn10300_local_dcache_flush_inv_page,.-mn10300_local_dcache_flush_inv_page
+ .size mn10300_local_dcache_flush_inv_range,.-mn10300_local_dcache_flush_inv_range
+ .size mn10300_local_dcache_flush_inv_range2,.-mn10300_local_dcache_flush_inv_range2
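
The "alignsize" computed at the top of mn10300_local_dcache_flush_range above is a cache-line-multiple power of two large enough to span the requested range; it becomes the DCPGMR mask granule, so the purge loop covers the range in only a step or two. A standalone C model of that calculation (not kernel code; L1_CACHE_BYTES is assumed to be 32 here purely for illustration):

#include <stdio.h>

#define L1_CACHE_BYTES 32UL	/* assumed example value; really from <asm/cache.h> */

/* Mirror of the assembly's alignsize loop: double the granule once for each
 * halving of (end - start - 1) / L1_CACHE_BYTES, giving a power of two that
 * spans the whole range. */
static unsigned long purge_alignsize(unsigned long start, unsigned long end)
{
	unsigned long alignsize = L1_CACHE_BYTES;
	unsigned long i;

	for (i = (end - start - 1) / L1_CACHE_BYTES; i > 0; i >>= 1)
		alignsize <<= 1;
	return alignsize;
}

int main(void)
{
	/* e.g. flushing one 4KiB page: alignsize is 4096, so DCPGMR gets ~0xfff */
	unsigned long a = purge_alignsize(0x80001000UL, 0x80002000UL);

	printf("alignsize = %lu, mask = 0x%lx\n", a, ~(a - 1));
	return 0;
}
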
diff --git a/arch/mn10300/mm/cache-flush-by-tag.S b/arch/mn10300/mm/cache-flush-by-tag.S
new file mode 100644
index 00000000000..5cd6a27dd63
--- /dev/null
+++ b/arch/mn10300/mm/cache-flush-by-tag.S
@@ -0,0 +1,251 @@
+/* MN10300 CPU core caching routines, using direct tag flushing
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/irqflags.h>
+
+ .am33_2
+
+#ifndef CONFIG_SMP
+ .globl mn10300_dcache_flush
+ .globl mn10300_dcache_flush_page
+ .globl mn10300_dcache_flush_range
+ .globl mn10300_dcache_flush_range2
+ .globl mn10300_dcache_flush_inv
+ .globl mn10300_dcache_flush_inv_page
+ .globl mn10300_dcache_flush_inv_range
+ .globl mn10300_dcache_flush_inv_range2
+
+mn10300_dcache_flush = mn10300_local_dcache_flush
+mn10300_dcache_flush_page = mn10300_local_dcache_flush_page
+mn10300_dcache_flush_range = mn10300_local_dcache_flush_range
+mn10300_dcache_flush_range2 = mn10300_local_dcache_flush_range2
+mn10300_dcache_flush_inv = mn10300_local_dcache_flush_inv
+mn10300_dcache_flush_inv_page = mn10300_local_dcache_flush_inv_page
+mn10300_dcache_flush_inv_range = mn10300_local_dcache_flush_inv_range
+mn10300_dcache_flush_inv_range2 = mn10300_local_dcache_flush_inv_range2
+
+#endif /* !CONFIG_SMP */
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush(void)
+# Flush the entire data cache back to RAM
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_flush
+ .type mn10300_local_dcache_flush,@function
+mn10300_local_dcache_flush:
+ movhu (CHCTR),d0
+ btst CHCTR_DCEN,d0
+ beq mn10300_local_dcache_flush_end
+
+ # read the addresses tagged in the cache's tag RAM and attempt to flush
+ # those addresses specifically
+ # - we rely on the hardware to filter out invalid tag entry addresses
+ mov DCACHE_TAG(0,0),a0 # dcache tag RAM access address
+ mov DCACHE_PURGE(0,0),a1 # dcache purge request address
+ mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1 # total number of entries
+
+mn10300_local_dcache_flush_loop:
+ mov (a0),d0
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
+ or L1_CACHE_TAG_VALID,d0 # retain valid entries in the
+ # cache
+ mov d0,(a1) # conditional purge
+
+ add L1_CACHE_BYTES,a0
+ add L1_CACHE_BYTES,a1
+ add -1,d1
+ bne mn10300_local_dcache_flush_loop
+
+mn10300_local_dcache_flush_end:
+ ret [],0
+ .size mn10300_local_dcache_flush,.-mn10300_local_dcache_flush
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_page(unsigned long start)
+# void mn10300_local_dcache_flush_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_flush_range2(unsigned long start, unsigned long size)
+# Flush a range of addresses on a page in the dcache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_flush_page
+ .globl mn10300_local_dcache_flush_range
+ .globl mn10300_local_dcache_flush_range2
+ .type mn10300_local_dcache_flush_page,@function
+ .type mn10300_local_dcache_flush_range,@function
+ .type mn10300_local_dcache_flush_range2,@function
+mn10300_local_dcache_flush_page:
+ and ~(PAGE_SIZE-1),d0
+ mov PAGE_SIZE,d1
+mn10300_local_dcache_flush_range2:
+ add d0,d1
+mn10300_local_dcache_flush_range:
+ movm [d2],(sp)
+
+ movhu (CHCTR),d2
+ btst CHCTR_DCEN,d2
+ beq mn10300_local_dcache_flush_range_end
+
+ sub d0,d1,a0
+ cmp MN10300_DCACHE_FLUSH_BORDER,a0
+ ble 1f
+
+ movm (sp),[d2]
+ bra mn10300_local_dcache_flush
+1:
+
+ # round start addr down
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
+ mov d0,a1
+
+ add L1_CACHE_BYTES,d1 # round end addr up
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
+
+ # write a request to flush all instances of an address from the cache
+ mov DCACHE_PURGE(0,0),a0
+ mov a1,d0
+ and L1_CACHE_TAG_ENTRY,d0
+ add d0,a0 # starting dcache purge control
+ # reg address
+
+ sub a1,d1
+ lsr L1_CACHE_SHIFT,d1 # total number of entries to
+ # examine
+
+ or L1_CACHE_TAG_VALID,a1 # retain valid entries in the
+ # cache
+
+mn10300_local_dcache_flush_range_loop:
+ mov a1,(L1_CACHE_WAYDISP*0,a0) # conditionally purge this line
+ # all ways
+
+ add L1_CACHE_BYTES,a0
+ add L1_CACHE_BYTES,a1
+ and ~L1_CACHE_WAYDISP,a0 # make sure we stay on way 0
+ add -1,d1
+ bne mn10300_local_dcache_flush_range_loop
+
+mn10300_local_dcache_flush_range_end:
+ ret [d2],4
+
+ .size mn10300_local_dcache_flush_page,.-mn10300_local_dcache_flush_page
+ .size mn10300_local_dcache_flush_range,.-mn10300_local_dcache_flush_range
+ .size mn10300_local_dcache_flush_range2,.-mn10300_local_dcache_flush_range2
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_inv(void)
+# Flush the entire data cache and invalidate all entries
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_flush_inv
+ .type mn10300_local_dcache_flush_inv,@function
+mn10300_local_dcache_flush_inv:
+ movhu (CHCTR),d0
+ btst CHCTR_DCEN,d0
+ beq mn10300_local_dcache_flush_inv_end
+
+ mov L1_CACHE_NENTRIES,d1
+ clr a1
+
+mn10300_local_dcache_flush_inv_loop:
+ mov (DCACHE_PURGE_WAY0(0),a1),d0 # unconditional purge
+ mov (DCACHE_PURGE_WAY1(0),a1),d0 # unconditional purge
+ mov (DCACHE_PURGE_WAY2(0),a1),d0 # unconditional purge
+ mov (DCACHE_PURGE_WAY3(0),a1),d0 # unconditional purge
+
+ add L1_CACHE_BYTES,a1
+ add -1,d1
+ bne mn10300_local_dcache_flush_inv_loop
+
+mn10300_local_dcache_flush_inv_end:
+ ret [],0
+ .size mn10300_local_dcache_flush_inv,.-mn10300_local_dcache_flush_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_inv_page(unsigned long start)
+# void mn10300_local_dcache_flush_inv_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_flush_inv_range2(unsigned long start, unsigned long size)
+# Flush and invalidate a range of addresses on a page in the dcache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_flush_inv_page
+ .globl mn10300_local_dcache_flush_inv_range
+ .globl mn10300_local_dcache_flush_inv_range2
+ .type mn10300_local_dcache_flush_inv_page,@function
+ .type mn10300_local_dcache_flush_inv_range,@function
+ .type mn10300_local_dcache_flush_inv_range2,@function
+mn10300_local_dcache_flush_inv_page:
+ and ~(PAGE_SIZE-1),d0
+ mov PAGE_SIZE,d1
+mn10300_local_dcache_flush_inv_range2:
+ add d0,d1
+mn10300_local_dcache_flush_inv_range:
+ movm [d2],(sp)
+
+ movhu (CHCTR),d2
+ btst CHCTR_DCEN,d2
+ beq mn10300_local_dcache_flush_inv_range_end
+
+ sub d0,d1,a0
+ cmp MN10300_DCACHE_FLUSH_INV_BORDER,a0
+ ble 1f
+
+ movm (sp),[d2]
+ bra mn10300_local_dcache_flush_inv
+1:
+
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
+ # addr down
+ mov d0,a1
+
+ add L1_CACHE_BYTES,d1 # round end addr up
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
+
+ # write a request to flush and invalidate all instances of an address
+ # from the cache
+ mov DCACHE_PURGE(0,0),a0
+ mov a1,d0
+ and L1_CACHE_TAG_ENTRY,d0
+ add d0,a0 # starting dcache purge control
+ # reg address
+
+ sub a1,d1
+ lsr L1_CACHE_SHIFT,d1 # total number of entries to
+ # examine
+
+mn10300_local_dcache_flush_inv_range_loop:
+ mov a1,(L1_CACHE_WAYDISP*0,a0) # conditionally purge this line
+ # in all ways
+
+ add L1_CACHE_BYTES,a0
+ add L1_CACHE_BYTES,a1
+ and ~L1_CACHE_WAYDISP,a0 # make sure we stay on way 0
+ add -1,d1
+ bne mn10300_local_dcache_flush_inv_range_loop
+
+mn10300_local_dcache_flush_inv_range_end:
+ ret [d2],4
+ .size mn10300_local_dcache_flush_inv_page,.-mn10300_local_dcache_flush_inv_page
+ .size mn10300_local_dcache_flush_inv_range,.-mn10300_local_dcache_flush_inv_range
+ .size mn10300_local_dcache_flush_inv_range2,.-mn10300_local_dcache_flush_inv_range2
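
In the tag-driven variant above, the full-cache flush walks every tag RAM slot, keeps only the address and entry-index bits, sets the valid bit and writes the result to the matching purge address, relying on the hardware to ignore requests for lines that are not actually valid. A standalone C model of that loop (not kernel code; the entry count and bit masks are assumed stand-ins for the memory-mapped DCACHE_TAG/DCACHE_PURGE windows):

#include <stdint.h>
#include <stddef.h>

#define L1_CACHE_NWAYS		4
#define L1_CACHE_NENTRIES	256		/* assumed example value */
#define L1_CACHE_TAG_VALID	0x00000001u
#define L1_CACHE_TAG_ENTRY	0x00000ff0u	/* assumed example layout */
#define L1_CACHE_TAG_ADDRESS	0xfffff000u	/* assumed example layout */

/* Stand-ins for the tag RAM and purge request windows. */
static uint32_t dcache_tag[L1_CACHE_NWAYS * L1_CACHE_NENTRIES];
static uint32_t dcache_purge[L1_CACHE_NWAYS * L1_CACHE_NENTRIES];

/* Mirror of mn10300_local_dcache_flush: request a conditional purge for the
 * address recorded in every tag slot; invalid slots are filtered by hardware. */
static void model_dcache_flush(void)
{
	size_t n;

	for (n = 0; n < L1_CACHE_NWAYS * L1_CACHE_NENTRIES; n++) {
		uint32_t req = dcache_tag[n];

		req &= L1_CACHE_TAG_ADDRESS | L1_CACHE_TAG_ENTRY;
		req |= L1_CACHE_TAG_VALID;	/* only purge lines that are valid */
		dcache_purge[n] = req;		/* write the purge request */
	}
}

int main(void)
{
	model_dcache_flush();
	return 0;
}
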
diff --git a/arch/mn10300/mm/cache-flush-icache.c b/arch/mn10300/mm/cache-flush-icache.c
new file mode 100644
index 00000000000..fdb1a9db20f
--- /dev/null
+++ b/arch/mn10300/mm/cache-flush-icache.c
@@ -0,0 +1,155 @@
+/* Flush dcache and invalidate icache when the dcache is in writeback mode
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include <asm/smp.h>
+#include "cache-smp.h"
+
+/**
+ * flush_icache_page - Flush a page from the dcache and invalidate the icache
+ * @vma: The VMA the page is part of.
+ * @page: The page to be flushed.
+ *
+ * Write a page back from the dcache and invalidate the icache so that we can
+ * run code from it that we've just written into it
+ */
+void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+ unsigned long start = page_to_phys(page);
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+
+ mn10300_local_dcache_flush_page(start);
+ mn10300_local_icache_inv_page(start);
+
+ smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, start + PAGE_SIZE);
+ smp_unlock_cache(flags);
+}
+EXPORT_SYMBOL(flush_icache_page);
+
+/**
+ * flush_icache_page_range - Flush dcache and invalidate icache for part of a
+ * single page
+ * @start: The starting virtual address of the page part.
+ * @end: The ending virtual address of the page part.
+ *
+ * Flush the dcache and invalidate the icache for part of a single page, as
+ * determined by the virtual addresses given. The page must be in the paged
+ * area.
+ */
+static void flush_icache_page_range(unsigned long start, unsigned long end)
+{
+ unsigned long addr, size, off;
+ struct page *page;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *ppte, pte;
+
+ /* work out how much of the page to flush */
+ off = start & ~PAGE_MASK;
+ size = end - start;
+
+ /* get the physical address the page is mapped to from the page
+ * tables */
+ pgd = pgd_offset(current->mm, start);
+ if (!pgd || !pgd_val(*pgd))
+ return;
+
+ pud = pud_offset(pgd, start);
+ if (!pud || !pud_val(*pud))
+ return;
+
+ pmd = pmd_offset(pud, start);
+ if (!pmd || !pmd_val(*pmd))
+ return;
+
+ ppte = pte_offset_map(pmd, start);
+ if (!ppte)
+ return;
+ pte = *ppte;
+ pte_unmap(ppte);
+
+ if (pte_none(pte))
+ return;
+
+ page = pte_page(pte);
+ if (!page)
+ return;
+
+ addr = page_to_phys(page);
+
+ /* flush the dcache and invalidate the icache coverage on that
+ * region */
+ mn10300_local_dcache_flush_range2(addr + off, size);
+ mn10300_local_icache_inv_range2(addr + off, size);
+ smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, end);
+}
+
+/**
+ * flush_icache_range - Globally flush dcache and invalidate icache for region
+ * @start: The starting virtual address of the region.
+ * @end: The ending virtual address of the region.
+ *
+ * This is used by the kernel to globally flush some code it has just written
+ * from the dcache back to RAM and then to globally invalidate the icache over
+ * that region so that that code can be run on all CPUs in the system.
+ */
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+ unsigned long start_page, end_page;
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+
+ if (end > 0x80000000UL) {
+ /* addresses above 0xa0000000 do not go through the cache */
+ if (end > 0xa0000000UL) {
+ end = 0xa0000000UL;
+ if (start >= end)
+ goto done;
+ }
+
+ /* kernel addresses between 0x80000000 and 0x9fffffff do not
+ * require page tables, so we just map such addresses
+ * directly */
+ start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
+ mn10300_local_dcache_flush_range(start_page, end);
+ mn10300_local_icache_inv_range(start_page, end);
+ smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start_page, end);
+ if (start_page == start)
+ goto done;
+ end = start_page;
+ }
+
+ start_page = start & PAGE_MASK;
+ end_page = (end - 1) & PAGE_MASK;
+
+ if (start_page == end_page) {
+ /* the first and last bytes are on the same page */
+ flush_icache_page_range(start, end);
+ } else if (start_page + PAGE_SIZE == end_page) {
+ /* split over two virtually contiguous pages */
+ flush_icache_page_range(start, end_page);
+ flush_icache_page_range(end_page, end);
+ } else {
+ /* more than 2 pages; just flush the entire cache */
+ mn10300_dcache_flush();
+ mn10300_icache_inv();
+ smp_cache_call(SMP_IDCACHE_INV_FLUSH, 0, 0);
+ }
+
+done:
+ smp_unlock_cache(flags);
+}
+EXPORT_SYMBOL(flush_icache_range);
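
A hedged usage sketch of the exported interface above: a caller that has just written instructions into kernel memory pushes them out of the dcache and invalidates the icache before running them. The install_code() helper and its arguments are hypothetical.

#include <linux/types.h>
#include <linux/string.h>
#include <asm/cacheflush.h>

/* Hypothetical helper: copy generated code into place, then make it
 * fetchable on every CPU via the flush_icache_range() defined above. */
static void install_code(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);			/* lands in the dcache */
	flush_icache_range((unsigned long)dst,
			   (unsigned long)dst + len);
}
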
diff --git a/arch/mn10300/mm/cache-flush-mn10300.S b/arch/mn10300/mm/cache-flush-mn10300.S
deleted file mode 100644
index c8ed1cbac10..00000000000
--- a/arch/mn10300/mm/cache-flush-mn10300.S
+++ /dev/null
@@ -1,192 +0,0 @@
-/* MN10300 CPU core caching routines
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-#include <linux/sys.h>
-#include <linux/linkage.h>
-#include <asm/smp.h>
-#include <asm/page.h>
-#include <asm/cache.h>
-
- .am33_2
- .globl mn10300_dcache_flush
- .globl mn10300_dcache_flush_page
- .globl mn10300_dcache_flush_range
- .globl mn10300_dcache_flush_range2
- .globl mn10300_dcache_flush_inv
- .globl mn10300_dcache_flush_inv_page
- .globl mn10300_dcache_flush_inv_range
- .globl mn10300_dcache_flush_inv_range2
-
-###############################################################################
-#
-# void mn10300_dcache_flush(void)
-# Flush the entire data cache back to RAM
-#
-###############################################################################
- ALIGN
-mn10300_dcache_flush:
- movhu (CHCTR),d0
- btst CHCTR_DCEN,d0
- beq mn10300_dcache_flush_end
-
- # read the addresses tagged in the cache's tag RAM and attempt to flush
- # those addresses specifically
- # - we rely on the hardware to filter out invalid tag entry addresses
- mov DCACHE_TAG(0,0),a0 # dcache tag RAM access address
- mov DCACHE_PURGE(0,0),a1 # dcache purge request address
- mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1 # total number of entries
-
-mn10300_dcache_flush_loop:
- mov (a0),d0
- and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
- or L1_CACHE_TAG_VALID,d0 # retain valid entries in the
- # cache
- mov d0,(a1) # conditional purge
-
-mn10300_dcache_flush_skip:
- add L1_CACHE_BYTES,a0
- add L1_CACHE_BYTES,a1
- add -1,d1
- bne mn10300_dcache_flush_loop
-
-mn10300_dcache_flush_end:
- ret [],0
-
-###############################################################################
-#
-# void mn10300_dcache_flush_page(unsigned start)
-# void mn10300_dcache_flush_range(unsigned start, unsigned end)
-# void mn10300_dcache_flush_range2(unsigned start, unsigned size)
-# Flush a range of addresses on a page in the dcache
-#
-###############################################################################
- ALIGN
-mn10300_dcache_flush_page:
- mov PAGE_SIZE,d1
-mn10300_dcache_flush_range2:
- add d0,d1
-mn10300_dcache_flush_range:
- movm [d2,d3],(sp)
-
- movhu (CHCTR),d2
- btst CHCTR_DCEN,d2
- beq mn10300_dcache_flush_range_end
-
- # round start addr down
- and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
- mov d0,a1
-
- add L1_CACHE_BYTES,d1 # round end addr up
- and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
-
- # write a request to flush all instances of an address from the cache
- mov DCACHE_PURGE(0,0),a0
- mov a1,d0
- and L1_CACHE_TAG_ENTRY,d0
- add d0,a0 # starting dcache purge control
- # reg address
-
- sub a1,d1
- lsr L1_CACHE_SHIFT,d1 # total number of entries to
- # examine
-
- or L1_CACHE_TAG_VALID,a1 # retain valid entries in the
- # cache
-
-mn10300_dcache_flush_range_loop:
- mov a1,(L1_CACHE_WAYDISP*0,a0) # conditionally purge this line
- # all ways
-
- add L1_CACHE_BYTES,a0
- add L1_CACHE_BYTES,a1
- and ~L1_CACHE_WAYDISP,a0 # make sure way stay on way 0
- add -1,d1
- bne mn10300_dcache_flush_range_loop
-
-mn10300_dcache_flush_range_end:
- ret [d2,d3],8
-
-###############################################################################
-#
-# void mn10300_dcache_flush_inv(void)
-# Flush the entire data cache and invalidate all entries
-#
-###############################################################################
- ALIGN
-mn10300_dcache_flush_inv:
- movhu (CHCTR),d0
- btst CHCTR_DCEN,d0
- beq mn10300_dcache_flush_inv_end
-
- # hit each line in the dcache with an unconditional purge
- mov DCACHE_PURGE(0,0),a1 # dcache purge request address
- mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1 # total number of entries
-
-mn10300_dcache_flush_inv_loop:
- mov (a1),d0 # unconditional purge
-
- add L1_CACHE_BYTES,a1
- add -1,d1
- bne mn10300_dcache_flush_inv_loop
-
-mn10300_dcache_flush_inv_end:
- ret [],0
-
-###############################################################################
-#
-# void mn10300_dcache_flush_inv_page(unsigned start)
-# void mn10300_dcache_flush_inv_range(unsigned start, unsigned end)
-# void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size)
-# Flush and invalidate a range of addresses on a page in the dcache
-#
-###############################################################################
- ALIGN
-mn10300_dcache_flush_inv_page:
- mov PAGE_SIZE,d1
-mn10300_dcache_flush_inv_range2:
- add d0,d1
-mn10300_dcache_flush_inv_range:
- movm [d2,d3],(sp)
- movhu (CHCTR),d2
- btst CHCTR_DCEN,d2
- beq mn10300_dcache_flush_inv_range_end
-
- and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
- # addr down
- mov d0,a1
-
- add L1_CACHE_BYTES,d1 # round end addr up
- and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
-
- # write a request to flush and invalidate all instances of an address
- # from the cache
- mov DCACHE_PURGE(0,0),a0
- mov a1,d0
- and L1_CACHE_TAG_ENTRY,d0
- add d0,a0 # starting dcache purge control
- # reg address
-
- sub a1,d1
- lsr L1_CACHE_SHIFT,d1 # total number of entries to
- # examine
-
-mn10300_dcache_flush_inv_range_loop:
- mov a1,(L1_CACHE_WAYDISP*0,a0) # conditionally purge this line
- # in all ways
-
- add L1_CACHE_BYTES,a0
- add L1_CACHE_BYTES,a1
- and ~L1_CACHE_WAYDISP,a0 # make sure way stay on way 0
- add -1,d1
- bne mn10300_dcache_flush_inv_range_loop
-
-mn10300_dcache_flush_inv_range_end:
- ret [d2,d3],8
diff --git a/arch/mn10300/mm/cache-inv-by-reg.S b/arch/mn10300/mm/cache-inv-by-reg.S
new file mode 100644
index 00000000000..c8950861ed7
--- /dev/null
+++ b/arch/mn10300/mm/cache-inv-by-reg.S
@@ -0,0 +1,356 @@
+/* MN10300 CPU cache invalidation routines, using automatic purge registers
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/irqflags.h>
+#include <asm/cacheflush.h>
+
+#define mn10300_local_dcache_inv_range_intr_interval \
+ +((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
+
+#if mn10300_local_dcache_inv_range_intr_interval > 0xff
+#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less
+#endif
+
+ .am33_2
+
+#ifndef CONFIG_SMP
+ .globl mn10300_icache_inv
+ .globl mn10300_icache_inv_page
+ .globl mn10300_icache_inv_range
+ .globl mn10300_icache_inv_range2
+ .globl mn10300_dcache_inv
+ .globl mn10300_dcache_inv_page
+ .globl mn10300_dcache_inv_range
+ .globl mn10300_dcache_inv_range2
+
+mn10300_icache_inv = mn10300_local_icache_inv
+mn10300_icache_inv_page = mn10300_local_icache_inv_page
+mn10300_icache_inv_range = mn10300_local_icache_inv_range
+mn10300_icache_inv_range2 = mn10300_local_icache_inv_range2
+mn10300_dcache_inv = mn10300_local_dcache_inv
+mn10300_dcache_inv_page = mn10300_local_dcache_inv_page
+mn10300_dcache_inv_range = mn10300_local_dcache_inv_range
+mn10300_dcache_inv_range2 = mn10300_local_dcache_inv_range2
+
+#endif /* !CONFIG_SMP */
+
+###############################################################################
+#
+# void mn10300_local_icache_inv(void)
+# Invalidate the entire icache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_icache_inv
+ .type mn10300_local_icache_inv,@function
+mn10300_local_icache_inv:
+ mov CHCTR,a0
+
+ movhu (a0),d0
+ btst CHCTR_ICEN,d0
+ beq mn10300_local_icache_inv_end
+
+ # invalidate
+ or CHCTR_ICINV,d0
+ movhu d0,(a0)
+ movhu (a0),d0
+
+mn10300_local_icache_inv_end:
+ ret [],0
+ .size mn10300_local_icache_inv,.-mn10300_local_icache_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_inv(void)
+# Invalidate the entire dcache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_inv
+ .type mn10300_local_dcache_inv,@function
+mn10300_local_dcache_inv:
+ mov CHCTR,a0
+
+ movhu (a0),d0
+ btst CHCTR_DCEN,d0
+ beq mn10300_local_dcache_inv_end
+
+ # invalidate
+ or CHCTR_DCINV,d0
+ movhu d0,(a0)
+ movhu (a0),d0
+
+mn10300_local_dcache_inv_end:
+ ret [],0
+ .size mn10300_local_dcache_inv,.-mn10300_local_dcache_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_inv_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_inv_range2(unsigned long start, unsigned long size)
+# void mn10300_local_dcache_inv_page(unsigned long start)
+# Invalidate a range of addresses on a page in the dcache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_inv_page
+ .globl mn10300_local_dcache_inv_range
+ .globl mn10300_local_dcache_inv_range2
+ .type mn10300_local_dcache_inv_page,@function
+ .type mn10300_local_dcache_inv_range,@function
+ .type mn10300_local_dcache_inv_range2,@function
+mn10300_local_dcache_inv_page:
+ and ~(PAGE_SIZE-1),d0
+ mov PAGE_SIZE,d1
+mn10300_local_dcache_inv_range2:
+ add d0,d1
+mn10300_local_dcache_inv_range:
+ # If we are in writeback mode we check the start and end alignments,
+ # and if they're not cacheline-aligned, we must flush any bits outside
+ # the range that share cachelines with stuff inside the range
+#ifdef CONFIG_MN10300_CACHE_WBACK
+ btst ~(L1_CACHE_BYTES-1),d0
+ bne 1f
+ btst ~(L1_CACHE_BYTES-1),d1
+ beq 2f
+1:
+ bra mn10300_local_dcache_flush_inv_range
+2:
+#endif /* CONFIG_MN10300_CACHE_WBACK */
+
+ movm [d2,d3,a2],(sp)
+
+ mov CHCTR,a0
+ movhu (a0),d2
+ btst CHCTR_DCEN,d2
+ beq mn10300_local_dcache_inv_range_end
+
+ # round the addresses out to be full cachelines, unless we're in
+ # writeback mode, in which case we would be in flush and invalidate by
+ # now
+#ifndef CONFIG_MN10300_CACHE_WBACK
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
+ # addr down
+
+ mov L1_CACHE_BYTES-1,d2
+ add d2,d1
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1 # round end addr up
+#endif /* !CONFIG_MN10300_CACHE_WBACK */
+
+ sub d0,d1,d2 # calculate the total size
+ mov d0,a2 # A2 = start address
+ mov d1,a1 # A1 = end address
+
+ LOCAL_CLI_SAVE(d3)
+
+ mov DCPGCR,a0 # make sure the purger isn't busy
+ setlb
+ mov (a0),d0
+ btst DCPGCR_DCPGBSY,d0
+ lne
+
+ # skip initial address alignment calculation if address is zero
+ mov d2,d1
+ cmp 0,a2
+ beq 1f
+
+dcivloop:
+ /* calculate alignsize
+ *
+ * alignsize = L1_CACHE_BYTES;
+ * while (! start & alignsize) {
+ * alignsize <<=1;
+ * }
+ * d1 = alignsize;
+ */
+ mov L1_CACHE_BYTES,d1
+ lsr 1,d1
+ setlb
+ add d1,d1
+ mov d1,d0
+ and a2,d0
+ leq
+
+1:
+ /* calculate invsize
+ *
+ * if (totalsize > alignsize) {
+ * invsize = alignsize;
+ * } else {
+ * invsize = totalsize;
+ * tmp = 0x80000000;
+ * while (! invsize & tmp) {
+ * tmp >>= 1;
+ * }
+ * invsize = tmp;
+ * }
+ * d1 = invsize
+ */
+ cmp d2,d1
+ bns 2f
+ mov d2,d1
+
+ mov 0x80000000,d0 # start from 31bit=1
+ setlb
+ lsr 1,d0
+ mov d0,e0
+ and d1,e0
+ leq
+ mov d0,d1
+
+2:
+ /* set mask
+ *
+ * mask = ~(invsize-1);
+ * DCPGMR = mask;
+ */
+ mov d1,d0
+ add -1,d0
+ not d0
+ mov d0,(DCPGMR)
+
+ # invalidate area
+ mov a2,d0
+ or DCPGCR_DCI,d0
+ mov d0,(a0) # DCPGCR = (mask & start) | DCPGCR_DCI
+
+ setlb # wait for the purge to complete
+ mov (a0),d0
+ btst DCPGCR_DCPGBSY,d0
+ lne
+
+ sub d1,d2 # decrease size remaining
+ add d1,a2 # increase next start address
+
+ /* check invalidating of end address
+ *
+ * a2 = a2 + invsize
+ * if (a2 < end) {
+ * goto dcivloop;
+ * } */
+ cmp a1,a2
+ bns dcivloop
+
+ LOCAL_IRQ_RESTORE(d3)
+
+mn10300_local_dcache_inv_range_end:
+ ret [d2,d3,a2],12
+ .size mn10300_local_dcache_inv_page,.-mn10300_local_dcache_inv_page
+ .size mn10300_local_dcache_inv_range,.-mn10300_local_dcache_inv_range
+ .size mn10300_local_dcache_inv_range2,.-mn10300_local_dcache_inv_range2
+
+###############################################################################
+#
+# void mn10300_local_icache_inv_page(unsigned long start)
+# void mn10300_local_icache_inv_range2(unsigned long start, unsigned long size)
+# void mn10300_local_icache_inv_range(unsigned long start, unsigned long end)
+# Invalidate a range of addresses on a page in the icache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_icache_inv_page
+ .globl mn10300_local_icache_inv_range
+ .globl mn10300_local_icache_inv_range2
+ .type mn10300_local_icache_inv_page,@function
+ .type mn10300_local_icache_inv_range,@function
+ .type mn10300_local_icache_inv_range2,@function
+mn10300_local_icache_inv_page:
+ and ~(PAGE_SIZE-1),d0
+ mov PAGE_SIZE,d1
+mn10300_local_icache_inv_range2:
+ add d0,d1
+mn10300_local_icache_inv_range:
+ movm [d2,d3,a2],(sp)
+
+ mov CHCTR,a0
+ movhu (a0),d2
+ btst CHCTR_ICEN,d2
+ beq mn10300_local_icache_inv_range_reg_end
+
+ /* calculate alignsize
+ *
+ * alignsize = L1_CACHE_BYTES;
+ * for (i = (end - start - 1) / L1_CACHE_BYTES ; i > 0; i >>= 1) {
+ * alignsize <<= 1;
+ * }
+ * d2 = alignsize;
+ */
+ mov L1_CACHE_BYTES,d2
+ sub d0,d1,d3
+ add -1,d3
+ lsr L1_CACHE_SHIFT,d3
+ beq 2f
+1:
+ add d2,d2
+ lsr 1,d3
+ bne 1b
+2:
+
+ /* a1 = end */
+ mov d1,a1
+
+ LOCAL_CLI_SAVE(d3)
+
+ mov ICIVCR,a0
+ /* wait for busy bit of area invalidation */
+ setlb
+ mov (a0),d1
+ btst ICIVCR_ICIVBSY,d1
+ lne
+
+ /* set mask
+ *
+ * mask = ~(alignsize-1);
+ * ICIVMR = mask;
+ */
+ mov d2,d1
+ add -1,d1
+ not d1
+ mov d1,(ICIVMR)
+ /* a2 = mask & start */
+ and d1,d0,a2
+
+icivloop:
+ /* area invalidate
+ *
+ * ICIVCR = (mask & start) | ICIVCR_ICI
+ */
+ mov a2,d0
+ or ICIVCR_ICI,d0
+ mov d0,(a0)
+
+ /* wait for busy bit of area invalidation */
+ setlb
+ mov (a0),d1
+ btst ICIVCR_ICIVBSY,d1
+ lne
+
+ /* check invalidating of end address
+ *
+ * a2 = a2 + alignsize
+ * if (a2 < end) {
+ * goto icivloop;
+ * } */
+ add d2,a2
+ cmp a1,a2
+ bns icivloop
+
+ LOCAL_IRQ_RESTORE(d3)
+
+mn10300_local_icache_inv_range_reg_end:
+ ret [d2,d3,a2],12
+ .size mn10300_local_icache_inv_page,.-mn10300_local_icache_inv_page
+ .size mn10300_local_icache_inv_range,.-mn10300_local_icache_inv_range
+ .size mn10300_local_icache_inv_range2,.-mn10300_local_icache_inv_range2
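
The dcache invalidation loop above picks a per-step purge size as described by the pseudocode in its comments: the largest naturally aligned power-of-two block that starts at the current address, capped at what is left of the range. A standalone C model (not kernel code; the start == 0 special case handled by the assembly is omitted, and L1_CACHE_BYTES is an assumed example value):

#include <stdio.h>

#define L1_CACHE_BYTES 32UL	/* assumed example value */

/* One step of the area-invalidate loop: the block purged is the biggest
 * power of two the current start address is aligned to, but never more
 * than the bytes remaining. */
static unsigned long inv_step_size(unsigned long start, unsigned long remaining)
{
	unsigned long alignsize = L1_CACHE_BYTES;
	unsigned long invsize;

	while (!(start & alignsize))		/* assumes start != 0 */
		alignsize <<= 1;

	if (remaining > alignsize) {
		invsize = alignsize;
	} else {
		invsize = 0x80000000UL;		/* largest power of two <= remaining */
		while (!(invsize & remaining))
			invsize >>= 1;
	}
	return invsize;
}

int main(void)
{
	unsigned long start = 0x80001020UL, end = 0x80001400UL;

	while (start < end) {
		unsigned long step = inv_step_size(start, end - start);

		printf("DCPGMR = 0x%lx, purge 0x%lx..0x%lx\n",
		       ~(step - 1), start, start + step);
		start += step;
	}
	return 0;
}
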
diff --git a/arch/mn10300/mm/cache-inv-by-tag.S b/arch/mn10300/mm/cache-inv-by-tag.S
new file mode 100644
index 00000000000..e9713b40c0f
--- /dev/null
+++ b/arch/mn10300/mm/cache-inv-by-tag.S
@@ -0,0 +1,348 @@
+/* MN10300 CPU core caching routines
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/irqflags.h>
+#include <asm/cacheflush.h>
+
+#define mn10300_local_dcache_inv_range_intr_interval \
+ +((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
+
+#if mn10300_local_dcache_inv_range_intr_interval > 0xff
+#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less
+#endif
+
+ .am33_2
+
+ .globl mn10300_local_icache_inv_page
+ .globl mn10300_local_icache_inv_range
+ .globl mn10300_local_icache_inv_range2
+
+mn10300_local_icache_inv_page = mn10300_local_icache_inv
+mn10300_local_icache_inv_range = mn10300_local_icache_inv
+mn10300_local_icache_inv_range2 = mn10300_local_icache_inv
+
+#ifndef CONFIG_SMP
+ .globl mn10300_icache_inv
+ .globl mn10300_icache_inv_page
+ .globl mn10300_icache_inv_range
+ .globl mn10300_icache_inv_range2
+ .globl mn10300_dcache_inv
+ .globl mn10300_dcache_inv_page
+ .globl mn10300_dcache_inv_range
+ .globl mn10300_dcache_inv_range2
+
+mn10300_icache_inv = mn10300_local_icache_inv
+mn10300_icache_inv_page = mn10300_local_icache_inv_page
+mn10300_icache_inv_range = mn10300_local_icache_inv_range
+mn10300_icache_inv_range2 = mn10300_local_icache_inv_range2
+mn10300_dcache_inv = mn10300_local_dcache_inv
+mn10300_dcache_inv_page = mn10300_local_dcache_inv_page
+mn10300_dcache_inv_range = mn10300_local_dcache_inv_range
+mn10300_dcache_inv_range2 = mn10300_local_dcache_inv_range2
+
+#endif /* !CONFIG_SMP */
+
+###############################################################################
+#
+# void mn10300_local_icache_inv(void)
+# Invalidate the entire icache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_icache_inv
+ .type mn10300_local_icache_inv,@function
+mn10300_local_icache_inv:
+ mov CHCTR,a0
+
+ movhu (a0),d0
+ btst CHCTR_ICEN,d0
+ beq mn10300_local_icache_inv_end
+
+#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
+ LOCAL_CLI_SAVE(d1)
+
+ # disable the icache
+ and ~CHCTR_ICEN,d0
+ movhu d0,(a0)
+
+ # and wait for it to calm down
+ setlb
+ movhu (a0),d0
+ btst CHCTR_ICBUSY,d0
+ lne
+
+ # invalidate
+ or CHCTR_ICINV,d0
+ movhu d0,(a0)
+
+ # wait for the cache to finish
+ mov CHCTR,a0
+ setlb
+ movhu (a0),d0
+ btst CHCTR_ICBUSY,d0
+ lne
+
+ # and reenable it
+ and ~CHCTR_ICINV,d0
+ or CHCTR_ICEN,d0
+ movhu d0,(a0)
+ movhu (a0),d0
+
+ LOCAL_IRQ_RESTORE(d1)
+#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+ # invalidate
+ or CHCTR_ICINV,d0
+ movhu d0,(a0)
+ movhu (a0),d0
+#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+
+mn10300_local_icache_inv_end:
+ ret [],0
+ .size mn10300_local_icache_inv,.-mn10300_local_icache_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_inv(void)
+# Invalidate the entire dcache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_inv
+ .type mn10300_local_dcache_inv,@function
+mn10300_local_dcache_inv:
+ mov CHCTR,a0
+
+ movhu (a0),d0
+ btst CHCTR_DCEN,d0
+ beq mn10300_local_dcache_inv_end
+
+#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
+ LOCAL_CLI_SAVE(d1)
+
+ # disable the dcache
+ and ~CHCTR_DCEN,d0
+ movhu d0,(a0)
+
+ # and wait for it to calm down
+ setlb
+ movhu (a0),d0
+ btst CHCTR_DCBUSY,d0
+ lne
+
+ # invalidate
+ or CHCTR_DCINV,d0
+ movhu d0,(a0)
+
+ # wait for the cache to finish
+ mov CHCTR,a0
+ setlb
+ movhu (a0),d0
+ btst CHCTR_DCBUSY,d0
+ lne
+
+ # and reenable it
+ and ~CHCTR_DCINV,d0
+ or CHCTR_DCEN,d0
+ movhu d0,(a0)
+ movhu (a0),d0
+
+ LOCAL_IRQ_RESTORE(d1)
+#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+ # invalidate
+ or CHCTR_DCINV,d0
+ movhu d0,(a0)
+ movhu (a0),d0
+#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+
+mn10300_local_dcache_inv_end:
+ ret [],0
+ .size mn10300_local_dcache_inv,.-mn10300_local_dcache_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_inv_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_inv_range2(unsigned long start, unsigned long size)
+# void mn10300_local_dcache_inv_page(unsigned long start)
+# Invalidate a range of addresses on a page in the dcache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_inv_page
+ .globl mn10300_local_dcache_inv_range
+ .globl mn10300_local_dcache_inv_range2
+ .type mn10300_local_dcache_inv_page,@function
+ .type mn10300_local_dcache_inv_range,@function
+ .type mn10300_local_dcache_inv_range2,@function
+mn10300_local_dcache_inv_page:
+ and ~(PAGE_SIZE-1),d0
+ mov PAGE_SIZE,d1
+mn10300_local_dcache_inv_range2:
+ add d0,d1
+mn10300_local_dcache_inv_range:
+ # If we are in writeback mode we check the start and end alignments,
+ # and if they're not cacheline-aligned, we must flush any bits outside
+ # the range that share cachelines with stuff inside the range
+#ifdef CONFIG_MN10300_CACHE_WBACK
+ btst ~(L1_CACHE_BYTES-1),d0
+ bne 1f
+ btst ~(L1_CACHE_BYTES-1),d1
+ beq 2f
+1:
+ bra mn10300_local_dcache_flush_inv_range
+2:
+#endif /* CONFIG_MN10300_CACHE_WBACK */
+
+ movm [d2,d3,a2],(sp)
+
+ mov CHCTR,a2
+ movhu (a2),d2
+ btst CHCTR_DCEN,d2
+ beq mn10300_local_dcache_inv_range_end
+
+#ifndef CONFIG_MN10300_CACHE_WBACK
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
+ # addr down
+
+ add L1_CACHE_BYTES,d1 # round end addr up
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
+#endif /* !CONFIG_MN10300_CACHE_WBACK */
+ mov d0,a1
+
+ clr d2 # we're going to clear tag RAM
+ # entries
+
+ # read the tags from the tag RAM, and if they indicate a valid dirty
+ # cache line then invalidate that line
+ mov DCACHE_TAG(0,0),a0
+ mov a1,d0
+ and L1_CACHE_TAG_ENTRY,d0
+ add d0,a0 # starting dcache tag RAM
+ # access address
+
+ sub a1,d1
+ lsr L1_CACHE_SHIFT,d1 # total number of entries to
+ # examine
+
+ and ~(L1_CACHE_DISPARITY-1),a1 # determine comparator base
+
+mn10300_local_dcache_inv_range_outer_loop:
+ LOCAL_CLI_SAVE(d3)
+
+ # disable the dcache
+ movhu (a2),d0
+ and ~CHCTR_DCEN,d0
+ movhu d0,(a2)
+
+ # and wait for it to calm down
+ setlb
+ movhu (a2),d0
+ btst CHCTR_DCBUSY,d0
+ lne
+
+mn10300_local_dcache_inv_range_loop:
+
+ # process the way 0 slot
+ mov (L1_CACHE_WAYDISP*0,a0),d0 # read the tag in the way 0 slot
+ btst L1_CACHE_TAG_VALID,d0
+ beq mn10300_local_dcache_inv_range_skip_0 # jump if this cacheline
+ # is not valid
+
+ xor a1,d0
+ lsr 12,d0
+ bne mn10300_local_dcache_inv_range_skip_0 # jump if not this cacheline
+
+ mov d2,(L1_CACHE_WAYDISP*0,a0) # kill the tag
+
+mn10300_local_dcache_inv_range_skip_0:
+
+ # process the way 1 slot
+ mov (L1_CACHE_WAYDISP*1,a0),d0 # read the tag in the way 1 slot
+ btst L1_CACHE_TAG_VALID,d0
+ beq mn10300_local_dcache_inv_range_skip_1 # jump if this cacheline
+ # is not valid
+
+ xor a1,d0
+ lsr 12,d0
+ bne mn10300_local_dcache_inv_range_skip_1 # jump if not this cacheline
+
+ mov d2,(L1_CACHE_WAYDISP*1,a0) # kill the tag
+
+mn10300_local_dcache_inv_range_skip_1:
+
+ # process the way 2 slot
+ mov (L1_CACHE_WAYDISP*2,a0),d0 # read the tag in the way 2 slot
+ btst L1_CACHE_TAG_VALID,d0
+ beq mn10300_local_dcache_inv_range_skip_2 # jump if this cacheline
+ # is not valid
+
+ xor a1,d0
+ lsr 12,d0
+ bne mn10300_local_dcache_inv_range_skip_2 # jump if not this cacheline
+
+ mov d2,(L1_CACHE_WAYDISP*2,a0) # kill the tag
+
+mn10300_local_dcache_inv_range_skip_2:
+
+ # process the way 3 slot
+ mov (L1_CACHE_WAYDISP*3,a0),d0 # read the tag in the way 3 slot
+ btst L1_CACHE_TAG_VALID,d0
+ beq mn10300_local_dcache_inv_range_skip_3 # jump if this cacheline
+ # is not valid
+
+ xor a1,d0
+ lsr 12,d0
+ bne mn10300_local_dcache_inv_range_skip_3 # jump if not this cacheline
+
+ mov d2,(L1_CACHE_WAYDISP*3,a0) # kill the tag
+
+mn10300_local_dcache_inv_range_skip_3:
+
+ # approx every N steps we re-enable the cache and see if there are any
+ # interrupts to be processed
+ # we also break out if we've reached the end of the loop
+ # (the bottom nibble of the count is zero in both cases)
+ add L1_CACHE_BYTES,a0
+ add L1_CACHE_BYTES,a1
+ and ~L1_CACHE_WAYDISP,a0
+ add -1,d1
+ btst mn10300_local_dcache_inv_range_intr_interval,d1
+ bne mn10300_local_dcache_inv_range_loop
+
+ # wait for the cache to finish what it's doing
+ setlb
+ movhu (a2),d0
+ btst CHCTR_DCBUSY,d0
+ lne
+
+ # and reenable it
+ or CHCTR_DCEN,d0
+ movhu d0,(a2)
+ movhu (a2),d0
+
+ # re-enable interrupts
+ # - we don't bother with delay NOPs as we'll have enough instructions
+ # before we disable interrupts again to give the interrupts a chance
+ # to happen
+ LOCAL_IRQ_RESTORE(d3)
+
+ # go around again if the counter hasn't yet reached zero
+ add 0,d1
+ bne mn10300_local_dcache_inv_range_outer_loop
+
+mn10300_local_dcache_inv_range_end:
+ ret [d2,d3,a2],12
+ .size mn10300_local_dcache_inv_page,.-mn10300_local_dcache_inv_page
+ .size mn10300_local_dcache_inv_range,.-mn10300_local_dcache_inv_range
+ .size mn10300_local_dcache_inv_range2,.-mn10300_local_dcache_inv_range2
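
The range invalidation above works without a purge engine: with the dcache temporarily disabled it reads the tag for each of the four ways and kills a line only if it is valid and its upper address bits match the address being invalidated (the "lsr 12,d0" after the xor); every few lines it re-enables the cache and interrupts so latency stays bounded. A standalone C model of just the per-line test (not kernel code; the tag layout is an assumption for illustration):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define L1_CACHE_TAG_VALID 0x00000001u

/* Mirror of the way-0..3 slot checks in mn10300_local_dcache_inv_range:
 * only a valid line whose tag agrees with the target above bit 12 gets
 * its tag cleared; everything else is left alone. */
static bool line_should_be_killed(uint32_t tag, uint32_t target)
{
	if (!(tag & L1_CACHE_TAG_VALID))
		return false;			/* nothing cached here */
	return ((tag ^ target) >> 12) == 0;	/* same address above bit 12 */
}

int main(void)
{
	uint32_t target = 0x80001000u;

	printf("%d %d %d\n",
	       line_should_be_killed(0x80001001u, target),	/* valid, matches */
	       line_should_be_killed(0x80002001u, target),	/* valid, other page */
	       line_should_be_killed(0x80001000u, target));	/* not valid */
	return 0;
}
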
diff --git a/arch/mn10300/mm/cache-inv-icache.c b/arch/mn10300/mm/cache-inv-icache.c
new file mode 100644
index 00000000000..a8933a60b2d
--- /dev/null
+++ b/arch/mn10300/mm/cache-inv-icache.c
@@ -0,0 +1,129 @@
+/* Invalidate icache when dcache doesn't need invalidation as it's in
+ * write-through mode
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include <asm/smp.h>
+#include "cache-smp.h"
+
+/**
+ * flush_icache_page_range - Flush dcache and invalidate icache for part of a
+ * single page
+ * @start: The starting virtual address of the page part.
+ * @end: The ending virtual address of the page part.
+ *
+ * Invalidate the icache for part of a single page, as determined by the
+ * virtual addresses given. The page must be in the paged area. The dcache is
+ * not flushed as the cache must be in write-through mode to get here.
+ */
+static void flush_icache_page_range(unsigned long start, unsigned long end)
+{
+ unsigned long addr, size, off;
+ struct page *page;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *ppte, pte;
+
+ /* work out how much of the page to flush */
+ off = start & ~PAGE_MASK;
+ size = end - start;
+
+ /* get the physical address the page is mapped to from the page
+ * tables */
+ pgd = pgd_offset(current->mm, start);
+ if (!pgd || !pgd_val(*pgd))
+ return;
+
+ pud = pud_offset(pgd, start);
+ if (!pud || !pud_val(*pud))
+ return;
+
+ pmd = pmd_offset(pud, start);
+ if (!pmd || !pmd_val(*pmd))
+ return;
+
+ ppte = pte_offset_map(pmd, start);
+ if (!ppte)
+ return;
+ pte = *ppte;
+ pte_unmap(ppte);
+
+ if (pte_none(pte))
+ return;
+
+ page = pte_page(pte);
+ if (!page)
+ return;
+
+ addr = page_to_phys(page);
+
+ /* invalidate the icache coverage on that region */
+ mn10300_local_icache_inv_range2(addr + off, size);
+ smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end);
+}
+
+/**
+ * flush_icache_range - Globally flush dcache and invalidate icache for region
+ * @start: The starting virtual address of the region.
+ * @end: The ending virtual address of the region.
+ *
+ * This is used by the kernel to globally flush some code it has just written
+ * from the dcache back to RAM and then to globally invalidate the icache over
+ * that region so that that code can be run on all CPUs in the system.
+ */
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+ unsigned long start_page, end_page;
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+
+ if (end > 0x80000000UL) {
+ /* addresses above 0xa0000000 do not go through the cache */
+ if (end > 0xa0000000UL) {
+ end = 0xa0000000UL;
+ if (start >= end)
+ goto done;
+ }
+
+ /* kernel addresses between 0x80000000 and 0x9fffffff do not
+ * require page tables, so we just map such addresses
+ * directly */
+ start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
+ mn10300_icache_inv_range(start_page, end);
+ smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end);
+ if (start_page == start)
+ goto done;
+ end = start_page;
+ }
+
+ start_page = start & PAGE_MASK;
+ end_page = (end - 1) & PAGE_MASK;
+
+ if (start_page == end_page) {
+ /* the first and last bytes are on the same page */
+ flush_icache_page_range(start, end);
+ } else if (start_page + PAGE_SIZE == end_page) {
+ /* split over two virtually contiguous pages */
+ flush_icache_page_range(start, end_page);
+ flush_icache_page_range(end_page, end);
+ } else {
+ /* more than 2 pages; just invalidate the entire icache */
+ mn10300_local_icache_inv();
+ smp_cache_call(SMP_ICACHE_INV, 0, 0);
+ }
+
+done:
+ smp_unlock_cache(flags);
+}
+EXPORT_SYMBOL(flush_icache_range);
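
For reference, a usage sketch of the new flush_icache_range() (not part of the
patch; the buffer and helper below are hypothetical): the caller passes the
exact byte range of the code it has just written, and the helper picks the
cheapest invalidation strategy for it.

	#include <linux/string.h>
	#include <linux/types.h>
	#include <asm/cacheflush.h>

	static u8 example_tramp[16];		/* hypothetical code buffer */

	static void example_install_code(const u8 *code, size_t len)
	{
		memcpy(example_tramp, code, len);

		/* the dcache is write-through in this configuration, so only
		 * the icache needs invalidating; flush_icache_range() does
		 * that on all CPUs */
		flush_icache_range((unsigned long) example_tramp,
				   (unsigned long) example_tramp + len);
	}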
diff --git a/arch/mn10300/mm/cache-mn10300.S b/arch/mn10300/mm/cache-mn10300.S
deleted file mode 100644
index e839d0aedd6..00000000000
--- a/arch/mn10300/mm/cache-mn10300.S
+++ /dev/null
@@ -1,289 +0,0 @@
-/* MN10300 CPU core caching routines
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-#include <linux/sys.h>
-#include <linux/linkage.h>
-#include <asm/smp.h>
-#include <asm/page.h>
-#include <asm/cache.h>
-
-#define mn10300_dcache_inv_range_intr_interval \
- +((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
-
-#if mn10300_dcache_inv_range_intr_interval > 0xff
-#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less
-#endif
-
- .am33_2
-
- .globl mn10300_icache_inv
- .globl mn10300_dcache_inv
- .globl mn10300_dcache_inv_range
- .globl mn10300_dcache_inv_range2
- .globl mn10300_dcache_inv_page
-
-###############################################################################
-#
-# void mn10300_icache_inv(void)
-# Invalidate the entire icache
-#
-###############################################################################
- ALIGN
-mn10300_icache_inv:
- mov CHCTR,a0
-
- movhu (a0),d0
- btst CHCTR_ICEN,d0
- beq mn10300_icache_inv_end
-
- mov epsw,d1
- and ~EPSW_IE,epsw
- nop
- nop
-
- # disable the icache
- and ~CHCTR_ICEN,d0
- movhu d0,(a0)
-
- # and wait for it to calm down
- setlb
- movhu (a0),d0
- btst CHCTR_ICBUSY,d0
- lne
-
- # invalidate
- or CHCTR_ICINV,d0
- movhu d0,(a0)
-
- # wait for the cache to finish
- mov CHCTR,a0
- setlb
- movhu (a0),d0
- btst CHCTR_ICBUSY,d0
- lne
-
- # and reenable it
- and ~CHCTR_ICINV,d0
- or CHCTR_ICEN,d0
- movhu d0,(a0)
- movhu (a0),d0
-
- mov d1,epsw
-
-mn10300_icache_inv_end:
- ret [],0
-
-###############################################################################
-#
-# void mn10300_dcache_inv(void)
-# Invalidate the entire dcache
-#
-###############################################################################
- ALIGN
-mn10300_dcache_inv:
- mov CHCTR,a0
-
- movhu (a0),d0
- btst CHCTR_DCEN,d0
- beq mn10300_dcache_inv_end
-
- mov epsw,d1
- and ~EPSW_IE,epsw
- nop
- nop
-
- # disable the dcache
- and ~CHCTR_DCEN,d0
- movhu d0,(a0)
-
- # and wait for it to calm down
- setlb
- movhu (a0),d0
- btst CHCTR_DCBUSY,d0
- lne
-
- # invalidate
- or CHCTR_DCINV,d0
- movhu d0,(a0)
-
- # wait for the cache to finish
- mov CHCTR,a0
- setlb
- movhu (a0),d0
- btst CHCTR_DCBUSY,d0
- lne
-
- # and reenable it
- and ~CHCTR_DCINV,d0
- or CHCTR_DCEN,d0
- movhu d0,(a0)
- movhu (a0),d0
-
- mov d1,epsw
-
-mn10300_dcache_inv_end:
- ret [],0
-
-###############################################################################
-#
-# void mn10300_dcache_inv_range(unsigned start, unsigned end)
-# void mn10300_dcache_inv_range2(unsigned start, unsigned size)
-# void mn10300_dcache_inv_page(unsigned start)
-# Invalidate a range of addresses on a page in the dcache
-#
-###############################################################################
- ALIGN
-mn10300_dcache_inv_page:
- mov PAGE_SIZE,d1
-mn10300_dcache_inv_range2:
- add d0,d1
-mn10300_dcache_inv_range:
- movm [d2,d3,a2],(sp)
- mov CHCTR,a2
-
- movhu (a2),d2
- btst CHCTR_DCEN,d2
- beq mn10300_dcache_inv_range_end
-
- and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
- # addr down
- mov d0,a1
-
- add L1_CACHE_BYTES,d1 # round end addr up
- and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
-
- clr d2 # we're going to clear tag ram
- # entries
-
- # read the tags from the tag RAM, and if they indicate a valid dirty
- # cache line then invalidate that line
- mov DCACHE_TAG(0,0),a0
- mov a1,d0
- and L1_CACHE_TAG_ENTRY,d0
- add d0,a0 # starting dcache tag RAM
- # access address
-
- sub a1,d1
- lsr L1_CACHE_SHIFT,d1 # total number of entries to
- # examine
-
- and ~(L1_CACHE_DISPARITY-1),a1 # determine comparator base
-
-mn10300_dcache_inv_range_outer_loop:
- # disable interrupts
- mov epsw,d3
- and ~EPSW_IE,epsw
- nop # note that reading CHCTR and
- # AND'ing D0 occupy two delay
- # slots after disabling
- # interrupts
-
- # disable the dcache
- movhu (a2),d0
- and ~CHCTR_DCEN,d0
- movhu d0,(a2)
-
- # and wait for it to calm down
- setlb
- movhu (a2),d0
- btst CHCTR_DCBUSY,d0
- lne
-
-mn10300_dcache_inv_range_loop:
-
- # process the way 0 slot
- mov (L1_CACHE_WAYDISP*0,a0),d0 # read the tag in the way 0 slot
- btst L1_CACHE_TAG_VALID,d0
- beq mn10300_dcache_inv_range_skip_0 # jump if this cacheline is not
- # valid
-
- xor a1,d0
- lsr 12,d0
- bne mn10300_dcache_inv_range_skip_0 # jump if not this cacheline
-
- mov d2,(a0) # kill the tag
-
-mn10300_dcache_inv_range_skip_0:
-
- # process the way 1 slot
- mov (L1_CACHE_WAYDISP*1,a0),d0 # read the tag in the way 1 slot
- btst L1_CACHE_TAG_VALID,d0
- beq mn10300_dcache_inv_range_skip_1 # jump if this cacheline is not
- # valid
-
- xor a1,d0
- lsr 12,d0
- bne mn10300_dcache_inv_range_skip_1 # jump if not this cacheline
-
- mov d2,(a0) # kill the tag
-
-mn10300_dcache_inv_range_skip_1:
-
- # process the way 2 slot
- mov (L1_CACHE_WAYDISP*2,a0),d0 # read the tag in the way 2 slot
- btst L1_CACHE_TAG_VALID,d0
- beq mn10300_dcache_inv_range_skip_2 # jump if this cacheline is not
- # valid
-
- xor a1,d0
- lsr 12,d0
- bne mn10300_dcache_inv_range_skip_2 # jump if not this cacheline
-
- mov d2,(a0) # kill the tag
-
-mn10300_dcache_inv_range_skip_2:
-
- # process the way 3 slot
- mov (L1_CACHE_WAYDISP*3,a0),d0 # read the tag in the way 3 slot
- btst L1_CACHE_TAG_VALID,d0
- beq mn10300_dcache_inv_range_skip_3 # jump if this cacheline is not
- # valid
-
- xor a1,d0
- lsr 12,d0
- bne mn10300_dcache_inv_range_skip_3 # jump if not this cacheline
-
- mov d2,(a0) # kill the tag
-
-mn10300_dcache_inv_range_skip_3:
-
- # approx every N steps we re-enable the cache and see if there are any
- # interrupts to be processed
- # we also break out if we've reached the end of the loop
- # (the bottom nibble of the count is zero in both cases)
- add L1_CACHE_BYTES,a0
- add L1_CACHE_BYTES,a1
- add -1,d1
- btst mn10300_dcache_inv_range_intr_interval,d1
- bne mn10300_dcache_inv_range_loop
-
- # wait for the cache to finish what it's doing
- setlb
- movhu (a2),d0
- btst CHCTR_DCBUSY,d0
- lne
-
- # and reenable it
- or CHCTR_DCEN,d0
- movhu d0,(a2)
- movhu (a2),d0
-
- # re-enable interrupts
- # - we don't bother with delay NOPs as we'll have enough instructions
- # before we disable interrupts again to give the interrupts a chance
- # to happen
- mov d3,epsw
-
- # go around again if the counter hasn't yet reached zero
- add 0,d1
- bne mn10300_dcache_inv_range_outer_loop
-
-mn10300_dcache_inv_range_end:
- ret [d2,d3,a2],12
diff --git a/arch/mn10300/mm/cache-smp-flush.c b/arch/mn10300/mm/cache-smp-flush.c
new file mode 100644
index 00000000000..fd51af5eaf7
--- /dev/null
+++ b/arch/mn10300/mm/cache-smp-flush.c
@@ -0,0 +1,156 @@
+/* Functions for global dcache flushing when write-back caching under SMP
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include "cache-smp.h"
+
+/**
+ * mn10300_dcache_flush - Globally flush data cache
+ *
+ * Flush the data cache on all CPUs.
+ */
+void mn10300_dcache_flush(void)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_flush();
+ smp_cache_call(SMP_DCACHE_FLUSH, 0, 0);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_page - Globally flush a page of data cache
+ * @start: The address of the page of memory to be flushed.
+ *
+ * Flush a range of addresses in the data cache on all CPUs covering
+ * the page that includes the given address.
+ */
+void mn10300_dcache_flush_page(unsigned long start)
+{
+ unsigned long flags;
+
+ start &= ~(PAGE_SIZE-1);
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_flush_page(start);
+ smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, start + PAGE_SIZE);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_range - Globally flush range of data cache
+ * @start: The start address of the region to be flushed.
+ * @end: The end address of the region to be flushed.
+ *
+ * Flush a range of addresses in the data cache on all CPUs, between start and
+ * end-1 inclusive.
+ */
+void mn10300_dcache_flush_range(unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_flush_range(start, end);
+ smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, end);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_range2 - Globally flush range of data cache
+ * @start: The start address of the region to be flushed.
+ * @size: The size of the region to be flushed.
+ *
+ * Flush a range of addresses in the data cache on all CPUs, between start and
+ * start+size-1 inclusive.
+ */
+void mn10300_dcache_flush_range2(unsigned long start, unsigned long size)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_flush_range2(start, size);
+ smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, start + size);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_inv - Globally flush and invalidate data cache
+ *
+ * Flush and invalidate the data cache on all CPUs.
+ */
+void mn10300_dcache_flush_inv(void)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_flush_inv();
+ smp_cache_call(SMP_DCACHE_FLUSH_INV, 0, 0);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_inv_page - Globally flush and invalidate a page of data
+ * cache
+ * @start: The address of the page of memory to be flushed and invalidated.
+ *
+ * Flush and invalidate a range of addresses in the data cache on all CPUs
+ * covering the page that includes the given address.
+ */
+void mn10300_dcache_flush_inv_page(unsigned long start)
+{
+ unsigned long flags;
+
+ start &= ~(PAGE_SIZE-1);
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_flush_inv_page(start);
+ smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, start + PAGE_SIZE);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_inv_range - Globally flush and invalidate range of data
+ * cache
+ * @start: The start address of the region to be flushed and invalidated.
+ * @end: The end address of the region to be flushed and invalidated.
+ *
+ * Flush and invalidate a range of addresses in the data cache on all CPUs,
+ * between start and end-1 inclusive.
+ */
+void mn10300_dcache_flush_inv_range(unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_flush_inv_range(start, end);
+ smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, end);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_inv_range2 - Globally flush and invalidate range of data
+ * cache
+ * @start: The start address of the region to be flushed and invalidated.
+ * @size: The size of the region to be flushed and invalidated.
+ *
+ * Flush and invalidate a range of addresses in the data cache on all CPUs,
+ * between start and start+size-1 inclusive.
+ */
+void mn10300_dcache_flush_inv_range2(unsigned long start, unsigned long size)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_flush_inv_range2(start, size);
+ smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, start + size);
+ smp_unlock_cache(flags);
+}
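
Every exported function in this file has the same shape: take the cache lock,
perform the operation locally, broadcast the same operation by IPI, then
release the lock.  A condensed, purely illustrative rendering of that shape
(the macro is hypothetical and not part of the patch):

	/* the common lock/local-op/IPI/unlock pattern used above */
	#define GLOBAL_DCACHE_OP(local_call, ipi_op, start, end)	\
	do {								\
		unsigned long _flags = smp_lock_cache();		\
		local_call;						\
		smp_cache_call((ipi_op), (start), (end));		\
		smp_unlock_cache(_flags);				\
	} while (0)

	/* e.g. mn10300_dcache_flush_range(start, end) then reduces to:
	 *	GLOBAL_DCACHE_OP(mn10300_local_dcache_flush_range(start, end),
	 *			 SMP_DCACHE_FLUSH_RANGE, start, end);
	 */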
diff --git a/arch/mn10300/mm/cache-smp-inv.c b/arch/mn10300/mm/cache-smp-inv.c
new file mode 100644
index 00000000000..ff1787358c8
--- /dev/null
+++ b/arch/mn10300/mm/cache-smp-inv.c
@@ -0,0 +1,153 @@
+/* Functions for global i/dcache invalidation when caching under SMP
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include "cache-smp.h"
+
+/**
+ * mn10300_icache_inv - Globally invalidate instruction cache
+ *
+ * Invalidate the instruction cache on all CPUs.
+ */
+void mn10300_icache_inv(void)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_icache_inv();
+ smp_cache_call(SMP_ICACHE_INV, 0, 0);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_icache_inv_page - Globally invalidate a page of instruction cache
+ * @start: The address of the page of memory to be invalidated.
+ *
+ * Invalidate a range of addresses in the instruction cache on all CPUs
+ * covering the page that includes the given address.
+ */
+void mn10300_icache_inv_page(unsigned long start)
+{
+ unsigned long flags;
+
+ start &= ~(PAGE_SIZE-1);
+
+ flags = smp_lock_cache();
+ mn10300_local_icache_inv_page(start);
+ smp_cache_call(SMP_ICACHE_INV_RANGE, start, start + PAGE_SIZE);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_icache_inv_range - Globally invalidate range of instruction cache
+ * @start: The start address of the region to be invalidated.
+ * @end: The end address of the region to be invalidated.
+ *
+ * Invalidate a range of addresses in the instruction cache on all CPUs,
+ * between start and end-1 inclusive.
+ */
+void mn10300_icache_inv_range(unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_icache_inv_range(start, end);
+ smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_icache_inv_range2 - Globally invalidate range of instruction cache
+ * @start: The start address of the region to be invalidated.
+ * @size: The size of the region to be invalidated.
+ *
+ * Invalidate a range of addresses in the instruction cache on all CPUs,
+ * between start and start+size-1 inclusive.
+ */
+void mn10300_icache_inv_range2(unsigned long start, unsigned long size)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_icache_inv_range2(start, size);
+ smp_cache_call(SMP_ICACHE_INV_RANGE, start, start + size);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_inv - Globally invalidate data cache
+ *
+ * Invalidate the data cache on all CPUs.
+ */
+void mn10300_dcache_inv(void)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_inv();
+ smp_cache_call(SMP_DCACHE_INV, 0, 0);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_inv_page - Globally invalidate a page of data cache
+ * @start: The address of the page of memory to be invalidated.
+ *
+ * Invalidate a range of addresses in the data cache on all CPUs covering the
+ * page that includes the given address.
+ */
+void mn10300_dcache_inv_page(unsigned long start)
+{
+ unsigned long flags;
+
+ start &= ~(PAGE_SIZE-1);
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_inv_page(start);
+ smp_cache_call(SMP_DCACHE_INV_RANGE, start, start + PAGE_SIZE);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_inv_range - Globally invalidate range of data cache
+ * @start: The start address of the region to be invalidated.
+ * @end: The end address of the region to be invalidated.
+ *
+ * Invalidate a range of addresses in the data cache on all CPUs, between start
+ * and end-1 inclusive.
+ */
+void mn10300_dcache_inv_range(unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_inv_range(start, end);
+ smp_cache_call(SMP_DCACHE_INV_RANGE, start, end);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_inv_range2 - Globally invalidate range of data cache
+ * @start: The start address of the region to be invalidated.
+ * @size: The size of the region to be invalidated.
+ *
+ * Invalidate a range of addresses in the data cache on all CPUs, between start
+ * and start+size-1 inclusive.
+ */
+void mn10300_dcache_inv_range2(unsigned long start, unsigned long size)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_inv_range2(start, size);
+ smp_cache_call(SMP_DCACHE_INV_RANGE, start, start + size);
+ smp_unlock_cache(flags);
+}
diff --git a/arch/mn10300/mm/cache-smp.c b/arch/mn10300/mm/cache-smp.c
new file mode 100644
index 00000000000..4a6e9a4b5b2
--- /dev/null
+++ b/arch/mn10300/mm/cache-smp.c
@@ -0,0 +1,105 @@
+/* SMP global caching code
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/threads.h>
+#include <linux/interrupt.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/smp.h>
+#include "cache-smp.h"
+
+DEFINE_SPINLOCK(smp_cache_lock);
+static unsigned long smp_cache_mask;
+static unsigned long smp_cache_start;
+static unsigned long smp_cache_end;
+static cpumask_t smp_cache_ipi_map; /* CPUs that have yet to handle the cache IPI */
+
+/**
+ * smp_cache_interrupt - Handle IPI request to flush caches.
+ *
+ * Handle a request delivered by IPI to flush the current CPU's
+ * caches. The parameters are stored in smp_cache_*.
+ */
+void smp_cache_interrupt(void)
+{
+ unsigned long opr_mask = smp_cache_mask;
+
+ switch ((enum smp_dcache_ops)(opr_mask & SMP_DCACHE_OP_MASK)) {
+ case SMP_DCACHE_NOP:
+ break;
+ case SMP_DCACHE_INV:
+ mn10300_local_dcache_inv();
+ break;
+ case SMP_DCACHE_INV_RANGE:
+ mn10300_local_dcache_inv_range(smp_cache_start, smp_cache_end);
+ break;
+ case SMP_DCACHE_FLUSH:
+ mn10300_local_dcache_flush();
+ break;
+ case SMP_DCACHE_FLUSH_RANGE:
+ mn10300_local_dcache_flush_range(smp_cache_start,
+ smp_cache_end);
+ break;
+ case SMP_DCACHE_FLUSH_INV:
+ mn10300_local_dcache_flush_inv();
+ break;
+ case SMP_DCACHE_FLUSH_INV_RANGE:
+ mn10300_local_dcache_flush_inv_range(smp_cache_start,
+ smp_cache_end);
+ break;
+ }
+
+ switch ((enum smp_icache_ops)(opr_mask & SMP_ICACHE_OP_MASK)) {
+ case SMP_ICACHE_NOP:
+ break;
+ case SMP_ICACHE_INV:
+ mn10300_local_icache_inv();
+ break;
+ case SMP_ICACHE_INV_RANGE:
+ mn10300_local_icache_inv_range(smp_cache_start, smp_cache_end);
+ break;
+ }
+
+ cpu_clear(smp_processor_id(), smp_cache_ipi_map);
+}
+
+/**
+ * smp_cache_call - Issue an IPI to request the other CPUs flush caches
+ * @opr_mask: Cache operation flags
+ * @start: Start address of request
+ * @end: End address of request
+ *
+ * Send cache flush IPI to other CPUs. This invokes smp_cache_interrupt()
+ * above on those other CPUs and then waits for them to finish.
+ *
+ * The caller must hold smp_cache_lock.
+ */
+void smp_cache_call(unsigned long opr_mask,
+ unsigned long start, unsigned long end)
+{
+ smp_cache_mask = opr_mask;
+ smp_cache_start = start;
+ smp_cache_end = end;
+ smp_cache_ipi_map = cpu_online_map;
+ cpu_clear(smp_processor_id(), smp_cache_ipi_map);
+
+ send_IPI_allbutself(FLUSH_CACHE_IPI);
+
+ while (!cpus_empty(smp_cache_ipi_map))
+ /* nothing. lockup detection does not belong here */
+ mb();
+}
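
smp_cache_call() takes a single request word, so a caller that needs both
caches serviced can OR one icache operation with one dcache operation and pay
for a single IPI round trip.  A hedged sketch of such a caller (the function
name is hypothetical; the helpers and masks are the ones introduced by this
patch):

	/* flush a dcache range and invalidate the matching icache range on
	 * every CPU with one IPI; smp_lock_cache() serialises use of the
	 * shared smp_cache_* parameters */
	static void example_sync_code_range(unsigned long start, unsigned long end)
	{
		unsigned long flags = smp_lock_cache();

		mn10300_local_dcache_flush_range(start, end);
		mn10300_local_icache_inv_range(start, end);
		smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, end);

		smp_unlock_cache(flags);
	}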
diff --git a/arch/mn10300/mm/cache-smp.h b/arch/mn10300/mm/cache-smp.h
new file mode 100644
index 00000000000..cb52892aa66
--- /dev/null
+++ b/arch/mn10300/mm/cache-smp.h
@@ -0,0 +1,69 @@
+/* SMP caching definitions
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+
+/*
+ * Operation requests for smp_cache_call().
+ *
+ * One of smp_icache_ops and one of smp_dcache_ops can be OR'd together.
+ */
+enum smp_icache_ops {
+ SMP_ICACHE_NOP = 0x0000,
+ SMP_ICACHE_INV = 0x0001,
+ SMP_ICACHE_INV_RANGE = 0x0002,
+};
+#define SMP_ICACHE_OP_MASK 0x0003
+
+enum smp_dcache_ops {
+ SMP_DCACHE_NOP = 0x0000,
+ SMP_DCACHE_INV = 0x0004,
+ SMP_DCACHE_INV_RANGE = 0x0008,
+ SMP_DCACHE_FLUSH = 0x000c,
+ SMP_DCACHE_FLUSH_RANGE = 0x0010,
+ SMP_DCACHE_FLUSH_INV = 0x0014,
+ SMP_DCACHE_FLUSH_INV_RANGE = 0x0018,
+};
+#define SMP_DCACHE_OP_MASK 0x001c
+
+#define SMP_IDCACHE_INV_FLUSH (SMP_ICACHE_INV | SMP_DCACHE_FLUSH)
+#define SMP_IDCACHE_INV_FLUSH_RANGE (SMP_ICACHE_INV_RANGE | SMP_DCACHE_FLUSH_RANGE)
+
+/*
+ * cache-smp.c
+ */
+#ifdef CONFIG_SMP
+extern spinlock_t smp_cache_lock;
+
+extern void smp_cache_call(unsigned long opr_mask,
+ unsigned long addr, unsigned long end);
+
+static inline unsigned long smp_lock_cache(void)
+ __acquires(&smp_cache_lock)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&smp_cache_lock, flags);
+ return flags;
+}
+
+static inline void smp_unlock_cache(unsigned long flags)
+ __releases(&smp_cache_lock)
+{
+ spin_unlock_irqrestore(&smp_cache_lock, flags);
+}
+
+#else
+static inline unsigned long smp_lock_cache(void) { return 0; }
+static inline void smp_unlock_cache(unsigned long flags) {}
+static inline void smp_cache_call(unsigned long opr_mask,
+ unsigned long addr, unsigned long end)
+{
+}
+#endif /* CONFIG_SMP */
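
The icache and dcache operations are kept in disjoint bit fields so that one
of each can be OR'd into a single smp_cache_call() request word.  A
hypothetical compile-time check of that invariant (illustrative only, not part
of the patch):

	#include <linux/kernel.h>

	static inline void smp_cache_op_layout_check(void)
	{
		/* the two op fields must not overlap, or OR'ing an icache op
		 * with a dcache op would corrupt the request word */
		BUILD_BUG_ON(SMP_ICACHE_OP_MASK & SMP_DCACHE_OP_MASK);
	}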
diff --git a/arch/mn10300/mm/cache.c b/arch/mn10300/mm/cache.c
index 9261217e8d2..0a1f0aa92eb 100644
--- a/arch/mn10300/mm/cache.c
+++ b/arch/mn10300/mm/cache.c
@@ -18,8 +18,13 @@
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/uaccess.h>
+#include <asm/smp.h>
+#include "cache-smp.h"
EXPORT_SYMBOL(mn10300_icache_inv);
+EXPORT_SYMBOL(mn10300_icache_inv_range);
+EXPORT_SYMBOL(mn10300_icache_inv_range2);
+EXPORT_SYMBOL(mn10300_icache_inv_page);
EXPORT_SYMBOL(mn10300_dcache_inv);
EXPORT_SYMBOL(mn10300_dcache_inv_range);
EXPORT_SYMBOL(mn10300_dcache_inv_range2);
@@ -37,96 +42,6 @@ EXPORT_SYMBOL(mn10300_dcache_flush_page);
#endif
/*
- * write a page back from the dcache and invalidate the icache so that we can
- * run code from it that we've just written into it
- */
-void flush_icache_page(struct vm_area_struct *vma, struct page *page)
-{
- mn10300_dcache_flush_page(page_to_phys(page));
- mn10300_icache_inv();
-}
-EXPORT_SYMBOL(flush_icache_page);
-
-/*
- * write some code we've just written back from the dcache and invalidate the
- * icache so that we can run that code
- */
-void flush_icache_range(unsigned long start, unsigned long end)
-{
-#ifdef CONFIG_MN10300_CACHE_WBACK
- unsigned long addr, size, base, off;
- struct page *page;
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *ppte, pte;
-
- if (end > 0x80000000UL) {
- /* addresses above 0xa0000000 do not go through the cache */
- if (end > 0xa0000000UL) {
- end = 0xa0000000UL;
- if (start >= end)
- return;
- }
-
- /* kernel addresses between 0x80000000 and 0x9fffffff do not
- * require page tables, so we just map such addresses directly */
- base = (start >= 0x80000000UL) ? start : 0x80000000UL;
- mn10300_dcache_flush_range(base, end);
- if (base == start)
- goto invalidate;
- end = base;
- }
-
- for (; start < end; start += size) {
- /* work out how much of the page to flush */
- off = start & (PAGE_SIZE - 1);
-
- size = end - start;
- if (size > PAGE_SIZE - off)
- size = PAGE_SIZE - off;
-
- /* get the physical address the page is mapped to from the page
- * tables */
- pgd = pgd_offset(current->mm, start);
- if (!pgd || !pgd_val(*pgd))
- continue;
-
- pud = pud_offset(pgd, start);
- if (!pud || !pud_val(*pud))
- continue;
-
- pmd = pmd_offset(pud, start);
- if (!pmd || !pmd_val(*pmd))
- continue;
-
- ppte = pte_offset_map(pmd, start);
- if (!ppte)
- continue;
- pte = *ppte;
- pte_unmap(ppte);
-
- if (pte_none(pte))
- continue;
-
- page = pte_page(pte);
- if (!page)
- continue;
-
- addr = page_to_phys(page);
-
- /* flush the dcache and invalidate the icache coverage on that
- * region */
- mn10300_dcache_flush_range2(addr + off, size);
- }
-#endif
-
-invalidate:
- mn10300_icache_inv();
-}
-EXPORT_SYMBOL(flush_icache_range);
-
-/*
* allow userspace to flush the instruction cache
*/
asmlinkage long sys_cacheflush(unsigned long start, unsigned long end)
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
index 81f153fa51b..59c3da49d9d 100644
--- a/arch/mn10300/mm/fault.c
+++ b/arch/mn10300/mm/fault.c
@@ -39,10 +39,6 @@ void bust_spinlocks(int yes)
{
if (yes) {
oops_in_progress = 1;
-#ifdef CONFIG_SMP
- /* Many serial drivers do __global_cli() */
- global_irq_lock = 0;
-#endif
} else {
int loglevel_save = console_loglevel;
#ifdef CONFIG_VT
@@ -100,8 +96,6 @@ static void print_pagetable_entries(pgd_t *pgdir, unsigned long address)
}
#endif
-asmlinkage void monitor_signal(struct pt_regs *);
-
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
@@ -279,7 +273,6 @@ good_area:
*/
bad_area:
up_read(&mm->mmap_sem);
- monitor_signal(regs);
/* User mode accesses just cause a SIGSEGV */
if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) {
@@ -292,7 +285,6 @@ bad_area:
}
no_context:
- monitor_signal(regs);
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs))
return;
@@ -338,14 +330,13 @@ no_context:
*/
out_of_memory:
up_read(&mm->mmap_sem);
- if ((fault_code & MMUFCR_xFC_ACCESS) != MMUFCR_xFC_ACCESS_USR)
- goto no_context;
- pagefault_out_of_memory();
- return;
+ printk(KERN_ALERT "VM: killing process %s\n", tsk->comm);
+ if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
+ do_exit(SIGKILL);
+ goto no_context;
do_sigbus:
up_read(&mm->mmap_sem);
- monitor_signal(regs);
/*
* Send a sigbus, regardless of whether we were in kernel
diff --git a/arch/mn10300/mm/init.c b/arch/mn10300/mm/init.c
index 6e6bc0e5152..48907cc3bdb 100644
--- a/arch/mn10300/mm/init.c
+++ b/arch/mn10300/mm/init.c
@@ -41,6 +41,10 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+static struct vm_struct user_iomap_vm;
+#endif
+
/*
* set up paging
*/
@@ -73,7 +77,24 @@ void __init paging_init(void)
/* pass the memory from the bootmem allocator to the main allocator */
free_area_init(zones_size);
- __flush_tlb_all();
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+ /* The Atomic Operation Unit registers need to be mapped to userspace
+ * for all processes. The following uses vm_area_register_early() to
+ * reserve the first page of the vmalloc area and sets the pte for that
+ * page.
+ *
+ * glibc hardcodes this virtual mapping, so we're pretty much stuck with
+ * it from now on.
+ */
+ user_iomap_vm.flags = VM_USERMAP;
+ user_iomap_vm.size = 1 << PAGE_SHIFT;
+ vm_area_register_early(&user_iomap_vm, PAGE_SIZE);
+ ppte = kernel_vmalloc_ptes;
+ set_pte(ppte, pfn_pte(USER_ATOMIC_OPS_PAGE_ADDR >> PAGE_SHIFT,
+ PAGE_USERIO));
+#endif
+
+ local_flush_tlb_all();
}
/*
@@ -84,8 +105,7 @@ void __init mem_init(void)
int codesize, reservedpages, datasize, initsize;
int tmp;
- if (!mem_map)
- BUG();
+ BUG_ON(!mem_map);
#define START_PFN (contig_page_data.bdata->node_min_pfn)
#define MAX_LOW_PFN (contig_page_data.bdata->node_low_pfn)
diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c
index 6dffbf97ac2..eef989c1d0c 100644
--- a/arch/mn10300/mm/misalignment.c
+++ b/arch/mn10300/mm/misalignment.c
@@ -449,8 +449,7 @@ found_opcode:
regs->pc, opcode, pop->opcode, pop->params[0], pop->params[1]);
tmp = format_tbl[pop->format].opsz;
- if (tmp > noc)
- BUG(); /* match was less complete than it ought to have been */
+ BUG_ON(tmp > noc); /* match was less complete than it ought to have been */
if (tmp < noc) {
tmp = noc - tmp;
diff --git a/arch/mn10300/mm/mmu-context.c b/arch/mn10300/mm/mmu-context.c
index 36ba02191d4..a4f7d3dcc6e 100644
--- a/arch/mn10300/mm/mmu-context.c
+++ b/arch/mn10300/mm/mmu-context.c
@@ -13,40 +13,15 @@
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
/*
* list of the MMU contexts last allocated on each CPU
*/
unsigned long mmu_context_cache[NR_CPUS] = {
- [0 ... NR_CPUS - 1] = MMU_CONTEXT_FIRST_VERSION * 2 - 1,
+ [0 ... NR_CPUS - 1] =
+ MMU_CONTEXT_FIRST_VERSION * 2 - (1 - MMU_CONTEXT_TLBPID_LOCK_NR),
};
-
-/*
- * flush the specified TLB entry
- */
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
-{
- unsigned long pteu, cnx, flags;
-
- addr &= PAGE_MASK;
-
- /* make sure the context doesn't migrate and defend against
- * interference from vmalloc'd regions */
- local_irq_save(flags);
-
- cnx = mm_context(vma->vm_mm);
-
- if (cnx != MMU_NO_CONTEXT) {
- pteu = addr | (cnx & 0x000000ffUL);
- IPTEU = pteu;
- DPTEU = pteu;
- if (IPTEL & xPTEL_V)
- IPTEL = 0;
- if (DPTEL & xPTEL_V)
- DPTEL = 0;
- }
-
- local_irq_restore(flags);
-}
+#endif /* CONFIG_MN10300_TLB_USE_PIDR */
/*
* preemptively set a TLB entry
@@ -63,10 +38,16 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *pte
* interference from vmalloc'd regions */
local_irq_save(flags);
+ cnx = ~MMU_NO_CONTEXT;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
cnx = mm_context(vma->vm_mm);
+#endif
if (cnx != MMU_NO_CONTEXT) {
- pteu = addr | (cnx & 0x000000ffUL);
+ pteu = addr;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+ pteu |= cnx & MMU_CONTEXT_TLBPID_MASK;
+#endif
if (!(pte_val(pte) & _PAGE_NX)) {
IPTEU = pteu;
if (IPTEL & xPTEL_V)
diff --git a/arch/mn10300/mm/pgtable.c b/arch/mn10300/mm/pgtable.c
index 9c1624c9e4e..450f7ba3f8f 100644
--- a/arch/mn10300/mm/pgtable.c
+++ b/arch/mn10300/mm/pgtable.c
@@ -59,7 +59,7 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
* It's enough to flush this one mapping.
* (PGE mappings get flushed as well)
*/
- __flush_tlb_one(vaddr);
+ local_flush_tlb_one(vaddr);
}
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
diff --git a/arch/mn10300/mm/tlb-mn10300.S b/arch/mn10300/mm/tlb-mn10300.S
index 7095147dcb8..b9940177d81 100644
--- a/arch/mn10300/mm/tlb-mn10300.S
+++ b/arch/mn10300/mm/tlb-mn10300.S
@@ -27,7 +27,6 @@
###############################################################################
.type itlb_miss,@function
ENTRY(itlb_miss)
- and ~EPSW_NMID,epsw
#ifdef CONFIG_GDBSTUB
movm [d2,d3,a2],(sp)
#else
@@ -38,6 +37,12 @@ ENTRY(itlb_miss)
nop
#endif
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+ mov (MMUCTR),d2
+ mov d2,(MMUCTR)
+#endif
+
+ and ~EPSW_NMID,epsw
mov (IPTEU),d3
mov (PTBR),a2
mov d3,d2
@@ -56,10 +61,16 @@ ENTRY(itlb_miss)
btst _PAGE_VALID,d2
beq itlb_miss_fault # jump if doesn't point to a page
# (might be a swap id)
+#if ((_PAGE_ACCESSED & 0xffffff00) == 0)
bset _PAGE_ACCESSED,(0,a2)
- and ~(xPTEL_UNUSED1|xPTEL_UNUSED2),d2
+#elif ((_PAGE_ACCESSED & 0xffff00ff) == 0)
+ bset +(_PAGE_ACCESSED >> 8),(1,a2)
+#else
+#error "_PAGE_ACCESSED value is out of range"
+#endif
+ and ~xPTEL2_UNUSED1,d2
itlb_miss_set:
- mov d2,(IPTEL) # change the TLB
+ mov d2,(IPTEL2) # change the TLB
#ifdef CONFIG_GDBSTUB
movm (sp),[d2,d3,a2]
#endif
@@ -79,7 +90,6 @@ itlb_miss_fault:
###############################################################################
.type dtlb_miss,@function
ENTRY(dtlb_miss)
- and ~EPSW_NMID,epsw
#ifdef CONFIG_GDBSTUB
movm [d2,d3,a2],(sp)
#else
@@ -90,6 +100,12 @@ ENTRY(dtlb_miss)
nop
#endif
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+ mov (MMUCTR),d2
+ mov d2,(MMUCTR)
+#endif
+
+ and ~EPSW_NMID,epsw
mov (DPTEU),d3
mov (PTBR),a2
mov d3,d2
@@ -108,10 +124,16 @@ ENTRY(dtlb_miss)
btst _PAGE_VALID,d2
beq dtlb_miss_fault # jump if doesn't point to a page
# (might be a swap id)
+#if ((_PAGE_ACCESSED & 0xffffff00) == 0)
bset _PAGE_ACCESSED,(0,a2)
- and ~(xPTEL_UNUSED1|xPTEL_UNUSED2),d2
+#elif ((_PAGE_ACCESSED & 0xffff00ff) == 0)
+ bset +(_PAGE_ACCESSED >> 8),(1,a2)
+#else
+#error "_PAGE_ACCESSED value is out of range"
+#endif
+ and ~xPTEL2_UNUSED1,d2
dtlb_miss_set:
- mov d2,(DPTEL) # change the TLB
+ mov d2,(DPTEL2) # change the TLB
#ifdef CONFIG_GDBSTUB
movm (sp),[d2,d3,a2]
#endif
@@ -130,9 +152,15 @@ dtlb_miss_fault:
###############################################################################
.type itlb_aerror,@function
ENTRY(itlb_aerror)
- and ~EPSW_NMID,epsw
add -4,sp
SAVE_ALL
+
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+ mov (MMUCTR),d1
+ mov d1,(MMUCTR)
+#endif
+
+ and ~EPSW_NMID,epsw
add -4,sp # need to pass three params
# calculate the fault code
@@ -140,15 +168,13 @@ ENTRY(itlb_aerror)
or 0x00010000,d1 # it's an instruction fetch
# determine the page address
- mov (IPTEU),a2
- mov a2,d0
+ mov (IPTEU),d0
and PAGE_MASK,d0
mov d0,(12,sp)
clr d0
- mov d0,(IPTEL)
+ mov d0,(IPTEL2)
- and ~EPSW_NMID,epsw
or EPSW_IE,epsw
mov fp,d0
call do_page_fault[],0 # do_page_fault(regs,code,addr
@@ -163,10 +189,16 @@ ENTRY(itlb_aerror)
###############################################################################
.type dtlb_aerror,@function
ENTRY(dtlb_aerror)
- and ~EPSW_NMID,epsw
add -4,sp
SAVE_ALL
+
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+ mov (MMUCTR),d1
+ mov d1,(MMUCTR)
+#endif
+
add -4,sp # need to pass three params
+ and ~EPSW_NMID,epsw
# calculate the fault code
movhu (MMUFCR_DFC),d1
@@ -178,9 +210,8 @@ ENTRY(dtlb_aerror)
mov d0,(12,sp)
clr d0
- mov d0,(DPTEL)
+ mov d0,(DPTEL2)
- and ~EPSW_NMID,epsw
or EPSW_IE,epsw
mov fp,d0
call do_page_fault[],0 # do_page_fault(regs,code,addr
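
The #if/#elif blocks added to the TLB-miss handlers above exist because bset
operates on a single byte of memory, so the handler has to pick whichever byte
of the PTE the _PAGE_ACCESSED bit falls in.  A rough C equivalent of that
selection (hypothetical, for illustration only):

	#include <asm/pgtable.h>	/* for _PAGE_ACCESSED */

	/* mark a PTE accessed by setting _PAGE_ACCESSED in the byte that
	 * contains it, mirroring the bset logic in the handlers above */
	static inline void pte_mark_accessed_sketch(unsigned char *pte_bytes)
	{
	#if ((_PAGE_ACCESSED & 0xffffff00) == 0)
		pte_bytes[0] |= _PAGE_ACCESSED;		/* bit lives in byte 0 */
	#elif ((_PAGE_ACCESSED & 0xffff00ff) == 0)
		pte_bytes[1] |= _PAGE_ACCESSED >> 8;	/* bit lives in byte 1 */
	#else
	#error "_PAGE_ACCESSED value is out of range"
	#endif
	}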
diff --git a/arch/mn10300/mm/tlb-smp.c b/arch/mn10300/mm/tlb-smp.c
new file mode 100644
index 00000000000..0b6a5ad1960
--- /dev/null
+++ b/arch/mn10300/mm/tlb-smp.c
@@ -0,0 +1,214 @@
+/* SMP TLB support routines.
+ *
+ * Copyright (C) 2006-2008 Panasonic Corporation
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/cpumask.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/profile.h>
+#include <linux/smp.h>
+#include <asm/tlbflush.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/processor.h>
+#include <asm/bug.h>
+#include <asm/exceptions.h>
+#include <asm/hardirq.h>
+#include <asm/fpu.h>
+#include <asm/mmu_context.h>
+#include <asm/thread_info.h>
+#include <asm/cpu-regs.h>
+#include <asm/intctl-regs.h>
+
+/*
+ * For TLB flushing
+ */
+#define FLUSH_ALL 0xffffffff
+
+static cpumask_t flush_cpumask;
+static struct mm_struct *flush_mm;
+static unsigned long flush_va;
+static DEFINE_SPINLOCK(tlbstate_lock);
+
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
+ &init_mm, 0
+};
+
+static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+ unsigned long va);
+static void do_flush_tlb_all(void *info);
+
+/**
+ * smp_flush_tlb - Callback to invalidate the TLB.
+ * @unused: Callback context (ignored).
+ */
+void smp_flush_tlb(void *unused)
+{
+ unsigned long cpu_id;
+
+ cpu_id = get_cpu();
+
+ if (!cpu_isset(cpu_id, flush_cpumask))
+ /* This was a BUG() but until someone can quote me the line
+ * from the intel manual that guarantees an IPI to multiple
+ * CPUs is retried _only_ on the erroring CPUs, it's staying as a
+ * return
+ *
+ * BUG();
+ */
+ goto out;
+
+ if (flush_va == FLUSH_ALL)
+ local_flush_tlb();
+ else
+ local_flush_tlb_page(flush_mm, flush_va);
+
+ smp_mb__before_clear_bit();
+ cpu_clear(cpu_id, flush_cpumask);
+ smp_mb__after_clear_bit();
+out:
+ put_cpu();
+}
+
+/**
+ * flush_tlb_others - Tell the specified CPUs to invalidate their TLBs
+ * @cpumask: The list of CPUs to target.
+ * @mm: The VM context to flush from (if va!=FLUSH_ALL).
+ * @va: Virtual address to flush or FLUSH_ALL to flush everything.
+ */
+static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+ unsigned long va)
+{
+ cpumask_t tmp;
+
+ /* A couple of sanity checks (to be removed):
+ * - mask must not be empty
+ * - current CPU must not be in mask
+ * - we do not send IPIs to as-yet unbooted CPUs.
+ */
+ BUG_ON(!mm);
+ BUG_ON(cpus_empty(cpumask));
+ BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+
+ cpus_and(tmp, cpumask, cpu_online_map);
+ BUG_ON(!cpus_equal(cpumask, tmp));
+
+ /* I'm not happy about this global shared spinlock in the MM hot path,
+ * but we'll see how contended it is.
+ *
+ * Temporarily this turns IRQs off, so that lockups are detected by the
+ * NMI watchdog.
+ */
+ spin_lock(&tlbstate_lock);
+
+ flush_mm = mm;
+ flush_va = va;
+#if NR_CPUS <= BITS_PER_LONG
+ atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]);
+#else
+#error Not supported.
+#endif
+
+ /* FIXME: if NR_CPUS>=3, change send_IPI_mask */
+ smp_call_function(smp_flush_tlb, NULL, 1);
+
+ while (!cpus_empty(flush_cpumask))
+ /* Lockup detection does not belong here */
+ smp_mb();
+
+ flush_mm = NULL;
+ flush_va = 0;
+ spin_unlock(&tlbstate_lock);
+}
+
+/**
+ * flush_tlb_mm - Invalidate TLB of specified VM context
+ * @mm: The VM context to invalidate.
+ */
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ cpumask_t cpu_mask;
+
+ preempt_disable();
+ cpu_mask = mm->cpu_vm_mask;
+ cpu_clear(smp_processor_id(), cpu_mask);
+
+ local_flush_tlb();
+ if (!cpus_empty(cpu_mask))
+ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+
+ preempt_enable();
+}
+
+/**
+ * flush_tlb_current_task - Invalidate TLB of current task
+ */
+void flush_tlb_current_task(void)
+{
+ struct mm_struct *mm = current->mm;
+ cpumask_t cpu_mask;
+
+ preempt_disable();
+ cpu_mask = mm->cpu_vm_mask;
+ cpu_clear(smp_processor_id(), cpu_mask);
+
+ local_flush_tlb();
+ if (!cpus_empty(cpu_mask))
+ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+
+ preempt_enable();
+}
+
+/**
+ * flush_tlb_page - Invalidate TLB of page
+ * @vma: The VM context to invalidate the page for.
+ * @va: The virtual address of the page to invalidate.
+ */
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ cpumask_t cpu_mask;
+
+ preempt_disable();
+ cpu_mask = mm->cpu_vm_mask;
+ cpu_clear(smp_processor_id(), cpu_mask);
+
+ local_flush_tlb_page(mm, va);
+ if (!cpus_empty(cpu_mask))
+ flush_tlb_others(cpu_mask, mm, va);
+
+ preempt_enable();
+}
+
+/**
+ * do_flush_tlb_all - Callback to completely invalidate a TLB
+ * @unused: Callback context (ignored).
+ */
+static void do_flush_tlb_all(void *unused)
+{
+ local_flush_tlb_all();
+}
+
+/**
+ * flush_tlb_all - Completely invalidate TLBs on all CPUs
+ */
+void flush_tlb_all(void)
+{
+ on_each_cpu(do_flush_tlb_all, 0, 1);
+}
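
Finally, a usage sketch of the per-page shoot-down (the helper is
hypothetical, not part of the patch): after generic MM code changes a user
PTE, flush_tlb_page() invalidates the stale entry locally and IPIs the other
CPUs sharing the mm.

	#include <linux/mm.h>
	#include <asm/tlbflush.h>

	static void example_update_pte(struct vm_area_struct *vma,
				       unsigned long addr, pte_t *ptep, pte_t pte)
	{
		set_pte(ptep, pte);		/* install the new translation */
		flush_tlb_page(vma, addr);	/* local invalidate + IPI others */
	}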