author     David S. Miller <davem@davemloft.net>    2009-06-15 03:02:23 -0700
committer  David S. Miller <davem@davemloft.net>    2009-06-15 03:02:23 -0700
commit     9cbc1cb8cd46ce1f7645b9de249b2ce8460129bb (patch)
tree       8d104ec2a459346b99413b0b77421ca7b9936c1a /arch/arm/include/asm
parent     ca44d6e60f9de26281fda203f58b570e1748c015 (diff)
parent     45e3e1935e2857c54783291107d33323b3ef33c8 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6

Conflicts:
	Documentation/feature-removal-schedule.txt
	drivers/scsi/fcoe/fcoe.c
	net/core/drop_monitor.c
	net/core/net-traces.c
Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--  arch/arm/include/asm/assembler.h             |  13
-rw-r--r--  arch/arm/include/asm/atomic.h                |  63
-rw-r--r--  arch/arm/include/asm/bitsperlong.h           |   1
-rw-r--r--  arch/arm/include/asm/cache.h                 |  16
-rw-r--r--  arch/arm/include/asm/cacheflush.h            |   8
-rw-r--r--  arch/arm/include/asm/cputype.h               |  25
-rw-r--r--  arch/arm/include/asm/flat.h                  |   3
-rw-r--r--  arch/arm/include/asm/hardware/arm_twd.h      |  21
-rw-r--r--  arch/arm/include/asm/hardware/cache-l2x0.h   |   2
-rw-r--r--  arch/arm/include/asm/hardware/gic.h          |   2
-rw-r--r--  arch/arm/include/asm/hardware/pl080.h        | 138
-rw-r--r--  arch/arm/include/asm/hardware/sharpsl_pm.h   | 106
-rw-r--r--  arch/arm/include/asm/hardware/vic.h          |   2
-rw-r--r--  arch/arm/include/asm/localtimer.h            |  63
-rw-r--r--  arch/arm/include/asm/mach/map.h              |   8
-rw-r--r--  arch/arm/include/asm/mman.h                  |   2
-rw-r--r--  arch/arm/include/asm/page.h                  |   9
-rw-r--r--  arch/arm/include/asm/pgtable.h               |   2
-rw-r--r--  arch/arm/include/asm/processor.h             |   1
-rw-r--r--  arch/arm/include/asm/ptrace.h                |  17
-rw-r--r--  arch/arm/include/asm/signal.h                |   2
-rw-r--r--  arch/arm/include/asm/sizes.h                 |   1
-rw-r--r--  arch/arm/include/asm/smp.h                   |  52
-rw-r--r--  arch/arm/include/asm/smp_scu.h               |   7
-rw-r--r--  arch/arm/include/asm/smp_twd.h               |  12
-rw-r--r--  arch/arm/include/asm/suspend.h               |   4
-rw-r--r--  arch/arm/include/asm/system.h                | 176
-rw-r--r--  arch/arm/include/asm/tlbflush.h              |  26
-rw-r--r--  arch/arm/include/asm/uaccess.h               |   2
29 files changed, 577 insertions, 207 deletions
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 6116e4893c0..15f8a092b70 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -114,3 +114,16 @@
.align 3; \
.long 9999b,9001f; \
.previous
+
+/*
+ * SMP data memory barrier
+ */
+ .macro smp_dmb
+#ifdef CONFIG_SMP
+#if __LINUX_ARM_ARCH__ >= 7
+ dmb
+#elif __LINUX_ARM_ARCH__ == 6
+ mcr p15, 0, r0, c7, c10, 5 @ dmb
+#endif
+#endif
+ .endm
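
The new smp_dmb assembler macro mirrors the barrier selection used on the C side: a plain dmb instruction on ARMv7, the CP15 "data memory barrier" operation on ARMv6, and nothing on uniprocessor builds. A minimal C-level sketch of the same selection, with a hypothetical helper name for illustration only:

/* Sketch only: how the equivalent C-side SMP barrier is chosen.
 * example_smp_dmb() is a hypothetical name, not a kernel API. */
static inline void example_smp_dmb(void)
{
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	asm volatile("dmb" : : : "memory");
#elif __LINUX_ARM_ARCH__ == 6
	asm volatile("mcr p15, 0, %0, c7, c10, 5" : : "r" (0) : "memory");
#endif
#endif
}
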
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index ee99723b3a6..9e07fe50702 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -44,11 +44,29 @@ static inline void atomic_set(atomic_t *v, int i)
: "cc");
}
+static inline void atomic_add(int i, atomic_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ __asm__ __volatile__("@ atomic_add\n"
+"1: ldrex %0, [%2]\n"
+" add %0, %0, %3\n"
+" strex %1, %0, [%2]\n"
+" teq %1, #0\n"
+" bne 1b"
+ : "=&r" (result), "=&r" (tmp)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+}
+
static inline int atomic_add_return(int i, atomic_t *v)
{
unsigned long tmp;
int result;
+ smp_mb();
+
__asm__ __volatile__("@ atomic_add_return\n"
"1: ldrex %0, [%2]\n"
" add %0, %0, %3\n"
@@ -59,14 +77,34 @@ static inline int atomic_add_return(int i, atomic_t *v)
: "r" (&v->counter), "Ir" (i)
: "cc");
+ smp_mb();
+
return result;
}
+static inline void atomic_sub(int i, atomic_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ __asm__ __volatile__("@ atomic_sub\n"
+"1: ldrex %0, [%2]\n"
+" sub %0, %0, %3\n"
+" strex %1, %0, [%2]\n"
+" teq %1, #0\n"
+" bne 1b"
+ : "=&r" (result), "=&r" (tmp)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+}
+
static inline int atomic_sub_return(int i, atomic_t *v)
{
unsigned long tmp;
int result;
+ smp_mb();
+
__asm__ __volatile__("@ atomic_sub_return\n"
"1: ldrex %0, [%2]\n"
" sub %0, %0, %3\n"
@@ -77,6 +115,8 @@ static inline int atomic_sub_return(int i, atomic_t *v)
: "r" (&v->counter), "Ir" (i)
: "cc");
+ smp_mb();
+
return result;
}
@@ -84,6 +124,8 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
unsigned long oldval, res;
+ smp_mb();
+
do {
__asm__ __volatile__("@ atomic_cmpxchg\n"
"ldrex %1, [%2]\n"
@@ -95,6 +137,8 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
: "cc");
} while (res);
+ smp_mb();
+
return oldval;
}
@@ -135,6 +179,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
return val;
}
+#define atomic_add(i, v) (void) atomic_add_return(i, v)
static inline int atomic_sub_return(int i, atomic_t *v)
{
@@ -148,6 +193,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
return val;
}
+#define atomic_sub(i, v) (void) atomic_sub_return(i, v)
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
@@ -187,10 +233,8 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-#define atomic_add(i, v) (void) atomic_add_return(i, v)
-#define atomic_inc(v) (void) atomic_add_return(1, v)
-#define atomic_sub(i, v) (void) atomic_sub_return(i, v)
-#define atomic_dec(v) (void) atomic_sub_return(1, v)
+#define atomic_inc(v) atomic_add(1, v)
+#define atomic_dec(v) atomic_sub(1, v)
#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
@@ -200,12 +244,11 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
-/* Atomic operations are already serializing on ARM */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
+#define smp_mb__before_atomic_dec() smp_mb()
+#define smp_mb__after_atomic_dec() smp_mb()
+#define smp_mb__before_atomic_inc() smp_mb()
+#define smp_mb__after_atomic_inc() smp_mb()
-#include <asm-generic/atomic.h>
+#include <asm-generic/atomic-long.h>
#endif
#endif
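
The point of the atomic.h changes is that value-returning operations (atomic_add_return, atomic_sub_return, atomic_cmpxchg) are now bracketed by smp_mb(), while the new void atomic_add()/atomic_sub() stay barrier-free, matching the kernel's documented atomic-ordering rules. A hedged usage sketch showing why the distinction matters; the refcount and names are illustrative:

#include <asm/atomic.h>

static atomic_t example_refcount = ATOMIC_INIT(1);

static void example_get(void)
{
	atomic_inc(&example_refcount);	/* no ordering implied, none needed */
}

static void example_put(void)
{
	/* atomic_dec_and_test() is built on atomic_sub_return(), so with this
	 * patch it carries full barriers: the teardown below cannot be
	 * reordered ahead of the decrement on SMP. */
	if (atomic_dec_and_test(&example_refcount))
		; /* free the object here */
}
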
diff --git a/arch/arm/include/asm/bitsperlong.h b/arch/arm/include/asm/bitsperlong.h
new file mode 100644
index 00000000000..6dc0bb0c13b
--- /dev/null
+++ b/arch/arm/include/asm/bitsperlong.h
@@ -0,0 +1 @@
+#include <asm-generic/bitsperlong.h>
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index cb7a9e97fd7..feaa75f0013 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -7,4 +7,20 @@
#define L1_CACHE_SHIFT 5
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+/*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
+ * sure that all such allocations are cache aligned. Otherwise,
+ * unrelated code may cause parts of the buffer to be read into the
+ * cache before the transfer is done, causing old data to be seen by
+ * the CPU.
+ */
+#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+
+/*
+ * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
+ */
+#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
+#define ARCH_SLAB_MINALIGN 8
+#endif
+
#endif
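
ARCH_KMALLOC_MINALIGN guarantees that kmalloc() buffers start and end on an L1 cache-line boundary, so a buffer handed to the DMA API can be cleaned or invalidated without corrupting neighbouring allocations. A hedged sketch of the pattern this protects; the device, size and direction are placeholders:

#include <linux/slab.h>
#include <linux/dma-mapping.h>

/* Illustrative only: 'dev' and the 512-byte transfer are assumptions. */
static void *example_dma_buffer(struct device *dev, dma_addr_t *handle)
{
	/* cache-line aligned thanks to ARCH_KMALLOC_MINALIGN */
	void *buf = kmalloc(512, GFP_KERNEL);

	if (!buf)
		return NULL;
	*handle = dma_map_single(dev, buf, 512, DMA_FROM_DEVICE);
	return buf;
}
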
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index bb7d695f390..1a711ea8418 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -429,6 +429,14 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
__flush_anon_page(vma, page, vmaddr);
}
+#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
+static inline void flush_kernel_dcache_page(struct page *page)
+{
+ /* highmem pages are always flushed upon kunmap already */
+ if ((cache_is_vivt() || cache_is_vipt_aliasing()) && !PageHighMem(page))
+ __cpuc_flush_dcache_page(page_address(page));
+}
+
#define flush_dcache_mmap_lock(mapping) \
spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
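
flush_kernel_dcache_page() is for code that writes a page through its kernel mapping while user space may read it through an aliasing mapping; on VIVT and aliasing-VIPT caches the dirty lines have to be pushed out explicitly. A sketch of the conventional call site, with an illustrative fill pattern:

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Sketch: fill a page via its kernel mapping, then make the data visible
 * to any user-space alias of that page. */
static void example_fill_page(struct page *page)
{
	void *addr = kmap(page);

	memset(addr, 0, PAGE_SIZE);
	flush_kernel_dcache_page(page);
	kunmap(page);
}
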
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 7b9d27e749b..b3e656c6fb7 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -8,6 +8,21 @@
#define CPUID_TCM 2
#define CPUID_TLBTYPE 3
+#define CPUID_EXT_PFR0 "c1, 0"
+#define CPUID_EXT_PFR1 "c1, 1"
+#define CPUID_EXT_DFR0 "c1, 2"
+#define CPUID_EXT_AFR0 "c1, 3"
+#define CPUID_EXT_MMFR0 "c1, 4"
+#define CPUID_EXT_MMFR1 "c1, 5"
+#define CPUID_EXT_MMFR2 "c1, 6"
+#define CPUID_EXT_MMFR3 "c1, 7"
+#define CPUID_EXT_ISAR0 "c2, 0"
+#define CPUID_EXT_ISAR1 "c2, 1"
+#define CPUID_EXT_ISAR2 "c2, 2"
+#define CPUID_EXT_ISAR3 "c2, 3"
+#define CPUID_EXT_ISAR4 "c2, 4"
+#define CPUID_EXT_ISAR5 "c2, 5"
+
#ifdef CONFIG_CPU_CP15
#define read_cpuid(reg) \
({ \
@@ -18,9 +33,19 @@
: "cc"); \
__val; \
})
+#define read_cpuid_ext(ext_reg) \
+ ({ \
+ unsigned int __val; \
+ asm("mrc p15, 0, %0, c0, " ext_reg \
+ : "=r" (__val) \
+ : \
+ : "cc"); \
+ __val; \
+ })
#else
extern unsigned int processor_id;
#define read_cpuid(reg) (processor_id)
+#define read_cpuid_ext(reg) 0
#endif
/*
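
read_cpuid_ext() reads the CPUID extension registers (ID_PFRx, ID_MMFRx, ID_ISARx) through the "c0, c1/c2" CP15 encodings defined above. A small, hedged example of probing one of them; interpreting the bit-fields is architecture-manual material and is left out:

#include <linux/kernel.h>
#include <asm/cputype.h>

static void example_dump_cpu_features(void)
{
	unsigned int midr  = read_cpuid(CPUID_ID);
	unsigned int isar0 = read_cpuid_ext(CPUID_EXT_ISAR0);

	printk(KERN_INFO "MIDR %08x ISAR0 %08x\n", midr, isar0);
}
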
diff --git a/arch/arm/include/asm/flat.h b/arch/arm/include/asm/flat.h
index 1d77e51907f..59426a4595c 100644
--- a/arch/arm/include/asm/flat.h
+++ b/arch/arm/include/asm/flat.h
@@ -5,9 +5,6 @@
#ifndef __ARM_FLAT_H__
#define __ARM_FLAT_H__
-/* An odd number of words will be pushed after this alignment, so
- deliberately misalign the value. */
-#define flat_stack_align(sp) sp = (void *)(((unsigned long)(sp) - 4) | 4)
#define flat_argvp_envp_on_stack() 1
#define flat_old_ram_flag(flags) (flags)
#define flat_reloc_valid(reloc, size) ((reloc) <= (size))
diff --git a/arch/arm/include/asm/hardware/arm_twd.h b/arch/arm/include/asm/hardware/arm_twd.h
deleted file mode 100644
index e521b70713c..00000000000
--- a/arch/arm/include/asm/hardware/arm_twd.h
+++ /dev/null
@@ -1,21 +0,0 @@
-#ifndef __ASM_HARDWARE_TWD_H
-#define __ASM_HARDWARE_TWD_H
-
-#define TWD_TIMER_LOAD 0x00
-#define TWD_TIMER_COUNTER 0x04
-#define TWD_TIMER_CONTROL 0x08
-#define TWD_TIMER_INTSTAT 0x0C
-
-#define TWD_WDOG_LOAD 0x20
-#define TWD_WDOG_COUNTER 0x24
-#define TWD_WDOG_CONTROL 0x28
-#define TWD_WDOG_INTSTAT 0x2C
-#define TWD_WDOG_RESETSTAT 0x30
-#define TWD_WDOG_DISABLE 0x34
-
-#define TWD_TIMER_CONTROL_ENABLE (1 << 0)
-#define TWD_TIMER_CONTROL_ONESHOT (0 << 1)
-#define TWD_TIMER_CONTROL_PERIODIC (1 << 1)
-#define TWD_TIMER_CONTROL_IT_ENABLE (1 << 2)
-
-#endif
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 64f2252a25c..cdb9022716f 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -24,6 +24,8 @@
#define L2X0_CACHE_TYPE 0x004
#define L2X0_CTRL 0x100
#define L2X0_AUX_CTRL 0x104
+#define L2X0_TAG_LATENCY_CTRL 0x108
+#define L2X0_DATA_LATENCY_CTRL 0x10C
#define L2X0_EVENT_CNT_CTRL 0x200
#define L2X0_EVENT_CNT1_CFG 0x204
#define L2X0_EVENT_CNT0_CFG 0x208
diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
index 4924914af18..7f34333bb54 100644
--- a/arch/arm/include/asm/hardware/gic.h
+++ b/arch/arm/include/asm/hardware/gic.h
@@ -36,7 +36,7 @@
void gic_dist_init(unsigned int gic_nr, void __iomem *base, unsigned int irq_start);
void gic_cpu_init(unsigned int gic_nr, void __iomem *base);
void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
-void gic_raise_softirq(cpumask_t cpumask, unsigned int irq);
+void gic_raise_softirq(const struct cpumask *mask, unsigned int irq);
#endif
#endif
diff --git a/arch/arm/include/asm/hardware/pl080.h b/arch/arm/include/asm/hardware/pl080.h
new file mode 100644
index 00000000000..6a6c66be7f6
--- /dev/null
+++ b/arch/arm/include/asm/hardware/pl080.h
@@ -0,0 +1,138 @@
+/* arch/arm/include/asm/hardware/pl080.h
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * ARM PrimeCell PL080 DMA controller
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+/* Note, there are some Samsung updates to this controller block which
+ * make it not entirely compatible with the PL080 specification from
+ * ARM. When in doubt, check the Samsung documentation first.
+ *
+ * The Samsung defines are PL080S, and add an extra control register,
+ * the ability to move more than 2^11 counts of data and some extra
+ * OneNAND features.
+*/
+
+#define PL080_INT_STATUS (0x00)
+#define PL080_TC_STATUS (0x04)
+#define PL080_TC_CLEAR (0x08)
+#define PL080_ERR_STATUS (0x0C)
+#define PL080_ERR_CLEAR (0x10)
+#define PL080_RAW_TC_STATUS (0x14)
+#define PL080_RAW_ERR_STATUS (0x18)
+#define PL080_EN_CHAN (0x1c)
+#define PL080_SOFT_BREQ (0x20)
+#define PL080_SOFT_SREQ (0x24)
+#define PL080_SOFT_LBREQ (0x28)
+#define PL080_SOFT_LSREQ (0x2C)
+
+#define PL080_CONFIG (0x30)
+#define PL080_CONFIG_M2_BE (1 << 2)
+#define PL080_CONFIG_M1_BE (1 << 1)
+#define PL080_CONFIG_ENABLE (1 << 0)
+
+#define PL080_SYNC (0x34)
+
+/* Per channel configuration registers */
+
+#define PL008_Cx_STRIDE (0x20)
+#define PL080_Cx_BASE(x) ((0x100 + (x * 0x20)))
+#define PL080_Cx_SRC_ADDR(x) ((0x100 + (x * 0x20)))
+#define PL080_Cx_DST_ADDR(x) ((0x104 + (x * 0x20)))
+#define PL080_Cx_LLI(x) ((0x108 + (x * 0x20)))
+#define PL080_Cx_CONTROL(x) ((0x10C + (x * 0x20)))
+#define PL080_Cx_CONFIG(x) ((0x110 + (x * 0x20)))
+#define PL080S_Cx_CONTROL2(x) ((0x110 + (x * 0x20)))
+#define PL080S_Cx_CONFIG(x) ((0x114 + (x * 0x20)))
+
+#define PL080_CH_SRC_ADDR (0x00)
+#define PL080_CH_DST_ADDR (0x04)
+#define PL080_CH_LLI (0x08)
+#define PL080_CH_CONTROL (0x0C)
+#define PL080_CH_CONFIG (0x10)
+#define PL080S_CH_CONTROL2 (0x10)
+#define PL080S_CH_CONFIG (0x14)
+
+#define PL080_LLI_ADDR_MASK (0x3fffffff << 2)
+#define PL080_LLI_ADDR_SHIFT (2)
+#define PL080_LLI_LM_AHB2 (1 << 0)
+
+#define PL080_CONTROL_TC_IRQ_EN (1 << 31)
+#define PL080_CONTROL_PROT_MASK (0x7 << 28)
+#define PL080_CONTROL_PROT_SHIFT (28)
+#define PL080_CONTROL_PROT_SYS (1 << 28)
+#define PL080_CONTROL_DST_INCR (1 << 27)
+#define PL080_CONTROL_SRC_INCR (1 << 26)
+#define PL080_CONTROL_DST_AHB2 (1 << 25)
+#define PL080_CONTROL_SRC_AHB2 (1 << 24)
+#define PL080_CONTROL_DWIDTH_MASK (0x7 << 21)
+#define PL080_CONTROL_DWIDTH_SHIFT (21)
+#define PL080_CONTROL_SWIDTH_MASK (0x7 << 18)
+#define PL080_CONTROL_SWIDTH_SHIFT (18)
+#define PL080_CONTROL_DB_SIZE_MASK (0x7 << 15)
+#define PL080_CONTROL_DB_SIZE_SHIFT (15)
+#define PL080_CONTROL_SB_SIZE_MASK (0x7 << 12)
+#define PL080_CONTROL_SB_SIZE_SHIFT (12)
+#define PL080_CONTROL_TRANSFER_SIZE_MASK (0xfff << 0)
+#define PL080_CONTROL_TRANSFER_SIZE_SHIFT (0)
+
+#define PL080_BSIZE_1 (0x0)
+#define PL080_BSIZE_4 (0x1)
+#define PL080_BSIZE_8 (0x2)
+#define PL080_BSIZE_16 (0x3)
+#define PL080_BSIZE_32 (0x4)
+#define PL080_BSIZE_64 (0x5)
+#define PL080_BSIZE_128 (0x6)
+#define PL080_BSIZE_256 (0x7)
+
+#define PL080_WIDTH_8BIT (0x0)
+#define PL080_WIDTH_16BIT (0x1)
+#define PL080_WIDTH_32BIT (0x2)
+
+#define PL080_CONFIG_HALT (1 << 18)
+#define PL080_CONFIG_ACTIVE (1 << 17) /* RO */
+#define PL080_CONFIG_LOCK (1 << 16)
+#define PL080_CONFIG_TC_IRQ_MASK (1 << 15)
+#define PL080_CONFIG_ERR_IRQ_MASK (1 << 14)
+#define PL080_CONFIG_FLOW_CONTROL_MASK (0x7 << 11)
+#define PL080_CONFIG_FLOW_CONTROL_SHIFT (11)
+#define PL080_CONFIG_DST_SEL_MASK (0xf << 6)
+#define PL080_CONFIG_DST_SEL_SHIFT (6)
+#define PL080_CONFIG_SRC_SEL_MASK (0xf << 1)
+#define PL080_CONFIG_SRC_SEL_SHIFT (1)
+#define PL080_CONFIG_ENABLE (1 << 0)
+
+#define PL080_FLOW_MEM2MEM (0x0)
+#define PL080_FLOW_MEM2PER (0x1)
+#define PL080_FLOW_PER2MEM (0x2)
+#define PL080_FLOW_SRC2DST (0x3)
+#define PL080_FLOW_SRC2DST_DST (0x4)
+#define PL080_FLOW_MEM2PER_PER (0x5)
+#define PL080_FLOW_PER2MEM_PER (0x6)
+#define PL080_FLOW_SRC2DST_SRC (0x7)
+
+/* DMA linked list chain structure */
+
+struct pl080_lli {
+ u32 src_addr;
+ u32 dst_addr;
+ u32 next_lli;
+ u32 control0;
+};
+
+struct pl080s_lli {
+ u32 src_addr;
+ u32 dst_addr;
+ u32 next_lli;
+ u32 control0;
+ u32 control1;
+};
+
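
The PL080 defines describe the PrimeCell DMA controller's layout: a global block (interrupt/error status, config, sync), a per-channel block at 0x100 + channel * 0x20, and the linked-list item (LLI) format the controller walks in hardware. A hedged sketch of filling one LLI with the control-word fields; the addresses, burst sizes and transfer length are placeholders:

#include <linux/types.h>
#include <asm/hardware/pl080.h>

/* Sketch: describe one 64-word, 32-bit-wide transfer with incrementing
 * source, terminal-count interrupt enabled, last item in the chain. */
static void example_fill_lli(struct pl080_lli *lli, u32 src, u32 dst)
{
	lli->src_addr = src;
	lli->dst_addr = dst;
	lli->next_lli = 0;
	lli->control0 = (64 << PL080_CONTROL_TRANSFER_SIZE_SHIFT) |
			(PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT) |
			(PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT) |
			(PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT) |
			PL080_CONTROL_SRC_INCR |
			PL080_CONTROL_TC_IRQ_EN;
}
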
diff --git a/arch/arm/include/asm/hardware/sharpsl_pm.h b/arch/arm/include/asm/hardware/sharpsl_pm.h
deleted file mode 100644
index 2d00db22b98..00000000000
--- a/arch/arm/include/asm/hardware/sharpsl_pm.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * SharpSL Battery/PM Driver
- *
- * Copyright (c) 2004-2005 Richard Purdie
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/interrupt.h>
-
-struct sharpsl_charger_machinfo {
- void (*init)(void);
- void (*exit)(void);
- int gpio_acin;
- int gpio_batfull;
- int batfull_irq;
- int gpio_batlock;
- int gpio_fatal;
- void (*discharge)(int);
- void (*discharge1)(int);
- void (*charge)(int);
- void (*measure_temp)(int);
- void (*presuspend)(void);
- void (*postsuspend)(void);
- void (*earlyresume)(void);
- unsigned long (*read_devdata)(int);
-#define SHARPSL_BATT_VOLT 1
-#define SHARPSL_BATT_TEMP 2
-#define SHARPSL_ACIN_VOLT 3
-#define SHARPSL_STATUS_ACIN 4
-#define SHARPSL_STATUS_LOCK 5
-#define SHARPSL_STATUS_CHRGFULL 6
-#define SHARPSL_STATUS_FATAL 7
- unsigned long (*charger_wakeup)(void);
- int (*should_wakeup)(unsigned int resume_on_alarm);
- void (*backlight_limit)(int);
- int (*backlight_get_status) (void);
- int charge_on_volt;
- int charge_on_temp;
- int charge_acin_high;
- int charge_acin_low;
- int fatal_acin_volt;
- int fatal_noacin_volt;
- int bat_levels;
- struct battery_thresh *bat_levels_noac;
- struct battery_thresh *bat_levels_acin;
- struct battery_thresh *bat_levels_noac_bl;
- struct battery_thresh *bat_levels_acin_bl;
- int status_high_acin;
- int status_low_acin;
- int status_high_noac;
- int status_low_noac;
-};
-
-struct battery_thresh {
- int voltage;
- int percentage;
-};
-
-struct battery_stat {
- int ac_status; /* APM AC Present/Not Present */
- int mainbat_status; /* APM Main Battery Status */
- int mainbat_percent; /* Main Battery Percentage Charge */
- int mainbat_voltage; /* Main Battery Voltage */
-};
-
-struct sharpsl_pm_status {
- struct device *dev;
- struct timer_list ac_timer;
- struct timer_list chrg_full_timer;
-
- int charge_mode;
-#define CHRG_ERROR (-1)
-#define CHRG_OFF (0)
-#define CHRG_ON (1)
-#define CHRG_DONE (2)
-
- unsigned int flags;
-#define SHARPSL_SUSPENDED (1 << 0) /* Device is Suspended */
-#define SHARPSL_ALARM_ACTIVE (1 << 1) /* Alarm is for charging event (not user) */
-#define SHARPSL_BL_LIMIT (1 << 2) /* Backlight Intensity Limited */
-#define SHARPSL_APM_QUEUED (1 << 3) /* APM Event Queued */
-#define SHARPSL_DO_OFFLINE_CHRG (1 << 4) /* Trigger the offline charger */
-
- int full_count;
- unsigned long charge_start_time;
- struct sharpsl_charger_machinfo *machinfo;
- struct battery_stat battstat;
-};
-
-extern struct sharpsl_pm_status sharpsl_pm;
-
-
-#define SHARPSL_LED_ERROR 2
-#define SHARPSL_LED_ON 1
-#define SHARPSL_LED_OFF 0
-
-void sharpsl_battery_kick(void);
-void sharpsl_pm_led(int val);
-irqreturn_t sharpsl_ac_isr(int irq, void *dev_id);
-irqreturn_t sharpsl_chrg_full_isr(int irq, void *dev_id);
-irqreturn_t sharpsl_fatal_isr(int irq, void *dev_id);
-
diff --git a/arch/arm/include/asm/hardware/vic.h b/arch/arm/include/asm/hardware/vic.h
index f87328d4a18..5d72550a809 100644
--- a/arch/arm/include/asm/hardware/vic.h
+++ b/arch/arm/include/asm/hardware/vic.h
@@ -41,7 +41,7 @@
#define VIC_PL192_VECT_ADDR 0xF00
#ifndef __ASSEMBLY__
-void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources);
+void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources);
#endif
#endif
diff --git a/arch/arm/include/asm/localtimer.h b/arch/arm/include/asm/localtimer.h
new file mode 100644
index 00000000000..50c7e7cfd67
--- /dev/null
+++ b/arch/arm/include/asm/localtimer.h
@@ -0,0 +1,63 @@
+/*
+ * arch/arm/include/asm/localtimer.h
+ *
+ * Copyright (C) 2004-2005 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARM_LOCALTIMER_H
+#define __ASM_ARM_LOCALTIMER_H
+
+struct clock_event_device;
+
+/*
+ * Setup a per-cpu timer, whether it be a local timer or dummy broadcast
+ */
+void percpu_timer_setup(void);
+
+/*
+ * Called from assembly, this is the local timer IRQ handler
+ */
+asmlinkage void do_local_timer(struct pt_regs *);
+
+
+#ifdef CONFIG_LOCAL_TIMERS
+
+#ifdef CONFIG_HAVE_ARM_TWD
+
+#include "smp_twd.h"
+
+#define local_timer_ack() twd_timer_ack()
+#define local_timer_stop() twd_timer_stop()
+
+#else
+
+/*
+ * Platform provides this to acknowledge a local timer IRQ.
+ * Returns true if the local timer IRQ is to be processed.
+ */
+int local_timer_ack(void);
+
+/*
+ * Stop a local timer interrupt.
+ */
+void local_timer_stop(void);
+
+#endif
+
+/*
+ * Setup a local timer interrupt for a CPU.
+ */
+void local_timer_setup(struct clock_event_device *);
+
+#else
+
+static inline void local_timer_stop(void)
+{
+}
+
+#endif
+
+#endif
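
localtimer.h collects the per-CPU timer hooks formerly scattered through smp.h: with CONFIG_HAVE_ARM_TWD the TWD implementations of local_timer_ack()/local_timer_stop() are used, otherwise the platform supplies its own. A hedged sketch of a platform-side local_timer_setup() for the non-TWD case; the callbacks, rating and name are assumptions, and mult/shift setup is omitted:

#include <linux/clockchips.h>
#include <linux/smp.h>
#include <asm/localtimer.h>

/* Hypothetical platform callbacks; a real platform provides its own. */
extern void example_timer_set_mode(enum clock_event_mode mode,
				   struct clock_event_device *evt);
extern int example_timer_set_next_event(unsigned long cycles,
					struct clock_event_device *evt);

void local_timer_setup(struct clock_event_device *evt)
{
	evt->name		= "example_local_timer";
	evt->features		= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	evt->rating		= 350;
	evt->set_mode		= example_timer_set_mode;
	evt->set_next_event	= example_timer_set_next_event;
	evt->cpumask		= cpumask_of(smp_processor_id());
	clockevents_register_device(evt);
}
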
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index 58cf91f38e6..742c2aaeb02 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -30,6 +30,14 @@ struct map_desc {
#ifdef CONFIG_MMU
extern void iotable_init(struct map_desc *, int);
+
+struct mem_type;
+extern const struct mem_type *get_mem_type(unsigned int type);
+/*
+ * external interface to remap single page with appropriate type
+ */
+extern int ioremap_page(unsigned long virt, unsigned long phys,
+ const struct mem_type *mtype);
#else
#define iotable_init(map,num) do { } while (0)
#endif
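
get_mem_type() and ioremap_page() expose the mapping machinery for remapping a single page with a specific memory type, rather than going through a full ioremap(). A hedged sketch; the virtual and physical addresses are placeholders and MT_DEVICE is the generic device memory type from this header:

#include <linux/errno.h>
#include <asm/mach/map.h>

#define EXAMPLE_VIRT	0xfef00000UL	/* placeholder virtual address */
#define EXAMPLE_PHYS	0x10000000UL	/* placeholder physical address */

static int example_map_device_page(void)
{
	const struct mem_type *mt = get_mem_type(MT_DEVICE);

	if (!mt)
		return -EINVAL;
	return ioremap_page(EXAMPLE_VIRT, EXAMPLE_PHYS, mt);
}
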
diff --git a/arch/arm/include/asm/mman.h b/arch/arm/include/asm/mman.h
index 54570d2e95b..fc26976d8e3 100644
--- a/arch/arm/include/asm/mman.h
+++ b/arch/arm/include/asm/mman.h
@@ -1,7 +1,7 @@
#ifndef __ARM_MMAN_H__
#define __ARM_MMAN_H__
-#include <asm-generic/mman.h>
+#include <asm-generic/mman-common.h>
#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index e6eb8a67b80..be962c1349c 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -202,13 +202,6 @@ typedef struct page *pgtable_t;
(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-/*
- * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
- */
-#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
-#define ARCH_SLAB_MINALIGN 8
-#endif
-
-#include <asm-generic/page.h>
+#include <asm-generic/getorder.h>
#endif
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 110295c5461..1cd2d6416bd 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -342,7 +342,7 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
return __va(ptr);
}
-#define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd)))
+#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd)))
/*
* Conversion functions: convert a page and protection to a page entry,
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 1845892260e..6a89567ffc5 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -71,6 +71,7 @@ struct thread_struct {
regs->ARM_cpsr = USR26_MODE; \
if (elf_hwcap & HWCAP_THUMB && pc & 1) \
regs->ARM_cpsr |= PSR_T_BIT; \
+ regs->ARM_cpsr |= PSR_ENDSTATE; \
regs->ARM_pc = pc & ~1; /* pc */ \
regs->ARM_sp = sp; /* sp */ \
regs->ARM_r2 = stack[2]; /* r2 (envp) */ \
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 236a06b9b7c..67b833c9b6b 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -50,6 +50,7 @@
#define PSR_F_BIT 0x00000040
#define PSR_I_BIT 0x00000080
#define PSR_A_BIT 0x00000100
+#define PSR_E_BIT 0x00000200
#define PSR_J_BIT 0x01000000
#define PSR_Q_BIT 0x08000000
#define PSR_V_BIT 0x10000000
@@ -65,6 +66,22 @@
#define PSR_x 0x0000ff00 /* Extension */
#define PSR_c 0x000000ff /* Control */
+/*
+ * ARMv7 groups of APSR bits
+ */
+#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
+#define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
+#define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
+
+/*
+ * Default endianness state
+ */
+#ifdef CONFIG_CPU_ENDIAN_BE8
+#define PSR_ENDSTATE PSR_E_BIT
+#else
+#define PSR_ENDSTATE 0
+#endif
+
#ifndef __ASSEMBLY__
/*
diff --git a/arch/arm/include/asm/signal.h b/arch/arm/include/asm/signal.h
index d0fb487aba4..43ba0fb1c8a 100644
--- a/arch/arm/include/asm/signal.h
+++ b/arch/arm/include/asm/signal.h
@@ -111,7 +111,7 @@ typedef unsigned long sigset_t;
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
-#include <asm-generic/signal.h>
+#include <asm-generic/signal-defs.h>
#ifdef __KERNEL__
struct old_sigaction {
diff --git a/arch/arm/include/asm/sizes.h b/arch/arm/include/asm/sizes.h
index ada93a8fc2e..4fc1565e4f9 100644
--- a/arch/arm/include/asm/sizes.h
+++ b/arch/arm/include/asm/sizes.h
@@ -29,6 +29,7 @@
#define SZ_512 0x00000200
#define SZ_1K 0x00000400
+#define SZ_2K 0x00000800
#define SZ_4K 0x00001000
#define SZ_8K 0x00002000
#define SZ_16K 0x00004000
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index fad70da5911..a06e735b262 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -41,7 +41,7 @@ extern void show_ipi_list(struct seq_file *p);
asmlinkage void do_IPI(struct pt_regs *regs);
/*
- * Setup the SMP cpu_possible_map
+ * Setup the set of possible CPUs (via set_cpu_possible)
*/
extern void smp_init_cpus(void);
@@ -53,17 +53,7 @@ extern void smp_store_cpu_info(unsigned int cpuid);
/*
* Raise an IPI cross call on CPUs in callmap.
*/
-extern void smp_cross_call(cpumask_t callmap);
-
-/*
- * Broadcast a timer interrupt to the other CPUs.
- */
-extern void smp_send_timer(void);
-
-/*
- * Broadcast a clock event to other CPUs.
- */
-extern void smp_timer_broadcast(cpumask_t mask);
+extern void smp_cross_call(const struct cpumask *mask);
/*
* Boot a secondary CPU, and assign it the specified idle task.
@@ -102,46 +92,12 @@ extern int platform_cpu_kill(unsigned int cpu);
extern void platform_cpu_enable(unsigned int cpu);
extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
-
-/*
- * Local timer interrupt handling function (can be IPI'ed).
- */
-extern void local_timer_interrupt(void);
-
-#ifdef CONFIG_LOCAL_TIMERS
-
-/*
- * Stop a local timer interrupt.
- */
-extern void local_timer_stop(void);
-
-/*
- * Platform provides this to acknowledge a local timer IRQ
- */
-extern int local_timer_ack(void);
-
-#else
-
-static inline void local_timer_stop(void)
-{
-}
-
-#endif
-
-/*
- * Setup a local timer interrupt for a CPU.
- */
-extern void local_timer_setup(void);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
/*
* show local interrupt info
*/
extern void show_local_irqs(struct seq_file *);
-/*
- * Called from assembly, this is the local timer IRQ handler
- */
-asmlinkage void do_local_timer(struct pt_regs *);
-
#endif /* ifndef __ASM_ARM_SMP_H */
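
The smp.h changes move the IPI interfaces from by-value cpumask_t to const struct cpumask *, in line with the generic cpumask rework. A hedged sketch of a caller after the conversion; this simply mirrors what the generic smp_call_function path does internally:

#include <linux/cpumask.h>
#include <linux/smp.h>
#include <asm/smp.h>

/* Sketch: raise the cross-call IPI on every other online CPU. */
static void example_kick_others(void)
{
	struct cpumask mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	smp_cross_call(&mask);
}
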
diff --git a/arch/arm/include/asm/smp_scu.h b/arch/arm/include/asm/smp_scu.h
new file mode 100644
index 00000000000..2376835015d
--- /dev/null
+++ b/arch/arm/include/asm/smp_scu.h
@@ -0,0 +1,7 @@
+#ifndef __ASMARM_ARCH_SCU_H
+#define __ASMARM_ARCH_SCU_H
+
+unsigned int scu_get_core_count(void __iomem *);
+void scu_enable(void __iomem *);
+
+#endif
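
The Snoop Control Unit helpers are what a Cortex-A9 MP style platform calls during SMP bring-up: scu_get_core_count() to size the set of possible CPUs and scu_enable() before starting secondaries. A hedged sketch of the smp_init_cpus() side; example_scu_base() stands in for the platform's own ioremapped SCU base:

#include <linux/init.h>
#include <linux/io.h>
#include <linux/cpumask.h>
#include <asm/smp_scu.h>

/* Hypothetical: returns the platform's mapped SCU base address. */
extern void __iomem *example_scu_base(void);

void __init smp_init_cpus(void)
{
	void __iomem *scu_base = example_scu_base();
	unsigned int i, ncores = scu_get_core_count(scu_base);

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);
}
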
diff --git a/arch/arm/include/asm/smp_twd.h b/arch/arm/include/asm/smp_twd.h
new file mode 100644
index 00000000000..7be0978b262
--- /dev/null
+++ b/arch/arm/include/asm/smp_twd.h
@@ -0,0 +1,12 @@
+#ifndef __ASMARM_SMP_TWD_H
+#define __ASMARM_SMP_TWD_H
+
+struct clock_event_device;
+
+extern void __iomem *twd_base;
+
+void twd_timer_stop(void);
+int twd_timer_ack(void);
+void twd_timer_setup(struct clock_event_device *);
+
+#endif
diff --git a/arch/arm/include/asm/suspend.h b/arch/arm/include/asm/suspend.h
deleted file mode 100644
index cf0d0bdee74..00000000000
--- a/arch/arm/include/asm/suspend.h
+++ /dev/null
@@ -1,4 +0,0 @@
-#ifndef _ASMARM_SUSPEND_H
-#define _ASMARM_SUSPEND_H
-
-#endif
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index bd4dc8ed53d..d65b2f5bf41 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -248,6 +248,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
unsigned int tmp;
#endif
+ smp_mb();
+
switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
case 1:
@@ -307,6 +309,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
__bad_xchg(ptr, size), ret = 0;
break;
}
+ smp_mb();
return ret;
}
@@ -316,6 +319,12 @@ extern void enable_hlt(void);
#include <asm-generic/cmpxchg-local.h>
+#if __LINUX_ARM_ARCH__ < 6
+
+#ifdef CONFIG_SMP
+#error "SMP is not supported on this platform"
+#endif
+
/*
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
@@ -329,6 +338,173 @@ extern void enable_hlt(void);
#include <asm-generic/cmpxchg.h>
#endif
+#else /* __LINUX_ARM_ARCH__ >= 6 */
+
+extern void __bad_cmpxchg(volatile void *ptr, int size);
+
+/*
+ * cmpxchg only supports 32-bit operands on ARMv6.
+ */
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+ unsigned long new, int size)
+{
+ unsigned long oldval, res;
+
+ switch (size) {
+#ifdef CONFIG_CPU_32v6K
+ case 1:
+ do {
+ asm volatile("@ __cmpxchg1\n"
+ " ldrexb %1, [%2]\n"
+ " mov %0, #0\n"
+ " teq %1, %3\n"
+ " strexbeq %0, %4, [%2]\n"
+ : "=&r" (res), "=&r" (oldval)
+ : "r" (ptr), "Ir" (old), "r" (new)
+ : "memory", "cc");
+ } while (res);
+ break;
+ case 2:
+ do {
+ asm volatile("@ __cmpxchg1\n"
+ " ldrexh %1, [%2]\n"
+ " mov %0, #0\n"
+ " teq %1, %3\n"
+ " strexheq %0, %4, [%2]\n"
+ : "=&r" (res), "=&r" (oldval)
+ : "r" (ptr), "Ir" (old), "r" (new)
+ : "memory", "cc");
+ } while (res);
+ break;
+#endif /* CONFIG_CPU_32v6K */
+ case 4:
+ do {
+ asm volatile("@ __cmpxchg4\n"
+ " ldrex %1, [%2]\n"
+ " mov %0, #0\n"
+ " teq %1, %3\n"
+ " strexeq %0, %4, [%2]\n"
+ : "=&r" (res), "=&r" (oldval)
+ : "r" (ptr), "Ir" (old), "r" (new)
+ : "memory", "cc");
+ } while (res);
+ break;
+ default:
+ __bad_cmpxchg(ptr, size);
+ oldval = 0;
+ }
+
+ return oldval;
+}
+
+static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
+ unsigned long new, int size)
+{
+ unsigned long ret;
+
+ smp_mb();
+ ret = __cmpxchg(ptr, old, new, size);
+ smp_mb();
+
+ return ret;
+}
+
+#define cmpxchg(ptr,o,n) \
+ ((__typeof__(*(ptr)))__cmpxchg_mb((ptr), \
+ (unsigned long)(o), \
+ (unsigned long)(n), \
+ sizeof(*(ptr))))
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+ unsigned long old,
+ unsigned long new, int size)
+{
+ unsigned long ret;
+
+ switch (size) {
+#ifndef CONFIG_CPU_32v6K
+ case 1:
+ case 2:
+ ret = __cmpxchg_local_generic(ptr, old, new, size);
+ break;
+#endif /* !CONFIG_CPU_32v6K */
+ default:
+ ret = __cmpxchg(ptr, old, new, size);
+ }
+
+ return ret;
+}
+
+#define cmpxchg_local(ptr,o,n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local((ptr), \
+ (unsigned long)(o), \
+ (unsigned long)(n), \
+ sizeof(*(ptr))))
+
+#ifdef CONFIG_CPU_32v6K
+
+/*
+ * Note : ARMv7-M (currently unsupported by Linux) does not support
+ * ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should
+ * not be allowed to use __cmpxchg64.
+ */
+static inline unsigned long long __cmpxchg64(volatile void *ptr,
+ unsigned long long old,
+ unsigned long long new)
+{
+ register unsigned long long oldval asm("r0");
+ register unsigned long long __old asm("r2") = old;
+ register unsigned long long __new asm("r4") = new;
+ unsigned long res;
+
+ do {
+ asm volatile(
+ " @ __cmpxchg8\n"
+ " ldrexd %1, %H1, [%2]\n"
+ " mov %0, #0\n"
+ " teq %1, %3\n"
+ " teqeq %H1, %H3\n"
+ " strexdeq %0, %4, %H4, [%2]\n"
+ : "=&r" (res), "=&r" (oldval)
+ : "r" (ptr), "Ir" (__old), "r" (__new)
+ : "memory", "cc");
+ } while (res);
+
+ return oldval;
+}
+
+static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
+ unsigned long long old,
+ unsigned long long new)
+{
+ unsigned long long ret;
+
+ smp_mb();
+ ret = __cmpxchg64(ptr, old, new);
+ smp_mb();
+
+ return ret;
+}
+
+#define cmpxchg64(ptr,o,n) \
+ ((__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \
+ (unsigned long long)(o), \
+ (unsigned long long)(n)))
+
+#define cmpxchg64_local(ptr,o,n) \
+ ((__typeof__(*(ptr)))__cmpxchg64((ptr), \
+ (unsigned long long)(o), \
+ (unsigned long long)(n)))
+
+#else /* !CONFIG_CPU_32v6K */
+
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+#endif /* CONFIG_CPU_32v6K */
+
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
+
#endif /* __ASSEMBLY__ */
#define arch_align_stack(x) (x)
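
On ARMv6 and later, cmpxchg() is now implemented with ldrex/strex and wrapped in full barriers (__cmpxchg_mb), so the usual lock-free patterns work on 4-byte objects (and 1/2-byte objects when CONFIG_CPU_32v6K is set). A hedged sketch of the canonical compare-and-swap retry loop; the counter and limit are illustrative:

#include <asm/system.h>

static unsigned int example_counter;

/* Sketch: lock-free saturating increment built on cmpxchg(). */
static void example_saturating_inc(unsigned int limit)
{
	unsigned int old, new;

	do {
		old = example_counter;
		if (old >= limit)
			return;
		new = old + 1;
	} while (cmpxchg(&example_counter, old, new) != old);
}
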
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index a62218013c7..c964f3fc3bc 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -40,6 +40,12 @@
#define TLB_V6_I_ASID (1 << 18)
#define TLB_BTB (1 << 28)
+
+/* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */
+#define TLB_V7_UIS_PAGE (1 << 19)
+#define TLB_V7_UIS_FULL (1 << 20)
+#define TLB_V7_UIS_ASID (1 << 21)
+
#define TLB_L2CLEAN_FR (1 << 29) /* Feroceon */
#define TLB_DCLEAN (1 << 30)
#define TLB_WB (1 << 31)
@@ -176,9 +182,17 @@
# define v6wbi_always_flags (-1UL)
#endif
+#ifdef CONFIG_SMP
+#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \
+ TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
+#else
+#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \
+ TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID)
+#endif
+
#ifdef CONFIG_CPU_TLB_V7
-# define v7wbi_possible_flags v6wbi_tlb_flags
-# define v7wbi_always_flags v6wbi_tlb_flags
+# define v7wbi_possible_flags v7wbi_tlb_flags
+# define v7wbi_always_flags v7wbi_tlb_flags
# ifdef _TLB
# define MULTI_TLB 1
# else
@@ -316,6 +330,8 @@ static inline void local_flush_tlb_all(void)
asm("mcr p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc");
if (tlb_flag(TLB_V4_I_FULL | TLB_V6_I_FULL))
asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
+ if (tlb_flag(TLB_V7_UIS_FULL))
+ asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc");
if (tlb_flag(TLB_BTB)) {
/* flush the branch target cache */
@@ -351,6 +367,8 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
asm("mcr p15, 0, %0, c8, c6, 2" : : "r" (asid) : "cc");
if (tlb_flag(TLB_V6_I_ASID))
asm("mcr p15, 0, %0, c8, c5, 2" : : "r" (asid) : "cc");
+ if (tlb_flag(TLB_V7_UIS_ASID))
+ asm("mcr p15, 0, %0, c8, c3, 2" : : "r" (asid) : "cc");
if (tlb_flag(TLB_BTB)) {
/* flush the branch target cache */
@@ -389,6 +407,8 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc");
if (tlb_flag(TLB_V6_I_PAGE))
asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc");
+ if (tlb_flag(TLB_V7_UIS_PAGE))
+ asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (uaddr) : "cc");
if (tlb_flag(TLB_BTB)) {
/* flush the branch target cache */
@@ -424,6 +444,8 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (kaddr) : "cc");
if (tlb_flag(TLB_V6_I_PAGE))
asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc");
+ if (tlb_flag(TLB_V7_UIS_PAGE))
+ asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (kaddr) : "cc");
if (tlb_flag(TLB_BTB)) {
/* flush the branch target cache */
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 7897464e0c2..0da9bc9b3b1 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -386,7 +386,9 @@ do { \
#ifdef CONFIG_MMU
extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
+extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
#else
#define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
#define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
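
The newly exported __copy_to_user_std()/__clear_user_std() sit alongside __copy_to_user()/__clear_user(); ordinary callers still go through copy_to_user(), which adds the access_ok() check. A hedged sketch of the normal path out to user space; the buffer and length handling are placeholders, and the assumption that the _std variants are internal fallbacks rests on the naming, since the diff shows only the declarations:

#include <linux/uaccess.h>
#include <linux/errno.h>

static long example_read_out(void __user *ubuf, const void *kbuf, size_t len)
{
	if (copy_to_user(ubuf, kbuf, len))
		return -EFAULT;
	return len;
}
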