Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/atomic.h  |  6
-rw-r--r--  include/asm-generic/cmpxchg.h | 10
-rw-r--r--  include/asm-generic/hugetlb.h | 40
-rw-r--r--  include/asm-generic/pgtable.h | 10
-rw-r--r--  include/asm-generic/tlb.h     |  7
-rw-r--r--  include/asm-generic/unistd.h  | 17
6 files changed, 66 insertions(+), 24 deletions(-)
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 1ced6413ea0..33bd2de3bc1 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -136,12 +136,6 @@ static inline void atomic_dec(atomic_t *v)
#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
-#define cmpxchg_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
- (unsigned long)(n), sizeof(*(ptr))))
-
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
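
The block removed above relocates the generic cmpxchg_local()/cmpxchg64_local() fallbacks into asm-generic/cmpxchg.h (next diff), where they gain #ifndef guards. The surviving atomic_cmpxchg() is the usual building block for compare-and-swap retry loops such as the __atomic_add_unless() shown in context; a minimal sketch of that pattern, not part of the patch (the _sketch name is invented):

static inline int atomic_inc_not_zero_sketch(atomic_t *v)
{
	int c = atomic_read(v);			/* snapshot the counter */

	while (c != 0) {
		/* try to install c + 1; returns the value actually seen */
		int old = atomic_cmpxchg(v, c, c + 1);
		if (old == c)
			return 1;		/* CAS won */
		c = old;			/* lost a race, retry with new value */
	}
	return 0;				/* counter was zero, left untouched */
}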
diff --git a/include/asm-generic/cmpxchg.h b/include/asm-generic/cmpxchg.h
index 14883026015..811fb1e9b06 100644
--- a/include/asm-generic/cmpxchg.h
+++ b/include/asm-generic/cmpxchg.h
@@ -92,6 +92,16 @@ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
*/
#include <asm-generic/cmpxchg-local.h>
+#ifndef cmpxchg_local
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+ (unsigned long)(n), sizeof(*(ptr))))
+#endif
+
+#ifndef cmpxchg64_local
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#endif
+
#define cmpxchg(ptr, o, n) cmpxchg_local((ptr), (o), (n))
#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
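
Because the fallbacks are now wrapped in #ifndef, an architecture with a cheaper CPU-local cmpxchg can define its own macro before this header is pulled in and the generic version steps aside. A hedged sketch of such an override (arch_cmpxchg_local() is an invented name):

/* in the arch's <asm/cmpxchg.h> */
#define cmpxchg_local(ptr, o, n)  arch_cmpxchg_local((ptr), (o), (n))
#include <asm-generic/cmpxchg.h>  /* #ifndef cmpxchg_local now skips the fallback */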
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
new file mode 100644
index 00000000000..d06079c774a
--- /dev/null
+++ b/include/asm-generic/hugetlb.h
@@ -0,0 +1,40 @@
+#ifndef _ASM_GENERIC_HUGETLB_H
+#define _ASM_GENERIC_HUGETLB_H
+
+static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
+{
+ return mk_pte(page, pgprot);
+}
+
+static inline int huge_pte_write(pte_t pte)
+{
+ return pte_write(pte);
+}
+
+static inline int huge_pte_dirty(pte_t pte)
+{
+ return pte_dirty(pte);
+}
+
+static inline pte_t huge_pte_mkwrite(pte_t pte)
+{
+ return pte_mkwrite(pte);
+}
+
+static inline pte_t huge_pte_mkdirty(pte_t pte)
+{
+ return pte_mkdirty(pte);
+}
+
+static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
+{
+ return pte_modify(pte, newprot);
+}
+
+static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ pte_clear(mm, addr, ptep);
+}
+
+#endif /* _ASM_GENERIC_HUGETLB_H */
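
This new header gives each huge_pte_* operation a default that simply forwards to the corresponding pte_* helper, so generic hugetlb code can call huge_pte_*() uniformly while architectures whose huge PTEs differ from normal PTEs override individual functions. A sketch of the caller side, modelled loosely on make_huge_pte() in mm/hugetlb.c (simplified, not verbatim):

pte_t entry = mk_huge_pte(page, vma->vm_page_prot);

if (writable)
	entry = huge_pte_mkwrite(huge_pte_mkdirty(entry));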
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index bfd87685fc1..a59ff51b016 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -7,6 +7,16 @@
#include <linux/mm_types.h>
#include <linux/bug.h>
+/*
+ * On almost all architectures and configurations, 0 can be used as the
+ * upper ceiling to free_pgtables(): on many architectures it has the same
+ * effect as using TASK_SIZE. However, there is one configuration which
+ * must impose a more careful limit, to avoid freeing kernel pgtables.
+ */
+#ifndef USER_PGTABLES_CEILING
+#define USER_PGTABLES_CEILING 0UL
+#endif
+
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
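
Architectures where user and kernel page tables can share a top level override the ceiling in their own <asm/pgtable.h>, so free_pgtables() stops before it reaches kernel entries; ARM, for instance, pins it to TASK_SIZE. A sketch of such an override (placement is illustrative):

/* in the arch's <asm/pgtable.h>, seen before the generic header */
#define USER_PGTABLES_CEILING	TASK_SIZE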
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 25f01d0bc14..b1b1fa6ffff 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -99,7 +99,12 @@ struct mmu_gather {
unsigned int need_flush : 1, /* Did free PTEs */
fast_mode : 1; /* No batching */
- unsigned int fullmm;
+ /* we are in the middle of an operation to clear
+ * a full mm and can make some optimizations */
+ unsigned int fullmm : 1,
+ /* we have performed an operation which
+ * requires a complete flush of the tlb */
+ need_flush_all : 1;
struct mmu_gather_batch *active;
struct mmu_gather_batch local;
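
The new need_flush_all bit lets arch code demand a complete TLB flush when per-address invalidation would not be enough; the motivating case is x86-32 PAE, where changes to top-level pagetable entries only take effect after a full cr3 reload. A hedged sketch of the producer side (the function name is invented; tlb_remove_page() is the real batching helper):

static inline void pmd_free_tlb_sketch(struct mmu_gather *tlb, pmd_t *pmd)
{
	/* a freed top-level entry needs a full flush, not a ranged one */
	tlb->need_flush_all = 1;
	tlb_remove_page(tlb, virt_to_page(pmd));
}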
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index 4077b5d9ff8..0501fa3f783 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -9,20 +9,3 @@
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_LLSEEK
#endif
-
-/*
- * "Conditional" syscalls
- *
- * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
- * but it doesn't work on all toolchains, so we just do it by hand
- */
-#ifndef cond_syscall
-#ifdef CONFIG_SYMBOL_PREFIX
-#define __SYMBOL_PREFIX CONFIG_SYMBOL_PREFIX
-#else
-#define __SYMBOL_PREFIX
-#endif
-#define cond_syscall(x) asm(".weak\t" __SYMBOL_PREFIX #x "\n\t" \
- ".set\t" __SYMBOL_PREFIX #x "," \
- __SYMBOL_PREFIX "sys_ni_syscall")
-#endif
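
The removed cond_syscall() machinery makes each listed syscall a weak assembler alias for sys_ni_syscall(), so an entry whose subsystem is compiled out still links and simply returns -ENOSYS. This diffstat is limited to include/asm-generic, so only the removal is visible here; the definition is presumably consolidated into a common header rather than dropped. Its usage in kernel/sys_ni.c is unchanged, e.g.:

cond_syscall(sys_swapon);	/* -ENOSYS unless CONFIG_SWAP provides it */
cond_syscall(sys_quotactl);	/* -ENOSYS unless CONFIG_QUOTA provides it */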