Diffstat (limited to 'include/asm-generic')
-rw-r--r-- | include/asm-generic/audit_change_attr.h |  4
-rw-r--r-- | include/asm-generic/audit_write.h       |  6
-rw-r--r-- | include/asm-generic/barrier.h           | 55
-rw-r--r-- | include/asm-generic/fixmap.h            | 97
-rw-r--r-- | include/asm-generic/hash.h              |  9
-rw-r--r-- | include/asm-generic/int-l64.h           | 49
-rw-r--r-- | include/asm-generic/pgtable.h           | 19
-rw-r--r-- | include/asm-generic/preempt.h           | 35
-rw-r--r-- | include/asm-generic/uaccess.h           |  2
-rw-r--r-- | include/asm-generic/word-at-a-time.h    |  8
10 files changed, 193 insertions, 91 deletions
diff --git a/include/asm-generic/audit_change_attr.h b/include/asm-generic/audit_change_attr.h
index 89b73e5d0fd..a1865537339 100644
--- a/include/asm-generic/audit_change_attr.h
+++ b/include/asm-generic/audit_change_attr.h
@@ -4,9 +4,11 @@ __NR_chmod,
 __NR_fchmod,
 #ifdef __NR_chown
 __NR_chown,
-__NR_fchown,
 __NR_lchown,
 #endif
+#ifdef __NR_fchown
+__NR_fchown,
+#endif
 __NR_setxattr,
 __NR_lsetxattr,
 __NR_fsetxattr,
diff --git a/include/asm-generic/audit_write.h b/include/asm-generic/audit_write.h
index e7020c57b13..274575d7129 100644
--- a/include/asm-generic/audit_write.h
+++ b/include/asm-generic/audit_write.h
@@ -10,6 +10,12 @@ __NR_truncate,
 #ifdef __NR_truncate64
 __NR_truncate64,
 #endif
+#ifdef __NR_ftruncate
+__NR_ftruncate,
+#endif
+#ifdef __NR_ftruncate64
+__NR_ftruncate64,
+#endif
 #ifdef __NR_bind
 __NR_bind,	/* bind can affect fs object only in one way... */
 #endif
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 639d7a4d033..6f692f8ac66 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -1,4 +1,5 @@
-/* Generic barrier definitions, based on MN10300 definitions.
+/*
+ * Generic barrier definitions, originally based on MN10300 definitions.
  *
  * It should be possible to use these on really simple architectures,
  * but it serves more as a starting point for new ports.
@@ -16,35 +17,65 @@
 
 #ifndef __ASSEMBLY__
 
-#define nop() asm volatile ("nop")
+#include <linux/compiler.h>
+
+#ifndef nop
+#define nop()	asm volatile ("nop")
+#endif
 
 /*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
+ * Force strict CPU ordering. And yes, this is required on UP too when we're
+ * talking to devices.
  *
- * This implementation only contains a compiler barrier.
+ * Fall back to compiler barriers if nothing better is provided.
  */
-#define mb()	asm volatile ("": : :"memory")
+#ifndef mb
+#define mb()	barrier()
+#endif
+
+#ifndef rmb
 #define rmb()	mb()
-#define wmb()	asm volatile ("": : :"memory")
+#endif
+
+#ifndef wmb
+#define wmb()	mb()
+#endif
+
+#ifndef read_barrier_depends
+#define read_barrier_depends()	do { } while (0)
+#endif
 
 #ifdef CONFIG_SMP
 #define smp_mb()	mb()
 #define smp_rmb()	rmb()
 #define smp_wmb()	wmb()
+#define smp_read_barrier_depends()	read_barrier_depends()
 #else
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
+#define smp_read_barrier_depends()	do { } while (0)
+#endif
+
+#ifndef set_mb
+#define set_mb(var, value)	do { (var) = (value); mb(); } while (0)
 #endif
 
-#define set_mb(var, value) do { var = value; mb(); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+#define smp_store_release(p, v)						\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	smp_mb();							\
+	ACCESS_ONCE(*p) = (v);						\
+} while (0)
 
-#define read_barrier_depends()		do {} while (0)
-#define smp_read_barrier_depends()	do {} while (0)
+#define smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	compiletime_assert_atomic_type(*p);				\
+	smp_mb();							\
+	___p1;								\
+})
 
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_GENERIC_BARRIER_H */
diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h
new file mode 100644
index 00000000000..5a64ca4621f
--- /dev/null
+++ b/include/asm-generic/fixmap.h
@@ -0,0 +1,97 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ * x86_32 and x86_64 integration by Gustavo F. Padovan, February 2009
+ * Break out common bits to asm-generic by Mark Salter, November 2013
+ */
+
+#ifndef __ASM_GENERIC_FIXMAP_H
+#define __ASM_GENERIC_FIXMAP_H
+
+#include <linux/bug.h>
+
+#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))
+#define __virt_to_fix(x)	((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
+
+#ifndef __ASSEMBLY__
+/*
+ * 'index to address' translation. If anyone tries to use the idx
+ * directly without translation, we catch the bug with a NULL-deference
+ * kernel oops. Illegal ranges of incoming indices are caught too.
+ */
+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
+{
+	BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
+	return __fix_to_virt(idx);
+}
+
+static inline unsigned long virt_to_fix(const unsigned long vaddr)
+{
+	BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
+	return __virt_to_fix(vaddr);
+}
+
+/*
+ * Provide some reasonable defaults for page flags.
+ * Not all architectures use all of these different types and some
+ * architectures use different names.
+ */
+#ifndef FIXMAP_PAGE_NORMAL
+#define FIXMAP_PAGE_NORMAL PAGE_KERNEL
+#endif
+#ifndef FIXMAP_PAGE_NOCACHE
+#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NOCACHE
+#endif
+#ifndef FIXMAP_PAGE_IO
+#define FIXMAP_PAGE_IO PAGE_KERNEL_IO
+#endif
+#ifndef FIXMAP_PAGE_CLEAR
+#define FIXMAP_PAGE_CLEAR __pgprot(0)
+#endif
+
+#ifndef set_fixmap
+#define set_fixmap(idx, phys)				\
+	__set_fixmap(idx, phys, FIXMAP_PAGE_NORMAL)
+#endif
+
+#ifndef clear_fixmap
+#define clear_fixmap(idx)			\
+	__set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR)
+#endif
+
+/* Return a pointer with offset calculated */
+#define __set_fixmap_offset(idx, phys, flags)		      \
+({							      \
+	unsigned long addr;				      \
+	__set_fixmap(idx, phys, flags);			      \
+	addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1)); \
+	addr;						      \
+})
+
+#define set_fixmap_offset(idx, phys) \
+	__set_fixmap_offset(idx, phys, FIXMAP_PAGE_NORMAL)
+
+/*
+ * Some hardware wants to get fixmapped without caching.
+ */
+#define set_fixmap_nocache(idx, phys) \
+	__set_fixmap(idx, phys, FIXMAP_PAGE_NOCACHE)
+
+#define set_fixmap_offset_nocache(idx, phys) \
+	__set_fixmap_offset(idx, phys, FIXMAP_PAGE_NOCACHE)
+
+/*
+ * Some fixmaps are for IO
+ */
+#define set_fixmap_io(idx, phys) \
+	__set_fixmap(idx, phys, FIXMAP_PAGE_IO)
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_GENERIC_FIXMAP_H */
diff --git a/include/asm-generic/hash.h b/include/asm-generic/hash.h
new file mode 100644
index 00000000000..b6312843dbd
--- /dev/null
+++ b/include/asm-generic/hash.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_GENERIC_HASH_H
+#define __ASM_GENERIC_HASH_H
+
+struct fast_hash_ops;
+static inline void setup_arch_fast_hash(struct fast_hash_ops *ops)
+{
+}
+
+#endif /* __ASM_GENERIC_HASH_H */
diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
deleted file mode 100644
index 27d4ec0dfce..00000000000
--- a/include/asm-generic/int-l64.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * asm-generic/int-l64.h
- *
- * Integer declarations for architectures which use "long"
- * for 64-bit types.
- */
-#ifndef _ASM_GENERIC_INT_L64_H
-#define _ASM_GENERIC_INT_L64_H
-
-#include <uapi/asm-generic/int-l64.h>
-
-
-#ifndef __ASSEMBLY__
-
-typedef signed char s8;
-typedef unsigned char u8;
-
-typedef signed short s16;
-typedef unsigned short u16;
-
-typedef signed int s32;
-typedef unsigned int u32;
-
-typedef signed long s64;
-typedef unsigned long u64;
-
-#define S8_C(x)  x
-#define U8_C(x)  x ## U
-#define S16_C(x) x
-#define U16_C(x) x ## U
-#define S32_C(x) x
-#define U32_C(x) x ## U
-#define S64_C(x) x ## L
-#define U64_C(x) x ## UL
-
-#else /* __ASSEMBLY__ */
-
-#define S8_C(x)  x
-#define U8_C(x)  x
-#define S16_C(x) x
-#define U16_C(x) x
-#define S32_C(x) x
-#define U32_C(x) x
-#define S64_C(x) x
-#define U64_C(x) x
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_GENERIC_INT_L64_H */
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index f330d28e4d0..8e4f41d9af4 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -217,7 +217,7 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
 #endif
 
 #ifndef pte_accessible
-# define pte_accessible(pte)		((void)(pte),1)
+# define pte_accessible(mm, pte)	((void)(pte), 1)
 #endif
 
 #ifndef flush_tlb_fix_spurious_fault
@@ -558,6 +558,18 @@ static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
 }
 #endif
 
+#ifndef pmd_move_must_withdraw
+static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
+					 spinlock_t *old_pmd_ptl)
+{
+	/*
+	 * With split pmd lock we also need to move preallocated
+	 * PTE page table if new_pmd is on different PMD page table.
+	 */
+	return new_pmd_ptl != old_pmd_ptl;
+}
+#endif
+
 /*
  * This function is meant to be used by sites walking pagetables with
  * the mmap_sem hold in read mode to protect against MADV_DONTNEED and
@@ -599,11 +611,10 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	barrier();
 #endif
-	if (pmd_none(pmdval))
+	if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
 		return 1;
 	if (unlikely(pmd_bad(pmdval))) {
-		if (!pmd_trans_huge(pmdval))
-			pmd_clear_bad(pmd);
+		pmd_clear_bad(pmd);
 		return 1;
 	}
 	return 0;
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index ddf2b420ac8..1cd3f5d767a 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -3,13 +3,11 @@
 
 #include <linux/thread_info.h>
 
-/*
- * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
- * that think a non-zero value indicates we cannot preempt.
- */
+#define PREEMPT_ENABLED	(0)
+
 static __always_inline int preempt_count(void)
 {
-	return current_thread_info()->preempt_count & ~PREEMPT_NEED_RESCHED;
+	return current_thread_info()->preempt_count;
 }
 
 static __always_inline int *preempt_count_ptr(void)
@@ -17,11 +15,6 @@ static __always_inline int *preempt_count_ptr(void)
 	return &current_thread_info()->preempt_count;
 }
 
-/*
- * We now loose PREEMPT_NEED_RESCHED and cause an extra reschedule; however the
- * alternative is loosing a reschedule. Better schedule too often -- also this
- * should be a very rare operation.
- */
 static __always_inline void preempt_count_set(int pc)
 {
 	*preempt_count_ptr() = pc;
@@ -41,28 +34,17 @@ static __always_inline void preempt_count_set(int pc)
 	task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
 } while (0)
 
-/*
- * We fold the NEED_RESCHED bit into the preempt count such that
- * preempt_enable() can decrement and test for needing to reschedule with a
- * single instruction.
- *
- * We invert the actual bit, so that when the decrement hits 0 we know we both
- * need to resched (the bit is cleared) and can resched (no preempt count).
- */
-
 static __always_inline void set_preempt_need_resched(void)
 {
-	*preempt_count_ptr() &= ~PREEMPT_NEED_RESCHED;
 }
 
 static __always_inline void clear_preempt_need_resched(void)
 {
-	*preempt_count_ptr() |= PREEMPT_NEED_RESCHED;
 }
 
 static __always_inline bool test_preempt_need_resched(void)
 {
-	return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED);
+	return false;
 }
 
 /*
@@ -81,7 +63,12 @@ static __always_inline void __preempt_count_sub(int val)
 
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
-	return !--*preempt_count_ptr();
+	/*
+	 * Because of load-store architectures cannot do per-cpu atomic
+	 * operations; we cannot use PREEMPT_NEED_RESCHED because it might get
+	 * lost.
+	 */
+	return !--*preempt_count_ptr() && tif_need_resched();
 }
 
 /*
@@ -89,7 +76,7 @@ static __always_inline bool __preempt_count_dec_and_test(void)
  */
 static __always_inline bool should_resched(void)
 {
-	return unlikely(!*preempt_count_ptr());
+	return unlikely(!preempt_count() && tif_need_resched());
 }
 
 #ifdef CONFIG_PREEMPT
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index dc1269c74a5..72d8803832f 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -3,7 +3,7 @@
 
 /*
  * User space memory access functions, these should work
- * on a ny machine that has kernel and user data in the same
+ * on any machine that has kernel and user data in the same
  * address space, e.g. all NOMMU machines.
  */
 #include <linux/sched.h>
diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h
index 3f21f1b72e4..d3909effd72 100644
--- a/include/asm-generic/word-at-a-time.h
+++ b/include/asm-generic/word-at-a-time.h
@@ -49,4 +49,12 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct
 	return (val + c->high_bits) & ~rhs;
 }
 
+#ifndef zero_bytemask
+#ifdef CONFIG_64BIT
+#define zero_bytemask(mask) (~0ul << fls64(mask))
+#else
+#define zero_bytemask(mask) (~0ul << fls(mask))
+#endif /* CONFIG_64BIT */
+#endif /* zero_bytemask */
+
 #endif /* _ASM_WORD_AT_A_TIME_H */
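The headline addition in the barrier.h change above is the generic smp_store_release()/smp_load_acquire() pair, built here from a full smp_mb() plus ACCESS_ONCE(). A minimal sketch of the intended release/acquire pairing, assuming a single-producer/single-consumer handoff; the struct and function names below are invented for illustration and are not part of this diff:

/*
 * Hypothetical producer/consumer handoff using the new generic
 * smp_store_release()/smp_load_acquire() pair.
 */
struct message {
	int payload;
	int ready;		/* published last by the producer */
};

static struct message msg;

static void producer(int value)
{
	msg.payload = value;
	/* Order the payload store before publishing the flag. */
	smp_store_release(&msg.ready, 1);
}

static int consumer(void)
{
	/* Pairs with the release above: observing ready == 1 ... */
	if (smp_load_acquire(&msg.ready))
		return msg.payload;	/* ... guarantees the payload is visible. */
	return -1;
}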
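Likewise, the new asm-generic/fixmap.h means an architecture only supplies its fixed_addresses enum and a __set_fixmap() implementation; the index/address math and the set_fixmap_*() wrappers come from this header. A hedged sketch of arch-side usage, with an invented fixmap slot and helper (a real port defines its own enum and mapping primitive):

/* Invented example slot; each architecture declares its own enum. */
enum fixed_addresses {
	FIX_EARLYCON_BASE,
	__end_of_fixed_addresses
};

/*
 * Map an early console at a compile-time-known virtual address.
 * fix_to_virt() constant-folds for a constant index, and its
 * BUILD_BUG_ON() rejects out-of-range indices at compile time.
 */
static void __iomem *earlycon_map(phys_addr_t paddr)
{
	set_fixmap_io(FIX_EARLYCON_BASE, paddr & PAGE_MASK);
	return (void __iomem *)(fix_to_virt(FIX_EARLYCON_BASE) +
				(paddr & ~PAGE_MASK));
}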