Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/bug.h          |  2
-rw-r--r--  include/asm-generic/percpu.h       |  8
-rw-r--r--  include/asm-generic/pgtable.h      | 44
-rw-r--r--  include/asm-generic/unaligned.h    | 16
-rw-r--r--  include/asm-generic/vmlinux.lds.h  | 14
5 files changed, 31 insertions, 53 deletions
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 7f30cce5285..344e3091af2 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -28,7 +28,7 @@ struct bug_entry {
#endif
#ifndef HAVE_ARCH_BUG_ON
-#define BUG_ON(condition) do { if (unlikely((condition)!=0)) BUG(); } while(0)
+#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0)
#endif
#ifndef HAVE_ARCH_WARN_ON
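
For context (an illustration, not part of the patch): unlikely() already normalises its argument with __builtin_expect(!!(x), 0), so the explicit "!= 0" in the old generic BUG_ON() was redundant; any scalar expression, including a pointer test, works directly. A minimal userspace stand-in, with all helper definitions assumed for the sketch:

#include <stdio.h>
#include <stdlib.h>

/* userspace stand-ins for the kernel helpers referenced by the hunk */
#define unlikely(x)       __builtin_expect(!!(x), 0)
#define BUG()             do { fprintf(stderr, "BUG at %s:%d\n", __FILE__, __LINE__); abort(); } while (0)
#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)

int main(void)
{
        char *p = malloc(16);

        BUG_ON(!p);               /* pointer test, no explicit "!= 0" needed */
        BUG_ON(sizeof(long) < 4); /* integer test works the same way */
        free(p);
        return 0;
}
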
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index d984a904143..d85172e9ed4 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -14,6 +14,11 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
#define DEFINE_PER_CPU(type, name) \
__attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+ __attribute__((__section__(".data.percpu.shared_aligned"))) \
+ __typeof__(type) per_cpu__##name \
+ ____cacheline_aligned_in_smp
+
/* var is in discarded region: offset to particular copy we want */
#define per_cpu(var, cpu) (*({ \
extern int simple_identifier_##var(void); \
@@ -34,6 +39,9 @@ do { \
#define DEFINE_PER_CPU(type, name) \
__typeof__(type) per_cpu__##name
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+ DEFINE_PER_CPU(type, name)
+
#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
#define __get_cpu_var(var) per_cpu__##var
#define __raw_get_cpu_var(var) per_cpu__##var
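
The new DEFINE_PER_CPU_SHARED_ALIGNED() places frequently written per-cpu data in its own subsection and pads it to a cache-line boundary, so a hot variable does not share a line with unrelated per-cpu data (false sharing). A minimal userspace sketch of the same pattern; the macro name, section name and the 64-byte line size are assumptions for the sketch, not kernel values:

#include <stdio.h>
#include <stdint.h>

#define CACHELINE_SIZE 64       /* assumed cache-line size for the sketch */

/* userspace analogue of DEFINE_PER_CPU_SHARED_ALIGNED(): dedicated section
 * plus cache-line alignment for a variable that is written often */
#define DEFINE_SHARED_ALIGNED(type, name) \
        __attribute__((__section__("data_percpu_shared_aligned"))) \
        __attribute__((aligned(CACHELINE_SIZE))) type name

DEFINE_SHARED_ALIGNED(long, hot_counter);

int main(void)
{
        printf("hot_counter at %p, line-aligned: %s\n",
               (void *)&hot_counter,
               ((uintptr_t)&hot_counter % CACHELINE_SIZE) == 0 ? "yes" : "no");
        return 0;
}
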
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 7d7bcf990e9..f605e8d0eed 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -3,25 +3,6 @@
#ifndef __ASSEMBLY__
-#ifndef __HAVE_ARCH_PTEP_ESTABLISH
-/*
- * Establish a new mapping:
- * - flush the old one
- * - update the page tables
- * - inform the TLB about the new one
- *
- * We hold the mm semaphore for reading, and the pte lock.
- *
- * Note: the old pte is known to not be writable, so we don't need to
- * worry about dirty bits etc getting lost.
- */
-#define ptep_establish(__vma, __address, __ptep, __entry) \
-do { \
- set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
- flush_tlb_page(__vma, __address); \
-} while (0)
-#endif
-
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
* Largely same as above, but only sets the access flags (dirty,
@@ -68,31 +49,6 @@ do { \
})
#endif
-#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-#define ptep_test_and_clear_dirty(__vma, __address, __ptep) \
-({ \
- pte_t __pte = *__ptep; \
- int r = 1; \
- if (!pte_dirty(__pte)) \
- r = 0; \
- else \
- set_pte_at((__vma)->vm_mm, (__address), (__ptep), \
- pte_mkclean(__pte)); \
- r; \
-})
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
-#define ptep_clear_flush_dirty(__vma, __address, __ptep) \
-({ \
- int __dirty; \
- __dirty = ptep_test_and_clear_dirty(__vma, __address, __ptep); \
- if (__dirty) \
- flush_tlb_page(__vma, __address); \
- __dirty; \
-})
-#endif
-
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep) \
({ \
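
The helpers removed above all shared one shape: read the entry, test a flag, clear it only if it was set, and pay for the expensive TLB flush only when something actually changed. A generic illustration of that shape in plain C (a sketch with assumed names, not page-table code):

#include <stdbool.h>
#include <stdio.h>

#define FLAG_DIRTY 0x1u

/* stand-in for flush_tlb_page(): in the sketch it only reports the flush */
static void flush_stub(unsigned long addr)
{
        printf("flush for %#lx\n", addr);
}

/* same shape as the removed ptep_clear_flush_dirty(): test-and-clear the
 * flag, and flush only if the flag was really set */
static bool test_clear_and_flush(unsigned int *flags, unsigned long addr)
{
        bool was_dirty = (*flags & FLAG_DIRTY) != 0;

        if (was_dirty) {
                *flags &= ~FLAG_DIRTY;
                flush_stub(addr);
        }
        return was_dirty;
}

int main(void)
{
        unsigned int flags = FLAG_DIRTY;

        printf("first call:  %d\n", test_clear_and_flush(&flags, 0x1000));
        printf("second call: %d\n", test_clear_and_flush(&flags, 0x1000));
        return 0;
}
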
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
index 09ec447fe2a..16a466e5068 100644
--- a/include/asm-generic/unaligned.h
+++ b/include/asm-generic/unaligned.h
@@ -18,7 +18,8 @@
#define get_unaligned(ptr) \
__get_unaligned((ptr), sizeof(*(ptr)))
#define put_unaligned(x,ptr) \
- __put_unaligned((__u64)(x), (ptr), sizeof(*(ptr)))
+ ((void)sizeof(*(ptr)=(x)),\
+ __put_unaligned((__force __u64)(x), (ptr), sizeof(*(ptr))))
/*
* This function doesn't actually exist. The idea is that when
@@ -95,21 +96,21 @@ static inline void __ustw(__u16 val, __u16 *addr)
default: \
bad_unaligned_access_length(); \
}; \
- (__typeof__(*(ptr)))val; \
+ (__force __typeof__(*(ptr)))val; \
})
#define __put_unaligned(val, ptr, size) \
-do { \
+({ \
void *__gu_p = ptr; \
switch (size) { \
case 1: \
- *(__u8 *)__gu_p = val; \
+ *(__u8 *)__gu_p = (__force __u8)val; \
break; \
case 2: \
- __ustw(val, __gu_p); \
+ __ustw((__force __u16)val, __gu_p); \
break; \
case 4: \
- __ustl(val, __gu_p); \
+ __ustl((__force __u32)val, __gu_p); \
break; \
case 8: \
__ustq(val, __gu_p); \
@@ -117,6 +118,7 @@ do { \
default: \
bad_unaligned_access_length(); \
}; \
-} while(0)
+ (void)0; \
+})
#endif /* _ASM_GENERIC_UNALIGNED_H */
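
Two things change in this file: put_unaligned() now type-checks its arguments with the unevaluated (void)sizeof(*(ptr) = (x)) expression, and the casts gain __force annotations for sparse; __put_unaligned() also becomes a statement expression so it can appear in expression context. A userspace sketch of the type-check idiom and of byte-wise unaligned access via memcpy (macro names assumed, not the kernel implementation):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define my_put_unaligned(x, ptr)                                        \
        ({                                                              \
                (void)sizeof(*(ptr) = (x));  /* type check, never evaluated */ \
                __typeof__(*(ptr)) __val = (x);                         \
                memcpy((ptr), &__val, sizeof(__val));                   \
                (void)0;                                                \
        })

#define my_get_unaligned(ptr)                                           \
        ({                                                              \
                __typeof__(*(ptr)) __val;                               \
                memcpy(&__val, (ptr), sizeof(__val));                   \
                __val;                                                  \
        })

int main(void)
{
        unsigned char buf[8] = { 0 };
        uint32_t *p = (uint32_t *)(buf + 1);    /* deliberately misaligned */

        my_put_unaligned(0x12345678u, p);
        printf("read back: 0x%08x\n", my_get_unaligned(p));
        return 0;
}
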
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 84155eb67f1..0240e0506a0 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -224,7 +224,11 @@
}
#define NOTES \
- .notes : { *(.note.*) } :note
+ .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start_notes) = .; \
+ *(.note.*) \
+ VMLINUX_SYMBOL(__stop_notes) = .; \
+ }
#define INITCALLS \
*(.initcall0.init) \
@@ -245,3 +249,11 @@
*(.initcall7.init) \
*(.initcall7s.init)
+#define PERCPU(align) \
+ . = ALIGN(align); \
+ __per_cpu_start = .; \
+ .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \
+ *(.data.percpu) \
+ *(.data.percpu.shared_aligned) \
+ } \
+ __per_cpu_end = .;
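
Both linker-script hunks follow the same idiom: collect input sections into one output section and bracket it with start/end symbols (__start_notes/__stop_notes, __per_cpu_start/__per_cpu_end) so C code can treat the area as a single sized region. A userspace sketch of that idiom, with section and symbol names assumed; GNU ld generates __start_<sec>/__stop_<sec> automatically for sections whose names are valid C identifiers, which is why the sketch avoids dots in the section name:

#include <stdio.h>
#include <stddef.h>

/* place a variable in a dedicated section, analogous to .data.percpu */
#define MY_PER_CPU(type, name) \
        __attribute__((__section__("my_percpu"), used)) type name

MY_PER_CPU(long, cpu_counter) = 42;
MY_PER_CPU(long, cpu_ticks)   = 7;

/* GNU ld provides these for identifier-named sections; the kernel instead
 * defines __per_cpu_start/__per_cpu_end explicitly in the PERCPU() macro */
extern long __start_my_percpu[], __stop_my_percpu[];

int main(void)
{
        size_t bytes = (char *)__stop_my_percpu - (char *)__start_my_percpu;

        printf("section size: %zu bytes\n", bytes);
        for (long *p = __start_my_percpu; p < __stop_my_percpu; p++)
                printf("entry: %ld\n", *p);
        return 0;
}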