Diffstat (limited to 'include/asm-generic')
-rw-r--r-- | include/asm-generic/audit_change_attr.h | 18
-rw-r--r-- | include/asm-generic/audit_dir_write.h   | 14
-rw-r--r-- | include/asm-generic/bug.h               | 19
-rw-r--r-- | include/asm-generic/memory_model.h      | 27
-rw-r--r-- | include/asm-generic/mutex-null.h        | 15
-rw-r--r-- | include/asm-generic/percpu.h            |  4
-rw-r--r-- | include/asm-generic/pgtable.h           | 11
-rw-r--r-- | include/asm-generic/rtc.h               |  7
-rw-r--r-- | include/asm-generic/sections.h          |  1
-rw-r--r-- | include/asm-generic/vmlinux.lds.h       | 28
10 files changed, 109 insertions, 35 deletions
diff --git a/include/asm-generic/audit_change_attr.h b/include/asm-generic/audit_change_attr.h
new file mode 100644
index 00000000000..cb05bf69745
--- /dev/null
+++ b/include/asm-generic/audit_change_attr.h
@@ -0,0 +1,18 @@
+__NR_chmod,
+__NR_fchmod,
+__NR_chown,
+__NR_fchown,
+__NR_lchown,
+__NR_setxattr,
+__NR_lsetxattr,
+__NR_fsetxattr,
+__NR_removexattr,
+__NR_lremovexattr,
+__NR_fremovexattr,
+__NR_fchownat,
+__NR_fchmodat,
+#ifdef __NR_chown32
+__NR_chown32,
+__NR_fchown32,
+__NR_lchown32,
+#endif
diff --git a/include/asm-generic/audit_dir_write.h b/include/asm-generic/audit_dir_write.h
new file mode 100644
index 00000000000..161a7a58fba
--- /dev/null
+++ b/include/asm-generic/audit_dir_write.h
@@ -0,0 +1,14 @@
+__NR_rename,
+__NR_mkdir,
+__NR_rmdir,
+__NR_creat,
+__NR_link,
+__NR_unlink,
+__NR_symlink,
+__NR_mknod,
+__NR_mkdirat,
+__NR_mknodat,
+__NR_unlinkat,
+__NR_renameat,
+__NR_linkat,
+__NR_symlinkat,
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 0cd9711895f..8ceab7bcd8b 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -38,4 +38,23 @@
 #endif
 #endif
 
+#define WARN_ON_ONCE(condition)                         \
+({                                                      \
+        static int __warn_once = 1;                     \
+        int __ret = 0;                                  \
+                                                        \
+        if (unlikely((condition) && __warn_once)) {     \
+                __warn_once = 0;                        \
+                WARN_ON(1);                             \
+                __ret = 1;                              \
+        }                                               \
+        __ret;                                          \
+})
+
+#ifdef CONFIG_SMP
+# define WARN_ON_SMP(x)         WARN_ON(x)
+#else
+# define WARN_ON_SMP(x)         do { } while (0)
+#endif
+
 #endif
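Usage sketch for the WARN_ON_ONCE() macro added in the bug.h hunk above: it warns only on the first occurrence of its condition, and with this definition it also evaluates to 1 only on that first trigger (0 on later ones). The helper below is hypothetical and only illustrates the call pattern, assuming an architecture that uses the generic bug.h:

    #include <asm/bug.h>    /* WARN_ON()/WARN_ON_ONCE() via the generic header */

    /* Hypothetical helper: complain once about a negative count instead of
     * flooding the log; later violations fall through silently because the
     * macro above returns 1 only the first time it fires. */
    static inline void check_count(int count)
    {
            if (WARN_ON_ONCE(count < 0))
                    return;
    }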
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index 0cfb086dd37..8078cbd2c01 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -23,29 +23,23 @@
 #endif /* CONFIG_DISCONTIGMEM */
 
-#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
-struct page;
-/* this is useful when inlined pfn_to_page is too big */
-extern struct page *pfn_to_page(unsigned long pfn);
-extern unsigned long page_to_pfn(struct page *page);
-#else
 /*
  * supports 3 memory models.
  */
 #if defined(CONFIG_FLATMEM)
 
-#define pfn_to_page(pfn)        (mem_map + ((pfn) - ARCH_PFN_OFFSET))
-#define page_to_pfn(page)       ((unsigned long)((page) - mem_map) + \
+#define __pfn_to_page(pfn)      (mem_map + ((pfn) - ARCH_PFN_OFFSET))
+#define __page_to_pfn(page)     ((unsigned long)((page) - mem_map) + \
                                  ARCH_PFN_OFFSET)
 #elif defined(CONFIG_DISCONTIGMEM)
 
-#define pfn_to_page(pfn)                        \
+#define __pfn_to_page(pfn)                      \
 ({      unsigned long __pfn = (pfn);            \
         unsigned long __nid = arch_pfn_to_nid(pfn);  \
         NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\
 })
 
-#define page_to_pfn(pg)                                         \
+#define __page_to_pfn(pg)                                       \
 ({      struct page *__pg = (pg);                               \
         struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg));    \
         (unsigned long)(__pg - __pgdat->node_mem_map) +         \
@@ -57,18 +51,27 @@ extern unsigned long page_to_pfn(struct page *page);
  * Note: section's mem_map is encorded to reflect its start_pfn.
  * section[i].section_mem_map == mem_map's address - start_pfn;
  */
-#define page_to_pfn(pg)                                 \
+#define __page_to_pfn(pg)                               \
 ({      struct page *__pg = (pg);                       \
         int __sec = page_to_section(__pg);              \
         __pg - __section_mem_map_addr(__nr_to_section(__sec)); \
 })
 
-#define pfn_to_page(pfn)                                \
+#define __pfn_to_page(pfn)                              \
 ({      unsigned long __pfn = (pfn);                    \
         struct mem_section *__sec = __pfn_to_section(__pfn);   \
         __section_mem_map_addr(__sec) + __pfn;          \
 })
 #endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */
+
+#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
+struct page;
+/* this is useful when inlined pfn_to_page is too big */
+extern struct page *pfn_to_page(unsigned long pfn);
+extern unsigned long page_to_pfn(struct page *page);
+#else
+#define page_to_pfn __page_to_pfn
+#define pfn_to_page __pfn_to_page
 #endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */
 
 #endif /* __ASSEMBLY__ */
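The memory_model.h hunks above rename the inline converters to __pfn_to_page()/__page_to_pfn() and, unless CONFIG_OUT_OF_LINE_PFN_TO_PAGE is set, simply map the plain names onto them; with the option set, only extern declarations remain. A sketch of how the out-of-line variants can then be provided as thin wrappers (the placement in common mm code and the exports are illustrative assumptions, not part of this diff):

    #include <linux/mm.h>
    #include <linux/module.h>

    #ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
    /* Out-of-line definitions built on the generic inline helpers. */
    struct page *pfn_to_page(unsigned long pfn)
    {
            return __pfn_to_page(pfn);
    }
    EXPORT_SYMBOL(pfn_to_page);

    unsigned long page_to_pfn(struct page *page)
    {
            return __page_to_pfn(page);
    }
    EXPORT_SYMBOL(page_to_pfn);
    #endif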
diff --git a/include/asm-generic/mutex-null.h b/include/asm-generic/mutex-null.h
index 5cf8b7ce0c4..254a126ede5 100644
--- a/include/asm-generic/mutex-null.h
+++ b/include/asm-generic/mutex-null.h
@@ -10,15 +10,10 @@
 #ifndef _ASM_GENERIC_MUTEX_NULL_H
 #define _ASM_GENERIC_MUTEX_NULL_H
 
-/* extra parameter only needed for mutex debugging: */
-#ifndef __IP__
-# define __IP__
-#endif
-
-#define __mutex_fastpath_lock(count, fail_fn)           fail_fn(count __RET_IP__)
-#define __mutex_fastpath_lock_retval(count, fail_fn)    fail_fn(count __RET_IP__)
-#define __mutex_fastpath_unlock(count, fail_fn)         fail_fn(count __RET_IP__)
-#define __mutex_fastpath_trylock(count, fail_fn)        fail_fn(count)
-#define __mutex_slowpath_needs_to_unlock()              1
+#define __mutex_fastpath_lock(count, fail_fn)           fail_fn(count)
+#define __mutex_fastpath_lock_retval(count, fail_fn)    fail_fn(count)
+#define __mutex_fastpath_unlock(count, fail_fn)         fail_fn(count)
+#define __mutex_fastpath_trylock(count, fail_fn)        fail_fn(count)
+#define __mutex_slowpath_needs_to_unlock()              1
 
 #endif
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index c0caf433a7d..e160e04290f 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -7,6 +7,8 @@
 
 extern unsigned long __per_cpu_offset[NR_CPUS];
 
+#define per_cpu_offset(x) (__per_cpu_offset[x])
+
 /* Separate out the type, so (int[3], foo) works. */
 #define DEFINE_PER_CPU(type, name) \
     __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
@@ -14,6 +16,7 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
 #define __get_cpu_var(var) per_cpu(var, smp_processor_id())
+#define __raw_get_cpu_var(var) per_cpu(var, raw_smp_processor_id())
 
 /* A macro to avoid #include hell... */
 #define percpu_modcopy(pcpudst, src, size)              \
@@ -30,6 +33,7 @@ do {                                                            \
 
 #define per_cpu(var, cpu)                       (*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var)                      per_cpu__##var
+#define __raw_get_cpu_var(var)                  per_cpu__##var
 
 #endif  /* SMP */
 
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 358e4d309ce..c2059a3a062 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -159,17 +159,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #define lazy_mmu_prot_update(pte)       do { } while (0)
 #endif
 
-#ifndef __HAVE_ARCH_MULTIPLE_ZERO_PAGE
+#ifndef __HAVE_ARCH_MOVE_PTE
 #define move_pte(pte, prot, old_addr, new_addr) (pte)
-#else
-#define move_pte(pte, prot, old_addr, new_addr)                         \
-({                                                                      \
-        pte_t newpte = (pte);                                           \
-        if (pte_present(pte) && pfn_valid(pte_pfn(pte)) &&              \
-            pte_page(pte) == ZERO_PAGE(old_addr))                       \
-                newpte = mk_pte(ZERO_PAGE(new_addr), (prot));           \
-        newpte;                                                         \
-})
 #endif
 
 /*
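The __raw_get_cpu_var() added to percpu.h above mirrors __get_cpu_var() but goes through raw_smp_processor_id(), so it avoids the preemption debug check for callers that have already disabled preemption or can tolerate the race. A small sketch; the per-CPU variable and helper names are hypothetical:

    #include <linux/percpu.h>

    /* Hypothetical per-CPU counter read from a path where the
     * smp_processor_id() preemption check would only add noise. */
    DEFINE_PER_CPU(unsigned long, trace_hits);

    static unsigned long peek_trace_hits(void)
    {
            return __raw_get_cpu_var(trace_hits);
    }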
diff --git a/include/asm-generic/rtc.h b/include/asm-generic/rtc.h
index cef08db34ad..4087037a422 100644
--- a/include/asm-generic/rtc.h
+++ b/include/asm-generic/rtc.h
@@ -114,6 +114,7 @@ static inline unsigned int get_rtc_time(struct rtc_time *time)
 /* Set the current date and time in the real time clock. */
 static inline int set_rtc_time(struct rtc_time *time)
 {
+        unsigned long flags;
         unsigned char mon, day, hrs, min, sec;
         unsigned char save_control, save_freq_select;
         unsigned int yrs;
@@ -131,7 +132,7 @@ static inline int set_rtc_time(struct rtc_time *time)
         if (yrs > 255)  /* They are unsigned */
                 return -EINVAL;
 
-        spin_lock_irq(&rtc_lock);
+        spin_lock_irqsave(&rtc_lock, flags);
 #ifdef CONFIG_MACH_DECSTATION
         real_yrs = yrs;
         leap_yr = ((!((yrs + 1900) % 4) && ((yrs + 1900) % 100)) ||
@@ -152,7 +153,7 @@ static inline int set_rtc_time(struct rtc_time *time)
          * whether the chip is in binary mode or not.
          */
         if (yrs > 169) {
-                spin_unlock_irq(&rtc_lock);
+                spin_unlock_irqrestore(&rtc_lock, flags);
                 return -EINVAL;
         }
 
@@ -187,7 +188,7 @@ static inline int set_rtc_time(struct rtc_time *time)
         CMOS_WRITE(save_control, RTC_CONTROL);
         CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
 
-        spin_unlock_irq(&rtc_lock);
+        spin_unlock_irqrestore(&rtc_lock, flags);
 
         return 0;
 }
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 0b49f9e070f..962cad7cfbb 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -14,5 +14,6 @@ extern char _end[];
 extern char __per_cpu_start[], __per_cpu_end[];
 extern char __kprobes_text_start[], __kprobes_text_end[];
 extern char __initdata_begin[], __initdata_end[];
+extern char __start_rodata[], __end_rodata[];
 
 #endif /* _ASM_GENERIC_SECTIONS_H_ */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 9d11550b481..db5a3732f10 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -58,6 +58,20 @@
                 VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;               \
         }                                                               \
                                                                         \
+        /* Kernel symbol table: Normal unused symbols */                \
+        __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) {   \
+                VMLINUX_SYMBOL(__start___ksymtab_unused) = .;           \
+                *(__ksymtab_unused)                                     \
+                VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;            \
+        }                                                               \
+                                                                        \
+        /* Kernel symbol table: GPL-only unused symbols */              \
+        __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
+                VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .;       \
+                *(__ksymtab_unused_gpl)                                 \
+                VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .;        \
+        }                                                               \
+                                                                        \
         /* Kernel symbol table: GPL-future-only symbols */              \
         __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
                 VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .;       \
@@ -79,6 +93,20 @@
                 VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;               \
         }                                                               \
                                                                         \
+        /* Kernel symbol table: Normal unused symbols */                \
+        __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) {   \
+                VMLINUX_SYMBOL(__start___kcrctab_unused) = .;           \
+                *(__kcrctab_unused)                                     \
+                VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;            \
+        }                                                               \
+                                                                        \
+        /* Kernel symbol table: GPL-only unused symbols */              \
+        __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
+                VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .;       \
+                *(__kcrctab_unused_gpl)                                 \
+                VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .;        \
+        }                                                               \
+                                                                        \
         /* Kernel symbol table: GPL-future-only symbols */              \
         __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
                 VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .;       \
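The __ksymtab_unused*/__kcrctab_unused* output sections above collect exports made with the EXPORT_UNUSED_SYMBOL()/EXPORT_UNUSED_SYMBOL_GPL() variants (available when CONFIG_UNUSED_SYMBOLS is enabled in kernels of this era). A hypothetical example of a symbol whose export entry would be gathered by the __ksymtab_unused rule:

    #include <linux/module.h>

    /* Hypothetical export: keeps the symbol usable by modules while
     * flagging it as unused in-tree; its ksymtab entry is emitted into
     * the __ksymtab_unused input section collected above. */
    int legacy_helper(int arg)
    {
            return arg;
    }
    EXPORT_UNUSED_SYMBOL(legacy_helper);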