Diffstat (limited to 'include/asm-generic')
-rw-r--r--   include/asm-generic/atomic64.h             |  42
-rw-r--r--   include/asm-generic/dma-mapping-common.h   | 190
-rw-r--r--   include/asm-generic/errno.h                |   2
-rw-r--r--   include/asm-generic/hardirq.h              |  13
-rw-r--r--   include/asm-generic/kmap_types.h           |   7
-rw-r--r--   include/asm-generic/pci.h                  |  13
-rw-r--r--   include/asm-generic/pgtable.h              |   4
-rw-r--r--   include/asm-generic/sections.h             |   3
-rw-r--r--   include/asm-generic/uaccess.h              |  14
-rw-r--r--   include/asm-generic/unistd.h               |   7
-rw-r--r--   include/asm-generic/vmlinux.lds.h          |  11
11 files changed, 270 insertions, 36 deletions
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
new file mode 100644
index 00000000000..b18ce4f9ee3
--- /dev/null
+++ b/include/asm-generic/atomic64.h
@@ -0,0 +1,42 @@
+/*
+ * Generic implementation of 64-bit atomics using spinlocks,
+ * useful on processors that don't have 64-bit atomic instructions.
+ *
+ * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_GENERIC_ATOMIC64_H
+#define _ASM_GENERIC_ATOMIC64_H
+
+typedef struct {
+	long long counter;
+} atomic64_t;
+
+#define ATOMIC64_INIT(i)	{ (i) }
+
+extern long long atomic64_read(const atomic64_t *v);
+extern void	 atomic64_set(atomic64_t *v, long long i);
+extern void	 atomic64_add(long long a, atomic64_t *v);
+extern long long atomic64_add_return(long long a, atomic64_t *v);
+extern void	 atomic64_sub(long long a, atomic64_t *v);
+extern long long atomic64_sub_return(long long a, atomic64_t *v);
+extern long long atomic64_dec_if_positive(atomic64_t *v);
+extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n);
+extern long long atomic64_xchg(atomic64_t *v, long long new);
+extern int	 atomic64_add_unless(atomic64_t *v, long long a, long long u);
+
+#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
+#define atomic64_inc(v)			atomic64_add(1LL, (v))
+#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
+#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
+#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
+#define atomic64_dec(v)			atomic64_sub(1LL, (v))
+#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
+#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
+#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
+
+#endif  /*  _ASM_GENERIC_ATOMIC64_H  */
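A minimal usage sketch of the spinlock-backed 64-bit atomic API declared above (illustrative only, not part of the patch; the counter name is hypothetical, and real code would normally reach these definitions through the architecture's <asm/atomic.h>):

/* Illustrative sketch: a 64-bit event counter on a CPU without native
 * 64-bit atomic instructions, using the generic atomic64_t above. */
#include <asm/atomic.h>

static atomic64_t example_events = ATOMIC64_INIT(0);	/* hypothetical counter */

static void example_record_event(void)
{
	atomic64_inc(&example_events);	/* atomic64_add(1LL, &example_events) */
}

static long long example_total_events(void)
{
	return atomic64_read(&example_events);
}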
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
new file mode 100644
index 00000000000..5406a601185
--- /dev/null
+++ b/include/asm-generic/dma-mapping-common.h
@@ -0,0 +1,190 @@
+#ifndef _ASM_GENERIC_DMA_MAPPING_H
+#define _ASM_GENERIC_DMA_MAPPING_H
+
+#include <linux/kmemcheck.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <linux/dma-attrs.h>
+
+static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
+					      size_t size,
+					      enum dma_data_direction dir,
+					      struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	dma_addr_t addr;
+
+	kmemcheck_mark_initialized(ptr, size);
+	BUG_ON(!valid_dma_direction(dir));
+	addr = ops->map_page(dev, virt_to_page(ptr),
+			     (unsigned long)ptr & ~PAGE_MASK, size,
+			     dir, attrs);
+	debug_dma_map_page(dev, virt_to_page(ptr),
+			   (unsigned long)ptr & ~PAGE_MASK, size,
+			   dir, addr, true);
+	return addr;
+}
+
+static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
+					  size_t size,
+					  enum dma_data_direction dir,
+					  struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops->unmap_page)
+		ops->unmap_page(dev, addr, size, dir, attrs);
+	debug_dma_unmap_page(dev, addr, size, dir, true);
+}
+
+static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
+				   int nents, enum dma_data_direction dir,
+				   struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	int i, ents;
+	struct scatterlist *s;
+
+	for_each_sg(sg, s, nents, i)
+		kmemcheck_mark_initialized(sg_virt(s), s->length);
+	BUG_ON(!valid_dma_direction(dir));
+	ents = ops->map_sg(dev, sg, nents, dir, attrs);
+	debug_dma_map_sg(dev, sg, nents, ents, dir);
+
+	return ents;
+}
+
+static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
+				      int nents, enum dma_data_direction dir,
+				      struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	debug_dma_unmap_sg(dev, sg, nents, dir);
+	if (ops->unmap_sg)
+		ops->unmap_sg(dev, sg, nents, dir, attrs);
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+				      size_t offset, size_t size,
+				      enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	dma_addr_t addr;
+
+	kmemcheck_mark_initialized(page_address(page) + offset, size);
+	BUG_ON(!valid_dma_direction(dir));
+	addr = ops->map_page(dev, page, offset, size, dir, NULL);
+	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
+
+	return addr;
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+				  size_t size, enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops->unmap_page)
+		ops->unmap_page(dev, addr, size, dir, NULL);
+	debug_dma_unmap_page(dev, addr, size, dir, false);
+}
+
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+					   size_t size,
+					   enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops->sync_single_for_cpu)
+		ops->sync_single_for_cpu(dev, addr, size, dir);
+	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
+	flush_write_buffers();
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+					      dma_addr_t addr, size_t size,
+					      enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops->sync_single_for_device)
+		ops->sync_single_for_device(dev, addr, size, dir);
+	debug_dma_sync_single_for_device(dev, addr, size, dir);
+	flush_write_buffers();
+}
+
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+						 dma_addr_t addr,
+						 unsigned long offset,
+						 size_t size,
+						 enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops->sync_single_range_for_cpu) {
+		ops->sync_single_range_for_cpu(dev, addr, offset, size, dir);
+		debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
+
+		flush_write_buffers();
+	} else
+		dma_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static inline void dma_sync_single_range_for_device(struct device *dev,
+						    dma_addr_t addr,
+						    unsigned long offset,
+						    size_t size,
+						    enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops->sync_single_range_for_device) {
+		ops->sync_single_range_for_device(dev, addr, offset, size, dir);
+		debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
+
+		flush_write_buffers();
+	} else
+		dma_sync_single_for_device(dev, addr, size, dir);
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+		    int nelems, enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops->sync_sg_for_cpu)
+		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
+	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
+	flush_write_buffers();
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+		       int nelems, enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops->sync_sg_for_device)
+		ops->sync_sg_for_device(dev, sg, nelems, dir);
+	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
+
+	flush_write_buffers();
+}
+
+#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
+#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
+#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
+#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
+
+#endif
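A minimal sketch of how a driver might use the streaming helpers above through the dma_map_single()/dma_unmap_single() shorthands defined at the end of the header (illustrative only, not part of the patch; the function and buffer names are hypothetical):

/* Illustrative sketch: map a driver buffer for a device-bound transfer,
 * then unmap it once the hardware has finished with it. */
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_send(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -EIO;

	/* ... program the hardware with 'handle' and wait for completion ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}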
diff --git a/include/asm-generic/errno.h b/include/asm-generic/errno.h
index e8852c092fe..28cc03bf19e 100644
--- a/include/asm-generic/errno.h
+++ b/include/asm-generic/errno.h
@@ -106,4 +106,6 @@
 #define	EOWNERDEAD	130	/* Owner died */
 #define	ENOTRECOVERABLE	131	/* State not recoverable */
 
+#define ERFKILL		132	/* Operation not possible due to RF-kill */
+
 #endif
diff --git a/include/asm-generic/hardirq.h b/include/asm-generic/hardirq.h
index 3d5d2c906ab..23bb4dad496 100644
--- a/include/asm-generic/hardirq.h
+++ b/include/asm-generic/hardirq.h
@@ -11,19 +11,6 @@ typedef struct {
 
 #include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
 
-#ifndef HARDIRQ_BITS
-#define HARDIRQ_BITS	8
-#endif
-
-/*
- * The hardirq mask has to be large enough to have
- * space for potentially all IRQ sources in the system
- * nesting on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
 #ifndef ack_bad_irq
 static inline void ack_bad_irq(unsigned int irq)
 {
diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
index 58c33055c30..eddbce0f9fb 100644
--- a/include/asm-generic/kmap_types.h
+++ b/include/asm-generic/kmap_types.h
@@ -1,7 +1,7 @@
 #ifndef _ASM_GENERIC_KMAP_TYPES_H
 #define _ASM_GENERIC_KMAP_TYPES_H
 
-#ifdef CONFIG_DEBUG_HIGHMEM
+#ifdef __WITH_KM_FENCE
 # define D(n) __KM_FENCE_##n ,
 #else
 # define D(n)
@@ -24,7 +24,10 @@ D(12) KM_SOFTIRQ1,
 D(13) KM_SYNC_ICACHE,
 D(14) KM_SYNC_DCACHE,
 D(15) KM_UML_USERCOPY, /* UML specific, for copy_*_user - used in do_op_one_page */
-D(16) KM_TYPE_NR
+D(16) KM_IRQ_PTE,
+D(17) KM_NMI,
+D(18) KM_NMI_PTE,
+D(19) KM_TYPE_NR
 };
 
 #undef D
diff --git a/include/asm-generic/pci.h b/include/asm-generic/pci.h
index 515c6e2e321..b4326b5466e 100644
--- a/include/asm-generic/pci.h
+++ b/include/asm-generic/pci.h
@@ -30,19 +30,6 @@ pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
 	res->end = region->end;
 }
 
-static inline struct resource *
-pcibios_select_root(struct pci_dev *pdev, struct resource *res)
-{
-	struct resource *root = NULL;
-
-	if (res->flags & IORESOURCE_IO)
-		root = &ioport_resource;
-	if (res->flags & IORESOURCE_MEM)
-		root = &iomem_resource;
-
-	return root;
-}
-
 #define pcibios_scan_all_fns(a, b)	0
 
 #ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index e410f602cab..e2bd73e8f9c 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -129,6 +129,10 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #define move_pte(pte, prot, old_addr, new_addr)	(pte)
 #endif
 
+#ifndef pgprot_noncached
+#define pgprot_noncached(prot)	(prot)
+#endif
+
 #ifndef pgprot_writecombine
 #define pgprot_writecombine pgprot_noncached
 #endif
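The pgprot_noncached() fallback added to pgtable.h above is typically consumed by drivers that map device memory into userspace. A hedged sketch of that pattern (illustrative only, not part of the patch; the function name and PFN parameter are hypothetical):

/* Illustrative sketch: mark a userspace mapping uncached before
 * remapping a physical region into it. */
#include <linux/mm.h>

static int example_map_uncached(struct vm_area_struct *vma, unsigned long pfn)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}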
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 4ce48e87853..d083561337f 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -14,6 +14,9 @@ extern char __kprobes_text_start[], __kprobes_text_end[];
 extern char __initdata_begin[], __initdata_end[];
 extern char __start_rodata[], __end_rodata[];
 
+/* Start and end of .ctors section - used for constructor calls. */
+extern char __ctors_start[], __ctors_end[];
+
 /* function descriptor handling (if any).  Override
  * in asm/sections.h */
 #ifndef dereference_function_descriptor
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index 6d8cab22e29..b218b8513d0 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -163,7 +163,7 @@ static inline __must_check long __copy_to_user(void __user *to,
 #define put_user(x, ptr)					\
 ({								\
 	might_sleep();						\
-	__access_ok(ptr, sizeof (*ptr)) ?			\
+	access_ok(VERIFY_WRITE, ptr, sizeof(*ptr)) ?		\
 		__put_user(x, ptr) :				\
 		-EFAULT;					\
 })
@@ -219,7 +219,7 @@ extern int __put_user_bad(void) __attribute__((noreturn));
 #define get_user(x, ptr)					\
 ({								\
 	might_sleep();						\
-	__access_ok(ptr, sizeof (*ptr)) ?			\
+	access_ok(VERIFY_READ, ptr, sizeof(*ptr)) ?		\
 		__get_user(x, ptr) :				\
 		-EFAULT;					\
 })
@@ -244,7 +244,7 @@ static inline long copy_from_user(void *to,
 		const void __user * from, unsigned long n)
 {
 	might_sleep();
-	if (__access_ok(from, n))
+	if (access_ok(VERIFY_READ, from, n))
 		return __copy_from_user(to, from, n);
 	else
 		return n;
@@ -254,7 +254,7 @@ static inline long copy_to_user(void __user *to,
 		const void *from, unsigned long n)
 {
 	might_sleep();
-	if (__access_ok(to, n))
+	if (access_ok(VERIFY_WRITE, to, n))
 		return __copy_to_user(to, from, n);
 	else
 		return n;
@@ -278,7 +278,7 @@ __strncpy_from_user(char *dst, const char __user *src, long count)
 static inline long
 strncpy_from_user(char *dst, const char __user *src, long count)
 {
-	if (!__access_ok(src, 1))
+	if (!access_ok(VERIFY_READ, src, 1))
 		return -EFAULT;
 	return __strncpy_from_user(dst, src, count);
 }
@@ -291,6 +291,8 @@ strncpy_from_user(char *dst, const char __user *src, long count)
 #ifndef strnlen_user
 static inline long strnlen_user(const char __user *src, long n)
 {
+	if (!access_ok(VERIFY_READ, src, 1))
+		return 0;
 	return strlen((void * __force)src) + 1;
 }
 #endif
@@ -316,7 +318,7 @@ static inline __must_check unsigned long
 clear_user(void __user *to, unsigned long n)
 {
 	might_sleep();
-	if (!__access_ok(to, n))
+	if (!access_ok(VERIFY_WRITE, to, n))
 		return n;
 
 	return __clear_user(to, n);
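The uaccess.h hunks above switch the generic helpers from __access_ok() to the access_ok(VERIFY_READ/VERIFY_WRITE, ...) convention. A hedged sketch of a caller relying on that check (illustrative only; the struct and function names are hypothetical — copy_from_user() already performs the access_ok() test shown in the diff):

/* Illustrative sketch: copy an argument block in from userspace. */
#include <linux/uaccess.h>
#include <linux/errno.h>

struct example_args {		/* hypothetical argument block */
	int flags;
	long value;
};

static int example_copy_in(struct example_args *dst,
			   const struct example_args __user *src)
{
	if (copy_from_user(dst, src, sizeof(*dst)))
		return -EFAULT;
	return 0;
}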
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index 5b34b6233d6..1125e5a1ee5 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -618,8 +618,13 @@ __SYSCALL(__NR_migrate_pages, sys_migrate_pages)
 __SYSCALL(__NR_move_pages, sys_move_pages)
 #endif
 
+#define __NR_rt_tgsigqueueinfo 240
+__SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo)
+#define __NR_perf_counter_open 241
+__SYSCALL(__NR_perf_counter_open, sys_perf_counter_open)
+
 #undef __NR_syscalls
-#define __NR_syscalls 240
+#define __NR_syscalls 242
 
 /*
  * All syscalls below here should go away really,
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 6bdba10fef4..92b73b6140f 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -440,12 +440,21 @@
 	INIT_TASK							\
 	}
 
+#ifdef CONFIG_CONSTRUCTORS
+#define KERNEL_CTORS()	VMLINUX_SYMBOL(__ctors_start) = .;	\
+			*(.ctors)				\
+			VMLINUX_SYMBOL(__ctors_end) = .;
+#else
+#define KERNEL_CTORS()
+#endif
+
 /* init and exit section handling */
 #define INIT_DATA							\
 	*(.init.data)							\
 	DEV_DISCARD(init.data)						\
 	CPU_DISCARD(init.data)						\
 	MEM_DISCARD(init.data)						\
+	KERNEL_CTORS()							\
 	*(.init.rodata)							\
 	DEV_DISCARD(init.rodata)					\
 	CPU_DISCARD(init.rodata)					\
@@ -616,7 +625,7 @@
 	*(.init.ramfs)							\
 	VMLINUX_SYMBOL(__initramfs_end) = .;
 #else
-#define INITRAMFS
+#define INIT_RAM_FS
 #endif
 
 /**
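For orientation on the .ctors changes (the __ctors_start/__ctors_end symbols in sections.h plus the KERNEL_CTORS() output-section macro above): the region between the two symbols holds compiler-generated constructor pointers, which init code can walk roughly as below. This is an illustrative sketch, not part of the patch; the typedef and function name are hypothetical.

/* Illustrative sketch: invoke every constructor collected in .ctors. */
#include <asm/sections.h>

typedef void (*ctor_fn_t)(void);

static void example_do_ctors(void)
{
	ctor_fn_t *fn;

	for (fn = (ctor_fn_t *)__ctors_start;
	     fn < (ctor_fn_t *)__ctors_end; fn++)
		(*fn)();
}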