Diffstat (limited to 'include/asm-generic')
-rw-r--r--   include/asm-generic/4level-fixup.h          4
-rw-r--r--   include/asm-generic/dma-mapping-common.h  190
-rw-r--r--   include/asm-generic/hardirq.h              13
-rw-r--r--   include/asm-generic/kmap_types.h            5
-rw-r--r--   include/asm-generic/percpu.h                4
-rw-r--r--   include/asm-generic/pgtable-nopmd.h         2
-rw-r--r--   include/asm-generic/pgtable-nopud.h         2
-rw-r--r--   include/asm-generic/pgtable.h               4
-rw-r--r--   include/asm-generic/sections.h              3
-rw-r--r--   include/asm-generic/tlb.h                  12
-rw-r--r--   include/asm-generic/uaccess.h              14
-rw-r--r--   include/asm-generic/unistd.h                7
-rw-r--r--   include/asm-generic/vmlinux.lds.h          43
13 files changed, 255 insertions, 48 deletions
diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
index 9d40e879f99..77ff547730a 100644
--- a/include/asm-generic/4level-fixup.h
+++ b/include/asm-generic/4level-fixup.h
@@ -27,9 +27,9 @@
#define pud_page_vaddr(pud) pgd_page_vaddr(pud)
#undef pud_free_tlb
-#define pud_free_tlb(tlb, x) do { } while (0)
+#define pud_free_tlb(tlb, x, addr) do { } while (0)
#define pud_free(mm, x) do { } while (0)
-#define __pud_free_tlb(tlb, x) do { } while (0)
+#define __pud_free_tlb(tlb, x, addr) do { } while (0)
#undef pud_addr_end
#define pud_addr_end(addr, end) (end)
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
new file mode 100644
index 00000000000..5406a601185
--- /dev/null
+++ b/include/asm-generic/dma-mapping-common.h
@@ -0,0 +1,190 @@
+#ifndef _ASM_GENERIC_DMA_MAPPING_H
+#define _ASM_GENERIC_DMA_MAPPING_H
+
+#include <linux/kmemcheck.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <linux/dma-attrs.h>
+
+static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
+ size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ dma_addr_t addr;
+
+ kmemcheck_mark_initialized(ptr, size);
+ BUG_ON(!valid_dma_direction(dir));
+ addr = ops->map_page(dev, virt_to_page(ptr),
+ (unsigned long)ptr & ~PAGE_MASK, size,
+ dir, attrs);
+ debug_dma_map_page(dev, virt_to_page(ptr),
+ (unsigned long)ptr & ~PAGE_MASK, size,
+ dir, addr, true);
+ return addr;
+}
+
+static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
+ size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->unmap_page)
+ ops->unmap_page(dev, addr, size, dir, attrs);
+ debug_dma_unmap_page(dev, addr, size, dir, true);
+}
+
+static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ int i, ents;
+ struct scatterlist *s;
+
+ for_each_sg(sg, s, nents, i)
+ kmemcheck_mark_initialized(sg_virt(s), s->length);
+ BUG_ON(!valid_dma_direction(dir));
+ ents = ops->map_sg(dev, sg, nents, dir, attrs);
+ debug_dma_map_sg(dev, sg, nents, ents, dir);
+
+ return ents;
+}
+
+static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ debug_dma_unmap_sg(dev, sg, nents, dir);
+ if (ops->unmap_sg)
+ ops->unmap_sg(dev, sg, nents, dir, attrs);
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+ size_t offset, size_t size,
+ enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ dma_addr_t addr;
+
+ kmemcheck_mark_initialized(page_address(page) + offset, size);
+ BUG_ON(!valid_dma_direction(dir));
+ addr = ops->map_page(dev, page, offset, size, dir, NULL);
+ debug_dma_map_page(dev, page, offset, size, dir, addr, false);
+
+ return addr;
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->unmap_page)
+ ops->unmap_page(dev, addr, size, dir, NULL);
+ debug_dma_unmap_page(dev, addr, size, dir, false);
+}
+
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->sync_single_for_cpu)
+ ops->sync_single_for_cpu(dev, addr, size, dir);
+ debug_dma_sync_single_for_cpu(dev, addr, size, dir);
+ flush_write_buffers();
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+ dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->sync_single_for_device)
+ ops->sync_single_for_device(dev, addr, size, dir);
+ debug_dma_sync_single_for_device(dev, addr, size, dir);
+ flush_write_buffers();
+}
+
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+ dma_addr_t addr,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->sync_single_range_for_cpu) {
+ ops->sync_single_range_for_cpu(dev, addr, offset, size, dir);
+ debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
+
+ flush_write_buffers();
+ } else
+ dma_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static inline void dma_sync_single_range_for_device(struct device *dev,
+ dma_addr_t addr,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->sync_single_range_for_device) {
+ ops->sync_single_range_for_device(dev, addr, offset, size, dir);
+ debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
+
+ flush_write_buffers();
+ } else
+ dma_sync_single_for_device(dev, addr, size, dir);
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->sync_sg_for_cpu)
+ ops->sync_sg_for_cpu(dev, sg, nelems, dir);
+ debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
+ flush_write_buffers();
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->sync_sg_for_device)
+ ops->sync_sg_for_device(dev, sg, nelems, dir);
+ debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
+
+ flush_write_buffers();
+}
+
+#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
+#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
+#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
+#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
+
+#endif
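
The new header above builds the dma_map_*/dma_unmap_*/dma_sync_* entry points on top of a per-device struct dma_map_ops. As a rough illustration of how a driver consumes these helpers (not part of the patch; "dev", "buf" and "len" are assumed to come from the caller, and error handling is minimal):

    #include <linux/dma-mapping.h>

    /* Illustrative sketch only: map a buffer for a device->memory transfer,
     * hand ownership back to the CPU, then unmap. */
    static int example_dma_readback(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t handle;

            handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, handle))
                    return -EIO;

            /* ... program the hardware with "handle" and wait for completion ... */

            /* Transfer ownership back to the CPU before reading the data. */
            dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
            dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
            return 0;
    }
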
diff --git a/include/asm-generic/hardirq.h b/include/asm-generic/hardirq.h
index 3d5d2c906ab..23bb4dad496 100644
--- a/include/asm-generic/hardirq.h
+++ b/include/asm-generic/hardirq.h
@@ -11,19 +11,6 @@ typedef struct {
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-#ifndef HARDIRQ_BITS
-#define HARDIRQ_BITS 8
-#endif
-
-/*
- * The hardirq mask has to be large enough to have
- * space for potentially all IRQ sources in the system
- * nesting on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
#ifndef ack_bad_irq
static inline void ack_bad_irq(unsigned int irq)
{
diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
index 54e8b3d956b..eddbce0f9fb 100644
--- a/include/asm-generic/kmap_types.h
+++ b/include/asm-generic/kmap_types.h
@@ -24,7 +24,10 @@ D(12) KM_SOFTIRQ1,
D(13) KM_SYNC_ICACHE,
D(14) KM_SYNC_DCACHE,
D(15) KM_UML_USERCOPY, /* UML specific, for copy_*_user - used in do_op_one_page */
-D(16) KM_TYPE_NR
+D(16) KM_IRQ_PTE,
+D(17) KM_NMI,
+D(18) KM_NMI_PTE,
+D(19) KM_TYPE_NR
};
#undef D
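
The three new kmap slots give IRQ- and NMI-context page-table walks their own atomic-kmap indices. A minimal sketch of how such a slot is used with the kmap_atomic() API of this era, where each caller names its slot ("page" and "offset" are hypothetical, not from the patch):

    #include <linux/highmem.h>
    #include <linux/types.h>

    /* Illustrative sketch: NMI-context code can map a highmem page through
     * the dedicated KM_NMI slot without disturbing other contexts' mappings. */
    static u32 example_peek_word(struct page *page, unsigned int offset)
    {
            void *vaddr = kmap_atomic(page, KM_NMI);
            u32 val = *(u32 *)(vaddr + offset);

            kunmap_atomic(vaddr, KM_NMI);
            return val;
    }
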
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index d7d50d7ee51..aa00800adac 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -97,4 +97,8 @@ extern void setup_per_cpu_areas(void);
#define PER_CPU_ATTRIBUTES
#endif
+#ifndef PER_CPU_DEF_ATTRIBUTES
+#define PER_CPU_DEF_ATTRIBUTES
+#endif
+
#endif /* _ASM_GENERIC_PERCPU_H_ */
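
PER_CPU_DEF_ATTRIBUTES is a new hook an architecture can provide to tag per-CPU variable definitions; the generic header only supplies the empty default shown above. A hypothetical arch-side override, for illustration only (the chosen attribute is an assumption, not taken from the patch):

    /* In a hypothetical arch asm/percpu.h: tag every per-CPU definition,
     * e.g. to work around a toolchain quirk, then pull in the generic code. */
    #define PER_CPU_DEF_ATTRIBUTES  __attribute__((weak))
    #include <asm-generic/percpu.h>
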
diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
index a7cdc48e8b7..725612b793c 100644
--- a/include/asm-generic/pgtable-nopmd.h
+++ b/include/asm-generic/pgtable-nopmd.h
@@ -59,7 +59,7 @@ static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address)
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
}
-#define __pmd_free_tlb(tlb, x) do { } while (0)
+#define __pmd_free_tlb(tlb, x, a) do { } while (0)
#undef pmd_addr_end
#define pmd_addr_end(addr, end) (end)
diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
index 87cf449a6df..810431d8351 100644
--- a/include/asm-generic/pgtable-nopud.h
+++ b/include/asm-generic/pgtable-nopud.h
@@ -52,7 +52,7 @@ static inline pud_t * pud_offset(pgd_t * pgd, unsigned long address)
*/
#define pud_alloc_one(mm, address) NULL
#define pud_free(mm, x) do { } while (0)
-#define __pud_free_tlb(tlb, x) do { } while (0)
+#define __pud_free_tlb(tlb, x, a) do { } while (0)
#undef pud_addr_end
#define pud_addr_end(addr, end) (end)
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index e410f602cab..e2bd73e8f9c 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -129,6 +129,10 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
#define move_pte(pte, prot, old_addr, new_addr) (pte)
#endif
+#ifndef pgprot_noncached
+#define pgprot_noncached(prot) (prot)
+#endif
+
#ifndef pgprot_writecombine
#define pgprot_writecombine pgprot_noncached
#endif
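
With the new fallback, generic code can apply pgprot_noncached() unconditionally; where no uncached protection exists it becomes a no-op. The classic consumer is a driver mmap() handler mapping device memory to user space, sketched here for illustration ("phys" is a made-up MMIO base, not from the patch):

    #include <linux/fs.h>
    #include <linux/mm.h>

    /* Illustrative sketch only: map a device region uncached into user space. */
    static int example_mmap(struct file *file, struct vm_area_struct *vma)
    {
            unsigned long phys = 0x80000000UL;      /* hypothetical MMIO base */
            unsigned long size = vma->vm_end - vma->vm_start;

            vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
            return remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
                                   size, vma->vm_page_prot);
    }
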
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 4ce48e87853..d083561337f 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -14,6 +14,9 @@ extern char __kprobes_text_start[], __kprobes_text_end[];
extern char __initdata_begin[], __initdata_end[];
extern char __start_rodata[], __end_rodata[];
+/* Start and end of .ctors section - used for constructor calls. */
+extern char __ctors_start[], __ctors_end[];
+
/* function descriptor handling (if any). Override
* in asm/sections.h */
#ifndef dereference_function_descriptor
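
The new __ctors_start/__ctors_end symbols bracket the .ctors function-pointer table collected by the KERNEL_CTORS() linker-script addition further down in this diff. Roughly, the constructor support in init/main.c walks that table at boot; the loop below is only a sketch of that shape, with the typedef and function name made up here:

    #include <linux/init.h>
    #include <asm/sections.h>

    typedef void (*ctor_fn_t)(void);

    /* Illustrative sketch: call every constructor collected in .ctors. */
    static void __init example_run_ctors(void)
    {
    #ifdef CONFIG_CONSTRUCTORS
            ctor_fn_t *fn;

            for (fn = (ctor_fn_t *)__ctors_start; fn < (ctor_fn_t *)__ctors_end; fn++)
                    (*fn)();
    #endif
    }
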
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index f490e43a90b..e43f9766259 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -123,24 +123,24 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
__tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
-#define pte_free_tlb(tlb, ptep) \
+#define pte_free_tlb(tlb, ptep, address) \
do { \
tlb->need_flush = 1; \
- __pte_free_tlb(tlb, ptep); \
+ __pte_free_tlb(tlb, ptep, address); \
} while (0)
#ifndef __ARCH_HAS_4LEVEL_HACK
-#define pud_free_tlb(tlb, pudp) \
+#define pud_free_tlb(tlb, pudp, address) \
do { \
tlb->need_flush = 1; \
- __pud_free_tlb(tlb, pudp); \
+ __pud_free_tlb(tlb, pudp, address); \
} while (0)
#endif
-#define pmd_free_tlb(tlb, pmdp) \
+#define pmd_free_tlb(tlb, pmdp, address) \
do { \
tlb->need_flush = 1; \
- __pmd_free_tlb(tlb, pmdp); \
+ __pmd_free_tlb(tlb, pmdp, address); \
} while (0)
#define tlb_migrate_finish(mm) do {} while (0)
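
pte_free_tlb(), pmd_free_tlb() and pud_free_tlb() now carry the virtual address down to the architecture hooks, so an architecture whose page-table-cache or TLB invalidation is address-ranged can use it instead of over-flushing. A hypothetical arch-side definition, for illustration only ("arch_flush_pgtable_entry" is made up):

    /* Illustrative sketch of an arch override that puts the new argument to use. */
    #define __pte_free_tlb(tlb, ptep, address)                      \
    do {                                                            \
            arch_flush_pgtable_entry((tlb)->mm, (address));         \
            pte_free((tlb)->mm, (ptep));                            \
    } while (0)
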
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index 6d8cab22e29..b218b8513d0 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -163,7 +163,7 @@ static inline __must_check long __copy_to_user(void __user *to,
#define put_user(x, ptr) \
({ \
might_sleep(); \
- __access_ok(ptr, sizeof (*ptr)) ? \
+ access_ok(VERIFY_WRITE, ptr, sizeof(*ptr)) ? \
__put_user(x, ptr) : \
-EFAULT; \
})
@@ -219,7 +219,7 @@ extern int __put_user_bad(void) __attribute__((noreturn));
#define get_user(x, ptr) \
({ \
might_sleep(); \
- __access_ok(ptr, sizeof (*ptr)) ? \
+ access_ok(VERIFY_READ, ptr, sizeof(*ptr)) ? \
__get_user(x, ptr) : \
-EFAULT; \
})
@@ -244,7 +244,7 @@ static inline long copy_from_user(void *to,
const void __user * from, unsigned long n)
{
might_sleep();
- if (__access_ok(from, n))
+ if (access_ok(VERIFY_READ, from, n))
return __copy_from_user(to, from, n);
else
return n;
@@ -254,7 +254,7 @@ static inline long copy_to_user(void __user *to,
const void *from, unsigned long n)
{
might_sleep();
- if (__access_ok(to, n))
+ if (access_ok(VERIFY_WRITE, to, n))
return __copy_to_user(to, from, n);
else
return n;
@@ -278,7 +278,7 @@ __strncpy_from_user(char *dst, const char __user *src, long count)
static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
- if (!__access_ok(src, 1))
+ if (!access_ok(VERIFY_READ, src, 1))
return -EFAULT;
return __strncpy_from_user(dst, src, count);
}
@@ -291,6 +291,8 @@ strncpy_from_user(char *dst, const char __user *src, long count)
#ifndef strnlen_user
static inline long strnlen_user(const char __user *src, long n)
{
+ if (!access_ok(VERIFY_READ, src, 1))
+ return 0;
return strlen((void * __force)src) + 1;
}
#endif
@@ -316,7 +318,7 @@ static inline __must_check unsigned long
clear_user(void __user *to, unsigned long n)
{
might_sleep();
- if (!__access_ok(to, n))
+ if (!access_ok(VERIFY_WRITE, to, n))
return n;
return __clear_user(to, n);
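
The generic uaccess helpers now perform the full access_ok(VERIFY_READ/VERIFY_WRITE, ...) check themselves instead of the bare __access_ok(), and strnlen_user() gains the missing check. Callers keep the usual pattern; a minimal sketch of a write() handler relying on copy_from_user() (illustrative only, names made up):

    #include <linux/fs.h>
    #include <linux/uaccess.h>

    /* Illustrative sketch: copy_from_user() returns the number of bytes it
     * could not copy, so any non-zero result maps to -EFAULT. */
    static ssize_t example_write(struct file *file, const char __user *ubuf,
                                 size_t count, loff_t *ppos)
    {
            char kbuf[64];

            if (count > sizeof(kbuf))
                    count = sizeof(kbuf);
            if (copy_from_user(kbuf, ubuf, count))
                    return -EFAULT;

            /* ... act on kbuf ... */
            return count;
    }
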
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index 5b34b6233d6..1125e5a1ee5 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -618,8 +618,13 @@ __SYSCALL(__NR_migrate_pages, sys_migrate_pages)
__SYSCALL(__NR_move_pages, sys_move_pages)
#endif
+#define __NR_rt_tgsigqueueinfo 240
+__SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo)
+#define __NR_perf_counter_open 241
+__SYSCALL(__NR_perf_counter_open, sys_perf_counter_open)
+
#undef __NR_syscalls
-#define __NR_syscalls 240
+#define __NR_syscalls 242
/*
* All syscalls below here should go away really,
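
Two syscall numbers are added to the generic table and the count is bumped to 242. Until a libc wrapper exists, user space reaches such a call through syscall(2); a sketch for rt_tgsigqueueinfo, whose arguments follow sys_rt_tgsigqueueinfo(tgid, tid, sig, siginfo) (illustrative only; the payload value is arbitrary):

    #include <signal.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <unistd.h>

    /* Illustrative userspace sketch: queue a signal with a payload to one
     * thread (tid) of a thread group (tgid). */
    static int example_tgsigqueueinfo(pid_t tgid, pid_t tid, int sig)
    {
            siginfo_t info;

            memset(&info, 0, sizeof(info));
            info.si_signo = sig;
            info.si_code  = SI_QUEUE;
            info.si_value.sival_int = 42;

            return syscall(__NR_rt_tgsigqueueinfo, tgid, tid, sig, &info);
    }
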
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 6bdba10fef4..6ad76bf5fb4 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -30,9 +30,7 @@
* EXCEPTION_TABLE(...)
* NOTES
*
- * __bss_start = .;
- * BSS_SECTION(0, 0)
- * __bss_stop = .;
+ * BSS_SECTION(0, 0, 0)
* _end = .;
*
* /DISCARD/ : {
@@ -191,7 +189,7 @@
. = ALIGN(align); \
*(.data.cacheline_aligned)
-#define INIT_TASK(align) \
+#define INIT_TASK_DATA(align) \
. = ALIGN(align); \
*(.data.init_task)
@@ -434,18 +432,28 @@
/*
* Init task
*/
-#define INIT_TASK_DATA(align) \
+#define INIT_TASK_DATA_SECTION(align) \
. = ALIGN(align); \
.data.init_task : { \
- INIT_TASK \
+ INIT_TASK_DATA(align) \
}
+#ifdef CONFIG_CONSTRUCTORS
+#define KERNEL_CTORS() . = ALIGN(8); \
+ VMLINUX_SYMBOL(__ctors_start) = .; \
+ *(.ctors) \
+ VMLINUX_SYMBOL(__ctors_end) = .;
+#else
+#define KERNEL_CTORS()
+#endif
+
/* init and exit section handling */
#define INIT_DATA \
*(.init.data) \
DEV_DISCARD(init.data) \
CPU_DISCARD(init.data) \
MEM_DISCARD(init.data) \
+ KERNEL_CTORS() \
*(.init.rodata) \
DEV_DISCARD(init.rodata) \
CPU_DISCARD(init.rodata) \
@@ -479,7 +487,8 @@
* bss (Block Started by Symbol) - uninitialized data
* zeroed during startup
*/
-#define SBSS \
+#define SBSS(sbss_align) \
+ . = ALIGN(sbss_align); \
.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
*(.sbss) \
*(.scommon) \
@@ -488,12 +497,10 @@
#define BSS(bss_align) \
. = ALIGN(bss_align); \
.bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__bss_start) = .; \
*(.bss.page_aligned) \
*(.dynbss) \
*(.bss) \
*(COMMON) \
- VMLINUX_SYMBOL(__bss_stop) = .; \
}
/*
@@ -616,7 +623,7 @@
*(.init.ramfs) \
VMLINUX_SYMBOL(__initramfs_end) = .;
#else
-#define INITRAMFS
+#define INIT_RAM_FS
#endif
/**
@@ -695,15 +702,15 @@
* matches the requirment of PAGE_ALIGNED_DATA.
*
* use 0 as page_align if page_aligned data is not used */
-#define RW_DATA_SECTION(cacheline, nosave, pagealigned, inittask) \
+#define RW_DATA_SECTION(cacheline, pagealigned, inittask) \
. = ALIGN(PAGE_SIZE); \
.data : AT(ADDR(.data) - LOAD_OFFSET) { \
- INIT_TASK(inittask) \
+ INIT_TASK_DATA(inittask) \
CACHELINE_ALIGNED_DATA(cacheline) \
READ_MOSTLY_DATA(cacheline) \
DATA_DATA \
CONSTRUCTORS \
- NOSAVE_DATA(nosave) \
+ NOSAVE_DATA \
PAGE_ALIGNED_DATA(pagealigned) \
}
@@ -725,8 +732,10 @@
INIT_RAM_FS \
}
-#define BSS_SECTION(sbss_align, bss_align) \
- SBSS \
+#define BSS_SECTION(sbss_align, bss_align, stop_align) \
+ . = ALIGN(sbss_align); \
+ VMLINUX_SYMBOL(__bss_start) = .; \
+ SBSS(sbss_align) \
BSS(bss_align) \
- . = ALIGN(4);
-
+ . = ALIGN(stop_align); \
+ VMLINUX_SYMBOL(__bss_stop) = .;
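
BSS_SECTION() now takes sbss, bss and stop alignments and emits the __bss_start/__bss_stop pair itself, so the markers bracket both .sbss and .bss rather than being placed by each architecture's linker script. Early-boot code that clears the BSS by hand keeps working; a minimal sketch of that pattern, with the symbols declared locally for self-containment:

    #include <linux/init.h>
    #include <linux/string.h>

    extern char __bss_start[], __bss_stop[];    /* emitted by BSS_SECTION() */

    /* Illustrative sketch: zero everything between the two markers. */
    static void __init example_clear_bss(void)
    {
            memset(__bss_start, 0, __bss_stop - __bss_start);
    }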