Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/atomic.h              |  8
-rw-r--r--  include/asm-generic/bug.h                 | 34
-rw-r--r--  include/asm-generic/dma-mapping-common.h  | 20
-rw-r--r--  include/asm-generic/gpio.h                | 11
-rw-r--r--  include/asm-generic/kmap_types.h          |  3
-rw-r--r--  include/asm-generic/percpu.h              | 10
-rw-r--r--  include/asm-generic/scatterlist.h         | 17
-rw-r--r--  include/asm-generic/topology.h            |  3
-rw-r--r--  include/asm-generic/vmlinux.lds.h         | 46
9 files changed, 83 insertions(+), 69 deletions(-)
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index c33749f95b3..058129e9b04 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -30,8 +30,7 @@
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
*
- * Atomically reads the value of @v. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically reads the value of @v.
*/
#define atomic_read(v) (*(volatile int *)&(v)->counter)
@@ -40,8 +39,7 @@
* @v: pointer of type atomic_t
* @i: required value
*
- * Atomically sets the value of @v to @i. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically sets the value of @v to @i.
*/
#define atomic_set(v, i) (((v)->counter) = (i))
@@ -53,7 +51,6 @@
* @v: pointer of type atomic_t
*
* Atomically adds @i to @v and returns the result
- * Note that the guaranteed useful range of an atomic_t is only 24 bits.
*/
static inline int atomic_add_return(int i, atomic_t *v)
{
@@ -75,7 +72,6 @@ static inline int atomic_add_return(int i, atomic_t *v)
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v and returns the result
- * Note that the guaranteed useful range of an atomic_t is only 24 bits.
*/
static inline int atomic_sub_return(int i, atomic_t *v)
{
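The dropped caveat dates back to architectures such as older SPARC that kept a lock in the upper byte of atomic_t, leaving only 24 usable bits; atomic_t now covers the full int range everywhere. A minimal sketch of the counter idiom these helpers support (the function names here are illustrative, not from this patch):

#include <linux/kernel.h>
#include <asm/atomic.h>

static atomic_t nr_users = ATOMIC_INIT(0);

static int grab_user(void)
{
	/* atomic_add_return() yields the value *after* the increment */
	return atomic_add_return(1, &nr_users);
}

static void drop_user(void)
{
	if (atomic_sub_return(1, &nr_users) == 0)
		printk(KERN_INFO "last user gone\n");
}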
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 18c435d7c08..c2c9ba032d4 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -25,7 +25,10 @@ struct bug_entry {
};
#endif /* __ASSEMBLY__ */
-#define BUGFLAG_WARNING (1<<0)
+#define BUGFLAG_WARNING (1 << 0)
+#define BUGFLAG_TAINT(taint) (BUGFLAG_WARNING | ((taint) << 8))
+#define BUG_GET_TAINT(bug) ((bug)->flags >> 8)
+
#endif /* CONFIG_GENERIC_BUG */
/*
@@ -56,17 +59,25 @@ struct bug_entry {
* appear at runtime. Use the versions with printk format strings
* to provide better diagnostics.
*/
-#ifndef __WARN
+#ifndef __WARN_TAINT
#ifndef __ASSEMBLY__
extern void warn_slowpath_fmt(const char *file, const int line,
const char *fmt, ...) __attribute__((format(printf, 3, 4)));
+extern void warn_slowpath_fmt_taint(const char *file, const int line,
+ unsigned taint, const char *fmt, ...)
+ __attribute__((format(printf, 4, 5)));
extern void warn_slowpath_null(const char *file, const int line);
#define WANT_WARN_ON_SLOWPATH
#endif
#define __WARN() warn_slowpath_null(__FILE__, __LINE__)
#define __WARN_printf(arg...) warn_slowpath_fmt(__FILE__, __LINE__, arg)
+#define __WARN_printf_taint(taint, arg...) \
+ warn_slowpath_fmt_taint(__FILE__, __LINE__, taint, arg)
#else
+#define __WARN() __WARN_TAINT(TAINT_WARN)
#define __WARN_printf(arg...) do { printk(arg); __WARN(); } while (0)
+#define __WARN_printf_taint(taint, arg...) \
+ do { printk(arg); __WARN_TAINT(taint); } while (0)
#endif
#ifndef WARN_ON
@@ -87,6 +98,13 @@ extern void warn_slowpath_null(const char *file, const int line);
})
#endif
+#define WARN_TAINT(condition, taint, format...) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ __WARN_printf_taint(taint, format); \
+ unlikely(__ret_warn_on); \
+})
+
#else /* !CONFIG_BUG */
#ifndef HAVE_ARCH_BUG
#define BUG() do {} while(0)
@@ -110,6 +128,8 @@ extern void warn_slowpath_null(const char *file, const int line);
})
#endif
+#define WARN_TAINT(condition, taint, format...) WARN_ON(condition)
+
#endif
#define WARN_ON_ONCE(condition) ({ \
@@ -132,6 +152,16 @@ extern void warn_slowpath_null(const char *file, const int line);
unlikely(__ret_warn_once); \
})
+#define WARN_TAINT_ONCE(condition, taint, format...) ({ \
+ static bool __warned; \
+ int __ret_warn_once = !!(condition); \
+ \
+ if (unlikely(__ret_warn_once)) \
+ if (WARN_TAINT(!__warned, taint, format)) \
+ __warned = true; \
+ unlikely(__ret_warn_once); \
+})
+
#define WARN_ON_RATELIMIT(condition, state) \
WARN_ON((condition) && __ratelimit(state))
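A sketch of how a caller might use the new taint-aware warnings; the condition, message, and the choice of TAINT_FIRMWARE_WORKAROUND are illustrative:

#include <linux/kernel.h>
#include <linux/bug.h>

static void check_fw_table(const void *table)
{
	/* warns once, and taints the kernel so later bug reports
	 * record that a firmware workaround was applied */
	WARN_TAINT_ONCE(!table, TAINT_FIRMWARE_WORKAROUND,
			"firmware table missing, applying workaround\n");
}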
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
index 69206957b72..0c80bb38773 100644
--- a/include/asm-generic/dma-mapping-common.h
+++ b/include/asm-generic/dma-mapping-common.h
@@ -123,15 +123,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
size_t size,
enum dma_data_direction dir)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_single_range_for_cpu) {
- ops->sync_single_range_for_cpu(dev, addr, offset, size, dir);
- debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
-
- } else
- dma_sync_single_for_cpu(dev, addr + offset, size, dir);
+ dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}
static inline void dma_sync_single_range_for_device(struct device *dev,
@@ -140,15 +132,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
size_t size,
enum dma_data_direction dir)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_single_range_for_device) {
- ops->sync_single_range_for_device(dev, addr, offset, size, dir);
- debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
-
- } else
- dma_sync_single_for_device(dev, addr + offset, size, dir);
+ dma_sync_single_for_device(dev, addr + offset, size, dir);
}
static inline void
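With the unused sync_single_range_for_{cpu,device} ops removed, the ranged helpers become thin wrappers over the whole-buffer sync calls. A sketch of the usual caller pattern, assuming a driver that owns a streaming DMA mapping (device setup and the mapping itself are elided):

#include <linux/dma-mapping.h>

static void cpu_peek_at_header(struct device *dev, dma_addr_t buf_dma,
			       size_t hdr_len)
{
	/* hand just the first hdr_len bytes back to the CPU ... */
	dma_sync_single_range_for_cpu(dev, buf_dma, 0, hdr_len,
				      DMA_FROM_DEVICE);
	/* ... inspect the header here, then return ownership */
	dma_sync_single_range_for_device(dev, buf_dma, 0, hdr_len,
					 DMA_FROM_DEVICE);
}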
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 979c6a57f2f..4f3d75e1ad3 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -60,7 +60,9 @@ struct module;
* @names: if set, must be an array of strings to use as alternative
* names for the GPIOs in this chip. Any entry in the array
* may be NULL if there is no alias for the GPIO, however the
- * array must be @ngpio entries long.
+ * array must be @ngpio entries long. A name can include a single printk
+ * format specifier for an unsigned int. It is substituted by the actual
+ * number of the gpio.
*
* A gpio_chip can help platforms abstract various sources of GPIOs so
* they can all be accessed through a common programing interface.
@@ -88,6 +90,9 @@ struct gpio_chip {
unsigned offset);
int (*direction_output)(struct gpio_chip *chip,
unsigned offset, int value);
+ int (*set_debounce)(struct gpio_chip *chip,
+ unsigned offset, unsigned debounce);
+
void (*set)(struct gpio_chip *chip,
unsigned offset, int value);
@@ -98,7 +103,7 @@ struct gpio_chip {
struct gpio_chip *chip);
int base;
u16 ngpio;
- char **names;
+ const char *const *names;
unsigned can_sleep:1;
unsigned exported:1;
};
@@ -121,6 +126,8 @@ extern void gpio_free(unsigned gpio);
extern int gpio_direction_input(unsigned gpio);
extern int gpio_direction_output(unsigned gpio, int value);
+extern int gpio_set_debounce(unsigned gpio, unsigned debounce);
+
extern int gpio_get_value_cansleep(unsigned gpio);
extern void gpio_set_value_cansleep(unsigned gpio, int value);
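A sketch of what the new debounce hook enables at the consumer level; the GPIO number and interval are made up, and chips without a set_debounce method will return an error, so callers should treat this as best effort:

#include <linux/gpio.h>
#include <linux/kernel.h>

#define BUTTON_GPIO	42	/* hypothetical */

static int setup_button(void)
{
	int err = gpio_request(BUTTON_GPIO, "button");
	if (err)
		return err;

	err = gpio_direction_input(BUTTON_GPIO);
	if (err) {
		gpio_free(BUTTON_GPIO);
		return err;
	}

	/* debounce interval in microseconds */
	if (gpio_set_debounce(BUTTON_GPIO, 5000) < 0)
		printk(KERN_INFO "button: no hardware debounce\n");

	return 0;
}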
diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
index 97e807c8c81..0232ccb76f2 100644
--- a/include/asm-generic/kmap_types.h
+++ b/include/asm-generic/kmap_types.h
@@ -29,6 +29,9 @@ KMAP_D(16) KM_IRQ_PTE,
KMAP_D(17) KM_NMI,
KMAP_D(18) KM_NMI_PTE,
KMAP_D(19) KM_KDB,
+/*
+ * Remember to update debug_kmap_atomic() when adding new kmap types!
+ */
KMAP_D(20) KM_TYPE_NR
};
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 04f91c2d3f7..b5043a9890d 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -80,7 +80,7 @@ extern void setup_per_cpu_areas(void);
#ifndef PER_CPU_BASE_SECTION
#ifdef CONFIG_SMP
-#define PER_CPU_BASE_SECTION ".data.percpu"
+#define PER_CPU_BASE_SECTION ".data..percpu"
#else
#define PER_CPU_BASE_SECTION ".data"
#endif
@@ -92,15 +92,15 @@ extern void setup_per_cpu_areas(void);
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_ALIGNED_SECTION ""
#else
-#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
-#define PER_CPU_ALIGNED_SECTION ".shared_aligned"
+#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
+#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
#endif
-#define PER_CPU_FIRST_SECTION ".first"
+#define PER_CPU_FIRST_SECTION "..first"
#else
#define PER_CPU_SHARED_ALIGNED_SECTION ""
-#define PER_CPU_ALIGNED_SECTION ".shared_aligned"
+#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
#define PER_CPU_FIRST_SECTION ""
#endif
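The single-dot to double-dot rename keeps kernel-private sections from clashing with gcc's -ffunction-sections/-fdata-sections output, which emits sections named .data.<symbol>; gcc never generates a ".." infix. Usage on the C side is unchanged, e.g.:

#include <linux/percpu.h>

/* placed in .data..percpu (previously .data.percpu) */
static DEFINE_PER_CPU(unsigned long, hits);

static void bump(void)
{
	get_cpu_var(hits)++;	/* disables preemption around the access */
	put_cpu_var(hits);
}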
diff --git a/include/asm-generic/scatterlist.h b/include/asm-generic/scatterlist.h
index 8b9454496a7..5de07355fad 100644
--- a/include/asm-generic/scatterlist.h
+++ b/include/asm-generic/scatterlist.h
@@ -11,7 +11,9 @@ struct scatterlist {
unsigned int offset;
unsigned int length;
dma_addr_t dma_address;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
unsigned int dma_length;
+#endif
};
/*
@@ -22,22 +24,11 @@ struct scatterlist {
* is 0.
*/
#define sg_dma_address(sg) ((sg)->dma_address)
-#ifndef sg_dma_len
-/*
- * Normally, you have an iommu on 64 bit machines, but not on 32 bit
- * machines. Architectures that are differnt should override this.
- */
-#if __BITS_PER_LONG == 64
+
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
#define sg_dma_len(sg) ((sg)->dma_length)
#else
#define sg_dma_len(sg) ((sg)->length)
-#endif /* 64 bit */
-#endif /* sg_dma_len */
-
-#ifndef ISA_DMA_THRESHOLD
-#define ISA_DMA_THRESHOLD (~0UL)
#endif
-#define ARCH_HAS_SG_CHAIN
-
#endif /* __ASM_GENERIC_SCATTERLIST_H */
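sg_dma_len() now keys off CONFIG_NEED_SG_DMA_LENGTH rather than guessing from the word size, so the separate dma_length field exists only on architectures whose IOMMU can merge entries. A sketch of the caller-side idiom, where program_hw_slot() is a hypothetical device-specific helper and nents is the value returned by dma_map_sg():

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

static void program_hw_slot(int slot, dma_addr_t addr, unsigned int len);

static void program_sg(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		program_hw_slot(i, sg_dma_address(sg), sg_dma_len(sg));
}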
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index 510df36dd5d..fd60700503c 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -34,6 +34,9 @@
#ifndef cpu_to_node
#define cpu_to_node(cpu) ((void)(cpu),0)
#endif
+#ifndef cpu_to_mem
+#define cpu_to_mem(cpu) ((void)(cpu),0)
+#endif
#ifndef parent_node
#define parent_node(node) ((void)(node),0)
#endif
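cpu_to_mem() serves memoryless-node configurations: it returns the nearest node that actually has memory, and this generic stub falls back to node 0 like its neighbours (the (void)(cpu) keeps the argument evaluated without an unused-variable warning). A sketch of the intended use, with alloc_near() as an illustrative wrapper:

#include <linux/slab.h>
#include <linux/topology.h>

static void *alloc_near(int cpu, size_t size)
{
	/* allocate from the closest node that has memory */
	return kmalloc_node(size, GFP_KERNEL, cpu_to_mem(cpu));
}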
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 67e652068e0..48c5299cbf2 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -175,25 +175,25 @@
#define NOSAVE_DATA \
. = ALIGN(PAGE_SIZE); \
VMLINUX_SYMBOL(__nosave_begin) = .; \
- *(.data.nosave) \
+ *(.data..nosave) \
. = ALIGN(PAGE_SIZE); \
VMLINUX_SYMBOL(__nosave_end) = .;
#define PAGE_ALIGNED_DATA(page_align) \
. = ALIGN(page_align); \
- *(.data.page_aligned)
+ *(.data..page_aligned)
#define READ_MOSTLY_DATA(align) \
. = ALIGN(align); \
- *(.data.read_mostly)
+ *(.data..read_mostly)
#define CACHELINE_ALIGNED_DATA(align) \
. = ALIGN(align); \
- *(.data.cacheline_aligned)
+ *(.data..cacheline_aligned)
#define INIT_TASK_DATA(align) \
. = ALIGN(align); \
- *(.data.init_task)
+ *(.data..init_task)
/*
* Read only Data
@@ -247,10 +247,10 @@
} \
\
/* RapidIO route ops */ \
- .rio_route : AT(ADDR(.rio_route) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start_rio_route_ops) = .; \
- *(.rio_route_ops) \
- VMLINUX_SYMBOL(__end_rio_route_ops) = .; \
+ .rio_ops : AT(ADDR(.rio_ops) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start_rio_switch_ops) = .; \
+ *(.rio_switch_ops) \
+ VMLINUX_SYMBOL(__end_rio_switch_ops) = .; \
} \
\
TRACEDATA \
@@ -435,7 +435,7 @@
*/
#define INIT_TASK_DATA_SECTION(align) \
. = ALIGN(align); \
- .data.init_task : { \
+ .data..init_task : { \
INIT_TASK_DATA(align) \
}
@@ -499,7 +499,7 @@
#define BSS(bss_align) \
. = ALIGN(bss_align); \
.bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
- *(.bss.page_aligned) \
+ *(.bss..page_aligned) \
*(.dynbss) \
*(.bss) \
*(COMMON) \
@@ -666,16 +666,16 @@
*/
#define PERCPU_VADDR(vaddr, phdr) \
VMLINUX_SYMBOL(__per_cpu_load) = .; \
- .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
- LOAD_OFFSET) { \
VMLINUX_SYMBOL(__per_cpu_start) = .; \
- *(.data.percpu.first) \
- *(.data.percpu.page_aligned) \
- *(.data.percpu) \
- *(.data.percpu.shared_aligned) \
+ *(.data..percpu..first) \
+ *(.data..percpu..page_aligned) \
+ *(.data..percpu) \
+ *(.data..percpu..shared_aligned) \
VMLINUX_SYMBOL(__per_cpu_end) = .; \
} phdr \
- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
+ . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
/**
* PERCPU - define output section for percpu area, simple version
@@ -687,18 +687,18 @@
*
* This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
* that __per_cpu_load is defined as a relative symbol against
- * .data.percpu which is required for relocatable x86_32
+ * .data..percpu which is required for relocatable x86_32
* configuration.
*/
#define PERCPU(align) \
. = ALIGN(align); \
- .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \
+ .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__per_cpu_load) = .; \
VMLINUX_SYMBOL(__per_cpu_start) = .; \
- *(.data.percpu.first) \
- *(.data.percpu.page_aligned) \
- *(.data.percpu) \
- *(.data.percpu.shared_aligned) \
+ *(.data..percpu..first) \
+ *(.data..percpu..page_aligned) \
+ *(.data..percpu) \
+ *(.data..percpu..shared_aligned) \
VMLINUX_SYMBOL(__per_cpu_end) = .; \
}
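The linker-script side mirrors the percpu.h rename: every kernel-private input section gains the ".." infix. The C-side annotations are untouched; a sketch of two variables that land in the renamed sections collected above:

#include <linux/cache.h>

/* gathered by READ_MOSTLY_DATA() into .data..read_mostly */
static int poll_interval __read_mostly = 10;

/* gathered by CACHELINE_ALIGNED_DATA() into .data..cacheline_aligned */
static struct {
	long a, b;
} hot_pair ____cacheline_aligned;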