Diffstat (limited to 'include/asm-generic')
 include/asm-generic/bitops/sched.h       | 21
 include/asm-generic/bug.h                |  2
 include/asm-generic/dma-mapping-broken.h | 82
 include/asm-generic/fcntl.h              |  3
 include/asm-generic/pgtable.h            | 44
 include/asm-generic/unaligned.h          | 16

 6 files changed, 91 insertions(+), 77 deletions(-)
diff --git a/include/asm-generic/bitops/sched.h b/include/asm-generic/bitops/sched.h
index 815bb014806..604fab7031a 100644
--- a/include/asm-generic/bitops/sched.h
+++ b/include/asm-generic/bitops/sched.h
@@ -6,28 +6,23 @@
/*
* Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
+ * way of searching a 100-bit bitmap. It's guaranteed that at least
+ * one of the 100 bits is cleared.
*/
static inline int sched_find_first_bit(const unsigned long *b)
{
#if BITS_PER_LONG == 64
- if (unlikely(b[0]))
+ if (b[0])
return __ffs(b[0]);
- if (likely(b[1]))
- return __ffs(b[1]) + 64;
- return __ffs(b[2]) + 128;
+ return __ffs(b[1]) + 64;
#elif BITS_PER_LONG == 32
- if (unlikely(b[0]))
+ if (b[0])
return __ffs(b[0]);
- if (unlikely(b[1]))
+ if (b[1])
return __ffs(b[1]) + 32;
- if (unlikely(b[2]))
+ if (b[2])
return __ffs(b[2]) + 64;
- if (b[3])
- return __ffs(b[3]) + 96;
- return __ffs(b[4]) + 128;
+ return __ffs(b[3]) + 96;
#else
#error BITS_PER_LONG not defined
#endif
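
The dropped likely()/unlikely() hints encoded the old comment's claim that the first 100 (RT-priority) bits were rarely set; with the bitmap cut down to 100 bits that bias no longer applies, so the hints go away along with the extra word. For illustration, the 64-bit branch can be exercised stand-alone; a minimal user-space sketch, assuming GCC's __builtin_ctzl() as a stand-in for the kernel's __ffs() (both return the index of the least significant set bit, and both are undefined on zero, hence the non-empty-bitmap requirement):

    #include <assert.h>

    /* Sketch of the patched 64-bit branch: a 100-bit bitmap now fits in
     * two longs, and the caller must guarantee at least one bit is set. */
    static int sched_find_first_bit(const unsigned long *b)
    {
            if (b[0])
                    return __builtin_ctzl(b[0]);
            return __builtin_ctzl(b[1]) + 64;
    }

    int main(void)
    {
            unsigned long b[2] = { 0, 1UL << 35 };  /* only bit 99 set */
            assert(sched_find_first_bit(b) == 99);
            return 0;
    }
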
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 7f30cce5285..344e3091af2 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -28,7 +28,7 @@ struct bug_entry {
#endif
#ifndef HAVE_ARCH_BUG_ON
-#define BUG_ON(condition) do { if (unlikely((condition)!=0)) BUG(); } while(0)
+#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0)
#endif
#ifndef HAVE_ARCH_WARN_ON
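
The "(condition)!=0" this hunk drops was redundant: unlikely(), as defined in include/linux/compiler.h, already normalizes its argument with a double negation before handing it to __builtin_expect(), so pointer and integer conditions behave the same either way. A minimal stand-alone sketch (BUG() is mapped to __builtin_trap() here purely for illustration):

    #include <stddef.h>

    /* unlikely() as in include/linux/compiler.h: the !! collapses any
     * scalar condition to 0 or 1 before the branch hint is applied. */
    #define unlikely(x)     __builtin_expect(!!(x), 0)

    #define BUG()           __builtin_trap()   /* illustrative stand-in */
    #define BUG_ON(cond)    do { if (unlikely(cond)) BUG(); } while (0)

    void validate(const void *p)
    {
            BUG_ON(p == NULL);  /* compiles to the same branch as before */
    }
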
diff --git a/include/asm-generic/dma-mapping-broken.h b/include/asm-generic/dma-mapping-broken.h
index 29413d3d460..e2468f894d2 100644
--- a/include/asm-generic/dma-mapping-broken.h
+++ b/include/asm-generic/dma-mapping-broken.h
@@ -1,24 +1,82 @@
#ifndef _ASM_GENERIC_DMA_MAPPING_H
#define _ASM_GENERIC_DMA_MAPPING_H
-/* This is used for archs that do not support DMA */
+/* define the dma api to allow compilation but not linking of
+ * dma dependent code. Code that depends on the dma-mapping
+ * API needs to set 'depends on HAS_DMA' in its Kconfig
+ */
-static inline void *
+struct scatterlist;
+
+extern void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t flag)
-{
- BUG();
- return NULL;
-}
+ gfp_t flag);
-static inline void
+extern void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t dma_handle)
-{
- BUG();
-}
+ dma_addr_t dma_handle);
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+extern dma_addr_t
+dma_map_single(struct device *dev, void *ptr, size_t size,
+ enum dma_data_direction direction);
+
+extern void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction direction);
+
+extern int
+dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction direction);
+
+extern void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+ enum dma_data_direction direction);
+
+extern dma_addr_t
+dma_map_page(struct device *dev, struct page *page, unsigned long offset,
+ size_t size, enum dma_data_direction direction);
+
+extern void
+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+ enum dma_data_direction direction);
+
+extern void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction);
+
+extern void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction);
+
+extern void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction);
+
+#define dma_sync_single_for_device dma_sync_single_for_cpu
+#define dma_sync_single_range_for_device dma_sync_single_range_for_cpu
+#define dma_sync_sg_for_device dma_sync_sg_for_cpu
+
+extern int
+dma_mapping_error(dma_addr_t dma_addr);
+
+extern int
+dma_supported(struct device *dev, u64 mask);
+
+extern int
+dma_set_mask(struct device *dev, u64 mask);
+
+extern int
+dma_get_cache_alignment(void);
+
+extern int
+dma_is_consistent(struct device *dev, dma_addr_t dma_handle);
+
+extern void
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction);
+
#endif /* _ASM_GENERIC_DMA_MAPPING_H */
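
Turning the BUG()-ing inline stubs into bare extern declarations moves the failure from run time to link time: code that calls the DMA API still compiles on a !HAS_DMA architecture, but the final link fails with an undefined reference until the offending driver gains the 'depends on HAS_DMA' Kconfig guard the new comment asks for. A hypothetical driver fragment showing the effect (example_probe and its arguments are invented for illustration):

    #include <linux/dma-mapping.h>

    /* Compiles fine against the extern declarations above; on a !HAS_DMA
     * build the link then fails with
     * "undefined reference to `dma_map_single'" instead of BUG()ing at
     * run time. */
    static int example_probe(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t handle;

            handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(handle))  /* single-argument form, as declared above */
                    return -EIO;
            dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
            return 0;
    }
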
diff --git a/include/asm-generic/fcntl.h b/include/asm-generic/fcntl.h
index c154b9d6e7e..b8477414c5c 100644
--- a/include/asm-generic/fcntl.h
+++ b/include/asm-generic/fcntl.h
@@ -48,6 +48,9 @@
#ifndef O_NOATIME
#define O_NOATIME 01000000
#endif
+#ifndef O_CLOEXEC
+#define O_CLOEXEC 02000000 /* set close_on_exec */
+#endif
#ifndef O_NDELAY
#define O_NDELAY O_NONBLOCK
#endif
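
O_CLOEXEC lets open() create a descriptor with close-on-exec already set. That matters in threaded programs: between a plain open() and the follow-up fcntl(fd, F_SETFD, FD_CLOEXEC), another thread's fork()+exec() can leak the descriptor into the child. A user-space sketch:

    #include <fcntl.h>

    int open_private(const char *path)
    {
            /* Atomic equivalent of open() followed by
             * fcntl(fd, F_SETFD, FD_CLOEXEC), without the window
             * between the two calls. */
            return open(path, O_RDONLY | O_CLOEXEC);
    }
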
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 7d7bcf990e9..f605e8d0eed 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -3,25 +3,6 @@
#ifndef __ASSEMBLY__
-#ifndef __HAVE_ARCH_PTEP_ESTABLISH
-/*
- * Establish a new mapping:
- * - flush the old one
- * - update the page tables
- * - inform the TLB about the new one
- *
- * We hold the mm semaphore for reading, and the pte lock.
- *
- * Note: the old pte is known to not be writable, so we don't need to
- * worry about dirty bits etc getting lost.
- */
-#define ptep_establish(__vma, __address, __ptep, __entry) \
-do { \
- set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
- flush_tlb_page(__vma, __address); \
-} while (0)
-#endif
-
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
* Largely same as above, but only sets the access flags (dirty,
@@ -68,31 +49,6 @@ do { \
})
#endif
-#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-#define ptep_test_and_clear_dirty(__vma, __address, __ptep) \
-({ \
- pte_t __pte = *__ptep; \
- int r = 1; \
- if (!pte_dirty(__pte)) \
- r = 0; \
- else \
- set_pte_at((__vma)->vm_mm, (__address), (__ptep), \
- pte_mkclean(__pte)); \
- r; \
-})
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
-#define ptep_clear_flush_dirty(__vma, __address, __ptep) \
-({ \
- int __dirty; \
- __dirty = ptep_test_and_clear_dirty(__vma, __address, __ptep); \
- if (__dirty) \
- flush_tlb_page(__vma, __address); \
- __dirty; \
-})
-#endif
-
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep) \
({ \
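
For readers skimming the removal, the deleted dirty-bit helpers expanded to roughly the functions below. This is a sketch of the macro bodies only; the _sketch names are invented, while pte_dirty(), pte_mkclean(), set_pte_at() and flush_tlb_page() are the kernel's own:

    /* What the removed ptep_test_and_clear_dirty() did: if the pte is
     * dirty, write back a cleaned copy and report that it was dirty. */
    static inline int ptep_test_and_clear_dirty_sketch(struct vm_area_struct *vma,
                    unsigned long address, pte_t *ptep)
    {
            pte_t pte = *ptep;

            if (!pte_dirty(pte))
                    return 0;
            set_pte_at(vma->vm_mm, address, ptep, pte_mkclean(pte));
            return 1;
    }

    /* What the removed ptep_clear_flush_dirty() did: clear the dirty bit
     * and flush the TLB entry only when the bit was actually set. */
    static inline int ptep_clear_flush_dirty_sketch(struct vm_area_struct *vma,
                    unsigned long address, pte_t *ptep)
    {
            int dirty = ptep_test_and_clear_dirty_sketch(vma, address, ptep);

            if (dirty)
                    flush_tlb_page(vma, address);
            return dirty;
    }
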
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
index 09ec447fe2a..16a466e5068 100644
--- a/include/asm-generic/unaligned.h
+++ b/include/asm-generic/unaligned.h
@@ -18,7 +18,8 @@
#define get_unaligned(ptr) \
__get_unaligned((ptr), sizeof(*(ptr)))
#define put_unaligned(x,ptr) \
- __put_unaligned((__u64)(x), (ptr), sizeof(*(ptr)))
+ ((void)sizeof(*(ptr)=(x)),\
+ __put_unaligned((__force __u64)(x), (ptr), sizeof(*(ptr))))
/*
* This function doesn't actually exist. The idea is that when
@@ -95,21 +96,21 @@ static inline void __ustw(__u16 val, __u16 *addr)
default: \
bad_unaligned_access_length(); \
}; \
- (__typeof__(*(ptr)))val; \
+ (__force __typeof__(*(ptr)))val; \
})
#define __put_unaligned(val, ptr, size) \
-do { \
+({ \
void *__gu_p = ptr; \
switch (size) { \
case 1: \
- *(__u8 *)__gu_p = val; \
+ *(__u8 *)__gu_p = (__force __u8)val; \
break; \
case 2: \
- __ustw(val, __gu_p); \
+ __ustw((__force __u16)val, __gu_p); \
break; \
case 4: \
- __ustl(val, __gu_p); \
+ __ustl((__force __u32)val, __gu_p); \
break; \
case 8: \
__ustq(val, __gu_p); \
@@ -117,6 +118,7 @@ do { \
default: \
bad_unaligned_access_length(); \
}; \
-} while(0)
+ (void)0; \
+})
#endif /* _ASM_GENERIC_UNALIGNED_H */
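
Two things change in put_unaligned(). First, the new "(void)sizeof(*(ptr)=(x))" term type-checks the assignment at compile time without evaluating it, since the operand of sizeof on a non-variably-modified type is never evaluated. Second, rewriting __put_unaligned() from do/while into a statement expression ending in "(void)0" lets it appear as the right-hand operand of the comma operator, which requires an expression rather than a statement. The __force casts are for sparse, so endian-annotated types such as __le32 can pass through without warnings. A usage sketch, assuming <asm/unaligned.h> is included (wire_header and encode() are invented):

    struct wire_header { unsigned char raw[6]; };

    void encode(struct wire_header *h, unsigned short id, unsigned int len)
    {
            /* Safe stores at offsets with no alignment guarantee. */
            put_unaligned(id,  (unsigned short *)&h->raw[0]);
            put_unaligned(len, (unsigned int *)&h->raw[2]);

            /* Now diagnosed at compile time by the sizeof() check:
             *   put_unaligned("oops", (unsigned int *)&h->raw[2]);
             * because a char * is not assignable to an unsigned int. */
    }
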