Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                           |   11
-rw-r--r--  lib/Kconfig.debug                     |  189
-rw-r--r--  lib/Makefile                          |    6
-rw-r--r--  lib/audit.c                           |   55
-rw-r--r--  lib/bitmap.c                          |   31
-rw-r--r--  lib/bust_spinlocks.c                  |    1
-rw-r--r--  lib/crc-ccitt.c                       |    6
-rw-r--r--  lib/crc16.c                           |   10
-rw-r--r--  lib/crc32.c                           |   54
-rw-r--r--  lib/debug_locks.c                     |   45
-rw-r--r--  lib/extable.c                         |    1
-rw-r--r--  lib/hweight.c                         |   10
-rw-r--r--  lib/idr.c                             |   71
-rw-r--r--  lib/kernel_lock.c                     |   11
-rw-r--r--  lib/klist.c                           |   26
-rw-r--r--  lib/kobject.c                         |    9
-rw-r--r--  lib/libcrc32c.c                       |    2
-rw-r--r--  lib/list_debug.c                      |   76
-rw-r--r--  lib/locking-selftest-hardirq.h        |    9
-rw-r--r--  lib/locking-selftest-mutex.h          |   11
-rw-r--r--  lib/locking-selftest-rlock-hardirq.h  |    2
-rw-r--r--  lib/locking-selftest-rlock-softirq.h  |    2
-rw-r--r--  lib/locking-selftest-rlock.h          |   14
-rw-r--r--  lib/locking-selftest-rsem.h           |   14
-rw-r--r--  lib/locking-selftest-softirq.h        |    9
-rw-r--r--  lib/locking-selftest-spin-hardirq.h   |    2
-rw-r--r--  lib/locking-selftest-spin-softirq.h   |    2
-rw-r--r--  lib/locking-selftest-spin.h           |   11
-rw-r--r--  lib/locking-selftest-wlock-hardirq.h  |    2
-rw-r--r--  lib/locking-selftest-wlock-softirq.h  |    2
-rw-r--r--  lib/locking-selftest-wlock.h          |   14
-rw-r--r--  lib/locking-selftest-wsem.h           |   14
-rw-r--r--  lib/locking-selftest.c                | 1216
-rw-r--r--  lib/plist.c                           |  118
-rw-r--r--  lib/radix-tree.c                      |    2
-rw-r--r--  lib/reed_solomon/reed_solomon.c       |   11
-rw-r--r--  lib/rwsem-spinlock.c                  |   66
-rw-r--r--  lib/rwsem.c                           |   53
-rw-r--r--  lib/semaphore-sleepers.c              |    1
-rw-r--r--  lib/spinlock_debug.c                  |  124
-rw-r--r--  lib/textsearch.c                      |    1
-rw-r--r--  lib/ts_bm.c                           |   12
-rw-r--r--  lib/ts_fsm.c                          |   11
-rw-r--r--  lib/ts_kmp.c                          |    1
-rw-r--r--  lib/vsprintf.c                        |   88
-rw-r--r--  lib/zlib_inflate/inffast.c            |    6
-rw-r--r--  lib/zlib_inflate/inflate.c            |    5
-rw-r--r--  lib/zlib_inflate/inftrees.c           |   18
48 files changed, 2153 insertions, 302 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 3de93357f5a..734ce95a93d 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -38,6 +38,11 @@ config LIBCRC32C
require M here. See Castagnoli93.
Module will be libcrc32c.
+config AUDIT_GENERIC
+ bool
+ depends on AUDIT && !AUDIT_ARCH
+ default y
+
#
# compression support is select'ed if needed
#
@@ -86,4 +91,10 @@ config TEXTSEARCH_BM
config TEXTSEARCH_FSM
tristate
+#
+# plist support is select'ed if needed
+#
+config PLIST
+ boolean
+
endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ccb0c1fdf1b..f9ae75cc014 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -8,6 +8,13 @@ config PRINTK_TIME
operations. This is useful for identifying long delays
in kernel startup.
+config ENABLE_MUST_CHECK
+ bool "Enable __must_check logic"
+ default y
+ help
+ Enable the __must_check logic in the kernel build. Disable this to
+ suppress the "warning: ignoring return value of 'foo', declared with
+ attribute warn_unused_result" messages.
config MAGIC_SYSRQ
bool "Magic SysRq key"
@@ -23,6 +30,22 @@ config MAGIC_SYSRQ
keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
unless you really know what this hack does.
+config UNUSED_SYMBOLS
+ bool "Enable unused/obsolete exported symbols"
+ default y if X86
+ help
+ Unused but exported symbols make the kernel needlessly bigger. For
+ that reason most of these unused exports will soon be removed. This
+ option is provided temporarily to allow a transition period in case
+ some external kernel module needs one of these symbols anyway. If you
+ encounter such a case in your module, consider whether you are actually
+ using the right API. (Rationale: since nobody in the kernel uses this
+ from a module, there is a pretty good chance it's actually the wrong
+ interface to use.) If you really need the symbol, please send a mail
+ to the Linux kernel mailing list mentioning the symbol, why you really
+ need it, and what your plan is for merging your module into the
+ mainline kernel.
+
config DEBUG_KERNEL
bool "Kernel debugging"
help
@@ -32,7 +55,7 @@ config DEBUG_KERNEL
config LOG_BUF_SHIFT
int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" if DEBUG_KERNEL
range 12 21
- default 17 if S390
+ default 17 if S390 || LOCKDEP
default 16 if X86_NUMAQ || IA64
default 15 if SMP
default 14
@@ -91,7 +114,7 @@ config DEBUG_SLAB_LEAK
config DEBUG_PREEMPT
bool "Debug preemptible kernel"
- depends on DEBUG_KERNEL && PREEMPT
+ depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT
default y
help
If you say Y here then the kernel will use a debug variant of the
@@ -99,16 +122,26 @@ config DEBUG_PREEMPT
if kernel code uses it in a preemption-unsafe way. Also, the kernel
will detect preemption count underflows.
-config DEBUG_MUTEXES
- bool "Mutex debugging, deadlock detection"
- default n
- depends on DEBUG_KERNEL
+config DEBUG_RT_MUTEXES
+ bool "RT Mutex debugging, deadlock detection"
+ depends on DEBUG_KERNEL && RT_MUTEXES
+ help
+ This allows rt mutex semantics violations and rt mutex related
+ deadlocks (lockups) to be detected and reported automatically.
+
+config DEBUG_PI_LIST
+ bool
+ default y
+ depends on DEBUG_RT_MUTEXES
+
+config RT_MUTEX_TESTER
+ bool "Built-in scriptable tester for rt-mutexes"
+ depends on DEBUG_KERNEL && RT_MUTEXES
help
- This allows mutex semantics violations and mutex related deadlocks
- (lockups) to be detected and reported automatically.
+ This option enables an rt-mutex tester.
config DEBUG_SPINLOCK
- bool "Spinlock debugging"
+ bool "Spinlock and rw-lock debugging: basic checks"
depends on DEBUG_KERNEL
help
Say Y here and build SMP to catch missing spinlock initialization
@@ -116,13 +149,124 @@ config DEBUG_SPINLOCK
best used in conjunction with the NMI watchdog so that spinlock
deadlocks are also debuggable.
+config DEBUG_MUTEXES
+ bool "Mutex debugging: basic checks"
+ depends on DEBUG_KERNEL
+ help
+ This feature allows mutex semantics violations to be detected and
+ reported.
+
+config DEBUG_RWSEMS
+ bool "RW-sem debugging: basic checks"
+ depends on DEBUG_KERNEL
+ help
+ This feature allows read-write semaphore semantics violations to
+ be detected and reported.
+
+config DEBUG_LOCK_ALLOC
+ bool "Lock debugging: detect incorrect freeing of live locks"
+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+ select DEBUG_SPINLOCK
+ select DEBUG_MUTEXES
+ select DEBUG_RWSEMS
+ select LOCKDEP
+ help
+ This feature will check whether any held lock (spinlock, rwlock,
+ mutex or rwsem) is incorrectly freed by the kernel, via any of the
+ memory-freeing routines (kfree(), kmem_cache_free(), free_pages(),
+ vfree(), etc.), whether a live lock is incorrectly reinitialized via
+ spin_lock_init()/mutex_init()/etc., or whether there is any lock
+ held during task exit.
+
+config PROVE_LOCKING
+ bool "Lock debugging: prove locking correctness"
+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+ select LOCKDEP
+ select DEBUG_SPINLOCK
+ select DEBUG_MUTEXES
+ select DEBUG_RWSEMS
+ select DEBUG_LOCK_ALLOC
+ default n
+ help
+ This feature enables the kernel to prove that all locking
+ that occurs in the kernel runtime is mathematically
+ correct: that under no circumstance could an arbitrary (and
+ not yet triggered) combination of observed locking
+ sequences (on an arbitrary number of CPUs, running an
+ arbitrary number of tasks and interrupt contexts) cause a
+ deadlock.
+
+ In short, this feature enables the kernel to report locking
+ related deadlocks before they actually occur.
+
+ The proof does not depend on how hard and complex a
+ deadlock scenario would be to trigger: how many
+ participant CPUs, tasks and irq-contexts would be needed
+ for it to trigger. The proof also does not depend on
+ timing: if a race and a resulting deadlock is possible
+ theoretically (no matter how unlikely the race scenario
+ is), it will be proven so and will immediately be
+ reported by the kernel (once the event is observed that
+ makes the deadlock theoretically possible).
+
+ If a deadlock is impossible (i.e. the locking rules, as
+ observed by the kernel, are mathematically correct), the
+ kernel reports nothing.
+
+ NOTE: this feature can also be enabled for rwlocks, mutexes
+ and rwsems - in which case all dependencies between these
+ different locking variants are observed and mapped too, and
+ the proof of observed correctness is also maintained for an
+ arbitrary combination of these separate locking variants.
+
+ For more details, see Documentation/lockdep-design.txt.
+
+config LOCKDEP
+ bool
+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+ select STACKTRACE
+ select FRAME_POINTER if !X86
+ select KALLSYMS
+ select KALLSYMS_ALL
+
+config DEBUG_LOCKDEP
+ bool "Lock dependency engine debugging"
+ depends on DEBUG_KERNEL && LOCKDEP
+ help
+ If you say Y here, the lock dependency engine will do
+ additional runtime checks to debug itself, at the price
+ of more runtime overhead.
+
+config TRACE_IRQFLAGS
+ depends on DEBUG_KERNEL
+ bool
+ default y
+ depends on TRACE_IRQFLAGS_SUPPORT
+ depends on PROVE_LOCKING
+
config DEBUG_SPINLOCK_SLEEP
- bool "Sleep-inside-spinlock checking"
+ bool "Spinlock debugging: sleep-inside-spinlock checking"
depends on DEBUG_KERNEL
help
If you say Y here, various routines which may sleep will become very
noisy if they are called with a spinlock held.
+config DEBUG_LOCKING_API_SELFTESTS
+ bool "Locking API boot-time self-tests"
+ depends on DEBUG_KERNEL
+ help
+ Say Y here if you want the kernel to run a short self-test during
+ bootup. The self-test checks whether common types of locking bugs
+ are detected by the debugging mechanisms. (If you disable
+ lock debugging then those bugs won't be detected, of course.)
+ The following locking APIs are covered: spinlocks, rwlocks,
+ mutexes and rwsems.
+
+config STACKTRACE
+ bool
+ depends on DEBUG_KERNEL
+ depends on STACKTRACE_SUPPORT
+
config DEBUG_KOBJECT
bool "kobject debugging"
depends on DEBUG_KERNEL
@@ -140,7 +284,7 @@ config DEBUG_HIGHMEM
config DEBUG_BUGVERBOSE
bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EMBEDDED
depends on BUG
- depends on ARM || ARM26 || M32R || M68K || SPARC32 || SPARC64 || X86_32 || FRV
+ depends on ARM || ARM26 || AVR32 || M32R || M68K || SPARC32 || SPARC64 || X86_32 || FRV || SUPERH
default !EMBEDDED
help
Say Y here to make BUG() panics output the file name and line number
@@ -176,9 +320,18 @@ config DEBUG_VM
If unsure, say N.
+config DEBUG_LIST
+ bool "Debug linked list manipulation"
+ depends on DEBUG_KERNEL
+ help
+ Enable this to turn on extended checks in the linked-list
+ walking routines.
+
+ If unsure, say N.
+
config FRAME_POINTER
bool "Compile the kernel with frame pointers"
- depends on DEBUG_KERNEL && (X86 || CRIS || M68K || M68KNOMMU || FRV || UML)
+ depends on DEBUG_KERNEL && (X86 || CRIS || M68K || M68KNOMMU || FRV || UML || S390 || AVR32 || SUPERH)
default y if DEBUG_INFO && UML
help
If you say Y here the resulting kernel image will be slightly larger
@@ -188,14 +341,22 @@ config FRAME_POINTER
config UNWIND_INFO
bool "Compile the kernel with frame unwind information"
- depends on !IA64
- depends on !MODULES || !(MIPS || PARISC || PPC || SUPERH || V850)
+ depends on !IA64 && !PARISC
+ depends on !MODULES || !(MIPS || PPC || SUPERH || V850)
help
If you say Y here the resulting kernel image will be slightly larger
but not slower, and it will give very useful debugging information.
If you don't debug the kernel, you can say N, but we may not be able
to solve problems without frame unwind information or frame pointers.
+config STACK_UNWIND
+ bool "Stack unwind support"
+ depends on UNWIND_INFO
+ depends on X86
+ help
+ This enables more precise stack traces, omitting all unrelated
+ occurrences of pointers into kernel code from the dump.
+
config FORCED_INLINING
bool "Force gcc to inline functions marked 'inline'"
depends on DEBUG_KERNEL
diff --git a/lib/Makefile b/lib/Makefile
index 79358ad1f11..402762fead7 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -11,13 +11,14 @@ lib-$(CONFIG_SMP) += cpumask.o
lib-y += kobject.o kref.o kobject_uevent.o klist.o
-obj-y += sort.o parser.o halfmd4.o iomap_copy.o
+obj-y += sort.o parser.o halfmd4.o iomap_copy.o debug_locks.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
CFLAGS_kobject_uevent.o += -DDEBUG
endif
+obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
@@ -25,7 +26,9 @@ lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o
lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
lib-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
+obj-$(CONFIG_PLIST) += plist.o
obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
+obj-$(CONFIG_DEBUG_LIST) += list_debug.o
ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
lib-y += dec_and_lock.o
@@ -47,6 +50,7 @@ obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
obj-$(CONFIG_SMP) += percpu_counter.o
+obj-$(CONFIG_AUDIT_GENERIC) += audit.o
obj-$(CONFIG_SWIOTLB) += swiotlb.o
diff --git a/lib/audit.c b/lib/audit.c
new file mode 100644
index 00000000000..3b1289fadf0
--- /dev/null
+++ b/lib/audit.c
@@ -0,0 +1,55 @@
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/audit.h>
+#include <asm/unistd.h>
+
+static unsigned dir_class[] = {
+#include <asm-generic/audit_dir_write.h>
+~0U
+};
+
+static unsigned read_class[] = {
+#include <asm-generic/audit_read.h>
+~0U
+};
+
+static unsigned write_class[] = {
+#include <asm-generic/audit_write.h>
+~0U
+};
+
+static unsigned chattr_class[] = {
+#include <asm-generic/audit_change_attr.h>
+~0U
+};
+
+int audit_classify_syscall(int abi, unsigned syscall)
+{
+ switch (syscall) {
+ case __NR_open:
+ return 2;
+#ifdef __NR_openat
+ case __NR_openat:
+ return 3;
+#endif
+#ifdef __NR_socketcall
+ case __NR_socketcall:
+ return 4;
+#endif
+ case __NR_execve:
+ return 5;
+ default:
+ return 0;
+ }
+}
+
+static int __init audit_classes_init(void)
+{
+ audit_register_class(AUDIT_CLASS_WRITE, write_class);
+ audit_register_class(AUDIT_CLASS_READ, read_class);
+ audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
+ audit_register_class(AUDIT_CLASS_CHATTR, chattr_class);
+ return 0;
+}
+
+__initcall(audit_classes_init);
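The asm-generic includes above expand each table into a ~0U-terminated array of syscall numbers. A minimal sketch of how such a table can be scanned (an assumption for illustration; the kernel's own matching lives behind audit_register_class()):

/* Sketch: linear scan of a ~0U-terminated syscall-class table. */
static int syscall_in_class(const unsigned *class, unsigned syscall)
{
	while (*class != ~0U)
		if (*class++ == syscall)
			return 1;
	return 0;
}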
diff --git a/lib/bitmap.c b/lib/bitmap.c
index ed2ae3b0cd0..d71e38c54ea 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -317,16 +317,16 @@ EXPORT_SYMBOL(bitmap_scnprintf);
/**
* bitmap_parse - convert an ASCII hex string into a bitmap.
- * @buf: pointer to buffer in user space containing string.
- * @buflen: buffer size in bytes. If string is smaller than this
+ * @ubuf: pointer to buffer in user space containing string.
+ * @ubuflen: buffer size in bytes. If string is smaller than this
* then it must be terminated with a \0.
* @maskp: pointer to bitmap array that will contain result.
* @nmaskbits: size of bitmap, in bits.
*
* Commas group hex digits into chunks. Each chunk defines exactly 32
* bits of the resultant bitmask. No chunk may specify a value larger
- * than 32 bits (-EOVERFLOW), and if a chunk specifies a smaller value
- * then leading 0-bits are prepended. -EINVAL is returned for illegal
+ * than 32 bits (%-EOVERFLOW), and if a chunk specifies a smaller value
+ * then leading 0-bits are prepended. %-EINVAL is returned for illegal
* characters and for grouping errors such as "1,,5", ",44", "," and "".
* Leading and trailing whitespace accepted, but not embedded whitespace.
*/
@@ -452,8 +452,8 @@ EXPORT_SYMBOL(bitmap_scnlistprintf);
/**
* bitmap_parselist - convert list format ASCII string to bitmap
- * @buf: read nul-terminated user string from this buffer
- * @mask: write resulting mask here
+ * @bp: read nul-terminated user string from this buffer
+ * @maskp: write resulting mask here
* @nmaskbits: number of bits in mask to be written
*
* Input format is a comma-separated list of decimal numbers and
@@ -461,10 +461,11 @@ EXPORT_SYMBOL(bitmap_scnlistprintf);
* decimal numbers, the smallest and largest bit numbers set in
* the range.
*
- * Returns 0 on success, -errno on invalid input strings:
- * -EINVAL: second number in range smaller than first
- * -EINVAL: invalid character in string
- * -ERANGE: bit number specified too large for mask
+ * Returns 0 on success, -errno on invalid input strings.
+ * Error values:
+ * %-EINVAL: second number in range smaller than first
+ * %-EINVAL: invalid character in string
+ * %-ERANGE: bit number specified too large for mask
*/
int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits)
{
@@ -625,10 +626,10 @@ EXPORT_SYMBOL(bitmap_remap);
/**
* bitmap_bitremap - Apply map defined by a pair of bitmaps to a single bit
- * @oldbit - bit position to be mapped
- * @old: defines domain of map
- * @new: defines range of map
- * @bits: number of bits in each of these bitmaps
+ * @oldbit: bit position to be mapped
+ * @old: defines domain of map
+ * @new: defines range of map
+ * @bits: number of bits in each of these bitmaps
*
* Let @old and @new define a mapping of bit positions, such that
* whatever position is held by the n-th set bit in @old is mapped
@@ -790,7 +791,7 @@ EXPORT_SYMBOL(bitmap_release_region);
*
* Allocate (set bits in) a specified region of a bitmap.
*
- * Return 0 on success, or -EBUSY if specified region wasn't
+ * Return 0 on success, or %-EBUSY if specified region wasn't
* free (not all bits were zero).
*/
int bitmap_allocate_region(unsigned long *bitmap, int pos, int order)
diff --git a/lib/bust_spinlocks.c b/lib/bust_spinlocks.c
index 6bb7319e09a..a2055bc3ef6 100644
--- a/lib/bust_spinlocks.c
+++ b/lib/bust_spinlocks.c
@@ -7,7 +7,6 @@
* and panic() information from reaching the user.
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/tty.h>
diff --git a/lib/crc-ccitt.c b/lib/crc-ccitt.c
index 115d149af40..7f6dd68d2d0 100644
--- a/lib/crc-ccitt.c
+++ b/lib/crc-ccitt.c
@@ -53,9 +53,9 @@ EXPORT_SYMBOL(crc_ccitt_table);
/**
* crc_ccitt - recompute the CRC for the data buffer
- * @crc - previous CRC value
- * @buffer - data pointer
- * @len - number of bytes in the buffer
+ * @crc: previous CRC value
+ * @buffer: data pointer
+ * @len: number of bytes in the buffer
*/
u16 crc_ccitt(u16 crc, u8 const *buffer, size_t len)
{
diff --git a/lib/crc16.c b/lib/crc16.c
index 011fe573c66..8737b084d1f 100644
--- a/lib/crc16.c
+++ b/lib/crc16.c
@@ -47,12 +47,12 @@ u16 const crc16_table[256] = {
EXPORT_SYMBOL(crc16_table);
/**
- * Compute the CRC-16 for the data buffer
+ * crc16 - compute the CRC-16 for the data buffer
+ * @crc: previous CRC value
+ * @buffer: data pointer
+ * @len: number of bytes in the buffer
*
- * @param crc previous CRC value
- * @param buffer data pointer
- * @param len number of bytes in the buffer
- * @return the updated CRC value
+ * Returns the updated CRC value.
*/
u16 crc16(u16 crc, u8 const *buffer, size_t len)
{
diff --git a/lib/crc32.c b/lib/crc32.c
index 065198f98b3..285fd9bc61b 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -42,20 +42,21 @@ MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
MODULE_DESCRIPTION("Ethernet CRC32 calculations");
MODULE_LICENSE("GPL");
+/**
+ * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32
+ * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for
+ * other uses, or the previous crc32 value if computing incrementally.
+ * @p: pointer to buffer over which CRC is run
+ * @len: length of buffer @p
+ */
+u32 __attribute_pure__ crc32_le(u32 crc, unsigned char const *p, size_t len);
+
#if CRC_LE_BITS == 1
/*
* In fact, the table-based code will work in this case, but it can be
* simplified by inlining the table in ?: form.
*/
-/**
- * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32
- * @crc - seed value for computation. ~0 for Ethernet, sometimes 0 for
- * other uses, or the previous crc32 value if computing incrementally.
- * @p - pointer to buffer over which CRC is run
- * @len - length of buffer @p
- *
- */
u32 __attribute_pure__ crc32_le(u32 crc, unsigned char const *p, size_t len)
{
int i;
@@ -68,14 +69,6 @@ u32 __attribute_pure__ crc32_le(u32 crc, unsigned char const *p, size_t len)
}
#else /* Table-based approach */
-/**
- * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32
- * @crc - seed value for computation. ~0 for Ethernet, sometimes 0 for
- * other uses, or the previous crc32 value if computing incrementally.
- * @p - pointer to buffer over which CRC is run
- * @len - length of buffer @p
- *
- */
u32 __attribute_pure__ crc32_le(u32 crc, unsigned char const *p, size_t len)
{
# if CRC_LE_BITS == 8
@@ -145,20 +138,21 @@ u32 __attribute_pure__ crc32_le(u32 crc, unsigned char const *p, size_t len)
}
#endif
+/**
+ * crc32_be() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
+ * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for
+ * other uses, or the previous crc32 value if computing incrementally.
+ * @p: pointer to buffer over which CRC is run
+ * @len: length of buffer @p
+ */
+u32 __attribute_pure__ crc32_be(u32 crc, unsigned char const *p, size_t len);
+
#if CRC_BE_BITS == 1
/*
* In fact, the table-based code will work in this case, but it can be
* simplified by inlining the table in ?: form.
*/
-/**
- * crc32_be() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
- * @crc - seed value for computation. ~0 for Ethernet, sometimes 0 for
- * other uses, or the previous crc32 value if computing incrementally.
- * @p - pointer to buffer over which CRC is run
- * @len - length of buffer @p
- *
- */
u32 __attribute_pure__ crc32_be(u32 crc, unsigned char const *p, size_t len)
{
int i;
@@ -173,14 +167,6 @@ u32 __attribute_pure__ crc32_be(u32 crc, unsigned char const *p, size_t len)
}
#else /* Table-based approach */
-/**
- * crc32_be() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
- * @crc - seed value for computation. ~0 for Ethernet, sometimes 0 for
- * other uses, or the previous crc32 value if computing incrementally.
- * @p - pointer to buffer over which CRC is run
- * @len - length of buffer @p
- *
- */
u32 __attribute_pure__ crc32_be(u32 crc, unsigned char const *p, size_t len)
{
# if CRC_BE_BITS == 8
@@ -249,6 +235,10 @@ u32 __attribute_pure__ crc32_be(u32 crc, unsigned char const *p, size_t len)
}
#endif
+/**
+ * bitreverse - reverse the order of bits in a u32 value
+ * @x: value to be bit-reversed
+ */
u32 bitreverse(u32 x)
{
x = (x >> 16) | (x << 16);
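The listing cuts off here, but the swap pattern that bitreverse() opens with continues through progressively smaller groups until single bits are exchanged. A self-contained sketch of the standard technique (illustrative, not copied from this file):

#include <stdint.h>

/* Sketch: reverse a 32-bit word by swapping halves, then bytes,
 * nibbles, bit-pairs and finally single bits. */
static uint32_t bitreverse32(uint32_t x)
{
	x = (x >> 16) | (x << 16);
	x = ((x >> 8) & 0x00ff00ffu) | ((x & 0x00ff00ffu) << 8);
	x = ((x >> 4) & 0x0f0f0f0fu) | ((x & 0x0f0f0f0fu) << 4);
	x = ((x >> 2) & 0x33333333u) | ((x & 0x33333333u) << 2);
	x = ((x >> 1) & 0x55555555u) | ((x & 0x55555555u) << 1);
	return x;
}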
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
new file mode 100644
index 00000000000..0ef01d14727
--- /dev/null
+++ b/lib/debug_locks.c
@@ -0,0 +1,45 @@
+/*
+ * lib/debug_locks.c
+ *
+ * Generic place for common debugging facilities for various locks:
+ * spinlocks, rwlocks, mutexes and rwsems.
+ *
+ * Started by Ingo Molnar:
+ *
+ * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ */
+#include <linux/rwsem.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/debug_locks.h>
+
+/*
+ * We want to turn all lock-debugging facilities on/off at once,
+ * via a global flag. The reason is that once a single bug has been
+ * detected and reported, there might be a cascade of follow-up bugs
+ * that would just muddy the log. So we report the first one and
+ * shut up after that.
+ */
+int debug_locks = 1;
+
+/*
+ * The locking-testsuite uses <debug_locks_silent> to get a
+ * 'silent failure': nothing is printed to the console when
+ * a locking bug is detected.
+ */
+int debug_locks_silent;
+
+/*
+ * Generic 'turn off all lock debugging' function:
+ */
+int debug_locks_off(void)
+{
+ if (xchg(&debug_locks, 0)) {
+ if (!debug_locks_silent) {
+ console_verbose();
+ return 1;
+ }
+ }
+ return 0;
+}
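debug_locks_off() returns 1 exactly once per bug (and never while debug_locks_silent is set), so lock-debugging code can use it as a one-shot gate around its report. A minimal caller sketch (hypothetical helper, in the spirit of the spinlock-debugging code):

/* Sketch: print one report for the first lock bug, stay quiet after. */
static void report_lock_bug(const char *msg)
{
	if (!debug_locks_off())
		return;		/* already reported, or silenced */
	printk(KERN_EMERG "BUG: %s\n", msg);
	dump_stack();
}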
diff --git a/lib/extable.c b/lib/extable.c
index 01c08b5836f..463f4560f16 100644
--- a/lib/extable.c
+++ b/lib/extable.c
@@ -9,7 +9,6 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sort.h>
diff --git a/lib/hweight.c b/lib/hweight.c
index 43825767170..360556a7803 100644
--- a/lib/hweight.c
+++ b/lib/hweight.c
@@ -1,5 +1,6 @@
#include <linux/module.h>
#include <asm/types.h>
+#include <asm/bitops.h>
/**
* hweightN - returns the hamming weight of a N-bit word
@@ -40,14 +41,19 @@ unsigned long hweight64(__u64 w)
#if BITS_PER_LONG == 32
return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
#elif BITS_PER_LONG == 64
+#ifdef ARCH_HAS_FAST_MULTIPLIER
+ w -= (w >> 1) & 0x5555555555555555ul;
+ w = (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul);
+ w = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful;
+ return (w * 0x0101010101010101ul) >> 56;
+#else
__u64 res = w - ((w >> 1) & 0x5555555555555555ul);
res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
res = res + (res >> 8);
res = res + (res >> 16);
return (res + (res >> 32)) & 0x00000000000000FFul;
-#else
-#error BITS_PER_LONG not defined
+#endif
#endif
}
EXPORT_SYMBOL(hweight64);
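In the ARCH_HAS_FAST_MULTIPLIER branch added above, the first three steps leave each byte of w holding its own population count; multiplying by 0x0101010101010101 then accumulates all eight byte counts into the most significant byte, which the final shift by 56 extracts. A standalone userspace check of the trick (illustrative, not part of the patch):

#include <stdint.h>
#include <assert.h>

/* Sketch: the multiplier-based popcount, verified on known values. */
static unsigned popcount64_mul(uint64_t w)
{
	w -= (w >> 1) & 0x5555555555555555ull;
	w  = (w & 0x3333333333333333ull) + ((w >> 2) & 0x3333333333333333ull);
	w  = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0full;
	return (unsigned)((w * 0x0101010101010101ull) >> 56);
}

int main(void)
{
	assert(popcount64_mul(0) == 0);
	assert(popcount64_mul(0x8000000000000001ull) == 2);
	assert(popcount64_mul(~0ull) == 64);
	return 0;
}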
diff --git a/lib/idr.c b/lib/idr.c
index d226259c3c2..16d2143fea4 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -29,6 +29,7 @@
#include <linux/init.h>
#include <linux/module.h>
#endif
+#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>
@@ -37,27 +38,36 @@ static kmem_cache_t *idr_layer_cache;
static struct idr_layer *alloc_layer(struct idr *idp)
{
struct idr_layer *p;
+ unsigned long flags;
- spin_lock(&idp->lock);
+ spin_lock_irqsave(&idp->lock, flags);
if ((p = idp->id_free)) {
idp->id_free = p->ary[0];
idp->id_free_cnt--;
p->ary[0] = NULL;
}
- spin_unlock(&idp->lock);
+ spin_unlock_irqrestore(&idp->lock, flags);
return(p);
}
+/* only called when idp->lock is held */
+static void __free_layer(struct idr *idp, struct idr_layer *p)
+{
+ p->ary[0] = idp->id_free;
+ idp->id_free = p;
+ idp->id_free_cnt++;
+}
+
static void free_layer(struct idr *idp, struct idr_layer *p)
{
+ unsigned long flags;
+
/*
* Depends on the return element being zeroed.
*/
- spin_lock(&idp->lock);
- p->ary[0] = idp->id_free;
- idp->id_free = p;
- idp->id_free_cnt++;
- spin_unlock(&idp->lock);
+ spin_lock_irqsave(&idp->lock, flags);
+ __free_layer(idp, p);
+ spin_unlock_irqrestore(&idp->lock, flags);
}
/**
@@ -161,6 +171,7 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
struct idr_layer *p, *new;
int layers, v, id;
+ unsigned long flags;
id = starting_id;
build_up:
@@ -184,12 +195,14 @@ build_up:
* The allocation failed. If we built part of
* the structure tear it down.
*/
+ spin_lock_irqsave(&idp->lock, flags);
for (new = p; p && p != idp->top; new = p) {
p = p->ary[0];
new->ary[0] = NULL;
new->bitmap = new->count = 0;
- free_layer(idp, new);
+ __free_layer(idp, new);
}
+ spin_unlock_irqrestore(&idp->lock, flags);
return -1;
}
new->ary[0] = p;
@@ -390,6 +403,48 @@ void *idr_find(struct idr *idp, int id)
}
EXPORT_SYMBOL(idr_find);
+/**
+ * idr_replace - replace pointer for given id
+ * @idp: idr handle
+ * @ptr: pointer you want associated with the id
+ * @id: lookup key
+ *
+ * Replace the pointer registered with an id and return the old value.
+ * A -ENOENT return indicates that @id was not found.
+ * A -EINVAL return indicates that @id was not within valid constraints.
+ *
+ * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
+ */
+void *idr_replace(struct idr *idp, void *ptr, int id)
+{
+ int n;
+ struct idr_layer *p, *old_p;
+
+ n = idp->layers * IDR_BITS;
+ p = idp->top;
+
+ id &= MAX_ID_MASK;
+
+ if (id >= (1 << n))
+ return ERR_PTR(-EINVAL);
+
+ n -= IDR_BITS;
+ while ((n > 0) && p) {
+ p = p->ary[(id >> n) & IDR_MASK];
+ n -= IDR_BITS;
+ }
+
+ n = id & IDR_MASK;
+ if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
+ return ERR_PTR(-ENOENT);
+
+ old_p = p->ary[n];
+ p->ary[n] = ptr;
+
+ return old_p;
+}
+EXPORT_SYMBOL(idr_replace);
+
static void idr_cache_ctor(void * idr_layer, kmem_cache_t *idr_layer_cache,
unsigned long flags)
{
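Because idr_replace() reports failure through ERR_PTR() values rather than NULL, callers must test the result with IS_ERR(). A hypothetical caller sketch (struct my_obj and free_object() are made up for illustration):

/* Sketch: swap the object stored under @id and release the old one. */
static int swap_object(struct idr *idr, int id, struct my_obj *new_obj)
{
	struct my_obj *old;

	old = idr_replace(idr, new_obj, id);
	if (IS_ERR(old))
		return PTR_ERR(old);	/* -ENOENT or -EINVAL */
	free_object(old);		/* hypothetical release helper */
	return 0;
}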
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index cb5490ec00f..e0fdfddb406 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -14,7 +14,7 @@
* The 'big kernel semaphore'
*
* This mutex is taken and released recursively by lock_kernel()
- * and unlock_kernel(). It is transparently dropped and reaquired
+ * and unlock_kernel(). It is transparently dropped and reacquired
* over schedule(). It is used to protect legacy code that hasn't
* been migrated to a proper locking design yet.
*
@@ -92,7 +92,7 @@ void __lockfunc unlock_kernel(void)
* The 'big kernel lock'
*
* This spinlock is taken and released recursively by lock_kernel()
- * and unlock_kernel(). It is transparently dropped and reaquired
+ * and unlock_kernel(). It is transparently dropped and reacquired
* over schedule(). It is used to protect legacy code that hasn't
* been migrated to a proper locking design yet.
*
@@ -177,7 +177,12 @@ static inline void __lock_kernel(void)
static inline void __unlock_kernel(void)
{
- spin_unlock(&kernel_flag);
+ /*
+ * the BKL is not covered by lockdep, so we open-code the
+ * unlocking sequence (and thus avoid the dep-chain ops):
+ */
+ _raw_spin_unlock(&kernel_flag);
+ preempt_enable();
}
/*
diff --git a/lib/klist.c b/lib/klist.c
index 9c94f0b163a..120bd175aa7 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -123,12 +123,10 @@ EXPORT_SYMBOL_GPL(klist_add_tail);
static void klist_release(struct kref * kref)
{
struct klist_node * n = container_of(kref, struct klist_node, n_ref);
- void (*put)(struct klist_node *) = n->n_klist->put;
+
list_del(&n->n_node);
complete(&n->n_removed);
n->n_klist = NULL;
- if (put)
- put(n);
}
static int klist_dec_and_del(struct klist_node * n)
@@ -145,10 +143,14 @@ static int klist_dec_and_del(struct klist_node * n)
void klist_del(struct klist_node * n)
{
struct klist * k = n->n_klist;
+ void (*put)(struct klist_node *) = k->put;
spin_lock(&k->k_lock);
- klist_dec_and_del(n);
+ if (!klist_dec_and_del(n))
+ put = NULL;
spin_unlock(&k->k_lock);
+ if (put)
+ put(n);
}
EXPORT_SYMBOL_GPL(klist_del);
@@ -161,10 +163,7 @@ EXPORT_SYMBOL_GPL(klist_del);
void klist_remove(struct klist_node * n)
{
- struct klist * k = n->n_klist;
- spin_lock(&k->k_lock);
- klist_dec_and_del(n);
- spin_unlock(&k->k_lock);
+ klist_del(n);
wait_for_completion(&n->n_removed);
}
@@ -260,12 +259,15 @@ static struct klist_node * to_klist_node(struct list_head * n)
struct klist_node * klist_next(struct klist_iter * i)
{
struct list_head * next;
+ struct klist_node * lnode = i->i_cur;
struct klist_node * knode = NULL;
+ void (*put)(struct klist_node *) = i->i_klist->put;
spin_lock(&i->i_klist->k_lock);
- if (i->i_cur) {
- next = i->i_cur->n_node.next;
- klist_dec_and_del(i->i_cur);
+ if (lnode) {
+ next = lnode->n_node.next;
+ if (!klist_dec_and_del(lnode))
+ put = NULL;
} else
next = i->i_head->next;
@@ -275,6 +277,8 @@ struct klist_node * klist_next(struct klist_iter * i)
}
i->i_cur = knode;
spin_unlock(&i->i_klist->k_lock);
+ if (put && lnode)
+ put(lnode);
return knode;
}
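All three klist changes follow one pattern: decide under k_lock whether the last reference was dropped, but invoke the possibly-sleeping put() callback only after releasing the spinlock. The pattern in isolation (a sketch of what the patch does, not new code):

/* Sketch: choose the callback under the lock, call it outside. */
void (*put)(struct klist_node *) = k->put;

spin_lock(&k->k_lock);
if (!klist_dec_and_del(n))	/* not the last reference */
	put = NULL;		/* so skip the callback */
spin_unlock(&k->k_lock);
if (put)
	put(n);			/* may sleep; lock no longer held */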
diff --git a/lib/kobject.c b/lib/kobject.c
index 8e7c7199348..1699eb9161f 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -407,6 +407,7 @@ static struct kobj_type dir_ktype = {
struct kobject *kobject_add_dir(struct kobject *parent, const char *name)
{
struct kobject *k;
+ int ret;
if (!parent)
return NULL;
@@ -418,7 +419,13 @@ struct kobject *kobject_add_dir(struct kobject *parent, const char *name)
k->parent = parent;
k->ktype = &dir_ktype;
kobject_set_name(k, name);
- kobject_register(k);
+ ret = kobject_register(k);
+ if (ret < 0) {
+ printk(KERN_WARNING "kobject_add_dir: "
+ "kobject_register error: %d\n", ret);
+ kobject_del(k);
+ return NULL;
+ }
return k;
}
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index 52b6dc144ce..60f46803af3 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
@@ -88,7 +88,7 @@ crc32c_le(u32 crc, unsigned char const *p, size_t len)
* reflect output bytes = true
*/
-static u32 crc32c_table[256] = {
+static const u32 crc32c_table[256] = {
0x00000000L, 0xF26B8303L, 0xE13B70F7L, 0x1350F3F4L,
0xC79A971FL, 0x35F1141CL, 0x26A1E7E8L, 0xD4CA64EBL,
0x8AD958CFL, 0x78B2DBCCL, 0x6BE22838L, 0x9989AB3BL,
diff --git a/lib/list_debug.c b/lib/list_debug.c
new file mode 100644
index 00000000000..e80d27c9789
--- /dev/null
+++ b/lib/list_debug.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2006, Red Hat, Inc., Dave Jones
+ * Released under the General Public License (GPL).
+ *
+ * This file contains the linked list implementations for
+ * DEBUG_LIST.
+ */
+
+#include <linux/module.h>
+#include <linux/list.h>
+
+/*
+ * Insert a new entry between two known consecutive entries.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+
+void __list_add(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next)
+{
+ if (unlikely(next->prev != prev)) {
+ printk(KERN_ERR "list_add corruption. next->prev should be %p, but was %p\n",
+ prev, next->prev);
+ BUG();
+ }
+ if (unlikely(prev->next != next)) {
+ printk(KERN_ERR "list_add corruption. prev->next should be %p, but was %p\n",
+ next, prev->next);
+ BUG();
+ }
+ next->prev = new;
+ new->next = next;
+ new->prev = prev;
+ prev->next = new;
+}
+EXPORT_SYMBOL(__list_add);
+
+/**
+ * list_add - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it after
+ *
+ * Insert a new entry after the specified head.
+ * This is good for implementing stacks.
+ */
+void list_add(struct list_head *new, struct list_head *head)
+{
+ __list_add(new, head, head->next);
+}
+EXPORT_SYMBOL(list_add);
+
+/**
+ * list_del - deletes entry from list.
+ * @entry: the element to delete from the list.
+ * Note: list_empty on entry does not return true after this, the entry is
+ * in an undefined state.
+ */
+void list_del(struct list_head *entry)
+{
+ if (unlikely(entry->prev->next != entry)) {
+ printk(KERN_ERR "list_del corruption. prev->next should be %p, "
+ "but was %p\n", entry, entry->prev->next);
+ BUG();
+ }
+ if (unlikely(entry->next->prev != entry)) {
+ printk(KERN_ERR "list_del corruption. next->prev should be %p, "
+ "but was %p\n", entry, entry->next->prev);
+ BUG();
+ }
+ __list_del(entry->prev, entry->next);
+ entry->next = LIST_POISON1;
+ entry->prev = LIST_POISON2;
+}
+EXPORT_SYMBOL(list_del);
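An example of the class of bug these checks catch (hypothetical user code): deleting the same entry twice. The first list_del() poisons the link pointers, so the second call fails loudly, since the poisoned pointers no longer satisfy the consistency checks, instead of silently corrupting a list.

/* Sketch: a double list_del() that DEBUG_LIST turns into a loud BUG. */
static LIST_HEAD(q);
static struct list_head item;

static void double_del(void)
{
	list_add(&item, &q);
	list_del(&item);	/* fine: pointers become LIST_POISON* */
	list_del(&item);	/* caught loudly instead of corrupting */
}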
diff --git a/lib/locking-selftest-hardirq.h b/lib/locking-selftest-hardirq.h
new file mode 100644
index 00000000000..10d4a150b25
--- /dev/null
+++ b/lib/locking-selftest-hardirq.h
@@ -0,0 +1,9 @@
+#undef IRQ_DISABLE
+#undef IRQ_ENABLE
+#undef IRQ_ENTER
+#undef IRQ_EXIT
+
+#define IRQ_ENABLE HARDIRQ_ENABLE
+#define IRQ_DISABLE HARDIRQ_DISABLE
+#define IRQ_ENTER HARDIRQ_ENTER
+#define IRQ_EXIT HARDIRQ_EXIT
diff --git a/lib/locking-selftest-mutex.h b/lib/locking-selftest-mutex.h
new file mode 100644
index 00000000000..68601b6f584
--- /dev/null
+++ b/lib/locking-selftest-mutex.h
@@ -0,0 +1,11 @@
+#undef LOCK
+#define LOCK ML
+
+#undef UNLOCK
+#define UNLOCK MU
+
+#undef RLOCK
+#undef WLOCK
+
+#undef INIT
+#define INIT MI
diff --git a/lib/locking-selftest-rlock-hardirq.h b/lib/locking-selftest-rlock-hardirq.h
new file mode 100644
index 00000000000..9f517ebcb78
--- /dev/null
+++ b/lib/locking-selftest-rlock-hardirq.h
@@ -0,0 +1,2 @@
+#include "locking-selftest-rlock.h"
+#include "locking-selftest-hardirq.h"
diff --git a/lib/locking-selftest-rlock-softirq.h b/lib/locking-selftest-rlock-softirq.h
new file mode 100644
index 00000000000..981455db7ff
--- /dev/null
+++ b/lib/locking-selftest-rlock-softirq.h
@@ -0,0 +1,2 @@
+#include "locking-selftest-rlock.h"
+#include "locking-selftest-softirq.h"
diff --git a/lib/locking-selftest-rlock.h b/lib/locking-selftest-rlock.h
new file mode 100644
index 00000000000..6789044f4d0
--- /dev/null
+++ b/lib/locking-selftest-rlock.h
@@ -0,0 +1,14 @@
+#undef LOCK
+#define LOCK RL
+
+#undef UNLOCK
+#define UNLOCK RU
+
+#undef RLOCK
+#define RLOCK RL
+
+#undef WLOCK
+#define WLOCK WL
+
+#undef INIT
+#define INIT RWI
diff --git a/lib/locking-selftest-rsem.h b/lib/locking-selftest-rsem.h
new file mode 100644
index 00000000000..62da886680c
--- /dev/null
+++ b/lib/locking-selftest-rsem.h
@@ -0,0 +1,14 @@
+#undef LOCK
+#define LOCK RSL
+
+#undef UNLOCK
+#define UNLOCK RSU
+
+#undef RLOCK
+#define RLOCK RSL
+
+#undef WLOCK
+#define WLOCK WSL
+
+#undef INIT
+#define INIT RWSI
diff --git a/lib/locking-selftest-softirq.h b/lib/locking-selftest-softirq.h
new file mode 100644
index 00000000000..a83de2a04ac
--- /dev/null
+++ b/lib/locking-selftest-softirq.h
@@ -0,0 +1,9 @@
+#undef IRQ_DISABLE
+#undef IRQ_ENABLE
+#undef IRQ_ENTER
+#undef IRQ_EXIT
+
+#define IRQ_DISABLE SOFTIRQ_DISABLE
+#define IRQ_ENABLE SOFTIRQ_ENABLE
+#define IRQ_ENTER SOFTIRQ_ENTER
+#define IRQ_EXIT SOFTIRQ_EXIT
diff --git a/lib/locking-selftest-spin-hardirq.h b/lib/locking-selftest-spin-hardirq.h
new file mode 100644
index 00000000000..693198dce30
--- /dev/null
+++ b/lib/locking-selftest-spin-hardirq.h
@@ -0,0 +1,2 @@
+#include "locking-selftest-spin.h"
+#include "locking-selftest-hardirq.h"
diff --git a/lib/locking-selftest-spin-softirq.h b/lib/locking-selftest-spin-softirq.h
new file mode 100644
index 00000000000..c472e2a87ff
--- /dev/null
+++ b/lib/locking-selftest-spin-softirq.h
@@ -0,0 +1,2 @@
+#include "locking-selftest-spin.h"
+#include "locking-selftest-softirq.h"
diff --git a/lib/locking-selftest-spin.h b/lib/locking-selftest-spin.h
new file mode 100644
index 00000000000..ccd1b4b0975
--- /dev/null
+++ b/lib/locking-selftest-spin.h
@@ -0,0 +1,11 @@
+#undef LOCK
+#define LOCK L
+
+#undef UNLOCK
+#define UNLOCK U
+
+#undef RLOCK
+#undef WLOCK
+
+#undef INIT
+#define INIT SI
diff --git a/lib/locking-selftest-wlock-hardirq.h b/lib/locking-selftest-wlock-hardirq.h
new file mode 100644
index 00000000000..2dd2e5122ca
--- /dev/null
+++ b/lib/locking-selftest-wlock-hardirq.h
@@ -0,0 +1,2 @@
+#include "locking-selftest-wlock.h"
+#include "locking-selftest-hardirq.h"
diff --git a/lib/locking-selftest-wlock-softirq.h b/lib/locking-selftest-wlock-softirq.h
new file mode 100644
index 00000000000..cb80d1cb944
--- /dev/null
+++ b/lib/locking-selftest-wlock-softirq.h
@@ -0,0 +1,2 @@
+#include "locking-selftest-wlock.h"
+#include "locking-selftest-softirq.h"
diff --git a/lib/locking-selftest-wlock.h b/lib/locking-selftest-wlock.h
new file mode 100644
index 00000000000..0815322d99e
--- /dev/null
+++ b/lib/locking-selftest-wlock.h
@@ -0,0 +1,14 @@
+#undef LOCK
+#define LOCK WL
+
+#undef UNLOCK
+#define UNLOCK WU
+
+#undef RLOCK
+#define RLOCK RL
+
+#undef WLOCK
+#define WLOCK WL
+
+#undef INIT
+#define INIT RWI
diff --git a/lib/locking-selftest-wsem.h b/lib/locking-selftest-wsem.h
new file mode 100644
index 00000000000..b88c5f2dc5f
--- /dev/null
+++ b/lib/locking-selftest-wsem.h
@@ -0,0 +1,14 @@
+#undef LOCK
+#define LOCK WSL
+
+#undef UNLOCK
+#define UNLOCK WSU
+
+#undef RLOCK
+#define RLOCK RSL
+
+#undef WLOCK
+#define WLOCK WSL
+
+#undef INIT
+#define INIT RWSI
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
new file mode 100644
index 00000000000..7945787f439
--- /dev/null
+++ b/lib/locking-selftest.c
@@ -0,0 +1,1216 @@
+/*
+ * lib/locking-selftest.c
+ *
+ * Testsuite for various locking APIs: spinlocks, rwlocks,
+ * mutexes and rw-semaphores.
+ *
+ * It checks both false positives and false negatives.
+ *
+ * Started by Ingo Molnar:
+ *
+ * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ */
+#include <linux/rwsem.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/lockdep.h>
+#include <linux/spinlock.h>
+#include <linux/kallsyms.h>
+#include <linux/interrupt.h>
+#include <linux/debug_locks.h>
+#include <linux/irqflags.h>
+
+/*
+ * Change this to 1 if you want to see the failure printouts:
+ */
+static unsigned int debug_locks_verbose;
+
+static int __init setup_debug_locks_verbose(char *str)
+{
+ get_option(&str, &debug_locks_verbose);
+
+ return 1;
+}
+
+__setup("debug_locks_verbose=", setup_debug_locks_verbose);
+
+#define FAILURE 0
+#define SUCCESS 1
+
+#define LOCKTYPE_SPIN 0x1
+#define LOCKTYPE_RWLOCK 0x2
+#define LOCKTYPE_MUTEX 0x4
+#define LOCKTYPE_RWSEM 0x8
+
+/*
+ * Normal standalone locks, for the circular and irq-context
+ * dependency tests:
+ */
+static DEFINE_SPINLOCK(lock_A);
+static DEFINE_SPINLOCK(lock_B);
+static DEFINE_SPINLOCK(lock_C);
+static DEFINE_SPINLOCK(lock_D);
+
+static DEFINE_RWLOCK(rwlock_A);
+static DEFINE_RWLOCK(rwlock_B);
+static DEFINE_RWLOCK(rwlock_C);
+static DEFINE_RWLOCK(rwlock_D);
+
+static DEFINE_MUTEX(mutex_A);
+static DEFINE_MUTEX(mutex_B);
+static DEFINE_MUTEX(mutex_C);
+static DEFINE_MUTEX(mutex_D);
+
+static DECLARE_RWSEM(rwsem_A);
+static DECLARE_RWSEM(rwsem_B);
+static DECLARE_RWSEM(rwsem_C);
+static DECLARE_RWSEM(rwsem_D);
+
+/*
+ * Locks that we initialize dynamically as well so that
+ * e.g. X1 and X2 become two instances of the same class,
+ * but X* and Y* are different classes. We do this so that
+ * we do not trigger a real lockup:
+ */
+static DEFINE_SPINLOCK(lock_X1);
+static DEFINE_SPINLOCK(lock_X2);
+static DEFINE_SPINLOCK(lock_Y1);
+static DEFINE_SPINLOCK(lock_Y2);
+static DEFINE_SPINLOCK(lock_Z1);
+static DEFINE_SPINLOCK(lock_Z2);
+
+static DEFINE_RWLOCK(rwlock_X1);
+static DEFINE_RWLOCK(rwlock_X2);
+static DEFINE_RWLOCK(rwlock_Y1);
+static DEFINE_RWLOCK(rwlock_Y2);
+static DEFINE_RWLOCK(rwlock_Z1);
+static DEFINE_RWLOCK(rwlock_Z2);
+
+static DEFINE_MUTEX(mutex_X1);
+static DEFINE_MUTEX(mutex_X2);
+static DEFINE_MUTEX(mutex_Y1);
+static DEFINE_MUTEX(mutex_Y2);
+static DEFINE_MUTEX(mutex_Z1);
+static DEFINE_MUTEX(mutex_Z2);
+
+static DECLARE_RWSEM(rwsem_X1);
+static DECLARE_RWSEM(rwsem_X2);
+static DECLARE_RWSEM(rwsem_Y1);
+static DECLARE_RWSEM(rwsem_Y2);
+static DECLARE_RWSEM(rwsem_Z1);
+static DECLARE_RWSEM(rwsem_Z2);
+
+/*
+ * non-inlined runtime initializers, to let separate locks share
+ * the same lock-class:
+ */
+#define INIT_CLASS_FUNC(class) \
+static noinline void \
+init_class_##class(spinlock_t *lock, rwlock_t *rwlock, struct mutex *mutex, \
+ struct rw_semaphore *rwsem) \
+{ \
+ spin_lock_init(lock); \
+ rwlock_init(rwlock); \
+ mutex_init(mutex); \
+ init_rwsem(rwsem); \
+}
+
+INIT_CLASS_FUNC(X)
+INIT_CLASS_FUNC(Y)
+INIT_CLASS_FUNC(Z)
+
+static void init_shared_classes(void)
+{
+ init_class_X(&lock_X1, &rwlock_X1, &mutex_X1, &rwsem_X1);
+ init_class_X(&lock_X2, &rwlock_X2, &mutex_X2, &rwsem_X2);
+
+ init_class_Y(&lock_Y1, &rwlock_Y1, &mutex_Y1, &rwsem_Y1);
+ init_class_Y(&lock_Y2, &rwlock_Y2, &mutex_Y2, &rwsem_Y2);
+
+ init_class_Z(&lock_Z1, &rwlock_Z1, &mutex_Z1, &rwsem_Z1);
+ init_class_Z(&lock_Z2, &rwlock_Z2, &mutex_Z2, &rwsem_Z2);
+}
+
+/*
+ * For spinlocks and rwlocks we also do hardirq-safe / softirq-safe tests.
+ * The following functions use a lock from a simulated hardirq/softirq
+ * context, causing the locks to be marked as hardirq-safe/softirq-safe:
+ */
+
+#define HARDIRQ_DISABLE local_irq_disable
+#define HARDIRQ_ENABLE local_irq_enable
+
+#define HARDIRQ_ENTER() \
+ local_irq_disable(); \
+ irq_enter(); \
+ WARN_ON(!in_irq());
+
+#define HARDIRQ_EXIT() \
+ __irq_exit(); \
+ local_irq_enable();
+
+#define SOFTIRQ_DISABLE local_bh_disable
+#define SOFTIRQ_ENABLE local_bh_enable
+
+#define SOFTIRQ_ENTER() \
+ local_bh_disable(); \
+ local_irq_disable(); \
+ trace_softirq_enter(); \
+ WARN_ON(!in_softirq());
+
+#define SOFTIRQ_EXIT() \
+ trace_softirq_exit(); \
+ local_irq_enable(); \
+ local_bh_enable();
+
+/*
+ * Shortcuts for lock/unlock API variants, to keep
+ * the testcases compact:
+ */
+#define L(x) spin_lock(&lock_##x)
+#define U(x) spin_unlock(&lock_##x)
+#define LU(x) L(x); U(x)
+#define SI(x) spin_lock_init(&lock_##x)
+
+#define WL(x) write_lock(&rwlock_##x)
+#define WU(x) write_unlock(&rwlock_##x)
+#define WLU(x) WL(x); WU(x)
+
+#define RL(x) read_lock(&rwlock_##x)
+#define RU(x) read_unlock(&rwlock_##x)
+#define RLU(x) RL(x); RU(x)
+#define RWI(x) rwlock_init(&rwlock_##x)
+
+#define ML(x) mutex_lock(&mutex_##x)
+#define MU(x) mutex_unlock(&mutex_##x)
+#define MI(x) mutex_init(&mutex_##x)
+
+#define WSL(x) down_write(&rwsem_##x)
+#define WSU(x) up_write(&rwsem_##x)
+
+#define RSL(x) down_read(&rwsem_##x)
+#define RSU(x) up_read(&rwsem_##x)
+#define RWSI(x) init_rwsem(&rwsem_##x)
+
+#define LOCK_UNLOCK_2(x,y) LOCK(x); LOCK(y); UNLOCK(y); UNLOCK(x)
+
+/*
+ * Generate different permutations of the same testcase, using
+ * the same basic lock-dependency/state events:
+ */
+
+#define GENERATE_TESTCASE(name) \
+ \
+static void name(void) { E(); }
+
+#define GENERATE_PERMUTATIONS_2_EVENTS(name) \
+ \
+static void name##_12(void) { E1(); E2(); } \
+static void name##_21(void) { E2(); E1(); }
+
+#define GENERATE_PERMUTATIONS_3_EVENTS(name) \
+ \
+static void name##_123(void) { E1(); E2(); E3(); } \
+static void name##_132(void) { E1(); E3(); E2(); } \
+static void name##_213(void) { E2(); E1(); E3(); } \
+static void name##_231(void) { E2(); E3(); E1(); } \
+static void name##_312(void) { E3(); E1(); E2(); } \
+static void name##_321(void) { E3(); E2(); E1(); }
+
+/*
+ * AA deadlock:
+ */
+
+#define E() \
+ \
+ LOCK(X1); \
+ LOCK(X2); /* this one should fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(AA_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(AA_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(AA_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(AA_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(AA_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(AA_rsem)
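Each include above rebinds LOCK/UNLOCK before GENERATE_TESTCASE stamps out the next variant. For instance, after locking-selftest-mutex.h the AA case expands roughly as follows (an approximation of the preprocessor output, for illustration):

/* Approximate expansion of GENERATE_TESTCASE(AA_mutex): */
static void AA_mutex(void)
{
	mutex_lock(&mutex_X1);
	mutex_lock(&mutex_X2);	/* this one should fail */
}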
+
+#undef E
+
+/*
+ * Special-case for read-locking, they are
+ * allowed to recurse on the same lock class:
+ */
+static void rlock_AA1(void)
+{
+ RL(X1);
+ RL(X1); // this one should NOT fail
+}
+
+static void rlock_AA1B(void)
+{
+ RL(X1);
+ RL(X2); // this one should NOT fail
+}
+
+static void rsem_AA1(void)
+{
+ RSL(X1);
+ RSL(X1); // this one should fail
+}
+
+static void rsem_AA1B(void)
+{
+ RSL(X1);
+ RSL(X2); // this one should fail
+}
+/*
+ * The mixing of read and write locks is not allowed:
+ */
+static void rlock_AA2(void)
+{
+ RL(X1);
+ WL(X2); // this one should fail
+}
+
+static void rsem_AA2(void)
+{
+ RSL(X1);
+ WSL(X2); // this one should fail
+}
+
+static void rlock_AA3(void)
+{
+ WL(X1);
+ RL(X2); // this one should fail
+}
+
+static void rsem_AA3(void)
+{
+ WSL(X1);
+ RSL(X2); // this one should fail
+}
+
+/*
+ * ABBA deadlock:
+ */
+
+#define E() \
+ \
+ LOCK_UNLOCK_2(A, B); \
+ LOCK_UNLOCK_2(B, A); /* fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(ABBA_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(ABBA_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(ABBA_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(ABBA_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(ABBA_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(ABBA_rsem)
+
+#undef E
+
+/*
+ * AB BC CA deadlock:
+ */
+
+#define E() \
+ \
+ LOCK_UNLOCK_2(A, B); \
+ LOCK_UNLOCK_2(B, C); \
+ LOCK_UNLOCK_2(C, A); /* fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(ABBCCA_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(ABBCCA_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(ABBCCA_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(ABBCCA_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(ABBCCA_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(ABBCCA_rsem)
+
+#undef E
+
+/*
+ * AB CA BC deadlock:
+ */
+
+#define E() \
+ \
+ LOCK_UNLOCK_2(A, B); \
+ LOCK_UNLOCK_2(C, A); \
+ LOCK_UNLOCK_2(B, C); /* fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(ABCABC_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(ABCABC_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(ABCABC_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(ABCABC_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(ABCABC_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(ABCABC_rsem)
+
+#undef E
+
+/*
+ * AB BC CD DA deadlock:
+ */
+
+#define E() \
+ \
+ LOCK_UNLOCK_2(A, B); \
+ LOCK_UNLOCK_2(B, C); \
+ LOCK_UNLOCK_2(C, D); \
+ LOCK_UNLOCK_2(D, A); /* fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(ABBCCDDA_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(ABBCCDDA_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(ABBCCDDA_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(ABBCCDDA_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(ABBCCDDA_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(ABBCCDDA_rsem)
+
+#undef E
+
+/*
+ * AB CD BD DA deadlock:
+ */
+#define E() \
+ \
+ LOCK_UNLOCK_2(A, B); \
+ LOCK_UNLOCK_2(C, D); \
+ LOCK_UNLOCK_2(B, D); \
+ LOCK_UNLOCK_2(D, A); /* fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(ABCDBDDA_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(ABCDBDDA_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(ABCDBDDA_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(ABCDBDDA_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(ABCDBDDA_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(ABCDBDDA_rsem)
+
+#undef E
+
+/*
+ * AB CD BC DA deadlock:
+ */
+#define E() \
+ \
+ LOCK_UNLOCK_2(A, B); \
+ LOCK_UNLOCK_2(C, D); \
+ LOCK_UNLOCK_2(B, C); \
+ LOCK_UNLOCK_2(D, A); /* fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(ABCDBCDA_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(ABCDBCDA_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(ABCDBCDA_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(ABCDBCDA_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(ABCDBCDA_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(ABCDBCDA_rsem)
+
+#undef E
+
+/*
+ * Double unlock:
+ */
+#define E() \
+ \
+ LOCK(A); \
+ UNLOCK(A); \
+ UNLOCK(A); /* fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(double_unlock_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(double_unlock_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(double_unlock_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(double_unlock_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(double_unlock_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(double_unlock_rsem)
+
+#undef E
+
+/*
+ * Bad unlock ordering:
+ */
+#define E() \
+ \
+ LOCK(A); \
+ LOCK(B); \
+ UNLOCK(A); /* fail */ \
+ UNLOCK(B);
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(bad_unlock_order_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(bad_unlock_order_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(bad_unlock_order_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(bad_unlock_order_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(bad_unlock_order_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(bad_unlock_order_rsem)
+
+#undef E
+
+/*
+ * initializing a held lock:
+ */
+#define E() \
+ \
+ LOCK(A); \
+ INIT(A); /* fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(init_held_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(init_held_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(init_held_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(init_held_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(init_held_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(init_held_rsem)
+
+#undef E
+
+/*
+ * locking an irq-safe lock with irqs enabled:
+ */
+#define E1() \
+ \
+ IRQ_ENTER(); \
+ LOCK(A); \
+ UNLOCK(A); \
+ IRQ_EXIT();
+
+#define E2() \
+ \
+ LOCK(A); \
+ UNLOCK(A);
+
+/*
+ * Generate 24 testcases:
+ */
+#include "locking-selftest-spin-hardirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin)
+
+#include "locking-selftest-rlock-hardirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock)
+
+#include "locking-selftest-wlock-hardirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_wlock)
+
+#include "locking-selftest-spin-softirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_spin)
+
+#include "locking-selftest-rlock-softirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_rlock)
+
+#include "locking-selftest-wlock-softirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock)
+
+#undef E1
+#undef E2
+
+/*
+ * Enabling hardirqs with a softirq-safe lock held:
+ */
+#define E1() \
+ \
+ SOFTIRQ_ENTER(); \
+ LOCK(A); \
+ UNLOCK(A); \
+ SOFTIRQ_EXIT();
+
+#define E2() \
+ \
+ HARDIRQ_DISABLE(); \
+ LOCK(A); \
+ HARDIRQ_ENABLE(); \
+ UNLOCK(A);
+
+/*
+ * Generate 12 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_spin)
+
+#include "locking-selftest-wlock.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_wlock)
+
+#include "locking-selftest-rlock.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
+
+#undef E1
+#undef E2
+
+/*
+ * Enabling irqs with an irq-safe lock held:
+ */
+#define E1() \
+ \
+ IRQ_ENTER(); \
+ LOCK(A); \
+ UNLOCK(A); \
+ IRQ_EXIT();
+
+#define E2() \
+ \
+ IRQ_DISABLE(); \
+ LOCK(A); \
+ IRQ_ENABLE(); \
+ UNLOCK(A);
+
+/*
+ * Generate 24 testcases:
+ */
+#include "locking-selftest-spin-hardirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_spin)
+
+#include "locking-selftest-rlock-hardirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock)
+
+#include "locking-selftest-wlock-hardirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_wlock)
+
+#include "locking-selftest-spin-softirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_spin)
+
+#include "locking-selftest-rlock-softirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_rlock)
+
+#include "locking-selftest-wlock-softirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
+
+#undef E1
+#undef E2
+
+/*
+ * Acquiring an irq-unsafe lock while holding an irq-safe lock:
+ */
+#define E1() \
+ \
+ LOCK(A); \
+ LOCK(B); \
+ UNLOCK(B); \
+ UNLOCK(A); \
+
+#define E2() \
+ \
+ LOCK(B); \
+ UNLOCK(B);
+
+#define E3() \
+ \
+ IRQ_ENTER(); \
+ LOCK(A); \
+ UNLOCK(A); \
+ IRQ_EXIT();
+
+/*
+ * Generate 36 testcases:
+ */
+#include "locking-selftest-spin-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_spin)
+
+#include "locking-selftest-rlock-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock)
+
+#include "locking-selftest-wlock-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_wlock)
+
+#include "locking-selftest-spin-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_spin)
+
+#include "locking-selftest-rlock-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_rlock)
+
+#include "locking-selftest-wlock-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
+
+#undef E1
+#undef E2
+#undef E3
+
+/*
+ * If a lock becomes softirq-safe, but earlier it took
+ * a softirq-unsafe lock:
+ */
+
+#define E1() \
+ IRQ_DISABLE(); \
+ LOCK(A); \
+ LOCK(B); \
+ UNLOCK(B); \
+ UNLOCK(A); \
+ IRQ_ENABLE();
+
+#define E2() \
+ LOCK(B); \
+ UNLOCK(B);
+
+#define E3() \
+ IRQ_ENTER(); \
+ LOCK(A); \
+ UNLOCK(A); \
+ IRQ_EXIT();
+
+/*
+ * Generate 36 testcases:
+ */
+#include "locking-selftest-spin-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_spin)
+
+#include "locking-selftest-rlock-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock)
+
+#include "locking-selftest-wlock-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_wlock)
+
+#include "locking-selftest-spin-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_spin)
+
+#include "locking-selftest-rlock-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_rlock)
+
+#include "locking-selftest-wlock-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock)
+
+#undef E1
+#undef E2
+#undef E3
+
+/*
+ * read-lock / write-lock irq inversion.
+ *
+ * Deadlock scenario:
+ *
+ * CPU#1 is at #1, i.e. it has write-locked A, but has not
+ * taken B yet.
+ *
+ * CPU#2 is at #2, i.e. it has locked B.
+ *
+ * Hardirq hits CPU#2 at point #2 and is trying to read-lock A.
+ *
+ * The deadlock occurs because CPU#1 will spin on B, and CPU#2
+ * will spin on A.
+ */
+
+#define E1() \
+ \
+ IRQ_DISABLE(); \
+ WL(A); \
+ LOCK(B); \
+ UNLOCK(B); \
+ WU(A); \
+ IRQ_ENABLE();
+
+#define E2() \
+ \
+ LOCK(B); \
+ UNLOCK(B);
+
+#define E3() \
+ \
+ IRQ_ENTER(); \
+ RL(A); \
+ RU(A); \
+ IRQ_EXIT();
+
+/*
+ * Generate 36 testcases:
+ */
+#include "locking-selftest-spin-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_spin)
+
+#include "locking-selftest-rlock-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_rlock)
+
+#include "locking-selftest-wlock-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_wlock)
+
+#include "locking-selftest-spin-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_spin)
+
+#include "locking-selftest-rlock-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_rlock)
+
+#include "locking-selftest-wlock-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock)
+
+#undef E1
+#undef E2
+#undef E3
+
+/*
+ * read-lock / write-lock recursion that is actually safe.
+ */
+
+#define E1() \
+ \
+ IRQ_DISABLE(); \
+ WL(A); \
+ WU(A); \
+ IRQ_ENABLE();
+
+#define E2() \
+ \
+ RL(A); \
+	RU(A);
+
+#define E3() \
+ \
+ IRQ_ENTER(); \
+ RL(A); \
+ L(B); \
+ U(B); \
+ RU(A); \
+ IRQ_EXIT();
+
+/*
+ * Generate 12 testcases:
+ */
+#include "locking-selftest-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_hard)
+
+#include "locking-selftest-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
+
+#undef E1
+#undef E2
+#undef E3
+
+/*
+ * read-lock / write-lock recursion that is unsafe.
+ */
+
+#define E1() \
+ \
+ IRQ_DISABLE(); \
+ L(B); \
+ WL(A); \
+ WU(A); \
+ U(B); \
+ IRQ_ENABLE();
+
+#define E2() \
+ \
+ RL(A); \
+	RU(A);
+
+#define E3() \
+ \
+ IRQ_ENTER(); \
+ L(B); \
+ U(B); \
+ IRQ_EXIT();
+
+/*
+ * Generate 12 testcases:
+ */
+#include "locking-selftest-hardirq.h"
+// GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_hard)
+
+#include "locking-selftest-softirq.h"
+// GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft)
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define I_SPINLOCK(x) lockdep_reset_lock(&lock_##x.dep_map)
+# define I_RWLOCK(x) lockdep_reset_lock(&rwlock_##x.dep_map)
+# define I_MUTEX(x) lockdep_reset_lock(&mutex_##x.dep_map)
+# define I_RWSEM(x) lockdep_reset_lock(&rwsem_##x.dep_map)
+#else
+# define I_SPINLOCK(x)
+# define I_RWLOCK(x)
+# define I_MUTEX(x)
+# define I_RWSEM(x)
+#endif
+
+#define I1(x) \
+ do { \
+ I_SPINLOCK(x); \
+ I_RWLOCK(x); \
+ I_MUTEX(x); \
+ I_RWSEM(x); \
+ } while (0)
+
+#define I2(x) \
+ do { \
+ spin_lock_init(&lock_##x); \
+ rwlock_init(&rwlock_##x); \
+ mutex_init(&mutex_##x); \
+ init_rwsem(&rwsem_##x); \
+ } while (0)
+
+static void reset_locks(void)
+{
+ local_irq_disable();
+ I1(A); I1(B); I1(C); I1(D);
+ I1(X1); I1(X2); I1(Y1); I1(Y2); I1(Z1); I1(Z2);
+ lockdep_reset();
+ I2(A); I2(B); I2(C); I2(D);
+ init_shared_classes();
+ local_irq_enable();
+}
+
+#undef I1
+#undef I2
+
+static int testcase_total;
+static int testcase_successes;
+static int expected_testcase_failures;
+static int unexpected_testcase_failures;
+
+static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
+{
+ unsigned long saved_preempt_count = preempt_count();
+ int expected_failure = 0;
+
+ WARN_ON(irqs_disabled());
+
+ testcase_fn();
+ /*
+ * Filter out expected failures:
+ */
+#ifndef CONFIG_PROVE_LOCKING
+ if ((lockclass_mask & LOCKTYPE_SPIN) && debug_locks != expected)
+ expected_failure = 1;
+ if ((lockclass_mask & LOCKTYPE_RWLOCK) && debug_locks != expected)
+ expected_failure = 1;
+ if ((lockclass_mask & LOCKTYPE_MUTEX) && debug_locks != expected)
+ expected_failure = 1;
+ if ((lockclass_mask & LOCKTYPE_RWSEM) && debug_locks != expected)
+ expected_failure = 1;
+#endif
+ if (debug_locks != expected) {
+ if (expected_failure) {
+ expected_testcase_failures++;
+ printk("failed|");
+ } else {
+ unexpected_testcase_failures++;
+ printk("FAILED|");
+ }
+ } else {
+ testcase_successes++;
+ printk(" ok |");
+ }
+ testcase_total++;
+
+ if (debug_locks_verbose)
+ printk(" lockclass mask: %x, debug_locks: %d, expected: %d\n",
+ lockclass_mask, debug_locks, expected);
+ /*
+ * Some tests (e.g. double-unlock) might corrupt the preemption
+ * count, so restore it:
+ */
+ preempt_count() = saved_preempt_count;
+#ifdef CONFIG_TRACE_IRQFLAGS
+ if (softirq_count())
+ current->softirqs_enabled = 0;
+ else
+ current->softirqs_enabled = 1;
+#endif
+
+ reset_locks();
+}
+
+static inline void print_testname(const char *testname)
+{
+ printk("%33s:", testname);
+}
+
+#define DO_TESTCASE_1(desc, name, nr) \
+ print_testname(desc"/"#nr); \
+ dotest(name##_##nr, SUCCESS, LOCKTYPE_RWLOCK); \
+ printk("\n");
+
+#define DO_TESTCASE_1B(desc, name, nr) \
+ print_testname(desc"/"#nr); \
+ dotest(name##_##nr, FAILURE, LOCKTYPE_RWLOCK); \
+ printk("\n");
+
+#define DO_TESTCASE_3(desc, name, nr) \
+ print_testname(desc"/"#nr); \
+ dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN); \
+ dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK); \
+ dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK); \
+ printk("\n");
+
+#define DO_TESTCASE_3RW(desc, name, nr) \
+ print_testname(desc"/"#nr); \
+ dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN|LOCKTYPE_RWLOCK);\
+ dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK); \
+ dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK); \
+ printk("\n");
+
+#define DO_TESTCASE_6(desc, name) \
+ print_testname(desc); \
+ dotest(name##_spin, FAILURE, LOCKTYPE_SPIN); \
+ dotest(name##_wlock, FAILURE, LOCKTYPE_RWLOCK); \
+ dotest(name##_rlock, FAILURE, LOCKTYPE_RWLOCK); \
+ dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX); \
+ dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM); \
+ dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM); \
+ printk("\n");
+
+#define DO_TESTCASE_6_SUCCESS(desc, name) \
+ print_testname(desc); \
+ dotest(name##_spin, SUCCESS, LOCKTYPE_SPIN); \
+ dotest(name##_wlock, SUCCESS, LOCKTYPE_RWLOCK); \
+ dotest(name##_rlock, SUCCESS, LOCKTYPE_RWLOCK); \
+ dotest(name##_mutex, SUCCESS, LOCKTYPE_MUTEX); \
+ dotest(name##_wsem, SUCCESS, LOCKTYPE_RWSEM); \
+ dotest(name##_rsem, SUCCESS, LOCKTYPE_RWSEM); \
+ printk("\n");
+
+/*
+ * 'read' variant: rlocks must not trigger.
+ */
+#define DO_TESTCASE_6R(desc, name) \
+ print_testname(desc); \
+ dotest(name##_spin, FAILURE, LOCKTYPE_SPIN); \
+ dotest(name##_wlock, FAILURE, LOCKTYPE_RWLOCK); \
+ dotest(name##_rlock, SUCCESS, LOCKTYPE_RWLOCK); \
+ dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX); \
+ dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM); \
+ dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM); \
+ printk("\n");
+
+#define DO_TESTCASE_2I(desc, name, nr) \
+ DO_TESTCASE_1("hard-"desc, name##_hard, nr); \
+ DO_TESTCASE_1("soft-"desc, name##_soft, nr);
+
+#define DO_TESTCASE_2IB(desc, name, nr) \
+ DO_TESTCASE_1B("hard-"desc, name##_hard, nr); \
+ DO_TESTCASE_1B("soft-"desc, name##_soft, nr);
+
+#define DO_TESTCASE_6I(desc, name, nr) \
+ DO_TESTCASE_3("hard-"desc, name##_hard, nr); \
+ DO_TESTCASE_3("soft-"desc, name##_soft, nr);
+
+#define DO_TESTCASE_6IRW(desc, name, nr) \
+ DO_TESTCASE_3RW("hard-"desc, name##_hard, nr); \
+ DO_TESTCASE_3RW("soft-"desc, name##_soft, nr);
+
+#define DO_TESTCASE_2x3(desc, name) \
+ DO_TESTCASE_3(desc, name, 12); \
+ DO_TESTCASE_3(desc, name, 21);
+
+#define DO_TESTCASE_2x6(desc, name) \
+ DO_TESTCASE_6I(desc, name, 12); \
+ DO_TESTCASE_6I(desc, name, 21);
+
+#define DO_TESTCASE_6x2(desc, name) \
+ DO_TESTCASE_2I(desc, name, 123); \
+ DO_TESTCASE_2I(desc, name, 132); \
+ DO_TESTCASE_2I(desc, name, 213); \
+ DO_TESTCASE_2I(desc, name, 231); \
+ DO_TESTCASE_2I(desc, name, 312); \
+ DO_TESTCASE_2I(desc, name, 321);
+
+#define DO_TESTCASE_6x2B(desc, name) \
+ DO_TESTCASE_2IB(desc, name, 123); \
+ DO_TESTCASE_2IB(desc, name, 132); \
+ DO_TESTCASE_2IB(desc, name, 213); \
+ DO_TESTCASE_2IB(desc, name, 231); \
+ DO_TESTCASE_2IB(desc, name, 312); \
+ DO_TESTCASE_2IB(desc, name, 321);
+
+#define DO_TESTCASE_6x6(desc, name) \
+ DO_TESTCASE_6I(desc, name, 123); \
+ DO_TESTCASE_6I(desc, name, 132); \
+ DO_TESTCASE_6I(desc, name, 213); \
+ DO_TESTCASE_6I(desc, name, 231); \
+ DO_TESTCASE_6I(desc, name, 312); \
+ DO_TESTCASE_6I(desc, name, 321);
+
+#define DO_TESTCASE_6x6RW(desc, name) \
+ DO_TESTCASE_6IRW(desc, name, 123); \
+ DO_TESTCASE_6IRW(desc, name, 132); \
+ DO_TESTCASE_6IRW(desc, name, 213); \
+ DO_TESTCASE_6IRW(desc, name, 231); \
+ DO_TESTCASE_6IRW(desc, name, 312); \
+ DO_TESTCASE_6IRW(desc, name, 321);
+
+
+void locking_selftest(void)
+{
+ /*
+ * Got a locking failure before the selftest ran?
+ */
+ if (!debug_locks) {
+ printk("----------------------------------\n");
+ printk("| Locking API testsuite disabled |\n");
+ printk("----------------------------------\n");
+ return;
+ }
+
+ /*
+ * Run the testsuite:
+ */
+ printk("------------------------\n");
+ printk("| Locking API testsuite:\n");
+ printk("----------------------------------------------------------------------------\n");
+ printk(" | spin |wlock |rlock |mutex | wsem | rsem |\n");
+ printk(" --------------------------------------------------------------------------\n");
+
+ init_shared_classes();
+ debug_locks_silent = !debug_locks_verbose;
+
+ DO_TESTCASE_6R("A-A deadlock", AA);
+ DO_TESTCASE_6R("A-B-B-A deadlock", ABBA);
+ DO_TESTCASE_6R("A-B-B-C-C-A deadlock", ABBCCA);
+ DO_TESTCASE_6R("A-B-C-A-B-C deadlock", ABCABC);
+ DO_TESTCASE_6R("A-B-B-C-C-D-D-A deadlock", ABBCCDDA);
+ DO_TESTCASE_6R("A-B-C-D-B-D-D-A deadlock", ABCDBDDA);
+ DO_TESTCASE_6R("A-B-C-D-B-C-D-A deadlock", ABCDBCDA);
+ DO_TESTCASE_6("double unlock", double_unlock);
+ DO_TESTCASE_6("initialize held", init_held);
+ DO_TESTCASE_6_SUCCESS("bad unlock order", bad_unlock_order);
+
+ printk(" --------------------------------------------------------------------------\n");
+ print_testname("recursive read-lock");
+ printk(" |");
+ dotest(rlock_AA1, SUCCESS, LOCKTYPE_RWLOCK);
+ printk(" |");
+ dotest(rsem_AA1, FAILURE, LOCKTYPE_RWSEM);
+ printk("\n");
+
+ print_testname("recursive read-lock #2");
+ printk(" |");
+ dotest(rlock_AA1B, SUCCESS, LOCKTYPE_RWLOCK);
+ printk(" |");
+ dotest(rsem_AA1B, FAILURE, LOCKTYPE_RWSEM);
+ printk("\n");
+
+ print_testname("mixed read-write-lock");
+ printk(" |");
+ dotest(rlock_AA2, FAILURE, LOCKTYPE_RWLOCK);
+ printk(" |");
+ dotest(rsem_AA2, FAILURE, LOCKTYPE_RWSEM);
+ printk("\n");
+
+ print_testname("mixed write-read-lock");
+ printk(" |");
+ dotest(rlock_AA3, FAILURE, LOCKTYPE_RWLOCK);
+ printk(" |");
+ dotest(rsem_AA3, FAILURE, LOCKTYPE_RWSEM);
+ printk("\n");
+
+ printk(" --------------------------------------------------------------------------\n");
+
+ /*
+ * irq-context testcases:
+ */
+ DO_TESTCASE_2x6("irqs-on + irq-safe-A", irqsafe1);
+ DO_TESTCASE_2x3("sirq-safe-A => hirqs-on", irqsafe2A);
+ DO_TESTCASE_2x6("safe-A + irqs-on", irqsafe2B);
+ DO_TESTCASE_6x6("safe-A + unsafe-B #1", irqsafe3);
+ DO_TESTCASE_6x6("safe-A + unsafe-B #2", irqsafe4);
+ DO_TESTCASE_6x6RW("irq lock-inversion", irq_inversion);
+
+ DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion);
+// DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2);
+
+ if (unexpected_testcase_failures) {
+ printk("-----------------------------------------------------------------\n");
+ debug_locks = 0;
+ printk("BUG: %3d unexpected failures (out of %3d) - debugging disabled! |\n",
+ unexpected_testcase_failures, testcase_total);
+ printk("-----------------------------------------------------------------\n");
+ } else if (expected_testcase_failures && testcase_successes) {
+ printk("--------------------------------------------------------\n");
+ printk("%3d out of %3d testcases failed, as expected. |\n",
+ expected_testcase_failures, testcase_total);
+ printk("----------------------------------------------------\n");
+ debug_locks = 1;
+ } else if (expected_testcase_failures && !testcase_successes) {
+ printk("--------------------------------------------------------\n");
+ printk("All %3d testcases failed, as expected. |\n",
+ expected_testcase_failures);
+ printk("----------------------------------------\n");
+ debug_locks = 1;
+ } else {
+ printk("-------------------------------------------------------\n");
+ printk("Good, all %3d testcases passed! |\n",
+ testcase_successes);
+ printk("---------------------------------\n");
+ debug_locks = 1;
+ }
+ debug_locks_silent = 0;
+}
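The DO_TESTCASE_* tables above drive functions that the GENERATE_PERMUTATIONS_* macros stamped out earlier in this file: one function per ordering of the E1()/E2()/E3() events, with the included locking-selftest-*.h headers rebinding LOCK/UNLOCK and the IRQ_* helpers to the lock type and irq context under test. As a hedged sketch only (the real macros live in the part of the file not shown here, and additionally vary the irq context), the two-event generator amounts to:

/*
 * Hypothetical sketch -- not the file's actual macro:
 */
#define SKETCH_PERMUTATIONS_2_EVENTS(name)		\
static void name##_12(void) { E1(); E2(); }		\
static void name##_21(void) { E2(); E1(); }

dotest() then runs each generated function and compares the resulting debug_locks state against the expected SUCCESS/FAILURE verdict.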
diff --git a/lib/plist.c b/lib/plist.c
new file mode 100644
index 00000000000..3074a02272f
--- /dev/null
+++ b/lib/plist.c
@@ -0,0 +1,118 @@
+/*
+ * lib/plist.c
+ *
+ * Descending-priority-sorted doubly-linked list
+ *
+ * (C) 2002-2003 Intel Corp
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>.
+ *
+ * 2001-2005 (c) MontaVista Software, Inc.
+ * Daniel Walker <dwalker@mvista.com>
+ *
+ * (C) 2005 Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Simplifications of the original code by
+ * Oleg Nesterov <oleg@tv-sign.ru>
+ *
+ * Licensed under the FSF's GNU General Public License v2 or later.
+ *
+ * Based on simple lists (include/linux/list.h).
+ *
+ * This file contains the add / del functions which are considered to
+ * be too large to inline. See include/linux/plist.h for further
+ * information.
+ */
+
+#include <linux/plist.h>
+#include <linux/spinlock.h>
+
+#ifdef CONFIG_DEBUG_PI_LIST
+
+static void plist_check_prev_next(struct list_head *t, struct list_head *p,
+ struct list_head *n)
+{
+ if (n->prev != p || p->next != n) {
+ printk("top: %p, n: %p, p: %p\n", t, t->next, t->prev);
+ printk("prev: %p, n: %p, p: %p\n", p, p->next, p->prev);
+ printk("next: %p, n: %p, p: %p\n", n, n->next, n->prev);
+ WARN_ON(1);
+ }
+}
+
+static void plist_check_list(struct list_head *top)
+{
+ struct list_head *prev = top, *next = top->next;
+
+ plist_check_prev_next(top, prev, next);
+ while (next != top) {
+ prev = next;
+ next = prev->next;
+ plist_check_prev_next(top, prev, next);
+ }
+}
+
+static void plist_check_head(struct plist_head *head)
+{
+ WARN_ON(!head->lock);
+ if (head->lock)
+ WARN_ON_SMP(!spin_is_locked(head->lock));
+ plist_check_list(&head->prio_list);
+ plist_check_list(&head->node_list);
+}
+
+#else
+# define plist_check_head(h) do { } while (0)
+#endif
+
+/**
+ * plist_add - add @node to @head
+ *
+ * @node: &struct plist_node pointer
+ * @head: &struct plist_head pointer
+ */
+void plist_add(struct plist_node *node, struct plist_head *head)
+{
+ struct plist_node *iter;
+
+ plist_check_head(head);
+ WARN_ON(!plist_node_empty(node));
+
+ list_for_each_entry(iter, &head->prio_list, plist.prio_list) {
+ if (node->prio < iter->prio)
+ goto lt_prio;
+ else if (node->prio == iter->prio) {
+ iter = list_entry(iter->plist.prio_list.next,
+ struct plist_node, plist.prio_list);
+ goto eq_prio;
+ }
+ }
+
+lt_prio:
+ list_add_tail(&node->plist.prio_list, &iter->plist.prio_list);
+eq_prio:
+ list_add_tail(&node->plist.node_list, &iter->plist.node_list);
+
+ plist_check_head(head);
+}
+
+/**
+ * plist_del - remove @node from @head
+ *
+ * @node: &struct plist_node pointer - entry to be removed
+ * @head: &struct plist_head pointer - list head
+ */
+void plist_del(struct plist_node *node, struct plist_head *head)
+{
+ plist_check_head(head);
+
+ if (!list_empty(&node->plist.prio_list)) {
+ struct plist_node *next = plist_first(&node->plist);
+
+ list_move_tail(&next->plist.prio_list, &node->plist.prio_list);
+ list_del_init(&node->plist.prio_list);
+ }
+
+ list_del_init(&node->plist.node_list);
+
+ plist_check_head(head);
+}
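A minimal usage sketch of the API above, assuming the plist_head_init(), plist_node_init() and plist_first() helpers from this patch's include/linux/plist.h (all names in the example itself are illustrative):

static DEFINE_SPINLOCK(demo_lock);	/* lock assumed to protect the plist */
static struct plist_head demo_head;

static void demo(void)
{
	struct plist_node a, b;

	plist_head_init(&demo_head, &demo_lock);
	plist_node_init(&a, 10);	/* smaller prio value = higher priority */
	plist_node_init(&b, 20);

	spin_lock(&demo_lock);
	plist_add(&b, &demo_head);
	plist_add(&a, &demo_head);
	/* the list is priority-sorted: plist_first() now returns &a */
	plist_del(plist_first(&demo_head), &demo_head);
	spin_unlock(&demo_lock);
}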
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index b32efae7688..637d55608de 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -530,7 +530,7 @@ int radix_tree_tag_get(struct radix_tree_root *root,
int ret = tag_get(slot, tag, offset);
BUG_ON(ret && saw_unset_tag);
- return ret;
+ return !!ret;
}
slot = slot->slots[offset];
shift -= RADIX_TREE_MAP_SHIFT;
diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c
index f8ac9fa95de..2cc11faa4ff 100644
--- a/lib/reed_solomon/reed_solomon.c
+++ b/lib/reed_solomon/reed_solomon.c
@@ -54,7 +54,6 @@ static DEFINE_MUTEX(rslistlock);
/**
* rs_init - Initialize a Reed-Solomon codec
- *
* @symsize: symbol size, bits (1-8)
* @gfpoly: Field generator polynomial coefficients
* @fcr: first root of RS code generator polynomial, index form
@@ -62,7 +61,7 @@ static DEFINE_MUTEX(rslistlock);
* @nroots: RS code generator polynomial degree (number of roots)
*
 * Allocate a control structure and the polynomial arrays for faster
- * en/decoding. Fill the arrays according to the given parameters
+ * en/decoding. Fill the arrays according to the given parameters.
*/
static struct rs_control *rs_init(int symsize, int gfpoly, int fcr,
int prim, int nroots)
@@ -155,8 +154,7 @@ errrs:
/**
- * free_rs - Free the rs control structure, if its not longer used
- *
+ * free_rs - Free the rs control structure, if it is no longer used
 * @rs:	the control structure which is no longer used by the
* caller
*/
@@ -176,7 +174,6 @@ void free_rs(struct rs_control *rs)
/**
* init_rs - Find a matching or allocate a new rs control structure
- *
* @symsize: the symbol size (number of bits)
* @gfpoly: the extended Galois field generator polynomial coefficients,
* with the 0th coefficient in the low order bit. The polynomial
@@ -236,7 +233,6 @@ out:
#ifdef CONFIG_REED_SOLOMON_ENC8
/**
* encode_rs8 - Calculate the parity for data values (8bit data width)
- *
* @rs: the rs control structure
* @data: data field of a given type
* @len: data length
@@ -258,7 +254,6 @@ EXPORT_SYMBOL_GPL(encode_rs8);
#ifdef CONFIG_REED_SOLOMON_DEC8
/**
* decode_rs8 - Decode codeword (8bit data width)
- *
* @rs: the rs control structure
* @data: data field of a given type
* @par: received parity data field
@@ -285,7 +280,6 @@ EXPORT_SYMBOL_GPL(decode_rs8);
#ifdef CONFIG_REED_SOLOMON_ENC16
/**
* encode_rs16 - Calculate the parity for data values (16bit data width)
- *
* @rs: the rs control structure
* @data: data field of a given type
* @len: data length
@@ -305,7 +299,6 @@ EXPORT_SYMBOL_GPL(encode_rs16);
#ifdef CONFIG_REED_SOLOMON_DEC16
/**
* decode_rs16 - Decode codeword (16bit data width)
- *
* @rs: the rs control structure
* @data: data field of a given type
* @par: received parity data field
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index 40ffde940a8..db4fed74b94 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -17,27 +17,22 @@ struct rwsem_waiter {
#define RWSEM_WAITING_FOR_WRITE 0x00000002
};
-#if RWSEM_DEBUG
-void rwsemtrace(struct rw_semaphore *sem, const char *str)
-{
- if (sem->debug)
- printk("[%d] %s({%d,%d})\n",
- current->pid, str, sem->activity,
- list_empty(&sem->wait_list) ? 0 : 1);
-}
-#endif
-
/*
* initialise the semaphore
*/
-void fastcall init_rwsem(struct rw_semaphore *sem)
+void __init_rwsem(struct rw_semaphore *sem, const char *name,
+ struct lock_class_key *key)
{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ /*
+ * Make sure we are not reinitializing a held semaphore:
+ */
+ debug_check_no_locks_freed((void *)sem, sizeof(*sem));
+ lockdep_init_map(&sem->dep_map, name, key);
+#endif
sem->activity = 0;
spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
-#if RWSEM_DEBUG
- sem->debug = 0;
-#endif
}
/*
@@ -56,8 +51,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
struct task_struct *tsk;
int woken;
- rwsemtrace(sem, "Entering __rwsem_do_wake");
-
waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
if (!wakewrite) {
@@ -104,7 +97,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
sem->activity += woken;
out:
- rwsemtrace(sem, "Leaving __rwsem_do_wake");
return sem;
}
@@ -138,8 +130,6 @@ void fastcall __sched __down_read(struct rw_semaphore *sem)
struct rwsem_waiter waiter;
struct task_struct *tsk;
- rwsemtrace(sem, "Entering __down_read");
-
spin_lock_irq(&sem->wait_lock);
if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
@@ -171,9 +161,8 @@ void fastcall __sched __down_read(struct rw_semaphore *sem)
}
tsk->state = TASK_RUNNING;
-
out:
- rwsemtrace(sem, "Leaving __down_read");
+ ;
}
/*
@@ -184,7 +173,6 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
unsigned long flags;
int ret = 0;
- rwsemtrace(sem, "Entering __down_read_trylock");
spin_lock_irqsave(&sem->wait_lock, flags);
@@ -196,7 +184,6 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
spin_unlock_irqrestore(&sem->wait_lock, flags);
- rwsemtrace(sem, "Leaving __down_read_trylock");
return ret;
}
@@ -204,13 +191,11 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
* get a write lock on the semaphore
* - we increment the waiting count anyway to indicate an exclusive lock
*/
-void fastcall __sched __down_write(struct rw_semaphore *sem)
+void fastcall __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
{
struct rwsem_waiter waiter;
struct task_struct *tsk;
- rwsemtrace(sem, "Entering __down_write");
-
spin_lock_irq(&sem->wait_lock);
if (sem->activity == 0 && list_empty(&sem->wait_list)) {
@@ -242,9 +227,13 @@ void fastcall __sched __down_write(struct rw_semaphore *sem)
}
tsk->state = TASK_RUNNING;
-
out:
- rwsemtrace(sem, "Leaving __down_write");
+ ;
+}
+
+void fastcall __sched __down_write(struct rw_semaphore *sem)
+{
+ __down_write_nested(sem, 0);
}
/*
@@ -255,8 +244,6 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
unsigned long flags;
int ret = 0;
- rwsemtrace(sem, "Entering __down_write_trylock");
-
spin_lock_irqsave(&sem->wait_lock, flags);
if (sem->activity == 0 && list_empty(&sem->wait_list)) {
@@ -267,7 +254,6 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
spin_unlock_irqrestore(&sem->wait_lock, flags);
- rwsemtrace(sem, "Leaving __down_write_trylock");
return ret;
}
@@ -278,16 +264,12 @@ void fastcall __up_read(struct rw_semaphore *sem)
{
unsigned long flags;
- rwsemtrace(sem, "Entering __up_read");
-
spin_lock_irqsave(&sem->wait_lock, flags);
if (--sem->activity == 0 && !list_empty(&sem->wait_list))
sem = __rwsem_wake_one_writer(sem);
spin_unlock_irqrestore(&sem->wait_lock, flags);
-
- rwsemtrace(sem, "Leaving __up_read");
}
/*
@@ -297,8 +279,6 @@ void fastcall __up_write(struct rw_semaphore *sem)
{
unsigned long flags;
- rwsemtrace(sem, "Entering __up_write");
-
spin_lock_irqsave(&sem->wait_lock, flags);
sem->activity = 0;
@@ -306,8 +286,6 @@ void fastcall __up_write(struct rw_semaphore *sem)
sem = __rwsem_do_wake(sem, 1);
spin_unlock_irqrestore(&sem->wait_lock, flags);
-
- rwsemtrace(sem, "Leaving __up_write");
}
/*
@@ -318,8 +296,6 @@ void fastcall __downgrade_write(struct rw_semaphore *sem)
{
unsigned long flags;
- rwsemtrace(sem, "Entering __downgrade_write");
-
spin_lock_irqsave(&sem->wait_lock, flags);
sem->activity = 1;
@@ -327,18 +303,14 @@ void fastcall __downgrade_write(struct rw_semaphore *sem)
sem = __rwsem_do_wake(sem, 0);
spin_unlock_irqrestore(&sem->wait_lock, flags);
-
- rwsemtrace(sem, "Leaving __downgrade_write");
}
-EXPORT_SYMBOL(init_rwsem);
+EXPORT_SYMBOL(__init_rwsem);
EXPORT_SYMBOL(__down_read);
EXPORT_SYMBOL(__down_read_trylock);
+EXPORT_SYMBOL(__down_write_nested);
EXPORT_SYMBOL(__down_write);
EXPORT_SYMBOL(__down_write_trylock);
EXPORT_SYMBOL(__up_read);
EXPORT_SYMBOL(__up_write);
EXPORT_SYMBOL(__downgrade_write);
-#if RWSEM_DEBUG
-EXPORT_SYMBOL(rwsemtrace);
-#endif
diff --git a/lib/rwsem.c b/lib/rwsem.c
index 62fa4eba9ff..901d0e7da89 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -8,6 +8,26 @@
#include <linux/init.h>
#include <linux/module.h>
+/*
+ * Initialize an rwsem:
+ */
+void __init_rwsem(struct rw_semaphore *sem, const char *name,
+ struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ /*
+ * Make sure we are not reinitializing a held semaphore:
+ */
+ debug_check_no_locks_freed((void *)sem, sizeof(*sem));
+ lockdep_init_map(&sem->dep_map, name, key);
+#endif
+ sem->count = RWSEM_UNLOCKED_VALUE;
+ spin_lock_init(&sem->wait_lock);
+ INIT_LIST_HEAD(&sem->wait_list);
+}
+
+EXPORT_SYMBOL(__init_rwsem);
+
struct rwsem_waiter {
struct list_head list;
struct task_struct *task;
@@ -16,17 +36,6 @@ struct rwsem_waiter {
#define RWSEM_WAITING_FOR_WRITE 0x00000002
};
-#if RWSEM_DEBUG
-#undef rwsemtrace
-void rwsemtrace(struct rw_semaphore *sem, const char *str)
-{
- printk("sem=%p\n", sem);
- printk("(sem)=%08lx\n", sem->count);
- if (sem->debug)
- printk("[%d] %s({%08lx})\n", current->pid, str, sem->count);
-}
-#endif
-
/*
* handle the lock release when processes blocked on it that can now run
* - if we come here from up_xxxx(), then:
@@ -45,8 +54,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
struct list_head *next;
signed long oldcount, woken, loop;
- rwsemtrace(sem, "Entering __rwsem_do_wake");
-
if (downgrading)
goto dont_wake_writers;
@@ -127,7 +134,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
next->prev = &sem->wait_list;
out:
- rwsemtrace(sem, "Leaving __rwsem_do_wake");
return sem;
/* undo the change to count, but check for a transition 1->0 */
@@ -140,7 +146,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
/*
* wait for a lock to be granted
*/
-static inline struct rw_semaphore *
+static struct rw_semaphore *
rwsem_down_failed_common(struct rw_semaphore *sem,
struct rwsem_waiter *waiter, signed long adjustment)
{
@@ -186,13 +192,9 @@ rwsem_down_read_failed(struct rw_semaphore *sem)
{
struct rwsem_waiter waiter;
- rwsemtrace(sem, "Entering rwsem_down_read_failed");
-
waiter.flags = RWSEM_WAITING_FOR_READ;
rwsem_down_failed_common(sem, &waiter,
RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS);
-
- rwsemtrace(sem, "Leaving rwsem_down_read_failed");
return sem;
}
@@ -204,12 +206,9 @@ rwsem_down_write_failed(struct rw_semaphore *sem)
{
struct rwsem_waiter waiter;
- rwsemtrace(sem, "Entering rwsem_down_write_failed");
-
waiter.flags = RWSEM_WAITING_FOR_WRITE;
rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS);
- rwsemtrace(sem, "Leaving rwsem_down_write_failed");
return sem;
}
@@ -221,8 +220,6 @@ struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
{
unsigned long flags;
- rwsemtrace(sem, "Entering rwsem_wake");
-
spin_lock_irqsave(&sem->wait_lock, flags);
/* do nothing if list empty */
@@ -231,8 +228,6 @@ struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
spin_unlock_irqrestore(&sem->wait_lock, flags);
- rwsemtrace(sem, "Leaving rwsem_wake");
-
return sem;
}
@@ -245,8 +240,6 @@ struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
unsigned long flags;
- rwsemtrace(sem, "Entering rwsem_downgrade_wake");
-
spin_lock_irqsave(&sem->wait_lock, flags);
/* do nothing if list empty */
@@ -255,7 +248,6 @@ struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
spin_unlock_irqrestore(&sem->wait_lock, flags);
- rwsemtrace(sem, "Leaving rwsem_downgrade_wake");
return sem;
}
@@ -263,6 +255,3 @@ EXPORT_SYMBOL(rwsem_down_read_failed);
EXPORT_SYMBOL(rwsem_down_write_failed);
EXPORT_SYMBOL(rwsem_wake);
EXPORT_SYMBOL(rwsem_downgrade_wake);
-#if RWSEM_DEBUG
-EXPORT_SYMBOL(rwsemtrace);
-#endif
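Callers do not invoke __init_rwsem() directly: the companion <linux/rwsem.h> change (not part of this file) wraps it so that every initialization site gets its own lock_class_key for lockdep class separation. Roughly:

/*
 * Sketch of the init_rwsem() wrapper from the corresponding header --
 * one static key per init site; the #sem string names the lock class:
 */
#define init_rwsem(sem)					\
do {							\
	static struct lock_class_key __key;		\
							\
	__init_rwsem((sem), #sem, &__key);		\
} while (0)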
diff --git a/lib/semaphore-sleepers.c b/lib/semaphore-sleepers.c
index 4d5f18889fa..12818052386 100644
--- a/lib/semaphore-sleepers.c
+++ b/lib/semaphore-sleepers.c
@@ -12,7 +12,6 @@
*
* rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
*/
-#include <linux/config.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/init.h>
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index d8b6bb419d4..dafaf1de249 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -6,41 +6,73 @@
* DEBUG_SPINLOCK.
*/
-#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
+#include <linux/debug_locks.h>
#include <linux/delay.h>
+#include <linux/module.h>
+
+void __spin_lock_init(spinlock_t *lock, const char *name,
+ struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ /*
+ * Make sure we are not reinitializing a held lock:
+ */
+ debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+ lockdep_init_map(&lock->dep_map, name, key);
+#endif
+ lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+ lock->magic = SPINLOCK_MAGIC;
+ lock->owner = SPINLOCK_OWNER_INIT;
+ lock->owner_cpu = -1;
+}
+
+EXPORT_SYMBOL(__spin_lock_init);
+
+void __rwlock_init(rwlock_t *lock, const char *name,
+ struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ /*
+ * Make sure we are not reinitializing a held lock:
+ */
+ debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+ lockdep_init_map(&lock->dep_map, name, key);
+#endif
+ lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED;
+ lock->magic = RWLOCK_MAGIC;
+ lock->owner = SPINLOCK_OWNER_INIT;
+ lock->owner_cpu = -1;
+}
+
+EXPORT_SYMBOL(__rwlock_init);
static void spin_bug(spinlock_t *lock, const char *msg)
{
- static long print_once = 1;
struct task_struct *owner = NULL;
- if (xchg(&print_once, 0)) {
- if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
- owner = lock->owner;
- printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
- msg, raw_smp_processor_id(),
- current->comm, current->pid);
- printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, "
- ".owner_cpu: %d\n",
- lock, lock->magic,
- owner ? owner->comm : "<none>",
- owner ? owner->pid : -1,
- lock->owner_cpu);
- dump_stack();
-#ifdef CONFIG_SMP
- /*
- * We cannot continue on SMP:
- */
-// panic("bad locking");
-#endif
- }
+ if (!debug_locks_off())
+ return;
+
+ if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
+ owner = lock->owner;
+ printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
+ msg, raw_smp_processor_id(),
+ current->comm, current->pid);
+ printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, "
+ ".owner_cpu: %d\n",
+ lock, lock->magic,
+ owner ? owner->comm : "<none>",
+ owner ? owner->pid : -1,
+ lock->owner_cpu);
+ dump_stack();
}
#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
-static inline void debug_spin_lock_before(spinlock_t *lock)
+static inline void
+debug_spin_lock_before(spinlock_t *lock)
{
SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
SPIN_BUG_ON(lock->owner == current, lock, "recursion");
@@ -67,11 +99,12 @@ static inline void debug_spin_unlock(spinlock_t *lock)
static void __spin_lock_debug(spinlock_t *lock)
{
- int print_once = 1;
u64 i;
+ u64 loops = loops_per_jiffy * HZ;
+ int print_once = 1;
for (;;) {
- for (i = 0; i < loops_per_jiffy * HZ; i++) {
+ for (i = 0; i < loops; i++) {
if (__raw_spin_trylock(&lock->raw_lock))
return;
__delay(1);
@@ -119,31 +152,26 @@ void _raw_spin_unlock(spinlock_t *lock)
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
- static long print_once = 1;
-
- if (xchg(&print_once, 0)) {
- printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
- msg, raw_smp_processor_id(), current->comm,
- current->pid, lock);
- dump_stack();
-#ifdef CONFIG_SMP
- /*
- * We cannot continue on SMP:
- */
- panic("bad locking");
-#endif
- }
+ if (!debug_locks_off())
+ return;
+
+ printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
+ msg, raw_smp_processor_id(), current->comm,
+ current->pid, lock);
+ dump_stack();
}
#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
+#if 0 /* __write_lock_debug() can lock up - maybe this can too? */
static void __read_lock_debug(rwlock_t *lock)
{
- int print_once = 1;
u64 i;
+ u64 loops = loops_per_jiffy * HZ;
+ int print_once = 1;
for (;;) {
- for (i = 0; i < loops_per_jiffy * HZ; i++) {
+ for (i = 0; i < loops; i++) {
if (__raw_read_trylock(&lock->raw_lock))
return;
__delay(1);
@@ -159,12 +187,12 @@ static void __read_lock_debug(rwlock_t *lock)
}
}
}
+#endif
void _raw_read_lock(rwlock_t *lock)
{
RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
- if (unlikely(!__raw_read_trylock(&lock->raw_lock)))
- __read_lock_debug(lock);
+ __raw_read_lock(&lock->raw_lock);
}
int _raw_read_trylock(rwlock_t *lock)
@@ -210,13 +238,15 @@ static inline void debug_write_unlock(rwlock_t *lock)
lock->owner_cpu = -1;
}
+#if 0 /* This can cause lockups */
static void __write_lock_debug(rwlock_t *lock)
{
- int print_once = 1;
u64 i;
+ u64 loops = loops_per_jiffy * HZ;
+ int print_once = 1;
for (;;) {
- for (i = 0; i < loops_per_jiffy * HZ; i++) {
+ for (i = 0; i < loops; i++) {
if (__raw_write_trylock(&lock->raw_lock))
return;
__delay(1);
@@ -232,12 +262,12 @@ static void __write_lock_debug(rwlock_t *lock)
}
}
}
+#endif
void _raw_write_lock(rwlock_t *lock)
{
debug_write_lock_before(lock);
- if (unlikely(!__raw_write_trylock(&lock->raw_lock)))
- __write_lock_debug(lock);
+ __raw_write_lock(&lock->raw_lock);
debug_write_lock_after(lock);
}
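To illustrate what spin_bug() now reports (instead of the old racy print_once logic), a recursive acquisition like the hedged example below trips the "recursion" check in debug_spin_lock_before(), prints the owner, and switches further reports off via debug_locks_off(); the CPU still spins afterwards, but the diagnostic comes out first:

/* Hypothetical buggy caller, for illustration only: */
static DEFINE_SPINLOCK(demo_lock);

static void buggy(void)
{
	spin_lock(&demo_lock);
	spin_lock(&demo_lock);	/* owner == current: spin_bug("recursion") fires */
	spin_unlock(&demo_lock);
}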
diff --git a/lib/textsearch.c b/lib/textsearch.c
index 6f3093efbd7..2cb4a437942 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -93,7 +93,6 @@
* ==========================================================================
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
index c4c1ac5fbd1..d90822c378a 100644
--- a/lib/ts_bm.c
+++ b/lib/ts_bm.c
@@ -35,7 +35,6 @@
* matchings spread over multiple fragments, then go BM.
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
@@ -112,15 +111,14 @@ static int subpattern(u8 *pattern, int i, int j, int g)
return ret;
}
-static void compute_prefix_tbl(struct ts_bm *bm, const u8 *pattern,
- unsigned int len)
+static void compute_prefix_tbl(struct ts_bm *bm)
{
int i, j, g;
for (i = 0; i < ASIZE; i++)
- bm->bad_shift[i] = len;
- for (i = 0; i < len - 1; i++)
- bm->bad_shift[pattern[i]] = len - 1 - i;
+ bm->bad_shift[i] = bm->patlen;
+ for (i = 0; i < bm->patlen - 1; i++)
+ bm->bad_shift[bm->pattern[i]] = bm->patlen - 1 - i;
	/* Compute the good shift array, used to match recurrences
* of a subpattern */
@@ -151,8 +149,8 @@ static struct ts_config *bm_init(const void *pattern, unsigned int len,
bm = ts_config_priv(conf);
bm->patlen = len;
bm->pattern = (u8 *) bm->good_shift + prefix_tbl_len;
- compute_prefix_tbl(bm, pattern, len);
memcpy(bm->pattern, pattern, len);
+ compute_prefix_tbl(bm);
return conf;
}
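The reordering in bm_init() above is load-bearing, not cosmetic: compute_prefix_tbl() now reads the pattern through bm->pattern instead of receiving it as an argument, so the copy has to happen first:

	memcpy(bm->pattern, pattern, len);	/* bm->pattern must be populated... */
	compute_prefix_tbl(bm);			/* ...before the shift tables are derived from it */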
diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c
index ca3211206ee..af575b61526 100644
--- a/lib/ts_fsm.c
+++ b/lib/ts_fsm.c
@@ -12,13 +12,13 @@
*
* A finite state machine consists of n states (struct ts_fsm_token)
 * representing the pattern as a finite automaton. The data is read
- * sequentially on a octet basis. Every state token specifies the number
+ * sequentially on an octet basis. Every state token specifies the number
* of recurrences and the type of value accepted which can be either a
* specific character or ctype based set of characters. The available
 * types of recurrence include 1, (0|1), [0 n], and [1 n].
*
- * The algorithm differs between strict/non-strict mode specyfing
- * whether the pattern has to start at the first octect. Strict mode
+ * The algorithm differs between strict/non-strict mode specifying
+ * whether the pattern has to start at the first octet. Strict mode
* is enabled by default and can be disabled by inserting
* TS_FSM_HEAD_IGNORE as the first token in the chain.
*
@@ -26,7 +26,6 @@
* however while in strict mode the average runtime can be better.
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
@@ -45,7 +44,7 @@ struct ts_fsm
#define _W 0x200 /* wildcard */
/* Map to _ctype flags and some magic numbers */
-static u16 token_map[TS_FSM_TYPE_MAX+1] = {
+static const u16 token_map[TS_FSM_TYPE_MAX+1] = {
[TS_FSM_SPECIFIC] = 0,
[TS_FSM_WILDCARD] = _W,
[TS_FSM_CNTRL] = _C,
@@ -62,7 +61,7 @@ static u16 token_map[TS_FSM_TYPE_MAX+1] = {
[TS_FSM_ASCII] = _A,
};
-static u16 token_lookup_tbl[256] = {
+static const u16 token_lookup_tbl[256] = {
_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 0- 3 */
_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 4- 7 */
_W|_A|_C, _W|_A|_C|_S, _W|_A|_C|_S, _W|_A|_C|_S, /* 8- 11 */
diff --git a/lib/ts_kmp.c b/lib/ts_kmp.c
index 7fd45451b44..3ced628cab4 100644
--- a/lib/ts_kmp.c
+++ b/lib/ts_kmp.c
@@ -30,7 +30,6 @@
 * [2] See finite automata theory
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index b07db5ca3f6..bed7229378f 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -187,49 +187,49 @@ static char * number(char * buf, char * end, unsigned long long num, int base, i
size -= precision;
if (!(type&(ZEROPAD+LEFT))) {
while(size-->0) {
- if (buf <= end)
+ if (buf < end)
*buf = ' ';
++buf;
}
}
if (sign) {
- if (buf <= end)
+ if (buf < end)
*buf = sign;
++buf;
}
if (type & SPECIAL) {
if (base==8) {
- if (buf <= end)
+ if (buf < end)
*buf = '0';
++buf;
} else if (base==16) {
- if (buf <= end)
+ if (buf < end)
*buf = '0';
++buf;
- if (buf <= end)
+ if (buf < end)
*buf = digits[33];
++buf;
}
}
if (!(type & LEFT)) {
while (size-- > 0) {
- if (buf <= end)
+ if (buf < end)
*buf = c;
++buf;
}
}
while (i < precision--) {
- if (buf <= end)
+ if (buf < end)
*buf = '0';
++buf;
}
while (i-- > 0) {
- if (buf <= end)
+ if (buf < end)
*buf = tmp[i];
++buf;
}
while (size-- > 0) {
- if (buf <= end)
+ if (buf < end)
*buf = ' ';
++buf;
}
@@ -272,7 +272,8 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
/* 'z' changed to 'Z' --davidm 1/25/99 */
/* 't' added for ptrdiff_t */
- /* Reject out-of-range values early */
+ /* Reject out-of-range values early. Large positive sizes are
+ used for unknown buffer sizes. */
if (unlikely((int) size < 0)) {
/* There can be only one.. */
static int warn = 1;
@@ -282,16 +283,17 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
}
str = buf;
- end = buf + size - 1;
+ end = buf + size;
- if (end < buf - 1) {
- end = ((void *) -1);
- size = end - buf + 1;
+ /* Make sure end is always >= buf */
+ if (end < buf) {
+ end = ((void *)-1);
+ size = end - buf;
}
for (; *fmt ; ++fmt) {
if (*fmt != '%') {
- if (str <= end)
+ if (str < end)
*str = *fmt;
++str;
continue;
@@ -357,17 +359,17 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
case 'c':
if (!(flags & LEFT)) {
while (--field_width > 0) {
- if (str <= end)
+ if (str < end)
*str = ' ';
++str;
}
}
c = (unsigned char) va_arg(args, int);
- if (str <= end)
+ if (str < end)
*str = c;
++str;
while (--field_width > 0) {
- if (str <= end)
+ if (str < end)
*str = ' ';
++str;
}
@@ -382,18 +384,18 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
if (!(flags & LEFT)) {
while (len < field_width--) {
- if (str <= end)
+ if (str < end)
*str = ' ';
++str;
}
}
for (i = 0; i < len; ++i) {
- if (str <= end)
+ if (str < end)
*str = *s;
++str; ++s;
}
while (len < field_width--) {
- if (str <= end)
+ if (str < end)
*str = ' ';
++str;
}
@@ -426,7 +428,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
continue;
case '%':
- if (str <= end)
+ if (str < end)
*str = '%';
++str;
continue;
@@ -449,11 +451,11 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
break;
default:
- if (str <= end)
+ if (str < end)
*str = '%';
++str;
if (*fmt) {
- if (str <= end)
+ if (str < end)
*str = *fmt;
++str;
} else {
@@ -483,14 +485,13 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
str = number(str, end, num, base,
field_width, precision, flags);
}
- if (str <= end)
- *str = '\0';
- else if (size > 0)
- /* don't write out a null byte if the buf size is zero */
- *end = '\0';
- /* the trailing null byte doesn't count towards the total
- * ++str;
- */
+ if (size > 0) {
+ if (str < end)
+ *str = '\0';
+ else
+ end[-1] = '\0';
+ }
+ /* the trailing null byte doesn't count towards the total */
return str-buf;
}
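With end now pointing one past the buffer, vsnprintf() keeps the C99/glibc contract: for size > 0 the output is always NUL-terminated (truncated if need be), and the return value is the length that would have been written given enough room. A hedged caller-side sketch of detecting truncation through snprintf(), the varargs wrapper:

static int format_name(char *buf, size_t size, int id)
{
	int len = snprintf(buf, size, "device-%d", id);	/* hypothetical format */

	if ((size_t)len >= size)
		return -E2BIG;	/* output was truncated to size-1 chars plus NUL */
	return len;
}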
@@ -848,3 +849,26 @@ int sscanf(const char * buf, const char * fmt, ...)
}
EXPORT_SYMBOL(sscanf);
+
+
+/* Simplified asprintf. */
+char *kasprintf(gfp_t gfp, const char *fmt, ...)
+{
+ va_list ap;
+ unsigned int len;
+ char *p;
+
+ va_start(ap, fmt);
+ len = vsnprintf(NULL, 0, fmt, ap);
+ va_end(ap);
+
+ p = kmalloc(len+1, gfp);
+ if (!p)
+ return NULL;
+ va_start(ap, fmt);
+ vsnprintf(p, len+1, fmt, ap);
+ va_end(ap);
+ return p;
+}
+
+EXPORT_SYMBOL(kasprintf);
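kasprintf() sizes the allocation with a first vsnprintf(NULL, 0, ...) pass and then formats into the freshly kmalloc()ed buffer, so callers only have to kfree() the result. A minimal usage sketch (function name and format string are illustrative):

static char *make_label(int idx)
{
	char *label = kasprintf(GFP_KERNEL, "queue-%d", idx);

	if (!label)
		return NULL;	/* allocation failure */
	/* ... use label; the caller eventually does kfree(label) ... */
	return label;
}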
diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c
index 02a16eacb72..d84560c076d 100644
--- a/lib/zlib_inflate/inffast.c
+++ b/lib/zlib_inflate/inffast.c
@@ -63,10 +63,10 @@
bytes, which is the maximum length that can be coded. inflate_fast()
requires strm->avail_out >= 258 for each loop to avoid checking for
output space.
+
+ - @start: inflate()'s starting value for strm->avail_out
*/
-void inflate_fast(strm, start)
-z_streamp strm;
-unsigned start; /* inflate()'s starting value for strm->avail_out */
+void inflate_fast(z_streamp strm, unsigned start)
{
struct inflate_state *state;
unsigned char *in; /* local strm->next_in */
diff --git a/lib/zlib_inflate/inflate.c b/lib/zlib_inflate/inflate.c
index 7f922dccf1a..fceb97c3aff 100644
--- a/lib/zlib_inflate/inflate.c
+++ b/lib/zlib_inflate/inflate.c
@@ -347,7 +347,10 @@ int zlib_inflate(z_streamp strm, int flush)
static const unsigned short order[19] = /* permutation of code lengths */
{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
- if (strm == NULL || strm->state == NULL || strm->next_out == NULL ||
+ /* Do not check for strm->next_out == NULL here as ppc zImage
+ inflates to strm->next_out = 0 */
+
+ if (strm == NULL || strm->state == NULL ||
(strm->next_in == NULL && strm->avail_in != 0))
return Z_STREAM_ERROR;
diff --git a/lib/zlib_inflate/inftrees.c b/lib/zlib_inflate/inftrees.c
index 62343c53bf7..3fe6ce5b53e 100644
--- a/lib/zlib_inflate/inftrees.c
+++ b/lib/zlib_inflate/inftrees.c
@@ -8,15 +8,6 @@
#define MAXBITS 15
-const char inflate_copyright[] =
- " inflate 1.2.3 Copyright 1995-2005 Mark Adler ";
-/*
- If you use the zlib library in a product, an acknowledgment is welcome
- in the documentation of your product. If for some reason you cannot
- include such an acknowledgment, I would appreciate that you keep this
- copyright string in the executable of your product.
- */
-
/*
Build a set of tables to decode the provided canonical Huffman code.
The code lengths are lens[0..codes-1]. The result starts at *table,
@@ -29,13 +20,8 @@ const char inflate_copyright[] =
table index bits. It will differ if the request is greater than the
longest code or if it is less than the shortest code.
*/
-int zlib_inflate_table(type, lens, codes, table, bits, work)
-codetype type;
-unsigned short *lens;
-unsigned codes;
-code **table;
-unsigned *bits;
-unsigned short *work;
+int zlib_inflate_table(codetype type, unsigned short *lens, unsigned codes,
+ code **table, unsigned *bits, unsigned short *work)
{
unsigned len; /* a code's length in bits */
unsigned sym; /* index of code symbols */