author    Ingo Molnar <mingo@elte.hu>    2008-10-28 16:26:12 +0100
committer Ingo Molnar <mingo@elte.hu>    2008-10-28 16:26:12 +0100
commit    7a9787e1eba95a166265e6a260cf30af04ef0a99 (patch)
tree      e730a4565e0318140d2fbd2f0415d18a339d7336 /lib
parent    41b9eb264c8407655db57b60b4457fe1b2ec9977 (diff)
parent    0173a3265b228da319ceb9c1ec6a5682fd1b2d92 (diff)
Merge commit 'v2.6.28-rc2' into x86/pci-ioapic-boot-irq-quirks
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                   4
-rw-r--r--  lib/Kconfig.debug           181
-rw-r--r--  lib/Kconfig.kgdb             14
-rw-r--r--  lib/Makefile                 11
-rw-r--r--  lib/bcd.c                    14
-rw-r--r--  lib/bitmap.c                 22
-rw-r--r--  lib/cmdline.c                16
-rw-r--r--  lib/cpumask.c                 9
-rw-r--r--  lib/debug_locks.c             2
-rw-r--r--  lib/debugobjects.c           50
-rw-r--r--  lib/dynamic_printk.c        418
-rw-r--r--  lib/idr.c                   142
-rw-r--r--  lib/inflate.c                52
-rw-r--r--  lib/iomap.c                   3
-rw-r--r--  lib/iommu-helper.c           14
-rw-r--r--  lib/klist.c                  96
-rw-r--r--  lib/kobject.c                53
-rw-r--r--  lib/kobject_uevent.c          9
-rw-r--r--  lib/list_debug.c             50
-rw-r--r--  lib/lmb.c                     2
-rw-r--r--  lib/lzo/lzo1x_decompress.c    6
-rw-r--r--  lib/parser.c                  2
-rw-r--r--  lib/percpu_counter.c          8
-rw-r--r--  lib/plist.c                  13
-rw-r--r--  lib/radix-tree.c            180
-rw-r--r--  lib/random32.c               48
-rw-r--r--  lib/ratelimit.c              56
-rw-r--r--  lib/scatterlist.c           180
-rw-r--r--  lib/show_mem.c               63
-rw-r--r--  lib/smp_processor_id.c        5
-rw-r--r--  lib/string_helpers.c         68
-rw-r--r--  lib/swiotlb.c                51
-rw-r--r--  lib/syscall.c                75
-rw-r--r--  lib/textsearch.c             16
-rw-r--r--  lib/ts_bm.c                  26
-rw-r--r--  lib/ts_fsm.c                  6
-rw-r--r--  lib/ts_kmp.c                 29
-rw-r--r--  lib/vsprintf.c              295
38 files changed, 1777 insertions, 512 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index c7ad7a5b353..85cf7ea978a 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -8,10 +8,10 @@ config BITREVERSE
tristate
config GENERIC_FIND_FIRST_BIT
- def_bool n
+ bool
config GENERIC_FIND_NEXT_BIT
- def_bool n
+ bool
config CRC_CCITT
tristate "CRC-CCITT functions"
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index df27132a56f..b0f239e443b 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -74,6 +74,9 @@ config DEBUG_FS
debugging files into. Enable this option to be able to read and
write to these files.
+ For detailed documentation on the debugfs API, see
+ Documentation/DocBook/filesystems.
+
If unsure, say N.
config HEADERS_CHECK
@@ -147,7 +150,7 @@ config DETECT_SOFTLOCKUP
help
Say Y here to enable the kernel to detect "soft lockups",
which are bugs that cause the kernel to loop in kernel
- mode for more than 10 seconds, without giving other tasks a
+ mode for more than 60 seconds, without giving other tasks a
chance to run.
When a soft-lockup is detected, the kernel will print the
@@ -159,6 +162,30 @@ config DETECT_SOFTLOCKUP
can be detected via the NMI-watchdog, on platforms that
support it.)
+config BOOTPARAM_SOFTLOCKUP_PANIC
+ bool "Panic (Reboot) On Soft Lockups"
+ depends on DETECT_SOFTLOCKUP
+ help
+ Say Y here to enable the kernel to panic on "soft lockups",
+ which are bugs that cause the kernel to loop in kernel
+ mode for more than 60 seconds, without giving other tasks a
+ chance to run.
+
+ The panic can be used in combination with panic_timeout,
+ to cause the system to reboot automatically after a
+ lockup has been detected. This feature is useful for
+ high-availability systems that have uptime guarantees and
+ where a lockup must be resolved ASAP.
+
+ Say N if unsure.
+
+config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
+ int
+ depends on DETECT_SOFTLOCKUP
+ range 0 1
+ default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC
+ default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
+
config SCHED_DEBUG
bool "Collect scheduler debugging info"
depends on DEBUG_KERNEL && PROC_FS
@@ -367,7 +394,7 @@ config LOCKDEP
bool
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
select STACKTRACE
- select FRAME_POINTER if !X86 && !MIPS
+ select FRAME_POINTER if !X86 && !MIPS && !PPC
select KALLSYMS
select KALLSYMS_ALL
@@ -468,6 +495,15 @@ config DEBUG_VM
If unsure, say N.
+config DEBUG_VIRTUAL
+ bool "Debug VM translations"
+ depends on DEBUG_KERNEL && X86
+ help
+ Enable some costly sanity checks in virtual to page code. This can
+ catch mistakes with virt_to_page() and friends.
+
+ If unsure, say N.
+
config DEBUG_WRITECOUNT
bool "Debug filesystem writers count"
depends on DEBUG_KERNEL
@@ -478,6 +514,18 @@ config DEBUG_WRITECOUNT
If unsure, say N.
+config DEBUG_MEMORY_INIT
+ bool "Debug memory initialisation" if EMBEDDED
+ default !EMBEDDED
+ help
+ Enable this for additional checks during memory initialisation.
+ The sanity checks verify aspects of the VM such as the memory model
+ and other information provided by the architecture. Verbose
+ information will be printed at KERN_DEBUG loglevel depending
+ on the mminit_loglevel= command-line option.
+
+ If unsure, say Y.
+
config DEBUG_LIST
bool "Debug linked list manipulation"
depends on DEBUG_KERNEL
@@ -558,6 +606,19 @@ config RCU_TORTURE_TEST_RUNNABLE
Say N here if you want the RCU torture tests to start only
after being manually enabled via /proc.
+config RCU_CPU_STALL_DETECTOR
+ bool "Check for stalled CPUs delaying RCU grace periods"
+ depends on CLASSIC_RCU
+ default n
+ help
+ This option causes RCU to printk information on which
+ CPUs are delaying the current grace period, but only when
+ the grace period extends for excessive time periods.
+
+ Say Y if you want RCU to perform such checks.
+
+ Say N if you are unsure.
+
config KPROBES_SANITY_TEST
bool "Kprobes sanity tests"
depends on DEBUG_KERNEL
@@ -585,6 +646,33 @@ config BACKTRACE_SELF_TEST
Say N if you are unsure.
+config DEBUG_BLOCK_EXT_DEVT
+ bool "Force extended block device numbers and spread them"
+ depends on DEBUG_KERNEL
+ depends on BLOCK
+ default n
+ help
+ BIG FAT WARNING: ENABLING THIS OPTION MIGHT BREAK BOOTING ON
+ SOME DISTRIBUTIONS. DO NOT ENABLE THIS UNLESS YOU KNOW WHAT
+ YOU ARE DOING. Distros, please enable this and fix whatever
+ is broken.
+
+ Conventionally, block device numbers are allocated from a
+ predetermined contiguous area. However, the extended block
+ area may introduce non-contiguous block device numbers. This
+ option forces most block device numbers to be allocated from
+ the extended space and spreads them out to discover kernel or
+ userland code paths which assume predetermined contiguous
+ device number allocation.
+
+ Note that turning on this debug option shuffles all the
+ device numbers for all IDE and SCSI devices including libata
+ ones, so root partition specified using device number
+ directly (via rdev or root=MAJ:MIN) won't work anymore.
+ Textual device names (root=/dev/sdXn) will continue to work.
+
+ Say N if you are unsure.
+
config LKDTM
tristate "Linux Kernel Dump Test Tool Module"
depends on DEBUG_KERNEL
@@ -622,10 +710,21 @@ config FAIL_PAGE_ALLOC
config FAIL_MAKE_REQUEST
bool "Fault-injection capability for disk IO"
- depends on FAULT_INJECTION
+ depends on FAULT_INJECTION && BLOCK
help
Provide fault-injection capability for disk IO.
+config FAIL_IO_TIMEOUT
+ bool "Fault-injection capability for faking disk interrupts"
+ depends on FAULT_INJECTION && BLOCK
+ help
+ Provide fault-injection capability on end IO handling. This
+ will make the block layer "forget" an interrupt as configured,
+ thus exercising the error handling.
+
+ Only works with drivers that use the generic timeout handling,
+ for others it won't do anything.
+
config FAULT_INJECTION_DEBUG_FS
bool "Debugfs entries for fault-injection capabilities"
depends on FAULT_INJECTION && SYSFS && DEBUG_FS
@@ -637,13 +736,13 @@ config FAULT_INJECTION_STACKTRACE_FILTER
depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
depends on !X86_64
select STACKTRACE
- select FRAME_POINTER
+ select FRAME_POINTER if !PPC
help
Provide stacktrace filter for fault-injection capabilities
config LATENCYTOP
bool "Latency measuring infrastructure"
- select FRAME_POINTER if !MIPS
+ select FRAME_POINTER if !MIPS && !PPC
select KALLSYMS
select KALLSYMS_ALL
select STACKTRACE
@@ -654,6 +753,14 @@ config LATENCYTOP
Enable this option if you want to use the LatencyTOP tool
to find out which userspace is blocking on what kernel operations.
+config SYSCTL_SYSCALL_CHECK
+ bool "Sysctl checks"
+ depends on SYSCTL_SYSCALL
+ ---help---
+ sys_sysctl uses binary paths that have been found challenging
+ to maintain and use properly. This enables checks that help
+ you keep things correct.
+
source kernel/trace/Kconfig
config PROVIDE_OHCI1394_DMA_INIT
@@ -696,6 +803,70 @@ config FIREWIRE_OHCI_REMOTE_DMA
If unsure, say N.
+menuconfig BUILD_DOCSRC
+ bool "Build targets in Documentation/ tree"
+ depends on HEADERS_CHECK
+ help
+ This option attempts to build objects from the source files in the
+ kernel Documentation/ tree.
+
+ Say N if you are unsure.
+
+config DYNAMIC_PRINTK_DEBUG
+ bool "Enable dynamic printk() call support"
+ default n
+ depends on PRINTK
+ select PRINTK_DEBUG
+ help
+
+ Compiles debug level messages into the kernel, which would not
+ otherwise be available at runtime. These messages can then be
+ enabled/disabled on a per module basis. This mechanism implicitly
+ enables all pr_debug() and dev_dbg() calls. The impact of this
+ compile option is a larger kernel text size of about 2%.
+
+ Usage:
+
+ Dynamic debugging is controlled by the debugfs file,
+ dynamic_printk/modules. This file contains a list of the modules that
+ can be enabled. The format of the file is the module name, followed
+ by a set of flags that can be enabled. The first flag is always the
+ 'enabled' flag. For example:
+
+ <module_name> <enabled=0/1>
+ .
+ .
+ .
+
+ <module_name> : Name of the module in which the debug call resides
+ <enabled=0/1> : whether the messages are enabled or not
+
+ From a live system:
+
+ snd_hda_intel enabled=0
+ fixup enabled=0
+ driver enabled=0
+
+ Enable a module:
+
+ $ echo "set enabled=1 <module_name>" > dynamic_printk/modules
+
+ Disable a module:
+
+ $ echo "set enabled=0 <module_name>" > dynamic_printk/modules
+
+ Enable all modules:
+
+ $ echo "set enabled=1 all" > dynamic_printk/modules
+
+ Disable all modules:
+
+ $ echo "set enabled=0 all" > dynamic_printk/modules
+
+ Finally, passing "dynamic_printk" at the command line enables
+ debugging for all modules. This mode can be turned off via the above
+ disable command.
+
source "samples/Kconfig"
source "lib/Kconfig.kgdb"
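For a concrete picture of what DYNAMIC_PRINTK_DEBUG buys, here is a minimal, hypothetical module (its name and messages are invented) whose pr_debug() output the debugfs interface described above would switch at runtime:

#include <linux/kernel.h>
#include <linux/module.h>

/* With CONFIG_DYNAMIC_PRINTK_DEBUG=y these messages are compiled in
 * but stay silent until "set enabled=1 <module>" is written to the
 * debugfs dynamic_printk/modules file. */
static int __init example_init(void)
{
        pr_debug("example: probing\n");
        return 0;
}

static void __exit example_exit(void)
{
        pr_debug("example: removed\n");
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");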
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index a5d4b1dac2a..9b5d1d7f2ef 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -1,20 +1,20 @@
-config HAVE_ARCH_KGDB_SHADOW_INFO
- bool
-
config HAVE_ARCH_KGDB
bool
menuconfig KGDB
bool "KGDB: kernel debugging with remote gdb"
- select FRAME_POINTER
depends on HAVE_ARCH_KGDB
depends on DEBUG_KERNEL && EXPERIMENTAL
help
If you say Y here, it will be possible to remotely debug the
- kernel using gdb. Documentation of kernel debugger is available
- at http://kgdb.sourceforge.net as well as in DocBook form
- in Documentation/DocBook/. If unsure, say N.
+ kernel using gdb. It is recommended, but not required, that
+ you also turn on the kernel config option
+ CONFIG_FRAME_POINTER to aid in producing more reliable stack
+ backtraces in the external debugger. Documentation of the
+ kernel debugger is available at http://kgdb.sourceforge.net
+ as well as in DocBook form in Documentation/DocBook/. If
+ unsure, say N.
if KGDB
diff --git a/lib/Makefile b/lib/Makefile
index 818c4d45551..16feaab057b 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -11,15 +11,16 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o dump_stack.o \
idr.o int_sqrt.o extable.o prio_tree.o \
sha1.o irq_regs.o reciprocal_div.o argv_split.o \
- proportions.o prio_heap.o ratelimit.o
+ proportions.o prio_heap.o ratelimit.o show_mem.o
lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
lib-y += kobject.o kref.o klist.o
-obj-y += div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
- bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o
+obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
+ bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
+ string_helpers.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
@@ -78,6 +79,10 @@ lib-$(CONFIG_GENERIC_BUG) += bug.o
obj-$(CONFIG_HAVE_LMB) += lmb.o
+obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
+
+obj-$(CONFIG_DYNAMIC_PRINTK_DEBUG) += dynamic_printk.o
+
hostprogs-y := gen_crc32table
clean-files := crc32table.h
diff --git a/lib/bcd.c b/lib/bcd.c
new file mode 100644
index 00000000000..d74257fd0fe
--- /dev/null
+++ b/lib/bcd.c
@@ -0,0 +1,14 @@
+#include <linux/bcd.h>
+#include <linux/module.h>
+
+unsigned bcd2bin(unsigned char val)
+{
+ return (val & 0x0f) + (val >> 4) * 10;
+}
+EXPORT_SYMBOL(bcd2bin);
+
+unsigned char bin2bcd(unsigned val)
+{
+ return ((val / 10) << 4) + val % 10;
+}
+EXPORT_SYMBOL(bin2bcd);
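A minimal usage sketch for the two new helpers, assuming an RTC that stores its seconds field in BCD (the register value 0x59 and both function names are illustrative):

#include <linux/bcd.h>

static unsigned read_rtc_seconds(unsigned char reg)
{
        return bcd2bin(reg);    /* 0x59 -> (0x59 & 0x0f) + (0x59 >> 4) * 10 == 59 */
}

static unsigned char write_rtc_seconds(unsigned sec)
{
        return bin2bcd(sec);    /* 59 -> ((59 / 10) << 4) + 59 % 10 == 0x59 */
}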
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 482df94ea21..1338469ac84 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -996,3 +996,25 @@ int bitmap_allocate_region(unsigned long *bitmap, int pos, int order)
return 0;
}
EXPORT_SYMBOL(bitmap_allocate_region);
+
+/**
+ * bitmap_copy_le - copy a bitmap, putting the bits into little-endian order.
+ * @dst: destination buffer
+ * @src: bitmap to copy
+ * @nbits: number of bits in the bitmap
+ *
+ * Requires nbits % BITS_PER_LONG == 0.
+ */
+void bitmap_copy_le(void *dst, const unsigned long *src, int nbits)
+{
+ unsigned long *d = dst;
+ int i;
+
+ for (i = 0; i < nbits/BITS_PER_LONG; i++) {
+ if (BITS_PER_LONG == 64)
+ d[i] = cpu_to_le64(src[i]);
+ else
+ d[i] = cpu_to_le32(src[i]);
+ }
+}
+EXPORT_SYMBOL(bitmap_copy_le);
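A usage sketch for bitmap_copy_le(): handing a CPU-endian bitmap to hardware that expects little-endian byte order. fill_device_mask() and dev_buf are hypothetical; note the documented requirement that nbits be a multiple of BITS_PER_LONG:

#include <linux/bitmap.h>
#include <linux/bitops.h>

static void fill_device_mask(void *dev_buf)
{
        DECLARE_BITMAP(mask, 2 * BITS_PER_LONG);

        bitmap_zero(mask, 2 * BITS_PER_LONG);
        set_bit(0, mask);       /* bit 0 of the first byte, on any host endianness */
        bitmap_copy_le(dev_buf, mask, 2 * BITS_PER_LONG);
}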
diff --git a/lib/cmdline.c b/lib/cmdline.c
index f596c08d213..f5f3ad8b62f 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -116,7 +116,7 @@ char *get_options(const char *str, int nints, int *ints)
/**
* memparse - parse a string with mem suffixes into a number
* @ptr: Where parse begins
- * @retptr: (output) Pointer to next char after parse completes
+ * @retptr: (output) Optional pointer to next char after parse completes
*
* Parses a string into a number. The number stored at @ptr is
* potentially suffixed with %K (for kilobytes, or 1024 bytes),
@@ -126,11 +126,13 @@ char *get_options(const char *str, int nints, int *ints)
* megabyte, or one gigabyte, respectively.
*/
-unsigned long long memparse (char *ptr, char **retptr)
+unsigned long long memparse(const char *ptr, char **retptr)
{
- unsigned long long ret = simple_strtoull (ptr, retptr, 0);
+ char *endptr; /* local pointer to end of parsed string */
- switch (**retptr) {
+ unsigned long long ret = simple_strtoull(ptr, &endptr, 0);
+
+ switch (*endptr) {
case 'G':
case 'g':
ret <<= 10;
@@ -140,10 +142,14 @@ unsigned long long memparse (char *ptr, char **retptr)
case 'K':
case 'k':
ret <<= 10;
- (*retptr)++;
+ endptr++;
default:
break;
}
+
+ if (retptr)
+ *retptr = endptr;
+
return ret;
}
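With @retptr now optional, a caller that only wants the value can pass NULL, a pattern the old signature forbade. A small sketch (parse_size() is a hypothetical caller):

#include <linux/kernel.h>

static unsigned long long parse_size(void)
{
        /* "512K" -> 512 << 10 == 524288; NULL says we don't care
         * where parsing stopped. */
        return memparse("512K", NULL);
}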
diff --git a/lib/cpumask.c b/lib/cpumask.c
index bb4f76d3c3e..5f97dc25ef9 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -15,6 +15,15 @@ int __next_cpu(int n, const cpumask_t *srcp)
}
EXPORT_SYMBOL(__next_cpu);
+#if NR_CPUS > 64
+int __next_cpu_nr(int n, const cpumask_t *srcp)
+{
+ return min_t(int, nr_cpu_ids,
+ find_next_bit(srcp->bits, nr_cpu_ids, n+1));
+}
+EXPORT_SYMBOL(__next_cpu_nr);
+#endif
+
int __any_online_cpu(const cpumask_t *mask)
{
int cpu;
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
index 0ef01d14727..0218b4693dd 100644
--- a/lib/debug_locks.c
+++ b/lib/debug_locks.c
@@ -8,6 +8,7 @@
*
* Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
*/
+#include <linux/kernel.h>
#include <linux/rwsem.h>
#include <linux/mutex.h>
#include <linux/module.h>
@@ -37,6 +38,7 @@ int debug_locks_off(void)
{
if (xchg(&debug_locks, 0)) {
if (!debug_locks_silent) {
+ oops_in_progress = 1;
console_verbose();
return 1;
}
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 85b18d79be8..e3ab374e133 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -112,6 +112,7 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
/*
* Allocate a new object. If the pool is empty, switch off the debugger.
+ * Must be called with interrupts disabled.
*/
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
@@ -148,17 +149,18 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
static void free_object(struct debug_obj *obj)
{
unsigned long idx = (unsigned long)(obj - obj_static_pool);
+ unsigned long flags;
if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) {
- spin_lock(&pool_lock);
+ spin_lock_irqsave(&pool_lock, flags);
hlist_add_head(&obj->node, &obj_pool);
obj_pool_free++;
obj_pool_used--;
- spin_unlock(&pool_lock);
+ spin_unlock_irqrestore(&pool_lock, flags);
} else {
- spin_lock(&pool_lock);
+ spin_lock_irqsave(&pool_lock, flags);
obj_pool_used--;
- spin_unlock(&pool_lock);
+ spin_unlock_irqrestore(&pool_lock, flags);
kmem_cache_free(obj_cache, obj);
}
}
@@ -171,6 +173,7 @@ static void debug_objects_oom(void)
{
struct debug_bucket *db = obj_hash;
struct hlist_node *node, *tmp;
+ HLIST_HEAD(freelist);
struct debug_obj *obj;
unsigned long flags;
int i;
@@ -179,11 +182,14 @@ static void debug_objects_oom(void)
for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
spin_lock_irqsave(&db->lock, flags);
- hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
+ hlist_move_list(&db->list, &freelist);
+ spin_unlock_irqrestore(&db->lock, flags);
+
+ /* Now free them */
+ hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
hlist_del(&obj->node);
free_object(obj);
}
- spin_unlock_irqrestore(&db->lock, flags);
}
}
@@ -205,9 +211,8 @@ static void debug_print_object(struct debug_obj *obj, char *msg)
if (limit < 5 && obj->descr != descr_test) {
limit++;
- printk(KERN_ERR "ODEBUG: %s %s object type: %s\n", msg,
+ WARN(1, KERN_ERR "ODEBUG: %s %s object type: %s\n", msg,
obj_states[obj->state], obj->descr->name);
- WARN_ON(1);
}
debug_objects_warnings++;
}
@@ -226,15 +231,13 @@ debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
static void debug_object_is_on_stack(void *addr, int onstack)
{
- void *stack = current->stack;
int is_on_stack;
static int limit;
if (limit > 4)
return;
- is_on_stack = (addr >= stack && addr < (stack + THREAD_SIZE));
-
+ is_on_stack = object_is_on_stack(addr);
if (is_on_stack == onstack)
return;
@@ -501,8 +504,9 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
return;
default:
hlist_del(&obj->node);
+ spin_unlock_irqrestore(&db->lock, flags);
free_object(obj);
- break;
+ return;
}
out_unlock:
spin_unlock_irqrestore(&db->lock, flags);
@@ -513,6 +517,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
struct hlist_node *node, *tmp;
+ HLIST_HEAD(freelist);
struct debug_obj_descr *descr;
enum debug_obj_state state;
struct debug_bucket *db;
@@ -548,11 +553,18 @@ repeat:
goto repeat;
default:
hlist_del(&obj->node);
- free_object(obj);
+ hlist_add_head(&obj->node, &freelist);
break;
}
}
spin_unlock_irqrestore(&db->lock, flags);
+
+ /* Now free them */
+ hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
+ hlist_del(&obj->node);
+ free_object(obj);
+ }
+
if (cnt > debug_objects_maxchain)
debug_objects_maxchain = cnt;
}
@@ -735,26 +747,22 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
obj = lookup_object(addr, db);
if (!obj && state != ODEBUG_STATE_NONE) {
- printk(KERN_ERR "ODEBUG: selftest object not found\n");
- WARN_ON(1);
+ WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
goto out;
}
if (obj && obj->state != state) {
- printk(KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
+ WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
obj->state, state);
- WARN_ON(1);
goto out;
}
if (fixups != debug_objects_fixups) {
- printk(KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
+ WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
fixups, debug_objects_fixups);
- WARN_ON(1);
goto out;
}
if (warnings != debug_objects_warnings) {
- printk(KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
+ WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
warnings, debug_objects_warnings);
- WARN_ON(1);
goto out;
}
res = 0;
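The debugobjects hunks above, like the iomap, kobject, list_debug and plist hunks further down, all apply the same conversion: a conditional printk() followed by WARN_ON(1) collapses into a single WARN() call, which tests the condition and prints the formatted message together with the backtrace. A sketch of the pattern with hypothetical names:

static void check_state(int bad, int state)
{
        /* old style: message and backtrace emitted by two calls */
        if (bad) {
                printk(KERN_ERR "subsys: bad state %d\n", state);
                WARN_ON(1);
        }

        /* new style: one call; WARN() also returns the condition */
        WARN(bad, KERN_ERR "subsys: bad state %d\n", state);
}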
diff --git a/lib/dynamic_printk.c b/lib/dynamic_printk.c
new file mode 100644
index 00000000000..d640f87bdc9
--- /dev/null
+++ b/lib/dynamic_printk.c
@@ -0,0 +1,418 @@
+/*
+ * lib/dynamic_printk.c
+ *
+ * make pr_debug()/dev_dbg() calls runtime configurable based upon
+ * their source module.
+ *
+ * Copyright (C) 2008 Red Hat, Inc., Jason Baron <jbaron@redhat.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+
+extern struct mod_debug __start___verbose[];
+extern struct mod_debug __stop___verbose[];
+
+struct debug_name {
+ struct hlist_node hlist;
+ struct hlist_node hlist2;
+ int hash1;
+ int hash2;
+ char *name;
+ int enable;
+ int type;
+};
+
+static int nr_entries;
+static int num_enabled;
+int dynamic_enabled = DYNAMIC_ENABLED_NONE;
+static struct hlist_head module_table[DEBUG_HASH_TABLE_SIZE] =
+ { [0 ... DEBUG_HASH_TABLE_SIZE-1] = HLIST_HEAD_INIT };
+static struct hlist_head module_table2[DEBUG_HASH_TABLE_SIZE] =
+ { [0 ... DEBUG_HASH_TABLE_SIZE-1] = HLIST_HEAD_INIT };
+static DECLARE_MUTEX(debug_list_mutex);
+
+/* dynamic_printk_enabled and dynamic_printk_enabled2 are bitmasks in which
+ * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
+ * use independent hash functions, to reduce the chance of false positives.
+ */
+long long dynamic_printk_enabled;
+EXPORT_SYMBOL_GPL(dynamic_printk_enabled);
+long long dynamic_printk_enabled2;
+EXPORT_SYMBOL_GPL(dynamic_printk_enabled2);
+
+/* returns the debug module pointer. */
+static struct debug_name *find_debug_module(char *module_name)
+{
+ int i;
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct debug_name *element;
+
+ element = NULL;
+ for (i = 0; i < DEBUG_HASH_TABLE_SIZE; i++) {
+ head = &module_table[i];
+ hlist_for_each_entry_rcu(element, node, head, hlist)
+ if (!strcmp(element->name, module_name))
+ return element;
+ }
+ return NULL;
+}
+
+/* returns the debug module pointer. */
+static struct debug_name *find_debug_module_hash(char *module_name, int hash)
+{
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct debug_name *element;
+
+ element = NULL;
+ head = &module_table[hash];
+ hlist_for_each_entry_rcu(element, node, head, hlist)
+ if (!strcmp(element->name, module_name))
+ return element;
+ return NULL;
+}
+
+/* caller must hold the mutex */
+static int __add_debug_module(char *mod_name, int hash, int hash2)
+{
+ struct debug_name *new;
+ char *module_name;
+ int ret = 0;
+
+ if (find_debug_module(mod_name)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ module_name = kmalloc(strlen(mod_name) + 1, GFP_KERNEL);
+ if (!module_name) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ module_name = strcpy(module_name, mod_name);
+ module_name[strlen(mod_name)] = '\0';
+ new = kzalloc(sizeof(struct debug_name), GFP_KERNEL);
+ if (!new) {
+ kfree(module_name);
+ ret = -ENOMEM;
+ goto out;
+ }
+ INIT_HLIST_NODE(&new->hlist);
+ INIT_HLIST_NODE(&new->hlist2);
+ new->name = module_name;
+ new->hash1 = hash;
+ new->hash2 = hash2;
+ hlist_add_head_rcu(&new->hlist, &module_table[hash]);
+ hlist_add_head_rcu(&new->hlist2, &module_table2[hash2]);
+ nr_entries++;
+out:
+ return ret;
+}
+
+int unregister_dynamic_debug_module(char *mod_name)
+{
+ struct debug_name *element;
+ int ret = 0;
+
+ down(&debug_list_mutex);
+ element = find_debug_module(mod_name);
+ if (!element) {
+ ret = -EINVAL;
+ goto out;
+ }
+ hlist_del_rcu(&element->hlist);
+ hlist_del_rcu(&element->hlist2);
+ synchronize_rcu();
+ kfree(element->name);
+ if (element->enable)
+ num_enabled--;
+ kfree(element);
+ nr_entries--;
+out:
+ up(&debug_list_mutex);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(unregister_dynamic_debug_module);
+
+int register_dynamic_debug_module(char *mod_name, int type, char *share_name,
+ char *flags, int hash, int hash2)
+{
+ struct debug_name *elem;
+ int ret = 0;
+
+ down(&debug_list_mutex);
+ elem = find_debug_module(mod_name);
+ if (!elem) {
+ if (__add_debug_module(mod_name, hash, hash2))
+ goto out;
+ elem = find_debug_module(mod_name);
+ if (dynamic_enabled == DYNAMIC_ENABLED_ALL &&
+ !strcmp(mod_name, share_name)) {
+ elem->enable = true;
+ num_enabled++;
+ }
+ }
+ elem->type |= type;
+out:
+ up(&debug_list_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(register_dynamic_debug_module);
+
+int __dynamic_dbg_enabled_helper(char *mod_name, int type, int value, int hash)
+{
+ struct debug_name *elem;
+ int ret = 0;
+
+ if (dynamic_enabled == DYNAMIC_ENABLED_ALL)
+ return 1;
+ rcu_read_lock();
+ elem = find_debug_module_hash(mod_name, hash);
+ if (elem && elem->enable)
+ ret = 1;
+ rcu_read_unlock();
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__dynamic_dbg_enabled_helper);
+
+static void set_all(bool enable)
+{
+ struct debug_name *e;
+ struct hlist_node *node;
+ int i;
+ long long enable_mask;
+
+ for (i = 0; i < DEBUG_HASH_TABLE_SIZE; i++) {
+ if (module_table[i].first != NULL) {
+ hlist_for_each_entry(e, node, &module_table[i], hlist) {
+ e->enable = enable;
+ }
+ }
+ }
+ if (enable)
+ enable_mask = ULLONG_MAX;
+ else
+ enable_mask = 0;
+ dynamic_printk_enabled = enable_mask;
+ dynamic_printk_enabled2 = enable_mask;
+}
+
+static int disabled_hash(int i, bool first_table)
+{
+ struct debug_name *e;
+ struct hlist_node *node;
+
+ if (first_table) {
+ hlist_for_each_entry(e, node, &module_table[i], hlist) {
+ if (e->enable)
+ return 0;
+ }
+ } else {
+ hlist_for_each_entry(e, node, &module_table2[i], hlist2) {
+ if (e->enable)
+ return 0;
+ }
+ }
+ return 1;
+}
+
+static ssize_t pr_debug_write(struct file *file, const char __user *buf,
+ size_t length, loff_t *ppos)
+{
+ char *buffer, *s, *value_str, *setting_str;
+ int err, value;
+ struct debug_name *elem = NULL;
+ int all = 0;
+
+ if (length > PAGE_SIZE || length < 0)
+ return -EINVAL;
+
+ buffer = (char *)__get_free_page(GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ err = -EFAULT;
+ if (copy_from_user(buffer, buf, length))
+ goto out;
+
+ err = -EINVAL;
+ if (length < PAGE_SIZE)
+ buffer[length] = '\0';
+ else if (buffer[PAGE_SIZE-1])
+ goto out;
+
+ err = -EINVAL;
+ down(&debug_list_mutex);
+
+ if (strncmp("set", buffer, 3))
+ goto out_up;
+ s = buffer + 3;
+ setting_str = strsep(&s, "=");
+ if (s == NULL)
+ goto out_up;
+ setting_str = strstrip(setting_str);
+ value_str = strsep(&s, " ");
+ if (s == NULL)
+ goto out_up;
+ s = strstrip(s);
+ if (!strncmp(s, "all", 3))
+ all = 1;
+ else
+ elem = find_debug_module(s);
+ if (!strncmp(setting_str, "enable", 6)) {
+ value = !!simple_strtol(value_str, NULL, 10);
+ if (all) {
+ if (value) {
+ set_all(true);
+ num_enabled = nr_entries;
+ dynamic_enabled = DYNAMIC_ENABLED_ALL;
+ } else {
+ set_all(false);
+ num_enabled = 0;
+ dynamic_enabled = DYNAMIC_ENABLED_NONE;
+ }
+ err = 0;
+ } else {
+ if (elem) {
+ if (value && (elem->enable == 0)) {
+ dynamic_printk_enabled |=
+ (1LL << elem->hash1);
+ dynamic_printk_enabled2 |=
+ (1LL << elem->hash2);
+ elem->enable = 1;
+ num_enabled++;
+ dynamic_enabled = DYNAMIC_ENABLED_SOME;
+ err = 0;
+ printk(KERN_DEBUG
+ "debugging enabled for module %s",
+ elem->name);
+ } else if (!value && (elem->enable == 1)) {
+ elem->enable = 0;
+ num_enabled--;
+ if (disabled_hash(elem->hash1, true))
+ dynamic_printk_enabled &=
+ ~(1LL << elem->hash1);
+ if (disabled_hash(elem->hash2, false))
+ dynamic_printk_enabled2 &=
+ ~(1LL << elem->hash2);
+ if (num_enabled)
+ dynamic_enabled =
+ DYNAMIC_ENABLED_SOME;
+ else
+ dynamic_enabled =
+ DYNAMIC_ENABLED_NONE;
+ err = 0;
+ printk(KERN_DEBUG
+ "debugging disabled for module "
+ "%s", elem->name);
+ }
+ }
+ }
+ }
+ if (!err)
+ err = length;
+out_up:
+ up(&debug_list_mutex);
+out:
+ free_page((unsigned long)buffer);
+ return err;
+}
+
+static void *pr_debug_seq_start(struct seq_file *f, loff_t *pos)
+{
+ return (*pos < DEBUG_HASH_TABLE_SIZE) ? pos : NULL;
+}
+
+static void *pr_debug_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ (*pos)++;
+ if (*pos >= DEBUG_HASH_TABLE_SIZE)
+ return NULL;
+ return pos;
+}
+
+static void pr_debug_seq_stop(struct seq_file *s, void *v)
+{
+ /* Nothing to do */
+}
+
+static int pr_debug_seq_show(struct seq_file *s, void *v)
+{
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct debug_name *elem;
+ unsigned int i = *(loff_t *) v;
+
+ rcu_read_lock();
+ head = &module_table[i];
+ hlist_for_each_entry_rcu(elem, node, head, hlist) {
+ seq_printf(s, "%s enabled=%d", elem->name, elem->enable);
+ seq_printf(s, "\n");
+ }
+ rcu_read_unlock();
+ return 0;
+}
+
+static struct seq_operations pr_debug_seq_ops = {
+ .start = pr_debug_seq_start,
+ .next = pr_debug_seq_next,
+ .stop = pr_debug_seq_stop,
+ .show = pr_debug_seq_show
+};
+
+static int pr_debug_open(struct inode *inode, struct file *filp)
+{
+ return seq_open(filp, &pr_debug_seq_ops);
+}
+
+static const struct file_operations pr_debug_operations = {
+ .open = pr_debug_open,
+ .read = seq_read,
+ .write = pr_debug_write,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int __init dynamic_printk_init(void)
+{
+ struct dentry *dir, *file;
+ struct mod_debug *iter;
+ unsigned long value;
+
+ dir = debugfs_create_dir("dynamic_printk", NULL);
+ if (!dir)
+ return -ENOMEM;
+ file = debugfs_create_file("modules", 0644, dir, NULL,
+ &pr_debug_operations);
+ if (!file) {
+ debugfs_remove(dir);
+ return -ENOMEM;
+ }
+ for (value = (unsigned long)__start___verbose;
+ value < (unsigned long)__stop___verbose;
+ value += sizeof(struct mod_debug)) {
+ iter = (struct mod_debug *)value;
+ register_dynamic_debug_module(iter->modname,
+ iter->type,
+ iter->logical_modname,
+ iter->flag_names, iter->hash, iter->hash2);
+ }
+ return 0;
+}
+module_init(dynamic_printk_init);
+/* may want to move this earlier so we can get traces as early as possible */
+
+static int __init dynamic_printk_setup(char *str)
+{
+ if (str)
+ return -ENOENT;
+ set_all(true);
+ return 0;
+}
+/* Use early_param(), so we can get debug output as early as possible */
+early_param("dynamic_printk", dynamic_printk_setup);
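The fast path served by the two bitmasks works like a two-level Bloom filter: a pr_debug() site is even considered only if both hash buckets for its module name are marked enabled, and the string compare in __dynamic_dbg_enabled_helper() then resolves collisions. The macro below is a simplified, hypothetical rendering of that check, not the exact call-site macro from the kernel headers (where the hashes come from KBUILD_MODNAME at build time):

/* Simplified sketch of the call-site check -- not the real macro. */
#define my_dbg_enabled(mod, h1, h2)                                 \
        (unlikely((dynamic_printk_enabled  & (1LL << (h1))) &&      \
                  (dynamic_printk_enabled2 & (1LL << (h2)))) &&     \
         __dynamic_dbg_enabled_helper((mod), 0, 0, (h1)))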
diff --git a/lib/idr.c b/lib/idr.c
index 7a02e173f02..e728c7fccc4 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -6,6 +6,8 @@
* Modified by George Anzinger to reuse immediately and to use
* find bit instructions. Also removed _irq on spinlocks.
*
+ * Modified by Nadia Derbey to make it RCU safe.
+ *
* Small id to pointer translation service.
*
* It uses a radix tree like structure as a sparse array indexed
@@ -35,7 +37,7 @@
static struct kmem_cache *idr_layer_cache;
-static struct idr_layer *alloc_layer(struct idr *idp)
+static struct idr_layer *get_from_free_list(struct idr *idp)
{
struct idr_layer *p;
unsigned long flags;
@@ -50,15 +52,28 @@ static struct idr_layer *alloc_layer(struct idr *idp)
return(p);
}
+static void idr_layer_rcu_free(struct rcu_head *head)
+{
+ struct idr_layer *layer;
+
+ layer = container_of(head, struct idr_layer, rcu_head);
+ kmem_cache_free(idr_layer_cache, layer);
+}
+
+static inline void free_layer(struct idr_layer *p)
+{
+ call_rcu(&p->rcu_head, idr_layer_rcu_free);
+}
+
/* only called when idp->lock is held */
-static void __free_layer(struct idr *idp, struct idr_layer *p)
+static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
p->ary[0] = idp->id_free;
idp->id_free = p;
idp->id_free_cnt++;
}
-static void free_layer(struct idr *idp, struct idr_layer *p)
+static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
unsigned long flags;
@@ -66,7 +81,7 @@ static void free_layer(struct idr *idp, struct idr_layer *p)
* Depends on the return element being zeroed.
*/
spin_lock_irqsave(&idp->lock, flags);
- __free_layer(idp, p);
+ __move_to_free_list(idp, p);
spin_unlock_irqrestore(&idp->lock, flags);
}
@@ -96,7 +111,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
* @gfp_mask: memory allocation flags
*
* This function should be called prior to locking and calling the
- * following function. It preallocates enough memory to satisfy
+ * idr_get_new* functions. It preallocates enough memory to satisfy
* the worst possible allocation.
*
* If the system is REALLY out of memory this function returns 0,
@@ -109,7 +124,7 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
if (new == NULL)
return (0);
- free_layer(idp, new);
+ move_to_free_list(idp, new);
}
return 1;
}
@@ -143,7 +158,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
/* if already at the top layer, we need to grow */
if (!(p = pa[l])) {
*starting_id = id;
- return -2;
+ return IDR_NEED_TO_GROW;
}
/* If we need to go up one layer, continue the
@@ -160,16 +175,17 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
id = ((id >> sh) ^ n ^ m) << sh;
}
if ((id >= MAX_ID_BIT) || (id < 0))
- return -3;
+ return IDR_NOMORE_SPACE;
if (l == 0)
break;
/*
* Create the layer below if it is missing.
*/
if (!p->ary[m]) {
- if (!(new = alloc_layer(idp)))
+ new = get_from_free_list(idp);
+ if (!new)
return -1;
- p->ary[m] = new;
+ rcu_assign_pointer(p->ary[m], new);
p->count++;
}
pa[l--] = p;
@@ -192,7 +208,7 @@ build_up:
p = idp->top;
layers = idp->layers;
if (unlikely(!p)) {
- if (!(p = alloc_layer(idp)))
+ if (!(p = get_from_free_list(idp)))
return -1;
layers = 1;
}
@@ -204,7 +220,7 @@ build_up:
layers++;
if (!p->count)
continue;
- if (!(new = alloc_layer(idp))) {
+ if (!(new = get_from_free_list(idp))) {
/*
* The allocation failed. If we built part of
* the structure tear it down.
@@ -214,7 +230,7 @@ build_up:
p = p->ary[0];
new->ary[0] = NULL;
new->bitmap = new->count = 0;
- __free_layer(idp, new);
+ __move_to_free_list(idp, new);
}
spin_unlock_irqrestore(&idp->lock, flags);
return -1;
@@ -225,10 +241,10 @@ build_up:
__set_bit(0, &new->bitmap);
p = new;
}
- idp->top = p;
+ rcu_assign_pointer(idp->top, p);
idp->layers = layers;
v = sub_alloc(idp, &id, pa);
- if (v == -2)
+ if (v == IDR_NEED_TO_GROW)
goto build_up;
return(v);
}
@@ -244,7 +260,8 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
* Successfully found an empty slot. Install the user
* pointer and mark the slot full.
*/
- pa[0]->ary[id & IDR_MASK] = (struct idr_layer *)ptr;
+ rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
+ (struct idr_layer *)ptr);
pa[0]->count++;
idr_mark_full(pa, id);
}
@@ -277,12 +294,8 @@ int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
* This is a cheap hack until the IDR code can be fixed to
* return proper error values.
*/
- if (rv < 0) {
- if (rv == -1)
- return -EAGAIN;
- else /* Will be -3 */
- return -ENOSPC;
- }
+ if (rv < 0)
+ return _idr_rc_to_errno(rv);
*id = rv;
return 0;
}
@@ -312,12 +325,8 @@ int idr_get_new(struct idr *idp, void *ptr, int *id)
* This is a cheap hack until the IDR code can be fixed to
* return proper error values.
*/
- if (rv < 0) {
- if (rv == -1)
- return -EAGAIN;
- else /* Will be -3 */
- return -ENOSPC;
- }
+ if (rv < 0)
+ return _idr_rc_to_errno(rv);
*id = rv;
return 0;
}
@@ -325,7 +334,8 @@ EXPORT_SYMBOL(idr_get_new);
static void idr_remove_warning(int id)
{
- printk("idr_remove called for id=%d which is not allocated.\n", id);
+ printk(KERN_WARNING
+ "idr_remove called for id=%d which is not allocated.\n", id);
dump_stack();
}
@@ -334,6 +344,7 @@ static void sub_remove(struct idr *idp, int shift, int id)
struct idr_layer *p = idp->top;
struct idr_layer **pa[MAX_LEVEL];
struct idr_layer ***paa = &pa[0];
+ struct idr_layer *to_free;
int n;
*paa = NULL;
@@ -349,13 +360,18 @@ static void sub_remove(struct idr *idp, int shift, int id)
n = id & IDR_MASK;
if (likely(p != NULL && test_bit(n, &p->bitmap))){
__clear_bit(n, &p->bitmap);
- p->ary[n] = NULL;
+ rcu_assign_pointer(p->ary[n], NULL);
+ to_free = NULL;
while(*paa && ! --((**paa)->count)){
- free_layer(idp, **paa);
+ if (to_free)
+ free_layer(to_free);
+ to_free = **paa;
**paa-- = NULL;
}
if (!*paa)
idp->layers = 0;
+ if (to_free)
+ free_layer(to_free);
} else
idr_remove_warning(id);
}
@@ -368,22 +384,34 @@ static void sub_remove(struct idr *idp, int shift, int id)
void idr_remove(struct idr *idp, int id)
{
struct idr_layer *p;
+ struct idr_layer *to_free;
/* Mask off upper bits we don't use for the search. */
id &= MAX_ID_MASK;
sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
- idp->top->ary[0]) { // We can drop a layer
-
+ idp->top->ary[0]) {
+ /*
+ * Single child at leftmost slot: we can shrink the tree.
+ * This level is not needed anymore since when layers are
+ * inserted, they are inserted at the top of the existing
+ * tree.
+ */
+ to_free = idp->top;
p = idp->top->ary[0];
- idp->top->bitmap = idp->top->count = 0;
- free_layer(idp, idp->top);
- idp->top = p;
+ rcu_assign_pointer(idp->top, p);
--idp->layers;
+ to_free->bitmap = to_free->count = 0;
+ free_layer(to_free);
}
while (idp->id_free_cnt >= IDR_FREE_MAX) {
- p = alloc_layer(idp);
+ p = get_from_free_list(idp);
+ /*
+ * Note: we don't call the rcu callback here, since the only
+ * layers that fall into the freelist are those that have been
+ * preallocated.
+ */
kmem_cache_free(idr_layer_cache, p);
}
return;
@@ -424,15 +452,13 @@ void idr_remove_all(struct idr *idp)
id += 1 << n;
while (n < fls(id)) {
- if (p) {
- memset(p, 0, sizeof *p);
- free_layer(idp, p);
- }
+ if (p)
+ free_layer(p);
n += IDR_BITS;
p = *--paa;
}
}
- idp->top = NULL;
+ rcu_assign_pointer(idp->top, NULL);
idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
@@ -444,7 +470,7 @@ EXPORT_SYMBOL(idr_remove_all);
void idr_destroy(struct idr *idp)
{
while (idp->id_free_cnt) {
- struct idr_layer *p = alloc_layer(idp);
+ struct idr_layer *p = get_from_free_list(idp);
kmem_cache_free(idr_layer_cache, p);
}
}
@@ -459,7 +485,8 @@ EXPORT_SYMBOL(idr_destroy);
* return indicates that @id is not valid or you passed %NULL in
* idr_get_new().
*
- * The caller must serialize idr_find() vs idr_get_new() and idr_remove().
+ * This function can be called under rcu_read_lock(), given that the leaf
+ * pointers lifetimes are correctly managed.
*/
void *idr_find(struct idr *idp, int id)
{
@@ -467,7 +494,7 @@ void *idr_find(struct idr *idp, int id)
struct idr_layer *p;
n = idp->layers * IDR_BITS;
- p = idp->top;
+ p = rcu_dereference(idp->top);
/* Mask off upper bits we don't use for the search. */
id &= MAX_ID_MASK;
@@ -477,7 +504,7 @@ void *idr_find(struct idr *idp, int id)
while (n > 0 && p) {
n -= IDR_BITS;
- p = p->ary[(id >> n) & IDR_MASK];
+ p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
}
return((void *)p);
}
@@ -510,7 +537,7 @@ int idr_for_each(struct idr *idp,
struct idr_layer **paa = &pa[0];
n = idp->layers * IDR_BITS;
- p = idp->top;
+ p = rcu_dereference(idp->top);
max = 1 << n;
id = 0;
@@ -518,7 +545,7 @@ int idr_for_each(struct idr *idp,
while (n > 0 && p) {
n -= IDR_BITS;
*paa++ = p;
- p = p->ary[(id >> n) & IDR_MASK];
+ p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
}
if (p) {
@@ -548,7 +575,7 @@ EXPORT_SYMBOL(idr_for_each);
* A -ENOENT return indicates that @id was not found.
* A -EINVAL return indicates that @id was not within valid constraints.
*
- * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
+ * The caller must serialize with writers.
*/
void *idr_replace(struct idr *idp, void *ptr, int id)
{
@@ -574,13 +601,13 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
return ERR_PTR(-ENOENT);
old_p = p->ary[n];
- p->ary[n] = ptr;
+ rcu_assign_pointer(p->ary[n], ptr);
return old_p;
}
EXPORT_SYMBOL(idr_replace);
-static void idr_cache_ctor(struct kmem_cache *idr_layer_cache, void *idr_layer)
+static void idr_cache_ctor(void *idr_layer)
{
memset(idr_layer, 0, sizeof(struct idr_layer));
}
@@ -694,12 +721,8 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
restart:
/* get vacant slot */
t = idr_get_empty_slot(&ida->idr, idr_id, pa);
- if (t < 0) {
- if (t == -1)
- return -EAGAIN;
- else /* will be -3 */
- return -ENOSPC;
- }
+ if (t < 0)
+ return _idr_rc_to_errno(t);
if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
return -ENOSPC;
@@ -720,7 +743,8 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
return -EAGAIN;
memset(bitmap, 0, sizeof(struct ida_bitmap));
- pa[0]->ary[idr_id & IDR_MASK] = (void *)bitmap;
+ rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
+ (void *)bitmap);
pa[0]->count++;
}
@@ -749,7 +773,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
* allocation.
*/
if (ida->idr.id_free_cnt || ida->free_bitmap) {
- struct idr_layer *p = alloc_layer(&ida->idr);
+ struct idr_layer *p = get_from_free_list(&ida->idr);
if (p)
kmem_cache_free(idr_layer_cache, p);
}
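With the tree made RCU-safe, idr_find() can now run under rcu_read_lock() instead of the external serialization the old comment demanded, provided the leaf objects' lifetimes are themselves RCU-managed. A hedged sketch (struct my_obj and the lifetime scheme belong to the caller, not the IDR):

#include <linux/idr.h>
#include <linux/rcupdate.h>

static struct my_obj *lookup(struct idr *ids, int id)
{
        struct my_obj *obj;

        rcu_read_lock();
        obj = idr_find(ids, id);
        /* obj must be reference-counted or RCU-freed by the caller's
         * scheme for it to remain valid after the unlock */
        rcu_read_unlock();
        return obj;
}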
diff --git a/lib/inflate.c b/lib/inflate.c
index 9762294be06..1a8e8a97812 100644
--- a/lib/inflate.c
+++ b/lib/inflate.c
@@ -230,6 +230,45 @@ STATIC const ush mask_bits[] = {
#define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE())<<k;k+=8;}}
#define DUMPBITS(n) {b>>=(n);k-=(n);}
+#ifndef NO_INFLATE_MALLOC
+/* A trivial malloc implementation, adapted from
+ * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
+ */
+
+static unsigned long malloc_ptr;
+static int malloc_count;
+
+static void *malloc(int size)
+{
+ void *p;
+
+ if (size < 0)
+ error("Malloc error");
+ if (!malloc_ptr)
+ malloc_ptr = free_mem_ptr;
+
+ malloc_ptr = (malloc_ptr + 3) & ~3; /* Align */
+
+ p = (void *)malloc_ptr;
+ malloc_ptr += size;
+
+ if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr)
+ error("Out of memory");
+
+ malloc_count++;
+ return p;
+}
+
+static void free(void *where)
+{
+ malloc_count--;
+ if (!malloc_count)
+ malloc_ptr = free_mem_ptr;
+}
+#else
+#define malloc(a) kmalloc(a, GFP_KERNEL)
+#define free(a) kfree(a)
+#endif
/*
Huffman code decoding is performed using a multi-level table lookup.
@@ -1045,7 +1084,6 @@ STATIC int INIT inflate(void)
int e; /* last block flag */
int r; /* result code */
unsigned h; /* maximum struct huft's malloc'ed */
- void *ptr;
/* initialize window, bit buffer */
wp = 0;
@@ -1057,12 +1095,12 @@ STATIC int INIT inflate(void)
h = 0;
do {
hufts = 0;
- gzip_mark(&ptr);
- if ((r = inflate_block(&e)) != 0) {
- gzip_release(&ptr);
- return r;
- }
- gzip_release(&ptr);
+#ifdef ARCH_HAS_DECOMP_WDOG
+ arch_decomp_wdog();
+#endif
+ r = inflate_block(&e);
+ if (r)
+ return r;
if (hufts > h)
h = hufts;
} while (!e);
diff --git a/lib/iomap.c b/lib/iomap.c
index 37a3ea4cac9..d3222938515 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -40,8 +40,7 @@ static void bad_io_access(unsigned long port, const char *access)
static int count = 10;
if (count) {
count--;
- printk(KERN_ERR "Bad IO access at port %#lx (%s)\n", port, access);
- WARN_ON(1);
+ WARN(1, KERN_ERR "Bad IO access at port %#lx (%s)\n", port, access);
}
}
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c
index a3b8d4c3f77..75dbda03f4f 100644
--- a/lib/iommu-helper.c
+++ b/lib/iommu-helper.c
@@ -30,8 +30,7 @@ again:
return index;
}
-static inline void set_bit_area(unsigned long *map, unsigned long i,
- int len)
+void iommu_area_reserve(unsigned long *map, unsigned long i, int len)
{
unsigned long end = i + len;
while (i < end) {
@@ -64,7 +63,7 @@ again:
start = index + 1;
goto again;
}
- set_bit_area(map, index, nr);
+ iommu_area_reserve(map, index, nr);
}
return index;
}
@@ -80,3 +79,12 @@ void iommu_area_free(unsigned long *map, unsigned long start, unsigned int nr)
}
}
EXPORT_SYMBOL(iommu_area_free);
+
+unsigned long iommu_num_pages(unsigned long addr, unsigned long len,
+ unsigned long io_page_size)
+{
+ unsigned long size = (addr & (io_page_size - 1)) + len;
+
+ return DIV_ROUND_UP(size, io_page_size);
+}
+EXPORT_SYMBOL(iommu_num_pages);
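A worked example of the new helper: mapping 8000 bytes starting at bus address 0x1064 with a 4096-byte IOMMU page size spans (0x1064 & 0xfff) + 8000 = 8100 bytes measured from the page boundary, so DIV_ROUND_UP(8100, 4096) = 2 pages (addresses here are illustrative):

static unsigned long example_num_pages(void)
{
        return iommu_num_pages(0x1064, 8000, 4096);     /* == 2 */
}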
diff --git a/lib/klist.c b/lib/klist.c
index cca37f96faa..bbdd3015c2c 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -37,6 +37,37 @@
#include <linux/klist.h>
#include <linux/module.h>
+/*
+ * Use the lowest bit of n_klist to mark deleted nodes and exclude
+ * dead ones from iteration.
+ */
+#define KNODE_DEAD 1LU
+#define KNODE_KLIST_MASK ~KNODE_DEAD
+
+static struct klist *knode_klist(struct klist_node *knode)
+{
+ return (struct klist *)
+ ((unsigned long)knode->n_klist & KNODE_KLIST_MASK);
+}
+
+static bool knode_dead(struct klist_node *knode)
+{
+ return (unsigned long)knode->n_klist & KNODE_DEAD;
+}
+
+static void knode_set_klist(struct klist_node *knode, struct klist *klist)
+{
+ knode->n_klist = klist;
+ /* no knode deserves to start its life dead */
+ WARN_ON(knode_dead(knode));
+}
+
+static void knode_kill(struct klist_node *knode)
+{
+ /* and no knode should die twice ever either, see we're very humane */
+ WARN_ON(knode_dead(knode));
+ *(unsigned long *)&knode->n_klist |= KNODE_DEAD;
+}
/**
* klist_init - Initialize a klist structure.
@@ -79,7 +110,7 @@ static void klist_node_init(struct klist *k, struct klist_node *n)
INIT_LIST_HEAD(&n->n_node);
init_completion(&n->n_removed);
kref_init(&n->n_ref);
- n->n_klist = k;
+ knode_set_klist(n, k);
if (k->get)
k->get(n);
}
@@ -115,7 +146,7 @@ EXPORT_SYMBOL_GPL(klist_add_tail);
*/
void klist_add_after(struct klist_node *n, struct klist_node *pos)
{
- struct klist *k = pos->n_klist;
+ struct klist *k = knode_klist(pos);
klist_node_init(k, n);
spin_lock(&k->k_lock);
@@ -131,7 +162,7 @@ EXPORT_SYMBOL_GPL(klist_add_after);
*/
void klist_add_before(struct klist_node *n, struct klist_node *pos)
{
- struct klist *k = pos->n_klist;
+ struct klist *k = knode_klist(pos);
klist_node_init(k, n);
spin_lock(&k->k_lock);
@@ -144,9 +175,10 @@ static void klist_release(struct kref *kref)
{
struct klist_node *n = container_of(kref, struct klist_node, n_ref);
+ WARN_ON(!knode_dead(n));
list_del(&n->n_node);
complete(&n->n_removed);
- n->n_klist = NULL;
+ knode_set_klist(n, NULL);
}
static int klist_dec_and_del(struct klist_node *n)
@@ -154,22 +186,29 @@ static int klist_dec_and_del(struct klist_node *n)
return kref_put(&n->n_ref, klist_release);
}
-/**
- * klist_del - Decrement the reference count of node and try to remove.
- * @n: node we're deleting.
- */
-void klist_del(struct klist_node *n)
+static void klist_put(struct klist_node *n, bool kill)
{
- struct klist *k = n->n_klist;
+ struct klist *k = knode_klist(n);
void (*put)(struct klist_node *) = k->put;
spin_lock(&k->k_lock);
+ if (kill)
+ knode_kill(n);
if (!klist_dec_and_del(n))
put = NULL;
spin_unlock(&k->k_lock);
if (put)
put(n);
}
+
+/**
+ * klist_del - Decrement the reference count of node and try to remove.
+ * @n: node we're deleting.
+ */
+void klist_del(struct klist_node *n)
+{
+ klist_put(n, true);
+}
EXPORT_SYMBOL_GPL(klist_del);
/**
@@ -206,7 +245,6 @@ void klist_iter_init_node(struct klist *k, struct klist_iter *i,
struct klist_node *n)
{
i->i_klist = k;
- i->i_head = &k->k_list;
i->i_cur = n;
if (n)
kref_get(&n->n_ref);
@@ -237,7 +275,7 @@ EXPORT_SYMBOL_GPL(klist_iter_init);
void klist_iter_exit(struct klist_iter *i)
{
if (i->i_cur) {
- klist_del(i->i_cur);
+ klist_put(i->i_cur, false);
i->i_cur = NULL;
}
}
@@ -258,27 +296,33 @@ static struct klist_node *to_klist_node(struct list_head *n)
*/
struct klist_node *klist_next(struct klist_iter *i)
{
- struct list_head *next;
- struct klist_node *lnode = i->i_cur;
- struct klist_node *knode = NULL;
void (*put)(struct klist_node *) = i->i_klist->put;
+ struct klist_node *last = i->i_cur;
+ struct klist_node *next;
spin_lock(&i->i_klist->k_lock);
- if (lnode) {
- next = lnode->n_node.next;
- if (!klist_dec_and_del(lnode))
+
+ if (last) {
+ next = to_klist_node(last->n_node.next);
+ if (!klist_dec_and_del(last))
put = NULL;
} else
- next = i->i_head->next;
+ next = to_klist_node(i->i_klist->k_list.next);
- if (next != i->i_head) {
- knode = to_klist_node(next);
- kref_get(&knode->n_ref);
+ i->i_cur = NULL;
+ while (next != to_klist_node(&i->i_klist->k_list)) {
+ if (likely(!knode_dead(next))) {
+ kref_get(&next->n_ref);
+ i->i_cur = next;
+ break;
+ }
+ next = to_klist_node(next->n_node.next);
}
- i->i_cur = knode;
+
spin_unlock(&i->i_klist->k_lock);
- if (put && lnode)
- put(lnode);
- return knode;
+
+ if (put && last)
+ put(last);
+ return i->i_cur;
}
EXPORT_SYMBOL_GPL(klist_next);
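For reference, the iteration pattern this rework protects: after the change, klist_next() transparently skips nodes whose KNODE_DEAD bit was set by a concurrent klist_del(). walk() and handle_node() below are hypothetical:

#include <linux/klist.h>

static void walk(struct klist *k)
{
        struct klist_iter i;
        struct klist_node *n;

        klist_iter_init(k, &i);
        while ((n = klist_next(&i)))
                handle_node(n);         /* hypothetical per-node work */
        klist_iter_exit(&i);
}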
diff --git a/lib/kobject.c b/lib/kobject.c
index dcade0543bd..0487d1f6480 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -164,9 +164,8 @@ static int kobject_add_internal(struct kobject *kobj)
return -ENOENT;
if (!kobj->name || !kobj->name[0]) {
- pr_debug("kobject: (%p): attempted to be registered with empty "
+ WARN(1, "kobject: (%p): attempted to be registered with empty "
"name!\n", kobj);
- WARN_ON(1);
return -EINVAL;
}
@@ -216,13 +215,18 @@ static int kobject_add_internal(struct kobject *kobj)
static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
va_list vargs)
{
- /* Free the old name, if necessary. */
- kfree(kobj->name);
+ const char *old_name = kobj->name;
+ char *s;
kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs);
if (!kobj->name)
return -ENOMEM;
+ /* ewww... some of these buggers have '/' in the name ... */
+ while ((s = strchr(kobj->name, '/')))
+ s[0] = '!';
+
+ kfree(old_name);
return 0;
}
@@ -383,11 +387,17 @@ EXPORT_SYMBOL_GPL(kobject_init_and_add);
* kobject_rename - change the name of an object
* @kobj: object in question.
* @new_name: object's new name
+ *
+ * It is the responsibility of the caller to provide mutual
+ * exclusion between two different calls of kobject_rename
+ * on the same kobject and to ensure that new_name is valid and
+ * won't conflict with other kobjects.
*/
int kobject_rename(struct kobject *kobj, const char *new_name)
{
int error = 0;
const char *devpath = NULL;
+ const char *dup_name = NULL, *name;
char *devpath_string = NULL;
char *envp[2];
@@ -397,19 +407,6 @@ int kobject_rename(struct kobject *kobj, const char *new_name)
if (!kobj->parent)
return -EINVAL;
- /* see if this name is already in use */
- if (kobj->kset) {
- struct kobject *temp_kobj;
- temp_kobj = kset_find_obj(kobj->kset, new_name);
- if (temp_kobj) {
- printk(KERN_WARNING "kobject '%s' cannot be renamed "
- "to '%s' as '%s' is already in existence.\n",
- kobject_name(kobj), new_name, new_name);
- kobject_put(temp_kobj);
- return -EINVAL;
- }
- }
-
devpath = kobject_get_path(kobj, GFP_KERNEL);
if (!devpath) {
error = -ENOMEM;
@@ -424,15 +421,27 @@ int kobject_rename(struct kobject *kobj, const char *new_name)
envp[0] = devpath_string;
envp[1] = NULL;
+ name = dup_name = kstrdup(new_name, GFP_KERNEL);
+ if (!name) {
+ error = -ENOMEM;
+ goto out;
+ }
+
error = sysfs_rename_dir(kobj, new_name);
+ if (error)
+ goto out;
+
+ /* Install the new kobject name */
+ dup_name = kobj->name;
+ kobj->name = name;
/* This function is mostly/only used for network interface.
* Some hotplug package track interfaces by their name and
* therefore want to know when the name is changed by the user. */
- if (!error)
- kobject_uevent_env(kobj, KOBJ_MOVE, envp);
+ kobject_uevent_env(kobj, KOBJ_MOVE, envp);
out:
+ kfree(dup_name);
kfree(devpath_string);
kfree(devpath);
kobject_put(kobj);
@@ -577,12 +586,10 @@ static void kobject_release(struct kref *kref)
void kobject_put(struct kobject *kobj)
{
if (kobj) {
- if (!kobj->state_initialized) {
- printk(KERN_WARNING "kobject: '%s' (%p): is not "
+ if (!kobj->state_initialized)
+ WARN(1, KERN_WARNING "kobject: '%s' (%p): is not "
"initialized, yet kobject_put() is being "
"called.\n", kobject_name(kobj), kobj);
- WARN_ON(1);
- }
kref_put(&kobj->kref, kobject_release);
}
}
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 2fa545a6316..3f914725bda 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -245,7 +245,8 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
if (retval)
goto exit;
- call_usermodehelper(argv[0], argv, env->envp, UMH_WAIT_EXEC);
+ retval = call_usermodehelper(argv[0], argv,
+ env->envp, UMH_WAIT_EXEC);
}
exit:
@@ -284,8 +285,7 @@ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
int len;
if (env->envp_idx >= ARRAY_SIZE(env->envp)) {
- printk(KERN_ERR "add_uevent_var: too many keys\n");
- WARN_ON(1);
+ WARN(1, KERN_ERR "add_uevent_var: too many keys\n");
return -ENOMEM;
}
@@ -296,8 +296,7 @@ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
va_end(args);
if (len >= (sizeof(env->buf) - env->buflen)) {
- printk(KERN_ERR "add_uevent_var: buffer size too small\n");
- WARN_ON(1);
+ WARN(1, KERN_ERR "add_uevent_var: buffer size too small\n");
return -ENOMEM;
}
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 4350ba9655b..1a39f4e3ae1 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -20,18 +20,14 @@ void __list_add(struct list_head *new,
struct list_head *prev,
struct list_head *next)
{
- if (unlikely(next->prev != prev)) {
- printk(KERN_ERR "list_add corruption. next->prev should be "
- "prev (%p), but was %p. (next=%p).\n",
- prev, next->prev, next);
- BUG();
- }
- if (unlikely(prev->next != next)) {
- printk(KERN_ERR "list_add corruption. prev->next should be "
- "next (%p), but was %p. (prev=%p).\n",
- next, prev->next, prev);
- BUG();
- }
+ WARN(next->prev != prev,
+ "list_add corruption. next->prev should be "
+ "prev (%p), but was %p. (next=%p).\n",
+ prev, next->prev, next);
+ WARN(prev->next != next,
+ "list_add corruption. prev->next should be "
+ "next (%p), but was %p. (prev=%p).\n",
+ next, prev->next, prev);
next->prev = new;
new->next = next;
new->prev = prev;
@@ -40,20 +36,6 @@ void __list_add(struct list_head *new,
EXPORT_SYMBOL(__list_add);
/**
- * list_add - add a new entry
- * @new: new entry to be added
- * @head: list head to add it after
- *
- * Insert a new entry after the specified head.
- * This is good for implementing stacks.
- */
-void list_add(struct list_head *new, struct list_head *head)
-{
- __list_add(new, head, head->next);
-}
-EXPORT_SYMBOL(list_add);
-
-/**
* list_del - deletes entry from list.
* @entry: the element to delete from the list.
* Note: list_empty on entry does not return true after this, the entry is
@@ -61,16 +43,12 @@ EXPORT_SYMBOL(list_add);
*/
void list_del(struct list_head *entry)
{
- if (unlikely(entry->prev->next != entry)) {
- printk(KERN_ERR "list_del corruption. prev->next should be %p, "
- "but was %p\n", entry, entry->prev->next);
- BUG();
- }
- if (unlikely(entry->next->prev != entry)) {
- printk(KERN_ERR "list_del corruption. next->prev should be %p, "
- "but was %p\n", entry, entry->next->prev);
- BUG();
- }
+ WARN(entry->prev->next != entry,
+ "list_del corruption. prev->next should be %p, "
+ "but was %p\n", entry, entry->prev->next);
+ WARN(entry->next->prev != entry,
+ "list_del corruption. next->prev should be %p, "
+ "but was %p\n", entry, entry->next->prev);
__list_del(entry->prev, entry->next);
entry->next = LIST_POISON1;
entry->prev = LIST_POISON2;
diff --git a/lib/lmb.c b/lib/lmb.c
index 5d7b9286503..97e54703708 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -462,6 +462,8 @@ void __init lmb_enforce_memory_limit(u64 memory_limit)
if (lmb.memory.region[0].size < lmb.rmo_size)
lmb.rmo_size = lmb.memory.region[0].size;
+ memory_limit = lmb_end_of_DRAM();
+
/* And truncate any reserves above the limit also. */
for (i = 0; i < lmb.reserved.cnt; i++) {
p = &lmb.reserved.region[i];
diff --git a/lib/lzo/lzo1x_decompress.c b/lib/lzo/lzo1x_decompress.c
index 77f0f9b775a..5dc6b29c157 100644
--- a/lib/lzo/lzo1x_decompress.c
+++ b/lib/lzo/lzo1x_decompress.c
@@ -138,8 +138,7 @@ match:
t += 31 + *ip++;
}
m_pos = op - 1;
- m_pos -= le16_to_cpu(get_unaligned(
- (const unsigned short *)ip)) >> 2;
+ m_pos -= get_unaligned_le16(ip) >> 2;
ip += 2;
} else if (t >= 16) {
m_pos = op;
@@ -157,8 +156,7 @@ match:
}
t += 7 + *ip++;
}
- m_pos -= le16_to_cpu(get_unaligned(
- (const unsigned short *)ip)) >> 2;
+ m_pos -= get_unaligned_le16(ip) >> 2;
ip += 2;
if (m_pos == op)
goto eof_found;
diff --git a/lib/parser.c b/lib/parser.c
index 4f0cbc03e0e..b00d02059a5 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -100,7 +100,7 @@ static int match_one(char *s, const char *p, substring_t args[])
* format identifiers which will be taken into account when matching the
* tokens, and whose locations will be returned in the @args array.
*/
-int match_token(char *s, match_table_t table, substring_t args[])
+int match_token(char *s, const match_table_t table, substring_t args[])
{
const struct match_token *p;
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 4a8ba4bf5f6..a8663890a88 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(__percpu_counter_add);
* Add up all the per-cpu counts, return the result. This is a more accurate
* but much slower version of percpu_counter_read_positive()
*/
-s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
+s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
s64 ret;
int cpu;
@@ -62,11 +62,9 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
for_each_online_cpu(cpu) {
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
ret += *pcount;
- if (set)
- *pcount = 0;
+ *pcount = 0;
}
- if (set)
- fbc->count = ret;
+ fbc->count = ret;
spin_unlock(&fbc->lock);
return ret;
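
With the set parameter gone, every __percpu_counter_sum() call now folds the per-cpu deltas back into fbc->count. A hedged sketch of the read-side trade-off (counter and helper names are illustrative):

#include <linux/percpu_counter.h>

static struct percpu_counter nr_things;

/* fast path: per-cpu add, no shared-cacheline contention */
static void thing_created(void)
{
	percpu_counter_inc(&nr_things);
}

/* approximate: may lag by up to the per-cpu batch size */
static s64 things_approx(void)
{
	return percpu_counter_read_positive(&nr_things);
}

/* exact but slow: takes fbc->lock and walks every online cpu */
static s64 things_exact(void)
{
	return percpu_counter_sum(&nr_things);
}
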
diff --git a/lib/plist.c b/lib/plist.c
index 3074a02272f..d6c64a824e1 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -31,12 +31,13 @@
static void plist_check_prev_next(struct list_head *t, struct list_head *p,
struct list_head *n)
{
- if (n->prev != p || p->next != n) {
- printk("top: %p, n: %p, p: %p\n", t, t->next, t->prev);
- printk("prev: %p, n: %p, p: %p\n", p, p->next, p->prev);
- printk("next: %p, n: %p, p: %p\n", n, n->next, n->prev);
- WARN_ON(1);
- }
+ WARN(n->prev != p || p->next != n,
+ "top: %p, n: %p, p: %p\n"
+ "prev: %p, n: %p, p: %p\n"
+ "next: %p, n: %p, p: %p\n",
+ t, t->next, t->prev,
+ p, p->next, p->prev,
+ n, n->next, n->prev);
}
static void plist_check_list(struct list_head *top)
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 56ec21a7f73..be86b32bc87 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -359,18 +359,17 @@ EXPORT_SYMBOL(radix_tree_insert);
* Returns: the slot corresponding to the position @index in the
* radix tree @root. This is useful for update-if-exists operations.
*
- * This function cannot be called under rcu_read_lock, it must be
- * excluded from writers, as must the returned slot for subsequent
- * use by radix_tree_deref_slot() and radix_tree_replace slot.
- * Caller must hold tree write locked across slot lookup and
- * replace.
+ * This function can be called under rcu_read_lock iff the slot is not
+ * modified by radix_tree_replace_slot; otherwise the caller must be
+ * excluded from other writers. Any dereference of the slot must be done
+ * using radix_tree_deref_slot.
*/
void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
{
unsigned int height, shift;
struct radix_tree_node *node, **slot;
- node = root->rnode;
+ node = rcu_dereference(root->rnode);
if (node == NULL)
return NULL;
@@ -390,7 +389,7 @@ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
do {
slot = (struct radix_tree_node **)
(node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK));
- node = *slot;
+ node = rcu_dereference(*slot);
if (node == NULL)
return NULL;
@@ -667,7 +666,7 @@ unsigned long radix_tree_next_hole(struct radix_tree_root *root,
EXPORT_SYMBOL(radix_tree_next_hole);
static unsigned int
-__lookup(struct radix_tree_node *slot, void **results, unsigned long index,
+__lookup(struct radix_tree_node *slot, void ***results, unsigned long index,
unsigned int max_items, unsigned long *next_index)
{
unsigned int nr_found = 0;
@@ -701,11 +700,9 @@ __lookup(struct radix_tree_node *slot, void **results, unsigned long index,
/* Bottom level: grab some items */
for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++) {
- struct radix_tree_node *node;
index++;
- node = slot->slots[i];
- if (node) {
- results[nr_found++] = rcu_dereference(node);
+ if (slot->slots[i]) {
+ results[nr_found++] = &(slot->slots[i]);
if (nr_found == max_items)
goto out;
}
@@ -759,13 +756,22 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
ret = 0;
while (ret < max_items) {
- unsigned int nr_found;
+ unsigned int nr_found, slots_found, i;
unsigned long next_index; /* Index of next search */
if (cur_index > max_index)
break;
- nr_found = __lookup(node, results + ret, cur_index,
+ slots_found = __lookup(node, (void ***)results + ret, cur_index,
max_items - ret, &next_index);
+ nr_found = 0;
+ for (i = 0; i < slots_found; i++) {
+ struct radix_tree_node *slot;
+ slot = *(((void ***)results)[ret + i]);
+ if (!slot)
+ continue;
+ results[ret + nr_found] = rcu_dereference(slot);
+ nr_found++;
+ }
ret += nr_found;
if (next_index == 0)
break;
@@ -776,12 +782,71 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
}
EXPORT_SYMBOL(radix_tree_gang_lookup);
+/**
+ * radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree
+ * @root: radix tree root
+ * @results: where the results of the lookup are placed
+ * @first_index: start the lookup from this key
+ * @max_items: place up to this many items at *results
+ *
+ * Performs an index-ascending scan of the tree for present items. Places
+ * their slots at *@results and returns the number of items which were
+ * placed at *@results.
+ *
+ * The implementation is naive.
+ *
+ * Like radix_tree_gang_lookup as far as RCU and locking goes. Slots must
+ * be dereferenced with radix_tree_deref_slot, and if using only RCU
+ * protection, radix_tree_deref_slot may fail requiring a retry.
+ */
+unsigned int
+radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
+ unsigned long first_index, unsigned int max_items)
+{
+ unsigned long max_index;
+ struct radix_tree_node *node;
+ unsigned long cur_index = first_index;
+ unsigned int ret;
+
+ node = rcu_dereference(root->rnode);
+ if (!node)
+ return 0;
+
+ if (!radix_tree_is_indirect_ptr(node)) {
+ if (first_index > 0)
+ return 0;
+ results[0] = (void **)&root->rnode;
+ return 1;
+ }
+ node = radix_tree_indirect_to_ptr(node);
+
+ max_index = radix_tree_maxindex(node->height);
+
+ ret = 0;
+ while (ret < max_items) {
+ unsigned int slots_found;
+ unsigned long next_index; /* Index of next search */
+
+ if (cur_index > max_index)
+ break;
+ slots_found = __lookup(node, results + ret, cur_index,
+ max_items - ret, &next_index);
+ ret += slots_found;
+ if (next_index == 0)
+ break;
+ cur_index = next_index;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(radix_tree_gang_lookup_slot);
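
A hedged sketch of the update-if-exists pattern the slot API enables; the caller must hold the tree lock here, since the slots are written through (the helper name is illustrative):

#include <linux/radix-tree.h>

/* Replace up to 16 present items starting at 'start' with 'new_item'. */
static unsigned int replace_batch(struct radix_tree_root *root,
				  unsigned long start, void *new_item)
{
	void **slots[16];
	unsigned int n, i;

	n = radix_tree_gang_lookup_slot(root, slots, start, 16);
	for (i = 0; i < n; i++) {
		if (radix_tree_deref_slot(slots[i]))
			radix_tree_replace_slot(slots[i], new_item);
	}
	return n;
}
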
+
/*
* FIXME: the two tag_get()s here should use find_next_bit() instead of
* open-coding the search.
*/
static unsigned int
-__lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index,
+__lookup_tag(struct radix_tree_node *slot, void ***results, unsigned long index,
unsigned int max_items, unsigned long *next_index, unsigned int tag)
{
unsigned int nr_found = 0;
@@ -811,11 +876,9 @@ __lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index,
unsigned long j = index & RADIX_TREE_MAP_MASK;
for ( ; j < RADIX_TREE_MAP_SIZE; j++) {
- struct radix_tree_node *node;
index++;
if (!tag_get(slot, tag, j))
continue;
- node = slot->slots[j];
/*
* Even though the tag was found set, we need to
* recheck that we have a non-NULL node, because
@@ -826,9 +889,8 @@ __lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index,
* lookup ->slots[x] without a lock (ie. can't
* rely on its value remaining the same).
*/
- if (node) {
- node = rcu_dereference(node);
- results[nr_found++] = node;
+ if (slot->slots[j]) {
+ results[nr_found++] = &(slot->slots[j]);
if (nr_found == max_items)
goto out;
}
@@ -887,13 +949,22 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
ret = 0;
while (ret < max_items) {
- unsigned int nr_found;
+ unsigned int nr_found, slots_found, i;
unsigned long next_index; /* Index of next search */
if (cur_index > max_index)
break;
- nr_found = __lookup_tag(node, results + ret, cur_index,
- max_items - ret, &next_index, tag);
+ slots_found = __lookup_tag(node, (void ***)results + ret,
+ cur_index, max_items - ret, &next_index, tag);
+ nr_found = 0;
+ for (i = 0; i < slots_found; i++) {
+ struct radix_tree_node *slot;
+ slot = *(((void ***)results)[ret + i]);
+ if (!slot)
+ continue;
+ results[ret + nr_found] = rcu_dereference(slot);
+ nr_found++;
+ }
ret += nr_found;
if (next_index == 0)
break;
@@ -905,6 +976,67 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
/**
+ * radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a
+ * radix tree based on a tag
+ * @root: radix tree root
+ * @results: where the results of the lookup are placed
+ * @first_index: start the lookup from this key
+ * @max_items: place up to this many items at *results
+ * @tag: the tag index (< RADIX_TREE_MAX_TAGS)
+ *
+ * Performs an index-ascending scan of the tree for present items which
+ * have the tag indexed by @tag set. Places the slots at *@results and
+ * returns the number of slots which were placed at *@results.
+ */
+unsigned int
+radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
+ unsigned long first_index, unsigned int max_items,
+ unsigned int tag)
+{
+ struct radix_tree_node *node;
+ unsigned long max_index;
+ unsigned long cur_index = first_index;
+ unsigned int ret;
+
+ /* check the root's tag bit */
+ if (!root_tag_get(root, tag))
+ return 0;
+
+ node = rcu_dereference(root->rnode);
+ if (!node)
+ return 0;
+
+ if (!radix_tree_is_indirect_ptr(node)) {
+ if (first_index > 0)
+ return 0;
+ results[0] = (void **)&root->rnode;
+ return 1;
+ }
+ node = radix_tree_indirect_to_ptr(node);
+
+ max_index = radix_tree_maxindex(node->height);
+
+ ret = 0;
+ while (ret < max_items) {
+ unsigned int slots_found;
+ unsigned long next_index; /* Index of next search */
+
+ if (cur_index > max_index)
+ break;
+ slots_found = __lookup_tag(node, results + ret,
+ cur_index, max_items - ret, &next_index, tag);
+ ret += slots_found;
+ if (next_index == 0)
+ break;
+ cur_index = next_index;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
+
+
+/**
* radix_tree_shrink - shrink height of a radix tree to minimal
* @root radix tree root
*/
@@ -1051,7 +1183,7 @@ int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag)
EXPORT_SYMBOL(radix_tree_tagged);
static void
-radix_tree_node_ctor(struct kmem_cache *cachep, void *node)
+radix_tree_node_ctor(void *node)
{
memset(node, 0, sizeof(struct radix_tree_node));
}
diff --git a/lib/random32.c b/lib/random32.c
index ca87d86992b..217d5c4b666 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -56,23 +56,12 @@ static u32 __random32(struct rnd_state *state)
return (state->s1 ^ state->s2 ^ state->s3);
}
-static void __set_random32(struct rnd_state *state, unsigned long s)
+/*
+ * Handle minimum values for seeds
+ */
+static inline u32 __seed(u32 x, u32 m)
{
- if (s == 0)
- s = 1; /* default seed is 1 */
-
-#define LCG(n) (69069 * n)
- state->s1 = LCG(s);
- state->s2 = LCG(state->s1);
- state->s3 = LCG(state->s2);
-
- /* "warm it up" */
- __random32(state);
- __random32(state);
- __random32(state);
- __random32(state);
- __random32(state);
- __random32(state);
+ return (x < m) ? x + m : x;
}
/**
@@ -107,7 +96,7 @@ void srandom32(u32 entropy)
*/
for_each_possible_cpu (i) {
struct rnd_state *state = &per_cpu(net_rand_state, i);
- __set_random32(state, state->s1 ^ entropy);
+ state->s1 = __seed(state->s1 ^ entropy, 1);
}
}
EXPORT_SYMBOL(srandom32);
@@ -122,7 +111,19 @@ static int __init random32_init(void)
for_each_possible_cpu(i) {
struct rnd_state *state = &per_cpu(net_rand_state,i);
- __set_random32(state, i + jiffies);
+
+#define LCG(x) ((x) * 69069) /* super-duper LCG */
+ state->s1 = __seed(LCG(i + jiffies), 1);
+ state->s2 = __seed(LCG(state->s1), 7);
+ state->s3 = __seed(LCG(state->s2), 15);
+
+ /* "warm it up" */
+ __random32(state);
+ __random32(state);
+ __random32(state);
+ __random32(state);
+ __random32(state);
+ __random32(state);
}
return 0;
}
@@ -135,13 +136,18 @@ core_initcall(random32_init);
static int __init random32_reseed(void)
{
int i;
- unsigned long seed;
for_each_possible_cpu(i) {
struct rnd_state *state = &per_cpu(net_rand_state,i);
+ u32 seeds[3];
+
+ get_random_bytes(&seeds, sizeof(seeds));
+ state->s1 = __seed(seeds[0], 1);
+ state->s2 = __seed(seeds[1], 7);
+ state->s3 = __seed(seeds[2], 15);
- get_random_bytes(&seed, sizeof(seed));
- __set_random32(state, seed);
+ /* mix it in */
+ __random32(state);
}
return 0;
}
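
The minimums matter because the three Tausworthe components degenerate when seeded below their respective thresholds; __seed() nudges small values up rather than rejecting them. A tiny illustration (the helper is reproduced from the patch above):

static inline u32 __seed(u32 x, u32 m)
{
	return (x < m) ? x + m : x;
}

/* __seed(0, 7)   == 7   -- below the minimum, shifted up by m  */
/* __seed(100, 7) == 100 -- already valid, returned unchanged   */
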
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 485e3040dcd..26187edcc7e 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -3,6 +3,9 @@
*
* Isolated from kernel/printk.c by Dave Young <hidave.darkstar@gmail.com>
*
+ * 2008-05-01 rewritten to take a ratelimit_state struct as the
+ * parameter. Every user can now keep their own standalone ratelimit_state.
+ *
* This file is released under the GPLv2.
*
*/
@@ -11,41 +14,44 @@
#include <linux/jiffies.h>
#include <linux/module.h>
+static DEFINE_SPINLOCK(ratelimit_lock);
+
/*
* __ratelimit - rate limiting
- * @ratelimit_jiffies: minimum time in jiffies between two callbacks
- * @ratelimit_burst: number of callbacks we do before ratelimiting
+ * @rs: ratelimit_state data
*
- * This enforces a rate limit: not more than @ratelimit_burst callbacks
- * in every ratelimit_jiffies
+ * This enforces a rate limit: not more than @rs->burst callbacks
+ * in every @rs->interval jiffies.
*/
-int __ratelimit(int ratelimit_jiffies, int ratelimit_burst)
+int __ratelimit(struct ratelimit_state *rs)
{
- static DEFINE_SPINLOCK(ratelimit_lock);
- static unsigned toks = 10 * 5 * HZ;
- static unsigned long last_msg;
- static int missed;
unsigned long flags;
- unsigned long now = jiffies;
- spin_lock_irqsave(&ratelimit_lock, flags);
- toks += now - last_msg;
- last_msg = now;
- if (toks > (ratelimit_burst * ratelimit_jiffies))
- toks = ratelimit_burst * ratelimit_jiffies;
- if (toks >= ratelimit_jiffies) {
- int lost = missed;
-
- missed = 0;
- toks -= ratelimit_jiffies;
- spin_unlock_irqrestore(&ratelimit_lock, flags);
- if (lost)
- printk(KERN_WARNING "%s: %d messages suppressed\n",
- __func__, lost);
+ if (!rs->interval)
return 1;
+
+ spin_lock_irqsave(&ratelimit_lock, flags);
+ if (!rs->begin)
+ rs->begin = jiffies;
+
+ if (time_is_before_jiffies(rs->begin + rs->interval)) {
+ if (rs->missed)
+ printk(KERN_WARNING "%s: %d callbacks suppressed\n",
+ __func__, rs->missed);
+ rs->begin = 0;
+ rs->printed = 0;
+ rs->missed = 0;
}
- missed++;
+ if (rs->burst && rs->burst > rs->printed)
+ goto print;
+
+ rs->missed++;
spin_unlock_irqrestore(&ratelimit_lock, flags);
return 0;
+
+print:
+ rs->printed++;
+ spin_unlock_irqrestore(&ratelimit_lock, flags);
+ return 1;
}
EXPORT_SYMBOL(__ratelimit);
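
A hedged sketch of the new per-callsite usage, assuming the matching struct ratelimit_state definition from this series (interval and burst fields as used in the code above):

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/ratelimit.h>

/* standalone state: allow at most 5 messages per 10 seconds */
static struct ratelimit_state my_rs = {
	.interval	= 10 * HZ,
	.burst		= 5,
};

static void report_error(int err)
{
	if (__ratelimit(&my_rs))
		printk(KERN_WARNING "device error %d\n", err);
}
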
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index b80c21100d7..8d2688ff135 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -295,6 +295,117 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
EXPORT_SYMBOL(sg_alloc_table);
/**
+ * sg_miter_start - start mapping iteration over a sg list
+ * @miter: sg mapping iter to be started
+ * @sgl: sg list to iterate over
+ * @nents: number of sg entries
+ *
+ * Description:
+ * Starts mapping iterator @miter.
+ *
+ * Context:
+ * Don't care.
+ */
+void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
+ unsigned int nents, unsigned int flags)
+{
+ memset(miter, 0, sizeof(struct sg_mapping_iter));
+
+ miter->__sg = sgl;
+ miter->__nents = nents;
+ miter->__offset = 0;
+ miter->__flags = flags;
+}
+EXPORT_SYMBOL(sg_miter_start);
+
+/**
+ * sg_miter_next - proceed mapping iterator to the next mapping
+ * @miter: sg mapping iter to proceed
+ *
+ * Description:
+ * Proceeds @miter to the next mapping. @miter should have been
+ * started using sg_miter_start(). On successful return,
+ * @miter->page, @miter->addr and @miter->length point to the
+ * current mapping.
+ *
+ * Context:
+ * IRQ disabled if SG_MITER_ATOMIC is set, and it must stay disabled
+ * until @miter is stopped. May sleep if !SG_MITER_ATOMIC.
+ *
+ * Returns:
+ * true if @miter contains the next mapping. false if end of sg
+ * list is reached.
+ */
+bool sg_miter_next(struct sg_mapping_iter *miter)
+{
+ unsigned int off, len;
+
+ /* check for end and drop resources from the last iteration */
+ if (!miter->__nents)
+ return false;
+
+ sg_miter_stop(miter);
+
+ /* get to the next sg if necessary. __offset is adjusted by stop */
+ if (miter->__offset == miter->__sg->length && --miter->__nents) {
+ miter->__sg = sg_next(miter->__sg);
+ miter->__offset = 0;
+ }
+
+ /* map the next page */
+ off = miter->__sg->offset + miter->__offset;
+ len = miter->__sg->length - miter->__offset;
+
+ miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
+ off &= ~PAGE_MASK;
+ miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
+ miter->consumed = miter->length;
+
+ if (miter->__flags & SG_MITER_ATOMIC)
+ miter->addr = kmap_atomic(miter->page, KM_BIO_SRC_IRQ) + off;
+ else
+ miter->addr = kmap(miter->page) + off;
+
+ return true;
+}
+EXPORT_SYMBOL(sg_miter_next);
+
+/**
+ * sg_miter_stop - stop mapping iteration
+ * @miter: sg mapping iter to be stopped
+ *
+ * Description:
+ * Stops mapping iterator @miter. @miter should have been started
+ * using sg_miter_start(). A stopped iteration can be
+ * resumed by calling sg_miter_next() on it. This is useful when
+ * resources (kmap) need to be released during iteration.
+ *
+ * Context:
+ * IRQ disabled if SG_MITER_ATOMIC is set. Don't care otherwise.
+ */
+void sg_miter_stop(struct sg_mapping_iter *miter)
+{
+ WARN_ON(miter->consumed > miter->length);
+
+ /* drop resources from the last iteration */
+ if (miter->addr) {
+ miter->__offset += miter->consumed;
+
+ if (miter->__flags & SG_MITER_ATOMIC) {
+ WARN_ON(!irqs_disabled());
+ kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
+ } else
+ kunmap(miter->addr);
+
+ miter->page = NULL;
+ miter->addr = NULL;
+ miter->length = 0;
+ miter->consumed = 0;
+ }
+}
+EXPORT_SYMBOL(sg_miter_stop);
+
+/**
* sg_copy_buffer - Copy data between a linear buffer and an SG list
* @sgl: The SG list
* @nents: Number of SG entries
@@ -309,56 +420,33 @@ EXPORT_SYMBOL(sg_alloc_table);
static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen, int to_buffer)
{
- struct scatterlist *sg;
- size_t buf_off = 0;
- int i;
-
- WARN_ON(!irqs_disabled());
-
- for_each_sg(sgl, sg, nents, i) {
- struct page *page;
- int n = 0;
- unsigned int sg_off = sg->offset;
- unsigned int sg_copy = sg->length;
-
- if (sg_copy > buflen)
- sg_copy = buflen;
- buflen -= sg_copy;
-
- while (sg_copy > 0) {
- unsigned int page_copy;
- void *p;
-
- page_copy = PAGE_SIZE - sg_off;
- if (page_copy > sg_copy)
- page_copy = sg_copy;
-
- page = nth_page(sg_page(sg), n);
- p = kmap_atomic(page, KM_BIO_SRC_IRQ);
-
- if (to_buffer)
- memcpy(buf + buf_off, p + sg_off, page_copy);
- else {
- memcpy(p + sg_off, buf + buf_off, page_copy);
- flush_kernel_dcache_page(page);
- }
-
- kunmap_atomic(p, KM_BIO_SRC_IRQ);
-
- buf_off += page_copy;
- sg_off += page_copy;
- if (sg_off == PAGE_SIZE) {
- sg_off = 0;
- n++;
- }
- sg_copy -= page_copy;
+ unsigned int offset = 0;
+ struct sg_mapping_iter miter;
+ unsigned long flags;
+
+ sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);
+
+ local_irq_save(flags);
+
+ while (sg_miter_next(&miter) && offset < buflen) {
+ unsigned int len;
+
+ len = min(miter.length, buflen - offset);
+
+ if (to_buffer)
+ memcpy(buf + offset, miter.addr, len);
+ else {
+ memcpy(miter.addr, buf + offset, len);
+ flush_kernel_dcache_page(miter.page);
}
- if (!buflen)
- break;
+ offset += len;
}
- return buf_off;
+ sg_miter_stop(&miter);
+
+ local_irq_restore(flags);
+ return offset;
}
/**
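
sg_copy_buffer() above is the first in-tree user of the iterator. A minimal hedged sketch of the same pattern (the helper name is illustrative; SG_MITER_ATOMIC requires IRQs disabled across the mapping, as sg_miter_next() documents):

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/scatterlist.h>

/* Zero every byte covered by an sg list, one mapped page at a time. */
static void sg_zero(struct scatterlist *sgl, unsigned int nents)
{
	struct sg_mapping_iter miter;
	unsigned long flags;

	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);
	local_irq_save(flags);
	while (sg_miter_next(&miter))
		memset(miter.addr, 0, miter.length);
	sg_miter_stop(&miter);
	local_irq_restore(flags);
}
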
diff --git a/lib/show_mem.c b/lib/show_mem.c
new file mode 100644
index 00000000000..238e72a18ce
--- /dev/null
+++ b/lib/show_mem.c
@@ -0,0 +1,63 @@
+/*
+ * Generic show_mem() implementation
+ *
+ * Copyright (C) 2008 Johannes Weiner <hannes@saeurebad.de>
+ * All code subject to the GPL version 2.
+ */
+
+#include <linux/mm.h>
+#include <linux/nmi.h>
+#include <linux/quicklist.h>
+
+void show_mem(void)
+{
+ pg_data_t *pgdat;
+ unsigned long total = 0, reserved = 0, shared = 0,
+ nonshared = 0, highmem = 0;
+
+ printk(KERN_INFO "Mem-Info:\n");
+ show_free_areas();
+
+ for_each_online_pgdat(pgdat) {
+ unsigned long i, flags;
+
+ pgdat_resize_lock(pgdat, &flags);
+ for (i = 0; i < pgdat->node_spanned_pages; i++) {
+ struct page *page;
+ unsigned long pfn = pgdat->node_start_pfn + i;
+
+ if (unlikely(!(i % MAX_ORDER_NR_PAGES)))
+ touch_nmi_watchdog();
+
+ if (!pfn_valid(pfn))
+ continue;
+
+ page = pfn_to_page(pfn);
+
+ if (PageHighMem(page))
+ highmem++;
+
+ if (PageReserved(page))
+ reserved++;
+ else if (page_count(page) == 1)
+ nonshared++;
+ else if (page_count(page) > 1)
+ shared += page_count(page) - 1;
+
+ total++;
+ }
+ pgdat_resize_unlock(pgdat, &flags);
+ }
+
+ printk(KERN_INFO "%lu pages RAM\n", total);
+#ifdef CONFIG_HIGHMEM
+ printk(KERN_INFO "%lu pages HighMem\n", highmem);
+#endif
+ printk(KERN_INFO "%lu pages reserved\n", reserved);
+ printk(KERN_INFO "%lu pages shared\n", shared);
+ printk(KERN_INFO "%lu pages non-shared\n", nonshared);
+#ifdef CONFIG_QUICKLIST
+ printk(KERN_INFO "%lu pages in pagetable cache\n",
+ quicklist_total_size());
+#endif
+}
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 3b4dc098181..0f8fc22ed10 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -11,7 +11,6 @@ notrace unsigned int debug_smp_processor_id(void)
{
unsigned long preempt_count = preempt_count();
int this_cpu = raw_smp_processor_id();
- cpumask_t this_mask;
if (likely(preempt_count))
goto out;
@@ -23,9 +22,7 @@ notrace unsigned int debug_smp_processor_id(void)
* Kernel threads bound to a single CPU can safely use
* smp_processor_id():
*/
- this_mask = cpumask_of_cpu(this_cpu);
-
- if (cpus_equal(current->cpus_allowed, this_mask))
+ if (cpus_equal(current->cpus_allowed, cpumask_of_cpu(this_cpu)))
goto out;
/*
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
new file mode 100644
index 00000000000..ab431d4cc97
--- /dev/null
+++ b/lib/string_helpers.c
@@ -0,0 +1,68 @@
+/*
+ * Helpers for formatting and printing strings
+ *
+ * Copyright 31 August 2008 James Bottomley
+ */
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/string_helpers.h>
+
+/**
+ * string_get_size - get the size in the specified units
+ * @size: The size to be converted
+ * @units: units to use (powers of 1000 or 1024)
+ * @buf: buffer to format to
+ * @len: length of buffer
+ *
+ * This function returns a string formatted to 3 significant figures
+ * giving the size in the required units. Returns 0 on success or a
+ * negative error code on failure. @buf is always zero terminated.
+ *
+ */
+int string_get_size(u64 size, const enum string_size_units units,
+ char *buf, int len)
+{
+ const char *units_10[] = { "B", "kB", "MB", "GB", "TB", "PB",
+ "EB", "ZB", "YB", NULL};
+ const char *units_2[] = {"B", "KiB", "MiB", "GiB", "TiB", "PiB",
+ "EiB", "ZiB", "YiB", NULL };
+ const char **units_str[] = {
+ [STRING_UNITS_10] = units_10,
+ [STRING_UNITS_2] = units_2,
+ };
+ const unsigned int divisor[] = {
+ [STRING_UNITS_10] = 1000,
+ [STRING_UNITS_2] = 1024,
+ };
+ int i, j;
+ u64 remainder = 0, sf_cap;
+ char tmp[8];
+
+ tmp[0] = '\0';
+ i = 0;
+ if (size >= divisor[units]) {
+ while (size >= divisor[units] && units_str[units][i]) {
+ remainder = do_div(size, divisor[units]);
+ i++;
+ }
+
+ sf_cap = size;
+ for (j = 0; sf_cap*10 < 1000; j++)
+ sf_cap *= 10;
+
+ if (j) {
+ remainder *= 1000;
+ do_div(remainder, divisor[units]);
+ snprintf(tmp, sizeof(tmp), ".%03lld",
+ (unsigned long long)remainder);
+ tmp[j+1] = '\0';
+ }
+ }
+
+ snprintf(buf, len, "%lld%s %s", (unsigned long long)size,
+ tmp, units_str[units][i]);
+
+ return 0;
+}
+EXPORT_SYMBOL(string_get_size);
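
A short worked example of the rounding behaviour (values computed from the code above; the helper is illustrative):

#include <linux/string_helpers.h>

static void size_demo(void)
{
	char buf[16];

	string_get_size(2048ULL, STRING_UNITS_2, buf, sizeof(buf));
	/* buf == "2.00 KiB" (2048 / 1024, padded to 3 sig. figures) */

	string_get_size(2048ULL, STRING_UNITS_10, buf, sizeof(buf));
	/* buf == "2.04 kB"  (remainder 48/1000, truncated to fit)   */
}
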
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index d568894df8c..f8eebd48914 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -274,13 +274,14 @@ cleanup1:
}
static int
-address_needs_mapping(struct device *hwdev, dma_addr_t addr)
+address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
{
- dma_addr_t mask = 0xffffffff;
- /* If the device has a mask, use it, otherwise default to 32 bits */
- if (hwdev && hwdev->dma_mask)
- mask = *hwdev->dma_mask;
- return (addr & ~mask) != 0;
+ return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
+}
+
+static int is_swiotlb_buffer(char *addr)
+{
+ return addr >= io_tlb_start && addr < io_tlb_end;
}
/*
@@ -467,15 +468,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
void *ret;
int order = get_order(size);
- /*
- * XXX fix me: the DMA API should pass us an explicit DMA mask
- * instead, or use ZONE_DMA32 (ia64 overloads ZONE_DMA to be a ~32
- * bit range instead of a 16MB one).
- */
- flags |= GFP_DMA;
-
ret = (void *)__get_free_pages(flags, order);
- if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
+ if (ret && address_needs_mapping(hwdev, virt_to_bus(ret), size)) {
/*
* The allocated memory isn't reachable by the device.
* Fall back on swiotlb_map_single().
@@ -490,19 +484,16 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
* swiotlb_map_single(), which will grab memory from
* the lowest available address range.
*/
- dma_addr_t handle;
- handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
- if (swiotlb_dma_mapping_error(handle))
+ ret = map_single(hwdev, NULL, size, DMA_FROM_DEVICE);
+ if (!ret)
return NULL;
-
- ret = bus_to_virt(handle);
}
memset(ret, 0, size);
dev_addr = virt_to_bus(ret);
/* Confirm address can be DMA'd by device */
- if (address_needs_mapping(hwdev, dev_addr)) {
+ if (address_needs_mapping(hwdev, dev_addr, size)) {
printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
(unsigned long long)*hwdev->dma_mask,
(unsigned long long)dev_addr);
@@ -518,12 +509,11 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
dma_addr_t dma_handle)
{
WARN_ON(irqs_disabled());
- if (!(vaddr >= (void *)io_tlb_start
- && vaddr < (void *)io_tlb_end))
+ if (!is_swiotlb_buffer(vaddr))
free_pages((unsigned long) vaddr, get_order(size));
else
/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
- swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
+ unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
}
static void
@@ -567,7 +557,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
* we can safely return the device addr and not worry about bounce
* buffering it.
*/
- if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
+ if (!address_needs_mapping(hwdev, dev_addr, size) && !swiotlb_force)
return dev_addr;
/*
@@ -584,7 +574,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
/*
* Ensure that the address returned is DMA'ble
*/
- if (address_needs_mapping(hwdev, dev_addr))
+ if (address_needs_mapping(hwdev, dev_addr, size))
panic("map_single: bounce buffer is not DMA'ble");
return dev_addr;
@@ -612,7 +602,7 @@ swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
char *dma_addr = bus_to_virt(dev_addr);
BUG_ON(dir == DMA_NONE);
- if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+ if (is_swiotlb_buffer(dma_addr))
unmap_single(hwdev, dma_addr, size, dir);
else if (dir == DMA_FROM_DEVICE)
dma_mark_clean(dma_addr, size);
@@ -642,7 +632,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
char *dma_addr = bus_to_virt(dev_addr);
BUG_ON(dir == DMA_NONE);
- if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+ if (is_swiotlb_buffer(dma_addr))
sync_single(hwdev, dma_addr, size, dir, target);
else if (dir == DMA_FROM_DEVICE)
dma_mark_clean(dma_addr, size);
@@ -673,7 +663,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
char *dma_addr = bus_to_virt(dev_addr) + offset;
BUG_ON(dir == DMA_NONE);
- if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+ if (is_swiotlb_buffer(dma_addr))
sync_single(hwdev, dma_addr, size, dir, target);
else if (dir == DMA_FROM_DEVICE)
dma_mark_clean(dma_addr, size);
@@ -727,7 +717,8 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
for_each_sg(sgl, sg, nelems, i) {
addr = SG_ENT_VIRT_ADDRESS(sg);
dev_addr = virt_to_bus(addr);
- if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
+ if (swiotlb_force ||
+ address_needs_mapping(hwdev, dev_addr, sg->length)) {
void *map = map_single(hwdev, addr, sg->length, dir);
if (!map) {
/* Don't panic here, we expect map_sg users
@@ -824,7 +815,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
}
int
-swiotlb_dma_mapping_error(dma_addr_t dma_addr)
+swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
}
diff --git a/lib/syscall.c b/lib/syscall.c
new file mode 100644
index 00000000000..a4f7067f72f
--- /dev/null
+++ b/lib/syscall.c
@@ -0,0 +1,75 @@
+#include <linux/ptrace.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <asm/syscall.h>
+
+static int collect_syscall(struct task_struct *target, long *callno,
+ unsigned long args[6], unsigned int maxargs,
+ unsigned long *sp, unsigned long *pc)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+ if (unlikely(!regs))
+ return -EAGAIN;
+
+ *sp = user_stack_pointer(regs);
+ *pc = instruction_pointer(regs);
+
+ *callno = syscall_get_nr(target, regs);
+ if (*callno != -1L && maxargs > 0)
+ syscall_get_arguments(target, regs, 0, maxargs, args);
+
+ return 0;
+}
+
+/**
+ * task_current_syscall - Discover what a blocked task is doing.
+ * @target: thread to examine
+ * @callno: filled with system call number or -1
+ * @args: filled with @maxargs system call arguments
+ * @maxargs: number of elements in @args to fill
+ * @sp: filled with user stack pointer
+ * @pc: filled with user PC
+ *
+ * If @target is blocked in a system call, returns zero with *@callno
+ * set to the call's number and @args filled in with its arguments.
+ * Registers not used for system call arguments may not be available and
+ * it is not kosher to use &struct user_regset calls while the system
+ * call is still in progress. Note we may get this result if @target
+ * has finished its system call but not yet returned to user mode, such
+ * as when it's stopped for signal handling or syscall exit tracing.
+ *
+ * If @target is blocked in the kernel during a fault or exception,
+ * returns zero with *@callno set to -1 and does not fill in @args.
+ * If so, it's now safe to examine @target using &struct user_regset
+ * get() calls as long as we're sure @target won't return to user mode.
+ *
+ * Returns -%EAGAIN if @target does not remain blocked.
+ *
+ * Returns -%EINVAL if @maxargs is too large (maximum is six).
+ */
+int task_current_syscall(struct task_struct *target, long *callno,
+ unsigned long args[6], unsigned int maxargs,
+ unsigned long *sp, unsigned long *pc)
+{
+ long state;
+ unsigned long ncsw;
+
+ if (unlikely(maxargs > 6))
+ return -EINVAL;
+
+ if (target == current)
+ return collect_syscall(target, callno, args, maxargs, sp, pc);
+
+ state = target->state;
+ if (unlikely(!state))
+ return -EAGAIN;
+
+ ncsw = wait_task_inactive(target, state);
+ if (unlikely(!ncsw) ||
+ unlikely(collect_syscall(target, callno, args, maxargs, sp, pc)) ||
+ unlikely(wait_task_inactive(target, state) != ncsw))
+ return -EAGAIN;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(task_current_syscall);
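
A hedged sketch of a caller, in the style of what a /proc/<pid>/syscall reader might do (the helper, output format, and visibility of the task_current_syscall() declaration are assumptions here):

#include <linux/kernel.h>
#include <linux/sched.h>

static void show_blocked_syscall(struct task_struct *task)
{
	long nr;
	unsigned long args[6], sp, pc;

	if (task_current_syscall(task, &nr, args, 6, &sp, &pc))
		return;	/* -EAGAIN: target did not stay blocked */

	if (nr == -1L)	/* blocked in a fault or exception */
		printk(KERN_DEBUG "pc=%lx sp=%lx\n", pc, sp);
	else
		printk(KERN_DEBUG "syscall %ld, arg0=%lx\n", nr, args[0]);
}
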
diff --git a/lib/textsearch.c b/lib/textsearch.c
index a3e500ad51d..9fbcb44c554 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -54,10 +54,13 @@
* USAGE
*
* Before a search can be performed, a configuration must be created
- * by calling textsearch_prepare() specyfing the searching algorithm and
- * the pattern to look for. The returned configuration may then be used
- * for an arbitary amount of times and even in parallel as long as a
- * separate struct ts_state variable is provided to every instance.
+ * by calling textsearch_prepare() specifying the searching algorithm,
+ * the pattern to look for, and flags. Setting the TS_IGNORECASE flag
+ * enables case-insensitive matching, but it may slow the algorithm
+ * down, so use it at your own risk. The returned configuration may
+ * then be used an arbitrary number of times, even in parallel, as
+ * long as a separate struct ts_state variable is provided to every
+ * instance.
*
* The actual search is performed by either calling textsearch_find_-
* continuous() for linear data or by providing an own get_next_block()
@@ -89,7 +92,6 @@
* panic("Oh my god, dancing chickens at %d\n", pos);
*
* textsearch_destroy(conf);
- *
* ==========================================================================
*/
@@ -265,7 +267,7 @@ struct ts_config *textsearch_prepare(const char *algo, const void *pattern,
return ERR_PTR(-EINVAL);
ops = lookup_ts_algo(algo);
-#ifdef CONFIG_KMOD
+#ifdef CONFIG_MODULES
/*
* Why not always autoload you may ask. Some users are
* in a situation where requesting a module may deadlock,
@@ -280,7 +282,7 @@ struct ts_config *textsearch_prepare(const char *algo, const void *pattern,
if (ops == NULL)
goto errout;
- conf = ops->init(pattern, len, gfp_mask);
+ conf = ops->init(pattern, len, gfp_mask, flags);
if (IS_ERR(conf)) {
err = PTR_ERR(conf);
goto errout;
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
index 4a7fce72898..9e66ee4020e 100644
--- a/lib/ts_bm.c
+++ b/lib/ts_bm.c
@@ -39,6 +39,7 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
+#include <linux/ctype.h>
#include <linux/textsearch.h>
/* Alphabet size, use ASCII */
@@ -64,6 +65,7 @@ static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
unsigned int i, text_len, consumed = state->offset;
const u8 *text;
int shift = bm->patlen - 1, bs;
+ const u8 icase = conf->flags & TS_IGNORECASE;
for (;;) {
text_len = conf->get_next_block(consumed, &text, conf, state);
@@ -75,7 +77,9 @@ static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
DEBUGP("Searching in position %d (%c)\n",
shift, text[shift]);
for (i = 0; i < bm->patlen; i++)
- if (text[shift-i] != bm->pattern[bm->patlen-1-i])
+ if ((icase ? toupper(text[shift-i])
+ : text[shift-i])
+ != bm->pattern[bm->patlen-1-i])
goto next;
/* London calling... */
@@ -111,14 +115,18 @@ static int subpattern(u8 *pattern, int i, int j, int g)
return ret;
}
-static void compute_prefix_tbl(struct ts_bm *bm)
+static void compute_prefix_tbl(struct ts_bm *bm, int flags)
{
int i, j, g;
for (i = 0; i < ASIZE; i++)
bm->bad_shift[i] = bm->patlen;
- for (i = 0; i < bm->patlen - 1; i++)
+ for (i = 0; i < bm->patlen - 1; i++) {
bm->bad_shift[bm->pattern[i]] = bm->patlen - 1 - i;
+ if (flags & TS_IGNORECASE)
+ bm->bad_shift[tolower(bm->pattern[i])]
+ = bm->patlen - 1 - i;
+ }
/* Compute the good shift array, used to match recurrences
* of a subpattern */
@@ -135,10 +143,11 @@ static void compute_prefix_tbl(struct ts_bm *bm)
}
static struct ts_config *bm_init(const void *pattern, unsigned int len,
- gfp_t gfp_mask)
+ gfp_t gfp_mask, int flags)
{
struct ts_config *conf;
struct ts_bm *bm;
+ int i;
unsigned int prefix_tbl_len = len * sizeof(unsigned int);
size_t priv_size = sizeof(*bm) + len + prefix_tbl_len;
@@ -146,11 +155,16 @@ static struct ts_config *bm_init(const void *pattern, unsigned int len,
if (IS_ERR(conf))
return conf;
+ conf->flags = flags;
bm = ts_config_priv(conf);
bm->patlen = len;
bm->pattern = (u8 *) bm->good_shift + prefix_tbl_len;
- memcpy(bm->pattern, pattern, len);
- compute_prefix_tbl(bm);
+ if (flags & TS_IGNORECASE)
+ for (i = 0; i < len; i++)
+ bm->pattern[i] = toupper(((u8 *)pattern)[i]);
+ else
+ memcpy(bm->pattern, pattern, len);
+ compute_prefix_tbl(bm, flags);
return conf;
}
diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c
index af575b61526..5696a35184e 100644
--- a/lib/ts_fsm.c
+++ b/lib/ts_fsm.c
@@ -257,7 +257,7 @@ found_match:
}
static struct ts_config *fsm_init(const void *pattern, unsigned int len,
- gfp_t gfp_mask)
+ gfp_t gfp_mask, int flags)
{
int i, err = -EINVAL;
struct ts_config *conf;
@@ -269,6 +269,9 @@ static struct ts_config *fsm_init(const void *pattern, unsigned int len,
if (len % sizeof(struct ts_fsm_token) || ntokens < 1)
goto errout;
+ if (flags & TS_IGNORECASE)
+ goto errout;
+
for (i = 0; i < ntokens; i++) {
struct ts_fsm_token *t = &tokens[i];
@@ -284,6 +287,7 @@ static struct ts_config *fsm_init(const void *pattern, unsigned int len,
if (IS_ERR(conf))
return conf;
+ conf->flags = flags;
fsm = ts_config_priv(conf);
fsm->ntokens = ntokens;
memcpy(fsm->tokens, pattern, len);
diff --git a/lib/ts_kmp.c b/lib/ts_kmp.c
index 3ced628cab4..632f783e65f 100644
--- a/lib/ts_kmp.c
+++ b/lib/ts_kmp.c
@@ -33,6 +33,7 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
+#include <linux/ctype.h>
#include <linux/textsearch.h>
struct ts_kmp
@@ -47,6 +48,7 @@ static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state)
struct ts_kmp *kmp = ts_config_priv(conf);
unsigned int i, q = 0, text_len, consumed = state->offset;
const u8 *text;
+ const int icase = conf->flags & TS_IGNORECASE;
for (;;) {
text_len = conf->get_next_block(consumed, &text, conf, state);
@@ -55,9 +57,11 @@ static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state)
break;
for (i = 0; i < text_len; i++) {
- while (q > 0 && kmp->pattern[q] != text[i])
+ while (q > 0 && kmp->pattern[q]
+ != (icase ? toupper(text[i]) : text[i]))
q = kmp->prefix_tbl[q - 1];
- if (kmp->pattern[q] == text[i])
+ if (kmp->pattern[q]
+ == (icase ? toupper(text[i]) : text[i]))
q++;
if (unlikely(q == kmp->pattern_len)) {
state->offset = consumed + i + 1;
@@ -72,24 +76,28 @@ static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state)
}
static inline void compute_prefix_tbl(const u8 *pattern, unsigned int len,
- unsigned int *prefix_tbl)
+ unsigned int *prefix_tbl, int flags)
{
unsigned int k, q;
+ const u8 icase = flags & TS_IGNORECASE;
for (k = 0, q = 1; q < len; q++) {
- while (k > 0 && pattern[k] != pattern[q])
+ while (k > 0 && (icase ? toupper(pattern[k]) : pattern[k])
+ != (icase ? toupper(pattern[q]) : pattern[q]))
k = prefix_tbl[k-1];
- if (pattern[k] == pattern[q])
+ if ((icase ? toupper(pattern[k]) : pattern[k])
+ == (icase ? toupper(pattern[q]) : pattern[q]))
k++;
prefix_tbl[q] = k;
}
}
static struct ts_config *kmp_init(const void *pattern, unsigned int len,
- gfp_t gfp_mask)
+ gfp_t gfp_mask, int flags)
{
struct ts_config *conf;
struct ts_kmp *kmp;
+ int i;
unsigned int prefix_tbl_len = len * sizeof(unsigned int);
size_t priv_size = sizeof(*kmp) + len + prefix_tbl_len;
@@ -97,11 +105,16 @@ static struct ts_config *kmp_init(const void *pattern, unsigned int len,
if (IS_ERR(conf))
return conf;
+ conf->flags = flags;
kmp = ts_config_priv(conf);
kmp->pattern_len = len;
- compute_prefix_tbl(pattern, len, kmp->prefix_tbl);
+ compute_prefix_tbl(pattern, len, kmp->prefix_tbl, flags);
kmp->pattern = (u8 *) kmp->prefix_tbl + prefix_tbl_len;
- memcpy(kmp->pattern, pattern, len);
+ if (flags & TS_IGNORECASE)
+ for (i = 0; i < len; i++)
+ kmp->pattern[i] = toupper(((u8 *)pattern)[i]);
+ else
+ memcpy(kmp->pattern, pattern, len);
return conf;
}
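
A hedged sketch of the new flag in use (the helper name is illustrative; note that ts_fsm rejects TS_IGNORECASE, as the fsm_init() hunk above shows):

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/textsearch.h>

/* Returns the offset of "needle" in data, ignoring case, or -1. */
static int find_needle(const void *data, unsigned int len)
{
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	conf = textsearch_prepare("kmp", "needle", 6, GFP_KERNEL,
				  TS_AUTOLOAD | TS_IGNORECASE);
	if (IS_ERR(conf))
		return PTR_ERR(conf);

	pos = textsearch_find_continuous(conf, &state, data, len);
	textsearch_destroy(conf);
	return pos == UINT_MAX ? -1 : (int)pos;
}
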
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 1dc2d1d18fa..a013bbc2371 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -24,47 +24,57 @@
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
+#include <linux/ioport.h>
#include <asm/page.h> /* for PAGE_SIZE */
#include <asm/div64.h>
+#include <asm/sections.h> /* for dereference_function_descriptor() */
/* Works only for digits and letters, but small and fast */
#define TOLOWER(x) ((x) | 0x20)
+static unsigned int simple_guess_base(const char *cp)
+{
+ if (cp[0] == '0') {
+ if (TOLOWER(cp[1]) == 'x' && isxdigit(cp[2]))
+ return 16;
+ else
+ return 8;
+ } else {
+ return 10;
+ }
+}
+
/**
* simple_strtoul - convert a string to an unsigned long
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*/
-unsigned long simple_strtoul(const char *cp,char **endp,unsigned int base)
+unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base)
{
- unsigned long result = 0,value;
+ unsigned long result = 0;
- if (!base) {
- base = 10;
- if (*cp == '0') {
- base = 8;
- cp++;
- if ((TOLOWER(*cp) == 'x') && isxdigit(cp[1])) {
- cp++;
- base = 16;
- }
- }
- } else if (base == 16) {
- if (cp[0] == '0' && TOLOWER(cp[1]) == 'x')
- cp += 2;
- }
- while (isxdigit(*cp) &&
- (value = isdigit(*cp) ? *cp-'0' : TOLOWER(*cp)-'a'+10) < base) {
- result = result*base + value;
+ if (!base)
+ base = simple_guess_base(cp);
+
+ if (base == 16 && cp[0] == '0' && TOLOWER(cp[1]) == 'x')
+ cp += 2;
+
+ while (isxdigit(*cp)) {
+ unsigned int value;
+
+ value = isdigit(*cp) ? *cp - '0' : TOLOWER(*cp) - 'a' + 10;
+ if (value >= base)
+ break;
+ result = result * base + value;
cp++;
}
+
if (endp)
*endp = (char *)cp;
return result;
}
-
EXPORT_SYMBOL(simple_strtoul);
/**
@@ -73,13 +83,12 @@ EXPORT_SYMBOL(simple_strtoul);
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*/
-long simple_strtol(const char *cp,char **endp,unsigned int base)
+long simple_strtol(const char *cp, char **endp, unsigned int base)
{
- if(*cp=='-')
- return -simple_strtoul(cp+1,endp,base);
- return simple_strtoul(cp,endp,base);
+ if(*cp == '-')
+ return -simple_strtoul(cp + 1, endp, base);
+ return simple_strtoul(cp, endp, base);
}
-
EXPORT_SYMBOL(simple_strtol);
/**
@@ -88,34 +97,30 @@ EXPORT_SYMBOL(simple_strtol);
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*/
-unsigned long long simple_strtoull(const char *cp,char **endp,unsigned int base)
+unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
{
- unsigned long long result = 0,value;
+ unsigned long long result = 0;
- if (!base) {
- base = 10;
- if (*cp == '0') {
- base = 8;
- cp++;
- if ((TOLOWER(*cp) == 'x') && isxdigit(cp[1])) {
- cp++;
- base = 16;
- }
- }
- } else if (base == 16) {
- if (cp[0] == '0' && TOLOWER(cp[1]) == 'x')
- cp += 2;
- }
- while (isxdigit(*cp)
- && (value = isdigit(*cp) ? *cp-'0' : TOLOWER(*cp)-'a'+10) < base) {
- result = result*base + value;
+ if (!base)
+ base = simple_guess_base(cp);
+
+ if (base == 16 && cp[0] == '0' && TOLOWER(cp[1]) == 'x')
+ cp += 2;
+
+ while (isxdigit(*cp)) {
+ unsigned int value;
+
+ value = isdigit(*cp) ? *cp - '0' : TOLOWER(*cp) - 'a' + 10;
+ if (value >= base)
+ break;
+ result = result * base + value;
cp++;
}
+
if (endp)
*endp = (char *)cp;
return result;
}
-
EXPORT_SYMBOL(simple_strtoull);
/**
@@ -124,14 +129,13 @@ EXPORT_SYMBOL(simple_strtoull);
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*/
-long long simple_strtoll(const char *cp,char **endp,unsigned int base)
+long long simple_strtoll(const char *cp, char **endp, unsigned int base)
{
if(*cp=='-')
- return -simple_strtoull(cp+1,endp,base);
- return simple_strtoull(cp,endp,base);
+ return -simple_strtoull(cp + 1, endp, base);
+ return simple_strtoull(cp, endp, base);
}
-
/**
* strict_strtoul - convert a string to an unsigned long strictly
* @cp: The string to be converted
@@ -154,7 +158,27 @@ long long simple_strtoll(const char *cp,char **endp,unsigned int base)
* simple_strtoul just ignores the successive invalid characters and
* return the converted value of prefix part of the string.
*/
-int strict_strtoul(const char *cp, unsigned int base, unsigned long *res);
+int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
+{
+ char *tail;
+ unsigned long val;
+ size_t len;
+
+ *res = 0;
+ len = strlen(cp);
+ if (len == 0)
+ return -EINVAL;
+
+ val = simple_strtoul(cp, &tail, base);
+ if ((*tail == '\0') ||
+ ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
+ *res = val;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(strict_strtoul);
/**
* strict_strtol - convert a string to a long strictly
@@ -168,7 +192,20 @@ int strict_strtoul(const char *cp, unsigned int base, unsigned long *res);
* It returns 0 if conversion is successful and *res is set to the converted
* value, otherwise it returns -EINVAL and *res is set to 0.
*/
-int strict_strtol(const char *cp, unsigned int base, long *res);
+int strict_strtol(const char *cp, unsigned int base, long *res)
+{
+ int ret;
+ if (*cp == '-') {
+ ret = strict_strtoul(cp + 1, base, (unsigned long *)res);
+ if (!ret)
+ *res = -(*res);
+ } else {
+ ret = strict_strtoul(cp, base, (unsigned long *)res);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(strict_strtol);
/**
* strict_strtoull - convert a string to an unsigned long long strictly
@@ -192,7 +229,27 @@ int strict_strtol(const char *cp, unsigned int base, long *res);
* simple_strtoull just ignores the successive invalid characters and
* return the converted value of prefix part of the string.
*/
-int strict_strtoull(const char *cp, unsigned int base, unsigned long long *res);
+int strict_strtoull(const char *cp, unsigned int base, unsigned long long *res)
+{
+ char *tail;
+ unsigned long long val;
+ size_t len;
+
+ *res = 0;
+ len = strlen(cp);
+ if (len == 0)
+ return -EINVAL;
+
+ val = simple_strtoull(cp, &tail, base);
+ if ((*tail == '\0') ||
+ ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
+ *res = val;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(strict_strtoull);
/**
* strict_strtoll - convert a string to a long long strictly
@@ -206,53 +263,20 @@ int strict_strtoull(const char *cp, unsigned int base, unsigned long long *res);
* It returns 0 if conversion is successful and *res is set to the converted
* value, otherwise it returns -EINVAL and *res is set to 0.
*/
-int strict_strtoll(const char *cp, unsigned int base, long long *res);
-
-#define define_strict_strtoux(type, valtype) \
-int strict_strtou##type(const char *cp, unsigned int base, valtype *res)\
-{ \
- char *tail; \
- valtype val; \
- size_t len; \
- \
- *res = 0; \
- len = strlen(cp); \
- if (len == 0) \
- return -EINVAL; \
- \
- val = simple_strtoul(cp, &tail, base); \
- if ((*tail == '\0') || \
- ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {\
- *res = val; \
- return 0; \
- } \
- \
- return -EINVAL; \
-} \
-
-#define define_strict_strtox(type, valtype) \
-int strict_strto##type(const char *cp, unsigned int base, valtype *res) \
-{ \
- int ret; \
- if (*cp == '-') { \
- ret = strict_strtou##type(cp+1, base, res); \
- if (!ret) \
- *res = -(*res); \
- } else \
- ret = strict_strtou##type(cp, base, res); \
- \
- return ret; \
-} \
-
-define_strict_strtoux(l, unsigned long)
-define_strict_strtox(l, long)
-define_strict_strtoux(ll, unsigned long long)
-define_strict_strtox(ll, long long)
+int strict_strtoll(const char *cp, unsigned int base, long long *res)
+{
+ int ret;
+ if (*cp == '-') {
+ ret = strict_strtoull(cp + 1, base, (unsigned long long *)res);
+ if (!ret)
+ *res = -(*res);
+ } else {
+ ret = strict_strtoull(cp, base, (unsigned long long *)res);
+ }
-EXPORT_SYMBOL(strict_strtoul);
-EXPORT_SYMBOL(strict_strtol);
+ return ret;
+}
EXPORT_SYMBOL(strict_strtoll);
-EXPORT_SYMBOL(strict_strtoull);
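
The difference between the two families is easiest to see side by side; a hedged sketch (return values per the implementations above):

#include <linux/kernel.h>
#include <linux/errno.h>

static int parse_demo(void)
{
	unsigned long val;

	/* simple_strtoul() stops quietly at the first bad character: */
	val = simple_strtoul("123abc", NULL, 10);	/* val == 123 */

	/* strict_strtoul() rejects the whole string instead: */
	if (strict_strtoul("123abc", 10, &val))
		return -EINVAL;	/* taken: returns -EINVAL, val == 0 */

	return 0;
}
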
static int skip_atoi(const char **s)
{
@@ -513,16 +537,6 @@ static char *string(char *buf, char *end, char *s, int field_width, int precisio
return buf;
}
-static inline void *dereference_function_descriptor(void *ptr)
-{
-#if defined(CONFIG_IA64) || defined(CONFIG_PPC64)
- void *p;
- if (!probe_kernel_address(ptr, p))
- ptr = p;
-#endif
- return ptr;
-}
-
static char *symbol_string(char *buf, char *end, void *ptr, int field_width, int precision, int flags)
{
unsigned long value = (unsigned long) ptr;
@@ -537,18 +551,51 @@ static char *symbol_string(char *buf, char *end, void *ptr, int field_width, int
#endif
}
+static char *resource_string(char *buf, char *end, struct resource *res, int field_width, int precision, int flags)
+{
+#ifndef IO_RSRC_PRINTK_SIZE
+#define IO_RSRC_PRINTK_SIZE 4
+#endif
+
+#ifndef MEM_RSRC_PRINTK_SIZE
+#define MEM_RSRC_PRINTK_SIZE 8
+#endif
+
+ /* room for the actual numbers, the two "0x", -, [, ] and the final zero */
+ char sym[4*sizeof(resource_size_t) + 8];
+ char *p = sym, *pend = sym + sizeof(sym);
+ int size = -1;
+
+ if (res->flags & IORESOURCE_IO)
+ size = IO_RSRC_PRINTK_SIZE;
+ else if (res->flags & IORESOURCE_MEM)
+ size = MEM_RSRC_PRINTK_SIZE;
+
+ *p++ = '[';
+ p = number(p, pend, res->start, 16, size, -1, SPECIAL | SMALL | ZEROPAD);
+ *p++ = '-';
+ p = number(p, pend, res->end, 16, size, -1, SPECIAL | SMALL | ZEROPAD);
+ *p++ = ']';
+ *p = 0;
+
+ return string(buf, end, sym, field_width, precision, flags);
+}
+
/*
* Show a '%p' thing. A kernel extension is that the '%p' is followed
* by an extra set of alphanumeric characters that are extended format
* specifiers.
*
- * Right now we just handle 'F' (for symbolic Function descriptor pointers)
- * and 'S' (for Symbolic direct pointers), but this can easily be
- * extended in the future (network address types etc).
+ * Right now we handle:
+ *
+ * - 'F' For symbolic function descriptor pointers
+ * - 'S' For symbolic direct pointers
+ * - 'R' For a struct resource pointer, it prints the range of
+ * addresses (neither the name nor the flags)
*
- * The difference between 'S' and 'F' is that on ia64 and ppc64 function
- * pointers are really function descriptors, which contain a pointer the
- * real address.
+ * Note: The difference between 'S' and 'F' is that on ia64 and ppc64
+ * function pointers are really function descriptors, which contain a
+ * pointer to the real address.
*/
static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field_width, int precision, int flags)
{
@@ -558,6 +605,8 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field
/* Fallthrough */
case 'S':
return symbol_string(buf, end, ptr, field_width, precision, flags);
+ case 'R':
+ return resource_string(buf, end, ptr, field_width, precision, flags);
}
flags |= SMALL;
if (field_width == -1) {
@@ -574,6 +623,11 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field
* @fmt: The format string to use
* @args: Arguments for the format string
*
+ * This function follows C99 vsnprintf, but has some extensions:
+ * %pS output the name of a text symbol
+ * %pF output the name of a function pointer
+ * %pR output the address range in a struct resource
+ *
* The return value is the number of characters which would
* be generated for the given input, excluding the trailing
* '\0', as per ISO C99. If you want to have the exact
@@ -799,7 +853,6 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
/* the trailing null byte doesn't count towards the total */
return str-buf;
}
-
EXPORT_SYMBOL(vsnprintf);
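
A hedged sketch of the new conversions from a caller's side (the function and message are illustrative):

#include <linux/kernel.h>
#include <linux/ioport.h>

static void announce(struct resource *res, void (*handler)(void))
{
	/* %pR prints "[0x<start>-0x<end>]": the range only, no name/flags */
	/* %pF resolves a function (descriptor) pointer to a symbol name   */
	printk(KERN_INFO "region %pR handled by %pF\n", res, handler);
}
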
/**
@@ -815,6 +868,8 @@ EXPORT_SYMBOL(vsnprintf);
*
* Call this function if you are already dealing with a va_list.
* You probably want scnprintf() instead.
+ *
+ * See the vsnprintf() documentation for format string extensions over C99.
*/
int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
@@ -823,7 +878,6 @@ int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
i=vsnprintf(buf,size,fmt,args);
return (i >= size) ? (size - 1) : i;
}
-
EXPORT_SYMBOL(vscnprintf);
/**
@@ -837,6 +891,8 @@ EXPORT_SYMBOL(vscnprintf);
* generated for the given input, excluding the trailing null,
* as per ISO C99. If the return is greater than or equal to
* @size, the resulting string is truncated.
+ *
+ * See the vsnprintf() documentation for format string extensions over C99.
*/
int snprintf(char * buf, size_t size, const char *fmt, ...)
{
@@ -848,7 +904,6 @@ int snprintf(char * buf, size_t size, const char *fmt, ...)
va_end(args);
return i;
}
-
EXPORT_SYMBOL(snprintf);
/**
@@ -886,12 +941,13 @@ EXPORT_SYMBOL(scnprintf);
*
* Call this function if you are already dealing with a va_list.
* You probably want sprintf() instead.
+ *
+ * See the vsnprintf() documentation for format string extensions over C99.
*/
int vsprintf(char *buf, const char *fmt, va_list args)
{
return vsnprintf(buf, INT_MAX, fmt, args);
}
-
EXPORT_SYMBOL(vsprintf);
/**
@@ -903,6 +959,8 @@ EXPORT_SYMBOL(vsprintf);
* The function returns the number of characters written
* into @buf. Use snprintf() or scnprintf() in order to avoid
* buffer overflows.
+ *
+ * See the vsnprintf() documentation for format string extensions over C99.
*/
int sprintf(char * buf, const char *fmt, ...)
{
@@ -914,7 +972,6 @@ int sprintf(char * buf, const char *fmt, ...)
va_end(args);
return i;
}
-
EXPORT_SYMBOL(sprintf);
/**
@@ -1143,7 +1200,6 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
return num;
}
-
EXPORT_SYMBOL(vsscanf);
/**
@@ -1162,5 +1218,4 @@ int sscanf(const char * buf, const char * fmt, ...)
va_end(args);
return i;
}
-
EXPORT_SYMBOL(sscanf);