Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig               6
-rw-r--r--  lib/Kconfig.debug         3
-rw-r--r--  lib/Makefile              9
-rw-r--r--  lib/genalloc.c          188
-rw-r--r--  lib/idr.c                 2
-rw-r--r--  lib/kernel_lock.c        55
-rw-r--r--  lib/klist.c             265
-rw-r--r--  lib/kobject.c             2
-rw-r--r--  lib/kobject_uevent.c      6
-rw-r--r--  lib/smp_processor_id.c   55
10 files changed, 527 insertions, 64 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index eeb45225248..2d4d4e3bc4a 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -40,6 +40,12 @@ config ZLIB_DEFLATE
tristate
#
+# Generic allocator support is selected if needed
+#
+config GENERIC_ALLOCATOR
+ boolean
+
+#
# reed solomon support is select'ed if needed
#
config REED_SOLOMON
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ac23847ce0e..0c421295e61 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -151,7 +151,8 @@ config DEBUG_FS
config FRAME_POINTER
bool "Compile the kernel with frame pointers"
- depends on DEBUG_KERNEL && ((X86 && !X86_64) || CRIS || M68K || M68KNOMMU || FRV)
+ depends on DEBUG_KERNEL && ((X86 && !X86_64) || CRIS || M68K || M68KNOMMU || FRV || UML)
+ default y if DEBUG_INFO && UML
help
If you say Y here the resulting kernel image will be slightly larger
and slower, but it will give very useful debugging information.
diff --git a/lib/Makefile b/lib/Makefile
index 7c70db79c0e..dcb4231916e 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -4,9 +4,10 @@
lib-y := errno.o ctype.o string.o vsprintf.o cmdline.o \
bust_spinlocks.o rbtree.o radix-tree.o dump_stack.o \
- kobject.o kref.o idr.o div64.o int_sqrt.o \
- bitmap.o extable.o kobject_uevent.o prio_tree.o sha1.o \
- halfmd4.o
+ idr.o div64.o int_sqrt.o bitmap.o extable.o prio_tree.o \
+ sha1.o halfmd4.o
+
+lib-y += kobject.o kref.o kobject_uevent.o klist.o
obj-y += sort.o parser.o
@@ -19,6 +20,7 @@ lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
+obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
lib-y += dec_and_lock.o
@@ -28,6 +30,7 @@ obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
obj-$(CONFIG_CRC32) += crc32.o
obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
+obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o
obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/
diff --git a/lib/genalloc.c b/lib/genalloc.c
new file mode 100644
index 00000000000..d6d30d2e716
--- /dev/null
+++ b/lib/genalloc.c
@@ -0,0 +1,188 @@
+/*
+ * Basic general purpose allocator for managing special purpose memory
+ * not managed by the regular kmalloc/kfree interface.
+ * Uses for this include on-device special memory, uncached memory,
+ * etc.
+ *
+ * This code is based on the buddy allocator found in the sym53c8xx_2
+ * driver Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>,
+ * and adapted for general purpose use.
+ *
+ * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/genalloc.h>
+
+#include <asm/page.h>
+
+
+struct gen_pool *gen_pool_create(int nr_chunks, int max_chunk_shift,
+ unsigned long (*fp)(struct gen_pool *),
+ unsigned long data)
+{
+ struct gen_pool *poolp;
+ unsigned long tmp;
+ int i;
+
+ /*
+ * This is really an arbitrary limit; +10 is enough for
+ * IA64_GRANULE_SHIFT, aka 16MB. If anyone needs a larger limit,
+ * this can be increased without problems.
+ */
+ if ((max_chunk_shift > (PAGE_SHIFT + 10)) ||
+ ((max_chunk_shift < ALLOC_MIN_SHIFT) && max_chunk_shift))
+ return NULL;
+
+ if (!max_chunk_shift)
+ max_chunk_shift = PAGE_SHIFT;
+
+ poolp = kmalloc(sizeof(struct gen_pool), GFP_KERNEL);
+ if (!poolp)
+ return NULL;
+ memset(poolp, 0, sizeof(struct gen_pool));
+ poolp->h = kmalloc(sizeof(struct gen_pool_link) *
+ (max_chunk_shift - ALLOC_MIN_SHIFT + 1),
+ GFP_KERNEL);
+ if (!poolp->h) {
+ printk(KERN_WARNING "gen_pool_create() failed to allocate\n");
+ kfree(poolp);
+ return NULL;
+ }
+ memset(poolp->h, 0, sizeof(struct gen_pool_link) *
+ (max_chunk_shift - ALLOC_MIN_SHIFT + 1));
+
+ spin_lock_init(&poolp->lock);
+ poolp->get_new_chunk = fp;
+ poolp->max_chunk_shift = max_chunk_shift;
+ poolp->private = data;
+
+ for (i = 0; i < nr_chunks; i++) {
+ tmp = poolp->get_new_chunk(poolp);
+ printk(KERN_INFO "allocated %lx\n", tmp);
+ if (!tmp)
+ break;
+ gen_pool_free(poolp, tmp, (1 << poolp->max_chunk_shift));
+ }
+
+ return poolp;
+}
+EXPORT_SYMBOL(gen_pool_create);
+
+
+/*
+ * Simple power of two buddy-like generic allocator.
+ * Provides naturally aligned memory chunks.
+ */
+unsigned long gen_pool_alloc(struct gen_pool *poolp, int size)
+{
+ int j, i, s, max_chunk_size;
+ unsigned long a, flags;
+ struct gen_pool_link *h = poolp->h;
+
+ max_chunk_size = 1 << poolp->max_chunk_shift;
+
+ if (size > max_chunk_size)
+ return 0;
+
+ i = 0;
+
+ size = max(size, 1 << ALLOC_MIN_SHIFT);
+ s = roundup_pow_of_two(size);
+
+ j = i;
+
+ spin_lock_irqsave(&poolp->lock, flags);
+ while (!h[j].next) {
+ if (s == max_chunk_size) {
+ struct gen_pool_link *ptr;
+ spin_unlock_irqrestore(&poolp->lock, flags);
+ ptr = (struct gen_pool_link *)poolp->get_new_chunk(poolp);
+ spin_lock_irqsave(&poolp->lock, flags);
+ h[j].next = ptr;
+ if (h[j].next)
+ h[j].next->next = NULL;
+ break;
+ }
+ j++;
+ s <<= 1;
+ }
+ a = (unsigned long) h[j].next;
+ if (a) {
+ h[j].next = h[j].next->next;
+ /*
+ * This should be split into a separate function doing
+ * the chunk split, in order to support custom handling
+ * of memory not physically accessible by the host.
+ */
+ while (j > i) {
+ j -= 1;
+ s >>= 1;
+ h[j].next = (struct gen_pool_link *) (a + s);
+ h[j].next->next = NULL;
+ }
+ }
+ spin_unlock_irqrestore(&poolp->lock, flags);
+ return a;
+}
+EXPORT_SYMBOL(gen_pool_alloc);
+
+
+/*
+ * Counter-part of the generic allocator.
+ */
+void gen_pool_free(struct gen_pool *poolp, unsigned long ptr, int size)
+{
+ struct gen_pool_link *q;
+ struct gen_pool_link *h = poolp->h;
+ unsigned long a, b, flags;
+ int i, s, max_chunk_size;
+
+ max_chunk_size = 1 << poolp->max_chunk_shift;
+
+ if (size > max_chunk_size)
+ return;
+
+ i = 0;
+
+ size = max(size, 1 << ALLOC_MIN_SHIFT);
+ s = roundup_pow_of_two(size);
+
+ a = ptr;
+
+ spin_lock_irqsave(&poolp->lock, flags);
+ while (1) {
+ if (s == max_chunk_size) {
+ ((struct gen_pool_link *)a)->next = h[i].next;
+ h[i].next = (struct gen_pool_link *)a;
+ break;
+ }
+ b = a ^ s;
+ q = &h[i];
+
+ while (q->next && q->next != (struct gen_pool_link *)b)
+ q = q->next;
+
+ if (!q->next) {
+ ((struct gen_pool_link *)a)->next = h[i].next;
+ h[i].next = (struct gen_pool_link *)a;
+ break;
+ }
+ q->next = q->next->next;
+ a = a & b;
+ s <<= 1;
+ i++;
+ }
+ spin_unlock_irqrestore(&poolp->lock, flags);
+}
+EXPORT_SYMBOL(gen_pool_free);
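
For context, a minimal usage sketch of this early genalloc API. The chunk
callback, pool geometry and __get_free_pages backing below are illustrative
assumptions, not part of the patch:

#include <linux/genalloc.h>
#include <linux/mm.h>
#include <linux/errno.h>

/* Hypothetical chunk supplier: hands the pool one chunk of
 * 1 << max_chunk_shift bytes each time it asks for more memory. */
static unsigned long example_get_chunk(struct gen_pool *pool)
{
	return __get_free_pages(GFP_KERNEL,
				pool->max_chunk_shift - PAGE_SHIFT);
}

static struct gen_pool *example_pool;

static int example_setup(void)
{
	unsigned long addr;

	/* Two chunks up front, each 2^(PAGE_SHIFT + 1) bytes. */
	example_pool = gen_pool_create(2, PAGE_SHIFT + 1,
				       example_get_chunk, 0);
	if (!example_pool)
		return -ENOMEM;

	addr = gen_pool_alloc(example_pool, 256);	/* naturally aligned */
	if (addr)
		gen_pool_free(example_pool, addr, 256);
	return 0;
}
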
diff --git a/lib/idr.c b/lib/idr.c
index 81fc430602e..c5be889de44 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -175,7 +175,7 @@ build_up:
* Add a new layer to the top of the tree if the requested
* id is larger than the currently allocated space.
*/
- while ((layers < MAX_LEVEL) && (id >= (1 << (layers*IDR_BITS)))) {
+ while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
layers++;
if (!p->count)
continue;
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 99b0ae3d51d..bd2bc5d887b 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -9,61 +9,6 @@
#include <linux/module.h>
#include <linux/kallsyms.h>
-#if defined(CONFIG_PREEMPT) && defined(__smp_processor_id) && \
- defined(CONFIG_DEBUG_PREEMPT)
-
-/*
- * Debugging check.
- */
-unsigned int smp_processor_id(void)
-{
- unsigned long preempt_count = preempt_count();
- int this_cpu = __smp_processor_id();
- cpumask_t this_mask;
-
- if (likely(preempt_count))
- goto out;
-
- if (irqs_disabled())
- goto out;
-
- /*
- * Kernel threads bound to a single CPU can safely use
- * smp_processor_id():
- */
- this_mask = cpumask_of_cpu(this_cpu);
-
- if (cpus_equal(current->cpus_allowed, this_mask))
- goto out;
-
- /*
- * It is valid to assume CPU-locality during early bootup:
- */
- if (system_state != SYSTEM_RUNNING)
- goto out;
-
- /*
- * Avoid recursion:
- */
- preempt_disable();
-
- if (!printk_ratelimit())
- goto out_enable;
-
- printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] code: %s/%d\n", preempt_count(), current->comm, current->pid);
- print_symbol("caller is %s\n", (long)__builtin_return_address(0));
- dump_stack();
-
-out_enable:
- preempt_enable_no_resched();
-out:
- return this_cpu;
-}
-
-EXPORT_SYMBOL(smp_processor_id);
-
-#endif /* PREEMPT && __smp_processor_id && DEBUG_PREEMPT */
-
#ifdef CONFIG_PREEMPT_BKL
/*
* The 'big kernel semaphore'
diff --git a/lib/klist.c b/lib/klist.c
new file mode 100644
index 00000000000..738ab810160
--- /dev/null
+++ b/lib/klist.c
@@ -0,0 +1,265 @@
+/*
+ * klist.c - Routines for manipulating klists.
+ *
+ *
+ * This klist interface provides a couple of structures that wrap around
+ * struct list_head to provide explicit list "head" (struct klist) and
+ * list "node" (struct klist_node) objects. For struct klist, a spinlock
+ * is included that protects access to the actual list itself. struct
+ * klist_node provides a pointer to the klist that owns it and a kref
+ * reference count that indicates the number of current users of that node
+ * in the list.
+ *
+ * The entire point is to provide an interface for iterating over a list
+ * that is safe and allows for modification of the list during the
+ * iteration (e.g. insertion and removal), including modification of the
+ * current node on the list.
+ *
+ * It works using a third object type - struct klist_iter - that is declared
+ * and initialized before an iteration. klist_next() is used to acquire the
+ * next element in the list. It returns NULL if there are no more items.
+ * Internally, that routine takes the klist's lock, decrements the reference
+ * count of the previous klist_node and increments the count of the next
+ * klist_node. It then drops the lock and returns.
+ *
+ * There are primitives for adding and removing nodes to/from a klist.
+ * When deleting, klist_del() will simply decrement the reference count.
+ * Only when the count goes to 0 is the node removed from the list.
+ * klist_remove() will try to delete the node from the list and block
+ * until it is actually removed. This is useful for objects (like devices)
+ * that have been removed from the system and must be freed (but must wait
+ * until all accessors have finished).
+ *
+ * Copyright (C) 2005 Patrick Mochel
+ *
+ * This file is released under the GPL v2.
+ */
+
+#include <linux/klist.h>
+#include <linux/module.h>
+
+
+/**
+ * klist_init - Initialize a klist structure.
+ * @k: The klist we're initializing.
+ */
+
+void klist_init(struct klist * k)
+{
+ INIT_LIST_HEAD(&k->k_list);
+ spin_lock_init(&k->k_lock);
+}
+
+EXPORT_SYMBOL_GPL(klist_init);
+
+
+static void add_head(struct klist * k, struct klist_node * n)
+{
+ spin_lock(&k->k_lock);
+ list_add(&n->n_node, &k->k_list);
+ spin_unlock(&k->k_lock);
+}
+
+static void add_tail(struct klist * k, struct klist_node * n)
+{
+ spin_lock(&k->k_lock);
+ list_add_tail(&n->n_node, &k->k_list);
+ spin_unlock(&k->k_lock);
+}
+
+
+static void klist_node_init(struct klist * k, struct klist_node * n)
+{
+ INIT_LIST_HEAD(&n->n_node);
+ init_completion(&n->n_removed);
+ kref_init(&n->n_ref);
+ n->n_klist = k;
+}
+
+
+/**
+ * klist_add_head - Initialize a klist_node and add it to front.
+ * @k: klist it's going on.
+ * @n: node we're adding.
+ */
+
+void klist_add_head(struct klist * k, struct klist_node * n)
+{
+ klist_node_init(k, n);
+ add_head(k, n);
+}
+
+EXPORT_SYMBOL_GPL(klist_add_head);
+
+
+/**
+ * klist_add_tail - Initialize a klist_node and add it to back.
+ * @k: klist it's going on.
+ * @n: node we're adding.
+ */
+
+void klist_add_tail(struct klist * k, struct klist_node * n)
+{
+ klist_node_init(k, n);
+ add_tail(k, n);
+}
+
+EXPORT_SYMBOL_GPL(klist_add_tail);
+
+
+static void klist_release(struct kref * kref)
+{
+ struct klist_node * n = container_of(kref, struct klist_node, n_ref);
+ list_del(&n->n_node);
+ complete(&n->n_removed);
+ n->n_klist = NULL;
+}
+
+static int klist_dec_and_del(struct klist_node * n)
+{
+ return kref_put(&n->n_ref, klist_release);
+}
+
+
+/**
+ * klist_del - Decrement the reference count of node and try to remove.
+ * @n: node we're deleting.
+ */
+
+void klist_del(struct klist_node * n)
+{
+ struct klist * k = n->n_klist;
+
+ spin_lock(&k->k_lock);
+ klist_dec_and_del(n);
+ spin_unlock(&k->k_lock);
+}
+
+EXPORT_SYMBOL_GPL(klist_del);
+
+
+/**
+ * klist_remove - Decrement the refcount of node and wait for it to go away.
+ * @n: node we're removing.
+ */
+
+void klist_remove(struct klist_node * n)
+{
+ struct klist * k = n->n_klist;
+ spin_lock(&k->k_lock);
+ klist_dec_and_del(n);
+ spin_unlock(&k->k_lock);
+ wait_for_completion(&n->n_removed);
+}
+
+EXPORT_SYMBOL_GPL(klist_remove);
+
+
+/**
+ * klist_node_attached - Say whether a node is bound to a list or not.
+ * @n: Node that we're testing.
+ */
+
+int klist_node_attached(struct klist_node * n)
+{
+ return (n->n_klist != NULL);
+}
+
+EXPORT_SYMBOL_GPL(klist_node_attached);
+
+
+/**
+ * klist_iter_init_node - Initialize a klist_iter structure.
+ * @k: klist we're iterating.
+ * @i: klist_iter we're filling.
+ * @n: node to start with.
+ *
+ * Similar to klist_iter_init(), but starts the action off with @n,
+ * instead of with the list head.
+ */
+
+void klist_iter_init_node(struct klist * k, struct klist_iter * i, struct klist_node * n)
+{
+ i->i_klist = k;
+ i->i_head = &k->k_list;
+ i->i_cur = n;
+}
+
+EXPORT_SYMBOL_GPL(klist_iter_init_node);
+
+
+/**
+ * klist_iter_init - Initialize a klist_iter structure.
+ * @k: klist we're iterating.
+ * @i: klist_iter structure we're filling.
+ *
+ * Similar to klist_iter_init_node(), but starts with the list head.
+ */
+
+void klist_iter_init(struct klist * k, struct klist_iter * i)
+{
+ klist_iter_init_node(k, i, NULL);
+}
+
+EXPORT_SYMBOL_GPL(klist_iter_init);
+
+
+/**
+ * klist_iter_exit - Finish a list iteration.
+ * @i: Iterator structure.
+ *
+ * Must be called when done iterating over list, as it decrements the
+ * refcount of the current node. Necessary in case iteration exited before
+ * the end of the list was reached, and always good form.
+ */
+
+void klist_iter_exit(struct klist_iter * i)
+{
+ if (i->i_cur) {
+ klist_del(i->i_cur);
+ i->i_cur = NULL;
+ }
+}
+
+EXPORT_SYMBOL_GPL(klist_iter_exit);
+
+
+static struct klist_node * to_klist_node(struct list_head * n)
+{
+ return container_of(n, struct klist_node, n_node);
+}
+
+
+/**
+ * klist_next - Ante up next node in list.
+ * @i: Iterator structure.
+ *
+ * First grab list lock. Decrement the reference count of the previous
+ * node, if there was one. Grab the next node, increment its reference
+ * count, drop the lock, and return that next node.
+ */
+
+struct klist_node * klist_next(struct klist_iter * i)
+{
+ struct list_head * next;
+ struct klist_node * knode = NULL;
+
+ spin_lock(&i->i_klist->k_lock);
+ if (i->i_cur) {
+ next = i->i_cur->n_node.next;
+ klist_dec_and_del(i->i_cur);
+ } else
+ next = i->i_head->next;
+
+ if (next != i->i_head) {
+ knode = to_klist_node(next);
+ kref_get(&knode->n_ref);
+ }
+ i->i_cur = knode;
+ spin_unlock(&i->i_klist->k_lock);
+ return knode;
+}
+
+EXPORT_SYMBOL_GPL(klist_next);
+
+
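
As a rough illustration of the iteration pattern described in the klist.c
header comment, a sketch; the example_dev structure and its embedded
klist_node are assumptions for illustration only:

#include <linux/klist.h>
#include <linux/kernel.h>

/* Hypothetical object that lives on a klist. */
struct example_dev {
	struct klist_node	knode;
	int			id;
};

static struct klist example_list;	/* klist_init()'ed once at startup */

static void example_add(struct example_dev *d)
{
	klist_add_tail(&example_list, &d->knode);
}

/* Iteration tolerates concurrent insertion/removal: each visited node is
 * pinned by its kref until klist_next() moves past it. */
static void example_walk(void)
{
	struct klist_iter iter;
	struct klist_node *n;

	klist_iter_init(&example_list, &iter);
	while ((n = klist_next(&iter))) {
		struct example_dev *d =
			container_of(n, struct example_dev, knode);
		printk(KERN_INFO "visiting example_dev %d\n", d->id);
	}
	klist_iter_exit(&iter);
}
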
diff --git a/lib/kobject.c b/lib/kobject.c
index 94048826624..dd0917dd9fa 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -279,7 +279,7 @@ EXPORT_SYMBOL(kobject_set_name);
* @new_name: object's new name
*/
-int kobject_rename(struct kobject * kobj, char *new_name)
+int kobject_rename(struct kobject * kobj, const char *new_name)
{
int error = 0;
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 2a4e7671eaf..8e49d21057e 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -197,7 +197,7 @@ void kobject_hotplug(struct kobject *kobj, enum kobject_action action)
int i = 0;
int retval;
char *kobj_path = NULL;
- char *name = NULL;
+ const char *name = NULL;
char *action_string;
u64 seq;
struct kobject *top_kobj = kobj;
@@ -246,10 +246,10 @@ void kobject_hotplug(struct kobject *kobj, enum kobject_action action)
if (hotplug_ops->name)
name = hotplug_ops->name(kset, kobj);
if (name == NULL)
- name = kset->kobj.name;
+ name = kobject_name(&kset->kobj);
argv [0] = hotplug_path;
- argv [1] = name;
+ argv [1] = (char *)name; /* won't be changed but 'const' has to go */
argv [2] = NULL;
/* minimal command environment */
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
new file mode 100644
index 00000000000..42c08ef828c
--- /dev/null
+++ b/lib/smp_processor_id.c
@@ -0,0 +1,55 @@
+/*
+ * lib/smp_processor_id.c
+ *
+ * DEBUG_PREEMPT variant of smp_processor_id().
+ */
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+
+unsigned int debug_smp_processor_id(void)
+{
+ unsigned long preempt_count = preempt_count();
+ int this_cpu = raw_smp_processor_id();
+ cpumask_t this_mask;
+
+ if (likely(preempt_count))
+ goto out;
+
+ if (irqs_disabled())
+ goto out;
+
+ /*
+ * Kernel threads bound to a single CPU can safely use
+ * smp_processor_id():
+ */
+ this_mask = cpumask_of_cpu(this_cpu);
+
+ if (cpus_equal(current->cpus_allowed, this_mask))
+ goto out;
+
+ /*
+ * It is valid to assume CPU-locality during early bootup:
+ */
+ if (system_state != SYSTEM_RUNNING)
+ goto out;
+
+ /*
+ * Avoid recursion:
+ */
+ preempt_disable();
+
+ if (!printk_ratelimit())
+ goto out_enable;
+
+ printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] code: %s/%d\n", preempt_count(), current->comm, current->pid);
+ print_symbol("caller is %s\n", (long)__builtin_return_address(0));
+ dump_stack();
+
+out_enable:
+ preempt_enable_no_resched();
+out:
+ return this_cpu;
+}
+
+EXPORT_SYMBOL(debug_smp_processor_id);
+
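
The header side of the smp_processor_id() rework is outside this lib/-only
diffstat; presumably, under CONFIG_DEBUG_PREEMPT the generic header maps
smp_processor_id() to the checked routine while raw_smp_processor_id()
remains the unchecked fast path. A sketch of that assumed glue:

/* Assumed wiring, e.g. in <linux/smp.h> -- not part of this diff. */
#ifdef CONFIG_DEBUG_PREEMPT
extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#else
# define smp_processor_id() raw_smp_processor_id()
#endif
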