author	Eric Dumazet <dada1@cosmosbay.com>	2008-02-06 01:37:57 -0800
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-06 10:41:09 -0800
commit	13f14b4d8be225cbb11ff2be7c048590a9ccf87b (patch)
tree	2a2421bc456e30aa0347c79b2ed8a966dcd2ce2b
parent	b41ecbebd4091a15233abab2d771e65fb82cdb20 (diff)
Use ilog2() in fs/namespace.c
We can use ilog2() in fs/namespace.c to compute hash_bits and hash_mask at compile time, not runtime.

[akpm@linux-foundation.org: clean it all up]
Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	fs/namespace.c	45
1 file changed, 11 insertions(+), 34 deletions(-)
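The point of the patch is that ilog2() of a compile-time constant folds to a constant, so the hash-table size and mask need no runtime setup loop. The following is a minimal stand-alone user-space sketch of the same pattern, not the kernel code: the 4096-byte PAGE_SIZE, the 64-byte cache-line divisor, the two-pointer list_head and the ILOG2() macro (standing in for the kernel's ilog2() from linux/log2.h) are all assumptions made for illustration.

/* Stand-alone sketch: compute hash shift/size at compile time. */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define PAGE_SIZE  4096UL   /* assumed page size */

/* ilog2() of a power-of-two constant; folds to a constant at compile time. */
#define ILOG2(n)   ((unsigned)(8 * sizeof(unsigned long) - 1 - __builtin_clzl(n)))

#define HASH_SHIFT ILOG2(PAGE_SIZE / sizeof(struct list_head))
#define HASH_SIZE  (1UL << HASH_SHIFT)

static unsigned long hash(unsigned long mnt, unsigned long dentry)
{
	/* 64 stands in for L1_CACHE_BYTES in the real code. */
	unsigned long tmp = mnt / 64 + dentry / 64;

	tmp = tmp + (tmp >> HASH_SHIFT);
	return tmp & (HASH_SIZE - 1);   /* mask derived from the constant, no hash_mask variable */
}

int main(void)
{
	printf("Mount-cache hash table entries: %lu\n", HASH_SIZE);
	printf("example bucket: %lu\n", hash(0x1000, 0x2000));
	return 0;
}

With a 16-byte list_head this prints 256 entries (shift 8); the compiler emits the shift and mask as immediates, which is exactly what the patch below achieves by replacing the runtime hash_bits/hash_mask computation in mnt_init().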
diff --git a/fs/namespace.c b/fs/namespace.c
index 61bf376e29e..e9c10cd01e1 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -25,18 +25,21 @@
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
+#include <linux/log2.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include "pnode.h"
#include "internal.h"
+#define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head))
+#define HASH_SIZE (1UL << HASH_SHIFT)
+
/* spinlock for vfsmount related operations, inplace of dcache_lock */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
static int event;
static struct list_head *mount_hashtable __read_mostly;
-static int hash_mask __read_mostly, hash_bits __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static struct rw_semaphore namespace_sem;
@@ -48,8 +51,8 @@ static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
{
unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
- tmp = tmp + (tmp >> hash_bits);
- return tmp & hash_mask;
+ tmp = tmp + (tmp >> HASH_SHIFT);
+ return tmp & (HASH_SIZE - 1);
}
struct vfsmount *alloc_vfsmnt(const char *name)
@@ -1813,9 +1816,7 @@ static void __init init_mount_tree(void)
void __init mnt_init(void)
{
- struct list_head *d;
- unsigned int nr_hash;
- int i;
+ unsigned u;
int err;
init_rwsem(&namespace_sem);
@@ -1828,35 +1829,11 @@ void __init mnt_init(void)
if (!mount_hashtable)
panic("Failed to allocate mount hash table\n");
- /*
- * Find the power-of-two list-heads that can fit into the allocation..
- * We don't guarantee that "sizeof(struct list_head)" is necessarily
- * a power-of-two.
- */
- nr_hash = PAGE_SIZE / sizeof(struct list_head);
- hash_bits = 0;
- do {
- hash_bits++;
- } while ((nr_hash >> hash_bits) != 0);
- hash_bits--;
+ printk("Mount-cache hash table entries: %lu\n", HASH_SIZE);
+
+ for (u = 0; u < HASH_SIZE; u++)
+ INIT_LIST_HEAD(&mount_hashtable[u]);
- /*
- * Re-calculate the actual number of entries and the mask
- * from the number of bits we can fit.
- */
- nr_hash = 1UL << hash_bits;
- hash_mask = nr_hash - 1;
-
- printk("Mount-cache hash table entries: %d\n", nr_hash);
-
- /* And initialize the newly allocated array */
- d = mount_hashtable;
- i = nr_hash;
- do {
- INIT_LIST_HEAD(d);
- d++;
- i--;
- } while (i);
err = sysfs_init();
if (err)
printk(KERN_WARNING "%s: sysfs_init error: %d\n",