author     Chris Mason <chris.mason@oracle.com>    2008-06-25 16:14:04 -0400
committer  Chris Mason <chris.mason@oracle.com>    2008-09-25 11:04:04 -0400
commit     f9efa9c784aa3b801feb367f72c6867d26fb348e (patch)
tree       d9f678310c6c8a4daf2003fa4f6b4164111dbbd7 /fs/btrfs
parent     3f157a2fd2ad731e1ed9964fecdc5f459f04a4a4 (diff)
Btrfs: Reduce contention on the root node
This calls unlock_up sooner in btrfs_search_slot in order to decrease
the amount of work done with the higher level tree locks held.

Also, it changes btrfs_tree_lock to spin for a bit against the page
lock before scheduling.  This makes a big difference in context switch
rate under highly contended workloads.

Longer term, a better locking structure is needed than the page lock.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
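The spin-then-block idea the message describes is easiest to see outside the kernel. Below is a minimal userspace sketch of the same pattern, assuming POSIX threads: pthread_mutex_trylock() stands in for TestSetPageLocked(), the final blocking pthread_mutex_lock() for lock_page(), and hybrid_lock() and SPIN_TRIES are hypothetical names invented for this illustration, not btrfs code.

#include <pthread.h>

#if defined(__x86_64__) || defined(__i386__)
#include <immintrin.h>
#define cpu_relax() _mm_pause()         /* x86 PAUSE hint, like the kernel's cpu_relax() */
#else
#define cpu_relax() do { } while (0)    /* portable no-op fallback */
#endif

#define SPIN_TRIES 512  /* same retry budget the patch uses before sleeping */

/*
 * hybrid_lock() is a hypothetical helper for this sketch: try the lock
 * without blocking, spin briefly with a pause hint, and only fall back
 * to the sleeping acquire once the lock has stayed contended.
 */
static void hybrid_lock(pthread_mutex_t *m)
{
        int i;

        if (pthread_mutex_trylock(m) == 0)      /* 0 means we got the lock */
                return;
        for (i = 0; i < SPIN_TRIES; i++) {
                cpu_relax();
                if (pthread_mutex_trylock(m) == 0)
                        return;
        }
        pthread_mutex_lock(m);                  /* contended: give up and sleep */
}

A thread that wins during the spin phase never enters the scheduler at all, which is where the drop in context switch rate reported above comes from.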
Diffstat (limited to 'fs/btrfs')
-rw-r--r--   fs/btrfs/ctree.c     17
-rw-r--r--   fs/btrfs/locking.c   10
2 files changed, 21 insertions, 6 deletions
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 0cb80f32a9c..c6759fc1004 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1313,16 +1313,13 @@ again:
                                 slot = p->slots[level];
                                 BUG_ON(btrfs_header_nritems(b) == 1);
                         }
+                        unlock_up(p, level, lowest_unlock);
+
                         /* this is only true while dropping a snapshot */
                         if (level == lowest_level) {
-                                unlock_up(p, level, lowest_unlock);
                                 break;
                         }
 
-                        if (should_reada)
-                                reada_for_search(root, p, level, slot,
-                                                 key->objectid);
-
                         blocknr = btrfs_node_blockptr(b, slot);
                         gen = btrfs_node_ptr_generation(b, slot);
                         blocksize = btrfs_level_size(root, level - 1);
@@ -1340,6 +1337,11 @@ again:
                                         btrfs_release_path(NULL, p);
                                         if (tmp)
                                                 free_extent_buffer(tmp);
+                                        if (should_reada)
+                                                reada_for_search(root, p,
+                                                                 level, slot,
+                                                                 key->objectid);
+
                                         tmp = read_tree_block(root, blocknr,
                                                               blocksize, gen);
                                         if (tmp)
@@ -1348,12 +1350,15 @@ again:
} else {
if (tmp)
free_extent_buffer(tmp);
+ if (should_reada)
+ reada_for_search(root, p,
+ level, slot,
+ key->objectid);
b = read_node_slot(root, b, slot);
}
}
if (!p->skip_locking)
btrfs_tree_lock(b);
- unlock_up(p, level, lowest_unlock);
} else {
p->slots[level] = slot;
if (ins_len > 0 && btrfs_leaf_free_space(root, b) <
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 80813a307b4..058a506a0dd 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -27,6 +27,16 @@
 
 int btrfs_tree_lock(struct extent_buffer *eb)
 {
+        int i;
+
+        if (!TestSetPageLocked(eb->first_page))
+                return 0;
+        for (i = 0; i < 512; i++) {
+                cpu_relax();
+                if (!TestSetPageLocked(eb->first_page))
+                        return 0;
+        }
+        cpu_relax();
         lock_page(eb->first_page);
         return 0;
 }
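A note on the locking.c change: TestSetPageLocked() is an atomic test-and-set on the page's lock bit that returns the old value, so a zero result means the lock was just acquired without blocking; cpu_relax() is a CPU pause/backoff hint, and lock_page() sleeps until the bit is free. The 512-iteration budget appears to be an empirically chosen cap: long enough that short critical sections are usually won by spinning, short enough to bound the CPU wasted before falling back to the scheduler.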