author     Olof Johansson <olof@lixom.net>    2012-09-16 18:31:37 -0700
committer  Olof Johansson <olof@lixom.net>    2012-09-16 18:31:37 -0700
commit     2e6185f1fea6cf88e9ce25cde1d6291ddfb3d4f0 (patch)
tree       c45ae7bace055c258fba5c4c6c0340b1e3f17f05 /fs/btrfs/extent-tree.c
parent     7405a749ae14f846cc2892c36d1a9343b0264b7c (diff)
parent     fd301cc4e5ba839050be135a178031bcd0d363a5 (diff)
Merge tag 'tegra-for-3.7-drivers-i2c' of git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-tegra into next/drivers
From Stephen Warren:

ARM: tegra: i2c driver enhancements mostly related to clocking

This branch contains a number of fixes and cleanups to the Tegra I2C
driver related to clocks. These are based on the common clock conversion
in order to avoid duplicating the clock driver changes before and after
the conversion. Finally, a bug-fix related to I2C_M_NOSTART is included.

This branch is based on previous pull request tegra-for-3.7-common-clk.

* tag 'tegra-for-3.7-drivers-i2c' of git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-tegra:
  i2c: tegra: dynamically control fast clk
  i2c: tegra: I2_M_NOSTART functionality not supported in Tegra20
  ARM: tegra: clock: remove unused clock entry for i2c
  ARM: tegra: clock: add connection name in i2c clock entry
  i2c: tegra: pass proper name for getting clock
  ARM: tegra: clock: add i2c fast clock entry in clock table
  ARM: Tegra: Add smp_twd clock for Tegra20
  ARM: tegra: cpu-tegra: explicitly manage re-parenting
  ARM: tegra: fix overflow in tegra20_pll_clk_round_rate()
  ARM: tegra: Fix data type for io address
  ARM: tegra: remove tegra_timer from tegra_list_clks
  ARM: tegra30: clocks: fix the wrong tegra_audio_sync_clk_ops name
  ARM: tegra: clocks: separate tegra_clk_32k_ops from Tegra20 and Tegra30
  ARM: tegra: Remove duplicate code
  ARM: tegra: Port tegra to generic clock framework
  ARM: tegra: Add clk_tegra structure and helper functions
  ARM: tegra: Rename tegra20 clock file
  ARM: tegra20: Separate out clk ops and clk data
  ARM: tegra30: Separate out clk ops and clk data
  ARM: tegra: fix U16 divider range check
  ...

+ sync to v3.6-rc4

Resolved remove/modify conflict in arch/arm/mach-sa1100/leds-hackkit.c
caused by the sync with v3.6-rc4.

Signed-off-by: Olof Johansson <olof@lixom.net>
Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--  fs/btrfs/extent-tree.c | 123
1 file changed, 58 insertions(+), 65 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 4e1b153b7c4..ba58024d40d 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2252,6 +2252,16 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
}
/*
+ * We need to try and merge add/drops of the same ref since we
+ * can run into issues with relocate dropping the implicit ref
+ * and then it being added back again before the drop can
+ * finish. If we merged anything we need to re-loop so we can
+ * get a good ref.
+ */
+ btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
+ locked_ref);
+
+ /*
* locked_ref is the head node, so we have to go one
* node back for any delayed ref updates
*/
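
The comment added above explains why opposing add/drop entries for the same
reference are merged before a ref is run. As a rough illustration of that
cancel-out idea, here is a minimal standalone C sketch with invented names;
it is not the btrfs_merge_delayed_refs implementation, just a model of an
add and a drop on the same extent cancelling each other:

/*
 * Minimal sketch: relocation drops the implicit ref and re-adds it before
 * the drop runs; merging cancels the pair so neither has to be processed.
 * All names are invented for illustration.
 */
#include <stdio.h>

enum action { ADD_REF, DROP_REF };

struct fake_ref {
	unsigned long long bytenr;	/* extent this entry refers to */
	enum action action;
	int ref_mod;			/* refs added or dropped */
	int dead;			/* set once merged away */
};

static void merge_refs(struct fake_ref *refs, int n)
{
	for (int i = 0; i < n; i++) {
		if (refs[i].dead || refs[i].action != ADD_REF)
			continue;
		for (int j = 0; j < n; j++) {
			if (i == j || refs[j].dead ||
			    refs[j].action != DROP_REF)
				continue;
			if (refs[j].bytenr == refs[i].bytenr &&
			    refs[j].ref_mod == refs[i].ref_mod) {
				refs[i].dead = refs[j].dead = 1;
				break;
			}
		}
	}
}

int main(void)
{
	/* relocation dropped the implicit ref, then added it back */
	struct fake_ref refs[] = {
		{ 4096, DROP_REF, 1, 0 },
		{ 4096, ADD_REF,  1, 0 },
	};

	merge_refs(refs, 2);
	printf("drop dead=%d add dead=%d\n", refs[0].dead, refs[1].dead);
	return 0;
}
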
@@ -2318,12 +2328,23 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
ref->in_tree = 0;
rb_erase(&ref->rb_node, &delayed_refs->root);
delayed_refs->num_entries--;
- /*
- * we modified num_entries, but as we're currently running
- * delayed refs, skip
- * wake_up(&delayed_refs->seq_wait);
- * here.
- */
+ if (locked_ref) {
+ /*
+ * when we play the delayed ref, also correct the
+ * ref_mod on head
+ */
+ switch (ref->action) {
+ case BTRFS_ADD_DELAYED_REF:
+ case BTRFS_ADD_DELAYED_EXTENT:
+ locked_ref->node.ref_mod -= ref->ref_mod;
+ break;
+ case BTRFS_DROP_DELAYED_REF:
+ locked_ref->node.ref_mod += ref->ref_mod;
+ break;
+ default:
+ WARN_ON(1);
+ }
+ }
spin_unlock(&delayed_refs->lock);
ret = run_one_delayed_ref(trans, root, ref, extent_op,
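
The ref_mod bookkeeping added in this hunk keeps the head node's net count
consistent as individual refs are played: an ADD is subtracted from the head,
a DROP is added back. A minimal standalone sketch of that running-total idea,
with made-up names rather than the kernel structures:

#include <assert.h>

enum { ADD_DELAYED_REF, ADD_DELAYED_EXTENT, DROP_DELAYED_REF };

/* head_mod plays the role of locked_ref->node.ref_mod: the net change
 * still pending once this ref has been run. */
static int play_ref(int head_mod, int action, int ref_mod)
{
	switch (action) {
	case ADD_DELAYED_REF:
	case ADD_DELAYED_EXTENT:
		return head_mod - ref_mod;	/* an add was consumed */
	case DROP_DELAYED_REF:
		return head_mod + ref_mod;	/* a drop was consumed */
	default:
		assert(0);
		return head_mod;
	}
}

int main(void)
{
	/* head starts at +1: one add of 2 and one drop of 1 pending */
	int head = 1;

	head = play_ref(head, ADD_DELAYED_REF, 2);	/* only the drop left: -1 */
	head = play_ref(head, DROP_DELAYED_REF, 1);	/* nothing left: 0 */
	assert(head == 0);
	return 0;
}
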
@@ -2350,22 +2371,6 @@ next:
return count;
}
-static void wait_for_more_refs(struct btrfs_fs_info *fs_info,
- struct btrfs_delayed_ref_root *delayed_refs,
- unsigned long num_refs,
- struct list_head *first_seq)
-{
- spin_unlock(&delayed_refs->lock);
- pr_debug("waiting for more refs (num %ld, first %p)\n",
- num_refs, first_seq);
- wait_event(fs_info->tree_mod_seq_wait,
- num_refs != delayed_refs->num_entries ||
- fs_info->tree_mod_seq_list.next != first_seq);
- pr_debug("done waiting for more refs (num %ld, first %p)\n",
- delayed_refs->num_entries, fs_info->tree_mod_seq_list.next);
- spin_lock(&delayed_refs->lock);
-}
-
#ifdef SCRAMBLE_DELAYED_REFS
/*
* Normally delayed refs get processed in ascending bytenr order. This
@@ -2460,13 +2465,11 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_delayed_ref_node *ref;
struct list_head cluster;
- struct list_head *first_seq = NULL;
int ret;
u64 delayed_start;
int run_all = count == (unsigned long)-1;
int run_most = 0;
- unsigned long num_refs = 0;
- int consider_waiting;
+ int loops;
/* We'll clean this up in btrfs_cleanup_transaction */
if (trans->aborted)
@@ -2484,7 +2487,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
delayed_refs = &trans->transaction->delayed_refs;
INIT_LIST_HEAD(&cluster);
again:
- consider_waiting = 0;
+ loops = 0;
spin_lock(&delayed_refs->lock);
#ifdef SCRAMBLE_DELAYED_REFS
@@ -2512,31 +2515,6 @@ again:
if (ret)
break;
- if (delayed_start >= delayed_refs->run_delayed_start) {
- if (consider_waiting == 0) {
- /*
- * btrfs_find_ref_cluster looped. let's do one
- * more cycle. if we don't run any delayed ref
- * during that cycle (because we can't because
- * all of them are blocked) and if the number of
- * refs doesn't change, we avoid busy waiting.
- */
- consider_waiting = 1;
- num_refs = delayed_refs->num_entries;
- first_seq = root->fs_info->tree_mod_seq_list.next;
- } else {
- wait_for_more_refs(root->fs_info, delayed_refs,
- num_refs, first_seq);
- /*
- * after waiting, things have changed. we
- * dropped the lock and someone else might have
- * run some refs, built new clusters and so on.
- * therefore, we restart staleness detection.
- */
- consider_waiting = 0;
- }
- }
-
ret = run_clustered_refs(trans, root, &cluster);
if (ret < 0) {
spin_unlock(&delayed_refs->lock);
@@ -2549,9 +2527,26 @@ again:
if (count == 0)
break;
- if (ret || delayed_refs->run_delayed_start == 0) {
+ if (delayed_start >= delayed_refs->run_delayed_start) {
+ if (loops == 0) {
+ /*
+ * btrfs_find_ref_cluster looped. let's do one
+ * more cycle. if we don't run any delayed ref
+ * during that cycle (because we can't because
+ * all of them are blocked), bail out.
+ */
+ loops = 1;
+ } else {
+ /*
+ * no runnable refs left, stop trying
+ */
+ BUG_ON(run_all);
+ break;
+ }
+ }
+ if (ret) {
/* refs were run, let's reset staleness detection */
- consider_waiting = 0;
+ loops = 0;
}
}
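
This hunk replaces the old wait-based handling with a simple counter: when
the cluster search wraps around, allow exactly one more pass, and if it wraps
again without any refs having run, stop (BUG_ON(run_all), since in run-all
mode every ref is expected to be runnable). A compact sketch of that pattern,
where run_one_pass() is a made-up stand-in for run_clustered_refs():

#include <stdio.h>

static int run_one_pass(int pass)
{
	return pass < 3 ? 5 : 0;	/* pretend progress stalls on pass 3 */
}

int main(void)
{
	int loops = 0;

	for (int pass = 0; ; pass++) {
		int ran = run_one_pass(pass);
		int wrapped = (ran == 0);	/* stand-in for delayed_start >=
						 * run_delayed_start */

		if (wrapped) {
			if (loops == 0)
				loops = 1;	/* give it one more pass */
			else
				break;		/* still stuck: bail out */
		}
		if (ran)
			loops = 0;		/* progress resets detection */
	}
	printf("no runnable refs left, stopped\n");
	return 0;
}
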
@@ -3007,17 +3002,16 @@ again:
}
spin_unlock(&block_group->lock);
- num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024);
+ /*
+ * Try to preallocate enough space based on how big the block group is.
+ * Keep in mind this has to include any pinned space which could end up
+ * taking up quite a bit since it's not folded into the other space
+ * cache.
+ */
+ num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
if (!num_pages)
num_pages = 1;
- /*
- * Just to make absolutely sure we have enough space, we're going to
- * preallocate 12 pages worth of space for each block group. In
- * practice we ought to use at most 8, but we need extra space so we can
- * add our header and have a terminator between the extents and the
- * bitmaps.
- */
num_pages *= 16;
num_pages *= PAGE_CACHE_SIZE;
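
The new comment and the smaller 256 MiB divisor make the space-cache
preallocation scale with block-group size instead of almost always landing on
the 16-page minimum. A worked example of the arithmetic as standalone C,
assuming 4 KiB pages (an assumption; PAGE_CACHE_SIZE is architecture
dependent, and this is not kernel code):

#include <stdint.h>
#include <stdio.h>

#define FAKE_PAGE_CACHE_SIZE 4096ULL	/* assumed 4 KiB pages */

/* Mirrors the sizing in the hunk above: pages = size / 256 MiB,
 * at least 1, times 16, times the page size. */
static uint64_t cache_prealloc_bytes(uint64_t block_group_size)
{
	uint64_t num_pages = block_group_size / (256ULL * 1024 * 1024);

	if (!num_pages)
		num_pages = 1;
	return num_pages * 16 * FAKE_PAGE_CACHE_SIZE;
}

int main(void)
{
	/* 1 GiB block group: 4 * 16 = 64 pages -> 256 KiB, where the old
	 * 1 GiB divisor always gave the 16-page minimum (64 KiB) */
	printf("%llu bytes\n",
	       (unsigned long long)cache_prealloc_bytes(1ULL << 30));
	return 0;
}
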
@@ -4571,8 +4565,10 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
if (root->fs_info->quota_enabled) {
ret = btrfs_qgroup_reserve(root, num_bytes +
nr_extents * root->leafsize);
- if (ret)
+ if (ret) {
+ mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
return ret;
+ }
}
ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
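
This hunk's change is confined to the error path: delalloc_mutex is held when
the qgroup reservation can fail, and the early return previously leaked it.
A generic sketch of the unlock-before-return discipline, using a userspace
pthread mutex and invented names purely for illustration:

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t delalloc_lock = PTHREAD_MUTEX_INITIALIZER;

static int fake_quota_reserve(long bytes)
{
	return bytes > 0 ? 0 : -EDQUOT;	/* pretend the quota check can fail */
}

static int reserve_metadata(long bytes)
{
	int ret;

	pthread_mutex_lock(&delalloc_lock);

	ret = fake_quota_reserve(bytes);
	if (ret) {
		/* the path the one-line fix restores: unlock, then bail */
		pthread_mutex_unlock(&delalloc_lock);
		return ret;
	}

	/* ... the rest of the reservation work would happen here ... */

	pthread_mutex_unlock(&delalloc_lock);
	return 0;
}

int main(void)
{
	return reserve_metadata(4096) != 0 ||
	       reserve_metadata(-1) != -EDQUOT;
}
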
@@ -5294,9 +5290,6 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
rb_erase(&head->node.rb_node, &delayed_refs->root);
delayed_refs->num_entries--;
- smp_mb();
- if (waitqueue_active(&root->fs_info->tree_mod_seq_wait))
- wake_up(&root->fs_info->tree_mod_seq_wait);
/*
* we don't take a ref on the node because we're removing it from the