author    Kent Overstreet <kmo@daterainc.com>    2013-07-24 17:29:09 -0700
committer Kent Overstreet <kmo@daterainc.com>    2013-11-10 21:56:04 -0800
commit    35fcd848d72683141052aa9880542461577f2dbe
tree      f88ebdbc88c9c7eebf33f603a2deb24e39e2bb9a /drivers/md/bcache/request.c
parent    e8e1d4682c8cb06dbcb5ef7bb851bf9bcb889c84
bcache: Convert bucket_wait to wait_queue_head_t
At one point we did do fancy asynchronous waiting stuff with bucket_wait, but that's all gone (and bucket_wait is used a lot less than it used to be). So use the standard primitives.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
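The "standard primitives" here are the kernel's wait_queue_head_t API (DECLARE_WAIT_QUEUE_HEAD(), wait_event(), wake_up()), replacing the closure-based waiting (closure_init_stack() plus passing the closure down to the allocator) visible in the hunk below. A minimal sketch of that pattern, using made-up names (free_buckets, free_bucket_wait) rather than the real bcache fields:

#include <linux/atomic.h>
#include <linux/wait.h>

/*
 * Illustrative sketch only, not bcache code: free_buckets and
 * free_bucket_wait are placeholders.  A waiter sleeps on a
 * wait_queue_head_t until buckets are available; the allocator
 * bumps the count and wakes the waiters.
 */
static atomic_t free_buckets = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(free_bucket_wait);

static void wait_for_free_bucket(void)
{
        /* Sleep until the condition holds; it is re-checked after every wakeup. */
        wait_event(free_bucket_wait, atomic_read(&free_buckets) > 0);
}

static void buckets_freed(int n)
{
        atomic_add(n, &free_buckets);
        /* Wake everything sleeping in wait_for_free_bucket(). */
        wake_up(&free_bucket_wait);
}

Because wait_event() re-evaluates its condition after every wakeup, callers only need to say whether they are willing to block at all, which is why a plain flag can replace the per-request closure in the diff below.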
Diffstat (limited to 'drivers/md/bcache/request.c')
-rw-r--r--  drivers/md/bcache/request.c  |  9
1 file changed, 2 insertions(+), 7 deletions(-)
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index d85c7001df6..26d18f4bf4a 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -350,14 +350,8 @@ static bool bch_alloc_sectors(struct bkey *k, unsigned sectors,
 	struct cache_set *c = s->op.c;
 	struct open_bucket *b;
 	BKEY_PADDED(key) alloc;
-	struct closure cl, *w = NULL;
 	unsigned i;
 
-	if (s->writeback) {
-		closure_init_stack(&cl);
-		w = &cl;
-	}
-
 	/*
 	 * We might have to allocate a new bucket, which we can't do with a
 	 * spinlock held. So if we have to allocate, we drop the lock, allocate
@@ -375,7 +369,8 @@ static bool bch_alloc_sectors(struct bkey *k, unsigned sectors,
 
 		spin_unlock(&c->data_bucket_lock);
 
-		if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, w))
+		if (bch_bucket_alloc_set(c, watermark, &alloc.key,
+					 1, s->writeback))
 			return false;
 
 		spin_lock(&c->data_bucket_lock);
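Net effect at this call site: bch_alloc_sectors() no longer builds an on-stack closure just to request blocking allocation for writeback; it passes s->writeback straight through as a wait flag. A hedged sketch of what the bch_bucket_alloc_set() prototypes before and after presumably look like (parameter names are guesses inferred from this hunk, not copied from alloc.c):

/* Before this series: the last argument was a closure to wait on. */
int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
			 struct bkey *k, int n, struct closure *cl);

/* After (as implied by the new call): a plain "may we block?" flag. */
int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
			 struct bkey *k, int n, bool wait);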