-rw-r--r--   block/cfq-iosched.c   19
-rw-r--r--   fs/bio.c              28
-rw-r--r--   mm/backing-dev.c       3
3 files changed, 32 insertions(+), 18 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 069a61017c0..aa1e9535e35 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -196,6 +196,7 @@ enum cfqq_state_flags {
CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */
CFQ_CFQQ_FLAG_sync, /* synchronous queue */
CFQ_CFQQ_FLAG_coop, /* has done a coop jump of the queue */
+ CFQ_CFQQ_FLAG_coop_preempt, /* coop preempt: keep coop on first schedule */
};
#define CFQ_CFQQ_FNS(name) \
@@ -222,6 +223,7 @@ CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
CFQ_CFQQ_FNS(coop);
+CFQ_CFQQ_FNS(coop_preempt);
#undef CFQ_CFQQ_FNS
#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
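For reference, since the generated helpers are used throughout this patch: CFQ_CFQQ_FNS(coop_preempt) stamps out cfq_mark_cfqq_coop_preempt(), cfq_clear_cfqq_coop_preempt() and the cfq_cfqq_coop_preempt() test. The generator, elided from the hunk context above, looks roughly like this:

#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}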
@@ -945,10 +947,13 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
{
if (!cfqq) {
cfqq = cfq_get_next_queue(cfqd);
- if (cfqq)
+ if (cfqq && !cfq_cfqq_coop_preempt(cfqq))
cfq_clear_cfqq_coop(cfqq);
}
+ if (cfqq)
+ cfq_clear_cfqq_coop_preempt(cfqq);
+
__cfq_set_active_queue(cfqd, cfqq);
return cfqq;
}
@@ -2051,7 +2056,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
* it's a metadata request and the current queue is doing regular IO.
*/
if (rq_is_meta(rq) && !cfqq->meta_pending)
- return false;
+ return true;
/*
* Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
@@ -2066,8 +2071,16 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
* if this request is as-good as one we would expect from the
* current cfqq, let it preempt
*/
- if (cfq_rq_close(cfqd, rq))
+ if (cfq_rq_close(cfqd, rq) && (!cfq_cfqq_coop(new_cfqq) ||
+ cfqd->busy_queues == 1)) {
+ /*
+ * Mark the new queue coop_preempt, so its coop flag will not be
+ * cleared when the queue is scheduled for the first time
+ */
+ cfq_mark_cfqq_coop_preempt(new_cfqq);
+ cfq_mark_cfqq_coop(new_cfqq);
return true;
+ }
return false;
}
diff --git a/fs/bio.c b/fs/bio.c
index 402cb84a92a..12da5db8682 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -325,8 +325,16 @@ static void bio_fs_destructor(struct bio *bio)
* @gfp_mask: allocation mask to use
* @nr_iovecs: number of iovecs
*
- * Allocate a new bio with @nr_iovecs bvecs. If @gfp_mask
- * contains __GFP_WAIT, the allocation is guaranteed to succeed.
+ * bio_alloc will allocate a bio and associated bio_vec array that can hold
+ * at least @nr_iovecs entries. Allocations will be done from the
+ * fs_bio_set. Also see @bio_alloc_bioset and @bio_kmalloc.
+ *
+ * If %__GFP_WAIT is set, then bio_alloc will always be able to allocate
+ * a bio. This is due to the mempool guarantees. To make this work, callers
+ * must never allocate more than 1 bio at a time from this pool. Callers
+ * that need to allocate more than 1 bio must always submit the previously
+ * allocated bio for IO before attempting to allocate a new one. Failure to
+ * do so can cause livelocks under memory pressure.
*
* RETURNS:
* Pointer to new bio on success, NULL on failure.
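To make the one-bio-at-a-time rule concrete, here is a minimal sketch of a compliant caller; everything except the bio/block API calls is hypothetical:

/*
 * Each bio is submitted before the next bio_alloc(), so at most one
 * fs_bio_set bio is held at a time and the __GFP_WAIT mempool
 * guarantee cannot livelock. Completion handling is reduced to a
 * single bio_put() in the end_io callback.
 */
static void write_pages_end_io(struct bio *bio, int err)
{
	bio_put(bio);
}

static void write_pages(struct block_device *bdev, sector_t sector,
			struct page **pages, int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		/* GFP_NOIO includes __GFP_WAIT: may sleep, never NULL */
		struct bio *bio = bio_alloc(GFP_NOIO, 1);

		bio->bi_bdev = bdev;
		bio->bi_sector = sector + i * (PAGE_SIZE >> 9);
		bio->bi_end_io = write_pages_end_io;
		bio_add_page(bio, pages[i], PAGE_SIZE, 0);
		submit_bio(WRITE, bio);	/* hand it back before the next alloc */
	}
}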
@@ -350,21 +358,13 @@ static void bio_kmalloc_destructor(struct bio *bio)
}
/**
- * bio_alloc - allocate a bio for I/O
+ * bio_kmalloc - allocate a bio for I/O using kmalloc()
* @gfp_mask: the GFP_ mask given to the slab allocator
* @nr_iovecs: number of iovecs to pre-allocate
*
* Description:
- * bio_alloc will allocate a bio and associated bio_vec array that can hold
- * at least @nr_iovecs entries. Allocations will be done from the
- * fs_bio_set. Also see @bio_alloc_bioset.
- *
- * If %__GFP_WAIT is set, then bio_alloc will always be able to allocate
- * a bio. This is due to the mempool guarantees. To make this work, callers
- * must never allocate more than 1 bio at a time from this pool. Callers
- * that need to allocate more than 1 bio must always submit the previously
- * allocated bio for IO before attempting to allocate a new one. Failure to
- * do so can cause livelocks under memory pressure.
+ * Allocate a new bio with @nr_iovecs bvecs. If @gfp_mask contains
+ * %__GFP_WAIT, the allocation is guaranteed to succeed.
*
**/
struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
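By contrast with bio_alloc(), bio_kmalloc() is not backed by the fs_bio_set mempool, so it suits callers that cannot honor the one-bio-at-a-time rule or that allocate without __GFP_WAIT. A hypothetical sketch:

/*
 * Allocating outside fs_bio_set, e.g. while other bios are already
 * held. With GFP_ATOMIC there is no mempool to fall back on, so the
 * result must be checked.
 */
static struct bio *build_bio_nowait(struct block_device *bdev,
				    sector_t sector, int nr_pages)
{
	struct bio *bio = bio_kmalloc(GFP_ATOMIC, nr_pages);

	if (!bio)
		return NULL;
	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	return bio;
}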
@@ -407,7 +407,7 @@ EXPORT_SYMBOL(zero_fill_bio);
*
* Description:
* Put a reference to a &struct bio, either one you have gotten with
- * bio_alloc or bio_get. The last put of a bio will free it.
+ * bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
**/
void bio_put(struct bio *bio)
{
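The get/put pairing the comment describes, sketched as a synchronous page read; the helper names are hypothetical, while bio_get()/bio_put() and the completion API are real:

/* end_io wakes the submitter and drops the bio_alloc() reference */
static void read_page_end_io(struct bio *bio, int err)
{
	complete(bio->bi_private);
	bio_put(bio);
}

static int read_page_sync(struct block_device *bdev, sector_t sector,
			  struct page *page)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	int err = 0;

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio->bi_private = &done;
	bio->bi_end_io = read_page_end_io;

	bio_get(bio);		/* extra ref: keep the bio valid past end_io */
	submit_bio(READ, bio);
	wait_for_completion(&done);
	if (!bio_flagged(bio, BIO_UPTODATE))
		err = -EIO;
	bio_put(bio);		/* last put frees the bio */
	return err;
}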
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 1065b715ef6..11aee09dd2a 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -628,6 +628,8 @@ static void bdi_prune_sb(struct backing_dev_info *bdi)
void bdi_unregister(struct backing_dev_info *bdi)
{
if (bdi->dev) {
+ bdi_prune_sb(bdi);
+
if (!bdi_cap_flush_forker(bdi))
bdi_wb_shutdown(bdi);
bdi_debug_unregister(bdi);
@@ -697,7 +699,6 @@ void bdi_destroy(struct backing_dev_info *bdi)
spin_unlock(&inode_lock);
}
- bdi_prune_sb(bdi);
bdi_unregister(bdi);
for (i = 0; i < NR_BDI_STAT_ITEMS; i++)