Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c    | 26
-rw-r--r--	block/blk-ioc.c     |  2
-rw-r--r--	block/blk-merge.c   | 12
-rw-r--r--	block/blk-sysfs.c   |  6
-rw-r--r--	block/blk-tag.c     |  9
-rw-r--r--	block/cfq-iosched.c | 23
6 files changed, 47 insertions(+), 31 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index b754a4a2f9b..2987fe47b5e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -54,15 +54,16 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
 
 static void drive_stat_acct(struct request *rq, int new_io)
 {
+	struct hd_struct *part;
 	int rw = rq_data_dir(rq);
 
 	if (!blk_fs_request(rq) || !rq->rq_disk)
 		return;
 
-	if (!new_io) {
-		__all_stat_inc(rq->rq_disk, merges[rw], rq->sector);
-	} else {
-		struct hd_struct *part = get_part(rq->rq_disk, rq->sector);
+	part = get_part(rq->rq_disk, rq->sector);
+	if (!new_io)
+		__all_stat_inc(rq->rq_disk, part, merges[rw], rq->sector);
+	else {
 		disk_round_stats(rq->rq_disk);
 		rq->rq_disk->in_flight++;
 		if (part) {
@@ -253,9 +254,11 @@ EXPORT_SYMBOL(__generic_unplug_device);
  **/
 void generic_unplug_device(struct request_queue *q)
 {
-	spin_lock_irq(q->queue_lock);
-	__generic_unplug_device(q);
-	spin_unlock_irq(q->queue_lock);
+	if (blk_queue_plugged(q)) {
+		spin_lock_irq(q->queue_lock);
+		__generic_unplug_device(q);
+		spin_unlock_irq(q->queue_lock);
+	}
 }
 EXPORT_SYMBOL(generic_unplug_device);
 
@@ -1536,10 +1539,11 @@ static int __end_that_request_first(struct request *req, int error,
 	}
 
 	if (blk_fs_request(req) && req->rq_disk) {
+		struct hd_struct *part = get_part(req->rq_disk, req->sector);
 		const int rw = rq_data_dir(req);
 
-		all_stat_add(req->rq_disk, sectors[rw],
-				nr_bytes >> 9, req->sector);
+		all_stat_add(req->rq_disk, part, sectors[rw],
+			     nr_bytes >> 9, req->sector);
 	}
 
 	total_bytes = bio_nbytes = 0;
@@ -1725,8 +1729,8 @@ static void end_that_request_last(struct request *req, int error)
 		const int rw = rq_data_dir(req);
 		struct hd_struct *part = get_part(disk, req->sector);
 
-		__all_stat_inc(disk, ios[rw], req->sector);
-		__all_stat_add(disk, ticks[rw], duration, req->sector);
+		__all_stat_inc(disk, part, ios[rw], req->sector);
+		__all_stat_add(disk, part, ticks[rw], duration, req->sector);
 		disk_round_stats(disk);
 		disk->in_flight--;
 		if (part) {
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index e34df7c9fc3..012f065ac8e 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -41,8 +41,8 @@ int put_io_context(struct io_context *ioc)
 		rcu_read_lock();
 		if (ioc->aic && ioc->aic->dtor)
 			ioc->aic->dtor(ioc->aic);
-		rcu_read_unlock();
 		cfq_dtor(ioc);
+		rcu_read_unlock();
 
 		kmem_cache_free(iocontext_cachep, ioc);
 		return 1;
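
The generic_unplug_device() hunk above avoids taking the queue lock when the
queue is not plugged; __generic_unplug_device() re-checks the flag under the
lock, so a race with a plug is harmless. A minimal userspace sketch of that
check-then-lock pattern, using a hypothetical queue type rather than kernel
code:

	#include <pthread.h>
	#include <stdatomic.h>

	struct queue {
		pthread_mutex_t lock;
		atomic_bool plugged;
	};

	static void unplug(struct queue *q)
	{
		/* Cheap unlocked test: the common "not plugged" case
		 * skips the lock round trip entirely. */
		if (!atomic_load(&q->plugged))
			return;

		pthread_mutex_lock(&q->lock);
		if (atomic_load(&q->plugged))	/* re-check under the lock */
			atomic_store(&q->plugged, false);
		pthread_mutex_unlock(&q->lock);
	}

The blk-ioc.c hunk moves cfq_dtor() ahead of rcu_read_unlock() so that the
cic list walk it performs stays inside the RCU read-side critical section;
this pairs with the cfq-iosched.c changes further down.
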
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 73b23562af2..651136aae76 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -149,9 +149,9 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
 				 struct bio *nxt)
 {
-	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+	if (!bio_flagged(bio, BIO_SEG_VALID))
 		blk_recount_segments(q, bio);
-	if (unlikely(!bio_flagged(nxt, BIO_SEG_VALID)))
+	if (!bio_flagged(nxt, BIO_SEG_VALID))
 		blk_recount_segments(q, nxt);
 	if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
 	    BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size))
@@ -312,9 +312,9 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 		q->last_merge = NULL;
 		return 0;
 	}
-	if (unlikely(!bio_flagged(req->biotail, BIO_SEG_VALID)))
+	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
 		blk_recount_segments(q, req->biotail);
-	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+	if (!bio_flagged(bio, BIO_SEG_VALID))
 		blk_recount_segments(q, bio);
 	len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
 	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio))
@@ -352,9 +352,9 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 		return 0;
 	}
 	len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
-	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+	if (!bio_flagged(bio, BIO_SEG_VALID))
 		blk_recount_segments(q, bio);
-	if (unlikely(!bio_flagged(req->bio, BIO_SEG_VALID)))
+	if (!bio_flagged(req->bio, BIO_SEG_VALID))
 		blk_recount_segments(q, req->bio);
 	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
 	    !BIOVEC_VIRT_OVERSIZE(len)) {
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index e85c4013e8a..304ec73ab82 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -146,11 +146,13 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
 	unsigned long nm;
 	ssize_t ret = queue_var_store(&nm, page, count);
 
+	spin_lock_irq(q->queue_lock);
 	if (nm)
-		set_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
+		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 	else
-		clear_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
+		queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
+	spin_unlock_irq(q->queue_lock);
 
 	return ret;
 }
 
diff --git a/block/blk-tag.c b/block/blk-tag.c
index de64e042997..32667beb03e 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -70,7 +70,7 @@ void __blk_queue_free_tags(struct request_queue *q)
 	__blk_free_tags(bqt);
 
 	q->queue_tags = NULL;
-	queue_flag_clear(QUEUE_FLAG_QUEUED, q);
+	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
 }
 
 /**
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(blk_free_tags);
  **/
 void blk_queue_free_tags(struct request_queue *q)
 {
-	queue_flag_clear(QUEUE_FLAG_QUEUED, q);
+	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
 }
 EXPORT_SYMBOL(blk_queue_free_tags);
 
@@ -171,6 +171,9 @@ EXPORT_SYMBOL(blk_init_tags);
  * @q:  the request queue for the device
  * @depth:  the maximum queue depth supported
  * @tags: the tag to use
+ *
+ * Queue lock must be held here if the function is called to resize an
+ * existing map.
 **/
 int blk_queue_init_tags(struct request_queue *q, int depth,
 			struct blk_queue_tag *tags)
@@ -197,7 +200,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
 	 * assign it, all done
 	 */
 	q->queue_tags = tags;
-	queue_flag_set(QUEUE_FLAG_QUEUED, q);
+	queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
 	INIT_LIST_HEAD(&q->tag_busy_list);
 	return 0;
 fail:
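
queue_nomerges_store() now updates the flag word under q->queue_lock using
queue_flag_set()/queue_flag_clear(), which can use non-atomic bit operations
because every writer is serialized by the lock; the blk-tag.c paths run where
the lock is not necessarily held, so they switch to the explicitly unlocked
variants instead. A loose userspace analog of the two variants, with
hypothetical names (the kernel helpers' definitions are not shown in this
diff):

	#include <pthread.h>
	#include <stdatomic.h>

	struct queue {
		pthread_mutex_t lock;
		atomic_ulong flags;
	};

	/* Locked variant: caller holds q->lock, so a plain read-modify-write
	 * is safe; the relaxed atomics here merely stand in for the kernel's
	 * non-atomic __set_bit() while keeping the C11 sketch race-free. */
	static void flag_set(struct queue *q, unsigned int bit)
	{
		unsigned long v = atomic_load_explicit(&q->flags,
						       memory_order_relaxed);
		atomic_store_explicit(&q->flags, v | (1UL << bit),
				      memory_order_relaxed);
	}

	/* Unlocked variant: nothing serializes writers, so a full atomic
	 * read-modify-write is required, like set_bit()/clear_bit(). */
	static void flag_set_unlocked(struct queue *q, unsigned int bit)
	{
		atomic_fetch_or(&q->flags, 1UL << bit);
	}
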
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index f4e1006c253..b399c62936e 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1142,6 +1142,17 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 	kmem_cache_free(cfq_pool, cfqq);
 }
 
+static void
+__call_for_each_cic(struct io_context *ioc,
+		    void (*func)(struct io_context *, struct cfq_io_context *))
+{
+	struct cfq_io_context *cic;
+	struct hlist_node *n;
+
+	hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
+		func(ioc, cic);
+}
+
 /*
  * Call func for each cic attached to this ioc.
  */
@@ -1149,12 +1160,8 @@ static void
 call_for_each_cic(struct io_context *ioc,
 		  void (*func)(struct io_context *, struct cfq_io_context *))
 {
-	struct cfq_io_context *cic;
-	struct hlist_node *n;
-
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
-		func(ioc, cic);
+	__call_for_each_cic(ioc, func);
 	rcu_read_unlock();
 }
 
@@ -1198,7 +1205,7 @@ static void cfq_free_io_context(struct io_context *ioc)
 	 * should be ok to iterate over the known list, we will see all cic's
 	 * since no new ones are added.
 	 */
-	call_for_each_cic(ioc, cic_free_func);
+	__call_for_each_cic(ioc, cic_free_func);
 }
 
 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
@@ -1296,10 +1303,10 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
 		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
 	case IOPRIO_CLASS_NONE:
 		/*
-		 * no prio set, place us in the middle of the BE classes
+		 * no prio set, inherit CPU scheduling settings
 		 */
 		cfqq->ioprio = task_nice_ioprio(tsk);
-		cfqq->ioprio_class = IOPRIO_CLASS_BE;
+		cfqq->ioprio_class = task_nice_ioclass(tsk);
 		break;
 	case IOPRIO_CLASS_RT:
 		cfqq->ioprio = task_ioprio(ioc);
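
call_for_each_cic() keeps its rcu_read_lock()/rcu_read_unlock() pair, while
cfq_free_io_context() switches to the bare __call_for_each_cic(): as the
retained comment notes, it runs once the io_context is unreachable, and with
put_io_context() now calling cfq_dtor() inside an RCU read-side section the
iteration is already covered. This locked-wrapper/unlocked-helper split is a
common kernel idiom; a small userspace sketch with hypothetical types, using
a mutex where the kernel uses RCU:

	#include <pthread.h>

	struct item {
		struct item *next;
	};

	struct ctx {
		pthread_mutex_t lock;	/* stands in for rcu_read_lock() */
		struct item *head;
	};

	/* __ prefix: iterate without taking the lock; the caller must hold
	 * it, or otherwise know the list cannot change underneath it. */
	static void __for_each_item(struct ctx *c, void (*fn)(struct item *))
	{
		for (struct item *i = c->head; i; i = i->next)
			fn(i);
	}

	/* Public wrapper: acquires the lock, then reuses the helper. */
	static void for_each_item(struct ctx *c, void (*fn)(struct item *))
	{
		pthread_mutex_lock(&c->lock);
		__for_each_item(c, fn);
		pthread_mutex_unlock(&c->lock);
	}
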
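The last hunk stops pinning IOPRIO_CLASS_NONE tasks to best-effort and
instead derives the I/O class from the task's CPU scheduling class via
task_nice_ioclass(). That helper's body is not part of this diff; a hedged
sketch of the mapping it is expected to perform:

	#define _GNU_SOURCE	/* for SCHED_IDLE in glibc's <sched.h> */
	#include <sched.h>

	enum ioprio_class {
		IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT,
		IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE,
	};

	/* Assumed mapping: realtime CPU policies get realtime I/O,
	 * SCHED_IDLE gets idle I/O, everything else is best-effort. */
	static enum ioprio_class nice_ioclass(int policy)
	{
		if (policy == SCHED_FIFO || policy == SCHED_RR)
			return IOPRIO_CLASS_RT;
		if (policy == SCHED_IDLE)
			return IOPRIO_CLASS_IDLE;
		return IOPRIO_CLASS_BE;
	}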