Diffstat (limited to 'block')
-rw-r--r--  block/blk-cgroup.c          2
-rw-r--r--  block/blk-cgroup.h          8
-rw-r--r--  block/blk-core.c           23
-rw-r--r--  block/blk-exec.c            2
-rw-r--r--  block/blk-flush.c         108
-rw-r--r--  block/blk-ioc.c             2
-rw-r--r--  block/blk-iopoll.c          3
-rw-r--r--  block/blk-lib.c             8
-rw-r--r--  block/blk-map.c             2
-rw-r--r--  block/blk-merge.c          91
-rw-r--r--  block/blk-mq-cpu.c         14
-rw-r--r--  block/blk-mq-cpumap.c      10
-rw-r--r--  block/blk-mq-sysfs.c       31
-rw-r--r--  block/blk-mq-tag.c          2
-rw-r--r--  block/blk-mq.c            307
-rw-r--r--  block/blk-mq.h              7
-rw-r--r--  block/blk-softirq.c        19
-rw-r--r--  block/blk-sysfs.c           2
-rw-r--r--  block/blk-timeout.c         2
-rw-r--r--  block/blk.h                 2
-rw-r--r--  block/cfq-iosched.c         8
-rw-r--r--  block/deadline-iosched.c    8
-rw-r--r--  block/partitions/atari.h    4
-rw-r--r--  block/partitions/efi.h      9
-rw-r--r--  block/partitions/karma.c    3
25 files changed, 376 insertions, 301 deletions
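
Before the per-file hunks, a minimal driver-side sketch of the reworked blk-mq completion path that a large part of this diff implements: the interrupt handler records the outcome and calls blk_mq_complete_request(), which hands the real completion to the softirq handler registered through blk_mq_ops->complete (wired to q->softirq_done_fn in blk_mq_init_queue()). The demo_* names are hypothetical and this fragment is not part of the patch; it only illustrates the API as it looks after these changes.

/*
 * Illustrative sketch, not from this series.  Assumes the post-patch
 * blk-mq API: blk_mq_complete_request(rq), blk_mq_end_io(rq, error),
 * blk_mq_map_queue() and the blk_mq_ops->complete hook.
 */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Called from the driver's interrupt handler once the hardware is done. */
static void demo_hw_irq_done(struct request *rq, int error)
{
	rq->errors = error;
	blk_mq_complete_request(rq);	/* may IPI back to the submitting CPU */
}

/* blk_mq_ops->complete: runs from BLOCK_SOFTIRQ on the chosen CPU. */
static void demo_softirq_done(struct request *rq)
{
	blk_mq_end_io(rq, rq->errors);
}

static int demo_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	/* ...hand rq to hardware; demo_hw_irq_done() runs on completion... */
	return BLK_MQ_RQ_QUEUE_OK;
}

static struct blk_mq_ops demo_mq_ops = {
	.queue_rq	= demo_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.complete	= demo_softirq_done,
	/* alloc_hctx/free_hctx and the remaining callbacks omitted for brevity */
};
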
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 4aefd46d7d9..e4a4145926f 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -336,7 +336,7 @@ static void blkg_destroy(struct blkcg_gq *blkg) * under queue_lock. If it's not pointing to @blkg now, it never * will. Hint assignment itself can race safely. */ - if (rcu_dereference_raw(blkcg->blkg_hint) == blkg) + if (rcu_access_pointer(blkcg->blkg_hint) == blkg) rcu_assign_pointer(blkcg->blkg_hint, NULL); /* diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h index 15a8d640de5..371fe8e92ab 100644 --- a/block/blk-cgroup.h +++ b/block/blk-cgroup.h @@ -439,9 +439,9 @@ static inline uint64_t blkg_stat_read(struct blkg_stat *stat) uint64_t v; do { - start = u64_stats_fetch_begin_bh(&stat->syncp); + start = u64_stats_fetch_begin_irq(&stat->syncp); v = stat->cnt; - } while (u64_stats_fetch_retry_bh(&stat->syncp, start)); + } while (u64_stats_fetch_retry_irq(&stat->syncp, start)); return v; } @@ -512,9 +512,9 @@ static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat) struct blkg_rwstat tmp; do { - start = u64_stats_fetch_begin_bh(&rwstat->syncp); + start = u64_stats_fetch_begin_irq(&rwstat->syncp); tmp = *rwstat; - } while (u64_stats_fetch_retry_bh(&rwstat->syncp, start)); + } while (u64_stats_fetch_retry_irq(&rwstat->syncp, start)); return tmp; } diff --git a/block/blk-core.c b/block/blk-core.c index c00e0bdeab4..34d7c196338 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -708,9 +708,13 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn, if (!q) return NULL; - if (blk_init_rl(&q->root_rl, q, GFP_KERNEL)) + q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL); + if (!q->flush_rq) return NULL; + if (blk_init_rl(&q->root_rl, q, GFP_KERNEL)) + goto fail; + q->request_fn = rfn; q->prep_rq_fn = NULL; q->unprep_rq_fn = NULL; @@ -733,12 +737,16 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn, /* init elevator */ if (elevator_init(q, NULL)) { mutex_unlock(&q->sysfs_lock); - return NULL; + goto fail; } mutex_unlock(&q->sysfs_lock); return q; + +fail: + kfree(q->flush_rq); + return NULL; } EXPORT_SYMBOL(blk_init_allocated_queue); @@ -1127,7 +1135,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw, struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) { if (q->mq_ops) - return blk_mq_alloc_request(q, rw, gfp_mask, false); + return blk_mq_alloc_request(q, rw, gfp_mask); else return blk_old_get_request(q, rw, gfp_mask); } @@ -1278,6 +1286,11 @@ void __blk_put_request(struct request_queue *q, struct request *req) if (unlikely(!q)) return; + if (q->mq_ops) { + blk_mq_free_request(req); + return; + } + blk_pm_put_request(req); elv_completed_request(q, req); @@ -1915,7 +1928,7 @@ EXPORT_SYMBOL(submit_bio); * in some cases below, so export this function. * Request stacking drivers like request-based dm may change the queue * limits while requests are in the queue (e.g. dm's table swapping). - * Such request stacking drivers should check those requests agaist + * Such request stacking drivers should check those requests against * the new queue limits again when they dispatch those requests, * although such checkings are also done against the old queue limits * when submitting requests. 
@@ -2340,7 +2353,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) if (!req->bio) return false; - trace_block_rq_complete(req->q, req); + trace_block_rq_complete(req->q, req, nr_bytes); /* * For fs requests, rq is just carrier of independent bio's diff --git a/block/blk-exec.c b/block/blk-exec.c index bbfc072a79c..dbf4502b1d6 100644 --- a/block/blk-exec.c +++ b/block/blk-exec.c @@ -65,7 +65,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, * be resued after dying flag is set */ if (q->mq_ops) { - blk_mq_insert_request(q, rq, true); + blk_mq_insert_request(rq, at_head, true, false); return; } diff --git a/block/blk-flush.c b/block/blk-flush.c index 9288aaf35c2..43e6b4755e9 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -130,20 +130,29 @@ static void blk_flush_restore_request(struct request *rq) blk_clear_rq_complete(rq); } -static void mq_flush_data_run(struct work_struct *work) +static void mq_flush_run(struct work_struct *work) { struct request *rq; - rq = container_of(work, struct request, mq_flush_data); + rq = container_of(work, struct request, mq_flush_work); memset(&rq->csd, 0, sizeof(rq->csd)); - blk_mq_run_request(rq, true, false); + blk_mq_insert_request(rq, false, true, false); } -static void blk_mq_flush_data_insert(struct request *rq) +static bool blk_flush_queue_rq(struct request *rq, bool add_front) { - INIT_WORK(&rq->mq_flush_data, mq_flush_data_run); - kblockd_schedule_work(rq->q, &rq->mq_flush_data); + if (rq->q->mq_ops) { + INIT_WORK(&rq->mq_flush_work, mq_flush_run); + kblockd_schedule_work(rq->q, &rq->mq_flush_work); + return false; + } else { + if (add_front) + list_add(&rq->queuelist, &rq->q->queue_head); + else + list_add_tail(&rq->queuelist, &rq->q->queue_head); + return true; + } } /** @@ -187,12 +196,7 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq, case REQ_FSEQ_DATA: list_move_tail(&rq->flush.list, &q->flush_data_in_flight); - if (q->mq_ops) - blk_mq_flush_data_insert(rq); - else { - list_add(&rq->queuelist, &q->queue_head); - queued = true; - } + queued = blk_flush_queue_rq(rq, true); break; case REQ_FSEQ_DONE: @@ -216,9 +220,6 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq, } kicked = blk_kick_flush(q); - /* blk_mq_run_flush will run queue */ - if (q->mq_ops) - return queued; return kicked | queued; } @@ -230,10 +231,9 @@ static void flush_end_io(struct request *flush_rq, int error) struct request *rq, *n; unsigned long flags = 0; - if (q->mq_ops) { - blk_mq_free_request(flush_rq); + if (q->mq_ops) spin_lock_irqsave(&q->mq_flush_lock, flags); - } + running = &q->flush_queue[q->flush_running_idx]; BUG_ON(q->flush_pending_idx == q->flush_running_idx); @@ -263,49 +263,14 @@ static void flush_end_io(struct request *flush_rq, int error) * kblockd. */ if (queued || q->flush_queue_delayed) { - if (!q->mq_ops) - blk_run_queue_async(q); - else - /* - * This can be optimized to only run queues with requests - * queued if necessary. 
- */ - blk_mq_run_queues(q, true); + WARN_ON(q->mq_ops); + blk_run_queue_async(q); } q->flush_queue_delayed = 0; if (q->mq_ops) spin_unlock_irqrestore(&q->mq_flush_lock, flags); } -static void mq_flush_work(struct work_struct *work) -{ - struct request_queue *q; - struct request *rq; - - q = container_of(work, struct request_queue, mq_flush_work); - - /* We don't need set REQ_FLUSH_SEQ, it's for consistency */ - rq = blk_mq_alloc_request(q, WRITE_FLUSH|REQ_FLUSH_SEQ, - __GFP_WAIT|GFP_ATOMIC, true); - rq->cmd_type = REQ_TYPE_FS; - rq->end_io = flush_end_io; - - blk_mq_run_request(rq, true, false); -} - -/* - * We can't directly use q->flush_rq, because it doesn't have tag and is not in - * hctx->rqs[]. so we must allocate a new request, since we can't sleep here, - * so offload the work to workqueue. - * - * Note: we assume a flush request finished in any hardware queue will flush - * the whole disk cache. - */ -static void mq_run_flush(struct request_queue *q) -{ - kblockd_schedule_work(q, &q->mq_flush_work); -} - /** * blk_kick_flush - consider issuing flush request * @q: request_queue being kicked @@ -340,19 +305,31 @@ static bool blk_kick_flush(struct request_queue *q) * different from running_idx, which means flush is in flight. */ q->flush_pending_idx ^= 1; + if (q->mq_ops) { - mq_run_flush(q); - return true; + struct blk_mq_ctx *ctx = first_rq->mq_ctx; + struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu); + + blk_mq_rq_init(hctx, q->flush_rq); + q->flush_rq->mq_ctx = ctx; + + /* + * Reuse the tag value from the fist waiting request, + * with blk-mq the tag is generated during request + * allocation and drivers can rely on it being inside + * the range they asked for. + */ + q->flush_rq->tag = first_rq->tag; + } else { + blk_rq_init(q, q->flush_rq); } - blk_rq_init(q, &q->flush_rq); - q->flush_rq.cmd_type = REQ_TYPE_FS; - q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ; - q->flush_rq.rq_disk = first_rq->rq_disk; - q->flush_rq.end_io = flush_end_io; + q->flush_rq->cmd_type = REQ_TYPE_FS; + q->flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ; + q->flush_rq->rq_disk = first_rq->rq_disk; + q->flush_rq->end_io = flush_end_io; - list_add_tail(&q->flush_rq.queuelist, &q->queue_head); - return true; + return blk_flush_queue_rq(q->flush_rq, false); } static void flush_data_end_io(struct request *rq, int error) @@ -437,7 +414,7 @@ void blk_insert_flush(struct request *rq) if ((policy & REQ_FSEQ_DATA) && !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) { if (q->mq_ops) { - blk_mq_run_request(rq, false, true); + blk_mq_insert_request(rq, false, false, true); } else list_add_tail(&rq->queuelist, &q->queue_head); return; @@ -558,5 +535,4 @@ EXPORT_SYMBOL(blkdev_issue_flush); void blk_mq_init_flush(struct request_queue *q) { spin_lock_init(&q->mq_flush_lock); - INIT_WORK(&q->mq_flush_work, mq_flush_work); } diff --git a/block/blk-ioc.c b/block/blk-ioc.c index 242df01413f..1a27f45ec77 100644 --- a/block/blk-ioc.c +++ b/block/blk-ioc.c @@ -68,7 +68,7 @@ static void ioc_destroy_icq(struct io_cq *icq) * under queue_lock. If it's not pointing to @icq now, it never * will. Hint assignment itself can race safely. 
*/ - if (rcu_dereference_raw(ioc->icq_hint) == icq) + if (rcu_access_pointer(ioc->icq_hint) == icq) rcu_assign_pointer(ioc->icq_hint, NULL); ioc_exit_icq(icq); diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c index 1855bf51edb..c11d24e379e 100644 --- a/block/blk-iopoll.c +++ b/block/blk-iopoll.c @@ -14,9 +14,6 @@ #include "blk.h" -int blk_iopoll_enabled = 1; -EXPORT_SYMBOL(blk_iopoll_enabled); - static unsigned int blk_iopoll_budget __read_mostly = 256; static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll); diff --git a/block/blk-lib.c b/block/blk-lib.c index 2da76c999ef..97a733cf3d5 100644 --- a/block/blk-lib.c +++ b/block/blk-lib.c @@ -119,6 +119,14 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, atomic_inc(&bb.done); submit_bio(type, bio); + + /* + * We can loop for a long time in here, if someone does + * full device discards (like mkfs). Be nice and allow + * us to schedule out to avoid softlocking if preempt + * is disabled. + */ + cond_resched(); } blk_finish_plug(&plug); diff --git a/block/blk-map.c b/block/blk-map.c index ae4ae1047fd..cca6356d216 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -285,7 +285,7 @@ EXPORT_SYMBOL(blk_rq_unmap_user); * * Description: * Data will be mapped directly if possible. Otherwise a bounce - * buffer is used. Can be called multple times to append multple + * buffer is used. Can be called multiple times to append multiple * buffers. */ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, diff --git a/block/blk-merge.c b/block/blk-merge.c index 8f8adaa9546..6c583f9c5b6 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -21,6 +21,16 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q, if (!bio) return 0; + /* + * This should probably be returning 0, but blk_add_request_payload() + * (Christoph!!!!) + */ + if (bio->bi_rw & REQ_DISCARD) + return 1; + + if (bio->bi_rw & REQ_WRITE_SAME) + return 1; + fbio = bio; cluster = blk_queue_cluster(q); seg_size = 0; @@ -161,30 +171,60 @@ new_segment: *bvprv = *bvec; } -/* - * map a request to scatterlist, return number of sg entries setup. Caller - * must make sure sg can hold rq->nr_phys_segments entries - */ -int blk_rq_map_sg(struct request_queue *q, struct request *rq, - struct scatterlist *sglist) +static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio, + struct scatterlist *sglist, + struct scatterlist **sg) { struct bio_vec bvec, bvprv = { NULL }; - struct req_iterator iter; - struct scatterlist *sg; + struct bvec_iter iter; int nsegs, cluster; nsegs = 0; cluster = blk_queue_cluster(q); - /* - * for each bio in rq - */ - sg = NULL; - rq_for_each_segment(bvec, rq, iter) { - __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg, - &nsegs, &cluster); - } /* segments in rq */ + if (bio->bi_rw & REQ_DISCARD) { + /* + * This is a hack - drivers should be neither modifying the + * biovec, nor relying on bi_vcnt - but because of + * blk_add_request_payload(), a discard bio may or may not have + * a payload we need to set up here (thank you Christoph) and + * bi_vcnt is really the only way of telling if we need to. 
+ */ + + if (bio->bi_vcnt) + goto single_segment; + + return 0; + } + + if (bio->bi_rw & REQ_WRITE_SAME) { +single_segment: + *sg = sglist; + bvec = bio_iovec(bio); + sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset); + return 1; + } + + for_each_bio(bio) + bio_for_each_segment(bvec, bio, iter) + __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg, + &nsegs, &cluster); + return nsegs; +} + +/* + * map a request to scatterlist, return number of sg entries setup. Caller + * must make sure sg can hold rq->nr_phys_segments entries + */ +int blk_rq_map_sg(struct request_queue *q, struct request *rq, + struct scatterlist *sglist) +{ + struct scatterlist *sg = NULL; + int nsegs = 0; + + if (rq->bio) + nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg); if (unlikely(rq->cmd_flags & REQ_COPY_USER) && (blk_rq_bytes(rq) & q->dma_pad_mask)) { @@ -230,20 +270,13 @@ EXPORT_SYMBOL(blk_rq_map_sg); int blk_bio_map_sg(struct request_queue *q, struct bio *bio, struct scatterlist *sglist) { - struct bio_vec bvec, bvprv = { NULL }; - struct scatterlist *sg; - int nsegs, cluster; - struct bvec_iter iter; - - nsegs = 0; - cluster = blk_queue_cluster(q); - - sg = NULL; - bio_for_each_segment(bvec, bio, iter) { - __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg, - &nsegs, &cluster); - } /* segments in bio */ + struct scatterlist *sg = NULL; + int nsegs; + struct bio *next = bio->bi_next; + bio->bi_next = NULL; + nsegs = __blk_bios_map_sg(q, bio, sglist, &sg); + bio->bi_next = next; if (sg) sg_mark_end(sg); diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c index 3146befb56a..136ef8643bb 100644 --- a/block/blk-mq-cpu.c +++ b/block/blk-mq-cpu.c @@ -11,7 +11,7 @@ #include "blk-mq.h" static LIST_HEAD(blk_mq_cpu_notify_list); -static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock); +static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock); static int blk_mq_main_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) @@ -19,12 +19,12 @@ static int blk_mq_main_cpu_notify(struct notifier_block *self, unsigned int cpu = (unsigned long) hcpu; struct blk_mq_cpu_notifier *notify; - spin_lock(&blk_mq_cpu_notify_lock); + raw_spin_lock(&blk_mq_cpu_notify_lock); list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) notify->notify(notify->data, action, cpu); - spin_unlock(&blk_mq_cpu_notify_lock); + raw_spin_unlock(&blk_mq_cpu_notify_lock); return NOTIFY_OK; } @@ -32,16 +32,16 @@ void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier) { BUG_ON(!notifier->notify); - spin_lock(&blk_mq_cpu_notify_lock); + raw_spin_lock(&blk_mq_cpu_notify_lock); list_add_tail(¬ifier->list, &blk_mq_cpu_notify_list); - spin_unlock(&blk_mq_cpu_notify_lock); + raw_spin_unlock(&blk_mq_cpu_notify_lock); } void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier) { - spin_lock(&blk_mq_cpu_notify_lock); + raw_spin_lock(&blk_mq_cpu_notify_lock); list_del(¬ifier->list); - spin_unlock(&blk_mq_cpu_notify_lock); + raw_spin_unlock(&blk_mq_cpu_notify_lock); } void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier, diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c index f8721278601..09792132961 100644 --- a/block/blk-mq-cpumap.c +++ b/block/blk-mq-cpumap.c @@ -9,15 +9,6 @@ #include "blk.h" #include "blk-mq.h" -static void show_map(unsigned int *map, unsigned int nr) -{ - int i; - - pr_info("blk-mq: CPU -> queue map\n"); - for_each_online_cpu(i) - pr_info(" CPU%2u -> Queue %u\n", i, map[i]); -} - static int cpu_to_queue_index(unsigned int nr_cpus, unsigned int nr_queues, const int cpu) { @@ 
-85,7 +76,6 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues) map[i] = map[first_sibling]; } - show_map(map, nr_cpus); free_cpumask_var(cpus); return 0; } diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c index b91ce75bd35..b0ba264b052 100644 --- a/block/blk-mq-sysfs.c +++ b/block/blk-mq-sysfs.c @@ -244,6 +244,32 @@ static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page) return blk_mq_tag_sysfs_show(hctx->tags, page); } +static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page) +{ + unsigned int i, queue_num, first = 1; + ssize_t ret = 0; + + blk_mq_disable_hotplug(); + + for_each_online_cpu(i) { + queue_num = hctx->queue->mq_map[i]; + if (queue_num != hctx->queue_num) + continue; + + if (first) + ret += sprintf(ret + page, "%u", i); + else + ret += sprintf(ret + page, ", %u", i); + + first = 0; + } + + blk_mq_enable_hotplug(); + + ret += sprintf(ret + page, "\n"); + return ret; +} + static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = { .attr = {.name = "dispatched", .mode = S_IRUGO }, .show = blk_mq_sysfs_dispatched_show, @@ -294,6 +320,10 @@ static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = { .attr = {.name = "tags", .mode = S_IRUGO }, .show = blk_mq_hw_sysfs_tags_show, }; +static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = { + .attr = {.name = "cpu_list", .mode = S_IRUGO }, + .show = blk_mq_hw_sysfs_cpus_show, +}; static struct attribute *default_hw_ctx_attrs[] = { &blk_mq_hw_sysfs_queued.attr, @@ -302,6 +332,7 @@ static struct attribute *default_hw_ctx_attrs[] = { &blk_mq_hw_sysfs_pending.attr, &blk_mq_hw_sysfs_ipi.attr, &blk_mq_hw_sysfs_tags.attr, + &blk_mq_hw_sysfs_cpus.attr, NULL, }; diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 5d70edc9855..83ae96c51a2 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -184,7 +184,7 @@ void blk_mq_free_tags(struct blk_mq_tags *tags) ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page) { char *orig_page = page; - int cpu; + unsigned int cpu; if (!tags) return 0; diff --git a/block/blk-mq.c b/block/blk-mq.c index 57039fcd9c9..b1bcc619d0e 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -73,8 +73,8 @@ static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, set_bit(ctx->index_hw, hctx->ctx_map); } -static struct request *blk_mq_alloc_rq(struct blk_mq_hw_ctx *hctx, gfp_t gfp, - bool reserved) +static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx, + gfp_t gfp, bool reserved) { struct request *rq; unsigned int tag; @@ -193,12 +193,6 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx, ctx->rq_dispatched[rw_is_sync(rw_flags)]++; } -static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx, - gfp_t gfp, bool reserved) -{ - return blk_mq_alloc_rq(hctx, gfp, reserved); -} - static struct request *blk_mq_alloc_request_pinned(struct request_queue *q, int rw, gfp_t gfp, bool reserved) @@ -226,15 +220,14 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q, return rq; } -struct request *blk_mq_alloc_request(struct request_queue *q, int rw, - gfp_t gfp, bool reserved) +struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp) { struct request *rq; if (blk_mq_queue_enter(q)) return NULL; - rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved); + rq = blk_mq_alloc_request_pinned(q, rw, gfp, false); if (rq) blk_mq_put_ctx(rq->mq_ctx); return rq; @@ -258,7 +251,7 @@ 
EXPORT_SYMBOL(blk_mq_alloc_reserved_request); /* * Re-init and set pdu, if we have it */ -static void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq) +void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq) { blk_rq_init(hctx->queue, rq); @@ -290,38 +283,10 @@ void blk_mq_free_request(struct request *rq) __blk_mq_free_request(hctx, ctx, rq); } -static void blk_mq_bio_endio(struct request *rq, struct bio *bio, int error) -{ - if (error) - clear_bit(BIO_UPTODATE, &bio->bi_flags); - else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) - error = -EIO; - - if (unlikely(rq->cmd_flags & REQ_QUIET)) - set_bit(BIO_QUIET, &bio->bi_flags); - - /* don't actually finish bio if it's part of flush sequence */ - if (!(rq->cmd_flags & REQ_FLUSH_SEQ)) - bio_endio(bio, error); -} - -void blk_mq_complete_request(struct request *rq, int error) +bool blk_mq_end_io_partial(struct request *rq, int error, unsigned int nr_bytes) { - struct bio *bio = rq->bio; - unsigned int bytes = 0; - - trace_block_rq_complete(rq->q, rq); - - while (bio) { - struct bio *next = bio->bi_next; - - bio->bi_next = NULL; - bytes += bio->bi_iter.bi_size; - blk_mq_bio_endio(rq, bio, error); - bio = next; - } - - blk_account_io_completion(rq, bytes); + if (blk_update_request(rq, error, blk_rq_bytes(rq))) + return true; blk_account_io_done(rq); @@ -329,49 +294,57 @@ void blk_mq_complete_request(struct request *rq, int error) rq->end_io(rq, error); else blk_mq_free_request(rq); + return false; } +EXPORT_SYMBOL(blk_mq_end_io_partial); -void __blk_mq_end_io(struct request *rq, int error) -{ - if (!blk_mark_rq_complete(rq)) - blk_mq_complete_request(rq, error); -} - -static void blk_mq_end_io_remote(void *data) +static void __blk_mq_complete_request_remote(void *data) { struct request *rq = data; - __blk_mq_end_io(rq, rq->errors); + rq->q->softirq_done_fn(rq); } -/* - * End IO on this request on a multiqueue enabled driver. We'll either do - * it directly inline, or punt to a local IPI handler on the matching - * remote CPU. - */ -void blk_mq_end_io(struct request *rq, int error) +void __blk_mq_complete_request(struct request *rq) { struct blk_mq_ctx *ctx = rq->mq_ctx; int cpu; - if (!ctx->ipi_redirect) - return __blk_mq_end_io(rq, error); + if (!ctx->ipi_redirect) { + rq->q->softirq_done_fn(rq); + return; + } cpu = get_cpu(); if (cpu != ctx->cpu && cpu_online(ctx->cpu)) { - rq->errors = error; - rq->csd.func = blk_mq_end_io_remote; + rq->csd.func = __blk_mq_complete_request_remote; rq->csd.info = rq; rq->csd.flags = 0; - __smp_call_function_single(ctx->cpu, &rq->csd, 0); + smp_call_function_single_async(ctx->cpu, &rq->csd); } else { - __blk_mq_end_io(rq, error); + rq->q->softirq_done_fn(rq); } put_cpu(); } -EXPORT_SYMBOL(blk_mq_end_io); -static void blk_mq_start_request(struct request *rq) +/** + * blk_mq_complete_request - end I/O on a request + * @rq: the request being processed + * + * Description: + * Ends all I/O on a request. It does not handle partial completions. + * The actual completion happens out-of-order, through a IPI handler. 
+ **/ +void blk_mq_complete_request(struct request *rq) +{ + if (unlikely(blk_should_fake_timeout(rq->q))) + return; + if (!blk_mark_rq_complete(rq)) + __blk_mq_complete_request(rq); +} +EXPORT_SYMBOL(blk_mq_complete_request); + +static void blk_mq_start_request(struct request *rq, bool last) { struct request_queue *q = rq->q; @@ -384,6 +357,25 @@ static void blk_mq_start_request(struct request *rq) */ rq->deadline = jiffies + q->rq_timeout; set_bit(REQ_ATOM_STARTED, &rq->atomic_flags); + + if (q->dma_drain_size && blk_rq_bytes(rq)) { + /* + * Make sure space for the drain appears. We know we can do + * this because max_hw_segments has been adjusted to be one + * fewer than the device can handle. + */ + rq->nr_phys_segments++; + } + + /* + * Flag the last request in the series so that drivers know when IO + * should be kicked off, if they don't do it on a per-request basis. + * + * Note: the flag isn't the only condition drivers should do kick off. + * If drive is busy, the last request might not have the bit set. + */ + if (last) + rq->cmd_flags |= REQ_END; } static void blk_mq_requeue_request(struct request *rq) @@ -392,6 +384,11 @@ static void blk_mq_requeue_request(struct request *rq) trace_block_rq_requeue(q, rq); clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags); + + rq->cmd_flags &= ~REQ_END; + + if (q->dma_drain_size && blk_rq_bytes(rq)) + rq->nr_phys_segments--; } struct blk_mq_timeout_data { @@ -517,7 +514,7 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) LIST_HEAD(rq_list); int bit, queued; - if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->flags))) + if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state))) return; hctx->run++; @@ -559,19 +556,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) rq = list_first_entry(&rq_list, struct request, queuelist); list_del_init(&rq->queuelist); - blk_mq_start_request(rq); - /* - * Last request in the series. Flag it as such, this - * enables drivers to know when IO should be kicked off, - * if they don't do it on a per-request basis. - * - * Note: the flag isn't the only condition drivers - * should do kick off. If drive is busy, the last - * request might not have the bit set. 
- */ - if (list_empty(&rq_list)) - rq->cmd_flags |= REQ_END; + blk_mq_start_request(rq, list_empty(&rq_list)); ret = q->mq_ops->queue_rq(hctx, rq); switch (ret) { @@ -589,8 +575,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) break; default: pr_err("blk-mq: bad return on queue: %d\n", ret); - rq->errors = -EIO; case BLK_MQ_RQ_QUEUE_ERROR: + rq->errors = -EIO; blk_mq_end_io(rq, rq->errors); break; } @@ -617,7 +603,7 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) { - if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->flags))) + if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state))) return; if (!async) @@ -637,7 +623,7 @@ void blk_mq_run_queues(struct request_queue *q, bool async) queue_for_each_hw_ctx(q, hctx, i) { if ((!blk_mq_hctx_has_pending(hctx) && list_empty_careful(&hctx->dispatch)) || - test_bit(BLK_MQ_S_STOPPED, &hctx->flags)) + test_bit(BLK_MQ_S_STOPPED, &hctx->state)) continue; blk_mq_run_hw_queue(hctx, async); @@ -693,13 +679,16 @@ static void blk_mq_work_fn(struct work_struct *work) } static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, - struct request *rq) + struct request *rq, bool at_head) { struct blk_mq_ctx *ctx = rq->mq_ctx; trace_block_rq_insert(hctx->queue, rq); - list_add_tail(&rq->queuelist, &ctx->rq_list); + if (at_head) + list_add(&rq->queuelist, &ctx->rq_list); + else + list_add_tail(&rq->queuelist, &ctx->rq_list); blk_mq_hctx_mark_pending(hctx, ctx); /* @@ -708,60 +697,27 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, blk_mq_add_timer(rq); } -void blk_mq_insert_request(struct request_queue *q, struct request *rq, - bool run_queue) +void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue, + bool async) { + struct request_queue *q = rq->q; struct blk_mq_hw_ctx *hctx; - struct blk_mq_ctx *ctx, *current_ctx; + struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx; + + current_ctx = blk_mq_get_ctx(q); + if (!cpu_online(ctx->cpu)) + rq->mq_ctx = ctx = current_ctx; - ctx = rq->mq_ctx; hctx = q->mq_ops->map_queue(q, ctx->cpu); - if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) { + if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) && + !(rq->cmd_flags & (REQ_FLUSH_SEQ))) { blk_insert_flush(rq); } else { - current_ctx = blk_mq_get_ctx(q); - - if (!cpu_online(ctx->cpu)) { - ctx = current_ctx; - hctx = q->mq_ops->map_queue(q, ctx->cpu); - rq->mq_ctx = ctx; - } spin_lock(&ctx->lock); - __blk_mq_insert_request(hctx, rq); + __blk_mq_insert_request(hctx, rq, at_head); spin_unlock(&ctx->lock); - - blk_mq_put_ctx(current_ctx); - } - - if (run_queue) - __blk_mq_run_hw_queue(hctx); -} -EXPORT_SYMBOL(blk_mq_insert_request); - -/* - * This is a special version of blk_mq_insert_request to bypass FLUSH request - * check. Should only be used internally. 
- */ -void blk_mq_run_request(struct request *rq, bool run_queue, bool async) -{ - struct request_queue *q = rq->q; - struct blk_mq_hw_ctx *hctx; - struct blk_mq_ctx *ctx, *current_ctx; - - current_ctx = blk_mq_get_ctx(q); - - ctx = rq->mq_ctx; - if (!cpu_online(ctx->cpu)) { - ctx = current_ctx; - rq->mq_ctx = ctx; } - hctx = q->mq_ops->map_queue(q, ctx->cpu); - - /* ctx->cpu might be offline */ - spin_lock(&ctx->lock); - __blk_mq_insert_request(hctx, rq); - spin_unlock(&ctx->lock); blk_mq_put_ctx(current_ctx); @@ -798,7 +754,7 @@ static void blk_mq_insert_requests(struct request_queue *q, rq = list_first_entry(list, struct request, queuelist); list_del_init(&rq->queuelist); rq->mq_ctx = ctx; - __blk_mq_insert_request(hctx, rq); + __blk_mq_insert_request(hctx, rq, false); } spin_unlock(&ctx->lock); @@ -888,6 +844,11 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio) blk_queue_bounce(q, &bio); + if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { + bio_endio(bio, -EIO); + return; + } + if (use_plug && blk_attempt_plug_merge(q, bio, &request_count)) return; @@ -899,6 +860,8 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio) ctx = blk_mq_get_ctx(q); hctx = q->mq_ops->map_queue(q, ctx->cpu); + if (is_sync) + rw |= REQ_SYNC; trace_block_getrq(q, bio, rw); rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false); if (likely(rq)) @@ -950,7 +913,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio) __blk_mq_free_request(hctx, ctx, rq); else { blk_mq_bio_to_request(rq, bio); - __blk_mq_insert_request(hctx, rq); + __blk_mq_insert_request(hctx, rq, false); } spin_unlock(&ctx->lock); @@ -1031,8 +994,46 @@ static void blk_mq_hctx_notify(void *data, unsigned long action, blk_mq_put_ctx(ctx); } -static void blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx, - void (*init)(void *, struct blk_mq_hw_ctx *, +static int blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx, + int (*init)(void *, struct blk_mq_hw_ctx *, + struct request *, unsigned int), + void *data) +{ + unsigned int i; + int ret = 0; + + for (i = 0; i < hctx->queue_depth; i++) { + struct request *rq = hctx->rqs[i]; + + ret = init(data, hctx, rq, i); + if (ret) + break; + } + + return ret; +} + +int blk_mq_init_commands(struct request_queue *q, + int (*init)(void *, struct blk_mq_hw_ctx *, + struct request *, unsigned int), + void *data) +{ + struct blk_mq_hw_ctx *hctx; + unsigned int i; + int ret = 0; + + queue_for_each_hw_ctx(q, hctx, i) { + ret = blk_mq_init_hw_commands(hctx, init, data); + if (ret) + break; + } + + return ret; +} +EXPORT_SYMBOL(blk_mq_init_commands); + +static void blk_mq_free_hw_commands(struct blk_mq_hw_ctx *hctx, + void (*free)(void *, struct blk_mq_hw_ctx *, struct request *, unsigned int), void *data) { @@ -1041,12 +1042,12 @@ static void blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx, for (i = 0; i < hctx->queue_depth; i++) { struct request *rq = hctx->rqs[i]; - init(data, hctx, rq, i); + free(data, hctx, rq, i); } } -void blk_mq_init_commands(struct request_queue *q, - void (*init)(void *, struct blk_mq_hw_ctx *, +void blk_mq_free_commands(struct request_queue *q, + void (*free)(void *, struct blk_mq_hw_ctx *, struct request *, unsigned int), void *data) { @@ -1054,9 +1055,9 @@ void blk_mq_init_commands(struct request_queue *q, unsigned int i; queue_for_each_hw_ctx(q, hctx, i) - blk_mq_init_hw_commands(hctx, init, data); + blk_mq_free_hw_commands(hctx, free, data); } -EXPORT_SYMBOL(blk_mq_init_commands); 
+EXPORT_SYMBOL(blk_mq_free_commands); static void blk_mq_free_rq_map(struct blk_mq_hw_ctx *hctx) { @@ -1309,15 +1310,6 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg, reg->queue_depth = BLK_MQ_MAX_DEPTH; } - /* - * Set aside a tag for flush requests. It will only be used while - * another flush request is in progress but outside the driver. - * - * TODO: only allocate if flushes are supported - */ - reg->queue_depth++; - reg->reserved_tags++; - if (reg->queue_depth < (reg->reserved_tags + BLK_MQ_TAG_MIN)) return ERR_PTR(-EINVAL); @@ -1360,17 +1352,27 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg, q->mq_ops = reg->ops; q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; + q->sg_reserved_size = INT_MAX; + blk_queue_make_request(q, blk_mq_make_request); blk_queue_rq_timed_out(q, reg->ops->timeout); if (reg->timeout) blk_queue_rq_timeout(q, reg->timeout); + if (reg->ops->complete) + blk_queue_softirq_done(q, reg->ops->complete); + blk_mq_init_flush(q); blk_mq_init_cpu_queues(q, reg->nr_hw_queues); - if (blk_mq_init_hw_queues(q, reg, driver_data)) + q->flush_rq = kzalloc(round_up(sizeof(struct request) + reg->cmd_size, + cache_line_size()), GFP_KERNEL); + if (!q->flush_rq) goto err_hw; + if (blk_mq_init_hw_queues(q, reg, driver_data)) + goto err_flush_rq; + blk_mq_map_swqueue(q); mutex_lock(&all_q_mutex); @@ -1378,6 +1380,9 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg, mutex_unlock(&all_q_mutex); return q; + +err_flush_rq: + kfree(q->flush_rq); err_hw: kfree(q->mq_map); err_map: @@ -1463,6 +1468,16 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb, return NOTIFY_OK; } +void blk_mq_disable_hotplug(void) +{ + mutex_lock(&all_q_mutex); +} + +void blk_mq_enable_hotplug(void) +{ + mutex_unlock(&all_q_mutex); +} + static int __init blk_mq_init(void) { blk_mq_cpu_init(); diff --git a/block/blk-mq.h b/block/blk-mq.h index 5c3917984b0..ebbe6bac9d6 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -22,13 +22,12 @@ struct blk_mq_ctx { struct kobject kobj; }; -void __blk_mq_end_io(struct request *rq, int error); -void blk_mq_complete_request(struct request *rq, int error); -void blk_mq_run_request(struct request *rq, bool run_queue, bool async); +void __blk_mq_complete_request(struct request *rq); void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); void blk_mq_init_flush(struct request_queue *q); void blk_mq_drain_queue(struct request_queue *q); void blk_mq_free_queue(struct request_queue *q); +void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq); /* * CPU hotplug helpers @@ -40,6 +39,8 @@ void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier, void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier); void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier); void blk_mq_cpu_init(void); +void blk_mq_enable_hotplug(void); +void blk_mq_disable_hotplug(void); /* * CPU -> queue mappings diff --git a/block/blk-softirq.c b/block/blk-softirq.c index 57790c1a97e..ebd6b6f1bde 100644 --- a/block/blk-softirq.c +++ b/block/blk-softirq.c @@ -30,8 +30,8 @@ static void blk_done_softirq(struct softirq_action *h) while (!list_empty(&local_list)) { struct request *rq; - rq = list_entry(local_list.next, struct request, csd.list); - list_del_init(&rq->csd.list); + rq = list_entry(local_list.next, struct request, queuelist); + list_del_init(&rq->queuelist); rq->q->softirq_done_fn(rq); } } @@ -45,9 +45,14 @@ static void trigger_softirq(void *data) local_irq_save(flags); list = 
this_cpu_ptr(&blk_cpu_done); - list_add_tail(&rq->csd.list, list); + /* + * We reuse queuelist for a list of requests to process. Since the + * queuelist is used by the block layer only for requests waiting to be + * submitted to the device it is unused now. + */ + list_add_tail(&rq->queuelist, list); - if (list->next == &rq->csd.list) + if (list->next == &rq->queuelist) raise_softirq_irqoff(BLOCK_SOFTIRQ); local_irq_restore(flags); @@ -65,7 +70,7 @@ static int raise_blk_irq(int cpu, struct request *rq) data->info = rq; data->flags = 0; - __smp_call_function_single(cpu, data, 0); + smp_call_function_single_async(cpu, data); return 0; } @@ -136,7 +141,7 @@ void __blk_complete_request(struct request *req) struct list_head *list; do_local: list = this_cpu_ptr(&blk_cpu_done); - list_add_tail(&req->csd.list, list); + list_add_tail(&req->queuelist, list); /* * if the list only contains our just added request, @@ -144,7 +149,7 @@ do_local: * entries there, someone already raised the irq but it * hasn't run yet. */ - if (list->next == &req->csd.list) + if (list->next == &req->queuelist) raise_softirq_irqoff(BLOCK_SOFTIRQ); } else if (raise_blk_irq(ccpu, req)) goto do_local; diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 8095c4a21fc..7500f876dae 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -549,6 +549,8 @@ static void blk_release_queue(struct kobject *kobj) if (q->mq_ops) blk_mq_free_queue(q); + kfree(q->flush_rq); + blk_trace_shutdown(q); bdi_destroy(&q->backing_dev_info); diff --git a/block/blk-timeout.c b/block/blk-timeout.c index bba81c9348e..d96f7061c6f 100644 --- a/block/blk-timeout.c +++ b/block/blk-timeout.c @@ -91,7 +91,7 @@ static void blk_rq_timed_out(struct request *req) case BLK_EH_HANDLED: /* Can we use req->errors here? 
*/ if (q->mq_ops) - blk_mq_complete_request(req, req->errors); + __blk_mq_complete_request(req); else __blk_complete_request(req); break; diff --git a/block/blk.h b/block/blk.h index c90e1d8f7a2..d23b415b8a2 100644 --- a/block/blk.h +++ b/block/blk.h @@ -113,7 +113,7 @@ static inline struct request *__elv_next_request(struct request_queue *q) q->flush_queue_delayed = 1; return NULL; } - if (unlikely(blk_queue_dying(q)) || + if (unlikely(blk_queue_bypass(q)) || !q->elevator->type->ops.elevator_dispatch_fn(q, 0)) return NULL; } diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index f5de45b6af3..e0985f1955e 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -2364,10 +2364,10 @@ cfq_merged_requests(struct request_queue *q, struct request *rq, * reposition in fifo if next is older than rq */ if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && - time_before(rq_fifo_time(next), rq_fifo_time(rq)) && + time_before(next->fifo_time, rq->fifo_time) && cfqq == RQ_CFQQ(next)) { list_move(&rq->queuelist, &next->queuelist); - rq_set_fifo_time(rq, rq_fifo_time(next)); + rq->fifo_time = next->fifo_time; } if (cfqq->next_rq == next) @@ -2811,7 +2811,7 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq) return NULL; rq = rq_entry_fifo(cfqq->fifo.next); - if (time_before(jiffies, rq_fifo_time(rq))) + if (time_before(jiffies, rq->fifo_time)) rq = NULL; cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq); @@ -3924,7 +3924,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq) cfq_log_cfqq(cfqd, cfqq, "insert_request"); cfq_init_prio_data(cfqq, RQ_CIC(rq)); - rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]); + rq->fifo_time = jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]; list_add_tail(&rq->queuelist, &cfqq->fifo); cfq_add_rq_rb(rq); cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group, diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c index 9ef66406c62..a753df2b3fc 100644 --- a/block/deadline-iosched.c +++ b/block/deadline-iosched.c @@ -106,7 +106,7 @@ deadline_add_request(struct request_queue *q, struct request *rq) /* * set expire time and add to fifo list */ - rq_set_fifo_time(rq, jiffies + dd->fifo_expire[data_dir]); + rq->fifo_time = jiffies + dd->fifo_expire[data_dir]; list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]); } @@ -174,9 +174,9 @@ deadline_merged_requests(struct request_queue *q, struct request *req, * and move into next position (next will be deleted) in fifo */ if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) { - if (time_before(rq_fifo_time(next), rq_fifo_time(req))) { + if (time_before(next->fifo_time, req->fifo_time)) { list_move(&req->queuelist, &next->queuelist); - rq_set_fifo_time(req, rq_fifo_time(next)); + req->fifo_time = next->fifo_time; } } @@ -230,7 +230,7 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir) /* * rq is expired! 
*/ - if (time_after_eq(jiffies, rq_fifo_time(rq))) + if (time_after_eq(jiffies, rq->fifo_time)) return 1; return 0; diff --git a/block/partitions/atari.h b/block/partitions/atari.h index fe2d32a89f3..f2ec43bfeec 100644 --- a/block/partitions/atari.h +++ b/block/partitions/atari.h @@ -11,6 +11,8 @@ * by Guenther Kelleter (guenther@pool.informatik.rwth-aachen.de) */ +#include <linux/compiler.h> + struct partition_info { u8 flg; /* bit 0: active; bit 7: bootable */ @@ -29,6 +31,6 @@ struct rootsector u32 bsl_st; /* start of bad sector list */ u32 bsl_cnt; /* length of bad sector list */ u16 checksum; /* checksum for bootable disks */ -} __attribute__((__packed__)); +} __packed; int atari_partition(struct parsed_partitions *state); diff --git a/block/partitions/efi.h b/block/partitions/efi.h index 4efcafba7e6..abd0b19288a 100644 --- a/block/partitions/efi.h +++ b/block/partitions/efi.h @@ -32,6 +32,7 @@ #include <linux/major.h> #include <linux/string.h> #include <linux/efi.h> +#include <linux/compiler.h> #define MSDOS_MBR_SIGNATURE 0xaa55 #define EFI_PMBR_OSTYPE_EFI 0xEF @@ -87,13 +88,13 @@ typedef struct _gpt_header { * * uint8_t reserved2[ BlockSize - 92 ]; */ -} __attribute__ ((packed)) gpt_header; +} __packed gpt_header; typedef struct _gpt_entry_attributes { u64 required_to_function:1; u64 reserved:47; u64 type_guid_specific:16; -} __attribute__ ((packed)) gpt_entry_attributes; +} __packed gpt_entry_attributes; typedef struct _gpt_entry { efi_guid_t partition_type_guid; @@ -102,7 +103,7 @@ typedef struct _gpt_entry { __le64 ending_lba; gpt_entry_attributes attributes; efi_char16_t partition_name[72 / sizeof (efi_char16_t)]; -} __attribute__ ((packed)) gpt_entry; +} __packed gpt_entry; typedef struct _gpt_mbr_record { u8 boot_indicator; /* unused by EFI, set to 0x80 for bootable */ @@ -124,7 +125,7 @@ typedef struct _legacy_mbr { __le16 unknown; gpt_mbr_record partition_record[4]; __le16 signature; -} __attribute__ ((packed)) legacy_mbr; +} __packed legacy_mbr; /* Functions */ extern int efi_partition(struct parsed_partitions *state); diff --git a/block/partitions/karma.c b/block/partitions/karma.c index 0ea19312706..9721fa589bb 100644 --- a/block/partitions/karma.c +++ b/block/partitions/karma.c @@ -8,6 +8,7 @@ #include "check.h" #include "karma.h" +#include <linux/compiler.h> int karma_partition(struct parsed_partitions *state) { @@ -26,7 +27,7 @@ int karma_partition(struct parsed_partitions *state) } d_partitions[2]; u8 d_blank[208]; __le16 d_magic; - } __attribute__((packed)) *label; + } __packed *label; struct d_partition *p; data = read_part_sector(state, 0, §); |