author     Paul Mackerras <paulus@samba.org>    2006-03-29 13:24:50 +1100
committer  Paul Mackerras <paulus@samba.org>    2006-03-29 13:24:50 +1100
commit     bac30d1a78d0f11c613968fc8b351a91ed465386
tree       e52f3c876522a2f6047a6ec1c27df2e8a79486b8 /block/ll_rw_blk.c
parent     e8222502ee6157e2713da9e0792c21f4ad458d50
parent     ca9ba4471c1203bb6e759b76e83167fec54fe590
Merge ../linux-2.6
Diffstat (limited to 'block/ll_rw_blk.c')
-rw-r--r--  block/ll_rw_blk.c | 39 +++++++++++++++++++++++++--------------
1 file changed, 25 insertions(+), 14 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 062067fa7ea..5b26af8597f 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -785,6 +785,8 @@ void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
 	t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
 	t->max_segment_size = min(t->max_segment_size,b->max_segment_size);
 	t->hardsect_size = max(t->hardsect_size,b->hardsect_size);
+	if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
+		clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
 }
 
 EXPORT_SYMBOL(blk_queue_stack_limits);
@@ -906,17 +908,15 @@ init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
 			__FUNCTION__, depth);
 	}
 
-	tag_index = kmalloc(depth * sizeof(struct request *), GFP_ATOMIC);
+	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
 	if (!tag_index)
 		goto fail;
 
 	nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
-	tag_map = kmalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
+	tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
 	if (!tag_map)
 		goto fail;
 
-	memset(tag_index, 0, depth * sizeof(struct request *));
-	memset(tag_map, 0, nr_ulongs * sizeof(unsigned long));
 	tags->real_max_depth = depth;
 	tags->max_depth = depth;
 	tags->tag_index = tag_index;
@@ -2479,10 +2479,12 @@ void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
 	rq->rq_disk = bd_disk;
 	rq->flags |= REQ_NOMERGE;
 	rq->end_io = done;
-	elv_add_request(q, rq, where, 1);
-	generic_unplug_device(q);
+	WARN_ON(irqs_disabled());
+	spin_lock_irq(q->queue_lock);
+	__elv_add_request(q, rq, where, 1);
+	__generic_unplug_device(q);
+	spin_unlock_irq(q->queue_lock);
 }
-
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 
 /**
@@ -3512,7 +3514,7 @@ int __init blk_dev_init(void)
 	iocontext_cachep = kmem_cache_create("blkdev_ioc",
 			sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
 
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
 
 	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
@@ -3537,11 +3539,17 @@ void put_io_context(struct io_context *ioc)
 	BUG_ON(atomic_read(&ioc->refcount) == 0);
 
 	if (atomic_dec_and_test(&ioc->refcount)) {
+		struct cfq_io_context *cic;
+
 		rcu_read_lock();
 		if (ioc->aic && ioc->aic->dtor)
 			ioc->aic->dtor(ioc->aic);
-		if (ioc->cic && ioc->cic->dtor)
-			ioc->cic->dtor(ioc->cic);
+		if (ioc->cic_root.rb_node != NULL) {
+			struct rb_node *n = rb_first(&ioc->cic_root);
+
+			cic = rb_entry(n, struct cfq_io_context, rb_node);
+			cic->dtor(ioc);
+		}
 		rcu_read_unlock();
 
 		kmem_cache_free(iocontext_cachep, ioc);
@@ -3554,6 +3562,7 @@ void exit_io_context(void)
 {
 	unsigned long flags;
 	struct io_context *ioc;
+	struct cfq_io_context *cic;
 
 	local_irq_save(flags);
 	task_lock(current);
@@ -3565,9 +3574,11 @@ void exit_io_context(void)
 
 	if (ioc->aic && ioc->aic->exit)
 		ioc->aic->exit(ioc->aic);
-	if (ioc->cic && ioc->cic->exit)
-		ioc->cic->exit(ioc->cic);
-
+	if (ioc->cic_root.rb_node != NULL) {
+		cic = rb_entry(rb_first(&ioc->cic_root), struct cfq_io_context, rb_node);
+		cic->exit(ioc);
+	}
+
 	put_io_context(ioc);
 }
 
@@ -3596,7 +3607,7 @@ struct io_context *current_io_context(gfp_t gfp_flags)
 		ret->last_waited = jiffies; /* doesn't matter... */
 		ret->nr_batch_requests = 0; /* because this is 0 */
 		ret->aic = NULL;
-		ret->cic = NULL;
+		ret->cic_root.rb_node = NULL;
 		tsk->io_context = ret;
 	}
 
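
The init_tag_map() hunk folds the kmalloc()-then-memset() pair into kzalloc(), which returns memory already zeroed. A minimal sketch of the idiom (the ptr and n names here are illustrative, not from this file):

```c
/* Before: allocate, then zero in a second step. */
ptr = kmalloc(n * sizeof(*ptr), GFP_ATOMIC);
if (!ptr)
	goto fail;
memset(ptr, 0, n * sizeof(*ptr));

/* After: kzalloc() allocates and zeroes in one call. */
ptr = kzalloc(n * sizeof(*ptr), GFP_ATOMIC);
if (!ptr)
	goto fail;
```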
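
The blk_execute_rq_nowait() hunk swaps the self-locking helpers for their double-underscore variants, which require the caller to hold q->queue_lock. spin_lock_irq() and spin_unlock_irq() disable and then unconditionally re-enable local interrupts, so the added WARN_ON(irqs_disabled()) flags callers that enter with interrupts already off and would have them silently re-enabled. The general shape of the pattern:

```c
WARN_ON(irqs_disabled());        /* unlock below re-enables IRQs unconditionally */
spin_lock_irq(q->queue_lock);    /* disable local IRQs, then take the lock */
/* ... call __-prefixed helpers that assume queue_lock is held ... */
spin_unlock_irq(q->queue_lock);  /* drop the lock, re-enable IRQs */
```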
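
The io_context hunks track the CFQ-side change that replaces the single ioc->cic pointer with an rbtree rooted at ioc->cic_root, so one io_context can carry per-queue cfq_io_context entries. The teardown paths fetch only the leftmost entry; its dtor/exit callback is expected to dispose of the remaining tree. A sketch of the lookup idiom (rb_entry() is container_of(), recovering the enclosing struct from its embedded rb_node):

```c
struct rb_node *n = rb_first(&ioc->cic_root);  /* leftmost node, NULL if empty */

if (n) {
	struct cfq_io_context *cic =
		rb_entry(n, struct cfq_io_context, rb_node);
	cic->dtor(ioc);  /* callback walks and frees the remaining entries */
}
```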