author     Dave Jones <davej@redhat.com>    2006-06-29 16:01:54 -0400
committer  Dave Jones <davej@redhat.com>    2006-06-29 16:01:54 -0400
commit     55b4d6a52195a8f277ffddf755ddaff359878f41 (patch)
tree       06a3183a562f8da4688f65023f7a18dcad702956 /block/ll_rw_blk.c
parent     adf8a287150667feb5747f8beade62acacc17d4e (diff)
parent     1f1332f727c3229eb2166a83fec5d3de6a73dce2 (diff)
Merge ../linus
Conflicts:
drivers/char/agp/Kconfig
Diffstat (limited to 'block/ll_rw_blk.c')
-rw-r--r--  block/ll_rw_blk.c  25
1 file changed, 14 insertions, 11 deletions
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 7eb36c53f4b..eee03a3876a 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -638,7 +638,7 @@ void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
 	/* Assume anything <= 4GB can be handled by IOMMU.
 	   Actually some IOMMUs can handle everything, but I don't
 	   know of a way to test this here. */
-	if (bounce_pfn < (0xffffffff>>PAGE_SHIFT))
+	if (bounce_pfn < (min_t(u64,0xffffffff,BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
 		dma = 1;
 	q->bounce_pfn = max_low_pfn;
 #else
@@ -1663,6 +1663,8 @@ static void blk_unplug_timeout(unsigned long data)
  **/
 void blk_start_queue(request_queue_t *q)
 {
+	WARN_ON(!irqs_disabled());
+
 	clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
 
 	/*
@@ -1878,7 +1880,8 @@ EXPORT_SYMBOL(blk_alloc_queue_node);
  * get dealt with eventually.
  *
  * The queue spin lock must be held while manipulating the requests on the
- * request queue.
+ * request queue; this lock will be taken also from interrupt context, so irq
+ * disabling is needed for it.
 *
 * Function returns a pointer to the initialized request queue, or NULL if
 * it didn't succeed.
@@ -2742,7 +2745,7 @@ static int attempt_merge(request_queue_t *q, struct request *req,
 		return 0;
 
 	/*
-	 * not contigious
+	 * not contiguous
 	 */
 	if (req->sector + req->nr_sectors != next->sector)
 		return 0;
@@ -2824,6 +2827,9 @@ static void init_request_from_bio(struct request *req, struct bio *bio)
 	if (unlikely(bio_barrier(bio)))
 		req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
 
+	if (bio_sync(bio))
+		req->flags |= REQ_RW_SYNC;
+
 	req->errors = 0;
 	req->hard_sector = req->sector = bio->bi_sector;
 	req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio);
@@ -3359,12 +3365,11 @@ EXPORT_SYMBOL(end_that_request_chunk);
  */
 static void blk_done_softirq(struct softirq_action *h)
 {
-	struct list_head *cpu_list;
-	LIST_HEAD(local_list);
+	struct list_head *cpu_list, local_list;
 
 	local_irq_disable();
 	cpu_list = &__get_cpu_var(blk_cpu_done);
-	list_splice_init(cpu_list, &local_list);
+	list_replace_init(cpu_list, &local_list);
 	local_irq_enable();
 
 	while (!list_empty(&local_list)) {
@@ -3398,7 +3403,7 @@ static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
 }
 
 
-static struct notifier_block blk_cpu_notifier = {
+static struct notifier_block __devinitdata blk_cpu_notifier = {
 	.notifier_call	= blk_cpu_notify,
 };
 
@@ -3410,7 +3415,7 @@ static struct notifier_block blk_cpu_notifier = {
  *
  * Description:
  *     Ends all I/O on a request. It does not handle partial completions,
- *     unless the driver actually implements this in its completionc callback
+ *     unless the driver actually implements this in its completion callback
  *     through requeueing. Theh actual completion happens out-of-order,
  *     through a softirq handler. The user must have registered a completion
  *     callback through blk_queue_softirq_done().
@@ -3536,9 +3541,7 @@ int __init blk_dev_init(void)
 		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
 
 	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
-#ifdef CONFIG_HOTPLUG_CPU
-	register_cpu_notifier(&blk_cpu_notifier);
-#endif
+	register_hotcpu_notifier(&blk_cpu_notifier);
 
 	blk_max_low_pfn = max_low_pfn;
 	blk_max_pfn = max_pfn;
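
A few notes on the merged changes follow. First, the blk_queue_bounce_limit() hunk: the 4GB constant is now clamped against BLK_BOUNCE_HIGH before the shift, so a stricter platform bounce limit wins. A standalone sketch of the min_t() clamp pattern; the BLK_BOUNCE_HIGH value here is hypothetical, supplied only so the example compiles (the real constant is derived from blk_max_low_pfn):

#include <stdint.h>
#include <stdio.h>

/* Sketch of the kernel's min_t(): force both operands to one type,
 * then take the smaller.  Illustrative only; the real macro uses a
 * statement expression to avoid double evaluation. */
#define min_t(type, x, y) ((type)(x) < (type)(y) ? (type)(x) : (type)(y))

#define PAGE_SHIFT 12
#define BLK_BOUNCE_HIGH 0x37fff000ULL   /* hypothetical value for this demo */

int main(void)
{
        uint64_t bounce_pfn = 0x12345;

        /* The merged test: whichever limit is lower decides whether the
         * device can be assumed DMA-capable without bouncing. */
        if (bounce_pfn < (min_t(uint64_t, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
                printf("within limit: no bounce buffering needed\n");
        return 0;
}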
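
The WARN_ON(!irqs_disabled()) added to blk_start_queue(), together with the expanded comment above blk_init_queue(), encodes one rule: the queue lock is also taken from interrupt context, so it must be held with interrupts disabled. A hedged sketch of the caller pattern this enforces; q is assumed to be a driver's request_queue_t, and this is an in-kernel fragment, not a runnable userspace program:

/* Restarting a stopped queue from process context. */
unsigned long flags;

spin_lock_irqsave(q->queue_lock, flags);   /* irqs off: WARN_ON stays quiet */
blk_start_queue(q);                        /* safe against the irq-side path */
spin_unlock_irqrestore(q->queue_lock, flags);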
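
In blk_done_softirq(), list_replace_init() moves the whole per-CPU completion list onto the stack in one unconditional step, so local_list no longer needs a LIST_HEAD() initializer of its own. A self-contained userspace sketch of the semantics, with the relevant list primitives re-created here so it compiles on its own (the real definitions live in include/linux/list.h):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

/* list_replace_init(old, new): new takes over old's elements without
 * first checking whether old is empty, then old is re-initialized. */
static void list_replace_init(struct list_head *old, struct list_head *new)
{
        new->next = old->next;
        new->next->prev = new;
        new->prev = old->prev;
        new->prev->next = new;
        INIT_LIST_HEAD(old);
}

int main(void)
{
        struct list_head cpu_list, node, local_list; /* local_list: no init needed */

        /* queue one completed-request node, as the softirq would find it */
        INIT_LIST_HEAD(&cpu_list);
        node.next = node.prev = &cpu_list;
        cpu_list.next = cpu_list.prev = &node;

        list_replace_init(&cpu_list, &local_list);

        printf("cpu_list empty: %d, local_list empty: %d\n",
               list_empty(&cpu_list), list_empty(&local_list));  /* 1, 0 */
        return 0;
}

The gain over list_splice_init() is that nothing has to be pre-initialized or checked: the handoff is a fixed set of pointer writes plus the re-init of the source head.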
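
Finally, blk_dev_init() swaps the open-coded #ifdef CONFIG_HOTPLUG_CPU block for register_hotcpu_notifier(). The wrapper, sketched below along the lines of include/linux/cpu.h from this period, compiles to the real registration when CPU hotplug is configured and to a no-op otherwise, so the #ifdef lives in one header rather than in every caller:

/* Sketch of the wrapper; the exact definition is in include/linux/cpu.h. */
#ifdef CONFIG_HOTPLUG_CPU
#define register_hotcpu_notifier(nb)    register_cpu_notifier(nb)
#else
/* No CPU hotplug: evaluate nb so the notifier_block isn't "unused". */
#define register_hotcpu_notifier(nb)    do { (void)(nb); } while (0)
#endif

This pairs with the __devinitdata marking on blk_cpu_notifier in the earlier hunk, which lets the structure be discarded on kernels where it can never be registered.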