Diffstat (limited to 'drivers/md')
-rw-r--r--   drivers/md/bitmap.c   | 45
-rw-r--r--   drivers/md/dm-mpath.c | 16
-rw-r--r--   drivers/md/dm.c       | 12
-rw-r--r--   drivers/md/md.c       | 10
4 files changed, 68 insertions, 15 deletions
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 7e65bad522c..ac89a5deaca 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -238,15 +238,47 @@ static struct page *read_sb_page(mddev_t *mddev, long offset, unsigned long inde
 }
 
+static mdk_rdev_t *next_active_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
+{
+	/* Iterate the disks of an mddev, using rcu to protect access to the
+	 * linked list, and raising the refcount of devices we return to ensure
+	 * they don't disappear while in use.
+	 * As devices are only added or removed when raid_disk is < 0 and
+	 * nr_pending is 0 and In_sync is clear, the entries we return will
+	 * still be in the same position on the list when we re-enter
+	 * list_for_each_continue_rcu.
+	 */
+	struct list_head *pos;
+	rcu_read_lock();
+	if (rdev == NULL)
+		/* start at the beginning */
+		pos = &mddev->disks;
+	else {
+		/* release the previous rdev and start from there. */
+		rdev_dec_pending(rdev, mddev);
+		pos = &rdev->same_set;
+	}
+	list_for_each_continue_rcu(pos, &mddev->disks) {
+		rdev = list_entry(pos, mdk_rdev_t, same_set);
+		if (rdev->raid_disk >= 0 &&
+		    test_bit(In_sync, &rdev->flags) &&
+		    !test_bit(Faulty, &rdev->flags)) {
+			/* this is a usable devices */
+			atomic_inc(&rdev->nr_pending);
+			rcu_read_unlock();
+			return rdev;
+		}
+	}
+	rcu_read_unlock();
+	return NULL;
+}
+
 static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
 {
-	mdk_rdev_t *rdev;
+	mdk_rdev_t *rdev = NULL;
 	mddev_t *mddev = bitmap->mddev;
 
-	rcu_read_lock();
-	rdev_for_each_rcu(rdev, mddev)
-		if (test_bit(In_sync, &rdev->flags)
-		    && !test_bit(Faulty, &rdev->flags)) {
+	while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
 			int size = PAGE_SIZE;
 			if (page->index == bitmap->file_pages-1)
 				size = roundup(bitmap->last_page_size,
@@ -281,8 +313,7 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
 				       + page->index * (PAGE_SIZE/512),
 				       size,
 				       page);
-		}
-	rcu_read_unlock();
+	}
 
 	if (wait)
 		md_super_wait(mddev);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 71dd65aa31b..c2fcf28b4c7 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -63,6 +63,7 @@ struct multipath {
 	const char *hw_handler_name;
 	struct work_struct activate_path;
+	struct pgpath *pgpath_to_activate;
 	unsigned nr_priority_groups;
 	struct list_head priority_groups;
 	unsigned pg_init_required;	/* pg_init needs calling? */
@@ -146,6 +147,7 @@ static struct priority_group *alloc_priority_group(void)
 
 static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
 {
+	unsigned long flags;
 	struct pgpath *pgpath, *tmp;
 	struct multipath *m = ti->private;
 
@@ -154,6 +156,10 @@ static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
 		if (m->hw_handler_name)
 			scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
 		dm_put_device(ti, pgpath->path.dev);
+		spin_lock_irqsave(&m->lock, flags);
+		if (m->pgpath_to_activate == pgpath)
+			m->pgpath_to_activate = NULL;
+		spin_unlock_irqrestore(&m->lock, flags);
 		free_pgpath(pgpath);
 	}
 }
@@ -421,6 +427,7 @@ static void process_queued_ios(struct work_struct *work)
 		__choose_pgpath(m);
 
 	pgpath = m->current_pgpath;
+	m->pgpath_to_activate = m->current_pgpath;
 
 	if ((pgpath && !m->queue_io) ||
 	    (!pgpath && !m->queue_if_no_path))
@@ -1093,8 +1100,15 @@ static void activate_path(struct work_struct *work)
 	int ret;
 	struct multipath *m =
 		container_of(work, struct multipath, activate_path);
-	struct dm_path *path = &m->current_pgpath->path;
+	struct dm_path *path;
+	unsigned long flags;
 
+	spin_lock_irqsave(&m->lock, flags);
+	path = &m->pgpath_to_activate->path;
+	m->pgpath_to_activate = NULL;
+	spin_unlock_irqrestore(&m->lock, flags);
+	if (!path)
+		return;
 	ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));
 	pg_init_done(path, ret);
 }
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index bca448e1187..ace998ce59f 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -837,12 +837,14 @@ static int dm_merge_bvec(struct request_queue *q,
 	struct dm_table *map = dm_get_table(md);
 	struct dm_target *ti;
 	sector_t max_sectors;
-	int max_size;
+	int max_size = 0;
 
 	if (unlikely(!map))
-		return 0;
+		goto out;
 
 	ti = dm_table_find_target(map, bvm->bi_sector);
+	if (!dm_target_is_valid(ti))
+		goto out_table;
 
 	/*
 	 * Find maximum amount of I/O that won't need splitting
@@ -861,14 +863,16 @@ static int dm_merge_bvec(struct request_queue *q,
 
 	if (max_size && ti->type->merge)
 		max_size = ti->type->merge(ti, bvm, biovec, max_size);
 
+out_table:
+	dm_table_put(map);
+
+out:
 	/*
 	 * Always allow an entire first page
 	 */
 	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
 		max_size = biovec->bv_len;
 
-	dm_table_put(map);
-
 	return max_size;
 }
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8cfadc5bd2b..deeac4b4417 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3841,8 +3841,6 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
 
 		del_timer_sync(&mddev->safemode_timer);
 
-		invalidate_partition(disk, 0);
-
 		switch(mode) {
 		case 1: /* readonly */
 			err = -ENXIO;
@@ -5763,7 +5761,11 @@ void md_do_sync(mddev_t *mddev)
 				 * time 'round when curr_resync == 2
 				 */
 				continue;
-			prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE);
+			/* We need to wait 'interruptible' so as not to
+			 * contribute to the load average, and not to
+			 * be caught by 'softlockup'
+			 */
+			prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
 			if (!kthread_should_stop() &&
 			    mddev2->curr_resync >= mddev->curr_resync) {
 				printk(KERN_INFO "md: delaying %s of %s"
@@ -5771,6 +5773,8 @@ void md_do_sync(mddev_t *mddev)
 				       " share one or more physical units)\n",
 				       desc, mdname(mddev), mdname(mddev2));
 				mddev_put(mddev2);
+				if (signal_pending(current))
+					flush_signals(current);
 				schedule();
 				finish_wait(&resync_wait, &wq);
 				goto try_again;
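
Illustration (not part of the commit): the bitmap.c hunk above replaces a plain rdev_for_each_rcu() walk with next_active_rdev(), i.e. an RCU-protected list walk that pins each returned element with a reference count so the caller may block (here: issue superblock writes) between iterations. The sketch below restates that pattern with hypothetical names (struct item, item->entry, item->pending, next_item); it relies on the same invariant the patch comment states, namely that an element is not unlinked from the list while the walker still references it, so resuming from the previous element's list_head is safe.

/* Sketch only, not from the patch: generic RCU-list walk with
 * per-element refcounting, modelled on next_active_rdev() above.
 * All names below are hypothetical.
 */
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <asm/atomic.h>

struct item {
	struct list_head entry;
	atomic_t pending;	/* assumed: not unlinked while pending > 0 */
};

static struct item *next_item(struct item *prev, struct list_head *item_list)
{
	struct list_head *pos;
	struct item *it;

	rcu_read_lock();
	if (prev == NULL)
		/* first call: start from the list head */
		pos = item_list;
	else {
		/* drop the reference taken on the previous call and
		 * continue the walk from that element's position */
		atomic_dec(&prev->pending);
		pos = &prev->entry;
	}
	list_for_each_continue_rcu(pos, item_list) {
		it = list_entry(pos, struct item, entry);
		/* pin the element so the caller may sleep while using it */
		atomic_inc(&it->pending);
		rcu_read_unlock();
		return it;
	}
	rcu_read_unlock();
	return NULL;
}

A caller would then loop with while ((it = next_item(it, &my_list)) != NULL) { ... }, mirroring the new while loop in write_sb_page().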