Diffstat (limited to 'drivers/md')
-rw-r--r--   drivers/md/bitmap.c | 104
-rw-r--r--   drivers/md/bitmap.h |  10
-rw-r--r--   drivers/md/md.c     |  41
-rw-r--r--   drivers/md/md.h     |   2
-rw-r--r--   drivers/md/raid1.c  |  24
-rw-r--r--   drivers/md/raid1.h  |   2
-rw-r--r--   drivers/md/raid5.c  |  16
7 files changed, 151 insertions, 48 deletions
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 70bd738b8b9..574b09afedd 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -534,6 +534,82 @@ void bitmap_print_sb(struct bitmap *bitmap)
 	kunmap_atomic(sb, KM_USER0);
 }
 
+/*
+ * bitmap_new_disk_sb
+ * @bitmap
+ *
+ * This function is somewhat the reverse of bitmap_read_sb.  bitmap_read_sb
+ * reads and verifies the on-disk bitmap superblock and populates bitmap_info.
+ * This function verifies 'bitmap_info' and populates the on-disk bitmap
+ * structure, which is to be written to disk.
+ *
+ * Returns: 0 on success, -Exxx on error
+ */
+static int bitmap_new_disk_sb(struct bitmap *bitmap)
+{
+	bitmap_super_t *sb;
+	unsigned long chunksize, daemon_sleep, write_behind;
+	int err = -EINVAL;
+
+	bitmap->sb_page = alloc_page(GFP_KERNEL);
+	if (IS_ERR(bitmap->sb_page)) {
+		err = PTR_ERR(bitmap->sb_page);
+		bitmap->sb_page = NULL;
+		return err;
+	}
+	bitmap->sb_page->index = 0;
+
+	sb = kmap_atomic(bitmap->sb_page, KM_USER0);
+
+	sb->magic = cpu_to_le32(BITMAP_MAGIC);
+	sb->version = cpu_to_le32(BITMAP_MAJOR_HI);
+
+	chunksize = bitmap->mddev->bitmap_info.chunksize;
+	BUG_ON(!chunksize);
+	if (!is_power_of_2(chunksize)) {
+		kunmap_atomic(sb, KM_USER0);
+		printk(KERN_ERR "bitmap chunksize not a power of 2\n");
+		return -EINVAL;
+	}
+	sb->chunksize = cpu_to_le32(chunksize);
+
+	daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
+	if (!daemon_sleep ||
+	    (daemon_sleep < 1) || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
+		printk(KERN_INFO "Choosing daemon_sleep default (5 sec)\n");
+		daemon_sleep = 5 * HZ;
+	}
+	sb->daemon_sleep = cpu_to_le32(daemon_sleep);
+	bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
+
+	/*
+	 * FIXME: write_behind for RAID1.  If not specified, what
+	 * is a good choice?  We choose COUNTER_MAX / 2 arbitrarily.
+	 */
+	write_behind = bitmap->mddev->bitmap_info.max_write_behind;
+	if (write_behind > COUNTER_MAX)
+		write_behind = COUNTER_MAX / 2;
+	sb->write_behind = cpu_to_le32(write_behind);
+	bitmap->mddev->bitmap_info.max_write_behind = write_behind;
+
+	/* keep the array size field of the bitmap superblock up to date */
+	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
+
+	memcpy(sb->uuid, bitmap->mddev->uuid, 16);
+
+	bitmap->flags |= BITMAP_STALE;
+	sb->state |= cpu_to_le32(BITMAP_STALE);
+	bitmap->events_cleared = bitmap->mddev->events;
+	sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
+
+	bitmap->flags |= BITMAP_HOSTENDIAN;
+	sb->version = cpu_to_le32(BITMAP_MAJOR_HOSTENDIAN);
+
+	kunmap_atomic(sb, KM_USER0);
+
+	return 0;
+}
+
 /* read the superblock from the bitmap file and initialize some bitmap fields */
 static int bitmap_read_sb(struct bitmap *bitmap)
 {
@@ -575,7 +651,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
 		reason = "unrecognized superblock version";
 	else if (chunksize < 512)
 		reason = "bitmap chunksize too small";
-	else if ((1 << ffz(~chunksize)) != chunksize)
+	else if (!is_power_of_2(chunksize))
 		reason = "bitmap chunksize not a power of 2";
 	else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT)
 		reason = "daemon sleep period out of range";
@@ -1076,8 +1152,8 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
 	}
 
 	printk(KERN_INFO "%s: bitmap initialized from disk: "
-	       "read %lu/%lu pages, set %lu bits\n",
-	       bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt);
+	       "read %lu/%lu pages, set %lu of %lu bits\n",
+	       bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt, chunks);
 
 	return 0;
 
@@ -1332,7 +1408,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
 			return 0;
 		}
 
-		if (unlikely((*bmc & COUNTER_MAX) == COUNTER_MAX)) {
+		if (unlikely(COUNTER(*bmc) == COUNTER_MAX)) {
 			DEFINE_WAIT(__wait);
 			/* note that it is safe to do the prepare_to_wait
 			 * after the test as long as we do it before dropping
@@ -1404,10 +1480,10 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
 		sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
 	}
 
-	if (!success && ! (*bmc & NEEDED_MASK))
+	if (!success && !NEEDED(*bmc))
 		*bmc |= NEEDED_MASK;
 
-	if ((*bmc & COUNTER_MAX) == COUNTER_MAX)
+	if (COUNTER(*bmc) == COUNTER_MAX)
 		wake_up(&bitmap->overflow_wait);
 
 	(*bmc)--;
@@ -1728,9 +1804,16 @@ int bitmap_create(mddev_t *mddev)
 			vfs_fsync(file, 1);
 	}
 	/* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
-	if (!mddev->bitmap_info.external)
-		err = bitmap_read_sb(bitmap);
-	else {
+	if (!mddev->bitmap_info.external) {
+		/*
+		 * If 'MD_ARRAY_FIRST_USE' is set, then device-mapper is
+		 * instructing us to create a new on-disk bitmap instance.
+		 */
+		if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags))
+			err = bitmap_new_disk_sb(bitmap);
+		else
+			err = bitmap_read_sb(bitmap);
+	} else {
 		err = 0;
 		if (mddev->bitmap_info.chunksize == 0 ||
 		    mddev->bitmap_info.daemon_sleep == 0)
@@ -1754,9 +1837,6 @@
 	bitmap->chunks = chunks;
 	bitmap->pages = pages;
 	bitmap->missing_pages = pages;
-	bitmap->counter_bits = COUNTER_BITS;
-
-	bitmap->syncchunk = ~0UL;
 
 #ifdef INJECT_FATAL_FAULT_1
 	bitmap->bp = NULL;
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index d0aeaf46d93..b2a127e891a 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -85,7 +85,6 @@ typedef __u16 bitmap_counter_t;
 
 #define COUNTER_BITS 16
 #define COUNTER_BIT_SHIFT 4
-#define COUNTER_BYTE_RATIO (COUNTER_BITS / 8)
 #define COUNTER_BYTE_SHIFT (COUNTER_BIT_SHIFT - 3)
 
 #define NEEDED_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 1)))
@@ -196,19 +195,10 @@ struct bitmap {
 
 	mddev_t *mddev; /* the md device that the bitmap is for */
 
-	int counter_bits; /* how many bits per block counter */
-
 	/* bitmap chunksize -- how much data does each bit represent? */
 	unsigned long chunkshift; /* chunksize = 2^chunkshift (for bitops) */
 	unsigned long chunks; /* total number of data chunks for the array */
 
-	/* We hold a count on the chunk currently being synced, and drop
-	 * it when the last block is started.  If the resync is aborted
-	 * midway, we need to be able to drop that count, so we remember
-	 * the counted chunk..
-	 */
-	unsigned long syncchunk;
-
 	__u64 events_cleared;
 	int need_sync;
 
diff --git a/drivers/md/md.c b/drivers/md/md.c
index aa640a85bb2..4332fc2f25d 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -351,6 +351,9 @@ void mddev_resume(mddev_t *mddev)
 	mddev->suspended = 0;
 	wake_up(&mddev->sb_wait);
 	mddev->pers->quiesce(mddev, 0);
+
+	md_wakeup_thread(mddev->thread);
+	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
 }
 EXPORT_SYMBOL_GPL(mddev_resume);
 
@@ -1750,6 +1753,18 @@ static struct super_type super_types[] = {
 	},
 };
 
+static void sync_super(mddev_t *mddev, mdk_rdev_t *rdev)
+{
+	if (mddev->sync_super) {
+		mddev->sync_super(mddev, rdev);
+		return;
+	}
+
+	BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
+
+	super_types[mddev->major_version].sync_super(mddev, rdev);
+}
+
 static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
 {
 	mdk_rdev_t *rdev, *rdev2;
@@ -1781,8 +1796,8 @@ int md_integrity_register(mddev_t *mddev)
 
 	if (list_empty(&mddev->disks))
 		return 0; /* nothing to do */
-	if (blk_get_integrity(mddev->gendisk))
-		return 0; /* already registered */
+	if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
+		return 0; /* shouldn't register, or already is */
 	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		/* skip spares and non-functional disks */
 		if (test_bit(Faulty, &rdev->flags))
@@ -2168,8 +2183,7 @@ static void sync_sbs(mddev_t * mddev, int nospares)
 			/* Don't update this superblock */
 			rdev->sb_loaded = 2;
 		} else {
-			super_types[mddev->major_version].
-				sync_super(mddev, rdev);
+			sync_super(mddev, rdev);
 			rdev->sb_loaded = 1;
 		}
 	}
@@ -2462,7 +2476,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 		if (rdev->raid_disk == -1)
 			return -EEXIST;
 		/* personality does all needed checks */
-		if (rdev->mddev->pers->hot_add_disk == NULL)
+		if (rdev->mddev->pers->hot_remove_disk == NULL)
 			return -EINVAL;
 		err = rdev->mddev->pers->
 			hot_remove_disk(rdev->mddev, rdev->raid_disk);
@@ -4619,9 +4633,6 @@ int md_run(mddev_t *mddev)
 	if (mddev->flags)
 		md_update_sb(mddev, 0);
 
-	md_wakeup_thread(mddev->thread);
-	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
-
 	md_new_event(mddev);
 	sysfs_notify_dirent_safe(mddev->sysfs_state);
 	sysfs_notify_dirent_safe(mddev->sysfs_action);
@@ -4642,6 +4653,10 @@ static int do_md_run(mddev_t *mddev)
 		bitmap_destroy(mddev);
 		goto out;
 	}
+
+	md_wakeup_thread(mddev->thread);
+	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
+
 	set_capacity(mddev->gendisk, mddev->array_sectors);
 	revalidate_disk(mddev->gendisk);
 	mddev->changed = 1;
@@ -5259,6 +5274,8 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
 		if (mddev->degraded)
 			set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+		if (!err)
+			md_new_event(mddev);
 		md_wakeup_thread(mddev->thread);
 		return err;
 	}
@@ -6866,8 +6883,8 @@ void md_do_sync(mddev_t *mddev)
 	 * Tune reconstruction:
 	 */
 	window = 32*(PAGE_SIZE/512);
-	printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
-		window/2,(unsigned long long) max_sectors/2);
+	printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n",
+		window/2, (unsigned long long)max_sectors/2);
 
 	atomic_set(&mddev->recovery_active, 0);
 	last_check = 0;
@@ -7045,7 +7062,6 @@ void md_do_sync(mddev_t *mddev)
 }
 EXPORT_SYMBOL_GPL(md_do_sync);
 
-
 static int remove_and_add_spares(mddev_t *mddev)
 {
 	mdk_rdev_t *rdev;
@@ -7157,6 +7173,9 @@ static void reap_sync_thread(mddev_t *mddev)
  */
 void md_check_recovery(mddev_t *mddev)
 {
+	if (mddev->suspended)
+		return;
+
 	if (mddev->bitmap)
 		bitmap_daemon_work(mddev);
 
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 0b1fd3f1d85..1c26c7a08ae 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -124,6 +124,7 @@ struct mddev_s
 #define MD_CHANGE_DEVS 0	/* Some device status has changed */
 #define MD_CHANGE_CLEAN 1	/* transition to or from 'clean' */
 #define MD_CHANGE_PENDING 2	/* switch from 'clean' to 'active' in progress */
+#define MD_ARRAY_FIRST_USE 3	/* First use of array, needs initialization */
 
 	int				suspended;
 	atomic_t			active_io;
@@ -330,6 +331,7 @@ struct mddev_s
 	atomic_t			flush_pending;
 	struct work_struct flush_work;
 	struct work_struct event_work;	/* used by dm to report failure event */
+	void (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
 };
 
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 5d096096f95..f7431b6d844 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -497,21 +497,19 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
 	return best_disk;
 }
 
-static int raid1_congested(void *data, int bits)
+int md_raid1_congested(mddev_t *mddev, int bits)
 {
-	mddev_t *mddev = data;
 	conf_t *conf = mddev->private;
 	int i, ret = 0;
 
-	if (mddev_congested(mddev, bits))
-		return 1;
-
 	rcu_read_lock();
 	for (i = 0; i < mddev->raid_disks; i++) {
 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
 			struct request_queue *q = bdev_get_queue(rdev->bdev);
 
+			BUG_ON(!q);
+
 			/* Note the '|| 1' - when read_balance prefers
 			 * non-congested targets, it can be removed
 			 */
@@ -524,7 +522,15 @@ static int raid1_congested(void *data, int bits)
 	rcu_read_unlock();
 	return ret;
 }
+EXPORT_SYMBOL_GPL(md_raid1_congested);
 
+static int raid1_congested(void *data, int bits)
+{
+	mddev_t *mddev = data;
+
+	return mddev_congested(mddev, bits) ||
+		md_raid1_congested(mddev, bits);
+}
 
 static void flush_pending_writes(conf_t *conf)
 {
@@ -1972,6 +1978,8 @@ static int run(mddev_t *mddev)
 		return PTR_ERR(conf);
 
 	list_for_each_entry(rdev, &mddev->disks, same_set) {
+		if (!mddev->gendisk)
+			continue;
 		disk_stack_limits(mddev->gendisk, rdev->bdev,
 				  rdev->data_offset << 9);
 		/* as we don't honour merge_bvec_fn, we must never risk
@@ -2013,8 +2021,10 @@ static int run(mddev_t *mddev)
 
 	md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
 
-	mddev->queue->backing_dev_info.congested_fn = raid1_congested;
-	mddev->queue->backing_dev_info.congested_data = mddev;
+	if (mddev->queue) {
+		mddev->queue->backing_dev_info.congested_fn = raid1_congested;
+		mddev->queue->backing_dev_info.congested_data = mddev;
+	}
 
 	return md_integrity_register(mddev);
 }
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 5fc4ca1af86..e743a64fac4 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -126,4 +126,6 @@ struct r1bio_s {
  */
 #define R1BIO_Returned 6
 
+extern int md_raid1_congested(mddev_t *mddev, int bits);
+
 #endif
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 346e69bfdab..b72edf35ec5 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -129,7 +129,7 @@ static inline int raid5_dec_bi_hw_segments(struct bio *bio)
 
 static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
 {
-	bio->bi_phys_segments = raid5_bi_phys_segments(bio) || (cnt << 16);
+	bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
 }
 
 /* Find first data disk in a raid6 stripe */
@@ -514,7 +514,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 		bi = &sh->dev[i].req;
 
 		bi->bi_rw = rw;
-		if (rw == WRITE)
+		if (rw & WRITE)
 			bi->bi_end_io = raid5_end_write_request;
 		else
 			bi->bi_end_io = raid5_end_read_request;
@@ -548,13 +548,13 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 			bi->bi_io_vec[0].bv_offset = 0;
 			bi->bi_size = STRIPE_SIZE;
 			bi->bi_next = NULL;
-			if (rw == WRITE &&
+			if ((rw & WRITE) &&
 			    test_bit(R5_ReWrite, &sh->dev[i].flags))
 				atomic_add(STRIPE_SECTORS,
 					&rdev->corrected_errors);
 			generic_make_request(bi);
 		} else {
-			if (rw == WRITE)
+			if (rw & WRITE)
 				set_bit(STRIPE_DEGRADED, &sh->state);
 			pr_debug("skip op %ld on disc %d for sector %llu\n",
 				bi->bi_rw, i, (unsigned long long)sh->sector);
@@ -585,7 +585,7 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
 	init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
 
 	bio_for_each_segment(bvl, bio, i) {
-		int len = bio_iovec_idx(bio, i)->bv_len;
+		int len = bvl->bv_len;
 		int clen;
 		int b_offset = 0;
 
@@ -601,8 +601,8 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
 			clen = len;
 
 		if (clen > 0) {
-			b_offset += bio_iovec_idx(bio, i)->bv_offset;
-			bio_page = bio_iovec_idx(bio, i)->bv_page;
+			b_offset += bvl->bv_offset;
+			bio_page = bvl->bv_page;
 			if (frombio)
 				tx = async_memcpy(page, bio_page, page_offset,
 						  b_offset, clen, &submit);
@@ -4858,7 +4858,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
 			printk(KERN_INFO "md/raid:%s: device %s operational as raid"
 			       " disk %d\n",
 			       mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
-		} else
+		} else if (rdev->saved_raid_disk != raid_disk)
 			/* Cannot rely on bitmap to complete recovery */
 			conf->fullsync = 1;
 	}
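
A note on the raid5_set_bi_hw_segments() change above: as the fixed line shows, raid5 packs two 16-bit counts into the single 32-bit bi_phys_segments field (the low half via raid5_bi_phys_segments(), the high half via the cnt << 16 shift). The logical OR ('||') therefore collapsed the whole field to 0 or 1, while the bitwise OR ('|') preserves both halves. A minimal stand-alone user-space sketch of the difference, with illustrative helper names that are not the kernel's:

/* Stand-alone sketch, not kernel code; mimics packing two 16-bit counts. */
#include <assert.h>
#include <stdint.h>

static uint32_t set_hw_segments_buggy(uint32_t field, uint32_t cnt)
{
	/* '||' is a logical OR: the result is 0 or 1, so both counts are lost */
	return (field & 0xffff) || (cnt << 16);
}

static uint32_t set_hw_segments_fixed(uint32_t field, uint32_t cnt)
{
	/* '|' keeps the low 16-bit count and stores cnt in the high 16 bits */
	return (field & 0xffff) | (cnt << 16);
}

int main(void)
{
	uint32_t field = 3;	/* low-half count currently 3 */

	assert(set_hw_segments_buggy(field, 5) == 1);			/* both counts destroyed */
	assert(set_hw_segments_fixed(field, 5) == ((5u << 16) | 3));	/* both counts preserved */
	return 0;
}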