Diffstat (limited to 'drivers/md/linear.c')
-rw-r--r--  drivers/md/linear.c  218
1 file changed, 92 insertions(+), 126 deletions(-)
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 64f1f3e046e..15c8b7b25a9 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -27,19 +27,27 @@
*/
static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
{
- dev_info_t *hash;
- linear_conf_t *conf = mddev_to_conf(mddev);
- sector_t idx = sector >> conf->sector_shift;
+ int lo, mid, hi;
+ linear_conf_t *conf;
+
+ lo = 0;
+ hi = mddev->raid_disks - 1;
+ conf = rcu_dereference(mddev->private);
/*
- * sector_div(a,b) returns the remainder and sets a to a/b
+ * Binary Search
*/
- (void)sector_div(idx, conf->spacing);
- hash = conf->hash_table[idx];
- while (sector >= hash->num_sectors + hash->start_sector)
- hash++;
- return hash;
+ while (hi > lo) {
+
+ mid = (hi + lo) / 2;
+ if (sector < conf->disks[mid].end_sector)
+ hi = mid;
+ else
+ lo = mid + 1;
+ }
+
+ return conf->disks + lo;
}
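
which_dev() now does a binary search over the cumulative end_sector values instead of walking a hash table: it returns the first member whose end_sector is strictly greater than the requested sector. A standalone sketch of the same search, not part of the patch (end[] and n stand in for conf->disks[] and mddev->raid_disks):

	/* Find the first index whose cumulative end offset exceeds 'sector'.
	 * Purely illustrative; mirrors the loop above.
	 */
	static int find_member(const unsigned long long *end, int n,
			       unsigned long long sector)
	{
		int lo = 0, hi = n - 1, mid;

		while (hi > lo) {
			mid = (hi + lo) / 2;
			if (sector < end[mid])
				hi = mid;
			else
				lo = mid + 1;
		}
		return lo;	/* end[] = {100, 300, 600}, sector 250 -> index 1 */
	}
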
/**
@@ -59,8 +67,10 @@ static int linear_mergeable_bvec(struct request_queue *q,
unsigned long maxsectors, bio_sectors = bvm->bi_size >> 9;
sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
+ rcu_read_lock();
dev0 = which_dev(mddev, sector);
- maxsectors = dev0->num_sectors - (sector - dev0->start_sector);
+ maxsectors = dev0->end_sector - sector;
+ rcu_read_unlock();
if (maxsectors < bio_sectors)
maxsectors = 0;
@@ -79,46 +89,57 @@ static int linear_mergeable_bvec(struct request_queue *q,
static void linear_unplug(struct request_queue *q)
{
mddev_t *mddev = q->queuedata;
- linear_conf_t *conf = mddev_to_conf(mddev);
+ linear_conf_t *conf;
int i;
+ rcu_read_lock();
+ conf = rcu_dereference(mddev->private);
+
for (i=0; i < mddev->raid_disks; i++) {
struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
blk_unplug(r_queue);
}
+ rcu_read_unlock();
}
static int linear_congested(void *data, int bits)
{
mddev_t *mddev = data;
- linear_conf_t *conf = mddev_to_conf(mddev);
+ linear_conf_t *conf;
int i, ret = 0;
+ rcu_read_lock();
+ conf = rcu_dereference(mddev->private);
+
for (i = 0; i < mddev->raid_disks && !ret ; i++) {
struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
ret |= bdi_congested(&q->backing_dev_info, bits);
}
+
+ rcu_read_unlock();
return ret;
}
static sector_t linear_size(mddev_t *mddev, sector_t sectors, int raid_disks)
{
- linear_conf_t *conf = mddev_to_conf(mddev);
+ linear_conf_t *conf;
+ sector_t array_sectors;
+ rcu_read_lock();
+ conf = rcu_dereference(mddev->private);
WARN_ONCE(sectors || raid_disks,
"%s does not support generic reshape\n", __func__);
+ array_sectors = conf->array_sectors;
+ rcu_read_unlock();
- return conf->array_sectors;
+ return array_sectors;
}
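
linear_unplug(), linear_congested() and linear_size() all follow the same RCU read-side discipline: take rcu_read_lock(), fetch the conf with rcu_dereference(), copy out or act on whatever is needed, and drop the lock before returning, since linear_add() may retire the old conf via call_rcu() at any time. A minimal sketch of that pattern (the copied field is just an example):

	rcu_read_lock();
	conf = rcu_dereference(mddev->private);
	array_sectors = conf->array_sectors;	/* copy out while protected */
	rcu_read_unlock();
	return array_sectors;			/* no use of conf after unlock */
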
static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
{
linear_conf_t *conf;
- dev_info_t **table;
mdk_rdev_t *rdev;
- int i, nb_zone, cnt;
- sector_t min_sectors;
- sector_t curr_sector;
+ int i, cnt;
conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(dev_info_t),
GFP_KERNEL);
@@ -131,6 +152,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
list_for_each_entry(rdev, &mddev->disks, same_set) {
int j = rdev->raid_disk;
dev_info_t *disk = conf->disks + j;
+ sector_t sectors;
if (j < 0 || j >= raid_disks || disk->rdev) {
printk("linear: disk numbering problem. Aborting!\n");
@@ -138,6 +160,11 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
}
disk->rdev = rdev;
+ if (mddev->chunk_sectors) {
+ sectors = rdev->sectors;
+ sector_div(sectors, mddev->chunk_sectors);
+ rdev->sectors = sectors * mddev->chunk_sectors;
+ }
blk_queue_stack_limits(mddev->queue,
rdev->bdev->bd_disk->queue);
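
The chunk handling added above truncates each member to a whole number of chunks. A worked example with illustrative values: for mddev->chunk_sectors = 128 and rdev->sectors = 1000, sector_div(sectors, 128) leaves sectors = 7, so rdev->sectors becomes 7 * 128 = 896 and the trailing 104 sectors are left unused.
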
@@ -149,102 +176,24 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
- disk->num_sectors = rdev->sectors;
conf->array_sectors += rdev->sectors;
-
cnt++;
+
}
if (cnt != raid_disks) {
printk("linear: not enough drives present. Aborting!\n");
goto out;
}
- min_sectors = conf->array_sectors;
- sector_div(min_sectors, PAGE_SIZE/sizeof(struct dev_info *));
- if (min_sectors == 0)
- min_sectors = 1;
-
- /* min_sectors is the minimum spacing that will fit the hash
- * table in one PAGE. This may be much smaller than needed.
- * We find the smallest non-terminal set of consecutive devices
- * that is larger than min_sectors and use the size of that as
- * the actual spacing
- */
- conf->spacing = conf->array_sectors;
- for (i=0; i < cnt-1 ; i++) {
- sector_t tmp = 0;
- int j;
- for (j = i; j < cnt - 1 && tmp < min_sectors; j++)
- tmp += conf->disks[j].num_sectors;
- if (tmp >= min_sectors && tmp < conf->spacing)
- conf->spacing = tmp;
- }
-
- /* spacing may be too large for sector_div to work with,
- * so we might need to pre-shift
- */
- conf->sector_shift = 0;
- if (sizeof(sector_t) > sizeof(u32)) {
- sector_t space = conf->spacing;
- while (space > (sector_t)(~(u32)0)) {
- space >>= 1;
- conf->sector_shift++;
- }
- }
/*
- * This code was restructured to work around a gcc-2.95.3 internal
- * compiler error. Alter it with care.
+ * Here we calculate the device offsets.
*/
- {
- sector_t sz;
- unsigned round;
- unsigned long base;
-
- sz = conf->array_sectors >> conf->sector_shift;
- sz += 1; /* force round-up */
- base = conf->spacing >> conf->sector_shift;
- round = sector_div(sz, base);
- nb_zone = sz + (round ? 1 : 0);
- }
- BUG_ON(nb_zone > PAGE_SIZE / sizeof(struct dev_info *));
-
- conf->hash_table = kmalloc (sizeof (struct dev_info *) * nb_zone,
- GFP_KERNEL);
- if (!conf->hash_table)
- goto out;
+ conf->disks[0].end_sector = conf->disks[0].rdev->sectors;
- /*
- * Here we generate the linear hash table
- * First calculate the device offsets.
- */
- conf->disks[0].start_sector = 0;
for (i = 1; i < raid_disks; i++)
- conf->disks[i].start_sector =
- conf->disks[i-1].start_sector +
- conf->disks[i-1].num_sectors;
-
- table = conf->hash_table;
- i = 0;
- for (curr_sector = 0;
- curr_sector < conf->array_sectors;
- curr_sector += conf->spacing) {
-
- while (i < raid_disks-1 &&
- curr_sector >= conf->disks[i+1].start_sector)
- i++;
-
- *table ++ = conf->disks + i;
- }
-
- if (conf->sector_shift) {
- conf->spacing >>= conf->sector_shift;
- /* round spacing up so that when we divide by it,
- * we err on the side of "too-low", which is safest.
- */
- conf->spacing++;
- }
-
- BUG_ON(table - conf->hash_table > nb_zone);
+ conf->disks[i].end_sector =
+ conf->disks[i-1].end_sector +
+ conf->disks[i].rdev->sectors;
return conf;
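
end_sector is thus a running sum: each member's end_sector is the cumulative size of the array up to and including that member. With members of 100, 200 and 300 sectors, the loop above yields end_sector values of 100, 300 and 600, and a member's start can always be recovered as end_sector - rdev->sectors, which is exactly what linear_make_request() does further down.
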
@@ -257,6 +206,8 @@ static int linear_run (mddev_t *mddev)
{
linear_conf_t *conf;
+ if (md_check_no_bitmap(mddev))
+ return -EINVAL;
mddev->queue->queue_lock = &mddev->queue->__queue_lock;
conf = linear_conf(mddev, mddev->raid_disks);
@@ -272,6 +223,12 @@ static int linear_run (mddev_t *mddev)
return 0;
}
+static void free_conf(struct rcu_head *head)
+{
+ linear_conf_t *conf = container_of(head, linear_conf_t, rcu);
+ kfree(conf);
+}
+
static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
{
/* Adding a drive to a linear array allows the array to grow.
@@ -282,7 +239,7 @@ static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
* The current one is never freed until the array is stopped.
* This avoids races.
*/
- linear_conf_t *newconf;
+ linear_conf_t *newconf, *oldconf;
if (rdev->saved_raid_disk != mddev->raid_disks)
return -EINVAL;
@@ -294,25 +251,29 @@ static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
if (!newconf)
return -ENOMEM;
- newconf->prev = mddev_to_conf(mddev);
- mddev->private = newconf;
+ oldconf = rcu_dereference(mddev->private);
mddev->raid_disks++;
+ rcu_assign_pointer(mddev->private, newconf);
md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
set_capacity(mddev->gendisk, mddev->array_sectors);
+ call_rcu(&oldconf->rcu, free_conf);
return 0;
}
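
linear_add() is the update side of the RCU scheme: the new conf is published with rcu_assign_pointer() only after it is fully set up, and the old conf is retired with call_rcu() so that readers still inside which_dev() or linear_size() finish before free_conf() runs. The essential ordering, reduced to a sketch (the helper name is illustrative):

	newconf = build_new_conf(mddev);		/* fully initialise first */
	oldconf = rcu_dereference(mddev->private);
	rcu_assign_pointer(mddev->private, newconf);	/* publish to readers */
	call_rcu(&oldconf->rcu, free_conf);		/* free once readers drain */
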
static int linear_stop (mddev_t *mddev)
{
- linear_conf_t *conf = mddev_to_conf(mddev);
-
+ linear_conf_t *conf = mddev->private;
+
+ /*
+ * We do not require rcu protection here since
+ * we hold reconfig_mutex for both linear_add and
+ * linear_stop, so they cannot race.
+ * We should make sure any old 'conf's are properly
+ * freed though.
+ */
+ rcu_barrier();
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
- do {
- linear_conf_t *t = conf->prev;
- kfree(conf->hash_table);
- kfree(conf);
- conf = t;
- } while (conf);
+ kfree(conf);
return 0;
}
@@ -322,6 +283,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
const int rw = bio_data_dir(bio);
mddev_t *mddev = q->queuedata;
dev_info_t *tmp_dev;
+ sector_t start_sector;
int cpu;
if (unlikely(bio_barrier(bio))) {
@@ -335,33 +297,36 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
bio_sectors(bio));
part_stat_unlock();
+ rcu_read_lock();
tmp_dev = which_dev(mddev, bio->bi_sector);
-
- if (unlikely(bio->bi_sector >= (tmp_dev->num_sectors +
- tmp_dev->start_sector)
- || (bio->bi_sector <
- tmp_dev->start_sector))) {
+ start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
+
+
+ if (unlikely(bio->bi_sector >= (tmp_dev->end_sector)
+ || (bio->bi_sector < start_sector))) {
char b[BDEVNAME_SIZE];
printk("linear_make_request: Sector %llu out of bounds on "
"dev %s: %llu sectors, offset %llu\n",
(unsigned long long)bio->bi_sector,
bdevname(tmp_dev->rdev->bdev, b),
- (unsigned long long)tmp_dev->num_sectors,
- (unsigned long long)tmp_dev->start_sector);
+ (unsigned long long)tmp_dev->rdev->sectors,
+ (unsigned long long)start_sector);
+ rcu_read_unlock();
bio_io_error(bio);
return 0;
}
if (unlikely(bio->bi_sector + (bio->bi_size >> 9) >
- tmp_dev->start_sector + tmp_dev->num_sectors)) {
+ tmp_dev->end_sector)) {
/* This bio crosses a device boundary, so we have to
* split it.
*/
struct bio_pair *bp;
+ sector_t end_sector = tmp_dev->end_sector;
+
+ rcu_read_unlock();
- bp = bio_split(bio,
- tmp_dev->start_sector + tmp_dev->num_sectors
- - bio->bi_sector);
+ bp = bio_split(bio, end_sector - bio->bi_sector);
if (linear_make_request(q, &bp->bio1))
generic_make_request(&bp->bio1);
@@ -372,8 +337,9 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
}
bio->bi_bdev = tmp_dev->rdev->bdev;
- bio->bi_sector = bio->bi_sector - tmp_dev->start_sector
+ bio->bi_sector = bio->bi_sector - start_sector
+ tmp_dev->rdev->data_offset;
+ rcu_read_unlock();
return 1;
}
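
The boundary handling can be checked with a small worked example: if tmp_dev->end_sector = 600 and a 16-sector bio starts at sector 596, the bio crosses the boundary, so bio_split(bio, 600 - 596) yields a 4-sector first half that ends exactly on the boundary and a 12-sector second half that re-enters linear_make_request() and maps onto the next member. For a bio that fits, the remap is bio->bi_sector - start_sector + data_offset; for instance, sector 250 on a member starting at 100 with data_offset 8 lands at sector 158 of the underlying device.
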
@@ -381,7 +347,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
static void linear_status (struct seq_file *seq, mddev_t *mddev)
{
- seq_printf(seq, " %dk rounding", mddev->chunk_size/1024);
+ seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
}
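
The status output is unchanged in meaning: a sector is 512 bytes, so chunk_sectors / 2 is the chunk size in KiB, exactly what the old chunk_size / 1024 printed (chunk_sectors = 128 gives "64k rounding" either way).
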