Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/.gitignore              4
-rw-r--r--  drivers/md/Kconfig                18
-rw-r--r--  drivers/md/Makefile               77
-rw-r--r--  drivers/md/bitmap.c              505
-rw-r--r--  drivers/md/bitmap.h                6
-rw-r--r--  drivers/md/dm-crypt.c            342
-rw-r--r--  drivers/md/dm-delay.c              6
-rw-r--r--  drivers/md/dm-exception-store.c    4
-rw-r--r--  drivers/md/dm-exception-store.h    3
-rw-r--r--  drivers/md/dm-io.c                12
-rw-r--r--  drivers/md/dm-ioctl.c            207
-rw-r--r--  drivers/md/dm-kcopyd.c             2
-rw-r--r--  drivers/md/dm-linear.c             3
-rw-r--r--  drivers/md/dm-mpath.c             11
-rw-r--r--  drivers/md/dm-raid1.c              4
-rw-r--r--  drivers/md/dm-snap-persistent.c    6
-rw-r--r--  drivers/md/dm-snap.c              62
-rw-r--r--  drivers/md/dm-stripe.c            89
-rw-r--r--  drivers/md/dm-table.c             99
-rw-r--r--  drivers/md/dm-target.c             5
-rw-r--r--  drivers/md/dm-zero.c               5
-rw-r--r--  drivers/md/dm.c                  374
-rw-r--r--  drivers/md/dm.h                   14
-rw-r--r--  drivers/md/linear.c                2
-rw-r--r--  drivers/md/md.c                  361
-rw-r--r--  drivers/md/md.h                   61
-rw-r--r--  drivers/md/mktables.c            132
-rw-r--r--  drivers/md/multipath.c             8
-rw-r--r--  drivers/md/raid0.c                 2
-rw-r--r--  drivers/md/raid1.c                37
-rw-r--r--  drivers/md/raid10.c               43
-rw-r--r--  drivers/md/raid5.c               183
-rw-r--r--  drivers/md/raid5.h                 9
-rw-r--r--  drivers/md/raid6algos.c          154
-rw-r--r--  drivers/md/raid6altivec.uc       130
-rw-r--r--  drivers/md/raid6int.uc           117
-rw-r--r--  drivers/md/raid6mmx.c            142
-rw-r--r--  drivers/md/raid6recov.c          132
-rw-r--r--  drivers/md/raid6sse1.c           162
-rw-r--r--  drivers/md/raid6sse2.c           262
-rw-r--r--  drivers/md/raid6test/Makefile     75
-rw-r--r--  drivers/md/raid6test/test.c      124
-rw-r--r--  drivers/md/raid6x86.h             61
-rw-r--r--  drivers/md/unroll.awk             20
44 files changed, 1559 insertions, 2516 deletions
diff --git a/drivers/md/.gitignore b/drivers/md/.gitignore
deleted file mode 100644
index a7afec6b19c..00000000000
--- a/drivers/md/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-mktables
-raid6altivec*.c
-raid6int*.c
-raid6tables.c
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 4a6feac8c94..bf1a95e3155 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -121,7 +121,7 @@ config MD_RAID10
config MD_RAID456
tristate "RAID-4/RAID-5/RAID-6 mode"
depends on BLK_DEV_MD
- select MD_RAID6_PQ
+ select RAID6_PQ
select ASYNC_MEMCPY
select ASYNC_XOR
select ASYNC_PQ
@@ -165,22 +165,6 @@ config MULTICORE_RAID456
If unsure, say N.
-config MD_RAID6_PQ
- tristate
-
-config ASYNC_RAID6_TEST
- tristate "Self test for hardware accelerated raid6 recovery"
- depends on MD_RAID6_PQ
- select ASYNC_RAID6_RECOV
- ---help---
- This is a one-shot self test that permutes through the
- recovery of all the possible two disk failure scenarios for a
- N-disk array. Recovery is performed with the asynchronous
- raid6 recovery routines, and will optionally use an offload
- engine if one is available.
-
- If unsure, say N.
-
config MD_MULTIPATH
tristate "Multipath I/O support"
depends on BLK_DEV_MD
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index e355e7f6a53..5e3aac41919 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -12,13 +12,6 @@ dm-log-userspace-y \
+= dm-log-userspace-base.o dm-log-userspace-transfer.o
md-mod-y += md.o bitmap.o
raid456-y += raid5.o
-raid6_pq-y += raid6algos.o raid6recov.o raid6tables.o \
- raid6int1.o raid6int2.o raid6int4.o \
- raid6int8.o raid6int16.o raid6int32.o \
- raid6altivec1.o raid6altivec2.o raid6altivec4.o \
- raid6altivec8.o \
- raid6mmx.o raid6sse1.o raid6sse2.o
-hostprogs-y += mktables
# Note: link order is important. All raid personalities
# and must come before md.o, as they each initialise
@@ -29,7 +22,6 @@ obj-$(CONFIG_MD_LINEAR) += linear.o
obj-$(CONFIG_MD_RAID0) += raid0.o
obj-$(CONFIG_MD_RAID1) += raid1.o
obj-$(CONFIG_MD_RAID10) += raid10.o
-obj-$(CONFIG_MD_RAID6_PQ) += raid6_pq.o
obj-$(CONFIG_MD_RAID456) += raid456.o
obj-$(CONFIG_MD_MULTIPATH) += multipath.o
obj-$(CONFIG_MD_FAULTY) += faulty.o
@@ -45,75 +37,6 @@ obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o dm-region-hash.o
obj-$(CONFIG_DM_LOG_USERSPACE) += dm-log-userspace.o
obj-$(CONFIG_DM_ZERO) += dm-zero.o
-quiet_cmd_unroll = UNROLL $@
- cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$(UNROLL) \
- < $< > $@ || ( rm -f $@ && exit 1 )
-
-ifeq ($(CONFIG_ALTIVEC),y)
-altivec_flags := -maltivec -mabi=altivec
-endif
-
ifeq ($(CONFIG_DM_UEVENT),y)
dm-mod-objs += dm-uevent.o
endif
-
-targets += raid6int1.c
-$(obj)/raid6int1.c: UNROLL := 1
-$(obj)/raid6int1.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-targets += raid6int2.c
-$(obj)/raid6int2.c: UNROLL := 2
-$(obj)/raid6int2.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-targets += raid6int4.c
-$(obj)/raid6int4.c: UNROLL := 4
-$(obj)/raid6int4.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-targets += raid6int8.c
-$(obj)/raid6int8.c: UNROLL := 8
-$(obj)/raid6int8.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-targets += raid6int16.c
-$(obj)/raid6int16.c: UNROLL := 16
-$(obj)/raid6int16.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-targets += raid6int32.c
-$(obj)/raid6int32.c: UNROLL := 32
-$(obj)/raid6int32.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-CFLAGS_raid6altivec1.o += $(altivec_flags)
-targets += raid6altivec1.c
-$(obj)/raid6altivec1.c: UNROLL := 1
-$(obj)/raid6altivec1.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-CFLAGS_raid6altivec2.o += $(altivec_flags)
-targets += raid6altivec2.c
-$(obj)/raid6altivec2.c: UNROLL := 2
-$(obj)/raid6altivec2.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-CFLAGS_raid6altivec4.o += $(altivec_flags)
-targets += raid6altivec4.c
-$(obj)/raid6altivec4.c: UNROLL := 4
-$(obj)/raid6altivec4.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-CFLAGS_raid6altivec8.o += $(altivec_flags)
-targets += raid6altivec8.c
-$(obj)/raid6altivec8.c: UNROLL := 8
-$(obj)/raid6altivec8.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-quiet_cmd_mktable = TABLE $@
- cmd_mktable = $(obj)/mktables > $@ || ( rm -f $@ && exit 1 )
-
-targets += raid6tables.c
-$(obj)/raid6tables.c: $(obj)/mktables FORCE
- $(call if_changed,mktable)
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 1742435ce3a..ed4900ade93 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -13,7 +13,6 @@
* Still to do:
*
* flush after percent set rather than just time based. (maybe both).
- * wait if count gets too high, wake when it drops to half.
*/
#include <linux/blkdev.h>
@@ -30,6 +29,7 @@
#include "md.h"
#include "bitmap.h"
+#include <linux/dm-dirty-log.h>
/* debug macros */
#define DEBUG 0
@@ -51,9 +51,6 @@
#define INJECT_FATAL_FAULT_3 0 /* undef */
#endif
-//#define DPRINTK PRINTK /* set this NULL to avoid verbose debug output */
-#define DPRINTK(x...) do { } while(0)
-
#ifndef PRINTK
# if DEBUG > 0
# define PRINTK(x...) printk(KERN_DEBUG x)
@@ -62,12 +59,11 @@
# endif
#endif
-static inline char * bmname(struct bitmap *bitmap)
+static inline char *bmname(struct bitmap *bitmap)
{
return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
}
-
/*
* just a placeholder - calls kmalloc for bitmap pages
*/
@@ -78,7 +74,7 @@ static unsigned char *bitmap_alloc_page(struct bitmap *bitmap)
#ifdef INJECT_FAULTS_1
page = NULL;
#else
- page = kmalloc(PAGE_SIZE, GFP_NOIO);
+ page = kzalloc(PAGE_SIZE, GFP_NOIO);
#endif
if (!page)
printk("%s: bitmap_alloc_page FAILED\n", bmname(bitmap));
@@ -107,7 +103,8 @@ static void bitmap_free_page(struct bitmap *bitmap, unsigned char *page)
* if we find our page, we increment the page's refcount so that it stays
* allocated while we're using it
*/
-static int bitmap_checkpage(struct bitmap *bitmap, unsigned long page, int create)
+static int bitmap_checkpage(struct bitmap *bitmap,
+ unsigned long page, int create)
__releases(bitmap->lock)
__acquires(bitmap->lock)
{
@@ -121,7 +118,6 @@ __acquires(bitmap->lock)
return -EINVAL;
}
-
if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
return 0;
@@ -131,43 +127,34 @@ __acquires(bitmap->lock)
if (!create)
return -ENOENT;
- spin_unlock_irq(&bitmap->lock);
-
/* this page has not been allocated yet */
- if ((mappage = bitmap_alloc_page(bitmap)) == NULL) {
+ spin_unlock_irq(&bitmap->lock);
+ mappage = bitmap_alloc_page(bitmap);
+ spin_lock_irq(&bitmap->lock);
+
+ if (mappage == NULL) {
PRINTK("%s: bitmap map page allocation failed, hijacking\n",
bmname(bitmap));
/* failed - set the hijacked flag so that we can use the
* pointer as a counter */
- spin_lock_irq(&bitmap->lock);
if (!bitmap->bp[page].map)
bitmap->bp[page].hijacked = 1;
- goto out;
- }
-
- /* got a page */
-
- spin_lock_irq(&bitmap->lock);
-
- /* recheck the page */
-
- if (bitmap->bp[page].map || bitmap->bp[page].hijacked) {
+ } else if (bitmap->bp[page].map ||
+ bitmap->bp[page].hijacked) {
/* somebody beat us to getting the page */
bitmap_free_page(bitmap, mappage);
return 0;
- }
+ } else {
- /* no page was in place and we have one, so install it */
+ /* no page was in place and we have one, so install it */
- memset(mappage, 0, PAGE_SIZE);
- bitmap->bp[page].map = mappage;
- bitmap->missing_pages--;
-out:
+ bitmap->bp[page].map = mappage;
+ bitmap->missing_pages--;
+ }
return 0;
}
-
/* if page is completely empty, put it back on the free list, or dealloc it */
/* if page was hijacked, unmark the flag so it might get alloced next time */
/* Note: lock should be held when calling this */
@@ -183,26 +170,15 @@ static void bitmap_checkfree(struct bitmap *bitmap, unsigned long page)
if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */
bitmap->bp[page].hijacked = 0;
bitmap->bp[page].map = NULL;
- return;
+ } else {
+ /* normal case, free the page */
+ ptr = bitmap->bp[page].map;
+ bitmap->bp[page].map = NULL;
+ bitmap->missing_pages++;
+ bitmap_free_page(bitmap, ptr);
}
-
- /* normal case, free the page */
-
-#if 0
-/* actually ... let's not. We will probably need the page again exactly when
- * memory is tight and we are flusing to disk
- */
- return;
-#else
- ptr = bitmap->bp[page].map;
- bitmap->bp[page].map = NULL;
- bitmap->missing_pages++;
- bitmap_free_page(bitmap, ptr);
- return;
-#endif
}
-
/*
* bitmap file handling - read and write the bitmap file and its superblock
*/
@@ -220,11 +196,14 @@ static struct page *read_sb_page(mddev_t *mddev, loff_t offset,
mdk_rdev_t *rdev;
sector_t target;
+ int did_alloc = 0;
- if (!page)
+ if (!page) {
page = alloc_page(GFP_KERNEL);
- if (!page)
- return ERR_PTR(-ENOMEM);
+ if (!page)
+ return ERR_PTR(-ENOMEM);
+ did_alloc = 1;
+ }
list_for_each_entry(rdev, &mddev->disks, same_set) {
if (! test_bit(In_sync, &rdev->flags)
@@ -242,6 +221,8 @@ static struct page *read_sb_page(mddev_t *mddev, loff_t offset,
return page;
}
}
+ if (did_alloc)
+ put_page(page);
return ERR_PTR(-EIO);
}
@@ -286,49 +267,51 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
mddev_t *mddev = bitmap->mddev;
while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
- int size = PAGE_SIZE;
- loff_t offset = mddev->bitmap_info.offset;
- if (page->index == bitmap->file_pages-1)
- size = roundup(bitmap->last_page_size,
- bdev_logical_block_size(rdev->bdev));
- /* Just make sure we aren't corrupting data or
- * metadata
- */
- if (mddev->external) {
- /* Bitmap could be anywhere. */
- if (rdev->sb_start + offset + (page->index *(PAGE_SIZE/512)) >
- rdev->data_offset &&
- rdev->sb_start + offset <
- rdev->data_offset + mddev->dev_sectors +
- (PAGE_SIZE/512))
- goto bad_alignment;
- } else if (offset < 0) {
- /* DATA BITMAP METADATA */
- if (offset
- + (long)(page->index * (PAGE_SIZE/512))
- + size/512 > 0)
- /* bitmap runs in to metadata */
- goto bad_alignment;
- if (rdev->data_offset + mddev->dev_sectors
- > rdev->sb_start + offset)
- /* data runs in to bitmap */
- goto bad_alignment;
- } else if (rdev->sb_start < rdev->data_offset) {
- /* METADATA BITMAP DATA */
- if (rdev->sb_start
- + offset
- + page->index*(PAGE_SIZE/512) + size/512
- > rdev->data_offset)
- /* bitmap runs in to data */
- goto bad_alignment;
- } else {
- /* DATA METADATA BITMAP - no problems */
- }
- md_super_write(mddev, rdev,
- rdev->sb_start + offset
- + page->index * (PAGE_SIZE/512),
- size,
- page);
+ int size = PAGE_SIZE;
+ loff_t offset = mddev->bitmap_info.offset;
+ if (page->index == bitmap->file_pages-1)
+ size = roundup(bitmap->last_page_size,
+ bdev_logical_block_size(rdev->bdev));
+ /* Just make sure we aren't corrupting data or
+ * metadata
+ */
+ if (mddev->external) {
+ /* Bitmap could be anywhere. */
+ if (rdev->sb_start + offset + (page->index
+ * (PAGE_SIZE/512))
+ > rdev->data_offset
+ &&
+ rdev->sb_start + offset
+ < (rdev->data_offset + mddev->dev_sectors
+ + (PAGE_SIZE/512)))
+ goto bad_alignment;
+ } else if (offset < 0) {
+ /* DATA BITMAP METADATA */
+ if (offset
+ + (long)(page->index * (PAGE_SIZE/512))
+ + size/512 > 0)
+ /* bitmap runs in to metadata */
+ goto bad_alignment;
+ if (rdev->data_offset + mddev->dev_sectors
+ > rdev->sb_start + offset)
+ /* data runs in to bitmap */
+ goto bad_alignment;
+ } else if (rdev->sb_start < rdev->data_offset) {
+ /* METADATA BITMAP DATA */
+ if (rdev->sb_start
+ + offset
+ + page->index*(PAGE_SIZE/512) + size/512
+ > rdev->data_offset)
+ /* bitmap runs in to data */
+ goto bad_alignment;
+ } else {
+ /* DATA METADATA BITMAP - no problems */
+ }
+ md_super_write(mddev, rdev,
+ rdev->sb_start + offset
+ + page->index * (PAGE_SIZE/512),
+ size,
+ page);
}
if (wait)
@@ -364,10 +347,9 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait)
bh = bh->b_this_page;
}
- if (wait) {
+ if (wait)
wait_event(bitmap->write_wait,
atomic_read(&bitmap->pending_writes)==0);
- }
}
if (bitmap->flags & BITMAP_WRITE_ERROR)
bitmap_file_kick(bitmap);
@@ -424,7 +406,7 @@ static struct page *read_page(struct file *file, unsigned long index,
struct buffer_head *bh;
sector_t block;
- PRINTK("read bitmap file (%dB @ %Lu)\n", (int)PAGE_SIZE,
+ PRINTK("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
(unsigned long long)index << PAGE_SHIFT);
page = alloc_page(GFP_KERNEL);
@@ -478,7 +460,7 @@ static struct page *read_page(struct file *file, unsigned long index,
}
out:
if (IS_ERR(page))
- printk(KERN_ALERT "md: bitmap read error: (%dB @ %Lu): %ld\n",
+ printk(KERN_ALERT "md: bitmap read error: (%dB @ %llu): %ld\n",
(int)PAGE_SIZE,
(unsigned long long)index << PAGE_SHIFT,
PTR_ERR(page));
@@ -664,11 +646,14 @@ static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
sb = kmap_atomic(bitmap->sb_page, KM_USER0);
old = le32_to_cpu(sb->state) & bits;
switch (op) {
- case MASK_SET: sb->state |= cpu_to_le32(bits);
- break;
- case MASK_UNSET: sb->state &= cpu_to_le32(~bits);
- break;
- default: BUG();
+ case MASK_SET:
+ sb->state |= cpu_to_le32(bits);
+ break;
+ case MASK_UNSET:
+ sb->state &= cpu_to_le32(~bits);
+ break;
+ default:
+ BUG();
}
kunmap_atomic(sb, KM_USER0);
return old;
@@ -710,12 +695,14 @@ static inline unsigned long file_page_offset(struct bitmap *bitmap, unsigned lon
static inline struct page *filemap_get_page(struct bitmap *bitmap,
unsigned long chunk)
{
- if (file_page_index(bitmap, chunk) >= bitmap->file_pages) return NULL;
+ if (bitmap->filemap == NULL)
+ return NULL;
+ if (file_page_index(bitmap, chunk) >= bitmap->file_pages)
+ return NULL;
return bitmap->filemap[file_page_index(bitmap, chunk)
- file_page_index(bitmap, 0)];
}
-
static void bitmap_file_unmap(struct bitmap *bitmap)
{
struct page **map, *sb_page;
@@ -766,7 +753,6 @@ static void bitmap_file_put(struct bitmap *bitmap)
}
}
-
/*
* bitmap_file_kick - if an error occurs while manipulating the bitmap file
* then it is no longer reliable, so we stop using it and we mark the file
@@ -785,7 +771,6 @@ static void bitmap_file_kick(struct bitmap *bitmap)
ptr = d_path(&bitmap->file->f_path, path,
PAGE_SIZE);
-
printk(KERN_ALERT
"%s: kicking failed bitmap file %s from array!\n",
bmname(bitmap), IS_ERR(ptr) ? "" : ptr);
@@ -803,27 +788,36 @@ static void bitmap_file_kick(struct bitmap *bitmap)
}
enum bitmap_page_attr {
- BITMAP_PAGE_DIRTY = 0, // there are set bits that need to be synced
- BITMAP_PAGE_CLEAN = 1, // there are bits that might need to be cleared
- BITMAP_PAGE_NEEDWRITE=2, // there are cleared bits that need to be synced
+ BITMAP_PAGE_DIRTY = 0, /* there are set bits that need to be synced */
+ BITMAP_PAGE_CLEAN = 1, /* there are bits that might need to be cleared */
+ BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */
};
static inline void set_page_attr(struct bitmap *bitmap, struct page *page,
enum bitmap_page_attr attr)
{
- __set_bit((page->index<<2) + attr, bitmap->filemap_attr);
+ if (page)
+ __set_bit((page->index<<2) + attr, bitmap->filemap_attr);
+ else
+ __set_bit(attr, &bitmap->logattrs);
}
static inline void clear_page_attr(struct bitmap *bitmap, struct page *page,
enum bitmap_page_attr attr)
{
- __clear_bit((page->index<<2) + attr, bitmap->filemap_attr);
+ if (page)
+ __clear_bit((page->index<<2) + attr, bitmap->filemap_attr);
+ else
+ __clear_bit(attr, &bitmap->logattrs);
}
static inline unsigned long test_page_attr(struct bitmap *bitmap, struct page *page,
enum bitmap_page_attr attr)
{
- return test_bit((page->index<<2) + attr, bitmap->filemap_attr);
+ if (page)
+ return test_bit((page->index<<2) + attr, bitmap->filemap_attr);
+ else
+ return test_bit(attr, &bitmap->logattrs);
}
/*
@@ -836,30 +830,32 @@ static inline unsigned long test_page_attr(struct bitmap *bitmap, struct page *p
static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
{
unsigned long bit;
- struct page *page;
+ struct page *page = NULL;
void *kaddr;
unsigned long chunk = block >> CHUNK_BLOCK_SHIFT(bitmap);
if (!bitmap->filemap) {
- return;
- }
-
- page = filemap_get_page(bitmap, chunk);
- if (!page) return;
- bit = file_page_offset(bitmap, chunk);
+ struct dm_dirty_log *log = bitmap->mddev->bitmap_info.log;
+ if (log)
+ log->type->mark_region(log, chunk);
+ } else {
- /* set the bit */
- kaddr = kmap_atomic(page, KM_USER0);
- if (bitmap->flags & BITMAP_HOSTENDIAN)
- set_bit(bit, kaddr);
- else
- ext2_set_bit(bit, kaddr);
- kunmap_atomic(kaddr, KM_USER0);
- PRINTK("set file bit %lu page %lu\n", bit, page->index);
+ page = filemap_get_page(bitmap, chunk);
+ if (!page)
+ return;
+ bit = file_page_offset(bitmap, chunk);
+ /* set the bit */
+ kaddr = kmap_atomic(page, KM_USER0);
+ if (bitmap->flags & BITMAP_HOSTENDIAN)
+ set_bit(bit, kaddr);
+ else
+ ext2_set_bit(bit, kaddr);
+ kunmap_atomic(kaddr, KM_USER0);
+ PRINTK("set file bit %lu page %lu\n", bit, page->index);
+ }
/* record page number so it gets flushed to disk when unplug occurs */
set_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
-
}
/* this gets called when the md device is ready to unplug its underlying
@@ -874,6 +870,16 @@ void bitmap_unplug(struct bitmap *bitmap)
if (!bitmap)
return;
+ if (!bitmap->filemap) {
+ /* Must be using a dirty_log */
+ struct dm_dirty_log *log = bitmap->mddev->bitmap_info.log;
+ dirty = test_and_clear_bit(BITMAP_PAGE_DIRTY, &bitmap->logattrs);
+ need_write = test_and_clear_bit(BITMAP_PAGE_NEEDWRITE, &bitmap->logattrs);
+ if (dirty || need_write)
+ if (log->type->flush(log))
+ bitmap->flags |= BITMAP_WRITE_ERROR;
+ goto out;
+ }
/* look at each page to see if there are any set bits that need to be
* flushed out to disk */
@@ -892,7 +898,7 @@ void bitmap_unplug(struct bitmap *bitmap)
wait = 1;
spin_unlock_irqrestore(&bitmap->lock, flags);
- if (dirty | need_write)
+ if (dirty || need_write)
write_page(bitmap, page, 0);
}
if (wait) { /* if any writes were performed, we need to wait on them */
@@ -902,9 +908,11 @@ void bitmap_unplug(struct bitmap *bitmap)
else
md_super_wait(bitmap->mddev);
}
+out:
if (bitmap->flags & BITMAP_WRITE_ERROR)
bitmap_file_kick(bitmap);
}
+EXPORT_SYMBOL(bitmap_unplug);
static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
/* * bitmap_init_from_disk -- called at bitmap_create time to initialize
@@ -943,12 +951,11 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
printk(KERN_INFO "%s: bitmap file is out of date, doing full "
"recovery\n", bmname(bitmap));
- bytes = (chunks + 7) / 8;
+ bytes = DIV_ROUND_UP(bitmap->chunks, 8);
if (!bitmap->mddev->bitmap_info.external)
bytes += sizeof(bitmap_super_t);
-
- num_pages = (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
+ num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
if (file && i_size_read(file->f_mapping->host) < bytes) {
printk(KERN_INFO "%s: bitmap file too short %lu < %lu\n",
@@ -966,7 +973,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
/* We need 4 bits per page, rounded up to a multiple of sizeof(unsigned long) */
bitmap->filemap_attr = kzalloc(
- roundup( DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
+ roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
GFP_KERNEL);
if (!bitmap->filemap_attr)
goto err;
@@ -1021,7 +1028,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
if (outofdate) {
/*
* if bitmap is out of date, dirty the
- * whole page and write it out
+ * whole page and write it out
*/
paddr = kmap_atomic(page, KM_USER0);
memset(paddr + offset, 0xff,
@@ -1052,7 +1059,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
}
}
- /* everything went OK */
+ /* everything went OK */
ret = 0;
bitmap_mask_state(bitmap, BITMAP_STALE, MASK_UNSET);
@@ -1080,21 +1087,16 @@ void bitmap_write_all(struct bitmap *bitmap)
*/
int i;
- for (i=0; i < bitmap->file_pages; i++)
+ for (i = 0; i < bitmap->file_pages; i++)
set_page_attr(bitmap, bitmap->filemap[i],
BITMAP_PAGE_NEEDWRITE);
}
-
static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc)
{
sector_t chunk = offset >> CHUNK_BLOCK_SHIFT(bitmap);
unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
bitmap->bp[page].count += inc;
-/*
- if (page == 0) printk("count page 0, offset %llu: %d gives %d\n",
- (unsigned long long)offset, inc, bitmap->bp[page].count);
-*/
bitmap_checkfree(bitmap, page);
}
static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
@@ -1114,6 +1116,7 @@ void bitmap_daemon_work(mddev_t *mddev)
struct page *page = NULL, *lastpage = NULL;
int blocks;
void *paddr;
+ struct dm_dirty_log *log = mddev->bitmap_info.log;
/* Use a mutex to guard daemon_work against
* bitmap_destroy.
@@ -1138,11 +1141,12 @@ void bitmap_daemon_work(mddev_t *mddev)
spin_lock_irqsave(&bitmap->lock, flags);
for (j = 0; j < bitmap->chunks; j++) {
bitmap_counter_t *bmc;
- if (!bitmap->filemap)
- /* error or shutdown */
- break;
-
- page = filemap_get_page(bitmap, j);
+ if (!bitmap->filemap) {
+ if (!log)
+ /* error or shutdown */
+ break;
+ } else
+ page = filemap_get_page(bitmap, j);
if (page != lastpage) {
/* skip this page unless it's marked as needing cleaning */
@@ -1197,14 +1201,11 @@ void bitmap_daemon_work(mddev_t *mddev)
(sector_t)j << CHUNK_BLOCK_SHIFT(bitmap),
&blocks, 0);
if (bmc) {
-/*
- if (j < 100) printk("bitmap: j=%lu, *bmc = 0x%x\n", j, *bmc);
-*/
if (*bmc)
bitmap->allclean = 0;
if (*bmc == 2) {
- *bmc=1; /* maybe clear the bit next time */
+ *bmc = 1; /* maybe clear the bit next time */
set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
} else if (*bmc == 1 && !bitmap->need_sync) {
/* we can clear the bit */
@@ -1214,14 +1215,17 @@ void bitmap_daemon_work(mddev_t *mddev)
-1);
/* clear the bit */
- paddr = kmap_atomic(page, KM_USER0);
- if (bitmap->flags & BITMAP_HOSTENDIAN)
- clear_bit(file_page_offset(bitmap, j),
- paddr);
- else
- ext2_clear_bit(file_page_offset(bitmap, j),
- paddr);
- kunmap_atomic(paddr, KM_USER0);
+ if (page) {
+ paddr = kmap_atomic(page, KM_USER0);
+ if (bitmap->flags & BITMAP_HOSTENDIAN)
+ clear_bit(file_page_offset(bitmap, j),
+ paddr);
+ else
+ ext2_clear_bit(file_page_offset(bitmap, j),
+ paddr);
+ kunmap_atomic(paddr, KM_USER0);
+ } else
+ log->type->clear_region(log, j);
}
} else
j |= PAGE_COUNTER_MASK;
@@ -1229,12 +1233,16 @@ void bitmap_daemon_work(mddev_t *mddev)
spin_unlock_irqrestore(&bitmap->lock, flags);
/* now sync the final page */
- if (lastpage != NULL) {
+ if (lastpage != NULL || log != NULL) {
spin_lock_irqsave(&bitmap->lock, flags);
if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) {
clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
spin_unlock_irqrestore(&bitmap->lock, flags);
- write_page(bitmap, lastpage, 0);
+ if (lastpage)
+ write_page(bitmap, lastpage, 0);
+ else
+ if (log->type->flush(log))
+ bitmap->flags |= BITMAP_WRITE_ERROR;
} else {
set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
spin_unlock_irqrestore(&bitmap->lock, flags);
@@ -1243,7 +1251,7 @@ void bitmap_daemon_work(mddev_t *mddev)
done:
if (bitmap->allclean == 0)
- bitmap->mddev->thread->timeout =
+ bitmap->mddev->thread->timeout =
bitmap->mddev->bitmap_info.daemon_sleep;
mutex_unlock(&mddev->bitmap_info.mutex);
}
@@ -1262,34 +1270,38 @@ __acquires(bitmap->lock)
unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
sector_t csize;
+ int err;
- if (bitmap_checkpage(bitmap, page, create) < 0) {
+ err = bitmap_checkpage(bitmap, page, create);
+
+ if (bitmap->bp[page].hijacked ||
+ bitmap->bp[page].map == NULL)
+ csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap) +
+ PAGE_COUNTER_SHIFT - 1);
+ else
csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap));
- *blocks = csize - (offset & (csize- 1));
+ *blocks = csize - (offset & (csize - 1));
+
+ if (err < 0)
return NULL;
- }
+
/* now locked ... */
if (bitmap->bp[page].hijacked) { /* hijacked pointer */
/* should we use the first or second counter field
* of the hijacked pointer? */
int hi = (pageoff > PAGE_COUNTER_MASK);
- csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap) +
- PAGE_COUNTER_SHIFT - 1);
- *blocks = csize - (offset & (csize- 1));
return &((bitmap_counter_t *)
&bitmap->bp[page].map)[hi];
- } else { /* page is allocated */
- csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap));
- *blocks = csize - (offset & (csize- 1));
+ } else /* page is allocated */
return (bitmap_counter_t *)
&(bitmap->bp[page].map[pageoff]);
- }
}
int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
{
- if (!bitmap) return 0;
+ if (!bitmap)
+ return 0;
if (behind) {
int bw;
@@ -1322,17 +1334,16 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
prepare_to_wait(&bitmap->overflow_wait, &__wait,
TASK_UNINTERRUPTIBLE);
spin_unlock_irq(&bitmap->lock);
- blk_unplug(bitmap->mddev->queue);
+ md_unplug(bitmap->mddev);
schedule();
finish_wait(&bitmap->overflow_wait, &__wait);
continue;
}
- switch(*bmc) {
+ switch (*bmc) {
case 0:
bitmap_file_set_bit(bitmap, offset);
- bitmap_count_page(bitmap,offset, 1);
- blk_plug_device_unlocked(bitmap->mddev->queue);
+ bitmap_count_page(bitmap, offset, 1);
/* fall through */
case 1:
*bmc = 2;
@@ -1345,16 +1356,19 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
offset += blocks;
if (sectors > blocks)
sectors -= blocks;
- else sectors = 0;
+ else
+ sectors = 0;
}
bitmap->allclean = 0;
return 0;
}
+EXPORT_SYMBOL(bitmap_startwrite);
void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors,
int success, int behind)
{
- if (!bitmap) return;
+ if (!bitmap)
+ return;
if (behind) {
if (atomic_dec_and_test(&bitmap->behind_writes))
wake_up(&bitmap->behind_wait);
@@ -1381,7 +1395,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
bitmap->events_cleared < bitmap->mddev->events) {
bitmap->events_cleared = bitmap->mddev->events;
bitmap->need_sync = 1;
- sysfs_notify_dirent(bitmap->sysfs_can_clear);
+ sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
}
if (!success && ! (*bmc & NEEDED_MASK))
@@ -1391,18 +1405,22 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
wake_up(&bitmap->overflow_wait);
(*bmc)--;
- if (*bmc <= 2) {
+ if (*bmc <= 2)
set_page_attr(bitmap,
- filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap)),
+ filemap_get_page(
+ bitmap,
+ offset >> CHUNK_BLOCK_SHIFT(bitmap)),
BITMAP_PAGE_CLEAN);
- }
+
spin_unlock_irqrestore(&bitmap->lock, flags);
offset += blocks;
if (sectors > blocks)
sectors -= blocks;
- else sectors = 0;
+ else
+ sectors = 0;
}
}
+EXPORT_SYMBOL(bitmap_endwrite);
static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks,
int degraded)
@@ -1455,14 +1473,14 @@ int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks,
}
return rv;
}
+EXPORT_SYMBOL(bitmap_start_sync);
void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int aborted)
{
bitmap_counter_t *bmc;
unsigned long flags;
-/*
- if (offset == 0) printk("bitmap_end_sync 0 (%d)\n", aborted);
-*/ if (bitmap == NULL) {
+
+ if (bitmap == NULL) {
*blocks = 1024;
return;
}
@@ -1471,26 +1489,23 @@ void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int ab
if (bmc == NULL)
goto unlock;
/* locked */
-/*
- if (offset == 0) printk("bitmap_end sync found 0x%x, blocks %d\n", *bmc, *blocks);
-*/
if (RESYNC(*bmc)) {
*bmc &= ~RESYNC_MASK;
if (!NEEDED(*bmc) && aborted)
*bmc |= NEEDED_MASK;
else {
- if (*bmc <= 2) {
+ if (*bmc <= 2)
set_page_attr(bitmap,
filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap)),
BITMAP_PAGE_CLEAN);
- }
}
}
unlock:
spin_unlock_irqrestore(&bitmap->lock, flags);
bitmap->allclean = 0;
}
+EXPORT_SYMBOL(bitmap_end_sync);
void bitmap_close_sync(struct bitmap *bitmap)
{
@@ -1507,6 +1522,7 @@ void bitmap_close_sync(struct bitmap *bitmap)
sector += blocks;
}
}
+EXPORT_SYMBOL(bitmap_close_sync);
void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
{
@@ -1536,6 +1552,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
bitmap->last_end_sync = jiffies;
sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed");
}
+EXPORT_SYMBOL(bitmap_cond_end_sync);
static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
{
@@ -1552,9 +1569,9 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n
spin_unlock_irq(&bitmap->lock);
return;
}
- if (! *bmc) {
+ if (!*bmc) {
struct page *page;
- *bmc = 1 | (needed?NEEDED_MASK:0);
+ *bmc = 1 | (needed ? NEEDED_MASK : 0);
bitmap_count_page(bitmap, offset, 1);
page = filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap));
set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
@@ -1663,15 +1680,17 @@ int bitmap_create(mddev_t *mddev)
unsigned long pages;
struct file *file = mddev->bitmap_info.file;
int err;
- sector_t start;
- struct sysfs_dirent *bm;
+ struct sysfs_dirent *bm = NULL;
BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);
- if (!file && !mddev->bitmap_info.offset) /* bitmap disabled, nothing to do */
+ if (!file
+ && !mddev->bitmap_info.offset
+ && !mddev->bitmap_info.log) /* bitmap disabled, nothing to do */
return 0;
BUG_ON(file && mddev->bitmap_info.offset);
+ BUG_ON(mddev->bitmap_info.offset && mddev->bitmap_info.log);
bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
if (!bitmap)
@@ -1685,7 +1704,8 @@ int bitmap_create(mddev_t *mddev)
bitmap->mddev = mddev;
- bm = sysfs_get_dirent(mddev->kobj.sd, NULL, "bitmap");
+ if (mddev->kobj.sd)
+ bm = sysfs_get_dirent(mddev->kobj.sd, NULL, "bitmap");
if (bm) {
bitmap->sysfs_can_clear = sysfs_get_dirent(bm, NULL, "can_clear");
sysfs_put(bm);
@@ -1719,9 +1739,9 @@ int bitmap_create(mddev_t *mddev)
bitmap->chunkshift = ffz(~mddev->bitmap_info.chunksize);
/* now that chunksize and chunkshift are set, we can use these macros */
- chunks = (blocks + CHUNK_BLOCK_RATIO(bitmap) - 1) >>
+ chunks = (blocks + CHUNK_BLOCK_RATIO(bitmap) - 1) >>
CHUNK_BLOCK_SHIFT(bitmap);
- pages = (chunks + PAGE_COUNTER_RATIO - 1) / PAGE_COUNTER_RATIO;
+ pages = (chunks + PAGE_COUNTER_RATIO - 1) / PAGE_COUNTER_RATIO;
BUG_ON(!pages);
@@ -1741,27 +1761,11 @@ int bitmap_create(mddev_t *mddev)
if (!bitmap->bp)
goto error;
- /* now that we have some pages available, initialize the in-memory
- * bitmap from the on-disk bitmap */
- start = 0;
- if (mddev->degraded == 0
- || bitmap->events_cleared == mddev->events)
- /* no need to keep dirty bits to optimise a re-add of a missing device */
- start = mddev->recovery_cp;
- err = bitmap_init_from_disk(bitmap, start);
-
- if (err)
- goto error;
-
printk(KERN_INFO "created bitmap (%lu pages) for device %s\n",
pages, bmname(bitmap));
mddev->bitmap = bitmap;
- mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
- md_wakeup_thread(mddev->thread);
-
- bitmap_update_sb(bitmap);
return (bitmap->flags & BITMAP_WRITE_ERROR) ? -EIO : 0;
@@ -1770,15 +1774,69 @@ int bitmap_create(mddev_t *mddev)
return err;
}
+int bitmap_load(mddev_t *mddev)
+{
+ int err = 0;
+ sector_t sector = 0;
+ struct bitmap *bitmap = mddev->bitmap;
+
+ if (!bitmap)
+ goto out;
+
+ /* Clear out old bitmap info first: Either there is none, or we
+ * are resuming after someone else has possibly changed things,
+ * so we should forget old cached info.
+ * All chunks should be clean, but some might need_sync.
+ */
+ while (sector < mddev->resync_max_sectors) {
+ int blocks;
+ bitmap_start_sync(bitmap, sector, &blocks, 0);
+ sector += blocks;
+ }
+ bitmap_close_sync(bitmap);
+
+ if (mddev->bitmap_info.log) {
+ unsigned long i;
+ struct dm_dirty_log *log = mddev->bitmap_info.log;
+ for (i = 0; i < bitmap->chunks; i++)
+ if (!log->type->in_sync(log, i, 1))
+ bitmap_set_memory_bits(bitmap,
+ (sector_t)i << CHUNK_BLOCK_SHIFT(bitmap),
+ 1);
+ } else {
+ sector_t start = 0;
+ if (mddev->degraded == 0
+ || bitmap->events_cleared == mddev->events)
+ /* no need to keep dirty bits to optimise a
+ * re-add of a missing device */
+ start = mddev->recovery_cp;
+
+ err = bitmap_init_from_disk(bitmap, start);
+ }
+ if (err)
+ goto out;
+
+ mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
+ md_wakeup_thread(mddev->thread);
+
+ bitmap_update_sb(bitmap);
+
+ if (bitmap->flags & BITMAP_WRITE_ERROR)
+ err = -EIO;
+out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(bitmap_load);
+
static ssize_t
location_show(mddev_t *mddev, char *page)
{
ssize_t len;
- if (mddev->bitmap_info.file) {
+ if (mddev->bitmap_info.file)
len = sprintf(page, "file");
- } else if (mddev->bitmap_info.offset) {
+ else if (mddev->bitmap_info.offset)
len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset);
- } else
+ else
len = sprintf(page, "none");
len += sprintf(page+len, "\n");
return len;
@@ -1867,7 +1925,7 @@ timeout_show(mddev_t *mddev, char *page)
ssize_t len;
unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ;
unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ;
-
+
len = sprintf(page, "%lu", secs);
if (jifs)
len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs));
@@ -2049,12 +2107,3 @@ struct attribute_group md_bitmap_group = {
.attrs = md_bitmap_attrs,
};
-
-/* the bitmap API -- for raid personalities */
-EXPORT_SYMBOL(bitmap_startwrite);
-EXPORT_SYMBOL(bitmap_endwrite);
-EXPORT_SYMBOL(bitmap_start_sync);
-EXPORT_SYMBOL(bitmap_end_sync);
-EXPORT_SYMBOL(bitmap_unplug);
-EXPORT_SYMBOL(bitmap_close_sync);
-EXPORT_SYMBOL(bitmap_cond_end_sync);
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index 3797dea4723..e872a7bad6b 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -222,6 +222,10 @@ struct bitmap {
unsigned long file_pages; /* number of pages in the file */
int last_page_size; /* bytes in the last page */
+ unsigned long logattrs; /* used when filemap_attr doesn't exist
+ * because we are working with a dirty_log
+ */
+
unsigned long flags;
int allclean;
@@ -243,12 +247,14 @@ struct bitmap {
wait_queue_head_t behind_wait;
struct sysfs_dirent *sysfs_can_clear;
+
};
/* the bitmap API */
/* these are used only by md/bitmap */
int bitmap_create(mddev_t *mddev);
+int bitmap_load(mddev_t *mddev);
void bitmap_flush(mddev_t *mddev);
void bitmap_destroy(mddev_t *mddev);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 3bdbb611570..368e8e98f70 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -107,11 +107,10 @@ struct crypt_config {
struct workqueue_struct *io_queue;
struct workqueue_struct *crypt_queue;
- /*
- * crypto related data
- */
+ char *cipher;
+ char *cipher_mode;
+
struct crypt_iv_operations *iv_gen_ops;
- char *iv_mode;
union {
struct iv_essiv_private essiv;
struct iv_benbi_private benbi;
@@ -135,8 +134,6 @@ struct crypt_config {
unsigned int dmreq_start;
struct ablkcipher_request *req;
- char cipher[CRYPTO_MAX_ALG_NAME];
- char chainmode[CRYPTO_MAX_ALG_NAME];
struct crypto_ablkcipher *tfm;
unsigned long flags;
unsigned int key_size;
@@ -999,82 +996,135 @@ static int crypt_wipe_key(struct crypt_config *cc)
return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size);
}
-/*
- * Construct an encryption mapping:
- * <cipher> <key> <iv_offset> <dev_path> <start>
- */
-static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+static void crypt_dtr(struct dm_target *ti)
{
- struct crypt_config *cc;
- struct crypto_ablkcipher *tfm;
- char *tmp;
- char *cipher;
- char *chainmode;
- char *ivmode;
- char *ivopts;
- unsigned int key_size;
- unsigned long long tmpll;
+ struct crypt_config *cc = ti->private;
- if (argc != 5) {
- ti->error = "Not enough arguments";
+ ti->private = NULL;
+
+ if (!cc)
+ return;
+
+ if (cc->io_queue)
+ destroy_workqueue(cc->io_queue);
+ if (cc->crypt_queue)
+ destroy_workqueue(cc->crypt_queue);
+
+ if (cc->bs)
+ bioset_free(cc->bs);
+
+ if (cc->page_pool)
+ mempool_destroy(cc->page_pool);
+ if (cc->req_pool)
+ mempool_destroy(cc->req_pool);
+ if (cc->io_pool)
+ mempool_destroy(cc->io_pool);
+
+ if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
+ cc->iv_gen_ops->dtr(cc);
+
+ if (cc->tfm && !IS_ERR(cc->tfm))
+ crypto_free_ablkcipher(cc->tfm);
+
+ if (cc->dev)
+ dm_put_device(ti, cc->dev);
+
+ kzfree(cc->cipher);
+ kzfree(cc->cipher_mode);
+
+ /* Must zero key material before freeing */
+ kzfree(cc);
+}
+
+static int crypt_ctr_cipher(struct dm_target *ti,
+ char *cipher_in, char *key)
+{
+ struct crypt_config *cc = ti->private;
+ char *tmp, *cipher, *chainmode, *ivmode, *ivopts;
+ char *cipher_api = NULL;
+ int ret = -EINVAL;
+
+ /* Convert to crypto api definition? */
+ if (strchr(cipher_in, '(')) {
+ ti->error = "Bad cipher specification";
return -EINVAL;
}
- tmp = argv[0];
+ /*
+ * Legacy dm-crypt cipher specification
+ * cipher-mode-iv:ivopts
+ */
+ tmp = cipher_in;
cipher = strsep(&tmp, "-");
+
+ cc->cipher = kstrdup(cipher, GFP_KERNEL);
+ if (!cc->cipher)
+ goto bad_mem;
+
+ if (tmp) {
+ cc->cipher_mode = kstrdup(tmp, GFP_KERNEL);
+ if (!cc->cipher_mode)
+ goto bad_mem;
+ }
+
chainmode = strsep(&tmp, "-");
ivopts = strsep(&tmp, "-");
ivmode = strsep(&ivopts, ":");
if (tmp)
- DMWARN("Unexpected additional cipher options");
-
- key_size = strlen(argv[1]) >> 1;
-
- cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
- if (cc == NULL) {
- ti->error =
- "Cannot allocate transparent encryption context";
- return -ENOMEM;
- }
+ DMWARN("Ignoring unexpected additional cipher options");
- /* Compatibility mode for old dm-crypt cipher strings */
- if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
+ /* Compatibility mode for old dm-crypt mappings */
+ if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
+ kfree(cc->cipher_mode);
+ cc->cipher_mode = kstrdup("cbc-plain", GFP_KERNEL);
chainmode = "cbc";
ivmode = "plain";
}
if (strcmp(chainmode, "ecb") && !ivmode) {
- ti->error = "This chaining mode requires an IV mechanism";
- goto bad_cipher;
+ ti->error = "IV mechanism required";
+ return -EINVAL;
}
- if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)",
- chainmode, cipher) >= CRYPTO_MAX_ALG_NAME) {
- ti->error = "Chain mode + cipher name is too long";
- goto bad_cipher;
+ cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
+ if (!cipher_api)
+ goto bad_mem;
+
+ ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
+ "%s(%s)", chainmode, cipher);
+ if (ret < 0) {
+ kfree(cipher_api);
+ goto bad_mem;
}
- tfm = crypto_alloc_ablkcipher(cc->cipher, 0, 0);
- if (IS_ERR(tfm)) {
+ /* Allocate cipher */
+ cc->tfm = crypto_alloc_ablkcipher(cipher_api, 0, 0);
+ if (IS_ERR(cc->tfm)) {
+ ret = PTR_ERR(cc->tfm);
ti->error = "Error allocating crypto tfm";
- goto bad_cipher;
+ goto bad;
}
- strcpy(cc->cipher, cipher);
- strcpy(cc->chainmode, chainmode);
- cc->tfm = tfm;
-
- if (crypt_set_key(cc, argv[1]) < 0) {
+ /* Initialize and set key */
+ ret = crypt_set_key(cc, key);
+ if (ret < 0) {
ti->error = "Error decoding and setting key";
- goto bad_ivmode;
+ goto bad;
}
- /*
- * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi".
- * See comments at iv code
- */
+ /* Initialize IV */
+ cc->iv_size = crypto_ablkcipher_ivsize(cc->tfm);
+ if (cc->iv_size)
+ /* at least a 64 bit sector number should fit in our buffer */
+ cc->iv_size = max(cc->iv_size,
+ (unsigned int)(sizeof(u64) / sizeof(u8)));
+ else if (ivmode) {
+ DMWARN("Selected cipher does not support IVs");
+ ivmode = NULL;
+ }
+ /* Choose ivmode, see comments at iv code. */
if (ivmode == NULL)
cc->iv_gen_ops = NULL;
else if (strcmp(ivmode, "plain") == 0)
@@ -1088,159 +1138,138 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
else if (strcmp(ivmode, "null") == 0)
cc->iv_gen_ops = &crypt_iv_null_ops;
else {
+ ret = -EINVAL;
ti->error = "Invalid IV mode";
- goto bad_ivmode;
+ goto bad;
}
- if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
- cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
- goto bad_ivmode;
-
- if (cc->iv_gen_ops && cc->iv_gen_ops->init &&
- cc->iv_gen_ops->init(cc) < 0) {
- ti->error = "Error initialising IV";
- goto bad_slab_pool;
+ /* Allocate IV */
+ if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
+ ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
+ if (ret < 0) {
+ ti->error = "Error creating IV";
+ goto bad;
+ }
}
- cc->iv_size = crypto_ablkcipher_ivsize(tfm);
- if (cc->iv_size)
- /* at least a 64 bit sector number should fit in our buffer */
- cc->iv_size = max(cc->iv_size,
- (unsigned int)(sizeof(u64) / sizeof(u8)));
- else {
- if (cc->iv_gen_ops) {
- DMWARN("Selected cipher does not support IVs");
- if (cc->iv_gen_ops->dtr)
- cc->iv_gen_ops->dtr(cc);
- cc->iv_gen_ops = NULL;
+ /* Initialize IV (set keys for ESSIV etc) */
+ if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
+ ret = cc->iv_gen_ops->init(cc);
+ if (ret < 0) {
+ ti->error = "Error initialising IV";
+ goto bad;
}
}
+ ret = 0;
+bad:
+ kfree(cipher_api);
+ return ret;
+
+bad_mem:
+ ti->error = "Cannot allocate cipher strings";
+ return -ENOMEM;
+}
+
+/*
+ * Construct an encryption mapping:
+ * <cipher> <key> <iv_offset> <dev_path> <start>
+ */
+static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+ struct crypt_config *cc;
+ unsigned int key_size;
+ unsigned long long tmpll;
+ int ret;
+
+ if (argc != 5) {
+ ti->error = "Not enough arguments";
+ return -EINVAL;
+ }
+
+ key_size = strlen(argv[1]) >> 1;
+
+ cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
+ if (!cc) {
+ ti->error = "Cannot allocate encryption context";
+ return -ENOMEM;
+ }
+
+ ti->private = cc;
+ ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
+ if (ret < 0)
+ goto bad;
+
+ ret = -ENOMEM;
cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
if (!cc->io_pool) {
ti->error = "Cannot allocate crypt io mempool";
- goto bad_slab_pool;
+ goto bad;
}
cc->dmreq_start = sizeof(struct ablkcipher_request);
- cc->dmreq_start += crypto_ablkcipher_reqsize(tfm);
+ cc->dmreq_start += crypto_ablkcipher_reqsize(cc->tfm);
cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
- cc->dmreq_start += crypto_ablkcipher_alignmask(tfm) &
+ cc->dmreq_start += crypto_ablkcipher_alignmask(cc->tfm) &
~(crypto_tfm_ctx_alignment() - 1);
cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
sizeof(struct dm_crypt_request) + cc->iv_size);
if (!cc->req_pool) {
ti->error = "Cannot allocate crypt request mempool";
- goto bad_req_pool;
+ goto bad;
}
cc->req = NULL;
cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
if (!cc->page_pool) {
ti->error = "Cannot allocate page mempool";
- goto bad_page_pool;
+ goto bad;
}
cc->bs = bioset_create(MIN_IOS, 0);
if (!cc->bs) {
ti->error = "Cannot allocate crypt bioset";
- goto bad_bs;
+ goto bad;
}
+ ret = -EINVAL;
if (sscanf(argv[2], "%llu", &tmpll) != 1) {
ti->error = "Invalid iv_offset sector";
- goto bad_device;
+ goto bad;
}
cc->iv_offset = tmpll;
- if (sscanf(argv[4], "%llu", &tmpll) != 1) {
- ti->error = "Invalid device sector";
- goto bad_device;
- }
- cc->start = tmpll;
-
if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
ti->error = "Device lookup failed";
- goto bad_device;
+ goto bad;
}
- if (ivmode && cc->iv_gen_ops) {
- if (ivopts)
- *(ivopts - 1) = ':';
- cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
- if (!cc->iv_mode) {
- ti->error = "Error kmallocing iv_mode string";
- goto bad_ivmode_string;
- }
- strcpy(cc->iv_mode, ivmode);
- } else
- cc->iv_mode = NULL;
+ if (sscanf(argv[4], "%llu", &tmpll) != 1) {
+ ti->error = "Invalid device sector";
+ goto bad;
+ }
+ cc->start = tmpll;
+ ret = -ENOMEM;
cc->io_queue = create_singlethread_workqueue("kcryptd_io");
if (!cc->io_queue) {
ti->error = "Couldn't create kcryptd io queue";
- goto bad_io_queue;
+ goto bad;
}
cc->crypt_queue = create_singlethread_workqueue("kcryptd");
if (!cc->crypt_queue) {
ti->error = "Couldn't create kcryptd queue";
- goto bad_crypt_queue;
+ goto bad;
}
ti->num_flush_requests = 1;
- ti->private = cc;
return 0;
-bad_crypt_queue:
- destroy_workqueue(cc->io_queue);
-bad_io_queue:
- kfree(cc->iv_mode);
-bad_ivmode_string:
- dm_put_device(ti, cc->dev);
-bad_device:
- bioset_free(cc->bs);
-bad_bs:
- mempool_destroy(cc->page_pool);
-bad_page_pool:
- mempool_destroy(cc->req_pool);
-bad_req_pool:
- mempool_destroy(cc->io_pool);
-bad_slab_pool:
- if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
- cc->iv_gen_ops->dtr(cc);
-bad_ivmode:
- crypto_free_ablkcipher(tfm);
-bad_cipher:
- /* Must zero key material before freeing */
- kzfree(cc);
- return -EINVAL;
-}
-
-static void crypt_dtr(struct dm_target *ti)
-{
- struct crypt_config *cc = (struct crypt_config *) ti->private;
-
- destroy_workqueue(cc->io_queue);
- destroy_workqueue(cc->crypt_queue);
-
- if (cc->req)
- mempool_free(cc->req, cc->req_pool);
-
- bioset_free(cc->bs);
- mempool_destroy(cc->page_pool);
- mempool_destroy(cc->req_pool);
- mempool_destroy(cc->io_pool);
-
- kfree(cc->iv_mode);
- if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
- cc->iv_gen_ops->dtr(cc);
- crypto_free_ablkcipher(cc->tfm);
- dm_put_device(ti, cc->dev);
-
- /* Must zero key material before freeing */
- kzfree(cc);
+bad:
+ crypt_dtr(ti);
+ return ret;
}
static int crypt_map(struct dm_target *ti, struct bio *bio,
@@ -1255,7 +1284,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
return DM_MAPIO_REMAPPED;
}
- io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin);
+ io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector));
if (bio_data_dir(io->base_bio) == READ)
kcryptd_queue_io(io);
@@ -1268,7 +1297,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
static int crypt_status(struct dm_target *ti, status_type_t type,
char *result, unsigned int maxlen)
{
- struct crypt_config *cc = (struct crypt_config *) ti->private;
+ struct crypt_config *cc = ti->private;
unsigned int sz = 0;
switch (type) {
@@ -1277,11 +1306,10 @@ static int crypt_status(struct dm_target *ti, status_type_t type,
break;
case STATUSTYPE_TABLE:
- if (cc->iv_mode)
- DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode,
- cc->iv_mode);
+ if (cc->cipher_mode)
+ DMEMIT("%s-%s ", cc->cipher, cc->cipher_mode);
else
- DMEMIT("%s-%s ", cc->cipher, cc->chainmode);
+ DMEMIT("%s ", cc->cipher);
if (cc->key_size > 0) {
if ((maxlen - sz) < ((cc->key_size << 1) + 1))
@@ -1378,7 +1406,7 @@ static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
return max_size;
bvm->bi_bdev = cc->dev->bdev;
- bvm->bi_sector = cc->start + bvm->bi_sector - ti->begin;
+ bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);
return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 852052880d7..baa11912cc9 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -198,6 +198,7 @@ out:
atomic_set(&dc->may_delay, 1);
ti->num_flush_requests = 1;
+ ti->num_discard_requests = 1;
ti->private = dc;
return 0;
@@ -281,14 +282,13 @@ static int delay_map(struct dm_target *ti, struct bio *bio,
bio->bi_bdev = dc->dev_write->bdev;
if (bio_sectors(bio))
bio->bi_sector = dc->start_write +
- (bio->bi_sector - ti->begin);
+ dm_target_offset(ti, bio->bi_sector);
return delay_bio(dc, dc->write_delay, bio);
}
bio->bi_bdev = dc->dev_read->bdev;
- bio->bi_sector = dc->start_read +
- (bio->bi_sector - ti->begin);
+ bio->bi_sector = dc->start_read + dm_target_offset(ti, bio->bi_sector);
return delay_bio(dc, dc->read_delay, bio);
}
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 2b7907b6dd0..0bdb201c2c2 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -173,7 +173,9 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
/* Validate the chunk size against the device block size */
if (chunk_size %
- (bdev_logical_block_size(dm_snap_cow(store->snap)->bdev) >> 9)) {
+ (bdev_logical_block_size(dm_snap_cow(store->snap)->bdev) >> 9) ||
+ chunk_size %
+ (bdev_logical_block_size(dm_snap_origin(store->snap)->bdev) >> 9)) {
*error = "Chunk size is not a multiple of device blocksize";
return -EINVAL;
}
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index e8dfa06af3b..0b2536247cf 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -126,8 +126,9 @@ struct dm_exception_store {
};
/*
- * Obtain the cow device used by a given snapshot.
+ * Obtain the origin or cow device used by a given snapshot.
*/
+struct dm_dev *dm_snap_origin(struct dm_snapshot *snap);
struct dm_dev *dm_snap_cow(struct dm_snapshot *snap);
/*
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 10f457ca6af..0590c75b0ab 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -356,7 +356,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
BUG_ON(num_regions > DM_IO_MAX_REGIONS);
if (sync)
- rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+ rw |= REQ_SYNC | REQ_UNPLUG;
/*
* For multiple regions we need to be careful to rewind
@@ -364,7 +364,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
*/
for (i = 0; i < num_regions; i++) {
*dp = old_pages;
- if (where[i].count || (rw & (1 << BIO_RW_BARRIER)))
+ if (where[i].count || (rw & REQ_HARDBARRIER))
do_region(rw, i, where + i, dp, io);
}
@@ -412,8 +412,8 @@ retry:
}
set_current_state(TASK_RUNNING);
- if (io->eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) {
- rw &= ~(1 << BIO_RW_BARRIER);
+ if (io->eopnotsupp_bits && (rw & REQ_HARDBARRIER)) {
+ rw &= ~REQ_HARDBARRIER;
goto retry;
}
@@ -479,8 +479,8 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
* New collapsed (a)synchronous interface.
*
* If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
- * the queue with blk_unplug() some time later or set the BIO_RW_SYNC bit in
- * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
+ * the queue with blk_unplug() some time later or set REQ_SYNC in
+io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
* the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
*/
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index d7500e1c26f..3e39193e503 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -249,55 +249,66 @@ static void __hash_remove(struct hash_cell *hc)
static void dm_hash_remove_all(int keep_open_devices)
{
- int i, dev_skipped, dev_removed;
+ int i, dev_skipped;
struct hash_cell *hc;
- struct list_head *tmp, *n;
+ struct mapped_device *md;
+
+retry:
+ dev_skipped = 0;
down_write(&_hash_lock);
-retry:
- dev_skipped = dev_removed = 0;
for (i = 0; i < NUM_BUCKETS; i++) {
- list_for_each_safe (tmp, n, _name_buckets + i) {
- hc = list_entry(tmp, struct hash_cell, name_list);
+ list_for_each_entry(hc, _name_buckets + i, name_list) {
+ md = hc->md;
+ dm_get(md);
- if (keep_open_devices &&
- dm_lock_for_deletion(hc->md)) {
+ if (keep_open_devices && dm_lock_for_deletion(md)) {
+ dm_put(md);
dev_skipped++;
continue;
}
+
__hash_remove(hc);
- dev_removed = 1;
- }
- }
- /*
- * Some mapped devices may be using other mapped devices, so if any
- * still exist, repeat until we make no further progress.
- */
- if (dev_skipped) {
- if (dev_removed)
- goto retry;
+ up_write(&_hash_lock);
- DMWARN("remove_all left %d open device(s)", dev_skipped);
+ dm_put(md);
+ if (likely(keep_open_devices))
+ dm_destroy(md);
+ else
+ dm_destroy_immediate(md);
+
+ /*
+ * Some mapped devices may be using other mapped
+ * devices, so repeat until we make no further
+ * progress. If a new mapped device is created
+ * here it will also get removed.
+ */
+ goto retry;
+ }
}
up_write(&_hash_lock);
+
+ if (dev_skipped)
+ DMWARN("remove_all left %d open device(s)", dev_skipped);
}
-static int dm_hash_rename(uint32_t cookie, uint32_t *flags, const char *old,
- const char *new)
+static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
+ const char *new)
{
char *new_name, *old_name;
struct hash_cell *hc;
struct dm_table *table;
+ struct mapped_device *md;
/*
* duplicate new.
*/
new_name = kstrdup(new, GFP_KERNEL);
if (!new_name)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
down_write(&_hash_lock);
@@ -306,24 +317,24 @@ static int dm_hash_rename(uint32_t cookie, uint32_t *flags, const char *old,
*/
hc = __get_name_cell(new);
if (hc) {
- DMWARN("asked to rename to an already existing name %s -> %s",
- old, new);
+ DMWARN("asked to rename to an already-existing name %s -> %s",
+ param->name, new);
dm_put(hc->md);
up_write(&_hash_lock);
kfree(new_name);
- return -EBUSY;
+ return ERR_PTR(-EBUSY);
}
/*
* Is there such a device as 'old' ?
*/
- hc = __get_name_cell(old);
+ hc = __get_name_cell(param->name);
if (!hc) {
- DMWARN("asked to rename a non existent device %s -> %s",
- old, new);
+ DMWARN("asked to rename a non-existent device %s -> %s",
+ param->name, new);
up_write(&_hash_lock);
kfree(new_name);
- return -ENXIO;
+ return ERR_PTR(-ENXIO);
}
/*
@@ -345,13 +356,14 @@ static int dm_hash_rename(uint32_t cookie, uint32_t *flags, const char *old,
dm_table_put(table);
}
- if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, cookie))
- *flags |= DM_UEVENT_GENERATED_FLAG;
+ if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, param->event_nr))
+ param->flags |= DM_UEVENT_GENERATED_FLAG;
- dm_put(hc->md);
+ md = hc->md;
up_write(&_hash_lock);
kfree(old_name);
- return 0;
+
+ return md;
}
/*-----------------------------------------------------------------
@@ -573,7 +585,7 @@ static struct dm_table *dm_get_live_or_inactive_table(struct mapped_device *md,
* Fills in a dm_ioctl structure, ready for sending back to
* userland.
*/
-static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
+static void __dev_status(struct mapped_device *md, struct dm_ioctl *param)
{
struct gendisk *disk = dm_disk(md);
struct dm_table *table;
@@ -617,8 +629,6 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
dm_table_put(table);
}
}
-
- return 0;
}
static int dev_create(struct dm_ioctl *param, size_t param_size)
@@ -640,15 +650,17 @@ static int dev_create(struct dm_ioctl *param, size_t param_size)
r = dm_hash_insert(param->name, *param->uuid ? param->uuid : NULL, md);
if (r) {
dm_put(md);
+ dm_destroy(md);
return r;
}
param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
- r = __dev_status(md, param);
+ __dev_status(md, param);
+
dm_put(md);
- return r;
+ return 0;
}
/*
@@ -742,6 +754,7 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size)
param->flags |= DM_UEVENT_GENERATED_FLAG;
dm_put(md);
+ dm_destroy(md);
return 0;
}
@@ -762,6 +775,7 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size)
{
int r;
char *new_name = (char *) param + param->data_start;
+ struct mapped_device *md;
if (new_name < param->data ||
invalid_str(new_name, (void *) param + param_size) ||
@@ -774,10 +788,14 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size)
if (r)
return r;
- param->data_size = 0;
+ md = dm_hash_rename(param, new_name);
+ if (IS_ERR(md))
+ return PTR_ERR(md);
+
+ __dev_status(md, param);
+ dm_put(md);
- return dm_hash_rename(param->event_nr, &param->flags, param->name,
- new_name);
+ return 0;
}
static int dev_set_geometry(struct dm_ioctl *param, size_t param_size)
@@ -818,8 +836,6 @@ static int dev_set_geometry(struct dm_ioctl *param, size_t param_size)
geometry.start = indata[3];
r = dm_set_geometry(md, &geometry);
- if (!r)
- r = __dev_status(md, param);
param->data_size = 0;
@@ -843,13 +859,17 @@ static int do_suspend(struct dm_ioctl *param)
if (param->flags & DM_NOFLUSH_FLAG)
suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;
- if (!dm_suspended_md(md))
+ if (!dm_suspended_md(md)) {
r = dm_suspend(md, suspend_flags);
+ if (r)
+ goto out;
+ }
- if (!r)
- r = __dev_status(md, param);
+ __dev_status(md, param);
+out:
dm_put(md);
+
return r;
}
@@ -911,7 +931,7 @@ static int do_resume(struct dm_ioctl *param)
dm_table_destroy(old_map);
if (!r)
- r = __dev_status(md, param);
+ __dev_status(md, param);
dm_put(md);
return r;
@@ -935,16 +955,16 @@ static int dev_suspend(struct dm_ioctl *param, size_t param_size)
*/
static int dev_status(struct dm_ioctl *param, size_t param_size)
{
- int r;
struct mapped_device *md;
md = find_device(param);
if (!md)
return -ENXIO;
- r = __dev_status(md, param);
+ __dev_status(md, param);
dm_put(md);
- return r;
+
+ return 0;
}
/*
@@ -1019,7 +1039,7 @@ static void retrieve_status(struct dm_table *table,
*/
static int dev_wait(struct dm_ioctl *param, size_t param_size)
{
- int r;
+ int r = 0;
struct mapped_device *md;
struct dm_table *table;
@@ -1040,9 +1060,7 @@ static int dev_wait(struct dm_ioctl *param, size_t param_size)
* changed to trigger the event, so we may as well tell
* him and save an ioctl.
*/
- r = __dev_status(md, param);
- if (r)
- goto out;
+ __dev_status(md, param);
table = dm_get_live_or_inactive_table(md, param);
if (table) {
@@ -1050,8 +1068,9 @@ static int dev_wait(struct dm_ioctl *param, size_t param_size)
dm_table_put(table);
}
- out:
+out:
dm_put(md);
+
return r;
}
@@ -1112,28 +1131,9 @@ static int populate_table(struct dm_table *table,
next = spec->next;
}
- r = dm_table_set_type(table);
- if (r) {
- DMWARN("unable to set table type");
- return r;
- }
-
return dm_table_complete(table);
}
-static int table_prealloc_integrity(struct dm_table *t,
- struct mapped_device *md)
-{
- struct list_head *devices = dm_table_get_devices(t);
- struct dm_dev_internal *dd;
-
- list_for_each_entry(dd, devices, list)
- if (bdev_get_integrity(dd->dm_dev.bdev))
- return blk_integrity_register(dm_disk(md), NULL);
-
- return 0;
-}
-
static int table_load(struct dm_ioctl *param, size_t param_size)
{
int r;
@@ -1155,21 +1155,30 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
goto out;
}
- r = table_prealloc_integrity(t, md);
- if (r) {
- DMERR("%s: could not register integrity profile.",
- dm_device_name(md));
+ /* Protect md->type and md->queue against concurrent table loads. */
+ dm_lock_md_type(md);
+ if (dm_get_md_type(md) == DM_TYPE_NONE)
+ /* Initial table load: acquire type of table. */
+ dm_set_md_type(md, dm_table_get_type(t));
+ else if (dm_get_md_type(md) != dm_table_get_type(t)) {
+ DMWARN("can't change device type after initial table load.");
dm_table_destroy(t);
+ dm_unlock_md_type(md);
+ r = -EINVAL;
goto out;
}
- r = dm_table_alloc_md_mempools(t);
+ /* setup md->queue to reflect md's type (may block) */
+ r = dm_setup_md_queue(md);
if (r) {
- DMWARN("unable to allocate mempools for this table");
+ DMWARN("unable to set up device queue for new table.");
dm_table_destroy(t);
+ dm_unlock_md_type(md);
goto out;
}
+ dm_unlock_md_type(md);
+ /* stage inactive table */
down_write(&_hash_lock);
hc = dm_get_mdptr(md);
if (!hc || hc->md != md) {
@@ -1186,7 +1195,7 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
up_write(&_hash_lock);
param->flags |= DM_INACTIVE_PRESENT_FLAG;
- r = __dev_status(md, param);
+ __dev_status(md, param);
out:
dm_put(md);
@@ -1196,7 +1205,6 @@ out:
static int table_clear(struct dm_ioctl *param, size_t param_size)
{
- int r;
struct hash_cell *hc;
struct mapped_device *md;
@@ -1216,11 +1224,12 @@ static int table_clear(struct dm_ioctl *param, size_t param_size)
param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
- r = __dev_status(hc->md, param);
+ __dev_status(hc->md, param);
md = hc->md;
up_write(&_hash_lock);
dm_put(md);
- return r;
+
+ return 0;
}
/*
@@ -1265,7 +1274,6 @@ static void retrieve_deps(struct dm_table *table,
static int table_deps(struct dm_ioctl *param, size_t param_size)
{
- int r = 0;
struct mapped_device *md;
struct dm_table *table;
@@ -1273,9 +1281,7 @@ static int table_deps(struct dm_ioctl *param, size_t param_size)
if (!md)
return -ENXIO;
- r = __dev_status(md, param);
- if (r)
- goto out;
+ __dev_status(md, param);
table = dm_get_live_or_inactive_table(md, param);
if (table) {
@@ -1283,9 +1289,9 @@ static int table_deps(struct dm_ioctl *param, size_t param_size)
dm_table_put(table);
}
- out:
dm_put(md);
- return r;
+
+ return 0;
}
/*
@@ -1294,7 +1300,6 @@ static int table_deps(struct dm_ioctl *param, size_t param_size)
*/
static int table_status(struct dm_ioctl *param, size_t param_size)
{
- int r;
struct mapped_device *md;
struct dm_table *table;
@@ -1302,9 +1307,7 @@ static int table_status(struct dm_ioctl *param, size_t param_size)
if (!md)
return -ENXIO;
- r = __dev_status(md, param);
- if (r)
- goto out;
+ __dev_status(md, param);
table = dm_get_live_or_inactive_table(md, param);
if (table) {
@@ -1312,9 +1315,9 @@ static int table_status(struct dm_ioctl *param, size_t param_size)
dm_table_put(table);
}
-out:
dm_put(md);
- return r;
+
+ return 0;
}
/*
@@ -1333,10 +1336,6 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
if (!md)
return -ENXIO;
- r = __dev_status(md, param);
- if (r)
- goto out;
-
if (tmsg < (struct dm_target_msg *) param->data ||
invalid_str(tmsg->message, (void *) param + param_size)) {
DMWARN("Invalid target message parameters.");
@@ -1593,18 +1592,22 @@ static long dm_compat_ctl_ioctl(struct file *file, uint command, ulong u)
#endif
static const struct file_operations _ctl_fops = {
+ .open = nonseekable_open,
.unlocked_ioctl = dm_ctl_ioctl,
.compat_ioctl = dm_compat_ctl_ioctl,
.owner = THIS_MODULE,
};
static struct miscdevice _dm_misc = {
- .minor = MISC_DYNAMIC_MINOR,
+ .minor = MAPPER_CTRL_MINOR,
.name = DM_NAME,
- .nodename = "mapper/control",
+ .nodename = DM_DIR "/" DM_CONTROL_NODE,
.fops = &_ctl_fops
};
+MODULE_ALIAS_MISCDEV(MAPPER_CTRL_MINOR);
+MODULE_ALIAS("devname:" DM_DIR "/" DM_CONTROL_NODE);
+
/*
* Create misc character device and link to DM_DIR/control.
*/
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index addf8347504..d8587bac568 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -345,7 +345,7 @@ static int run_io_job(struct kcopyd_job *job)
{
int r;
struct dm_io_request io_req = {
- .bi_rw = job->rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG),
+ .bi_rw = job->rw | REQ_SYNC | REQ_UNPLUG,
.mem.type = DM_IO_PAGE_LIST,
.mem.ptr.pl = job->pages,
.mem.offset = job->offset,
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 9200dbf2391..3921e3bb43c 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -53,6 +53,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
ti->num_flush_requests = 1;
+ ti->num_discard_requests = 1;
ti->private = lc;
return 0;
@@ -73,7 +74,7 @@ static sector_t linear_map_sector(struct dm_target *ti, sector_t bi_sector)
{
struct linear_c *lc = ti->private;
- return lc->start + (bi_sector - ti->begin);
+ return lc->start + dm_target_offset(ti, bi_sector);
}
static void linear_map_bio(struct dm_target *ti, struct bio *bio)
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 826bce7343b..487ecda90ad 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -706,6 +706,7 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
if (as->argc < nr_params) {
ti->error = "not enough path parameters";
+ r = -EINVAL;
goto bad;
}
@@ -892,6 +893,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
}
ti->num_flush_requests = 1;
+ ti->num_discard_requests = 1;
return 0;
@@ -1271,6 +1273,15 @@ static int do_end_io(struct multipath *m, struct request *clone,
if (error == -EOPNOTSUPP)
return error;
+ if (clone->cmd_flags & REQ_DISCARD)
+ /*
+ * Pass all discard request failures up.
+ * FIXME: only fail_path if the discard failed due to a
+ * transport problem. This requires precise understanding
+ * of the underlying failure (e.g. the SCSI sense).
+ */
+ return error;
+
if (mpio->pgpath)
fail_path(mpio->pgpath);
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index ddda531723d..7c081bcbc3c 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -445,7 +445,7 @@ static sector_t map_sector(struct mirror *m, struct bio *bio)
{
if (unlikely(!bio->bi_size))
return 0;
- return m->offset + (bio->bi_sector - m->ms->ti->begin);
+ return m->offset + dm_target_offset(m->ms->ti, bio->bi_sector);
}
static void map_bio(struct mirror *m, struct bio *bio)
@@ -1211,7 +1211,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
if (error == -EOPNOTSUPP)
goto out;
- if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
+ if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
goto out;
if (unlikely(error)) {
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index c097d8a4823..cc2bdb83f9a 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -266,7 +266,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
*/
static chunk_t area_location(struct pstore *ps, chunk_t area)
{
- return 1 + ((ps->exceptions_per_area + 1) * area);
+ return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
}
/*
@@ -780,8 +780,8 @@ static int persistent_commit_merge(struct dm_exception_store *store,
* ps->current_area does not get reduced by prepare_merge() until
* after commit_merge() has removed the nr_merged previous exceptions.
*/
- ps->next_free = (area_location(ps, ps->current_area) - 1) +
- (ps->current_committed + 1) + NUM_SNAPSHOT_HDR_CHUNKS;
+ ps->next_free = area_location(ps, ps->current_area) +
+ ps->current_committed + 1;
return 0;
}
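
The next_free rewrite above is a pure simplification: with area_location() now returning NUM_SNAPSHOT_HDR_CHUNKS plus the area offset, the old "- 1 ... + NUM_SNAPSHOT_HDR_CHUNKS" terms cancel out. A small standalone check of that equivalence, assuming NUM_SNAPSHOT_HDR_CHUNKS is 1 as the old "1 +" form implies:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_SNAPSHOT_HDR_CHUNKS 1	/* assumed value, matching the old "1 +" */

static uint64_t area_location_old(uint64_t exceptions_per_area, uint64_t area)
{
	return 1 + (exceptions_per_area + 1) * area;
}

static uint64_t area_location_new(uint64_t exceptions_per_area, uint64_t area)
{
	return NUM_SNAPSHOT_HDR_CHUNKS + (exceptions_per_area + 1) * area;
}

int main(void)
{
	uint64_t epa = 128, area = 5, committed = 17;

	uint64_t next_free_old = (area_location_old(epa, area) - 1) +
				 (committed + 1) + NUM_SNAPSHOT_HDR_CHUNKS;
	uint64_t next_free_new = area_location_new(epa, area) +
				 committed + 1;

	assert(next_free_old == next_free_new);
	printf("next_free = %llu (both forms agree)\n",
	       (unsigned long long)next_free_new);
	return 0;
}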
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 54853773510..5974d3094d9 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -148,6 +148,12 @@ struct dm_snapshot {
#define RUNNING_MERGE 0
#define SHUTDOWN_MERGE 1
+struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
+{
+ return s->origin;
+}
+EXPORT_SYMBOL(dm_snap_origin);
+
struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
return s->cow;
@@ -1065,10 +1071,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
origin_mode = FMODE_WRITE;
}
- origin_path = argv[0];
- argv++;
- argc--;
-
s = kmalloc(sizeof(*s), GFP_KERNEL);
if (!s) {
ti->error = "Cannot allocate snapshot context private "
@@ -1077,6 +1079,16 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
+ origin_path = argv[0];
+ argv++;
+ argc--;
+
+ r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
+ if (r) {
+ ti->error = "Cannot get origin device";
+ goto bad_origin;
+ }
+
cow_path = argv[0];
argv++;
argc--;
@@ -1097,12 +1109,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
argv += args_used;
argc -= args_used;
- r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
- if (r) {
- ti->error = "Cannot get origin device";
- goto bad_origin;
- }
-
s->ti = ti;
s->valid = 1;
s->active = 0;
@@ -1212,15 +1218,15 @@ bad_kcopyd:
dm_exception_table_exit(&s->complete, exception_cache);
bad_hash_tables:
- dm_put_device(ti, s->origin);
-
-bad_origin:
dm_exception_store_destroy(s->store);
bad_store:
dm_put_device(ti, s->cow);
bad_cow:
+ dm_put_device(ti, s->origin);
+
+bad_origin:
kfree(s);
bad:
@@ -1314,12 +1320,12 @@ static void snapshot_dtr(struct dm_target *ti)
mempool_destroy(s->pending_pool);
- dm_put_device(ti, s->origin);
-
dm_exception_store_destroy(s->store);
dm_put_device(ti, s->cow);
+ dm_put_device(ti, s->origin);
+
kfree(s);
}
@@ -1686,7 +1692,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
chunk_t chunk;
if (unlikely(bio_empty_barrier(bio))) {
- if (!map_context->flush_request)
+ if (!map_context->target_request_nr)
bio->bi_bdev = s->origin->bdev;
else
bio->bi_bdev = s->cow->bdev;
@@ -1899,8 +1905,14 @@ static int snapshot_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
struct dm_snapshot *snap = ti->private;
+ int r;
+
+ r = fn(ti, snap->origin, 0, ti->len, data);
+
+ if (!r)
+ r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);
- return fn(ti, snap->origin, 0, ti->len, data);
+ return r;
}
@@ -2159,6 +2171,21 @@ static int origin_status(struct dm_target *ti, status_type_t type, char *result,
return 0;
}
+static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
+ struct bio_vec *biovec, int max_size)
+{
+ struct dm_dev *dev = ti->private;
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+
+ if (!q->merge_bvec_fn)
+ return max_size;
+
+ bvm->bi_bdev = dev->bdev;
+ bvm->bi_sector = bvm->bi_sector;
+
+ return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
+}
+
static int origin_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
@@ -2176,6 +2203,7 @@ static struct target_type origin_target = {
.map = origin_map,
.resume = origin_resume,
.status = origin_status,
+ .merge = origin_merge,
.iterate_devices = origin_iterate_devices,
};
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index e610725db76..c297f6da91e 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -25,6 +25,8 @@ struct stripe {
struct stripe_c {
uint32_t stripes;
+ int stripes_shift;
+ sector_t stripes_mask;
/* The size of this target / num. stripes */
sector_t stripe_width;
@@ -162,16 +164,22 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
/* Set pointer to dm target; used in trigger_event */
sc->ti = ti;
-
sc->stripes = stripes;
sc->stripe_width = width;
+
+ if (stripes & (stripes - 1))
+ sc->stripes_shift = -1;
+ else {
+ sc->stripes_shift = ffs(stripes) - 1;
+ sc->stripes_mask = ((sector_t) stripes) - 1;
+ }
+
ti->split_io = chunk_size;
ti->num_flush_requests = stripes;
+ ti->num_discard_requests = stripes;
+ sc->chunk_shift = ffs(chunk_size) - 1;
sc->chunk_mask = ((sector_t) chunk_size) - 1;
- for (sc->chunk_shift = 0; chunk_size; sc->chunk_shift++)
- chunk_size >>= 1;
- sc->chunk_shift--;
/*
* Get the stripe destinations.
@@ -207,26 +215,79 @@ static void stripe_dtr(struct dm_target *ti)
kfree(sc);
}
+static void stripe_map_sector(struct stripe_c *sc, sector_t sector,
+ uint32_t *stripe, sector_t *result)
+{
+ sector_t offset = dm_target_offset(sc->ti, sector);
+ sector_t chunk = offset >> sc->chunk_shift;
+
+ if (sc->stripes_shift < 0)
+ *stripe = sector_div(chunk, sc->stripes);
+ else {
+ *stripe = chunk & sc->stripes_mask;
+ chunk >>= sc->stripes_shift;
+ }
+
+ *result = (chunk << sc->chunk_shift) | (offset & sc->chunk_mask);
+}
+
+static void stripe_map_range_sector(struct stripe_c *sc, sector_t sector,
+ uint32_t target_stripe, sector_t *result)
+{
+ uint32_t stripe;
+
+ stripe_map_sector(sc, sector, &stripe, result);
+ if (stripe == target_stripe)
+ return;
+ *result &= ~sc->chunk_mask; /* round down */
+ if (target_stripe < stripe)
+ *result += sc->chunk_mask + 1; /* next chunk */
+}
+
+static int stripe_map_discard(struct stripe_c *sc, struct bio *bio,
+ uint32_t target_stripe)
+{
+ sector_t begin, end;
+
+ stripe_map_range_sector(sc, bio->bi_sector, target_stripe, &begin);
+ stripe_map_range_sector(sc, bio->bi_sector + bio_sectors(bio),
+ target_stripe, &end);
+ if (begin < end) {
+ bio->bi_bdev = sc->stripe[target_stripe].dev->bdev;
+ bio->bi_sector = begin + sc->stripe[target_stripe].physical_start;
+ bio->bi_size = to_bytes(end - begin);
+ return DM_MAPIO_REMAPPED;
+ } else {
+ /* The range doesn't map to the target stripe */
+ bio_endio(bio, 0);
+ return DM_MAPIO_SUBMITTED;
+ }
+}
+
static int stripe_map(struct dm_target *ti, struct bio *bio,
union map_info *map_context)
{
- struct stripe_c *sc = (struct stripe_c *) ti->private;
- sector_t offset, chunk;
+ struct stripe_c *sc = ti->private;
uint32_t stripe;
+ unsigned target_request_nr;
if (unlikely(bio_empty_barrier(bio))) {
- BUG_ON(map_context->flush_request >= sc->stripes);
- bio->bi_bdev = sc->stripe[map_context->flush_request].dev->bdev;
+ target_request_nr = map_context->target_request_nr;
+ BUG_ON(target_request_nr >= sc->stripes);
+ bio->bi_bdev = sc->stripe[target_request_nr].dev->bdev;
return DM_MAPIO_REMAPPED;
}
+ if (unlikely(bio->bi_rw & REQ_DISCARD)) {
+ target_request_nr = map_context->target_request_nr;
+ BUG_ON(target_request_nr >= sc->stripes);
+ return stripe_map_discard(sc, bio, target_request_nr);
+ }
- offset = bio->bi_sector - ti->begin;
- chunk = offset >> sc->chunk_shift;
- stripe = sector_div(chunk, sc->stripes);
+ stripe_map_sector(sc, bio->bi_sector, &stripe, &bio->bi_sector);
+ bio->bi_sector += sc->stripe[stripe].physical_start;
bio->bi_bdev = sc->stripe[stripe].dev->bdev;
- bio->bi_sector = sc->stripe[stripe].physical_start +
- (chunk << sc->chunk_shift) + (offset & sc->chunk_mask);
+
return DM_MAPIO_REMAPPED;
}
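
The stripes_shift/stripes_mask fields added above let stripe_map_sector() avoid a 64-bit division (sector_div) whenever the stripe count is a power of two. A standalone sketch of the same arithmetic; the struct and helper names here are illustrative rather than the dm-stripe API, and plain '%' and '/' stand in for sector_div() on the fallback path:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct stripe_map {
	uint32_t stripes;
	int      stripes_shift;		/* -1 if stripes is not a power of two */
	uint64_t stripes_mask;
};

static void stripe_map_init(struct stripe_map *sm, uint32_t stripes)
{
	sm->stripes = stripes;
	sm->stripes_mask = 0;
	if (stripes & (stripes - 1))
		sm->stripes_shift = -1;		/* fall back to division */
	else {
		sm->stripes_shift = __builtin_ffs(stripes) - 1;
		sm->stripes_mask = (uint64_t)stripes - 1;
	}
}

/* Map a chunk index to (stripe number, chunk index within that stripe). */
static void map_chunk(const struct stripe_map *sm, uint64_t chunk,
		      uint32_t *stripe, uint64_t *chunk_in_stripe)
{
	if (sm->stripes_shift < 0) {
		*stripe = chunk % sm->stripes;
		*chunk_in_stripe = chunk / sm->stripes;
	} else {
		*stripe = chunk & sm->stripes_mask;
		*chunk_in_stripe = chunk >> sm->stripes_shift;
	}
}

int main(void)
{
	struct stripe_map sm;
	uint32_t stripe;
	uint64_t cis;

	stripe_map_init(&sm, 4);		/* power of two: shift/mask path */
	map_chunk(&sm, 1027, &stripe, &cis);
	assert(stripe == 1027 % 4 && cis == 1027 / 4);
	printf("chunk 1027 -> stripe %u, chunk %llu\n",
	       stripe, (unsigned long long)cis);
	return 0;
}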
@@ -284,7 +345,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
if (!error)
return 0; /* I/O complete */
- if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
+ if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
return error;
if (error == -EOPNOTSUPP)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 9924ea23032..f9fc07d7a4b 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -54,6 +54,8 @@ struct dm_table {
sector_t *highs;
struct dm_target *targets;
+ unsigned discards_supported:1;
+
/*
* Indicates the rw permissions for the new logical
* device. This should be a combination of FMODE_READ
@@ -203,6 +205,7 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
INIT_LIST_HEAD(&t->devices);
atomic_set(&t->holders, 0);
+ t->discards_supported = 1;
if (!num_targets)
num_targets = KEYS_PER_NODE;
@@ -245,7 +248,7 @@ void dm_table_destroy(struct dm_table *t)
msleep(1);
smp_mb();
- /* free the indexes (see dm_table_complete) */
+ /* free the indexes */
if (t->depth >= 2)
vfree(t->index[t->depth - 2]);
@@ -770,6 +773,9 @@ int dm_table_add_target(struct dm_table *t, const char *type,
t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
+ if (!tgt->num_discard_requests)
+ t->discards_supported = 0;
+
return 0;
bad:
@@ -778,7 +784,7 @@ int dm_table_add_target(struct dm_table *t, const char *type,
return r;
}
-int dm_table_set_type(struct dm_table *t)
+static int dm_table_set_type(struct dm_table *t)
{
unsigned i;
unsigned bio_based = 0, request_based = 0;
@@ -900,7 +906,7 @@ static int setup_indexes(struct dm_table *t)
/*
* Builds the btree to index the map.
*/
-int dm_table_complete(struct dm_table *t)
+static int dm_table_build_index(struct dm_table *t)
{
int r = 0;
unsigned int leaf_nodes;
@@ -919,6 +925,55 @@ int dm_table_complete(struct dm_table *t)
return r;
}
+/*
+ * Register the mapped device for blk_integrity support if
+ * the underlying devices support it.
+ */
+static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md)
+{
+ struct list_head *devices = dm_table_get_devices(t);
+ struct dm_dev_internal *dd;
+
+ list_for_each_entry(dd, devices, list)
+ if (bdev_get_integrity(dd->dm_dev.bdev))
+ return blk_integrity_register(dm_disk(md), NULL);
+
+ return 0;
+}
+
+/*
+ * Prepares the table for use by building the indices,
+ * setting the type, and allocating mempools.
+ */
+int dm_table_complete(struct dm_table *t)
+{
+ int r;
+
+ r = dm_table_set_type(t);
+ if (r) {
+ DMERR("unable to set table type");
+ return r;
+ }
+
+ r = dm_table_build_index(t);
+ if (r) {
+ DMERR("unable to build btrees");
+ return r;
+ }
+
+ r = dm_table_prealloc_integrity(t, t->md);
+ if (r) {
+ DMERR("could not register integrity profile.");
+ return r;
+ }
+
+ r = dm_table_alloc_md_mempools(t);
+ if (r)
+ DMERR("unable to allocate mempools");
+
+ return r;
+}
+
static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
void (*fn)(void *), void *context)
@@ -1086,6 +1141,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
else
queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
+ if (!dm_table_supports_discards(t))
+ queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+ else
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+
dm_table_set_integrity(t);
/*
@@ -1232,6 +1292,39 @@ struct mapped_device *dm_table_get_md(struct dm_table *t)
return t->md;
}
+static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+
+ return q && blk_queue_discard(q);
+}
+
+bool dm_table_supports_discards(struct dm_table *t)
+{
+ struct dm_target *ti;
+ unsigned i = 0;
+
+ if (!t->discards_supported)
+ return 0;
+
+ /*
+ * Ensure that at least one underlying device supports discards.
+ * t->devices includes internal dm devices such as mirror logs
+ * so we need to use iterate_devices here, which targets
+ * supporting discard must provide.
+ */
+ while (i < dm_table_get_num_targets(t)) {
+ ti = dm_table_get_target(t, i++);
+
+ if (ti->type->iterate_devices &&
+ ti->type->iterate_devices(ti, device_discard_capable, NULL))
+ return 1;
+ }
+
+ return 0;
+}
+
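
Putting the two pieces together: a table only advertises discard support if every target set num_discard_requests (otherwise discards_supported was cleared when the target was added) and at least one underlying device's queue accepts discards. A hedged userspace sketch of that combined check, with invented structures standing in for dm_target and the iterate_devices callback:

#include <stdbool.h>
#include <stdio.h>

struct target {
	unsigned num_discard_requests;
	bool     device_supports_discard;	/* stand-in for iterate_devices() */
};

static bool table_supports_discards(const struct target *t, unsigned n)
{
	bool any_device = false;
	unsigned i;

	for (i = 0; i < n; i++) {
		if (!t[i].num_discard_requests)
			return false;		/* one target opting out disables it */
		if (t[i].device_supports_discard)
			any_device = true;
	}
	return any_device;
}

int main(void)
{
	struct target table[] = {
		{ .num_discard_requests = 1, .device_supports_discard = false },
		{ .num_discard_requests = 1, .device_supports_discard = true  },
	};

	printf("discards %s\n",
	       table_supports_discards(table, 2) ? "supported" : "not supported");
	return 0;
}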
EXPORT_SYMBOL(dm_vcalloc);
EXPORT_SYMBOL(dm_get_device);
EXPORT_SYMBOL(dm_put_device);
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 11dea11dc0b..8da366cf381 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -113,6 +113,11 @@ void dm_unregister_target(struct target_type *tt)
*/
static int io_err_ctr(struct dm_target *tt, unsigned int argc, char **args)
{
+ /*
+ * Return error for discards instead of -EOPNOTSUPP
+ */
+ tt->num_discard_requests = 1;
+
return 0;
}
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index bbc97030c0c..cc2b3cb8194 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -22,6 +22,11 @@ static int zero_ctr(struct dm_target *ti, unsigned int argc, char **argv)
return -EINVAL;
}
+ /*
+ * Silently drop discards, avoiding -EOPNOTSUPP.
+ */
+ ti->num_discard_requests = 1;
+
return 0;
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index d21e1284604..ac384b2a6a3 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -15,10 +15,12 @@
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
+#include <linux/smp_lock.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
+#include <linux/delay.h>
#include <trace/events/block.h>
@@ -123,6 +125,10 @@ struct mapped_device {
unsigned long flags;
struct request_queue *queue;
+ unsigned type;
+ /* Protect queue and type against concurrent access. */
+ struct mutex type_lock;
+
struct gendisk *disk;
char name[16];
@@ -338,6 +344,7 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
struct mapped_device *md;
+ lock_kernel();
spin_lock(&_minor_lock);
md = bdev->bd_disk->private_data;
@@ -355,6 +362,7 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode)
out:
spin_unlock(&_minor_lock);
+ unlock_kernel();
return md ? 0 : -ENXIO;
}
@@ -362,8 +370,12 @@ out:
static int dm_blk_close(struct gendisk *disk, fmode_t mode)
{
struct mapped_device *md = disk->private_data;
+
+ lock_kernel();
atomic_dec(&md->open_count);
dm_put(md);
+ unlock_kernel();
+
return 0;
}
@@ -614,7 +626,7 @@ static void dec_pending(struct dm_io *io, int error)
*/
spin_lock_irqsave(&md->deferred_lock, flags);
if (__noflush_suspending(md)) {
- if (!bio_rw_flagged(io->bio, BIO_RW_BARRIER))
+ if (!(io->bio->bi_rw & REQ_HARDBARRIER))
bio_list_add_head(&md->deferred,
io->bio);
} else
@@ -626,13 +638,19 @@ static void dec_pending(struct dm_io *io, int error)
io_error = io->error;
bio = io->bio;
- if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
+ if (bio->bi_rw & REQ_HARDBARRIER) {
/*
* There can be just one barrier request so we use
* a per-device variable for error reporting.
* Note that you can't touch the bio after end_io_acct
+ *
+ * We ignore -EOPNOTSUPP for empty flush reported by
+ * underlying devices. We assume that if the device
+ * doesn't support empty barriers, it doesn't need
+ * cache flushing commands.
*/
- if (!md->barrier_error && io_error != -EOPNOTSUPP)
+ if (!md->barrier_error &&
+ !(bio_empty_barrier(bio) && io_error == -EOPNOTSUPP))
md->barrier_error = io_error;
end_io_acct(io);
free_io(md, io);
@@ -792,12 +810,12 @@ static void dm_end_request(struct request *clone, int error)
{
int rw = rq_data_dir(clone);
int run_queue = 1;
- bool is_barrier = blk_barrier_rq(clone);
+ bool is_barrier = clone->cmd_flags & REQ_HARDBARRIER;
struct dm_rq_target_io *tio = clone->end_io_data;
struct mapped_device *md = tio->md;
struct request *rq = tio->orig;
- if (blk_pc_request(rq) && !is_barrier) {
+ if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !is_barrier) {
rq->errors = clone->errors;
rq->resid_len = clone->resid_len;
@@ -844,7 +862,7 @@ void dm_requeue_unmapped_request(struct request *clone)
struct request_queue *q = rq->q;
unsigned long flags;
- if (unlikely(blk_barrier_rq(clone))) {
+ if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
/*
* Barrier clones share an original request.
* Leave it to dm_end_request(), which handles this special
@@ -943,7 +961,7 @@ static void dm_complete_request(struct request *clone, int error)
struct dm_rq_target_io *tio = clone->end_io_data;
struct request *rq = tio->orig;
- if (unlikely(blk_barrier_rq(clone))) {
+ if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
/*
* Barrier clones share an original request. So can't use
* softirq_done with the original.
@@ -972,7 +990,7 @@ void dm_kill_unmapped_request(struct request *clone, int error)
struct dm_rq_target_io *tio = clone->end_io_data;
struct request *rq = tio->orig;
- if (unlikely(blk_barrier_rq(clone))) {
+ if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
/*
* Barrier clones share an original request.
* Leave it to dm_end_request(), which handles this special
@@ -1012,17 +1030,27 @@ static void end_clone_request(struct request *clone, int error)
dm_complete_request(clone, error);
}
-static sector_t max_io_len(struct mapped_device *md,
- sector_t sector, struct dm_target *ti)
+/*
+ * Return maximum size of I/O possible at the supplied sector up to the current
+ * target boundary.
+ */
+static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
{
- sector_t offset = sector - ti->begin;
- sector_t len = ti->len - offset;
+ sector_t target_offset = dm_target_offset(ti, sector);
+
+ return ti->len - target_offset;
+}
+
+static sector_t max_io_len(sector_t sector, struct dm_target *ti)
+{
+ sector_t len = max_io_len_target_boundary(sector, ti);
/*
* Does the target need to split even further ?
*/
if (ti->split_io) {
sector_t boundary;
+ sector_t offset = dm_target_offset(ti, sector);
boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
- offset;
if (len > boundary)
@@ -1106,7 +1134,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
clone->bi_sector = sector;
clone->bi_bdev = bio->bi_bdev;
- clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER);
+ clone->bi_rw = bio->bi_rw & ~REQ_HARDBARRIER;
clone->bi_vcnt = 1;
clone->bi_size = to_bytes(len);
clone->bi_io_vec->bv_offset = offset;
@@ -1133,7 +1161,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
__bio_clone(clone, bio);
- clone->bi_rw &= ~(1 << BIO_RW_BARRIER);
+ clone->bi_rw &= ~REQ_HARDBARRIER;
clone->bi_destructor = dm_bio_destructor;
clone->bi_sector = sector;
clone->bi_idx = idx;
@@ -1164,36 +1192,96 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci,
return tio;
}
-static void __flush_target(struct clone_info *ci, struct dm_target *ti,
- unsigned flush_nr)
+static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
+ unsigned request_nr, sector_t len)
{
struct dm_target_io *tio = alloc_tio(ci, ti);
struct bio *clone;
- tio->info.flush_request = flush_nr;
+ tio->info.target_request_nr = request_nr;
- clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
+ /*
+ * Discard requests require the bio's inline iovecs be initialized.
+ * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
+ * and discard, so no need for concern about wasted bvec allocations.
+ */
+ clone = bio_alloc_bioset(GFP_NOIO, ci->bio->bi_max_vecs, ci->md->bs);
__bio_clone(clone, ci->bio);
clone->bi_destructor = dm_bio_destructor;
+ if (len) {
+ clone->bi_sector = ci->sector;
+ clone->bi_size = to_bytes(len);
+ }
__map_bio(ti, clone, tio);
}
+static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti,
+ unsigned num_requests, sector_t len)
+{
+ unsigned request_nr;
+
+ for (request_nr = 0; request_nr < num_requests; request_nr++)
+ __issue_target_request(ci, ti, request_nr, len);
+}
+
static int __clone_and_map_empty_barrier(struct clone_info *ci)
{
- unsigned target_nr = 0, flush_nr;
+ unsigned target_nr = 0;
struct dm_target *ti;
while ((ti = dm_table_get_target(ci->map, target_nr++)))
- for (flush_nr = 0; flush_nr < ti->num_flush_requests;
- flush_nr++)
- __flush_target(ci, ti, flush_nr);
+ __issue_target_requests(ci, ti, ti->num_flush_requests, 0);
ci->sector_count = 0;
return 0;
}
+/*
+ * Perform all io with a single clone.
+ */
+static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
+{
+ struct bio *clone, *bio = ci->bio;
+ struct dm_target_io *tio;
+
+ tio = alloc_tio(ci, ti);
+ clone = clone_bio(bio, ci->sector, ci->idx,
+ bio->bi_vcnt - ci->idx, ci->sector_count,
+ ci->md->bs);
+ __map_bio(ti, clone, tio);
+ ci->sector_count = 0;
+}
+
+static int __clone_and_map_discard(struct clone_info *ci)
+{
+ struct dm_target *ti;
+ sector_t len;
+
+ do {
+ ti = dm_table_find_target(ci->map, ci->sector);
+ if (!dm_target_is_valid(ti))
+ return -EIO;
+
+ /*
+ * Even though the device advertised discard support,
+ * reconfiguration might have changed that since the
+ * check was performed.
+ */
+ if (!ti->num_discard_requests)
+ return -EOPNOTSUPP;
+
+ len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
+
+ __issue_target_requests(ci, ti, ti->num_discard_requests, len);
+
+ ci->sector += len;
+ } while (ci->sector_count -= len);
+
+ return 0;
+}
+
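
The discard loop above walks the range target by target, clipping each piece to the space left before the current target boundary. A userspace sketch of that splitting loop, with invented types in place of dm_table_find_target() and max_io_len_target_boundary():

#include <stdint.h>
#include <stdio.h>

struct target {
	uint64_t begin;		/* first sector mapped by this target */
	uint64_t len;		/* number of sectors it maps          */
};

/* Sectors left before the end of the target that contains 'sector'. */
static uint64_t max_len_to_boundary(const struct target *t, uint64_t sector)
{
	return t->len - (sector - t->begin);
}

static const struct target *find_target(const struct target *t, unsigned n,
					uint64_t sector)
{
	unsigned i;

	for (i = 0; i < n; i++)
		if (sector >= t[i].begin && sector < t[i].begin + t[i].len)
			return &t[i];
	return NULL;
}

int main(void)
{
	const struct target table[] = { { 0, 100 }, { 100, 150 }, { 250, 50 } };
	uint64_t sector = 80, count = 120;	/* spans the first two targets */

	while (count) {
		const struct target *ti = find_target(table, 3, sector);
		uint64_t len;

		if (!ti)
			return 1;		/* range falls off the table */

		len = max_len_to_boundary(ti, sector);
		if (len > count)
			len = count;

		printf("issue %llu sectors at %llu to target [%llu,%llu)\n",
		       (unsigned long long)len, (unsigned long long)sector,
		       (unsigned long long)ti->begin,
		       (unsigned long long)(ti->begin + ti->len));

		sector += len;
		count -= len;
	}
	return 0;
}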
static int __clone_and_map(struct clone_info *ci)
{
struct bio *clone, *bio = ci->bio;
@@ -1204,27 +1292,21 @@ static int __clone_and_map(struct clone_info *ci)
if (unlikely(bio_empty_barrier(bio)))
return __clone_and_map_empty_barrier(ci);
+ if (unlikely(bio->bi_rw & REQ_DISCARD))
+ return __clone_and_map_discard(ci);
+
ti = dm_table_find_target(ci->map, ci->sector);
if (!dm_target_is_valid(ti))
return -EIO;
- max = max_io_len(ci->md, ci->sector, ti);
-
- /*
- * Allocate a target io object.
- */
- tio = alloc_tio(ci, ti);
+ max = max_io_len(ci->sector, ti);
if (ci->sector_count <= max) {
/*
* Optimise for the simple case where we can do all of
* the remaining io with a single clone.
*/
- clone = clone_bio(bio, ci->sector, ci->idx,
- bio->bi_vcnt - ci->idx, ci->sector_count,
- ci->md->bs);
- __map_bio(ti, clone, tio);
- ci->sector_count = 0;
+ __clone_and_map_simple(ci, ti);
} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
/*
@@ -1245,6 +1327,7 @@ static int __clone_and_map(struct clone_info *ci)
len += bv_len;
}
+ tio = alloc_tio(ci, ti);
clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
ci->md->bs);
__map_bio(ti, clone, tio);
@@ -1267,13 +1350,12 @@ static int __clone_and_map(struct clone_info *ci)
if (!dm_target_is_valid(ti))
return -EIO;
- max = max_io_len(ci->md, ci->sector, ti);
-
- tio = alloc_tio(ci, ti);
+ max = max_io_len(ci->sector, ti);
}
len = min(remaining, max);
+ tio = alloc_tio(ci, ti);
clone = split_bvec(bio, ci->sector, ci->idx,
bv->bv_offset + offset, len,
ci->md->bs);
@@ -1301,7 +1383,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
ci.map = dm_get_live_table(md);
if (unlikely(!ci.map)) {
- if (!bio_rw_flagged(bio, BIO_RW_BARRIER))
+ if (!(bio->bi_rw & REQ_HARDBARRIER))
bio_io_error(bio);
else
if (!md->barrier_error)
@@ -1355,7 +1437,7 @@ static int dm_merge_bvec(struct request_queue *q,
/*
* Find maximum amount of I/O that won't need splitting
*/
- max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
+ max_sectors = min(max_io_len(bvm->bi_sector, ti),
(sector_t) BIO_MAX_SECTORS);
max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
if (max_size < 0)
@@ -1414,7 +1496,7 @@ static int _dm_request(struct request_queue *q, struct bio *bio)
* we have to queue this io for later.
*/
if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
- unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+ unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
up_read(&md->io_lock);
if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
@@ -1455,20 +1537,9 @@ static int dm_request(struct request_queue *q, struct bio *bio)
return _dm_request(q, bio);
}
-/*
- * Mark this request as flush request, so that dm_request_fn() can
- * recognize.
- */
-static void dm_rq_prepare_flush(struct request_queue *q, struct request *rq)
-{
- rq->cmd_type = REQ_TYPE_LINUX_BLOCK;
- rq->cmd[0] = REQ_LB_OP_FLUSH;
-}
-
static bool dm_rq_is_flush_request(struct request *rq)
{
- if (rq->cmd_type == REQ_TYPE_LINUX_BLOCK &&
- rq->cmd[0] == REQ_LB_OP_FLUSH)
+ if (rq->cmd_flags & REQ_FLUSH)
return true;
else
return false;
@@ -1849,6 +1920,28 @@ static const struct block_device_operations dm_blk_dops;
static void dm_wq_work(struct work_struct *work);
static void dm_rq_barrier_work(struct work_struct *work);
+static void dm_init_md_queue(struct mapped_device *md)
+{
+ /*
+ * Request-based dm devices cannot be stacked on top of bio-based dm
+ * devices. The type of this dm device has not been decided yet.
+ * The type is decided at the first table loading time.
+ * To prevent problematic device stacking, clear the queue flag
+ * for request stacking support until then.
+ *
+ * This queue is new, so no concurrency on the queue_flags.
+ */
+ queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
+
+ md->queue->queuedata = md;
+ md->queue->backing_dev_info.congested_fn = dm_any_congested;
+ md->queue->backing_dev_info.congested_data = md;
+ blk_queue_make_request(md->queue, dm_request);
+ blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
+ md->queue->unplug_fn = dm_unplug_all;
+ blk_queue_merge_bvec(md->queue, dm_merge_bvec);
+}
+
/*
* Allocate and initialise a blank device with a given minor.
*/
@@ -1874,8 +1967,10 @@ static struct mapped_device *alloc_dev(int minor)
if (r < 0)
goto bad_minor;
+ md->type = DM_TYPE_NONE;
init_rwsem(&md->io_lock);
mutex_init(&md->suspend_lock);
+ mutex_init(&md->type_lock);
spin_lock_init(&md->deferred_lock);
spin_lock_init(&md->barrier_error_lock);
rwlock_init(&md->map_lock);
@@ -1886,34 +1981,11 @@ static struct mapped_device *alloc_dev(int minor)
INIT_LIST_HEAD(&md->uevent_list);
spin_lock_init(&md->uevent_lock);
- md->queue = blk_init_queue(dm_request_fn, NULL);
+ md->queue = blk_alloc_queue(GFP_KERNEL);
if (!md->queue)
goto bad_queue;
- /*
- * Request-based dm devices cannot be stacked on top of bio-based dm
- * devices. The type of this dm device has not been decided yet,
- * although we initialized the queue using blk_init_queue().
- * The type is decided at the first table loading time.
- * To prevent problematic device stacking, clear the queue flag
- * for request stacking support until then.
- *
- * This queue is new, so no concurrency on the queue_flags.
- */
- queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
- md->saved_make_request_fn = md->queue->make_request_fn;
- md->queue->queuedata = md;
- md->queue->backing_dev_info.congested_fn = dm_any_congested;
- md->queue->backing_dev_info.congested_data = md;
- blk_queue_make_request(md->queue, dm_request);
- blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
- md->queue->unplug_fn = dm_unplug_all;
- blk_queue_merge_bvec(md->queue, dm_merge_bvec);
- blk_queue_softirq_done(md->queue, dm_softirq_done);
- blk_queue_prep_rq(md->queue, dm_prep_fn);
- blk_queue_lld_busy(md->queue, dm_lld_busy);
- blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH,
- dm_rq_prepare_flush);
+ dm_init_md_queue(md);
md->disk = alloc_disk(1);
if (!md->disk)
@@ -2128,6 +2200,72 @@ int dm_create(int minor, struct mapped_device **result)
return 0;
}
+/*
+ * Functions to manage md->type.
+ * All are required to hold md->type_lock.
+ */
+void dm_lock_md_type(struct mapped_device *md)
+{
+ mutex_lock(&md->type_lock);
+}
+
+void dm_unlock_md_type(struct mapped_device *md)
+{
+ mutex_unlock(&md->type_lock);
+}
+
+void dm_set_md_type(struct mapped_device *md, unsigned type)
+{
+ md->type = type;
+}
+
+unsigned dm_get_md_type(struct mapped_device *md)
+{
+ return md->type;
+}
+
+/*
+ * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
+ */
+static int dm_init_request_based_queue(struct mapped_device *md)
+{
+ struct request_queue *q = NULL;
+
+ if (md->queue->elevator)
+ return 1;
+
+ /* Fully initialize the queue */
+ q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
+ if (!q)
+ return 0;
+
+ md->queue = q;
+ md->saved_make_request_fn = md->queue->make_request_fn;
+ dm_init_md_queue(md);
+ blk_queue_softirq_done(md->queue, dm_softirq_done);
+ blk_queue_prep_rq(md->queue, dm_prep_fn);
+ blk_queue_lld_busy(md->queue, dm_lld_busy);
+ blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH);
+
+ elv_register_queue(md->queue);
+
+ return 1;
+}
+
+/*
+ * Setup the DM device's queue based on md's type
+ */
+int dm_setup_md_queue(struct mapped_device *md)
+{
+ if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) &&
+ !dm_init_request_based_queue(md)) {
+ DMWARN("Cannot initialize queue for request-based mapped device");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
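
The type_lock/dm_setup_md_queue pair above, together with the table_load() changes earlier in this series, implements "decide the device type once, at first table load": the first table fixes the type, later tables must match it, and only then is the queue fully initialised for the request-based case. A small userspace sketch of that pattern, with hypothetical names and a pthread mutex standing in for md->type_lock:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

enum dm_type { TYPE_NONE, TYPE_BIO_BASED, TYPE_REQUEST_BASED };

struct device {
	pthread_mutex_t type_lock;
	enum dm_type    type;
};

static int device_load_table(struct device *dev, enum dm_type table_type)
{
	int ret = 0;

	pthread_mutex_lock(&dev->type_lock);
	if (dev->type == TYPE_NONE)
		dev->type = table_type;		/* first load decides */
	else if (dev->type != table_type)
		ret = -EINVAL;			/* type change refused */
	pthread_mutex_unlock(&dev->type_lock);

	return ret;
}

int main(void)
{
	struct device dev = {
		.type_lock = PTHREAD_MUTEX_INITIALIZER,
		.type      = TYPE_NONE,
	};

	printf("first load:  %d\n", device_load_table(&dev, TYPE_REQUEST_BASED));
	printf("same type:   %d\n", device_load_table(&dev, TYPE_REQUEST_BASED));
	printf("change type: %d\n", device_load_table(&dev, TYPE_BIO_BASED));
	return 0;
}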
static struct mapped_device *dm_find_md(dev_t dev)
{
struct mapped_device *md;
@@ -2141,6 +2279,7 @@ static struct mapped_device *dm_find_md(dev_t dev)
md = idr_find(&_minor_idr, minor);
if (md && (md == MINOR_ALLOCED ||
(MINOR(disk_devt(dm_disk(md))) != minor) ||
+ dm_deleting_md(md) ||
test_bit(DMF_FREEING, &md->flags))) {
md = NULL;
goto out;
@@ -2175,6 +2314,7 @@ void dm_set_mdptr(struct mapped_device *md, void *ptr)
void dm_get(struct mapped_device *md)
{
atomic_inc(&md->holders);
+ BUG_ON(test_bit(DMF_FREEING, &md->flags));
}
const char *dm_device_name(struct mapped_device *md)
@@ -2183,27 +2323,55 @@ const char *dm_device_name(struct mapped_device *md)
}
EXPORT_SYMBOL_GPL(dm_device_name);
-void dm_put(struct mapped_device *md)
+static void __dm_destroy(struct mapped_device *md, bool wait)
{
struct dm_table *map;
- BUG_ON(test_bit(DMF_FREEING, &md->flags));
+ might_sleep();
- if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
- map = dm_get_live_table(md);
- idr_replace(&_minor_idr, MINOR_ALLOCED,
- MINOR(disk_devt(dm_disk(md))));
- set_bit(DMF_FREEING, &md->flags);
- spin_unlock(&_minor_lock);
- if (!dm_suspended_md(md)) {
- dm_table_presuspend_targets(map);
- dm_table_postsuspend_targets(map);
- }
- dm_sysfs_exit(md);
- dm_table_put(map);
- dm_table_destroy(__unbind(md));
- free_dev(md);
+ spin_lock(&_minor_lock);
+ map = dm_get_live_table(md);
+ idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
+ set_bit(DMF_FREEING, &md->flags);
+ spin_unlock(&_minor_lock);
+
+ if (!dm_suspended_md(md)) {
+ dm_table_presuspend_targets(map);
+ dm_table_postsuspend_targets(map);
}
+
+ /*
+ * Rarely, there may be I/O requests still going to complete.
+ * Wait for all references to disappear.
+ * No one should increment the reference count of the mapped_device
+ * after the mapped_device state becomes DMF_FREEING.
+ */
+ if (wait)
+ while (atomic_read(&md->holders))
+ msleep(1);
+ else if (atomic_read(&md->holders))
+ DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
+ dm_device_name(md), atomic_read(&md->holders));
+
+ dm_sysfs_exit(md);
+ dm_table_put(map);
+ dm_table_destroy(__unbind(md));
+ free_dev(md);
+}
+
+void dm_destroy(struct mapped_device *md)
+{
+ __dm_destroy(md, true);
+}
+
+void dm_destroy_immediate(struct mapped_device *md)
+{
+ __dm_destroy(md, false);
+}
+
+void dm_put(struct mapped_device *md)
+{
+ atomic_dec(&md->holders);
}
EXPORT_SYMBOL_GPL(dm_put);
@@ -2258,7 +2426,12 @@ static void process_barrier(struct mapped_device *md, struct bio *bio)
if (!bio_empty_barrier(bio)) {
__split_and_process_bio(md, bio);
- dm_flush(md);
+ /*
+ * If the request isn't supported, don't waste time with
+ * the second flush.
+ */
+ if (md->barrier_error != -EOPNOTSUPP)
+ dm_flush(md);
}
if (md->barrier_error != DM_ENDIO_REQUEUE)
@@ -2296,7 +2469,7 @@ static void dm_wq_work(struct work_struct *work)
if (dm_request_based(md))
generic_make_request(c);
else {
- if (bio_rw_flagged(c, BIO_RW_BARRIER))
+ if (c->bi_rw & REQ_HARDBARRIER)
process_barrier(md, c);
else
__split_and_process_bio(md, c);
@@ -2315,11 +2488,11 @@ static void dm_queue_flush(struct mapped_device *md)
queue_work(md->wq, &md->work);
}
-static void dm_rq_set_flush_nr(struct request *clone, unsigned flush_nr)
+static void dm_rq_set_target_request_nr(struct request *clone, unsigned request_nr)
{
struct dm_rq_target_io *tio = clone->end_io_data;
- tio->info.flush_request = flush_nr;
+ tio->info.target_request_nr = request_nr;
}
/* Issue barrier requests to targets and wait for their completion. */
@@ -2337,7 +2510,7 @@ static int dm_rq_barrier(struct mapped_device *md)
ti = dm_table_get_target(map, i);
for (j = 0; j < ti->num_flush_requests; j++) {
clone = clone_rq(md->flush_request, md, GFP_NOIO);
- dm_rq_set_flush_nr(clone, j);
+ dm_rq_set_target_request_nr(clone, j);
atomic_inc(&md->pending[rq_data_dir(clone)]);
map_request(ti, clone, md);
}
@@ -2403,13 +2576,6 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
goto out;
}
- /* cannot change the device type, once a table is bound */
- if (md->map &&
- (dm_table_get_type(md->map) != dm_table_get_type(table))) {
- DMWARN("can't change the device type after a table is bound");
- goto out;
- }
-
map = __bind(md, table, &limits);
out:
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index bad1724d486..0c2dd5f4af7 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -59,13 +59,20 @@ void dm_table_postsuspend_targets(struct dm_table *t);
int dm_table_resume_targets(struct dm_table *t);
int dm_table_any_congested(struct dm_table *t, int bdi_bits);
int dm_table_any_busy_target(struct dm_table *t);
-int dm_table_set_type(struct dm_table *t);
unsigned dm_table_get_type(struct dm_table *t);
bool dm_table_request_based(struct dm_table *t);
+bool dm_table_supports_discards(struct dm_table *t);
int dm_table_alloc_md_mempools(struct dm_table *t);
void dm_table_free_md_mempools(struct dm_table *t);
struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
+void dm_lock_md_type(struct mapped_device *md);
+void dm_unlock_md_type(struct mapped_device *md);
+void dm_set_md_type(struct mapped_device *md, unsigned type);
+unsigned dm_get_md_type(struct mapped_device *md);
+
+int dm_setup_md_queue(struct mapped_device *md);
+
/*
* To check the return value from dm_table_find_target().
*/
@@ -122,6 +129,11 @@ void dm_linear_exit(void);
int dm_stripe_init(void);
void dm_stripe_exit(void);
+/*
+ * mapped_device operations
+ */
+void dm_destroy(struct mapped_device *md);
+void dm_destroy_immediate(struct mapped_device *md);
int dm_open_count(struct mapped_device *md);
int dm_lock_for_deletion(struct mapped_device *md);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 7e0e057db9a..ba19060bcf3 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -294,7 +294,7 @@ static int linear_make_request (mddev_t *mddev, struct bio *bio)
dev_info_t *tmp_dev;
sector_t start_sector;
- if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+ if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
md_barrier_request(mddev, bio);
return 0;
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index cb20d0b0555..43cf9cc9c1d 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -36,6 +36,7 @@
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
+#include <linux/smp_lock.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/poll.h>
#include <linux/ctype.h>
@@ -261,7 +262,7 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
* Once ->stop is called and completes, the module will be completely
* unused.
*/
-static void mddev_suspend(mddev_t *mddev)
+void mddev_suspend(mddev_t *mddev)
{
BUG_ON(mddev->suspended);
mddev->suspended = 1;
@@ -269,13 +270,15 @@ static void mddev_suspend(mddev_t *mddev)
wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
mddev->pers->quiesce(mddev, 1);
}
+EXPORT_SYMBOL_GPL(mddev_suspend);
-static void mddev_resume(mddev_t *mddev)
+void mddev_resume(mddev_t *mddev)
{
mddev->suspended = 0;
wake_up(&mddev->sb_wait);
mddev->pers->quiesce(mddev, 0);
}
+EXPORT_SYMBOL_GPL(mddev_resume);
int mddev_congested(mddev_t *mddev, int bits)
{
@@ -353,7 +356,7 @@ static void md_submit_barrier(struct work_struct *ws)
/* an empty barrier - all done */
bio_endio(bio, 0);
else {
- bio->bi_rw &= ~(1<<BIO_RW_BARRIER);
+ bio->bi_rw &= ~REQ_HARDBARRIER;
if (mddev->pers->make_request(mddev, bio))
generic_make_request(bio);
mddev->barrier = POST_REQUEST_BARRIER;
@@ -384,6 +387,51 @@ void md_barrier_request(mddev_t *mddev, struct bio *bio)
}
EXPORT_SYMBOL(md_barrier_request);
+/* Support for plugging.
+ * This mirrors the plugging support in request_queue, but does not
+ * require having a whole queue
+ */
+static void plugger_work(struct work_struct *work)
+{
+ struct plug_handle *plug =
+ container_of(work, struct plug_handle, unplug_work);
+ plug->unplug_fn(plug);
+}
+static void plugger_timeout(unsigned long data)
+{
+ struct plug_handle *plug = (void *)data;
+ kblockd_schedule_work(NULL, &plug->unplug_work);
+}
+void plugger_init(struct plug_handle *plug,
+ void (*unplug_fn)(struct plug_handle *))
+{
+ plug->unplug_flag = 0;
+ plug->unplug_fn = unplug_fn;
+ init_timer(&plug->unplug_timer);
+ plug->unplug_timer.function = plugger_timeout;
+ plug->unplug_timer.data = (unsigned long)plug;
+ INIT_WORK(&plug->unplug_work, plugger_work);
+}
+EXPORT_SYMBOL_GPL(plugger_init);
+
+void plugger_set_plug(struct plug_handle *plug)
+{
+ if (!test_and_set_bit(PLUGGED_FLAG, &plug->unplug_flag))
+ mod_timer(&plug->unplug_timer, jiffies + msecs_to_jiffies(3)+1);
+}
+EXPORT_SYMBOL_GPL(plugger_set_plug);
+
+int plugger_remove_plug(struct plug_handle *plug)
+{
+ if (test_and_clear_bit(PLUGGED_FLAG, &plug->unplug_flag)) {
+ del_timer(&plug->unplug_timer);
+ return 1;
+ } else
+ return 0;
+}
+EXPORT_SYMBOL_GPL(plugger_remove_plug);
+
+
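
plugger_set_plug()/plugger_remove_plug() above boil down to an idempotent arm/disarm pair around deferred work: only the set that flips the flag arms the timer, and only the remove that actually clears it cancels the timer. A simplified userspace sketch of that arm/disarm discipline; C11 atomics replace test_and_set_bit()/test_and_clear_bit(), the timer/workqueue machinery is omitted, and for brevity the flush simply runs at unplug time:

#include <stdatomic.h>
#include <stdio.h>

struct plug {
	atomic_uint plugged;			/* 0 = unplugged, 1 = plugged */
	void (*unplug_fn)(struct plug *);
};

static void plug_init(struct plug *p, void (*fn)(struct plug *))
{
	atomic_init(&p->plugged, 0);
	p->unplug_fn = fn;
}

/* Arm the plug; only the caller that flips 0 -> 1 actually arms it. */
static int plug_set(struct plug *p)
{
	return atomic_exchange(&p->plugged, 1) == 0;
}

/* Disarm the plug and run the flush if (and only if) it was armed. */
static int plug_remove(struct plug *p)
{
	if (atomic_exchange(&p->plugged, 0) == 1) {
		p->unplug_fn(p);
		return 1;
	}
	return 0;
}

static void flush(struct plug *p)
{
	(void)p;
	puts("flushing queued work");
}

int main(void)
{
	struct plug p;

	plug_init(&p, flush);
	plug_set(&p);		/* queue work, arm the plug          */
	plug_set(&p);		/* second arm is a no-op             */
	plug_remove(&p);	/* first unplug flushes              */
	plug_remove(&p);	/* second unplug finds nothing to do */
	return 0;
}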
static inline mddev_t *mddev_get(mddev_t *mddev)
{
atomic_inc(&mddev->active);
@@ -416,7 +464,7 @@ static void mddev_put(mddev_t *mddev)
spin_unlock(&all_mddevs_lock);
}
-static void mddev_init(mddev_t *mddev)
+void mddev_init(mddev_t *mddev)
{
mutex_init(&mddev->open_mutex);
mutex_init(&mddev->reconfig_mutex);
@@ -436,6 +484,7 @@ static void mddev_init(mddev_t *mddev)
mddev->resync_max = MaxSector;
mddev->level = LEVEL_NONE;
}
+EXPORT_SYMBOL_GPL(mddev_init);
static mddev_t * mddev_find(dev_t unit)
{
@@ -532,25 +581,31 @@ static void mddev_unlock(mddev_t * mddev)
* an access to the files will try to take reconfig_mutex
* while holding the file unremovable, which leads to
* a deadlock.
- * So hold open_mutex instead - we are allowed to take
- * it while holding reconfig_mutex, and md_run can
- * use it to wait for the remove to complete.
+ * So set sysfs_active while the remove is happening,
+ * and anything else which might set ->to_remove or may
+ * otherwise change the sysfs namespace will fail with
+ * -EBUSY if sysfs_active is still set.
+ * We set sysfs_active under reconfig_mutex and elsewhere
+ * test it under the same mutex to ensure its correct value
+ * is seen.
*/
struct attribute_group *to_remove = mddev->to_remove;
mddev->to_remove = NULL;
- mutex_lock(&mddev->open_mutex);
+ mddev->sysfs_active = 1;
mutex_unlock(&mddev->reconfig_mutex);
- if (to_remove != &md_redundancy_group)
- sysfs_remove_group(&mddev->kobj, to_remove);
- if (mddev->pers == NULL ||
- mddev->pers->sync_request == NULL) {
- sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
- if (mddev->sysfs_action)
- sysfs_put(mddev->sysfs_action);
- mddev->sysfs_action = NULL;
+ if (mddev->kobj.sd) {
+ if (to_remove != &md_redundancy_group)
+ sysfs_remove_group(&mddev->kobj, to_remove);
+ if (mddev->pers == NULL ||
+ mddev->pers->sync_request == NULL) {
+ sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
+ if (mddev->sysfs_action)
+ sysfs_put(mddev->sysfs_action);
+ mddev->sysfs_action = NULL;
+ }
}
- mutex_unlock(&mddev->open_mutex);
+ mddev->sysfs_active = 0;
} else
mutex_unlock(&mddev->reconfig_mutex);
@@ -675,11 +730,11 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
* if zero is reached.
* If an error occurred, call md_error
*
- * As we might need to resubmit the request if BIO_RW_BARRIER
+ * As we might need to resubmit the request if REQ_HARDBARRIER
* causes ENOTSUPP, we allocate a spare bio...
*/
struct bio *bio = bio_alloc(GFP_NOIO, 1);
- int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);
+ int rw = REQ_WRITE | REQ_SYNC | REQ_UNPLUG;
bio->bi_bdev = rdev->bdev;
bio->bi_sector = sector;
@@ -691,7 +746,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
atomic_inc(&mddev->pending_writes);
if (!test_bit(BarriersNotsupp, &rdev->flags)) {
struct bio *rbio;
- rw |= (1<<BIO_RW_BARRIER);
+ rw |= REQ_HARDBARRIER;
rbio = bio_clone(bio, GFP_NOIO);
rbio->bi_private = bio;
rbio->bi_end_io = super_written_barrier;
@@ -736,7 +791,7 @@ int sync_page_io(struct block_device *bdev, sector_t sector, int size,
struct completion event;
int ret;
- rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+ rw |= REQ_SYNC | REQ_UNPLUG;
bio->bi_bdev = bdev;
bio->bi_sector = sector;
@@ -1811,11 +1866,9 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
goto fail;
ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
- if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
- kobject_del(&rdev->kobj);
- goto fail;
- }
- rdev->sysfs_state = sysfs_get_dirent(rdev->kobj.sd, NULL, "state");
+ if (sysfs_create_link(&rdev->kobj, ko, "block"))
+ /* failure here is OK */;
+ rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
list_add_rcu(&rdev->same_set, &mddev->disks);
bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);
@@ -2083,16 +2136,6 @@ static void sync_sbs(mddev_t * mddev, int nospares)
* with the rest of the array)
*/
mdk_rdev_t *rdev;
-
- /* First make sure individual recovery_offsets are correct */
- list_for_each_entry(rdev, &mddev->disks, same_set) {
- if (rdev->raid_disk >= 0 &&
- mddev->delta_disks >= 0 &&
- !test_bit(In_sync, &rdev->flags) &&
- mddev->curr_resync_completed > rdev->recovery_offset)
- rdev->recovery_offset = mddev->curr_resync_completed;
-
- }
list_for_each_entry(rdev, &mddev->disks, same_set) {
if (rdev->sb_events == mddev->events ||
(nospares &&
@@ -2114,13 +2157,27 @@ static void md_update_sb(mddev_t * mddev, int force_change)
int sync_req;
int nospares = 0;
- mddev->utime = get_seconds();
- if (mddev->external)
- return;
repeat:
+ /* First make sure individual recovery_offsets are correct */
+ list_for_each_entry(rdev, &mddev->disks, same_set) {
+ if (rdev->raid_disk >= 0 &&
+ mddev->delta_disks >= 0 &&
+ !test_bit(In_sync, &rdev->flags) &&
+ mddev->curr_resync_completed > rdev->recovery_offset)
+ rdev->recovery_offset = mddev->curr_resync_completed;
+
+ }
+ if (!mddev->persistent) {
+ clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ clear_bit(MD_CHANGE_DEVS, &mddev->flags);
+ wake_up(&mddev->sb_wait);
+ return;
+ }
+
spin_lock_irq(&mddev->write_lock);
- set_bit(MD_CHANGE_PENDING, &mddev->flags);
+ mddev->utime = get_seconds();
+
if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
force_change = 1;
if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
@@ -2168,19 +2225,6 @@ repeat:
MD_BUG();
mddev->events --;
}
-
- /*
- * do not write anything to disk if using
- * nonpersistent superblocks
- */
- if (!mddev->persistent) {
- if (!mddev->external)
- clear_bit(MD_CHANGE_PENDING, &mddev->flags);
-
- spin_unlock_irq(&mddev->write_lock);
- wake_up(&mddev->sb_wait);
- return;
- }
sync_sbs(mddev, nospares);
spin_unlock_irq(&mddev->write_lock);
@@ -2334,8 +2378,8 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
set_bit(In_sync, &rdev->flags);
err = 0;
}
- if (!err && rdev->sysfs_state)
- sysfs_notify_dirent(rdev->sysfs_state);
+ if (!err)
+ sysfs_notify_dirent_safe(rdev->sysfs_state);
return err ? err : len;
}
static struct rdev_sysfs_entry rdev_state =
@@ -2430,14 +2474,10 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
rdev->raid_disk = -1;
return err;
} else
- sysfs_notify_dirent(rdev->sysfs_state);
+ sysfs_notify_dirent_safe(rdev->sysfs_state);
sprintf(nm, "rd%d", rdev->raid_disk);
if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
- printk(KERN_WARNING
- "md: cannot register "
- "%s for %s\n",
- nm, mdname(rdev->mddev));
-
+ /* failure here is OK */;
/* don't wakeup anyone, leave that to userspace. */
} else {
if (slot >= rdev->mddev->raid_disks)
@@ -2447,7 +2487,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
clear_bit(Faulty, &rdev->flags);
clear_bit(WriteMostly, &rdev->flags);
set_bit(In_sync, &rdev->flags);
- sysfs_notify_dirent(rdev->sysfs_state);
+ sysfs_notify_dirent_safe(rdev->sysfs_state);
}
return len;
}
@@ -2695,6 +2735,24 @@ static struct kobj_type rdev_ktype = {
.default_attrs = rdev_default_attrs,
};
+void md_rdev_init(mdk_rdev_t *rdev)
+{
+ rdev->desc_nr = -1;
+ rdev->saved_raid_disk = -1;
+ rdev->raid_disk = -1;
+ rdev->flags = 0;
+ rdev->data_offset = 0;
+ rdev->sb_events = 0;
+ rdev->last_read_error.tv_sec = 0;
+ rdev->last_read_error.tv_nsec = 0;
+ atomic_set(&rdev->nr_pending, 0);
+ atomic_set(&rdev->read_errors, 0);
+ atomic_set(&rdev->corrected_errors, 0);
+
+ INIT_LIST_HEAD(&rdev->same_set);
+ init_waitqueue_head(&rdev->blocked_wait);
+}
+EXPORT_SYMBOL_GPL(md_rdev_init);
/*
* Import a device. If 'super_format' >= 0, then sanity check the superblock
*
@@ -2718,6 +2776,7 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
return ERR_PTR(-ENOMEM);
}
+ md_rdev_init(rdev);
if ((err = alloc_disk_sb(rdev)))
goto abort_free;
@@ -2727,18 +2786,6 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
kobject_init(&rdev->kobj, &rdev_ktype);
- rdev->desc_nr = -1;
- rdev->saved_raid_disk = -1;
- rdev->raid_disk = -1;
- rdev->flags = 0;
- rdev->data_offset = 0;
- rdev->sb_events = 0;
- rdev->last_read_error.tv_sec = 0;
- rdev->last_read_error.tv_nsec = 0;
- atomic_set(&rdev->nr_pending, 0);
- atomic_set(&rdev->read_errors, 0);
- atomic_set(&rdev->corrected_errors, 0);
-
size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
if (!size) {
printk(KERN_WARNING
@@ -2767,9 +2814,6 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
}
}
- INIT_LIST_HEAD(&rdev->same_set);
- init_waitqueue_head(&rdev->blocked_wait);
-
return rdev;
abort_free:
@@ -2960,7 +3004,9 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
* - new personality will access other array.
*/
- if (mddev->sync_thread || mddev->reshape_position != MaxSector)
+ if (mddev->sync_thread ||
+ mddev->reshape_position != MaxSector ||
+ mddev->sysfs_active)
return -EBUSY;
if (!mddev->pers->quiesce) {
@@ -3324,7 +3370,7 @@ array_state_show(mddev_t *mddev, char *page)
case 0:
if (mddev->in_sync)
st = clean;
- else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
+ else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
st = write_pending;
else if (mddev->safemode)
st = active_idle;
@@ -3405,9 +3451,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
mddev->in_sync = 1;
if (mddev->safemode == 1)
mddev->safemode = 0;
- if (mddev->persistent)
- set_bit(MD_CHANGE_CLEAN,
- &mddev->flags);
+ set_bit(MD_CHANGE_CLEAN, &mddev->flags);
}
err = 0;
} else
@@ -3419,8 +3463,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
case active:
if (mddev->pers) {
restart_array(mddev);
- if (mddev->external)
- clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ clear_bit(MD_CHANGE_PENDING, &mddev->flags);
wake_up(&mddev->sb_wait);
err = 0;
} else {
@@ -3437,7 +3480,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
if (err)
return err;
else {
- sysfs_notify_dirent(mddev->sysfs_state);
+ sysfs_notify_dirent_safe(mddev->sysfs_state);
return len;
}
}
@@ -3735,7 +3778,7 @@ action_store(mddev_t *mddev, const char *page, size_t len)
}
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
- sysfs_notify_dirent(mddev->sysfs_action);
+ sysfs_notify_dirent_safe(mddev->sysfs_action);
return len;
}
@@ -4281,13 +4324,14 @@ static int md_alloc(dev_t dev, char *name)
disk->disk_name);
error = 0;
}
- if (sysfs_create_group(&mddev->kobj, &md_bitmap_group))
+ if (mddev->kobj.sd &&
+ sysfs_create_group(&mddev->kobj, &md_bitmap_group))
printk(KERN_DEBUG "pointless warning\n");
abort:
mutex_unlock(&disks_mutex);
- if (!error) {
+ if (!error && mddev->kobj.sd) {
kobject_uevent(&mddev->kobj, KOBJ_ADD);
- mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, NULL, "array_state");
+ mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
}
mddev_put(mddev);
return error;
@@ -4325,14 +4369,14 @@ static void md_safemode_timeout(unsigned long data)
if (!atomic_read(&mddev->writes_pending)) {
mddev->safemode = 1;
if (mddev->external)
- sysfs_notify_dirent(mddev->sysfs_state);
+ sysfs_notify_dirent_safe(mddev->sysfs_state);
}
md_wakeup_thread(mddev->thread);
}
static int start_dirty_degraded;
-static int md_run(mddev_t *mddev)
+int md_run(mddev_t *mddev)
{
int err;
mdk_rdev_t *rdev;
@@ -4344,13 +4388,9 @@ static int md_run(mddev_t *mddev)
if (mddev->pers)
return -EBUSY;
-
- /* These two calls synchronise us with the
- * sysfs_remove_group calls in mddev_unlock,
- * so they must have completed.
- */
- mutex_lock(&mddev->open_mutex);
- mutex_unlock(&mddev->open_mutex);
+ /* Cannot run until previous stop completes properly */
+ if (mddev->sysfs_active)
+ return -EBUSY;
/*
* Analyze all RAID superblock(s)
@@ -4397,7 +4437,7 @@ static int md_run(mddev_t *mddev)
return -EINVAL;
}
}
- sysfs_notify_dirent(rdev->sysfs_state);
+ sysfs_notify_dirent_safe(rdev->sysfs_state);
}
spin_lock(&pers_lock);
@@ -4496,11 +4536,12 @@ static int md_run(mddev_t *mddev)
return err;
}
if (mddev->pers->sync_request) {
- if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
+ if (mddev->kobj.sd &&
+ sysfs_create_group(&mddev->kobj, &md_redundancy_group))
printk(KERN_WARNING
"md: cannot register extra attributes for %s\n",
mdname(mddev));
- mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, NULL, "sync_action");
+ mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
} else if (mddev->ro == 2) /* auto-readonly not meaningful */
mddev->ro = 0;
@@ -4518,8 +4559,7 @@ static int md_run(mddev_t *mddev)
char nm[20];
sprintf(nm, "rd%d", rdev->raid_disk);
if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
- printk("md: cannot register %s for %s\n",
- nm, mdname(mddev));
+ /* failure here is OK */;
}
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -4531,12 +4571,12 @@ static int md_run(mddev_t *mddev)
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
md_new_event(mddev);
- sysfs_notify_dirent(mddev->sysfs_state);
- if (mddev->sysfs_action)
- sysfs_notify_dirent(mddev->sysfs_action);
+ sysfs_notify_dirent_safe(mddev->sysfs_state);
+ sysfs_notify_dirent_safe(mddev->sysfs_action);
sysfs_notify(&mddev->kobj, NULL, "degraded");
return 0;
}
+EXPORT_SYMBOL_GPL(md_run);
static int do_md_run(mddev_t *mddev)
{
@@ -4545,7 +4585,11 @@ static int do_md_run(mddev_t *mddev)
err = md_run(mddev);
if (err)
goto out;
-
+ err = bitmap_load(mddev);
+ if (err) {
+ bitmap_destroy(mddev);
+ goto out;
+ }
set_capacity(mddev->gendisk, mddev->array_sectors);
revalidate_disk(mddev->gendisk);
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
@@ -4573,7 +4617,7 @@ static int restart_array(mddev_t *mddev)
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread);
- sysfs_notify_dirent(mddev->sysfs_state);
+ sysfs_notify_dirent_safe(mddev->sysfs_state);
return 0;
}
@@ -4644,9 +4688,10 @@ static void md_clean(mddev_t *mddev)
mddev->bitmap_info.chunksize = 0;
mddev->bitmap_info.daemon_sleep = 0;
mddev->bitmap_info.max_write_behind = 0;
+ mddev->plug = NULL;
}
-static void md_stop_writes(mddev_t *mddev)
+void md_stop_writes(mddev_t *mddev)
{
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
@@ -4666,11 +4711,10 @@ static void md_stop_writes(mddev_t *mddev)
md_update_sb(mddev, 1);
}
}
+EXPORT_SYMBOL_GPL(md_stop_writes);
-static void md_stop(mddev_t *mddev)
+void md_stop(mddev_t *mddev)
{
- md_stop_writes(mddev);
-
mddev->pers->stop(mddev);
if (mddev->pers->sync_request && mddev->to_remove == NULL)
mddev->to_remove = &md_redundancy_group;
@@ -4678,6 +4722,7 @@ static void md_stop(mddev_t *mddev)
mddev->pers = NULL;
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
}
+EXPORT_SYMBOL_GPL(md_stop);
static int md_set_readonly(mddev_t *mddev, int is_open)
{
@@ -4697,7 +4742,7 @@ static int md_set_readonly(mddev_t *mddev, int is_open)
mddev->ro = 1;
set_disk_ro(mddev->gendisk, 1);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- sysfs_notify_dirent(mddev->sysfs_state);
+ sysfs_notify_dirent_safe(mddev->sysfs_state);
err = 0;
}
out:
@@ -4711,26 +4756,29 @@ out:
*/
static int do_md_stop(mddev_t * mddev, int mode, int is_open)
{
- int err = 0;
struct gendisk *disk = mddev->gendisk;
mdk_rdev_t *rdev;
mutex_lock(&mddev->open_mutex);
- if (atomic_read(&mddev->openers) > is_open) {
+ if (atomic_read(&mddev->openers) > is_open ||
+ mddev->sysfs_active) {
printk("md: %s still in use.\n",mdname(mddev));
- err = -EBUSY;
- } else if (mddev->pers) {
+ mutex_unlock(&mddev->open_mutex);
+ return -EBUSY;
+ }
+ if (mddev->pers) {
if (mddev->ro)
set_disk_ro(disk, 0);
+ md_stop_writes(mddev);
md_stop(mddev);
mddev->queue->merge_bvec_fn = NULL;
mddev->queue->unplug_fn = NULL;
mddev->queue->backing_dev_info.congested_fn = NULL;
/* tell userspace to handle 'inactive' */
- sysfs_notify_dirent(mddev->sysfs_state);
+ sysfs_notify_dirent_safe(mddev->sysfs_state);
list_for_each_entry(rdev, &mddev->disks, same_set)
if (rdev->raid_disk >= 0) {
@@ -4740,21 +4788,17 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
}
set_capacity(disk, 0);
+ mutex_unlock(&mddev->open_mutex);
revalidate_disk(disk);
if (mddev->ro)
mddev->ro = 0;
-
- err = 0;
- }
- mutex_unlock(&mddev->open_mutex);
- if (err)
- return err;
+ } else
+ mutex_unlock(&mddev->open_mutex);
/*
* Free resources if final stop
*/
if (mode == 0) {
-
printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
bitmap_destroy(mddev);
@@ -4771,13 +4815,11 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
if (mddev->hold_active == UNTIL_STOP)
mddev->hold_active = 0;
-
}
- err = 0;
blk_integrity_unregister(disk);
md_new_event(mddev);
- sysfs_notify_dirent(mddev->sysfs_state);
- return err;
+ sysfs_notify_dirent_safe(mddev->sysfs_state);
+ return 0;
}
#ifndef MODULE
@@ -5138,7 +5180,7 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
if (err)
export_rdev(rdev);
else
- sysfs_notify_dirent(rdev->sysfs_state);
+ sysfs_notify_dirent_safe(rdev->sysfs_state);
md_update_sb(mddev, 1);
if (mddev->degraded)
@@ -5331,8 +5373,11 @@ static int set_bitmap_file(mddev_t *mddev, int fd)
err = 0;
if (mddev->pers) {
mddev->pers->quiesce(mddev, 1);
- if (fd >= 0)
+ if (fd >= 0) {
err = bitmap_create(mddev);
+ if (!err)
+ err = bitmap_load(mddev);
+ }
if (fd < 0 || err) {
bitmap_destroy(mddev);
fd = -1; /* make sure to put the file */
@@ -5581,6 +5626,8 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
mddev->bitmap_info.default_offset;
mddev->pers->quiesce(mddev, 1);
rv = bitmap_create(mddev);
+ if (!rv)
+ rv = bitmap_load(mddev);
if (rv)
bitmap_destroy(mddev);
mddev->pers->quiesce(mddev, 0);
@@ -5813,7 +5860,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) {
if (mddev->ro == 2) {
mddev->ro = 0;
- sysfs_notify_dirent(mddev->sysfs_state);
+ sysfs_notify_dirent_safe(mddev->sysfs_state);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
} else {
@@ -5902,6 +5949,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
mddev_t *mddev = mddev_find(bdev->bd_dev);
int err;
+ lock_kernel();
if (mddev->gendisk != bdev->bd_disk) {
/* we are racing with mddev_put which is discarding this
* bd_disk.
@@ -5910,6 +5958,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
/* Wait until bdev->bd_disk is definitely gone */
flush_scheduled_work();
/* Then retry the open from the top */
+ unlock_kernel();
return -ERESTARTSYS;
}
BUG_ON(mddev != bdev->bd_disk->private_data);
@@ -5923,6 +5972,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
check_disk_size_change(mddev->gendisk, bdev);
out:
+ unlock_kernel();
return err;
}
@@ -5931,8 +5981,10 @@ static int md_release(struct gendisk *disk, fmode_t mode)
mddev_t *mddev = disk->private_data;
BUG_ON(!mddev);
+ lock_kernel();
atomic_dec(&mddev->openers);
mddev_put(mddev);
+ unlock_kernel();
return 0;
}
@@ -6059,10 +6111,12 @@ void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->pers->error_handler(mddev,rdev);
if (mddev->degraded)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
- sysfs_notify_dirent(rdev->sysfs_state);
+ sysfs_notify_dirent_safe(rdev->sysfs_state);
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
+ if (mddev->event_work.func)
+ schedule_work(&mddev->event_work);
md_new_event_inintr(mddev);
}
@@ -6514,15 +6568,15 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
if (mddev->in_sync) {
mddev->in_sync = 0;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ set_bit(MD_CHANGE_PENDING, &mddev->flags);
md_wakeup_thread(mddev->thread);
did_change = 1;
}
spin_unlock_irq(&mddev->write_lock);
}
if (did_change)
- sysfs_notify_dirent(mddev->sysfs_state);
+ sysfs_notify_dirent_safe(mddev->sysfs_state);
wait_event(mddev->sb_wait,
- !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
!test_bit(MD_CHANGE_PENDING, &mddev->flags));
}
@@ -6558,22 +6612,31 @@ int md_allow_write(mddev_t *mddev)
if (mddev->in_sync) {
mddev->in_sync = 0;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ set_bit(MD_CHANGE_PENDING, &mddev->flags);
if (mddev->safemode_delay &&
mddev->safemode == 0)
mddev->safemode = 1;
spin_unlock_irq(&mddev->write_lock);
md_update_sb(mddev, 0);
- sysfs_notify_dirent(mddev->sysfs_state);
+ sysfs_notify_dirent_safe(mddev->sysfs_state);
} else
spin_unlock_irq(&mddev->write_lock);
- if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
+ if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
return -EAGAIN;
else
return 0;
}
EXPORT_SYMBOL_GPL(md_allow_write);
+void md_unplug(mddev_t *mddev)
+{
+ if (mddev->queue)
+ blk_unplug(mddev->queue);
+ if (mddev->plug)
+ mddev->plug->unplug_fn(mddev->plug);
+}
+
#define SYNC_MARKS 10
#define SYNC_MARK_STEP (3*HZ)
void md_do_sync(mddev_t *mddev)
@@ -6752,7 +6815,7 @@ void md_do_sync(mddev_t *mddev)
>= mddev->resync_max - mddev->curr_resync_completed
)) {
/* time to update curr_resync_completed */
- blk_unplug(mddev->queue);
+ md_unplug(mddev);
wait_event(mddev->recovery_wait,
atomic_read(&mddev->recovery_active) == 0);
mddev->curr_resync_completed =
@@ -6829,7 +6892,7 @@ void md_do_sync(mddev_t *mddev)
* about not overloading the IO subsystem. (things like an
* e2fsck being done on the RAID array should execute fast)
*/
- blk_unplug(mddev->queue);
+ md_unplug(mddev);
cond_resched();
currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
@@ -6848,7 +6911,7 @@ void md_do_sync(mddev_t *mddev)
* this also signals 'finished resyncing' to md_stop
*/
out:
- blk_unplug(mddev->queue);
+ md_unplug(mddev);
wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
@@ -6950,10 +7013,7 @@ static int remove_and_add_spares(mddev_t *mddev)
sprintf(nm, "rd%d", rdev->raid_disk);
if (sysfs_create_link(&mddev->kobj,
&rdev->kobj, nm))
- printk(KERN_WARNING
- "md: cannot register "
- "%s for %s\n",
- nm, mdname(mddev));
+ /* failure here is OK */;
spares++;
md_new_event(mddev);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -7039,14 +7099,13 @@ void md_check_recovery(mddev_t *mddev)
mddev->recovery_cp == MaxSector) {
mddev->in_sync = 1;
did_change = 1;
- if (mddev->persistent)
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ set_bit(MD_CHANGE_CLEAN, &mddev->flags);
}
if (mddev->safemode == 1)
mddev->safemode = 0;
spin_unlock_irq(&mddev->write_lock);
if (did_change)
- sysfs_notify_dirent(mddev->sysfs_state);
+ sysfs_notify_dirent_safe(mddev->sysfs_state);
}
if (mddev->flags)
@@ -7085,7 +7144,7 @@ void md_check_recovery(mddev_t *mddev)
mddev->recovery = 0;
/* flag recovery needed just to double check */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- sysfs_notify_dirent(mddev->sysfs_action);
+ sysfs_notify_dirent_safe(mddev->sysfs_action);
md_new_event(mddev);
goto unlock;
}
@@ -7147,7 +7206,7 @@ void md_check_recovery(mddev_t *mddev)
mddev->recovery = 0;
} else
md_wakeup_thread(mddev->sync_thread);
- sysfs_notify_dirent(mddev->sysfs_action);
+ sysfs_notify_dirent_safe(mddev->sysfs_action);
md_new_event(mddev);
}
unlock:
@@ -7156,7 +7215,7 @@ void md_check_recovery(mddev_t *mddev)
if (test_and_clear_bit(MD_RECOVERY_RECOVER,
&mddev->recovery))
if (mddev->sysfs_action)
- sysfs_notify_dirent(mddev->sysfs_action);
+ sysfs_notify_dirent_safe(mddev->sysfs_action);
}
mddev_unlock(mddev);
}
@@ -7164,7 +7223,7 @@ void md_check_recovery(mddev_t *mddev)
void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
{
- sysfs_notify_dirent(rdev->sysfs_state);
+ sysfs_notify_dirent_safe(rdev->sysfs_state);
wait_event_timeout(rdev->blocked_wait,
!test_bit(Blocked, &rdev->flags),
msecs_to_jiffies(5000));
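
Note: all of the md.c call sites above switch from sysfs_notify_dirent()/sysfs_get_dirent() to _safe variants. The helpers themselves are the two static inlines added to md.h in the next hunk; they simply tolerate a NULL sysfs_dirent, which is what an mddev that never had a kobject/gendisk registered (e.g. one driven internally by dm) is left with. The guard idiom in isolation, as a userspace-compilable sketch with stand-in types (nothing below is kernel API):

#include <stdio.h>

struct dirent_stub { const char *name; };          /* stand-in for struct sysfs_dirent */

static void notify_dirent(struct dirent_stub *sd)  /* stand-in for sysfs_notify_dirent() */
{
	printf("notify %s\n", sd->name);
}

static void notify_dirent_safe(struct dirent_stub *sd)
{
	if (sd)		/* the whole point: silently skip when no sysfs node exists */
		notify_dirent(sd);
}

int main(void)
{
	struct dirent_stub state = { "array_state" };

	notify_dirent_safe(&state);	/* notifies */
	notify_dirent_safe(NULL);	/* no-op instead of a NULL dereference */
	return 0;
}
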
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 10597bfec00..3931299788d 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -29,6 +29,26 @@
typedef struct mddev_s mddev_t;
typedef struct mdk_rdev_s mdk_rdev_t;
+/* generic plugging support - like that provided with request_queue,
+ * but does not require a request_queue
+ */
+struct plug_handle {
+ void (*unplug_fn)(struct plug_handle *);
+ struct timer_list unplug_timer;
+ struct work_struct unplug_work;
+ unsigned long unplug_flag;
+};
+#define PLUGGED_FLAG 1
+void plugger_init(struct plug_handle *plug,
+ void (*unplug_fn)(struct plug_handle *));
+void plugger_set_plug(struct plug_handle *plug);
+int plugger_remove_plug(struct plug_handle *plug);
+static inline void plugger_flush(struct plug_handle *plug)
+{
+ del_timer_sync(&plug->unplug_timer);
+ cancel_work_sync(&plug->unplug_work);
+}
+
/*
* MD's 'extended' device
*/
@@ -67,7 +87,7 @@ struct mdk_rdev_s
#define Faulty 1 /* device is known to have a fault */
#define In_sync 2 /* device is in_sync with rest of array */
#define WriteMostly 4 /* Avoid reading if at all possible */
-#define BarriersNotsupp 5 /* BIO_RW_BARRIER is not supported */
+#define BarriersNotsupp 5 /* REQ_HARDBARRIER is not supported */
#define AllReserved 6 /* If whole device is reserved for
* one array */
#define AutoDetected 7 /* added by auto-detect */
@@ -120,11 +140,15 @@ struct mddev_s
unsigned long flags;
#define MD_CHANGE_DEVS 0 /* Some device status has changed */
#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */
-#define MD_CHANGE_PENDING 2 /* superblock update in progress */
+#define MD_CHANGE_PENDING 2 /* switch from 'clean' to 'active' in progress */
int suspended;
atomic_t active_io;
int ro;
+ int sysfs_active; /* set when sysfs deletes
+ * are happening, so run/
+ * takeover/stop are not safe
+ */
struct gendisk *gendisk;
@@ -254,7 +278,7 @@ struct mddev_s
* fails. Only supported
*/
struct bio *biolist; /* bios that need to be retried
- * because BIO_RW_BARRIER is not supported
+ * because REQ_HARDBARRIER is not supported
*/
atomic_t recovery_active; /* blocks scheduled, but not written */
@@ -297,9 +321,14 @@ struct mddev_s
* hot-adding a bitmap. It should
* eventually be settable by sysfs.
*/
+ /* When md is serving under dm, it might use a
+ * dirty_log to store the bits.
+ */
+ struct dm_dirty_log *log;
+
struct mutex mutex;
unsigned long chunksize;
- unsigned long daemon_sleep; /* how many seconds between updates? */
+ unsigned long daemon_sleep; /* how many jiffies between updates? */
unsigned long max_write_behind; /* write-behind mode */
int external;
} bitmap_info;
@@ -308,6 +337,8 @@ struct mddev_s
struct list_head all_mddevs;
struct attribute_group *to_remove;
+ struct plug_handle *plug; /* if used by personality */
+
/* Generic barrier handling.
* If there is a pending barrier request, all other
* writes are blocked while the devices are flushed.
@@ -318,6 +349,7 @@ struct mddev_s
struct bio *barrier;
atomic_t flush_pending;
struct work_struct barrier_work;
+ struct work_struct event_work; /* used by dm to report failure event */
};
@@ -382,6 +414,18 @@ struct md_sysfs_entry {
};
extern struct attribute_group md_bitmap_group;
+static inline struct sysfs_dirent *sysfs_get_dirent_safe(struct sysfs_dirent *sd, char *name)
+{
+ if (sd)
+ return sysfs_get_dirent(sd, NULL, name);
+ return sd;
+}
+static inline void sysfs_notify_dirent_safe(struct sysfs_dirent *sd)
+{
+ if (sd)
+ sysfs_notify_dirent(sd);
+}
+
static inline char * mdname (mddev_t * mddev)
{
return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
@@ -474,5 +518,14 @@ extern int md_integrity_register(mddev_t *mddev);
extern void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
extern void restore_bitmap_write_access(struct file *file);
+extern void md_unplug(mddev_t *mddev);
+
+extern void mddev_init(mddev_t *mddev);
+extern int md_run(mddev_t *mddev);
+extern void md_stop(mddev_t *mddev);
+extern void md_stop_writes(mddev_t *mddev);
+extern void md_rdev_init(mdk_rdev_t *rdev);
+extern void mddev_suspend(mddev_t *mddev);
+extern void mddev_resume(mddev_t *mddev);
#endif /* _MD_MD_H */
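
Note: the plug_handle block above gives a personality plugging that does not depend on owning a request_queue, which matters once mddev->queue can be NULL (md driven by dm). The raid5.c hunk later in this patch is the first consumer; the shape of the wiring, as a schematic sketch (the my_* names are illustrative; struct plug_handle, the plugger_* calls and md_wakeup_thread() are the declarations in md.h; this is not a standalone buildable unit):

struct my_conf {
	mddev_t			*mddev;
	struct plug_handle	plug;
	/* ... */
};

static void my_unplug(struct plug_handle *plug)
{
	struct my_conf *conf = container_of(plug, struct my_conf, plug);

	if (plugger_remove_plug(&conf->plug)) {
		/* plug was set: release whatever work was being batched */
	}
	md_wakeup_thread(conf->mddev->thread);
}

static int my_run(mddev_t *mddev)
{
	struct my_conf *conf = mddev->private;

	plugger_init(&conf->plug, my_unplug);
	mddev->plug = &conf->plug;	/* so md_unplug() reaches us even with no queue */
	return 0;
}

static void my_defer_some_io(struct my_conf *conf)
{
	plugger_set_plug(&conf->plug);	/* was: blk_plug_device(mddev->queue) */
}

static int my_stop(mddev_t *mddev)
{
	struct my_conf *conf = mddev->private;

	plugger_flush(&conf->plug);	/* the timer and work item reference conf */
	return 0;
}
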
diff --git a/drivers/md/mktables.c b/drivers/md/mktables.c
deleted file mode 100644
index 3b1500843bb..00000000000
--- a/drivers/md/mktables.c
+++ /dev/null
@@ -1,132 +0,0 @@
-/* -*- linux-c -*- ------------------------------------------------------- *
- *
- * Copyright 2002-2007 H. Peter Anvin - All Rights Reserved
- *
- * This file is part of the Linux kernel, and is made available under
- * the terms of the GNU General Public License version 2 or (at your
- * option) any later version; incorporated herein by reference.
- *
- * ----------------------------------------------------------------------- */
-
-/*
- * mktables.c
- *
- * Make RAID-6 tables. This is a host user space program to be run at
- * compile time.
- */
-
-#include <stdio.h>
-#include <string.h>
-#include <inttypes.h>
-#include <stdlib.h>
-#include <time.h>
-
-static uint8_t gfmul(uint8_t a, uint8_t b)
-{
- uint8_t v = 0;
-
- while (b) {
- if (b & 1)
- v ^= a;
- a = (a << 1) ^ (a & 0x80 ? 0x1d : 0);
- b >>= 1;
- }
-
- return v;
-}
-
-static uint8_t gfpow(uint8_t a, int b)
-{
- uint8_t v = 1;
-
- b %= 255;
- if (b < 0)
- b += 255;
-
- while (b) {
- if (b & 1)
- v = gfmul(v, a);
- a = gfmul(a, a);
- b >>= 1;
- }
-
- return v;
-}
-
-int main(int argc, char *argv[])
-{
- int i, j, k;
- uint8_t v;
- uint8_t exptbl[256], invtbl[256];
-
- printf("#include <linux/raid/pq.h>\n");
-
- /* Compute multiplication table */
- printf("\nconst u8 __attribute__((aligned(256)))\n"
- "raid6_gfmul[256][256] =\n"
- "{\n");
- for (i = 0; i < 256; i++) {
- printf("\t{\n");
- for (j = 0; j < 256; j += 8) {
- printf("\t\t");
- for (k = 0; k < 8; k++)
- printf("0x%02x,%c", gfmul(i, j + k),
- (k == 7) ? '\n' : ' ');
- }
- printf("\t},\n");
- }
- printf("};\n");
- printf("#ifdef __KERNEL__\n");
- printf("EXPORT_SYMBOL(raid6_gfmul);\n");
- printf("#endif\n");
-
- /* Compute power-of-2 table (exponent) */
- v = 1;
- printf("\nconst u8 __attribute__((aligned(256)))\n"
- "raid6_gfexp[256] =\n" "{\n");
- for (i = 0; i < 256; i += 8) {
- printf("\t");
- for (j = 0; j < 8; j++) {
- exptbl[i + j] = v;
- printf("0x%02x,%c", v, (j == 7) ? '\n' : ' ');
- v = gfmul(v, 2);
- if (v == 1)
- v = 0; /* For entry 255, not a real entry */
- }
- }
- printf("};\n");
- printf("#ifdef __KERNEL__\n");
- printf("EXPORT_SYMBOL(raid6_gfexp);\n");
- printf("#endif\n");
-
- /* Compute inverse table x^-1 == x^254 */
- printf("\nconst u8 __attribute__((aligned(256)))\n"
- "raid6_gfinv[256] =\n" "{\n");
- for (i = 0; i < 256; i += 8) {
- printf("\t");
- for (j = 0; j < 8; j++) {
- invtbl[i + j] = v = gfpow(i + j, 254);
- printf("0x%02x,%c", v, (j == 7) ? '\n' : ' ');
- }
- }
- printf("};\n");
- printf("#ifdef __KERNEL__\n");
- printf("EXPORT_SYMBOL(raid6_gfinv);\n");
- printf("#endif\n");
-
- /* Compute inv(2^x + 1) (exponent-xor-inverse) table */
- printf("\nconst u8 __attribute__((aligned(256)))\n"
- "raid6_gfexi[256] =\n" "{\n");
- for (i = 0; i < 256; i += 8) {
- printf("\t");
- for (j = 0; j < 8; j++)
- printf("0x%02x,%c", invtbl[exptbl[i + j] ^ 1],
- (j == 7) ? '\n' : ' ');
- }
- printf("};\n");
- printf("#ifdef __KERNEL__\n");
- printf("EXPORT_SYMBOL(raid6_gfexi);\n");
- printf("#endif\n");
-
- return 0;
-}
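
Note: mktables.c, removed from drivers/md here, is the build-time generator for the GF(2^8) lookup tables used by the RAID-6 code. The field is defined by the polynomial 0x11d (x^8 + x^4 + x^3 + x^2 + 1), which is why gfmul() folds an overflowing shift back in with 0x1d. A standalone program with the same multiply, checking one reduction step and the first few raid6_gfexp entries (successive powers of 2):

#include <stdio.h>
#include <stdint.h>

/* Same multiply as the deleted gfmul(): GF(2^8), reduction by 0x1d. */
static uint8_t gfmul(uint8_t a, uint8_t b)
{
	uint8_t v = 0;

	while (b) {
		if (b & 1)
			v ^= a;
		a = (a << 1) ^ (a & 0x80 ? 0x1d : 0);
		b >>= 1;
	}
	return v;
}

int main(void)
{
	uint8_t v = 1;
	int i;

	/* 2 * 0x80 overflows bit 7 and is folded back with 0x1d,
	 * so the product is 0x1d itself. */
	printf("2 * 0x80 = 0x%02x\n", gfmul(2, 0x80));		/* 0x1d */

	/* raid6_gfexp is just the powers of 2 in this field: */
	for (i = 0; i < 8; i++, v = gfmul(v, 2))
		printf("gfexp[%d] = 0x%02x\n", i, v);		/* 1, 2, 4, ... 0x80 */
	return 0;
}
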
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 410fb60699a..0307d217e7a 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -91,7 +91,7 @@ static void multipath_end_request(struct bio *bio, int error)
if (uptodate)
multipath_end_bh_io(mp_bh, 0);
- else if (!bio_rw_flagged(bio, BIO_RW_AHEAD)) {
+ else if (!(bio->bi_rw & REQ_RAHEAD)) {
/*
* oops, IO error:
*/
@@ -142,7 +142,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
struct multipath_bh * mp_bh;
struct multipath_info *multipath;
- if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+ if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
md_barrier_request(mddev, bio);
return 0;
}
@@ -163,7 +163,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
mp_bh->bio = *bio;
mp_bh->bio.bi_sector += multipath->rdev->data_offset;
mp_bh->bio.bi_bdev = multipath->rdev->bdev;
- mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
+ mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
mp_bh->bio.bi_end_io = multipath_end_request;
mp_bh->bio.bi_private = mp_bh;
generic_make_request(&mp_bh->bio);
@@ -398,7 +398,7 @@ static void multipathd (mddev_t *mddev)
*bio = *(mp_bh->master_bio);
bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset;
bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
- bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
+ bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
bio->bi_end_io = multipath_end_request;
bio->bi_private = mp_bh;
generic_make_request(bio);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 563abed5a2c..6f7af46d623 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -483,7 +483,7 @@ static int raid0_make_request(mddev_t *mddev, struct bio *bio)
struct strip_zone *zone;
mdk_rdev_t *tmp_dev;
- if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+ if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
md_barrier_request(mddev, bio);
return 0;
}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a948da8012d..ad83a4dcadc 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -787,8 +787,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
struct bio_list bl;
struct page **behind_pages = NULL;
const int rw = bio_data_dir(bio);
- const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
- bool do_barriers;
+ const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
+ unsigned long do_barriers;
mdk_rdev_t *blocked_rdev;
/*
@@ -822,7 +822,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
finish_wait(&conf->wait_barrier, &w);
}
if (unlikely(!mddev->barriers_work &&
- bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+ (bio->bi_rw & REQ_HARDBARRIER))) {
if (rw == WRITE)
md_write_end(mddev);
bio_endio(bio, -EOPNOTSUPP);
@@ -877,7 +877,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
read_bio->bi_bdev = mirror->rdev->bdev;
read_bio->bi_end_io = raid1_end_read_request;
- read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+ read_bio->bi_rw = READ | do_sync;
read_bio->bi_private = r1_bio;
generic_make_request(read_bio);
@@ -959,7 +959,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
atomic_set(&r1_bio->remaining, 0);
atomic_set(&r1_bio->behind_remaining, 0);
- do_barriers = bio_rw_flagged(bio, BIO_RW_BARRIER);
+ do_barriers = bio->bi_rw & REQ_HARDBARRIER;
if (do_barriers)
set_bit(R1BIO_Barrier, &r1_bio->state);
@@ -975,8 +975,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset;
mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
mbio->bi_end_io = raid1_end_write_request;
- mbio->bi_rw = WRITE | (do_barriers << BIO_RW_BARRIER) |
- (do_sync << BIO_RW_SYNCIO);
+ mbio->bi_rw = WRITE | do_barriers | do_sync;
mbio->bi_private = r1_bio;
if (behind_pages) {
@@ -1121,6 +1120,8 @@ static int raid1_spare_active(mddev_t *mddev)
{
int i;
conf_t *conf = mddev->private;
+ int count = 0;
+ unsigned long flags;
/*
* Find all failed disks within the RAID1 configuration
@@ -1132,15 +1133,16 @@ static int raid1_spare_active(mddev_t *mddev)
if (rdev
&& !test_bit(Faulty, &rdev->flags)
&& !test_and_set_bit(In_sync, &rdev->flags)) {
- unsigned long flags;
- spin_lock_irqsave(&conf->device_lock, flags);
- mddev->degraded--;
- spin_unlock_irqrestore(&conf->device_lock, flags);
+ count++;
+ sysfs_notify_dirent(rdev->sysfs_state);
}
}
+ spin_lock_irqsave(&conf->device_lock, flags);
+ mddev->degraded -= count;
+ spin_unlock_irqrestore(&conf->device_lock, flags);
print_conf(conf);
- return 0;
+ return count;
}
@@ -1633,7 +1635,7 @@ static void raid1d(mddev_t *mddev)
sync_request_write(mddev, r1_bio);
unplug = 1;
} else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
- /* some requests in the r1bio were BIO_RW_BARRIER
+ /* some requests in the r1bio were REQ_HARDBARRIER
* requests which failed with -EOPNOTSUPP. Hohumm..
* Better resubmit without the barrier.
* We know which devices to resubmit for, because
@@ -1641,7 +1643,7 @@ static void raid1d(mddev_t *mddev)
* We already have a nr_pending reference on these rdevs.
*/
int i;
- const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
+ const unsigned long do_sync = (r1_bio->master_bio->bi_rw & REQ_SYNC);
clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
clear_bit(R1BIO_Barrier, &r1_bio->state);
for (i=0; i < conf->raid_disks; i++)
@@ -1662,8 +1664,7 @@ static void raid1d(mddev_t *mddev)
conf->mirrors[i].rdev->data_offset;
bio->bi_bdev = conf->mirrors[i].rdev->bdev;
bio->bi_end_io = raid1_end_write_request;
- bio->bi_rw = WRITE |
- (do_sync << BIO_RW_SYNCIO);
+ bio->bi_rw = WRITE | do_sync;
bio->bi_private = r1_bio;
r1_bio->bios[i] = bio;
generic_make_request(bio);
@@ -1698,7 +1699,7 @@ static void raid1d(mddev_t *mddev)
(unsigned long long)r1_bio->sector);
raid_end_bio_io(r1_bio);
} else {
- const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
+ const unsigned long do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC;
r1_bio->bios[r1_bio->read_disk] =
mddev->ro ? IO_BLOCKED : NULL;
r1_bio->read_disk = disk;
@@ -1715,7 +1716,7 @@ static void raid1d(mddev_t *mddev)
bio->bi_sector = r1_bio->sector + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
bio->bi_end_io = raid1_end_read_request;
- bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+ bio->bi_rw = READ | do_sync;
bio->bi_private = r1_bio;
unplug = 1;
generic_make_request(bio);
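
Note: the multipath, raid0 and raid1 hunks above (and raid10/raid5 below) move from the BIO_RW_* bit numbers to the REQ_* bit masks. The subtle part is do_sync in raid1: it used to be a bool shifted back into position with << BIO_RW_SYNCIO; now the already-positioned mask is captured and or-ed straight back in, hence the bool to unsigned long type change. A tiny self-contained illustration (the flag values are stand-ins, not the real REQ_* definitions):

#include <assert.h>

#define STUB_SYNC	(1UL << 4)	/* stand-in for a REQ_*-style mask */
#define STUB_READ	0UL

int main(void)
{
	unsigned long bi_rw = STUB_READ | STUB_SYNC;	/* incoming bio flags */

	/* New idiom: keep the masked value, or it back in unchanged. */
	const unsigned long do_sync = bi_rw & STUB_SYNC;
	unsigned long child_rw = STUB_READ | do_sync;

	assert(child_rw & STUB_SYNC);

	/* Had do_sync been a bool (0 or 1), "READ | do_sync" would have set
	 * bit 0 instead of the sync bit -- hence the type change. */
	return 0;
}
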
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 42e64e4e5e2..84718383124 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -799,12 +799,12 @@ static int make_request(mddev_t *mddev, struct bio * bio)
int i;
int chunk_sects = conf->chunk_mask + 1;
const int rw = bio_data_dir(bio);
- const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+ const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
struct bio_list bl;
unsigned long flags;
mdk_rdev_t *blocked_rdev;
- if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+ if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
md_barrier_request(mddev, bio);
return 0;
}
@@ -825,11 +825,29 @@ static int make_request(mddev_t *mddev, struct bio * bio)
*/
bp = bio_split(bio,
chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
+
+ /* Each of these 'make_request' calls will call 'wait_barrier'.
+ * If the first succeeds but the second blocks due to the resync
+ * thread raising the barrier, we will deadlock because the
+ * IO to the underlying device will be queued in generic_make_request
+ * and will never complete, so will never reduce nr_pending.
+ * So increment nr_waiting here so no new raise_barriers will
+ * succeed, and so the second wait_barrier cannot block.
+ */
+ spin_lock_irq(&conf->resync_lock);
+ conf->nr_waiting++;
+ spin_unlock_irq(&conf->resync_lock);
+
if (make_request(mddev, &bp->bio1))
generic_make_request(&bp->bio1);
if (make_request(mddev, &bp->bio2))
generic_make_request(&bp->bio2);
+ spin_lock_irq(&conf->resync_lock);
+ conf->nr_waiting--;
+ wake_up(&conf->wait_barrier);
+ spin_unlock_irq(&conf->resync_lock);
+
bio_pair_release(bp);
return 0;
bad_map:
@@ -879,7 +897,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
mirror->rdev->data_offset;
read_bio->bi_bdev = mirror->rdev->bdev;
read_bio->bi_end_io = raid10_end_read_request;
- read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+ read_bio->bi_rw = READ | do_sync;
read_bio->bi_private = r10_bio;
generic_make_request(read_bio);
@@ -947,7 +965,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
conf->mirrors[d].rdev->data_offset;
mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
mbio->bi_end_io = raid10_end_write_request;
- mbio->bi_rw = WRITE | (do_sync << BIO_RW_SYNCIO);
+ mbio->bi_rw = WRITE | do_sync;
mbio->bi_private = r10_bio;
atomic_inc(&r10_bio->remaining);
@@ -1098,6 +1116,8 @@ static int raid10_spare_active(mddev_t *mddev)
int i;
conf_t *conf = mddev->private;
mirror_info_t *tmp;
+ int count = 0;
+ unsigned long flags;
/*
* Find all non-in_sync disks within the RAID10 configuration
@@ -1108,15 +1128,16 @@ static int raid10_spare_active(mddev_t *mddev)
if (tmp->rdev
&& !test_bit(Faulty, &tmp->rdev->flags)
&& !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
- unsigned long flags;
- spin_lock_irqsave(&conf->device_lock, flags);
- mddev->degraded--;
- spin_unlock_irqrestore(&conf->device_lock, flags);
+ count++;
+ sysfs_notify_dirent(tmp->rdev->sysfs_state);
}
}
+ spin_lock_irqsave(&conf->device_lock, flags);
+ mddev->degraded -= count;
+ spin_unlock_irqrestore(&conf->device_lock, flags);
print_conf(conf);
- return 0;
+ return count;
}
@@ -1716,7 +1737,7 @@ static void raid10d(mddev_t *mddev)
raid_end_bio_io(r10_bio);
bio_put(bio);
} else {
- const bool do_sync = bio_rw_flagged(r10_bio->master_bio, BIO_RW_SYNCIO);
+ const unsigned long do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
bio_put(bio);
rdev = conf->mirrors[mirror].rdev;
if (printk_ratelimit())
@@ -1730,7 +1751,7 @@ static void raid10d(mddev_t *mddev)
bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr
+ rdev->data_offset;
bio->bi_bdev = rdev->bdev;
- bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+ bio->bi_rw = READ | do_sync;
bio->bi_private = r10_bio;
bio->bi_end_io = raid10_end_read_request;
unplug = 1;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 96c690279fc..69b0a169e43 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -201,11 +201,11 @@ static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
if (test_bit(STRIPE_HANDLE, &sh->state)) {
if (test_bit(STRIPE_DELAYED, &sh->state)) {
list_add_tail(&sh->lru, &conf->delayed_list);
- blk_plug_device(conf->mddev->queue);
+ plugger_set_plug(&conf->plug);
} else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
sh->bm_seq - conf->seq_write > 0) {
list_add_tail(&sh->lru, &conf->bitmap_list);
- blk_plug_device(conf->mddev->queue);
+ plugger_set_plug(&conf->plug);
} else {
clear_bit(STRIPE_BIT_DELAY, &sh->state);
list_add_tail(&sh->lru, &conf->handle_list);
@@ -434,7 +434,6 @@ static int has_failed(raid5_conf_t *conf)
}
static void unplug_slaves(mddev_t *mddev);
-static void raid5_unplug_device(struct request_queue *q);
static struct stripe_head *
get_active_stripe(raid5_conf_t *conf, sector_t sector,
@@ -464,7 +463,7 @@ get_active_stripe(raid5_conf_t *conf, sector_t sector,
< (conf->max_nr_stripes *3/4)
|| !conf->inactive_blocked),
conf->device_lock,
- raid5_unplug_device(conf->mddev->queue)
+ md_raid5_unplug_device(conf)
);
conf->inactive_blocked = 0;
} else
@@ -1337,10 +1336,14 @@ static int grow_stripes(raid5_conf_t *conf, int num)
struct kmem_cache *sc;
int devs = max(conf->raid_disks, conf->previous_raid_disks);
- sprintf(conf->cache_name[0],
- "raid%d-%s", conf->level, mdname(conf->mddev));
- sprintf(conf->cache_name[1],
- "raid%d-%s-alt", conf->level, mdname(conf->mddev));
+ if (conf->mddev->gendisk)
+ sprintf(conf->cache_name[0],
+ "raid%d-%s", conf->level, mdname(conf->mddev));
+ else
+ sprintf(conf->cache_name[0],
+ "raid%d-%p", conf->level, conf->mddev);
+ sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
+
conf->active_name = 0;
sc = kmem_cache_create(conf->cache_name[conf->active_name],
sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
@@ -3614,7 +3617,7 @@ static void raid5_activate_delayed(raid5_conf_t *conf)
list_add_tail(&sh->lru, &conf->hold_list);
}
} else
- blk_plug_device(conf->mddev->queue);
+ plugger_set_plug(&conf->plug);
}
static void activate_bit_delay(raid5_conf_t *conf)
@@ -3655,36 +3658,44 @@ static void unplug_slaves(mddev_t *mddev)
rcu_read_unlock();
}
-static void raid5_unplug_device(struct request_queue *q)
+void md_raid5_unplug_device(raid5_conf_t *conf)
{
- mddev_t *mddev = q->queuedata;
- raid5_conf_t *conf = mddev->private;
unsigned long flags;
spin_lock_irqsave(&conf->device_lock, flags);
- if (blk_remove_plug(q)) {
+ if (plugger_remove_plug(&conf->plug)) {
conf->seq_flush++;
raid5_activate_delayed(conf);
}
- md_wakeup_thread(mddev->thread);
+ md_wakeup_thread(conf->mddev->thread);
spin_unlock_irqrestore(&conf->device_lock, flags);
- unplug_slaves(mddev);
+ unplug_slaves(conf->mddev);
}
+EXPORT_SYMBOL_GPL(md_raid5_unplug_device);
-static int raid5_congested(void *data, int bits)
+static void raid5_unplug(struct plug_handle *plug)
+{
+ raid5_conf_t *conf = container_of(plug, raid5_conf_t, plug);
+ md_raid5_unplug_device(conf);
+}
+
+static void raid5_unplug_queue(struct request_queue *q)
+{
+ mddev_t *mddev = q->queuedata;
+ md_raid5_unplug_device(mddev->private);
+}
+
+int md_raid5_congested(mddev_t *mddev, int bits)
{
- mddev_t *mddev = data;
raid5_conf_t *conf = mddev->private;
/* No difference between reads and writes. Just check
* how busy the stripe_cache is
*/
- if (mddev_congested(mddev, bits))
- return 1;
if (conf->inactive_blocked)
return 1;
if (conf->quiesce)
@@ -3694,6 +3705,15 @@ static int raid5_congested(void *data, int bits)
return 0;
}
+EXPORT_SYMBOL_GPL(md_raid5_congested);
+
+static int raid5_congested(void *data, int bits)
+{
+ mddev_t *mddev = data;
+
+ return mddev_congested(mddev, bits) ||
+ md_raid5_congested(mddev, bits);
+}
/* We want read requests to align with chunks where possible,
* but write requests don't need to.
@@ -3958,7 +3978,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
const int rw = bio_data_dir(bi);
int remaining;
- if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) {
+ if (unlikely(bi->bi_rw & REQ_HARDBARRIER)) {
/* Drain all pending writes. We only really need
* to ensure they have been submitted, but this is
* easier.
@@ -4075,7 +4095,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
* add failed due to overlap. Flush everything
* and wait a while
*/
- raid5_unplug_device(mddev->queue);
+ md_raid5_unplug_device(conf);
release_stripe(sh);
schedule();
goto retry;
@@ -4566,23 +4586,15 @@ raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
return 0;
}
-static ssize_t
-raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
+int
+raid5_set_cache_size(mddev_t *mddev, int size)
{
raid5_conf_t *conf = mddev->private;
- unsigned long new;
int err;
- if (len >= PAGE_SIZE)
+ if (size <= 16 || size > 32768)
return -EINVAL;
- if (!conf)
- return -ENODEV;
-
- if (strict_strtoul(page, 10, &new))
- return -EINVAL;
- if (new <= 16 || new > 32768)
- return -EINVAL;
- while (new < conf->max_nr_stripes) {
+ while (size < conf->max_nr_stripes) {
if (drop_one_stripe(conf))
conf->max_nr_stripes--;
else
@@ -4591,11 +4603,32 @@ raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
err = md_allow_write(mddev);
if (err)
return err;
- while (new > conf->max_nr_stripes) {
+ while (size > conf->max_nr_stripes) {
if (grow_one_stripe(conf))
conf->max_nr_stripes++;
else break;
}
+ return 0;
+}
+EXPORT_SYMBOL(raid5_set_cache_size);
+
+static ssize_t
+raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
+{
+ raid5_conf_t *conf = mddev->private;
+ unsigned long new;
+ int err;
+
+ if (len >= PAGE_SIZE)
+ return -EINVAL;
+ if (!conf)
+ return -ENODEV;
+
+ if (strict_strtoul(page, 10, &new))
+ return -EINVAL;
+ err = raid5_set_cache_size(mddev, new);
+ if (err)
+ return err;
return len;
}
@@ -4958,7 +4991,7 @@ static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded
static int run(mddev_t *mddev)
{
raid5_conf_t *conf;
- int working_disks = 0, chunk_size;
+ int working_disks = 0;
int dirty_parity_disks = 0;
mdk_rdev_t *rdev;
sector_t reshape_offset = 0;
@@ -5144,42 +5177,47 @@ static int run(mddev_t *mddev)
"reshape");
}
- /* read-ahead size must cover two whole stripes, which is
- * 2 * (datadisks) * chunksize where 'n' is the number of raid devices
- */
- {
- int data_disks = conf->previous_raid_disks - conf->max_degraded;
- int stripe = data_disks *
- ((mddev->chunk_sectors << 9) / PAGE_SIZE);
- if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
- mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
- }
/* Ok, everything is just fine now */
if (mddev->to_remove == &raid5_attrs_group)
mddev->to_remove = NULL;
- else if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
+ else if (mddev->kobj.sd &&
+ sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
printk(KERN_WARNING
- "md/raid:%s: failed to create sysfs attributes.\n",
+ "raid5: failed to create sysfs attributes for %s\n",
mdname(mddev));
+ md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
- mddev->queue->queue_lock = &conf->device_lock;
+ plugger_init(&conf->plug, raid5_unplug);
+ mddev->plug = &conf->plug;
+ if (mddev->queue) {
+ int chunk_size;
+ /* read-ahead size must cover two whole stripes, which
+ * is 2 * (datadisks) * chunksize, where 'datadisks' is
+ * the number of raid devices minus the parity devices
+ */
+ int data_disks = conf->previous_raid_disks - conf->max_degraded;
+ int stripe = data_disks *
+ ((mddev->chunk_sectors << 9) / PAGE_SIZE);
+ if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
+ mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
- mddev->queue->unplug_fn = raid5_unplug_device;
- mddev->queue->backing_dev_info.congested_data = mddev;
- mddev->queue->backing_dev_info.congested_fn = raid5_congested;
+ blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
- md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
+ mddev->queue->backing_dev_info.congested_data = mddev;
+ mddev->queue->backing_dev_info.congested_fn = raid5_congested;
+ mddev->queue->queue_lock = &conf->device_lock;
+ mddev->queue->unplug_fn = raid5_unplug_queue;
- blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
- chunk_size = mddev->chunk_sectors << 9;
- blk_queue_io_min(mddev->queue, chunk_size);
- blk_queue_io_opt(mddev->queue, chunk_size *
- (conf->raid_disks - conf->max_degraded));
+ chunk_size = mddev->chunk_sectors << 9;
+ blk_queue_io_min(mddev->queue, chunk_size);
+ blk_queue_io_opt(mddev->queue, chunk_size *
+ (conf->raid_disks - conf->max_degraded));
- list_for_each_entry(rdev, &mddev->disks, same_set)
- disk_stack_limits(mddev->gendisk, rdev->bdev,
- rdev->data_offset << 9);
+ list_for_each_entry(rdev, &mddev->disks, same_set)
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
+ }
return 0;
abort:
@@ -5200,8 +5238,9 @@ static int stop(mddev_t *mddev)
md_unregister_thread(mddev->thread);
mddev->thread = NULL;
- mddev->queue->backing_dev_info.congested_fn = NULL;
- blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
+ if (mddev->queue)
+ mddev->queue->backing_dev_info.congested_fn = NULL;
+ plugger_flush(&conf->plug); /* the unplug fn references 'conf'*/
free_conf(conf);
mddev->private = NULL;
mddev->to_remove = &raid5_attrs_group;
@@ -5291,6 +5330,8 @@ static int raid5_spare_active(mddev_t *mddev)
int i;
raid5_conf_t *conf = mddev->private;
struct disk_info *tmp;
+ int count = 0;
+ unsigned long flags;
for (i = 0; i < conf->raid_disks; i++) {
tmp = conf->disks + i;
@@ -5298,14 +5339,15 @@ static int raid5_spare_active(mddev_t *mddev)
&& tmp->rdev->recovery_offset == MaxSector
&& !test_bit(Faulty, &tmp->rdev->flags)
&& !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
- unsigned long flags;
- spin_lock_irqsave(&conf->device_lock, flags);
- mddev->degraded--;
- spin_unlock_irqrestore(&conf->device_lock, flags);
+ count++;
+ sysfs_notify_dirent(tmp->rdev->sysfs_state);
}
}
+ spin_lock_irqsave(&conf->device_lock, flags);
+ mddev->degraded -= count;
+ spin_unlock_irqrestore(&conf->device_lock, flags);
print_raid5_conf(conf);
- return 0;
+ return count;
}
static int raid5_remove_disk(mddev_t *mddev, int number)
@@ -5545,10 +5587,7 @@ static int raid5_start_reshape(mddev_t *mddev)
sprintf(nm, "rd%d", rdev->raid_disk);
if (sysfs_create_link(&mddev->kobj,
&rdev->kobj, nm))
- printk(KERN_WARNING
- "md/raid:%s: failed to create "
- " link %s\n",
- mdname(mddev), nm);
+ /* Failure here is OK */;
} else
break;
}
@@ -5603,7 +5642,7 @@ static void end_reshape(raid5_conf_t *conf)
/* read-ahead size must cover two whole stripes, which is
* 2 * (datadisks) * chunksize where 'n' is the number of raid devices
*/
- {
+ if (conf->mddev->queue) {
int data_disks = conf->raid_disks - conf->max_degraded;
int stripe = data_disks * ((conf->chunk_sectors << 9)
/ PAGE_SIZE);
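
Note: raid5_store_stripe_cache_size() is split so the sysfs store only parses and bounds-checks, while the grow/shrink loop moves into the exported raid5_set_cache_size() (see the EXPORT_SYMBOL above and the extern added to raid5.h below). That gives an in-kernel caller, e.g. a dm wrapper around raid456, a direct entry point. An illustrative caller (the my_* name is not from this patch):

/* Somewhere with a handle on the array's mddev_t, such as a dm target's
 * constructor/resume path: */
static int my_resize_stripe_cache(mddev_t *mddev, int nr_stripes)
{
	int err = raid5_set_cache_size(mddev, nr_stripes);

	if (err)	/* -EINVAL outside (16, 32768], or an error from md_allow_write() */
		return err;
	return 0;
}
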
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 0f86f5e3672..36eaed5dfd6 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -388,7 +388,7 @@ struct raid5_private_data {
* two caches.
*/
int active_name;
- char cache_name[2][20];
+ char cache_name[2][32];
struct kmem_cache *slab_cache; /* for allocating stripes */
int seq_flush, seq_write;
@@ -398,6 +398,9 @@ struct raid5_private_data {
* (fresh device added).
* Cleared when a sync completes.
*/
+
+ struct plug_handle plug;
+
/* per cpu variables */
struct raid5_percpu {
struct page *spare_page; /* Used when checking P/Q in raid6 */
@@ -497,4 +500,8 @@ static inline int algorithm_is_DDF(int layout)
{
return layout >= 8 && layout <= 10;
}
+
+extern int md_raid5_congested(mddev_t *mddev, int bits);
+extern void md_raid5_unplug_device(raid5_conf_t *conf);
+extern int raid5_set_cache_size(mddev_t *mddev, int size);
#endif
diff --git a/drivers/md/raid6algos.c b/drivers/md/raid6algos.c
deleted file mode 100644
index 1f8784bfd44..00000000000
--- a/drivers/md/raid6algos.c
+++ /dev/null
@@ -1,154 +0,0 @@
-/* -*- linux-c -*- ------------------------------------------------------- *
- *
- * Copyright 2002 H. Peter Anvin - All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
- * Boston MA 02111-1307, USA; either version 2 of the License, or
- * (at your option) any later version; incorporated herein by reference.
- *
- * ----------------------------------------------------------------------- */
-
-/*
- * raid6algos.c
- *
- * Algorithm list and algorithm selection for RAID-6
- */
-
-#include <linux/raid/pq.h>
-#include <linux/gfp.h>
-#ifndef __KERNEL__
-#include <sys/mman.h>
-#include <stdio.h>
-#else
-#if !RAID6_USE_EMPTY_ZERO_PAGE
-/* In .bss so it's zeroed */
-const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
-EXPORT_SYMBOL(raid6_empty_zero_page);
-#endif
-#endif
-
-struct raid6_calls raid6_call;
-EXPORT_SYMBOL_GPL(raid6_call);
-
-const struct raid6_calls * const raid6_algos[] = {
- &raid6_intx1,
- &raid6_intx2,
- &raid6_intx4,
- &raid6_intx8,
-#if defined(__ia64__)
- &raid6_intx16,
- &raid6_intx32,
-#endif
-#if defined(__i386__) && !defined(__arch_um__)
- &raid6_mmxx1,
- &raid6_mmxx2,
- &raid6_sse1x1,
- &raid6_sse1x2,
- &raid6_sse2x1,
- &raid6_sse2x2,
-#endif
-#if defined(__x86_64__) && !defined(__arch_um__)
- &raid6_sse2x1,
- &raid6_sse2x2,
- &raid6_sse2x4,
-#endif
-#ifdef CONFIG_ALTIVEC
- &raid6_altivec1,
- &raid6_altivec2,
- &raid6_altivec4,
- &raid6_altivec8,
-#endif
- NULL
-};
-
-#ifdef __KERNEL__
-#define RAID6_TIME_JIFFIES_LG2 4
-#else
-/* Need more time to be stable in userspace */
-#define RAID6_TIME_JIFFIES_LG2 9
-#define time_before(x, y) ((x) < (y))
-#endif
-
-/* Try to pick the best algorithm */
-/* This code uses the gfmul table as convenient data set to abuse */
-
-int __init raid6_select_algo(void)
-{
- const struct raid6_calls * const * algo;
- const struct raid6_calls * best;
- char *syndromes;
- void *dptrs[(65536/PAGE_SIZE)+2];
- int i, disks;
- unsigned long perf, bestperf;
- int bestprefer;
- unsigned long j0, j1;
-
- disks = (65536/PAGE_SIZE)+2;
- for ( i = 0 ; i < disks-2 ; i++ ) {
- dptrs[i] = ((char *)raid6_gfmul) + PAGE_SIZE*i;
- }
-
- /* Normal code - use a 2-page allocation to avoid D$ conflict */
- syndromes = (void *) __get_free_pages(GFP_KERNEL, 1);
-
- if ( !syndromes ) {
- printk("raid6: Yikes! No memory available.\n");
- return -ENOMEM;
- }
-
- dptrs[disks-2] = syndromes;
- dptrs[disks-1] = syndromes + PAGE_SIZE;
-
- bestperf = 0; bestprefer = 0; best = NULL;
-
- for ( algo = raid6_algos ; *algo ; algo++ ) {
- if ( !(*algo)->valid || (*algo)->valid() ) {
- perf = 0;
-
- preempt_disable();
- j0 = jiffies;
- while ( (j1 = jiffies) == j0 )
- cpu_relax();
- while (time_before(jiffies,
- j1 + (1<<RAID6_TIME_JIFFIES_LG2))) {
- (*algo)->gen_syndrome(disks, PAGE_SIZE, dptrs);
- perf++;
- }
- preempt_enable();
-
- if ( (*algo)->prefer > bestprefer ||
- ((*algo)->prefer == bestprefer &&
- perf > bestperf) ) {
- best = *algo;
- bestprefer = best->prefer;
- bestperf = perf;
- }
- printk("raid6: %-8s %5ld MB/s\n", (*algo)->name,
- (perf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2));
- }
- }
-
- if (best) {
- printk("raid6: using algorithm %s (%ld MB/s)\n",
- best->name,
- (bestperf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2));
- raid6_call = *best;
- } else
- printk("raid6: Yikes! No algorithm found!\n");
-
- free_pages((unsigned long)syndromes, 1);
-
- return best ? 0 : -EINVAL;
-}
-
-static void raid6_exit(void)
-{
- do { } while (0);
-}
-
-subsys_initcall(raid6_select_algo);
-module_exit(raid6_exit);
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("RAID6 Q-syndrome calculations");
diff --git a/drivers/md/raid6altivec.uc b/drivers/md/raid6altivec.uc
deleted file mode 100644
index 2654d5c854b..00000000000
--- a/drivers/md/raid6altivec.uc
+++ /dev/null
@@ -1,130 +0,0 @@
-/* -*- linux-c -*- ------------------------------------------------------- *
- *
- * Copyright 2002-2004 H. Peter Anvin - All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
- * Boston MA 02111-1307, USA; either version 2 of the License, or
- * (at your option) any later version; incorporated herein by reference.
- *
- * ----------------------------------------------------------------------- */
-
-/*
- * raid6altivec$#.c
- *
- * $#-way unrolled portable integer math RAID-6 instruction set
- *
- * This file is postprocessed using unroll.awk
- *
- * <benh> hpa: in process,
- * you can just "steal" the vec unit with enable_kernel_altivec() (but
- * bracked this with preempt_disable/enable or in a lock)
- */
-
-#include <linux/raid/pq.h>
-
-#ifdef CONFIG_ALTIVEC
-
-#include <altivec.h>
-#ifdef __KERNEL__
-# include <asm/system.h>
-# include <asm/cputable.h>
-#endif
-
-/*
- * This is the C data type to use. We use a vector of
- * signed char so vec_cmpgt() will generate the right
- * instruction.
- */
-
-typedef vector signed char unative_t;
-
-#define NBYTES(x) ((vector signed char) {x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x})
-#define NSIZE sizeof(unative_t)
-
-/*
- * The SHLBYTE() operation shifts each byte left by 1, *not*
- * rolling over into the next byte
- */
-static inline __attribute_const__ unative_t SHLBYTE(unative_t v)
-{
- return vec_add(v,v);
-}
-
-/*
- * The MASK() operation returns 0xFF in any byte for which the high
- * bit is 1, 0x00 for any byte for which the high bit is 0.
- */
-static inline __attribute_const__ unative_t MASK(unative_t v)
-{
- unative_t zv = NBYTES(0);
-
- /* vec_cmpgt returns a vector bool char; thus the need for the cast */
- return (unative_t)vec_cmpgt(zv, v);
-}
-
-
-/* This is noinline to make damned sure that gcc doesn't move any of the
- Altivec code around the enable/disable code */
-static void noinline
-raid6_altivec$#_gen_syndrome_real(int disks, size_t bytes, void **ptrs)
-{
- u8 **dptr = (u8 **)ptrs;
- u8 *p, *q;
- int d, z, z0;
-
- unative_t wd$$, wq$$, wp$$, w1$$, w2$$;
- unative_t x1d = NBYTES(0x1d);
-
- z0 = disks - 3; /* Highest data disk */
- p = dptr[z0+1]; /* XOR parity */
- q = dptr[z0+2]; /* RS syndrome */
-
- for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
- wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE];
- for ( z = z0-1 ; z >= 0 ; z-- ) {
- wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
- wp$$ = vec_xor(wp$$, wd$$);
- w2$$ = MASK(wq$$);
- w1$$ = SHLBYTE(wq$$);
- w2$$ = vec_and(w2$$, x1d);
- w1$$ = vec_xor(w1$$, w2$$);
- wq$$ = vec_xor(w1$$, wd$$);
- }
- *(unative_t *)&p[d+NSIZE*$$] = wp$$;
- *(unative_t *)&q[d+NSIZE*$$] = wq$$;
- }
-}
-
-static void raid6_altivec$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
-{
- preempt_disable();
- enable_kernel_altivec();
-
- raid6_altivec$#_gen_syndrome_real(disks, bytes, ptrs);
-
- preempt_enable();
-}
-
-int raid6_have_altivec(void);
-#if $# == 1
-int raid6_have_altivec(void)
-{
- /* This assumes either all CPUs have Altivec or none does */
-# ifdef __KERNEL__
- return cpu_has_feature(CPU_FTR_ALTIVEC);
-# else
- return 1;
-# endif
-}
-#endif
-
-const struct raid6_calls raid6_altivec$# = {
- raid6_altivec$#_gen_syndrome,
- raid6_have_altivec,
- "altivecx$#",
- 0
-};
-
-#endif /* CONFIG_ALTIVEC */
diff --git a/drivers/md/raid6int.uc b/drivers/md/raid6int.uc
deleted file mode 100644
index d1e276a14fa..00000000000
--- a/drivers/md/raid6int.uc
+++ /dev/null
@@ -1,117 +0,0 @@
-/* -*- linux-c -*- ------------------------------------------------------- *
- *
- * Copyright 2002-2004 H. Peter Anvin - All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
- * Boston MA 02111-1307, USA; either version 2 of the License, or
- * (at your option) any later version; incorporated herein by reference.
- *
- * ----------------------------------------------------------------------- */
-
-/*
- * raid6int$#.c
- *
- * $#-way unrolled portable integer math RAID-6 instruction set
- *
- * This file is postprocessed using unroll.awk
- */
-
-#include <linux/raid/pq.h>
-
-/*
- * This is the C data type to use
- */
-
-/* Change this from BITS_PER_LONG if there is something better... */
-#if BITS_PER_LONG == 64
-# define NBYTES(x) ((x) * 0x0101010101010101UL)
-# define NSIZE 8
-# define NSHIFT 3
-# define NSTRING "64"
-typedef u64 unative_t;
-#else
-# define NBYTES(x) ((x) * 0x01010101U)
-# define NSIZE 4
-# define NSHIFT 2
-# define NSTRING "32"
-typedef u32 unative_t;
-#endif
-
-
-
-/*
- * IA-64 wants insane amounts of unrolling. On other architectures that
- * is just a waste of space.
- */
-#if ($# <= 8) || defined(__ia64__)
-
-
-/*
- * These sub-operations are separate inlines since they can sometimes be
- * specially optimized using architecture-specific hacks.
- */
-
-/*
- * The SHLBYTE() operation shifts each byte left by 1, *not*
- * rolling over into the next byte
- */
-static inline __attribute_const__ unative_t SHLBYTE(unative_t v)
-{
- unative_t vv;
-
- vv = (v << 1) & NBYTES(0xfe);
- return vv;
-}
-
-/*
- * The MASK() operation returns 0xFF in any byte for which the high
- * bit is 1, 0x00 for any byte for which the high bit is 0.
- */
-static inline __attribute_const__ unative_t MASK(unative_t v)
-{
- unative_t vv;
-
- vv = v & NBYTES(0x80);
- vv = (vv << 1) - (vv >> 7); /* Overflow on the top bit is OK */
- return vv;
-}
-
-
-static void raid6_int$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
-{
- u8 **dptr = (u8 **)ptrs;
- u8 *p, *q;
- int d, z, z0;
-
- unative_t wd$$, wq$$, wp$$, w1$$, w2$$;
-
- z0 = disks - 3; /* Highest data disk */
- p = dptr[z0+1]; /* XOR parity */
- q = dptr[z0+2]; /* RS syndrome */
-
- for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
- wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE];
- for ( z = z0-1 ; z >= 0 ; z-- ) {
- wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
- wp$$ ^= wd$$;
- w2$$ = MASK(wq$$);
- w1$$ = SHLBYTE(wq$$);
- w2$$ &= NBYTES(0x1d);
- w1$$ ^= w2$$;
- wq$$ = w1$$ ^ wd$$;
- }
- *(unative_t *)&p[d+NSIZE*$$] = wp$$;
- *(unative_t *)&q[d+NSIZE*$$] = wq$$;
- }
-}
-
-const struct raid6_calls raid6_intx$# = {
- raid6_int$#_gen_syndrome,
- NULL, /* always valid */
- "int" NSTRING "x$#",
- 0
-};
-
-#endif
diff --git a/drivers/md/raid6mmx.c b/drivers/md/raid6mmx.c
deleted file mode 100644
index e7f6c13132b..00000000000
--- a/drivers/md/raid6mmx.c
+++ /dev/null
@@ -1,142 +0,0 @@
-/* -*- linux-c -*- ------------------------------------------------------- *
- *
- * Copyright 2002 H. Peter Anvin - All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
- * Boston MA 02111-1307, USA; either version 2 of the License, or
- * (at your option) any later version; incorporated herein by reference.
- *
- * ----------------------------------------------------------------------- */
-
-/*
- * raid6mmx.c
- *
- * MMX implementation of RAID-6 syndrome functions
- */
-
-#if defined(__i386__) && !defined(__arch_um__)
-
-#include <linux/raid/pq.h>
-#include "raid6x86.h"
-
-/* Shared with raid6sse1.c */
-const struct raid6_mmx_constants {
- u64 x1d;
-} raid6_mmx_constants = {
- 0x1d1d1d1d1d1d1d1dULL,
-};
-
-static int raid6_have_mmx(void)
-{
- /* Not really "boot_cpu" but "all_cpus" */
- return boot_cpu_has(X86_FEATURE_MMX);
-}
-
-/*
- * Plain MMX implementation
- */
-static void raid6_mmx1_gen_syndrome(int disks, size_t bytes, void **ptrs)
-{
- u8 **dptr = (u8 **)ptrs;
- u8 *p, *q;
- int d, z, z0;
-
- z0 = disks - 3; /* Highest data disk */
- p = dptr[z0+1]; /* XOR parity */
- q = dptr[z0+2]; /* RS syndrome */
-
- kernel_fpu_begin();
-
- asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
- asm volatile("pxor %mm5,%mm5"); /* Zero temp */
-
- for ( d = 0 ; d < bytes ; d += 8 ) {
- asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */
- asm volatile("movq %mm2,%mm4"); /* Q[0] */
- for ( z = z0-1 ; z >= 0 ; z-- ) {
- asm volatile("movq %0,%%mm6" : : "m" (dptr[z][d]));
- asm volatile("pcmpgtb %mm4,%mm5");
- asm volatile("paddb %mm4,%mm4");
- asm volatile("pand %mm0,%mm5");
- asm volatile("pxor %mm5,%mm4");
- asm volatile("pxor %mm5,%mm5");
- asm volatile("pxor %mm6,%mm2");
- asm volatile("pxor %mm6,%mm4");
- }
- asm volatile("movq %%mm2,%0" : "=m" (p[d]));
- asm volatile("pxor %mm2,%mm2");
- asm volatile("movq %%mm4,%0" : "=m" (q[d]));
- asm volatile("pxor %mm4,%mm4");
- }
-
- kernel_fpu_end();
-}
-
-const struct raid6_calls raid6_mmxx1 = {
- raid6_mmx1_gen_syndrome,
- raid6_have_mmx,
- "mmxx1",
- 0
-};
-
-/*
- * Unrolled-by-2 MMX implementation
- */
-static void raid6_mmx2_gen_syndrome(int disks, size_t bytes, void **ptrs)
-{
- u8 **dptr = (u8 **)ptrs;
- u8 *p, *q;
- int d, z, z0;
-
- z0 = disks - 3; /* Highest data disk */
- p = dptr[z0+1]; /* XOR parity */
- q = dptr[z0+2]; /* RS syndrome */
-
- kernel_fpu_begin();
-
- asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
- asm volatile("pxor %mm5,%mm5"); /* Zero temp */
- asm volatile("pxor %mm7,%mm7"); /* Zero temp */
-
- for ( d = 0 ; d < bytes ; d += 16 ) {
- asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */
- asm volatile("movq %0,%%mm3" : : "m" (dptr[z0][d+8]));
- asm volatile("movq %mm2,%mm4"); /* Q[0] */
- asm volatile("movq %mm3,%mm6"); /* Q[1] */
- for ( z = z0-1 ; z >= 0 ; z-- ) {
- asm volatile("pcmpgtb %mm4,%mm5");
- asm volatile("pcmpgtb %mm6,%mm7");
- asm volatile("paddb %mm4,%mm4");
- asm volatile("paddb %mm6,%mm6");
- asm volatile("pand %mm0,%mm5");
- asm volatile("pand %mm0,%mm7");
- asm volatile("pxor %mm5,%mm4");
- asm volatile("pxor %mm7,%mm6");
- asm volatile("movq %0,%%mm5" : : "m" (dptr[z][d]));
- asm volatile("movq %0,%%mm7" : : "m" (dptr[z][d+8]));
- asm volatile("pxor %mm5,%mm2");
- asm volatile("pxor %mm7,%mm3");
- asm volatile("pxor %mm5,%mm4");
- asm volatile("pxor %mm7,%mm6");
- asm volatile("pxor %mm5,%mm5");
- asm volatile("pxor %mm7,%mm7");
- }
- asm volatile("movq %%mm2,%0" : "=m" (p[d]));
- asm volatile("movq %%mm3,%0" : "=m" (p[d+8]));
- asm volatile("movq %%mm4,%0" : "=m" (q[d]));
- asm volatile("movq %%mm6,%0" : "=m" (q[d+8]));
- }
-
- kernel_fpu_end();
-}
-
-const struct raid6_calls raid6_mmxx2 = {
- raid6_mmx2_gen_syndrome,
- raid6_have_mmx,
- "mmxx2",
- 0
-};
-
-#endif
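
The pcmpgtb/paddb/pand/pxor sequence repeated in both loops above is a branch-free GF(2^8) multiply-by-2 of every byte in the running Q accumulator, with 0x1d (from raid6_mmx_constants.x1d) as the reduction term. A scalar sketch of the same per-byte step, assuming the usual RAID-6 field polynomial 0x11d (a hypothetical helper for illustration, not an existing kernel function):

	/* GF(2^8) multiply by 2: shift left, then xor in 0x1d
	 * for every byte whose top bit was set. */
	static inline u8 raid6_gf_mul2(u8 v)
	{
		u8 mask = (v & 0x80) ? 0x1d : 0;	/* pcmpgtb + pand */

		return (u8)((v << 1) ^ mask);		/* paddb + pxor   */
	}
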
diff --git a/drivers/md/raid6recov.c b/drivers/md/raid6recov.c
deleted file mode 100644
index 2609f00e0d6..00000000000
--- a/drivers/md/raid6recov.c
+++ /dev/null
@@ -1,132 +0,0 @@
-/* -*- linux-c -*- ------------------------------------------------------- *
- *
- * Copyright 2002 H. Peter Anvin - All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
- * Boston MA 02111-1307, USA; either version 2 of the License, or
- * (at your option) any later version; incorporated herein by reference.
- *
- * ----------------------------------------------------------------------- */
-
-/*
- * raid6recov.c
- *
- * RAID-6 data recovery in dual failure mode. In single failure mode,
- * use the RAID-5 algorithm (or, in the case of Q failure, just reconstruct
- * the syndrome.)
- */
-
-#include <linux/raid/pq.h>
-
-/* Recover two failed data blocks. */
-void raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
- void **ptrs)
-{
- u8 *p, *q, *dp, *dq;
- u8 px, qx, db;
- const u8 *pbmul; /* P multiplier table for B data */
- const u8 *qmul; /* Q multiplier table (for both) */
-
- p = (u8 *)ptrs[disks-2];
- q = (u8 *)ptrs[disks-1];
-
- /* Compute syndrome with zero for the missing data pages
- Use the dead data pages as temporary storage for
- delta p and delta q */
- dp = (u8 *)ptrs[faila];
- ptrs[faila] = (void *)raid6_empty_zero_page;
- ptrs[disks-2] = dp;
- dq = (u8 *)ptrs[failb];
- ptrs[failb] = (void *)raid6_empty_zero_page;
- ptrs[disks-1] = dq;
-
- raid6_call.gen_syndrome(disks, bytes, ptrs);
-
- /* Restore pointer table */
- ptrs[faila] = dp;
- ptrs[failb] = dq;
- ptrs[disks-2] = p;
- ptrs[disks-1] = q;
-
- /* Now, pick the proper data tables */
- pbmul = raid6_gfmul[raid6_gfexi[failb-faila]];
- qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]];
-
- /* Now do it... */
- while ( bytes-- ) {
- px = *p ^ *dp;
- qx = qmul[*q ^ *dq];
- *dq++ = db = pbmul[px] ^ qx; /* Reconstructed B */
- *dp++ = db ^ px; /* Reconstructed A */
- p++; q++;
- }
-}
-EXPORT_SYMBOL_GPL(raid6_2data_recov);
-
-/* Recover failure of one data block plus the P block */
-void raid6_datap_recov(int disks, size_t bytes, int faila, void **ptrs)
-{
- u8 *p, *q, *dq;
- const u8 *qmul; /* Q multiplier table */
-
- p = (u8 *)ptrs[disks-2];
- q = (u8 *)ptrs[disks-1];
-
- /* Compute syndrome with zero for the missing data page
- Use the dead data page as temporary storage for delta q */
- dq = (u8 *)ptrs[faila];
- ptrs[faila] = (void *)raid6_empty_zero_page;
- ptrs[disks-1] = dq;
-
- raid6_call.gen_syndrome(disks, bytes, ptrs);
-
- /* Restore pointer table */
- ptrs[faila] = dq;
- ptrs[disks-1] = q;
-
- /* Now, pick the proper data tables */
- qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]]];
-
- /* Now do it... */
- while ( bytes-- ) {
- *p++ ^= *dq = qmul[*q ^ *dq];
- q++; dq++;
- }
-}
-EXPORT_SYMBOL_GPL(raid6_datap_recov);
-
-#ifndef __KERNEL__
-/* Testing only */
-
-/* Recover two failed blocks. */
-void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, void **ptrs)
-{
- if ( faila > failb ) {
- int tmp = faila;
- faila = failb;
- failb = tmp;
- }
-
- if ( failb == disks-1 ) {
- if ( faila == disks-2 ) {
- /* P+Q failure. Just rebuild the syndrome. */
- raid6_call.gen_syndrome(disks, bytes, ptrs);
- } else {
- /* data+Q failure. Reconstruct data from P,
- then rebuild syndrome. */
- /* NOT IMPLEMENTED - equivalent to RAID-5 */
- }
- } else {
- if ( failb == disks-2 ) {
- /* data+P failure. */
- raid6_datap_recov(disks, bytes, faila, ptrs);
- } else {
- /* data+data failure. */
- raid6_2data_recov(disks, bytes, faila, failb, ptrs);
- }
- }
-}
-
-#endif
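
A brief summary of the algebra behind the two table lookups in raid6_2data_recov() above (a paraphrase of the standard RAID-6 recovery identity, assuming the usual definitions of the raid6_gfexi/raid6_gfinv tables; not text from the removed file): once the syndrome is regenerated with the failed slots pointed at the zero page, dp and dq hold P' and Q', so px = P ^ P' = Da ^ Db and *q ^ *dq = g^a.Da ^ g^b.Db in GF(2^8), where a = faila, b = failb, g = 2. Dividing the second quantity by (g^a ^ g^b) gives qx, and solving the pair of equations yields Db = px / (g^(b-a) ^ 1) ^ qx, which is exactly pbmul[px] ^ qx in the loop; Da then follows as Db ^ px.
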
diff --git a/drivers/md/raid6sse1.c b/drivers/md/raid6sse1.c
deleted file mode 100644
index b274dd5eab8..00000000000
--- a/drivers/md/raid6sse1.c
+++ /dev/null
@@ -1,162 +0,0 @@
-/* -*- linux-c -*- ------------------------------------------------------- *
- *
- * Copyright 2002 H. Peter Anvin - All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
- * Boston MA 02111-1307, USA; either version 2 of the License, or
- * (at your option) any later version; incorporated herein by reference.
- *
- * ----------------------------------------------------------------------- */
-
-/*
- * raid6sse1.c
- *
- * SSE-1/MMXEXT implementation of RAID-6 syndrome functions
- *
- * This is really an MMX implementation, but it requires SSE-1 or
- * AMD MMXEXT for prefetch support and a few other features. The
- * support for nontemporal memory accesses is enough to make this
- * worthwhile as a separate implementation.
- */
-
-#if defined(__i386__) && !defined(__arch_um__)
-
-#include <linux/raid/pq.h>
-#include "raid6x86.h"
-
-/* Defined in raid6mmx.c */
-extern const struct raid6_mmx_constants {
- u64 x1d;
-} raid6_mmx_constants;
-
-static int raid6_have_sse1_or_mmxext(void)
-{
- /* Not really boot_cpu but "all_cpus" */
- return boot_cpu_has(X86_FEATURE_MMX) &&
- (boot_cpu_has(X86_FEATURE_XMM) ||
- boot_cpu_has(X86_FEATURE_MMXEXT));
-}
-
-/*
- * Plain SSE1 implementation
- */
-static void raid6_sse11_gen_syndrome(int disks, size_t bytes, void **ptrs)
-{
- u8 **dptr = (u8 **)ptrs;
- u8 *p, *q;
- int d, z, z0;
-
- z0 = disks - 3; /* Highest data disk */
- p = dptr[z0+1]; /* XOR parity */
- q = dptr[z0+2]; /* RS syndrome */
-
- kernel_fpu_begin();
-
- asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
- asm volatile("pxor %mm5,%mm5"); /* Zero temp */
-
- for ( d = 0 ; d < bytes ; d += 8 ) {
- asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
- asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */
- asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d]));
- asm volatile("movq %mm2,%mm4"); /* Q[0] */
- asm volatile("movq %0,%%mm6" : : "m" (dptr[z0-1][d]));
- for ( z = z0-2 ; z >= 0 ; z-- ) {
- asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
- asm volatile("pcmpgtb %mm4,%mm5");
- asm volatile("paddb %mm4,%mm4");
- asm volatile("pand %mm0,%mm5");
- asm volatile("pxor %mm5,%mm4");
- asm volatile("pxor %mm5,%mm5");
- asm volatile("pxor %mm6,%mm2");
- asm volatile("pxor %mm6,%mm4");
- asm volatile("movq %0,%%mm6" : : "m" (dptr[z][d]));
- }
- asm volatile("pcmpgtb %mm4,%mm5");
- asm volatile("paddb %mm4,%mm4");
- asm volatile("pand %mm0,%mm5");
- asm volatile("pxor %mm5,%mm4");
- asm volatile("pxor %mm5,%mm5");
- asm volatile("pxor %mm6,%mm2");
- asm volatile("pxor %mm6,%mm4");
-
- asm volatile("movntq %%mm2,%0" : "=m" (p[d]));
- asm volatile("movntq %%mm4,%0" : "=m" (q[d]));
- }
-
- asm volatile("sfence" : : : "memory");
- kernel_fpu_end();
-}
-
-const struct raid6_calls raid6_sse1x1 = {
- raid6_sse11_gen_syndrome,
- raid6_have_sse1_or_mmxext,
- "sse1x1",
- 1 /* Has cache hints */
-};
-
-/*
- * Unrolled-by-2 SSE1 implementation
- */
-static void raid6_sse12_gen_syndrome(int disks, size_t bytes, void **ptrs)
-{
- u8 **dptr = (u8 **)ptrs;
- u8 *p, *q;
- int d, z, z0;
-
- z0 = disks - 3; /* Highest data disk */
- p = dptr[z0+1]; /* XOR parity */
- q = dptr[z0+2]; /* RS syndrome */
-
- kernel_fpu_begin();
-
- asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
- asm volatile("pxor %mm5,%mm5"); /* Zero temp */
- asm volatile("pxor %mm7,%mm7"); /* Zero temp */
-
- /* We uniformly assume a single prefetch covers at least 16 bytes */
- for ( d = 0 ; d < bytes ; d += 16 ) {
- asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
- asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */
- asm volatile("movq %0,%%mm3" : : "m" (dptr[z0][d+8])); /* P[1] */
- asm volatile("movq %mm2,%mm4"); /* Q[0] */
- asm volatile("movq %mm3,%mm6"); /* Q[1] */
- for ( z = z0-1 ; z >= 0 ; z-- ) {
- asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
- asm volatile("pcmpgtb %mm4,%mm5");
- asm volatile("pcmpgtb %mm6,%mm7");
- asm volatile("paddb %mm4,%mm4");
- asm volatile("paddb %mm6,%mm6");
- asm volatile("pand %mm0,%mm5");
- asm volatile("pand %mm0,%mm7");
- asm volatile("pxor %mm5,%mm4");
- asm volatile("pxor %mm7,%mm6");
- asm volatile("movq %0,%%mm5" : : "m" (dptr[z][d]));
- asm volatile("movq %0,%%mm7" : : "m" (dptr[z][d+8]));
- asm volatile("pxor %mm5,%mm2");
- asm volatile("pxor %mm7,%mm3");
- asm volatile("pxor %mm5,%mm4");
- asm volatile("pxor %mm7,%mm6");
- asm volatile("pxor %mm5,%mm5");
- asm volatile("pxor %mm7,%mm7");
- }
- asm volatile("movntq %%mm2,%0" : "=m" (p[d]));
- asm volatile("movntq %%mm3,%0" : "=m" (p[d+8]));
- asm volatile("movntq %%mm4,%0" : "=m" (q[d]));
- asm volatile("movntq %%mm6,%0" : "=m" (q[d+8]));
- }
-
- asm volatile("sfence" : :: "memory");
- kernel_fpu_end();
-}
-
-const struct raid6_calls raid6_sse1x2 = {
- raid6_sse12_gen_syndrome,
- raid6_have_sse1_or_mmxext,
- "sse1x2",
- 1 /* Has cache hints */
-};
-
-#endif
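
Two details worth noting about the loops above relative to the plain MMX version (an observation, not text from the removed file): in the sse1x1 loop the load of the next disk's block is hoisted so each iteration prefetches and loads ahead while the arithmetic for the previous block is still in flight, and both loops write results with movntq, a weakly ordered nontemporal store that bypasses the cache; the trailing sfence is what orders those stores ahead of any later memory accesses.
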
diff --git a/drivers/md/raid6sse2.c b/drivers/md/raid6sse2.c
deleted file mode 100644
index 6ed6c6c0389..00000000000
--- a/drivers/md/raid6sse2.c
+++ /dev/null
@@ -1,262 +0,0 @@
-/* -*- linux-c -*- ------------------------------------------------------- *
- *
- * Copyright 2002 H. Peter Anvin - All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
- * Boston MA 02111-1307, USA; either version 2 of the License, or
- * (at your option) any later version; incorporated herein by reference.
- *
- * ----------------------------------------------------------------------- */
-
-/*
- * raid6sse2.c
- *
- * SSE-2 implementation of RAID-6 syndrome functions
- *
- */
-
-#if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__)
-
-#include <linux/raid/pq.h>
-#include "raid6x86.h"
-
-static const struct raid6_sse_constants {
- u64 x1d[2];
-} raid6_sse_constants __attribute__((aligned(16))) = {
- { 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL },
-};
-
-static int raid6_have_sse2(void)
-{
- /* Not really boot_cpu but "all_cpus" */
- return boot_cpu_has(X86_FEATURE_MMX) &&
- boot_cpu_has(X86_FEATURE_FXSR) &&
- boot_cpu_has(X86_FEATURE_XMM) &&
- boot_cpu_has(X86_FEATURE_XMM2);
-}
-
-/*
- * Plain SSE2 implementation
- */
-static void raid6_sse21_gen_syndrome(int disks, size_t bytes, void **ptrs)
-{
- u8 **dptr = (u8 **)ptrs;
- u8 *p, *q;
- int d, z, z0;
-
- z0 = disks - 3; /* Highest data disk */
- p = dptr[z0+1]; /* XOR parity */
- q = dptr[z0+2]; /* RS syndrome */
-
- kernel_fpu_begin();
-
- asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
- asm volatile("pxor %xmm5,%xmm5"); /* Zero temp */
-
- for ( d = 0 ; d < bytes ; d += 16 ) {
- asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
- asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d])); /* P[0] */
- asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d]));
- asm volatile("movdqa %xmm2,%xmm4"); /* Q[0] */
- asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z0-1][d]));
- for ( z = z0-2 ; z >= 0 ; z-- ) {
- asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
- asm volatile("pcmpgtb %xmm4,%xmm5");
- asm volatile("paddb %xmm4,%xmm4");
- asm volatile("pand %xmm0,%xmm5");
- asm volatile("pxor %xmm5,%xmm4");
- asm volatile("pxor %xmm5,%xmm5");
- asm volatile("pxor %xmm6,%xmm2");
- asm volatile("pxor %xmm6,%xmm4");
- asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z][d]));
- }
- asm volatile("pcmpgtb %xmm4,%xmm5");
- asm volatile("paddb %xmm4,%xmm4");
- asm volatile("pand %xmm0,%xmm5");
- asm volatile("pxor %xmm5,%xmm4");
- asm volatile("pxor %xmm5,%xmm5");
- asm volatile("pxor %xmm6,%xmm2");
- asm volatile("pxor %xmm6,%xmm4");
-
- asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
- asm volatile("pxor %xmm2,%xmm2");
- asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
- asm volatile("pxor %xmm4,%xmm4");
- }
-
- asm volatile("sfence" : : : "memory");
- kernel_fpu_end();
-}
-
-const struct raid6_calls raid6_sse2x1 = {
- raid6_sse21_gen_syndrome,
- raid6_have_sse2,
- "sse2x1",
- 1 /* Has cache hints */
-};
-
-/*
- * Unrolled-by-2 SSE2 implementation
- */
-static void raid6_sse22_gen_syndrome(int disks, size_t bytes, void **ptrs)
-{
- u8 **dptr = (u8 **)ptrs;
- u8 *p, *q;
- int d, z, z0;
-
- z0 = disks - 3; /* Highest data disk */
- p = dptr[z0+1]; /* XOR parity */
- q = dptr[z0+2]; /* RS syndrome */
-
- kernel_fpu_begin();
-
- asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
- asm volatile("pxor %xmm5,%xmm5"); /* Zero temp */
- asm volatile("pxor %xmm7,%xmm7"); /* Zero temp */
-
- /* We uniformly assume a single prefetch covers at least 32 bytes */
- for ( d = 0 ; d < bytes ; d += 32 ) {
- asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
- asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d])); /* P[0] */
- asm volatile("movdqa %0,%%xmm3" : : "m" (dptr[z0][d+16])); /* P[1] */
- asm volatile("movdqa %xmm2,%xmm4"); /* Q[0] */
- asm volatile("movdqa %xmm3,%xmm6"); /* Q[1] */
- for ( z = z0-1 ; z >= 0 ; z-- ) {
- asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
- asm volatile("pcmpgtb %xmm4,%xmm5");
- asm volatile("pcmpgtb %xmm6,%xmm7");
- asm volatile("paddb %xmm4,%xmm4");
- asm volatile("paddb %xmm6,%xmm6");
- asm volatile("pand %xmm0,%xmm5");
- asm volatile("pand %xmm0,%xmm7");
- asm volatile("pxor %xmm5,%xmm4");
- asm volatile("pxor %xmm7,%xmm6");
- asm volatile("movdqa %0,%%xmm5" : : "m" (dptr[z][d]));
- asm volatile("movdqa %0,%%xmm7" : : "m" (dptr[z][d+16]));
- asm volatile("pxor %xmm5,%xmm2");
- asm volatile("pxor %xmm7,%xmm3");
- asm volatile("pxor %xmm5,%xmm4");
- asm volatile("pxor %xmm7,%xmm6");
- asm volatile("pxor %xmm5,%xmm5");
- asm volatile("pxor %xmm7,%xmm7");
- }
- asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
- asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
- asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
- asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
- }
-
- asm volatile("sfence" : : : "memory");
- kernel_fpu_end();
-}
-
-const struct raid6_calls raid6_sse2x2 = {
- raid6_sse22_gen_syndrome,
- raid6_have_sse2,
- "sse2x2",
- 1 /* Has cache hints */
-};
-
-#endif
-
-#if defined(__x86_64__) && !defined(__arch_um__)
-
-/*
- * Unrolled-by-4 SSE2 implementation
- */
-static void raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs)
-{
- u8 **dptr = (u8 **)ptrs;
- u8 *p, *q;
- int d, z, z0;
-
- z0 = disks - 3; /* Highest data disk */
- p = dptr[z0+1]; /* XOR parity */
- q = dptr[z0+2]; /* RS syndrome */
-
- kernel_fpu_begin();
-
- asm volatile("movdqa %0,%%xmm0" :: "m" (raid6_sse_constants.x1d[0]));
- asm volatile("pxor %xmm2,%xmm2"); /* P[0] */
- asm volatile("pxor %xmm3,%xmm3"); /* P[1] */
- asm volatile("pxor %xmm4,%xmm4"); /* Q[0] */
- asm volatile("pxor %xmm5,%xmm5"); /* Zero temp */
- asm volatile("pxor %xmm6,%xmm6"); /* Q[1] */
- asm volatile("pxor %xmm7,%xmm7"); /* Zero temp */
- asm volatile("pxor %xmm10,%xmm10"); /* P[2] */
- asm volatile("pxor %xmm11,%xmm11"); /* P[3] */
- asm volatile("pxor %xmm12,%xmm12"); /* Q[2] */
- asm volatile("pxor %xmm13,%xmm13"); /* Zero temp */
- asm volatile("pxor %xmm14,%xmm14"); /* Q[3] */
- asm volatile("pxor %xmm15,%xmm15"); /* Zero temp */
-
- for ( d = 0 ; d < bytes ; d += 64 ) {
- for ( z = z0 ; z >= 0 ; z-- ) {
- /* The second prefetch seems to improve performance... */
- asm volatile("prefetchnta %0" :: "m" (dptr[z][d]));
- asm volatile("prefetchnta %0" :: "m" (dptr[z][d+32]));
- asm volatile("pcmpgtb %xmm4,%xmm5");
- asm volatile("pcmpgtb %xmm6,%xmm7");
- asm volatile("pcmpgtb %xmm12,%xmm13");
- asm volatile("pcmpgtb %xmm14,%xmm15");
- asm volatile("paddb %xmm4,%xmm4");
- asm volatile("paddb %xmm6,%xmm6");
- asm volatile("paddb %xmm12,%xmm12");
- asm volatile("paddb %xmm14,%xmm14");
- asm volatile("pand %xmm0,%xmm5");
- asm volatile("pand %xmm0,%xmm7");
- asm volatile("pand %xmm0,%xmm13");
- asm volatile("pand %xmm0,%xmm15");
- asm volatile("pxor %xmm5,%xmm4");
- asm volatile("pxor %xmm7,%xmm6");
- asm volatile("pxor %xmm13,%xmm12");
- asm volatile("pxor %xmm15,%xmm14");
- asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
- asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d+16]));
- asm volatile("movdqa %0,%%xmm13" :: "m" (dptr[z][d+32]));
- asm volatile("movdqa %0,%%xmm15" :: "m" (dptr[z][d+48]));
- asm volatile("pxor %xmm5,%xmm2");
- asm volatile("pxor %xmm7,%xmm3");
- asm volatile("pxor %xmm13,%xmm10");
- asm volatile("pxor %xmm15,%xmm11");
- asm volatile("pxor %xmm5,%xmm4");
- asm volatile("pxor %xmm7,%xmm6");
- asm volatile("pxor %xmm13,%xmm12");
- asm volatile("pxor %xmm15,%xmm14");
- asm volatile("pxor %xmm5,%xmm5");
- asm volatile("pxor %xmm7,%xmm7");
- asm volatile("pxor %xmm13,%xmm13");
- asm volatile("pxor %xmm15,%xmm15");
- }
- asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
- asm volatile("pxor %xmm2,%xmm2");
- asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
- asm volatile("pxor %xmm3,%xmm3");
- asm volatile("movntdq %%xmm10,%0" : "=m" (p[d+32]));
- asm volatile("pxor %xmm10,%xmm10");
- asm volatile("movntdq %%xmm11,%0" : "=m" (p[d+48]));
- asm volatile("pxor %xmm11,%xmm11");
- asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
- asm volatile("pxor %xmm4,%xmm4");
- asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
- asm volatile("pxor %xmm6,%xmm6");
- asm volatile("movntdq %%xmm12,%0" : "=m" (q[d+32]));
- asm volatile("pxor %xmm12,%xmm12");
- asm volatile("movntdq %%xmm14,%0" : "=m" (q[d+48]));
- asm volatile("pxor %xmm14,%xmm14");
- }
-
- asm volatile("sfence" : : : "memory");
- kernel_fpu_end();
-}
-
-const struct raid6_calls raid6_sse2x4 = {
- raid6_sse24_gen_syndrome,
- raid6_have_sse2,
- "sse2x4",
- 1 /* Has cache hints */
-};
-
-#endif
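
All of the gen_syndrome implementations in this change (MMX, SSE-1, SSE-2, AltiVec, the unrolled integer versions) vectorize the same Horner-style recurrence over the data disks. A scalar reference for what a single byte lane computes, reusing the hypothetical raid6_gf_mul2() helper sketched after raid6mmx.c above (illustrative only):

	/* P is the plain XOR parity.  Q is evaluated as
	 * Q = D[z0], then Q = 2*Q ^ D[z] for z = z0-1 .. 0,
	 * which equals the sum over z of g^z * D[z] with g = 2. */
	static void gen_syndrome_byte(int z0, const u8 *d, u8 *p, u8 *q)
	{
		u8 wp = d[z0], wq = d[z0];
		int z;

		for (z = z0 - 1; z >= 0; z--) {
			wp ^= d[z];
			wq = raid6_gf_mul2(wq) ^ d[z];
		}
		*p = wp;
		*q = wq;
	}
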
diff --git a/drivers/md/raid6test/Makefile b/drivers/md/raid6test/Makefile
deleted file mode 100644
index 2874cbef529..00000000000
--- a/drivers/md/raid6test/Makefile
+++ /dev/null
@@ -1,75 +0,0 @@
-#
-# This is a simple Makefile to test some of the RAID-6 code
-# from userspace.
-#
-
-CC = gcc
-OPTFLAGS = -O2 # Adjust as desired
-CFLAGS = -I.. -I ../../../include -g $(OPTFLAGS)
-LD = ld
-AWK = awk
-AR = ar
-RANLIB = ranlib
-
-.c.o:
- $(CC) $(CFLAGS) -c -o $@ $<
-
-%.c: ../%.c
- cp -f $< $@
-
-%.uc: ../%.uc
- cp -f $< $@
-
-all: raid6.a raid6test
-
-raid6.a: raid6int1.o raid6int2.o raid6int4.o raid6int8.o raid6int16.o \
- raid6int32.o \
- raid6mmx.o raid6sse1.o raid6sse2.o \
- raid6altivec1.o raid6altivec2.o raid6altivec4.o raid6altivec8.o \
- raid6recov.o raid6algos.o \
- raid6tables.o
- rm -f $@
- $(AR) cq $@ $^
- $(RANLIB) $@
-
-raid6test: test.c raid6.a
- $(CC) $(CFLAGS) -o raid6test $^
-
-raid6altivec1.c: raid6altivec.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=1 < raid6altivec.uc > $@
-
-raid6altivec2.c: raid6altivec.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=2 < raid6altivec.uc > $@
-
-raid6altivec4.c: raid6altivec.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=4 < raid6altivec.uc > $@
-
-raid6altivec8.c: raid6altivec.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=8 < raid6altivec.uc > $@
-
-raid6int1.c: raid6int.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=1 < raid6int.uc > $@
-
-raid6int2.c: raid6int.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=2 < raid6int.uc > $@
-
-raid6int4.c: raid6int.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=4 < raid6int.uc > $@
-
-raid6int8.c: raid6int.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=8 < raid6int.uc > $@
-
-raid6int16.c: raid6int.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=16 < raid6int.uc > $@
-
-raid6int32.c: raid6int.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=32 < raid6int.uc > $@
-
-raid6tables.c: mktables
- ./mktables > raid6tables.c
-
-clean:
- rm -f *.o *.a mktables mktables.c raid6int.uc raid6*.c raid6test
-
-spotless: clean
- rm -f *~
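
For reference, the harness above was built and run from userspace roughly as follows (illustrative; the paths are those that applied before this move):

	cd drivers/md/raid6test
	make		# copies the sources down, unrolls the .uc templates, builds raid6.a and raid6test
	./raid6test	# exercises every compiled-in algorithm against all two-disk failure pairs
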
diff --git a/drivers/md/raid6test/test.c b/drivers/md/raid6test/test.c
deleted file mode 100644
index 7a930318b17..00000000000
--- a/drivers/md/raid6test/test.c
+++ /dev/null
@@ -1,124 +0,0 @@
-/* -*- linux-c -*- ------------------------------------------------------- *
- *
- * Copyright 2002-2007 H. Peter Anvin - All Rights Reserved
- *
- * This file is part of the Linux kernel, and is made available under
- * the terms of the GNU General Public License version 2 or (at your
- * option) any later version; incorporated herein by reference.
- *
- * ----------------------------------------------------------------------- */
-
-/*
- * raid6test.c
- *
- * Test RAID-6 recovery with various algorithms
- */
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#include <linux/raid/pq.h>
-
-#define NDISKS 16 /* Including P and Q */
-
-const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
-struct raid6_calls raid6_call;
-
-char *dataptrs[NDISKS];
-char data[NDISKS][PAGE_SIZE];
-char recovi[PAGE_SIZE], recovj[PAGE_SIZE];
-
-static void makedata(void)
-{
- int i, j;
-
- for (i = 0; i < NDISKS; i++) {
- for (j = 0; j < PAGE_SIZE; j++)
- data[i][j] = rand();
-
- dataptrs[i] = data[i];
- }
-}
-
-static char disk_type(int d)
-{
- switch (d) {
- case NDISKS-2:
- return 'P';
- case NDISKS-1:
- return 'Q';
- default:
- return 'D';
- }
-}
-
-static int test_disks(int i, int j)
-{
- int erra, errb;
-
- memset(recovi, 0xf0, PAGE_SIZE);
- memset(recovj, 0xba, PAGE_SIZE);
-
- dataptrs[i] = recovi;
- dataptrs[j] = recovj;
-
- raid6_dual_recov(NDISKS, PAGE_SIZE, i, j, (void **)&dataptrs);
-
- erra = memcmp(data[i], recovi, PAGE_SIZE);
- errb = memcmp(data[j], recovj, PAGE_SIZE);
-
- if (i < NDISKS-2 && j == NDISKS-1) {
- /* We don't implement the DQ failure scenario, since it's
- equivalent to a RAID-5 failure (XOR, then recompute Q) */
- erra = errb = 0;
- } else {
- printf("algo=%-8s faila=%3d(%c) failb=%3d(%c) %s\n",
- raid6_call.name,
- i, disk_type(i),
- j, disk_type(j),
- (!erra && !errb) ? "OK" :
- !erra ? "ERRB" :
- !errb ? "ERRA" : "ERRAB");
- }
-
- dataptrs[i] = data[i];
- dataptrs[j] = data[j];
-
- return erra || errb;
-}
-
-int main(int argc, char *argv[])
-{
- const struct raid6_calls *const *algo;
- int i, j;
- int err = 0;
-
- makedata();
-
- for (algo = raid6_algos; *algo; algo++) {
- if (!(*algo)->valid || (*algo)->valid()) {
- raid6_call = **algo;
-
- /* Nuke syndromes */
- memset(data[NDISKS-2], 0xee, 2*PAGE_SIZE);
-
- /* Generate assumed good syndrome */
- raid6_call.gen_syndrome(NDISKS, PAGE_SIZE,
- (void **)&dataptrs);
-
- for (i = 0; i < NDISKS-1; i++)
- for (j = i+1; j < NDISKS; j++)
- err += test_disks(i, j);
- }
- printf("\n");
- }
-
- printf("\n");
- /* Pick the best algorithm test */
- raid6_select_algo();
-
- if (err)
- printf("\n*** ERRORS FOUND ***\n");
-
- return err;
-}
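
Each (faila, failb) pair that test_disks() exercises produces one line in the format passed to printf() above; a successful data+data recovery under the int32x1 algorithm, for example, would print something like (hypothetical values, derived from the format string):

	algo=int32x1  faila=  0(D) failb=  5(D) OK

Pairs of the form data+Q are silently counted as passing, since that case is equivalent to a RAID-5 rebuild and is not implemented by raid6_dual_recov().
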
diff --git a/drivers/md/raid6x86.h b/drivers/md/raid6x86.h
deleted file mode 100644
index 4c22c156855..00000000000
--- a/drivers/md/raid6x86.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/* ----------------------------------------------------------------------- *
- *
- * Copyright 2002-2004 H. Peter Anvin - All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
- * Boston MA 02111-1307, USA; either version 2 of the License, or
- * (at your option) any later version; incorporated herein by reference.
- *
- * ----------------------------------------------------------------------- */
-
-/*
- * raid6x86.h
- *
- * Definitions common to x86 and x86-64 RAID-6 code only
- */
-
-#ifndef LINUX_RAID_RAID6X86_H
-#define LINUX_RAID_RAID6X86_H
-
-#if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__)
-
-#ifdef __KERNEL__ /* Real code */
-
-#include <asm/i387.h>
-
-#else /* Dummy code for user space testing */
-
-static inline void kernel_fpu_begin(void)
-{
-}
-
-static inline void kernel_fpu_end(void)
-{
-}
-
-#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */
-#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions
- * (fast save and restore) */
-#define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */
-#define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */
-#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */
-
-/* Should work well enough on modern CPUs for testing */
-static inline int boot_cpu_has(int flag)
-{
- u32 eax = (flag >> 5) ? 0x80000001 : 1;
- u32 edx;
-
- asm volatile("cpuid"
- : "+a" (eax), "=d" (edx)
- : : "ecx", "ebx");
-
- return (edx >> (flag & 31)) & 1;
-}
-
-#endif /* ndef __KERNEL__ */
-
-#endif
-#endif
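
One note on the userspace boot_cpu_has() fallback above (an observation, not text from the removed header): the feature constants are encoded as word*32+bit to mirror the kernel's cpufeature tables, so word-0 flags are read from CPUID leaf 1 and word-1 flags (the AMD extensions) from leaf 0x80000001, with the result taken from the corresponding EDX bit. X86_FEATURE_MMXEXT = 1*32+22, for instance, selects leaf 0x80000001 and tests EDX bit 22.
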
diff --git a/drivers/md/unroll.awk b/drivers/md/unroll.awk
deleted file mode 100644
index c6aa03631df..00000000000
--- a/drivers/md/unroll.awk
+++ /dev/null
@@ -1,20 +0,0 @@
-
-# This filter requires one command line option of form -vN=n
-# where n must be a decimal number.
-#
-# Repeat each input line containing $$ n times, replacing $$ with 0...n-1.
-# Replace each $# with n, and each $* with a single $.
-
-BEGIN {
- n = N + 0
-}
-{
- if (/\$\$/) { rep = n } else { rep = 1 }
- for (i = 0; i < rep; ++i) {
- tmp = $0
- gsub(/\$\$/, i, tmp)
- gsub(/\$\#/, n, tmp)
- gsub(/\$\*/, "$", tmp)
- print tmp
- }
-}
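
A minimal illustration of the filter above (hypothetical invocation with the unroll factor passed as -vN=2; the input line is taken from the raid6int.uc fragment earlier in this diff):

	input:    wq$$ = w1$$ ^ wd$$;
	output:   wq0 = w10 ^ wd0;
	          wq1 = w11 ^ wd1;

Lines without $$ pass through once, with $# replaced by 2 and $* by a single literal $.
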