Diffstat (limited to 'drivers/md')
52 files changed, 37142 insertions, 0 deletions
diff --git a/drivers/md/.gitignore b/drivers/md/.gitignore new file mode 100644 index 0000000..a7afec6 --- /dev/null +++ b/drivers/md/.gitignore @@ -0,0 +1,4 @@ +mktables +raid6altivec*.c +raid6int*.c +raid6tables.c diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig new file mode 100644 index 0000000..2281b50 --- /dev/null +++ b/drivers/md/Kconfig @@ -0,0 +1,292 @@ +# +# Block device driver configuration +# + +menuconfig MD + bool "Multiple devices driver support (RAID and LVM)" + depends on BLOCK + help + Support multiple physical spindles through a single logical device. + Required for RAID and logical volume management. + +if MD + +config BLK_DEV_MD + tristate "RAID support" + ---help--- + This driver lets you combine several hard disk partitions into one + logical block device. This can be used to simply append one + partition to another one or to combine several redundant hard disks + into a RAID1/4/5 device so as to provide protection against hard + disk failures. This is called "Software RAID" since the combining of + the partitions is done by the kernel. "Hardware RAID" means that the + combining is done by a dedicated controller; if you have such a + controller, you do not need to say Y here. + + More information about Software RAID on Linux is contained in the + Software RAID mini-HOWTO, available from + <http://www.tldp.org/docs.html#howto>. There you will also learn + where to get the supporting user space utilities raidtools. + + If unsure, say N. + +config MD_AUTODETECT + bool "Autodetect RAID arrays during kernel boot" + depends on BLK_DEV_MD=y + default y + ---help--- + If you say Y here, then the kernel will try to autodetect raid + arrays as part of its boot process. + + If you don't use raid and say Y, this autodetection can cause + a several-second delay in the boot time due to various + synchronisation steps that are part of this step. + + If unsure, say Y. + +config MD_LINEAR + tristate "Linear (append) mode" + depends on BLK_DEV_MD + ---help--- + If you say Y here, then your multiple devices driver will be able to + use the so-called linear mode, i.e. it will combine the hard disk + partitions by simply appending one to the other. + + To compile this as a module, choose M here: the module + will be called linear. + + If unsure, say Y. + +config MD_RAID0 + tristate "RAID-0 (striping) mode" + depends on BLK_DEV_MD + ---help--- + If you say Y here, then your multiple devices driver will be able to + use the so-called raid0 mode, i.e. it will combine the hard disk + partitions into one logical device in such a fashion as to fill them + up evenly, one chunk here and one chunk there. This will increase + the throughput rate if the partitions reside on distinct disks. + + Information about Software RAID on Linux is contained in the + Software-RAID mini-HOWTO, available from + <http://www.tldp.org/docs.html#howto>. There you will also + learn where to get the supporting user space utilities raidtools. + + To compile this as a module, choose M here: the module + will be called raid0. + + If unsure, say Y. + +config MD_RAID1 + tristate "RAID-1 (mirroring) mode" + depends on BLK_DEV_MD + ---help--- + A RAID-1 set consists of several disk drives which are exact copies + of each other. In the event of a mirror failure, the RAID driver + will continue to use the operational mirrors in the set, providing + an error free MD (multiple device) to the higher levels of the + kernel. 
In a set with N drives, the available space is the capacity + of a single drive, and the set protects against a failure of (N - 1) + drives. + + Information about Software RAID on Linux is contained in the + Software-RAID mini-HOWTO, available from + <http://www.tldp.org/docs.html#howto>. There you will also + learn where to get the supporting user space utilities raidtools. + + If you want to use such a RAID-1 set, say Y. To compile this code + as a module, choose M here: the module will be called raid1. + + If unsure, say Y. + +config MD_RAID10 + tristate "RAID-10 (mirrored striping) mode (EXPERIMENTAL)" + depends on BLK_DEV_MD && EXPERIMENTAL + ---help--- + RAID-10 provides a combination of striping (RAID-0) and + mirroring (RAID-1) with easier configuration and more flexible + layout. + Unlike RAID-0, but like RAID-1, RAID-10 requires all devices to + be the same size (or at least, only as much as the smallest device + will be used). + RAID-10 provides a variety of layouts that provide different levels + of redundancy and performance. + + RAID-10 requires mdadm-1.7.0 or later, available at: + + ftp://ftp.kernel.org/pub/linux/utils/raid/mdadm/ + + If unsure, say Y. + +config MD_RAID456 + tristate "RAID-4/RAID-5/RAID-6 mode" + depends on BLK_DEV_MD + select ASYNC_MEMCPY + select ASYNC_XOR + ---help--- + A RAID-5 set of N drives with a capacity of C MB per drive provides + the capacity of C * (N - 1) MB, and protects against a failure + of a single drive. For a given sector (row) number, (N - 1) drives + contain data sectors, and one drive contains the parity protection. + For a RAID-4 set, the parity blocks are present on a single drive, + while a RAID-5 set distributes the parity across the drives in one + of the available parity distribution methods. + + A RAID-6 set of N drives with a capacity of C MB per drive + provides the capacity of C * (N - 2) MB, and protects + against a failure of any two drives. For a given sector + (row) number, (N - 2) drives contain data sectors, and two + drives contains two independent redundancy syndromes. Like + RAID-5, RAID-6 distributes the syndromes across the drives + in one of the available parity distribution methods. + + Information about Software RAID on Linux is contained in the + Software-RAID mini-HOWTO, available from + <http://www.tldp.org/docs.html#howto>. There you will also + learn where to get the supporting user space utilities raidtools. + + If you want to use such a RAID-4/RAID-5/RAID-6 set, say Y. To + compile this code as a module, choose M here: the module + will be called raid456. + + If unsure, say Y. + +config MD_RAID5_RESHAPE + bool "Support adding drives to a raid-5 array" + depends on MD_RAID456 + default y + ---help--- + A RAID-5 set can be expanded by adding extra drives. This + requires "restriping" the array which means (almost) every + block must be written to a different place. + + This option allows such restriping to be done while the array + is online. + + You will need mdadm version 2.4.1 or later to use this + feature safely. During the early stage of reshape there is + a critical section where live data is being over-written. A + crash during this time needs extra care for recovery. The + newer mdadm takes a copy of the data in the critical section + and will restore it, if necessary, after a crash. + + The mdadm usage is e.g. + mdadm --grow /dev/md1 --raid-disks=6 + to grow '/dev/md1' to having 6 disks. + + Note: The array can only be expanded, not contracted. 
+ There should be enough spares already present to make the new + array workable. + + If unsure, say Y. + +config MD_MULTIPATH + tristate "Multipath I/O support" + depends on BLK_DEV_MD + help + Multipath-IO is the ability of certain devices to address the same + physical disk over multiple 'IO paths'. The code ensures that such + paths can be defined and handled at runtime, and ensures that a + transparent failover to the backup path(s) happens if a IO errors + arrives on the primary path. + + If unsure, say N. + +config MD_FAULTY + tristate "Faulty test module for MD" + depends on BLK_DEV_MD + help + The "faulty" module allows for a block device that occasionally returns + read or write errors. It is useful for testing. + + In unsure, say N. + +config BLK_DEV_DM + tristate "Device mapper support" + ---help--- + Device-mapper is a low level volume manager. It works by allowing + people to specify mappings for ranges of logical sectors. Various + mapping types are available, in addition people may write their own + modules containing custom mappings if they wish. + + Higher level volume managers such as LVM2 use this driver. + + To compile this as a module, choose M here: the module will be + called dm-mod. + + If unsure, say N. + +config DM_DEBUG + boolean "Device mapper debugging support" + depends on BLK_DEV_DM + ---help--- + Enable this for messages that may help debug device-mapper problems. + + If unsure, say N. + +config DM_CRYPT + tristate "Crypt target support" + depends on BLK_DEV_DM + select CRYPTO + select CRYPTO_CBC + ---help--- + This device-mapper target allows you to create a device that + transparently encrypts the data on it. You'll need to activate + the ciphers you're going to use in the cryptoapi configuration. + + Information on how to use dm-crypt can be found on + + <http://www.saout.de/misc/dm-crypt/> + + To compile this code as a module, choose M here: the module will + be called dm-crypt. + + If unsure, say N. + +config DM_SNAPSHOT + tristate "Snapshot target" + depends on BLK_DEV_DM + ---help--- + Allow volume managers to take writable snapshots of a device. + +config DM_MIRROR + tristate "Mirror target" + depends on BLK_DEV_DM + ---help--- + Allow volume managers to mirror logical volumes, also + needed for live data migration tools such as 'pvmove'. + +config DM_ZERO + tristate "Zero target" + depends on BLK_DEV_DM + ---help--- + A target that discards writes, and returns all zeroes for + reads. Useful in some recovery situations. + +config DM_MULTIPATH + tristate "Multipath target" + depends on BLK_DEV_DM + # nasty syntax but means make DM_MULTIPATH independent + # of SCSI_DH if the latter isn't defined but if + # it is, DM_MULTIPATH must depend on it. We get a build + # error if SCSI_DH=m and DM_MULTIPATH=y + depends on SCSI_DH || !SCSI_DH + ---help--- + Allow volume managers to support multipath hardware. + +config DM_DELAY + tristate "I/O delaying target (EXPERIMENTAL)" + depends on BLK_DEV_DM && EXPERIMENTAL + ---help--- + A target that delays reads and/or writes and can send + them to different devices. Useful for testing. + + If unsure, say N. + +config DM_UEVENT + bool "DM uevents (EXPERIMENTAL)" + depends on BLK_DEV_DM && EXPERIMENTAL + ---help--- + Generate udev events for DM events. + +endif # MD diff --git a/drivers/md/Makefile b/drivers/md/Makefile new file mode 100644 index 0000000..1c61580 --- /dev/null +++ b/drivers/md/Makefile @@ -0,0 +1,111 @@ +# +# Makefile for the kernel software RAID and LVM drivers. 
+# + +dm-mod-objs := dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \ + dm-ioctl.o dm-io.o dm-kcopyd.o +dm-multipath-objs := dm-path-selector.o dm-mpath.o +dm-snapshot-objs := dm-snap.o dm-exception-store.o +dm-mirror-objs := dm-raid1.o +md-mod-objs := md.o bitmap.o +raid456-objs := raid5.o raid6algos.o raid6recov.o raid6tables.o \ + raid6int1.o raid6int2.o raid6int4.o \ + raid6int8.o raid6int16.o raid6int32.o \ + raid6altivec1.o raid6altivec2.o raid6altivec4.o \ + raid6altivec8.o \ + raid6mmx.o raid6sse1.o raid6sse2.o +hostprogs-y := mktables + +# Note: link order is important. All raid personalities +# and must come before md.o, as they each initialise +# themselves, and md.o may use the personalities when it +# auto-initialised. + +obj-$(CONFIG_MD_LINEAR) += linear.o +obj-$(CONFIG_MD_RAID0) += raid0.o +obj-$(CONFIG_MD_RAID1) += raid1.o +obj-$(CONFIG_MD_RAID10) += raid10.o +obj-$(CONFIG_MD_RAID456) += raid456.o +obj-$(CONFIG_MD_MULTIPATH) += multipath.o +obj-$(CONFIG_MD_FAULTY) += faulty.o +obj-$(CONFIG_BLK_DEV_MD) += md-mod.o +obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o +obj-$(CONFIG_DM_CRYPT) += dm-crypt.o +obj-$(CONFIG_DM_DELAY) += dm-delay.o +obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o +obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o +obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o dm-region-hash.o +obj-$(CONFIG_DM_ZERO) += dm-zero.o + +quiet_cmd_unroll = UNROLL $@ + cmd_unroll = $(PERL) $(srctree)/$(src)/unroll.pl $(UNROLL) \ + < $< > $@ || ( rm -f $@ && exit 1 ) + +ifeq ($(CONFIG_ALTIVEC),y) +altivec_flags := -maltivec -mabi=altivec +endif + +ifeq ($(CONFIG_DM_UEVENT),y) +dm-mod-objs += dm-uevent.o +endif + +targets += raid6int1.c +$(obj)/raid6int1.c: UNROLL := 1 +$(obj)/raid6int1.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE + $(call if_changed,unroll) + +targets += raid6int2.c +$(obj)/raid6int2.c: UNROLL := 2 +$(obj)/raid6int2.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE + $(call if_changed,unroll) + +targets += raid6int4.c +$(obj)/raid6int4.c: UNROLL := 4 +$(obj)/raid6int4.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE + $(call if_changed,unroll) + +targets += raid6int8.c +$(obj)/raid6int8.c: UNROLL := 8 +$(obj)/raid6int8.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE + $(call if_changed,unroll) + +targets += raid6int16.c +$(obj)/raid6int16.c: UNROLL := 16 +$(obj)/raid6int16.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE + $(call if_changed,unroll) + +targets += raid6int32.c +$(obj)/raid6int32.c: UNROLL := 32 +$(obj)/raid6int32.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE + $(call if_changed,unroll) + +CFLAGS_raid6altivec1.o += $(altivec_flags) +targets += raid6altivec1.c +$(obj)/raid6altivec1.c: UNROLL := 1 +$(obj)/raid6altivec1.c: $(src)/raid6altivec.uc $(src)/unroll.pl FORCE + $(call if_changed,unroll) + +CFLAGS_raid6altivec2.o += $(altivec_flags) +targets += raid6altivec2.c +$(obj)/raid6altivec2.c: UNROLL := 2 +$(obj)/raid6altivec2.c: $(src)/raid6altivec.uc $(src)/unroll.pl FORCE + $(call if_changed,unroll) + +CFLAGS_raid6altivec4.o += $(altivec_flags) +targets += raid6altivec4.c +$(obj)/raid6altivec4.c: UNROLL := 4 +$(obj)/raid6altivec4.c: $(src)/raid6altivec.uc $(src)/unroll.pl FORCE + $(call if_changed,unroll) + +CFLAGS_raid6altivec8.o += $(altivec_flags) +targets += raid6altivec8.c +$(obj)/raid6altivec8.c: UNROLL := 8 +$(obj)/raid6altivec8.c: $(src)/raid6altivec.uc $(src)/unroll.pl FORCE + $(call if_changed,unroll) + +quiet_cmd_mktable = TABLE $@ + cmd_mktable = $(obj)/mktables > $@ || ( rm -f $@ && exit 1 ) + +targets += raid6tables.c +$(obj)/raid6tables.c: 
$(obj)/mktables FORCE + $(call if_changed,mktable) diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c new file mode 100644 index 0000000..666b7ba --- /dev/null +++ b/drivers/md/bitmap.c @@ -0,0 +1,1663 @@ +/* + * bitmap.c two-level bitmap (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003 + * + * bitmap_create - sets up the bitmap structure + * bitmap_destroy - destroys the bitmap structure + * + * additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.: + * - added disk storage for bitmap + * - changes to allow various bitmap chunk sizes + */ + +/* + * Still to do: + * + * flush after percent set rather than just time based. (maybe both). + * wait if count gets too high, wake when it drops to half. + */ + +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/timer.h> +#include <linux/sched.h> +#include <linux/list.h> +#include <linux/file.h> +#include <linux/mount.h> +#include <linux/buffer_head.h> +#include <linux/raid/md.h> +#include <linux/raid/bitmap.h> + +/* debug macros */ + +#define DEBUG 0 + +#if DEBUG +/* these are for debugging purposes only! */ + +/* define one and only one of these */ +#define INJECT_FAULTS_1 0 /* cause bitmap_alloc_page to fail always */ +#define INJECT_FAULTS_2 0 /* cause bitmap file to be kicked when first bit set*/ +#define INJECT_FAULTS_3 0 /* treat bitmap file as kicked at init time */ +#define INJECT_FAULTS_4 0 /* undef */ +#define INJECT_FAULTS_5 0 /* undef */ +#define INJECT_FAULTS_6 0 + +/* if these are defined, the driver will fail! debug only */ +#define INJECT_FATAL_FAULT_1 0 /* fail kmalloc, causing bitmap_create to fail */ +#define INJECT_FATAL_FAULT_2 0 /* undef */ +#define INJECT_FATAL_FAULT_3 0 /* undef */ +#endif + +//#define DPRINTK PRINTK /* set this NULL to avoid verbose debug output */ +#define DPRINTK(x...) do { } while(0) + +#ifndef PRINTK +# if DEBUG > 0 +# define PRINTK(x...) printk(KERN_DEBUG x) +# else +# define PRINTK(x...) +# endif +#endif + +static inline char * bmname(struct bitmap *bitmap) +{ + return bitmap->mddev ? 
mdname(bitmap->mddev) : "mdX"; +} + + +/* + * just a placeholder - calls kmalloc for bitmap pages + */ +static unsigned char *bitmap_alloc_page(struct bitmap *bitmap) +{ + unsigned char *page; + +#ifdef INJECT_FAULTS_1 + page = NULL; +#else + page = kmalloc(PAGE_SIZE, GFP_NOIO); +#endif + if (!page) + printk("%s: bitmap_alloc_page FAILED\n", bmname(bitmap)); + else + PRINTK("%s: bitmap_alloc_page: allocated page at %p\n", + bmname(bitmap), page); + return page; +} + +/* + * for now just a placeholder -- just calls kfree for bitmap pages + */ +static void bitmap_free_page(struct bitmap *bitmap, unsigned char *page) +{ + PRINTK("%s: bitmap_free_page: free page %p\n", bmname(bitmap), page); + kfree(page); +} + +/* + * check a page and, if necessary, allocate it (or hijack it if the alloc fails) + * + * 1) check to see if this page is allocated, if it's not then try to alloc + * 2) if the alloc fails, set the page's hijacked flag so we'll use the + * page pointer directly as a counter + * + * if we find our page, we increment the page's refcount so that it stays + * allocated while we're using it + */ +static int bitmap_checkpage(struct bitmap *bitmap, unsigned long page, int create) +{ + unsigned char *mappage; + + if (page >= bitmap->pages) { + printk(KERN_ALERT + "%s: invalid bitmap page request: %lu (> %lu)\n", + bmname(bitmap), page, bitmap->pages-1); + return -EINVAL; + } + + + if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */ + return 0; + + if (bitmap->bp[page].map) /* page is already allocated, just return */ + return 0; + + if (!create) + return -ENOENT; + + spin_unlock_irq(&bitmap->lock); + + /* this page has not been allocated yet */ + + if ((mappage = bitmap_alloc_page(bitmap)) == NULL) { + PRINTK("%s: bitmap map page allocation failed, hijacking\n", + bmname(bitmap)); + /* failed - set the hijacked flag so that we can use the + * pointer as a counter */ + spin_lock_irq(&bitmap->lock); + if (!bitmap->bp[page].map) + bitmap->bp[page].hijacked = 1; + goto out; + } + + /* got a page */ + + spin_lock_irq(&bitmap->lock); + + /* recheck the page */ + + if (bitmap->bp[page].map || bitmap->bp[page].hijacked) { + /* somebody beat us to getting the page */ + bitmap_free_page(bitmap, mappage); + return 0; + } + + /* no page was in place and we have one, so install it */ + + memset(mappage, 0, PAGE_SIZE); + bitmap->bp[page].map = mappage; + bitmap->missing_pages--; +out: + return 0; +} + + +/* if page is completely empty, put it back on the free list, or dealloc it */ +/* if page was hijacked, unmark the flag so it might get alloced next time */ +/* Note: lock should be held when calling this */ +static void bitmap_checkfree(struct bitmap *bitmap, unsigned long page) +{ + char *ptr; + + if (bitmap->bp[page].count) /* page is still busy */ + return; + + /* page is no longer in use, it can be released */ + + if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */ + bitmap->bp[page].hijacked = 0; + bitmap->bp[page].map = NULL; + return; + } + + /* normal case, free the page */ + +#if 0 +/* actually ... let's not. 
We will probably need the page again exactly when + * memory is tight and we are flusing to disk + */ + return; +#else + ptr = bitmap->bp[page].map; + bitmap->bp[page].map = NULL; + bitmap->missing_pages++; + bitmap_free_page(bitmap, ptr); + return; +#endif +} + + +/* + * bitmap file handling - read and write the bitmap file and its superblock + */ + +/* + * basic page I/O operations + */ + +/* IO operations when bitmap is stored near all superblocks */ +static struct page *read_sb_page(mddev_t *mddev, long offset, + struct page *page, + unsigned long index, int size) +{ + /* choose a good rdev and read the page from there */ + + mdk_rdev_t *rdev; + struct list_head *tmp; + sector_t target; + + if (!page) + page = alloc_page(GFP_KERNEL); + if (!page) + return ERR_PTR(-ENOMEM); + + rdev_for_each(rdev, tmp, mddev) { + if (! test_bit(In_sync, &rdev->flags) + || test_bit(Faulty, &rdev->flags)) + continue; + + target = rdev->sb_start + offset + index * (PAGE_SIZE/512); + + if (sync_page_io(rdev->bdev, target, + roundup(size, bdev_hardsect_size(rdev->bdev)), + page, READ)) { + page->index = index; + attach_page_buffers(page, NULL); /* so that free_buffer will + * quietly no-op */ + return page; + } + } + return ERR_PTR(-EIO); + +} + +static mdk_rdev_t *next_active_rdev(mdk_rdev_t *rdev, mddev_t *mddev) +{ + /* Iterate the disks of an mddev, using rcu to protect access to the + * linked list, and raising the refcount of devices we return to ensure + * they don't disappear while in use. + * As devices are only added or removed when raid_disk is < 0 and + * nr_pending is 0 and In_sync is clear, the entries we return will + * still be in the same position on the list when we re-enter + * list_for_each_continue_rcu. + */ + struct list_head *pos; + rcu_read_lock(); + if (rdev == NULL) + /* start at the beginning */ + pos = &mddev->disks; + else { + /* release the previous rdev and start from there. 
*/ + rdev_dec_pending(rdev, mddev); + pos = &rdev->same_set; + } + list_for_each_continue_rcu(pos, &mddev->disks) { + rdev = list_entry(pos, mdk_rdev_t, same_set); + if (rdev->raid_disk >= 0 && + test_bit(In_sync, &rdev->flags) && + !test_bit(Faulty, &rdev->flags)) { + /* this is a usable devices */ + atomic_inc(&rdev->nr_pending); + rcu_read_unlock(); + return rdev; + } + } + rcu_read_unlock(); + return NULL; +} + +static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait) +{ + mdk_rdev_t *rdev = NULL; + mddev_t *mddev = bitmap->mddev; + + while ((rdev = next_active_rdev(rdev, mddev)) != NULL) { + int size = PAGE_SIZE; + if (page->index == bitmap->file_pages-1) + size = roundup(bitmap->last_page_size, + bdev_hardsect_size(rdev->bdev)); + /* Just make sure we aren't corrupting data or + * metadata + */ + if (bitmap->offset < 0) { + /* DATA BITMAP METADATA */ + if (bitmap->offset + + (long)(page->index * (PAGE_SIZE/512)) + + size/512 > 0) + /* bitmap runs in to metadata */ + goto bad_alignment; + if (rdev->data_offset + mddev->size*2 + > rdev->sb_start + bitmap->offset) + /* data runs in to bitmap */ + goto bad_alignment; + } else if (rdev->sb_start < rdev->data_offset) { + /* METADATA BITMAP DATA */ + if (rdev->sb_start + + bitmap->offset + + page->index*(PAGE_SIZE/512) + size/512 + > rdev->data_offset) + /* bitmap runs in to data */ + goto bad_alignment; + } else { + /* DATA METADATA BITMAP - no problems */ + } + md_super_write(mddev, rdev, + rdev->sb_start + bitmap->offset + + page->index * (PAGE_SIZE/512), + size, + page); + } + + if (wait) + md_super_wait(mddev); + return 0; + + bad_alignment: + rcu_read_unlock(); + return -EINVAL; +} + +static void bitmap_file_kick(struct bitmap *bitmap); +/* + * write out a page to a file + */ +static void write_page(struct bitmap *bitmap, struct page *page, int wait) +{ + struct buffer_head *bh; + + if (bitmap->file == NULL) { + switch (write_sb_page(bitmap, page, wait)) { + case -EINVAL: + bitmap->flags |= BITMAP_WRITE_ERROR; + } + } else { + + bh = page_buffers(page); + + while (bh && bh->b_blocknr) { + atomic_inc(&bitmap->pending_writes); + set_buffer_locked(bh); + set_buffer_mapped(bh); + submit_bh(WRITE, bh); + bh = bh->b_this_page; + } + + if (wait) { + wait_event(bitmap->write_wait, + atomic_read(&bitmap->pending_writes)==0); + } + } + if (bitmap->flags & BITMAP_WRITE_ERROR) + bitmap_file_kick(bitmap); +} + +static void end_bitmap_write(struct buffer_head *bh, int uptodate) +{ + struct bitmap *bitmap = bh->b_private; + unsigned long flags; + + if (!uptodate) { + spin_lock_irqsave(&bitmap->lock, flags); + bitmap->flags |= BITMAP_WRITE_ERROR; + spin_unlock_irqrestore(&bitmap->lock, flags); + } + if (atomic_dec_and_test(&bitmap->pending_writes)) + wake_up(&bitmap->write_wait); +} + +/* copied from buffer.c */ +static void +__clear_page_buffers(struct page *page) +{ + ClearPagePrivate(page); + set_page_private(page, 0); + page_cache_release(page); +} +static void free_buffers(struct page *page) +{ + struct buffer_head *bh = page_buffers(page); + + while (bh) { + struct buffer_head *next = bh->b_this_page; + free_buffer_head(bh); + bh = next; + } + __clear_page_buffers(page); + put_page(page); +} + +/* read a page from a file. + * We both read the page, and attach buffers to the page to record the + * address of each block (using bmap). These addresses will be used + * to write the block later, completely bypassing the filesystem. 
+ * This usage is similar to how swap files are handled, and allows us + * to write to a file with no concerns of memory allocation failing. + */ +static struct page *read_page(struct file *file, unsigned long index, + struct bitmap *bitmap, + unsigned long count) +{ + struct page *page = NULL; + struct inode *inode = file->f_path.dentry->d_inode; + struct buffer_head *bh; + sector_t block; + + PRINTK("read bitmap file (%dB @ %Lu)\n", (int)PAGE_SIZE, + (unsigned long long)index << PAGE_SHIFT); + + page = alloc_page(GFP_KERNEL); + if (!page) + page = ERR_PTR(-ENOMEM); + if (IS_ERR(page)) + goto out; + + bh = alloc_page_buffers(page, 1<<inode->i_blkbits, 0); + if (!bh) { + put_page(page); + page = ERR_PTR(-ENOMEM); + goto out; + } + attach_page_buffers(page, bh); + block = index << (PAGE_SHIFT - inode->i_blkbits); + while (bh) { + if (count == 0) + bh->b_blocknr = 0; + else { + bh->b_blocknr = bmap(inode, block); + if (bh->b_blocknr == 0) { + /* Cannot use this file! */ + free_buffers(page); + page = ERR_PTR(-EINVAL); + goto out; + } + bh->b_bdev = inode->i_sb->s_bdev; + if (count < (1<<inode->i_blkbits)) + count = 0; + else + count -= (1<<inode->i_blkbits); + + bh->b_end_io = end_bitmap_write; + bh->b_private = bitmap; + atomic_inc(&bitmap->pending_writes); + set_buffer_locked(bh); + set_buffer_mapped(bh); + submit_bh(READ, bh); + } + block++; + bh = bh->b_this_page; + } + page->index = index; + + wait_event(bitmap->write_wait, + atomic_read(&bitmap->pending_writes)==0); + if (bitmap->flags & BITMAP_WRITE_ERROR) { + free_buffers(page); + page = ERR_PTR(-EIO); + } +out: + if (IS_ERR(page)) + printk(KERN_ALERT "md: bitmap read error: (%dB @ %Lu): %ld\n", + (int)PAGE_SIZE, + (unsigned long long)index << PAGE_SHIFT, + PTR_ERR(page)); + return page; +} + +/* + * bitmap file superblock operations + */ + +/* update the event counter and sync the superblock to disk */ +void bitmap_update_sb(struct bitmap *bitmap) +{ + bitmap_super_t *sb; + unsigned long flags; + + if (!bitmap || !bitmap->mddev) /* no bitmap for this array */ + return; + spin_lock_irqsave(&bitmap->lock, flags); + if (!bitmap->sb_page) { /* no superblock */ + spin_unlock_irqrestore(&bitmap->lock, flags); + return; + } + spin_unlock_irqrestore(&bitmap->lock, flags); + sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0); + sb->events = cpu_to_le64(bitmap->mddev->events); + if (bitmap->mddev->events < bitmap->events_cleared) { + /* rocking back to read-only */ + bitmap->events_cleared = bitmap->mddev->events; + sb->events_cleared = cpu_to_le64(bitmap->events_cleared); + } + kunmap_atomic(sb, KM_USER0); + write_page(bitmap, bitmap->sb_page, 1); +} + +/* print out the bitmap file superblock */ +void bitmap_print_sb(struct bitmap *bitmap) +{ + bitmap_super_t *sb; + + if (!bitmap || !bitmap->sb_page) + return; + sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0); + printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap)); + printk(KERN_DEBUG " magic: %08x\n", le32_to_cpu(sb->magic)); + printk(KERN_DEBUG " version: %d\n", le32_to_cpu(sb->version)); + printk(KERN_DEBUG " uuid: %08x.%08x.%08x.%08x\n", + *(__u32 *)(sb->uuid+0), + *(__u32 *)(sb->uuid+4), + *(__u32 *)(sb->uuid+8), + *(__u32 *)(sb->uuid+12)); + printk(KERN_DEBUG " events: %llu\n", + (unsigned long long) le64_to_cpu(sb->events)); + printk(KERN_DEBUG "events cleared: %llu\n", + (unsigned long long) le64_to_cpu(sb->events_cleared)); + printk(KERN_DEBUG " state: %08x\n", le32_to_cpu(sb->state)); + printk(KERN_DEBUG " chunksize: %d B\n", 
le32_to_cpu(sb->chunksize)); + printk(KERN_DEBUG " daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep)); + printk(KERN_DEBUG " sync size: %llu KB\n", + (unsigned long long)le64_to_cpu(sb->sync_size)/2); + printk(KERN_DEBUG "max write behind: %d\n", le32_to_cpu(sb->write_behind)); + kunmap_atomic(sb, KM_USER0); +} + +/* read the superblock from the bitmap file and initialize some bitmap fields */ +static int bitmap_read_sb(struct bitmap *bitmap) +{ + char *reason = NULL; + bitmap_super_t *sb; + unsigned long chunksize, daemon_sleep, write_behind; + unsigned long long events; + int err = -EINVAL; + + /* page 0 is the superblock, read it... */ + if (bitmap->file) { + loff_t isize = i_size_read(bitmap->file->f_mapping->host); + int bytes = isize > PAGE_SIZE ? PAGE_SIZE : isize; + + bitmap->sb_page = read_page(bitmap->file, 0, bitmap, bytes); + } else { + bitmap->sb_page = read_sb_page(bitmap->mddev, bitmap->offset, + NULL, + 0, sizeof(bitmap_super_t)); + } + if (IS_ERR(bitmap->sb_page)) { + err = PTR_ERR(bitmap->sb_page); + bitmap->sb_page = NULL; + return err; + } + + sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0); + + chunksize = le32_to_cpu(sb->chunksize); + daemon_sleep = le32_to_cpu(sb->daemon_sleep); + write_behind = le32_to_cpu(sb->write_behind); + + /* verify that the bitmap-specific fields are valid */ + if (sb->magic != cpu_to_le32(BITMAP_MAGIC)) + reason = "bad magic"; + else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO || + le32_to_cpu(sb->version) > BITMAP_MAJOR_HI) + reason = "unrecognized superblock version"; + else if (chunksize < PAGE_SIZE) + reason = "bitmap chunksize too small"; + else if ((1 << ffz(~chunksize)) != chunksize) + reason = "bitmap chunksize not a power of 2"; + else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT / HZ) + reason = "daemon sleep period out of range"; + else if (write_behind > COUNTER_MAX) + reason = "write-behind limit out of range (0 - 16383)"; + if (reason) { + printk(KERN_INFO "%s: invalid bitmap file superblock: %s\n", + bmname(bitmap), reason); + goto out; + } + + /* keep the array size field of the bitmap superblock up to date */ + sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors); + + if (!bitmap->mddev->persistent) + goto success; + + /* + * if we have a persistent array superblock, compare the + * bitmap's UUID and event counter to the mddev's + */ + if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) { + printk(KERN_INFO "%s: bitmap superblock UUID mismatch\n", + bmname(bitmap)); + goto out; + } + events = le64_to_cpu(sb->events); + if (events < bitmap->mddev->events) { + printk(KERN_INFO "%s: bitmap file is out of date (%llu < %llu) " + "-- forcing full recovery\n", bmname(bitmap), events, + (unsigned long long) bitmap->mddev->events); + sb->state |= cpu_to_le32(BITMAP_STALE); + } +success: + /* assign fields using values from superblock */ + bitmap->chunksize = chunksize; + bitmap->daemon_sleep = daemon_sleep; + bitmap->daemon_lastrun = jiffies; + bitmap->max_write_behind = write_behind; + bitmap->flags |= le32_to_cpu(sb->state); + if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN) + bitmap->flags |= BITMAP_HOSTENDIAN; + bitmap->events_cleared = le64_to_cpu(sb->events_cleared); + if (sb->state & cpu_to_le32(BITMAP_STALE)) + bitmap->events_cleared = bitmap->mddev->events; + err = 0; +out: + kunmap_atomic(sb, KM_USER0); + if (err) + bitmap_print_sb(bitmap); + return err; +} + +enum bitmap_mask_op { + MASK_SET, + MASK_UNSET +}; + +/* record the state of the bitmap in the superblock. 
Return the old value */ +static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits, + enum bitmap_mask_op op) +{ + bitmap_super_t *sb; + unsigned long flags; + int old; + + spin_lock_irqsave(&bitmap->lock, flags); + if (!bitmap->sb_page) { /* can't set the state */ + spin_unlock_irqrestore(&bitmap->lock, flags); + return 0; + } + spin_unlock_irqrestore(&bitmap->lock, flags); + sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0); + old = le32_to_cpu(sb->state) & bits; + switch (op) { + case MASK_SET: sb->state |= cpu_to_le32(bits); + break; + case MASK_UNSET: sb->state &= cpu_to_le32(~bits); + break; + default: BUG(); + } + kunmap_atomic(sb, KM_USER0); + return old; +} + +/* + * general bitmap file operations + */ + +/* calculate the index of the page that contains this bit */ +static inline unsigned long file_page_index(unsigned long chunk) +{ + return CHUNK_BIT_OFFSET(chunk) >> PAGE_BIT_SHIFT; +} + +/* calculate the (bit) offset of this bit within a page */ +static inline unsigned long file_page_offset(unsigned long chunk) +{ + return CHUNK_BIT_OFFSET(chunk) & (PAGE_BITS - 1); +} + +/* + * return a pointer to the page in the filemap that contains the given bit + * + * this lookup is complicated by the fact that the bitmap sb might be exactly + * 1 page (e.g., x86) or less than 1 page -- so the bitmap might start on page + * 0 or page 1 + */ +static inline struct page *filemap_get_page(struct bitmap *bitmap, + unsigned long chunk) +{ + if (file_page_index(chunk) >= bitmap->file_pages) return NULL; + return bitmap->filemap[file_page_index(chunk) - file_page_index(0)]; +} + + +static void bitmap_file_unmap(struct bitmap *bitmap) +{ + struct page **map, *sb_page; + unsigned long *attr; + int pages; + unsigned long flags; + + spin_lock_irqsave(&bitmap->lock, flags); + map = bitmap->filemap; + bitmap->filemap = NULL; + attr = bitmap->filemap_attr; + bitmap->filemap_attr = NULL; + pages = bitmap->file_pages; + bitmap->file_pages = 0; + sb_page = bitmap->sb_page; + bitmap->sb_page = NULL; + spin_unlock_irqrestore(&bitmap->lock, flags); + + while (pages--) + if (map[pages]->index != 0) /* 0 is sb_page, release it below */ + free_buffers(map[pages]); + kfree(map); + kfree(attr); + + if (sb_page) + free_buffers(sb_page); +} + +static void bitmap_file_put(struct bitmap *bitmap) +{ + struct file *file; + unsigned long flags; + + spin_lock_irqsave(&bitmap->lock, flags); + file = bitmap->file; + bitmap->file = NULL; + spin_unlock_irqrestore(&bitmap->lock, flags); + + if (file) + wait_event(bitmap->write_wait, + atomic_read(&bitmap->pending_writes)==0); + bitmap_file_unmap(bitmap); + + if (file) { + struct inode *inode = file->f_path.dentry->d_inode; + invalidate_mapping_pages(inode->i_mapping, 0, -1); + fput(file); + } +} + + +/* + * bitmap_file_kick - if an error occurs while manipulating the bitmap file + * then it is no longer reliable, so we stop using it and we mark the file + * as failed in the superblock + */ +static void bitmap_file_kick(struct bitmap *bitmap) +{ + char *path, *ptr = NULL; + + if (bitmap_mask_state(bitmap, BITMAP_STALE, MASK_SET) == 0) { + bitmap_update_sb(bitmap); + + if (bitmap->file) { + path = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (path) + ptr = d_path(&bitmap->file->f_path, path, + PAGE_SIZE); + + + printk(KERN_ALERT + "%s: kicking failed bitmap file %s from array!\n", + bmname(bitmap), IS_ERR(ptr) ? 
"" : ptr); + + kfree(path); + } else + printk(KERN_ALERT + "%s: disabling internal bitmap due to errors\n", + bmname(bitmap)); + } + + bitmap_file_put(bitmap); + + return; +} + +enum bitmap_page_attr { + BITMAP_PAGE_DIRTY = 0, // there are set bits that need to be synced + BITMAP_PAGE_CLEAN = 1, // there are bits that might need to be cleared + BITMAP_PAGE_NEEDWRITE=2, // there are cleared bits that need to be synced +}; + +static inline void set_page_attr(struct bitmap *bitmap, struct page *page, + enum bitmap_page_attr attr) +{ + __set_bit((page->index<<2) + attr, bitmap->filemap_attr); +} + +static inline void clear_page_attr(struct bitmap *bitmap, struct page *page, + enum bitmap_page_attr attr) +{ + __clear_bit((page->index<<2) + attr, bitmap->filemap_attr); +} + +static inline unsigned long test_page_attr(struct bitmap *bitmap, struct page *page, + enum bitmap_page_attr attr) +{ + return test_bit((page->index<<2) + attr, bitmap->filemap_attr); +} + +/* + * bitmap_file_set_bit -- called before performing a write to the md device + * to set (and eventually sync) a particular bit in the bitmap file + * + * we set the bit immediately, then we record the page number so that + * when an unplug occurs, we can flush the dirty pages out to disk + */ +static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block) +{ + unsigned long bit; + struct page *page; + void *kaddr; + unsigned long chunk = block >> CHUNK_BLOCK_SHIFT(bitmap); + + if (!bitmap->filemap) { + return; + } + + page = filemap_get_page(bitmap, chunk); + if (!page) return; + bit = file_page_offset(chunk); + + /* set the bit */ + kaddr = kmap_atomic(page, KM_USER0); + if (bitmap->flags & BITMAP_HOSTENDIAN) + set_bit(bit, kaddr); + else + ext2_set_bit(bit, kaddr); + kunmap_atomic(kaddr, KM_USER0); + PRINTK("set file bit %lu page %lu\n", bit, page->index); + + /* record page number so it gets flushed to disk when unplug occurs */ + set_page_attr(bitmap, page, BITMAP_PAGE_DIRTY); + +} + +/* this gets called when the md device is ready to unplug its underlying + * (slave) device queues -- before we let any writes go down, we need to + * sync the dirty pages of the bitmap file to disk */ +void bitmap_unplug(struct bitmap *bitmap) +{ + unsigned long i, flags; + int dirty, need_write; + struct page *page; + int wait = 0; + + if (!bitmap) + return; + + /* look at each page to see if there are any set bits that need to be + * flushed out to disk */ + for (i = 0; i < bitmap->file_pages; i++) { + spin_lock_irqsave(&bitmap->lock, flags); + if (!bitmap->filemap) { + spin_unlock_irqrestore(&bitmap->lock, flags); + return; + } + page = bitmap->filemap[i]; + dirty = test_page_attr(bitmap, page, BITMAP_PAGE_DIRTY); + need_write = test_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE); + clear_page_attr(bitmap, page, BITMAP_PAGE_DIRTY); + clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE); + if (dirty) + wait = 1; + spin_unlock_irqrestore(&bitmap->lock, flags); + + if (dirty | need_write) + write_page(bitmap, page, 0); + } + if (wait) { /* if any writes were performed, we need to wait on them */ + if (bitmap->file) + wait_event(bitmap->write_wait, + atomic_read(&bitmap->pending_writes)==0); + else + md_super_wait(bitmap->mddev); + } + if (bitmap->flags & BITMAP_WRITE_ERROR) + bitmap_file_kick(bitmap); +} + +static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed); +/* * bitmap_init_from_disk -- called at bitmap_create time to initialize + * the in-memory bitmap from the on-disk bitmap -- also, sets up the + * 
memory mapping of the bitmap file + * Special cases: + * if there's no bitmap file, or if the bitmap file had been + * previously kicked from the array, we mark all the bits as + * 1's in order to cause a full resync. + * + * We ignore all bits for sectors that end earlier than 'start'. + * This is used when reading an out-of-date bitmap... + */ +static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) +{ + unsigned long i, chunks, index, oldindex, bit; + struct page *page = NULL, *oldpage = NULL; + unsigned long num_pages, bit_cnt = 0; + struct file *file; + unsigned long bytes, offset; + int outofdate; + int ret = -ENOSPC; + void *paddr; + + chunks = bitmap->chunks; + file = bitmap->file; + + BUG_ON(!file && !bitmap->offset); + +#ifdef INJECT_FAULTS_3 + outofdate = 1; +#else + outofdate = bitmap->flags & BITMAP_STALE; +#endif + if (outofdate) + printk(KERN_INFO "%s: bitmap file is out of date, doing full " + "recovery\n", bmname(bitmap)); + + bytes = (chunks + 7) / 8; + + num_pages = (bytes + sizeof(bitmap_super_t) + PAGE_SIZE - 1) / PAGE_SIZE; + + if (file && i_size_read(file->f_mapping->host) < bytes + sizeof(bitmap_super_t)) { + printk(KERN_INFO "%s: bitmap file too short %lu < %lu\n", + bmname(bitmap), + (unsigned long) i_size_read(file->f_mapping->host), + bytes + sizeof(bitmap_super_t)); + goto err; + } + + ret = -ENOMEM; + + bitmap->filemap = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL); + if (!bitmap->filemap) + goto err; + + /* We need 4 bits per page, rounded up to a multiple of sizeof(unsigned long) */ + bitmap->filemap_attr = kzalloc( + roundup( DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)), + GFP_KERNEL); + if (!bitmap->filemap_attr) + goto err; + + oldindex = ~0L; + + for (i = 0; i < chunks; i++) { + int b; + index = file_page_index(i); + bit = file_page_offset(i); + if (index != oldindex) { /* this is a new page, read it in */ + int count; + /* unmap the old page, we're done with it */ + if (index == num_pages-1) + count = bytes + sizeof(bitmap_super_t) + - index * PAGE_SIZE; + else + count = PAGE_SIZE; + if (index == 0) { + /* + * if we're here then the superblock page + * contains some bits (PAGE_SIZE != sizeof sb) + * we've already read it in, so just use it + */ + page = bitmap->sb_page; + offset = sizeof(bitmap_super_t); + if (!file) + read_sb_page(bitmap->mddev, + bitmap->offset, + page, + index, count); + } else if (file) { + page = read_page(file, index, bitmap, count); + offset = 0; + } else { + page = read_sb_page(bitmap->mddev, bitmap->offset, + NULL, + index, count); + offset = 0; + } + if (IS_ERR(page)) { /* read error */ + ret = PTR_ERR(page); + goto err; + } + + oldindex = index; + oldpage = page; + + if (outofdate) { + /* + * if bitmap is out of date, dirty the + * whole page and write it out + */ + paddr = kmap_atomic(page, KM_USER0); + memset(paddr + offset, 0xff, + PAGE_SIZE - offset); + kunmap_atomic(paddr, KM_USER0); + write_page(bitmap, page, 1); + + ret = -EIO; + if (bitmap->flags & BITMAP_WRITE_ERROR) { + /* release, page not in filemap yet */ + put_page(page); + goto err; + } + } + + bitmap->filemap[bitmap->file_pages++] = page; + bitmap->last_page_size = count; + } + paddr = kmap_atomic(page, KM_USER0); + if (bitmap->flags & BITMAP_HOSTENDIAN) + b = test_bit(bit, paddr); + else + b = ext2_test_bit(bit, paddr); + kunmap_atomic(paddr, KM_USER0); + if (b) { + /* if the disk bit is set, set the memory bit */ + bitmap_set_memory_bits(bitmap, i << CHUNK_BLOCK_SHIFT(bitmap), + ((i+1) << (CHUNK_BLOCK_SHIFT(bitmap)) >= 
start) + ); + bit_cnt++; + set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); + } + } + + /* everything went OK */ + ret = 0; + bitmap_mask_state(bitmap, BITMAP_STALE, MASK_UNSET); + + if (bit_cnt) { /* Kick recovery if any bits were set */ + set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery); + md_wakeup_thread(bitmap->mddev->thread); + } + + printk(KERN_INFO "%s: bitmap initialized from disk: " + "read %lu/%lu pages, set %lu bits\n", + bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt); + + return 0; + + err: + printk(KERN_INFO "%s: bitmap initialisation failed: %d\n", + bmname(bitmap), ret); + return ret; +} + +void bitmap_write_all(struct bitmap *bitmap) +{ + /* We don't actually write all bitmap blocks here, + * just flag them as needing to be written + */ + int i; + + for (i=0; i < bitmap->file_pages; i++) + set_page_attr(bitmap, bitmap->filemap[i], + BITMAP_PAGE_NEEDWRITE); +} + + +static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc) +{ + sector_t chunk = offset >> CHUNK_BLOCK_SHIFT(bitmap); + unsigned long page = chunk >> PAGE_COUNTER_SHIFT; + bitmap->bp[page].count += inc; +/* + if (page == 0) printk("count page 0, offset %llu: %d gives %d\n", + (unsigned long long)offset, inc, bitmap->bp[page].count); +*/ + bitmap_checkfree(bitmap, page); +} +static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap, + sector_t offset, int *blocks, + int create); + +/* + * bitmap daemon -- periodically wakes up to clean bits and flush pages + * out to disk + */ + +void bitmap_daemon_work(struct bitmap *bitmap) +{ + unsigned long j; + unsigned long flags; + struct page *page = NULL, *lastpage = NULL; + int blocks; + void *paddr; + + if (bitmap == NULL) + return; + if (time_before(jiffies, bitmap->daemon_lastrun + bitmap->daemon_sleep*HZ)) + goto done; + + bitmap->daemon_lastrun = jiffies; + if (bitmap->allclean) { + bitmap->mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; + return; + } + bitmap->allclean = 1; + + for (j = 0; j < bitmap->chunks; j++) { + bitmap_counter_t *bmc; + spin_lock_irqsave(&bitmap->lock, flags); + if (!bitmap->filemap) { + /* error or shutdown */ + spin_unlock_irqrestore(&bitmap->lock, flags); + break; + } + + page = filemap_get_page(bitmap, j); + + if (page != lastpage) { + /* skip this page unless it's marked as needing cleaning */ + if (!test_page_attr(bitmap, page, BITMAP_PAGE_CLEAN)) { + int need_write = test_page_attr(bitmap, page, + BITMAP_PAGE_NEEDWRITE); + if (need_write) + clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE); + + spin_unlock_irqrestore(&bitmap->lock, flags); + if (need_write) { + write_page(bitmap, page, 0); + bitmap->allclean = 0; + } + continue; + } + + /* grab the new page, sync and release the old */ + if (lastpage != NULL) { + if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) { + clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); + spin_unlock_irqrestore(&bitmap->lock, flags); + write_page(bitmap, lastpage, 0); + } else { + set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); + spin_unlock_irqrestore(&bitmap->lock, flags); + } + } else + spin_unlock_irqrestore(&bitmap->lock, flags); + lastpage = page; + + /* We are possibly going to clear some bits, so make + * sure that events_cleared is up-to-date. 
+ */ + if (bitmap->need_sync) { + bitmap_super_t *sb; + bitmap->need_sync = 0; + sb = kmap_atomic(bitmap->sb_page, KM_USER0); + sb->events_cleared = + cpu_to_le64(bitmap->events_cleared); + kunmap_atomic(sb, KM_USER0); + write_page(bitmap, bitmap->sb_page, 1); + } + spin_lock_irqsave(&bitmap->lock, flags); + clear_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); + } + bmc = bitmap_get_counter(bitmap, j << CHUNK_BLOCK_SHIFT(bitmap), + &blocks, 0); + if (bmc) { +/* + if (j < 100) printk("bitmap: j=%lu, *bmc = 0x%x\n", j, *bmc); +*/ + if (*bmc) + bitmap->allclean = 0; + + if (*bmc == 2) { + *bmc=1; /* maybe clear the bit next time */ + set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); + } else if (*bmc == 1) { + /* we can clear the bit */ + *bmc = 0; + bitmap_count_page(bitmap, j << CHUNK_BLOCK_SHIFT(bitmap), + -1); + + /* clear the bit */ + paddr = kmap_atomic(page, KM_USER0); + if (bitmap->flags & BITMAP_HOSTENDIAN) + clear_bit(file_page_offset(j), paddr); + else + ext2_clear_bit(file_page_offset(j), paddr); + kunmap_atomic(paddr, KM_USER0); + } + } + spin_unlock_irqrestore(&bitmap->lock, flags); + } + + /* now sync the final page */ + if (lastpage != NULL) { + spin_lock_irqsave(&bitmap->lock, flags); + if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) { + clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); + spin_unlock_irqrestore(&bitmap->lock, flags); + write_page(bitmap, lastpage, 0); + } else { + set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); + spin_unlock_irqrestore(&bitmap->lock, flags); + } + } + + done: + if (bitmap->allclean == 0) + bitmap->mddev->thread->timeout = bitmap->daemon_sleep * HZ; +} + +static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap, + sector_t offset, int *blocks, + int create) +{ + /* If 'create', we might release the lock and reclaim it. + * The lock must have been taken with interrupts enabled. + * If !create, we don't release the lock. + */ + sector_t chunk = offset >> CHUNK_BLOCK_SHIFT(bitmap); + unsigned long page = chunk >> PAGE_COUNTER_SHIFT; + unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT; + sector_t csize; + + if (bitmap_checkpage(bitmap, page, create) < 0) { + csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap)); + *blocks = csize - (offset & (csize- 1)); + return NULL; + } + /* now locked ... */ + + if (bitmap->bp[page].hijacked) { /* hijacked pointer */ + /* should we use the first or second counter field + * of the hijacked pointer? 
*/ + int hi = (pageoff > PAGE_COUNTER_MASK); + csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap) + + PAGE_COUNTER_SHIFT - 1); + *blocks = csize - (offset & (csize- 1)); + return &((bitmap_counter_t *) + &bitmap->bp[page].map)[hi]; + } else { /* page is allocated */ + csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap)); + *blocks = csize - (offset & (csize- 1)); + return (bitmap_counter_t *) + &(bitmap->bp[page].map[pageoff]); + } +} + +int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind) +{ + if (!bitmap) return 0; + + if (behind) { + atomic_inc(&bitmap->behind_writes); + PRINTK(KERN_DEBUG "inc write-behind count %d/%d\n", + atomic_read(&bitmap->behind_writes), bitmap->max_write_behind); + } + + while (sectors) { + int blocks; + bitmap_counter_t *bmc; + + spin_lock_irq(&bitmap->lock); + bmc = bitmap_get_counter(bitmap, offset, &blocks, 1); + if (!bmc) { + spin_unlock_irq(&bitmap->lock); + return 0; + } + + if (unlikely((*bmc & COUNTER_MAX) == COUNTER_MAX)) { + DEFINE_WAIT(__wait); + /* note that it is safe to do the prepare_to_wait + * after the test as long as we do it before dropping + * the spinlock. + */ + prepare_to_wait(&bitmap->overflow_wait, &__wait, + TASK_UNINTERRUPTIBLE); + spin_unlock_irq(&bitmap->lock); + blk_unplug(bitmap->mddev->queue); + schedule(); + finish_wait(&bitmap->overflow_wait, &__wait); + continue; + } + + switch(*bmc) { + case 0: + bitmap_file_set_bit(bitmap, offset); + bitmap_count_page(bitmap,offset, 1); + blk_plug_device_unlocked(bitmap->mddev->queue); + /* fall through */ + case 1: + *bmc = 2; + } + + (*bmc)++; + + spin_unlock_irq(&bitmap->lock); + + offset += blocks; + if (sectors > blocks) + sectors -= blocks; + else sectors = 0; + } + bitmap->allclean = 0; + return 0; +} + +void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, + int success, int behind) +{ + if (!bitmap) return; + if (behind) { + atomic_dec(&bitmap->behind_writes); + PRINTK(KERN_DEBUG "dec write-behind count %d/%d\n", + atomic_read(&bitmap->behind_writes), bitmap->max_write_behind); + } + + while (sectors) { + int blocks; + unsigned long flags; + bitmap_counter_t *bmc; + + spin_lock_irqsave(&bitmap->lock, flags); + bmc = bitmap_get_counter(bitmap, offset, &blocks, 0); + if (!bmc) { + spin_unlock_irqrestore(&bitmap->lock, flags); + return; + } + + if (success && + bitmap->events_cleared < bitmap->mddev->events) { + bitmap->events_cleared = bitmap->mddev->events; + bitmap->need_sync = 1; + } + + if (!success && ! 
(*bmc & NEEDED_MASK)) + *bmc |= NEEDED_MASK; + + if ((*bmc & COUNTER_MAX) == COUNTER_MAX) + wake_up(&bitmap->overflow_wait); + + (*bmc)--; + if (*bmc <= 2) { + set_page_attr(bitmap, + filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap)), + BITMAP_PAGE_CLEAN); + } + spin_unlock_irqrestore(&bitmap->lock, flags); + offset += blocks; + if (sectors > blocks) + sectors -= blocks; + else sectors = 0; + } +} + +int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks, + int degraded) +{ + bitmap_counter_t *bmc; + int rv; + if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */ + *blocks = 1024; + return 1; /* always resync if no bitmap */ + } + spin_lock_irq(&bitmap->lock); + bmc = bitmap_get_counter(bitmap, offset, blocks, 0); + rv = 0; + if (bmc) { + /* locked */ + if (RESYNC(*bmc)) + rv = 1; + else if (NEEDED(*bmc)) { + rv = 1; + if (!degraded) { /* don't set/clear bits if degraded */ + *bmc |= RESYNC_MASK; + *bmc &= ~NEEDED_MASK; + } + } + } + spin_unlock_irq(&bitmap->lock); + bitmap->allclean = 0; + return rv; +} + +void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int aborted) +{ + bitmap_counter_t *bmc; + unsigned long flags; +/* + if (offset == 0) printk("bitmap_end_sync 0 (%d)\n", aborted); +*/ if (bitmap == NULL) { + *blocks = 1024; + return; + } + spin_lock_irqsave(&bitmap->lock, flags); + bmc = bitmap_get_counter(bitmap, offset, blocks, 0); + if (bmc == NULL) + goto unlock; + /* locked */ +/* + if (offset == 0) printk("bitmap_end sync found 0x%x, blocks %d\n", *bmc, *blocks); +*/ + if (RESYNC(*bmc)) { + *bmc &= ~RESYNC_MASK; + + if (!NEEDED(*bmc) && aborted) + *bmc |= NEEDED_MASK; + else { + if (*bmc <= 2) { + set_page_attr(bitmap, + filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap)), + BITMAP_PAGE_CLEAN); + } + } + } + unlock: + spin_unlock_irqrestore(&bitmap->lock, flags); + bitmap->allclean = 0; +} + +void bitmap_close_sync(struct bitmap *bitmap) +{ + /* Sync has finished, and any bitmap chunks that weren't synced + * properly have been aborted. It remains to us to clear the + * RESYNC bit wherever it is still on + */ + sector_t sector = 0; + int blocks; + if (!bitmap) + return; + while (sector < bitmap->mddev->resync_max_sectors) { + bitmap_end_sync(bitmap, sector, &blocks, 0); + sector += blocks; + } +} + +void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector) +{ + sector_t s = 0; + int blocks; + + if (!bitmap) + return; + if (sector == 0) { + bitmap->last_end_sync = jiffies; + return; + } + if (time_before(jiffies, (bitmap->last_end_sync + + bitmap->daemon_sleep * HZ))) + return; + wait_event(bitmap->mddev->recovery_wait, + atomic_read(&bitmap->mddev->recovery_active) == 0); + + sector &= ~((1ULL << CHUNK_BLOCK_SHIFT(bitmap)) - 1); + s = 0; + while (s < sector && s < bitmap->mddev->resync_max_sectors) { + bitmap_end_sync(bitmap, s, &blocks, 0); + s += blocks; + } + bitmap->last_end_sync = jiffies; +} + +static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed) +{ + /* For each chunk covered by any of these sectors, set the + * counter to 1 and set resync_needed. They should all + * be 0 at this point + */ + + int secs; + bitmap_counter_t *bmc; + spin_lock_irq(&bitmap->lock); + bmc = bitmap_get_counter(bitmap, offset, &secs, 1); + if (!bmc) { + spin_unlock_irq(&bitmap->lock); + return; + } + if (! 
*bmc) { + struct page *page; + *bmc = 1 | (needed?NEEDED_MASK:0); + bitmap_count_page(bitmap, offset, 1); + page = filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap)); + set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); + } + spin_unlock_irq(&bitmap->lock); + bitmap->allclean = 0; +} + +/* dirty the memory and file bits for bitmap chunks "s" to "e" */ +void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e) +{ + unsigned long chunk; + + for (chunk = s; chunk <= e; chunk++) { + sector_t sec = chunk << CHUNK_BLOCK_SHIFT(bitmap); + bitmap_set_memory_bits(bitmap, sec, 1); + bitmap_file_set_bit(bitmap, sec); + } +} + +/* + * flush out any pending updates + */ +void bitmap_flush(mddev_t *mddev) +{ + struct bitmap *bitmap = mddev->bitmap; + int sleep; + + if (!bitmap) /* there was no bitmap */ + return; + + /* run the daemon_work three time to ensure everything is flushed + * that can be + */ + sleep = bitmap->daemon_sleep; + bitmap->daemon_sleep = 0; + bitmap_daemon_work(bitmap); + bitmap_daemon_work(bitmap); + bitmap_daemon_work(bitmap); + bitmap->daemon_sleep = sleep; + bitmap_update_sb(bitmap); +} + +/* + * free memory that was allocated + */ +static void bitmap_free(struct bitmap *bitmap) +{ + unsigned long k, pages; + struct bitmap_page *bp; + + if (!bitmap) /* there was no bitmap */ + return; + + /* release the bitmap file and kill the daemon */ + bitmap_file_put(bitmap); + + bp = bitmap->bp; + pages = bitmap->pages; + + /* free all allocated memory */ + + if (bp) /* deallocate the page memory */ + for (k = 0; k < pages; k++) + if (bp[k].map && !bp[k].hijacked) + kfree(bp[k].map); + kfree(bp); + kfree(bitmap); +} +void bitmap_destroy(mddev_t *mddev) +{ + struct bitmap *bitmap = mddev->bitmap; + + if (!bitmap) /* there was no bitmap */ + return; + + mddev->bitmap = NULL; /* disconnect from the md device */ + if (mddev->thread) + mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; + + bitmap_free(bitmap); +} + +/* + * initialize the bitmap structure + * if this returns an error, bitmap_destroy must be called to do clean up + */ +int bitmap_create(mddev_t *mddev) +{ + struct bitmap *bitmap; + unsigned long blocks = mddev->resync_max_sectors; + unsigned long chunks; + unsigned long pages; + struct file *file = mddev->bitmap_file; + int err; + sector_t start; + + BUILD_BUG_ON(sizeof(bitmap_super_t) != 256); + + if (!file && !mddev->bitmap_offset) /* bitmap disabled, nothing to do */ + return 0; + + BUG_ON(file && mddev->bitmap_offset); + + bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL); + if (!bitmap) + return -ENOMEM; + + spin_lock_init(&bitmap->lock); + atomic_set(&bitmap->pending_writes, 0); + init_waitqueue_head(&bitmap->write_wait); + init_waitqueue_head(&bitmap->overflow_wait); + + bitmap->mddev = mddev; + + bitmap->file = file; + bitmap->offset = mddev->bitmap_offset; + if (file) { + get_file(file); + do_sync_mapping_range(file->f_mapping, 0, LLONG_MAX, + SYNC_FILE_RANGE_WAIT_BEFORE | + SYNC_FILE_RANGE_WRITE | + SYNC_FILE_RANGE_WAIT_AFTER); + } + /* read superblock from bitmap file (this sets bitmap->chunksize) */ + err = bitmap_read_sb(bitmap); + if (err) + goto error; + + bitmap->chunkshift = ffz(~bitmap->chunksize); + + /* now that chunksize and chunkshift are set, we can use these macros */ + chunks = (blocks + CHUNK_BLOCK_RATIO(bitmap) - 1) / + CHUNK_BLOCK_RATIO(bitmap); + pages = (chunks + PAGE_COUNTER_RATIO - 1) / PAGE_COUNTER_RATIO; + + BUG_ON(!pages); + + bitmap->chunks = chunks; + bitmap->pages = pages; + bitmap->missing_pages = pages; + 
bitmap->counter_bits = COUNTER_BITS; + + bitmap->syncchunk = ~0UL; + +#ifdef INJECT_FATAL_FAULT_1 + bitmap->bp = NULL; +#else + bitmap->bp = kzalloc(pages * sizeof(*bitmap->bp), GFP_KERNEL); +#endif + err = -ENOMEM; + if (!bitmap->bp) + goto error; + + /* now that we have some pages available, initialize the in-memory + * bitmap from the on-disk bitmap */ + start = 0; + if (mddev->degraded == 0 + || bitmap->events_cleared == mddev->events) + /* no need to keep dirty bits to optimise a re-add of a missing device */ + start = mddev->recovery_cp; + err = bitmap_init_from_disk(bitmap, start); + + if (err) + goto error; + + printk(KERN_INFO "created bitmap (%lu pages) for device %s\n", + pages, bmname(bitmap)); + + mddev->bitmap = bitmap; + + mddev->thread->timeout = bitmap->daemon_sleep * HZ; + + bitmap_update_sb(bitmap); + + return (bitmap->flags & BITMAP_WRITE_ERROR) ? -EIO : 0; + + error: + bitmap_free(bitmap); + return err; +} + +/* the bitmap API -- for raid personalities */ +EXPORT_SYMBOL(bitmap_startwrite); +EXPORT_SYMBOL(bitmap_endwrite); +EXPORT_SYMBOL(bitmap_start_sync); +EXPORT_SYMBOL(bitmap_end_sync); +EXPORT_SYMBOL(bitmap_unplug); +EXPORT_SYMBOL(bitmap_close_sync); +EXPORT_SYMBOL(bitmap_cond_end_sync); diff --git a/drivers/md/dm-bio-list.h b/drivers/md/dm-bio-list.h new file mode 100644 index 0000000..d4509be --- /dev/null +++ b/drivers/md/dm-bio-list.h @@ -0,0 +1,107 @@ +/* + * Copyright (C) 2004 Red Hat UK Ltd. + * + * This file is released under the GPL. + */ + +#ifndef DM_BIO_LIST_H +#define DM_BIO_LIST_H + +#include <linux/bio.h> + +#ifdef CONFIG_BLOCK + +struct bio_list { + struct bio *head; + struct bio *tail; +}; + +static inline int bio_list_empty(const struct bio_list *bl) +{ + return bl->head == NULL; +} + +static inline void bio_list_init(struct bio_list *bl) +{ + bl->head = bl->tail = NULL; +} + +#define bio_list_for_each(bio, bl) \ + for (bio = (bl)->head; bio; bio = bio->bi_next) + +static inline unsigned bio_list_size(const struct bio_list *bl) +{ + unsigned sz = 0; + struct bio *bio; + + bio_list_for_each(bio, bl) + sz++; + + return sz; +} + +static inline void bio_list_add(struct bio_list *bl, struct bio *bio) +{ + bio->bi_next = NULL; + + if (bl->tail) + bl->tail->bi_next = bio; + else + bl->head = bio; + + bl->tail = bio; +} + +static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2) +{ + if (!bl2->head) + return; + + if (bl->tail) + bl->tail->bi_next = bl2->head; + else + bl->head = bl2->head; + + bl->tail = bl2->tail; +} + +static inline void bio_list_merge_head(struct bio_list *bl, + struct bio_list *bl2) +{ + if (!bl2->head) + return; + + if (bl->head) + bl2->tail->bi_next = bl->head; + else + bl->tail = bl2->tail; + + bl->head = bl2->head; +} + +static inline struct bio *bio_list_pop(struct bio_list *bl) +{ + struct bio *bio = bl->head; + + if (bio) { + bl->head = bl->head->bi_next; + if (!bl->head) + bl->tail = NULL; + + bio->bi_next = NULL; + } + + return bio; +} + +static inline struct bio *bio_list_get(struct bio_list *bl) +{ + struct bio *bio = bl->head; + + bl->head = bl->tail = NULL; + + return bio; +} + +#endif /* CONFIG_BLOCK */ +#endif diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h new file mode 100644 index 0000000..d3ec217 --- /dev/null +++ b/drivers/md/dm-bio-record.h @@ -0,0 +1,45 @@ +/* + * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved. + * + * This file is released under the GPL. 
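 */

/*
 * A minimal usage sketch for the bio_list helpers in dm-bio-list.h above
 * (illustrative only, not part of either header): bios are queued with
 * bio_list_add() and later submitted in FIFO order with bio_list_pop(),
 * as several dm targets do.
 */
static inline void example_submit_all(struct bio_list *work)
{
	struct bio *bio;

	while ((bio = bio_list_pop(work)))
		generic_make_request(bio);
}

/*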
+ */ + +#ifndef DM_BIO_RECORD_H +#define DM_BIO_RECORD_H + +#include <linux/bio.h> + +/* + * There are lots of mutable fields in the bio struct that get + * changed by the lower levels of the block layer. Some targets, + * such as multipath, may wish to resubmit a bio on error. The + * functions in this file help the target record and restore the + * original bio state. + */ +struct dm_bio_details { + sector_t bi_sector; + struct block_device *bi_bdev; + unsigned int bi_size; + unsigned short bi_idx; + unsigned long bi_flags; +}; + +static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio) +{ + bd->bi_sector = bio->bi_sector; + bd->bi_bdev = bio->bi_bdev; + bd->bi_size = bio->bi_size; + bd->bi_idx = bio->bi_idx; + bd->bi_flags = bio->bi_flags; +} + +static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio) +{ + bio->bi_sector = bd->bi_sector; + bio->bi_bdev = bd->bi_bdev; + bio->bi_size = bd->bi_size; + bio->bi_idx = bd->bi_idx; + bio->bi_flags = bd->bi_flags; +} + +#endif diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c new file mode 100644 index 0000000..51047d3 --- /dev/null +++ b/drivers/md/dm-crypt.c @@ -0,0 +1,1357 @@ +/* + * Copyright (C) 2003 Christophe Saout <christophe@saout.de> + * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org> + * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved. + * + * This file is released under the GPL. + */ + +#include <linux/completion.h> +#include <linux/err.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/bio.h> +#include <linux/blkdev.h> +#include <linux/mempool.h> +#include <linux/slab.h> +#include <linux/crypto.h> +#include <linux/workqueue.h> +#include <linux/backing-dev.h> +#include <asm/atomic.h> +#include <linux/scatterlist.h> +#include <asm/page.h> +#include <asm/unaligned.h> + +#include <linux/device-mapper.h> + +#define DM_MSG_PREFIX "crypt" +#define MESG_STR(x) x, sizeof(x) + +/* + * context holding the current state of a multi-part conversion + */ +struct convert_context { + struct completion restart; + struct bio *bio_in; + struct bio *bio_out; + unsigned int offset_in; + unsigned int offset_out; + unsigned int idx_in; + unsigned int idx_out; + sector_t sector; + atomic_t pending; +}; + +/* + * per bio private data + */ +struct dm_crypt_io { + struct dm_target *target; + struct bio *base_bio; + struct work_struct work; + + struct convert_context ctx; + + atomic_t pending; + int error; + sector_t sector; + struct dm_crypt_io *base_io; +}; + +struct dm_crypt_request { + struct convert_context *ctx; + struct scatterlist sg_in; + struct scatterlist sg_out; +}; + +struct crypt_config; + +struct crypt_iv_operations { + int (*ctr)(struct crypt_config *cc, struct dm_target *ti, + const char *opts); + void (*dtr)(struct crypt_config *cc); + const char *(*status)(struct crypt_config *cc); + int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector); +}; + +/* + * Crypt: maps a linear range of a block device + * and encrypts / decrypts at the same time. 
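 *
 * An illustrative dmsetup table line for this target (the parameters,
 * parsed by crypt_ctr() below, are <cipher> <key> <iv_offset>
 * <dev_path> <start>; the key and device here are placeholders):
 *
 *	0 409600 crypt aes-cbc-essiv:sha256 <key in hex> 0 /dev/sdb1 0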
+ */ +enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID }; +struct crypt_config { + struct dm_dev *dev; + sector_t start; + + /* + * pool for per bio private data, crypto requests and + * encryption requeusts/buffer pages + */ + mempool_t *io_pool; + mempool_t *req_pool; + mempool_t *page_pool; + struct bio_set *bs; + + struct workqueue_struct *io_queue; + struct workqueue_struct *crypt_queue; + + /* + * crypto related data + */ + struct crypt_iv_operations *iv_gen_ops; + char *iv_mode; + union { + struct crypto_cipher *essiv_tfm; + int benbi_shift; + } iv_gen_private; + sector_t iv_offset; + unsigned int iv_size; + + /* + * Layout of each crypto request: + * + * struct ablkcipher_request + * context + * padding + * struct dm_crypt_request + * padding + * IV + * + * The padding is added so that dm_crypt_request and the IV are + * correctly aligned. + */ + unsigned int dmreq_start; + struct ablkcipher_request *req; + + char cipher[CRYPTO_MAX_ALG_NAME]; + char chainmode[CRYPTO_MAX_ALG_NAME]; + struct crypto_ablkcipher *tfm; + unsigned long flags; + unsigned int key_size; + u8 key[0]; +}; + +#define MIN_IOS 16 +#define MIN_POOL_PAGES 32 +#define MIN_BIO_PAGES 8 + +static struct kmem_cache *_crypt_io_pool; + +static void clone_init(struct dm_crypt_io *, struct bio *); +static void kcryptd_queue_crypt(struct dm_crypt_io *io); + +/* + * Different IV generation algorithms: + * + * plain: the initial vector is the 32-bit little-endian version of the sector + * number, padded with zeros if necessary. + * + * essiv: "encrypted sector|salt initial vector", the sector number is + * encrypted with the bulk cipher using a salt as key. The salt + * should be derived from the bulk cipher's key via hashing. + * + * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1 + * (needed for LRW-32-AES and possible other narrow block modes) + * + * null: the initial vector is always zero. Provides compatibility with + * obsolete loop_fish2 devices. Do not use for new devices. 
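 *
 * As an illustration (assuming a 16-byte IV): for sector 5 the "plain"
 * generator below yields 05 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00,
 * i.e. the 32-bit little-endian sector number padded with zeros.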
+ * + * plumb: unimplemented, see: + * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454 + */ + +static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector) +{ + memset(iv, 0, cc->iv_size); + *(u32 *)iv = cpu_to_le32(sector & 0xffffffff); + + return 0; +} + +static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, + const char *opts) +{ + struct crypto_cipher *essiv_tfm; + struct crypto_hash *hash_tfm; + struct hash_desc desc; + struct scatterlist sg; + unsigned int saltsize; + u8 *salt; + int err; + + if (opts == NULL) { + ti->error = "Digest algorithm missing for ESSIV mode"; + return -EINVAL; + } + + /* Hash the cipher key with the given hash algorithm */ + hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(hash_tfm)) { + ti->error = "Error initializing ESSIV hash"; + return PTR_ERR(hash_tfm); + } + + saltsize = crypto_hash_digestsize(hash_tfm); + salt = kmalloc(saltsize, GFP_KERNEL); + if (salt == NULL) { + ti->error = "Error kmallocing salt storage in ESSIV"; + crypto_free_hash(hash_tfm); + return -ENOMEM; + } + + sg_init_one(&sg, cc->key, cc->key_size); + desc.tfm = hash_tfm; + desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; + err = crypto_hash_digest(&desc, &sg, cc->key_size, salt); + crypto_free_hash(hash_tfm); + + if (err) { + ti->error = "Error calculating hash in ESSIV"; + kfree(salt); + return err; + } + + /* Setup the essiv_tfm with the given salt */ + essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(essiv_tfm)) { + ti->error = "Error allocating crypto tfm for ESSIV"; + kfree(salt); + return PTR_ERR(essiv_tfm); + } + if (crypto_cipher_blocksize(essiv_tfm) != + crypto_ablkcipher_ivsize(cc->tfm)) { + ti->error = "Block size of ESSIV cipher does " + "not match IV size of block cipher"; + crypto_free_cipher(essiv_tfm); + kfree(salt); + return -EINVAL; + } + err = crypto_cipher_setkey(essiv_tfm, salt, saltsize); + if (err) { + ti->error = "Failed to set key for ESSIV cipher"; + crypto_free_cipher(essiv_tfm); + kfree(salt); + return err; + } + kfree(salt); + + cc->iv_gen_private.essiv_tfm = essiv_tfm; + return 0; +} + +static void crypt_iv_essiv_dtr(struct crypt_config *cc) +{ + crypto_free_cipher(cc->iv_gen_private.essiv_tfm); + cc->iv_gen_private.essiv_tfm = NULL; +} + +static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector) +{ + memset(iv, 0, cc->iv_size); + *(u64 *)iv = cpu_to_le64(sector); + crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv); + return 0; +} + +static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti, + const char *opts) +{ + unsigned bs = crypto_ablkcipher_blocksize(cc->tfm); + int log = ilog2(bs); + + /* we need to calculate how far we must shift the sector count + * to get the cipher block count, we use this shift in _gen */ + + if (1 << log != bs) { + ti->error = "cypher blocksize is not a power of 2"; + return -EINVAL; + } + + if (log > 9) { + ti->error = "cypher blocksize is > 512"; + return -EINVAL; + } + + cc->iv_gen_private.benbi_shift = 9 - log; + + return 0; +} + +static void crypt_iv_benbi_dtr(struct crypt_config *cc) +{ +} + +static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector) +{ + __be64 val; + + memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */ + + val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1); + put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64))); + + return 0; +} + +static int crypt_iv_null_gen(struct 
crypt_config *cc, u8 *iv, sector_t sector) +{ + memset(iv, 0, cc->iv_size); + + return 0; +} + +static struct crypt_iv_operations crypt_iv_plain_ops = { + .generator = crypt_iv_plain_gen +}; + +static struct crypt_iv_operations crypt_iv_essiv_ops = { + .ctr = crypt_iv_essiv_ctr, + .dtr = crypt_iv_essiv_dtr, + .generator = crypt_iv_essiv_gen +}; + +static struct crypt_iv_operations crypt_iv_benbi_ops = { + .ctr = crypt_iv_benbi_ctr, + .dtr = crypt_iv_benbi_dtr, + .generator = crypt_iv_benbi_gen +}; + +static struct crypt_iv_operations crypt_iv_null_ops = { + .generator = crypt_iv_null_gen +}; + +static void crypt_convert_init(struct crypt_config *cc, + struct convert_context *ctx, + struct bio *bio_out, struct bio *bio_in, + sector_t sector) +{ + ctx->bio_in = bio_in; + ctx->bio_out = bio_out; + ctx->offset_in = 0; + ctx->offset_out = 0; + ctx->idx_in = bio_in ? bio_in->bi_idx : 0; + ctx->idx_out = bio_out ? bio_out->bi_idx : 0; + ctx->sector = sector + cc->iv_offset; + init_completion(&ctx->restart); +} + +static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc, + struct ablkcipher_request *req) +{ + return (struct dm_crypt_request *)((char *)req + cc->dmreq_start); +} + +static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc, + struct dm_crypt_request *dmreq) +{ + return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start); +} + +static int crypt_convert_block(struct crypt_config *cc, + struct convert_context *ctx, + struct ablkcipher_request *req) +{ + struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in); + struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out); + struct dm_crypt_request *dmreq; + u8 *iv; + int r = 0; + + dmreq = dmreq_of_req(cc, req); + iv = (u8 *)ALIGN((unsigned long)(dmreq + 1), + crypto_ablkcipher_alignmask(cc->tfm) + 1); + + dmreq->ctx = ctx; + sg_init_table(&dmreq->sg_in, 1); + sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT, + bv_in->bv_offset + ctx->offset_in); + + sg_init_table(&dmreq->sg_out, 1); + sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT, + bv_out->bv_offset + ctx->offset_out); + + ctx->offset_in += 1 << SECTOR_SHIFT; + if (ctx->offset_in >= bv_in->bv_len) { + ctx->offset_in = 0; + ctx->idx_in++; + } + + ctx->offset_out += 1 << SECTOR_SHIFT; + if (ctx->offset_out >= bv_out->bv_len) { + ctx->offset_out = 0; + ctx->idx_out++; + } + + if (cc->iv_gen_ops) { + r = cc->iv_gen_ops->generator(cc, iv, ctx->sector); + if (r < 0) + return r; + } + + ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out, + 1 << SECTOR_SHIFT, iv); + + if (bio_data_dir(ctx->bio_in) == WRITE) + r = crypto_ablkcipher_encrypt(req); + else + r = crypto_ablkcipher_decrypt(req); + + return r; +} + +static void kcryptd_async_done(struct crypto_async_request *async_req, + int error); +static void crypt_alloc_req(struct crypt_config *cc, + struct convert_context *ctx) +{ + if (!cc->req) + cc->req = mempool_alloc(cc->req_pool, GFP_NOIO); + ablkcipher_request_set_tfm(cc->req, cc->tfm); + ablkcipher_request_set_callback(cc->req, CRYPTO_TFM_REQ_MAY_BACKLOG | + CRYPTO_TFM_REQ_MAY_SLEEP, + kcryptd_async_done, + dmreq_of_req(cc, cc->req)); +} + +/* + * Encrypt / decrypt data from one bio to another one (can be the same one) + */ +static int crypt_convert(struct crypt_config *cc, + struct convert_context *ctx) +{ + int r; + + atomic_set(&ctx->pending, 1); + + while(ctx->idx_in < ctx->bio_in->bi_vcnt && + ctx->idx_out < ctx->bio_out->bi_vcnt) { + + crypt_alloc_req(cc, ctx); + + 
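		/*
		 * ctx->pending starts at 1 on behalf of the caller; each
		 * block in flight takes one extra reference, dropped below
		 * for synchronous completion or in kcryptd_async_done() for
		 * asynchronous completion.
		 */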
atomic_inc(&ctx->pending); + + r = crypt_convert_block(cc, ctx, cc->req); + + switch (r) { + /* async */ + case -EBUSY: + wait_for_completion(&ctx->restart); + INIT_COMPLETION(ctx->restart); + /* fall through*/ + case -EINPROGRESS: + cc->req = NULL; + ctx->sector++; + continue; + + /* sync */ + case 0: + atomic_dec(&ctx->pending); + ctx->sector++; + cond_resched(); + continue; + + /* error */ + default: + atomic_dec(&ctx->pending); + return r; + } + } + + return 0; +} + +static void dm_crypt_bio_destructor(struct bio *bio) +{ + struct dm_crypt_io *io = bio->bi_private; + struct crypt_config *cc = io->target->private; + + bio_free(bio, cc->bs); +} + +/* + * Generate a new unfragmented bio with the given size + * This should never violate the device limitations + * May return a smaller bio when running out of pages, indicated by + * *out_of_pages set to 1. + */ +static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size, + unsigned *out_of_pages) +{ + struct crypt_config *cc = io->target->private; + struct bio *clone; + unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; + gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM; + unsigned i, len; + struct page *page; + + clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs); + if (!clone) + return NULL; + + clone_init(io, clone); + *out_of_pages = 0; + + for (i = 0; i < nr_iovecs; i++) { + page = mempool_alloc(cc->page_pool, gfp_mask); + if (!page) { + *out_of_pages = 1; + break; + } + + /* + * if additional pages cannot be allocated without waiting, + * return a partially allocated bio, the caller will then try + * to allocate additional bios while submitting this partial bio + */ + if (i == (MIN_BIO_PAGES - 1)) + gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT; + + len = (size > PAGE_SIZE) ? PAGE_SIZE : size; + + if (!bio_add_page(clone, page, len, 0)) { + mempool_free(page, cc->page_pool); + break; + } + + size -= len; + } + + if (!clone->bi_size) { + bio_put(clone); + return NULL; + } + + return clone; +} + +static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone) +{ + unsigned int i; + struct bio_vec *bv; + + for (i = 0; i < clone->bi_vcnt; i++) { + bv = bio_iovec_idx(clone, i); + BUG_ON(!bv->bv_page); + mempool_free(bv->bv_page, cc->page_pool); + bv->bv_page = NULL; + } +} + +static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti, + struct bio *bio, sector_t sector) +{ + struct crypt_config *cc = ti->private; + struct dm_crypt_io *io; + + io = mempool_alloc(cc->io_pool, GFP_NOIO); + io->target = ti; + io->base_bio = bio; + io->sector = sector; + io->error = 0; + io->base_io = NULL; + atomic_set(&io->pending, 0); + + return io; +} + +static void crypt_inc_pending(struct dm_crypt_io *io) +{ + atomic_inc(&io->pending); +} + +/* + * One of the bios was finished. Check for completion of + * the whole request and correctly clean up the buffer. + * If base_io is set, wait for the last fragment to complete. + */ +static void crypt_dec_pending(struct dm_crypt_io *io) +{ + struct crypt_config *cc = io->target->private; + struct bio *base_bio = io->base_bio; + struct dm_crypt_io *base_io = io->base_io; + int error = io->error; + + if (!atomic_dec_and_test(&io->pending)) + return; + + mempool_free(io, cc->io_pool); + + if (likely(!base_io)) + bio_endio(base_bio, error); + else { + if (error && !base_io->error) + base_io->error = error; + crypt_dec_pending(base_io); + } +} + +/* + * kcryptd/kcryptd_io: + * + * Needed because it would be very unwise to do decryption in an + * interrupt context. 
+ * + * kcryptd performs the actual encryption or decryption. + * + * kcryptd_io performs the IO submission. + * + * They must be separated as otherwise the final stages could be + * starved by new requests which can block in the first stages due + * to memory allocation. + */ +static void crypt_endio(struct bio *clone, int error) +{ + struct dm_crypt_io *io = clone->bi_private; + struct crypt_config *cc = io->target->private; + unsigned rw = bio_data_dir(clone); + + if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error)) + error = -EIO; + + /* + * free the processed pages + */ + if (rw == WRITE) + crypt_free_buffer_pages(cc, clone); + + bio_put(clone); + + if (rw == READ && !error) { + kcryptd_queue_crypt(io); + return; + } + + if (unlikely(error)) + io->error = error; + + crypt_dec_pending(io); +} + +static void clone_init(struct dm_crypt_io *io, struct bio *clone) +{ + struct crypt_config *cc = io->target->private; + + clone->bi_private = io; + clone->bi_end_io = crypt_endio; + clone->bi_bdev = cc->dev->bdev; + clone->bi_rw = io->base_bio->bi_rw; + clone->bi_destructor = dm_crypt_bio_destructor; +} + +static void kcryptd_io_read(struct dm_crypt_io *io) +{ + struct crypt_config *cc = io->target->private; + struct bio *base_bio = io->base_bio; + struct bio *clone; + + crypt_inc_pending(io); + + /* + * The block layer might modify the bvec array, so always + * copy the required bvecs because we need the original + * one in order to decrypt the whole bio data *afterwards*. + */ + clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs); + if (unlikely(!clone)) { + io->error = -ENOMEM; + crypt_dec_pending(io); + return; + } + + clone_init(io, clone); + clone->bi_idx = 0; + clone->bi_vcnt = bio_segments(base_bio); + clone->bi_size = base_bio->bi_size; + clone->bi_sector = cc->start + io->sector; + memcpy(clone->bi_io_vec, bio_iovec(base_bio), + sizeof(struct bio_vec) * clone->bi_vcnt); + + generic_make_request(clone); +} + +static void kcryptd_io_write(struct dm_crypt_io *io) +{ + struct bio *clone = io->ctx.bio_out; + generic_make_request(clone); +} + +static void kcryptd_io(struct work_struct *work) +{ + struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); + + if (bio_data_dir(io->base_bio) == READ) + kcryptd_io_read(io); + else + kcryptd_io_write(io); +} + +static void kcryptd_queue_io(struct dm_crypt_io *io) +{ + struct crypt_config *cc = io->target->private; + + INIT_WORK(&io->work, kcryptd_io); + queue_work(cc->io_queue, &io->work); +} + +static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, + int error, int async) +{ + struct bio *clone = io->ctx.bio_out; + struct crypt_config *cc = io->target->private; + + if (unlikely(error < 0)) { + crypt_free_buffer_pages(cc, clone); + bio_put(clone); + io->error = -EIO; + crypt_dec_pending(io); + return; + } + + /* crypt_convert should have filled the clone bio */ + BUG_ON(io->ctx.idx_out < clone->bi_vcnt); + + clone->bi_sector = cc->start + io->sector; + + if (async) + kcryptd_queue_io(io); + else + generic_make_request(clone); +} + +static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) +{ + struct crypt_config *cc = io->target->private; + struct bio *clone; + struct dm_crypt_io *new_io; + int crypt_finished; + unsigned out_of_pages = 0; + unsigned remaining = io->base_bio->bi_size; + sector_t sector = io->sector; + int r; + + /* + * Prevent io from disappearing until this function completes. 
+ */ + crypt_inc_pending(io); + crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector); + + /* + * The allocated buffers can be smaller than the whole bio, + * so repeat the whole process until all the data can be handled. + */ + while (remaining) { + clone = crypt_alloc_buffer(io, remaining, &out_of_pages); + if (unlikely(!clone)) { + io->error = -ENOMEM; + break; + } + + io->ctx.bio_out = clone; + io->ctx.idx_out = 0; + + remaining -= clone->bi_size; + sector += bio_sectors(clone); + + crypt_inc_pending(io); + r = crypt_convert(cc, &io->ctx); + crypt_finished = atomic_dec_and_test(&io->ctx.pending); + + /* Encryption was already finished, submit io now */ + if (crypt_finished) { + kcryptd_crypt_write_io_submit(io, r, 0); + + /* + * If there was an error, do not try next fragments. + * For async, error is processed in async handler. + */ + if (unlikely(r < 0)) + break; + + io->sector = sector; + } + + /* + * Out of memory -> run queues + * But don't wait if split was due to the io size restriction + */ + if (unlikely(out_of_pages)) + congestion_wait(WRITE, HZ/100); + + /* + * With async crypto it is unsafe to share the crypto context + * between fragments, so switch to a new dm_crypt_io structure. + */ + if (unlikely(!crypt_finished && remaining)) { + new_io = crypt_io_alloc(io->target, io->base_bio, + sector); + crypt_inc_pending(new_io); + crypt_convert_init(cc, &new_io->ctx, NULL, + io->base_bio, sector); + new_io->ctx.idx_in = io->ctx.idx_in; + new_io->ctx.offset_in = io->ctx.offset_in; + + /* + * Fragments after the first use the base_io + * pending count. + */ + if (!io->base_io) + new_io->base_io = io; + else { + new_io->base_io = io->base_io; + crypt_inc_pending(io->base_io); + crypt_dec_pending(io); + } + + io = new_io; + } + } + + crypt_dec_pending(io); +} + +static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error) +{ + if (unlikely(error < 0)) + io->error = -EIO; + + crypt_dec_pending(io); +} + +static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) +{ + struct crypt_config *cc = io->target->private; + int r = 0; + + crypt_inc_pending(io); + + crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio, + io->sector); + + r = crypt_convert(cc, &io->ctx); + + if (atomic_dec_and_test(&io->ctx.pending)) + kcryptd_crypt_read_done(io, r); + + crypt_dec_pending(io); +} + +static void kcryptd_async_done(struct crypto_async_request *async_req, + int error) +{ + struct dm_crypt_request *dmreq = async_req->data; + struct convert_context *ctx = dmreq->ctx; + struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx); + struct crypt_config *cc = io->target->private; + + if (error == -EINPROGRESS) { + complete(&ctx->restart); + return; + } + + mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool); + + if (!atomic_dec_and_test(&ctx->pending)) + return; + + if (bio_data_dir(io->base_bio) == READ) + kcryptd_crypt_read_done(io, error); + else + kcryptd_crypt_write_io_submit(io, error, 1); +} + +static void kcryptd_crypt(struct work_struct *work) +{ + struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); + + if (bio_data_dir(io->base_bio) == READ) + kcryptd_crypt_read_convert(io); + else + kcryptd_crypt_write_convert(io); +} + +static void kcryptd_queue_crypt(struct dm_crypt_io *io) +{ + struct crypt_config *cc = io->target->private; + + INIT_WORK(&io->work, kcryptd_crypt); + queue_work(cc->crypt_queue, &io->work); +} + +/* + * Decode key from its hex representation + */ +static int crypt_decode_key(u8 *key, char *hex, unsigned int size) +{ + 
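	/*
	 * Worked example (illustrative): for hex = "cafef00d" and size = 4
	 * the loop below yields key[0] = 0xca, key[1] = 0xfe, key[2] = 0xf0,
	 * key[3] = 0x0d; anything other than exactly 2 * size hex digits
	 * results in -EINVAL.
	 */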
char buffer[3]; + char *endp; + unsigned int i; + + buffer[2] = '\0'; + + for (i = 0; i < size; i++) { + buffer[0] = *hex++; + buffer[1] = *hex++; + + key[i] = (u8)simple_strtoul(buffer, &endp, 16); + + if (endp != &buffer[2]) + return -EINVAL; + } + + if (*hex != '\0') + return -EINVAL; + + return 0; +} + +/* + * Encode key into its hex representation + */ +static void crypt_encode_key(char *hex, u8 *key, unsigned int size) +{ + unsigned int i; + + for (i = 0; i < size; i++) { + sprintf(hex, "%02x", *key); + hex += 2; + key++; + } +} + +static int crypt_set_key(struct crypt_config *cc, char *key) +{ + unsigned key_size = strlen(key) >> 1; + + if (cc->key_size && cc->key_size != key_size) + return -EINVAL; + + cc->key_size = key_size; /* initial settings */ + + if ((!key_size && strcmp(key, "-")) || + (key_size && crypt_decode_key(cc->key, key, key_size) < 0)) + return -EINVAL; + + set_bit(DM_CRYPT_KEY_VALID, &cc->flags); + + return 0; +} + +static int crypt_wipe_key(struct crypt_config *cc) +{ + clear_bit(DM_CRYPT_KEY_VALID, &cc->flags); + memset(&cc->key, 0, cc->key_size * sizeof(u8)); + return 0; +} + +/* + * Construct an encryption mapping: + * <cipher> <key> <iv_offset> <dev_path> <start> + */ +static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) +{ + struct crypt_config *cc; + struct crypto_ablkcipher *tfm; + char *tmp; + char *cipher; + char *chainmode; + char *ivmode; + char *ivopts; + unsigned int key_size; + unsigned long long tmpll; + + if (argc != 5) { + ti->error = "Not enough arguments"; + return -EINVAL; + } + + tmp = argv[0]; + cipher = strsep(&tmp, "-"); + chainmode = strsep(&tmp, "-"); + ivopts = strsep(&tmp, "-"); + ivmode = strsep(&ivopts, ":"); + + if (tmp) + DMWARN("Unexpected additional cipher options"); + + key_size = strlen(argv[1]) >> 1; + + cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL); + if (cc == NULL) { + ti->error = + "Cannot allocate transparent encryption context"; + return -ENOMEM; + } + + if (crypt_set_key(cc, argv[1])) { + ti->error = "Error decoding key"; + goto bad_cipher; + } + + /* Compatiblity mode for old dm-crypt cipher strings */ + if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) { + chainmode = "cbc"; + ivmode = "plain"; + } + + if (strcmp(chainmode, "ecb") && !ivmode) { + ti->error = "This chaining mode requires an IV mechanism"; + goto bad_cipher; + } + + if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)", + chainmode, cipher) >= CRYPTO_MAX_ALG_NAME) { + ti->error = "Chain mode + cipher name is too long"; + goto bad_cipher; + } + + tfm = crypto_alloc_ablkcipher(cc->cipher, 0, 0); + if (IS_ERR(tfm)) { + ti->error = "Error allocating crypto tfm"; + goto bad_cipher; + } + + strcpy(cc->cipher, cipher); + strcpy(cc->chainmode, chainmode); + cc->tfm = tfm; + + /* + * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi". 
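 * For example (illustrative): a cipher argument of "aes-cbc-essiv:sha256"
 * was split above into cipher "aes", chainmode "cbc", ivmode "essiv" and
 * ivopts "sha256", giving the crypto API string "cbc(aes)" with ESSIV
 * initial vectors derived via sha256.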
+ * See comments at iv code + */ + + if (ivmode == NULL) + cc->iv_gen_ops = NULL; + else if (strcmp(ivmode, "plain") == 0) + cc->iv_gen_ops = &crypt_iv_plain_ops; + else if (strcmp(ivmode, "essiv") == 0) + cc->iv_gen_ops = &crypt_iv_essiv_ops; + else if (strcmp(ivmode, "benbi") == 0) + cc->iv_gen_ops = &crypt_iv_benbi_ops; + else if (strcmp(ivmode, "null") == 0) + cc->iv_gen_ops = &crypt_iv_null_ops; + else { + ti->error = "Invalid IV mode"; + goto bad_ivmode; + } + + if (cc->iv_gen_ops && cc->iv_gen_ops->ctr && + cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0) + goto bad_ivmode; + + cc->iv_size = crypto_ablkcipher_ivsize(tfm); + if (cc->iv_size) + /* at least a 64 bit sector number should fit in our buffer */ + cc->iv_size = max(cc->iv_size, + (unsigned int)(sizeof(u64) / sizeof(u8))); + else { + if (cc->iv_gen_ops) { + DMWARN("Selected cipher does not support IVs"); + if (cc->iv_gen_ops->dtr) + cc->iv_gen_ops->dtr(cc); + cc->iv_gen_ops = NULL; + } + } + + cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool); + if (!cc->io_pool) { + ti->error = "Cannot allocate crypt io mempool"; + goto bad_slab_pool; + } + + cc->dmreq_start = sizeof(struct ablkcipher_request); + cc->dmreq_start += crypto_ablkcipher_reqsize(tfm); + cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment()); + cc->dmreq_start += crypto_ablkcipher_alignmask(tfm) & + ~(crypto_tfm_ctx_alignment() - 1); + + cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start + + sizeof(struct dm_crypt_request) + cc->iv_size); + if (!cc->req_pool) { + ti->error = "Cannot allocate crypt request mempool"; + goto bad_req_pool; + } + cc->req = NULL; + + cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0); + if (!cc->page_pool) { + ti->error = "Cannot allocate page mempool"; + goto bad_page_pool; + } + + cc->bs = bioset_create(MIN_IOS, MIN_IOS); + if (!cc->bs) { + ti->error = "Cannot allocate crypt bioset"; + goto bad_bs; + } + + if (crypto_ablkcipher_setkey(tfm, cc->key, key_size) < 0) { + ti->error = "Error setting key"; + goto bad_device; + } + + if (sscanf(argv[2], "%llu", &tmpll) != 1) { + ti->error = "Invalid iv_offset sector"; + goto bad_device; + } + cc->iv_offset = tmpll; + + if (sscanf(argv[4], "%llu", &tmpll) != 1) { + ti->error = "Invalid device sector"; + goto bad_device; + } + cc->start = tmpll; + + if (dm_get_device(ti, argv[3], cc->start, ti->len, + dm_table_get_mode(ti->table), &cc->dev)) { + ti->error = "Device lookup failed"; + goto bad_device; + } + + if (ivmode && cc->iv_gen_ops) { + if (ivopts) + *(ivopts - 1) = ':'; + cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL); + if (!cc->iv_mode) { + ti->error = "Error kmallocing iv_mode string"; + goto bad_ivmode_string; + } + strcpy(cc->iv_mode, ivmode); + } else + cc->iv_mode = NULL; + + cc->io_queue = create_singlethread_workqueue("kcryptd_io"); + if (!cc->io_queue) { + ti->error = "Couldn't create kcryptd io queue"; + goto bad_io_queue; + } + + cc->crypt_queue = create_singlethread_workqueue("kcryptd"); + if (!cc->crypt_queue) { + ti->error = "Couldn't create kcryptd queue"; + goto bad_crypt_queue; + } + + ti->private = cc; + return 0; + +bad_crypt_queue: + destroy_workqueue(cc->io_queue); +bad_io_queue: + kfree(cc->iv_mode); +bad_ivmode_string: + dm_put_device(ti, cc->dev); +bad_device: + bioset_free(cc->bs); +bad_bs: + mempool_destroy(cc->page_pool); +bad_page_pool: + mempool_destroy(cc->req_pool); +bad_req_pool: + mempool_destroy(cc->io_pool); +bad_slab_pool: + if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) + cc->iv_gen_ops->dtr(cc); 
+bad_ivmode: + crypto_free_ablkcipher(tfm); +bad_cipher: + /* Must zero key material before freeing */ + memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8)); + kfree(cc); + return -EINVAL; +} + +static void crypt_dtr(struct dm_target *ti) +{ + struct crypt_config *cc = (struct crypt_config *) ti->private; + + destroy_workqueue(cc->io_queue); + destroy_workqueue(cc->crypt_queue); + + if (cc->req) + mempool_free(cc->req, cc->req_pool); + + bioset_free(cc->bs); + mempool_destroy(cc->page_pool); + mempool_destroy(cc->req_pool); + mempool_destroy(cc->io_pool); + + kfree(cc->iv_mode); + if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) + cc->iv_gen_ops->dtr(cc); + crypto_free_ablkcipher(cc->tfm); + dm_put_device(ti, cc->dev); + + /* Must zero key material before freeing */ + memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8)); + kfree(cc); +} + +static int crypt_map(struct dm_target *ti, struct bio *bio, + union map_info *map_context) +{ + struct dm_crypt_io *io; + + io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin); + + if (bio_data_dir(io->base_bio) == READ) + kcryptd_queue_io(io); + else + kcryptd_queue_crypt(io); + + return DM_MAPIO_SUBMITTED; +} + +static int crypt_status(struct dm_target *ti, status_type_t type, + char *result, unsigned int maxlen) +{ + struct crypt_config *cc = (struct crypt_config *) ti->private; + unsigned int sz = 0; + + switch (type) { + case STATUSTYPE_INFO: + result[0] = '\0'; + break; + + case STATUSTYPE_TABLE: + if (cc->iv_mode) + DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode, + cc->iv_mode); + else + DMEMIT("%s-%s ", cc->cipher, cc->chainmode); + + if (cc->key_size > 0) { + if ((maxlen - sz) < ((cc->key_size << 1) + 1)) + return -ENOMEM; + + crypt_encode_key(result + sz, cc->key, cc->key_size); + sz += cc->key_size << 1; + } else { + if (sz >= maxlen) + return -ENOMEM; + result[sz++] = '-'; + } + + DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset, + cc->dev->name, (unsigned long long)cc->start); + break; + } + return 0; +} + +static void crypt_postsuspend(struct dm_target *ti) +{ + struct crypt_config *cc = ti->private; + + set_bit(DM_CRYPT_SUSPENDED, &cc->flags); +} + +static int crypt_preresume(struct dm_target *ti) +{ + struct crypt_config *cc = ti->private; + + if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) { + DMERR("aborting resume - crypt key is not set."); + return -EAGAIN; + } + + return 0; +} + +static void crypt_resume(struct dm_target *ti) +{ + struct crypt_config *cc = ti->private; + + clear_bit(DM_CRYPT_SUSPENDED, &cc->flags); +} + +/* Message interface + * key set <key> + * key wipe + */ +static int crypt_message(struct dm_target *ti, unsigned argc, char **argv) +{ + struct crypt_config *cc = ti->private; + + if (argc < 2) + goto error; + + if (!strnicmp(argv[0], MESG_STR("key"))) { + if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) { + DMWARN("not suspended during key manipulation."); + return -EINVAL; + } + if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) + return crypt_set_key(cc, argv[2]); + if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) + return crypt_wipe_key(cc); + } + +error: + DMWARN("unrecognised message received."); + return -EINVAL; +} + +static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm, + struct bio_vec *biovec, int max_size) +{ + struct crypt_config *cc = ti->private; + struct request_queue *q = bdev_get_queue(cc->dev->bdev); + + if (!q->merge_bvec_fn) + return max_size; + + bvm->bi_bdev = cc->dev->bdev; + bvm->bi_sector = cc->start + bvm->bi_sector - ti->begin; + + return 
min(max_size, q->merge_bvec_fn(q, bvm, biovec)); +} + +static struct target_type crypt_target = { + .name = "crypt", + .version= {1, 6, 0}, + .module = THIS_MODULE, + .ctr = crypt_ctr, + .dtr = crypt_dtr, + .map = crypt_map, + .status = crypt_status, + .postsuspend = crypt_postsuspend, + .preresume = crypt_preresume, + .resume = crypt_resume, + .message = crypt_message, + .merge = crypt_merge, +}; + +static int __init dm_crypt_init(void) +{ + int r; + + _crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0); + if (!_crypt_io_pool) + return -ENOMEM; + + r = dm_register_target(&crypt_target); + if (r < 0) { + DMERR("register failed %d", r); + kmem_cache_destroy(_crypt_io_pool); + } + + return r; +} + +static void __exit dm_crypt_exit(void) +{ + int r = dm_unregister_target(&crypt_target); + + if (r < 0) + DMERR("unregister failed %d", r); + + kmem_cache_destroy(_crypt_io_pool); +} + +module_init(dm_crypt_init); +module_exit(dm_crypt_exit); + +MODULE_AUTHOR("Christophe Saout <christophe@saout.de>"); +MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption"); +MODULE_LICENSE("GPL"); diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c new file mode 100644 index 0000000..848b381 --- /dev/null +++ b/drivers/md/dm-delay.c @@ -0,0 +1,382 @@ +/* + * Copyright (C) 2005-2007 Red Hat GmbH + * + * A target that delays reads and/or writes and can send + * them to different devices. + * + * This file is released under the GPL. + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/blkdev.h> +#include <linux/bio.h> +#include <linux/slab.h> + +#include <linux/device-mapper.h> + +#include "dm-bio-list.h" + +#define DM_MSG_PREFIX "delay" + +struct delay_c { + struct timer_list delay_timer; + struct mutex timer_lock; + struct work_struct flush_expired_bios; + struct list_head delayed_bios; + atomic_t may_delay; + mempool_t *delayed_pool; + + struct dm_dev *dev_read; + sector_t start_read; + unsigned read_delay; + unsigned reads; + + struct dm_dev *dev_write; + sector_t start_write; + unsigned write_delay; + unsigned writes; +}; + +struct dm_delay_info { + struct delay_c *context; + struct list_head list; + struct bio *bio; + unsigned long expires; +}; + +static DEFINE_MUTEX(delayed_bios_lock); + +static struct workqueue_struct *kdelayd_wq; +static struct kmem_cache *delayed_cache; + +static void handle_delayed_timer(unsigned long data) +{ + struct delay_c *dc = (struct delay_c *)data; + + queue_work(kdelayd_wq, &dc->flush_expired_bios); +} + +static void queue_timeout(struct delay_c *dc, unsigned long expires) +{ + mutex_lock(&dc->timer_lock); + + if (!timer_pending(&dc->delay_timer) || expires < dc->delay_timer.expires) + mod_timer(&dc->delay_timer, expires); + + mutex_unlock(&dc->timer_lock); +} + +static void flush_bios(struct bio *bio) +{ + struct bio *n; + + while (bio) { + n = bio->bi_next; + bio->bi_next = NULL; + generic_make_request(bio); + bio = n; + } +} + +static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all) +{ + struct dm_delay_info *delayed, *next; + unsigned long next_expires = 0; + int start_timer = 0; + struct bio_list flush_bios = { }; + + mutex_lock(&delayed_bios_lock); + list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) { + if (flush_all || time_after_eq(jiffies, delayed->expires)) { + list_del(&delayed->list); + bio_list_add(&flush_bios, delayed->bio); + if ((bio_data_dir(delayed->bio) == WRITE)) + delayed->context->writes--; + else + delayed->context->reads--; + mempool_free(delayed, dc->delayed_pool); + continue; + } 
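		/*
		 * Not yet expired: remember the earliest remaining expiry so
		 * the delay timer below is re-armed for the next bio to
		 * become due.
		 */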
+ + if (!start_timer) { + start_timer = 1; + next_expires = delayed->expires; + } else + next_expires = min(next_expires, delayed->expires); + } + + mutex_unlock(&delayed_bios_lock); + + if (start_timer) + queue_timeout(dc, next_expires); + + return bio_list_get(&flush_bios); +} + +static void flush_expired_bios(struct work_struct *work) +{ + struct delay_c *dc; + + dc = container_of(work, struct delay_c, flush_expired_bios); + flush_bios(flush_delayed_bios(dc, 0)); +} + +/* + * Mapping parameters: + * <device> <offset> <delay> [<write_device> <write_offset> <write_delay>] + * + * With separate write parameters, the first set is only used for reads. + * Delays are specified in milliseconds. + */ +static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv) +{ + struct delay_c *dc; + unsigned long long tmpll; + + if (argc != 3 && argc != 6) { + ti->error = "requires exactly 3 or 6 arguments"; + return -EINVAL; + } + + dc = kmalloc(sizeof(*dc), GFP_KERNEL); + if (!dc) { + ti->error = "Cannot allocate context"; + return -ENOMEM; + } + + dc->reads = dc->writes = 0; + + if (sscanf(argv[1], "%llu", &tmpll) != 1) { + ti->error = "Invalid device sector"; + goto bad; + } + dc->start_read = tmpll; + + if (sscanf(argv[2], "%u", &dc->read_delay) != 1) { + ti->error = "Invalid delay"; + goto bad; + } + + if (dm_get_device(ti, argv[0], dc->start_read, ti->len, + dm_table_get_mode(ti->table), &dc->dev_read)) { + ti->error = "Device lookup failed"; + goto bad; + } + + dc->dev_write = NULL; + if (argc == 3) + goto out; + + if (sscanf(argv[4], "%llu", &tmpll) != 1) { + ti->error = "Invalid write device sector"; + goto bad_dev_read; + } + dc->start_write = tmpll; + + if (sscanf(argv[5], "%u", &dc->write_delay) != 1) { + ti->error = "Invalid write delay"; + goto bad_dev_read; + } + + if (dm_get_device(ti, argv[3], dc->start_write, ti->len, + dm_table_get_mode(ti->table), &dc->dev_write)) { + ti->error = "Write device lookup failed"; + goto bad_dev_read; + } + +out: + dc->delayed_pool = mempool_create_slab_pool(128, delayed_cache); + if (!dc->delayed_pool) { + DMERR("Couldn't create delayed bio pool."); + goto bad_dev_write; + } + + setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc); + + INIT_WORK(&dc->flush_expired_bios, flush_expired_bios); + INIT_LIST_HEAD(&dc->delayed_bios); + mutex_init(&dc->timer_lock); + atomic_set(&dc->may_delay, 1); + + ti->private = dc; + return 0; + +bad_dev_write: + if (dc->dev_write) + dm_put_device(ti, dc->dev_write); +bad_dev_read: + dm_put_device(ti, dc->dev_read); +bad: + kfree(dc); + return -EINVAL; +} + +static void delay_dtr(struct dm_target *ti) +{ + struct delay_c *dc = ti->private; + + flush_workqueue(kdelayd_wq); + + dm_put_device(ti, dc->dev_read); + + if (dc->dev_write) + dm_put_device(ti, dc->dev_write); + + mempool_destroy(dc->delayed_pool); + kfree(dc); +} + +static int delay_bio(struct delay_c *dc, int delay, struct bio *bio) +{ + struct dm_delay_info *delayed; + unsigned long expires = 0; + + if (!delay || !atomic_read(&dc->may_delay)) + return 1; + + delayed = mempool_alloc(dc->delayed_pool, GFP_NOIO); + + delayed->context = dc; + delayed->bio = bio; + delayed->expires = expires = jiffies + (delay * HZ / 1000); + + mutex_lock(&delayed_bios_lock); + + if (bio_data_dir(bio) == WRITE) + dc->writes++; + else + dc->reads++; + + list_add_tail(&delayed->list, &dc->delayed_bios); + + mutex_unlock(&delayed_bios_lock); + + queue_timeout(dc, expires); + + return 0; +} + +static void delay_presuspend(struct dm_target *ti) +{ + struct 
delay_c *dc = ti->private; + + atomic_set(&dc->may_delay, 0); + del_timer_sync(&dc->delay_timer); + flush_bios(flush_delayed_bios(dc, 1)); +} + +static void delay_resume(struct dm_target *ti) +{ + struct delay_c *dc = ti->private; + + atomic_set(&dc->may_delay, 1); +} + +static int delay_map(struct dm_target *ti, struct bio *bio, + union map_info *map_context) +{ + struct delay_c *dc = ti->private; + + if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) { + bio->bi_bdev = dc->dev_write->bdev; + bio->bi_sector = dc->start_write + + (bio->bi_sector - ti->begin); + + return delay_bio(dc, dc->write_delay, bio); + } + + bio->bi_bdev = dc->dev_read->bdev; + bio->bi_sector = dc->start_read + + (bio->bi_sector - ti->begin); + + return delay_bio(dc, dc->read_delay, bio); +} + +static int delay_status(struct dm_target *ti, status_type_t type, + char *result, unsigned maxlen) +{ + struct delay_c *dc = ti->private; + int sz = 0; + + switch (type) { + case STATUSTYPE_INFO: + DMEMIT("%u %u", dc->reads, dc->writes); + break; + + case STATUSTYPE_TABLE: + DMEMIT("%s %llu %u", dc->dev_read->name, + (unsigned long long) dc->start_read, + dc->read_delay); + if (dc->dev_write) + DMEMIT(" %s %llu %u", dc->dev_write->name, + (unsigned long long) dc->start_write, + dc->write_delay); + break; + } + + return 0; +} + +static struct target_type delay_target = { + .name = "delay", + .version = {1, 0, 2}, + .module = THIS_MODULE, + .ctr = delay_ctr, + .dtr = delay_dtr, + .map = delay_map, + .presuspend = delay_presuspend, + .resume = delay_resume, + .status = delay_status, +}; + +static int __init dm_delay_init(void) +{ + int r = -ENOMEM; + + kdelayd_wq = create_workqueue("kdelayd"); + if (!kdelayd_wq) { + DMERR("Couldn't start kdelayd"); + goto bad_queue; + } + + delayed_cache = KMEM_CACHE(dm_delay_info, 0); + if (!delayed_cache) { + DMERR("Couldn't create delayed bio cache."); + goto bad_memcache; + } + + r = dm_register_target(&delay_target); + if (r < 0) { + DMERR("register failed %d", r); + goto bad_register; + } + + return 0; + +bad_register: + kmem_cache_destroy(delayed_cache); +bad_memcache: + destroy_workqueue(kdelayd_wq); +bad_queue: + return r; +} + +static void __exit dm_delay_exit(void) +{ + int r = dm_unregister_target(&delay_target); + + if (r < 0) + DMERR("unregister failed %d", r); + + kmem_cache_destroy(delayed_cache); + destroy_workqueue(kdelayd_wq); +} + +/* Module hooks */ +module_init(dm_delay_init); +module_exit(dm_delay_exit); + +MODULE_DESCRIPTION(DM_NAME " delay target"); +MODULE_AUTHOR("Heinz Mauelshagen <mauelshagen@redhat.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c new file mode 100644 index 0000000..01590f3 --- /dev/null +++ b/drivers/md/dm-exception-store.c @@ -0,0 +1,756 @@ +/* + * dm-exception-store.c + * + * Copyright (C) 2001-2002 Sistina Software (UK) Limited. + * Copyright (C) 2006 Red Hat GmbH + * + * This file is released under the GPL. + */ + +#include "dm-snap.h" + +#include <linux/mm.h> +#include <linux/pagemap.h> +#include <linux/vmalloc.h> +#include <linux/slab.h> +#include <linux/dm-io.h> +#include <linux/dm-kcopyd.h> + +#define DM_MSG_PREFIX "snapshots" +#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32 /* 16KB */ + +/*----------------------------------------------------------------- + * Persistent snapshots, by persistent we mean that the snapshot + * will survive a reboot. 
+ *---------------------------------------------------------------*/ + +/* + * We need to store a record of which parts of the origin have + * been copied to the snapshot device. The snapshot code + * requires that we copy exception chunks to chunk aligned areas + * of the COW store. It makes sense therefore, to store the + * metadata in chunk size blocks. + * + * There is no backward or forward compatibility implemented, + * snapshots with different disk versions than the kernel will + * not be usable. It is expected that "lvcreate" will blank out + * the start of a fresh COW device before calling the snapshot + * constructor. + * + * The first chunk of the COW device just contains the header. + * After this there is a chunk filled with exception metadata, + * followed by as many exception chunks as can fit in the + * metadata areas. + * + * All on disk structures are in little-endian format. The end + * of the exceptions info is indicated by an exception with a + * new_chunk of 0, which is invalid since it would point to the + * header chunk. + */ + +/* + * Magic for persistent snapshots: "SnAp" - Feeble isn't it. + */ +#define SNAP_MAGIC 0x70416e53 + +/* + * The on-disk version of the metadata. + */ +#define SNAPSHOT_DISK_VERSION 1 + +struct disk_header { + uint32_t magic; + + /* + * Is this snapshot valid. There is no way of recovering + * an invalid snapshot. + */ + uint32_t valid; + + /* + * Simple, incrementing version. no backward + * compatibility. + */ + uint32_t version; + + /* In sectors */ + uint32_t chunk_size; +}; + +struct disk_exception { + uint64_t old_chunk; + uint64_t new_chunk; +}; + +struct commit_callback { + void (*callback)(void *, int success); + void *context; +}; + +/* + * The top level structure for a persistent exception store. + */ +struct pstore { + struct dm_snapshot *snap; /* up pointer to my snapshot */ + int version; + int valid; + uint32_t exceptions_per_area; + + /* + * Now that we have an asynchronous kcopyd there is no + * need for large chunk sizes, so it wont hurt to have a + * whole chunks worth of metadata in memory at once. + */ + void *area; + + /* + * An area of zeros used to clear the next area. + */ + void *zero_area; + + /* + * Used to keep track of which metadata area the data in + * 'chunk' refers to. + */ + chunk_t current_area; + + /* + * The next free chunk for an exception. + */ + chunk_t next_free; + + /* + * The index of next free exception in the current + * metadata area. + */ + uint32_t current_committed; + + atomic_t pending_count; + uint32_t callback_count; + struct commit_callback *callbacks; + struct dm_io_client *io_client; + + struct workqueue_struct *metadata_wq; +}; + +static unsigned sectors_to_pages(unsigned sectors) +{ + return DIV_ROUND_UP(sectors, PAGE_SIZE >> 9); +} + +static int alloc_area(struct pstore *ps) +{ + int r = -ENOMEM; + size_t len; + + len = ps->snap->chunk_size << SECTOR_SHIFT; + + /* + * Allocate the chunk_size block of memory that will hold + * a single metadata area. 
+ */ + ps->area = vmalloc(len); + if (!ps->area) + return r; + + ps->zero_area = vmalloc(len); + if (!ps->zero_area) { + vfree(ps->area); + return r; + } + memset(ps->zero_area, 0, len); + + return 0; +} + +static void free_area(struct pstore *ps) +{ + vfree(ps->area); + ps->area = NULL; + vfree(ps->zero_area); + ps->zero_area = NULL; +} + +struct mdata_req { + struct dm_io_region *where; + struct dm_io_request *io_req; + struct work_struct work; + int result; +}; + +static void do_metadata(struct work_struct *work) +{ + struct mdata_req *req = container_of(work, struct mdata_req, work); + + req->result = dm_io(req->io_req, 1, req->where, NULL); +} + +/* + * Read or write a chunk aligned and sized block of data from a device. + */ +static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata) +{ + struct dm_io_region where = { + .bdev = ps->snap->cow->bdev, + .sector = ps->snap->chunk_size * chunk, + .count = ps->snap->chunk_size, + }; + struct dm_io_request io_req = { + .bi_rw = rw, + .mem.type = DM_IO_VMA, + .mem.ptr.vma = ps->area, + .client = ps->io_client, + .notify.fn = NULL, + }; + struct mdata_req req; + + if (!metadata) + return dm_io(&io_req, 1, &where, NULL); + + req.where = &where; + req.io_req = &io_req; + + /* + * Issue the synchronous I/O from a different thread + * to avoid generic_make_request recursion. + */ + INIT_WORK(&req.work, do_metadata); + queue_work(ps->metadata_wq, &req.work); + flush_workqueue(ps->metadata_wq); + + return req.result; +} + +/* + * Convert a metadata area index to a chunk index. + */ +static chunk_t area_location(struct pstore *ps, chunk_t area) +{ + return 1 + ((ps->exceptions_per_area + 1) * area); +} + +/* + * Read or write a metadata area. Remembering to skip the first + * chunk which holds the header. 
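 *
 * A worked example (illustrative): with the 32-sector (16 KiB) default
 * chunk size and the 16-byte struct disk_exception above, one metadata
 * chunk holds 16384 / 16 = 1024 exceptions, so area_location() places
 * the metadata of area 0 in chunk 1, area 1 in chunk 1026, area 2 in
 * chunk 2051, and so on, with each area's data chunks in between
 * (chunk 0 holds the header).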
+ */ +static int area_io(struct pstore *ps, int rw) +{ + int r; + chunk_t chunk; + + chunk = area_location(ps, ps->current_area); + + r = chunk_io(ps, chunk, rw, 0); + if (r) + return r; + + return 0; +} + +static void zero_memory_area(struct pstore *ps) +{ + memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT); +} + +static int zero_disk_area(struct pstore *ps, chunk_t area) +{ + struct dm_io_region where = { + .bdev = ps->snap->cow->bdev, + .sector = ps->snap->chunk_size * area_location(ps, area), + .count = ps->snap->chunk_size, + }; + struct dm_io_request io_req = { + .bi_rw = WRITE, + .mem.type = DM_IO_VMA, + .mem.ptr.vma = ps->zero_area, + .client = ps->io_client, + .notify.fn = NULL, + }; + + return dm_io(&io_req, 1, &where, NULL); +} + +static int read_header(struct pstore *ps, int *new_snapshot) +{ + int r; + struct disk_header *dh; + chunk_t chunk_size; + int chunk_size_supplied = 1; + + /* + * Use default chunk size (or hardsect_size, if larger) if none supplied + */ + if (!ps->snap->chunk_size) { + ps->snap->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS, + bdev_hardsect_size(ps->snap->cow->bdev) >> 9); + ps->snap->chunk_mask = ps->snap->chunk_size - 1; + ps->snap->chunk_shift = ffs(ps->snap->chunk_size) - 1; + chunk_size_supplied = 0; + } + + ps->io_client = dm_io_client_create(sectors_to_pages(ps->snap-> + chunk_size)); + if (IS_ERR(ps->io_client)) + return PTR_ERR(ps->io_client); + + r = alloc_area(ps); + if (r) + return r; + + r = chunk_io(ps, 0, READ, 1); + if (r) + goto bad; + + dh = (struct disk_header *) ps->area; + + if (le32_to_cpu(dh->magic) == 0) { + *new_snapshot = 1; + return 0; + } + + if (le32_to_cpu(dh->magic) != SNAP_MAGIC) { + DMWARN("Invalid or corrupt snapshot"); + r = -ENXIO; + goto bad; + } + + *new_snapshot = 0; + ps->valid = le32_to_cpu(dh->valid); + ps->version = le32_to_cpu(dh->version); + chunk_size = le32_to_cpu(dh->chunk_size); + + if (!chunk_size_supplied || ps->snap->chunk_size == chunk_size) + return 0; + + DMWARN("chunk size %llu in device metadata overrides " + "table chunk size of %llu.", + (unsigned long long)chunk_size, + (unsigned long long)ps->snap->chunk_size); + + /* We had a bogus chunk_size. Fix stuff up. */ + free_area(ps); + + ps->snap->chunk_size = chunk_size; + ps->snap->chunk_mask = chunk_size - 1; + ps->snap->chunk_shift = ffs(chunk_size) - 1; + + r = dm_io_client_resize(sectors_to_pages(ps->snap->chunk_size), + ps->io_client); + if (r) + return r; + + r = alloc_area(ps); + return r; + +bad: + free_area(ps); + return r; +} + +static int write_header(struct pstore *ps) +{ + struct disk_header *dh; + + memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT); + + dh = (struct disk_header *) ps->area; + dh->magic = cpu_to_le32(SNAP_MAGIC); + dh->valid = cpu_to_le32(ps->valid); + dh->version = cpu_to_le32(ps->version); + dh->chunk_size = cpu_to_le32(ps->snap->chunk_size); + + return chunk_io(ps, 0, WRITE, 1); +} + +/* + * Access functions for the disk exceptions, these do the endian conversions. 
+ */ +static struct disk_exception *get_exception(struct pstore *ps, uint32_t index) +{ + BUG_ON(index >= ps->exceptions_per_area); + + return ((struct disk_exception *) ps->area) + index; +} + +static void read_exception(struct pstore *ps, + uint32_t index, struct disk_exception *result) +{ + struct disk_exception *e = get_exception(ps, index); + + /* copy it */ + result->old_chunk = le64_to_cpu(e->old_chunk); + result->new_chunk = le64_to_cpu(e->new_chunk); +} + +static void write_exception(struct pstore *ps, + uint32_t index, struct disk_exception *de) +{ + struct disk_exception *e = get_exception(ps, index); + + /* copy it */ + e->old_chunk = cpu_to_le64(de->old_chunk); + e->new_chunk = cpu_to_le64(de->new_chunk); +} + +/* + * Registers the exceptions that are present in the current area. + * 'full' is filled in to indicate if the area has been + * filled. + */ +static int insert_exceptions(struct pstore *ps, int *full) +{ + int r; + unsigned int i; + struct disk_exception de; + + /* presume the area is full */ + *full = 1; + + for (i = 0; i < ps->exceptions_per_area; i++) { + read_exception(ps, i, &de); + + /* + * If the new_chunk is pointing at the start of + * the COW device, where the first metadata area + * is we know that we've hit the end of the + * exceptions. Therefore the area is not full. + */ + if (de.new_chunk == 0LL) { + ps->current_committed = i; + *full = 0; + break; + } + + /* + * Keep track of the start of the free chunks. + */ + if (ps->next_free <= de.new_chunk) + ps->next_free = de.new_chunk + 1; + + /* + * Otherwise we add the exception to the snapshot. + */ + r = dm_add_exception(ps->snap, de.old_chunk, de.new_chunk); + if (r) + return r; + } + + return 0; +} + +static int read_exceptions(struct pstore *ps) +{ + int r, full = 1; + + /* + * Keeping reading chunks and inserting exceptions until + * we find a partially full area. + */ + for (ps->current_area = 0; full; ps->current_area++) { + r = area_io(ps, READ); + if (r) + return r; + + r = insert_exceptions(ps, &full); + if (r) + return r; + } + + ps->current_area--; + + return 0; +} + +static struct pstore *get_info(struct exception_store *store) +{ + return (struct pstore *) store->context; +} + +static void persistent_fraction_full(struct exception_store *store, + sector_t *numerator, sector_t *denominator) +{ + *numerator = get_info(store)->next_free * store->snap->chunk_size; + *denominator = get_dev_size(store->snap->cow->bdev); +} + +static void persistent_destroy(struct exception_store *store) +{ + struct pstore *ps = get_info(store); + + destroy_workqueue(ps->metadata_wq); + dm_io_client_destroy(ps->io_client); + vfree(ps->callbacks); + free_area(ps); + kfree(ps); +} + +static int persistent_read_metadata(struct exception_store *store) +{ + int r, uninitialized_var(new_snapshot); + struct pstore *ps = get_info(store); + + /* + * Read the snapshot header. + */ + r = read_header(ps, &new_snapshot); + if (r) + return r; + + /* + * Now we know correct chunk_size, complete the initialisation. + */ + ps->exceptions_per_area = (ps->snap->chunk_size << SECTOR_SHIFT) / + sizeof(struct disk_exception); + ps->callbacks = dm_vcalloc(ps->exceptions_per_area, + sizeof(*ps->callbacks)); + if (!ps->callbacks) + return -ENOMEM; + + /* + * Do we need to setup a new snapshot ? 
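 * (read_header() above set new_snapshot when the on-disk magic was zero,
 * i.e. the COW device starts out blank and must be initialised.)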
+ */ + if (new_snapshot) { + r = write_header(ps); + if (r) { + DMWARN("write_header failed"); + return r; + } + + ps->current_area = 0; + zero_memory_area(ps); + r = zero_disk_area(ps, 0); + if (r) { + DMWARN("zero_disk_area(0) failed"); + return r; + } + } else { + /* + * Sanity checks. + */ + if (ps->version != SNAPSHOT_DISK_VERSION) { + DMWARN("unable to handle snapshot disk version %d", + ps->version); + return -EINVAL; + } + + /* + * Metadata are valid, but snapshot is invalidated + */ + if (!ps->valid) + return 1; + + /* + * Read the metadata. + */ + r = read_exceptions(ps); + if (r) + return r; + } + + return 0; +} + +static int persistent_prepare(struct exception_store *store, + struct dm_snap_exception *e) +{ + struct pstore *ps = get_info(store); + uint32_t stride; + chunk_t next_free; + sector_t size = get_dev_size(store->snap->cow->bdev); + + /* Is there enough room ? */ + if (size < ((ps->next_free + 1) * store->snap->chunk_size)) + return -ENOSPC; + + e->new_chunk = ps->next_free; + + /* + * Move onto the next free pending, making sure to take + * into account the location of the metadata chunks. + */ + stride = (ps->exceptions_per_area + 1); + next_free = ++ps->next_free; + if (sector_div(next_free, stride) == 1) + ps->next_free++; + + atomic_inc(&ps->pending_count); + return 0; +} + +static void persistent_commit(struct exception_store *store, + struct dm_snap_exception *e, + void (*callback) (void *, int success), + void *callback_context) +{ + unsigned int i; + struct pstore *ps = get_info(store); + struct disk_exception de; + struct commit_callback *cb; + + de.old_chunk = e->old_chunk; + de.new_chunk = e->new_chunk; + write_exception(ps, ps->current_committed++, &de); + + /* + * Add the callback to the back of the array. This code + * is the only place where the callback array is + * manipulated, and we know that it will never be called + * multiple times concurrently. + */ + cb = ps->callbacks + ps->callback_count++; + cb->callback = callback; + cb->context = callback_context; + + /* + * If there are exceptions in flight and we have not yet + * filled this metadata area there's nothing more to do. + */ + if (!atomic_dec_and_test(&ps->pending_count) && + (ps->current_committed != ps->exceptions_per_area)) + return; + + /* + * If we completely filled the current area, then wipe the next one. + */ + if ((ps->current_committed == ps->exceptions_per_area) && + zero_disk_area(ps, ps->current_area + 1)) + ps->valid = 0; + + /* + * Commit exceptions to disk. + */ + if (ps->valid && area_io(ps, WRITE)) + ps->valid = 0; + + /* + * Advance to the next area if this one is full. 
+ */ + if (ps->current_committed == ps->exceptions_per_area) { + ps->current_committed = 0; + ps->current_area++; + zero_memory_area(ps); + } + + for (i = 0; i < ps->callback_count; i++) { + cb = ps->callbacks + i; + cb->callback(cb->context, ps->valid); + } + + ps->callback_count = 0; +} + +static void persistent_drop(struct exception_store *store) +{ + struct pstore *ps = get_info(store); + + ps->valid = 0; + if (write_header(ps)) + DMWARN("write header failed"); +} + +int dm_create_persistent(struct exception_store *store) +{ + struct pstore *ps; + + /* allocate the pstore */ + ps = kmalloc(sizeof(*ps), GFP_KERNEL); + if (!ps) + return -ENOMEM; + + ps->snap = store->snap; + ps->valid = 1; + ps->version = SNAPSHOT_DISK_VERSION; + ps->area = NULL; + ps->next_free = 2; /* skipping the header and first area */ + ps->current_committed = 0; + + ps->callback_count = 0; + atomic_set(&ps->pending_count, 0); + ps->callbacks = NULL; + + ps->metadata_wq = create_singlethread_workqueue("ksnaphd"); + if (!ps->metadata_wq) { + kfree(ps); + DMERR("couldn't start header metadata update thread"); + return -ENOMEM; + } + + store->destroy = persistent_destroy; + store->read_metadata = persistent_read_metadata; + store->prepare_exception = persistent_prepare; + store->commit_exception = persistent_commit; + store->drop_snapshot = persistent_drop; + store->fraction_full = persistent_fraction_full; + store->context = ps; + + return 0; +} + +/*----------------------------------------------------------------- + * Implementation of the store for non-persistent snapshots. + *---------------------------------------------------------------*/ +struct transient_c { + sector_t next_free; +}; + +static void transient_destroy(struct exception_store *store) +{ + kfree(store->context); +} + +static int transient_read_metadata(struct exception_store *store) +{ + return 0; +} + +static int transient_prepare(struct exception_store *store, + struct dm_snap_exception *e) +{ + struct transient_c *tc = (struct transient_c *) store->context; + sector_t size = get_dev_size(store->snap->cow->bdev); + + if (size < (tc->next_free + store->snap->chunk_size)) + return -1; + + e->new_chunk = sector_to_chunk(store->snap, tc->next_free); + tc->next_free += store->snap->chunk_size; + + return 0; +} + +static void transient_commit(struct exception_store *store, + struct dm_snap_exception *e, + void (*callback) (void *, int success), + void *callback_context) +{ + /* Just succeed */ + callback(callback_context, 1); +} + +static void transient_fraction_full(struct exception_store *store, + sector_t *numerator, sector_t *denominator) +{ + *numerator = ((struct transient_c *) store->context)->next_free; + *denominator = get_dev_size(store->snap->cow->bdev); +} + +int dm_create_transient(struct exception_store *store) +{ + struct transient_c *tc; + + store->destroy = transient_destroy; + store->read_metadata = transient_read_metadata; + store->prepare_exception = transient_prepare; + store->commit_exception = transient_commit; + store->drop_snapshot = NULL; + store->fraction_full = transient_fraction_full; + + tc = kmalloc(sizeof(struct transient_c), GFP_KERNEL); + if (!tc) + return -ENOMEM; + + tc->next_free = 0; + store->context = tc; + + return 0; +} diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c new file mode 100644 index 0000000..657e1dd --- /dev/null +++ b/drivers/md/dm-io.c @@ -0,0 +1,465 @@ +/* + * Copyright (C) 2003 Sistina Software + * Copyright (C) 2006 Red Hat GmbH + * + * This file is released under the GPL. 
+ */ + +#include <linux/device-mapper.h> + +#include <linux/bio.h> +#include <linux/mempool.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/dm-io.h> + +struct dm_io_client { + mempool_t *pool; + struct bio_set *bios; +}; + +/* FIXME: can we shrink this ? */ +struct io { + unsigned long error_bits; + atomic_t count; + struct task_struct *sleeper; + struct dm_io_client *client; + io_notify_fn callback; + void *context; +}; + +/* + * io contexts are only dynamically allocated for asynchronous + * io. Since async io is likely to be the majority of io we'll + * have the same number of io contexts as bios! (FIXME: must reduce this). + */ + +static unsigned int pages_to_ios(unsigned int pages) +{ + return 4 * pages; /* too many ? */ +} + +/* + * Create a client with mempool and bioset. + */ +struct dm_io_client *dm_io_client_create(unsigned num_pages) +{ + unsigned ios = pages_to_ios(num_pages); + struct dm_io_client *client; + + client = kmalloc(sizeof(*client), GFP_KERNEL); + if (!client) + return ERR_PTR(-ENOMEM); + + client->pool = mempool_create_kmalloc_pool(ios, sizeof(struct io)); + if (!client->pool) + goto bad; + + client->bios = bioset_create(16, 16); + if (!client->bios) + goto bad; + + return client; + + bad: + if (client->pool) + mempool_destroy(client->pool); + kfree(client); + return ERR_PTR(-ENOMEM); +} +EXPORT_SYMBOL(dm_io_client_create); + +int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client) +{ + return mempool_resize(client->pool, pages_to_ios(num_pages), + GFP_KERNEL); +} +EXPORT_SYMBOL(dm_io_client_resize); + +void dm_io_client_destroy(struct dm_io_client *client) +{ + mempool_destroy(client->pool); + bioset_free(client->bios); + kfree(client); +} +EXPORT_SYMBOL(dm_io_client_destroy); + +/*----------------------------------------------------------------- + * We need to keep track of which region a bio is doing io for. + * In order to save a memory allocation we store this the last + * bvec which we know is unused (blech). + * XXX This is ugly and can OOPS with some configs... find another way. + *---------------------------------------------------------------*/ +static inline void bio_set_region(struct bio *bio, unsigned region) +{ + bio->bi_io_vec[bio->bi_max_vecs].bv_len = region; +} + +static inline unsigned bio_get_region(struct bio *bio) +{ + return bio->bi_io_vec[bio->bi_max_vecs].bv_len; +} + +/*----------------------------------------------------------------- + * We need an io object to keep track of the number of bios that + * have been dispatched for a particular io. + *---------------------------------------------------------------*/ +static void dec_count(struct io *io, unsigned int region, int error) +{ + if (error) + set_bit(region, &io->error_bits); + + if (atomic_dec_and_test(&io->count)) { + if (io->sleeper) + wake_up_process(io->sleeper); + + else { + unsigned long r = io->error_bits; + io_notify_fn fn = io->callback; + void *context = io->context; + + mempool_free(io, io->client->pool); + fn(r, context); + } + } +} + +static void endio(struct bio *bio, int error) +{ + struct io *io; + unsigned region; + + if (error && bio_data_dir(bio) == READ) + zero_fill_bio(bio); + + /* + * The bio destructor in bio_put() may use the io object. 
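+	 * Copy bi_private and the region out first, and restore the
+	 * bi_max_vecs that do_region() decremented to hide the region
+	 * bvec, because dm_bio_destructor() hands the bio back to the
+	 * client's bioset and it must not be touched after bio_put().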
+ */ + io = bio->bi_private; + region = bio_get_region(bio); + + bio->bi_max_vecs++; + bio_put(bio); + + dec_count(io, region, error); +} + +/*----------------------------------------------------------------- + * These little objects provide an abstraction for getting a new + * destination page for io. + *---------------------------------------------------------------*/ +struct dpages { + void (*get_page)(struct dpages *dp, + struct page **p, unsigned long *len, unsigned *offset); + void (*next_page)(struct dpages *dp); + + unsigned context_u; + void *context_ptr; +}; + +/* + * Functions for getting the pages from a list. + */ +static void list_get_page(struct dpages *dp, + struct page **p, unsigned long *len, unsigned *offset) +{ + unsigned o = dp->context_u; + struct page_list *pl = (struct page_list *) dp->context_ptr; + + *p = pl->page; + *len = PAGE_SIZE - o; + *offset = o; +} + +static void list_next_page(struct dpages *dp) +{ + struct page_list *pl = (struct page_list *) dp->context_ptr; + dp->context_ptr = pl->next; + dp->context_u = 0; +} + +static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset) +{ + dp->get_page = list_get_page; + dp->next_page = list_next_page; + dp->context_u = offset; + dp->context_ptr = pl; +} + +/* + * Functions for getting the pages from a bvec. + */ +static void bvec_get_page(struct dpages *dp, + struct page **p, unsigned long *len, unsigned *offset) +{ + struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr; + *p = bvec->bv_page; + *len = bvec->bv_len; + *offset = bvec->bv_offset; +} + +static void bvec_next_page(struct dpages *dp) +{ + struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr; + dp->context_ptr = bvec + 1; +} + +static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec) +{ + dp->get_page = bvec_get_page; + dp->next_page = bvec_next_page; + dp->context_ptr = bvec; +} + +/* + * Functions for getting the pages from a VMA. + */ +static void vm_get_page(struct dpages *dp, + struct page **p, unsigned long *len, unsigned *offset) +{ + *p = vmalloc_to_page(dp->context_ptr); + *offset = dp->context_u; + *len = PAGE_SIZE - dp->context_u; +} + +static void vm_next_page(struct dpages *dp) +{ + dp->context_ptr += PAGE_SIZE - dp->context_u; + dp->context_u = 0; +} + +static void vm_dp_init(struct dpages *dp, void *data) +{ + dp->get_page = vm_get_page; + dp->next_page = vm_next_page; + dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1); + dp->context_ptr = data; +} + +static void dm_bio_destructor(struct bio *bio) +{ + struct io *io = bio->bi_private; + + bio_free(bio, io->client->bios); +} + +/* + * Functions for getting the pages from kernel memory. + */ +static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len, + unsigned *offset) +{ + *p = virt_to_page(dp->context_ptr); + *offset = dp->context_u; + *len = PAGE_SIZE - dp->context_u; +} + +static void km_next_page(struct dpages *dp) +{ + dp->context_ptr += PAGE_SIZE - dp->context_u; + dp->context_u = 0; +} + +static void km_dp_init(struct dpages *dp, void *data) +{ + dp->get_page = km_get_page; + dp->next_page = km_next_page; + dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1); + dp->context_ptr = data; +} + +/*----------------------------------------------------------------- + * IO routines that accept a list of pages. 
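+ *
+ * For reference, callers drive all of this through dm_io() below.  A
+ * minimal synchronous read, as a sketch only ('dev', 'buf' and 'client'
+ * are a made-up caller's names, not part of this patch; 'buf' is a
+ * vmalloc'd buffer large enough for the region):
+ *
+ *	struct dm_io_region where = {
+ *		.bdev = dev->bdev,
+ *		.sector = 0,
+ *		.count = 8,
+ *	};
+ *	struct dm_io_request io_req = {
+ *		.bi_rw = READ,
+ *		.mem.type = DM_IO_VMA,
+ *		.mem.ptr.vma = buf,
+ *		.notify.fn = NULL,
+ *		.client = client,
+ *	};
+ *	r = dm_io(&io_req, 1, &where, NULL);
+ *
+ * Leaving notify.fn NULL makes dm_io() take the synchronous sync_io()
+ * path; supplying a callback and context switches it to async_io().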
+ *---------------------------------------------------------------*/ +static void do_region(int rw, unsigned region, struct dm_io_region *where, + struct dpages *dp, struct io *io) +{ + struct bio *bio; + struct page *page; + unsigned long len; + unsigned offset; + unsigned num_bvecs; + sector_t remaining = where->count; + + while (remaining) { + /* + * Allocate a suitably sized-bio: we add an extra + * bvec for bio_get/set_region() and decrement bi_max_vecs + * to hide it from bio_add_page(). + */ + num_bvecs = dm_sector_div_up(remaining, + (PAGE_SIZE >> SECTOR_SHIFT)); + num_bvecs = 1 + min_t(int, bio_get_nr_vecs(where->bdev), + num_bvecs); + if (unlikely(num_bvecs > BIO_MAX_PAGES)) + num_bvecs = BIO_MAX_PAGES; + bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios); + bio->bi_sector = where->sector + (where->count - remaining); + bio->bi_bdev = where->bdev; + bio->bi_end_io = endio; + bio->bi_private = io; + bio->bi_destructor = dm_bio_destructor; + bio->bi_max_vecs--; + bio_set_region(bio, region); + + /* + * Try and add as many pages as possible. + */ + while (remaining) { + dp->get_page(dp, &page, &len, &offset); + len = min(len, to_bytes(remaining)); + if (!bio_add_page(bio, page, len, offset)) + break; + + offset = 0; + remaining -= to_sector(len); + dp->next_page(dp); + } + + atomic_inc(&io->count); + submit_bio(rw, bio); + } +} + +static void dispatch_io(int rw, unsigned int num_regions, + struct dm_io_region *where, struct dpages *dp, + struct io *io, int sync) +{ + int i; + struct dpages old_pages = *dp; + + if (sync) + rw |= (1 << BIO_RW_SYNC); + + /* + * For multiple regions we need to be careful to rewind + * the dp object for each call to do_region. + */ + for (i = 0; i < num_regions; i++) { + *dp = old_pages; + if (where[i].count) + do_region(rw, i, where + i, dp, io); + } + + /* + * Drop the extra reference that we were holding to avoid + * the io being completed too early. + */ + dec_count(io, 0, 0); +} + +static int sync_io(struct dm_io_client *client, unsigned int num_regions, + struct dm_io_region *where, int rw, struct dpages *dp, + unsigned long *error_bits) +{ + struct io io; + + if (num_regions > 1 && (rw & RW_MASK) != WRITE) { + WARN_ON(1); + return -EIO; + } + + io.error_bits = 0; + atomic_set(&io.count, 1); /* see dispatch_io() */ + io.sleeper = current; + io.client = client; + + dispatch_io(rw, num_regions, where, dp, &io, 1); + + while (1) { + set_current_state(TASK_UNINTERRUPTIBLE); + + if (!atomic_read(&io.count) || signal_pending(current)) + break; + + io_schedule(); + } + set_current_state(TASK_RUNNING); + + if (atomic_read(&io.count)) + return -EINTR; + + if (error_bits) + *error_bits = io.error_bits; + + return io.error_bits ? 
-EIO : 0; +} + +static int async_io(struct dm_io_client *client, unsigned int num_regions, + struct dm_io_region *where, int rw, struct dpages *dp, + io_notify_fn fn, void *context) +{ + struct io *io; + + if (num_regions > 1 && (rw & RW_MASK) != WRITE) { + WARN_ON(1); + fn(1, context); + return -EIO; + } + + io = mempool_alloc(client->pool, GFP_NOIO); + io->error_bits = 0; + atomic_set(&io->count, 1); /* see dispatch_io() */ + io->sleeper = NULL; + io->client = client; + io->callback = fn; + io->context = context; + + dispatch_io(rw, num_regions, where, dp, io, 0); + return 0; +} + +static int dp_init(struct dm_io_request *io_req, struct dpages *dp) +{ + /* Set up dpages based on memory type */ + switch (io_req->mem.type) { + case DM_IO_PAGE_LIST: + list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset); + break; + + case DM_IO_BVEC: + bvec_dp_init(dp, io_req->mem.ptr.bvec); + break; + + case DM_IO_VMA: + vm_dp_init(dp, io_req->mem.ptr.vma); + break; + + case DM_IO_KMEM: + km_dp_init(dp, io_req->mem.ptr.addr); + break; + + default: + return -EINVAL; + } + + return 0; +} + +/* + * New collapsed (a)synchronous interface. + * + * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug + * the queue with blk_unplug() some time later or set the BIO_RW_SYNC bit in + * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to + * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c. + */ +int dm_io(struct dm_io_request *io_req, unsigned num_regions, + struct dm_io_region *where, unsigned long *sync_error_bits) +{ + int r; + struct dpages dp; + + r = dp_init(io_req, &dp); + if (r) + return r; + + if (!io_req->notify.fn) + return sync_io(io_req->client, num_regions, where, + io_req->bi_rw, &dp, sync_error_bits); + + return async_io(io_req->client, num_regions, where, io_req->bi_rw, + &dp, io_req->notify.fn, io_req->notify.context); +} +EXPORT_SYMBOL(dm_io); diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c new file mode 100644 index 0000000..c4819b6 --- /dev/null +++ b/drivers/md/dm-ioctl.c @@ -0,0 +1,1562 @@ +/* + * Copyright (C) 2001, 2002 Sistina Software (UK) Limited. + * Copyright (C) 2004 - 2006 Red Hat, Inc. All rights reserved. + * + * This file is released under the GPL. + */ + +#include "dm.h" + +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <linux/miscdevice.h> +#include <linux/init.h> +#include <linux/wait.h> +#include <linux/slab.h> +#include <linux/dm-ioctl.h> +#include <linux/hdreg.h> +#include <linux/compat.h> + +#include <asm/uaccess.h> + +#define DM_MSG_PREFIX "ioctl" +#define DM_DRIVER_EMAIL "dm-devel@redhat.com" + +/*----------------------------------------------------------------- + * The ioctl interface needs to be able to look up devices by + * name or uuid. + *---------------------------------------------------------------*/ +struct hash_cell { + struct list_head name_list; + struct list_head uuid_list; + + char *name; + char *uuid; + struct mapped_device *md; + struct dm_table *new_map; +}; + +struct vers_iter { + size_t param_size; + struct dm_target_versions *vers, *old_vers; + char *end; + uint32_t flags; +}; + + +#define NUM_BUCKETS 64 +#define MASK_BUCKETS (NUM_BUCKETS - 1) +static struct list_head _name_buckets[NUM_BUCKETS]; +static struct list_head _uuid_buckets[NUM_BUCKETS]; + +static void dm_hash_remove_all(int keep_open_devices); + +/* + * Guards access to both hash tables. 
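+ * Plain lookups take it for read; anything that changes the tables
+ * (insert, remove, rename, or swapping in a new map) must hold it for
+ * write.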
+ */ +static DECLARE_RWSEM(_hash_lock); + +static void init_buckets(struct list_head *buckets) +{ + unsigned int i; + + for (i = 0; i < NUM_BUCKETS; i++) + INIT_LIST_HEAD(buckets + i); +} + +static int dm_hash_init(void) +{ + init_buckets(_name_buckets); + init_buckets(_uuid_buckets); + return 0; +} + +static void dm_hash_exit(void) +{ + dm_hash_remove_all(0); +} + +/*----------------------------------------------------------------- + * Hash function: + * We're not really concerned with the str hash function being + * fast since it's only used by the ioctl interface. + *---------------------------------------------------------------*/ +static unsigned int hash_str(const char *str) +{ + const unsigned int hash_mult = 2654435387U; + unsigned int h = 0; + + while (*str) + h = (h + (unsigned int) *str++) * hash_mult; + + return h & MASK_BUCKETS; +} + +/*----------------------------------------------------------------- + * Code for looking up a device by name + *---------------------------------------------------------------*/ +static struct hash_cell *__get_name_cell(const char *str) +{ + struct hash_cell *hc; + unsigned int h = hash_str(str); + + list_for_each_entry (hc, _name_buckets + h, name_list) + if (!strcmp(hc->name, str)) { + dm_get(hc->md); + return hc; + } + + return NULL; +} + +static struct hash_cell *__get_uuid_cell(const char *str) +{ + struct hash_cell *hc; + unsigned int h = hash_str(str); + + list_for_each_entry (hc, _uuid_buckets + h, uuid_list) + if (!strcmp(hc->uuid, str)) { + dm_get(hc->md); + return hc; + } + + return NULL; +} + +/*----------------------------------------------------------------- + * Inserting, removing and renaming a device. + *---------------------------------------------------------------*/ +static struct hash_cell *alloc_cell(const char *name, const char *uuid, + struct mapped_device *md) +{ + struct hash_cell *hc; + + hc = kmalloc(sizeof(*hc), GFP_KERNEL); + if (!hc) + return NULL; + + hc->name = kstrdup(name, GFP_KERNEL); + if (!hc->name) { + kfree(hc); + return NULL; + } + + if (!uuid) + hc->uuid = NULL; + + else { + hc->uuid = kstrdup(uuid, GFP_KERNEL); + if (!hc->uuid) { + kfree(hc->name); + kfree(hc); + return NULL; + } + } + + INIT_LIST_HEAD(&hc->name_list); + INIT_LIST_HEAD(&hc->uuid_list); + hc->md = md; + hc->new_map = NULL; + return hc; +} + +static void free_cell(struct hash_cell *hc) +{ + if (hc) { + kfree(hc->name); + kfree(hc->uuid); + kfree(hc); + } +} + +/* + * The kdev_t and uuid of a device can never change once it is + * initially inserted. + */ +static int dm_hash_insert(const char *name, const char *uuid, struct mapped_device *md) +{ + struct hash_cell *cell, *hc; + + /* + * Allocate the new cells. + */ + cell = alloc_cell(name, uuid, md); + if (!cell) + return -ENOMEM; + + /* + * Insert the cell into both hash tables. 
+ */ + down_write(&_hash_lock); + hc = __get_name_cell(name); + if (hc) { + dm_put(hc->md); + goto bad; + } + + list_add(&cell->name_list, _name_buckets + hash_str(name)); + + if (uuid) { + hc = __get_uuid_cell(uuid); + if (hc) { + list_del(&cell->name_list); + dm_put(hc->md); + goto bad; + } + list_add(&cell->uuid_list, _uuid_buckets + hash_str(uuid)); + } + dm_get(md); + dm_set_mdptr(md, cell); + up_write(&_hash_lock); + + return 0; + + bad: + up_write(&_hash_lock); + free_cell(cell); + return -EBUSY; +} + +static void __hash_remove(struct hash_cell *hc) +{ + struct dm_table *table; + + /* remove from the dev hash */ + list_del(&hc->uuid_list); + list_del(&hc->name_list); + dm_set_mdptr(hc->md, NULL); + + table = dm_get_table(hc->md); + if (table) { + dm_table_event(table); + dm_table_put(table); + } + + if (hc->new_map) + dm_table_put(hc->new_map); + dm_put(hc->md); + free_cell(hc); +} + +static void dm_hash_remove_all(int keep_open_devices) +{ + int i, dev_skipped, dev_removed; + struct hash_cell *hc; + struct list_head *tmp, *n; + + down_write(&_hash_lock); + +retry: + dev_skipped = dev_removed = 0; + for (i = 0; i < NUM_BUCKETS; i++) { + list_for_each_safe (tmp, n, _name_buckets + i) { + hc = list_entry(tmp, struct hash_cell, name_list); + + if (keep_open_devices && + dm_lock_for_deletion(hc->md)) { + dev_skipped++; + continue; + } + __hash_remove(hc); + dev_removed = 1; + } + } + + /* + * Some mapped devices may be using other mapped devices, so if any + * still exist, repeat until we make no further progress. + */ + if (dev_skipped) { + if (dev_removed) + goto retry; + + DMWARN("remove_all left %d open device(s)", dev_skipped); + } + + up_write(&_hash_lock); +} + +static int dm_hash_rename(const char *old, const char *new) +{ + char *new_name, *old_name; + struct hash_cell *hc; + struct dm_table *table; + + /* + * duplicate new. + */ + new_name = kstrdup(new, GFP_KERNEL); + if (!new_name) + return -ENOMEM; + + down_write(&_hash_lock); + + /* + * Is new free ? + */ + hc = __get_name_cell(new); + if (hc) { + DMWARN("asked to rename to an already existing name %s -> %s", + old, new); + dm_put(hc->md); + up_write(&_hash_lock); + kfree(new_name); + return -EBUSY; + } + + /* + * Is there such a device as 'old' ? + */ + hc = __get_name_cell(old); + if (!hc) { + DMWARN("asked to rename a non existent device %s -> %s", + old, new); + up_write(&_hash_lock); + kfree(new_name); + return -ENXIO; + } + + /* + * rename and move the name cell. + */ + list_del(&hc->name_list); + old_name = hc->name; + hc->name = new_name; + list_add(&hc->name_list, _name_buckets + hash_str(new_name)); + + /* + * Wake up any dm event waiters. + */ + table = dm_get_table(hc->md); + if (table) { + dm_table_event(table); + dm_table_put(table); + } + + dm_kobject_uevent(hc->md); + + dm_put(hc->md); + up_write(&_hash_lock); + kfree(old_name); + return 0; +} + +/*----------------------------------------------------------------- + * Implementation of the ioctl commands + *---------------------------------------------------------------*/ +/* + * All the ioctl commands get dispatched to functions with this + * prototype. + */ +typedef int (*ioctl_fn)(struct dm_ioctl *param, size_t param_size); + +static int remove_all(struct dm_ioctl *param, size_t param_size) +{ + dm_hash_remove_all(1); + param->data_size = 0; + return 0; +} + +/* + * Round up the ptr to an 8-byte boundary. 
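+ * align_ptr() adds ALIGN_MASK and clears the low three bits, so a
+ * pointer ending in 0x9 is pushed up to 0x10 while one already ending
+ * in 0x8 is left where it is.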
+ */ +#define ALIGN_MASK 7 +static inline void *align_ptr(void *ptr) +{ + return (void *) (((size_t) (ptr + ALIGN_MASK)) & ~ALIGN_MASK); +} + +/* + * Retrieves the data payload buffer from an already allocated + * struct dm_ioctl. + */ +static void *get_result_buffer(struct dm_ioctl *param, size_t param_size, + size_t *len) +{ + param->data_start = align_ptr(param + 1) - (void *) param; + + if (param->data_start < param_size) + *len = param_size - param->data_start; + else + *len = 0; + + return ((void *) param) + param->data_start; +} + +static int list_devices(struct dm_ioctl *param, size_t param_size) +{ + unsigned int i; + struct hash_cell *hc; + size_t len, needed = 0; + struct gendisk *disk; + struct dm_name_list *nl, *old_nl = NULL; + + down_write(&_hash_lock); + + /* + * Loop through all the devices working out how much + * space we need. + */ + for (i = 0; i < NUM_BUCKETS; i++) { + list_for_each_entry (hc, _name_buckets + i, name_list) { + needed += sizeof(struct dm_name_list); + needed += strlen(hc->name) + 1; + needed += ALIGN_MASK; + } + } + + /* + * Grab our output buffer. + */ + nl = get_result_buffer(param, param_size, &len); + if (len < needed) { + param->flags |= DM_BUFFER_FULL_FLAG; + goto out; + } + param->data_size = param->data_start + needed; + + nl->dev = 0; /* Flags no data */ + + /* + * Now loop through filling out the names. + */ + for (i = 0; i < NUM_BUCKETS; i++) { + list_for_each_entry (hc, _name_buckets + i, name_list) { + if (old_nl) + old_nl->next = (uint32_t) ((void *) nl - + (void *) old_nl); + disk = dm_disk(hc->md); + nl->dev = huge_encode_dev(disk_devt(disk)); + nl->next = 0; + strcpy(nl->name, hc->name); + + old_nl = nl; + nl = align_ptr(((void *) ++nl) + strlen(hc->name) + 1); + } + } + + out: + up_write(&_hash_lock); + return 0; +} + +static void list_version_get_needed(struct target_type *tt, void *needed_param) +{ + size_t *needed = needed_param; + + *needed += sizeof(struct dm_target_versions); + *needed += strlen(tt->name); + *needed += ALIGN_MASK; +} + +static void list_version_get_info(struct target_type *tt, void *param) +{ + struct vers_iter *info = param; + + /* Check space - it might have changed since the first iteration */ + if ((char *)info->vers + sizeof(tt->version) + strlen(tt->name) + 1 > + info->end) { + + info->flags = DM_BUFFER_FULL_FLAG; + return; + } + + if (info->old_vers) + info->old_vers->next = (uint32_t) ((void *)info->vers - + (void *)info->old_vers); + info->vers->version[0] = tt->version[0]; + info->vers->version[1] = tt->version[1]; + info->vers->version[2] = tt->version[2]; + info->vers->next = 0; + strcpy(info->vers->name, tt->name); + + info->old_vers = info->vers; + info->vers = align_ptr(((void *) ++info->vers) + strlen(tt->name) + 1); +} + +static int list_versions(struct dm_ioctl *param, size_t param_size) +{ + size_t len, needed = 0; + struct dm_target_versions *vers; + struct vers_iter iter_info; + + /* + * Loop through all the devices working out how much + * space we need. + */ + dm_target_iterate(list_version_get_needed, &needed); + + /* + * Grab our output buffer. + */ + vers = get_result_buffer(param, param_size, &len); + if (len < needed) { + param->flags |= DM_BUFFER_FULL_FLAG; + goto out; + } + param->data_size = param->data_start + needed; + + iter_info.param_size = param_size; + iter_info.old_vers = NULL; + iter_info.vers = vers; + iter_info.flags = 0; + iter_info.end = (char *)vers+len; + + /* + * Now loop through filling out the names & versions. 
+ */ + dm_target_iterate(list_version_get_info, &iter_info); + param->flags |= iter_info.flags; + + out: + return 0; +} + + + +static int check_name(const char *name) +{ + if (strchr(name, '/')) { + DMWARN("invalid device name"); + return -EINVAL; + } + + return 0; +} + +/* + * Fills in a dm_ioctl structure, ready for sending back to + * userland. + */ +static int __dev_status(struct mapped_device *md, struct dm_ioctl *param) +{ + struct gendisk *disk = dm_disk(md); + struct dm_table *table; + + param->flags &= ~(DM_SUSPEND_FLAG | DM_READONLY_FLAG | + DM_ACTIVE_PRESENT_FLAG); + + if (dm_suspended(md)) + param->flags |= DM_SUSPEND_FLAG; + + param->dev = huge_encode_dev(disk_devt(disk)); + + /* + * Yes, this will be out of date by the time it gets back + * to userland, but it is still very useful for + * debugging. + */ + param->open_count = dm_open_count(md); + + if (get_disk_ro(disk)) + param->flags |= DM_READONLY_FLAG; + + param->event_nr = dm_get_event_nr(md); + + table = dm_get_table(md); + if (table) { + param->flags |= DM_ACTIVE_PRESENT_FLAG; + param->target_count = dm_table_get_num_targets(table); + dm_table_put(table); + } else + param->target_count = 0; + + return 0; +} + +static int dev_create(struct dm_ioctl *param, size_t param_size) +{ + int r, m = DM_ANY_MINOR; + struct mapped_device *md; + + r = check_name(param->name); + if (r) + return r; + + if (param->flags & DM_PERSISTENT_DEV_FLAG) + m = MINOR(huge_decode_dev(param->dev)); + + r = dm_create(m, &md); + if (r) + return r; + + r = dm_hash_insert(param->name, *param->uuid ? param->uuid : NULL, md); + if (r) { + dm_put(md); + return r; + } + + param->flags &= ~DM_INACTIVE_PRESENT_FLAG; + + r = __dev_status(md, param); + dm_put(md); + + return r; +} + +/* + * Always use UUID for lookups if it's present, otherwise use name or dev. + */ +static struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param) +{ + struct mapped_device *md; + void *mdptr = NULL; + + if (*param->uuid) + return __get_uuid_cell(param->uuid); + + if (*param->name) + return __get_name_cell(param->name); + + md = dm_get_md(huge_decode_dev(param->dev)); + if (!md) + goto out; + + mdptr = dm_get_mdptr(md); + if (!mdptr) + dm_put(md); + +out: + return mdptr; +} + +static struct mapped_device *find_device(struct dm_ioctl *param) +{ + struct hash_cell *hc; + struct mapped_device *md = NULL; + + down_read(&_hash_lock); + hc = __find_device_hash_cell(param); + if (hc) { + md = hc->md; + + /* + * Sneakily write in both the name and the uuid + * while we have the cell. + */ + strncpy(param->name, hc->name, sizeof(param->name)); + if (hc->uuid) + strncpy(param->uuid, hc->uuid, sizeof(param->uuid)-1); + else + param->uuid[0] = '\0'; + + if (hc->new_map) + param->flags |= DM_INACTIVE_PRESENT_FLAG; + else + param->flags &= ~DM_INACTIVE_PRESENT_FLAG; + } + up_read(&_hash_lock); + + return md; +} + +static int dev_remove(struct dm_ioctl *param, size_t param_size) +{ + struct hash_cell *hc; + struct mapped_device *md; + int r; + + down_write(&_hash_lock); + hc = __find_device_hash_cell(param); + + if (!hc) { + DMWARN("device doesn't appear to be in the dev hash table."); + up_write(&_hash_lock); + return -ENXIO; + } + + md = hc->md; + + /* + * Ensure the device is not open and nothing further can open it. 
+ */ + r = dm_lock_for_deletion(md); + if (r) { + DMWARN("unable to remove open device %s", hc->name); + up_write(&_hash_lock); + dm_put(md); + return r; + } + + __hash_remove(hc); + up_write(&_hash_lock); + dm_put(md); + param->data_size = 0; + return 0; +} + +/* + * Check a string doesn't overrun the chunk of + * memory we copied from userland. + */ +static int invalid_str(char *str, void *end) +{ + while ((void *) str < end) + if (!*str++) + return 0; + + return -EINVAL; +} + +static int dev_rename(struct dm_ioctl *param, size_t param_size) +{ + int r; + char *new_name = (char *) param + param->data_start; + + if (new_name < param->data || + invalid_str(new_name, (void *) param + param_size) || + strlen(new_name) > DM_NAME_LEN - 1) { + DMWARN("Invalid new logical volume name supplied."); + return -EINVAL; + } + + r = check_name(new_name); + if (r) + return r; + + param->data_size = 0; + return dm_hash_rename(param->name, new_name); +} + +static int dev_set_geometry(struct dm_ioctl *param, size_t param_size) +{ + int r = -EINVAL, x; + struct mapped_device *md; + struct hd_geometry geometry; + unsigned long indata[4]; + char *geostr = (char *) param + param->data_start; + + md = find_device(param); + if (!md) + return -ENXIO; + + if (geostr < param->data || + invalid_str(geostr, (void *) param + param_size)) { + DMWARN("Invalid geometry supplied."); + goto out; + } + + x = sscanf(geostr, "%lu %lu %lu %lu", indata, + indata + 1, indata + 2, indata + 3); + + if (x != 4) { + DMWARN("Unable to interpret geometry settings."); + goto out; + } + + if (indata[0] > 65535 || indata[1] > 255 || + indata[2] > 255 || indata[3] > ULONG_MAX) { + DMWARN("Geometry exceeds range limits."); + goto out; + } + + geometry.cylinders = indata[0]; + geometry.heads = indata[1]; + geometry.sectors = indata[2]; + geometry.start = indata[3]; + + r = dm_set_geometry(md, &geometry); + if (!r) + r = __dev_status(md, param); + + param->data_size = 0; + +out: + dm_put(md); + return r; +} + +static int do_suspend(struct dm_ioctl *param) +{ + int r = 0; + unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG; + struct mapped_device *md; + + md = find_device(param); + if (!md) + return -ENXIO; + + if (param->flags & DM_SKIP_LOCKFS_FLAG) + suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG; + if (param->flags & DM_NOFLUSH_FLAG) + suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG; + + if (!dm_suspended(md)) + r = dm_suspend(md, suspend_flags); + + if (!r) + r = __dev_status(md, param); + + dm_put(md); + return r; +} + +static int do_resume(struct dm_ioctl *param) +{ + int r = 0; + unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG; + struct hash_cell *hc; + struct mapped_device *md; + struct dm_table *new_map; + + down_write(&_hash_lock); + + hc = __find_device_hash_cell(param); + if (!hc) { + DMWARN("device doesn't appear to be in the dev hash table."); + up_write(&_hash_lock); + return -ENXIO; + } + + md = hc->md; + + new_map = hc->new_map; + hc->new_map = NULL; + param->flags &= ~DM_INACTIVE_PRESENT_FLAG; + + up_write(&_hash_lock); + + /* Do we need to load a new map ? 
*/ + if (new_map) { + /* Suspend if it isn't already suspended */ + if (param->flags & DM_SKIP_LOCKFS_FLAG) + suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG; + if (param->flags & DM_NOFLUSH_FLAG) + suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG; + if (!dm_suspended(md)) + dm_suspend(md, suspend_flags); + + r = dm_swap_table(md, new_map); + if (r) { + dm_put(md); + dm_table_put(new_map); + return r; + } + + if (dm_table_get_mode(new_map) & FMODE_WRITE) + set_disk_ro(dm_disk(md), 0); + else + set_disk_ro(dm_disk(md), 1); + + dm_table_put(new_map); + } + + if (dm_suspended(md)) + r = dm_resume(md); + + if (!r) + r = __dev_status(md, param); + + dm_put(md); + return r; +} + +/* + * Set or unset the suspension state of a device. + * If the device already is in the requested state we just return its status. + */ +static int dev_suspend(struct dm_ioctl *param, size_t param_size) +{ + if (param->flags & DM_SUSPEND_FLAG) + return do_suspend(param); + + return do_resume(param); +} + +/* + * Copies device info back to user space, used by + * the create and info ioctls. + */ +static int dev_status(struct dm_ioctl *param, size_t param_size) +{ + int r; + struct mapped_device *md; + + md = find_device(param); + if (!md) + return -ENXIO; + + r = __dev_status(md, param); + dm_put(md); + return r; +} + +/* + * Build up the status struct for each target + */ +static void retrieve_status(struct dm_table *table, + struct dm_ioctl *param, size_t param_size) +{ + unsigned int i, num_targets; + struct dm_target_spec *spec; + char *outbuf, *outptr; + status_type_t type; + size_t remaining, len, used = 0; + + outptr = outbuf = get_result_buffer(param, param_size, &len); + + if (param->flags & DM_STATUS_TABLE_FLAG) + type = STATUSTYPE_TABLE; + else + type = STATUSTYPE_INFO; + + /* Get all the target info */ + num_targets = dm_table_get_num_targets(table); + for (i = 0; i < num_targets; i++) { + struct dm_target *ti = dm_table_get_target(table, i); + + remaining = len - (outptr - outbuf); + if (remaining <= sizeof(struct dm_target_spec)) { + param->flags |= DM_BUFFER_FULL_FLAG; + break; + } + + spec = (struct dm_target_spec *) outptr; + + spec->status = 0; + spec->sector_start = ti->begin; + spec->length = ti->len; + strncpy(spec->target_type, ti->type->name, + sizeof(spec->target_type)); + + outptr += sizeof(struct dm_target_spec); + remaining = len - (outptr - outbuf); + if (remaining <= 0) { + param->flags |= DM_BUFFER_FULL_FLAG; + break; + } + + /* Get the status/table string from the target driver */ + if (ti->type->status) { + if (ti->type->status(ti, type, outptr, remaining)) { + param->flags |= DM_BUFFER_FULL_FLAG; + break; + } + } else + outptr[0] = '\0'; + + outptr += strlen(outptr) + 1; + used = param->data_start + (outptr - outbuf); + + outptr = align_ptr(outptr); + spec->next = outptr - outbuf; + } + + if (used) + param->data_size = used; + + param->target_count = num_targets; +} + +/* + * Wait for a device to report an event + */ +static int dev_wait(struct dm_ioctl *param, size_t param_size) +{ + int r; + struct mapped_device *md; + struct dm_table *table; + + md = find_device(param); + if (!md) + return -ENXIO; + + /* + * Wait for a notification event + */ + if (dm_wait_event(md, param->event_nr)) { + r = -ERESTARTSYS; + goto out; + } + + /* + * The userland program is going to want to know what + * changed to trigger the event, so we may as well tell + * him and save an ioctl. 
+ */ + r = __dev_status(md, param); + if (r) + goto out; + + table = dm_get_table(md); + if (table) { + retrieve_status(table, param, param_size); + dm_table_put(table); + } + + out: + dm_put(md); + return r; +} + +static inline fmode_t get_mode(struct dm_ioctl *param) +{ + fmode_t mode = FMODE_READ | FMODE_WRITE; + + if (param->flags & DM_READONLY_FLAG) + mode = FMODE_READ; + + return mode; +} + +static int next_target(struct dm_target_spec *last, uint32_t next, void *end, + struct dm_target_spec **spec, char **target_params) +{ + *spec = (struct dm_target_spec *) ((unsigned char *) last + next); + *target_params = (char *) (*spec + 1); + + if (*spec < (last + 1)) + return -EINVAL; + + return invalid_str(*target_params, end); +} + +static int populate_table(struct dm_table *table, + struct dm_ioctl *param, size_t param_size) +{ + int r; + unsigned int i = 0; + struct dm_target_spec *spec = (struct dm_target_spec *) param; + uint32_t next = param->data_start; + void *end = (void *) param + param_size; + char *target_params; + + if (!param->target_count) { + DMWARN("populate_table: no targets specified"); + return -EINVAL; + } + + for (i = 0; i < param->target_count; i++) { + + r = next_target(spec, next, end, &spec, &target_params); + if (r) { + DMWARN("unable to find target"); + return r; + } + + r = dm_table_add_target(table, spec->target_type, + (sector_t) spec->sector_start, + (sector_t) spec->length, + target_params); + if (r) { + DMWARN("error adding target to table"); + return r; + } + + next = spec->next; + } + + return dm_table_complete(table); +} + +static int table_load(struct dm_ioctl *param, size_t param_size) +{ + int r; + struct hash_cell *hc; + struct dm_table *t; + struct mapped_device *md; + + md = find_device(param); + if (!md) + return -ENXIO; + + r = dm_table_create(&t, get_mode(param), param->target_count, md); + if (r) + goto out; + + r = populate_table(t, param, param_size); + if (r) { + dm_table_put(t); + goto out; + } + + down_write(&_hash_lock); + hc = dm_get_mdptr(md); + if (!hc || hc->md != md) { + DMWARN("device has been removed from the dev hash table."); + dm_table_put(t); + up_write(&_hash_lock); + r = -ENXIO; + goto out; + } + + if (hc->new_map) + dm_table_put(hc->new_map); + hc->new_map = t; + up_write(&_hash_lock); + + param->flags |= DM_INACTIVE_PRESENT_FLAG; + r = __dev_status(md, param); + +out: + dm_put(md); + + return r; +} + +static int table_clear(struct dm_ioctl *param, size_t param_size) +{ + int r; + struct hash_cell *hc; + struct mapped_device *md; + + down_write(&_hash_lock); + + hc = __find_device_hash_cell(param); + if (!hc) { + DMWARN("device doesn't appear to be in the dev hash table."); + up_write(&_hash_lock); + return -ENXIO; + } + + if (hc->new_map) { + dm_table_put(hc->new_map); + hc->new_map = NULL; + } + + param->flags &= ~DM_INACTIVE_PRESENT_FLAG; + + r = __dev_status(hc->md, param); + md = hc->md; + up_write(&_hash_lock); + dm_put(md); + return r; +} + +/* + * Retrieves a list of devices used by a particular dm device. + */ +static void retrieve_deps(struct dm_table *table, + struct dm_ioctl *param, size_t param_size) +{ + unsigned int count = 0; + struct list_head *tmp; + size_t len, needed; + struct dm_dev_internal *dd; + struct dm_target_deps *deps; + + deps = get_result_buffer(param, param_size, &len); + + /* + * Count the devices. + */ + list_for_each (tmp, dm_table_get_devices(table)) + count++; + + /* + * Check we have enough space. 
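+	 * The result is a single struct dm_target_deps followed directly
+	 * by 'count' huge_encode_dev() entries, hence the size calculated
+	 * below.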
+ */ + needed = sizeof(*deps) + (sizeof(*deps->dev) * count); + if (len < needed) { + param->flags |= DM_BUFFER_FULL_FLAG; + return; + } + + /* + * Fill in the devices. + */ + deps->count = count; + count = 0; + list_for_each_entry (dd, dm_table_get_devices(table), list) + deps->dev[count++] = huge_encode_dev(dd->dm_dev.bdev->bd_dev); + + param->data_size = param->data_start + needed; +} + +static int table_deps(struct dm_ioctl *param, size_t param_size) +{ + int r = 0; + struct mapped_device *md; + struct dm_table *table; + + md = find_device(param); + if (!md) + return -ENXIO; + + r = __dev_status(md, param); + if (r) + goto out; + + table = dm_get_table(md); + if (table) { + retrieve_deps(table, param, param_size); + dm_table_put(table); + } + + out: + dm_put(md); + return r; +} + +/* + * Return the status of a device as a text string for each + * target. + */ +static int table_status(struct dm_ioctl *param, size_t param_size) +{ + int r; + struct mapped_device *md; + struct dm_table *table; + + md = find_device(param); + if (!md) + return -ENXIO; + + r = __dev_status(md, param); + if (r) + goto out; + + table = dm_get_table(md); + if (table) { + retrieve_status(table, param, param_size); + dm_table_put(table); + } + + out: + dm_put(md); + return r; +} + +/* + * Pass a message to the target that's at the supplied device offset. + */ +static int target_message(struct dm_ioctl *param, size_t param_size) +{ + int r, argc; + char **argv; + struct mapped_device *md; + struct dm_table *table; + struct dm_target *ti; + struct dm_target_msg *tmsg = (void *) param + param->data_start; + + md = find_device(param); + if (!md) + return -ENXIO; + + r = __dev_status(md, param); + if (r) + goto out; + + if (tmsg < (struct dm_target_msg *) param->data || + invalid_str(tmsg->message, (void *) param + param_size)) { + DMWARN("Invalid target message parameters."); + r = -EINVAL; + goto out; + } + + r = dm_split_args(&argc, &argv, tmsg->message); + if (r) { + DMWARN("Failed to split target message parameters"); + goto out; + } + + table = dm_get_table(md); + if (!table) + goto out_argv; + + ti = dm_table_find_target(table, tmsg->sector); + if (!dm_target_is_valid(ti)) { + DMWARN("Target message sector outside device."); + r = -EINVAL; + } else if (ti->type->message) + r = ti->type->message(ti, argc, argv); + else { + DMWARN("Target type does not support messages"); + r = -EINVAL; + } + + dm_table_put(table); + out_argv: + kfree(argv); + out: + param->data_size = 0; + dm_put(md); + return r; +} + +/*----------------------------------------------------------------- + * Implementation of open/close/ioctl on the special char + * device. + *---------------------------------------------------------------*/ +static ioctl_fn lookup_ioctl(unsigned int cmd) +{ + static struct { + int cmd; + ioctl_fn fn; + } _ioctls[] = { + {DM_VERSION_CMD, NULL}, /* version is dealt with elsewhere */ + {DM_REMOVE_ALL_CMD, remove_all}, + {DM_LIST_DEVICES_CMD, list_devices}, + + {DM_DEV_CREATE_CMD, dev_create}, + {DM_DEV_REMOVE_CMD, dev_remove}, + {DM_DEV_RENAME_CMD, dev_rename}, + {DM_DEV_SUSPEND_CMD, dev_suspend}, + {DM_DEV_STATUS_CMD, dev_status}, + {DM_DEV_WAIT_CMD, dev_wait}, + + {DM_TABLE_LOAD_CMD, table_load}, + {DM_TABLE_CLEAR_CMD, table_clear}, + {DM_TABLE_DEPS_CMD, table_deps}, + {DM_TABLE_STATUS_CMD, table_status}, + + {DM_LIST_VERSIONS_CMD, list_versions}, + + {DM_TARGET_MSG_CMD, target_message}, + {DM_DEV_SET_GEOMETRY_CMD, dev_set_geometry} + }; + + return (cmd >= ARRAY_SIZE(_ioctls)) ? 
NULL : _ioctls[cmd].fn; +} + +/* + * As well as checking the version compatibility this always + * copies the kernel interface version out. + */ +static int check_version(unsigned int cmd, struct dm_ioctl __user *user) +{ + uint32_t version[3]; + int r = 0; + + if (copy_from_user(version, user->version, sizeof(version))) + return -EFAULT; + + if ((DM_VERSION_MAJOR != version[0]) || + (DM_VERSION_MINOR < version[1])) { + DMWARN("ioctl interface mismatch: " + "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)", + DM_VERSION_MAJOR, DM_VERSION_MINOR, + DM_VERSION_PATCHLEVEL, + version[0], version[1], version[2], cmd); + r = -EINVAL; + } + + /* + * Fill in the kernel version. + */ + version[0] = DM_VERSION_MAJOR; + version[1] = DM_VERSION_MINOR; + version[2] = DM_VERSION_PATCHLEVEL; + if (copy_to_user(user->version, version, sizeof(version))) + return -EFAULT; + + return r; +} + +static void free_params(struct dm_ioctl *param) +{ + vfree(param); +} + +static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param) +{ + struct dm_ioctl tmp, *dmi; + + if (copy_from_user(&tmp, user, sizeof(tmp) - sizeof(tmp.data))) + return -EFAULT; + + if (tmp.data_size < (sizeof(tmp) - sizeof(tmp.data))) + return -EINVAL; + + dmi = vmalloc(tmp.data_size); + if (!dmi) + return -ENOMEM; + + if (copy_from_user(dmi, user, tmp.data_size)) { + vfree(dmi); + return -EFAULT; + } + + *param = dmi; + return 0; +} + +static int validate_params(uint cmd, struct dm_ioctl *param) +{ + /* Always clear this flag */ + param->flags &= ~DM_BUFFER_FULL_FLAG; + + /* Ignores parameters */ + if (cmd == DM_REMOVE_ALL_CMD || + cmd == DM_LIST_DEVICES_CMD || + cmd == DM_LIST_VERSIONS_CMD) + return 0; + + if ((cmd == DM_DEV_CREATE_CMD)) { + if (!*param->name) { + DMWARN("name not supplied when creating device"); + return -EINVAL; + } + } else if ((*param->uuid && *param->name)) { + DMWARN("only supply one of name or uuid, cmd(%u)", cmd); + return -EINVAL; + } + + /* Ensure strings are terminated */ + param->name[DM_NAME_LEN - 1] = '\0'; + param->uuid[DM_UUID_LEN - 1] = '\0'; + + return 0; +} + +static int ctl_ioctl(uint command, struct dm_ioctl __user *user) +{ + int r = 0; + unsigned int cmd; + struct dm_ioctl *uninitialized_var(param); + ioctl_fn fn = NULL; + size_t param_size; + + /* only root can play with this */ + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + if (_IOC_TYPE(command) != DM_IOCTL) + return -ENOTTY; + + cmd = _IOC_NR(command); + + /* + * Check the interface version passed in. This also + * writes out the kernel's interface version. + */ + r = check_version(cmd, user); + if (r) + return r; + + /* + * Nothing more to do for the version command. + */ + if (cmd == DM_VERSION_CMD) + return 0; + + fn = lookup_ioctl(cmd); + if (!fn) { + DMWARN("dm_ctl_ioctl: unknown command 0x%x", command); + return -ENOTTY; + } + + /* + * Trying to avoid low memory issues when a device is + * suspended. + */ + current->flags |= PF_MEMALLOC; + + /* + * Copy the parameters into kernel space. + */ + r = copy_params(user, ¶m); + + current->flags &= ~PF_MEMALLOC; + + if (r) + return r; + + r = validate_params(cmd, param); + if (r) + goto out; + + param_size = param->data_size; + param->data_size = sizeof(*param); + r = fn(param, param_size); + + /* + * Copy the results back to userland. 
+ */ + if (!r && copy_to_user(user, param, param->data_size)) + r = -EFAULT; + + out: + free_params(param); + return r; +} + +static long dm_ctl_ioctl(struct file *file, uint command, ulong u) +{ + return (long)ctl_ioctl(command, (struct dm_ioctl __user *)u); +} + +#ifdef CONFIG_COMPAT +static long dm_compat_ctl_ioctl(struct file *file, uint command, ulong u) +{ + return (long)dm_ctl_ioctl(file, command, (ulong) compat_ptr(u)); +} +#else +#define dm_compat_ctl_ioctl NULL +#endif + +static const struct file_operations _ctl_fops = { + .unlocked_ioctl = dm_ctl_ioctl, + .compat_ioctl = dm_compat_ctl_ioctl, + .owner = THIS_MODULE, +}; + +static struct miscdevice _dm_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = DM_NAME, + .fops = &_ctl_fops +}; + +/* + * Create misc character device and link to DM_DIR/control. + */ +int __init dm_interface_init(void) +{ + int r; + + r = dm_hash_init(); + if (r) + return r; + + r = misc_register(&_dm_misc); + if (r) { + DMERR("misc_register failed for control device"); + dm_hash_exit(); + return r; + } + + DMINFO("%d.%d.%d%s initialised: %s", DM_VERSION_MAJOR, + DM_VERSION_MINOR, DM_VERSION_PATCHLEVEL, DM_VERSION_EXTRA, + DM_DRIVER_EMAIL); + return 0; +} + +void dm_interface_exit(void) +{ + if (misc_deregister(&_dm_misc) < 0) + DMERR("misc_deregister failed for control device"); + + dm_hash_exit(); +} + +/** + * dm_copy_name_and_uuid - Copy mapped device name & uuid into supplied buffers + * @md: Pointer to mapped_device + * @name: Buffer (size DM_NAME_LEN) for name + * @uuid: Buffer (size DM_UUID_LEN) for uuid or empty string if uuid not defined + */ +int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid) +{ + int r = 0; + struct hash_cell *hc; + + if (!md) + return -ENXIO; + + dm_get(md); + down_read(&_hash_lock); + hc = dm_get_mdptr(md); + if (!hc || hc->md != md) { + r = -ENXIO; + goto out; + } + + strcpy(name, hc->name); + strcpy(uuid, hc->uuid ? : ""); + +out: + up_read(&_hash_lock); + dm_put(md); + + return r; +} diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c new file mode 100644 index 0000000..3073618 --- /dev/null +++ b/drivers/md/dm-kcopyd.c @@ -0,0 +1,666 @@ +/* + * Copyright (C) 2002 Sistina Software (UK) Limited. + * Copyright (C) 2006 Red Hat GmbH + * + * This file is released under the GPL. + * + * Kcopyd provides a simple interface for copying an area of one + * block-device to one or more other block-devices, with an asynchronous + * completion notification. + */ + +#include <linux/types.h> +#include <asm/atomic.h> +#include <linux/blkdev.h> +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/list.h> +#include <linux/mempool.h> +#include <linux/module.h> +#include <linux/pagemap.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/workqueue.h> +#include <linux/mutex.h> +#include <linux/device-mapper.h> +#include <linux/dm-kcopyd.h> + +#include "dm.h" + +/*----------------------------------------------------------------- + * Each kcopyd client has its own little pool of preallocated + * pages for kcopyd io. 
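+ *
+ * A client kicks off work with dm_kcopyd_copy().  As a rough sketch
+ * only ('src', 'dst', 'kc', 'copy_done' and 'context' are a made-up
+ * caller's names, not part of this patch):
+ *
+ *	struct dm_io_region from = { .bdev = src, .sector = 0, .count = 256 };
+ *	struct dm_io_region to   = { .bdev = dst, .sector = 0, .count = 256 };
+ *
+ *	dm_kcopyd_copy(kc, &from, 1, &to, 0, copy_done, context);
+ *
+ * copy_done(read_err, write_err, context) is called once the whole
+ * copy, covering every destination, has finished, successfully or not.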
+ *---------------------------------------------------------------*/ +struct dm_kcopyd_client { + spinlock_t lock; + struct page_list *pages; + unsigned int nr_pages; + unsigned int nr_free_pages; + + struct dm_io_client *io_client; + + wait_queue_head_t destroyq; + atomic_t nr_jobs; + + mempool_t *job_pool; + + struct workqueue_struct *kcopyd_wq; + struct work_struct kcopyd_work; + +/* + * We maintain three lists of jobs: + * + * i) jobs waiting for pages + * ii) jobs that have pages, and are waiting for the io to be issued. + * iii) jobs that have completed. + * + * All three of these are protected by job_lock. + */ + spinlock_t job_lock; + struct list_head complete_jobs; + struct list_head io_jobs; + struct list_head pages_jobs; +}; + +static void wake(struct dm_kcopyd_client *kc) +{ + queue_work(kc->kcopyd_wq, &kc->kcopyd_work); +} + +static struct page_list *alloc_pl(void) +{ + struct page_list *pl; + + pl = kmalloc(sizeof(*pl), GFP_KERNEL); + if (!pl) + return NULL; + + pl->page = alloc_page(GFP_KERNEL); + if (!pl->page) { + kfree(pl); + return NULL; + } + + return pl; +} + +static void free_pl(struct page_list *pl) +{ + __free_page(pl->page); + kfree(pl); +} + +static int kcopyd_get_pages(struct dm_kcopyd_client *kc, + unsigned int nr, struct page_list **pages) +{ + struct page_list *pl; + + spin_lock(&kc->lock); + if (kc->nr_free_pages < nr) { + spin_unlock(&kc->lock); + return -ENOMEM; + } + + kc->nr_free_pages -= nr; + for (*pages = pl = kc->pages; --nr; pl = pl->next) + ; + + kc->pages = pl->next; + pl->next = NULL; + + spin_unlock(&kc->lock); + + return 0; +} + +static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl) +{ + struct page_list *cursor; + + spin_lock(&kc->lock); + for (cursor = pl; cursor->next; cursor = cursor->next) + kc->nr_free_pages++; + + kc->nr_free_pages++; + cursor->next = kc->pages; + kc->pages = pl; + spin_unlock(&kc->lock); +} + +/* + * These three functions resize the page pool. + */ +static void drop_pages(struct page_list *pl) +{ + struct page_list *next; + + while (pl) { + next = pl->next; + free_pl(pl); + pl = next; + } +} + +static int client_alloc_pages(struct dm_kcopyd_client *kc, unsigned int nr) +{ + unsigned int i; + struct page_list *pl = NULL, *next; + + for (i = 0; i < nr; i++) { + next = alloc_pl(); + if (!next) { + if (pl) + drop_pages(pl); + return -ENOMEM; + } + next->next = pl; + pl = next; + } + + kcopyd_put_pages(kc, pl); + kc->nr_pages += nr; + return 0; +} + +static void client_free_pages(struct dm_kcopyd_client *kc) +{ + BUG_ON(kc->nr_free_pages != kc->nr_pages); + drop_pages(kc->pages); + kc->pages = NULL; + kc->nr_free_pages = kc->nr_pages = 0; +} + +/*----------------------------------------------------------------- + * kcopyd_jobs need to be allocated by the *clients* of kcopyd, + * for this reason we use a mempool to prevent the client from + * ever having to do io (which could cause a deadlock). + *---------------------------------------------------------------*/ +struct kcopyd_job { + struct dm_kcopyd_client *kc; + struct list_head list; + unsigned long flags; + + /* + * Error state of the job. + */ + int read_err; + unsigned long write_err; + + /* + * Either READ or WRITE + */ + int rw; + struct dm_io_region source; + + /* + * The destinations for the transfer. + */ + unsigned int num_dests; + struct dm_io_region dests[DM_KCOPYD_MAX_REGIONS]; + + sector_t offset; + unsigned int nr_pages; + struct page_list *pages; + + /* + * Set this to ensure you are notified when the job has + * completed. 
'context' is for callback to use. + */ + dm_kcopyd_notify_fn fn; + void *context; + + /* + * These fields are only used if the job has been split + * into more manageable parts. + */ + struct mutex lock; + atomic_t sub_jobs; + sector_t progress; +}; + +/* FIXME: this should scale with the number of pages */ +#define MIN_JOBS 512 + +static struct kmem_cache *_job_cache; + +int __init dm_kcopyd_init(void) +{ + _job_cache = KMEM_CACHE(kcopyd_job, 0); + if (!_job_cache) + return -ENOMEM; + + return 0; +} + +void dm_kcopyd_exit(void) +{ + kmem_cache_destroy(_job_cache); + _job_cache = NULL; +} + +/* + * Functions to push and pop a job onto the head of a given job + * list. + */ +static struct kcopyd_job *pop(struct list_head *jobs, + struct dm_kcopyd_client *kc) +{ + struct kcopyd_job *job = NULL; + unsigned long flags; + + spin_lock_irqsave(&kc->job_lock, flags); + + if (!list_empty(jobs)) { + job = list_entry(jobs->next, struct kcopyd_job, list); + list_del(&job->list); + } + spin_unlock_irqrestore(&kc->job_lock, flags); + + return job; +} + +static void push(struct list_head *jobs, struct kcopyd_job *job) +{ + unsigned long flags; + struct dm_kcopyd_client *kc = job->kc; + + spin_lock_irqsave(&kc->job_lock, flags); + list_add_tail(&job->list, jobs); + spin_unlock_irqrestore(&kc->job_lock, flags); +} + + +static void push_head(struct list_head *jobs, struct kcopyd_job *job) +{ + unsigned long flags; + struct dm_kcopyd_client *kc = job->kc; + + spin_lock_irqsave(&kc->job_lock, flags); + list_add(&job->list, jobs); + spin_unlock_irqrestore(&kc->job_lock, flags); +} + +/* + * These three functions process 1 item from the corresponding + * job list. + * + * They return: + * < 0: error + * 0: success + * > 0: can't process yet. + */ +static int run_complete_job(struct kcopyd_job *job) +{ + void *context = job->context; + int read_err = job->read_err; + unsigned long write_err = job->write_err; + dm_kcopyd_notify_fn fn = job->fn; + struct dm_kcopyd_client *kc = job->kc; + + kcopyd_put_pages(kc, job->pages); + mempool_free(job, kc->job_pool); + fn(read_err, write_err, context); + + if (atomic_dec_and_test(&kc->nr_jobs)) + wake_up(&kc->destroyq); + + return 0; +} + +static void complete_io(unsigned long error, void *context) +{ + struct kcopyd_job *job = (struct kcopyd_job *) context; + struct dm_kcopyd_client *kc = job->kc; + + if (error) { + if (job->rw == WRITE) + job->write_err |= error; + else + job->read_err = 1; + + if (!test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) { + push(&kc->complete_jobs, job); + wake(kc); + return; + } + } + + if (job->rw == WRITE) + push(&kc->complete_jobs, job); + + else { + job->rw = WRITE; + push(&kc->io_jobs, job); + } + + wake(kc); +} + +/* + * Request io on as many buffer heads as we can currently get for + * a particular job. 
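+ *
+ * Note that a job passes through here twice: first as a READ of the
+ * source region, then, after complete_io() flips job->rw, as a WRITE
+ * fanned out to every destination in a single dm_io() call.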
+ */ +static int run_io_job(struct kcopyd_job *job) +{ + int r; + struct dm_io_request io_req = { + .bi_rw = job->rw | (1 << BIO_RW_SYNC), + .mem.type = DM_IO_PAGE_LIST, + .mem.ptr.pl = job->pages, + .mem.offset = job->offset, + .notify.fn = complete_io, + .notify.context = job, + .client = job->kc->io_client, + }; + + if (job->rw == READ) + r = dm_io(&io_req, 1, &job->source, NULL); + else + r = dm_io(&io_req, job->num_dests, job->dests, NULL); + + return r; +} + +static int run_pages_job(struct kcopyd_job *job) +{ + int r; + + job->nr_pages = dm_div_up(job->dests[0].count + job->offset, + PAGE_SIZE >> 9); + r = kcopyd_get_pages(job->kc, job->nr_pages, &job->pages); + if (!r) { + /* this job is ready for io */ + push(&job->kc->io_jobs, job); + return 0; + } + + if (r == -ENOMEM) + /* can't complete now */ + return 1; + + return r; +} + +/* + * Run through a list for as long as possible. Returns the count + * of successful jobs. + */ +static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc, + int (*fn) (struct kcopyd_job *)) +{ + struct kcopyd_job *job; + int r, count = 0; + + while ((job = pop(jobs, kc))) { + + r = fn(job); + + if (r < 0) { + /* error this rogue job */ + if (job->rw == WRITE) + job->write_err = (unsigned long) -1L; + else + job->read_err = 1; + push(&kc->complete_jobs, job); + break; + } + + if (r > 0) { + /* + * We couldn't service this job ATM, so + * push this job back onto the list. + */ + push_head(jobs, job); + break; + } + + count++; + } + + return count; +} + +/* + * kcopyd does this every time it's woken up. + */ +static void do_work(struct work_struct *work) +{ + struct dm_kcopyd_client *kc = container_of(work, + struct dm_kcopyd_client, kcopyd_work); + + /* + * The order that these are called is *very* important. + * complete jobs can free some pages for pages jobs. + * Pages jobs when successful will jump onto the io jobs + * list. io jobs call wake when they complete and it all + * starts again. + */ + process_jobs(&kc->complete_jobs, kc, run_complete_job); + process_jobs(&kc->pages_jobs, kc, run_pages_job); + process_jobs(&kc->io_jobs, kc, run_io_job); +} + +/* + * If we are copying a small region we just dispatch a single job + * to do the copy, otherwise the io has to be split up into many + * jobs. + */ +static void dispatch_job(struct kcopyd_job *job) +{ + struct dm_kcopyd_client *kc = job->kc; + atomic_inc(&kc->nr_jobs); + push(&kc->pages_jobs, job); + wake(kc); +} + +#define SUB_JOB_SIZE 128 +static void segment_complete(int read_err, unsigned long write_err, + void *context) +{ + /* FIXME: tidy this function */ + sector_t progress = 0; + sector_t count = 0; + struct kcopyd_job *job = (struct kcopyd_job *) context; + + mutex_lock(&job->lock); + + /* update the error */ + if (read_err) + job->read_err = 1; + + if (write_err) + job->write_err |= write_err; + + /* + * Only dispatch more work if there hasn't been an error. 
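+ * Unless DM_KCOPYD_IGNORE_ERROR is set, a failure stops further chunks
+ * from being dispatched; the remaining in-flight sub-jobs just drop the
+ * sub_jobs count until the whole job can be completed.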
+ */ + if ((!job->read_err && !job->write_err) || + test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) { + /* get the next chunk of work */ + progress = job->progress; + count = job->source.count - progress; + if (count) { + if (count > SUB_JOB_SIZE) + count = SUB_JOB_SIZE; + + job->progress += count; + } + } + mutex_unlock(&job->lock); + + if (count) { + int i; + struct kcopyd_job *sub_job = mempool_alloc(job->kc->job_pool, + GFP_NOIO); + + *sub_job = *job; + sub_job->source.sector += progress; + sub_job->source.count = count; + + for (i = 0; i < job->num_dests; i++) { + sub_job->dests[i].sector += progress; + sub_job->dests[i].count = count; + } + + sub_job->fn = segment_complete; + sub_job->context = job; + dispatch_job(sub_job); + + } else if (atomic_dec_and_test(&job->sub_jobs)) { + + /* + * To avoid a race we must keep the job around + * until after the notify function has completed. + * Otherwise the client may try and stop the job + * after we've completed. + */ + job->fn(read_err, write_err, job->context); + mempool_free(job, job->kc->job_pool); + } +} + +/* + * Create some little jobs that will do the move between + * them. + */ +#define SPLIT_COUNT 8 +static void split_job(struct kcopyd_job *job) +{ + int i; + + atomic_set(&job->sub_jobs, SPLIT_COUNT); + for (i = 0; i < SPLIT_COUNT; i++) + segment_complete(0, 0u, job); +} + +int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from, + unsigned int num_dests, struct dm_io_region *dests, + unsigned int flags, dm_kcopyd_notify_fn fn, void *context) +{ + struct kcopyd_job *job; + + /* + * Allocate a new job. + */ + job = mempool_alloc(kc->job_pool, GFP_NOIO); + + /* + * set up for the read. + */ + job->kc = kc; + job->flags = flags; + job->read_err = 0; + job->write_err = 0; + job->rw = READ; + + job->source = *from; + + job->num_dests = num_dests; + memcpy(&job->dests, dests, sizeof(*dests) * num_dests); + + job->offset = 0; + job->nr_pages = 0; + job->pages = NULL; + + job->fn = fn; + job->context = context; + + if (job->source.count < SUB_JOB_SIZE) + dispatch_job(job); + + else { + mutex_init(&job->lock); + job->progress = 0; + split_job(job); + } + + return 0; +} +EXPORT_SYMBOL(dm_kcopyd_copy); + +/* + * Cancels a kcopyd job, eg. someone might be deactivating a + * mirror. 
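+ * Cancellation is not implemented; the stub below is compiled out with
+ * #if 0 and simply returns -1.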
+ */ +#if 0 +int kcopyd_cancel(struct kcopyd_job *job, int block) +{ + /* FIXME: finish */ + return -1; +} +#endif /* 0 */ + +/*----------------------------------------------------------------- + * Client setup + *---------------------------------------------------------------*/ +int dm_kcopyd_client_create(unsigned int nr_pages, + struct dm_kcopyd_client **result) +{ + int r = -ENOMEM; + struct dm_kcopyd_client *kc; + + kc = kmalloc(sizeof(*kc), GFP_KERNEL); + if (!kc) + return -ENOMEM; + + spin_lock_init(&kc->lock); + spin_lock_init(&kc->job_lock); + INIT_LIST_HEAD(&kc->complete_jobs); + INIT_LIST_HEAD(&kc->io_jobs); + INIT_LIST_HEAD(&kc->pages_jobs); + + kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache); + if (!kc->job_pool) + goto bad_slab; + + INIT_WORK(&kc->kcopyd_work, do_work); + kc->kcopyd_wq = create_singlethread_workqueue("kcopyd"); + if (!kc->kcopyd_wq) + goto bad_workqueue; + + kc->pages = NULL; + kc->nr_pages = kc->nr_free_pages = 0; + r = client_alloc_pages(kc, nr_pages); + if (r) + goto bad_client_pages; + + kc->io_client = dm_io_client_create(nr_pages); + if (IS_ERR(kc->io_client)) { + r = PTR_ERR(kc->io_client); + goto bad_io_client; + } + + init_waitqueue_head(&kc->destroyq); + atomic_set(&kc->nr_jobs, 0); + + *result = kc; + return 0; + +bad_io_client: + client_free_pages(kc); +bad_client_pages: + destroy_workqueue(kc->kcopyd_wq); +bad_workqueue: + mempool_destroy(kc->job_pool); +bad_slab: + kfree(kc); + + return r; +} +EXPORT_SYMBOL(dm_kcopyd_client_create); + +void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc) +{ + /* Wait for completion of all jobs submitted by this client. */ + wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs)); + + BUG_ON(!list_empty(&kc->complete_jobs)); + BUG_ON(!list_empty(&kc->io_jobs)); + BUG_ON(!list_empty(&kc->pages_jobs)); + destroy_workqueue(kc->kcopyd_wq); + dm_io_client_destroy(kc->io_client); + client_free_pages(kc); + mempool_destroy(kc->job_pool); + kfree(kc); +} +EXPORT_SYMBOL(dm_kcopyd_client_destroy); diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c new file mode 100644 index 0000000..44042be --- /dev/null +++ b/drivers/md/dm-linear.c @@ -0,0 +1,163 @@ +/* + * Copyright (C) 2001-2003 Sistina Software (UK) Limited. + * + * This file is released under the GPL. + */ + +#include "dm.h" +#include <linux/module.h> +#include <linux/init.h> +#include <linux/blkdev.h> +#include <linux/bio.h> +#include <linux/slab.h> +#include <linux/device-mapper.h> + +#define DM_MSG_PREFIX "linear" + +/* + * Linear: maps a linear range of a device. 
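+ * Each bio is remapped by adding a constant offset to its sector and
+ * redirecting it to the underlying device. For example, a dmsetup table
+ * line using this target (device name made up) might be:
+ *
+ *   0 409600 linear /dev/sdb 1024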
+ */ +struct linear_c { + struct dm_dev *dev; + sector_t start; +}; + +/* + * Construct a linear mapping: <dev_path> <offset> + */ +static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv) +{ + struct linear_c *lc; + unsigned long long tmp; + + if (argc != 2) { + ti->error = "Invalid argument count"; + return -EINVAL; + } + + lc = kmalloc(sizeof(*lc), GFP_KERNEL); + if (lc == NULL) { + ti->error = "dm-linear: Cannot allocate linear context"; + return -ENOMEM; + } + + if (sscanf(argv[1], "%llu", &tmp) != 1) { + ti->error = "dm-linear: Invalid device sector"; + goto bad; + } + lc->start = tmp; + + if (dm_get_device(ti, argv[0], lc->start, ti->len, + dm_table_get_mode(ti->table), &lc->dev)) { + ti->error = "dm-linear: Device lookup failed"; + goto bad; + } + + ti->private = lc; + return 0; + + bad: + kfree(lc); + return -EINVAL; +} + +static void linear_dtr(struct dm_target *ti) +{ + struct linear_c *lc = (struct linear_c *) ti->private; + + dm_put_device(ti, lc->dev); + kfree(lc); +} + +static sector_t linear_map_sector(struct dm_target *ti, sector_t bi_sector) +{ + struct linear_c *lc = ti->private; + + return lc->start + (bi_sector - ti->begin); +} + +static void linear_map_bio(struct dm_target *ti, struct bio *bio) +{ + struct linear_c *lc = ti->private; + + bio->bi_bdev = lc->dev->bdev; + bio->bi_sector = linear_map_sector(ti, bio->bi_sector); +} + +static int linear_map(struct dm_target *ti, struct bio *bio, + union map_info *map_context) +{ + linear_map_bio(ti, bio); + + return DM_MAPIO_REMAPPED; +} + +static int linear_status(struct dm_target *ti, status_type_t type, + char *result, unsigned int maxlen) +{ + struct linear_c *lc = (struct linear_c *) ti->private; + + switch (type) { + case STATUSTYPE_INFO: + result[0] = '\0'; + break; + + case STATUSTYPE_TABLE: + snprintf(result, maxlen, "%s %llu", lc->dev->name, + (unsigned long long)lc->start); + break; + } + return 0; +} + +static int linear_ioctl(struct dm_target *ti, unsigned int cmd, + unsigned long arg) +{ + struct linear_c *lc = (struct linear_c *) ti->private; + return __blkdev_driver_ioctl(lc->dev->bdev, lc->dev->mode, cmd, arg); +} + +static int linear_merge(struct dm_target *ti, struct bvec_merge_data *bvm, + struct bio_vec *biovec, int max_size) +{ + struct linear_c *lc = ti->private; + struct request_queue *q = bdev_get_queue(lc->dev->bdev); + + if (!q->merge_bvec_fn) + return max_size; + + bvm->bi_bdev = lc->dev->bdev; + bvm->bi_sector = linear_map_sector(ti, bvm->bi_sector); + + return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); +} + +static struct target_type linear_target = { + .name = "linear", + .version= {1, 0, 3}, + .module = THIS_MODULE, + .ctr = linear_ctr, + .dtr = linear_dtr, + .map = linear_map, + .status = linear_status, + .ioctl = linear_ioctl, + .merge = linear_merge, +}; + +int __init dm_linear_init(void) +{ + int r = dm_register_target(&linear_target); + + if (r < 0) + DMERR("register failed %d", r); + + return r; +} + +void dm_linear_exit(void) +{ + int r = dm_unregister_target(&linear_target); + + if (r < 0) + DMERR("unregister failed %d", r); +} diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c new file mode 100644 index 0000000..13e2a1a --- /dev/null +++ b/drivers/md/dm-log.c @@ -0,0 +1,867 @@ +/* + * Copyright (C) 2003 Sistina Software + * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. + * + * This file is released under the LGPL. 
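+ *
+ * This file implements the "dirty region log" API used by the mirror
+ * target: pluggable log types ("core" keeps the bitsets in memory only,
+ * "disk" also persists a header and clean bitset to a log device) that
+ * record which regions of the mirror are in sync.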
+ */ + +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <linux/dm-io.h> +#include <linux/dm-dirty-log.h> + +#include <linux/device-mapper.h> + +#define DM_MSG_PREFIX "dirty region log" + +struct dm_dirty_log_internal { + struct dm_dirty_log_type *type; + + struct list_head list; + long use; +}; + +static LIST_HEAD(_log_types); +static DEFINE_SPINLOCK(_lock); + +static struct dm_dirty_log_internal *__find_dirty_log_type(const char *name) +{ + struct dm_dirty_log_internal *log_type; + + list_for_each_entry(log_type, &_log_types, list) + if (!strcmp(name, log_type->type->name)) + return log_type; + + return NULL; +} + +static struct dm_dirty_log_internal *_get_dirty_log_type(const char *name) +{ + struct dm_dirty_log_internal *log_type; + + spin_lock(&_lock); + + log_type = __find_dirty_log_type(name); + if (log_type) { + if (!log_type->use && !try_module_get(log_type->type->module)) + log_type = NULL; + else + log_type->use++; + } + + spin_unlock(&_lock); + + return log_type; +} + +/* + * get_type + * @type_name + * + * Attempt to retrieve the dm_dirty_log_type by name. If not already + * available, attempt to load the appropriate module. + * + * Log modules are named "dm-log-" followed by the 'type_name'. + * Modules may contain multiple types. + * This function will first try the module "dm-log-<type_name>", + * then truncate 'type_name' on the last '-' and try again. + * + * For example, if type_name was "clustered-disk", it would search + * 'dm-log-clustered-disk' then 'dm-log-clustered'. + * + * Returns: dirty_log_type* on success, NULL on failure + */ +static struct dm_dirty_log_type *get_type(const char *type_name) +{ + char *p, *type_name_dup; + struct dm_dirty_log_internal *log_type; + + if (!type_name) + return NULL; + + log_type = _get_dirty_log_type(type_name); + if (log_type) + return log_type->type; + + type_name_dup = kstrdup(type_name, GFP_KERNEL); + if (!type_name_dup) { + DMWARN("No memory left to attempt log module load for \"%s\"", + type_name); + return NULL; + } + + while (request_module("dm-log-%s", type_name_dup) || + !(log_type = _get_dirty_log_type(type_name))) { + p = strrchr(type_name_dup, '-'); + if (!p) + break; + p[0] = '\0'; + } + + if (!log_type) + DMWARN("Module for logging type \"%s\" not found.", type_name); + + kfree(type_name_dup); + + return log_type ? 
log_type->type : NULL; +} + +static void put_type(struct dm_dirty_log_type *type) +{ + struct dm_dirty_log_internal *log_type; + + if (!type) + return; + + spin_lock(&_lock); + log_type = __find_dirty_log_type(type->name); + if (!log_type) + goto out; + + if (!--log_type->use) + module_put(type->module); + + BUG_ON(log_type->use < 0); + +out: + spin_unlock(&_lock); +} + +static struct dm_dirty_log_internal *_alloc_dirty_log_type(struct dm_dirty_log_type *type) +{ + struct dm_dirty_log_internal *log_type = kzalloc(sizeof(*log_type), + GFP_KERNEL); + + if (log_type) + log_type->type = type; + + return log_type; +} + +int dm_dirty_log_type_register(struct dm_dirty_log_type *type) +{ + struct dm_dirty_log_internal *log_type = _alloc_dirty_log_type(type); + int r = 0; + + if (!log_type) + return -ENOMEM; + + spin_lock(&_lock); + if (!__find_dirty_log_type(type->name)) + list_add(&log_type->list, &_log_types); + else { + kfree(log_type); + r = -EEXIST; + } + spin_unlock(&_lock); + + return r; +} +EXPORT_SYMBOL(dm_dirty_log_type_register); + +int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type) +{ + struct dm_dirty_log_internal *log_type; + + spin_lock(&_lock); + + log_type = __find_dirty_log_type(type->name); + if (!log_type) { + spin_unlock(&_lock); + return -EINVAL; + } + + if (log_type->use) { + spin_unlock(&_lock); + return -ETXTBSY; + } + + list_del(&log_type->list); + + spin_unlock(&_lock); + kfree(log_type); + + return 0; +} +EXPORT_SYMBOL(dm_dirty_log_type_unregister); + +struct dm_dirty_log *dm_dirty_log_create(const char *type_name, + struct dm_target *ti, + unsigned int argc, char **argv) +{ + struct dm_dirty_log_type *type; + struct dm_dirty_log *log; + + log = kmalloc(sizeof(*log), GFP_KERNEL); + if (!log) + return NULL; + + type = get_type(type_name); + if (!type) { + kfree(log); + return NULL; + } + + log->type = type; + if (type->ctr(log, ti, argc, argv)) { + kfree(log); + put_type(type); + return NULL; + } + + return log; +} +EXPORT_SYMBOL(dm_dirty_log_create); + +void dm_dirty_log_destroy(struct dm_dirty_log *log) +{ + log->type->dtr(log); + put_type(log->type); + kfree(log); +} +EXPORT_SYMBOL(dm_dirty_log_destroy); + +/*----------------------------------------------------------------- + * Persistent and core logs share a lot of their implementation. + * FIXME: need a reload method to be called from a resume + *---------------------------------------------------------------*/ +/* + * Magic for persistent mirrors: "MiRr" + */ +#define MIRROR_MAGIC 0x4D695272 + +/* + * The on-disk version of the metadata. + */ +#define MIRROR_DISK_VERSION 2 +#define LOG_OFFSET 2 + +struct log_header { + uint32_t magic; + + /* + * Simple, incrementing version. no backward + * compatibility. 
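+ * (MIRROR_DISK_VERSION is currently 2; a version 1 header is accepted,
+ * and silently upgraded, only on little-endian hosts; see read_header().)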
+ */ + uint32_t version; + sector_t nr_regions; +}; + +struct log_c { + struct dm_target *ti; + int touched; + uint32_t region_size; + unsigned int region_count; + region_t sync_count; + + unsigned bitset_uint32_count; + uint32_t *clean_bits; + uint32_t *sync_bits; + uint32_t *recovering_bits; /* FIXME: this seems excessive */ + + int sync_search; + + /* Resync flag */ + enum sync { + DEFAULTSYNC, /* Synchronize if necessary */ + NOSYNC, /* Devices known to be already in sync */ + FORCESYNC, /* Force a sync to happen */ + } sync; + + struct dm_io_request io_req; + + /* + * Disk log fields + */ + int log_dev_failed; + struct dm_dev *log_dev; + struct log_header header; + + struct dm_io_region header_location; + struct log_header *disk_header; +}; + +/* + * The touched member needs to be updated every time we access + * one of the bitsets. + */ +static inline int log_test_bit(uint32_t *bs, unsigned bit) +{ + return ext2_test_bit(bit, (unsigned long *) bs) ? 1 : 0; +} + +static inline void log_set_bit(struct log_c *l, + uint32_t *bs, unsigned bit) +{ + ext2_set_bit(bit, (unsigned long *) bs); + l->touched = 1; +} + +static inline void log_clear_bit(struct log_c *l, + uint32_t *bs, unsigned bit) +{ + ext2_clear_bit(bit, (unsigned long *) bs); + l->touched = 1; +} + +/*---------------------------------------------------------------- + * Header IO + *--------------------------------------------------------------*/ +static void header_to_disk(struct log_header *core, struct log_header *disk) +{ + disk->magic = cpu_to_le32(core->magic); + disk->version = cpu_to_le32(core->version); + disk->nr_regions = cpu_to_le64(core->nr_regions); +} + +static void header_from_disk(struct log_header *core, struct log_header *disk) +{ + core->magic = le32_to_cpu(disk->magic); + core->version = le32_to_cpu(disk->version); + core->nr_regions = le64_to_cpu(disk->nr_regions); +} + +static int rw_header(struct log_c *lc, int rw) +{ + lc->io_req.bi_rw = rw; + lc->io_req.mem.ptr.vma = lc->disk_header; + lc->io_req.notify.fn = NULL; + + return dm_io(&lc->io_req, 1, &lc->header_location, NULL); +} + +static int read_header(struct log_c *log) +{ + int r; + + r = rw_header(log, READ); + if (r) + return r; + + header_from_disk(&log->header, log->disk_header); + + /* New log required? 
*/ + if (log->sync != DEFAULTSYNC || log->header.magic != MIRROR_MAGIC) { + log->header.magic = MIRROR_MAGIC; + log->header.version = MIRROR_DISK_VERSION; + log->header.nr_regions = 0; + } + +#ifdef __LITTLE_ENDIAN + if (log->header.version == 1) + log->header.version = 2; +#endif + + if (log->header.version != MIRROR_DISK_VERSION) { + DMWARN("incompatible disk log version"); + return -EINVAL; + } + + return 0; +} + +static inline int write_header(struct log_c *log) +{ + header_to_disk(&log->header, log->disk_header); + return rw_header(log, WRITE); +} + +/*---------------------------------------------------------------- + * core log constructor/destructor + * + * argv contains region_size followed optionally by [no]sync + *--------------------------------------------------------------*/ +#define BYTE_SHIFT 3 +static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti, + unsigned int argc, char **argv, + struct dm_dev *dev) +{ + enum sync sync = DEFAULTSYNC; + + struct log_c *lc; + uint32_t region_size; + unsigned int region_count; + size_t bitset_size, buf_size; + int r; + + if (argc < 1 || argc > 2) { + DMWARN("wrong number of arguments to dirty region log"); + return -EINVAL; + } + + if (argc > 1) { + if (!strcmp(argv[1], "sync")) + sync = FORCESYNC; + else if (!strcmp(argv[1], "nosync")) + sync = NOSYNC; + else { + DMWARN("unrecognised sync argument to " + "dirty region log: %s", argv[1]); + return -EINVAL; + } + } + + if (sscanf(argv[0], "%u", ®ion_size) != 1) { + DMWARN("invalid region size string"); + return -EINVAL; + } + + region_count = dm_sector_div_up(ti->len, region_size); + + lc = kmalloc(sizeof(*lc), GFP_KERNEL); + if (!lc) { + DMWARN("couldn't allocate core log"); + return -ENOMEM; + } + + lc->ti = ti; + lc->touched = 0; + lc->region_size = region_size; + lc->region_count = region_count; + lc->sync = sync; + + /* + * Work out how many "unsigned long"s we need to hold the bitset. + */ + bitset_size = dm_round_up(region_count, + sizeof(*lc->clean_bits) << BYTE_SHIFT); + bitset_size >>= BYTE_SHIFT; + + lc->bitset_uint32_count = bitset_size / sizeof(*lc->clean_bits); + + /* + * Disk log? + */ + if (!dev) { + lc->clean_bits = vmalloc(bitset_size); + if (!lc->clean_bits) { + DMWARN("couldn't allocate clean bitset"); + kfree(lc); + return -ENOMEM; + } + lc->disk_header = NULL; + } else { + lc->log_dev = dev; + lc->log_dev_failed = 0; + lc->header_location.bdev = lc->log_dev->bdev; + lc->header_location.sector = 0; + + /* + * Buffer holds both header and bitset. 
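+ * The header occupies the first LOG_OFFSET (2) sectors and the clean
+ * bitset follows it; the whole buffer is rounded up to the device's
+ * hard sector size so it can be read or written as one dm_io request.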
+ */ + buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) + + bitset_size, ti->limits.hardsect_size); + lc->header_location.count = buf_size >> SECTOR_SHIFT; + lc->io_req.mem.type = DM_IO_VMA; + lc->io_req.client = dm_io_client_create(dm_div_up(buf_size, + PAGE_SIZE)); + if (IS_ERR(lc->io_req.client)) { + r = PTR_ERR(lc->io_req.client); + DMWARN("couldn't allocate disk io client"); + kfree(lc); + return -ENOMEM; + } + + lc->disk_header = vmalloc(buf_size); + if (!lc->disk_header) { + DMWARN("couldn't allocate disk log buffer"); + dm_io_client_destroy(lc->io_req.client); + kfree(lc); + return -ENOMEM; + } + + lc->clean_bits = (void *)lc->disk_header + + (LOG_OFFSET << SECTOR_SHIFT); + } + + memset(lc->clean_bits, -1, bitset_size); + + lc->sync_bits = vmalloc(bitset_size); + if (!lc->sync_bits) { + DMWARN("couldn't allocate sync bitset"); + if (!dev) + vfree(lc->clean_bits); + else + dm_io_client_destroy(lc->io_req.client); + vfree(lc->disk_header); + kfree(lc); + return -ENOMEM; + } + memset(lc->sync_bits, (sync == NOSYNC) ? -1 : 0, bitset_size); + lc->sync_count = (sync == NOSYNC) ? region_count : 0; + + lc->recovering_bits = vmalloc(bitset_size); + if (!lc->recovering_bits) { + DMWARN("couldn't allocate sync bitset"); + vfree(lc->sync_bits); + if (!dev) + vfree(lc->clean_bits); + else + dm_io_client_destroy(lc->io_req.client); + vfree(lc->disk_header); + kfree(lc); + return -ENOMEM; + } + memset(lc->recovering_bits, 0, bitset_size); + lc->sync_search = 0; + log->context = lc; + + return 0; +} + +static int core_ctr(struct dm_dirty_log *log, struct dm_target *ti, + unsigned int argc, char **argv) +{ + return create_log_context(log, ti, argc, argv, NULL); +} + +static void destroy_log_context(struct log_c *lc) +{ + vfree(lc->sync_bits); + vfree(lc->recovering_bits); + kfree(lc); +} + +static void core_dtr(struct dm_dirty_log *log) +{ + struct log_c *lc = (struct log_c *) log->context; + + vfree(lc->clean_bits); + destroy_log_context(lc); +} + +/*---------------------------------------------------------------- + * disk log constructor/destructor + * + * argv contains log_device region_size followed optionally by [no]sync + *--------------------------------------------------------------*/ +static int disk_ctr(struct dm_dirty_log *log, struct dm_target *ti, + unsigned int argc, char **argv) +{ + int r; + struct dm_dev *dev; + + if (argc < 2 || argc > 3) { + DMWARN("wrong number of arguments to disk dirty region log"); + return -EINVAL; + } + + r = dm_get_device(ti, argv[0], 0, 0 /* FIXME */, + FMODE_READ | FMODE_WRITE, &dev); + if (r) + return r; + + r = create_log_context(log, ti, argc - 1, argv + 1, dev); + if (r) { + dm_put_device(ti, dev); + return r; + } + + return 0; +} + +static void disk_dtr(struct dm_dirty_log *log) +{ + struct log_c *lc = (struct log_c *) log->context; + + dm_put_device(lc->ti, lc->log_dev); + vfree(lc->disk_header); + dm_io_client_destroy(lc->io_req.client); + destroy_log_context(lc); +} + +static int count_bits32(uint32_t *addr, unsigned size) +{ + int count = 0, i; + + for (i = 0; i < size; i++) { + count += hweight32(*(addr+i)); + } + return count; +} + +static void fail_log_device(struct log_c *lc) +{ + if (lc->log_dev_failed) + return; + + lc->log_dev_failed = 1; + dm_table_event(lc->ti->table); +} + +static int disk_resume(struct dm_dirty_log *log) +{ + int r; + unsigned i; + struct log_c *lc = (struct log_c *) log->context; + size_t size = lc->bitset_uint32_count * sizeof(uint32_t); + + /* read the disk header */ + r = read_header(lc); + if (r) { + 
DMWARN("%s: Failed to read header on dirty region log device", + lc->log_dev->name); + fail_log_device(lc); + /* + * If the log device cannot be read, we must assume + * all regions are out-of-sync. If we simply return + * here, the state will be uninitialized and could + * lead us to return 'in-sync' status for regions + * that are actually 'out-of-sync'. + */ + lc->header.nr_regions = 0; + } + + /* set or clear any new bits -- device has grown */ + if (lc->sync == NOSYNC) + for (i = lc->header.nr_regions; i < lc->region_count; i++) + /* FIXME: amazingly inefficient */ + log_set_bit(lc, lc->clean_bits, i); + else + for (i = lc->header.nr_regions; i < lc->region_count; i++) + /* FIXME: amazingly inefficient */ + log_clear_bit(lc, lc->clean_bits, i); + + /* clear any old bits -- device has shrunk */ + for (i = lc->region_count; i % (sizeof(*lc->clean_bits) << BYTE_SHIFT); i++) + log_clear_bit(lc, lc->clean_bits, i); + + /* copy clean across to sync */ + memcpy(lc->sync_bits, lc->clean_bits, size); + lc->sync_count = count_bits32(lc->clean_bits, lc->bitset_uint32_count); + lc->sync_search = 0; + + /* set the correct number of regions in the header */ + lc->header.nr_regions = lc->region_count; + + /* write the new header */ + r = write_header(lc); + if (r) { + DMWARN("%s: Failed to write header on dirty region log device", + lc->log_dev->name); + fail_log_device(lc); + } + + return r; +} + +static uint32_t core_get_region_size(struct dm_dirty_log *log) +{ + struct log_c *lc = (struct log_c *) log->context; + return lc->region_size; +} + +static int core_resume(struct dm_dirty_log *log) +{ + struct log_c *lc = (struct log_c *) log->context; + lc->sync_search = 0; + return 0; +} + +static int core_is_clean(struct dm_dirty_log *log, region_t region) +{ + struct log_c *lc = (struct log_c *) log->context; + return log_test_bit(lc->clean_bits, region); +} + +static int core_in_sync(struct dm_dirty_log *log, region_t region, int block) +{ + struct log_c *lc = (struct log_c *) log->context; + return log_test_bit(lc->sync_bits, region); +} + +static int core_flush(struct dm_dirty_log *log) +{ + /* no op */ + return 0; +} + +static int disk_flush(struct dm_dirty_log *log) +{ + int r; + struct log_c *lc = (struct log_c *) log->context; + + /* only write if the log has changed */ + if (!lc->touched) + return 0; + + r = write_header(lc); + if (r) + fail_log_device(lc); + else + lc->touched = 0; + + return r; +} + +static void core_mark_region(struct dm_dirty_log *log, region_t region) +{ + struct log_c *lc = (struct log_c *) log->context; + log_clear_bit(lc, lc->clean_bits, region); +} + +static void core_clear_region(struct dm_dirty_log *log, region_t region) +{ + struct log_c *lc = (struct log_c *) log->context; + log_set_bit(lc, lc->clean_bits, region); +} + +static int core_get_resync_work(struct dm_dirty_log *log, region_t *region) +{ + struct log_c *lc = (struct log_c *) log->context; + + if (lc->sync_search >= lc->region_count) + return 0; + + do { + *region = ext2_find_next_zero_bit( + (unsigned long *) lc->sync_bits, + lc->region_count, + lc->sync_search); + lc->sync_search = *region + 1; + + if (*region >= lc->region_count) + return 0; + + } while (log_test_bit(lc->recovering_bits, *region)); + + log_set_bit(lc, lc->recovering_bits, *region); + return 1; +} + +static void core_set_region_sync(struct dm_dirty_log *log, region_t region, + int in_sync) +{ + struct log_c *lc = (struct log_c *) log->context; + + log_clear_bit(lc, lc->recovering_bits, region); + if (in_sync) { + log_set_bit(lc, 
lc->sync_bits, region); + lc->sync_count++; + } else if (log_test_bit(lc->sync_bits, region)) { + lc->sync_count--; + log_clear_bit(lc, lc->sync_bits, region); + } +} + +static region_t core_get_sync_count(struct dm_dirty_log *log) +{ + struct log_c *lc = (struct log_c *) log->context; + + return lc->sync_count; +} + +#define DMEMIT_SYNC \ + if (lc->sync != DEFAULTSYNC) \ + DMEMIT("%ssync ", lc->sync == NOSYNC ? "no" : "") + +static int core_status(struct dm_dirty_log *log, status_type_t status, + char *result, unsigned int maxlen) +{ + int sz = 0; + struct log_c *lc = log->context; + + switch(status) { + case STATUSTYPE_INFO: + DMEMIT("1 %s", log->type->name); + break; + + case STATUSTYPE_TABLE: + DMEMIT("%s %u %u ", log->type->name, + lc->sync == DEFAULTSYNC ? 1 : 2, lc->region_size); + DMEMIT_SYNC; + } + + return sz; +} + +static int disk_status(struct dm_dirty_log *log, status_type_t status, + char *result, unsigned int maxlen) +{ + int sz = 0; + struct log_c *lc = log->context; + + switch(status) { + case STATUSTYPE_INFO: + DMEMIT("3 %s %s %c", log->type->name, lc->log_dev->name, + lc->log_dev_failed ? 'D' : 'A'); + break; + + case STATUSTYPE_TABLE: + DMEMIT("%s %u %s %u ", log->type->name, + lc->sync == DEFAULTSYNC ? 2 : 3, lc->log_dev->name, + lc->region_size); + DMEMIT_SYNC; + } + + return sz; +} + +static struct dm_dirty_log_type _core_type = { + .name = "core", + .module = THIS_MODULE, + .ctr = core_ctr, + .dtr = core_dtr, + .resume = core_resume, + .get_region_size = core_get_region_size, + .is_clean = core_is_clean, + .in_sync = core_in_sync, + .flush = core_flush, + .mark_region = core_mark_region, + .clear_region = core_clear_region, + .get_resync_work = core_get_resync_work, + .set_region_sync = core_set_region_sync, + .get_sync_count = core_get_sync_count, + .status = core_status, +}; + +static struct dm_dirty_log_type _disk_type = { + .name = "disk", + .module = THIS_MODULE, + .ctr = disk_ctr, + .dtr = disk_dtr, + .postsuspend = disk_flush, + .resume = disk_resume, + .get_region_size = core_get_region_size, + .is_clean = core_is_clean, + .in_sync = core_in_sync, + .flush = disk_flush, + .mark_region = core_mark_region, + .clear_region = core_clear_region, + .get_resync_work = core_get_resync_work, + .set_region_sync = core_set_region_sync, + .get_sync_count = core_get_sync_count, + .status = disk_status, +}; + +static int __init dm_dirty_log_init(void) +{ + int r; + + r = dm_dirty_log_type_register(&_core_type); + if (r) + DMWARN("couldn't register core log"); + + r = dm_dirty_log_type_register(&_disk_type); + if (r) { + DMWARN("couldn't register disk type"); + dm_dirty_log_type_unregister(&_core_type); + } + + return r; +} + +static void __exit dm_dirty_log_exit(void) +{ + dm_dirty_log_type_unregister(&_disk_type); + dm_dirty_log_type_unregister(&_core_type); +} + +module_init(dm_dirty_log_init); +module_exit(dm_dirty_log_exit); + +MODULE_DESCRIPTION(DM_NAME " dirty region log"); +MODULE_AUTHOR("Joe Thornber, Heinz Mauelshagen <dm-devel@redhat.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c new file mode 100644 index 0000000..3d7f492 --- /dev/null +++ b/drivers/md/dm-mpath.c @@ -0,0 +1,1514 @@ +/* + * Copyright (C) 2003 Sistina Software Limited. + * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved. + * + * This file is released under the GPL. 
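+ *
+ * Multipath target: routes each bio down one of several paths to the
+ * same device. Paths are grouped into priority groups; failed paths are
+ * taken out of use, queued I/O is resubmitted by a daemon, and an
+ * optional scsi_dh hardware handler initialises a priority group before
+ * it is used.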
+ */ + +#include <linux/device-mapper.h> + +#include "dm-path-selector.h" +#include "dm-bio-list.h" +#include "dm-bio-record.h" +#include "dm-uevent.h" + +#include <linux/ctype.h> +#include <linux/init.h> +#include <linux/mempool.h> +#include <linux/module.h> +#include <linux/pagemap.h> +#include <linux/slab.h> +#include <linux/time.h> +#include <linux/workqueue.h> +#include <scsi/scsi_dh.h> +#include <asm/atomic.h> + +#define DM_MSG_PREFIX "multipath" +#define MESG_STR(x) x, sizeof(x) + +/* Path properties */ +struct pgpath { + struct list_head list; + + struct priority_group *pg; /* Owning PG */ + unsigned is_active; /* Path status */ + unsigned fail_count; /* Cumulative failure count */ + + struct dm_path path; + struct work_struct deactivate_path; +}; + +#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path) + +/* + * Paths are grouped into Priority Groups and numbered from 1 upwards. + * Each has a path selector which controls which path gets used. + */ +struct priority_group { + struct list_head list; + + struct multipath *m; /* Owning multipath instance */ + struct path_selector ps; + + unsigned pg_num; /* Reference number */ + unsigned bypassed; /* Temporarily bypass this PG? */ + + unsigned nr_pgpaths; /* Number of paths in PG */ + struct list_head pgpaths; +}; + +/* Multipath context */ +struct multipath { + struct list_head list; + struct dm_target *ti; + + spinlock_t lock; + + const char *hw_handler_name; + struct work_struct activate_path; + struct pgpath *pgpath_to_activate; + unsigned nr_priority_groups; + struct list_head priority_groups; + unsigned pg_init_required; /* pg_init needs calling? */ + unsigned pg_init_in_progress; /* Only one pg_init allowed at once */ + + unsigned nr_valid_paths; /* Total number of usable paths */ + struct pgpath *current_pgpath; + struct priority_group *current_pg; + struct priority_group *next_pg; /* Switch to this PG if set */ + unsigned repeat_count; /* I/Os left before calling PS again */ + + unsigned queue_io; /* Must we queue all I/O? */ + unsigned queue_if_no_path; /* Queue I/O if last path fails? */ + unsigned saved_queue_if_no_path;/* Saved state during suspension */ + unsigned pg_init_retries; /* Number of times to retry pg_init */ + unsigned pg_init_count; /* Number of times pg_init called */ + + struct work_struct process_queued_ios; + struct bio_list queued_ios; + unsigned queue_size; + + struct work_struct trigger_event; + + /* + * We must use a mempool of dm_mpath_io structs so that we + * can resubmit bios on error. + */ + mempool_t *mpio_pool; +}; + +/* + * Context information attached to each bio we process. 
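+ * The saved dm_bio_details let do_end_io() restore the bio and queue it
+ * for resubmission down another path if the chosen path fails.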
+ */ +struct dm_mpath_io { + struct pgpath *pgpath; + struct dm_bio_details details; +}; + +typedef int (*action_fn) (struct pgpath *pgpath); + +#define MIN_IOS 256 /* Mempool size */ + +static struct kmem_cache *_mpio_cache; + +static struct workqueue_struct *kmultipathd, *kmpath_handlerd; +static void process_queued_ios(struct work_struct *work); +static void trigger_event(struct work_struct *work); +static void activate_path(struct work_struct *work); +static void deactivate_path(struct work_struct *work); + + +/*----------------------------------------------- + * Allocation routines + *-----------------------------------------------*/ + +static struct pgpath *alloc_pgpath(void) +{ + struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL); + + if (pgpath) { + pgpath->is_active = 1; + INIT_WORK(&pgpath->deactivate_path, deactivate_path); + } + + return pgpath; +} + +static void free_pgpath(struct pgpath *pgpath) +{ + kfree(pgpath); +} + +static void deactivate_path(struct work_struct *work) +{ + struct pgpath *pgpath = + container_of(work, struct pgpath, deactivate_path); + + blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue); +} + +static struct priority_group *alloc_priority_group(void) +{ + struct priority_group *pg; + + pg = kzalloc(sizeof(*pg), GFP_KERNEL); + + if (pg) + INIT_LIST_HEAD(&pg->pgpaths); + + return pg; +} + +static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti) +{ + unsigned long flags; + struct pgpath *pgpath, *tmp; + struct multipath *m = ti->private; + + list_for_each_entry_safe(pgpath, tmp, pgpaths, list) { + list_del(&pgpath->list); + if (m->hw_handler_name) + scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev)); + dm_put_device(ti, pgpath->path.dev); + spin_lock_irqsave(&m->lock, flags); + if (m->pgpath_to_activate == pgpath) + m->pgpath_to_activate = NULL; + spin_unlock_irqrestore(&m->lock, flags); + free_pgpath(pgpath); + } +} + +static void free_priority_group(struct priority_group *pg, + struct dm_target *ti) +{ + struct path_selector *ps = &pg->ps; + + if (ps->type) { + ps->type->destroy(ps); + dm_put_path_selector(ps->type); + } + + free_pgpaths(&pg->pgpaths, ti); + kfree(pg); +} + +static struct multipath *alloc_multipath(struct dm_target *ti) +{ + struct multipath *m; + + m = kzalloc(sizeof(*m), GFP_KERNEL); + if (m) { + INIT_LIST_HEAD(&m->priority_groups); + spin_lock_init(&m->lock); + m->queue_io = 1; + INIT_WORK(&m->process_queued_ios, process_queued_ios); + INIT_WORK(&m->trigger_event, trigger_event); + INIT_WORK(&m->activate_path, activate_path); + m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache); + if (!m->mpio_pool) { + kfree(m); + return NULL; + } + m->ti = ti; + ti->private = m; + } + + return m; +} + +static void free_multipath(struct multipath *m) +{ + struct priority_group *pg, *tmp; + + list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) { + list_del(&pg->list); + free_priority_group(pg, m->ti); + } + + kfree(m->hw_handler_name); + mempool_destroy(m->mpio_pool); + kfree(m); +} + + +/*----------------------------------------------- + * Path selection + *-----------------------------------------------*/ + +static void __switch_pg(struct multipath *m, struct pgpath *pgpath) +{ + m->current_pg = pgpath->pg; + + /* Must we initialise the PG first, and queue I/O till it's ready? 
*/ + if (m->hw_handler_name) { + m->pg_init_required = 1; + m->queue_io = 1; + } else { + m->pg_init_required = 0; + m->queue_io = 0; + } + + m->pg_init_count = 0; +} + +static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg) +{ + struct dm_path *path; + + path = pg->ps.type->select_path(&pg->ps, &m->repeat_count); + if (!path) + return -ENXIO; + + m->current_pgpath = path_to_pgpath(path); + + if (m->current_pg != pg) + __switch_pg(m, m->current_pgpath); + + return 0; +} + +static void __choose_pgpath(struct multipath *m) +{ + struct priority_group *pg; + unsigned bypassed = 1; + + if (!m->nr_valid_paths) + goto failed; + + /* Were we instructed to switch PG? */ + if (m->next_pg) { + pg = m->next_pg; + m->next_pg = NULL; + if (!__choose_path_in_pg(m, pg)) + return; + } + + /* Don't change PG until it has no remaining paths */ + if (m->current_pg && !__choose_path_in_pg(m, m->current_pg)) + return; + + /* + * Loop through priority groups until we find a valid path. + * First time we skip PGs marked 'bypassed'. + * Second time we only try the ones we skipped. + */ + do { + list_for_each_entry(pg, &m->priority_groups, list) { + if (pg->bypassed == bypassed) + continue; + if (!__choose_path_in_pg(m, pg)) + return; + } + } while (bypassed--); + +failed: + m->current_pgpath = NULL; + m->current_pg = NULL; +} + +/* + * Check whether bios must be queued in the device-mapper core rather + * than here in the target. + * + * m->lock must be held on entry. + * + * If m->queue_if_no_path and m->saved_queue_if_no_path hold the + * same value then we are not between multipath_presuspend() + * and multipath_resume() calls and we have no need to check + * for the DMF_NOFLUSH_SUSPENDING flag. + */ +static int __must_push_back(struct multipath *m) +{ + return (m->queue_if_no_path != m->saved_queue_if_no_path && + dm_noflush_suspending(m->ti)); +} + +static int map_io(struct multipath *m, struct bio *bio, + struct dm_mpath_io *mpio, unsigned was_queued) +{ + int r = DM_MAPIO_REMAPPED; + unsigned long flags; + struct pgpath *pgpath; + + spin_lock_irqsave(&m->lock, flags); + + /* Do we need to select a new pgpath? */ + if (!m->current_pgpath || + (!m->queue_io && (m->repeat_count && --m->repeat_count == 0))) + __choose_pgpath(m); + + pgpath = m->current_pgpath; + + if (was_queued) + m->queue_size--; + + if ((pgpath && m->queue_io) || + (!pgpath && m->queue_if_no_path)) { + /* Queue for the daemon to resubmit */ + bio_list_add(&m->queued_ios, bio); + m->queue_size++; + if ((m->pg_init_required && !m->pg_init_in_progress) || + !m->queue_io) + queue_work(kmultipathd, &m->process_queued_ios); + pgpath = NULL; + r = DM_MAPIO_SUBMITTED; + } else if (pgpath) + bio->bi_bdev = pgpath->path.dev->bdev; + else if (__must_push_back(m)) + r = DM_MAPIO_REQUEUE; + else + r = -EIO; /* Failed */ + + mpio->pgpath = pgpath; + + spin_unlock_irqrestore(&m->lock, flags); + + return r; +} + +/* + * If we run out of usable paths, should we queue I/O or error it? 
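+ * queue_if_no_path() below flips this policy under m->lock; when
+ * queueing is turned off while bios are still queued, the daemon is
+ * woken so they can be dispatched or failed.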
+ */ +static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path, + unsigned save_old_value) +{ + unsigned long flags; + + spin_lock_irqsave(&m->lock, flags); + + if (save_old_value) + m->saved_queue_if_no_path = m->queue_if_no_path; + else + m->saved_queue_if_no_path = queue_if_no_path; + m->queue_if_no_path = queue_if_no_path; + if (!m->queue_if_no_path && m->queue_size) + queue_work(kmultipathd, &m->process_queued_ios); + + spin_unlock_irqrestore(&m->lock, flags); + + return 0; +} + +/*----------------------------------------------------------------- + * The multipath daemon is responsible for resubmitting queued ios. + *---------------------------------------------------------------*/ + +static void dispatch_queued_ios(struct multipath *m) +{ + int r; + unsigned long flags; + struct bio *bio = NULL, *next; + struct dm_mpath_io *mpio; + union map_info *info; + + spin_lock_irqsave(&m->lock, flags); + bio = bio_list_get(&m->queued_ios); + spin_unlock_irqrestore(&m->lock, flags); + + while (bio) { + next = bio->bi_next; + bio->bi_next = NULL; + + info = dm_get_mapinfo(bio); + mpio = info->ptr; + + r = map_io(m, bio, mpio, 1); + if (r < 0) + bio_endio(bio, r); + else if (r == DM_MAPIO_REMAPPED) + generic_make_request(bio); + else if (r == DM_MAPIO_REQUEUE) + bio_endio(bio, -EIO); + + bio = next; + } +} + +static void process_queued_ios(struct work_struct *work) +{ + struct multipath *m = + container_of(work, struct multipath, process_queued_ios); + struct pgpath *pgpath = NULL; + unsigned init_required = 0, must_queue = 1; + unsigned long flags; + + spin_lock_irqsave(&m->lock, flags); + + if (!m->queue_size) + goto out; + + if (!m->current_pgpath) + __choose_pgpath(m); + + pgpath = m->current_pgpath; + + if ((pgpath && !m->queue_io) || + (!pgpath && !m->queue_if_no_path)) + must_queue = 0; + + if (m->pg_init_required && !m->pg_init_in_progress && pgpath) { + m->pgpath_to_activate = pgpath; + m->pg_init_count++; + m->pg_init_required = 0; + m->pg_init_in_progress = 1; + init_required = 1; + } + +out: + spin_unlock_irqrestore(&m->lock, flags); + + if (init_required) + queue_work(kmpath_handlerd, &m->activate_path); + + if (!must_queue) + dispatch_queued_ios(m); +} + +/* + * An event is triggered whenever a path is taken out of use. + * Includes path failure and PG bypass. 
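+ * The event is raised from the kmultipathd workqueue via
+ * dm_table_event(), so callers such as fail_path() can queue it while
+ * holding m->lock.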
+ */ +static void trigger_event(struct work_struct *work) +{ + struct multipath *m = + container_of(work, struct multipath, trigger_event); + + dm_table_event(m->ti->table); +} + +/*----------------------------------------------------------------- + * Constructor/argument parsing: + * <#multipath feature args> [<arg>]* + * <#hw_handler args> [hw_handler [<arg>]*] + * <#priority groups> + * <initial priority group> + * [<selector> <#selector args> [<arg>]* + * <#paths> <#per-path selector args> + * [<path> [<arg>]* ]+ ]+ + *---------------------------------------------------------------*/ +struct param { + unsigned min; + unsigned max; + char *error; +}; + +static int read_param(struct param *param, char *str, unsigned *v, char **error) +{ + if (!str || + (sscanf(str, "%u", v) != 1) || + (*v < param->min) || + (*v > param->max)) { + *error = param->error; + return -EINVAL; + } + + return 0; +} + +struct arg_set { + unsigned argc; + char **argv; +}; + +static char *shift(struct arg_set *as) +{ + char *r; + + if (as->argc) { + as->argc--; + r = *as->argv; + as->argv++; + return r; + } + + return NULL; +} + +static void consume(struct arg_set *as, unsigned n) +{ + BUG_ON (as->argc < n); + as->argc -= n; + as->argv += n; +} + +static int parse_path_selector(struct arg_set *as, struct priority_group *pg, + struct dm_target *ti) +{ + int r; + struct path_selector_type *pst; + unsigned ps_argc; + + static struct param _params[] = { + {0, 1024, "invalid number of path selector args"}, + }; + + pst = dm_get_path_selector(shift(as)); + if (!pst) { + ti->error = "unknown path selector type"; + return -EINVAL; + } + + r = read_param(_params, shift(as), &ps_argc, &ti->error); + if (r) { + dm_put_path_selector(pst); + return -EINVAL; + } + + r = pst->create(&pg->ps, ps_argc, as->argv); + if (r) { + dm_put_path_selector(pst); + ti->error = "path selector constructor failed"; + return r; + } + + pg->ps.type = pst; + consume(as, ps_argc); + + return 0; +} + +static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps, + struct dm_target *ti) +{ + int r; + struct pgpath *p; + struct multipath *m = ti->private; + + /* we need at least a path arg */ + if (as->argc < 1) { + ti->error = "no device given"; + return ERR_PTR(-EINVAL); + } + + p = alloc_pgpath(); + if (!p) + return ERR_PTR(-ENOMEM); + + r = dm_get_device(ti, shift(as), ti->begin, ti->len, + dm_table_get_mode(ti->table), &p->path.dev); + if (r) { + ti->error = "error getting device"; + goto bad; + } + + if (m->hw_handler_name) { + r = scsi_dh_attach(bdev_get_queue(p->path.dev->bdev), + m->hw_handler_name); + if (r < 0) { + dm_put_device(ti, p->path.dev); + goto bad; + } + } + + r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error); + if (r) { + dm_put_device(ti, p->path.dev); + goto bad; + } + + return p; + + bad: + free_pgpath(p); + return ERR_PTR(r); +} + +static struct priority_group *parse_priority_group(struct arg_set *as, + struct multipath *m) +{ + static struct param _params[] = { + {1, 1024, "invalid number of paths"}, + {0, 1024, "invalid number of selector args"} + }; + + int r; + unsigned i, nr_selector_args, nr_params; + struct priority_group *pg; + struct dm_target *ti = m->ti; + + if (as->argc < 2) { + as->argc = 0; + ti->error = "not enough priority group arguments"; + return ERR_PTR(-EINVAL); + } + + pg = alloc_priority_group(); + if (!pg) { + ti->error = "couldn't allocate priority group"; + return ERR_PTR(-ENOMEM); + } + pg->m = m; + + r = parse_path_selector(as, pg, ti); + if (r) + goto bad; + + /* 
+ * read the paths + */ + r = read_param(_params, shift(as), &pg->nr_pgpaths, &ti->error); + if (r) + goto bad; + + r = read_param(_params + 1, shift(as), &nr_selector_args, &ti->error); + if (r) + goto bad; + + nr_params = 1 + nr_selector_args; + for (i = 0; i < pg->nr_pgpaths; i++) { + struct pgpath *pgpath; + struct arg_set path_args; + + if (as->argc < nr_params) { + ti->error = "not enough path parameters"; + goto bad; + } + + path_args.argc = nr_params; + path_args.argv = as->argv; + + pgpath = parse_path(&path_args, &pg->ps, ti); + if (IS_ERR(pgpath)) { + r = PTR_ERR(pgpath); + goto bad; + } + + pgpath->pg = pg; + list_add_tail(&pgpath->list, &pg->pgpaths); + consume(as, nr_params); + } + + return pg; + + bad: + free_priority_group(pg, ti); + return ERR_PTR(r); +} + +static int parse_hw_handler(struct arg_set *as, struct multipath *m) +{ + unsigned hw_argc; + struct dm_target *ti = m->ti; + + static struct param _params[] = { + {0, 1024, "invalid number of hardware handler args"}, + }; + + if (read_param(_params, shift(as), &hw_argc, &ti->error)) + return -EINVAL; + + if (!hw_argc) + return 0; + + m->hw_handler_name = kstrdup(shift(as), GFP_KERNEL); + request_module("scsi_dh_%s", m->hw_handler_name); + if (scsi_dh_handler_exist(m->hw_handler_name) == 0) { + ti->error = "unknown hardware handler type"; + kfree(m->hw_handler_name); + m->hw_handler_name = NULL; + return -EINVAL; + } + + if (hw_argc > 1) + DMWARN("Ignoring user-specified arguments for " + "hardware handler \"%s\"", m->hw_handler_name); + consume(as, hw_argc - 1); + + return 0; +} + +static int parse_features(struct arg_set *as, struct multipath *m) +{ + int r; + unsigned argc; + struct dm_target *ti = m->ti; + const char *param_name; + + static struct param _params[] = { + {0, 3, "invalid number of feature args"}, + {1, 50, "pg_init_retries must be between 1 and 50"}, + }; + + r = read_param(_params, shift(as), &argc, &ti->error); + if (r) + return -EINVAL; + + if (!argc) + return 0; + + do { + param_name = shift(as); + argc--; + + if (!strnicmp(param_name, MESG_STR("queue_if_no_path"))) { + r = queue_if_no_path(m, 1, 0); + continue; + } + + if (!strnicmp(param_name, MESG_STR("pg_init_retries")) && + (argc >= 1)) { + r = read_param(_params + 1, shift(as), + &m->pg_init_retries, &ti->error); + argc--; + continue; + } + + ti->error = "Unrecognised multipath feature request"; + r = -EINVAL; + } while (argc && !r); + + return r; +} + +static int multipath_ctr(struct dm_target *ti, unsigned int argc, + char **argv) +{ + /* target parameters */ + static struct param _params[] = { + {1, 1024, "invalid number of priority groups"}, + {1, 1024, "invalid initial priority group number"}, + }; + + int r; + struct multipath *m; + struct arg_set as; + unsigned pg_count = 0; + unsigned next_pg_num; + + as.argc = argc; + as.argv = argv; + + m = alloc_multipath(ti); + if (!m) { + ti->error = "can't allocate multipath"; + return -EINVAL; + } + + r = parse_features(&as, m); + if (r) + goto bad; + + r = parse_hw_handler(&as, m); + if (r) + goto bad; + + r = read_param(_params, shift(&as), &m->nr_priority_groups, &ti->error); + if (r) + goto bad; + + r = read_param(_params + 1, shift(&as), &next_pg_num, &ti->error); + if (r) + goto bad; + + /* parse the priority groups */ + while (as.argc) { + struct priority_group *pg; + + pg = parse_priority_group(&as, m); + if (IS_ERR(pg)) { + r = PTR_ERR(pg); + goto bad; + } + + m->nr_valid_paths += pg->nr_pgpaths; + list_add_tail(&pg->list, &m->priority_groups); + pg_count++; + pg->pg_num = pg_count; + 
if (!--next_pg_num) + m->next_pg = pg; + } + + if (pg_count != m->nr_priority_groups) { + ti->error = "priority group count mismatch"; + r = -EINVAL; + goto bad; + } + + return 0; + + bad: + free_multipath(m); + return r; +} + +static void multipath_dtr(struct dm_target *ti) +{ + struct multipath *m = (struct multipath *) ti->private; + + flush_workqueue(kmpath_handlerd); + flush_workqueue(kmultipathd); + free_multipath(m); +} + +/* + * Map bios, recording original fields for later in case we have to resubmit + */ +static int multipath_map(struct dm_target *ti, struct bio *bio, + union map_info *map_context) +{ + int r; + struct dm_mpath_io *mpio; + struct multipath *m = (struct multipath *) ti->private; + + mpio = mempool_alloc(m->mpio_pool, GFP_NOIO); + dm_bio_record(&mpio->details, bio); + + map_context->ptr = mpio; + bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT); + r = map_io(m, bio, mpio, 0); + if (r < 0 || r == DM_MAPIO_REQUEUE) + mempool_free(mpio, m->mpio_pool); + + return r; +} + +/* + * Take a path out of use. + */ +static int fail_path(struct pgpath *pgpath) +{ + unsigned long flags; + struct multipath *m = pgpath->pg->m; + + spin_lock_irqsave(&m->lock, flags); + + if (!pgpath->is_active) + goto out; + + DMWARN("Failing path %s.", pgpath->path.dev->name); + + pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path); + pgpath->is_active = 0; + pgpath->fail_count++; + + m->nr_valid_paths--; + + if (pgpath == m->current_pgpath) + m->current_pgpath = NULL; + + dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti, + pgpath->path.dev->name, m->nr_valid_paths); + + queue_work(kmultipathd, &m->trigger_event); + queue_work(kmultipathd, &pgpath->deactivate_path); + +out: + spin_unlock_irqrestore(&m->lock, flags); + + return 0; +} + +/* + * Reinstate a previously-failed path + */ +static int reinstate_path(struct pgpath *pgpath) +{ + int r = 0; + unsigned long flags; + struct multipath *m = pgpath->pg->m; + + spin_lock_irqsave(&m->lock, flags); + + if (pgpath->is_active) + goto out; + + if (!pgpath->pg->ps.type->reinstate_path) { + DMWARN("Reinstate path not supported by path selector %s", + pgpath->pg->ps.type->name); + r = -EINVAL; + goto out; + } + + r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path); + if (r) + goto out; + + pgpath->is_active = 1; + + m->current_pgpath = NULL; + if (!m->nr_valid_paths++ && m->queue_size) + queue_work(kmultipathd, &m->process_queued_ios); + + dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti, + pgpath->path.dev->name, m->nr_valid_paths); + + queue_work(kmultipathd, &m->trigger_event); + +out: + spin_unlock_irqrestore(&m->lock, flags); + + return r; +} + +/* + * Fail or reinstate all paths that match the provided struct dm_dev. 
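+ * Used by multipath_message() to implement the "fail_path" and
+ * "reinstate_path" messages; every matching path is acted upon and the
+ * last action's return value is reported.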
+ */ +static int action_dev(struct multipath *m, struct dm_dev *dev, + action_fn action) +{ + int r = 0; + struct pgpath *pgpath; + struct priority_group *pg; + + list_for_each_entry(pg, &m->priority_groups, list) { + list_for_each_entry(pgpath, &pg->pgpaths, list) { + if (pgpath->path.dev == dev) + r = action(pgpath); + } + } + + return r; +} + +/* + * Temporarily try to avoid having to use the specified PG + */ +static void bypass_pg(struct multipath *m, struct priority_group *pg, + int bypassed) +{ + unsigned long flags; + + spin_lock_irqsave(&m->lock, flags); + + pg->bypassed = bypassed; + m->current_pgpath = NULL; + m->current_pg = NULL; + + spin_unlock_irqrestore(&m->lock, flags); + + queue_work(kmultipathd, &m->trigger_event); +} + +/* + * Switch to using the specified PG from the next I/O that gets mapped + */ +static int switch_pg_num(struct multipath *m, const char *pgstr) +{ + struct priority_group *pg; + unsigned pgnum; + unsigned long flags; + + if (!pgstr || (sscanf(pgstr, "%u", &pgnum) != 1) || !pgnum || + (pgnum > m->nr_priority_groups)) { + DMWARN("invalid PG number supplied to switch_pg_num"); + return -EINVAL; + } + + spin_lock_irqsave(&m->lock, flags); + list_for_each_entry(pg, &m->priority_groups, list) { + pg->bypassed = 0; + if (--pgnum) + continue; + + m->current_pgpath = NULL; + m->current_pg = NULL; + m->next_pg = pg; + } + spin_unlock_irqrestore(&m->lock, flags); + + queue_work(kmultipathd, &m->trigger_event); + return 0; +} + +/* + * Set/clear bypassed status of a PG. + * PGs are numbered upwards from 1 in the order they were declared. + */ +static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed) +{ + struct priority_group *pg; + unsigned pgnum; + + if (!pgstr || (sscanf(pgstr, "%u", &pgnum) != 1) || !pgnum || + (pgnum > m->nr_priority_groups)) { + DMWARN("invalid PG number supplied to bypass_pg"); + return -EINVAL; + } + + list_for_each_entry(pg, &m->priority_groups, list) { + if (!--pgnum) + break; + } + + bypass_pg(m, pg, bypassed); + return 0; +} + +/* + * Should we retry pg_init immediately? + */ +static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath) +{ + unsigned long flags; + int limit_reached = 0; + + spin_lock_irqsave(&m->lock, flags); + + if (m->pg_init_count <= m->pg_init_retries) + m->pg_init_required = 1; + else + limit_reached = 1; + + spin_unlock_irqrestore(&m->lock, flags); + + return limit_reached; +} + +static void pg_init_done(struct dm_path *path, int errors) +{ + struct pgpath *pgpath = path_to_pgpath(path); + struct priority_group *pg = pgpath->pg; + struct multipath *m = pg->m; + unsigned long flags; + + /* device or driver problems */ + switch (errors) { + case SCSI_DH_OK: + break; + case SCSI_DH_NOSYS: + if (!m->hw_handler_name) { + errors = 0; + break; + } + DMERR("Cannot failover device because scsi_dh_%s was not " + "loaded.", m->hw_handler_name); + /* + * Fail path for now, so we do not ping pong + */ + fail_path(pgpath); + break; + case SCSI_DH_DEV_TEMP_BUSY: + /* + * Probably doing something like FW upgrade on the + * controller so try the other pg. + */ + bypass_pg(m, pg, 1); + break; + /* TODO: For SCSI_DH_RETRY we should wait a couple seconds */ + case SCSI_DH_RETRY: + case SCSI_DH_IMM_RETRY: + case SCSI_DH_RES_TEMP_UNAVAIL: + if (pg_init_limit_reached(m, pgpath)) + fail_path(pgpath); + errors = 0; + break; + default: + /* + * We probably do not want to fail the path for a device + * error, but this is what the old dm did. In future + * patches we can do more advanced handling. 
+ */ + fail_path(pgpath); + } + + spin_lock_irqsave(&m->lock, flags); + if (errors) { + DMERR("Could not failover device. Error %d.", errors); + m->current_pgpath = NULL; + m->current_pg = NULL; + } else if (!m->pg_init_required) { + m->queue_io = 0; + pg->bypassed = 0; + } + + m->pg_init_in_progress = 0; + queue_work(kmultipathd, &m->process_queued_ios); + spin_unlock_irqrestore(&m->lock, flags); +} + +static void activate_path(struct work_struct *work) +{ + int ret; + struct multipath *m = + container_of(work, struct multipath, activate_path); + struct dm_path *path; + unsigned long flags; + + spin_lock_irqsave(&m->lock, flags); + path = &m->pgpath_to_activate->path; + m->pgpath_to_activate = NULL; + spin_unlock_irqrestore(&m->lock, flags); + if (!path) + return; + ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev)); + pg_init_done(path, ret); +} + +/* + * end_io handling + */ +static int do_end_io(struct multipath *m, struct bio *bio, + int error, struct dm_mpath_io *mpio) +{ + unsigned long flags; + + if (!error) + return 0; /* I/O complete */ + + if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio)) + return error; + + if (error == -EOPNOTSUPP) + return error; + + spin_lock_irqsave(&m->lock, flags); + if (!m->nr_valid_paths) { + if (__must_push_back(m)) { + spin_unlock_irqrestore(&m->lock, flags); + return DM_ENDIO_REQUEUE; + } else if (!m->queue_if_no_path) { + spin_unlock_irqrestore(&m->lock, flags); + return -EIO; + } else { + spin_unlock_irqrestore(&m->lock, flags); + goto requeue; + } + } + spin_unlock_irqrestore(&m->lock, flags); + + if (mpio->pgpath) + fail_path(mpio->pgpath); + + requeue: + dm_bio_restore(&mpio->details, bio); + + /* queue for the daemon to resubmit or fail */ + spin_lock_irqsave(&m->lock, flags); + bio_list_add(&m->queued_ios, bio); + m->queue_size++; + if (!m->queue_io) + queue_work(kmultipathd, &m->process_queued_ios); + spin_unlock_irqrestore(&m->lock, flags); + + return DM_ENDIO_INCOMPLETE; /* io not complete */ +} + +static int multipath_end_io(struct dm_target *ti, struct bio *bio, + int error, union map_info *map_context) +{ + struct multipath *m = ti->private; + struct dm_mpath_io *mpio = map_context->ptr; + struct pgpath *pgpath = mpio->pgpath; + struct path_selector *ps; + int r; + + r = do_end_io(m, bio, error, mpio); + if (pgpath) { + ps = &pgpath->pg->ps; + if (ps->type->end_io) + ps->type->end_io(ps, &pgpath->path); + } + if (r != DM_ENDIO_INCOMPLETE) + mempool_free(mpio, m->mpio_pool); + + return r; +} + +/* + * Suspend can't complete until all the I/O is processed so if + * the last path fails we must error any remaining I/O. + * Note that if the freeze_bdev fails while suspending, the + * queue_if_no_path state is lost - userspace should reset it. + */ +static void multipath_presuspend(struct dm_target *ti) +{ + struct multipath *m = (struct multipath *) ti->private; + + queue_if_no_path(m, 0, 1); +} + +/* + * Restore the queue_if_no_path setting. 
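+ * This undoes the save performed by multipath_presuspend(), which
+ * forces queue_if_no_path off so that any remaining I/O can be errored
+ * during suspend.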
+ */ +static void multipath_resume(struct dm_target *ti) +{ + struct multipath *m = (struct multipath *) ti->private; + unsigned long flags; + + spin_lock_irqsave(&m->lock, flags); + m->queue_if_no_path = m->saved_queue_if_no_path; + spin_unlock_irqrestore(&m->lock, flags); +} + +/* + * Info output has the following format: + * num_multipath_feature_args [multipath_feature_args]* + * num_handler_status_args [handler_status_args]* + * num_groups init_group_number + * [A|D|E num_ps_status_args [ps_status_args]* + * num_paths num_selector_args + * [path_dev A|F fail_count [selector_args]* ]+ ]+ + * + * Table output has the following format (identical to the constructor string): + * num_feature_args [features_args]* + * num_handler_args hw_handler [hw_handler_args]* + * num_groups init_group_number + * [priority selector-name num_ps_args [ps_args]* + * num_paths num_selector_args [path_dev [selector_args]* ]+ ]+ + */ +static int multipath_status(struct dm_target *ti, status_type_t type, + char *result, unsigned int maxlen) +{ + int sz = 0; + unsigned long flags; + struct multipath *m = (struct multipath *) ti->private; + struct priority_group *pg; + struct pgpath *p; + unsigned pg_num; + char state; + + spin_lock_irqsave(&m->lock, flags); + + /* Features */ + if (type == STATUSTYPE_INFO) + DMEMIT("2 %u %u ", m->queue_size, m->pg_init_count); + else { + DMEMIT("%u ", m->queue_if_no_path + + (m->pg_init_retries > 0) * 2); + if (m->queue_if_no_path) + DMEMIT("queue_if_no_path "); + if (m->pg_init_retries) + DMEMIT("pg_init_retries %u ", m->pg_init_retries); + } + + if (!m->hw_handler_name || type == STATUSTYPE_INFO) + DMEMIT("0 "); + else + DMEMIT("1 %s ", m->hw_handler_name); + + DMEMIT("%u ", m->nr_priority_groups); + + if (m->next_pg) + pg_num = m->next_pg->pg_num; + else if (m->current_pg) + pg_num = m->current_pg->pg_num; + else + pg_num = 1; + + DMEMIT("%u ", pg_num); + + switch (type) { + case STATUSTYPE_INFO: + list_for_each_entry(pg, &m->priority_groups, list) { + if (pg->bypassed) + state = 'D'; /* Disabled */ + else if (pg == m->current_pg) + state = 'A'; /* Currently Active */ + else + state = 'E'; /* Enabled */ + + DMEMIT("%c ", state); + + if (pg->ps.type->status) + sz += pg->ps.type->status(&pg->ps, NULL, type, + result + sz, + maxlen - sz); + else + DMEMIT("0 "); + + DMEMIT("%u %u ", pg->nr_pgpaths, + pg->ps.type->info_args); + + list_for_each_entry(p, &pg->pgpaths, list) { + DMEMIT("%s %s %u ", p->path.dev->name, + p->is_active ? 
"A" : "F", + p->fail_count); + if (pg->ps.type->status) + sz += pg->ps.type->status(&pg->ps, + &p->path, type, result + sz, + maxlen - sz); + } + } + break; + + case STATUSTYPE_TABLE: + list_for_each_entry(pg, &m->priority_groups, list) { + DMEMIT("%s ", pg->ps.type->name); + + if (pg->ps.type->status) + sz += pg->ps.type->status(&pg->ps, NULL, type, + result + sz, + maxlen - sz); + else + DMEMIT("0 "); + + DMEMIT("%u %u ", pg->nr_pgpaths, + pg->ps.type->table_args); + + list_for_each_entry(p, &pg->pgpaths, list) { + DMEMIT("%s ", p->path.dev->name); + if (pg->ps.type->status) + sz += pg->ps.type->status(&pg->ps, + &p->path, type, result + sz, + maxlen - sz); + } + } + break; + } + + spin_unlock_irqrestore(&m->lock, flags); + + return 0; +} + +static int multipath_message(struct dm_target *ti, unsigned argc, char **argv) +{ + int r; + struct dm_dev *dev; + struct multipath *m = (struct multipath *) ti->private; + action_fn action; + + if (argc == 1) { + if (!strnicmp(argv[0], MESG_STR("queue_if_no_path"))) + return queue_if_no_path(m, 1, 0); + else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path"))) + return queue_if_no_path(m, 0, 0); + } + + if (argc != 2) + goto error; + + if (!strnicmp(argv[0], MESG_STR("disable_group"))) + return bypass_pg_num(m, argv[1], 1); + else if (!strnicmp(argv[0], MESG_STR("enable_group"))) + return bypass_pg_num(m, argv[1], 0); + else if (!strnicmp(argv[0], MESG_STR("switch_group"))) + return switch_pg_num(m, argv[1]); + else if (!strnicmp(argv[0], MESG_STR("reinstate_path"))) + action = reinstate_path; + else if (!strnicmp(argv[0], MESG_STR("fail_path"))) + action = fail_path; + else + goto error; + + r = dm_get_device(ti, argv[1], ti->begin, ti->len, + dm_table_get_mode(ti->table), &dev); + if (r) { + DMWARN("message: error getting device %s", + argv[1]); + return -EINVAL; + } + + r = action_dev(m, dev, action); + + dm_put_device(ti, dev); + + return r; + +error: + DMWARN("Unrecognised multipath message received."); + return -EINVAL; +} + +static int multipath_ioctl(struct dm_target *ti, unsigned int cmd, + unsigned long arg) +{ + struct multipath *m = (struct multipath *) ti->private; + struct block_device *bdev = NULL; + fmode_t mode = 0; + unsigned long flags; + int r = 0; + + spin_lock_irqsave(&m->lock, flags); + + if (!m->current_pgpath) + __choose_pgpath(m); + + if (m->current_pgpath) { + bdev = m->current_pgpath->path.dev->bdev; + mode = m->current_pgpath->path.dev->mode; + } + + if (m->queue_io) + r = -EAGAIN; + else if (!bdev) + r = -EIO; + + spin_unlock_irqrestore(&m->lock, flags); + + return r ? 
: __blkdev_driver_ioctl(bdev, mode, cmd, arg); +} + +/*----------------------------------------------------------------- + * Module setup + *---------------------------------------------------------------*/ +static struct target_type multipath_target = { + .name = "multipath", + .version = {1, 0, 5}, + .module = THIS_MODULE, + .ctr = multipath_ctr, + .dtr = multipath_dtr, + .map = multipath_map, + .end_io = multipath_end_io, + .presuspend = multipath_presuspend, + .resume = multipath_resume, + .status = multipath_status, + .message = multipath_message, + .ioctl = multipath_ioctl, +}; + +static int __init dm_multipath_init(void) +{ + int r; + + /* allocate a slab for the dm_ios */ + _mpio_cache = KMEM_CACHE(dm_mpath_io, 0); + if (!_mpio_cache) + return -ENOMEM; + + r = dm_register_target(&multipath_target); + if (r < 0) { + DMERR("register failed %d", r); + kmem_cache_destroy(_mpio_cache); + return -EINVAL; + } + + kmultipathd = create_workqueue("kmpathd"); + if (!kmultipathd) { + DMERR("failed to create workqueue kmpathd"); + dm_unregister_target(&multipath_target); + kmem_cache_destroy(_mpio_cache); + return -ENOMEM; + } + + /* + * A separate workqueue is used to handle the device handlers + * to avoid overloading existing workqueue. Overloading the + * old workqueue would also create a bottleneck in the + * path of the storage hardware device activation. + */ + kmpath_handlerd = create_singlethread_workqueue("kmpath_handlerd"); + if (!kmpath_handlerd) { + DMERR("failed to create workqueue kmpath_handlerd"); + destroy_workqueue(kmultipathd); + dm_unregister_target(&multipath_target); + kmem_cache_destroy(_mpio_cache); + return -ENOMEM; + } + + DMINFO("version %u.%u.%u loaded", + multipath_target.version[0], multipath_target.version[1], + multipath_target.version[2]); + + return r; +} + +static void __exit dm_multipath_exit(void) +{ + int r; + + destroy_workqueue(kmpath_handlerd); + destroy_workqueue(kmultipathd); + + r = dm_unregister_target(&multipath_target); + if (r < 0) + DMERR("target unregister failed %d", r); + kmem_cache_destroy(_mpio_cache); +} + +module_init(dm_multipath_init); +module_exit(dm_multipath_exit); + +MODULE_DESCRIPTION(DM_NAME " multipath target"); +MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/md/dm-mpath.h b/drivers/md/dm-mpath.h new file mode 100644 index 0000000..e230f71 --- /dev/null +++ b/drivers/md/dm-mpath.h @@ -0,0 +1,22 @@ +/* + * Copyright (C) 2004 Red Hat, Inc. All rights reserved. + * + * This file is released under the GPL. + * + * Multipath. + */ + +#ifndef DM_MPATH_H +#define DM_MPATH_H + +struct dm_dev; + +struct dm_path { + struct dm_dev *dev; /* Read-only */ + void *pscontext; /* For path-selector use */ +}; + +/* Callback for hwh_pg_init_fn to use when complete */ +void dm_pg_init_complete(struct dm_path *path, unsigned err_flags); + +#endif diff --git a/drivers/md/dm-path-selector.c b/drivers/md/dm-path-selector.c new file mode 100644 index 0000000..96ea226 --- /dev/null +++ b/drivers/md/dm-path-selector.c @@ -0,0 +1,154 @@ +/* + * Copyright (C) 2003 Sistina Software. + * Copyright (C) 2004 Red Hat, Inc. All rights reserved. + * + * Module Author: Heinz Mauelshagen + * + * This file is released under the GPL. + * + * Path selector registration. 
+ */ + +#include <linux/device-mapper.h> + +#include "dm-path-selector.h" + +#include <linux/slab.h> + +struct ps_internal { + struct path_selector_type pst; + + struct list_head list; + long use; +}; + +#define pst_to_psi(__pst) container_of((__pst), struct ps_internal, pst) + +static LIST_HEAD(_path_selectors); +static DECLARE_RWSEM(_ps_lock); + +static struct ps_internal *__find_path_selector_type(const char *name) +{ + struct ps_internal *psi; + + list_for_each_entry(psi, &_path_selectors, list) { + if (!strcmp(name, psi->pst.name)) + return psi; + } + + return NULL; +} + +static struct ps_internal *get_path_selector(const char *name) +{ + struct ps_internal *psi; + + down_read(&_ps_lock); + psi = __find_path_selector_type(name); + if (psi) { + if ((psi->use == 0) && !try_module_get(psi->pst.module)) + psi = NULL; + else + psi->use++; + } + up_read(&_ps_lock); + + return psi; +} + +struct path_selector_type *dm_get_path_selector(const char *name) +{ + struct ps_internal *psi; + + if (!name) + return NULL; + + psi = get_path_selector(name); + if (!psi) { + request_module("dm-%s", name); + psi = get_path_selector(name); + } + + return psi ? &psi->pst : NULL; +} + +void dm_put_path_selector(struct path_selector_type *pst) +{ + struct ps_internal *psi; + + if (!pst) + return; + + down_read(&_ps_lock); + psi = __find_path_selector_type(pst->name); + if (!psi) + goto out; + + if (--psi->use == 0) + module_put(psi->pst.module); + + BUG_ON(psi->use < 0); + +out: + up_read(&_ps_lock); +} + +static struct ps_internal *_alloc_path_selector(struct path_selector_type *pst) +{ + struct ps_internal *psi = kzalloc(sizeof(*psi), GFP_KERNEL); + + if (psi) + psi->pst = *pst; + + return psi; +} + +int dm_register_path_selector(struct path_selector_type *pst) +{ + int r = 0; + struct ps_internal *psi = _alloc_path_selector(pst); + + if (!psi) + return -ENOMEM; + + down_write(&_ps_lock); + + if (__find_path_selector_type(pst->name)) { + kfree(psi); + r = -EEXIST; + } else + list_add(&psi->list, &_path_selectors); + + up_write(&_ps_lock); + + return r; +} + +int dm_unregister_path_selector(struct path_selector_type *pst) +{ + struct ps_internal *psi; + + down_write(&_ps_lock); + + psi = __find_path_selector_type(pst->name); + if (!psi) { + up_write(&_ps_lock); + return -EINVAL; + } + + if (psi->use) { + up_write(&_ps_lock); + return -ETXTBSY; + } + + list_del(&psi->list); + + up_write(&_ps_lock); + + kfree(psi); + + return 0; +} + +EXPORT_SYMBOL_GPL(dm_register_path_selector); +EXPORT_SYMBOL_GPL(dm_unregister_path_selector); diff --git a/drivers/md/dm-path-selector.h b/drivers/md/dm-path-selector.h new file mode 100644 index 0000000..27357b8 --- /dev/null +++ b/drivers/md/dm-path-selector.h @@ -0,0 +1,93 @@ +/* + * Copyright (C) 2003 Sistina Software. + * Copyright (C) 2004 Red Hat, Inc. All rights reserved. + * + * Module Author: Heinz Mauelshagen + * + * This file is released under the GPL. + * + * Path-Selector registration. + */ + +#ifndef DM_PATH_SELECTOR_H +#define DM_PATH_SELECTOR_H + +#include <linux/device-mapper.h> + +#include "dm-mpath.h" + +/* + * We provide an abstraction for the code that chooses which path + * to send some io down. 
+ */ +struct path_selector_type; +struct path_selector { + struct path_selector_type *type; + void *context; +}; + +/* Information about a path selector type */ +struct path_selector_type { + char *name; + struct module *module; + + unsigned int table_args; + unsigned int info_args; + + /* + * Constructs a path selector object, takes custom arguments + */ + int (*create) (struct path_selector *ps, unsigned argc, char **argv); + void (*destroy) (struct path_selector *ps); + + /* + * Add an opaque path object, along with some selector specific + * path args (eg, path priority). + */ + int (*add_path) (struct path_selector *ps, struct dm_path *path, + int argc, char **argv, char **error); + + /* + * Chooses a path for this io, if no paths are available then + * NULL will be returned. + * + * repeat_count is the number of times to use the path before + * calling the function again. 0 means don't call it again unless + * the path fails. + */ + struct dm_path *(*select_path) (struct path_selector *ps, + unsigned *repeat_count); + + /* + * Notify the selector that a path has failed. + */ + void (*fail_path) (struct path_selector *ps, struct dm_path *p); + + /* + * Ask selector to reinstate a path. + */ + int (*reinstate_path) (struct path_selector *ps, struct dm_path *p); + + /* + * Table content based on parameters added in ps_add_path_fn + * or path selector status + */ + int (*status) (struct path_selector *ps, struct dm_path *path, + status_type_t type, char *result, unsigned int maxlen); + + int (*end_io) (struct path_selector *ps, struct dm_path *path); +}; + +/* Register a path selector */ +int dm_register_path_selector(struct path_selector_type *type); + +/* Unregister a path selector */ +int dm_unregister_path_selector(struct path_selector_type *type); + +/* Returns a registered path selector type */ +struct path_selector_type *dm_get_path_selector(const char *name); + +/* Releases a path selector */ +void dm_put_path_selector(struct path_selector_type *pst); + +#endif diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c new file mode 100644 index 0000000..d0fed2b --- /dev/null +++ b/drivers/md/dm-raid1.c @@ -0,0 +1,1316 @@ +/* + * Copyright (C) 2003 Sistina Software Limited. + * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved. + * + * This file is released under the GPL. + */ + +#include "dm-bio-list.h" +#include "dm-bio-record.h" + +#include <linux/init.h> +#include <linux/mempool.h> +#include <linux/module.h> +#include <linux/pagemap.h> +#include <linux/slab.h> +#include <linux/workqueue.h> +#include <linux/device-mapper.h> +#include <linux/dm-io.h> +#include <linux/dm-dirty-log.h> +#include <linux/dm-kcopyd.h> +#include <linux/dm-region-hash.h> + +#define DM_MSG_PREFIX "raid1" + +#define MAX_RECOVERY 1 /* Maximum number of regions recovered in parallel. */ +#define DM_IO_PAGES 64 +#define DM_KCOPYD_PAGES 64 + +#define DM_RAID1_HANDLE_ERRORS 0x01 +#define errors_handled(p) ((p)->features & DM_RAID1_HANDLE_ERRORS) + +static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped); + +/*----------------------------------------------------------------- + * Mirror set structures. 
+ *---------------------------------------------------------------*/ +enum dm_raid1_error { + DM_RAID1_WRITE_ERROR, + DM_RAID1_SYNC_ERROR, + DM_RAID1_READ_ERROR +}; + +struct mirror { + struct mirror_set *ms; + atomic_t error_count; + unsigned long error_type; + struct dm_dev *dev; + sector_t offset; +}; + +struct mirror_set { + struct dm_target *ti; + struct list_head list; + + uint64_t features; + + spinlock_t lock; /* protects the lists */ + struct bio_list reads; + struct bio_list writes; + struct bio_list failures; + + struct dm_region_hash *rh; + struct dm_kcopyd_client *kcopyd_client; + struct dm_io_client *io_client; + mempool_t *read_record_pool; + + /* recovery */ + region_t nr_regions; + int in_sync; + int log_failure; + atomic_t suspend; + + atomic_t default_mirror; /* Default mirror */ + + struct workqueue_struct *kmirrord_wq; + struct work_struct kmirrord_work; + struct timer_list timer; + unsigned long timer_pending; + + struct work_struct trigger_event; + + unsigned nr_mirrors; + struct mirror mirror[0]; +}; + +static void wakeup_mirrord(void *context) +{ + struct mirror_set *ms = context; + + queue_work(ms->kmirrord_wq, &ms->kmirrord_work); +} + +static void delayed_wake_fn(unsigned long data) +{ + struct mirror_set *ms = (struct mirror_set *) data; + + clear_bit(0, &ms->timer_pending); + wakeup_mirrord(ms); +} + +static void delayed_wake(struct mirror_set *ms) +{ + if (test_and_set_bit(0, &ms->timer_pending)) + return; + + ms->timer.expires = jiffies + HZ / 5; + ms->timer.data = (unsigned long) ms; + ms->timer.function = delayed_wake_fn; + add_timer(&ms->timer); +} + +static void wakeup_all_recovery_waiters(void *context) +{ + wake_up_all(&_kmirrord_recovery_stopped); +} + +static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw) +{ + unsigned long flags; + int should_wake = 0; + struct bio_list *bl; + + bl = (rw == WRITE) ? &ms->writes : &ms->reads; + spin_lock_irqsave(&ms->lock, flags); + should_wake = !(bl->head); + bio_list_add(bl, bio); + spin_unlock_irqrestore(&ms->lock, flags); + + if (should_wake) + wakeup_mirrord(ms); +} + +static void dispatch_bios(void *context, struct bio_list *bio_list) +{ + struct mirror_set *ms = context; + struct bio *bio; + + while ((bio = bio_list_pop(bio_list))) + queue_bio(ms, bio, WRITE); +} + +#define MIN_READ_RECORDS 20 +struct dm_raid1_read_record { + struct mirror *m; + struct dm_bio_details details; +}; + +/* + * Every mirror should look like this one. + */ +#define DEFAULT_MIRROR 0 + +/* + * This is yucky. We squirrel the mirror struct away inside + * bi_next for read/write buffers. This is safe since the bh + * doesn't get submitted to the lower levels of block layer. + */ +static struct mirror *bio_get_m(struct bio *bio) +{ + return (struct mirror *) bio->bi_next; +} + +static void bio_set_m(struct bio *bio, struct mirror *m) +{ + bio->bi_next = (struct bio *) m; +} + +static struct mirror *get_default_mirror(struct mirror_set *ms) +{ + return &ms->mirror[atomic_read(&ms->default_mirror)]; +} + +static void set_default_mirror(struct mirror *m) +{ + struct mirror_set *ms = m->ms; + struct mirror *m0 = &(ms->mirror[0]); + + atomic_set(&ms->default_mirror, m - m0); +} + +/* fail_mirror + * @m: mirror device to fail + * @error_type: one of the enum's, DM_RAID1_*_ERROR + * + * If errors are being handled, record the type of + * error encountered for this device. If this type + * of error has already been recorded, we can return; + * otherwise, we must signal userspace by triggering + * an event. 
Additionally, if the device is the + * primary device, we must choose a new primary, but + * only if the mirror is in-sync. + * + * This function must not block. + */ +static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type) +{ + struct mirror_set *ms = m->ms; + struct mirror *new; + + /* + * error_count is used for nothing more than a + * simple way to tell if a device has encountered + * errors. + */ + atomic_inc(&m->error_count); + + if (test_and_set_bit(error_type, &m->error_type)) + return; + + if (!errors_handled(ms)) + return; + + if (m != get_default_mirror(ms)) + goto out; + + if (!ms->in_sync) { + /* + * Better to issue requests to same failing device + * than to risk returning corrupt data. + */ + DMERR("Primary mirror (%s) failed while out-of-sync: " + "Reads may fail.", m->dev->name); + goto out; + } + + for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++) + if (!atomic_read(&new->error_count)) { + set_default_mirror(new); + break; + } + + if (unlikely(new == ms->mirror + ms->nr_mirrors)) + DMWARN("All sides of mirror have failed."); + +out: + schedule_work(&ms->trigger_event); +} + +/*----------------------------------------------------------------- + * Recovery. + * + * When a mirror is first activated we may find that some regions + * are in the no-sync state. We have to recover these by + * recopying from the default mirror to all the others. + *---------------------------------------------------------------*/ +static void recovery_complete(int read_err, unsigned long write_err, + void *context) +{ + struct dm_region *reg = context; + struct mirror_set *ms = dm_rh_region_context(reg); + int m, bit = 0; + + if (read_err) { + /* Read error means the failure of default mirror. */ + DMERR_LIMIT("Unable to read primary mirror during recovery"); + fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR); + } + + if (write_err) { + DMERR_LIMIT("Write error during recovery (error = 0x%lx)", + write_err); + /* + * Bits correspond to devices (excluding default mirror). + * The default mirror cannot change during recovery. + */ + for (m = 0; m < ms->nr_mirrors; m++) { + if (&ms->mirror[m] == get_default_mirror(ms)) + continue; + if (test_bit(bit, &write_err)) + fail_mirror(ms->mirror + m, + DM_RAID1_SYNC_ERROR); + bit++; + } + } + + dm_rh_recovery_end(reg, !(read_err || write_err)); +} + +static int recover(struct mirror_set *ms, struct dm_region *reg) +{ + int r; + unsigned i; + struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest; + struct mirror *m; + unsigned long flags = 0; + region_t key = dm_rh_get_region_key(reg); + sector_t region_size = dm_rh_get_region_size(ms->rh); + + /* fill in the source */ + m = get_default_mirror(ms); + from.bdev = m->dev->bdev; + from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key); + if (key == (ms->nr_regions - 1)) { + /* + * The final region may be smaller than + * region_size. 
+ */ + from.count = ms->ti->len & (region_size - 1); + if (!from.count) + from.count = region_size; + } else + from.count = region_size; + + /* fill in the destinations */ + for (i = 0, dest = to; i < ms->nr_mirrors; i++) { + if (&ms->mirror[i] == get_default_mirror(ms)) + continue; + + m = ms->mirror + i; + dest->bdev = m->dev->bdev; + dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key); + dest->count = from.count; + dest++; + } + + /* hand to kcopyd */ + if (!errors_handled(ms)) + set_bit(DM_KCOPYD_IGNORE_ERROR, &flags); + + r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, + flags, recovery_complete, reg); + + return r; +} + +static void do_recovery(struct mirror_set *ms) +{ + struct dm_region *reg; + struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); + int r; + + /* + * Start quiescing some regions. + */ + dm_rh_recovery_prepare(ms->rh); + + /* + * Copy any already quiesced regions. + */ + while ((reg = dm_rh_recovery_start(ms->rh))) { + r = recover(ms, reg); + if (r) + dm_rh_recovery_end(reg, 0); + } + + /* + * Update the in sync flag. + */ + if (!ms->in_sync && + (log->type->get_sync_count(log) == ms->nr_regions)) { + /* the sync is complete */ + dm_table_event(ms->ti->table); + ms->in_sync = 1; + } +} + +/*----------------------------------------------------------------- + * Reads + *---------------------------------------------------------------*/ +static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector) +{ + struct mirror *m = get_default_mirror(ms); + + do { + if (likely(!atomic_read(&m->error_count))) + return m; + + if (m-- == ms->mirror) + m += ms->nr_mirrors; + } while (m != get_default_mirror(ms)); + + return NULL; +} + +static int default_ok(struct mirror *m) +{ + struct mirror *default_mirror = get_default_mirror(m->ms); + + return !atomic_read(&default_mirror->error_count); +} + +static int mirror_available(struct mirror_set *ms, struct bio *bio) +{ + struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); + region_t region = dm_rh_bio_to_region(ms->rh, bio); + + if (log->type->in_sync(log, region, 0)) + return choose_mirror(ms, bio->bi_sector) ? 1 : 0; + + return 0; +} + +/* + * remap a buffer to a particular mirror. + */ +static sector_t map_sector(struct mirror *m, struct bio *bio) +{ + return m->offset + (bio->bi_sector - m->ms->ti->begin); +} + +static void map_bio(struct mirror *m, struct bio *bio) +{ + bio->bi_bdev = m->dev->bdev; + bio->bi_sector = map_sector(m, bio); +} + +static void map_region(struct dm_io_region *io, struct mirror *m, + struct bio *bio) +{ + io->bdev = m->dev->bdev; + io->sector = map_sector(m, bio); + io->count = bio->bi_size >> 9; +} + +/*----------------------------------------------------------------- + * Reads + *---------------------------------------------------------------*/ +static void read_callback(unsigned long error, void *context) +{ + struct bio *bio = context; + struct mirror *m; + + m = bio_get_m(bio); + bio_set_m(bio, NULL); + + if (likely(!error)) { + bio_endio(bio, 0); + return; + } + + fail_mirror(m, DM_RAID1_READ_ERROR); + + if (likely(default_ok(m)) || mirror_available(m->ms, bio)) { + DMWARN_LIMIT("Read failure on mirror device %s. " + "Trying alternative device.", + m->dev->name); + queue_bio(m->ms, bio, bio_rw(bio)); + return; + } + + DMERR_LIMIT("Read failure on mirror device %s. Failing I/O.", + m->dev->name); + bio_endio(bio, -EIO); +} + +/* Asynchronous read. 
*/ +static void read_async_bio(struct mirror *m, struct bio *bio) +{ + struct dm_io_region io; + struct dm_io_request io_req = { + .bi_rw = READ, + .mem.type = DM_IO_BVEC, + .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx, + .notify.fn = read_callback, + .notify.context = bio, + .client = m->ms->io_client, + }; + + map_region(&io, m, bio); + bio_set_m(bio, m); + BUG_ON(dm_io(&io_req, 1, &io, NULL)); +} + +static inline int region_in_sync(struct mirror_set *ms, region_t region, + int may_block) +{ + int state = dm_rh_get_state(ms->rh, region, may_block); + return state == DM_RH_CLEAN || state == DM_RH_DIRTY; +} + +static void do_reads(struct mirror_set *ms, struct bio_list *reads) +{ + region_t region; + struct bio *bio; + struct mirror *m; + + while ((bio = bio_list_pop(reads))) { + region = dm_rh_bio_to_region(ms->rh, bio); + m = get_default_mirror(ms); + + /* + * We can only read balance if the region is in sync. + */ + if (likely(region_in_sync(ms, region, 1))) + m = choose_mirror(ms, bio->bi_sector); + else if (m && atomic_read(&m->error_count)) + m = NULL; + + if (likely(m)) + read_async_bio(m, bio); + else + bio_endio(bio, -EIO); + } +} + +/*----------------------------------------------------------------- + * Writes. + * + * We do different things with the write io depending on the + * state of the region that it's in: + * + * SYNC: increment pending, use kcopyd to write to *all* mirrors + * RECOVERING: delay the io until recovery completes + * NOSYNC: increment pending, just write to the default mirror + *---------------------------------------------------------------*/ + + +static void write_callback(unsigned long error, void *context) +{ + unsigned i, ret = 0; + struct bio *bio = (struct bio *) context; + struct mirror_set *ms; + int uptodate = 0; + int should_wake = 0; + unsigned long flags; + + ms = bio_get_m(bio)->ms; + bio_set_m(bio, NULL); + + /* + * NOTE: We don't decrement the pending count here, + * instead it is done by the targets endio function. + * This way we handle both writes to SYNC and NOSYNC + * regions with the same code. + */ + if (likely(!error)) + goto out; + + for (i = 0; i < ms->nr_mirrors; i++) + if (test_bit(i, &error)) + fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR); + else + uptodate = 1; + + if (unlikely(!uptodate)) { + DMERR("All replicated volumes dead, failing I/O"); + /* None of the writes succeeded, fail the I/O. */ + ret = -EIO; + } else if (errors_handled(ms)) { + /* + * Need to raise event. Since raising + * events can block, we need to do it in + * the main thread. + */ + spin_lock_irqsave(&ms->lock, flags); + if (!ms->failures.head) + should_wake = 1; + bio_list_add(&ms->failures, bio); + spin_unlock_irqrestore(&ms->lock, flags); + if (should_wake) + wakeup_mirrord(ms); + return; + } +out: + bio_endio(bio, ret); +} + +static void do_write(struct mirror_set *ms, struct bio *bio) +{ + unsigned int i; + struct dm_io_region io[ms->nr_mirrors], *dest = io; + struct mirror *m; + struct dm_io_request io_req = { + .bi_rw = WRITE, + .mem.type = DM_IO_BVEC, + .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx, + .notify.fn = write_callback, + .notify.context = bio, + .client = ms->io_client, + }; + + for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) + map_region(dest++, m, bio); + + /* + * Use default mirror because we only need it to retrieve the reference + * to the mirror set in write_callback(). 
+ */ + bio_set_m(bio, get_default_mirror(ms)); + + BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL)); +} + +static void do_writes(struct mirror_set *ms, struct bio_list *writes) +{ + int state; + struct bio *bio; + struct bio_list sync, nosync, recover, *this_list = NULL; + + if (!writes->head) + return; + + /* + * Classify each write. + */ + bio_list_init(&sync); + bio_list_init(&nosync); + bio_list_init(&recover); + + while ((bio = bio_list_pop(writes))) { + state = dm_rh_get_state(ms->rh, + dm_rh_bio_to_region(ms->rh, bio), 1); + switch (state) { + case DM_RH_CLEAN: + case DM_RH_DIRTY: + this_list = &sync; + break; + + case DM_RH_NOSYNC: + this_list = &nosync; + break; + + case DM_RH_RECOVERING: + this_list = &recover; + break; + } + + bio_list_add(this_list, bio); + } + + /* + * Increment the pending counts for any regions that will + * be written to (writes to recover regions are going to + * be delayed). + */ + dm_rh_inc_pending(ms->rh, &sync); + dm_rh_inc_pending(ms->rh, &nosync); + ms->log_failure = dm_rh_flush(ms->rh) ? 1 : 0; + + /* + * Dispatch io. + */ + if (unlikely(ms->log_failure)) { + spin_lock_irq(&ms->lock); + bio_list_merge(&ms->failures, &sync); + spin_unlock_irq(&ms->lock); + wakeup_mirrord(ms); + } else + while ((bio = bio_list_pop(&sync))) + do_write(ms, bio); + + while ((bio = bio_list_pop(&recover))) + dm_rh_delay(ms->rh, bio); + + while ((bio = bio_list_pop(&nosync))) { + map_bio(get_default_mirror(ms), bio); + generic_make_request(bio); + } +} + +static void do_failures(struct mirror_set *ms, struct bio_list *failures) +{ + struct bio *bio; + + if (!failures->head) + return; + + if (!ms->log_failure) { + while ((bio = bio_list_pop(failures))) { + ms->in_sync = 0; + dm_rh_mark_nosync(ms->rh, bio, bio->bi_size, 0); + } + return; + } + + /* + * If the log has failed, unattempted writes are being + * put on the failures list. We can't issue those writes + * until a log has been marked, so we must store them. + * + * If a 'noflush' suspend is in progress, we can requeue + * the I/O's to the core. This give userspace a chance + * to reconfigure the mirror, at which point the core + * will reissue the writes. If the 'noflush' flag is + * not set, we have no choice but to return errors. + * + * Some writes on the failures list may have been + * submitted before the log failure and represent a + * failure to write to one of the devices. It is ok + * for us to treat them the same and requeue them + * as well. 
+ */ + if (dm_noflush_suspending(ms->ti)) { + while ((bio = bio_list_pop(failures))) + bio_endio(bio, DM_ENDIO_REQUEUE); + return; + } + + if (atomic_read(&ms->suspend)) { + while ((bio = bio_list_pop(failures))) + bio_endio(bio, -EIO); + return; + } + + spin_lock_irq(&ms->lock); + bio_list_merge(&ms->failures, failures); + spin_unlock_irq(&ms->lock); + + delayed_wake(ms); +} + +static void trigger_event(struct work_struct *work) +{ + struct mirror_set *ms = + container_of(work, struct mirror_set, trigger_event); + + dm_table_event(ms->ti->table); +} + +/*----------------------------------------------------------------- + * kmirrord + *---------------------------------------------------------------*/ +static void do_mirror(struct work_struct *work) +{ + struct mirror_set *ms = container_of(work, struct mirror_set, + kmirrord_work); + struct bio_list reads, writes, failures; + unsigned long flags; + + spin_lock_irqsave(&ms->lock, flags); + reads = ms->reads; + writes = ms->writes; + failures = ms->failures; + bio_list_init(&ms->reads); + bio_list_init(&ms->writes); + bio_list_init(&ms->failures); + spin_unlock_irqrestore(&ms->lock, flags); + + dm_rh_update_states(ms->rh, errors_handled(ms)); + do_recovery(ms); + do_reads(ms, &reads); + do_writes(ms, &writes); + do_failures(ms, &failures); + + dm_table_unplug_all(ms->ti->table); +} + +/*----------------------------------------------------------------- + * Target functions + *---------------------------------------------------------------*/ +static struct mirror_set *alloc_context(unsigned int nr_mirrors, + uint32_t region_size, + struct dm_target *ti, + struct dm_dirty_log *dl) +{ + size_t len; + struct mirror_set *ms = NULL; + + len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors); + + ms = kzalloc(len, GFP_KERNEL); + if (!ms) { + ti->error = "Cannot allocate mirror context"; + return NULL; + } + + spin_lock_init(&ms->lock); + + ms->ti = ti; + ms->nr_mirrors = nr_mirrors; + ms->nr_regions = dm_sector_div_up(ti->len, region_size); + ms->in_sync = 0; + ms->log_failure = 0; + atomic_set(&ms->suspend, 0); + atomic_set(&ms->default_mirror, DEFAULT_MIRROR); + + len = sizeof(struct dm_raid1_read_record); + ms->read_record_pool = mempool_create_kmalloc_pool(MIN_READ_RECORDS, + len); + if (!ms->read_record_pool) { + ti->error = "Error creating mirror read_record_pool"; + kfree(ms); + return NULL; + } + + ms->io_client = dm_io_client_create(DM_IO_PAGES); + if (IS_ERR(ms->io_client)) { + ti->error = "Error creating dm_io client"; + mempool_destroy(ms->read_record_pool); + kfree(ms); + return NULL; + } + + ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord, + wakeup_all_recovery_waiters, + ms->ti->begin, MAX_RECOVERY, + dl, region_size, ms->nr_regions); + if (IS_ERR(ms->rh)) { + ti->error = "Error creating dirty region hash"; + dm_io_client_destroy(ms->io_client); + mempool_destroy(ms->read_record_pool); + kfree(ms); + return NULL; + } + + return ms; +} + +static void free_context(struct mirror_set *ms, struct dm_target *ti, + unsigned int m) +{ + while (m--) + dm_put_device(ti, ms->mirror[m].dev); + + dm_io_client_destroy(ms->io_client); + dm_region_hash_destroy(ms->rh); + mempool_destroy(ms->read_record_pool); + kfree(ms); +} + +static inline int _check_region_size(struct dm_target *ti, uint32_t size) +{ + return !(size % (PAGE_SIZE >> 9) || !is_power_of_2(size) || + size > ti->len); +} + +static int get_mirror(struct mirror_set *ms, struct dm_target *ti, + unsigned int mirror, char **argv) +{ + unsigned long long offset; + + if 
(sscanf(argv[1], "%llu", &offset) != 1) {
+ ti->error = "Invalid offset";
+ return -EINVAL;
+ }
+
+ if (dm_get_device(ti, argv[0], offset, ti->len,
+ dm_table_get_mode(ti->table),
+ &ms->mirror[mirror].dev)) {
+ ti->error = "Device lookup failure";
+ return -ENXIO;
+ }
+
+ ms->mirror[mirror].ms = ms;
+ atomic_set(&(ms->mirror[mirror].error_count), 0);
+ ms->mirror[mirror].error_type = 0;
+ ms->mirror[mirror].offset = offset;
+
+ return 0;
+}
+
+/*
+ * Create dirty log: log_type #log_params <log_params>
+ */
+static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
+ unsigned argc, char **argv,
+ unsigned *args_used)
+{
+ unsigned param_count;
+ struct dm_dirty_log *dl;
+
+ if (argc < 2) {
+ ti->error = "Insufficient mirror log arguments";
+ return NULL;
+ }
+
+ if (sscanf(argv[1], "%u", &param_count) != 1) {
+ ti->error = "Invalid mirror log argument count";
+ return NULL;
+ }
+
+ *args_used = 2 + param_count;
+
+ if (argc < *args_used) {
+ ti->error = "Insufficient mirror log arguments";
+ return NULL;
+ }
+
+ dl = dm_dirty_log_create(argv[0], ti, param_count, argv + 2);
+ if (!dl) {
+ ti->error = "Error creating mirror dirty log";
+ return NULL;
+ }
+
+ if (!_check_region_size(ti, dl->type->get_region_size(dl))) {
+ ti->error = "Invalid region size";
+ dm_dirty_log_destroy(dl);
+ return NULL;
+ }
+
+ return dl;
+}
+
+static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
+ unsigned *args_used)
+{
+ unsigned num_features;
+ struct dm_target *ti = ms->ti;
+
+ *args_used = 0;
+
+ if (!argc)
+ return 0;
+
+ if (sscanf(argv[0], "%u", &num_features) != 1) {
+ ti->error = "Invalid number of features";
+ return -EINVAL;
+ }
+
+ argc--;
+ argv++;
+ (*args_used)++;
+
+ if (num_features > argc) {
+ ti->error = "Not enough arguments to support feature count";
+ return -EINVAL;
+ }
+
+ if (!strcmp("handle_errors", argv[0]))
+ ms->features |= DM_RAID1_HANDLE_ERRORS;
+ else {
+ ti->error = "Unrecognised feature requested";
+ return -EINVAL;
+ }
+
+ (*args_used)++;
+
+ return 0;
+}
+
+/*
+ * Construct a mirror mapping:
+ *
+ * log_type #log_params <log_params>
+ * #mirrors [mirror_path offset]{2,}
+ * [#features <features>]
+ *
+ * log_type is "core" or "disk"
+ * #log_params is between 1 and 3
+ *
+ * If present, features must be "handle_errors".
+ */ +static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv) +{ + int r; + unsigned int nr_mirrors, m, args_used; + struct mirror_set *ms; + struct dm_dirty_log *dl; + + dl = create_dirty_log(ti, argc, argv, &args_used); + if (!dl) + return -EINVAL; + + argv += args_used; + argc -= args_used; + + if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 || + nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) { + ti->error = "Invalid number of mirrors"; + dm_dirty_log_destroy(dl); + return -EINVAL; + } + + argv++, argc--; + + if (argc < nr_mirrors * 2) { + ti->error = "Too few mirror arguments"; + dm_dirty_log_destroy(dl); + return -EINVAL; + } + + ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl); + if (!ms) { + dm_dirty_log_destroy(dl); + return -ENOMEM; + } + + /* Get the mirror parameter sets */ + for (m = 0; m < nr_mirrors; m++) { + r = get_mirror(ms, ti, m, argv); + if (r) { + free_context(ms, ti, m); + return r; + } + argv += 2; + argc -= 2; + } + + ti->private = ms; + ti->split_io = dm_rh_get_region_size(ms->rh); + + ms->kmirrord_wq = create_singlethread_workqueue("kmirrord"); + if (!ms->kmirrord_wq) { + DMERR("couldn't start kmirrord"); + r = -ENOMEM; + goto err_free_context; + } + INIT_WORK(&ms->kmirrord_work, do_mirror); + init_timer(&ms->timer); + ms->timer_pending = 0; + INIT_WORK(&ms->trigger_event, trigger_event); + + r = parse_features(ms, argc, argv, &args_used); + if (r) + goto err_destroy_wq; + + argv += args_used; + argc -= args_used; + + /* + * Any read-balancing addition depends on the + * DM_RAID1_HANDLE_ERRORS flag being present. + * This is because the decision to balance depends + * on the sync state of a region. If the above + * flag is not present, we ignore errors; and + * the sync state may be inaccurate. + */ + + if (argc) { + ti->error = "Too many mirror arguments"; + r = -EINVAL; + goto err_destroy_wq; + } + + r = dm_kcopyd_client_create(DM_KCOPYD_PAGES, &ms->kcopyd_client); + if (r) + goto err_destroy_wq; + + wakeup_mirrord(ms); + return 0; + +err_destroy_wq: + destroy_workqueue(ms->kmirrord_wq); +err_free_context: + free_context(ms, ti, ms->nr_mirrors); + return r; +} + +static void mirror_dtr(struct dm_target *ti) +{ + struct mirror_set *ms = (struct mirror_set *) ti->private; + + del_timer_sync(&ms->timer); + flush_workqueue(ms->kmirrord_wq); + flush_scheduled_work(); + dm_kcopyd_client_destroy(ms->kcopyd_client); + destroy_workqueue(ms->kmirrord_wq); + free_context(ms, ti, ms->nr_mirrors); +} + +/* + * Mirror mapping function + */ +static int mirror_map(struct dm_target *ti, struct bio *bio, + union map_info *map_context) +{ + int r, rw = bio_rw(bio); + struct mirror *m; + struct mirror_set *ms = ti->private; + struct dm_raid1_read_record *read_record = NULL; + struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); + + if (rw == WRITE) { + /* Save region for mirror_end_io() handler */ + map_context->ll = dm_rh_bio_to_region(ms->rh, bio); + queue_bio(ms, bio, rw); + return DM_MAPIO_SUBMITTED; + } + + r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0); + if (r < 0 && r != -EWOULDBLOCK) + return r; + + /* + * If region is not in-sync queue the bio. + */ + if (!r || (r == -EWOULDBLOCK)) { + if (rw == READA) + return -EWOULDBLOCK; + + queue_bio(ms, bio, rw); + return DM_MAPIO_SUBMITTED; + } + + /* + * The region is in-sync and we can perform reads directly. + * Store enough information so we can retry if it fails. 
+ */ + m = choose_mirror(ms, bio->bi_sector); + if (unlikely(!m)) + return -EIO; + + read_record = mempool_alloc(ms->read_record_pool, GFP_NOIO); + if (likely(read_record)) { + dm_bio_record(&read_record->details, bio); + map_context->ptr = read_record; + read_record->m = m; + } + + map_bio(m, bio); + + return DM_MAPIO_REMAPPED; +} + +static int mirror_end_io(struct dm_target *ti, struct bio *bio, + int error, union map_info *map_context) +{ + int rw = bio_rw(bio); + struct mirror_set *ms = (struct mirror_set *) ti->private; + struct mirror *m = NULL; + struct dm_bio_details *bd = NULL; + struct dm_raid1_read_record *read_record = map_context->ptr; + + /* + * We need to dec pending if this was a write. + */ + if (rw == WRITE) { + dm_rh_dec(ms->rh, map_context->ll); + return error; + } + + if (error == -EOPNOTSUPP) + goto out; + + if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio)) + goto out; + + if (unlikely(error)) { + if (!read_record) { + /* + * There wasn't enough memory to record necessary + * information for a retry or there was no other + * mirror in-sync. + */ + DMERR_LIMIT("Mirror read failed."); + return -EIO; + } + + m = read_record->m; + + DMERR("Mirror read failed from %s. Trying alternative device.", + m->dev->name); + + fail_mirror(m, DM_RAID1_READ_ERROR); + + /* + * A failed read is requeued for another attempt using an intact + * mirror. + */ + if (default_ok(m) || mirror_available(ms, bio)) { + bd = &read_record->details; + + dm_bio_restore(bd, bio); + mempool_free(read_record, ms->read_record_pool); + map_context->ptr = NULL; + queue_bio(ms, bio, rw); + return 1; + } + DMERR("All replicated volumes dead, failing I/O"); + } + +out: + if (read_record) { + mempool_free(read_record, ms->read_record_pool); + map_context->ptr = NULL; + } + + return error; +} + +static void mirror_presuspend(struct dm_target *ti) +{ + struct mirror_set *ms = (struct mirror_set *) ti->private; + struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); + + atomic_set(&ms->suspend, 1); + + /* + * We must finish up all the work that we've + * generated (i.e. recovery work). + */ + dm_rh_stop_recovery(ms->rh); + + wait_event(_kmirrord_recovery_stopped, + !dm_rh_recovery_in_flight(ms->rh)); + + if (log->type->presuspend && log->type->presuspend(log)) + /* FIXME: need better error handling */ + DMWARN("log presuspend failed"); + + /* + * Now that recovery is complete/stopped and the + * delayed bios are queued, we need to wait for + * the worker thread to complete. This way, + * we know that all of our I/O has been pushed. + */ + flush_workqueue(ms->kmirrord_wq); +} + +static void mirror_postsuspend(struct dm_target *ti) +{ + struct mirror_set *ms = ti->private; + struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); + + if (log->type->postsuspend && log->type->postsuspend(log)) + /* FIXME: need better error handling */ + DMWARN("log postsuspend failed"); +} + +static void mirror_resume(struct dm_target *ti) +{ + struct mirror_set *ms = ti->private; + struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); + + atomic_set(&ms->suspend, 0); + if (log->type->resume && log->type->resume(log)) + /* FIXME: need better error handling */ + DMWARN("log resume failed"); + dm_rh_start_recovery(ms->rh); +} + +/* + * device_status_char + * @m: mirror device/leg we want the status of + * + * We return one character representing the most severe error + * we have encountered. 
+ * A => Alive - No failures + * D => Dead - A write failure occurred leaving mirror out-of-sync + * S => Sync - A sychronization failure occurred, mirror out-of-sync + * R => Read - A read failure occurred, mirror data unaffected + * + * Returns: <char> + */ +static char device_status_char(struct mirror *m) +{ + if (!atomic_read(&(m->error_count))) + return 'A'; + + return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' : + (test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' : + (test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U'; +} + + +static int mirror_status(struct dm_target *ti, status_type_t type, + char *result, unsigned int maxlen) +{ + unsigned int m, sz = 0; + struct mirror_set *ms = (struct mirror_set *) ti->private; + struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); + char buffer[ms->nr_mirrors + 1]; + + switch (type) { + case STATUSTYPE_INFO: + DMEMIT("%d ", ms->nr_mirrors); + for (m = 0; m < ms->nr_mirrors; m++) { + DMEMIT("%s ", ms->mirror[m].dev->name); + buffer[m] = device_status_char(&(ms->mirror[m])); + } + buffer[m] = '\0'; + + DMEMIT("%llu/%llu 1 %s ", + (unsigned long long)log->type->get_sync_count(log), + (unsigned long long)ms->nr_regions, buffer); + + sz += log->type->status(log, type, result+sz, maxlen-sz); + + break; + + case STATUSTYPE_TABLE: + sz = log->type->status(log, type, result, maxlen); + + DMEMIT("%d", ms->nr_mirrors); + for (m = 0; m < ms->nr_mirrors; m++) + DMEMIT(" %s %llu", ms->mirror[m].dev->name, + (unsigned long long)ms->mirror[m].offset); + + if (ms->features & DM_RAID1_HANDLE_ERRORS) + DMEMIT(" 1 handle_errors"); + } + + return 0; +} + +static struct target_type mirror_target = { + .name = "mirror", + .version = {1, 0, 20}, + .module = THIS_MODULE, + .ctr = mirror_ctr, + .dtr = mirror_dtr, + .map = mirror_map, + .end_io = mirror_end_io, + .presuspend = mirror_presuspend, + .postsuspend = mirror_postsuspend, + .resume = mirror_resume, + .status = mirror_status, +}; + +static int __init dm_mirror_init(void) +{ + int r; + + r = dm_register_target(&mirror_target); + if (r < 0) + DMERR("Failed to register mirror target"); + + return r; +} + +static void __exit dm_mirror_exit(void) +{ + int r; + + r = dm_unregister_target(&mirror_target); + if (r < 0) + DMERR("unregister failed %d", r); +} + +/* Module hooks */ +module_init(dm_mirror_init); +module_exit(dm_mirror_exit); + +MODULE_DESCRIPTION(DM_NAME " mirror target"); +MODULE_AUTHOR("Joe Thornber"); +MODULE_LICENSE("GPL"); diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c new file mode 100644 index 0000000..59f8d9d --- /dev/null +++ b/drivers/md/dm-region-hash.c @@ -0,0 +1,704 @@ +/* + * Copyright (C) 2003 Sistina Software Limited. + * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. + * + * This file is released under the GPL. + */ + +#include <linux/dm-dirty-log.h> +#include <linux/dm-region-hash.h> + +#include <linux/ctype.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/vmalloc.h> + +#include "dm.h" +#include "dm-bio-list.h" + +#define DM_MSG_PREFIX "region hash" + +/*----------------------------------------------------------------- + * Region hash + * + * The mirror splits itself up into discrete regions. Each + * region can be in one of three states: clean, dirty, + * nosync. There is no need to put clean regions in the hash. + * + * In addition to being present in the hash table a region _may_ + * be present on one of three lists. 
+ * + * clean_regions: Regions on this list have no io pending to + * them, they are in sync, we are no longer interested in them, + * they are dull. dm_rh_update_states() will remove them from the + * hash table. + * + * quiesced_regions: These regions have been spun down, ready + * for recovery. rh_recovery_start() will remove regions from + * this list and hand them to kmirrord, which will schedule the + * recovery io with kcopyd. + * + * recovered_regions: Regions that kcopyd has successfully + * recovered. dm_rh_update_states() will now schedule any delayed + * io, up the recovery_count, and remove the region from the + * hash. + * + * There are 2 locks: + * A rw spin lock 'hash_lock' protects just the hash table, + * this is never held in write mode from interrupt context, + * which I believe means that we only have to disable irqs when + * doing a write lock. + * + * An ordinary spin lock 'region_lock' that protects the three + * lists in the region_hash, with the 'state', 'list' and + * 'delayed_bios' fields of the regions. This is used from irq + * context, so all other uses will have to suspend local irqs. + *---------------------------------------------------------------*/ +struct dm_region_hash { + uint32_t region_size; + unsigned region_shift; + + /* holds persistent region state */ + struct dm_dirty_log *log; + + /* hash table */ + rwlock_t hash_lock; + mempool_t *region_pool; + unsigned mask; + unsigned nr_buckets; + unsigned prime; + unsigned shift; + struct list_head *buckets; + + unsigned max_recovery; /* Max # of regions to recover in parallel */ + + spinlock_t region_lock; + atomic_t recovery_in_flight; + struct semaphore recovery_count; + struct list_head clean_regions; + struct list_head quiesced_regions; + struct list_head recovered_regions; + struct list_head failed_recovered_regions; + + void *context; + sector_t target_begin; + + /* Callback function to schedule bios writes */ + void (*dispatch_bios)(void *context, struct bio_list *bios); + + /* Callback function to wakeup callers worker thread. */ + void (*wakeup_workers)(void *context); + + /* Callback function to wakeup callers recovery waiters. */ + void (*wakeup_all_recovery_waiters)(void *context); +}; + +struct dm_region { + struct dm_region_hash *rh; /* FIXME: can we get rid of this ? */ + region_t key; + int state; + + struct list_head hash_list; + struct list_head list; + + atomic_t pending; + struct bio_list delayed_bios; +}; + +/* + * Conversion fns + */ +static region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector) +{ + return sector >> rh->region_shift; +} + +sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region) +{ + return region << rh->region_shift; +} +EXPORT_SYMBOL_GPL(dm_rh_region_to_sector); + +region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio) +{ + return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin); +} +EXPORT_SYMBOL_GPL(dm_rh_bio_to_region); + +void *dm_rh_region_context(struct dm_region *reg) +{ + return reg->rh->context; +} +EXPORT_SYMBOL_GPL(dm_rh_region_context); + +region_t dm_rh_get_region_key(struct dm_region *reg) +{ + return reg->key; +} +EXPORT_SYMBOL_GPL(dm_rh_get_region_key); + +sector_t dm_rh_get_region_size(struct dm_region_hash *rh) +{ + return rh->region_size; +} +EXPORT_SYMBOL_GPL(dm_rh_get_region_size); + +/* + * FIXME: shall we pass in a structure instead of all these args to + * dm_region_hash_create()???? 
+ */
+#define RH_HASH_MULT 2654435387U
+#define RH_HASH_SHIFT 12
+
+#define MIN_REGIONS 64
+struct dm_region_hash *dm_region_hash_create(
+ void *context, void (*dispatch_bios)(void *context,
+ struct bio_list *bios),
+ void (*wakeup_workers)(void *context),
+ void (*wakeup_all_recovery_waiters)(void *context),
+ sector_t target_begin, unsigned max_recovery,
+ struct dm_dirty_log *log, uint32_t region_size,
+ region_t nr_regions)
+{
+ struct dm_region_hash *rh;
+ unsigned nr_buckets, max_buckets;
+ size_t i;
+
+ /*
+ * Calculate a suitable number of buckets for our hash
+ * table.
+ */
+ max_buckets = nr_regions >> 6;
+ for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
+ ;
+ nr_buckets >>= 1;
+
+ rh = kmalloc(sizeof(*rh), GFP_KERNEL);
+ if (!rh) {
+ DMERR("unable to allocate region hash memory");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ rh->context = context;
+ rh->dispatch_bios = dispatch_bios;
+ rh->wakeup_workers = wakeup_workers;
+ rh->wakeup_all_recovery_waiters = wakeup_all_recovery_waiters;
+ rh->target_begin = target_begin;
+ rh->max_recovery = max_recovery;
+ rh->log = log;
+ rh->region_size = region_size;
+ rh->region_shift = ffs(region_size) - 1;
+ rwlock_init(&rh->hash_lock);
+ rh->mask = nr_buckets - 1;
+ rh->nr_buckets = nr_buckets;
+
+ rh->shift = RH_HASH_SHIFT;
+ rh->prime = RH_HASH_MULT;
+
+ rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
+ if (!rh->buckets) {
+ DMERR("unable to allocate region hash bucket memory");
+ kfree(rh);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for (i = 0; i < nr_buckets; i++)
+ INIT_LIST_HEAD(rh->buckets + i);
+
+ spin_lock_init(&rh->region_lock);
+ sema_init(&rh->recovery_count, 0);
+ atomic_set(&rh->recovery_in_flight, 0);
+ INIT_LIST_HEAD(&rh->clean_regions);
+ INIT_LIST_HEAD(&rh->quiesced_regions);
+ INIT_LIST_HEAD(&rh->recovered_regions);
+ INIT_LIST_HEAD(&rh->failed_recovered_regions);
+
+ rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
+ sizeof(struct dm_region));
+ if (!rh->region_pool) {
+ vfree(rh->buckets);
+ kfree(rh);
+ rh = ERR_PTR(-ENOMEM);
+ }
+
+ return rh;
+}
+EXPORT_SYMBOL_GPL(dm_region_hash_create);
+
+void dm_region_hash_destroy(struct dm_region_hash *rh)
+{
+ unsigned h;
+ struct dm_region *reg, *nreg;
+
+ BUG_ON(!list_empty(&rh->quiesced_regions));
+ for (h = 0; h < rh->nr_buckets; h++) {
+ list_for_each_entry_safe(reg, nreg, rh->buckets + h,
+ hash_list) {
+ BUG_ON(atomic_read(&reg->pending));
+ mempool_free(reg, rh->region_pool);
+ }
+ }
+
+ if (rh->log)
+ dm_dirty_log_destroy(rh->log);
+
+ if (rh->region_pool)
+ mempool_destroy(rh->region_pool);
+
+ vfree(rh->buckets);
+ kfree(rh);
+}
+EXPORT_SYMBOL_GPL(dm_region_hash_destroy);
+
+struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh)
+{
+ return rh->log;
+}
+EXPORT_SYMBOL_GPL(dm_rh_dirty_log);
+
+static unsigned rh_hash(struct dm_region_hash *rh, region_t region)
+{
+ return (unsigned) ((region * rh->prime) >> rh->shift) & rh->mask;
+}
+
+static struct dm_region *__rh_lookup(struct dm_region_hash *rh, region_t region)
+{
+ struct dm_region *reg;
+ struct list_head *bucket = rh->buckets + rh_hash(rh, region);
+
+ list_for_each_entry(reg, bucket, hash_list)
+ if (reg->key == region)
+ return reg;
+
+ return NULL;
+}
+
+static void __rh_insert(struct dm_region_hash *rh, struct dm_region *reg)
+{
+ list_add(&reg->hash_list, rh->buckets + rh_hash(rh, reg->key));
+}
+
+static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region)
+{
+ struct dm_region *reg, *nreg;
+
+ nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
+ if (unlikely(!nreg))
+ nreg = kmalloc(sizeof(*nreg), GFP_NOIO);
+
+ nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
+ DM_RH_CLEAN : DM_RH_NOSYNC;
+ nreg->rh = rh;
+ nreg->key = region;
+ INIT_LIST_HEAD(&nreg->list);
+ atomic_set(&nreg->pending, 0);
+ bio_list_init(&nreg->delayed_bios);
+
+ write_lock_irq(&rh->hash_lock);
+ reg = __rh_lookup(rh, region);
+ if (reg)
+ /* We lost the race. */
+ mempool_free(nreg, rh->region_pool);
+ else {
+ __rh_insert(rh, nreg);
+ if (nreg->state == DM_RH_CLEAN) {
+ spin_lock(&rh->region_lock);
+ list_add(&nreg->list, &rh->clean_regions);
+ spin_unlock(&rh->region_lock);
+ }
+
+ reg = nreg;
+ }
+ write_unlock_irq(&rh->hash_lock);
+
+ return reg;
+}
+
+static struct dm_region *__rh_find(struct dm_region_hash *rh, region_t region)
+{
+ struct dm_region *reg;
+
+ reg = __rh_lookup(rh, region);
+ if (!reg) {
+ read_unlock(&rh->hash_lock);
+ reg = __rh_alloc(rh, region);
+ read_lock(&rh->hash_lock);
+ }
+
+ return reg;
+}
+
+int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block)
+{
+ int r;
+ struct dm_region *reg;
+
+ read_lock(&rh->hash_lock);
+ reg = __rh_lookup(rh, region);
+ read_unlock(&rh->hash_lock);
+
+ if (reg)
+ return reg->state;
+
+ /*
+ * The region wasn't in the hash, so we fall back to the
+ * dirty log.
+ */
+ r = rh->log->type->in_sync(rh->log, region, may_block);
+
+ /*
+ * Any error from the dirty log (eg. -EWOULDBLOCK) gets
+ * taken as a DM_RH_NOSYNC
+ */
+ return r == 1 ? DM_RH_CLEAN : DM_RH_NOSYNC;
+}
+EXPORT_SYMBOL_GPL(dm_rh_get_state);
+
+static void complete_resync_work(struct dm_region *reg, int success)
+{
+ struct dm_region_hash *rh = reg->rh;
+
+ rh->log->type->set_region_sync(rh->log, reg->key, success);
+
+ /*
+ * Dispatch the bios before we call 'wake_up_all'.
+ * This is important because if we are suspending,
+ * we want to know that recovery is complete and
+ * the work queue is flushed. If we wake_up_all
+ * before we dispatch_bios (queue bios and call wake()),
+ * then we risk suspending before the work queue
+ * has been properly flushed.
+ */
+ rh->dispatch_bios(rh->context, &reg->delayed_bios);
+ if (atomic_dec_and_test(&rh->recovery_in_flight))
+ rh->wakeup_all_recovery_waiters(rh->context);
+ up(&rh->recovery_count);
+}
+
+/* dm_rh_mark_nosync
+ * @ms
+ * @bio
+ * @done
+ * @error
+ *
+ * The bio was written on some mirror(s) but failed on other mirror(s).
+ * We can successfully endio the bio but should avoid the region being
+ * marked clean by setting the state DM_RH_NOSYNC.
+ *
+ * This function is _not_ safe in interrupt context!
+ */
+void dm_rh_mark_nosync(struct dm_region_hash *rh,
+ struct bio *bio, unsigned done, int error)
+{
+ unsigned long flags;
+ struct dm_dirty_log *log = rh->log;
+ struct dm_region *reg;
+ region_t region = dm_rh_bio_to_region(rh, bio);
+ int recovering = 0;
+
+ /* We must inform the log that the sync count has changed. */
+ log->type->set_region_sync(log, region, 0);
+
+ read_lock(&rh->hash_lock);
+ reg = __rh_find(rh, region);
+ read_unlock(&rh->hash_lock);
+
+ /* region hash entry should exist because write was in-flight */
+ BUG_ON(!reg);
+ BUG_ON(!list_empty(&reg->list));
+
+ spin_lock_irqsave(&rh->region_lock, flags);
+ /*
+ * Possible cases:
+ * 1) DM_RH_DIRTY
+ * 2) DM_RH_NOSYNC: was dirty, other preceding writes failed
+ * 3) DM_RH_RECOVERING: flushing pending writes
+ * In either case, the region should not have been connected to any list.
+ */ + recovering = (reg->state == DM_RH_RECOVERING); + reg->state = DM_RH_NOSYNC; + BUG_ON(!list_empty(®->list)); + spin_unlock_irqrestore(&rh->region_lock, flags); + + bio_endio(bio, error); + if (recovering) + complete_resync_work(reg, 0); +} +EXPORT_SYMBOL_GPL(dm_rh_mark_nosync); + +void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled) +{ + struct dm_region *reg, *next; + + LIST_HEAD(clean); + LIST_HEAD(recovered); + LIST_HEAD(failed_recovered); + + /* + * Quickly grab the lists. + */ + write_lock_irq(&rh->hash_lock); + spin_lock(&rh->region_lock); + if (!list_empty(&rh->clean_regions)) { + list_splice_init(&rh->clean_regions, &clean); + + list_for_each_entry(reg, &clean, list) + list_del(®->hash_list); + } + + if (!list_empty(&rh->recovered_regions)) { + list_splice_init(&rh->recovered_regions, &recovered); + + list_for_each_entry(reg, &recovered, list) + list_del(®->hash_list); + } + + if (!list_empty(&rh->failed_recovered_regions)) { + list_splice_init(&rh->failed_recovered_regions, + &failed_recovered); + + list_for_each_entry(reg, &failed_recovered, list) + list_del(®->hash_list); + } + + spin_unlock(&rh->region_lock); + write_unlock_irq(&rh->hash_lock); + + /* + * All the regions on the recovered and clean lists have + * now been pulled out of the system, so no need to do + * any more locking. + */ + list_for_each_entry_safe(reg, next, &recovered, list) { + rh->log->type->clear_region(rh->log, reg->key); + complete_resync_work(reg, 1); + mempool_free(reg, rh->region_pool); + } + + list_for_each_entry_safe(reg, next, &failed_recovered, list) { + complete_resync_work(reg, errors_handled ? 0 : 1); + mempool_free(reg, rh->region_pool); + } + + list_for_each_entry_safe(reg, next, &clean, list) { + rh->log->type->clear_region(rh->log, reg->key); + mempool_free(reg, rh->region_pool); + } + + rh->log->type->flush(rh->log); +} +EXPORT_SYMBOL_GPL(dm_rh_update_states); + +static void rh_inc(struct dm_region_hash *rh, region_t region) +{ + struct dm_region *reg; + + read_lock(&rh->hash_lock); + reg = __rh_find(rh, region); + + spin_lock_irq(&rh->region_lock); + atomic_inc(®->pending); + + if (reg->state == DM_RH_CLEAN) { + reg->state = DM_RH_DIRTY; + list_del_init(®->list); /* take off the clean list */ + spin_unlock_irq(&rh->region_lock); + + rh->log->type->mark_region(rh->log, reg->key); + } else + spin_unlock_irq(&rh->region_lock); + + + read_unlock(&rh->hash_lock); +} + +void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios) +{ + struct bio *bio; + + for (bio = bios->head; bio; bio = bio->bi_next) + rh_inc(rh, dm_rh_bio_to_region(rh, bio)); +} +EXPORT_SYMBOL_GPL(dm_rh_inc_pending); + +void dm_rh_dec(struct dm_region_hash *rh, region_t region) +{ + unsigned long flags; + struct dm_region *reg; + int should_wake = 0; + + read_lock(&rh->hash_lock); + reg = __rh_lookup(rh, region); + read_unlock(&rh->hash_lock); + + spin_lock_irqsave(&rh->region_lock, flags); + if (atomic_dec_and_test(®->pending)) { + /* + * There is no pending I/O for this region. + * We can move the region to corresponding list for next action. + * At this point, the region is not yet connected to any list. + * + * If the state is DM_RH_NOSYNC, the region should be kept off + * from clean list. + * The hash entry for DM_RH_NOSYNC will remain in memory + * until the region is recovered or the map is reloaded. 
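[Illustration, not part of the patch: dm_rh_update_states() above splices the clean, recovered and failed-recovered lists to local heads while holding the locks, then does the per-region work with no lock held. A minimal userspace analogue of that splice-then-process pattern, using a plain singly linked list and a pthread mutex instead of the kernel list and spinlock:]

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int key; };

static struct node *clean_head;			/* shared, protected by lock */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void add_clean(int key)
{
	struct node *n = malloc(sizeof(*n));

	if (!n)
		return;
	n->key = key;
	pthread_mutex_lock(&lock);
	n->next = clean_head;			/* push under the lock */
	clean_head = n;
	pthread_mutex_unlock(&lock);
}

static void update_states(void)
{
	struct node *list, *n;

	/* quickly steal the whole list under the lock ... */
	pthread_mutex_lock(&lock);
	list = clean_head;
	clean_head = NULL;
	pthread_mutex_unlock(&lock);

	/* ... then do the slow per-entry work without holding it */
	while ((n = list)) {
		list = n->next;
		printf("flushing region %d\n", n->key);
		free(n);
	}
}

int main(void)
{
	add_clean(1); add_clean(2); add_clean(3);
	update_states();
	return 0;
}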
+ */ + + /* do nothing for DM_RH_NOSYNC */ + if (reg->state == DM_RH_RECOVERING) { + list_add_tail(®->list, &rh->quiesced_regions); + } else if (reg->state == DM_RH_DIRTY) { + reg->state = DM_RH_CLEAN; + list_add(®->list, &rh->clean_regions); + } + should_wake = 1; + } + spin_unlock_irqrestore(&rh->region_lock, flags); + + if (should_wake) + rh->wakeup_workers(rh->context); +} +EXPORT_SYMBOL_GPL(dm_rh_dec); + +/* + * Starts quiescing a region in preparation for recovery. + */ +static int __rh_recovery_prepare(struct dm_region_hash *rh) +{ + int r; + region_t region; + struct dm_region *reg; + + /* + * Ask the dirty log what's next. + */ + r = rh->log->type->get_resync_work(rh->log, ®ion); + if (r <= 0) + return r; + + /* + * Get this region, and start it quiescing by setting the + * recovering flag. + */ + read_lock(&rh->hash_lock); + reg = __rh_find(rh, region); + read_unlock(&rh->hash_lock); + + spin_lock_irq(&rh->region_lock); + reg->state = DM_RH_RECOVERING; + + /* Already quiesced ? */ + if (atomic_read(®->pending)) + list_del_init(®->list); + else + list_move(®->list, &rh->quiesced_regions); + + spin_unlock_irq(&rh->region_lock); + + return 1; +} + +void dm_rh_recovery_prepare(struct dm_region_hash *rh) +{ + /* Extra reference to avoid race with dm_rh_stop_recovery */ + atomic_inc(&rh->recovery_in_flight); + + while (!down_trylock(&rh->recovery_count)) { + atomic_inc(&rh->recovery_in_flight); + if (__rh_recovery_prepare(rh) <= 0) { + atomic_dec(&rh->recovery_in_flight); + up(&rh->recovery_count); + break; + } + } + + /* Drop the extra reference */ + if (atomic_dec_and_test(&rh->recovery_in_flight)) + rh->wakeup_all_recovery_waiters(rh->context); +} +EXPORT_SYMBOL_GPL(dm_rh_recovery_prepare); + +/* + * Returns any quiesced regions. + */ +struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh) +{ + struct dm_region *reg = NULL; + + spin_lock_irq(&rh->region_lock); + if (!list_empty(&rh->quiesced_regions)) { + reg = list_entry(rh->quiesced_regions.next, + struct dm_region, list); + list_del_init(®->list); /* remove from the quiesced list */ + } + spin_unlock_irq(&rh->region_lock); + + return reg; +} +EXPORT_SYMBOL_GPL(dm_rh_recovery_start); + +void dm_rh_recovery_end(struct dm_region *reg, int success) +{ + struct dm_region_hash *rh = reg->rh; + + spin_lock_irq(&rh->region_lock); + if (success) + list_add(®->list, ®->rh->recovered_regions); + else { + reg->state = DM_RH_NOSYNC; + list_add(®->list, ®->rh->failed_recovered_regions); + } + spin_unlock_irq(&rh->region_lock); + + rh->wakeup_workers(rh->context); +} +EXPORT_SYMBOL_GPL(dm_rh_recovery_end); + +/* Return recovery in flight count. 
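[Illustration, not part of the patch: recovery_count above is a counting semaphore that limits concurrent recoveries. It starts at zero, dm_rh_start_recovery() posts it max_recovery times, and dm_rh_recovery_prepare() claims one slot per region with a trylock, giving the slot back when the dirty log runs out of work. A userspace sketch with POSIX semaphores; for brevity the semaphore is initialised to the limit directly, and the dirty log is faked to hand out five regions:]

#include <semaphore.h>
#include <stdio.h>

#define MAX_RECOVERY 8

static sem_t recovery_count;

static int prepare_one_recovery(int i)
{
	return i < 5;			/* pretend the log has five dirty regions */
}

static void recovery_prepare(void)
{
	int started = 0;

	/* claim one slot per region; stop when no slots or no work remain */
	while (sem_trywait(&recovery_count) == 0) {
		if (!prepare_one_recovery(started)) {
			sem_post(&recovery_count);	/* give the unused slot back */
			break;
		}
		started++;
	}
	printf("started %d recoveries\n", started);
}

int main(void)
{
	sem_init(&recovery_count, 0, MAX_RECOVERY);
	recovery_prepare();
	return 0;
}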
*/ +int dm_rh_recovery_in_flight(struct dm_region_hash *rh) +{ + return atomic_read(&rh->recovery_in_flight); +} +EXPORT_SYMBOL_GPL(dm_rh_recovery_in_flight); + +int dm_rh_flush(struct dm_region_hash *rh) +{ + return rh->log->type->flush(rh->log); +} +EXPORT_SYMBOL_GPL(dm_rh_flush); + +void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio) +{ + struct dm_region *reg; + + read_lock(&rh->hash_lock); + reg = __rh_find(rh, dm_rh_bio_to_region(rh, bio)); + bio_list_add(®->delayed_bios, bio); + read_unlock(&rh->hash_lock); +} +EXPORT_SYMBOL_GPL(dm_rh_delay); + +void dm_rh_stop_recovery(struct dm_region_hash *rh) +{ + int i; + + /* wait for any recovering regions */ + for (i = 0; i < rh->max_recovery; i++) + down(&rh->recovery_count); +} +EXPORT_SYMBOL_GPL(dm_rh_stop_recovery); + +void dm_rh_start_recovery(struct dm_region_hash *rh) +{ + int i; + + for (i = 0; i < rh->max_recovery; i++) + up(&rh->recovery_count); + + rh->wakeup_workers(rh->context); +} +EXPORT_SYMBOL_GPL(dm_rh_start_recovery); + +MODULE_DESCRIPTION(DM_NAME " region hash"); +MODULE_AUTHOR("Joe Thornber/Heinz Mauelshagen <dm-devel@redhat.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c new file mode 100644 index 0000000..cdfbf65 --- /dev/null +++ b/drivers/md/dm-round-robin.c @@ -0,0 +1,217 @@ +/* + * Copyright (C) 2003 Sistina Software. + * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved. + * + * Module Author: Heinz Mauelshagen + * + * This file is released under the GPL. + * + * Round-robin path selector. + */ + +#include <linux/device-mapper.h> + +#include "dm-path-selector.h" + +#include <linux/slab.h> + +#define DM_MSG_PREFIX "multipath round-robin" + +/*----------------------------------------------------------------- + * Path-handling code, paths are held in lists + *---------------------------------------------------------------*/ +struct path_info { + struct list_head list; + struct dm_path *path; + unsigned repeat_count; +}; + +static void free_paths(struct list_head *paths) +{ + struct path_info *pi, *next; + + list_for_each_entry_safe(pi, next, paths, list) { + list_del(&pi->list); + kfree(pi); + } +} + +/*----------------------------------------------------------------- + * Round-robin selector + *---------------------------------------------------------------*/ + +#define RR_MIN_IO 1000 + +struct selector { + struct list_head valid_paths; + struct list_head invalid_paths; +}; + +static struct selector *alloc_selector(void) +{ + struct selector *s = kmalloc(sizeof(*s), GFP_KERNEL); + + if (s) { + INIT_LIST_HEAD(&s->valid_paths); + INIT_LIST_HEAD(&s->invalid_paths); + } + + return s; +} + +static int rr_create(struct path_selector *ps, unsigned argc, char **argv) +{ + struct selector *s; + + s = alloc_selector(); + if (!s) + return -ENOMEM; + + ps->context = s; + return 0; +} + +static void rr_destroy(struct path_selector *ps) +{ + struct selector *s = (struct selector *) ps->context; + + free_paths(&s->valid_paths); + free_paths(&s->invalid_paths); + kfree(s); + ps->context = NULL; +} + +static int rr_status(struct path_selector *ps, struct dm_path *path, + status_type_t type, char *result, unsigned int maxlen) +{ + struct path_info *pi; + int sz = 0; + + if (!path) + DMEMIT("0 "); + else { + switch(type) { + case STATUSTYPE_INFO: + break; + case STATUSTYPE_TABLE: + pi = path->pscontext; + DMEMIT("%u ", pi->repeat_count); + break; + } + } + + return sz; +} + +/* + * Called during initialisation to register each path with an + * optional 
repeat_count. + */ +static int rr_add_path(struct path_selector *ps, struct dm_path *path, + int argc, char **argv, char **error) +{ + struct selector *s = (struct selector *) ps->context; + struct path_info *pi; + unsigned repeat_count = RR_MIN_IO; + + if (argc > 1) { + *error = "round-robin ps: incorrect number of arguments"; + return -EINVAL; + } + + /* First path argument is number of I/Os before switching path */ + if ((argc == 1) && (sscanf(argv[0], "%u", &repeat_count) != 1)) { + *error = "round-robin ps: invalid repeat count"; + return -EINVAL; + } + + /* allocate the path */ + pi = kmalloc(sizeof(*pi), GFP_KERNEL); + if (!pi) { + *error = "round-robin ps: Error allocating path context"; + return -ENOMEM; + } + + pi->path = path; + pi->repeat_count = repeat_count; + + path->pscontext = pi; + + list_add_tail(&pi->list, &s->valid_paths); + + return 0; +} + +static void rr_fail_path(struct path_selector *ps, struct dm_path *p) +{ + struct selector *s = (struct selector *) ps->context; + struct path_info *pi = p->pscontext; + + list_move(&pi->list, &s->invalid_paths); +} + +static int rr_reinstate_path(struct path_selector *ps, struct dm_path *p) +{ + struct selector *s = (struct selector *) ps->context; + struct path_info *pi = p->pscontext; + + list_move(&pi->list, &s->valid_paths); + + return 0; +} + +static struct dm_path *rr_select_path(struct path_selector *ps, + unsigned *repeat_count) +{ + struct selector *s = (struct selector *) ps->context; + struct path_info *pi = NULL; + + if (!list_empty(&s->valid_paths)) { + pi = list_entry(s->valid_paths.next, struct path_info, list); + list_move_tail(&pi->list, &s->valid_paths); + *repeat_count = pi->repeat_count; + } + + return pi ? pi->path : NULL; +} + +static struct path_selector_type rr_ps = { + .name = "round-robin", + .module = THIS_MODULE, + .table_args = 1, + .info_args = 0, + .create = rr_create, + .destroy = rr_destroy, + .status = rr_status, + .add_path = rr_add_path, + .fail_path = rr_fail_path, + .reinstate_path = rr_reinstate_path, + .select_path = rr_select_path, +}; + +static int __init dm_rr_init(void) +{ + int r = dm_register_path_selector(&rr_ps); + + if (r < 0) + DMERR("register failed %d", r); + + DMINFO("version 1.0.0 loaded"); + + return r; +} + +static void __exit dm_rr_exit(void) +{ + int r = dm_unregister_path_selector(&rr_ps); + + if (r < 0) + DMERR("unregister failed %d", r); +} + +module_init(dm_rr_init); +module_exit(dm_rr_exit); + +MODULE_DESCRIPTION(DM_NAME " round-robin multipath path selector"); +MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c new file mode 100644 index 0000000..6c96db2 --- /dev/null +++ b/drivers/md/dm-snap.c @@ -0,0 +1,1496 @@ +/* + * dm-snapshot.c + * + * Copyright (C) 2001-2002 Sistina Software (UK) Limited. + * + * This file is released under the GPL. 
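[Illustration, not part of the patch: rr_select_path() above always hands back the head of valid_paths and rotates it to the tail with list_move_tail(), while repeat_count (RR_MIN_IO, 1000 by default) tells the multipath core how many I/Os to issue before asking again. A minimal userspace sketch of the same rotation, using an array index in place of the kernel list and made-up device names:]

#include <stdio.h>

#define NR_PATHS 3

static const char *paths[NR_PATHS] = { "8:16", "8:32", "8:48" };
static unsigned next;			/* index of the path to hand out next */

static const char *select_path(void)
{
	const char *p = paths[next];

	next = (next + 1) % NR_PATHS;	/* rotate: the list_move_tail() equivalent */
	return p;
}

int main(void)
{
	for (int i = 0; i < 7; i++)
		printf("I/O %d -> path %s\n", i, select_path());
	return 0;
}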
+ */ + +#include <linux/blkdev.h> +#include <linux/ctype.h> +#include <linux/device-mapper.h> +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/kdev_t.h> +#include <linux/list.h> +#include <linux/mempool.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/log2.h> +#include <linux/dm-kcopyd.h> + +#include "dm-snap.h" +#include "dm-bio-list.h" + +#define DM_MSG_PREFIX "snapshots" + +/* + * The percentage increment we will wake up users at + */ +#define WAKE_UP_PERCENT 5 + +/* + * kcopyd priority of snapshot operations + */ +#define SNAPSHOT_COPY_PRIORITY 2 + +/* + * Reserve 1MB for each snapshot initially (with minimum of 1 page). + */ +#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1) + +/* + * The size of the mempool used to track chunks in use. + */ +#define MIN_IOS 256 + +static struct workqueue_struct *ksnapd; +static void flush_queued_bios(struct work_struct *work); + +struct dm_snap_pending_exception { + struct dm_snap_exception e; + + /* + * Origin buffers waiting for this to complete are held + * in a bio list + */ + struct bio_list origin_bios; + struct bio_list snapshot_bios; + + /* + * Short-term queue of pending exceptions prior to submission. + */ + struct list_head list; + + /* + * The primary pending_exception is the one that holds + * the ref_count and the list of origin_bios for a + * group of pending_exceptions. It is always last to get freed. + * These fields get set up when writing to the origin. + */ + struct dm_snap_pending_exception *primary_pe; + + /* + * Number of pending_exceptions processing this chunk. + * When this drops to zero we must complete the origin bios. + * If incrementing or decrementing this, hold pe->snap->lock for + * the sibling concerned and not pe->primary_pe->snap->lock unless + * they are the same. + */ + atomic_t ref_count; + + /* Pointer back to snapshot context */ + struct dm_snapshot *snap; + + /* + * 1 indicates the exception has already been sent to + * kcopyd. 
+ */ + int started; +}; + +/* + * Hash table mapping origin volumes to lists of snapshots and + * a lock to protect it + */ +static struct kmem_cache *exception_cache; +static struct kmem_cache *pending_cache; + +struct dm_snap_tracked_chunk { + struct hlist_node node; + chunk_t chunk; +}; + +static struct kmem_cache *tracked_chunk_cache; + +static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s, + chunk_t chunk) +{ + struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool, + GFP_NOIO); + unsigned long flags; + + c->chunk = chunk; + + spin_lock_irqsave(&s->tracked_chunk_lock, flags); + hlist_add_head(&c->node, + &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]); + spin_unlock_irqrestore(&s->tracked_chunk_lock, flags); + + return c; +} + +static void stop_tracking_chunk(struct dm_snapshot *s, + struct dm_snap_tracked_chunk *c) +{ + unsigned long flags; + + spin_lock_irqsave(&s->tracked_chunk_lock, flags); + hlist_del(&c->node); + spin_unlock_irqrestore(&s->tracked_chunk_lock, flags); + + mempool_free(c, s->tracked_chunk_pool); +} + +static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk) +{ + struct dm_snap_tracked_chunk *c; + struct hlist_node *hn; + int found = 0; + + spin_lock_irq(&s->tracked_chunk_lock); + + hlist_for_each_entry(c, hn, + &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) { + if (c->chunk == chunk) { + found = 1; + break; + } + } + + spin_unlock_irq(&s->tracked_chunk_lock); + + return found; +} + +/* + * One of these per registered origin, held in the snapshot_origins hash + */ +struct origin { + /* The origin device */ + struct block_device *bdev; + + struct list_head hash_list; + + /* List of snapshots for this origin */ + struct list_head snapshots; +}; + +/* + * Size of the hash table for origin volumes. If we make this + * the size of the minors list then it should be nearly perfect + */ +#define ORIGIN_HASH_SIZE 256 +#define ORIGIN_MASK 0xFF +static struct list_head *_origins; +static struct rw_semaphore _origins_lock; + +static int init_origin_hash(void) +{ + int i; + + _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head), + GFP_KERNEL); + if (!_origins) { + DMERR("unable to allocate memory"); + return -ENOMEM; + } + + for (i = 0; i < ORIGIN_HASH_SIZE; i++) + INIT_LIST_HEAD(_origins + i); + init_rwsem(&_origins_lock); + + return 0; +} + +static void exit_origin_hash(void) +{ + kfree(_origins); +} + +static unsigned origin_hash(struct block_device *bdev) +{ + return bdev->bd_dev & ORIGIN_MASK; +} + +static struct origin *__lookup_origin(struct block_device *origin) +{ + struct list_head *ol; + struct origin *o; + + ol = &_origins[origin_hash(origin)]; + list_for_each_entry (o, ol, hash_list) + if (bdev_equal(o->bdev, origin)) + return o; + + return NULL; +} + +static void __insert_origin(struct origin *o) +{ + struct list_head *sl = &_origins[origin_hash(o->bdev)]; + list_add_tail(&o->hash_list, sl); +} + +/* + * Make a note of the snapshot and its origin so we can look it + * up when the origin has a write on it. 
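[Illustration, not part of the patch: the origin table above has 256 buckets and origin_hash() simply uses the low 8 bits of the block device's dev_t, so devices sharing the same minor modulo 256 share a bucket and __lookup_origin() resolves collisions by comparing bdev pointers. A userspace sketch of the bucket selection, assuming glibc's makedev() and made-up device numbers:]

#include <stdio.h>
#include <sys/types.h>
#include <sys/sysmacros.h>	/* makedev() */

#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF

static unsigned origin_hash(dev_t dev)
{
	/* one bucket per minor number (modulo 256), as in the driver above */
	return dev & ORIGIN_MASK;
}

int main(void)
{
	dev_t sda = makedev(8, 0), sdb = makedev(8, 16);

	printf("sda -> bucket %u, sdb -> bucket %u of %d\n",
	       origin_hash(sda), origin_hash(sdb), ORIGIN_HASH_SIZE);
	return 0;
}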
+ */ +static int register_snapshot(struct dm_snapshot *snap) +{ + struct origin *o, *new_o; + struct block_device *bdev = snap->origin->bdev; + + new_o = kmalloc(sizeof(*new_o), GFP_KERNEL); + if (!new_o) + return -ENOMEM; + + down_write(&_origins_lock); + o = __lookup_origin(bdev); + + if (o) + kfree(new_o); + else { + /* New origin */ + o = new_o; + + /* Initialise the struct */ + INIT_LIST_HEAD(&o->snapshots); + o->bdev = bdev; + + __insert_origin(o); + } + + list_add_tail(&snap->list, &o->snapshots); + + up_write(&_origins_lock); + return 0; +} + +static void unregister_snapshot(struct dm_snapshot *s) +{ + struct origin *o; + + down_write(&_origins_lock); + o = __lookup_origin(s->origin->bdev); + + list_del(&s->list); + if (list_empty(&o->snapshots)) { + list_del(&o->hash_list); + kfree(o); + } + + up_write(&_origins_lock); +} + +/* + * Implementation of the exception hash tables. + * The lowest hash_shift bits of the chunk number are ignored, allowing + * some consecutive chunks to be grouped together. + */ +static int init_exception_table(struct exception_table *et, uint32_t size, + unsigned hash_shift) +{ + unsigned int i; + + et->hash_shift = hash_shift; + et->hash_mask = size - 1; + et->table = dm_vcalloc(size, sizeof(struct list_head)); + if (!et->table) + return -ENOMEM; + + for (i = 0; i < size; i++) + INIT_LIST_HEAD(et->table + i); + + return 0; +} + +static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem) +{ + struct list_head *slot; + struct dm_snap_exception *ex, *next; + int i, size; + + size = et->hash_mask + 1; + for (i = 0; i < size; i++) { + slot = et->table + i; + + list_for_each_entry_safe (ex, next, slot, hash_list) + kmem_cache_free(mem, ex); + } + + vfree(et->table); +} + +static uint32_t exception_hash(struct exception_table *et, chunk_t chunk) +{ + return (chunk >> et->hash_shift) & et->hash_mask; +} + +static void insert_exception(struct exception_table *eh, + struct dm_snap_exception *e) +{ + struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)]; + list_add(&e->hash_list, l); +} + +static void remove_exception(struct dm_snap_exception *e) +{ + list_del(&e->hash_list); +} + +/* + * Return the exception data for a sector, or NULL if not + * remapped. 
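[Illustration, not part of the patch: exception_hash() above drops the low hash_shift bits of the chunk number, so chunks that differ only in those bits land in the same bucket. The completed-exception table uses DM_CHUNK_CONSECUTIVE_BITS (8 in the 64-bit configuration) for that shift, which is what lets lookup_exception() find a chunk that sits in the middle of a consecutive run by scanning a single bucket. A standalone sketch with an assumed 1024-bucket table:]

#include <stdint.h>
#include <stdio.h>

typedef uint64_t chunk_t;

static uint32_t exception_hash(chunk_t chunk, unsigned hash_shift, uint32_t hash_mask)
{
	/* dropping the low hash_shift bits groups runs of consecutive chunks */
	return (chunk >> hash_shift) & hash_mask;
}

int main(void)
{
	uint32_t mask = 1024 - 1;

	/* with hash_shift == 8, chunks 0..255 all land in bucket 0 */
	printf("chunk 0   -> bucket %u\n", exception_hash(0, 8, mask));
	printf("chunk 255 -> bucket %u\n", exception_hash(255, 8, mask));
	printf("chunk 256 -> bucket %u\n", exception_hash(256, 8, mask));
	return 0;
}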
+ */ +static struct dm_snap_exception *lookup_exception(struct exception_table *et, + chunk_t chunk) +{ + struct list_head *slot; + struct dm_snap_exception *e; + + slot = &et->table[exception_hash(et, chunk)]; + list_for_each_entry (e, slot, hash_list) + if (chunk >= e->old_chunk && + chunk <= e->old_chunk + dm_consecutive_chunk_count(e)) + return e; + + return NULL; +} + +static struct dm_snap_exception *alloc_exception(void) +{ + struct dm_snap_exception *e; + + e = kmem_cache_alloc(exception_cache, GFP_NOIO); + if (!e) + e = kmem_cache_alloc(exception_cache, GFP_ATOMIC); + + return e; +} + +static void free_exception(struct dm_snap_exception *e) +{ + kmem_cache_free(exception_cache, e); +} + +static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s) +{ + struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool, + GFP_NOIO); + + atomic_inc(&s->pending_exceptions_count); + pe->snap = s; + + return pe; +} + +static void free_pending_exception(struct dm_snap_pending_exception *pe) +{ + struct dm_snapshot *s = pe->snap; + + mempool_free(pe, s->pending_pool); + smp_mb__before_atomic_dec(); + atomic_dec(&s->pending_exceptions_count); +} + +static void insert_completed_exception(struct dm_snapshot *s, + struct dm_snap_exception *new_e) +{ + struct exception_table *eh = &s->complete; + struct list_head *l; + struct dm_snap_exception *e = NULL; + + l = &eh->table[exception_hash(eh, new_e->old_chunk)]; + + /* Add immediately if this table doesn't support consecutive chunks */ + if (!eh->hash_shift) + goto out; + + /* List is ordered by old_chunk */ + list_for_each_entry_reverse(e, l, hash_list) { + /* Insert after an existing chunk? */ + if (new_e->old_chunk == (e->old_chunk + + dm_consecutive_chunk_count(e) + 1) && + new_e->new_chunk == (dm_chunk_number(e->new_chunk) + + dm_consecutive_chunk_count(e) + 1)) { + dm_consecutive_chunk_count_inc(e); + free_exception(new_e); + return; + } + + /* Insert before an existing chunk? */ + if (new_e->old_chunk == (e->old_chunk - 1) && + new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) { + dm_consecutive_chunk_count_inc(e); + e->old_chunk--; + e->new_chunk--; + free_exception(new_e); + return; + } + + if (new_e->old_chunk > e->old_chunk) + break; + } + +out: + list_add(&new_e->hash_list, e ? &e->hash_list : l); +} + +int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new) +{ + struct dm_snap_exception *e; + + e = alloc_exception(); + if (!e) + return -ENOMEM; + + e->old_chunk = old; + + /* Consecutive_count is implicitly initialised to zero */ + e->new_chunk = new; + + insert_completed_exception(s, e); + + return 0; +} + +/* + * Hard coded magic. + */ +static int calc_max_buckets(void) +{ + /* use a fixed size of 2MB */ + unsigned long mem = 2 * 1024 * 1024; + mem /= sizeof(struct list_head); + + return mem; +} + +/* + * Allocate room for a suitable hash table. + */ +static int init_hash_tables(struct dm_snapshot *s) +{ + sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets; + + /* + * Calculate based on the size of the original volume or + * the COW volume... 
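[Illustration, not part of the patch: insert_completed_exception() above merges a new exception into a neighbouring one when both the old and new chunk numbers are contiguous, and the run length is encoded in the top 8 bits of new_chunk (see dm_chunk_number() and dm_consecutive_chunk_count() in dm-snap.h later in this patch, where DM_CHUNK_NUMBER_BITS is 56). A sketch of that encoding with made-up chunk numbers:]

#include <stdint.h>
#include <stdio.h>

typedef uint64_t chunk_t;

#define DM_CHUNK_NUMBER_BITS 56

static chunk_t chunk_number(chunk_t chunk)
{
	return chunk & ((1ULL << DM_CHUNK_NUMBER_BITS) - 1);
}

static unsigned consecutive_count(chunk_t new_chunk)
{
	/* number of chunks that follow the first one contiguously */
	return new_chunk >> DM_CHUNK_NUMBER_BITS;
}

int main(void)
{
	/* exception: old chunk 100 maps to new chunk 7, a run of one chunk */
	chunk_t old_chunk = 100, new_chunk = 7;

	/* old 101 -> new 8 extends the run: bump the count, keep the bases */
	new_chunk += 1ULL << DM_CHUNK_NUMBER_BITS;

	printf("maps old chunks %llu..%llu to new chunks starting at %llu\n",
	       (unsigned long long)old_chunk,
	       (unsigned long long)(old_chunk + consecutive_count(new_chunk)),
	       (unsigned long long)chunk_number(new_chunk));
	return 0;
}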
+ */ + cow_dev_size = get_dev_size(s->cow->bdev); + origin_dev_size = get_dev_size(s->origin->bdev); + max_buckets = calc_max_buckets(); + + hash_size = min(origin_dev_size, cow_dev_size) >> s->chunk_shift; + hash_size = min(hash_size, max_buckets); + + hash_size = rounddown_pow_of_two(hash_size); + if (init_exception_table(&s->complete, hash_size, + DM_CHUNK_CONSECUTIVE_BITS)) + return -ENOMEM; + + /* + * Allocate hash table for in-flight exceptions + * Make this smaller than the real hash table + */ + hash_size >>= 3; + if (hash_size < 64) + hash_size = 64; + + if (init_exception_table(&s->pending, hash_size, 0)) { + exit_exception_table(&s->complete, exception_cache); + return -ENOMEM; + } + + return 0; +} + +/* + * Round a number up to the nearest 'size' boundary. size must + * be a power of 2. + */ +static ulong round_up(ulong n, ulong size) +{ + size--; + return (n + size) & ~size; +} + +static int set_chunk_size(struct dm_snapshot *s, const char *chunk_size_arg, + char **error) +{ + unsigned long chunk_size; + char *value; + + chunk_size = simple_strtoul(chunk_size_arg, &value, 10); + if (*chunk_size_arg == '\0' || *value != '\0') { + *error = "Invalid chunk size"; + return -EINVAL; + } + + if (!chunk_size) { + s->chunk_size = s->chunk_mask = s->chunk_shift = 0; + return 0; + } + + /* + * Chunk size must be multiple of page size. Silently + * round up if it's not. + */ + chunk_size = round_up(chunk_size, PAGE_SIZE >> 9); + + /* Check chunk_size is a power of 2 */ + if (!is_power_of_2(chunk_size)) { + *error = "Chunk size is not a power of 2"; + return -EINVAL; + } + + /* Validate the chunk size against the device block size */ + if (chunk_size % (bdev_hardsect_size(s->cow->bdev) >> 9)) { + *error = "Chunk size is not a multiple of device blocksize"; + return -EINVAL; + } + + s->chunk_size = chunk_size; + s->chunk_mask = chunk_size - 1; + s->chunk_shift = ffs(chunk_size) - 1; + + return 0; +} + +/* + * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size> + */ +static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) +{ + struct dm_snapshot *s; + int i; + int r = -EINVAL; + char persistent; + char *origin_path; + char *cow_path; + + if (argc != 4) { + ti->error = "requires exactly 4 arguments"; + r = -EINVAL; + goto bad1; + } + + origin_path = argv[0]; + cow_path = argv[1]; + persistent = toupper(*argv[2]); + + if (persistent != 'P' && persistent != 'N') { + ti->error = "Persistent flag is not P or N"; + r = -EINVAL; + goto bad1; + } + + s = kmalloc(sizeof(*s), GFP_KERNEL); + if (s == NULL) { + ti->error = "Cannot allocate snapshot context private " + "structure"; + r = -ENOMEM; + goto bad1; + } + + r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin); + if (r) { + ti->error = "Cannot get origin device"; + goto bad2; + } + + r = dm_get_device(ti, cow_path, 0, 0, + FMODE_READ | FMODE_WRITE, &s->cow); + if (r) { + dm_put_device(ti, s->origin); + ti->error = "Cannot get COW device"; + goto bad2; + } + + r = set_chunk_size(s, argv[3], &ti->error); + if (r) + goto bad3; + + s->type = persistent; + + s->valid = 1; + s->active = 0; + atomic_set(&s->pending_exceptions_count, 0); + init_rwsem(&s->lock); + spin_lock_init(&s->pe_lock); + s->ti = ti; + + /* Allocate hash table for COW data */ + if (init_hash_tables(s)) { + ti->error = "Unable to allocate hash table space"; + r = -ENOMEM; + goto bad3; + } + + s->store.snap = s; + + if (persistent == 'P') + r = dm_create_persistent(&s->store); + else + r = dm_create_transient(&s->store); + + 
if (r) { + ti->error = "Couldn't create exception store"; + r = -EINVAL; + goto bad4; + } + + r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client); + if (r) { + ti->error = "Could not create kcopyd client"; + goto bad5; + } + + s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache); + if (!s->pending_pool) { + ti->error = "Could not allocate mempool for pending exceptions"; + goto bad6; + } + + s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS, + tracked_chunk_cache); + if (!s->tracked_chunk_pool) { + ti->error = "Could not allocate tracked_chunk mempool for " + "tracking reads"; + goto bad_tracked_chunk_pool; + } + + for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++) + INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]); + + spin_lock_init(&s->tracked_chunk_lock); + + /* Metadata must only be loaded into one table at once */ + r = s->store.read_metadata(&s->store); + if (r < 0) { + ti->error = "Failed to read snapshot metadata"; + goto bad_load_and_register; + } else if (r > 0) { + s->valid = 0; + DMWARN("Snapshot is marked invalid."); + } + + bio_list_init(&s->queued_bios); + INIT_WORK(&s->queued_bios_work, flush_queued_bios); + + /* Add snapshot to the list of snapshots for this origin */ + /* Exceptions aren't triggered till snapshot_resume() is called */ + if (register_snapshot(s)) { + r = -EINVAL; + ti->error = "Cannot register snapshot origin"; + goto bad_load_and_register; + } + + ti->private = s; + ti->split_io = s->chunk_size; + + return 0; + + bad_load_and_register: + mempool_destroy(s->tracked_chunk_pool); + + bad_tracked_chunk_pool: + mempool_destroy(s->pending_pool); + + bad6: + dm_kcopyd_client_destroy(s->kcopyd_client); + + bad5: + s->store.destroy(&s->store); + + bad4: + exit_exception_table(&s->pending, pending_cache); + exit_exception_table(&s->complete, exception_cache); + + bad3: + dm_put_device(ti, s->cow); + dm_put_device(ti, s->origin); + + bad2: + kfree(s); + + bad1: + return r; +} + +static void __free_exceptions(struct dm_snapshot *s) +{ + dm_kcopyd_client_destroy(s->kcopyd_client); + s->kcopyd_client = NULL; + + exit_exception_table(&s->pending, pending_cache); + exit_exception_table(&s->complete, exception_cache); + + s->store.destroy(&s->store); +} + +static void snapshot_dtr(struct dm_target *ti) +{ +#ifdef CONFIG_DM_DEBUG + int i; +#endif + struct dm_snapshot *s = ti->private; + + flush_workqueue(ksnapd); + + /* Prevent further origin writes from using this snapshot. */ + /* After this returns there can be no new kcopyd jobs. */ + unregister_snapshot(s); + + while (atomic_read(&s->pending_exceptions_count)) + yield(); + /* + * Ensure instructions in mempool_destroy aren't reordered + * before atomic_read. + */ + smp_mb(); + +#ifdef CONFIG_DM_DEBUG + for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++) + BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i])); +#endif + + mempool_destroy(s->tracked_chunk_pool); + + __free_exceptions(s); + + mempool_destroy(s->pending_pool); + + dm_put_device(ti, s->origin); + dm_put_device(ti, s->cow); + + kfree(s); +} + +/* + * Flush a list of buffers. 
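[Illustration, not part of the patch: snapshot_ctr() above uses the usual kernel goto-unwind ladder, where each failure jumps to the label that releases everything acquired so far, in reverse order of construction. A minimal standalone sketch of that pattern with three hypothetical resources:]

#include <stdio.h>
#include <stdlib.h>

struct ctx { void *a, *b, *c; };

static int ctx_create(struct ctx *ctx)
{
	ctx->a = malloc(16);
	if (!ctx->a)
		goto bad1;

	ctx->b = malloc(16);
	if (!ctx->b)
		goto bad2;

	ctx->c = malloc(16);
	if (!ctx->c)
		goto bad3;

	return 0;

	/* unwind in reverse order of construction */
bad3:
	free(ctx->b);
bad2:
	free(ctx->a);
bad1:
	return -1;
}

int main(void)
{
	struct ctx ctx;

	if (ctx_create(&ctx) == 0) {
		printf("ctx_create ok\n");
		free(ctx.c);
		free(ctx.b);
		free(ctx.a);
	}
	return 0;
}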
+ */ +static void flush_bios(struct bio *bio) +{ + struct bio *n; + + while (bio) { + n = bio->bi_next; + bio->bi_next = NULL; + generic_make_request(bio); + bio = n; + } +} + +static void flush_queued_bios(struct work_struct *work) +{ + struct dm_snapshot *s = + container_of(work, struct dm_snapshot, queued_bios_work); + struct bio *queued_bios; + unsigned long flags; + + spin_lock_irqsave(&s->pe_lock, flags); + queued_bios = bio_list_get(&s->queued_bios); + spin_unlock_irqrestore(&s->pe_lock, flags); + + flush_bios(queued_bios); +} + +/* + * Error a list of buffers. + */ +static void error_bios(struct bio *bio) +{ + struct bio *n; + + while (bio) { + n = bio->bi_next; + bio->bi_next = NULL; + bio_io_error(bio); + bio = n; + } +} + +static void __invalidate_snapshot(struct dm_snapshot *s, int err) +{ + if (!s->valid) + return; + + if (err == -EIO) + DMERR("Invalidating snapshot: Error reading/writing."); + else if (err == -ENOMEM) + DMERR("Invalidating snapshot: Unable to allocate exception."); + + if (s->store.drop_snapshot) + s->store.drop_snapshot(&s->store); + + s->valid = 0; + + dm_table_event(s->ti->table); +} + +static void get_pending_exception(struct dm_snap_pending_exception *pe) +{ + atomic_inc(&pe->ref_count); +} + +static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe) +{ + struct dm_snap_pending_exception *primary_pe; + struct bio *origin_bios = NULL; + + primary_pe = pe->primary_pe; + + /* + * If this pe is involved in a write to the origin and + * it is the last sibling to complete then release + * the bios for the original write to the origin. + */ + if (primary_pe && + atomic_dec_and_test(&primary_pe->ref_count)) { + origin_bios = bio_list_get(&primary_pe->origin_bios); + free_pending_exception(primary_pe); + } + + /* + * Free the pe if it's not linked to an origin write or if + * it's not itself a primary pe. + */ + if (!primary_pe || primary_pe != pe) + free_pending_exception(pe); + + return origin_bios; +} + +static void pending_complete(struct dm_snap_pending_exception *pe, int success) +{ + struct dm_snap_exception *e; + struct dm_snapshot *s = pe->snap; + struct bio *origin_bios = NULL; + struct bio *snapshot_bios = NULL; + int error = 0; + + if (!success) { + /* Read/write error - snapshot is unusable */ + down_write(&s->lock); + __invalidate_snapshot(s, -EIO); + error = 1; + goto out; + } + + e = alloc_exception(); + if (!e) { + down_write(&s->lock); + __invalidate_snapshot(s, -ENOMEM); + error = 1; + goto out; + } + *e = pe->e; + + down_write(&s->lock); + if (!s->valid) { + free_exception(e); + error = 1; + goto out; + } + + /* + * Check for conflicting reads. This is extremely improbable, + * so yield() is sufficient and there is no need for a wait queue. + */ + while (__chunk_is_tracked(s, pe->e.old_chunk)) + yield(); + + /* + * Add a proper exception, and remove the + * in-flight exception from the list. + */ + insert_completed_exception(s, e); + + out: + remove_exception(&pe->e); + snapshot_bios = bio_list_get(&pe->snapshot_bios); + origin_bios = put_pending_exception(pe); + + up_write(&s->lock); + + /* Submit any pending write bios */ + if (error) + error_bios(snapshot_bios); + else + flush_bios(snapshot_bios); + + flush_bios(origin_bios); +} + +static void commit_callback(void *context, int success) +{ + struct dm_snap_pending_exception *pe = context; + + pending_complete(pe, success); +} + +/* + * Called when the copy I/O has finished. kcopyd actually runs + * this code so don't block. 
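[Illustration, not part of the patch: get_pending_exception()/put_pending_exception() above group the pending exceptions created for one origin write around a primary_pe; the primary starts with one reference, each sibling takes another, and only when the last reference drops are the held-back origin bios released and the primary freed. A simplified, single-threaded userspace sketch (the driver uses atomic_t; a plain counter is used here for brevity):]

#include <stdio.h>
#include <stdlib.h>

struct primary {
	int ref_count;		/* one ref per sibling still copying */
	int origin_bios;	/* stand-in for the held-back origin writes */
};

static void get_primary(struct primary *p)
{
	p->ref_count++;
}

static void put_primary(struct primary *p)
{
	if (--p->ref_count == 0) {
		printf("last sibling done: releasing %d origin bios\n",
		       p->origin_bios);
		free(p);
	}
}

int main(void)
{
	struct primary *p = calloc(1, sizeof(*p));

	if (!p)
		return 1;
	p->ref_count = 1;	/* initial reference held for the origin write */
	p->origin_bios = 3;

	get_primary(p);		/* two sibling snapshots join the same write */
	get_primary(p);

	put_primary(p);		/* copies complete one by one ... */
	put_primary(p);
	put_primary(p);		/* ... the final put releases the origin bios */
	return 0;
}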
+ */ +static void copy_callback(int read_err, unsigned long write_err, void *context) +{ + struct dm_snap_pending_exception *pe = context; + struct dm_snapshot *s = pe->snap; + + if (read_err || write_err) + pending_complete(pe, 0); + + else + /* Update the metadata if we are persistent */ + s->store.commit_exception(&s->store, &pe->e, commit_callback, + pe); +} + +/* + * Dispatches the copy operation to kcopyd. + */ +static void start_copy(struct dm_snap_pending_exception *pe) +{ + struct dm_snapshot *s = pe->snap; + struct dm_io_region src, dest; + struct block_device *bdev = s->origin->bdev; + sector_t dev_size; + + dev_size = get_dev_size(bdev); + + src.bdev = bdev; + src.sector = chunk_to_sector(s, pe->e.old_chunk); + src.count = min(s->chunk_size, dev_size - src.sector); + + dest.bdev = s->cow->bdev; + dest.sector = chunk_to_sector(s, pe->e.new_chunk); + dest.count = src.count; + + /* Hand over to kcopyd */ + dm_kcopyd_copy(s->kcopyd_client, + &src, 1, &dest, 0, copy_callback, pe); +} + +/* + * Looks to see if this snapshot already has a pending exception + * for this chunk, otherwise it allocates a new one and inserts + * it into the pending table. + * + * NOTE: a write lock must be held on snap->lock before calling + * this. + */ +static struct dm_snap_pending_exception * +__find_pending_exception(struct dm_snapshot *s, struct bio *bio) +{ + struct dm_snap_exception *e; + struct dm_snap_pending_exception *pe; + chunk_t chunk = sector_to_chunk(s, bio->bi_sector); + + /* + * Is there a pending exception for this already ? + */ + e = lookup_exception(&s->pending, chunk); + if (e) { + /* cast the exception to a pending exception */ + pe = container_of(e, struct dm_snap_pending_exception, e); + goto out; + } + + /* + * Create a new pending exception, we don't want + * to hold the lock while we do this. + */ + up_write(&s->lock); + pe = alloc_pending_exception(s); + down_write(&s->lock); + + if (!s->valid) { + free_pending_exception(pe); + return NULL; + } + + e = lookup_exception(&s->pending, chunk); + if (e) { + free_pending_exception(pe); + pe = container_of(e, struct dm_snap_pending_exception, e); + goto out; + } + + pe->e.old_chunk = chunk; + bio_list_init(&pe->origin_bios); + bio_list_init(&pe->snapshot_bios); + pe->primary_pe = NULL; + atomic_set(&pe->ref_count, 0); + pe->started = 0; + + if (s->store.prepare_exception(&s->store, &pe->e)) { + free_pending_exception(pe); + return NULL; + } + + get_pending_exception(pe); + insert_exception(&s->pending, &pe->e); + + out: + return pe; +} + +static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e, + struct bio *bio, chunk_t chunk) +{ + bio->bi_bdev = s->cow->bdev; + bio->bi_sector = chunk_to_sector(s, dm_chunk_number(e->new_chunk) + + (chunk - e->old_chunk)) + + (bio->bi_sector & s->chunk_mask); +} + +static int snapshot_map(struct dm_target *ti, struct bio *bio, + union map_info *map_context) +{ + struct dm_snap_exception *e; + struct dm_snapshot *s = ti->private; + int r = DM_MAPIO_REMAPPED; + chunk_t chunk; + struct dm_snap_pending_exception *pe = NULL; + + chunk = sector_to_chunk(s, bio->bi_sector); + + /* Full snapshots are not usable */ + /* To get here the table must be live so s->active is always set. 
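[Illustration, not part of the patch: remap_exception() above redirects a bio to the COW device by translating its chunk number through the exception (keeping the position within a consecutive run) and preserving the offset inside the chunk. A userspace sketch of that arithmetic, assuming 16-sector chunks and made-up chunk numbers:]

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;
typedef uint64_t chunk_t;

static const unsigned chunk_shift = 4;			/* 16 sectors per chunk */
static const sector_t chunk_mask = (1 << 4) - 1;

static sector_t remap(sector_t sector, chunk_t old_chunk, chunk_t new_chunk)
{
	chunk_t chunk = sector >> chunk_shift;

	/* same offset within the chunk, but relocated to the COW device */
	return ((new_chunk + (chunk - old_chunk)) << chunk_shift) +
	       (sector & chunk_mask);
}

int main(void)
{
	/* origin chunk 100 was copied to COW chunk 7; remap a sector inside it */
	printf("sector 1605 -> cow sector %llu\n",
	       (unsigned long long)remap(1605, 100, 7));
	return 0;
}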
*/ + if (!s->valid) + return -EIO; + + /* FIXME: should only take write lock if we need + * to copy an exception */ + down_write(&s->lock); + + if (!s->valid) { + r = -EIO; + goto out_unlock; + } + + /* If the block is already remapped - use that, else remap it */ + e = lookup_exception(&s->complete, chunk); + if (e) { + remap_exception(s, e, bio, chunk); + goto out_unlock; + } + + /* + * Write to snapshot - higher level takes care of RW/RO + * flags so we should only get this if we are + * writeable. + */ + if (bio_rw(bio) == WRITE) { + pe = __find_pending_exception(s, bio); + if (!pe) { + __invalidate_snapshot(s, -ENOMEM); + r = -EIO; + goto out_unlock; + } + + remap_exception(s, &pe->e, bio, chunk); + bio_list_add(&pe->snapshot_bios, bio); + + r = DM_MAPIO_SUBMITTED; + + if (!pe->started) { + /* this is protected by snap->lock */ + pe->started = 1; + up_write(&s->lock); + start_copy(pe); + goto out; + } + } else { + bio->bi_bdev = s->origin->bdev; + map_context->ptr = track_chunk(s, chunk); + } + + out_unlock: + up_write(&s->lock); + out: + return r; +} + +static int snapshot_end_io(struct dm_target *ti, struct bio *bio, + int error, union map_info *map_context) +{ + struct dm_snapshot *s = ti->private; + struct dm_snap_tracked_chunk *c = map_context->ptr; + + if (c) + stop_tracking_chunk(s, c); + + return 0; +} + +static void snapshot_resume(struct dm_target *ti) +{ + struct dm_snapshot *s = ti->private; + + down_write(&s->lock); + s->active = 1; + up_write(&s->lock); +} + +static int snapshot_status(struct dm_target *ti, status_type_t type, + char *result, unsigned int maxlen) +{ + struct dm_snapshot *snap = ti->private; + + switch (type) { + case STATUSTYPE_INFO: + if (!snap->valid) + snprintf(result, maxlen, "Invalid"); + else { + if (snap->store.fraction_full) { + sector_t numerator, denominator; + snap->store.fraction_full(&snap->store, + &numerator, + &denominator); + snprintf(result, maxlen, "%llu/%llu", + (unsigned long long)numerator, + (unsigned long long)denominator); + } + else + snprintf(result, maxlen, "Unknown"); + } + break; + + case STATUSTYPE_TABLE: + /* + * kdevname returns a static pointer so we need + * to make private copies if the output is to + * make sense. + */ + snprintf(result, maxlen, "%s %s %c %llu", + snap->origin->name, snap->cow->name, + snap->type, + (unsigned long long)snap->chunk_size); + break; + } + + return 0; +} + +/*----------------------------------------------------------------- + * Origin methods + *---------------------------------------------------------------*/ +static int __origin_write(struct list_head *snapshots, struct bio *bio) +{ + int r = DM_MAPIO_REMAPPED, first = 0; + struct dm_snapshot *snap; + struct dm_snap_exception *e; + struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL; + chunk_t chunk; + LIST_HEAD(pe_queue); + + /* Do all the snapshots on this origin */ + list_for_each_entry (snap, snapshots, list) { + + down_write(&snap->lock); + + /* Only deal with valid and active snapshots */ + if (!snap->valid || !snap->active) + goto next_snapshot; + + /* Nothing to do if writing beyond end of snapshot */ + if (bio->bi_sector >= dm_table_get_size(snap->ti->table)) + goto next_snapshot; + + /* + * Remember, different snapshots can have + * different chunk sizes. + */ + chunk = sector_to_chunk(snap, bio->bi_sector); + + /* + * Check exception table to see if block + * is already remapped in this snapshot + * and trigger an exception if not. 
+ * + * ref_count is initialised to 1 so pending_complete() + * won't destroy the primary_pe while we're inside this loop. + */ + e = lookup_exception(&snap->complete, chunk); + if (e) + goto next_snapshot; + + pe = __find_pending_exception(snap, bio); + if (!pe) { + __invalidate_snapshot(snap, -ENOMEM); + goto next_snapshot; + } + + if (!primary_pe) { + /* + * Either every pe here has same + * primary_pe or none has one yet. + */ + if (pe->primary_pe) + primary_pe = pe->primary_pe; + else { + primary_pe = pe; + first = 1; + } + + bio_list_add(&primary_pe->origin_bios, bio); + + r = DM_MAPIO_SUBMITTED; + } + + if (!pe->primary_pe) { + pe->primary_pe = primary_pe; + get_pending_exception(primary_pe); + } + + if (!pe->started) { + pe->started = 1; + list_add_tail(&pe->list, &pe_queue); + } + + next_snapshot: + up_write(&snap->lock); + } + + if (!primary_pe) + return r; + + /* + * If this is the first time we're processing this chunk and + * ref_count is now 1 it means all the pending exceptions + * got completed while we were in the loop above, so it falls to + * us here to remove the primary_pe and submit any origin_bios. + */ + + if (first && atomic_dec_and_test(&primary_pe->ref_count)) { + flush_bios(bio_list_get(&primary_pe->origin_bios)); + free_pending_exception(primary_pe); + /* If we got here, pe_queue is necessarily empty. */ + return r; + } + + /* + * Now that we have a complete pe list we can start the copying. + */ + list_for_each_entry_safe(pe, next_pe, &pe_queue, list) + start_copy(pe); + + return r; +} + +/* + * Called on a write from the origin driver. + */ +static int do_origin(struct dm_dev *origin, struct bio *bio) +{ + struct origin *o; + int r = DM_MAPIO_REMAPPED; + + down_read(&_origins_lock); + o = __lookup_origin(origin->bdev); + if (o) + r = __origin_write(&o->snapshots, bio); + up_read(&_origins_lock); + + return r; +} + +/* + * Origin: maps a linear range of a device, with hooks for snapshotting. + */ + +/* + * Construct an origin mapping: <dev_path> + * The context for an origin is merely a 'struct dm_dev *' + * pointing to the real device. + */ +static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv) +{ + int r; + struct dm_dev *dev; + + if (argc != 1) { + ti->error = "origin: incorrect number of arguments"; + return -EINVAL; + } + + r = dm_get_device(ti, argv[0], 0, ti->len, + dm_table_get_mode(ti->table), &dev); + if (r) { + ti->error = "Cannot get target device"; + return r; + } + + ti->private = dev; + return 0; +} + +static void origin_dtr(struct dm_target *ti) +{ + struct dm_dev *dev = ti->private; + dm_put_device(ti, dev); +} + +static int origin_map(struct dm_target *ti, struct bio *bio, + union map_info *map_context) +{ + struct dm_dev *dev = ti->private; + bio->bi_bdev = dev->bdev; + + /* Only tell snapshots if this is a write */ + return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED; +} + +#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r)) + +/* + * Set the target "split_io" field to the minimum of all the snapshots' + * chunk sizes. 
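[Illustration, not part of the patch: origin_resume() below folds the chunk sizes of all snapshots on an origin through min_not_zero(), so split_io becomes the smallest non-zero chunk size, and stays zero only if there are no snapshots. The macro above evaluates its arguments more than once; a sketch of the same logic as a plain function, with hypothetical per-snapshot sizes:]

#include <stdio.h>

typedef unsigned long long chunk_t;

static chunk_t min_not_zero(chunk_t l, chunk_t r)
{
	if (l == 0)
		return r;
	if (r == 0)
		return l;
	return l < r ? l : r;
}

int main(void)
{
	chunk_t split_io = 0;				/* no snapshots folded in yet */
	chunk_t sizes[] = { 16, 0, 8, 32 };		/* per-snapshot chunk sizes */

	for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		split_io = min_not_zero(split_io, sizes[i]);

	printf("split_io = %llu sectors\n", split_io);	/* -> 8 */
	return 0;
}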
+ */ +static void origin_resume(struct dm_target *ti) +{ + struct dm_dev *dev = ti->private; + struct dm_snapshot *snap; + struct origin *o; + chunk_t chunk_size = 0; + + down_read(&_origins_lock); + o = __lookup_origin(dev->bdev); + if (o) + list_for_each_entry (snap, &o->snapshots, list) + chunk_size = min_not_zero(chunk_size, snap->chunk_size); + up_read(&_origins_lock); + + ti->split_io = chunk_size; +} + +static int origin_status(struct dm_target *ti, status_type_t type, char *result, + unsigned int maxlen) +{ + struct dm_dev *dev = ti->private; + + switch (type) { + case STATUSTYPE_INFO: + result[0] = '\0'; + break; + + case STATUSTYPE_TABLE: + snprintf(result, maxlen, "%s", dev->name); + break; + } + + return 0; +} + +static struct target_type origin_target = { + .name = "snapshot-origin", + .version = {1, 6, 0}, + .module = THIS_MODULE, + .ctr = origin_ctr, + .dtr = origin_dtr, + .map = origin_map, + .resume = origin_resume, + .status = origin_status, +}; + +static struct target_type snapshot_target = { + .name = "snapshot", + .version = {1, 6, 0}, + .module = THIS_MODULE, + .ctr = snapshot_ctr, + .dtr = snapshot_dtr, + .map = snapshot_map, + .end_io = snapshot_end_io, + .resume = snapshot_resume, + .status = snapshot_status, +}; + +static int __init dm_snapshot_init(void) +{ + int r; + + r = dm_register_target(&snapshot_target); + if (r) { + DMERR("snapshot target register failed %d", r); + return r; + } + + r = dm_register_target(&origin_target); + if (r < 0) { + DMERR("Origin target register failed %d", r); + goto bad1; + } + + r = init_origin_hash(); + if (r) { + DMERR("init_origin_hash failed."); + goto bad2; + } + + exception_cache = KMEM_CACHE(dm_snap_exception, 0); + if (!exception_cache) { + DMERR("Couldn't create exception cache."); + r = -ENOMEM; + goto bad3; + } + + pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0); + if (!pending_cache) { + DMERR("Couldn't create pending cache."); + r = -ENOMEM; + goto bad4; + } + + tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0); + if (!tracked_chunk_cache) { + DMERR("Couldn't create cache to track chunks in use."); + r = -ENOMEM; + goto bad5; + } + + ksnapd = create_singlethread_workqueue("ksnapd"); + if (!ksnapd) { + DMERR("Failed to create ksnapd workqueue."); + r = -ENOMEM; + goto bad_pending_pool; + } + + return 0; + + bad_pending_pool: + kmem_cache_destroy(tracked_chunk_cache); + bad5: + kmem_cache_destroy(pending_cache); + bad4: + kmem_cache_destroy(exception_cache); + bad3: + exit_origin_hash(); + bad2: + dm_unregister_target(&origin_target); + bad1: + dm_unregister_target(&snapshot_target); + return r; +} + +static void __exit dm_snapshot_exit(void) +{ + int r; + + destroy_workqueue(ksnapd); + + r = dm_unregister_target(&snapshot_target); + if (r) + DMERR("snapshot unregister failed %d", r); + + r = dm_unregister_target(&origin_target); + if (r) + DMERR("origin unregister failed %d", r); + + exit_origin_hash(); + kmem_cache_destroy(pending_cache); + kmem_cache_destroy(exception_cache); + kmem_cache_destroy(tracked_chunk_cache); +} + +/* Module hooks */ +module_init(dm_snapshot_init); +module_exit(dm_snapshot_exit); + +MODULE_DESCRIPTION(DM_NAME " snapshot target"); +MODULE_AUTHOR("Joe Thornber"); +MODULE_LICENSE("GPL"); diff --git a/drivers/md/dm-snap.h b/drivers/md/dm-snap.h new file mode 100644 index 0000000..99c0106 --- /dev/null +++ b/drivers/md/dm-snap.h @@ -0,0 +1,230 @@ +/* + * dm-snapshot.c + * + * Copyright (C) 2001-2002 Sistina Software (UK) Limited. + * + * This file is released under the GPL. 
+ */ + +#ifndef DM_SNAPSHOT_H +#define DM_SNAPSHOT_H + +#include <linux/device-mapper.h> +#include "dm-bio-list.h" +#include <linux/blkdev.h> +#include <linux/workqueue.h> + +struct exception_table { + uint32_t hash_mask; + unsigned hash_shift; + struct list_head *table; +}; + +/* + * The snapshot code deals with largish chunks of the disk at a + * time. Typically 32k - 512k. + */ +typedef sector_t chunk_t; + +/* + * An exception is used where an old chunk of data has been + * replaced by a new one. + * If chunk_t is 64 bits in size, the top 8 bits of new_chunk hold the number + * of chunks that follow contiguously. Remaining bits hold the number of the + * chunk within the device. + */ +struct dm_snap_exception { + struct list_head hash_list; + + chunk_t old_chunk; + chunk_t new_chunk; +}; + +/* + * Funtions to manipulate consecutive chunks + */ +# if defined(CONFIG_LBD) || (BITS_PER_LONG == 64) +# define DM_CHUNK_CONSECUTIVE_BITS 8 +# define DM_CHUNK_NUMBER_BITS 56 + +static inline chunk_t dm_chunk_number(chunk_t chunk) +{ + return chunk & (chunk_t)((1ULL << DM_CHUNK_NUMBER_BITS) - 1ULL); +} + +static inline unsigned dm_consecutive_chunk_count(struct dm_snap_exception *e) +{ + return e->new_chunk >> DM_CHUNK_NUMBER_BITS; +} + +static inline void dm_consecutive_chunk_count_inc(struct dm_snap_exception *e) +{ + e->new_chunk += (1ULL << DM_CHUNK_NUMBER_BITS); + + BUG_ON(!dm_consecutive_chunk_count(e)); +} + +# else +# define DM_CHUNK_CONSECUTIVE_BITS 0 + +static inline chunk_t dm_chunk_number(chunk_t chunk) +{ + return chunk; +} + +static inline unsigned dm_consecutive_chunk_count(struct dm_snap_exception *e) +{ + return 0; +} + +static inline void dm_consecutive_chunk_count_inc(struct dm_snap_exception *e) +{ +} + +# endif + +/* + * Abstraction to handle the meta/layout of exception stores (the + * COW device). + */ +struct exception_store { + + /* + * Destroys this object when you've finished with it. + */ + void (*destroy) (struct exception_store *store); + + /* + * The target shouldn't read the COW device until this is + * called. + */ + int (*read_metadata) (struct exception_store *store); + + /* + * Find somewhere to store the next exception. + */ + int (*prepare_exception) (struct exception_store *store, + struct dm_snap_exception *e); + + /* + * Update the metadata with this exception. + */ + void (*commit_exception) (struct exception_store *store, + struct dm_snap_exception *e, + void (*callback) (void *, int success), + void *callback_context); + + /* + * The snapshot is invalid, note this in the metadata. + */ + void (*drop_snapshot) (struct exception_store *store); + + /* + * Return how full the snapshot is. + */ + void (*fraction_full) (struct exception_store *store, + sector_t *numerator, + sector_t *denominator); + + struct dm_snapshot *snap; + void *context; +}; + +#define DM_TRACKED_CHUNK_HASH_SIZE 16 +#define DM_TRACKED_CHUNK_HASH(x) ((unsigned long)(x) & \ + (DM_TRACKED_CHUNK_HASH_SIZE - 1)) + +struct dm_snapshot { + struct rw_semaphore lock; + struct dm_target *ti; + + struct dm_dev *origin; + struct dm_dev *cow; + + /* List of snapshots per Origin */ + struct list_head list; + + /* Size of data blocks saved - must be a power of 2 */ + chunk_t chunk_size; + chunk_t chunk_mask; + chunk_t chunk_shift; + + /* You can't use a snapshot if this is 0 (e.g. 
if full) */ + int valid; + + /* Origin writes don't trigger exceptions until this is set */ + int active; + + /* Used for display of table */ + char type; + + mempool_t *pending_pool; + + atomic_t pending_exceptions_count; + + struct exception_table pending; + struct exception_table complete; + + /* + * pe_lock protects all pending_exception operations and access + * as well as the snapshot_bios list. + */ + spinlock_t pe_lock; + + /* The on disk metadata handler */ + struct exception_store store; + + struct dm_kcopyd_client *kcopyd_client; + + /* Queue of snapshot writes for ksnapd to flush */ + struct bio_list queued_bios; + struct work_struct queued_bios_work; + + /* Chunks with outstanding reads */ + mempool_t *tracked_chunk_pool; + spinlock_t tracked_chunk_lock; + struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE]; +}; + +/* + * Used by the exception stores to load exceptions hen + * initialising. + */ +int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new); + +/* + * Constructor and destructor for the default persistent + * store. + */ +int dm_create_persistent(struct exception_store *store); + +int dm_create_transient(struct exception_store *store); + +/* + * Return the number of sectors in the device. + */ +static inline sector_t get_dev_size(struct block_device *bdev) +{ + return bdev->bd_inode->i_size >> SECTOR_SHIFT; +} + +static inline chunk_t sector_to_chunk(struct dm_snapshot *s, sector_t sector) +{ + return (sector & ~s->chunk_mask) >> s->chunk_shift; +} + +static inline sector_t chunk_to_sector(struct dm_snapshot *s, chunk_t chunk) +{ + return chunk << s->chunk_shift; +} + +static inline int bdev_equal(struct block_device *lhs, struct block_device *rhs) +{ + /* + * There is only ever one instance of a particular block + * device so we can compare pointers safely. + */ + return lhs == rhs; +} + +#endif diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c new file mode 100644 index 0000000..9e4ef88 --- /dev/null +++ b/drivers/md/dm-stripe.c @@ -0,0 +1,346 @@ +/* + * Copyright (C) 2001-2003 Sistina Software (UK) Limited. + * + * This file is released under the GPL. + */ + +#include <linux/device-mapper.h> + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/blkdev.h> +#include <linux/bio.h> +#include <linux/slab.h> +#include <linux/log2.h> + +#define DM_MSG_PREFIX "striped" +#define DM_IO_ERROR_THRESHOLD 15 + +struct stripe { + struct dm_dev *dev; + sector_t physical_start; + + atomic_t error_count; +}; + +struct stripe_c { + uint32_t stripes; + + /* The size of this target / num. stripes */ + sector_t stripe_width; + + /* stripe chunk size */ + uint32_t chunk_shift; + sector_t chunk_mask; + + /* Needed for handling events */ + struct dm_target *ti; + + /* Work struct used for triggering events*/ + struct work_struct kstriped_ws; + + struct stripe stripe[0]; +}; + +static struct workqueue_struct *kstriped; + +/* + * An event is triggered whenever a drive + * drops out of a stripe volume. 
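[Illustration, not part of the patch: struct stripe_c above ends in a trailing array (struct stripe stripe[0]), and alloc_context() below sizes a single allocation to hold the header plus one struct stripe per configured stripe. A standalone sketch of that layout using the C99 flexible array member and stand-in fields:]

#include <stdio.h>
#include <stdlib.h>

struct stripe {
	int dev;				/* stand-in for the struct dm_dev pointer */
	unsigned long long physical_start;
};

struct stripe_c {
	unsigned stripes;
	struct stripe stripe[];			/* written as stripe[0] in the driver */
};

static struct stripe_c *alloc_context(unsigned stripes)
{
	/* header and per-stripe entries come from one allocation */
	struct stripe_c *sc = malloc(sizeof(*sc) + stripes * sizeof(struct stripe));

	if (sc)
		sc->stripes = stripes;
	return sc;
}

int main(void)
{
	struct stripe_c *sc = alloc_context(4);

	if (!sc)
		return 1;
	sc->stripe[3].physical_start = 8192;	/* last entry is addressable */
	printf("%u stripes, stripe[3] starts at %llu\n",
	       sc->stripes, sc->stripe[3].physical_start);
	free(sc);
	return 0;
}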
+ */ +static void trigger_event(struct work_struct *work) +{ + struct stripe_c *sc = container_of(work, struct stripe_c, kstriped_ws); + + dm_table_event(sc->ti->table); + +} + +static inline struct stripe_c *alloc_context(unsigned int stripes) +{ + size_t len; + + if (dm_array_too_big(sizeof(struct stripe_c), sizeof(struct stripe), + stripes)) + return NULL; + + len = sizeof(struct stripe_c) + (sizeof(struct stripe) * stripes); + + return kmalloc(len, GFP_KERNEL); +} + +/* + * Parse a single <dev> <sector> pair + */ +static int get_stripe(struct dm_target *ti, struct stripe_c *sc, + unsigned int stripe, char **argv) +{ + unsigned long long start; + + if (sscanf(argv[1], "%llu", &start) != 1) + return -EINVAL; + + if (dm_get_device(ti, argv[0], start, sc->stripe_width, + dm_table_get_mode(ti->table), + &sc->stripe[stripe].dev)) + return -ENXIO; + + sc->stripe[stripe].physical_start = start; + + return 0; +} + +/* + * Construct a striped mapping. + * <number of stripes> <chunk size (2^^n)> [<dev_path> <offset>]+ + */ +static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) +{ + struct stripe_c *sc; + sector_t width; + uint32_t stripes; + uint32_t chunk_size; + char *end; + int r; + unsigned int i; + + if (argc < 2) { + ti->error = "Not enough arguments"; + return -EINVAL; + } + + stripes = simple_strtoul(argv[0], &end, 10); + if (*end) { + ti->error = "Invalid stripe count"; + return -EINVAL; + } + + chunk_size = simple_strtoul(argv[1], &end, 10); + if (*end) { + ti->error = "Invalid chunk_size"; + return -EINVAL; + } + + /* + * chunk_size is a power of two + */ + if (!is_power_of_2(chunk_size) || + (chunk_size < (PAGE_SIZE >> SECTOR_SHIFT))) { + ti->error = "Invalid chunk size"; + return -EINVAL; + } + + if (ti->len & (chunk_size - 1)) { + ti->error = "Target length not divisible by " + "chunk size"; + return -EINVAL; + } + + width = ti->len; + if (sector_div(width, stripes)) { + ti->error = "Target length not divisible by " + "number of stripes"; + return -EINVAL; + } + + /* + * Do we have enough arguments for that many stripes ? + */ + if (argc != (2 + 2 * stripes)) { + ti->error = "Not enough destinations " + "specified"; + return -EINVAL; + } + + sc = alloc_context(stripes); + if (!sc) { + ti->error = "Memory allocation for striped context " + "failed"; + return -ENOMEM; + } + + INIT_WORK(&sc->kstriped_ws, trigger_event); + + /* Set pointer to dm target; used in trigger_event */ + sc->ti = ti; + + sc->stripes = stripes; + sc->stripe_width = width; + ti->split_io = chunk_size; + + sc->chunk_mask = ((sector_t) chunk_size) - 1; + for (sc->chunk_shift = 0; chunk_size; sc->chunk_shift++) + chunk_size >>= 1; + sc->chunk_shift--; + + /* + * Get the stripe destinations. 
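[Illustration, not part of the patch: stripe_ctr() above turns the (power-of-two) chunk size into a shift count by halving until zero and then stepping back one, and keeps chunk_size - 1 as the offset mask. A small sketch of that derivation with an assumed 128-sector chunk:]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t chunk_size = 128;		/* sectors per chunk (64 KiB) */
	uint64_t chunk_mask = (uint64_t)chunk_size - 1;
	uint32_t shift, tmp;

	/* count the halvings: ilog2 for a power-of-two chunk size */
	for (shift = 0, tmp = chunk_size; tmp; shift++)
		tmp >>= 1;
	shift--;

	printf("chunk_size=%u -> chunk_shift=%u chunk_mask=0x%llx\n",
	       chunk_size, shift, (unsigned long long)chunk_mask);
	return 0;
}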
+ */ + for (i = 0; i < stripes; i++) { + argv += 2; + + r = get_stripe(ti, sc, i, argv); + if (r < 0) { + ti->error = "Couldn't parse stripe destination"; + while (i--) + dm_put_device(ti, sc->stripe[i].dev); + kfree(sc); + return r; + } + atomic_set(&(sc->stripe[i].error_count), 0); + } + + ti->private = sc; + + return 0; +} + +static void stripe_dtr(struct dm_target *ti) +{ + unsigned int i; + struct stripe_c *sc = (struct stripe_c *) ti->private; + + for (i = 0; i < sc->stripes; i++) + dm_put_device(ti, sc->stripe[i].dev); + + flush_workqueue(kstriped); + kfree(sc); +} + +static int stripe_map(struct dm_target *ti, struct bio *bio, + union map_info *map_context) +{ + struct stripe_c *sc = (struct stripe_c *) ti->private; + + sector_t offset = bio->bi_sector - ti->begin; + sector_t chunk = offset >> sc->chunk_shift; + uint32_t stripe = sector_div(chunk, sc->stripes); + + bio->bi_bdev = sc->stripe[stripe].dev->bdev; + bio->bi_sector = sc->stripe[stripe].physical_start + + (chunk << sc->chunk_shift) + (offset & sc->chunk_mask); + return DM_MAPIO_REMAPPED; +} + +/* + * Stripe status: + * + * INFO + * #stripes [stripe_name <stripe_name>] [group word count] + * [error count 'A|D' <error count 'A|D'>] + * + * TABLE + * #stripes [stripe chunk size] + * [stripe_name physical_start <stripe_name physical_start>] + * + */ + +static int stripe_status(struct dm_target *ti, + status_type_t type, char *result, unsigned int maxlen) +{ + struct stripe_c *sc = (struct stripe_c *) ti->private; + char buffer[sc->stripes + 1]; + unsigned int sz = 0; + unsigned int i; + + switch (type) { + case STATUSTYPE_INFO: + DMEMIT("%d ", sc->stripes); + for (i = 0; i < sc->stripes; i++) { + DMEMIT("%s ", sc->stripe[i].dev->name); + buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ? + 'D' : 'A'; + } + buffer[i] = '\0'; + DMEMIT("1 %s", buffer); + break; + + case STATUSTYPE_TABLE: + DMEMIT("%d %llu", sc->stripes, + (unsigned long long)sc->chunk_mask + 1); + for (i = 0; i < sc->stripes; i++) + DMEMIT(" %s %llu", sc->stripe[i].dev->name, + (unsigned long long)sc->stripe[i].physical_start); + break; + } + return 0; +} + +static int stripe_end_io(struct dm_target *ti, struct bio *bio, + int error, union map_info *map_context) +{ + unsigned i; + char major_minor[16]; + struct stripe_c *sc = ti->private; + + if (!error) + return 0; /* I/O complete */ + + if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio)) + return error; + + if (error == -EOPNOTSUPP) + return error; + + memset(major_minor, 0, sizeof(major_minor)); + sprintf(major_minor, "%d:%d", + MAJOR(disk_devt(bio->bi_bdev->bd_disk)), + MINOR(disk_devt(bio->bi_bdev->bd_disk))); + + /* + * Test to see which stripe drive triggered the event + * and increment error count for all stripes on that device. + * If the error count for a given device exceeds the threshold + * value we will no longer trigger any further events. 
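[Illustration, not part of the patch: stripe_map() above converts the offset within the target into a chunk number, uses sector_div() to split that into a stripe index (the remainder) and a per-disk chunk number (the quotient), and keeps the offset within the chunk. A userspace sketch of the same arithmetic with assumed geometry, using an explicit modulo and division in place of sector_div():]

#include <stdint.h>
#include <stdio.h>

#define STRIPES     3
#define CHUNK_SHIFT 7				/* 128-sector chunks */
#define CHUNK_MASK  ((1u << CHUNK_SHIFT) - 1)

int main(void)
{
	uint64_t offset = 1000;			/* sector relative to target start */
	uint64_t chunk = offset >> CHUNK_SHIFT;	/* which chunk of the volume */
	unsigned stripe = chunk % STRIPES;	/* which disk it lives on */

	chunk /= STRIPES;			/* which chunk on that disk */

	printf("offset %llu -> stripe %u, device sector %llu\n",
	       (unsigned long long)offset, stripe,
	       (unsigned long long)((chunk << CHUNK_SHIFT) + (offset & CHUNK_MASK)));
	return 0;
}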
+ */ + for (i = 0; i < sc->stripes; i++) + if (!strcmp(sc->stripe[i].dev->name, major_minor)) { + atomic_inc(&(sc->stripe[i].error_count)); + if (atomic_read(&(sc->stripe[i].error_count)) < + DM_IO_ERROR_THRESHOLD) + queue_work(kstriped, &sc->kstriped_ws); + } + + return error; +} + +static struct target_type stripe_target = { + .name = "striped", + .version = {1, 1, 0}, + .module = THIS_MODULE, + .ctr = stripe_ctr, + .dtr = stripe_dtr, + .map = stripe_map, + .end_io = stripe_end_io, + .status = stripe_status, +}; + +int __init dm_stripe_init(void) +{ + int r; + + r = dm_register_target(&stripe_target); + if (r < 0) { + DMWARN("target registration failed"); + return r; + } + + kstriped = create_singlethread_workqueue("kstriped"); + if (!kstriped) { + DMERR("failed to create workqueue kstriped"); + dm_unregister_target(&stripe_target); + return -ENOMEM; + } + + return r; +} + +void dm_stripe_exit(void) +{ + if (dm_unregister_target(&stripe_target)) + DMWARN("target unregistration failed"); + + destroy_workqueue(kstriped); + + return; +} diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c new file mode 100644 index 0000000..04e5fd7 --- /dev/null +++ b/drivers/md/dm-table.c @@ -0,0 +1,998 @@ +/* + * Copyright (C) 2001 Sistina Software (UK) Limited. + * Copyright (C) 2004 Red Hat, Inc. All rights reserved. + * + * This file is released under the GPL. + */ + +#include "dm.h" + +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <linux/blkdev.h> +#include <linux/namei.h> +#include <linux/ctype.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/mutex.h> +#include <asm/atomic.h> + +#define DM_MSG_PREFIX "table" + +#define MAX_DEPTH 16 +#define NODE_SIZE L1_CACHE_BYTES +#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t)) +#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1) + +struct dm_table { + struct mapped_device *md; + atomic_t holders; + + /* btree table */ + unsigned int depth; + unsigned int counts[MAX_DEPTH]; /* in nodes */ + sector_t *index[MAX_DEPTH]; + + unsigned int num_targets; + unsigned int num_allocated; + sector_t *highs; + struct dm_target *targets; + + /* + * Indicates the rw permissions for the new logical + * device. This should be a combination of FMODE_READ + * and FMODE_WRITE. + */ + fmode_t mode; + + /* a list of devices used by this table */ + struct list_head devices; + + /* + * These are optimistic limits taken from all the + * targets, some targets will need smaller limits. + */ + struct io_restrictions limits; + + /* events get handed up using this callback */ + void (*event_fn)(void *); + void *event_context; +}; + +/* + * Similar to ceiling(log_size(n)) + */ +static unsigned int int_log(unsigned int n, unsigned int base) +{ + int result = 0; + + while (n > 1) { + n = dm_div_up(n, base); + result++; + } + + return result; +} + +/* + * Returns the minimum that is _not_ zero, unless both are zero. + */ +#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r)) + +/* + * Combine two io_restrictions, always taking the lower value. 
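+ *
+ * "Lower" here means min_not_zero(): min_not_zero(0, 128) is 128 and
+ * min_not_zero(64, 128) is 64, so an unset limit never hides a real one.
+ * hardsect_size is the exception and takes the larger of the two values.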
+ */ +static void combine_restrictions_low(struct io_restrictions *lhs, + struct io_restrictions *rhs) +{ + lhs->max_sectors = + min_not_zero(lhs->max_sectors, rhs->max_sectors); + + lhs->max_phys_segments = + min_not_zero(lhs->max_phys_segments, rhs->max_phys_segments); + + lhs->max_hw_segments = + min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments); + + lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size); + + lhs->max_segment_size = + min_not_zero(lhs->max_segment_size, rhs->max_segment_size); + + lhs->max_hw_sectors = + min_not_zero(lhs->max_hw_sectors, rhs->max_hw_sectors); + + lhs->seg_boundary_mask = + min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask); + + lhs->bounce_pfn = min_not_zero(lhs->bounce_pfn, rhs->bounce_pfn); + + lhs->no_cluster |= rhs->no_cluster; +} + +/* + * Calculate the index of the child node of the n'th node k'th key. + */ +static inline unsigned int get_child(unsigned int n, unsigned int k) +{ + return (n * CHILDREN_PER_NODE) + k; +} + +/* + * Return the n'th node of level l from table t. + */ +static inline sector_t *get_node(struct dm_table *t, + unsigned int l, unsigned int n) +{ + return t->index[l] + (n * KEYS_PER_NODE); +} + +/* + * Return the highest key that you could lookup from the n'th + * node on level l of the btree. + */ +static sector_t high(struct dm_table *t, unsigned int l, unsigned int n) +{ + for (; l < t->depth - 1; l++) + n = get_child(n, CHILDREN_PER_NODE - 1); + + if (n >= t->counts[l]) + return (sector_t) - 1; + + return get_node(t, l, n)[KEYS_PER_NODE - 1]; +} + +/* + * Fills in a level of the btree based on the highs of the level + * below it. + */ +static int setup_btree_index(unsigned int l, struct dm_table *t) +{ + unsigned int n, k; + sector_t *node; + + for (n = 0U; n < t->counts[l]; n++) { + node = get_node(t, l, n); + + for (k = 0U; k < KEYS_PER_NODE; k++) + node[k] = high(t, l + 1, get_child(n, k)); + } + + return 0; +} + +void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size) +{ + unsigned long size; + void *addr; + + /* + * Check that we're not going to overflow. + */ + if (nmemb > (ULONG_MAX / elem_size)) + return NULL; + + size = nmemb * elem_size; + addr = vmalloc(size); + if (addr) + memset(addr, 0, size); + + return addr; +} + +/* + * highs, and targets are managed as dynamic arrays during a + * table load. + */ +static int alloc_targets(struct dm_table *t, unsigned int num) +{ + sector_t *n_highs; + struct dm_target *n_targets; + int n = t->num_targets; + + /* + * Allocate both the target array and offset array at once. + * Append an empty entry to catch sectors beyond the end of + * the device. 
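+ *
+ * The single dm_vcalloc() below holds the sector_t highs[] array at the
+ * front with the dm_target array starting straight after it; entries for
+ * targets that have not been loaded yet are memset to all ones, so they
+ * behave as (sector_t) -1 sentinels rather than stale boundaries.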
+ */ + n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) + + sizeof(sector_t)); + if (!n_highs) + return -ENOMEM; + + n_targets = (struct dm_target *) (n_highs + num); + + if (n) { + memcpy(n_highs, t->highs, sizeof(*n_highs) * n); + memcpy(n_targets, t->targets, sizeof(*n_targets) * n); + } + + memset(n_highs + n, -1, sizeof(*n_highs) * (num - n)); + vfree(t->highs); + + t->num_allocated = num; + t->highs = n_highs; + t->targets = n_targets; + + return 0; +} + +int dm_table_create(struct dm_table **result, fmode_t mode, + unsigned num_targets, struct mapped_device *md) +{ + struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL); + + if (!t) + return -ENOMEM; + + INIT_LIST_HEAD(&t->devices); + atomic_set(&t->holders, 1); + + if (!num_targets) + num_targets = KEYS_PER_NODE; + + num_targets = dm_round_up(num_targets, KEYS_PER_NODE); + + if (alloc_targets(t, num_targets)) { + kfree(t); + t = NULL; + return -ENOMEM; + } + + t->mode = mode; + t->md = md; + *result = t; + return 0; +} + +static void free_devices(struct list_head *devices) +{ + struct list_head *tmp, *next; + + list_for_each_safe(tmp, next, devices) { + struct dm_dev_internal *dd = + list_entry(tmp, struct dm_dev_internal, list); + kfree(dd); + } +} + +static void table_destroy(struct dm_table *t) +{ + unsigned int i; + + /* free the indexes (see dm_table_complete) */ + if (t->depth >= 2) + vfree(t->index[t->depth - 2]); + + /* free the targets */ + for (i = 0; i < t->num_targets; i++) { + struct dm_target *tgt = t->targets + i; + + if (tgt->type->dtr) + tgt->type->dtr(tgt); + + dm_put_target_type(tgt->type); + } + + vfree(t->highs); + + /* free the device list */ + if (t->devices.next != &t->devices) { + DMWARN("devices still present during destroy: " + "dm_table_remove_device calls missing"); + + free_devices(&t->devices); + } + + kfree(t); +} + +void dm_table_get(struct dm_table *t) +{ + atomic_inc(&t->holders); +} + +void dm_table_put(struct dm_table *t) +{ + if (!t) + return; + + if (atomic_dec_and_test(&t->holders)) + table_destroy(t); +} + +/* + * Checks to see if we need to extend highs or targets. + */ +static inline int check_space(struct dm_table *t) +{ + if (t->num_targets >= t->num_allocated) + return alloc_targets(t, t->num_allocated * 2); + + return 0; +} + +/* + * See if we've already got a device in the list. + */ +static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev) +{ + struct dm_dev_internal *dd; + + list_for_each_entry (dd, l, list) + if (dd->dm_dev.bdev->bd_dev == dev) + return dd; + + return NULL; +} + +/* + * Open a device so we can use it as a map destination. + */ +static int open_dev(struct dm_dev_internal *d, dev_t dev, + struct mapped_device *md) +{ + static char *_claim_ptr = "I belong to device-mapper"; + struct block_device *bdev; + + int r; + + BUG_ON(d->dm_dev.bdev); + + bdev = open_by_devnum(dev, d->dm_dev.mode); + if (IS_ERR(bdev)) + return PTR_ERR(bdev); + r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md)); + if (r) + blkdev_put(bdev, d->dm_dev.mode); + else + d->dm_dev.bdev = bdev; + return r; +} + +/* + * Close a device that we've been using. + */ +static void close_dev(struct dm_dev_internal *d, struct mapped_device *md) +{ + if (!d->dm_dev.bdev) + return; + + bd_release_from_disk(d->dm_dev.bdev, dm_disk(md)); + blkdev_put(d->dm_dev.bdev, d->dm_dev.mode); + d->dm_dev.bdev = NULL; +} + +/* + * If possible, this checks an area of a destination device is valid. 
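+ *
+ * A device whose size is not yet known (an i_size of zero) is accepted
+ * optimistically.  Otherwise [start, start + len) must lie entirely inside
+ * the device: for a 1 GiB device dev_size is 2097152 sectors, so
+ * start == 2097151 with len == 1 passes while len == 2 is rejected.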
+ */ +static int check_device_area(struct dm_dev_internal *dd, sector_t start, + sector_t len) +{ + sector_t dev_size = dd->dm_dev.bdev->bd_inode->i_size >> SECTOR_SHIFT; + + if (!dev_size) + return 1; + + return ((start < dev_size) && (len <= (dev_size - start))); +} + +/* + * This upgrades the mode on an already open dm_dev. Being + * careful to leave things as they were if we fail to reopen the + * device. + */ +static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode, + struct mapped_device *md) +{ + int r; + struct dm_dev_internal dd_copy; + dev_t dev = dd->dm_dev.bdev->bd_dev; + + dd_copy = *dd; + + dd->dm_dev.mode |= new_mode; + dd->dm_dev.bdev = NULL; + r = open_dev(dd, dev, md); + if (!r) + close_dev(&dd_copy, md); + else + *dd = dd_copy; + + return r; +} + +/* + * Add a device to the list, or just increment the usage count if + * it's already present. + */ +static int __table_get_device(struct dm_table *t, struct dm_target *ti, + const char *path, sector_t start, sector_t len, + fmode_t mode, struct dm_dev **result) +{ + int r; + dev_t uninitialized_var(dev); + struct dm_dev_internal *dd; + unsigned int major, minor; + + BUG_ON(!t); + + if (sscanf(path, "%u:%u", &major, &minor) == 2) { + /* Extract the major/minor numbers */ + dev = MKDEV(major, minor); + if (MAJOR(dev) != major || MINOR(dev) != minor) + return -EOVERFLOW; + } else { + /* convert the path to a device */ + struct block_device *bdev = lookup_bdev(path); + + if (IS_ERR(bdev)) + return PTR_ERR(bdev); + dev = bdev->bd_dev; + bdput(bdev); + } + + dd = find_device(&t->devices, dev); + if (!dd) { + dd = kmalloc(sizeof(*dd), GFP_KERNEL); + if (!dd) + return -ENOMEM; + + dd->dm_dev.mode = mode; + dd->dm_dev.bdev = NULL; + + if ((r = open_dev(dd, dev, t->md))) { + kfree(dd); + return r; + } + + format_dev_t(dd->dm_dev.name, dev); + + atomic_set(&dd->count, 0); + list_add(&dd->list, &t->devices); + + } else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) { + r = upgrade_mode(dd, mode, t->md); + if (r) + return r; + } + atomic_inc(&dd->count); + + if (!check_device_area(dd, start, len)) { + DMWARN("device %s too small for target", path); + dm_put_device(ti, &dd->dm_dev); + return -EINVAL; + } + + *result = &dd->dm_dev; + + return 0; +} + +void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + struct io_restrictions *rs = &ti->limits; + char b[BDEVNAME_SIZE]; + + if (unlikely(!q)) { + DMWARN("%s: Cannot set limits for nonexistent device %s", + dm_device_name(ti->table->md), bdevname(bdev, b)); + return; + } + + /* + * Combine the device limits low. + * + * FIXME: if we move an io_restriction struct + * into q this would just be a call to + * combine_restrictions_low() + */ + rs->max_sectors = + min_not_zero(rs->max_sectors, q->max_sectors); + + /* + * Check if merge fn is supported. + * If not we'll force DM to use PAGE_SIZE or + * smaller I/O, just to be safe. 
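+ *
+ * (On a machine with 4 KiB pages that clamp is PAGE_SIZE >> 9 == 8
+ * sectors, i.e. at most one page per bio.)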
+ */ + + if (q->merge_bvec_fn && !ti->type->merge) + rs->max_sectors = + min_not_zero(rs->max_sectors, + (unsigned int) (PAGE_SIZE >> 9)); + + rs->max_phys_segments = + min_not_zero(rs->max_phys_segments, + q->max_phys_segments); + + rs->max_hw_segments = + min_not_zero(rs->max_hw_segments, q->max_hw_segments); + + rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size); + + rs->max_segment_size = + min_not_zero(rs->max_segment_size, q->max_segment_size); + + rs->max_hw_sectors = + min_not_zero(rs->max_hw_sectors, q->max_hw_sectors); + + rs->seg_boundary_mask = + min_not_zero(rs->seg_boundary_mask, + q->seg_boundary_mask); + + rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn); + + rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); +} +EXPORT_SYMBOL_GPL(dm_set_device_limits); + +int dm_get_device(struct dm_target *ti, const char *path, sector_t start, + sector_t len, fmode_t mode, struct dm_dev **result) +{ + int r = __table_get_device(ti->table, ti, path, + start, len, mode, result); + + if (!r) + dm_set_device_limits(ti, (*result)->bdev); + + return r; +} + +/* + * Decrement a devices use count and remove it if necessary. + */ +void dm_put_device(struct dm_target *ti, struct dm_dev *d) +{ + struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal, + dm_dev); + + if (atomic_dec_and_test(&dd->count)) { + close_dev(dd, ti->table->md); + list_del(&dd->list); + kfree(dd); + } +} + +/* + * Checks to see if the target joins onto the end of the table. + */ +static int adjoin(struct dm_table *table, struct dm_target *ti) +{ + struct dm_target *prev; + + if (!table->num_targets) + return !ti->begin; + + prev = &table->targets[table->num_targets - 1]; + return (ti->begin == (prev->begin + prev->len)); +} + +/* + * Used to dynamically allocate the arg array. + */ +static char **realloc_argv(unsigned *array_size, char **old_argv) +{ + char **argv; + unsigned new_size; + + new_size = *array_size ? *array_size * 2 : 64; + argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL); + if (argv) { + memcpy(argv, old_argv, *array_size * sizeof(*argv)); + *array_size = new_size; + } + + kfree(old_argv); + return argv; +} + +/* + * Destructively splits up the argument list to pass to ctr. + */ +int dm_split_args(int *argc, char ***argvp, char *input) +{ + char *start, *end = input, *out, **argv = NULL; + unsigned array_size = 0; + + *argc = 0; + + if (!input) { + *argvp = NULL; + return 0; + } + + argv = realloc_argv(&array_size, argv); + if (!argv) + return -ENOMEM; + + while (1) { + start = end; + + /* Skip whitespace */ + while (*start && isspace(*start)) + start++; + + if (!*start) + break; /* success, we hit the end */ + + /* 'out' is used to remove any back-quotes */ + end = out = start; + while (*end) { + /* Everything apart from '\0' can be quoted */ + if (*end == '\\' && *(end + 1)) { + *out++ = *(end + 1); + end += 2; + continue; + } + + if (isspace(*end)) + break; /* end of token */ + + *out++ = *end++; + } + + /* have we already filled the array ? 
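+ * (realloc_argv() starts at 64 slots and doubles on each overflow, so even
+ * a table line with a few hundred parameters costs only a handful of
+ * reallocations.)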
*/ + if ((*argc + 1) > array_size) { + argv = realloc_argv(&array_size, argv); + if (!argv) + return -ENOMEM; + } + + /* we know this is whitespace */ + if (*end) + end++; + + /* terminate the string and put it in the array */ + *out = '\0'; + argv[*argc] = start; + (*argc)++; + } + + *argvp = argv; + return 0; +} + +static void check_for_valid_limits(struct io_restrictions *rs) +{ + if (!rs->max_sectors) + rs->max_sectors = SAFE_MAX_SECTORS; + if (!rs->max_hw_sectors) + rs->max_hw_sectors = SAFE_MAX_SECTORS; + if (!rs->max_phys_segments) + rs->max_phys_segments = MAX_PHYS_SEGMENTS; + if (!rs->max_hw_segments) + rs->max_hw_segments = MAX_HW_SEGMENTS; + if (!rs->hardsect_size) + rs->hardsect_size = 1 << SECTOR_SHIFT; + if (!rs->max_segment_size) + rs->max_segment_size = MAX_SEGMENT_SIZE; + if (!rs->seg_boundary_mask) + rs->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; + if (!rs->bounce_pfn) + rs->bounce_pfn = -1; +} + +int dm_table_add_target(struct dm_table *t, const char *type, + sector_t start, sector_t len, char *params) +{ + int r = -EINVAL, argc; + char **argv; + struct dm_target *tgt; + + if ((r = check_space(t))) + return r; + + tgt = t->targets + t->num_targets; + memset(tgt, 0, sizeof(*tgt)); + + if (!len) { + DMERR("%s: zero-length target", dm_device_name(t->md)); + return -EINVAL; + } + + tgt->type = dm_get_target_type(type); + if (!tgt->type) { + DMERR("%s: %s: unknown target type", dm_device_name(t->md), + type); + return -EINVAL; + } + + tgt->table = t; + tgt->begin = start; + tgt->len = len; + tgt->error = "Unknown error"; + + /* + * Does this target adjoin the previous one ? + */ + if (!adjoin(t, tgt)) { + tgt->error = "Gap in table"; + r = -EINVAL; + goto bad; + } + + r = dm_split_args(&argc, &argv, params); + if (r) { + tgt->error = "couldn't split parameters (insufficient memory)"; + goto bad; + } + + r = tgt->type->ctr(tgt, argc, argv); + kfree(argv); + if (r) + goto bad; + + t->highs[t->num_targets++] = tgt->begin + tgt->len - 1; + + /* FIXME: the plan is to combine high here and then have + * the merge fn apply the target level restrictions. */ + combine_restrictions_low(&t->limits, &tgt->limits); + return 0; + + bad: + DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error); + dm_put_target_type(tgt->type); + return r; +} + +static int setup_indexes(struct dm_table *t) +{ + int i; + unsigned int total = 0; + sector_t *indexes; + + /* allocate the space for *all* the indexes */ + for (i = t->depth - 2; i >= 0; i--) { + t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE); + total += t->counts[i]; + } + + indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE); + if (!indexes) + return -ENOMEM; + + /* set up internal nodes, bottom-up */ + for (i = t->depth - 2; i >= 0; i--) { + t->index[i] = indexes; + indexes += (KEYS_PER_NODE * t->counts[i]); + setup_btree_index(i, t); + } + + return 0; +} + +/* + * Builds the btree to index the map. + */ +int dm_table_complete(struct dm_table *t) +{ + int r = 0; + unsigned int leaf_nodes; + + check_for_valid_limits(&t->limits); + + /* how many indexes will the btree have ? 
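+ * Assuming a 64-byte L1 cache line and an 8-byte sector_t, KEYS_PER_NODE
+ * is 8 and CHILDREN_PER_NODE is 9; a 100-target table then needs
+ * dm_div_up(100, 8) == 13 leaf nodes and a depth of 1 + int_log(13, 9) == 3,
+ * i.e. one root node, two internal nodes and the 13 leaves taken from
+ * t->highs itself.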
*/ + leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE); + t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE); + + /* leaf layer has already been set up */ + t->counts[t->depth - 1] = leaf_nodes; + t->index[t->depth - 1] = t->highs; + + if (t->depth >= 2) + r = setup_indexes(t); + + return r; +} + +static DEFINE_MUTEX(_event_lock); +void dm_table_event_callback(struct dm_table *t, + void (*fn)(void *), void *context) +{ + mutex_lock(&_event_lock); + t->event_fn = fn; + t->event_context = context; + mutex_unlock(&_event_lock); +} + +void dm_table_event(struct dm_table *t) +{ + /* + * You can no longer call dm_table_event() from interrupt + * context, use a bottom half instead. + */ + BUG_ON(in_interrupt()); + + mutex_lock(&_event_lock); + if (t->event_fn) + t->event_fn(t->event_context); + mutex_unlock(&_event_lock); +} + +sector_t dm_table_get_size(struct dm_table *t) +{ + return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0; +} + +struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index) +{ + if (index >= t->num_targets) + return NULL; + + return t->targets + index; +} + +/* + * Search the btree for the correct target. + * + * Caller should check returned pointer with dm_target_is_valid() + * to trap I/O beyond end of device. + */ +struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) +{ + unsigned int l, n = 0, k = 0; + sector_t *node; + + for (l = 0; l < t->depth; l++) { + n = get_child(n, k); + node = get_node(t, l, n); + + for (k = 0; k < KEYS_PER_NODE; k++) + if (node[k] >= sector) + break; + } + + return &t->targets[(KEYS_PER_NODE * n) + k]; +} + +void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q) +{ + /* + * Make sure we obey the optimistic sub devices + * restrictions. 
+ */ + blk_queue_max_sectors(q, t->limits.max_sectors); + q->max_phys_segments = t->limits.max_phys_segments; + q->max_hw_segments = t->limits.max_hw_segments; + q->hardsect_size = t->limits.hardsect_size; + q->max_segment_size = t->limits.max_segment_size; + q->max_hw_sectors = t->limits.max_hw_sectors; + q->seg_boundary_mask = t->limits.seg_boundary_mask; + q->bounce_pfn = t->limits.bounce_pfn; + + if (t->limits.no_cluster) + queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q); + else + queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q); + +} + +unsigned int dm_table_get_num_targets(struct dm_table *t) +{ + return t->num_targets; +} + +struct list_head *dm_table_get_devices(struct dm_table *t) +{ + return &t->devices; +} + +fmode_t dm_table_get_mode(struct dm_table *t) +{ + return t->mode; +} + +static void suspend_targets(struct dm_table *t, unsigned postsuspend) +{ + int i = t->num_targets; + struct dm_target *ti = t->targets; + + while (i--) { + if (postsuspend) { + if (ti->type->postsuspend) + ti->type->postsuspend(ti); + } else if (ti->type->presuspend) + ti->type->presuspend(ti); + + ti++; + } +} + +void dm_table_presuspend_targets(struct dm_table *t) +{ + if (!t) + return; + + suspend_targets(t, 0); +} + +void dm_table_postsuspend_targets(struct dm_table *t) +{ + if (!t) + return; + + suspend_targets(t, 1); +} + +int dm_table_resume_targets(struct dm_table *t) +{ + int i, r = 0; + + for (i = 0; i < t->num_targets; i++) { + struct dm_target *ti = t->targets + i; + + if (!ti->type->preresume) + continue; + + r = ti->type->preresume(ti); + if (r) + return r; + } + + for (i = 0; i < t->num_targets; i++) { + struct dm_target *ti = t->targets + i; + + if (ti->type->resume) + ti->type->resume(ti); + } + + return 0; +} + +int dm_table_any_congested(struct dm_table *t, int bdi_bits) +{ + struct dm_dev_internal *dd; + struct list_head *devices = dm_table_get_devices(t); + int r = 0; + + list_for_each_entry(dd, devices, list) { + struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev); + char b[BDEVNAME_SIZE]; + + if (likely(q)) + r |= bdi_congested(&q->backing_dev_info, bdi_bits); + else + DMWARN_LIMIT("%s: any_congested: nonexistent device %s", + dm_device_name(t->md), + bdevname(dd->dm_dev.bdev, b)); + } + + return r; +} + +void dm_table_unplug_all(struct dm_table *t) +{ + struct dm_dev_internal *dd; + struct list_head *devices = dm_table_get_devices(t); + + list_for_each_entry(dd, devices, list) { + struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev); + char b[BDEVNAME_SIZE]; + + if (likely(q)) + blk_unplug(q); + else + DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s", + dm_device_name(t->md), + bdevname(dd->dm_dev.bdev, b)); + } +} + +struct mapped_device *dm_table_get_md(struct dm_table *t) +{ + dm_get(t->md); + + return t->md; +} + +EXPORT_SYMBOL(dm_vcalloc); +EXPORT_SYMBOL(dm_get_device); +EXPORT_SYMBOL(dm_put_device); +EXPORT_SYMBOL(dm_table_event); +EXPORT_SYMBOL(dm_table_get_size); +EXPORT_SYMBOL(dm_table_get_mode); +EXPORT_SYMBOL(dm_table_get_md); +EXPORT_SYMBOL(dm_table_put); +EXPORT_SYMBOL(dm_table_get); +EXPORT_SYMBOL(dm_table_unplug_all); diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c new file mode 100644 index 0000000..835cf95 --- /dev/null +++ b/drivers/md/dm-target.c @@ -0,0 +1,195 @@ +/* + * Copyright (C) 2001 Sistina Software (UK) Limited + * + * This file is released under the GPL. 
+ */ + +#include "dm.h" + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kmod.h> +#include <linux/bio.h> +#include <linux/slab.h> + +#define DM_MSG_PREFIX "target" + +struct tt_internal { + struct target_type tt; + + struct list_head list; + long use; +}; + +static LIST_HEAD(_targets); +static DECLARE_RWSEM(_lock); + +#define DM_MOD_NAME_SIZE 32 + +static inline struct tt_internal *__find_target_type(const char *name) +{ + struct tt_internal *ti; + + list_for_each_entry (ti, &_targets, list) + if (!strcmp(name, ti->tt.name)) + return ti; + + return NULL; +} + +static struct tt_internal *get_target_type(const char *name) +{ + struct tt_internal *ti; + + down_read(&_lock); + + ti = __find_target_type(name); + if (ti) { + if ((ti->use == 0) && !try_module_get(ti->tt.module)) + ti = NULL; + else + ti->use++; + } + + up_read(&_lock); + return ti; +} + +static void load_module(const char *name) +{ + request_module("dm-%s", name); +} + +struct target_type *dm_get_target_type(const char *name) +{ + struct tt_internal *ti = get_target_type(name); + + if (!ti) { + load_module(name); + ti = get_target_type(name); + } + + return ti ? &ti->tt : NULL; +} + +void dm_put_target_type(struct target_type *t) +{ + struct tt_internal *ti = (struct tt_internal *) t; + + down_read(&_lock); + if (--ti->use == 0) + module_put(ti->tt.module); + + BUG_ON(ti->use < 0); + up_read(&_lock); + + return; +} + +static struct tt_internal *alloc_target(struct target_type *t) +{ + struct tt_internal *ti = kzalloc(sizeof(*ti), GFP_KERNEL); + + if (ti) + ti->tt = *t; + + return ti; +} + + +int dm_target_iterate(void (*iter_func)(struct target_type *tt, + void *param), void *param) +{ + struct tt_internal *ti; + + down_read(&_lock); + list_for_each_entry (ti, &_targets, list) + iter_func(&ti->tt, param); + up_read(&_lock); + + return 0; +} + +int dm_register_target(struct target_type *t) +{ + int rv = 0; + struct tt_internal *ti = alloc_target(t); + + if (!ti) + return -ENOMEM; + + down_write(&_lock); + if (__find_target_type(t->name)) + rv = -EEXIST; + else + list_add(&ti->list, &_targets); + + up_write(&_lock); + if (rv) + kfree(ti); + return rv; +} + +int dm_unregister_target(struct target_type *t) +{ + struct tt_internal *ti; + + down_write(&_lock); + if (!(ti = __find_target_type(t->name))) { + up_write(&_lock); + return -EINVAL; + } + + if (ti->use) { + up_write(&_lock); + return -ETXTBSY; + } + + list_del(&ti->list); + kfree(ti); + + up_write(&_lock); + return 0; +} + +/* + * io-err: always fails an io, useful for bringing + * up LVs that have holes in them. 
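+ *
+ * The target takes no arguments, so a hole can be plugged with a table
+ * line along the lines of
+ *     0 409600 error
+ * which makes that 200 MiB region fail every bio with -EIO.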
+ */ +static int io_err_ctr(struct dm_target *ti, unsigned int argc, char **args) +{ + return 0; +} + +static void io_err_dtr(struct dm_target *ti) +{ + /* empty */ +} + +static int io_err_map(struct dm_target *ti, struct bio *bio, + union map_info *map_context) +{ + return -EIO; +} + +static struct target_type error_target = { + .name = "error", + .version = {1, 0, 1}, + .ctr = io_err_ctr, + .dtr = io_err_dtr, + .map = io_err_map, +}; + +int __init dm_target_init(void) +{ + return dm_register_target(&error_target); +} + +void dm_target_exit(void) +{ + if (dm_unregister_target(&error_target)) + DMWARN("error target unregistration failed"); +} + +EXPORT_SYMBOL(dm_register_target); +EXPORT_SYMBOL(dm_unregister_target); diff --git a/drivers/md/dm-uevent.c b/drivers/md/dm-uevent.c new file mode 100644 index 0000000..6f65883 --- /dev/null +++ b/drivers/md/dm-uevent.c @@ -0,0 +1,222 @@ +/* + * Device Mapper Uevent Support (dm-uevent) + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright IBM Corporation, 2007 + * Author: Mike Anderson <andmike@linux.vnet.ibm.com> + */ +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/kobject.h> +#include <linux/dm-ioctl.h> + +#include "dm.h" +#include "dm-uevent.h" + +#define DM_MSG_PREFIX "uevent" + +static const struct { + enum dm_uevent_type type; + enum kobject_action action; + char *name; +} _dm_uevent_type_names[] = { + {DM_UEVENT_PATH_FAILED, KOBJ_CHANGE, "PATH_FAILED"}, + {DM_UEVENT_PATH_REINSTATED, KOBJ_CHANGE, "PATH_REINSTATED"}, +}; + +static struct kmem_cache *_dm_event_cache; + +struct dm_uevent { + struct mapped_device *md; + enum kobject_action action; + struct kobj_uevent_env ku_env; + struct list_head elist; + char name[DM_NAME_LEN]; + char uuid[DM_UUID_LEN]; +}; + +static void dm_uevent_free(struct dm_uevent *event) +{ + kmem_cache_free(_dm_event_cache, event); +} + +static struct dm_uevent *dm_uevent_alloc(struct mapped_device *md) +{ + struct dm_uevent *event; + + event = kmem_cache_zalloc(_dm_event_cache, GFP_ATOMIC); + if (!event) + return NULL; + + INIT_LIST_HEAD(&event->elist); + event->md = md; + + return event; +} + +static struct dm_uevent *dm_build_path_uevent(struct mapped_device *md, + struct dm_target *ti, + enum kobject_action action, + const char *dm_action, + const char *path, + unsigned nr_valid_paths) +{ + struct dm_uevent *event; + + event = dm_uevent_alloc(md); + if (!event) { + DMERR("%s: dm_uevent_alloc() failed", __func__); + goto err_nomem; + } + + event->action = action; + + if (add_uevent_var(&event->ku_env, "DM_TARGET=%s", ti->type->name)) { + DMERR("%s: add_uevent_var() for DM_TARGET failed", + __func__); + goto err_add; + } + + if (add_uevent_var(&event->ku_env, "DM_ACTION=%s", dm_action)) { + DMERR("%s: add_uevent_var() for DM_ACTION failed", + __func__); + goto err_add; + } + + if (add_uevent_var(&event->ku_env, 
"DM_SEQNUM=%u", + dm_next_uevent_seq(md))) { + DMERR("%s: add_uevent_var() for DM_SEQNUM failed", + __func__); + goto err_add; + } + + if (add_uevent_var(&event->ku_env, "DM_PATH=%s", path)) { + DMERR("%s: add_uevent_var() for DM_PATH failed", __func__); + goto err_add; + } + + if (add_uevent_var(&event->ku_env, "DM_NR_VALID_PATHS=%d", + nr_valid_paths)) { + DMERR("%s: add_uevent_var() for DM_NR_VALID_PATHS failed", + __func__); + goto err_add; + } + + return event; + +err_add: + dm_uevent_free(event); +err_nomem: + return ERR_PTR(-ENOMEM); +} + +/** + * dm_send_uevents - send uevents for given list + * + * @events: list of events to send + * @kobj: kobject generating event + * + */ +void dm_send_uevents(struct list_head *events, struct kobject *kobj) +{ + int r; + struct dm_uevent *event, *next; + + list_for_each_entry_safe(event, next, events, elist) { + list_del_init(&event->elist); + + /* + * Need to call dm_copy_name_and_uuid from here for now. + * Context of previous var adds and locking used for + * hash_cell not compatable. + */ + if (dm_copy_name_and_uuid(event->md, event->name, + event->uuid)) { + DMERR("%s: dm_copy_name_and_uuid() failed", + __func__); + goto uevent_free; + } + + if (add_uevent_var(&event->ku_env, "DM_NAME=%s", event->name)) { + DMERR("%s: add_uevent_var() for DM_NAME failed", + __func__); + goto uevent_free; + } + + if (add_uevent_var(&event->ku_env, "DM_UUID=%s", event->uuid)) { + DMERR("%s: add_uevent_var() for DM_UUID failed", + __func__); + goto uevent_free; + } + + r = kobject_uevent_env(kobj, event->action, event->ku_env.envp); + if (r) + DMERR("%s: kobject_uevent_env failed", __func__); +uevent_free: + dm_uevent_free(event); + } +} +EXPORT_SYMBOL_GPL(dm_send_uevents); + +/** + * dm_path_uevent - called to create a new path event and queue it + * + * @event_type: path event type enum + * @ti: pointer to a dm_target + * @path: string containing pathname + * @nr_valid_paths: number of valid paths remaining + * + */ +void dm_path_uevent(enum dm_uevent_type event_type, struct dm_target *ti, + const char *path, unsigned nr_valid_paths) +{ + struct mapped_device *md = dm_table_get_md(ti->table); + struct dm_uevent *event; + + if (event_type >= ARRAY_SIZE(_dm_uevent_type_names)) { + DMERR("%s: Invalid event_type %d", __func__, event_type); + goto out; + } + + event = dm_build_path_uevent(md, ti, + _dm_uevent_type_names[event_type].action, + _dm_uevent_type_names[event_type].name, + path, nr_valid_paths); + if (IS_ERR(event)) + goto out; + + dm_uevent_add(md, &event->elist); + +out: + dm_put(md); +} +EXPORT_SYMBOL_GPL(dm_path_uevent); + +int dm_uevent_init(void) +{ + _dm_event_cache = KMEM_CACHE(dm_uevent, 0); + if (!_dm_event_cache) + return -ENOMEM; + + DMINFO("version 1.0.3"); + + return 0; +} + +void dm_uevent_exit(void) +{ + kmem_cache_destroy(_dm_event_cache); +} diff --git a/drivers/md/dm-uevent.h b/drivers/md/dm-uevent.h new file mode 100644 index 0000000..2eccc8b --- /dev/null +++ b/drivers/md/dm-uevent.h @@ -0,0 +1,59 @@ +/* + * Device Mapper Uevent Support + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright IBM Corporation, 2007 + * Author: Mike Anderson <andmike@linux.vnet.ibm.com> + */ +#ifndef DM_UEVENT_H +#define DM_UEVENT_H + +enum dm_uevent_type { + DM_UEVENT_PATH_FAILED, + DM_UEVENT_PATH_REINSTATED, +}; + +#ifdef CONFIG_DM_UEVENT + +extern int dm_uevent_init(void); +extern void dm_uevent_exit(void); +extern void dm_send_uevents(struct list_head *events, struct kobject *kobj); +extern void dm_path_uevent(enum dm_uevent_type event_type, + struct dm_target *ti, const char *path, + unsigned nr_valid_paths); + +#else + +static inline int dm_uevent_init(void) +{ + return 0; +} +static inline void dm_uevent_exit(void) +{ +} +static inline void dm_send_uevents(struct list_head *events, + struct kobject *kobj) +{ +} +static inline void dm_path_uevent(enum dm_uevent_type event_type, + struct dm_target *ti, const char *path, + unsigned nr_valid_paths) +{ +} + +#endif /* CONFIG_DM_UEVENT */ + +#endif /* DM_UEVENT_H */ diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c new file mode 100644 index 0000000..cdbf126 --- /dev/null +++ b/drivers/md/dm-zero.c @@ -0,0 +1,83 @@ +/* + * Copyright (C) 2003 Christophe Saout <christophe@saout.de> + * + * This file is released under the GPL. + */ + +#include <linux/device-mapper.h> + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/bio.h> + +#define DM_MSG_PREFIX "zero" + +/* + * Construct a dummy mapping that only returns zeros + */ +static int zero_ctr(struct dm_target *ti, unsigned int argc, char **argv) +{ + if (argc != 0) { + ti->error = "No arguments required"; + return -EINVAL; + } + + return 0; +} + +/* + * Return zeros only on reads + */ +static int zero_map(struct dm_target *ti, struct bio *bio, + union map_info *map_context) +{ + switch(bio_rw(bio)) { + case READ: + zero_fill_bio(bio); + break; + case READA: + /* readahead of null bytes only wastes buffer cache */ + return -EIO; + case WRITE: + /* writes get silently dropped */ + break; + } + + bio_endio(bio, 0); + + /* accepted bio, don't make new request */ + return DM_MAPIO_SUBMITTED; +} + +static struct target_type zero_target = { + .name = "zero", + .version = {1, 0, 0}, + .module = THIS_MODULE, + .ctr = zero_ctr, + .map = zero_map, +}; + +static int __init dm_zero_init(void) +{ + int r = dm_register_target(&zero_target); + + if (r < 0) + DMERR("register failed %d", r); + + return r; +} + +static void __exit dm_zero_exit(void) +{ + int r = dm_unregister_target(&zero_target); + + if (r < 0) + DMERR("unregister failed %d", r); +} + +module_init(dm_zero_init) +module_exit(dm_zero_exit) + +MODULE_AUTHOR("Christophe Saout <christophe@saout.de>"); +MODULE_DESCRIPTION(DM_NAME " dummy target returning zeros"); +MODULE_LICENSE("GPL"); diff --git a/drivers/md/dm.c b/drivers/md/dm.c new file mode 100644 index 0000000..b798bd3 --- /dev/null +++ b/drivers/md/dm.c @@ -0,0 +1,1712 @@ +/* + * Copyright (C) 2001, 2002 Sistina Software (UK) Limited. + * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. + * + * This file is released under the GPL. 
+ */ + +#include "dm.h" +#include "dm-bio-list.h" +#include "dm-uevent.h" + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/moduleparam.h> +#include <linux/blkpg.h> +#include <linux/bio.h> +#include <linux/buffer_head.h> +#include <linux/mempool.h> +#include <linux/slab.h> +#include <linux/idr.h> +#include <linux/hdreg.h> +#include <linux/blktrace_api.h> + +#define DM_MSG_PREFIX "core" + +static const char *_name = DM_NAME; + +static unsigned int major = 0; +static unsigned int _major = 0; + +static DEFINE_SPINLOCK(_minor_lock); +/* + * One of these is allocated per bio. + */ +struct dm_io { + struct mapped_device *md; + int error; + atomic_t io_count; + struct bio *bio; + unsigned long start_time; +}; + +/* + * One of these is allocated per target within a bio. Hopefully + * this will be simplified out one day. + */ +struct dm_target_io { + struct dm_io *io; + struct dm_target *ti; + union map_info info; +}; + +union map_info *dm_get_mapinfo(struct bio *bio) +{ + if (bio && bio->bi_private) + return &((struct dm_target_io *)bio->bi_private)->info; + return NULL; +} + +#define MINOR_ALLOCED ((void *)-1) + +/* + * Bits for the md->flags field. + */ +#define DMF_BLOCK_IO 0 +#define DMF_SUSPENDED 1 +#define DMF_FROZEN 2 +#define DMF_FREEING 3 +#define DMF_DELETING 4 +#define DMF_NOFLUSH_SUSPENDING 5 + +/* + * Work processed by per-device workqueue. + */ +struct dm_wq_req { + enum { + DM_WQ_FLUSH_DEFERRED, + } type; + struct work_struct work; + struct mapped_device *md; + void *context; +}; + +struct mapped_device { + struct rw_semaphore io_lock; + struct mutex suspend_lock; + spinlock_t pushback_lock; + rwlock_t map_lock; + atomic_t holders; + atomic_t open_count; + + unsigned long flags; + + struct request_queue *queue; + struct gendisk *disk; + char name[16]; + + void *interface_ptr; + + /* + * A list of ios that arrived while we were suspended. + */ + atomic_t pending; + wait_queue_head_t wait; + struct bio_list deferred; + struct bio_list pushback; + + /* + * Processing queue (flush/barriers) + */ + struct workqueue_struct *wq; + + /* + * The current mapping. + */ + struct dm_table *map; + + /* + * io objects are allocated from here. + */ + mempool_t *io_pool; + mempool_t *tio_pool; + + struct bio_set *bs; + + /* + * Event handling. 
+ */ + atomic_t event_nr; + wait_queue_head_t eventq; + atomic_t uevent_seq; + struct list_head uevent_list; + spinlock_t uevent_lock; /* Protect access to uevent_list */ + + /* + * freeze/thaw support require holding onto a super block + */ + struct super_block *frozen_sb; + struct block_device *suspended_bdev; + + /* forced geometry settings */ + struct hd_geometry geometry; +}; + +#define MIN_IOS 256 +static struct kmem_cache *_io_cache; +static struct kmem_cache *_tio_cache; + +static int __init local_init(void) +{ + int r = -ENOMEM; + + /* allocate a slab for the dm_ios */ + _io_cache = KMEM_CACHE(dm_io, 0); + if (!_io_cache) + return r; + + /* allocate a slab for the target ios */ + _tio_cache = KMEM_CACHE(dm_target_io, 0); + if (!_tio_cache) + goto out_free_io_cache; + + r = dm_uevent_init(); + if (r) + goto out_free_tio_cache; + + _major = major; + r = register_blkdev(_major, _name); + if (r < 0) + goto out_uevent_exit; + + if (!_major) + _major = r; + + return 0; + +out_uevent_exit: + dm_uevent_exit(); +out_free_tio_cache: + kmem_cache_destroy(_tio_cache); +out_free_io_cache: + kmem_cache_destroy(_io_cache); + + return r; +} + +static void local_exit(void) +{ + kmem_cache_destroy(_tio_cache); + kmem_cache_destroy(_io_cache); + unregister_blkdev(_major, _name); + dm_uevent_exit(); + + _major = 0; + + DMINFO("cleaned up"); +} + +static int (*_inits[])(void) __initdata = { + local_init, + dm_target_init, + dm_linear_init, + dm_stripe_init, + dm_kcopyd_init, + dm_interface_init, +}; + +static void (*_exits[])(void) = { + local_exit, + dm_target_exit, + dm_linear_exit, + dm_stripe_exit, + dm_kcopyd_exit, + dm_interface_exit, +}; + +static int __init dm_init(void) +{ + const int count = ARRAY_SIZE(_inits); + + int r, i; + + for (i = 0; i < count; i++) { + r = _inits[i](); + if (r) + goto bad; + } + + return 0; + + bad: + while (i--) + _exits[i](); + + return r; +} + +static void __exit dm_exit(void) +{ + int i = ARRAY_SIZE(_exits); + + while (i--) + _exits[i](); +} + +/* + * Block device functions + */ +static int dm_blk_open(struct block_device *bdev, fmode_t mode) +{ + struct mapped_device *md; + + spin_lock(&_minor_lock); + + md = bdev->bd_disk->private_data; + if (!md) + goto out; + + if (test_bit(DMF_FREEING, &md->flags) || + test_bit(DMF_DELETING, &md->flags)) { + md = NULL; + goto out; + } + + dm_get(md); + atomic_inc(&md->open_count); + +out: + spin_unlock(&_minor_lock); + + return md ? 0 : -ENXIO; +} + +static int dm_blk_close(struct gendisk *disk, fmode_t mode) +{ + struct mapped_device *md = disk->private_data; + atomic_dec(&md->open_count); + dm_put(md); + return 0; +} + +int dm_open_count(struct mapped_device *md) +{ + return atomic_read(&md->open_count); +} + +/* + * Guarantees nothing is using the device before it's deleted. 
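+ *
+ * An open device fails this with -EBUSY; otherwise DMF_DELETING is set
+ * under _minor_lock, after which dm_blk_open() above refuses any new
+ * opens of the device.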
+ */ +int dm_lock_for_deletion(struct mapped_device *md) +{ + int r = 0; + + spin_lock(&_minor_lock); + + if (dm_open_count(md)) + r = -EBUSY; + else + set_bit(DMF_DELETING, &md->flags); + + spin_unlock(&_minor_lock); + + return r; +} + +static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) +{ + struct mapped_device *md = bdev->bd_disk->private_data; + + return dm_get_geometry(md, geo); +} + +static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + struct mapped_device *md = bdev->bd_disk->private_data; + struct dm_table *map = dm_get_table(md); + struct dm_target *tgt; + int r = -ENOTTY; + + if (!map || !dm_table_get_size(map)) + goto out; + + /* We only support devices that have a single target */ + if (dm_table_get_num_targets(map) != 1) + goto out; + + tgt = dm_table_get_target(map, 0); + + if (dm_suspended(md)) { + r = -EAGAIN; + goto out; + } + + if (tgt->type->ioctl) + r = tgt->type->ioctl(tgt, cmd, arg); + +out: + dm_table_put(map); + + return r; +} + +static struct dm_io *alloc_io(struct mapped_device *md) +{ + return mempool_alloc(md->io_pool, GFP_NOIO); +} + +static void free_io(struct mapped_device *md, struct dm_io *io) +{ + mempool_free(io, md->io_pool); +} + +static struct dm_target_io *alloc_tio(struct mapped_device *md) +{ + return mempool_alloc(md->tio_pool, GFP_NOIO); +} + +static void free_tio(struct mapped_device *md, struct dm_target_io *tio) +{ + mempool_free(tio, md->tio_pool); +} + +static void start_io_acct(struct dm_io *io) +{ + struct mapped_device *md = io->md; + int cpu; + + io->start_time = jiffies; + + cpu = part_stat_lock(); + part_round_stats(cpu, &dm_disk(md)->part0); + part_stat_unlock(); + dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending); +} + +static void end_io_acct(struct dm_io *io) +{ + struct mapped_device *md = io->md; + struct bio *bio = io->bio; + unsigned long duration = jiffies - io->start_time; + int pending, cpu; + int rw = bio_data_dir(bio); + + cpu = part_stat_lock(); + part_round_stats(cpu, &dm_disk(md)->part0); + part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration); + part_stat_unlock(); + + dm_disk(md)->part0.in_flight = pending = + atomic_dec_return(&md->pending); + + /* nudge anyone waiting on suspend queue */ + if (!pending) + wake_up(&md->wait); +} + +/* + * Add the bio to the list of deferred io. + */ +static int queue_io(struct mapped_device *md, struct bio *bio) +{ + down_write(&md->io_lock); + + if (!test_bit(DMF_BLOCK_IO, &md->flags)) { + up_write(&md->io_lock); + return 1; + } + + bio_list_add(&md->deferred, bio); + + up_write(&md->io_lock); + return 0; /* deferred successfully */ +} + +/* + * Everyone (including functions in this file), should use this + * function to access the md->map field, and make sure they call + * dm_table_put() when finished. + */ +struct dm_table *dm_get_table(struct mapped_device *md) +{ + struct dm_table *t; + + read_lock(&md->map_lock); + t = md->map; + if (t) + dm_table_get(t); + read_unlock(&md->map_lock); + + return t; +} + +/* + * Get the geometry associated with a dm device + */ +int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo) +{ + *geo = md->geometry; + + return 0; +} + +/* + * Set the geometry of a device. 
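+ *
+ * The only check is that the start sector fits inside the faked geometry:
+ * e.g. 1024 cylinders * 255 heads * 63 sectors gives sz == 16450560
+ * sectors (roughly 7.8 GiB), so any geo->start beyond that is rejected
+ * with -EINVAL.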
+ */ +int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo) +{ + sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors; + + if (geo->start > sz) { + DMWARN("Start sector is beyond the geometry limits."); + return -EINVAL; + } + + md->geometry = *geo; + + return 0; +} + +/*----------------------------------------------------------------- + * CRUD START: + * A more elegant soln is in the works that uses the queue + * merge fn, unfortunately there are a couple of changes to + * the block layer that I want to make for this. So in the + * interests of getting something for people to use I give + * you this clearly demarcated crap. + *---------------------------------------------------------------*/ + +static int __noflush_suspending(struct mapped_device *md) +{ + return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); +} + +/* + * Decrements the number of outstanding ios that a bio has been + * cloned into, completing the original io if necc. + */ +static void dec_pending(struct dm_io *io, int error) +{ + unsigned long flags; + int io_error; + struct bio *bio; + struct mapped_device *md = io->md; + + /* Push-back supersedes any I/O errors */ + if (error && !(io->error > 0 && __noflush_suspending(md))) + io->error = error; + + if (atomic_dec_and_test(&io->io_count)) { + if (io->error == DM_ENDIO_REQUEUE) { + /* + * Target requested pushing back the I/O. + * This must be handled before the sleeper on + * suspend queue merges the pushback list. + */ + spin_lock_irqsave(&md->pushback_lock, flags); + if (__noflush_suspending(md)) + bio_list_add(&md->pushback, io->bio); + else + /* noflush suspend was interrupted. */ + io->error = -EIO; + spin_unlock_irqrestore(&md->pushback_lock, flags); + } + + end_io_acct(io); + + io_error = io->error; + bio = io->bio; + + free_io(md, io); + + if (io_error != DM_ENDIO_REQUEUE) { + blk_add_trace_bio(md->queue, io->bio, + BLK_TA_COMPLETE); + + bio_endio(bio, io_error); + } + } +} + +static void clone_endio(struct bio *bio, int error) +{ + int r = 0; + struct dm_target_io *tio = bio->bi_private; + struct dm_io *io = tio->io; + struct mapped_device *md = tio->io->md; + dm_endio_fn endio = tio->ti->type->end_io; + + if (!bio_flagged(bio, BIO_UPTODATE) && !error) + error = -EIO; + + if (endio) { + r = endio(tio->ti, bio, error, &tio->info); + if (r < 0 || r == DM_ENDIO_REQUEUE) + /* + * error and requeue request are handled + * in dec_pending(). + */ + error = r; + else if (r == DM_ENDIO_INCOMPLETE) + /* The target will handle the io */ + return; + else if (r) { + DMWARN("unimplemented target endio return value: %d", r); + BUG(); + } + } + + /* + * Store md for cleanup instead of tio which is about to get freed. + */ + bio->bi_private = md->bs; + + free_tio(md, tio); + bio_put(bio); + dec_pending(io, error); +} + +static sector_t max_io_len(struct mapped_device *md, + sector_t sector, struct dm_target *ti) +{ + sector_t offset = sector - ti->begin; + sector_t len = ti->len - offset; + + /* + * Does the target need to split even further ? + */ + if (ti->split_io) { + sector_t boundary; + boundary = ((offset + ti->split_io) & ~(ti->split_io - 1)) + - offset; + if (len > boundary) + len = boundary; + } + + return len; +} + +static void __map_bio(struct dm_target *ti, struct bio *clone, + struct dm_target_io *tio) +{ + int r; + sector_t sector; + struct mapped_device *md; + + /* + * Sanity checks. + */ + BUG_ON(!clone->bi_size); + + clone->bi_end_io = clone_endio; + clone->bi_private = tio; + + /* + * Map the clone. 
If r == 0 we don't need to do + * anything, the target has assumed ownership of + * this io. + */ + atomic_inc(&tio->io->io_count); + sector = clone->bi_sector; + r = ti->type->map(ti, clone, &tio->info); + if (r == DM_MAPIO_REMAPPED) { + /* the bio has been remapped so dispatch it */ + + blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone, + tio->io->bio->bi_bdev->bd_dev, + clone->bi_sector, sector); + + generic_make_request(clone); + } else if (r < 0 || r == DM_MAPIO_REQUEUE) { + /* error the io and bail out, or requeue it if needed */ + md = tio->io->md; + dec_pending(tio->io, r); + /* + * Store bio_set for cleanup. + */ + clone->bi_private = md->bs; + bio_put(clone); + free_tio(md, tio); + } else if (r) { + DMWARN("unimplemented target map return value: %d", r); + BUG(); + } +} + +struct clone_info { + struct mapped_device *md; + struct dm_table *map; + struct bio *bio; + struct dm_io *io; + sector_t sector; + sector_t sector_count; + unsigned short idx; +}; + +static void dm_bio_destructor(struct bio *bio) +{ + struct bio_set *bs = bio->bi_private; + + bio_free(bio, bs); +} + +/* + * Creates a little bio that is just does part of a bvec. + */ +static struct bio *split_bvec(struct bio *bio, sector_t sector, + unsigned short idx, unsigned int offset, + unsigned int len, struct bio_set *bs) +{ + struct bio *clone; + struct bio_vec *bv = bio->bi_io_vec + idx; + + clone = bio_alloc_bioset(GFP_NOIO, 1, bs); + clone->bi_destructor = dm_bio_destructor; + *clone->bi_io_vec = *bv; + + clone->bi_sector = sector; + clone->bi_bdev = bio->bi_bdev; + clone->bi_rw = bio->bi_rw; + clone->bi_vcnt = 1; + clone->bi_size = to_bytes(len); + clone->bi_io_vec->bv_offset = offset; + clone->bi_io_vec->bv_len = clone->bi_size; + clone->bi_flags |= 1 << BIO_CLONED; + + return clone; +} + +/* + * Creates a bio that consists of range of complete bvecs. + */ +static struct bio *clone_bio(struct bio *bio, sector_t sector, + unsigned short idx, unsigned short bv_count, + unsigned int len, struct bio_set *bs) +{ + struct bio *clone; + + clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs); + __bio_clone(clone, bio); + clone->bi_destructor = dm_bio_destructor; + clone->bi_sector = sector; + clone->bi_idx = idx; + clone->bi_vcnt = idx + bv_count; + clone->bi_size = to_bytes(len); + clone->bi_flags &= ~(1 << BIO_SEG_VALID); + + return clone; +} + +static int __clone_and_map(struct clone_info *ci) +{ + struct bio *clone, *bio = ci->bio; + struct dm_target *ti; + sector_t len = 0, max; + struct dm_target_io *tio; + + ti = dm_table_find_target(ci->map, ci->sector); + if (!dm_target_is_valid(ti)) + return -EIO; + + max = max_io_len(ci->md, ci->sector, ti); + + /* + * Allocate a target io object. + */ + tio = alloc_tio(ci->md); + tio->io = ci->io; + tio->ti = ti; + memset(&tio->info, 0, sizeof(tio->info)); + + if (ci->sector_count <= max) { + /* + * Optimise for the simple case where we can do all of + * the remaining io with a single clone. + */ + clone = clone_bio(bio, ci->sector, ci->idx, + bio->bi_vcnt - ci->idx, ci->sector_count, + ci->md->bs); + __map_bio(ti, clone, tio); + ci->sector_count = 0; + + } else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) { + /* + * There are some bvecs that don't span targets. + * Do as many of these as possible. 
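+ *
+ * This middle case walks forward from ci->idx, accumulating whole bvecs
+ * until the next one would cross the target boundary, and clones that run
+ * in one bio; a single bvec that itself straddles the boundary is handled
+ * by the split_bvec() case below.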
+ */ + int i; + sector_t remaining = max; + sector_t bv_len; + + for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) { + bv_len = to_sector(bio->bi_io_vec[i].bv_len); + + if (bv_len > remaining) + break; + + remaining -= bv_len; + len += bv_len; + } + + clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len, + ci->md->bs); + __map_bio(ti, clone, tio); + + ci->sector += len; + ci->sector_count -= len; + ci->idx = i; + + } else { + /* + * Handle a bvec that must be split between two or more targets. + */ + struct bio_vec *bv = bio->bi_io_vec + ci->idx; + sector_t remaining = to_sector(bv->bv_len); + unsigned int offset = 0; + + do { + if (offset) { + ti = dm_table_find_target(ci->map, ci->sector); + if (!dm_target_is_valid(ti)) + return -EIO; + + max = max_io_len(ci->md, ci->sector, ti); + + tio = alloc_tio(ci->md); + tio->io = ci->io; + tio->ti = ti; + memset(&tio->info, 0, sizeof(tio->info)); + } + + len = min(remaining, max); + + clone = split_bvec(bio, ci->sector, ci->idx, + bv->bv_offset + offset, len, + ci->md->bs); + + __map_bio(ti, clone, tio); + + ci->sector += len; + ci->sector_count -= len; + offset += to_bytes(len); + } while (remaining -= len); + + ci->idx++; + } + + return 0; +} + +/* + * Split the bio into several clones. + */ +static int __split_bio(struct mapped_device *md, struct bio *bio) +{ + struct clone_info ci; + int error = 0; + + ci.map = dm_get_table(md); + if (unlikely(!ci.map)) + return -EIO; + + ci.md = md; + ci.bio = bio; + ci.io = alloc_io(md); + ci.io->error = 0; + atomic_set(&ci.io->io_count, 1); + ci.io->bio = bio; + ci.io->md = md; + ci.sector = bio->bi_sector; + ci.sector_count = bio_sectors(bio); + ci.idx = bio->bi_idx; + + start_io_acct(ci.io); + while (ci.sector_count && !error) + error = __clone_and_map(&ci); + + /* drop the extra reference count */ + dec_pending(ci.io, error); + dm_table_put(ci.map); + + return 0; +} +/*----------------------------------------------------------------- + * CRUD END + *---------------------------------------------------------------*/ + +static int dm_merge_bvec(struct request_queue *q, + struct bvec_merge_data *bvm, + struct bio_vec *biovec) +{ + struct mapped_device *md = q->queuedata; + struct dm_table *map = dm_get_table(md); + struct dm_target *ti; + sector_t max_sectors; + int max_size = 0; + + if (unlikely(!map)) + goto out; + + ti = dm_table_find_target(map, bvm->bi_sector); + if (!dm_target_is_valid(ti)) + goto out_table; + + /* + * Find maximum amount of I/O that won't need splitting + */ + max_sectors = min(max_io_len(md, bvm->bi_sector, ti), + (sector_t) BIO_MAX_SECTORS); + max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size; + if (max_size < 0) + max_size = 0; + + /* + * merge_bvec_fn() returns number of bytes + * it can accept at this offset + * max is precomputed maximal io size + */ + if (max_size && ti->type->merge) + max_size = ti->type->merge(ti, bvm, biovec, max_size); + +out_table: + dm_table_put(map); + +out: + /* + * Always allow an entire first page + */ + if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT)) + max_size = biovec->bv_len; + + return max_size; +} + +/* + * The request function that just remaps the bio built up by + * dm_merge_bvec. + */ +static int dm_request(struct request_queue *q, struct bio *bio) +{ + int r = -EIO; + int rw = bio_data_dir(bio); + struct mapped_device *md = q->queuedata; + int cpu; + + /* + * There is no use in forwarding any barrier request since we can't + * guarantee it is (or can be) handled by the targets correctly. 
+ */ + if (unlikely(bio_barrier(bio))) { + bio_endio(bio, -EOPNOTSUPP); + return 0; + } + + down_read(&md->io_lock); + + cpu = part_stat_lock(); + part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]); + part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio)); + part_stat_unlock(); + + /* + * If we're suspended we have to queue + * this io for later. + */ + while (test_bit(DMF_BLOCK_IO, &md->flags)) { + up_read(&md->io_lock); + + if (bio_rw(bio) != READA) + r = queue_io(md, bio); + + if (r <= 0) + goto out_req; + + /* + * We're in a while loop, because someone could suspend + * before we get to the following read lock. + */ + down_read(&md->io_lock); + } + + r = __split_bio(md, bio); + up_read(&md->io_lock); + +out_req: + if (r < 0) + bio_io_error(bio); + + return 0; +} + +static void dm_unplug_all(struct request_queue *q) +{ + struct mapped_device *md = q->queuedata; + struct dm_table *map = dm_get_table(md); + + if (map) { + dm_table_unplug_all(map); + dm_table_put(map); + } +} + +static int dm_any_congested(void *congested_data, int bdi_bits) +{ + int r = bdi_bits; + struct mapped_device *md = congested_data; + struct dm_table *map; + + atomic_inc(&md->pending); + + if (!test_bit(DMF_BLOCK_IO, &md->flags)) { + map = dm_get_table(md); + if (map) { + r = dm_table_any_congested(map, bdi_bits); + dm_table_put(map); + } + } + + if (!atomic_dec_return(&md->pending)) + /* nudge anyone waiting on suspend queue */ + wake_up(&md->wait); + + return r; +} + +/*----------------------------------------------------------------- + * An IDR is used to keep track of allocated minor numbers. + *---------------------------------------------------------------*/ +static DEFINE_IDR(_minor_idr); + +static void free_minor(int minor) +{ + spin_lock(&_minor_lock); + idr_remove(&_minor_idr, minor); + spin_unlock(&_minor_lock); +} + +/* + * See if the device with a specific minor # is free. + */ +static int specific_minor(int minor) +{ + int r, m; + + if (minor >= (1 << MINORBITS)) + return -EINVAL; + + r = idr_pre_get(&_minor_idr, GFP_KERNEL); + if (!r) + return -ENOMEM; + + spin_lock(&_minor_lock); + + if (idr_find(&_minor_idr, minor)) { + r = -EBUSY; + goto out; + } + + r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m); + if (r) + goto out; + + if (m != minor) { + idr_remove(&_minor_idr, m); + r = -EBUSY; + goto out; + } + +out: + spin_unlock(&_minor_lock); + return r; +} + +static int next_free_minor(int *minor) +{ + int r, m; + + r = idr_pre_get(&_minor_idr, GFP_KERNEL); + if (!r) + return -ENOMEM; + + spin_lock(&_minor_lock); + + r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m); + if (r) + goto out; + + if (m >= (1 << MINORBITS)) { + idr_remove(&_minor_idr, m); + r = -ENOSPC; + goto out; + } + + *minor = m; + +out: + spin_unlock(&_minor_lock); + return r; +} + +static struct block_device_operations dm_blk_dops; + +/* + * Allocate and initialise a blank device with a given minor. 
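+ *
+ * Passing DM_ANY_MINOR lets next_free_minor() pick a free minor from the
+ * IDR, otherwise specific_minor() reserves exactly the requested one; the
+ * bad_* labels at the bottom unwind each setup step in reverse order if
+ * anything later fails.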
+ */ +static struct mapped_device *alloc_dev(int minor) +{ + int r; + struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL); + void *old_md; + + if (!md) { + DMWARN("unable to allocate device, out of memory."); + return NULL; + } + + if (!try_module_get(THIS_MODULE)) + goto bad_module_get; + + /* get a minor number for the dev */ + if (minor == DM_ANY_MINOR) + r = next_free_minor(&minor); + else + r = specific_minor(minor); + if (r < 0) + goto bad_minor; + + init_rwsem(&md->io_lock); + mutex_init(&md->suspend_lock); + spin_lock_init(&md->pushback_lock); + rwlock_init(&md->map_lock); + atomic_set(&md->holders, 1); + atomic_set(&md->open_count, 0); + atomic_set(&md->event_nr, 0); + atomic_set(&md->uevent_seq, 0); + INIT_LIST_HEAD(&md->uevent_list); + spin_lock_init(&md->uevent_lock); + + md->queue = blk_alloc_queue(GFP_KERNEL); + if (!md->queue) + goto bad_queue; + + md->queue->queuedata = md; + md->queue->backing_dev_info.congested_fn = dm_any_congested; + md->queue->backing_dev_info.congested_data = md; + blk_queue_make_request(md->queue, dm_request); + blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); + md->queue->unplug_fn = dm_unplug_all; + blk_queue_merge_bvec(md->queue, dm_merge_bvec); + + md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache); + if (!md->io_pool) + goto bad_io_pool; + + md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache); + if (!md->tio_pool) + goto bad_tio_pool; + + md->bs = bioset_create(16, 16); + if (!md->bs) + goto bad_no_bioset; + + md->disk = alloc_disk(1); + if (!md->disk) + goto bad_disk; + + atomic_set(&md->pending, 0); + init_waitqueue_head(&md->wait); + init_waitqueue_head(&md->eventq); + + md->disk->major = _major; + md->disk->first_minor = minor; + md->disk->fops = &dm_blk_dops; + md->disk->queue = md->queue; + md->disk->private_data = md; + sprintf(md->disk->disk_name, "dm-%d", minor); + add_disk(md->disk); + format_dev_t(md->name, MKDEV(_major, minor)); + + md->wq = create_singlethread_workqueue("kdmflush"); + if (!md->wq) + goto bad_thread; + + /* Populate the mapping, nobody knows we exist yet */ + spin_lock(&_minor_lock); + old_md = idr_replace(&_minor_idr, md, minor); + spin_unlock(&_minor_lock); + + BUG_ON(old_md != MINOR_ALLOCED); + + return md; + +bad_thread: + put_disk(md->disk); +bad_disk: + bioset_free(md->bs); +bad_no_bioset: + mempool_destroy(md->tio_pool); +bad_tio_pool: + mempool_destroy(md->io_pool); +bad_io_pool: + blk_cleanup_queue(md->queue); +bad_queue: + free_minor(minor); +bad_minor: + module_put(THIS_MODULE); +bad_module_get: + kfree(md); + return NULL; +} + +static void unlock_fs(struct mapped_device *md); + +static void free_dev(struct mapped_device *md) +{ + int minor = MINOR(disk_devt(md->disk)); + + if (md->suspended_bdev) { + unlock_fs(md); + bdput(md->suspended_bdev); + } + destroy_workqueue(md->wq); + mempool_destroy(md->tio_pool); + mempool_destroy(md->io_pool); + bioset_free(md->bs); + del_gendisk(md->disk); + free_minor(minor); + + spin_lock(&_minor_lock); + md->disk->private_data = NULL; + spin_unlock(&_minor_lock); + + put_disk(md->disk); + blk_cleanup_queue(md->queue); + module_put(THIS_MODULE); + kfree(md); +} + +/* + * Bind a table to the device. 
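+ *
+ * __bind() publishes the new map under md->map_lock and registers
+ * event_callback(), which turns target events into uevents and wakes
+ * md->eventq.  A table is normally swapped onto a suspended device,
+ * roughly (illustrative):
+ *
+ *	dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
+ *	dm_swap_table(md, new_table);
+ *	dm_resume(md);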
+ */ +static void event_callback(void *context) +{ + unsigned long flags; + LIST_HEAD(uevents); + struct mapped_device *md = (struct mapped_device *) context; + + spin_lock_irqsave(&md->uevent_lock, flags); + list_splice_init(&md->uevent_list, &uevents); + spin_unlock_irqrestore(&md->uevent_lock, flags); + + dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); + + atomic_inc(&md->event_nr); + wake_up(&md->eventq); +} + +static void __set_size(struct mapped_device *md, sector_t size) +{ + set_capacity(md->disk, size); + + mutex_lock(&md->suspended_bdev->bd_inode->i_mutex); + i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); + mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex); +} + +static int __bind(struct mapped_device *md, struct dm_table *t) +{ + struct request_queue *q = md->queue; + sector_t size; + + size = dm_table_get_size(t); + + /* + * Wipe any geometry if the size of the table changed. + */ + if (size != get_capacity(md->disk)) + memset(&md->geometry, 0, sizeof(md->geometry)); + + if (md->suspended_bdev) + __set_size(md, size); + if (size == 0) + return 0; + + dm_table_get(t); + dm_table_event_callback(t, event_callback, md); + + write_lock(&md->map_lock); + md->map = t; + dm_table_set_restrictions(t, q); + write_unlock(&md->map_lock); + + return 0; +} + +static void __unbind(struct mapped_device *md) +{ + struct dm_table *map = md->map; + + if (!map) + return; + + dm_table_event_callback(map, NULL, NULL); + write_lock(&md->map_lock); + md->map = NULL; + write_unlock(&md->map_lock); + dm_table_put(map); +} + +/* + * Constructor for a new device. + */ +int dm_create(int minor, struct mapped_device **result) +{ + struct mapped_device *md; + + md = alloc_dev(minor); + if (!md) + return -ENXIO; + + *result = md; + return 0; +} + +static struct mapped_device *dm_find_md(dev_t dev) +{ + struct mapped_device *md; + unsigned minor = MINOR(dev); + + if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) + return NULL; + + spin_lock(&_minor_lock); + + md = idr_find(&_minor_idr, minor); + if (md && (md == MINOR_ALLOCED || + (MINOR(disk_devt(dm_disk(md))) != minor) || + test_bit(DMF_FREEING, &md->flags))) { + md = NULL; + goto out; + } + +out: + spin_unlock(&_minor_lock); + + return md; +} + +struct mapped_device *dm_get_md(dev_t dev) +{ + struct mapped_device *md = dm_find_md(dev); + + if (md) + dm_get(md); + + return md; +} + +void *dm_get_mdptr(struct mapped_device *md) +{ + return md->interface_ptr; +} + +void dm_set_mdptr(struct mapped_device *md, void *ptr) +{ + md->interface_ptr = ptr; +} + +void dm_get(struct mapped_device *md) +{ + atomic_inc(&md->holders); +} + +const char *dm_device_name(struct mapped_device *md) +{ + return md->name; +} +EXPORT_SYMBOL_GPL(dm_device_name); + +void dm_put(struct mapped_device *md) +{ + struct dm_table *map; + + BUG_ON(test_bit(DMF_FREEING, &md->flags)); + + if (atomic_dec_and_lock(&md->holders, &_minor_lock)) { + map = dm_get_table(md); + idr_replace(&_minor_idr, MINOR_ALLOCED, + MINOR(disk_devt(dm_disk(md)))); + set_bit(DMF_FREEING, &md->flags); + spin_unlock(&_minor_lock); + if (!dm_suspended(md)) { + dm_table_presuspend_targets(map); + dm_table_postsuspend_targets(map); + } + __unbind(md); + dm_table_put(map); + free_dev(md); + } +} +EXPORT_SYMBOL_GPL(dm_put); + +static int dm_wait_for_completion(struct mapped_device *md) +{ + int r = 0; + + while (1) { + set_current_state(TASK_INTERRUPTIBLE); + + smp_mb(); + if (!atomic_read(&md->pending)) + break; + + if (signal_pending(current)) { + r = -EINTR; + break; + } + + 
io_schedule(); + } + set_current_state(TASK_RUNNING); + + return r; +} + +/* + * Process the deferred bios + */ +static void __flush_deferred_io(struct mapped_device *md) +{ + struct bio *c; + + while ((c = bio_list_pop(&md->deferred))) { + if (__split_bio(md, c)) + bio_io_error(c); + } + + clear_bit(DMF_BLOCK_IO, &md->flags); +} + +static void __merge_pushback_list(struct mapped_device *md) +{ + unsigned long flags; + + spin_lock_irqsave(&md->pushback_lock, flags); + clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); + bio_list_merge_head(&md->deferred, &md->pushback); + bio_list_init(&md->pushback); + spin_unlock_irqrestore(&md->pushback_lock, flags); +} + +static void dm_wq_work(struct work_struct *work) +{ + struct dm_wq_req *req = container_of(work, struct dm_wq_req, work); + struct mapped_device *md = req->md; + + down_write(&md->io_lock); + switch (req->type) { + case DM_WQ_FLUSH_DEFERRED: + __flush_deferred_io(md); + break; + default: + DMERR("dm_wq_work: unrecognised work type %d", req->type); + BUG(); + } + up_write(&md->io_lock); +} + +static void dm_wq_queue(struct mapped_device *md, int type, void *context, + struct dm_wq_req *req) +{ + req->type = type; + req->md = md; + req->context = context; + INIT_WORK(&req->work, dm_wq_work); + queue_work(md->wq, &req->work); +} + +static void dm_queue_flush(struct mapped_device *md, int type, void *context) +{ + struct dm_wq_req req; + + dm_wq_queue(md, type, context, &req); + flush_workqueue(md->wq); +} + +/* + * Swap in a new table (destroying old one). + */ +int dm_swap_table(struct mapped_device *md, struct dm_table *table) +{ + int r = -EINVAL; + + mutex_lock(&md->suspend_lock); + + /* device must be suspended */ + if (!dm_suspended(md)) + goto out; + + /* without bdev, the device size cannot be changed */ + if (!md->suspended_bdev) + if (get_capacity(md->disk) != dm_table_get_size(table)) + goto out; + + __unbind(md); + r = __bind(md, table); + +out: + mutex_unlock(&md->suspend_lock); + return r; +} + +/* + * Functions to lock and unlock any filesystem running on the + * device. + */ +static int lock_fs(struct mapped_device *md) +{ + int r; + + WARN_ON(md->frozen_sb); + + md->frozen_sb = freeze_bdev(md->suspended_bdev); + if (IS_ERR(md->frozen_sb)) { + r = PTR_ERR(md->frozen_sb); + md->frozen_sb = NULL; + return r; + } + + set_bit(DMF_FROZEN, &md->flags); + + /* don't bdput right now, we don't want the bdev + * to go away while it is locked. + */ + return 0; +} + +static void unlock_fs(struct mapped_device *md) +{ + if (!test_bit(DMF_FROZEN, &md->flags)) + return; + + thaw_bdev(md->suspended_bdev, md->frozen_sb); + md->frozen_sb = NULL; + clear_bit(DMF_FROZEN, &md->flags); +} + +/* + * We need to be able to change a mapping table under a mounted + * filesystem. For example we might want to move some data in + * the background. Before the table can be swapped with + * dm_bind_table, dm_suspend must be called to flush any in + * flight bios and ensure that any further io gets deferred. + */ +int dm_suspend(struct mapped_device *md, unsigned suspend_flags) +{ + struct dm_table *map = NULL; + DECLARE_WAITQUEUE(wait, current); + int r = 0; + int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0; + int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0; + + mutex_lock(&md->suspend_lock); + + if (dm_suspended(md)) { + r = -EINVAL; + goto out_unlock; + } + + map = dm_get_table(md); + + /* + * DMF_NOFLUSH_SUSPENDING must be set before presuspend. + * This flag is cleared before dm_suspend returns. 
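+ * While it is set, dm_noflush_suspending() reports true, so targets
+ * may push bios back rather than fail them; anything parked on
+ * md->pushback is merged back onto the deferred list by
+ * __merge_pushback_list() once the in-flight io has drained.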
+ */ + if (noflush) + set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); + + /* This does not get reverted if there's an error later. */ + dm_table_presuspend_targets(map); + + /* bdget() can stall if the pending I/Os are not flushed */ + if (!noflush) { + md->suspended_bdev = bdget_disk(md->disk, 0); + if (!md->suspended_bdev) { + DMWARN("bdget failed in dm_suspend"); + r = -ENOMEM; + goto out; + } + + /* + * Flush I/O to the device. noflush supersedes do_lockfs, + * because lock_fs() needs to flush I/Os. + */ + if (do_lockfs) { + r = lock_fs(md); + if (r) + goto out; + } + } + + /* + * First we set the BLOCK_IO flag so no more ios will be mapped. + */ + down_write(&md->io_lock); + set_bit(DMF_BLOCK_IO, &md->flags); + + add_wait_queue(&md->wait, &wait); + up_write(&md->io_lock); + + /* unplug */ + if (map) + dm_table_unplug_all(map); + + /* + * Wait for the already-mapped ios to complete. + */ + r = dm_wait_for_completion(md); + + down_write(&md->io_lock); + remove_wait_queue(&md->wait, &wait); + + if (noflush) + __merge_pushback_list(md); + up_write(&md->io_lock); + + /* were we interrupted ? */ + if (r < 0) { + dm_queue_flush(md, DM_WQ_FLUSH_DEFERRED, NULL); + + unlock_fs(md); + goto out; /* pushback list is already flushed, so skip flush */ + } + + dm_table_postsuspend_targets(map); + + set_bit(DMF_SUSPENDED, &md->flags); + +out: + if (r && md->suspended_bdev) { + bdput(md->suspended_bdev); + md->suspended_bdev = NULL; + } + + dm_table_put(map); + +out_unlock: + mutex_unlock(&md->suspend_lock); + return r; +} + +int dm_resume(struct mapped_device *md) +{ + int r = -EINVAL; + struct dm_table *map = NULL; + + mutex_lock(&md->suspend_lock); + if (!dm_suspended(md)) + goto out; + + map = dm_get_table(md); + if (!map || !dm_table_get_size(map)) + goto out; + + r = dm_table_resume_targets(map); + if (r) + goto out; + + dm_queue_flush(md, DM_WQ_FLUSH_DEFERRED, NULL); + + unlock_fs(md); + + if (md->suspended_bdev) { + bdput(md->suspended_bdev); + md->suspended_bdev = NULL; + } + + clear_bit(DMF_SUSPENDED, &md->flags); + + dm_table_unplug_all(map); + + dm_kobject_uevent(md); + + r = 0; + +out: + dm_table_put(map); + mutex_unlock(&md->suspend_lock); + + return r; +} + +/*----------------------------------------------------------------- + * Event notification. + *---------------------------------------------------------------*/ +void dm_kobject_uevent(struct mapped_device *md) +{ + kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE); +} + +uint32_t dm_next_uevent_seq(struct mapped_device *md) +{ + return atomic_add_return(1, &md->uevent_seq); +} + +uint32_t dm_get_event_nr(struct mapped_device *md) +{ + return atomic_read(&md->event_nr); +} + +int dm_wait_event(struct mapped_device *md, int event_nr) +{ + return wait_event_interruptible(md->eventq, + (event_nr != atomic_read(&md->event_nr))); +} + +void dm_uevent_add(struct mapped_device *md, struct list_head *elist) +{ + unsigned long flags; + + spin_lock_irqsave(&md->uevent_lock, flags); + list_add(elist, &md->uevent_list); + spin_unlock_irqrestore(&md->uevent_lock, flags); +} + +/* + * The gendisk is only valid as long as you have a reference + * count on 'md'. 
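+ * e.g. (illustrative, dev being the mapped device's dev_t):
+ *
+ *	md = dm_get_md(dev);		/* takes a reference */
+ *	if (md) {
+ *		disk = dm_disk(md);
+ *		...
+ *		dm_put(md);	/* disk must not be used after this */
+ *	}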
+ */ +struct gendisk *dm_disk(struct mapped_device *md) +{ + return md->disk; +} + +int dm_suspended(struct mapped_device *md) +{ + return test_bit(DMF_SUSPENDED, &md->flags); +} + +int dm_noflush_suspending(struct dm_target *ti) +{ + struct mapped_device *md = dm_table_get_md(ti->table); + int r = __noflush_suspending(md); + + dm_put(md); + + return r; +} +EXPORT_SYMBOL_GPL(dm_noflush_suspending); + +static struct block_device_operations dm_blk_dops = { + .open = dm_blk_open, + .release = dm_blk_close, + .ioctl = dm_blk_ioctl, + .getgeo = dm_blk_getgeo, + .owner = THIS_MODULE +}; + +EXPORT_SYMBOL(dm_get_mapinfo); + +/* + * module hooks + */ +module_init(dm_init); +module_exit(dm_exit); + +module_param(major, uint, 0); +MODULE_PARM_DESC(major, "The major number of the device mapper"); +MODULE_DESCRIPTION(DM_NAME " driver"); +MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/md/dm.h b/drivers/md/dm.h new file mode 100644 index 0000000..0ade60c --- /dev/null +++ b/drivers/md/dm.h @@ -0,0 +1,91 @@ +/* + * Internal header file for device mapper + * + * Copyright (C) 2001, 2002 Sistina Software + * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. + * + * This file is released under the LGPL. + */ + +#ifndef DM_INTERNAL_H +#define DM_INTERNAL_H + +#include <linux/fs.h> +#include <linux/device-mapper.h> +#include <linux/list.h> +#include <linux/blkdev.h> +#include <linux/hdreg.h> + +/* + * Suspend feature flags + */ +#define DM_SUSPEND_LOCKFS_FLAG (1 << 0) +#define DM_SUSPEND_NOFLUSH_FLAG (1 << 1) + +/* + * List of devices that a metadevice uses and should open/close. + */ +struct dm_dev_internal { + struct list_head list; + atomic_t count; + struct dm_dev dm_dev; +}; + +struct dm_table; + +/*----------------------------------------------------------------- + * Internal table functions. + *---------------------------------------------------------------*/ +void dm_table_event_callback(struct dm_table *t, + void (*fn)(void *), void *context); +struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index); +struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector); +void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q); +struct list_head *dm_table_get_devices(struct dm_table *t); +void dm_table_presuspend_targets(struct dm_table *t); +void dm_table_postsuspend_targets(struct dm_table *t); +int dm_table_resume_targets(struct dm_table *t); +int dm_table_any_congested(struct dm_table *t, int bdi_bits); + +/* + * To check the return value from dm_table_find_target(). + */ +#define dm_target_is_valid(t) ((t)->table) + +/*----------------------------------------------------------------- + * A registry of target types. + *---------------------------------------------------------------*/ +int dm_target_init(void); +void dm_target_exit(void); +struct target_type *dm_get_target_type(const char *name); +void dm_put_target_type(struct target_type *t); +int dm_target_iterate(void (*iter_func)(struct target_type *tt, + void *param), void *param); + +int dm_split_args(int *argc, char ***argvp, char *input); + +/* + * The device-mapper can be driven through one of two interfaces; + * ioctl or filesystem, depending which patch you have applied. 
+ */ +int dm_interface_init(void); +void dm_interface_exit(void); + +/* + * Targets for linear and striped mappings + */ +int dm_linear_init(void); +void dm_linear_exit(void); + +int dm_stripe_init(void); +void dm_stripe_exit(void); + +int dm_open_count(struct mapped_device *md); +int dm_lock_for_deletion(struct mapped_device *md); + +void dm_kobject_uevent(struct mapped_device *md); + +int dm_kcopyd_init(void); +void dm_kcopyd_exit(void); + +#endif diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c new file mode 100644 index 0000000..f26c1f9 --- /dev/null +++ b/drivers/md/faulty.c @@ -0,0 +1,346 @@ +/* + * faulty.c : Multiple Devices driver for Linux + * + * Copyright (C) 2004 Neil Brown + * + * fautly-device-simulator personality for md + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * You should have received a copy of the GNU General Public License + * (for example /usr/src/linux/COPYING); if not, write to the Free + * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + + +/* + * The "faulty" personality causes some requests to fail. + * + * Possible failure modes are: + * reads fail "randomly" but succeed on retry + * writes fail "randomly" but succeed on retry + * reads for some address fail and then persist until a write + * reads for some address fail and then persist irrespective of write + * writes for some address fail and persist + * all writes fail + * + * Different modes can be active at a time, but only + * one can be set at array creation. Others can be added later. + * A mode can be one-shot or recurrent with the recurrance being + * once in every N requests. + * The bottom 5 bits of the "layout" indicate the mode. The + * remainder indicate a period, or 0 for one-shot. + * + * There is an implementation limit on the number of concurrently + * persisting-faulty blocks. When a new fault is requested that would + * exceed the limit, it is ignored. + * All current faults can be clear using a layout of "0". + * + * Requests are always sent to the device. If they are to fail, + * we clone the bio and insert a new b_end_io into the chain. 
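+ *
+ * For example, with ReadTransient == 1 and ModeShift == 5 (defined
+ * below), a layout of
+ *
+ *	(100 << ModeShift) | ReadTransient	/* == 3201 */
+ *
+ * makes roughly one read in every 100 fail (succeeding on retry),
+ * while a period of 0 - a layout of just ReadTransient - arms a
+ * single one-shot failure.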
+ */ + +#define WriteTransient 0 +#define ReadTransient 1 +#define WritePersistent 2 +#define ReadPersistent 3 +#define WriteAll 4 /* doesn't go to device */ +#define ReadFixable 5 +#define Modes 6 + +#define ClearErrors 31 +#define ClearFaults 30 + +#define AllPersist 100 /* internal use only */ +#define NoPersist 101 + +#define ModeMask 0x1f +#define ModeShift 5 + +#define MaxFault 50 +#include <linux/raid/md.h> + + +static void faulty_fail(struct bio *bio, int error) +{ + struct bio *b = bio->bi_private; + + b->bi_size = bio->bi_size; + b->bi_sector = bio->bi_sector; + + bio_put(bio); + + bio_io_error(b); +} + +typedef struct faulty_conf { + int period[Modes]; + atomic_t counters[Modes]; + sector_t faults[MaxFault]; + int modes[MaxFault]; + int nfaults; + mdk_rdev_t *rdev; +} conf_t; + +static int check_mode(conf_t *conf, int mode) +{ + if (conf->period[mode] == 0 && + atomic_read(&conf->counters[mode]) <= 0) + return 0; /* no failure, no decrement */ + + + if (atomic_dec_and_test(&conf->counters[mode])) { + if (conf->period[mode]) + atomic_set(&conf->counters[mode], conf->period[mode]); + return 1; + } + return 0; +} + +static int check_sector(conf_t *conf, sector_t start, sector_t end, int dir) +{ + /* If we find a ReadFixable sector, we fix it ... */ + int i; + for (i=0; i<conf->nfaults; i++) + if (conf->faults[i] >= start && + conf->faults[i] < end) { + /* found it ... */ + switch (conf->modes[i] * 2 + dir) { + case WritePersistent*2+WRITE: return 1; + case ReadPersistent*2+READ: return 1; + case ReadFixable*2+READ: return 1; + case ReadFixable*2+WRITE: + conf->modes[i] = NoPersist; + return 0; + case AllPersist*2+READ: + case AllPersist*2+WRITE: return 1; + default: + return 0; + } + } + return 0; +} + +static void add_sector(conf_t *conf, sector_t start, int mode) +{ + int i; + int n = conf->nfaults; + for (i=0; i<conf->nfaults; i++) + if (conf->faults[i] == start) { + switch(mode) { + case NoPersist: conf->modes[i] = mode; return; + case WritePersistent: + if (conf->modes[i] == ReadPersistent || + conf->modes[i] == ReadFixable) + conf->modes[i] = AllPersist; + else + conf->modes[i] = WritePersistent; + return; + case ReadPersistent: + if (conf->modes[i] == WritePersistent) + conf->modes[i] = AllPersist; + else + conf->modes[i] = ReadPersistent; + return; + case ReadFixable: + if (conf->modes[i] == WritePersistent || + conf->modes[i] == ReadPersistent) + conf->modes[i] = AllPersist; + else + conf->modes[i] = ReadFixable; + return; + } + } else if (conf->modes[i] == NoPersist) + n = i; + + if (n >= MaxFault) + return; + conf->faults[n] = start; + conf->modes[n] = mode; + if (conf->nfaults == n) + conf->nfaults = n+1; +} + +static int make_request(struct request_queue *q, struct bio *bio) +{ + mddev_t *mddev = q->queuedata; + conf_t *conf = (conf_t*)mddev->private; + int failit = 0; + + if (bio_data_dir(bio) == WRITE) { + /* write request */ + if (atomic_read(&conf->counters[WriteAll])) { + /* special case - don't decrement, don't generic_make_request, + * just fail immediately + */ + bio_endio(bio, -EIO); + return 0; + } + + if (check_sector(conf, bio->bi_sector, bio->bi_sector+(bio->bi_size>>9), + WRITE)) + failit = 1; + if (check_mode(conf, WritePersistent)) { + add_sector(conf, bio->bi_sector, WritePersistent); + failit = 1; + } + if (check_mode(conf, WriteTransient)) + failit = 1; + } else { + /* read request */ + if (check_sector(conf, bio->bi_sector, bio->bi_sector + (bio->bi_size>>9), + READ)) + failit = 1; + if (check_mode(conf, ReadTransient)) + failit = 1; + if 
(check_mode(conf, ReadPersistent)) { + add_sector(conf, bio->bi_sector, ReadPersistent); + failit = 1; + } + if (check_mode(conf, ReadFixable)) { + add_sector(conf, bio->bi_sector, ReadFixable); + failit = 1; + } + } + if (failit) { + struct bio *b = bio_clone(bio, GFP_NOIO); + b->bi_bdev = conf->rdev->bdev; + b->bi_private = bio; + b->bi_end_io = faulty_fail; + generic_make_request(b); + return 0; + } else { + bio->bi_bdev = conf->rdev->bdev; + return 1; + } +} + +static void status(struct seq_file *seq, mddev_t *mddev) +{ + conf_t *conf = (conf_t*)mddev->private; + int n; + + if ((n=atomic_read(&conf->counters[WriteTransient])) != 0) + seq_printf(seq, " WriteTransient=%d(%d)", + n, conf->period[WriteTransient]); + + if ((n=atomic_read(&conf->counters[ReadTransient])) != 0) + seq_printf(seq, " ReadTransient=%d(%d)", + n, conf->period[ReadTransient]); + + if ((n=atomic_read(&conf->counters[WritePersistent])) != 0) + seq_printf(seq, " WritePersistent=%d(%d)", + n, conf->period[WritePersistent]); + + if ((n=atomic_read(&conf->counters[ReadPersistent])) != 0) + seq_printf(seq, " ReadPersistent=%d(%d)", + n, conf->period[ReadPersistent]); + + + if ((n=atomic_read(&conf->counters[ReadFixable])) != 0) + seq_printf(seq, " ReadFixable=%d(%d)", + n, conf->period[ReadFixable]); + + if ((n=atomic_read(&conf->counters[WriteAll])) != 0) + seq_printf(seq, " WriteAll"); + + seq_printf(seq, " nfaults=%d", conf->nfaults); +} + + +static int reconfig(mddev_t *mddev, int layout, int chunk_size) +{ + int mode = layout & ModeMask; + int count = layout >> ModeShift; + conf_t *conf = mddev->private; + + if (chunk_size != -1) + return -EINVAL; + + /* new layout */ + if (mode == ClearFaults) + conf->nfaults = 0; + else if (mode == ClearErrors) { + int i; + for (i=0 ; i < Modes ; i++) { + conf->period[i] = 0; + atomic_set(&conf->counters[i], 0); + } + } else if (mode < Modes) { + conf->period[mode] = count; + if (!count) count++; + atomic_set(&conf->counters[mode], count); + } else + return -EINVAL; + mddev->layout = -1; /* makes sure further changes come through */ + return 0; +} + +static int run(mddev_t *mddev) +{ + mdk_rdev_t *rdev; + struct list_head *tmp; + int i; + + conf_t *conf = kmalloc(sizeof(*conf), GFP_KERNEL); + if (!conf) + return -ENOMEM; + + for (i=0; i<Modes; i++) { + atomic_set(&conf->counters[i], 0); + conf->period[i] = 0; + } + conf->nfaults = 0; + + rdev_for_each(rdev, tmp, mddev) + conf->rdev = rdev; + + mddev->array_sectors = mddev->size * 2; + mddev->private = conf; + + reconfig(mddev, mddev->layout, -1); + + return 0; +} + +static int stop(mddev_t *mddev) +{ + conf_t *conf = (conf_t *)mddev->private; + + kfree(conf); + mddev->private = NULL; + return 0; +} + +static struct mdk_personality faulty_personality = +{ + .name = "faulty", + .level = LEVEL_FAULTY, + .owner = THIS_MODULE, + .make_request = make_request, + .run = run, + .stop = stop, + .status = status, + .reconfig = reconfig, +}; + +static int __init raid_init(void) +{ + return register_md_personality(&faulty_personality); +} + +static void raid_exit(void) +{ + unregister_md_personality(&faulty_personality); +} + +module_init(raid_init); +module_exit(raid_exit); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("md-personality-10"); /* faulty */ +MODULE_ALIAS("md-faulty"); +MODULE_ALIAS("md-level--5"); diff --git a/drivers/md/linear.c b/drivers/md/linear.c new file mode 100644 index 0000000..eb00e64 --- /dev/null +++ b/drivers/md/linear.c @@ -0,0 +1,403 @@ +/* + linear.c : Multiple Devices driver for Linux + Copyright (C) 1994-96 Marc ZYNGIER 
+ <zyngier@ufr-info-p7.ibp.fr> or + <maz@gloups.fdn.fr> + + Linear mode management functions. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + You should have received a copy of the GNU General Public License + (for example /usr/src/linux/COPYING); if not, write to the Free + Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ + +#include <linux/raid/linear.h> + +/* + * find which device holds a particular offset + */ +static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector) +{ + dev_info_t *hash; + linear_conf_t *conf = mddev_to_conf(mddev); + sector_t idx = sector >> conf->sector_shift; + + /* + * sector_div(a,b) returns the remainer and sets a to a/b + */ + (void)sector_div(idx, conf->spacing); + hash = conf->hash_table[idx]; + + while (sector >= hash->num_sectors + hash->start_sector) + hash++; + return hash; +} + +/** + * linear_mergeable_bvec -- tell bio layer if two requests can be merged + * @q: request queue + * @bvm: properties of new bio + * @biovec: the request that could be merged to it. + * + * Return amount of bytes we can take at this offset + */ +static int linear_mergeable_bvec(struct request_queue *q, + struct bvec_merge_data *bvm, + struct bio_vec *biovec) +{ + mddev_t *mddev = q->queuedata; + dev_info_t *dev0; + unsigned long maxsectors, bio_sectors = bvm->bi_size >> 9; + sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); + + dev0 = which_dev(mddev, sector); + maxsectors = dev0->num_sectors - (sector - dev0->start_sector); + + if (maxsectors < bio_sectors) + maxsectors = 0; + else + maxsectors -= bio_sectors; + + if (maxsectors <= (PAGE_SIZE >> 9 ) && bio_sectors == 0) + return biovec->bv_len; + /* The bytes available at this offset could be really big, + * so we cap at 2^31 to avoid overflow */ + if (maxsectors > (1 << (31-9))) + return 1<<31; + return maxsectors << 9; +} + +static void linear_unplug(struct request_queue *q) +{ + mddev_t *mddev = q->queuedata; + linear_conf_t *conf = mddev_to_conf(mddev); + int i; + + for (i=0; i < mddev->raid_disks; i++) { + struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev); + blk_unplug(r_queue); + } +} + +static int linear_congested(void *data, int bits) +{ + mddev_t *mddev = data; + linear_conf_t *conf = mddev_to_conf(mddev); + int i, ret = 0; + + for (i = 0; i < mddev->raid_disks && !ret ; i++) { + struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev); + ret |= bdi_congested(&q->backing_dev_info, bits); + } + return ret; +} + +static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks) +{ + linear_conf_t *conf; + dev_info_t **table; + mdk_rdev_t *rdev; + int i, nb_zone, cnt; + sector_t min_sectors; + sector_t curr_sector; + struct list_head *tmp; + + conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(dev_info_t), + GFP_KERNEL); + if (!conf) + return NULL; + + cnt = 0; + conf->array_sectors = 0; + + rdev_for_each(rdev, tmp, mddev) { + int j = rdev->raid_disk; + dev_info_t *disk = conf->disks + j; + + if (j < 0 || j >= raid_disks || disk->rdev) { + printk("linear: disk numbering problem. 
Aborting!\n"); + goto out; + } + + disk->rdev = rdev; + + blk_queue_stack_limits(mddev->queue, + rdev->bdev->bd_disk->queue); + /* as we don't honour merge_bvec_fn, we must never risk + * violating it, so limit ->max_sector to one PAGE, as + * a one page request is never in violation. + */ + if (rdev->bdev->bd_disk->queue->merge_bvec_fn && + mddev->queue->max_sectors > (PAGE_SIZE>>9)) + blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); + + disk->num_sectors = rdev->size * 2; + conf->array_sectors += rdev->size * 2; + + cnt++; + } + if (cnt != raid_disks) { + printk("linear: not enough drives present. Aborting!\n"); + goto out; + } + + min_sectors = conf->array_sectors; + sector_div(min_sectors, PAGE_SIZE/sizeof(struct dev_info *)); + if (min_sectors == 0) + min_sectors = 1; + + /* min_sectors is the minimum spacing that will fit the hash + * table in one PAGE. This may be much smaller than needed. + * We find the smallest non-terminal set of consecutive devices + * that is larger than min_sectors and use the size of that as + * the actual spacing + */ + conf->spacing = conf->array_sectors; + for (i=0; i < cnt-1 ; i++) { + sector_t tmp = 0; + int j; + for (j = i; j < cnt - 1 && tmp < min_sectors; j++) + tmp += conf->disks[j].num_sectors; + if (tmp >= min_sectors && tmp < conf->spacing) + conf->spacing = tmp; + } + + /* spacing may be too large for sector_div to work with, + * so we might need to pre-shift + */ + conf->sector_shift = 0; + if (sizeof(sector_t) > sizeof(u32)) { + sector_t space = conf->spacing; + while (space > (sector_t)(~(u32)0)) { + space >>= 1; + conf->sector_shift++; + } + } + /* + * This code was restructured to work around a gcc-2.95.3 internal + * compiler error. Alter it with care. + */ + { + sector_t sz; + unsigned round; + unsigned long base; + + sz = conf->array_sectors >> conf->sector_shift; + sz += 1; /* force round-up */ + base = conf->spacing >> conf->sector_shift; + round = sector_div(sz, base); + nb_zone = sz + (round ? 1 : 0); + } + BUG_ON(nb_zone > PAGE_SIZE / sizeof(struct dev_info *)); + + conf->hash_table = kmalloc (sizeof (struct dev_info *) * nb_zone, + GFP_KERNEL); + if (!conf->hash_table) + goto out; + + /* + * Here we generate the linear hash table + * First calculate the device offsets. + */ + conf->disks[0].start_sector = 0; + for (i = 1; i < raid_disks; i++) + conf->disks[i].start_sector = + conf->disks[i-1].start_sector + + conf->disks[i-1].num_sectors; + + table = conf->hash_table; + i = 0; + for (curr_sector = 0; + curr_sector < conf->array_sectors; + curr_sector += conf->spacing) { + + while (i < raid_disks-1 && + curr_sector >= conf->disks[i+1].start_sector) + i++; + + *table ++ = conf->disks + i; + } + + if (conf->sector_shift) { + conf->spacing >>= conf->sector_shift; + /* round spacing up so that when we divide by it, + * we err on the side of "too-low", which is safest. 
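+ * ("Too low" is safe because which_dev() then lands on an earlier
+ * hash slot at worst and simply walks forward to the right device.)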
+ */ + conf->spacing++; + } + + BUG_ON(table - conf->hash_table > nb_zone); + + return conf; + +out: + kfree(conf); + return NULL; +} + +static int linear_run (mddev_t *mddev) +{ + linear_conf_t *conf; + + mddev->queue->queue_lock = &mddev->queue->__queue_lock; + conf = linear_conf(mddev, mddev->raid_disks); + + if (!conf) + return 1; + mddev->private = conf; + mddev->array_sectors = conf->array_sectors; + + blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec); + mddev->queue->unplug_fn = linear_unplug; + mddev->queue->backing_dev_info.congested_fn = linear_congested; + mddev->queue->backing_dev_info.congested_data = mddev; + return 0; +} + +static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev) +{ + /* Adding a drive to a linear array allows the array to grow. + * It is permitted if the new drive has a matching superblock + * already on it, with raid_disk equal to raid_disks. + * It is achieved by creating a new linear_private_data structure + * and swapping it in in-place of the current one. + * The current one is never freed until the array is stopped. + * This avoids races. + */ + linear_conf_t *newconf; + + if (rdev->saved_raid_disk != mddev->raid_disks) + return -EINVAL; + + rdev->raid_disk = rdev->saved_raid_disk; + + newconf = linear_conf(mddev,mddev->raid_disks+1); + + if (!newconf) + return -ENOMEM; + + newconf->prev = mddev_to_conf(mddev); + mddev->private = newconf; + mddev->raid_disks++; + mddev->array_sectors = newconf->array_sectors; + set_capacity(mddev->gendisk, mddev->array_sectors); + return 0; +} + +static int linear_stop (mddev_t *mddev) +{ + linear_conf_t *conf = mddev_to_conf(mddev); + + blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ + do { + linear_conf_t *t = conf->prev; + kfree(conf->hash_table); + kfree(conf); + conf = t; + } while (conf); + + return 0; +} + +static int linear_make_request (struct request_queue *q, struct bio *bio) +{ + const int rw = bio_data_dir(bio); + mddev_t *mddev = q->queuedata; + dev_info_t *tmp_dev; + int cpu; + + if (unlikely(bio_barrier(bio))) { + bio_endio(bio, -EOPNOTSUPP); + return 0; + } + + cpu = part_stat_lock(); + part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); + part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], + bio_sectors(bio)); + part_stat_unlock(); + + tmp_dev = which_dev(mddev, bio->bi_sector); + + if (unlikely(bio->bi_sector >= (tmp_dev->num_sectors + + tmp_dev->start_sector) + || (bio->bi_sector < + tmp_dev->start_sector))) { + char b[BDEVNAME_SIZE]; + + printk("linear_make_request: Sector %llu out of bounds on " + "dev %s: %llu sectors, offset %llu\n", + (unsigned long long)bio->bi_sector, + bdevname(tmp_dev->rdev->bdev, b), + (unsigned long long)tmp_dev->num_sectors, + (unsigned long long)tmp_dev->start_sector); + bio_io_error(bio); + return 0; + } + if (unlikely(bio->bi_sector + (bio->bi_size >> 9) > + tmp_dev->start_sector + tmp_dev->num_sectors)) { + /* This bio crosses a device boundary, so we have to + * split it. 
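+ * bio_split() yields two bios, one up to the boundary and one from
+ * it; each half is run through linear_make_request() again and, if
+ * that returns 1 (remapped but not yet submitted), handed on via
+ * generic_make_request().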
+ */ + struct bio_pair *bp; + + bp = bio_split(bio, + tmp_dev->start_sector + tmp_dev->num_sectors + - bio->bi_sector); + + if (linear_make_request(q, &bp->bio1)) + generic_make_request(&bp->bio1); + if (linear_make_request(q, &bp->bio2)) + generic_make_request(&bp->bio2); + bio_pair_release(bp); + return 0; + } + + bio->bi_bdev = tmp_dev->rdev->bdev; + bio->bi_sector = bio->bi_sector - tmp_dev->start_sector + + tmp_dev->rdev->data_offset; + + return 1; +} + +static void linear_status (struct seq_file *seq, mddev_t *mddev) +{ + + seq_printf(seq, " %dk rounding", mddev->chunk_size/1024); +} + + +static struct mdk_personality linear_personality = +{ + .name = "linear", + .level = LEVEL_LINEAR, + .owner = THIS_MODULE, + .make_request = linear_make_request, + .run = linear_run, + .stop = linear_stop, + .status = linear_status, + .hot_add_disk = linear_add, +}; + +static int __init linear_init (void) +{ + return register_md_personality (&linear_personality); +} + +static void linear_exit (void) +{ + unregister_md_personality (&linear_personality); +} + + +module_init(linear_init); +module_exit(linear_exit); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("md-personality-1"); /* LINEAR - deprecated*/ +MODULE_ALIAS("md-linear"); +MODULE_ALIAS("md-level--1"); diff --git a/drivers/md/md.c b/drivers/md/md.c new file mode 100644 index 0000000..fb15676 --- /dev/null +++ b/drivers/md/md.c @@ -0,0 +1,6438 @@ +/* + md.c : Multiple Devices driver for Linux + Copyright (C) 1998, 1999, 2000 Ingo Molnar + + completely rewritten, based on the MD driver code from Marc Zyngier + + Changes: + + - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar + - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com> + - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net> + - kerneld support by Boris Tobotras <boris@xtalk.msk.su> + - kmod support by: Cyrus Durgin + - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com> + - Devfs support by Richard Gooch <rgooch@atnf.csiro.au> + + - lots of fixes and improvements to the RAID1/RAID5 and generic + RAID code (such as request based resynchronization): + + Neil Brown <neilb@cse.unsw.edu.au>. + + - persistent bitmap code + Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + You should have received a copy of the GNU General Public License + (for example /usr/src/linux/COPYING); if not, write to the Free + Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ + +#include <linux/kthread.h> +#include <linux/raid/md.h> +#include <linux/raid/bitmap.h> +#include <linux/sysctl.h> +#include <linux/buffer_head.h> /* for invalidate_bdev */ +#include <linux/poll.h> +#include <linux/ctype.h> +#include <linux/hdreg.h> +#include <linux/proc_fs.h> +#include <linux/random.h> +#include <linux/reboot.h> +#include <linux/file.h> +#include <linux/delay.h> + +#define MAJOR_NR MD_MAJOR + +/* 63 partitions with the alternate major number (mdp) */ +#define MdpMinorShift 6 + +#define DEBUG 0 +#define dprintk(x...) ((void)(DEBUG && printk(x))) + + +#ifndef MODULE +static void autostart_arrays(int part); +#endif + +static LIST_HEAD(pers_list); +static DEFINE_SPINLOCK(pers_lock); + +static void md_print_devices(void); + +static DECLARE_WAIT_QUEUE_HEAD(resync_wait); + +#define MD_BUG(x...) 
{ printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); } + +/* + * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit' + * is 1000 KB/sec, so the extra system load does not show up that much. + * Increase it if you want to have more _guaranteed_ speed. Note that + * the RAID driver will use the maximum available bandwidth if the IO + * subsystem is idle. There is also an 'absolute maximum' reconstruction + * speed limit - in case reconstruction slows down your system despite + * idle IO detection. + * + * you can change it via /proc/sys/dev/raid/speed_limit_min and _max. + * or /sys/block/mdX/md/sync_speed_{min,max} + */ + +static int sysctl_speed_limit_min = 1000; +static int sysctl_speed_limit_max = 200000; +static inline int speed_min(mddev_t *mddev) +{ + return mddev->sync_speed_min ? + mddev->sync_speed_min : sysctl_speed_limit_min; +} + +static inline int speed_max(mddev_t *mddev) +{ + return mddev->sync_speed_max ? + mddev->sync_speed_max : sysctl_speed_limit_max; +} + +static struct ctl_table_header *raid_table_header; + +static ctl_table raid_table[] = { + { + .ctl_name = DEV_RAID_SPEED_LIMIT_MIN, + .procname = "speed_limit_min", + .data = &sysctl_speed_limit_min, + .maxlen = sizeof(int), + .mode = S_IRUGO|S_IWUSR, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = DEV_RAID_SPEED_LIMIT_MAX, + .procname = "speed_limit_max", + .data = &sysctl_speed_limit_max, + .maxlen = sizeof(int), + .mode = S_IRUGO|S_IWUSR, + .proc_handler = &proc_dointvec, + }, + { .ctl_name = 0 } +}; + +static ctl_table raid_dir_table[] = { + { + .ctl_name = DEV_RAID, + .procname = "raid", + .maxlen = 0, + .mode = S_IRUGO|S_IXUGO, + .child = raid_table, + }, + { .ctl_name = 0 } +}; + +static ctl_table raid_root_table[] = { + { + .ctl_name = CTL_DEV, + .procname = "dev", + .maxlen = 0, + .mode = 0555, + .child = raid_dir_table, + }, + { .ctl_name = 0 } +}; + +static struct block_device_operations md_fops; + +static int start_readonly; + +/* + * We have a system wide 'event count' that is incremented + * on any 'interesting' event, and readers of /proc/mdstat + * can use 'poll' or 'select' to find out when the event + * count increases. + * + * Events are: + * start array, stop array, error, add device, remove device, + * start build, activate spare + */ +static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters); +static atomic_t md_event_count; +void md_new_event(mddev_t *mddev) +{ + atomic_inc(&md_event_count); + wake_up(&md_event_waiters); +} +EXPORT_SYMBOL_GPL(md_new_event); + +/* Alternate version that can be called from interrupts + * when calling sysfs_notify isn't needed. + */ +static void md_new_event_inintr(mddev_t *mddev) +{ + atomic_inc(&md_event_count); + wake_up(&md_event_waiters); +} + +/* + * Enables to iterate over all existing md arrays + * all_mddevs_lock protects this list. + */ +static LIST_HEAD(all_mddevs); +static DEFINE_SPINLOCK(all_mddevs_lock); + + +/* + * iterates through all used mddevs in the system. + * We take care to grab the all_mddevs_lock whenever navigating + * the list, and to always hold a refcount when unlocked. + * Any code which breaks out of this loop while own + * a reference to the current mddev and must mddev_put it. 
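+ *
+ * Typical use when stopping at a particular array (illustrative;
+ * 'unit' is whatever dev_t is being looked for):
+ *
+ *	for_each_mddev(mddev, tmp)
+ *		if (mddev->unit == unit) {
+ *			/* the loop took a reference for us */
+ *			...
+ *			mddev_put(mddev);
+ *			break;
+ *		}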
+ */ +#define for_each_mddev(mddev,tmp) \ + \ + for (({ spin_lock(&all_mddevs_lock); \ + tmp = all_mddevs.next; \ + mddev = NULL;}); \ + ({ if (tmp != &all_mddevs) \ + mddev_get(list_entry(tmp, mddev_t, all_mddevs));\ + spin_unlock(&all_mddevs_lock); \ + if (mddev) mddev_put(mddev); \ + mddev = list_entry(tmp, mddev_t, all_mddevs); \ + tmp != &all_mddevs;}); \ + ({ spin_lock(&all_mddevs_lock); \ + tmp = tmp->next;}) \ + ) + + +static int md_fail_request(struct request_queue *q, struct bio *bio) +{ + bio_io_error(bio); + return 0; +} + +static inline mddev_t *mddev_get(mddev_t *mddev) +{ + atomic_inc(&mddev->active); + return mddev; +} + +static void mddev_put(mddev_t *mddev) +{ + if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock)) + return; + if (!mddev->raid_disks && list_empty(&mddev->disks)) { + list_del(&mddev->all_mddevs); + spin_unlock(&all_mddevs_lock); + blk_cleanup_queue(mddev->queue); + if (mddev->sysfs_state) + sysfs_put(mddev->sysfs_state); + mddev->sysfs_state = NULL; + kobject_put(&mddev->kobj); + } else + spin_unlock(&all_mddevs_lock); +} + +static mddev_t * mddev_find(dev_t unit) +{ + mddev_t *mddev, *new = NULL; + + retry: + spin_lock(&all_mddevs_lock); + list_for_each_entry(mddev, &all_mddevs, all_mddevs) + if (mddev->unit == unit) { + mddev_get(mddev); + spin_unlock(&all_mddevs_lock); + kfree(new); + return mddev; + } + + if (new) { + list_add(&new->all_mddevs, &all_mddevs); + spin_unlock(&all_mddevs_lock); + return new; + } + spin_unlock(&all_mddevs_lock); + + new = kzalloc(sizeof(*new), GFP_KERNEL); + if (!new) + return NULL; + + new->unit = unit; + if (MAJOR(unit) == MD_MAJOR) + new->md_minor = MINOR(unit); + else + new->md_minor = MINOR(unit) >> MdpMinorShift; + + mutex_init(&new->reconfig_mutex); + INIT_LIST_HEAD(&new->disks); + INIT_LIST_HEAD(&new->all_mddevs); + init_timer(&new->safemode_timer); + atomic_set(&new->active, 1); + atomic_set(&new->openers, 0); + spin_lock_init(&new->write_lock); + init_waitqueue_head(&new->sb_wait); + init_waitqueue_head(&new->recovery_wait); + new->reshape_position = MaxSector; + new->resync_min = 0; + new->resync_max = MaxSector; + new->level = LEVEL_NONE; + + new->queue = blk_alloc_queue(GFP_KERNEL); + if (!new->queue) { + kfree(new); + return NULL; + } + /* Can be unlocked because the queue is new: no concurrency */ + queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, new->queue); + + blk_queue_make_request(new->queue, md_fail_request); + + goto retry; +} + +static inline int mddev_lock(mddev_t * mddev) +{ + return mutex_lock_interruptible(&mddev->reconfig_mutex); +} + +static inline int mddev_trylock(mddev_t * mddev) +{ + return mutex_trylock(&mddev->reconfig_mutex); +} + +static inline void mddev_unlock(mddev_t * mddev) +{ + mutex_unlock(&mddev->reconfig_mutex); + + md_wakeup_thread(mddev->thread); +} + +static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr) +{ + mdk_rdev_t * rdev; + struct list_head *tmp; + + rdev_for_each(rdev, tmp, mddev) { + if (rdev->desc_nr == nr) + return rdev; + } + return NULL; +} + +static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev) +{ + struct list_head *tmp; + mdk_rdev_t *rdev; + + rdev_for_each(rdev, tmp, mddev) { + if (rdev->bdev->bd_dev == dev) + return rdev; + } + return NULL; +} + +static struct mdk_personality *find_pers(int level, char *clevel) +{ + struct mdk_personality *pers; + list_for_each_entry(pers, &pers_list, list) { + if (level != LEVEL_NONE && pers->level == level) + return pers; + if (strcmp(pers->name, clevel)==0) + return pers; + } + return NULL; +} + +/* return the 
offset of the super block in 512byte sectors */ +static inline sector_t calc_dev_sboffset(struct block_device *bdev) +{ + sector_t num_sectors = bdev->bd_inode->i_size / 512; + return MD_NEW_SIZE_SECTORS(num_sectors); +} + +static sector_t calc_num_sectors(mdk_rdev_t *rdev, unsigned chunk_size) +{ + sector_t num_sectors = rdev->sb_start; + + if (chunk_size) + num_sectors &= ~((sector_t)chunk_size/512 - 1); + return num_sectors; +} + +static int alloc_disk_sb(mdk_rdev_t * rdev) +{ + if (rdev->sb_page) + MD_BUG(); + + rdev->sb_page = alloc_page(GFP_KERNEL); + if (!rdev->sb_page) { + printk(KERN_ALERT "md: out of memory.\n"); + return -ENOMEM; + } + + return 0; +} + +static void free_disk_sb(mdk_rdev_t * rdev) +{ + if (rdev->sb_page) { + put_page(rdev->sb_page); + rdev->sb_loaded = 0; + rdev->sb_page = NULL; + rdev->sb_start = 0; + rdev->size = 0; + } +} + + +static void super_written(struct bio *bio, int error) +{ + mdk_rdev_t *rdev = bio->bi_private; + mddev_t *mddev = rdev->mddev; + + if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) { + printk("md: super_written gets error=%d, uptodate=%d\n", + error, test_bit(BIO_UPTODATE, &bio->bi_flags)); + WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags)); + md_error(mddev, rdev); + } + + if (atomic_dec_and_test(&mddev->pending_writes)) + wake_up(&mddev->sb_wait); + bio_put(bio); +} + +static void super_written_barrier(struct bio *bio, int error) +{ + struct bio *bio2 = bio->bi_private; + mdk_rdev_t *rdev = bio2->bi_private; + mddev_t *mddev = rdev->mddev; + + if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && + error == -EOPNOTSUPP) { + unsigned long flags; + /* barriers don't appear to be supported :-( */ + set_bit(BarriersNotsupp, &rdev->flags); + mddev->barriers_work = 0; + spin_lock_irqsave(&mddev->write_lock, flags); + bio2->bi_next = mddev->biolist; + mddev->biolist = bio2; + spin_unlock_irqrestore(&mddev->write_lock, flags); + wake_up(&mddev->sb_wait); + bio_put(bio); + } else { + bio_put(bio2); + bio->bi_private = rdev; + super_written(bio, error); + } +} + +void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, + sector_t sector, int size, struct page *page) +{ + /* write first size bytes of page to sector of rdev + * Increment mddev->pending_writes before returning + * and decrement it on completion, waking up sb_wait + * if zero is reached. + * If an error occurred, call md_error + * + * As we might need to resubmit the request if BIO_RW_BARRIER + * causes ENOTSUPP, we allocate a spare bio... + */ + struct bio *bio = bio_alloc(GFP_NOIO, 1); + int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC); + + bio->bi_bdev = rdev->bdev; + bio->bi_sector = sector; + bio_add_page(bio, page, size, 0); + bio->bi_private = rdev; + bio->bi_end_io = super_written; + bio->bi_rw = rw; + + atomic_inc(&mddev->pending_writes); + if (!test_bit(BarriersNotsupp, &rdev->flags)) { + struct bio *rbio; + rw |= (1<<BIO_RW_BARRIER); + rbio = bio_clone(bio, GFP_NOIO); + rbio->bi_private = bio; + rbio->bi_end_io = super_written_barrier; + submit_bio(rw, rbio); + } else + submit_bio(rw, bio); +} + +void md_super_wait(mddev_t *mddev) +{ + /* wait for all superblock writes that were scheduled to complete. 
+ * if any had to be retried (due to BARRIER problems), retry them + */ + DEFINE_WAIT(wq); + for(;;) { + prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE); + if (atomic_read(&mddev->pending_writes)==0) + break; + while (mddev->biolist) { + struct bio *bio; + spin_lock_irq(&mddev->write_lock); + bio = mddev->biolist; + mddev->biolist = bio->bi_next ; + bio->bi_next = NULL; + spin_unlock_irq(&mddev->write_lock); + submit_bio(bio->bi_rw, bio); + } + schedule(); + } + finish_wait(&mddev->sb_wait, &wq); +} + +static void bi_complete(struct bio *bio, int error) +{ + complete((struct completion*)bio->bi_private); +} + +int sync_page_io(struct block_device *bdev, sector_t sector, int size, + struct page *page, int rw) +{ + struct bio *bio = bio_alloc(GFP_NOIO, 1); + struct completion event; + int ret; + + rw |= (1 << BIO_RW_SYNC); + + bio->bi_bdev = bdev; + bio->bi_sector = sector; + bio_add_page(bio, page, size, 0); + init_completion(&event); + bio->bi_private = &event; + bio->bi_end_io = bi_complete; + submit_bio(rw, bio); + wait_for_completion(&event); + + ret = test_bit(BIO_UPTODATE, &bio->bi_flags); + bio_put(bio); + return ret; +} +EXPORT_SYMBOL_GPL(sync_page_io); + +static int read_disk_sb(mdk_rdev_t * rdev, int size) +{ + char b[BDEVNAME_SIZE]; + if (!rdev->sb_page) { + MD_BUG(); + return -EINVAL; + } + if (rdev->sb_loaded) + return 0; + + + if (!sync_page_io(rdev->bdev, rdev->sb_start, size, rdev->sb_page, READ)) + goto fail; + rdev->sb_loaded = 1; + return 0; + +fail: + printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n", + bdevname(rdev->bdev,b)); + return -EINVAL; +} + +static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2) +{ + return sb1->set_uuid0 == sb2->set_uuid0 && + sb1->set_uuid1 == sb2->set_uuid1 && + sb1->set_uuid2 == sb2->set_uuid2 && + sb1->set_uuid3 == sb2->set_uuid3; +} + +static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2) +{ + int ret; + mdp_super_t *tmp1, *tmp2; + + tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL); + tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL); + + if (!tmp1 || !tmp2) { + ret = 0; + printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n"); + goto abort; + } + + *tmp1 = *sb1; + *tmp2 = *sb2; + + /* + * nr_disks is not constant + */ + tmp1->nr_disks = 0; + tmp2->nr_disks = 0; + + ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0); +abort: + kfree(tmp1); + kfree(tmp2); + return ret; +} + + +static u32 md_csum_fold(u32 csum) +{ + csum = (csum & 0xffff) + (csum >> 16); + return (csum & 0xffff) + (csum >> 16); +} + +static unsigned int calc_sb_csum(mdp_super_t * sb) +{ + u64 newcsum = 0; + u32 *sb32 = (u32*)sb; + int i; + unsigned int disk_csum, csum; + + disk_csum = sb->sb_csum; + sb->sb_csum = 0; + + for (i = 0; i < MD_SB_BYTES/4 ; i++) + newcsum += sb32[i]; + csum = (newcsum & 0xffffffff) + (newcsum>>32); + + +#ifdef CONFIG_ALPHA + /* This used to use csum_partial, which was wrong for several + * reasons including that different results are returned on + * different architectures. It isn't critical that we get exactly + * the same return value as before (we always csum_fold before + * testing, and that removes any differences). However as we + * know that csum_partial always returned a 16bit value on + * alphas, do a fold to maximise conformity to previous behaviour. + */ + sb->sb_csum = md_csum_fold(disk_csum); +#else + sb->sb_csum = disk_csum; +#endif + return csum; +} + + +/* + * Handle superblock details. 
+ * We want to be able to handle multiple superblock formats + * so we have a common interface to them all, and an array of + * different handlers. + * We rely on user-space to write the initial superblock, and support + * reading and updating of superblocks. + * Interface methods are: + * int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version) + * loads and validates a superblock on dev. + * if refdev != NULL, compare superblocks on both devices + * Return: + * 0 - dev has a superblock that is compatible with refdev + * 1 - dev has a superblock that is compatible and newer than refdev + * so dev should be used as the refdev in future + * -EINVAL superblock incompatible or invalid + * -othererror e.g. -EIO + * + * int validate_super(mddev_t *mddev, mdk_rdev_t *dev) + * Verify that dev is acceptable into mddev. + * The first time, mddev->raid_disks will be 0, and data from + * dev should be merged in. Subsequent calls check that dev + * is new enough. Return 0 or -EINVAL + * + * void sync_super(mddev_t *mddev, mdk_rdev_t *dev) + * Update the superblock for rdev with data in mddev + * This does not write to disc. + * + */ + +struct super_type { + char *name; + struct module *owner; + int (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, + int minor_version); + int (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev); + void (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev); + unsigned long long (*rdev_size_change)(mdk_rdev_t *rdev, + sector_t num_sectors); +}; + +/* + * load_super for 0.90.0 + */ +static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) +{ + char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; + mdp_super_t *sb; + int ret; + + /* + * Calculate the position of the superblock (512byte sectors), + * it's at the end of the disk. + * + * It also happens to be a multiple of 4Kb. 
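+ *
+ * With the usual 64KiB reservation (MD_RESERVED_SECTORS == 128 in
+ * md_p.h) this works out to
+ *
+ *	sb_start = rounddown(device_sectors, 128) - 128
+ *
+ * so e.g. a 1GiB (2097152-sector) component puts its superblock at
+ * sector 2097024.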
+ */ + rdev->sb_start = calc_dev_sboffset(rdev->bdev); + + ret = read_disk_sb(rdev, MD_SB_BYTES); + if (ret) return ret; + + ret = -EINVAL; + + bdevname(rdev->bdev, b); + sb = (mdp_super_t*)page_address(rdev->sb_page); + + if (sb->md_magic != MD_SB_MAGIC) { + printk(KERN_ERR "md: invalid raid superblock magic on %s\n", + b); + goto abort; + } + + if (sb->major_version != 0 || + sb->minor_version < 90 || + sb->minor_version > 91) { + printk(KERN_WARNING "Bad version number %d.%d on %s\n", + sb->major_version, sb->minor_version, + b); + goto abort; + } + + if (sb->raid_disks <= 0) + goto abort; + + if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) { + printk(KERN_WARNING "md: invalid superblock checksum on %s\n", + b); + goto abort; + } + + rdev->preferred_minor = sb->md_minor; + rdev->data_offset = 0; + rdev->sb_size = MD_SB_BYTES; + + if (sb->state & (1<<MD_SB_BITMAP_PRESENT)) { + if (sb->level != 1 && sb->level != 4 + && sb->level != 5 && sb->level != 6 + && sb->level != 10) { + /* FIXME use a better test */ + printk(KERN_WARNING + "md: bitmaps not supported for this level.\n"); + goto abort; + } + } + + if (sb->level == LEVEL_MULTIPATH) + rdev->desc_nr = -1; + else + rdev->desc_nr = sb->this_disk.number; + + if (!refdev) { + ret = 1; + } else { + __u64 ev1, ev2; + mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page); + if (!uuid_equal(refsb, sb)) { + printk(KERN_WARNING "md: %s has different UUID to %s\n", + b, bdevname(refdev->bdev,b2)); + goto abort; + } + if (!sb_equal(refsb, sb)) { + printk(KERN_WARNING "md: %s has same UUID" + " but different superblock to %s\n", + b, bdevname(refdev->bdev, b2)); + goto abort; + } + ev1 = md_event(sb); + ev2 = md_event(refsb); + if (ev1 > ev2) + ret = 1; + else + ret = 0; + } + rdev->size = calc_num_sectors(rdev, sb->chunk_size) / 2; + + if (rdev->size < sb->size && sb->level > 1) + /* "this cannot possibly happen" ... 
*/ + ret = -EINVAL; + + abort: + return ret; +} + +/* + * validate_super for 0.90.0 + */ +static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) +{ + mdp_disk_t *desc; + mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page); + __u64 ev1 = md_event(sb); + + rdev->raid_disk = -1; + clear_bit(Faulty, &rdev->flags); + clear_bit(In_sync, &rdev->flags); + clear_bit(WriteMostly, &rdev->flags); + clear_bit(BarriersNotsupp, &rdev->flags); + + if (mddev->raid_disks == 0) { + mddev->major_version = 0; + mddev->minor_version = sb->minor_version; + mddev->patch_version = sb->patch_version; + mddev->external = 0; + mddev->chunk_size = sb->chunk_size; + mddev->ctime = sb->ctime; + mddev->utime = sb->utime; + mddev->level = sb->level; + mddev->clevel[0] = 0; + mddev->layout = sb->layout; + mddev->raid_disks = sb->raid_disks; + mddev->size = sb->size; + mddev->events = ev1; + mddev->bitmap_offset = 0; + mddev->default_bitmap_offset = MD_SB_BYTES >> 9; + + if (mddev->minor_version >= 91) { + mddev->reshape_position = sb->reshape_position; + mddev->delta_disks = sb->delta_disks; + mddev->new_level = sb->new_level; + mddev->new_layout = sb->new_layout; + mddev->new_chunk = sb->new_chunk; + } else { + mddev->reshape_position = MaxSector; + mddev->delta_disks = 0; + mddev->new_level = mddev->level; + mddev->new_layout = mddev->layout; + mddev->new_chunk = mddev->chunk_size; + } + + if (sb->state & (1<<MD_SB_CLEAN)) + mddev->recovery_cp = MaxSector; + else { + if (sb->events_hi == sb->cp_events_hi && + sb->events_lo == sb->cp_events_lo) { + mddev->recovery_cp = sb->recovery_cp; + } else + mddev->recovery_cp = 0; + } + + memcpy(mddev->uuid+0, &sb->set_uuid0, 4); + memcpy(mddev->uuid+4, &sb->set_uuid1, 4); + memcpy(mddev->uuid+8, &sb->set_uuid2, 4); + memcpy(mddev->uuid+12,&sb->set_uuid3, 4); + + mddev->max_disks = MD_SB_DISKS; + + if (sb->state & (1<<MD_SB_BITMAP_PRESENT) && + mddev->bitmap_file == NULL) + mddev->bitmap_offset = mddev->default_bitmap_offset; + + } else if (mddev->pers == NULL) { + /* Insist on good event counter while assembling */ + ++ev1; + if (ev1 < mddev->events) + return -EINVAL; + } else if (mddev->bitmap) { + /* if adding to array with a bitmap, then we can accept an + * older device ... but not too old. + */ + if (ev1 < mddev->bitmap->events_cleared) + return 0; + } else { + if (ev1 < mddev->events) + /* just a hot-add of a new device, leave raid_disk at -1 */ + return 0; + } + + if (mddev->level != LEVEL_MULTIPATH) { + desc = sb->disks + rdev->desc_nr; + + if (desc->state & (1<<MD_DISK_FAULTY)) + set_bit(Faulty, &rdev->flags); + else if (desc->state & (1<<MD_DISK_SYNC) /* && + desc->raid_disk < mddev->raid_disks */) { + set_bit(In_sync, &rdev->flags); + rdev->raid_disk = desc->raid_disk; + } + if (desc->state & (1<<MD_DISK_WRITEMOSTLY)) + set_bit(WriteMostly, &rdev->flags); + } else /* MULTIPATH are always insync */ + set_bit(In_sync, &rdev->flags); + return 0; +} + +/* + * sync_super for 0.90.0 + */ +static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) +{ + mdp_super_t *sb; + struct list_head *tmp; + mdk_rdev_t *rdev2; + int next_spare = mddev->raid_disks; + + + /* make rdev->sb match mddev data.. + * + * 1/ zero out disks + * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare); + * 3/ any empty disks < next_spare become removed + * + * disks[0] gets initialised to REMOVED because + * we cannot be sure from other fields if it has + * been initialised or not. 
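+ *
+ * An active, in-sync device keeps its raid_disk number as desc_nr;
+ * everything else (spares, faulty devices) is given the next free
+ * slot at or above raid_disks via next_spare.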
+ */ + int i; + int active=0, working=0,failed=0,spare=0,nr_disks=0; + + rdev->sb_size = MD_SB_BYTES; + + sb = (mdp_super_t*)page_address(rdev->sb_page); + + memset(sb, 0, sizeof(*sb)); + + sb->md_magic = MD_SB_MAGIC; + sb->major_version = mddev->major_version; + sb->patch_version = mddev->patch_version; + sb->gvalid_words = 0; /* ignored */ + memcpy(&sb->set_uuid0, mddev->uuid+0, 4); + memcpy(&sb->set_uuid1, mddev->uuid+4, 4); + memcpy(&sb->set_uuid2, mddev->uuid+8, 4); + memcpy(&sb->set_uuid3, mddev->uuid+12,4); + + sb->ctime = mddev->ctime; + sb->level = mddev->level; + sb->size = mddev->size; + sb->raid_disks = mddev->raid_disks; + sb->md_minor = mddev->md_minor; + sb->not_persistent = 0; + sb->utime = mddev->utime; + sb->state = 0; + sb->events_hi = (mddev->events>>32); + sb->events_lo = (u32)mddev->events; + + if (mddev->reshape_position == MaxSector) + sb->minor_version = 90; + else { + sb->minor_version = 91; + sb->reshape_position = mddev->reshape_position; + sb->new_level = mddev->new_level; + sb->delta_disks = mddev->delta_disks; + sb->new_layout = mddev->new_layout; + sb->new_chunk = mddev->new_chunk; + } + mddev->minor_version = sb->minor_version; + if (mddev->in_sync) + { + sb->recovery_cp = mddev->recovery_cp; + sb->cp_events_hi = (mddev->events>>32); + sb->cp_events_lo = (u32)mddev->events; + if (mddev->recovery_cp == MaxSector) + sb->state = (1<< MD_SB_CLEAN); + } else + sb->recovery_cp = 0; + + sb->layout = mddev->layout; + sb->chunk_size = mddev->chunk_size; + + if (mddev->bitmap && mddev->bitmap_file == NULL) + sb->state |= (1<<MD_SB_BITMAP_PRESENT); + + sb->disks[0].state = (1<<MD_DISK_REMOVED); + rdev_for_each(rdev2, tmp, mddev) { + mdp_disk_t *d; + int desc_nr; + if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags) + && !test_bit(Faulty, &rdev2->flags)) + desc_nr = rdev2->raid_disk; + else + desc_nr = next_spare++; + rdev2->desc_nr = desc_nr; + d = &sb->disks[rdev2->desc_nr]; + nr_disks++; + d->number = rdev2->desc_nr; + d->major = MAJOR(rdev2->bdev->bd_dev); + d->minor = MINOR(rdev2->bdev->bd_dev); + if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags) + && !test_bit(Faulty, &rdev2->flags)) + d->raid_disk = rdev2->raid_disk; + else + d->raid_disk = rdev2->desc_nr; /* compatibility */ + if (test_bit(Faulty, &rdev2->flags)) + d->state = (1<<MD_DISK_FAULTY); + else if (test_bit(In_sync, &rdev2->flags)) { + d->state = (1<<MD_DISK_ACTIVE); + d->state |= (1<<MD_DISK_SYNC); + active++; + working++; + } else { + d->state = 0; + spare++; + working++; + } + if (test_bit(WriteMostly, &rdev2->flags)) + d->state |= (1<<MD_DISK_WRITEMOSTLY); + } + /* now set the "removed" and "faulty" bits on any missing devices */ + for (i=0 ; i < mddev->raid_disks ; i++) { + mdp_disk_t *d = &sb->disks[i]; + if (d->state == 0 && d->number == 0) { + d->number = i; + d->raid_disk = i; + d->state = (1<<MD_DISK_REMOVED); + d->state |= (1<<MD_DISK_FAULTY); + failed++; + } + } + sb->nr_disks = nr_disks; + sb->active_disks = active; + sb->working_disks = working; + sb->failed_disks = failed; + sb->spare_disks = spare; + + sb->this_disk = sb->disks[rdev->desc_nr]; + sb->sb_csum = calc_sb_csum(sb); +} + +/* + * rdev_size_change for 0.90.0 + */ +static unsigned long long +super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors) +{ + if (num_sectors && num_sectors < rdev->mddev->size * 2) + return 0; /* component must fit device */ + if (rdev->mddev->bitmap_offset) + return 0; /* can't move bitmap */ + rdev->sb_start = calc_dev_sboffset(rdev->bdev); + if (!num_sectors || 
num_sectors > rdev->sb_start) + num_sectors = rdev->sb_start; + md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, + rdev->sb_page); + md_super_wait(rdev->mddev); + return num_sectors / 2; /* kB for sysfs */ +} + + +/* + * version 1 superblock + */ + +static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb) +{ + __le32 disk_csum; + u32 csum; + unsigned long long newcsum; + int size = 256 + le32_to_cpu(sb->max_dev)*2; + __le32 *isuper = (__le32*)sb; + int i; + + disk_csum = sb->sb_csum; + sb->sb_csum = 0; + newcsum = 0; + for (i=0; size>=4; size -= 4 ) + newcsum += le32_to_cpu(*isuper++); + + if (size == 2) + newcsum += le16_to_cpu(*(__le16*) isuper); + + csum = (newcsum & 0xffffffff) + (newcsum >> 32); + sb->sb_csum = disk_csum; + return cpu_to_le32(csum); +} + +static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) +{ + struct mdp_superblock_1 *sb; + int ret; + sector_t sb_start; + char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; + int bmask; + + /* + * Calculate the position of the superblock in 512byte sectors. + * It is always aligned to a 4K boundary and + * depeding on minor_version, it can be: + * 0: At least 8K, but less than 12K, from end of device + * 1: At start of device + * 2: 4K from start of device. + */ + switch(minor_version) { + case 0: + sb_start = rdev->bdev->bd_inode->i_size >> 9; + sb_start -= 8*2; + sb_start &= ~(sector_t)(4*2-1); + break; + case 1: + sb_start = 0; + break; + case 2: + sb_start = 8; + break; + default: + return -EINVAL; + } + rdev->sb_start = sb_start; + + /* superblock is rarely larger than 1K, but it can be larger, + * and it is safe to read 4k, so we do that + */ + ret = read_disk_sb(rdev, 4096); + if (ret) return ret; + + + sb = (struct mdp_superblock_1*)page_address(rdev->sb_page); + + if (sb->magic != cpu_to_le32(MD_SB_MAGIC) || + sb->major_version != cpu_to_le32(1) || + le32_to_cpu(sb->max_dev) > (4096-256)/2 || + le64_to_cpu(sb->super_offset) != rdev->sb_start || + (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0) + return -EINVAL; + + if (calc_sb_1_csum(sb) != sb->sb_csum) { + printk("md: invalid superblock checksum on %s\n", + bdevname(rdev->bdev,b)); + return -EINVAL; + } + if (le64_to_cpu(sb->data_size) < 10) { + printk("md: data_size too small on %s\n", + bdevname(rdev->bdev,b)); + return -EINVAL; + } + if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET)) { + if (sb->level != cpu_to_le32(1) && + sb->level != cpu_to_le32(4) && + sb->level != cpu_to_le32(5) && + sb->level != cpu_to_le32(6) && + sb->level != cpu_to_le32(10)) { + printk(KERN_WARNING + "md: bitmaps not supported for this level.\n"); + return -EINVAL; + } + } + + rdev->preferred_minor = 0xffff; + rdev->data_offset = le64_to_cpu(sb->data_offset); + atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); + + rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; + bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1; + if (rdev->sb_size & bmask) + rdev->sb_size = (rdev->sb_size | bmask) + 1; + + if (minor_version + && rdev->data_offset < sb_start + (rdev->sb_size/512)) + return -EINVAL; + + if (sb->level == cpu_to_le32(LEVEL_MULTIPATH)) + rdev->desc_nr = -1; + else + rdev->desc_nr = le32_to_cpu(sb->dev_number); + + if (!refdev) { + ret = 1; + } else { + __u64 ev1, ev2; + struct mdp_superblock_1 *refsb = + (struct mdp_superblock_1*)page_address(refdev->sb_page); + + if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 || + sb->level != refsb->level || + sb->layout != refsb->layout || + sb->chunksize != 
refsb->chunksize) { + printk(KERN_WARNING "md: %s has strangely different" + " superblock to %s\n", + bdevname(rdev->bdev,b), + bdevname(refdev->bdev,b2)); + return -EINVAL; + } + ev1 = le64_to_cpu(sb->events); + ev2 = le64_to_cpu(refsb->events); + + if (ev1 > ev2) + ret = 1; + else + ret = 0; + } + if (minor_version) + rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2; + else + rdev->size = rdev->sb_start / 2; + if (rdev->size < le64_to_cpu(sb->data_size)/2) + return -EINVAL; + rdev->size = le64_to_cpu(sb->data_size)/2; + if (le32_to_cpu(sb->chunksize)) + rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1); + + if (le64_to_cpu(sb->size) > rdev->size*2) + return -EINVAL; + return ret; +} + +static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev) +{ + struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page); + __u64 ev1 = le64_to_cpu(sb->events); + + rdev->raid_disk = -1; + clear_bit(Faulty, &rdev->flags); + clear_bit(In_sync, &rdev->flags); + clear_bit(WriteMostly, &rdev->flags); + clear_bit(BarriersNotsupp, &rdev->flags); + + if (mddev->raid_disks == 0) { + mddev->major_version = 1; + mddev->patch_version = 0; + mddev->external = 0; + mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9; + mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1); + mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1); + mddev->level = le32_to_cpu(sb->level); + mddev->clevel[0] = 0; + mddev->layout = le32_to_cpu(sb->layout); + mddev->raid_disks = le32_to_cpu(sb->raid_disks); + mddev->size = le64_to_cpu(sb->size)/2; + mddev->events = ev1; + mddev->bitmap_offset = 0; + mddev->default_bitmap_offset = 1024 >> 9; + + mddev->recovery_cp = le64_to_cpu(sb->resync_offset); + memcpy(mddev->uuid, sb->set_uuid, 16); + + mddev->max_disks = (4096-256)/2; + + if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) && + mddev->bitmap_file == NULL ) + mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset); + + if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) { + mddev->reshape_position = le64_to_cpu(sb->reshape_position); + mddev->delta_disks = le32_to_cpu(sb->delta_disks); + mddev->new_level = le32_to_cpu(sb->new_level); + mddev->new_layout = le32_to_cpu(sb->new_layout); + mddev->new_chunk = le32_to_cpu(sb->new_chunk)<<9; + } else { + mddev->reshape_position = MaxSector; + mddev->delta_disks = 0; + mddev->new_level = mddev->level; + mddev->new_layout = mddev->layout; + mddev->new_chunk = mddev->chunk_size; + } + + } else if (mddev->pers == NULL) { + /* Insist of good event counter while assembling */ + ++ev1; + if (ev1 < mddev->events) + return -EINVAL; + } else if (mddev->bitmap) { + /* If adding to array with a bitmap, then we can accept an + * older device, but not too old. 
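+		 * ("too old" means its event count is below
+		 * bitmap->events_cleared, i.e. the bitmap may already have
+		 * forgotten writes this device missed)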
+ */ + if (ev1 < mddev->bitmap->events_cleared) + return 0; + } else { + if (ev1 < mddev->events) + /* just a hot-add of a new device, leave raid_disk at -1 */ + return 0; + } + if (mddev->level != LEVEL_MULTIPATH) { + int role; + role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); + switch(role) { + case 0xffff: /* spare */ + break; + case 0xfffe: /* faulty */ + set_bit(Faulty, &rdev->flags); + break; + default: + if ((le32_to_cpu(sb->feature_map) & + MD_FEATURE_RECOVERY_OFFSET)) + rdev->recovery_offset = le64_to_cpu(sb->recovery_offset); + else + set_bit(In_sync, &rdev->flags); + rdev->raid_disk = role; + break; + } + if (sb->devflags & WriteMostly1) + set_bit(WriteMostly, &rdev->flags); + } else /* MULTIPATH are always insync */ + set_bit(In_sync, &rdev->flags); + + return 0; +} + +static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) +{ + struct mdp_superblock_1 *sb; + struct list_head *tmp; + mdk_rdev_t *rdev2; + int max_dev, i; + /* make rdev->sb match mddev and rdev data. */ + + sb = (struct mdp_superblock_1*)page_address(rdev->sb_page); + + sb->feature_map = 0; + sb->pad0 = 0; + sb->recovery_offset = cpu_to_le64(0); + memset(sb->pad1, 0, sizeof(sb->pad1)); + memset(sb->pad2, 0, sizeof(sb->pad2)); + memset(sb->pad3, 0, sizeof(sb->pad3)); + + sb->utime = cpu_to_le64((__u64)mddev->utime); + sb->events = cpu_to_le64(mddev->events); + if (mddev->in_sync) + sb->resync_offset = cpu_to_le64(mddev->recovery_cp); + else + sb->resync_offset = cpu_to_le64(0); + + sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors)); + + sb->raid_disks = cpu_to_le32(mddev->raid_disks); + sb->size = cpu_to_le64(mddev->size<<1); + + if (mddev->bitmap && mddev->bitmap_file == NULL) { + sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset); + sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); + } + + if (rdev->raid_disk >= 0 && + !test_bit(In_sync, &rdev->flags) && + rdev->recovery_offset > 0) { + sb->feature_map |= cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET); + sb->recovery_offset = cpu_to_le64(rdev->recovery_offset); + } + + if (mddev->reshape_position != MaxSector) { + sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE); + sb->reshape_position = cpu_to_le64(mddev->reshape_position); + sb->new_layout = cpu_to_le32(mddev->new_layout); + sb->delta_disks = cpu_to_le32(mddev->delta_disks); + sb->new_level = cpu_to_le32(mddev->new_level); + sb->new_chunk = cpu_to_le32(mddev->new_chunk>>9); + } + + max_dev = 0; + rdev_for_each(rdev2, tmp, mddev) + if (rdev2->desc_nr+1 > max_dev) + max_dev = rdev2->desc_nr+1; + + if (max_dev > le32_to_cpu(sb->max_dev)) + sb->max_dev = cpu_to_le32(max_dev); + for (i=0; i<max_dev;i++) + sb->dev_roles[i] = cpu_to_le16(0xfffe); + + rdev_for_each(rdev2, tmp, mddev) { + i = rdev2->desc_nr; + if (test_bit(Faulty, &rdev2->flags)) + sb->dev_roles[i] = cpu_to_le16(0xfffe); + else if (test_bit(In_sync, &rdev2->flags)) + sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); + else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0) + sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); + else + sb->dev_roles[i] = cpu_to_le16(0xffff); + } + + sb->sb_csum = calc_sb_1_csum(sb); +} + +static unsigned long long +super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors) +{ + struct mdp_superblock_1 *sb; + sector_t max_sectors; + if (num_sectors && num_sectors < rdev->mddev->size * 2) + return 0; /* component must fit device */ + if (rdev->sb_start < rdev->data_offset) { + /* minor versions 1 and 2; superblock before data */ + max_sectors = 
rdev->bdev->bd_inode->i_size >> 9; + max_sectors -= rdev->data_offset; + if (!num_sectors || num_sectors > max_sectors) + num_sectors = max_sectors; + } else if (rdev->mddev->bitmap_offset) { + /* minor version 0 with bitmap we can't move */ + return 0; + } else { + /* minor version 0; superblock after data */ + sector_t sb_start; + sb_start = (rdev->bdev->bd_inode->i_size >> 9) - 8*2; + sb_start &= ~(sector_t)(4*2 - 1); + max_sectors = rdev->size * 2 + sb_start - rdev->sb_start; + if (!num_sectors || num_sectors > max_sectors) + num_sectors = max_sectors; + rdev->sb_start = sb_start; + } + sb = (struct mdp_superblock_1 *) page_address(rdev->sb_page); + sb->data_size = cpu_to_le64(num_sectors); + sb->super_offset = rdev->sb_start; + sb->sb_csum = calc_sb_1_csum(sb); + md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, + rdev->sb_page); + md_super_wait(rdev->mddev); + return num_sectors / 2; /* kB for sysfs */ +} + +static struct super_type super_types[] = { + [0] = { + .name = "0.90.0", + .owner = THIS_MODULE, + .load_super = super_90_load, + .validate_super = super_90_validate, + .sync_super = super_90_sync, + .rdev_size_change = super_90_rdev_size_change, + }, + [1] = { + .name = "md-1", + .owner = THIS_MODULE, + .load_super = super_1_load, + .validate_super = super_1_validate, + .sync_super = super_1_sync, + .rdev_size_change = super_1_rdev_size_change, + }, +}; + +static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2) +{ + mdk_rdev_t *rdev, *rdev2; + + rcu_read_lock(); + rdev_for_each_rcu(rdev, mddev1) + rdev_for_each_rcu(rdev2, mddev2) + if (rdev->bdev->bd_contains == + rdev2->bdev->bd_contains) { + rcu_read_unlock(); + return 1; + } + rcu_read_unlock(); + return 0; +} + +static LIST_HEAD(pending_raid_disks); + +static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) +{ + char b[BDEVNAME_SIZE]; + struct kobject *ko; + char *s; + int err; + + if (rdev->mddev) { + MD_BUG(); + return -EINVAL; + } + + /* prevent duplicates */ + if (find_rdev(mddev, rdev->bdev->bd_dev)) + return -EEXIST; + + /* make sure rdev->size exceeds mddev->size */ + if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) { + if (mddev->pers) { + /* Cannot change size, so fail + * If mddev->level <= 0, then we don't care + * about aligning sizes (e.g. linear) + */ + if (mddev->level > 0) + return -ENOSPC; + } else + mddev->size = rdev->size; + } + + /* Verify rdev->desc_nr is unique. 
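+	 * (desc_nr is the device's slot in the on-disk superblock: the
+	 * disks[] index for 0.90 and the dev_roles[] index for v1.)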
+ * If it is -1, assign a free number, else + * check number is not in use + */ + if (rdev->desc_nr < 0) { + int choice = 0; + if (mddev->pers) choice = mddev->raid_disks; + while (find_rdev_nr(mddev, choice)) + choice++; + rdev->desc_nr = choice; + } else { + if (find_rdev_nr(mddev, rdev->desc_nr)) + return -EBUSY; + } + if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) { + printk(KERN_WARNING "md: %s: array is limited to %d devices\n", + mdname(mddev), mddev->max_disks); + return -EBUSY; + } + bdevname(rdev->bdev,b); + while ( (s=strchr(b, '/')) != NULL) + *s = '!'; + + rdev->mddev = mddev; + printk(KERN_INFO "md: bind<%s>\n", b); + + if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b))) + goto fail; + + ko = &part_to_dev(rdev->bdev->bd_part)->kobj; + if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) { + kobject_del(&rdev->kobj); + goto fail; + } + rdev->sysfs_state = sysfs_get_dirent(rdev->kobj.sd, "state"); + + list_add_rcu(&rdev->same_set, &mddev->disks); + bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk); + return 0; + + fail: + printk(KERN_WARNING "md: failed to register dev-%s for %s\n", + b, mdname(mddev)); + return err; +} + +static void md_delayed_delete(struct work_struct *ws) +{ + mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work); + kobject_del(&rdev->kobj); + kobject_put(&rdev->kobj); +} + +static void unbind_rdev_from_array(mdk_rdev_t * rdev) +{ + char b[BDEVNAME_SIZE]; + if (!rdev->mddev) { + MD_BUG(); + return; + } + bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk); + list_del_rcu(&rdev->same_set); + printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b)); + rdev->mddev = NULL; + sysfs_remove_link(&rdev->kobj, "block"); + sysfs_put(rdev->sysfs_state); + rdev->sysfs_state = NULL; + /* We need to delay this, otherwise we can deadlock when + * writing to 'remove' to "dev/state". We also need + * to delay it due to rcu usage. + */ + synchronize_rcu(); + INIT_WORK(&rdev->del_work, md_delayed_delete); + kobject_get(&rdev->kobj); + schedule_work(&rdev->del_work); +} + +/* + * prevent the device from being mounted, repartitioned or + * otherwise reused by a RAID array (or any other kernel + * subsystem), by bd_claiming the device. + */ +static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared) +{ + int err = 0; + struct block_device *bdev; + char b[BDEVNAME_SIZE]; + + bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE); + if (IS_ERR(bdev)) { + printk(KERN_ERR "md: could not open %s.\n", + __bdevname(dev, b)); + return PTR_ERR(bdev); + } + err = bd_claim(bdev, shared ? 
(mdk_rdev_t *)lock_rdev : rdev); + if (err) { + printk(KERN_ERR "md: could not bd_claim %s.\n", + bdevname(bdev, b)); + blkdev_put(bdev, FMODE_READ|FMODE_WRITE); + return err; + } + if (!shared) + set_bit(AllReserved, &rdev->flags); + rdev->bdev = bdev; + return err; +} + +static void unlock_rdev(mdk_rdev_t *rdev) +{ + struct block_device *bdev = rdev->bdev; + rdev->bdev = NULL; + if (!bdev) + MD_BUG(); + bd_release(bdev); + blkdev_put(bdev, FMODE_READ|FMODE_WRITE); +} + +void md_autodetect_dev(dev_t dev); + +static void export_rdev(mdk_rdev_t * rdev) +{ + char b[BDEVNAME_SIZE]; + printk(KERN_INFO "md: export_rdev(%s)\n", + bdevname(rdev->bdev,b)); + if (rdev->mddev) + MD_BUG(); + free_disk_sb(rdev); +#ifndef MODULE + if (test_bit(AutoDetected, &rdev->flags)) + md_autodetect_dev(rdev->bdev->bd_dev); +#endif + unlock_rdev(rdev); + kobject_put(&rdev->kobj); +} + +static void kick_rdev_from_array(mdk_rdev_t * rdev) +{ + unbind_rdev_from_array(rdev); + export_rdev(rdev); +} + +static void export_array(mddev_t *mddev) +{ + struct list_head *tmp; + mdk_rdev_t *rdev; + + rdev_for_each(rdev, tmp, mddev) { + if (!rdev->mddev) { + MD_BUG(); + continue; + } + kick_rdev_from_array(rdev); + } + if (!list_empty(&mddev->disks)) + MD_BUG(); + mddev->raid_disks = 0; + mddev->major_version = 0; +} + +static void print_desc(mdp_disk_t *desc) +{ + printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number, + desc->major,desc->minor,desc->raid_disk,desc->state); +} + +static void print_sb(mdp_super_t *sb) +{ + int i; + + printk(KERN_INFO + "md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n", + sb->major_version, sb->minor_version, sb->patch_version, + sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3, + sb->ctime); + printk(KERN_INFO "md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n", + sb->level, sb->size, sb->nr_disks, sb->raid_disks, + sb->md_minor, sb->layout, sb->chunk_size); + printk(KERN_INFO "md: UT:%08x ST:%d AD:%d WD:%d" + " FD:%d SD:%d CSUM:%08x E:%08lx\n", + sb->utime, sb->state, sb->active_disks, sb->working_disks, + sb->failed_disks, sb->spare_disks, + sb->sb_csum, (unsigned long)sb->events_lo); + + printk(KERN_INFO); + for (i = 0; i < MD_SB_DISKS; i++) { + mdp_disk_t *desc; + + desc = sb->disks + i; + if (desc->number || desc->major || desc->minor || + desc->raid_disk || (desc->state && (desc->state != 4))) { + printk(" D %2d: ", i); + print_desc(desc); + } + } + printk(KERN_INFO "md: THIS: "); + print_desc(&sb->this_disk); + +} + +static void print_rdev(mdk_rdev_t *rdev) +{ + char b[BDEVNAME_SIZE]; + printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n", + bdevname(rdev->bdev,b), (unsigned long long)rdev->size, + test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags), + rdev->desc_nr); + if (rdev->sb_loaded) { + printk(KERN_INFO "md: rdev superblock:\n"); + print_sb((mdp_super_t*)page_address(rdev->sb_page)); + } else + printk(KERN_INFO "md: no rdev superblock!\n"); +} + +static void md_print_devices(void) +{ + struct list_head *tmp, *tmp2; + mdk_rdev_t *rdev; + mddev_t *mddev; + char b[BDEVNAME_SIZE]; + + printk("\n"); + printk("md: **********************************\n"); + printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n"); + printk("md: **********************************\n"); + for_each_mddev(mddev, tmp) { + + if (mddev->bitmap) + bitmap_print_sb(mddev->bitmap); + else + printk("%s: ", mdname(mddev)); + rdev_for_each(rdev, tmp2, mddev) + printk("<%s>", bdevname(rdev->bdev,b)); + printk("\n"); + + rdev_for_each(rdev, tmp2, mddev) + print_rdev(rdev); + } + printk("md: 
**********************************\n"); + printk("\n"); +} + + +static void sync_sbs(mddev_t * mddev, int nospares) +{ + /* Update each superblock (in-memory image), but + * if we are allowed to, skip spares which already + * have the right event counter, or have one earlier + * (which would mean they aren't being marked as dirty + * with the rest of the array) + */ + mdk_rdev_t *rdev; + struct list_head *tmp; + + rdev_for_each(rdev, tmp, mddev) { + if (rdev->sb_events == mddev->events || + (nospares && + rdev->raid_disk < 0 && + (rdev->sb_events&1)==0 && + rdev->sb_events+1 == mddev->events)) { + /* Don't update this superblock */ + rdev->sb_loaded = 2; + } else { + super_types[mddev->major_version]. + sync_super(mddev, rdev); + rdev->sb_loaded = 1; + } + } +} + +static void md_update_sb(mddev_t * mddev, int force_change) +{ + struct list_head *tmp; + mdk_rdev_t *rdev; + int sync_req; + int nospares = 0; + + if (mddev->external) + return; +repeat: + spin_lock_irq(&mddev->write_lock); + + set_bit(MD_CHANGE_PENDING, &mddev->flags); + if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags)) + force_change = 1; + if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags)) + /* just a clean<-> dirty transition, possibly leave spares alone, + * though if events isn't the right even/odd, we will have to do + * spares after all + */ + nospares = 1; + if (force_change) + nospares = 0; + if (mddev->degraded) + /* If the array is degraded, then skipping spares is both + * dangerous and fairly pointless. + * Dangerous because a device that was removed from the array + * might have a event_count that still looks up-to-date, + * so it can be re-added without a resync. + * Pointless because if there are any spares to skip, + * then a recovery will happen and soon that array won't + * be degraded any more and the spare can go back to sleep then. + */ + nospares = 0; + + sync_req = mddev->in_sync; + mddev->utime = get_seconds(); + + /* If this is just a dirty<->clean transition, and the array is clean + * and 'events' is odd, we can roll back to the previous clean state */ + if (nospares + && (mddev->in_sync && mddev->recovery_cp == MaxSector) + && (mddev->events & 1) + && mddev->events != 1) + mddev->events--; + else { + /* otherwise we have to go forward and ... */ + mddev->events ++; + if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */ + /* .. if the array isn't clean, insist on an odd 'events' */ + if ((mddev->events&1)==0) { + mddev->events++; + nospares = 0; + } + } else { + /* otherwise insist on an even 'events' (for clean states) */ + if ((mddev->events&1)) { + mddev->events++; + nospares = 0; + } + } + } + + if (!mddev->events) { + /* + * oops, this 64-bit counter should never wrap. 
+ * Either we are in around ~1 trillion A.C., assuming + * 1 reboot per second, or we have a bug: + */ + MD_BUG(); + mddev->events --; + } + + /* + * do not write anything to disk if using + * nonpersistent superblocks + */ + if (!mddev->persistent) { + if (!mddev->external) + clear_bit(MD_CHANGE_PENDING, &mddev->flags); + + spin_unlock_irq(&mddev->write_lock); + wake_up(&mddev->sb_wait); + return; + } + sync_sbs(mddev, nospares); + spin_unlock_irq(&mddev->write_lock); + + dprintk(KERN_INFO + "md: updating %s RAID superblock on device (in sync %d)\n", + mdname(mddev),mddev->in_sync); + + bitmap_update_sb(mddev->bitmap); + rdev_for_each(rdev, tmp, mddev) { + char b[BDEVNAME_SIZE]; + dprintk(KERN_INFO "md: "); + if (rdev->sb_loaded != 1) + continue; /* no noise on spare devices */ + if (test_bit(Faulty, &rdev->flags)) + dprintk("(skipping faulty "); + + dprintk("%s ", bdevname(rdev->bdev,b)); + if (!test_bit(Faulty, &rdev->flags)) { + md_super_write(mddev,rdev, + rdev->sb_start, rdev->sb_size, + rdev->sb_page); + dprintk(KERN_INFO "(write) %s's sb offset: %llu\n", + bdevname(rdev->bdev,b), + (unsigned long long)rdev->sb_start); + rdev->sb_events = mddev->events; + + } else + dprintk(")\n"); + if (mddev->level == LEVEL_MULTIPATH) + /* only need to write one superblock... */ + break; + } + md_super_wait(mddev); + /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */ + + spin_lock_irq(&mddev->write_lock); + if (mddev->in_sync != sync_req || + test_bit(MD_CHANGE_DEVS, &mddev->flags)) { + /* have to write it out again */ + spin_unlock_irq(&mddev->write_lock); + goto repeat; + } + clear_bit(MD_CHANGE_PENDING, &mddev->flags); + spin_unlock_irq(&mddev->write_lock); + wake_up(&mddev->sb_wait); + +} + +/* words written to sysfs files may, or may not, be \n terminated. + * We want to accept with case. For this we use cmd_match. + */ +static int cmd_match(const char *cmd, const char *str) +{ + /* See if cmd, written into a sysfs file, matches + * str. 
They must either be the same, or cmd can + * have a trailing newline + */ + while (*cmd && *str && *cmd == *str) { + cmd++; + str++; + } + if (*cmd == '\n') + cmd++; + if (*str || *cmd) + return 0; + return 1; +} + +struct rdev_sysfs_entry { + struct attribute attr; + ssize_t (*show)(mdk_rdev_t *, char *); + ssize_t (*store)(mdk_rdev_t *, const char *, size_t); +}; + +static ssize_t +state_show(mdk_rdev_t *rdev, char *page) +{ + char *sep = ""; + size_t len = 0; + + if (test_bit(Faulty, &rdev->flags)) { + len+= sprintf(page+len, "%sfaulty",sep); + sep = ","; + } + if (test_bit(In_sync, &rdev->flags)) { + len += sprintf(page+len, "%sin_sync",sep); + sep = ","; + } + if (test_bit(WriteMostly, &rdev->flags)) { + len += sprintf(page+len, "%swrite_mostly",sep); + sep = ","; + } + if (test_bit(Blocked, &rdev->flags)) { + len += sprintf(page+len, "%sblocked", sep); + sep = ","; + } + if (!test_bit(Faulty, &rdev->flags) && + !test_bit(In_sync, &rdev->flags)) { + len += sprintf(page+len, "%sspare", sep); + sep = ","; + } + return len+sprintf(page+len, "\n"); +} + +static ssize_t +state_store(mdk_rdev_t *rdev, const char *buf, size_t len) +{ + /* can write + * faulty - simulates and error + * remove - disconnects the device + * writemostly - sets write_mostly + * -writemostly - clears write_mostly + * blocked - sets the Blocked flag + * -blocked - clears the Blocked flag + */ + int err = -EINVAL; + if (cmd_match(buf, "faulty") && rdev->mddev->pers) { + md_error(rdev->mddev, rdev); + err = 0; + } else if (cmd_match(buf, "remove")) { + if (rdev->raid_disk >= 0) + err = -EBUSY; + else { + mddev_t *mddev = rdev->mddev; + kick_rdev_from_array(rdev); + if (mddev->pers) + md_update_sb(mddev, 1); + md_new_event(mddev); + err = 0; + } + } else if (cmd_match(buf, "writemostly")) { + set_bit(WriteMostly, &rdev->flags); + err = 0; + } else if (cmd_match(buf, "-writemostly")) { + clear_bit(WriteMostly, &rdev->flags); + err = 0; + } else if (cmd_match(buf, "blocked")) { + set_bit(Blocked, &rdev->flags); + err = 0; + } else if (cmd_match(buf, "-blocked")) { + clear_bit(Blocked, &rdev->flags); + wake_up(&rdev->blocked_wait); + set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); + md_wakeup_thread(rdev->mddev->thread); + + err = 0; + } + if (!err && rdev->sysfs_state) + sysfs_notify_dirent(rdev->sysfs_state); + return err ? 
err : len; +} +static struct rdev_sysfs_entry rdev_state = +__ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store); + +static ssize_t +errors_show(mdk_rdev_t *rdev, char *page) +{ + return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); +} + +static ssize_t +errors_store(mdk_rdev_t *rdev, const char *buf, size_t len) +{ + char *e; + unsigned long n = simple_strtoul(buf, &e, 10); + if (*buf && (*e == 0 || *e == '\n')) { + atomic_set(&rdev->corrected_errors, n); + return len; + } + return -EINVAL; +} +static struct rdev_sysfs_entry rdev_errors = +__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store); + +static ssize_t +slot_show(mdk_rdev_t *rdev, char *page) +{ + if (rdev->raid_disk < 0) + return sprintf(page, "none\n"); + else + return sprintf(page, "%d\n", rdev->raid_disk); +} + +static ssize_t +slot_store(mdk_rdev_t *rdev, const char *buf, size_t len) +{ + char *e; + int err; + char nm[20]; + int slot = simple_strtoul(buf, &e, 10); + if (strncmp(buf, "none", 4)==0) + slot = -1; + else if (e==buf || (*e && *e!= '\n')) + return -EINVAL; + if (rdev->mddev->pers && slot == -1) { + /* Setting 'slot' on an active array requires also + * updating the 'rd%d' link, and communicating + * with the personality with ->hot_*_disk. + * For now we only support removing + * failed/spare devices. This normally happens automatically, + * but not when the metadata is externally managed. + */ + if (rdev->raid_disk == -1) + return -EEXIST; + /* personality does all needed checks */ + if (rdev->mddev->pers->hot_add_disk == NULL) + return -EINVAL; + err = rdev->mddev->pers-> + hot_remove_disk(rdev->mddev, rdev->raid_disk); + if (err) + return err; + sprintf(nm, "rd%d", rdev->raid_disk); + sysfs_remove_link(&rdev->mddev->kobj, nm); + set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); + md_wakeup_thread(rdev->mddev->thread); + } else if (rdev->mddev->pers) { + mdk_rdev_t *rdev2; + struct list_head *tmp; + /* Activating a spare .. or possibly reactivating + * if we every get bitmaps working here. + */ + + if (rdev->raid_disk != -1) + return -EBUSY; + + if (rdev->mddev->pers->hot_add_disk == NULL) + return -EINVAL; + + rdev_for_each(rdev2, tmp, rdev->mddev) + if (rdev2->raid_disk == slot) + return -EEXIST; + + rdev->raid_disk = slot; + if (test_bit(In_sync, &rdev->flags)) + rdev->saved_raid_disk = slot; + else + rdev->saved_raid_disk = -1; + err = rdev->mddev->pers-> + hot_add_disk(rdev->mddev, rdev); + if (err) { + rdev->raid_disk = -1; + return err; + } else + sysfs_notify_dirent(rdev->sysfs_state); + sprintf(nm, "rd%d", rdev->raid_disk); + if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm)) + printk(KERN_WARNING + "md: cannot register " + "%s for %s\n", + nm, mdname(rdev->mddev)); + + /* don't wakeup anyone, leave that to userspace. 
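+		 * Unlike the remove path above, MD_RECOVERY_NEEDED is not
+		 * set and the md thread is not woken; with externally
+		 * managed metadata the monitoring program decides when
+		 * recovery starts.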
*/ + } else { + if (slot >= rdev->mddev->raid_disks) + return -ENOSPC; + rdev->raid_disk = slot; + /* assume it is working */ + clear_bit(Faulty, &rdev->flags); + clear_bit(WriteMostly, &rdev->flags); + set_bit(In_sync, &rdev->flags); + sysfs_notify_dirent(rdev->sysfs_state); + } + return len; +} + + +static struct rdev_sysfs_entry rdev_slot = +__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store); + +static ssize_t +offset_show(mdk_rdev_t *rdev, char *page) +{ + return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset); +} + +static ssize_t +offset_store(mdk_rdev_t *rdev, const char *buf, size_t len) +{ + char *e; + unsigned long long offset = simple_strtoull(buf, &e, 10); + if (e==buf || (*e && *e != '\n')) + return -EINVAL; + if (rdev->mddev->pers && rdev->raid_disk >= 0) + return -EBUSY; + if (rdev->size && rdev->mddev->external) + /* Must set offset before size, so overlap checks + * can be sane */ + return -EBUSY; + rdev->data_offset = offset; + return len; +} + +static struct rdev_sysfs_entry rdev_offset = +__ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store); + +static ssize_t +rdev_size_show(mdk_rdev_t *rdev, char *page) +{ + return sprintf(page, "%llu\n", (unsigned long long)rdev->size); +} + +static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2) +{ + /* check if two start/length pairs overlap */ + if (s1+l1 <= s2) + return 0; + if (s2+l2 <= s1) + return 0; + return 1; +} + +static ssize_t +rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) +{ + unsigned long long size; + unsigned long long oldsize = rdev->size; + mddev_t *my_mddev = rdev->mddev; + + if (strict_strtoull(buf, 10, &size) < 0) + return -EINVAL; + if (my_mddev->pers && rdev->raid_disk >= 0) { + if (my_mddev->persistent) { + size = super_types[my_mddev->major_version]. + rdev_size_change(rdev, size * 2); + if (!size) + return -EBUSY; + } else if (!size) { + size = (rdev->bdev->bd_inode->i_size >> 10); + size -= rdev->data_offset/2; + } + } + if (size < my_mddev->size) + return -EINVAL; /* component must fit device */ + + rdev->size = size; + if (size > oldsize && my_mddev->external) { + /* need to check that all other rdevs with the same ->bdev + * do not overlap. We need to unlock the mddev to avoid + * a deadlock. We have already changed rdev->size, and if + * we have to change it back, we will have the lock again. + */ + mddev_t *mddev; + int overlap = 0; + struct list_head *tmp, *tmp2; + + mddev_unlock(my_mddev); + for_each_mddev(mddev, tmp) { + mdk_rdev_t *rdev2; + + mddev_lock(mddev); + rdev_for_each(rdev2, tmp2, mddev) + if (test_bit(AllReserved, &rdev2->flags) || + (rdev->bdev == rdev2->bdev && + rdev != rdev2 && + overlaps(rdev->data_offset, rdev->size * 2, + rdev2->data_offset, + rdev2->size * 2))) { + overlap = 1; + break; + } + mddev_unlock(mddev); + if (overlap) { + mddev_put(mddev); + break; + } + } + mddev_lock(my_mddev); + if (overlap) { + /* Someone else could have slipped in a size + * change here, but doing so is just silly. 
+ * We put oldsize back because we *know* it is + * safe, and trust userspace not to race with + * itself + */ + rdev->size = oldsize; + return -EBUSY; + } + } + return len; +} + +static struct rdev_sysfs_entry rdev_size = +__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store); + +static struct attribute *rdev_default_attrs[] = { + &rdev_state.attr, + &rdev_errors.attr, + &rdev_slot.attr, + &rdev_offset.attr, + &rdev_size.attr, + NULL, +}; +static ssize_t +rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page) +{ + struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); + mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); + mddev_t *mddev = rdev->mddev; + ssize_t rv; + + if (!entry->show) + return -EIO; + + rv = mddev ? mddev_lock(mddev) : -EBUSY; + if (!rv) { + if (rdev->mddev == NULL) + rv = -EBUSY; + else + rv = entry->show(rdev, page); + mddev_unlock(mddev); + } + return rv; +} + +static ssize_t +rdev_attr_store(struct kobject *kobj, struct attribute *attr, + const char *page, size_t length) +{ + struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); + mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); + ssize_t rv; + mddev_t *mddev = rdev->mddev; + + if (!entry->store) + return -EIO; + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + rv = mddev ? mddev_lock(mddev): -EBUSY; + if (!rv) { + if (rdev->mddev == NULL) + rv = -EBUSY; + else + rv = entry->store(rdev, page, length); + mddev_unlock(mddev); + } + return rv; +} + +static void rdev_free(struct kobject *ko) +{ + mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj); + kfree(rdev); +} +static struct sysfs_ops rdev_sysfs_ops = { + .show = rdev_attr_show, + .store = rdev_attr_store, +}; +static struct kobj_type rdev_ktype = { + .release = rdev_free, + .sysfs_ops = &rdev_sysfs_ops, + .default_attrs = rdev_default_attrs, +}; + +/* + * Import a device. If 'super_format' >= 0, then sanity check the superblock + * + * mark the device faulty if: + * + * - the device is nonexistent (zero size) + * - the device has no valid superblock + * + * a faulty rdev _never_ has rdev->sb set. + */ +static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor) +{ + char b[BDEVNAME_SIZE]; + int err; + mdk_rdev_t *rdev; + sector_t size; + + rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); + if (!rdev) { + printk(KERN_ERR "md: could not alloc mem for new device!\n"); + return ERR_PTR(-ENOMEM); + } + + if ((err = alloc_disk_sb(rdev))) + goto abort_free; + + err = lock_rdev(rdev, newdev, super_format == -2); + if (err) + goto abort_free; + + kobject_init(&rdev->kobj, &rdev_ktype); + + rdev->desc_nr = -1; + rdev->saved_raid_disk = -1; + rdev->raid_disk = -1; + rdev->flags = 0; + rdev->data_offset = 0; + rdev->sb_events = 0; + atomic_set(&rdev->nr_pending, 0); + atomic_set(&rdev->read_errors, 0); + atomic_set(&rdev->corrected_errors, 0); + + size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; + if (!size) { + printk(KERN_WARNING + "md: %s has zero or unknown size, marking faulty!\n", + bdevname(rdev->bdev,b)); + err = -EINVAL; + goto abort_free; + } + + if (super_format >= 0) { + err = super_types[super_format]. 
+ load_super(rdev, NULL, super_minor); + if (err == -EINVAL) { + printk(KERN_WARNING + "md: %s does not have a valid v%d.%d " + "superblock, not importing!\n", + bdevname(rdev->bdev,b), + super_format, super_minor); + goto abort_free; + } + if (err < 0) { + printk(KERN_WARNING + "md: could not read %s's sb, not importing!\n", + bdevname(rdev->bdev,b)); + goto abort_free; + } + } + + INIT_LIST_HEAD(&rdev->same_set); + init_waitqueue_head(&rdev->blocked_wait); + + return rdev; + +abort_free: + if (rdev->sb_page) { + if (rdev->bdev) + unlock_rdev(rdev); + free_disk_sb(rdev); + } + kfree(rdev); + return ERR_PTR(err); +} + +/* + * Check a full RAID array for plausibility + */ + + +static void analyze_sbs(mddev_t * mddev) +{ + int i; + struct list_head *tmp; + mdk_rdev_t *rdev, *freshest; + char b[BDEVNAME_SIZE]; + + freshest = NULL; + rdev_for_each(rdev, tmp, mddev) + switch (super_types[mddev->major_version]. + load_super(rdev, freshest, mddev->minor_version)) { + case 1: + freshest = rdev; + break; + case 0: + break; + default: + printk( KERN_ERR \ + "md: fatal superblock inconsistency in %s" + " -- removing from array\n", + bdevname(rdev->bdev,b)); + kick_rdev_from_array(rdev); + } + + + super_types[mddev->major_version]. + validate_super(mddev, freshest); + + i = 0; + rdev_for_each(rdev, tmp, mddev) { + if (rdev->desc_nr >= mddev->max_disks || + i > mddev->max_disks) { + printk(KERN_WARNING + "md: %s: %s: only %d devices permitted\n", + mdname(mddev), bdevname(rdev->bdev, b), + mddev->max_disks); + kick_rdev_from_array(rdev); + continue; + } + if (rdev != freshest) + if (super_types[mddev->major_version]. + validate_super(mddev, rdev)) { + printk(KERN_WARNING "md: kicking non-fresh %s" + " from array!\n", + bdevname(rdev->bdev,b)); + kick_rdev_from_array(rdev); + continue; + } + if (mddev->level == LEVEL_MULTIPATH) { + rdev->desc_nr = i++; + rdev->raid_disk = rdev->desc_nr; + set_bit(In_sync, &rdev->flags); + } else if (rdev->raid_disk >= mddev->raid_disks) { + rdev->raid_disk = -1; + clear_bit(In_sync, &rdev->flags); + } + } + + + + if (mddev->recovery_cp != MaxSector && + mddev->level >= 1) + printk(KERN_ERR "md: %s: raid array is not clean" + " -- starting background reconstruction\n", + mdname(mddev)); + +} + +static void md_safemode_timeout(unsigned long data); + +static ssize_t +safe_delay_show(mddev_t *mddev, char *page) +{ + int msec = (mddev->safemode_delay*1000)/HZ; + return sprintf(page, "%d.%03d\n", msec/1000, msec%1000); +} +static ssize_t +safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len) +{ + int scale=1; + int dot=0; + int i; + unsigned long msec; + char buf[30]; + + /* remove a period, and count digits after it */ + if (len >= sizeof(buf)) + return -EINVAL; + strlcpy(buf, cbuf, sizeof(buf)); + for (i=0; i<len; i++) { + if (dot) { + if (isdigit(buf[i])) { + buf[i-1] = buf[i]; + scale *= 10; + } + buf[i] = 0; + } else if (buf[i] == '.') { + dot=1; + buf[i] = 0; + } + } + if (strict_strtoul(buf, 10, &msec) < 0) + return -EINVAL; + msec = (msec * 1000) / scale; + if (msec == 0) + mddev->safemode_delay = 0; + else { + unsigned long old_delay = mddev->safemode_delay; + mddev->safemode_delay = (msec*HZ)/1000; + if (mddev->safemode_delay == 0) + mddev->safemode_delay = 1; + if (mddev->safemode_delay < old_delay) + md_safemode_timeout((unsigned long)mddev); + } + return len; +} +static struct md_sysfs_entry md_safe_delay = +__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store); + +static ssize_t +level_show(mddev_t *mddev, char *page) +{ + struct 
mdk_personality *p = mddev->pers; + if (p) + return sprintf(page, "%s\n", p->name); + else if (mddev->clevel[0]) + return sprintf(page, "%s\n", mddev->clevel); + else if (mddev->level != LEVEL_NONE) + return sprintf(page, "%d\n", mddev->level); + else + return 0; +} + +static ssize_t +level_store(mddev_t *mddev, const char *buf, size_t len) +{ + ssize_t rv = len; + if (mddev->pers) + return -EBUSY; + if (len == 0) + return 0; + if (len >= sizeof(mddev->clevel)) + return -ENOSPC; + strncpy(mddev->clevel, buf, len); + if (mddev->clevel[len-1] == '\n') + len--; + mddev->clevel[len] = 0; + mddev->level = LEVEL_NONE; + return rv; +} + +static struct md_sysfs_entry md_level = +__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store); + + +static ssize_t +layout_show(mddev_t *mddev, char *page) +{ + /* just a number, not meaningful for all levels */ + if (mddev->reshape_position != MaxSector && + mddev->layout != mddev->new_layout) + return sprintf(page, "%d (%d)\n", + mddev->new_layout, mddev->layout); + return sprintf(page, "%d\n", mddev->layout); +} + +static ssize_t +layout_store(mddev_t *mddev, const char *buf, size_t len) +{ + char *e; + unsigned long n = simple_strtoul(buf, &e, 10); + + if (!*buf || (*e && *e != '\n')) + return -EINVAL; + + if (mddev->pers) + return -EBUSY; + if (mddev->reshape_position != MaxSector) + mddev->new_layout = n; + else + mddev->layout = n; + return len; +} +static struct md_sysfs_entry md_layout = +__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store); + + +static ssize_t +raid_disks_show(mddev_t *mddev, char *page) +{ + if (mddev->raid_disks == 0) + return 0; + if (mddev->reshape_position != MaxSector && + mddev->delta_disks != 0) + return sprintf(page, "%d (%d)\n", mddev->raid_disks, + mddev->raid_disks - mddev->delta_disks); + return sprintf(page, "%d\n", mddev->raid_disks); +} + +static int update_raid_disks(mddev_t *mddev, int raid_disks); + +static ssize_t +raid_disks_store(mddev_t *mddev, const char *buf, size_t len) +{ + char *e; + int rv = 0; + unsigned long n = simple_strtoul(buf, &e, 10); + + if (!*buf || (*e && *e != '\n')) + return -EINVAL; + + if (mddev->pers) + rv = update_raid_disks(mddev, n); + else if (mddev->reshape_position != MaxSector) { + int olddisks = mddev->raid_disks - mddev->delta_disks; + mddev->delta_disks = n - olddisks; + mddev->raid_disks = n; + } else + mddev->raid_disks = n; + return rv ? 
rv : len; +} +static struct md_sysfs_entry md_raid_disks = +__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store); + +static ssize_t +chunk_size_show(mddev_t *mddev, char *page) +{ + if (mddev->reshape_position != MaxSector && + mddev->chunk_size != mddev->new_chunk) + return sprintf(page, "%d (%d)\n", mddev->new_chunk, + mddev->chunk_size); + return sprintf(page, "%d\n", mddev->chunk_size); +} + +static ssize_t +chunk_size_store(mddev_t *mddev, const char *buf, size_t len) +{ + /* can only set chunk_size if array is not yet active */ + char *e; + unsigned long n = simple_strtoul(buf, &e, 10); + + if (!*buf || (*e && *e != '\n')) + return -EINVAL; + + if (mddev->pers) + return -EBUSY; + else if (mddev->reshape_position != MaxSector) + mddev->new_chunk = n; + else + mddev->chunk_size = n; + return len; +} +static struct md_sysfs_entry md_chunk_size = +__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store); + +static ssize_t +resync_start_show(mddev_t *mddev, char *page) +{ + return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp); +} + +static ssize_t +resync_start_store(mddev_t *mddev, const char *buf, size_t len) +{ + char *e; + unsigned long long n = simple_strtoull(buf, &e, 10); + + if (mddev->pers) + return -EBUSY; + if (!*buf || (*e && *e != '\n')) + return -EINVAL; + + mddev->recovery_cp = n; + return len; +} +static struct md_sysfs_entry md_resync_start = +__ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store); + +/* + * The array state can be: + * + * clear + * No devices, no size, no level + * Equivalent to STOP_ARRAY ioctl + * inactive + * May have some settings, but array is not active + * all IO results in error + * When written, doesn't tear down array, but just stops it + * suspended (not supported yet) + * All IO requests will block. The array can be reconfigured. + * Writing this, if accepted, will block until array is quiescent + * readonly + * no resync can happen. no superblocks get written. + * write requests fail + * read-auto + * like readonly, but behaves like 'clean' on a write request. + * + * clean - no pending writes, but otherwise active. + * When written to inactive array, starts without resync + * If a write request arrives then + * if metadata is known, mark 'dirty' and switch to 'active'. + * if not known, block and switch to write-pending + * If written to an active array that has pending writes, then fails. + * active + * fully active: IO and resync can be happening. + * When written to inactive array, starts with resync + * + * write-pending + * clean, but writes are blocked waiting for 'active' to be written. + * + * active-idle + * like active, but no writes have been seen for a while (100msec). 
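+ *
+ * These words are read from and written to the md/array_state sysfs
+ * attribute (e.g. /sys/block/md0/md/array_state). For example, writing
+ * "clean" to an inactive, assembled array starts it without a resync.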
+ * + */ +enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active, + write_pending, active_idle, bad_word}; +static char *array_states[] = { + "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active", + "write-pending", "active-idle", NULL }; + +static int match_word(const char *word, char **list) +{ + int n; + for (n=0; list[n]; n++) + if (cmd_match(word, list[n])) + break; + return n; +} + +static ssize_t +array_state_show(mddev_t *mddev, char *page) +{ + enum array_state st = inactive; + + if (mddev->pers) + switch(mddev->ro) { + case 1: + st = readonly; + break; + case 2: + st = read_auto; + break; + case 0: + if (mddev->in_sync) + st = clean; + else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags)) + st = write_pending; + else if (mddev->safemode) + st = active_idle; + else + st = active; + } + else { + if (list_empty(&mddev->disks) && + mddev->raid_disks == 0 && + mddev->size == 0) + st = clear; + else + st = inactive; + } + return sprintf(page, "%s\n", array_states[st]); +} + +static int do_md_stop(mddev_t * mddev, int ro, int is_open); +static int do_md_run(mddev_t * mddev); +static int restart_array(mddev_t *mddev); + +static ssize_t +array_state_store(mddev_t *mddev, const char *buf, size_t len) +{ + int err = -EINVAL; + enum array_state st = match_word(buf, array_states); + switch(st) { + case bad_word: + break; + case clear: + /* stopping an active array */ + if (atomic_read(&mddev->openers) > 0) + return -EBUSY; + err = do_md_stop(mddev, 0, 0); + break; + case inactive: + /* stopping an active array */ + if (mddev->pers) { + if (atomic_read(&mddev->openers) > 0) + return -EBUSY; + err = do_md_stop(mddev, 2, 0); + } else + err = 0; /* already inactive */ + break; + case suspended: + break; /* not supported yet */ + case readonly: + if (mddev->pers) + err = do_md_stop(mddev, 1, 0); + else { + mddev->ro = 1; + set_disk_ro(mddev->gendisk, 1); + err = do_md_run(mddev); + } + break; + case read_auto: + if (mddev->pers) { + if (mddev->ro == 0) + err = do_md_stop(mddev, 1, 0); + else if (mddev->ro == 1) + err = restart_array(mddev); + if (err == 0) { + mddev->ro = 2; + set_disk_ro(mddev->gendisk, 0); + } + } else { + mddev->ro = 2; + err = do_md_run(mddev); + } + break; + case clean: + if (mddev->pers) { + restart_array(mddev); + spin_lock_irq(&mddev->write_lock); + if (atomic_read(&mddev->writes_pending) == 0) { + if (mddev->in_sync == 0) { + mddev->in_sync = 1; + if (mddev->safemode == 1) + mddev->safemode = 0; + if (mddev->persistent) + set_bit(MD_CHANGE_CLEAN, + &mddev->flags); + } + err = 0; + } else + err = -EBUSY; + spin_unlock_irq(&mddev->write_lock); + } else { + mddev->ro = 0; + mddev->recovery_cp = MaxSector; + err = do_md_run(mddev); + } + break; + case active: + if (mddev->pers) { + restart_array(mddev); + if (mddev->external) + clear_bit(MD_CHANGE_CLEAN, &mddev->flags); + wake_up(&mddev->sb_wait); + err = 0; + } else { + mddev->ro = 0; + set_disk_ro(mddev->gendisk, 0); + err = do_md_run(mddev); + } + break; + case write_pending: + case active_idle: + /* these cannot be set */ + break; + } + if (err) + return err; + else { + sysfs_notify_dirent(mddev->sysfs_state); + return len; + } +} +static struct md_sysfs_entry md_array_state = +__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store); + +static ssize_t +null_show(mddev_t *mddev, char *page) +{ + return -EINVAL; +} + +static ssize_t +new_dev_store(mddev_t *mddev, const char *buf, size_t len) +{ + /* buf must be %d:%d\n? 
giving major and minor numbers */ + /* The new device is added to the array. + * If the array has a persistent superblock, we read the + * superblock to initialise info and check validity. + * Otherwise, only checking done is that in bind_rdev_to_array, + * which mainly checks size. + */ + char *e; + int major = simple_strtoul(buf, &e, 10); + int minor; + dev_t dev; + mdk_rdev_t *rdev; + int err; + + if (!*buf || *e != ':' || !e[1] || e[1] == '\n') + return -EINVAL; + minor = simple_strtoul(e+1, &e, 10); + if (*e && *e != '\n') + return -EINVAL; + dev = MKDEV(major, minor); + if (major != MAJOR(dev) || + minor != MINOR(dev)) + return -EOVERFLOW; + + + if (mddev->persistent) { + rdev = md_import_device(dev, mddev->major_version, + mddev->minor_version); + if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { + mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, + mdk_rdev_t, same_set); + err = super_types[mddev->major_version] + .load_super(rdev, rdev0, mddev->minor_version); + if (err < 0) + goto out; + } + } else if (mddev->external) + rdev = md_import_device(dev, -2, -1); + else + rdev = md_import_device(dev, -1, -1); + + if (IS_ERR(rdev)) + return PTR_ERR(rdev); + err = bind_rdev_to_array(rdev, mddev); + out: + if (err) + export_rdev(rdev); + return err ? err : len; +} + +static struct md_sysfs_entry md_new_device = +__ATTR(new_dev, S_IWUSR, null_show, new_dev_store); + +static ssize_t +bitmap_store(mddev_t *mddev, const char *buf, size_t len) +{ + char *end; + unsigned long chunk, end_chunk; + + if (!mddev->bitmap) + goto out; + /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */ + while (*buf) { + chunk = end_chunk = simple_strtoul(buf, &end, 0); + if (buf == end) break; + if (*end == '-') { /* range */ + buf = end + 1; + end_chunk = simple_strtoul(buf, &end, 0); + if (buf == end) break; + } + if (*end && !isspace(*end)) break; + bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk); + buf = end; + while (isspace(*buf)) buf++; + } + bitmap_unplug(mddev->bitmap); /* flush the bits to disk */ +out: + return len; +} + +static struct md_sysfs_entry md_bitmap = +__ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store); + +static ssize_t +size_show(mddev_t *mddev, char *page) +{ + return sprintf(page, "%llu\n", (unsigned long long)mddev->size); +} + +static int update_size(mddev_t *mddev, sector_t num_sectors); + +static ssize_t +size_store(mddev_t *mddev, const char *buf, size_t len) +{ + /* If array is inactive, we can reduce the component size, but + * not increase it (except from 0). + * If array is active, we can try an on-line resize + */ + char *e; + int err = 0; + unsigned long long size = simple_strtoull(buf, &e, 10); + if (!*buf || *buf == '\n' || + (*e && *e != '\n')) + return -EINVAL; + + if (mddev->pers) { + err = update_size(mddev, size * 2); + md_update_sb(mddev, 1); + } else { + if (mddev->size == 0 || + mddev->size > size) + mddev->size = size; + else + err = -ENOSPC; + } + return err ? err : len; +} + +static struct md_sysfs_entry md_size = +__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store); + + +/* Metdata version. + * This is one of + * 'none' for arrays with no metadata (good luck...) 
+ * 'external' for arrays with externally managed metadata, + * or N.M for internally known formats + */ +static ssize_t +metadata_show(mddev_t *mddev, char *page) +{ + if (mddev->persistent) + return sprintf(page, "%d.%d\n", + mddev->major_version, mddev->minor_version); + else if (mddev->external) + return sprintf(page, "external:%s\n", mddev->metadata_type); + else + return sprintf(page, "none\n"); +} + +static ssize_t +metadata_store(mddev_t *mddev, const char *buf, size_t len) +{ + int major, minor; + char *e; + /* Changing the details of 'external' metadata is + * always permitted. Otherwise there must be + * no devices attached to the array. + */ + if (mddev->external && strncmp(buf, "external:", 9) == 0) + ; + else if (!list_empty(&mddev->disks)) + return -EBUSY; + + if (cmd_match(buf, "none")) { + mddev->persistent = 0; + mddev->external = 0; + mddev->major_version = 0; + mddev->minor_version = 90; + return len; + } + if (strncmp(buf, "external:", 9) == 0) { + size_t namelen = len-9; + if (namelen >= sizeof(mddev->metadata_type)) + namelen = sizeof(mddev->metadata_type)-1; + strncpy(mddev->metadata_type, buf+9, namelen); + mddev->metadata_type[namelen] = 0; + if (namelen && mddev->metadata_type[namelen-1] == '\n') + mddev->metadata_type[--namelen] = 0; + mddev->persistent = 0; + mddev->external = 1; + mddev->major_version = 0; + mddev->minor_version = 90; + return len; + } + major = simple_strtoul(buf, &e, 10); + if (e==buf || *e != '.') + return -EINVAL; + buf = e+1; + minor = simple_strtoul(buf, &e, 10); + if (e==buf || (*e && *e != '\n') ) + return -EINVAL; + if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL) + return -ENOENT; + mddev->major_version = major; + mddev->minor_version = minor; + mddev->persistent = 1; + mddev->external = 0; + return len; +} + +static struct md_sysfs_entry md_metadata = +__ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store); + +static ssize_t +action_show(mddev_t *mddev, char *page) +{ + char *type = "idle"; + if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || + (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) { + if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) + type = "reshape"; + else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { + if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) + type = "resync"; + else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) + type = "check"; + else + type = "repair"; + } else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) + type = "recover"; + } + return sprintf(page, "%s\n", type); +} + +static ssize_t +action_store(mddev_t *mddev, const char *page, size_t len) +{ + if (!mddev->pers || !mddev->pers->sync_request) + return -EINVAL; + + if (cmd_match(page, "idle")) { + if (mddev->sync_thread) { + set_bit(MD_RECOVERY_INTR, &mddev->recovery); + md_unregister_thread(mddev->sync_thread); + mddev->sync_thread = NULL; + mddev->recovery = 0; + } + } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || + test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) + return -EBUSY; + else if (cmd_match(page, "resync")) + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + else if (cmd_match(page, "recover")) { + set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + } else if (cmd_match(page, "reshape")) { + int err; + if (mddev->pers->start_reshape == NULL) + return -EINVAL; + err = mddev->pers->start_reshape(mddev); + if (err) + return err; + sysfs_notify(&mddev->kobj, NULL, "degraded"); + } else { + if 
(cmd_match(page, "check")) + set_bit(MD_RECOVERY_CHECK, &mddev->recovery); + else if (!cmd_match(page, "repair")) + return -EINVAL; + set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); + set_bit(MD_RECOVERY_SYNC, &mddev->recovery); + } + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + md_wakeup_thread(mddev->thread); + sysfs_notify(&mddev->kobj, NULL, "sync_action"); + return len; +} + +static ssize_t +mismatch_cnt_show(mddev_t *mddev, char *page) +{ + return sprintf(page, "%llu\n", + (unsigned long long) mddev->resync_mismatches); +} + +static struct md_sysfs_entry md_scan_mode = +__ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); + + +static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt); + +static ssize_t +sync_min_show(mddev_t *mddev, char *page) +{ + return sprintf(page, "%d (%s)\n", speed_min(mddev), + mddev->sync_speed_min ? "local": "system"); +} + +static ssize_t +sync_min_store(mddev_t *mddev, const char *buf, size_t len) +{ + int min; + char *e; + if (strncmp(buf, "system", 6)==0) { + mddev->sync_speed_min = 0; + return len; + } + min = simple_strtoul(buf, &e, 10); + if (buf == e || (*e && *e != '\n') || min <= 0) + return -EINVAL; + mddev->sync_speed_min = min; + return len; +} + +static struct md_sysfs_entry md_sync_min = +__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store); + +static ssize_t +sync_max_show(mddev_t *mddev, char *page) +{ + return sprintf(page, "%d (%s)\n", speed_max(mddev), + mddev->sync_speed_max ? "local": "system"); +} + +static ssize_t +sync_max_store(mddev_t *mddev, const char *buf, size_t len) +{ + int max; + char *e; + if (strncmp(buf, "system", 6)==0) { + mddev->sync_speed_max = 0; + return len; + } + max = simple_strtoul(buf, &e, 10); + if (buf == e || (*e && *e != '\n') || max <= 0) + return -EINVAL; + mddev->sync_speed_max = max; + return len; +} + +static struct md_sysfs_entry md_sync_max = +__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store); + +static ssize_t +degraded_show(mddev_t *mddev, char *page) +{ + return sprintf(page, "%d\n", mddev->degraded); +} +static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded); + +static ssize_t +sync_force_parallel_show(mddev_t *mddev, char *page) +{ + return sprintf(page, "%d\n", mddev->parallel_resync); +} + +static ssize_t +sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len) +{ + long n; + + if (strict_strtol(buf, 10, &n)) + return -EINVAL; + + if (n != 0 && n != 1) + return -EINVAL; + + mddev->parallel_resync = n; + + if (mddev->sync_thread) + wake_up(&resync_wait); + + return len; +} + +/* force parallel resync, even with shared block devices */ +static struct md_sysfs_entry md_sync_force_parallel = +__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR, + sync_force_parallel_show, sync_force_parallel_store); + +static ssize_t +sync_speed_show(mddev_t *mddev, char *page) +{ + unsigned long resync, dt, db; + resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active); + dt = (jiffies - mddev->resync_mark) / HZ; + if (!dt) dt++; + db = resync - mddev->resync_mark_cnt; + return sprintf(page, "%lu\n", db/dt/2); /* K/sec */ +} + +static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed); + +static ssize_t +sync_completed_show(mddev_t *mddev, char *page) +{ + unsigned long max_blocks, resync; + + if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) + max_blocks = mddev->resync_max_sectors; + else + max_blocks = mddev->size << 1; + + resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active)); + return 
sprintf(page, "%lu / %lu\n", resync, max_blocks); +} + +static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed); + +static ssize_t +min_sync_show(mddev_t *mddev, char *page) +{ + return sprintf(page, "%llu\n", + (unsigned long long)mddev->resync_min); +} +static ssize_t +min_sync_store(mddev_t *mddev, const char *buf, size_t len) +{ + unsigned long long min; + if (strict_strtoull(buf, 10, &min)) + return -EINVAL; + if (min > mddev->resync_max) + return -EINVAL; + if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) + return -EBUSY; + + /* Must be a multiple of chunk_size */ + if (mddev->chunk_size) { + if (min & (sector_t)((mddev->chunk_size>>9)-1)) + return -EINVAL; + } + mddev->resync_min = min; + + return len; +} + +static struct md_sysfs_entry md_min_sync = +__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store); + +static ssize_t +max_sync_show(mddev_t *mddev, char *page) +{ + if (mddev->resync_max == MaxSector) + return sprintf(page, "max\n"); + else + return sprintf(page, "%llu\n", + (unsigned long long)mddev->resync_max); +} +static ssize_t +max_sync_store(mddev_t *mddev, const char *buf, size_t len) +{ + if (strncmp(buf, "max", 3) == 0) + mddev->resync_max = MaxSector; + else { + unsigned long long max; + if (strict_strtoull(buf, 10, &max)) + return -EINVAL; + if (max < mddev->resync_min) + return -EINVAL; + if (max < mddev->resync_max && + test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) + return -EBUSY; + + /* Must be a multiple of chunk_size */ + if (mddev->chunk_size) { + if (max & (sector_t)((mddev->chunk_size>>9)-1)) + return -EINVAL; + } + mddev->resync_max = max; + } + wake_up(&mddev->recovery_wait); + return len; +} + +static struct md_sysfs_entry md_max_sync = +__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store); + +static ssize_t +suspend_lo_show(mddev_t *mddev, char *page) +{ + return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo); +} + +static ssize_t +suspend_lo_store(mddev_t *mddev, const char *buf, size_t len) +{ + char *e; + unsigned long long new = simple_strtoull(buf, &e, 10); + + if (mddev->pers->quiesce == NULL) + return -EINVAL; + if (buf == e || (*e && *e != '\n')) + return -EINVAL; + if (new >= mddev->suspend_hi || + (new > mddev->suspend_lo && new < mddev->suspend_hi)) { + mddev->suspend_lo = new; + mddev->pers->quiesce(mddev, 2); + return len; + } else + return -EINVAL; +} +static struct md_sysfs_entry md_suspend_lo = +__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store); + + +static ssize_t +suspend_hi_show(mddev_t *mddev, char *page) +{ + return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi); +} + +static ssize_t +suspend_hi_store(mddev_t *mddev, const char *buf, size_t len) +{ + char *e; + unsigned long long new = simple_strtoull(buf, &e, 10); + + if (mddev->pers->quiesce == NULL) + return -EINVAL; + if (buf == e || (*e && *e != '\n')) + return -EINVAL; + if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) || + (new > mddev->suspend_lo && new > mddev->suspend_hi)) { + mddev->suspend_hi = new; + mddev->pers->quiesce(mddev, 1); + mddev->pers->quiesce(mddev, 0); + return len; + } else + return -EINVAL; +} +static struct md_sysfs_entry md_suspend_hi = +__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store); + +static ssize_t +reshape_position_show(mddev_t *mddev, char *page) +{ + if (mddev->reshape_position != MaxSector) + return sprintf(page, "%llu\n", + (unsigned long long)mddev->reshape_position); + strcpy(page, "none\n"); + 
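+	/* "none\n" is 5 bytes long, hence the literal 5 returned below */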
return 5; +} + +static ssize_t +reshape_position_store(mddev_t *mddev, const char *buf, size_t len) +{ + char *e; + unsigned long long new = simple_strtoull(buf, &e, 10); + if (mddev->pers) + return -EBUSY; + if (buf == e || (*e && *e != '\n')) + return -EINVAL; + mddev->reshape_position = new; + mddev->delta_disks = 0; + mddev->new_level = mddev->level; + mddev->new_layout = mddev->layout; + mddev->new_chunk = mddev->chunk_size; + return len; +} + +static struct md_sysfs_entry md_reshape_position = +__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show, + reshape_position_store); + + +static struct attribute *md_default_attrs[] = { + &md_level.attr, + &md_layout.attr, + &md_raid_disks.attr, + &md_chunk_size.attr, + &md_size.attr, + &md_resync_start.attr, + &md_metadata.attr, + &md_new_device.attr, + &md_safe_delay.attr, + &md_array_state.attr, + &md_reshape_position.attr, + NULL, +}; + +static struct attribute *md_redundancy_attrs[] = { + &md_scan_mode.attr, + &md_mismatches.attr, + &md_sync_min.attr, + &md_sync_max.attr, + &md_sync_speed.attr, + &md_sync_force_parallel.attr, + &md_sync_completed.attr, + &md_min_sync.attr, + &md_max_sync.attr, + &md_suspend_lo.attr, + &md_suspend_hi.attr, + &md_bitmap.attr, + &md_degraded.attr, + NULL, +}; +static struct attribute_group md_redundancy_group = { + .name = NULL, + .attrs = md_redundancy_attrs, +}; + + +static ssize_t +md_attr_show(struct kobject *kobj, struct attribute *attr, char *page) +{ + struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); + mddev_t *mddev = container_of(kobj, struct mddev_s, kobj); + ssize_t rv; + + if (!entry->show) + return -EIO; + rv = mddev_lock(mddev); + if (!rv) { + rv = entry->show(mddev, page); + mddev_unlock(mddev); + } + return rv; +} + +static ssize_t +md_attr_store(struct kobject *kobj, struct attribute *attr, + const char *page, size_t length) +{ + struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); + mddev_t *mddev = container_of(kobj, struct mddev_s, kobj); + ssize_t rv; + + if (!entry->store) + return -EIO; + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + rv = mddev_lock(mddev); + if (!rv) { + rv = entry->store(mddev, page, length); + mddev_unlock(mddev); + } + return rv; +} + +static void md_free(struct kobject *ko) +{ + mddev_t *mddev = container_of(ko, mddev_t, kobj); + kfree(mddev); +} + +static struct sysfs_ops md_sysfs_ops = { + .show = md_attr_show, + .store = md_attr_store, +}; +static struct kobj_type md_ktype = { + .release = md_free, + .sysfs_ops = &md_sysfs_ops, + .default_attrs = md_default_attrs, +}; + +int mdp_major = 0; + +static struct kobject *md_probe(dev_t dev, int *part, void *data) +{ + static DEFINE_MUTEX(disks_mutex); + mddev_t *mddev = mddev_find(dev); + struct gendisk *disk; + int partitioned = (MAJOR(dev) != MD_MAJOR); + int shift = partitioned ? MdpMinorShift : 0; + int unit = MINOR(dev) >> shift; + int error; + + if (!mddev) + return NULL; + + mutex_lock(&disks_mutex); + if (mddev->gendisk) { + mutex_unlock(&disks_mutex); + mddev_put(mddev); + return NULL; + } + disk = alloc_disk(1 << shift); + if (!disk) { + mutex_unlock(&disks_mutex); + mddev_put(mddev); + return NULL; + } + disk->major = MAJOR(dev); + disk->first_minor = unit << shift; + if (partitioned) + sprintf(disk->disk_name, "md_d%d", unit); + else + sprintf(disk->disk_name, "md%d", unit); + disk->fops = &md_fops; + disk->private_data = mddev; + disk->queue = mddev->queue; + /* Allow extended partitions. 
This makes the + * 'mdp' device redundant, but we can really + * remove it now. + */ + disk->flags |= GENHD_FL_EXT_DEVT; + add_disk(disk); + mddev->gendisk = disk; + error = kobject_init_and_add(&mddev->kobj, &md_ktype, + &disk_to_dev(disk)->kobj, "%s", "md"); + mutex_unlock(&disks_mutex); + if (error) + printk(KERN_WARNING "md: cannot register %s/md - name in use\n", + disk->disk_name); + else { + kobject_uevent(&mddev->kobj, KOBJ_ADD); + mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state"); + } + return NULL; +} + +static void md_safemode_timeout(unsigned long data) +{ + mddev_t *mddev = (mddev_t *) data; + + if (!atomic_read(&mddev->writes_pending)) { + mddev->safemode = 1; + if (mddev->external) + sysfs_notify_dirent(mddev->sysfs_state); + } + md_wakeup_thread(mddev->thread); +} + +static int start_dirty_degraded; + +static int do_md_run(mddev_t * mddev) +{ + int err; + int chunk_size; + struct list_head *tmp; + mdk_rdev_t *rdev; + struct gendisk *disk; + struct mdk_personality *pers; + char b[BDEVNAME_SIZE]; + + if (list_empty(&mddev->disks)) + /* cannot run an array with no devices.. */ + return -EINVAL; + + if (mddev->pers) + return -EBUSY; + + /* + * Analyze all RAID superblock(s) + */ + if (!mddev->raid_disks) { + if (!mddev->persistent) + return -EINVAL; + analyze_sbs(mddev); + } + + chunk_size = mddev->chunk_size; + + if (chunk_size) { + if (chunk_size > MAX_CHUNK_SIZE) { + printk(KERN_ERR "too big chunk_size: %d > %d\n", + chunk_size, MAX_CHUNK_SIZE); + return -EINVAL; + } + /* + * chunk-size has to be a power of 2 + */ + if ( (1 << ffz(~chunk_size)) != chunk_size) { + printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size); + return -EINVAL; + } + + /* devices must have minimum size of one chunk */ + rdev_for_each(rdev, tmp, mddev) { + if (test_bit(Faulty, &rdev->flags)) + continue; + if (rdev->size < chunk_size / 1024) { + printk(KERN_WARNING + "md: Dev %s smaller than chunk_size:" + " %lluk < %dk\n", + bdevname(rdev->bdev,b), + (unsigned long long)rdev->size, + chunk_size / 1024); + return -EINVAL; + } + } + } + + if (mddev->level != LEVEL_NONE) + request_module("md-level-%d", mddev->level); + else if (mddev->clevel[0]) + request_module("md-%s", mddev->clevel); + + /* + * Drop all container device buffers, from now on + * the only valid external interface is through the md + * device. + */ + rdev_for_each(rdev, tmp, mddev) { + if (test_bit(Faulty, &rdev->flags)) + continue; + sync_blockdev(rdev->bdev); + invalidate_bdev(rdev->bdev); + + /* perform some consistency tests on the device. + * We don't want the data to overlap the metadata, + * Internal Bitmap issues has handled elsewhere. 
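+	 * (the checks below compare rdev->data_offset and rdev->sb_start in both orders)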
+ */ + if (rdev->data_offset < rdev->sb_start) { + if (mddev->size && + rdev->data_offset + mddev->size*2 + > rdev->sb_start) { + printk("md: %s: data overlaps metadata\n", + mdname(mddev)); + return -EINVAL; + } + } else { + if (rdev->sb_start + rdev->sb_size/512 + > rdev->data_offset) { + printk("md: %s: metadata overlaps data\n", + mdname(mddev)); + return -EINVAL; + } + } + sysfs_notify_dirent(rdev->sysfs_state); + } + + md_probe(mddev->unit, NULL, NULL); + disk = mddev->gendisk; + if (!disk) + return -ENOMEM; + + spin_lock(&pers_lock); + pers = find_pers(mddev->level, mddev->clevel); + if (!pers || !try_module_get(pers->owner)) { + spin_unlock(&pers_lock); + if (mddev->level != LEVEL_NONE) + printk(KERN_WARNING "md: personality for level %d is not loaded!\n", + mddev->level); + else + printk(KERN_WARNING "md: personality for level %s is not loaded!\n", + mddev->clevel); + return -EINVAL; + } + mddev->pers = pers; + spin_unlock(&pers_lock); + mddev->level = pers->level; + strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); + + if (mddev->reshape_position != MaxSector && + pers->start_reshape == NULL) { + /* This personality cannot handle reshaping... */ + mddev->pers = NULL; + module_put(pers->owner); + return -EINVAL; + } + + if (pers->sync_request) { + /* Warn if this is a potentially silly + * configuration. + */ + char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; + mdk_rdev_t *rdev2; + struct list_head *tmp2; + int warned = 0; + rdev_for_each(rdev, tmp, mddev) { + rdev_for_each(rdev2, tmp2, mddev) { + if (rdev < rdev2 && + rdev->bdev->bd_contains == + rdev2->bdev->bd_contains) { + printk(KERN_WARNING + "%s: WARNING: %s appears to be" + " on the same physical disk as" + " %s.\n", + mdname(mddev), + bdevname(rdev->bdev,b), + bdevname(rdev2->bdev,b2)); + warned = 1; + } + } + } + if (warned) + printk(KERN_WARNING + "True protection against single-disk" + " failure might be compromised.\n"); + } + + mddev->recovery = 0; + mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */ + mddev->barriers_work = 1; + mddev->ok_start_degraded = start_dirty_degraded; + + if (start_readonly) + mddev->ro = 2; /* read-only, but switch on first write */ + + err = mddev->pers->run(mddev); + if (err) + printk(KERN_ERR "md: pers->run() failed ...\n"); + else if (mddev->pers->sync_request) { + err = bitmap_create(mddev); + if (err) { + printk(KERN_ERR "%s: failed to create bitmap (%d)\n", + mdname(mddev), err); + mddev->pers->stop(mddev); + } + } + if (err) { + module_put(mddev->pers->owner); + mddev->pers = NULL; + bitmap_destroy(mddev); + return err; + } + if (mddev->pers->sync_request) { + if (sysfs_create_group(&mddev->kobj, &md_redundancy_group)) + printk(KERN_WARNING + "md: cannot register extra attributes for %s\n", + mdname(mddev)); + } else if (mddev->ro == 2) /* auto-readonly not meaningful */ + mddev->ro = 0; + + atomic_set(&mddev->writes_pending,0); + mddev->safemode = 0; + mddev->safemode_timer.function = md_safemode_timeout; + mddev->safemode_timer.data = (unsigned long) mddev; + mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */ + mddev->in_sync = 1; + + rdev_for_each(rdev, tmp, mddev) + if (rdev->raid_disk >= 0) { + char nm[20]; + sprintf(nm, "rd%d", rdev->raid_disk); + if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm)) + printk("md: cannot register %s for %s\n", + nm, mdname(mddev)); + } + + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + + if (mddev->flags) + md_update_sb(mddev, 0); + + set_capacity(disk, mddev->array_sectors); + + /* If we 
call blk_queue_make_request here, it will + * re-initialise max_sectors etc which may have been + * refined inside -> run. So just set the bits we need to set. + * Most initialisation happended when we called + * blk_queue_make_request(..., md_fail_request) + * earlier. + */ + mddev->queue->queuedata = mddev; + mddev->queue->make_request_fn = mddev->pers->make_request; + + /* If there is a partially-recovered drive we need to + * start recovery here. If we leave it to md_check_recovery, + * it will remove the drives and not do the right thing + */ + if (mddev->degraded && !mddev->sync_thread) { + struct list_head *rtmp; + int spares = 0; + rdev_for_each(rdev, rtmp, mddev) + if (rdev->raid_disk >= 0 && + !test_bit(In_sync, &rdev->flags) && + !test_bit(Faulty, &rdev->flags)) + /* complete an interrupted recovery */ + spares++; + if (spares && mddev->pers->sync_request) { + mddev->recovery = 0; + set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); + mddev->sync_thread = md_register_thread(md_do_sync, + mddev, + "%s_resync"); + if (!mddev->sync_thread) { + printk(KERN_ERR "%s: could not start resync" + " thread...\n", + mdname(mddev)); + /* leave the spares where they are, it shouldn't hurt */ + mddev->recovery = 0; + } + } + } + md_wakeup_thread(mddev->thread); + md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ + + mddev->changed = 1; + md_new_event(mddev); + sysfs_notify_dirent(mddev->sysfs_state); + sysfs_notify(&mddev->kobj, NULL, "sync_action"); + sysfs_notify(&mddev->kobj, NULL, "degraded"); + kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); + return 0; +} + +static int restart_array(mddev_t *mddev) +{ + struct gendisk *disk = mddev->gendisk; + + /* Complain if it has no devices */ + if (list_empty(&mddev->disks)) + return -ENXIO; + if (!mddev->pers) + return -EINVAL; + if (!mddev->ro) + return -EBUSY; + mddev->safemode = 0; + mddev->ro = 0; + set_disk_ro(disk, 0); + printk(KERN_INFO "md: %s switched to read-write mode.\n", + mdname(mddev)); + /* Kick recovery or resync if necessary */ + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + md_wakeup_thread(mddev->thread); + md_wakeup_thread(mddev->sync_thread); + sysfs_notify_dirent(mddev->sysfs_state); + return 0; +} + +/* similar to deny_write_access, but accounts for our holding a reference + * to the file ourselves */ +static int deny_bitmap_write_access(struct file * file) +{ + struct inode *inode = file->f_mapping->host; + + spin_lock(&inode->i_lock); + if (atomic_read(&inode->i_writecount) > 1) { + spin_unlock(&inode->i_lock); + return -ETXTBSY; + } + atomic_set(&inode->i_writecount, -1); + spin_unlock(&inode->i_lock); + + return 0; +} + +static void restore_bitmap_write_access(struct file *file) +{ + struct inode *inode = file->f_mapping->host; + + spin_lock(&inode->i_lock); + atomic_set(&inode->i_writecount, 1); + spin_unlock(&inode->i_lock); +} + +/* mode: + * 0 - completely stop and dis-assemble array + * 1 - switch to readonly + * 2 - stop but do not disassemble array + */ +static int do_md_stop(mddev_t * mddev, int mode, int is_open) +{ + int err = 0; + struct gendisk *disk = mddev->gendisk; + + if (atomic_read(&mddev->openers) > is_open) { + printk("md: %s still in use.\n",mdname(mddev)); + return -EBUSY; + } + + if (mddev->pers) { + + if (mddev->sync_thread) { + set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + set_bit(MD_RECOVERY_INTR, &mddev->recovery); + md_unregister_thread(mddev->sync_thread); + mddev->sync_thread = NULL; + } + + del_timer_sync(&mddev->safemode_timer); + + switch(mode) { 
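+		/* mode 1 only flips the array to read-only; modes 0 and 2 fall through to the same stop path, and only mode 0 frees resources further down */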
+ case 1: /* readonly */ + err = -ENXIO; + if (mddev->ro==1) + goto out; + mddev->ro = 1; + break; + case 0: /* disassemble */ + case 2: /* stop */ + bitmap_flush(mddev); + md_super_wait(mddev); + if (mddev->ro) + set_disk_ro(disk, 0); + blk_queue_make_request(mddev->queue, md_fail_request); + mddev->pers->stop(mddev); + mddev->queue->merge_bvec_fn = NULL; + mddev->queue->unplug_fn = NULL; + mddev->queue->backing_dev_info.congested_fn = NULL; + if (mddev->pers->sync_request) + sysfs_remove_group(&mddev->kobj, &md_redundancy_group); + + module_put(mddev->pers->owner); + mddev->pers = NULL; + /* tell userspace to handle 'inactive' */ + sysfs_notify_dirent(mddev->sysfs_state); + + set_capacity(disk, 0); + mddev->changed = 1; + + if (mddev->ro) + mddev->ro = 0; + } + if (!mddev->in_sync || mddev->flags) { + /* mark array as shutdown cleanly */ + mddev->in_sync = 1; + md_update_sb(mddev, 1); + } + if (mode == 1) + set_disk_ro(disk, 1); + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + } + + /* + * Free resources if final stop + */ + if (mode == 0) { + mdk_rdev_t *rdev; + struct list_head *tmp; + + printk(KERN_INFO "md: %s stopped.\n", mdname(mddev)); + + bitmap_destroy(mddev); + if (mddev->bitmap_file) { + restore_bitmap_write_access(mddev->bitmap_file); + fput(mddev->bitmap_file); + mddev->bitmap_file = NULL; + } + mddev->bitmap_offset = 0; + + rdev_for_each(rdev, tmp, mddev) + if (rdev->raid_disk >= 0) { + char nm[20]; + sprintf(nm, "rd%d", rdev->raid_disk); + sysfs_remove_link(&mddev->kobj, nm); + } + + /* make sure all md_delayed_delete calls have finished */ + flush_scheduled_work(); + + export_array(mddev); + + mddev->array_sectors = 0; + mddev->size = 0; + mddev->raid_disks = 0; + mddev->recovery_cp = 0; + mddev->resync_min = 0; + mddev->resync_max = MaxSector; + mddev->reshape_position = MaxSector; + mddev->external = 0; + mddev->persistent = 0; + mddev->level = LEVEL_NONE; + mddev->clevel[0] = 0; + mddev->flags = 0; + mddev->ro = 0; + mddev->metadata_type[0] = 0; + mddev->chunk_size = 0; + mddev->ctime = mddev->utime = 0; + mddev->layout = 0; + mddev->max_disks = 0; + mddev->events = 0; + mddev->delta_disks = 0; + mddev->new_level = LEVEL_NONE; + mddev->new_layout = 0; + mddev->new_chunk = 0; + mddev->curr_resync = 0; + mddev->resync_mismatches = 0; + mddev->suspend_lo = mddev->suspend_hi = 0; + mddev->sync_speed_min = mddev->sync_speed_max = 0; + mddev->recovery = 0; + mddev->in_sync = 0; + mddev->changed = 0; + mddev->degraded = 0; + mddev->barriers_work = 0; + mddev->safemode = 0; + kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); + + } else if (mddev->pers) + printk(KERN_INFO "md: %s switched to read-only mode.\n", + mdname(mddev)); + err = 0; + md_new_event(mddev); + sysfs_notify_dirent(mddev->sysfs_state); +out: + return err; +} + +#ifndef MODULE +static void autorun_array(mddev_t *mddev) +{ + mdk_rdev_t *rdev; + struct list_head *tmp; + int err; + + if (list_empty(&mddev->disks)) + return; + + printk(KERN_INFO "md: running: "); + + rdev_for_each(rdev, tmp, mddev) { + char b[BDEVNAME_SIZE]; + printk("<%s>", bdevname(rdev->bdev,b)); + } + printk("\n"); + + err = do_md_run(mddev); + if (err) { + printk(KERN_WARNING "md: do_md_run() returned %d\n", err); + do_md_stop(mddev, 0, 0); + } +} + +/* + * lets try to run arrays based on all disks that have arrived + * until now. 
(those are in pending_raid_disks) + * + * the method: pick the first pending disk, collect all disks with + * the same UUID, remove all from the pending list and put them into + * the 'same_array' list. Then order this list based on superblock + * update time (freshest comes first), kick out 'old' disks and + * compare superblocks. If everything's fine then run it. + * + * If "unit" is allocated, then bump its reference count + */ +static void autorun_devices(int part) +{ + struct list_head *tmp; + mdk_rdev_t *rdev0, *rdev; + mddev_t *mddev; + char b[BDEVNAME_SIZE]; + + printk(KERN_INFO "md: autorun ...\n"); + while (!list_empty(&pending_raid_disks)) { + int unit; + dev_t dev; + LIST_HEAD(candidates); + rdev0 = list_entry(pending_raid_disks.next, + mdk_rdev_t, same_set); + + printk(KERN_INFO "md: considering %s ...\n", + bdevname(rdev0->bdev,b)); + INIT_LIST_HEAD(&candidates); + rdev_for_each_list(rdev, tmp, pending_raid_disks) + if (super_90_load(rdev, rdev0, 0) >= 0) { + printk(KERN_INFO "md: adding %s ...\n", + bdevname(rdev->bdev,b)); + list_move(&rdev->same_set, &candidates); + } + /* + * now we have a set of devices, with all of them having + * mostly sane superblocks. It's time to allocate the + * mddev. + */ + if (part) { + dev = MKDEV(mdp_major, + rdev0->preferred_minor << MdpMinorShift); + unit = MINOR(dev) >> MdpMinorShift; + } else { + dev = MKDEV(MD_MAJOR, rdev0->preferred_minor); + unit = MINOR(dev); + } + if (rdev0->preferred_minor != unit) { + printk(KERN_INFO "md: unit number in %s is bad: %d\n", + bdevname(rdev0->bdev, b), rdev0->preferred_minor); + break; + } + + md_probe(dev, NULL, NULL); + mddev = mddev_find(dev); + if (!mddev || !mddev->gendisk) { + if (mddev) + mddev_put(mddev); + printk(KERN_ERR + "md: cannot allocate memory for md drive.\n"); + break; + } + if (mddev_lock(mddev)) + printk(KERN_WARNING "md: %s locked, cannot run\n", + mdname(mddev)); + else if (mddev->raid_disks || mddev->major_version + || !list_empty(&mddev->disks)) { + printk(KERN_WARNING + "md: %s already running, cannot run %s\n", + mdname(mddev), bdevname(rdev0->bdev,b)); + mddev_unlock(mddev); + } else { + printk(KERN_INFO "md: created %s\n", mdname(mddev)); + mddev->persistent = 1; + rdev_for_each_list(rdev, tmp, candidates) { + list_del_init(&rdev->same_set); + if (bind_rdev_to_array(rdev, mddev)) + export_rdev(rdev); + } + autorun_array(mddev); + mddev_unlock(mddev); + } + /* on success, candidates will be empty, on error + * it won't... + */ + rdev_for_each_list(rdev, tmp, candidates) { + list_del_init(&rdev->same_set); + export_rdev(rdev); + } + mddev_put(mddev); + } + printk(KERN_INFO "md: ... 
autorun DONE.\n"); +} +#endif /* !MODULE */ + +static int get_version(void __user * arg) +{ + mdu_version_t ver; + + ver.major = MD_MAJOR_VERSION; + ver.minor = MD_MINOR_VERSION; + ver.patchlevel = MD_PATCHLEVEL_VERSION; + + if (copy_to_user(arg, &ver, sizeof(ver))) + return -EFAULT; + + return 0; +} + +static int get_array_info(mddev_t * mddev, void __user * arg) +{ + mdu_array_info_t info; + int nr,working,active,failed,spare; + mdk_rdev_t *rdev; + struct list_head *tmp; + + nr=working=active=failed=spare=0; + rdev_for_each(rdev, tmp, mddev) { + nr++; + if (test_bit(Faulty, &rdev->flags)) + failed++; + else { + working++; + if (test_bit(In_sync, &rdev->flags)) + active++; + else + spare++; + } + } + + info.major_version = mddev->major_version; + info.minor_version = mddev->minor_version; + info.patch_version = MD_PATCHLEVEL_VERSION; + info.ctime = mddev->ctime; + info.level = mddev->level; + info.size = mddev->size; + if (info.size != mddev->size) /* overflow */ + info.size = -1; + info.nr_disks = nr; + info.raid_disks = mddev->raid_disks; + info.md_minor = mddev->md_minor; + info.not_persistent= !mddev->persistent; + + info.utime = mddev->utime; + info.state = 0; + if (mddev->in_sync) + info.state = (1<<MD_SB_CLEAN); + if (mddev->bitmap && mddev->bitmap_offset) + info.state = (1<<MD_SB_BITMAP_PRESENT); + info.active_disks = active; + info.working_disks = working; + info.failed_disks = failed; + info.spare_disks = spare; + + info.layout = mddev->layout; + info.chunk_size = mddev->chunk_size; + + if (copy_to_user(arg, &info, sizeof(info))) + return -EFAULT; + + return 0; +} + +static int get_bitmap_file(mddev_t * mddev, void __user * arg) +{ + mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */ + char *ptr, *buf = NULL; + int err = -ENOMEM; + + if (md_allow_write(mddev)) + file = kmalloc(sizeof(*file), GFP_NOIO); + else + file = kmalloc(sizeof(*file), GFP_KERNEL); + + if (!file) + goto out; + + /* bitmap disabled, zero the first byte and copy out */ + if (!mddev->bitmap || !mddev->bitmap->file) { + file->pathname[0] = '\0'; + goto copy_out; + } + + buf = kmalloc(sizeof(file->pathname), GFP_KERNEL); + if (!buf) + goto out; + + ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname)); + if (IS_ERR(ptr)) + goto out; + + strcpy(file->pathname, ptr); + +copy_out: + err = 0; + if (copy_to_user(arg, file, sizeof(*file))) + err = -EFAULT; +out: + kfree(buf); + kfree(file); + return err; +} + +static int get_disk_info(mddev_t * mddev, void __user * arg) +{ + mdu_disk_info_t info; + mdk_rdev_t *rdev; + + if (copy_from_user(&info, arg, sizeof(info))) + return -EFAULT; + + rdev = find_rdev_nr(mddev, info.number); + if (rdev) { + info.major = MAJOR(rdev->bdev->bd_dev); + info.minor = MINOR(rdev->bdev->bd_dev); + info.raid_disk = rdev->raid_disk; + info.state = 0; + if (test_bit(Faulty, &rdev->flags)) + info.state |= (1<<MD_DISK_FAULTY); + else if (test_bit(In_sync, &rdev->flags)) { + info.state |= (1<<MD_DISK_ACTIVE); + info.state |= (1<<MD_DISK_SYNC); + } + if (test_bit(WriteMostly, &rdev->flags)) + info.state |= (1<<MD_DISK_WRITEMOSTLY); + } else { + info.major = info.minor = 0; + info.raid_disk = -1; + info.state = (1<<MD_DISK_REMOVED); + } + + if (copy_to_user(arg, &info, sizeof(info))) + return -EFAULT; + + return 0; +} + +static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) +{ + char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; + mdk_rdev_t *rdev; + dev_t dev = MKDEV(info->major,info->minor); + + if (info->major != MAJOR(dev) || info->minor != MINOR(dev)) + 
return -EOVERFLOW; + + if (!mddev->raid_disks) { + int err; + /* expecting a device which has a superblock */ + rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); + if (IS_ERR(rdev)) { + printk(KERN_WARNING + "md: md_import_device returned %ld\n", + PTR_ERR(rdev)); + return PTR_ERR(rdev); + } + if (!list_empty(&mddev->disks)) { + mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, + mdk_rdev_t, same_set); + int err = super_types[mddev->major_version] + .load_super(rdev, rdev0, mddev->minor_version); + if (err < 0) { + printk(KERN_WARNING + "md: %s has different UUID to %s\n", + bdevname(rdev->bdev,b), + bdevname(rdev0->bdev,b2)); + export_rdev(rdev); + return -EINVAL; + } + } + err = bind_rdev_to_array(rdev, mddev); + if (err) + export_rdev(rdev); + return err; + } + + /* + * add_new_disk can be used once the array is assembled + * to add "hot spares". They must already have a superblock + * written + */ + if (mddev->pers) { + int err; + if (!mddev->pers->hot_add_disk) { + printk(KERN_WARNING + "%s: personality does not support diskops!\n", + mdname(mddev)); + return -EINVAL; + } + if (mddev->persistent) + rdev = md_import_device(dev, mddev->major_version, + mddev->minor_version); + else + rdev = md_import_device(dev, -1, -1); + if (IS_ERR(rdev)) { + printk(KERN_WARNING + "md: md_import_device returned %ld\n", + PTR_ERR(rdev)); + return PTR_ERR(rdev); + } + /* set save_raid_disk if appropriate */ + if (!mddev->persistent) { + if (info->state & (1<<MD_DISK_SYNC) && + info->raid_disk < mddev->raid_disks) + rdev->raid_disk = info->raid_disk; + else + rdev->raid_disk = -1; + } else + super_types[mddev->major_version]. + validate_super(mddev, rdev); + rdev->saved_raid_disk = rdev->raid_disk; + + clear_bit(In_sync, &rdev->flags); /* just to be sure */ + if (info->state & (1<<MD_DISK_WRITEMOSTLY)) + set_bit(WriteMostly, &rdev->flags); + + rdev->raid_disk = -1; + err = bind_rdev_to_array(rdev, mddev); + if (!err && !mddev->pers->hot_remove_disk) { + /* If there is hot_add_disk but no hot_remove_disk + * then added disks for geometry changes, + * and should be added immediately. + */ + super_types[mddev->major_version]. 
+ validate_super(mddev, rdev); + err = mddev->pers->hot_add_disk(mddev, rdev); + if (err) + unbind_rdev_from_array(rdev); + } + if (err) + export_rdev(rdev); + else + sysfs_notify_dirent(rdev->sysfs_state); + + md_update_sb(mddev, 1); + if (mddev->degraded) + set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + md_wakeup_thread(mddev->thread); + return err; + } + + /* otherwise, add_new_disk is only allowed + * for major_version==0 superblocks + */ + if (mddev->major_version != 0) { + printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n", + mdname(mddev)); + return -EINVAL; + } + + if (!(info->state & (1<<MD_DISK_FAULTY))) { + int err; + rdev = md_import_device(dev, -1, 0); + if (IS_ERR(rdev)) { + printk(KERN_WARNING + "md: error, md_import_device() returned %ld\n", + PTR_ERR(rdev)); + return PTR_ERR(rdev); + } + rdev->desc_nr = info->number; + if (info->raid_disk < mddev->raid_disks) + rdev->raid_disk = info->raid_disk; + else + rdev->raid_disk = -1; + + if (rdev->raid_disk < mddev->raid_disks) + if (info->state & (1<<MD_DISK_SYNC)) + set_bit(In_sync, &rdev->flags); + + if (info->state & (1<<MD_DISK_WRITEMOSTLY)) + set_bit(WriteMostly, &rdev->flags); + + if (!mddev->persistent) { + printk(KERN_INFO "md: nonpersistent superblock ...\n"); + rdev->sb_start = rdev->bdev->bd_inode->i_size / 512; + } else + rdev->sb_start = calc_dev_sboffset(rdev->bdev); + rdev->size = calc_num_sectors(rdev, mddev->chunk_size) / 2; + + err = bind_rdev_to_array(rdev, mddev); + if (err) { + export_rdev(rdev); + return err; + } + } + + return 0; +} + +static int hot_remove_disk(mddev_t * mddev, dev_t dev) +{ + char b[BDEVNAME_SIZE]; + mdk_rdev_t *rdev; + + rdev = find_rdev(mddev, dev); + if (!rdev) + return -ENXIO; + + if (rdev->raid_disk >= 0) + goto busy; + + kick_rdev_from_array(rdev); + md_update_sb(mddev, 1); + md_new_event(mddev); + + return 0; +busy: + printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n", + bdevname(rdev->bdev,b), mdname(mddev)); + return -EBUSY; +} + +static int hot_add_disk(mddev_t * mddev, dev_t dev) +{ + char b[BDEVNAME_SIZE]; + int err; + mdk_rdev_t *rdev; + + if (!mddev->pers) + return -ENODEV; + + if (mddev->major_version != 0) { + printk(KERN_WARNING "%s: HOT_ADD may only be used with" + " version-0 superblocks.\n", + mdname(mddev)); + return -EINVAL; + } + if (!mddev->pers->hot_add_disk) { + printk(KERN_WARNING + "%s: personality does not support diskops!\n", + mdname(mddev)); + return -EINVAL; + } + + rdev = md_import_device(dev, -1, 0); + if (IS_ERR(rdev)) { + printk(KERN_WARNING + "md: error, md_import_device() returned %ld\n", + PTR_ERR(rdev)); + return -EINVAL; + } + + if (mddev->persistent) + rdev->sb_start = calc_dev_sboffset(rdev->bdev); + else + rdev->sb_start = rdev->bdev->bd_inode->i_size / 512; + + rdev->size = calc_num_sectors(rdev, mddev->chunk_size) / 2; + + if (test_bit(Faulty, &rdev->flags)) { + printk(KERN_WARNING + "md: can not hot-add faulty %s disk to %s!\n", + bdevname(rdev->bdev,b), mdname(mddev)); + err = -EINVAL; + goto abort_export; + } + clear_bit(In_sync, &rdev->flags); + rdev->desc_nr = -1; + rdev->saved_raid_disk = -1; + err = bind_rdev_to_array(rdev, mddev); + if (err) + goto abort_export; + + /* + * The rest should better be atomic, we can have disk failures + * noticed in interrupt contexts ... + */ + + rdev->raid_disk = -1; + + md_update_sb(mddev, 1); + + /* + * Kick recovery, maybe this spare has to be added to the + * array immediately. 
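+	 * (done below by setting MD_RECOVERY_NEEDED and waking the md thread)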
+ */ + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + md_wakeup_thread(mddev->thread); + md_new_event(mddev); + return 0; + +abort_export: + export_rdev(rdev); + return err; +} + +static int set_bitmap_file(mddev_t *mddev, int fd) +{ + int err; + + if (mddev->pers) { + if (!mddev->pers->quiesce) + return -EBUSY; + if (mddev->recovery || mddev->sync_thread) + return -EBUSY; + /* we should be able to change the bitmap.. */ + } + + + if (fd >= 0) { + if (mddev->bitmap) + return -EEXIST; /* cannot add when bitmap is present */ + mddev->bitmap_file = fget(fd); + + if (mddev->bitmap_file == NULL) { + printk(KERN_ERR "%s: error: failed to get bitmap file\n", + mdname(mddev)); + return -EBADF; + } + + err = deny_bitmap_write_access(mddev->bitmap_file); + if (err) { + printk(KERN_ERR "%s: error: bitmap file is already in use\n", + mdname(mddev)); + fput(mddev->bitmap_file); + mddev->bitmap_file = NULL; + return err; + } + mddev->bitmap_offset = 0; /* file overrides offset */ + } else if (mddev->bitmap == NULL) + return -ENOENT; /* cannot remove what isn't there */ + err = 0; + if (mddev->pers) { + mddev->pers->quiesce(mddev, 1); + if (fd >= 0) + err = bitmap_create(mddev); + if (fd < 0 || err) { + bitmap_destroy(mddev); + fd = -1; /* make sure to put the file */ + } + mddev->pers->quiesce(mddev, 0); + } + if (fd < 0) { + if (mddev->bitmap_file) { + restore_bitmap_write_access(mddev->bitmap_file); + fput(mddev->bitmap_file); + } + mddev->bitmap_file = NULL; + } + + return err; +} + +/* + * set_array_info is used two different ways + * The original usage is when creating a new array. + * In this usage, raid_disks is > 0 and it together with + * level, size, not_persistent,layout,chunksize determine the + * shape of the array. + * This will always create an array with a type-0.90.0 superblock. + * The newer usage is when assembling an array. + * In this case raid_disks will be 0, and the major_version field is + * use to determine which style super-blocks are to be found on the devices. + * The minor and patch _version numbers are also kept incase the + * super_block handler wishes to interpret them. + */ +static int set_array_info(mddev_t * mddev, mdu_array_info_t *info) +{ + + if (info->raid_disks == 0) { + /* just setting version number for superblock loading */ + if (info->major_version < 0 || + info->major_version >= ARRAY_SIZE(super_types) || + super_types[info->major_version].name == NULL) { + /* maybe try to auto-load a module? */ + printk(KERN_INFO + "md: superblock version %d not known\n", + info->major_version); + return -EINVAL; + } + mddev->major_version = info->major_version; + mddev->minor_version = info->minor_version; + mddev->patch_version = info->patch_version; + mddev->persistent = !info->not_persistent; + return 0; + } + mddev->major_version = MD_MAJOR_VERSION; + mddev->minor_version = MD_MINOR_VERSION; + mddev->patch_version = MD_PATCHLEVEL_VERSION; + mddev->ctime = get_seconds(); + + mddev->level = info->level; + mddev->clevel[0] = 0; + mddev->size = info->size; + mddev->raid_disks = info->raid_disks; + /* don't set md_minor, it is determined by which /dev/md* was + * openned + */ + if (info->state & (1<<MD_SB_CLEAN)) + mddev->recovery_cp = MaxSector; + else + mddev->recovery_cp = 0; + mddev->persistent = ! 
info->not_persistent; + mddev->external = 0; + + mddev->layout = info->layout; + mddev->chunk_size = info->chunk_size; + + mddev->max_disks = MD_SB_DISKS; + + if (mddev->persistent) + mddev->flags = 0; + set_bit(MD_CHANGE_DEVS, &mddev->flags); + + mddev->default_bitmap_offset = MD_SB_BYTES >> 9; + mddev->bitmap_offset = 0; + + mddev->reshape_position = MaxSector; + + /* + * Generate a 128 bit UUID + */ + get_random_bytes(mddev->uuid, 16); + + mddev->new_level = mddev->level; + mddev->new_chunk = mddev->chunk_size; + mddev->new_layout = mddev->layout; + mddev->delta_disks = 0; + + return 0; +} + +static int update_size(mddev_t *mddev, sector_t num_sectors) +{ + mdk_rdev_t * rdev; + int rv; + struct list_head *tmp; + int fit = (num_sectors == 0); + + if (mddev->pers->resize == NULL) + return -EINVAL; + /* The "num_sectors" is the number of sectors of each device that + * is used. This can only make sense for arrays with redundancy. + * linear and raid0 always use whatever space is available. We can only + * consider changing this number if no resync or reconstruction is + * happening, and if the new size is acceptable. It must fit before the + * sb_start or, if that is <data_offset, it must fit before the size + * of each device. If num_sectors is zero, we find the largest size + * that fits. + + */ + if (mddev->sync_thread) + return -EBUSY; + if (mddev->bitmap) + /* Sorry, cannot grow a bitmap yet, just remove it, + * grow, and re-add. + */ + return -EBUSY; + rdev_for_each(rdev, tmp, mddev) { + sector_t avail; + avail = rdev->size * 2; + + if (fit && (num_sectors == 0 || num_sectors > avail)) + num_sectors = avail; + if (avail < num_sectors) + return -ENOSPC; + } + rv = mddev->pers->resize(mddev, num_sectors); + if (!rv) { + struct block_device *bdev; + + bdev = bdget_disk(mddev->gendisk, 0); + if (bdev) { + mutex_lock(&bdev->bd_inode->i_mutex); + i_size_write(bdev->bd_inode, + (loff_t)mddev->array_sectors << 9); + mutex_unlock(&bdev->bd_inode->i_mutex); + bdput(bdev); + } + } + return rv; +} + +static int update_raid_disks(mddev_t *mddev, int raid_disks) +{ + int rv; + /* change the number of raid disks */ + if (mddev->pers->check_reshape == NULL) + return -EINVAL; + if (raid_disks <= 0 || + raid_disks >= mddev->max_disks) + return -EINVAL; + if (mddev->sync_thread || mddev->reshape_position != MaxSector) + return -EBUSY; + mddev->delta_disks = raid_disks - mddev->raid_disks; + + rv = mddev->pers->check_reshape(mddev); + return rv; +} + + +/* + * update_array_info is used to change the configuration of an + * on-line array. + * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size + * fields in the info are checked against the array. + * Any differences that cannot be handled will cause an error. + * Normally, only one change can be managed at a time. 
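+ * (the changes counted below are size, raid_disks, layout and bitmap presence)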
+ */ +static int update_array_info(mddev_t *mddev, mdu_array_info_t *info) +{ + int rv = 0; + int cnt = 0; + int state = 0; + + /* calculate expected state,ignoring low bits */ + if (mddev->bitmap && mddev->bitmap_offset) + state |= (1 << MD_SB_BITMAP_PRESENT); + + if (mddev->major_version != info->major_version || + mddev->minor_version != info->minor_version || +/* mddev->patch_version != info->patch_version || */ + mddev->ctime != info->ctime || + mddev->level != info->level || +/* mddev->layout != info->layout || */ + !mddev->persistent != info->not_persistent|| + mddev->chunk_size != info->chunk_size || + /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */ + ((state^info->state) & 0xfffffe00) + ) + return -EINVAL; + /* Check there is only one change */ + if (info->size >= 0 && mddev->size != info->size) cnt++; + if (mddev->raid_disks != info->raid_disks) cnt++; + if (mddev->layout != info->layout) cnt++; + if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++; + if (cnt == 0) return 0; + if (cnt > 1) return -EINVAL; + + if (mddev->layout != info->layout) { + /* Change layout + * we don't need to do anything at the md level, the + * personality will take care of it all. + */ + if (mddev->pers->reconfig == NULL) + return -EINVAL; + else + return mddev->pers->reconfig(mddev, info->layout, -1); + } + if (info->size >= 0 && mddev->size != info->size) + rv = update_size(mddev, (sector_t)info->size * 2); + + if (mddev->raid_disks != info->raid_disks) + rv = update_raid_disks(mddev, info->raid_disks); + + if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) { + if (mddev->pers->quiesce == NULL) + return -EINVAL; + if (mddev->recovery || mddev->sync_thread) + return -EBUSY; + if (info->state & (1<<MD_SB_BITMAP_PRESENT)) { + /* add the bitmap */ + if (mddev->bitmap) + return -EEXIST; + if (mddev->default_bitmap_offset == 0) + return -EINVAL; + mddev->bitmap_offset = mddev->default_bitmap_offset; + mddev->pers->quiesce(mddev, 1); + rv = bitmap_create(mddev); + if (rv) + bitmap_destroy(mddev); + mddev->pers->quiesce(mddev, 0); + } else { + /* remove the bitmap */ + if (!mddev->bitmap) + return -ENOENT; + if (mddev->bitmap->file) + return -EINVAL; + mddev->pers->quiesce(mddev, 1); + bitmap_destroy(mddev); + mddev->pers->quiesce(mddev, 0); + mddev->bitmap_offset = 0; + } + } + md_update_sb(mddev, 1); + return rv; +} + +static int set_disk_faulty(mddev_t *mddev, dev_t dev) +{ + mdk_rdev_t *rdev; + + if (mddev->pers == NULL) + return -ENODEV; + + rdev = find_rdev(mddev, dev); + if (!rdev) + return -ENODEV; + + md_error(mddev, rdev); + return 0; +} + +/* + * We have a problem here : there is no easy way to give a CHS + * virtual geometry. We currently pretend that we have a 2 heads + * 4 sectors (with a BIG number of cylinders...). This drives + * dosfs just mad... 
;-) + */ +static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo) +{ + mddev_t *mddev = bdev->bd_disk->private_data; + + geo->heads = 2; + geo->sectors = 4; + geo->cylinders = get_capacity(mddev->gendisk) / 8; + return 0; +} + +static int md_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + int err = 0; + void __user *argp = (void __user *)arg; + mddev_t *mddev = NULL; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + /* + * Commands dealing with the RAID driver but not any + * particular array: + */ + switch (cmd) + { + case RAID_VERSION: + err = get_version(argp); + goto done; + + case PRINT_RAID_DEBUG: + err = 0; + md_print_devices(); + goto done; + +#ifndef MODULE + case RAID_AUTORUN: + err = 0; + autostart_arrays(arg); + goto done; +#endif + default:; + } + + /* + * Commands creating/starting a new array: + */ + + mddev = bdev->bd_disk->private_data; + + if (!mddev) { + BUG(); + goto abort; + } + + err = mddev_lock(mddev); + if (err) { + printk(KERN_INFO + "md: ioctl lock interrupted, reason %d, cmd %d\n", + err, cmd); + goto abort; + } + + switch (cmd) + { + case SET_ARRAY_INFO: + { + mdu_array_info_t info; + if (!arg) + memset(&info, 0, sizeof(info)); + else if (copy_from_user(&info, argp, sizeof(info))) { + err = -EFAULT; + goto abort_unlock; + } + if (mddev->pers) { + err = update_array_info(mddev, &info); + if (err) { + printk(KERN_WARNING "md: couldn't update" + " array info. %d\n", err); + goto abort_unlock; + } + goto done_unlock; + } + if (!list_empty(&mddev->disks)) { + printk(KERN_WARNING + "md: array %s already has disks!\n", + mdname(mddev)); + err = -EBUSY; + goto abort_unlock; + } + if (mddev->raid_disks) { + printk(KERN_WARNING + "md: array %s already initialised!\n", + mdname(mddev)); + err = -EBUSY; + goto abort_unlock; + } + err = set_array_info(mddev, &info); + if (err) { + printk(KERN_WARNING "md: couldn't set" + " array info. %d\n", err); + goto abort_unlock; + } + } + goto done_unlock; + + default:; + } + + /* + * Commands querying/configuring an existing array: + */ + /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY, + * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */ + if ((!mddev->raid_disks && !mddev->external) + && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY + && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE + && cmd != GET_BITMAP_FILE) { + err = -ENODEV; + goto abort_unlock; + } + + /* + * Commands even a read-only array can execute: + */ + switch (cmd) + { + case GET_ARRAY_INFO: + err = get_array_info(mddev, argp); + goto done_unlock; + + case GET_BITMAP_FILE: + err = get_bitmap_file(mddev, argp); + goto done_unlock; + + case GET_DISK_INFO: + err = get_disk_info(mddev, argp); + goto done_unlock; + + case RESTART_ARRAY_RW: + err = restart_array(mddev); + goto done_unlock; + + case STOP_ARRAY: + err = do_md_stop(mddev, 0, 1); + goto done_unlock; + + case STOP_ARRAY_RO: + err = do_md_stop(mddev, 1, 1); + goto done_unlock; + + } + + /* + * The remaining ioctls are changing the state of the + * superblock, so we do not allow them on read-only arrays. + * However non-MD ioctls (e.g. get-size) will still come through + * here and hit the 'default' below, so only disallow + * 'md' ioctls, and switch to rw mode if started auto-readonly. 
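+	 * (mddev->ro == 2 marks auto-read-only; it is cleared below when a state-changing md ioctl arrives)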
+ */ + if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) { + if (mddev->ro == 2) { + mddev->ro = 0; + sysfs_notify_dirent(mddev->sysfs_state); + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + md_wakeup_thread(mddev->thread); + } else { + err = -EROFS; + goto abort_unlock; + } + } + + switch (cmd) + { + case ADD_NEW_DISK: + { + mdu_disk_info_t info; + if (copy_from_user(&info, argp, sizeof(info))) + err = -EFAULT; + else + err = add_new_disk(mddev, &info); + goto done_unlock; + } + + case HOT_REMOVE_DISK: + err = hot_remove_disk(mddev, new_decode_dev(arg)); + goto done_unlock; + + case HOT_ADD_DISK: + err = hot_add_disk(mddev, new_decode_dev(arg)); + goto done_unlock; + + case SET_DISK_FAULTY: + err = set_disk_faulty(mddev, new_decode_dev(arg)); + goto done_unlock; + + case RUN_ARRAY: + err = do_md_run(mddev); + goto done_unlock; + + case SET_BITMAP_FILE: + err = set_bitmap_file(mddev, (int)arg); + goto done_unlock; + + default: + err = -EINVAL; + goto abort_unlock; + } + +done_unlock: +abort_unlock: + mddev_unlock(mddev); + + return err; +done: + if (err) + MD_BUG(); +abort: + return err; +} + +static int md_open(struct block_device *bdev, fmode_t mode) +{ + /* + * Succeed if we can lock the mddev, which confirms that + * it isn't being stopped right now. + */ + mddev_t *mddev = bdev->bd_disk->private_data; + int err; + + if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1))) + goto out; + + err = 0; + mddev_get(mddev); + atomic_inc(&mddev->openers); + mddev_unlock(mddev); + + check_disk_change(bdev); + out: + return err; +} + +static int md_release(struct gendisk *disk, fmode_t mode) +{ + mddev_t *mddev = disk->private_data; + + BUG_ON(!mddev); + atomic_dec(&mddev->openers); + mddev_put(mddev); + + return 0; +} + +static int md_media_changed(struct gendisk *disk) +{ + mddev_t *mddev = disk->private_data; + + return mddev->changed; +} + +static int md_revalidate(struct gendisk *disk) +{ + mddev_t *mddev = disk->private_data; + + mddev->changed = 0; + return 0; +} +static struct block_device_operations md_fops = +{ + .owner = THIS_MODULE, + .open = md_open, + .release = md_release, + .locked_ioctl = md_ioctl, + .getgeo = md_getgeo, + .media_changed = md_media_changed, + .revalidate_disk= md_revalidate, +}; + +static int md_thread(void * arg) +{ + mdk_thread_t *thread = arg; + + /* + * md_thread is a 'system-thread', it's priority should be very + * high. We avoid resource deadlocks individually in each + * raid personality. (RAID5 does preallocation) We also use RR and + * the very same RT priority as kswapd, thus we will never get + * into a priority inversion deadlock. + * + * we definitely have to have equal or higher priority than + * bdflush, otherwise bdflush will deadlock if there are too + * many dirty RAID5 blocks. + */ + + allow_signal(SIGKILL); + while (!kthread_should_stop()) { + + /* We need to wait INTERRUPTIBLE so that + * we don't add to the load-average. 
+ * That means we need to be sure no signals are + * pending + */ + if (signal_pending(current)) + flush_signals(current); + + wait_event_interruptible_timeout + (thread->wqueue, + test_bit(THREAD_WAKEUP, &thread->flags) + || kthread_should_stop(), + thread->timeout); + + clear_bit(THREAD_WAKEUP, &thread->flags); + + thread->run(thread->mddev); + } + + return 0; +} + +void md_wakeup_thread(mdk_thread_t *thread) +{ + if (thread) { + dprintk("md: waking up MD thread %s.\n", thread->tsk->comm); + set_bit(THREAD_WAKEUP, &thread->flags); + wake_up(&thread->wqueue); + } +} + +mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev, + const char *name) +{ + mdk_thread_t *thread; + + thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL); + if (!thread) + return NULL; + + init_waitqueue_head(&thread->wqueue); + + thread->run = run; + thread->mddev = mddev; + thread->timeout = MAX_SCHEDULE_TIMEOUT; + thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev)); + if (IS_ERR(thread->tsk)) { + kfree(thread); + return NULL; + } + return thread; +} + +void md_unregister_thread(mdk_thread_t *thread) +{ + dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); + + kthread_stop(thread->tsk); + kfree(thread); +} + +void md_error(mddev_t *mddev, mdk_rdev_t *rdev) +{ + if (!mddev) { + MD_BUG(); + return; + } + + if (!rdev || test_bit(Faulty, &rdev->flags)) + return; + + if (mddev->external) + set_bit(Blocked, &rdev->flags); +/* + dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n", + mdname(mddev), + MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev), + __builtin_return_address(0),__builtin_return_address(1), + __builtin_return_address(2),__builtin_return_address(3)); +*/ + if (!mddev->pers) + return; + if (!mddev->pers->error_handler) + return; + mddev->pers->error_handler(mddev,rdev); + if (mddev->degraded) + set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); + set_bit(StateChanged, &rdev->flags); + set_bit(MD_RECOVERY_INTR, &mddev->recovery); + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + md_wakeup_thread(mddev->thread); + md_new_event_inintr(mddev); +} + +/* seq_file implementation /proc/mdstat */ + +static void status_unused(struct seq_file *seq) +{ + int i = 0; + mdk_rdev_t *rdev; + struct list_head *tmp; + + seq_printf(seq, "unused devices: "); + + rdev_for_each_list(rdev, tmp, pending_raid_disks) { + char b[BDEVNAME_SIZE]; + i++; + seq_printf(seq, "%s ", + bdevname(rdev->bdev,b)); + } + if (!i) + seq_printf(seq, "<none>"); + + seq_printf(seq, "\n"); +} + + +static void status_resync(struct seq_file *seq, mddev_t * mddev) +{ + sector_t max_blocks, resync, res; + unsigned long dt, db, rt; + int scale; + unsigned int per_milli; + + resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2; + + if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) + max_blocks = mddev->resync_max_sectors >> 1; + else + max_blocks = mddev->size; + + /* + * Should not happen. + */ + if (!max_blocks) { + MD_BUG(); + return; + } + /* Pick 'scale' such that (resync>>scale)*1000 will fit + * in a sector_t, and (max_blocks>>scale) will fit in a + * u32, as those are the requirements for sector_div. 
+ * Thus 'scale' must be at least 10 + */ + scale = 10; + if (sizeof(sector_t) > sizeof(unsigned long)) { + while ( max_blocks/2 > (1ULL<<(scale+32))) + scale++; + } + res = (resync>>scale)*1000; + sector_div(res, (u32)((max_blocks>>scale)+1)); + + per_milli = res; + { + int i, x = per_milli/50, y = 20-x; + seq_printf(seq, "["); + for (i = 0; i < x; i++) + seq_printf(seq, "="); + seq_printf(seq, ">"); + for (i = 0; i < y; i++) + seq_printf(seq, "."); + seq_printf(seq, "] "); + } + seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)", + (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)? + "reshape" : + (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)? + "check" : + (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? + "resync" : "recovery"))), + per_milli/10, per_milli % 10, + (unsigned long long) resync, + (unsigned long long) max_blocks); + + /* + * We do not want to overflow, so the order of operands and + * the * 100 / 100 trick are important. We do a +1 to be + * safe against division by zero. We only estimate anyway. + * + * dt: time from mark until now + * db: blocks written from mark until now + * rt: remaining time + */ + dt = ((jiffies - mddev->resync_mark) / HZ); + if (!dt) dt++; + db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active)) + - mddev->resync_mark_cnt; + rt = (dt * ((unsigned long)(max_blocks-resync) / (db/2/100+1)))/100; + + seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6); + + seq_printf(seq, " speed=%ldK/sec", db/2/dt); +} + +static void *md_seq_start(struct seq_file *seq, loff_t *pos) +{ + struct list_head *tmp; + loff_t l = *pos; + mddev_t *mddev; + + if (l >= 0x10000) + return NULL; + if (!l--) + /* header */ + return (void*)1; + + spin_lock(&all_mddevs_lock); + list_for_each(tmp,&all_mddevs) + if (!l--) { + mddev = list_entry(tmp, mddev_t, all_mddevs); + mddev_get(mddev); + spin_unlock(&all_mddevs_lock); + return mddev; + } + spin_unlock(&all_mddevs_lock); + if (!l--) + return (void*)2;/* tail */ + return NULL; +} + +static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + struct list_head *tmp; + mddev_t *next_mddev, *mddev = v; + + ++*pos; + if (v == (void*)2) + return NULL; + + spin_lock(&all_mddevs_lock); + if (v == (void*)1) + tmp = all_mddevs.next; + else + tmp = mddev->all_mddevs.next; + if (tmp != &all_mddevs) + next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs)); + else { + next_mddev = (void*)2; + *pos = 0x10000; + } + spin_unlock(&all_mddevs_lock); + + if (v != (void*)1) + mddev_put(mddev); + return next_mddev; + +} + +static void md_seq_stop(struct seq_file *seq, void *v) +{ + mddev_t *mddev = v; + + if (mddev && v != (void*)1 && v != (void*)2) + mddev_put(mddev); +} + +struct mdstat_info { + int event; +}; + +static int md_seq_show(struct seq_file *seq, void *v) +{ + mddev_t *mddev = v; + sector_t size; + struct list_head *tmp2; + mdk_rdev_t *rdev; + struct mdstat_info *mi = seq->private; + struct bitmap *bitmap; + + if (v == (void*)1) { + struct mdk_personality *pers; + seq_printf(seq, "Personalities : "); + spin_lock(&pers_lock); + list_for_each_entry(pers, &pers_list, list) + seq_printf(seq, "[%s] ", pers->name); + + spin_unlock(&pers_lock); + seq_printf(seq, "\n"); + mi->event = atomic_read(&md_event_count); + return 0; + } + if (v == (void*)2) { + status_unused(seq); + return 0; + } + + if (mddev_lock(mddev) < 0) + return -EINTR; + + if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) { + seq_printf(seq, "%s : %sactive", mdname(mddev), + mddev->pers ? 
"" : "in"); + if (mddev->pers) { + if (mddev->ro==1) + seq_printf(seq, " (read-only)"); + if (mddev->ro==2) + seq_printf(seq, " (auto-read-only)"); + seq_printf(seq, " %s", mddev->pers->name); + } + + size = 0; + rdev_for_each(rdev, tmp2, mddev) { + char b[BDEVNAME_SIZE]; + seq_printf(seq, " %s[%d]", + bdevname(rdev->bdev,b), rdev->desc_nr); + if (test_bit(WriteMostly, &rdev->flags)) + seq_printf(seq, "(W)"); + if (test_bit(Faulty, &rdev->flags)) { + seq_printf(seq, "(F)"); + continue; + } else if (rdev->raid_disk < 0) + seq_printf(seq, "(S)"); /* spare */ + size += rdev->size; + } + + if (!list_empty(&mddev->disks)) { + if (mddev->pers) + seq_printf(seq, "\n %llu blocks", + (unsigned long long) + mddev->array_sectors / 2); + else + seq_printf(seq, "\n %llu blocks", + (unsigned long long)size); + } + if (mddev->persistent) { + if (mddev->major_version != 0 || + mddev->minor_version != 90) { + seq_printf(seq," super %d.%d", + mddev->major_version, + mddev->minor_version); + } + } else if (mddev->external) + seq_printf(seq, " super external:%s", + mddev->metadata_type); + else + seq_printf(seq, " super non-persistent"); + + if (mddev->pers) { + mddev->pers->status(seq, mddev); + seq_printf(seq, "\n "); + if (mddev->pers->sync_request) { + if (mddev->curr_resync > 2) { + status_resync(seq, mddev); + seq_printf(seq, "\n "); + } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2) + seq_printf(seq, "\tresync=DELAYED\n "); + else if (mddev->recovery_cp < MaxSector) + seq_printf(seq, "\tresync=PENDING\n "); + } + } else + seq_printf(seq, "\n "); + + if ((bitmap = mddev->bitmap)) { + unsigned long chunk_kb; + unsigned long flags; + spin_lock_irqsave(&bitmap->lock, flags); + chunk_kb = bitmap->chunksize >> 10; + seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], " + "%lu%s chunk", + bitmap->pages - bitmap->missing_pages, + bitmap->pages, + (bitmap->pages - bitmap->missing_pages) + << (PAGE_SHIFT - 10), + chunk_kb ? chunk_kb : bitmap->chunksize, + chunk_kb ? 
"KB" : "B"); + if (bitmap->file) { + seq_printf(seq, ", file: "); + seq_path(seq, &bitmap->file->f_path, " \t\n"); + } + + seq_printf(seq, "\n"); + spin_unlock_irqrestore(&bitmap->lock, flags); + } + + seq_printf(seq, "\n"); + } + mddev_unlock(mddev); + + return 0; +} + +static struct seq_operations md_seq_ops = { + .start = md_seq_start, + .next = md_seq_next, + .stop = md_seq_stop, + .show = md_seq_show, +}; + +static int md_seq_open(struct inode *inode, struct file *file) +{ + int error; + struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL); + if (mi == NULL) + return -ENOMEM; + + error = seq_open(file, &md_seq_ops); + if (error) + kfree(mi); + else { + struct seq_file *p = file->private_data; + p->private = mi; + mi->event = atomic_read(&md_event_count); + } + return error; +} + +static unsigned int mdstat_poll(struct file *filp, poll_table *wait) +{ + struct seq_file *m = filp->private_data; + struct mdstat_info *mi = m->private; + int mask; + + poll_wait(filp, &md_event_waiters, wait); + + /* always allow read */ + mask = POLLIN | POLLRDNORM; + + if (mi->event != atomic_read(&md_event_count)) + mask |= POLLERR | POLLPRI; + return mask; +} + +static const struct file_operations md_seq_fops = { + .owner = THIS_MODULE, + .open = md_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private, + .poll = mdstat_poll, +}; + +int register_md_personality(struct mdk_personality *p) +{ + spin_lock(&pers_lock); + list_add_tail(&p->list, &pers_list); + printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level); + spin_unlock(&pers_lock); + return 0; +} + +int unregister_md_personality(struct mdk_personality *p) +{ + printk(KERN_INFO "md: %s personality unregistered\n", p->name); + spin_lock(&pers_lock); + list_del_init(&p->list); + spin_unlock(&pers_lock); + return 0; +} + +static int is_mddev_idle(mddev_t *mddev) +{ + mdk_rdev_t * rdev; + int idle; + long curr_events; + + idle = 1; + rcu_read_lock(); + rdev_for_each_rcu(rdev, mddev) { + struct gendisk *disk = rdev->bdev->bd_contains->bd_disk; + curr_events = part_stat_read(&disk->part0, sectors[0]) + + part_stat_read(&disk->part0, sectors[1]) - + atomic_read(&disk->sync_io); + /* sync IO will cause sync_io to increase before the disk_stats + * as sync_io is counted when a request starts, and + * disk_stats is counted when it completes. + * So resync activity will cause curr_events to be smaller than + * when there was no such activity. + * non-sync IO will cause disk_stat to increase without + * increasing sync_io so curr_events will (eventually) + * be larger than it was before. Once it becomes + * substantially larger, the test below will cause + * the array to appear non-idle, and resync will slow + * down. + * If there is a lot of outstanding resync activity when + * we set last_event to curr_events, then all that activity + * completing might cause the array to appear non-idle + * and resync will be slowed down even though there might + * not have been non-resync activity. This will only + * happen once though. 'last_events' will soon reflect + * the state where there is little or no outstanding + * resync requests, and further resync activity will + * always make curr_events less than last_events. 
+ * + */ + if (curr_events - rdev->last_events > 4096) { + rdev->last_events = curr_events; + idle = 0; + } + } + rcu_read_unlock(); + return idle; +} + +void md_done_sync(mddev_t *mddev, int blocks, int ok) +{ + /* another "blocks" (512byte) blocks have been synced */ + atomic_sub(blocks, &mddev->recovery_active); + wake_up(&mddev->recovery_wait); + if (!ok) { + set_bit(MD_RECOVERY_INTR, &mddev->recovery); + md_wakeup_thread(mddev->thread); + // stop recovery, signal do_sync .... + } +} + + +/* md_write_start(mddev, bi) + * If we need to update some array metadata (e.g. 'active' flag + * in superblock) before writing, schedule a superblock update + * and wait for it to complete. + */ +void md_write_start(mddev_t *mddev, struct bio *bi) +{ + int did_change = 0; + if (bio_data_dir(bi) != WRITE) + return; + + BUG_ON(mddev->ro == 1); + if (mddev->ro == 2) { + /* need to switch to read/write */ + mddev->ro = 0; + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + md_wakeup_thread(mddev->thread); + md_wakeup_thread(mddev->sync_thread); + did_change = 1; + } + atomic_inc(&mddev->writes_pending); + if (mddev->safemode == 1) + mddev->safemode = 0; + if (mddev->in_sync) { + spin_lock_irq(&mddev->write_lock); + if (mddev->in_sync) { + mddev->in_sync = 0; + set_bit(MD_CHANGE_CLEAN, &mddev->flags); + md_wakeup_thread(mddev->thread); + did_change = 1; + } + spin_unlock_irq(&mddev->write_lock); + } + if (did_change) + sysfs_notify_dirent(mddev->sysfs_state); + wait_event(mddev->sb_wait, + !test_bit(MD_CHANGE_CLEAN, &mddev->flags) && + !test_bit(MD_CHANGE_PENDING, &mddev->flags)); +} + +void md_write_end(mddev_t *mddev) +{ + if (atomic_dec_and_test(&mddev->writes_pending)) { + if (mddev->safemode == 2) + md_wakeup_thread(mddev->thread); + else if (mddev->safemode_delay) + mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay); + } +} + +/* md_allow_write(mddev) + * Calling this ensures that the array is marked 'active' so that writes + * may proceed without blocking. It is important to call this before + * attempting a GFP_KERNEL allocation while holding the mddev lock. + * Must be called with mddev_lock held. + * + * In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock + * is dropped, so return -EAGAIN after notifying userspace. + */ +int md_allow_write(mddev_t *mddev) +{ + if (!mddev->pers) + return 0; + if (mddev->ro) + return 0; + if (!mddev->pers->sync_request) + return 0; + + spin_lock_irq(&mddev->write_lock); + if (mddev->in_sync) { + mddev->in_sync = 0; + set_bit(MD_CHANGE_CLEAN, &mddev->flags); + if (mddev->safemode_delay && + mddev->safemode == 0) + mddev->safemode = 1; + spin_unlock_irq(&mddev->write_lock); + md_update_sb(mddev, 0); + sysfs_notify_dirent(mddev->sysfs_state); + } else + spin_unlock_irq(&mddev->write_lock); + + if (test_bit(MD_CHANGE_CLEAN, &mddev->flags)) + return -EAGAIN; + else + return 0; +} +EXPORT_SYMBOL_GPL(md_allow_write); + +#define SYNC_MARKS 10 +#define SYNC_MARK_STEP (3*HZ) +void md_do_sync(mddev_t *mddev) +{ + mddev_t *mddev2; + unsigned int currspeed = 0, + window; + sector_t max_sectors,j, io_sectors; + unsigned long mark[SYNC_MARKS]; + sector_t mark_cnt[SYNC_MARKS]; + int last_mark,m; + struct list_head *tmp; + sector_t last_check; + int skipped = 0; + struct list_head *rtmp; + mdk_rdev_t *rdev; + char *desc; + + /* just incase thread restarts... 
*/ + if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) + return; + if (mddev->ro) /* never try to sync a read-only array */ + return; + + if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { + if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) + desc = "data-check"; + else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) + desc = "requested-resync"; + else + desc = "resync"; + } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) + desc = "reshape"; + else + desc = "recovery"; + + /* we overload curr_resync somewhat here. + * 0 == not engaged in resync at all + * 2 == checking that there is no conflict with another sync + * 1 == like 2, but have yielded to allow conflicting resync to + * commense + * other == active in resync - this many blocks + * + * Before starting a resync we must have set curr_resync to + * 2, and then checked that every "conflicting" array has curr_resync + * less than ours. When we find one that is the same or higher + * we wait on resync_wait. To avoid deadlock, we reduce curr_resync + * to 1 if we choose to yield (based arbitrarily on address of mddev structure). + * This will mean we have to start checking from the beginning again. + * + */ + + do { + mddev->curr_resync = 2; + + try_again: + if (kthread_should_stop()) { + set_bit(MD_RECOVERY_INTR, &mddev->recovery); + goto skip; + } + for_each_mddev(mddev2, tmp) { + if (mddev2 == mddev) + continue; + if (!mddev->parallel_resync + && mddev2->curr_resync + && match_mddev_units(mddev, mddev2)) { + DEFINE_WAIT(wq); + if (mddev < mddev2 && mddev->curr_resync == 2) { + /* arbitrarily yield */ + mddev->curr_resync = 1; + wake_up(&resync_wait); + } + if (mddev > mddev2 && mddev->curr_resync == 1) + /* no need to wait here, we can wait the next + * time 'round when curr_resync == 2 + */ + continue; + /* We need to wait 'interruptible' so as not to + * contribute to the load average, and not to + * be caught by 'softlockup' + */ + prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE); + if (!kthread_should_stop() && + mddev2->curr_resync >= mddev->curr_resync) { + printk(KERN_INFO "md: delaying %s of %s" + " until %s has finished (they" + " share one or more physical units)\n", + desc, mdname(mddev), mdname(mddev2)); + mddev_put(mddev2); + if (signal_pending(current)) + flush_signals(current); + schedule(); + finish_wait(&resync_wait, &wq); + goto try_again; + } + finish_wait(&resync_wait, &wq); + } + } + } while (mddev->curr_resync < 2); + + j = 0; + if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { + /* resync follows the size requested by the personality, + * which defaults to physical size, but can be virtual size + */ + max_sectors = mddev->resync_max_sectors; + mddev->resync_mismatches = 0; + /* we don't use the checkpoint if there's a bitmap */ + if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) + j = mddev->resync_min; + else if (!mddev->bitmap) + j = mddev->recovery_cp; + + } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) + max_sectors = mddev->size << 1; + else { + /* recovery follows the physical size of devices */ + max_sectors = mddev->size << 1; + j = MaxSector; + rdev_for_each(rdev, rtmp, mddev) + if (rdev->raid_disk >= 0 && + !test_bit(Faulty, &rdev->flags) && + !test_bit(In_sync, &rdev->flags) && + rdev->recovery_offset < j) + j = rdev->recovery_offset; + } + + printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev)); + printk(KERN_INFO "md: minimum _guaranteed_ speed:" + " %d KB/sec/disk.\n", speed_min(mddev)); + printk(KERN_INFO "md: using maximum available idle IO 
bandwidth " + "(but not more than %d KB/sec) for %s.\n", + speed_max(mddev), desc); + + is_mddev_idle(mddev); /* this also initializes IO event counters */ + + io_sectors = 0; + for (m = 0; m < SYNC_MARKS; m++) { + mark[m] = jiffies; + mark_cnt[m] = io_sectors; + } + last_mark = 0; + mddev->resync_mark = mark[last_mark]; + mddev->resync_mark_cnt = mark_cnt[last_mark]; + + /* + * Tune reconstruction: + */ + window = 32*(PAGE_SIZE/512); + printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n", + window/2,(unsigned long long) max_sectors/2); + + atomic_set(&mddev->recovery_active, 0); + last_check = 0; + + if (j>2) { + printk(KERN_INFO + "md: resuming %s of %s from checkpoint.\n", + desc, mdname(mddev)); + mddev->curr_resync = j; + } + + while (j < max_sectors) { + sector_t sectors; + + skipped = 0; + if (j >= mddev->resync_max) { + sysfs_notify(&mddev->kobj, NULL, "sync_completed"); + wait_event(mddev->recovery_wait, + mddev->resync_max > j + || kthread_should_stop()); + } + if (kthread_should_stop()) + goto interrupted; + sectors = mddev->pers->sync_request(mddev, j, &skipped, + currspeed < speed_min(mddev)); + if (sectors == 0) { + set_bit(MD_RECOVERY_INTR, &mddev->recovery); + goto out; + } + + if (!skipped) { /* actual IO requested */ + io_sectors += sectors; + atomic_add(sectors, &mddev->recovery_active); + } + + j += sectors; + if (j>1) mddev->curr_resync = j; + mddev->curr_mark_cnt = io_sectors; + if (last_check == 0) + /* this is the earliers that rebuilt will be + * visible in /proc/mdstat + */ + md_new_event(mddev); + + if (last_check + window > io_sectors || j == max_sectors) + continue; + + last_check = io_sectors; + + if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) + break; + + repeat: + if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) { + /* step marks */ + int next = (last_mark+1) % SYNC_MARKS; + + mddev->resync_mark = mark[next]; + mddev->resync_mark_cnt = mark_cnt[next]; + mark[next] = jiffies; + mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active); + last_mark = next; + } + + + if (kthread_should_stop()) + goto interrupted; + + + /* + * this loop exits only if either when we are slower than + * the 'hard' speed limit, or the system was IO-idle for + * a jiffy. + * the system might be non-idle CPU-wise, but we only care + * about not overloading the IO subsystem. 
(things like an + * e2fsck being done on the RAID array should execute fast) + */ + blk_unplug(mddev->queue); + cond_resched(); + + currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2 + /((jiffies-mddev->resync_mark)/HZ +1) +1; + + if (currspeed > speed_min(mddev)) { + if ((currspeed > speed_max(mddev)) || + !is_mddev_idle(mddev)) { + msleep(500); + goto repeat; + } + } + } + printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc); + /* + * this also signals 'finished resyncing' to md_stop + */ + out: + blk_unplug(mddev->queue); + + wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); + + /* tell personality that we are finished */ + mddev->pers->sync_request(mddev, max_sectors, &skipped, 1); + + if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && + mddev->curr_resync > 2) { + if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { + if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { + if (mddev->curr_resync >= mddev->recovery_cp) { + printk(KERN_INFO + "md: checkpointing %s of %s.\n", + desc, mdname(mddev)); + mddev->recovery_cp = mddev->curr_resync; + } + } else + mddev->recovery_cp = MaxSector; + } else { + if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) + mddev->curr_resync = MaxSector; + rdev_for_each(rdev, rtmp, mddev) + if (rdev->raid_disk >= 0 && + !test_bit(Faulty, &rdev->flags) && + !test_bit(In_sync, &rdev->flags) && + rdev->recovery_offset < mddev->curr_resync) + rdev->recovery_offset = mddev->curr_resync; + } + } + set_bit(MD_CHANGE_DEVS, &mddev->flags); + + skip: + mddev->curr_resync = 0; + mddev->resync_min = 0; + mddev->resync_max = MaxSector; + sysfs_notify(&mddev->kobj, NULL, "sync_completed"); + wake_up(&resync_wait); + set_bit(MD_RECOVERY_DONE, &mddev->recovery); + md_wakeup_thread(mddev->thread); + return; + + interrupted: + /* + * got a signal, exit. + */ + printk(KERN_INFO + "md: md_do_sync() got signal ... exiting\n"); + set_bit(MD_RECOVERY_INTR, &mddev->recovery); + goto out; + +} +EXPORT_SYMBOL_GPL(md_do_sync); + + +static int remove_and_add_spares(mddev_t *mddev) +{ + mdk_rdev_t *rdev; + struct list_head *rtmp; + int spares = 0; + + rdev_for_each(rdev, rtmp, mddev) + if (rdev->raid_disk >= 0 && + !test_bit(Blocked, &rdev->flags) && + (test_bit(Faulty, &rdev->flags) || + ! test_bit(In_sync, &rdev->flags)) && + atomic_read(&rdev->nr_pending)==0) { + if (mddev->pers->hot_remove_disk( + mddev, rdev->raid_disk)==0) { + char nm[20]; + sprintf(nm,"rd%d", rdev->raid_disk); + sysfs_remove_link(&mddev->kobj, nm); + rdev->raid_disk = -1; + } + } + + if (mddev->degraded && ! mddev->ro) { + rdev_for_each(rdev, rtmp, mddev) { + if (rdev->raid_disk >= 0 && + !test_bit(In_sync, &rdev->flags) && + !test_bit(Blocked, &rdev->flags)) + spares++; + if (rdev->raid_disk < 0 + && !test_bit(Faulty, &rdev->flags)) { + rdev->recovery_offset = 0; + if (mddev->pers-> + hot_add_disk(mddev, rdev) == 0) { + char nm[20]; + sprintf(nm, "rd%d", rdev->raid_disk); + if (sysfs_create_link(&mddev->kobj, + &rdev->kobj, nm)) + printk(KERN_WARNING + "md: cannot register " + "%s for %s\n", + nm, mdname(mddev)); + spares++; + md_new_event(mddev); + } else + break; + } + } + } + return spares; +} +/* + * This routine is regularly called by all per-raid-array threads to + * deal with generic issues like resync and super-block update. + * Raid personalities that don't have a thread (linear/raid0) do not + * need this as they never do any recovery or update the superblock. 
+ * + * It does not do any resync itself, but rather "forks" off other threads + * to do that as needed. + * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in + * "->recovery" and create a thread at ->sync_thread. + * When the thread finishes it sets MD_RECOVERY_DONE + * and wakeups up this thread which will reap the thread and finish up. + * This thread also removes any faulty devices (with nr_pending == 0). + * + * The overall approach is: + * 1/ if the superblock needs updating, update it. + * 2/ If a recovery thread is running, don't do anything else. + * 3/ If recovery has finished, clean up, possibly marking spares active. + * 4/ If there are any faulty devices, remove them. + * 5/ If array is degraded, try to add spares devices + * 6/ If array has spares or is not in-sync, start a resync thread. + */ +void md_check_recovery(mddev_t *mddev) +{ + mdk_rdev_t *rdev; + struct list_head *rtmp; + + + if (mddev->bitmap) + bitmap_daemon_work(mddev->bitmap); + + if (mddev->ro) + return; + + if (signal_pending(current)) { + if (mddev->pers->sync_request && !mddev->external) { + printk(KERN_INFO "md: %s in immediate safe mode\n", + mdname(mddev)); + mddev->safemode = 2; + } + flush_signals(current); + } + + if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) + return; + if ( ! ( + (mddev->flags && !mddev->external) || + test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || + test_bit(MD_RECOVERY_DONE, &mddev->recovery) || + (mddev->external == 0 && mddev->safemode == 1) || + (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending) + && !mddev->in_sync && mddev->recovery_cp == MaxSector) + )) + return; + + if (mddev_trylock(mddev)) { + int spares = 0; + + if (mddev->ro) { + /* Only thing we do on a ro array is remove + * failed devices. 
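+ * (remove_and_add_spares() will not activate spares while the
+ * array is read-only, so no recovery is started here.)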
+ */ + remove_and_add_spares(mddev); + clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + goto unlock; + } + + if (!mddev->external) { + int did_change = 0; + spin_lock_irq(&mddev->write_lock); + if (mddev->safemode && + !atomic_read(&mddev->writes_pending) && + !mddev->in_sync && + mddev->recovery_cp == MaxSector) { + mddev->in_sync = 1; + did_change = 1; + if (mddev->persistent) + set_bit(MD_CHANGE_CLEAN, &mddev->flags); + } + if (mddev->safemode == 1) + mddev->safemode = 0; + spin_unlock_irq(&mddev->write_lock); + if (did_change) + sysfs_notify_dirent(mddev->sysfs_state); + } + + if (mddev->flags) + md_update_sb(mddev, 0); + + rdev_for_each(rdev, rtmp, mddev) + if (test_and_clear_bit(StateChanged, &rdev->flags)) + sysfs_notify_dirent(rdev->sysfs_state); + + + if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && + !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) { + /* resync/recovery still happening */ + clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + goto unlock; + } + if (mddev->sync_thread) { + /* resync has finished, collect result */ + md_unregister_thread(mddev->sync_thread); + mddev->sync_thread = NULL; + if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && + !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { + /* success...*/ + /* activate any spares */ + if (mddev->pers->spare_active(mddev)) + sysfs_notify(&mddev->kobj, NULL, + "degraded"); + } + md_update_sb(mddev, 1); + + /* if array is no-longer degraded, then any saved_raid_disk + * information must be scrapped + */ + if (!mddev->degraded) + rdev_for_each(rdev, rtmp, mddev) + rdev->saved_raid_disk = -1; + + mddev->recovery = 0; + /* flag recovery needed just to double check */ + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + sysfs_notify(&mddev->kobj, NULL, "sync_action"); + md_new_event(mddev); + goto unlock; + } + /* Set RUNNING before clearing NEEDED to avoid + * any transients in the value of "sync_action". + */ + set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); + clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + /* Clear some bits that don't mean anything, but + * might be left set + */ + clear_bit(MD_RECOVERY_INTR, &mddev->recovery); + clear_bit(MD_RECOVERY_DONE, &mddev->recovery); + + if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) + goto unlock; + /* no recovery is running. + * remove any failed drives, then + * add spares if possible. + * Spare are also removed and re-added, to allow + * the personality to fail the re-add. + */ + + if (mddev->reshape_position != MaxSector) { + if (mddev->pers->check_reshape(mddev) != 0) + /* Cannot proceed */ + goto unlock; + set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); + clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); + } else if ((spares = remove_and_add_spares(mddev))) { + clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); + clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); + clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); + set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); + } else if (mddev->recovery_cp < MaxSector) { + set_bit(MD_RECOVERY_SYNC, &mddev->recovery); + clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); + } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) + /* nothing to be done ... */ + goto unlock; + + if (mddev->pers->sync_request) { + if (spares && mddev->bitmap && ! mddev->bitmap->file) { + /* We are adding a device or devices to an array + * which has the bitmap stored on all devices. 
+ * So make sure all bitmap pages get written + */ + bitmap_write_all(mddev->bitmap); + } + mddev->sync_thread = md_register_thread(md_do_sync, + mddev, + "%s_resync"); + if (!mddev->sync_thread) { + printk(KERN_ERR "%s: could not start resync" + " thread...\n", + mdname(mddev)); + /* leave the spares where they are, it shouldn't hurt */ + mddev->recovery = 0; + } else + md_wakeup_thread(mddev->sync_thread); + sysfs_notify(&mddev->kobj, NULL, "sync_action"); + md_new_event(mddev); + } + unlock: + if (!mddev->sync_thread) { + clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); + if (test_and_clear_bit(MD_RECOVERY_RECOVER, + &mddev->recovery)) + sysfs_notify(&mddev->kobj, NULL, "sync_action"); + } + mddev_unlock(mddev); + } +} + +void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev) +{ + sysfs_notify_dirent(rdev->sysfs_state); + wait_event_timeout(rdev->blocked_wait, + !test_bit(Blocked, &rdev->flags), + msecs_to_jiffies(5000)); + rdev_dec_pending(rdev, mddev); +} +EXPORT_SYMBOL(md_wait_for_blocked_rdev); + +static int md_notify_reboot(struct notifier_block *this, + unsigned long code, void *x) +{ + struct list_head *tmp; + mddev_t *mddev; + + if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) { + + printk(KERN_INFO "md: stopping all md devices.\n"); + + for_each_mddev(mddev, tmp) + if (mddev_trylock(mddev)) { + /* Force a switch to readonly even array + * appears to still be in use. Hence + * the '100'. + */ + do_md_stop(mddev, 1, 100); + mddev_unlock(mddev); + } + /* + * certain more exotic SCSI devices are known to be + * volatile wrt too early system reboots. While the + * right place to handle this issue is the given + * driver, we do want to have a safe RAID driver ... + */ + mdelay(1000*1); + } + return NOTIFY_DONE; +} + +static struct notifier_block md_notifier = { + .notifier_call = md_notify_reboot, + .next = NULL, + .priority = INT_MAX, /* before any real devices */ +}; + +static void md_geninit(void) +{ + dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t)); + + proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops); +} + +static int __init md_init(void) +{ + if (register_blkdev(MAJOR_NR, "md")) + return -1; + if ((mdp_major=register_blkdev(0, "mdp"))<=0) { + unregister_blkdev(MAJOR_NR, "md"); + return -1; + } + blk_register_region(MKDEV(MAJOR_NR, 0), 1UL<<MINORBITS, THIS_MODULE, + md_probe, NULL, NULL); + blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE, + md_probe, NULL, NULL); + + register_reboot_notifier(&md_notifier); + raid_table_header = register_sysctl_table(raid_root_table); + + md_geninit(); + return 0; +} + + +#ifndef MODULE + +/* + * Searches all registered partitions for autorun RAID arrays + * at boot time. 
+ */ + +static LIST_HEAD(all_detected_devices); +struct detected_devices_node { + struct list_head list; + dev_t dev; +}; + +void md_autodetect_dev(dev_t dev) +{ + struct detected_devices_node *node_detected_dev; + + node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL); + if (node_detected_dev) { + node_detected_dev->dev = dev; + list_add_tail(&node_detected_dev->list, &all_detected_devices); + } else { + printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed" + ", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev)); + } +} + + +static void autostart_arrays(int part) +{ + mdk_rdev_t *rdev; + struct detected_devices_node *node_detected_dev; + dev_t dev; + int i_scanned, i_passed; + + i_scanned = 0; + i_passed = 0; + + printk(KERN_INFO "md: Autodetecting RAID arrays.\n"); + + while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) { + i_scanned++; + node_detected_dev = list_entry(all_detected_devices.next, + struct detected_devices_node, list); + list_del(&node_detected_dev->list); + dev = node_detected_dev->dev; + kfree(node_detected_dev); + rdev = md_import_device(dev,0, 90); + if (IS_ERR(rdev)) + continue; + + if (test_bit(Faulty, &rdev->flags)) { + MD_BUG(); + continue; + } + set_bit(AutoDetected, &rdev->flags); + list_add(&rdev->same_set, &pending_raid_disks); + i_passed++; + } + + printk(KERN_INFO "md: Scanned %d and added %d devices.\n", + i_scanned, i_passed); + + autorun_devices(part); +} + +#endif /* !MODULE */ + +static __exit void md_exit(void) +{ + mddev_t *mddev; + struct list_head *tmp; + + blk_unregister_region(MKDEV(MAJOR_NR,0), 1U << MINORBITS); + blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS); + + unregister_blkdev(MAJOR_NR,"md"); + unregister_blkdev(mdp_major, "mdp"); + unregister_reboot_notifier(&md_notifier); + unregister_sysctl_table(raid_table_header); + remove_proc_entry("mdstat", NULL); + for_each_mddev(mddev, tmp) { + struct gendisk *disk = mddev->gendisk; + if (!disk) + continue; + export_array(mddev); + del_gendisk(disk); + put_disk(disk); + mddev->gendisk = NULL; + mddev_put(mddev); + } +} + +subsys_initcall(md_init); +module_exit(md_exit) + +static int get_ro(char *buffer, struct kernel_param *kp) +{ + return sprintf(buffer, "%d", start_readonly); +} +static int set_ro(const char *val, struct kernel_param *kp) +{ + char *e; + int num = simple_strtoul(val, &e, 10); + if (*val && (*e == '\0' || *e == '\n')) { + start_readonly = num; + return 0; + } + return -EINVAL; +} + +module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR); +module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR); + + +EXPORT_SYMBOL(register_md_personality); +EXPORT_SYMBOL(unregister_md_personality); +EXPORT_SYMBOL(md_error); +EXPORT_SYMBOL(md_done_sync); +EXPORT_SYMBOL(md_write_start); +EXPORT_SYMBOL(md_write_end); +EXPORT_SYMBOL(md_register_thread); +EXPORT_SYMBOL(md_unregister_thread); +EXPORT_SYMBOL(md_wakeup_thread); +EXPORT_SYMBOL(md_check_recovery); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("md"); +MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR); diff --git a/drivers/md/mktables.c b/drivers/md/mktables.c new file mode 100644 index 0000000..b61d576 --- /dev/null +++ b/drivers/md/mktables.c @@ -0,0 +1,120 @@ +/* -*- linux-c -*- ------------------------------------------------------- * + * + * Copyright 2002-2007 H. Peter Anvin - All Rights Reserved + * + * This file is part of the Linux kernel, and is made available under + * the terms of the GNU General Public License version 2 or (at your + * option) any later version; incorporated herein by reference. 
+ * + * ----------------------------------------------------------------------- */ + +/* + * mktables.c + * + * Make RAID-6 tables. This is a host user space program to be run at + * compile time. + */ + +#include <stdio.h> +#include <string.h> +#include <inttypes.h> +#include <stdlib.h> +#include <time.h> + +static uint8_t gfmul(uint8_t a, uint8_t b) +{ + uint8_t v = 0; + + while (b) { + if (b & 1) + v ^= a; + a = (a << 1) ^ (a & 0x80 ? 0x1d : 0); + b >>= 1; + } + + return v; +} + +static uint8_t gfpow(uint8_t a, int b) +{ + uint8_t v = 1; + + b %= 255; + if (b < 0) + b += 255; + + while (b) { + if (b & 1) + v = gfmul(v, a); + a = gfmul(a, a); + b >>= 1; + } + + return v; +} + +int main(int argc, char *argv[]) +{ + int i, j, k; + uint8_t v; + uint8_t exptbl[256], invtbl[256]; + + printf("#include \"raid6.h\"\n"); + + /* Compute multiplication table */ + printf("\nconst u8 __attribute__((aligned(256)))\n" + "raid6_gfmul[256][256] =\n" + "{\n"); + for (i = 0; i < 256; i++) { + printf("\t{\n"); + for (j = 0; j < 256; j += 8) { + printf("\t\t"); + for (k = 0; k < 8; k++) + printf("0x%02x,%c", gfmul(i, j + k), + (k == 7) ? '\n' : ' '); + } + printf("\t},\n"); + } + printf("};\n"); + + /* Compute power-of-2 table (exponent) */ + v = 1; + printf("\nconst u8 __attribute__((aligned(256)))\n" + "raid6_gfexp[256] =\n" "{\n"); + for (i = 0; i < 256; i += 8) { + printf("\t"); + for (j = 0; j < 8; j++) { + exptbl[i + j] = v; + printf("0x%02x,%c", v, (j == 7) ? '\n' : ' '); + v = gfmul(v, 2); + if (v == 1) + v = 0; /* For entry 255, not a real entry */ + } + } + printf("};\n"); + + /* Compute inverse table x^-1 == x^254 */ + printf("\nconst u8 __attribute__((aligned(256)))\n" + "raid6_gfinv[256] =\n" "{\n"); + for (i = 0; i < 256; i += 8) { + printf("\t"); + for (j = 0; j < 8; j++) { + invtbl[i + j] = v = gfpow(i + j, 254); + printf("0x%02x,%c", v, (j == 7) ? '\n' : ' '); + } + } + printf("};\n"); + + /* Compute inv(2^x + 1) (exponent-xor-inverse) table */ + printf("\nconst u8 __attribute__((aligned(256)))\n" + "raid6_gfexi[256] =\n" "{\n"); + for (i = 0; i < 256; i += 8) { + printf("\t"); + for (j = 0; j < 8; j++) + printf("0x%02x,%c", invtbl[exptbl[i + j] ^ 1], + (j == 7) ? '\n' : ' '); + } + printf("};\n"); + + return 0; +} diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c new file mode 100644 index 0000000..d4ac47d --- /dev/null +++ b/drivers/md/multipath.c @@ -0,0 +1,564 @@ +/* + * multipath.c : Multiple Devices driver for Linux + * + * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat + * + * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman + * + * MULTIPATH management functions. + * + * derived from raid1.c. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * You should have received a copy of the GNU General Public License + * (for example /usr/src/linux/COPYING); if not, write to the Free + * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/raid/multipath.h> + +#define MAX_WORK_PER_DISK 128 + +#define NR_RESERVED_BUFS 32 + + +static int multipath_map (multipath_conf_t *conf) +{ + int i, disks = conf->raid_disks; + + /* + * Later we do read balancing on the read side + * now we use the first available disk. 
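+ * (i.e. the first path whose rdev is present and In_sync is chosen)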
+ */ + + rcu_read_lock(); + for (i = 0; i < disks; i++) { + mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev); + if (rdev && test_bit(In_sync, &rdev->flags)) { + atomic_inc(&rdev->nr_pending); + rcu_read_unlock(); + return i; + } + } + rcu_read_unlock(); + + printk(KERN_ERR "multipath_map(): no more operational IO paths?\n"); + return (-1); +} + +static void multipath_reschedule_retry (struct multipath_bh *mp_bh) +{ + unsigned long flags; + mddev_t *mddev = mp_bh->mddev; + multipath_conf_t *conf = mddev_to_conf(mddev); + + spin_lock_irqsave(&conf->device_lock, flags); + list_add(&mp_bh->retry_list, &conf->retry_list); + spin_unlock_irqrestore(&conf->device_lock, flags); + md_wakeup_thread(mddev->thread); +} + + +/* + * multipath_end_bh_io() is called when we have finished servicing a multipathed + * operation and are ready to return a success/failure code to the buffer + * cache layer. + */ +static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err) +{ + struct bio *bio = mp_bh->master_bio; + multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev); + + bio_endio(bio, err); + mempool_free(mp_bh, conf->pool); +} + +static void multipath_end_request(struct bio *bio, int error) +{ + int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); + struct multipath_bh * mp_bh = (struct multipath_bh *)(bio->bi_private); + multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev); + mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev; + + if (uptodate) + multipath_end_bh_io(mp_bh, 0); + else if (!bio_rw_ahead(bio)) { + /* + * oops, IO error: + */ + char b[BDEVNAME_SIZE]; + md_error (mp_bh->mddev, rdev); + printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n", + bdevname(rdev->bdev,b), + (unsigned long long)bio->bi_sector); + multipath_reschedule_retry(mp_bh); + } else + multipath_end_bh_io(mp_bh, error); + rdev_dec_pending(rdev, conf->mddev); +} + +static void unplug_slaves(mddev_t *mddev) +{ + multipath_conf_t *conf = mddev_to_conf(mddev); + int i; + + rcu_read_lock(); + for (i=0; i<mddev->raid_disks; i++) { + mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev); + if (rdev && !test_bit(Faulty, &rdev->flags) + && atomic_read(&rdev->nr_pending)) { + struct request_queue *r_queue = bdev_get_queue(rdev->bdev); + + atomic_inc(&rdev->nr_pending); + rcu_read_unlock(); + + blk_unplug(r_queue); + + rdev_dec_pending(rdev, mddev); + rcu_read_lock(); + } + } + rcu_read_unlock(); +} + +static void multipath_unplug(struct request_queue *q) +{ + unplug_slaves(q->queuedata); +} + + +static int multipath_make_request (struct request_queue *q, struct bio * bio) +{ + mddev_t *mddev = q->queuedata; + multipath_conf_t *conf = mddev_to_conf(mddev); + struct multipath_bh * mp_bh; + struct multipath_info *multipath; + const int rw = bio_data_dir(bio); + int cpu; + + if (unlikely(bio_barrier(bio))) { + bio_endio(bio, -EOPNOTSUPP); + return 0; + } + + mp_bh = mempool_alloc(conf->pool, GFP_NOIO); + + mp_bh->master_bio = bio; + mp_bh->mddev = mddev; + + cpu = part_stat_lock(); + part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); + part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], + bio_sectors(bio)); + part_stat_unlock(); + + mp_bh->path = multipath_map(conf); + if (mp_bh->path < 0) { + bio_endio(bio, -EIO); + mempool_free(mp_bh, conf->pool); + return 0; + } + multipath = conf->multipaths + mp_bh->path; + + mp_bh->bio = *bio; + mp_bh->bio.bi_sector += multipath->rdev->data_offset; + mp_bh->bio.bi_bdev = multipath->rdev->bdev; + mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT); + 
mp_bh->bio.bi_end_io = multipath_end_request; + mp_bh->bio.bi_private = mp_bh; + generic_make_request(&mp_bh->bio); + return 0; +} + +static void multipath_status (struct seq_file *seq, mddev_t *mddev) +{ + multipath_conf_t *conf = mddev_to_conf(mddev); + int i; + + seq_printf (seq, " [%d/%d] [", conf->raid_disks, + conf->working_disks); + for (i = 0; i < conf->raid_disks; i++) + seq_printf (seq, "%s", + conf->multipaths[i].rdev && + test_bit(In_sync, &conf->multipaths[i].rdev->flags) ? "U" : "_"); + seq_printf (seq, "]"); +} + +static int multipath_congested(void *data, int bits) +{ + mddev_t *mddev = data; + multipath_conf_t *conf = mddev_to_conf(mddev); + int i, ret = 0; + + rcu_read_lock(); + for (i = 0; i < mddev->raid_disks ; i++) { + mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev); + if (rdev && !test_bit(Faulty, &rdev->flags)) { + struct request_queue *q = bdev_get_queue(rdev->bdev); + + ret |= bdi_congested(&q->backing_dev_info, bits); + /* Just like multipath_map, we just check the + * first available device + */ + break; + } + } + rcu_read_unlock(); + return ret; +} + +/* + * Careful, this can execute in IRQ contexts as well! + */ +static void multipath_error (mddev_t *mddev, mdk_rdev_t *rdev) +{ + multipath_conf_t *conf = mddev_to_conf(mddev); + + if (conf->working_disks <= 1) { + /* + * Uh oh, we can do nothing if this is our last path, but + * first check if this is a queued request for a device + * which has just failed. + */ + printk(KERN_ALERT + "multipath: only one IO path left and IO error.\n"); + /* leave it active... it's all we have */ + } else { + /* + * Mark disk as unusable + */ + if (!test_bit(Faulty, &rdev->flags)) { + char b[BDEVNAME_SIZE]; + clear_bit(In_sync, &rdev->flags); + set_bit(Faulty, &rdev->flags); + set_bit(MD_CHANGE_DEVS, &mddev->flags); + conf->working_disks--; + mddev->degraded++; + printk(KERN_ALERT "multipath: IO failure on %s," + " disabling IO path.\n" + "multipath: Operation continuing" + " on %d IO paths.\n", + bdevname (rdev->bdev,b), + conf->working_disks); + } + } +} + +static void print_multipath_conf (multipath_conf_t *conf) +{ + int i; + struct multipath_info *tmp; + + printk("MULTIPATH conf printout:\n"); + if (!conf) { + printk("(conf==NULL)\n"); + return; + } + printk(" --- wd:%d rd:%d\n", conf->working_disks, + conf->raid_disks); + + for (i = 0; i < conf->raid_disks; i++) { + char b[BDEVNAME_SIZE]; + tmp = conf->multipaths + i; + if (tmp->rdev) + printk(" disk%d, o:%d, dev:%s\n", + i,!test_bit(Faulty, &tmp->rdev->flags), + bdevname(tmp->rdev->bdev,b)); + } +} + + +static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) +{ + multipath_conf_t *conf = mddev->private; + struct request_queue *q; + int err = -EEXIST; + int path; + struct multipath_info *p; + int first = 0; + int last = mddev->raid_disks - 1; + + if (rdev->raid_disk >= 0) + first = last = rdev->raid_disk; + + print_multipath_conf(conf); + + for (path = first; path <= last; path++) + if ((p=conf->multipaths+path)->rdev == NULL) { + q = rdev->bdev->bd_disk->queue; + blk_queue_stack_limits(mddev->queue, q); + + /* as we don't honour merge_bvec_fn, we must never risk + * violating it, so limit ->max_sector to one PAGE, as + * a one page request is never in violation. + * (Note: it is very unlikely that a device with + * merge_bvec_fn will be involved in multipath.) 
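+ * (On systems with 4KiB pages, PAGE_SIZE>>9 is 8 sectors, so when
+ * the lower device defines merge_bvec_fn, requests through this
+ * array are capped at 4KiB.)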
+ */ + if (q->merge_bvec_fn && + mddev->queue->max_sectors > (PAGE_SIZE>>9)) + blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); + + conf->working_disks++; + mddev->degraded--; + rdev->raid_disk = path; + set_bit(In_sync, &rdev->flags); + rcu_assign_pointer(p->rdev, rdev); + err = 0; + break; + } + + print_multipath_conf(conf); + + return err; +} + +static int multipath_remove_disk(mddev_t *mddev, int number) +{ + multipath_conf_t *conf = mddev->private; + int err = 0; + mdk_rdev_t *rdev; + struct multipath_info *p = conf->multipaths + number; + + print_multipath_conf(conf); + + rdev = p->rdev; + if (rdev) { + if (test_bit(In_sync, &rdev->flags) || + atomic_read(&rdev->nr_pending)) { + printk(KERN_ERR "hot-remove-disk, slot %d is identified" + " but is still operational!\n", number); + err = -EBUSY; + goto abort; + } + p->rdev = NULL; + synchronize_rcu(); + if (atomic_read(&rdev->nr_pending)) { + /* lost the race, try later */ + err = -EBUSY; + p->rdev = rdev; + } + } +abort: + + print_multipath_conf(conf); + return err; +} + + + +/* + * This is a kernel thread which: + * + * 1. Retries failed read operations on working multipaths. + * 2. Updates the raid superblock when problems encounter. + * 3. Performs writes following reads for array syncronising. + */ + +static void multipathd (mddev_t *mddev) +{ + struct multipath_bh *mp_bh; + struct bio *bio; + unsigned long flags; + multipath_conf_t *conf = mddev_to_conf(mddev); + struct list_head *head = &conf->retry_list; + + md_check_recovery(mddev); + for (;;) { + char b[BDEVNAME_SIZE]; + spin_lock_irqsave(&conf->device_lock, flags); + if (list_empty(head)) + break; + mp_bh = list_entry(head->prev, struct multipath_bh, retry_list); + list_del(head->prev); + spin_unlock_irqrestore(&conf->device_lock, flags); + + bio = &mp_bh->bio; + bio->bi_sector = mp_bh->master_bio->bi_sector; + + if ((mp_bh->path = multipath_map (conf))<0) { + printk(KERN_ALERT "multipath: %s: unrecoverable IO read" + " error for block %llu\n", + bdevname(bio->bi_bdev,b), + (unsigned long long)bio->bi_sector); + multipath_end_bh_io(mp_bh, -EIO); + } else { + printk(KERN_ERR "multipath: %s: redirecting sector %llu" + " to another IO path\n", + bdevname(bio->bi_bdev,b), + (unsigned long long)bio->bi_sector); + *bio = *(mp_bh->master_bio); + bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset; + bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev; + bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT); + bio->bi_end_io = multipath_end_request; + bio->bi_private = mp_bh; + generic_make_request(bio); + } + } + spin_unlock_irqrestore(&conf->device_lock, flags); +} + +static int multipath_run (mddev_t *mddev) +{ + multipath_conf_t *conf; + int disk_idx; + struct multipath_info *disk; + mdk_rdev_t *rdev; + struct list_head *tmp; + + if (mddev->level != LEVEL_MULTIPATH) { + printk("multipath: %s: raid level not set to multipath IO (%d)\n", + mdname(mddev), mddev->level); + goto out; + } + /* + * copy the already verified devices into our private MULTIPATH + * bookkeeping area. 
[whatever we allocate in multipath_run(), + * should be freed in multipath_stop()] + */ + mddev->queue->queue_lock = &mddev->queue->__queue_lock; + + conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL); + mddev->private = conf; + if (!conf) { + printk(KERN_ERR + "multipath: couldn't allocate memory for %s\n", + mdname(mddev)); + goto out; + } + + conf->multipaths = kzalloc(sizeof(struct multipath_info)*mddev->raid_disks, + GFP_KERNEL); + if (!conf->multipaths) { + printk(KERN_ERR + "multipath: couldn't allocate memory for %s\n", + mdname(mddev)); + goto out_free_conf; + } + + conf->working_disks = 0; + rdev_for_each(rdev, tmp, mddev) { + disk_idx = rdev->raid_disk; + if (disk_idx < 0 || + disk_idx >= mddev->raid_disks) + continue; + + disk = conf->multipaths + disk_idx; + disk->rdev = rdev; + + blk_queue_stack_limits(mddev->queue, + rdev->bdev->bd_disk->queue); + /* as we don't honour merge_bvec_fn, we must never risk + * violating it, not that we ever expect a device with + * a merge_bvec_fn to be involved in multipath */ + if (rdev->bdev->bd_disk->queue->merge_bvec_fn && + mddev->queue->max_sectors > (PAGE_SIZE>>9)) + blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); + + if (!test_bit(Faulty, &rdev->flags)) + conf->working_disks++; + } + + conf->raid_disks = mddev->raid_disks; + conf->mddev = mddev; + spin_lock_init(&conf->device_lock); + INIT_LIST_HEAD(&conf->retry_list); + + if (!conf->working_disks) { + printk(KERN_ERR "multipath: no operational IO paths for %s\n", + mdname(mddev)); + goto out_free_conf; + } + mddev->degraded = conf->raid_disks - conf->working_disks; + + conf->pool = mempool_create_kzalloc_pool(NR_RESERVED_BUFS, + sizeof(struct multipath_bh)); + if (conf->pool == NULL) { + printk(KERN_ERR + "multipath: couldn't allocate memory for %s\n", + mdname(mddev)); + goto out_free_conf; + } + + { + mddev->thread = md_register_thread(multipathd, mddev, "%s_multipath"); + if (!mddev->thread) { + printk(KERN_ERR "multipath: couldn't allocate thread" + " for %s\n", mdname(mddev)); + goto out_free_conf; + } + } + + printk(KERN_INFO + "multipath: array %s active with %d out of %d IO paths\n", + mdname(mddev), conf->working_disks, mddev->raid_disks); + /* + * Ok, everything is just fine now + */ + mddev->array_sectors = mddev->size * 2; + + mddev->queue->unplug_fn = multipath_unplug; + mddev->queue->backing_dev_info.congested_fn = multipath_congested; + mddev->queue->backing_dev_info.congested_data = mddev; + + return 0; + +out_free_conf: + if (conf->pool) + mempool_destroy(conf->pool); + kfree(conf->multipaths); + kfree(conf); + mddev->private = NULL; +out: + return -EIO; +} + + +static int multipath_stop (mddev_t *mddev) +{ + multipath_conf_t *conf = mddev_to_conf(mddev); + + md_unregister_thread(mddev->thread); + mddev->thread = NULL; + blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ + mempool_destroy(conf->pool); + kfree(conf->multipaths); + kfree(conf); + mddev->private = NULL; + return 0; +} + +static struct mdk_personality multipath_personality = +{ + .name = "multipath", + .level = LEVEL_MULTIPATH, + .owner = THIS_MODULE, + .make_request = multipath_make_request, + .run = multipath_run, + .stop = multipath_stop, + .status = multipath_status, + .error_handler = multipath_error, + .hot_add_disk = multipath_add_disk, + .hot_remove_disk= multipath_remove_disk, +}; + +static int __init multipath_init (void) +{ + return register_md_personality (&multipath_personality); +} + +static void __exit multipath_exit (void) +{ + unregister_md_personality 
(&multipath_personality); +} + +module_init(multipath_init); +module_exit(multipath_exit); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("md-personality-7"); /* MULTIPATH */ +MODULE_ALIAS("md-multipath"); +MODULE_ALIAS("md-level--4"); diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c new file mode 100644 index 0000000..8ac6488 --- /dev/null +++ b/drivers/md/raid0.c @@ -0,0 +1,531 @@ +/* + raid0.c : Multiple Devices driver for Linux + Copyright (C) 1994-96 Marc ZYNGIER + <zyngier@ufr-info-p7.ibp.fr> or + <maz@gloups.fdn.fr> + Copyright (C) 1999, 2000 Ingo Molnar, Red Hat + + + RAID-0 management functions. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + You should have received a copy of the GNU General Public License + (for example /usr/src/linux/COPYING); if not, write to the Free + Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ + +#include <linux/raid/raid0.h> + +static void raid0_unplug(struct request_queue *q) +{ + mddev_t *mddev = q->queuedata; + raid0_conf_t *conf = mddev_to_conf(mddev); + mdk_rdev_t **devlist = conf->strip_zone[0].dev; + int i; + + for (i=0; i<mddev->raid_disks; i++) { + struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev); + + blk_unplug(r_queue); + } +} + +static int raid0_congested(void *data, int bits) +{ + mddev_t *mddev = data; + raid0_conf_t *conf = mddev_to_conf(mddev); + mdk_rdev_t **devlist = conf->strip_zone[0].dev; + int i, ret = 0; + + for (i = 0; i < mddev->raid_disks && !ret ; i++) { + struct request_queue *q = bdev_get_queue(devlist[i]->bdev); + + ret |= bdi_congested(&q->backing_dev_info, bits); + } + return ret; +} + + +static int create_strip_zones (mddev_t *mddev) +{ + int i, c, j; + sector_t current_offset, curr_zone_offset; + sector_t min_spacing; + raid0_conf_t *conf = mddev_to_conf(mddev); + mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev; + struct list_head *tmp1, *tmp2; + struct strip_zone *zone; + int cnt; + char b[BDEVNAME_SIZE]; + + /* + * The number of 'same size groups' + */ + conf->nr_strip_zones = 0; + + rdev_for_each(rdev1, tmp1, mddev) { + printk("raid0: looking at %s\n", + bdevname(rdev1->bdev,b)); + c = 0; + rdev_for_each(rdev2, tmp2, mddev) { + printk("raid0: comparing %s(%llu)", + bdevname(rdev1->bdev,b), + (unsigned long long)rdev1->size); + printk(" with %s(%llu)\n", + bdevname(rdev2->bdev,b), + (unsigned long long)rdev2->size); + if (rdev2 == rdev1) { + printk("raid0: END\n"); + break; + } + if (rdev2->size == rdev1->size) + { + /* + * Not unique, don't count it as a new + * group + */ + printk("raid0: EQUAL\n"); + c = 1; + break; + } + printk("raid0: NOT EQUAL\n"); + } + if (!c) { + printk("raid0: ==> UNIQUE\n"); + conf->nr_strip_zones++; + printk("raid0: %d zones\n", conf->nr_strip_zones); + } + } + printk("raid0: FINAL %d zones\n", conf->nr_strip_zones); + + conf->strip_zone = kzalloc(sizeof(struct strip_zone)* + conf->nr_strip_zones, GFP_KERNEL); + if (!conf->strip_zone) + return 1; + conf->devlist = kzalloc(sizeof(mdk_rdev_t*)* + conf->nr_strip_zones*mddev->raid_disks, + GFP_KERNEL); + if (!conf->devlist) + return 1; + + /* The first zone must contain all devices, so here we check that + * there is a proper alignment of slots to devices and find them all + */ + zone = &conf->strip_zone[0]; + cnt = 0; + smallest = NULL; + zone->dev = conf->devlist; + rdev_for_each(rdev1, tmp1, mddev) { + int j = 
rdev1->raid_disk; + + if (j < 0 || j >= mddev->raid_disks) { + printk("raid0: bad disk number %d - aborting!\n", j); + goto abort; + } + if (zone->dev[j]) { + printk("raid0: multiple devices for %d - aborting!\n", + j); + goto abort; + } + zone->dev[j] = rdev1; + + blk_queue_stack_limits(mddev->queue, + rdev1->bdev->bd_disk->queue); + /* as we don't honour merge_bvec_fn, we must never risk + * violating it, so limit ->max_sector to one PAGE, as + * a one page request is never in violation. + */ + + if (rdev1->bdev->bd_disk->queue->merge_bvec_fn && + mddev->queue->max_sectors > (PAGE_SIZE>>9)) + blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); + + if (!smallest || (rdev1->size <smallest->size)) + smallest = rdev1; + cnt++; + } + if (cnt != mddev->raid_disks) { + printk("raid0: too few disks (%d of %d) - aborting!\n", + cnt, mddev->raid_disks); + goto abort; + } + zone->nb_dev = cnt; + zone->size = smallest->size * cnt; + zone->zone_offset = 0; + + current_offset = smallest->size; + curr_zone_offset = zone->size; + + /* now do the other zones */ + for (i = 1; i < conf->nr_strip_zones; i++) + { + zone = conf->strip_zone + i; + zone->dev = conf->strip_zone[i-1].dev + mddev->raid_disks; + + printk("raid0: zone %d\n", i); + zone->dev_offset = current_offset; + smallest = NULL; + c = 0; + + for (j=0; j<cnt; j++) { + char b[BDEVNAME_SIZE]; + rdev = conf->strip_zone[0].dev[j]; + printk("raid0: checking %s ...", bdevname(rdev->bdev,b)); + if (rdev->size > current_offset) + { + printk(" contained as device %d\n", c); + zone->dev[c] = rdev; + c++; + if (!smallest || (rdev->size <smallest->size)) { + smallest = rdev; + printk(" (%llu) is smallest!.\n", + (unsigned long long)rdev->size); + } + } else + printk(" nope.\n"); + } + + zone->nb_dev = c; + zone->size = (smallest->size - current_offset) * c; + printk("raid0: zone->nb_dev: %d, size: %llu\n", + zone->nb_dev, (unsigned long long)zone->size); + + zone->zone_offset = curr_zone_offset; + curr_zone_offset += zone->size; + + current_offset = smallest->size; + printk("raid0: current zone offset: %llu\n", + (unsigned long long)current_offset); + } + + /* Now find appropriate hash spacing. + * We want a number which causes most hash entries to cover + * at most two strips, but the hash table must be at most + * 1 PAGE. We choose the smallest strip, or contiguous collection + * of strips, that has big enough size. We never consider the last + * strip though as it's size has no bearing on the efficacy of the hash + * table. + */ + conf->hash_spacing = curr_zone_offset; + min_spacing = curr_zone_offset; + sector_div(min_spacing, PAGE_SIZE/sizeof(struct strip_zone*)); + for (i=0; i < conf->nr_strip_zones-1; i++) { + sector_t sz = 0; + for (j=i; j<conf->nr_strip_zones-1 && + sz < min_spacing ; j++) + sz += conf->strip_zone[j].size; + if (sz >= min_spacing && sz < conf->hash_spacing) + conf->hash_spacing = sz; + } + + mddev->queue->unplug_fn = raid0_unplug; + + mddev->queue->backing_dev_info.congested_fn = raid0_congested; + mddev->queue->backing_dev_info.congested_data = mddev; + + printk("raid0: done.\n"); + return 0; + abort: + return 1; +} + +/** + * raid0_mergeable_bvec -- tell bio layer if a two requests can be merged + * @q: request queue + * @bvm: properties of new bio + * @biovec: the request that could be merged to it. 
+ * + * Return amount of bytes we can accept at this offset + */ +static int raid0_mergeable_bvec(struct request_queue *q, + struct bvec_merge_data *bvm, + struct bio_vec *biovec) +{ + mddev_t *mddev = q->queuedata; + sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); + int max; + unsigned int chunk_sectors = mddev->chunk_size >> 9; + unsigned int bio_sectors = bvm->bi_size >> 9; + + max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; + if (max < 0) max = 0; /* bio_add cannot handle a negative return */ + if (max <= biovec->bv_len && bio_sectors == 0) + return biovec->bv_len; + else + return max; +} + +static int raid0_run (mddev_t *mddev) +{ + unsigned cur=0, i=0, nb_zone; + s64 size; + raid0_conf_t *conf; + mdk_rdev_t *rdev; + struct list_head *tmp; + + if (mddev->chunk_size == 0) { + printk(KERN_ERR "md/raid0: non-zero chunk size required.\n"); + return -EINVAL; + } + printk(KERN_INFO "%s: setting max_sectors to %d, segment boundary to %d\n", + mdname(mddev), + mddev->chunk_size >> 9, + (mddev->chunk_size>>1)-1); + blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9); + blk_queue_segment_boundary(mddev->queue, (mddev->chunk_size>>1) - 1); + mddev->queue->queue_lock = &mddev->queue->__queue_lock; + + conf = kmalloc(sizeof (raid0_conf_t), GFP_KERNEL); + if (!conf) + goto out; + mddev->private = (void *)conf; + + conf->strip_zone = NULL; + conf->devlist = NULL; + if (create_strip_zones (mddev)) + goto out_free_conf; + + /* calculate array device size */ + mddev->array_sectors = 0; + rdev_for_each(rdev, tmp, mddev) + mddev->array_sectors += rdev->size * 2; + + printk("raid0 : md_size is %llu blocks.\n", + (unsigned long long)mddev->array_sectors / 2); + printk("raid0 : conf->hash_spacing is %llu blocks.\n", + (unsigned long long)conf->hash_spacing); + { + sector_t s = mddev->array_sectors / 2; + sector_t space = conf->hash_spacing; + int round; + conf->preshift = 0; + if (sizeof(sector_t) > sizeof(u32)) { + /*shift down space and s so that sector_div will work */ + while (space > (sector_t) (~(u32)0)) { + s >>= 1; + space >>= 1; + s += 1; /* force round-up */ + conf->preshift++; + } + } + round = sector_div(s, (u32)space) ? 1 : 0; + nb_zone = s + round; + } + printk("raid0 : nb_zone is %d.\n", nb_zone); + + printk("raid0 : Allocating %Zd bytes for hash.\n", + nb_zone*sizeof(struct strip_zone*)); + conf->hash_table = kmalloc (sizeof (struct strip_zone *)*nb_zone, GFP_KERNEL); + if (!conf->hash_table) + goto out_free_conf; + size = conf->strip_zone[cur].size; + + conf->hash_table[0] = conf->strip_zone + cur; + for (i=1; i< nb_zone; i++) { + while (size <= conf->hash_spacing) { + cur++; + size += conf->strip_zone[cur].size; + } + size -= conf->hash_spacing; + conf->hash_table[i] = conf->strip_zone + cur; + } + if (conf->preshift) { + conf->hash_spacing >>= conf->preshift; + /* round hash_spacing up so when we divide by it, we + * err on the side of too-low, which is safest + */ + conf->hash_spacing++; + } + + /* calculate the max read-ahead size. + * For read-ahead of large files to be effective, we need to + * readahead at least twice a whole stripe. i.e. number of devices + * multiplied by chunk size times 2. + * If an individual device has an ra_pages greater than the + * chunk size, then we will not drive that device as hard as it + * wants. We consider this a configuration error: a larger + * chunksize should be used in that case. 
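+ * For example: 4 disks with 64KiB chunks on a 4KiB-page system give
+ * stripe = 64 pages, so ra_pages is raised to at least 128 pages
+ * (512KiB of read-ahead) if it was configured lower.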
+ */ + { + int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_SIZE; + if (mddev->queue->backing_dev_info.ra_pages < 2* stripe) + mddev->queue->backing_dev_info.ra_pages = 2* stripe; + } + + + blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec); + return 0; + +out_free_conf: + kfree(conf->strip_zone); + kfree(conf->devlist); + kfree(conf); + mddev->private = NULL; +out: + return -ENOMEM; +} + +static int raid0_stop (mddev_t *mddev) +{ + raid0_conf_t *conf = mddev_to_conf(mddev); + + blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ + kfree(conf->hash_table); + conf->hash_table = NULL; + kfree(conf->strip_zone); + conf->strip_zone = NULL; + kfree(conf); + mddev->private = NULL; + + return 0; +} + +static int raid0_make_request (struct request_queue *q, struct bio *bio) +{ + mddev_t *mddev = q->queuedata; + unsigned int sect_in_chunk, chunksize_bits, chunk_size, chunk_sects; + raid0_conf_t *conf = mddev_to_conf(mddev); + struct strip_zone *zone; + mdk_rdev_t *tmp_dev; + sector_t chunk; + sector_t block, rsect; + const int rw = bio_data_dir(bio); + int cpu; + + if (unlikely(bio_barrier(bio))) { + bio_endio(bio, -EOPNOTSUPP); + return 0; + } + + cpu = part_stat_lock(); + part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); + part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], + bio_sectors(bio)); + part_stat_unlock(); + + chunk_size = mddev->chunk_size >> 10; + chunk_sects = mddev->chunk_size >> 9; + chunksize_bits = ffz(~chunk_size); + block = bio->bi_sector >> 1; + + + if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) { + struct bio_pair *bp; + /* Sanity check -- queue functions should prevent this happening */ + if (bio->bi_vcnt != 1 || + bio->bi_idx != 0) + goto bad_map; + /* This is a one page bio that upper layers + * refuse to split for us, so we need to split it. 
+ */ + bp = bio_split(bio, chunk_sects - (bio->bi_sector & (chunk_sects - 1))); + if (raid0_make_request(q, &bp->bio1)) + generic_make_request(&bp->bio1); + if (raid0_make_request(q, &bp->bio2)) + generic_make_request(&bp->bio2); + + bio_pair_release(bp); + return 0; + } + + + { + sector_t x = block >> conf->preshift; + sector_div(x, (u32)conf->hash_spacing); + zone = conf->hash_table[x]; + } + + while (block >= (zone->zone_offset + zone->size)) + zone++; + + sect_in_chunk = bio->bi_sector & ((chunk_size<<1) -1); + + + { + sector_t x = (block - zone->zone_offset) >> chunksize_bits; + + sector_div(x, zone->nb_dev); + chunk = x; + + x = block >> chunksize_bits; + tmp_dev = zone->dev[sector_div(x, zone->nb_dev)]; + } + rsect = (((chunk << chunksize_bits) + zone->dev_offset)<<1) + + sect_in_chunk; + + bio->bi_bdev = tmp_dev->bdev; + bio->bi_sector = rsect + tmp_dev->data_offset; + + /* + * Let the main block layer submit the IO and resolve recursion: + */ + return 1; + +bad_map: + printk("raid0_make_request bug: can't convert block across chunks" + " or bigger than %dk %llu %d\n", chunk_size, + (unsigned long long)bio->bi_sector, bio->bi_size >> 10); + + bio_io_error(bio); + return 0; +} + +static void raid0_status (struct seq_file *seq, mddev_t *mddev) +{ +#undef MD_DEBUG +#ifdef MD_DEBUG + int j, k, h; + char b[BDEVNAME_SIZE]; + raid0_conf_t *conf = mddev_to_conf(mddev); + + h = 0; + for (j = 0; j < conf->nr_strip_zones; j++) { + seq_printf(seq, " z%d", j); + if (conf->hash_table[h] == conf->strip_zone+j) + seq_printf(seq, "(h%d)", h++); + seq_printf(seq, "=["); + for (k = 0; k < conf->strip_zone[j].nb_dev; k++) + seq_printf(seq, "%s/", bdevname( + conf->strip_zone[j].dev[k]->bdev,b)); + + seq_printf(seq, "] zo=%d do=%d s=%d\n", + conf->strip_zone[j].zone_offset, + conf->strip_zone[j].dev_offset, + conf->strip_zone[j].size); + } +#endif + seq_printf(seq, " %dk chunks", mddev->chunk_size/1024); + return; +} + +static struct mdk_personality raid0_personality= +{ + .name = "raid0", + .level = 0, + .owner = THIS_MODULE, + .make_request = raid0_make_request, + .run = raid0_run, + .stop = raid0_stop, + .status = raid0_status, +}; + +static int __init raid0_init (void) +{ + return register_md_personality (&raid0_personality); +} + +static void raid0_exit (void) +{ + unregister_md_personality (&raid0_personality); +} + +module_init(raid0_init); +module_exit(raid0_exit); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("md-personality-2"); /* RAID0 */ +MODULE_ALIAS("md-raid0"); +MODULE_ALIAS("md-level-0"); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c new file mode 100644 index 0000000..4101bf0 --- /dev/null +++ b/drivers/md/raid1.c @@ -0,0 +1,2282 @@ +/* + * raid1.c : Multiple Devices driver for Linux + * + * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat + * + * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman + * + * RAID-1 management functions. + * + * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000 + * + * Fixes to reconstruction by Jakob Østergaard" <jakob@ostenfeld.dk> + * Various fixes by Neil Brown <neilb@cse.unsw.edu.au> + * + * Changes by Peter T. 
Breuer <ptb@it.uc3m.es> 31/1/2003 to support + * bitmapped intelligence in resync: + * + * - bitmap marked during normal i/o + * - bitmap used to skip nondirty blocks during sync + * + * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology: + * - persistent bitmap code + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * You should have received a copy of the GNU General Public License + * (for example /usr/src/linux/COPYING); if not, write to the Free + * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include "dm-bio-list.h" +#include <linux/delay.h> +#include <linux/raid/raid1.h> +#include <linux/raid/bitmap.h> + +#define DEBUG 0 +#if DEBUG +#define PRINTK(x...) printk(x) +#else +#define PRINTK(x...) +#endif + +/* + * Number of guaranteed r1bios in case of extreme VM load: + */ +#define NR_RAID1_BIOS 256 + + +static void unplug_slaves(mddev_t *mddev); + +static void allow_barrier(conf_t *conf); +static void lower_barrier(conf_t *conf); + +static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) +{ + struct pool_info *pi = data; + r1bio_t *r1_bio; + int size = offsetof(r1bio_t, bios[pi->raid_disks]); + + /* allocate a r1bio with room for raid_disks entries in the bios array */ + r1_bio = kzalloc(size, gfp_flags); + if (!r1_bio) + unplug_slaves(pi->mddev); + + return r1_bio; +} + +static void r1bio_pool_free(void *r1_bio, void *data) +{ + kfree(r1_bio); +} + +#define RESYNC_BLOCK_SIZE (64*1024) +//#define RESYNC_BLOCK_SIZE PAGE_SIZE +#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9) +#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE) +#define RESYNC_WINDOW (2048*1024) + +static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) +{ + struct pool_info *pi = data; + struct page *page; + r1bio_t *r1_bio; + struct bio *bio; + int i, j; + + r1_bio = r1bio_pool_alloc(gfp_flags, pi); + if (!r1_bio) { + unplug_slaves(pi->mddev); + return NULL; + } + + /* + * Allocate bios : 1 for reading, n-1 for writing + */ + for (j = pi->raid_disks ; j-- ; ) { + bio = bio_alloc(gfp_flags, RESYNC_PAGES); + if (!bio) + goto out_free_bio; + r1_bio->bios[j] = bio; + } + /* + * Allocate RESYNC_PAGES data pages and attach them to + * the first bio. + * If this is a user-requested check/repair, allocate + * RESYNC_PAGES for each bio. 
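+ * Otherwise only the first bio gets its own pages; the remaining bios
+ * share them (their bv_page pointers are copied a little further down).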
+ */ + if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) + j = pi->raid_disks; + else + j = 1; + while(j--) { + bio = r1_bio->bios[j]; + for (i = 0; i < RESYNC_PAGES; i++) { + page = alloc_page(gfp_flags); + if (unlikely(!page)) + goto out_free_pages; + + bio->bi_io_vec[i].bv_page = page; + } + } + /* If not user-requests, copy the page pointers to all bios */ + if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) { + for (i=0; i<RESYNC_PAGES ; i++) + for (j=1; j<pi->raid_disks; j++) + r1_bio->bios[j]->bi_io_vec[i].bv_page = + r1_bio->bios[0]->bi_io_vec[i].bv_page; + } + + r1_bio->master_bio = NULL; + + return r1_bio; + +out_free_pages: + for (i=0; i < RESYNC_PAGES ; i++) + for (j=0 ; j < pi->raid_disks; j++) + safe_put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page); + j = -1; +out_free_bio: + while ( ++j < pi->raid_disks ) + bio_put(r1_bio->bios[j]); + r1bio_pool_free(r1_bio, data); + return NULL; +} + +static void r1buf_pool_free(void *__r1_bio, void *data) +{ + struct pool_info *pi = data; + int i,j; + r1bio_t *r1bio = __r1_bio; + + for (i = 0; i < RESYNC_PAGES; i++) + for (j = pi->raid_disks; j-- ;) { + if (j == 0 || + r1bio->bios[j]->bi_io_vec[i].bv_page != + r1bio->bios[0]->bi_io_vec[i].bv_page) + safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page); + } + for (i=0 ; i < pi->raid_disks; i++) + bio_put(r1bio->bios[i]); + + r1bio_pool_free(r1bio, data); +} + +static void put_all_bios(conf_t *conf, r1bio_t *r1_bio) +{ + int i; + + for (i = 0; i < conf->raid_disks; i++) { + struct bio **bio = r1_bio->bios + i; + if (*bio && *bio != IO_BLOCKED) + bio_put(*bio); + *bio = NULL; + } +} + +static void free_r1bio(r1bio_t *r1_bio) +{ + conf_t *conf = mddev_to_conf(r1_bio->mddev); + + /* + * Wake up any possible resync thread that waits for the device + * to go idle. + */ + allow_barrier(conf); + + put_all_bios(conf, r1_bio); + mempool_free(r1_bio, conf->r1bio_pool); +} + +static void put_buf(r1bio_t *r1_bio) +{ + conf_t *conf = mddev_to_conf(r1_bio->mddev); + int i; + + for (i=0; i<conf->raid_disks; i++) { + struct bio *bio = r1_bio->bios[i]; + if (bio->bi_end_io) + rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev); + } + + mempool_free(r1_bio, conf->r1buf_pool); + + lower_barrier(conf); +} + +static void reschedule_retry(r1bio_t *r1_bio) +{ + unsigned long flags; + mddev_t *mddev = r1_bio->mddev; + conf_t *conf = mddev_to_conf(mddev); + + spin_lock_irqsave(&conf->device_lock, flags); + list_add(&r1_bio->retry_list, &conf->retry_list); + conf->nr_queued ++; + spin_unlock_irqrestore(&conf->device_lock, flags); + + wake_up(&conf->wait_barrier); + md_wakeup_thread(mddev->thread); +} + +/* + * raid_end_bio_io() is called when we have finished servicing a mirrored + * operation and are ready to return a success/failure code to the buffer + * cache layer. + */ +static void raid_end_bio_io(r1bio_t *r1_bio) +{ + struct bio *bio = r1_bio->master_bio; + + /* if nobody has done the final endio yet, do it now */ + if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { + PRINTK(KERN_DEBUG "raid1: sync end %s on sectors %llu-%llu\n", + (bio_data_dir(bio) == WRITE) ? "write" : "read", + (unsigned long long) bio->bi_sector, + (unsigned long long) bio->bi_sector + + (bio->bi_size >> 9) - 1); + + bio_endio(bio, + test_bit(R1BIO_Uptodate, &r1_bio->state) ? 0 : -EIO); + } + free_r1bio(r1_bio); +} + +/* + * Update disk head position estimator based on IRQ completion info. 
+ */ +static inline void update_head_pos(int disk, r1bio_t *r1_bio) +{ + conf_t *conf = mddev_to_conf(r1_bio->mddev); + + conf->mirrors[disk].head_position = + r1_bio->sector + (r1_bio->sectors); +} + +static void raid1_end_read_request(struct bio *bio, int error) +{ + int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); + r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); + int mirror; + conf_t *conf = mddev_to_conf(r1_bio->mddev); + + mirror = r1_bio->read_disk; + /* + * this branch is our 'one mirror IO has finished' event handler: + */ + update_head_pos(mirror, r1_bio); + + if (uptodate) + set_bit(R1BIO_Uptodate, &r1_bio->state); + else { + /* If all other devices have failed, we want to return + * the error upwards rather than fail the last device. + * Here we redefine "uptodate" to mean "Don't want to retry" + */ + unsigned long flags; + spin_lock_irqsave(&conf->device_lock, flags); + if (r1_bio->mddev->degraded == conf->raid_disks || + (r1_bio->mddev->degraded == conf->raid_disks-1 && + !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))) + uptodate = 1; + spin_unlock_irqrestore(&conf->device_lock, flags); + } + + if (uptodate) + raid_end_bio_io(r1_bio); + else { + /* + * oops, read error: + */ + char b[BDEVNAME_SIZE]; + if (printk_ratelimit()) + printk(KERN_ERR "raid1: %s: rescheduling sector %llu\n", + bdevname(conf->mirrors[mirror].rdev->bdev,b), (unsigned long long)r1_bio->sector); + reschedule_retry(r1_bio); + } + + rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev); +} + +static void raid1_end_write_request(struct bio *bio, int error) +{ + int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); + r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); + int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state); + conf_t *conf = mddev_to_conf(r1_bio->mddev); + struct bio *to_put = NULL; + + + for (mirror = 0; mirror < conf->raid_disks; mirror++) + if (r1_bio->bios[mirror] == bio) + break; + + if (error == -EOPNOTSUPP && test_bit(R1BIO_Barrier, &r1_bio->state)) { + set_bit(BarriersNotsupp, &conf->mirrors[mirror].rdev->flags); + set_bit(R1BIO_BarrierRetry, &r1_bio->state); + r1_bio->mddev->barriers_work = 0; + /* Don't rdev_dec_pending in this branch - keep it for the retry */ + } else { + /* + * this branch is our 'one mirror IO has finished' event handler: + */ + r1_bio->bios[mirror] = NULL; + to_put = bio; + if (!uptodate) { + md_error(r1_bio->mddev, conf->mirrors[mirror].rdev); + /* an I/O failed, we can't clear the bitmap */ + set_bit(R1BIO_Degraded, &r1_bio->state); + } else + /* + * Set R1BIO_Uptodate in our master bio, so that + * we will return a good error code for to the higher + * levels even if IO on some other mirrored buffer fails. + * + * The 'master' represents the composite IO operation to + * user-side. So if something waits for IO, then it will + * wait for the 'master' bio. + */ + set_bit(R1BIO_Uptodate, &r1_bio->state); + + update_head_pos(mirror, r1_bio); + + if (behind) { + if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags)) + atomic_dec(&r1_bio->behind_remaining); + + /* In behind mode, we ACK the master bio once the I/O has safely + * reached all non-writemostly disks. 
Setting the Returned bit + * ensures that this gets done only once -- we don't ever want to + * return -EIO here, instead we'll wait */ + + if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) && + test_bit(R1BIO_Uptodate, &r1_bio->state)) { + /* Maybe we can return now */ + if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { + struct bio *mbio = r1_bio->master_bio; + PRINTK(KERN_DEBUG "raid1: behind end write sectors %llu-%llu\n", + (unsigned long long) mbio->bi_sector, + (unsigned long long) mbio->bi_sector + + (mbio->bi_size >> 9) - 1); + bio_endio(mbio, 0); + } + } + } + rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev); + } + /* + * + * Let's see if all mirrored write operations have finished + * already. + */ + if (atomic_dec_and_test(&r1_bio->remaining)) { + if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) + reschedule_retry(r1_bio); + else { + /* it really is the end of this request */ + if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { + /* free extra copy of the data pages */ + int i = bio->bi_vcnt; + while (i--) + safe_put_page(bio->bi_io_vec[i].bv_page); + } + /* clear the bitmap if all writes complete successfully */ + bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector, + r1_bio->sectors, + !test_bit(R1BIO_Degraded, &r1_bio->state), + behind); + md_write_end(r1_bio->mddev); + raid_end_bio_io(r1_bio); + } + } + + if (to_put) + bio_put(to_put); +} + + +/* + * This routine returns the disk from which the requested read should + * be done. There is a per-array 'next expected sequential IO' sector + * number - if this matches on the next IO then we use the last disk. + * There is also a per-disk 'last know head position' sector that is + * maintained from IRQ contexts, both the normal and the resync IO + * completion handlers update this position correctly. If there is no + * perfect sequential match then we pick the disk whose head is closest. + * + * If there are 2 mirrors in the same 2 devices, performance degrades + * because position is mirror, not device based. + * + * The rdev for the device selected will have nr_pending incremented. + */ +static int read_balance(conf_t *conf, r1bio_t *r1_bio) +{ + const unsigned long this_sector = r1_bio->sector; + int new_disk = conf->last_used, disk = new_disk; + int wonly_disk = -1; + const int sectors = r1_bio->sectors; + sector_t new_distance, current_distance; + mdk_rdev_t *rdev; + + rcu_read_lock(); + /* + * Check if we can balance. We can balance on the whole + * device if no resync is going on, or below the resync window. + * We take the first readable disk when above the resync window. 
+ */ + retry: + if (conf->mddev->recovery_cp < MaxSector && + (this_sector + sectors >= conf->next_resync)) { + /* Choose the first operational device, for consistency */ + new_disk = 0; + + for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev); + r1_bio->bios[new_disk] == IO_BLOCKED || + !rdev || !test_bit(In_sync, &rdev->flags) + || test_bit(WriteMostly, &rdev->flags); + rdev = rcu_dereference(conf->mirrors[++new_disk].rdev)) { + + if (rdev && test_bit(In_sync, &rdev->flags) && + r1_bio->bios[new_disk] != IO_BLOCKED) + wonly_disk = new_disk; + + if (new_disk == conf->raid_disks - 1) { + new_disk = wonly_disk; + break; + } + } + goto rb_out; + } + + + /* make sure the disk is operational */ + for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev); + r1_bio->bios[new_disk] == IO_BLOCKED || + !rdev || !test_bit(In_sync, &rdev->flags) || + test_bit(WriteMostly, &rdev->flags); + rdev = rcu_dereference(conf->mirrors[new_disk].rdev)) { + + if (rdev && test_bit(In_sync, &rdev->flags) && + r1_bio->bios[new_disk] != IO_BLOCKED) + wonly_disk = new_disk; + + if (new_disk <= 0) + new_disk = conf->raid_disks; + new_disk--; + if (new_disk == disk) { + new_disk = wonly_disk; + break; + } + } + + if (new_disk < 0) + goto rb_out; + + disk = new_disk; + /* now disk == new_disk == starting point for search */ + + /* + * Don't change to another disk for sequential reads: + */ + if (conf->next_seq_sect == this_sector) + goto rb_out; + if (this_sector == conf->mirrors[new_disk].head_position) + goto rb_out; + + current_distance = abs(this_sector - conf->mirrors[disk].head_position); + + /* Find the disk whose head is closest */ + + do { + if (disk <= 0) + disk = conf->raid_disks; + disk--; + + rdev = rcu_dereference(conf->mirrors[disk].rdev); + + if (!rdev || r1_bio->bios[disk] == IO_BLOCKED || + !test_bit(In_sync, &rdev->flags) || + test_bit(WriteMostly, &rdev->flags)) + continue; + + if (!atomic_read(&rdev->nr_pending)) { + new_disk = disk; + break; + } + new_distance = abs(this_sector - conf->mirrors[disk].head_position); + if (new_distance < current_distance) { + current_distance = new_distance; + new_disk = disk; + } + } while (disk != conf->last_used); + + rb_out: + + + if (new_disk >= 0) { + rdev = rcu_dereference(conf->mirrors[new_disk].rdev); + if (!rdev) + goto retry; + atomic_inc(&rdev->nr_pending); + if (!test_bit(In_sync, &rdev->flags)) { + /* cannot risk returning a device that failed + * before we inc'ed nr_pending + */ + rdev_dec_pending(rdev, conf->mddev); + goto retry; + } + conf->next_seq_sect = this_sector + sectors; + conf->last_used = new_disk; + } + rcu_read_unlock(); + + return new_disk; +} + +static void unplug_slaves(mddev_t *mddev) +{ + conf_t *conf = mddev_to_conf(mddev); + int i; + + rcu_read_lock(); + for (i=0; i<mddev->raid_disks; i++) { + mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); + if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { + struct request_queue *r_queue = bdev_get_queue(rdev->bdev); + + atomic_inc(&rdev->nr_pending); + rcu_read_unlock(); + + blk_unplug(r_queue); + + rdev_dec_pending(rdev, mddev); + rcu_read_lock(); + } + } + rcu_read_unlock(); +} + +static void raid1_unplug(struct request_queue *q) +{ + mddev_t *mddev = q->queuedata; + + unplug_slaves(mddev); + md_wakeup_thread(mddev->thread); +} + +static int raid1_congested(void *data, int bits) +{ + mddev_t *mddev = data; + conf_t *conf = mddev_to_conf(mddev); + int i, ret = 0; + + rcu_read_lock(); + for (i = 0; i < mddev->raid_disks; i++) { + mdk_rdev_t *rdev =
rcu_dereference(conf->mirrors[i].rdev); + if (rdev && !test_bit(Faulty, &rdev->flags)) { + struct request_queue *q = bdev_get_queue(rdev->bdev); + + /* Note the '|| 1' - when read_balance prefers + * non-congested targets, it can be removed + */ + if ((bits & (1<<BDI_write_congested)) || 1) + ret |= bdi_congested(&q->backing_dev_info, bits); + else + ret &= bdi_congested(&q->backing_dev_info, bits); + } + } + rcu_read_unlock(); + return ret; +} + + +static int flush_pending_writes(conf_t *conf) +{ + /* Any writes that have been queued but are awaiting + * bitmap updates get flushed here. + * We return 1 if any requests were actually submitted. + */ + int rv = 0; + + spin_lock_irq(&conf->device_lock); + + if (conf->pending_bio_list.head) { + struct bio *bio; + bio = bio_list_get(&conf->pending_bio_list); + blk_remove_plug(conf->mddev->queue); + spin_unlock_irq(&conf->device_lock); + /* flush any pending bitmap writes to + * disk before proceeding w/ I/O */ + bitmap_unplug(conf->mddev->bitmap); + + while (bio) { /* submit pending writes */ + struct bio *next = bio->bi_next; + bio->bi_next = NULL; + generic_make_request(bio); + bio = next; + } + rv = 1; + } else + spin_unlock_irq(&conf->device_lock); + return rv; +} + +/* Barriers.... + * Sometimes we need to suspend IO while we do something else, + * either some resync/recovery, or reconfigure the array. + * To do this we raise a 'barrier'. + * The 'barrier' is a counter that can be raised multiple times + * to count how many activities are happening which preclude + * normal IO. + * We can only raise the barrier if there is no pending IO. + * i.e. if nr_pending == 0. + * We choose only to raise the barrier if no-one is waiting for the + * barrier to go down. This means that as soon as an IO request + * is ready, no other operations which require a barrier will start + * until the IO request has had a chance. + * + * So: regular IO calls 'wait_barrier'. When that returns there + * is no background IO happening. It must arrange to call + * allow_barrier when it has finished its IO. + * background IO calls must call raise_barrier. Once that returns + * there is no normal IO happening. It must arrange to call + * lower_barrier when the particular background IO completes.
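+ *
+ * In short: raise_barrier()/lower_barrier() bracket background
+ * (resync/recovery) IO, while wait_barrier()/allow_barrier() bracket
+ * normal IO.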
+ */ +#define RESYNC_DEPTH 32 + +static void raise_barrier(conf_t *conf) +{ + spin_lock_irq(&conf->resync_lock); + + /* Wait until no block IO is waiting */ + wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting, + conf->resync_lock, + raid1_unplug(conf->mddev->queue)); + + /* block any new IO from starting */ + conf->barrier++; + + /* Now wait for all pending IO to complete */ + wait_event_lock_irq(conf->wait_barrier, + !conf->nr_pending && conf->barrier < RESYNC_DEPTH, + conf->resync_lock, + raid1_unplug(conf->mddev->queue)); + + spin_unlock_irq(&conf->resync_lock); +} + +static void lower_barrier(conf_t *conf) +{ + unsigned long flags; + spin_lock_irqsave(&conf->resync_lock, flags); + conf->barrier--; + spin_unlock_irqrestore(&conf->resync_lock, flags); + wake_up(&conf->wait_barrier); +} + +static void wait_barrier(conf_t *conf) +{ + spin_lock_irq(&conf->resync_lock); + if (conf->barrier) { + conf->nr_waiting++; + wait_event_lock_irq(conf->wait_barrier, !conf->barrier, + conf->resync_lock, + raid1_unplug(conf->mddev->queue)); + conf->nr_waiting--; + } + conf->nr_pending++; + spin_unlock_irq(&conf->resync_lock); +} + +static void allow_barrier(conf_t *conf) +{ + unsigned long flags; + spin_lock_irqsave(&conf->resync_lock, flags); + conf->nr_pending--; + spin_unlock_irqrestore(&conf->resync_lock, flags); + wake_up(&conf->wait_barrier); +} + +static void freeze_array(conf_t *conf) +{ + /* stop syncio and normal IO and wait for everything to + * go quiet. + * We increment barrier and nr_waiting, and then + * wait until nr_pending matches nr_queued+1 + * This is called in the context of one normal IO request + * that has failed. Thus any sync request that might be pending + * will be blocked by nr_pending, and we need to wait for + * pending IO requests to complete or be queued for re-try. + * Thus the number queued (nr_queued) plus this request (1) + * must match the number of pending IOs (nr_pending) before + * we continue.
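+ * The wait condition below also calls flush_pending_writes(), so writes
+ * that are still queued for bitmap updates can be submitted and complete
+ * while we wait.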
+ */ + spin_lock_irq(&conf->resync_lock); + conf->barrier++; + conf->nr_waiting++; + wait_event_lock_irq(conf->wait_barrier, + conf->nr_pending == conf->nr_queued+1, + conf->resync_lock, + ({ flush_pending_writes(conf); + raid1_unplug(conf->mddev->queue); })); + spin_unlock_irq(&conf->resync_lock); +} +static void unfreeze_array(conf_t *conf) +{ + /* reverse the effect of the freeze */ + spin_lock_irq(&conf->resync_lock); + conf->barrier--; + conf->nr_waiting--; + wake_up(&conf->wait_barrier); + spin_unlock_irq(&conf->resync_lock); +} + + +/* duplicate the data pages for behind I/O */ +static struct page **alloc_behind_pages(struct bio *bio) +{ + int i; + struct bio_vec *bvec; + struct page **pages = kzalloc(bio->bi_vcnt * sizeof(struct page *), + GFP_NOIO); + if (unlikely(!pages)) + goto do_sync_io; + + bio_for_each_segment(bvec, bio, i) { + pages[i] = alloc_page(GFP_NOIO); + if (unlikely(!pages[i])) + goto do_sync_io; + memcpy(kmap(pages[i]) + bvec->bv_offset, + kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len); + kunmap(pages[i]); + kunmap(bvec->bv_page); + } + + return pages; + +do_sync_io: + if (pages) + for (i = 0; i < bio->bi_vcnt && pages[i]; i++) + put_page(pages[i]); + kfree(pages); + PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size); + return NULL; +} + +static int make_request(struct request_queue *q, struct bio * bio) +{ + mddev_t *mddev = q->queuedata; + conf_t *conf = mddev_to_conf(mddev); + mirror_info_t *mirror; + r1bio_t *r1_bio; + struct bio *read_bio; + int i, targets = 0, disks; + struct bitmap *bitmap; + unsigned long flags; + struct bio_list bl; + struct page **behind_pages = NULL; + const int rw = bio_data_dir(bio); + const int do_sync = bio_sync(bio); + int cpu, do_barriers; + mdk_rdev_t *blocked_rdev; + + /* + * Register the new request and wait if the reconstruction + * thread has put up a bar for new requests. + * Continue immediately if no resync is active currently. + * We test barriers_work *after* md_write_start as md_write_start + * may cause the first superblock write, and that will check out + * if barriers work. + */ + + md_write_start(mddev, bio); /* wait on superblock update early */ + + if (unlikely(!mddev->barriers_work && bio_barrier(bio))) { + if (rw == WRITE) + md_write_end(mddev); + bio_endio(bio, -EOPNOTSUPP); + return 0; + } + + wait_barrier(conf); + + bitmap = mddev->bitmap; + + cpu = part_stat_lock(); + part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); + part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], + bio_sectors(bio)); + part_stat_unlock(); + + /* + * make_request() can abort the operation when READA is being + * used and no empty request is available. 
+ * + */ + r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO); + + r1_bio->master_bio = bio; + r1_bio->sectors = bio->bi_size >> 9; + r1_bio->state = 0; + r1_bio->mddev = mddev; + r1_bio->sector = bio->bi_sector; + + if (rw == READ) { + /* + * read balancing logic: + */ + int rdisk = read_balance(conf, r1_bio); + + if (rdisk < 0) { + /* couldn't find anywhere to read from */ + raid_end_bio_io(r1_bio); + return 0; + } + mirror = conf->mirrors + rdisk; + + r1_bio->read_disk = rdisk; + + read_bio = bio_clone(bio, GFP_NOIO); + + r1_bio->bios[rdisk] = read_bio; + + read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset; + read_bio->bi_bdev = mirror->rdev->bdev; + read_bio->bi_end_io = raid1_end_read_request; + read_bio->bi_rw = READ | do_sync; + read_bio->bi_private = r1_bio; + + generic_make_request(read_bio); + return 0; + } + + /* + * WRITE: + */ + /* first select target devices under spinlock and + * inc refcount on their rdev. Record them by setting + * bios[x] to bio + */ + disks = conf->raid_disks; +#if 0 + { static int first=1; + if (first) printk("First Write sector %llu disks %d\n", + (unsigned long long)r1_bio->sector, disks); + first = 0; + } +#endif + retry_write: + blocked_rdev = NULL; + rcu_read_lock(); + for (i = 0; i < disks; i++) { + mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); + if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { + atomic_inc(&rdev->nr_pending); + blocked_rdev = rdev; + break; + } + if (rdev && !test_bit(Faulty, &rdev->flags)) { + atomic_inc(&rdev->nr_pending); + if (test_bit(Faulty, &rdev->flags)) { + rdev_dec_pending(rdev, mddev); + r1_bio->bios[i] = NULL; + } else + r1_bio->bios[i] = bio; + targets++; + } else + r1_bio->bios[i] = NULL; + } + rcu_read_unlock(); + + if (unlikely(blocked_rdev)) { + /* Wait for this device to become unblocked */ + int j; + + for (j = 0; j < i; j++) + if (r1_bio->bios[j]) + rdev_dec_pending(conf->mirrors[j].rdev, mddev); + + allow_barrier(conf); + md_wait_for_blocked_rdev(blocked_rdev, mddev); + wait_barrier(conf); + goto retry_write; + } + + BUG_ON(targets == 0); /* we never fail the last device */ + + if (targets < conf->raid_disks) { + /* array is degraded, we will not clear the bitmap + * on I/O completion (see raid1_end_write_request) */ + set_bit(R1BIO_Degraded, &r1_bio->state); + } + + /* do behind I/O ? */ + if (bitmap && + atomic_read(&bitmap->behind_writes) < bitmap->max_write_behind && + (behind_pages = alloc_behind_pages(bio)) != NULL) + set_bit(R1BIO_BehindIO, &r1_bio->state); + + atomic_set(&r1_bio->remaining, 0); + atomic_set(&r1_bio->behind_remaining, 0); + + do_barriers = bio_barrier(bio); + if (do_barriers) + set_bit(R1BIO_Barrier, &r1_bio->state); + + bio_list_init(&bl); + for (i = 0; i < disks; i++) { + struct bio *mbio; + if (!r1_bio->bios[i]) + continue; + + mbio = bio_clone(bio, GFP_NOIO); + r1_bio->bios[i] = mbio; + + mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset; + mbio->bi_bdev = conf->mirrors[i].rdev->bdev; + mbio->bi_end_io = raid1_end_write_request; + mbio->bi_rw = WRITE | do_barriers | do_sync; + mbio->bi_private = r1_bio; + + if (behind_pages) { + struct bio_vec *bvec; + int j; + + /* Yes, I really want the '__' version so that + * we clear any unused pointer in the io_vec, rather + * than leave them unchanged. 
This is important + * because when we come to free the pages, we won't + * know the originial bi_idx, so we just free + * them all + */ + __bio_for_each_segment(bvec, mbio, j, 0) + bvec->bv_page = behind_pages[j]; + if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags)) + atomic_inc(&r1_bio->behind_remaining); + } + + atomic_inc(&r1_bio->remaining); + + bio_list_add(&bl, mbio); + } + kfree(behind_pages); /* the behind pages are attached to the bios now */ + + bitmap_startwrite(bitmap, bio->bi_sector, r1_bio->sectors, + test_bit(R1BIO_BehindIO, &r1_bio->state)); + spin_lock_irqsave(&conf->device_lock, flags); + bio_list_merge(&conf->pending_bio_list, &bl); + bio_list_init(&bl); + + blk_plug_device(mddev->queue); + spin_unlock_irqrestore(&conf->device_lock, flags); + + /* In case raid1d snuck into freeze_array */ + wake_up(&conf->wait_barrier); + + if (do_sync) + md_wakeup_thread(mddev->thread); +#if 0 + while ((bio = bio_list_pop(&bl)) != NULL) + generic_make_request(bio); +#endif + + return 0; +} + +static void status(struct seq_file *seq, mddev_t *mddev) +{ + conf_t *conf = mddev_to_conf(mddev); + int i; + + seq_printf(seq, " [%d/%d] [", conf->raid_disks, + conf->raid_disks - mddev->degraded); + rcu_read_lock(); + for (i = 0; i < conf->raid_disks; i++) { + mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); + seq_printf(seq, "%s", + rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_"); + } + rcu_read_unlock(); + seq_printf(seq, "]"); +} + + +static void error(mddev_t *mddev, mdk_rdev_t *rdev) +{ + char b[BDEVNAME_SIZE]; + conf_t *conf = mddev_to_conf(mddev); + + /* + * If it is not operational, then we have already marked it as dead + * else if it is the last working disks, ignore the error, let the + * next level up know. + * else mark the drive as failed + */ + if (test_bit(In_sync, &rdev->flags) + && (conf->raid_disks - mddev->degraded) == 1) + /* + * Don't fail the drive, act as though we were just a + * normal single drive + */ + return; + if (test_and_clear_bit(In_sync, &rdev->flags)) { + unsigned long flags; + spin_lock_irqsave(&conf->device_lock, flags); + mddev->degraded++; + set_bit(Faulty, &rdev->flags); + spin_unlock_irqrestore(&conf->device_lock, flags); + /* + * if recovery is running, make sure it aborts. + */ + set_bit(MD_RECOVERY_INTR, &mddev->recovery); + } else + set_bit(Faulty, &rdev->flags); + set_bit(MD_CHANGE_DEVS, &mddev->flags); + printk(KERN_ALERT "raid1: Disk failure on %s, disabling device.\n" + "raid1: Operation continuing on %d devices.\n", + bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded); +} + +static void print_conf(conf_t *conf) +{ + int i; + + printk("RAID1 conf printout:\n"); + if (!conf) { + printk("(!conf)\n"); + return; + } + printk(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded, + conf->raid_disks); + + rcu_read_lock(); + for (i = 0; i < conf->raid_disks; i++) { + char b[BDEVNAME_SIZE]; + mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); + if (rdev) + printk(" disk %d, wo:%d, o:%d, dev:%s\n", + i, !test_bit(In_sync, &rdev->flags), + !test_bit(Faulty, &rdev->flags), + bdevname(rdev->bdev,b)); + } + rcu_read_unlock(); +} + +static void close_sync(conf_t *conf) +{ + wait_barrier(conf); + allow_barrier(conf); + + mempool_destroy(conf->r1buf_pool); + conf->r1buf_pool = NULL; +} + +static int raid1_spare_active(mddev_t *mddev) +{ + int i; + conf_t *conf = mddev->private; + + /* + * Find all failed disks within the RAID1 configuration + * and mark them readable. 
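+ * (Any such device that is now working but not yet In_sync is marked
+ * In_sync and mddev->degraded is decremented accordingly.)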
+ * Called under mddev lock, so rcu protection not needed. + */ + for (i = 0; i < conf->raid_disks; i++) { + mdk_rdev_t *rdev = conf->mirrors[i].rdev; + if (rdev + && !test_bit(Faulty, &rdev->flags) + && !test_and_set_bit(In_sync, &rdev->flags)) { + unsigned long flags; + spin_lock_irqsave(&conf->device_lock, flags); + mddev->degraded--; + spin_unlock_irqrestore(&conf->device_lock, flags); + } + } + + print_conf(conf); + return 0; +} + + +static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) +{ + conf_t *conf = mddev->private; + int err = -EEXIST; + int mirror = 0; + mirror_info_t *p; + int first = 0; + int last = mddev->raid_disks - 1; + + if (rdev->raid_disk >= 0) + first = last = rdev->raid_disk; + + for (mirror = first; mirror <= last; mirror++) + if ( !(p=conf->mirrors+mirror)->rdev) { + + blk_queue_stack_limits(mddev->queue, + rdev->bdev->bd_disk->queue); + /* as we don't honour merge_bvec_fn, we must never risk + * violating it, so limit ->max_sector to one PAGE, as + * a one page request is never in violation. + */ + if (rdev->bdev->bd_disk->queue->merge_bvec_fn && + mddev->queue->max_sectors > (PAGE_SIZE>>9)) + blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); + + p->head_position = 0; + rdev->raid_disk = mirror; + err = 0; + /* As all devices are equivalent, we don't need a full recovery + * if this was recently any drive of the array + */ + if (rdev->saved_raid_disk < 0) + conf->fullsync = 1; + rcu_assign_pointer(p->rdev, rdev); + break; + } + + print_conf(conf); + return err; +} + +static int raid1_remove_disk(mddev_t *mddev, int number) +{ + conf_t *conf = mddev->private; + int err = 0; + mdk_rdev_t *rdev; + mirror_info_t *p = conf->mirrors+ number; + + print_conf(conf); + rdev = p->rdev; + if (rdev) { + if (test_bit(In_sync, &rdev->flags) || + atomic_read(&rdev->nr_pending)) { + err = -EBUSY; + goto abort; + } + /* Only remove non-faulty devices if recovery + * is not possible. + */ + if (!test_bit(Faulty, &rdev->flags) && + mddev->degraded < conf->raid_disks) { + err = -EBUSY; + goto abort; + } + p->rdev = NULL; + synchronize_rcu(); + if (atomic_read(&rdev->nr_pending)) { + /* lost the race, try later */ + err = -EBUSY; + p->rdev = rdev; + } + } +abort: + + print_conf(conf); + return err; +} + + +static void end_sync_read(struct bio *bio, int error) +{ + r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); + int i; + + for (i=r1_bio->mddev->raid_disks; i--; ) + if (r1_bio->bios[i] == bio) + break; + BUG_ON(i < 0); + update_head_pos(i, r1_bio); + /* + * we have read a block, now it needs to be re-written, + * or re-read if the read failed. + * We don't do much here, just schedule handling by raid1d + */ + if (test_bit(BIO_UPTODATE, &bio->bi_flags)) + set_bit(R1BIO_Uptodate, &r1_bio->state); + + if (atomic_dec_and_test(&r1_bio->remaining)) + reschedule_retry(r1_bio); +} + +static void end_sync_write(struct bio *bio, int error) +{ + int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); + r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); + mddev_t *mddev = r1_bio->mddev; + conf_t *conf = mddev_to_conf(mddev); + int i; + int mirror=0; + + for (i = 0; i < conf->raid_disks; i++) + if (r1_bio->bios[i] == bio) { + mirror = i; + break; + } + if (!uptodate) { + int sync_blocks = 0; + sector_t s = r1_bio->sector; + long sectors_to_go = r1_bio->sectors; + /* make sure these bits don't get cleared.
*/ + do { + bitmap_end_sync(mddev->bitmap, s, + &sync_blocks, 1); + s += sync_blocks; + sectors_to_go -= sync_blocks; + } while (sectors_to_go > 0); + md_error(mddev, conf->mirrors[mirror].rdev); + } + + update_head_pos(mirror, r1_bio); + + if (atomic_dec_and_test(&r1_bio->remaining)) { + sector_t s = r1_bio->sectors; + put_buf(r1_bio); + md_done_sync(mddev, s, uptodate); + } +} + +static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) +{ + conf_t *conf = mddev_to_conf(mddev); + int i; + int disks = conf->raid_disks; + struct bio *bio, *wbio; + + bio = r1_bio->bios[r1_bio->read_disk]; + + + if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { + /* We have read all readable devices. If we haven't + * got the block, then there is no hope left. + * If we have, then we want to do a comparison + * and skip the write if everything is the same. + * If any blocks failed to read, then we need to + * attempt an over-write + */ + int primary; + if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) { + for (i=0; i<mddev->raid_disks; i++) + if (r1_bio->bios[i]->bi_end_io == end_sync_read) + md_error(mddev, conf->mirrors[i].rdev); + + md_done_sync(mddev, r1_bio->sectors, 1); + put_buf(r1_bio); + return; + } + for (primary=0; primary<mddev->raid_disks; primary++) + if (r1_bio->bios[primary]->bi_end_io == end_sync_read && + test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) { + r1_bio->bios[primary]->bi_end_io = NULL; + rdev_dec_pending(conf->mirrors[primary].rdev, mddev); + break; + } + r1_bio->read_disk = primary; + for (i=0; i<mddev->raid_disks; i++) + if (r1_bio->bios[i]->bi_end_io == end_sync_read) { + int j; + int vcnt = r1_bio->sectors >> (PAGE_SHIFT- 9); + struct bio *pbio = r1_bio->bios[primary]; + struct bio *sbio = r1_bio->bios[i]; + + if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) { + for (j = vcnt; j-- ; ) { + struct page *p, *s; + p = pbio->bi_io_vec[j].bv_page; + s = sbio->bi_io_vec[j].bv_page; + if (memcmp(page_address(p), + page_address(s), + PAGE_SIZE)) + break; + } + } else + j = 0; + if (j >= 0) + mddev->resync_mismatches += r1_bio->sectors; + if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) + && test_bit(BIO_UPTODATE, &sbio->bi_flags))) { + sbio->bi_end_io = NULL; + rdev_dec_pending(conf->mirrors[i].rdev, mddev); + } else { + /* fixup the bio for reuse */ + int size; + sbio->bi_vcnt = vcnt; + sbio->bi_size = r1_bio->sectors << 9; + sbio->bi_idx = 0; + sbio->bi_phys_segments = 0; + sbio->bi_flags &= ~(BIO_POOL_MASK - 1); + sbio->bi_flags |= 1 << BIO_UPTODATE; + sbio->bi_next = NULL; + sbio->bi_sector = r1_bio->sector + + conf->mirrors[i].rdev->data_offset; + sbio->bi_bdev = conf->mirrors[i].rdev->bdev; + size = sbio->bi_size; + for (j = 0; j < vcnt ; j++) { + struct bio_vec *bi; + bi = &sbio->bi_io_vec[j]; + bi->bv_offset = 0; + if (size > PAGE_SIZE) + bi->bv_len = PAGE_SIZE; + else + bi->bv_len = size; + size -= PAGE_SIZE; + memcpy(page_address(bi->bv_page), + page_address(pbio->bi_io_vec[j].bv_page), + PAGE_SIZE); + } + + } + } + } + if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) { + /* ouch - failed to read all of that. + * Try some synchronous reads of other devices to get + * good data, much like with normal read errors. Only + * read into the pages we already have so we don't + * need to re-issue the read request. + * We don't need to freeze the array, because being in an + * active sync request, there is no normal IO, and + * no overlapping syncs. 
+ */ + sector_t sect = r1_bio->sector; + int sectors = r1_bio->sectors; + int idx = 0; + + while(sectors) { + int s = sectors; + int d = r1_bio->read_disk; + int success = 0; + mdk_rdev_t *rdev; + + if (s > (PAGE_SIZE>>9)) + s = PAGE_SIZE >> 9; + do { + if (r1_bio->bios[d]->bi_end_io == end_sync_read) { + /* No rcu protection needed here devices + * can only be removed when no resync is + * active, and resync is currently active + */ + rdev = conf->mirrors[d].rdev; + if (sync_page_io(rdev->bdev, + sect + rdev->data_offset, + s<<9, + bio->bi_io_vec[idx].bv_page, + READ)) { + success = 1; + break; + } + } + d++; + if (d == conf->raid_disks) + d = 0; + } while (!success && d != r1_bio->read_disk); + + if (success) { + int start = d; + /* write it back and re-read */ + set_bit(R1BIO_Uptodate, &r1_bio->state); + while (d != r1_bio->read_disk) { + if (d == 0) + d = conf->raid_disks; + d--; + if (r1_bio->bios[d]->bi_end_io != end_sync_read) + continue; + rdev = conf->mirrors[d].rdev; + atomic_add(s, &rdev->corrected_errors); + if (sync_page_io(rdev->bdev, + sect + rdev->data_offset, + s<<9, + bio->bi_io_vec[idx].bv_page, + WRITE) == 0) + md_error(mddev, rdev); + } + d = start; + while (d != r1_bio->read_disk) { + if (d == 0) + d = conf->raid_disks; + d--; + if (r1_bio->bios[d]->bi_end_io != end_sync_read) + continue; + rdev = conf->mirrors[d].rdev; + if (sync_page_io(rdev->bdev, + sect + rdev->data_offset, + s<<9, + bio->bi_io_vec[idx].bv_page, + READ) == 0) + md_error(mddev, rdev); + } + } else { + char b[BDEVNAME_SIZE]; + /* Cannot read from anywhere, array is toast */ + md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev); + printk(KERN_ALERT "raid1: %s: unrecoverable I/O read error" + " for block %llu\n", + bdevname(bio->bi_bdev,b), + (unsigned long long)r1_bio->sector); + md_done_sync(mddev, r1_bio->sectors, 0); + put_buf(r1_bio); + return; + } + sectors -= s; + sect += s; + idx ++; + } + } + + /* + * schedule writes + */ + atomic_set(&r1_bio->remaining, 1); + for (i = 0; i < disks ; i++) { + wbio = r1_bio->bios[i]; + if (wbio->bi_end_io == NULL || + (wbio->bi_end_io == end_sync_read && + (i == r1_bio->read_disk || + !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)))) + continue; + + wbio->bi_rw = WRITE; + wbio->bi_end_io = end_sync_write; + atomic_inc(&r1_bio->remaining); + md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9); + + generic_make_request(wbio); + } + + if (atomic_dec_and_test(&r1_bio->remaining)) { + /* if we're here, all write(s) have completed, so clean up */ + md_done_sync(mddev, r1_bio->sectors, 1); + put_buf(r1_bio); + } +} + +/* + * This is a kernel thread which: + * + * 1. Retries failed read operations on working mirrors. + * 2. Updates the raid superblock when problems encounter. + * 3. Performs writes following reads for array syncronising. + */ + +static void fix_read_error(conf_t *conf, int read_disk, + sector_t sect, int sectors) +{ + mddev_t *mddev = conf->mddev; + while(sectors) { + int s = sectors; + int d = read_disk; + int success = 0; + int start; + mdk_rdev_t *rdev; + + if (s > (PAGE_SIZE>>9)) + s = PAGE_SIZE >> 9; + + do { + /* Note: no rcu protection needed here + * as this is synchronous in the raid1d thread + * which is the thread that might remove + * a device. If raid1d ever becomes multi-threaded.... 
+ */ + rdev = conf->mirrors[d].rdev; + if (rdev && + test_bit(In_sync, &rdev->flags) && + sync_page_io(rdev->bdev, + sect + rdev->data_offset, + s<<9, + conf->tmppage, READ)) + success = 1; + else { + d++; + if (d == conf->raid_disks) + d = 0; + } + } while (!success && d != read_disk); + + if (!success) { + /* Cannot read from anywhere -- bye bye array */ + md_error(mddev, conf->mirrors[read_disk].rdev); + break; + } + /* write it back and re-read */ + start = d; + while (d != read_disk) { + if (d==0) + d = conf->raid_disks; + d--; + rdev = conf->mirrors[d].rdev; + if (rdev && + test_bit(In_sync, &rdev->flags)) { + if (sync_page_io(rdev->bdev, + sect + rdev->data_offset, + s<<9, conf->tmppage, WRITE) + == 0) + /* Well, this device is dead */ + md_error(mddev, rdev); + } + } + d = start; + while (d != read_disk) { + char b[BDEVNAME_SIZE]; + if (d==0) + d = conf->raid_disks; + d--; + rdev = conf->mirrors[d].rdev; + if (rdev && + test_bit(In_sync, &rdev->flags)) { + if (sync_page_io(rdev->bdev, + sect + rdev->data_offset, + s<<9, conf->tmppage, READ) + == 0) + /* Well, this device is dead */ + md_error(mddev, rdev); + else { + atomic_add(s, &rdev->corrected_errors); + printk(KERN_INFO + "raid1:%s: read error corrected " + "(%d sectors at %llu on %s)\n", + mdname(mddev), s, + (unsigned long long)(sect + + rdev->data_offset), + bdevname(rdev->bdev, b)); + } + } + } + sectors -= s; + sect += s; + } +} + +static void raid1d(mddev_t *mddev) +{ + r1bio_t *r1_bio; + struct bio *bio; + unsigned long flags; + conf_t *conf = mddev_to_conf(mddev); + struct list_head *head = &conf->retry_list; + int unplug=0; + mdk_rdev_t *rdev; + + md_check_recovery(mddev); + + for (;;) { + char b[BDEVNAME_SIZE]; + + unplug += flush_pending_writes(conf); + + spin_lock_irqsave(&conf->device_lock, flags); + if (list_empty(head)) { + spin_unlock_irqrestore(&conf->device_lock, flags); + break; + } + r1_bio = list_entry(head->prev, r1bio_t, retry_list); + list_del(head->prev); + conf->nr_queued--; + spin_unlock_irqrestore(&conf->device_lock, flags); + + mddev = r1_bio->mddev; + conf = mddev_to_conf(mddev); + if (test_bit(R1BIO_IsSync, &r1_bio->state)) { + sync_request_write(mddev, r1_bio); + unplug = 1; + } else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) { + /* some requests in the r1bio were BIO_RW_BARRIER + * requests which failed with -EOPNOTSUPP. Hohumm.. + * Better resubmit without the barrier. + * We know which devices to resubmit for, because + * all others have had their bios[] entry cleared. + * We already have a nr_pending reference on these rdevs. + */ + int i; + const int do_sync = bio_sync(r1_bio->master_bio); + clear_bit(R1BIO_BarrierRetry, &r1_bio->state); + clear_bit(R1BIO_Barrier, &r1_bio->state); + for (i=0; i < conf->raid_disks; i++) + if (r1_bio->bios[i]) + atomic_inc(&r1_bio->remaining); + for (i=0; i < conf->raid_disks; i++) + if (r1_bio->bios[i]) { + struct bio_vec *bvec; + int j; + + bio = bio_clone(r1_bio->master_bio, GFP_NOIO); + /* copy pages from the failed bio, as + * this might be a write-behind device */ + __bio_for_each_segment(bvec, bio, j, 0) + bvec->bv_page = bio_iovec_idx(r1_bio->bios[i], j)->bv_page; + bio_put(r1_bio->bios[i]); + bio->bi_sector = r1_bio->sector + + conf->mirrors[i].rdev->data_offset; + bio->bi_bdev = conf->mirrors[i].rdev->bdev; + bio->bi_end_io = raid1_end_write_request; + bio->bi_rw = WRITE | do_sync; + bio->bi_private = r1_bio; + r1_bio->bios[i] = bio; + generic_make_request(bio); + } + } else { + int disk; + + /* we got a read error. Maybe the drive is bad. 
Maybe just + * the block and we can fix it. + * We freeze all other IO, and try reading the block from + * other devices. When we find one, we re-write + * and check it that fixes the read error. + * This is all done synchronously while the array is + * frozen + */ + if (mddev->ro == 0) { + freeze_array(conf); + fix_read_error(conf, r1_bio->read_disk, + r1_bio->sector, + r1_bio->sectors); + unfreeze_array(conf); + } + + bio = r1_bio->bios[r1_bio->read_disk]; + if ((disk=read_balance(conf, r1_bio)) == -1) { + printk(KERN_ALERT "raid1: %s: unrecoverable I/O" + " read error for block %llu\n", + bdevname(bio->bi_bdev,b), + (unsigned long long)r1_bio->sector); + raid_end_bio_io(r1_bio); + } else { + const int do_sync = bio_sync(r1_bio->master_bio); + r1_bio->bios[r1_bio->read_disk] = + mddev->ro ? IO_BLOCKED : NULL; + r1_bio->read_disk = disk; + bio_put(bio); + bio = bio_clone(r1_bio->master_bio, GFP_NOIO); + r1_bio->bios[r1_bio->read_disk] = bio; + rdev = conf->mirrors[disk].rdev; + if (printk_ratelimit()) + printk(KERN_ERR "raid1: %s: redirecting sector %llu to" + " another mirror\n", + bdevname(rdev->bdev,b), + (unsigned long long)r1_bio->sector); + bio->bi_sector = r1_bio->sector + rdev->data_offset; + bio->bi_bdev = rdev->bdev; + bio->bi_end_io = raid1_end_read_request; + bio->bi_rw = READ | do_sync; + bio->bi_private = r1_bio; + unplug = 1; + generic_make_request(bio); + } + } + } + if (unplug) + unplug_slaves(mddev); +} + + +static int init_resync(conf_t *conf) +{ + int buffs; + + buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE; + BUG_ON(conf->r1buf_pool); + conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free, + conf->poolinfo); + if (!conf->r1buf_pool) + return -ENOMEM; + conf->next_resync = 0; + return 0; +} + +/* + * perform a "sync" on one "block" + * + * We need to make sure that no normal I/O request - particularly write + * requests - conflict with active sync requests. + * + * This is achieved by tracking pending requests and a 'barrier' concept + * that can be installed to exclude normal IO requests. + */ + +static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster) +{ + conf_t *conf = mddev_to_conf(mddev); + r1bio_t *r1_bio; + struct bio *bio; + sector_t max_sector, nr_sectors; + int disk = -1; + int i; + int wonly = -1; + int write_targets = 0, read_targets = 0; + int sync_blocks; + int still_degraded = 0; + + if (!conf->r1buf_pool) + { +/* + printk("sync start - bitmap %p\n", mddev->bitmap); +*/ + if (init_resync(conf)) + return 0; + } + + max_sector = mddev->size << 1; + if (sector_nr >= max_sector) { + /* If we aborted, we need to abort the + * sync on the 'current' bitmap chunk (there will + * only be one in raid1 resync. + * We can find the current addess in mddev->curr_resync + */ + if (mddev->curr_resync < max_sector) /* aborted */ + bitmap_end_sync(mddev->bitmap, mddev->curr_resync, + &sync_blocks, 1); + else /* completed sync */ + conf->fullsync = 0; + + bitmap_close_sync(mddev->bitmap); + close_sync(conf); + return 0; + } + + if (mddev->bitmap == NULL && + mddev->recovery_cp == MaxSector && + !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && + conf->fullsync == 0) { + *skipped = 1; + return max_sector - sector_nr; + } + /* before building a request, check if we can skip these blocks.. 
+ * This call the bitmap_start_sync doesn't actually record anything + */ + if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && + !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { + /* We can skip this block, and probably several more */ + *skipped = 1; + return sync_blocks; + } + /* + * If there is non-resync activity waiting for a turn, + * and resync is going fast enough, + * then let it though before starting on this new sync request. + */ + if (!go_faster && conf->nr_waiting) + msleep_interruptible(1000); + + bitmap_cond_end_sync(mddev->bitmap, sector_nr); + raise_barrier(conf); + + conf->next_resync = sector_nr; + + r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO); + rcu_read_lock(); + /* + * If we get a correctably read error during resync or recovery, + * we might want to read from a different device. So we + * flag all drives that could conceivably be read from for READ, + * and any others (which will be non-In_sync devices) for WRITE. + * If a read fails, we try reading from something else for which READ + * is OK. + */ + + r1_bio->mddev = mddev; + r1_bio->sector = sector_nr; + r1_bio->state = 0; + set_bit(R1BIO_IsSync, &r1_bio->state); + + for (i=0; i < conf->raid_disks; i++) { + mdk_rdev_t *rdev; + bio = r1_bio->bios[i]; + + /* take from bio_init */ + bio->bi_next = NULL; + bio->bi_flags |= 1 << BIO_UPTODATE; + bio->bi_rw = READ; + bio->bi_vcnt = 0; + bio->bi_idx = 0; + bio->bi_phys_segments = 0; + bio->bi_size = 0; + bio->bi_end_io = NULL; + bio->bi_private = NULL; + + rdev = rcu_dereference(conf->mirrors[i].rdev); + if (rdev == NULL || + test_bit(Faulty, &rdev->flags)) { + still_degraded = 1; + continue; + } else if (!test_bit(In_sync, &rdev->flags)) { + bio->bi_rw = WRITE; + bio->bi_end_io = end_sync_write; + write_targets ++; + } else { + /* may need to read from here */ + bio->bi_rw = READ; + bio->bi_end_io = end_sync_read; + if (test_bit(WriteMostly, &rdev->flags)) { + if (wonly < 0) + wonly = i; + } else { + if (disk < 0) + disk = i; + } + read_targets++; + } + atomic_inc(&rdev->nr_pending); + bio->bi_sector = sector_nr + rdev->data_offset; + bio->bi_bdev = rdev->bdev; + bio->bi_private = r1_bio; + } + rcu_read_unlock(); + if (disk < 0) + disk = wonly; + r1_bio->read_disk = disk; + + if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0) + /* extra read targets are also write targets */ + write_targets += read_targets-1; + + if (write_targets == 0 || read_targets == 0) { + /* There is nowhere to write, so all non-sync + * drives must be failed - so we are finished + */ + sector_t rv = max_sector - sector_nr; + *skipped = 1; + put_buf(r1_bio); + return rv; + } + + if (max_sector > mddev->resync_max) + max_sector = mddev->resync_max; /* Don't do IO beyond here */ + nr_sectors = 0; + sync_blocks = 0; + do { + struct page *page; + int len = PAGE_SIZE; + if (sector_nr + (len>>9) > max_sector) + len = (max_sector - sector_nr) << 9; + if (len == 0) + break; + if (sync_blocks == 0) { + if (!bitmap_start_sync(mddev->bitmap, sector_nr, + &sync_blocks, still_degraded) && + !conf->fullsync && + !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) + break; + BUG_ON(sync_blocks < (PAGE_SIZE>>9)); + if (len > (sync_blocks<<9)) + len = sync_blocks<<9; + } + + for (i=0 ; i < conf->raid_disks; i++) { + bio = r1_bio->bios[i]; + if (bio->bi_end_io) { + page = bio->bi_io_vec[bio->bi_vcnt].bv_page; + if (bio_add_page(bio, page, len, 0) == 0) { + /* stop here */ + bio->bi_io_vec[bio->bi_vcnt].bv_page = page; + while (i > 0) { + i--; + bio 
= r1_bio->bios[i]; + if (bio->bi_end_io==NULL) + continue; + /* remove last page from this bio */ + bio->bi_vcnt--; + bio->bi_size -= len; + bio->bi_flags &= ~(1<< BIO_SEG_VALID); + } + goto bio_full; + } + } + } + nr_sectors += len>>9; + sector_nr += len>>9; + sync_blocks -= (len>>9); + } while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES); + bio_full: + r1_bio->sectors = nr_sectors; + + /* For a user-requested sync, we read all readable devices and do a + * compare + */ + if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { + atomic_set(&r1_bio->remaining, read_targets); + for (i=0; i<conf->raid_disks; i++) { + bio = r1_bio->bios[i]; + if (bio->bi_end_io == end_sync_read) { + md_sync_acct(bio->bi_bdev, nr_sectors); + generic_make_request(bio); + } + } + } else { + atomic_set(&r1_bio->remaining, 1); + bio = r1_bio->bios[r1_bio->read_disk]; + md_sync_acct(bio->bi_bdev, nr_sectors); + generic_make_request(bio); + + } + return nr_sectors; +} + +static int run(mddev_t *mddev) +{ + conf_t *conf; + int i, j, disk_idx; + mirror_info_t *disk; + mdk_rdev_t *rdev; + struct list_head *tmp; + + if (mddev->level != 1) { + printk("raid1: %s: raid level not set to mirroring (%d)\n", + mdname(mddev), mddev->level); + goto out; + } + if (mddev->reshape_position != MaxSector) { + printk("raid1: %s: reshape_position set but not supported\n", + mdname(mddev)); + goto out; + } + /* + * copy the already verified devices into our private RAID1 + * bookkeeping area. [whatever we allocate in run(), + * should be freed in stop()] + */ + conf = kzalloc(sizeof(conf_t), GFP_KERNEL); + mddev->private = conf; + if (!conf) + goto out_no_mem; + + conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks, + GFP_KERNEL); + if (!conf->mirrors) + goto out_no_mem; + + conf->tmppage = alloc_page(GFP_KERNEL); + if (!conf->tmppage) + goto out_no_mem; + + conf->poolinfo = kmalloc(sizeof(*conf->poolinfo), GFP_KERNEL); + if (!conf->poolinfo) + goto out_no_mem; + conf->poolinfo->mddev = mddev; + conf->poolinfo->raid_disks = mddev->raid_disks; + conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc, + r1bio_pool_free, + conf->poolinfo); + if (!conf->r1bio_pool) + goto out_no_mem; + + spin_lock_init(&conf->device_lock); + mddev->queue->queue_lock = &conf->device_lock; + + rdev_for_each(rdev, tmp, mddev) { + disk_idx = rdev->raid_disk; + if (disk_idx >= mddev->raid_disks + || disk_idx < 0) + continue; + disk = conf->mirrors + disk_idx; + + disk->rdev = rdev; + + blk_queue_stack_limits(mddev->queue, + rdev->bdev->bd_disk->queue); + /* as we don't honour merge_bvec_fn, we must never risk + * violating it, so limit ->max_sector to one PAGE, as + * a one page request is never in violation. 
+ */ + if (rdev->bdev->bd_disk->queue->merge_bvec_fn && + mddev->queue->max_sectors > (PAGE_SIZE>>9)) + blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); + + disk->head_position = 0; + } + conf->raid_disks = mddev->raid_disks; + conf->mddev = mddev; + INIT_LIST_HEAD(&conf->retry_list); + + spin_lock_init(&conf->resync_lock); + init_waitqueue_head(&conf->wait_barrier); + + bio_list_init(&conf->pending_bio_list); + bio_list_init(&conf->flushing_bio_list); + + + mddev->degraded = 0; + for (i = 0; i < conf->raid_disks; i++) { + + disk = conf->mirrors + i; + + if (!disk->rdev || + !test_bit(In_sync, &disk->rdev->flags)) { + disk->head_position = 0; + mddev->degraded++; + if (disk->rdev) + conf->fullsync = 1; + } + } + if (mddev->degraded == conf->raid_disks) { + printk(KERN_ERR "raid1: no operational mirrors for %s\n", + mdname(mddev)); + goto out_free_conf; + } + if (conf->raid_disks - mddev->degraded == 1) + mddev->recovery_cp = MaxSector; + + /* + * find the first working one and use it as a starting point + * to read balancing. + */ + for (j = 0; j < conf->raid_disks && + (!conf->mirrors[j].rdev || + !test_bit(In_sync, &conf->mirrors[j].rdev->flags)) ; j++) + /* nothing */; + conf->last_used = j; + + + mddev->thread = md_register_thread(raid1d, mddev, "%s_raid1"); + if (!mddev->thread) { + printk(KERN_ERR + "raid1: couldn't allocate thread for %s\n", + mdname(mddev)); + goto out_free_conf; + } + + printk(KERN_INFO + "raid1: raid set %s active with %d out of %d mirrors\n", + mdname(mddev), mddev->raid_disks - mddev->degraded, + mddev->raid_disks); + /* + * Ok, everything is just fine now + */ + mddev->array_sectors = mddev->size * 2; + + mddev->queue->unplug_fn = raid1_unplug; + mddev->queue->backing_dev_info.congested_fn = raid1_congested; + mddev->queue->backing_dev_info.congested_data = mddev; + + return 0; + +out_no_mem: + printk(KERN_ERR "raid1: couldn't allocate memory for %s\n", + mdname(mddev)); + +out_free_conf: + if (conf) { + if (conf->r1bio_pool) + mempool_destroy(conf->r1bio_pool); + kfree(conf->mirrors); + safe_put_page(conf->tmppage); + kfree(conf->poolinfo); + kfree(conf); + mddev->private = NULL; + } +out: + return -EIO; +} + +static int stop(mddev_t *mddev) +{ + conf_t *conf = mddev_to_conf(mddev); + struct bitmap *bitmap = mddev->bitmap; + int behind_wait = 0; + + /* wait for behind writes to complete */ + while (bitmap && atomic_read(&bitmap->behind_writes) > 0) { + behind_wait++; + printk(KERN_INFO "raid1: behind writes in progress on device %s, waiting to stop (%d)\n", mdname(mddev), behind_wait); + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(HZ); /* wait a second */ + /* need to kick something here to make sure I/O goes? */ + } + + md_unregister_thread(mddev->thread); + mddev->thread = NULL; + blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ + if (conf->r1bio_pool) + mempool_destroy(conf->r1bio_pool); + kfree(conf->mirrors); + kfree(conf->poolinfo); + kfree(conf); + mddev->private = NULL; + return 0; +} + +static int raid1_resize(mddev_t *mddev, sector_t sectors) +{ + /* no resync is happening, and there is enough space + * on all devices, so we can resize. + * We need to make sure resync covers any new space. + * If the array is shrinking we should possibly wait until + * any io in the removed space completes, but it hardly seems + * worth it. 
+ */ + mddev->array_sectors = sectors; + set_capacity(mddev->gendisk, mddev->array_sectors); + mddev->changed = 1; + if (mddev->array_sectors / 2 > mddev->size && + mddev->recovery_cp == MaxSector) { + mddev->recovery_cp = mddev->size << 1; + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + } + mddev->size = mddev->array_sectors / 2; + mddev->resync_max_sectors = sectors; + return 0; +} + +static int raid1_reshape(mddev_t *mddev) +{ + /* We need to: + * 1/ resize the r1bio_pool + * 2/ resize conf->mirrors + * + * We allocate a new r1bio_pool if we can. + * Then raise a device barrier and wait until all IO stops. + * Then resize conf->mirrors and swap in the new r1bio pool. + * + * At the same time, we "pack" the devices so that all the missing + * devices have the higher raid_disk numbers. + */ + mempool_t *newpool, *oldpool; + struct pool_info *newpoolinfo; + mirror_info_t *newmirrors; + conf_t *conf = mddev_to_conf(mddev); + int cnt, raid_disks; + unsigned long flags; + int d, d2, err; + + /* Cannot change chunk_size, layout, or level */ + if (mddev->chunk_size != mddev->new_chunk || + mddev->layout != mddev->new_layout || + mddev->level != mddev->new_level) { + mddev->new_chunk = mddev->chunk_size; + mddev->new_layout = mddev->layout; + mddev->new_level = mddev->level; + return -EINVAL; + } + + err = md_allow_write(mddev); + if (err) + return err; + + raid_disks = mddev->raid_disks + mddev->delta_disks; + + if (raid_disks < conf->raid_disks) { + cnt=0; + for (d= 0; d < conf->raid_disks; d++) + if (conf->mirrors[d].rdev) + cnt++; + if (cnt > raid_disks) + return -EBUSY; + } + + newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL); + if (!newpoolinfo) + return -ENOMEM; + newpoolinfo->mddev = mddev; + newpoolinfo->raid_disks = raid_disks; + + newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc, + r1bio_pool_free, newpoolinfo); + if (!newpool) { + kfree(newpoolinfo); + return -ENOMEM; + } + newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks, GFP_KERNEL); + if (!newmirrors) { + kfree(newpoolinfo); + mempool_destroy(newpool); + return -ENOMEM; + } + + raise_barrier(conf); + + /* ok, everything is stopped */ + oldpool = conf->r1bio_pool; + conf->r1bio_pool = newpool; + + for (d = d2 = 0; d < conf->raid_disks; d++) { + mdk_rdev_t *rdev = conf->mirrors[d].rdev; + if (rdev && rdev->raid_disk != d2) { + char nm[20]; + sprintf(nm, "rd%d", rdev->raid_disk); + sysfs_remove_link(&mddev->kobj, nm); + rdev->raid_disk = d2; + sprintf(nm, "rd%d", rdev->raid_disk); + sysfs_remove_link(&mddev->kobj, nm); + if (sysfs_create_link(&mddev->kobj, + &rdev->kobj, nm)) + printk(KERN_WARNING + "md/raid1: cannot register " + "%s for %s\n", + nm, mdname(mddev)); + } + if (rdev) + newmirrors[d2++].rdev = rdev; + } + kfree(conf->mirrors); + conf->mirrors = newmirrors; + kfree(conf->poolinfo); + conf->poolinfo = newpoolinfo; + + spin_lock_irqsave(&conf->device_lock, flags); + mddev->degraded += (raid_disks - conf->raid_disks); + spin_unlock_irqrestore(&conf->device_lock, flags); + conf->raid_disks = mddev->raid_disks = raid_disks; + mddev->delta_disks = 0; + + conf->last_used = 0; /* just make sure it is in-range */ + lower_barrier(conf); + + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + md_wakeup_thread(mddev->thread); + + mempool_destroy(oldpool); + return 0; +} + +static void raid1_quiesce(mddev_t *mddev, int state) +{ + conf_t *conf = mddev_to_conf(mddev); + + switch(state) { + case 1: + raise_barrier(conf); + break; + case 0: + lower_barrier(conf); + break; + } +} + + +static struct 
mdk_personality raid1_personality = +{ + .name = "raid1", + .level = 1, + .owner = THIS_MODULE, + .make_request = make_request, + .run = run, + .stop = stop, + .status = status, + .error_handler = error, + .hot_add_disk = raid1_add_disk, + .hot_remove_disk= raid1_remove_disk, + .spare_active = raid1_spare_active, + .sync_request = sync_request, + .resize = raid1_resize, + .check_reshape = raid1_reshape, + .quiesce = raid1_quiesce, +}; + +static int __init raid_init(void) +{ + return register_md_personality(&raid1_personality); +} + +static void raid_exit(void) +{ + unregister_md_personality(&raid1_personality); +} + +module_init(raid_init); +module_exit(raid_exit); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("md-personality-3"); /* RAID1 */ +MODULE_ALIAS("md-raid1"); +MODULE_ALIAS("md-level-1"); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c new file mode 100644 index 0000000..b2fde77 --- /dev/null +++ b/drivers/md/raid10.c @@ -0,0 +1,2276 @@ +/* + * raid10.c : Multiple Devices driver for Linux + * + * Copyright (C) 2000-2004 Neil Brown + * + * RAID-10 support for md. + * + * Base on code in raid1.c. See raid1.c for futher copyright information. + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * You should have received a copy of the GNU General Public License + * (for example /usr/src/linux/COPYING); if not, write to the Free + * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include "dm-bio-list.h" +#include <linux/delay.h> +#include <linux/raid/raid10.h> +#include <linux/raid/bitmap.h> + +/* + * RAID10 provides a combination of RAID0 and RAID1 functionality. + * The layout of data is defined by + * chunk_size + * raid_disks + * near_copies (stored in low byte of layout) + * far_copies (stored in second byte of layout) + * far_offset (stored in bit 16 of layout ) + * + * The data to be stored is divided into chunks using chunksize. + * Each device is divided into far_copies sections. + * In each section, chunks are laid out in a style similar to raid0, but + * near_copies copies of each chunk is stored (each on a different drive). + * The starting device for each section is offset near_copies from the starting + * device of the previous section. + * Thus they are (near_copies*far_copies) of each chunk, and each is on a different + * drive. + * near_copies and far_copies must be at least one, and their product is at most + * raid_disks. + * + * If far_offset is true, then the far_copies are handled a bit differently. + * The copies are still in different stripes, but instead of be very far apart + * on disk, there are adjacent stripes. 
+ */ + +/* + * Number of guaranteed r10bios in case of extreme VM load: + */ +#define NR_RAID10_BIOS 256 + +static void unplug_slaves(mddev_t *mddev); + +static void allow_barrier(conf_t *conf); +static void lower_barrier(conf_t *conf); + +static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data) +{ + conf_t *conf = data; + r10bio_t *r10_bio; + int size = offsetof(struct r10bio_s, devs[conf->copies]); + + /* allocate a r10bio with room for raid_disks entries in the bios array */ + r10_bio = kzalloc(size, gfp_flags); + if (!r10_bio) + unplug_slaves(conf->mddev); + + return r10_bio; +} + +static void r10bio_pool_free(void *r10_bio, void *data) +{ + kfree(r10_bio); +} + +/* Maximum size of each resync request */ +#define RESYNC_BLOCK_SIZE (64*1024) +#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE) +/* amount of memory to reserve for resync requests */ +#define RESYNC_WINDOW (1024*1024) +/* maximum number of concurrent requests, memory permitting */ +#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE) + +/* + * When performing a resync, we need to read and compare, so + * we need as many pages are there are copies. + * When performing a recovery, we need 2 bios, one for read, + * one for write (we recover only one drive per r10buf) + * + */ +static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data) +{ + conf_t *conf = data; + struct page *page; + r10bio_t *r10_bio; + struct bio *bio; + int i, j; + int nalloc; + + r10_bio = r10bio_pool_alloc(gfp_flags, conf); + if (!r10_bio) { + unplug_slaves(conf->mddev); + return NULL; + } + + if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery)) + nalloc = conf->copies; /* resync */ + else + nalloc = 2; /* recovery */ + + /* + * Allocate bios. + */ + for (j = nalloc ; j-- ; ) { + bio = bio_alloc(gfp_flags, RESYNC_PAGES); + if (!bio) + goto out_free_bio; + r10_bio->devs[j].bio = bio; + } + /* + * Allocate RESYNC_PAGES data pages and attach them + * where needed. + */ + for (j = 0 ; j < nalloc; j++) { + bio = r10_bio->devs[j].bio; + for (i = 0; i < RESYNC_PAGES; i++) { + page = alloc_page(gfp_flags); + if (unlikely(!page)) + goto out_free_pages; + + bio->bi_io_vec[i].bv_page = page; + } + } + + return r10_bio; + +out_free_pages: + for ( ; i > 0 ; i--) + safe_put_page(bio->bi_io_vec[i-1].bv_page); + while (j--) + for (i = 0; i < RESYNC_PAGES ; i++) + safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page); + j = -1; +out_free_bio: + while ( ++j < nalloc ) + bio_put(r10_bio->devs[j].bio); + r10bio_pool_free(r10_bio, conf); + return NULL; +} + +static void r10buf_pool_free(void *__r10_bio, void *data) +{ + int i; + conf_t *conf = data; + r10bio_t *r10bio = __r10_bio; + int j; + + for (j=0; j < conf->copies; j++) { + struct bio *bio = r10bio->devs[j].bio; + if (bio) { + for (i = 0; i < RESYNC_PAGES; i++) { + safe_put_page(bio->bi_io_vec[i].bv_page); + bio->bi_io_vec[i].bv_page = NULL; + } + bio_put(bio); + } + } + r10bio_pool_free(r10bio, conf); +} + +static void put_all_bios(conf_t *conf, r10bio_t *r10_bio) +{ + int i; + + for (i = 0; i < conf->copies; i++) { + struct bio **bio = & r10_bio->devs[i].bio; + if (*bio && *bio != IO_BLOCKED) + bio_put(*bio); + *bio = NULL; + } +} + +static void free_r10bio(r10bio_t *r10_bio) +{ + conf_t *conf = mddev_to_conf(r10_bio->mddev); + + /* + * Wake up any possible resync thread that waits for the device + * to go idle. 
+ */ + allow_barrier(conf); + + put_all_bios(conf, r10_bio); + mempool_free(r10_bio, conf->r10bio_pool); +} + +static void put_buf(r10bio_t *r10_bio) +{ + conf_t *conf = mddev_to_conf(r10_bio->mddev); + + mempool_free(r10_bio, conf->r10buf_pool); + + lower_barrier(conf); +} + +static void reschedule_retry(r10bio_t *r10_bio) +{ + unsigned long flags; + mddev_t *mddev = r10_bio->mddev; + conf_t *conf = mddev_to_conf(mddev); + + spin_lock_irqsave(&conf->device_lock, flags); + list_add(&r10_bio->retry_list, &conf->retry_list); + conf->nr_queued ++; + spin_unlock_irqrestore(&conf->device_lock, flags); + + /* wake up frozen array... */ + wake_up(&conf->wait_barrier); + + md_wakeup_thread(mddev->thread); +} + +/* + * raid_end_bio_io() is called when we have finished servicing a mirrored + * operation and are ready to return a success/failure code to the buffer + * cache layer. + */ +static void raid_end_bio_io(r10bio_t *r10_bio) +{ + struct bio *bio = r10_bio->master_bio; + + bio_endio(bio, + test_bit(R10BIO_Uptodate, &r10_bio->state) ? 0 : -EIO); + free_r10bio(r10_bio); +} + +/* + * Update disk head position estimator based on IRQ completion info. + */ +static inline void update_head_pos(int slot, r10bio_t *r10_bio) +{ + conf_t *conf = mddev_to_conf(r10_bio->mddev); + + conf->mirrors[r10_bio->devs[slot].devnum].head_position = + r10_bio->devs[slot].addr + (r10_bio->sectors); +} + +static void raid10_end_read_request(struct bio *bio, int error) +{ + int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); + r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private); + int slot, dev; + conf_t *conf = mddev_to_conf(r10_bio->mddev); + + + slot = r10_bio->read_slot; + dev = r10_bio->devs[slot].devnum; + /* + * this branch is our 'one mirror IO has finished' event handler: + */ + update_head_pos(slot, r10_bio); + + if (uptodate) { + /* + * Set R10BIO_Uptodate in our master bio, so that + * we will return a good error code to the higher + * levels even if IO on some other mirrored buffer fails. + * + * The 'master' represents the composite IO operation to + * user-side. So if something waits for IO, then it will + * wait for the 'master' bio. + */ + set_bit(R10BIO_Uptodate, &r10_bio->state); + raid_end_bio_io(r10_bio); + } else { + /* + * oops, read error: + */ + char b[BDEVNAME_SIZE]; + if (printk_ratelimit()) + printk(KERN_ERR "raid10: %s: rescheduling sector %llu\n", + bdevname(conf->mirrors[dev].rdev->bdev,b), (unsigned long long)r10_bio->sector); + reschedule_retry(r10_bio); + } + + rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev); +} + +static void raid10_end_write_request(struct bio *bio, int error) +{ + int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); + r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private); + int slot, dev; + conf_t *conf = mddev_to_conf(r10_bio->mddev); + + for (slot = 0; slot < conf->copies; slot++) + if (r10_bio->devs[slot].bio == bio) + break; + dev = r10_bio->devs[slot].devnum; + + /* + * this branch is our 'one mirror IO has finished' event handler: + */ + if (!uptodate) { + md_error(r10_bio->mddev, conf->mirrors[dev].rdev); + /* an I/O failed, we can't clear the bitmap */ + set_bit(R10BIO_Degraded, &r10_bio->state); + } else + /* + * Set R10BIO_Uptodate in our master bio, so that + * we will return a good error code for to the higher + * levels even if IO on some other mirrored buffer fails. + * + * The 'master' represents the composite IO operation to + * user-side. So if something waits for IO, then it will + * wait for the 'master' bio. 
+ */ + set_bit(R10BIO_Uptodate, &r10_bio->state); + + update_head_pos(slot, r10_bio); + + /* + * + * Let's see if all mirrored write operations have finished + * already. + */ + if (atomic_dec_and_test(&r10_bio->remaining)) { + /* clear the bitmap if all writes complete successfully */ + bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, + r10_bio->sectors, + !test_bit(R10BIO_Degraded, &r10_bio->state), + 0); + md_write_end(r10_bio->mddev); + raid_end_bio_io(r10_bio); + } + + rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev); +} + + +/* + * RAID10 layout manager + * Aswell as the chunksize and raid_disks count, there are two + * parameters: near_copies and far_copies. + * near_copies * far_copies must be <= raid_disks. + * Normally one of these will be 1. + * If both are 1, we get raid0. + * If near_copies == raid_disks, we get raid1. + * + * Chunks are layed out in raid0 style with near_copies copies of the + * first chunk, followed by near_copies copies of the next chunk and + * so on. + * If far_copies > 1, then after 1/far_copies of the array has been assigned + * as described above, we start again with a device offset of near_copies. + * So we effectively have another copy of the whole array further down all + * the drives, but with blocks on different drives. + * With this layout, and block is never stored twice on the one device. + * + * raid10_find_phys finds the sector offset of a given virtual sector + * on each device that it is on. + * + * raid10_find_virt does the reverse mapping, from a device and a + * sector offset to a virtual address + */ + +static void raid10_find_phys(conf_t *conf, r10bio_t *r10bio) +{ + int n,f; + sector_t sector; + sector_t chunk; + sector_t stripe; + int dev; + + int slot = 0; + + /* now calculate first sector/dev */ + chunk = r10bio->sector >> conf->chunk_shift; + sector = r10bio->sector & conf->chunk_mask; + + chunk *= conf->near_copies; + stripe = chunk; + dev = sector_div(stripe, conf->raid_disks); + if (conf->far_offset) + stripe *= conf->far_copies; + + sector += stripe << conf->chunk_shift; + + /* and calculate all the others */ + for (n=0; n < conf->near_copies; n++) { + int d = dev; + sector_t s = sector; + r10bio->devs[slot].addr = sector; + r10bio->devs[slot].devnum = d; + slot++; + + for (f = 1; f < conf->far_copies; f++) { + d += conf->near_copies; + if (d >= conf->raid_disks) + d -= conf->raid_disks; + s += conf->stride; + r10bio->devs[slot].devnum = d; + r10bio->devs[slot].addr = s; + slot++; + } + dev++; + if (dev >= conf->raid_disks) { + dev = 0; + sector += (conf->chunk_mask + 1); + } + } + BUG_ON(slot != conf->copies); +} + +static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev) +{ + sector_t offset, chunk, vchunk; + + offset = sector & conf->chunk_mask; + if (conf->far_offset) { + int fc; + chunk = sector >> conf->chunk_shift; + fc = sector_div(chunk, conf->far_copies); + dev -= fc * conf->near_copies; + if (dev < 0) + dev += conf->raid_disks; + } else { + while (sector >= conf->stride) { + sector -= conf->stride; + if (dev < conf->near_copies) + dev += conf->raid_disks - conf->near_copies; + else + dev -= conf->near_copies; + } + chunk = sector >> conf->chunk_shift; + } + vchunk = chunk * conf->raid_disks + dev; + sector_div(vchunk, conf->near_copies); + return (vchunk << conf->chunk_shift) + offset; +} + +/** + * raid10_mergeable_bvec -- tell bio layer if a two requests can be merged + * @q: request queue + * @bvm: properties of new bio + * @biovec: the request that could be merged to it. 
+ * + * Return amount of bytes we can accept at this offset + * If near_copies == raid_disk, there are no striping issues, + * but in that case, the function isn't called at all. + */ +static int raid10_mergeable_bvec(struct request_queue *q, + struct bvec_merge_data *bvm, + struct bio_vec *biovec) +{ + mddev_t *mddev = q->queuedata; + sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); + int max; + unsigned int chunk_sectors = mddev->chunk_size >> 9; + unsigned int bio_sectors = bvm->bi_size >> 9; + + max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; + if (max < 0) max = 0; /* bio_add cannot handle a negative return */ + if (max <= biovec->bv_len && bio_sectors == 0) + return biovec->bv_len; + else + return max; +} + +/* + * This routine returns the disk from which the requested read should + * be done. There is a per-array 'next expected sequential IO' sector + * number - if this matches on the next IO then we use the last disk. + * There is also a per-disk 'last know head position' sector that is + * maintained from IRQ contexts, both the normal and the resync IO + * completion handlers update this position correctly. If there is no + * perfect sequential match then we pick the disk whose head is closest. + * + * If there are 2 mirrors in the same 2 devices, performance degrades + * because position is mirror, not device based. + * + * The rdev for the device selected will have nr_pending incremented. + */ + +/* + * FIXME: possibly should rethink readbalancing and do it differently + * depending on near_copies / far_copies geometry. + */ +static int read_balance(conf_t *conf, r10bio_t *r10_bio) +{ + const unsigned long this_sector = r10_bio->sector; + int disk, slot, nslot; + const int sectors = r10_bio->sectors; + sector_t new_distance, current_distance; + mdk_rdev_t *rdev; + + raid10_find_phys(conf, r10_bio); + rcu_read_lock(); + /* + * Check if we can balance. We can balance on the whole + * device if no resync is going on (recovery is ok), or below + * the resync window. We take the first readable disk when + * above the resync window. + */ + if (conf->mddev->recovery_cp < MaxSector + && (this_sector + sectors >= conf->next_resync)) { + /* make sure that disk is operational */ + slot = 0; + disk = r10_bio->devs[slot].devnum; + + while ((rdev = rcu_dereference(conf->mirrors[disk].rdev)) == NULL || + r10_bio->devs[slot].bio == IO_BLOCKED || + !test_bit(In_sync, &rdev->flags)) { + slot++; + if (slot == conf->copies) { + slot = 0; + disk = -1; + break; + } + disk = r10_bio->devs[slot].devnum; + } + goto rb_out; + } + + + /* make sure the disk is operational */ + slot = 0; + disk = r10_bio->devs[slot].devnum; + while ((rdev=rcu_dereference(conf->mirrors[disk].rdev)) == NULL || + r10_bio->devs[slot].bio == IO_BLOCKED || + !test_bit(In_sync, &rdev->flags)) { + slot ++; + if (slot == conf->copies) { + disk = -1; + goto rb_out; + } + disk = r10_bio->devs[slot].devnum; + } + + + current_distance = abs(r10_bio->devs[slot].addr - + conf->mirrors[disk].head_position); + + /* Find the disk whose head is closest, + * or - for far > 1 - find the closest to partition beginning */ + + for (nslot = slot; nslot < conf->copies; nslot++) { + int ndisk = r10_bio->devs[nslot].devnum; + + + if ((rdev=rcu_dereference(conf->mirrors[ndisk].rdev)) == NULL || + r10_bio->devs[nslot].bio == IO_BLOCKED || + !test_bit(In_sync, &rdev->flags)) + continue; + + /* This optimisation is debatable, and completely destroys + * sequential read speed for 'far copies' arrays. 
So only + * keep it for 'near' arrays, and review those later. + */ + if (conf->near_copies > 1 && !atomic_read(&rdev->nr_pending)) { + disk = ndisk; + slot = nslot; + break; + } + + /* for far > 1 always use the lowest address */ + if (conf->far_copies > 1) + new_distance = r10_bio->devs[nslot].addr; + else + new_distance = abs(r10_bio->devs[nslot].addr - + conf->mirrors[ndisk].head_position); + if (new_distance < current_distance) { + current_distance = new_distance; + disk = ndisk; + slot = nslot; + } + } + +rb_out: + r10_bio->read_slot = slot; +/* conf->next_seq_sect = this_sector + sectors;*/ + + if (disk >= 0 && (rdev=rcu_dereference(conf->mirrors[disk].rdev))!= NULL) + atomic_inc(&conf->mirrors[disk].rdev->nr_pending); + else + disk = -1; + rcu_read_unlock(); + + return disk; +} + +static void unplug_slaves(mddev_t *mddev) +{ + conf_t *conf = mddev_to_conf(mddev); + int i; + + rcu_read_lock(); + for (i=0; i<mddev->raid_disks; i++) { + mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); + if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { + struct request_queue *r_queue = bdev_get_queue(rdev->bdev); + + atomic_inc(&rdev->nr_pending); + rcu_read_unlock(); + + blk_unplug(r_queue); + + rdev_dec_pending(rdev, mddev); + rcu_read_lock(); + } + } + rcu_read_unlock(); +} + +static void raid10_unplug(struct request_queue *q) +{ + mddev_t *mddev = q->queuedata; + + unplug_slaves(q->queuedata); + md_wakeup_thread(mddev->thread); +} + +static int raid10_congested(void *data, int bits) +{ + mddev_t *mddev = data; + conf_t *conf = mddev_to_conf(mddev); + int i, ret = 0; + + rcu_read_lock(); + for (i = 0; i < mddev->raid_disks && ret == 0; i++) { + mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); + if (rdev && !test_bit(Faulty, &rdev->flags)) { + struct request_queue *q = bdev_get_queue(rdev->bdev); + + ret |= bdi_congested(&q->backing_dev_info, bits); + } + } + rcu_read_unlock(); + return ret; +} + +static int flush_pending_writes(conf_t *conf) +{ + /* Any writes that have been queued but are awaiting + * bitmap updates get flushed here. + * We return 1 if any requests were actually submitted. + */ + int rv = 0; + + spin_lock_irq(&conf->device_lock); + + if (conf->pending_bio_list.head) { + struct bio *bio; + bio = bio_list_get(&conf->pending_bio_list); + blk_remove_plug(conf->mddev->queue); + spin_unlock_irq(&conf->device_lock); + /* flush any pending bitmap writes to disk + * before proceeding w/ I/O */ + bitmap_unplug(conf->mddev->bitmap); + + while (bio) { /* submit pending writes */ + struct bio *next = bio->bi_next; + bio->bi_next = NULL; + generic_make_request(bio); + bio = next; + } + rv = 1; + } else + spin_unlock_irq(&conf->device_lock); + return rv; +} +/* Barriers.... + * Sometimes we need to suspend IO while we do something else, + * either some resync/recovery, or reconfigure the array. + * To do this we raise a 'barrier'. + * The 'barrier' is a counter that can be raised multiple times + * to count how many activities are happening which preclude + * normal IO. + * We can only raise the barrier if there is no pending IO. + * i.e. if nr_pending == 0. + * We choose only to raise the barrier if no-one is waiting for the + * barrier to go down. This means that as soon as an IO request + * is ready, no other operations which require a barrier will start + * until the IO request has had a chance. + * + * So: regular IO calls 'wait_barrier'. 
When that returns there + * is no backgroup IO happening, It must arrange to call + * allow_barrier when it has finished its IO. + * backgroup IO calls must call raise_barrier. Once that returns + * there is no normal IO happeing. It must arrange to call + * lower_barrier when the particular background IO completes. + */ + +static void raise_barrier(conf_t *conf, int force) +{ + BUG_ON(force && !conf->barrier); + spin_lock_irq(&conf->resync_lock); + + /* Wait until no block IO is waiting (unless 'force') */ + wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting, + conf->resync_lock, + raid10_unplug(conf->mddev->queue)); + + /* block any new IO from starting */ + conf->barrier++; + + /* No wait for all pending IO to complete */ + wait_event_lock_irq(conf->wait_barrier, + !conf->nr_pending && conf->barrier < RESYNC_DEPTH, + conf->resync_lock, + raid10_unplug(conf->mddev->queue)); + + spin_unlock_irq(&conf->resync_lock); +} + +static void lower_barrier(conf_t *conf) +{ + unsigned long flags; + spin_lock_irqsave(&conf->resync_lock, flags); + conf->barrier--; + spin_unlock_irqrestore(&conf->resync_lock, flags); + wake_up(&conf->wait_barrier); +} + +static void wait_barrier(conf_t *conf) +{ + spin_lock_irq(&conf->resync_lock); + if (conf->barrier) { + conf->nr_waiting++; + wait_event_lock_irq(conf->wait_barrier, !conf->barrier, + conf->resync_lock, + raid10_unplug(conf->mddev->queue)); + conf->nr_waiting--; + } + conf->nr_pending++; + spin_unlock_irq(&conf->resync_lock); +} + +static void allow_barrier(conf_t *conf) +{ + unsigned long flags; + spin_lock_irqsave(&conf->resync_lock, flags); + conf->nr_pending--; + spin_unlock_irqrestore(&conf->resync_lock, flags); + wake_up(&conf->wait_barrier); +} + +static void freeze_array(conf_t *conf) +{ + /* stop syncio and normal IO and wait for everything to + * go quiet. + * We increment barrier and nr_waiting, and then + * wait until nr_pending match nr_queued+1 + * This is called in the context of one normal IO request + * that has failed. Thus any sync request that might be pending + * will be blocked by nr_pending, and we need to wait for + * pending IO requests to complete or be queued for re-try. + * Thus the number queued (nr_queued) plus this request (1) + * must match the number of pending IOs (nr_pending) before + * we continue. + */ + spin_lock_irq(&conf->resync_lock); + conf->barrier++; + conf->nr_waiting++; + wait_event_lock_irq(conf->wait_barrier, + conf->nr_pending == conf->nr_queued+1, + conf->resync_lock, + ({ flush_pending_writes(conf); + raid10_unplug(conf->mddev->queue); })); + spin_unlock_irq(&conf->resync_lock); +} + +static void unfreeze_array(conf_t *conf) +{ + /* reverse the effect of the freeze */ + spin_lock_irq(&conf->resync_lock); + conf->barrier--; + conf->nr_waiting--; + wake_up(&conf->wait_barrier); + spin_unlock_irq(&conf->resync_lock); +} + +static int make_request(struct request_queue *q, struct bio * bio) +{ + mddev_t *mddev = q->queuedata; + conf_t *conf = mddev_to_conf(mddev); + mirror_info_t *mirror; + r10bio_t *r10_bio; + struct bio *read_bio; + int cpu; + int i; + int chunk_sects = conf->chunk_mask + 1; + const int rw = bio_data_dir(bio); + const int do_sync = bio_sync(bio); + struct bio_list bl; + unsigned long flags; + mdk_rdev_t *blocked_rdev; + + if (unlikely(bio_barrier(bio))) { + bio_endio(bio, -EOPNOTSUPP); + return 0; + } + + /* If this request crosses a chunk boundary, we need to + * split it. This will only happen for 1 PAGE (or less) requests. 
+ */ + if (unlikely( (bio->bi_sector & conf->chunk_mask) + (bio->bi_size >> 9) + > chunk_sects && + conf->near_copies < conf->raid_disks)) { + struct bio_pair *bp; + /* Sanity check -- queue functions should prevent this happening */ + if (bio->bi_vcnt != 1 || + bio->bi_idx != 0) + goto bad_map; + /* This is a one page bio that upper layers + * refuse to split for us, so we need to split it. + */ + bp = bio_split(bio, + chunk_sects - (bio->bi_sector & (chunk_sects - 1)) ); + if (make_request(q, &bp->bio1)) + generic_make_request(&bp->bio1); + if (make_request(q, &bp->bio2)) + generic_make_request(&bp->bio2); + + bio_pair_release(bp); + return 0; + bad_map: + printk("raid10_make_request bug: can't convert block across chunks" + " or bigger than %dk %llu %d\n", chunk_sects/2, + (unsigned long long)bio->bi_sector, bio->bi_size >> 10); + + bio_io_error(bio); + return 0; + } + + md_write_start(mddev, bio); + + /* + * Register the new request and wait if the reconstruction + * thread has put up a bar for new requests. + * Continue immediately if no resync is active currently. + */ + wait_barrier(conf); + + cpu = part_stat_lock(); + part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); + part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], + bio_sectors(bio)); + part_stat_unlock(); + + r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO); + + r10_bio->master_bio = bio; + r10_bio->sectors = bio->bi_size >> 9; + + r10_bio->mddev = mddev; + r10_bio->sector = bio->bi_sector; + r10_bio->state = 0; + + if (rw == READ) { + /* + * read balancing logic: + */ + int disk = read_balance(conf, r10_bio); + int slot = r10_bio->read_slot; + if (disk < 0) { + raid_end_bio_io(r10_bio); + return 0; + } + mirror = conf->mirrors + disk; + + read_bio = bio_clone(bio, GFP_NOIO); + + r10_bio->devs[slot].bio = read_bio; + + read_bio->bi_sector = r10_bio->devs[slot].addr + + mirror->rdev->data_offset; + read_bio->bi_bdev = mirror->rdev->bdev; + read_bio->bi_end_io = raid10_end_read_request; + read_bio->bi_rw = READ | do_sync; + read_bio->bi_private = r10_bio; + + generic_make_request(read_bio); + return 0; + } + + /* + * WRITE: + */ + /* first select target devices under rcu_lock and + * inc refcount on their rdev. 
Record them by setting + * bios[x] to bio + */ + raid10_find_phys(conf, r10_bio); + retry_write: + blocked_rdev = NULL; + rcu_read_lock(); + for (i = 0; i < conf->copies; i++) { + int d = r10_bio->devs[i].devnum; + mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev); + if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { + atomic_inc(&rdev->nr_pending); + blocked_rdev = rdev; + break; + } + if (rdev && !test_bit(Faulty, &rdev->flags)) { + atomic_inc(&rdev->nr_pending); + r10_bio->devs[i].bio = bio; + } else { + r10_bio->devs[i].bio = NULL; + set_bit(R10BIO_Degraded, &r10_bio->state); + } + } + rcu_read_unlock(); + + if (unlikely(blocked_rdev)) { + /* Have to wait for this device to get unblocked, then retry */ + int j; + int d; + + for (j = 0; j < i; j++) + if (r10_bio->devs[j].bio) { + d = r10_bio->devs[j].devnum; + rdev_dec_pending(conf->mirrors[d].rdev, mddev); + } + allow_barrier(conf); + md_wait_for_blocked_rdev(blocked_rdev, mddev); + wait_barrier(conf); + goto retry_write; + } + + atomic_set(&r10_bio->remaining, 0); + + bio_list_init(&bl); + for (i = 0; i < conf->copies; i++) { + struct bio *mbio; + int d = r10_bio->devs[i].devnum; + if (!r10_bio->devs[i].bio) + continue; + + mbio = bio_clone(bio, GFP_NOIO); + r10_bio->devs[i].bio = mbio; + + mbio->bi_sector = r10_bio->devs[i].addr+ + conf->mirrors[d].rdev->data_offset; + mbio->bi_bdev = conf->mirrors[d].rdev->bdev; + mbio->bi_end_io = raid10_end_write_request; + mbio->bi_rw = WRITE | do_sync; + mbio->bi_private = r10_bio; + + atomic_inc(&r10_bio->remaining); + bio_list_add(&bl, mbio); + } + + if (unlikely(!atomic_read(&r10_bio->remaining))) { + /* the array is dead */ + md_write_end(mddev); + raid_end_bio_io(r10_bio); + return 0; + } + + bitmap_startwrite(mddev->bitmap, bio->bi_sector, r10_bio->sectors, 0); + spin_lock_irqsave(&conf->device_lock, flags); + bio_list_merge(&conf->pending_bio_list, &bl); + blk_plug_device(mddev->queue); + spin_unlock_irqrestore(&conf->device_lock, flags); + + /* In case raid10d snuck in to freeze_array */ + wake_up(&conf->wait_barrier); + + if (do_sync) + md_wakeup_thread(mddev->thread); + + return 0; +} + +static void status(struct seq_file *seq, mddev_t *mddev) +{ + conf_t *conf = mddev_to_conf(mddev); + int i; + + if (conf->near_copies < conf->raid_disks) + seq_printf(seq, " %dK chunks", mddev->chunk_size/1024); + if (conf->near_copies > 1) + seq_printf(seq, " %d near-copies", conf->near_copies); + if (conf->far_copies > 1) { + if (conf->far_offset) + seq_printf(seq, " %d offset-copies", conf->far_copies); + else + seq_printf(seq, " %d far-copies", conf->far_copies); + } + seq_printf(seq, " [%d/%d] [", conf->raid_disks, + conf->raid_disks - mddev->degraded); + for (i = 0; i < conf->raid_disks; i++) + seq_printf(seq, "%s", + conf->mirrors[i].rdev && + test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_"); + seq_printf(seq, "]"); +} + +static void error(mddev_t *mddev, mdk_rdev_t *rdev) +{ + char b[BDEVNAME_SIZE]; + conf_t *conf = mddev_to_conf(mddev); + + /* + * If it is not operational, then we have already marked it as dead + * else if it is the last working disks, ignore the error, let the + * next level up know. + * else mark the drive as failed + */ + if (test_bit(In_sync, &rdev->flags) + && conf->raid_disks-mddev->degraded == 1) + /* + * Don't fail the drive, just return an IO error. + * The test should really be more sophisticated than + * "working_disks == 1", but it isn't critical, and + * can wait until we do more sophisticated "is the drive + * really dead" tests... 
+ */ + return; + if (test_and_clear_bit(In_sync, &rdev->flags)) { + unsigned long flags; + spin_lock_irqsave(&conf->device_lock, flags); + mddev->degraded++; + spin_unlock_irqrestore(&conf->device_lock, flags); + /* + * if recovery is running, make sure it aborts. + */ + set_bit(MD_RECOVERY_INTR, &mddev->recovery); + } + set_bit(Faulty, &rdev->flags); + set_bit(MD_CHANGE_DEVS, &mddev->flags); + printk(KERN_ALERT "raid10: Disk failure on %s, disabling device.\n" + "raid10: Operation continuing on %d devices.\n", + bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded); +} + +static void print_conf(conf_t *conf) +{ + int i; + mirror_info_t *tmp; + + printk("RAID10 conf printout:\n"); + if (!conf) { + printk("(!conf)\n"); + return; + } + printk(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded, + conf->raid_disks); + + for (i = 0; i < conf->raid_disks; i++) { + char b[BDEVNAME_SIZE]; + tmp = conf->mirrors + i; + if (tmp->rdev) + printk(" disk %d, wo:%d, o:%d, dev:%s\n", + i, !test_bit(In_sync, &tmp->rdev->flags), + !test_bit(Faulty, &tmp->rdev->flags), + bdevname(tmp->rdev->bdev,b)); + } +} + +static void close_sync(conf_t *conf) +{ + wait_barrier(conf); + allow_barrier(conf); + + mempool_destroy(conf->r10buf_pool); + conf->r10buf_pool = NULL; +} + +/* check if there are enough drives for + * every block to appear on atleast one + */ +static int enough(conf_t *conf) +{ + int first = 0; + + do { + int n = conf->copies; + int cnt = 0; + while (n--) { + if (conf->mirrors[first].rdev) + cnt++; + first = (first+1) % conf->raid_disks; + } + if (cnt == 0) + return 0; + } while (first != 0); + return 1; +} + +static int raid10_spare_active(mddev_t *mddev) +{ + int i; + conf_t *conf = mddev->private; + mirror_info_t *tmp; + + /* + * Find all non-in_sync disks within the RAID10 configuration + * and mark them in_sync + */ + for (i = 0; i < conf->raid_disks; i++) { + tmp = conf->mirrors + i; + if (tmp->rdev + && !test_bit(Faulty, &tmp->rdev->flags) + && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { + unsigned long flags; + spin_lock_irqsave(&conf->device_lock, flags); + mddev->degraded--; + spin_unlock_irqrestore(&conf->device_lock, flags); + } + } + + print_conf(conf); + return 0; +} + + +static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) +{ + conf_t *conf = mddev->private; + int err = -EEXIST; + int mirror; + mirror_info_t *p; + int first = 0; + int last = mddev->raid_disks - 1; + + if (mddev->recovery_cp < MaxSector) + /* only hot-add to in-sync arrays, as recovery is + * very different from resync + */ + return -EBUSY; + if (!enough(conf)) + return -EINVAL; + + if (rdev->raid_disk >= 0) + first = last = rdev->raid_disk; + + if (rdev->saved_raid_disk >= 0 && + rdev->saved_raid_disk >= first && + conf->mirrors[rdev->saved_raid_disk].rdev == NULL) + mirror = rdev->saved_raid_disk; + else + mirror = first; + for ( ; mirror <= last ; mirror++) + if ( !(p=conf->mirrors+mirror)->rdev) { + + blk_queue_stack_limits(mddev->queue, + rdev->bdev->bd_disk->queue); + /* as we don't honour merge_bvec_fn, we must never risk + * violating it, so limit ->max_sector to one PAGE, as + * a one page request is never in violation. 
+ */ + if (rdev->bdev->bd_disk->queue->merge_bvec_fn && + mddev->queue->max_sectors > (PAGE_SIZE>>9)) + mddev->queue->max_sectors = (PAGE_SIZE>>9); + + p->head_position = 0; + rdev->raid_disk = mirror; + err = 0; + if (rdev->saved_raid_disk != mirror) + conf->fullsync = 1; + rcu_assign_pointer(p->rdev, rdev); + break; + } + + print_conf(conf); + return err; +} + +static int raid10_remove_disk(mddev_t *mddev, int number) +{ + conf_t *conf = mddev->private; + int err = 0; + mdk_rdev_t *rdev; + mirror_info_t *p = conf->mirrors+ number; + + print_conf(conf); + rdev = p->rdev; + if (rdev) { + if (test_bit(In_sync, &rdev->flags) || + atomic_read(&rdev->nr_pending)) { + err = -EBUSY; + goto abort; + } + /* Only remove faulty devices in recovery + * is not possible. + */ + if (!test_bit(Faulty, &rdev->flags) && + enough(conf)) { + err = -EBUSY; + goto abort; + } + p->rdev = NULL; + synchronize_rcu(); + if (atomic_read(&rdev->nr_pending)) { + /* lost the race, try later */ + err = -EBUSY; + p->rdev = rdev; + } + } +abort: + + print_conf(conf); + return err; +} + + +static void end_sync_read(struct bio *bio, int error) +{ + r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private); + conf_t *conf = mddev_to_conf(r10_bio->mddev); + int i,d; + + for (i=0; i<conf->copies; i++) + if (r10_bio->devs[i].bio == bio) + break; + BUG_ON(i == conf->copies); + update_head_pos(i, r10_bio); + d = r10_bio->devs[i].devnum; + + if (test_bit(BIO_UPTODATE, &bio->bi_flags)) + set_bit(R10BIO_Uptodate, &r10_bio->state); + else { + atomic_add(r10_bio->sectors, + &conf->mirrors[d].rdev->corrected_errors); + if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery)) + md_error(r10_bio->mddev, + conf->mirrors[d].rdev); + } + + /* for reconstruct, we always reschedule after a read. + * for resync, only after all reads + */ + rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev); + if (test_bit(R10BIO_IsRecover, &r10_bio->state) || + atomic_dec_and_test(&r10_bio->remaining)) { + /* we have read all the blocks, + * do the comparison in process context in raid10d + */ + reschedule_retry(r10_bio); + } +} + +static void end_sync_write(struct bio *bio, int error) +{ + int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); + r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private); + mddev_t *mddev = r10_bio->mddev; + conf_t *conf = mddev_to_conf(mddev); + int i,d; + + for (i = 0; i < conf->copies; i++) + if (r10_bio->devs[i].bio == bio) + break; + d = r10_bio->devs[i].devnum; + + if (!uptodate) + md_error(mddev, conf->mirrors[d].rdev); + + update_head_pos(i, r10_bio); + + rdev_dec_pending(conf->mirrors[d].rdev, mddev); + while (atomic_dec_and_test(&r10_bio->remaining)) { + if (r10_bio->master_bio == NULL) { + /* the primary of several recovery bios */ + sector_t s = r10_bio->sectors; + put_buf(r10_bio); + md_done_sync(mddev, s, 1); + break; + } else { + r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio; + put_buf(r10_bio); + r10_bio = r10_bio2; + } + } +} + +/* + * Note: sync and recover and handled very differently for raid10 + * This code is for resync. + * For resync, we read through virtual addresses and read all blocks. + * If there is any error, we schedule a write. The lowest numbered + * drive is authoritative. + * However requests come for physical address, so we need to map. + * For every physical address there are raid_disks/copies virtual addresses, + * which is always are least one, but is not necessarly an integer. 
+ * This means that a physical address can span multiple chunks, so we may + * have to submit multiple io requests for a single sync request. + */ +/* + * We check if all blocks are in-sync and only write to blocks that + * aren't in sync + */ +static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio) +{ + conf_t *conf = mddev_to_conf(mddev); + int i, first; + struct bio *tbio, *fbio; + + atomic_set(&r10_bio->remaining, 1); + + /* find the first device with a block */ + for (i=0; i<conf->copies; i++) + if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) + break; + + if (i == conf->copies) + goto done; + + first = i; + fbio = r10_bio->devs[i].bio; + + /* now find blocks with errors */ + for (i=0 ; i < conf->copies ; i++) { + int j, d; + int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9); + + tbio = r10_bio->devs[i].bio; + + if (tbio->bi_end_io != end_sync_read) + continue; + if (i == first) + continue; + if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) { + /* We know that the bi_io_vec layout is the same for + * both 'first' and 'i', so we just compare them. + * All vec entries are PAGE_SIZE; + */ + for (j = 0; j < vcnt; j++) + if (memcmp(page_address(fbio->bi_io_vec[j].bv_page), + page_address(tbio->bi_io_vec[j].bv_page), + PAGE_SIZE)) + break; + if (j == vcnt) + continue; + mddev->resync_mismatches += r10_bio->sectors; + } + if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) + /* Don't fix anything. */ + continue; + /* Ok, we need to write this bio + * First we need to fixup bv_offset, bv_len and + * bi_vecs, as the read request might have corrupted these + */ + tbio->bi_vcnt = vcnt; + tbio->bi_size = r10_bio->sectors << 9; + tbio->bi_idx = 0; + tbio->bi_phys_segments = 0; + tbio->bi_flags &= ~(BIO_POOL_MASK - 1); + tbio->bi_flags |= 1 << BIO_UPTODATE; + tbio->bi_next = NULL; + tbio->bi_rw = WRITE; + tbio->bi_private = r10_bio; + tbio->bi_sector = r10_bio->devs[i].addr; + + for (j=0; j < vcnt ; j++) { + tbio->bi_io_vec[j].bv_offset = 0; + tbio->bi_io_vec[j].bv_len = PAGE_SIZE; + + memcpy(page_address(tbio->bi_io_vec[j].bv_page), + page_address(fbio->bi_io_vec[j].bv_page), + PAGE_SIZE); + } + tbio->bi_end_io = end_sync_write; + + d = r10_bio->devs[i].devnum; + atomic_inc(&conf->mirrors[d].rdev->nr_pending); + atomic_inc(&r10_bio->remaining); + md_sync_acct(conf->mirrors[d].rdev->bdev, tbio->bi_size >> 9); + + tbio->bi_sector += conf->mirrors[d].rdev->data_offset; + tbio->bi_bdev = conf->mirrors[d].rdev->bdev; + generic_make_request(tbio); + } + +done: + if (atomic_dec_and_test(&r10_bio->remaining)) { + md_done_sync(mddev, r10_bio->sectors, 1); + put_buf(r10_bio); + } +} + +/* + * Now for the recovery code. + * Recovery happens across physical sectors. + * We recover all non-is_sync drives by finding the virtual address of + * each, and then choose a working drive that also has that virt address. + * There is a separate r10_bio for each non-in_sync drive. + * Only the first two slots are in use. The first for reading, + * The second for writing. 
+ * + */ + +static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio) +{ + conf_t *conf = mddev_to_conf(mddev); + int i, d; + struct bio *bio, *wbio; + + + /* move the pages across to the second bio + * and submit the write request + */ + bio = r10_bio->devs[0].bio; + wbio = r10_bio->devs[1].bio; + for (i=0; i < wbio->bi_vcnt; i++) { + struct page *p = bio->bi_io_vec[i].bv_page; + bio->bi_io_vec[i].bv_page = wbio->bi_io_vec[i].bv_page; + wbio->bi_io_vec[i].bv_page = p; + } + d = r10_bio->devs[1].devnum; + + atomic_inc(&conf->mirrors[d].rdev->nr_pending); + md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9); + if (test_bit(R10BIO_Uptodate, &r10_bio->state)) + generic_make_request(wbio); + else + bio_endio(wbio, -EIO); +} + + +/* + * This is a kernel thread which: + * + * 1. Retries failed read operations on working mirrors. + * 2. Updates the raid superblock when problems encounter. + * 3. Performs writes following reads for array synchronising. + */ + +static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio) +{ + int sect = 0; /* Offset from r10_bio->sector */ + int sectors = r10_bio->sectors; + mdk_rdev_t*rdev; + while(sectors) { + int s = sectors; + int sl = r10_bio->read_slot; + int success = 0; + int start; + + if (s > (PAGE_SIZE>>9)) + s = PAGE_SIZE >> 9; + + rcu_read_lock(); + do { + int d = r10_bio->devs[sl].devnum; + rdev = rcu_dereference(conf->mirrors[d].rdev); + if (rdev && + test_bit(In_sync, &rdev->flags)) { + atomic_inc(&rdev->nr_pending); + rcu_read_unlock(); + success = sync_page_io(rdev->bdev, + r10_bio->devs[sl].addr + + sect + rdev->data_offset, + s<<9, + conf->tmppage, READ); + rdev_dec_pending(rdev, mddev); + rcu_read_lock(); + if (success) + break; + } + sl++; + if (sl == conf->copies) + sl = 0; + } while (!success && sl != r10_bio->read_slot); + rcu_read_unlock(); + + if (!success) { + /* Cannot read from anywhere -- bye bye array */ + int dn = r10_bio->devs[r10_bio->read_slot].devnum; + md_error(mddev, conf->mirrors[dn].rdev); + break; + } + + start = sl; + /* write it back and re-read */ + rcu_read_lock(); + while (sl != r10_bio->read_slot) { + int d; + if (sl==0) + sl = conf->copies; + sl--; + d = r10_bio->devs[sl].devnum; + rdev = rcu_dereference(conf->mirrors[d].rdev); + if (rdev && + test_bit(In_sync, &rdev->flags)) { + atomic_inc(&rdev->nr_pending); + rcu_read_unlock(); + atomic_add(s, &rdev->corrected_errors); + if (sync_page_io(rdev->bdev, + r10_bio->devs[sl].addr + + sect + rdev->data_offset, + s<<9, conf->tmppage, WRITE) + == 0) + /* Well, this device is dead */ + md_error(mddev, rdev); + rdev_dec_pending(rdev, mddev); + rcu_read_lock(); + } + } + sl = start; + while (sl != r10_bio->read_slot) { + int d; + if (sl==0) + sl = conf->copies; + sl--; + d = r10_bio->devs[sl].devnum; + rdev = rcu_dereference(conf->mirrors[d].rdev); + if (rdev && + test_bit(In_sync, &rdev->flags)) { + char b[BDEVNAME_SIZE]; + atomic_inc(&rdev->nr_pending); + rcu_read_unlock(); + if (sync_page_io(rdev->bdev, + r10_bio->devs[sl].addr + + sect + rdev->data_offset, + s<<9, conf->tmppage, READ) == 0) + /* Well, this device is dead */ + md_error(mddev, rdev); + else + printk(KERN_INFO + "raid10:%s: read error corrected" + " (%d sectors at %llu on %s)\n", + mdname(mddev), s, + (unsigned long long)(sect+ + rdev->data_offset), + bdevname(rdev->bdev, b)); + + rdev_dec_pending(rdev, mddev); + rcu_read_lock(); + } + } + rcu_read_unlock(); + + sectors -= s; + sect += s; + } +} + +static void raid10d(mddev_t *mddev) +{ + r10bio_t *r10_bio; + struct bio 
*bio; + unsigned long flags; + conf_t *conf = mddev_to_conf(mddev); + struct list_head *head = &conf->retry_list; + int unplug=0; + mdk_rdev_t *rdev; + + md_check_recovery(mddev); + + for (;;) { + char b[BDEVNAME_SIZE]; + + unplug += flush_pending_writes(conf); + + spin_lock_irqsave(&conf->device_lock, flags); + if (list_empty(head)) { + spin_unlock_irqrestore(&conf->device_lock, flags); + break; + } + r10_bio = list_entry(head->prev, r10bio_t, retry_list); + list_del(head->prev); + conf->nr_queued--; + spin_unlock_irqrestore(&conf->device_lock, flags); + + mddev = r10_bio->mddev; + conf = mddev_to_conf(mddev); + if (test_bit(R10BIO_IsSync, &r10_bio->state)) { + sync_request_write(mddev, r10_bio); + unplug = 1; + } else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) { + recovery_request_write(mddev, r10_bio); + unplug = 1; + } else { + int mirror; + /* we got a read error. Maybe the drive is bad. Maybe just + * the block and we can fix it. + * We freeze all other IO, and try reading the block from + * other devices. When we find one, we re-write + * and check it that fixes the read error. + * This is all done synchronously while the array is + * frozen. + */ + if (mddev->ro == 0) { + freeze_array(conf); + fix_read_error(conf, mddev, r10_bio); + unfreeze_array(conf); + } + + bio = r10_bio->devs[r10_bio->read_slot].bio; + r10_bio->devs[r10_bio->read_slot].bio = + mddev->ro ? IO_BLOCKED : NULL; + mirror = read_balance(conf, r10_bio); + if (mirror == -1) { + printk(KERN_ALERT "raid10: %s: unrecoverable I/O" + " read error for block %llu\n", + bdevname(bio->bi_bdev,b), + (unsigned long long)r10_bio->sector); + raid_end_bio_io(r10_bio); + bio_put(bio); + } else { + const int do_sync = bio_sync(r10_bio->master_bio); + bio_put(bio); + rdev = conf->mirrors[mirror].rdev; + if (printk_ratelimit()) + printk(KERN_ERR "raid10: %s: redirecting sector %llu to" + " another mirror\n", + bdevname(rdev->bdev,b), + (unsigned long long)r10_bio->sector); + bio = bio_clone(r10_bio->master_bio, GFP_NOIO); + r10_bio->devs[r10_bio->read_slot].bio = bio; + bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr + + rdev->data_offset; + bio->bi_bdev = rdev->bdev; + bio->bi_rw = READ | do_sync; + bio->bi_private = r10_bio; + bio->bi_end_io = raid10_end_read_request; + unplug = 1; + generic_make_request(bio); + } + } + } + if (unplug) + unplug_slaves(mddev); +} + + +static int init_resync(conf_t *conf) +{ + int buffs; + + buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE; + BUG_ON(conf->r10buf_pool); + conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf); + if (!conf->r10buf_pool) + return -ENOMEM; + conf->next_resync = 0; + return 0; +} + +/* + * perform a "sync" on one "block" + * + * We need to make sure that no normal I/O request - particularly write + * requests - conflict with active sync requests. + * + * This is achieved by tracking pending requests and a 'barrier' concept + * that can be installed to exclude normal IO requests. + * + * Resync and recovery are handled very differently. + * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery. + * + * For resync, we iterate over virtual addresses, read all copies, + * and update if there are differences. If only one copy is live, + * skip it. + * For recovery, we iterate over physical addresses, read a good + * value for each non-in_sync drive, and over-write. + * + * So, for recovery we may have several outstanding complex requests for a + * given address, one for each out-of-sync device. 
We model this by allocating + * a number of r10_bio structures, one for each out-of-sync device. + * As we setup these structures, we collect all bio's together into a list + * which we then process collectively to add pages, and then process again + * to pass to generic_make_request. + * + * The r10_bio structures are linked using a borrowed master_bio pointer. + * This link is counted in ->remaining. When the r10_bio that points to NULL + * has its remaining count decremented to 0, the whole complex operation + * is complete. + * + */ + +static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster) +{ + conf_t *conf = mddev_to_conf(mddev); + r10bio_t *r10_bio; + struct bio *biolist = NULL, *bio; + sector_t max_sector, nr_sectors; + int disk; + int i; + int max_sync; + int sync_blocks; + + sector_t sectors_skipped = 0; + int chunks_skipped = 0; + + if (!conf->r10buf_pool) + if (init_resync(conf)) + return 0; + + skipped: + max_sector = mddev->size << 1; + if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) + max_sector = mddev->resync_max_sectors; + if (sector_nr >= max_sector) { + /* If we aborted, we need to abort the + * sync on the 'current' bitmap chucks (there can + * be several when recovering multiple devices). + * as we may have started syncing it but not finished. + * We can find the current address in + * mddev->curr_resync, but for recovery, + * we need to convert that to several + * virtual addresses. + */ + if (mddev->curr_resync < max_sector) { /* aborted */ + if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) + bitmap_end_sync(mddev->bitmap, mddev->curr_resync, + &sync_blocks, 1); + else for (i=0; i<conf->raid_disks; i++) { + sector_t sect = + raid10_find_virt(conf, mddev->curr_resync, i); + bitmap_end_sync(mddev->bitmap, sect, + &sync_blocks, 1); + } + } else /* completed sync */ + conf->fullsync = 0; + + bitmap_close_sync(mddev->bitmap); + close_sync(conf); + *skipped = 1; + return sectors_skipped; + } + if (chunks_skipped >= conf->raid_disks) { + /* if there has been nothing to do on any drive, + * then there is nothing to do at all.. + */ + *skipped = 1; + return (max_sector - sector_nr) + sectors_skipped; + } + + if (max_sector > mddev->resync_max) + max_sector = mddev->resync_max; /* Don't do IO beyond here */ + + /* make sure whole request will fit in a chunk - if chunks + * are meaningful + */ + if (conf->near_copies < conf->raid_disks && + max_sector > (sector_nr | conf->chunk_mask)) + max_sector = (sector_nr | conf->chunk_mask) + 1; + /* + * If there is non-resync activity waiting for us then + * put in a delay to throttle resync. + */ + if (!go_faster && conf->nr_waiting) + msleep_interruptible(1000); + + /* Again, very different code for resync and recovery. + * Both must result in an r10bio with a list of bios that + * have bi_end_io, bi_sector, bi_bdev set, + * and bi_private set to the r10bio. + * For recovery, we may actually create several r10bios + * with 2 bios in each, that correspond to the bios in the main one. + * In this case, the subordinate r10bios link back through a + * borrowed master_bio pointer, and the counter in the master + * includes a ref from each subordinate. + */ + /* First, we decide what to do and set ->bi_end_io + * To end_sync_read if we want to read, and + * end_sync_write if we will want to write. + */ + + max_sync = RESYNC_PAGES << (PAGE_SHIFT-9); + if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { + /* recovery... 
the complicated one */ + int i, j, k; + r10_bio = NULL; + + for (i=0 ; i<conf->raid_disks; i++) + if (conf->mirrors[i].rdev && + !test_bit(In_sync, &conf->mirrors[i].rdev->flags)) { + int still_degraded = 0; + /* want to reconstruct this device */ + r10bio_t *rb2 = r10_bio; + sector_t sect = raid10_find_virt(conf, sector_nr, i); + int must_sync; + /* Unless we are doing a full sync, we only need + * to recover the block if it is set in the bitmap + */ + must_sync = bitmap_start_sync(mddev->bitmap, sect, + &sync_blocks, 1); + if (sync_blocks < max_sync) + max_sync = sync_blocks; + if (!must_sync && + !conf->fullsync) { + /* yep, skip the sync_blocks here, but don't assume + * that there will never be anything to do here + */ + chunks_skipped = -1; + continue; + } + + r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); + raise_barrier(conf, rb2 != NULL); + atomic_set(&r10_bio->remaining, 0); + + r10_bio->master_bio = (struct bio*)rb2; + if (rb2) + atomic_inc(&rb2->remaining); + r10_bio->mddev = mddev; + set_bit(R10BIO_IsRecover, &r10_bio->state); + r10_bio->sector = sect; + + raid10_find_phys(conf, r10_bio); + /* Need to check if this section will still be + * degraded + */ + for (j=0; j<conf->copies;j++) { + int d = r10_bio->devs[j].devnum; + if (conf->mirrors[d].rdev == NULL || + test_bit(Faulty, &conf->mirrors[d].rdev->flags)) { + still_degraded = 1; + break; + } + } + must_sync = bitmap_start_sync(mddev->bitmap, sect, + &sync_blocks, still_degraded); + + for (j=0; j<conf->copies;j++) { + int d = r10_bio->devs[j].devnum; + if (conf->mirrors[d].rdev && + test_bit(In_sync, &conf->mirrors[d].rdev->flags)) { + /* This is where we read from */ + bio = r10_bio->devs[0].bio; + bio->bi_next = biolist; + biolist = bio; + bio->bi_private = r10_bio; + bio->bi_end_io = end_sync_read; + bio->bi_rw = READ; + bio->bi_sector = r10_bio->devs[j].addr + + conf->mirrors[d].rdev->data_offset; + bio->bi_bdev = conf->mirrors[d].rdev->bdev; + atomic_inc(&conf->mirrors[d].rdev->nr_pending); + atomic_inc(&r10_bio->remaining); + /* and we write to 'i' */ + + for (k=0; k<conf->copies; k++) + if (r10_bio->devs[k].devnum == i) + break; + BUG_ON(k == conf->copies); + bio = r10_bio->devs[1].bio; + bio->bi_next = biolist; + biolist = bio; + bio->bi_private = r10_bio; + bio->bi_end_io = end_sync_write; + bio->bi_rw = WRITE; + bio->bi_sector = r10_bio->devs[k].addr + + conf->mirrors[i].rdev->data_offset; + bio->bi_bdev = conf->mirrors[i].rdev->bdev; + + r10_bio->devs[0].devnum = d; + r10_bio->devs[1].devnum = i; + + break; + } + } + if (j == conf->copies) { + /* Cannot recover, so abort the recovery */ + put_buf(r10_bio); + if (rb2) + atomic_dec(&rb2->remaining); + r10_bio = rb2; + if (!test_and_set_bit(MD_RECOVERY_INTR, + &mddev->recovery)) + printk(KERN_INFO "raid10: %s: insufficient working devices for recovery.\n", + mdname(mddev)); + break; + } + } + if (biolist == NULL) { + while (r10_bio) { + r10bio_t *rb2 = r10_bio; + r10_bio = (r10bio_t*) rb2->master_bio; + rb2->master_bio = NULL; + put_buf(rb2); + } + goto giveup; + } + } else { + /* resync. 
Schedule a read for every block at this virt offset */ + int count = 0; + + bitmap_cond_end_sync(mddev->bitmap, sector_nr); + + if (!bitmap_start_sync(mddev->bitmap, sector_nr, + &sync_blocks, mddev->degraded) && + !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { + /* We can skip this block */ + *skipped = 1; + return sync_blocks + sectors_skipped; + } + if (sync_blocks < max_sync) + max_sync = sync_blocks; + r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); + + r10_bio->mddev = mddev; + atomic_set(&r10_bio->remaining, 0); + raise_barrier(conf, 0); + conf->next_resync = sector_nr; + + r10_bio->master_bio = NULL; + r10_bio->sector = sector_nr; + set_bit(R10BIO_IsSync, &r10_bio->state); + raid10_find_phys(conf, r10_bio); + r10_bio->sectors = (sector_nr | conf->chunk_mask) - sector_nr +1; + + for (i=0; i<conf->copies; i++) { + int d = r10_bio->devs[i].devnum; + bio = r10_bio->devs[i].bio; + bio->bi_end_io = NULL; + clear_bit(BIO_UPTODATE, &bio->bi_flags); + if (conf->mirrors[d].rdev == NULL || + test_bit(Faulty, &conf->mirrors[d].rdev->flags)) + continue; + atomic_inc(&conf->mirrors[d].rdev->nr_pending); + atomic_inc(&r10_bio->remaining); + bio->bi_next = biolist; + biolist = bio; + bio->bi_private = r10_bio; + bio->bi_end_io = end_sync_read; + bio->bi_rw = READ; + bio->bi_sector = r10_bio->devs[i].addr + + conf->mirrors[d].rdev->data_offset; + bio->bi_bdev = conf->mirrors[d].rdev->bdev; + count++; + } + + if (count < 2) { + for (i=0; i<conf->copies; i++) { + int d = r10_bio->devs[i].devnum; + if (r10_bio->devs[i].bio->bi_end_io) + rdev_dec_pending(conf->mirrors[d].rdev, mddev); + } + put_buf(r10_bio); + biolist = NULL; + goto giveup; + } + } + + for (bio = biolist; bio ; bio=bio->bi_next) { + + bio->bi_flags &= ~(BIO_POOL_MASK - 1); + if (bio->bi_end_io) + bio->bi_flags |= 1 << BIO_UPTODATE; + bio->bi_vcnt = 0; + bio->bi_idx = 0; + bio->bi_phys_segments = 0; + bio->bi_size = 0; + } + + nr_sectors = 0; + if (sector_nr + max_sync < max_sector) + max_sector = sector_nr + max_sync; + do { + struct page *page; + int len = PAGE_SIZE; + disk = 0; + if (sector_nr + (len>>9) > max_sector) + len = (max_sector - sector_nr) << 9; + if (len == 0) + break; + for (bio= biolist ; bio ; bio=bio->bi_next) { + page = bio->bi_io_vec[bio->bi_vcnt].bv_page; + if (bio_add_page(bio, page, len, 0) == 0) { + /* stop here */ + struct bio *bio2; + bio->bi_io_vec[bio->bi_vcnt].bv_page = page; + for (bio2 = biolist; bio2 && bio2 != bio; bio2 = bio2->bi_next) { + /* remove last page from this bio */ + bio2->bi_vcnt--; + bio2->bi_size -= len; + bio2->bi_flags &= ~(1<< BIO_SEG_VALID); + } + goto bio_full; + } + disk = i; + } + nr_sectors += len>>9; + sector_nr += len>>9; + } while (biolist->bi_vcnt < RESYNC_PAGES); + bio_full: + r10_bio->sectors = nr_sectors; + + while (biolist) { + bio = biolist; + biolist = biolist->bi_next; + + bio->bi_next = NULL; + r10_bio = bio->bi_private; + r10_bio->sectors = nr_sectors; + + if (bio->bi_end_io == end_sync_read) { + md_sync_acct(bio->bi_bdev, nr_sectors); + generic_make_request(bio); + } + } + + if (sectors_skipped) + /* pretend they weren't skipped, it makes + * no important difference in this case + */ + md_done_sync(mddev, sectors_skipped, 1); + + return sectors_skipped + nr_sectors; + giveup: + /* There is nowhere to write, so all non-sync + * drives must be failed, so try the next chunk... 
+ */ + if (sector_nr + max_sync < max_sector) + max_sector = sector_nr + max_sync; + + sectors_skipped += (max_sector - sector_nr); + chunks_skipped ++; + sector_nr = max_sector; + goto skipped; +} + +static int run(mddev_t *mddev) +{ + conf_t *conf; + int i, disk_idx; + mirror_info_t *disk; + mdk_rdev_t *rdev; + struct list_head *tmp; + int nc, fc, fo; + sector_t stride, size; + + if (mddev->chunk_size < PAGE_SIZE) { + printk(KERN_ERR "md/raid10: chunk size must be " + "at least PAGE_SIZE(%ld).\n", PAGE_SIZE); + return -EINVAL; + } + + nc = mddev->layout & 255; + fc = (mddev->layout >> 8) & 255; + fo = mddev->layout & (1<<16); + if ((nc*fc) <2 || (nc*fc) > mddev->raid_disks || + (mddev->layout >> 17)) { + printk(KERN_ERR "raid10: %s: unsupported raid10 layout: 0x%8x\n", + mdname(mddev), mddev->layout); + goto out; + } + /* + * copy the already verified devices into our private RAID10 + * bookkeeping area. [whatever we allocate in run(), + * should be freed in stop()] + */ + conf = kzalloc(sizeof(conf_t), GFP_KERNEL); + mddev->private = conf; + if (!conf) { + printk(KERN_ERR "raid10: couldn't allocate memory for %s\n", + mdname(mddev)); + goto out; + } + conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks, + GFP_KERNEL); + if (!conf->mirrors) { + printk(KERN_ERR "raid10: couldn't allocate memory for %s\n", + mdname(mddev)); + goto out_free_conf; + } + + conf->tmppage = alloc_page(GFP_KERNEL); + if (!conf->tmppage) + goto out_free_conf; + + conf->mddev = mddev; + conf->raid_disks = mddev->raid_disks; + conf->near_copies = nc; + conf->far_copies = fc; + conf->copies = nc*fc; + conf->far_offset = fo; + conf->chunk_mask = (sector_t)(mddev->chunk_size>>9)-1; + conf->chunk_shift = ffz(~mddev->chunk_size) - 9; + size = mddev->size >> (conf->chunk_shift-1); + sector_div(size, fc); + size = size * conf->raid_disks; + sector_div(size, nc); + /* 'size' is now the number of chunks in the array */ + /* calculate "used chunks per device" in 'stride' */ + stride = size * conf->copies; + + /* We need to round up when dividing by raid_disks to + * get the stride size. + */ + stride += conf->raid_disks - 1; + sector_div(stride, conf->raid_disks); + mddev->size = stride << (conf->chunk_shift-1); + + if (fo) + stride = 1; + else + sector_div(stride, fc); + conf->stride = stride << conf->chunk_shift; + + conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc, + r10bio_pool_free, conf); + if (!conf->r10bio_pool) { + printk(KERN_ERR "raid10: couldn't allocate memory for %s\n", + mdname(mddev)); + goto out_free_conf; + } + + spin_lock_init(&conf->device_lock); + mddev->queue->queue_lock = &conf->device_lock; + + rdev_for_each(rdev, tmp, mddev) { + disk_idx = rdev->raid_disk; + if (disk_idx >= mddev->raid_disks + || disk_idx < 0) + continue; + disk = conf->mirrors + disk_idx; + + disk->rdev = rdev; + + blk_queue_stack_limits(mddev->queue, + rdev->bdev->bd_disk->queue); + /* as we don't honour merge_bvec_fn, we must never risk + * violating it, so limit ->max_sector to one PAGE, as + * a one page request is never in violation. 
+ */ + if (rdev->bdev->bd_disk->queue->merge_bvec_fn && + mddev->queue->max_sectors > (PAGE_SIZE>>9)) + mddev->queue->max_sectors = (PAGE_SIZE>>9); + + disk->head_position = 0; + } + INIT_LIST_HEAD(&conf->retry_list); + + spin_lock_init(&conf->resync_lock); + init_waitqueue_head(&conf->wait_barrier); + + /* need to check that every block has at least one working mirror */ + if (!enough(conf)) { + printk(KERN_ERR "raid10: not enough operational mirrors for %s\n", + mdname(mddev)); + goto out_free_conf; + } + + mddev->degraded = 0; + for (i = 0; i < conf->raid_disks; i++) { + + disk = conf->mirrors + i; + + if (!disk->rdev || + !test_bit(In_sync, &disk->rdev->flags)) { + disk->head_position = 0; + mddev->degraded++; + if (disk->rdev) + conf->fullsync = 1; + } + } + + + mddev->thread = md_register_thread(raid10d, mddev, "%s_raid10"); + if (!mddev->thread) { + printk(KERN_ERR + "raid10: couldn't allocate thread for %s\n", + mdname(mddev)); + goto out_free_conf; + } + + printk(KERN_INFO + "raid10: raid set %s active with %d out of %d devices\n", + mdname(mddev), mddev->raid_disks - mddev->degraded, + mddev->raid_disks); + /* + * Ok, everything is just fine now + */ + mddev->array_sectors = size << conf->chunk_shift; + mddev->resync_max_sectors = size << conf->chunk_shift; + + mddev->queue->unplug_fn = raid10_unplug; + mddev->queue->backing_dev_info.congested_fn = raid10_congested; + mddev->queue->backing_dev_info.congested_data = mddev; + + /* Calculate max read-ahead size. + * We need to readahead at least twice a whole stripe.... + * maybe... + */ + { + int stripe = conf->raid_disks * (mddev->chunk_size / PAGE_SIZE); + stripe /= conf->near_copies; + if (mddev->queue->backing_dev_info.ra_pages < 2* stripe) + mddev->queue->backing_dev_info.ra_pages = 2* stripe; + } + + if (conf->near_copies < mddev->raid_disks) + blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec); + return 0; + +out_free_conf: + if (conf->r10bio_pool) + mempool_destroy(conf->r10bio_pool); + safe_put_page(conf->tmppage); + kfree(conf->mirrors); + kfree(conf); + mddev->private = NULL; +out: + return -EIO; +} + +static int stop(mddev_t *mddev) +{ + conf_t *conf = mddev_to_conf(mddev); + + md_unregister_thread(mddev->thread); + mddev->thread = NULL; + blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ + if (conf->r10bio_pool) + mempool_destroy(conf->r10bio_pool); + kfree(conf->mirrors); + kfree(conf); + mddev->private = NULL; + return 0; +} + +static void raid10_quiesce(mddev_t *mddev, int state) +{ + conf_t *conf = mddev_to_conf(mddev); + + switch(state) { + case 1: + raise_barrier(conf, 0); + break; + case 0: + lower_barrier(conf); + break; + } + if (mddev->thread) { + if (mddev->bitmap) + mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ; + else + mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; + md_wakeup_thread(mddev->thread); + } +} + +static struct mdk_personality raid10_personality = +{ + .name = "raid10", + .level = 10, + .owner = THIS_MODULE, + .make_request = make_request, + .run = run, + .stop = stop, + .status = status, + .error_handler = error, + .hot_add_disk = raid10_add_disk, + .hot_remove_disk= raid10_remove_disk, + .spare_active = raid10_spare_active, + .sync_request = sync_request, + .quiesce = raid10_quiesce, +}; + +static int __init raid_init(void) +{ + return register_md_personality(&raid10_personality); +} + +static void raid_exit(void) +{ + unregister_md_personality(&raid10_personality); +} + +module_init(raid_init); +module_exit(raid_exit); +MODULE_LICENSE("GPL"); 
+MODULE_ALIAS("md-personality-9"); /* RAID10 */ +MODULE_ALIAS("md-raid10"); +MODULE_ALIAS("md-level-10"); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c new file mode 100644 index 0000000..a36a743 --- /dev/null +++ b/drivers/md/raid5.c @@ -0,0 +1,4776 @@ +/* + * raid5.c : Multiple Devices driver for Linux + * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman + * Copyright (C) 1999, 2000 Ingo Molnar + * Copyright (C) 2002, 2003 H. Peter Anvin + * + * RAID-4/5/6 management functions. + * Thanks to Penguin Computing for making the RAID-6 development possible + * by donating a test server! + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * You should have received a copy of the GNU General Public License + * (for example /usr/src/linux/COPYING); if not, write to the Free + * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/* + * BITMAP UNPLUGGING: + * + * The sequencing for updating the bitmap reliably is a little + * subtle (and I got it wrong the first time) so it deserves some + * explanation. + * + * We group bitmap updates into batches. Each batch has a number. + * We may write out several batches at once, but that isn't very important. + * conf->bm_write is the number of the last batch successfully written. + * conf->bm_flush is the number of the last batch that was closed to + * new additions. + * When we discover that we will need to write to any block in a stripe + * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq + * the number of the batch it will be in. This is bm_flush+1. + * When we are ready to do a write, if that batch hasn't been written yet, + * we plug the array and queue the stripe for later. + * When an unplug happens, we increment bm_flush, thus closing the current + * batch. + * When we notice that bm_flush > bm_write, we write out all pending updates + * to the bitmap, and advance bm_write to where bm_flush was. + * This may occasionally write a bit out twice, but is sure never to + * miss any bits. + */ + +#include <linux/kthread.h> +#include "raid6.h" + +#include <linux/raid/bitmap.h> +#include <linux/async_tx.h> + +/* + * Stripe cache + */ + +#define NR_STRIPES 256 +#define STRIPE_SIZE PAGE_SIZE +#define STRIPE_SHIFT (PAGE_SHIFT - 9) +#define STRIPE_SECTORS (STRIPE_SIZE>>9) +#define IO_THRESHOLD 1 +#define BYPASS_THRESHOLD 1 +#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head)) +#define HASH_MASK (NR_HASH - 1) + +#define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK])) + +/* bio's attached to a stripe+device for I/O are linked together in bi_sector + * order without overlap. There may be several bio's per stripe+device, and + * a bio could span several devices. + * When walking this list for a particular stripe+device, we must never proceed + * beyond a bio that extends past this device, as the next bio might no longer + * be valid. + * This macro is used to determine the 'next' bio in the list, given the sector + * of the current stripe+device + */ +#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? 
(bio)->bi_next : NULL)
+/*
+ * The following can be used to debug the driver
+ */
+#define RAID5_PARANOIA 1
+#if RAID5_PARANOIA && defined(CONFIG_SMP)
+# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
+#else
+# define CHECK_DEVLOCK()
+#endif
+
+#ifdef DEBUG
+#define inline
+#define __inline__
+#endif
+
+#define printk_rl(args...) ((void) (printk_ratelimit() && printk(args)))
+
+#if !RAID6_USE_EMPTY_ZERO_PAGE
+/* In .bss so it's zeroed */
+const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
+#endif
+
+/*
+ * We maintain a biased count of active stripes in the bottom 16 bits of
+ * bi_phys_segments, and a count of processed stripes in the upper 16 bits
+ */
+static inline int raid5_bi_phys_segments(struct bio *bio)
+{
+ return bio->bi_phys_segments & 0xffff;
+}
+
+static inline int raid5_bi_hw_segments(struct bio *bio)
+{
+ return (bio->bi_phys_segments >> 16) & 0xffff;
+}
+
+static inline int raid5_dec_bi_phys_segments(struct bio *bio)
+{
+ --bio->bi_phys_segments;
+ return raid5_bi_phys_segments(bio);
+}
+
+static inline int raid5_dec_bi_hw_segments(struct bio *bio)
+{
+ unsigned short val = raid5_bi_hw_segments(bio);
+
+ --val;
+ bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
+ return val;
+}
+
+static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
+{
+ bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
+}
+
+static inline int raid6_next_disk(int disk, int raid_disks)
+{
+ disk++;
+ return (disk < raid_disks) ? disk : 0;
+}
+
+static void return_io(struct bio *return_bi)
+{
+ struct bio *bi = return_bi;
+ while (bi) {
+
+ return_bi = bi->bi_next;
+ bi->bi_next = NULL;
+ bi->bi_size = 0;
+ bio_endio(bi, 0);
+ bi = return_bi;
+ }
+}
+
+static void print_raid5_conf (raid5_conf_t *conf);
+
+static int stripe_operations_active(struct stripe_head *sh)
+{
+ return sh->check_state || sh->reconstruct_state ||
+ test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
+ test_bit(STRIPE_COMPUTE_RUN, &sh->state);
+}
+
+static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
+{
+ if (atomic_dec_and_test(&sh->count)) {
+ BUG_ON(!list_empty(&sh->lru));
+ BUG_ON(atomic_read(&conf->active_stripes)==0);
+ if (test_bit(STRIPE_HANDLE, &sh->state)) {
+ if (test_bit(STRIPE_DELAYED, &sh->state)) {
+ list_add_tail(&sh->lru, &conf->delayed_list);
+ blk_plug_device(conf->mddev->queue);
+ } else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
+ sh->bm_seq - conf->seq_write > 0) {
+ list_add_tail(&sh->lru, &conf->bitmap_list);
+ blk_plug_device(conf->mddev->queue);
+ } else {
+ clear_bit(STRIPE_BIT_DELAY, &sh->state);
+ list_add_tail(&sh->lru, &conf->handle_list);
+ }
+ md_wakeup_thread(conf->mddev->thread);
+ } else {
+ BUG_ON(stripe_operations_active(sh));
+ if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
+ atomic_dec(&conf->preread_active_stripes);
+ if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
+ md_wakeup_thread(conf->mddev->thread);
+ }
+ atomic_dec(&conf->active_stripes);
+ if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
+ list_add_tail(&sh->lru, &conf->inactive_list);
+ wake_up(&conf->wait_for_stripe);
+ if (conf->retry_read_aligned)
+ md_wakeup_thread(conf->mddev->thread);
+ }
+ }
+ }
+}
+static void release_stripe(struct stripe_head *sh)
+{
+ raid5_conf_t *conf = sh->raid_conf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&conf->device_lock, flags);
+ __release_stripe(conf, sh);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+}
+
+static inline void remove_hash(struct
stripe_head *sh) +{ + pr_debug("remove_hash(), stripe %llu\n", + (unsigned long long)sh->sector); + + hlist_del_init(&sh->hash); +} + +static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh) +{ + struct hlist_head *hp = stripe_hash(conf, sh->sector); + + pr_debug("insert_hash(), stripe %llu\n", + (unsigned long long)sh->sector); + + CHECK_DEVLOCK(); + hlist_add_head(&sh->hash, hp); +} + + +/* find an idle stripe, make sure it is unhashed, and return it. */ +static struct stripe_head *get_free_stripe(raid5_conf_t *conf) +{ + struct stripe_head *sh = NULL; + struct list_head *first; + + CHECK_DEVLOCK(); + if (list_empty(&conf->inactive_list)) + goto out; + first = conf->inactive_list.next; + sh = list_entry(first, struct stripe_head, lru); + list_del_init(first); + remove_hash(sh); + atomic_inc(&conf->active_stripes); +out: + return sh; +} + +static void shrink_buffers(struct stripe_head *sh, int num) +{ + struct page *p; + int i; + + for (i=0; i<num ; i++) { + p = sh->dev[i].page; + if (!p) + continue; + sh->dev[i].page = NULL; + put_page(p); + } +} + +static int grow_buffers(struct stripe_head *sh, int num) +{ + int i; + + for (i=0; i<num; i++) { + struct page *page; + + if (!(page = alloc_page(GFP_KERNEL))) { + return 1; + } + sh->dev[i].page = page; + } + return 0; +} + +static void raid5_build_block(struct stripe_head *sh, int i); + +static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int disks) +{ + raid5_conf_t *conf = sh->raid_conf; + int i; + + BUG_ON(atomic_read(&sh->count) != 0); + BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); + BUG_ON(stripe_operations_active(sh)); + + CHECK_DEVLOCK(); + pr_debug("init_stripe called, stripe %llu\n", + (unsigned long long)sh->sector); + + remove_hash(sh); + + sh->sector = sector; + sh->pd_idx = pd_idx; + sh->state = 0; + + sh->disks = disks; + + for (i = sh->disks; i--; ) { + struct r5dev *dev = &sh->dev[i]; + + if (dev->toread || dev->read || dev->towrite || dev->written || + test_bit(R5_LOCKED, &dev->flags)) { + printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n", + (unsigned long long)sh->sector, i, dev->toread, + dev->read, dev->towrite, dev->written, + test_bit(R5_LOCKED, &dev->flags)); + BUG(); + } + dev->flags = 0; + raid5_build_block(sh, i); + } + insert_hash(conf, sh); +} + +static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, int disks) +{ + struct stripe_head *sh; + struct hlist_node *hn; + + CHECK_DEVLOCK(); + pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector); + hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash) + if (sh->sector == sector && sh->disks == disks) + return sh; + pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector); + return NULL; +} + +static void unplug_slaves(mddev_t *mddev); +static void raid5_unplug_device(struct request_queue *q); + +static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks, + int pd_idx, int noblock) +{ + struct stripe_head *sh; + + pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector); + + spin_lock_irq(&conf->device_lock); + + do { + wait_event_lock_irq(conf->wait_for_stripe, + conf->quiesce == 0, + conf->device_lock, /* nothing */); + sh = __find_stripe(conf, sector, disks); + if (!sh) { + if (!conf->inactive_blocked) + sh = get_free_stripe(conf); + if (noblock && sh == NULL) + break; + if (!sh) { + conf->inactive_blocked = 1; + wait_event_lock_irq(conf->wait_for_stripe, + !list_empty(&conf->inactive_list) && + 
(atomic_read(&conf->active_stripes) + < (conf->max_nr_stripes *3/4) + || !conf->inactive_blocked), + conf->device_lock, + raid5_unplug_device(conf->mddev->queue) + ); + conf->inactive_blocked = 0; + } else + init_stripe(sh, sector, pd_idx, disks); + } else { + if (atomic_read(&sh->count)) { + BUG_ON(!list_empty(&sh->lru)); + } else { + if (!test_bit(STRIPE_HANDLE, &sh->state)) + atomic_inc(&conf->active_stripes); + if (list_empty(&sh->lru) && + !test_bit(STRIPE_EXPANDING, &sh->state)) + BUG(); + list_del_init(&sh->lru); + } + } + } while (sh == NULL); + + if (sh) + atomic_inc(&sh->count); + + spin_unlock_irq(&conf->device_lock); + return sh; +} + +static void +raid5_end_read_request(struct bio *bi, int error); +static void +raid5_end_write_request(struct bio *bi, int error); + +static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) +{ + raid5_conf_t *conf = sh->raid_conf; + int i, disks = sh->disks; + + might_sleep(); + + for (i = disks; i--; ) { + int rw; + struct bio *bi; + mdk_rdev_t *rdev; + if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) + rw = WRITE; + else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) + rw = READ; + else + continue; + + bi = &sh->dev[i].req; + + bi->bi_rw = rw; + if (rw == WRITE) + bi->bi_end_io = raid5_end_write_request; + else + bi->bi_end_io = raid5_end_read_request; + + rcu_read_lock(); + rdev = rcu_dereference(conf->disks[i].rdev); + if (rdev && test_bit(Faulty, &rdev->flags)) + rdev = NULL; + if (rdev) + atomic_inc(&rdev->nr_pending); + rcu_read_unlock(); + + if (rdev) { + if (s->syncing || s->expanding || s->expanded) + md_sync_acct(rdev->bdev, STRIPE_SECTORS); + + set_bit(STRIPE_IO_STARTED, &sh->state); + + bi->bi_bdev = rdev->bdev; + pr_debug("%s: for %llu schedule op %ld on disc %d\n", + __func__, (unsigned long long)sh->sector, + bi->bi_rw, i); + atomic_inc(&sh->count); + bi->bi_sector = sh->sector + rdev->data_offset; + bi->bi_flags = 1 << BIO_UPTODATE; + bi->bi_vcnt = 1; + bi->bi_max_vecs = 1; + bi->bi_idx = 0; + bi->bi_io_vec = &sh->dev[i].vec; + bi->bi_io_vec[0].bv_len = STRIPE_SIZE; + bi->bi_io_vec[0].bv_offset = 0; + bi->bi_size = STRIPE_SIZE; + bi->bi_next = NULL; + if (rw == WRITE && + test_bit(R5_ReWrite, &sh->dev[i].flags)) + atomic_add(STRIPE_SECTORS, + &rdev->corrected_errors); + generic_make_request(bi); + } else { + if (rw == WRITE) + set_bit(STRIPE_DEGRADED, &sh->state); + pr_debug("skip op %ld on disc %d for sector %llu\n", + bi->bi_rw, i, (unsigned long long)sh->sector); + clear_bit(R5_LOCKED, &sh->dev[i].flags); + set_bit(STRIPE_HANDLE, &sh->state); + } + } +} + +static struct dma_async_tx_descriptor * +async_copy_data(int frombio, struct bio *bio, struct page *page, + sector_t sector, struct dma_async_tx_descriptor *tx) +{ + struct bio_vec *bvl; + struct page *bio_page; + int i; + int page_offset; + + if (bio->bi_sector >= sector) + page_offset = (signed)(bio->bi_sector - sector) * 512; + else + page_offset = (signed)(sector - bio->bi_sector) * -512; + bio_for_each_segment(bvl, bio, i) { + int len = bio_iovec_idx(bio, i)->bv_len; + int clen; + int b_offset = 0; + + if (page_offset < 0) { + b_offset = -page_offset; + page_offset += b_offset; + len -= b_offset; + } + + if (len > 0 && page_offset + len > STRIPE_SIZE) + clen = STRIPE_SIZE - page_offset; + else + clen = len; + + if (clen > 0) { + b_offset += bio_iovec_idx(bio, i)->bv_offset; + bio_page = bio_iovec_idx(bio, i)->bv_page; + if (frombio) + tx = async_memcpy(page, bio_page, page_offset, + b_offset, clen, + ASYNC_TX_DEP_ACK, + tx, NULL, 
NULL); + else + tx = async_memcpy(bio_page, page, b_offset, + page_offset, clen, + ASYNC_TX_DEP_ACK, + tx, NULL, NULL); + } + if (clen < len) /* hit end of page */ + break; + page_offset += len; + } + + return tx; +} + +static void ops_complete_biofill(void *stripe_head_ref) +{ + struct stripe_head *sh = stripe_head_ref; + struct bio *return_bi = NULL; + raid5_conf_t *conf = sh->raid_conf; + int i; + + pr_debug("%s: stripe %llu\n", __func__, + (unsigned long long)sh->sector); + + /* clear completed biofills */ + spin_lock_irq(&conf->device_lock); + for (i = sh->disks; i--; ) { + struct r5dev *dev = &sh->dev[i]; + + /* acknowledge completion of a biofill operation */ + /* and check if we need to reply to a read request, + * new R5_Wantfill requests are held off until + * !STRIPE_BIOFILL_RUN + */ + if (test_and_clear_bit(R5_Wantfill, &dev->flags)) { + struct bio *rbi, *rbi2; + + BUG_ON(!dev->read); + rbi = dev->read; + dev->read = NULL; + while (rbi && rbi->bi_sector < + dev->sector + STRIPE_SECTORS) { + rbi2 = r5_next_bio(rbi, dev->sector); + if (!raid5_dec_bi_phys_segments(rbi)) { + rbi->bi_next = return_bi; + return_bi = rbi; + } + rbi = rbi2; + } + } + } + spin_unlock_irq(&conf->device_lock); + clear_bit(STRIPE_BIOFILL_RUN, &sh->state); + + return_io(return_bi); + + set_bit(STRIPE_HANDLE, &sh->state); + release_stripe(sh); +} + +static void ops_run_biofill(struct stripe_head *sh) +{ + struct dma_async_tx_descriptor *tx = NULL; + raid5_conf_t *conf = sh->raid_conf; + int i; + + pr_debug("%s: stripe %llu\n", __func__, + (unsigned long long)sh->sector); + + for (i = sh->disks; i--; ) { + struct r5dev *dev = &sh->dev[i]; + if (test_bit(R5_Wantfill, &dev->flags)) { + struct bio *rbi; + spin_lock_irq(&conf->device_lock); + dev->read = rbi = dev->toread; + dev->toread = NULL; + spin_unlock_irq(&conf->device_lock); + while (rbi && rbi->bi_sector < + dev->sector + STRIPE_SECTORS) { + tx = async_copy_data(0, rbi, dev->page, + dev->sector, tx); + rbi = r5_next_bio(rbi, dev->sector); + } + } + } + + atomic_inc(&sh->count); + async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx, + ops_complete_biofill, sh); +} + +static void ops_complete_compute5(void *stripe_head_ref) +{ + struct stripe_head *sh = stripe_head_ref; + int target = sh->ops.target; + struct r5dev *tgt = &sh->dev[target]; + + pr_debug("%s: stripe %llu\n", __func__, + (unsigned long long)sh->sector); + + set_bit(R5_UPTODATE, &tgt->flags); + BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); + clear_bit(R5_Wantcompute, &tgt->flags); + clear_bit(STRIPE_COMPUTE_RUN, &sh->state); + if (sh->check_state == check_state_compute_run) + sh->check_state = check_state_compute_result; + set_bit(STRIPE_HANDLE, &sh->state); + release_stripe(sh); +} + +static struct dma_async_tx_descriptor *ops_run_compute5(struct stripe_head *sh) +{ + /* kernel stack size limits the total number of disks */ + int disks = sh->disks; + struct page *xor_srcs[disks]; + int target = sh->ops.target; + struct r5dev *tgt = &sh->dev[target]; + struct page *xor_dest = tgt->page; + int count = 0; + struct dma_async_tx_descriptor *tx; + int i; + + pr_debug("%s: stripe %llu block: %d\n", + __func__, (unsigned long long)sh->sector, target); + BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); + + for (i = disks; i--; ) + if (i != target) + xor_srcs[count++] = sh->dev[i].page; + + atomic_inc(&sh->count); + + if (unlikely(count == 1)) + tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, + 0, NULL, ops_complete_compute5, sh); + else + tx = async_xor(xor_dest, xor_srcs, 0, 
count, STRIPE_SIZE, + ASYNC_TX_XOR_ZERO_DST, NULL, + ops_complete_compute5, sh); + + return tx; +} + +static void ops_complete_prexor(void *stripe_head_ref) +{ + struct stripe_head *sh = stripe_head_ref; + + pr_debug("%s: stripe %llu\n", __func__, + (unsigned long long)sh->sector); +} + +static struct dma_async_tx_descriptor * +ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) +{ + /* kernel stack size limits the total number of disks */ + int disks = sh->disks; + struct page *xor_srcs[disks]; + int count = 0, pd_idx = sh->pd_idx, i; + + /* existing parity data subtracted */ + struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; + + pr_debug("%s: stripe %llu\n", __func__, + (unsigned long long)sh->sector); + + for (i = disks; i--; ) { + struct r5dev *dev = &sh->dev[i]; + /* Only process blocks that are known to be uptodate */ + if (test_bit(R5_Wantdrain, &dev->flags)) + xor_srcs[count++] = dev->page; + } + + tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, + ASYNC_TX_DEP_ACK | ASYNC_TX_XOR_DROP_DST, tx, + ops_complete_prexor, sh); + + return tx; +} + +static struct dma_async_tx_descriptor * +ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) +{ + int disks = sh->disks; + int i; + + pr_debug("%s: stripe %llu\n", __func__, + (unsigned long long)sh->sector); + + for (i = disks; i--; ) { + struct r5dev *dev = &sh->dev[i]; + struct bio *chosen; + + if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) { + struct bio *wbi; + + spin_lock(&sh->lock); + chosen = dev->towrite; + dev->towrite = NULL; + BUG_ON(dev->written); + wbi = dev->written = chosen; + spin_unlock(&sh->lock); + + while (wbi && wbi->bi_sector < + dev->sector + STRIPE_SECTORS) { + tx = async_copy_data(1, wbi, dev->page, + dev->sector, tx); + wbi = r5_next_bio(wbi, dev->sector); + } + } + } + + return tx; +} + +static void ops_complete_postxor(void *stripe_head_ref) +{ + struct stripe_head *sh = stripe_head_ref; + int disks = sh->disks, i, pd_idx = sh->pd_idx; + + pr_debug("%s: stripe %llu\n", __func__, + (unsigned long long)sh->sector); + + for (i = disks; i--; ) { + struct r5dev *dev = &sh->dev[i]; + if (dev->written || i == pd_idx) + set_bit(R5_UPTODATE, &dev->flags); + } + + if (sh->reconstruct_state == reconstruct_state_drain_run) + sh->reconstruct_state = reconstruct_state_drain_result; + else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) + sh->reconstruct_state = reconstruct_state_prexor_drain_result; + else { + BUG_ON(sh->reconstruct_state != reconstruct_state_run); + sh->reconstruct_state = reconstruct_state_result; + } + + set_bit(STRIPE_HANDLE, &sh->state); + release_stripe(sh); +} + +static void +ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) +{ + /* kernel stack size limits the total number of disks */ + int disks = sh->disks; + struct page *xor_srcs[disks]; + + int count = 0, pd_idx = sh->pd_idx, i; + struct page *xor_dest; + int prexor = 0; + unsigned long flags; + + pr_debug("%s: stripe %llu\n", __func__, + (unsigned long long)sh->sector); + + /* check if prexor is active which means only process blocks + * that are part of a read-modify-write (written) + */ + if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) { + prexor = 1; + xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; + for (i = disks; i--; ) { + struct r5dev *dev = &sh->dev[i]; + if (dev->written) + xor_srcs[count++] = dev->page; + } + } else { + xor_dest = sh->dev[pd_idx].page; + for (i = disks; i--; ) { + struct r5dev *dev 
= &sh->dev[i]; + if (i != pd_idx) + xor_srcs[count++] = dev->page; + } + } + + /* 1/ if we prexor'd then the dest is reused as a source + * 2/ if we did not prexor then we are redoing the parity + * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST + * for the synchronous xor case + */ + flags = ASYNC_TX_DEP_ACK | ASYNC_TX_ACK | + (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST); + + atomic_inc(&sh->count); + + if (unlikely(count == 1)) { + flags &= ~(ASYNC_TX_XOR_DROP_DST | ASYNC_TX_XOR_ZERO_DST); + tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, + flags, tx, ops_complete_postxor, sh); + } else + tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, + flags, tx, ops_complete_postxor, sh); +} + +static void ops_complete_check(void *stripe_head_ref) +{ + struct stripe_head *sh = stripe_head_ref; + + pr_debug("%s: stripe %llu\n", __func__, + (unsigned long long)sh->sector); + + sh->check_state = check_state_check_result; + set_bit(STRIPE_HANDLE, &sh->state); + release_stripe(sh); +} + +static void ops_run_check(struct stripe_head *sh) +{ + /* kernel stack size limits the total number of disks */ + int disks = sh->disks; + struct page *xor_srcs[disks]; + struct dma_async_tx_descriptor *tx; + + int count = 0, pd_idx = sh->pd_idx, i; + struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; + + pr_debug("%s: stripe %llu\n", __func__, + (unsigned long long)sh->sector); + + for (i = disks; i--; ) { + struct r5dev *dev = &sh->dev[i]; + if (i != pd_idx) + xor_srcs[count++] = dev->page; + } + + tx = async_xor_zero_sum(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, + &sh->ops.zero_sum_result, 0, NULL, NULL, NULL); + + atomic_inc(&sh->count); + tx = async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx, + ops_complete_check, sh); +} + +static void raid5_run_ops(struct stripe_head *sh, unsigned long ops_request) +{ + int overlap_clear = 0, i, disks = sh->disks; + struct dma_async_tx_descriptor *tx = NULL; + + if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { + ops_run_biofill(sh); + overlap_clear++; + } + + if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) { + tx = ops_run_compute5(sh); + /* terminate the chain if postxor is not set to be run */ + if (tx && !test_bit(STRIPE_OP_POSTXOR, &ops_request)) + async_tx_ack(tx); + } + + if (test_bit(STRIPE_OP_PREXOR, &ops_request)) + tx = ops_run_prexor(sh, tx); + + if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) { + tx = ops_run_biodrain(sh, tx); + overlap_clear++; + } + + if (test_bit(STRIPE_OP_POSTXOR, &ops_request)) + ops_run_postxor(sh, tx); + + if (test_bit(STRIPE_OP_CHECK, &ops_request)) + ops_run_check(sh); + + if (overlap_clear) + for (i = disks; i--; ) { + struct r5dev *dev = &sh->dev[i]; + if (test_and_clear_bit(R5_Overlap, &dev->flags)) + wake_up(&sh->raid_conf->wait_for_overlap); + } +} + +static int grow_one_stripe(raid5_conf_t *conf) +{ + struct stripe_head *sh; + sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL); + if (!sh) + return 0; + memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev)); + sh->raid_conf = conf; + spin_lock_init(&sh->lock); + + if (grow_buffers(sh, conf->raid_disks)) { + shrink_buffers(sh, conf->raid_disks); + kmem_cache_free(conf->slab_cache, sh); + return 0; + } + sh->disks = conf->raid_disks; + /* we just created an active stripe so... 
*/
+ atomic_set(&sh->count, 1);
+ atomic_inc(&conf->active_stripes);
+ INIT_LIST_HEAD(&sh->lru);
+ release_stripe(sh);
+ return 1;
+}
+
+static int grow_stripes(raid5_conf_t *conf, int num)
+{
+ struct kmem_cache *sc;
+ int devs = conf->raid_disks;
+
+ sprintf(conf->cache_name[0], "raid5-%s", mdname(conf->mddev));
+ sprintf(conf->cache_name[1], "raid5-%s-alt", mdname(conf->mddev));
+ conf->active_name = 0;
+ sc = kmem_cache_create(conf->cache_name[conf->active_name],
+ sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
+ 0, 0, NULL);
+ if (!sc)
+ return 1;
+ conf->slab_cache = sc;
+ conf->pool_size = devs;
+ while (num--)
+ if (!grow_one_stripe(conf))
+ return 1;
+ return 0;
+}
+
+#ifdef CONFIG_MD_RAID5_RESHAPE
+static int resize_stripes(raid5_conf_t *conf, int newsize)
+{
+ /* Make all the stripes able to hold 'newsize' devices.
+ * New slots in each stripe get 'page' set to a new page.
+ *
+ * This happens in stages:
+ * 1/ create a new kmem_cache and allocate the required number of
+ * stripe_heads.
+ * 2/ gather all the old stripe_heads and transfer the pages across
+ * to the new stripe_heads. This will have the side effect of
+ * freezing the array as once all stripe_heads have been collected,
+ * no IO will be possible. Old stripe heads are freed once their
+ * pages have been transferred over, and the old kmem_cache is
+ * freed when all stripes are done.
+ * 3/ reallocate conf->disks to be suitably bigger. If this fails,
+ * we simply return a failure status - no need to clean anything up.
+ * 4/ allocate new pages for the new slots in the new stripe_heads.
+ * If this fails, we don't bother trying to shrink the
+ * stripe_heads down again, we just leave them as they are.
+ * As each stripe_head is processed the new one is released into
+ * active service.
+ *
+ * Once step2 is started, we cannot afford to wait for a write,
+ * so we use GFP_NOIO allocations.
+ */
+ struct stripe_head *osh, *nsh;
+ LIST_HEAD(newstripes);
+ struct disk_info *ndisks;
+ int err;
+ struct kmem_cache *sc;
+ int i;
+
+ if (newsize <= conf->pool_size)
+ return 0; /* never bother to shrink */
+
+ err = md_allow_write(conf->mddev);
+ if (err)
+ return err;
+
+ /* Step 1 */
+ sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
+ sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
+ 0, 0, NULL);
+ if (!sc)
+ return -ENOMEM;
+
+ for (i = conf->max_nr_stripes; i; i--) {
+ nsh = kmem_cache_alloc(sc, GFP_KERNEL);
+ if (!nsh)
+ break;
+
+ memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev));
+
+ nsh->raid_conf = conf;
+ spin_lock_init(&nsh->lock);
+
+ list_add(&nsh->lru, &newstripes);
+ }
+ if (i) {
+ /* didn't get enough, give up */
+ while (!list_empty(&newstripes)) {
+ nsh = list_entry(newstripes.next, struct stripe_head, lru);
+ list_del(&nsh->lru);
+ kmem_cache_free(sc, nsh);
+ }
+ kmem_cache_destroy(sc);
+ return -ENOMEM;
+ }
+ /* Step 2 - Must use GFP_NOIO now.
+ * OK, we have enough stripes, start collecting inactive + * stripes and copying them over + */ + list_for_each_entry(nsh, &newstripes, lru) { + spin_lock_irq(&conf->device_lock); + wait_event_lock_irq(conf->wait_for_stripe, + !list_empty(&conf->inactive_list), + conf->device_lock, + unplug_slaves(conf->mddev) + ); + osh = get_free_stripe(conf); + spin_unlock_irq(&conf->device_lock); + atomic_set(&nsh->count, 1); + for(i=0; i<conf->pool_size; i++) + nsh->dev[i].page = osh->dev[i].page; + for( ; i<newsize; i++) + nsh->dev[i].page = NULL; + kmem_cache_free(conf->slab_cache, osh); + } + kmem_cache_destroy(conf->slab_cache); + + /* Step 3. + * At this point, we are holding all the stripes so the array + * is completely stalled, so now is a good time to resize + * conf->disks. + */ + ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO); + if (ndisks) { + for (i=0; i<conf->raid_disks; i++) + ndisks[i] = conf->disks[i]; + kfree(conf->disks); + conf->disks = ndisks; + } else + err = -ENOMEM; + + /* Step 4, return new stripes to service */ + while(!list_empty(&newstripes)) { + nsh = list_entry(newstripes.next, struct stripe_head, lru); + list_del_init(&nsh->lru); + for (i=conf->raid_disks; i < newsize; i++) + if (nsh->dev[i].page == NULL) { + struct page *p = alloc_page(GFP_NOIO); + nsh->dev[i].page = p; + if (!p) + err = -ENOMEM; + } + release_stripe(nsh); + } + /* critical section pass, GFP_NOIO no longer needed */ + + conf->slab_cache = sc; + conf->active_name = 1-conf->active_name; + conf->pool_size = newsize; + return err; +} +#endif + +static int drop_one_stripe(raid5_conf_t *conf) +{ + struct stripe_head *sh; + + spin_lock_irq(&conf->device_lock); + sh = get_free_stripe(conf); + spin_unlock_irq(&conf->device_lock); + if (!sh) + return 0; + BUG_ON(atomic_read(&sh->count)); + shrink_buffers(sh, conf->pool_size); + kmem_cache_free(conf->slab_cache, sh); + atomic_dec(&conf->active_stripes); + return 1; +} + +static void shrink_stripes(raid5_conf_t *conf) +{ + while (drop_one_stripe(conf)) + ; + + if (conf->slab_cache) + kmem_cache_destroy(conf->slab_cache); + conf->slab_cache = NULL; +} + +static void raid5_end_read_request(struct bio * bi, int error) +{ + struct stripe_head *sh = bi->bi_private; + raid5_conf_t *conf = sh->raid_conf; + int disks = sh->disks, i; + int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); + char b[BDEVNAME_SIZE]; + mdk_rdev_t *rdev; + + + for (i=0 ; i<disks; i++) + if (bi == &sh->dev[i].req) + break; + + pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n", + (unsigned long long)sh->sector, i, atomic_read(&sh->count), + uptodate); + if (i == disks) { + BUG(); + return; + } + + if (uptodate) { + set_bit(R5_UPTODATE, &sh->dev[i].flags); + if (test_bit(R5_ReadError, &sh->dev[i].flags)) { + rdev = conf->disks[i].rdev; + printk_rl(KERN_INFO "raid5:%s: read error corrected" + " (%lu sectors at %llu on %s)\n", + mdname(conf->mddev), STRIPE_SECTORS, + (unsigned long long)(sh->sector + + rdev->data_offset), + bdevname(rdev->bdev, b)); + clear_bit(R5_ReadError, &sh->dev[i].flags); + clear_bit(R5_ReWrite, &sh->dev[i].flags); + } + if (atomic_read(&conf->disks[i].rdev->read_errors)) + atomic_set(&conf->disks[i].rdev->read_errors, 0); + } else { + const char *bdn = bdevname(conf->disks[i].rdev->bdev, b); + int retry = 0; + rdev = conf->disks[i].rdev; + + clear_bit(R5_UPTODATE, &sh->dev[i].flags); + atomic_inc(&rdev->read_errors); + if (conf->mddev->degraded) + printk_rl(KERN_WARNING + "raid5:%s: read error not correctable " + "(sector %llu on %s).\n", + 
mdname(conf->mddev), + (unsigned long long)(sh->sector + + rdev->data_offset), + bdn); + else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) + /* Oh, no!!! */ + printk_rl(KERN_WARNING + "raid5:%s: read error NOT corrected!! " + "(sector %llu on %s).\n", + mdname(conf->mddev), + (unsigned long long)(sh->sector + + rdev->data_offset), + bdn); + else if (atomic_read(&rdev->read_errors) + > conf->max_nr_stripes) + printk(KERN_WARNING + "raid5:%s: Too many read errors, failing device %s.\n", + mdname(conf->mddev), bdn); + else + retry = 1; + if (retry) + set_bit(R5_ReadError, &sh->dev[i].flags); + else { + clear_bit(R5_ReadError, &sh->dev[i].flags); + clear_bit(R5_ReWrite, &sh->dev[i].flags); + md_error(conf->mddev, rdev); + } + } + rdev_dec_pending(conf->disks[i].rdev, conf->mddev); + clear_bit(R5_LOCKED, &sh->dev[i].flags); + set_bit(STRIPE_HANDLE, &sh->state); + release_stripe(sh); +} + +static void raid5_end_write_request(struct bio *bi, int error) +{ + struct stripe_head *sh = bi->bi_private; + raid5_conf_t *conf = sh->raid_conf; + int disks = sh->disks, i; + int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); + + for (i=0 ; i<disks; i++) + if (bi == &sh->dev[i].req) + break; + + pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n", + (unsigned long long)sh->sector, i, atomic_read(&sh->count), + uptodate); + if (i == disks) { + BUG(); + return; + } + + if (!uptodate) + md_error(conf->mddev, conf->disks[i].rdev); + + rdev_dec_pending(conf->disks[i].rdev, conf->mddev); + + clear_bit(R5_LOCKED, &sh->dev[i].flags); + set_bit(STRIPE_HANDLE, &sh->state); + release_stripe(sh); +} + + +static sector_t compute_blocknr(struct stripe_head *sh, int i); + +static void raid5_build_block(struct stripe_head *sh, int i) +{ + struct r5dev *dev = &sh->dev[i]; + + bio_init(&dev->req); + dev->req.bi_io_vec = &dev->vec; + dev->req.bi_vcnt++; + dev->req.bi_max_vecs++; + dev->vec.bv_page = dev->page; + dev->vec.bv_len = STRIPE_SIZE; + dev->vec.bv_offset = 0; + + dev->req.bi_sector = sh->sector; + dev->req.bi_private = sh; + + dev->flags = 0; + dev->sector = compute_blocknr(sh, i); +} + +static void error(mddev_t *mddev, mdk_rdev_t *rdev) +{ + char b[BDEVNAME_SIZE]; + raid5_conf_t *conf = (raid5_conf_t *) mddev->private; + pr_debug("raid5: error called\n"); + + if (!test_bit(Faulty, &rdev->flags)) { + set_bit(MD_CHANGE_DEVS, &mddev->flags); + if (test_and_clear_bit(In_sync, &rdev->flags)) { + unsigned long flags; + spin_lock_irqsave(&conf->device_lock, flags); + mddev->degraded++; + spin_unlock_irqrestore(&conf->device_lock, flags); + /* + * if recovery was running, make sure it aborts. + */ + set_bit(MD_RECOVERY_INTR, &mddev->recovery); + } + set_bit(Faulty, &rdev->flags); + printk(KERN_ALERT + "raid5: Disk failure on %s, disabling device.\n" + "raid5: Operation continuing on %d devices.\n", + bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded); + } +} + +/* + * Input: a 'big' sector number, + * Output: index of the data and parity disk, and the sector # in them. 
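+ *
+ * A worked example (illustrative numbers only): with 5 raid_disks,
+ * 4 data_disks, 64KiB chunks (128 sectors per chunk) and
+ * ALGORITHM_LEFT_SYMMETRIC, r_sector 1000 splits into chunk_offset 104
+ * and chunk_number 7, so stripe = 1 and the raw dd_idx is 3; the parity
+ * disk is pd_idx = 4 - (1 % 5) = 3, the symmetric rotation moves the
+ * data block to dd_idx = (3 + 1 + 3) % 5 = 2, and the sector within
+ * that disk is 1 * 128 + 104 = 232.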
+ */ +static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks, + unsigned int data_disks, unsigned int * dd_idx, + unsigned int * pd_idx, raid5_conf_t *conf) +{ + long stripe; + unsigned long chunk_number; + unsigned int chunk_offset; + sector_t new_sector; + int sectors_per_chunk = conf->chunk_size >> 9; + + /* First compute the information on this sector */ + + /* + * Compute the chunk number and the sector offset inside the chunk + */ + chunk_offset = sector_div(r_sector, sectors_per_chunk); + chunk_number = r_sector; + BUG_ON(r_sector != chunk_number); + + /* + * Compute the stripe number + */ + stripe = chunk_number / data_disks; + + /* + * Compute the data disk and parity disk indexes inside the stripe + */ + *dd_idx = chunk_number % data_disks; + + /* + * Select the parity disk based on the user selected algorithm. + */ + switch(conf->level) { + case 4: + *pd_idx = data_disks; + break; + case 5: + switch (conf->algorithm) { + case ALGORITHM_LEFT_ASYMMETRIC: + *pd_idx = data_disks - stripe % raid_disks; + if (*dd_idx >= *pd_idx) + (*dd_idx)++; + break; + case ALGORITHM_RIGHT_ASYMMETRIC: + *pd_idx = stripe % raid_disks; + if (*dd_idx >= *pd_idx) + (*dd_idx)++; + break; + case ALGORITHM_LEFT_SYMMETRIC: + *pd_idx = data_disks - stripe % raid_disks; + *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks; + break; + case ALGORITHM_RIGHT_SYMMETRIC: + *pd_idx = stripe % raid_disks; + *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks; + break; + default: + printk(KERN_ERR "raid5: unsupported algorithm %d\n", + conf->algorithm); + } + break; + case 6: + + /**** FIX THIS ****/ + switch (conf->algorithm) { + case ALGORITHM_LEFT_ASYMMETRIC: + *pd_idx = raid_disks - 1 - (stripe % raid_disks); + if (*pd_idx == raid_disks-1) + (*dd_idx)++; /* Q D D D P */ + else if (*dd_idx >= *pd_idx) + (*dd_idx) += 2; /* D D P Q D */ + break; + case ALGORITHM_RIGHT_ASYMMETRIC: + *pd_idx = stripe % raid_disks; + if (*pd_idx == raid_disks-1) + (*dd_idx)++; /* Q D D D P */ + else if (*dd_idx >= *pd_idx) + (*dd_idx) += 2; /* D D P Q D */ + break; + case ALGORITHM_LEFT_SYMMETRIC: + *pd_idx = raid_disks - 1 - (stripe % raid_disks); + *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks; + break; + case ALGORITHM_RIGHT_SYMMETRIC: + *pd_idx = stripe % raid_disks; + *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks; + break; + default: + printk(KERN_CRIT "raid6: unsupported algorithm %d\n", + conf->algorithm); + } + break; + } + + /* + * Finally, compute the new sector number + */ + new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset; + return new_sector; +} + + +static sector_t compute_blocknr(struct stripe_head *sh, int i) +{ + raid5_conf_t *conf = sh->raid_conf; + int raid_disks = sh->disks; + int data_disks = raid_disks - conf->max_degraded; + sector_t new_sector = sh->sector, check; + int sectors_per_chunk = conf->chunk_size >> 9; + sector_t stripe; + int chunk_offset; + int chunk_number, dummy1, dummy2, dd_idx = i; + sector_t r_sector; + + + chunk_offset = sector_div(new_sector, sectors_per_chunk); + stripe = new_sector; + BUG_ON(new_sector != stripe); + + if (i == sh->pd_idx) + return 0; + switch(conf->level) { + case 4: break; + case 5: + switch (conf->algorithm) { + case ALGORITHM_LEFT_ASYMMETRIC: + case ALGORITHM_RIGHT_ASYMMETRIC: + if (i > sh->pd_idx) + i--; + break; + case ALGORITHM_LEFT_SYMMETRIC: + case ALGORITHM_RIGHT_SYMMETRIC: + if (i < sh->pd_idx) + i += raid_disks; + i -= (sh->pd_idx + 1); + break; + default: + printk(KERN_ERR "raid5: unsupported algorithm %d\n", + conf->algorithm); + 
} + break; + case 6: + if (i == raid6_next_disk(sh->pd_idx, raid_disks)) + return 0; /* It is the Q disk */ + switch (conf->algorithm) { + case ALGORITHM_LEFT_ASYMMETRIC: + case ALGORITHM_RIGHT_ASYMMETRIC: + if (sh->pd_idx == raid_disks-1) + i--; /* Q D D D P */ + else if (i > sh->pd_idx) + i -= 2; /* D D P Q D */ + break; + case ALGORITHM_LEFT_SYMMETRIC: + case ALGORITHM_RIGHT_SYMMETRIC: + if (sh->pd_idx == raid_disks-1) + i--; /* Q D D D P */ + else { + /* D D P Q D */ + if (i < sh->pd_idx) + i += raid_disks; + i -= (sh->pd_idx + 2); + } + break; + default: + printk(KERN_CRIT "raid6: unsupported algorithm %d\n", + conf->algorithm); + } + break; + } + + chunk_number = stripe * data_disks + i; + r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset; + + check = raid5_compute_sector(r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf); + if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) { + printk(KERN_ERR "compute_blocknr: map not correct\n"); + return 0; + } + return r_sector; +} + + + +/* + * Copy data between a page in the stripe cache, and one or more bion + * The page could align with the middle of the bio, or there could be + * several bion, each with several bio_vecs, which cover part of the page + * Multiple bion are linked together on bi_next. There may be extras + * at the end of this list. We ignore them. + */ +static void copy_data(int frombio, struct bio *bio, + struct page *page, + sector_t sector) +{ + char *pa = page_address(page); + struct bio_vec *bvl; + int i; + int page_offset; + + if (bio->bi_sector >= sector) + page_offset = (signed)(bio->bi_sector - sector) * 512; + else + page_offset = (signed)(sector - bio->bi_sector) * -512; + bio_for_each_segment(bvl, bio, i) { + int len = bio_iovec_idx(bio,i)->bv_len; + int clen; + int b_offset = 0; + + if (page_offset < 0) { + b_offset = -page_offset; + page_offset += b_offset; + len -= b_offset; + } + + if (len > 0 && page_offset + len > STRIPE_SIZE) + clen = STRIPE_SIZE - page_offset; + else clen = len; + + if (clen > 0) { + char *ba = __bio_kmap_atomic(bio, i, KM_USER0); + if (frombio) + memcpy(pa+page_offset, ba+b_offset, clen); + else + memcpy(ba+b_offset, pa+page_offset, clen); + __bio_kunmap_atomic(ba, KM_USER0); + } + if (clen < len) /* hit end of page */ + break; + page_offset += len; + } +} + +#define check_xor() do { \ + if (count == MAX_XOR_BLOCKS) { \ + xor_blocks(count, STRIPE_SIZE, dest, ptr);\ + count = 0; \ + } \ + } while(0) + +static void compute_parity6(struct stripe_head *sh, int method) +{ + raid6_conf_t *conf = sh->raid_conf; + int i, pd_idx = sh->pd_idx, qd_idx, d0_idx, disks = sh->disks, count; + struct bio *chosen; + /**** FIX THIS: This could be very bad if disks is close to 256 ****/ + void *ptrs[disks]; + + qd_idx = raid6_next_disk(pd_idx, disks); + d0_idx = raid6_next_disk(qd_idx, disks); + + pr_debug("compute_parity, stripe %llu, method %d\n", + (unsigned long long)sh->sector, method); + + switch(method) { + case READ_MODIFY_WRITE: + BUG(); /* READ_MODIFY_WRITE N/A for RAID-6 */ + case RECONSTRUCT_WRITE: + for (i= disks; i-- ;) + if ( i != pd_idx && i != qd_idx && sh->dev[i].towrite ) { + chosen = sh->dev[i].towrite; + sh->dev[i].towrite = NULL; + + if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) + wake_up(&conf->wait_for_overlap); + + BUG_ON(sh->dev[i].written); + sh->dev[i].written = chosen; + } + break; + case CHECK_PARITY: + BUG(); /* Not implemented yet */ + } + + for (i = disks; i--;) + if (sh->dev[i].written) { + sector_t sector = 
sh->dev[i].sector; + struct bio *wbi = sh->dev[i].written; + while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) { + copy_data(1, wbi, sh->dev[i].page, sector); + wbi = r5_next_bio(wbi, sector); + } + + set_bit(R5_LOCKED, &sh->dev[i].flags); + set_bit(R5_UPTODATE, &sh->dev[i].flags); + } + +// switch(method) { +// case RECONSTRUCT_WRITE: +// case CHECK_PARITY: +// case UPDATE_PARITY: + /* Note that unlike RAID-5, the ordering of the disks matters greatly. */ + /* FIX: Is this ordering of drives even remotely optimal? */ + count = 0; + i = d0_idx; + do { + ptrs[count++] = page_address(sh->dev[i].page); + if (count <= disks-2 && !test_bit(R5_UPTODATE, &sh->dev[i].flags)) + printk("block %d/%d not uptodate on parity calc\n", i,count); + i = raid6_next_disk(i, disks); + } while ( i != d0_idx ); +// break; +// } + + raid6_call.gen_syndrome(disks, STRIPE_SIZE, ptrs); + + switch(method) { + case RECONSTRUCT_WRITE: + set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); + set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags); + set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); + set_bit(R5_LOCKED, &sh->dev[qd_idx].flags); + break; + case UPDATE_PARITY: + set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); + set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags); + break; + } +} + + +/* Compute one missing block */ +static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero) +{ + int i, count, disks = sh->disks; + void *ptr[MAX_XOR_BLOCKS], *dest, *p; + int pd_idx = sh->pd_idx; + int qd_idx = raid6_next_disk(pd_idx, disks); + + pr_debug("compute_block_1, stripe %llu, idx %d\n", + (unsigned long long)sh->sector, dd_idx); + + if ( dd_idx == qd_idx ) { + /* We're actually computing the Q drive */ + compute_parity6(sh, UPDATE_PARITY); + } else { + dest = page_address(sh->dev[dd_idx].page); + if (!nozero) memset(dest, 0, STRIPE_SIZE); + count = 0; + for (i = disks ; i--; ) { + if (i == dd_idx || i == qd_idx) + continue; + p = page_address(sh->dev[i].page); + if (test_bit(R5_UPTODATE, &sh->dev[i].flags)) + ptr[count++] = p; + else + printk("compute_block() %d, stripe %llu, %d" + " not present\n", dd_idx, + (unsigned long long)sh->sector, i); + + check_xor(); + } + if (count) + xor_blocks(count, STRIPE_SIZE, dest, ptr); + if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); + else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); + } +} + +/* Compute two missing blocks */ +static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2) +{ + int i, count, disks = sh->disks; + int pd_idx = sh->pd_idx; + int qd_idx = raid6_next_disk(pd_idx, disks); + int d0_idx = raid6_next_disk(qd_idx, disks); + int faila, failb; + + /* faila and failb are disk numbers relative to d0_idx */ + /* pd_idx become disks-2 and qd_idx become disks-1 */ + faila = (dd_idx1 < d0_idx) ? dd_idx1+(disks-d0_idx) : dd_idx1-d0_idx; + failb = (dd_idx2 < d0_idx) ? dd_idx2+(disks-d0_idx) : dd_idx2-d0_idx; + + BUG_ON(faila == failb); + if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; } + + pr_debug("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n", + (unsigned long long)sh->sector, dd_idx1, dd_idx2, faila, failb); + + if ( failb == disks-1 ) { + /* Q disk is one of the missing disks */ + if ( faila == disks-2 ) { + /* Missing P+Q, just recompute */ + compute_parity6(sh, UPDATE_PARITY); + return; + } else { + /* We're missing D+Q; recompute D from P */ + compute_block_1(sh, (dd_idx1 == qd_idx) ? dd_idx2 : dd_idx1, 0); + compute_parity6(sh, UPDATE_PARITY); /* Is this necessary? 
*/ + return; + } + } + + /* We're missing D+P or D+D; build pointer table */ + { + /**** FIX THIS: This could be very bad if disks is close to 256 ****/ + void *ptrs[disks]; + + count = 0; + i = d0_idx; + do { + ptrs[count++] = page_address(sh->dev[i].page); + i = raid6_next_disk(i, disks); + if (i != dd_idx1 && i != dd_idx2 && + !test_bit(R5_UPTODATE, &sh->dev[i].flags)) + printk("compute_2 with missing block %d/%d\n", count, i); + } while ( i != d0_idx ); + + if ( failb == disks-2 ) { + /* We're missing D+P. */ + raid6_datap_recov(disks, STRIPE_SIZE, faila, ptrs); + } else { + /* We're missing D+D. */ + raid6_2data_recov(disks, STRIPE_SIZE, faila, failb, ptrs); + } + + /* Both the above update both missing blocks */ + set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags); + set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags); + } +} + +static void +schedule_reconstruction5(struct stripe_head *sh, struct stripe_head_state *s, + int rcw, int expand) +{ + int i, pd_idx = sh->pd_idx, disks = sh->disks; + + if (rcw) { + /* if we are not expanding this is a proper write request, and + * there will be bios with new data to be drained into the + * stripe cache + */ + if (!expand) { + sh->reconstruct_state = reconstruct_state_drain_run; + set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); + } else + sh->reconstruct_state = reconstruct_state_run; + + set_bit(STRIPE_OP_POSTXOR, &s->ops_request); + + for (i = disks; i--; ) { + struct r5dev *dev = &sh->dev[i]; + + if (dev->towrite) { + set_bit(R5_LOCKED, &dev->flags); + set_bit(R5_Wantdrain, &dev->flags); + if (!expand) + clear_bit(R5_UPTODATE, &dev->flags); + s->locked++; + } + } + if (s->locked + 1 == disks) + if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) + atomic_inc(&sh->raid_conf->pending_full_writes); + } else { + BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || + test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); + + sh->reconstruct_state = reconstruct_state_prexor_drain_run; + set_bit(STRIPE_OP_PREXOR, &s->ops_request); + set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); + set_bit(STRIPE_OP_POSTXOR, &s->ops_request); + + for (i = disks; i--; ) { + struct r5dev *dev = &sh->dev[i]; + if (i == pd_idx) + continue; + + if (dev->towrite && + (test_bit(R5_UPTODATE, &dev->flags) || + test_bit(R5_Wantcompute, &dev->flags))) { + set_bit(R5_Wantdrain, &dev->flags); + set_bit(R5_LOCKED, &dev->flags); + clear_bit(R5_UPTODATE, &dev->flags); + s->locked++; + } + } + } + + /* keep the parity disk locked while asynchronous operations + * are in flight + */ + set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); + clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); + s->locked++; + + pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n", + __func__, (unsigned long long)sh->sector, + s->locked, s->ops_request); +} + +/* + * Each stripe/dev can have one or more bion attached. + * toread/towrite point to the first in a chain. + * The bi_next chain must be in order. 
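+ *
+ * For instance (illustrative only, assuming 4KiB pages so STRIPE_SECTORS
+ * is 8): on a stripe device starting at sector 16, writes covering
+ * sectors 16-19 and 20-23 chain in that order and, once both are
+ * attached, mark the device R5_OVERWRITE; a later write covering
+ * sectors 18-21 would be rejected as an overlap and has to wait for
+ * the chain to drain.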
+ */ +static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite) +{ + struct bio **bip; + raid5_conf_t *conf = sh->raid_conf; + int firstwrite=0; + + pr_debug("adding bh b#%llu to stripe s#%llu\n", + (unsigned long long)bi->bi_sector, + (unsigned long long)sh->sector); + + + spin_lock(&sh->lock); + spin_lock_irq(&conf->device_lock); + if (forwrite) { + bip = &sh->dev[dd_idx].towrite; + if (*bip == NULL && sh->dev[dd_idx].written == NULL) + firstwrite = 1; + } else + bip = &sh->dev[dd_idx].toread; + while (*bip && (*bip)->bi_sector < bi->bi_sector) { + if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector) + goto overlap; + bip = & (*bip)->bi_next; + } + if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9)) + goto overlap; + + BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next); + if (*bip) + bi->bi_next = *bip; + *bip = bi; + bi->bi_phys_segments++; + spin_unlock_irq(&conf->device_lock); + spin_unlock(&sh->lock); + + pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", + (unsigned long long)bi->bi_sector, + (unsigned long long)sh->sector, dd_idx); + + if (conf->mddev->bitmap && firstwrite) { + bitmap_startwrite(conf->mddev->bitmap, sh->sector, + STRIPE_SECTORS, 0); + sh->bm_seq = conf->seq_flush+1; + set_bit(STRIPE_BIT_DELAY, &sh->state); + } + + if (forwrite) { + /* check if page is covered */ + sector_t sector = sh->dev[dd_idx].sector; + for (bi=sh->dev[dd_idx].towrite; + sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && + bi && bi->bi_sector <= sector; + bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { + if (bi->bi_sector + (bi->bi_size>>9) >= sector) + sector = bi->bi_sector + (bi->bi_size>>9); + } + if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) + set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags); + } + return 1; + + overlap: + set_bit(R5_Overlap, &sh->dev[dd_idx].flags); + spin_unlock_irq(&conf->device_lock); + spin_unlock(&sh->lock); + return 0; +} + +static void end_reshape(raid5_conf_t *conf); + +static int page_is_zero(struct page *p) +{ + char *a = page_address(p); + return ((*(u32*)a) == 0 && + memcmp(a, a+4, STRIPE_SIZE-4)==0); +} + +static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks) +{ + int sectors_per_chunk = conf->chunk_size >> 9; + int pd_idx, dd_idx; + int chunk_offset = sector_div(stripe, sectors_per_chunk); + + raid5_compute_sector(stripe * (disks - conf->max_degraded) + *sectors_per_chunk + chunk_offset, + disks, disks - conf->max_degraded, + &dd_idx, &pd_idx, conf); + return pd_idx; +} + +static void +handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh, + struct stripe_head_state *s, int disks, + struct bio **return_bi) +{ + int i; + for (i = disks; i--; ) { + struct bio *bi; + int bitmap_end = 0; + + if (test_bit(R5_ReadError, &sh->dev[i].flags)) { + mdk_rdev_t *rdev; + rcu_read_lock(); + rdev = rcu_dereference(conf->disks[i].rdev); + if (rdev && test_bit(In_sync, &rdev->flags)) + /* multiple read failures in one stripe */ + md_error(conf->mddev, rdev); + rcu_read_unlock(); + } + spin_lock_irq(&conf->device_lock); + /* fail all writes first */ + bi = sh->dev[i].towrite; + sh->dev[i].towrite = NULL; + if (bi) { + s->to_write--; + bitmap_end = 1; + } + + if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) + wake_up(&conf->wait_for_overlap); + + while (bi && bi->bi_sector < + sh->dev[i].sector + STRIPE_SECTORS) { + struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); + clear_bit(BIO_UPTODATE, &bi->bi_flags); + if (!raid5_dec_bi_phys_segments(bi)) { + 
md_write_end(conf->mddev); + bi->bi_next = *return_bi; + *return_bi = bi; + } + bi = nextbi; + } + /* and fail all 'written' */ + bi = sh->dev[i].written; + sh->dev[i].written = NULL; + if (bi) bitmap_end = 1; + while (bi && bi->bi_sector < + sh->dev[i].sector + STRIPE_SECTORS) { + struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); + clear_bit(BIO_UPTODATE, &bi->bi_flags); + if (!raid5_dec_bi_phys_segments(bi)) { + md_write_end(conf->mddev); + bi->bi_next = *return_bi; + *return_bi = bi; + } + bi = bi2; + } + + /* fail any reads if this device is non-operational and + * the data has not reached the cache yet. + */ + if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && + (!test_bit(R5_Insync, &sh->dev[i].flags) || + test_bit(R5_ReadError, &sh->dev[i].flags))) { + bi = sh->dev[i].toread; + sh->dev[i].toread = NULL; + if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) + wake_up(&conf->wait_for_overlap); + if (bi) s->to_read--; + while (bi && bi->bi_sector < + sh->dev[i].sector + STRIPE_SECTORS) { + struct bio *nextbi = + r5_next_bio(bi, sh->dev[i].sector); + clear_bit(BIO_UPTODATE, &bi->bi_flags); + if (!raid5_dec_bi_phys_segments(bi)) { + bi->bi_next = *return_bi; + *return_bi = bi; + } + bi = nextbi; + } + } + spin_unlock_irq(&conf->device_lock); + if (bitmap_end) + bitmap_endwrite(conf->mddev->bitmap, sh->sector, + STRIPE_SECTORS, 0, 0); + } + + if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) + if (atomic_dec_and_test(&conf->pending_full_writes)) + md_wakeup_thread(conf->mddev->thread); +} + +/* fetch_block5 - checks the given member device to see if its data needs + * to be read or computed to satisfy a request. + * + * Returns 1 when no more member devices need to be checked, otherwise returns + * 0 to tell the loop in handle_stripe_fill5 to continue + */ +static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s, + int disk_idx, int disks) +{ + struct r5dev *dev = &sh->dev[disk_idx]; + struct r5dev *failed_dev = &sh->dev[s->failed_num]; + + /* is the data in this block needed, and can we get it? */ + if (!test_bit(R5_LOCKED, &dev->flags) && + !test_bit(R5_UPTODATE, &dev->flags) && + (dev->toread || + (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || + s->syncing || s->expanding || + (s->failed && + (failed_dev->toread || + (failed_dev->towrite && + !test_bit(R5_OVERWRITE, &failed_dev->flags)))))) { + /* We would like to get this block, possibly by computing it, + * otherwise read it if the backing disk is insync + */ + if ((s->uptodate == disks - 1) && + (s->failed && disk_idx == s->failed_num)) { + set_bit(STRIPE_COMPUTE_RUN, &sh->state); + set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); + set_bit(R5_Wantcompute, &dev->flags); + sh->ops.target = disk_idx; + s->req_compute = 1; + /* Careful: from this point on 'uptodate' is in the eye + * of raid5_run_ops which services 'compute' operations + * before writes. R5_Wantcompute flags a block that will + * be R5_UPTODATE by the time it is needed for a + * subsequent operation. + */ + s->uptodate++; + return 1; /* uptodate + compute == disks */ + } else if (test_bit(R5_Insync, &dev->flags)) { + set_bit(R5_LOCKED, &dev->flags); + set_bit(R5_Wantread, &dev->flags); + s->locked++; + pr_debug("Reading block %d (sync=%d)\n", disk_idx, + s->syncing); + } + } + + return 0; +} + +/** + * handle_stripe_fill5 - read or compute data to satisfy pending requests. 
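
The decision in fetch_block5() above reduces to: if every other block is (or is about to be) up to date, the missing block can be computed from parity; otherwise it must be read, and only from an in-sync member. A small stand-alone sketch of that decision, with plain ints standing in for the R5_* flag bits:

    #include <stdio.h>

    enum action { DO_NOTHING, COMPUTE_BLOCK, READ_BLOCK };

    /* Illustrative only.
     * disks    - members in the stripe
     * uptodate - blocks already valid (or already flagged for compute)
     * wanted   - this block is needed (read, partial write, sync, ...)
     * failed   - index of the failed device, or -1
     * insync   - the backing device for this block is in sync
     */
    static enum action fetch_decision(int disks, int uptodate, int wanted,
                                      int disk_idx, int failed, int insync)
    {
        if (!wanted)
            return DO_NOTHING;
        if (uptodate == disks - 1 && failed >= 0 && disk_idx == failed)
            return COMPUTE_BLOCK;   /* parity plus the other data blocks suffice */
        if (insync)
            return READ_BLOCK;      /* cheapest: just read it */
        return DO_NOTHING;          /* cannot get it yet */
    }

    int main(void)
    {
        /* 5-disk RAID-5, disk 3 failed, its block is wanted */
        printf("%d\n", fetch_decision(5, 4, 1, 3, 3, 0));   /* 1: compute */
        /* same stripe, healthy disk 1 wanted but only 2 blocks uptodate */
        printf("%d\n", fetch_decision(5, 2, 1, 1, 3, 1));   /* 2: read */
        return 0;
    }
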
+ */ +static void handle_stripe_fill5(struct stripe_head *sh, + struct stripe_head_state *s, int disks) +{ + int i; + + /* look for blocks to read/compute, skip this if a compute + * is already in flight, or if the stripe contents are in the + * midst of changing due to a write + */ + if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && + !sh->reconstruct_state) + for (i = disks; i--; ) + if (fetch_block5(sh, s, i, disks)) + break; + set_bit(STRIPE_HANDLE, &sh->state); +} + +static void handle_stripe_fill6(struct stripe_head *sh, + struct stripe_head_state *s, struct r6_state *r6s, + int disks) +{ + int i; + for (i = disks; i--; ) { + struct r5dev *dev = &sh->dev[i]; + if (!test_bit(R5_LOCKED, &dev->flags) && + !test_bit(R5_UPTODATE, &dev->flags) && + (dev->toread || (dev->towrite && + !test_bit(R5_OVERWRITE, &dev->flags)) || + s->syncing || s->expanding || + (s->failed >= 1 && + (sh->dev[r6s->failed_num[0]].toread || + s->to_write)) || + (s->failed >= 2 && + (sh->dev[r6s->failed_num[1]].toread || + s->to_write)))) { + /* we would like to get this block, possibly + * by computing it, but we might not be able to + */ + if ((s->uptodate == disks - 1) && + (s->failed && (i == r6s->failed_num[0] || + i == r6s->failed_num[1]))) { + pr_debug("Computing stripe %llu block %d\n", + (unsigned long long)sh->sector, i); + compute_block_1(sh, i, 0); + s->uptodate++; + } else if ( s->uptodate == disks-2 && s->failed >= 2 ) { + /* Computing 2-failure is *very* expensive; only + * do it if failed >= 2 + */ + int other; + for (other = disks; other--; ) { + if (other == i) + continue; + if (!test_bit(R5_UPTODATE, + &sh->dev[other].flags)) + break; + } + BUG_ON(other < 0); + pr_debug("Computing stripe %llu blocks %d,%d\n", + (unsigned long long)sh->sector, + i, other); + compute_block_2(sh, i, other); + s->uptodate += 2; + } else if (test_bit(R5_Insync, &dev->flags)) { + set_bit(R5_LOCKED, &dev->flags); + set_bit(R5_Wantread, &dev->flags); + s->locked++; + pr_debug("Reading block %d (sync=%d)\n", + i, s->syncing); + } + } + } + set_bit(STRIPE_HANDLE, &sh->state); +} + + +/* handle_stripe_clean_event + * any written block on an uptodate or failed drive can be returned. + * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but + * never LOCKED, so we don't need to test 'failed' directly. 
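
handle_stripe_dirtying5() a little further down weighs read-modify-write (read the old copy of each block being written, plus the old parity) against reconstruct-write (read every block that is not being completely overwritten) and takes whichever needs fewer pre-reads. A toy version of that accounting, assuming every member is in sync and nothing is locked or pending compute:

    #include <stdio.h>

    /* Count pre-reads for both write strategies on one RAID-5 stripe.
     * towrite[i]   - new data queued for disk i
     * overwrite[i] - that data covers the whole block
     * uptodate[i]  - the old block is already in the stripe cache
     */
    static void count_prereads(int disks, int pd_idx, const int *towrite,
                               const int *overwrite, const int *uptodate,
                               int *rmw, int *rcw)
    {
        int i;

        *rmw = *rcw = 0;
        for (i = 0; i < disks; i++) {
            /* r-m-w needs the old copy of each written block plus old parity */
            if ((towrite[i] || i == pd_idx) && !uptodate[i])
                (*rmw)++;
            /* reconstruct-write needs every block it will not fully rewrite */
            if (!overwrite[i] && i != pd_idx && !uptodate[i])
                (*rcw)++;
        }
    }

    int main(void)
    {
        /* 5 disks, parity on disk 4, partial write to disk 0 only */
        int towrite[5]   = { 1, 0, 0, 0, 0 };
        int overwrite[5] = { 0, 0, 0, 0, 0 };
        int uptodate[5]  = { 0, 0, 0, 0, 0 };
        int rmw, rcw;

        count_prereads(5, 4, towrite, overwrite, uptodate, &rmw, &rcw);
        printf("rmw=%d rcw=%d -> %s\n", rmw, rcw,
               rmw < rcw ? "read-modify-write" : "reconstruct-write");
        return 0;
    }

With a single small write the r-m-w path wins (two reads instead of four here); once most of the stripe is being rewritten, reconstruct-write becomes cheaper because the old data and old parity no longer need to be read back.
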
+ */ +static void handle_stripe_clean_event(raid5_conf_t *conf, + struct stripe_head *sh, int disks, struct bio **return_bi) +{ + int i; + struct r5dev *dev; + + for (i = disks; i--; ) + if (sh->dev[i].written) { + dev = &sh->dev[i]; + if (!test_bit(R5_LOCKED, &dev->flags) && + test_bit(R5_UPTODATE, &dev->flags)) { + /* We can return any write requests */ + struct bio *wbi, *wbi2; + int bitmap_end = 0; + pr_debug("Return write for disc %d\n", i); + spin_lock_irq(&conf->device_lock); + wbi = dev->written; + dev->written = NULL; + while (wbi && wbi->bi_sector < + dev->sector + STRIPE_SECTORS) { + wbi2 = r5_next_bio(wbi, dev->sector); + if (!raid5_dec_bi_phys_segments(wbi)) { + md_write_end(conf->mddev); + wbi->bi_next = *return_bi; + *return_bi = wbi; + } + wbi = wbi2; + } + if (dev->towrite == NULL) + bitmap_end = 1; + spin_unlock_irq(&conf->device_lock); + if (bitmap_end) + bitmap_endwrite(conf->mddev->bitmap, + sh->sector, + STRIPE_SECTORS, + !test_bit(STRIPE_DEGRADED, &sh->state), + 0); + } + } + + if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) + if (atomic_dec_and_test(&conf->pending_full_writes)) + md_wakeup_thread(conf->mddev->thread); +} + +static void handle_stripe_dirtying5(raid5_conf_t *conf, + struct stripe_head *sh, struct stripe_head_state *s, int disks) +{ + int rmw = 0, rcw = 0, i; + for (i = disks; i--; ) { + /* would I have to read this buffer for read_modify_write */ + struct r5dev *dev = &sh->dev[i]; + if ((dev->towrite || i == sh->pd_idx) && + !test_bit(R5_LOCKED, &dev->flags) && + !(test_bit(R5_UPTODATE, &dev->flags) || + test_bit(R5_Wantcompute, &dev->flags))) { + if (test_bit(R5_Insync, &dev->flags)) + rmw++; + else + rmw += 2*disks; /* cannot read it */ + } + /* Would I have to read this buffer for reconstruct_write */ + if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx && + !test_bit(R5_LOCKED, &dev->flags) && + !(test_bit(R5_UPTODATE, &dev->flags) || + test_bit(R5_Wantcompute, &dev->flags))) { + if (test_bit(R5_Insync, &dev->flags)) rcw++; + else + rcw += 2*disks; + } + } + pr_debug("for sector %llu, rmw=%d rcw=%d\n", + (unsigned long long)sh->sector, rmw, rcw); + set_bit(STRIPE_HANDLE, &sh->state); + if (rmw < rcw && rmw > 0) + /* prefer read-modify-write, but need to get some data */ + for (i = disks; i--; ) { + struct r5dev *dev = &sh->dev[i]; + if ((dev->towrite || i == sh->pd_idx) && + !test_bit(R5_LOCKED, &dev->flags) && + !(test_bit(R5_UPTODATE, &dev->flags) || + test_bit(R5_Wantcompute, &dev->flags)) && + test_bit(R5_Insync, &dev->flags)) { + if ( + test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { + pr_debug("Read_old block " + "%d for r-m-w\n", i); + set_bit(R5_LOCKED, &dev->flags); + set_bit(R5_Wantread, &dev->flags); + s->locked++; + } else { + set_bit(STRIPE_DELAYED, &sh->state); + set_bit(STRIPE_HANDLE, &sh->state); + } + } + } + if (rcw <= rmw && rcw > 0) + /* want reconstruct write, but need to get some data */ + for (i = disks; i--; ) { + struct r5dev *dev = &sh->dev[i]; + if (!test_bit(R5_OVERWRITE, &dev->flags) && + i != sh->pd_idx && + !test_bit(R5_LOCKED, &dev->flags) && + !(test_bit(R5_UPTODATE, &dev->flags) || + test_bit(R5_Wantcompute, &dev->flags)) && + test_bit(R5_Insync, &dev->flags)) { + if ( + test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { + pr_debug("Read_old block " + "%d for Reconstruct\n", i); + set_bit(R5_LOCKED, &dev->flags); + set_bit(R5_Wantread, &dev->flags); + s->locked++; + } else { + set_bit(STRIPE_DELAYED, &sh->state); + set_bit(STRIPE_HANDLE, &sh->state); + } + } + } + /* now if nothing is locked, and if we 
have enough data, + * we can start a write request + */ + /* since handle_stripe can be called at any time we need to handle the + * case where a compute block operation has been submitted and then a + * subsequent call wants to start a write request. raid5_run_ops only + * handles the case where compute block and postxor are requested + * simultaneously. If this is not the case then new writes need to be + * held off until the compute completes. + */ + if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && + (s->locked == 0 && (rcw == 0 || rmw == 0) && + !test_bit(STRIPE_BIT_DELAY, &sh->state))) + schedule_reconstruction5(sh, s, rcw == 0, 0); +} + +static void handle_stripe_dirtying6(raid5_conf_t *conf, + struct stripe_head *sh, struct stripe_head_state *s, + struct r6_state *r6s, int disks) +{ + int rcw = 0, must_compute = 0, pd_idx = sh->pd_idx, i; + int qd_idx = r6s->qd_idx; + for (i = disks; i--; ) { + struct r5dev *dev = &sh->dev[i]; + /* Would I have to read this buffer for reconstruct_write */ + if (!test_bit(R5_OVERWRITE, &dev->flags) + && i != pd_idx && i != qd_idx + && (!test_bit(R5_LOCKED, &dev->flags) + ) && + !test_bit(R5_UPTODATE, &dev->flags)) { + if (test_bit(R5_Insync, &dev->flags)) rcw++; + else { + pr_debug("raid6: must_compute: " + "disk %d flags=%#lx\n", i, dev->flags); + must_compute++; + } + } + } + pr_debug("for sector %llu, rcw=%d, must_compute=%d\n", + (unsigned long long)sh->sector, rcw, must_compute); + set_bit(STRIPE_HANDLE, &sh->state); + + if (rcw > 0) + /* want reconstruct write, but need to get some data */ + for (i = disks; i--; ) { + struct r5dev *dev = &sh->dev[i]; + if (!test_bit(R5_OVERWRITE, &dev->flags) + && !(s->failed == 0 && (i == pd_idx || i == qd_idx)) + && !test_bit(R5_LOCKED, &dev->flags) && + !test_bit(R5_UPTODATE, &dev->flags) && + test_bit(R5_Insync, &dev->flags)) { + if ( + test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { + pr_debug("Read_old stripe %llu " + "block %d for Reconstruct\n", + (unsigned long long)sh->sector, i); + set_bit(R5_LOCKED, &dev->flags); + set_bit(R5_Wantread, &dev->flags); + s->locked++; + } else { + pr_debug("Request delayed stripe %llu " + "block %d for Reconstruct\n", + (unsigned long long)sh->sector, i); + set_bit(STRIPE_DELAYED, &sh->state); + set_bit(STRIPE_HANDLE, &sh->state); + } + } + } + /* now if nothing is locked, and if we have enough data, we can start a + * write request + */ + if (s->locked == 0 && rcw == 0 && + !test_bit(STRIPE_BIT_DELAY, &sh->state)) { + if (must_compute > 0) { + /* We have failed blocks and need to compute them */ + switch (s->failed) { + case 0: + BUG(); + case 1: + compute_block_1(sh, r6s->failed_num[0], 0); + break; + case 2: + compute_block_2(sh, r6s->failed_num[0], + r6s->failed_num[1]); + break; + default: /* This request should have been failed? 
*/ + BUG(); + } + } + + pr_debug("Computing parity for stripe %llu\n", + (unsigned long long)sh->sector); + compute_parity6(sh, RECONSTRUCT_WRITE); + /* now every locked buffer is ready to be written */ + for (i = disks; i--; ) + if (test_bit(R5_LOCKED, &sh->dev[i].flags)) { + pr_debug("Writing stripe %llu block %d\n", + (unsigned long long)sh->sector, i); + s->locked++; + set_bit(R5_Wantwrite, &sh->dev[i].flags); + } + if (s->locked == disks) + if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) + atomic_inc(&conf->pending_full_writes); + /* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */ + set_bit(STRIPE_INSYNC, &sh->state); + + if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { + atomic_dec(&conf->preread_active_stripes); + if (atomic_read(&conf->preread_active_stripes) < + IO_THRESHOLD) + md_wakeup_thread(conf->mddev->thread); + } + } +} + +static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh, + struct stripe_head_state *s, int disks) +{ + struct r5dev *dev = NULL; + + set_bit(STRIPE_HANDLE, &sh->state); + + switch (sh->check_state) { + case check_state_idle: + /* start a new check operation if there are no failures */ + if (s->failed == 0) { + BUG_ON(s->uptodate != disks); + sh->check_state = check_state_run; + set_bit(STRIPE_OP_CHECK, &s->ops_request); + clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); + s->uptodate--; + break; + } + dev = &sh->dev[s->failed_num]; + /* fall through */ + case check_state_compute_result: + sh->check_state = check_state_idle; + if (!dev) + dev = &sh->dev[sh->pd_idx]; + + /* check that a write has not made the stripe insync */ + if (test_bit(STRIPE_INSYNC, &sh->state)) + break; + + /* either failed parity check, or recovery is happening */ + BUG_ON(!test_bit(R5_UPTODATE, &dev->flags)); + BUG_ON(s->uptodate != disks); + + set_bit(R5_LOCKED, &dev->flags); + s->locked++; + set_bit(R5_Wantwrite, &dev->flags); + + clear_bit(STRIPE_DEGRADED, &sh->state); + set_bit(STRIPE_INSYNC, &sh->state); + break; + case check_state_run: + break; /* we will be called again upon completion */ + case check_state_check_result: + sh->check_state = check_state_idle; + + /* if a failure occurred during the check operation, leave + * STRIPE_INSYNC not set and let the stripe be handled again + */ + if (s->failed) + break; + + /* handle a successful check operation, if parity is correct + * we are done. Otherwise update the mismatch count and repair + * parity if !MD_RECOVERY_CHECK + */ + if (sh->ops.zero_sum_result == 0) + /* parity is correct (on disc, + * not in buffer any more) + */ + set_bit(STRIPE_INSYNC, &sh->state); + else { + conf->mddev->resync_mismatches += STRIPE_SECTORS; + if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) + /* don't try to repair!! 
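
The check path above ultimately asks one question: do the data blocks XOR together into the parity block? A stand-alone illustration of that zero-syndrome test on tiny buffers (the driver performs the same test page-sized and asynchronously through the xor/async_tx machinery):

    #include <stdio.h>
    #include <string.h>

    #define BLK 8   /* tiny blocks instead of STRIPE_SIZE */

    /* XOR the parity block with every data block; an all-zero result
     * means the parity is consistent. */
    static int parity_ok(unsigned char blocks[][BLK], int ndata,
                         const unsigned char *parity)
    {
        unsigned char sum[BLK];
        int i, j;

        memcpy(sum, parity, BLK);
        for (i = 0; i < ndata; i++)
            for (j = 0; j < BLK; j++)
                sum[j] ^= blocks[i][j];
        for (j = 0; j < BLK; j++)
            if (sum[j])
                return 0;   /* mismatch: count it, maybe repair */
        return 1;
    }

    int main(void)
    {
        unsigned char d[3][BLK] = {
            { 1, 2, 3, 4, 5, 6, 7, 8 },
            { 8, 7, 6, 5, 4, 3, 2, 1 },
            { 0, 0, 0, 0, 0, 0, 0, 0 },
        };
        unsigned char p[BLK];
        int j;

        for (j = 0; j < BLK; j++)                   /* build correct parity */
            p[j] = d[0][j] ^ d[1][j] ^ d[2][j];
        printf("clean: %d\n", parity_ok(d, 3, p));      /* 1 */
        d[1][0] ^= 0xff;                                /* corrupt one byte */
        printf("corrupt: %d\n", parity_ok(d, 3, p));    /* 0 */
        return 0;
    }
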
*/ + set_bit(STRIPE_INSYNC, &sh->state); + else { + sh->check_state = check_state_compute_run; + set_bit(STRIPE_COMPUTE_RUN, &sh->state); + set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); + set_bit(R5_Wantcompute, + &sh->dev[sh->pd_idx].flags); + sh->ops.target = sh->pd_idx; + s->uptodate++; + } + } + break; + case check_state_compute_run: + break; + default: + printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n", + __func__, sh->check_state, + (unsigned long long) sh->sector); + BUG(); + } +} + + +static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh, + struct stripe_head_state *s, + struct r6_state *r6s, struct page *tmp_page, + int disks) +{ + int update_p = 0, update_q = 0; + struct r5dev *dev; + int pd_idx = sh->pd_idx; + int qd_idx = r6s->qd_idx; + + set_bit(STRIPE_HANDLE, &sh->state); + + BUG_ON(s->failed > 2); + BUG_ON(s->uptodate < disks); + /* Want to check and possibly repair P and Q. + * However there could be one 'failed' device, in which + * case we can only check one of them, possibly using the + * other to generate missing data + */ + + /* If !tmp_page, we cannot do the calculations, + * but as we have set STRIPE_HANDLE, we will soon be called + * by stripe_handle with a tmp_page - just wait until then. + */ + if (tmp_page) { + if (s->failed == r6s->q_failed) { + /* The only possible failed device holds 'Q', so it + * makes sense to check P (If anything else were failed, + * we would have used P to recreate it). + */ + compute_block_1(sh, pd_idx, 1); + if (!page_is_zero(sh->dev[pd_idx].page)) { + compute_block_1(sh, pd_idx, 0); + update_p = 1; + } + } + if (!r6s->q_failed && s->failed < 2) { + /* q is not failed, and we didn't use it to generate + * anything, so it makes sense to check it + */ + memcpy(page_address(tmp_page), + page_address(sh->dev[qd_idx].page), + STRIPE_SIZE); + compute_parity6(sh, UPDATE_PARITY); + if (memcmp(page_address(tmp_page), + page_address(sh->dev[qd_idx].page), + STRIPE_SIZE) != 0) { + clear_bit(STRIPE_INSYNC, &sh->state); + update_q = 1; + } + } + if (update_p || update_q) { + conf->mddev->resync_mismatches += STRIPE_SECTORS; + if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) + /* don't try to repair!! */ + update_p = update_q = 0; + } + + /* now write out any block on a failed drive, + * or P or Q if they need it + */ + + if (s->failed == 2) { + dev = &sh->dev[r6s->failed_num[1]]; + s->locked++; + set_bit(R5_LOCKED, &dev->flags); + set_bit(R5_Wantwrite, &dev->flags); + } + if (s->failed >= 1) { + dev = &sh->dev[r6s->failed_num[0]]; + s->locked++; + set_bit(R5_LOCKED, &dev->flags); + set_bit(R5_Wantwrite, &dev->flags); + } + + if (update_p) { + dev = &sh->dev[pd_idx]; + s->locked++; + set_bit(R5_LOCKED, &dev->flags); + set_bit(R5_Wantwrite, &dev->flags); + } + if (update_q) { + dev = &sh->dev[qd_idx]; + s->locked++; + set_bit(R5_LOCKED, &dev->flags); + set_bit(R5_Wantwrite, &dev->flags); + } + clear_bit(STRIPE_DEGRADED, &sh->state); + + set_bit(STRIPE_INSYNC, &sh->state); + } +} + +static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh, + struct r6_state *r6s) +{ + int i; + + /* We have read all the blocks in this stripe and now we need to + * copy some of them into a target stripe for expand. 
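
handle_parity_checks6() above verifies P by recomputing it in place and testing the result for zero, and verifies Q by recomputing it into the spare page and comparing. For background, a compact sketch of how the two RAID-6 syndromes are formed: P is plain XOR, while Q weights each data block by successive powers of the GF(2^8) generator {02} (the 0x11d polynomial used by the md RAID-6 code); byte-wide here, page-wide in the driver:

    #include <stdio.h>

    /* multiply a GF(2^8) element by {02} modulo x^8+x^4+x^3+x^2+1 */
    static unsigned char gf2_times2(unsigned char v)
    {
        return (unsigned char)((v << 1) ^ ((v & 0x80) ? 0x1d : 0));
    }

    /* P = D0 ^ D1 ^ ... ^ Dn-1
     * Q = g^0*D0 ^ g^1*D1 ^ ... ^ g^(n-1)*Dn-1, evaluated Horner-style
     *     from the highest-numbered block down, one multiply per step. */
    static void make_pq(const unsigned char *d, int ndata,
                        unsigned char *p, unsigned char *q)
    {
        int i;

        *p = *q = d[ndata - 1];
        for (i = ndata - 2; i >= 0; i--) {
            *p ^= d[i];
            *q = gf2_times2(*q) ^ d[i];
        }
    }

    int main(void)
    {
        unsigned char d[4] = { 0x11, 0x22, 0x33, 0x44 };   /* one byte per disk */
        unsigned char p, q;

        make_pq(d, 4, &p, &q);
        printf("P=%02x Q=%02x\n", p, q);

        d[2] ^= 0x5a;                   /* corrupt one data byte */
        make_pq(d, 4, &p, &q);
        printf("P=%02x Q=%02x (both syndromes change)\n", p, q);
        return 0;
    }
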
+ */ + struct dma_async_tx_descriptor *tx = NULL; + clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); + for (i = 0; i < sh->disks; i++) + if (i != sh->pd_idx && (!r6s || i != r6s->qd_idx)) { + int dd_idx, pd_idx, j; + struct stripe_head *sh2; + + sector_t bn = compute_blocknr(sh, i); + sector_t s = raid5_compute_sector(bn, conf->raid_disks, + conf->raid_disks - + conf->max_degraded, &dd_idx, + &pd_idx, conf); + sh2 = get_active_stripe(conf, s, conf->raid_disks, + pd_idx, 1); + if (sh2 == NULL) + /* so far only the early blocks of this stripe + * have been requested. When later blocks + * get requested, we will try again + */ + continue; + if (!test_bit(STRIPE_EXPANDING, &sh2->state) || + test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) { + /* must have already done this block */ + release_stripe(sh2); + continue; + } + + /* place all the copies on one channel */ + tx = async_memcpy(sh2->dev[dd_idx].page, + sh->dev[i].page, 0, 0, STRIPE_SIZE, + ASYNC_TX_DEP_ACK, tx, NULL, NULL); + + set_bit(R5_Expanded, &sh2->dev[dd_idx].flags); + set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags); + for (j = 0; j < conf->raid_disks; j++) + if (j != sh2->pd_idx && + (!r6s || j != raid6_next_disk(sh2->pd_idx, + sh2->disks)) && + !test_bit(R5_Expanded, &sh2->dev[j].flags)) + break; + if (j == conf->raid_disks) { + set_bit(STRIPE_EXPAND_READY, &sh2->state); + set_bit(STRIPE_HANDLE, &sh2->state); + } + release_stripe(sh2); + + } + /* done submitting copies, wait for them to complete */ + if (tx) { + async_tx_ack(tx); + dma_wait_for_async_tx(tx); + } +} + + +/* + * handle_stripe - do things to a stripe. + * + * We lock the stripe and then examine the state of various bits + * to see what needs to be done. + * Possible results: + * return some read request which now have data + * return some write requests which are safely on disc + * schedule a read on some buffers + * schedule a write of some buffers + * return confirmation of parity correctness + * + * buffers are taken off read_list or write_list, and bh_cache buffers + * get BH_Lock set before the stripe lock is released. 
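
handle_stripe5() below opens with a counting pass over every member, folding the per-device flags into the stripe_head_state totals (locked, uptodate, to_read, to_write, failed, ...) that drive the rest of the function. A stripped-down model of that pass, using a plain struct in place of the flag words:

    #include <stdio.h>

    /* illustrative stand-ins for the R5_* flag bits and stripe_head_state */
    struct dev_state {
        int locked, uptodate, toread, towrite, overwrite, insync;
    };

    struct stripe_totals {
        int locked, uptodate, to_read, to_write, non_overwrite;
        int failed, failed_num;
    };

    static void tally(const struct dev_state *dev, int disks,
                      struct stripe_totals *s)
    {
        int i;

        *s = (struct stripe_totals){ .failed_num = -1 };
        for (i = 0; i < disks; i++) {
            s->locked   += dev[i].locked;
            s->uptodate += dev[i].uptodate;
            if (dev[i].toread)
                s->to_read++;
            if (dev[i].towrite) {
                s->to_write++;
                if (!dev[i].overwrite)
                    s->non_overwrite++;
            }
            if (!dev[i].insync) {       /* failed or read-error member */
                s->failed++;
                s->failed_num = i;
            }
        }
    }

    int main(void)
    {
        struct dev_state dev[4] = {
            { .uptodate = 1, .insync = 1 },
            { .towrite = 1, .insync = 1 },
            { .toread = 1, .insync = 1 },
            { .insync = 0 },            /* one failed member */
        };
        struct stripe_totals s;

        tally(dev, 4, &s);
        printf("uptodate=%d to_read=%d to_write=%d failed=%d (dev %d)\n",
               s.uptodate, s.to_read, s.to_write, s.failed, s.failed_num);
        return 0;
    }
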
+ * + */ + +static bool handle_stripe5(struct stripe_head *sh) +{ + raid5_conf_t *conf = sh->raid_conf; + int disks = sh->disks, i; + struct bio *return_bi = NULL; + struct stripe_head_state s; + struct r5dev *dev; + mdk_rdev_t *blocked_rdev = NULL; + int prexor; + + memset(&s, 0, sizeof(s)); + pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d " + "reconstruct:%d\n", (unsigned long long)sh->sector, sh->state, + atomic_read(&sh->count), sh->pd_idx, sh->check_state, + sh->reconstruct_state); + + spin_lock(&sh->lock); + clear_bit(STRIPE_HANDLE, &sh->state); + clear_bit(STRIPE_DELAYED, &sh->state); + + s.syncing = test_bit(STRIPE_SYNCING, &sh->state); + s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); + s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); + + /* Now to look around and see what can be done */ + rcu_read_lock(); + for (i=disks; i--; ) { + mdk_rdev_t *rdev; + struct r5dev *dev = &sh->dev[i]; + clear_bit(R5_Insync, &dev->flags); + + pr_debug("check %d: state 0x%lx toread %p read %p write %p " + "written %p\n", i, dev->flags, dev->toread, dev->read, + dev->towrite, dev->written); + + /* maybe we can request a biofill operation + * + * new wantfill requests are only permitted while + * ops_complete_biofill is guaranteed to be inactive + */ + if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread && + !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) + set_bit(R5_Wantfill, &dev->flags); + + /* now count some things */ + if (test_bit(R5_LOCKED, &dev->flags)) s.locked++; + if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++; + if (test_bit(R5_Wantcompute, &dev->flags)) s.compute++; + + if (test_bit(R5_Wantfill, &dev->flags)) + s.to_fill++; + else if (dev->toread) + s.to_read++; + if (dev->towrite) { + s.to_write++; + if (!test_bit(R5_OVERWRITE, &dev->flags)) + s.non_overwrite++; + } + if (dev->written) + s.written++; + rdev = rcu_dereference(conf->disks[i].rdev); + if (blocked_rdev == NULL && + rdev && unlikely(test_bit(Blocked, &rdev->flags))) { + blocked_rdev = rdev; + atomic_inc(&rdev->nr_pending); + } + if (!rdev || !test_bit(In_sync, &rdev->flags)) { + /* The ReadError flag will just be confusing now */ + clear_bit(R5_ReadError, &dev->flags); + clear_bit(R5_ReWrite, &dev->flags); + } + if (!rdev || !test_bit(In_sync, &rdev->flags) + || test_bit(R5_ReadError, &dev->flags)) { + s.failed++; + s.failed_num = i; + } else + set_bit(R5_Insync, &dev->flags); + } + rcu_read_unlock(); + + if (unlikely(blocked_rdev)) { + if (s.syncing || s.expanding || s.expanded || + s.to_write || s.written) { + set_bit(STRIPE_HANDLE, &sh->state); + goto unlock; + } + /* There is nothing for the blocked_rdev to block */ + rdev_dec_pending(blocked_rdev, conf->mddev); + blocked_rdev = NULL; + } + + if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { + set_bit(STRIPE_OP_BIOFILL, &s.ops_request); + set_bit(STRIPE_BIOFILL_RUN, &sh->state); + } + + pr_debug("locked=%d uptodate=%d to_read=%d" + " to_write=%d failed=%d failed_num=%d\n", + s.locked, s.uptodate, s.to_read, s.to_write, + s.failed, s.failed_num); + /* check if the array has lost two devices and, if so, some requests might + * need to be failed + */ + if (s.failed > 1 && s.to_read+s.to_write+s.written) + handle_failed_stripe(conf, sh, &s, disks, &return_bi); + if (s.failed > 1 && s.syncing) { + md_done_sync(conf->mddev, STRIPE_SECTORS,0); + clear_bit(STRIPE_SYNCING, &sh->state); + s.syncing = 0; + } + + /* might be able to return some write requests if the parity block + * is safe, or on a failed drive + */ + dev = 
&sh->dev[sh->pd_idx]; + if ( s.written && + ((test_bit(R5_Insync, &dev->flags) && + !test_bit(R5_LOCKED, &dev->flags) && + test_bit(R5_UPTODATE, &dev->flags)) || + (s.failed == 1 && s.failed_num == sh->pd_idx))) + handle_stripe_clean_event(conf, sh, disks, &return_bi); + + /* Now we might consider reading some blocks, either to check/generate + * parity, or to satisfy requests + * or to load a block that is being partially written. + */ + if (s.to_read || s.non_overwrite || + (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding) + handle_stripe_fill5(sh, &s, disks); + + /* Now we check to see if any write operations have recently + * completed + */ + prexor = 0; + if (sh->reconstruct_state == reconstruct_state_prexor_drain_result) + prexor = 1; + if (sh->reconstruct_state == reconstruct_state_drain_result || + sh->reconstruct_state == reconstruct_state_prexor_drain_result) { + sh->reconstruct_state = reconstruct_state_idle; + + /* All the 'written' buffers and the parity block are ready to + * be written back to disk + */ + BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags)); + for (i = disks; i--; ) { + dev = &sh->dev[i]; + if (test_bit(R5_LOCKED, &dev->flags) && + (i == sh->pd_idx || dev->written)) { + pr_debug("Writing block %d\n", i); + set_bit(R5_Wantwrite, &dev->flags); + if (prexor) + continue; + if (!test_bit(R5_Insync, &dev->flags) || + (i == sh->pd_idx && s.failed == 0)) + set_bit(STRIPE_INSYNC, &sh->state); + } + } + if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { + atomic_dec(&conf->preread_active_stripes); + if (atomic_read(&conf->preread_active_stripes) < + IO_THRESHOLD) + md_wakeup_thread(conf->mddev->thread); + } + } + + /* Now to consider new write requests and what else, if anything + * should be read. We do not handle new writes when: + * 1/ A 'write' operation (copy+xor) is already in flight. + * 2/ A 'check' operation is in flight, as it may clobber the parity + * block. + */ + if (s.to_write && !sh->reconstruct_state && !sh->check_state) + handle_stripe_dirtying5(conf, sh, &s, disks); + + /* maybe we need to check and possibly fix the parity for this stripe + * Any reads will already have been scheduled, so we just see if enough + * data is available. The parity check is held off while parity + * dependent operations are in flight. 
+ */ + if (sh->check_state || + (s.syncing && s.locked == 0 && + !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && + !test_bit(STRIPE_INSYNC, &sh->state))) + handle_parity_checks5(conf, sh, &s, disks); + + if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { + md_done_sync(conf->mddev, STRIPE_SECTORS,1); + clear_bit(STRIPE_SYNCING, &sh->state); + } + + /* If the failed drive is just a ReadError, then we might need to progress + * the repair/check process + */ + if (s.failed == 1 && !conf->mddev->ro && + test_bit(R5_ReadError, &sh->dev[s.failed_num].flags) + && !test_bit(R5_LOCKED, &sh->dev[s.failed_num].flags) + && test_bit(R5_UPTODATE, &sh->dev[s.failed_num].flags) + ) { + dev = &sh->dev[s.failed_num]; + if (!test_bit(R5_ReWrite, &dev->flags)) { + set_bit(R5_Wantwrite, &dev->flags); + set_bit(R5_ReWrite, &dev->flags); + set_bit(R5_LOCKED, &dev->flags); + s.locked++; + } else { + /* let's read it back */ + set_bit(R5_Wantread, &dev->flags); + set_bit(R5_LOCKED, &dev->flags); + s.locked++; + } + } + + /* Finish reconstruct operations initiated by the expansion process */ + if (sh->reconstruct_state == reconstruct_state_result) { + sh->reconstruct_state = reconstruct_state_idle; + clear_bit(STRIPE_EXPANDING, &sh->state); + for (i = conf->raid_disks; i--; ) { + set_bit(R5_Wantwrite, &sh->dev[i].flags); + set_bit(R5_LOCKED, &sh->dev[i].flags); + s.locked++; + } + } + + if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && + !sh->reconstruct_state) { + /* Need to write out all blocks after computing parity */ + sh->disks = conf->raid_disks; + sh->pd_idx = stripe_to_pdidx(sh->sector, conf, + conf->raid_disks); + schedule_reconstruction5(sh, &s, 1, 1); + } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { + clear_bit(STRIPE_EXPAND_READY, &sh->state); + atomic_dec(&conf->reshape_stripes); + wake_up(&conf->wait_for_overlap); + md_done_sync(conf->mddev, STRIPE_SECTORS, 1); + } + + if (s.expanding && s.locked == 0 && + !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) + handle_stripe_expansion(conf, sh, NULL); + + unlock: + spin_unlock(&sh->lock); + + /* wait for this device to become unblocked */ + if (unlikely(blocked_rdev)) + md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); + + if (s.ops_request) + raid5_run_ops(sh, s.ops_request); + + ops_run_io(sh, &s); + + return_io(return_bi); + + return blocked_rdev == NULL; +} + +static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page) +{ + raid6_conf_t *conf = sh->raid_conf; + int disks = sh->disks; + struct bio *return_bi = NULL; + int i, pd_idx = sh->pd_idx; + struct stripe_head_state s; + struct r6_state r6s; + struct r5dev *dev, *pdev, *qdev; + mdk_rdev_t *blocked_rdev = NULL; + + r6s.qd_idx = raid6_next_disk(pd_idx, disks); + pr_debug("handling stripe %llu, state=%#lx cnt=%d, " + "pd_idx=%d, qd_idx=%d\n", + (unsigned long long)sh->sector, sh->state, + atomic_read(&sh->count), pd_idx, r6s.qd_idx); + memset(&s, 0, sizeof(s)); + + spin_lock(&sh->lock); + clear_bit(STRIPE_HANDLE, &sh->state); + clear_bit(STRIPE_DELAYED, &sh->state); + + s.syncing = test_bit(STRIPE_SYNCING, &sh->state); + s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); + s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); + /* Now to look around and see what can be done */ + + rcu_read_lock(); + for (i=disks; i--; ) { + mdk_rdev_t *rdev; + dev = &sh->dev[i]; + clear_bit(R5_Insync, &dev->flags); + + pr_debug("check %d: state 0x%lx read %p write %p written %p\n", + i, dev->flags, dev->toread, dev->towrite, dev->written); + /* 
maybe we can reply to a read */ + if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) { + struct bio *rbi, *rbi2; + pr_debug("Return read for disc %d\n", i); + spin_lock_irq(&conf->device_lock); + rbi = dev->toread; + dev->toread = NULL; + if (test_and_clear_bit(R5_Overlap, &dev->flags)) + wake_up(&conf->wait_for_overlap); + spin_unlock_irq(&conf->device_lock); + while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) { + copy_data(0, rbi, dev->page, dev->sector); + rbi2 = r5_next_bio(rbi, dev->sector); + spin_lock_irq(&conf->device_lock); + if (!raid5_dec_bi_phys_segments(rbi)) { + rbi->bi_next = return_bi; + return_bi = rbi; + } + spin_unlock_irq(&conf->device_lock); + rbi = rbi2; + } + } + + /* now count some things */ + if (test_bit(R5_LOCKED, &dev->flags)) s.locked++; + if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++; + + + if (dev->toread) + s.to_read++; + if (dev->towrite) { + s.to_write++; + if (!test_bit(R5_OVERWRITE, &dev->flags)) + s.non_overwrite++; + } + if (dev->written) + s.written++; + rdev = rcu_dereference(conf->disks[i].rdev); + if (blocked_rdev == NULL && + rdev && unlikely(test_bit(Blocked, &rdev->flags))) { + blocked_rdev = rdev; + atomic_inc(&rdev->nr_pending); + } + if (!rdev || !test_bit(In_sync, &rdev->flags)) { + /* The ReadError flag will just be confusing now */ + clear_bit(R5_ReadError, &dev->flags); + clear_bit(R5_ReWrite, &dev->flags); + } + if (!rdev || !test_bit(In_sync, &rdev->flags) + || test_bit(R5_ReadError, &dev->flags)) { + if (s.failed < 2) + r6s.failed_num[s.failed] = i; + s.failed++; + } else + set_bit(R5_Insync, &dev->flags); + } + rcu_read_unlock(); + + if (unlikely(blocked_rdev)) { + if (s.syncing || s.expanding || s.expanded || + s.to_write || s.written) { + set_bit(STRIPE_HANDLE, &sh->state); + goto unlock; + } + /* There is nothing for the blocked_rdev to block */ + rdev_dec_pending(blocked_rdev, conf->mddev); + blocked_rdev = NULL; + } + + pr_debug("locked=%d uptodate=%d to_read=%d" + " to_write=%d failed=%d failed_num=%d,%d\n", + s.locked, s.uptodate, s.to_read, s.to_write, s.failed, + r6s.failed_num[0], r6s.failed_num[1]); + /* check if the array has lost >2 devices and, if so, some requests + * might need to be failed + */ + if (s.failed > 2 && s.to_read+s.to_write+s.written) + handle_failed_stripe(conf, sh, &s, disks, &return_bi); + if (s.failed > 2 && s.syncing) { + md_done_sync(conf->mddev, STRIPE_SECTORS,0); + clear_bit(STRIPE_SYNCING, &sh->state); + s.syncing = 0; + } + + /* + * might be able to return some write requests if the parity blocks + * are safe, or on a failed drive + */ + pdev = &sh->dev[pd_idx]; + r6s.p_failed = (s.failed >= 1 && r6s.failed_num[0] == pd_idx) + || (s.failed >= 2 && r6s.failed_num[1] == pd_idx); + qdev = &sh->dev[r6s.qd_idx]; + r6s.q_failed = (s.failed >= 1 && r6s.failed_num[0] == r6s.qd_idx) + || (s.failed >= 2 && r6s.failed_num[1] == r6s.qd_idx); + + if ( s.written && + ( r6s.p_failed || ((test_bit(R5_Insync, &pdev->flags) + && !test_bit(R5_LOCKED, &pdev->flags) + && test_bit(R5_UPTODATE, &pdev->flags)))) && + ( r6s.q_failed || ((test_bit(R5_Insync, &qdev->flags) + && !test_bit(R5_LOCKED, &qdev->flags) + && test_bit(R5_UPTODATE, &qdev->flags))))) + handle_stripe_clean_event(conf, sh, disks, &return_bi); + + /* Now we might consider reading some blocks, either to check/generate + * parity, or to satisfy requests + * or to load a block that is being partially written. 
+ */ + if (s.to_read || s.non_overwrite || (s.to_write && s.failed) || + (s.syncing && (s.uptodate < disks)) || s.expanding) + handle_stripe_fill6(sh, &s, &r6s, disks); + + /* now to consider writing and what else, if anything should be read */ + if (s.to_write) + handle_stripe_dirtying6(conf, sh, &s, &r6s, disks); + + /* maybe we need to check and possibly fix the parity for this stripe + * Any reads will already have been scheduled, so we just see if enough + * data is available + */ + if (s.syncing && s.locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state)) + handle_parity_checks6(conf, sh, &s, &r6s, tmp_page, disks); + + if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { + md_done_sync(conf->mddev, STRIPE_SECTORS,1); + clear_bit(STRIPE_SYNCING, &sh->state); + } + + /* If the failed drives are just a ReadError, then we might need + * to progress the repair/check process + */ + if (s.failed <= 2 && !conf->mddev->ro) + for (i = 0; i < s.failed; i++) { + dev = &sh->dev[r6s.failed_num[i]]; + if (test_bit(R5_ReadError, &dev->flags) + && !test_bit(R5_LOCKED, &dev->flags) + && test_bit(R5_UPTODATE, &dev->flags) + ) { + if (!test_bit(R5_ReWrite, &dev->flags)) { + set_bit(R5_Wantwrite, &dev->flags); + set_bit(R5_ReWrite, &dev->flags); + set_bit(R5_LOCKED, &dev->flags); + } else { + /* let's read it back */ + set_bit(R5_Wantread, &dev->flags); + set_bit(R5_LOCKED, &dev->flags); + } + } + } + + if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) { + /* Need to write out all blocks after computing P&Q */ + sh->disks = conf->raid_disks; + sh->pd_idx = stripe_to_pdidx(sh->sector, conf, + conf->raid_disks); + compute_parity6(sh, RECONSTRUCT_WRITE); + for (i = conf->raid_disks ; i-- ; ) { + set_bit(R5_LOCKED, &sh->dev[i].flags); + s.locked++; + set_bit(R5_Wantwrite, &sh->dev[i].flags); + } + clear_bit(STRIPE_EXPANDING, &sh->state); + } else if (s.expanded) { + clear_bit(STRIPE_EXPAND_READY, &sh->state); + atomic_dec(&conf->reshape_stripes); + wake_up(&conf->wait_for_overlap); + md_done_sync(conf->mddev, STRIPE_SECTORS, 1); + } + + if (s.expanding && s.locked == 0 && + !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) + handle_stripe_expansion(conf, sh, &r6s); + + unlock: + spin_unlock(&sh->lock); + + /* wait for this device to become unblocked */ + if (unlikely(blocked_rdev)) + md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); + + ops_run_io(sh, &s); + + return_io(return_bi); + + return blocked_rdev == NULL; +} + +/* returns true if the stripe was handled */ +static bool handle_stripe(struct stripe_head *sh, struct page *tmp_page) +{ + if (sh->raid_conf->level == 6) + return handle_stripe6(sh, tmp_page); + else + return handle_stripe5(sh); +} + + + +static void raid5_activate_delayed(raid5_conf_t *conf) +{ + if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { + while (!list_empty(&conf->delayed_list)) { + struct list_head *l = conf->delayed_list.next; + struct stripe_head *sh; + sh = list_entry(l, struct stripe_head, lru); + list_del_init(l); + clear_bit(STRIPE_DELAYED, &sh->state); + if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) + atomic_inc(&conf->preread_active_stripes); + list_add_tail(&sh->lru, &conf->hold_list); + } + } else + blk_plug_device(conf->mddev->queue); +} + +static void activate_bit_delay(raid5_conf_t *conf) +{ + /* device_lock is held */ + struct list_head head; + list_add(&head, &conf->bitmap_list); + list_del_init(&conf->bitmap_list); + while (!list_empty(&head)) { + struct stripe_head *sh = list_entry(head.next, struct stripe_head, 
lru); + list_del_init(&sh->lru); + atomic_inc(&sh->count); + __release_stripe(conf, sh); + } +} + +static void unplug_slaves(mddev_t *mddev) +{ + raid5_conf_t *conf = mddev_to_conf(mddev); + int i; + + rcu_read_lock(); + for (i=0; i<mddev->raid_disks; i++) { + mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); + if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { + struct request_queue *r_queue = bdev_get_queue(rdev->bdev); + + atomic_inc(&rdev->nr_pending); + rcu_read_unlock(); + + blk_unplug(r_queue); + + rdev_dec_pending(rdev, mddev); + rcu_read_lock(); + } + } + rcu_read_unlock(); +} + +static void raid5_unplug_device(struct request_queue *q) +{ + mddev_t *mddev = q->queuedata; + raid5_conf_t *conf = mddev_to_conf(mddev); + unsigned long flags; + + spin_lock_irqsave(&conf->device_lock, flags); + + if (blk_remove_plug(q)) { + conf->seq_flush++; + raid5_activate_delayed(conf); + } + md_wakeup_thread(mddev->thread); + + spin_unlock_irqrestore(&conf->device_lock, flags); + + unplug_slaves(mddev); +} + +static int raid5_congested(void *data, int bits) +{ + mddev_t *mddev = data; + raid5_conf_t *conf = mddev_to_conf(mddev); + + /* No difference between reads and writes. Just check + * how busy the stripe_cache is + */ + if (conf->inactive_blocked) + return 1; + if (conf->quiesce) + return 1; + if (list_empty_careful(&conf->inactive_list)) + return 1; + + return 0; +} + +/* We want read requests to align with chunks where possible, + * but write requests don't need to. + */ +static int raid5_mergeable_bvec(struct request_queue *q, + struct bvec_merge_data *bvm, + struct bio_vec *biovec) +{ + mddev_t *mddev = q->queuedata; + sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); + int max; + unsigned int chunk_sectors = mddev->chunk_size >> 9; + unsigned int bio_sectors = bvm->bi_size >> 9; + + if ((bvm->bi_rw & 1) == WRITE) + return biovec->bv_len; /* always allow writes to be mergeable */ + + max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; + if (max < 0) max = 0; + if (max <= biovec->bv_len && bio_sectors == 0) + return biovec->bv_len; + else + return max; +} + + +static int in_chunk_boundary(mddev_t *mddev, struct bio *bio) +{ + sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); + unsigned int chunk_sectors = mddev->chunk_size >> 9; + unsigned int bio_sectors = bio->bi_size >> 9; + + return chunk_sectors >= + ((sector & (chunk_sectors - 1)) + bio_sectors); +} + +/* + * add bio to the retry LIFO ( in O(1) ... we are in interrupt ) + * later sampled by raid5d. 
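
raid5_mergeable_bvec() and in_chunk_boundary() above are pure arithmetic on the chunk size: a read is only eligible for the aligned fast path if it fits entirely inside one chunk. The same arithmetic stand-alone (chunk_sectors is assumed to be a power of two, as md requires):

    #include <stdio.h>

    /* Remaining room, in sectors, inside the chunk that 'sector' falls in. */
    static unsigned int room_in_chunk(unsigned long long sector,
                                      unsigned int chunk_sectors)
    {
        return chunk_sectors - (sector & (chunk_sectors - 1));
    }

    /* Would a request of 'len' sectors starting at 'sector' stay in one chunk? */
    static int fits_in_chunk(unsigned long long sector, unsigned int len,
                             unsigned int chunk_sectors)
    {
        return (sector & (chunk_sectors - 1)) + len <= chunk_sectors;
    }

    int main(void)
    {
        unsigned int chunk = 128;       /* 64K chunks = 128 sectors */

        printf("%u sectors left after sector 100\n", room_in_chunk(100, chunk));
        printf("100+28 fits: %d\n", fits_in_chunk(100, 28, chunk));    /* 1 */
        printf("100+29 fits: %d\n", fits_in_chunk(100, 29, chunk));    /* 0 */
        return 0;
    }
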
+ */ +static void add_bio_to_retry(struct bio *bi,raid5_conf_t *conf) +{ + unsigned long flags; + + spin_lock_irqsave(&conf->device_lock, flags); + + bi->bi_next = conf->retry_read_aligned_list; + conf->retry_read_aligned_list = bi; + + spin_unlock_irqrestore(&conf->device_lock, flags); + md_wakeup_thread(conf->mddev->thread); +} + + +static struct bio *remove_bio_from_retry(raid5_conf_t *conf) +{ + struct bio *bi; + + bi = conf->retry_read_aligned; + if (bi) { + conf->retry_read_aligned = NULL; + return bi; + } + bi = conf->retry_read_aligned_list; + if(bi) { + conf->retry_read_aligned_list = bi->bi_next; + bi->bi_next = NULL; + /* + * this sets the active strip count to 1 and the processed + * strip count to zero (upper 8 bits) + */ + bi->bi_phys_segments = 1; /* biased count of active stripes */ + } + + return bi; +} + + +/* + * The "raid5_align_endio" should check if the read succeeded and if it + * did, call bio_endio on the original bio (having bio_put the new bio + * first). + * If the read failed.. + */ +static void raid5_align_endio(struct bio *bi, int error) +{ + struct bio* raid_bi = bi->bi_private; + mddev_t *mddev; + raid5_conf_t *conf; + int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); + mdk_rdev_t *rdev; + + bio_put(bi); + + mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata; + conf = mddev_to_conf(mddev); + rdev = (void*)raid_bi->bi_next; + raid_bi->bi_next = NULL; + + rdev_dec_pending(rdev, conf->mddev); + + if (!error && uptodate) { + bio_endio(raid_bi, 0); + if (atomic_dec_and_test(&conf->active_aligned_reads)) + wake_up(&conf->wait_for_stripe); + return; + } + + + pr_debug("raid5_align_endio : io error...handing IO for a retry\n"); + + add_bio_to_retry(raid_bi, conf); +} + +static int bio_fits_rdev(struct bio *bi) +{ + struct request_queue *q = bdev_get_queue(bi->bi_bdev); + + if ((bi->bi_size>>9) > q->max_sectors) + return 0; + blk_recount_segments(q, bi); + if (bi->bi_phys_segments > q->max_phys_segments) + return 0; + + if (q->merge_bvec_fn) + /* it's too hard to apply the merge_bvec_fn at this stage, + * just just give up + */ + return 0; + + return 1; +} + + +static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio) +{ + mddev_t *mddev = q->queuedata; + raid5_conf_t *conf = mddev_to_conf(mddev); + const unsigned int raid_disks = conf->raid_disks; + const unsigned int data_disks = raid_disks - conf->max_degraded; + unsigned int dd_idx, pd_idx; + struct bio* align_bi; + mdk_rdev_t *rdev; + + if (!in_chunk_boundary(mddev, raid_bio)) { + pr_debug("chunk_aligned_read : non aligned\n"); + return 0; + } + /* + * use bio_clone to make a copy of the bio + */ + align_bi = bio_clone(raid_bio, GFP_NOIO); + if (!align_bi) + return 0; + /* + * set bi_end_io to a new function, and set bi_private to the + * original bio. 
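
Several spots above and below overload a single bio field as two packed counters: a low part counting the stripes still actively referencing the bio, and a high part recording how many stripes of a retried aligned read were already processed (the raid5_*_bi_phys_segments / raid5_*_bi_hw_segments helpers defined elsewhere in the file). A tiny model of that packing; the 16/16 split and the helper names below are assumptions made purely for illustration:

    #include <stdio.h>

    /* Illustrative only: the real bit split is defined by helpers in raid5.c. */
    static unsigned int active(unsigned int v)      { return v & 0xffff; }
    static unsigned int processed(unsigned int v)   { return v >> 16; }
    static unsigned int dec_active(unsigned int *v) { return --(*v) & 0xffff; }
    static void set_processed(unsigned int *v, unsigned int n)
    {
        *v = (*v & 0xffff) | (n << 16);
    }

    int main(void)
    {
        unsigned int seg = 1;   /* as in make_request(): one biased reference */

        seg++;                  /* a stripe takes a reference */
        set_processed(&seg, 3); /* retry logic: three stripes already done */
        printf("active=%u processed=%u\n", active(seg), processed(seg));

        dec_active(&seg);               /* one stripe completes */
        if (dec_active(&seg) == 0)      /* bias dropped: bio is finished */
            printf("bio can be completed\n");
        return 0;
    }
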
+ */ + align_bi->bi_end_io = raid5_align_endio; + align_bi->bi_private = raid_bio; + /* + * compute position + */ + align_bi->bi_sector = raid5_compute_sector(raid_bio->bi_sector, + raid_disks, + data_disks, + &dd_idx, + &pd_idx, + conf); + + rcu_read_lock(); + rdev = rcu_dereference(conf->disks[dd_idx].rdev); + if (rdev && test_bit(In_sync, &rdev->flags)) { + atomic_inc(&rdev->nr_pending); + rcu_read_unlock(); + raid_bio->bi_next = (void*)rdev; + align_bi->bi_bdev = rdev->bdev; + align_bi->bi_flags &= ~(1 << BIO_SEG_VALID); + align_bi->bi_sector += rdev->data_offset; + + if (!bio_fits_rdev(align_bi)) { + /* too big in some way */ + bio_put(align_bi); + rdev_dec_pending(rdev, mddev); + return 0; + } + + spin_lock_irq(&conf->device_lock); + wait_event_lock_irq(conf->wait_for_stripe, + conf->quiesce == 0, + conf->device_lock, /* nothing */); + atomic_inc(&conf->active_aligned_reads); + spin_unlock_irq(&conf->device_lock); + + generic_make_request(align_bi); + return 1; + } else { + rcu_read_unlock(); + bio_put(align_bi); + return 0; + } +} + +/* __get_priority_stripe - get the next stripe to process + * + * Full stripe writes are allowed to pass preread active stripes up until + * the bypass_threshold is exceeded. In general the bypass_count + * increments when the handle_list is handled before the hold_list; however, it + * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a + * stripe with in flight i/o. The bypass_count will be reset when the + * head of the hold_list has changed, i.e. the head was promoted to the + * handle_list. + */ +static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf) +{ + struct stripe_head *sh; + + pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n", + __func__, + list_empty(&conf->handle_list) ? "empty" : "busy", + list_empty(&conf->hold_list) ? 
"empty" : "busy", + atomic_read(&conf->pending_full_writes), conf->bypass_count); + + if (!list_empty(&conf->handle_list)) { + sh = list_entry(conf->handle_list.next, typeof(*sh), lru); + + if (list_empty(&conf->hold_list)) + conf->bypass_count = 0; + else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) { + if (conf->hold_list.next == conf->last_hold) + conf->bypass_count++; + else { + conf->last_hold = conf->hold_list.next; + conf->bypass_count -= conf->bypass_threshold; + if (conf->bypass_count < 0) + conf->bypass_count = 0; + } + } + } else if (!list_empty(&conf->hold_list) && + ((conf->bypass_threshold && + conf->bypass_count > conf->bypass_threshold) || + atomic_read(&conf->pending_full_writes) == 0)) { + sh = list_entry(conf->hold_list.next, + typeof(*sh), lru); + conf->bypass_count -= conf->bypass_threshold; + if (conf->bypass_count < 0) + conf->bypass_count = 0; + } else + return NULL; + + list_del_init(&sh->lru); + atomic_inc(&sh->count); + BUG_ON(atomic_read(&sh->count) != 1); + return sh; +} + +static int make_request(struct request_queue *q, struct bio * bi) +{ + mddev_t *mddev = q->queuedata; + raid5_conf_t *conf = mddev_to_conf(mddev); + unsigned int dd_idx, pd_idx; + sector_t new_sector; + sector_t logical_sector, last_sector; + struct stripe_head *sh; + const int rw = bio_data_dir(bi); + int cpu, remaining; + + if (unlikely(bio_barrier(bi))) { + bio_endio(bi, -EOPNOTSUPP); + return 0; + } + + md_write_start(mddev, bi); + + cpu = part_stat_lock(); + part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); + part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], + bio_sectors(bi)); + part_stat_unlock(); + + if (rw == READ && + mddev->reshape_position == MaxSector && + chunk_aligned_read(q,bi)) + return 0; + + logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); + last_sector = bi->bi_sector + (bi->bi_size>>9); + bi->bi_next = NULL; + bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ + + for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { + DEFINE_WAIT(w); + int disks, data_disks; + + retry: + prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); + if (likely(conf->expand_progress == MaxSector)) + disks = conf->raid_disks; + else { + /* spinlock is needed as expand_progress may be + * 64bit on a 32bit platform, and so it might be + * possible to see a half-updated value + * Ofcourse expand_progress could change after + * the lock is dropped, so once we get a reference + * to the stripe that we think it is, we will have + * to check again. + */ + spin_lock_irq(&conf->device_lock); + disks = conf->raid_disks; + if (logical_sector >= conf->expand_progress) + disks = conf->previous_raid_disks; + else { + if (logical_sector >= conf->expand_lo) { + spin_unlock_irq(&conf->device_lock); + schedule(); + goto retry; + } + } + spin_unlock_irq(&conf->device_lock); + } + data_disks = disks - conf->max_degraded; + + new_sector = raid5_compute_sector(logical_sector, disks, data_disks, + &dd_idx, &pd_idx, conf); + pr_debug("raid5: make_request, sector %llu logical %llu\n", + (unsigned long long)new_sector, + (unsigned long long)logical_sector); + + sh = get_active_stripe(conf, new_sector, disks, pd_idx, (bi->bi_rw&RWA_MASK)); + if (sh) { + if (unlikely(conf->expand_progress != MaxSector)) { + /* expansion might have moved on while waiting for a + * stripe, so we must do the range check again. 
+ * Expansion could still move past after this + * test, but as we are holding a reference to + * 'sh', we know that if that happens, + * STRIPE_EXPANDING will get set and the expansion + * won't proceed until we finish with the stripe. + */ + int must_retry = 0; + spin_lock_irq(&conf->device_lock); + if (logical_sector < conf->expand_progress && + disks == conf->previous_raid_disks) + /* mismatch, need to try again */ + must_retry = 1; + spin_unlock_irq(&conf->device_lock); + if (must_retry) { + release_stripe(sh); + goto retry; + } + } + /* FIXME what if we get a false positive because these + * are being updated. + */ + if (logical_sector >= mddev->suspend_lo && + logical_sector < mddev->suspend_hi) { + release_stripe(sh); + schedule(); + goto retry; + } + + if (test_bit(STRIPE_EXPANDING, &sh->state) || + !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) { + /* Stripe is busy expanding or + * add failed due to overlap. Flush everything + * and wait a while + */ + raid5_unplug_device(mddev->queue); + release_stripe(sh); + schedule(); + goto retry; + } + finish_wait(&conf->wait_for_overlap, &w); + set_bit(STRIPE_HANDLE, &sh->state); + clear_bit(STRIPE_DELAYED, &sh->state); + release_stripe(sh); + } else { + /* cannot get stripe for read-ahead, just give-up */ + clear_bit(BIO_UPTODATE, &bi->bi_flags); + finish_wait(&conf->wait_for_overlap, &w); + break; + } + + } + spin_lock_irq(&conf->device_lock); + remaining = raid5_dec_bi_phys_segments(bi); + spin_unlock_irq(&conf->device_lock); + if (remaining == 0) { + + if ( rw == WRITE ) + md_write_end(mddev); + + bio_endio(bi, 0); + } + return 0; +} + +static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped) +{ + /* reshaping is quite different to recovery/resync so it is + * handled quite separately ... here. + * + * On each call to sync_request, we gather one chunk worth of + * destination stripes and flag them as expanding. + * Then we find all the source stripes and request reads. + * As the reads complete, handle_stripe will copy the data + * into the destination stripe and release that stripe. + */ + raid5_conf_t *conf = (raid5_conf_t *) mddev->private; + struct stripe_head *sh; + int pd_idx; + sector_t first_sector, last_sector; + int raid_disks = conf->previous_raid_disks; + int data_disks = raid_disks - conf->max_degraded; + int new_data_disks = conf->raid_disks - conf->max_degraded; + int i; + int dd_idx; + sector_t writepos, safepos, gap; + + if (sector_nr == 0 && + conf->expand_progress != 0) { + /* restarting in the middle, skip the initial sectors */ + sector_nr = conf->expand_progress; + sector_div(sector_nr, new_data_disks); + *skipped = 1; + return sector_nr; + } + + /* we update the metadata when there is more than 3Meg + * in the block range (that is rather arbitrary, should + * probably be time based) or when the data about to be + * copied would over-write the source of the data at + * the front of the range. + * i.e. one new_stripe forward from expand_progress new_maps + * to after where expand_lo old_maps to + */ + writepos = conf->expand_progress + + conf->chunk_size/512*(new_data_disks); + sector_div(writepos, new_data_disks); + safepos = conf->expand_lo; + sector_div(safepos, data_disks); + gap = conf->expand_progress - conf->expand_lo; + + if (writepos >= safepos || + gap > (new_data_disks)*3000*2 /*3Meg*/) { + /* Cannot proceed until we've updated the superblock... 
*/ + wait_event(conf->wait_for_overlap, + atomic_read(&conf->reshape_stripes)==0); + mddev->reshape_position = conf->expand_progress; + set_bit(MD_CHANGE_DEVS, &mddev->flags); + md_wakeup_thread(mddev->thread); + wait_event(mddev->sb_wait, mddev->flags == 0 || + kthread_should_stop()); + spin_lock_irq(&conf->device_lock); + conf->expand_lo = mddev->reshape_position; + spin_unlock_irq(&conf->device_lock); + wake_up(&conf->wait_for_overlap); + } + + for (i=0; i < conf->chunk_size/512; i+= STRIPE_SECTORS) { + int j; + int skipped = 0; + pd_idx = stripe_to_pdidx(sector_nr+i, conf, conf->raid_disks); + sh = get_active_stripe(conf, sector_nr+i, + conf->raid_disks, pd_idx, 0); + set_bit(STRIPE_EXPANDING, &sh->state); + atomic_inc(&conf->reshape_stripes); + /* If any of this stripe is beyond the end of the old + * array, then we need to zero those blocks + */ + for (j=sh->disks; j--;) { + sector_t s; + if (j == sh->pd_idx) + continue; + if (conf->level == 6 && + j == raid6_next_disk(sh->pd_idx, sh->disks)) + continue; + s = compute_blocknr(sh, j); + if (s < mddev->array_sectors) { + skipped = 1; + continue; + } + memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE); + set_bit(R5_Expanded, &sh->dev[j].flags); + set_bit(R5_UPTODATE, &sh->dev[j].flags); + } + if (!skipped) { + set_bit(STRIPE_EXPAND_READY, &sh->state); + set_bit(STRIPE_HANDLE, &sh->state); + } + release_stripe(sh); + } + spin_lock_irq(&conf->device_lock); + conf->expand_progress = (sector_nr + i) * new_data_disks; + spin_unlock_irq(&conf->device_lock); + /* Ok, those stripe are ready. We can start scheduling + * reads on the source stripes. + * The source stripes are determined by mapping the first and last + * block on the destination stripes. + */ + first_sector = + raid5_compute_sector(sector_nr*(new_data_disks), + raid_disks, data_disks, + &dd_idx, &pd_idx, conf); + last_sector = + raid5_compute_sector((sector_nr+conf->chunk_size/512) + *(new_data_disks) -1, + raid_disks, data_disks, + &dd_idx, &pd_idx, conf); + if (last_sector >= (mddev->size<<1)) + last_sector = (mddev->size<<1)-1; + while (first_sector <= last_sector) { + pd_idx = stripe_to_pdidx(first_sector, conf, + conf->previous_raid_disks); + sh = get_active_stripe(conf, first_sector, + conf->previous_raid_disks, pd_idx, 0); + set_bit(STRIPE_EXPAND_SOURCE, &sh->state); + set_bit(STRIPE_HANDLE, &sh->state); + release_stripe(sh); + first_sector += STRIPE_SECTORS; + } + /* If this takes us to the resync_max point where we have to pause, + * then we need to write out the superblock. + */ + sector_nr += conf->chunk_size>>9; + if (sector_nr >= mddev->resync_max) { + /* Cannot proceed until we've updated the superblock... 
*/ + wait_event(conf->wait_for_overlap, + atomic_read(&conf->reshape_stripes) == 0); + mddev->reshape_position = conf->expand_progress; + set_bit(MD_CHANGE_DEVS, &mddev->flags); + md_wakeup_thread(mddev->thread); + wait_event(mddev->sb_wait, + !test_bit(MD_CHANGE_DEVS, &mddev->flags) + || kthread_should_stop()); + spin_lock_irq(&conf->device_lock); + conf->expand_lo = mddev->reshape_position; + spin_unlock_irq(&conf->device_lock); + wake_up(&conf->wait_for_overlap); + } + return conf->chunk_size>>9; +} + +/* FIXME go_faster isn't used */ +static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster) +{ + raid5_conf_t *conf = (raid5_conf_t *) mddev->private; + struct stripe_head *sh; + int pd_idx; + int raid_disks = conf->raid_disks; + sector_t max_sector = mddev->size << 1; + int sync_blocks; + int still_degraded = 0; + int i; + + if (sector_nr >= max_sector) { + /* just being told to finish up .. nothing much to do */ + unplug_slaves(mddev); + if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { + end_reshape(conf); + return 0; + } + + if (mddev->curr_resync < max_sector) /* aborted */ + bitmap_end_sync(mddev->bitmap, mddev->curr_resync, + &sync_blocks, 1); + else /* completed sync */ + conf->fullsync = 0; + bitmap_close_sync(mddev->bitmap); + + return 0; + } + + if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) + return reshape_request(mddev, sector_nr, skipped); + + /* No need to check resync_max as we never do more than one + * stripe, and as resync_max will always be on a chunk boundary, + * if the check in md_do_sync didn't fire, there is no chance + * of overstepping resync_max here + */ + + /* if there is too many failed drives and we are trying + * to resync, then assert that we are finished, because there is + * nothing we can do. + */ + if (mddev->degraded >= conf->max_degraded && + test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { + sector_t rv = (mddev->size << 1) - sector_nr; + *skipped = 1; + return rv; + } + if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && + !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && + !conf->fullsync && sync_blocks >= STRIPE_SECTORS) { + /* we can skip this block, and probably more */ + sync_blocks /= STRIPE_SECTORS; + *skipped = 1; + return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */ + } + + + bitmap_cond_end_sync(mddev->bitmap, sector_nr); + + pd_idx = stripe_to_pdidx(sector_nr, conf, raid_disks); + sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 1); + if (sh == NULL) { + sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 0); + /* make sure we don't swamp the stripe cache if someone else + * is trying to get access + */ + schedule_timeout_uninterruptible(1); + } + /* Need to check if array will still be degraded after recovery/resync + * We don't need to check the 'failed' flag as when that gets set, + * recovery aborts. 
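
reshape_request() above only checkpoints the superblock when the destination area about to be written (writepos, expressed in the new geometry) would reach the point the metadata already records as safe (expand_lo, expressed in the old geometry), or when progress has outrun the last checkpoint by a few megabytes. A plain-integer sketch of that trigger; the geometry (5 data disks growing to 6, 64K chunks) is chosen purely for illustration:

    #include <stdio.h>

    /* All positions are array data sectors (parity excluded). */
    static int must_update_sb(unsigned long long expand_progress,
                              unsigned long long expand_lo,
                              unsigned int chunk_sectors,
                              int old_data_disks, int new_data_disks)
    {
        /* where the next chunk of destination stripes ends, per new disk */
        unsigned long long writepos =
            (expand_progress +
             (unsigned long long)chunk_sectors * new_data_disks) / new_data_disks;
        /* the point the superblock already records as safe, per old disk */
        unsigned long long safepos = expand_lo / old_data_disks;
        unsigned long long gap = expand_progress - expand_lo;

        return writepos >= safepos ||
               gap > (unsigned long long)new_data_disks * 3000 * 2;  /* ~3MB */
    }

    int main(void)
    {
        /* 64K chunks = 128 sectors, reshaping 5 data disks to 6 */
        printf("%d\n", must_update_sb(10000, 9000, 128, 5, 6)); /* 0: still safe */
        printf("%d\n", must_update_sb(50000, 9000, 128, 5, 6)); /* 1: checkpoint */
        return 0;
    }
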
+ */ + for (i=0; i<mddev->raid_disks; i++) + if (conf->disks[i].rdev == NULL) + still_degraded = 1; + + bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); + + spin_lock(&sh->lock); + set_bit(STRIPE_SYNCING, &sh->state); + clear_bit(STRIPE_INSYNC, &sh->state); + spin_unlock(&sh->lock); + + /* wait for any blocked device to be handled */ + while(unlikely(!handle_stripe(sh, NULL))) + ; + release_stripe(sh); + + return STRIPE_SECTORS; +} + +static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio) +{ + /* We may not be able to submit a whole bio at once as there + * may not be enough stripe_heads available. + * We cannot pre-allocate enough stripe_heads as we may need + * more than exist in the cache (if we allow ever large chunks). + * So we do one stripe head at a time and record in + * ->bi_hw_segments how many have been done. + * + * We *know* that this entire raid_bio is in one chunk, so + * it will be only one 'dd_idx' and only need one call to raid5_compute_sector. + */ + struct stripe_head *sh; + int dd_idx, pd_idx; + sector_t sector, logical_sector, last_sector; + int scnt = 0; + int remaining; + int handled = 0; + + logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1); + sector = raid5_compute_sector( logical_sector, + conf->raid_disks, + conf->raid_disks - conf->max_degraded, + &dd_idx, + &pd_idx, + conf); + last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9); + + for (; logical_sector < last_sector; + logical_sector += STRIPE_SECTORS, + sector += STRIPE_SECTORS, + scnt++) { + + if (scnt < raid5_bi_hw_segments(raid_bio)) + /* already done this stripe */ + continue; + + sh = get_active_stripe(conf, sector, conf->raid_disks, pd_idx, 1); + + if (!sh) { + /* failed to get a stripe - must wait */ + raid5_set_bi_hw_segments(raid_bio, scnt); + conf->retry_read_aligned = raid_bio; + return handled; + } + + set_bit(R5_ReadError, &sh->dev[dd_idx].flags); + if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) { + release_stripe(sh); + raid5_set_bi_hw_segments(raid_bio, scnt); + conf->retry_read_aligned = raid_bio; + return handled; + } + + handle_stripe(sh, NULL); + release_stripe(sh); + handled++; + } + spin_lock_irq(&conf->device_lock); + remaining = raid5_dec_bi_phys_segments(raid_bio); + spin_unlock_irq(&conf->device_lock); + if (remaining == 0) + bio_endio(raid_bio, 0); + if (atomic_dec_and_test(&conf->active_aligned_reads)) + wake_up(&conf->wait_for_stripe); + return handled; +} + + + +/* + * This is our raid5 kernel thread. + * + * We scan the hash table for stripes which can be handled now. + * During the scan, completed stripes are saved for us by the interrupt + * handler, so that they will not have to wait for our next wakeup. 
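
retry_aligned_read() above walks a chunk-aligned read one stripe at a time, skips the stripes it already handled on an earlier attempt, and records how far it got whenever it has to give up waiting for a stripe_head. The resumable-iteration pattern reduced to its skeleton; try_get_stripe() here is an invented stand-in for get_active_stripe():

    #include <stdio.h>

    #define STRIPE_SECTORS 8

    /* Pretend stripe allocator: fails once at stripe 2 to force a retry. */
    static int try_get_stripe(unsigned long long sector)
    {
        static int failed_once;

        if (sector / STRIPE_SECTORS == 2 && !failed_once) {
            failed_once = 1;
            return 0;
        }
        return 1;
    }

    /* Handle stripes in [start, end); *done remembers how many were already
     * processed on earlier attempts.  Returns 1 once the whole bio is done. */
    static int retry_read(unsigned long long start, unsigned long long end,
                          int *done)
    {
        unsigned long long sector;
        int scnt = 0;

        for (sector = start; sector < end; sector += STRIPE_SECTORS, scnt++) {
            if (scnt < *done)
                continue;               /* already did this stripe */
            if (!try_get_stripe(sector)) {
                *done = scnt;           /* park progress, retry later */
                return 0;
            }
            printf("handled stripe at %llu\n", sector);
        }
        return 1;
    }

    int main(void)
    {
        int done = 0;

        while (!retry_read(0, 4 * STRIPE_SECTORS, &done))
            printf("-- waiting, will resume at stripe %d --\n", done);
        printf("bio complete\n");
        return 0;
    }
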
+ */ +static void raid5d(mddev_t *mddev) +{ + struct stripe_head *sh; + raid5_conf_t *conf = mddev_to_conf(mddev); + int handled; + + pr_debug("+++ raid5d active\n"); + + md_check_recovery(mddev); + + handled = 0; + spin_lock_irq(&conf->device_lock); + while (1) { + struct bio *bio; + + if (conf->seq_flush != conf->seq_write) { + int seq = conf->seq_flush; + spin_unlock_irq(&conf->device_lock); + bitmap_unplug(mddev->bitmap); + spin_lock_irq(&conf->device_lock); + conf->seq_write = seq; + activate_bit_delay(conf); + } + + while ((bio = remove_bio_from_retry(conf))) { + int ok; + spin_unlock_irq(&conf->device_lock); + ok = retry_aligned_read(conf, bio); + spin_lock_irq(&conf->device_lock); + if (!ok) + break; + handled++; + } + + sh = __get_priority_stripe(conf); + + if (!sh) + break; + spin_unlock_irq(&conf->device_lock); + + handled++; + handle_stripe(sh, conf->spare_page); + release_stripe(sh); + + spin_lock_irq(&conf->device_lock); + } + pr_debug("%d stripes handled\n", handled); + + spin_unlock_irq(&conf->device_lock); + + async_tx_issue_pending_all(); + unplug_slaves(mddev); + + pr_debug("--- raid5d inactive\n"); +} + +static ssize_t +raid5_show_stripe_cache_size(mddev_t *mddev, char *page) +{ + raid5_conf_t *conf = mddev_to_conf(mddev); + if (conf) + return sprintf(page, "%d\n", conf->max_nr_stripes); + else + return 0; +} + +static ssize_t +raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len) +{ + raid5_conf_t *conf = mddev_to_conf(mddev); + unsigned long new; + int err; + + if (len >= PAGE_SIZE) + return -EINVAL; + if (!conf) + return -ENODEV; + + if (strict_strtoul(page, 10, &new)) + return -EINVAL; + if (new <= 16 || new > 32768) + return -EINVAL; + while (new < conf->max_nr_stripes) { + if (drop_one_stripe(conf)) + conf->max_nr_stripes--; + else + break; + } + err = md_allow_write(mddev); + if (err) + return err; + while (new > conf->max_nr_stripes) { + if (grow_one_stripe(conf)) + conf->max_nr_stripes++; + else break; + } + return len; +} + +static struct md_sysfs_entry +raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR, + raid5_show_stripe_cache_size, + raid5_store_stripe_cache_size); + +static ssize_t +raid5_show_preread_threshold(mddev_t *mddev, char *page) +{ + raid5_conf_t *conf = mddev_to_conf(mddev); + if (conf) + return sprintf(page, "%d\n", conf->bypass_threshold); + else + return 0; +} + +static ssize_t +raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len) +{ + raid5_conf_t *conf = mddev_to_conf(mddev); + unsigned long new; + if (len >= PAGE_SIZE) + return -EINVAL; + if (!conf) + return -ENODEV; + + if (strict_strtoul(page, 10, &new)) + return -EINVAL; + if (new > conf->max_nr_stripes) + return -EINVAL; + conf->bypass_threshold = new; + return len; +} + +static struct md_sysfs_entry +raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold, + S_IRUGO | S_IWUSR, + raid5_show_preread_threshold, + raid5_store_preread_threshold); + +static ssize_t +stripe_cache_active_show(mddev_t *mddev, char *page) +{ + raid5_conf_t *conf = mddev_to_conf(mddev); + if (conf) + return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); + else + return 0; +} + +static struct md_sysfs_entry +raid5_stripecache_active = __ATTR_RO(stripe_cache_active); + +static struct attribute *raid5_attrs[] = { + &raid5_stripecache_size.attr, + &raid5_stripecache_active.attr, + &raid5_preread_bypass_threshold.attr, + NULL, +}; +static struct attribute_group raid5_attrs_group = { + .name = NULL, + .attrs = raid5_attrs, +}; + 
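+/*
+ * The attribute group above exposes these knobs on the array's md kobject;
+ * with an array named md0 they would typically appear under
+ * /sys/block/md0/md/.  A minimal, illustrative userspace sketch (the md0
+ * path is an assumption, not something this file dictates):
+ *
+ *	FILE *f = fopen("/sys/block/md0/md/stripe_cache_size", "w");
+ *	if (f) {
+ *		fprintf(f, "4096\n");	// the store handler accepts 17..32768
+ *		fclose(f);
+ *	}
+ *
+ * stripe_cache_active is read-only, and preread_bypass_threshold accepts
+ * values up to the current stripe_cache_size.
+ */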
+static int run(mddev_t *mddev) +{ + raid5_conf_t *conf; + int raid_disk, memory; + mdk_rdev_t *rdev; + struct disk_info *disk; + struct list_head *tmp; + int working_disks = 0; + + if (mddev->level != 5 && mddev->level != 4 && mddev->level != 6) { + printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n", + mdname(mddev), mddev->level); + return -EIO; + } + + if (mddev->chunk_size < PAGE_SIZE) { + printk(KERN_ERR "md/raid5: chunk_size must be at least " + "PAGE_SIZE but %d < %ld\n", + mddev->chunk_size, PAGE_SIZE); + return -EINVAL; + } + + if (mddev->reshape_position != MaxSector) { + /* Check that we can continue the reshape. + * Currently only disks can change, it must + * increase, and we must be past the point where + * a stripe over-writes itself + */ + sector_t here_new, here_old; + int old_disks; + int max_degraded = (mddev->level == 5 ? 1 : 2); + + if (mddev->new_level != mddev->level || + mddev->new_layout != mddev->layout || + mddev->new_chunk != mddev->chunk_size) { + printk(KERN_ERR "raid5: %s: unsupported reshape " + "required - aborting.\n", + mdname(mddev)); + return -EINVAL; + } + if (mddev->delta_disks <= 0) { + printk(KERN_ERR "raid5: %s: unsupported reshape " + "(reduce disks) required - aborting.\n", + mdname(mddev)); + return -EINVAL; + } + old_disks = mddev->raid_disks - mddev->delta_disks; + /* reshape_position must be on a new-stripe boundary, and one + * further up in new geometry must map after here in old + * geometry. + */ + here_new = mddev->reshape_position; + if (sector_div(here_new, (mddev->chunk_size>>9)* + (mddev->raid_disks - max_degraded))) { + printk(KERN_ERR "raid5: reshape_position not " + "on a stripe boundary\n"); + return -EINVAL; + } + /* here_new is the stripe we will write to */ + here_old = mddev->reshape_position; + sector_div(here_old, (mddev->chunk_size>>9)* + (old_disks-max_degraded)); + /* here_old is the first stripe that we might need to read + * from */ + if (here_new >= here_old) { + /* Reading from the same stripe as writing to - bad */ + printk(KERN_ERR "raid5: reshape_position too early for " + "auto-recovery - aborting.\n"); + return -EINVAL; + } + printk(KERN_INFO "raid5: reshape will continue\n"); + /* OK, we should be able to continue; */ + } + + + mddev->private = kzalloc(sizeof (raid5_conf_t), GFP_KERNEL); + if ((conf = mddev->private) == NULL) + goto abort; + if (mddev->reshape_position == MaxSector) { + conf->previous_raid_disks = conf->raid_disks = mddev->raid_disks; + } else { + conf->raid_disks = mddev->raid_disks; + conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; + } + + conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info), + GFP_KERNEL); + if (!conf->disks) + goto abort; + + conf->mddev = mddev; + + if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) + goto abort; + + if (mddev->level == 6) { + conf->spare_page = alloc_page(GFP_KERNEL); + if (!conf->spare_page) + goto abort; + } + spin_lock_init(&conf->device_lock); + mddev->queue->queue_lock = &conf->device_lock; + init_waitqueue_head(&conf->wait_for_stripe); + init_waitqueue_head(&conf->wait_for_overlap); + INIT_LIST_HEAD(&conf->handle_list); + INIT_LIST_HEAD(&conf->hold_list); + INIT_LIST_HEAD(&conf->delayed_list); + INIT_LIST_HEAD(&conf->bitmap_list); + INIT_LIST_HEAD(&conf->inactive_list); + atomic_set(&conf->active_stripes, 0); + atomic_set(&conf->preread_active_stripes, 0); + atomic_set(&conf->active_aligned_reads, 0); + conf->bypass_threshold = BYPASS_THRESHOLD; + + pr_debug("raid5: run(%s) called.\n", 
mdname(mddev)); + + rdev_for_each(rdev, tmp, mddev) { + raid_disk = rdev->raid_disk; + if (raid_disk >= conf->raid_disks + || raid_disk < 0) + continue; + disk = conf->disks + raid_disk; + + disk->rdev = rdev; + + if (test_bit(In_sync, &rdev->flags)) { + char b[BDEVNAME_SIZE]; + printk(KERN_INFO "raid5: device %s operational as raid" + " disk %d\n", bdevname(rdev->bdev,b), + raid_disk); + working_disks++; + } else + /* Cannot rely on bitmap to complete recovery */ + conf->fullsync = 1; + } + + /* + * 0 for a fully functional array, 1 or 2 for a degraded array. + */ + mddev->degraded = conf->raid_disks - working_disks; + conf->mddev = mddev; + conf->chunk_size = mddev->chunk_size; + conf->level = mddev->level; + if (conf->level == 6) + conf->max_degraded = 2; + else + conf->max_degraded = 1; + conf->algorithm = mddev->layout; + conf->max_nr_stripes = NR_STRIPES; + conf->expand_progress = mddev->reshape_position; + + /* device size must be a multiple of chunk size */ + mddev->size &= ~(mddev->chunk_size/1024 -1); + mddev->resync_max_sectors = mddev->size << 1; + + if (conf->level == 6 && conf->raid_disks < 4) { + printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n", + mdname(mddev), conf->raid_disks); + goto abort; + } + if (!conf->chunk_size || conf->chunk_size % 4) { + printk(KERN_ERR "raid5: invalid chunk size %d for %s\n", + conf->chunk_size, mdname(mddev)); + goto abort; + } + if (conf->algorithm > ALGORITHM_RIGHT_SYMMETRIC) { + printk(KERN_ERR + "raid5: unsupported parity algorithm %d for %s\n", + conf->algorithm, mdname(mddev)); + goto abort; + } + if (mddev->degraded > conf->max_degraded) { + printk(KERN_ERR "raid5: not enough operational devices for %s" + " (%d/%d failed)\n", + mdname(mddev), mddev->degraded, conf->raid_disks); + goto abort; + } + + if (mddev->degraded > 0 && + mddev->recovery_cp != MaxSector) { + if (mddev->ok_start_degraded) + printk(KERN_WARNING + "raid5: starting dirty degraded array: %s" + "- data corruption possible.\n", + mdname(mddev)); + else { + printk(KERN_ERR + "raid5: cannot start dirty degraded array for %s\n", + mdname(mddev)); + goto abort; + } + } + + { + mddev->thread = md_register_thread(raid5d, mddev, "%s_raid5"); + if (!mddev->thread) { + printk(KERN_ERR + "raid5: couldn't allocate thread for %s\n", + mdname(mddev)); + goto abort; + } + } + memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + + conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; + if (grow_stripes(conf, conf->max_nr_stripes)) { + printk(KERN_ERR + "raid5: couldn't allocate %dkB for buffers\n", memory); + shrink_stripes(conf); + md_unregister_thread(mddev->thread); + goto abort; + } else + printk(KERN_INFO "raid5: allocated %dkB for %s\n", + memory, mdname(mddev)); + + if (mddev->degraded == 0) + printk("raid5: raid level %d set %s active with %d out of %d" + " devices, algorithm %d\n", conf->level, mdname(mddev), + mddev->raid_disks-mddev->degraded, mddev->raid_disks, + conf->algorithm); + else + printk(KERN_ALERT "raid5: raid level %d set %s active with %d" + " out of %d devices, algorithm %d\n", conf->level, + mdname(mddev), mddev->raid_disks - mddev->degraded, + mddev->raid_disks, conf->algorithm); + + print_raid5_conf(conf); + + if (conf->expand_progress != MaxSector) { + printk("...ok start reshape thread\n"); + conf->expand_lo = conf->expand_progress; + atomic_set(&conf->reshape_stripes, 0); + clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); + clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); + set_bit(MD_RECOVERY_RESHAPE, 
&mddev->recovery); + set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); + mddev->sync_thread = md_register_thread(md_do_sync, mddev, + "%s_reshape"); + } + + /* read-ahead size must cover two whole stripes, which is + * 2 * (datadisks) * chunksize where 'n' is the number of raid devices + */ + { + int data_disks = conf->previous_raid_disks - conf->max_degraded; + int stripe = data_disks * + (mddev->chunk_size / PAGE_SIZE); + if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) + mddev->queue->backing_dev_info.ra_pages = 2 * stripe; + } + + /* Ok, everything is just fine now */ + if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) + printk(KERN_WARNING + "raid5: failed to create sysfs attributes for %s\n", + mdname(mddev)); + + mddev->queue->unplug_fn = raid5_unplug_device; + mddev->queue->backing_dev_info.congested_data = mddev; + mddev->queue->backing_dev_info.congested_fn = raid5_congested; + + mddev->array_sectors = 2 * mddev->size * (conf->previous_raid_disks - + conf->max_degraded); + + blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec); + + return 0; +abort: + if (conf) { + print_raid5_conf(conf); + safe_put_page(conf->spare_page); + kfree(conf->disks); + kfree(conf->stripe_hashtbl); + kfree(conf); + } + mddev->private = NULL; + printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev)); + return -EIO; +} + + + +static int stop(mddev_t *mddev) +{ + raid5_conf_t *conf = (raid5_conf_t *) mddev->private; + + md_unregister_thread(mddev->thread); + mddev->thread = NULL; + shrink_stripes(conf); + kfree(conf->stripe_hashtbl); + mddev->queue->backing_dev_info.congested_fn = NULL; + blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ + sysfs_remove_group(&mddev->kobj, &raid5_attrs_group); + kfree(conf->disks); + kfree(conf); + mddev->private = NULL; + return 0; +} + +#ifdef DEBUG +static void print_sh(struct seq_file *seq, struct stripe_head *sh) +{ + int i; + + seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n", + (unsigned long long)sh->sector, sh->pd_idx, sh->state); + seq_printf(seq, "sh %llu, count %d.\n", + (unsigned long long)sh->sector, atomic_read(&sh->count)); + seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector); + for (i = 0; i < sh->disks; i++) { + seq_printf(seq, "(cache%d: %p %ld) ", + i, sh->dev[i].page, sh->dev[i].flags); + } + seq_printf(seq, "\n"); +} + +static void printall(struct seq_file *seq, raid5_conf_t *conf) +{ + struct stripe_head *sh; + struct hlist_node *hn; + int i; + + spin_lock_irq(&conf->device_lock); + for (i = 0; i < NR_HASH; i++) { + hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) { + if (sh->raid_conf != conf) + continue; + print_sh(seq, sh); + } + } + spin_unlock_irq(&conf->device_lock); +} +#endif + +static void status(struct seq_file *seq, mddev_t *mddev) +{ + raid5_conf_t *conf = (raid5_conf_t *) mddev->private; + int i; + + seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout); + seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); + for (i = 0; i < conf->raid_disks; i++) + seq_printf (seq, "%s", + conf->disks[i].rdev && + test_bit(In_sync, &conf->disks[i].rdev->flags) ? 
"U" : "_"); + seq_printf (seq, "]"); +#ifdef DEBUG + seq_printf (seq, "\n"); + printall(seq, conf); +#endif +} + +static void print_raid5_conf (raid5_conf_t *conf) +{ + int i; + struct disk_info *tmp; + + printk("RAID5 conf printout:\n"); + if (!conf) { + printk("(conf==NULL)\n"); + return; + } + printk(" --- rd:%d wd:%d\n", conf->raid_disks, + conf->raid_disks - conf->mddev->degraded); + + for (i = 0; i < conf->raid_disks; i++) { + char b[BDEVNAME_SIZE]; + tmp = conf->disks + i; + if (tmp->rdev) + printk(" disk %d, o:%d, dev:%s\n", + i, !test_bit(Faulty, &tmp->rdev->flags), + bdevname(tmp->rdev->bdev,b)); + } +} + +static int raid5_spare_active(mddev_t *mddev) +{ + int i; + raid5_conf_t *conf = mddev->private; + struct disk_info *tmp; + + for (i = 0; i < conf->raid_disks; i++) { + tmp = conf->disks + i; + if (tmp->rdev + && !test_bit(Faulty, &tmp->rdev->flags) + && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { + unsigned long flags; + spin_lock_irqsave(&conf->device_lock, flags); + mddev->degraded--; + spin_unlock_irqrestore(&conf->device_lock, flags); + } + } + print_raid5_conf(conf); + return 0; +} + +static int raid5_remove_disk(mddev_t *mddev, int number) +{ + raid5_conf_t *conf = mddev->private; + int err = 0; + mdk_rdev_t *rdev; + struct disk_info *p = conf->disks + number; + + print_raid5_conf(conf); + rdev = p->rdev; + if (rdev) { + if (test_bit(In_sync, &rdev->flags) || + atomic_read(&rdev->nr_pending)) { + err = -EBUSY; + goto abort; + } + /* Only remove non-faulty devices if recovery + * isn't possible. + */ + if (!test_bit(Faulty, &rdev->flags) && + mddev->degraded <= conf->max_degraded) { + err = -EBUSY; + goto abort; + } + p->rdev = NULL; + synchronize_rcu(); + if (atomic_read(&rdev->nr_pending)) { + /* lost the race, try later */ + err = -EBUSY; + p->rdev = rdev; + } + } +abort: + + print_raid5_conf(conf); + return err; +} + +static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) +{ + raid5_conf_t *conf = mddev->private; + int err = -EEXIST; + int disk; + struct disk_info *p; + int first = 0; + int last = conf->raid_disks - 1; + + if (mddev->degraded > conf->max_degraded) + /* no point adding a device */ + return -EINVAL; + + if (rdev->raid_disk >= 0) + first = last = rdev->raid_disk; + + /* + * find the disk ... but prefer rdev->saved_raid_disk + * if possible. + */ + if (rdev->saved_raid_disk >= 0 && + rdev->saved_raid_disk >= first && + conf->disks[rdev->saved_raid_disk].rdev == NULL) + disk = rdev->saved_raid_disk; + else + disk = first; + for ( ; disk <= last ; disk++) + if ((p=conf->disks + disk)->rdev == NULL) { + clear_bit(In_sync, &rdev->flags); + rdev->raid_disk = disk; + err = 0; + if (rdev->saved_raid_disk != disk) + conf->fullsync = 1; + rcu_assign_pointer(p->rdev, rdev); + break; + } + print_raid5_conf(conf); + return err; +} + +static int raid5_resize(mddev_t *mddev, sector_t sectors) +{ + /* no resync is happening, and there is enough space + * on all devices, so we can resize. + * We need to make sure resync covers any new space. + * If the array is shrinking we should possibly wait until + * any io in the removed space completes, but it hardly seems + * worth it. 
+ */ + raid5_conf_t *conf = mddev_to_conf(mddev); + + sectors &= ~((sector_t)mddev->chunk_size/512 - 1); + mddev->array_sectors = sectors * (mddev->raid_disks + - conf->max_degraded); + set_capacity(mddev->gendisk, mddev->array_sectors); + mddev->changed = 1; + if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) { + mddev->recovery_cp = mddev->size << 1; + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + } + mddev->size = sectors /2; + mddev->resync_max_sectors = sectors; + return 0; +} + +#ifdef CONFIG_MD_RAID5_RESHAPE +static int raid5_check_reshape(mddev_t *mddev) +{ + raid5_conf_t *conf = mddev_to_conf(mddev); + int err; + + if (mddev->delta_disks < 0 || + mddev->new_level != mddev->level) + return -EINVAL; /* Cannot shrink array or change level yet */ + if (mddev->delta_disks == 0) + return 0; /* nothing to do */ + if (mddev->bitmap) + /* Cannot grow a bitmap yet */ + return -EBUSY; + + /* Can only proceed if there are plenty of stripe_heads. + * We need a minimum of one full stripe,, and for sensible progress + * it is best to have about 4 times that. + * If we require 4 times, then the default 256 4K stripe_heads will + * allow for chunk sizes up to 256K, which is probably OK. + * If the chunk size is greater, user-space should request more + * stripe_heads first. + */ + if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes || + (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) { + printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n", + (mddev->chunk_size / STRIPE_SIZE)*4); + return -ENOSPC; + } + + err = resize_stripes(conf, conf->raid_disks + mddev->delta_disks); + if (err) + return err; + + if (mddev->degraded > conf->max_degraded) + return -EINVAL; + /* looks like we might be able to manage this */ + return 0; +} + +static int raid5_start_reshape(mddev_t *mddev) +{ + raid5_conf_t *conf = mddev_to_conf(mddev); + mdk_rdev_t *rdev; + struct list_head *rtmp; + int spares = 0; + int added_devices = 0; + unsigned long flags; + + if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) + return -EBUSY; + + rdev_for_each(rdev, rtmp, mddev) + if (rdev->raid_disk < 0 && + !test_bit(Faulty, &rdev->flags)) + spares++; + + if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) + /* Not enough devices even to make a degraded array + * of that size + */ + return -EINVAL; + + atomic_set(&conf->reshape_stripes, 0); + spin_lock_irq(&conf->device_lock); + conf->previous_raid_disks = conf->raid_disks; + conf->raid_disks += mddev->delta_disks; + conf->expand_progress = 0; + conf->expand_lo = 0; + spin_unlock_irq(&conf->device_lock); + + /* Add some new drives, as many as will fit. + * We know there are enough to make the newly sized array work. 
+ */ + rdev_for_each(rdev, rtmp, mddev) + if (rdev->raid_disk < 0 && + !test_bit(Faulty, &rdev->flags)) { + if (raid5_add_disk(mddev, rdev) == 0) { + char nm[20]; + set_bit(In_sync, &rdev->flags); + added_devices++; + rdev->recovery_offset = 0; + sprintf(nm, "rd%d", rdev->raid_disk); + if (sysfs_create_link(&mddev->kobj, + &rdev->kobj, nm)) + printk(KERN_WARNING + "raid5: failed to create " + " link %s for %s\n", + nm, mdname(mddev)); + } else + break; + } + + spin_lock_irqsave(&conf->device_lock, flags); + mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) - added_devices; + spin_unlock_irqrestore(&conf->device_lock, flags); + mddev->raid_disks = conf->raid_disks; + mddev->reshape_position = 0; + set_bit(MD_CHANGE_DEVS, &mddev->flags); + + clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); + clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); + set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); + set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); + mddev->sync_thread = md_register_thread(md_do_sync, mddev, + "%s_reshape"); + if (!mddev->sync_thread) { + mddev->recovery = 0; + spin_lock_irq(&conf->device_lock); + mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; + conf->expand_progress = MaxSector; + spin_unlock_irq(&conf->device_lock); + return -EAGAIN; + } + md_wakeup_thread(mddev->sync_thread); + md_new_event(mddev); + return 0; +} +#endif + +static void end_reshape(raid5_conf_t *conf) +{ + struct block_device *bdev; + + if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { + conf->mddev->array_sectors = 2 * conf->mddev->size * + (conf->raid_disks - conf->max_degraded); + set_capacity(conf->mddev->gendisk, conf->mddev->array_sectors); + conf->mddev->changed = 1; + + bdev = bdget_disk(conf->mddev->gendisk, 0); + if (bdev) { + mutex_lock(&bdev->bd_inode->i_mutex); + i_size_write(bdev->bd_inode, + (loff_t)conf->mddev->array_sectors << 9); + mutex_unlock(&bdev->bd_inode->i_mutex); + bdput(bdev); + } + spin_lock_irq(&conf->device_lock); + conf->expand_progress = MaxSector; + spin_unlock_irq(&conf->device_lock); + conf->mddev->reshape_position = MaxSector; + + /* read-ahead size must cover two whole stripes, which is + * 2 * (datadisks) * chunksize where 'n' is the number of raid devices + */ + { + int data_disks = conf->previous_raid_disks - conf->max_degraded; + int stripe = data_disks * + (conf->mddev->chunk_size / PAGE_SIZE); + if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) + conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; + } + } +} + +static void raid5_quiesce(mddev_t *mddev, int state) +{ + raid5_conf_t *conf = mddev_to_conf(mddev); + + switch(state) { + case 2: /* resume for a suspend */ + wake_up(&conf->wait_for_overlap); + break; + + case 1: /* stop all writes */ + spin_lock_irq(&conf->device_lock); + conf->quiesce = 1; + wait_event_lock_irq(conf->wait_for_stripe, + atomic_read(&conf->active_stripes) == 0 && + atomic_read(&conf->active_aligned_reads) == 0, + conf->device_lock, /* nothing */); + spin_unlock_irq(&conf->device_lock); + break; + + case 0: /* re-enable writes */ + spin_lock_irq(&conf->device_lock); + conf->quiesce = 0; + wake_up(&conf->wait_for_stripe); + wake_up(&conf->wait_for_overlap); + spin_unlock_irq(&conf->device_lock); + break; + } +} + +static struct mdk_personality raid6_personality = +{ + .name = "raid6", + .level = 6, + .owner = THIS_MODULE, + .make_request = make_request, + .run = run, + .stop = stop, + .status = status, + .error_handler = error, + .hot_add_disk = raid5_add_disk, + .hot_remove_disk= 
raid5_remove_disk, + .spare_active = raid5_spare_active, + .sync_request = sync_request, + .resize = raid5_resize, +#ifdef CONFIG_MD_RAID5_RESHAPE + .check_reshape = raid5_check_reshape, + .start_reshape = raid5_start_reshape, +#endif + .quiesce = raid5_quiesce, +}; +static struct mdk_personality raid5_personality = +{ + .name = "raid5", + .level = 5, + .owner = THIS_MODULE, + .make_request = make_request, + .run = run, + .stop = stop, + .status = status, + .error_handler = error, + .hot_add_disk = raid5_add_disk, + .hot_remove_disk= raid5_remove_disk, + .spare_active = raid5_spare_active, + .sync_request = sync_request, + .resize = raid5_resize, +#ifdef CONFIG_MD_RAID5_RESHAPE + .check_reshape = raid5_check_reshape, + .start_reshape = raid5_start_reshape, +#endif + .quiesce = raid5_quiesce, +}; + +static struct mdk_personality raid4_personality = +{ + .name = "raid4", + .level = 4, + .owner = THIS_MODULE, + .make_request = make_request, + .run = run, + .stop = stop, + .status = status, + .error_handler = error, + .hot_add_disk = raid5_add_disk, + .hot_remove_disk= raid5_remove_disk, + .spare_active = raid5_spare_active, + .sync_request = sync_request, + .resize = raid5_resize, +#ifdef CONFIG_MD_RAID5_RESHAPE + .check_reshape = raid5_check_reshape, + .start_reshape = raid5_start_reshape, +#endif + .quiesce = raid5_quiesce, +}; + +static int __init raid5_init(void) +{ + int e; + + e = raid6_select_algo(); + if ( e ) + return e; + register_md_personality(&raid6_personality); + register_md_personality(&raid5_personality); + register_md_personality(&raid4_personality); + return 0; +} + +static void raid5_exit(void) +{ + unregister_md_personality(&raid6_personality); + unregister_md_personality(&raid5_personality); + unregister_md_personality(&raid4_personality); +} + +module_init(raid5_init); +module_exit(raid5_exit); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("md-personality-4"); /* RAID5 */ +MODULE_ALIAS("md-raid5"); +MODULE_ALIAS("md-raid4"); +MODULE_ALIAS("md-level-5"); +MODULE_ALIAS("md-level-4"); +MODULE_ALIAS("md-personality-8"); /* RAID6 */ +MODULE_ALIAS("md-raid6"); +MODULE_ALIAS("md-level-6"); + +/* This used to be two separate modules, they were: */ +MODULE_ALIAS("raid5"); +MODULE_ALIAS("raid6"); diff --git a/drivers/md/raid6.h b/drivers/md/raid6.h new file mode 100644 index 0000000..98dcde8 --- /dev/null +++ b/drivers/md/raid6.h @@ -0,0 +1,130 @@ +/* -*- linux-c -*- ------------------------------------------------------- * + * + * Copyright 2003 H. Peter Anvin - All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, Inc., 53 Temple Place Ste 330, + * Bostom MA 02111-1307, USA; either version 2 of the License, or + * (at your option) any later version; incorporated herein by reference. + * + * ----------------------------------------------------------------------- */ + +#ifndef LINUX_RAID_RAID6_H +#define LINUX_RAID_RAID6_H + +#ifdef __KERNEL__ + +/* Set to 1 to use kernel-wide empty_zero_page */ +#define RAID6_USE_EMPTY_ZERO_PAGE 0 + +#include <linux/raid/md.h> +#include <linux/raid/raid5.h> + +typedef raid5_conf_t raid6_conf_t; /* Same configuration */ + +/* Additional compute_parity mode -- updates the parity w/o LOCKING */ +#define UPDATE_PARITY 4 + +/* We need a pre-zeroed page... 
if we don't want to use the kernel-provided + one define it here */ +#if RAID6_USE_EMPTY_ZERO_PAGE +# define raid6_empty_zero_page empty_zero_page +#else +extern const char raid6_empty_zero_page[PAGE_SIZE]; +#endif + +#else /* ! __KERNEL__ */ +/* Used for testing in user space */ + +#include <errno.h> +#include <inttypes.h> +#include <limits.h> +#include <stddef.h> +#include <sys/mman.h> +#include <sys/types.h> + +/* Not standard, but glibc defines it */ +#define BITS_PER_LONG __WORDSIZE + +typedef uint8_t u8; +typedef uint16_t u16; +typedef uint32_t u32; +typedef uint64_t u64; + +#ifndef PAGE_SIZE +# define PAGE_SIZE 4096 +#endif +extern const char raid6_empty_zero_page[PAGE_SIZE]; + +#define __init +#define __exit +#define __attribute_const__ __attribute__((const)) +#define noinline __attribute__((noinline)) + +#define preempt_enable() +#define preempt_disable() +#define cpu_has_feature(x) 1 +#define enable_kernel_altivec() +#define disable_kernel_altivec() + +#endif /* __KERNEL__ */ + +/* Routine choices */ +struct raid6_calls { + void (*gen_syndrome)(int, size_t, void **); + int (*valid)(void); /* Returns 1 if this routine set is usable */ + const char *name; /* Name of this routine set */ + int prefer; /* Has special performance attribute */ +}; + +/* Selected algorithm */ +extern struct raid6_calls raid6_call; + +/* Algorithm list */ +extern const struct raid6_calls * const raid6_algos[]; +int raid6_select_algo(void); + +/* Return values from chk_syndrome */ +#define RAID6_OK 0 +#define RAID6_P_BAD 1 +#define RAID6_Q_BAD 2 +#define RAID6_PQ_BAD 3 + +/* Galois field tables */ +extern const u8 raid6_gfmul[256][256] __attribute__((aligned(256))); +extern const u8 raid6_gfexp[256] __attribute__((aligned(256))); +extern const u8 raid6_gfinv[256] __attribute__((aligned(256))); +extern const u8 raid6_gfexi[256] __attribute__((aligned(256))); + +/* Recovery routines */ +void raid6_2data_recov(int disks, size_t bytes, int faila, int failb, void **ptrs); +void raid6_datap_recov(int disks, size_t bytes, int faila, void **ptrs); +void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, void **ptrs); + +/* Some definitions to allow code to be compiled for testing in userspace */ +#ifndef __KERNEL__ + +# define jiffies raid6_jiffies() +# define printk printf +# define GFP_KERNEL 0 +# define __get_free_pages(x,y) ((unsigned long)mmap(NULL, PAGE_SIZE << (y), PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, 0, 0)) +# define free_pages(x,y) munmap((void *)(x), (y)*PAGE_SIZE) + +static inline void cpu_relax(void) +{ + /* Nothing */ +} + +#undef HZ +#define HZ 1000 +static inline uint32_t raid6_jiffies(void) +{ + struct timeval tv; + gettimeofday(&tv, NULL); + return tv.tv_sec*1000 + tv.tv_usec/1000; +} + +#endif /* ! __KERNEL__ */ + +#endif /* LINUX_RAID_RAID6_H */ diff --git a/drivers/md/raid6algos.c b/drivers/md/raid6algos.c new file mode 100644 index 0000000..21987e3 --- /dev/null +++ b/drivers/md/raid6algos.c @@ -0,0 +1,154 @@ +/* -*- linux-c -*- ------------------------------------------------------- * + * + * Copyright 2002 H. Peter Anvin - All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, Inc., 53 Temple Place Ste 330, + * Bostom MA 02111-1307, USA; either version 2 of the License, or + * (at your option) any later version; incorporated herein by reference. 
+ * + * ----------------------------------------------------------------------- */ + +/* + * raid6algos.c + * + * Algorithm list and algorithm selection for RAID-6 + */ + +#include "raid6.h" +#ifndef __KERNEL__ +#include <sys/mman.h> +#include <stdio.h> +#endif + +struct raid6_calls raid6_call; + +/* Various routine sets */ +extern const struct raid6_calls raid6_intx1; +extern const struct raid6_calls raid6_intx2; +extern const struct raid6_calls raid6_intx4; +extern const struct raid6_calls raid6_intx8; +extern const struct raid6_calls raid6_intx16; +extern const struct raid6_calls raid6_intx32; +extern const struct raid6_calls raid6_mmxx1; +extern const struct raid6_calls raid6_mmxx2; +extern const struct raid6_calls raid6_sse1x1; +extern const struct raid6_calls raid6_sse1x2; +extern const struct raid6_calls raid6_sse2x1; +extern const struct raid6_calls raid6_sse2x2; +extern const struct raid6_calls raid6_sse2x4; +extern const struct raid6_calls raid6_altivec1; +extern const struct raid6_calls raid6_altivec2; +extern const struct raid6_calls raid6_altivec4; +extern const struct raid6_calls raid6_altivec8; + +const struct raid6_calls * const raid6_algos[] = { + &raid6_intx1, + &raid6_intx2, + &raid6_intx4, + &raid6_intx8, +#if defined(__ia64__) + &raid6_intx16, + &raid6_intx32, +#endif +#if defined(__i386__) && !defined(__arch_um__) + &raid6_mmxx1, + &raid6_mmxx2, + &raid6_sse1x1, + &raid6_sse1x2, + &raid6_sse2x1, + &raid6_sse2x2, +#endif +#if defined(__x86_64__) && !defined(__arch_um__) + &raid6_sse2x1, + &raid6_sse2x2, + &raid6_sse2x4, +#endif +#ifdef CONFIG_ALTIVEC + &raid6_altivec1, + &raid6_altivec2, + &raid6_altivec4, + &raid6_altivec8, +#endif + NULL +}; + +#ifdef __KERNEL__ +#define RAID6_TIME_JIFFIES_LG2 4 +#else +/* Need more time to be stable in userspace */ +#define RAID6_TIME_JIFFIES_LG2 9 +#endif + +/* Try to pick the best algorithm */ +/* This code uses the gfmul table as convenient data set to abuse */ + +int __init raid6_select_algo(void) +{ + const struct raid6_calls * const * algo; + const struct raid6_calls * best; + char *syndromes; + void *dptrs[(65536/PAGE_SIZE)+2]; + int i, disks; + unsigned long perf, bestperf; + int bestprefer; + unsigned long j0, j1; + + disks = (65536/PAGE_SIZE)+2; + for ( i = 0 ; i < disks-2 ; i++ ) { + dptrs[i] = ((char *)raid6_gfmul) + PAGE_SIZE*i; + } + + /* Normal code - use a 2-page allocation to avoid D$ conflict */ + syndromes = (void *) __get_free_pages(GFP_KERNEL, 1); + + if ( !syndromes ) { + printk("raid6: Yikes! No memory available.\n"); + return -ENOMEM; + } + + dptrs[disks-2] = syndromes; + dptrs[disks-1] = syndromes + PAGE_SIZE; + + bestperf = 0; bestprefer = 0; best = NULL; + + for ( algo = raid6_algos ; *algo ; algo++ ) { + if ( !(*algo)->valid || (*algo)->valid() ) { + perf = 0; + + preempt_disable(); + j0 = jiffies; + while ( (j1 = jiffies) == j0 ) + cpu_relax(); + while (time_before(jiffies, + j1 + (1<<RAID6_TIME_JIFFIES_LG2))) { + (*algo)->gen_syndrome(disks, PAGE_SIZE, dptrs); + perf++; + } + preempt_enable(); + + if ( (*algo)->prefer > bestprefer || + ((*algo)->prefer == bestprefer && + perf > bestperf) ) { + best = *algo; + bestprefer = best->prefer; + bestperf = perf; + } + printk("raid6: %-8s %5ld MB/s\n", (*algo)->name, + (perf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2)); + } + } + + if (best) { + printk("raid6: using algorithm %s (%ld MB/s)\n", + best->name, + (bestperf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2)); + raid6_call = *best; + } else + printk("raid6: Yikes! 
No algorithm found!\n"); + + free_pages((unsigned long)syndromes, 1); + + return best ? 0 : -EINVAL; +} diff --git a/drivers/md/raid6altivec.uc b/drivers/md/raid6altivec.uc new file mode 100644 index 0000000..b9afd35 --- /dev/null +++ b/drivers/md/raid6altivec.uc @@ -0,0 +1,130 @@ +/* -*- linux-c -*- ------------------------------------------------------- * + * + * Copyright 2002-2004 H. Peter Anvin - All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, Inc., 53 Temple Place Ste 330, + * Bostom MA 02111-1307, USA; either version 2 of the License, or + * (at your option) any later version; incorporated herein by reference. + * + * ----------------------------------------------------------------------- */ + +/* + * raid6altivec$#.c + * + * $#-way unrolled portable integer math RAID-6 instruction set + * + * This file is postprocessed using unroll.pl + * + * <benh> hpa: in process, + * you can just "steal" the vec unit with enable_kernel_altivec() (but + * bracked this with preempt_disable/enable or in a lock) + */ + +#include "raid6.h" + +#ifdef CONFIG_ALTIVEC + +#include <altivec.h> +#ifdef __KERNEL__ +# include <asm/system.h> +# include <asm/cputable.h> +#endif + +/* + * This is the C data type to use. We use a vector of + * signed char so vec_cmpgt() will generate the right + * instruction. + */ + +typedef vector signed char unative_t; + +#define NBYTES(x) ((vector signed char) {x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x}) +#define NSIZE sizeof(unative_t) + +/* + * The SHLBYTE() operation shifts each byte left by 1, *not* + * rolling over into the next byte + */ +static inline __attribute_const__ unative_t SHLBYTE(unative_t v) +{ + return vec_add(v,v); +} + +/* + * The MASK() operation returns 0xFF in any byte for which the high + * bit is 1, 0x00 for any byte for which the high bit is 0. 
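+ * For instance a byte of 0x80 maps to 0xff and 0x7f maps to 0x00; the
+ * syndrome loop ANDs that mask with 0x1d so the GF(2^8) reduction is
+ * folded only into the bytes where SHLBYTE() overflowed.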
+ */ +static inline __attribute_const__ unative_t MASK(unative_t v) +{ + unative_t zv = NBYTES(0); + + /* vec_cmpgt returns a vector bool char; thus the need for the cast */ + return (unative_t)vec_cmpgt(zv, v); +} + + +/* This is noinline to make damned sure that gcc doesn't move any of the + Altivec code around the enable/disable code */ +static void noinline +raid6_altivec$#_gen_syndrome_real(int disks, size_t bytes, void **ptrs) +{ + u8 **dptr = (u8 **)ptrs; + u8 *p, *q; + int d, z, z0; + + unative_t wd$$, wq$$, wp$$, w1$$, w2$$; + unative_t x1d = NBYTES(0x1d); + + z0 = disks - 3; /* Highest data disk */ + p = dptr[z0+1]; /* XOR parity */ + q = dptr[z0+2]; /* RS syndrome */ + + for ( d = 0 ; d < bytes ; d += NSIZE*$# ) { + wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; + for ( z = z0-1 ; z >= 0 ; z-- ) { + wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; + wp$$ = vec_xor(wp$$, wd$$); + w2$$ = MASK(wq$$); + w1$$ = SHLBYTE(wq$$); + w2$$ = vec_and(w2$$, x1d); + w1$$ = vec_xor(w1$$, w2$$); + wq$$ = vec_xor(w1$$, wd$$); + } + *(unative_t *)&p[d+NSIZE*$$] = wp$$; + *(unative_t *)&q[d+NSIZE*$$] = wq$$; + } +} + +static void raid6_altivec$#_gen_syndrome(int disks, size_t bytes, void **ptrs) +{ + preempt_disable(); + enable_kernel_altivec(); + + raid6_altivec$#_gen_syndrome_real(disks, bytes, ptrs); + + preempt_enable(); +} + +int raid6_have_altivec(void); +#if $# == 1 +int raid6_have_altivec(void) +{ + /* This assumes either all CPUs have Altivec or none does */ +# ifdef __KERNEL__ + return cpu_has_feature(CPU_FTR_ALTIVEC); +# else + return 1; +# endif +} +#endif + +const struct raid6_calls raid6_altivec$# = { + raid6_altivec$#_gen_syndrome, + raid6_have_altivec, + "altivecx$#", + 0 +}; + +#endif /* CONFIG_ALTIVEC */ diff --git a/drivers/md/raid6int.uc b/drivers/md/raid6int.uc new file mode 100644 index 0000000..ad004ce --- /dev/null +++ b/drivers/md/raid6int.uc @@ -0,0 +1,117 @@ +/* -*- linux-c -*- ------------------------------------------------------- * + * + * Copyright 2002-2004 H. Peter Anvin - All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, Inc., 53 Temple Place Ste 330, + * Bostom MA 02111-1307, USA; either version 2 of the License, or + * (at your option) any later version; incorporated herein by reference. + * + * ----------------------------------------------------------------------- */ + +/* + * raid6int$#.c + * + * $#-way unrolled portable integer math RAID-6 instruction set + * + * This file is postprocessed using unroll.pl + */ + +#include "raid6.h" + +/* + * This is the C data type to use + */ + +/* Change this from BITS_PER_LONG if there is something better... */ +#if BITS_PER_LONG == 64 +# define NBYTES(x) ((x) * 0x0101010101010101UL) +# define NSIZE 8 +# define NSHIFT 3 +# define NSTRING "64" +typedef u64 unative_t; +#else +# define NBYTES(x) ((x) * 0x01010101U) +# define NSIZE 4 +# define NSHIFT 2 +# define NSTRING "32" +typedef u32 unative_t; +#endif + + + +/* + * IA-64 wants insane amounts of unrolling. On other architectures that + * is just a waste of space. + */ +#if ($# <= 8) || defined(__ia64__) + + +/* + * These sub-operations are separate inlines since they can sometimes be + * specially optimized using architecture-specific hacks. 
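+ * Taken together they implement multiplication by {02} in GF(2^8): for a
+ * byte such as 0x8f, SHLBYTE() yields 0x1e, MASK() yields 0xff, and
+ * 0x1e ^ (0xff & 0x1d) = 0x03, i.e. 2 * 0x8f reduced modulo the generator
+ * polynomial 0x11d.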
+ */ + +/* + * The SHLBYTE() operation shifts each byte left by 1, *not* + * rolling over into the next byte + */ +static inline __attribute_const__ unative_t SHLBYTE(unative_t v) +{ + unative_t vv; + + vv = (v << 1) & NBYTES(0xfe); + return vv; +} + +/* + * The MASK() operation returns 0xFF in any byte for which the high + * bit is 1, 0x00 for any byte for which the high bit is 0. + */ +static inline __attribute_const__ unative_t MASK(unative_t v) +{ + unative_t vv; + + vv = v & NBYTES(0x80); + vv = (vv << 1) - (vv >> 7); /* Overflow on the top bit is OK */ + return vv; +} + + +static void raid6_int$#_gen_syndrome(int disks, size_t bytes, void **ptrs) +{ + u8 **dptr = (u8 **)ptrs; + u8 *p, *q; + int d, z, z0; + + unative_t wd$$, wq$$, wp$$, w1$$, w2$$; + + z0 = disks - 3; /* Highest data disk */ + p = dptr[z0+1]; /* XOR parity */ + q = dptr[z0+2]; /* RS syndrome */ + + for ( d = 0 ; d < bytes ; d += NSIZE*$# ) { + wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; + for ( z = z0-1 ; z >= 0 ; z-- ) { + wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; + wp$$ ^= wd$$; + w2$$ = MASK(wq$$); + w1$$ = SHLBYTE(wq$$); + w2$$ &= NBYTES(0x1d); + w1$$ ^= w2$$; + wq$$ = w1$$ ^ wd$$; + } + *(unative_t *)&p[d+NSIZE*$$] = wp$$; + *(unative_t *)&q[d+NSIZE*$$] = wq$$; + } +} + +const struct raid6_calls raid6_intx$# = { + raid6_int$#_gen_syndrome, + NULL, /* always valid */ + "int" NSTRING "x$#", + 0 +}; + +#endif diff --git a/drivers/md/raid6mmx.c b/drivers/md/raid6mmx.c new file mode 100644 index 0000000..d4e4a1b --- /dev/null +++ b/drivers/md/raid6mmx.c @@ -0,0 +1,142 @@ +/* -*- linux-c -*- ------------------------------------------------------- * + * + * Copyright 2002 H. Peter Anvin - All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, Inc., 53 Temple Place Ste 330, + * Bostom MA 02111-1307, USA; either version 2 of the License, or + * (at your option) any later version; incorporated herein by reference. 
+ * + * ----------------------------------------------------------------------- */ + +/* + * raid6mmx.c + * + * MMX implementation of RAID-6 syndrome functions + */ + +#if defined(__i386__) && !defined(__arch_um__) + +#include "raid6.h" +#include "raid6x86.h" + +/* Shared with raid6sse1.c */ +const struct raid6_mmx_constants { + u64 x1d; +} raid6_mmx_constants = { + 0x1d1d1d1d1d1d1d1dULL, +}; + +static int raid6_have_mmx(void) +{ + /* Not really "boot_cpu" but "all_cpus" */ + return boot_cpu_has(X86_FEATURE_MMX); +} + +/* + * Plain MMX implementation + */ +static void raid6_mmx1_gen_syndrome(int disks, size_t bytes, void **ptrs) +{ + u8 **dptr = (u8 **)ptrs; + u8 *p, *q; + int d, z, z0; + + z0 = disks - 3; /* Highest data disk */ + p = dptr[z0+1]; /* XOR parity */ + q = dptr[z0+2]; /* RS syndrome */ + + kernel_fpu_begin(); + + asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d)); + asm volatile("pxor %mm5,%mm5"); /* Zero temp */ + + for ( d = 0 ; d < bytes ; d += 8 ) { + asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */ + asm volatile("movq %mm2,%mm4"); /* Q[0] */ + for ( z = z0-1 ; z >= 0 ; z-- ) { + asm volatile("movq %0,%%mm6" : : "m" (dptr[z][d])); + asm volatile("pcmpgtb %mm4,%mm5"); + asm volatile("paddb %mm4,%mm4"); + asm volatile("pand %mm0,%mm5"); + asm volatile("pxor %mm5,%mm4"); + asm volatile("pxor %mm5,%mm5"); + asm volatile("pxor %mm6,%mm2"); + asm volatile("pxor %mm6,%mm4"); + } + asm volatile("movq %%mm2,%0" : "=m" (p[d])); + asm volatile("pxor %mm2,%mm2"); + asm volatile("movq %%mm4,%0" : "=m" (q[d])); + asm volatile("pxor %mm4,%mm4"); + } + + kernel_fpu_end(); +} + +const struct raid6_calls raid6_mmxx1 = { + raid6_mmx1_gen_syndrome, + raid6_have_mmx, + "mmxx1", + 0 +}; + +/* + * Unrolled-by-2 MMX implementation + */ +static void raid6_mmx2_gen_syndrome(int disks, size_t bytes, void **ptrs) +{ + u8 **dptr = (u8 **)ptrs; + u8 *p, *q; + int d, z, z0; + + z0 = disks - 3; /* Highest data disk */ + p = dptr[z0+1]; /* XOR parity */ + q = dptr[z0+2]; /* RS syndrome */ + + kernel_fpu_begin(); + + asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d)); + asm volatile("pxor %mm5,%mm5"); /* Zero temp */ + asm volatile("pxor %mm7,%mm7"); /* Zero temp */ + + for ( d = 0 ; d < bytes ; d += 16 ) { + asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */ + asm volatile("movq %0,%%mm3" : : "m" (dptr[z0][d+8])); + asm volatile("movq %mm2,%mm4"); /* Q[0] */ + asm volatile("movq %mm3,%mm6"); /* Q[1] */ + for ( z = z0-1 ; z >= 0 ; z-- ) { + asm volatile("pcmpgtb %mm4,%mm5"); + asm volatile("pcmpgtb %mm6,%mm7"); + asm volatile("paddb %mm4,%mm4"); + asm volatile("paddb %mm6,%mm6"); + asm volatile("pand %mm0,%mm5"); + asm volatile("pand %mm0,%mm7"); + asm volatile("pxor %mm5,%mm4"); + asm volatile("pxor %mm7,%mm6"); + asm volatile("movq %0,%%mm5" : : "m" (dptr[z][d])); + asm volatile("movq %0,%%mm7" : : "m" (dptr[z][d+8])); + asm volatile("pxor %mm5,%mm2"); + asm volatile("pxor %mm7,%mm3"); + asm volatile("pxor %mm5,%mm4"); + asm volatile("pxor %mm7,%mm6"); + asm volatile("pxor %mm5,%mm5"); + asm volatile("pxor %mm7,%mm7"); + } + asm volatile("movq %%mm2,%0" : "=m" (p[d])); + asm volatile("movq %%mm3,%0" : "=m" (p[d+8])); + asm volatile("movq %%mm4,%0" : "=m" (q[d])); + asm volatile("movq %%mm6,%0" : "=m" (q[d+8])); + } + + kernel_fpu_end(); +} + +const struct raid6_calls raid6_mmxx2 = { + raid6_mmx2_gen_syndrome, + raid6_have_mmx, + "mmxx2", + 0 +}; + +#endif diff --git a/drivers/md/raid6recov.c b/drivers/md/raid6recov.c new file mode 100644 index 
0000000..a8c4d94 --- /dev/null +++ b/drivers/md/raid6recov.c @@ -0,0 +1,133 @@ +/* -*- linux-c -*- ------------------------------------------------------- * + * + * Copyright 2002 H. Peter Anvin - All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, Inc., 53 Temple Place Ste 330, + * Bostom MA 02111-1307, USA; either version 2 of the License, or + * (at your option) any later version; incorporated herein by reference. + * + * ----------------------------------------------------------------------- */ + +/* + * raid6recov.c + * + * RAID-6 data recovery in dual failure mode. In single failure mode, + * use the RAID-5 algorithm (or, in the case of Q failure, just reconstruct + * the syndrome.) + */ + +#include "raid6.h" + +/* Recover two failed data blocks. */ +void raid6_2data_recov(int disks, size_t bytes, int faila, int failb, + void **ptrs) +{ + u8 *p, *q, *dp, *dq; + u8 px, qx, db; + const u8 *pbmul; /* P multiplier table for B data */ + const u8 *qmul; /* Q multiplier table (for both) */ + + p = (u8 *)ptrs[disks-2]; + q = (u8 *)ptrs[disks-1]; + + /* Compute syndrome with zero for the missing data pages + Use the dead data pages as temporary storage for + delta p and delta q */ + dp = (u8 *)ptrs[faila]; + ptrs[faila] = (void *)raid6_empty_zero_page; + ptrs[disks-2] = dp; + dq = (u8 *)ptrs[failb]; + ptrs[failb] = (void *)raid6_empty_zero_page; + ptrs[disks-1] = dq; + + raid6_call.gen_syndrome(disks, bytes, ptrs); + + /* Restore pointer table */ + ptrs[faila] = dp; + ptrs[failb] = dq; + ptrs[disks-2] = p; + ptrs[disks-1] = q; + + /* Now, pick the proper data tables */ + pbmul = raid6_gfmul[raid6_gfexi[failb-faila]]; + qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]]; + + /* Now do it... */ + while ( bytes-- ) { + px = *p ^ *dp; + qx = qmul[*q ^ *dq]; + *dq++ = db = pbmul[px] ^ qx; /* Reconstructed B */ + *dp++ = db ^ px; /* Reconstructed A */ + p++; q++; + } +} + + + + +/* Recover failure of one data block plus the P block */ +void raid6_datap_recov(int disks, size_t bytes, int faila, void **ptrs) +{ + u8 *p, *q, *dq; + const u8 *qmul; /* Q multiplier table */ + + p = (u8 *)ptrs[disks-2]; + q = (u8 *)ptrs[disks-1]; + + /* Compute syndrome with zero for the missing data page + Use the dead data page as temporary storage for delta q */ + dq = (u8 *)ptrs[faila]; + ptrs[faila] = (void *)raid6_empty_zero_page; + ptrs[disks-1] = dq; + + raid6_call.gen_syndrome(disks, bytes, ptrs); + + /* Restore pointer table */ + ptrs[faila] = dq; + ptrs[disks-1] = q; + + /* Now, pick the proper data tables */ + qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]]]; + + /* Now do it... */ + while ( bytes-- ) { + *p++ ^= *dq = qmul[*q ^ *dq]; + q++; dq++; + } +} + + +#ifndef __KERNEL__ /* Testing only */ + +/* Recover two failed blocks. */ +void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, void **ptrs) +{ + if ( faila > failb ) { + int tmp = faila; + faila = failb; + failb = tmp; + } + + if ( failb == disks-1 ) { + if ( faila == disks-2 ) { + /* P+Q failure. Just rebuild the syndrome. */ + raid6_call.gen_syndrome(disks, bytes, ptrs); + } else { + /* data+Q failure. Reconstruct data from P, + then rebuild syndrome. */ + /* NOT IMPLEMENTED - equivalent to RAID-5 */ + } + } else { + if ( failb == disks-2 ) { + /* data+P failure. */ + raid6_datap_recov(disks, bytes, faila, ptrs); + } else { + /* data+data failure. 
*/ + raid6_2data_recov(disks, bytes, faila, failb, ptrs); + } + } +} + +#endif diff --git a/drivers/md/raid6sse1.c b/drivers/md/raid6sse1.c new file mode 100644 index 0000000..0666237 --- /dev/null +++ b/drivers/md/raid6sse1.c @@ -0,0 +1,162 @@ +/* -*- linux-c -*- ------------------------------------------------------- * + * + * Copyright 2002 H. Peter Anvin - All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, Inc., 53 Temple Place Ste 330, + * Bostom MA 02111-1307, USA; either version 2 of the License, or + * (at your option) any later version; incorporated herein by reference. + * + * ----------------------------------------------------------------------- */ + +/* + * raid6sse1.c + * + * SSE-1/MMXEXT implementation of RAID-6 syndrome functions + * + * This is really an MMX implementation, but it requires SSE-1 or + * AMD MMXEXT for prefetch support and a few other features. The + * support for nontemporal memory accesses is enough to make this + * worthwhile as a separate implementation. + */ + +#if defined(__i386__) && !defined(__arch_um__) + +#include "raid6.h" +#include "raid6x86.h" + +/* Defined in raid6mmx.c */ +extern const struct raid6_mmx_constants { + u64 x1d; +} raid6_mmx_constants; + +static int raid6_have_sse1_or_mmxext(void) +{ + /* Not really boot_cpu but "all_cpus" */ + return boot_cpu_has(X86_FEATURE_MMX) && + (boot_cpu_has(X86_FEATURE_XMM) || + boot_cpu_has(X86_FEATURE_MMXEXT)); +} + +/* + * Plain SSE1 implementation + */ +static void raid6_sse11_gen_syndrome(int disks, size_t bytes, void **ptrs) +{ + u8 **dptr = (u8 **)ptrs; + u8 *p, *q; + int d, z, z0; + + z0 = disks - 3; /* Highest data disk */ + p = dptr[z0+1]; /* XOR parity */ + q = dptr[z0+2]; /* RS syndrome */ + + kernel_fpu_begin(); + + asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d)); + asm volatile("pxor %mm5,%mm5"); /* Zero temp */ + + for ( d = 0 ; d < bytes ; d += 8 ) { + asm volatile("prefetchnta %0" : : "m" (dptr[z0][d])); + asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */ + asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d])); + asm volatile("movq %mm2,%mm4"); /* Q[0] */ + asm volatile("movq %0,%%mm6" : : "m" (dptr[z0-1][d])); + for ( z = z0-2 ; z >= 0 ; z-- ) { + asm volatile("prefetchnta %0" : : "m" (dptr[z][d])); + asm volatile("pcmpgtb %mm4,%mm5"); + asm volatile("paddb %mm4,%mm4"); + asm volatile("pand %mm0,%mm5"); + asm volatile("pxor %mm5,%mm4"); + asm volatile("pxor %mm5,%mm5"); + asm volatile("pxor %mm6,%mm2"); + asm volatile("pxor %mm6,%mm4"); + asm volatile("movq %0,%%mm6" : : "m" (dptr[z][d])); + } + asm volatile("pcmpgtb %mm4,%mm5"); + asm volatile("paddb %mm4,%mm4"); + asm volatile("pand %mm0,%mm5"); + asm volatile("pxor %mm5,%mm4"); + asm volatile("pxor %mm5,%mm5"); + asm volatile("pxor %mm6,%mm2"); + asm volatile("pxor %mm6,%mm4"); + + asm volatile("movntq %%mm2,%0" : "=m" (p[d])); + asm volatile("movntq %%mm4,%0" : "=m" (q[d])); + } + + asm volatile("sfence" : : : "memory"); + kernel_fpu_end(); +} + +const struct raid6_calls raid6_sse1x1 = { + raid6_sse11_gen_syndrome, + raid6_have_sse1_or_mmxext, + "sse1x1", + 1 /* Has cache hints */ +}; + +/* + * Unrolled-by-2 SSE1 implementation + */ +static void raid6_sse12_gen_syndrome(int disks, size_t bytes, void **ptrs) +{ + u8 **dptr = (u8 **)ptrs; + u8 *p, *q; + int d, z, z0; + + z0 = disks - 3; /* Highest data disk */ + p = dptr[z0+1]; /* XOR parity */ + q = 
dptr[z0+2]; /* RS syndrome */ + + kernel_fpu_begin(); + + asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d)); + asm volatile("pxor %mm5,%mm5"); /* Zero temp */ + asm volatile("pxor %mm7,%mm7"); /* Zero temp */ + + /* We uniformly assume a single prefetch covers at least 16 bytes */ + for ( d = 0 ; d < bytes ; d += 16 ) { + asm volatile("prefetchnta %0" : : "m" (dptr[z0][d])); + asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */ + asm volatile("movq %0,%%mm3" : : "m" (dptr[z0][d+8])); /* P[1] */ + asm volatile("movq %mm2,%mm4"); /* Q[0] */ + asm volatile("movq %mm3,%mm6"); /* Q[1] */ + for ( z = z0-1 ; z >= 0 ; z-- ) { + asm volatile("prefetchnta %0" : : "m" (dptr[z][d])); + asm volatile("pcmpgtb %mm4,%mm5"); + asm volatile("pcmpgtb %mm6,%mm7"); + asm volatile("paddb %mm4,%mm4"); + asm volatile("paddb %mm6,%mm6"); + asm volatile("pand %mm0,%mm5"); + asm volatile("pand %mm0,%mm7"); + asm volatile("pxor %mm5,%mm4"); + asm volatile("pxor %mm7,%mm6"); + asm volatile("movq %0,%%mm5" : : "m" (dptr[z][d])); + asm volatile("movq %0,%%mm7" : : "m" (dptr[z][d+8])); + asm volatile("pxor %mm5,%mm2"); + asm volatile("pxor %mm7,%mm3"); + asm volatile("pxor %mm5,%mm4"); + asm volatile("pxor %mm7,%mm6"); + asm volatile("pxor %mm5,%mm5"); + asm volatile("pxor %mm7,%mm7"); + } + asm volatile("movntq %%mm2,%0" : "=m" (p[d])); + asm volatile("movntq %%mm3,%0" : "=m" (p[d+8])); + asm volatile("movntq %%mm4,%0" : "=m" (q[d])); + asm volatile("movntq %%mm6,%0" : "=m" (q[d+8])); + } + + asm volatile("sfence" : :: "memory"); + kernel_fpu_end(); +} + +const struct raid6_calls raid6_sse1x2 = { + raid6_sse12_gen_syndrome, + raid6_have_sse1_or_mmxext, + "sse1x2", + 1 /* Has cache hints */ +}; + +#endif diff --git a/drivers/md/raid6sse2.c b/drivers/md/raid6sse2.c new file mode 100644 index 0000000..b034ad8 --- /dev/null +++ b/drivers/md/raid6sse2.c @@ -0,0 +1,262 @@ +/* -*- linux-c -*- ------------------------------------------------------- * + * + * Copyright 2002 H. Peter Anvin - All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, Inc., 53 Temple Place Ste 330, + * Bostom MA 02111-1307, USA; either version 2 of the License, or + * (at your option) any later version; incorporated herein by reference. 
+ * + * ----------------------------------------------------------------------- */ + +/* + * raid6sse2.c + * + * SSE-2 implementation of RAID-6 syndrome functions + * + */ + +#if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__) + +#include "raid6.h" +#include "raid6x86.h" + +static const struct raid6_sse_constants { + u64 x1d[2]; +} raid6_sse_constants __attribute__((aligned(16))) = { + { 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL }, +}; + +static int raid6_have_sse2(void) +{ + /* Not really boot_cpu but "all_cpus" */ + return boot_cpu_has(X86_FEATURE_MMX) && + boot_cpu_has(X86_FEATURE_FXSR) && + boot_cpu_has(X86_FEATURE_XMM) && + boot_cpu_has(X86_FEATURE_XMM2); +} + +/* + * Plain SSE2 implementation + */ +static void raid6_sse21_gen_syndrome(int disks, size_t bytes, void **ptrs) +{ + u8 **dptr = (u8 **)ptrs; + u8 *p, *q; + int d, z, z0; + + z0 = disks - 3; /* Highest data disk */ + p = dptr[z0+1]; /* XOR parity */ + q = dptr[z0+2]; /* RS syndrome */ + + kernel_fpu_begin(); + + asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0])); + asm volatile("pxor %xmm5,%xmm5"); /* Zero temp */ + + for ( d = 0 ; d < bytes ; d += 16 ) { + asm volatile("prefetchnta %0" : : "m" (dptr[z0][d])); + asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d])); /* P[0] */ + asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d])); + asm volatile("movdqa %xmm2,%xmm4"); /* Q[0] */ + asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z0-1][d])); + for ( z = z0-2 ; z >= 0 ; z-- ) { + asm volatile("prefetchnta %0" : : "m" (dptr[z][d])); + asm volatile("pcmpgtb %xmm4,%xmm5"); + asm volatile("paddb %xmm4,%xmm4"); + asm volatile("pand %xmm0,%xmm5"); + asm volatile("pxor %xmm5,%xmm4"); + asm volatile("pxor %xmm5,%xmm5"); + asm volatile("pxor %xmm6,%xmm2"); + asm volatile("pxor %xmm6,%xmm4"); + asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z][d])); + } + asm volatile("pcmpgtb %xmm4,%xmm5"); + asm volatile("paddb %xmm4,%xmm4"); + asm volatile("pand %xmm0,%xmm5"); + asm volatile("pxor %xmm5,%xmm4"); + asm volatile("pxor %xmm5,%xmm5"); + asm volatile("pxor %xmm6,%xmm2"); + asm volatile("pxor %xmm6,%xmm4"); + + asm volatile("movntdq %%xmm2,%0" : "=m" (p[d])); + asm volatile("pxor %xmm2,%xmm2"); + asm volatile("movntdq %%xmm4,%0" : "=m" (q[d])); + asm volatile("pxor %xmm4,%xmm4"); + } + + asm volatile("sfence" : : : "memory"); + kernel_fpu_end(); +} + +const struct raid6_calls raid6_sse2x1 = { + raid6_sse21_gen_syndrome, + raid6_have_sse2, + "sse2x1", + 1 /* Has cache hints */ +}; + +/* + * Unrolled-by-2 SSE2 implementation + */ +static void raid6_sse22_gen_syndrome(int disks, size_t bytes, void **ptrs) +{ + u8 **dptr = (u8 **)ptrs; + u8 *p, *q; + int d, z, z0; + + z0 = disks - 3; /* Highest data disk */ + p = dptr[z0+1]; /* XOR parity */ + q = dptr[z0+2]; /* RS syndrome */ + + kernel_fpu_begin(); + + asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0])); + asm volatile("pxor %xmm5,%xmm5"); /* Zero temp */ + asm volatile("pxor %xmm7,%xmm7"); /* Zero temp */ + + /* We uniformly assume a single prefetch covers at least 32 bytes */ + for ( d = 0 ; d < bytes ; d += 32 ) { + asm volatile("prefetchnta %0" : : "m" (dptr[z0][d])); + asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d])); /* P[0] */ + asm volatile("movdqa %0,%%xmm3" : : "m" (dptr[z0][d+16])); /* P[1] */ + asm volatile("movdqa %xmm2,%xmm4"); /* Q[0] */ + asm volatile("movdqa %xmm3,%xmm6"); /* Q[1] */ + for ( z = z0-1 ; z >= 0 ; z-- ) { + asm volatile("prefetchnta %0" : : "m" (dptr[z][d])); + asm volatile("pcmpgtb %xmm4,%xmm5"); 
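+			/* Together with the pcmpgtb above, the paddb/pand/pxor
+			 * below are the SSE2 form of the portable MASK()/SHLBYTE()
+			 * idiom: both running Q syndromes are multiplied by {02}
+			 * in GF(2^8) before the next data disk's block is XORed in.
+			 */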
+ asm volatile("pcmpgtb %xmm6,%xmm7"); + asm volatile("paddb %xmm4,%xmm4"); + asm volatile("paddb %xmm6,%xmm6"); + asm volatile("pand %xmm0,%xmm5"); + asm volatile("pand %xmm0,%xmm7"); + asm volatile("pxor %xmm5,%xmm4"); + asm volatile("pxor %xmm7,%xmm6"); + asm volatile("movdqa %0,%%xmm5" : : "m" (dptr[z][d])); + asm volatile("movdqa %0,%%xmm7" : : "m" (dptr[z][d+16])); + asm volatile("pxor %xmm5,%xmm2"); + asm volatile("pxor %xmm7,%xmm3"); + asm volatile("pxor %xmm5,%xmm4"); + asm volatile("pxor %xmm7,%xmm6"); + asm volatile("pxor %xmm5,%xmm5"); + asm volatile("pxor %xmm7,%xmm7"); + } + asm volatile("movntdq %%xmm2,%0" : "=m" (p[d])); + asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16])); + asm volatile("movntdq %%xmm4,%0" : "=m" (q[d])); + asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16])); + } + + asm volatile("sfence" : : : "memory"); + kernel_fpu_end(); +} + +const struct raid6_calls raid6_sse2x2 = { + raid6_sse22_gen_syndrome, + raid6_have_sse2, + "sse2x2", + 1 /* Has cache hints */ +}; + +#endif + +#if defined(__x86_64__) && !defined(__arch_um__) + +/* + * Unrolled-by-4 SSE2 implementation + */ +static void raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs) +{ + u8 **dptr = (u8 **)ptrs; + u8 *p, *q; + int d, z, z0; + + z0 = disks - 3; /* Highest data disk */ + p = dptr[z0+1]; /* XOR parity */ + q = dptr[z0+2]; /* RS syndrome */ + + kernel_fpu_begin(); + + asm volatile("movdqa %0,%%xmm0" :: "m" (raid6_sse_constants.x1d[0])); + asm volatile("pxor %xmm2,%xmm2"); /* P[0] */ + asm volatile("pxor %xmm3,%xmm3"); /* P[1] */ + asm volatile("pxor %xmm4,%xmm4"); /* Q[0] */ + asm volatile("pxor %xmm5,%xmm5"); /* Zero temp */ + asm volatile("pxor %xmm6,%xmm6"); /* Q[1] */ + asm volatile("pxor %xmm7,%xmm7"); /* Zero temp */ + asm volatile("pxor %xmm10,%xmm10"); /* P[2] */ + asm volatile("pxor %xmm11,%xmm11"); /* P[3] */ + asm volatile("pxor %xmm12,%xmm12"); /* Q[2] */ + asm volatile("pxor %xmm13,%xmm13"); /* Zero temp */ + asm volatile("pxor %xmm14,%xmm14"); /* Q[3] */ + asm volatile("pxor %xmm15,%xmm15"); /* Zero temp */ + + for ( d = 0 ; d < bytes ; d += 64 ) { + for ( z = z0 ; z >= 0 ; z-- ) { + /* The second prefetch seems to improve performance... 
*/ + asm volatile("prefetchnta %0" :: "m" (dptr[z][d])); + asm volatile("prefetchnta %0" :: "m" (dptr[z][d+32])); + asm volatile("pcmpgtb %xmm4,%xmm5"); + asm volatile("pcmpgtb %xmm6,%xmm7"); + asm volatile("pcmpgtb %xmm12,%xmm13"); + asm volatile("pcmpgtb %xmm14,%xmm15"); + asm volatile("paddb %xmm4,%xmm4"); + asm volatile("paddb %xmm6,%xmm6"); + asm volatile("paddb %xmm12,%xmm12"); + asm volatile("paddb %xmm14,%xmm14"); + asm volatile("pand %xmm0,%xmm5"); + asm volatile("pand %xmm0,%xmm7"); + asm volatile("pand %xmm0,%xmm13"); + asm volatile("pand %xmm0,%xmm15"); + asm volatile("pxor %xmm5,%xmm4"); + asm volatile("pxor %xmm7,%xmm6"); + asm volatile("pxor %xmm13,%xmm12"); + asm volatile("pxor %xmm15,%xmm14"); + asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d])); + asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d+16])); + asm volatile("movdqa %0,%%xmm13" :: "m" (dptr[z][d+32])); + asm volatile("movdqa %0,%%xmm15" :: "m" (dptr[z][d+48])); + asm volatile("pxor %xmm5,%xmm2"); + asm volatile("pxor %xmm7,%xmm3"); + asm volatile("pxor %xmm13,%xmm10"); + asm volatile("pxor %xmm15,%xmm11"); + asm volatile("pxor %xmm5,%xmm4"); + asm volatile("pxor %xmm7,%xmm6"); + asm volatile("pxor %xmm13,%xmm12"); + asm volatile("pxor %xmm15,%xmm14"); + asm volatile("pxor %xmm5,%xmm5"); + asm volatile("pxor %xmm7,%xmm7"); + asm volatile("pxor %xmm13,%xmm13"); + asm volatile("pxor %xmm15,%xmm15"); + } + asm volatile("movntdq %%xmm2,%0" : "=m" (p[d])); + asm volatile("pxor %xmm2,%xmm2"); + asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16])); + asm volatile("pxor %xmm3,%xmm3"); + asm volatile("movntdq %%xmm10,%0" : "=m" (p[d+32])); + asm volatile("pxor %xmm10,%xmm10"); + asm volatile("movntdq %%xmm11,%0" : "=m" (p[d+48])); + asm volatile("pxor %xmm11,%xmm11"); + asm volatile("movntdq %%xmm4,%0" : "=m" (q[d])); + asm volatile("pxor %xmm4,%xmm4"); + asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16])); + asm volatile("pxor %xmm6,%xmm6"); + asm volatile("movntdq %%xmm12,%0" : "=m" (q[d+32])); + asm volatile("pxor %xmm12,%xmm12"); + asm volatile("movntdq %%xmm14,%0" : "=m" (q[d+48])); + asm volatile("pxor %xmm14,%xmm14"); + } + + asm volatile("sfence" : : : "memory"); + kernel_fpu_end(); +} + +const struct raid6_calls raid6_sse2x4 = { + raid6_sse24_gen_syndrome, + raid6_have_sse2, + "sse2x4", + 1 /* Has cache hints */ +}; + +#endif diff --git a/drivers/md/raid6test/Makefile b/drivers/md/raid6test/Makefile new file mode 100644 index 0000000..78e0396 --- /dev/null +++ b/drivers/md/raid6test/Makefile @@ -0,0 +1,75 @@ +# +# This is a simple Makefile to test some of the RAID-6 code +# from userspace. +# + +CC = gcc +OPTFLAGS = -O2 # Adjust as desired +CFLAGS = -I.. 
-g $(OPTFLAGS) +LD = ld +PERL = perl +AR = ar +RANLIB = ranlib + +.c.o: + $(CC) $(CFLAGS) -c -o $@ $< + +%.c: ../%.c + cp -f $< $@ + +%.uc: ../%.uc + cp -f $< $@ + +all: raid6.a raid6test + +raid6.a: raid6int1.o raid6int2.o raid6int4.o raid6int8.o raid6int16.o \ + raid6int32.o \ + raid6mmx.o raid6sse1.o raid6sse2.o \ + raid6altivec1.o raid6altivec2.o raid6altivec4.o raid6altivec8.o \ + raid6recov.o raid6algos.o \ + raid6tables.o + rm -f $@ + $(AR) cq $@ $^ + $(RANLIB) $@ + +raid6test: test.c raid6.a + $(CC) $(CFLAGS) -o raid6test $^ + +raid6altivec1.c: raid6altivec.uc ../unroll.pl + $(PERL) ../unroll.pl 1 < raid6altivec.uc > $@ + +raid6altivec2.c: raid6altivec.uc ../unroll.pl + $(PERL) ../unroll.pl 2 < raid6altivec.uc > $@ + +raid6altivec4.c: raid6altivec.uc ../unroll.pl + $(PERL) ../unroll.pl 4 < raid6altivec.uc > $@ + +raid6altivec8.c: raid6altivec.uc ../unroll.pl + $(PERL) ../unroll.pl 8 < raid6altivec.uc > $@ + +raid6int1.c: raid6int.uc ../unroll.pl + $(PERL) ../unroll.pl 1 < raid6int.uc > $@ + +raid6int2.c: raid6int.uc ../unroll.pl + $(PERL) ../unroll.pl 2 < raid6int.uc > $@ + +raid6int4.c: raid6int.uc ../unroll.pl + $(PERL) ../unroll.pl 4 < raid6int.uc > $@ + +raid6int8.c: raid6int.uc ../unroll.pl + $(PERL) ../unroll.pl 8 < raid6int.uc > $@ + +raid6int16.c: raid6int.uc ../unroll.pl + $(PERL) ../unroll.pl 16 < raid6int.uc > $@ + +raid6int32.c: raid6int.uc ../unroll.pl + $(PERL) ../unroll.pl 32 < raid6int.uc > $@ + +raid6tables.c: mktables + ./mktables > raid6tables.c + +clean: + rm -f *.o *.a mktables mktables.c raid6int.uc raid6*.c raid6test + +spotless: clean + rm -f *~ diff --git a/drivers/md/raid6test/test.c b/drivers/md/raid6test/test.c new file mode 100644 index 0000000..559cc41 --- /dev/null +++ b/drivers/md/raid6test/test.c @@ -0,0 +1,124 @@ +/* -*- linux-c -*- ------------------------------------------------------- * + * + * Copyright 2002-2007 H. Peter Anvin - All Rights Reserved + * + * This file is part of the Linux kernel, and is made available under + * the terms of the GNU General Public License version 2 or (at your + * option) any later version; incorporated herein by reference. 
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * raid6test.c
+ *
+ * Test RAID-6 recovery with various algorithms
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "raid6.h"
+
+#define NDISKS		16	/* Including P and Q */
+
+const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
+struct raid6_calls raid6_call;
+
+char *dataptrs[NDISKS];
+char data[NDISKS][PAGE_SIZE];
+char recovi[PAGE_SIZE], recovj[PAGE_SIZE];
+
+static void makedata(void)
+{
+	int i, j;
+
+	for (i = 0; i < NDISKS; i++) {
+		for (j = 0; j < PAGE_SIZE; j++)
+			data[i][j] = rand();
+
+		dataptrs[i] = data[i];
+	}
+}
+
+static char disk_type(int d)
+{
+	switch (d) {
+	case NDISKS-2:
+		return 'P';
+	case NDISKS-1:
+		return 'Q';
+	default:
+		return 'D';
+	}
+}
+
+static int test_disks(int i, int j)
+{
+	int erra, errb;
+
+	memset(recovi, 0xf0, PAGE_SIZE);
+	memset(recovj, 0xba, PAGE_SIZE);
+
+	dataptrs[i] = recovi;
+	dataptrs[j] = recovj;
+
+	raid6_dual_recov(NDISKS, PAGE_SIZE, i, j, (void **)&dataptrs);
+
+	erra = memcmp(data[i], recovi, PAGE_SIZE);
+	errb = memcmp(data[j], recovj, PAGE_SIZE);
+
+	if (i < NDISKS-2 && j == NDISKS-1) {
+		/* We don't implement the DQ failure scenario, since it's
+		   equivalent to a RAID-5 failure (XOR, then recompute Q) */
+		erra = errb = 0;
+	} else {
+		printf("algo=%-8s faila=%3d(%c) failb=%3d(%c) %s\n",
+		       raid6_call.name,
+		       i, disk_type(i),
+		       j, disk_type(j),
+		       (!erra && !errb) ? "OK" :
+		       !erra ? "ERRB" :
+		       !errb ? "ERRA" : "ERRAB");
+	}
+
+	dataptrs[i] = data[i];
+	dataptrs[j] = data[j];
+
+	return erra || errb;
+}
+
+int main(int argc, char *argv[])
+{
+	const struct raid6_calls *const *algo;
+	int i, j;
+	int err = 0;
+
+	makedata();
+
+	for (algo = raid6_algos; *algo; algo++) {
+		if (!(*algo)->valid || (*algo)->valid()) {
+			raid6_call = **algo;
+
+			/* Nuke syndromes */
+			memset(data[NDISKS-2], 0xee, 2*PAGE_SIZE);
+
+			/* Generate assumed good syndrome */
+			raid6_call.gen_syndrome(NDISKS, PAGE_SIZE,
+						(void **)&dataptrs);
+
+			for (i = 0; i < NDISKS-1; i++)
+				for (j = i+1; j < NDISKS; j++)
+					err += test_disks(i, j);
+		}
+		printf("\n");
+	}
+
+	printf("\n");
+	/* Pick the best algorithm test */
+	raid6_select_algo();
+
+	if (err)
+		printf("\n*** ERRORS FOUND ***\n");
+
+	return err;
+}
diff --git a/drivers/md/raid6x86.h b/drivers/md/raid6x86.h
new file mode 100644
index 0000000..99fea7a
--- /dev/null
+++ b/drivers/md/raid6x86.h
@@ -0,0 +1,61 @@
+/* ----------------------------------------------------------------------- *
+ *
+ *   Copyright 2002-2004 H. Peter Anvin - All Rights Reserved
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation, Inc., 53 Temple Place Ste 330,
+ *   Boston MA 02111-1307, USA; either version 2 of the License, or
+ *   (at your option) any later version; incorporated herein by reference.
+ * + * ----------------------------------------------------------------------- */ + +/* + * raid6x86.h + * + * Definitions common to x86 and x86-64 RAID-6 code only + */ + +#ifndef LINUX_RAID_RAID6X86_H +#define LINUX_RAID_RAID6X86_H + +#if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__) + +#ifdef __KERNEL__ /* Real code */ + +#include <asm/i387.h> + +#else /* Dummy code for user space testing */ + +static inline void kernel_fpu_begin(void) +{ +} + +static inline void kernel_fpu_end(void) +{ +} + +#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ +#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions + * (fast save and restore) */ +#define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */ +#define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */ +#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */ + +/* Should work well enough on modern CPUs for testing */ +static inline int boot_cpu_has(int flag) +{ + u32 eax = (flag >> 5) ? 0x80000001 : 1; + u32 edx; + + asm volatile("cpuid" + : "+a" (eax), "=d" (edx) + : : "ecx", "ebx"); + + return (edx >> (flag & 31)) & 1; +} + +#endif /* ndef __KERNEL__ */ + +#endif +#endif diff --git a/drivers/md/unroll.pl b/drivers/md/unroll.pl new file mode 100644 index 0000000..3acc710 --- /dev/null +++ b/drivers/md/unroll.pl @@ -0,0 +1,24 @@ +#!/usr/bin/perl +# +# Take a piece of C code and for each line which contains the sequence $$ +# repeat n times with $ replaced by 0...n-1; the sequence $# is replaced +# by the unrolling factor, and $* with a single $ +# + +($n) = @ARGV; +$n += 0; + +while ( defined($line = <STDIN>) ) { + if ( $line =~ /\$\$/ ) { + $rep = $n; + } else { + $rep = 1; + } + for ( $i = 0 ; $i < $rep ; $i++ ) { + $tmp = $line; + $tmp =~ s/\$\$/$i/g; + $tmp =~ s/\$\#/$n/g; + $tmp =~ s/\$\*/\$/g; + print $tmp; + } +} |
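
As a hedged illustration of the substitution rules implemented by unroll.pl above (the sample template line is invented for this illustration, not quoted from the real raid6int.uc): every input line containing $$ is emitted n times with $$ replaced by 0 .. n-1, $# is replaced by the unrolling factor n, and $* becomes a literal $. Driven as in the raid6test Makefile rule "$(PERL) ../unroll.pl 2 < raid6int.uc > $@", a template line such as

	wq$$ = wd$$ = dptr[z0][d+$$*NSIZE];	/* one of $# lanes */

would expand to

	wq0 = wd0 = dptr[z0][d+0*NSIZE];	/* one of 2 lanes */
	wq1 = wd1 = dptr[z0][d+1*NSIZE];	/* one of 2 lanes */

which is how a single template yields the 1-, 2-, 4-, 8-, 16- and 32-way unrolled integer implementations (and, analogously, the Altivec variants).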
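
For readers following the inline assembly in raid6sse2.c above: the repeated pcmpgtb/paddb/pand/pxor sequence applied to the Q accumulators is a byte-wise multiply-by-two in GF(2^8), using the 0x1d reduction constant kept in raid6_sse_constants and loaded into %xmm0, and the loop over z is a Horner evaluation of Q = D[z0]*2^z0 ^ ... ^ D[1]*2 ^ D[0] alongside the plain XOR parity P. The C below is a minimal scalar sketch of the same per-byte operation and of the loop the SSE2 routines vectorize; the identifiers gf2_mul2 and gen_syndrome_scalar are made up for this sketch and do not appear in the kernel sources.

	#include <stddef.h>
	#include <stdint.h>

	/* Multiply one byte by 2 in GF(2^8) (RAID-6 polynomial 0x11d).
	 * Scalar equivalent of the SSE2 sequence above: pcmpgtb builds a
	 * mask from the sign bit, paddb doubles (left shift), pand keeps
	 * 0x1d where the top bit was set, pxor folds the reduction in. */
	static uint8_t gf2_mul2(uint8_t v)
	{
		uint8_t mask = (v & 0x80) ? 0x1d : 0x00;
		return (uint8_t)((v << 1) ^ mask);
	}

	/* Byte-at-a-time sketch of what raid6_sse2*_gen_syndrome computes.
	 * Pointer layout as in the kernel code: dptr[0..disks-3] are data,
	 * dptr[disks-2] is P, dptr[disks-1] is Q. */
	static void gen_syndrome_scalar(int disks, size_t bytes, uint8_t **dptr)
	{
		int z0 = disks - 3;		/* highest data disk */
		uint8_t *p = dptr[z0 + 1];	/* XOR parity */
		uint8_t *q = dptr[z0 + 2];	/* Reed-Solomon syndrome */
		size_t d;
		int z;

		for (d = 0; d < bytes; d++) {
			uint8_t wp = dptr[z0][d];
			uint8_t wq = wp;

			for (z = z0 - 1; z >= 0; z--) {
				wq = gf2_mul2(wq) ^ dptr[z][d];	/* Q = 2*Q ^ D[z] */
				wp ^= dptr[z][d];		/* P = P ^ D[z]   */
			}
			p[d] = wp;
			q[d] = wq;
		}
	}

The SSE2 routines carry the same wp/wq state in XMM registers, sixteen byte lanes at a time, which is why the unrolled-by-2 and unrolled-by-4 variants simply replicate the register set and advance d by 32 or 64 bytes per iteration.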
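
For completeness, the raid6test harness above builds entirely in user space. Assuming a host gcc/perl toolchain (an assumption of this note, not something the Makefile checks for), running make in drivers/md/raid6test copies the algorithm sources and .uc templates from the parent directory, generates the unrolled variants and raid6tables.c via mktables, and links raid6test; running ./raid6test then prints one algo=.../faila=.../failb=... result line for every simulated double failure except the data+Q case, which test_disks() skips as equivalent to a RAID-5 failure, and ends with the raid6_select_algo() pass that picks the preferred implementation.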