Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/amiflop.c            |   2
-rw-r--r--  drivers/block/aoe/aoeblk.c         |   3
-rw-r--r--  drivers/block/ataflop.c            |   2
-rw-r--r--  drivers/block/cciss.c              | 136
-rw-r--r--  drivers/block/cciss.h              |   4
-rw-r--r--  drivers/block/cciss_scsi.c         |   8
-rw-r--r--  drivers/block/drbd/drbd_actlog.c   |  42
-rw-r--r--  drivers/block/drbd/drbd_int.h      |  52
-rw-r--r--  drivers/block/drbd/drbd_main.c     | 148
-rw-r--r--  drivers/block/drbd/drbd_nl.c       |  25
-rw-r--r--  drivers/block/drbd/drbd_proc.c     |   1
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 232
-rw-r--r--  drivers/block/drbd/drbd_req.c      |  38
-rw-r--r--  drivers/block/drbd/drbd_req.h      |   3
-rw-r--r--  drivers/block/drbd/drbd_worker.c   |  34
-rw-r--r--  drivers/block/floppy.c             |   4
-rw-r--r--  drivers/block/loop.c               |   6
-rw-r--r--  drivers/block/rbd.c                | 748
-rw-r--r--  drivers/block/xen-blkfront.c       |  57
19 files changed, 802 insertions(+), 743 deletions(-)
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index a1725e6..7888501 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1341,7 +1341,7 @@ static struct request *set_next_request(void)
{
struct request_queue *q;
int cnt = FD_MAX_UNITS;
- struct request *rq;
+ struct request *rq = NULL;
/* Find next queue we can dispatch from */
fdc_queue = fdc_queue + 1;
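Note on the amiflop/ataflop hunks: the round-robin dispatch loop can visit every unit queue without finding a request, in which case the function falls through and returns rq. Initializing rq to NULL makes the caller's NULL check see "no work" instead of stack garbage. A minimal sketch of the pattern (fdc_queue, FD_MAX_UNITS and unit[] stand in for the drivers' real state):

    static struct request *set_next_request(void)
    {
    	struct request_queue *q;
    	int cnt = FD_MAX_UNITS;
    	struct request *rq = NULL;	/* NULL when every queue is empty */

    	/* Round-robin over the per-unit queues. */
    	do {
    		fdc_queue = (fdc_queue + 1) % FD_MAX_UNITS;
    		q = unit[fdc_queue].disk->queue;
    		if (q) {
    			rq = blk_fetch_request(q);
    			if (rq)
    				break;
    		}
    	} while (--cnt > 0);

    	return rq;
    }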
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 541e188..528f631 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -180,9 +180,6 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
BUG();
bio_endio(bio, -ENXIO);
return 0;
- } else if (bio->bi_rw & REQ_HARDBARRIER) {
- bio_endio(bio, -EOPNOTSUPP);
- return 0;
} else if (bio->bi_io_vec == NULL) {
printk(KERN_ERR "aoe: bi_io_vec is NULL\n");
BUG();
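Note: this hunk, like the loop.c, drbd and drbd_req.c hunks below, drops REQ_HARDBARRIER handling. As of 2.6.37 the block layer no longer issues barrier bios; durability is expressed with REQ_FLUSH and REQ_FUA instead, so a make_request function has nothing to reject. Roughly (a sketch; example_flush_cache() and example_submit() are hypothetical driver helpers):

    static int example_make_request(struct request_queue *q, struct bio *bio)
    {
    	/* no REQ_HARDBARRIER branch anymore */
    	if (bio->bi_rw & REQ_FLUSH)
    		example_flush_cache();	/* drain the write-back cache */
    	example_submit(bio);		/* REQ_FUA writes bypass the cache */
    	return 0;
    }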
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 4e4cc6c..605a67e 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1399,7 +1399,7 @@ static struct request *set_next_request(void)
{
struct request_queue *q;
int old_pos = fdc_queue;
- struct request *rq;
+ struct request *rq = NULL;
do {
q = unit[fdc_queue].disk->queue;
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 2cc4dda4..8e0f925 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -66,6 +66,7 @@ MODULE_VERSION("3.6.26");
MODULE_LICENSE("GPL");
static DEFINE_MUTEX(cciss_mutex);
+static struct proc_dir_entry *proc_cciss;
#include "cciss_cmd.h"
#include "cciss.h"
@@ -113,6 +114,8 @@ static struct board_type products[] = {
{0x409D0E11, "Smart Array 6400 EM", &SA5_access},
{0x40910E11, "Smart Array 6i", &SA5_access},
{0x3225103C, "Smart Array P600", &SA5_access},
+ {0x3223103C, "Smart Array P800", &SA5_access},
+ {0x3234103C, "Smart Array P400", &SA5_access},
{0x3235103C, "Smart Array P400i", &SA5_access},
{0x3211103C, "Smart Array E200i", &SA5_access},
{0x3212103C, "Smart Array E200", &SA5_access},
@@ -361,8 +364,6 @@ static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
#define ENG_GIG_FACTOR (ENG_GIG/512)
#define ENGAGE_SCSI "engage scsi"
-static struct proc_dir_entry *proc_cciss;
-
static void cciss_seq_show_header(struct seq_file *seq)
{
ctlr_info_t *h = seq->private;
@@ -2833,6 +2834,8 @@ static int cciss_revalidate(struct gendisk *disk)
InquiryData_struct *inq_buff = NULL;
for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
+ if (!h->drv[logvol])
+ continue;
if (memcmp(h->drv[logvol]->LunID, drv->LunID,
sizeof(drv->LunID)) == 0) {
FOUND = 1;
@@ -3753,7 +3756,7 @@ static void __devinit cciss_wait_for_mode_change_ack(ctlr_info_t *h)
for (i = 0; i < MAX_CONFIG_WAIT; i++) {
if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
break;
- msleep(10);
+ usleep_range(10000, 20000);
}
}
@@ -3937,10 +3940,9 @@ static int __devinit cciss_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
subsystem_vendor_id;
- for (i = 0; i < ARRAY_SIZE(products); i++) {
+ for (i = 0; i < ARRAY_SIZE(products); i++)
if (*board_id == products[i].board_id)
return i;
- }
dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n",
*board_id);
return -ENODEV;
@@ -3971,18 +3973,31 @@ static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev,
return -ENODEV;
}
-static int __devinit cciss_wait_for_board_ready(ctlr_info_t *h)
+#define BOARD_READY 1
+#define BOARD_NOT_READY 0
+static int __devinit cciss_wait_for_board_state(struct pci_dev *pdev,
+	void __iomem *vaddr, int wait_for_ready)
{
- int i;
+ int i, iterations;
u32 scratchpad;
- for (i = 0; i < CCISS_BOARD_READY_ITERATIONS; i++) {
- scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
- if (scratchpad == CCISS_FIRMWARE_READY)
- return 0;
+ if (wait_for_ready)
+ iterations = CCISS_BOARD_READY_ITERATIONS;
+ else
+ iterations = CCISS_BOARD_NOT_READY_ITERATIONS;
+
+ for (i = 0; i < iterations; i++) {
+ scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
+ if (wait_for_ready) {
+ if (scratchpad == CCISS_FIRMWARE_READY)
+ return 0;
+ } else {
+ if (scratchpad != CCISS_FIRMWARE_READY)
+ return 0;
+ }
msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS);
}
- dev_warn(&h->pdev->dev, "board not ready, timed out.\n");
+ dev_warn(&pdev->dev, "board not ready, timed out.\n");
return -ENODEV;
}
@@ -4031,6 +4046,11 @@ static int __devinit cciss_find_cfgtables(ctlr_info_t *h)
static void __devinit cciss_get_max_perf_mode_cmds(struct ctlr_info *h)
{
h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
+
+ /* Limit commands in memory limited kdump scenario. */
+ if (reset_devices && h->max_commands > 32)
+ h->max_commands = 32;
+
if (h->max_commands < 16) {
dev_warn(&h->pdev->dev, "Controller reports "
"max supported commands of %d, an obvious lie. "
@@ -4148,7 +4168,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
err = -ENOMEM;
goto err_out_free_res;
}
- err = cciss_wait_for_board_ready(h);
+ err = cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
if (err)
goto err_out_free_res;
err = cciss_find_cfgtables(h);
@@ -4313,36 +4333,6 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u
#define cciss_soft_reset_controller(p) cciss_message(p, 1, 0)
#define cciss_noop(p) cciss_message(p, 3, 0)
-static __devinit int cciss_reset_msi(struct pci_dev *pdev)
-{
-/* the #defines are stolen from drivers/pci/msi.h. */
-#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
-#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
-
- int pos;
- u16 control = 0;
-
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
- if (pos) {
- pci_read_config_word(pdev, msi_control_reg(pos), &control);
- if (control & PCI_MSI_FLAGS_ENABLE) {
- dev_info(&pdev->dev, "resetting MSI\n");
- pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSI_FLAGS_ENABLE);
- }
- }
-
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
- if (pos) {
- pci_read_config_word(pdev, msi_control_reg(pos), &control);
- if (control & PCI_MSIX_FLAGS_ENABLE) {
- dev_info(&pdev->dev, "resetting MSI-X\n");
- pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSIX_FLAGS_ENABLE);
- }
- }
-
- return 0;
-}
-
static int cciss_controller_hard_reset(struct pci_dev *pdev,
void * __iomem vaddr, bool use_doorbell)
{
@@ -4397,17 +4387,17 @@ static int cciss_controller_hard_reset(struct pci_dev *pdev,
* states or using the doorbell register. */
static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
{
- u16 saved_config_space[32];
u64 cfg_offset;
u32 cfg_base_addr;
u64 cfg_base_addr_index;
void __iomem *vaddr;
unsigned long paddr;
u32 misc_fw_support, active_transport;
- int rc, i;
+ int rc;
CfgTable_struct __iomem *cfgtable;
bool use_doorbell;
u32 board_id;
+ u16 command_register;
/* For controllers as old as the p600, this is very nearly
* the same thing as
@@ -4417,14 +4407,6 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
* pci_set_power_state(pci_dev, PCI_D0);
* pci_restore_state(pci_dev);
*
- * but we can't use these nice canned kernel routines on
- * kexec, because they also check the MSI/MSI-X state in PCI
- * configuration space and do the wrong thing when it is
- * set/cleared. Also, the pci_save/restore_state functions
- * violate the ordering requirements for restoring the
- * configuration space from the CCISS document (see the
- * comment below). So we roll our own ....
- *
* For controllers newer than the P600, the pci power state
* method of resetting doesn't work so we have another way
* using the doorbell register.
@@ -4443,8 +4425,13 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
return -ENODEV;
}
- for (i = 0; i < 32; i++)
- pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
+ /* Save the PCI command register */
+ pci_read_config_word(pdev, 4, &command_register);
+ /* Turn the board off. This is so that later pci_restore_state()
+ * won't turn the board on before the rest of config space is ready.
+ */
+ pci_disable_device(pdev);
+ pci_save_state(pdev);
/* find the first memory BAR, so we can find the cfg table */
rc = cciss_pci_find_memory_BAR(pdev, &paddr);
@@ -4479,26 +4466,32 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell);
if (rc)
goto unmap_cfgtable;
-
- /* Restore the PCI configuration space. The Open CISS
- * Specification says, "Restore the PCI Configuration
- * Registers, offsets 00h through 60h. It is important to
- * restore the command register, 16-bits at offset 04h,
- * last. Do not restore the configuration status register,
- * 16-bits at offset 06h." Note that the offset is 2*i.
- */
- for (i = 0; i < 32; i++) {
- if (i == 2 || i == 3)
- continue;
- pci_write_config_word(pdev, 2*i, saved_config_space[i]);
+ pci_restore_state(pdev);
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_warn(&pdev->dev, "failed to enable device.\n");
+ goto unmap_cfgtable;
}
- wmb();
- pci_write_config_word(pdev, 4, saved_config_space[2]);
+ pci_write_config_word(pdev, 4, command_register);
/* Some devices (notably the HP Smart Array 5i Controller)
need a little pause here */
msleep(CCISS_POST_RESET_PAUSE_MSECS);
+ /* Wait for board to become not ready, then ready. */
+ dev_info(&pdev->dev, "Waiting for board to become ready.\n");
+ rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
+ if (rc) /* Don't bail, might be E500, etc. which can't be reset */
+ dev_warn(&pdev->dev,
+ "failed waiting for board to become not ready\n");
+ rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_READY);
+ if (rc) {
+ dev_warn(&pdev->dev,
+ "failed waiting for board to become ready\n");
+ goto unmap_cfgtable;
+ }
+ dev_info(&pdev->dev, "board ready.\n");
+
/* Controller should be in simple mode at this point. If it's not,
* It means we're on one of those controllers which doesn't support
* the doorbell reset method and on which the PCI power management reset
@@ -4539,8 +4532,6 @@ static __devinit int cciss_init_reset_devices(struct pci_dev *pdev)
return 0; /* just try to do the kdump anyhow. */
if (rc)
return -ENODEV;
- if (cciss_reset_msi(pdev))
- return -ENODEV;
/* Now try to get the controller to respond to a no-op */
for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) {
@@ -4936,7 +4927,8 @@ static void __exit cciss_cleanup(void)
}
}
kthread_stop(cciss_scan_thread);
- remove_proc_entry("driver/cciss", NULL);
+ if (proc_cciss)
+ remove_proc_entry("driver/cciss", NULL);
bus_unregister(&cciss_bus_type);
}
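Note: taken together, the kdump hunks above replace the hand-rolled 32-word config-space save/restore (and the removed cciss_reset_msi()) with the standard PCI helpers, keeping only the one ordering rule the Open CISS spec imposes: the command register is written last. The resulting sequence, in outline (a sketch; BAR/cfgtable setup and error handling elided):

    u16 command_register;

    pci_read_config_word(pdev, 4, &command_register);	/* save PCI command */
    pci_disable_device(pdev);	/* keep the board off across the restore */
    pci_save_state(pdev);

    cciss_controller_hard_reset(pdev, vaddr, use_doorbell);

    pci_restore_state(pdev);
    pci_enable_device(pdev);
    pci_write_config_word(pdev, 4, command_register);	/* command reg last */

    msleep(CCISS_POST_RESET_PAUSE_MSECS);
    cciss_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
    cciss_wait_for_board_state(pdev, vaddr, BOARD_READY);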
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index ae340ff..4b8933d 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -200,10 +200,14 @@ struct ctlr_info
* the above.
*/
#define CCISS_BOARD_READY_WAIT_SECS (120)
+#define CCISS_BOARD_NOT_READY_WAIT_SECS (10)
#define CCISS_BOARD_READY_POLL_INTERVAL_MSECS (100)
#define CCISS_BOARD_READY_ITERATIONS \
((CCISS_BOARD_READY_WAIT_SECS * 1000) / \
CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
+#define CCISS_BOARD_NOT_READY_ITERATIONS \
+ ((CCISS_BOARD_NOT_READY_WAIT_SECS * 1000) / \
+ CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
#define CCISS_POST_RESET_PAUSE_MSECS (3000)
#define CCISS_POST_RESET_NOOP_INTERVAL_MSECS (1000)
#define CCISS_POST_RESET_NOOP_RETRIES (12)
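Note: with the constants above, the two polling budgets work out to

    /* (wait seconds * 1000) / poll interval in ms:
     *   ready:     (120 * 1000) / 100 = 1200 iterations, i.e. up to 120 s
     *   not-ready: ( 10 * 1000) / 100 =  100 iterations, i.e. up to  10 s
     * at one scratchpad read every 100 ms. */

so a freshly reset controller gets ten seconds to drop out of the ready state and two minutes to come back.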
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 575495f..727d022 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -62,8 +62,8 @@ static int cciss_scsi_proc_info(
int length, /* length of data in buffer */
int func); /* 0 == read, 1 == write */
-static int cciss_scsi_queue_command (struct scsi_cmnd *cmd,
- void (* done)(struct scsi_cmnd *));
+static int cciss_scsi_queue_command (struct Scsi_Host *h,
+ struct scsi_cmnd *cmd);
static int cciss_eh_device_reset_handler(struct scsi_cmnd *);
static int cciss_eh_abort_handler(struct scsi_cmnd *);
@@ -1406,7 +1406,7 @@ static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *c,
static int
-cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
+cciss_scsi_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
ctlr_info_t *h;
int rc;
@@ -1504,6 +1504,8 @@ cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd
return 0;
}
+static DEF_SCSI_QCMD(cciss_scsi_queue_command)
+
static void cciss_unregister_scsi(ctlr_info_t *h)
{
struct cciss_scsi_adapter_data_t *sa;
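Note: the cciss_scsi.c hunks are part of the tree-wide change that calls queuecommand without the SCSI host lock held. Legacy drivers keep their locked implementation under an _lck suffix and generate the unlocked entry point with DEF_SCSI_QCMD. Its expansion is approximately (a sketch; the real macro in <scsi/scsi_host.h> also assigns the command's serial number):

    static int cciss_scsi_queue_command(struct Scsi_Host *shost,
    				    struct scsi_cmnd *cmd)
    {
    	unsigned long irq_flags;
    	int rc;

    	spin_lock_irqsave(shost->host_lock, irq_flags);
    	rc = cciss_scsi_queue_command_lck(cmd, cmd->scsi_done);
    	spin_unlock_irqrestore(shost->host_lock, irq_flags);
    	return rc;
    }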
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index ac04ef9..ba95cba 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -78,11 +78,10 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
init_completion(&md_io.event);
md_io.error = 0;
- if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags))
- rw |= REQ_HARDBARRIER;
+ if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
+ rw |= REQ_FUA;
rw |= REQ_UNPLUG | REQ_SYNC;
- retry:
bio = bio_alloc(GFP_NOIO, 1);
bio->bi_bdev = bdev->md_bdev;
bio->bi_sector = sector;
@@ -100,17 +99,6 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
wait_for_completion(&md_io.event);
ok = bio_flagged(bio, BIO_UPTODATE) && md_io.error == 0;
- /* check for unsupported barrier op.
- * would rather check on EOPNOTSUPP, but that is not reliable.
- * don't try again for ANY return value != 0 */
- if (unlikely((bio->bi_rw & REQ_HARDBARRIER) && !ok)) {
- /* Try again with no barrier */
- dev_warn(DEV, "Barriers not supported on meta data device - disabling\n");
- set_bit(MD_NO_BARRIER, &mdev->flags);
- rw &= ~REQ_HARDBARRIER;
- bio_put(bio);
- goto retry;
- }
out:
bio_put(bio);
return ok;
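Note: with barriers gone, DRBD orders its meta-data writes with REQ_FUA on the write itself plus explicit flushes (blkdev_issue_flush() in drbd_md_flush(), guarded by the renamed MD_NO_FUA bit). The submission path now amounts to (a sketch; bdev, page, sector, size and the md_io completion are the function's real locals, error handling elided):

    int rw = WRITE;
    struct bio *bio;

    if (!test_bit(MD_NO_FUA, &mdev->flags))
    	rw |= REQ_FUA;		/* write through the device cache */
    rw |= REQ_UNPLUG | REQ_SYNC;

    bio = bio_alloc(GFP_NOIO, 1);
    bio->bi_bdev = bdev->md_bdev;
    bio->bi_sector = sector;
    bio_add_page(bio, page, size, 0);
    submit_bio(rw, bio);
    wait_for_completion(&md_io.event);

The retry loop disappears with the barrier: a failed FUA write is a real IO error, not an unsupported-feature probe.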
@@ -284,18 +272,32 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
u32 xor_sum = 0;
if (!get_ldev(mdev)) {
- dev_err(DEV, "get_ldev() failed in w_al_write_transaction\n");
+ dev_err(DEV,
+ "disk is %s, cannot start al transaction (-%d +%d)\n",
+ drbd_disk_str(mdev->state.disk), evicted, new_enr);
complete(&((struct update_al_work *)w)->event);
return 1;
}
/* do we have to do a bitmap write, first?
* TODO reduce maximum latency:
* submit both bios, then wait for both,
- * instead of doing two synchronous sector writes. */
+ * instead of doing two synchronous sector writes.
+ * For now, we must not write the transaction,
+ * if we cannot write out the bitmap of the evicted extent. */
if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE)
drbd_bm_write_sect(mdev, evicted/AL_EXT_PER_BM_SECT);
- mutex_lock(&mdev->md_io_mutex); /* protects md_io_page, al_tr_cycle, ... */
+ /* The bitmap write may have failed, causing a state change. */
+ if (mdev->state.disk < D_INCONSISTENT) {
+ dev_err(DEV,
+ "disk is %s, cannot write al transaction (-%d +%d)\n",
+ drbd_disk_str(mdev->state.disk), evicted, new_enr);
+ complete(&((struct update_al_work *)w)->event);
+ put_ldev(mdev);
+ return 1;
+ }
+
+ mutex_lock(&mdev->md_io_mutex); /* protects md_io_buffer, al_tr_cycle, ... */
buffer = (struct al_transaction *)page_address(mdev->md_io_page);
buffer->magic = __constant_cpu_to_be32(DRBD_MAGIC);
@@ -739,7 +741,7 @@ void drbd_al_apply_to_bm(struct drbd_conf *mdev)
unsigned int enr;
unsigned long add = 0;
char ppb[10];
- int i;
+ int i, tmp;
wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
@@ -747,7 +749,9 @@ void drbd_al_apply_to_bm(struct drbd_conf *mdev)
enr = lc_element_by_index(mdev->act_log, i)->lc_number;
if (enr == LC_FREE)
continue;
- add += drbd_bm_ALe_set_all(mdev, enr);
+ tmp = drbd_bm_ALe_set_all(mdev, enr);
+ dynamic_dev_dbg(DEV, "AL: set %d bits in extent %u\n", tmp, enr);
+ add += tmp;
}
lc_unlock(mdev->act_log);
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 9bdcf43..1ea1a34 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -114,11 +114,11 @@ struct drbd_conf;
#define D_ASSERT(exp) if (!(exp)) \
dev_err(DEV, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__)
-#define ERR_IF(exp) if (({ \
- int _b = (exp) != 0; \
- if (_b) dev_err(DEV, "%s: (%s) in %s:%d\n", \
- __func__, #exp, __FILE__, __LINE__); \
- _b; \
+#define ERR_IF(exp) if (({ \
+ int _b = (exp) != 0; \
+ if (_b) dev_err(DEV, "ASSERT FAILED: %s: (%s) in %s:%d\n", \
+ __func__, #exp, __FILE__, __LINE__); \
+ _b; \
}))
/* Defines to control fault insertion */
@@ -749,17 +749,12 @@ struct drbd_epoch {
/* drbd_epoch flag bits */
enum {
- DE_BARRIER_IN_NEXT_EPOCH_ISSUED,
- DE_BARRIER_IN_NEXT_EPOCH_DONE,
- DE_CONTAINS_A_BARRIER,
DE_HAVE_BARRIER_NUMBER,
- DE_IS_FINISHING,
};
enum epoch_event {
EV_PUT,
EV_GOT_BARRIER_NR,
- EV_BARRIER_DONE,
EV_BECAME_LAST,
EV_CLEANUP = 32, /* used as flag */
};
@@ -801,11 +796,6 @@ enum {
__EE_CALL_AL_COMPLETE_IO,
__EE_MAY_SET_IN_SYNC,
- /* This epoch entry closes an epoch using a barrier.
- * On sucessful completion, the epoch is released,
- * and the P_BARRIER_ACK send. */
- __EE_IS_BARRIER,
-
/* In case a barrier failed,
* we need to resubmit without the barrier flag. */
__EE_RESUBMITTED,
@@ -820,7 +810,6 @@ enum {
};
#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
#define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC)
-#define EE_IS_BARRIER (1<<__EE_IS_BARRIER)
#define EE_RESUBMITTED (1<<__EE_RESUBMITTED)
#define EE_WAS_ERROR (1<<__EE_WAS_ERROR)
#define EE_HAS_DIGEST (1<<__EE_HAS_DIGEST)
@@ -843,16 +832,15 @@ enum {
* Gets cleared when the state.conn
* goes into C_CONNECTED state. */
WRITE_BM_AFTER_RESYNC, /* A kmalloc() during resync failed */
- NO_BARRIER_SUPP, /* underlying block device doesn't implement barriers */
CONSIDER_RESYNC,
- MD_NO_BARRIER, /* meta data device does not support barriers,
- so don't even try */
+ MD_NO_FUA, /* Users wants us to not use FUA/FLUSH on meta data dev */
SUSPEND_IO, /* suspend application io */
BITMAP_IO, /* suspend application io;
once no more io in flight, start bitmap io */
BITMAP_IO_QUEUED, /* Started bitmap IO */
- GO_DISKLESS, /* Disk failed, local_cnt reached zero, we are going diskless */
+ GO_DISKLESS, /* Disk is being detached, on io-error or admin request. */
+ WAS_IO_ERROR, /* Local disk failed returned IO error */
RESYNC_AFTER_NEG, /* Resync after online grow after the attach&negotiate finished. */
NET_CONGESTED, /* The data socket is congested */
@@ -947,7 +935,6 @@ enum write_ordering_e {
WO_none,
WO_drain_io,
WO_bdev_flush,
- WO_bio_barrier
};
struct fifo_buffer {
@@ -1281,6 +1268,7 @@ extern int drbd_bmio_set_n_write(struct drbd_conf *mdev);
extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev);
extern int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why);
extern void drbd_go_diskless(struct drbd_conf *mdev);
+extern void drbd_ldev_destroy(struct drbd_conf *mdev);
/* Meta data layout
@@ -1798,17 +1786,17 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach,
case EP_PASS_ON:
if (!forcedetach) {
if (__ratelimit(&drbd_ratelimit_state))
- dev_err(DEV, "Local IO failed in %s."
- "Passing error on...\n", where);
+ dev_err(DEV, "Local IO failed in %s.\n", where);
break;
}
/* NOTE fall through to detach case if forcedetach set */
case EP_DETACH:
case EP_CALL_HELPER:
+ set_bit(WAS_IO_ERROR, &mdev->flags);
if (mdev->state.disk > D_FAILED) {
_drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
- dev_err(DEV, "Local IO failed in %s."
- "Detaching...\n", where);
+ dev_err(DEV,
+ "Local IO failed in %s. Detaching...\n", where);
}
break;
}
@@ -1874,7 +1862,7 @@ static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
static inline sector_t drbd_get_capacity(struct block_device *bdev)
{
/* return bdev ? get_capacity(bdev->bd_disk) : 0; */
- return bdev ? bdev->bd_inode->i_size >> 9 : 0;
+ return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
}
/**
@@ -2127,7 +2115,11 @@ static inline void put_ldev(struct drbd_conf *mdev)
__release(local);
D_ASSERT(i >= 0);
if (i == 0) {
+ if (mdev->state.disk == D_DISKLESS)
+ /* even internal references gone, safe to destroy */
+ drbd_ldev_destroy(mdev);
if (mdev->state.disk == D_FAILED)
+ /* all application IO references gone. */
drbd_go_diskless(mdev);
wake_up(&mdev->misc_wait);
}
@@ -2138,6 +2130,10 @@ static inline int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_stat
{
int io_allowed;
+ /* never get a reference while D_DISKLESS */
+ if (mdev->state.disk == D_DISKLESS)
+ return 0;
+
atomic_inc(&mdev->local_cnt);
io_allowed = (mdev->state.disk >= mins);
if (!io_allowed)
@@ -2406,12 +2402,12 @@ static inline void drbd_md_flush(struct drbd_conf *mdev)
{
int r;
- if (test_bit(MD_NO_BARRIER, &mdev->flags))
+ if (test_bit(MD_NO_FUA, &mdev->flags))
return;
r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL);
if (r) {
- set_bit(MD_NO_BARRIER, &mdev->flags);
+ set_bit(MD_NO_FUA, &mdev->flags);
dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
}
}
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 25c7a73..6be5401 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -835,6 +835,15 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN)
ns.conn = os.conn;
+ /* we cannot fail (again) if we already detached */
+ if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
+ ns.disk = D_DISKLESS;
+
+ /* if we are only D_ATTACHING yet,
+ * we can (and should) go directly to D_DISKLESS. */
+ if (ns.disk == D_FAILED && os.disk == D_ATTACHING)
+ ns.disk = D_DISKLESS;
+
/* After C_DISCONNECTING only C_STANDALONE may follow */
if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
ns.conn = os.conn;
@@ -1056,7 +1065,15 @@ int __drbd_set_state(struct drbd_conf *mdev,
!test_and_set_bit(CONFIG_PENDING, &mdev->flags))
set_bit(DEVICE_DYING, &mdev->flags);
- mdev->state.i = ns.i;
+ /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
+ * on the ldev here, to be sure the transition -> D_DISKLESS resp.
+ * drbd_ldev_destroy() won't happen before our corresponding
+ * after_state_ch works run, where we put_ldev again. */
+ if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
+ (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
+ atomic_inc(&mdev->local_cnt);
+
+ mdev->state = ns;
wake_up(&mdev->misc_wait);
wake_up(&mdev->state_wait);
@@ -1268,7 +1285,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
drbd_uuid_new_current(mdev);
clear_bit(NEW_CUR_UUID, &mdev->flags);
- drbd_md_sync(mdev);
}
spin_lock_irq(&mdev->req_lock);
_drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
@@ -1365,63 +1381,64 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate");
- /* first half of local IO error */
- if (os.disk > D_FAILED && ns.disk == D_FAILED) {
- enum drbd_io_error_p eh = EP_PASS_ON;
+ /* first half of local IO error, failure to attach,
+ * or administrative detach */
+ if (os.disk != D_FAILED && ns.disk == D_FAILED) {
+ enum drbd_io_error_p eh;
+ int was_io_error;
+ /* corresponding get_ldev was in __drbd_set_state, to serialize
+ * our cleanup here with the transition to D_DISKLESS,
+ * so it is safe to dereference ldev here. */
+ eh = mdev->ldev->dc.on_io_error;
+ was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
+
+ /* current state still has to be D_FAILED,
+ * there is only one way out: to D_DISKLESS,
+ * and that may only happen after our put_ldev below. */
+ if (mdev->state.disk != D_FAILED)
+ dev_err(DEV,
+ "ASSERT FAILED: disk is %s during detach\n",
+ drbd_disk_str(mdev->state.disk));
if (drbd_send_state(mdev))
- dev_warn(DEV, "Notified peer that my disk is broken.\n");
+ dev_warn(DEV, "Notified peer that I am detaching my disk\n");
else
- dev_err(DEV, "Sending state for drbd_io_error() failed\n");
+ dev_err(DEV, "Sending state for detaching disk failed\n");
drbd_rs_cancel_all(mdev);
- if (get_ldev_if_state(mdev, D_FAILED)) {
- eh = mdev->ldev->dc.on_io_error;
- put_ldev(mdev);
- }
- if (eh == EP_CALL_HELPER)
+ /* In case we want to get something to stable storage still,
+ * this may be the last chance.
+ * Following put_ldev may transition to D_DISKLESS. */
+ drbd_md_sync(mdev);
+ put_ldev(mdev);
+
+ if (was_io_error && eh == EP_CALL_HELPER)
drbd_khelper(mdev, "local-io-error");
}
+ /* second half of local IO error, failure to attach,
+ * or administrative detach,
+ * after local_cnt references have reached zero again */
+ if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
+ /* We must still be diskless,
+ * re-attach has to be serialized with this! */
+ if (mdev->state.disk != D_DISKLESS)
+ dev_err(DEV,
+ "ASSERT FAILED: disk is %s while going diskless\n",
+ drbd_disk_str(mdev->state.disk));
- /* second half of local IO error handling,
- * after local_cnt references have reached zero: */
- if (os.disk == D_FAILED && ns.disk == D_DISKLESS) {
- mdev->rs_total = 0;
- mdev->rs_failed = 0;
- atomic_set(&mdev->rs_pending_cnt, 0);
- }
-
- if (os.disk > D_DISKLESS && ns.disk == D_DISKLESS) {
- /* We must still be diskless,
- * re-attach has to be serialized with this! */
- if (mdev->state.disk != D_DISKLESS)
- dev_err(DEV,
- "ASSERT FAILED: disk is %s while going diskless\n",
- drbd_disk_str(mdev->state.disk));
+ mdev->rs_total = 0;
+ mdev->rs_failed = 0;
+ atomic_set(&mdev->rs_pending_cnt, 0);
- /* we cannot assert local_cnt == 0 here, as get_ldev_if_state
- * will inc/dec it frequently. Since we became D_DISKLESS, no
- * one has touched the protected members anymore, though, so we
- * are safe to free them here. */
if (drbd_send_state(mdev))
- dev_warn(DEV, "Notified peer that I detached my disk.\n");
+ dev_warn(DEV, "Notified peer that I'm now diskless.\n");
else
- dev_err(DEV, "Sending state for detach failed\n");
-
- lc_destroy(mdev->resync);
- mdev->resync = NULL;
- lc_destroy(mdev->act_log);
- mdev->act_log = NULL;
- __no_warn(local,
- drbd_free_bc(mdev->ldev);
- mdev->ldev = NULL;);
-
- if (mdev->md_io_tmpp) {
- __free_page(mdev->md_io_tmpp);
- mdev->md_io_tmpp = NULL;
- }
+ dev_err(DEV, "Sending state for being diskless failed\n");
+ /* corresponding get_ldev in __drbd_set_state
+ * this may finally trigger drbd_ldev_destroy. */
+ put_ldev(mdev);
}
/* Disks got bigger while they were detached */
@@ -2772,11 +2789,6 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
drbd_set_defaults(mdev);
- /* for now, we do NOT yet support it,
- * even though we start some framework
- * to eventually support barriers */
- set_bit(NO_BARRIER_SUPP, &mdev->flags);
-
atomic_set(&mdev->ap_bio_cnt, 0);
atomic_set(&mdev->ap_pending_cnt, 0);
atomic_set(&mdev->rs_pending_cnt, 0);
@@ -2842,7 +2854,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
drbd_thread_init(mdev, &mdev->asender, drbd_asender);
mdev->agreed_pro_version = PRO_VERSION_MAX;
- mdev->write_ordering = WO_bio_barrier;
+ mdev->write_ordering = WO_bdev_flush;
mdev->resync_wenr = LC_FREE;
}
@@ -2899,7 +2911,6 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
D_ASSERT(list_empty(&mdev->resync_work.list));
D_ASSERT(list_empty(&mdev->unplug_work.list));
D_ASSERT(list_empty(&mdev->go_diskless.list));
-
}
@@ -3660,6 +3671,8 @@ void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
get_random_bytes(&val, sizeof(u64));
_drbd_uuid_set(mdev, UI_CURRENT, val);
+ /* get it to stable storage _now_ */
+ drbd_md_sync(mdev);
}
void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
@@ -3756,19 +3769,31 @@ static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
return 1;
}
+void drbd_ldev_destroy(struct drbd_conf *mdev)
+{
+ lc_destroy(mdev->resync);
+ mdev->resync = NULL;
+ lc_destroy(mdev->act_log);
+ mdev->act_log = NULL;
+ __no_warn(local,
+ drbd_free_bc(mdev->ldev);
+ mdev->ldev = NULL;);
+
+ if (mdev->md_io_tmpp) {
+ __free_page(mdev->md_io_tmpp);
+ mdev->md_io_tmpp = NULL;
+ }
+ clear_bit(GO_DISKLESS, &mdev->flags);
+}
+
static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
D_ASSERT(mdev->state.disk == D_FAILED);
/* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
* inc/dec it frequently. Once we are D_DISKLESS, no one will touch
- * the protected members anymore, though, so in the after_state_ch work
- * it will be safe to free them. */
+ * the protected members anymore, though, so once put_ldev reaches zero
+ * again, it will be safe to free them. */
drbd_force_state(mdev, NS(disk, D_DISKLESS));
- /* We need to wait for return of references checked out while we still
- * have been D_FAILED, though (drbd_md_sync, bitmap io). */
- wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
-
- clear_bit(GO_DISKLESS, &mdev->flags);
return 1;
}
@@ -3777,9 +3802,6 @@ void drbd_go_diskless(struct drbd_conf *mdev)
D_ASSERT(mdev->state.disk == D_FAILED);
if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
- /* don't drbd_queue_work_front,
- * we need to serialize with the after_state_ch work
- * of the -> D_FAILED transition. */
}
/**
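Note: after this rewrite the detach path is driven entirely by the local_cnt reference count; the extra reference taken in __drbd_set_state() pins ldev until the matching after_state_ch() work has run. In outline:

    /* Detach life cycle (sketch):
     *
     *   disk -> D_FAILED        (io-error, admin detach, failed attach)
     *     after_state_ch(): use mdev->ldev, drbd_md_sync(), put_ldev()
     *     put_ldev() hits 0 while D_FAILED
     *       -> drbd_go_diskless() queues w_go_diskless()
     *          w_go_diskless(): drbd_force_state(disk, D_DISKLESS)
     *   disk -> D_DISKLESS
     *     put_ldev() hits 0 while D_DISKLESS
     *       -> drbd_ldev_destroy(): free resync lru, act_log, ldev
     */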
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 87925e9..29e5c70 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -870,6 +870,11 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
retcode = ERR_DISK_CONFIGURED;
goto fail;
}
+ /* It may just now have detached because of IO error. Make sure
+ * drbd_ldev_destroy is done already, we may end up here very fast,
+ * e.g. if someone calls attach from the on-io-error handler,
+ * to realize a "hot spare" feature (not that I'd recommend that) */
+ wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
/* allocation not in the IO path, cqueue thread context */
nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
@@ -1098,9 +1103,9 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
/* Reset the "barriers don't work" bits here, then force meta data to
* be written, to ensure we determine if barriers are supported. */
if (nbc->dc.no_md_flush)
- set_bit(MD_NO_BARRIER, &mdev->flags);
+ set_bit(MD_NO_FUA, &mdev->flags);
else
- clear_bit(MD_NO_BARRIER, &mdev->flags);
+ clear_bit(MD_NO_FUA, &mdev->flags);
/* Point of no return reached.
* Devices and memory are no longer released by error cleanup below.
@@ -1112,8 +1117,8 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
nbc = NULL;
resync_lru = NULL;
- mdev->write_ordering = WO_bio_barrier;
- drbd_bump_write_ordering(mdev, WO_bio_barrier);
+ mdev->write_ordering = WO_bdev_flush;
+ drbd_bump_write_ordering(mdev, WO_bdev_flush);
if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
set_bit(CRASHED_PRIMARY, &mdev->flags);
@@ -1262,7 +1267,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
force_diskless_dec:
put_ldev(mdev);
force_diskless:
- drbd_force_state(mdev, NS(disk, D_DISKLESS));
+ drbd_force_state(mdev, NS(disk, D_FAILED));
drbd_md_sync(mdev);
release_bdev2_fail:
if (nbc)
@@ -1285,10 +1290,19 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
return 0;
}
+/* Detaching the disk is a process in multiple stages. First we need to lock
+ * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
+ * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
+ * internal references as well.
+ * Only then have we finally detached. */
static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
struct drbd_nl_cfg_reply *reply)
{
+ drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS));
+ if (mdev->state.disk == D_DISKLESS)
+ wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
+ drbd_resume_io(mdev);
return 0;
}
@@ -1953,7 +1967,6 @@ static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
drbd_uuid_new_current(mdev);
clear_bit(NEW_CUR_UUID, &mdev->flags);
- drbd_md_sync(mdev);
}
drbd_suspend_io(mdev);
reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index ad325c5..7e6ac30 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -158,7 +158,6 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
[WO_none] = 'n',
[WO_drain_io] = 'd',
[WO_bdev_flush] = 'f',
- [WO_bio_barrier] = 'b',
};
seq_printf(seq, "version: " REL_VERSION " (api:%d/proto:%d-%d)\n%s\n",
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index efd6169..24487d4 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -36,7 +36,6 @@
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
@@ -49,11 +48,6 @@
#include "drbd_vli.h"
-struct flush_work {
- struct drbd_work w;
- struct drbd_epoch *epoch;
-};
-
enum finish_epoch {
FE_STILL_LIVE,
FE_DESTROYED,
@@ -66,16 +60,6 @@ static int drbd_do_auth(struct drbd_conf *mdev);
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_conf *, struct drbd_work *, int);
-static struct drbd_epoch *previous_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
-{
- struct drbd_epoch *prev;
- spin_lock(&mdev->epoch_lock);
- prev = list_entry(epoch->list.prev, struct drbd_epoch, list);
- if (prev == epoch || prev == mdev->current_epoch)
- prev = NULL;
- spin_unlock(&mdev->epoch_lock);
- return prev;
-}
#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
@@ -981,7 +965,7 @@ static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsi
return TRUE;
}
-static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
+static void drbd_flush(struct drbd_conf *mdev)
{
int rv;
@@ -997,24 +981,6 @@ static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct d
}
put_ldev(mdev);
}
-
- return drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
-}
-
-static int w_flush(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
-{
- struct flush_work *fw = (struct flush_work *)w;
- struct drbd_epoch *epoch = fw->epoch;
-
- kfree(w);
-
- if (!test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags))
- drbd_flush_after_epoch(mdev, epoch);
-
- drbd_may_finish_epoch(mdev, epoch, EV_PUT |
- (mdev->state.conn < C_CONNECTED ? EV_CLEANUP : 0));
-
- return 1;
}
/**
@@ -1027,15 +993,13 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
struct drbd_epoch *epoch,
enum epoch_event ev)
{
- int finish, epoch_size;
+ int epoch_size;
struct drbd_epoch *next_epoch;
- int schedule_flush = 0;
enum finish_epoch rv = FE_STILL_LIVE;
spin_lock(&mdev->epoch_lock);
do {
next_epoch = NULL;
- finish = 0;
epoch_size = atomic_read(&epoch->epoch_size);
@@ -1045,16 +1009,6 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
break;
case EV_GOT_BARRIER_NR:
set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
-
- /* Special case: If we just switched from WO_bio_barrier to
- WO_bdev_flush we should not finish the current epoch */
- if (test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags) && epoch_size == 1 &&
- mdev->write_ordering != WO_bio_barrier &&
- epoch == mdev->current_epoch)
- clear_bit(DE_CONTAINS_A_BARRIER, &epoch->flags);
- break;
- case EV_BARRIER_DONE:
- set_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags);
break;
case EV_BECAME_LAST:
/* nothing to do*/
@@ -1063,23 +1017,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
if (epoch_size != 0 &&
atomic_read(&epoch->active) == 0 &&
- test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) &&
- epoch->list.prev == &mdev->current_epoch->list &&
- !test_bit(DE_IS_FINISHING, &epoch->flags)) {
- /* Nearly all conditions are met to finish that epoch... */
- if (test_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags) ||
- mdev->write_ordering == WO_none ||
- (epoch_size == 1 && test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) ||
- ev & EV_CLEANUP) {
- finish = 1;
- set_bit(DE_IS_FINISHING, &epoch->flags);
- } else if (!test_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags) &&
- mdev->write_ordering == WO_bio_barrier) {
- atomic_inc(&epoch->active);
- schedule_flush = 1;
- }
- }
- if (finish) {
+ test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
if (!(ev & EV_CLEANUP)) {
spin_unlock(&mdev->epoch_lock);
drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
@@ -1102,6 +1040,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
/* atomic_set(&epoch->active, 0); is already zero */
if (rv == FE_STILL_LIVE)
rv = FE_RECYCLED;
+ wake_up(&mdev->ee_wait);
}
}
@@ -1113,22 +1052,6 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
spin_unlock(&mdev->epoch_lock);
- if (schedule_flush) {
- struct flush_work *fw;
- fw = kmalloc(sizeof(*fw), GFP_ATOMIC);
- if (fw) {
- fw->w.cb = w_flush;
- fw->epoch = epoch;
- drbd_queue_work(&mdev->data.work, &fw->w);
- } else {
- dev_warn(DEV, "Could not kmalloc a flush_work obj\n");
- set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
- /* That is not a recursion, only one level */
- drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
- drbd_may_finish_epoch(mdev, epoch, EV_PUT);
- }
- }
-
return rv;
}
@@ -1144,19 +1067,16 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo)
[WO_none] = "none",
[WO_drain_io] = "drain",
[WO_bdev_flush] = "flush",
- [WO_bio_barrier] = "barrier",
};
pwo = mdev->write_ordering;
wo = min(pwo, wo);
- if (wo == WO_bio_barrier && mdev->ldev->dc.no_disk_barrier)
- wo = WO_bdev_flush;
if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
wo = WO_drain_io;
if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
wo = WO_none;
mdev->write_ordering = wo;
- if (pwo != mdev->write_ordering || wo == WO_bio_barrier)
+ if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
}
@@ -1192,7 +1112,7 @@ next_bio:
bio->bi_sector = sector;
bio->bi_bdev = mdev->ldev->backing_bdev;
/* we special case some flags in the multi-bio case, see below
- * (REQ_UNPLUG, REQ_HARDBARRIER) */
+ * (REQ_UNPLUG) */
bio->bi_rw = rw;
bio->bi_private = e;
bio->bi_end_io = drbd_endio_sec;
@@ -1226,11 +1146,6 @@ next_bio:
bio->bi_rw &= ~REQ_UNPLUG;
drbd_generic_make_request(mdev, fault_type, bio);
-
- /* strip off REQ_HARDBARRIER,
- * unless it is the first or last bio */
- if (bios && bios->bi_next)
- bios->bi_rw &= ~REQ_HARDBARRIER;
} while (bios);
maybe_kick_lo(mdev);
return 0;
@@ -1244,45 +1159,9 @@ fail:
return -ENOMEM;
}
-/**
- * w_e_reissue() - Worker callback; Resubmit a bio, without REQ_HARDBARRIER set
- * @mdev: DRBD device.
- * @w: work object.
- * @cancel: The connection will be closed anyways (unused in this callback)
- */
-int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __releases(local)
-{
- struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
- /* We leave DE_CONTAINS_A_BARRIER and EE_IS_BARRIER in place,
- (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
- so that we can finish that epoch in drbd_may_finish_epoch().
- That is necessary if we already have a long chain of Epochs, before
- we realize that REQ_HARDBARRIER is actually not supported */
-
- /* As long as the -ENOTSUPP on the barrier is reported immediately
- that will never trigger. If it is reported late, we will just
- print that warning and continue correctly for all future requests
- with WO_bdev_flush */
- if (previous_epoch(mdev, e->epoch))
- dev_warn(DEV, "Write ordering was not enforced (one time event)\n");
-
- /* we still have a local reference,
- * get_ldev was done in receive_Data. */
-
- e->w.cb = e_end_block;
- if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_DT_WR) != 0) {
- /* drbd_submit_ee fails for one reason only:
- * if was not able to allocate sufficient bios.
- * requeue, try again later. */
- e->w.cb = w_e_reissue;
- drbd_queue_work(&mdev->data.work, &e->w);
- }
- return 1;
-}
-
static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
- int rv, issue_flush;
+ int rv;
struct p_barrier *p = &mdev->data.rbuf.barrier;
struct drbd_epoch *epoch;
@@ -1300,44 +1179,40 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
* Therefore we must send the barrier_ack after the barrier request was
* completed. */
switch (mdev->write_ordering) {
- case WO_bio_barrier:
case WO_none:
if (rv == FE_RECYCLED)
return TRUE;
- break;
+
+ /* receiver context, in the writeout path of the other node.
+ * avoid potential distributed deadlock */
+ epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
+ if (epoch)
+ break;
+ else
+ dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
+ /* Fall through */
case WO_bdev_flush:
case WO_drain_io:
- if (rv == FE_STILL_LIVE) {
- set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
- drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
- rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
- }
- if (rv == FE_RECYCLED)
- return TRUE;
-
- /* The asender will send all the ACKs and barrier ACKs out, since
- all EEs moved from the active_ee to the done_ee. We need to
- provide a new epoch object for the EEs that come in soon */
- break;
- }
-
- /* receiver context, in the writeout path of the other node.
- * avoid potential distributed deadlock */
- epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
- if (!epoch) {
- dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
- issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
- if (issue_flush) {
- rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
- if (rv == FE_RECYCLED)
- return TRUE;
+ drbd_flush(mdev);
+
+ if (atomic_read(&mdev->current_epoch->epoch_size)) {
+ epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
+ if (epoch)
+ break;
}
- drbd_wait_ee_list_empty(mdev, &mdev->done_ee);
+ epoch = mdev->current_epoch;
+ wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
+
+ D_ASSERT(atomic_read(&epoch->active) == 0);
+ D_ASSERT(epoch->flags == 0);
return TRUE;
+ default:
+ dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
+ return FALSE;
}
epoch->flags = 0;
@@ -1652,15 +1527,8 @@ static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
sector_t sector = e->sector;
- struct drbd_epoch *epoch;
int ok = 1, pcmd;
- if (e->flags & EE_IS_BARRIER) {
- epoch = previous_epoch(mdev, e->epoch);
- if (epoch)
- drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE + (cancel ? EV_CLEANUP : 0));
- }
-
if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
if (likely((e->flags & EE_WAS_ERROR) == 0)) {
pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
@@ -1817,27 +1685,6 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
e->epoch = mdev->current_epoch;
atomic_inc(&e->epoch->epoch_size);
atomic_inc(&e->epoch->active);
-
- if (mdev->write_ordering == WO_bio_barrier && atomic_read(&e->epoch->epoch_size) == 1) {
- struct drbd_epoch *epoch;
- /* Issue a barrier if we start a new epoch, and the previous epoch
- was not a epoch containing a single request which already was
- a Barrier. */
- epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
- if (epoch == e->epoch) {
- set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
- rw |= REQ_HARDBARRIER;
- e->flags |= EE_IS_BARRIER;
- } else {
- if (atomic_read(&epoch->epoch_size) > 1 ||
- !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
- set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
- set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
- rw |= REQ_HARDBARRIER;
- e->flags |= EE_IS_BARRIER;
- }
- }
- }
spin_unlock(&mdev->epoch_lock);
dp_flags = be32_to_cpu(p->dp_flags);
@@ -1995,10 +1842,11 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
break;
}
- if (mdev->state.pdsk == D_DISKLESS) {
+ if (mdev->state.pdsk < D_INCONSISTENT) {
/* In case we have the only disk of the cluster, */
drbd_set_out_of_sync(mdev, e->sector, e->size);
e->flags |= EE_CALL_AL_COMPLETE_IO;
+ e->flags &= ~EE_MAY_SET_IN_SYNC;
drbd_al_begin_io(mdev, e->sector);
}
@@ -3362,7 +3210,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
if (ns.conn == C_MASK) {
ns.conn = C_CONNECTED;
if (mdev->state.disk == D_NEGOTIATING) {
- drbd_force_state(mdev, NS(disk, D_DISKLESS));
+ drbd_force_state(mdev, NS(disk, D_FAILED));
} else if (peer_state.disk == D_NEGOTIATING) {
dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
peer_state.disk = D_DISKLESS;
@@ -3779,17 +3627,19 @@ static void drbdd(struct drbd_conf *mdev)
}
shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
- rv = drbd_recv(mdev, &header->h80.payload, shs);
- if (unlikely(rv != shs)) {
- dev_err(DEV, "short read while reading sub header: rv=%d\n", rv);
- goto err_out;
- }
-
if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
goto err_out;
}
+ if (shs) {
+ rv = drbd_recv(mdev, &header->h80.payload, shs);
+ if (unlikely(rv != shs)) {
+ dev_err(DEV, "short read while reading sub header: rv=%d\n", rv);
+ goto err_out;
+ }
+ }
+
rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);
if (unlikely(!rv)) {
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 9e91a25..11a75d3 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -258,7 +258,7 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
if (!hlist_unhashed(&req->colision))
hlist_del(&req->colision);
else
- D_ASSERT((s & RQ_NET_MASK) == 0);
+ D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);
/* for writes we need to do some extra housekeeping */
if (rw == WRITE)
@@ -813,7 +813,8 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
mdev->state.conn >= C_CONNECTED));
if (!(local || remote) && !is_susp(mdev->state)) {
- dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
+ if (__ratelimit(&drbd_ratelimit_state))
+ dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
goto fail_free_complete;
}
@@ -942,12 +943,21 @@ allocate_barrier:
if (local) {
req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
- if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR
- : rw == READ ? DRBD_FAULT_DT_RD
- : DRBD_FAULT_DT_RA))
+ /* State may have changed since we grabbed our reference on the
+ * mdev->ldev member. Double check, and short-circuit to endio.
+ * In case the last activity log transaction failed to get on
+ * stable storage, and this is a WRITE, we may not even submit
+ * this bio. */
+ if (get_ldev(mdev)) {
+ if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR
+ : rw == READ ? DRBD_FAULT_DT_RD
+ : DRBD_FAULT_DT_RA))
+ bio_endio(req->private_bio, -EIO);
+ else
+ generic_make_request(req->private_bio);
+ put_ldev(mdev);
+ } else
bio_endio(req->private_bio, -EIO);
- else
- generic_make_request(req->private_bio);
}
/* we need to plug ALWAYS since we possibly need to kick lo_dev.
@@ -1022,20 +1032,6 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
return 0;
}
- /* Reject barrier requests if we know the underlying device does
- * not support them.
- * XXX: Need to get this info from peer as well some how so we
- * XXX: reject if EITHER side/data/metadata area does not support them.
- *
- * because of those XXX, this is not yet enabled,
- * i.e. in drbd_init_set_defaults we set the NO_BARRIER_SUPP bit.
- */
- if (unlikely(bio->bi_rw & REQ_HARDBARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags)) {
- /* dev_warn(DEV, "Rejecting barrier request as underlying device does not support\n"); */
- bio_endio(bio, -EOPNOTSUPP);
- return 0;
- }
-
/*
* what we "blindly" assume:
*/
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 181ea03..ab2bd09 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -339,7 +339,8 @@ static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what)
}
/* completion of master bio is outside of spinlock.
- * If you need it irqsave, do it your self! */
+ * If you need it irqsave, do it yourself!
+ * Which means: don't use from bio endio callback. */
static inline int req_mod(struct drbd_request *req,
enum drbd_req_event what)
{
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 108d580..34f224b 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -26,7 +26,6 @@
#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/sched.h>
-#include <linux/smp_lock.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
@@ -102,12 +101,6 @@ void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
put_ldev(mdev);
}
-static int is_failed_barrier(int ee_flags)
-{
- return (ee_flags & (EE_IS_BARRIER|EE_WAS_ERROR|EE_RESUBMITTED))
- == (EE_IS_BARRIER|EE_WAS_ERROR);
-}
-
/* writes on behalf of the partner, or resync writes,
* "submitted" by the receiver, final stage. */
static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local)
@@ -119,21 +112,6 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(lo
int is_syncer_req;
int do_al_complete_io;
- /* if this is a failed barrier request, disable use of barriers,
- * and schedule for resubmission */
- if (is_failed_barrier(e->flags)) {
- drbd_bump_write_ordering(mdev, WO_bdev_flush);
- spin_lock_irqsave(&mdev->req_lock, flags);
- list_del(&e->w.list);
- e->flags = (e->flags & ~EE_WAS_ERROR) | EE_RESUBMITTED;
- e->w.cb = w_e_reissue;
- /* put_ldev actually happens below, once we come here again. */
- __release(local);
- spin_unlock_irqrestore(&mdev->req_lock, flags);
- drbd_queue_work(&mdev->data.work, &e->w);
- return;
- }
-
D_ASSERT(e->block_id != ID_VACANT);
/* after we moved e to done_ee,
@@ -215,8 +193,10 @@ void drbd_endio_sec(struct bio *bio, int error)
*/
void drbd_endio_pri(struct bio *bio, int error)
{
+ unsigned long flags;
struct drbd_request *req = bio->bi_private;
struct drbd_conf *mdev = req->mdev;
+ struct bio_and_error m;
enum drbd_req_event what;
int uptodate = bio_flagged(bio, BIO_UPTODATE);
@@ -242,7 +222,13 @@ void drbd_endio_pri(struct bio *bio, int error)
bio_put(req->private_bio);
req->private_bio = ERR_PTR(error);
- req_mod(req, what);
+ /* not req_mod(), we need irqsave here! */
+ spin_lock_irqsave(&mdev->req_lock, flags);
+ __req_mod(req, what, &m);
+ spin_unlock_irqrestore(&mdev->req_lock, flags);
+
+ if (m.bio)
+ complete_master_bio(mdev, &m);
}
int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
@@ -925,7 +911,7 @@ out:
drbd_md_sync(mdev);
if (test_and_clear_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags)) {
- dev_warn(DEV, "Writing the whole bitmap, due to failed kmalloc\n");
+ dev_info(DEV, "Writing the whole bitmap\n");
drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished");
}
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 767107c..3951020 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4363,9 +4363,9 @@ out_unreg_blkdev:
out_put_disk:
while (dr--) {
del_timer(&motor_off_timer[dr]);
- put_disk(disks[dr]);
if (disks[dr]->queue)
blk_cleanup_queue(disks[dr]->queue);
+ put_disk(disks[dr]);
}
return err;
}
@@ -4573,8 +4573,8 @@ static void __exit floppy_module_exit(void)
device_remove_file(&floppy_device[drive].dev, &dev_attr_cmos);
platform_device_unregister(&floppy_device[drive]);
}
- put_disk(disks[drive]);
blk_cleanup_queue(disks[drive]->queue);
+ put_disk(disks[drive]);
}
del_timer_sync(&fd_timeout);
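Note: both floppy hunks fix the same teardown-order bug: put_disk() may drop the last reference to the gendisk, after which disks[dr]->queue is a use-after-free. Cleanup must mirror setup in reverse (a sketch of the corrected order):

    del_timer_sync(&motor_off_timer[dr]);
    if (disks[dr]->queue)
    	blk_cleanup_queue(disks[dr]->queue);	/* queue first */
    put_disk(disks[dr]);			/* last gendisk ref last */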
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 1e5284e..7ea0bea 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -481,12 +481,6 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
if (bio_rw(bio) == WRITE) {
struct file *file = lo->lo_backing_file;
- /* REQ_HARDBARRIER is deprecated */
- if (bio->bi_rw & REQ_HARDBARRIER) {
- ret = -EOPNOTSUPP;
- goto out;
- }
-
if (bio->bi_rw & REQ_FLUSH) {
ret = vfs_fsync(file, 0);
if (unlikely(ret && ret != -EINVAL)) {
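Note: for the file-backed loop device an empty flush request maps directly onto fsync of the backing file; the second argument 0 to vfs_fsync() requests a full data-plus-metadata sync. In context (completing the hunk above; error mapping as in the driver):

    if (bio->bi_rw & REQ_FLUSH) {
    	ret = vfs_fsync(file, 0);
    	if (unlikely(ret && ret != -EINVAL)) {
    		ret = -EIO;
    		goto out;
    	}
    }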
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 6ec9d53..008d4a0 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -21,80 +21,9 @@
- Instructions for use
- --------------------
+ For usage instructions, please refer to:
- 1) Map a Linux block device to an existing rbd image.
-
- Usage: <mon ip addr> <options> <pool name> <rbd image name> [snap name]
-
- $ echo "192.168.0.1 name=admin rbd foo" > /sys/class/rbd/add
-
- The snapshot name can be "-" or omitted to map the image read/write.
-
- 2) List all active blkdev<->object mappings.
-
- In this example, we have performed step #1 twice, creating two blkdevs,
- mapped to two separate rados objects in the rados rbd pool
-
- $ cat /sys/class/rbd/list
- #id major client_name pool name snap KB
- 0 254 client4143 rbd foo - 1024000
-
- The columns, in order, are:
- - blkdev unique id
- - blkdev assigned major
- - rados client id
- - rados pool name
- - rados block device name
- - mapped snapshot ("-" if none)
- - device size in KB
-
-
- 3) Create a snapshot.
-
- Usage: <blkdev id> <snapname>
-
- $ echo "0 mysnap" > /sys/class/rbd/snap_create
-
-
- 4) Listing a snapshot.
-
- $ cat /sys/class/rbd/snaps_list
- #id snap KB
- 0 - 1024000 (*)
- 0 foo 1024000
-
- The columns, in order, are:
- - blkdev unique id
- - snapshot name, '-' means none (active read/write version)
- - size of device at time of snapshot
- - the (*) indicates this is the active version
-
- 5) Rollback to snapshot.
-
- Usage: <blkdev id> <snapname>
-
- $ echo "0 mysnap" > /sys/class/rbd/snap_rollback
-
-
- 6) Mapping an image using snapshot.
-
- A snapshot mapping is read-only. This is being done by passing
- snap=<snapname> to the options when adding a device.
-
- $ echo "192.168.0.1 name=admin,snap=mysnap rbd foo" > /sys/class/rbd/add
-
-
- 7) Remove an active blkdev<->rbd image mapping.
-
- In this example, we remove the mapping with blkdev unique id 1.
-
- $ echo 1 > /sys/class/rbd/remove
-
-
- NOTE: The actual creation and deletion of rados objects is outside the scope
- of this driver.
+ Documentation/ABI/testing/sysfs-bus-rbd
*/
@@ -163,6 +92,14 @@ struct rbd_request {
u64 len;
};
+struct rbd_snap {
+ struct device dev;
+ const char *name;
+ size_t size;
+ struct list_head node;
+ u64 id;
+};
+
/*
* a single device
*/
@@ -193,21 +130,60 @@ struct rbd_device {
int read_only;
struct list_head node;
+
+ /* list of snapshots */
+ struct list_head snaps;
+
+ /* sysfs related */
+ struct device dev;
+};
+
+static struct bus_type rbd_bus_type = {
+ .name = "rbd",
};
static spinlock_t node_lock; /* protects client get/put */
-static struct class *class_rbd; /* /sys/class/rbd */
static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */
static LIST_HEAD(rbd_dev_list); /* devices */
static LIST_HEAD(rbd_client_list); /* clients */
+static int __rbd_init_snaps_header(struct rbd_device *rbd_dev);
+static void rbd_dev_release(struct device *dev);
+static ssize_t rbd_snap_rollback(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t size);
+static ssize_t rbd_snap_add(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count);
+static void __rbd_remove_snap_dev(struct rbd_device *rbd_dev,
+ struct rbd_snap *snap);
+
+
+static struct rbd_device *dev_to_rbd(struct device *dev)
+{
+ return container_of(dev, struct rbd_device, dev);
+}
+
+static struct device *rbd_get_dev(struct rbd_device *rbd_dev)
+{
+ return get_device(&rbd_dev->dev);
+}
+
+static void rbd_put_dev(struct rbd_device *rbd_dev)
+{
+ put_device(&rbd_dev->dev);
+}
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
struct gendisk *disk = bdev->bd_disk;
struct rbd_device *rbd_dev = disk->private_data;
+ rbd_get_dev(rbd_dev);
+
set_device_ro(bdev, rbd_dev->read_only);
if ((mode & FMODE_WRITE) && rbd_dev->read_only)
@@ -216,9 +192,19 @@ static int rbd_open(struct block_device *bdev, fmode_t mode)
return 0;
}
+static int rbd_release(struct gendisk *disk, fmode_t mode)
+{
+ struct rbd_device *rbd_dev = disk->private_data;
+
+ rbd_put_dev(rbd_dev);
+
+ return 0;
+}
+
static const struct block_device_operations rbd_bd_ops = {
.owner = THIS_MODULE,
.open = rbd_open,
+ .release = rbd_release,
};
/*
@@ -361,7 +347,6 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
int ret = -ENOMEM;
init_rwsem(&header->snap_rwsem);
-
header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
snap_count *
@@ -1256,10 +1241,20 @@ bad:
return -ERANGE;
}
+static void __rbd_remove_all_snaps(struct rbd_device *rbd_dev)
+{
+ struct rbd_snap *snap;
+
+ while (!list_empty(&rbd_dev->snaps)) {
+ snap = list_first_entry(&rbd_dev->snaps, struct rbd_snap, node);
+ __rbd_remove_snap_dev(rbd_dev, snap);
+ }
+}
+
/*
* only read the first part of the ondisk header, without the snaps info
*/
-static int rbd_update_snaps(struct rbd_device *rbd_dev)
+static int __rbd_update_snaps(struct rbd_device *rbd_dev)
{
int ret;
struct rbd_image_header h;
@@ -1280,12 +1275,15 @@ static int rbd_update_snaps(struct rbd_device *rbd_dev)
rbd_dev->header.total_snaps = h.total_snaps;
rbd_dev->header.snapc = h.snapc;
rbd_dev->header.snap_names = h.snap_names;
+ rbd_dev->header.snap_names_len = h.snap_names_len;
rbd_dev->header.snap_sizes = h.snap_sizes;
rbd_dev->header.snapc->seq = snap_seq;
+ ret = __rbd_init_snaps_header(rbd_dev);
+
up_write(&rbd_dev->header.snap_rwsem);
- return 0;
+ return ret;
}
static int rbd_init_disk(struct rbd_device *rbd_dev)
@@ -1300,6 +1298,11 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
if (rc)
return rc;
+ /* no need to lock here, as rbd_dev is not registered yet */
+ rc = __rbd_init_snaps_header(rbd_dev);
+ if (rc)
+ return rc;
+
rc = rbd_header_set_snap(rbd_dev, rbd_dev->snap_name, &total_size);
if (rc)
return rc;
@@ -1343,54 +1346,360 @@ out:
return rc;
}
-/********************************************************************
- * /sys/class/rbd/
- * add map rados objects to blkdev
- * remove unmap rados objects
- * list show mappings
- *******************************************************************/
+/*
+ sysfs
+*/
+
+static ssize_t rbd_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct rbd_device *rbd_dev = dev_to_rbd(dev);
+
+ return sprintf(buf, "%llu\n", (unsigned long long)rbd_dev->header.image_size);
+}
+
+static ssize_t rbd_major_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct rbd_device *rbd_dev = dev_to_rbd(dev);
-static void class_rbd_release(struct class *cls)
+ return sprintf(buf, "%d\n", rbd_dev->major);
+}
+
+static ssize_t rbd_client_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- kfree(cls);
+ struct rbd_device *rbd_dev = dev_to_rbd(dev);
+
+ return sprintf(buf, "client%lld\n", ceph_client_id(rbd_dev->client));
}
-static ssize_t class_rbd_list(struct class *c,
- struct class_attribute *attr,
- char *data)
+static ssize_t rbd_pool_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- int n = 0;
- struct list_head *tmp;
- int max = PAGE_SIZE;
+ struct rbd_device *rbd_dev = dev_to_rbd(dev);
+
+ return sprintf(buf, "%s\n", rbd_dev->pool_name);
+}
+
+static ssize_t rbd_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct rbd_device *rbd_dev = dev_to_rbd(dev);
+
+ return sprintf(buf, "%s\n", rbd_dev->obj);
+}
+
+static ssize_t rbd_snap_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rbd_device *rbd_dev = dev_to_rbd(dev);
+
+ return sprintf(buf, "%s\n", rbd_dev->snap_name);
+}
+
+static ssize_t rbd_image_refresh(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t size)
+{
+ struct rbd_device *rbd_dev = dev_to_rbd(dev);
+ int rc;
+ int ret = size;
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
- n += snprintf(data, max,
- "#id\tmajor\tclient_name\tpool\tname\tsnap\tKB\n");
+ rc = __rbd_update_snaps(rbd_dev);
+ if (rc < 0)
+ ret = rc;
- list_for_each(tmp, &rbd_dev_list) {
- struct rbd_device *rbd_dev;
+ mutex_unlock(&ctl_mutex);
+ return ret;
+}
- rbd_dev = list_entry(tmp, struct rbd_device, node);
- n += snprintf(data+n, max-n,
- "%d\t%d\tclient%lld\t%s\t%s\t%s\t%lld\n",
- rbd_dev->id,
- rbd_dev->major,
- ceph_client_id(rbd_dev->client),
- rbd_dev->pool_name,
- rbd_dev->obj, rbd_dev->snap_name,
- rbd_dev->header.image_size >> 10);
- if (n == max)
+static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
+static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
+static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
+static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
+static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
+static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
+static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
+static DEVICE_ATTR(create_snap, S_IWUSR, NULL, rbd_snap_add);
+static DEVICE_ATTR(rollback_snap, S_IWUSR, NULL, rbd_snap_rollback);
+
+static struct attribute *rbd_attrs[] = {
+ &dev_attr_size.attr,
+ &dev_attr_major.attr,
+ &dev_attr_client_id.attr,
+ &dev_attr_pool.attr,
+ &dev_attr_name.attr,
+ &dev_attr_current_snap.attr,
+ &dev_attr_refresh.attr,
+ &dev_attr_create_snap.attr,
+ &dev_attr_rollback_snap.attr,
+ NULL
+};
+
+static struct attribute_group rbd_attr_group = {
+ .attrs = rbd_attrs,
+};
+
+static const struct attribute_group *rbd_attr_groups[] = {
+ &rbd_attr_group,
+ NULL
+};
+
+static void rbd_sysfs_dev_release(struct device *dev)
+{
+}
+
+static struct device_type rbd_device_type = {
+ .name = "rbd",
+ .groups = rbd_attr_groups,
+ .release = rbd_sysfs_dev_release,
+};
+
+
+/*
+ sysfs - snapshots
+*/
+
+static ssize_t rbd_snap_size_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
+
+ return sprintf(buf, "%lld\n", (long long)snap->size);
+}
+
+static ssize_t rbd_snap_id_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
+
+ return sprintf(buf, "%lld\n", (long long)snap->id);
+}
+
+static DEVICE_ATTR(snap_size, S_IRUGO, rbd_snap_size_show, NULL);
+static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL);
+
+static struct attribute *rbd_snap_attrs[] = {
+ &dev_attr_snap_size.attr,
+ &dev_attr_snap_id.attr,
+ NULL,
+};
+
+static struct attribute_group rbd_snap_attr_group = {
+ .attrs = rbd_snap_attrs,
+};
+
+static void rbd_snap_dev_release(struct device *dev)
+{
+ struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
+ kfree(snap->name);
+ kfree(snap);
+}
+
+static const struct attribute_group *rbd_snap_attr_groups[] = {
+ &rbd_snap_attr_group,
+ NULL
+};
+
+static struct device_type rbd_snap_device_type = {
+ .groups = rbd_snap_attr_groups,
+ .release = rbd_snap_dev_release,
+};
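Taken together, the two device types above yield a sysfs layout along these lines (a sketch; names follow the dev_set_name() calls in this patch, assuming one mapped image with id 0 and a snapshot named mysnap):

    /sys/bus/rbd/devices/0/
        size  major  client_id  pool  name  current_snap
        refresh  create_snap  rollback_snap
        snap_mysnap/
            snap_size  snap_id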
+
+static void __rbd_remove_snap_dev(struct rbd_device *rbd_dev,
+ struct rbd_snap *snap)
+{
+ list_del(&snap->node);
+ device_unregister(&snap->dev);
+}
+
+static int rbd_register_snap_dev(struct rbd_device *rbd_dev,
+ struct rbd_snap *snap,
+ struct device *parent)
+{
+ struct device *dev = &snap->dev;
+ int ret;
+
+ dev->type = &rbd_snap_device_type;
+ dev->parent = parent;
+ dev->release = rbd_snap_dev_release;
+ dev_set_name(dev, "snap_%s", snap->name);
+ ret = device_register(dev);
+
+ return ret;
+}
+
+static int __rbd_add_snap_dev(struct rbd_device *rbd_dev,
+ int i, const char *name,
+ struct rbd_snap **snapp)
+{
+ int ret;
+ struct rbd_snap *snap = kzalloc(sizeof(*snap), GFP_KERNEL);
+ if (!snap)
+ return -ENOMEM;
+ snap->name = kstrdup(name, GFP_KERNEL);
+ snap->size = rbd_dev->header.snap_sizes[i];
+ snap->id = rbd_dev->header.snapc->snaps[i];
+ if (device_is_registered(&rbd_dev->dev)) {
+ ret = rbd_register_snap_dev(rbd_dev, snap,
+ &rbd_dev->dev);
+ if (ret < 0)
+ goto err;
+ }
+ *snapp = snap;
+ return 0;
+err:
+ kfree(snap->name);
+ kfree(snap);
+ return ret;
+}
+
+/*
+ * search for the previous snap in a null delimited string list
+ */
+const char *rbd_prev_snap_name(const char *name, const char *start)
+{
+ if (name < start + 2)
+ return NULL;
+
+ name -= 2;
+ while (*name) {
+ if (name == start)
+ return start;
+ name--;
+ }
+ return name + 1;
+}
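A worked example of the backward walk (userspace sketch; the buffer layout mirrors header->snap_names, names concatenated and NUL-terminated):

    #include <stdio.h>

    /* Same logic as rbd_prev_snap_name() above, lifted out for a demo. */
    static const char *prev_name(const char *name, const char *start)
    {
            if (name < start + 2)
                    return NULL;
            name -= 2;
            while (*name) {
                    if (name == start)
                            return start;
                    name--;
            }
            return name + 1;
    }

    int main(void)
    {
            const char buf[] = "snap2\0snap1";  /* two names, sizeof == 12 */
            const char *p = buf + sizeof(buf);  /* one past the final NUL */

            while ((p = prev_name(p, buf)) != NULL)
                    printf("%s\n", p);          /* snap1, then snap2 */
            return 0;
    }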
+
+/*
+ * compare the old list of snapshots that we have to what's in the header
+ * and update it accordingly. Note that the header holds the snapshots
+ * in reverse order (newest to oldest) while we walk them from oldest
+ * to newest, so that we don't trip over a duplicate name when a
+ * snapshot was removed and a new one recreated under the same name.
+ * For example, if the device knows snapshot ids {2, 5} and the header
+ * now lists {7, 5, 3} (newest first), id 2 is unregistered, 5 is kept,
+ * and 3 and 7 are added, leaving {3, 5, 7}.
+ */
+static int __rbd_init_snaps_header(struct rbd_device *rbd_dev)
+{
+ const char *name, *first_name;
+ int i = rbd_dev->header.total_snaps;
+ struct rbd_snap *snap, *old_snap = NULL;
+ int ret;
+ struct list_head *p, *n;
+
+ first_name = rbd_dev->header.snap_names;
+ name = first_name + rbd_dev->header.snap_names_len;
+
+ list_for_each_prev_safe(p, n, &rbd_dev->snaps) {
+ u64 cur_id;
+
+ old_snap = list_entry(p, struct rbd_snap, node);
+
+ if (i)
+ cur_id = rbd_dev->header.snapc->snaps[i - 1];
+
+ if (!i || old_snap->id < cur_id) {
+ /* old_snap->id was skipped, thus was removed */
+ __rbd_remove_snap_dev(rbd_dev, old_snap);
+ continue;
+ }
+ if (old_snap->id == cur_id) {
+ /* we have this snapshot already */
+ i--;
+ name = rbd_prev_snap_name(name, first_name);
+ continue;
+ }
+ for (; i > 0;
+ i--, name = rbd_prev_snap_name(name, first_name)) {
+ if (!name) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ cur_id = rbd_dev->header.snapc->snaps[i];
+ /* snapshot removal? handle it above */
+ if (cur_id >= old_snap->id)
+ break;
+ /* a new snapshot */
+ ret = __rbd_add_snap_dev(rbd_dev, i - 1, name, &snap);
+ if (ret < 0)
+ return ret;
+
+ /* note that we add it backward so using n and not p */
+ list_add(&snap->node, n);
+ p = &snap->node;
+ }
+ }
+ /* we're done going over the old snap list, just add what's left */
+ for (; i > 0; i--) {
+ name = rbd_prev_snap_name(name, first_name);
+ if (!name) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ ret = __rbd_add_snap_dev(rbd_dev, i - 1, name, &snap);
+ if (ret < 0)
+ return ret;
+ list_add(&snap->node, &rbd_dev->snaps);
+ }
+
+ return 0;
+}
+
+
+static void rbd_root_dev_release(struct device *dev)
+{
+}
+
+static struct device rbd_root_dev = {
+ .init_name = "rbd",
+ .release = rbd_root_dev_release,
+};
+
+static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
+{
+ int ret = -ENOMEM;
+ struct device *dev;
+ struct rbd_snap *snap;
+
+ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+ dev = &rbd_dev->dev;
+
+ dev->bus = &rbd_bus_type;
+ dev->type = &rbd_device_type;
+ dev->parent = &rbd_root_dev;
+ dev->release = rbd_dev_release;
+ dev_set_name(dev, "%d", rbd_dev->id);
+ ret = device_register(dev);
+ if (ret < 0)
+ goto done_free;
+
+ list_for_each_entry(snap, &rbd_dev->snaps, node) {
+ ret = rbd_register_snap_dev(rbd_dev, snap,
+ &rbd_dev->dev);
+ if (ret < 0)
break;
}
mutex_unlock(&ctl_mutex);
- return n;
+ return 0;
+done_free:
+ mutex_unlock(&ctl_mutex);
+ return ret;
}
-static ssize_t class_rbd_add(struct class *c,
- struct class_attribute *attr,
- const char *buf, size_t count)
+static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
+{
+ device_unregister(&rbd_dev->dev);
+}
+
+static ssize_t rbd_add(struct bus_type *bus, const char *buf, size_t count)
{
struct ceph_osd_client *osdc;
struct rbd_device *rbd_dev;
@@ -1419,6 +1728,7 @@ static ssize_t class_rbd_add(struct class *c,
/* static rbd_device initialization */
spin_lock_init(&rbd_dev->lock);
INIT_LIST_HEAD(&rbd_dev->node);
+ INIT_LIST_HEAD(&rbd_dev->snaps);
/* generate unique id: find highest unique id, add one */
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
@@ -1478,6 +1788,9 @@ static ssize_t class_rbd_add(struct class *c,
}
rbd_dev->major = irc;
+ rc = rbd_bus_add_dev(rbd_dev);
+ if (rc)
+ goto err_out_disk;
/* set up and announce blkdev mapping */
rc = rbd_init_disk(rbd_dev);
if (rc)
@@ -1487,6 +1800,8 @@ static ssize_t class_rbd_add(struct class *c,
err_out_blkdev:
unregister_blkdev(rbd_dev->major, rbd_dev->name);
+err_out_disk:
+ rbd_free_disk(rbd_dev);
err_out_client:
rbd_put_client(rbd_dev);
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
@@ -1518,35 +1833,10 @@ static struct rbd_device *__rbd_get_dev(unsigned long id)
return NULL;
}
-static ssize_t class_rbd_remove(struct class *c,
- struct class_attribute *attr,
- const char *buf,
- size_t count)
+static void rbd_dev_release(struct device *dev)
{
- struct rbd_device *rbd_dev = NULL;
- int target_id, rc;
- unsigned long ul;
-
- rc = strict_strtoul(buf, 10, &ul);
- if (rc)
- return rc;
-
- /* convert to int; abort if we lost anything in the conversion */
- target_id = (int) ul;
- if (target_id != ul)
- return -EINVAL;
-
- /* remove object from list immediately */
- mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
- rbd_dev = __rbd_get_dev(target_id);
- if (rbd_dev)
- list_del_init(&rbd_dev->node);
-
- mutex_unlock(&ctl_mutex);
-
- if (!rbd_dev)
- return -ENOENT;
+ struct rbd_device *rbd_dev =
+ container_of(dev, struct rbd_device, dev);
rbd_put_client(rbd_dev);
@@ -1557,67 +1847,11 @@ static ssize_t class_rbd_remove(struct class *c,
/* release module ref */
module_put(THIS_MODULE);
-
- return count;
}
-static ssize_t class_rbd_snaps_list(struct class *c,
- struct class_attribute *attr,
- char *data)
-{
- struct rbd_device *rbd_dev = NULL;
- struct list_head *tmp;
- struct rbd_image_header *header;
- int i, n = 0, max = PAGE_SIZE;
- int ret;
-
- mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
- n += snprintf(data, max, "#id\tsnap\tKB\n");
-
- list_for_each(tmp, &rbd_dev_list) {
- char *names, *p;
- struct ceph_snap_context *snapc;
-
- rbd_dev = list_entry(tmp, struct rbd_device, node);
- header = &rbd_dev->header;
-
- down_read(&header->snap_rwsem);
-
- names = header->snap_names;
- snapc = header->snapc;
-
- n += snprintf(data + n, max - n, "%d\t%s\t%lld%s\n",
- rbd_dev->id, RBD_SNAP_HEAD_NAME,
- header->image_size >> 10,
- (!rbd_dev->cur_snap ? " (*)" : ""));
- if (n == max)
- break;
-
- p = names;
- for (i = 0; i < header->total_snaps; i++, p += strlen(p) + 1) {
- n += snprintf(data + n, max - n, "%d\t%s\t%lld%s\n",
- rbd_dev->id, p, header->snap_sizes[i] >> 10,
- (rbd_dev->cur_snap &&
- (snap_index(header, i) == rbd_dev->cur_snap) ?
- " (*)" : ""));
- if (n == max)
- break;
- }
-
- up_read(&header->snap_rwsem);
- }
-
-
- ret = n;
- mutex_unlock(&ctl_mutex);
- return ret;
-}
-
-static ssize_t class_rbd_snaps_refresh(struct class *c,
- struct class_attribute *attr,
- const char *buf,
- size_t count)
+static ssize_t rbd_remove(struct bus_type *bus,
+ const char *buf,
+ size_t count)
{
struct rbd_device *rbd_dev = NULL;
int target_id, rc;
@@ -1641,95 +1875,70 @@ static ssize_t class_rbd_snaps_refresh(struct class *c,
goto done;
}
- rc = rbd_update_snaps(rbd_dev);
- if (rc < 0)
- ret = rc;
+ list_del_init(&rbd_dev->node);
+
+ __rbd_remove_all_snaps(rbd_dev);
+ rbd_bus_del_dev(rbd_dev);
done:
mutex_unlock(&ctl_mutex);
return ret;
}
-static ssize_t class_rbd_snap_create(struct class *c,
- struct class_attribute *attr,
- const char *buf,
- size_t count)
+static ssize_t rbd_snap_add(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
{
- struct rbd_device *rbd_dev = NULL;
- int target_id, ret;
- char *name;
-
- name = kmalloc(RBD_MAX_SNAP_NAME_LEN + 1, GFP_KERNEL);
+ struct rbd_device *rbd_dev = dev_to_rbd(dev);
+ int ret;
+ char *name = kmalloc(count + 1, GFP_KERNEL);
if (!name)
return -ENOMEM;
- /* parse snaps add command */
- if (sscanf(buf, "%d "
- "%" __stringify(RBD_MAX_SNAP_NAME_LEN) "s",
- &target_id,
- name) != 2) {
- ret = -EINVAL;
- goto done;
- }
+ snprintf(name, count, "%s", buf);
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
- rbd_dev = __rbd_get_dev(target_id);
- if (!rbd_dev) {
- ret = -ENOENT;
- goto done_unlock;
- }
-
ret = rbd_header_add_snap(rbd_dev,
name, GFP_KERNEL);
if (ret < 0)
goto done_unlock;
- ret = rbd_update_snaps(rbd_dev);
+ ret = __rbd_update_snaps(rbd_dev);
if (ret < 0)
goto done_unlock;
ret = count;
done_unlock:
mutex_unlock(&ctl_mutex);
-done:
kfree(name);
return ret;
}
-static ssize_t class_rbd_rollback(struct class *c,
- struct class_attribute *attr,
- const char *buf,
- size_t count)
+static ssize_t rbd_snap_rollback(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
{
- struct rbd_device *rbd_dev = NULL;
- int target_id, ret;
+ struct rbd_device *rbd_dev = dev_to_rbd(dev);
+ int ret;
u64 snapid;
- char snap_name[RBD_MAX_SNAP_NAME_LEN];
u64 cur_ofs;
- char *seg_name;
+ char *seg_name = NULL;
+ char *snap_name = kmalloc(count + 1, GFP_KERNEL);
+ ret = -ENOMEM;
+ if (!snap_name)
+ return ret;
/* parse snaps add command */
- if (sscanf(buf, "%d "
- "%" __stringify(RBD_MAX_SNAP_NAME_LEN) "s",
- &target_id,
- snap_name) != 2) {
- return -EINVAL;
- }
-
- ret = -ENOMEM;
+ snprintf(snap_name, count, "%s", buf);
seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
if (!seg_name)
- return ret;
+ goto done;
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
- rbd_dev = __rbd_get_dev(target_id);
- if (!rbd_dev) {
- ret = -ENOENT;
- goto done_unlock;
- }
-
ret = snap_by_name(&rbd_dev->header, snap_name, &snapid, NULL);
if (ret < 0)
goto done_unlock;
@@ -1750,7 +1959,7 @@ static ssize_t class_rbd_rollback(struct class *c,
seg_name, ret);
}
- ret = rbd_update_snaps(rbd_dev);
+ ret = __rbd_update_snaps(rbd_dev);
if (ret < 0)
goto done_unlock;
@@ -1758,57 +1967,42 @@ static ssize_t class_rbd_rollback(struct class *c,
done_unlock:
mutex_unlock(&ctl_mutex);
+done:
kfree(seg_name);
+ kfree(snap_name);
return ret;
}
-static struct class_attribute class_rbd_attrs[] = {
- __ATTR(add, 0200, NULL, class_rbd_add),
- __ATTR(remove, 0200, NULL, class_rbd_remove),
- __ATTR(list, 0444, class_rbd_list, NULL),
- __ATTR(snaps_refresh, 0200, NULL, class_rbd_snaps_refresh),
- __ATTR(snap_create, 0200, NULL, class_rbd_snap_create),
- __ATTR(snaps_list, 0444, class_rbd_snaps_list, NULL),
- __ATTR(snap_rollback, 0200, NULL, class_rbd_rollback),
+static struct bus_attribute rbd_bus_attrs[] = {
+ __ATTR(add, S_IWUSR, NULL, rbd_add),
+ __ATTR(remove, S_IWUSR, NULL, rbd_remove),
__ATTR_NULL
};
/*
* create control files in sysfs
- * /sys/class/rbd/...
+ * /sys/bus/rbd/...
*/
static int rbd_sysfs_init(void)
{
- int ret = -ENOMEM;
+ int ret;
- class_rbd = kzalloc(sizeof(*class_rbd), GFP_KERNEL);
- if (!class_rbd)
- goto out;
+ rbd_bus_type.bus_attrs = rbd_bus_attrs;
- class_rbd->name = DRV_NAME;
- class_rbd->owner = THIS_MODULE;
- class_rbd->class_release = class_rbd_release;
- class_rbd->class_attrs = class_rbd_attrs;
+ ret = bus_register(&rbd_bus_type);
+ if (ret < 0)
+ return ret;
- ret = class_register(class_rbd);
- if (ret)
- goto out_class;
- return 0;
+ ret = device_register(&rbd_root_dev);
-out_class:
- kfree(class_rbd);
- class_rbd = NULL;
- pr_err(DRV_NAME ": failed to create class rbd\n");
-out:
return ret;
}
static void rbd_sysfs_cleanup(void)
{
- if (class_rbd)
- class_destroy(class_rbd);
- class_rbd = NULL;
+ device_unregister(&rbd_root_dev);
+ bus_unregister(&rbd_bus_type);
}
int __init rbd_init(void)
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 06e2812..657873e 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -65,14 +65,14 @@ enum blkif_state {
struct blk_shadow {
struct blkif_request req;
- unsigned long request;
+ struct request *request;
unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};
static DEFINE_MUTEX(blkfront_mutex);
static const struct block_device_operations xlvbd_block_fops;
-#define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE)
+#define BLK_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE)
/*
* We have one of these per vbd, whether ide, scsi or 'other'. They
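The macro change above replaces a size computed through a cast null pointer with a true integer constant expression, so BLK_RING_SIZE remains usable wherever a compile-time constant is required, such as sizing the shadow array iterated over in blkif_recover() below. A simplified model of the idea (illustrative structs; the real Xen macro also rounds the count down to a power of two, omitted here):

    struct ring_hdr  { unsigned int req_prod, rsp_prod; };
    union ring_entry { char pad[112]; };            /* stand-in entry */

    #define PAGE_SIZE_EX	4096
    #define CONST_RING_SIZE \
    	((PAGE_SIZE_EX - sizeof(struct ring_hdr)) / sizeof(union ring_entry))

    /* A constant expression can size a static array: */
    static union ring_entry shadow[CONST_RING_SIZE];

    unsigned long ring_capacity(void)
    {
    	return sizeof(shadow) / sizeof(shadow[0]);  /* == CONST_RING_SIZE */
    }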
@@ -136,7 +136,7 @@ static void add_id_to_freelist(struct blkfront_info *info,
unsigned long id)
{
info->shadow[id].req.id = info->shadow_free;
- info->shadow[id].request = 0;
+ info->shadow[id].request = NULL;
info->shadow_free = id;
}
@@ -245,14 +245,11 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
}
/*
- * blkif_queue_request
+ * Generate a Xen blkfront IO request from a blk layer request. Reads
+ * and writes are handled as expected. Since we lack a loose flush
+ * request, we map flushes into a full ordered barrier.
*
- * request block io
- *
- * id: for guest use only.
- * operation: BLKIF_OP_{READ,WRITE,PROBE}
- * buffer: buffer to read/write into. this should be a
- * virtual address in the guest os.
+ * @req: a request struct
*/
static int blkif_queue_request(struct request *req)
{
@@ -281,7 +278,7 @@ static int blkif_queue_request(struct request *req)
/* Fill out a communications ring structure. */
ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
id = get_id_from_freelist(info);
- info->shadow[id].request = (unsigned long)req;
+ info->shadow[id].request = req;
ring_req->id = id;
ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req);
@@ -289,8 +286,18 @@ static int blkif_queue_request(struct request *req)
ring_req->operation = rq_data_dir(req) ?
BLKIF_OP_WRITE : BLKIF_OP_READ;
- if (req->cmd_flags & REQ_HARDBARRIER)
+
+ if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+ /*
+ * Ideally we could just do an unordered
+ * flush-to-disk, but all we have is a full write
+ * barrier at the moment. However, a barrier write is
+ * a superset of FUA, so we can implement it the same
+ * way. (It's also a FLUSH+FUA, since it is
+ * guaranteed ordered WRT previous writes.)
+ */
ring_req->operation = BLKIF_OP_WRITE_BARRIER;
+ }
ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
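The flush handling above reduces to a small decision function; restated for clarity (not a helper introduced by the patch, and it assumes the driver's existing headers for the flag and opcode constants):

    /* Xen's block protocol here has no standalone FLUSH or FUA operation,
     * so either flag is promoted to a full write barrier, a superset of both. */
    static inline int example_ring_op(unsigned int cmd_flags, int is_write)
    {
    	int op = is_write ? BLKIF_OP_WRITE : BLKIF_OP_READ;

    	if (cmd_flags & (REQ_FLUSH | REQ_FUA))
    		op = BLKIF_OP_WRITE_BARRIER;
    	return op;
    }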
@@ -636,7 +643,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
bret = RING_GET_RESPONSE(&info->ring, i);
id = bret->id;
- req = (struct request *)info->shadow[id].request;
+ req = info->shadow[id].request;
blkif_completion(&info->shadow[id]);
@@ -649,6 +656,16 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
info->gd->disk_name);
error = -EOPNOTSUPP;
+ }
+ if (unlikely(bret->status == BLKIF_RSP_ERROR &&
+ info->shadow[id].req.nr_segments == 0)) {
+ printk(KERN_WARNING "blkfront: %s: empty write barrier op failed\n",
+ info->gd->disk_name);
+ error = -EOPNOTSUPP;
+ }
+ if (unlikely(error)) {
+ if (error == -EOPNOTSUPP)
+ error = 0;
info->feature_flush = 0;
xlvbd_flush(info);
}
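Note the recovery path this hunk establishes: a failed barrier, including the empty-barrier case added here, is reported as -EOPNOTSUPP, which is then masked to success while feature_flush is cleared and xlvbd_flush() reconfigures the queue, so the block layer stops issuing flushes to the device instead of failing the I/O.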
@@ -901,7 +918,7 @@ static int blkif_recover(struct blkfront_info *info)
/* Stage 3: Find pending requests and requeue them. */
for (i = 0; i < BLK_RING_SIZE; i++) {
/* Not in use? */
- if (copy[i].request == 0)
+ if (!copy[i].request)
continue;
/* Grab a request slot and copy shadow state into it. */
@@ -918,9 +935,7 @@ static int blkif_recover(struct blkfront_info *info)
req->seg[j].gref,
info->xbdev->otherend_id,
pfn_to_mfn(info->shadow[req->id].frame[j]),
- rq_data_dir(
- (struct request *)
- info->shadow[req->id].request));
+ rq_data_dir(info->shadow[req->id].request));
info->shadow[req->id].req = *req;
info->ring.req_prod_pvt++;
@@ -1069,14 +1084,8 @@ static void blkfront_connect(struct blkfront_info *info)
*/
info->feature_flush = 0;
- /*
- * The driver doesn't properly handled empty flushes, so
- * lets disable barrier support for now.
- */
-#if 0
if (!err && barrier)
- info->feature_flush = REQ_FLUSH;
-#endif
+ info->feature_flush = REQ_FLUSH | REQ_FUA;
err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
if (err) {