Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/drbd/drbd_debugfs.c |   6
-rw-r--r--  drivers/block/null_blk.c          |  14
-rw-r--r--  drivers/block/nvme-scsi.c         |   4
-rw-r--r--  drivers/block/rbd.c               |  35
-rw-r--r--  drivers/block/sunvdc.c            | 227
-rw-r--r--  drivers/block/virtio_blk.c        |  74
-rw-r--r--  drivers/block/zram/zram_drv.c     | 117
-rw-r--r--  drivers/block/zram/zram_drv.h     |   4
8 files changed, 350 insertions(+), 131 deletions(-)
diff --git a/drivers/block/drbd/drbd_debugfs.c b/drivers/block/drbd/drbd_debugfs.c
index 900d4d3..9a95002 100644
--- a/drivers/block/drbd/drbd_debugfs.c
+++ b/drivers/block/drbd/drbd_debugfs.c
@@ -419,7 +419,7 @@ static int in_flight_summary_show(struct seq_file *m, void *pos)
return 0;
}
-/* simple_positive(file->f_dentry) respectively debugfs_positive(),
+/* simple_positive(file->f_path.dentry) respectively debugfs_positive(),
* but neither is "reachable" from here.
* So we have our own inline version of it above. :-( */
static inline int debugfs_positive(struct dentry *dentry)
@@ -437,14 +437,14 @@ static int drbd_single_open(struct file *file, int (*show)(struct seq_file *, vo
/* Are we still linked,
* or has debugfs_remove() already been called? */
- parent = file->f_dentry->d_parent;
+ parent = file->f_path.dentry->d_parent;
/* not sure if this can happen: */
if (!parent || !parent->d_inode)
goto out;
/* serialize with d_delete() */
mutex_lock(&parent->d_inode->i_mutex);
/* Make sure the object is still alive */
- if (debugfs_positive(file->f_dentry)
+ if (debugfs_positive(file->f_path.dentry)
&& kref_get_unless_zero(kref))
ret = 0;
mutex_unlock(&parent->d_inode->i_mutex);
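The hunk above pairs a liveness check on the dentry with kref_get_unless_zero() under the parent's i_mutex. A minimal userspace sketch of that "take a reference only if the object is still live" idiom (names illustrative, not DRBD's):

#include <stdatomic.h>
#include <stdbool.h>

struct obj { atomic_int refs; };        /* 0 means teardown has begun */

/* Analogous to kref_get_unless_zero(): succeed only while refs > 0. */
static bool get_unless_zero(struct obj *o)
{
	int v = atomic_load(&o->refs);
	while (v != 0)
		if (atomic_compare_exchange_weak(&o->refs, &v, v + 1))
			return true;    /* reference taken */
	return false;                   /* object already dying */
}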
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 8433bc8..caa6121 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -450,14 +450,10 @@ static int init_driver_queues(struct nullb *nullb)
ret = setup_commands(nq);
if (ret)
- goto err_queue;
+ return ret;
nullb->nr_queues++;
}
-
return 0;
-err_queue:
- cleanup_queues(nullb);
- return ret;
}
static int null_add_dev(void)
@@ -507,7 +503,9 @@ static int null_add_dev(void)
goto out_cleanup_queues;
}
blk_queue_make_request(nullb->q, null_queue_bio);
- init_driver_queues(nullb);
+ rv = init_driver_queues(nullb);
+ if (rv)
+ goto out_cleanup_blk_queue;
} else {
nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
if (!nullb->q) {
@@ -516,7 +514,9 @@ static int null_add_dev(void)
}
blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
- init_driver_queues(nullb);
+ rv = init_driver_queues(nullb);
+ if (rv)
+ goto out_cleanup_blk_queue;
}
nullb->q->queuedata = nullb;
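The two hunks above stop ignoring init_driver_queues() failures, and the first hunk makes the function free nothing it did not allocate, so the caller's error path no longer doubles up on cleanup_queues(). A hedged userspace sketch of that single-owner unwind convention (names invented for illustration):

#include <stdlib.h>

/* Callee: on failure, report the error; free nothing it does not own. */
static int init_queues(void **queues)
{
	*queues = malloc(64);
	return *queues ? 0 : -1;
}

/* Caller: check the result and unwind its own allocations in reverse. */
static int add_dev(void)
{
	void *q, *blkq = malloc(64);    /* stands in for the blk queue */
	if (!blkq)
		return -1;
	if (init_queues(&q))
		goto out_cleanup_blk_queue;
	free(q);
	free(blkq);
	return 0;
out_cleanup_blk_queue:
	free(blkq);
	return -1;
}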
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index a4cd6d6..0b4b277 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -329,7 +329,7 @@ INQUIRY_EVPD_BIT_MASK) ? 1 : 0)
(GET_U32_FROM_CDB(cdb, READ_CAP_16_CDB_ALLOC_LENGTH_OFFSET))
#define IS_READ_CAP_16(cdb) \
-((cdb[0] == SERVICE_ACTION_IN && cdb[1] == SAI_READ_CAPACITY_16) ? 1 : 0)
+((cdb[0] == SERVICE_ACTION_IN_16 && cdb[1] == SAI_READ_CAPACITY_16) ? 1 : 0)
/* Request Sense Helper Macros */
#define GET_REQUEST_SENSE_ALLOC_LENGTH(cdb) \
@@ -2947,7 +2947,7 @@ static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
case READ_CAPACITY:
retcode = nvme_trans_read_capacity(ns, hdr, cmd);
break;
- case SERVICE_ACTION_IN:
+ case SERVICE_ACTION_IN_16:
if (IS_READ_CAP_16(cmd))
retcode = nvme_trans_read_capacity(ns, hdr, cmd);
else
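Both nvme-scsi hunks track the SCSI header rename SERVICE_ACTION_IN -> SERVICE_ACTION_IN_16 (opcode 0x9e). A sketch of the dispatch per SPC, where the low five bits of CDB byte 1 carry the service action (0x10 = READ CAPACITY(16)); the mask follows the spec, whereas the driver's macro compares the whole byte:

#include <stdint.h>
#include <stdbool.h>

#define SERVICE_ACTION_IN_16  0x9e   /* SERVICE ACTION IN(16) opcode    */
#define SAI_READ_CAPACITY_16  0x10   /* service action: READ CAP(16)    */

static bool is_read_cap_16(const uint8_t *cdb)
{
	return cdb[0] == SERVICE_ACTION_IN_16 &&
	       (cdb[1] & 0x1f) == SAI_READ_CAPACITY_16;
}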
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 0a54c58..27b71a0 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -342,7 +342,6 @@ struct rbd_device {
struct list_head rq_queue; /* incoming rq queue */
spinlock_t lock; /* queue, flags, open_count */
- struct workqueue_struct *rq_wq;
struct work_struct rq_work;
struct rbd_image_header header;
@@ -402,6 +401,8 @@ static struct kmem_cache *rbd_segment_name_cache;
static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);
+static struct workqueue_struct *rbd_wq;
+
/*
* Default to false for now, as single-major requires >= 0.75 version of
* userspace rbd utility.
@@ -3452,7 +3453,7 @@ static void rbd_request_fn(struct request_queue *q)
}
if (queued)
- queue_work(rbd_dev->rq_wq, &rbd_dev->rq_work);
+ queue_work(rbd_wq, &rbd_dev->rq_work);
}
/*
@@ -3532,7 +3533,7 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
page_count = (u32) calc_pages_for(offset, length);
pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
if (IS_ERR(pages))
- ret = PTR_ERR(pages);
+ return PTR_ERR(pages);
ret = -ENOMEM;
obj_request = rbd_obj_request_create(object_name, offset, length,
@@ -5242,16 +5243,9 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
- rbd_dev->rq_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
- rbd_dev->disk->disk_name);
- if (!rbd_dev->rq_wq) {
- ret = -ENOMEM;
- goto err_out_mapping;
- }
-
ret = rbd_bus_add_dev(rbd_dev);
if (ret)
- goto err_out_workqueue;
+ goto err_out_mapping;
/* Everything's ready. Announce the disk to the world. */
@@ -5263,9 +5257,6 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
return ret;
-err_out_workqueue:
- destroy_workqueue(rbd_dev->rq_wq);
- rbd_dev->rq_wq = NULL;
err_out_mapping:
rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
@@ -5512,7 +5503,6 @@ static void rbd_dev_device_release(struct device *dev)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
- destroy_workqueue(rbd_dev->rq_wq);
rbd_free_disk(rbd_dev);
clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
rbd_dev_mapping_clear(rbd_dev);
@@ -5716,11 +5706,21 @@ static int __init rbd_init(void)
if (rc)
return rc;
+ /*
+ * The number of active work items is limited by the number of
+ * rbd devices, so leave @max_active at default.
+ */
+ rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
+ if (!rbd_wq) {
+ rc = -ENOMEM;
+ goto err_out_slab;
+ }
+
if (single_major) {
rbd_major = register_blkdev(0, RBD_DRV_NAME);
if (rbd_major < 0) {
rc = rbd_major;
- goto err_out_slab;
+ goto err_out_wq;
}
}
@@ -5738,6 +5738,8 @@ static int __init rbd_init(void)
err_out_blkdev:
if (single_major)
unregister_blkdev(rbd_major, RBD_DRV_NAME);
+err_out_wq:
+ destroy_workqueue(rbd_wq);
err_out_slab:
rbd_slab_exit();
return rc;
@@ -5749,6 +5751,7 @@ static void __exit rbd_exit(void)
rbd_sysfs_cleanup();
if (single_major)
unregister_blkdev(rbd_major, RBD_DRV_NAME);
+ destroy_workqueue(rbd_wq);
rbd_slab_exit();
}
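The rbd changes replace one workqueue per mapped device with a single module-wide rbd_wq, created in rbd_init() and destroyed both on the init error path and in rbd_exit(). A trimmed kernel-style sketch of that lifecycle (module boilerplate omitted, names illustrative):

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

static int __init my_init(void)
{
	/* WQ_MEM_RECLAIM: queue keeps making progress under memory pressure */
	my_wq = alloc_workqueue("my_drv", WQ_MEM_RECLAIM, 0);
	if (!my_wq)
		return -ENOMEM;
	return 0;
}

static void __exit my_exit(void)
{
	destroy_workqueue(my_wq);       /* drains queued work, then frees */
}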
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 756b8ec..4b911ed 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -23,8 +23,8 @@
#define DRV_MODULE_NAME "sunvdc"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "1.1"
-#define DRV_MODULE_RELDATE "February 13, 2013"
+#define DRV_MODULE_VERSION "1.2"
+#define DRV_MODULE_RELDATE "November 24, 2014"
static char version[] =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -40,6 +40,8 @@ MODULE_VERSION(DRV_MODULE_VERSION);
#define WAITING_FOR_GEN_CMD 0x04
#define WAITING_FOR_ANY -1
+static struct workqueue_struct *sunvdc_wq;
+
struct vdc_req_entry {
struct request *req;
};
@@ -60,6 +62,10 @@ struct vdc_port {
u64 max_xfer_size;
u32 vdisk_block_size;
+ u64 ldc_timeout;
+ struct timer_list ldc_reset_timer;
+ struct work_struct ldc_reset_work;
+
/* The server fills these in for us in the disk attribute
* ACK packet.
*/
@@ -69,10 +75,12 @@ struct vdc_port {
u8 vdisk_mtype;
char disk_name[32];
-
- struct vio_disk_vtoc label;
};
+static void vdc_ldc_reset(struct vdc_port *port);
+static void vdc_ldc_reset_work(struct work_struct *work);
+static void vdc_ldc_reset_timer(unsigned long _arg);
+
static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
{
return container_of(vio, struct vdc_port, vio);
@@ -152,6 +160,21 @@ static const struct block_device_operations vdc_fops = {
.ioctl = vdc_ioctl,
};
+static void vdc_blk_queue_start(struct vdc_port *port)
+{
+ struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+
+ /* restart blk queue when ring is half emptied. also called after
+ * handshake completes, so check for initial handshake before we've
+ * allocated a disk.
+ */
+ if (port->disk && blk_queue_stopped(port->disk->queue) &&
+ vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50) {
+ blk_start_queue(port->disk->queue);
+ }
+
+}
+
static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
{
if (vio->cmp &&
@@ -165,7 +188,11 @@ static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
static void vdc_handshake_complete(struct vio_driver_state *vio)
{
+ struct vdc_port *port = to_vdc_port(vio);
+
+ del_timer(&port->ldc_reset_timer);
vdc_finish(vio, 0, WAITING_FOR_LINK_UP);
+ vdc_blk_queue_start(port);
}
static int vdc_handle_unknown(struct vdc_port *port, void *arg)
@@ -271,7 +298,7 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
desc->hdr.state = VIO_DESC_FREE;
- dr->cons = (index + 1) & (VDC_TX_RING_SIZE - 1);
+ dr->cons = vio_dring_next(dr, index);
req = rqe->req;
if (req == NULL) {
@@ -283,10 +310,7 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
__blk_end_request(req, (desc->status ? -EIO : 0), desc->size);
- /* restart blk queue when ring is half emptied */
- if (blk_queue_stopped(port->disk->queue) &&
- vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50)
- blk_start_queue(port->disk->queue);
+ vdc_blk_queue_start(port);
}
static int vdc_ack(struct vdc_port *port, void *msgbuf)
@@ -319,17 +343,20 @@ static void vdc_event(void *arg, int event)
spin_lock_irqsave(&vio->lock, flags);
- if (unlikely(event == LDC_EVENT_RESET ||
- event == LDC_EVENT_UP)) {
+ if (unlikely(event == LDC_EVENT_RESET)) {
vio_link_state_change(vio, event);
- spin_unlock_irqrestore(&vio->lock, flags);
- return;
+ queue_work(sunvdc_wq, &port->ldc_reset_work);
+ goto out;
+ }
+
+ if (unlikely(event == LDC_EVENT_UP)) {
+ vio_link_state_change(vio, event);
+ goto out;
}
if (unlikely(event != LDC_EVENT_DATA_READY)) {
- printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event);
- spin_unlock_irqrestore(&vio->lock, flags);
- return;
+ pr_warn(PFX "Unexpected LDC event %d\n", event);
+ goto out;
}
err = 0;
@@ -373,6 +400,7 @@ static void vdc_event(void *arg, int event)
}
if (err < 0)
vdc_finish(&port->vio, err, WAITING_FOR_ANY);
+out:
spin_unlock_irqrestore(&vio->lock, flags);
}
@@ -405,6 +433,8 @@ static int __vdc_tx_trigger(struct vdc_port *port)
delay = 128;
} while (err == -EAGAIN);
+ if (err == -ENOTCONN)
+ vdc_ldc_reset(port);
return err;
}
@@ -474,7 +504,7 @@ static int __send_request(struct request *req)
printk(KERN_ERR PFX "vdc_tx_trigger() failure, err=%d\n", err);
} else {
port->req_id++;
- dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
+ dr->prod = vio_dring_next(dr, dr->prod);
}
return err;
@@ -628,7 +658,7 @@ static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
err = __vdc_tx_trigger(port);
if (err >= 0) {
port->req_id++;
- dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
+ dr->prod = vio_dring_next(dr, dr->prod);
spin_unlock_irqrestore(&port->vio.lock, flags);
wait_for_completion(&comp.com);
@@ -692,12 +722,9 @@ static void vdc_free_tx_ring(struct vdc_port *port)
}
}
-static int probe_disk(struct vdc_port *port)
+static int vdc_port_up(struct vdc_port *port)
{
struct vio_completion comp;
- struct request_queue *q;
- struct gendisk *g;
- int err;
init_completion(&comp.com);
comp.err = 0;
@@ -705,17 +732,27 @@ static int probe_disk(struct vdc_port *port)
port->vio.cmp = &comp;
vio_port_up(&port->vio);
-
wait_for_completion(&comp.com);
- if (comp.err)
- return comp.err;
+ return comp.err;
+}
- err = generic_request(port, VD_OP_GET_VTOC,
- &port->label, sizeof(port->label));
- if (err < 0) {
- printk(KERN_ERR PFX "VD_OP_GET_VTOC returns error %d\n", err);
+static void vdc_port_down(struct vdc_port *port)
+{
+ ldc_disconnect(port->vio.lp);
+ ldc_unbind(port->vio.lp);
+ vdc_free_tx_ring(port);
+ vio_ldc_free(&port->vio);
+}
+
+static int probe_disk(struct vdc_port *port)
+{
+ struct request_queue *q;
+ struct gendisk *g;
+ int err;
+
+ err = vdc_port_up(port);
+ if (err)
return err;
- }
if (vdc_version_supported(port, 1, 1)) {
/* vdisk_size should be set during the handshake, if it wasn't
@@ -828,6 +865,7 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
struct mdesc_handle *hp;
struct vdc_port *port;
int err;
+ const u64 *ldc_timeout;
print_version();
@@ -857,6 +895,16 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26));
port->vdisk_size = -1;
+ /* Actual wall time may be double due to do_generic_file_read() doing
+ * a readahead I/O first, and once that fails it will try to read a
+ * single page.
+ */
+ ldc_timeout = mdesc_get_property(hp, vdev->mp, "vdc-timeout", NULL);
+ port->ldc_timeout = ldc_timeout ? *ldc_timeout : 0;
+ setup_timer(&port->ldc_reset_timer, vdc_ldc_reset_timer,
+ (unsigned long)port);
+ INIT_WORK(&port->ldc_reset_work, vdc_ldc_reset_work);
+
err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
vdc_versions, ARRAY_SIZE(vdc_versions),
&vdc_vio_ops, port->disk_name);
@@ -905,8 +953,21 @@ static int vdc_port_remove(struct vio_dev *vdev)
struct vdc_port *port = dev_get_drvdata(&vdev->dev);
if (port) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->vio.lock, flags);
+ blk_stop_queue(port->disk->queue);
+ spin_unlock_irqrestore(&port->vio.lock, flags);
+
+ flush_work(&port->ldc_reset_work);
+ del_timer_sync(&port->ldc_reset_timer);
del_timer_sync(&port->vio.timer);
+ del_gendisk(port->disk);
+ blk_cleanup_queue(port->disk->queue);
+ put_disk(port->disk);
+ port->disk = NULL;
+
vdc_free_tx_ring(port);
vio_ldc_free(&port->vio);
@@ -917,6 +978,102 @@ static int vdc_port_remove(struct vio_dev *vdev)
return 0;
}
+static void vdc_requeue_inflight(struct vdc_port *port)
+{
+ struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ u32 idx;
+
+ for (idx = dr->cons; idx != dr->prod; idx = vio_dring_next(dr, idx)) {
+ struct vio_disk_desc *desc = vio_dring_entry(dr, idx);
+ struct vdc_req_entry *rqe = &port->rq_arr[idx];
+ struct request *req;
+
+ ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
+ desc->hdr.state = VIO_DESC_FREE;
+ dr->cons = vio_dring_next(dr, idx);
+
+ req = rqe->req;
+ if (req == NULL) {
+ vdc_end_special(port, desc);
+ continue;
+ }
+
+ rqe->req = NULL;
+ blk_requeue_request(port->disk->queue, req);
+ }
+}
+
+static void vdc_queue_drain(struct vdc_port *port)
+{
+ struct request *req;
+
+ while ((req = blk_fetch_request(port->disk->queue)) != NULL)
+ __blk_end_request_all(req, -EIO);
+}
+
+static void vdc_ldc_reset_timer(unsigned long _arg)
+{
+ struct vdc_port *port = (struct vdc_port *) _arg;
+ struct vio_driver_state *vio = &port->vio;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vio->lock, flags);
+ if (!(port->vio.hs_state & VIO_HS_COMPLETE)) {
+ pr_warn(PFX "%s ldc down %llu seconds, draining queue\n",
+ port->disk_name, port->ldc_timeout);
+ vdc_queue_drain(port);
+ vdc_blk_queue_start(port);
+ }
+ spin_unlock_irqrestore(&vio->lock, flags);
+}
+
+static void vdc_ldc_reset_work(struct work_struct *work)
+{
+ struct vdc_port *port;
+ struct vio_driver_state *vio;
+ unsigned long flags;
+
+ port = container_of(work, struct vdc_port, ldc_reset_work);
+ vio = &port->vio;
+
+ spin_lock_irqsave(&vio->lock, flags);
+ vdc_ldc_reset(port);
+ spin_unlock_irqrestore(&vio->lock, flags);
+}
+
+static void vdc_ldc_reset(struct vdc_port *port)
+{
+ int err;
+
+ assert_spin_locked(&port->vio.lock);
+
+ pr_warn(PFX "%s ldc link reset\n", port->disk_name);
+ blk_stop_queue(port->disk->queue);
+ vdc_requeue_inflight(port);
+ vdc_port_down(port);
+
+ err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
+ if (err) {
+ pr_err(PFX "%s vio_ldc_alloc:%d\n", port->disk_name, err);
+ return;
+ }
+
+ err = vdc_alloc_tx_ring(port);
+ if (err) {
+ pr_err(PFX "%s vio_alloc_tx_ring:%d\n", port->disk_name, err);
+ goto err_free_ldc;
+ }
+
+ if (port->ldc_timeout)
+ mod_timer(&port->ldc_reset_timer,
+ round_jiffies(jiffies + HZ * port->ldc_timeout));
+ mod_timer(&port->vio.timer, round_jiffies(jiffies + HZ));
+ return;
+
+err_free_ldc:
+ vio_ldc_free(&port->vio);
+}
+
static const struct vio_device_id vdc_port_match[] = {
{
.type = "vdc-port",
@@ -936,9 +1093,13 @@ static int __init vdc_init(void)
{
int err;
+ sunvdc_wq = alloc_workqueue("sunvdc", 0, 0);
+ if (!sunvdc_wq)
+ return -ENOMEM;
+
err = register_blkdev(0, VDCBLK_NAME);
if (err < 0)
- goto out_err;
+ goto out_free_wq;
vdc_major = err;
@@ -952,7 +1113,8 @@ out_unregister_blkdev:
unregister_blkdev(vdc_major, VDCBLK_NAME);
vdc_major = 0;
-out_err:
+out_free_wq:
+ destroy_workqueue(sunvdc_wq);
return err;
}
@@ -960,6 +1122,7 @@ static void __exit vdc_exit(void)
{
vio_unregister_driver(&vdc_port_driver);
unregister_blkdev(vdc_major, VDCBLK_NAME);
+ destroy_workqueue(sunvdc_wq);
}
module_init(vdc_init);
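Several sunvdc hunks replace the open-coded "(i + 1) & (VDC_TX_RING_SIZE - 1)" advance with vio_dring_next(), which does not require a power-of-two ring size. An illustrative stand-in for that helper:

#include <stdint.h>

/* Advance a descriptor-ring index with wraparound; works for any
 * num_entries, unlike the old power-of-two mask. */
static inline uint32_t dring_next(uint32_t idx, uint32_t num_entries)
{
	return (idx + 1 == num_entries) ? 0 : idx + 1;
}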
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index cecd3f9..7ef7c09 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -80,7 +80,7 @@ static int __virtblk_add_req(struct virtqueue *vq,
{
struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
unsigned int num_out = 0, num_in = 0;
- int type = vbr->out_hdr.type & ~VIRTIO_BLK_T_OUT;
+ __virtio32 type = vbr->out_hdr.type & ~cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT);
sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
sgs[num_out++] = &hdr;
@@ -91,19 +91,19 @@ static int __virtblk_add_req(struct virtqueue *vq,
* block, and before the normal inhdr we put the sense data and the
* inhdr with additional status information.
*/
- if (type == VIRTIO_BLK_T_SCSI_CMD) {
+ if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) {
sg_init_one(&cmd, vbr->req->cmd, vbr->req->cmd_len);
sgs[num_out++] = &cmd;
}
if (have_data) {
- if (vbr->out_hdr.type & VIRTIO_BLK_T_OUT)
+ if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
sgs[num_out++] = data_sg;
else
sgs[num_out + num_in++] = data_sg;
}
- if (type == VIRTIO_BLK_T_SCSI_CMD) {
+ if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) {
sg_init_one(&sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
sgs[num_out + num_in++] = &sense;
sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
@@ -119,12 +119,13 @@ static int __virtblk_add_req(struct virtqueue *vq,
static inline void virtblk_request_done(struct request *req)
{
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+ struct virtio_blk *vblk = req->q->queuedata;
int error = virtblk_result(vbr);
if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
- req->resid_len = vbr->in_hdr.residual;
- req->sense_len = vbr->in_hdr.sense_len;
- req->errors = vbr->in_hdr.errors;
+ req->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual);
+ req->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len);
+ req->errors = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors);
} else if (req->cmd_type == REQ_TYPE_SPECIAL) {
req->errors = (error != 0);
}
@@ -174,25 +175,25 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
vbr->req = req;
if (req->cmd_flags & REQ_FLUSH) {
- vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
+ vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_FLUSH);
vbr->out_hdr.sector = 0;
- vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
+ vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
} else {
switch (req->cmd_type) {
case REQ_TYPE_FS:
vbr->out_hdr.type = 0;
- vbr->out_hdr.sector = blk_rq_pos(vbr->req);
- vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
+ vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, blk_rq_pos(vbr->req));
+ vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
break;
case REQ_TYPE_BLOCK_PC:
- vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
+ vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_SCSI_CMD);
vbr->out_hdr.sector = 0;
- vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
+ vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
break;
case REQ_TYPE_SPECIAL:
- vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID;
+ vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
vbr->out_hdr.sector = 0;
- vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
+ vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
break;
default:
/* We don't put anything else in the queue. */
@@ -205,9 +206,9 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg);
if (num) {
if (rq_data_dir(vbr->req) == WRITE)
- vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
+ vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
else
- vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
+ vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
}
spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
@@ -332,7 +333,8 @@ static ssize_t virtblk_serial_show(struct device *dev,
return err;
}
-DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);
+
+static DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);
static void virtblk_config_changed_work(struct work_struct *work)
{
@@ -477,7 +479,8 @@ static int virtblk_get_cache_mode(struct virtio_device *vdev)
struct virtio_blk_config, wce,
&writeback);
if (err)
- writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE);
+ writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE) ||
+ virtio_has_feature(vdev, VIRTIO_F_VERSION_1);
return writeback;
}
@@ -822,25 +825,34 @@ static const struct virtio_device_id id_table[] = {
{ 0 },
};
-static unsigned int features[] = {
+static unsigned int features_legacy[] = {
VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI,
VIRTIO_BLK_F_WCE, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
VIRTIO_BLK_F_MQ,
+};
+static unsigned int features[] = {
+ VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
+ VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
+ VIRTIO_BLK_F_TOPOLOGY,
+ VIRTIO_BLK_F_MQ,
};
static struct virtio_driver virtio_blk = {
- .feature_table = features,
- .feature_table_size = ARRAY_SIZE(features),
- .driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
- .id_table = id_table,
- .probe = virtblk_probe,
- .remove = virtblk_remove,
- .config_changed = virtblk_config_changed,
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
+ .feature_table_legacy = features_legacy,
+ .feature_table_size_legacy = ARRAY_SIZE(features_legacy),
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .probe = virtblk_probe,
+ .remove = virtblk_remove,
+ .config_changed = virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
- .freeze = virtblk_freeze,
- .restore = virtblk_restore,
+ .freeze = virtblk_freeze,
+ .restore = virtblk_restore,
#endif
};
@@ -872,8 +884,8 @@ out_destroy_workqueue:
static void __exit fini(void)
{
- unregister_blkdev(major, "virtblk");
unregister_virtio_driver(&virtio_blk);
+ unregister_blkdev(major, "virtblk");
destroy_workqueue(virtblk_wq);
}
module_init(init);
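The virtio_blk hunks route every out_hdr/in_hdr field through cpu_to_virtio32()/virtio32_to_cpu() and friends: legacy devices use guest-native byte order, while VIRTIO_F_VERSION_1 devices are always little-endian. A userspace model of that rule (helper name invented):

#include <stdint.h>
#include <stdbool.h>
#include <endian.h>

/* virtio 1.0 devices are little-endian; legacy devices use the guest's
 * native byte order, so conversion is a no-op there. */
static inline uint32_t to_vdev32(bool version_1, uint32_t v)
{
	return version_1 ? htole32(v) : v;
}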
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 0e63e8a..bd8bda3 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -44,15 +44,14 @@ static const char *default_compressor = "lzo";
static unsigned int num_devices = 1;
#define ZRAM_ATTR_RO(name) \
-static ssize_t zram_attr_##name##_show(struct device *d, \
+static ssize_t name##_show(struct device *d, \
struct device_attribute *attr, char *b) \
{ \
struct zram *zram = dev_to_zram(d); \
return scnprintf(b, PAGE_SIZE, "%llu\n", \
(u64)atomic64_read(&zram->stats.name)); \
} \
-static struct device_attribute dev_attr_##name = \
- __ATTR(name, S_IRUGO, zram_attr_##name##_show, NULL);
+static DEVICE_ATTR_RO(name);
static inline int init_done(struct zram *zram)
{
@@ -99,11 +98,12 @@ static ssize_t mem_used_total_show(struct device *dev,
{
u64 val = 0;
struct zram *zram = dev_to_zram(dev);
- struct zram_meta *meta = zram->meta;
down_read(&zram->init_lock);
- if (init_done(zram))
+ if (init_done(zram)) {
+ struct zram_meta *meta = zram->meta;
val = zs_get_total_pages(meta->mem_pool);
+ }
up_read(&zram->init_lock);
return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
@@ -173,16 +173,17 @@ static ssize_t mem_used_max_store(struct device *dev,
int err;
unsigned long val;
struct zram *zram = dev_to_zram(dev);
- struct zram_meta *meta = zram->meta;
err = kstrtoul(buf, 10, &val);
if (err || val != 0)
return -EINVAL;
down_read(&zram->init_lock);
- if (init_done(zram))
+ if (init_done(zram)) {
+ struct zram_meta *meta = zram->meta;
atomic_long_set(&zram->stats.max_used_pages,
zs_get_total_pages(meta->mem_pool));
+ }
up_read(&zram->init_lock);
return len;
@@ -285,19 +286,18 @@ static inline int is_partial_io(struct bio_vec *bvec)
/*
* Check if request is within bounds and aligned on zram logical blocks.
*/
-static inline int valid_io_request(struct zram *zram, struct bio *bio)
+static inline int valid_io_request(struct zram *zram,
+ sector_t start, unsigned int size)
{
- u64 start, end, bound;
+ u64 end, bound;
/* unaligned request */
- if (unlikely(bio->bi_iter.bi_sector &
- (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
+ if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
return 0;
- if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
+ if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
return 0;
- start = bio->bi_iter.bi_sector;
- end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
+ end = start + (size >> SECTOR_SHIFT);
bound = zram->disksize >> SECTOR_SHIFT;
/* out of range */
if (unlikely(start >= bound || end > bound || start > end))
@@ -451,7 +451,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
}
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
- u32 index, int offset, struct bio *bio)
+ u32 index, int offset)
{
int ret;
struct page *page;
@@ -558,7 +558,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
}
if (page_zero_filled(uncmem)) {
- kunmap_atomic(user_mem);
+ if (user_mem)
+ kunmap_atomic(user_mem);
/* Free memory associated with this sector now. */
bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
zram_free_page(zram, index);
@@ -642,14 +643,13 @@ out:
}
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
- int offset, struct bio *bio)
+ int offset, int rw)
{
int ret;
- int rw = bio_data_dir(bio);
if (rw == READ) {
atomic64_inc(&zram->stats.num_reads);
- ret = zram_bvec_read(zram, bvec, index, offset, bio);
+ ret = zram_bvec_read(zram, bvec, index, offset);
} else {
atomic64_inc(&zram->stats.num_writes);
ret = zram_bvec_write(zram, bvec, index, offset);
@@ -850,7 +850,7 @@ out:
static void __zram_make_request(struct zram *zram, struct bio *bio)
{
- int offset;
+ int offset, rw;
u32 index;
struct bio_vec bvec;
struct bvec_iter iter;
@@ -865,6 +865,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
return;
}
+ rw = bio_data_dir(bio);
bio_for_each_segment(bvec, bio, iter) {
int max_transfer_size = PAGE_SIZE - offset;
@@ -879,15 +880,15 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
bv.bv_len = max_transfer_size;
bv.bv_offset = bvec.bv_offset;
- if (zram_bvec_rw(zram, &bv, index, offset, bio) < 0)
+ if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0)
goto out;
bv.bv_len = bvec.bv_len - max_transfer_size;
bv.bv_offset += max_transfer_size;
- if (zram_bvec_rw(zram, &bv, index + 1, 0, bio) < 0)
+ if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0)
goto out;
} else
- if (zram_bvec_rw(zram, &bvec, index, offset, bio) < 0)
+ if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0)
goto out;
update_position(&index, &offset, &bvec);
@@ -912,7 +913,8 @@ static void zram_make_request(struct request_queue *queue, struct bio *bio)
if (unlikely(!init_done(zram)))
goto error;
- if (!valid_io_request(zram, bio)) {
+ if (!valid_io_request(zram, bio->bi_iter.bi_sector,
+ bio->bi_iter.bi_size)) {
atomic64_inc(&zram->stats.invalid_io);
goto error;
}
@@ -942,25 +944,64 @@ static void zram_slot_free_notify(struct block_device *bdev,
atomic64_inc(&zram->stats.notify_free);
}
+static int zram_rw_page(struct block_device *bdev, sector_t sector,
+ struct page *page, int rw)
+{
+ int offset, err;
+ u32 index;
+ struct zram *zram;
+ struct bio_vec bv;
+
+ zram = bdev->bd_disk->private_data;
+ if (!valid_io_request(zram, sector, PAGE_SIZE)) {
+ atomic64_inc(&zram->stats.invalid_io);
+ return -EINVAL;
+ }
+
+ down_read(&zram->init_lock);
+ if (unlikely(!init_done(zram))) {
+ err = -EIO;
+ goto out_unlock;
+ }
+
+ index = sector >> SECTORS_PER_PAGE_SHIFT;
+ offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT;
+
+ bv.bv_page = page;
+ bv.bv_len = PAGE_SIZE;
+ bv.bv_offset = 0;
+
+ err = zram_bvec_rw(zram, &bv, index, offset, rw);
+out_unlock:
+ up_read(&zram->init_lock);
+ /*
+ * If I/O fails, just return error(ie, non-zero) without
+ * calling page_endio.
+ * It causes resubmit the I/O with bio request by upper functions
+ * of rw_page(e.g., swap_readpage, __swap_writepage) and
+ * bio->bi_end_io does things to handle the error
+ * (e.g., SetPageError, set_page_dirty and extra works).
+ */
+ if (err == 0)
+ page_endio(page, rw, 0);
+ return err;
+}
+
static const struct block_device_operations zram_devops = {
.swap_slot_free_notify = zram_slot_free_notify,
+ .rw_page = zram_rw_page,
.owner = THIS_MODULE
};
-static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
- disksize_show, disksize_store);
-static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
-static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
-static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
-static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
-static DEVICE_ATTR(mem_limit, S_IRUGO | S_IWUSR, mem_limit_show,
- mem_limit_store);
-static DEVICE_ATTR(mem_used_max, S_IRUGO | S_IWUSR, mem_used_max_show,
- mem_used_max_store);
-static DEVICE_ATTR(max_comp_streams, S_IRUGO | S_IWUSR,
- max_comp_streams_show, max_comp_streams_store);
-static DEVICE_ATTR(comp_algorithm, S_IRUGO | S_IWUSR,
- comp_algorithm_show, comp_algorithm_store);
+static DEVICE_ATTR_RW(disksize);
+static DEVICE_ATTR_RO(initstate);
+static DEVICE_ATTR_WO(reset);
+static DEVICE_ATTR_RO(orig_data_size);
+static DEVICE_ATTR_RO(mem_used_total);
+static DEVICE_ATTR_RW(mem_limit);
+static DEVICE_ATTR_RW(mem_used_max);
+static DEVICE_ATTR_RW(max_comp_streams);
+static DEVICE_ATTR_RW(comp_algorithm);
ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
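The ZRAM_ATTR_RO rework above leans on the sysfs convention that DEVICE_ATTR_RO(name) wires dev_attr_name to a function literally called name_show, which is why zram_attr_##name##_show had to lose its prefix. A simplified expansion (the real macro lives in <linux/device.h>):

/* Roughly what DEVICE_ATTR_RO(foo) produces: a read-only (0444)
 * attribute bound to foo_show(). Simplified; __ATTR_RO plumbing
 * omitted. */
#define MY_DEVICE_ATTR_RO(_name)                                   \
	struct device_attribute dev_attr_##_name = {               \
		.attr = { .name = #_name, .mode = 0444 },          \
		.show = _name##_show,                              \
	}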
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index c6ee271..b05a816 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -66,8 +66,8 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
/* Flags for zram pages (table[page_no].value) */
enum zram_pageflags {
/* Page consists entirely of zeros */
- ZRAM_ZERO = ZRAM_FLAG_SHIFT + 1,
- ZRAM_ACCESS, /* page in now accessed */
+ ZRAM_ZERO = ZRAM_FLAG_SHIFT,
+ ZRAM_ACCESS, /* page is now accessed */
__NR_ZRAM_PAGEFLAGS,
};
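The enum change starts the flag bits at ZRAM_FLAG_SHIFT instead of one position above it, so no bit between the stored object size and the first flag goes unused. A sketch of the packing in table[index].value (shift value illustrative):

#define FLAG_SHIFT  24                            /* illustrative value   */
#define SIZE_MASK   ((1UL << FLAG_SHIFT) - 1)     /* low bits: obj size   */
#define ZERO_BIT    (FLAG_SHIFT)                  /* first flag bit       */
#define ACCESS_BIT  (FLAG_SHIFT + 1)              /* page-lock bit        */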