-rw-r--r-- block/cfq-iosched.c          |  6
-rw-r--r-- block/genhd.c                |  4
-rw-r--r-- drivers/block/loop.c         |  6
-rw-r--r-- drivers/block/xen-blkfront.c | 55
-rw-r--r-- fs/char_dev.c                |  2
-rw-r--r-- fs/ioprio.c                  | 31
-rw-r--r-- fs/nfsd/vfs.c                |  5
-rw-r--r-- fs/splice.c                  | 43
8 files changed, 57 insertions(+), 95 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 9b186fd..c19d015 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2103,12 +2103,6 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	unsigned group_slice;
 	enum wl_prio_t original_prio = cfqd->serving_prio;
 
-	if (!cfqg) {
-		cfqd->serving_prio = IDLE_WORKLOAD;
-		cfqd->workload_expires = jiffies + 1;
-		return;
-	}
-
 	/* Choose next priority. RT > BE > IDLE */
 	if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
 		cfqd->serving_prio = RT_WORKLOAD;
diff --git a/block/genhd.c b/block/genhd.c
index 5465a82..7433173 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -244,7 +244,7 @@ static struct blk_major_name {
 } *major_names[BLKDEV_MAJOR_HASH_SIZE];
 
 /* index in the above - for now: assume no multimajor ranges */
-static inline int major_to_index(int major)
+static inline int major_to_index(unsigned major)
 {
 	return major % BLKDEV_MAJOR_HASH_SIZE;
 }
@@ -828,7 +828,7 @@ static void *show_partition_start(struct seq_file *seqf, loff_t *pos)
 	static void *p;
 
 	p = disk_seqf_start(seqf, pos);
-	if (!IS_ERR(p) && p && !*pos)
+	if (!IS_ERR_OR_NULL(p) && !*pos)
 		seq_puts(seqf, "major minor  #blocks  name\n\n");
 	return p;
 }
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 7ea0bea..44e18c0 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -395,11 +395,7 @@ lo_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
 	struct loop_device *lo = p->lo;
 	struct page *page = buf->page;
 	sector_t IV;
-	int size, ret;
-
-	ret = buf->ops->confirm(pipe, buf);
-	if (unlikely(ret))
-		return ret;
+	int size;
 
 	IV = ((sector_t) page->index << (PAGE_CACHE_SHIFT - 9)) +
 	     (buf->offset >> 9);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 255035c..4f9e22f 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -65,7 +65,7 @@ enum blkif_state {
 
 struct blk_shadow {
 	struct blkif_request req;
-	unsigned long request;
+	struct request *request;
 	unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 };
 
@@ -136,7 +136,7 @@ static void add_id_to_freelist(struct blkfront_info *info,
 			       unsigned long id)
 {
 	info->shadow[id].req.id  = info->shadow_free;
-	info->shadow[id].request = 0;
+	info->shadow[id].request = NULL;
 	info->shadow_free = id;
 }
 
@@ -245,14 +245,11 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
 }
 
 /*
- * blkif_queue_request
+ * Generate a Xen blkfront IO request from a blk layer request.  Reads
+ * and writes are handled as expected.  Since we lack a loose flush
+ * request, we map flushes into a full ordered barrier.
  *
- * request block io
- *
- * id: for guest use only.
- * operation: BLKIF_OP_{READ,WRITE,PROBE}
- * buffer: buffer to read/write into. this should be a
- *   virtual address in the guest os.
+ * @req: a request struct
  */
 static int blkif_queue_request(struct request *req)
 {
@@ -281,7 +278,7 @@ static int blkif_queue_request(struct request *req)
 	/* Fill out a communications ring structure. */
 	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
 	id = get_id_from_freelist(info);
-	info->shadow[id].request = (unsigned long)req;
+	info->shadow[id].request = req;
 
 	ring_req->id = id;
 	ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req);
@@ -290,6 +287,18 @@ static int blkif_queue_request(struct request *req)
 	ring_req->operation = rq_data_dir(req) ?
 		BLKIF_OP_WRITE : BLKIF_OP_READ;
 
+	if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+		/*
+		 * Ideally we could just do an unordered
+		 * flush-to-disk, but all we have is a full write
+		 * barrier at the moment.  However, a barrier write is
+		 * a superset of FUA, so we can implement it the same
+		 * way.  (It's also a FLUSH+FUA, since it is
+		 * guaranteed ordered WRT previous writes.)
+		 */
+		ring_req->operation = BLKIF_OP_WRITE_BARRIER;
+	}
+
 	ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
 	BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
 
@@ -634,7 +643,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 
 		bret = RING_GET_RESPONSE(&info->ring, i);
 		id   = bret->id;
-		req  = (struct request *)info->shadow[id].request;
+		req  = info->shadow[id].request;
 
 		blkif_completion(&info->shadow[id]);
 
@@ -647,6 +656,16 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 				printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
 				       info->gd->disk_name);
 				error = -EOPNOTSUPP;
+			}
+			if (unlikely(bret->status == BLKIF_RSP_ERROR &&
+				     info->shadow[id].req.nr_segments == 0)) {
+				printk(KERN_WARNING "blkfront: %s: empty write barrier op failed\n",
+				       info->gd->disk_name);
+				error = -EOPNOTSUPP;
+			}
+			if (unlikely(error)) {
+				if (error == -EOPNOTSUPP)
+					error = 0;
 				info->feature_flush = 0;
 				xlvbd_flush(info);
 			}
@@ -899,7 +918,7 @@ static int blkif_recover(struct blkfront_info *info)
 	/* Stage 3: Find pending requests and requeue them. */
 	for (i = 0; i < BLK_RING_SIZE; i++) {
 		/* Not in use? */
-		if (copy[i].request == 0)
+		if (!copy[i].request)
 			continue;
 
 		/* Grab a request slot and copy shadow state into it. */
@@ -916,9 +935,7 @@ static int blkif_recover(struct blkfront_info *info)
 				req->seg[j].gref,
 				info->xbdev->otherend_id,
 				pfn_to_mfn(info->shadow[req->id].frame[j]),
-				rq_data_dir(
-					(struct request *)
-					info->shadow[req->id].request));
+				rq_data_dir(info->shadow[req->id].request));
 		info->shadow[req->id].req = *req;
 
 		info->ring.req_prod_pvt++;
@@ -1067,14 +1084,8 @@ static void blkfront_connect(struct blkfront_info *info)
 	 */
 	info->feature_flush = 0;
 
-	/*
-	 * The driver doesn't properly handled empty flushes, so
-	 * lets disable barrier support for now.
-	 */
-#if 0
 	if (!err && barrier)
-		info->feature_flush = REQ_FLUSH;
-#endif
+		info->feature_flush = REQ_FLUSH | REQ_FUA;
 
 	err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
 	if (err) {
diff --git a/fs/char_dev.c b/fs/char_dev.c
index e5b9df9..143f020 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -59,7 +59,7 @@ static struct char_device_struct {
 } *chrdevs[CHRDEV_MAJOR_HASH_SIZE];
 
 /* index in the above */
-static inline int major_to_index(int major)
+static inline int major_to_index(unsigned major)
 {
 	return major % CHRDEV_MAJOR_HASH_SIZE;
 }
diff --git a/fs/ioprio.c b/fs/ioprio.c
index 2f7d05c..7da2a06 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -103,22 +103,15 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
 	}
 
 	ret = -ESRCH;
-	/*
-	 * We want IOPRIO_WHO_PGRP/IOPRIO_WHO_USER to be "atomic",
-	 * so we can't use rcu_read_lock(). See re-copy of ->ioprio
-	 * in copy_process().
-	 */
-	read_lock(&tasklist_lock);
+	rcu_read_lock();
 	switch (which) {
 		case IOPRIO_WHO_PROCESS:
-			rcu_read_lock();
 			if (!who)
 				p = current;
 			else
 				p = find_task_by_vpid(who);
 			if (p)
 				ret = set_task_ioprio(p, ioprio);
-			rcu_read_unlock();
 			break;
 		case IOPRIO_WHO_PGRP:
 			if (!who)
@@ -141,12 +134,7 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
 				break;
 
 			do_each_thread(g, p) {
-				int match;
-
-				rcu_read_lock();
-				match = __task_cred(p)->uid == who;
-				rcu_read_unlock();
-				if (!match)
+				if (__task_cred(p)->uid != who)
 					continue;
 				ret = set_task_ioprio(p, ioprio);
 				if (ret)
@@ -160,7 +148,7 @@ free_uid:
 			ret = -EINVAL;
 	}
 
-	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 	return ret;
 }
 
@@ -204,17 +192,15 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
 	int ret = -ESRCH;
 	int tmpio;
 
-	read_lock(&tasklist_lock);
+	rcu_read_lock();
 	switch (which) {
 		case IOPRIO_WHO_PROCESS:
-			rcu_read_lock();
 			if (!who)
 				p = current;
 			else
 				p = find_task_by_vpid(who);
 			if (p)
 				ret = get_task_ioprio(p);
-			rcu_read_unlock();
 			break;
 		case IOPRIO_WHO_PGRP:
 			if (!who)
@@ -241,12 +227,7 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
 				break;
 
 			do_each_thread(g, p) {
-				int match;
-
-				rcu_read_lock();
-				match = __task_cred(p)->uid == user->uid;
-				rcu_read_unlock();
-				if (!match)
+				if (__task_cred(p)->uid != user->uid)
 					continue;
 				tmpio = get_task_ioprio(p);
 				if (tmpio < 0)
@@ -264,6 +245,6 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
 			ret = -EINVAL;
 	}
 
-	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 	return ret;
 }
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 184938f..106ed48 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -845,11 +845,6 @@ nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
 	struct page **pp = rqstp->rq_respages + rqstp->rq_resused;
 	struct page *page = buf->page;
 	size_t size;
-	int ret;
-
-	ret = buf->ops->confirm(pipe, buf);
-	if (unlikely(ret))
-		return ret;
 
 	size = sd->len;
 
diff --git a/fs/splice.c b/fs/splice.c
index 8f1dfae..d202638 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -682,19 +682,14 @@ static int pipe_to_sendpage(struct pipe_inode_info *pipe,
 			    struct pipe_buffer *buf, struct splice_desc *sd)
 {
 	struct file *file = sd->u.file;
 	loff_t pos = sd->pos;
-	int ret, more;
-
-	ret = buf->ops->confirm(pipe, buf);
-	if (!ret) {
-		more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
-		if (file->f_op && file->f_op->sendpage)
-			ret = file->f_op->sendpage(file, buf->page, buf->offset,
-						   sd->len, &pos, more);
-		else
-			ret = -EINVAL;
-	}
+	int more;
 
-	return ret;
+	if (!likely(file->f_op && file->f_op->sendpage))
+		return -EINVAL;
+
+	more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
+	return file->f_op->sendpage(file, buf->page, buf->offset,
+				    sd->len, &pos, more);
 }
 
@@ -727,13 +722,6 @@ int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
 	void *fsdata;
 	int ret;
 
-	/*
-	 * make sure the data in this buffer is uptodate
-	 */
-	ret = buf->ops->confirm(pipe, buf);
-	if (unlikely(ret))
-		return ret;
-
 	offset = sd->pos & ~PAGE_CACHE_MASK;
 
 	this_len = sd->len;
@@ -805,12 +793,17 @@ int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd,
 		if (sd->len > sd->total_len)
 			sd->len = sd->total_len;
 
-		ret = actor(pipe, buf, sd);
-		if (ret <= 0) {
+		ret = buf->ops->confirm(pipe, buf);
+		if (unlikely(ret)) {
 			if (ret == -ENODATA)
 				ret = 0;
 			return ret;
 		}
+
+		ret = actor(pipe, buf, sd);
+		if (ret <= 0)
+			return ret;
+
 		buf->offset += ret;
 		buf->len -= ret;
 
@@ -1044,10 +1037,6 @@ static int write_pipe_buf(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
 	int ret;
 	void *data;
 
-	ret = buf->ops->confirm(pipe, buf);
-	if (ret)
-		return ret;
-
 	data = buf->ops->map(pipe, buf, 0);
 	ret = kernel_write(sd->u.file, data + buf->offset, sd->len, sd->pos);
 	buf->ops->unmap(pipe, buf, data);
@@ -1507,10 +1496,6 @@ static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
 	char *src;
 	int ret;
 
-	ret = buf->ops->confirm(pipe, buf);
-	if (unlikely(ret))
-		return ret;
-
 	/*
 	 * See if we can use the atomic maps, by prefaulting in the
 	 * pages and doing an atomic copy
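
A note on the major_to_index() change in block/genhd.c and fs/char_dev.c: with a signed parameter, a negative major makes the % expression negative under C's truncate-toward-zero division, so the function could hand back a negative index into the hash table. Taking the argument as unsigned converts any negative value to a large positive one before the modulo. A minimal user-space sketch of the difference (HASH_SIZE stands in for the kernel's 255-entry BLKDEV_MAJOR_HASH_SIZE; assumes a 32-bit unsigned int):

	#include <stdio.h>

	#define HASH_SIZE 255	/* stands in for BLKDEV_MAJOR_HASH_SIZE */

	/* signed: C99 '%' truncates toward zero, so a negative major
	 * yields a negative table index */
	static int index_signed(int major)
	{
		return major % HASH_SIZE;
	}

	/* unsigned: a negative caller value wraps to a large positive
	 * number, and the result always lands in [0, HASH_SIZE) */
	static unsigned index_unsigned(unsigned major)
	{
		return major % HASH_SIZE;
	}

	int main(void)
	{
		printf("%d\n", index_signed(-1));	/* prints -1 */
		printf("%u\n", index_unsigned(-1));	/* prints 0: 4294967295 % 255 */
		return 0;
	}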
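
The blkfront_connect() hunk only has an effect because xlvbd_flush() forwards info->feature_flush to the block layer; that is what makes the core start issuing REQ_FLUSH/REQ_FUA requests for blkif_queue_request() to map onto BLKIF_OP_WRITE_BARRIER. An abbreviated sketch of that wiring with the 2.6.37-era API (the driver's real xlvbd_flush() also logs the barrier state):

	static void xlvbd_flush(struct blkfront_info *info)
	{
		/* feature_flush is now either 0 or REQ_FLUSH | REQ_FUA.
		 * blk_queue_flush() records which cache-control bits this
		 * queue honors, so flushes and FUA writes are only sent
		 * down when the backend advertised barrier support. */
		blk_queue_flush(info->rq, info->feature_flush);
	}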
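
The fs/ioprio.c conversion is safe because ->ioprio is no longer re-copied in copy_process(), so the IOPRIO_WHO_PGRP/IOPRIO_WHO_USER walks no longer need tasklist_lock for atomicity; one rcu_read_lock() now covers both the thread iteration and the __task_cred() dereference. The locking skeleton of the user loop after the patch, reduced for illustration (error handling trimmed; the real code exits through its free_uid label rather than a bare out label):

	rcu_read_lock();		/* was read_lock(&tasklist_lock) */
	do_each_thread(g, p) {
		/* __task_cred() may be dereferenced under rcu_read_lock(),
		 * so the old per-iteration lock/unlock pair is gone too */
		if (__task_cred(p)->uid != who)
			continue;
		ret = set_task_ioprio(p, ioprio);
		if (ret)
			goto out;	/* do_each_thread() expands to two
					 * nested loops; a bare break would
					 * only leave the inner one */
	} while_each_thread(g, p);
out:
	rcu_read_unlock();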
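
The fs/splice.c, drivers/block/loop.c, and fs/nfsd/vfs.c hunks are all one refactor: every splice actor used to open-code buf->ops->confirm() before touching the buffer, and that call is now made once in splice_from_pipe_feed() ahead of the actor. Under the new contract an actor may assume the buffer contents are already up to date. A hypothetical minimal actor under that contract (pipe_to_null is invented for illustration, not part of the patch):

	/* Discards each buffer. Note the absence of a buf->ops->confirm()
	 * call: splice_from_pipe_feed() has already confirmed the buffer
	 * before invoking the actor. */
	static int pipe_to_null(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf, struct splice_desc *sd)
	{
		return sd->len;		/* report the whole chunk consumed */
	}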