author    Jiri Kosina <jkosina@suse.cz>  2010-12-22 18:57:02 +0100
committer Jiri Kosina <jkosina@suse.cz>  2010-12-22 18:57:02 +0100
commit    4b7bd364700d9ac8372eff48832062b936d0793b (patch)
tree      0dbf78c95456a0b02d07fcd473281f04a87e266d /block
parent    c0d8768af260e2cbb4bf659ae6094a262c86b085 (diff)
parent    90a8a73c06cc32b609a880d48449d7083327e11a (diff)
download  op-kernel-dev-4b7bd364700d9ac8372eff48832062b936d0793b.zip
          op-kernel-dev-4b7bd364700d9ac8372eff48832062b936d0793b.tar.gz
Merge branch 'master' into for-next
Conflicts:
	MAINTAINERS
	arch/arm/mach-omap2/pm24xx.c
	drivers/scsi/bfa/bfa_fcpim.c

Needed to update to apply fixes for which the old branch was too outdated.
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c      |  11
-rw-r--r--  block/blk-ioc.c       |  14
-rw-r--r--  block/blk-map.c       |   3
-rw-r--r--  block/blk-merge.c     |   6
-rw-r--r--  block/blk-settings.c  |  51
-rw-r--r--  block/blk-sysfs.c     |   2
-rw-r--r--  block/blk-throttle.c  |  41
-rw-r--r--  block/bsg.c           |   8
-rw-r--r--  block/compat_ioctl.c  |   5
-rw-r--r--  block/elevator.c      |   4
-rw-r--r--  block/ioctl.c         |   8
-rw-r--r--  block/scsi_ioctl.c    |  34
12 files changed, 97 insertions, 90 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index f0834e2..4ce953f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1194,13 +1194,6 @@ static int __make_request(struct request_queue *q, struct bio *bio)
int where = ELEVATOR_INSERT_SORT;
int rw_flags;
- /* REQ_HARDBARRIER is no more */
- if (WARN_ONCE(bio->bi_rw & REQ_HARDBARRIER,
- "block: HARDBARRIER is deprecated, use FLUSH/FUA instead\n")) {
- bio_endio(bio, -EOPNOTSUPP);
- return 0;
- }
-
/*
* low level driver can indicate that it wants pages above a
* certain limit bounced to low memory (ie for highmem, or even
@@ -1351,7 +1344,7 @@ static void handle_bad_sector(struct bio *bio)
bdevname(bio->bi_bdev, b),
bio->bi_rw,
(unsigned long long)bio->bi_sector + bio_sectors(bio),
- (long long)(bio->bi_bdev->bd_inode->i_size >> 9));
+ (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
set_bit(BIO_EOF, &bio->bi_flags);
}
@@ -1404,7 +1397,7 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
return 0;
/* Test device or partition size, when known. */
- maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
+ maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
if (maxsector) {
sector_t sector = bio->bi_sector;
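The i_size_read() conversions in these two hunks (and the matching ones in compat_ioctl.c and ioctl.c below) matter on 32-bit kernels, where a raw 64-bit i_size load can tear against a concurrent i_size_write(). A minimal sketch of the pattern, assuming the <linux/fs.h> helpers:

	/* Racy on 32-bit: a raw 64-bit load may observe a half-updated size. */
	sector_t maxsector = bio->bi_bdev->bd_inode->i_size >> 9;

	/* Safe: i_size_read() retries (via a seqcount) around i_size_write(). */
	sector_t safe_max = i_size_read(bio->bi_bdev->bd_inode) >> 9;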
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index d22c4c5..3c7a339 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -153,20 +153,6 @@ struct io_context *get_io_context(gfp_t gfp_flags, int node)
}
EXPORT_SYMBOL(get_io_context);
-void copy_io_context(struct io_context **pdst, struct io_context **psrc)
-{
- struct io_context *src = *psrc;
- struct io_context *dst = *pdst;
-
- if (src) {
- BUG_ON(atomic_long_read(&src->refcount) == 0);
- atomic_long_inc(&src->refcount);
- put_io_context(dst);
- *pdst = src;
- }
-}
-EXPORT_SYMBOL(copy_io_context);
-
static int __init blk_ioc_init(void)
{
iocontext_cachep = kmem_cache_create("blkdev_ioc",
diff --git a/block/blk-map.c b/block/blk-map.c
index d4a586d..e663ac2 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -201,6 +201,9 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
for (i = 0; i < iov_count; i++) {
unsigned long uaddr = (unsigned long)iov[i].iov_base;
+ if (!iov[i].iov_len)
+ return -EINVAL;
+
if (uaddr & queue_dma_alignment(q)) {
unaligned = 1;
break;
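The new check fails the whole request with -EINVAL as soon as any segment is empty, before alignment is even considered, so a zero-length iovec can no longer reach the bio mapping code. A hypothetical caller that is now bounced (names and values illustrative):

	char buf[1024];
	struct sg_iovec iov[2] = {
		{ .iov_base = buf,       .iov_len = 512 },
		{ .iov_base = buf + 512, .iov_len = 0   },	/* empty segment */
	};
	/* Any SG_IO/bsg submission built on this vector now gets -EINVAL. */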
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 77b7c26..74bc4a7 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -21,7 +21,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
return 0;
fbio = bio;
- cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+ cluster = blk_queue_cluster(q);
seg_size = 0;
nr_phys_segs = 0;
for_each_bio(bio) {
@@ -87,7 +87,7 @@ EXPORT_SYMBOL(blk_recount_segments);
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
struct bio *nxt)
{
- if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
+ if (!blk_queue_cluster(q))
return 0;
if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
@@ -123,7 +123,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
int nsegs, cluster;
nsegs = 0;
- cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+ cluster = blk_queue_cluster(q);
/*
* for each bio in rq
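All three call sites now ask blk_queue_cluster() instead of testing QUEUE_FLAG_CLUSTER directly: clustering moves into struct queue_limits (see the blk-settings.c hunks below), so it can be stacked like any other limit. The helper presumably reduces to little more than:

	static inline unsigned int blk_queue_cluster(struct request_queue *q)
	{
		return q->limits.cluster;
	}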
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 701859f..36c8c1f 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -126,7 +126,7 @@ void blk_set_default_limits(struct queue_limits *lim)
lim->alignment_offset = 0;
lim->io_opt = 0;
lim->misaligned = 0;
- lim->no_cluster = 0;
+ lim->cluster = 1;
}
EXPORT_SYMBOL(blk_set_default_limits);
@@ -229,8 +229,8 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
EXPORT_SYMBOL(blk_queue_bounce_limit);
/**
- * blk_queue_max_hw_sectors - set max sectors for a request for this queue
- * @q: the request queue for the device
+ * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request
+ * @limits: the queue limits
* @max_hw_sectors: max hardware sectors in the usual 512b unit
*
* Description:
@@ -244,7 +244,7 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
* per-device basis in /sys/block/<device>/queue/max_sectors_kb.
* The soft limit can not exceed max_hw_sectors.
**/
-void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
+void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)
{
if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
@@ -252,9 +252,23 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
__func__, max_hw_sectors);
}
- q->limits.max_hw_sectors = max_hw_sectors;
- q->limits.max_sectors = min_t(unsigned int, max_hw_sectors,
- BLK_DEF_MAX_SECTORS);
+ limits->max_hw_sectors = max_hw_sectors;
+ limits->max_sectors = min_t(unsigned int, max_hw_sectors,
+ BLK_DEF_MAX_SECTORS);
+}
+EXPORT_SYMBOL(blk_limits_max_hw_sectors);
+
+/**
+ * blk_queue_max_hw_sectors - set max sectors for a request for this queue
+ * @q: the request queue for the device
+ * @max_hw_sectors: max hardware sectors in the usual 512b unit
+ *
+ * Description:
+ * See description for blk_limits_max_hw_sectors().
+ **/
+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
+{
+ blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
@@ -464,15 +478,6 @@ EXPORT_SYMBOL(blk_queue_io_opt);
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
blk_stack_limits(&t->limits, &b->limits, 0);
-
- if (!t->queue_lock)
- WARN_ON_ONCE(1);
- else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
- unsigned long flags;
- spin_lock_irqsave(t->queue_lock, flags);
- queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
- spin_unlock_irqrestore(t->queue_lock, flags);
- }
}
EXPORT_SYMBOL(blk_queue_stack_limits);
@@ -545,7 +550,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
t->io_min = max(t->io_min, b->io_min);
t->io_opt = lcm(t->io_opt, b->io_opt);
- t->no_cluster |= b->no_cluster;
+ t->cluster &= b->cluster;
t->discard_zeroes_data &= b->discard_zeroes_data;
/* Physical block size a multiple of the logical block size? */
@@ -641,7 +646,6 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
sector_t offset)
{
struct request_queue *t = disk->queue;
- struct request_queue *b = bdev_get_queue(bdev);
if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
@@ -652,17 +656,6 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
top, bottom);
}
-
- if (!t->queue_lock)
- WARN_ON_ONCE(1);
- else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
- unsigned long flags;
-
- spin_lock_irqsave(t->queue_lock, flags);
- if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
- queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
- spin_unlock_irqrestore(t->queue_lock, flags);
- }
}
EXPORT_SYMBOL(disk_stack_limits);
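With cluster a real limit, stacking collapses to a bitwise AND (t->cluster &= b->cluster): the top device clusters only if every bottom device does, which is why the locked clearing of QUEUE_FLAG_CLUSTER in blk_queue_stack_limits() and disk_stack_limits() becomes dead weight. A worked sketch with hypothetical devices:

	struct queue_limits t   = { .cluster = 1 };	/* stacked (e.g. DM) top */
	struct queue_limits sda = { .cluster = 1 };
	struct queue_limits sdb = { .cluster = 0 };	/* no segment clustering */

	blk_stack_limits(&t, &sda, 0);	/* t.cluster = 1 & 1 = 1 */
	blk_stack_limits(&t, &sdb, 0);	/* t.cluster = 1 & 0 = 0 */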
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 013457f..41fb691 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -119,7 +119,7 @@ static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
- if (test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
+ if (blk_queue_cluster(q))
return queue_var_show(queue_max_segment_size(q), (page));
return queue_var_show(PAGE_CACHE_SIZE, (page));
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 56ad453..381b09b 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -355,6 +355,12 @@ throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
tg->slice_end[rw], jiffies);
}
+static inline void throtl_set_slice_end(struct throtl_data *td,
+ struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
+{
+ tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
+}
+
static inline void throtl_extend_slice(struct throtl_data *td,
struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
@@ -391,6 +397,16 @@ throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
if (throtl_slice_used(td, tg, rw))
return;
+ /*
+ * A bio has been dispatched, so adjust slice_end as well. It can happen
+ * that the cgroup limit was initially very low, resulting in a far-off
+ * slice_end, but the limit was later bumped up and the bio was
+ * dispatched sooner; slice_end must then be pulled in. A bogus, far-off
+ * slice_end is bad because it does not allow a new slice to start.
+ */
+
+ throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);
+
time_elapsed = jiffies - tg->slice_start[rw];
nr_slices = time_elapsed / throtl_slice;
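To see what the slice_end pull-in buys, assume the file's throtl_slice default of HZ/10 (100 jiffies at HZ=1000) and a group whose limit is raised mid-slice; the numbers are illustrative:

	/* Very low bps limit: slice_end was pushed far out, e.g.        */
	/*	slice_start = 1000, slice_end = 9000 (jiffies)            */
	/* Limit raised; a bio is dispatched at jiffies = 1200:          */
	throtl_set_slice_end(td, tg, rw, 1200 + throtl_slice);
	/* slice_end = roundup(1300, 100) = 1300, not 9000, so a fresh   */
	/* slice can start soon instead of crediting stale time.         */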
@@ -645,7 +661,7 @@ static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
{
unsigned int nr_reads = 0, nr_writes = 0;
unsigned int max_nr_reads = throtl_grp_quantum*3/4;
- unsigned int max_nr_writes = throtl_grp_quantum - nr_reads;
+ unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
struct bio *bio;
/* Try to dispatch 75% READS and 25% WRITES */
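The one-character fix above is easy to miss: max_nr_writes was computed from nr_reads, which is still 0 at that point, rather than from max_nr_reads. With throtl_grp_quantum = 8 as defined in this file:

	max_nr_reads  = 8 * 3 / 4;	/* = 6 */
	max_nr_writes = 8 - 0;		/* old: 8, writes could eat the whole quantum */
	max_nr_writes = 8 - 6;		/* new: 2, the intended 75/25 split */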
@@ -709,26 +725,21 @@ static void throtl_process_limit_change(struct throtl_data *td)
struct throtl_grp *tg;
struct hlist_node *pos, *n;
- /*
- * Make sure atomic_inc() effects from
- * throtl_update_blkio_group_read_bps(), group of functions are
- * visible.
- * Is this required or smp_mb__after_atomic_inc() was suffcient
- * after the atomic_inc().
- */
- smp_rmb();
if (!atomic_read(&td->limits_changed))
return;
throtl_log(td, "limit changed =%d", atomic_read(&td->limits_changed));
- hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
- /*
- * Do I need an smp_rmb() here to make sure tg->limits_changed
- * update is visible. I am relying on smp_rmb() at the
- * beginning of function and not putting a new one here.
- */
+ /*
+ * Make sure updates from the throtl_update_blkio_group_read_bps() group
+ * of functions to tg->limits_changed are visible. We do not want the
+ * update to td->limits_changed to be visible while the update to
+ * tg->limits_changed is not yet visible on this CPU; hence the read
+ * barrier.
+ */
+ smp_rmb();
+ hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
if (throtl_tg_on_rr(tg) && tg->limits_changed) {
throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
" riops=%u wiops=%u", tg->bps[READ],
diff --git a/block/bsg.c b/block/bsg.c
index f20d6a7..0c8b64a 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -250,6 +250,14 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
int ret, rw;
unsigned int dxfer_len;
void *dxferp = NULL;
+ struct bsg_class_device *bcd = &q->bsg_dev;
+
+ /* If the LLD has been removed, bsg_unregister_queue() will eventually
+ * be called and the class_dev freed, so we can no longer use this
+ * request_queue. Return "no such address".
+ */
+ if (!bcd->class_dev)
+ return ERR_PTR(-ENXIO);
dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 119f07b..cc3eb78 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -8,7 +8,6 @@
#include <linux/hdreg.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
-#include <linux/smp_lock.h>
#include <linux/types.h>
#include <linux/uaccess.h>
@@ -744,13 +743,13 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
return 0;
case BLKGETSIZE:
- size = bdev->bd_inode->i_size;
+ size = i_size_read(bdev->bd_inode);
if ((size >> 9) > ~0UL)
return -EFBIG;
return compat_put_ulong(arg, size >> 9);
case BLKGETSIZE64_32:
- return compat_put_u64(arg, bdev->bd_inode->i_size);
+ return compat_put_u64(arg, i_size_read(bdev->bd_inode));
case BLKTRACESETUP32:
case BLKTRACESTART: /* compatible */
diff --git a/block/elevator.c b/block/elevator.c
index 282e830..2569512 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -429,7 +429,7 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
q->nr_sorted--;
boundary = q->end_sector;
- stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED;
+ stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
list_for_each_prev(entry, &q->queue_head) {
struct request *pos = list_entry_rq(entry);
@@ -691,7 +691,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
void __elv_add_request(struct request_queue *q, struct request *rq, int where,
int plug)
{
- if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+ if (rq->cmd_flags & REQ_SOFTBARRIER) {
/* barriers are scheduling boundary, update end_sector */
if (rq->cmd_type == REQ_TYPE_FS ||
(rq->cmd_flags & REQ_DISCARD)) {
diff --git a/block/ioctl.c b/block/ioctl.c
index d724ceb..a9a302e 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -5,7 +5,6 @@
#include <linux/hdreg.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
-#include <linux/smp_lock.h>
#include <linux/blktrace_api.h>
#include <asm/uaccess.h>
@@ -125,7 +124,7 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
start >>= 9;
len >>= 9;
- if (start + len > (bdev->bd_inode->i_size >> 9))
+ if (start + len > (i_size_read(bdev->bd_inode) >> 9))
return -EINVAL;
if (secure)
flags |= BLKDEV_DISCARD_SECURE;
@@ -242,6 +241,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
* We need to set the startsect first, the driver may
* want to override it.
*/
+ memset(&geo, 0, sizeof(geo));
geo.start = get_start_sect(bdev);
ret = disk->fops->getgeo(bdev, &geo);
if (ret)
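The memset() closes an information leak: geo lives on the kernel stack, a driver's getgeo() method is free to fill in only some of its fields, and the whole struct is then copied to userspace. A sketch of the HDIO_GETGEO path being hardened (abridged):

	struct hd_geometry geo;		/* on-stack, previously uninitialized */
	memset(&geo, 0, sizeof(geo));
	geo.start = get_start_sect(bdev);
	ret = disk->fops->getgeo(bdev, &geo);	/* may fill only some fields */
	/* Without the memset, copy_to_user() below could ship whatever
	 * stack bytes the driver left untouched. */
	if (!ret && copy_to_user(argp, &geo, sizeof(geo)))
		return -EFAULT;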
@@ -307,12 +307,12 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
ret = blkdev_reread_part(bdev);
break;
case BLKGETSIZE:
- size = bdev->bd_inode->i_size;
+ size = i_size_read(bdev->bd_inode);
if ((size >> 9) > ~0UL)
return -EFBIG;
return put_ulong(arg, size >> 9);
case BLKGETSIZE64:
- return put_u64(arg, bdev->bd_inode->i_size);
+ return put_u64(arg, i_size_read(bdev->bd_inode));
case BLKTRACESTART:
case BLKTRACESTOP:
case BLKTRACESETUP:
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index a8b5a10..4f4230b 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -321,33 +321,47 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
if (hdr->iovec_count) {
const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
size_t iov_data_len;
- struct sg_iovec *iov;
+ struct sg_iovec *sg_iov;
+ struct iovec *iov;
+ int i;
- iov = kmalloc(size, GFP_KERNEL);
- if (!iov) {
+ sg_iov = kmalloc(size, GFP_KERNEL);
+ if (!sg_iov) {
ret = -ENOMEM;
goto out;
}
- if (copy_from_user(iov, hdr->dxferp, size)) {
- kfree(iov);
+ if (copy_from_user(sg_iov, hdr->dxferp, size)) {
+ kfree(sg_iov);
ret = -EFAULT;
goto out;
}
+ /*
+ * Sum up the vecs, making sure they don't overflow
+ */
+ iov = (struct iovec *) sg_iov;
+ iov_data_len = 0;
+ for (i = 0; i < hdr->iovec_count; i++) {
+ if (iov_data_len + iov[i].iov_len < iov_data_len) {
+ kfree(sg_iov);
+ ret = -EINVAL;
+ goto out;
+ }
+ iov_data_len += iov[i].iov_len;
+ }
+
/* SG_IO howto says that the shorter of the two wins */
- iov_data_len = iov_length((struct iovec *)iov,
- hdr->iovec_count);
if (hdr->dxfer_len < iov_data_len) {
- hdr->iovec_count = iov_shorten((struct iovec *)iov,
+ hdr->iovec_count = iov_shorten(iov,
hdr->iovec_count,
hdr->dxfer_len);
iov_data_len = hdr->dxfer_len;
}
- ret = blk_rq_map_user_iov(q, rq, NULL, iov, hdr->iovec_count,
+ ret = blk_rq_map_user_iov(q, rq, NULL, sg_iov, hdr->iovec_count,
iov_data_len, GFP_KERNEL);
- kfree(iov);
+ kfree(sg_iov);
} else if (hdr->dxfer_len)
ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
GFP_KERNEL);
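The summing loop added above uses the classic unsigned-wraparound guard: for unsigned values, a + b < a holds exactly when the addition overflowed, which is what the old unchecked iov_length() call failed to catch. The same guard in isolation:

	size_t total = 0;
	int i;

	for (i = 0; i < count; i++) {
		if (total + iov[i].iov_len < total)	/* wrapped: overflow */
			return -EINVAL;
		total += iov[i].iov_len;
	}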