author		Linus Torvalds <torvalds@linux-foundation.org>		2014-06-26 13:06:13 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>		2014-06-26 13:06:13 -0700
commit		3493860c76eb4e5b222f1016d1458615afac9fc4 (patch)
tree		6097d14cde828e20f0a1287bb0cd1ee0ded6089f
parent		0b86dbf675e0170a191a9ca18e5e99fd39a678c0 (diff)
parent		54ed4ed8f9a5071a3bbae2e037376d8c02b9629b (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
 "A small collection of fixes/changes for the current series.  This
  contains:

   - Removal of dead code from Gu Zheng.

   - Revert of two bad fixes that went in earlier in this round, marking
     things as __init that were not purely used from init.

   - A fix for blk_mq_start_hw_queue() using __blk_mq_run_hw_queue(),
     which could place us wrongly.  Make it use the non-__ variant,
     which handles cases where we are called from the wrong CPU set.
     From me.

   - A fix for drbd, which allocates discard requests without room for
     the SCSI payload.  From Lars Ellenberg.

   - A fix for a use-after-free in the blkcg code from Tejun.

   - Addition of limiting gaps in SG lists, if the hardware needs it.
     This is the last pre-req patch for blk-mq to enable the full NVMe
     conversion.  Could wait until 3.17, but it's simple enough that it
     is nice to have everything we need for the NVMe port in place for
     the 3.17 release.  From me"

* 'for-linus' of git://git.kernel.dk/linux-block:
  drbd: fix NULL pointer deref in blk_add_request_payload
  blk-mq: blk_mq_start_hw_queue() should use blk_mq_run_hw_queue()
  block: add support for limiting gaps in SG lists
  bio: remove unused macro bip_vec_idx()
  Revert "block: add __init to elv_register"
  Revert "block: add __init to blkcg_policy_register"
  blkcg: fix use-after-free in __blkg_release_rcu() by making blkcg_gq refcnt an atomic_t
  floppy: format block0 read error message properly
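The SG-gaps limitation added below is opt-in per request queue. As a rough sketch (the driver and function names here are hypothetical; only QUEUE_FLAG_SG_GAPS and queue_flag_set_unlocked() come from the kernel headers), a driver whose hardware cannot DMA across gaps in an SG list would flag its queue at setup time:

    #include <linux/blkdev.h>

    /* Hypothetical driver setup path: declare that this queue cannot
     * accept SG lists with gaps, so the new bio-add and merge checks
     * reject such layouts up front. */
    static void exampledrv_init_queue(struct request_queue *q)
    {
            queue_flag_set_unlocked(QUEUE_FLAG_SG_GAPS, q);
    }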
-rw-r--r--	block/bio.c				 8
-rw-r--r--	block/blk-cgroup.c			 9
-rw-r--r--	block/blk-cgroup.h			21
-rw-r--r--	block/blk-merge.c			10
-rw-r--r--	block/blk-mq.c				 2
-rw-r--r--	block/elevator.c			 2
-rw-r--r--	drivers/block/drbd/drbd_receiver.c	 5
-rw-r--r--	drivers/block/floppy.c			 2
-rw-r--r--	include/linux/bio.h			13
-rw-r--r--	include/linux/blkdev.h			 1
-rw-r--r--	include/linux/elevator.h		 2
11 files changed, 48 insertions, 27 deletions
diff --git a/block/bio.c b/block/bio.c
index 8c2e55e..0ec61c9 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -746,6 +746,14 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
goto done;
}
+
+ /*
+ * If the queue doesn't support SG gaps and adding this
+ * offset would create a gap, disallow it.
+ */
+ if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS) &&
+ bvec_gap_to_prev(prev, offset))
+ return 0;
}
if (bio->bi_vcnt >= bio->bi_max_vecs)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 069bc20..b9f4cc4 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -80,7 +80,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
blkg->q = q;
INIT_LIST_HEAD(&blkg->q_node);
blkg->blkcg = blkcg;
- blkg->refcnt = 1;
+ atomic_set(&blkg->refcnt, 1);
/* root blkg uses @q->root_rl, init rl only for !root blkgs */
if (blkcg != &blkcg_root) {
@@ -399,11 +399,8 @@ void __blkg_release_rcu(struct rcu_head *rcu_head)
/* release the blkcg and parent blkg refs this blkg has been holding */
css_put(&blkg->blkcg->css);
- if (blkg->parent) {
- spin_lock_irq(blkg->q->queue_lock);
+ if (blkg->parent)
blkg_put(blkg->parent);
- spin_unlock_irq(blkg->q->queue_lock);
- }
blkg_free(blkg);
}
@@ -1093,7 +1090,7 @@ EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
* Register @pol with blkcg core. Might sleep and @pol may be modified on
* successful registration. Returns 0 on success and -errno on failure.
*/
-int __init blkcg_policy_register(struct blkcg_policy *pol)
+int blkcg_policy_register(struct blkcg_policy *pol)
{
int i, ret;
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index cbb7f94..d3fd7aa 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -18,6 +18,7 @@
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
+#include <linux/atomic.h>
/* Max limits for throttle policy */
#define THROTL_IOPS_MAX UINT_MAX
@@ -104,7 +105,7 @@ struct blkcg_gq {
struct request_list rl;
/* reference count */
- int refcnt;
+ atomic_t refcnt;
/* is this blkg online? protected by both blkcg and q locks */
bool online;
@@ -145,7 +146,7 @@ void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);
/* Blkio controller policy registration */
-int __init blkcg_policy_register(struct blkcg_policy *pol);
+int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
const struct blkcg_policy *pol);
@@ -257,13 +258,12 @@ static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
* blkg_get - get a blkg reference
* @blkg: blkg to get
*
- * The caller should be holding queue_lock and an existing reference.
+ * The caller should be holding an existing reference.
*/
static inline void blkg_get(struct blkcg_gq *blkg)
{
- lockdep_assert_held(blkg->q->queue_lock);
- WARN_ON_ONCE(!blkg->refcnt);
- blkg->refcnt++;
+ WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
+ atomic_inc(&blkg->refcnt);
}
void __blkg_release_rcu(struct rcu_head *rcu);
@@ -271,14 +271,11 @@ void __blkg_release_rcu(struct rcu_head *rcu);
/**
* blkg_put - put a blkg reference
* @blkg: blkg to put
- *
- * The caller should be holding queue_lock.
*/
static inline void blkg_put(struct blkcg_gq *blkg)
{
- lockdep_assert_held(blkg->q->queue_lock);
- WARN_ON_ONCE(blkg->refcnt <= 0);
- if (!--blkg->refcnt)
+ WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
+ if (atomic_dec_and_test(&blkg->refcnt))
call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}
@@ -580,7 +577,7 @@ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { ret
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
-static inline int __init blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
+static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
const struct blkcg_policy *pol) { return 0; }
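The blk-cgroup.h hunks above replace a queue_lock-protected int refcount with an atomic_t, which is why blkg_put() (and the parent put in __blkg_release_rcu()) no longer needs blkg->q->queue_lock. A minimal, self-contained sketch of the same pattern with hypothetical names (example_obj, example_get/put); only the atomic and RCU primitives are real kernel API:

    #include <linux/kernel.h>
    #include <linux/atomic.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    /* Hypothetical object using the pattern the blkcg hunks switch to:
     * an atomic_t refcount that may be dropped from any context, with
     * the final free deferred through RCU. */
    struct example_obj {
            atomic_t refcnt;
            struct rcu_head rcu_head;
    };

    static void example_release_rcu(struct rcu_head *rcu)
    {
            kfree(container_of(rcu, struct example_obj, rcu_head));
    }

    static void example_get(struct example_obj *obj)
    {
            WARN_ON_ONCE(atomic_read(&obj->refcnt) <= 0);
            atomic_inc(&obj->refcnt);
    }

    static void example_put(struct example_obj *obj)
    {
            WARN_ON_ONCE(atomic_read(&obj->refcnt) <= 0);
            if (atomic_dec_and_test(&obj->refcnt))
                    call_rcu(&obj->rcu_head, example_release_rcu);
    }

Dropping the lock requirement from the put path is what closes the use-after-free mentioned in the pull message: the final put no longer has to dereference a request_queue that may already be gone.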
diff --git a/block/blk-merge.c b/block/blk-merge.c
index b3bf0df..5453583 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -568,6 +568,8 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
+ struct request_queue *q = rq->q;
+
if (!rq_mergeable(rq) || !bio_mergeable(bio))
return false;
@@ -591,6 +593,14 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
!blk_write_same_mergeable(rq->bio, bio))
return false;
+ if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {
+ struct bio_vec *bprev;
+
+ bprev = &rq->biotail->bi_io_vec[bio->bi_vcnt - 1];
+ if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
+ return false;
+ }
+
return true;
}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 0ef2dc7..ad69ef6 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -878,7 +878,7 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
preempt_disable();
- __blk_mq_run_hw_queue(hctx);
+ blk_mq_run_hw_queue(hctx, false);
preempt_enable();
}
EXPORT_SYMBOL(blk_mq_start_hw_queue);
diff --git a/block/elevator.c b/block/elevator.c
index 34bded1..24c28b6 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -825,7 +825,7 @@ void elv_unregister_queue(struct request_queue *q)
}
EXPORT_SYMBOL(elv_unregister_queue);
-int __init elv_register(struct elevator_type *e)
+int elv_register(struct elevator_type *e)
{
char *def = "";
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index b6c8aaf..5b17ec8 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1337,8 +1337,11 @@ int drbd_submit_peer_request(struct drbd_device *device,
return 0;
}
+ /* Discards don't have any payload.
+ * But the scsi layer still expects a bio_vec it can use internally,
+ * see sd_setup_discard_cmnd() and blk_add_request_payload(). */
if (peer_req->flags & EE_IS_TRIM)
- nr_pages = 0; /* discards don't have any payload. */
+ nr_pages = 1;
/* In most cases, we will only need one bio. But in case the lower
* level restrictions happen to be different at this offset on this
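The drbd change above matters because, as the added comment says, the SCSI layer later attaches a payload page to the request's first bio_vec via blk_add_request_payload(); a discard bio allocated with zero vecs therefore leads to the NULL pointer dereference named in the shortlog. A simplified illustration (not the drbd code; the variable name is made up):

    /* A discard bio carries no data of its own, but the SCSI layer will
     * later store one payload page into bio->bi_io_vec[0], so allocate
     * room for one vec instead of zero. */
    struct bio *discard_bio = bio_alloc(GFP_NOIO, 1);   /* nr_pages = 1, not 0 */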
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 677db04..56d46ff 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3777,7 +3777,7 @@ static void floppy_rb0_cb(struct bio *bio, int err)
int drive = cbdata->drive;
if (err) {
- pr_info("floppy: error %d while reading block 0", err);
+ pr_info("floppy: error %d while reading block 0\n", err);
set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
}
complete(&cbdata->complete);
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 5a64576..d2633ee 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -186,6 +186,15 @@ static inline void *bio_data(struct bio *bio)
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
+/*
+ * Check if adding a bio_vec after bprv with offset would create a gap in
+ * the SG list. Most drivers don't care about this, but some do.
+ */
+static inline bool bvec_gap_to_prev(struct bio_vec *bprv, unsigned int offset)
+{
+ return offset || ((bprv->bv_offset + bprv->bv_len) & (PAGE_SIZE - 1));
+}
+
#define bio_io_error(bio) bio_endio((bio), -EIO)
/*
@@ -644,10 +653,6 @@ struct biovec_slab {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
-
-
-#define bip_vec_idx(bip, idx) (&(bip->bip_vec[(idx)]))
-
#define bip_for_each_vec(bvl, bip, iter) \
for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)
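In other words, bvec_gap_to_prev() reports a gap whenever the new segment would not start at offset 0 of its page, or the previous segment does not end exactly on a page boundary. A small illustration (the wrapper function name is illustrative only; the helper itself is the one added above):

    #include <linux/bio.h>

    /* Illustration: segments are "gapless" only if they tile pages
     * exactly -- prev ends on a page boundary and the next segment
     * starts at offset 0. */
    static bool example_gap_checks(void)
    {
            struct bio_vec prev = { .bv_offset = 0, .bv_len = PAGE_SIZE };
            bool tiled, next_mid_page, prev_short;

            tiled = bvec_gap_to_prev(&prev, 0);            /* false: no gap */
            next_mid_page = bvec_gap_to_prev(&prev, 512);  /* true: next starts mid-page */

            prev.bv_len = 512;                             /* prev now ends mid-page */
            prev_short = bvec_gap_to_prev(&prev, 0);       /* true: hole after prev */

            return tiled || next_mid_page || prev_short;
    }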
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 713f8b6..8699bcf 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -512,6 +512,7 @@ struct request_queue {
#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */
#define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/
+#define QUEUE_FLAG_SG_GAPS 22 /* queue doesn't support SG gaps */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index e2a6bd7..45a9147 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -143,7 +143,7 @@ extern void elv_drain_elevator(struct request_queue *);
* io scheduler registration
*/
extern void __init load_default_elevator_module(void);
-extern int __init elv_register(struct elevator_type *);
+extern int elv_register(struct elevator_type *);
extern void elv_unregister(struct elevator_type *);
/*