-rw-r--r--  block/blk-core.c            |  2 +-
-rw-r--r--  block/blk-softirq.c         | 17 ++++++-----------
-rw-r--r--  block/blk.h                 |  2 +-
-rw-r--r--  block/elevator.c            |  2 ++
-rw-r--r--  include/linux/blk_types.h   |  2 ++
-rw-r--r--  include/linux/blkdev.h      | 13 ++++++++++++-
6 files changed, 24 insertions(+), 14 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 34d7c19..a0e3096 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1307,7 +1307,7 @@ void __blk_put_request(struct request_queue *q, struct request *req)
struct request_list *rl = blk_rq_rl(req);
BUG_ON(!list_empty(&req->queuelist));
- BUG_ON(!hlist_unhashed(&req->hash));
+ BUG_ON(ELV_ON_HASH(req));
blk_free_request(rl, req);
freed_request(rl, flags);
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index ebd6b6f..53b1737 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -30,8 +30,8 @@ static void blk_done_softirq(struct softirq_action *h)
while (!list_empty(&local_list)) {
struct request *rq;
- rq = list_entry(local_list.next, struct request, queuelist);
- list_del_init(&rq->queuelist);
+ rq = list_entry(local_list.next, struct request, ipi_list);
+ list_del_init(&rq->ipi_list);
rq->q->softirq_done_fn(rq);
}
}
@@ -45,14 +45,9 @@ static void trigger_softirq(void *data)
local_irq_save(flags);
list = this_cpu_ptr(&blk_cpu_done);
- /*
- * We reuse queuelist for a list of requests to process. Since the
- * queuelist is used by the block layer only for requests waiting to be
- * submitted to the device it is unused now.
- */
- list_add_tail(&rq->queuelist, list);
+ list_add_tail(&rq->ipi_list, list);
- if (list->next == &rq->queuelist)
+ if (list->next == &rq->ipi_list)
raise_softirq_irqoff(BLOCK_SOFTIRQ);
local_irq_restore(flags);
@@ -141,7 +136,7 @@ void __blk_complete_request(struct request *req)
struct list_head *list;
do_local:
list = this_cpu_ptr(&blk_cpu_done);
- list_add_tail(&req->queuelist, list);
+ list_add_tail(&req->ipi_list, list);
/*
* if the list only contains our just added request,
@@ -149,7 +144,7 @@ do_local:
* entries there, someone already raised the irq but it
* hasn't run yet.
*/
- if (list->next == &req->queuelist)
+ if (list->next == &req->ipi_list)
raise_softirq_irqoff(BLOCK_SOFTIRQ);
} else if (raise_blk_irq(ccpu, req))
goto do_local;
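
The blk-softirq.c hunks keep the existing completion scheme and only switch the list linkage from queuelist to the new ipi_list member: a completed request is appended to this CPU's done list, and BLOCK_SOFTIRQ is raised only by the entry that made the list non-empty. The following is a minimal standalone sketch of that "raise once per batch" pattern; the list helpers, fake_request type, and the softirq_pending flag are simplified stand-ins, not the kernel's implementation.

/* Sketch (not kernel code): append to a per-CPU done list and raise
 * the softirq only when our entry became the first one on the list. */
#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

struct fake_request { struct list_head ipi_list; int id; };

static struct list_head cpu_done_list;	/* stand-in for this CPU's blk_cpu_done */
static bool softirq_pending;		/* stand-in for raise_softirq_irqoff() */

static void complete_request(struct fake_request *rq)
{
	list_add_tail(&rq->ipi_list, &cpu_done_list);

	/* Only the request that made the list non-empty raises the softirq;
	 * later entries know it is already pending. */
	if (cpu_done_list.next == &rq->ipi_list)
		softirq_pending = true;
}

int main(void)
{
	struct fake_request a = { .id = 1 }, b = { .id = 2 };

	INIT_LIST_HEAD(&cpu_done_list);
	complete_request(&a);	/* raises */
	complete_request(&b);	/* piggybacks on the pending softirq */
	printf("softirq pending: %d\n", softirq_pending);
	return 0;
}
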
diff --git a/block/blk.h b/block/blk.h
index d23b415..1d880f1 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -78,7 +78,7 @@ static inline void blk_clear_rq_complete(struct request *rq)
/*
* Internal elevator interface
*/
-#define ELV_ON_HASH(rq) hash_hashed(&(rq)->hash)
+#define ELV_ON_HASH(rq) ((rq)->cmd_flags & REQ_HASHED)
void blk_insert_flush(struct request *rq);
void blk_abort_flushes(struct request_queue *q);
diff --git a/block/elevator.c b/block/elevator.c
index 42c45a7..1e01b66 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -247,6 +247,7 @@ EXPORT_SYMBOL(elevator_exit);
static inline void __elv_rqhash_del(struct request *rq)
{
hash_del(&rq->hash);
+ rq->cmd_flags &= ~REQ_HASHED;
}
static void elv_rqhash_del(struct request_queue *q, struct request *rq)
@@ -261,6 +262,7 @@ static void elv_rqhash_add(struct request_queue *q, struct request *rq)
BUG_ON(ELV_ON_HASH(rq));
hash_add(e->hash, &rq->hash, rq_hash_key(rq));
+ rq->cmd_flags |= REQ_HASHED;
}
static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
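
The blk.h, elevator.c and blk-core.c changes exist because hash membership can no longer be derived from the hlist node itself: once that node shares storage with ipi_list (see the blkdev.h hunk below), its pointers may be overwritten after the request leaves the merge hash. A dedicated flag bit, set on hash insert and cleared on removal, becomes the reliable record. Below is a standalone sketch of that flag-tracking idea; the names and the trivial hash stubs are simplified stand-ins, not the kernel code.

/* Sketch (not kernel code): track merge-hash membership with a flag
 * bit instead of inspecting the (now reusable) hash node. */
#include <stdio.h>

#define REQ_HASHED	(1u << 0)	/* stand-in for the new __REQ_HASHED bit */

struct fake_request {
	unsigned int cmd_flags;
	/* the hash/ipi_list union is omitted; the flag alone records membership */
};

#define ELV_ON_HASH(rq)	((rq)->cmd_flags & REQ_HASHED)

static void rqhash_add(struct fake_request *rq)
{
	/* ... insert into the merge hash here ... */
	rq->cmd_flags |= REQ_HASHED;
}

static void rqhash_del(struct fake_request *rq)
{
	/* ... remove from the merge hash here ... */
	rq->cmd_flags &= ~REQ_HASHED;
}

int main(void)
{
	struct fake_request rq = { 0 };

	rqhash_add(&rq);
	printf("on hash: %d\n", !!ELV_ON_HASH(&rq));
	rqhash_del(&rq);
	printf("on hash: %d\n", !!ELV_ON_HASH(&rq));
	return 0;
}
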
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index bbc3a6c..aa0eaa2 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -189,6 +189,7 @@ enum rq_flag_bits {
__REQ_KERNEL, /* direct IO to kernel pages */
__REQ_PM, /* runtime pm request */
__REQ_END, /* last of chain of requests */
+ __REQ_HASHED, /* on IO scheduler merge hash */
__REQ_NR_BITS, /* stops here */
};
@@ -241,5 +242,6 @@ enum rq_flag_bits {
#define REQ_KERNEL (1ULL << __REQ_KERNEL)
#define REQ_PM (1ULL << __REQ_PM)
#define REQ_END (1ULL << __REQ_END)
+#define REQ_HASHED (1ULL << __REQ_HASHED)
#endif /* __LINUX_BLK_TYPES_H */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1e1fa3f..99617cf 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -118,7 +118,18 @@ struct request {
struct bio *bio;
struct bio *biotail;
- struct hlist_node hash; /* merge hash */
+ /*
+ * The hash is used inside the scheduler, and killed once the
+ * request reaches the dispatch list. The ipi_list is only used
+ * to queue the request for softirq completion, which is long
+ * after the request has been unhashed (and even removed from
+ * the dispatch list).
+ */
+ union {
+ struct hlist_node hash; /* merge hash */
+ struct list_head ipi_list;
+ };
+
/*
* The rb_node is only used inside the io scheduler, requests
* are pruned when moved to the dispatch queue. So let the
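
The comment in the blkdev.h hunk carries the key lifetime argument: the merge-hash node is only live while the request sits in the I/O scheduler, and ipi_list is only used long after the request has been unhashed, so the two never overlap and can share storage. A standalone sketch of the resulting space saving follows; the struct names and member types are simplified stand-ins sized like the real ones, not the kernel's struct request.

/* Sketch (not kernel code): members with disjoint lifetimes can share
 * storage in a union, shrinking the request by one list node. */
#include <stdio.h>

struct hlist_node { struct hlist_node *next, **pprev; };
struct list_head { struct list_head *next, *prev; };

struct request_separate {
	struct hlist_node hash;		/* merge hash */
	struct list_head ipi_list;	/* softirq completion list */
};

struct request_union {
	union {
		struct hlist_node hash;		/* live only inside the scheduler */
		struct list_head ipi_list;	/* used only after the request is unhashed */
	};
};

int main(void)
{
	printf("separate members: %zu bytes\n", sizeof(struct request_separate));
	printf("union of members: %zu bytes\n", sizeof(struct request_union));
	return 0;
}
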