author     Jens Axboe <axboe@fb.com>    2016-11-28 09:25:50 -0700
committer  Jens Axboe <axboe@fb.com>    2016-11-28 10:27:03 -0700
commit     fa224eed2b5e0f2f9a57281e9dc733c843d590ad (patch)
tree       e7cdf9a2458ec2fd000c536add5ac206ee1dd9f9 /block
parent     80e091d10e8bf7b801d634ea8870b9e907314424 (diff)
blk-wbt: cleanup disable-by-default for CFQ
Make it clear that we are disabling wbt for the specified queue, if it was enabled by default. This is in preparation for allowing users to re-enable wbt, and not have it disabled automatically again.

Signed-off-by: Jens Axboe <axboe@fb.com>
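For context, a rough caller-side sketch of what the rename implies for users of the API (example_disable_wbt() and the includes are illustrative assumptions, not part of this patch; the real call site is the cfq-iosched.c hunk below):

#include <linux/blkdev.h>
#include "blk-wbt.h"

/* Hypothetical caller, for illustration only. */
static void example_disable_wbt(struct request_queue *q)
{
        /* Before: callers dereferenced q->rq_wb themselves. */
        /* wbt_disable(q->rq_wb); */

        /*
         * After: callers hand over the queue; wbt_disable_default() looks up
         * q->rq_wb itself and only clears the window timer and latency limits
         * when rwb is present, i.e. when wbt was enabled by default.
         */
        wbt_disable_default(q);
}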
Diffstat (limited to 'block')
-rw-r--r--  block/blk-wbt.c       10
-rw-r--r--  block/blk-wbt.h        4
-rw-r--r--  block/cfq-iosched.c    9
3 files changed, 13 insertions, 10 deletions
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 92df2f7..7c0e618 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -665,15 +665,21 @@ void wbt_set_write_cache(struct rq_wb *rwb, bool write_cache_on)
 	rwb->wc = write_cache_on;
 }
 
-void wbt_disable(struct rq_wb *rwb)
+/*
+ * Disable wbt, if enabled by default. Only called from CFQ, if we have
+ * cgroups enabled
+ */
+void wbt_disable_default(struct request_queue *q)
 {
+	struct rq_wb *rwb = q->rq_wb;
+
 	if (rwb) {
 		del_timer_sync(&rwb->window_timer);
 		rwb->win_nsec = rwb->min_lat_nsec = 0;
 		wbt_update_limits(rwb);
 	}
 }
-EXPORT_SYMBOL_GPL(wbt_disable);
+EXPORT_SYMBOL_GPL(wbt_disable_default);
 
 u64 wbt_default_latency_nsec(struct request_queue *q)
 {
diff --git a/block/blk-wbt.h b/block/blk-wbt.h
index 9dfc88a..8f485f8 100644
--- a/block/blk-wbt.h
+++ b/block/blk-wbt.h
@@ -105,7 +105,7 @@ void wbt_exit(struct request_queue *);
 void wbt_update_limits(struct rq_wb *);
 void wbt_requeue(struct rq_wb *, struct blk_issue_stat *);
 void wbt_issue(struct rq_wb *, struct blk_issue_stat *);
-void wbt_disable(struct rq_wb *);
+void wbt_disable_default(struct request_queue *);
 void wbt_set_queue_depth(struct rq_wb *, unsigned int);
 void wbt_set_write_cache(struct rq_wb *, bool);
@@ -141,7 +141,7 @@ static inline void wbt_requeue(struct rq_wb *rwb, struct blk_issue_stat *stat)
 static inline void wbt_issue(struct rq_wb *rwb, struct blk_issue_stat *stat)
 {
 }
-static inline void wbt_disable(struct rq_wb *rwb)
+static inline void wbt_disable_default(struct request_queue *q)
 {
 }
 static inline void wbt_set_queue_depth(struct rq_wb *rwb, unsigned int depth)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 9894dc9..c73a6fc 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3780,13 +3780,10 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
 	/*
 	 * If we have a non-root cgroup, we can depend on that to
 	 * do proper throttling of writes. Turn off wbt for that
-	 * case.
+	 * case, if it was enabled by default.
 	 */
-	if (nonroot_cg) {
-		struct request_queue *q = cfqd->queue;
-
-		wbt_disable(q->rq_wb);
-	}
+	if (nonroot_cg)
+		wbt_disable_default(cfqd->queue);
 
 	/*
 	 * Drop reference to queues. New queues will be assigned in new
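A side note on why the cfq-iosched.c call site needs no #ifdef: when CONFIG_BLK_WBT is not set, the static inline stub from the second blk-wbt.h hunk above turns wbt_disable_default() into a no-op. A minimal sketch of that header pattern, with made-up config and function names:

/* Header stub pattern, sketched with hypothetical names. */
#ifdef CONFIG_EXAMPLE_FEATURE
void example_feature_disable(struct request_queue *q);	/* real implementation */
#else
static inline void example_feature_disable(struct request_queue *q)
{
        /* Compiled-out build: the call site optimizes away to nothing. */
}
#endif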