author    Linus Torvalds <torvalds@linux-foundation.org> 2017-09-09 12:49:01 -0700
committer Linus Torvalds <torvalds@linux-foundation.org> 2017-09-09 12:49:01 -0700
commit    126e76ffbf78d9e948b641aadb265d16c57f5a3d (patch)
tree      656e7838f0ec057936b80e15a774911df05c6005 /block/bfq-cgroup.c
parent    fbd01410e89a66f346ba1b3c0161e1198449b746 (diff)
parent    175206cf9ab63161dec74d9cd7f9992e062491f5 (diff)
download  op-kernel-dev-126e76ffbf78d9e948b641aadb265d16c57f5a3d.zip
          op-kernel-dev-126e76ffbf78d9e948b641aadb265d16c57f5a3d.tar.gz
Merge branch 'for-4.14/block-postmerge' of git://git.kernel.dk/linux-block
Pull followup block layer updates from Jens Axboe:
 "I ended up splitting the main pull request for this series into two,
  mainly because of clashes between NVMe fixes that went into 4.13 after
  the for-4.14 branches were split off. This pull request is mostly
  NVMe, but not exclusively. In detail, it contains:

   - Two pull requests for NVMe changes from Christoph. Nothing new on
     the feature front, basically just fixes all over the map for the
     core bits, transport, rdma, etc.

   - Series from Bart, cleaning up various bits in the BFQ scheduler.

   - Series of bcache fixes, which has been lingering for a release or
     two. Coly sent this in, but the patches are from various people in
     this area.

   - Set of patches for BFQ from Paolo himself, updating both
     documentation and fixing some corner cases in performance.

   - Series from Omar, attempting to now get the 4k loop support
     correct. Our confidence level is higher this time.

   - Series from Shaohua for loop as well, improving O_DIRECT
     performance and fixing a use-after-free"

* 'for-4.14/block-postmerge' of git://git.kernel.dk/linux-block: (74 commits)
  bcache: initialize dirty stripes in flash_dev_run()
  loop: set physical block size to logical block size
  bcache: fix bch_hprint crash and improve output
  bcache: Update continue_at() documentation
  bcache: silence static checker warning
  bcache: fix for gc and write-back race
  bcache: increase the number of open buckets
  bcache: Correct return value for sysfs attach errors
  bcache: correct cache_dirty_target in __update_writeback_rate()
  bcache: gc does not work when triggering by manual command
  bcache: Don't reinvent the wheel but use existing llist API
  bcache: do not subtract sectors_to_gc for bypassed IO
  bcache: fix sequential large write IO bypass
  bcache: Fix leak of bdev reference
  block/loop: remove unused field
  block/loop: fix use after free
  bfq: Use icq_to_bic() consistently
  bfq: Suppress compiler warnings about comparisons
  bfq: Check kstrtoul() return value
  bfq: Declare local functions static
  ...
Diffstat (limited to 'block/bfq-cgroup.c')
-rw-r--r--	block/bfq-cgroup.c	18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 78b2e0d..ceefb9a 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -206,7 +206,7 @@ static void bfqg_get(struct bfq_group *bfqg)
bfqg->ref++;
}
-void bfqg_put(struct bfq_group *bfqg)
+static void bfqg_put(struct bfq_group *bfqg)
{
bfqg->ref--;
@@ -385,7 +385,7 @@ static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
}
-struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
+static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
{
struct bfq_group_data *bgd;
@@ -395,7 +395,7 @@ struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
return &bgd->pd;
}
-void bfq_cpd_init(struct blkcg_policy_data *cpd)
+static void bfq_cpd_init(struct blkcg_policy_data *cpd)
{
struct bfq_group_data *d = cpd_to_bfqgd(cpd);
@@ -403,12 +403,12 @@ void bfq_cpd_init(struct blkcg_policy_data *cpd)
CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL;
}
-void bfq_cpd_free(struct blkcg_policy_data *cpd)
+static void bfq_cpd_free(struct blkcg_policy_data *cpd)
{
kfree(cpd_to_bfqgd(cpd));
}
-struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
+static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
{
struct bfq_group *bfqg;
@@ -426,7 +426,7 @@ struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
return &bfqg->pd;
}
-void bfq_pd_init(struct blkg_policy_data *pd)
+static void bfq_pd_init(struct blkg_policy_data *pd)
{
struct blkcg_gq *blkg = pd_to_blkg(pd);
struct bfq_group *bfqg = blkg_to_bfqg(blkg);
@@ -445,7 +445,7 @@ void bfq_pd_init(struct blkg_policy_data *pd)
bfqg->rq_pos_tree = RB_ROOT;
}
-void bfq_pd_free(struct blkg_policy_data *pd)
+static void bfq_pd_free(struct blkg_policy_data *pd)
{
struct bfq_group *bfqg = pd_to_bfqg(pd);
@@ -453,7 +453,7 @@ void bfq_pd_free(struct blkg_policy_data *pd)
bfqg_put(bfqg);
}
-void bfq_pd_reset_stats(struct blkg_policy_data *pd)
+static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
{
struct bfq_group *bfqg = pd_to_bfqg(pd);
@@ -740,7 +740,7 @@ static void bfq_reparent_active_entities(struct bfq_data *bfqd,
* blkio already grabs the queue_lock for us, so no need to use
* RCU-based magic
*/
-void bfq_pd_offline(struct blkg_policy_data *pd)
+static void bfq_pd_offline(struct blkg_policy_data *pd)
{
struct bfq_service_tree *st;
struct bfq_group *bfqg = pd_to_bfqg(pd);
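
Every hunk above applies the same pattern (from the "bfq: Declare local
functions static" commit in the series): helpers that are only ever
reached through a function-pointer table in the same file are given
internal linkage with `static`. In bfq-cgroup.c that table is the blkcg
policy the BFQ scheduler registers with the block layer. Below is a
minimal, self-contained userspace sketch of the idea, not kernel code:
all struct and function names (demo_pd, demo_policy_ops, etc.) are
hypothetical illustrations, not the actual blkcg API.

/*
 * Sketch: callbacks with internal linkage, exposed only through an
 * ops table. Compiles standalone with any C compiler.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_pd {
	int weight;
};

/*
 * static = internal linkage: nothing outside this translation unit can
 * call these directly, and the compiler can warn if one becomes unused.
 */
static struct demo_pd *demo_pd_alloc(void)
{
	return calloc(1, sizeof(struct demo_pd));
}

static void demo_pd_init(struct demo_pd *pd)
{
	pd->weight = 100;	/* hypothetical default weight */
}

static void demo_pd_free(struct demo_pd *pd)
{
	free(pd);
}

/* The only externally visible symbol: the table holding the callbacks. */
const struct demo_policy_ops {
	struct demo_pd *(*pd_alloc)(void);
	void (*pd_init)(struct demo_pd *pd);
	void (*pd_free)(struct demo_pd *pd);
} demo_policy = {
	.pd_alloc	= demo_pd_alloc,
	.pd_init	= demo_pd_init,
	.pd_free	= demo_pd_free,
};

int main(void)
{
	struct demo_pd *pd = demo_policy.pd_alloc();

	demo_policy.pd_init(pd);
	printf("weight = %d\n", pd->weight);
	demo_policy.pd_free(pd);
	return 0;
}

The benefit mirrors what the diff achieves: the symbols stay out of the
global namespace, yet remain fully reachable via the registered ops
table, so no caller outside the file needs to change.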