author     Dmitry Torokhov <dmitry.torokhov@gmail.com>  2013-11-14 17:38:05 -0800
committer  Dmitry Torokhov <dmitry.torokhov@gmail.com>  2013-11-14 17:38:05 -0800
commit     42249094f79422fbf5ed4b54eeb48ff096809b8f
tree       91e6850c8c7e8cc284cf8bb6363f8662f84011f4
parent     936816161978ca716a56c5e553c68f25972b1e3a
parent     2c027b7c48a888ab173ba45babb4525e278375d9
Merge branch 'next' into for-linus
Merge first round of changes for 3.13 merge window.
Diffstat (limited to 'block')
-rw-r--r--  block/Kconfig                11
-rw-r--r--  block/Makefile                1
-rw-r--r--  block/blk-cgroup.c          179
-rw-r--r--  block/blk-cgroup.h           54
-rw-r--r--  block/blk-core.c             20
-rw-r--r--  block/blk-exec.c              4
-rw-r--r--  block/blk-ioc.c               5
-rw-r--r--  block/blk-iopoll.c            6
-rw-r--r--  block/blk-softirq.c           6
-rw-r--r--  block/blk-sysfs.c             2
-rw-r--r--  block/blk-tag.c              11
-rw-r--r--  block/blk-throttle.c       1093
-rw-r--r--  block/blk-timeout.c           5
-rw-r--r--  block/cfq-iosched.c         111
-rw-r--r--  block/cmdline-parser.c      250
-rw-r--r--  block/compat_ioctl.c          3
-rw-r--r--  block/deadline-iosched.c     18
-rw-r--r--  block/elevator.c             27
-rw-r--r--  block/genhd.c                17
-rw-r--r--  block/noop-iosched.c         17
-rw-r--r--  block/partitions/Kconfig     18
-rw-r--r--  block/partitions/Makefile     2
-rw-r--r--  block/partitions/aix.c      293
-rw-r--r--  block/partitions/aix.h        1
-rw-r--r--  block/partitions/check.c      4
-rw-r--r--  block/partitions/cmdline.c   99
-rw-r--r--  block/partitions/cmdline.h    2
-rw-r--r--  block/partitions/efi.c      175
-rw-r--r--  block/partitions/efi.h       38
-rw-r--r--  block/partitions/msdos.c     17
30 files changed, 1845 insertions, 644 deletions
diff --git a/block/Kconfig b/block/Kconfig
index a7e40a7..2429515 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -99,6 +99,17 @@ config BLK_DEV_THROTTLING
See Documentation/cgroups/blkio-controller.txt for more information.
+config BLK_CMDLINE_PARSER
+ bool "Block device command line partition parser"
+ default n
+ ---help---
+ Enabling this option allows you to specify the partition layout from
+ the kernel boot args. This is typically of use for embedded devices
+ which don't otherwise have any standardized method for listing the
+ partitions on a block device.
+
+ See Documentation/block/cmdline-partition.txt for more information.
+
menu "Partition Types"
source "block/partitions/Kconfig"
diff --git a/block/Makefile b/block/Makefile
index 39b76ba..671a83d 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -18,3 +18,4 @@ obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o
+obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index e8918ff..4e491d9 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -32,26 +32,6 @@ EXPORT_SYMBOL_GPL(blkcg_root);
static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
-static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
- struct request_queue *q, bool update_hint);
-
-/**
- * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
- * @d_blkg: loop cursor pointing to the current descendant
- * @pos_cgrp: used for iteration
- * @p_blkg: target blkg to walk descendants of
- *
- * Walk @c_blkg through the descendants of @p_blkg. Must be used with RCU
- * read locked. If called under either blkcg or queue lock, the iteration
- * is guaranteed to include all and only online blkgs. The caller may
- * update @pos_cgrp by calling cgroup_rightmost_descendant() to skip
- * subtree.
- */
-#define blkg_for_each_descendant_pre(d_blkg, pos_cgrp, p_blkg) \
- cgroup_for_each_descendant_pre((pos_cgrp), (p_blkg)->blkcg->css.cgroup) \
- if (((d_blkg) = __blkg_lookup(cgroup_to_blkcg(pos_cgrp), \
- (p_blkg)->q, false)))
-
static bool blkcg_policy_enabled(struct request_queue *q,
const struct blkcg_policy *pol)
{
@@ -71,18 +51,8 @@ static void blkg_free(struct blkcg_gq *blkg)
if (!blkg)
return;
- for (i = 0; i < BLKCG_MAX_POLS; i++) {
- struct blkcg_policy *pol = blkcg_policy[i];
- struct blkg_policy_data *pd = blkg->pd[i];
-
- if (!pd)
- continue;
-
- if (pol && pol->pd_exit_fn)
- pol->pd_exit_fn(blkg);
-
- kfree(pd);
- }
+ for (i = 0; i < BLKCG_MAX_POLS; i++)
+ kfree(blkg->pd[i]);
blk_exit_rl(&blkg->rl);
kfree(blkg);
@@ -134,10 +104,6 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
blkg->pd[i] = pd;
pd->blkg = blkg;
pd->plid = i;
-
- /* invoke per-policy init */
- if (pol->pd_init_fn)
- pol->pd_init_fn(blkg);
}
return blkg;
@@ -158,8 +124,8 @@ err_free:
* @q's bypass state. If @update_hint is %true, the caller should be
* holding @q->queue_lock and lookup hint is updated on success.
*/
-static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
- struct request_queue *q, bool update_hint)
+struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
+ bool update_hint)
{
struct blkcg_gq *blkg;
@@ -234,16 +200,25 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
}
blkg = new_blkg;
- /* link parent and insert */
+ /* link parent */
if (blkcg_parent(blkcg)) {
blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
if (WARN_ON_ONCE(!blkg->parent)) {
- blkg = ERR_PTR(-EINVAL);
+ ret = -EINVAL;
goto err_put_css;
}
blkg_get(blkg->parent);
}
+ /* invoke per-policy init */
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ struct blkcg_policy *pol = blkcg_policy[i];
+
+ if (blkg->pd[i] && pol->pd_init_fn)
+ pol->pd_init_fn(blkg);
+ }
+
+ /* insert */
spin_lock(&blkcg->lock);
ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
if (likely(!ret)) {
@@ -260,8 +235,13 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
blkg->online = true;
spin_unlock(&blkcg->lock);
- if (!ret)
+ if (!ret) {
+ if (blkcg == &blkcg_root) {
+ q->root_blkg = blkg;
+ q->root_rl.blkg = blkg;
+ }
return blkg;
+ }
/* @blkg failed fully initialized, use the usual release path */
blkg_put(blkg);
@@ -360,6 +340,15 @@ static void blkg_destroy(struct blkcg_gq *blkg)
rcu_assign_pointer(blkcg->blkg_hint, NULL);
/*
+ * If root blkg is destroyed, just clear the pointer since root_rl
+ * does not take reference on root blkg.
+ */
+ if (blkcg == &blkcg_root) {
+ blkg->q->root_blkg = NULL;
+ blkg->q->root_rl.blkg = NULL;
+ }
+
+ /*
* Put the reference taken at the time of creation so that when all
* queues are gone, group can be destroyed.
*/
@@ -385,39 +374,40 @@ static void blkg_destroy_all(struct request_queue *q)
blkg_destroy(blkg);
spin_unlock(&blkcg->lock);
}
-
- /*
- * root blkg is destroyed. Just clear the pointer since
- * root_rl does not take reference on root blkg.
- */
- q->root_blkg = NULL;
- q->root_rl.blkg = NULL;
}
-static void blkg_rcu_free(struct rcu_head *rcu_head)
+/*
+ * A group is RCU protected, but having an rcu lock does not mean that one
+ * can access all the fields of blkg and assume these are valid. For
+ * example, don't try to follow throtl_data and request queue links.
+ *
+ * Having a reference to blkg under an rcu allows accesses to only values
+ * local to groups like group stats and group rate limits.
+ */
+void __blkg_release_rcu(struct rcu_head *rcu_head)
{
- blkg_free(container_of(rcu_head, struct blkcg_gq, rcu_head));
-}
+ struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);
+ int i;
+
+ /* tell policies that this one is being freed */
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ struct blkcg_policy *pol = blkcg_policy[i];
+
+ if (blkg->pd[i] && pol->pd_exit_fn)
+ pol->pd_exit_fn(blkg);
+ }
-void __blkg_release(struct blkcg_gq *blkg)
-{
/* release the blkcg and parent blkg refs this blkg has been holding */
css_put(&blkg->blkcg->css);
- if (blkg->parent)
+ if (blkg->parent) {
+ spin_lock_irq(blkg->q->queue_lock);
blkg_put(blkg->parent);
+ spin_unlock_irq(blkg->q->queue_lock);
+ }
- /*
- * A group is freed in rcu manner. But having an rcu lock does not
- * mean that one can access all the fields of blkg and assume these
- * are valid. For example, don't try to follow throtl_data and
- * request queue links.
- *
- * Having a reference to blkg under an rcu allows acess to only
- * values local to groups like group stats and group rate limits
- */
- call_rcu(&blkg->rcu_head, blkg_rcu_free);
+ blkg_free(blkg);
}
-EXPORT_SYMBOL_GPL(__blkg_release);
+EXPORT_SYMBOL_GPL(__blkg_release_rcu);
/*
* The next function used by blk_queue_for_each_rl(). It's a bit tricky
@@ -454,10 +444,10 @@ struct request_list *__blk_queue_next_rl(struct request_list *rl,
return &blkg->rl;
}
-static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
- u64 val)
+static int blkcg_reset_stats(struct cgroup_subsys_state *css,
+ struct cftype *cftype, u64 val)
{
- struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
+ struct blkcg *blkcg = css_to_blkcg(css);
struct blkcg_gq *blkg;
int i;
@@ -631,15 +621,13 @@ u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off)
{
struct blkcg_policy *pol = blkcg_policy[pd->plid];
struct blkcg_gq *pos_blkg;
- struct cgroup *pos_cgrp;
- u64 sum;
+ struct cgroup_subsys_state *pos_css;
+ u64 sum = 0;
lockdep_assert_held(pd->blkg->q->queue_lock);
- sum = blkg_stat_read((void *)pd + off);
-
rcu_read_lock();
- blkg_for_each_descendant_pre(pos_blkg, pos_cgrp, pd_to_blkg(pd)) {
+ blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
struct blkg_stat *stat = (void *)pos_pd + off;
@@ -666,16 +654,14 @@ struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
{
struct blkcg_policy *pol = blkcg_policy[pd->plid];
struct blkcg_gq *pos_blkg;
- struct cgroup *pos_cgrp;
- struct blkg_rwstat sum;
+ struct cgroup_subsys_state *pos_css;
+ struct blkg_rwstat sum = { };
int i;
lockdep_assert_held(pd->blkg->q->queue_lock);
- sum = blkg_rwstat_read((void *)pd + off);
-
rcu_read_lock();
- blkg_for_each_descendant_pre(pos_blkg, pos_cgrp, pd_to_blkg(pd)) {
+ blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
struct blkg_rwstat *rwstat = (void *)pos_pd + off;
struct blkg_rwstat tmp;
@@ -782,18 +768,18 @@ struct cftype blkcg_files[] = {
/**
* blkcg_css_offline - cgroup css_offline callback
- * @cgroup: cgroup of interest
+ * @css: css of interest
*
- * This function is called when @cgroup is about to go away and responsible
- * for shooting down all blkgs associated with @cgroup. blkgs should be
+ * This function is called when @css is about to go away and responsible
+ * for shooting down all blkgs associated with @css. blkgs should be
* removed while holding both q and blkcg locks. As blkcg lock is nested
* inside q lock, this function performs reverse double lock dancing.
*
* This is the blkcg counterpart of ioc_release_fn().
*/
-static void blkcg_css_offline(struct cgroup *cgroup)
+static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
- struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
+ struct blkcg *blkcg = css_to_blkcg(css);
spin_lock_irq(&blkcg->lock);
@@ -815,21 +801,21 @@ static void blkcg_css_offline(struct cgroup *cgroup)
spin_unlock_irq(&blkcg->lock);
}
-static void blkcg_css_free(struct cgroup *cgroup)
+static void blkcg_css_free(struct cgroup_subsys_state *css)
{
- struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
+ struct blkcg *blkcg = css_to_blkcg(css);
if (blkcg != &blkcg_root)
kfree(blkcg);
}
-static struct cgroup_subsys_state *blkcg_css_alloc(struct cgroup *cgroup)
+static struct cgroup_subsys_state *
+blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
static atomic64_t id_seq = ATOMIC64_INIT(0);
struct blkcg *blkcg;
- struct cgroup *parent = cgroup->parent;
- if (!parent) {
+ if (!parent_css) {
blkcg = &blkcg_root;
goto done;
}
@@ -900,14 +886,15 @@ void blkcg_exit_queue(struct request_queue *q)
* of the main cic data structures. For now we allow a task to change
* its cgroup only if it's the only owner of its ioc.
*/
-static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+static int blkcg_can_attach(struct cgroup_subsys_state *css,
+ struct cgroup_taskset *tset)
{
struct task_struct *task;
struct io_context *ioc;
int ret = 0;
/* task_lock() is needed to avoid races with exit_io_context() */
- cgroup_taskset_for_each(task, cgrp, tset) {
+ cgroup_taskset_for_each(task, css, tset) {
task_lock(task);
ioc = task->io_context;
if (ioc && atomic_read(&ioc->nr_tasks) > 1)
@@ -928,14 +915,6 @@ struct cgroup_subsys blkio_subsys = {
.subsys_id = blkio_subsys_id,
.base_cftypes = blkcg_files,
.module = THIS_MODULE,
-
- /*
- * blkio subsystem is utterly broken in terms of hierarchy support.
- * It treats all cgroups equally regardless of where they're
- * located in the hierarchy - all cgroups are treated as if they're
- * right below the root. Fix it and remove the following.
- */
- .broken_hierarchy = true,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
@@ -998,8 +977,6 @@ int blkcg_activate_policy(struct request_queue *q,
ret = PTR_ERR(blkg);
goto out_unlock;
}
- q->root_blkg = blkg;
- q->root_rl.blkg = blkg;
list_for_each_entry(blkg, &q->blkg_list, q_node)
cnt++;
@@ -1152,7 +1129,7 @@ void blkcg_policy_unregister(struct blkcg_policy *pol)
/* kill the intf files first */
if (pol->cftypes)
- cgroup_rm_cftypes(&blkio_subsys, pol->cftypes);
+ cgroup_rm_cftypes(pol->cftypes);
/* unregister and update blkgs */
blkcg_policy[pol->plid] = NULL;
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 4e595ee..ae6969a 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -179,22 +179,20 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
-static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup)
+static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
- return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
- struct blkcg, css);
+ return css ? container_of(css, struct blkcg, css) : NULL;
}
static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
- return container_of(task_subsys_state(tsk, blkio_subsys_id),
- struct blkcg, css);
+ return css_to_blkcg(task_css(tsk, blkio_subsys_id));
}
static inline struct blkcg *bio_blkcg(struct bio *bio)
{
if (bio && bio->bi_css)
- return container_of(bio->bi_css, struct blkcg, css);
+ return css_to_blkcg(bio->bi_css);
return task_blkcg(current);
}
@@ -206,9 +204,7 @@ static inline struct blkcg *bio_blkcg(struct bio *bio)
*/
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
- struct cgroup *pcg = blkcg->css.cgroup->parent;
-
- return pcg ? cgroup_to_blkcg(pcg) : NULL;
+ return css_to_blkcg(css_parent(&blkcg->css));
}
/**
@@ -266,7 +262,7 @@ static inline void blkg_get(struct blkcg_gq *blkg)
blkg->refcnt++;
}
-void __blkg_release(struct blkcg_gq *blkg);
+void __blkg_release_rcu(struct rcu_head *rcu);
/**
* blkg_put - put a blkg reference
@@ -279,9 +275,44 @@ static inline void blkg_put(struct blkcg_gq *blkg)
lockdep_assert_held(blkg->q->queue_lock);
WARN_ON_ONCE(blkg->refcnt <= 0);
if (!--blkg->refcnt)
- __blkg_release(blkg);
+ call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}
+struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
+ bool update_hint);
+
+/**
+ * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
+ * @d_blkg: loop cursor pointing to the current descendant
+ * @pos_css: used for iteration
+ * @p_blkg: target blkg to walk descendants of
+ *
+ * Walk @d_blkg through the descendants of @p_blkg. Must be used with RCU
+ * read locked. If called under either blkcg or queue lock, the iteration
+ * is guaranteed to include all and only online blkgs. The caller may
+ * update @pos_css by calling css_rightmost_descendant() to skip subtree.
+ * @p_blkg is included in the iteration and the first node to be visited.
+ */
+#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg) \
+ css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css) \
+ if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
+ (p_blkg)->q, false)))
+
+/**
+ * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
+ * @d_blkg: loop cursor pointing to the current descendant
+ * @pos_css: used for iteration
+ * @p_blkg: target blkg to walk descendants of
+ *
+ * Similar to blkg_for_each_descendant_pre() but performs post-order
+ * traversal instead. Synchronization rules are the same. @p_blkg is
+ * included in the iteration and the last node to be visited.
+ */
+#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg) \
+ css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css) \
+ if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
+ (p_blkg)->q, false)))
+
/**
* blk_get_rl - get request_list to use
* @q: request_queue of interest
@@ -542,7 +573,6 @@ static inline int blkcg_activate_policy(struct request_queue *q,
static inline void blkcg_deactivate_policy(struct request_queue *q,
const struct blkcg_policy *pol) { }
-static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
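The reworked descendant iterators in blk-cgroup.h now take a cgroup_subsys_state cursor instead of a cgroup cursor. A minimal usage sketch under the locking rules stated above (parent_blkg and visit() are placeholders, not part of the patch):

    /* walk parent_blkg and its online descendants in pre-order */
    struct blkcg_gq *pos_blkg;
    struct cgroup_subsys_state *pos_css;

    rcu_read_lock();
    blkg_for_each_descendant_pre(pos_blkg, pos_css, parent_blkg)
            visit(pos_blkg);
    rcu_read_unlock();

blkg_stat_recursive_sum() and blkg_rwstat_recursive_sum() in blk-cgroup.c follow this same pattern while additionally holding the queue lock.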
diff --git a/block/blk-core.c b/block/blk-core.c
index 33c33bc..0a00e4e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1549,11 +1549,9 @@ get_rq:
if (plug) {
/*
* If this is the first request added after a plug, fire
- * of a plug trace. If others have been added before, check
- * if we have multiple devices in this plug. If so, make a
- * note to sort the list before dispatch.
+ * of a plug trace.
*/
- if (list_empty(&plug->list))
+ if (!request_count)
trace_block_plug(q);
else {
if (request_count >= BLK_MAX_REQUEST_COUNT) {
@@ -2315,6 +2313,15 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
case -EBADE:
error_type = "critical nexus";
break;
+ case -ETIMEDOUT:
+ error_type = "timeout";
+ break;
+ case -ENOSPC:
+ error_type = "critical space allocation";
+ break;
+ case -ENODATA:
+ error_type = "critical medium";
+ break;
case -EIO:
default:
error_type = "I/O";
@@ -3164,7 +3171,7 @@ void blk_post_runtime_resume(struct request_queue *q, int err)
q->rpm_status = RPM_ACTIVE;
__blk_run_queue(q);
pm_runtime_mark_last_busy(q->dev);
- pm_runtime_autosuspend(q->dev);
+ pm_request_autosuspend(q->dev);
} else {
q->rpm_status = RPM_SUSPENDED;
}
@@ -3180,7 +3187,8 @@ int __init blk_dev_init(void)
/* used for unplugging and affects IO latency/throughput - HIGHPRI */
kblockd_workqueue = alloc_workqueue("kblockd",
- WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+ WQ_MEM_RECLAIM | WQ_HIGHPRI |
+ WQ_POWER_EFFICIENT, 0);
if (!kblockd_workqueue)
panic("Failed to create kblockd\n");
diff --git a/block/blk-exec.c b/block/blk-exec.c
index e706213..ae4f27d 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -68,9 +68,9 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
spin_lock_irq(q->queue_lock);
if (unlikely(blk_queue_dying(q))) {
+ rq->cmd_flags |= REQ_QUIET;
rq->errors = -ENXIO;
- if (rq->end_io)
- rq->end_io(rq, rq->errors);
+ __blk_end_request_all(rq, rq->errors);
spin_unlock_irq(q->queue_lock);
return;
}
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 9c4bb82..46cd7bd 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -144,7 +144,8 @@ void put_io_context(struct io_context *ioc)
if (atomic_long_dec_and_test(&ioc->refcount)) {
spin_lock_irqsave(&ioc->lock, flags);
if (!hlist_empty(&ioc->icq_list))
- schedule_work(&ioc->release_work);
+ queue_work(system_power_efficient_wq,
+ &ioc->release_work);
else
free_ioc = true;
spin_unlock_irqrestore(&ioc->lock, flags);
@@ -366,7 +367,7 @@ struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
if (!icq)
return NULL;
- if (radix_tree_preload(gfp_mask) < 0) {
+ if (radix_tree_maybe_preload(gfp_mask) < 0) {
kmem_cache_free(et->icq_cache, icq);
return NULL;
}
diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
index 58916af..4b8d9b54 100644
--- a/block/blk-iopoll.c
+++ b/block/blk-iopoll.c
@@ -189,8 +189,8 @@ void blk_iopoll_init(struct blk_iopoll *iop, int weight, blk_iopoll_fn *poll_fn)
}
EXPORT_SYMBOL(blk_iopoll_init);
-static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
+static int blk_iopoll_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
{
/*
* If a CPU goes away, splice its entries to the current CPU
@@ -209,7 +209,7 @@ static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
return NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata blk_iopoll_cpu_notifier = {
+static struct notifier_block blk_iopoll_cpu_notifier = {
.notifier_call = blk_iopoll_cpu_notify,
};
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 467c8de..ec9e606 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -78,8 +78,8 @@ static int raise_blk_irq(int cpu, struct request *rq)
}
#endif
-static int __cpuinit blk_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
+static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
+ void *hcpu)
{
/*
* If a CPU goes away, splice its entries to the current CPU
@@ -98,7 +98,7 @@ static int __cpuinit blk_cpu_notify(struct notifier_block *self,
return NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata blk_cpu_notifier = {
+static struct notifier_block blk_cpu_notifier = {
.notifier_call = blk_cpu_notify,
};
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 5efc5a6..3aa5b19 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -29,7 +29,7 @@ queue_var_store(unsigned long *var, const char *page, size_t count)
int err;
unsigned long v;
- err = strict_strtoul(page, 10, &v);
+ err = kstrtoul(page, 10, &v);
if (err || v > UINT_MAX)
return -EINVAL;
diff --git a/block/blk-tag.c b/block/blk-tag.c
index cc345e1..3f33d86 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -348,9 +348,16 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
*/
max_depth = bqt->max_depth;
if (!rq_is_sync(rq) && max_depth > 1) {
- max_depth -= 2;
- if (!max_depth)
+ switch (max_depth) {
+ case 2:
max_depth = 1;
+ break;
+ case 3:
+ max_depth = 2;
+ break;
+ default:
+ max_depth -= 2;
+ }
if (q->in_flight[BLK_RW_ASYNC] > max_depth)
return 1;
}
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 3114622..8331aba 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -25,18 +25,61 @@ static struct blkcg_policy blkcg_policy_throtl;
/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
-static void throtl_schedule_delayed_work(struct throtl_data *td,
- unsigned long delay);
-
-struct throtl_rb_root {
- struct rb_root rb;
- struct rb_node *left;
- unsigned int count;
- unsigned long min_disptime;
+
+/*
+ * To implement hierarchical throttling, throtl_grps form a tree and bios
+ * are dispatched upwards level by level until they reach the top and get
+ * issued. When dispatching bios from the children and local group at each
+ * level, if the bios are dispatched into a single bio_list, there's a risk
+ * of a local or child group which can queue many bios at once filling up
+ * the list starving others.
+ *
+ * To avoid such starvation, dispatched bios are queued separately
+ * according to where they came from. When they are again dispatched to
+ * the parent, they're popped in round-robin order so that no single source
+ * hogs the dispatch window.
+ *
+ * throtl_qnode is used to keep the queued bios separated by their sources.
+ * Bios are queued to throtl_qnode which in turn is queued to
+ * throtl_service_queue and then dispatched in round-robin order.
+ *
+ * It's also used to track the reference counts on blkg's. A qnode always
+ * belongs to a throtl_grp and gets queued on itself or the parent, so
+ * incrementing the reference of the associated throtl_grp when a qnode is
+ * queued and decrementing when dequeued is enough to keep the whole blkg
+ * tree pinned while bios are in flight.
+ */
+struct throtl_qnode {
+ struct list_head node; /* service_queue->queued[] */
+ struct bio_list bios; /* queued bios */
+ struct throtl_grp *tg; /* tg this qnode belongs to */
};
-#define THROTL_RB_ROOT (struct throtl_rb_root) { .rb = RB_ROOT, .left = NULL, \
- .count = 0, .min_disptime = 0}
+struct throtl_service_queue {
+ struct throtl_service_queue *parent_sq; /* the parent service_queue */
+
+ /*
+ * Bios queued directly to this service_queue or dispatched from
+ * children throtl_grp's.
+ */
+ struct list_head queued[2]; /* throtl_qnode [READ/WRITE] */
+ unsigned int nr_queued[2]; /* number of queued bios */
+
+ /*
+ * RB tree of active children throtl_grp's, which are sorted by
+ * their ->disptime.
+ */
+ struct rb_root pending_tree; /* RB tree of active tgs */
+ struct rb_node *first_pending; /* first node in the tree */
+ unsigned int nr_pending; /* # queued in the tree */
+ unsigned long first_pending_disptime; /* disptime of the first tg */
+ struct timer_list pending_timer; /* fires on first_pending_disptime */
+};
+
+enum tg_state_flags {
+ THROTL_TG_PENDING = 1 << 0, /* on parent's pending tree */
+ THROTL_TG_WAS_EMPTY = 1 << 1, /* bio_lists[] became non-empty */
+};
#define rb_entry_tg(node) rb_entry((node), struct throtl_grp, rb_node)
@@ -52,9 +95,26 @@ struct throtl_grp {
/* must be the first member */
struct blkg_policy_data pd;
- /* active throtl group service_tree member */
+ /* active throtl group service_queue member */
struct rb_node rb_node;
+ /* throtl_data this group belongs to */
+ struct throtl_data *td;
+
+ /* this group's service queue */
+ struct throtl_service_queue service_queue;
+
+ /*
+ * qnode_on_self is used when bios are directly queued to this
+ * throtl_grp so that local bios compete fairly with bios
+ * dispatched from children. qnode_on_parent is used when bios are
+ * dispatched from this throtl_grp into its parent and will compete
+ * with the sibling qnode_on_parents and the parent's
+ * qnode_on_self.
+ */
+ struct throtl_qnode qnode_on_self[2];
+ struct throtl_qnode qnode_on_parent[2];
+
/*
* Dispatch time in jiffies. This is the estimated time when group
* will unthrottle and is ready to dispatch more bio. It is used as
@@ -64,11 +124,8 @@ struct throtl_grp {
unsigned int flags;
- /* Two lists for READ and WRITE */
- struct bio_list bio_lists[2];
-
- /* Number of queued bios on READ and WRITE lists */
- unsigned int nr_queued[2];
+ /* are there any throtl rules between this group and td? */
+ bool has_rules[2];
/* bytes per second rate limits */
uint64_t bps[2];
@@ -85,9 +142,6 @@ struct throtl_grp {
unsigned long slice_start[2];
unsigned long slice_end[2];
- /* Some throttle limits got updated for the group */
- int limits_changed;
-
/* Per cpu stats pointer */
struct tg_stats_cpu __percpu *stats_cpu;
@@ -98,7 +152,7 @@ struct throtl_grp {
struct throtl_data
{
/* service tree for active throtl groups */
- struct throtl_rb_root tg_service_tree;
+ struct throtl_service_queue service_queue;
struct request_queue *queue;
@@ -111,9 +165,7 @@ struct throtl_data
unsigned int nr_undestroyed_grps;
/* Work for dispatching throttled bios */
- struct delayed_work throtl_work;
-
- int limits_changed;
+ struct work_struct dispatch_work;
};
/* list and work item to allocate percpu group stats */
@@ -123,6 +175,8 @@ static LIST_HEAD(tg_stats_alloc_list);
static void tg_stats_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);
+static void throtl_pending_timer_fn(unsigned long arg);
+
static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
@@ -143,41 +197,65 @@ static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
return blkg_to_tg(td->queue->root_blkg);
}
-enum tg_state_flags {
- THROTL_TG_FLAG_on_rr = 0, /* on round-robin busy list */
-};
-
-#define THROTL_TG_FNS(name) \
-static inline void throtl_mark_tg_##name(struct throtl_grp *tg) \
-{ \
- (tg)->flags |= (1 << THROTL_TG_FLAG_##name); \
-} \
-static inline void throtl_clear_tg_##name(struct throtl_grp *tg) \
-{ \
- (tg)->flags &= ~(1 << THROTL_TG_FLAG_##name); \
-} \
-static inline int throtl_tg_##name(const struct throtl_grp *tg) \
-{ \
- return ((tg)->flags & (1 << THROTL_TG_FLAG_##name)) != 0; \
+/**
+ * sq_to_tg - return the throtl_grp the specified service queue belongs to
+ * @sq: the throtl_service_queue of interest
+ *
+ * Return the throtl_grp @sq belongs to. If @sq is the top-level one
+ * embedded in throtl_data, %NULL is returned.
+ */
+static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
+{
+ if (sq && sq->parent_sq)
+ return container_of(sq, struct throtl_grp, service_queue);
+ else
+ return NULL;
}
-THROTL_TG_FNS(on_rr);
+/**
+ * sq_to_td - return throtl_data the specified service queue belongs to
+ * @sq: the throtl_service_queue of interest
+ *
+ * A service_queue can be embedded in either a throtl_grp or throtl_data.
+ * Determine the associated throtl_data accordingly and return it.
+ */
+static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
+{
+ struct throtl_grp *tg = sq_to_tg(sq);
-#define throtl_log_tg(td, tg, fmt, args...) do { \
- char __pbuf[128]; \
+ if (tg)
+ return tg->td;
+ else
+ return container_of(sq, struct throtl_data, service_queue);
+}
+
+/**
+ * throtl_log - log debug message via blktrace
+ * @sq: the service_queue being reported
+ * @fmt: printf format string
+ * @args: printf args
+ *
+ * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
+ * throtl_grp; otherwise, just "throtl".
+ *
+ * TODO: this should be made a function and name formatting should happen
+ * after testing whether blktrace is enabled.
+ */
+#define throtl_log(sq, fmt, args...) do { \
+ struct throtl_grp *__tg = sq_to_tg((sq)); \
+ struct throtl_data *__td = sq_to_td((sq)); \
+ \
+ (void)__td; \
+ if ((__tg)) { \
+ char __pbuf[128]; \
\
- blkg_path(tg_to_blkg(tg), __pbuf, sizeof(__pbuf)); \
- blk_add_trace_msg((td)->queue, "throtl %s " fmt, __pbuf, ##args); \
+ blkg_path(tg_to_blkg(__tg), __pbuf, sizeof(__pbuf)); \
+ blk_add_trace_msg(__td->queue, "throtl %s " fmt, __pbuf, ##args); \
+ } else { \
+ blk_add_trace_msg(__td->queue, "throtl " fmt, ##args); \
+ } \
} while (0)
-#define throtl_log(td, fmt, args...) \
- blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)
-
-static inline unsigned int total_nr_queued(struct throtl_data *td)
-{
- return td->nr_queued[0] + td->nr_queued[1];
-}
-
/*
* Worker for allocating per cpu stat for tgs. This is scheduled on the
* system_wq once there are some groups on the alloc_list waiting for
@@ -215,15 +293,141 @@ alloc_stats:
goto alloc_stats;
}
+static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
+{
+ INIT_LIST_HEAD(&qn->node);
+ bio_list_init(&qn->bios);
+ qn->tg = tg;
+}
+
+/**
+ * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
+ * @bio: bio being added
+ * @qn: qnode to add bio to
+ * @queued: the service_queue->queued[] list @qn belongs to
+ *
+ * Add @bio to @qn and put @qn on @queued if it's not already on.
+ * @qn->tg's reference count is bumped when @qn is activated. See the
+ * comment on top of throtl_qnode definition for details.
+ */
+static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
+ struct list_head *queued)
+{
+ bio_list_add(&qn->bios, bio);
+ if (list_empty(&qn->node)) {
+ list_add_tail(&qn->node, queued);
+ blkg_get(tg_to_blkg(qn->tg));
+ }
+}
+
+/**
+ * throtl_peek_queued - peek the first bio on a qnode list
+ * @queued: the qnode list to peek
+ */
+static struct bio *throtl_peek_queued(struct list_head *queued)
+{
+ struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
+ struct bio *bio;
+
+ if (list_empty(queued))
+ return NULL;
+
+ bio = bio_list_peek(&qn->bios);
+ WARN_ON_ONCE(!bio);
+ return bio;
+}
+
+/**
+ * throtl_pop_queued - pop the first bio from a qnode list
+ * @queued: the qnode list to pop a bio from
+ * @tg_to_put: optional out argument for throtl_grp to put
+ *
+ * Pop the first bio from the qnode list @queued. After popping, the first
+ * qnode is removed from @queued if empty or moved to the end of @queued so
+ * that the popping order is round-robin.
+ *
+ * When the first qnode is removed, its associated throtl_grp should be put
+ * too. If @tg_to_put is NULL, this function automatically puts it;
+ * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
+ * responsible for putting it.
+ */
+static struct bio *throtl_pop_queued(struct list_head *queued,
+ struct throtl_grp **tg_to_put)
+{
+ struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
+ struct bio *bio;
+
+ if (list_empty(queued))
+ return NULL;
+
+ bio = bio_list_pop(&qn->bios);
+ WARN_ON_ONCE(!bio);
+
+ if (bio_list_empty(&qn->bios)) {
+ list_del_init(&qn->node);
+ if (tg_to_put)
+ *tg_to_put = qn->tg;
+ else
+ blkg_put(tg_to_blkg(qn->tg));
+ } else {
+ list_move_tail(&qn->node, queued);
+ }
+
+ return bio;
+}
+
+/* init a service_queue, assumes the caller zeroed it */
+static void throtl_service_queue_init(struct throtl_service_queue *sq,
+ struct throtl_service_queue *parent_sq)
+{
+ INIT_LIST_HEAD(&sq->queued[0]);
+ INIT_LIST_HEAD(&sq->queued[1]);
+ sq->pending_tree = RB_ROOT;
+ sq->parent_sq = parent_sq;
+ setup_timer(&sq->pending_timer, throtl_pending_timer_fn,
+ (unsigned long)sq);
+}
+
+static void throtl_service_queue_exit(struct throtl_service_queue *sq)
+{
+ del_timer_sync(&sq->pending_timer);
+}
+
static void throtl_pd_init(struct blkcg_gq *blkg)
{
struct throtl_grp *tg = blkg_to_tg(blkg);
+ struct throtl_data *td = blkg->q->td;
+ struct throtl_service_queue *parent_sq;
unsigned long flags;
+ int rw;
+
+ /*
+ * If sane_hierarchy is enabled, we switch to properly hierarchical
+ * behavior where limits on a given throtl_grp are applied to the
+ * whole subtree rather than just the group itself. e.g. If 16M
+ * read_bps limit is set on the root group, the whole system can't
+ * exceed 16M for the device.
+ *
+ * If sane_hierarchy is not enabled, the broken flat hierarchy
+ * behavior is retained where all throtl_grps are treated as if
+ * they're all separate root groups right below throtl_data.
+ * Limits of a group don't interact with limits of other groups
+ * regardless of the position of the group in the hierarchy.
+ */
+ parent_sq = &td->service_queue;
+
+ if (cgroup_sane_behavior(blkg->blkcg->css.cgroup) && blkg->parent)
+ parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
+
+ throtl_service_queue_init(&tg->service_queue, parent_sq);
+
+ for (rw = READ; rw <= WRITE; rw++) {
+ throtl_qnode_init(&tg->qnode_on_self[rw], tg);
+ throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
+ }
RB_CLEAR_NODE(&tg->rb_node);
- bio_list_init(&tg->bio_lists[0]);
- bio_list_init(&tg->bio_lists[1]);
- tg->limits_changed = false;
+ tg->td = td;
tg->bps[READ] = -1;
tg->bps[WRITE] = -1;
@@ -241,6 +445,30 @@ static void throtl_pd_init(struct blkcg_gq *blkg)
spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
}
+/*
+ * Set has_rules[] if @tg or any of its parents have limits configured.
+ * This doesn't require walking up to the top of the hierarchy as the
+ * parent's has_rules[] is guaranteed to be correct.
+ */
+static void tg_update_has_rules(struct throtl_grp *tg)
+{
+ struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
+ int rw;
+
+ for (rw = READ; rw <= WRITE; rw++)
+ tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
+ (tg->bps[rw] != -1 || tg->iops[rw] != -1);
+}
+
+static void throtl_pd_online(struct blkcg_gq *blkg)
+{
+ /*
+ * We don't want new groups to escape the limits of their ancestors.
+ * Update has_rules[] after a new group is brought online.
+ */
+ tg_update_has_rules(blkg_to_tg(blkg));
+}
+
static void throtl_pd_exit(struct blkcg_gq *blkg)
{
struct throtl_grp *tg = blkg_to_tg(blkg);
@@ -251,6 +479,8 @@ static void throtl_pd_exit(struct blkcg_gq *blkg)
spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
free_percpu(tg->stats_cpu);
+
+ throtl_service_queue_exit(&tg->service_queue);
}
static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
@@ -309,17 +539,18 @@ static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
return tg;
}
-static struct throtl_grp *throtl_rb_first(struct throtl_rb_root *root)
+static struct throtl_grp *
+throtl_rb_first(struct throtl_service_queue *parent_sq)
{
/* Service tree is empty */
- if (!root->count)
+ if (!parent_sq->nr_pending)
return NULL;
- if (!root->left)
- root->left = rb_first(&root->rb);
+ if (!parent_sq->first_pending)
+ parent_sq->first_pending = rb_first(&parent_sq->pending_tree);
- if (root->left)
- return rb_entry_tg(root->left);
+ if (parent_sq->first_pending)
+ return rb_entry_tg(parent_sq->first_pending);
return NULL;
}
@@ -330,29 +561,30 @@ static void rb_erase_init(struct rb_node *n, struct rb_root *root)
RB_CLEAR_NODE(n);
}
-static void throtl_rb_erase(struct rb_node *n, struct throtl_rb_root *root)
+static void throtl_rb_erase(struct rb_node *n,
+ struct throtl_service_queue *parent_sq)
{
- if (root->left == n)
- root->left = NULL;
- rb_erase_init(n, &root->rb);
- --root->count;
+ if (parent_sq->first_pending == n)
+ parent_sq->first_pending = NULL;
+ rb_erase_init(n, &parent_sq->pending_tree);
+ --parent_sq->nr_pending;
}
-static void update_min_dispatch_time(struct throtl_rb_root *st)
+static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
{
struct throtl_grp *tg;
- tg = throtl_rb_first(st);
+ tg = throtl_rb_first(parent_sq);
if (!tg)
return;
- st->min_disptime = tg->disptime;
+ parent_sq->first_pending_disptime = tg->disptime;
}
-static void
-tg_service_tree_add(struct throtl_rb_root *st, struct throtl_grp *tg)
+static void tg_service_queue_add(struct throtl_grp *tg)
{
- struct rb_node **node = &st->rb.rb_node;
+ struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
+ struct rb_node **node = &parent_sq->pending_tree.rb_node;
struct rb_node *parent = NULL;
struct throtl_grp *__tg;
unsigned long key = tg->disptime;
@@ -371,89 +603,135 @@ tg_service_tree_add(struct throtl_rb_root *st, struct throtl_grp *tg)
}
if (left)
- st->left = &tg->rb_node;
+ parent_sq->first_pending = &tg->rb_node;
rb_link_node(&tg->rb_node, parent, node);
- rb_insert_color(&tg->rb_node, &st->rb);
+ rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
}
-static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
+static void __throtl_enqueue_tg(struct throtl_grp *tg)
{
- struct throtl_rb_root *st = &td->tg_service_tree;
+ tg_service_queue_add(tg);
+ tg->flags |= THROTL_TG_PENDING;
+ tg->service_queue.parent_sq->nr_pending++;
+}
- tg_service_tree_add(st, tg);
- throtl_mark_tg_on_rr(tg);
- st->count++;
+static void throtl_enqueue_tg(struct throtl_grp *tg)
+{
+ if (!(tg->flags & THROTL_TG_PENDING))
+ __throtl_enqueue_tg(tg);
}
-static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
+static void __throtl_dequeue_tg(struct throtl_grp *tg)
{
- if (!throtl_tg_on_rr(tg))
- __throtl_enqueue_tg(td, tg);
+ throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
+ tg->flags &= ~THROTL_TG_PENDING;
}
-static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
+static void throtl_dequeue_tg(struct throtl_grp *tg)
{
- throtl_rb_erase(&tg->rb_node, &td->tg_service_tree);
- throtl_clear_tg_on_rr(tg);
+ if (tg->flags & THROTL_TG_PENDING)
+ __throtl_dequeue_tg(tg);
}
-static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
+/* Call with queue lock held */
+static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
+ unsigned long expires)
{
- if (throtl_tg_on_rr(tg))
- __throtl_dequeue_tg(td, tg);
+ mod_timer(&sq->pending_timer, expires);
+ throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
+ expires - jiffies, jiffies);
}
-static void throtl_schedule_next_dispatch(struct throtl_data *td)
+/**
+ * throtl_schedule_next_dispatch - schedule the next dispatch cycle
+ * @sq: the service_queue to schedule dispatch for
+ * @force: force scheduling
+ *
+ * Arm @sq->pending_timer so that the next dispatch cycle starts on the
+ * dispatch time of the first pending child. Returns %true if either timer
+ * is armed or there's no pending child left. %false if the current
+ * dispatch window is still open and the caller should continue
+ * dispatching.
+ *
+ * If @force is %true, the dispatch timer is always scheduled and this
+ * function is guaranteed to return %true. This is to be used when the
+ * caller can't dispatch itself and needs to invoke pending_timer
+ * unconditionally. Note that forced scheduling is likely to induce short
+ * delay before dispatch starts even if @sq->first_pending_disptime is not
+ * in the future and thus shouldn't be used in hot paths.
+ */
+static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
+ bool force)
{
- struct throtl_rb_root *st = &td->tg_service_tree;
+ /* any pending children left? */
+ if (!sq->nr_pending)
+ return true;
- /*
- * If there are more bios pending, schedule more work.
- */
- if (!total_nr_queued(td))
- return;
+ update_min_dispatch_time(sq);
- BUG_ON(!st->count);
+ /* is the next dispatch time in the future? */
+ if (force || time_after(sq->first_pending_disptime, jiffies)) {
+ throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
+ return true;
+ }
- update_min_dispatch_time(st);
+ /* tell the caller to continue dispatching */
+ return false;
+}
- if (time_before_eq(st->min_disptime, jiffies))
- throtl_schedule_delayed_work(td, 0);
- else
- throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
+static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
+ bool rw, unsigned long start)
+{
+ tg->bytes_disp[rw] = 0;
+ tg->io_disp[rw] = 0;
+
+ /*
+ * Previous slice has expired. We must have trimmed it after last
+ * bio dispatch. That means since start of last slice, we never used
+ * that bandwidth. Do try to make use of that bandwidth while giving
+ * credit.
+ */
+ if (time_after_eq(start, tg->slice_start[rw]))
+ tg->slice_start[rw] = start;
+
+ tg->slice_end[rw] = jiffies + throtl_slice;
+ throtl_log(&tg->service_queue,
+ "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
+ rw == READ ? 'R' : 'W', tg->slice_start[rw],
+ tg->slice_end[rw], jiffies);
}
-static inline void
-throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
+static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
{
tg->bytes_disp[rw] = 0;
tg->io_disp[rw] = 0;
tg->slice_start[rw] = jiffies;
tg->slice_end[rw] = jiffies + throtl_slice;
- throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
- rw == READ ? 'R' : 'W', tg->slice_start[rw],
- tg->slice_end[rw], jiffies);
+ throtl_log(&tg->service_queue,
+ "[%c] new slice start=%lu end=%lu jiffies=%lu",
+ rw == READ ? 'R' : 'W', tg->slice_start[rw],
+ tg->slice_end[rw], jiffies);
}
-static inline void throtl_set_slice_end(struct throtl_data *td,
- struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
+static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
+ unsigned long jiffy_end)
{
tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
}
-static inline void throtl_extend_slice(struct throtl_data *td,
- struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
+static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
+ unsigned long jiffy_end)
{
tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
- throtl_log_tg(td, tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
- rw == READ ? 'R' : 'W', tg->slice_start[rw],
- tg->slice_end[rw], jiffies);
+ throtl_log(&tg->service_queue,
+ "[%c] extend slice start=%lu end=%lu jiffies=%lu",
+ rw == READ ? 'R' : 'W', tg->slice_start[rw],
+ tg->slice_end[rw], jiffies);
}
/* Determine if previously allocated or extended slice is complete or not */
-static bool
-throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
+static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
{
if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
return 0;
@@ -462,8 +740,7 @@ throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
}
/* Trim the used slices and adjust slice start accordingly */
-static inline void
-throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
+static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
unsigned long nr_slices, time_elapsed, io_trim;
u64 bytes_trim, tmp;
@@ -475,7 +752,7 @@ throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
* renewed. Don't try to trim the slice if slice is used. A new
* slice will start when appropriate.
*/
- if (throtl_slice_used(td, tg, rw))
+ if (throtl_slice_used(tg, rw))
return;
/*
@@ -486,7 +763,7 @@ throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
* is bad because it does not allow new slice to start.
*/
- throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);
+ throtl_set_slice_end(tg, rw, jiffies + throtl_slice);
time_elapsed = jiffies - tg->slice_start[rw];
@@ -515,14 +792,14 @@ throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
tg->slice_start[rw] += nr_slices * throtl_slice;
- throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
- " start=%lu end=%lu jiffies=%lu",
- rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
- tg->slice_start[rw], tg->slice_end[rw], jiffies);
+ throtl_log(&tg->service_queue,
+ "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
+ rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
+ tg->slice_start[rw], tg->slice_end[rw], jiffies);
}
-static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
- struct bio *bio, unsigned long *wait)
+static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
+ unsigned long *wait)
{
bool rw = bio_data_dir(bio);
unsigned int io_allowed;
@@ -571,8 +848,8 @@ static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
return 0;
}
-static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
- struct bio *bio, unsigned long *wait)
+static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
+ unsigned long *wait)
{
bool rw = bio_data_dir(bio);
u64 bytes_allowed, extra_bytes, tmp;
@@ -613,18 +890,12 @@ static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
return 0;
}
-static bool tg_no_rule_group(struct throtl_grp *tg, bool rw) {
- if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
- return 1;
- return 0;
-}
-
/*
* Returns whether one can dispatch a bio or not. Also returns approx number
* of jiffies to wait before this bio is with-in IO rate and can be dispatched
*/
-static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
- struct bio *bio, unsigned long *wait)
+static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
+ unsigned long *wait)
{
bool rw = bio_data_dir(bio);
unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
@@ -635,7 +906,8 @@ static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
* this function with a different bio if there are other bios
* queued.
*/
- BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));
+ BUG_ON(tg->service_queue.nr_queued[rw] &&
+ bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
/* If tg->bps = -1, then BW is unlimited */
if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
@@ -649,15 +921,15 @@ static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
* existing slice to make sure it is at least throtl_slice interval
* long since now.
*/
- if (throtl_slice_used(td, tg, rw))
- throtl_start_new_slice(td, tg, rw);
+ if (throtl_slice_used(tg, rw))
+ throtl_start_new_slice(tg, rw);
else {
if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
- throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
+ throtl_extend_slice(tg, rw, jiffies + throtl_slice);
}
- if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
- && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {
+ if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
+ tg_with_in_iops_limit(tg, bio, &iops_wait)) {
if (wait)
*wait = 0;
return 1;
@@ -669,7 +941,7 @@ static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
*wait = max_wait;
if (time_before(tg->slice_end[rw], jiffies + max_wait))
- throtl_extend_slice(td, tg, rw, jiffies + max_wait);
+ throtl_extend_slice(tg, rw, jiffies + max_wait);
return 0;
}
@@ -708,65 +980,136 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
tg->bytes_disp[rw] += bio->bi_size;
tg->io_disp[rw]++;
- throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, bio->bi_rw);
+ /*
+ * REQ_THROTTLED is used to prevent the same bio to be throttled
+ * more than once as a throttled bio will go through blk-throtl the
+ * second time when it eventually gets issued. Set it when a bio
+ * is being charged to a tg.
+ *
+ * Dispatch stats aren't recursive and each @bio should only be
+ * accounted by the @tg it was originally associated with. Let's
+ * update the stats when setting REQ_THROTTLED for the first time
+ * which is guaranteed to be for the @bio's original tg.
+ */
+ if (!(bio->bi_rw & REQ_THROTTLED)) {
+ bio->bi_rw |= REQ_THROTTLED;
+ throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size,
+ bio->bi_rw);
+ }
}
-static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
- struct bio *bio)
+/**
+ * throtl_add_bio_tg - add a bio to the specified throtl_grp
+ * @bio: bio to add
+ * @qn: qnode to use
+ * @tg: the target throtl_grp
+ *
+ * Add @bio to @tg's service_queue using @qn. If @qn is not specified,
+ * tg->qnode_on_self[] is used.
+ */
+static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
+ struct throtl_grp *tg)
{
+ struct throtl_service_queue *sq = &tg->service_queue;
bool rw = bio_data_dir(bio);
- bio_list_add(&tg->bio_lists[rw], bio);
- /* Take a bio reference on tg */
- blkg_get(tg_to_blkg(tg));
- tg->nr_queued[rw]++;
- td->nr_queued[rw]++;
- throtl_enqueue_tg(td, tg);
+ if (!qn)
+ qn = &tg->qnode_on_self[rw];
+
+ /*
+ * If @tg doesn't currently have any bios queued in the same
+ * direction, queueing @bio can change when @tg should be
+ * dispatched. Mark that @tg was empty. This is automatically
+ * cleared on the next tg_update_disptime().
+ */
+ if (!sq->nr_queued[rw])
+ tg->flags |= THROTL_TG_WAS_EMPTY;
+
+ throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);
+
+ sq->nr_queued[rw]++;
+ throtl_enqueue_tg(tg);
}
-static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
+static void tg_update_disptime(struct throtl_grp *tg)
{
+ struct throtl_service_queue *sq = &tg->service_queue;
unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
struct bio *bio;
- if ((bio = bio_list_peek(&tg->bio_lists[READ])))
- tg_may_dispatch(td, tg, bio, &read_wait);
+ if ((bio = throtl_peek_queued(&sq->queued[READ])))
+ tg_may_dispatch(tg, bio, &read_wait);
- if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
- tg_may_dispatch(td, tg, bio, &write_wait);
+ if ((bio = throtl_peek_queued(&sq->queued[WRITE])))
+ tg_may_dispatch(tg, bio, &write_wait);
min_wait = min(read_wait, write_wait);
disptime = jiffies + min_wait;
/* Update dispatch time */
- throtl_dequeue_tg(td, tg);
+ throtl_dequeue_tg(tg);
tg->disptime = disptime;
- throtl_enqueue_tg(td, tg);
+ throtl_enqueue_tg(tg);
+
+ /* see throtl_add_bio_tg() */
+ tg->flags &= ~THROTL_TG_WAS_EMPTY;
}
-static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
- bool rw, struct bio_list *bl)
+static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
+ struct throtl_grp *parent_tg, bool rw)
{
- struct bio *bio;
+ if (throtl_slice_used(parent_tg, rw)) {
+ throtl_start_new_slice_with_credit(parent_tg, rw,
+ child_tg->slice_start[rw]);
+ }
+
+}
- bio = bio_list_pop(&tg->bio_lists[rw]);
- tg->nr_queued[rw]--;
- /* Drop bio reference on blkg */
- blkg_put(tg_to_blkg(tg));
+static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
+{
+ struct throtl_service_queue *sq = &tg->service_queue;
+ struct throtl_service_queue *parent_sq = sq->parent_sq;
+ struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
+ struct throtl_grp *tg_to_put = NULL;
+ struct bio *bio;
- BUG_ON(td->nr_queued[rw] <= 0);
- td->nr_queued[rw]--;
+ /*
+ * @bio is being transferred from @tg to @parent_sq. Popping a bio
+ * from @tg may put its reference and @parent_sq might end up
+ * getting released prematurely. Remember the tg to put and put it
+ * after @bio is transferred to @parent_sq.
+ */
+ bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
+ sq->nr_queued[rw]--;
throtl_charge_bio(tg, bio);
- bio_list_add(bl, bio);
- bio->bi_rw |= REQ_THROTTLED;
- throtl_trim_slice(td, tg, rw);
+ /*
+ * If our parent is another tg, we just need to transfer @bio to
+ * the parent using throtl_add_bio_tg(). If our parent is
+ * @td->service_queue, @bio is ready to be issued. Put it on its
+ * bio_lists[] and decrease total number queued. The caller is
+ * responsible for issuing these bios.
+ */
+ if (parent_tg) {
+ throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
+ start_parent_slice_with_credit(tg, parent_tg, rw);
+ } else {
+ throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
+ &parent_sq->queued[rw]);
+ BUG_ON(tg->td->nr_queued[rw] <= 0);
+ tg->td->nr_queued[rw]--;
+ }
+
+ throtl_trim_slice(tg, rw);
+
+ if (tg_to_put)
+ blkg_put(tg_to_blkg(tg_to_put));
}
-static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
- struct bio_list *bl)
+static int throtl_dispatch_tg(struct throtl_grp *tg)
{
+ struct throtl_service_queue *sq = &tg->service_queue;
unsigned int nr_reads = 0, nr_writes = 0;
unsigned int max_nr_reads = throtl_grp_quantum*3/4;
unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
@@ -774,20 +1117,20 @@ static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
/* Try to dispatch 75% READS and 25% WRITES */
- while ((bio = bio_list_peek(&tg->bio_lists[READ]))
- && tg_may_dispatch(td, tg, bio, NULL)) {
+ while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
+ tg_may_dispatch(tg, bio, NULL)) {
- tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
+ tg_dispatch_one_bio(tg, bio_data_dir(bio));
nr_reads++;
if (nr_reads >= max_nr_reads)
break;
}
- while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))
- && tg_may_dispatch(td, tg, bio, NULL)) {
+ while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
+ tg_may_dispatch(tg, bio, NULL)) {
- tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
+ tg_dispatch_one_bio(tg, bio_data_dir(bio));
nr_writes++;
if (nr_writes >= max_nr_writes)
@@ -797,14 +1140,13 @@ static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
return nr_reads + nr_writes;
}
-static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
+static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
{
unsigned int nr_disp = 0;
- struct throtl_grp *tg;
- struct throtl_rb_root *st = &td->tg_service_tree;
while (1) {
- tg = throtl_rb_first(st);
+ struct throtl_grp *tg = throtl_rb_first(parent_sq);
+ struct throtl_service_queue *sq = &tg->service_queue;
if (!tg)
break;
@@ -812,14 +1154,12 @@ static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
if (time_before(jiffies, tg->disptime))
break;
- throtl_dequeue_tg(td, tg);
+ throtl_dequeue_tg(tg);
- nr_disp += throtl_dispatch_tg(td, tg, bl);
+ nr_disp += throtl_dispatch_tg(tg);
- if (tg->nr_queued[0] || tg->nr_queued[1]) {
- tg_update_disptime(td, tg);
- throtl_enqueue_tg(td, tg);
- }
+ if (sq->nr_queued[0] || sq->nr_queued[1])
+ tg_update_disptime(tg);
if (nr_disp >= throtl_quantum)
break;
@@ -828,111 +1168,111 @@ static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
return nr_disp;
}
-static void throtl_process_limit_change(struct throtl_data *td)
+/**
+ * throtl_pending_timer_fn - timer function for service_queue->pending_timer
+ * @arg: the throtl_service_queue being serviced
+ *
+ * This timer is armed when a child throtl_grp with active bio's become
+ * pending and queued on the service_queue's pending_tree and expires when
+ * the first child throtl_grp should be dispatched. This function
+ * dispatches bio's from the children throtl_grps to the parent
+ * service_queue.
+ *
+ * If the parent's parent is another throtl_grp, dispatching is propagated
+ * by either arming its pending_timer or repeating dispatch directly. If
+ * the top-level service_tree is reached, throtl_data->dispatch_work is
+ * kicked so that the ready bio's are issued.
+ */
+static void throtl_pending_timer_fn(unsigned long arg)
{
+ struct throtl_service_queue *sq = (void *)arg;
+ struct throtl_grp *tg = sq_to_tg(sq);
+ struct throtl_data *td = sq_to_td(sq);
struct request_queue *q = td->queue;
- struct blkcg_gq *blkg, *n;
-
- if (!td->limits_changed)
- return;
-
- xchg(&td->limits_changed, false);
-
- throtl_log(td, "limits changed");
-
- list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
- struct throtl_grp *tg = blkg_to_tg(blkg);
+ struct throtl_service_queue *parent_sq;
+ bool dispatched;
+ int ret;
- if (!tg->limits_changed)
- continue;
+ spin_lock_irq(q->queue_lock);
+again:
+ parent_sq = sq->parent_sq;
+ dispatched = false;
+
+ while (true) {
+ throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
+ sq->nr_queued[READ] + sq->nr_queued[WRITE],
+ sq->nr_queued[READ], sq->nr_queued[WRITE]);
+
+ ret = throtl_select_dispatch(sq);
+ if (ret) {
+ throtl_log(sq, "bios disp=%u", ret);
+ dispatched = true;
+ }
- if (!xchg(&tg->limits_changed, false))
- continue;
+ if (throtl_schedule_next_dispatch(sq, false))
+ break;
- throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
- " riops=%u wiops=%u", tg->bps[READ], tg->bps[WRITE],
- tg->iops[READ], tg->iops[WRITE]);
+ /* this dispatch window is still open, relax and repeat */
+ spin_unlock_irq(q->queue_lock);
+ cpu_relax();
+ spin_lock_irq(q->queue_lock);
+ }
- /*
- * Restart the slices for both READ and WRITES. It
- * might happen that a group's limit are dropped
- * suddenly and we don't want to account recently
- * dispatched IO with new low rate
- */
- throtl_start_new_slice(td, tg, 0);
- throtl_start_new_slice(td, tg, 1);
+ if (!dispatched)
+ goto out_unlock;
- if (throtl_tg_on_rr(tg))
- tg_update_disptime(td, tg);
+ if (parent_sq) {
+ /* @parent_sq is another throtl_grp, propagate dispatch */
+ if (tg->flags & THROTL_TG_WAS_EMPTY) {
+ tg_update_disptime(tg);
+ if (!throtl_schedule_next_dispatch(parent_sq, false)) {
+ /* window is already open, repeat dispatching */
+ sq = parent_sq;
+ tg = sq_to_tg(sq);
+ goto again;
+ }
+ }
+ } else {
+ /* reached the top-level, queue issuing */
+ queue_work(kthrotld_workqueue, &td->dispatch_work);
}
+out_unlock:
+ spin_unlock_irq(q->queue_lock);
}
-/* Dispatch throttled bios. Should be called without queue lock held. */
-static int throtl_dispatch(struct request_queue *q)
+/**
+ * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
+ * @work: work item being executed
+ *
+ * This function is queued for execution when bio's reach the queued lists
+ * of throtl_data->service_queue. Those bio's are ready and issued by this
+ * function.
+ */
+void blk_throtl_dispatch_work_fn(struct work_struct *work)
{
- struct throtl_data *td = q->td;
- unsigned int nr_disp = 0;
+ struct throtl_data *td = container_of(work, struct throtl_data,
+ dispatch_work);
+ struct throtl_service_queue *td_sq = &td->service_queue;
+ struct request_queue *q = td->queue;
struct bio_list bio_list_on_stack;
struct bio *bio;
struct blk_plug plug;
-
- spin_lock_irq(q->queue_lock);
-
- throtl_process_limit_change(td);
-
- if (!total_nr_queued(td))
- goto out;
+ int rw;
bio_list_init(&bio_list_on_stack);
- throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
- total_nr_queued(td), td->nr_queued[READ],
- td->nr_queued[WRITE]);
-
- nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);
-
- if (nr_disp)
- throtl_log(td, "bios disp=%u", nr_disp);
-
- throtl_schedule_next_dispatch(td);
-out:
+ spin_lock_irq(q->queue_lock);
+ for (rw = READ; rw <= WRITE; rw++)
+ while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
+ bio_list_add(&bio_list_on_stack, bio);
spin_unlock_irq(q->queue_lock);
- /*
- * If we dispatched some requests, unplug the queue to make sure
- * immediate dispatch
- */
- if (nr_disp) {
+ if (!bio_list_empty(&bio_list_on_stack)) {
blk_start_plug(&plug);
while((bio = bio_list_pop(&bio_list_on_stack)))
generic_make_request(bio);
blk_finish_plug(&plug);
}
- return nr_disp;
-}
-
-void blk_throtl_work(struct work_struct *work)
-{
- struct throtl_data *td = container_of(work, struct throtl_data,
- throtl_work.work);
- struct request_queue *q = td->queue;
-
- throtl_dispatch(q);
-}
-
-/* Call with queue lock held */
-static void
-throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
-{
-
- struct delayed_work *dwork = &td->throtl_work;
-
- /* schedule work if limits changed even if no bio is queued */
- if (total_nr_queued(td) || td->limits_changed) {
- mod_delayed_work(kthrotld_workqueue, dwork, delay);
- throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
- delay, jiffies);
- }
}
static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
@@ -953,10 +1293,10 @@ static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
-static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
- struct seq_file *sf)
+static int tg_print_cpu_rwstat(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct seq_file *sf)
{
- struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+ struct blkcg *blkcg = css_to_blkcg(css);
blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkcg_policy_throtl,
cft->private, true);
@@ -985,29 +1325,31 @@ static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
return __blkg_prfill_u64(sf, pd, v);
}
-static int tg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
- struct seq_file *sf)
+static int tg_print_conf_u64(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct seq_file *sf)
{
- blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_u64,
+ blkcg_print_blkgs(sf, css_to_blkcg(css), tg_prfill_conf_u64,
&blkcg_policy_throtl, cft->private, false);
return 0;
}
-static int tg_print_conf_uint(struct cgroup *cgrp, struct cftype *cft,
- struct seq_file *sf)
+static int tg_print_conf_uint(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct seq_file *sf)
{
- blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_uint,
+ blkcg_print_blkgs(sf, css_to_blkcg(css), tg_prfill_conf_uint,
&blkcg_policy_throtl, cft->private, false);
return 0;
}
-static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
- bool is_u64)
+static int tg_set_conf(struct cgroup_subsys_state *css, struct cftype *cft,
+ const char *buf, bool is_u64)
{
- struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+ struct blkcg *blkcg = css_to_blkcg(css);
struct blkg_conf_ctx ctx;
struct throtl_grp *tg;
- struct throtl_data *td;
+ struct throtl_service_queue *sq;
+ struct blkcg_gq *blkg;
+ struct cgroup_subsys_state *pos_css;
int ret;
ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
@@ -1015,7 +1357,7 @@ static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
return ret;
tg = blkg_to_tg(ctx.blkg);
- td = ctx.blkg->q->td;
+ sq = &tg->service_queue;
if (!ctx.v)
ctx.v = -1;
@@ -1025,25 +1367,51 @@ static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
else
*(unsigned int *)((void *)tg + cft->private) = ctx.v;
- /* XXX: we don't need the following deferred processing */
- xchg(&tg->limits_changed, true);
- xchg(&td->limits_changed, true);
- throtl_schedule_delayed_work(td, 0);
+ throtl_log(&tg->service_queue,
+ "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
+ tg->bps[READ], tg->bps[WRITE],
+ tg->iops[READ], tg->iops[WRITE]);
+
+ /*
+ * Update has_rules[] flags for the updated tg's subtree. A tg is
+ * considered to have rules if either the tg itself or any of its
+ * ancestors has rules. This identifies groups without any
+ * restrictions in the whole hierarchy and allows them to bypass
+ * blk-throttle.
+ */
+ blkg_for_each_descendant_pre(blkg, pos_css, ctx.blkg)
+ tg_update_has_rules(blkg_to_tg(blkg));
+
+ /*
+ * We're already holding queue_lock and know @tg is valid. Let's
+ * apply the new config directly.
+ *
+ * Restart the slices for both READ and WRITES. It might happen
+ * that a group's limits are dropped suddenly and we don't want to
+ * account recently dispatched IO with new low rate.
+ */
+ throtl_start_new_slice(tg, 0);
+ throtl_start_new_slice(tg, 1);
+
+ if (tg->flags & THROTL_TG_PENDING) {
+ tg_update_disptime(tg);
+ throtl_schedule_next_dispatch(sq->parent_sq, true);
+ }
blkg_conf_finish(&ctx);
return 0;
}
-static int tg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
+static int tg_set_conf_u64(struct cgroup_subsys_state *css, struct cftype *cft,
const char *buf)
{
- return tg_set_conf(cgrp, cft, buf, true);
+ return tg_set_conf(css, cft, buf, true);
}
-static int tg_set_conf_uint(struct cgroup *cgrp, struct cftype *cft,
+static int tg_set_conf_uint(struct cgroup_subsys_state *css, struct cftype *cft,
const char *buf)
{
- return tg_set_conf(cgrp, cft, buf, false);
+ return tg_set_conf(css, cft, buf, false);
}
static struct cftype throtl_files[] = {
@@ -1092,7 +1460,7 @@ static void throtl_shutdown_wq(struct request_queue *q)
{
struct throtl_data *td = q->td;
- cancel_delayed_work_sync(&td->throtl_work);
+ cancel_work_sync(&td->dispatch_work);
}
static struct blkcg_policy blkcg_policy_throtl = {
@@ -1100,6 +1468,7 @@ static struct blkcg_policy blkcg_policy_throtl = {
.cftypes = throtl_files,
.pd_init_fn = throtl_pd_init,
+ .pd_online_fn = throtl_pd_online,
.pd_exit_fn = throtl_pd_exit,
.pd_reset_stats_fn = throtl_pd_reset_stats,
};
@@ -1107,15 +1476,16 @@ static struct blkcg_policy blkcg_policy_throtl = {
bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
struct throtl_data *td = q->td;
+ struct throtl_qnode *qn = NULL;
struct throtl_grp *tg;
- bool rw = bio_data_dir(bio), update_disptime = true;
+ struct throtl_service_queue *sq;
+ bool rw = bio_data_dir(bio);
struct blkcg *blkcg;
bool throttled = false;
- if (bio->bi_rw & REQ_THROTTLED) {
- bio->bi_rw &= ~REQ_THROTTLED;
+ /* see throtl_charge_bio() */
+ if (bio->bi_rw & REQ_THROTTLED)
goto out;
- }
/*
* A throtl_grp pointer retrieved under rcu can be used to access
@@ -1126,7 +1496,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
blkcg = bio_blkcg(bio);
tg = throtl_lookup_tg(td, blkcg);
if (tg) {
- if (tg_no_rule_group(tg, rw)) {
+ if (!tg->has_rules[rw]) {
throtl_update_dispatch_stats(tg_to_blkg(tg),
bio->bi_size, bio->bi_rw);
goto out_unlock_rcu;
@@ -1142,18 +1512,18 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
if (unlikely(!tg))
goto out_unlock;
- if (tg->nr_queued[rw]) {
- /*
- * There is already another bio queued in same dir. No
- * need to update dispatch time.
- */
- update_disptime = false;
- goto queue_bio;
+ sq = &tg->service_queue;
- }
+ while (true) {
+ /* throtl is FIFO - if bios are already queued, should queue */
+ if (sq->nr_queued[rw])
+ break;
+
+ /* if above limits, break to queue */
+ if (!tg_may_dispatch(tg, bio, NULL))
+ break;
- /* Bio is with-in rate limit of group */
- if (tg_may_dispatch(td, tg, bio, NULL)) {
+ /* within limits, let's charge and dispatch directly */
throtl_charge_bio(tg, bio);
/*
@@ -1167,25 +1537,41 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
*
* So keep on trimming slice even if bio is not queued.
*/
- throtl_trim_slice(td, tg, rw);
- goto out_unlock;
+ throtl_trim_slice(tg, rw);
+
+ /*
+ * @bio passed through this layer without being throttled.
+ * Climb up the ladder. If we're already at the top, it
+ * can be executed directly.
+ */
+ qn = &tg->qnode_on_parent[rw];
+ sq = sq->parent_sq;
+ tg = sq_to_tg(sq);
+ if (!tg)
+ goto out_unlock;
}
-queue_bio:
- throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
- " iodisp=%u iops=%u queued=%d/%d",
- rw == READ ? 'R' : 'W',
- tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
- tg->io_disp[rw], tg->iops[rw],
- tg->nr_queued[READ], tg->nr_queued[WRITE]);
+ /* out-of-limit, queue to @tg */
+ throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
+ rw == READ ? 'R' : 'W',
+ tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
+ tg->io_disp[rw], tg->iops[rw],
+ sq->nr_queued[READ], sq->nr_queued[WRITE]);
bio_associate_current(bio);
- throtl_add_bio_tg(q->td, tg, bio);
+ tg->td->nr_queued[rw]++;
+ throtl_add_bio_tg(bio, qn, tg);
throttled = true;
- if (update_disptime) {
- tg_update_disptime(td, tg);
- throtl_schedule_next_dispatch(td);
+ /*
+ * Update @tg's dispatch time and force schedule dispatch if @tg
+ * was empty before @bio. The forced scheduling isn't likely to
+ * cause undue delay as @bio is likely to be dispatched directly if
+ * its @tg's disptime is not in the future.
+ */
+ if (tg->flags & THROTL_TG_WAS_EMPTY) {
+ tg_update_disptime(tg);
+ throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
}
out_unlock:
@@ -1193,9 +1579,38 @@ out_unlock:
out_unlock_rcu:
rcu_read_unlock();
out:
+ /*
+ * As multiple blk-throtls may stack in the same issue path, we
+ * don't want bios to leave with the flag set. Clear the flag if
+ * being issued.
+ */
+ if (!throttled)
+ bio->bi_rw &= ~REQ_THROTTLED;
return throttled;
}
+/*
+ * Dispatch all bios from all children tg's queued on @parent_sq. On
+ * return, @parent_sq is guaranteed to not have any active children tg's
+ * and all bios from previously active tg's are on @parent_sq->bio_lists[].
+ */
+static void tg_drain_bios(struct throtl_service_queue *parent_sq)
+{
+ struct throtl_grp *tg;
+
+ while ((tg = throtl_rb_first(parent_sq))) {
+ struct throtl_service_queue *sq = &tg->service_queue;
+ struct bio *bio;
+
+ throtl_dequeue_tg(tg);
+
+ while ((bio = throtl_peek_queued(&sq->queued[READ])))
+ tg_dispatch_one_bio(tg, bio_data_dir(bio));
+ while ((bio = throtl_peek_queued(&sq->queued[WRITE])))
+ tg_dispatch_one_bio(tg, bio_data_dir(bio));
+ }
+}
+
/**
* blk_throtl_drain - drain throttled bios
* @q: request_queue to drain throttled bios for
@@ -1206,27 +1621,34 @@ void blk_throtl_drain(struct request_queue *q)
__releases(q->queue_lock) __acquires(q->queue_lock)
{
struct throtl_data *td = q->td;
- struct throtl_rb_root *st = &td->tg_service_tree;
- struct throtl_grp *tg;
- struct bio_list bl;
+ struct blkcg_gq *blkg;
+ struct cgroup_subsys_state *pos_css;
struct bio *bio;
+ int rw;
queue_lockdep_assert_held(q);
+ rcu_read_lock();
- bio_list_init(&bl);
+ /*
+ * Drain each tg while doing post-order walk on the blkg tree, so
+ * that all bios are propagated to td->service_queue. It'd be
+ * better to walk service_queue tree directly but blkg walk is
+ * easier.
+ */
+ blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
+ tg_drain_bios(&blkg_to_tg(blkg)->service_queue);
- while ((tg = throtl_rb_first(st))) {
- throtl_dequeue_tg(td, tg);
+ /* finally, transfer bios from top-level tg's into the td */
+ tg_drain_bios(&td->service_queue);
- while ((bio = bio_list_peek(&tg->bio_lists[READ])))
- tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
- while ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
- tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
- }
+ rcu_read_unlock();
spin_unlock_irq(q->queue_lock);
- while ((bio = bio_list_pop(&bl)))
- generic_make_request(bio);
+ /* all bios now should be in td->service_queue, issue them */
+ for (rw = READ; rw <= WRITE; rw++)
+ while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
+ NULL)))
+ generic_make_request(bio);
spin_lock_irq(q->queue_lock);
}
@@ -1240,9 +1662,8 @@ int blk_throtl_init(struct request_queue *q)
if (!td)
return -ENOMEM;
- td->tg_service_tree = THROTL_RB_ROOT;
- td->limits_changed = false;
- INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);
+ INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
+ throtl_service_queue_init(&td->service_queue, NULL);
q->td = td;
td->queue = q;
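The tg_set_conf() path above takes its input through blkg_conf_prep(), i.e. a
"MAJOR:MINOR VALUE" string written into one of the blkio throttle control
files. The following minimal userspace sketch exercises that path; the cgroup
mount point, group name and control-file name are assumptions (a typical
cgroup-v1 blkio setup), not something introduced by this patch.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* assumed path: cgroup-v1 blkio controller mounted at /sys/fs/cgroup/blkio */
	const char *path =
		"/sys/fs/cgroup/blkio/mygroup/blkio.throttle.read_bps_device";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}

	/* limit device 8:16 to 1 MiB/s of reads; parsed by blkg_conf_prep() */
	if (fprintf(f, "8:16 1048576\n") < 0)
		perror("fprintf");

	fclose(f);
	return EXIT_SUCCESS;
}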
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 6e4744c..65f1035 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -82,9 +82,10 @@ void blk_delete_timer(struct request *req)
static void blk_rq_timed_out(struct request *req)
{
struct request_queue *q = req->q;
- enum blk_eh_timer_return ret;
+ enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
- ret = q->rq_timed_out_fn(req);
+ if (q->rq_timed_out_fn)
+ ret = q->rq_timed_out_fn(req);
switch (ret) {
case BLK_EH_HANDLED:
__blk_complete_request(req);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index d5cd3131..434944c 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1607,12 +1607,11 @@ static u64 cfqg_prfill_weight_device(struct seq_file *sf,
return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
}
-static int cfqg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
- struct seq_file *sf)
+static int cfqg_print_weight_device(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct seq_file *sf)
{
- blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
- cfqg_prfill_weight_device, &blkcg_policy_cfq, 0,
- false);
+ blkcg_print_blkgs(sf, css_to_blkcg(css), cfqg_prfill_weight_device,
+ &blkcg_policy_cfq, 0, false);
return 0;
}
@@ -1626,35 +1625,34 @@ static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf,
return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight);
}
-static int cfqg_print_leaf_weight_device(struct cgroup *cgrp,
+static int cfqg_print_leaf_weight_device(struct cgroup_subsys_state *css,
struct cftype *cft,
struct seq_file *sf)
{
- blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
- cfqg_prfill_leaf_weight_device, &blkcg_policy_cfq, 0,
- false);
+ blkcg_print_blkgs(sf, css_to_blkcg(css), cfqg_prfill_leaf_weight_device,
+ &blkcg_policy_cfq, 0, false);
return 0;
}
-static int cfq_print_weight(struct cgroup *cgrp, struct cftype *cft,
+static int cfq_print_weight(struct cgroup_subsys_state *css, struct cftype *cft,
struct seq_file *sf)
{
- seq_printf(sf, "%u\n", cgroup_to_blkcg(cgrp)->cfq_weight);
+ seq_printf(sf, "%u\n", css_to_blkcg(css)->cfq_weight);
return 0;
}
-static int cfq_print_leaf_weight(struct cgroup *cgrp, struct cftype *cft,
- struct seq_file *sf)
+static int cfq_print_leaf_weight(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct seq_file *sf)
{
- seq_printf(sf, "%u\n",
- cgroup_to_blkcg(cgrp)->cfq_leaf_weight);
+ seq_printf(sf, "%u\n", css_to_blkcg(css)->cfq_leaf_weight);
return 0;
}
-static int __cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
- const char *buf, bool is_leaf_weight)
+static int __cfqg_set_weight_device(struct cgroup_subsys_state *css,
+ struct cftype *cft, const char *buf,
+ bool is_leaf_weight)
{
- struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+ struct blkcg *blkcg = css_to_blkcg(css);
struct blkg_conf_ctx ctx;
struct cfq_group *cfqg;
int ret;
@@ -1680,22 +1678,22 @@ static int __cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
return ret;
}
-static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
- const char *buf)
+static int cfqg_set_weight_device(struct cgroup_subsys_state *css,
+ struct cftype *cft, const char *buf)
{
- return __cfqg_set_weight_device(cgrp, cft, buf, false);
+ return __cfqg_set_weight_device(css, cft, buf, false);
}
-static int cfqg_set_leaf_weight_device(struct cgroup *cgrp, struct cftype *cft,
- const char *buf)
+static int cfqg_set_leaf_weight_device(struct cgroup_subsys_state *css,
+ struct cftype *cft, const char *buf)
{
- return __cfqg_set_weight_device(cgrp, cft, buf, true);
+ return __cfqg_set_weight_device(css, cft, buf, true);
}
-static int __cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val,
- bool is_leaf_weight)
+static int __cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
+ u64 val, bool is_leaf_weight)
{
- struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+ struct blkcg *blkcg = css_to_blkcg(css);
struct blkcg_gq *blkg;
if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
@@ -1727,30 +1725,32 @@ static int __cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val,
return 0;
}
-static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
+static int cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
+ u64 val)
{
- return __cfq_set_weight(cgrp, cft, val, false);
+ return __cfq_set_weight(css, cft, val, false);
}
-static int cfq_set_leaf_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
+static int cfq_set_leaf_weight(struct cgroup_subsys_state *css,
+ struct cftype *cft, u64 val)
{
- return __cfq_set_weight(cgrp, cft, val, true);
+ return __cfq_set_weight(css, cft, val, true);
}
-static int cfqg_print_stat(struct cgroup *cgrp, struct cftype *cft,
+static int cfqg_print_stat(struct cgroup_subsys_state *css, struct cftype *cft,
struct seq_file *sf)
{
- struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+ struct blkcg *blkcg = css_to_blkcg(css);
blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &blkcg_policy_cfq,
cft->private, false);
return 0;
}
-static int cfqg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
- struct seq_file *sf)
+static int cfqg_print_rwstat(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct seq_file *sf)
{
- struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+ struct blkcg *blkcg = css_to_blkcg(css);
blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &blkcg_policy_cfq,
cft->private, true);
@@ -1773,20 +1773,20 @@ static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf,
return __blkg_prfill_rwstat(sf, pd, &sum);
}
-static int cfqg_print_stat_recursive(struct cgroup *cgrp, struct cftype *cft,
- struct seq_file *sf)
+static int cfqg_print_stat_recursive(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct seq_file *sf)
{
- struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+ struct blkcg *blkcg = css_to_blkcg(css);
blkcg_print_blkgs(sf, blkcg, cfqg_prfill_stat_recursive,
&blkcg_policy_cfq, cft->private, false);
return 0;
}
-static int cfqg_print_rwstat_recursive(struct cgroup *cgrp, struct cftype *cft,
- struct seq_file *sf)
+static int cfqg_print_rwstat_recursive(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct seq_file *sf)
{
- struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+ struct blkcg *blkcg = css_to_blkcg(css);
blkcg_print_blkgs(sf, blkcg, cfqg_prfill_rwstat_recursive,
&blkcg_policy_cfq, cft->private, true);
@@ -1803,17 +1803,17 @@ static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
if (samples) {
v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
- do_div(v, samples);
+ v = div64_u64(v, samples);
}
__blkg_prfill_u64(sf, pd, v);
return 0;
}
/* print avg_queue_size */
-static int cfqg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
- struct seq_file *sf)
+static int cfqg_print_avg_queue_size(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct seq_file *sf)
{
- struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+ struct blkcg *blkcg = css_to_blkcg(css);
blkcg_print_blkgs(sf, blkcg, cfqg_prfill_avg_queue_size,
&blkcg_policy_cfq, 0, false);
@@ -4347,18 +4347,28 @@ static void cfq_exit_queue(struct elevator_queue *e)
kfree(cfqd);
}
-static int cfq_init_queue(struct request_queue *q)
+static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
{
struct cfq_data *cfqd;
struct blkcg_gq *blkg __maybe_unused;
int i, ret;
+ struct elevator_queue *eq;
- cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
- if (!cfqd)
+ eq = elevator_alloc(q, e);
+ if (!eq)
return -ENOMEM;
+ cfqd = kzalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
+ if (!cfqd) {
+ kobject_put(&eq->kobj);
+ return -ENOMEM;
+ }
+ eq->elevator_data = cfqd;
+
cfqd->queue = q;
- q->elevator->elevator_data = cfqd;
+ spin_lock_irq(q->queue_lock);
+ q->elevator = eq;
+ spin_unlock_irq(q->queue_lock);
/* Init root service tree */
cfqd->grp_service_tree = CFQ_RB_ROOT;
@@ -4433,6 +4443,7 @@ static int cfq_init_queue(struct request_queue *q)
out_free:
kfree(cfqd);
+ kobject_put(&eq->kobj);
return ret;
}
diff --git a/block/cmdline-parser.c b/block/cmdline-parser.c
new file mode 100644
index 0000000..cc2637f
--- /dev/null
+++ b/block/cmdline-parser.c
@@ -0,0 +1,250 @@
+/*
+ * Parse command line, get partition information
+ *
+ * Written by Cai Zhiyong <caizhiyong@huawei.com>
+ *
+ */
+#include <linux/buffer_head.h>
+#include <linux/module.h>
+#include <linux/cmdline-parser.h>
+
+static int parse_subpart(struct cmdline_subpart **subpart, char *partdef)
+{
+ int ret = 0;
+ struct cmdline_subpart *new_subpart;
+
+ *subpart = NULL;
+
+ new_subpart = kzalloc(sizeof(struct cmdline_subpart), GFP_KERNEL);
+ if (!new_subpart)
+ return -ENOMEM;
+
+ if (*partdef == '-') {
+ new_subpart->size = (sector_t)(~0ULL);
+ partdef++;
+ } else {
+ new_subpart->size = (sector_t)memparse(partdef, &partdef);
+ if (new_subpart->size < (sector_t)PAGE_SIZE) {
+ pr_warn("cmdline partition size is invalid.");
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+
+ if (*partdef == '@') {
+ partdef++;
+ new_subpart->from = (sector_t)memparse(partdef, &partdef);
+ } else {
+ new_subpart->from = (sector_t)(~0ULL);
+ }
+
+ if (*partdef == '(') {
+ int length;
+ char *next = strchr(++partdef, ')');
+
+ if (!next) {
+ pr_warn("cmdline partition format is invalid.");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ length = min_t(int, next - partdef,
+ sizeof(new_subpart->name) - 1);
+ strncpy(new_subpart->name, partdef, length);
+ new_subpart->name[length] = '\0';
+
+ partdef = ++next;
+ } else
+ new_subpart->name[0] = '\0';
+
+ new_subpart->flags = 0;
+
+ if (!strncmp(partdef, "ro", 2)) {
+ new_subpart->flags |= PF_RDONLY;
+ partdef += 2;
+ }
+
+ if (!strncmp(partdef, "lk", 2)) {
+ new_subpart->flags |= PF_POWERUP_LOCK;
+ partdef += 2;
+ }
+
+ *subpart = new_subpart;
+ return 0;
+fail:
+ kfree(new_subpart);
+ return ret;
+}
+
+static void free_subpart(struct cmdline_parts *parts)
+{
+ struct cmdline_subpart *subpart;
+
+ while (parts->subpart) {
+ subpart = parts->subpart;
+ parts->subpart = subpart->next_subpart;
+ kfree(subpart);
+ }
+}
+
+static int parse_parts(struct cmdline_parts **parts, const char *bdevdef)
+{
+ int ret = -EINVAL;
+ char *next;
+ int length;
+ struct cmdline_subpart **next_subpart;
+ struct cmdline_parts *newparts;
+ char buf[BDEVNAME_SIZE + 32 + 4];
+
+ *parts = NULL;
+
+ newparts = kzalloc(sizeof(struct cmdline_parts), GFP_KERNEL);
+ if (!newparts)
+ return -ENOMEM;
+
+ next = strchr(bdevdef, ':');
+ if (!next) {
+ pr_warn("cmdline partition has no block device.");
+ goto fail;
+ }
+
+ length = min_t(int, next - bdevdef, sizeof(newparts->name) - 1);
+ strncpy(newparts->name, bdevdef, length);
+ newparts->name[length] = '\0';
+ newparts->nr_subparts = 0;
+
+ next_subpart = &newparts->subpart;
+
+ while (next && *(++next)) {
+ bdevdef = next;
+ next = strchr(bdevdef, ',');
+
+ length = (!next) ? (sizeof(buf) - 1) :
+ min_t(int, next - bdevdef, sizeof(buf) - 1);
+
+ strncpy(buf, bdevdef, length);
+ buf[length] = '\0';
+
+ ret = parse_subpart(next_subpart, buf);
+ if (ret)
+ goto fail;
+
+ newparts->nr_subparts++;
+ next_subpart = &(*next_subpart)->next_subpart;
+ }
+
+ if (!newparts->subpart) {
+ pr_warn("cmdline partition has no valid partition.");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ *parts = newparts;
+
+ return 0;
+fail:
+ free_subpart(newparts);
+ kfree(newparts);
+ return ret;
+}
+
+void cmdline_parts_free(struct cmdline_parts **parts)
+{
+ struct cmdline_parts *next_parts;
+
+ while (*parts) {
+ next_parts = (*parts)->next_parts;
+ free_subpart(*parts);
+ kfree(*parts);
+ *parts = next_parts;
+ }
+}
+
+int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline)
+{
+ int ret;
+ char *buf;
+ char *pbuf;
+ char *next;
+ struct cmdline_parts **next_parts;
+
+ *parts = NULL;
+
+ next = pbuf = buf = kstrdup(cmdline, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ next_parts = parts;
+
+ while (next && *pbuf) {
+ next = strchr(pbuf, ';');
+ if (next)
+ *next = '\0';
+
+ ret = parse_parts(next_parts, pbuf);
+ if (ret)
+ goto fail;
+
+ if (next)
+ pbuf = ++next;
+
+ next_parts = &(*next_parts)->next_parts;
+ }
+
+ if (!*parts) {
+ pr_warn("cmdline partition has no valid partition.");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ ret = 0;
+done:
+ kfree(buf);
+ return ret;
+
+fail:
+ cmdline_parts_free(parts);
+ goto done;
+}
+
+struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
+ const char *bdev)
+{
+ while (parts && strncmp(bdev, parts->name, sizeof(parts->name)))
+ parts = parts->next_parts;
+ return parts;
+}
+
+/*
+ * add_part()
+ * 0 success.
+ * 1 cannot add any more partitions.
+ */
+void cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
+ int slot,
+ int (*add_part)(int, struct cmdline_subpart *, void *),
+ void *param)
+
+{
+ sector_t from = 0;
+ struct cmdline_subpart *subpart;
+
+ for (subpart = parts->subpart; subpart;
+ subpart = subpart->next_subpart, slot++) {
+ if (subpart->from == (sector_t)(~0ULL))
+ subpart->from = from;
+ else
+ from = subpart->from;
+
+ if (from >= disk_size)
+ break;
+
+ if (subpart->size > (disk_size - from))
+ subpart->size = disk_size - from;
+
+ from += subpart->size;
+
+ if (add_part(slot, subpart, param))
+ break;
+ }
+}
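For reference, here is a minimal in-kernel sketch (not part of the patch) of
how a caller could drive the API above. The partition string follows the
grammar implemented by parse_parts()/parse_subpart() --
<bdev>:<size>[@<from>](<name>)[ro][lk], subparts separated by ',' and block
devices by ';' -- while the device names, sizes and the demo callback are
illustrative assumptions.

#include <linux/cmdline-parser.h>
#include <linux/module.h>
#include <linux/printk.h>

static int show_part(int slot, struct cmdline_subpart *subpart, void *param)
{
	pr_info("slot %d: %s from=%llu size=%llu\n", slot, subpart->name,
		(unsigned long long)subpart->from,
		(unsigned long long)subpart->size);
	return 0;	/* 0 lets cmdline_parts_set() keep iterating */
}

static int __init cmdline_parser_demo(void)
{
	struct cmdline_parts *parts, *p;
	const char *def = "mmcblk0:1M(boot),512K(env),-(rootfs);mmcblk1:-(data)";

	if (cmdline_parts_parse(&parts, def))
		return -EINVAL;

	p = cmdline_parts_find(parts, "mmcblk0");
	if (p)	/* sizes are in bytes here, like memparse() output above */
		cmdline_parts_set(p, 64 << 20, 1, show_part, NULL);

	cmdline_parts_free(&parts);
	return 0;
}
device_initcall(cmdline_parser_demo);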
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 7c668c8..fbd5a67 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -59,6 +59,7 @@ static int compat_hdio_getgeo(struct gendisk *disk, struct block_device *bdev,
if (!disk->fops->getgeo)
return -ENOTTY;
+ memset(&geo, 0, sizeof(geo));
/*
* We need to set the startsect first, the driver may
* want to override it.
@@ -69,7 +70,7 @@ static int compat_hdio_getgeo(struct gendisk *disk, struct block_device *bdev,
return ret;
ret = copy_to_user(ugeo, &geo, 4);
- ret |= __put_user(geo.start, &ugeo->start);
+ ret |= put_user(geo.start, &ugeo->start);
if (ret)
ret = -EFAULT;
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index ba19a3a..9ef6640 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -337,14 +337,22 @@ static void deadline_exit_queue(struct elevator_queue *e)
/*
* initialize elevator private data (deadline_data).
*/
-static int deadline_init_queue(struct request_queue *q)
+static int deadline_init_queue(struct request_queue *q, struct elevator_type *e)
{
struct deadline_data *dd;
+ struct elevator_queue *eq;
- dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
- if (!dd)
+ eq = elevator_alloc(q, e);
+ if (!eq)
return -ENOMEM;
+ dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
+ if (!dd) {
+ kobject_put(&eq->kobj);
+ return -ENOMEM;
+ }
+ eq->elevator_data = dd;
+
INIT_LIST_HEAD(&dd->fifo_list[READ]);
INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
dd->sort_list[READ] = RB_ROOT;
@@ -355,7 +363,9 @@ static int deadline_init_queue(struct request_queue *q)
dd->front_merges = 1;
dd->fifo_batch = fifo_batch;
- q->elevator->elevator_data = dd;
+ spin_lock_irq(q->queue_lock);
+ q->elevator = eq;
+ spin_unlock_irq(q->queue_lock);
return 0;
}
diff --git a/block/elevator.c b/block/elevator.c
index eba5b04..2bcbd8c 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -150,12 +150,12 @@ void __init load_default_elevator_module(void)
static struct kobj_type elv_ktype;
-static struct elevator_queue *elevator_alloc(struct request_queue *q,
+struct elevator_queue *elevator_alloc(struct request_queue *q,
struct elevator_type *e)
{
struct elevator_queue *eq;
- eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
+ eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
if (unlikely(!eq))
goto err;
@@ -170,6 +170,7 @@ err:
elevator_put(e);
return NULL;
}
+EXPORT_SYMBOL(elevator_alloc);
static void elevator_release(struct kobject *kobj)
{
@@ -221,16 +222,7 @@ int elevator_init(struct request_queue *q, char *name)
}
}
- q->elevator = elevator_alloc(q, e);
- if (!q->elevator)
- return -ENOMEM;
-
- err = e->ops.elevator_init_fn(q);
- if (err) {
- kobject_put(&q->elevator->kobj);
- return err;
- }
-
+ err = e->ops.elevator_init_fn(q, e);
return 0;
}
EXPORT_SYMBOL(elevator_init);
@@ -935,16 +927,9 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
spin_unlock_irq(q->queue_lock);
/* allocate, init and register new elevator */
- err = -ENOMEM;
- q->elevator = elevator_alloc(q, new_e);
- if (!q->elevator)
- goto fail_init;
-
- err = new_e->ops.elevator_init_fn(q);
- if (err) {
- kobject_put(&q->elevator->kobj);
+ err = new_e->ops.elevator_init_fn(q, new_e);
+ if (err)
goto fail_init;
- }
if (registered) {
err = elv_register_queue(q);
diff --git a/block/genhd.c b/block/genhd.c
index 20625ee..791f419 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -512,7 +512,7 @@ static void register_disk(struct gendisk *disk)
ddev->parent = disk->driverfs_dev;
- dev_set_name(ddev, disk->disk_name);
+ dev_set_name(ddev, "%s", disk->disk_name);
/* delay uevents, until we scanned partition table */
dev_set_uevent_suppress(ddev, 1);
@@ -1252,8 +1252,7 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
{
struct gendisk *disk;
- disk = kmalloc_node(sizeof(struct gendisk),
- GFP_KERNEL | __GFP_ZERO, node_id);
+ disk = kzalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
if (disk) {
if (!init_part_stats(&disk->part0)) {
kfree(disk);
@@ -1489,9 +1488,11 @@ static void __disk_unblock_events(struct gendisk *disk, bool check_now)
intv = disk_events_poll_jiffies(disk);
set_timer_slack(&ev->dwork.timer, intv / 4);
if (check_now)
- queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
+ queue_delayed_work(system_freezable_power_efficient_wq,
+ &ev->dwork, 0);
else if (intv)
- queue_delayed_work(system_freezable_wq, &ev->dwork, intv);
+ queue_delayed_work(system_freezable_power_efficient_wq,
+ &ev->dwork, intv);
out_unlock:
spin_unlock_irqrestore(&ev->lock, flags);
}
@@ -1534,7 +1535,8 @@ void disk_flush_events(struct gendisk *disk, unsigned int mask)
spin_lock_irq(&ev->lock);
ev->clearing |= mask;
if (!ev->block)
- mod_delayed_work(system_freezable_wq, &ev->dwork, 0);
+ mod_delayed_work(system_freezable_power_efficient_wq,
+ &ev->dwork, 0);
spin_unlock_irq(&ev->lock);
}
@@ -1627,7 +1629,8 @@ static void disk_check_events(struct disk_events *ev,
intv = disk_events_poll_jiffies(disk);
if (!ev->block && intv)
- queue_delayed_work(system_freezable_wq, &ev->dwork, intv);
+ queue_delayed_work(system_freezable_power_efficient_wq,
+ &ev->dwork, intv);
spin_unlock_irq(&ev->lock);
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index 5d1bf70..3de89d4 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -59,16 +59,27 @@ noop_latter_request(struct request_queue *q, struct request *rq)
return list_entry(rq->queuelist.next, struct request, queuelist);
}
-static int noop_init_queue(struct request_queue *q)
+static int noop_init_queue(struct request_queue *q, struct elevator_type *e)
{
struct noop_data *nd;
+ struct elevator_queue *eq;
+
+ eq = elevator_alloc(q, e);
+ if (!eq)
+ return -ENOMEM;
nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node);
- if (!nd)
+ if (!nd) {
+ kobject_put(&eq->kobj);
return -ENOMEM;
+ }
+ eq->elevator_data = nd;
INIT_LIST_HEAD(&nd->queue);
- q->elevator->elevator_data = nd;
+
+ spin_lock_irq(q->queue_lock);
+ q->elevator = eq;
+ spin_unlock_irq(q->queue_lock);
return 0;
}
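The cfq, deadline and noop conversions above all follow the same shape, so a
condensed sketch of the new elevator_init_fn contract may help: allocate the
elevator_queue, attach the scheduler's private data, and only then publish it
in q->elevator under the queue lock. "my_data" and "my_sched_init_queue" are
placeholders, not identifiers from the patch.

#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/slab.h>

struct my_data {
	struct list_head queue;		/* scheduler-private state */
};

static int my_sched_init_queue(struct request_queue *q, struct elevator_type *e)
{
	struct my_data *d;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);	/* now exported by elevator.c above */
	if (!eq)
		return -ENOMEM;

	d = kzalloc_node(sizeof(*d), GFP_KERNEL, q->node);
	if (!d) {
		kobject_put(&eq->kobj);	/* drops the reference elevator_alloc took */
		return -ENOMEM;
	}
	eq->elevator_data = d;
	INIT_LIST_HEAD(&d->queue);

	/* publish the fully initialized elevator under the queue lock */
	spin_lock_irq(q->queue_lock);
	q->elevator = eq;
	spin_unlock_irq(q->queue_lock);

	return 0;
}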
diff --git a/block/partitions/Kconfig b/block/partitions/Kconfig
index 75a54e1..9b29a99 100644
--- a/block/partitions/Kconfig
+++ b/block/partitions/Kconfig
@@ -68,6 +68,17 @@ config ACORN_PARTITION_RISCIX
of machines called RISCiX. If you say 'Y' here, Linux will be able
to read disks partitioned under RISCiX.
+config AIX_PARTITION
+ bool "AIX basic partition table support" if PARTITION_ADVANCED
+ help
+ Say Y here if you would like to be able to read the hard disk
+ partition table format used by IBM or Motorola PowerPC machines
+ running AIX. AIX actually uses a Logical Volume Manager, where
+ "logical volumes" can be spread across one or multiple disks,
+ but this driver works only for the simple case of partitions which
+ are contiguous.
+ Otherwise, say N.
+
config OSF_PARTITION
bool "Alpha OSF partition support" if PARTITION_ADVANCED
default y if ALPHA
@@ -249,3 +260,10 @@ config SYSV68_PARTITION
partition table format used by Motorola Delta machines (using
sysv68).
Otherwise, say N.
+
+config CMDLINE_PARTITION
+ bool "Command line partition support" if PARTITION_ADVANCED
+ select BLK_CMDLINE_PARSER
+ help
+ Say Y here if you want to read the partition table from bootargs.
+ The format for the command line is just like mtdparts.
diff --git a/block/partitions/Makefile b/block/partitions/Makefile
index 03af8ea..37a9527 100644
--- a/block/partitions/Makefile
+++ b/block/partitions/Makefile
@@ -7,6 +7,8 @@ obj-$(CONFIG_BLOCK) := check.o
obj-$(CONFIG_ACORN_PARTITION) += acorn.o
obj-$(CONFIG_AMIGA_PARTITION) += amiga.o
obj-$(CONFIG_ATARI_PARTITION) += atari.o
+obj-$(CONFIG_AIX_PARTITION) += aix.o
+obj-$(CONFIG_CMDLINE_PARTITION) += cmdline.o
obj-$(CONFIG_MAC_PARTITION) += mac.o
obj-$(CONFIG_LDM_PARTITION) += ldm.o
obj-$(CONFIG_MSDOS_PARTITION) += msdos.o
diff --git a/block/partitions/aix.c b/block/partitions/aix.c
new file mode 100644
index 0000000..43be471
--- /dev/null
+++ b/block/partitions/aix.c
@@ -0,0 +1,293 @@
+/*
+ * fs/partitions/aix.c
+ *
+ * Copyright (C) 2012-2013 Philippe De Muyter <phdm@macqel.be>
+ */
+
+#include "check.h"
+#include "aix.h"
+
+struct lvm_rec {
+ char lvm_id[4]; /* "_LVM" */
+ char reserved4[16];
+ __be32 lvmarea_len;
+ __be32 vgda_len;
+ __be32 vgda_psn[2];
+ char reserved36[10];
+ __be16 pp_size; /* log2(pp_size) */
+ char reserved46[12];
+ __be16 version;
+ };
+
+struct vgda {
+ __be32 secs;
+ __be32 usec;
+ char reserved8[16];
+ __be16 numlvs;
+ __be16 maxlvs;
+ __be16 pp_size;
+ __be16 numpvs;
+ __be16 total_vgdas;
+ __be16 vgda_size;
+ };
+
+struct lvd {
+ __be16 lv_ix;
+ __be16 res2;
+ __be16 res4;
+ __be16 maxsize;
+ __be16 lv_state;
+ __be16 mirror;
+ __be16 mirror_policy;
+ __be16 num_lps;
+ __be16 res10[8];
+ };
+
+struct lvname {
+ char name[64];
+ };
+
+struct ppe {
+ __be16 lv_ix;
+ unsigned short res2;
+ unsigned short res4;
+ __be16 lp_ix;
+ unsigned short res8[12];
+ };
+
+struct pvd {
+ char reserved0[16];
+ __be16 pp_count;
+ char reserved18[2];
+ __be32 psn_part1;
+ char reserved24[8];
+ struct ppe ppe[1016];
+ };
+
+#define LVM_MAXLVS 256
+
+/**
+ * last_lba(): return number of last logical block of device
+ * @bdev: block device
+ *
+ * Description: Returns last LBA value on success, 0 on error.
+ * This is stored (by sd and ide-geometry) in
+ * the part[0] entry for this disk, and is the number of
+ * physical sectors available on the disk.
+ */
+static u64 last_lba(struct block_device *bdev)
+{
+ if (!bdev || !bdev->bd_inode)
+ return 0;
+ return (bdev->bd_inode->i_size >> 9) - 1ULL;
+}
+
+/**
+ * read_lba(): Read bytes from disk, starting at given LBA
+ * @state: disk parsed partitions
+ * @lba: the Logical Block Address of the partition table
+ * @buffer: destination buffer
+ * @count: bytes to read
+ *
+ * Description: Reads @count bytes from @state->bdev into @buffer.
+ * Returns number of bytes read on success, 0 on error.
+ */
+static size_t read_lba(struct parsed_partitions *state, u64 lba, u8 *buffer,
+ size_t count)
+{
+ size_t totalreadcount = 0;
+
+ if (!buffer || lba + count / 512 > last_lba(state->bdev))
+ return 0;
+
+ while (count) {
+ int copied = 512;
+ Sector sect;
+ unsigned char *data = read_part_sector(state, lba++, &sect);
+ if (!data)
+ break;
+ if (copied > count)
+ copied = count;
+ memcpy(buffer, data, copied);
+ put_dev_sector(sect);
+ buffer += copied;
+ totalreadcount += copied;
+ count -= copied;
+ }
+ return totalreadcount;
+}
+
+/**
+ * alloc_pvd(): reads physical volume descriptor
+ * @state
+ * @lba
+ *
+ * Description: Returns pvd on success, NULL on error.
+ * Allocates space for pvd and fill it with disk blocks at @lba
+ * Notes: remember to free pvd when you're done!
+ */
+static struct pvd *alloc_pvd(struct parsed_partitions *state, u32 lba)
+{
+ size_t count = sizeof(struct pvd);
+ struct pvd *p;
+
+ p = kmalloc(count, GFP_KERNEL);
+ if (!p)
+ return NULL;
+
+ if (read_lba(state, lba, (u8 *) p, count) < count) {
+ kfree(p);
+ return NULL;
+ }
+ return p;
+}
+
+/**
+ * alloc_lvn(): reads logical volume names
+ * @state
+ * @lba
+ *
+ * Description: Returns lvn on success, NULL on error.
+ * Allocates space for lvn and fill it with disk blocks at @lba
+ * Notes: remember to free lvn when you're done!
+ */
+static struct lvname *alloc_lvn(struct parsed_partitions *state, u32 lba)
+{
+ size_t count = sizeof(struct lvname) * LVM_MAXLVS;
+ struct lvname *p;
+
+ p = kmalloc(count, GFP_KERNEL);
+ if (!p)
+ return NULL;
+
+ if (read_lba(state, lba, (u8 *) p, count) < count) {
+ kfree(p);
+ return NULL;
+ }
+ return p;
+}
+
+int aix_partition(struct parsed_partitions *state)
+{
+ int ret = 0;
+ Sector sect;
+ unsigned char *d;
+ u32 pp_bytes_size;
+ u32 pp_blocks_size = 0;
+ u32 vgda_sector = 0;
+ u32 vgda_len = 0;
+ int numlvs = 0;
+ struct pvd *pvd;
+ struct lv_info {
+ unsigned short pps_per_lv;
+ unsigned short pps_found;
+ unsigned char lv_is_contiguous;
+ } *lvip;
+ struct lvname *n = NULL;
+
+ d = read_part_sector(state, 7, &sect);
+ if (d) {
+ struct lvm_rec *p = (struct lvm_rec *)d;
+ u16 lvm_version = be16_to_cpu(p->version);
+ char tmp[64];
+
+ if (lvm_version == 1) {
+ int pp_size_log2 = be16_to_cpu(p->pp_size);
+
+ pp_bytes_size = 1 << pp_size_log2;
+ pp_blocks_size = pp_bytes_size / 512;
+ snprintf(tmp, sizeof(tmp),
+ " AIX LVM header version %u found\n",
+ lvm_version);
+ vgda_len = be32_to_cpu(p->vgda_len);
+ vgda_sector = be32_to_cpu(p->vgda_psn[0]);
+ } else {
+ snprintf(tmp, sizeof(tmp),
+ " unsupported AIX LVM version %d found\n",
+ lvm_version);
+ }
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+ put_dev_sector(sect);
+ }
+ if (vgda_sector && (d = read_part_sector(state, vgda_sector, &sect))) {
+ struct vgda *p = (struct vgda *)d;
+
+ numlvs = be16_to_cpu(p->numlvs);
+ put_dev_sector(sect);
+ }
+ lvip = kzalloc(sizeof(struct lv_info) * state->limit, GFP_KERNEL);
+ if (!lvip)
+ return 0;
+ if (numlvs && (d = read_part_sector(state, vgda_sector + 1, &sect))) {
+ struct lvd *p = (struct lvd *)d;
+ int i;
+
+ n = alloc_lvn(state, vgda_sector + vgda_len - 33);
+ if (n) {
+ int foundlvs = 0;
+
+ for (i = 0; foundlvs < numlvs && i < state->limit; i += 1) {
+ lvip[i].pps_per_lv = be16_to_cpu(p[i].num_lps);
+ if (lvip[i].pps_per_lv)
+ foundlvs += 1;
+ }
+ }
+ put_dev_sector(sect);
+ }
+ pvd = alloc_pvd(state, vgda_sector + 17);
+ if (pvd) {
+ int numpps = be16_to_cpu(pvd->pp_count);
+ int psn_part1 = be32_to_cpu(pvd->psn_part1);
+ int i;
+ int cur_lv_ix = -1;
+ int next_lp_ix = 1;
+ int lp_ix;
+
+ for (i = 0; i < numpps; i += 1) {
+ struct ppe *p = pvd->ppe + i;
+ unsigned int lv_ix;
+
+ lp_ix = be16_to_cpu(p->lp_ix);
+ if (!lp_ix) {
+ next_lp_ix = 1;
+ continue;
+ }
+ lv_ix = be16_to_cpu(p->lv_ix) - 1;
+ if (lv_ix > state->limit) {
+ cur_lv_ix = -1;
+ continue;
+ }
+ lvip[lv_ix].pps_found += 1;
+ if (lp_ix == 1) {
+ cur_lv_ix = lv_ix;
+ next_lp_ix = 1;
+ } else if (lv_ix != cur_lv_ix || lp_ix != next_lp_ix) {
+ next_lp_ix = 1;
+ continue;
+ }
+ if (lp_ix == lvip[lv_ix].pps_per_lv) {
+ char tmp[70];
+
+ put_partition(state, lv_ix + 1,
+ (i + 1 - lp_ix) * pp_blocks_size + psn_part1,
+ lvip[lv_ix].pps_per_lv * pp_blocks_size);
+ snprintf(tmp, sizeof(tmp), " <%s>\n",
+ n[lv_ix].name);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+ lvip[lv_ix].lv_is_contiguous = 1;
+ ret = 1;
+ next_lp_ix = 1;
+ } else
+ next_lp_ix += 1;
+ }
+ for (i = 0; i < state->limit; i += 1)
+ if (lvip[i].pps_found && !lvip[i].lv_is_contiguous)
+ pr_warn("partition %s (%u pp's found) is "
+ "not contiguous\n",
+ n[i].name, lvip[i].pps_found);
+ kfree(pvd);
+ }
+ kfree(n);
+ kfree(lvip);
+ return ret;
+}
diff --git a/block/partitions/aix.h b/block/partitions/aix.h
new file mode 100644
index 0000000..e0c66a9
--- /dev/null
+++ b/block/partitions/aix.h
@@ -0,0 +1 @@
+extern int aix_partition(struct parsed_partitions *state);
diff --git a/block/partitions/check.c b/block/partitions/check.c
index 19ba207..9ac1df7 100644
--- a/block/partitions/check.c
+++ b/block/partitions/check.c
@@ -34,6 +34,7 @@
#include "efi.h"
#include "karma.h"
#include "sysv68.h"
+#include "cmdline.h"
int warn_no_part = 1; /*This is ugly: should make genhd removable media aware*/
@@ -65,6 +66,9 @@ static int (*check_part[])(struct parsed_partitions *) = {
adfspart_check_ADFS,
#endif
+#ifdef CONFIG_CMDLINE_PARTITION
+ cmdline_partition,
+#endif
#ifdef CONFIG_EFI_PARTITION
efi_partition, /* this must come before msdos */
#endif
diff --git a/block/partitions/cmdline.c b/block/partitions/cmdline.c
new file mode 100644
index 0000000..5141b56
--- /dev/null
+++ b/block/partitions/cmdline.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2013 HUAWEI
+ * Author: Cai Zhiyong <caizhiyong@huawei.com>
+ *
+ * Read block device partition table from the command line.
+ * Typically used for fixed block (eMMC) embedded devices.
+ * Such devices have no MBR, which saves storage space. The bootloader can
+ * access data on the block device directly by absolute address, and users
+ * can easily change the partition layout.
+ *
+ * The format for the command line is just like mtdparts.
+ *
+ * For further information, see "Documentation/block/cmdline-partition.txt"
+ *
+ */
+
+#include <linux/cmdline-parser.h>
+
+#include "check.h"
+#include "cmdline.h"
+
+static char *cmdline;
+static struct cmdline_parts *bdev_parts;
+
+static int add_part(int slot, struct cmdline_subpart *subpart, void *param)
+{
+ int label_min;
+ struct partition_meta_info *info;
+ char tmp[sizeof(info->volname) + 4];
+ struct parsed_partitions *state = (struct parsed_partitions *)param;
+
+ if (slot >= state->limit)
+ return 1;
+
+ put_partition(state, slot, subpart->from >> 9,
+ subpart->size >> 9);
+
+ info = &state->parts[slot].info;
+
+ label_min = min_t(int, sizeof(info->volname) - 1,
+ sizeof(subpart->name));
+ strncpy(info->volname, subpart->name, label_min);
+ info->volname[label_min] = '\0';
+
+ snprintf(tmp, sizeof(tmp), "(%s)", info->volname);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+
+ state->parts[slot].has_info = true;
+
+ return 0;
+}
+
+static int __init cmdline_parts_setup(char *s)
+{
+ cmdline = s;
+ return 1;
+}
+__setup("blkdevparts=", cmdline_parts_setup);
+
+/*
+ * Purpose: allocate cmdline partitions.
+ * Returns:
+ * -1 if unable to read the partition table
+ * 0 if this isn't our partition table
+ * 1 if successful
+ */
+int cmdline_partition(struct parsed_partitions *state)
+{
+ sector_t disk_size;
+ char bdev[BDEVNAME_SIZE];
+ struct cmdline_parts *parts;
+
+ if (cmdline) {
+ if (bdev_parts)
+ cmdline_parts_free(&bdev_parts);
+
+ if (cmdline_parts_parse(&bdev_parts, cmdline)) {
+ cmdline = NULL;
+ return -1;
+ }
+ cmdline = NULL;
+ }
+
+ if (!bdev_parts)
+ return 0;
+
+ bdevname(state->bdev, bdev);
+ parts = cmdline_parts_find(bdev_parts, bdev);
+ if (!parts)
+ return 0;
+
+ disk_size = get_capacity(state->bdev->bd_disk) << 9;
+
+ cmdline_parts_set(parts, disk_size, 1, add_part, (void *)state);
+
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
+
+ return 1;
+}
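With cmdline_partition() wired into the partition check path above, the layout
is supplied through the blkdevparts= parameter registered by the __setup()
hook. An illustrative boot line (device names and sizes are made up for the
example, following the parser's grammar):

    blkdevparts=mmcblk0:2M(bootloader)ro,4M(kernel),-(rootfs);mmcblk1:-(data)

Each subpart is assigned a partition slot starting at 1, and add_part() copies
the parenthesized label into the partition's volname.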
diff --git a/block/partitions/cmdline.h b/block/partitions/cmdline.h
new file mode 100644
index 0000000..26e0f8d
--- /dev/null
+++ b/block/partitions/cmdline.h
@@ -0,0 +1,2 @@
+
+int cmdline_partition(struct parsed_partitions *state);
diff --git a/block/partitions/efi.c b/block/partitions/efi.c
index c85fc89..1eb09ee 100644
--- a/block/partitions/efi.c
+++ b/block/partitions/efi.c
@@ -25,6 +25,9 @@
* TODO:
*
* Changelog:
+ * Mon August 5th, 2013 Davidlohr Bueso <davidlohr@hp.com>
+ * - detect hybrid MBRs, tighter pMBR checking & cleanups.
+ *
* Mon Nov 09 2004 Matt Domsch <Matt_Domsch@dell.com>
* - test for valid PMBR and valid PGPT before ever reading
* AGPT, allow override with 'gpt' kernel command line option.
@@ -149,34 +152,84 @@ static u64 last_lba(struct block_device *bdev)
bdev_logical_block_size(bdev)) - 1ULL;
}
-static inline int
-pmbr_part_valid(struct partition *part)
+static inline int pmbr_part_valid(gpt_mbr_record *part)
{
- if (part->sys_ind == EFI_PMBR_OSTYPE_EFI_GPT &&
- le32_to_cpu(part->start_sect) == 1UL)
- return 1;
- return 0;
+ if (part->os_type != EFI_PMBR_OSTYPE_EFI_GPT)
+ goto invalid;
+
+ /* set to 0x00000001 (i.e., the LBA of the GPT Partition Header) */
+ if (le32_to_cpu(part->starting_lba) != GPT_PRIMARY_PARTITION_TABLE_LBA)
+ goto invalid;
+
+ return GPT_MBR_PROTECTIVE;
+invalid:
+ return 0;
}
/**
* is_pmbr_valid(): test Protective MBR for validity
* @mbr: pointer to a legacy mbr structure
+ * @total_sectors: amount of sectors in the device
*
- * Description: Returns 1 if PMBR is valid, 0 otherwise.
- * Validity depends on two things:
+ * Description: Checks for a valid protective or hybrid
+ * master boot record (MBR). The validity of a pMBR depends
+ * on all of the following properties:
* 1) MSDOS signature is in the last two bytes of the MBR
* 2) One partition of type 0xEE is found
+ *
+ * In addition, a hybrid MBR will have up to three additional
+ * primary partitions, which point to the same space that's
+ * marked out by up to three GPT partitions.
+ *
+ * Returns 0 upon invalid MBR, or GPT_MBR_PROTECTIVE or
+ * GPT_MBR_HYBRID depending on the device layout.
*/
-static int
-is_pmbr_valid(legacy_mbr *mbr)
+static int is_pmbr_valid(legacy_mbr *mbr, sector_t total_sectors)
{
- int i;
+ uint32_t sz = 0;
+ int i, part = 0, ret = 0; /* invalid by default */
+
if (!mbr || le16_to_cpu(mbr->signature) != MSDOS_MBR_SIGNATURE)
- return 0;
+ goto done;
+
+ for (i = 0; i < 4; i++) {
+ ret = pmbr_part_valid(&mbr->partition_record[i]);
+ if (ret == GPT_MBR_PROTECTIVE) {
+ part = i;
+ /*
+ * Ok, we at least know that there's a protective MBR,
+ * now check if there are other partition types for
+ * hybrid MBR.
+ */
+ goto check_hybrid;
+ }
+ }
+
+ if (ret != GPT_MBR_PROTECTIVE)
+ goto done;
+check_hybrid:
for (i = 0; i < 4; i++)
- if (pmbr_part_valid(&mbr->partition_record[i]))
- return 1;
- return 0;
+ if ((mbr->partition_record[i].os_type !=
+ EFI_PMBR_OSTYPE_EFI_GPT) &&
+ (mbr->partition_record[i].os_type != 0x00))
+ ret = GPT_MBR_HYBRID;
+
+ /*
+ * Protective MBRs take up the lesser of the whole disk
+ * or 2 TiB (32bit LBA), ignoring the rest of the disk.
+ * Some partitioning programs, nonetheless, choose to set
+ * the size to the maximum 32-bit limitation, disregarding
+ * the disk size.
+ *
+ * Hybrid MBRs do not necessarily comply with this.
+ */
+ if (ret == GPT_MBR_PROTECTIVE) {
+ sz = le32_to_cpu(mbr->partition_record[part].size_in_lba);
+ if (sz != (uint32_t) total_sectors - 1 && sz != 0xFFFFFFFF)
+ ret = 0;
+ }
+done:
+ return ret;
}
/**
@@ -243,8 +296,7 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
return NULL;
if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
- (u8 *) pte,
- count) < count) {
+ (u8 *) pte, count) < count) {
kfree(pte);
pte=NULL;
return NULL;
@@ -364,7 +416,12 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
(unsigned long long)lastlba);
goto fail;
}
-
+ if (le64_to_cpu((*gpt)->last_usable_lba) < le64_to_cpu((*gpt)->first_usable_lba)) {
+ pr_debug("GPT: last_usable_lba incorrect: %lld > %lld\n",
+ (unsigned long long)le64_to_cpu((*gpt)->last_usable_lba),
+ (unsigned long long)le64_to_cpu((*gpt)->first_usable_lba));
+ goto fail;
+ }
/* Check that sizeof_partition_entry has the correct value */
if (le32_to_cpu((*gpt)->sizeof_partition_entry) != sizeof(gpt_entry)) {
pr_debug("GUID Partitition Entry Size check failed.\n");
@@ -429,44 +486,42 @@ compare_gpts(gpt_header *pgpt, gpt_header *agpt, u64 lastlba)
if (!pgpt || !agpt)
return;
if (le64_to_cpu(pgpt->my_lba) != le64_to_cpu(agpt->alternate_lba)) {
- printk(KERN_WARNING
- "GPT:Primary header LBA != Alt. header alternate_lba\n");
- printk(KERN_WARNING "GPT:%lld != %lld\n",
+ pr_warn("GPT:Primary header LBA != Alt. header alternate_lba\n");
+ pr_warn("GPT:%lld != %lld\n",
(unsigned long long)le64_to_cpu(pgpt->my_lba),
(unsigned long long)le64_to_cpu(agpt->alternate_lba));
error_found++;
}
if (le64_to_cpu(pgpt->alternate_lba) != le64_to_cpu(agpt->my_lba)) {
- printk(KERN_WARNING
- "GPT:Primary header alternate_lba != Alt. header my_lba\n");
- printk(KERN_WARNING "GPT:%lld != %lld\n",
+ pr_warn("GPT:Primary header alternate_lba != Alt. header my_lba\n");
+ pr_warn("GPT:%lld != %lld\n",
(unsigned long long)le64_to_cpu(pgpt->alternate_lba),
(unsigned long long)le64_to_cpu(agpt->my_lba));
error_found++;
}
if (le64_to_cpu(pgpt->first_usable_lba) !=
le64_to_cpu(agpt->first_usable_lba)) {
- printk(KERN_WARNING "GPT:first_usable_lbas don't match.\n");
- printk(KERN_WARNING "GPT:%lld != %lld\n",
+ pr_warn("GPT:first_usable_lbas don't match.\n");
+ pr_warn("GPT:%lld != %lld\n",
(unsigned long long)le64_to_cpu(pgpt->first_usable_lba),
(unsigned long long)le64_to_cpu(agpt->first_usable_lba));
error_found++;
}
if (le64_to_cpu(pgpt->last_usable_lba) !=
le64_to_cpu(agpt->last_usable_lba)) {
- printk(KERN_WARNING "GPT:last_usable_lbas don't match.\n");
- printk(KERN_WARNING "GPT:%lld != %lld\n",
+ pr_warn("GPT:last_usable_lbas don't match.\n");
+ pr_warn("GPT:%lld != %lld\n",
(unsigned long long)le64_to_cpu(pgpt->last_usable_lba),
(unsigned long long)le64_to_cpu(agpt->last_usable_lba));
error_found++;
}
if (efi_guidcmp(pgpt->disk_guid, agpt->disk_guid)) {
- printk(KERN_WARNING "GPT:disk_guids don't match.\n");
+ pr_warn("GPT:disk_guids don't match.\n");
error_found++;
}
if (le32_to_cpu(pgpt->num_partition_entries) !=
le32_to_cpu(agpt->num_partition_entries)) {
- printk(KERN_WARNING "GPT:num_partition_entries don't match: "
+ pr_warn("GPT:num_partition_entries don't match: "
"0x%x != 0x%x\n",
le32_to_cpu(pgpt->num_partition_entries),
le32_to_cpu(agpt->num_partition_entries));
@@ -474,8 +529,7 @@ compare_gpts(gpt_header *pgpt, gpt_header *agpt, u64 lastlba)
}
if (le32_to_cpu(pgpt->sizeof_partition_entry) !=
le32_to_cpu(agpt->sizeof_partition_entry)) {
- printk(KERN_WARNING
- "GPT:sizeof_partition_entry values don't match: "
+ pr_warn("GPT:sizeof_partition_entry values don't match: "
"0x%x != 0x%x\n",
le32_to_cpu(pgpt->sizeof_partition_entry),
le32_to_cpu(agpt->sizeof_partition_entry));
@@ -483,34 +537,30 @@ compare_gpts(gpt_header *pgpt, gpt_header *agpt, u64 lastlba)
}
if (le32_to_cpu(pgpt->partition_entry_array_crc32) !=
le32_to_cpu(agpt->partition_entry_array_crc32)) {
- printk(KERN_WARNING
- "GPT:partition_entry_array_crc32 values don't match: "
+ pr_warn("GPT:partition_entry_array_crc32 values don't match: "
"0x%x != 0x%x\n",
le32_to_cpu(pgpt->partition_entry_array_crc32),
le32_to_cpu(agpt->partition_entry_array_crc32));
error_found++;
}
if (le64_to_cpu(pgpt->alternate_lba) != lastlba) {
- printk(KERN_WARNING
- "GPT:Primary header thinks Alt. header is not at the end of the disk.\n");
- printk(KERN_WARNING "GPT:%lld != %lld\n",
+ pr_warn("GPT:Primary header thinks Alt. header is not at the end of the disk.\n");
+ pr_warn("GPT:%lld != %lld\n",
(unsigned long long)le64_to_cpu(pgpt->alternate_lba),
(unsigned long long)lastlba);
error_found++;
}
if (le64_to_cpu(agpt->my_lba) != lastlba) {
- printk(KERN_WARNING
- "GPT:Alternate GPT header not at the end of the disk.\n");
- printk(KERN_WARNING "GPT:%lld != %lld\n",
+ pr_warn("GPT:Alternate GPT header not at the end of the disk.\n");
+ pr_warn("GPT:%lld != %lld\n",
(unsigned long long)le64_to_cpu(agpt->my_lba),
(unsigned long long)lastlba);
error_found++;
}
if (error_found)
- printk(KERN_WARNING
- "GPT: Use GNU Parted to correct GPT errors.\n");
+ pr_warn("GPT: Use GNU Parted to correct GPT errors.\n");
return;
}
@@ -536,6 +586,7 @@ static int find_valid_gpt(struct parsed_partitions *state, gpt_header **gpt,
gpt_header *pgpt = NULL, *agpt = NULL;
gpt_entry *pptes = NULL, *aptes = NULL;
legacy_mbr *legacymbr;
+ sector_t total_sectors = i_size_read(state->bdev->bd_inode) >> 9;
u64 lastlba;
if (!ptes)
@@ -543,17 +594,22 @@ static int find_valid_gpt(struct parsed_partitions *state, gpt_header **gpt,
lastlba = last_lba(state->bdev);
if (!force_gpt) {
- /* This will be added to the EFI Spec. per Intel after v1.02. */
- legacymbr = kzalloc(sizeof (*legacymbr), GFP_KERNEL);
- if (legacymbr) {
- read_lba(state, 0, (u8 *) legacymbr,
- sizeof (*legacymbr));
- good_pmbr = is_pmbr_valid(legacymbr);
- kfree(legacymbr);
- }
- if (!good_pmbr)
- goto fail;
- }
+ /* This will be added to the EFI Spec. per Intel after v1.02. */
+ legacymbr = kzalloc(sizeof(*legacymbr), GFP_KERNEL);
+ if (!legacymbr)
+ goto fail;
+
+ read_lba(state, 0, (u8 *)legacymbr, sizeof(*legacymbr));
+ good_pmbr = is_pmbr_valid(legacymbr, total_sectors);
+ kfree(legacymbr);
+
+ if (!good_pmbr)
+ goto fail;
+
+ pr_debug("Device has a %s MBR\n",
+ good_pmbr == GPT_MBR_PROTECTIVE ?
+ "protective" : "hybrid");
+ }
good_pgpt = is_gpt_valid(state, GPT_PRIMARY_PARTITION_TABLE_LBA,
&pgpt, &pptes);
@@ -576,11 +632,8 @@ static int find_valid_gpt(struct parsed_partitions *state, gpt_header **gpt,
*ptes = pptes;
kfree(agpt);
kfree(aptes);
- if (!good_agpt) {
- printk(KERN_WARNING
- "Alternate GPT is invalid, "
- "using primary GPT.\n");
- }
+ if (!good_agpt)
+ pr_warn("Alternate GPT is invalid, using primary GPT.\n");
return 1;
}
else if (good_agpt) {
@@ -588,8 +641,7 @@ static int find_valid_gpt(struct parsed_partitions *state, gpt_header **gpt,
*ptes = aptes;
kfree(pgpt);
kfree(pptes);
- printk(KERN_WARNING
- "Primary GPT is invalid, using alternate GPT.\n");
+ pr_warn("Primary GPT is invalid, using alternate GPT.\n");
return 1;
}
@@ -651,8 +703,7 @@ int efi_partition(struct parsed_partitions *state)
put_partition(state, i+1, start * ssz, size * ssz);
/* If this is a RAID volume, tell md */
- if (!efi_guidcmp(ptes[i].partition_type_guid,
- PARTITION_LINUX_RAID_GUID))
+ if (!efi_guidcmp(ptes[i].partition_type_guid, PARTITION_LINUX_RAID_GUID))
state->parts[i + 1].flags = ADDPART_FLAG_RAID;
info = &state->parts[i + 1].info;
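The efi.c changes above rework the protective-MBR check: find_valid_gpt() now computes total_sectors (the block device size in bytes shifted right by 9, i.e. in 512-byte sectors) and passes it to is_pmbr_valid(), which no longer answers a plain yes/no but reports GPT_MBR_PROTECTIVE or GPT_MBR_HYBRID (constants added in efi.h below). The following standalone sketch illustrates one way such a classification can be structured; it is an illustration only, not the body of is_pmbr_valid() from this patch, and the cut-down struct and helper names are assumptions. Fields are assumed to be already converted to CPU byte order.

#include <stdint.h>

#define MSDOS_MBR_SIGNATURE     0xaa55
#define EFI_PMBR_OSTYPE_EFI_GPT 0xEE
#define GPT_MBR_PROTECTIVE      1
#define GPT_MBR_HYBRID          2

struct mbr_record {             /* cut-down gpt_mbr_record */
        uint8_t  os_type;
        uint32_t starting_lba;
        uint32_t size_in_lba;
};

struct mbr {                    /* cut-down legacy_mbr */
        struct mbr_record part[4];
        uint16_t signature;
};

/* Returns 0 (not a GPT disk), GPT_MBR_PROTECTIVE or GPT_MBR_HYBRID. */
static int classify_pmbr(const struct mbr *mbr, uint64_t total_sectors)
{
        uint32_t expect;
        int i, gpt_idx = -1;

        if (mbr->signature != MSDOS_MBR_SIGNATURE)
                return 0;

        /* A GPT disk carries a 0xEE record that starts at LBA 1. */
        for (i = 0; i < 4; i++) {
                if (mbr->part[i].os_type == EFI_PMBR_OSTYPE_EFI_GPT &&
                    mbr->part[i].starting_lba == 1) {
                        gpt_idx = i;
                        break;
                }
        }
        if (gpt_idx < 0)
                return 0;

        /* Any other populated record means a hybrid MBR/GPT layout. */
        for (i = 0; i < 4; i++)
                if (i != gpt_idx && mbr->part[i].os_type != 0)
                        return GPT_MBR_HYBRID;

        /*
         * A purely protective record should cover the rest of the disk,
         * clamped to 0xFFFFFFFF for disks larger than 2 TiB; treating a
         * short record as invalid is an assumption of this sketch.
         */
        expect = (total_sectors - 1 > 0xFFFFFFFFULL) ?
                 0xFFFFFFFFU : (uint32_t)(total_sectors - 1);
        if (mbr->part[gpt_idx].size_in_lba != expect)
                return 0;

        return GPT_MBR_PROTECTIVE;
}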
diff --git a/block/partitions/efi.h b/block/partitions/efi.h
index b69ab72..4efcafb 100644
--- a/block/partitions/efi.h
+++ b/block/partitions/efi.h
@@ -37,6 +37,9 @@
#define EFI_PMBR_OSTYPE_EFI 0xEF
#define EFI_PMBR_OSTYPE_EFI_GPT 0xEE
+#define GPT_MBR_PROTECTIVE 1
+#define GPT_MBR_HYBRID 2
+
#define GPT_HEADER_SIGNATURE 0x5452415020494645ULL
#define GPT_HEADER_REVISION_V1 0x00010000
#define GPT_PRIMARY_PARTITION_TABLE_LBA 1
@@ -101,11 +104,25 @@ typedef struct _gpt_entry {
efi_char16_t partition_name[72 / sizeof (efi_char16_t)];
} __attribute__ ((packed)) gpt_entry;
+typedef struct _gpt_mbr_record {
+ u8 boot_indicator; /* unused by EFI, set to 0x80 for bootable */
+ u8 start_head; /* unused by EFI, pt start in CHS */
+ u8 start_sector; /* unused by EFI, pt start in CHS */
+ u8 start_track; /* unused by EFI, pt start in CHS */
+ u8 os_type; /* EFI and legacy non-EFI OS types */
+ u8 end_head; /* unused by EFI, pt end in CHS */
+ u8 end_sector; /* unused by EFI, pt end in CHS */
+ u8 end_track; /* unused by EFI, pt end in CHS */
+ __le32 starting_lba; /* used by EFI - start addr of the on disk pt */
+ __le32 size_in_lba; /* used by EFI - size of pt in LBA */
+} __packed gpt_mbr_record;
+
+
typedef struct _legacy_mbr {
u8 boot_code[440];
__le32 unique_mbr_signature;
__le16 unknown;
- struct partition partition_record[4];
+ gpt_mbr_record partition_record[4];
__le16 signature;
} __attribute__ ((packed)) legacy_mbr;
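Swapping struct partition for gpt_mbr_record in the record array does not change the on-disk layout: 440 bytes of boot code + 4 (unique_mbr_signature) + 2 (unknown) + 4 * 16 (four packed gpt_mbr_record entries of 8 x u8 plus 2 x __le32 each) + 2 (signature) = 512 bytes, exactly one legacy sector. A compile-time check along these lines (illustrative only; not part of the patch, which would more likely use BUILD_BUG_ON()) would catch any accidental padding:

/* C11 form; kernel code would typically express this with BUILD_BUG_ON(). */
_Static_assert(sizeof(gpt_mbr_record) == 16, "gpt_mbr_record must be 16 bytes");
_Static_assert(sizeof(legacy_mbr) == 512, "legacy_mbr must be one 512-byte sector");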
@@ -113,22 +130,3 @@ typedef struct _legacy_mbr {
extern int efi_partition(struct parsed_partitions *state);
#endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * --------------------------------------------------------------------------
- * Local variables:
- * c-indent-level: 4
- * c-brace-imaginary-offset: 0
- * c-brace-offset: -4
- * c-argdecl-indent: 4
- * c-label-offset: -4
- * c-continued-statement-offset: 4
- * c-continued-brace-offset: 0
- * indent-tabs-mode: nil
- * tab-width: 8
- * End:
- */
diff --git a/block/partitions/msdos.c b/block/partitions/msdos.c
index 7681cd2..9123f25 100644
--- a/block/partitions/msdos.c
+++ b/block/partitions/msdos.c
@@ -23,6 +23,7 @@
#include "check.h"
#include "msdos.h"
#include "efi.h"
+#include "aix.h"
/*
* Many architectures don't like unaligned accesses, while
@@ -90,7 +91,7 @@ static int aix_magic_present(struct parsed_partitions *state, unsigned char *p)
if (d[0] == '_' && d[1] == 'L' && d[2] == 'V' && d[3] == 'M')
ret = 1;
put_dev_sector(sect);
- };
+ }
return ret;
}
@@ -142,7 +143,7 @@ static void parse_extended(struct parsed_partitions *state,
return;
if (!msdos_magic_present(data + 510))
- goto done;
+ goto done;
p = (struct partition *) (data + 0x1be);
@@ -155,7 +156,7 @@ static void parse_extended(struct parsed_partitions *state,
* and OS/2 seems to use all four entries.
*/
- /*
+ /*
* First process the data partition(s)
*/
for (i=0; i<4; i++, p++) {
@@ -263,7 +264,7 @@ static void parse_solaris_x86(struct parsed_partitions *state,
}
#if defined(CONFIG_BSD_DISKLABEL)
-/*
+/*
* Create devices for BSD partitions listed in a disklabel, under a
* dos-like partition. See parse_extended() for more information.
*/
@@ -294,7 +295,7 @@ static void parse_bsd(struct parsed_partitions *state,
if (state->next == state->limit)
break;
- if (p->p_fstype == BSD_FS_UNUSED)
+ if (p->p_fstype == BSD_FS_UNUSED)
continue;
bsd_start = le32_to_cpu(p->p_offset);
bsd_size = le32_to_cpu(p->p_size);
@@ -441,7 +442,7 @@ static struct {
{NEW_SOLARIS_X86_PARTITION, parse_solaris_x86},
{0, NULL},
};
-
+
int msdos_partition(struct parsed_partitions *state)
{
sector_t sector_size = bdev_logical_block_size(state->bdev) / 512;
@@ -462,8 +463,12 @@ int msdos_partition(struct parsed_partitions *state)
*/
if (aix_magic_present(state, data)) {
put_dev_sector(sect);
+#ifdef CONFIG_AIX_PARTITION
+ return aix_partition(state);
+#else
strlcat(state->pp_buf, " [AIX]", PAGE_SIZE);
return 0;
+#endif
}
if (!msdos_magic_present(data + 510)) {
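The msdos.c hunk above gates the new AIX parser behind CONFIG_AIX_PARTITION at the call site. A common alternative, shown here only as an illustrative sketch and not as the contents of block/partitions/aix.h in this patch, is to keep the caller unconditional and let the header provide a no-op stub when the option is disabled:

struct parsed_partitions;

#ifdef CONFIG_AIX_PARTITION
extern int aix_partition(struct parsed_partitions *state);
#else
static inline int aix_partition(struct parsed_partitions *state)
{
        return 0;       /* label not claimed; caller falls through to MSDOS handling */
}
#endif

The in-line #ifdef chosen by the patch has the advantage that a kernel built without the parser still appends " [AIX]" to the partition printout, which the stub approach would lose.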