author     Linus Torvalds <torvalds@linux-foundation.org>   2012-05-22 17:40:19 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-05-22 17:40:19 -0700
commit     88d6ae8dc33af12fe1c7941b1fae2767374046fd
tree       8f17415c0722b0a4d7511ac170cfb4e3802e1ad2
parent     f5c101892fbd3d2f6d2729bc7eb7b3f6c31dbddd
parent     0d4dde1ac9a5af74ac76c6ab90557d1ae7b8f5d8
Merge branch 'for-3.5' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup
Pull cgroup updates from Tejun Heo:
"cgroup file type addition / removal is updated so that file types are
added and removed as groups rather than as individual files, which lets
dynamic file type addition / removal be implemented by cgroup and used
by controllers. The blkio controller changes that will come through the
block tree depend on this. Other changes include a res_counter cleanup
and disallowing kthread / PF_THREAD_BOUND threads from being attached
to non-root cgroups.

There's a reported bug with the file type addition / removal handling
which can lead to an oops on cgroup umount. The issue is being looked
into. It shouldn't cause problems for most setups and isn't a security
concern."
Fix up trivial conflict in Documentation/feature-removal-schedule.txt
* 'for-3.5' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup: (21 commits)
res_counter: Account max_usage when calling res_counter_charge_nofail()
res_counter: Merge res_counter_charge and res_counter_charge_nofail
cgroups: disallow attaching kthreadd or PF_THREAD_BOUND threads
cgroup: remove cgroup_subsys->populate()
cgroup: get rid of populate for memcg
cgroup: pass struct mem_cgroup instead of struct cgroup to socket memcg
cgroup: make css->refcnt clearing on cgroup removal optional
cgroup: use negative bias on css->refcnt to block css_tryget()
cgroup: implement cgroup_rm_cftypes()
cgroup: introduce struct cfent
cgroup: relocate __d_cgrp() and __d_cft()
cgroup: remove cgroup_add_file[s]()
cgroup: convert memcg controller to the new cftype interface
memcg: always create memsw files if CONFIG_CGROUP_MEM_RES_CTLR_SWAP
cgroup: convert all non-memcg controllers to the new cftype interface
cgroup: relocate cftype and cgroup_subsys definitions in controllers
cgroup: merge cft_release_agent cftype array into the base files array
cgroup: implement cgroup_add_cftypes() and friends
cgroup: build list of all cgroups under a given cgroupfs_root
cgroup: move cgroup_clear_directory() call out of cgroup_populate_dir()
...
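The conversion the pull message describes replaces each controller's ->populate()
callback with a declarative cftype array: the array is terminated by an entry with
a zero-length name and is hung off the new cgroup_subsys->base_cftypes field, and
cgroup core then creates and removes the files itself. Below is a minimal
controller-side sketch of that pattern, modeled on the debug/freezer/cpu
conversions in the diff; example_subsys, example_files, example_read,
example_create, example_destroy and example_subsys_id are placeholder names, and
the snippet is kernel-style code rather than a standalone program.

static u64 example_read(struct cgroup *cgrp, struct cftype *cft)
{
	return 0;	/* placeholder value */
}

static struct cftype example_files[] = {
	{
		.name = "usage",	/* shows up as "example.usage" */
		.read_u64 = example_read,
	},
	{ }	/* zero-length name terminates the array */
};

struct cgroup_subsys example_subsys = {
	.name		= "example",
	.create		= example_create,
	.destroy	= example_destroy,
	.subsys_id	= example_subsys_id,
	/* replaces the old ->populate() callback: cgroup core now adds
	 * and removes these files for every cgroup in the hierarchy */
	.base_cftypes	= example_files,
};

Controllers that only want a file on the root cgroup, or everywhere except the
root, set CFTYPE_ONLY_ON_ROOT / CFTYPE_NOT_ON_ROOT in cft->flags instead of
open-coding the check, as the cpuset and freezer hunks below do.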
-rw-r--r--  Documentation/cgroups/resource_counter.txt |   4
-rw-r--r--  Documentation/feature-removal-schedule.txt |  11
-rw-r--r--  block/blk-cgroup.c                          |  45
-rw-r--r--  include/linux/cgroup.h                      |  81
-rw-r--r--  include/linux/res_counter.h                 |   2
-rw-r--r--  include/net/sock.h                          |  12
-rw-r--r--  include/net/tcp_memcontrol.h                |   4
-rw-r--r--  kernel/cgroup.c                             | 564
-rw-r--r--  kernel/cgroup_freezer.c                     |  11
-rw-r--r--  kernel/cpuset.c                             |  31
-rw-r--r--  kernel/res_counter.c                        |  71
-rw-r--r--  kernel/sched/core.c                         |  16
-rw-r--r--  mm/memcontrol.c                             | 115
-rw-r--r--  net/core/netprio_cgroup.c                   |  30
-rw-r--r--  net/core/sock.c                             |  10
-rw-r--r--  net/ipv4/tcp_memcontrol.c                   |  77
-rw-r--r--  net/sched/cls_cgroup.c                      |  31
-rw-r--r--  security/device_cgroup.c                    |  10
18 files changed, 687 insertions, 438 deletions
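Besides the static base_cftypes hook, the series adds cgroup_add_cftypes() and
cgroup_rm_cftypes() so file types can be registered after the fact, for all
existing and future cgroups of a subsystem; the net/ipv4/tcp_memcontrol.c hunk
below uses this from an initcall instead of creating its files from memcg's
populate path. A hedged sketch of that dynamic-registration pattern follows;
extra_files, example_u64_read and example_extra_init are placeholder names, and
hooking onto mem_cgroup_subsys simply mirrors what tcp_memcontrol does in this
merge.

static struct cftype extra_files[] = {
	{
		.name = "kmem.example_in_bytes",
		.read_u64 = example_u64_read,
	},
	{ }	/* terminate */
};

static int __init example_extra_init(void)
{
	/* The files appear in every existing cgroup bound to the subsystem
	 * and in all cgroups created later; in this version registration
	 * itself can only fail with -ENOMEM, hence the WARN_ON. */
	WARN_ON(cgroup_add_cftypes(&mem_cgroup_subsys, extra_files));
	return 0;
}
__initcall(example_extra_init);

A matching cgroup_rm_cftypes(&mem_cgroup_subsys, extra_files) would undo the
registration and delete the files from every attached cgroup.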
diff --git a/Documentation/cgroups/resource_counter.txt b/Documentation/cgroups/resource_counter.txt index 95b24d7..f3c4ec3 100644 --- a/Documentation/cgroups/resource_counter.txt +++ b/Documentation/cgroups/resource_counter.txt @@ -77,11 +77,11 @@ to work with it. where the charging failed. d. int res_counter_charge_locked - (struct res_counter *rc, unsigned long val) + (struct res_counter *rc, unsigned long val, bool force) The same as res_counter_charge(), but it must not acquire/release the res_counter->lock internally (it must be called with res_counter->lock - held). + held). The force parameter indicates whether we can bypass the limit. e. void res_counter_uncharge[_locked] (struct res_counter *rc, unsigned long val) diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index e9abede..1e69a81 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt @@ -556,3 +556,14 @@ Why: The V4L2_CID_VCENTER, V4L2_CID_HCENTER controls have been deprecated There are newer controls (V4L2_CID_PAN*, V4L2_CID_TILT*) that provide similar functionality. Who: Sylwester Nawrocki <sylvester.nawrocki@gmail.com> + +---------------------------- + +What: cgroup option updates via remount +When: March 2013 +Why: Remount currently allows changing bound subsystems and + release_agent. Rebinding is hardly useful as it only works + when the hierarchy is empty and release_agent itself should be + replaced with conventional fsnotify. + +---------------------------- diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index ea84a23..126c341 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -28,34 +28,12 @@ static LIST_HEAD(blkio_list); struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT }; EXPORT_SYMBOL_GPL(blkio_root_cgroup); -static struct cgroup_subsys_state *blkiocg_create(struct cgroup *); -static int blkiocg_can_attach(struct cgroup *, struct cgroup_taskset *); -static void blkiocg_attach(struct cgroup *, struct cgroup_taskset *); -static void blkiocg_destroy(struct cgroup *); -static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *); - /* for encoding cft->private value on file */ #define BLKIOFILE_PRIVATE(x, val) (((x) << 16) | (val)) /* What policy owns the file, proportional or throttle */ #define BLKIOFILE_POLICY(val) (((val) >> 16) & 0xffff) #define BLKIOFILE_ATTR(val) ((val) & 0xffff) -struct cgroup_subsys blkio_subsys = { - .name = "blkio", - .create = blkiocg_create, - .can_attach = blkiocg_can_attach, - .attach = blkiocg_attach, - .destroy = blkiocg_destroy, - .populate = blkiocg_populate, -#ifdef CONFIG_BLK_CGROUP - /* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */ - .subsys_id = blkio_subsys_id, -#endif - .use_id = 1, - .module = THIS_MODULE, -}; -EXPORT_SYMBOL_GPL(blkio_subsys); - static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg, struct blkio_policy_node *pn) { @@ -1537,14 +1515,9 @@ struct cftype blkio_files[] = { .read_map = blkiocg_file_read_map, }, #endif + { } /* terminate */ }; -static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup) -{ - return cgroup_add_files(cgroup, subsys, blkio_files, - ARRAY_SIZE(blkio_files)); -} - static void blkiocg_destroy(struct cgroup *cgroup) { struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup); @@ -1658,6 +1631,22 @@ static void blkiocg_attach(struct cgroup *cgrp, struct cgroup_taskset *tset) } } +struct cgroup_subsys blkio_subsys = { + .name = "blkio", + 
.create = blkiocg_create, + .can_attach = blkiocg_can_attach, + .attach = blkiocg_attach, + .destroy = blkiocg_destroy, +#ifdef CONFIG_BLK_CGROUP + /* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */ + .subsys_id = blkio_subsys_id, +#endif + .base_cftypes = blkio_files, + .use_id = 1, + .module = THIS_MODULE, +}; +EXPORT_SYMBOL_GPL(blkio_subsys); + void blkio_policy_register(struct blkio_policy_type *blkiop) { spin_lock(&blkio_list_lock); diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 5a85b34..d3f5fba 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -16,6 +16,7 @@ #include <linux/prio_heap.h> #include <linux/rwsem.h> #include <linux/idr.h> +#include <linux/workqueue.h> #ifdef CONFIG_CGROUPS @@ -76,12 +77,16 @@ struct cgroup_subsys_state { unsigned long flags; /* ID for this css, if possible */ struct css_id __rcu *id; + + /* Used to put @cgroup->dentry on the last css_put() */ + struct work_struct dput_work; }; /* bits in struct cgroup_subsys_state flags field */ enum { CSS_ROOT, /* This CSS is the root of the subsystem */ CSS_REMOVED, /* This CSS is dead */ + CSS_CLEAR_CSS_REFS, /* @ss->__DEPRECATED_clear_css_refs */ }; /* Caller must verify that the css is not for root cgroup */ @@ -115,16 +120,12 @@ static inline bool css_is_removed(struct cgroup_subsys_state *css) * the css has been destroyed. */ +extern bool __css_tryget(struct cgroup_subsys_state *css); static inline bool css_tryget(struct cgroup_subsys_state *css) { if (test_bit(CSS_ROOT, &css->flags)) return true; - while (!atomic_inc_not_zero(&css->refcnt)) { - if (test_bit(CSS_REMOVED, &css->flags)) - return false; - cpu_relax(); - } - return true; + return __css_tryget(css); } /* @@ -132,11 +133,11 @@ static inline bool css_tryget(struct cgroup_subsys_state *css) * css_get() or css_tryget() */ -extern void __css_put(struct cgroup_subsys_state *css, int count); +extern void __css_put(struct cgroup_subsys_state *css); static inline void css_put(struct cgroup_subsys_state *css) { if (!test_bit(CSS_ROOT, &css->flags)) - __css_put(css, 1); + __css_put(css); } /* bits in struct cgroup flags field */ @@ -175,6 +176,7 @@ struct cgroup { */ struct list_head sibling; /* my parent's children */ struct list_head children; /* my children */ + struct list_head files; /* my files */ struct cgroup *parent; /* my parent */ struct dentry __rcu *dentry; /* cgroup fs entry, RCU protected */ @@ -191,6 +193,9 @@ struct cgroup { */ struct list_head css_sets; + struct list_head allcg_node; /* cgroupfs_root->allcg_list */ + struct list_head cft_q_node; /* used during cftype add/rm */ + /* * Linked list running through all cgroups that can * potentially be reaped by the release agent. Protected by @@ -275,11 +280,17 @@ struct cgroup_map_cb { * - the 'cftype' of the file is file->f_dentry->d_fsdata */ -#define MAX_CFTYPE_NAME 64 +/* cftype->flags */ +#define CFTYPE_ONLY_ON_ROOT (1U << 0) /* only create on root cg */ +#define CFTYPE_NOT_ON_ROOT (1U << 1) /* don't create onp root cg */ + +#define MAX_CFTYPE_NAME 64 + struct cftype { /* * By convention, the name should begin with the name of the - * subsystem, followed by a period + * subsystem, followed by a period. Zero length string indicates + * end of cftype array. 
*/ char name[MAX_CFTYPE_NAME]; int private; @@ -295,6 +306,9 @@ struct cftype { */ size_t max_write_len; + /* CFTYPE_* flags */ + unsigned int flags; + int (*open)(struct inode *inode, struct file *file); ssize_t (*read)(struct cgroup *cgrp, struct cftype *cft, struct file *file, @@ -373,6 +387,16 @@ struct cftype { struct eventfd_ctx *eventfd); }; +/* + * cftype_sets describe cftypes belonging to a subsystem and are chained at + * cgroup_subsys->cftsets. Each cftset points to an array of cftypes + * terminated by zero length name. + */ +struct cftype_set { + struct list_head node; /* chained at subsys->cftsets */ + const struct cftype *cfts; +}; + struct cgroup_scanner { struct cgroup *cg; int (*test_task)(struct task_struct *p, struct cgroup_scanner *scan); @@ -382,21 +406,8 @@ struct cgroup_scanner { void *data; }; -/* - * Add a new file to the given cgroup directory. Should only be - * called by subsystems from within a populate() method - */ -int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys, - const struct cftype *cft); - -/* - * Add a set of new files to the given cgroup directory. Should - * only be called by subsystems from within a populate() method - */ -int cgroup_add_files(struct cgroup *cgrp, - struct cgroup_subsys *subsys, - const struct cftype cft[], - int count); +int cgroup_add_cftypes(struct cgroup_subsys *ss, const struct cftype *cfts); +int cgroup_rm_cftypes(struct cgroup_subsys *ss, const struct cftype *cfts); int cgroup_is_removed(const struct cgroup *cgrp); @@ -461,7 +472,6 @@ struct cgroup_subsys { void (*fork)(struct task_struct *task); void (*exit)(struct cgroup *cgrp, struct cgroup *old_cgrp, struct task_struct *task); - int (*populate)(struct cgroup_subsys *ss, struct cgroup *cgrp); void (*post_clone)(struct cgroup *cgrp); void (*bind)(struct cgroup *root); @@ -474,6 +484,18 @@ struct cgroup_subsys { * (not available in early_init time.) */ bool use_id; + + /* + * If %true, cgroup removal will try to clear css refs by retrying + * ss->pre_destroy() until there's no css ref left. This behavior + * is strictly for backward compatibility and will be removed as + * soon as the current user (memcg) is updated. + * + * If %false, ss->pre_destroy() can't fail and cgroup removal won't + * wait for css refs to drop to zero before proceeding. 
+ */ + bool __DEPRECATED_clear_css_refs; + #define MAX_CGROUP_TYPE_NAMELEN 32 const char *name; @@ -500,6 +522,13 @@ struct cgroup_subsys { struct idr idr; spinlock_t id_lock; + /* list of cftype_sets */ + struct list_head cftsets; + + /* base cftypes, automatically [de]registered with subsys itself */ + struct cftype *base_cftypes; + struct cftype_set base_cftset; + /* should be defined only by modular subsystems */ struct module *module; }; diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h index da81af0..fb20189 100644 --- a/include/linux/res_counter.h +++ b/include/linux/res_counter.h @@ -116,7 +116,7 @@ void res_counter_init(struct res_counter *counter, struct res_counter *parent); */ int __must_check res_counter_charge_locked(struct res_counter *counter, - unsigned long val); + unsigned long val, bool force); int __must_check res_counter_charge(struct res_counter *counter, unsigned long val, struct res_counter **limit_fail_at); int __must_check res_counter_charge_nofail(struct res_counter *counter, diff --git a/include/net/sock.h b/include/net/sock.h index da93155..d89f058 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -70,16 +70,16 @@ struct cgroup; struct cgroup_subsys; #ifdef CONFIG_NET -int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss); -void mem_cgroup_sockets_destroy(struct cgroup *cgrp); +int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss); +void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg); #else static inline -int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss) +int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss) { return 0; } static inline -void mem_cgroup_sockets_destroy(struct cgroup *cgrp) +void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg) { } #endif @@ -914,9 +914,9 @@ struct proto { * This function has to setup any files the protocol want to * appear in the kmem cgroup filesystem. */ - int (*init_cgroup)(struct cgroup *cgrp, + int (*init_cgroup)(struct mem_cgroup *memcg, struct cgroup_subsys *ss); - void (*destroy_cgroup)(struct cgroup *cgrp); + void (*destroy_cgroup)(struct mem_cgroup *memcg); struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg); #endif }; diff --git a/include/net/tcp_memcontrol.h b/include/net/tcp_memcontrol.h index 48410ff..7df18bc 100644 --- a/include/net/tcp_memcontrol.h +++ b/include/net/tcp_memcontrol.h @@ -12,8 +12,8 @@ struct tcp_memcontrol { }; struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg); -int tcp_init_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss); -void tcp_destroy_cgroup(struct cgroup *cgrp); +int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss); +void tcp_destroy_cgroup(struct mem_cgroup *memcg); unsigned long long tcp_max_memory(const struct mem_cgroup *memcg); void tcp_prot_mem(struct mem_cgroup *memcg, long val, int idx); #endif /* _TCP_MEMCG_H */ diff --git a/kernel/cgroup.c b/kernel/cgroup.c index ed64cca..ad8eae5 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -60,9 +60,13 @@ #include <linux/eventfd.h> #include <linux/poll.h> #include <linux/flex_array.h> /* used in cgroup_attach_proc */ +#include <linux/kthread.h> #include <linux/atomic.h> +/* css deactivation bias, makes css->refcnt negative to deny new trygets */ +#define CSS_DEACT_BIAS INT_MIN + /* * cgroup_mutex is the master lock. Any modification to cgroup or its * hierarchy must be performed while holding it. 
@@ -127,6 +131,9 @@ struct cgroupfs_root { /* A list running through the active hierarchies */ struct list_head root_list; + /* All cgroups on this root, cgroup_mutex protected */ + struct list_head allcg_list; + /* Hierarchy-specific flags */ unsigned long flags; @@ -145,6 +152,15 @@ struct cgroupfs_root { static struct cgroupfs_root rootnode; /* + * cgroupfs file entry, pointed to from leaf dentry->d_fsdata. + */ +struct cfent { + struct list_head node; + struct dentry *dentry; + struct cftype *type; +}; + +/* * CSS ID -- ID per subsys's Cgroup Subsys State(CSS). used only when * cgroup_subsys->use_id != 0. */ @@ -239,6 +255,14 @@ int cgroup_lock_is_held(void) EXPORT_SYMBOL_GPL(cgroup_lock_is_held); +/* the current nr of refs, always >= 0 whether @css is deactivated or not */ +static int css_refcnt(struct cgroup_subsys_state *css) +{ + int v = atomic_read(&css->refcnt); + + return v >= 0 ? v : v - CSS_DEACT_BIAS; +} + /* convenient tests for these bits */ inline int cgroup_is_removed(const struct cgroup *cgrp) { @@ -279,6 +303,21 @@ list_for_each_entry(_ss, &_root->subsys_list, sibling) #define for_each_active_root(_root) \ list_for_each_entry(_root, &roots, root_list) +static inline struct cgroup *__d_cgrp(struct dentry *dentry) +{ + return dentry->d_fsdata; +} + +static inline struct cfent *__d_cfe(struct dentry *dentry) +{ + return dentry->d_fsdata; +} + +static inline struct cftype *__d_cft(struct dentry *dentry) +{ + return __d_cfe(dentry)->type; +} + /* the list of cgroups eligible for automatic release. Protected by * release_list_lock */ static LIST_HEAD(release_list); @@ -816,12 +855,17 @@ static int cgroup_call_pre_destroy(struct cgroup *cgrp) struct cgroup_subsys *ss; int ret = 0; - for_each_subsys(cgrp->root, ss) - if (ss->pre_destroy) { - ret = ss->pre_destroy(cgrp); - if (ret) - break; + for_each_subsys(cgrp->root, ss) { + if (!ss->pre_destroy) + continue; + + ret = ss->pre_destroy(cgrp); + if (ret) { + /* ->pre_destroy() failure is being deprecated */ + WARN_ON_ONCE(!ss->__DEPRECATED_clear_css_refs); + break; } + } return ret; } @@ -864,6 +908,14 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode) BUG_ON(!list_empty(&cgrp->pidlists)); kfree_rcu(cgrp, rcu_head); + } else { + struct cfent *cfe = __d_cfe(dentry); + struct cgroup *cgrp = dentry->d_parent->d_fsdata; + + WARN_ONCE(!list_empty(&cfe->node) && + cgrp != &cgrp->root->top_cgroup, + "cfe still linked for %s\n", cfe->type->name); + kfree(cfe); } iput(inode); } @@ -882,34 +934,36 @@ static void remove_dir(struct dentry *d) dput(parent); } -static void cgroup_clear_directory(struct dentry *dentry) -{ - struct list_head *node; - - BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex)); - spin_lock(&dentry->d_lock); - node = dentry->d_subdirs.next; - while (node != &dentry->d_subdirs) { - struct dentry *d = list_entry(node, struct dentry, d_u.d_child); - - spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED); - list_del_init(node); - if (d->d_inode) { - /* This should never be called on a cgroup - * directory with child cgroups */ - BUG_ON(d->d_inode->i_mode & S_IFDIR); - dget_dlock(d); - spin_unlock(&d->d_lock); - spin_unlock(&dentry->d_lock); - d_delete(d); - simple_unlink(dentry->d_inode, d); - dput(d); - spin_lock(&dentry->d_lock); - } else - spin_unlock(&d->d_lock); - node = dentry->d_subdirs.next; +static int cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft) +{ + struct cfent *cfe; + + lockdep_assert_held(&cgrp->dentry->d_inode->i_mutex); + lockdep_assert_held(&cgroup_mutex); + + 
list_for_each_entry(cfe, &cgrp->files, node) { + struct dentry *d = cfe->dentry; + + if (cft && cfe->type != cft) + continue; + + dget(d); + d_delete(d); + simple_unlink(d->d_inode, d); + list_del_init(&cfe->node); + dput(d); + + return 0; } - spin_unlock(&dentry->d_lock); + return -ENOENT; +} + +static void cgroup_clear_directory(struct dentry *dir) +{ + struct cgroup *cgrp = __d_cgrp(dir); + + while (!list_empty(&cgrp->files)) + cgroup_rm_file(cgrp, NULL); } /* @@ -1294,6 +1348,11 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data) if (ret) goto out_unlock; + /* See feature-removal-schedule.txt */ + if (opts.subsys_bits != root->actual_subsys_bits || opts.release_agent) + pr_warning("cgroup: option changes via remount are deprecated (pid=%d comm=%s)\n", + task_tgid_nr(current), current->comm); + /* Don't allow flags or name to change at remount */ if (opts.flags != root->flags || (opts.name && strcmp(opts.name, root->name))) { @@ -1308,7 +1367,8 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data) goto out_unlock; } - /* (re)populate subsystem files */ + /* clear out any existing files and repopulate subsystem files */ + cgroup_clear_directory(cgrp->dentry); cgroup_populate_dir(cgrp); if (opts.release_agent) @@ -1333,6 +1393,7 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp) { INIT_LIST_HEAD(&cgrp->sibling); INIT_LIST_HEAD(&cgrp->children); + INIT_LIST_HEAD(&cgrp->files); INIT_LIST_HEAD(&cgrp->css_sets); INIT_LIST_HEAD(&cgrp->release_list); INIT_LIST_HEAD(&cgrp->pidlists); @@ -1344,11 +1405,14 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp) static void init_cgroup_root(struct cgroupfs_root *root) { struct cgroup *cgrp = &root->top_cgroup; + INIT_LIST_HEAD(&root->subsys_list); INIT_LIST_HEAD(&root->root_list); + INIT_LIST_HEAD(&root->allcg_list); root->number_of_cgroups = 1; cgrp->root = root; cgrp->top_cgroup = cgrp; + list_add_tail(&cgrp->allcg_node, &root->allcg_list); init_cgroup_housekeeping(cgrp); } @@ -1692,16 +1756,6 @@ static struct file_system_type cgroup_fs_type = { static struct kobject *cgroup_kobj; -static inline struct cgroup *__d_cgrp(struct dentry *dentry) -{ - return dentry->d_fsdata; -} - -static inline struct cftype *__d_cft(struct dentry *dentry) -{ - return dentry->d_fsdata; -} - /** * cgroup_path - generate the path of a cgroup * @cgrp: the cgroup in question @@ -2172,6 +2226,18 @@ retry_find_task: if (threadgroup) tsk = tsk->group_leader; + + /* + * Workqueue threads may acquire PF_THREAD_BOUND and become + * trapped in a cpuset, or RT worker may be born in a cgroup + * with no rt_runtime allocated. Just say no. + */ + if (tsk == kthreadd_task || (tsk->flags & PF_THREAD_BOUND)) { + ret = -EINVAL; + rcu_read_unlock(); + goto out_unlock_cgroup; + } + get_task_struct(tsk); rcu_read_unlock(); @@ -2603,50 +2669,191 @@ static umode_t cgroup_file_mode(const struct cftype *cft) return mode; } -int cgroup_add_file(struct cgroup *cgrp, - struct cgroup_subsys *subsys, - const struct cftype *cft) +static int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys, + const struct cftype *cft) { struct dentry *dir = cgrp->dentry; + struct cgroup *parent = __d_cgrp(dir); struct dentry *dentry; + struct cfent *cfe; int error; umode_t mode; - char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 }; + + /* does @cft->flags tell us to skip creation on @cgrp? 
*/ + if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgrp->parent) + return 0; + if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgrp->parent) + return 0; + if (subsys && !test_bit(ROOT_NOPREFIX, &cgrp->root->flags)) { strcpy(name, subsys->name); strcat(name, "."); } strcat(name, cft->name); + BUG_ON(!mutex_is_locked(&dir->d_inode->i_mutex)); + + cfe = kzalloc(sizeof(*cfe), GFP_KERNEL); + if (!cfe) + return -ENOMEM; + dentry = lookup_one_len(name, dir, strlen(name)); - if (!IS_ERR(dentry)) { - mode = cgroup_file_mode(cft); - error = cgroup_create_file(dentry, mode | S_IFREG, - cgrp->root->sb); - if (!error) - dentry->d_fsdata = (void *)cft; - dput(dentry); - } else + if (IS_ERR(dentry)) { error = PTR_ERR(dentry); + goto out; + } + + mode = cgroup_file_mode(cft); + error = cgroup_create_file(dentry, mode | S_IFREG, cgrp->root->sb); + if (!error) { + cfe->type = (void *)cft; + cfe->dentry = dentry; + dentry->d_fsdata = cfe; + list_add_tail(&cfe->node, &parent->files); + cfe = NULL; + } + dput(dentry); +out: + kfree(cfe); return error; } -EXPORT_SYMBOL_GPL(cgroup_add_file); -int cgroup_add_files(struct cgroup *cgrp, - struct cgroup_subsys *subsys, - const struct cftype cft[], - int count) +static int cgroup_addrm_files(struct cgroup *cgrp, struct cgroup_subsys *subsys, + const struct cftype cfts[], bool is_add) { - int i, err; - for (i = 0; i < count; i++) { - err = cgroup_add_file(cgrp, subsys, &cft[i]); - if (err) - return err; + const struct cftype *cft; + int err, ret = 0; + + for (cft = cfts; cft->name[0] != '\0'; cft++) { + if (is_add) + err = cgroup_add_file(cgrp, subsys, cft); + else + err = cgroup_rm_file(cgrp, cft); + if (err) { + pr_warning("cgroup_addrm_files: failed to %s %s, err=%d\n", + is_add ? "add" : "remove", cft->name, err); + ret = err; + } + } + return ret; +} + +static DEFINE_MUTEX(cgroup_cft_mutex); + +static void cgroup_cfts_prepare(void) + __acquires(&cgroup_cft_mutex) __acquires(&cgroup_mutex) +{ + /* + * Thanks to the entanglement with vfs inode locking, we can't walk + * the existing cgroups under cgroup_mutex and create files. + * Instead, we increment reference on all cgroups and build list of + * them using @cgrp->cft_q_node. Grab cgroup_cft_mutex to ensure + * exclusive access to the field. + */ + mutex_lock(&cgroup_cft_mutex); + mutex_lock(&cgroup_mutex); +} + +static void cgroup_cfts_commit(struct cgroup_subsys *ss, + const struct cftype *cfts, bool is_add) + __releases(&cgroup_mutex) __releases(&cgroup_cft_mutex) +{ + LIST_HEAD(pending); + struct cgroup *cgrp, *n; + + /* %NULL @cfts indicates abort and don't bother if @ss isn't attached */ + if (cfts && ss->root != &rootnode) { + list_for_each_entry(cgrp, &ss->root->allcg_list, allcg_node) { + dget(cgrp->dentry); + list_add_tail(&cgrp->cft_q_node, &pending); + } + } + + mutex_unlock(&cgroup_mutex); + + /* + * All new cgroups will see @cfts update on @ss->cftsets. Add/rm + * files for all cgroups which were created before. + */ + list_for_each_entry_safe(cgrp, n, &pending, cft_q_node) { + struct inode *inode = cgrp->dentry->d_inode; + + mutex_lock(&inode->i_mutex); + mutex_lock(&cgroup_mutex); + if (!cgroup_is_removed(cgrp)) + cgroup_addrm_files(cgrp, ss, cfts, is_add); + mutex_unlock(&cgroup_mutex); + mutex_unlock(&inode->i_mutex); + + list_del_init(&cgrp->cft_q_node); + dput(cgrp->dentry); } + + mutex_unlock(&cgroup_cft_mutex); +} + +/** + * cgroup_add_cftypes - add an array of cftypes to a subsystem + * @ss: target cgroup subsystem + * @cfts: zero-length name terminated array of cftypes + * + * Register @cfts to @ss. 
Files described by @cfts are created for all + * existing cgroups to which @ss is attached and all future cgroups will + * have them too. This function can be called anytime whether @ss is + * attached or not. + * + * Returns 0 on successful registration, -errno on failure. Note that this + * function currently returns 0 as long as @cfts registration is successful + * even if some file creation attempts on existing cgroups fail. + */ +int cgroup_add_cftypes(struct cgroup_subsys *ss, const struct cftype *cfts) +{ + struct cftype_set *set; + + set = kzalloc(sizeof(*set), GFP_KERNEL); + if (!set) + return -ENOMEM; + + cgroup_cfts_prepare(); + set->cfts = cfts; + list_add_tail(&set->node, &ss->cftsets); + cgroup_cfts_commit(ss, cfts, true); + return 0; } -EXPORT_SYMBOL_GPL(cgroup_add_files); +EXPORT_SYMBOL_GPL(cgroup_add_cftypes); + +/** + * cgroup_rm_cftypes - remove an array of cftypes from a subsystem + * @ss: target cgroup subsystem + * @cfts: zero-length name terminated array of cftypes + * + * Unregister @cfts from @ss. Files described by @cfts are removed from + * all existing cgroups to which @ss is attached and all future cgroups + * won't have them either. This function can be called anytime whether @ss + * is attached or not. + * + * Returns 0 on successful unregistration, -ENOENT if @cfts is not + * registered with @ss. + */ +int cgroup_rm_cftypes(struct cgroup_subsys *ss, const struct cftype *cfts) +{ + struct cftype_set *set; + + cgroup_cfts_prepare(); + + list_for_each_entry(set, &ss->cftsets, node) { + if (set->cfts == cfts) { + list_del_init(&set->node); + cgroup_cfts_commit(ss, cfts, false); + return 0; + } + } + + cgroup_cfts_commit(ss, NULL, false); + return -ENOENT; +} /** * cgroup_task_count - count the number of tasks in a cgroup. 
@@ -3625,13 +3832,14 @@ static struct cftype files[] = { .read_u64 = cgroup_clone_children_read, .write_u64 = cgroup_clone_children_write, }, -}; - -static struct cftype cft_release_agent = { - .name = "release_agent", - .read_seq_string = cgroup_release_agent_show, - .write_string = cgroup_release_agent_write, - .max_write_len = PATH_MAX, + { + .name = "release_agent", + .flags = CFTYPE_ONLY_ON_ROOT, + .read_seq_string = cgroup_release_agent_show, + .write_string = cgroup_release_agent_write, + .max_write_len = PATH_MAX, + }, + { } /* terminate */ }; static int cgroup_populate_dir(struct cgroup *cgrp) @@ -3639,22 +3847,18 @@ static int cgroup_populate_dir(struct cgroup *cgrp) int err; struct cgroup_subsys *ss; - /* First clear out any existing files */ - cgroup_clear_directory(cgrp->dentry); - - err = cgroup_add_files(cgrp, NULL, files, ARRAY_SIZE(files)); + err = cgroup_addrm_files(cgrp, NULL, files, true); if (err < 0) return err; - if (cgrp == cgrp->top_cgroup) { - if ((err = cgroup_add_file(cgrp, NULL, &cft_release_agent)) < 0) - return err; - } - + /* process cftsets of each subsystem */ for_each_subsys(cgrp->root, ss) { - if (ss->populate && (err = ss->populate(ss, cgrp)) < 0) - return err; + struct cftype_set *set; + + list_for_each_entry(set, &ss->cftsets, node) + cgroup_addrm_files(cgrp, ss, set->cfts, true); } + /* This cgroup is ready now */ for_each_subsys(cgrp->root, ss) { struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id]; @@ -3670,6 +3874,14 @@ static int cgroup_populate_dir(struct cgroup *cgrp) return 0; } +static void css_dput_fn(struct work_struct *work) +{ + struct cgroup_subsys_state *css = + container_of(work, struct cgroup_subsys_state, dput_work); + + dput(css->cgroup->dentry); +} + static void init_cgroup_css(struct cgroup_subsys_state *css, struct cgroup_subsys *ss, struct cgroup *cgrp) @@ -3682,6 +3894,16 @@ static void init_cgroup_css(struct cgroup_subsys_state *css, set_bit(CSS_ROOT, &css->flags); BUG_ON(cgrp->subsys[ss->subsys_id]); cgrp->subsys[ss->subsys_id] = css; + + /* + * If !clear_css_refs, css holds an extra ref to @cgrp->dentry + * which is put on the last css_put(). dput() requires process + * context, which css_put() may be called without. @css->dput_work + * will be used to invoke dput() asynchronously from css_put(). + */ + INIT_WORK(&css->dput_work, css_dput_fn); + if (ss->__DEPRECATED_clear_css_refs) + set_bit(CSS_CLEAR_CSS_REFS, &css->flags); } static void cgroup_lock_hierarchy(struct cgroupfs_root *root) @@ -3784,9 +4006,16 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, if (err < 0) goto err_remove; + /* If !clear_css_refs, each css holds a ref to the cgroup's dentry */ + for_each_subsys(root, ss) + if (!ss->__DEPRECATED_clear_css_refs) + dget(dentry); + /* The cgroup directory was pre-locked for us */ BUG_ON(!mutex_is_locked(&cgrp->dentry->d_inode->i_mutex)); + list_add_tail(&cgrp->allcg_node, &root->allcg_list); + err = cgroup_populate_dir(cgrp); /* If err < 0, we have a half-filled directory - oh well ;) */ @@ -3826,18 +4055,19 @@ static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) return cgroup_create(c_parent, dentry, mode | S_IFDIR); } +/* + * Check the reference count on each subsystem. Since we already + * established that there are no tasks in the cgroup, if the css refcount + * is also 1, then there should be no outstanding references, so the + * subsystem is safe to destroy. 
We scan across all subsystems rather than + * using the per-hierarchy linked list of mounted subsystems since we can + * be called via check_for_release() with no synchronization other than + * RCU, and the subsystem linked list isn't RCU-safe. + */ static int cgroup_has_css_refs(struct cgroup *cgrp) { - /* Check the reference count on each subsystem. Since we - * already established that there are no tasks in the - * cgroup, if the css refcount is also 1, then there should - * be no outstanding references, so the subsystem is safe to - * destroy. We scan across all subsystems rather than using - * the per-hierarchy linked list of mounted subsystems since - * we can be called via check_for_release() with no - * synchronization other than RCU, and the subsystem linked - * list isn't RCU-safe */ int i; + /* * We won't need to lock the subsys array, because the subsystems * we're concerned about aren't going anywhere since our cgroup root @@ -3846,17 +4076,21 @@ static int cgroup_has_css_refs(struct cgroup *cgrp) for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { struct cgroup_subsys *ss = subsys[i]; struct cgroup_subsys_state *css; + /* Skip subsystems not present or not in this hierarchy */ if (ss == NULL || ss->root != cgrp->root) continue; + css = cgrp->subsys[ss->subsys_id]; - /* When called from check_for_release() it's possible + /* + * When called from check_for_release() it's possible * that by this point the cgroup has been removed * and the css deleted. But a false-positive doesn't * matter, since it can only happen if the cgroup * has been deleted and hence no longer needs the - * release agent to be called anyway. */ - if (css && (atomic_read(&css->refcnt) > 1)) + * release agent to be called anyway. + */ + if (css && css_refcnt(css) > 1) return 1; } return 0; @@ -3866,51 +4100,63 @@ static int cgroup_has_css_refs(struct cgroup *cgrp) * Atomically mark all (or else none) of the cgroup's CSS objects as * CSS_REMOVED. Return true on success, or false if the cgroup has * busy subsystems. Call with cgroup_mutex held + * + * Depending on whether a subsys has __DEPRECATED_clear_css_refs set or + * not, cgroup removal behaves differently. + * + * If clear is set, css refcnt for the subsystem should be zero before + * cgroup removal can be committed. This is implemented by + * CGRP_WAIT_ON_RMDIR and retry logic around ->pre_destroy(), which may be + * called multiple times until all css refcnts reach zero and is allowed to + * veto removal on any invocation. This behavior is deprecated and will be + * removed as soon as the existing user (memcg) is updated. + * + * If clear is not set, each css holds an extra reference to the cgroup's + * dentry and cgroup removal proceeds regardless of css refs. + * ->pre_destroy() will be called at least once and is not allowed to fail. + * On the last put of each css, whenever that may be, the extra dentry ref + * is put so that dentry destruction happens only after all css's are + * released. */ - static int cgroup_clear_css_refs(struct cgroup *cgrp) { struct cgroup_subsys *ss; unsigned long flags; bool failed = false; + local_irq_save(flags); + + /* + * Block new css_tryget() by deactivating refcnt. If all refcnts + * for subsystems w/ clear_css_refs set were 1 at the moment of + * deactivation, we succeeded. 
+ */ for_each_subsys(cgrp->root, ss) { struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id]; - int refcnt; - while (1) { - /* We can only remove a CSS with a refcnt==1 */ - refcnt = atomic_read(&css->refcnt); - if (refcnt > 1) { - failed = true; - goto done; - } - BUG_ON(!refcnt); - /* - * Drop the refcnt to 0 while we check other - * subsystems. This will cause any racing - * css_tryget() to spin until we set the - * CSS_REMOVED bits or abort - */ - if (atomic_cmpxchg(&css->refcnt, refcnt, 0) == refcnt) - break; - cpu_relax(); - } + + WARN_ON(atomic_read(&css->refcnt) < 0); + atomic_add(CSS_DEACT_BIAS, &css->refcnt); + + if (ss->__DEPRECATED_clear_css_refs) + failed |= css_refcnt(css) != 1; } - done: + + /* + * If succeeded, set REMOVED and put all the base refs; otherwise, + * restore refcnts to positive values. Either way, all in-progress + * css_tryget() will be released. + */ for_each_subsys(cgrp->root, ss) { struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id]; - if (failed) { - /* - * Restore old refcnt if we previously managed - * to clear it from 1 to 0 - */ - if (!atomic_read(&css->refcnt)) - atomic_set(&css->refcnt, 1); - } else { - /* Commit the fact that the CSS is removed */ + + if (!failed) { set_bit(CSS_REMOVED, &css->flags); + css_put(css); + } else { + atomic_sub(CSS_DEACT_BIAS, &css->refcnt); } } + local_irq_restore(flags); return !failed; } @@ -3995,6 +4241,8 @@ again: list_del_init(&cgrp->sibling); cgroup_unlock_hierarchy(cgrp->root); + list_del_init(&cgrp->allcg_node); + d = dget(cgrp->dentry); cgroup_d_remove_dir(d); @@ -4021,12 +4269,29 @@ again: return 0; } +static void __init_or_module cgroup_init_cftsets(struct cgroup_subsys *ss) +{ + INIT_LIST_HEAD(&ss->cftsets); + + /* + * base_cftset is embedded in subsys itself, no need to worry about + * deregistration. + */ + if (ss->base_cftypes) { + ss->base_cftset.cfts = ss->base_cftypes; + list_add_tail(&ss->base_cftset.node, &ss->cftsets); + } +} + static void __init cgroup_init_subsys(struct cgroup_subsys *ss) { struct cgroup_subsys_state *css; printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name); + /* init base cftset */ + cgroup_init_cftsets(ss); + /* Create the top cgroup state for this subsystem */ list_add(&ss->sibling, &rootnode.subsys_list); ss->root = &rootnode; @@ -4096,6 +4361,9 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss) return 0; } + /* init base cftset */ + cgroup_init_cftsets(ss); + /* * need to register a subsys id before anything else - for example, * init_cgroup_css needs it. 
@@ -4685,21 +4953,41 @@ static void check_for_release(struct cgroup *cgrp) } /* Caller must verify that the css is not for root cgroup */ -void __css_put(struct cgroup_subsys_state *css, int count) +bool __css_tryget(struct cgroup_subsys_state *css) +{ + do { + int v = css_refcnt(css); + + if (atomic_cmpxchg(&css->refcnt, v, v + 1) == v) + return true; + cpu_relax(); + } while (!test_bit(CSS_REMOVED, &css->flags)); + + return false; +} +EXPORT_SYMBOL_GPL(__css_tryget); + +/* Caller must verify that the css is not for root cgroup */ +void __css_put(struct cgroup_subsys_state *css) { struct cgroup *cgrp = css->cgroup; - int val; + rcu_read_lock(); - val = atomic_sub_return(count, &css->refcnt); - if (val == 1) { + atomic_dec(&css->refcnt); + switch (css_refcnt(css)) { + case 1: if (notify_on_release(cgrp)) { set_bit(CGRP_RELEASABLE, &cgrp->flags); check_for_release(cgrp); } cgroup_wakeup_rmdir_waiter(cgrp); + break; + case 0: + if (!test_bit(CSS_CLEAR_CSS_REFS, &css->flags)) + schedule_work(&css->dput_work); + break; } rcu_read_unlock(); - WARN_ON_ONCE(val < 1); } EXPORT_SYMBOL_GPL(__css_put); @@ -4818,7 +5106,7 @@ unsigned short css_id(struct cgroup_subsys_state *css) * on this or this is under rcu_read_lock(). Once css->id is allocated, * it's unchanged until freed. */ - cssid = rcu_dereference_check(css->id, atomic_read(&css->refcnt)); + cssid = rcu_dereference_check(css->id, css_refcnt(css)); if (cssid) return cssid->id; @@ -4830,7 +5118,7 @@ unsigned short css_depth(struct cgroup_subsys_state *css) { struct css_id *cssid; - cssid = rcu_dereference_check(css->id, atomic_read(&css->refcnt)); + cssid = rcu_dereference_check(css->id, css_refcnt(css)); if (cssid) return cssid->depth; @@ -5211,19 +5499,15 @@ static struct cftype debug_files[] = { .name = "releasable", .read_u64 = releasable_read, }, -}; -static int debug_populate(struct cgroup_subsys *ss, struct cgroup *cont) -{ - return cgroup_add_files(cont, ss, debug_files, - ARRAY_SIZE(debug_files)); -} + { } /* terminate */ +}; struct cgroup_subsys debug_subsys = { .name = "debug", .create = debug_create, .destroy = debug_destroy, - .populate = debug_populate, .subsys_id = debug_subsys_id, + .base_cftypes = debug_files, }; #endif /* CONFIG_CGROUP_DEBUG */ diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c index f86e939..3649fc6 100644 --- a/kernel/cgroup_freezer.c +++ b/kernel/cgroup_freezer.c @@ -358,24 +358,19 @@ static int freezer_write(struct cgroup *cgroup, static struct cftype files[] = { { .name = "state", + .flags = CFTYPE_NOT_ON_ROOT, .read_seq_string = freezer_read, .write_string = freezer_write, }, + { } /* terminate */ }; -static int freezer_populate(struct cgroup_subsys *ss, struct cgroup *cgroup) -{ - if (!cgroup->parent) - return 0; - return cgroup_add_files(cgroup, ss, files, ARRAY_SIZE(files)); -} - struct cgroup_subsys freezer_subsys = { .name = "freezer", .create = freezer_create, .destroy = freezer_destroy, - .populate = freezer_populate, .subsys_id = freezer_subsys_id, .can_attach = freezer_can_attach, .fork = freezer_fork, + .base_cftypes = files, }; diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 14f7070..8c8bd65 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -1765,28 +1765,17 @@ static struct cftype files[] = { .write_u64 = cpuset_write_u64, .private = FILE_SPREAD_SLAB, }, -}; - -static struct cftype cft_memory_pressure_enabled = { - .name = "memory_pressure_enabled", - .read_u64 = cpuset_read_u64, - .write_u64 = cpuset_write_u64, - .private = FILE_MEMORY_PRESSURE_ENABLED, -}; -static int 
cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont) -{ - int err; + { + .name = "memory_pressure_enabled", + .flags = CFTYPE_ONLY_ON_ROOT, + .read_u64 = cpuset_read_u64, + .write_u64 = cpuset_write_u64, + .private = FILE_MEMORY_PRESSURE_ENABLED, + }, - err = cgroup_add_files(cont, ss, files, ARRAY_SIZE(files)); - if (err) - return err; - /* memory_pressure_enabled is in root cpuset only */ - if (!cont->parent) - err = cgroup_add_file(cont, ss, - &cft_memory_pressure_enabled); - return err; -} + { } /* terminate */ +}; /* * post_clone() is called during cgroup_create() when the @@ -1887,9 +1876,9 @@ struct cgroup_subsys cpuset_subsys = { .destroy = cpuset_destroy, .can_attach = cpuset_can_attach, .attach = cpuset_attach, - .populate = cpuset_populate, .post_clone = cpuset_post_clone, .subsys_id = cpuset_subsys_id, + .base_cftypes = files, .early_init = 1, }; diff --git a/kernel/res_counter.c b/kernel/res_counter.c index d508363..bebe2b1 100644 --- a/kernel/res_counter.c +++ b/kernel/res_counter.c @@ -22,75 +22,70 @@ void res_counter_init(struct res_counter *counter, struct res_counter *parent) counter->parent = parent; } -int res_counter_charge_locked(struct res_counter *counter, unsigned long val) +int res_counter_charge_locked(struct res_counter *counter, unsigned long val, + bool force) { + int ret = 0; + if (counter->usage + val > counter->limit) { counter->failcnt++; - return -ENOMEM; + ret = -ENOMEM; + if (!force) + return ret; } counter->usage += val; if (counter->usage > counter->max_usage) counter->max_usage = counter->usage; - return 0; + return ret; } -int res_counter_charge(struct res_counter *counter, unsigned long val, - struct res_counter **limit_fail_at) +static int __res_counter_charge(struct res_counter *counter, unsigned long val, + struct res_counter **limit_fail_at, bool force) { - int ret; + int ret, r; unsigned long flags; struct res_counter *c, *u; + r = ret = 0; *limit_fail_at = NULL; local_irq_save(flags); for (c = counter; c != NULL; c = c->parent) { spin_lock(&c->lock); - ret = res_counter_charge_locked(c, val); + r = res_counter_charge_locked(c, val, force); spin_unlock(&c->lock); - if (ret < 0) { + if (r < 0 && !ret) { + ret = r; *limit_fail_at = c; - goto undo; + if (!force) + break; } } - ret = 0; - goto done; -undo: - for (u = counter; u != c; u = u->parent) { - spin_lock(&u->lock); - res_counter_uncharge_locked(u, val); - spin_unlock(&u->lock); + + if (ret < 0 && !force) { + for (u = counter; u != c; u = u->parent) { + spin_lock(&u->lock); + res_counter_uncharge_locked(u, val); + spin_unlock(&u->lock); + } } -done: local_irq_restore(flags); + return ret; } +int res_counter_charge(struct res_counter *counter, unsigned long val, + struct res_counter **limit_fail_at) +{ + return __res_counter_charge(counter, val, limit_fail_at, false); +} + int res_counter_charge_nofail(struct res_counter *counter, unsigned long val, struct res_counter **limit_fail_at) { - int ret, r; - unsigned long flags; - struct res_counter *c; - - r = ret = 0; - *limit_fail_at = NULL; - local_irq_save(flags); - for (c = counter; c != NULL; c = c->parent) { - spin_lock(&c->lock); - r = res_counter_charge_locked(c, val); - if (r) - c->usage += val; - spin_unlock(&c->lock); - if (r < 0 && ret == 0) { - *limit_fail_at = c; - ret = r; - } - } - local_irq_restore(flags); - - return ret; + return __res_counter_charge(counter, val, limit_fail_at, true); } + void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val) { if (WARN_ON(counter->usage < val)) diff --git 
a/kernel/sched/core.c b/kernel/sched/core.c index ea8a476..03667c3 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -7985,13 +7985,9 @@ static struct cftype cpu_files[] = { .write_u64 = cpu_rt_period_write_uint, }, #endif + { } /* terminate */ }; -static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont) -{ - return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files)); -} - struct cgroup_subsys cpu_cgroup_subsys = { .name = "cpu", .create = cpu_cgroup_create, @@ -7999,8 +7995,8 @@ struct cgroup_subsys cpu_cgroup_subsys = { .can_attach = cpu_cgroup_can_attach, .attach = cpu_cgroup_attach, .exit = cpu_cgroup_exit, - .populate = cpu_cgroup_populate, .subsys_id = cpu_cgroup_subsys_id, + .base_cftypes = cpu_files, .early_init = 1, }; @@ -8185,13 +8181,9 @@ static struct cftype files[] = { .name = "stat", .read_map = cpuacct_stats_show, }, + { } /* terminate */ }; -static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) -{ - return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files)); -} - /* * charge this task's execution time to its accounting group. * @@ -8223,7 +8215,7 @@ struct cgroup_subsys cpuacct_subsys = { .name = "cpuacct", .create = cpuacct_create, .destroy = cpuacct_destroy, - .populate = cpuacct_populate, .subsys_id = cpuacct_subsys_id, + .base_cftypes = files, }; #endif /* CONFIG_CGROUP_CPUACCT */ diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 7685d4a..f342778 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3873,14 +3873,21 @@ static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) return val << PAGE_SHIFT; } -static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft) +static ssize_t mem_cgroup_read(struct cgroup *cont, struct cftype *cft, + struct file *file, char __user *buf, + size_t nbytes, loff_t *ppos) { struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); + char str[64]; u64 val; - int type, name; + int type, name, len; type = MEMFILE_TYPE(cft->private); name = MEMFILE_ATTR(cft->private); + + if (!do_swap_account && type == _MEMSWAP) + return -EOPNOTSUPP; + switch (type) { case _MEM: if (name == RES_USAGE) @@ -3897,7 +3904,9 @@ static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft) default: BUG(); } - return val; + + len = scnprintf(str, sizeof(str), "%llu\n", (unsigned long long)val); + return simple_read_from_buffer(buf, nbytes, ppos, str, len); } /* * The user of this function is... 
@@ -3913,6 +3922,10 @@ static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft, type = MEMFILE_TYPE(cft->private); name = MEMFILE_ATTR(cft->private); + + if (!do_swap_account && type == _MEMSWAP) + return -EOPNOTSUPP; + switch (name) { case RES_LIMIT: if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ @@ -3978,12 +3991,15 @@ out: static int mem_cgroup_reset(struct cgroup *cont, unsigned int event) { - struct mem_cgroup *memcg; + struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); int type, name; - memcg = mem_cgroup_from_cont(cont); type = MEMFILE_TYPE(event); name = MEMFILE_ATTR(event); + + if (!do_swap_account && type == _MEMSWAP) + return -EOPNOTSUPP; + switch (name) { case RES_MAX_USAGE: if (type == _MEM) @@ -4624,29 +4640,22 @@ static int mem_control_numa_stat_open(struct inode *unused, struct file *file) #endif /* CONFIG_NUMA */ #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM -static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss) +static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss) { - /* - * Part of this would be better living in a separate allocation - * function, leaving us with just the cgroup tree population work. - * We, however, depend on state such as network's proto_list that - * is only initialized after cgroup creation. I found the less - * cumbersome way to deal with it to defer it all to populate time - */ - return mem_cgroup_sockets_init(cont, ss); + return mem_cgroup_sockets_init(memcg, ss); }; -static void kmem_cgroup_destroy(struct cgroup *cont) +static void kmem_cgroup_destroy(struct mem_cgroup *memcg) { - mem_cgroup_sockets_destroy(cont); + mem_cgroup_sockets_destroy(memcg); } #else -static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss) +static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss) { return 0; } -static void kmem_cgroup_destroy(struct cgroup *cont) +static void kmem_cgroup_destroy(struct mem_cgroup *memcg) { } #endif @@ -4655,7 +4664,7 @@ static struct cftype mem_cgroup_files[] = { { .name = "usage_in_bytes", .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), - .read_u64 = mem_cgroup_read, + .read = mem_cgroup_read, .register_event = mem_cgroup_usage_register_event, .unregister_event = mem_cgroup_usage_unregister_event, }, @@ -4663,25 +4672,25 @@ static struct cftype mem_cgroup_files[] = { .name = "max_usage_in_bytes", .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), .trigger = mem_cgroup_reset, - .read_u64 = mem_cgroup_read, + .read = mem_cgroup_read, }, { .name = "limit_in_bytes", .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), .write_string = mem_cgroup_write, - .read_u64 = mem_cgroup_read, + .read = mem_cgroup_read, }, { .name = "soft_limit_in_bytes", .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), .write_string = mem_cgroup_write, - .read_u64 = mem_cgroup_read, + .read = mem_cgroup_read, }, { .name = "failcnt", .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), .trigger = mem_cgroup_reset, - .read_u64 = mem_cgroup_read, + .read = mem_cgroup_read, }, { .name = "stat", @@ -4721,14 +4730,11 @@ static struct cftype mem_cgroup_files[] = { .mode = S_IRUGO, }, #endif -}; - #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP -static struct cftype memsw_cgroup_files[] = { { .name = "memsw.usage_in_bytes", .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), - .read_u64 = mem_cgroup_read, + .read = mem_cgroup_read, .register_event = mem_cgroup_usage_register_event, .unregister_event = mem_cgroup_usage_unregister_event, }, @@ -4736,35 +4742,23 @@ static struct cftype 
memsw_cgroup_files[] = { .name = "memsw.max_usage_in_bytes", .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE), .trigger = mem_cgroup_reset, - .read_u64 = mem_cgroup_read, + .read = mem_cgroup_read, }, { .name = "memsw.limit_in_bytes", .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), .write_string = mem_cgroup_write, - .read_u64 = mem_cgroup_read, + .read = mem_cgroup_read, }, { .name = "memsw.failcnt", .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT), .trigger = mem_cgroup_reset, - .read_u64 = mem_cgroup_read, + .read = mem_cgroup_read, }, -}; - -static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss) -{ - if (!do_swap_account) - return 0; - return cgroup_add_files(cont, ss, memsw_cgroup_files, - ARRAY_SIZE(memsw_cgroup_files)); -}; -#else -static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss) -{ - return 0; -} #endif + { }, /* terminate */ +}; static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) { @@ -5016,6 +5010,17 @@ mem_cgroup_create(struct cgroup *cont) memcg->move_charge_at_immigrate = 0; mutex_init(&memcg->thresholds_lock); spin_lock_init(&memcg->move_lock); + + error = memcg_init_kmem(memcg, &mem_cgroup_subsys); + if (error) { + /* + * We call put now because our (and parent's) refcnts + * are already in place. mem_cgroup_put() will internally + * call __mem_cgroup_free, so return directly + */ + mem_cgroup_put(memcg); + return ERR_PTR(error); + } return &memcg->css; free_out: __mem_cgroup_free(memcg); @@ -5033,28 +5038,11 @@ static void mem_cgroup_destroy(struct cgroup *cont) { struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); - kmem_cgroup_destroy(cont); + kmem_cgroup_destroy(memcg); mem_cgroup_put(memcg); } -static int mem_cgroup_populate(struct cgroup_subsys *ss, - struct cgroup *cont) -{ - int ret; - - ret = cgroup_add_files(cont, ss, mem_cgroup_files, - ARRAY_SIZE(mem_cgroup_files)); - - if (!ret) - ret = register_memsw_files(cont, ss); - - if (!ret) - ret = register_kmem_files(cont, ss); - - return ret; -} - #ifdef CONFIG_MMU /* Handlers for move charge at task migration. 
*/ #define PRECHARGE_COUNT_AT_ONCE 256 @@ -5638,12 +5626,13 @@ struct cgroup_subsys mem_cgroup_subsys = { .create = mem_cgroup_create, .pre_destroy = mem_cgroup_pre_destroy, .destroy = mem_cgroup_destroy, - .populate = mem_cgroup_populate, .can_attach = mem_cgroup_can_attach, .cancel_attach = mem_cgroup_cancel_attach, .attach = mem_cgroup_move_task, + .base_cftypes = mem_cgroup_files, .early_init = 0, .use_id = 1, + .__DEPRECATED_clear_css_refs = true, }; #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c index 09eda68..5b8aa2f 100644 --- a/net/core/netprio_cgroup.c +++ b/net/core/netprio_cgroup.c @@ -25,21 +25,6 @@ #include <net/sock.h> #include <net/netprio_cgroup.h> -static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp); -static void cgrp_destroy(struct cgroup *cgrp); -static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp); - -struct cgroup_subsys net_prio_subsys = { - .name = "net_prio", - .create = cgrp_create, - .destroy = cgrp_destroy, - .populate = cgrp_populate, -#ifdef CONFIG_NETPRIO_CGROUP - .subsys_id = net_prio_subsys_id, -#endif - .module = THIS_MODULE -}; - #define PRIOIDX_SZ 128 static unsigned long prioidx_map[PRIOIDX_SZ]; @@ -259,12 +244,19 @@ static struct cftype ss_files[] = { .read_map = read_priomap, .write_string = write_priomap, }, + { } /* terminate */ }; -static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) -{ - return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files)); -} +struct cgroup_subsys net_prio_subsys = { + .name = "net_prio", + .create = cgrp_create, + .destroy = cgrp_destroy, +#ifdef CONFIG_NETPRIO_CGROUP + .subsys_id = net_prio_subsys_id, +#endif + .base_cftypes = ss_files, + .module = THIS_MODULE +}; static int netprio_device_event(struct notifier_block *unused, unsigned long event, void *ptr) diff --git a/net/core/sock.c b/net/core/sock.c index 5efcd63..f372d9b 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -143,7 +143,7 @@ static DEFINE_MUTEX(proto_list_mutex); static LIST_HEAD(proto_list); #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM -int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss) +int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss) { struct proto *proto; int ret = 0; @@ -151,7 +151,7 @@ int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss) mutex_lock(&proto_list_mutex); list_for_each_entry(proto, &proto_list, node) { if (proto->init_cgroup) { - ret = proto->init_cgroup(cgrp, ss); + ret = proto->init_cgroup(memcg, ss); if (ret) goto out; } @@ -162,19 +162,19 @@ int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss) out: list_for_each_entry_continue_reverse(proto, &proto_list, node) if (proto->destroy_cgroup) - proto->destroy_cgroup(cgrp); + proto->destroy_cgroup(memcg); mutex_unlock(&proto_list_mutex); return ret; } -void mem_cgroup_sockets_destroy(struct cgroup *cgrp) +void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg) { struct proto *proto; mutex_lock(&proto_list_mutex); list_for_each_entry_reverse(proto, &proto_list, node) if (proto->destroy_cgroup) - proto->destroy_cgroup(cgrp); + proto->destroy_cgroup(memcg); mutex_unlock(&proto_list_mutex); } #endif diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c index e795272..1517037 100644 --- a/net/ipv4/tcp_memcontrol.c +++ b/net/ipv4/tcp_memcontrol.c @@ -6,37 +6,6 @@ #include <linux/memcontrol.h> #include <linux/module.h> -static u64 tcp_cgroup_read(struct cgroup *cont, 
struct cftype *cft); -static int tcp_cgroup_write(struct cgroup *cont, struct cftype *cft, - const char *buffer); -static int tcp_cgroup_reset(struct cgroup *cont, unsigned int event); - -static struct cftype tcp_files[] = { - { - .name = "kmem.tcp.limit_in_bytes", - .write_string = tcp_cgroup_write, - .read_u64 = tcp_cgroup_read, - .private = RES_LIMIT, - }, - { - .name = "kmem.tcp.usage_in_bytes", - .read_u64 = tcp_cgroup_read, - .private = RES_USAGE, - }, - { - .name = "kmem.tcp.failcnt", - .private = RES_FAILCNT, - .trigger = tcp_cgroup_reset, - .read_u64 = tcp_cgroup_read, - }, - { - .name = "kmem.tcp.max_usage_in_bytes", - .private = RES_MAX_USAGE, - .trigger = tcp_cgroup_reset, - .read_u64 = tcp_cgroup_read, - }, -}; - static inline struct tcp_memcontrol *tcp_from_cgproto(struct cg_proto *cg_proto) { return container_of(cg_proto, struct tcp_memcontrol, cg_proto); @@ -49,7 +18,7 @@ static void memcg_tcp_enter_memory_pressure(struct sock *sk) } EXPORT_SYMBOL(memcg_tcp_enter_memory_pressure); -int tcp_init_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss) +int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss) { /* * The root cgroup does not use res_counters, but rather, @@ -59,13 +28,12 @@ int tcp_init_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss) struct res_counter *res_parent = NULL; struct cg_proto *cg_proto, *parent_cg; struct tcp_memcontrol *tcp; - struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); struct mem_cgroup *parent = parent_mem_cgroup(memcg); struct net *net = current->nsproxy->net_ns; cg_proto = tcp_prot.proto_cgroup(memcg); if (!cg_proto) - goto create_files; + return 0; tcp = tcp_from_cgproto(cg_proto); @@ -88,15 +56,12 @@ int tcp_init_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss) cg_proto->sockets_allocated = &tcp->tcp_sockets_allocated; cg_proto->memcg = memcg; -create_files: - return cgroup_add_files(cgrp, ss, tcp_files, - ARRAY_SIZE(tcp_files)); + return 0; } EXPORT_SYMBOL(tcp_init_cgroup); -void tcp_destroy_cgroup(struct cgroup *cgrp) +void tcp_destroy_cgroup(struct mem_cgroup *memcg) { - struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); struct cg_proto *cg_proto; struct tcp_memcontrol *tcp; u64 val; @@ -270,3 +235,37 @@ void tcp_prot_mem(struct mem_cgroup *memcg, long val, int idx) tcp->tcp_prot_mem[idx] = val; } + +static struct cftype tcp_files[] = { + { + .name = "kmem.tcp.limit_in_bytes", + .write_string = tcp_cgroup_write, + .read_u64 = tcp_cgroup_read, + .private = RES_LIMIT, + }, + { + .name = "kmem.tcp.usage_in_bytes", + .read_u64 = tcp_cgroup_read, + .private = RES_USAGE, + }, + { + .name = "kmem.tcp.failcnt", + .private = RES_FAILCNT, + .trigger = tcp_cgroup_reset, + .read_u64 = tcp_cgroup_read, + }, + { + .name = "kmem.tcp.max_usage_in_bytes", + .private = RES_MAX_USAGE, + .trigger = tcp_cgroup_reset, + .read_u64 = tcp_cgroup_read, + }, + { } /* terminate */ +}; + +static int __init tcp_memcontrol_init(void) +{ + WARN_ON(cgroup_add_cftypes(&mem_cgroup_subsys, tcp_files)); + return 0; +} +__initcall(tcp_memcontrol_init); diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c index 1afaa28..7743ea8 100644 --- a/net/sched/cls_cgroup.c +++ b/net/sched/cls_cgroup.c @@ -22,22 +22,6 @@ #include <net/sock.h> #include <net/cls_cgroup.h> -static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp); -static void cgrp_destroy(struct cgroup *cgrp); -static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp); - -struct cgroup_subsys net_cls_subsys = { - .name = "net_cls", - .create = 
cgrp_create, - .destroy = cgrp_destroy, - .populate = cgrp_populate, -#ifdef CONFIG_NET_CLS_CGROUP - .subsys_id = net_cls_subsys_id, -#endif - .module = THIS_MODULE, -}; - - static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp) { return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id), @@ -86,12 +70,19 @@ static struct cftype ss_files[] = { .read_u64 = read_classid, .write_u64 = write_classid, }, + { } /* terminate */ }; -static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) -{ - return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files)); -} +struct cgroup_subsys net_cls_subsys = { + .name = "net_cls", + .create = cgrp_create, + .destroy = cgrp_destroy, +#ifdef CONFIG_NET_CLS_CGROUP + .subsys_id = net_cls_subsys_id, +#endif + .base_cftypes = ss_files, + .module = THIS_MODULE, +}; struct cls_cgroup_head { u32 handle; diff --git a/security/device_cgroup.c b/security/device_cgroup.c index c43a332..442204c 100644 --- a/security/device_cgroup.c +++ b/security/device_cgroup.c @@ -447,22 +447,16 @@ static struct cftype dev_cgroup_files[] = { .read_seq_string = devcgroup_seq_read, .private = DEVCG_LIST, }, + { } /* terminate */ }; -static int devcgroup_populate(struct cgroup_subsys *ss, - struct cgroup *cgroup) -{ - return cgroup_add_files(cgroup, ss, dev_cgroup_files, - ARRAY_SIZE(dev_cgroup_files)); -} - struct cgroup_subsys devices_subsys = { .name = "devices", .can_attach = devcgroup_can_attach, .create = devcgroup_create, .destroy = devcgroup_destroy, - .populate = devcgroup_populate, .subsys_id = devices_subsys_id, + .base_cftypes = dev_cgroup_files, }; int __devcgroup_inode_permission(struct inode *inode, int mask) |
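One part of the kernel/cgroup.c hunk that is easy to misread is the css refcount
change: instead of the old spin-until-CSS_REMOVED loop, cgroup removal now adds
CSS_DEACT_BIAS (INT_MIN) to css->refcnt, so the raw value goes negative, new
css_tryget() attempts are denied, and css_refcnt() can still recover the real
count. Below is a tiny userspace model of just that integer arithmetic (assumed
file name bias_demo.c); it deliberately ignores the kernel's atomics, RCU and the
CSS_REMOVED handling.

/* bias_demo.c - model of the CSS_DEACT_BIAS arithmetic from the patch;
 * only the integer math is mirrored, not atomic ops, RCU or locking. */
#include <limits.h>
#include <stdio.h>

#define CSS_DEACT_BIAS	INT_MIN

/* current number of refs, whether or not the css has been deactivated */
static int css_refcnt(int raw)
{
	return raw >= 0 ? raw : raw - CSS_DEACT_BIAS;
}

int main(void)
{
	int refcnt = 3;			/* three live references */

	/* cgroup_clear_css_refs() deactivates by adding the bias: the raw
	 * counter goes negative, so new trygets are refused, but the real
	 * count is still recoverable. */
	refcnt += CSS_DEACT_BIAS;
	printf("raw=%d effective=%d\n", refcnt, css_refcnt(refcnt));

	/* a css_put() while deactivated still just decrements */
	refcnt--;
	printf("raw=%d effective=%d\n", refcnt, css_refcnt(refcnt));

	/* prints: raw=-2147483645 effective=3
	 *         raw=-2147483646 effective=2 */
	return 0;
}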