Merge branch 'for-3.15' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup
Pull cgroup updates from Tejun Heo:
 "A lot of updates for cgroup:

   - The biggest one is cgroup's conversion to kernfs.  cgroup took
     after the long abandoned vfs-entangled sysfs implementation and
     made it even more convoluted over time.  cgroup's internal objects
     were fused with vfs objects which also brought in vfs locking and
     object lifetime rules.  Naturally, there are places where vfs
     rules don't fit and nasty hacks, such as credential switching or
     lock dance interleaving inode mutex and cgroup_mutex with object
     serial number comparison thrown in to decide whether the operation
     is actually necessary, needed to be employed.

     After conversion to kernfs, internal object lifetime and locking
     rules are mostly isolated from vfs interactions, allowing the
     shedding of several nasty hacks and overall simplification.  This
     will also allow implementation of operations which may affect
     multiple cgroups, which weren't possible before as they would have
     required nesting i_mutexes.

   - Various simplifications including dropping of module support,
     easier cgroup name/path handling, simplified cgroup file type
     handling and task_cg_lists optimization.

   - Preparatory changes for the planned unified hierarchy, which is
     still a patchset away from being actually operational.  The dummy
     hierarchy is updated to serve as the default unified hierarchy.
     Controllers which aren't claimed by other hierarchies are
     associated with it, which BTW was what the dummy hierarchy was for
     anyway.

   - Various fixes from Li and others.  This pull request includes some
     patches to add missing slab.h to various subsystems.  This was
     triggered by the xattr.h include removal from cgroup.h.  cgroup.h
     indirectly got included into a lot of files which brought in
     xattr.h which brought in slab.h.

  There are several merge commits - one to pull in kernfs updates
  necessary for converting cgroup (already in upstream through
  driver-core), others for interfering changes in the fixes branch"

* 'for-3.15' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup: (74 commits)
  cgroup: remove useless argument from cgroup_exit()
  cgroup: fix spurious lockdep warning in cgroup_exit()
  cgroup: Use RCU_INIT_POINTER(x, NULL) in cgroup.c
  cgroup: break kernfs active_ref protection in cgroup directory operations
  cgroup: fix cgroup_taskset walking order
  cgroup: implement CFTYPE_ONLY_ON_DFL
  cgroup: make cgrp_dfl_root mountable
  cgroup: drop const from @buffer of cftype->write_string()
  cgroup: rename cgroup_dummy_root and related names
  cgroup: move ->subsys_mask from cgroupfs_root to cgroup
  cgroup: treat cgroup_dummy_root as an equivalent hierarchy during rebinding
  cgroup: remove NULL checks from [pr_cont_]cgroup_{name|path}()
  cgroup: use cgroup_setup_root() to initialize cgroup_dummy_root
  cgroup: reorganize cgroup bootstrapping
  cgroup: relocate setting of CGRP_DEAD
  cpuset: use rcu_read_lock() to protect task_cs()
  cgroup_freezer: document freezer_fork() subtleties
  cgroup: update cgroup_transfer_tasks() to either succeed or fail
  cgroup: drop task_lock() protection around task->cgroups
  cgroup: update how a newly forked task gets associated with css_set
  ...
Commit 32d01dc7be
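One API shift shows up repeatedly in the hunks below: cgroup_path() no longer returns an int error code but, like kernfs_path(), a pointer into the caller's buffer (NULL if the path didn't fit). A minimal sketch of the new calling convention, distilled from the blkg_path() and proc_cpuset_show() hunks; the function name, buffer size, and message here are illustrative, not part of the patch:

	static int print_cgroup_path(struct cgroup *cgrp)
	{
		char buf[256];
		char *p;

		/* returns a pointer into buf, NULL on overflow;
		 * the old interface returned 0 or -ve directly */
		p = cgroup_path(cgrp, buf, sizeof(buf));
		if (!p)
			return -ENAMETOOLONG;
		pr_info("cgroup path: %s\n", p);
		return 0;
	}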
@@ -8,6 +8,7 @@
 #include <linux/of_device.h>
 #include <linux/kernel.h>
 #include <linux/pci.h>
+#include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/export.h>
 #include <asm/io.h>
@@ -9,6 +9,8 @@
  * Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
  */
 
+#include <linux/slab.h>
+
 #include <asm/timer.h>
 #include <asm/traps.h>
 #include <asm/pgalloc.h>
@@ -894,7 +894,7 @@ static int blkcg_can_attach(struct cgroup_subsys_state *css,
 	int ret = 0;
 
 	/* task_lock() is needed to avoid races with exit_io_context() */
-	cgroup_taskset_for_each(task, css, tset) {
+	cgroup_taskset_for_each(task, tset) {
 		task_lock(task);
 		ioc = task->io_context;
 		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
@@ -906,17 +906,14 @@ static int blkcg_can_attach(struct cgroup_subsys_state *css,
 	return ret;
 }
 
-struct cgroup_subsys blkio_subsys = {
-	.name = "blkio",
+struct cgroup_subsys blkio_cgrp_subsys = {
 	.css_alloc = blkcg_css_alloc,
 	.css_offline = blkcg_css_offline,
 	.css_free = blkcg_css_free,
 	.can_attach = blkcg_can_attach,
-	.subsys_id = blkio_subsys_id,
 	.base_cftypes = blkcg_files,
-	.module = THIS_MODULE,
 };
-EXPORT_SYMBOL_GPL(blkio_subsys);
+EXPORT_SYMBOL_GPL(blkio_cgrp_subsys);
 
 /**
  * blkcg_activate_policy - activate a blkcg policy on a request_queue
@@ -1106,7 +1103,7 @@ int blkcg_policy_register(struct blkcg_policy *pol)
 
 	/* everything is in place, add intf files for the new policy */
 	if (pol->cftypes)
-		WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes));
+		WARN_ON(cgroup_add_cftypes(&blkio_cgrp_subsys, pol->cftypes));
 	ret = 0;
 out_unlock:
 	mutex_unlock(&blkcg_pol_mutex);
@@ -186,7 +186,7 @@ static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
 
 static inline struct blkcg *task_blkcg(struct task_struct *tsk)
 {
-	return css_to_blkcg(task_css(tsk, blkio_subsys_id));
+	return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
 }
 
 static inline struct blkcg *bio_blkcg(struct bio *bio)
@@ -241,12 +241,16 @@ static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
  */
 static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
 {
-	int ret;
+	char *p;
 
-	ret = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
-	if (ret)
+	p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
+	if (!p) {
 		strncpy(buf, "<unavailable>", buflen);
-	return ret;
+		return -ENAMETOOLONG;
+	}
+
+	memmove(buf, p, buf + buflen - p);
+	return 0;
 }
 
 /**
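A note on the memmove() length in the new blkg_path() above. This is an inference from how the kernfs_path() of this series builds its result: the string is composed right-aligned in the buffer, NUL-terminated at the last byte, and p points at its first character:

	/*
	 * buf                          p        buf + buflen
	 *  |<------- unused ---------->|/a/b/c\0|
	 *
	 * (buf + buflen) - p == strlen(p) + 1, so the memmove()
	 * shifts the path, trailing NUL included, to the front of buf.
	 */
	memmove(buf, p, buf + buflen - p);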
@@ -1408,13 +1408,13 @@ static int tg_set_conf(struct cgroup_subsys_state *css, struct cftype *cft,
 }
 
 static int tg_set_conf_u64(struct cgroup_subsys_state *css, struct cftype *cft,
-			   const char *buf)
+			   char *buf)
 {
 	return tg_set_conf(css, cft, buf, true);
 }
 
 static int tg_set_conf_uint(struct cgroup_subsys_state *css, struct cftype *cft,
-			    const char *buf)
+			    char *buf)
 {
 	return tg_set_conf(css, cft, buf, false);
 }
@@ -1425,28 +1425,24 @@ static struct cftype throtl_files[] = {
 		.private = offsetof(struct throtl_grp, bps[READ]),
 		.seq_show = tg_print_conf_u64,
 		.write_string = tg_set_conf_u64,
-		.max_write_len = 256,
 	},
 	{
 		.name = "throttle.write_bps_device",
 		.private = offsetof(struct throtl_grp, bps[WRITE]),
 		.seq_show = tg_print_conf_u64,
 		.write_string = tg_set_conf_u64,
-		.max_write_len = 256,
 	},
 	{
 		.name = "throttle.read_iops_device",
 		.private = offsetof(struct throtl_grp, iops[READ]),
 		.seq_show = tg_print_conf_uint,
 		.write_string = tg_set_conf_uint,
-		.max_write_len = 256,
 	},
 	{
 		.name = "throttle.write_iops_device",
 		.private = offsetof(struct throtl_grp, iops[WRITE]),
 		.seq_show = tg_print_conf_uint,
 		.write_string = tg_set_conf_uint,
-		.max_write_len = 256,
 	},
 	{
 		.name = "throttle.io_service_bytes",
@@ -1701,13 +1701,13 @@ static int __cfqg_set_weight_device(struct cgroup_subsys_state *css,
 }
 
 static int cfqg_set_weight_device(struct cgroup_subsys_state *css,
-				  struct cftype *cft, const char *buf)
+				  struct cftype *cft, char *buf)
 {
 	return __cfqg_set_weight_device(css, cft, buf, false);
 }
 
 static int cfqg_set_leaf_weight_device(struct cgroup_subsys_state *css,
-				       struct cftype *cft, const char *buf)
+				       struct cftype *cft, char *buf)
 {
 	return __cfqg_set_weight_device(css, cft, buf, true);
 }
@@ -1838,7 +1838,6 @@ static struct cftype cfq_blkcg_files[] = {
 		.flags = CFTYPE_ONLY_ON_ROOT,
 		.seq_show = cfqg_print_leaf_weight_device,
 		.write_string = cfqg_set_leaf_weight_device,
-		.max_write_len = 256,
 	},
 	{
 		.name = "weight",
@@ -1853,7 +1852,6 @@ static struct cftype cfq_blkcg_files[] = {
 		.flags = CFTYPE_NOT_ON_ROOT,
 		.seq_show = cfqg_print_weight_device,
 		.write_string = cfqg_set_weight_device,
-		.max_write_len = 256,
 	},
 	{
 		.name = "weight",
@@ -1866,7 +1864,6 @@ static struct cftype cfq_blkcg_files[] = {
 		.name = "leaf_weight_device",
 		.seq_show = cfqg_print_leaf_weight_device,
 		.write_string = cfqg_set_leaf_weight_device,
-		.max_write_len = 256,
 	},
 	{
 		.name = "leaf_weight",
fs/bio.c (2 changed lines)
@@ -1969,7 +1969,7 @@ int bio_associate_current(struct bio *bio)
 
 	/* associate blkcg if exists */
 	rcu_read_lock();
-	css = task_css(current, blkio_subsys_id);
+	css = task_css(current, blkio_cgrp_id);
 	if (css && css_tryget(css))
 		bio->bi_css = css;
 	rcu_read_unlock();
@@ -112,6 +112,7 @@ char *kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen)
 	spin_unlock_irqrestore(&kernfs_rename_lock, flags);
 	return p;
 }
+EXPORT_SYMBOL_GPL(kernfs_path);
 
 /**
  * pr_cont_kernfs_name - pr_cont name of a kernfs_node
@@ -14,18 +14,17 @@
 #include <linux/rcupdate.h>
 #include <linux/rculist.h>
 #include <linux/cgroupstats.h>
-#include <linux/prio_heap.h>
 #include <linux/rwsem.h>
 #include <linux/idr.h>
 #include <linux/workqueue.h>
-#include <linux/xattr.h>
 #include <linux/fs.h>
 #include <linux/percpu-refcount.h>
 #include <linux/seq_file.h>
+#include <linux/kernfs.h>
 
 #ifdef CONFIG_CGROUPS
 
-struct cgroupfs_root;
+struct cgroup_root;
 struct cgroup_subsys;
 struct inode;
 struct cgroup;
@@ -34,31 +33,16 @@ extern int cgroup_init_early(void);
 extern int cgroup_init(void);
 extern void cgroup_fork(struct task_struct *p);
 extern void cgroup_post_fork(struct task_struct *p);
-extern void cgroup_exit(struct task_struct *p, int run_callbacks);
+extern void cgroup_exit(struct task_struct *p);
 extern int cgroupstats_build(struct cgroupstats *stats,
 				struct dentry *dentry);
-extern int cgroup_load_subsys(struct cgroup_subsys *ss);
-extern void cgroup_unload_subsys(struct cgroup_subsys *ss);
 
 extern int proc_cgroup_show(struct seq_file *, void *);
 
-/*
- * Define the enumeration of all cgroup subsystems.
- *
- * We define ids for builtin subsystems and then modular ones.
- */
-#define SUBSYS(_x) _x ## _subsys_id,
+/* define the enumeration of all cgroup subsystems */
+#define SUBSYS(_x) _x ## _cgrp_id,
 enum cgroup_subsys_id {
-#define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option)
 #include <linux/cgroup_subsys.h>
-#undef IS_SUBSYS_ENABLED
-	CGROUP_BUILTIN_SUBSYS_COUNT,
-
-	__CGROUP_SUBSYS_TEMP_PLACEHOLDER = CGROUP_BUILTIN_SUBSYS_COUNT - 1,
-
-#define IS_SUBSYS_ENABLED(option) IS_MODULE(option)
-#include <linux/cgroup_subsys.h>
-#undef IS_SUBSYS_ENABLED
 	CGROUP_SUBSYS_COUNT,
 };
 #undef SUBSYS
@@ -153,11 +137,6 @@ enum {
 	CGRP_SANE_BEHAVIOR,
 };
 
-struct cgroup_name {
-	struct rcu_head rcu_head;
-	char name[];
-};
-
 struct cgroup {
 	unsigned long flags;		/* "unsigned long" so bitops work */
 
@@ -174,16 +153,17 @@ struct cgroup {
 	/* the number of attached css's */
 	int nr_css;
 
+	atomic_t refcnt;
+
 	/*
 	 * We link our 'sibling' struct into our parent's 'children'.
 	 * Our children link their 'sibling' into our 'children'.
 	 */
 	struct list_head sibling;	/* my parent's children */
 	struct list_head children;	/* my children */
-	struct list_head files;		/* my files */
 
 	struct cgroup *parent;		/* my parent */
-	struct dentry *dentry;		/* cgroup fs entry, RCU protected */
+	struct kernfs_node *kn;		/* cgroup kernfs entry */
 
 	/*
 	 * Monotonically increasing unique serial number which defines a
@@ -193,23 +173,13 @@ struct cgroup {
 	 */
 	u64 serial_nr;
 
-	/*
-	 * This is a copy of dentry->d_name, and it's needed because
-	 * we can't use dentry->d_name in cgroup_path().
-	 *
-	 * You must acquire rcu_read_lock() to access cgrp->name, and
-	 * the only place that can change it is rename(), which is
-	 * protected by parent dir's i_mutex.
-	 *
-	 * Normally you should use cgroup_name() wrapper rather than
-	 * access it directly.
-	 */
-	struct cgroup_name __rcu *name;
+	/* The bitmask of subsystems attached to this cgroup */
+	unsigned long subsys_mask;
 
 	/* Private pointers for each registered subsystem */
 	struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];
 
-	struct cgroupfs_root *root;
+	struct cgroup_root *root;
 
 	/*
 	 * List of cgrp_cset_links pointing at css_sets with tasks in this
@@ -237,14 +207,11 @@ struct cgroup {
 	/* For css percpu_ref killing and RCU-protected deletion */
 	struct rcu_head rcu_head;
 	struct work_struct destroy_work;
-
-	/* directory xattrs */
-	struct simple_xattrs xattrs;
 };
 
 #define MAX_CGROUP_ROOT_NAMELEN 64
 
-/* cgroupfs_root->flags */
+/* cgroup_root->flags */
 enum {
 	/*
 	 * Unfortunately, cgroup core and various controllers are riddled
@@ -262,8 +229,8 @@ enum {
 	 *
 	 * The followings are the behaviors currently affected this flag.
 	 *
-	 * - Mount options "noprefix" and "clone_children" are disallowed.
-	 *   Also, cgroupfs file cgroup.clone_children is not created.
+	 * - Mount options "noprefix", "xattr", "clone_children",
+	 *   "release_agent" and "name" are disallowed.
 	 *
 	 * - When mounting an existing superblock, mount options should
 	 *   match.
@@ -281,6 +248,11 @@ enum {
 	 * - "release_agent" and "notify_on_release" are removed.
 	 *   Replacement notification mechanism will be implemented.
 	 *
+	 * - "cgroup.clone_children" is removed.
+	 *
+	 * - If mount is requested with sane_behavior but without any
+	 *   subsystem, the default unified hierarchy is mounted.
+	 *
 	 * - cpuset: tasks will be kept in empty cpusets when hotplug happens
 	 *   and take masks of ancestors with non-empty cpus/mems, instead of
 	 *   being moved to an ancestor.
@@ -300,29 +272,24 @@ enum {
 
 	/* mount options live below bit 16 */
 	CGRP_ROOT_OPTION_MASK	= (1 << 16) - 1,
-
-	CGRP_ROOT_SUBSYS_BOUND	= (1 << 16), /* subsystems finished binding */
 };
 
 /*
- * A cgroupfs_root represents the root of a cgroup hierarchy, and may be
- * associated with a superblock to form an active hierarchy.  This is
+ * A cgroup_root represents the root of a cgroup hierarchy, and may be
+ * associated with a kernfs_root to form an active hierarchy.  This is
  * internal to cgroup core.  Don't access directly from controllers.
  */
-struct cgroupfs_root {
-	struct super_block *sb;
-
-	/* The bitmask of subsystems attached to this hierarchy */
-	unsigned long subsys_mask;
+struct cgroup_root {
+	struct kernfs_root *kf_root;
 
 	/* Unique id for this hierarchy. */
 	int hierarchy_id;
 
-	/* The root cgroup for this hierarchy */
-	struct cgroup top_cgroup;
+	/* The root cgroup.  Root is destroyed on its release. */
+	struct cgroup cgrp;
 
-	/* Tracks how many cgroups are currently defined in hierarchy.*/
-	int number_of_cgroups;
+	/* Number of cgroups in the hierarchy, used only for /proc/cgroups */
+	atomic_t nr_cgrps;
 
 	/* A list running through the active hierarchies */
 	struct list_head root_list;
@@ -360,10 +327,14 @@ struct css_set {
 	struct hlist_node hlist;
 
 	/*
-	 * List running through all tasks using this cgroup
-	 * group. Protected by css_set_lock
+	 * Lists running through all tasks using this cgroup group.
+	 * mg_tasks lists tasks which belong to this cset but are in the
+	 * process of being migrated out or in.  Protected by
+	 * css_set_rwsem, but, during migration, once tasks are moved to
+	 * mg_tasks, it can be read safely while holding cgroup_mutex.
 	 */
 	struct list_head tasks;
+	struct list_head mg_tasks;
 
 	/*
 	 * List of cgrp_cset_links pointing at cgroups referenced from this
@@ -372,13 +343,29 @@ struct css_set {
 	struct list_head cgrp_links;
 
 	/*
-	 * Set of subsystem states, one for each subsystem. This array
-	 * is immutable after creation apart from the init_css_set
-	 * during subsystem registration (at boot time) and modular subsystem
-	 * loading/unloading.
+	 * Set of subsystem states, one for each subsystem. This array is
+	 * immutable after creation apart from the init_css_set during
+	 * subsystem registration (at boot time).
 	 */
 	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
 
+	/*
+	 * List of csets participating in the on-going migration either as
+	 * source or destination.  Protected by cgroup_mutex.
+	 */
+	struct list_head mg_preload_node;
+	struct list_head mg_node;
+
+	/*
+	 * If this cset is acting as the source of migration the following
+	 * two fields are set.  mg_src_cgrp is the source cgroup of the
+	 * on-going migration and mg_dst_cset is the destination cset the
+	 * target tasks on this cset should be migrated to.  Protected by
+	 * cgroup_mutex.
+	 */
+	struct cgroup *mg_src_cgrp;
+	struct css_set *mg_dst_cset;
+
 	/* For RCU-protected deletion */
 	struct rcu_head rcu_head;
 };
@@ -397,6 +384,7 @@ enum {
 	CFTYPE_NOT_ON_ROOT	= (1 << 1),	/* don't create on root cgrp */
 	CFTYPE_INSANE		= (1 << 2),	/* don't create if sane_behavior */
 	CFTYPE_NO_PREFIX	= (1 << 3),	/* (DON'T USE FOR NEW FILES) no subsys prefix */
+	CFTYPE_ONLY_ON_DFL	= (1 << 4),	/* only on default hierarchy */
 };
 
 #define MAX_CFTYPE_NAME		64
@@ -416,8 +404,9 @@ struct cftype {
 	umode_t mode;
 
 	/*
-	 * If non-zero, defines the maximum length of string that can
-	 * be passed to write_string; defaults to 64
+	 * The maximum length of string, excluding trailing nul, that can
+	 * be passed to write_string.  If < PAGE_SIZE-1, PAGE_SIZE-1 is
+	 * assumed.
 	 */
 	size_t max_write_len;
 
@@ -425,10 +414,12 @@ struct cftype {
 	unsigned int flags;
 
 	/*
-	 * The subsys this file belongs to.  Initialized automatically
-	 * during registration.  NULL for cgroup core files.
+	 * Fields used for internal bookkeeping.  Initialized automatically
+	 * during registration.
 	 */
-	struct cgroup_subsys *ss;
+	struct cgroup_subsys *ss;	/* NULL for cgroup core files */
+	struct list_head node;		/* anchored at ss->cfts */
+	struct kernfs_ops *kf_ops;
 
 	/*
 	 * read_u64() is a shortcut for the common case of returning a
@@ -467,7 +458,7 @@ struct cftype {
 	 * Returns 0 or -ve error code.
 	 */
 	int (*write_string)(struct cgroup_subsys_state *css, struct cftype *cft,
-			    const char *buffer);
+			    char *buffer);
 	/*
 	 * trigger() callback can be used to get some kick from the
 	 * userspace, when the actual string written is not important
@@ -475,37 +466,18 @@ struct cftype {
 	 * kick type for multiplexing.
 	 */
 	int (*trigger)(struct cgroup_subsys_state *css, unsigned int event);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lock_class_key	lockdep_key;
+#endif
 };
 
-/*
- * cftype_sets describe cftypes belonging to a subsystem and are chained at
- * cgroup_subsys->cftsets.  Each cftset points to an array of cftypes
- * terminated by zero length name.
- */
-struct cftype_set {
-	struct list_head		node;	/* chained at subsys->cftsets */
-	struct cftype			*cfts;
-};
+extern struct cgroup_root cgrp_dfl_root;
 
-/*
- * cgroupfs file entry, pointed to from leaf dentry->d_fsdata.  Don't
- * access directly.
- */
-struct cfent {
-	struct list_head		node;
-	struct dentry			*dentry;
-	struct cftype			*type;
-	struct cgroup_subsys_state	*css;
-
-	/* file xattrs */
-	struct simple_xattrs		xattrs;
-};
-
-/* seq_file->private points to the following, only ->priv is public */
-struct cgroup_open_file {
-	struct cfent			*cfe;
-	void				*priv;
-};
+static inline bool cgroup_on_dfl(const struct cgroup *cgrp)
+{
+	return cgrp->root == &cgrp_dfl_root;
+}
 
 /*
  * See the comment above CGRP_ROOT_SANE_BEHAVIOR for details.  This
@@ -516,34 +488,63 @@ static inline bool cgroup_sane_behavior(const struct cgroup *cgrp)
 	return cgrp->root->flags & CGRP_ROOT_SANE_BEHAVIOR;
 }
 
-/* Caller should hold rcu_read_lock() */
-static inline const char *cgroup_name(const struct cgroup *cgrp)
+/* no synchronization, the result can only be used as a hint */
+static inline bool cgroup_has_tasks(struct cgroup *cgrp)
 {
-	return rcu_dereference(cgrp->name)->name;
+	return !list_empty(&cgrp->cset_links);
 }
 
-static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
+/* returns ino associated with a cgroup, 0 indicates unmounted root */
+static inline ino_t cgroup_ino(struct cgroup *cgrp)
 {
-	struct cgroup_open_file *of = seq->private;
-	return of->cfe->css;
+	if (cgrp->kn)
+		return cgrp->kn->ino;
+	else
+		return 0;
 }
 
 static inline struct cftype *seq_cft(struct seq_file *seq)
 {
-	struct cgroup_open_file *of = seq->private;
-	return of->cfe->type;
+	struct kernfs_open_file *of = seq->private;
+
+	return of->kn->priv;
 }
 
+struct cgroup_subsys_state *seq_css(struct seq_file *seq);
+
+/*
+ * Name / path handling functions.  All are thin wrappers around the kernfs
+ * counterparts and can be called under any context.
+ */
+
+static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
+{
+	return kernfs_name(cgrp->kn, buf, buflen);
+}
+
+static inline char * __must_check cgroup_path(struct cgroup *cgrp, char *buf,
+					      size_t buflen)
+{
+	return kernfs_path(cgrp->kn, buf, buflen);
+}
+
+static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
+{
+	pr_cont_kernfs_name(cgrp->kn);
+}
+
+static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
+{
+	pr_cont_kernfs_path(cgrp->kn);
+}
+
+char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
+
 int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
 int cgroup_rm_cftypes(struct cftype *cfts);
 
 bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);
 
-int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen);
-int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
-
 int cgroup_task_count(const struct cgroup *cgrp);
 
 /*
  * Control Group taskset, used to pass around set of tasks to cgroup_subsys
  * methods.
@@ -551,22 +552,15 @@ int cgroup_task_count(const struct cgroup *cgrp);
 struct cgroup_taskset;
 struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset);
 struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);
-struct cgroup_subsys_state *cgroup_taskset_cur_css(struct cgroup_taskset *tset,
-						   int subsys_id);
-int cgroup_taskset_size(struct cgroup_taskset *tset);
 
 /**
  * cgroup_taskset_for_each - iterate cgroup_taskset
  * @task: the loop cursor
- * @skip_css: skip if task's css matches this, %NULL to iterate through all
  * @tset: taskset to iterate
  */
-#define cgroup_taskset_for_each(task, skip_css, tset)			\
+#define cgroup_taskset_for_each(task, tset)				\
 	for ((task) = cgroup_taskset_first((tset)); (task);		\
-	     (task) = cgroup_taskset_next((tset)))			\
-		if (!(skip_css) ||					\
-		    cgroup_taskset_cur_css((tset),			\
-			(skip_css)->ss->subsys_id) != (skip_css))
+	     (task) = cgroup_taskset_next((tset)))
 
 /*
  * Control Group subsystem type.
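A quick illustration of the macro change above, which the later freezer and cpuset hunks apply at each call site. With @skip_css gone, a controller method iterates every task in the taskset unconditionally; foo_attach() and do_something() below are hypothetical, not from the patch:

	static void foo_attach(struct cgroup_subsys_state *css,
			       struct cgroup_taskset *tset)
	{
		struct task_struct *task;

		/* before: cgroup_taskset_for_each(task, css, tset) */
		cgroup_taskset_for_each(task, tset)
			do_something(task);	/* hypothetical per-task work */
	}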
@@ -591,7 +585,6 @@ struct cgroup_subsys {
 			     struct task_struct *task);
 	void (*bind)(struct cgroup_subsys_state *root_css);
 
-	int subsys_id;
 	int disabled;
 	int early_init;
 
@@ -610,27 +603,26 @@ struct cgroup_subsys {
 	bool broken_hierarchy;
 	bool warned_broken_hierarchy;
 
+	/* the following two fields are initialized automtically during boot */
+	int id;
 #define MAX_CGROUP_TYPE_NAMELEN 32
 	const char *name;
 
 	/* link to parent, protected by cgroup_lock() */
-	struct cgroupfs_root *root;
+	struct cgroup_root *root;
 
-	/* list of cftype_sets */
-	struct list_head cftsets;
+	/*
+	 * List of cftypes.  Each entry is the first entry of an array
+	 * terminated by zero length name.
+	 */
+	struct list_head cfts;
 
-	/* base cftypes, automatically [de]registered with subsys itself */
+	/* base cftypes, automatically registered with subsys itself */
 	struct cftype *base_cftypes;
-	struct cftype_set base_cftset;
-
-	/* should be defined only by modular subsystems */
-	struct module *module;
 };
 
-#define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys;
-#define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option)
+#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
 #include <linux/cgroup_subsys.h>
-#undef IS_SUBSYS_ENABLED
 #undef SUBSYS
 
 /**
@@ -661,10 +653,12 @@ struct cgroup_subsys_state *css_parent(struct cgroup_subsys_state *css)
  */
 #ifdef CONFIG_PROVE_RCU
 extern struct mutex cgroup_mutex;
+extern struct rw_semaphore css_set_rwsem;
 #define task_css_set_check(task, __c)					\
 	rcu_dereference_check((task)->cgroups,				\
-		lockdep_is_held(&(task)->alloc_lock) ||			\
-		lockdep_is_held(&cgroup_mutex) || (__c))
+		lockdep_is_held(&cgroup_mutex) ||			\
+		lockdep_is_held(&css_set_rwsem) ||			\
+		((task)->flags & PF_EXITING) || (__c))
 #else
 #define task_css_set_check(task, __c)					\
 	rcu_dereference((task)->cgroups)
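The widened lockdep condition above tracks the css_set_rwsem conversion (task->cgroups is no longer protected by task_lock()); a sketch of a hypothetical reader that satisfies it, assuming css_set_rwsem is taken for read:

	down_read(&css_set_rwsem);
	cset = task_css_set_check(task, false);	/* rwsem held: no lockdep splat */
	/* ... inspect cset ... */
	up_read(&css_set_rwsem);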
@@ -837,16 +831,11 @@ void css_task_iter_start(struct cgroup_subsys_state *css,
 struct task_struct *css_task_iter_next(struct css_task_iter *it);
 void css_task_iter_end(struct css_task_iter *it);
 
-int css_scan_tasks(struct cgroup_subsys_state *css,
-		   bool (*test)(struct task_struct *, void *),
-		   void (*process)(struct task_struct *, void *),
-		   void *data, struct ptr_heap *heap);
-
 int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
 int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
 
-struct cgroup_subsys_state *css_from_dir(struct dentry *dentry,
-					 struct cgroup_subsys *ss);
+struct cgroup_subsys_state *css_tryget_from_dir(struct dentry *dentry,
+						struct cgroup_subsys *ss);
 
 #else /* !CONFIG_CGROUPS */
 
@@ -854,7 +843,7 @@ static inline int cgroup_init_early(void) { return 0; }
 static inline int cgroup_init(void) { return 0; }
 static inline void cgroup_fork(struct task_struct *p) {}
 static inline void cgroup_post_fork(struct task_struct *p) {}
-static inline void cgroup_exit(struct task_struct *p, int callbacks) {}
+static inline void cgroup_exit(struct task_struct *p) {}
 
 static inline int cgroupstats_build(struct cgroupstats *stats,
 					struct dentry *dentry)
@@ -3,51 +3,51 @@
  *
  * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS.
  */
-#if IS_SUBSYS_ENABLED(CONFIG_CPUSETS)
+#if IS_ENABLED(CONFIG_CPUSETS)
 SUBSYS(cpuset)
 #endif
 
-#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_DEBUG)
+#if IS_ENABLED(CONFIG_CGROUP_DEBUG)
 SUBSYS(debug)
 #endif
 
-#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_SCHED)
-SUBSYS(cpu_cgroup)
+#if IS_ENABLED(CONFIG_CGROUP_SCHED)
+SUBSYS(cpu)
 #endif
 
-#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_CPUACCT)
+#if IS_ENABLED(CONFIG_CGROUP_CPUACCT)
 SUBSYS(cpuacct)
 #endif
 
-#if IS_SUBSYS_ENABLED(CONFIG_MEMCG)
-SUBSYS(mem_cgroup)
+#if IS_ENABLED(CONFIG_MEMCG)
+SUBSYS(memory)
 #endif
 
-#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_DEVICE)
+#if IS_ENABLED(CONFIG_CGROUP_DEVICE)
 SUBSYS(devices)
 #endif
 
-#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_FREEZER)
+#if IS_ENABLED(CONFIG_CGROUP_FREEZER)
 SUBSYS(freezer)
 #endif
 
-#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_NET_CLASSID)
+#if IS_ENABLED(CONFIG_CGROUP_NET_CLASSID)
 SUBSYS(net_cls)
 #endif
 
-#if IS_SUBSYS_ENABLED(CONFIG_BLK_CGROUP)
+#if IS_ENABLED(CONFIG_BLK_CGROUP)
 SUBSYS(blkio)
 #endif
 
-#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_PERF)
-SUBSYS(perf)
+#if IS_ENABLED(CONFIG_CGROUP_PERF)
+SUBSYS(perf_event)
 #endif
 
-#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_NET_PRIO)
+#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
 SUBSYS(net_prio)
 #endif
 
-#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_HUGETLB)
+#if IS_ENABLED(CONFIG_CGROUP_HUGETLB)
 SUBSYS(hugetlb)
 #endif
 /*
@@ -49,7 +49,7 @@ int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
 
 static inline bool hugetlb_cgroup_disabled(void)
 {
-	if (hugetlb_subsys.disabled)
+	if (hugetlb_cgrp_subsys.disabled)
 		return true;
 	return false;
 }
@@ -162,7 +162,7 @@ extern int do_swap_account;
 
 static inline bool mem_cgroup_disabled(void)
 {
-	if (mem_cgroup_subsys.disabled)
+	if (memory_cgrp_subsys.disabled)
 		return true;
 	return false;
 }
@@ -34,7 +34,7 @@ static inline u32 task_cls_classid(struct task_struct *p)
 		return 0;
 
 	rcu_read_lock();
-	classid = container_of(task_css(p, net_cls_subsys_id),
+	classid = container_of(task_css(p, net_cls_cgrp_id),
 			       struct cgroup_cls_state, css)->classid;
 	rcu_read_unlock();
 
@@ -27,32 +27,17 @@ struct netprio_map {
 
 void sock_update_netprioidx(struct sock *sk);
 
-#if IS_BUILTIN(CONFIG_CGROUP_NET_PRIO)
 static inline u32 task_netprioidx(struct task_struct *p)
 {
 	struct cgroup_subsys_state *css;
 	u32 idx;
 
 	rcu_read_lock();
-	css = task_css(p, net_prio_subsys_id);
+	css = task_css(p, net_prio_cgrp_id);
 	idx = css->cgroup->id;
 	rcu_read_unlock();
 	return idx;
 }
-#elif IS_MODULE(CONFIG_CGROUP_NET_PRIO)
-static inline u32 task_netprioidx(struct task_struct *p)
-{
-	struct cgroup_subsys_state *css;
-	u32 idx = 0;
-
-	rcu_read_lock();
-	css = task_css(p, net_prio_subsys_id);
-	if (css)
-		idx = css->cgroup->id;
-	rcu_read_unlock();
-	return idx;
-}
-#endif
 #else /* !CONFIG_CGROUP_NET_PRIO */
 static inline u32 task_netprioidx(struct task_struct *p)
 {
@@ -854,6 +854,7 @@ config NUMA_BALANCING
 
 menuconfig CGROUPS
 	boolean "Control Group support"
+	select KERNFS
 	help
 	  This option adds support for grouping sets of processes together, for
 	  use with process control subsystems such as Cpusets, CFS, memory
kernel/cgroup.c (3749 changed lines; diff not shown because of its size)
@@ -52,7 +52,7 @@ static inline struct freezer *css_freezer(struct cgroup_subsys_state *css)
 
 static inline struct freezer *task_freezer(struct task_struct *task)
 {
-	return css_freezer(task_css(task, freezer_subsys_id));
+	return css_freezer(task_css(task, freezer_cgrp_id));
 }
 
 static struct freezer *parent_freezer(struct freezer *freezer)
@@ -84,8 +84,6 @@ static const char *freezer_state_strs(unsigned int state)
 	return "THAWED";
 };
 
-struct cgroup_subsys freezer_subsys;
-
 static struct cgroup_subsys_state *
 freezer_css_alloc(struct cgroup_subsys_state *parent_css)
 {
@@ -189,7 +187,7 @@ static void freezer_attach(struct cgroup_subsys_state *new_css,
 	 * current state before executing the following - !frozen tasks may
 	 * be visible in a FROZEN cgroup and frozen tasks in a THAWED one.
 	 */
-	cgroup_taskset_for_each(task, new_css, tset) {
+	cgroup_taskset_for_each(task, tset) {
 		if (!(freezer->state & CGROUP_FREEZING)) {
 			__thaw_task(task);
 		} else {
@@ -216,6 +214,16 @@ static void freezer_attach(struct cgroup_subsys_state *new_css,
 	}
 }
 
+/**
+ * freezer_fork - cgroup post fork callback
+ * @task: a task which has just been forked
+ *
+ * @task has just been created and should conform to the current state of
+ * the cgroup_freezer it belongs to.  This function may race against
+ * freezer_attach().  Losing to freezer_attach() means that we don't have
+ * to do anything as freezer_attach() will put @task into the appropriate
+ * state.
+ */
 static void freezer_fork(struct task_struct *task)
 {
 	struct freezer *freezer;
@@ -224,14 +232,26 @@ static void freezer_fork(struct task_struct *task)
 	freezer = task_freezer(task);
 
 	/*
-	 * The root cgroup is non-freezable, so we can skip the
-	 * following check.
+	 * The root cgroup is non-freezable, so we can skip locking the
+	 * freezer.  This is safe regardless of race with task migration.
+	 * If we didn't race or won, skipping is obviously the right thing
+	 * to do.  If we lost and root is the new cgroup, noop is still the
+	 * right thing to do.
 	 */
 	if (!parent_freezer(freezer))
 		goto out;
 
+	/*
+	 * Grab @freezer->lock and freeze @task after verifying @task still
+	 * belongs to @freezer and it's freezing.  The former is for the
+	 * case where we have raced against task migration and lost and
+	 * @task is already in a different cgroup which may not be frozen.
+	 * This isn't strictly necessary as freeze_task() is allowed to be
+	 * called spuriously but let's do it anyway for, if nothing else,
+	 * documentation.
+	 */
 	spin_lock_irq(&freezer->lock);
-	if (freezer->state & CGROUP_FREEZING)
+	if (freezer == task_freezer(task) && (freezer->state & CGROUP_FREEZING))
 		freeze_task(task);
 	spin_unlock_irq(&freezer->lock);
 out:
@@ -422,7 +442,7 @@ static void freezer_change_state(struct freezer *freezer, bool freeze)
 }
 
 static int freezer_write(struct cgroup_subsys_state *css, struct cftype *cft,
-			 const char *buffer)
+			 char *buffer)
 {
 	bool freeze;
 
@@ -473,13 +493,11 @@ static struct cftype files[] = {
 	{ }	/* terminate */
 };
 
-struct cgroup_subsys freezer_subsys = {
-	.name		= "freezer",
+struct cgroup_subsys freezer_cgrp_subsys = {
 	.css_alloc	= freezer_css_alloc,
 	.css_online	= freezer_css_online,
 	.css_offline	= freezer_css_offline,
 	.css_free	= freezer_css_free,
-	.subsys_id	= freezer_subsys_id,
 	.attach		= freezer_attach,
 	.fork		= freezer_fork,
 	.base_cftypes	= files,
kernel/cpuset.c (262 changed lines)
@@ -119,7 +119,7 @@ static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
 /* Retrieve the cpuset for a task */
 static inline struct cpuset *task_cs(struct task_struct *task)
 {
-	return css_cs(task_css(task, cpuset_subsys_id));
+	return css_cs(task_css(task, cpuset_cgrp_id));
 }
 
 static inline struct cpuset *parent_cs(struct cpuset *cs)
@@ -467,7 +467,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
 	 * be changed to have empty cpus_allowed or mems_allowed.
 	 */
 	ret = -ENOSPC;
-	if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress)) {
+	if ((cgroup_has_tasks(cur->css.cgroup) || cur->attach_in_progress)) {
 		if (!cpumask_empty(cur->cpus_allowed) &&
 		    cpumask_empty(trial->cpus_allowed))
 			goto out;
@@ -828,56 +828,37 @@ static struct cpuset *effective_nodemask_cpuset(struct cpuset *cs)
 	return cs;
 }
 
-/**
- * cpuset_change_cpumask - make a task's cpus_allowed the same as its cpuset's
- * @tsk: task to test
- * @data: cpuset to @tsk belongs to
- *
- * Called by css_scan_tasks() for each task in a cgroup whose cpus_allowed
- * mask needs to be changed.
- *
- * We don't need to re-check for the cgroup/cpuset membership, since we're
- * holding cpuset_mutex at this point.
- */
-static void cpuset_change_cpumask(struct task_struct *tsk, void *data)
-{
-	struct cpuset *cs = data;
-	struct cpuset *cpus_cs = effective_cpumask_cpuset(cs);
-
-	set_cpus_allowed_ptr(tsk, cpus_cs->cpus_allowed);
-}
-
 /**
  * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
  * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
- * @heap: if NULL, defer allocating heap memory to css_scan_tasks()
 *
- * Called with cpuset_mutex held
- *
- * The css_scan_tasks() function will scan all the tasks in a cgroup,
- * calling callback functions for each.
- *
- * No return value. It's guaranteed that css_scan_tasks() always returns 0
- * if @heap != NULL.
+ * Iterate through each task of @cs updating its cpus_allowed to the
+ * effective cpuset's.  As this function is called with cpuset_mutex held,
+ * cpuset membership stays stable.
 */
-static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
+static void update_tasks_cpumask(struct cpuset *cs)
 {
-	css_scan_tasks(&cs->css, NULL, cpuset_change_cpumask, cs, heap);
+	struct cpuset *cpus_cs = effective_cpumask_cpuset(cs);
+	struct css_task_iter it;
+	struct task_struct *task;
+
+	css_task_iter_start(&cs->css, &it);
+	while ((task = css_task_iter_next(&it)))
+		set_cpus_allowed_ptr(task, cpus_cs->cpus_allowed);
+	css_task_iter_end(&it);
 }
 
 /*
 * update_tasks_cpumask_hier - Update the cpumasks of tasks in the hierarchy.
 * @root_cs: the root cpuset of the hierarchy
 * @update_root: update root cpuset or not?
- * @heap: the heap used by css_scan_tasks()
 *
 * This will update cpumasks of tasks in @root_cs and all other empty cpusets
 * which take on cpumask of @root_cs.
 *
 * Called with cpuset_mutex held
 */
-static void update_tasks_cpumask_hier(struct cpuset *root_cs,
-				      bool update_root, struct ptr_heap *heap)
+static void update_tasks_cpumask_hier(struct cpuset *root_cs, bool update_root)
 {
 	struct cpuset *cp;
 	struct cgroup_subsys_state *pos_css;
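The conversion above is the template repeated for the nodemask and spread-flag updaters later in this file: css_scan_tasks() plus a preallocated heap becomes a plain css_task_iter walk under cpuset_mutex. The pattern as a standalone sketch (update_foo() is a stand-in for the per-task work, not a real function):

	struct css_task_iter it;
	struct task_struct *task;

	css_task_iter_start(&cs->css, &it);
	while ((task = css_task_iter_next(&it)))
		update_foo(task);	/* hypothetical per-task callback body */
	css_task_iter_end(&it);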
@@ -898,7 +879,7 @@ static void update_tasks_cpumask_hier(struct cpuset *root_cs,
 			continue;
 		rcu_read_unlock();
 
-		update_tasks_cpumask(cp, heap);
+		update_tasks_cpumask(cp);
 
 		rcu_read_lock();
 		css_put(&cp->css);
@@ -914,7 +895,6 @@ static void update_tasks_cpumask_hier(struct cpuset *root_cs,
 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 			  const char *buf)
 {
-	struct ptr_heap heap;
 	int retval;
 	int is_load_balanced;
 
@@ -947,19 +927,13 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 	if (retval < 0)
 		return retval;
 
-	retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
-	if (retval)
-		return retval;
-
 	is_load_balanced = is_sched_load_balance(trialcs);
 
 	mutex_lock(&callback_mutex);
 	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
 	mutex_unlock(&callback_mutex);
 
-	update_tasks_cpumask_hier(cs, true, &heap);
-
-	heap_free(&heap);
+	update_tasks_cpumask_hier(cs, true);
 
 	if (is_load_balanced)
 		rebuild_sched_domains_locked();
@@ -1048,53 +1022,22 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
 	task_unlock(tsk);
 }
 
-struct cpuset_change_nodemask_arg {
-	struct cpuset		*cs;
-	nodemask_t		*newmems;
-};
-
-/*
- * Update task's mems_allowed and rebind its mempolicy and vmas' mempolicy
- * of it to cpuset's new mems_allowed, and migrate pages to new nodes if
- * memory_migrate flag is set. Called with cpuset_mutex held.
- */
-static void cpuset_change_nodemask(struct task_struct *p, void *data)
-{
-	struct cpuset_change_nodemask_arg *arg = data;
-	struct cpuset *cs = arg->cs;
-	struct mm_struct *mm;
-	int migrate;
-
-	cpuset_change_task_nodemask(p, arg->newmems);
-
-	mm = get_task_mm(p);
-	if (!mm)
-		return;
-
-	migrate = is_memory_migrate(cs);
-
-	mpol_rebind_mm(mm, &cs->mems_allowed);
-	if (migrate)
-		cpuset_migrate_mm(mm, &cs->old_mems_allowed, arg->newmems);
-	mmput(mm);
-}
-
 static void *cpuset_being_rebound;
 
 /**
 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
- * @heap: if NULL, defer allocating heap memory to css_scan_tasks()
 *
- * Called with cpuset_mutex held.  No return value. It's guaranteed that
- * css_scan_tasks() always returns 0 if @heap != NULL.
+ * Iterate through each task of @cs updating its mems_allowed to the
+ * effective cpuset's.  As this function is called with cpuset_mutex held,
+ * cpuset membership stays stable.
 */
-static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
+static void update_tasks_nodemask(struct cpuset *cs)
 {
 	static nodemask_t newmems;	/* protected by cpuset_mutex */
 	struct cpuset *mems_cs = effective_nodemask_cpuset(cs);
-	struct cpuset_change_nodemask_arg arg = { .cs = cs,
-						  .newmems = &newmems };
+	struct css_task_iter it;
+	struct task_struct *task;
 
 	cpuset_being_rebound = cs;	/* causes mpol_dup() rebind */
 
@@ -1110,7 +1053,25 @@ static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
 	 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
 	 * is idempotent.  Also migrate pages in each mm to new nodes.
 	 */
-	css_scan_tasks(&cs->css, NULL, cpuset_change_nodemask, &arg, heap);
+	css_task_iter_start(&cs->css, &it);
+	while ((task = css_task_iter_next(&it))) {
+		struct mm_struct *mm;
+		bool migrate;
+
+		cpuset_change_task_nodemask(task, &newmems);
+
+		mm = get_task_mm(task);
+		if (!mm)
+			continue;
+
+		migrate = is_memory_migrate(cs);
+
+		mpol_rebind_mm(mm, &cs->mems_allowed);
+		if (migrate)
+			cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
+		mmput(mm);
+	}
+	css_task_iter_end(&it);
 
 	/*
 	 * All the tasks' nodemasks have been updated, update
@@ -1126,15 +1087,13 @@ static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
 * update_tasks_nodemask_hier - Update the nodemasks of tasks in the hierarchy.
 * @cs: the root cpuset of the hierarchy
 * @update_root: update the root cpuset or not?
- * @heap: the heap used by css_scan_tasks()
 *
 * This will update nodemasks of tasks in @root_cs and all other empty cpusets
 * which take on nodemask of @root_cs.
 *
 * Called with cpuset_mutex held
 */
-static void update_tasks_nodemask_hier(struct cpuset *root_cs,
-				       bool update_root, struct ptr_heap *heap)
+static void update_tasks_nodemask_hier(struct cpuset *root_cs, bool update_root)
 {
 	struct cpuset *cp;
 	struct cgroup_subsys_state *pos_css;
@@ -1155,7 +1114,7 @@ static void update_tasks_nodemask_hier(struct cpuset *root_cs,
 			continue;
 		rcu_read_unlock();
 
-		update_tasks_nodemask(cp, heap);
+		update_tasks_nodemask(cp);
 
 		rcu_read_lock();
 		css_put(&cp->css);
@@ -1180,7 +1139,6 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
 			   const char *buf)
 {
 	int retval;
-	struct ptr_heap heap;
 
 	/*
 	 * top_cpuset.mems_allowed tracks node_stats[N_MEMORY];
@@ -1219,17 +1177,11 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
 	if (retval < 0)
 		goto done;
 
-	retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
-	if (retval < 0)
-		goto done;
-
 	mutex_lock(&callback_mutex);
 	cs->mems_allowed = trialcs->mems_allowed;
 	mutex_unlock(&callback_mutex);
 
-	update_tasks_nodemask_hier(cs, true, &heap);
-
-	heap_free(&heap);
+	update_tasks_nodemask_hier(cs, true);
 done:
 	return retval;
 }
@@ -1256,39 +1208,23 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
 	return 0;
 }
 
-/**
- * cpuset_change_flag - make a task's spread flags the same as its cpuset's
- * @tsk: task to be updated
- * @data: cpuset to @tsk belongs to
- *
- * Called by css_scan_tasks() for each task in a cgroup.
- *
- * We don't need to re-check for the cgroup/cpuset membership, since we're
- * holding cpuset_mutex at this point.
- */
-static void cpuset_change_flag(struct task_struct *tsk, void *data)
-{
-	struct cpuset *cs = data;
-
-	cpuset_update_task_spread_flag(cs, tsk);
-}
-
 /**
 * update_tasks_flags - update the spread flags of tasks in the cpuset.
 * @cs: the cpuset in which each task's spread flags needs to be changed
- * @heap: if NULL, defer allocating heap memory to css_scan_tasks()
 *
- * Called with cpuset_mutex held
- *
- * The css_scan_tasks() function will scan all the tasks in a cgroup,
- * calling callback functions for each.
- *
- * No return value. It's guaranteed that css_scan_tasks() always returns 0
- * if @heap != NULL.
+ * Iterate through each task of @cs updating its spread flags.  As this
+ * function is called with cpuset_mutex held, cpuset membership stays
+ * stable.
 */
-static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap)
+static void update_tasks_flags(struct cpuset *cs)
 {
-	css_scan_tasks(&cs->css, NULL, cpuset_change_flag, cs, heap);
+	struct css_task_iter it;
+	struct task_struct *task;
+
+	css_task_iter_start(&cs->css, &it);
+	while ((task = css_task_iter_next(&it)))
+		cpuset_update_task_spread_flag(cs, task);
+	css_task_iter_end(&it);
 }
 
 /*
@@ -1306,7 +1242,6 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
 	struct cpuset *trialcs;
 	int balance_flag_changed;
 	int spread_flag_changed;
-	struct ptr_heap heap;
 	int err;
 
 	trialcs = alloc_trial_cpuset(cs);
|
|||
if (err < 0)
|
||||
goto out;
|
||||
|
||||
err = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
|
||||
if (err < 0)
|
||||
goto out;
|
||||
|
||||
balance_flag_changed = (is_sched_load_balance(cs) !=
|
||||
is_sched_load_balance(trialcs));
|
||||
|
||||
|
@@ -1340,8 +1271,7 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
 		rebuild_sched_domains_locked();
 
 	if (spread_flag_changed)
-		update_tasks_flags(cs, &heap);
-	heap_free(&heap);
+		update_tasks_flags(cs);
 out:
 	free_trial_cpuset(trialcs);
 	return err;
@@ -1445,6 +1375,8 @@ static int fmeter_getrate(struct fmeter *fmp)
 	return val;
 }
 
+static struct cpuset *cpuset_attach_old_cs;
+
 /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
 static int cpuset_can_attach(struct cgroup_subsys_state *css,
 			     struct cgroup_taskset *tset)
@@ -1453,6 +1385,9 @@ static int cpuset_can_attach(struct cgroup_subsys_state *css,
 	struct task_struct *task;
 	int ret;
 
+	/* used later by cpuset_attach() */
+	cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset));
+
 	mutex_lock(&cpuset_mutex);
 
 	/*
@@ -1464,7 +1399,7 @@ static int cpuset_can_attach(struct cgroup_subsys_state *css,
 	    (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
 		goto out_unlock;
 
-	cgroup_taskset_for_each(task, css, tset) {
+	cgroup_taskset_for_each(task, tset) {
 		/*
 		 * Kthreads which disallow setaffinity shouldn't be moved
 		 * to a new cpuset; we don't want to change their cpu
@@ -1516,10 +1451,8 @@ static void cpuset_attach(struct cgroup_subsys_state *css,
 	struct mm_struct *mm;
 	struct task_struct *task;
 	struct task_struct *leader = cgroup_taskset_first(tset);
-	struct cgroup_subsys_state *oldcss = cgroup_taskset_cur_css(tset,
-							cpuset_subsys_id);
 	struct cpuset *cs = css_cs(css);
-	struct cpuset *oldcs = css_cs(oldcss);
+	struct cpuset *oldcs = cpuset_attach_old_cs;
 	struct cpuset *cpus_cs = effective_cpumask_cpuset(cs);
 	struct cpuset *mems_cs = effective_nodemask_cpuset(cs);
 
@@ -1533,7 +1466,7 @@ static void cpuset_attach(struct cgroup_subsys_state *css,
 
 	guarantee_online_mems(mems_cs, &cpuset_attach_nodemask_to);
 
-	cgroup_taskset_for_each(task, css, tset) {
+	cgroup_taskset_for_each(task, tset) {
 		/*
 		 * can_attach beforehand should guarantee that this doesn't
 		 * fail.  TODO: have a better way to handle failure here
@@ -1673,7 +1606,7 @@ out_unlock:
 * Common handling for a write to a "cpus" or "mems" file.
 */
 static int cpuset_write_resmask(struct cgroup_subsys_state *css,
-				struct cftype *cft, const char *buf)
+				struct cftype *cft, char *buf)
 {
 	struct cpuset *cs = css_cs(css);
 	struct cpuset *trialcs;
@@ -2020,8 +1953,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css)
 	kfree(cs);
 }
 
-struct cgroup_subsys cpuset_subsys = {
-	.name = "cpuset",
+struct cgroup_subsys cpuset_cgrp_subsys = {
 	.css_alloc = cpuset_css_alloc,
 	.css_online = cpuset_css_online,
 	.css_offline = cpuset_css_offline,
@@ -2029,7 +1961,6 @@ struct cgroup_subsys cpuset_subsys = {
 	.can_attach = cpuset_can_attach,
 	.cancel_attach = cpuset_cancel_attach,
 	.attach = cpuset_attach,
-	.subsys_id = cpuset_subsys_id,
 	.base_cftypes = files,
 	.early_init = 1,
 };
@@ -2086,10 +2017,9 @@ static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
 		parent = parent_cs(parent);
 
 	if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
-		rcu_read_lock();
-		printk(KERN_ERR "cpuset: failed to transfer tasks out of empty cpuset %s\n",
-		       cgroup_name(cs->css.cgroup));
-		rcu_read_unlock();
+		printk(KERN_ERR "cpuset: failed to transfer tasks out of empty cpuset ");
+		pr_cont_cgroup_name(cs->css.cgroup);
+		pr_cont("\n");
 	}
 }
 
@@ -2137,7 +2067,7 @@ retry:
 	 */
 	if ((sane && cpumask_empty(cs->cpus_allowed)) ||
 	    (!cpumask_empty(&off_cpus) && !cpumask_empty(cs->cpus_allowed)))
-		update_tasks_cpumask(cs, NULL);
+		update_tasks_cpumask(cs);
 
 	mutex_lock(&callback_mutex);
 	nodes_andnot(cs->mems_allowed, cs->mems_allowed, off_mems);
@@ -2151,7 +2081,7 @@ retry:
 	 */
 	if ((sane && nodes_empty(cs->mems_allowed)) ||
 	    (!nodes_empty(off_mems) && !nodes_empty(cs->mems_allowed)))
-		update_tasks_nodemask(cs, NULL);
+		update_tasks_nodemask(cs);
 
 	is_empty = cpumask_empty(cs->cpus_allowed) ||
 		nodes_empty(cs->mems_allowed);
@@ -2213,7 +2143,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
 		mutex_lock(&callback_mutex);
 		top_cpuset.mems_allowed = new_mems;
 		mutex_unlock(&callback_mutex);
-		update_tasks_nodemask(&top_cpuset, NULL);
+		update_tasks_nodemask(&top_cpuset);
 	}
 
 	mutex_unlock(&cpuset_mutex);
@@ -2305,10 +2235,10 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
 	struct cpuset *cpus_cs;
 
 	mutex_lock(&callback_mutex);
-	task_lock(tsk);
+	rcu_read_lock();
 	cpus_cs = effective_cpumask_cpuset(task_cs(tsk));
 	guarantee_online_cpus(cpus_cs, pmask);
-	task_unlock(tsk);
+	rcu_read_unlock();
 	mutex_unlock(&callback_mutex);
 }
 
||||
|
@ -2361,10 +2291,10 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
|
|||
nodemask_t mask;
|
||||
|
||||
mutex_lock(&callback_mutex);
|
||||
task_lock(tsk);
|
||||
rcu_read_lock();
|
||||
mems_cs = effective_nodemask_cpuset(task_cs(tsk));
|
||||
guarantee_online_mems(mems_cs, &mask);
|
||||
task_unlock(tsk);
|
||||
rcu_read_unlock();
|
||||
mutex_unlock(&callback_mutex);
|
||||
|
||||
return mask;
|
||||
|
@@ -2480,10 +2410,10 @@ int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
 	/* Not hardwall and node outside mems_allowed: scan up cpusets */
 	mutex_lock(&callback_mutex);
 
-	task_lock(current);
+	rcu_read_lock();
 	cs = nearest_hardwall_ancestor(task_cs(current));
 	allowed = node_isset(node, cs->mems_allowed);
-	task_unlock(current);
+	rcu_read_unlock();
 
 	mutex_unlock(&callback_mutex);
 	return allowed;
@@ -2609,27 +2539,27 @@ int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
 * @task: pointer to task_struct of some task.
 *
 * Description: Prints @task's name, cpuset name, and cached copy of its
- * mems_allowed to the kernel log.  Must hold task_lock(task) to allow
- * dereferencing task_cs(task).
+ * mems_allowed to the kernel log.
 */
 void cpuset_print_task_mems_allowed(struct task_struct *tsk)
 {
 	/* Statically allocated to prevent using excess stack. */
 	static char cpuset_nodelist[CPUSET_NODELIST_LEN];
 	static DEFINE_SPINLOCK(cpuset_buffer_lock);
+	struct cgroup *cgrp;
 
-	struct cgroup *cgrp = task_cs(tsk)->css.cgroup;
-
-	rcu_read_lock();
 	spin_lock(&cpuset_buffer_lock);
+	rcu_read_lock();
 
+	cgrp = task_cs(tsk)->css.cgroup;
 	nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
 			   tsk->mems_allowed);
-	printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n",
-	       tsk->comm, cgroup_name(cgrp), cpuset_nodelist);
+	printk(KERN_INFO "%s cpuset=", tsk->comm);
+	pr_cont_cgroup_name(cgrp);
+	pr_cont(" mems_allowed=%s\n", cpuset_nodelist);
 
-	spin_unlock(&cpuset_buffer_lock);
 	rcu_read_unlock();
+	spin_unlock(&cpuset_buffer_lock);
 }
 
 /*
@ -2660,9 +2590,9 @@ int cpuset_memory_pressure_enabled __read_mostly;
|
|||
|
||||
void __cpuset_memory_pressure_bump(void)
|
||||
{
|
||||
task_lock(current);
|
||||
rcu_read_lock();
|
||||
fmeter_markevent(&task_cs(current)->fmeter);
|
||||
task_unlock(current);
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PROC_PID_CPUSET
|
||||
|
@ -2679,12 +2609,12 @@ int proc_cpuset_show(struct seq_file *m, void *unused_v)
|
|||
{
|
||||
struct pid *pid;
|
||||
struct task_struct *tsk;
|
||||
char *buf;
|
||||
char *buf, *p;
|
||||
struct cgroup_subsys_state *css;
|
||||
int retval;
|
||||
|
||||
retval = -ENOMEM;
|
||||
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
|
||||
buf = kmalloc(PATH_MAX, GFP_KERNEL);
|
||||
if (!buf)
|
||||
goto out;
|
||||
|
||||
|
@ -2694,14 +2624,16 @@ int proc_cpuset_show(struct seq_file *m, void *unused_v)
|
|||
if (!tsk)
|
||||
goto out_free;
|
||||
|
||||
retval = -ENAMETOOLONG;
|
||||
rcu_read_lock();
|
||||
css = task_css(tsk, cpuset_subsys_id);
|
||||
retval = cgroup_path(css->cgroup, buf, PAGE_SIZE);
|
||||
css = task_css(tsk, cpuset_cgrp_id);
|
||||
p = cgroup_path(css->cgroup, buf, PATH_MAX);
|
||||
rcu_read_unlock();
|
||||
if (retval < 0)
|
||||
if (!p)
|
||||
goto out_put_task;
|
||||
seq_puts(m, buf);
|
||||
seq_puts(m, p);
|
||||
seq_putc(m, '\n');
|
||||
retval = 0;
|
||||
out_put_task:
|
||||
put_task_struct(tsk);
|
||||
out_free:
|
||||
|
|
|
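The proc_cpuset_show() change illustrates the new cgroup_path() contract: backed by kernfs, it now fills the buffer and returns a pointer to the start of the path on success (NULL if it does not fit) instead of an int, hence the new p variable and the !p check. A natural way to implement such an API is to assemble the path right-to-left from the end of the buffer and return where the writing stopped. The sketch below shows that pointer-returning style; it is an illustration, not the kernel's implementation.

#include <stdio.h>
#include <string.h>

struct node {
	const char *name;
	struct node *parent;
};

/* build "/a/b/c" right-to-left into buf; return a pointer to the
 * start of the path, or NULL if the buffer is too small */
static char *node_path(struct node *n, char *buf, size_t buflen)
{
	char *p = buf + buflen;

	*--p = '\0';
	for (; n; n = n->parent) {
		size_t len = strlen(n->name);

		if ((size_t)(p - buf) < len + 1)
			return NULL;
		p -= len;
		memcpy(p, n->name, len);
		*--p = '/';
	}
	return p;
}

int main(void)
{
	struct node root = { "root", NULL };
	struct node child = { "child", &root };
	char buf[64];
	char *p = node_path(&child, buf, sizeof(buf));

	if (p)
		printf("%s\n", p);	/* prints /root/child */
	return 0;
}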
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -361,7 +361,7 @@ struct perf_cgroup {
 static inline struct perf_cgroup *
 perf_cgroup_from_task(struct task_struct *task)
 {
-	return container_of(task_css(task, perf_subsys_id),
+	return container_of(task_css(task, perf_event_cgrp_id),
 			    struct perf_cgroup, css);
 }
 
@@ -389,11 +389,6 @@ perf_cgroup_match(struct perf_event *event)
 		    event->cgrp->css.cgroup);
 }
 
-static inline bool perf_tryget_cgroup(struct perf_event *event)
-{
-	return css_tryget(&event->cgrp->css);
-}
-
 static inline void perf_put_cgroup(struct perf_event *event)
 {
 	css_put(&event->cgrp->css);
@@ -612,9 +607,7 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
 	if (!f.file)
 		return -EBADF;
 
-	rcu_read_lock();
-
-	css = css_from_dir(f.file->f_dentry, &perf_subsys);
+	css = css_tryget_from_dir(f.file->f_dentry, &perf_event_cgrp_subsys);
 	if (IS_ERR(css)) {
 		ret = PTR_ERR(css);
 		goto out;
@@ -623,13 +616,6 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
 	cgrp = container_of(css, struct perf_cgroup, css);
 	event->cgrp = cgrp;
 
-	/* must be done before we fput() the file */
-	if (!perf_tryget_cgroup(event)) {
-		event->cgrp = NULL;
-		ret = -ENOENT;
-		goto out;
-	}
-
 	/*
 	 * all events in a group must monitor
 	 * the same cgroup because a task belongs
@@ -640,7 +626,6 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
 		ret = -EINVAL;
 	}
 out:
-	rcu_read_unlock();
 	fdput(f);
 	return ret;
 }
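The css_from_dir() lookup plus a separate tryget under rcu_read_lock() is folded into css_tryget_from_dir(), which returns either with a reference already held or an ERR_PTR. The caller never holds an unreferenced pointer, so the whole perf_tryget_cgroup() dance above disappears. A small standalone sketch of the "look up and take the reference inside the lookup" pattern using C11 atomics; the obj_* names are illustrative, not kernel API.

#include <stdatomic.h>
#include <stdio.h>

struct obj {
	atomic_int refcnt;	/* 0 means the object is going away */
};

static struct obj registry_slot = { ATOMIC_VAR_INIT(1) };	/* pretend lookup table */

/* tryget: take a reference only if the object is still live */
static int obj_tryget(struct obj *o)
{
	int c = atomic_load(&o->refcnt);

	while (c > 0)
		if (atomic_compare_exchange_weak(&o->refcnt, &c, c + 1))
			return 1;
	return 0;
}

/* lookup-and-get in one step, mirroring css_tryget_from_dir():
 * the caller either gets a referenced object or nothing */
static struct obj *obj_tryget_from_key(int key)
{
	struct obj *o = (key == 0) ? &registry_slot : NULL;

	if (!o || !obj_tryget(o))
		return NULL;	/* not found, or found but dying */
	return o;
}

static void obj_put(struct obj *o)
{
	atomic_fetch_sub(&o->refcnt, 1);
}

int main(void)
{
	struct obj *o = obj_tryget_from_key(0);

	if (o) {
		printf("got object, refcnt now %d\n", atomic_load(&o->refcnt));
		obj_put(o);
	}
	return 0;
}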
@@ -8053,7 +8038,7 @@ static void perf_cgroup_attach(struct cgroup_subsys_state *css,
 {
 	struct task_struct *task;
 
-	cgroup_taskset_for_each(task, css, tset)
+	cgroup_taskset_for_each(task, tset)
 		task_function_call(task, __perf_cgroup_move, task);
 }
 
@@ -8072,9 +8057,7 @@ static void perf_cgroup_exit(struct cgroup_subsys_state *css,
 	task_function_call(task, __perf_cgroup_move, task);
 }
 
-struct cgroup_subsys perf_subsys = {
-	.name = "perf_event",
-	.subsys_id = perf_subsys_id,
+struct cgroup_subsys perf_event_cgrp_subsys = {
 	.css_alloc = perf_cgroup_css_alloc,
 	.css_free = perf_cgroup_css_free,
 	.exit = perf_cgroup_exit,
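Every controller in this series loses its .name and .subsys_id initializers, and the struct itself is renamed to the <name>_cgrp_subsys convention. Both the id enum and the name are now derived from the single controller list in include/linux/cgroup_subsys.h, where each controller appears once as SUBSYS(name). The sketch below reproduces the underlying X-macro technique with a local SUBSYS_LIST macro (the kernel instead re-includes the header, but the expansion trick is the same).

#include <stdio.h>

/* one list, expanded twice: once into ids, once into names */
#define SUBSYS_LIST \
	SUBSYS(cpuset) \
	SUBSYS(cpu) \
	SUBSYS(memory) \
	SUBSYS(perf_event)

enum cgroup_subsys_id {
#define SUBSYS(_x) _x##_cgrp_id,
	SUBSYS_LIST
#undef SUBSYS
	CGROUP_SUBSYS_COUNT,
};

static const char *cgroup_subsys_name[] = {
#define SUBSYS(_x) [_x##_cgrp_id] = #_x,
	SUBSYS_LIST
#undef SUBSYS
};

int main(void)
{
	int i;

	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++)
		printf("%d: %s\n", i, cgroup_subsys_name[i]);
	return 0;
}

With this in place a controller can no longer disagree with the central list about its own id or name, which is why the per-controller fields could be deleted wholesale.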
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -797,7 +797,7 @@ void do_exit(long code)
 	 */
 	perf_event_exit_task(tsk);
 
-	cgroup_exit(tsk, 1);
+	cgroup_exit(tsk);
 
 	if (group_dead)
 		disassociate_ctty(1);

--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1272,7 +1272,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	if (IS_ERR(p->mempolicy)) {
 		retval = PTR_ERR(p->mempolicy);
 		p->mempolicy = NULL;
-		goto bad_fork_cleanup_cgroup;
+		goto bad_fork_cleanup_threadgroup_lock;
 	}
 	mpol_fix_fork_child_flag(p);
 #endif
@@ -1525,11 +1525,10 @@ bad_fork_cleanup_policy:
 	perf_event_free_task(p);
 #ifdef CONFIG_NUMA
 	mpol_put(p->mempolicy);
-bad_fork_cleanup_cgroup:
+bad_fork_cleanup_threadgroup_lock:
 #endif
 	if (clone_flags & CLONE_THREAD)
 		threadgroup_change_end(current);
-	cgroup_exit(p, 0);
 	delayacct_tsk_free(p);
 	module_put(task_thread_info(p)->exec_domain->module);
 bad_fork_cleanup_count:
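With cgroup_exit() reduced to a single caller in do_exit(), the fork failure path no longer calls it at all: a half-constructed task that was never made visible has nothing cgroup-side to undo, and the label rename records that the first thing left to unwind at that point is the threadgroup lock. For readers less used to copy_process()'s style, this is the classic goto-based unwind idiom, reduced here to a standalone sketch.

#include <stdio.h>
#include <stdlib.h>

/* each successful setup step falls through; each failure jumps to the
 * label that undoes exactly the steps completed so far */
static int make_widget(void)
{
	char *a, *b;
	int err = -1;

	a = malloc(16);
	if (!a)
		goto out;
	b = malloc(16);
	if (!b)
		goto cleanup_a;	/* only 'a' exists at this point */

	printf("widget built\n");
	free(b);
	err = 0;
cleanup_a:
	free(a);
out:
	return err;
}

int main(void)
{
	return make_widget() ? 1 : 0;
}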
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7230,7 +7230,7 @@ void sched_move_task(struct task_struct *tsk)
 	if (unlikely(running))
 		tsk->sched_class->put_prev_task(rq, tsk);
 
-	tg = container_of(task_css_check(tsk, cpu_cgroup_subsys_id,
+	tg = container_of(task_css_check(tsk, cpu_cgrp_id,
 				lockdep_is_held(&tsk->sighand->siglock)),
 			  struct task_group, css);
 	tg = autogroup_task_group(tsk, tg);
@@ -7657,7 +7657,7 @@ static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
 {
 	struct task_struct *task;
 
-	cgroup_taskset_for_each(task, css, tset) {
+	cgroup_taskset_for_each(task, tset) {
 #ifdef CONFIG_RT_GROUP_SCHED
 		if (!sched_rt_can_attach(css_tg(css), task))
 			return -EINVAL;
@@ -7675,7 +7675,7 @@ static void cpu_cgroup_attach(struct cgroup_subsys_state *css,
 {
 	struct task_struct *task;
 
-	cgroup_taskset_for_each(task, css, tset)
+	cgroup_taskset_for_each(task, tset)
 		sched_move_task(task);
 }
 
@@ -8014,8 +8014,7 @@ static struct cftype cpu_files[] = {
 	{ }	/* terminate */
 };
 
-struct cgroup_subsys cpu_cgroup_subsys = {
-	.name = "cpu",
+struct cgroup_subsys cpu_cgrp_subsys = {
 	.css_alloc = cpu_cgroup_css_alloc,
 	.css_free = cpu_cgroup_css_free,
 	.css_online = cpu_cgroup_css_online,
@@ -8023,7 +8022,6 @@ struct cgroup_subsys cpu_cgroup_subsys = {
 	.can_attach = cpu_cgroup_can_attach,
 	.attach = cpu_cgroup_attach,
 	.exit = cpu_cgroup_exit,
-	.subsys_id = cpu_cgroup_subsys_id,
 	.base_cftypes = cpu_files,
 	.early_init = 1,
 };
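cgroup_taskset_for_each() drops its css argument, so every controller's can_attach/attach path updates mechanically; the taskset alone now determines what is iterated. Such iterators are just for loops disguised behind a macro. A self-contained sketch of an iteration macro of the same shape over a toy task list (the types here are illustrative, not the kernel's):

#include <stdio.h>

struct task {
	const char *comm;
	struct task *next;
};

struct taskset {
	struct task *first;
};

/* for-each macro in the style of cgroup_taskset_for_each(task, tset):
 * callers supply only the loop variable and the set */
#define taskset_for_each(task, tset) \
	for ((task) = (tset)->first; (task); (task) = (task)->next)

int main(void)
{
	struct task t2 = { "worker", NULL };
	struct task t1 = { "main", &t2 };
	struct taskset tset = { &t1 };
	struct task *task;

	taskset_for_each(task, &tset)
		printf("moving task %s\n", task->comm);
	return 0;
}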
--- a/kernel/sched/cpuacct.c
+++ b/kernel/sched/cpuacct.c
@@ -41,7 +41,7 @@ static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css)
 /* return cpu accounting group to which this task belongs */
 static inline struct cpuacct *task_ca(struct task_struct *tsk)
 {
-	return css_ca(task_css(tsk, cpuacct_subsys_id));
+	return css_ca(task_css(tsk, cpuacct_cgrp_id));
 }
 
 static inline struct cpuacct *parent_ca(struct cpuacct *ca)
@@ -275,11 +275,9 @@ void cpuacct_account_field(struct task_struct *p, int index, u64 val)
 	rcu_read_unlock();
 }
 
-struct cgroup_subsys cpuacct_subsys = {
-	.name = "cpuacct",
+struct cgroup_subsys cpuacct_cgrp_subsys = {
 	.css_alloc = cpuacct_css_alloc,
 	.css_free = cpuacct_css_free,
-	.subsys_id = cpuacct_subsys_id,
 	.base_cftypes = files,
 	.early_init = 1,
 };

--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -111,8 +111,7 @@ static char *task_group_path(struct task_group *tg)
 	if (autogroup_path(tg, group_path, PATH_MAX))
 		return group_path;
 
-	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
-	return group_path;
+	return cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
 }
 #endif
 

--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -30,7 +30,6 @@ struct hugetlb_cgroup {
 #define MEMFILE_IDX(val)	(((val) >> 16) & 0xffff)
 #define MEMFILE_ATTR(val)	((val) & 0xffff)
 
-struct cgroup_subsys hugetlb_subsys __read_mostly;
 static struct hugetlb_cgroup *root_h_cgroup __read_mostly;
 
 static inline
@@ -42,7 +41,7 @@ struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
 static inline
 struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
 {
-	return hugetlb_cgroup_from_css(task_css(task, hugetlb_subsys_id));
+	return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id));
 }
 
 static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
@@ -255,7 +254,7 @@ static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
 }
 
 static int hugetlb_cgroup_write(struct cgroup_subsys_state *css,
-				struct cftype *cft, const char *buffer)
+				struct cftype *cft, char *buffer)
 {
 	int idx, name, ret;
 	unsigned long long val;
@@ -358,7 +357,7 @@ static void __init __hugetlb_cgroup_file_init(int idx)
 	cft = &h->cgroup_files[4];
 	memset(cft, 0, sizeof(*cft));
 
-	WARN_ON(cgroup_add_cftypes(&hugetlb_subsys, h->cgroup_files));
+	WARN_ON(cgroup_add_cftypes(&hugetlb_cgrp_subsys, h->cgroup_files));
 
 	return;
 }
@@ -402,10 +401,8 @@ void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
 	return;
 }
 
-struct cgroup_subsys hugetlb_subsys = {
-	.name = "hugetlb",
+struct cgroup_subsys hugetlb_cgrp_subsys = {
 	.css_alloc = hugetlb_cgroup_css_alloc,
 	.css_offline = hugetlb_cgroup_css_offline,
 	.css_free = hugetlb_cgroup_css_free,
-	.subsys_id = hugetlb_subsys_id,
 };
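Note the recurring signature change, first visible in hugetlb_cgroup_write() above and repeated in the memcg, net and devices controllers below: cftype write handlers now take char *buffer instead of const char *buffer. The handlers receive their own copy of the written data, so they may tokenize it in place, which is what helpers like strsep() require. A sketch of the in-place parsing that a mutable buffer permits:

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>

/* parse "key value" in place; strsep() rewrites the buffer, which is
 * only legal because the handler owns a mutable copy of the input */
static int parse_write(char *buffer)
{
	char *p = buffer;
	char *key = strsep(&p, " ");

	if (!key || !p)
		return -1;
	printf("key=%s value=%s\n", key, p);
	return 0;
}

int main(void)
{
	char buf[] = "8:16 rwm";	/* e.g. a devices-style rule */

	return parse_write(buf) ? 1 : 0;
}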
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -66,8 +66,8 @@
 
 #include <trace/events/vmscan.h>
 
-struct cgroup_subsys mem_cgroup_subsys __read_mostly;
-EXPORT_SYMBOL(mem_cgroup_subsys);
+struct cgroup_subsys memory_cgrp_subsys __read_mostly;
+EXPORT_SYMBOL(memory_cgrp_subsys);
 
 #define MEM_CGROUP_RECLAIM_RETRIES	5
 static struct mem_cgroup *root_mem_cgroup __read_mostly;
@@ -538,7 +538,7 @@ static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
 {
 	struct cgroup_subsys_state *css;
 
-	css = css_from_id(id - 1, &mem_cgroup_subsys);
+	css = css_from_id(id - 1, &memory_cgrp_subsys);
 	return mem_cgroup_from_css(css);
 }
 
@@ -1072,7 +1072,7 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 	if (unlikely(!p))
 		return NULL;
 
-	return mem_cgroup_from_css(task_css(p, mem_cgroup_subsys_id));
+	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
 }
 
 struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
@@ -1683,15 +1683,8 @@ static void move_unlock_mem_cgroup(struct mem_cgroup *memcg,
  */
 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 {
-	/*
-	 * protects memcg_name and makes sure that parallel ooms do not
-	 * interleave
-	 */
+	/* oom_info_lock ensures that parallel ooms do not interleave */
 	static DEFINE_MUTEX(oom_info_lock);
-	struct cgroup *task_cgrp;
-	struct cgroup *mem_cgrp;
-	static char memcg_name[PATH_MAX];
-	int ret;
 	struct mem_cgroup *iter;
 	unsigned int i;
 
@@ -1701,36 +1694,14 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 	mutex_lock(&oom_info_lock);
 	rcu_read_lock();
 
-	mem_cgrp = memcg->css.cgroup;
-	task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
+	pr_info("Task in ");
+	pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
+	pr_info(" killed as a result of limit of ");
+	pr_cont_cgroup_path(memcg->css.cgroup);
+	pr_info("\n");
 
-	ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
-	if (ret < 0) {
-		/*
-		 * Unfortunately, we are unable to convert to a useful name
-		 * But we'll still print out the usage information
-		 */
-		rcu_read_unlock();
-		goto done;
-	}
 	rcu_read_unlock();
 
-	pr_info("Task in %s killed", memcg_name);
-
-	rcu_read_lock();
-	ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
-	if (ret < 0) {
-		rcu_read_unlock();
-		goto done;
-	}
-	rcu_read_unlock();
-
-	/*
-	 * Continues from above, so we don't need an KERN_ level
-	 */
-	pr_cont(" as a result of limit of %s\n", memcg_name);
-done:
-
 	pr_info("memory: usage %llukB, limit %llukB, failcnt %llu\n",
 		res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
 		res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
@@ -1745,13 +1716,8 @@ done:
 		res_counter_read_u64(&memcg->kmem, RES_FAILCNT));
 
 	for_each_mem_cgroup_tree(iter, memcg) {
-		pr_info("Memory cgroup stats");
-
-		rcu_read_lock();
-		ret = cgroup_path(iter->css.cgroup, memcg_name, PATH_MAX);
-		if (!ret)
-			pr_cont(" for %s", memcg_name);
-		rcu_read_unlock();
+		pr_info("Memory cgroup stats for ");
+		pr_cont_cgroup_path(iter->css.cgroup);
 		pr_cont(":");
 
 		for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
@@ -3401,7 +3367,7 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
 					 struct kmem_cache *s)
 {
 	struct kmem_cache *new = NULL;
-	static char *tmp_name = NULL;
+	static char *tmp_path = NULL, *tmp_name = NULL;
 	static DEFINE_MUTEX(mutex);	/* protects tmp_name */
 
 	BUG_ON(!memcg_can_account_kmem(memcg));
@@ -3413,18 +3379,20 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
 	 * This static temporary buffer is used to prevent from
 	 * pointless shortliving allocation.
 	 */
-	if (!tmp_name) {
-		tmp_name = kmalloc(PATH_MAX, GFP_KERNEL);
+	if (!tmp_path || !tmp_name) {
+		if (!tmp_path)
+			tmp_path = kmalloc(PATH_MAX, GFP_KERNEL);
+		if (!tmp_name)
+			tmp_name = kmalloc(NAME_MAX + 1, GFP_KERNEL);
+		if (!tmp_path || !tmp_name)
 			goto out;
 	}
 
-	rcu_read_lock();
-	snprintf(tmp_name, PATH_MAX, "%s(%d:%s)", s->name,
-		 memcg_cache_id(memcg), cgroup_name(memcg->css.cgroup));
-	rcu_read_unlock();
+	cgroup_name(memcg->css.cgroup, tmp_name, NAME_MAX + 1);
+	snprintf(tmp_path, PATH_MAX, "%s(%d:%s)", s->name,
+		 memcg_cache_id(memcg), tmp_name);
 
-	new = kmem_cache_create_memcg(memcg, tmp_name, s->object_size, s->align,
+	new = kmem_cache_create_memcg(memcg, tmp_path, s->object_size, s->align,
 				      (s->flags & ~SLAB_PANIC), s->ctor, s);
 	if (new)
 		new->allocflags |= __GFP_KMEMCG;
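The memcg_create_kmem_cache() hunk shows the other half of the naming rework: cgroup_name() no longer returns a pointer that is only valid under RCU; it copies the name into a caller-supplied buffer, kernfs_name()-style, so the rcu_read_lock()/rcu_read_unlock() pair around the snprintf() goes away. A userspace mock of the copy-out style (obj_name is a stand-in, not the kernel function):

#include <stdio.h>
#include <string.h>

/* copy-out naming API: the caller owns the result's lifetime, so no
 * lock needs to be held across later uses of the name */
static int obj_name(const char *internal_name, char *buf, size_t buflen)
{
	return snprintf(buf, buflen, "%s", internal_name);
}

int main(void)
{
	char name[256];		/* NAME_MAX + 1 in the kernel code above */
	char path[64];

	obj_name("demo-memcg", name, sizeof(name));
	snprintf(path, sizeof(path), "%s(%d:%s)", "kmalloc-64", 3, name);
	printf("%s\n", path);	/* kmalloc-64(3:demo-memcg) */
	return 0;
}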
@@ -4990,7 +4958,7 @@ static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
 	struct cgroup *cgrp = memcg->css.cgroup;
 
 	/* returns EBUSY if there is a task or if we come here twice. */
-	if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
+	if (cgroup_has_tasks(cgrp) || !list_empty(&cgrp->children))
 		return -EBUSY;
 
 	/* we call try-to-free pages for make this cgroup empty */
@@ -5172,7 +5140,7 @@ static int __memcg_activate_kmem(struct mem_cgroup *memcg,
 	 * of course permitted.
 	 */
 	mutex_lock(&memcg_create_mutex);
-	if (cgroup_task_count(memcg->css.cgroup) || memcg_has_children(memcg))
+	if (cgroup_has_tasks(memcg->css.cgroup) || memcg_has_children(memcg))
 		err = -EBUSY;
 	mutex_unlock(&memcg_create_mutex);
 	if (err)
@@ -5274,7 +5242,7 @@ static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
  * RES_LIMIT.
  */
 static int mem_cgroup_write(struct cgroup_subsys_state *css, struct cftype *cft,
-			    const char *buffer)
+			    char *buffer)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 	enum res_type type;
@@ -6095,7 +6063,7 @@ static void memcg_event_ptable_queue_proc(struct file *file,
  * Interpretation of args is defined by control file implementation.
  */
 static int memcg_write_event_control(struct cgroup_subsys_state *css,
-				     struct cftype *cft, const char *buffer)
+				     struct cftype *cft, char *buffer)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 	struct mem_cgroup_event *event;
@@ -6183,17 +6151,15 @@ static int memcg_write_event_control(struct cgroup_subsys_state *css,
 	 * automatically removed on cgroup destruction but the removal is
 	 * asynchronous, so take an extra ref on @css.
 	 */
-	rcu_read_lock();
-
+	cfile_css = css_tryget_from_dir(cfile.file->f_dentry->d_parent,
+					&memory_cgrp_subsys);
 	ret = -EINVAL;
-	cfile_css = css_from_dir(cfile.file->f_dentry->d_parent,
-				 &mem_cgroup_subsys);
-	if (cfile_css == css && css_tryget(css))
-		ret = 0;
-
-	rcu_read_unlock();
-	if (ret)
+	if (IS_ERR(cfile_css))
 		goto out_put_cfile;
+	if (cfile_css != css) {
+		css_put(cfile_css);
+		goto out_put_cfile;
+	}
 
 	ret = event->register_event(memcg, event->eventfd, buffer);
 	if (ret)
@@ -6566,11 +6532,11 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
 		 * unfortunate state in our controller.
 		 */
 		if (parent != root_mem_cgroup)
-			mem_cgroup_subsys.broken_hierarchy = true;
+			memory_cgrp_subsys.broken_hierarchy = true;
 	}
 	mutex_unlock(&memcg_create_mutex);
 
-	return memcg_init_kmem(memcg, &mem_cgroup_subsys);
+	return memcg_init_kmem(memcg, &memory_cgrp_subsys);
 }
 
 /*
@@ -7272,9 +7238,7 @@ static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
 	mem_cgroup_from_css(root_css)->use_hierarchy = true;
 }
 
-struct cgroup_subsys mem_cgroup_subsys = {
-	.name = "memory",
-	.subsys_id = mem_cgroup_subsys_id,
+struct cgroup_subsys memory_cgrp_subsys = {
 	.css_alloc = mem_cgroup_css_alloc,
 	.css_online = mem_cgroup_css_online,
 	.css_offline = mem_cgroup_css_offline,
@@ -7300,7 +7264,7 @@ __setup("swapaccount=", enable_swap_account);
 
 static void __init memsw_file_init(void)
 {
-	WARN_ON(cgroup_add_cftypes(&mem_cgroup_subsys, memsw_cgroup_files));
+	WARN_ON(cgroup_add_cftypes(&memory_cgrp_subsys, memsw_cgroup_files));
 }
 
 static void __init enable_swap_cgroup(void)

--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -145,14 +145,10 @@ static int hwpoison_filter_task(struct page *p)
 		return -EINVAL;
 
 	css = mem_cgroup_css(mem);
-	/* root_mem_cgroup has NULL dentries */
-	if (!css->cgroup->dentry)
-		return -EINVAL;
-
-	ino = css->cgroup->dentry->d_inode->i_ino;
+	ino = cgroup_ino(css->cgroup);
 	css_put(css);
 
-	if (ino != hwpoison_filter_memcg)
+	if (!ino || ino != hwpoison_filter_memcg)
 		return -EINVAL;
 
 	return 0;
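With cgroups on kernfs there is no vfs dentry to poke at, so the hwpoison filter identifies a memcg by its kernfs inode number via cgroup_ino(), which appears to yield 0 for a cgroup without a kernfs node, hence the added !ino clause. From userspace, the value to feed hwpoison_filter_memcg should correspond to the inode number of the cgroup's directory, which a quick stat() shows (the path below is hypothetical):

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct stat st;
	const char *path = "/sys/fs/cgroup/memory/test";	/* hypothetical */

	if (stat(path, &st) != 0) {
		perror("stat");
		return 1;
	}
	/* the directory's inode number, the identity used by the
	 * hwpoison memcg filter after the change above */
	printf("%s ino=%llu\n", path, (unsigned long long)st.st_ino);
	return 0;
}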
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -243,7 +243,7 @@ config XPS
 	default y
 
 config CGROUP_NET_PRIO
-	tristate "Network priority cgroup"
+	bool "Network priority cgroup"
 	depends on CGROUPS
 	---help---
 	  Cgroup subsystem for use in assigning processes to network priorities on
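CGROUP_NET_PRIO going from tristate to bool is the Kconfig face of dropping module support from cgroup: controllers must now be built in, and as the net_cls and net_prio diffs below show, cgroup_load_subsys()/cgroup_unload_subsys() and the module init/exit pairs give way to unconditional early registration. As a loose userspace analogy only, the shape is "register at startup, with no unload path", which a GCC/Clang constructor mimics:

#include <stdio.h>

/* analogy only: like a built-in subsys_initcall(), this runs
 * unconditionally at program startup and cannot be unloaded */
__attribute__((constructor))
static void register_controller(void)
{
	printf("controller registered at startup\n");
}

int main(void)
{
	printf("main runs after all constructors\n");
	return 0;
}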
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -23,7 +23,7 @@ static inline struct cgroup_cls_state *css_cls_state(struct cgroup_subsys_state
 
 struct cgroup_cls_state *task_cls_state(struct task_struct *p)
 {
-	return css_cls_state(task_css(p, net_cls_subsys_id));
+	return css_cls_state(task_css(p, net_cls_cgrp_id));
 }
 EXPORT_SYMBOL_GPL(task_cls_state);
 
@@ -73,7 +73,7 @@ static void cgrp_attach(struct cgroup_subsys_state *css,
 	void *v = (void *)(unsigned long)cs->classid;
 	struct task_struct *p;
 
-	cgroup_taskset_for_each(p, css, tset) {
+	cgroup_taskset_for_each(p, tset) {
 		task_lock(p);
 		iterate_fd(p->files, 0, update_classid, v);
 		task_unlock(p);
@@ -102,19 +102,10 @@ static struct cftype ss_files[] = {
 	{ }	/* terminate */
 };
 
-struct cgroup_subsys net_cls_subsys = {
-	.name = "net_cls",
+struct cgroup_subsys net_cls_cgrp_subsys = {
 	.css_alloc = cgrp_css_alloc,
 	.css_online = cgrp_css_online,
 	.css_free = cgrp_css_free,
 	.attach = cgrp_attach,
-	.subsys_id = net_cls_subsys_id,
 	.base_cftypes = ss_files,
-	.module = THIS_MODULE,
 };
-
-static int __init init_netclassid_cgroup(void)
-{
-	return cgroup_load_subsys(&net_cls_subsys);
-}
-__initcall(init_netclassid_cgroup);

--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -186,7 +186,7 @@ static int read_priomap(struct seq_file *sf, void *v)
 }
 
 static int write_priomap(struct cgroup_subsys_state *css, struct cftype *cft,
-			 const char *buffer)
+			 char *buffer)
 {
 	char devname[IFNAMSIZ + 1];
 	struct net_device *dev;
@@ -224,7 +224,7 @@ static void net_prio_attach(struct cgroup_subsys_state *css,
 	struct task_struct *p;
 	void *v = (void *)(unsigned long)css->cgroup->id;
 
-	cgroup_taskset_for_each(p, css, tset) {
+	cgroup_taskset_for_each(p, tset) {
 		task_lock(p);
 		iterate_fd(p->files, 0, update_netprio, v);
 		task_unlock(p);
@@ -244,15 +244,12 @@ static struct cftype ss_files[] = {
 	{ }	/* terminate */
 };
 
-struct cgroup_subsys net_prio_subsys = {
-	.name = "net_prio",
+struct cgroup_subsys net_prio_cgrp_subsys = {
 	.css_alloc = cgrp_css_alloc,
 	.css_online = cgrp_css_online,
 	.css_free = cgrp_css_free,
 	.attach = net_prio_attach,
-	.subsys_id = net_prio_subsys_id,
 	.base_cftypes = ss_files,
-	.module = THIS_MODULE,
 };
 
 static int netprio_device_event(struct notifier_block *unused,
@@ -283,37 +280,9 @@ static struct notifier_block netprio_device_notifier = {
 
 static int __init init_cgroup_netprio(void)
 {
-	int ret;
-
-	ret = cgroup_load_subsys(&net_prio_subsys);
-	if (ret)
-		goto out;
-
 	register_netdevice_notifier(&netprio_device_notifier);
-
-out:
-	return ret;
+	return 0;
 }
 
-static void __exit exit_cgroup_netprio(void)
-{
-	struct netprio_map *old;
-	struct net_device *dev;
-
-	unregister_netdevice_notifier(&netprio_device_notifier);
-
-	cgroup_unload_subsys(&net_prio_subsys);
-
-	rtnl_lock();
-	for_each_netdev(&init_net, dev) {
-		old = rtnl_dereference(dev->priomap);
-		RCU_INIT_POINTER(dev->priomap, NULL);
-		if (old)
-			kfree_rcu(old, rcu);
-	}
-	rtnl_unlock();
-}
-
-module_init(init_cgroup_netprio);
-module_exit(exit_cgroup_netprio);
+subsys_initcall(init_cgroup_netprio);
 MODULE_LICENSE("GPL v2");

--- a/net/ipv4/tcp_memcontrol.c
+++ b/net/ipv4/tcp_memcontrol.c
@@ -103,7 +103,7 @@ static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
 }
 
 static int tcp_cgroup_write(struct cgroup_subsys_state *css, struct cftype *cft,
-			    const char *buffer)
+			    char *buffer)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 	unsigned long long val;
@@ -219,7 +219,7 @@ static struct cftype tcp_files[] = {
 
 static int __init tcp_memcontrol_init(void)
 {
-	WARN_ON(cgroup_add_cftypes(&mem_cgroup_subsys, tcp_files));
+	WARN_ON(cgroup_add_cftypes(&memory_cgrp_subsys, tcp_files));
 	return 0;
 }
 __initcall(tcp_memcontrol_init);

--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -58,11 +58,9 @@ static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
 
 static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
 {
-	return css_to_devcgroup(task_css(task, devices_subsys_id));
+	return css_to_devcgroup(task_css(task, devices_cgrp_id));
 }
 
-struct cgroup_subsys devices_subsys;
-
 /*
  * called under devcgroup_mutex
  */
@@ -498,7 +496,7 @@ static inline bool has_children(struct dev_cgroup *devcgroup)
  * parent cgroup has the access you're asking for.
  */
 static int devcgroup_update_access(struct dev_cgroup *devcgroup,
-				   int filetype, const char *buffer)
+				   int filetype, char *buffer)
 {
 	const char *b;
 	char temp[12];		/* 11 + 1 characters needed for a u32 */
@@ -654,7 +652,7 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
 }
 
 static int devcgroup_access_write(struct cgroup_subsys_state *css,
-				  struct cftype *cft, const char *buffer)
+				  struct cftype *cft, char *buffer)
 {
 	int retval;
 
@@ -684,13 +682,11 @@ static struct cftype dev_cgroup_files[] = {
 	{ }	/* terminate */
 };
 
-struct cgroup_subsys devices_subsys = {
-	.name = "devices",
+struct cgroup_subsys devices_cgrp_subsys = {
 	.css_alloc = devcgroup_css_alloc,
 	.css_free = devcgroup_css_free,
 	.css_online = devcgroup_online,
 	.css_offline = devcgroup_offline,
-	.subsys_id = devices_subsys_id,
 	.base_cftypes = dev_cgroup_files,
 };