@@ -5,6 +5,7 @@
 *
 * Copyright (C) 2003 BULL SA.
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
+ * Copyright (C) 2006 Google, Inc
 *
 * Portions derived from Patrick Mochel's sysfs code.
 * sysfs is Copyright (c) 2001-3 Patrick Mochel
@@ -12,6 +13,7 @@
 * 2003-10-10 Written by Simon Derr.
 * 2003-10-22 Updates by Stephen Hemminger.
 * 2004 May-July Rework by Paul Jackson.
+ * 2006 Rework by Paul Menage to use generic cgroups
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of the Linux
@@ -53,8 +55,6 @@
 #include <asm/atomic.h>
 #include <linux/mutex.h>
 
-#define CPUSET_SUPER_MAGIC 0x27e0eb
-
 /*
 * Tracks how many cpusets are currently defined in system.
 * When there is only one cpuset (the root cpuset) we can
@@ -62,6 +62,10 @@
 */
 int number_of_cpusets __read_mostly;
 
+/* Retrieve the cpuset from a cgroup */
+struct cgroup_subsys cpuset_subsys;
+struct cpuset;
+
 /* See "Frequency meter" comments, below. */
 
 struct fmeter {
@@ -72,24 +76,13 @@ struct fmeter {
 };
 
 struct cpuset {
+	struct cgroup_subsys_state css;
+
 	unsigned long flags;		/* "unsigned long" so bitops work */
 	cpumask_t cpus_allowed;		/* CPUs allowed to tasks in cpuset */
 	nodemask_t mems_allowed;	/* Memory Nodes allowed to tasks */
 
-	/*
-	 * Count is atomic so can incr (fork) or decr (exit) without a lock.
-	 */
-	atomic_t count;			/* count tasks using this cpuset */
-
-	/*
-	 * We link our 'sibling' struct into our parents 'children'.
-	 * Our children link their 'sibling' into our 'children'.
-	 */
-	struct list_head sibling;	/* my parents children */
-	struct list_head children;	/* my children */
-
 	struct cpuset *parent;		/* my parent */
-	struct dentry *dentry;		/* cpuset fs entry */
 
 	/*
	 * Copy of global cpuset_mems_generation as of the most
@@ -100,13 +93,26 @@ struct cpuset {
 	struct fmeter fmeter;		/* memory_pressure filter */
 };
 
+/* Retrieve the cpuset for a cgroup */
+static inline struct cpuset *cgroup_cs(struct cgroup *cont)
+{
+	return container_of(cgroup_subsys_state(cont, cpuset_subsys_id),
+			    struct cpuset, css);
+}
+
+/* Retrieve the cpuset for a task */
+static inline struct cpuset *task_cs(struct task_struct *task)
+{
+	return container_of(task_subsys_state(task, cpuset_subsys_id),
+			    struct cpuset, css);
+}
+
+
 /* bits in struct cpuset flags field */
 typedef enum {
 	CS_CPU_EXCLUSIVE,
 	CS_MEM_EXCLUSIVE,
 	CS_MEMORY_MIGRATE,
-	CS_REMOVED,
-	CS_NOTIFY_ON_RELEASE,
 	CS_SPREAD_PAGE,
 	CS_SPREAD_SLAB,
 } cpuset_flagbits_t;
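
The two inline helpers added above are the whole bridge between cpusets and the generic cgroup core: struct cpuset now embeds a struct cgroup_subsys_state, and container_of() walks back from the embedded member to the enclosing cpuset. A minimal, runnable userspace sketch of the same embed-and-recover idiom follows; the names in it (state, cpuset_like, state_to_cpuset) are illustrative stand-ins, not kernel APIs.

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel's container_of(): recover the enclosing
 * structure from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct state { int refcount; };                        /* stands in for cgroup_subsys_state */
struct cpuset_like { struct state css; int flags; };   /* stands in for struct cpuset */

static struct cpuset_like *state_to_cpuset(struct state *css)
{
	return container_of(css, struct cpuset_like, css);
}

int main(void)
{
	struct cpuset_like cs = { .css = { .refcount = 1 }, .flags = 0x2 };
	struct state *css = &cs.css;   /* what a generic framework would hand back */

	printf("flags = %#x\n", state_to_cpuset(css)->flags);
	return 0;
}
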
@@ -122,16 +128,6 @@ static inline int is_mem_exclusive(const struct cpuset *cs)
 	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
 }
 
-static inline int is_removed(const struct cpuset *cs)
-{
-	return test_bit(CS_REMOVED, &cs->flags);
-}
-
-static inline int notify_on_release(const struct cpuset *cs)
-{
-	return test_bit(CS_NOTIFY_ON_RELEASE, &cs->flags);
-}
-
 static inline int is_memory_migrate(const struct cpuset *cs)
 {
 	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
@@ -172,14 +168,8 @@ static struct cpuset top_cpuset = {
 	.flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
 	.cpus_allowed = CPU_MASK_ALL,
 	.mems_allowed = NODE_MASK_ALL,
-	.count = ATOMIC_INIT(0),
-	.sibling = LIST_HEAD_INIT(top_cpuset.sibling),
-	.children = LIST_HEAD_INIT(top_cpuset.children),
 };
 
-static struct vfsmount *cpuset_mount;
-static struct super_block *cpuset_sb;
-
 /*
 * We have two global cpuset mutexes below. They can nest.
 * It is ok to first take manage_mutex, then nest callback_mutex. We also
@@ -263,297 +253,33 @@ static struct super_block *cpuset_sb;
 * the routine cpuset_update_task_memory_state().
 */
 
-static DEFINE_MUTEX(manage_mutex);
 static DEFINE_MUTEX(callback_mutex);
 
-/*
|
|
|
|
- * A couple of forward declarations required, due to cyclic reference loop:
|
|
|
|
- * cpuset_mkdir -> cpuset_create -> cpuset_populate_dir -> cpuset_add_file
|
|
|
|
- * -> cpuset_create_file -> cpuset_dir_inode_operations -> cpuset_mkdir.
|
|
|
|
- */
|
|
|
|
-
|
|
|
|
-static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode);
|
|
|
|
-static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry);
|
|
|
|
-
|
|
|
|
-static struct backing_dev_info cpuset_backing_dev_info = {
|
|
|
|
- .ra_pages = 0, /* No readahead */
|
|
|
|
- .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
|
|
|
|
-};
|
|
|
|
-
|
|
|
|
-static struct inode *cpuset_new_inode(mode_t mode)
|
|
|
|
-{
|
|
|
|
- struct inode *inode = new_inode(cpuset_sb);
|
|
|
|
-
|
|
|
|
- if (inode) {
|
|
|
|
- inode->i_mode = mode;
|
|
|
|
- inode->i_uid = current->fsuid;
|
|
|
|
- inode->i_gid = current->fsgid;
|
|
|
|
- inode->i_blocks = 0;
|
|
|
|
- inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
|
|
|
|
- inode->i_mapping->backing_dev_info = &cpuset_backing_dev_info;
|
|
|
|
- }
|
|
|
|
- return inode;
|
|
|
|
-}
|
|
|
|
-
|
|
|
|
-static void cpuset_diput(struct dentry *dentry, struct inode *inode)
|
|
|
|
-{
|
|
|
|
- /* is dentry a directory ? if so, kfree() associated cpuset */
|
|
|
|
- if (S_ISDIR(inode->i_mode)) {
|
|
|
|
- struct cpuset *cs = dentry->d_fsdata;
|
|
|
|
- BUG_ON(!(is_removed(cs)));
|
|
|
|
- kfree(cs);
|
|
|
|
- }
|
|
|
|
- iput(inode);
|
|
|
|
-}
|
|
|
|
-
|
|
|
|
-static struct dentry_operations cpuset_dops = {
|
|
|
|
- .d_iput = cpuset_diput,
|
|
|
|
-};
|
|
|
|
-
|
|
|
|
-static struct dentry *cpuset_get_dentry(struct dentry *parent, const char *name)
|
|
|
|
-{
|
|
|
|
- struct dentry *d = lookup_one_len(name, parent, strlen(name));
|
|
|
|
- if (!IS_ERR(d))
|
|
|
|
- d->d_op = &cpuset_dops;
|
|
|
|
- return d;
|
|
|
|
-}
|
|
|
|
-
|
|
|
|
-static void remove_dir(struct dentry *d)
|
|
|
|
-{
|
|
|
|
- struct dentry *parent = dget(d->d_parent);
|
|
|
|
-
|
|
|
|
- d_delete(d);
|
|
|
|
- simple_rmdir(parent->d_inode, d);
|
|
|
|
- dput(parent);
|
|
|
|
-}
|
|
|
|
-
|
|
|
|
-/*
|
|
|
|
- * NOTE : the dentry must have been dget()'ed
|
|
|
|
- */
|
|
|
|
-static void cpuset_d_remove_dir(struct dentry *dentry)
|
|
|
|
-{
|
|
|
|
- struct list_head *node;
|
|
|
|
-
|
|
|
|
- spin_lock(&dcache_lock);
|
|
|
|
- node = dentry->d_subdirs.next;
|
|
|
|
- while (node != &dentry->d_subdirs) {
|
|
|
|
- struct dentry *d = list_entry(node, struct dentry, d_u.d_child);
|
|
|
|
- list_del_init(node);
|
|
|
|
- if (d->d_inode) {
|
|
|
|
- d = dget_locked(d);
|
|
|
|
- spin_unlock(&dcache_lock);
|
|
|
|
- d_delete(d);
|
|
|
|
- simple_unlink(dentry->d_inode, d);
|
|
|
|
- dput(d);
|
|
|
|
- spin_lock(&dcache_lock);
|
|
|
|
- }
|
|
|
|
- node = dentry->d_subdirs.next;
|
|
|
|
- }
|
|
|
|
- list_del_init(&dentry->d_u.d_child);
|
|
|
|
- spin_unlock(&dcache_lock);
|
|
|
|
- remove_dir(dentry);
|
|
|
|
-}
|
|
|
|
-
|
|
|
|
-static struct super_operations cpuset_ops = {
|
|
|
|
- .statfs = simple_statfs,
|
|
|
|
- .drop_inode = generic_delete_inode,
|
|
|
|
-};
|
|
|
|
-
|
|
|
|
-static int cpuset_fill_super(struct super_block *sb, void *unused_data,
|
|
|
|
- int unused_silent)
|
|
|
|
-{
|
|
|
|
- struct inode *inode;
|
|
|
|
- struct dentry *root;
|
|
|
|
-
|
|
|
|
- sb->s_blocksize = PAGE_CACHE_SIZE;
|
|
|
|
- sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
|
|
|
|
- sb->s_magic = CPUSET_SUPER_MAGIC;
|
|
|
|
- sb->s_op = &cpuset_ops;
|
|
|
|
- cpuset_sb = sb;
|
|
|
|
-
|
|
|
|
- inode = cpuset_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR);
|
|
|
|
- if (inode) {
|
|
|
|
- inode->i_op = &simple_dir_inode_operations;
|
|
|
|
- inode->i_fop = &simple_dir_operations;
|
|
|
|
- /* directories start off with i_nlink == 2 (for "." entry) */
|
|
|
|
- inc_nlink(inode);
|
|
|
|
- } else {
|
|
|
|
- return -ENOMEM;
|
|
|
|
- }
|
|
|
|
-
|
|
|
|
- root = d_alloc_root(inode);
|
|
|
|
- if (!root) {
|
|
|
|
- iput(inode);
|
|
|
|
- return -ENOMEM;
|
|
|
|
- }
|
|
|
|
- sb->s_root = root;
|
|
|
|
- return 0;
|
|
|
|
-}
|
|
|
|
-
|
|
|
|
|
|
+/* This is ugly, but preserves the userspace API for existing cpuset
+ * users. If someone tries to mount the "cpuset" filesystem, we
+ * silently switch it to mount "cgroup" instead */
 static int cpuset_get_sb(struct file_system_type *fs_type,
			 int flags, const char *unused_dev_name,
			 void *data, struct vfsmount *mnt)
 {
-	return get_sb_single(fs_type, flags, data, cpuset_fill_super, mnt);
+	struct file_system_type *cgroup_fs = get_fs_type("cgroup");
+	int ret = -ENODEV;
+	if (cgroup_fs) {
+		char mountopts[] =
+			"cpuset,noprefix,"
+			"release_agent=/sbin/cpuset_release_agent";
+		ret = cgroup_fs->get_sb(cgroup_fs, flags,
+					unused_dev_name, mountopts, mnt);
+		put_filesystem(cgroup_fs);
+	}
+	return ret;
 }
 
 static struct file_system_type cpuset_fs_type = {
	.name = "cpuset",
	.get_sb = cpuset_get_sb,
-	.kill_sb = kill_litter_super,
 };
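
The rewritten cpuset_get_sb() above is purely a compatibility shim: a legacy cpuset mount is redirected to a cgroup mount limited to the cpuset subsystem, with noprefix keeping the old file names and the historical release-agent path preserved. In terms of mount(2), the two calls below should be roughly equivalent after this patch; this is a hedged userspace sketch, and the mount-point paths are only illustrative.

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Legacy form that existing cpuset users issue: */
	if (mount("cpuset", "/dev/cpuset", "cpuset", 0, NULL) != 0)
		perror("mount cpuset");

	/* Roughly what the kernel now turns it into internally: */
	if (mount("cpuset", "/mnt/cgroup-cpuset", "cgroup", 0,
		  "cpuset,noprefix,release_agent=/sbin/cpuset_release_agent") != 0)
		perror("mount cgroup");
	return 0;
}
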
-/* struct cftype:
|
|
|
|
- *
|
|
|
|
- * The files in the cpuset filesystem mostly have a very simple read/write
|
|
|
|
- * handling, some common function will take care of it. Nevertheless some cases
|
|
|
|
- * (read tasks) are special and therefore I define this structure for every
|
|
|
|
- * kind of file.
|
|
|
|
- *
|
|
|
|
- *
|
|
|
|
- * When reading/writing to a file:
|
|
|
|
- * - the cpuset to use in file->f_path.dentry->d_parent->d_fsdata
|
|
|
|
- * - the 'cftype' of the file is file->f_path.dentry->d_fsdata
|
|
|
|
- */
|
|
|
|
-
|
|
|
|
-struct cftype {
|
|
|
|
- char *name;
|
|
|
|
- int private;
|
|
|
|
- int (*open) (struct inode *inode, struct file *file);
|
|
|
|
- ssize_t (*read) (struct file *file, char __user *buf, size_t nbytes,
|
|
|
|
- loff_t *ppos);
|
|
|
|
- int (*write) (struct file *file, const char __user *buf, size_t nbytes,
|
|
|
|
- loff_t *ppos);
|
|
|
|
- int (*release) (struct inode *inode, struct file *file);
|
|
|
|
-};
|
|
|
|
-
|
|
|
|
-static inline struct cpuset *__d_cs(struct dentry *dentry)
|
|
|
|
-{
|
|
|
|
- return dentry->d_fsdata;
|
|
|
|
-}
|
|
|
|
-
|
|
|
|
-static inline struct cftype *__d_cft(struct dentry *dentry)
|
|
|
|
-{
|
|
|
|
- return dentry->d_fsdata;
|
|
|
|
-}
|
|
|
|
-
|
|
|
|
-/*
|
|
|
|
- * Call with manage_mutex held. Writes path of cpuset into buf.
|
|
|
|
- * Returns 0 on success, -errno on error.
|
|
|
|
- */
|
|
|
|
-
|
|
|
|
-static int cpuset_path(const struct cpuset *cs, char *buf, int buflen)
|
|
|
|
-{
|
|
|
|
- char *start;
|
|
|
|
-
|
|
|
|
- start = buf + buflen;
|
|
|
|
-
|
|
|
|
- *--start = '\0';
|
|
|
|
- for (;;) {
|
|
|
|
- int len = cs->dentry->d_name.len;
|
|
|
|
- if ((start -= len) < buf)
|
|
|
|
- return -ENAMETOOLONG;
|
|
|
|
- memcpy(start, cs->dentry->d_name.name, len);
|
|
|
|
- cs = cs->parent;
|
|
|
|
- if (!cs)
|
|
|
|
- break;
|
|
|
|
- if (!cs->parent)
|
|
|
|
- continue;
|
|
|
|
- if (--start < buf)
|
|
|
|
- return -ENAMETOOLONG;
|
|
|
|
- *start = '/';
|
|
|
|
- }
|
|
|
|
- memmove(buf, start, buf + buflen - start);
|
|
|
|
- return 0;
|
|
|
|
-}
|
|
|
|
-
|
|
|
|
-/*
|
|
|
|
- * Notify userspace when a cpuset is released, by running
|
|
|
|
- * /sbin/cpuset_release_agent with the name of the cpuset (path
|
|
|
|
- * relative to the root of cpuset file system) as the argument.
|
|
|
|
- *
|
|
|
|
- * Most likely, this user command will try to rmdir this cpuset.
|
|
|
|
- *
|
|
|
|
- * This races with the possibility that some other task will be
|
|
|
|
- * attached to this cpuset before it is removed, or that some other
|
|
|
|
- * user task will 'mkdir' a child cpuset of this cpuset. That's ok.
|
|
|
|
- * The presumed 'rmdir' will fail quietly if this cpuset is no longer
|
|
|
|
- * unused, and this cpuset will be reprieved from its death sentence,
|
|
|
|
- * to continue to serve a useful existence. Next time it's released,
|
|
|
|
- * we will get notified again, if it still has 'notify_on_release' set.
|
|
|
|
- *
|
|
|
|
- * The final arg to call_usermodehelper() is 0, which means don't
|
|
|
|
- * wait. The separate /sbin/cpuset_release_agent task is forked by
|
|
|
|
- * call_usermodehelper(), then control in this thread returns here,
|
|
|
|
- * without waiting for the release agent task. We don't bother to
|
|
|
|
- * wait because the caller of this routine has no use for the exit
|
|
|
|
- * status of the /sbin/cpuset_release_agent task, so no sense holding
|
|
|
|
- * our caller up for that.
|
|
|
|
- *
|
|
|
|
- * When we had only one cpuset mutex, we had to call this
|
|
|
|
- * without holding it, to avoid deadlock when call_usermodehelper()
|
|
|
|
- * allocated memory. With two locks, we could now call this while
|
|
|
|
- * holding manage_mutex, but we still don't, so as to minimize
|
|
|
|
- * the time manage_mutex is held.
|
|
|
|
- */
|
|
|
|
-
|
|
|
|
-static void cpuset_release_agent(const char *pathbuf)
|
|
|
|
-{
|
|
|
|
- char *argv[3], *envp[3];
|
|
|
|
- int i;
|
|
|
|
-
|
|
|
|
- if (!pathbuf)
|
|
|
|
- return;
|
|
|
|
-
|
|
|
|
- i = 0;
|
|
|
|
- argv[i++] = "/sbin/cpuset_release_agent";
|
|
|
|
- argv[i++] = (char *)pathbuf;
|
|
|
|
- argv[i] = NULL;
|
|
|
|
-
|
|
|
|
- i = 0;
|
|
|
|
- /* minimal command environment */
|
|
|
|
- envp[i++] = "HOME=/";
|
|
|
|
- envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
|
|
|
|
- envp[i] = NULL;
|
|
|
|
-
|
|
|
|
- call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
|
|
|
|
- kfree(pathbuf);
|
|
|
|
-}
|
|
|
|
-
|
|
|
|
-/*
|
|
|
|
- * Either cs->count of using tasks transitioned to zero, or the
|
|
|
|
- * cs->children list of child cpusets just became empty. If this
|
|
|
|
- * cs is notify_on_release() and now both the user count is zero and
|
|
|
|
- * the list of children is empty, prepare cpuset path in a kmalloc'd
|
|
|
|
- * buffer, to be returned via ppathbuf, so that the caller can invoke
|
|
|
|
- * cpuset_release_agent() with it later on, once manage_mutex is dropped.
|
|
|
|
- * Call here with manage_mutex held.
|
|
|
|
- *
|
|
|
|
- * This check_for_release() routine is responsible for kmalloc'ing
|
|
|
|
- * pathbuf. The above cpuset_release_agent() is responsible for
|
|
|
|
- * kfree'ing pathbuf. The caller of these routines is responsible
|
|
|
|
- * for providing a pathbuf pointer, initialized to NULL, then
|
|
|
|
- * calling check_for_release() with manage_mutex held and the address
|
|
|
|
- * of the pathbuf pointer, then dropping manage_mutex, then calling
|
|
|
|
- * cpuset_release_agent() with pathbuf, as set by check_for_release().
|
|
|
|
- */
|
|
|
|
-
|
|
|
|
-static void check_for_release(struct cpuset *cs, char **ppathbuf)
|
|
|
|
-{
|
|
|
|
- if (notify_on_release(cs) && atomic_read(&cs->count) == 0 &&
|
|
|
|
- list_empty(&cs->children)) {
|
|
|
|
- char *buf;
|
|
|
|
-
|
|
|
|
- buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
|
|
|
|
- if (!buf)
|
|
|
|
- return;
|
|
|
|
- if (cpuset_path(cs, buf, PAGE_SIZE) < 0)
|
|
|
|
- kfree(buf);
|
|
|
|
- else
|
|
|
|
- *ppathbuf = buf;
|
|
|
|
- }
|
|
|
|
-}
|
|
|
|
-
|
|
|
|
/*
|
|
/*
|
|
* Return in *pmask the portion of a cpusets's cpus_allowed that
|
|
* Return in *pmask the portion of a cpusets's cpus_allowed that
|
|
* are online. If none are online, walk up the cpuset hierarchy
|
|
* are online. If none are online, walk up the cpuset hierarchy
|
|
@@ -653,20 +379,19 @@ void cpuset_update_task_memory_state(void)
 	struct task_struct *tsk = current;
 	struct cpuset *cs;
 
-	if (tsk->cpuset == &top_cpuset) {
+	if (task_cs(tsk) == &top_cpuset) {
 		/* Don't need rcu for top_cpuset. It's never freed. */
 		my_cpusets_mem_gen = top_cpuset.mems_generation;
 	} else {
 		rcu_read_lock();
-		cs = rcu_dereference(tsk->cpuset);
-		my_cpusets_mem_gen = cs->mems_generation;
+		my_cpusets_mem_gen = task_cs(current)->mems_generation;
 		rcu_read_unlock();
 	}
 
 	if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) {
 		mutex_lock(&callback_mutex);
 		task_lock(tsk);
-		cs = tsk->cpuset;	/* Maybe changed when task not locked */
+		cs = task_cs(tsk); /* Maybe changed when task not locked */
 		guarantee_online_mems(cs, &tsk->mems_allowed);
 		tsk->cpuset_mems_generation = cs->mems_generation;
 		if (is_spread_page(cs))
@@ -721,11 +446,12 @@ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
 
 static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
 {
+	struct cgroup *cont;
 	struct cpuset *c, *par;
 
 	/* Each of our child cpusets must be a subset of us */
-	list_for_each_entry(c, &cur->children, sibling) {
-		if (!is_cpuset_subset(c, trial))
+	list_for_each_entry(cont, &cur->css.cgroup->children, sibling) {
+		if (!is_cpuset_subset(cgroup_cs(cont), trial))
 			return -EBUSY;
 	}
 
@@ -740,7 +466,8 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
 		return -EACCES;
 
 	/* If either I or some sibling (!= me) is exclusive, we can't overlap */
-	list_for_each_entry(c, &par->children, sibling) {
+	list_for_each_entry(cont, &par->css.cgroup->children, sibling) {
+		c = cgroup_cs(cont);
 		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
 		    c != cur &&
 		    cpus_intersects(trial->cpus_allowed, c->cpus_allowed))
@@ -783,7 +510,8 @@ static int update_cpumask(struct cpuset *cs, char *buf)
 	}
 	cpus_and(trialcs.cpus_allowed, trialcs.cpus_allowed, cpu_online_map);
 	/* cpus_allowed cannot be empty for a cpuset with attached tasks. */
-	if (atomic_read(&cs->count) && cpus_empty(trialcs.cpus_allowed))
+	if (cgroup_task_count(cs->css.cgroup) &&
+	    cpus_empty(trialcs.cpus_allowed))
 		return -ENOSPC;
 	retval = validate_change(cs, &trialcs);
 	if (retval < 0)
@@ -839,7 +567,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
 	do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
 
 	mutex_lock(&callback_mutex);
-	guarantee_online_mems(tsk->cpuset, &tsk->mems_allowed);
+	guarantee_online_mems(task_cs(tsk),&tsk->mems_allowed);
 	mutex_unlock(&callback_mutex);
 }
 
@@ -857,16 +585,19 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
 * their mempolicies to the cpusets new mems_allowed.
 */
 
+static void *cpuset_being_rebound;
+
 static int update_nodemask(struct cpuset *cs, char *buf)
 {
 	struct cpuset trialcs;
 	nodemask_t oldmem;
-	struct task_struct *g, *p;
+	struct task_struct *p;
 	struct mm_struct **mmarray;
 	int i, n, ntasks;
 	int migrate;
 	int fudge;
 	int retval;
+	struct cgroup_iter it;
 
 	/*
	 * top_cpuset.mems_allowed tracks node_stats[N_HIGH_MEMORY];
@@ -909,7 +640,8 @@ static int update_nodemask(struct cpuset *cs, char *buf)
 		goto done;
 	}
 	/* mems_allowed cannot be empty for a cpuset with attached tasks. */
-	if (atomic_read(&cs->count) && nodes_empty(trialcs.mems_allowed)) {
+	if (cgroup_task_count(cs->css.cgroup) &&
+	    nodes_empty(trialcs.mems_allowed)) {
 		retval = -ENOSPC;
 		goto done;
 	}
@@ -922,7 +654,7 @@ static int update_nodemask(struct cpuset *cs, char *buf)
 	cs->mems_generation = cpuset_mems_generation++;
 	mutex_unlock(&callback_mutex);
 
-	set_cpuset_being_rebound(cs);		/* causes mpol_copy() rebind */
+	cpuset_being_rebound = cs;		/* causes mpol_copy() rebind */
 
 	fudge = 10;				/* spare mmarray[] slots */
 	fudge += cpus_weight(cs->cpus_allowed);	/* imagine one fork-bomb/cpu */
@@ -936,13 +668,13 @@ static int update_nodemask(struct cpuset *cs, char *buf)
	 * enough mmarray[] w/o using GFP_ATOMIC.
	 */
 	while (1) {
-		ntasks = atomic_read(&cs->count);	/* guess */
+		ntasks = cgroup_task_count(cs->css.cgroup);  /* guess */
 		ntasks += fudge;
 		mmarray = kmalloc(ntasks * sizeof(*mmarray), GFP_KERNEL);
 		if (!mmarray)
 			goto done;
 		read_lock(&tasklist_lock);		/* block fork */
-		if (atomic_read(&cs->count) <= ntasks)
+		if (cgroup_task_count(cs->css.cgroup) <= ntasks)
 			break;				/* got enough */
 		read_unlock(&tasklist_lock);		/* try again */
 		kfree(mmarray);
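
The sizing loop above keeps the old strategy, just reading the population from cgroup_task_count() instead of the private cs->count: guess the task count without the lock, allocate with slack, then re-check under tasklist_lock and retry if the cpuset grew in the meantime. The following userspace sketch shows the same guess/allocate/verify-under-lock pattern; it is illustrative only and, unlike the kernel code, drops the lock before returning.

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

static pthread_mutex_t count_lock = PTHREAD_MUTEX_INITIALIZER;
static int item_count = 5;                 /* may grow concurrently elsewhere */

/* Allocate room for every item, retrying if the count outgrew the guess
 * between the unlocked estimate and the locked re-check. */
static int *snapshot_alloc(int *out_n)
{
	for (;;) {
		int guess = item_count + 10;        /* unlocked guess plus slack */
		int *buf = malloc(guess * sizeof(*buf));
		if (!buf)
			return NULL;

		pthread_mutex_lock(&count_lock);
		if (item_count <= guess) {
			*out_n = item_count;        /* snapshot taken under the lock */
			pthread_mutex_unlock(&count_lock);
			return buf;
		}
		pthread_mutex_unlock(&count_lock);  /* grew too much: retry */
		free(buf);
	}
}

int main(void)
{
	int n = 0;
	int *buf = snapshot_alloc(&n);
	printf("allocated room for %d items\n", buf ? n : -1);
	free(buf);
	return 0;
}
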
@@ -951,21 +683,21 @@ static int update_nodemask(struct cpuset *cs, char *buf)
 	n = 0;
 
 	/* Load up mmarray[] with mm reference for each task in cpuset. */
-	do_each_thread(g, p) {
+	cgroup_iter_start(cs->css.cgroup, &it);
+	while ((p = cgroup_iter_next(cs->css.cgroup, &it))) {
 		struct mm_struct *mm;
 
 		if (n >= ntasks) {
 			printk(KERN_WARNING
 				"Cpuset mempolicy rebind incomplete.\n");
-			continue;
+			break;
 		}
-		if (p->cpuset != cs)
-			continue;
 		mm = get_task_mm(p);
 		if (!mm)
 			continue;
 		mmarray[n++] = mm;
-	} while_each_thread(g, p);
+	}
+	cgroup_iter_end(cs->css.cgroup, &it);
 	read_unlock(&tasklist_lock);
 
 	/*
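
The loop above swaps the global do_each_thread() scan for the cgroup task iterator, so the per-task membership filter (p->cpuset != cs) disappears: cgroup_iter_next() only ever yields tasks attached to the cgroup being walked. Below is a small, runnable userspace analogue of that start/next/end iterator shape; the group_iter_* names are made up for illustration and only mirror the structure of the kernel API.

#include <stdio.h>

struct task { int pid; struct task *next; };
struct group_iter { struct task *pos; };

static void group_iter_start(struct task *head, struct group_iter *it)
{
	it->pos = head;              /* the kernel version also takes a lock here */
}

static struct task *group_iter_next(struct group_iter *it)
{
	struct task *t = it->pos;
	if (t)
		it->pos = t->next;
	return t;                    /* NULL terminates the walk */
}

static void group_iter_end(struct group_iter *it)
{
	it->pos = NULL;              /* the kernel version releases its lock here */
}

int main(void)
{
	struct task c = { 30, NULL }, b = { 20, &c }, a = { 10, &b };
	struct group_iter it;
	struct task *p;

	group_iter_start(&a, &it);
	while ((p = group_iter_next(&it)))
		printf("task %d\n", p->pid);
	group_iter_end(&it);
	return 0;
}
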
@@ -993,12 +725,17 @@ static int update_nodemask(struct cpuset *cs, char *buf)
 
 	/* We're done rebinding vma's to this cpusets new mems_allowed. */
 	kfree(mmarray);
-	set_cpuset_being_rebound(NULL);
+	cpuset_being_rebound = NULL;
 	retval = 0;
 done:
 	return retval;
 }
 
+int current_cpuset_is_being_rebound(void)
+{
+	return task_cs(current) == cpuset_being_rebound;
+}
+
 /*
 * Call with manage_mutex held.
 */
@@ -1145,85 +882,34 @@ static int fmeter_getrate(struct fmeter *fmp)
 	return val;
 }
 
-/*
- * Attack task specified by pid in 'pidbuf' to cpuset 'cs', possibly
- * writing the path of the old cpuset in 'ppathbuf' if it needs to be
- * notified on release.
- *
- * Call holding manage_mutex. May take callback_mutex and task_lock of
- * the task 'pid' during call.
- */
-
-static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf)
+static int cpuset_can_attach(struct cgroup_subsys *ss,
+			     struct cgroup *cont, struct task_struct *tsk)
 {
-	pid_t pid;
-	struct task_struct *tsk;
-	struct cpuset *oldcs;
-	cpumask_t cpus;
-	nodemask_t from, to;
-	struct mm_struct *mm;
-	int retval;
+	struct cpuset *cs = cgroup_cs(cont);
 
-	if (sscanf(pidbuf, "%d", &pid) != 1)
-		return -EIO;
 	if (cpus_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
 		return -ENOSPC;
 
-	if (pid) {
-		read_lock(&tasklist_lock);
-
-		tsk = find_task_by_pid(pid);
-		if (!tsk || tsk->flags & PF_EXITING) {
-			read_unlock(&tasklist_lock);
-			return -ESRCH;
-		}
-
-		get_task_struct(tsk);
-		read_unlock(&tasklist_lock);
-
-		if ((current->euid) && (current->euid != tsk->uid)
-		    && (current->euid != tsk->suid)) {
-			put_task_struct(tsk);
-			return -EACCES;
-		}
-	} else {
-		tsk = current;
-		get_task_struct(tsk);
-	}
+	return security_task_setscheduler(tsk, 0, NULL);
+}
 
-	retval = security_task_setscheduler(tsk, 0, NULL);
-	if (retval) {
-		put_task_struct(tsk);
-		return retval;
-	}
+static void cpuset_attach(struct cgroup_subsys *ss,
+			  struct cgroup *cont, struct cgroup *oldcont,
+			  struct task_struct *tsk)
+{
+	cpumask_t cpus;
+	nodemask_t from, to;
+	struct mm_struct *mm;
+	struct cpuset *cs = cgroup_cs(cont);
+	struct cpuset *oldcs = cgroup_cs(oldcont);
 
 	mutex_lock(&callback_mutex);
-
-	task_lock(tsk);
-	oldcs = tsk->cpuset;
-	/*
-	 * After getting 'oldcs' cpuset ptr, be sure still not exiting.
-	 * If 'oldcs' might be the top_cpuset due to the_top_cpuset_hack
-	 * then fail this attach_task(), to avoid breaking top_cpuset.count.
-	 */
-	if (tsk->flags & PF_EXITING) {
-		task_unlock(tsk);
-		mutex_unlock(&callback_mutex);
-		put_task_struct(tsk);
-		return -ESRCH;
-	}
-	atomic_inc(&cs->count);
-	rcu_assign_pointer(tsk->cpuset, cs);
-	task_unlock(tsk);
-
 	guarantee_online_cpus(cs, &cpus);
 	set_cpus_allowed(tsk, cpus);
+	mutex_unlock(&callback_mutex);
 
 	from = oldcs->mems_allowed;
 	to = cs->mems_allowed;
-
-	mutex_unlock(&callback_mutex);
-
 	mm = get_task_mm(tsk);
 	if (mm) {
 		mpol_rebind_mm(mm, &to);
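
The single attach_task() above is replaced by a two-phase protocol: the cgroup core now performs the pid lookup and permission checks that used to live here, cpuset_can_attach() can only veto a move (into an empty cpuset), and cpuset_attach() applies cpu and memory placement once the move is already committed. A generic, runnable sketch of that check-then-commit callback split is below; the group/move_member names are purely illustrative and not part of any kernel API.

#include <stdio.h>

/* Illustrative split of a "move" operation into a veto step and a
 * commit step, mirroring the can_attach/attach callback pair above. */
struct group { int capacity; int members; };

static int can_attach(struct group *dst)
{
	return dst->members < dst->capacity ? 0 : -1;    /* veto only, no side effects */
}

static void attach(struct group *dst, struct group *src)
{
	src->members--;                                   /* side effects only, cannot fail */
	dst->members++;
}

static int move_member(struct group *dst, struct group *src)
{
	int err = can_attach(dst);     /* phase 1: may fail, changes nothing */
	if (err)
		return err;
	attach(dst, src);              /* phase 2: always succeeds */
	return 0;
}

int main(void)
{
	struct group a = { 4, 3 }, b = { 4, 1 };
	printf("move %s\n", move_member(&a, &b) == 0 ? "ok" : "refused");
	return 0;
}
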
@@ -1232,40 +918,31 @@ static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf)
 		mmput(mm);
 	}
 
-	put_task_struct(tsk);
-	synchronize_rcu();
-	if (atomic_dec_and_test(&oldcs->count))
-		check_for_release(oldcs, ppathbuf);
-	return 0;
 }
 
 /* The various types of files and directories in a cpuset file system */
 
 typedef enum {
-	FILE_ROOT,
-	FILE_DIR,
 	FILE_MEMORY_MIGRATE,
 	FILE_CPULIST,
 	FILE_MEMLIST,
 	FILE_CPU_EXCLUSIVE,
 	FILE_MEM_EXCLUSIVE,
-	FILE_NOTIFY_ON_RELEASE,
 	FILE_MEMORY_PRESSURE_ENABLED,
 	FILE_MEMORY_PRESSURE,
 	FILE_SPREAD_PAGE,
 	FILE_SPREAD_SLAB,
-	FILE_TASKLIST,
 } cpuset_filetype_t;
 
-static ssize_t cpuset_common_file_write(struct file *file,
+static ssize_t cpuset_common_file_write(struct cgroup *cont,
+					struct cftype *cft,
+					struct file *file,
 					const char __user *userbuf,
 					size_t nbytes, loff_t *unused_ppos)
 {
-	struct cpuset *cs = __d_cs(file->f_path.dentry->d_parent);
-	struct cftype *cft = __d_cft(file->f_path.dentry);
+	struct cpuset *cs = cgroup_cs(cont);
 	cpuset_filetype_t type = cft->private;
 	char *buffer;
-	char *pathbuf = NULL;
 	int retval = 0;
 
 	/* Crude upper limit on largest legitimate cpulist user might write. */
@@ -1282,9 +959,9 @@ static ssize_t cpuset_common_file_write(struct file *file,
 	}
 	buffer[nbytes] = 0;	/* nul-terminate */
 
-	mutex_lock(&manage_mutex);
+	cgroup_lock();
 
-	if (is_removed(cs)) {
+	if (cgroup_is_removed(cont)) {
 		retval = -ENODEV;
 		goto out2;
 	}
@@ -1302,9 +979,6 @@ static ssize_t cpuset_common_file_write(struct file *file,
 	case FILE_MEM_EXCLUSIVE:
 		retval = update_flag(CS_MEM_EXCLUSIVE, cs, buffer);
 		break;
-	case FILE_NOTIFY_ON_RELEASE:
-		retval = update_flag(CS_NOTIFY_ON_RELEASE, cs, buffer);
-		break;
 	case FILE_MEMORY_MIGRATE:
 		retval = update_flag(CS_MEMORY_MIGRATE, cs, buffer);
 		break;
@@ -1322,9 +996,6 @@ static ssize_t cpuset_common_file_write(struct file *file,
 		retval = update_flag(CS_SPREAD_SLAB, cs, buffer);
 		cs->mems_generation = cpuset_mems_generation++;
 		break;
-	case FILE_TASKLIST:
-		retval = attach_task(cs, buffer, &pathbuf);
-		break;
 	default:
 		retval = -EINVAL;
 		goto out2;
@@ -1333,30 +1004,12 @@ static ssize_t cpuset_common_file_write(struct file *file,
 	if (retval == 0)
 		retval = nbytes;
 out2:
-	mutex_unlock(&manage_mutex);
-	cpuset_release_agent(pathbuf);
+	cgroup_unlock();
 out1:
 	kfree(buffer);
 	return retval;
 }
 
-static ssize_t cpuset_file_write(struct file *file, const char __user *buf,
-						size_t nbytes, loff_t *ppos)
-{
-	ssize_t retval = 0;
-	struct cftype *cft = __d_cft(file->f_path.dentry);
-	if (!cft)
-		return -ENODEV;
-
-	/* special function ? */
-	if (cft->write)
-		retval = cft->write(file, buf, nbytes, ppos);
-	else
-		retval = cpuset_common_file_write(file, buf, nbytes, ppos);
-
-	return retval;
-}
-
 /*
 * These ascii lists should be read in a single call, by using a user
 * buffer large enough to hold the entire map. If read in smaller
@@ -1391,11 +1044,13 @@ static int cpuset_sprintf_memlist(char *page, struct cpuset *cs)
 	return nodelist_scnprintf(page, PAGE_SIZE, mask);
 }
 
-static ssize_t cpuset_common_file_read(struct file *file, char __user *buf,
-				size_t nbytes, loff_t *ppos)
+static ssize_t cpuset_common_file_read(struct cgroup *cont,
+				       struct cftype *cft,
+				       struct file *file,
+				       char __user *buf,
+				       size_t nbytes, loff_t *ppos)
 {
-	struct cftype *cft = __d_cft(file->f_path.dentry);
-	struct cpuset *cs = __d_cs(file->f_path.dentry->d_parent);
+	struct cpuset *cs = cgroup_cs(cont);
 	cpuset_filetype_t type = cft->private;
 	char *page;
 	ssize_t retval = 0;
@@ -1419,9 +1074,6 @@ static ssize_t cpuset_common_file_read(struct file *file, char __user *buf,
 	case FILE_MEM_EXCLUSIVE:
 		*s++ = is_mem_exclusive(cs) ? '1' : '0';
 		break;
-	case FILE_NOTIFY_ON_RELEASE:
-		*s++ = notify_on_release(cs) ? '1' : '0';
-		break;
 	case FILE_MEMORY_MIGRATE:
 		*s++ = is_memory_migrate(cs) ? '1' : '0';
 		break;
@@ -1449,389 +1101,140 @@ out:
|
|
return retval;
|
|
return retval;
|
|
}
|
|
}
|
|
|
|
|
|
-static ssize_t cpuset_file_read(struct file *file, char __user *buf, size_t nbytes,
|
|
|
|
- loff_t *ppos)
|
|
|
|
-{
|
|
|
|
- ssize_t retval = 0;
|
|
|
|
- struct cftype *cft = __d_cft(file->f_path.dentry);
|
|
|
|
- if (!cft)
|
|
|
|
- return -ENODEV;
|
|
|
|
|
|
|
|
- /* special function ? */
|
|
|
|
- if (cft->read)
|
|
|
|
- retval = cft->read(file, buf, nbytes, ppos);
|
|
|
|
- else
|
|
|
|
- retval = cpuset_common_file_read(file, buf, nbytes, ppos);
|
|
|
|
|
|
|
|
- return retval;
|
|
|
|
-}
|
|
|
|
|
|
|
|
-static int cpuset_file_open(struct inode *inode, struct file *file)
|
|
|
|
-{
|
|
|
|
- int err;
|
|
|
|
- struct cftype *cft;
|
|
|
|
-
|
|
|
|
- err = generic_file_open(inode, file);
|
|
|
|
- if (err)
|
|
|
|
- return err;
|
|
|
|
-
|
|
|
|
- cft = __d_cft(file->f_path.dentry);
|
|
|
|
- if (!cft)
|
|
|
|
- return -ENODEV;
|
|
|
|
- if (cft->open)
|
|
|
|
- err = cft->open(inode, file);
|
|
|
|
- else
|
|
|
|
- err = 0;
|
|
|
|
-
|
|
|
|
- return err;
|
|
|
|
-}
|
|
|
|
-
|
|
|
|
-static int cpuset_file_release(struct inode *inode, struct file *file)
|
|
|
|
-{
|
|
|
|
- struct cftype *cft = __d_cft(file->f_path.dentry);
|
|
|
|
- if (cft->release)
|
|
|
|
- return cft->release(inode, file);
|
|
|
|
- return 0;
|
|
|
|
-}
|
|
|
|
-
|
|
|
|
-/*
|
|
|
|
- * cpuset_rename - Only allow simple rename of directories in place.
|
|
|
|
- */
|
|
|
|
-static int cpuset_rename(struct inode *old_dir, struct dentry *old_dentry,
|
|
|
|
- struct inode *new_dir, struct dentry *new_dentry)
|
|
|
|
-{
|
|
|
|
- if (!S_ISDIR(old_dentry->d_inode->i_mode))
|
|
|
|
- return -ENOTDIR;
|
|
|
|
- if (new_dentry->d_inode)
|
|
|
|
- return -EEXIST;
|
|
|
|
- if (old_dir != new_dir)
|
|
|
|
- return -EIO;
|
|
|
|
- return simple_rename(old_dir, old_dentry, new_dir, new_dentry);
|
|
|
|
-}
|
|
|
|
-
|
|
|
|
-static const struct file_operations cpuset_file_operations = {
|
|
|
|
- .read = cpuset_file_read,
|
|
|
|
- .write = cpuset_file_write,
|
|
|
|
- .llseek = generic_file_llseek,
|
|
|
|
- .open = cpuset_file_open,
|
|
|
|
- .release = cpuset_file_release,
|
|
|
|
-};
|
|
|
|
-
|
|
|
|
-static const struct inode_operations cpuset_dir_inode_operations = {
|
|
|
|
- .lookup = simple_lookup,
|
|
|
|
- .mkdir = cpuset_mkdir,
|
|
|
|
- .rmdir = cpuset_rmdir,
|
|
|
|
- .rename = cpuset_rename,
|
|
|
|
-};
|
|
|
|
-
|
|
|
|
-static int cpuset_create_file(struct dentry *dentry, int mode)
|
|
|
|
-{
|
|
|
|
- struct inode *inode;
|
|
|
|
-
|
|
|
|
- if (!dentry)
|
|
|
|
- return -ENOENT;
|
|
|
|
- if (dentry->d_inode)
|
|
|
|
- return -EEXIST;
|
|
|
|
-
|
|
|
|
- inode = cpuset_new_inode(mode);
|
|
|
|
- if (!inode)
|
|
|
|
- return -ENOMEM;
|
|
|
|
-
|
|
|
|
- if (S_ISDIR(mode)) {
|
|
|
|
- inode->i_op = &cpuset_dir_inode_operations;
|
|
|
|
- inode->i_fop = &simple_dir_operations;
|
|
|
|
-
|
|
|
|
- /* start off with i_nlink == 2 (for "." entry) */
|
|
|
|
- inc_nlink(inode);
|
|
|
|
- } else if (S_ISREG(mode)) {
|
|
|
|
- inode->i_size = 0;
|
|
|
|
- inode->i_fop = &cpuset_file_operations;
|
|
|
|
- }
|
|
|
|
-
|
|
|
|
- d_instantiate(dentry, inode);
|
|
|
|
- dget(dentry); /* Extra count - pin the dentry in core */
|
|
|
|
- return 0;
|
|
|
|
-}
|
|
|
|
-
|
|
|
|
-/*
|
|
|
|
- * cpuset_create_dir - create a directory for an object.
|
|
|
|
- * cs: the cpuset we create the directory for.
|
|
|
|
- * It must have a valid ->parent field
|
|
|
|
- * And we are going to fill its ->dentry field.
|
|
|
|
- * name: The name to give to the cpuset directory. Will be copied.
|
|
|
|
- * mode: mode to set on new directory.
|
|
|
|
- */
|
|
|
|
-
|
|
|
|
-static int cpuset_create_dir(struct cpuset *cs, const char *name, int mode)
|
|
|
|
-{
|
|
|
|
- struct dentry *dentry = NULL;
|
|
|
|
- struct dentry *parent;
|
|
|
|
- int error = 0;
|
|
|
|
-
|
|
|
|
- parent = cs->parent->dentry;
|
|
|
|
- dentry = cpuset_get_dentry(parent, name);
|
|
|
|
- if (IS_ERR(dentry))
|
|
|
|
- return PTR_ERR(dentry);
|
|
|
|
- error = cpuset_create_file(dentry, S_IFDIR | mode);
|
|
|
|
- if (!error) {
|
|
|
|
- dentry->d_fsdata = cs;
|
|
|
|
- inc_nlink(parent->d_inode);
|
|
|
|
- cs->dentry = dentry;
|
|
|
|
- }
|
|
|
|
- dput(dentry);
|
|
|
|
-
|
|
|
|
- return error;
|
|
|
|
-}
|
|
|
|
-
|
|
|
|
-static int cpuset_add_file(struct dentry *dir, const struct cftype *cft)
|
|
|
|
-{
|
|
|
|
- struct dentry *dentry;
|
|
|
|
- int error;
|
|
|
|
-
|
|
|
|
- mutex_lock(&dir->d_inode->i_mutex);
|
|
|
|
- dentry = cpuset_get_dentry(dir, cft->name);
|
|
|
|
- if (!IS_ERR(dentry)) {
|
|
|
|
- error = cpuset_create_file(dentry, 0644 | S_IFREG);
|
|
|
|
- if (!error)
|
|
|
|
- dentry->d_fsdata = (void *)cft;
|
|
|
|
- dput(dentry);
|
|
|
|
- } else
|
|
|
|
- error = PTR_ERR(dentry);
|
|
|
|
- mutex_unlock(&dir->d_inode->i_mutex);
|
|
|
|
- return error;
|
|
|
|
-}
|
|
|
|
-
|
|
|
|
-/*
|
|
|
|
- * Stuff for reading the 'tasks' file.
|
|
|
|
- *
|
|
|
|
- * Reading this file can return large amounts of data if a cpuset has
|
|
|
|
- * *lots* of attached tasks. So it may need several calls to read(),
|
|
|
|
- * but we cannot guarantee that the information we produce is correct
|
|
|
|
- * unless we produce it entirely atomically.
|
|
|
|
- *
|
|
|
|
- * Upon tasks file open(), a struct ctr_struct is allocated, that
|
|
|
|
- * will have a pointer to an array (also allocated here). The struct
|
|
|
|
- * ctr_struct * is stored in file->private_data. Its resources will
|
|
|
|
- * be freed by release() when the file is closed. The array is used
|
|
|
|
- * to sprintf the PIDs and then used by read().
|
|
|
|
- */
|
|
|
|
-
|
|
|
|
-/* cpusets_tasks_read array */
|
|
|
|
-
|
|
|
|
-struct ctr_struct {
|
|
|
|
- char *buf;
|
|
|
|
- int bufsz;
|
|
|
|
-};
|
|
|
|
-
|
|
|
|
-/*
|
|
|
|
- * Load into 'pidarray' up to 'npids' of the tasks using cpuset 'cs'.
|
|
|
|
- * Return actual number of pids loaded. No need to task_lock(p)
|
|
|
|
- * when reading out p->cpuset, as we don't really care if it changes
|
|
|
|
- * on the next cycle, and we are not going to try to dereference it.
|
|
|
|
- */
|
|
|
|
-static int pid_array_load(pid_t *pidarray, int npids, struct cpuset *cs)
|
|
|
|
-{
|
|
|
|
- int n = 0;
|
|
|
|
- struct task_struct *g, *p;
|
|
|
|
-
|
|
|
|
- read_lock(&tasklist_lock);
|
|
|
|
-
|
|
|
|
- do_each_thread(g, p) {
|
|
|
|
- if (p->cpuset == cs) {
|
|
|
|
- pidarray[n++] = p->pid;
|
|
|
|
- if (unlikely(n == npids))
|
|
|
|
- goto array_full;
|
|
|
|
- }
|
|
|
|
- } while_each_thread(g, p);
|
|
|
|
-
|
|
|
|
-array_full:
|
|
|
|
- read_unlock(&tasklist_lock);
|
|
|
|
- return n;
|
|
|
|
-}
|
|
|
|
-
|
|
|
|
-static int cmppid(const void *a, const void *b)
|
|
|
|
-{
|
|
|
|
- return *(pid_t *)a - *(pid_t *)b;
|
|
|
|
-}
|
|
|
|
-
|
|
|
|
-/*
|
|
|
|
- * Convert array 'a' of 'npids' pid_t's to a string of newline separated
|
|
|
|
- * decimal pids in 'buf'. Don't write more than 'sz' chars, but return
|
|
|
|
- * count 'cnt' of how many chars would be written if buf were large enough.
|
|
|
|
- */
|
|
|
|
-static int pid_array_to_buf(char *buf, int sz, pid_t *a, int npids)
|
|
|
|
-{
|
|
|
|
- int cnt = 0;
|
|
|
|
- int i;
|
|
|
|
-
|
|
|
|
- for (i = 0; i < npids; i++)
|
|
|
|
- cnt += snprintf(buf + cnt, max(sz - cnt, 0), "%d\n", a[i]);
|
|
|
|
- return cnt;
|
|
|
|
-}
|
|
|
|
-
|
|
|
|
-/*
|
|
|
|
- * Handle an open on 'tasks' file. Prepare a buffer listing the
|
|
|
|
- * process id's of tasks currently attached to the cpuset being opened.
|
|
|
|
- *
|
|
|
|
- * Does not require any specific cpuset mutexes, and does not take any.
|
|
|
|
- */
|
|
|
|
-static int cpuset_tasks_open(struct inode *unused, struct file *file)
|
|
|
|
-{
|
|
|
|
- struct cpuset *cs = __d_cs(file->f_path.dentry->d_parent);
|
|
|
|
- struct ctr_struct *ctr;
|
|
|
|
- pid_t *pidarray;
|
|
|
|
- int npids;
|
|
|
|
- char c;
|
|
|
|
-
|
|
|
|
- if (!(file->f_mode & FMODE_READ))
|
|
|
|
- return 0;
|
|
|
|
-
|
|
|
|
- ctr = kmalloc(sizeof(*ctr), GFP_KERNEL);
|
|
|
|
- if (!ctr)
|
|
|
|
- goto err0;
|
|
|
|
-
|
|
|
|
- /*
|
|
|
|
- * If cpuset gets more users after we read count, we won't have
|
|
|
|
- * enough space - tough. This race is indistinguishable to the
|
|
|
|
- * caller from the case that the additional cpuset users didn't
|
|
|
|
- * show up until sometime later on.
|
|
|
|
- */
|
|
|
|
- npids = atomic_read(&cs->count);
|
|
|
|
- pidarray = kmalloc(npids * sizeof(pid_t), GFP_KERNEL);
|
|
|
|
- if (!pidarray)
|
|
|
|
- goto err1;
|
|
|
|
-
|
|
|
|
- npids = pid_array_load(pidarray, npids, cs);
|
|
|
|
- sort(pidarray, npids, sizeof(pid_t), cmppid, NULL);
|
|
|
|
-
|
|
|
|
- /* Call pid_array_to_buf() twice, first just to get bufsz */
|
|
|
|
- ctr->bufsz = pid_array_to_buf(&c, sizeof(c), pidarray, npids) + 1;
|
|
|
|
- ctr->buf = kmalloc(ctr->bufsz, GFP_KERNEL);
|
|
|
|
- if (!ctr->buf)
|
|
|
|
- goto err2;
|
|
|
|
- ctr->bufsz = pid_array_to_buf(ctr->buf, ctr->bufsz, pidarray, npids);
|
|
|
|
-
|
|
|
|
- kfree(pidarray);
|
|
|
|
- file->private_data = ctr;
|
|
|
|
- return 0;
|
|
|
|
-
|
|
|
|
-err2:
|
|
|
|
- kfree(pidarray);
|
|
|
|
-err1:
|
|
|
|
- kfree(ctr);
|
|
|
|
-err0:
|
|
|
|
- return -ENOMEM;
|
|
|
|
-}
|
|
|
|
-
|
|
|
|
-static ssize_t cpuset_tasks_read(struct file *file, char __user *buf,
|
|
|
|
- size_t nbytes, loff_t *ppos)
|
|
|
|
-{
|
|
|
|
- struct ctr_struct *ctr = file->private_data;
|
|
|
|
-
|
|
|
|
- return simple_read_from_buffer(buf, nbytes, ppos, ctr->buf, ctr->bufsz);
|
|
|
|
-}
|
|
|
|
-
|
|
|
|
-static int cpuset_tasks_release(struct inode *unused_inode, struct file *file)
|
|
|
|
-{
|
|
|
|
- struct ctr_struct *ctr;
|
|
|
|
-
|
|
|
|
- if (file->f_mode & FMODE_READ) {
|
|
|
|
- ctr = file->private_data;
|
|
|
|
- kfree(ctr->buf);
|
|
|
|
- kfree(ctr);
|
|
|
|
- }
|
|
|
|
- return 0;
|
|
|
|
-}
|
|
|
|
|
|
|
|
/*
|
|
/*
|
|
* for the common functions, 'private' gives the type of file
|
|
* for the common functions, 'private' gives the type of file
|
|
*/
|
|
*/
|
|
|
|
|
|
-static struct cftype cft_tasks = {
|
|
|
|
- .name = "tasks",
|
|
|
|
- .open = cpuset_tasks_open,
|
|
|
|
- .read = cpuset_tasks_read,
|
|
|
|
- .release = cpuset_tasks_release,
|
|
|
|
- .private = FILE_TASKLIST,
|
|
|
|
-};
|
|
|
|
-
|
|
|
|
static struct cftype cft_cpus = {
|
|
static struct cftype cft_cpus = {
|
|
.name = "cpus",
|
|
.name = "cpus",
|
|
|
|
+ .read = cpuset_common_file_read,
|
|
|
|
+ .write = cpuset_common_file_write,
|
|
.private = FILE_CPULIST,
|
|
.private = FILE_CPULIST,
|
|
};
|
|
};
|
|
|
|
|
|
static struct cftype cft_mems = {
|
|
static struct cftype cft_mems = {
|
|
.name = "mems",
|
|
.name = "mems",
|
|
|
|
+ .read = cpuset_common_file_read,
|
|
|
|
+ .write = cpuset_common_file_write,
|
|
.private = FILE_MEMLIST,
|
|
.private = FILE_MEMLIST,
|
|
};
|
|
};
|
|
|
|
|
|
static struct cftype cft_cpu_exclusive = {
|
|
static struct cftype cft_cpu_exclusive = {
|
|
.name = "cpu_exclusive",
|
|
.name = "cpu_exclusive",
|
|
|
|
+ .read = cpuset_common_file_read,
|
|
|
|
+ .write = cpuset_common_file_write,
|
|
.private = FILE_CPU_EXCLUSIVE,
|
|
.private = FILE_CPU_EXCLUSIVE,
|
|
};
|
|
};
|
|
|
|
|
|
static struct cftype cft_mem_exclusive = {
|
|
static struct cftype cft_mem_exclusive = {
|
|
.name = "mem_exclusive",
|
|
.name = "mem_exclusive",
|
|
|
|
+ .read = cpuset_common_file_read,
|
|
|
|
+ .write = cpuset_common_file_write,
|
|
.private = FILE_MEM_EXCLUSIVE,
|
|
.private = FILE_MEM_EXCLUSIVE,
|
|
};
|
|
};
|
|
|
|
|
|
-static struct cftype cft_notify_on_release = {
|
|
|
|
- .name = "notify_on_release",
|
|
|
|
- .private = FILE_NOTIFY_ON_RELEASE,
|
|
|
|
-};
|
|
|
|
-
|
|
|
|
static struct cftype cft_memory_migrate = {
|
|
static struct cftype cft_memory_migrate = {
|
|
.name = "memory_migrate",
|
|
.name = "memory_migrate",
|
|
|
|
+ .read = cpuset_common_file_read,
|
|
|
|
+ .write = cpuset_common_file_write,
|
|
.private = FILE_MEMORY_MIGRATE,
|
|
.private = FILE_MEMORY_MIGRATE,
|
|
};
|
|
};
|
|
|
|
|
|
static struct cftype cft_memory_pressure_enabled = {
|
|
static struct cftype cft_memory_pressure_enabled = {
|
|
.name = "memory_pressure_enabled",
|
|
.name = "memory_pressure_enabled",
|
|
|
|
+ .read = cpuset_common_file_read,
|
|
|
|
+ .write = cpuset_common_file_write,
|
|
.private = FILE_MEMORY_PRESSURE_ENABLED,
|
|
.private = FILE_MEMORY_PRESSURE_ENABLED,
|
|
};
|
|
};
|
|
|
|
|
|
static struct cftype cft_memory_pressure = {
|
|
static struct cftype cft_memory_pressure = {
|
|
.name = "memory_pressure",
|
|
.name = "memory_pressure",
|
|
|
|
+ .read = cpuset_common_file_read,
|
|
|
|
+ .write = cpuset_common_file_write,
|
|
.private = FILE_MEMORY_PRESSURE,
|
|
.private = FILE_MEMORY_PRESSURE,
|
|
};
|
|
};
|
|
|
|
|
|
static struct cftype cft_spread_page = {
|
|
static struct cftype cft_spread_page = {
|
|
.name = "memory_spread_page",
|
|
.name = "memory_spread_page",
|
|
|
|
+ .read = cpuset_common_file_read,
|
|
|
|
+ .write = cpuset_common_file_write,
|
|
.private = FILE_SPREAD_PAGE,
|
|
.private = FILE_SPREAD_PAGE,
|
|
};
|
|
};
|
|
|
|
|
|
static struct cftype cft_spread_slab = {
|
|
static struct cftype cft_spread_slab = {
|
|
.name = "memory_spread_slab",
|
|
.name = "memory_spread_slab",
|
|
|
|
+ .read = cpuset_common_file_read,
|
|
|
|
+ .write = cpuset_common_file_write,
|
|
.private = FILE_SPREAD_SLAB,
|
|
.private = FILE_SPREAD_SLAB,
|
|
};
|
|
};
|
|
|
|
|
|
-static int cpuset_populate_dir(struct dentry *cs_dentry)
|
|
|
|
|
|
+static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont)
|
|
{
|
|
{
|
|
int err;
|
|
int err;
|
|
|
|
|
|
- if ((err = cpuset_add_file(cs_dentry, &cft_cpus)) < 0)
|
|
|
|
|
|
+ if ((err = cgroup_add_file(cont, ss, &cft_cpus)) < 0)
|
|
return err;
|
|
return err;
|
|
- if ((err = cpuset_add_file(cs_dentry, &cft_mems)) < 0)
|
|
|
|
|
|
+ if ((err = cgroup_add_file(cont, ss, &cft_mems)) < 0)
|
|
return err;
|
|
return err;
|
|
- if ((err = cpuset_add_file(cs_dentry, &cft_cpu_exclusive)) < 0)
|
|
|
|
|
|
+ if ((err = cgroup_add_file(cont, ss, &cft_cpu_exclusive)) < 0)
|
|
return err;
|
|
return err;
|
|
- if ((err = cpuset_add_file(cs_dentry, &cft_mem_exclusive)) < 0)
|
|
|
|
|
|
+ if ((err = cgroup_add_file(cont, ss, &cft_mem_exclusive)) < 0)
|
|
return err;
|
|
return err;
|
|
- if ((err = cpuset_add_file(cs_dentry, &cft_notify_on_release)) < 0)
|
|
|
|
|
|
+ if ((err = cgroup_add_file(cont, ss, &cft_memory_migrate)) < 0)
|
|
return err;
|
|
return err;
|
|
- if ((err = cpuset_add_file(cs_dentry, &cft_memory_migrate)) < 0)
|
|
|
|
|
|
+ if ((err = cgroup_add_file(cont, ss, &cft_memory_pressure)) < 0)
|
|
return err;
|
|
return err;
|
|
- if ((err = cpuset_add_file(cs_dentry, &cft_memory_pressure)) < 0)
|
|
|
|
|
|
+ if ((err = cgroup_add_file(cont, ss, &cft_spread_page)) < 0)
|
|
return err;
|
|
return err;
|
|
- if ((err = cpuset_add_file(cs_dentry, &cft_spread_page)) < 0)
|
|
|
|
- return err;
|
|
|
|
- if ((err = cpuset_add_file(cs_dentry, &cft_spread_slab)) < 0)
|
|
|
|
- return err;
|
|
|
|
- if ((err = cpuset_add_file(cs_dentry, &cft_tasks)) < 0)
|
|
|
|
|
|
+ if ((err = cgroup_add_file(cont, ss, &cft_spread_slab)) < 0)
|
|
return err;
|
|
return err;
|
|
|
|
+ /* memory_pressure_enabled is in root cpuset only */
|
|
|
|
+ if (err == 0 && !cont->parent)
|
|
|
|
+ err = cgroup_add_file(cont, ss,
|
|
|
|
+ &cft_memory_pressure_enabled);
|
|
return 0;
|
|
return 0;
|
|
}
|
|
}
|
|
|
|
|
|
|
|
+/*
+ * post_clone() is called at the end of cgroup_clone().
+ * 'cgroup' was just created automatically as a result of
+ * a cgroup_clone(), and the current task is about to
+ * be moved into 'cgroup'.
+ *
+ * Currently we refuse to set up the cgroup - thereby
+ * refusing the task to be entered, and as a result refusing
+ * the sys_unshare() or clone() which initiated it - if any
+ * sibling cpusets have exclusive cpus or mem.
+ *
+ * If this becomes a problem for some users who wish to
+ * allow that scenario, then cpuset_post_clone() could be
+ * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
+ * (and likewise for mems) to the new cgroup.
+ */
+static void cpuset_post_clone(struct cgroup_subsys *ss,
+			      struct cgroup *cgroup)
+{
+	struct cgroup *parent, *child;
+	struct cpuset *cs, *parent_cs;
+
+	parent = cgroup->parent;
+	list_for_each_entry(child, &parent->children, sibling) {
+		cs = cgroup_cs(child);
+		if (is_mem_exclusive(cs) || is_cpu_exclusive(cs))
+			return;
+	}
+	cs = cgroup_cs(cgroup);
+	parent_cs = cgroup_cs(parent);
+
+	cs->mems_allowed = parent_cs->mems_allowed;
+	cs->cpus_allowed = parent_cs->cpus_allowed;
+	return;
+}
+
 /*
  * cpuset_create - create a cpuset
  * parent: cpuset that will be parent of the new cpuset.
@@ -1841,106 +1244,60 @@ static int cpuset_populate_dir(struct dentry *cs_dentry)
  * Must be called with the mutex on the parent inode held
  */
 
-static long cpuset_create(struct cpuset *parent, const char *name, int mode)
+static struct cgroup_subsys_state *cpuset_create(
+        struct cgroup_subsys *ss,
+        struct cgroup *cont)
 {
         struct cpuset *cs;
-        int err;
+        struct cpuset *parent;
 
+        if (!cont->parent) {
+                /* This is early initialization for the top cgroup */
+                top_cpuset.mems_generation = cpuset_mems_generation++;
+                return &top_cpuset.css;
+        }
+        parent = cgroup_cs(cont->parent);
         cs = kmalloc(sizeof(*cs), GFP_KERNEL);
         if (!cs)
-                return -ENOMEM;
+                return ERR_PTR(-ENOMEM);
 
-        mutex_lock(&manage_mutex);
         cpuset_update_task_memory_state();
         cs->flags = 0;
-        if (notify_on_release(parent))
-                set_bit(CS_NOTIFY_ON_RELEASE, &cs->flags);
         if (is_spread_page(parent))
                 set_bit(CS_SPREAD_PAGE, &cs->flags);
         if (is_spread_slab(parent))
                 set_bit(CS_SPREAD_SLAB, &cs->flags);
         cs->cpus_allowed = CPU_MASK_NONE;
         cs->mems_allowed = NODE_MASK_NONE;
-        atomic_set(&cs->count, 0);
-        INIT_LIST_HEAD(&cs->sibling);
-        INIT_LIST_HEAD(&cs->children);
         cs->mems_generation = cpuset_mems_generation++;
         fmeter_init(&cs->fmeter);
 
         cs->parent = parent;
-
-        mutex_lock(&callback_mutex);
-        list_add(&cs->sibling, &cs->parent->children);
         number_of_cpusets++;
-        mutex_unlock(&callback_mutex);
-
-        err = cpuset_create_dir(cs, name, mode);
-        if (err < 0)
-                goto err;
-
-        /*
-         * Release manage_mutex before cpuset_populate_dir() because it
-         * will down() this new directory's i_mutex and if we race with
-         * another mkdir, we might deadlock.
-         */
-        mutex_unlock(&manage_mutex);
-
-        err = cpuset_populate_dir(cs->dentry);
-        /* If err < 0, we have a half-filled directory - oh well ;) */
-        return 0;
-err:
-        list_del(&cs->sibling);
-        mutex_unlock(&manage_mutex);
-        kfree(cs);
-        return err;
-}
-
-static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode)
-{
-        struct cpuset *c_parent = dentry->d_parent->d_fsdata;
-
-        /* the vfs holds inode->i_mutex already */
-        return cpuset_create(c_parent, dentry->d_name.name, mode | S_IFDIR);
+        return &cs->css ;
 }
 
-static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
+static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
 {
-        struct cpuset *cs = dentry->d_fsdata;
-        struct dentry *d;
-        struct cpuset *parent;
-        char *pathbuf = NULL;
-
-        /* the vfs holds both inode->i_mutex already */
+        struct cpuset *cs = cgroup_cs(cont);
 
-        mutex_lock(&manage_mutex);
         cpuset_update_task_memory_state();
-        if (atomic_read(&cs->count) > 0) {
-                mutex_unlock(&manage_mutex);
-                return -EBUSY;
-        }
-        if (!list_empty(&cs->children)) {
-                mutex_unlock(&manage_mutex);
-                return -EBUSY;
-        }
-        parent = cs->parent;
-        mutex_lock(&callback_mutex);
-        set_bit(CS_REMOVED, &cs->flags);
-        list_del(&cs->sibling); /* delete my sibling from parent->children */
-        spin_lock(&cs->dentry->d_lock);
-        d = dget(cs->dentry);
-        cs->dentry = NULL;
-        spin_unlock(&d->d_lock);
-        cpuset_d_remove_dir(d);
-        dput(d);
         number_of_cpusets--;
-        mutex_unlock(&callback_mutex);
-        if (list_empty(&parent->children))
-                check_for_release(parent, &pathbuf);
-        mutex_unlock(&manage_mutex);
-        cpuset_release_agent(pathbuf);
-        return 0;
+        kfree(cs);
 }
 
+struct cgroup_subsys cpuset_subsys = {
+        .name = "cpuset",
+        .create = cpuset_create,
+        .destroy = cpuset_destroy,
+        .can_attach = cpuset_can_attach,
+        .attach = cpuset_attach,
+        .populate = cpuset_populate,
+        .post_clone = cpuset_post_clone,
+        .subsys_id = cpuset_subsys_id,
+        .early_init = 1,
+};
+
 /*
  * cpuset_init_early - just enough so that the calls to
  * cpuset_update_task_memory_state() in early init code
@@ -1949,13 +1306,11 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
 int __init cpuset_init_early(void)
 {
-        struct task_struct *tsk = current;
-
-        tsk->cpuset = &top_cpuset;
-        tsk->cpuset->mems_generation = cpuset_mems_generation++;
+        top_cpuset.mems_generation = cpuset_mems_generation++;
         return 0;
 }
 
+
 /**
  * cpuset_init - initialize cpusets at system boot
  *
@@ -1964,8 +1319,7 @@ int __init cpuset_init_early(void)
 int __init cpuset_init(void)
 {
-        struct dentry *root;
-        int err;
+        int err = 0;
 
         top_cpuset.cpus_allowed = CPU_MASK_ALL;
         top_cpuset.mems_allowed = NODE_MASK_ALL;
@@ -1973,30 +1327,12 @@ int __init cpuset_init(void)
         fmeter_init(&top_cpuset.fmeter);
         top_cpuset.mems_generation = cpuset_mems_generation++;
 
-        init_task.cpuset = &top_cpuset;
-
         err = register_filesystem(&cpuset_fs_type);
         if (err < 0)
-                goto out;
-        cpuset_mount = kern_mount(&cpuset_fs_type);
-        if (IS_ERR(cpuset_mount)) {
-                printk(KERN_ERR "cpuset: could not mount!\n");
-                err = PTR_ERR(cpuset_mount);
-                cpuset_mount = NULL;
-                goto out;
-        }
-        root = cpuset_mount->mnt_sb->s_root;
-        root->d_fsdata = &top_cpuset;
-        inc_nlink(root->d_inode);
-        top_cpuset.dentry = root;
-        root->d_inode->i_op = &cpuset_dir_inode_operations;
+                return err;
+
         number_of_cpusets = 1;
-        err = cpuset_populate_dir(root);
-        /* memory_pressure_enabled is in root cpuset only */
-        if (err == 0)
-                err = cpuset_add_file(root, &cft_memory_pressure_enabled);
-out:
-        return err;
+        return 0;
 }
 
 /*
@@ -2022,10 +1358,12 @@ out:
 static void guarantee_online_cpus_mems_in_subtree(const struct cpuset *cur)
 {
+        struct cgroup *cont;
         struct cpuset *c;
 
         /* Each of our child cpusets mems must be online */
-        list_for_each_entry(c, &cur->children, sibling) {
+        list_for_each_entry(cont, &cur->css.cgroup->children, sibling) {
+                c = cgroup_cs(cont);
                 guarantee_online_cpus_mems_in_subtree(c);
                 if (!cpus_empty(c->cpus_allowed))
                         guarantee_online_cpus(c, &c->cpus_allowed);
@@ -2053,7 +1391,7 @@ static void guarantee_online_cpus_mems_in_subtree(const struct cpuset *cur)
 static void common_cpu_mem_hotplug_unplug(void)
 {
-        mutex_lock(&manage_mutex);
+        cgroup_lock();
         mutex_lock(&callback_mutex);
 
         guarantee_online_cpus_mems_in_subtree(&top_cpuset);
@@ -2061,7 +1399,7 @@ static void common_cpu_mem_hotplug_unplug(void)
         top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
 
         mutex_unlock(&callback_mutex);
-        mutex_unlock(&manage_mutex);
+        cgroup_unlock();
 }
 
 /*
@@ -2113,109 +1451,7 @@ void __init cpuset_init_smp(void)
 }
 
 /**
- * cpuset_fork - attach newly forked task to its parents cpuset.
- * @tsk: pointer to task_struct of forking parent process.
- *
- * Description: A task inherits its parent's cpuset at fork().
- *
- * A pointer to the shared cpuset was automatically copied in fork.c
- * by dup_task_struct(). However, we ignore that copy, since it was
- * not made under the protection of task_lock(), so might no longer be
- * a valid cpuset pointer. attach_task() might have already changed
- * current->cpuset, allowing the previously referenced cpuset to
- * be removed and freed. Instead, we task_lock(current) and copy
- * its present value of current->cpuset for our freshly forked child.
- *
- * At the point that cpuset_fork() is called, 'current' is the parent
- * task, and the passed argument 'child' points to the child task.
- **/
-
-void cpuset_fork(struct task_struct *child)
-{
-        task_lock(current);
-        child->cpuset = current->cpuset;
-        atomic_inc(&child->cpuset->count);
-        task_unlock(current);
-}
-
-/**
- * cpuset_exit - detach cpuset from exiting task
- * @tsk: pointer to task_struct of exiting process
- *
- * Description: Detach cpuset from @tsk and release it.
- *
- * Note that cpusets marked notify_on_release force every task in
- * them to take the global manage_mutex mutex when exiting.
- * This could impact scaling on very large systems. Be reluctant to
- * use notify_on_release cpusets where very high task exit scaling
- * is required on large systems.
- *
- * Don't even think about derefencing 'cs' after the cpuset use count
- * goes to zero, except inside a critical section guarded by manage_mutex
- * or callback_mutex. Otherwise a zero cpuset use count is a license to
- * any other task to nuke the cpuset immediately, via cpuset_rmdir().
- *
- * This routine has to take manage_mutex, not callback_mutex, because
- * it is holding that mutex while calling check_for_release(),
- * which calls kmalloc(), so can't be called holding callback_mutex().
- *
- * the_top_cpuset_hack:
- *
- * Set the exiting tasks cpuset to the root cpuset (top_cpuset).
- *
- * Don't leave a task unable to allocate memory, as that is an
- * accident waiting to happen should someone add a callout in
- * do_exit() after the cpuset_exit() call that might allocate.
- * If a task tries to allocate memory with an invalid cpuset,
- * it will oops in cpuset_update_task_memory_state().
- *
- * We call cpuset_exit() while the task is still competent to
- * handle notify_on_release(), then leave the task attached to
- * the root cpuset (top_cpuset) for the remainder of its exit.
- *
- * To do this properly, we would increment the reference count on
- * top_cpuset, and near the very end of the kernel/exit.c do_exit()
- * code we would add a second cpuset function call, to drop that
- * reference. This would just create an unnecessary hot spot on
- * the top_cpuset reference count, to no avail.
- *
- * Normally, holding a reference to a cpuset without bumping its
- * count is unsafe. The cpuset could go away, or someone could
- * attach us to a different cpuset, decrementing the count on
- * the first cpuset that we never incremented. But in this case,
- * top_cpuset isn't going away, and either task has PF_EXITING set,
- * which wards off any attach_task() attempts, or task is a failed
- * fork, never visible to attach_task.
- *
- * Another way to do this would be to set the cpuset pointer
- * to NULL here, and check in cpuset_update_task_memory_state()
- * for a NULL pointer. This hack avoids that NULL check, for no
- * cost (other than this way too long comment ;).
- **/
-
-void cpuset_exit(struct task_struct *tsk)
-{
-        struct cpuset *cs;
-
-        task_lock(current);
-        cs = tsk->cpuset;
-        tsk->cpuset = &top_cpuset; /* the_top_cpuset_hack - see above */
-        task_unlock(current);
-
-        if (notify_on_release(cs)) {
-                char *pathbuf = NULL;
-
-                mutex_lock(&manage_mutex);
-                if (atomic_dec_and_test(&cs->count))
-                        check_for_release(cs, &pathbuf);
-                mutex_unlock(&manage_mutex);
-                cpuset_release_agent(pathbuf);
-        } else {
-                atomic_dec(&cs->count);
-        }
-}
-
-/**
  * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
  * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
  *
@@ -2231,7 +1467,7 @@ cpumask_t cpuset_cpus_allowed(struct task_struct *tsk)
         mutex_lock(&callback_mutex);
         task_lock(tsk);
-        guarantee_online_cpus(tsk->cpuset, &mask);
+        guarantee_online_cpus(task_cs(tsk), &mask);
         task_unlock(tsk);
         mutex_unlock(&callback_mutex);
 
@@ -2259,7 +1495,7 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
         mutex_lock(&callback_mutex);
         task_lock(tsk);
-        guarantee_online_mems(tsk->cpuset, &mask);
+        guarantee_online_mems(task_cs(tsk), &mask);
         task_unlock(tsk);
         mutex_unlock(&callback_mutex);
 
@@ -2390,7 +1626,7 @@ int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
         mutex_lock(&callback_mutex);
 
         task_lock(current);
-        cs = nearest_exclusive_ancestor(current->cpuset);
+        cs = nearest_exclusive_ancestor(task_cs(current));
         task_unlock(current);
 
         allowed = node_isset(node, cs->mems_allowed);
@@ -2550,14 +1786,12 @@ int cpuset_memory_pressure_enabled __read_mostly;
 void __cpuset_memory_pressure_bump(void)
 {
-        struct cpuset *cs;
-
         task_lock(current);
-        cs = current->cpuset;
-        fmeter_markevent(&cs->fmeter);
+        fmeter_markevent(&task_cs(current)->fmeter);
         task_unlock(current);
 }
 
+#ifdef CONFIG_PROC_PID_CPUSET
 /*
  * proc_cpuset_show()
  * - Print tasks cpuset path into seq_file.
@@ -2574,6 +1808,7 @@ static int proc_cpuset_show(struct seq_file *m, void *v)
         struct pid *pid;
         struct task_struct *tsk;
         char *buf;
+        struct cgroup_subsys_state *css;
         int retval;
 
         retval = -ENOMEM;
@@ -2588,15 +1823,15 @@ static int proc_cpuset_show(struct seq_file *m, void *v)
                 goto out_free;
 
         retval = -EINVAL;
-        mutex_lock(&manage_mutex);
-
-        retval = cpuset_path(tsk->cpuset, buf, PAGE_SIZE);
+        cgroup_lock();
+        css = task_subsys_state(tsk, cpuset_subsys_id);
+        retval = cgroup_path(css->cgroup, buf, PAGE_SIZE);
         if (retval < 0)
                 goto out_unlock;
         seq_puts(m, buf);
         seq_putc(m, '\n');
 out_unlock:
-        mutex_unlock(&manage_mutex);
+        cgroup_unlock();
         put_task_struct(tsk);
 out_free:
         kfree(buf);
@@ -2616,6 +1851,7 @@ const struct file_operations proc_cpuset_operations = {
         .llseek = seq_lseek,
         .release = single_release,
 };
+#endif /* CONFIG_PROC_PID_CPUSET */
 
 /* Display task cpus_allowed, mems_allowed in /proc/<pid>/status file. */
 char *cpuset_task_status_allowed(struct task_struct *task, char *buffer)