cgroup fixes for v6.0-rc2
Contains fixes for the following issues:

* The psi data structure was changed to be allocated dynamically, but it
  wasn't being cleared, leading to it reporting garbage values and
  triggering spurious oom kills.

* A deadlock involving cpuset and cpu hotplug.

* When a controller is moved across cgroup hierarchies,
  css->rstat_css_node didn't get RCU drained properly from the previous
  list.

-----BEGIN PGP SIGNATURE-----

iIQEABYIACwWIQTfIjM1kS57o3GsC/uxYfJx3gVYGQUCYwVmRg4cdGpAa2VybmVs
Lm9yZwAKCRCxYfJx3gVYGb/eAP44dr9/OQtapKm63H/qmLF39LWE6nC99RYHECl5
ncuZvwD/XIkZt212nr/qC1C0ggB5qCGG7tIZG6tIgkS+J5huqg4=
=CC/Y
-----END PGP SIGNATURE-----

Merge tag 'cgroup-for-6.0-rc2-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup

Pull cgroup fixes from Tejun Heo:

 - The psi data structure was changed to be allocated dynamically, but it
   wasn't being cleared, leading to it reporting garbage values and
   triggering spurious oom kills.

 - A deadlock involving cpuset and cpu hotplug.

 - When a controller is moved across cgroup hierarchies,
   css->rstat_css_node didn't get RCU drained properly from the previous
   list.

* tag 'cgroup-for-6.0-rc2-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup:
  cgroup: Fix race condition at rebind_subsystems()
  cgroup: Fix threadgroup_rwsem <-> cpus_read_lock() deadlock
  sched/psi: Remove redundant cgroup_psi() when !CONFIG_CGROUPS
  sched/psi: Remove unused parameter nbytes of psi_trigger_create()
  sched/psi: Zero the memory of struct psi_group
commit c40e8341e3

5 changed files with 61 additions and 39 deletions
include/linux/cgroup.h
@@ -734,11 +734,6 @@ static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
 	return NULL;
 }
 
-static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
-{
-	return NULL;
-}
-
 static inline bool cgroup_psi_enabled(void)
 {
 	return false;
include/linux/psi.h
@@ -27,7 +27,7 @@ void psi_memstall_leave(unsigned long *flags);
 
 int psi_show(struct seq_file *s, struct psi_group *group, enum psi_res res);
 struct psi_trigger *psi_trigger_create(struct psi_group *group,
-			char *buf, size_t nbytes, enum psi_res res);
+			char *buf, enum psi_res res);
 void psi_trigger_destroy(struct psi_trigger *t);
 
 __poll_t psi_trigger_poll(void **trigger_ptr, struct file *file,
kernel/cgroup/cgroup.c
@@ -1820,6 +1820,7 @@ int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
 
 		if (ss->css_rstat_flush) {
 			list_del_rcu(&css->rstat_css_node);
+			synchronize_rcu();
 			list_add_rcu(&css->rstat_css_node,
 				     &dcgrp->rstat_css_list);
 		}
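The race this hunk closes: a reader may still be iterating the source hierarchy's rstat_css_list under rcu_read_lock() when the css is unlinked, and if the node were spliced straight into the destination list, that reader would follow the node's updated next pointer into the wrong list. The synchronize_rcu() between the delete and the add waits all such readers out first. A minimal kernel-style sketch of the move-between-RCU-lists pattern; the helper name is illustrative and not part of the patch:

#include <linux/rculist.h>
#include <linux/rcupdate.h>

/* Illustrative helper (not from the patch): move @node from whatever
 * RCU-protected list it is currently on to @new_head without breaking
 * concurrent readers of the old list. */
static void rcu_list_move(struct list_head *node, struct list_head *new_head)
{
	list_del_rcu(node);		/* readers may still hold a pointer to node */
	synchronize_rcu();		/* wait for every such reader to finish */
	list_add_rcu(node, new_head);	/* now node->next can be repointed safely */
}

synchronize_rcu() can sleep, which is fine here since rebind_subsystems() runs in process context; a path that cannot block would instead have to defer the re-add, e.g. via call_rcu().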
@@ -2369,6 +2370,47 @@ int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
 }
 EXPORT_SYMBOL_GPL(task_cgroup_path);
 
+/**
+ * cgroup_attach_lock - Lock for ->attach()
+ * @lock_threadgroup: whether to down_write cgroup_threadgroup_rwsem
+ *
+ * cgroup migration sometimes needs to stabilize threadgroups against forks and
+ * exits by write-locking cgroup_threadgroup_rwsem. However, some ->attach()
+ * implementations (e.g. cpuset), also need to disable CPU hotplug.
+ * Unfortunately, letting ->attach() operations acquire cpus_read_lock() can
+ * lead to deadlocks.
+ *
+ * Bringing up a CPU may involve creating and destroying tasks which requires
+ * read-locking threadgroup_rwsem, so threadgroup_rwsem nests inside
+ * cpus_read_lock(). If we call an ->attach() which acquires the cpus lock while
+ * write-locking threadgroup_rwsem, the locking order is reversed and we end up
+ * waiting for an on-going CPU hotplug operation which in turn is waiting for
+ * the threadgroup_rwsem to be released to create new tasks. For more details:
+ *
+ * http://lkml.kernel.org/r/20220711174629.uehfmqegcwn2lqzu@wubuntu
+ *
+ * Resolve the situation by always acquiring cpus_read_lock() before optionally
+ * write-locking cgroup_threadgroup_rwsem. This allows ->attach() to assume that
+ * CPU hotplug is disabled on entry.
+ */
+static void cgroup_attach_lock(bool lock_threadgroup)
+{
+	cpus_read_lock();
+	if (lock_threadgroup)
+		percpu_down_write(&cgroup_threadgroup_rwsem);
+}
+
+/**
+ * cgroup_attach_unlock - Undo cgroup_attach_lock()
+ * @lock_threadgroup: whether to up_write cgroup_threadgroup_rwsem
+ */
+static void cgroup_attach_unlock(bool lock_threadgroup)
+{
+	if (lock_threadgroup)
+		percpu_up_write(&cgroup_threadgroup_rwsem);
+	cpus_read_unlock();
+}
+
 /**
  * cgroup_migrate_add_task - add a migration target task to a migration context
  * @task: target task
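To make the inversion concrete, here is a minimal userspace sketch of the ordering rule the comment above establishes. pthread rwlocks stand in for cpus_read_lock() and cgroup_threadgroup_rwsem; every name is illustrative, and nothing below is kernel code:

#include <pthread.h>
#include <stdio.h>

/* "A": stands in for the CPU hotplug lock (the cpus_read_lock() side). */
static pthread_rwlock_t fake_cpu_hotplug_lock = PTHREAD_RWLOCK_INITIALIZER;
/* "B": stands in for cgroup_threadgroup_rwsem. */
static pthread_rwlock_t fake_threadgroup_rwsem = PTHREAD_RWLOCK_INITIALIZER;

/* Mirrors cgroup_attach_lock(): always A first, then optionally B, so no
 * path can ever hold B while waiting for A (the old deadlocking order). */
static void attach_lock(int lock_threadgroup)
{
	pthread_rwlock_rdlock(&fake_cpu_hotplug_lock);
	if (lock_threadgroup)
		pthread_rwlock_wrlock(&fake_threadgroup_rwsem);
}

/* Mirrors cgroup_attach_unlock(): release in reverse order. */
static void attach_unlock(int lock_threadgroup)
{
	if (lock_threadgroup)
		pthread_rwlock_unlock(&fake_threadgroup_rwsem);
	pthread_rwlock_unlock(&fake_cpu_hotplug_lock);
}

int main(void)
{
	attach_lock(1);
	puts("attach: hotplug held off, threadgroup stable");
	attach_unlock(1);
	return 0;
}

Because every path now takes the hotplug lock before the threadgroup lock, the attach side can no longer wait for an in-flight hotplug operation while holding the lock that hotplug itself needs, which is exactly the cycle in the linked report.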
@@ -2841,8 +2883,7 @@ int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
 }
 
 struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
-					     bool *locked)
-	__acquires(&cgroup_threadgroup_rwsem)
+					     bool *threadgroup_locked)
 {
 	struct task_struct *tsk;
 	pid_t pid;
@@ -2859,12 +2900,8 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
 	 * Therefore, we can skip the global lock.
 	 */
 	lockdep_assert_held(&cgroup_mutex);
-	if (pid || threadgroup) {
-		percpu_down_write(&cgroup_threadgroup_rwsem);
-		*locked = true;
-	} else {
-		*locked = false;
-	}
+	*threadgroup_locked = pid || threadgroup;
+	cgroup_attach_lock(*threadgroup_locked);
 
 	rcu_read_lock();
 	if (pid) {
@@ -2895,17 +2932,14 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
 		goto out_unlock_rcu;
 
 out_unlock_threadgroup:
-	if (*locked) {
-		percpu_up_write(&cgroup_threadgroup_rwsem);
-		*locked = false;
-	}
+	cgroup_attach_unlock(*threadgroup_locked);
+	*threadgroup_locked = false;
 out_unlock_rcu:
 	rcu_read_unlock();
 	return tsk;
 }
 
-void cgroup_procs_write_finish(struct task_struct *task, bool locked)
-	__releases(&cgroup_threadgroup_rwsem)
+void cgroup_procs_write_finish(struct task_struct *task, bool threadgroup_locked)
 {
 	struct cgroup_subsys *ss;
 	int ssid;
@@ -2913,8 +2947,8 @@ void cgroup_procs_write_finish(struct task_struct *task, bool locked)
 	/* release reference from cgroup_procs_write_start() */
 	put_task_struct(task);
 
-	if (locked)
-		percpu_up_write(&cgroup_threadgroup_rwsem);
+	cgroup_attach_unlock(threadgroup_locked);
 
 	for_each_subsys(ss, ssid)
 		if (ss->post_attach)
 			ss->post_attach();
@@ -3000,8 +3034,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
 	 * write-locking can be skipped safely.
 	 */
 	has_tasks = !list_empty(&mgctx.preloaded_src_csets);
-	if (has_tasks)
-		percpu_down_write(&cgroup_threadgroup_rwsem);
+	cgroup_attach_lock(has_tasks);
 
 	/* NULL dst indicates self on default hierarchy */
 	ret = cgroup_migrate_prepare_dst(&mgctx);
@@ -3022,8 +3055,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
 	ret = cgroup_migrate_execute(&mgctx);
 out_finish:
 	cgroup_migrate_finish(&mgctx);
-	if (has_tasks)
-		percpu_up_write(&cgroup_threadgroup_rwsem);
+	cgroup_attach_unlock(has_tasks);
 	return ret;
 }
 
@@ -3698,7 +3730,7 @@ static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf,
 	}
 
 	psi = cgroup_ino(cgrp) == 1 ? &psi_system : cgrp->psi;
-	new = psi_trigger_create(psi, buf, nbytes, res);
+	new = psi_trigger_create(psi, buf, res);
 	if (IS_ERR(new)) {
 		cgroup_put(cgrp);
 		return PTR_ERR(new);
@@ -4971,13 +5003,13 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
 	struct task_struct *task;
 	const struct cred *saved_cred;
 	ssize_t ret;
-	bool locked;
+	bool threadgroup_locked;
 
 	dst_cgrp = cgroup_kn_lock_live(of->kn, false);
 	if (!dst_cgrp)
 		return -ENODEV;
 
-	task = cgroup_procs_write_start(buf, threadgroup, &locked);
+	task = cgroup_procs_write_start(buf, threadgroup, &threadgroup_locked);
 	ret = PTR_ERR_OR_ZERO(task);
 	if (ret)
 		goto out_unlock;
@@ -5003,7 +5035,7 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
 	ret = cgroup_attach_task(dst_cgrp, task, threadgroup);
 
 out_finish:
-	cgroup_procs_write_finish(task, locked);
+	cgroup_procs_write_finish(task, threadgroup_locked);
 out_unlock:
 	cgroup_kn_unlock(of->kn);
 
kernel/cgroup/cpuset.c
@@ -2289,7 +2289,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
 	cgroup_taskset_first(tset, &css);
 	cs = css_cs(css);
 
-	cpus_read_lock();
+	lockdep_assert_cpus_held();	/* see cgroup_attach_lock() */
 	percpu_down_write(&cpuset_rwsem);
 
 	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
@@ -2343,7 +2343,6 @@ static void cpuset_attach(struct cgroup_taskset *tset)
 		wake_up(&cpuset_attach_wq);
 
 	percpu_up_write(&cpuset_rwsem);
-	cpus_read_unlock();
 }
 
 /* The various types of files and directories in a cpuset file system */
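With cgroup_attach_lock() now taking cpus_read_lock() in the caller, cpuset_attach() must not re-acquire it (that was the reversed order); it merely asserts that the caller holds it. A hedged sketch of this callee-asserts pattern, with an illustrative function name:

#include <linux/cpu.h>

/* Illustrative callee: depends on its caller holding cpus_read_lock()
 * instead of taking the lock itself.  The assertion documents the
 * contract and, with lockdep enabled, verifies it at runtime; otherwise
 * it costs nothing. */
static void example_attach_callback(void)
{
	lockdep_assert_cpus_held();
	/* ... work that must not race with CPU hotplug ... */
}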
kernel/sched/psi.c
@@ -190,12 +190,8 @@ static void group_init(struct psi_group *group)
 	/* Init trigger-related members */
 	mutex_init(&group->trigger_lock);
 	INIT_LIST_HEAD(&group->triggers);
-	memset(group->nr_triggers, 0, sizeof(group->nr_triggers));
-	group->poll_states = 0;
 	group->poll_min_period = U32_MAX;
-	memset(group->polling_total, 0, sizeof(group->polling_total));
 	group->polling_next_update = ULLONG_MAX;
-	group->polling_until = 0;
 	init_waitqueue_head(&group->poll_wait);
 	timer_setup(&group->poll_timer, poll_timer_fn, 0);
 	rcu_assign_pointer(group->poll_task, NULL);
@@ -957,7 +953,7 @@ int psi_cgroup_alloc(struct cgroup *cgroup)
 	if (static_branch_likely(&psi_disabled))
 		return 0;
 
-	cgroup->psi = kmalloc(sizeof(struct psi_group), GFP_KERNEL);
+	cgroup->psi = kzalloc(sizeof(struct psi_group), GFP_KERNEL);
 	if (!cgroup->psi)
 		return -ENOMEM;
 
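The fix is the standard zero-on-allocation idiom: kzalloc() behaves like kmalloc() followed by a memset() of the whole object, so every field, including any added after the allocation site was written, starts out zeroed. A userspace analogue with a simplified stand-in struct (the real struct psi_group has many more fields):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in only; not the kernel's struct psi_group. */
struct psi_group_like {
	unsigned long total[6];
	unsigned long avg_next_update;
};

int main(void)
{
	/* malloc() (like kmalloc) returns uninitialized memory, and clearing
	 * individual fields by hand misses newly added ones.  calloc()
	 * (like kzalloc) zeroes the whole object up front. */
	struct psi_group_like *g = calloc(1, sizeof(*g));

	if (!g)
		return 1;
	printf("total[0] = %lu (guaranteed zero)\n", g->total[0]);
	free(g);
	return 0;
}

Zeroing at the allocation is also why the hand-written memset()s and zero assignments could be deleted from group_init() in the hunk above: they became redundant rather than load-bearing.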
@@ -1091,7 +1087,7 @@ int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
 }
 
 struct psi_trigger *psi_trigger_create(struct psi_group *group,
-			char *buf, size_t nbytes, enum psi_res res)
+			char *buf, enum psi_res res)
 {
 	struct psi_trigger *t;
 	enum psi_states state;
@@ -1320,7 +1316,7 @@ static ssize_t psi_write(struct file *file, const char __user *user_buf,
 		return -EBUSY;
 	}
 
-	new = psi_trigger_create(&psi_system, buf, nbytes, res);
+	new = psi_trigger_create(&psi_system, buf, res);
 	if (IS_ERR(new)) {
 		mutex_unlock(&seq->lock);
 		return PTR_ERR(new);