author    Ingo Molnar <mingo@kernel.org>	2017-02-02 13:50:56 +0300
committer Ingo Molnar <mingo@kernel.org>	2017-03-02 10:42:25 +0300
commit    780de9dd2720debc14c501dab4dc80d1f75ad50e
tree      706aee839aae1d5f82391eeb5c76cc8088c53e27
parent    f9411ebe3d85cbbea06298241e6053d031d281fc
sched/headers, cgroups: Remove the threadgroup_change_*() wrappery
threadgroup_change_begin()/end() is a pointless wrapper around
cgroup_threadgroup_change_begin()/end(), minus a might_sleep()
in the !CONFIG_CGROUPS=y case.

Remove the wrappery and move the might_sleep() into the !CONFIG_CGROUPS
stub of cgroup_threadgroup_change_begin() (the down_read() path already
has a might_sleep() check).

This debloats <linux/sched.h> a bit and simplifies this API.

Update all call sites.

No change in functionality.

Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
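The conversion at each call site is mechanical: the old wrapper name is
replaced by the cgroup primitive with the same argument. A minimal sketch
of the resulting pattern (illustrative only; modify_threadgroup() is a
hypothetical caller, not one of the functions touched below):

static void modify_threadgroup(struct task_struct *tsk)
{
	/* May sleep; excludes concurrent cgroup threadgroup migration. */
	cgroup_threadgroup_change_begin(tsk);

	/* ... change the threadgroup: add a thread, set PF_EXITING, de-thread ... */

	cgroup_threadgroup_change_end(tsk);
}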
-rw-r--r--  fs/exec.c                    |  6
-rw-r--r--  include/linux/cgroup-defs.h  | 13
-rw-r--r--  include/linux/sched.h        | 28
-rw-r--r--  kernel/cgroup/pids.c         |  2
-rw-r--r--  kernel/fork.c                |  6
-rw-r--r--  kernel/signal.c              |  6
6 files changed, 18 insertions, 43 deletions
diff --git a/fs/exec.c b/fs/exec.c
index 698a86094f76..e595e1529581 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1088,7 +1088,7 @@ static int de_thread(struct task_struct *tsk)
struct task_struct *leader = tsk->group_leader;
for (;;) {
- threadgroup_change_begin(tsk);
+ cgroup_threadgroup_change_begin(tsk);
write_lock_irq(&tasklist_lock);
/*
* Do this under tasklist_lock to ensure that
@@ -1099,7 +1099,7 @@ static int de_thread(struct task_struct *tsk)
break;
__set_current_state(TASK_KILLABLE);
write_unlock_irq(&tasklist_lock);
- threadgroup_change_end(tsk);
+ cgroup_threadgroup_change_end(tsk);
schedule();
if (unlikely(__fatal_signal_pending(tsk)))
goto killed;
@@ -1157,7 +1157,7 @@ static int de_thread(struct task_struct *tsk)
if (unlikely(leader->ptrace))
__wake_up_parent(leader, leader->parent);
write_unlock_irq(&tasklist_lock);
- threadgroup_change_end(tsk);
+ cgroup_threadgroup_change_end(tsk);
release_task(leader);
}
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 3c02404cfce9..6a3f850cabab 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -531,8 +531,8 @@ extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
* cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
* @tsk: target task
*
- * Called from threadgroup_change_begin() and allows cgroup operations to
- * synchronize against threadgroup changes using a percpu_rw_semaphore.
+ * Allows cgroup operations to synchronize against threadgroup changes
+ * using a percpu_rw_semaphore.
*/
static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
@@ -543,8 +543,7 @@ static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
* cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
* @tsk: target task
*
- * Called from threadgroup_change_end(). Counterpart of
- * cgroup_threadcgroup_change_begin().
+ * Counterpart of cgroup_threadcgroup_change_begin().
*/
static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
{
@@ -555,7 +554,11 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
#define CGROUP_SUBSYS_COUNT 0
-static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) {}
+static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
+{
+ might_sleep();
+}
+
static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}
#endif /* CONFIG_CGROUPS */
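The hunk context above truncates the CONFIG_CGROUPS=y bodies of these
helpers. In this kernel they are expected to be thin wrappers around the
percpu rwsem declared earlier in the header, roughly:

static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
	/* percpu_down_read() may sleep, which is why this patch adds an
	 * explicit might_sleep() to the !CONFIG_CGROUPS stub: the debug
	 * check is preserved in both configurations. */
	percpu_down_read(&cgroup_threadgroup_rwsem);
}

static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
{
	percpu_up_read(&cgroup_threadgroup_rwsem);
}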
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e732881517f2..3f61baac928b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -3162,34 +3162,6 @@ static inline void unlock_task_sighand(struct task_struct *tsk,
spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}
-/**
- * threadgroup_change_begin - mark the beginning of changes to a threadgroup
- * @tsk: task causing the changes
- *
- * All operations which modify a threadgroup - a new thread joining the
- * group, death of a member thread (the assertion of PF_EXITING) and
- * exec(2) dethreading the process and replacing the leader - are wrapped
- * by threadgroup_change_{begin|end}(). This is to provide a place which
- * subsystems needing threadgroup stability can hook into for
- * synchronization.
- */
-static inline void threadgroup_change_begin(struct task_struct *tsk)
-{
- might_sleep();
- cgroup_threadgroup_change_begin(tsk);
-}
-
-/**
- * threadgroup_change_end - mark the end of changes to a threadgroup
- * @tsk: task causing the changes
- *
- * See threadgroup_change_begin().
- */
-static inline void threadgroup_change_end(struct task_struct *tsk)
-{
- cgroup_threadgroup_change_end(tsk);
-}
-
#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline struct thread_info *task_thread_info(struct task_struct *task)
diff --git a/kernel/cgroup/pids.c b/kernel/cgroup/pids.c
index 2bd673783f1a..e756dae49300 100644
--- a/kernel/cgroup/pids.c
+++ b/kernel/cgroup/pids.c
@@ -214,7 +214,7 @@ static void pids_cancel_attach(struct cgroup_taskset *tset)
/*
* task_css_check(true) in pids_can_fork() and pids_cancel_fork() relies
- * on threadgroup_change_begin() held by the copy_process().
+ * on cgroup_threadgroup_change_begin() held by the copy_process().
*/
static int pids_can_fork(struct task_struct *task)
{
diff --git a/kernel/fork.c b/kernel/fork.c
index 246bf9aaf9df..d043fedc03c8 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1746,7 +1746,7 @@ static __latent_entropy struct task_struct *copy_process(
INIT_LIST_HEAD(&p->thread_group);
p->task_works = NULL;
- threadgroup_change_begin(current);
+ cgroup_threadgroup_change_begin(current);
/*
* Ensure that the cgroup subsystem policies allow the new process to be
* forked. It should be noted the the new process's css_set can be changed
@@ -1843,7 +1843,7 @@ static __latent_entropy struct task_struct *copy_process(
proc_fork_connector(p);
cgroup_post_fork(p);
- threadgroup_change_end(current);
+ cgroup_threadgroup_change_end(current);
perf_event_fork(p);
trace_task_newtask(p, clone_flags);
@@ -1854,7 +1854,7 @@ static __latent_entropy struct task_struct *copy_process(
bad_fork_cancel_cgroup:
cgroup_cancel_fork(p);
bad_fork_free_pid:
- threadgroup_change_end(current);
+ cgroup_threadgroup_change_end(current);
if (pid != &init_struct_pid)
free_pid(pid);
bad_fork_cleanup_thread:
diff --git a/kernel/signal.c b/kernel/signal.c
index 214a8feeb771..bae358532d0a 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2395,11 +2395,11 @@ void exit_signals(struct task_struct *tsk)
* @tsk is about to have PF_EXITING set - lock out users which
* expect stable threadgroup.
*/
- threadgroup_change_begin(tsk);
+ cgroup_threadgroup_change_begin(tsk);
if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
tsk->flags |= PF_EXITING;
- threadgroup_change_end(tsk);
+ cgroup_threadgroup_change_end(tsk);
return;
}
@@ -2410,7 +2410,7 @@ void exit_signals(struct task_struct *tsk)
*/
tsk->flags |= PF_EXITING;
- threadgroup_change_end(tsk);
+ cgroup_threadgroup_change_end(tsk);
if (!signal_pending(tsk))
goto out;