path: root/include/linux/futex.h
author     Thomas Gleixner <tglx@linutronix.de>  2019-11-07 00:55:37 +0300
committer  Thomas Gleixner <tglx@linutronix.de>  2019-11-20 11:40:07 +0300
commit     3d4775df0a89240f671861c6ab6e8d59af8e9e41 (patch)
tree       6813a938bd225f703d9569f60eea618a82642e31  /include/linux/futex.h
parent     ba31c1a48538992316cc71ce94fa9cd3e7b427c0 (diff)
download   linux-3d4775df0a89240f671861c6ab6e8d59af8e9e41.tar.xz
futex: Replace PF_EXITPIDONE with a state
The futex exit handling relies on PF_ flags. That's suboptimal as it requires a smp_mb() and an ugly lock/unlock of the exiting task's pi_lock in the middle of do_exit() to enforce the observability of PF_EXITING in the futex code.

Add a futex_state member to task_struct and convert the PF_EXITPIDONE logic over to the new state. The PF_EXITING dependency will be cleaned up in a later step.

This prepares for handling various futex exit issues later.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20191106224556.149449274@linutronix.de
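To illustrate the idea in the commit message, the standalone C sketch below models how a waiter-side check moves from testing a PF_ flag bit to reading a dedicated futex_state word. It is a minimal model, not kernel code: struct task and owner_finished_futex_exit() are hypothetical stand-ins; only the futex_state member and the FUTEX_STATE_OK/FUTEX_STATE_DEAD values come from the patch below.

#include <stdio.h>

/* Simplified stand-in for the relevant bit of task_struct. */
enum {
	FUTEX_STATE_OK,
	FUTEX_STATE_DEAD,
};

struct task {
	int futex_state;	/* replaces the PF_EXITPIDONE flag bit */
};

/*
 * Waiter-side test. The old code checked a PF_ flag bit in tsk->flags,
 * which forced do_exit() into an smp_mb() plus pi_lock lock/unlock to make
 * the flag observable; reading a dedicated state word removes that need.
 */
static int owner_finished_futex_exit(const struct task *p)
{
	return p->futex_state == FUTEX_STATE_DEAD;
}

int main(void)
{
	struct task owner = { .futex_state = FUTEX_STATE_OK };

	printf("futex exit done: %d\n", owner_finished_futex_exit(&owner));
	owner.futex_state = FUTEX_STATE_DEAD;	/* what futex_exit_done() does */
	printf("futex exit done: %d\n", owner_finished_futex_exit(&owner));
	return 0;
}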
Diffstat (limited to 'include/linux/futex.h')
-rw-r--r--  include/linux/futex.h  33
1 file changed, 33 insertions, 0 deletions
diff --git a/include/linux/futex.h b/include/linux/futex.h
index d6ed11c51a8e..025ad96bcf9d 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -50,6 +50,10 @@ union futex_key {
#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } }
#ifdef CONFIG_FUTEX
+enum {
+	FUTEX_STATE_OK,
+	FUTEX_STATE_DEAD,
+};
static inline void futex_init_task(struct task_struct *tsk)
{
@@ -59,6 +63,34 @@ static inline void futex_init_task(struct task_struct *tsk)
#endif
INIT_LIST_HEAD(&tsk->pi_state_list);
tsk->pi_state_cache = NULL;
+	tsk->futex_state = FUTEX_STATE_OK;
+}
+
+/**
+ * futex_exit_done - Sets the task's futex state to FUTEX_STATE_DEAD
+ * @tsk: task to set the state on
+ *
+ * Set the futex exit state of the task lockless. The futex waiter code
+ * observes that state when a task is exiting and loops until the task has
+ * actually finished the futex cleanup. The worst case for this is that the
+ * waiter runs through the wait loop until the state becomes visible.
+ *
+ * This has two callers:
+ *
+ * - futex_mm_release() after the futex exit cleanup has been done
+ *
+ * - do_exit() from the recursive fault handling path.
+ *
+ * In case of a recursive fault this is best effort. Either the futex exit
+ * code has run already or not. If the OWNER_DIED bit has been set on the
+ * futex then the waiter can take it over. If not, the problem is pushed
+ * back to user space. If the futex exit code did not run yet, then an
+ * already queued waiter might block forever, but there is nothing which
+ * can be done about that.
+ */
+static inline void futex_exit_done(struct task_struct *tsk)
+{
+	tsk->futex_state = FUTEX_STATE_DEAD;
}
void futex_mm_release(struct task_struct *tsk);
@@ -68,6 +100,7 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
#else
static inline void futex_init_task(struct task_struct *tsk) { }
static inline void futex_mm_release(struct task_struct *tsk) { }
+static inline void futex_exit_done(struct task_struct *tsk) { }
static inline long do_futex(u32 __user *uaddr, int op, u32 val,
ktime_t *timeout, u32 __user *uaddr2,
u32 val2, u32 val3)
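The futex_exit_done() kerneldoc in the hunk above says the waiter code observes this state while a task is exiting and loops until the exit-time futex cleanup has actually finished. The following is a rough standalone sketch of that consumer-side loop, not the kernel implementation: wait_for_owner_exit() is a hypothetical helper, and sched_yield() stands in for the kernel's retry through the futex wait path.

#include <sched.h>
#include <stdio.h>

enum { FUTEX_STATE_OK, FUTEX_STATE_DEAD };

/*
 * Illustrative waiter loop: retry until the exiting owner's futex_state
 * becomes FUTEX_STATE_DEAD, i.e. until the exit-time futex cleanup is known
 * to have finished. In the kernel the retry happens by going back through
 * the futex wait path rather than by calling sched_yield().
 */
static void wait_for_owner_exit(volatile const int *futex_state)
{
	while (*futex_state != FUTEX_STATE_DEAD)
		sched_yield();
}

int main(void)
{
	volatile int owner_state = FUTEX_STATE_DEAD;	/* owner already finished */

	wait_for_owner_exit(&owner_state);
	printf("owner completed its futex exit cleanup\n");
	return 0;
}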