Diffstat (limited to 'include/linux/sched.h')
 include/linux/sched.h | 78 ++++++++++++++++++++++++++----------------------
 1 file changed, 42 insertions(+), 36 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 291a9bd5b97f..a2cd15855bad 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -21,6 +21,7 @@
#include <linux/seccomp.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
+#include <linux/refcount.h>
#include <linux/resource.h>
#include <linux/latencytop.h>
#include <linux/sched/prio.h>
@@ -47,6 +48,7 @@ struct pid_namespace;
struct pipe_inode_info;
struct rcu_node;
struct reclaim_state;
+struct capture_control;
struct robust_list_head;
struct sched_attr;
struct sched_param;
@@ -176,7 +178,7 @@ struct task_group;
* TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
*
* However, with slightly different timing the wakeup TASK_RUNNING store can
- * also collide with the TASK_UNINTERRUPTIBLE store. Loosing that store is not
+ * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
* a problem either because that will result in one extra go around the loop
* and our @cond test will save the day.
*
@@ -356,12 +358,6 @@ struct util_est {
* For cfs_rq, it is the aggregated load_avg of all runnable and
* blocked sched_entities.
*
- * load_avg may also take frequency scaling into account:
- *
- * load_avg = runnable% * scale_load_down(load) * freq%
- *
- * where freq% is the CPU frequency normalized to the highest frequency.
- *
* [util_avg definition]
*
* util_avg = running% * SCHED_CAPACITY_SCALE
@@ -370,17 +366,14 @@ struct util_est {
* a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
* and blocked sched_entities.
*
- * util_avg may also factor frequency scaling and CPU capacity scaling:
- *
- * util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
- *
- * where freq% is the same as above, and capacity% is the CPU capacity
- * normalized to the greatest capacity (due to uarch differences, etc).
+ * load_avg and util_avg don't directly factor frequency scaling and CPU
+ * capacity scaling. The scaling is done through rq_clock_pelt, which
+ * is used for computing those signals (see update_rq_clock_pelt()).
*
- * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
- * themselves are in the range of [0, 1]. To do fixed point arithmetics,
- * we therefore scale them to as large a range as necessary. This is for
- * example reflected by util_avg's SCHED_CAPACITY_SCALE.
+ * N.B., the above ratios (runnable% and running%) themselves are in the
+ * range of [0, 1]. To do fixed point arithmetic, we therefore scale them
+ * to as large a range as necessary. This is for example reflected by
+ * util_avg's SCHED_CAPACITY_SCALE.
*
* [Overflow issue]
*
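Note: the new comment text above says the freq%/capacity% terms moved out of
the load_avg/util_avg formulas and into the PELT clock itself. A minimal
sketch of that idea, assuming both scale factors are fractions of
SCHED_CAPACITY_SCALE (1024); this is an illustration, not the actual
update_rq_clock_pelt() body:

	/* A CPU at half frequency advances the PELT clock at half rate,
	 * so running%/runnable% measured against it come out already
	 * frequency- and capacity-invariant. */
	static u64 sketch_pelt_clock_delta(u64 wall_delta,
					   unsigned long freq_cap, /* of 1024 */
					   unsigned long cpu_cap)  /* of 1024 */
	{
		wall_delta = (wall_delta * freq_cap) >> SCHED_CAPACITY_SHIFT;
		wall_delta = (wall_delta * cpu_cap) >> SCHED_CAPACITY_SHIFT;
		return wall_delta;
	}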
@@ -515,7 +508,7 @@ struct sched_dl_entity {
/*
* Actual scheduling parameters. Initialized with the values above,
- * they are continously updated during task execution. Note that
+ * they are continuously updated during task execution. Note that
* the remaining runtime could be < 0 in case we are in overrun.
*/
s64 runtime; /* Remaining runtime for this instance */
@@ -572,8 +565,10 @@ union rcu_special {
struct {
u8 blocked;
u8 need_qs;
+ u8 exp_hint; /* Hint for performance. */
+ u8 pad; /* No garbage from compiler! */
} b; /* Bits. */
- u16 s; /* Set of bits. */
+ u32 s; /* Set of bits. */
};
enum perf_event_task_context {
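Note: widening 's' from u16 to u32 keeps it covering all four u8 fields of
'b', and the explicit 'pad' byte guarantees whole-word accesses through 's'
never see indeterminate bits. A hedged usage sketch (the helper name is
illustrative, not part of this patch):

	/* Test every sub-flag with one 32-bit load instead of three u8 loads. */
	static inline bool rcu_special_any_set(union rcu_special *rs)
	{
		return READ_ONCE(rs->s) != 0; /* blocked, need_qs or exp_hint */
	}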
@@ -605,7 +600,7 @@ struct task_struct {
randomized_struct_fields_start
void *stack;
- atomic_t usage;
+ refcount_t usage;
/* Per task flags (PF_*), defined further below: */
unsigned int flags;
unsigned int ptrace;
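Note: refcount_t provides the same inc/dec operations as atomic_t but
saturates instead of wrapping on overflow/underflow, so a refcounting bug
becomes a WARN rather than a potential use-after-free. Roughly how the
counter is manipulated, in the spirit of get_task_struct()/put_task_struct()
(a sketch; __put_task_struct() is the existing release function):

	static inline struct task_struct *sketch_get_task(struct task_struct *t)
	{
		refcount_inc(&t->usage);   /* WARNs and saturates on overflow */
		return t;
	}

	static inline void sketch_put_task(struct task_struct *t)
	{
		if (refcount_dec_and_test(&t->usage)) /* last reference gone */
			__put_task_struct(t);
	}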
@@ -731,18 +726,14 @@ struct task_struct {
#ifdef CONFIG_CGROUPS
/* disallow userland-initiated cgroup migration */
unsigned no_cgroup_migration:1;
+ /* task is frozen/stopped (used by the cgroup freezer) */
+ unsigned frozen:1;
#endif
#ifdef CONFIG_BLK_CGROUP
/* to be used once the psi infrastructure lands upstream. */
unsigned use_memdelay:1;
#endif
- /*
- * May usercopy functions fault on kernel addresses?
- * This is not just a single bit because this can potentially nest.
- */
- unsigned int kernel_uaccess_faults_ok;
-
unsigned long atomic_flags; /* Flags requiring atomic access. */
struct restart_block restart_block;
@@ -883,8 +874,10 @@ struct task_struct {
struct callback_head *task_works;
- struct audit_context *audit_context;
+#ifdef CONFIG_AUDIT
#ifdef CONFIG_AUDITSYSCALL
+ struct audit_context *audit_context;
+#endif
kuid_t loginuid;
unsigned int sessionid;
#endif
@@ -962,6 +955,9 @@ struct task_struct {
struct io_context *io_context;
+#ifdef CONFIG_COMPACTION
+ struct capture_control *capture_control;
+#endif
/* Ptrace state: */
unsigned long ptrace_message;
kernel_siginfo_t *last_siginfo;
@@ -993,7 +989,7 @@ struct task_struct {
/* cg_list protected by css_set_lock and tsk->alloc_lock: */
struct list_head cg_list;
#endif
-#ifdef CONFIG_INTEL_RDT
+#ifdef CONFIG_X86_CPU_RESCTRL
u32 closid;
u32 rmid;
#endif
@@ -1063,7 +1059,6 @@ struct task_struct {
#ifdef CONFIG_RSEQ
struct rseq __user *rseq;
- u32 rseq_len;
u32 rseq_sig;
/*
* RmW on rseq_event_mask must be performed atomically
@@ -1191,7 +1186,7 @@ struct task_struct {
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
/* A live task holds one reference: */
- atomic_t stack_refcount;
+ refcount_t stack_refcount;
#endif
#ifdef CONFIG_LIVEPATCH
int patch_state;
@@ -1404,9 +1399,10 @@ extern struct pid *cad_pid;
#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
#define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */
+#define PF_UMH 0x02000000 /* I'm a usermodehelper process */
#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
-#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
+#define PF_MEMALLOC_NOCMA 0x10000000 /* All allocation requests will have __GFP_MOVABLE cleared */
#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
#define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */
@@ -1456,6 +1452,7 @@ static inline bool is_percpu_thread(void)
#define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled */
#define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */
#define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */
+#define PFA_SPEC_SSB_NOEXEC 7 /* Speculative Store Bypass clear on execve() */
#define TASK_PFA_TEST(name, func) \
static inline bool task_##func(struct task_struct *p) \
@@ -1484,6 +1481,10 @@ TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
+TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec)
+TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec)
+TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec)
+
TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
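Note: each TASK_PFA_* invocation stamps out a small inline accessor over the
atomic_flags bit named in its first argument. Approximately what
TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec) expands to — the macro body
is truncated in the hunk above, so this reconstruction assumes the usual
test_bit() form:

	static inline bool task_spec_ssb_noexec(struct task_struct *p)
	{
		return test_bit(PFA_SPEC_SSB_NOEXEC, &p->atomic_flags);
	}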
@@ -1751,9 +1752,9 @@ static __always_inline bool need_resched(void)
static inline unsigned int task_cpu(const struct task_struct *p)
{
#ifdef CONFIG_THREAD_INFO_IN_TASK
- return p->cpu;
+ return READ_ONCE(p->cpu);
#else
- return task_thread_info(p)->cpu;
+ return READ_ONCE(task_thread_info(p)->cpu);
#endif
}
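Note: READ_ONCE() makes the load of the cpu field a single, non-torn access
and stops the compiler from re-reading it while a concurrent migration
updates it; the result can be immediately stale, but it is always one
coherent value. The writer side would pair with WRITE_ONCE(), along these
lines (hedged sketch, not the actual set_task_cpu() body):

	static inline void sketch_store_task_cpu(struct task_struct *p, int cpu)
	{
	#ifdef CONFIG_THREAD_INFO_IN_TASK
		WRITE_ONCE(p->cpu, cpu); /* pairs with READ_ONCE() in task_cpu() */
	#else
		WRITE_ONCE(task_thread_info(p)->cpu, cpu);
	#endif
	}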
@@ -1855,12 +1856,10 @@ static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
if (clone_flags & CLONE_THREAD) {
t->rseq = NULL;
- t->rseq_len = 0;
t->rseq_sig = 0;
t->rseq_event_mask = 0;
} else {
t->rseq = current->rseq;
- t->rseq_len = current->rseq_len;
t->rseq_sig = current->rseq_sig;
t->rseq_event_mask = current->rseq_event_mask;
}
@@ -1869,7 +1868,6 @@ static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
static inline void rseq_execve(struct task_struct *t)
{
t->rseq = NULL;
- t->rseq_len = 0;
t->rseq_sig = 0;
t->rseq_event_mask = 0;
}
@@ -1902,6 +1900,14 @@ static inline void rseq_execve(struct task_struct *t)
#endif
+void __exit_umh(struct task_struct *tsk);
+
+static inline void exit_umh(struct task_struct *tsk)
+{
+ if (unlikely(tsk->flags & PF_UMH))
+ __exit_umh(tsk);
+}
+
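Note: the inline wrapper keeps the common exit path down to one flag test;
only tasks with PF_UMH set take the out-of-line __exit_umh() slow path. A
hypothetical call site, to show the intended usage (the real hook lives in
the exit code, which this diff does not show):

	void sketch_task_exit(struct task_struct *tsk)
	{
		/* ... other teardown ... */
		exit_umh(tsk); /* no-op unless tsk->flags has PF_UMH */
	}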
#ifdef CONFIG_DEBUG_RSEQ
void rseq_syscall(struct pt_regs *regs);