Diffstat (limited to 'include/linux/sched.h')
-rw-r--r-- | include/linux/sched.h | 136
1 file changed, 69 insertions, 67 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 81a173c0897..68dcffaa62a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -90,6 +90,7 @@ struct sched_param {
 #include <linux/latencytop.h>
 #include <linux/cred.h>
 #include <linux/llist.h>
+#include <linux/uidgid.h>
 
 #include <asm/processor.h>
 
@@ -144,6 +145,7 @@ extern unsigned long this_cpu_load(void);
 
 
 extern void calc_global_load(unsigned long ticks);
+extern void update_cpu_load_nohz(void);
 
 extern unsigned long get_parent_ip(unsigned long addr);
 
@@ -332,6 +334,14 @@ static inline void lockup_detector_init(void)
 }
 #endif
 
+#if defined(CONFIG_LOCKUP_DETECTOR) && defined(CONFIG_SUSPEND)
+void lockup_detector_bootcpu_resume(void);
+#else
+static inline void lockup_detector_bootcpu_resume(void)
+{
+}
+#endif
+
 #ifdef CONFIG_DETECT_HUNG_TASK
 extern unsigned int sysctl_hung_task_panic;
 extern unsigned long sysctl_hung_task_check_count;
@@ -404,6 +414,11 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
 extern void set_dumpable(struct mm_struct *mm, int value);
 extern int get_dumpable(struct mm_struct *mm);
 
+/* get/set_dumpable() values */
+#define SUID_DUMPABLE_DISABLED	0
+#define SUID_DUMPABLE_ENABLED	1
+#define SUID_DUMPABLE_SAFE	2
+
 /* mm flags */
 /* dumpable bits */
 #define MMF_DUMPABLE      0  /* core dump is permitted */
@@ -437,6 +452,7 @@ extern int get_dumpable(struct mm_struct *mm);
 /* leave room for more dump flags */
 #define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
 #define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
+#define MMF_EXE_FILE_CHANGED	18	/* see prctl_set_mm_exe_file() */
 
 #define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
 
@@ -728,8 +744,7 @@ struct user_struct {
 
 	/* Hash table maintenance information */
 	struct hlist_node uidhash_node;
-	uid_t uid;
-	struct user_namespace *user_ns;
+	kuid_t uid;
 
 #ifdef CONFIG_PERF_EVENTS
 	atomic_long_t locked_vm;
@@ -738,7 +753,7 @@ struct user_struct {
 
 extern int uids_sysfs_init(void);
 
-extern struct user_struct *find_user(uid_t);
+extern struct user_struct *find_user(kuid_t);
 
 extern struct user_struct root_user;
 #define INIT_USER (&root_user)
@@ -855,61 +870,14 @@ enum cpu_idle_type {
 #define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
 #define SD_PREFER_LOCAL		0x0040	/* Prefer to keep tasks local to this domain */
 #define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
-#define SD_POWERSAVINGS_BALANCE	0x0100	/* Balance for power savings */
 #define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
 #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
 #define SD_ASYM_PACKING		0x0800	/* Place busy groups earlier in the domain */
 #define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
 #define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
 
-enum powersavings_balance_level {
-	POWERSAVINGS_BALANCE_NONE = 0,	/* No power saving load balance */
-	POWERSAVINGS_BALANCE_BASIC,	/* Fill one thread/core/package
-					 * first for long running threads
-					 */
-	POWERSAVINGS_BALANCE_WAKEUP,	/* Also bias task wakeups to semi-idle
-					 * cpu package for power savings
-					 */
-	MAX_POWERSAVINGS_BALANCE_LEVELS
-};
-
-extern int sched_mc_power_savings, sched_smt_power_savings;
-
-static inline int sd_balance_for_mc_power(void)
-{
-	if (sched_smt_power_savings)
-		return SD_POWERSAVINGS_BALANCE;
-
-	if (!sched_mc_power_savings)
-		return SD_PREFER_SIBLING;
-
-	return 0;
-}
-
-static inline int sd_balance_for_package_power(void)
-{
-	if (sched_mc_power_savings | sched_smt_power_savings)
-		return SD_POWERSAVINGS_BALANCE;
-
-	return SD_PREFER_SIBLING;
-}
-
 extern int __weak arch_sd_sibiling_asym_packing(void);
 
-/*
- * Optimise SD flags for power savings:
- * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings.
- * Keep default SD flags if sched_{smt,mc}_power_saving=0
- */
-
-static inline int sd_power_saving_flags(void)
-{
-	if (sched_mc_power_savings | sched_smt_power_savings)
-		return SD_BALANCE_NEWIDLE;
-
-	return 0;
-}
-
 struct sched_group_power {
 	atomic_t ref;
 	/*
@@ -922,6 +890,8 @@ struct sched_group_power {
 	 * Number of busy cpus in this group.
 	 */
 	atomic_t nr_busy_cpus;
+
+	unsigned long cpumask[0]; /* iteration mask */
 };
 
 struct sched_group {
@@ -946,6 +916,15 @@ static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
 	return to_cpumask(sg->cpumask);
}
 
+/*
+ * cpumask masking which cpus in the group are allowed to iterate up the domain
+ * tree.
+ */
+static inline struct cpumask *sched_group_mask(struct sched_group *sg)
+{
+	return to_cpumask(sg->sgp->cpumask);
+}
+
 /**
  * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
  * @group: The group whose first cpu is to be returned.
@@ -983,6 +962,7 @@ struct sched_domain {
 	unsigned int smt_gain;
 	int flags;			/* See SD_* */
 	int level;
+	int idle_buddy;			/* cpu assigned to select_idle_sibling() */
 
 	/* Runtime fields. */
 	unsigned long last_balance;	/* init to jiffies. units in jiffies */
@@ -1234,7 +1214,6 @@ struct sched_rt_entity {
 	struct list_head run_list;
 	unsigned long timeout;
 	unsigned int time_slice;
-	int nr_cpus_allowed;
 
 	struct sched_rt_entity *back;
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -1279,6 +1258,9 @@ struct task_struct {
 	const struct sched_class *sched_class;
 	struct sched_entity se;
 	struct sched_rt_entity rt;
+#ifdef CONFIG_CGROUP_SCHED
+	struct task_group *sched_task_group;
+#endif
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 	/* list of struct preempt_notifier: */
@@ -1299,6 +1281,7 @@ struct task_struct {
 #endif
 
 	unsigned int policy;
+	int nr_cpus_allowed;
 	cpumask_t cpus_allowed;
 
 #ifdef CONFIG_PREEMPT_RCU
@@ -1341,16 +1324,13 @@ struct task_struct {
 				 * execve */
 	unsigned in_iowait:1;
 
+	/* task may not gain privileges */
+	unsigned no_new_privs:1;
 
 	/* Revert to default priority/policy when forking */
 	unsigned sched_reset_on_fork:1;
 	unsigned sched_contributes_to_load:1;
 
-#ifdef CONFIG_GENERIC_HARDIRQS
-	/* IRQ handler threads */
-	unsigned irq_thread:1;
-#endif
-
 	pid_t pid;
 	pid_t tgid;
 
@@ -1358,10 +1338,9 @@ struct task_struct {
 	/* Canary value for the -fstack-protector gcc feature */
 	unsigned long stack_canary;
 #endif
-
-	/*
+	/*
 	 * pointers to (original) parent process, youngest child, younger sibling,
-	 * older sibling, respectively.  (p->father can be replaced with
+	 * older sibling, respectively. (p->father can be replaced with
 	 * p->real_parent->pid)
 	 */
 	struct task_struct __rcu *real_parent; /* real parent process */
@@ -1408,8 +1387,6 @@ struct task_struct {
 					 * credentials (COW) */
 	const struct cred __rcu *cred;	/* effective (overridable) subjective task
 					 * credentials (COW) */
-	struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
-
 	char comm[TASK_COMM_LEN]; /* executable name excluding path
 				     - access with [gs]et_task_comm (which lock
 				       it with task_lock())
@@ -1445,12 +1422,14 @@ struct task_struct {
 	int (*notifier)(void *priv);
 	void *notifier_data;
 	sigset_t *notifier_mask;
+	struct callback_head *task_works;
+
 	struct audit_context *audit_context;
 #ifdef CONFIG_AUDITSYSCALL
 	uid_t loginuid;
 	unsigned int sessionid;
 #endif
-	seccomp_t seccomp;
+	struct seccomp seccomp;
 
 /* Thread group tracking */
 	u32 parent_exec_id;
@@ -1584,7 +1563,6 @@ struct task_struct {
 	unsigned long timer_slack_ns;
 	unsigned long default_timer_slack_ns;
 
-	struct list_head *scm_work_list;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	/* Index of current stored address in ret_stack */
 	int curr_ret_stack;
@@ -1617,6 +1595,9 @@ struct task_struct {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
 	atomic_t ptrace_bp_refcnt;
 #endif
+#ifdef CONFIG_UPROBES
+	struct uprobe_task *utask;
+#endif
 };
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
@@ -1933,6 +1914,14 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
 }
 #endif
 
+#ifdef CONFIG_NO_HZ
+void calc_load_enter_idle(void);
+void calc_load_exit_idle(void);
+#else
+static inline void calc_load_enter_idle(void) { }
+static inline void calc_load_exit_idle(void) { }
+#endif /* CONFIG_NO_HZ */
+
 #ifndef CONFIG_CPUMASK_OFFSTACK
 static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 {
@@ -1950,7 +1939,7 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
  */
 extern unsigned long long notrace sched_clock(void);
 /*
- * See the comment in kernel/sched_clock.c
+ * See the comment in kernel/sched/clock.c
  */
 extern u64 cpu_clock(int cpu);
 extern u64 local_clock(void);
@@ -2177,14 +2166,13 @@ extern struct task_struct *find_task_by_pid_ns(pid_t nr,
 extern void __set_special_pids(struct pid *pid);
 
 /* per-UID process charging. */
-extern struct user_struct * alloc_uid(struct user_namespace *, uid_t);
+extern struct user_struct * alloc_uid(kuid_t);
 static inline struct user_struct *get_uid(struct user_struct *u)
 {
 	atomic_inc(&u->__count);
 	return u;
 }
 extern void free_uid(struct user_struct *);
-extern void release_uids(struct user_namespace *ns);
 
 #include <asm/current.h>
 
@@ -2245,6 +2233,20 @@ extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
 extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
 extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);
 
+static inline void restore_saved_sigmask(void)
+{
+	if (test_and_clear_restore_sigmask())
+		__set_current_blocked(&current->saved_sigmask);
+}
+
+static inline sigset_t *sigmask_to_save(void)
+{
+	sigset_t *res = &current->blocked;
+	if (unlikely(test_restore_sigmask()))
+		res = &current->saved_sigmask;
+	return res;
+}
+
 static inline int kill_cad_pid(int sig, int priv)
 {
 	return kill_pid(cad_pid, sig, priv);
@@ -2736,7 +2738,7 @@ extern int sched_group_set_rt_period(struct task_group *tg,
 extern long sched_group_rt_period(struct task_group *tg);
 extern int sched_rt_can_attach(struct task_group *tg,
			       struct task_struct *tsk);
 #endif
-#endif
+#endif /* CONFIG_CGROUP_SCHED */
 
 extern int task_can_switch_user(struct user_struct *up,
				struct task_struct *tsk);
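
A usage note on the uid handling change above: find_user() and alloc_uid() now take a kuid_t rather than a plain uid_t, which is why <linux/uidgid.h> is pulled in at the top of the file. Below is a minimal sketch of how a caller might bridge from a userspace-supplied uid, assuming the make_kuid()/uid_valid() helpers from that header; lookup_user_example() itself is hypothetical and not part of this diff.

#include <linux/uidgid.h>
#include <linux/sched.h>

/* Hypothetical caller: resolve a userspace uid to a user_struct. */
static struct user_struct *lookup_user_example(uid_t uid)
{
	/* Map the raw uid into a kernel kuid_t in the caller's user namespace. */
	kuid_t kuid = make_kuid(current_user_ns(), uid);

	if (!uid_valid(kuid))
		return NULL;		/* uid has no mapping in this namespace */

	return find_user(kuid);		/* returns a referenced user_struct or NULL */
}

The reference taken by find_user() would be dropped with free_uid() when the caller is done with it.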
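Likewise, the new restore_saved_sigmask()/sigmask_to_save() inlines exist so that architecture signal code can stop open-coding the TIF_RESTORE_SIGMASK handling. A sketch of the intended call pattern follows, assuming hypothetical arch-side handle_signal()/do_signal()/setup_rt_frame() routines; the surrounding structure is illustrative, not taken from any particular architecture.

/* Hedged sketch of arch signal delivery built on the new helpers above. */
static void handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
			  struct pt_regs *regs)
{
	/* ->saved_sigmask if a sigsuspend()-style restore is pending,
	 * otherwise ->blocked; this replaces open-coded TIF tests. */
	sigset_t *oldset = sigmask_to_save();
	sigset_t blocked;

	if (setup_rt_frame(sig, ka, info, oldset, regs) < 0)
		return;				/* frame setup failed */

	/* Block the handler's sa_mask, plus the signal itself unless
	 * SA_NODEFER, as arch code of this era did by hand. */
	sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
	if (!(ka->sa.sa_flags & SA_NODEFER))
		sigaddset(&blocked, sig);
	set_current_blocked(&blocked);
}

static void do_signal(struct pt_regs *regs)
{
	/* ... get_signal_to_deliver() / handle_signal() elided ... */

	/* No handler was invoked: put back a mask saved by sigsuspend(),
	 * pselect() etc., instead of each arch testing the TIF flag. */
	restore_saved_sigmask();
}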
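Finally, the calc_load_enter_idle()/calc_load_exit_idle() pair declared under CONFIG_NO_HZ gives the load-average code explicit hooks around a tickless idle period. A rough sketch of where nohz idle code might call them; the function below is hypothetical.

/* Hypothetical nohz idle path: bracket the tickless period for loadavg. */
static void nohz_idle_example(void)
{
	calc_load_enter_idle();	/* fold this cpu's load before stopping the tick */

	/* ... stop the tick and wait for an interrupt ... */

	calc_load_exit_idle();	/* rejoin load accounting on the way out */
}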