Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h  113
1 file changed, 24 insertions(+), 89 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 228e0a8ce24..c204ab0d4df 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1,8 +1,6 @@
#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H
-#include <linux/auxvec.h> /* For AT_VECTOR_SIZE */
-
/*
* cloning flags:
*/
@@ -58,12 +56,12 @@ struct sched_param {
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
+#include <linux/mm_types.h>
#include <asm/system.h>
#include <asm/semaphore.h>
#include <asm/page.h>
#include <asm/ptrace.h>
-#include <asm/mmu.h>
#include <asm/cputime.h>
#include <linux/smp.h>
@@ -76,6 +74,7 @@ struct sched_param {
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
+#include <linux/proportions.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/futex.h>
@@ -262,6 +261,7 @@ extern void softlockup_tick(void);
extern void spawn_softlockup_task(void);
extern void touch_softlockup_watchdog(void);
extern void touch_all_softlockup_watchdogs(void);
+extern int softlockup_thresh;
#else
static inline void softlockup_tick(void)
{
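The newly exported softlockup_thresh makes the detection window tunable (it is hooked up to a sysctl elsewhere in the series). A minimal sketch of how the watchdog tick consumes it; get_timestamp(), this_cpu, and the per-CPU touch_timestamp are approximated from kernel/softlockup.c of this era, not quoted verbatim:

	/* sketch: inside softlockup_tick(), once per timer interrupt */
	unsigned long now = get_timestamp(this_cpu);	/* in seconds */
	unsigned long touch_ts = per_cpu(touch_timestamp, this_cpu);

	if (now > touch_ts + softlockup_thresh)		/* stuck too long? */
		printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus!\n",
			this_cpu, now - touch_ts);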
@@ -319,7 +319,6 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
-typedef atomic_long_t mm_counter_t;
#else /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
/*
@@ -331,7 +330,6 @@ typedef atomic_long_t mm_counter_t;
#define add_mm_counter(mm, member, value) (mm)->_##member += (value)
#define inc_mm_counter(mm, member) (mm)->_##member++
#define dec_mm_counter(mm, member) (mm)->_##member--
-typedef unsigned long mm_counter_t;
#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
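Only the accessor macros remain here; the mm_counter_t typedefs move to linux/mm_types.h along with struct mm_struct itself. The ## token pasting selects the field by name, so, for example:

	/* with split page-table locks (atomic flavour), this ... */
	inc_mm_counter(mm, anon_rss);
	/* ... expands to: */
	atomic_long_inc(&(mm)->_anon_rss);

	/* without split locks (plain flavour), the same call becomes: */
	(mm)->_anon_rss++;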
@@ -361,81 +359,14 @@ extern int get_dumpable(struct mm_struct *mm);
#define MMF_DUMP_ANON_SHARED 3
#define MMF_DUMP_MAPPED_PRIVATE 4
#define MMF_DUMP_MAPPED_SHARED 5
+#define MMF_DUMP_ELF_HEADERS 6
#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS
-#define MMF_DUMP_FILTER_BITS 4
+#define MMF_DUMP_FILTER_BITS 5
#define MMF_DUMP_FILTER_MASK \
(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED))
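MMF_DUMPABLE_BITS is 2, so widening the filter to 5 bits grows the mask to cover the new bit: MMF_DUMP_FILTER_MASK = ((1 << 5) - 1) << 2 = 0x7c, while the default stays (1 << 2) | (1 << 3) = 0x0c. A hedged sketch of how a coredump path would honour the new bit; the helper name is illustrative, not from this patch:

	/* mm->flags must be accessed with atomic bitops */
	static int dump_elf_headers_wanted(struct mm_struct *mm)
	{
		return test_bit(MMF_DUMP_ELF_HEADERS, &mm->flags);
	}

Userspace toggles these bits per process through /proc/<pid>/coredump_filter, whose value is shifted by MMF_DUMP_FILTER_SHIFT before landing in mm->flags.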
-struct mm_struct {
- struct vm_area_struct * mmap; /* list of VMAs */
- struct rb_root mm_rb;
- struct vm_area_struct * mmap_cache; /* last find_vma result */
- unsigned long (*get_unmapped_area) (struct file *filp,
- unsigned long addr, unsigned long len,
- unsigned long pgoff, unsigned long flags);
- void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
- unsigned long mmap_base; /* base of mmap area */
- unsigned long task_size; /* size of task vm space */
- unsigned long cached_hole_size; /* if non-zero, the largest hole below free_area_cache */
- unsigned long free_area_cache; /* first hole of size cached_hole_size or larger */
- pgd_t * pgd;
- atomic_t mm_users; /* How many users with user space? */
- atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */
- int map_count; /* number of VMAs */
- struct rw_semaphore mmap_sem;
- spinlock_t page_table_lock; /* Protects page tables and some counters */
-
- struct list_head mmlist; /* List of maybe swapped mm's. These are globally strung
- * together off init_mm.mmlist, and are protected
- * by mmlist_lock
- */
-
- /* Special counters, in some configurations protected by the
- * page_table_lock, in other configurations by being atomic.
- */
- mm_counter_t _file_rss;
- mm_counter_t _anon_rss;
-
- unsigned long hiwater_rss; /* High-watermark of RSS usage */
- unsigned long hiwater_vm; /* High-water virtual memory usage */
-
- unsigned long total_vm, locked_vm, shared_vm, exec_vm;
- unsigned long stack_vm, reserved_vm, def_flags, nr_ptes;
- unsigned long start_code, end_code, start_data, end_data;
- unsigned long start_brk, brk, start_stack;
- unsigned long arg_start, arg_end, env_start, env_end;
-
- unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
-
- cpumask_t cpu_vm_mask;
-
- /* Architecture-specific MM context */
- mm_context_t context;
-
- /* Swap token stuff */
- /*
- * Last value of global fault stamp as seen by this process.
- * In other words, this value gives an indication of how long
- * it has been since this task got the token.
- * Look at mm/thrash.c
- */
- unsigned int faultstamp;
- unsigned int token_priority;
- unsigned int last_interval;
-
- unsigned long flags; /* Must use atomic bitops to access the bits */
-
- /* coredumping support */
- int core_waiters;
- struct completion *core_startup_done, core_done;
-
- /* aio bits */
- rwlock_t ioctx_list_lock;
- struct kioctx *ioctx_list;
-};
-
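The struct itself is only moving: it now lives in linux/mm_types.h, matching the include added at the top of this file. While it scrolls past, a sketch of how its two reference counts are used, with mmput()/mmdrop() as declared elsewhere in this header:

	/* another user of the address space itself */
	atomic_inc(&mm->mm_users);
	mmput(mm);		/* may tear down the address space */

	/* pin only the struct, e.g. for lazy-TLB kernel threads */
	atomic_inc(&mm->mm_count);
	mmdrop(mm);		/* may free the mm_struct itself */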
struct sighand_struct {
atomic_t count;
struct k_sigaction action[_NSIG];
@@ -587,8 +518,10 @@ struct user_struct {
atomic_t inotify_watches; /* How many inotify watches does this user have? */
atomic_t inotify_devs; /* How many inotify devs does this user have opened? */
#endif
+#ifdef CONFIG_POSIX_MQUEUE
/* protected by mq_lock */
unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
+#endif
unsigned long locked_shm; /* How many pages of mlocked shm ? */
#ifdef CONFIG_KEYS
@@ -602,10 +535,12 @@ struct user_struct {
#ifdef CONFIG_FAIR_USER_SCHED
struct task_group *tg;
+#ifdef CONFIG_SYSFS
struct kset kset;
struct subsys_attribute user_attr;
struct work_struct work;
#endif
+#endif
};
#ifdef CONFIG_FAIR_USER_SCHED
@@ -801,9 +736,6 @@ struct sched_domain {
#endif
};
-extern int partition_sched_domains(cpumask_t *partition1,
- cpumask_t *partition2);
-
#endif /* CONFIG_SMP */
/*
@@ -988,6 +920,16 @@ struct task_struct {
#endif
unsigned short ioprio;
+ /*
+ * fpu_counter contains the number of consecutive context switches
+ * during which the FPU was used. Once this passes a threshold, lazy
+ * FPU saving becomes eager, saving the cost of the trap. It is an
+ * unsigned char so that after 256 switches the counter wraps and the
+ * behaviour turns lazy again; this deals with bursty apps that only
+ * use the FPU for a short time.
+ */
+ unsigned char fpu_counter;
+ s8 oomkilladj; /* OOM kill score adjustment (bit shift). */
#ifdef CONFIG_BLK_DEV_IO_TRACE
unsigned int btrace_seq;
#endif
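A hedged sketch of the consumer, modelled on i386's __switch_to() from this era (not part of this hunk):

	/* task used the FPU on its last 5+ timeslices: restore the math
	 * state eagerly instead of waiting for the device-not-available
	 * trap to do it lazily */
	if (next_p->fpu_counter > 5)
		math_state_restore();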
@@ -1073,16 +1015,6 @@ struct task_struct {
struct key *thread_keyring; /* keyring private to this thread */
unsigned char jit_keyring; /* default keyring to attach requested keys to */
#endif
- /*
- * fpu_counter contains the number of consecutive context switches
- * that the FPU is used. If this is over a threshold, the lazy fpu
- * saving becomes unlazy to save the trap. This is an unsigned char
- * so that after 256 times the counter wraps and the behavior turns
- * lazy again; this to deal with bursty apps that only use FPU for
- * a short time
- */
- unsigned char fpu_counter;
- int oomkilladj; /* OOM kill score adjustment (bit shift). */
char comm[TASK_COMM_LEN]; /* executable name excluding path
- access with [gs]et_task_comm (which lock
it with task_lock())
@@ -1114,8 +1046,9 @@ struct task_struct {
int (*notifier)(void *priv);
void *notifier_data;
sigset_t *notifier_mask;
-
+#ifdef CONFIG_SECURITY
void *security;
+#endif
struct audit_context *audit_context;
seccomp_t seccomp;
@@ -1204,13 +1137,14 @@ struct task_struct {
int cpuset_mems_generation;
int cpuset_mem_spread_rotor;
#endif
+#ifdef CONFIG_FUTEX
struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
struct compat_robust_list_head __user *compat_robust_list;
#endif
struct list_head pi_state_list;
struct futex_pi_state *pi_state_cache;
-
+#endif
atomic_t fs_excl; /* holding fs exclusive resources */
struct rcu_head rcu;
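robust_list is only ever populated from userspace, so compiling the fields out under CONFIG_FUTEX is safe: the registering syscall disappears with the same option. A minimal user-side sketch (glibc normally issues this for every thread):

	#include <linux/futex.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static struct robust_list_head head = {
		.list = { &head.list },	/* empty circular list */
		.futex_offset = 0,
	};

	/* hand the per-thread robust futex list to the kernel */
	syscall(SYS_set_robust_list, &head, sizeof(head));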
@@ -1224,6 +1158,7 @@ struct task_struct {
#ifdef CONFIG_FAULT_INJECTION
int make_it_fail;
#endif
+ struct prop_local_single dirties;
};
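The new dirties field carries the per-task dirty-page proportion that the include of linux/proportions.h above supports. It needs per-task initialisation; a sketch of the expected hook in copy_process(), with the placement and error label assumed, while prop_local_init_single() is the real proportions.h API:

	/* in kernel/fork.c, while setting up the new task */
	err = prop_local_init_single(&p->dirties);
	if (err)
		goto bad_fork_cleanup;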
/*