Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h  81
1 file changed, 62 insertions(+), 19 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4032ec1cf83..0c3854b0d4b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -361,6 +361,7 @@ extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);
+extern void schedule_preempt_disabled(void);
extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
struct nsproxy;
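schedule_preempt_disabled() is a helper for call sites that must sleep while their preempt count is raised. A sketch of the open-coded sequence it is meant to replace (callers such as the idle loop and the mutex slowpath are illustrative, not an exhaustive list):

    /* old open-coded pattern at callers that sleep with
     * preemption disabled: */
    preempt_enable_no_resched();
    schedule();
    preempt_disable();

    /* with this helper the caller shrinks to a single call: */
    schedule_preempt_disabled();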
@@ -552,6 +553,18 @@ struct signal_struct {
int group_stop_count;
unsigned int flags; /* see SIGNAL_* flags below */
+ /*
+ * PR_SET_CHILD_SUBREAPER marks a process, like a service
+ * manager, to re-parent orphan (double-forking) child processes
+ * to this process instead of 'init'. The service manager can
+ * then receive SIGCHLD signals and investigate each such child
+ * until it calls wait(). All children of this process inherit
+ * a flag telling them to look for a child_subreaper process
+ * at exit.
+ */
+ unsigned int is_child_subreaper:1;
+ unsigned int has_child_subreaper:1;
+
/* POSIX.1b Interval Timers */
struct list_head posix_timers;
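From userspace, a service manager opts in through prctl(2). A minimal sketch, assuming a kernel and headers that carry this patch (PR_SET_CHILD_SUBREAPER comes from <linux/prctl.h>):

    #include <stdio.h>
    #include <sys/prctl.h>

    /* Mark this process as a subreaper: orphaned descendants are
     * re-parented to us instead of init, so our wait() loop still
     * sees their exit after a double fork. */
    if (prctl(PR_SET_CHILD_SUBREAPER, 1) != 0)
            perror("prctl(PR_SET_CHILD_SUBREAPER)");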
@@ -905,6 +918,7 @@ struct sched_group_power {
* single CPU.
*/
unsigned int power, power_orig;
+ unsigned long next_update;
/*
* Number of busy cpus in this group.
*/
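next_update gives the group-power refresh a time base: instead of recomputing cpu power on every balance pass, the balancer can skip the update until the stamp expires. A sketch of the rate-limit idiom, assuming jiffies-based stamping; sgp and update_group_power() mirror the scheduler's structures, but the exact call site may differ:

    /* refresh at most once per interval; the update routine is
     * expected to advance next_update past jiffies */
    if (time_after_eq(jiffies, sg->sgp->next_update))
            update_group_power(sd, cpu);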
@@ -1052,6 +1066,8 @@ static inline int test_sd_parent(struct sched_domain *sd, int flag)
unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
+bool cpus_share_cache(int this_cpu, int that_cpu);
+
#else /* CONFIG_SMP */
struct sched_domain_attr;
@@ -1061,6 +1077,12 @@ partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
struct sched_domain_attr *dattr_new)
{
}
+
+static inline bool cpus_share_cache(int this_cpu, int that_cpu)
+{
+ return true;
+}
+
#endif /* !CONFIG_SMP */
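The predicate lets wakeup code choose between a cheap local wakeup and a remote one, and the UP stub returns true so callers need no #ifdef. A plausible caller sketch (assumed shape; ttwu_queue_remote() names the scheduler-internal helper presumed here):

    /* If the waking CPU does not share a last-level cache with
     * the target, hand the task over by IPI instead of touching
     * the remote runqueue cache-cold. */
    if (!cpus_share_cache(smp_processor_id(), cpu))
            ttwu_queue_remote(p, cpu);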
@@ -1225,6 +1247,12 @@ struct sched_rt_entity {
#endif
};
+/*
+ * default timeslice is 100 msecs (used only for SCHED_RR tasks).
+ * Timeslices get refilled after they expire.
+ */
+#define RR_TIMESLICE (100 * HZ / 1000)
+
struct rcu_node;
enum perf_event_task_context {
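Because the macro converts a fixed 100 ms into jiffies, the numeric value tracks CONFIG_HZ while the wall-clock slice stays the same:

    /* RR_TIMESLICE = 100 * HZ / 1000, i.e. 100 ms worth of ticks:
     *   HZ = 1000  ->  100 jiffies
     *   HZ = 250   ->   25 jiffies
     *   HZ = 100   ->   10 jiffies
     */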
@@ -1319,6 +1347,11 @@ struct task_struct {
unsigned sched_reset_on_fork:1;
unsigned sched_contributes_to_load:1;
+#ifdef CONFIG_GENERIC_HARDIRQS
+ /* IRQ handler threads */
+ unsigned irq_thread:1;
+#endif
+
pid_t pid;
pid_t tgid;
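A single status bit is all the users of the removed irqaction pointer (further down in this diff) actually needed. A sketch of the intended use, with the teardown helper name purely illustrative:

    /* in the IRQ handler thread itself, set once at startup: */
    current->irq_thread = 1;

    /* elsewhere, a one-bit test replaces the pointer check: */
    if (unlikely(tsk->irq_thread))
            irq_thread_teardown(tsk);   /* hypothetical helper */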
@@ -1427,11 +1460,6 @@ struct task_struct {
* mempolicy */
spinlock_t alloc_lock;
-#ifdef CONFIG_GENERIC_HARDIRQS
- /* IRQ handler threads */
- struct irqaction *irqaction;
-#endif
-
/* Protection of the PI data structures: */
raw_spinlock_t pi_lock;
@@ -1498,7 +1526,7 @@ struct task_struct {
#endif
#ifdef CONFIG_CPUSETS
nodemask_t mems_allowed; /* Protected by alloc_lock */
- int mems_allowed_change_disable;
+ seqcount_t mems_allowed_seq; /* Sequence no. to catch updates */
int cpuset_mem_spread_rotor;
int cpuset_slab_spread_rotor;
#endif
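Replacing the disable counter with a seqcount turns readers into a retry loop instead of making writers wait. A read-side sketch, assuming the stock <linux/seqlock.h> API:

    nodemask_t nodes;
    unsigned int seq;

    /* re-read if a concurrent cpuset update changed mems_allowed */
    do {
            seq = read_seqcount_begin(&current->mems_allowed_seq);
            nodes = current->mems_allowed;
    } while (read_seqcount_retry(&current->mems_allowed_seq, seq));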
@@ -1777,7 +1805,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
/*
* Per process flags
*/
-#define PF_STARTING 0x00000002 /* being created */
#define PF_EXITING 0x00000004 /* getting shut down */
#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
@@ -1864,8 +1891,7 @@ extern void task_clear_jobctl_pending(struct task_struct *task,
#ifdef CONFIG_PREEMPT_RCU
#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
-#define RCU_READ_UNLOCK_BOOSTED (1 << 1) /* boosted while in RCU read-side. */
-#define RCU_READ_UNLOCK_NEED_QS (1 << 2) /* RCU core needs CPU response. */
+#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
static inline void rcu_copy_process(struct task_struct *p)
{
@@ -2049,7 +2075,7 @@ extern void sched_autogroup_fork(struct signal_struct *sig);
extern void sched_autogroup_exit(struct signal_struct *sig);
#ifdef CONFIG_PROC_FS
extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
-extern int proc_sched_autogroup_set_nice(struct task_struct *p, int *nice);
+extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
#endif
#else
static inline void sched_autogroup_create_attach(struct task_struct *p) { }
@@ -2066,12 +2092,20 @@ extern unsigned int sysctl_sched_cfs_bandwidth_slice;
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
extern void rt_mutex_adjust_pi(struct task_struct *p);
+static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+{
+ return tsk->pi_blocked_on != NULL;
+}
#else
static inline int rt_mutex_getprio(struct task_struct *p)
{
return p->normal_prio;
}
# define rt_mutex_adjust_pi(p) do { } while (0)
+static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+{
+ return false;
+}
#endif
extern bool yield_to(struct task_struct *p, bool preempt);
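tsk_is_pi_blocked() gives the scheduler a cheap "blocked on an rt_mutex?" test, and the !CONFIG_RT_MUTEXES stub lets the compiler drop the branch entirely. A plausible caller sketch (assumed shape; sched_submit_work() stands in for whatever pre-sleep hook the caller runs):

    /* Skip pre-sleep callbacks (e.g. flushing a block plug) for
     * a task blocked on an rt_mutex: it may hold locks that the
     * callbacks would need. */
    if (!tsk_is_pi_blocked(tsk))
            sched_submit_work(tsk);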
@@ -2088,9 +2122,9 @@ extern int sched_setscheduler_nocheck(struct task_struct *, int,
extern struct task_struct *idle_task(int cpu);
/**
* is_idle_task - is the specified task an idle task?
- * @tsk: the task in question.
+ * @p: the task in question.
*/
-static inline bool is_idle_task(struct task_struct *p)
+static inline bool is_idle_task(const struct task_struct *p)
{
return p->pid == 0;
}
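Taking a const pointer lets const-correct code use the predicate without a cast; the test itself is trivial, since the per-CPU idle tasks are the only tasks with PID 0:

    /* e.g. accounting code can bail out early for the idle task */
    if (is_idle_task(current))
            return;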
@@ -2259,6 +2293,12 @@ static inline void mmdrop(struct mm_struct * mm)
extern void mmput(struct mm_struct *);
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
+/*
+ * Grab a reference to a task's mm, if it is not already going away
+ * and the ptrace_may_access() check with the given mode succeeds.
+ */
+extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current tasks stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);
/* Allocate a new mm structure and copy contents from tsk->mm */
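mm_access() bundles get_task_mm() with a ptrace_may_access() check so callers such as /proc files cannot peek at an mm they may not trace. A calling sketch, assuming the convention that NULL means "no mm" and an ERR_PTR means "permission denied":

    struct mm_struct *mm;

    mm = mm_access(task, PTRACE_MODE_READ);
    if (IS_ERR_OR_NULL(mm))
            return mm ? PTR_ERR(mm) : -ESRCH;

    /* ... inspect the mm ... */
    mmput(mm);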
@@ -2365,7 +2405,7 @@ static inline int thread_group_empty(struct task_struct *p)
* Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
* subscriptions and synchronises with wait4(). Also used in procfs. Also
* pins the final release of task.io_context. Also protects ->cpuset and
- * ->cgroup.subsys[].
+ * ->cgroup.subsys[]. And ->vfork_done.
*
* Nests both inside and outside of read_lock(&tasklist_lock).
* It must not be nested with write_lock_irq(&tasklist_lock),
@@ -2384,12 +2424,15 @@ static inline void task_unlock(struct task_struct *p)
extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
unsigned long *flags);
-#define lock_task_sighand(tsk, flags) \
-({ struct sighand_struct *__ss; \
- __cond_lock(&(tsk)->sighand->siglock, \
- (__ss = __lock_task_sighand(tsk, flags))); \
- __ss; \
-}) \
+static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
+ unsigned long *flags)
+{
+ struct sighand_struct *ret;
+
+ ret = __lock_task_sighand(tsk, flags);
+ (void)__cond_lock(&tsk->sighand->siglock, ret);
+ return ret;
+}
static inline void unlock_task_sighand(struct task_struct *tsk,
unsigned long *flags)
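Callers are unaffected by the macro-to-inline conversion, which keeps the sparse __cond_lock() annotation while gaining type checking; the usual pattern stays:

    unsigned long flags;
    struct sighand_struct *sighand;

    /* NULL means the task is exiting and its sighand is gone */
    sighand = lock_task_sighand(tsk, &flags);
    if (sighand) {
            /* ->siglock held; ->sighand is stable here */
            unlock_task_sighand(tsk, &flags);
    }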