author      Ingo Molnar <mingo@kernel.org>    2012-08-21 11:27:00 +0200
committer   Ingo Molnar <mingo@kernel.org>    2012-08-21 11:27:00 +0200
commit      bcada3d4b8c96b8792c2306f363992ca5ab9da42
tree        e420679a5db6ea4e1694eef57f9abb6acac8d4d3    /include/linux/sched.h
parent      26198c21d1b286a084fe5d514a30bc7e6c712a34
parent      000078bc3ee69efb1124b8478c7527389a826074
Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core
Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:
* Fix include order for bison/flex-generated C files, from Ben Hutchings
* Build fixes and documentation corrections from David Ahern
* Group parsing support, from Jiri Olsa
* UI/gtk refactorings and improvements from Namhyung Kim
* NULL deref fix for perf script, from Namhyung Kim
* Assorted cleanups from Robert Richter
* Let 'make O=' handle relative paths, from Steven Rostedt
* perf script python fixes, from Feng Tang.
* Improve 'perf lock' error message when the needed tracepoints
are not present, from David Ahern.
* Initial bash completion support, from Frederic Weisbecker
* Allow building without libelf, from Namhyung Kim.
* Support DWARF CFI based unwind to have callchains when %bp
based unwinding is not possible, from Jiri Olsa.
* Symbol resolution fixes: while the end goal was fixing support for PPC64 files
  with an .opd ELF section, several fixes and cleanups for the code that handles
  all architectures are included, from Cody Schafer.
* Add a description for the JIT interface, from Andi Kleen.
* Assorted fixes for Documentation and for building on 32-bit, from Robert Richter
* Add support for non-tracepoint events in perf script python, from Feng Tang
* Cache the libtraceevent event_format associated with each evsel early, so that we
  avoid relookups, i.e. calling pevent_find_event repeatedly when processing
  tracepoint events.
  [ This is to reduce the surface contact with libtraceevent and make clear what
  the perf tools need from that lib: so far, parsing the common and per-event
  fields. A rough sketch of the caching idea follows below. ]
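A minimal sketch of that caching idea (illustration only, not the merged perf
code: the "tp_format" field and the get_tp_format() helper are assumed names,
and it uses the old libtraceevent pevent API):

	/* Assumes <linux/perf_event.h> and libtraceevent's event-parse.h
	 * (old pevent API). The struct and helper names are illustrative. */
	struct cached_evsel {
		struct perf_event_attr attr;	/* attr.config holds the tracepoint id */
		struct event_format *tp_format;	/* cached descriptor, NULL until first use */
	};

	static struct event_format *get_tp_format(struct pevent *pevent,
						  struct cached_evsel *evsel)
	{
		/* Resolve the event once and remember it, instead of calling
		 * pevent_find_event() for every sample that is processed. */
		if (evsel->tp_format == NULL)
			evsel->tp_format = pevent_find_event(pevent, (int)evsel->attr.config);
		return evsel->tp_format;
	}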
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--   include/linux/sched.h   31
1 file changed, 27 insertions(+), 4 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 64d9df5c3a4..c147e7024f1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -334,6 +334,14 @@ static inline void lockup_detector_init(void)
 }
 #endif
 
+#if defined(CONFIG_LOCKUP_DETECTOR) && defined(CONFIG_SUSPEND)
+void lockup_detector_bootcpu_resume(void);
+#else
+static inline void lockup_detector_bootcpu_resume(void)
+{
+}
+#endif
+
 #ifdef CONFIG_DETECT_HUNG_TASK
 extern unsigned int sysctl_hung_task_panic;
 extern unsigned long sysctl_hung_task_check_count;
@@ -406,6 +414,11 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
 extern void set_dumpable(struct mm_struct *mm, int value);
 extern int get_dumpable(struct mm_struct *mm);
 
+/* get/set_dumpable() values */
+#define SUID_DUMPABLE_DISABLED	0
+#define SUID_DUMPABLE_ENABLED	1
+#define SUID_DUMPABLE_SAFE	2
+
 /* mm flags */
 /* dumpable bits */
 #define MMF_DUMPABLE	0	/* core dump is permitted */
@@ -949,6 +962,7 @@ struct sched_domain {
 	unsigned int smt_gain;
 	int flags;			/* See SD_* */
 	int level;
+	int idle_buddy;			/* cpu assigned to select_idle_sibling() */
 
 	/* Runtime fields. */
 	unsigned long last_balance;	/* init to jiffies. units in jiffies */
@@ -1244,6 +1258,9 @@ struct task_struct {
 	const struct sched_class *sched_class;
 	struct sched_entity se;
 	struct sched_rt_entity rt;
+#ifdef CONFIG_CGROUP_SCHED
+	struct task_group *sched_task_group;
+#endif
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 	/* list of struct preempt_notifier: */
@@ -1405,7 +1422,7 @@ struct task_struct {
 	int (*notifier)(void *priv);
 	void *notifier_data;
 	sigset_t *notifier_mask;
-	struct hlist_head task_works;
+	struct callback_head *task_works;
 
 	struct audit_context *audit_context;
 #ifdef CONFIG_AUDITSYSCALL
@@ -1546,7 +1563,6 @@ struct task_struct {
 	unsigned long timer_slack_ns;
 	unsigned long default_timer_slack_ns;
 
-	struct list_head *scm_work_list;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	/* Index of current stored address in ret_stack */
 	int curr_ret_stack;
@@ -1568,7 +1584,7 @@ struct task_struct {
 	/* bitmask and counter of trace recursion */
 	unsigned long trace_recursion;
 #endif /* CONFIG_TRACING */
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
+#ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
 	struct memcg_batch_info {
 		int do_batch;	/* incremented when batch uncharge started */
 		struct mem_cgroup *memcg; /* target memcg of uncharge */
@@ -1878,6 +1894,13 @@ static inline void rcu_copy_process(struct task_struct *p)
 
 #endif
 
+static inline void tsk_restore_flags(struct task_struct *task,
+				unsigned long orig_flags, unsigned long flags)
+{
+	task->flags &= ~flags;
+	task->flags |= orig_flags & flags;
+}
+
 #ifdef CONFIG_SMP
 extern void do_set_cpus_allowed(struct task_struct *p,
 				const struct cpumask *new_mask);
@@ -2722,7 +2745,7 @@ extern int sched_group_set_rt_period(struct task_group *tg,
 extern long sched_group_rt_period(struct task_group *tg);
 extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
 #endif
-#endif
+#endif /* CONFIG_CGROUP_SCHED */
 
 extern int task_can_switch_user(struct user_struct *up,
 				struct task_struct *tsk);
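The tsk_restore_flags() helper added above clears the bits named in "flags" and
restores them from "orig_flags", leaving all other task flags untouched. A usage
sketch (not code from this commit; PF_MEMALLOC is just one plausible bit to save
and restore this way):

	/* Save the caller's PF_MEMALLOC setting, run with it forced on,
	 * then restore only that one bit to its original value. */
	unsigned long pflags = current->flags;

	current->flags |= PF_MEMALLOC;
	/* ... work that must not recurse into memory reclaim ... */
	tsk_restore_flags(current, pflags, PF_MEMALLOC);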