-rw-r--r--  arch/x86/kernel/entry_32.S    |   3
-rw-r--r--  kernel/rcupdate.c             |   2
-rw-r--r--  kernel/trace/ftrace.c         |  17
-rw-r--r--  kernel/trace/trace.c          |  37
-rw-r--r--  kernel/trace/trace.h          |   1
-rw-r--r--  kernel/trace/trace_events.c   | 207
-rw-r--r--  kernel/trace/trace_syscalls.c |  10
7 files changed, 68 insertions(+), 209 deletions(-)
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 2cfbc3a3a2d..f0dcb0ceb6a 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1176,6 +1176,9 @@ ftrace_restore_flags:
 #else /* ! CONFIG_DYNAMIC_FTRACE */
 
 ENTRY(mcount)
+	cmpl $__PAGE_OFFSET, %esp
+	jb ftrace_stub		/* Paging not enabled yet? */
+
 	cmpl $0, function_trace_stop
 	jne ftrace_stub
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 33eb4620aa1..b02a339836b 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -122,7 +122,7 @@ struct lockdep_map rcu_sched_lock_map =
 	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
 EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
 
-int debug_lockdep_rcu_enabled(void)
+int notrace debug_lockdep_rcu_enabled(void)
 {
 	return rcu_scheduler_active && debug_locks &&
 	       current->lockdep_recursion == 0;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index a6d098c6df3..03cf44ac54d 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1978,12 +1978,27 @@ int __weak ftrace_arch_code_modify_post_process(void)
 
 void ftrace_modify_all_code(int command)
 {
+	int update = command & FTRACE_UPDATE_TRACE_FUNC;
+
+	/*
+	 * If the ftrace_caller calls a ftrace_ops func directly,
+	 * we need to make sure that it only traces functions it
+	 * expects to trace. When doing the switch of functions,
+	 * we need to update to the ftrace_ops_list_func first
+	 * before the transition between old and new calls are set,
+	 * as the ftrace_ops_list_func will check the ops hashes
+	 * to make sure the ops are having the right functions
+	 * traced.
+	 */
+	if (update)
+		ftrace_update_ftrace_func(ftrace_ops_list_func);
+
 	if (command & FTRACE_UPDATE_CALLS)
 		ftrace_replace_code(1);
 	else if (command & FTRACE_DISABLE_CALLS)
 		ftrace_replace_code(0);
 
-	if (command & FTRACE_UPDATE_TRACE_FUNC)
+	if (update && ftrace_trace_function != ftrace_ops_list_func)
 		ftrace_update_ftrace_func(ftrace_trace_function);
 
 	if (command & FTRACE_START_FUNC_RET)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 496f94d5769..7974ba20557 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3166,11 +3166,6 @@ static const struct file_operations show_traces_fops = {
 };
 
 /*
- * Only trace on a CPU if the bitmask is set:
- */
-static cpumask_var_t tracing_cpumask;
-
-/*
  * The tracer itself will not take this lock, but still we want
  * to provide a consistent cpumask to user-space:
  */
@@ -3186,11 +3181,12 @@ static ssize_t
 tracing_cpumask_read(struct file *filp, char __user *ubuf,
 		     size_t count, loff_t *ppos)
 {
+	struct trace_array *tr = file_inode(filp)->i_private;
 	int len;
 
 	mutex_lock(&tracing_cpumask_update_lock);
 
-	len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
+	len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
 	if (count - len < 2) {
 		count = -EINVAL;
 		goto out_err;
@@ -3208,7 +3204,7 @@ static ssize_t
 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 		      size_t count, loff_t *ppos)
 {
-	struct trace_array *tr = filp->private_data;
+	struct trace_array *tr = file_inode(filp)->i_private;
 	cpumask_var_t tracing_cpumask_new;
 	int err, cpu;
@@ -3228,12 +3224,12 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 		 * Increase/decrease the disabled counter if we are
 		 * about to flip a bit in the cpumask:
 		 */
-		if (cpumask_test_cpu(cpu, tracing_cpumask) &&
+		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
 				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
 			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
 			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
 		}
-		if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
+		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
 				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
 			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
 			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
@@ -3242,7 +3238,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 	arch_spin_unlock(&ftrace_max_lock);
 	local_irq_enable();
 
-	cpumask_copy(tracing_cpumask, tracing_cpumask_new);
+	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
 	mutex_unlock(&tracing_cpumask_update_lock);
 	free_cpumask_var(tracing_cpumask_new);
@@ -3256,9 +3252,10 @@ err_unlock:
 }
 
 static const struct file_operations tracing_cpumask_fops = {
-	.open		= tracing_open_generic,
+	.open		= tracing_open_generic_tr,
 	.read		= tracing_cpumask_read,
 	.write		= tracing_cpumask_write,
+	.release	= tracing_release_generic_tr,
 	.llseek		= generic_file_llseek,
 };
@@ -5938,6 +5935,11 @@ static int new_instance_create(const char *name)
 	if (!tr->name)
 		goto out_free_tr;
 
+	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
+		goto out_free_tr;
+
+	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
+
 	raw_spin_lock_init(&tr->start_lock);
 
 	tr->current_trace = &nop_trace;
@@ -5969,6 +5971,7 @@ static int new_instance_create(const char *name)
  out_free_tr:
 	if (tr->trace_buffer.buffer)
 		ring_buffer_free(tr->trace_buffer.buffer);
+	free_cpumask_var(tr->tracing_cpumask);
 	kfree(tr->name);
 	kfree(tr);
@@ -6098,6 +6101,9 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
 {
 	int cpu;
 
+	trace_create_file("tracing_cpumask", 0644, d_tracer,
+			  tr, &tracing_cpumask_fops);
+
 	trace_create_file("trace_options", 0644, d_tracer,
 			  tr, &tracing_iter_fops);
@@ -6147,9 +6153,6 @@ static __init int tracer_init_debugfs(void)
 
 	init_tracer_debugfs(&global_trace, d_tracer);
 
-	trace_create_file("tracing_cpumask", 0644, d_tracer,
-			&global_trace, &tracing_cpumask_fops);
-
 	trace_create_file("available_tracers", 0444, d_tracer,
 			&global_trace, &show_traces_fops);
@@ -6371,7 +6374,7 @@ __init static int tracer_alloc_buffers(void)
 	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
 		goto out;
 
-	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
+	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
 		goto out_free_buffer_mask;
 
 	/* Only allocate trace_printk buffers if a trace_printk exists */
@@ -6386,7 +6389,7 @@ __init static int tracer_alloc_buffers(void)
 		ring_buf_size = 1;
 
 	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
-	cpumask_copy(tracing_cpumask, cpu_all_mask);
+	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
 
 	raw_spin_lock_init(&global_trace.start_lock);
@@ -6441,7 +6444,7 @@ out_free_cpumask:
 #ifdef CONFIG_TRACER_MAX_TRACE
 	free_percpu(global_trace.max_buffer.data);
 #endif
-	free_cpumask_var(tracing_cpumask);
+	free_cpumask_var(global_trace.tracing_cpumask);
 out_free_buffer_mask:
 	free_cpumask_var(tracing_buffer_mask);
 out:
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index fe39acd4c1a..10c86fb7a2b 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -206,6 +206,7 @@ struct trace_array {
 	struct dentry		*event_dir;
 	struct list_head	systems;
 	struct list_head	events;
+	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
 	int			ref;
 };
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 29a7ebcfb42..368a4d50cc3 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1489,12 +1489,7 @@ event_subsystem_dir(struct trace_array *tr, const char *name,
 }
 
 static int
-event_create_dir(struct dentry *parent,
-		 struct ftrace_event_file *file,
-		 const struct file_operations *id,
-		 const struct file_operations *enable,
-		 const struct file_operations *filter,
-		 const struct file_operations *format)
+event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
 {
 	struct ftrace_event_call *call = file->event_call;
 	struct trace_array *tr = file->tr;
@@ -1522,12 +1517,13 @@ event_create_dir(struct dentry *parent,
 	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
 		trace_create_file("enable", 0644, file->dir, file,
-				  enable);
+				  &ftrace_enable_fops);
 
 #ifdef CONFIG_PERF_EVENTS
 	if (call->event.type && call->class->reg)
 		trace_create_file("id", 0444, file->dir,
-				  (void *)(long)call->event.type, id);
+				  (void *)(long)call->event.type,
+				  &ftrace_event_id_fops);
 #endif
 
 	/*
@@ -1544,10 +1540,10 @@ event_create_dir(struct dentry *parent,
 		}
 	}
 	trace_create_file("filter", 0644, file->dir, call,
-			  filter);
+			  &ftrace_event_filter_fops);
 
 	trace_create_file("format", 0444, file->dir, call,
-			  format);
+			  &ftrace_event_format_fops);
 
 	return 0;
 }
@@ -1648,12 +1644,7 @@ trace_create_new_event(struct ftrace_event_call *call,
 
 /* Add an event to a trace directory */
 static int
-__trace_add_new_event(struct ftrace_event_call *call,
-		      struct trace_array *tr,
-		      const struct file_operations *id,
-		      const struct file_operations *enable,
-		      const struct file_operations *filter,
-		      const struct file_operations *format)
+__trace_add_new_event(struct ftrace_event_call *call, struct trace_array *tr)
 {
 	struct ftrace_event_file *file;
@@ -1661,7 +1652,7 @@ __trace_add_new_event(struct ftrace_event_call *call,
 	if (!file)
 		return -ENOMEM;
 
-	return event_create_dir(tr->event_dir, file, id, enable, filter, format);
+	return event_create_dir(tr->event_dir, file);
 }
 
 /*
@@ -1683,8 +1674,7 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
 }
 
 struct ftrace_module_file_ops;
-static void __add_event_to_tracers(struct ftrace_event_call *call,
-				   struct ftrace_module_file_ops *file_ops);
+static void __add_event_to_tracers(struct ftrace_event_call *call);
 
 /* Add an additional event_call dynamically */
 int trace_add_event_call(struct ftrace_event_call *call)
@@ -1695,7 +1685,7 @@ int trace_add_event_call(struct ftrace_event_call *call)
 
 	ret = __register_event(call, NULL);
 	if (ret >= 0)
-		__add_event_to_tracers(call, NULL);
+		__add_event_to_tracers(call);
 
 	mutex_unlock(&event_mutex);
 	mutex_unlock(&trace_types_lock);
@@ -1769,100 +1759,21 @@ int trace_remove_event_call(struct ftrace_event_call *call)
 
 #ifdef CONFIG_MODULES
 
-static LIST_HEAD(ftrace_module_file_list);
-
-/*
- * Modules must own their file_operations to keep up with
- * reference counting.
- */
-struct ftrace_module_file_ops {
-	struct list_head		list;
-	struct module			*mod;
-	struct file_operations		id;
-	struct file_operations		enable;
-	struct file_operations		format;
-	struct file_operations		filter;
-};
-
-static struct ftrace_module_file_ops *
-find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
-{
-	/*
-	 * As event_calls are added in groups by module,
-	 * when we find one file_ops, we don't need to search for
-	 * each call in that module, as the rest should be the
-	 * same. Only search for a new one if the last one did
-	 * not match.
-	 */
-	if (file_ops && mod == file_ops->mod)
-		return file_ops;
-
-	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
-		if (file_ops->mod == mod)
-			return file_ops;
-	}
-	return NULL;
-}
-
-static struct ftrace_module_file_ops *
-trace_create_file_ops(struct module *mod)
-{
-	struct ftrace_module_file_ops *file_ops;
-
-	/*
-	 * This is a bit of a PITA. To allow for correct reference
-	 * counting, modules must "own" their file_operations.
-	 * To do this, we allocate the file operations that will be
-	 * used in the event directory.
-	 */
-
-	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
-	if (!file_ops)
-		return NULL;
-
-	file_ops->mod = mod;
-
-	file_ops->id = ftrace_event_id_fops;
-	file_ops->id.owner = mod;
-
-	file_ops->enable = ftrace_enable_fops;
-	file_ops->enable.owner = mod;
-
-	file_ops->filter = ftrace_event_filter_fops;
-	file_ops->filter.owner = mod;
-
-	file_ops->format = ftrace_event_format_fops;
-	file_ops->format.owner = mod;
-
-	list_add(&file_ops->list, &ftrace_module_file_list);
-
-	return file_ops;
-}
-
 static void trace_module_add_events(struct module *mod)
 {
-	struct ftrace_module_file_ops *file_ops = NULL;
 	struct ftrace_event_call **call, **start, **end;
 
 	start = mod->trace_events;
 	end = mod->trace_events + mod->num_trace_events;
 
-	if (start == end)
-		return;
-
-	file_ops = trace_create_file_ops(mod);
-	if (!file_ops)
-		return;
-
 	for_each_event(call, start, end) {
 		__register_event(*call, mod);
-		__add_event_to_tracers(*call, file_ops);
+		__add_event_to_tracers(*call);
 	}
 }
 
 static void trace_module_remove_events(struct module *mod)
 {
-	struct ftrace_module_file_ops *file_ops;
 	struct ftrace_event_call *call, *p;
 	bool clear_trace = false;
@@ -1874,16 +1785,6 @@ static void trace_module_remove_events(struct module *mod)
 			__trace_remove_event_call(call);
 		}
 	}
-
-	/* Now free the file_operations */
-	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
-		if (file_ops->mod == mod)
-			break;
-	}
-	if (&file_ops->list != &ftrace_module_file_list) {
-		list_del(&file_ops->list);
-		kfree(file_ops);
-	}
 	up_write(&trace_event_sem);
 
 	/*
@@ -1919,67 +1820,21 @@ static int trace_module_notify(struct notifier_block *self,
 	return 0;
 }
 
-static int
-__trace_add_new_mod_event(struct ftrace_event_call *call,
-			  struct trace_array *tr,
-			  struct ftrace_module_file_ops *file_ops)
-{
-	return __trace_add_new_event(call, tr,
-				     &file_ops->id, &file_ops->enable,
-				     &file_ops->filter, &file_ops->format);
-}
-
-#else
-static inline struct ftrace_module_file_ops *
-find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
-{
-	return NULL;
-}
-static inline int trace_module_notify(struct notifier_block *self,
-				      unsigned long val, void *data)
-{
-	return 0;
-}
-static inline int
-__trace_add_new_mod_event(struct ftrace_event_call *call,
-			  struct trace_array *tr,
-			  struct ftrace_module_file_ops *file_ops)
-{
-	return -ENODEV;
-}
+static struct notifier_block trace_module_nb = {
+	.notifier_call = trace_module_notify,
+	.priority = 0,
+};
 #endif /* CONFIG_MODULES */
 
 /* Create a new event directory structure for a trace directory. */
 static void
 __trace_add_event_dirs(struct trace_array *tr)
 {
-	struct ftrace_module_file_ops *file_ops = NULL;
 	struct ftrace_event_call *call;
 	int ret;
 
 	list_for_each_entry(call, &ftrace_events, list) {
-		if (call->mod) {
-			/*
-			 * Directories for events by modules need to
-			 * keep module ref counts when opened (as we don't
-			 * want the module to disappear when reading one
-			 * of these files). The file_ops keep account of
-			 * the module ref count.
-			 */
-			file_ops = find_ftrace_file_ops(file_ops, call->mod);
-			if (!file_ops)
-				continue; /* Warn? */
-			ret = __trace_add_new_mod_event(call, tr, file_ops);
-			if (ret < 0)
-				pr_warning("Could not create directory for event %s\n",
-					   call->name);
-			continue;
-		}
-		ret = __trace_add_new_event(call, tr,
-					    &ftrace_event_id_fops,
-					    &ftrace_enable_fops,
-					    &ftrace_event_filter_fops,
-					    &ftrace_event_format_fops);
+		ret = __trace_add_new_event(call, tr);
 		if (ret < 0)
 			pr_warning("Could not create directory for event %s\n",
 				   call->name);
@@ -2287,11 +2142,7 @@ __trace_early_add_event_dirs(struct trace_array *tr)
 
 	list_for_each_entry(file, &tr->events, list) {
 
-		ret = event_create_dir(tr->event_dir, file,
-				       &ftrace_event_id_fops,
-				       &ftrace_enable_fops,
-				       &ftrace_event_filter_fops,
-				       &ftrace_event_format_fops);
+		ret = event_create_dir(tr->event_dir, file);
 		if (ret < 0)
 			pr_warning("Could not create directory for event %s\n",
 				   file->event_call->name);
@@ -2332,29 +2183,14 @@ __trace_remove_event_dirs(struct trace_array *tr)
 		remove_event_file_dir(file);
 }
 
-static void
-__add_event_to_tracers(struct ftrace_event_call *call,
-		       struct ftrace_module_file_ops *file_ops)
+static void __add_event_to_tracers(struct ftrace_event_call *call)
 {
 	struct trace_array *tr;
 
-	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
-		if (file_ops)
-			__trace_add_new_mod_event(call, tr, file_ops);
-		else
-			__trace_add_new_event(call, tr,
-					      &ftrace_event_id_fops,
-					      &ftrace_enable_fops,
-					      &ftrace_event_filter_fops,
-					      &ftrace_event_format_fops);
-	}
+	list_for_each_entry(tr, &ftrace_trace_arrays, list)
+		__trace_add_new_event(call, tr);
 }
 
-static struct notifier_block trace_module_nb = {
-	.notifier_call = trace_module_notify,
-	.priority = 0,
-};
-
 extern struct ftrace_event_call *__start_ftrace_events[];
 extern struct ftrace_event_call *__stop_ftrace_events[];
@@ -2559,10 +2395,11 @@ static __init int event_trace_init(void)
 	if (ret)
 		return ret;
 
+#ifdef CONFIG_MODULES
 	ret = register_module_notifier(&trace_module_nb);
 	if (ret)
 		pr_warning("Failed to register trace events module notifier\n");
-
+#endif
 	return 0;
 }
 early_initcall(event_trace_memsetup);
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 8fd03657bc7..559329d9bd2 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -200,8 +200,8 @@ extern char *__bad_type_size(void);
 	#type, #name, offsetof(typeof(trace), name),		\
 	sizeof(trace.name), is_signed_type(type)
 
-static
-int __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
+static int __init
+__set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
 {
 	int i;
 	int pos = 0;
@@ -228,7 +228,7 @@ int __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
 	return pos;
 }
 
-static int set_syscall_print_fmt(struct ftrace_event_call *call)
+static int __init set_syscall_print_fmt(struct ftrace_event_call *call)
 {
 	char *print_fmt;
 	int len;
@@ -253,7 +253,7 @@ static int set_syscall_print_fmt(struct ftrace_event_call *call)
 	return 0;
 }
 
-static void free_syscall_print_fmt(struct ftrace_event_call *call)
+static void __init free_syscall_print_fmt(struct ftrace_event_call *call)
 {
 	struct syscall_metadata *entry = call->data;
@@ -459,7 +459,7 @@ static void unreg_event_syscall_exit(struct ftrace_event_file *file,
 	mutex_unlock(&syscall_trace_lock);
 }
 
-static int init_syscall_trace(struct ftrace_event_call *call)
+static int __init init_syscall_trace(struct ftrace_event_call *call)
 {
 	int id;
 	int num;
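
Note: the trace.c and trace.h hunks above make tracing_cpumask a per-trace_array field and create the file through init_tracer_debugfs(), so each trace instance carries its own CPU mask instead of sharing one global one. A minimal userspace sketch of driving that per-instance file is below; the debugfs mount point and the "foo" instance name are assumptions for illustration, not part of the patch.

/*
 * Hypothetical example (not part of the patch): restrict tracing in one
 * instance to CPUs 0 and 1 by writing a hex cpumask to its own
 * tracing_cpumask file. Adjust the path for your tracing mount point.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path =
		"/sys/kernel/debug/tracing/instances/foo/tracing_cpumask";
	const char *mask = "3\n";	/* hex mask: CPUs 0 and 1 */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, mask, strlen(mask)) < 0)
		perror("write");
	close(fd);
	return 0;
}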