Diffstat (limited to 'kernel')
83 files changed, 4866 insertions, 2077 deletions
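A recurring pattern in the audit changes below is the switch from open-coded skb parsing to the generic netlink helpers (nlmsg_new(), NLMSG_OK(), NLMSG_NEXT()), most visibly in audit_receive_skb() in kernel/audit.c. For context, here is a minimal userspace-style sketch of the same NLMSG_OK()/NLMSG_NEXT() walk; the buffer and function names are hypothetical and the code is illustrative only, not part of this commit:

#include <stdio.h>
#include <linux/netlink.h>

/* Walk every netlink message packed into one receive buffer. */
static void handle_msgs(char *buf, int len)
{
	struct nlmsghdr *nlh = (struct nlmsghdr *)buf;

	/*
	 * len must be signed: NLMSG_NEXT() decrements it and can take it
	 * below zero when a message's nlmsg_len is not properly aligned,
	 * which is why the new kernel loop below keeps len as an int too.
	 */
	while (NLMSG_OK(nlh, len)) {
		printf("type=%u len=%u\n",
		       (unsigned)nlh->nlmsg_type,
		       (unsigned)nlh->nlmsg_len);
		nlh = NLMSG_NEXT(nlh, len);
	}
}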
diff --git a/kernel/Makefile b/kernel/Makefile index 9df4501cb92..2093a691f1c 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -69,8 +69,9 @@ obj-$(CONFIG_IKCONFIG) += configs.o obj-$(CONFIG_RESOURCE_COUNTERS) += res_counter.o obj-$(CONFIG_STOP_MACHINE) += stop_machine.o obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o -obj-$(CONFIG_AUDIT) += audit.o auditfilter.o +obj-$(CONFIG_AUDIT) += audit.o auditfilter.o audit_watch.o obj-$(CONFIG_AUDITSYSCALL) += auditsc.o +obj-$(CONFIG_GCOV_KERNEL) += gcov/ obj-$(CONFIG_AUDIT_TREE) += audit_tree.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_KGDB) += kgdb.o @@ -95,6 +96,7 @@ obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o obj-$(CONFIG_FUNCTION_TRACER) += trace/ obj-$(CONFIG_TRACING) += trace/ obj-$(CONFIG_X86_DS) += trace/ +obj-$(CONFIG_RING_BUFFER) += trace/ obj-$(CONFIG_SMP) += sched_cpupri.o obj-$(CONFIG_SLOW_WORK) += slow-work.o obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o diff --git a/kernel/acct.c b/kernel/acct.c index 7afa3156416..9a4715a2f6b 100644 --- a/kernel/acct.c +++ b/kernel/acct.c @@ -215,6 +215,7 @@ static void acct_file_reopen(struct bsd_acct_struct *acct, struct file *file, static int acct_on(char *name) { struct file *file; + struct vfsmount *mnt; int error; struct pid_namespace *ns; struct bsd_acct_struct *acct = NULL; @@ -256,11 +257,12 @@ static int acct_on(char *name) acct = NULL; } - mnt_pin(file->f_path.mnt); + mnt = file->f_path.mnt; + mnt_pin(mnt); acct_file_reopen(ns->bacct, file, ns); spin_unlock(&acct_lock); - mntput(file->f_path.mnt); /* it's pinned, now give up active reference */ + mntput(mnt); /* it's pinned, now give up active reference */ kfree(acct); return 0; @@ -489,13 +491,17 @@ static void do_acct_process(struct bsd_acct_struct *acct, u64 run_time; struct timespec uptime; struct tty_struct *tty; + const struct cred *orig_cred; + + /* Perform file operations on behalf of whoever enabled accounting */ + orig_cred = override_creds(file->f_cred); /* * First check to see if there is enough free_space to continue * the process accounting system. */ if (!check_free_space(acct, file)) - return; + goto out; /* * Fill the accounting struct with the needed info as recorded @@ -576,6 +582,8 @@ static void do_acct_process(struct bsd_acct_struct *acct, sizeof(acct_t), &file->f_pos); current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim; set_fs(fs); +out: + revert_creds(orig_cred); } /** diff --git a/kernel/audit.c b/kernel/audit.c index 9442c3533ba..defc2e6f1e3 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -115,9 +115,6 @@ static atomic_t audit_lost = ATOMIC_INIT(0); /* The netlink socket. */ static struct sock *audit_sock; -/* Inotify handle. */ -struct inotify_handle *audit_ih; - /* Hash for inode-based rules */ struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS]; @@ -136,7 +133,7 @@ static DECLARE_WAIT_QUEUE_HEAD(kauditd_wait); static DECLARE_WAIT_QUEUE_HEAD(audit_backlog_wait); /* Serialize requests from userspace. */ -static DEFINE_MUTEX(audit_cmd_mutex); +DEFINE_MUTEX(audit_cmd_mutex); /* AUDIT_BUFSIZ is the size of the temporary buffer used for formatting * audit records. Since printk uses a 1024 byte buffer, this buffer @@ -375,6 +372,25 @@ static void audit_hold_skb(struct sk_buff *skb) kfree_skb(skb); } +/* + * For one reason or another this nlh isn't getting delivered to the userspace + * audit daemon, just send it to printk. 
+ */ +static void audit_printk_skb(struct sk_buff *skb) +{ + struct nlmsghdr *nlh = nlmsg_hdr(skb); + char *data = NLMSG_DATA(nlh); + + if (nlh->nlmsg_type != AUDIT_EOE) { + if (printk_ratelimit()) + printk(KERN_NOTICE "type=%d %s\n", nlh->nlmsg_type, data); + else + audit_log_lost("printk limit exceeded\n"); + } + + audit_hold_skb(skb); +} + static void kauditd_send_skb(struct sk_buff *skb) { int err; @@ -427,14 +443,8 @@ static int kauditd_thread(void *dummy) if (skb) { if (audit_pid) kauditd_send_skb(skb); - else { - if (printk_ratelimit()) - printk(KERN_NOTICE "%s\n", skb->data + NLMSG_SPACE(0)); - else - audit_log_lost("printk limit exceeded\n"); - - audit_hold_skb(skb); - } + else + audit_printk_skb(skb); } else { DECLARE_WAITQUEUE(wait, current); set_current_state(TASK_INTERRUPTIBLE); @@ -495,42 +505,25 @@ int audit_send_list(void *_dest) return 0; } -#ifdef CONFIG_AUDIT_TREE -static int prune_tree_thread(void *unused) -{ - mutex_lock(&audit_cmd_mutex); - audit_prune_trees(); - mutex_unlock(&audit_cmd_mutex); - return 0; -} - -void audit_schedule_prune(void) -{ - kthread_run(prune_tree_thread, NULL, "audit_prune_tree"); -} -#endif - struct sk_buff *audit_make_reply(int pid, int seq, int type, int done, int multi, void *payload, int size) { struct sk_buff *skb; struct nlmsghdr *nlh; - int len = NLMSG_SPACE(size); void *data; int flags = multi ? NLM_F_MULTI : 0; int t = done ? NLMSG_DONE : type; - skb = alloc_skb(len, GFP_KERNEL); + skb = nlmsg_new(size, GFP_KERNEL); if (!skb) return NULL; - nlh = NLMSG_PUT(skb, pid, seq, t, size); - nlh->nlmsg_flags = flags; - data = NLMSG_DATA(nlh); + nlh = NLMSG_NEW(skb, pid, seq, t, size, flags); + data = NLMSG_DATA(nlh); memcpy(data, payload, size); return skb; -nlmsg_failure: /* Used by NLMSG_PUT */ +nlmsg_failure: /* Used by NLMSG_NEW */ if (skb) kfree_skb(skb); return NULL; @@ -926,28 +919,29 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) } /* - * Get message from skb (based on rtnetlink_rcv_skb). Each message is - * processed by audit_receive_msg. Malformed skbs with wrong length are - * discarded silently. + * Get message from skb. Each message is processed by audit_receive_msg. + * Malformed skbs with wrong length are discarded silently. */ static void audit_receive_skb(struct sk_buff *skb) { - int err; - struct nlmsghdr *nlh; - u32 rlen; + struct nlmsghdr *nlh; + /* + * len MUST be signed for NLMSG_NEXT to be able to dec it below 0 + * if the nlmsg_len was not aligned + */ + int len; + int err; - while (skb->len >= NLMSG_SPACE(0)) { - nlh = nlmsg_hdr(skb); - if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len) - return; - rlen = NLMSG_ALIGN(nlh->nlmsg_len); - if (rlen > skb->len) - rlen = skb->len; - if ((err = audit_receive_msg(skb, nlh))) { + nlh = nlmsg_hdr(skb); + len = skb->len; + + while (NLMSG_OK(nlh, len)) { + err = audit_receive_msg(skb, nlh); + /* if err or if this message says it wants a response */ + if (err || (nlh->nlmsg_flags & NLM_F_ACK)) netlink_ack(skb, nlh, err); - } else if (nlh->nlmsg_flags & NLM_F_ACK) - netlink_ack(skb, nlh, 0); - skb_pull(skb, rlen); + + nlh = NLMSG_NEXT(nlh, len); } } @@ -959,13 +953,6 @@ static void audit_receive(struct sk_buff *skb) mutex_unlock(&audit_cmd_mutex); } -#ifdef CONFIG_AUDITSYSCALL -static const struct inotify_operations audit_inotify_ops = { - .handle_event = audit_handle_ievent, - .destroy_watch = audit_free_parent, -}; -#endif - /* Initialize audit support at boot time. 
*/ static int __init audit_init(void) { @@ -991,12 +978,6 @@ static int __init audit_init(void) audit_log(NULL, GFP_KERNEL, AUDIT_KERNEL, "initialized"); -#ifdef CONFIG_AUDITSYSCALL - audit_ih = inotify_init(&audit_inotify_ops); - if (IS_ERR(audit_ih)) - audit_panic("cannot initialize inotify handle"); -#endif - for (i = 0; i < AUDIT_INODE_BUCKETS; i++) INIT_LIST_HEAD(&audit_inode_hash[i]); @@ -1070,18 +1051,20 @@ static struct audit_buffer * audit_buffer_alloc(struct audit_context *ctx, goto err; } - ab->skb = alloc_skb(AUDIT_BUFSIZ, gfp_mask); - if (!ab->skb) - goto err; - ab->ctx = ctx; ab->gfp_mask = gfp_mask; - nlh = (struct nlmsghdr *)skb_put(ab->skb, NLMSG_SPACE(0)); - nlh->nlmsg_type = type; - nlh->nlmsg_flags = 0; - nlh->nlmsg_pid = 0; - nlh->nlmsg_seq = 0; + + ab->skb = nlmsg_new(AUDIT_BUFSIZ, gfp_mask); + if (!ab->skb) + goto nlmsg_failure; + + nlh = NLMSG_NEW(ab->skb, 0, 0, type, 0, 0); + return ab; + +nlmsg_failure: /* Used by NLMSG_NEW */ + kfree_skb(ab->skb); + ab->skb = NULL; err: audit_buffer_free(ab); return NULL; @@ -1452,6 +1435,15 @@ void audit_log_d_path(struct audit_buffer *ab, const char *prefix, kfree(pathname); } +void audit_log_key(struct audit_buffer *ab, char *key) +{ + audit_log_format(ab, " key="); + if (key) + audit_log_untrustedstring(ab, key); + else + audit_log_format(ab, "(null)"); +} + /** * audit_log_end - end one audit record * @ab: the audit_buffer @@ -1475,15 +1467,7 @@ void audit_log_end(struct audit_buffer *ab) skb_queue_tail(&audit_skb_queue, ab->skb); wake_up_interruptible(&kauditd_wait); } else { - if (nlh->nlmsg_type != AUDIT_EOE) { - if (printk_ratelimit()) { - printk(KERN_NOTICE "type=%d %s\n", - nlh->nlmsg_type, - ab->skb->data + NLMSG_SPACE(0)); - } else - audit_log_lost("printk limit exceeded\n"); - } - audit_hold_skb(ab->skb); + audit_printk_skb(ab->skb); } ab->skb = NULL; } diff --git a/kernel/audit.h b/kernel/audit.h index 16f18cac661..208687be4f3 100644 --- a/kernel/audit.h +++ b/kernel/audit.h @@ -53,18 +53,7 @@ enum audit_state { }; /* Rule lists */ -struct audit_parent; - -struct audit_watch { - atomic_t count; /* reference count */ - char *path; /* insertion path */ - dev_t dev; /* associated superblock device */ - unsigned long ino; /* associated inode number */ - struct audit_parent *parent; /* associated parent */ - struct list_head wlist; /* entry in parent->watches list */ - struct list_head rules; /* associated rules */ -}; - +struct audit_watch; struct audit_tree; struct audit_chunk; @@ -108,19 +97,28 @@ struct audit_netlink_list { int audit_send_list(void *); -struct inotify_watch; -/* Inotify handle */ -extern struct inotify_handle *audit_ih; - -extern void audit_free_parent(struct inotify_watch *); -extern void audit_handle_ievent(struct inotify_watch *, u32, u32, u32, - const char *, struct inode *); extern int selinux_audit_rule_update(void); extern struct mutex audit_filter_mutex; extern void audit_free_rule_rcu(struct rcu_head *); extern struct list_head audit_filter_list[]; +/* audit watch functions */ +extern unsigned long audit_watch_inode(struct audit_watch *watch); +extern dev_t audit_watch_dev(struct audit_watch *watch); +extern void audit_put_watch(struct audit_watch *watch); +extern void audit_get_watch(struct audit_watch *watch); +extern int audit_to_watch(struct audit_krule *krule, char *path, int len, u32 op); +extern int audit_add_watch(struct audit_krule *krule); +extern void audit_remove_watch(struct audit_watch *watch); +extern void audit_remove_watch_rule(struct audit_krule *krule, struct list_head 
*list); +extern void audit_inotify_unregister(struct list_head *in_list); +extern char *audit_watch_path(struct audit_watch *watch); +extern struct list_head *audit_watch_rules(struct audit_watch *watch); + +extern struct audit_entry *audit_dupe_rule(struct audit_krule *old, + struct audit_watch *watch); + #ifdef CONFIG_AUDIT_TREE extern struct audit_chunk *audit_tree_lookup(const struct inode *); extern void audit_put_chunk(struct audit_chunk *); @@ -130,10 +128,9 @@ extern int audit_add_tree_rule(struct audit_krule *); extern int audit_remove_tree_rule(struct audit_krule *); extern void audit_trim_trees(void); extern int audit_tag_tree(char *old, char *new); -extern void audit_schedule_prune(void); -extern void audit_prune_trees(void); extern const char *audit_tree_path(struct audit_tree *); extern void audit_put_tree(struct audit_tree *); +extern void audit_kill_trees(struct list_head *); #else #define audit_remove_tree_rule(rule) BUG() #define audit_add_tree_rule(rule) -EINVAL @@ -142,6 +139,7 @@ extern void audit_put_tree(struct audit_tree *); #define audit_put_tree(tree) (void)0 #define audit_tag_tree(old, new) -EINVAL #define audit_tree_path(rule) "" /* never called */ +#define audit_kill_trees(list) BUG() #endif extern char *audit_unpack_string(void **, size_t *, size_t); @@ -160,7 +158,10 @@ static inline int audit_signal_info(int sig, struct task_struct *t) return 0; } extern void audit_filter_inodes(struct task_struct *, struct audit_context *); +extern struct list_head *audit_killed_trees(void); #else #define audit_signal_info(s,t) AUDIT_DISABLED #define audit_filter_inodes(t,c) AUDIT_DISABLED #endif + +extern struct mutex audit_cmd_mutex; diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index 1f6396d7668..2451dc6f328 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -2,6 +2,7 @@ #include <linux/inotify.h> #include <linux/namei.h> #include <linux/mount.h> +#include <linux/kthread.h> struct audit_tree; struct audit_chunk; @@ -441,13 +442,11 @@ static void kill_rules(struct audit_tree *tree) if (rule->tree) { /* not a half-baked one */ ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE); - audit_log_format(ab, "op=remove rule dir="); + audit_log_format(ab, "op="); + audit_log_string(ab, "remove rule"); + audit_log_format(ab, " dir="); audit_log_untrustedstring(ab, rule->tree->pathname); - if (rule->filterkey) { - audit_log_format(ab, " key="); - audit_log_untrustedstring(ab, rule->filterkey); - } else - audit_log_format(ab, " key=(null)"); + audit_log_key(ab, rule->filterkey); audit_log_format(ab, " list=%d res=1", rule->listnr); audit_log_end(ab); rule->tree = NULL; @@ -519,6 +518,8 @@ static void trim_marked(struct audit_tree *tree) } } +static void audit_schedule_prune(void); + /* called with audit_filter_mutex */ int audit_remove_tree_rule(struct audit_krule *rule) { @@ -824,10 +825,11 @@ int audit_tag_tree(char *old, char *new) /* * That gets run when evict_chunk() ends up needing to kill audit_tree. - * Runs from a separate thread, with audit_cmd_mutex held. + * Runs from a separate thread. */ -void audit_prune_trees(void) +static int prune_tree_thread(void *unused) { + mutex_lock(&audit_cmd_mutex); mutex_lock(&audit_filter_mutex); while (!list_empty(&prune_list)) { @@ -844,6 +846,40 @@ void audit_prune_trees(void) } mutex_unlock(&audit_filter_mutex); + mutex_unlock(&audit_cmd_mutex); + return 0; +} + +static void audit_schedule_prune(void) +{ + kthread_run(prune_tree_thread, NULL, "audit_prune_tree"); +} + +/* + * ... 
and that one is done if evict_chunk() decides to delay until the end + * of syscall. Runs synchronously. + */ +void audit_kill_trees(struct list_head *list) +{ + mutex_lock(&audit_cmd_mutex); + mutex_lock(&audit_filter_mutex); + + while (!list_empty(list)) { + struct audit_tree *victim; + + victim = list_entry(list->next, struct audit_tree, list); + kill_rules(victim); + list_del_init(&victim->list); + + mutex_unlock(&audit_filter_mutex); + + prune_one(victim); + + mutex_lock(&audit_filter_mutex); + } + + mutex_unlock(&audit_filter_mutex); + mutex_unlock(&audit_cmd_mutex); } /* @@ -854,6 +890,8 @@ void audit_prune_trees(void) static void evict_chunk(struct audit_chunk *chunk) { struct audit_tree *owner; + struct list_head *postponed = audit_killed_trees(); + int need_prune = 0; int n; if (chunk->dead) @@ -869,15 +907,21 @@ static void evict_chunk(struct audit_chunk *chunk) owner->root = NULL; list_del_init(&owner->same_root); spin_unlock(&hash_lock); - kill_rules(owner); - list_move(&owner->list, &prune_list); - audit_schedule_prune(); + if (!postponed) { + kill_rules(owner); + list_move(&owner->list, &prune_list); + need_prune = 1; + } else { + list_move(&owner->list, postponed); + } spin_lock(&hash_lock); } list_del_rcu(&chunk->hash); for (n = 0; n < chunk->count; n++) list_del_init(&chunk->owners[n].list); spin_unlock(&hash_lock); + if (need_prune) + audit_schedule_prune(); mutex_unlock(&audit_filter_mutex); } diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c new file mode 100644 index 00000000000..0e96dbc60ea --- /dev/null +++ b/kernel/audit_watch.c @@ -0,0 +1,543 @@ +/* audit_watch.c -- watching inodes + * + * Copyright 2003-2009 Red Hat, Inc. + * Copyright 2005 Hewlett-Packard Development Company, L.P. + * Copyright 2005 IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/kernel.h> +#include <linux/audit.h> +#include <linux/kthread.h> +#include <linux/mutex.h> +#include <linux/fs.h> +#include <linux/namei.h> +#include <linux/netlink.h> +#include <linux/sched.h> +#include <linux/inotify.h> +#include <linux/security.h> +#include "audit.h" + +/* + * Reference counting: + * + * audit_parent: lifetime is from audit_init_parent() to receipt of an IN_IGNORED + * event. Each audit_watch holds a reference to its associated parent. + * + * audit_watch: if added to lists, lifetime is from audit_init_watch() to + * audit_remove_watch(). Additionally, an audit_watch may exist + * temporarily to assist in searching existing filter data. Each + * audit_krule holds a reference to its associated watch. 
+ */ + +struct audit_watch { + atomic_t count; /* reference count */ + char *path; /* insertion path */ + dev_t dev; /* associated superblock device */ + unsigned long ino; /* associated inode number */ + struct audit_parent *parent; /* associated parent */ + struct list_head wlist; /* entry in parent->watches list */ + struct list_head rules; /* associated rules */ +}; + +struct audit_parent { + struct list_head ilist; /* entry in inotify registration list */ + struct list_head watches; /* associated watches */ + struct inotify_watch wdata; /* inotify watch data */ + unsigned flags; /* status flags */ +}; + +/* Inotify handle. */ +struct inotify_handle *audit_ih; + +/* + * audit_parent status flags: + * + * AUDIT_PARENT_INVALID - set anytime rules/watches are auto-removed due to + * a filesystem event to ensure we're adding audit watches to a valid parent. + * Technically not needed for IN_DELETE_SELF or IN_UNMOUNT events, as we cannot + * receive them while we have nameidata, but must be used for IN_MOVE_SELF which + * we can receive while holding nameidata. + */ +#define AUDIT_PARENT_INVALID 0x001 + +/* Inotify events we care about. */ +#define AUDIT_IN_WATCH IN_MOVE|IN_CREATE|IN_DELETE|IN_DELETE_SELF|IN_MOVE_SELF + +static void audit_free_parent(struct inotify_watch *i_watch) +{ + struct audit_parent *parent; + + parent = container_of(i_watch, struct audit_parent, wdata); + WARN_ON(!list_empty(&parent->watches)); + kfree(parent); +} + +void audit_get_watch(struct audit_watch *watch) +{ + atomic_inc(&watch->count); +} + +void audit_put_watch(struct audit_watch *watch) +{ + if (atomic_dec_and_test(&watch->count)) { + WARN_ON(watch->parent); + WARN_ON(!list_empty(&watch->rules)); + kfree(watch->path); + kfree(watch); + } +} + +void audit_remove_watch(struct audit_watch *watch) +{ + list_del(&watch->wlist); + put_inotify_watch(&watch->parent->wdata); + watch->parent = NULL; + audit_put_watch(watch); /* match initial get */ +} + +char *audit_watch_path(struct audit_watch *watch) +{ + return watch->path; +} + +struct list_head *audit_watch_rules(struct audit_watch *watch) +{ + return &watch->rules; +} + +unsigned long audit_watch_inode(struct audit_watch *watch) +{ + return watch->ino; +} + +dev_t audit_watch_dev(struct audit_watch *watch) +{ + return watch->dev; +} + +/* Initialize a parent watch entry. */ +static struct audit_parent *audit_init_parent(struct nameidata *ndp) +{ + struct audit_parent *parent; + s32 wd; + + parent = kzalloc(sizeof(*parent), GFP_KERNEL); + if (unlikely(!parent)) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&parent->watches); + parent->flags = 0; + + inotify_init_watch(&parent->wdata); + /* grab a ref so inotify watch hangs around until we take audit_filter_mutex */ + get_inotify_watch(&parent->wdata); + wd = inotify_add_watch(audit_ih, &parent->wdata, + ndp->path.dentry->d_inode, AUDIT_IN_WATCH); + if (wd < 0) { + audit_free_parent(&parent->wdata); + return ERR_PTR(wd); + } + + return parent; +} + +/* Initialize a watch entry. */ +static struct audit_watch *audit_init_watch(char *path) +{ + struct audit_watch *watch; + + watch = kzalloc(sizeof(*watch), GFP_KERNEL); + if (unlikely(!watch)) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&watch->rules); + atomic_set(&watch->count, 1); + watch->path = path; + watch->dev = (dev_t)-1; + watch->ino = (unsigned long)-1; + + return watch; +} + +/* Translate a watch string to kernel respresentation. 
*/ +int audit_to_watch(struct audit_krule *krule, char *path, int len, u32 op) +{ + struct audit_watch *watch; + + if (!audit_ih) + return -EOPNOTSUPP; + + if (path[0] != '/' || path[len-1] == '/' || + krule->listnr != AUDIT_FILTER_EXIT || + op != Audit_equal || + krule->inode_f || krule->watch || krule->tree) + return -EINVAL; + + watch = audit_init_watch(path); + if (IS_ERR(watch)) + return PTR_ERR(watch); + + audit_get_watch(watch); + krule->watch = watch; + + return 0; +} + +/* Duplicate the given audit watch. The new watch's rules list is initialized + * to an empty list and wlist is undefined. */ +static struct audit_watch *audit_dupe_watch(struct audit_watch *old) +{ + char *path; + struct audit_watch *new; + + path = kstrdup(old->path, GFP_KERNEL); + if (unlikely(!path)) + return ERR_PTR(-ENOMEM); + + new = audit_init_watch(path); + if (IS_ERR(new)) { + kfree(path); + goto out; + } + + new->dev = old->dev; + new->ino = old->ino; + get_inotify_watch(&old->parent->wdata); + new->parent = old->parent; + +out: + return new; +} + +static void audit_watch_log_rule_change(struct audit_krule *r, struct audit_watch *w, char *op) +{ + if (audit_enabled) { + struct audit_buffer *ab; + ab = audit_log_start(NULL, GFP_NOFS, AUDIT_CONFIG_CHANGE); + audit_log_format(ab, "auid=%u ses=%u op=", + audit_get_loginuid(current), + audit_get_sessionid(current)); + audit_log_string(ab, op); + audit_log_format(ab, " path="); + audit_log_untrustedstring(ab, w->path); + audit_log_key(ab, r->filterkey); + audit_log_format(ab, " list=%d res=1", r->listnr); + audit_log_end(ab); + } +} + +/* Update inode info in audit rules based on filesystem event. */ +static void audit_update_watch(struct audit_parent *parent, + const char *dname, dev_t dev, + unsigned long ino, unsigned invalidating) +{ + struct audit_watch *owatch, *nwatch, *nextw; + struct audit_krule *r, *nextr; + struct audit_entry *oentry, *nentry; + + mutex_lock(&audit_filter_mutex); + list_for_each_entry_safe(owatch, nextw, &parent->watches, wlist) { + if (audit_compare_dname_path(dname, owatch->path, NULL)) + continue; + + /* If the update involves invalidating rules, do the inode-based + * filtering now, so we don't omit records. */ + if (invalidating && current->audit_context) + audit_filter_inodes(current, current->audit_context); + + nwatch = audit_dupe_watch(owatch); + if (IS_ERR(nwatch)) { + mutex_unlock(&audit_filter_mutex); + audit_panic("error updating watch, skipping"); + return; + } + nwatch->dev = dev; + nwatch->ino = ino; + + list_for_each_entry_safe(r, nextr, &owatch->rules, rlist) { + + oentry = container_of(r, struct audit_entry, rule); + list_del(&oentry->rule.rlist); + list_del_rcu(&oentry->list); + + nentry = audit_dupe_rule(&oentry->rule, nwatch); + if (IS_ERR(nentry)) { + list_del(&oentry->rule.list); + audit_panic("error updating watch, removing"); + } else { + int h = audit_hash_ino((u32)ino); + list_add(&nentry->rule.rlist, &nwatch->rules); + list_add_rcu(&nentry->list, &audit_inode_hash[h]); + list_replace(&oentry->rule.list, + &nentry->rule.list); + } + + audit_watch_log_rule_change(r, owatch, "updated rules"); + + call_rcu(&oentry->rcu, audit_free_rule_rcu); + } + + audit_remove_watch(owatch); + goto add_watch_to_parent; /* event applies to a single watch */ + } + mutex_unlock(&audit_filter_mutex); + return; + +add_watch_to_parent: + list_add(&nwatch->wlist, &parent->watches); + mutex_unlock(&audit_filter_mutex); + return; +} + +/* Remove all watches & rules associated with a parent that is going away. 
*/ +static void audit_remove_parent_watches(struct audit_parent *parent) +{ + struct audit_watch *w, *nextw; + struct audit_krule *r, *nextr; + struct audit_entry *e; + + mutex_lock(&audit_filter_mutex); + parent->flags |= AUDIT_PARENT_INVALID; + list_for_each_entry_safe(w, nextw, &parent->watches, wlist) { + list_for_each_entry_safe(r, nextr, &w->rules, rlist) { + e = container_of(r, struct audit_entry, rule); + audit_watch_log_rule_change(r, w, "remove rule"); + list_del(&r->rlist); + list_del(&r->list); + list_del_rcu(&e->list); + call_rcu(&e->rcu, audit_free_rule_rcu); + } + audit_remove_watch(w); + } + mutex_unlock(&audit_filter_mutex); +} + +/* Unregister inotify watches for parents on in_list. + * Generates an IN_IGNORED event. */ +void audit_inotify_unregister(struct list_head *in_list) +{ + struct audit_parent *p, *n; + + list_for_each_entry_safe(p, n, in_list, ilist) { + list_del(&p->ilist); + inotify_rm_watch(audit_ih, &p->wdata); + /* the unpin matching the pin in audit_do_del_rule() */ + unpin_inotify_watch(&p->wdata); + } +} + +/* Get path information necessary for adding watches. */ +static int audit_get_nd(char *path, struct nameidata **ndp, struct nameidata **ndw) +{ + struct nameidata *ndparent, *ndwatch; + int err; + + ndparent = kmalloc(sizeof(*ndparent), GFP_KERNEL); + if (unlikely(!ndparent)) + return -ENOMEM; + + ndwatch = kmalloc(sizeof(*ndwatch), GFP_KERNEL); + if (unlikely(!ndwatch)) { + kfree(ndparent); + return -ENOMEM; + } + + err = path_lookup(path, LOOKUP_PARENT, ndparent); + if (err) { + kfree(ndparent); + kfree(ndwatch); + return err; + } + + err = path_lookup(path, 0, ndwatch); + if (err) { + kfree(ndwatch); + ndwatch = NULL; + } + + *ndp = ndparent; + *ndw = ndwatch; + + return 0; +} + +/* Release resources used for watch path information. */ +static void audit_put_nd(struct nameidata *ndp, struct nameidata *ndw) +{ + if (ndp) { + path_put(&ndp->path); + kfree(ndp); + } + if (ndw) { + path_put(&ndw->path); + kfree(ndw); + } +} + +/* Associate the given rule with an existing parent inotify_watch. + * Caller must hold audit_filter_mutex. */ +static void audit_add_to_parent(struct audit_krule *krule, + struct audit_parent *parent) +{ + struct audit_watch *w, *watch = krule->watch; + int watch_found = 0; + + list_for_each_entry(w, &parent->watches, wlist) { + if (strcmp(watch->path, w->path)) + continue; + + watch_found = 1; + + /* put krule's and initial refs to temporary watch */ + audit_put_watch(watch); + audit_put_watch(watch); + + audit_get_watch(w); + krule->watch = watch = w; + break; + } + + if (!watch_found) { + get_inotify_watch(&parent->wdata); + watch->parent = parent; + + list_add(&watch->wlist, &parent->watches); + } + list_add(&krule->rlist, &watch->rules); +} + +/* Find a matching watch entry, or add this one. + * Caller must hold audit_filter_mutex. */ +int audit_add_watch(struct audit_krule *krule) +{ + struct audit_watch *watch = krule->watch; + struct inotify_watch *i_watch; + struct audit_parent *parent; + struct nameidata *ndp = NULL, *ndw = NULL; + int ret = 0; + + mutex_unlock(&audit_filter_mutex); + + /* Avoid calling path_lookup under audit_filter_mutex. 
*/ + ret = audit_get_nd(watch->path, &ndp, &ndw); + if (ret) { + /* caller expects mutex locked */ + mutex_lock(&audit_filter_mutex); + goto error; + } + + /* update watch filter fields */ + if (ndw) { + watch->dev = ndw->path.dentry->d_inode->i_sb->s_dev; + watch->ino = ndw->path.dentry->d_inode->i_ino; + } + + /* The audit_filter_mutex must not be held during inotify calls because + * we hold it during inotify event callback processing. If an existing + * inotify watch is found, inotify_find_watch() grabs a reference before + * returning. + */ + if (inotify_find_watch(audit_ih, ndp->path.dentry->d_inode, + &i_watch) < 0) { + parent = audit_init_parent(ndp); + if (IS_ERR(parent)) { + /* caller expects mutex locked */ + mutex_lock(&audit_filter_mutex); + ret = PTR_ERR(parent); + goto error; + } + } else + parent = container_of(i_watch, struct audit_parent, wdata); + + mutex_lock(&audit_filter_mutex); + + /* parent was moved before we took audit_filter_mutex */ + if (parent->flags & AUDIT_PARENT_INVALID) + ret = -ENOENT; + else + audit_add_to_parent(krule, parent); + + /* match get in audit_init_parent or inotify_find_watch */ + put_inotify_watch(&parent->wdata); + +error: + audit_put_nd(ndp, ndw); /* NULL args OK */ + return ret; + +} + +void audit_remove_watch_rule(struct audit_krule *krule, struct list_head *list) +{ + struct audit_watch *watch = krule->watch; + struct audit_parent *parent = watch->parent; + + list_del(&krule->rlist); + + if (list_empty(&watch->rules)) { + audit_remove_watch(watch); + + if (list_empty(&parent->watches)) { + /* Put parent on the inotify un-registration + * list. Grab a reference before releasing + * audit_filter_mutex, to be released in + * audit_inotify_unregister(). + * If filesystem is going away, just leave + * the sucker alone, eviction will take + * care of it. */ + if (pin_inotify_watch(&parent->wdata)) + list_add(&parent->ilist, list); + } + } +} + +/* Update watch data in audit rules based on inotify events. 
*/ +static void audit_handle_ievent(struct inotify_watch *i_watch, u32 wd, u32 mask, + u32 cookie, const char *dname, struct inode *inode) +{ + struct audit_parent *parent; + + parent = container_of(i_watch, struct audit_parent, wdata); + + if (mask & (IN_CREATE|IN_MOVED_TO) && inode) + audit_update_watch(parent, dname, inode->i_sb->s_dev, + inode->i_ino, 0); + else if (mask & (IN_DELETE|IN_MOVED_FROM)) + audit_update_watch(parent, dname, (dev_t)-1, (unsigned long)-1, 1); + /* inotify automatically removes the watch and sends IN_IGNORED */ + else if (mask & (IN_DELETE_SELF|IN_UNMOUNT)) + audit_remove_parent_watches(parent); + /* inotify does not remove the watch, so remove it manually */ + else if(mask & IN_MOVE_SELF) { + audit_remove_parent_watches(parent); + inotify_remove_watch_locked(audit_ih, i_watch); + } else if (mask & IN_IGNORED) + put_inotify_watch(i_watch); +} + +static const struct inotify_operations audit_inotify_ops = { + .handle_event = audit_handle_ievent, + .destroy_watch = audit_free_parent, +}; + +static int __init audit_watch_init(void) +{ + audit_ih = inotify_init(&audit_inotify_ops); + if (IS_ERR(audit_ih)) + audit_panic("cannot initialize inotify handle"); + return 0; +} +subsys_initcall(audit_watch_init); diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index 713098ee5a0..a70604047f3 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c @@ -27,7 +27,6 @@ #include <linux/namei.h> #include <linux/netlink.h> #include <linux/sched.h> -#include <linux/inotify.h> #include <linux/security.h> #include "audit.h" @@ -44,36 +43,6 @@ * be written directly provided audit_filter_mutex is held. */ -/* - * Reference counting: - * - * audit_parent: lifetime is from audit_init_parent() to receipt of an IN_IGNORED - * event. Each audit_watch holds a reference to its associated parent. - * - * audit_watch: if added to lists, lifetime is from audit_init_watch() to - * audit_remove_watch(). Additionally, an audit_watch may exist - * temporarily to assist in searching existing filter data. Each - * audit_krule holds a reference to its associated watch. - */ - -struct audit_parent { - struct list_head ilist; /* entry in inotify registration list */ - struct list_head watches; /* associated watches */ - struct inotify_watch wdata; /* inotify watch data */ - unsigned flags; /* status flags */ -}; - -/* - * audit_parent status flags: - * - * AUDIT_PARENT_INVALID - set anytime rules/watches are auto-removed due to - * a filesystem event to ensure we're adding audit watches to a valid parent. - * Technically not needed for IN_DELETE_SELF or IN_UNMOUNT events, as we cannot - * receive them while we have nameidata, but must be used for IN_MOVE_SELF which - * we can receive while holding nameidata. - */ -#define AUDIT_PARENT_INVALID 0x001 - /* Audit filter lists, defined in <linux/audit.h> */ struct list_head audit_filter_list[AUDIT_NR_FILTERS] = { LIST_HEAD_INIT(audit_filter_list[0]), @@ -97,41 +66,6 @@ static struct list_head audit_rules_list[AUDIT_NR_FILTERS] = { DEFINE_MUTEX(audit_filter_mutex); -/* Inotify events we care about. 
*/ -#define AUDIT_IN_WATCH IN_MOVE|IN_CREATE|IN_DELETE|IN_DELETE_SELF|IN_MOVE_SELF - -void audit_free_parent(struct inotify_watch *i_watch) -{ - struct audit_parent *parent; - - parent = container_of(i_watch, struct audit_parent, wdata); - WARN_ON(!list_empty(&parent->watches)); - kfree(parent); -} - -static inline void audit_get_watch(struct audit_watch *watch) -{ - atomic_inc(&watch->count); -} - -static void audit_put_watch(struct audit_watch *watch) -{ - if (atomic_dec_and_test(&watch->count)) { - WARN_ON(watch->parent); - WARN_ON(!list_empty(&watch->rules)); - kfree(watch->path); - kfree(watch); - } -} - -static void audit_remove_watch(struct audit_watch *watch) -{ - list_del(&watch->wlist); - put_inotify_watch(&watch->parent->wdata); - watch->parent = NULL; - audit_put_watch(watch); /* match initial get */ -} - static inline void audit_free_rule(struct audit_entry *e) { int i; @@ -156,50 +90,6 @@ void audit_free_rule_rcu(struct rcu_head *head) audit_free_rule(e); } -/* Initialize a parent watch entry. */ -static struct audit_parent *audit_init_parent(struct nameidata *ndp) -{ - struct audit_parent *parent; - s32 wd; - - parent = kzalloc(sizeof(*parent), GFP_KERNEL); - if (unlikely(!parent)) - return ERR_PTR(-ENOMEM); - - INIT_LIST_HEAD(&parent->watches); - parent->flags = 0; - - inotify_init_watch(&parent->wdata); - /* grab a ref so inotify watch hangs around until we take audit_filter_mutex */ - get_inotify_watch(&parent->wdata); - wd = inotify_add_watch(audit_ih, &parent->wdata, - ndp->path.dentry->d_inode, AUDIT_IN_WATCH); - if (wd < 0) { - audit_free_parent(&parent->wdata); - return ERR_PTR(wd); - } - - return parent; -} - -/* Initialize a watch entry. */ -static struct audit_watch *audit_init_watch(char *path) -{ - struct audit_watch *watch; - - watch = kzalloc(sizeof(*watch), GFP_KERNEL); - if (unlikely(!watch)) - return ERR_PTR(-ENOMEM); - - INIT_LIST_HEAD(&watch->rules); - atomic_set(&watch->count, 1); - watch->path = path; - watch->dev = (dev_t)-1; - watch->ino = (unsigned long)-1; - - return watch; -} - /* Initialize an audit filterlist entry. */ static inline struct audit_entry *audit_init_entry(u32 field_count) { @@ -260,31 +150,6 @@ static inline int audit_to_inode(struct audit_krule *krule, return 0; } -/* Translate a watch string to kernel respresentation. 
*/ -static int audit_to_watch(struct audit_krule *krule, char *path, int len, - u32 op) -{ - struct audit_watch *watch; - - if (!audit_ih) - return -EOPNOTSUPP; - - if (path[0] != '/' || path[len-1] == '/' || - krule->listnr != AUDIT_FILTER_EXIT || - op != Audit_equal || - krule->inode_f || krule->watch || krule->tree) - return -EINVAL; - - watch = audit_init_watch(path); - if (IS_ERR(watch)) - return PTR_ERR(watch); - - audit_get_watch(watch); - krule->watch = watch; - - return 0; -} - static __u32 *classes[AUDIT_SYSCALL_CLASSES]; int __init audit_register_class(int class, unsigned *list) @@ -766,7 +631,8 @@ static struct audit_rule_data *audit_krule_to_data(struct audit_krule *krule) break; case AUDIT_WATCH: data->buflen += data->values[i] = - audit_pack_string(&bufp, krule->watch->path); + audit_pack_string(&bufp, + audit_watch_path(krule->watch)); break; case AUDIT_DIR: data->buflen += data->values[i] = @@ -818,7 +684,8 @@ static int audit_compare_rule(struct audit_krule *a, struct audit_krule *b) return 1; break; case AUDIT_WATCH: - if (strcmp(a->watch->path, b->watch->path)) + if (strcmp(audit_watch_path(a->watch), + audit_watch_path(b->watch))) return 1; break; case AUDIT_DIR: @@ -844,32 +711,6 @@ static int audit_compare_rule(struct audit_krule *a, struct audit_krule *b) return 0; } -/* Duplicate the given audit watch. The new watch's rules list is initialized - * to an empty list and wlist is undefined. */ -static struct audit_watch *audit_dupe_watch(struct audit_watch *old) -{ - char *path; - struct audit_watch *new; - - path = kstrdup(old->path, GFP_KERNEL); - if (unlikely(!path)) - return ERR_PTR(-ENOMEM); - - new = audit_init_watch(path); - if (IS_ERR(new)) { - kfree(path); - goto out; - } - - new->dev = old->dev; - new->ino = old->ino; - get_inotify_watch(&old->parent->wdata); - new->parent = old->parent; - -out: - return new; -} - /* Duplicate LSM field information. The lsm_rule is opaque, so must be * re-initialized. */ static inline int audit_dupe_lsm_field(struct audit_field *df, @@ -904,8 +745,8 @@ static inline int audit_dupe_lsm_field(struct audit_field *df, * rule with the new rule in the filterlist, then free the old rule. * The rlist element is undefined; list manipulations are handled apart from * the initial copy. */ -static struct audit_entry *audit_dupe_rule(struct audit_krule *old, - struct audit_watch *watch) +struct audit_entry *audit_dupe_rule(struct audit_krule *old, + struct audit_watch *watch) { u32 fcount = old->field_count; struct audit_entry *entry; @@ -977,137 +818,6 @@ static struct audit_entry *audit_dupe_rule(struct audit_krule *old, return entry; } -/* Update inode info in audit rules based on filesystem event. */ -static void audit_update_watch(struct audit_parent *parent, - const char *dname, dev_t dev, - unsigned long ino, unsigned invalidating) -{ - struct audit_watch *owatch, *nwatch, *nextw; - struct audit_krule *r, *nextr; - struct audit_entry *oentry, *nentry; - - mutex_lock(&audit_filter_mutex); - list_for_each_entry_safe(owatch, nextw, &parent->watches, wlist) { - if (audit_compare_dname_path(dname, owatch->path, NULL)) - continue; - - /* If the update involves invalidating rules, do the inode-based - * filtering now, so we don't omit records. 
*/ - if (invalidating && current->audit_context) - audit_filter_inodes(current, current->audit_context); - - nwatch = audit_dupe_watch(owatch); - if (IS_ERR(nwatch)) { - mutex_unlock(&audit_filter_mutex); - audit_panic("error updating watch, skipping"); - return; - } - nwatch->dev = dev; - nwatch->ino = ino; - - list_for_each_entry_safe(r, nextr, &owatch->rules, rlist) { - - oentry = container_of(r, struct audit_entry, rule); - list_del(&oentry->rule.rlist); - list_del_rcu(&oentry->list); - - nentry = audit_dupe_rule(&oentry->rule, nwatch); - if (IS_ERR(nentry)) { - list_del(&oentry->rule.list); - audit_panic("error updating watch, removing"); - } else { - int h = audit_hash_ino((u32)ino); - list_add(&nentry->rule.rlist, &nwatch->rules); - list_add_rcu(&nentry->list, &audit_inode_hash[h]); - list_replace(&oentry->rule.list, - &nentry->rule.list); - } - - call_rcu(&oentry->rcu, audit_free_rule_rcu); - } - - if (audit_enabled) { - struct audit_buffer *ab; - ab = audit_log_start(NULL, GFP_NOFS, - AUDIT_CONFIG_CHANGE); - audit_log_format(ab, "auid=%u ses=%u", - audit_get_loginuid(current), - audit_get_sessionid(current)); - audit_log_format(ab, - " op=updated rules specifying path="); - audit_log_untrustedstring(ab, owatch->path); - audit_log_format(ab, " with dev=%u ino=%lu\n", - dev, ino); - audit_log_format(ab, " list=%d res=1", r->listnr); - audit_log_end(ab); - } - audit_remove_watch(owatch); - goto add_watch_to_parent; /* event applies to a single watch */ - } - mutex_unlock(&audit_filter_mutex); - return; - -add_watch_to_parent: - list_add(&nwatch->wlist, &parent->watches); - mutex_unlock(&audit_filter_mutex); - return; -} - -/* Remove all watches & rules associated with a parent that is going away. */ -static void audit_remove_parent_watches(struct audit_parent *parent) -{ - struct audit_watch *w, *nextw; - struct audit_krule *r, *nextr; - struct audit_entry *e; - - mutex_lock(&audit_filter_mutex); - parent->flags |= AUDIT_PARENT_INVALID; - list_for_each_entry_safe(w, nextw, &parent->watches, wlist) { - list_for_each_entry_safe(r, nextr, &w->rules, rlist) { - e = container_of(r, struct audit_entry, rule); - if (audit_enabled) { - struct audit_buffer *ab; - ab = audit_log_start(NULL, GFP_NOFS, - AUDIT_CONFIG_CHANGE); - audit_log_format(ab, "auid=%u ses=%u", - audit_get_loginuid(current), - audit_get_sessionid(current)); - audit_log_format(ab, " op=remove rule path="); - audit_log_untrustedstring(ab, w->path); - if (r->filterkey) { - audit_log_format(ab, " key="); - audit_log_untrustedstring(ab, - r->filterkey); - } else - audit_log_format(ab, " key=(null)"); - audit_log_format(ab, " list=%d res=1", - r->listnr); - audit_log_end(ab); - } - list_del(&r->rlist); - list_del(&r->list); - list_del_rcu(&e->list); - call_rcu(&e->rcu, audit_free_rule_rcu); - } - audit_remove_watch(w); - } - mutex_unlock(&audit_filter_mutex); -} - -/* Unregister inotify watches for parents on in_list. - * Generates an IN_IGNORED event. */ -static void audit_inotify_unregister(struct list_head *in_list) -{ - struct audit_parent *p, *n; - - list_for_each_entry_safe(p, n, in_list, ilist) { - list_del(&p->ilist); - inotify_rm_watch(audit_ih, &p->wdata); - /* the unpin matching the pin in audit_do_del_rule() */ - unpin_inotify_watch(&p->wdata); - } -} - /* Find an existing audit rule. * Caller must hold audit_filter_mutex to prevent stale rule data. */ static struct audit_entry *audit_find_rule(struct audit_entry *entry, @@ -1145,134 +855,6 @@ out: return found; } -/* Get path information necessary for adding watches. 
*/ -static int audit_get_nd(char *path, struct nameidata **ndp, - struct nameidata **ndw) -{ - struct nameidata *ndparent, *ndwatch; - int err; - - ndparent = kmalloc(sizeof(*ndparent), GFP_KERNEL); - if (unlikely(!ndparent)) - return -ENOMEM; - - ndwatch = kmalloc(sizeof(*ndwatch), GFP_KERNEL); - if (unlikely(!ndwatch)) { - kfree(ndparent); - return -ENOMEM; - } - - err = path_lookup(path, LOOKUP_PARENT, ndparent); - if (err) { - kfree(ndparent); - kfree(ndwatch); - return err; - } - - err = path_lookup(path, 0, ndwatch); - if (err) { - kfree(ndwatch); - ndwatch = NULL; - } - - *ndp = ndparent; - *ndw = ndwatch; - - return 0; -} - -/* Release resources used for watch path information. */ -static void audit_put_nd(struct nameidata *ndp, struct nameidata *ndw) -{ - if (ndp) { - path_put(&ndp->path); - kfree(ndp); - } - if (ndw) { - path_put(&ndw->path); - kfree(ndw); - } -} - -/* Associate the given rule with an existing parent inotify_watch. - * Caller must hold audit_filter_mutex. */ -static void audit_add_to_parent(struct audit_krule *krule, - struct audit_parent *parent) -{ - struct audit_watch *w, *watch = krule->watch; - int watch_found = 0; - - list_for_each_entry(w, &parent->watches, wlist) { - if (strcmp(watch->path, w->path)) - continue; - - watch_found = 1; - - /* put krule's and initial refs to temporary watch */ - audit_put_watch(watch); - audit_put_watch(watch); - - audit_get_watch(w); - krule->watch = watch = w; - break; - } - - if (!watch_found) { - get_inotify_watch(&parent->wdata); - watch->parent = parent; - - list_add(&watch->wlist, &parent->watches); - } - list_add(&krule->rlist, &watch->rules); -} - -/* Find a matching watch entry, or add this one. - * Caller must hold audit_filter_mutex. */ -static int audit_add_watch(struct audit_krule *krule, struct nameidata *ndp, - struct nameidata *ndw) -{ - struct audit_watch *watch = krule->watch; - struct inotify_watch *i_watch; - struct audit_parent *parent; - int ret = 0; - - /* update watch filter fields */ - if (ndw) { - watch->dev = ndw->path.dentry->d_inode->i_sb->s_dev; - watch->ino = ndw->path.dentry->d_inode->i_ino; - } - - /* The audit_filter_mutex must not be held during inotify calls because - * we hold it during inotify event callback processing. If an existing - * inotify watch is found, inotify_find_watch() grabs a reference before - * returning. 
- */ - mutex_unlock(&audit_filter_mutex); - - if (inotify_find_watch(audit_ih, ndp->path.dentry->d_inode, - &i_watch) < 0) { - parent = audit_init_parent(ndp); - if (IS_ERR(parent)) { - /* caller expects mutex locked */ - mutex_lock(&audit_filter_mutex); - return PTR_ERR(parent); - } - } else - parent = container_of(i_watch, struct audit_parent, wdata); - - mutex_lock(&audit_filter_mutex); - - /* parent was moved before we took audit_filter_mutex */ - if (parent->flags & AUDIT_PARENT_INVALID) - ret = -ENOENT; - else - audit_add_to_parent(krule, parent); - - /* match get in audit_init_parent or inotify_find_watch */ - put_inotify_watch(&parent->wdata); - return ret; -} - static u64 prio_low = ~0ULL/2; static u64 prio_high = ~0ULL/2 - 1; @@ -1282,7 +864,6 @@ static inline int audit_add_rule(struct audit_entry *entry) struct audit_entry *e; struct audit_watch *watch = entry->rule.watch; struct audit_tree *tree = entry->rule.tree; - struct nameidata *ndp = NULL, *ndw = NULL; struct list_head *list; int h, err; #ifdef CONFIG_AUDITSYSCALL @@ -1296,8 +877,8 @@ static inline int audit_add_rule(struct audit_entry *entry) mutex_lock(&audit_filter_mutex); e = audit_find_rule(entry, &list); - mutex_unlock(&audit_filter_mutex); if (e) { + mutex_unlock(&audit_filter_mutex); err = -EEXIST; /* normally audit_add_tree_rule() will free it on failure */ if (tree) @@ -1305,22 +886,16 @@ static inline int audit_add_rule(struct audit_entry *entry) goto error; } - /* Avoid calling path_lookup under audit_filter_mutex. */ - if (watch) { - err = audit_get_nd(watch->path, &ndp, &ndw); - if (err) - goto error; - } - - mutex_lock(&audit_filter_mutex); if (watch) { /* audit_filter_mutex is dropped and re-taken during this call */ - err = audit_add_watch(&entry->rule, ndp, ndw); + err = audit_add_watch(&entry->rule); if (err) { mutex_unlock(&audit_filter_mutex); goto error; } - h = audit_hash_ino((u32)watch->ino); + /* entry->rule.watch may have changed during audit_add_watch() */ + watch = entry->rule.watch; + h = audit_hash_ino((u32)audit_watch_inode(watch)); list = &audit_inode_hash[h]; } if (tree) { @@ -1358,11 +933,9 @@ static inline int audit_add_rule(struct audit_entry *entry) #endif mutex_unlock(&audit_filter_mutex); - audit_put_nd(ndp, ndw); /* NULL args OK */ return 0; error: - audit_put_nd(ndp, ndw); /* NULL args OK */ if (watch) audit_put_watch(watch); /* tmp watch, matches initial get */ return err; @@ -1372,7 +945,7 @@ error: static inline int audit_del_rule(struct audit_entry *entry) { struct audit_entry *e; - struct audit_watch *watch, *tmp_watch = entry->rule.watch; + struct audit_watch *watch = entry->rule.watch; struct audit_tree *tree = entry->rule.tree; struct list_head *list; LIST_HEAD(inotify_list); @@ -1394,29 +967,8 @@ static inline int audit_del_rule(struct audit_entry *entry) goto out; } - watch = e->rule.watch; - if (watch) { - struct audit_parent *parent = watch->parent; - - list_del(&e->rule.rlist); - - if (list_empty(&watch->rules)) { - audit_remove_watch(watch); - - if (list_empty(&parent->watches)) { - /* Put parent on the inotify un-registration - * list. Grab a reference before releasing - * audit_filter_mutex, to be released in - * audit_inotify_unregister(). - * If filesystem is going away, just leave - * the sucker alone, eviction will take - * care of it. 
- */ - if (pin_inotify_watch(&parent->wdata)) - list_add(&parent->ilist, &inotify_list); - } - } - } + if (e->rule.watch) + audit_remove_watch_rule(&e->rule, &inotify_list); if (e->rule.tree) audit_remove_tree_rule(&e->rule); @@ -1438,8 +990,8 @@ static inline int audit_del_rule(struct audit_entry *entry) audit_inotify_unregister(&inotify_list); out: - if (tmp_watch) - audit_put_watch(tmp_watch); /* match initial get */ + if (watch) + audit_put_watch(watch); /* match initial get */ if (tree) audit_put_tree(tree); /* that's the temporary one */ @@ -1527,11 +1079,9 @@ static void audit_log_rule_change(uid_t loginuid, u32 sessionid, u32 sid, security_release_secctx(ctx, len); } } - audit_log_format(ab, " op=%s rule key=", action); - if (rule->filterkey) - audit_log_untrustedstring(ab, rule->filterkey); - else - audit_log_format(ab, "(null)"); + audit_log_format(ab, " op="); + audit_log_string(ab, action); + audit_log_key(ab, rule->filterkey); audit_log_format(ab, " list=%d res=%d", rule->listnr, res); audit_log_end(ab); } @@ -1595,7 +1145,7 @@ int audit_receive_filter(int type, int pid, int uid, int seq, void *data, return PTR_ERR(entry); err = audit_add_rule(entry); - audit_log_rule_change(loginuid, sessionid, sid, "add", + audit_log_rule_change(loginuid, sessionid, sid, "add rule", &entry->rule, !err); if (err) @@ -1611,7 +1161,7 @@ int audit_receive_filter(int type, int pid, int uid, int seq, void *data, return PTR_ERR(entry); err = audit_del_rule(entry); - audit_log_rule_change(loginuid, sessionid, sid, "remove", + audit_log_rule_change(loginuid, sessionid, sid, "remove rule", &entry->rule, !err); audit_free_rule(entry); @@ -1793,7 +1343,7 @@ static int update_lsm_rule(struct audit_krule *r) list_del(&r->list); } else { if (watch) { - list_add(&nentry->rule.rlist, &watch->rules); + list_add(&nentry->rule.rlist, audit_watch_rules(watch)); list_del(&r->rlist); } else if (tree) list_replace_init(&r->rlist, &nentry->rule.rlist); @@ -1829,27 +1379,3 @@ int audit_update_lsm_rules(void) return err; } - -/* Update watch data in audit rules based on inotify events. 
*/ -void audit_handle_ievent(struct inotify_watch *i_watch, u32 wd, u32 mask, - u32 cookie, const char *dname, struct inode *inode) -{ - struct audit_parent *parent; - - parent = container_of(i_watch, struct audit_parent, wdata); - - if (mask & (IN_CREATE|IN_MOVED_TO) && inode) - audit_update_watch(parent, dname, inode->i_sb->s_dev, - inode->i_ino, 0); - else if (mask & (IN_DELETE|IN_MOVED_FROM)) - audit_update_watch(parent, dname, (dev_t)-1, (unsigned long)-1, 1); - /* inotify automatically removes the watch and sends IN_IGNORED */ - else if (mask & (IN_DELETE_SELF|IN_UNMOUNT)) - audit_remove_parent_watches(parent); - /* inotify does not remove the watch, so remove it manually */ - else if(mask & IN_MOVE_SELF) { - audit_remove_parent_watches(parent); - inotify_remove_watch_locked(audit_ih, i_watch); - } else if (mask & IN_IGNORED) - put_inotify_watch(i_watch); -} diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 7d6ac7c1f41..68d3c6a0ecd 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -199,6 +199,7 @@ struct audit_context { struct audit_tree_refs *trees, *first_trees; int tree_count; + struct list_head killed_trees; int type; union { @@ -548,9 +549,9 @@ static int audit_filter_rules(struct task_struct *tsk, } break; case AUDIT_WATCH: - if (name && rule->watch->ino != (unsigned long)-1) - result = (name->dev == rule->watch->dev && - name->ino == rule->watch->ino); + if (name && audit_watch_inode(rule->watch) != (unsigned long)-1) + result = (name->dev == audit_watch_dev(rule->watch) && + name->ino == audit_watch_inode(rule->watch)); break; case AUDIT_DIR: if (ctx) @@ -853,6 +854,7 @@ static inline struct audit_context *audit_alloc_context(enum audit_state state) if (!(context = kmalloc(sizeof(*context), GFP_KERNEL))) return NULL; audit_zero_context(context, state); + INIT_LIST_HEAD(&context->killed_trees); return context; } @@ -1024,8 +1026,8 @@ static int audit_log_single_execve_arg(struct audit_context *context, { char arg_num_len_buf[12]; const char __user *tmp_p = p; - /* how many digits are in arg_num? 3 is the length of " a=" */ - size_t arg_num_len = snprintf(arg_num_len_buf, 12, "%d", arg_num) + 3; + /* how many digits are in arg_num? 
5 is the length of ' a=""' */ + size_t arg_num_len = snprintf(arg_num_len_buf, 12, "%d", arg_num) + 5; size_t len, len_left, to_send; size_t max_execve_audit_len = MAX_EXECVE_AUDIT_LEN; unsigned int i, has_cntl = 0, too_long = 0; @@ -1137,7 +1139,7 @@ static int audit_log_single_execve_arg(struct audit_context *context, if (has_cntl) audit_log_n_hex(*ab, buf, to_send); else - audit_log_format(*ab, "\"%s\"", buf); + audit_log_string(*ab, buf); p += to_send; len_left -= to_send; @@ -1372,11 +1374,7 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts audit_log_task_info(ab, tsk); - if (context->filterkey) { - audit_log_format(ab, " key="); - audit_log_untrustedstring(ab, context->filterkey); - } else - audit_log_format(ab, " key=(null)"); + audit_log_key(ab, context->filterkey); audit_log_end(ab); for (aux = context->aux; aux; aux = aux->next) { @@ -1549,6 +1547,8 @@ void audit_free(struct task_struct *tsk) /* that can happen only if we are called from do_exit() */ if (context->in_syscall && context->current_state == AUDIT_RECORD_CONTEXT) audit_log_exit(context, tsk); + if (!list_empty(&context->killed_trees)) + audit_kill_trees(&context->killed_trees); audit_free_context(context); } @@ -1692,6 +1692,9 @@ void audit_syscall_exit(int valid, long return_code) context->in_syscall = 0; context->prio = context->state == AUDIT_RECORD_CONTEXT ? ~0ULL : 0; + if (!list_empty(&context->killed_trees)) + audit_kill_trees(&context->killed_trees); + if (context->previous) { struct audit_context *new_context = context->previous; context->previous = NULL; @@ -2525,3 +2528,11 @@ void audit_core_dumps(long signr) audit_log_format(ab, " sig=%ld", signr); audit_log_end(ab); } + +struct list_head *audit_killed_trees(void) +{ + struct audit_context *ctx = current->audit_context; + if (likely(!ctx || !ctx->in_syscall)) + return NULL; + return &ctx->killed_trees; +} diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 3fb789f6df9..c7ece8f027f 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -47,6 +47,7 @@ #include <linux/hash.h> #include <linux/namei.h> #include <linux/smp_lock.h> +#include <linux/pid_namespace.h> #include <asm/atomic.h> @@ -599,6 +600,7 @@ static struct inode_operations cgroup_dir_inode_operations; static struct file_operations proc_cgroupstats_operations; static struct backing_dev_info cgroup_backing_dev_info = { + .name = "cgroup", .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK, }; @@ -734,16 +736,28 @@ static void cgroup_d_remove_dir(struct dentry *dentry) * reference to css->refcnt. In general, this refcnt is expected to goes down * to zero, soon. 
* - * CGRP_WAIT_ON_RMDIR flag is modified under cgroup's inode->i_mutex; + * CGRP_WAIT_ON_RMDIR flag is set under cgroup's inode->i_mutex; */ DECLARE_WAIT_QUEUE_HEAD(cgroup_rmdir_waitq); -static void cgroup_wakeup_rmdir_waiters(const struct cgroup *cgrp) +static void cgroup_wakeup_rmdir_waiter(struct cgroup *cgrp) { - if (unlikely(test_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags))) + if (unlikely(test_and_clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags))) wake_up_all(&cgroup_rmdir_waitq); } +void cgroup_exclude_rmdir(struct cgroup_subsys_state *css) +{ + css_get(css); +} + +void cgroup_release_and_wakeup_rmdir(struct cgroup_subsys_state *css) +{ + cgroup_wakeup_rmdir_waiter(css->cgroup); + css_put(css); +} + + static int rebind_subsystems(struct cgroupfs_root *root, unsigned long final_bits) { @@ -843,6 +857,11 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) { char *token, *o = data ?: "all"; + unsigned long mask = (unsigned long)-1; + +#ifdef CONFIG_CPUSETS + mask = ~(1UL << cpuset_subsys_id); +#endif opts->subsys_bits = 0; opts->flags = 0; @@ -887,6 +906,15 @@ static int parse_cgroupfs_options(char *data, } } + /* + * Option noprefix was introduced just for backward compatibility + * with the old cpuset, so we allow noprefix only if mounting just + * the cpuset subsystem. + */ + if (test_bit(ROOT_NOPREFIX, &opts->flags) && + (opts->subsys_bits & mask)) + return -EINVAL; + /* We can't have an empty hierarchy */ if (!opts->subsys_bits) return -EINVAL; @@ -946,6 +974,7 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp) INIT_LIST_HEAD(&cgrp->children); INIT_LIST_HEAD(&cgrp->css_sets); INIT_LIST_HEAD(&cgrp->release_list); + INIT_LIST_HEAD(&cgrp->pids_list); init_rwsem(&cgrp->pids_mutex); } static void init_cgroup_root(struct cgroupfs_root *root) @@ -1343,7 +1372,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk) * wake up rmdir() waiter. the rmdir should fail since the cgroup * is no longer empty. */ - cgroup_wakeup_rmdir_waiters(cgrp); + cgroup_wakeup_rmdir_waiter(cgrp); return 0; } @@ -2187,12 +2216,30 @@ err: return ret; } +/* + * Cache pids for all threads in the same pid namespace that are + * opening the same "tasks" file. + */ +struct cgroup_pids { + /* The node in cgrp->pids_list */ + struct list_head list; + /* The cgroup those pids belong to */ + struct cgroup *cgrp; + /* The namepsace those pids belong to */ + struct pid_namespace *ns; + /* Array of process ids in the cgroup */ + pid_t *tasks_pids; + /* How many files are using the this tasks_pids array */ + int use_count; + /* Length of the current tasks_pids array */ + int length; +}; + static int cmppid(const void *a, const void *b) { return *(pid_t *)a - *(pid_t *)b; } - /* * seq_file methods for the "tasks" file. The seq_file position is the * next pid to display; the seq_file iterator is a pointer to the pid @@ -2207,45 +2254,47 @@ static void *cgroup_tasks_start(struct seq_file *s, loff_t *pos) * after a seek to the start). 
Use a binary-search to find the * next pid to display, if any */ - struct cgroup *cgrp = s->private; + struct cgroup_pids *cp = s->private; + struct cgroup *cgrp = cp->cgrp; int index = 0, pid = *pos; int *iter; down_read(&cgrp->pids_mutex); if (pid) { - int end = cgrp->pids_length; + int end = cp->length; while (index < end) { int mid = (index + end) / 2; - if (cgrp->tasks_pids[mid] == pid) { + if (cp->tasks_pids[mid] == pid) { index = mid; break; - } else if (cgrp->tasks_pids[mid] <= pid) + } else if (cp->tasks_pids[mid] <= pid) index = mid + 1; else end = mid; } } /* If we're off the end of the array, we're done */ - if (index >= cgrp->pids_length) + if (index >= cp->length) return NULL; /* Update the abstract position to be the actual pid that we found */ - iter = cgrp->tasks_pids + index; + iter = cp->tasks_pids + index; *pos = *iter; return iter; } static void cgroup_tasks_stop(struct seq_file *s, void *v) { - struct cgroup *cgrp = s->private; + struct cgroup_pids *cp = s->private; + struct cgroup *cgrp = cp->cgrp; up_read(&cgrp->pids_mutex); } static void *cgroup_tasks_next(struct seq_file *s, void *v, loff_t *pos) { - struct cgroup *cgrp = s->private; + struct cgroup_pids *cp = s->private; int *p = v; - int *end = cgrp->tasks_pids + cgrp->pids_length; + int *end = cp->tasks_pids + cp->length; /* * Advance to the next pid in the array. If this goes off the @@ -2272,26 +2321,33 @@ static struct seq_operations cgroup_tasks_seq_operations = { .show = cgroup_tasks_show, }; -static void release_cgroup_pid_array(struct cgroup *cgrp) +static void release_cgroup_pid_array(struct cgroup_pids *cp) { + struct cgroup *cgrp = cp->cgrp; + down_write(&cgrp->pids_mutex); - BUG_ON(!cgrp->pids_use_count); - if (!--cgrp->pids_use_count) { - kfree(cgrp->tasks_pids); - cgrp->tasks_pids = NULL; - cgrp->pids_length = 0; + BUG_ON(!cp->use_count); + if (!--cp->use_count) { + list_del(&cp->list); + put_pid_ns(cp->ns); + kfree(cp->tasks_pids); + kfree(cp); } up_write(&cgrp->pids_mutex); } static int cgroup_tasks_release(struct inode *inode, struct file *file) { - struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent); + struct seq_file *seq; + struct cgroup_pids *cp; if (!(file->f_mode & FMODE_READ)) return 0; - release_cgroup_pid_array(cgrp); + seq = file->private_data; + cp = seq->private; + + release_cgroup_pid_array(cp); return seq_release(inode, file); } @@ -2310,6 +2366,8 @@ static struct file_operations cgroup_tasks_operations = { static int cgroup_tasks_open(struct inode *unused, struct file *file) { struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent); + struct pid_namespace *ns = current->nsproxy->pid_ns; + struct cgroup_pids *cp; pid_t *pidarray; int npids; int retval; @@ -2336,20 +2394,37 @@ static int cgroup_tasks_open(struct inode *unused, struct file *file) * array if necessary */ down_write(&cgrp->pids_mutex); - kfree(cgrp->tasks_pids); - cgrp->tasks_pids = pidarray; - cgrp->pids_length = npids; - cgrp->pids_use_count++; + + list_for_each_entry(cp, &cgrp->pids_list, list) { + if (ns == cp->ns) + goto found; + } + + cp = kzalloc(sizeof(*cp), GFP_KERNEL); + if (!cp) { + up_write(&cgrp->pids_mutex); + kfree(pidarray); + return -ENOMEM; + } + cp->cgrp = cgrp; + cp->ns = ns; + get_pid_ns(ns); + list_add(&cp->list, &cgrp->pids_list); +found: + kfree(cp->tasks_pids); + cp->tasks_pids = pidarray; + cp->length = npids; + cp->use_count++; up_write(&cgrp->pids_mutex); file->f_op = &cgroup_tasks_operations; retval = seq_open(file, &cgroup_tasks_seq_operations); if (retval) { - 
release_cgroup_pid_array(cgrp); + release_cgroup_pid_array(cp); return retval; } - ((struct seq_file *)file->private_data)->private = cgrp; + ((struct seq_file *)file->private_data)->private = cp; return 0; } @@ -2682,33 +2757,42 @@ again: mutex_unlock(&cgroup_mutex); /* + * In general, subsystem has no css->refcnt after pre_destroy(). But + * in racy cases, subsystem may have to get css->refcnt after + * pre_destroy() and it makes rmdir return with -EBUSY. This sometimes + * makes rmdir return -EBUSY too often. To avoid that, we use waitqueue + * for cgroup's rmdir. CGRP_WAIT_ON_RMDIR is for synchronizing rmdir + * and subsystem's reference count handling. Please see css_get/put + * and css_tryget() and cgroup_wakeup_rmdir_waiter() implementation. + */ + set_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags); + + /* * Call pre_destroy handlers of subsys. Notify subsystems * that rmdir() request comes. */ ret = cgroup_call_pre_destroy(cgrp); - if (ret) + if (ret) { + clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags); return ret; + } mutex_lock(&cgroup_mutex); parent = cgrp->parent; if (atomic_read(&cgrp->count) || !list_empty(&cgrp->children)) { + clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags); mutex_unlock(&cgroup_mutex); return -EBUSY; } - /* - * css_put/get is provided for subsys to grab refcnt to css. In typical - * case, subsystem has no reference after pre_destroy(). But, under - * hierarchy management, some *temporal* refcnt can be hold. - * To avoid returning -EBUSY to a user, waitqueue is used. If subsys - * is really busy, it should return -EBUSY at pre_destroy(). wake_up - * is called when css_put() is called and refcnt goes down to 0. - */ - set_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags); prepare_to_wait(&cgroup_rmdir_waitq, &wait, TASK_INTERRUPTIBLE); - if (!cgroup_clear_css_refs(cgrp)) { mutex_unlock(&cgroup_mutex); - schedule(); + /* + * Because someone may call cgroup_wakeup_rmdir_waiter() before + * prepare_to_wait(), we need to check this flag. + */ + if (test_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags)) + schedule(); finish_wait(&cgroup_rmdir_waitq, &wait); clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags); if (signal_pending(current)) @@ -3280,7 +3364,7 @@ void __css_put(struct cgroup_subsys_state *css) set_bit(CGRP_RELEASABLE, &cgrp->flags); check_for_release(cgrp); } - cgroup_wakeup_rmdir_waiters(cgrp); + cgroup_wakeup_rmdir_waiter(cgrp); } rcu_read_unlock(); } diff --git a/kernel/cpu.c b/kernel/cpu.c index 395b6974dc8..8ce10043e4a 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -34,14 +34,11 @@ static struct { * an ongoing cpu hotplug operation. */ int refcount; -} cpu_hotplug; - -void __init cpu_hotplug_init(void) -{ - cpu_hotplug.active_writer = NULL; - mutex_init(&cpu_hotplug.lock); - cpu_hotplug.refcount = 0; -} +} cpu_hotplug = { + .active_writer = NULL, + .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock), + .refcount = 0, +}; #ifdef CONFIG_HOTPLUG_CPU diff --git a/kernel/cred.c b/kernel/cred.c index 1bb4d7e5d61..006fcab009d 100644 --- a/kernel/cred.c +++ b/kernel/cred.c @@ -18,6 +18,18 @@ #include <linux/cn_proc.h> #include "cred-internals.h" +#if 0 +#define kdebug(FMT, ...) \ + printk("[%-5.5s%5u] "FMT"\n", current->comm, current->pid ,##__VA_ARGS__) +#else +static inline __attribute__((format(printf, 1, 2))) +void no_printk(const char *fmt, ...) +{ +} +#define kdebug(FMT, ...)
\ + no_printk("[%-5.5s%5u] "FMT"\n", current->comm, current->pid ,##__VA_ARGS__) +#endif + static struct kmem_cache *cred_jar; /* @@ -36,6 +48,10 @@ static struct thread_group_cred init_tgcred = { */ struct cred init_cred = { .usage = ATOMIC_INIT(4), +#ifdef CONFIG_DEBUG_CREDENTIALS + .subscribers = ATOMIC_INIT(2), + .magic = CRED_MAGIC, +#endif .securebits = SECUREBITS_DEFAULT, .cap_inheritable = CAP_INIT_INH_SET, .cap_permitted = CAP_FULL_SET, @@ -48,6 +64,31 @@ struct cred init_cred = { #endif }; +static inline void set_cred_subscribers(struct cred *cred, int n) +{ +#ifdef CONFIG_DEBUG_CREDENTIALS + atomic_set(&cred->subscribers, n); +#endif +} + +static inline int read_cred_subscribers(const struct cred *cred) +{ +#ifdef CONFIG_DEBUG_CREDENTIALS + return atomic_read(&cred->subscribers); +#else + return 0; +#endif +} + +static inline void alter_cred_subscribers(const struct cred *_cred, int n) +{ +#ifdef CONFIG_DEBUG_CREDENTIALS + struct cred *cred = (struct cred *) _cred; + + atomic_add(n, &cred->subscribers); +#endif +} + /* * Dispose of the shared task group credentials */ @@ -85,9 +126,22 @@ static void put_cred_rcu(struct rcu_head *rcu) { struct cred *cred = container_of(rcu, struct cred, rcu); + kdebug("put_cred_rcu(%p)", cred); + +#ifdef CONFIG_DEBUG_CREDENTIALS + if (cred->magic != CRED_MAGIC_DEAD || + atomic_read(&cred->usage) != 0 || + read_cred_subscribers(cred) != 0) + panic("CRED: put_cred_rcu() sees %p with" + " mag %x, put %p, usage %d, subscr %d\n", + cred, cred->magic, cred->put_addr, + atomic_read(&cred->usage), + read_cred_subscribers(cred)); +#else if (atomic_read(&cred->usage) != 0) panic("CRED: put_cred_rcu() sees %p with usage %d\n", cred, atomic_read(&cred->usage)); +#endif security_cred_free(cred); key_put(cred->thread_keyring); @@ -106,12 +160,90 @@ static void put_cred_rcu(struct rcu_head *rcu) */ void __put_cred(struct cred *cred) { + kdebug("__put_cred(%p{%d,%d})", cred, + atomic_read(&cred->usage), + read_cred_subscribers(cred)); + BUG_ON(atomic_read(&cred->usage) != 0); +#ifdef CONFIG_DEBUG_CREDENTIALS + BUG_ON(read_cred_subscribers(cred) != 0); + cred->magic = CRED_MAGIC_DEAD; + cred->put_addr = __builtin_return_address(0); +#endif + BUG_ON(cred == current->cred); + BUG_ON(cred == current->real_cred); call_rcu(&cred->rcu, put_cred_rcu); } EXPORT_SYMBOL(__put_cred); +/* + * Clean up a task's credentials when it exits + */ +void exit_creds(struct task_struct *tsk) +{ + struct cred *cred; + + kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred, + atomic_read(&tsk->cred->usage), + read_cred_subscribers(tsk->cred)); + + cred = (struct cred *) tsk->real_cred; + tsk->real_cred = NULL; + validate_creds(cred); + alter_cred_subscribers(cred, -1); + put_cred(cred); + + cred = (struct cred *) tsk->cred; + tsk->cred = NULL; + validate_creds(cred); + alter_cred_subscribers(cred, -1); + put_cred(cred); + + cred = (struct cred *) tsk->replacement_session_keyring; + if (cred) { + tsk->replacement_session_keyring = NULL; + validate_creds(cred); + put_cred(cred); + } +} + +/* + * Allocate blank credentials, such that the credentials can be filled in at a + * later date without risk of ENOMEM. 
+ */ +struct cred *cred_alloc_blank(void) +{ + struct cred *new; + + new = kmem_cache_zalloc(cred_jar, GFP_KERNEL); + if (!new) + return NULL; + +#ifdef CONFIG_KEYS + new->tgcred = kzalloc(sizeof(*new->tgcred), GFP_KERNEL); + if (!new->tgcred) { + kfree(new); + return NULL; + } + atomic_set(&new->tgcred->usage, 1); +#endif + + atomic_set(&new->usage, 1); + + if (security_cred_alloc_blank(new, GFP_KERNEL) < 0) + goto error; + +#ifdef CONFIG_DEBUG_CREDENTIALS + new->magic = CRED_MAGIC; +#endif + return new; + +error: + abort_creds(new); + return NULL; +} + /** * prepare_creds - Prepare a new set of credentials for modification * @@ -132,16 +264,19 @@ struct cred *prepare_creds(void) const struct cred *old; struct cred *new; - BUG_ON(atomic_read(&task->real_cred->usage) < 1); + validate_process_creds(); new = kmem_cache_alloc(cred_jar, GFP_KERNEL); if (!new) return NULL; + kdebug("prepare_creds() alloc %p", new); + old = task->cred; memcpy(new, old, sizeof(struct cred)); atomic_set(&new->usage, 1); + set_cred_subscribers(new, 0); get_group_info(new->group_info); get_uid(new->user); @@ -157,6 +292,7 @@ struct cred *prepare_creds(void) if (security_prepare_creds(new, old, GFP_KERNEL) < 0) goto error; + validate_creds(new); return new; error: @@ -229,9 +365,12 @@ struct cred *prepare_usermodehelper_creds(void) if (!new) return NULL; + kdebug("prepare_usermodehelper_creds() alloc %p", new); + memcpy(new, &init_cred, sizeof(struct cred)); atomic_set(&new->usage, 1); + set_cred_subscribers(new, 0); get_group_info(new->group_info); get_uid(new->user); @@ -250,6 +389,7 @@ struct cred *prepare_usermodehelper_creds(void) #endif if (security_prepare_creds(new, &init_cred, GFP_ATOMIC) < 0) goto error; + validate_creds(new); BUG_ON(atomic_read(&new->usage) != 1); return new; @@ -286,6 +426,10 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags) ) { p->real_cred = get_cred(p->cred); get_cred(p->cred); + alter_cred_subscribers(p->cred, 2); + kdebug("share_creds(%p{%d,%d})", + p->cred, atomic_read(&p->cred->usage), + read_cred_subscribers(p->cred)); atomic_inc(&p->cred->user->processes); return 0; } @@ -331,6 +475,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags) atomic_inc(&new->user->processes); p->cred = p->real_cred = get_cred(new); + alter_cred_subscribers(new, 2); + validate_creds(new); return 0; error_put: @@ -355,13 +501,20 @@ error_put: int commit_creds(struct cred *new) { struct task_struct *task = current; - const struct cred *old; + const struct cred *old = task->real_cred; - BUG_ON(task->cred != task->real_cred); - BUG_ON(atomic_read(&task->real_cred->usage) < 2); + kdebug("commit_creds(%p{%d,%d})", new, + atomic_read(&new->usage), + read_cred_subscribers(new)); + + BUG_ON(task->cred != old); +#ifdef CONFIG_DEBUG_CREDENTIALS + BUG_ON(read_cred_subscribers(old) < 2); + validate_creds(old); + validate_creds(new); +#endif BUG_ON(atomic_read(&new->usage) < 1); - old = task->real_cred; security_commit_creds(new, old); get_cred(new); /* we will require a ref for the subj creds too */ @@ -390,12 +543,14 @@ int commit_creds(struct cred *new) * cheaply with the new uid cache, so if it matters * we should be checking for it. 
-DaveM */ + alter_cred_subscribers(new, 2); if (new->user != old->user) atomic_inc(&new->user->processes); rcu_assign_pointer(task->real_cred, new); rcu_assign_pointer(task->cred, new); if (new->user != old->user) atomic_dec(&old->user->processes); + alter_cred_subscribers(old, -2); sched_switch_user(task); @@ -428,6 +583,13 @@ EXPORT_SYMBOL(commit_creds); */ void abort_creds(struct cred *new) { + kdebug("abort_creds(%p{%d,%d})", new, + atomic_read(&new->usage), + read_cred_subscribers(new)); + +#ifdef CONFIG_DEBUG_CREDENTIALS + BUG_ON(read_cred_subscribers(new) != 0); +#endif BUG_ON(atomic_read(&new->usage) < 1); put_cred(new); } @@ -444,7 +606,20 @@ const struct cred *override_creds(const struct cred *new) { const struct cred *old = current->cred; - rcu_assign_pointer(current->cred, get_cred(new)); + kdebug("override_creds(%p{%d,%d})", new, + atomic_read(&new->usage), + read_cred_subscribers(new)); + + validate_creds(old); + validate_creds(new); + get_cred(new); + alter_cred_subscribers(new, 1); + rcu_assign_pointer(current->cred, new); + alter_cred_subscribers(old, -1); + + kdebug("override_creds() = %p{%d,%d}", old, + atomic_read(&old->usage), + read_cred_subscribers(old)); return old; } EXPORT_SYMBOL(override_creds); @@ -460,7 +635,15 @@ void revert_creds(const struct cred *old) { const struct cred *override = current->cred; + kdebug("revert_creds(%p{%d,%d})", old, + atomic_read(&old->usage), + read_cred_subscribers(old)); + + validate_creds(old); + validate_creds(override); + alter_cred_subscribers(old, 1); rcu_assign_pointer(current->cred, old); + alter_cred_subscribers(override, -1); put_cred(override); } EXPORT_SYMBOL(revert_creds); @@ -502,11 +685,15 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon) if (!new) return NULL; + kdebug("prepare_kernel_cred() alloc %p", new); + if (daemon) old = get_task_cred(daemon); else old = get_cred(&init_cred); + validate_creds(old); + *new = *old; get_uid(new->user); get_group_info(new->group_info); @@ -526,7 +713,9 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon) goto error; atomic_set(&new->usage, 1); + set_cred_subscribers(new, 0); put_cred(old); + validate_creds(new); return new; error: @@ -589,3 +778,95 @@ int set_create_files_as(struct cred *new, struct inode *inode) return security_kernel_create_files_as(new, inode); } EXPORT_SYMBOL(set_create_files_as); + +#ifdef CONFIG_DEBUG_CREDENTIALS + +/* + * dump invalid credentials + */ +static void dump_invalid_creds(const struct cred *cred, const char *label, + const struct task_struct *tsk) +{ + printk(KERN_ERR "CRED: %s credentials: %p %s%s%s\n", + label, cred, + cred == &init_cred ? "[init]" : "", + cred == tsk->real_cred ? "[real]" : "", + cred == tsk->cred ? 
"[eff]" : ""); + printk(KERN_ERR "CRED: ->magic=%x, put_addr=%p\n", + cred->magic, cred->put_addr); + printk(KERN_ERR "CRED: ->usage=%d, subscr=%d\n", + atomic_read(&cred->usage), + read_cred_subscribers(cred)); + printk(KERN_ERR "CRED: ->*uid = { %d,%d,%d,%d }\n", + cred->uid, cred->euid, cred->suid, cred->fsuid); + printk(KERN_ERR "CRED: ->*gid = { %d,%d,%d,%d }\n", + cred->gid, cred->egid, cred->sgid, cred->fsgid); +#ifdef CONFIG_SECURITY + printk(KERN_ERR "CRED: ->security is %p\n", cred->security); + if ((unsigned long) cred->security >= PAGE_SIZE && + (((unsigned long) cred->security & 0xffffff00) != + (POISON_FREE << 24 | POISON_FREE << 16 | POISON_FREE << 8))) + printk(KERN_ERR "CRED: ->security {%x, %x}\n", + ((u32*)cred->security)[0], + ((u32*)cred->security)[1]); +#endif +} + +/* + * report use of invalid credentials + */ +void __invalid_creds(const struct cred *cred, const char *file, unsigned line) +{ + printk(KERN_ERR "CRED: Invalid credentials\n"); + printk(KERN_ERR "CRED: At %s:%u\n", file, line); + dump_invalid_creds(cred, "Specified", current); + BUG(); +} +EXPORT_SYMBOL(__invalid_creds); + +/* + * check the credentials on a process + */ +void __validate_process_creds(struct task_struct *tsk, + const char *file, unsigned line) +{ + if (tsk->cred == tsk->real_cred) { + if (unlikely(read_cred_subscribers(tsk->cred) < 2 || + creds_are_invalid(tsk->cred))) + goto invalid_creds; + } else { + if (unlikely(read_cred_subscribers(tsk->real_cred) < 1 || + read_cred_subscribers(tsk->cred) < 1 || + creds_are_invalid(tsk->real_cred) || + creds_are_invalid(tsk->cred))) + goto invalid_creds; + } + return; + +invalid_creds: + printk(KERN_ERR "CRED: Invalid process credentials\n"); + printk(KERN_ERR "CRED: At %s:%u\n", file, line); + + dump_invalid_creds(tsk->real_cred, "Real", tsk); + if (tsk->cred != tsk->real_cred) + dump_invalid_creds(tsk->cred, "Effective", tsk); + else + printk(KERN_ERR "CRED: Effective creds == Real creds\n"); + BUG(); +} +EXPORT_SYMBOL(__validate_process_creds); + +/* + * check creds for do_exit() + */ +void validate_creds_for_do_exit(struct task_struct *tsk) +{ + kdebug("validate_creds_for_do_exit(%p,%p{%d,%d})", + tsk->real_cred, tsk->cred, + atomic_read(&tsk->cred->usage), + read_cred_subscribers(tsk->cred)); + + __validate_process_creds(tsk, __FILE__, __LINE__); +} + +#endif /* CONFIG_DEBUG_CREDENTIALS */ diff --git a/kernel/exit.c b/kernel/exit.c index b6c90b5ef50..c98ff7a8025 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -12,7 +12,6 @@ #include <linux/completion.h> #include <linux/personality.h> #include <linux/tty.h> -#include <linux/mnt_namespace.h> #include <linux/iocontext.h> #include <linux/key.h> #include <linux/security.h> @@ -375,9 +374,8 @@ static void set_special_pids(struct pid *pid) } /* - * Let kernel threads use this to say that they - * allow a certain signal (since daemonize() will - * have disabled all of them by default). + * Let kernel threads use this to say that they allow a certain signal. + * Must not be used if kthread was cloned with CLONE_SIGHAND. */ int allow_signal(int sig) { @@ -385,14 +383,14 @@ int allow_signal(int sig) return -EINVAL; spin_lock_irq(¤t->sighand->siglock); + /* This is only needed for daemonize()'ed kthreads */ sigdelset(¤t->blocked, sig); - if (!current->mm) { - /* Kernel threads handle their own signals. 
- Let the signal code know it'll be handled, so - that they don't get converted to SIGKILL or - just silently dropped */ - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2; - } + /* + * Kernel threads handle their own signals. Let the signal code + * know it'll be handled, so that they don't get converted to + * SIGKILL or just silently dropped. + */ + current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2; recalc_sigpending(); spin_unlock_irq(¤t->sighand->siglock); return 0; @@ -591,7 +589,7 @@ retry: /* * Search in the siblings */ - list_for_each_entry(c, &p->parent->children, sibling) { + list_for_each_entry(c, &p->real_parent->children, sibling) { if (c->mm == mm) goto assign_new_owner; } @@ -758,7 +756,7 @@ static void reparent_thread(struct task_struct *father, struct task_struct *p, p->exit_signal = SIGCHLD; /* If it has exited notify the new parent about this child's death. */ - if (!p->ptrace && + if (!task_ptrace(p) && p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) { do_notify_parent(p, p->exit_signal); if (task_detached(p)) { @@ -783,7 +781,7 @@ static void forget_original_parent(struct task_struct *father) list_for_each_entry_safe(p, n, &father->children, sibling) { p->real_parent = reaper; if (p->parent == father) { - BUG_ON(p->ptrace); + BUG_ON(task_ptrace(p)); p->parent = p->real_parent; } reparent_thread(father, p, &dead_children); @@ -903,6 +901,8 @@ NORET_TYPE void do_exit(long code) tracehook_report_exit(&code); + validate_creds_for_do_exit(tsk); + /* * We're taking recursive faults here in do_exit. Safest is to just * leave this task alone and wait for reboot. @@ -1011,6 +1011,8 @@ NORET_TYPE void do_exit(long code) if (tsk->splice_pipe) __free_pipe_info(tsk->splice_pipe); + validate_creds_for_do_exit(tsk); + preempt_disable(); /* causes final put_task_struct in finish_task_switch(). */ tsk->state = TASK_DEAD; @@ -1081,6 +1083,18 @@ SYSCALL_DEFINE1(exit_group, int, error_code) return 0; } +struct wait_opts { + enum pid_type wo_type; + int wo_flags; + struct pid *wo_pid; + + struct siginfo __user *wo_info; + int __user *wo_stat; + struct rusage __user *wo_rusage; + + int notask_error; +}; + static struct pid *task_pid_type(struct task_struct *task, enum pid_type type) { struct pid *pid = NULL; @@ -1091,13 +1105,12 @@ static struct pid *task_pid_type(struct task_struct *task, enum pid_type type) return pid; } -static int eligible_child(enum pid_type type, struct pid *pid, int options, - struct task_struct *p) +static int eligible_child(struct wait_opts *wo, struct task_struct *p) { int err; - if (type < PIDTYPE_MAX) { - if (task_pid_type(p, type) != pid) + if (wo->wo_type < PIDTYPE_MAX) { + if (task_pid_type(p, wo->wo_type) != wo->wo_pid) return 0; } @@ -1106,8 +1119,8 @@ static int eligible_child(enum pid_type type, struct pid *pid, int options, * set; otherwise, wait for non-clone children *only*. (Note: * A "clone" child here is one that reports to its parent * using a signal other than SIGCHLD.) 
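 */

The __WCLONE/__WALL rule reads more easily as a standalone predicate; an equivalent restatement of the check below (assuming the GNU definitions of __WALL/__WCLONE from <sys/wait.h>):

#define _GNU_SOURCE
#include <signal.h>
#include <sys/wait.h>

/* eligible iff __WALL is set, or the "clone-ness" of the child
 * (exit_signal != SIGCHLD) matches the __WCLONE request */
static int child_matches_options(int exit_signal, int wo_flags)
{
	if (wo_flags & __WALL)
		return 1;
	return (exit_signal != SIGCHLD) == !!(wo_flags & __WCLONE);
}

/*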
*/ - if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0)) - && !(options & __WALL)) + if (((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE)) + && !(wo->wo_flags & __WALL)) return 0; err = security_task_wait(p); @@ -1117,14 +1130,15 @@ static int eligible_child(enum pid_type type, struct pid *pid, int options, return 1; } -static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid, - int why, int status, - struct siginfo __user *infop, - struct rusage __user *rusagep) +static int wait_noreap_copyout(struct wait_opts *wo, struct task_struct *p, + pid_t pid, uid_t uid, int why, int status) { - int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0; + struct siginfo __user *infop; + int retval = wo->wo_rusage + ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0; put_task_struct(p); + infop = wo->wo_info; if (!retval) retval = put_user(SIGCHLD, &infop->si_signo); if (!retval) @@ -1148,19 +1162,18 @@ static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid, * the lock and this task is uninteresting. If we return nonzero, we have * released the lock and the system call should return. */ -static int wait_task_zombie(struct task_struct *p, int options, - struct siginfo __user *infop, - int __user *stat_addr, struct rusage __user *ru) +static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) { unsigned long state; int retval, status, traced; pid_t pid = task_pid_vnr(p); uid_t uid = __task_cred(p)->uid; + struct siginfo __user *infop; - if (!likely(options & WEXITED)) + if (!likely(wo->wo_flags & WEXITED)) return 0; - if (unlikely(options & WNOWAIT)) { + if (unlikely(wo->wo_flags & WNOWAIT)) { int exit_code = p->exit_code; int why, status; @@ -1173,8 +1186,7 @@ static int wait_task_zombie(struct task_struct *p, int options, why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED; status = exit_code & 0x7f; } - return wait_noreap_copyout(p, pid, uid, why, - status, infop, ru); + return wait_noreap_copyout(wo, p, pid, uid, why, status); } /* @@ -1188,11 +1200,13 @@ static int wait_task_zombie(struct task_struct *p, int options, } traced = ptrace_reparented(p); - - if (likely(!traced)) { + /* + * It can be ptraced but not reparented, check + * !task_detached() to filter out sub-threads. + */ + if (likely(!traced) && likely(!task_detached(p))) { struct signal_struct *psig; struct signal_struct *sig; - struct task_cputime cputime; /* * The resource counters for the group leader are in its @@ -1205,26 +1219,23 @@ static int wait_task_zombie(struct task_struct *p, int options, * p->signal fields, because they are only touched by * __exit_signal, which runs with tasklist_lock * write-locked anyway, and so is excluded here. We do - * need to protect the access to p->parent->signal fields, + * need to protect the access to parent->signal fields, * as other threads in the parent group can be right * here reaping other children at the same time. - * - * We use thread_group_cputime() to get times for the thread - * group, which consolidates times for all threads in the - * group including the group leader. 
*/ - thread_group_cputime(p, &cputime); - spin_lock_irq(&p->parent->sighand->siglock); - psig = p->parent->signal; + spin_lock_irq(&p->real_parent->sighand->siglock); + psig = p->real_parent->signal; sig = p->signal; psig->cutime = cputime_add(psig->cutime, - cputime_add(cputime.utime, - sig->cutime)); + cputime_add(p->utime, + cputime_add(sig->utime, + sig->cutime))); psig->cstime = cputime_add(psig->cstime, - cputime_add(cputime.stime, - sig->cstime)); + cputime_add(p->stime, + cputime_add(sig->stime, + sig->cstime))); psig->cgtime = cputime_add(psig->cgtime, cputime_add(p->gtime, @@ -1246,7 +1257,7 @@ static int wait_task_zombie(struct task_struct *p, int options, sig->oublock + sig->coublock; task_io_accounting_add(&psig->ioac, &p->ioac); task_io_accounting_add(&psig->ioac, &sig->ioac); - spin_unlock_irq(&p->parent->sighand->siglock); + spin_unlock_irq(&p->real_parent->sighand->siglock); } /* @@ -1255,11 +1266,14 @@ static int wait_task_zombie(struct task_struct *p, int options, */ read_unlock(&tasklist_lock); - retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0; + retval = wo->wo_rusage + ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0; status = (p->signal->flags & SIGNAL_GROUP_EXIT) ? p->signal->group_exit_code : p->exit_code; - if (!retval && stat_addr) - retval = put_user(status, stat_addr); + if (!retval && wo->wo_stat) + retval = put_user(status, wo->wo_stat); + + infop = wo->wo_info; if (!retval && infop) retval = put_user(SIGCHLD, &infop->si_signo); if (!retval && infop) @@ -1327,15 +1341,18 @@ static int *task_stopped_code(struct task_struct *p, bool ptrace) * the lock and this task is uninteresting. If we return nonzero, we have * released the lock and the system call should return. */ -static int wait_task_stopped(int ptrace, struct task_struct *p, - int options, struct siginfo __user *infop, - int __user *stat_addr, struct rusage __user *ru) +static int wait_task_stopped(struct wait_opts *wo, + int ptrace, struct task_struct *p) { + struct siginfo __user *infop; int retval, exit_code, *p_code, why; uid_t uid = 0; /* unneeded, required by compiler */ pid_t pid; - if (!(options & WUNTRACED)) + /* + * Traditionally we see ptrace'd stopped tasks regardless of options. + */ + if (!ptrace && !(wo->wo_flags & WUNTRACED)) return 0; exit_code = 0; @@ -1349,7 +1366,7 @@ static int wait_task_stopped(int ptrace, struct task_struct *p, if (!exit_code) goto unlock_sig; - if (!unlikely(options & WNOWAIT)) + if (!unlikely(wo->wo_flags & WNOWAIT)) *p_code = 0; /* don't need the RCU readlock here as we're holding a spinlock */ @@ -1371,14 +1388,15 @@ unlock_sig: why = ptrace ? CLD_TRAPPED : CLD_STOPPED; read_unlock(&tasklist_lock); - if (unlikely(options & WNOWAIT)) - return wait_noreap_copyout(p, pid, uid, - why, exit_code, - infop, ru); + if (unlikely(wo->wo_flags & WNOWAIT)) + return wait_noreap_copyout(wo, p, pid, uid, why, exit_code); + + retval = wo->wo_rusage + ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0; + if (!retval && wo->wo_stat) + retval = put_user((exit_code << 8) | 0x7f, wo->wo_stat); - retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0; - if (!retval && stat_addr) - retval = put_user((exit_code << 8) | 0x7f, stat_addr); + infop = wo->wo_info; if (!retval && infop) retval = put_user(SIGCHLD, &infop->si_signo); if (!retval && infop) @@ -1405,15 +1423,13 @@ unlock_sig: * the lock and this task is uninteresting. If we return nonzero, we have * released the lock and the system call should return. 
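 */

Userspace observes the SIGNAL_STOP_CONTINUED case handled below as WIFCONTINUED(); a minimal illustration (the 0xffff status written by wait_task_continued() is exactly what WIFCONTINUED() tests for):

#include <stdio.h>
#include <sys/wait.h>

static void report_continued(pid_t child)
{
	int status;

	if (waitpid(child, &status, WCONTINUED) == child &&
	    WIFCONTINUED(status))
		printf("%d resumed after SIGCONT\n", (int)child);
}

/*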
*/ -static int wait_task_continued(struct task_struct *p, int options, - struct siginfo __user *infop, - int __user *stat_addr, struct rusage __user *ru) +static int wait_task_continued(struct wait_opts *wo, struct task_struct *p) { int retval; pid_t pid; uid_t uid; - if (!unlikely(options & WCONTINUED)) + if (!unlikely(wo->wo_flags & WCONTINUED)) return 0; if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) @@ -1425,7 +1441,7 @@ static int wait_task_continued(struct task_struct *p, int options, spin_unlock_irq(&p->sighand->siglock); return 0; } - if (!unlikely(options & WNOWAIT)) + if (!unlikely(wo->wo_flags & WNOWAIT)) p->signal->flags &= ~SIGNAL_STOP_CONTINUED; uid = __task_cred(p)->uid; spin_unlock_irq(&p->sighand->siglock); @@ -1434,17 +1450,17 @@ static int wait_task_continued(struct task_struct *p, int options, get_task_struct(p); read_unlock(&tasklist_lock); - if (!infop) { - retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0; + if (!wo->wo_info) { + retval = wo->wo_rusage + ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0; put_task_struct(p); - if (!retval && stat_addr) - retval = put_user(0xffff, stat_addr); + if (!retval && wo->wo_stat) + retval = put_user(0xffff, wo->wo_stat); if (!retval) retval = pid; } else { - retval = wait_noreap_copyout(p, pid, uid, - CLD_CONTINUED, SIGCONT, - infop, ru); + retval = wait_noreap_copyout(wo, p, pid, uid, + CLD_CONTINUED, SIGCONT); BUG_ON(retval == 0); } @@ -1454,19 +1470,16 @@ static int wait_task_continued(struct task_struct *p, int options, /* * Consider @p for a wait by @parent. * - * -ECHILD should be in *@notask_error before the first call. + * -ECHILD should be in ->notask_error before the first call. * Returns nonzero for a final return, when we have unlocked tasklist_lock. * Returns zero if the search for a child should continue; - * then *@notask_error is 0 if @p is an eligible child, + * then ->notask_error is 0 if @p is an eligible child, * or another error from security_task_wait(), or still -ECHILD. */ -static int wait_consider_task(struct task_struct *parent, int ptrace, - struct task_struct *p, int *notask_error, - enum pid_type type, struct pid *pid, int options, - struct siginfo __user *infop, - int __user *stat_addr, struct rusage __user *ru) +static int wait_consider_task(struct wait_opts *wo, struct task_struct *parent, + int ptrace, struct task_struct *p) { - int ret = eligible_child(type, pid, options, p); + int ret = eligible_child(wo, p); if (!ret) return ret; @@ -1478,17 +1491,17 @@ static int wait_consider_task(struct task_struct *parent, int ptrace, * to look for security policy problems, rather * than for mysterious wait bugs. */ - if (*notask_error) - *notask_error = ret; + if (wo->notask_error) + wo->notask_error = ret; return 0; } - if (likely(!ptrace) && unlikely(p->ptrace)) { + if (likely(!ptrace) && unlikely(task_ptrace(p))) { /* * This child is hidden by ptrace. * We aren't allowed to see it now, but eventually we will. */ - *notask_error = 0; + wo->notask_error = 0; return 0; } @@ -1499,34 +1512,30 @@ static int wait_consider_task(struct task_struct *parent, int ptrace, * We don't reap group leaders with subthreads. */ if (p->exit_state == EXIT_ZOMBIE && !delay_group_leader(p)) - return wait_task_zombie(p, options, infop, stat_addr, ru); + return wait_task_zombie(wo, p); /* * It's stopped or running now, so it might * later continue, exit, or stop again. 
*/ - *notask_error = 0; + wo->notask_error = 0; if (task_stopped_code(p, ptrace)) - return wait_task_stopped(ptrace, p, options, - infop, stat_addr, ru); + return wait_task_stopped(wo, ptrace, p); - return wait_task_continued(p, options, infop, stat_addr, ru); + return wait_task_continued(wo, p); } /* * Do the work of do_wait() for one thread in the group, @tsk. * - * -ECHILD should be in *@notask_error before the first call. + * -ECHILD should be in ->notask_error before the first call. * Returns nonzero for a final return, when we have unlocked tasklist_lock. * Returns zero if the search for a child should continue; then - * *@notask_error is 0 if there were any eligible children, + * ->notask_error is 0 if there were any eligible children, * or another error from security_task_wait(), or still -ECHILD. */ -static int do_wait_thread(struct task_struct *tsk, int *notask_error, - enum pid_type type, struct pid *pid, int options, - struct siginfo __user *infop, int __user *stat_addr, - struct rusage __user *ru) +static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk) { struct task_struct *p; @@ -1535,9 +1544,7 @@ static int do_wait_thread(struct task_struct *tsk, int *notask_error, * Do not consider detached threads. */ if (!task_detached(p)) { - int ret = wait_consider_task(tsk, 0, p, notask_error, - type, pid, options, - infop, stat_addr, ru); + int ret = wait_consider_task(wo, tsk, 0, p); if (ret) return ret; } @@ -1546,22 +1553,12 @@ static int do_wait_thread(struct task_struct *tsk, int *notask_error, return 0; } -static int ptrace_do_wait(struct task_struct *tsk, int *notask_error, - enum pid_type type, struct pid *pid, int options, - struct siginfo __user *infop, int __user *stat_addr, - struct rusage __user *ru) +static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk) { struct task_struct *p; - /* - * Traditionally we see ptrace'd stopped tasks regardless of options. - */ - options |= WUNTRACED; - list_for_each_entry(p, &tsk->ptraced, ptrace_entry) { - int ret = wait_consider_task(tsk, 1, p, notask_error, - type, pid, options, - infop, stat_addr, ru); + int ret = wait_consider_task(wo, tsk, 1, p); if (ret) return ret; } @@ -1569,65 +1566,59 @@ static int ptrace_do_wait(struct task_struct *tsk, int *notask_error, return 0; } -static long do_wait(enum pid_type type, struct pid *pid, int options, - struct siginfo __user *infop, int __user *stat_addr, - struct rusage __user *ru) +static long do_wait(struct wait_opts *wo) { DECLARE_WAITQUEUE(wait, current); struct task_struct *tsk; int retval; - trace_sched_process_wait(pid); + trace_sched_process_wait(wo->wo_pid); add_wait_queue(&current->signal->wait_chldexit,&wait); repeat: /* * If there is nothing that can match our criteria just get out. - * We will clear @retval to zero if we see any child that might later - * match our criteria, even if we are not able to reap it yet. + * We will clear ->notask_error to zero if we see any child that + * might later match our criteria, even if we are not able to reap + * it yet.
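 */

For reference, the wait_opts fields map one-to-one onto the waitid(2) arguments; a hedged userspace sketch of the notask_error contract (-ECHILD only when no child could ever match; hypothetical helper name):

#include <errno.h>
#include <stdio.h>
#include <sys/wait.h>

static int poll_children(void)
{
	siginfo_t info = { 0 };

	if (waitid(P_ALL, 0, &info, WEXITED | WNOHANG) < 0)
		return -errno;		/* -ECHILD: nothing can ever match */
	if (info.si_pid == 0)
		return 0;		/* children exist, none reapable yet */
	printf("reaped %d (si_code %d)\n", info.si_pid, info.si_code);
	return info.si_pid;
}

/*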
*/ - retval = -ECHILD; - if ((type < PIDTYPE_MAX) && (!pid || hlist_empty(&pid->tasks[type]))) - goto end; + wo->notask_error = -ECHILD; + if ((wo->wo_type < PIDTYPE_MAX) && + (!wo->wo_pid || hlist_empty(&wo->wo_pid->tasks[wo->wo_type]))) + goto notask; - current->state = TASK_INTERRUPTIBLE; + set_current_state(TASK_INTERRUPTIBLE); read_lock(&tasklist_lock); tsk = current; do { - int tsk_result = do_wait_thread(tsk, &retval, - type, pid, options, - infop, stat_addr, ru); - if (!tsk_result) - tsk_result = ptrace_do_wait(tsk, &retval, - type, pid, options, - infop, stat_addr, ru); - if (tsk_result) { - /* - * tasklist_lock is unlocked and we have a final result. - */ - retval = tsk_result; + retval = do_wait_thread(wo, tsk); + if (retval) + goto end; + + retval = ptrace_do_wait(wo, tsk); + if (retval) goto end; - } - if (options & __WNOTHREAD) + if (wo->wo_flags & __WNOTHREAD) break; - tsk = next_thread(tsk); - BUG_ON(tsk->signal != current->signal); - } while (tsk != current); + } while_each_thread(current, tsk); read_unlock(&tasklist_lock); - if (!retval && !(options & WNOHANG)) { +notask: + retval = wo->notask_error; + if (!retval && !(wo->wo_flags & WNOHANG)) { retval = -ERESTARTSYS; if (!signal_pending(current)) { schedule(); goto repeat; } } - end: - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); remove_wait_queue(¤t->signal->wait_chldexit,&wait); - if (infop) { + if (wo->wo_info) { + struct siginfo __user *infop = wo->wo_info; + if (retval > 0) retval = 0; else { @@ -1656,6 +1647,7 @@ end: SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *, infop, int, options, struct rusage __user *, ru) { + struct wait_opts wo; struct pid *pid = NULL; enum pid_type type; long ret; @@ -1685,7 +1677,14 @@ SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *, if (type < PIDTYPE_MAX) pid = find_get_pid(upid); - ret = do_wait(type, pid, options, infop, NULL, ru); + + wo.wo_type = type; + wo.wo_pid = pid; + wo.wo_flags = options; + wo.wo_info = infop; + wo.wo_stat = NULL; + wo.wo_rusage = ru; + ret = do_wait(&wo); put_pid(pid); /* avoid REGPARM breakage on x86: */ @@ -1696,6 +1695,7 @@ SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *, SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr, int, options, struct rusage __user *, ru) { + struct wait_opts wo; struct pid *pid = NULL; enum pid_type type; long ret; @@ -1717,7 +1717,13 @@ SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr, pid = find_get_pid(upid); } - ret = do_wait(type, pid, options | WEXITED, NULL, stat_addr, ru); + wo.wo_type = type; + wo.wo_pid = pid; + wo.wo_flags = options | WEXITED; + wo.wo_info = NULL; + wo.wo_stat = stat_addr; + wo.wo_rusage = ru; + ret = do_wait(&wo); put_pid(pid); /* avoid REGPARM breakage on x86: */ diff --git a/kernel/fork.c b/kernel/fork.c index be022c200da..aab8579c609 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -17,7 +17,6 @@ #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/completion.h> -#include <linux/mnt_namespace.h> #include <linux/personality.h> #include <linux/mempolicy.h> #include <linux/sem.h> @@ -153,8 +152,7 @@ void __put_task_struct(struct task_struct *tsk) WARN_ON(atomic_read(&tsk->usage)); WARN_ON(tsk == current); - put_cred(tsk->real_cred); - put_cred(tsk->cred); + exit_creds(tsk); delayacct_tsk_free(tsk); if (!profile_handoff_task(tsk)) @@ -568,18 +566,18 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm) * the value intact in a core dump, and to save the 
unnecessary * trouble otherwise. Userland only wants this done for a sys_exit. */ - if (tsk->clear_child_tid - && !(tsk->flags & PF_SIGNALED) - && atomic_read(&mm->mm_users) > 1) { - u32 __user * tidptr = tsk->clear_child_tid; + if (tsk->clear_child_tid) { + if (!(tsk->flags & PF_SIGNALED) && + atomic_read(&mm->mm_users) > 1) { + /* + * We don't check the error code - if userspace has + * not set up a proper pointer then tough luck. + */ + put_user(0, tsk->clear_child_tid); + sys_futex(tsk->clear_child_tid, FUTEX_WAKE, + 1, NULL, NULL, 0); + } tsk->clear_child_tid = NULL; - - /* - * We don't check the error code - if userspace has - * not set up a proper pointer then tough luck. - */ - put_user(0, tidptr); - sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0); } } @@ -816,11 +814,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) { struct signal_struct *sig; - if (clone_flags & CLONE_THREAD) { - atomic_inc(¤t->signal->count); - atomic_inc(¤t->signal->live); + if (clone_flags & CLONE_THREAD) return 0; - } sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL); tsk->signal = sig; @@ -878,16 +873,6 @@ void __cleanup_signal(struct signal_struct *sig) kmem_cache_free(signal_cachep, sig); } -static void cleanup_signal(struct task_struct *tsk) -{ - struct signal_struct *sig = tsk->signal; - - atomic_dec(&sig->live); - - if (atomic_dec_and_test(&sig->count)) - __cleanup_signal(sig); -} - static void copy_flags(unsigned long clone_flags, struct task_struct *p) { unsigned long new_flags = p->flags; @@ -1029,7 +1014,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, p->vfork_done = NULL; spin_lock_init(&p->alloc_lock); - clear_tsk_thread_flag(p, TIF_SIGPENDING); init_sigpending(&p->pending); p->utime = cputime_zero; @@ -1241,6 +1225,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, } if (clone_flags & CLONE_THREAD) { + atomic_inc(¤t->signal->count); + atomic_inc(¤t->signal->live); p->group_leader = current->group_leader; list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group); } @@ -1270,6 +1256,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, write_unlock_irq(&tasklist_lock); proc_fork_connector(p); cgroup_post_fork(p); + perf_counter_fork(p); return p; bad_fork_free_pid: @@ -1283,7 +1270,8 @@ bad_fork_cleanup_mm: if (p->mm) mmput(p->mm); bad_fork_cleanup_signal: - cleanup_signal(p); + if (!(clone_flags & CLONE_THREAD)) + __cleanup_signal(p->signal); bad_fork_cleanup_sighand: __cleanup_sighand(p->sighand); bad_fork_cleanup_fs: @@ -1308,8 +1296,7 @@ bad_fork_cleanup_put_domain: module_put(task_thread_info(p)->exec_domain->module); bad_fork_cleanup_count: atomic_dec(&p->cred->user->processes); - put_cred(p->real_cred); - put_cred(p->cred); + exit_creds(p); bad_fork_free: free_task(p); fork_out: @@ -1409,12 +1396,6 @@ long do_fork(unsigned long clone_flags, if (clone_flags & CLONE_VFORK) { p->vfork_done = &vfork; init_completion(&vfork); - } else if (!(clone_flags & CLONE_VM)) { - /* - * vfork will do an exec which will call - * set_task_comm() - */ - perf_counter_fork(p); } audit_finish_fork(p); diff --git a/kernel/freezer.c b/kernel/freezer.c index 2f4936cf708..bd1d42b17cb 100644 --- a/kernel/freezer.c +++ b/kernel/freezer.c @@ -44,12 +44,19 @@ void refrigerator(void) recalc_sigpending(); /* We sent fake signal, clean it up */ spin_unlock_irq(¤t->sighand->siglock); + /* prevent accounting of that task to load */ + current->flags |= PF_FREEZING; + for (;;) { set_current_state(TASK_UNINTERRUPTIBLE); if 
(!frozen(current)) break; schedule(); } + + /* Remove the accounting blocker */ + current->flags &= ~PF_FREEZING; + pr_debug("%s left refrigerator\n", current->comm); __set_current_state(save); } diff --git a/kernel/futex.c b/kernel/futex.c index 80b5ce71659..e18cfbdc719 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -247,6 +247,7 @@ again: if (err < 0) return err; + page = compound_head(page); lock_page(page); if (!page->mapping) { unlock_page(page); @@ -284,6 +285,25 @@ void put_futex_key(int fshared, union futex_key *key) drop_futex_key_refs(key); } +/* + * fault_in_user_writeable - fault in user address and verify RW access + * @uaddr: pointer to faulting user space address + * + * Slow path to fixup the fault we just took in the atomic write + * access to @uaddr. + * + * We have no generic implementation of a non destructive write to the + * user address. We know that we faulted in the atomic pagefault + * disabled section so we can as well avoid the #PF overhead by + * calling get_user_pages() right away. + */ +static int fault_in_user_writeable(u32 __user *uaddr) +{ + int ret = get_user_pages(current, current->mm, (unsigned long)uaddr, + 1, 1, 0, NULL, NULL); + return ret < 0 ? ret : 0; +} + /** * futex_top_waiter() - Return the highest priority waiter on a futex * @hb: the hash bucket the futex_q's reside in @@ -896,7 +916,6 @@ retry: retry_private: op_ret = futex_atomic_op_inuser(op, uaddr2); if (unlikely(op_ret < 0)) { - u32 dummy; double_unlock_hb(hb1, hb2); @@ -914,7 +933,7 @@ retry_private: goto out_put_keys; } - ret = get_user(dummy, uaddr2); + ret = fault_in_user_writeable(uaddr2); if (ret) goto out_put_keys; @@ -991,15 +1010,19 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1, * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue * q: the futex_q * key: the key of the requeue target futex + * hb: the hash_bucket of the requeue target futex * * During futex_requeue, with requeue_pi=1, it is possible to acquire the * target futex if it is uncontended or via a lock steal. Set the futex_q key * to the requeue target futex so the waiter can detect the wakeup on the right * futex, but remove it from the hb and NULL the rt_waiter so it can detect - * atomic lock acquisition. Must be called with the q->lock_ptr held. + * atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock + * to protect access to the pi_state to fixup the owner later. Must be called + * with both q->lock_ptr and hb->lock held. 
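 */

For orientation, the userspace half of a futex word: the kernel may need to write to it (which is why the fault paths in this file now use fault_in_user_writeable() rather than a read-only get_user()), and the basic wait/wake pair is just a syscall wrapper; a hedged sketch:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static long futex_wait(int *uaddr, int val)
{
	/* sleeps only while *uaddr still equals val */
	return syscall(SYS_futex, uaddr, FUTEX_WAIT, val, NULL, NULL, 0);
}

static long futex_wake(int *uaddr, int nr_wake)
{
	return syscall(SYS_futex, uaddr, FUTEX_WAKE, nr_wake, NULL, NULL, 0);
}

/*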
*/ static inline -void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key) +void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, + struct futex_hash_bucket *hb) { drop_futex_key_refs(&q->key); get_futex_key_refs(key); @@ -1011,6 +1034,11 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key) WARN_ON(!q->rt_waiter); q->rt_waiter = NULL; + q->lock_ptr = &hb->lock; +#ifdef CONFIG_DEBUG_PI_LIST + q->list.plist.lock = &hb->lock; +#endif + wake_up_state(q->task, TASK_NORMAL); } @@ -1069,7 +1097,7 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex, ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task, set_waiters); if (ret == 1) - requeue_pi_wake_futex(top_waiter, key2); + requeue_pi_wake_futex(top_waiter, key2, hb2); return ret; } @@ -1204,7 +1232,7 @@ retry_private: double_unlock_hb(hb1, hb2); put_futex_key(fshared, &key2); put_futex_key(fshared, &key1); - ret = get_user(curval2, uaddr2); + ret = fault_in_user_writeable(uaddr2); if (!ret) goto retry; goto out; @@ -1228,8 +1256,15 @@ retry_private: if (!match_futex(&this->key, &key1)) continue; - WARN_ON(!requeue_pi && this->rt_waiter); - WARN_ON(requeue_pi && !this->rt_waiter); + /* + * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always + * be paired with each other and no other futex ops. + */ + if ((requeue_pi && !this->rt_waiter) || + (!requeue_pi && this->rt_waiter)) { + ret = -EINVAL; + break; + } /* * Wake nr_wake waiters. For requeue_pi, if we acquired the @@ -1254,7 +1289,7 @@ retry_private: this->task, 1); if (ret == 1) { /* We got the lock. */ - requeue_pi_wake_futex(this, &key2); + requeue_pi_wake_futex(this, &key2, hb2); continue; } else if (ret) { /* -EDEADLK */ @@ -1482,7 +1517,7 @@ retry: handle_fault: spin_unlock(q->lock_ptr); - ret = get_user(uval, uaddr); + ret = fault_in_user_writeable(uaddr); spin_lock(q->lock_ptr); @@ -1807,7 +1842,6 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared, { struct hrtimer_sleeper timeout, *to = NULL; struct futex_hash_bucket *hb; - u32 uval; struct futex_q q; int res, ret; @@ -1909,16 +1943,9 @@ out: return ret != -EINTR ? ret : -ERESTARTNOINTR; uaddr_faulted: - /* - * We have to r/w *(int __user *)uaddr, and we have to modify it - * atomically. Therefore, if we continue to fault after get_user() - * below, we need to handle the fault ourselves, while still holding - * the mmap_sem. This can occur if the uaddr is under contention as - * we have to drop the mmap_sem in order to call get_user(). - */ queue_unlock(&q, hb); - ret = get_user(uval, uaddr); + ret = fault_in_user_writeable(uaddr); if (ret) goto out_put_key; @@ -2013,17 +2040,10 @@ out: return ret; pi_faulted: - /* - * We have to r/w *(int __user *)uaddr, and we have to modify it - * atomically. Therefore, if we continue to fault after get_user() - * below, we need to handle the fault ourselves, while still holding - * the mmap_sem. This can occur if the uaddr is under contention as - * we have to drop the mmap_sem in order to call get_user(). 
- */ spin_unlock(&hb->lock); put_futex_key(fshared, &key); - ret = get_user(uval, uaddr); + ret = fault_in_user_writeable(uaddr); if (!ret) goto retry; diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c index d607a5b9ee2..235716556bf 100644 --- a/kernel/futex_compat.c +++ b/kernel/futex_compat.c @@ -180,7 +180,8 @@ asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val, int cmd = op & FUTEX_CMD_MASK; if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI || - cmd == FUTEX_WAIT_BITSET)) { + cmd == FUTEX_WAIT_BITSET || + cmd == FUTEX_WAIT_REQUEUE_PI)) { if (get_compat_timespec(&ts, utime)) return -EFAULT; if (!timespec_valid(&ts)) @@ -191,7 +192,8 @@ asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val, t = ktime_add_safe(ktime_get(), t); tp = &t; } - if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE) + if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE || + cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP) val2 = (int) (unsigned long) utime; return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig new file mode 100644 index 00000000000..22e9dcfaa3d --- /dev/null +++ b/kernel/gcov/Kconfig @@ -0,0 +1,48 @@ +menu "GCOV-based kernel profiling" + +config GCOV_KERNEL + bool "Enable gcov-based kernel profiling" + depends on DEBUG_FS && CONSTRUCTORS + default n + ---help--- + This option enables gcov-based code profiling (e.g. for code coverage + measurements). + + If unsure, say N. + + Additionally specify CONFIG_GCOV_PROFILE_ALL=y to get profiling data + for the entire kernel. To enable profiling for specific files or + directories, add a line similar to the following to the respective + Makefile: + + For a single file (e.g. main.o): + GCOV_PROFILE_main.o := y + + For all files in one directory: + GCOV_PROFILE := y + + To exclude files from being profiled even when CONFIG_GCOV_PROFILE_ALL + is specified, use: + + GCOV_PROFILE_main.o := n + and: + GCOV_PROFILE := n + + Note that the debugfs filesystem has to be mounted to access + profiling data. + +config GCOV_PROFILE_ALL + bool "Profile entire Kernel" + depends on GCOV_KERNEL + depends on S390 || X86 + default n + ---help--- + This option activates profiling for the entire kernel. + + If unsure, say N. + + Note that a kernel compiled with profiling flags will be significantly + larger and run slower. Also be sure to exclude files from profiling + which are not linked to the kernel image to prevent linker errors. + +endmenu diff --git a/kernel/gcov/Makefile b/kernel/gcov/Makefile new file mode 100644 index 00000000000..3f761001d51 --- /dev/null +++ b/kernel/gcov/Makefile @@ -0,0 +1,3 @@ +EXTRA_CFLAGS := -DSRCTREE='"$(srctree)"' -DOBJTREE='"$(objtree)"' + +obj-$(CONFIG_GCOV_KERNEL) := base.o fs.o gcc_3_4.o diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c new file mode 100644 index 00000000000..9b22d03cc58 --- /dev/null +++ b/kernel/gcov/base.c @@ -0,0 +1,148 @@ +/* + * This code maintains a list of active profiling data structures. + * + * Copyright IBM Corp. 2009 + * Author(s): Peter Oberparleiter <oberpar@linux.vnet.ibm.com> + * + * Uses gcc-internal data definitions.
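 */

GCOV_KERNEL depends on CONSTRUCTORS because gcc registers every object's profiling data through a constructor that calls __gcov_init(); a self-contained userspace analogue of that registration mechanism (all names hypothetical):

#include <stdio.h>

struct info {
	const char *name;
	struct info *next;
};

static struct info *head;

static void register_info(struct info *i)	/* plays the __gcov_init role */
{
	i->next = head;
	head = i;
}

/* gcc emits one such constructor per object built with -fprofile-arcs */
static struct info this_object = { .name = "example.o" };

static void __attribute__((constructor)) init_this_object(void)
{
	register_info(&this_object);
}

int main(void)
{
	struct info *i;

	for (i = head; i; i = i->next)
		printf("registered: %s\n", i->name);
	return 0;
}

/*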
+ * Based on the gcov-kernel patch by: + * Hubertus Franke <frankeh@us.ibm.com> + * Nigel Hinds <nhinds@us.ibm.com> + * Rajan Ravindran <rajancr@us.ibm.com> + * Peter Oberparleiter <oberpar@linux.vnet.ibm.com> + * Paul Larson + */ + +#define pr_fmt(fmt) "gcov: " fmt + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include "gcov.h" + +static struct gcov_info *gcov_info_head; +static int gcov_events_enabled; +static DEFINE_MUTEX(gcov_lock); + +/* + * __gcov_init is called by gcc-generated constructor code for each object + * file compiled with -fprofile-arcs. + */ +void __gcov_init(struct gcov_info *info) +{ + static unsigned int gcov_version; + + mutex_lock(&gcov_lock); + if (gcov_version == 0) { + gcov_version = info->version; + /* + * Printing gcc's version magic may prove useful for debugging + * incompatibility reports. + */ + pr_info("version magic: 0x%x\n", gcov_version); + } + /* + * Add new profiling data structure to list and inform event + * listener. + */ + info->next = gcov_info_head; + gcov_info_head = info; + if (gcov_events_enabled) + gcov_event(GCOV_ADD, info); + mutex_unlock(&gcov_lock); +} +EXPORT_SYMBOL(__gcov_init); + +/* + * These functions may be referenced by gcc-generated profiling code but serve + * no function for kernel profiling. + */ +void __gcov_flush(void) +{ + /* Unused. */ +} +EXPORT_SYMBOL(__gcov_flush); + +void __gcov_merge_add(gcov_type *counters, unsigned int n_counters) +{ + /* Unused. */ +} +EXPORT_SYMBOL(__gcov_merge_add); + +void __gcov_merge_single(gcov_type *counters, unsigned int n_counters) +{ + /* Unused. */ +} +EXPORT_SYMBOL(__gcov_merge_single); + +void __gcov_merge_delta(gcov_type *counters, unsigned int n_counters) +{ + /* Unused. */ +} +EXPORT_SYMBOL(__gcov_merge_delta); + +/** + * gcov_enable_events - enable event reporting through gcov_event() + * + * Turn on reporting of profiling data load/unload-events through the + * gcov_event() callback. Also replay all previous events once. This function + * is needed because some events are potentially generated too early for the + * callback implementation to handle them initially. + */ +void gcov_enable_events(void) +{ + struct gcov_info *info; + + mutex_lock(&gcov_lock); + gcov_events_enabled = 1; + /* Perform event callback for previously registered entries. */ + for (info = gcov_info_head; info; info = info->next) + gcov_event(GCOV_ADD, info); + mutex_unlock(&gcov_lock); +} + +#ifdef CONFIG_MODULES +static inline int within(void *addr, void *start, unsigned long size) +{ + return ((addr >= start) && (addr < start + size)); +} + +/* Update list and generate events when modules are unloaded. */ +static int gcov_module_notifier(struct notifier_block *nb, unsigned long event, + void *data) +{ + struct module *mod = data; + struct gcov_info *info; + struct gcov_info *prev; + + if (event != MODULE_STATE_GOING) + return NOTIFY_OK; + mutex_lock(&gcov_lock); + prev = NULL; + /* Remove entries located in module from linked list. 
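 */

The unload hook below rides the standard module notifier chain; the registration pattern, reduced to a hedged sketch (hypothetical listener):

#include <linux/module.h>
#include <linux/notifier.h>

static int my_module_event(struct notifier_block *nb, unsigned long event,
			   void *data)
{
	struct module *mod = data;

	if (event == MODULE_STATE_GOING)
		pr_info("%s is being unloaded\n", mod->name);
	return NOTIFY_OK;
}

static struct notifier_block my_nb = {
	.notifier_call = my_module_event,
};

/* registered via register_module_notifier(&my_nb) from an initcall,
 * as gcov_init() does below */

/*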
*/ + for (info = gcov_info_head; info; info = info->next) { + if (within(info, mod->module_core, mod->core_size)) { + if (prev) + prev->next = info->next; + else + gcov_info_head = info->next; + if (gcov_events_enabled) + gcov_event(GCOV_REMOVE, info); + } else + prev = info; + } + mutex_unlock(&gcov_lock); + + return NOTIFY_OK; +} + +static struct notifier_block gcov_nb = { + .notifier_call = gcov_module_notifier, +}; + +static int __init gcov_init(void) +{ + return register_module_notifier(&gcov_nb); +} +device_initcall(gcov_init); +#endif /* CONFIG_MODULES */ diff --git a/kernel/gcov/fs.c b/kernel/gcov/fs.c new file mode 100644 index 00000000000..ef3c3f88a7a --- /dev/null +++ b/kernel/gcov/fs.c @@ -0,0 +1,673 @@ +/* + * This code exports profiling data as debugfs files to userspace. + * + * Copyright IBM Corp. 2009 + * Author(s): Peter Oberparleiter <oberpar@linux.vnet.ibm.com> + * + * Uses gcc-internal data definitions. + * Based on the gcov-kernel patch by: + * Hubertus Franke <frankeh@us.ibm.com> + * Nigel Hinds <nhinds@us.ibm.com> + * Rajan Ravindran <rajancr@us.ibm.com> + * Peter Oberparleiter <oberpar@linux.vnet.ibm.com> + * Paul Larson + * Yi CDL Yang + */ + +#define pr_fmt(fmt) "gcov: " fmt + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/debugfs.h> +#include <linux/fs.h> +#include <linux/list.h> +#include <linux/string.h> +#include <linux/slab.h> +#include <linux/mutex.h> +#include <linux/seq_file.h> +#include "gcov.h" + +/** + * struct gcov_node - represents a debugfs entry + * @list: list head for child node list + * @children: child nodes + * @all: list head for list of all nodes + * @parent: parent node + * @info: associated profiling data structure if not a directory + * @ghost: when an object file containing profiling data is unloaded we keep a + * copy of the profiling data here to allow collecting coverage data + * for cleanup code. Such a node is called a "ghost". + * @dentry: main debugfs entry, either a directory or data file + * @links: associated symbolic links + * @name: data file basename + * + * struct gcov_node represents an entity within the gcov/ subdirectory + * of debugfs. There are directory and data file nodes. The latter represent + * the actual synthesized data file plus any associated symbolic links which + * are needed by the gcov tool to work correctly. + */ +struct gcov_node { + struct list_head list; + struct list_head children; + struct list_head all; + struct gcov_node *parent; + struct gcov_info *info; + struct gcov_info *ghost; + struct dentry *dentry; + struct dentry **links; + char name[0]; +}; + +static const char objtree[] = OBJTREE; +static const char srctree[] = SRCTREE; +static struct gcov_node root_node; +static struct dentry *reset_dentry; +static LIST_HEAD(all_head); +static DEFINE_MUTEX(node_lock); + +/* If non-zero, keep copies of profiling data for unloaded modules. */ +static int gcov_persist = 1; + +static int __init gcov_persist_setup(char *str) +{ + unsigned long val; + + if (strict_strtoul(str, 0, &val)) { + pr_warning("invalid gcov_persist parameter '%s'\n", str); + return 0; + } + gcov_persist = val; + pr_info("setting gcov_persist to %d\n", gcov_persist); + + return 1; +} +__setup("gcov_persist=", gcov_persist_setup); + +/* + * seq_file.start() implementation for gcov data files. Note that the + * gcov_iterator interface is designed to be more restrictive than seq_file + * (no start from arbitrary position, etc.), to simplify the iterator + * implementation. 
+ */ +static void *gcov_seq_start(struct seq_file *seq, loff_t *pos) +{ + loff_t i; + + gcov_iter_start(seq->private); + for (i = 0; i < *pos; i++) { + if (gcov_iter_next(seq->private)) + return NULL; + } + return seq->private; +} + +/* seq_file.next() implementation for gcov data files. */ +static void *gcov_seq_next(struct seq_file *seq, void *data, loff_t *pos) +{ + struct gcov_iterator *iter = data; + + if (gcov_iter_next(iter)) + return NULL; + (*pos)++; + + return iter; +} + +/* seq_file.show() implementation for gcov data files. */ +static int gcov_seq_show(struct seq_file *seq, void *data) +{ + struct gcov_iterator *iter = data; + + if (gcov_iter_write(iter, seq)) + return -EINVAL; + return 0; +} + +static void gcov_seq_stop(struct seq_file *seq, void *data) +{ + /* Unused. */ +} + +static const struct seq_operations gcov_seq_ops = { + .start = gcov_seq_start, + .next = gcov_seq_next, + .show = gcov_seq_show, + .stop = gcov_seq_stop, +}; + +/* + * Return the profiling data set for a given node. This can either be the + * original profiling data structure or a duplicate (also called "ghost") + * in case the associated object file has been unloaded. + */ +static struct gcov_info *get_node_info(struct gcov_node *node) +{ + if (node->info) + return node->info; + + return node->ghost; +} + +/* + * open() implementation for gcov data files. Create a copy of the profiling + * data set and initialize the iterator and seq_file interface. + */ +static int gcov_seq_open(struct inode *inode, struct file *file) +{ + struct gcov_node *node = inode->i_private; + struct gcov_iterator *iter; + struct seq_file *seq; + struct gcov_info *info; + int rc = -ENOMEM; + + mutex_lock(&node_lock); + /* + * Read from a profiling data copy to minimize reference tracking + * complexity and concurrent access. + */ + info = gcov_info_dup(get_node_info(node)); + if (!info) + goto out_unlock; + iter = gcov_iter_new(info); + if (!iter) + goto err_free_info; + rc = seq_open(file, &gcov_seq_ops); + if (rc) + goto err_free_iter_info; + seq = file->private_data; + seq->private = iter; +out_unlock: + mutex_unlock(&node_lock); + return rc; + +err_free_iter_info: + gcov_iter_free(iter); +err_free_info: + gcov_info_free(info); + goto out_unlock; +} + +/* + * release() implementation for gcov data files. Release resources allocated + * by open(). + */ +static int gcov_seq_release(struct inode *inode, struct file *file) +{ + struct gcov_iterator *iter; + struct gcov_info *info; + struct seq_file *seq; + + seq = file->private_data; + iter = seq->private; + info = gcov_iter_get_info(iter); + gcov_iter_free(iter); + gcov_info_free(info); + seq_release(inode, file); + + return 0; +} + +/* + * Find a node by the associated data file name. Needs to be called with + * node_lock held. + */ +static struct gcov_node *get_node_by_name(const char *name) +{ + struct gcov_node *node; + struct gcov_info *info; + + list_for_each_entry(node, &all_head, all) { + info = get_node_info(node); + if (info && (strcmp(info->filename, name) == 0)) + return node; + } + + return NULL; +} + +static void remove_node(struct gcov_node *node); + +/* + * write() implementation for gcov data files. Reset profiling data for the + * associated file. If the object file has been unloaded (i.e. this is + * a "ghost" node), remove the debug fs node as well. 
+ */ +static ssize_t gcov_seq_write(struct file *file, const char __user *addr, + size_t len, loff_t *pos) +{ + struct seq_file *seq; + struct gcov_info *info; + struct gcov_node *node; + + seq = file->private_data; + info = gcov_iter_get_info(seq->private); + mutex_lock(&node_lock); + node = get_node_by_name(info->filename); + if (node) { + /* Reset counts or remove node for unloaded modules. */ + if (node->ghost) + remove_node(node); + else + gcov_info_reset(node->info); + } + /* Reset counts for open file. */ + gcov_info_reset(info); + mutex_unlock(&node_lock); + + return len; +} + +/* + * Given a string <path> representing a file path of format: + * path/to/file.gcda + * construct and return a new string: + * <dir/>path/to/file.<ext> + */ +static char *link_target(const char *dir, const char *path, const char *ext) +{ + char *target; + char *old_ext; + char *copy; + + copy = kstrdup(path, GFP_KERNEL); + if (!copy) + return NULL; + old_ext = strrchr(copy, '.'); + if (old_ext) + *old_ext = '\0'; + if (dir) + target = kasprintf(GFP_KERNEL, "%s/%s.%s", dir, copy, ext); + else + target = kasprintf(GFP_KERNEL, "%s.%s", copy, ext); + kfree(copy); + + return target; +} + +/* + * Construct a string representing the symbolic link target for the given + * gcov data file name and link type. Depending on the link type and the + * location of the data file, the link target can either point to a + * subdirectory of srctree, objtree or in an external location. + */ +static char *get_link_target(const char *filename, const struct gcov_link *ext) +{ + const char *rel; + char *result; + + if (strncmp(filename, objtree, strlen(objtree)) == 0) { + rel = filename + strlen(objtree) + 1; + if (ext->dir == SRC_TREE) + result = link_target(srctree, rel, ext->ext); + else + result = link_target(objtree, rel, ext->ext); + } else { + /* External compilation. */ + result = link_target(NULL, filename, ext->ext); + } + + return result; +} + +#define SKEW_PREFIX ".tmp_" + +/* + * For a filename .tmp_filename.ext return filename.ext. Needed to compensate + * for filename skewing caused by the mod-versioning mechanism. + */ +static const char *deskew(const char *basename) +{ + if (strncmp(basename, SKEW_PREFIX, sizeof(SKEW_PREFIX) - 1) == 0) + return basename + sizeof(SKEW_PREFIX) - 1; + return basename; +} + +/* + * Create links to additional files (usually .c and .gcno files) which the + * gcov tool expects to find in the same directory as the gcov data file. + */ +static void add_links(struct gcov_node *node, struct dentry *parent) +{ + char *basename; + char *target; + int num; + int i; + + for (num = 0; gcov_link[num].ext; num++) + /* Nothing. */; + node->links = kcalloc(num, sizeof(struct dentry *), GFP_KERNEL); + if (!node->links) + return; + for (i = 0; i < num; i++) { + target = get_link_target(get_node_info(node)->filename, + &gcov_link[i]); + if (!target) + goto out_err; + basename = strrchr(target, '/'); + if (!basename) + goto out_err; + basename++; + node->links[i] = debugfs_create_symlink(deskew(basename), + parent, target); + if (!node->links[i]) + goto out_err; + kfree(target); + } + + return; +out_err: + kfree(target); + while (i-- > 0) + debugfs_remove(node->links[i]); + kfree(node->links); + node->links = NULL; +} + +static const struct file_operations gcov_data_fops = { + .open = gcov_seq_open, + .release = gcov_seq_release, + .read = seq_read, + .llseek = seq_lseek, + .write = gcov_seq_write, +}; + +/* Basic initialization of a new node. 
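To make the link_target() string surgery above concrete, here is a standalone version (asprintf and strdup stand in for kasprintf and kstrdup; the paths are invented):

#define _GNU_SOURCE		/* for asprintf() */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace analogue of link_target() above. */
static char *link_target(const char *dir, const char *path, const char *ext)
{
	char *target = NULL;
	char *copy = strdup(path);
	char *old_ext;

	if (!copy)
		return NULL;
	old_ext = strrchr(copy, '.');
	if (old_ext)
		*old_ext = '\0';		/* strip ".gcda" */
	if (dir) {
		if (asprintf(&target, "%s/%s.%s", dir, copy, ext) < 0)
			target = NULL;
	} else {
		if (asprintf(&target, "%s.%s", copy, ext) < 0)
			target = NULL;
	}
	free(copy);
	return target;
}

int main(void)
{
	char *t = link_target("/usr/src/linux", "kernel/gcov/fs.gcda", "gcno");

	if (t)
		printf("%s\n", t);	/* /usr/src/linux/kernel/gcov/fs.gcno */
	free(t);
	return 0;
}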
*/ +static void init_node(struct gcov_node *node, struct gcov_info *info, + const char *name, struct gcov_node *parent) +{ + INIT_LIST_HEAD(&node->list); + INIT_LIST_HEAD(&node->children); + INIT_LIST_HEAD(&node->all); + node->info = info; + node->parent = parent; + if (name) + strcpy(node->name, name); +} + +/* + * Create a new node and associated debugfs entry. Needs to be called with + * node_lock held. + */ +static struct gcov_node *new_node(struct gcov_node *parent, + struct gcov_info *info, const char *name) +{ + struct gcov_node *node; + + node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL); + if (!node) { + pr_warning("out of memory\n"); + return NULL; + } + init_node(node, info, name, parent); + /* Differentiate between gcov data file nodes and directory nodes. */ + if (info) { + node->dentry = debugfs_create_file(deskew(node->name), 0600, + parent->dentry, node, &gcov_data_fops); + } else + node->dentry = debugfs_create_dir(node->name, parent->dentry); + if (!node->dentry) { + pr_warning("could not create file\n"); + kfree(node); + return NULL; + } + if (info) + add_links(node, parent->dentry); + list_add(&node->list, &parent->children); + list_add(&node->all, &all_head); + + return node; +} + +/* Remove symbolic links associated with node. */ +static void remove_links(struct gcov_node *node) +{ + int i; + + if (!node->links) + return; + for (i = 0; gcov_link[i].ext; i++) + debugfs_remove(node->links[i]); + kfree(node->links); + node->links = NULL; +} + +/* + * Remove node from all lists and debugfs and release associated resources. + * Needs to be called with node_lock held. + */ +static void release_node(struct gcov_node *node) +{ + list_del(&node->list); + list_del(&node->all); + debugfs_remove(node->dentry); + remove_links(node); + if (node->ghost) + gcov_info_free(node->ghost); + kfree(node); +} + +/* Release node and empty parents. Needs to be called with node_lock held. */ +static void remove_node(struct gcov_node *node) +{ + struct gcov_node *parent; + + while ((node != &root_node) && list_empty(&node->children)) { + parent = node->parent; + release_node(node); + node = parent; + } +} + +/* + * Find child node with given basename. Needs to be called with node_lock + * held. + */ +static struct gcov_node *get_child_by_name(struct gcov_node *parent, + const char *name) +{ + struct gcov_node *node; + + list_for_each_entry(node, &parent->children, list) { + if (strcmp(node->name, name) == 0) + return node; + } + + return NULL; +} + +/* + * write() implementation for reset file. Reset all profiling data to zero + * and remove ghost nodes. + */ +static ssize_t reset_write(struct file *file, const char __user *addr, + size_t len, loff_t *pos) +{ + struct gcov_node *node; + + mutex_lock(&node_lock); +restart: + list_for_each_entry(node, &all_head, all) { + if (node->info) + gcov_info_reset(node->info); + else if (list_empty(&node->children)) { + remove_node(node); + /* Several nodes may have gone - restart loop. */ + goto restart; + } + } + mutex_unlock(&node_lock); + + return len; +} + +/* read() implementation for reset file. Unused. */ +static ssize_t reset_read(struct file *file, char __user *addr, size_t len, + loff_t *pos) +{ + /* Allow read operation so that a recursive copy won't fail. */ + return 0; +} + +static const struct file_operations gcov_reset_fops = { + .write = reset_write, + .read = reset_read, +}; + +/* + * Create a node for a given profiling data set and add it to all lists and + * debugfs. Needs to be called with node_lock held. 
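The restart label in reset_write() above is the usual answer to deleting while iterating when one removal can take out more than the current entry, since remove_node() also prunes empty parent directories. A self-contained sketch of that restart-on-mutation shape (toy list, invented fields):

#include <stdio.h>
#include <stdlib.h>

struct node {
	int ghost;			/* 1: remove entirely, 0: reset in place */
	struct node *next;
};

static struct node *head;

/* Stands in for remove_node() above, which may unlink more than one
 * entry (empty parents too), so the caller's cursor is unsafe after. */
static void remove_one(struct node *n)
{
	struct node **pp;

	for (pp = &head; *pp; pp = &(*pp)->next) {
		if (*pp == n) {
			*pp = n->next;
			free(n);
			return;
		}
	}
}

static void reset_all(void)
{
restart:
	for (struct node *n = head; n; n = n->next) {
		if (n->ghost) {
			remove_one(n);
			goto restart;	/* cursor invalid: rescan from head */
		}
		/* else: reset counters in place; cursor stays valid */
	}
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct node *n = calloc(1, sizeof(*n));

		if (!n)
			return 1;
		n->ghost = i & 1;
		n->next = head;
		head = n;
	}
	reset_all();
	for (struct node *n = head; n; n = n->next)
		printf("kept a node (ghost=%d)\n", n->ghost);
	return 0;
}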
+ */ +static void add_node(struct gcov_info *info) +{ + char *filename; + char *curr; + char *next; + struct gcov_node *parent; + struct gcov_node *node; + + filename = kstrdup(info->filename, GFP_KERNEL); + if (!filename) + return; + parent = &root_node; + /* Create directory nodes along the path. */ + for (curr = filename; (next = strchr(curr, '/')); curr = next + 1) { + if (curr == next) + continue; + *next = 0; + if (strcmp(curr, ".") == 0) + continue; + if (strcmp(curr, "..") == 0) { + if (!parent->parent) + goto err_remove; + parent = parent->parent; + continue; + } + node = get_child_by_name(parent, curr); + if (!node) { + node = new_node(parent, NULL, curr); + if (!node) + goto err_remove; + } + parent = node; + } + /* Create file node. */ + node = new_node(parent, info, curr); + if (!node) + goto err_remove; +out: + kfree(filename); + return; + +err_remove: + remove_node(parent); + goto out; +} + +/* + * The profiling data set associated with this node is being unloaded. Store a + * copy of the profiling data and turn this node into a "ghost". + */ +static int ghost_node(struct gcov_node *node) +{ + node->ghost = gcov_info_dup(node->info); + if (!node->ghost) { + pr_warning("could not save data for '%s' (out of memory)\n", + node->info->filename); + return -ENOMEM; + } + node->info = NULL; + + return 0; +} + +/* + * Profiling data for this node has been loaded again. Add profiling data + * from previous instantiation and turn this node into a regular node. + */ +static void revive_node(struct gcov_node *node, struct gcov_info *info) +{ + if (gcov_info_is_compatible(node->ghost, info)) + gcov_info_add(info, node->ghost); + else { + pr_warning("discarding saved data for '%s' (version changed)\n", + info->filename); + } + gcov_info_free(node->ghost); + node->ghost = NULL; + node->info = info; +} + +/* + * Callback to create/remove profiling files when code compiled with + * -fprofile-arcs is loaded/unloaded. + */ +void gcov_event(enum gcov_action action, struct gcov_info *info) +{ + struct gcov_node *node; + + mutex_lock(&node_lock); + node = get_node_by_name(info->filename); + switch (action) { + case GCOV_ADD: + /* Add new node or revive ghost. */ + if (!node) { + add_node(info); + break; + } + if (gcov_persist) + revive_node(node, info); + else { + pr_warning("could not add '%s' (already exists)\n", + info->filename); + } + break; + case GCOV_REMOVE: + /* Remove node or turn into ghost. */ + if (!node) { + pr_warning("could not remove '%s' (not found)\n", + info->filename); + break; + } + if (gcov_persist) { + if (!ghost_node(node)) + break; + } + remove_node(node); + break; + } + mutex_unlock(&node_lock); +} + +/* Create debugfs entries. */ +static __init int gcov_fs_init(void) +{ + int rc = -EIO; + + init_node(&root_node, NULL, NULL, NULL); + /* + * /sys/kernel/debug/gcov will be parent for the reset control file + * and all profiling files. + */ + root_node.dentry = debugfs_create_dir("gcov", NULL); + if (!root_node.dentry) + goto err_remove; + /* + * Create reset file which resets all profiling counts when written + * to. + */ + reset_dentry = debugfs_create_file("reset", 0600, root_node.dentry, + NULL, &gcov_reset_fops); + if (!reset_dentry) + goto err_remove; + /* Replay previous events to get our fs hierarchy up-to-date. 
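The directory walk in add_node() above splits the absolute .gcda path on '/', creating one directory level per component, skipping '.' and mapping '..' to the parent. The tokenizing loop on its own, as a compilable sketch with an invented path:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *filename = strdup("/usr/src/linux/kernel/./gcov/fs.gcda");
	char *curr, *next;

	if (!filename)
		return 1;
	/* Same loop shape as add_node(): each component before a '/'
	 * becomes a directory level; what remains is the file name. */
	for (curr = filename; (next = strchr(curr, '/')); curr = next + 1) {
		if (curr == next)
			continue;		/* leading or doubled '/' */
		*next = '\0';
		if (strcmp(curr, ".") == 0)
			continue;		/* stay in the current dir */
		if (strcmp(curr, "..") == 0) {
			printf("up one level\n");
			continue;
		}
		printf("dir:  %s\n", curr);
	}
	printf("file: %s\n", curr);
	free(filename);
	return 0;
}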
*/ + gcov_enable_events(); + return 0; + +err_remove: + pr_err("init failed\n"); + if (root_node.dentry) + debugfs_remove(root_node.dentry); + + return rc; +} +device_initcall(gcov_fs_init); diff --git a/kernel/gcov/gcc_3_4.c b/kernel/gcov/gcc_3_4.c new file mode 100644 index 00000000000..ae5bb426003 --- /dev/null +++ b/kernel/gcov/gcc_3_4.c @@ -0,0 +1,447 @@ +/* + * This code provides functions to handle gcc's profiling data format + * introduced with gcc 3.4. Future versions of gcc may change the gcov + * format (as happened before), so all format-specific information needs + * to be kept modular and easily exchangeable. + * + * This file is based on gcc-internal definitions. Functions and data + * structures are defined to be compatible with gcc counterparts. + * For a better understanding, refer to gcc source: gcc/gcov-io.h. + * + * Copyright IBM Corp. 2009 + * Author(s): Peter Oberparleiter <oberpar@linux.vnet.ibm.com> + * + * Uses gcc-internal data definitions. + */ + +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/seq_file.h> +#include <linux/vmalloc.h> +#include "gcov.h" + +/* Symbolic links to be created for each profiling data file. */ +const struct gcov_link gcov_link[] = { + { OBJ_TREE, "gcno" }, /* Link to .gcno file in $(objtree). */ + { 0, NULL}, +}; + +/* + * Determine whether a counter is active. Based on gcc magic. Doesn't change + * at run-time. + */ +static int counter_active(struct gcov_info *info, unsigned int type) +{ + return (1 << type) & info->ctr_mask; +} + +/* Determine number of active counters. Based on gcc magic. */ +static unsigned int num_counter_active(struct gcov_info *info) +{ + unsigned int i; + unsigned int result = 0; + + for (i = 0; i < GCOV_COUNTERS; i++) { + if (counter_active(info, i)) + result++; + } + return result; +} + +/** + * gcov_info_reset - reset profiling data to zero + * @info: profiling data set + */ +void gcov_info_reset(struct gcov_info *info) +{ + unsigned int active = num_counter_active(info); + unsigned int i; + + for (i = 0; i < active; i++) { + memset(info->counts[i].values, 0, + info->counts[i].num * sizeof(gcov_type)); + } +} + +/** + * gcov_info_is_compatible - check if profiling data can be added + * @info1: first profiling data set + * @info2: second profiling data set + * + * Returns non-zero if profiling data can be added, zero otherwise. + */ +int gcov_info_is_compatible(struct gcov_info *info1, struct gcov_info *info2) +{ + return (info1->stamp == info2->stamp); +} + +/** + * gcov_info_add - add up profiling data + * @dest: profiling data set to which data is added + * @source: profiling data set which is added + * + * Adds profiling counts of @source to @dest. + */ +void gcov_info_add(struct gcov_info *dest, struct gcov_info *source) +{ + unsigned int i; + unsigned int j; + + for (i = 0; i < num_counter_active(dest); i++) { + for (j = 0; j < dest->counts[i].num; j++) { + dest->counts[i].values[j] += + source->counts[i].values[j]; + } + } +} + +/* Get size of function info entry. Based on gcc magic. */ +static size_t get_fn_size(struct gcov_info *info) +{ + size_t size; + + size = sizeof(struct gcov_fn_info) + num_counter_active(info) * + sizeof(unsigned int); + if (__alignof__(struct gcov_fn_info) > sizeof(unsigned int)) + size = ALIGN(size, __alignof__(struct gcov_fn_info)); + return size; +} + +/* Get address of function info entry. Based on gcc magic. 
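counter_active() above is a plain bit test against ctr_mask, and get_fn_size() sizes the variable-length gcov_fn_info record: the fixed header plus one n_ctrs[] slot per active counter type, padded to the struct's alignment when required. A quick standalone check of that arithmetic (the mask value is assumed):

#include <stdio.h>
#include <stddef.h>

#define GCOV_COUNTERS	5
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

struct gcov_fn_info {
	unsigned int ident;
	unsigned int checksum;
	unsigned int n_ctrs[];		/* one entry per active counter type */
};

int main(void)
{
	unsigned int ctr_mask = 0x1;	/* assumed: only arc counters active */
	unsigned int active = 0;
	size_t size;

	for (unsigned int i = 0; i < GCOV_COUNTERS; i++)
		if ((1u << i) & ctr_mask)
			active++;

	size = sizeof(struct gcov_fn_info) + active * sizeof(unsigned int);
	/* Always aligning is equivalent to the kernel's conditional pad:
	 * ALIGN() is a no-op when size is already a multiple. */
	size = ALIGN(size, __alignof__(struct gcov_fn_info));
	printf("active=%u, per-function record size=%zu\n", active, size);
	return 0;
}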
*/ +static struct gcov_fn_info *get_fn_info(struct gcov_info *info, unsigned int fn) +{ + return (struct gcov_fn_info *) + ((char *) info->functions + fn * get_fn_size(info)); +} + +/** + * gcov_info_dup - duplicate profiling data set + * @info: profiling data set to duplicate + * + * Return newly allocated duplicate on success, %NULL on error. + */ +struct gcov_info *gcov_info_dup(struct gcov_info *info) +{ + struct gcov_info *dup; + unsigned int i; + unsigned int active; + + /* Duplicate gcov_info. */ + active = num_counter_active(info); + dup = kzalloc(sizeof(struct gcov_info) + + sizeof(struct gcov_ctr_info) * active, GFP_KERNEL); + if (!dup) + return NULL; + dup->version = info->version; + dup->stamp = info->stamp; + dup->n_functions = info->n_functions; + dup->ctr_mask = info->ctr_mask; + /* Duplicate filename. */ + dup->filename = kstrdup(info->filename, GFP_KERNEL); + if (!dup->filename) + goto err_free; + /* Duplicate table of functions. */ + dup->functions = kmemdup(info->functions, info->n_functions * + get_fn_size(info), GFP_KERNEL); + if (!dup->functions) + goto err_free; + /* Duplicate counter arrays. */ + for (i = 0; i < active ; i++) { + struct gcov_ctr_info *ctr = &info->counts[i]; + size_t size = ctr->num * sizeof(gcov_type); + + dup->counts[i].num = ctr->num; + dup->counts[i].merge = ctr->merge; + dup->counts[i].values = vmalloc(size); + if (!dup->counts[i].values) + goto err_free; + memcpy(dup->counts[i].values, ctr->values, size); + } + return dup; + +err_free: + gcov_info_free(dup); + return NULL; +} + +/** + * gcov_info_free - release memory for profiling data set duplicate + * @info: profiling data set duplicate to free + */ +void gcov_info_free(struct gcov_info *info) +{ + unsigned int active = num_counter_active(info); + unsigned int i; + + for (i = 0; i < active ; i++) + vfree(info->counts[i].values); + kfree(info->functions); + kfree(info->filename); + kfree(info); +} + +/** + * struct type_info - iterator helper array + * @ctr_type: counter type + * @offset: index of the first value of the current function for this type + * + * This array is needed to convert the in-memory data format into the in-file + * data format: + * + * In-memory: + * for each counter type + * for each function + * values + * + * In-file: + * for each function + * for each counter type + * values + * + * See gcc source gcc/gcov-io.h for more information on data organization. + */ +struct type_info { + int ctr_type; + unsigned int offset; +}; + +/** + * struct gcov_iterator - specifies current file position in logical records + * @info: associated profiling data + * @record: record type + * @function: function number + * @type: counter type + * @count: index into values array + * @num_types: number of counter types + * @type_info: helper array to get values-array offset for current function + */ +struct gcov_iterator { + struct gcov_info *info; + + int record; + unsigned int function; + unsigned int type; + unsigned int count; + + int num_types; + struct type_info type_info[0]; +}; + +static struct gcov_fn_info *get_func(struct gcov_iterator *iter) +{ + return get_fn_info(iter->info, iter->function); +} + +static struct type_info *get_type(struct gcov_iterator *iter) +{ + return &iter->type_info[iter->type]; +} + +/** + * gcov_iter_new - allocate and initialize profiling data iterator + * @info: profiling data set to be iterated + * + * Return file iterator on success, %NULL otherwise. 
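The type_info documentation above describes a transposition: in memory, values are grouped per counter type across all functions; in the file, they are grouped per function across all counter types, and offset remembers where the current function's slice starts in each per-type array. A toy illustration (2 functions by 2 types, one value each, made-up numbers):

#include <stdio.h>

#define NFUNC	2
#define NTYPE	2

int main(void)
{
	/* In-memory: one array per counter type, functions concatenated. */
	int counts[NTYPE][NFUNC] = { { 10, 11 }, { 20, 21 } };
	int offset[NTYPE] = { 0, 0 };	/* per-type cursor, as in type_info */

	/* In-file order: for each function, for each type, its values. */
	for (int fn = 0; fn < NFUNC; fn++) {
		for (int t = 0; t < NTYPE; t++) {
			printf("fn%d type%d: %d\n",
			       fn, t, counts[t][offset[t]]);
			offset[t] += 1;	/* this function had 1 value */
		}
	}
	return 0;
}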
+ */ +struct gcov_iterator *gcov_iter_new(struct gcov_info *info) +{ + struct gcov_iterator *iter; + + iter = kzalloc(sizeof(struct gcov_iterator) + + num_counter_active(info) * sizeof(struct type_info), + GFP_KERNEL); + if (iter) + iter->info = info; + + return iter; +} + +/** + * gcov_iter_free - release memory for iterator + * @iter: file iterator to free + */ +void gcov_iter_free(struct gcov_iterator *iter) +{ + kfree(iter); +} + +/** + * gcov_iter_get_info - return profiling data set for given file iterator + * @iter: file iterator + */ +struct gcov_info *gcov_iter_get_info(struct gcov_iterator *iter) +{ + return iter->info; +} + +/** + * gcov_iter_start - reset file iterator to starting position + * @iter: file iterator + */ +void gcov_iter_start(struct gcov_iterator *iter) +{ + int i; + + iter->record = 0; + iter->function = 0; + iter->type = 0; + iter->count = 0; + iter->num_types = 0; + for (i = 0; i < GCOV_COUNTERS; i++) { + if (counter_active(iter->info, i)) { + iter->type_info[iter->num_types].ctr_type = i; + iter->type_info[iter->num_types++].offset = 0; + } + } +} + +/* Mapping of logical record number to actual file content. */ +#define RECORD_FILE_MAGIC 0 +#define RECORD_GCOV_VERSION 1 +#define RECORD_TIME_STAMP 2 +#define RECORD_FUNCTION_TAG 3 +#define RECORD_FUNCTON_TAG_LEN 4 +#define RECORD_FUNCTION_IDENT 5 +#define RECORD_FUNCTION_CHECK 6 +#define RECORD_COUNT_TAG 7 +#define RECORD_COUNT_LEN 8 +#define RECORD_COUNT 9 + +/** + * gcov_iter_next - advance file iterator to next logical record + * @iter: file iterator + * + * Return zero if new position is valid, non-zero if iterator has reached end. + */ +int gcov_iter_next(struct gcov_iterator *iter) +{ + switch (iter->record) { + case RECORD_FILE_MAGIC: + case RECORD_GCOV_VERSION: + case RECORD_FUNCTION_TAG: + case RECORD_FUNCTON_TAG_LEN: + case RECORD_FUNCTION_IDENT: + case RECORD_COUNT_TAG: + /* Advance to next record */ + iter->record++; + break; + case RECORD_COUNT: + /* Advance to next count */ + iter->count++; + /* fall through */ + case RECORD_COUNT_LEN: + if (iter->count < get_func(iter)->n_ctrs[iter->type]) { + iter->record = 9; + break; + } + /* Advance to next counter type */ + get_type(iter)->offset += iter->count; + iter->count = 0; + iter->type++; + /* fall through */ + case RECORD_FUNCTION_CHECK: + if (iter->type < iter->num_types) { + iter->record = 7; + break; + } + /* Advance to next function */ + iter->type = 0; + iter->function++; + /* fall through */ + case RECORD_TIME_STAMP: + if (iter->function < iter->info->n_functions) + iter->record = 3; + else + iter->record = -1; + break; + } + /* Check for EOF. */ + if (iter->record == -1) + return -EINVAL; + else + return 0; +} + +/** + * seq_write_gcov_u32 - write 32 bit number in gcov format to seq_file + * @seq: seq_file handle + * @v: value to be stored + * + * Number format defined by gcc: numbers are recorded in the 32 bit + * unsigned binary form of the endianness of the machine generating the + * file. + */ +static int seq_write_gcov_u32(struct seq_file *seq, u32 v) +{ + return seq_write(seq, &v, sizeof(v)); +} + +/** + * seq_write_gcov_u64 - write 64 bit number in gcov format to seq_file + * @seq: seq_file handle + * @v: value to be stored + * + * Number format defined by gcc: numbers are recorded in the 32 bit + * unsigned binary form of the endianness of the machine generating the + * file. 64 bit numbers are stored as two 32 bit numbers, the low part + * first. 
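Hand-driving the gcov_iter_next() state machine above clarifies the file layout it produces. The sketch below re-implements just the transition logic for a data set assumed to have one function and one active counter type with two values; note the patch spells one constant RECORD_FUNCTON_TAG_LEN, while the sketch uses the intended spelling:

#include <stdio.h>

/* Same logical record numbering as the kernel code above. */
enum {
	REC_FILE_MAGIC, REC_GCOV_VERSION, REC_TIME_STAMP,
	REC_FUNCTION_TAG, REC_FUNCTION_TAG_LEN, REC_FUNCTION_IDENT,
	REC_FUNCTION_CHECK, REC_COUNT_TAG, REC_COUNT_LEN, REC_COUNT,
};

static const char *names[] = {
	"magic", "version", "stamp", "fn_tag", "fn_tag_len", "fn_ident",
	"fn_check", "count_tag", "count_len", "count",
};

int main(void)
{
	int n_functions = 1, num_types = 1, n_ctrs = 2;	/* toy data set */
	int record = 0, function = 0, type = 0, count = 0;

	for (;;) {
		printf("%s\n", names[record]);
		switch (record) {
		case REC_FILE_MAGIC: case REC_GCOV_VERSION:
		case REC_FUNCTION_TAG: case REC_FUNCTION_TAG_LEN:
		case REC_FUNCTION_IDENT: case REC_COUNT_TAG:
			record++;
			break;
		case REC_COUNT:
			count++;
			/* fall through */
		case REC_COUNT_LEN:
			if (count < n_ctrs) {
				record = REC_COUNT;
				break;
			}
			count = 0;
			type++;
			/* fall through */
		case REC_FUNCTION_CHECK:
			if (type < num_types) {
				record = REC_COUNT_TAG;
				break;
			}
			type = 0;
			function++;
			/* fall through */
		case REC_TIME_STAMP:
			if (function < n_functions)
				record = REC_FUNCTION_TAG;
			else
				record = -1;
			break;
		}
		if (record == -1)
			break;
	}
	return 0;
}

It prints magic, version, stamp, then the per-function block (tag, length, ident, checksum) followed by the counter block (tag, length, two values), which is exactly the .gcda record order gcov_iter_write() emits.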
+ */ +static int seq_write_gcov_u64(struct seq_file *seq, u64 v) +{ + u32 data[2]; + + data[0] = (v & 0xffffffffUL); + data[1] = (v >> 32); + return seq_write(seq, data, sizeof(data)); +} + +/** + * gcov_iter_write - write data for current pos to seq_file + * @iter: file iterator + * @seq: seq_file handle + * + * Return zero on success, non-zero otherwise. + */ +int gcov_iter_write(struct gcov_iterator *iter, struct seq_file *seq) +{ + int rc = -EINVAL; + + switch (iter->record) { + case RECORD_FILE_MAGIC: + rc = seq_write_gcov_u32(seq, GCOV_DATA_MAGIC); + break; + case RECORD_GCOV_VERSION: + rc = seq_write_gcov_u32(seq, iter->info->version); + break; + case RECORD_TIME_STAMP: + rc = seq_write_gcov_u32(seq, iter->info->stamp); + break; + case RECORD_FUNCTION_TAG: + rc = seq_write_gcov_u32(seq, GCOV_TAG_FUNCTION); + break; + case RECORD_FUNCTON_TAG_LEN: + rc = seq_write_gcov_u32(seq, 2); + break; + case RECORD_FUNCTION_IDENT: + rc = seq_write_gcov_u32(seq, get_func(iter)->ident); + break; + case RECORD_FUNCTION_CHECK: + rc = seq_write_gcov_u32(seq, get_func(iter)->checksum); + break; + case RECORD_COUNT_TAG: + rc = seq_write_gcov_u32(seq, + GCOV_TAG_FOR_COUNTER(get_type(iter)->ctr_type)); + break; + case RECORD_COUNT_LEN: + rc = seq_write_gcov_u32(seq, + get_func(iter)->n_ctrs[iter->type] * 2); + break; + case RECORD_COUNT: + rc = seq_write_gcov_u64(seq, + iter->info->counts[iter->type]. + values[iter->count + get_type(iter)->offset]); + break; + } + return rc; +} diff --git a/kernel/gcov/gcov.h b/kernel/gcov/gcov.h new file mode 100644 index 00000000000..060073ebf7a --- /dev/null +++ b/kernel/gcov/gcov.h @@ -0,0 +1,128 @@ +/* + * Profiling infrastructure declarations. + * + * This file is based on gcc-internal definitions. Data structures are + * defined to be compatible with gcc counterparts. For a better + * understanding, refer to gcc source: gcc/gcov-io.h. + * + * Copyright IBM Corp. 2009 + * Author(s): Peter Oberparleiter <oberpar@linux.vnet.ibm.com> + * + * Uses gcc-internal data definitions. + */ + +#ifndef GCOV_H +#define GCOV_H GCOV_H + +#include <linux/types.h> + +/* + * Profiling data types used for gcc 3.4 and above - these are defined by + * gcc and need to be kept as close to the original definition as possible to + * remain compatible. + */ +#define GCOV_COUNTERS 5 +#define GCOV_DATA_MAGIC ((unsigned int) 0x67636461) +#define GCOV_TAG_FUNCTION ((unsigned int) 0x01000000) +#define GCOV_TAG_COUNTER_BASE ((unsigned int) 0x01a10000) +#define GCOV_TAG_FOR_COUNTER(count) \ + (GCOV_TAG_COUNTER_BASE + ((unsigned int) (count) << 17)) + +#if BITS_PER_LONG >= 64 +typedef long gcov_type; +#else +typedef long long gcov_type; +#endif + +/** + * struct gcov_fn_info - profiling meta data per function + * @ident: object file-unique function identifier + * @checksum: function checksum + * @n_ctrs: number of values per counter type belonging to this function + * + * This data is generated by gcc during compilation and doesn't change + * at run-time. + */ +struct gcov_fn_info { + unsigned int ident; + unsigned int checksum; + unsigned int n_ctrs[0]; +}; + +/** + * struct gcov_ctr_info - profiling data per counter type + * @num: number of counter values for this type + * @values: array of counter values for this type + * @merge: merge function for counter values of this type (unused) + * + * This data is generated by gcc during compilation and doesn't change + * at run-time with the exception of the values array. 
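Two computations in this file are easy to sanity-check in isolation: the low-word-first split in seq_write_gcov_u64() and the GCOV_TAG_FOR_COUNTER() encoding, which shifts the counter index left by 17 bits into the tag. A standalone check:

#include <stdio.h>
#include <stdint.h>

#define GCOV_TAG_COUNTER_BASE	((uint32_t)0x01a10000)
#define GCOV_TAG_FOR_COUNTER(c)	(GCOV_TAG_COUNTER_BASE + ((uint32_t)(c) << 17))

int main(void)
{
	uint64_t v = 0x1122334455667788ULL;
	uint32_t data[2];

	/* Low part first, as in seq_write_gcov_u64(). */
	data[0] = (uint32_t)(v & 0xffffffffUL);
	data[1] = (uint32_t)(v >> 32);
	printf("low=0x%08x high=0x%08x\n", data[0], data[1]);

	for (int c = 0; c < 5; c++)
		printf("counter %d -> tag 0x%08x\n", c, GCOV_TAG_FOR_COUNTER(c));
	return 0;
}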
+ */ +struct gcov_ctr_info { + unsigned int num; + gcov_type *values; + void (*merge)(gcov_type *, unsigned int); +}; + +/** + * struct gcov_info - profiling data per object file + * @version: gcov version magic indicating the gcc version used for compilation + * @next: list head for a singly-linked list + * @stamp: time stamp + * @filename: name of the associated gcov data file + * @n_functions: number of instrumented functions + * @functions: function data + * @ctr_mask: mask specifying which counter types are active + * @counts: counter data per counter type + * + * This data is generated by gcc during compilation and doesn't change + * at run-time with the exception of the next pointer. + */ +struct gcov_info { + unsigned int version; + struct gcov_info *next; + unsigned int stamp; + const char *filename; + unsigned int n_functions; + const struct gcov_fn_info *functions; + unsigned int ctr_mask; + struct gcov_ctr_info counts[0]; +}; + +/* Base interface. */ +enum gcov_action { + GCOV_ADD, + GCOV_REMOVE, +}; + +void gcov_event(enum gcov_action action, struct gcov_info *info); +void gcov_enable_events(void); + +/* Iterator control. */ +struct seq_file; +struct gcov_iterator; + +struct gcov_iterator *gcov_iter_new(struct gcov_info *info); +void gcov_iter_free(struct gcov_iterator *iter); +void gcov_iter_start(struct gcov_iterator *iter); +int gcov_iter_next(struct gcov_iterator *iter); +int gcov_iter_write(struct gcov_iterator *iter, struct seq_file *seq); +struct gcov_info *gcov_iter_get_info(struct gcov_iterator *iter); + +/* gcov_info control. */ +void gcov_info_reset(struct gcov_info *info); +int gcov_info_is_compatible(struct gcov_info *info1, struct gcov_info *info2); +void gcov_info_add(struct gcov_info *dest, struct gcov_info *source); +struct gcov_info *gcov_info_dup(struct gcov_info *info); +void gcov_info_free(struct gcov_info *info); + +struct gcov_link { + enum { + OBJ_TREE, + SRC_TREE, + } dir; + const char *ext; +}; +extern const struct gcov_link gcov_link[]; + +#endif /* GCOV_H */ diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 9002958a96e..49da79ab848 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -191,6 +191,46 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer, } } + +/* + * Get the preferred target CPU for NOHZ + */ +static int hrtimer_get_target(int this_cpu, int pinned) +{ +#ifdef CONFIG_NO_HZ + if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu)) { + int preferred_cpu = get_nohz_load_balancer(); + + if (preferred_cpu >= 0) + return preferred_cpu; + } +#endif + return this_cpu; +} + +/* + * With HIGHRES=y we do not migrate the timer when it is expiring + * before the next event on the target cpu because we cannot reprogram + * the target cpu hardware and we would cause it to fire late. + * + * Called with cpu_base->lock of target cpu held. + */ +static int +hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base) +{ +#ifdef CONFIG_HIGH_RES_TIMERS + ktime_t expires; + + if (!new_base->cpu_base->hres_active) + return 0; + + expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset); + return expires.tv64 <= new_base->cpu_base->expires_next.tv64; +#else + return 0; +#endif +} + /* * Switch the timer base to the current CPU when possible. 
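Taken together, the two hrtimer helpers above make the migration policy readable: hrtimer_get_target() proposes a CPU, and hrtimer_check_target() vetoes the move when the timer would expire no later than the target's next programmed event, which we cannot reprogram remotely. Schematically, the veto is one 64-bit comparison:

#include <stdio.h>
#include <stdint.h>

/* Schematic of hrtimer_check_target(): refuse migration when the timer
 * fires no later than the target CPU's next programmed event. */
static int would_fire_too_early(int64_t expires, int64_t target_expires_next)
{
	return expires <= target_expires_next;
}

int main(void)
{
	int64_t expires = 1000, next_event = 1500;	/* invented values */

	if (would_fire_too_early(expires, next_event))
		printf("keep timer on the local CPU\n");
	else
		printf("migrate timer to the target CPU\n");
	return 0;
}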
*/ @@ -200,16 +240,8 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base, { struct hrtimer_clock_base *new_base; struct hrtimer_cpu_base *new_cpu_base; - int cpu, preferred_cpu = -1; - - cpu = smp_processor_id(); -#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP) - if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu)) { - preferred_cpu = get_nohz_load_balancer(); - if (preferred_cpu >= 0) - cpu = preferred_cpu; - } -#endif + int this_cpu = smp_processor_id(); + int cpu = hrtimer_get_target(this_cpu, pinned); again: new_cpu_base = &per_cpu(hrtimer_bases, cpu); @@ -217,7 +249,7 @@ again: if (base != new_base) { /* - * We are trying to schedule the timer on the local CPU. + * We are trying to move timer to new_base. * However we can't change timer's base while it is running, * so we keep it on the same CPU. No hassle vs. reprogramming * the event source in the high resolution case. The softirq @@ -233,38 +265,12 @@ again: spin_unlock(&base->cpu_base->lock); spin_lock(&new_base->cpu_base->lock); - /* Optimized away for NOHZ=n SMP=n */ - if (cpu == preferred_cpu) { - /* Calculate clock monotonic expiry time */ -#ifdef CONFIG_HIGH_RES_TIMERS - ktime_t expires = ktime_sub(hrtimer_get_expires(timer), - new_base->offset); -#else - ktime_t expires = hrtimer_get_expires(timer); -#endif - - /* - * Get the next event on target cpu from the - * clock events layer. - * This covers the highres=off nohz=on case as well. - */ - ktime_t next = clockevents_get_next_event(cpu); - - ktime_t delta = ktime_sub(expires, next); - - /* - * We do not migrate the timer when it is expiring - * before the next event on the target cpu because - * we cannot reprogram the target cpu hardware and - * we would cause it to fire late. - */ - if (delta.tv64 < 0) { - cpu = smp_processor_id(); - spin_unlock(&new_base->cpu_base->lock); - spin_lock(&base->cpu_base->lock); - timer->base = base; - goto again; - } + if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) { + cpu = this_cpu; + spin_unlock(&new_base->cpu_base->lock); + spin_lock(&base->cpu_base->lock); + timer->base = base; + goto again; } timer->base = new_base; } @@ -1276,14 +1282,22 @@ void hrtimer_interrupt(struct clock_event_device *dev) expires_next.tv64 = KTIME_MAX; + spin_lock(&cpu_base->lock); + /* + * We set expires_next to KTIME_MAX here with cpu_base->lock + * held to prevent that a timer is enqueued in our queue via + * the migration code. This does not affect enqueueing of + * timers which run their callback and need to be requeued on + * this CPU. + */ + cpu_base->expires_next.tv64 = KTIME_MAX; + base = cpu_base->clock_base; for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { ktime_t basenow; struct rb_node *node; - spin_lock(&cpu_base->lock); - basenow = ktime_add(now, base->offset); while ((node = base->first)) { @@ -1316,11 +1330,15 @@ void hrtimer_interrupt(struct clock_event_device *dev) __run_hrtimer(timer); } - spin_unlock(&cpu_base->lock); base++; } + /* + * Store the new expiry value so the migration code can verify + * against it. + */ cpu_base->expires_next = expires_next; + spin_unlock(&cpu_base->lock); /* Reprogramming necessary ? 
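The reworked switch_hrtimer_base() keeps the old locking discipline: never hold two per-CPU base locks at once, so the move is done as unlock-old, lock-new, and undone again (the goto again path) if hrtimer_check_target() refuses the migration. A pthread sketch of that dance (invented helpers; link with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t base_lock[2] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};

/* Schematic of switch_hrtimer_base(): never hold both base locks at
 * once; drop the old lock, take the new one, and back out again (the
 * kernel's goto again) when the move turns out to be unsafe. */
static int move_timer(int from, int to, int move_is_safe)
{
	pthread_mutex_unlock(&base_lock[from]);
	pthread_mutex_lock(&base_lock[to]);
	if (!move_is_safe) {
		pthread_mutex_unlock(&base_lock[to]);
		pthread_mutex_lock(&base_lock[from]);
		return from;		/* timer stays on the old base */
	}
	return to;
}

int main(void)
{
	int where;

	pthread_mutex_lock(&base_lock[0]);	/* caller holds the old base lock */
	where = move_timer(0, 1, 0);
	printf("timer ended up on base %d\n", where);
	pthread_mutex_unlock(&base_lock[where]);
	return 0;
}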
*/ if (expires_next.tv64 != KTIME_MAX) { diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 73468253143..e70ed5592eb 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -42,8 +42,7 @@ static inline void unregister_handler_proc(unsigned int irq, extern int irq_select_affinity_usr(unsigned int irq); -extern void -irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask); +extern void irq_set_thread_affinity(struct irq_desc *desc); /* * Debugging printout: diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index aaf5c9d0577..0ec9ed83173 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -80,14 +80,22 @@ int irq_can_set_affinity(unsigned int irq) return 1; } -void -irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask) +/** + * irq_set_thread_affinity - Notify irq threads to adjust affinity + * @desc: irq descriptor whose affinity changed + * + * We just set IRQTF_AFFINITY and delegate the affinity setting + * to the interrupt thread itself. We cannot call + * set_cpus_allowed_ptr() here as we hold desc->lock and this + * code can be called from hard interrupt context. + */ +void irq_set_thread_affinity(struct irq_desc *desc) { struct irqaction *action = desc->action; while (action) { if (action->thread) - set_cpus_allowed_ptr(action->thread, cpumask); + set_bit(IRQTF_AFFINITY, &action->thread_flags); action = action->next; } } @@ -112,7 +120,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) if (desc->status & IRQ_MOVE_PCNTXT) { if (!desc->chip->set_affinity(irq, cpumask)) { cpumask_copy(desc->affinity, cpumask); - irq_set_thread_affinity(desc, cpumask); + irq_set_thread_affinity(desc); } } else { @@ -122,7 +130,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) #else if (!desc->chip->set_affinity(irq, cpumask)) { cpumask_copy(desc->affinity, cpumask); - irq_set_thread_affinity(desc, cpumask); + irq_set_thread_affinity(desc); } #endif desc->status |= IRQ_AFFINITY_SET; @@ -176,7 +184,7 @@ int irq_select_affinity_usr(unsigned int irq) spin_lock_irqsave(&desc->lock, flags); ret = setup_affinity(irq, desc); if (!ret) - irq_set_thread_affinity(desc, desc->affinity); + irq_set_thread_affinity(desc); spin_unlock_irqrestore(&desc->lock, flags); return ret; @@ -443,6 +451,39 @@ static int irq_wait_for_interrupt(struct irqaction *action) return -1; } +#ifdef CONFIG_SMP +/* + * Check whether we need to change the affinity of the interrupt thread.
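The pattern being introduced here deserves a name: a context that may not block (desc->lock held, possibly hard interrupt context) records intent in an atomic flag, and the interrupt thread applies the change to itself the next time it runs. The two halves, as a C11 userspace analogue with invented names:

#include <stdio.h>
#include <stdbool.h>
#include <stdatomic.h>

static atomic_bool affinity_pending;

/* Producer, analogue of irq_set_thread_affinity(): safe anywhere,
 * it only flips a flag (the kernel then wakes the thread). */
static void note_affinity_changed(void)
{
	atomic_store(&affinity_pending, true);
}

/* Consumer, analogue of irq_thread_check_affinity(): runs in the
 * thread itself, where sleeping and allocating are allowed. */
static void thread_check_affinity(void)
{
	if (!atomic_exchange(&affinity_pending, false))
		return;			/* test_and_clear_bit() equivalent */
	printf("applying new affinity to self\n");
}

int main(void)
{
	note_affinity_changed();
	thread_check_affinity();	/* applies the change */
	thread_check_affinity();	/* nothing pending, returns early */
	return 0;
}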
+ */ +static void +irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) +{ + cpumask_var_t mask; + + if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags)) + return; + + /* + * In case we are out of memory we set IRQTF_AFFINITY again and + * try again next time + */ + if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { + set_bit(IRQTF_AFFINITY, &action->thread_flags); + return; + } + + spin_lock_irq(&desc->lock); + cpumask_copy(mask, desc->affinity); + spin_unlock_irq(&desc->lock); + + set_cpus_allowed_ptr(current, mask); + free_cpumask_var(mask); +} +#else +static inline void +irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { } +#endif + /* * Interrupt handler thread */ @@ -458,6 +499,8 @@ static int irq_thread(void *data) while (!irq_wait_for_interrupt(action)) { + irq_thread_check_affinity(desc, action); + atomic_inc(&desc->threads_active); spin_lock_irq(&desc->lock); @@ -564,7 +607,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) */ get_task_struct(t); new->thread = t; - wake_up_process(t); } /* @@ -647,6 +689,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) (int)(new->flags & IRQF_TRIGGER_MASK)); } + new->irq = irq; *old_ptr = new; /* Reset broken irq detection when installing new handler */ @@ -664,7 +707,13 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) spin_unlock_irqrestore(&desc->lock, flags); - new->irq = irq; + /* + * Strictly no need to wake it up, but hung_task complains + * when no hard interrupt wakes the thread up. + */ + if (new->thread) + wake_up_process(new->thread); + register_irq_proc(irq, desc); new->dir = NULL; register_handler_proc(irq, new); @@ -718,7 +767,6 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) { struct irq_desc *desc = irq_to_desc(irq); struct irqaction *action, **action_ptr; - struct task_struct *irqthread; unsigned long flags; WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); @@ -766,9 +814,6 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) desc->chip->disable(irq); } - irqthread = action->thread; - action->thread = NULL; - spin_unlock_irqrestore(&desc->lock, flags); unregister_handler_proc(irq, action); @@ -776,12 +821,6 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) /* Make sure it's not being used on another CPU: */ synchronize_irq(irq); - if (irqthread) { - if (!test_bit(IRQTF_DIED, &action->thread_flags)) - kthread_stop(irqthread); - put_task_struct(irqthread); - } - #ifdef CONFIG_DEBUG_SHIRQ /* * It's a shared IRQ -- the driver ought to be prepared for an IRQ @@ -797,6 +836,13 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) local_irq_restore(flags); } #endif + + if (action->thread) { + if (!test_bit(IRQTF_DIED, &action->thread_flags)) + kthread_stop(action->thread); + put_task_struct(action->thread); + } + return action; } @@ -856,7 +902,7 @@ EXPORT_SYMBOL(free_irq); * still called in hard interrupt context and has to check * whether the interrupt originates from the device. If yes it * needs to disable the interrupt on the device and return - * IRQ_THREAD_WAKE which will wake up the handler thread and run + * IRQ_WAKE_THREAD which will wake up the handler thread and run * @thread_fn. This split handler design is necessary to support * shared interrupts. 
* diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c index cfe767ca154..fcb6c96f262 100644 --- a/kernel/irq/migration.c +++ b/kernel/irq/migration.c @@ -45,7 +45,7 @@ void move_masked_irq(int irq) < nr_cpu_ids)) if (!desc->chip->set_affinity(irq, desc->pending_mask)) { cpumask_copy(desc->affinity, desc->pending_mask); - irq_set_thread_affinity(desc, desc->pending_mask); + irq_set_thread_affinity(desc); } cpumask_clear(desc->pending_mask); diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c index 2f69bee57bf..3fd30197da2 100644 --- a/kernel/irq/numa_migrate.c +++ b/kernel/irq/numa_migrate.c @@ -107,8 +107,8 @@ out_unlock: struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) { - /* those all static, do move them */ - if (desc->irq < NR_IRQS_LEGACY) + /* those static or target node is -1, do not move them */ + if (desc->irq < NR_IRQS_LEGACY || node == -1) return desc; if (desc->node != node) diff --git a/kernel/kexec.c b/kernel/kexec.c index ae1c35201cc..f336e2107f9 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -1228,7 +1228,7 @@ static int __init parse_crashkernel_mem(char *cmdline, } while (*cur++ == ','); if (*crash_size > 0) { - while (*cur != ' ' && *cur != '@') + while (*cur && *cur != ' ' && *cur != '@') cur++; if (*cur == '@') { cur++; diff --git a/kernel/kmod.c b/kernel/kmod.c index 7e95bedb2bf..4e8cae2e914 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c @@ -24,7 +24,6 @@ #include <linux/unistd.h> #include <linux/kmod.h> #include <linux/slab.h> -#include <linux/mnt_namespace.h> #include <linux/completion.h> #include <linux/file.h> #include <linux/fdtable.h> @@ -79,6 +78,10 @@ int __request_module(bool wait, const char *fmt, ...) #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */ static int kmod_loop_msg; + ret = security_kernel_module_request(); + if (ret) + return ret; + va_start(args, fmt); ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args); va_end(args); @@ -463,6 +466,7 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int retval = 0; BUG_ON(atomic_read(&sub_info->cred->usage) != 1); + validate_creds(sub_info->cred); helper_lock(); if (sub_info->path[0] == '\0') diff --git a/kernel/kprobes.c b/kernel/kprobes.c index c0fa54b276d..0540948e29a 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -237,13 +237,9 @@ static int __kprobes collect_garbage_slots(void) { struct kprobe_insn_page *kip; struct hlist_node *pos, *next; - int safety; /* Ensure no-one is preepmted on the garbages */ - mutex_unlock(&kprobe_insn_mutex); - safety = check_safety(); - mutex_lock(&kprobe_insn_mutex); - if (safety != 0) + if (check_safety()) return -EAGAIN; hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) { @@ -698,7 +694,7 @@ int __kprobes register_kprobe(struct kprobe *p) p->addr = addr; preempt_disable(); - if (!__kernel_text_address((unsigned long) p->addr) || + if (!kernel_text_address((unsigned long) p->addr) || in_kprobes_functions((unsigned long) p->addr)) { preempt_enable(); return -EINVAL; diff --git a/kernel/kthread.c b/kernel/kthread.c index 7fa44133352..eb8751aa041 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -27,7 +27,6 @@ struct kthread_create_info /* Information passed to kthread() from kthreadd. */ int (*threadfn)(void *data); void *data; - struct completion started; /* Result passed back to kthread_create() from kthreadd. 
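Returning to the parse_crashkernel_mem() one-liner above: without the *cur test, a crashkernel= value that ends directly after the size, with no ' ' or '@offset' suffix, lets the scan run past the terminating NUL. A minimal reproduction of the fixed scan:

#include <stdio.h>

int main(void)
{
	const char *cmdline = "128M";	/* no ' ' and no '@offset' suffix */
	const char *cur = cmdline;

	/* Fixed form: stop at NUL as well as at ' ' and '@'. */
	while (*cur && *cur != ' ' && *cur != '@')
		cur++;

	if (*cur == '@')
		printf("offset follows: %s\n", cur + 1);
	else
		printf("no offset, scanned %d chars\n", (int)(cur - cmdline));
	return 0;
}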
*/ struct task_struct *result; @@ -36,17 +35,13 @@ struct kthread_create_info struct list_head list; }; -struct kthread_stop_info -{ - struct task_struct *k; - int err; - struct completion done; +struct kthread { + int should_stop; + struct completion exited; }; -/* Thread stopping is done by setthing this var: lock serializes - * multiple kthread_stop calls. */ -static DEFINE_MUTEX(kthread_stop_lock); -static struct kthread_stop_info kthread_stop_info; +#define to_kthread(tsk) \ + container_of((tsk)->vfork_done, struct kthread, exited) /** * kthread_should_stop - should this kthread return now? @@ -57,36 +52,35 @@ static struct kthread_stop_info kthread_stop_info; */ int kthread_should_stop(void) { - return (kthread_stop_info.k == current); + return to_kthread(current)->should_stop; } EXPORT_SYMBOL(kthread_should_stop); static int kthread(void *_create) { + /* Copy data: it's on kthread's stack */ struct kthread_create_info *create = _create; - int (*threadfn)(void *data); - void *data; - int ret = -EINTR; + int (*threadfn)(void *data) = create->threadfn; + void *data = create->data; + struct kthread self; + int ret; - /* Copy data: it's on kthread's stack */ - threadfn = create->threadfn; - data = create->data; + self.should_stop = 0; + init_completion(&self.exited); + current->vfork_done = &self.exited; /* OK, tell user we're spawned, wait for stop or wakeup */ __set_current_state(TASK_UNINTERRUPTIBLE); create->result = current; - complete(&create->started); + complete(&create->done); schedule(); - if (!kthread_should_stop()) + ret = -EINTR; + if (!self.should_stop) ret = threadfn(data); - /* It might have exited on its own, w/o kthread_stop. Check. */ - if (kthread_should_stop()) { - kthread_stop_info.err = ret; - complete(&kthread_stop_info.done); - } - return 0; + /* we can't just return, we must preserve "self" on stack */ + do_exit(ret); } static void create_kthread(struct kthread_create_info *create) @@ -95,11 +89,10 @@ static void create_kthread(struct kthread_create_info *create) /* We want our own signal handler (we take no signals by default). */ pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD); - if (pid < 0) + if (pid < 0) { create->result = ERR_PTR(pid); - else - wait_for_completion(&create->started); - complete(&create->done); + complete(&create->done); + } } /** @@ -130,7 +123,6 @@ struct task_struct *kthread_create(int (*threadfn)(void *data), create.threadfn = threadfn; create.data = data; - init_completion(&create.started); init_completion(&create.done); spin_lock(&kthread_create_lock); @@ -188,40 +180,34 @@ EXPORT_SYMBOL(kthread_bind); * @k: thread created by kthread_create(). * * Sets kthread_should_stop() for @k to return true, wakes it, and - * waits for it to exit. Your threadfn() must not call do_exit() - * itself if you use this function! This can also be called after - * kthread_create() instead of calling wake_up_process(): the thread - * will exit without calling threadfn(). + * waits for it to exit. This can also be called after kthread_create() + * instead of calling wake_up_process(): the thread will exit without + * calling threadfn(). + * + * If threadfn() may call do_exit() itself, the caller must ensure + * task_struct can't go away. * * Returns the result of threadfn(), or %-EINTR if wake_up_process() * was never called. */ int kthread_stop(struct task_struct *k) { + struct kthread *kthread; int ret; - mutex_lock(&kthread_stop_lock); - - /* It could exit after stop_info.k set, but before wake_up_process. 
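For context, the contract this kthread rework preserves for API users: threadfn() loops until kthread_should_stop() goes true, and kthread_stop() both raises that condition and returns the thread's exit code. A typical caller, as a kernel-style sketch (worker names invented):

#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *worker;

static int worker_fn(void *data)
{
	/* Runs until kthread_stop() makes kthread_should_stop() true. */
	while (!kthread_should_stop()) {
		/* ... one unit of work ... */
		msleep(100);
	}
	return 0;	/* handed back as kthread_stop()'s return value */
}

static int start_worker(void)
{
	worker = kthread_run(worker_fn, NULL, "worker/%d", 0);
	return IS_ERR(worker) ? PTR_ERR(worker) : 0;
}

static void stop_worker(void)
{
	kthread_stop(worker);	/* wakes the thread, waits for it to exit */
}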
*/ - get_task_struct(k); - trace_sched_kthread_stop(k); + get_task_struct(k); - /* Must init completion *before* thread sees kthread_stop_info.k */ - init_completion(&kthread_stop_info.done); - smp_wmb(); + kthread = to_kthread(k); + barrier(); /* it might have exited */ + if (k->vfork_done != NULL) { + kthread->should_stop = 1; + wake_up_process(k); + wait_for_completion(&kthread->exited); + } + ret = k->exit_code; - /* Now set kthread_should_stop() to true, and wake it up. */ - kthread_stop_info.k = k; - wake_up_process(k); put_task_struct(k); - - /* Once it dies, reset stop ptr, gather result and we're done. */ - wait_for_completion(&kthread_stop_info.done); - kthread_stop_info.k = NULL; - ret = kthread_stop_info.err; - mutex_unlock(&kthread_stop_lock); - trace_sched_kthread_stop_ret(ret); return ret; diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c index d7135aa2d2c..e94caa666db 100644 --- a/kernel/lockdep_proc.c +++ b/kernel/lockdep_proc.c @@ -758,7 +758,8 @@ static int __init lockdep_proc_init(void) &proc_lockdep_stats_operations); #ifdef CONFIG_LOCK_STAT - proc_create("lock_stat", S_IRUSR, NULL, &proc_lock_stat_operations); + proc_create("lock_stat", S_IRUSR | S_IWUSR, NULL, + &proc_lock_stat_operations); #endif return 0; diff --git a/kernel/module.c b/kernel/module.c index 215aaab09e9..2d537186191 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -909,16 +909,18 @@ void __symbol_put(const char *symbol) } EXPORT_SYMBOL(__symbol_put); +/* Note this assumes addr is a function, which it currently always is. */ void symbol_put_addr(void *addr) { struct module *modaddr; + unsigned long a = (unsigned long)dereference_function_descriptor(addr); - if (core_kernel_text((unsigned long)addr)) + if (core_kernel_text(a)) return; /* module_text_address is safe here: we're supposed to have reference * to module from symbol_get, so it can't go away. */ - modaddr = __module_text_address((unsigned long)addr); + modaddr = __module_text_address(a); BUG_ON(!modaddr); module_put(modaddr); } @@ -1068,7 +1070,8 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs, { const unsigned long *crc; - if (!find_symbol("module_layout", NULL, &crc, true, false)) + if (!find_symbol(MODULE_SYMBOL_PREFIX "module_layout", NULL, + &crc, true, false)) BUG(); return check_version(sechdrs, versindex, "module_layout", mod, crc); } @@ -1271,6 +1274,10 @@ static void add_notes_attrs(struct module *mod, unsigned int nsect, struct module_notes_attrs *notes_attrs; struct bin_attribute *nattr; + /* failed to create section attributes, so can't create notes */ + if (!mod->sect_attrs) + return; + /* Count notes sections and allocate structures. */ notes = 0; for (i = 0; i < nsect; i++) @@ -2216,6 +2223,10 @@ static noinline struct module *load_module(void __user *umod, mod->unused_gpl_crcs = section_addr(hdr, sechdrs, secstrings, "__kcrctab_unused_gpl"); #endif +#ifdef CONFIG_CONSTRUCTORS + mod->ctors = section_objs(hdr, sechdrs, secstrings, ".ctors", + sizeof(*mod->ctors), &mod->num_ctors); +#endif #ifdef CONFIG_MARKERS mod->markers = section_objs(hdr, sechdrs, secstrings, "__markers", @@ -2389,6 +2400,17 @@ static noinline struct module *load_module(void __user *umod, goto free_hdr; } +/* Call module constructors. 
*/ +static void do_mod_ctors(struct module *mod) +{ +#ifdef CONFIG_CONSTRUCTORS + unsigned long i; + + for (i = 0; i < mod->num_ctors; i++) + mod->ctors[i](); +#endif +} + /* This is where the real work happens */ SYSCALL_DEFINE3(init_module, void __user *, umod, unsigned long, len, const char __user *, uargs) @@ -2417,6 +2439,7 @@ SYSCALL_DEFINE3(init_module, void __user *, umod, blocking_notifier_call_chain(&module_notify_list, MODULE_STATE_COMING, mod); + do_mod_ctors(mod); /* Start the module */ if (mod->init != NULL) ret = do_one_initcall(mod->init); @@ -2435,9 +2458,9 @@ SYSCALL_DEFINE3(init_module, void __user *, umod, return ret; } if (ret > 0) { - printk(KERN_WARNING "%s: '%s'->init suspiciously returned %d, " - "it should follow 0/-E convention\n" - KERN_WARNING "%s: loading module anyway...\n", + printk(KERN_WARNING +"%s: '%s'->init suspiciously returned %d, it should follow 0/-E convention\n" +"%s: loading module anyway...\n", __func__, mod->name, ret, __func__); dump_stack(); diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c index 63598dca2d0..09b4ff9711b 100644 --- a/kernel/nsproxy.c +++ b/kernel/nsproxy.c @@ -26,19 +26,14 @@ static struct kmem_cache *nsproxy_cachep; struct nsproxy init_nsproxy = INIT_NSPROXY(init_nsproxy); -/* - * creates a copy of "orig" with refcount 1. - */ -static inline struct nsproxy *clone_nsproxy(struct nsproxy *orig) +static inline struct nsproxy *create_nsproxy(void) { - struct nsproxy *ns; + struct nsproxy *nsproxy; - ns = kmem_cache_alloc(nsproxy_cachep, GFP_KERNEL); - if (ns) { - memcpy(ns, orig, sizeof(struct nsproxy)); - atomic_set(&ns->count, 1); - } - return ns; + nsproxy = kmem_cache_alloc(nsproxy_cachep, GFP_KERNEL); + if (nsproxy) + atomic_set(&nsproxy->count, 1); + return nsproxy; } /* @@ -52,7 +47,7 @@ static struct nsproxy *create_new_namespaces(unsigned long flags, struct nsproxy *new_nsp; int err; - new_nsp = clone_nsproxy(tsk->nsproxy); + new_nsp = create_nsproxy(); if (!new_nsp) return ERR_PTR(-ENOMEM); diff --git a/kernel/panic.c b/kernel/panic.c index 984b3ecbd72..512ab73b0ca 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -301,6 +301,7 @@ int oops_may_print(void) */ void oops_enter(void) { + tracing_off(); /* can't trust the integrity of the kernel anymore: */ debug_locks_off(); do_oops_enter_exit(); diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 29b685f551a..d7cbc579fc8 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -42,6 +42,7 @@ static int perf_overcommit __read_mostly = 1; static atomic_t nr_counters __read_mostly; static atomic_t nr_mmap_counters __read_mostly; static atomic_t nr_comm_counters __read_mostly; +static atomic_t nr_task_counters __read_mostly; /* * perf counter paranoia level: @@ -49,7 +50,7 @@ static atomic_t nr_comm_counters __read_mostly; * 1 - disallow cpu counters to unpriv * 2 - disallow kernel profiling to unpriv */ -int sysctl_perf_counter_paranoid __read_mostly; +int sysctl_perf_counter_paranoid __read_mostly = 1; static inline bool perf_paranoid_cpu(void) { @@ -87,6 +88,7 @@ void __weak hw_perf_disable(void) { barrier(); } void __weak hw_perf_enable(void) { barrier(); } void __weak hw_perf_counter_setup(int cpu) { barrier(); } +void __weak hw_perf_counter_setup_online(int cpu) { barrier(); } int __weak hw_perf_group_sched_in(struct perf_counter *group_leader, @@ -124,7 +126,7 @@ void perf_enable(void) static void get_ctx(struct perf_counter_context *ctx) { - atomic_inc(&ctx->refcount); + WARN_ON(!atomic_inc_not_zero(&ctx->refcount)); } static void 
free_ctx(struct rcu_head *head) @@ -146,6 +148,28 @@ static void put_ctx(struct perf_counter_context *ctx) } } +static void unclone_ctx(struct perf_counter_context *ctx) +{ + if (ctx->parent_ctx) { + put_ctx(ctx->parent_ctx); + ctx->parent_ctx = NULL; + } +} + +/* + * If we inherit counters we want to return the parent counter id + * to userspace. + */ +static u64 primary_counter_id(struct perf_counter *counter) +{ + u64 id = counter->id; + + if (counter->parent) + id = counter->parent->id; + + return id; +} + /* * Get the perf_counter_context for a task and lock it. * This has to cope with with the fact that until it is locked, @@ -175,6 +199,11 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags) spin_unlock_irqrestore(&ctx->lock, *flags); goto retry; } + + if (!atomic_inc_not_zero(&ctx->refcount)) { + spin_unlock_irqrestore(&ctx->lock, *flags); + ctx = NULL; + } } rcu_read_unlock(); return ctx; @@ -193,7 +222,6 @@ static struct perf_counter_context *perf_pin_task_context(struct task_struct *ta ctx = perf_lock_task_context(task, &flags); if (ctx) { ++ctx->pin_count; - get_ctx(ctx); spin_unlock_irqrestore(&ctx->lock, flags); } return ctx; @@ -232,6 +260,8 @@ list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx) list_add_rcu(&counter->event_entry, &ctx->event_list); ctx->nr_counters++; + if (counter->attr.inherit_stat) + ctx->nr_stat++; } /* @@ -246,6 +276,8 @@ list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx) if (list_empty(&counter->list_entry)) return; ctx->nr_counters--; + if (counter->attr.inherit_stat) + ctx->nr_stat--; list_del_init(&counter->list_entry); list_del_rcu(&counter->event_entry); @@ -275,6 +307,10 @@ counter_sched_out(struct perf_counter *counter, return; counter->state = PERF_COUNTER_STATE_INACTIVE; + if (counter->pending_disable) { + counter->pending_disable = 0; + counter->state = PERF_COUNTER_STATE_OFF; + } counter->tstamp_stopped = ctx->time; counter->pmu->disable(counter); counter->oncpu = -1; @@ -1002,6 +1038,81 @@ static int context_equiv(struct perf_counter_context *ctx1, && !ctx1->pin_count && !ctx2->pin_count; } +static void __perf_counter_read(void *counter); + +static void __perf_counter_sync_stat(struct perf_counter *counter, + struct perf_counter *next_counter) +{ + u64 value; + + if (!counter->attr.inherit_stat) + return; + + /* + * Update the counter value, we cannot use perf_counter_read() + * because we're in the middle of a context switch and have IRQs + * disabled, which upsets smp_call_function_single(), however + * we know the counter must be on the current CPU, therefore we + * don't need to use it. + */ + switch (counter->state) { + case PERF_COUNTER_STATE_ACTIVE: + __perf_counter_read(counter); + break; + + case PERF_COUNTER_STATE_INACTIVE: + update_counter_times(counter); + break; + + default: + break; + } + + /* + * In order to keep per-task stats reliable we need to flip the counter + * values when we flip the contexts. + */ + value = atomic64_read(&next_counter->count); + value = atomic64_xchg(&counter->count, value); + atomic64_set(&next_counter->count, value); + + swap(counter->total_time_enabled, next_counter->total_time_enabled); + swap(counter->total_time_running, next_counter->total_time_running); + + /* + * Since we swizzled the values, update the user visible data too. 
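Two idioms in this perf_counter hunk are worth unpacking. get_ctx() and perf_lock_task_context() now use atomic_inc_not_zero(), taking a reference only if the count has not already hit zero, so a context midway through being freed can never be revived; and __perf_counter_sync_stat() flips counter values with atomic64_xchg(). The refcount guard reduces to a compare-and-swap loop, sketched here with C11 atomics:

#include <stdio.h>
#include <stdatomic.h>

/* C11 analogue of atomic_inc_not_zero(): take a reference only if the
 * count has not already dropped to zero (object being freed). */
static int inc_not_zero(atomic_int *count)
{
	int old = atomic_load(count);

	while (old != 0) {
		if (atomic_compare_exchange_weak(count, &old, old + 1))
			return 1;	/* got a reference */
	}
	return 0;			/* too late: already zero */
}

int main(void)
{
	atomic_int live = 1, dying = 0;

	printf("live:  %d\n", inc_not_zero(&live));	/* prints 1 */
	printf("dying: %d\n", inc_not_zero(&dying));	/* prints 0 */
	return 0;
}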
+ */ + perf_counter_update_userpage(counter); + perf_counter_update_userpage(next_counter); +} + +#define list_next_entry(pos, member) \ + list_entry(pos->member.next, typeof(*pos), member) + +static void perf_counter_sync_stat(struct perf_counter_context *ctx, + struct perf_counter_context *next_ctx) +{ + struct perf_counter *counter, *next_counter; + + if (!ctx->nr_stat) + return; + + counter = list_first_entry(&ctx->event_list, + struct perf_counter, event_entry); + + next_counter = list_first_entry(&next_ctx->event_list, + struct perf_counter, event_entry); + + while (&counter->event_entry != &ctx->event_list && + &next_counter->event_entry != &next_ctx->event_list) { + + __perf_counter_sync_stat(counter, next_counter); + + counter = list_next_entry(counter, event_entry); + next_counter = list_next_entry(next_counter, event_entry); + } +} + /* * Called from scheduler to remove the counters of the current task, * with interrupts disabled. @@ -1057,6 +1168,8 @@ void perf_counter_task_sched_out(struct task_struct *task, ctx->task = next; next_ctx->task = task; do_switch = 0; + + perf_counter_sync_stat(ctx, next_ctx); } spin_unlock(&next_ctx->lock); spin_unlock(&ctx->lock); @@ -1203,7 +1316,6 @@ static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu) #define MAX_INTERRUPTS (~0ULL) static void perf_log_throttle(struct perf_counter *counter, int enable); -static void perf_log_period(struct perf_counter *counter, u64 period); static void perf_adjust_period(struct perf_counter *counter, u64 events) { @@ -1222,8 +1334,6 @@ static void perf_adjust_period(struct perf_counter *counter, u64 events) if (!sample_period) sample_period = 1; - perf_log_period(counter, sample_period); - hwc->sample_period = sample_period; } @@ -1283,7 +1393,7 @@ static void perf_ctx_adjust_freq(struct perf_counter_context *ctx) if (!interrupts) { perf_disable(); counter->pmu->disable(counter); - atomic_set(&hwc->period_left, 0); + atomic64_set(&hwc->period_left, 0); counter->pmu->enable(counter); perf_enable(); } @@ -1344,14 +1454,70 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu) } /* + * Enable all of a task's counters that have been marked enable-on-exec. + * This expects task == current. + */ +static void perf_counter_enable_on_exec(struct task_struct *task) +{ + struct perf_counter_context *ctx; + struct perf_counter *counter; + unsigned long flags; + int enabled = 0; + + local_irq_save(flags); + ctx = task->perf_counter_ctxp; + if (!ctx || !ctx->nr_counters) + goto out; + + __perf_counter_task_sched_out(ctx); + + spin_lock(&ctx->lock); + + list_for_each_entry(counter, &ctx->counter_list, list_entry) { + if (!counter->attr.enable_on_exec) + continue; + counter->attr.enable_on_exec = 0; + if (counter->state >= PERF_COUNTER_STATE_INACTIVE) + continue; + counter->state = PERF_COUNTER_STATE_INACTIVE; + counter->tstamp_enabled = + ctx->time - counter->total_time_enabled; + enabled = 1; + } + + /* + * Unclone this context if we enabled any counter. 
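perf_counter_sync_stat() pairs the two contexts' event lists positionally and walks them in lockstep with the list_next_entry() helper it defines, which is nothing more than container_of() applied to the next pointer. A standalone reconstruction over a minimal intrusive list (GNU C typeof, as in the kernel; the struct layout here is a stand-in):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member)	container_of(ptr, type, member)
#define list_next_entry(pos, member) \
	list_entry((pos)->member.next, typeof(*(pos)), member)

struct counter { int id; struct list_head event_entry; };

int main(void)
{
	struct counter a = { .id = 1 }, b = { .id = 2 };

	a.event_entry.next = &b.event_entry;	/* hand-linked for the demo */
	printf("after id=%d comes id=%d\n", a.id,
	       list_next_entry(&a, event_entry)->id);
	return 0;
}

The positional pairing is sound because context_equiv() only lets exact clones be swapped, so equivalent counters occupy the same list positions on both sides.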
+ */ + if (enabled) + unclone_ctx(ctx); + + spin_unlock(&ctx->lock); + + perf_counter_task_sched_in(task, smp_processor_id()); + out: + local_irq_restore(flags); +} + +/* * Cross CPU call to read the hardware counter */ -static void __read(void *info) +static void __perf_counter_read(void *info) { + struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); struct perf_counter *counter = info; struct perf_counter_context *ctx = counter->ctx; unsigned long flags; + /* + * If this is a task context, we need to check whether it is + * the current task context of this cpu. If not it has been + * scheduled out before the smp call arrived. In that case + * counter->count would have been updated to a recent sample + * when the counter was scheduled out. + */ + if (ctx->task && cpuctx->task_ctx != ctx) + return; + local_irq_save(flags); if (ctx->is_active) update_context_time(ctx); @@ -1368,7 +1534,7 @@ static u64 perf_counter_read(struct perf_counter *counter) */ if (counter->state == PERF_COUNTER_STATE_ACTIVE) { smp_call_function_single(counter->oncpu, - __read, counter, 1); + __perf_counter_read, counter, 1); } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) { update_counter_times(counter); } @@ -1394,7 +1560,6 @@ __perf_counter_init_context(struct perf_counter_context *ctx, static struct perf_counter_context *find_get_context(pid_t pid, int cpu) { - struct perf_counter_context *parent_ctx; struct perf_counter_context *ctx; struct perf_cpu_context *cpuctx; struct task_struct *task; @@ -1454,16 +1619,7 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu) retry: ctx = perf_lock_task_context(task, &flags); if (ctx) { - parent_ctx = ctx->parent_ctx; - if (parent_ctx) { - put_ctx(parent_ctx); - ctx->parent_ctx = NULL; /* no longer a clone */ - } - /* - * Get an extra reference before dropping the lock so that - * this context won't get freed if the task exits. 
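The extra get_ctx() being deleted below is safe to drop because perf_lock_task_context() now takes the reference itself: atomic_inc_not_zero() both acquires and validates, failing on a context whose refcount already dropped to zero on another CPU. The pattern in isolation, with C11 atomics standing in for the kernel's atomic_t:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* "Get unless already zero": take a reference only while the object
 * is still live; a zero count means a free is already in flight. */
static bool get_unless_zero(_Atomic int *refcount)
{
	int old = atomic_load(refcount);

	while (old != 0) {
		if (atomic_compare_exchange_weak(refcount, &old, old + 1))
			return true;	/* reference taken */
		/* old was reloaded on failure; the loop re-checks for zero */
	}
	return false;
}

int main(void)
{
	_Atomic int live = 1, dying = 0;

	printf("live: %d, dying: %d\n",
	       get_unless_zero(&live), get_unless_zero(&dying));	/* 1, 0 */
	return 0;
}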
- */ - get_ctx(ctx); + unclone_ctx(ctx); spin_unlock_irqrestore(&ctx->lock, flags); } @@ -1509,11 +1665,15 @@ static void free_counter(struct perf_counter *counter) { perf_pending_sync(counter); - atomic_dec(&nr_counters); - if (counter->attr.mmap) - atomic_dec(&nr_mmap_counters); - if (counter->attr.comm) - atomic_dec(&nr_comm_counters); + if (!counter->parent) { + atomic_dec(&nr_counters); + if (counter->attr.mmap) + atomic_dec(&nr_mmap_counters); + if (counter->attr.comm) + atomic_dec(&nr_comm_counters); + if (counter->attr.task) + atomic_dec(&nr_task_counters); + } if (counter->destroy) counter->destroy(counter); @@ -1547,14 +1707,133 @@ static int perf_release(struct inode *inode, struct file *file) return 0; } +static int perf_counter_read_size(struct perf_counter *counter) +{ + int entry = sizeof(u64); /* value */ + int size = 0; + int nr = 1; + + if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) + size += sizeof(u64); + + if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) + size += sizeof(u64); + + if (counter->attr.read_format & PERF_FORMAT_ID) + entry += sizeof(u64); + + if (counter->attr.read_format & PERF_FORMAT_GROUP) { + nr += counter->group_leader->nr_siblings; + size += sizeof(u64); + } + + size += entry * nr; + + return size; +} + +static u64 perf_counter_read_value(struct perf_counter *counter) +{ + struct perf_counter *child; + u64 total = 0; + + total += perf_counter_read(counter); + list_for_each_entry(child, &counter->child_list, child_list) + total += perf_counter_read(child); + + return total; +} + +static int perf_counter_read_entry(struct perf_counter *counter, + u64 read_format, char __user *buf) +{ + int n = 0, count = 0; + u64 values[2]; + + values[n++] = perf_counter_read_value(counter); + if (read_format & PERF_FORMAT_ID) + values[n++] = primary_counter_id(counter); + + count = n * sizeof(u64); + + if (copy_to_user(buf, values, count)) + return -EFAULT; + + return count; +} + +static int perf_counter_read_group(struct perf_counter *counter, + u64 read_format, char __user *buf) +{ + struct perf_counter *leader = counter->group_leader, *sub; + int n = 0, size = 0, err = -EFAULT; + u64 values[3]; + + values[n++] = 1 + leader->nr_siblings; + if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { + values[n++] = leader->total_time_enabled + + atomic64_read(&leader->child_total_time_enabled); + } + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { + values[n++] = leader->total_time_running + + atomic64_read(&leader->child_total_time_running); + } + + size = n * sizeof(u64); + + if (copy_to_user(buf, values, size)) + return -EFAULT; + + err = perf_counter_read_entry(leader, read_format, buf + size); + if (err < 0) + return err; + + size += err; + + list_for_each_entry(sub, &leader->sibling_list, list_entry) { + err = perf_counter_read_entry(sub, read_format, + buf + size); + if (err < 0) + return err; + + size += err; + } + + return size; +} + +static int perf_counter_read_one(struct perf_counter *counter, + u64 read_format, char __user *buf) +{ + u64 values[4]; + int n = 0; + + values[n++] = perf_counter_read_value(counter); + if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { + values[n++] = counter->total_time_enabled + + atomic64_read(&counter->child_total_time_enabled); + } + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { + values[n++] = counter->total_time_running + + atomic64_read(&counter->child_total_time_running); + } + if (read_format & PERF_FORMAT_ID) + values[n++] = primary_counter_id(counter); + + if (copy_to_user(buf, 
values, n * sizeof(u64))) + return -EFAULT; + + return n * sizeof(u64); +} + /* * Read the performance counter - simple non blocking version for now */ static ssize_t perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count) { - u64 values[3]; - int n; + u64 read_format = counter->attr.read_format; + int ret; /* * Return end-of-file for a read on a counter that is in @@ -1564,28 +1843,18 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count) if (counter->state == PERF_COUNTER_STATE_ERROR) return 0; + if (count < perf_counter_read_size(counter)) + return -ENOSPC; + WARN_ON_ONCE(counter->ctx->parent_ctx); mutex_lock(&counter->child_mutex); - values[0] = perf_counter_read(counter); - n = 1; - if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) - values[n++] = counter->total_time_enabled + - atomic64_read(&counter->child_total_time_enabled); - if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) - values[n++] = counter->total_time_running + - atomic64_read(&counter->child_total_time_running); - if (counter->attr.read_format & PERF_FORMAT_ID) - values[n++] = counter->id; + if (read_format & PERF_FORMAT_GROUP) + ret = perf_counter_read_group(counter, read_format, buf); + else + ret = perf_counter_read_one(counter, read_format, buf); mutex_unlock(&counter->child_mutex); - if (count < n * sizeof(u64)) - return -EINVAL; - count = n * sizeof(u64); - - if (copy_to_user(buf, values, count)) - return -EFAULT; - - return count; + return ret; } static ssize_t @@ -1620,22 +1889,6 @@ static void perf_counter_reset(struct perf_counter *counter) perf_counter_update_userpage(counter); } -static void perf_counter_for_each_sibling(struct perf_counter *counter, - void (*func)(struct perf_counter *)) -{ - struct perf_counter_context *ctx = counter->ctx; - struct perf_counter *sibling; - - WARN_ON_ONCE(ctx->parent_ctx); - mutex_lock(&ctx->mutex); - counter = counter->group_leader; - - func(counter); - list_for_each_entry(sibling, &counter->sibling_list, list_entry) - func(sibling); - mutex_unlock(&ctx->mutex); -} - /* * Holding the top-level counter's child_mutex means that any * descendant process that has inherited this counter will block @@ -1658,14 +1911,18 @@ static void perf_counter_for_each_child(struct perf_counter *counter, static void perf_counter_for_each(struct perf_counter *counter, void (*func)(struct perf_counter *)) { - struct perf_counter *child; + struct perf_counter_context *ctx = counter->ctx; + struct perf_counter *sibling; - WARN_ON_ONCE(counter->ctx->parent_ctx); - mutex_lock(&counter->child_mutex); - perf_counter_for_each_sibling(counter, func); - list_for_each_entry(child, &counter->child_list, child_list) - perf_counter_for_each_sibling(child, func); - mutex_unlock(&counter->child_mutex); + WARN_ON_ONCE(ctx->parent_ctx); + mutex_lock(&ctx->mutex); + counter = counter->group_leader; + + perf_counter_for_each_child(counter, func); + func(counter); + list_for_each_entry(sibling, &counter->sibling_list, list_entry) + perf_counter_for_each_child(counter, func); + mutex_unlock(&ctx->mutex); } static int perf_counter_period(struct perf_counter *counter, u64 __user *arg) @@ -1694,8 +1951,6 @@ static int perf_counter_period(struct perf_counter *counter, u64 __user *arg) counter->attr.sample_freq = value; } else { - perf_log_period(counter, value); - counter->attr.sample_period = value; counter->hw.sample_period = value; } @@ -1764,6 +2019,18 @@ int perf_counter_task_disable(void) return 0; } +#ifndef PERF_COUNTER_INDEX_OFFSET +# define 
PERF_COUNTER_INDEX_OFFSET 0 +#endif + +static int perf_counter_index(struct perf_counter *counter) +{ + if (counter->state != PERF_COUNTER_STATE_ACTIVE) + return 0; + + return counter->hw.idx + 1 - PERF_COUNTER_INDEX_OFFSET; +} + /* * Callers need to ensure there can be no nesting of this function, otherwise * the seqlock logic goes bad. We can not serialize this because the arch @@ -1788,11 +2055,17 @@ void perf_counter_update_userpage(struct perf_counter *counter) preempt_disable(); ++userpg->lock; barrier(); - userpg->index = counter->hw.idx; + userpg->index = perf_counter_index(counter); userpg->offset = atomic64_read(&counter->count); if (counter->state == PERF_COUNTER_STATE_ACTIVE) userpg->offset -= atomic64_read(&counter->hw.prev_count); + userpg->time_enabled = counter->total_time_enabled + + atomic64_read(&counter->child_total_time_enabled); + + userpg->time_running = counter->total_time_running + + atomic64_read(&counter->child_total_time_running); + barrier(); ++userpg->lock; preempt_enable(); @@ -1806,6 +2079,12 @@ static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) struct perf_mmap_data *data; int ret = VM_FAULT_SIGBUS; + if (vmf->flags & FAULT_FLAG_MKWRITE) { + if (vmf->pgoff == 0) + ret = 0; + return ret; + } + rcu_read_lock(); data = rcu_dereference(counter->data); if (!data) @@ -1819,9 +2098,16 @@ static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) if ((unsigned)nr > data->nr_pages) goto unlock; + if (vmf->flags & FAULT_FLAG_WRITE) + goto unlock; + vmf->page = virt_to_page(data->data_pages[nr]); } + get_page(vmf->page); + vmf->page->mapping = vma->vm_file->f_mapping; + vmf->page->index = vmf->pgoff; + ret = 0; unlock: rcu_read_unlock(); @@ -1874,6 +2160,14 @@ fail: return -ENOMEM; } +static void perf_mmap_free_page(unsigned long addr) +{ + struct page *page = virt_to_page((void *)addr); + + page->mapping = NULL; + __free_page(page); +} + static void __perf_mmap_data_free(struct rcu_head *rcu_head) { struct perf_mmap_data *data; @@ -1881,9 +2175,10 @@ static void __perf_mmap_data_free(struct rcu_head *rcu_head) data = container_of(rcu_head, struct perf_mmap_data, rcu_head); - free_page((unsigned long)data->user_page); + perf_mmap_free_page((unsigned long)data->user_page); for (i = 0; i < data->nr_pages; i++) - free_page((unsigned long)data->data_pages[i]); + perf_mmap_free_page((unsigned long)data->data_pages[i]); + kfree(data); } @@ -1920,9 +2215,10 @@ static void perf_mmap_close(struct vm_area_struct *vma) } static struct vm_operations_struct perf_mmap_vmops = { - .open = perf_mmap_open, - .close = perf_mmap_close, - .fault = perf_mmap_fault, + .open = perf_mmap_open, + .close = perf_mmap_close, + .fault = perf_mmap_fault, + .page_mkwrite = perf_mmap_fault, }; static int perf_mmap(struct file *file, struct vm_area_struct *vma) @@ -1936,7 +2232,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) long user_extra, extra; int ret = 0; - if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE)) + if (!(vma->vm_flags & VM_SHARED)) return -EINVAL; vma_size = vma->vm_end - vma->vm_start; @@ -1995,10 +2291,12 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) atomic_long_add(user_extra, &user->locked_vm); vma->vm_mm->locked_vm += extra; counter->data->nr_locked = extra; + if (vma->vm_flags & VM_WRITE) + counter->data->writable = 1; + unlock: mutex_unlock(&counter->mmap_mutex); - vma->vm_flags &= ~VM_MAYWRITE; vma->vm_flags |= VM_RESERVED; vma->vm_ops = &perf_mmap_vmops; @@ -2064,7 
+2362,7 @@ static void perf_pending_counter(struct perf_pending_entry *entry) if (counter->pending_disable) { counter->pending_disable = 0; - perf_counter_disable(counter); + __perf_counter_disable(counter); } if (counter->pending_wakeup) { @@ -2175,11 +2473,38 @@ struct perf_output_handle { unsigned long head; unsigned long offset; int nmi; - int overflow; + int sample; int locked; unsigned long flags; }; +static bool perf_output_space(struct perf_mmap_data *data, + unsigned int offset, unsigned int head) +{ + unsigned long tail; + unsigned long mask; + + if (!data->writable) + return true; + + mask = (data->nr_pages << PAGE_SHIFT) - 1; + /* + * Userspace could choose to issue a mb() before updating the tail + * pointer. So that all reads will be completed before the write is + * issued. + */ + tail = ACCESS_ONCE(data->user_page->data_tail); + smp_rmb(); + + offset = (offset - tail) & mask; + head = (head - tail) & mask; + + if ((int)(head - offset) < 0) + return false; + + return true; +} + static void perf_output_wakeup(struct perf_output_handle *handle) { atomic_set(&handle->data->poll, POLL_IN); @@ -2270,12 +2595,57 @@ out: local_irq_restore(handle->flags); } +static void perf_output_copy(struct perf_output_handle *handle, + const void *buf, unsigned int len) +{ + unsigned int pages_mask; + unsigned int offset; + unsigned int size; + void **pages; + + offset = handle->offset; + pages_mask = handle->data->nr_pages - 1; + pages = handle->data->data_pages; + + do { + unsigned int page_offset; + int nr; + + nr = (offset >> PAGE_SHIFT) & pages_mask; + page_offset = offset & (PAGE_SIZE - 1); + size = min_t(unsigned int, PAGE_SIZE - page_offset, len); + + memcpy(pages[nr] + page_offset, buf, size); + + len -= size; + buf += size; + offset += size; + } while (len); + + handle->offset = offset; + + /* + * Check we didn't copy past our reservation window, taking the + * possible unsigned int wrap into account. + */ + WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0); +} + +#define perf_output_put(handle, x) \ + perf_output_copy((handle), &(x), sizeof(x)) + static int perf_output_begin(struct perf_output_handle *handle, struct perf_counter *counter, unsigned int size, - int nmi, int overflow) + int nmi, int sample) { struct perf_mmap_data *data; unsigned int offset, head; + int have_lost; + struct { + struct perf_event_header header; + u64 id; + u64 lost; + } lost_event; /* * For inherited counters we send all the output towards the parent. 
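perf_output_space() is the new flow control for user-writable buffers: all distances are measured from the user-published data_tail, so the unsigned wrap of head and offset cancels out, and a reservation is refused when its end would overrun unread data. The same arithmetic as a self-contained check (power-of-two buffer size assumed, 4 KiB here):

#include <stdbool.h>
#include <stdio.h>

static bool output_space(unsigned long tail, unsigned long offset,
			 unsigned long head, unsigned long buf_size)
{
	unsigned long mask = buf_size - 1;	/* buf_size is a power of two */

	offset = (offset - tail) & mask;	/* distance reader -> old head */
	head = (head - tail) & mask;		/* distance reader -> new head */

	/* If the new head lands "before" the old one relative to the
	 * tail, the record would overwrite unconsumed data. */
	return (int)(head - offset) >= 0;
}

int main(void)
{
	printf("%d\n", output_space(100, 200, 300, 4096));	/* 1: fits */
	printf("%d\n", output_space(100, 4000, 4200, 4096));	/* 0: would clobber */
	return 0;
}

On failure the kernel side writes nothing; it bumps data->lost, and the have_lost logic in perf_output_begin() emits a PERF_EVENT_LOST record once space frees up.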
@@ -2288,19 +2658,25 @@ static int perf_output_begin(struct perf_output_handle *handle, if (!data) goto out; - handle->data = data; - handle->counter = counter; - handle->nmi = nmi; - handle->overflow = overflow; + handle->data = data; + handle->counter = counter; + handle->nmi = nmi; + handle->sample = sample; if (!data->nr_pages) goto fail; + have_lost = atomic_read(&data->lost); + if (have_lost) + size += sizeof(lost_event); + perf_output_lock(handle); do { offset = head = atomic_long_read(&data->head); head += size; + if (unlikely(!perf_output_space(data, offset, head))) + goto fail; } while (atomic_long_cmpxchg(&data->head, offset, head) != offset); handle->offset = offset; @@ -2309,55 +2685,27 @@ static int perf_output_begin(struct perf_output_handle *handle, if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT)) atomic_set(&data->wakeup, 1); + if (have_lost) { + lost_event.header.type = PERF_EVENT_LOST; + lost_event.header.misc = 0; + lost_event.header.size = sizeof(lost_event); + lost_event.id = counter->id; + lost_event.lost = atomic_xchg(&data->lost, 0); + + perf_output_put(handle, lost_event); + } + return 0; fail: - perf_output_wakeup(handle); + atomic_inc(&data->lost); + perf_output_unlock(handle); out: rcu_read_unlock(); return -ENOSPC; } -static void perf_output_copy(struct perf_output_handle *handle, - const void *buf, unsigned int len) -{ - unsigned int pages_mask; - unsigned int offset; - unsigned int size; - void **pages; - - offset = handle->offset; - pages_mask = handle->data->nr_pages - 1; - pages = handle->data->data_pages; - - do { - unsigned int page_offset; - int nr; - - nr = (offset >> PAGE_SHIFT) & pages_mask; - page_offset = offset & (PAGE_SIZE - 1); - size = min_t(unsigned int, PAGE_SIZE - page_offset, len); - - memcpy(pages[nr] + page_offset, buf, size); - - len -= size; - buf += size; - offset += size; - } while (len); - - handle->offset = offset; - - /* - * Check we didn't copy past our reservation window, taking the - * possible unsigned int wrap into account. 
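perf_output_copy() itself only moved, but its loop is worth reading in isolation: the destination is an array of equally sized pages addressed modulo nr_pages, so a single memcpy loop transparently handles both page crossings and buffer wrap-around. A userspace rendering (page size and page count assumed):

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE	4096u

static unsigned long output_copy(void **pages, unsigned int nr_pages,
				 unsigned long offset,
				 const void *buf, unsigned int len)
{
	unsigned int pages_mask = nr_pages - 1;	/* nr_pages: power of two */

	do {
		unsigned int nr = (offset / PAGE_SIZE) & pages_mask;
		unsigned int page_offset = offset & (PAGE_SIZE - 1);
		unsigned int size = PAGE_SIZE - page_offset;

		if (size > len)
			size = len;
		memcpy((char *)pages[nr] + page_offset, buf, size);

		len -= size;
		buf = (const char *)buf + size;
		offset += size;
	} while (len);

	return offset;	/* the new write position (handle->offset) */
}

int main(void)
{
	static char p0[PAGE_SIZE], p1[PAGE_SIZE];
	void *pages[2] = { p0, p1 };

	/* An 8-byte write starting 3 bytes before the end of page 0. */
	output_copy(pages, 2, PAGE_SIZE - 3, "wrapped", 8);
	printf("%.3s|%s\n", p0 + PAGE_SIZE - 3, p1);	/* wra|pped */
	return 0;
}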
- */ - WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0); -} - -#define perf_output_put(handle, x) \ - perf_output_copy((handle), &(x), sizeof(x)) - static void perf_output_end(struct perf_output_handle *handle) { struct perf_counter *counter = handle->counter; @@ -2365,7 +2713,7 @@ static void perf_output_end(struct perf_output_handle *handle) int wakeup_events = counter->attr.wakeup_events; - if (handle->overflow && wakeup_events) { + if (handle->sample && wakeup_events) { int events = atomic_inc_return(&data->events); if (events >= wakeup_events) { atomic_sub(wakeup_events, &data->events); @@ -2399,7 +2747,80 @@ static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p) return task_pid_nr_ns(p, counter->ns); } -static void perf_counter_output(struct perf_counter *counter, int nmi, +static void perf_output_read_one(struct perf_output_handle *handle, + struct perf_counter *counter) +{ + u64 read_format = counter->attr.read_format; + u64 values[4]; + int n = 0; + + values[n++] = atomic64_read(&counter->count); + if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { + values[n++] = counter->total_time_enabled + + atomic64_read(&counter->child_total_time_enabled); + } + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { + values[n++] = counter->total_time_running + + atomic64_read(&counter->child_total_time_running); + } + if (read_format & PERF_FORMAT_ID) + values[n++] = primary_counter_id(counter); + + perf_output_copy(handle, values, n * sizeof(u64)); +} + +/* + * XXX PERF_FORMAT_GROUP vs inherited counters seems difficult. + */ +static void perf_output_read_group(struct perf_output_handle *handle, + struct perf_counter *counter) +{ + struct perf_counter *leader = counter->group_leader, *sub; + u64 read_format = counter->attr.read_format; + u64 values[5]; + int n = 0; + + values[n++] = 1 + leader->nr_siblings; + + if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) + values[n++] = leader->total_time_enabled; + + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) + values[n++] = leader->total_time_running; + + if (leader != counter) + leader->pmu->read(leader); + + values[n++] = atomic64_read(&leader->count); + if (read_format & PERF_FORMAT_ID) + values[n++] = primary_counter_id(leader); + + perf_output_copy(handle, values, n * sizeof(u64)); + + list_for_each_entry(sub, &leader->sibling_list, list_entry) { + n = 0; + + if (sub != counter) + sub->pmu->read(sub); + + values[n++] = atomic64_read(&sub->count); + if (read_format & PERF_FORMAT_ID) + values[n++] = primary_counter_id(sub); + + perf_output_copy(handle, values, n * sizeof(u64)); + } +} + +static void perf_output_read(struct perf_output_handle *handle, + struct perf_counter *counter) +{ + if (counter->attr.read_format & PERF_FORMAT_GROUP) + perf_output_read_group(handle, counter); + else + perf_output_read_one(handle, counter); +} + +void perf_counter_output(struct perf_counter *counter, int nmi, struct perf_sample_data *data) { int ret; @@ -2410,10 +2831,6 @@ static void perf_counter_output(struct perf_counter *counter, int nmi, struct { u32 pid, tid; } tid_entry; - struct { - u64 id; - u64 counter; - } group_entry; struct perf_callchain_entry *callchain = NULL; int callchain_size = 0; u64 time; @@ -2421,15 +2838,14 @@ static void perf_counter_output(struct perf_counter *counter, int nmi, u32 cpu, reserved; } cpu_entry; - header.type = 0; + header.type = PERF_EVENT_SAMPLE; header.size = sizeof(header); - header.misc = PERF_EVENT_MISC_OVERFLOW; + header.misc = 0; header.misc |= perf_misc_flags(data->regs); 
if (sample_type & PERF_SAMPLE_IP) { ip = perf_instruction_pointer(data->regs); - header.type |= PERF_SAMPLE_IP; header.size += sizeof(ip); } @@ -2438,7 +2854,6 @@ static void perf_counter_output(struct perf_counter *counter, int nmi, tid_entry.pid = perf_counter_pid(counter, current); tid_entry.tid = perf_counter_tid(counter, current); - header.type |= PERF_SAMPLE_TID; header.size += sizeof(tid_entry); } @@ -2448,47 +2863,51 @@ static void perf_counter_output(struct perf_counter *counter, int nmi, */ time = sched_clock(); - header.type |= PERF_SAMPLE_TIME; header.size += sizeof(u64); } - if (sample_type & PERF_SAMPLE_ADDR) { - header.type |= PERF_SAMPLE_ADDR; + if (sample_type & PERF_SAMPLE_ADDR) header.size += sizeof(u64); - } - if (sample_type & PERF_SAMPLE_ID) { - header.type |= PERF_SAMPLE_ID; + if (sample_type & PERF_SAMPLE_ID) + header.size += sizeof(u64); + + if (sample_type & PERF_SAMPLE_STREAM_ID) header.size += sizeof(u64); - } if (sample_type & PERF_SAMPLE_CPU) { - header.type |= PERF_SAMPLE_CPU; header.size += sizeof(cpu_entry); cpu_entry.cpu = raw_smp_processor_id(); + cpu_entry.reserved = 0; } - if (sample_type & PERF_SAMPLE_PERIOD) { - header.type |= PERF_SAMPLE_PERIOD; + if (sample_type & PERF_SAMPLE_PERIOD) header.size += sizeof(u64); - } - if (sample_type & PERF_SAMPLE_GROUP) { - header.type |= PERF_SAMPLE_GROUP; - header.size += sizeof(u64) + - counter->nr_siblings * sizeof(group_entry); - } + if (sample_type & PERF_SAMPLE_READ) + header.size += perf_counter_read_size(counter); if (sample_type & PERF_SAMPLE_CALLCHAIN) { callchain = perf_callchain(data->regs); if (callchain) { callchain_size = (1 + callchain->nr) * sizeof(u64); - - header.type |= PERF_SAMPLE_CALLCHAIN; header.size += callchain_size; - } + } else + header.size += sizeof(u64); + } + + if (sample_type & PERF_SAMPLE_RAW) { + int size = sizeof(u32); + + if (data->raw) + size += data->raw->size; + else + size += sizeof(u32); + + WARN_ON_ONCE(size & (sizeof(u64)-1)); + header.size += size; } ret = perf_output_begin(&handle, counter, header.size, nmi, 1); @@ -2509,7 +2928,13 @@ static void perf_counter_output(struct perf_counter *counter, int nmi, if (sample_type & PERF_SAMPLE_ADDR) perf_output_put(&handle, data->addr); - if (sample_type & PERF_SAMPLE_ID) + if (sample_type & PERF_SAMPLE_ID) { + u64 id = primary_counter_id(counter); + + perf_output_put(&handle, id); + } + + if (sample_type & PERF_SAMPLE_STREAM_ID) perf_output_put(&handle, counter->id); if (sample_type & PERF_SAMPLE_CPU) @@ -2518,76 +2943,125 @@ static void perf_counter_output(struct perf_counter *counter, int nmi, if (sample_type & PERF_SAMPLE_PERIOD) perf_output_put(&handle, data->period); - /* - * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult. 
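From here on perf_counter_output() works in two passes: the first grows header.size once per enabled PERF_SAMPLE_* bit (replacing the old scheme of OR-ing the bits into header.type), the second emits the fields in exactly the same order, so the declared size always matches the payload. The sizing pass in miniature (flag values and header layout below are illustrative stand-ins, not the real ABI):

#include <stdint.h>
#include <stdio.h>

#define SAMPLE_IP	(1u << 0)
#define SAMPLE_TID	(1u << 1)
#define SAMPLE_TIME	(1u << 2)

struct event_header { uint32_t type; uint16_t misc, size; };	/* 8 bytes */

static uint16_t sample_size(uint32_t sample_type)
{
	uint16_t size = sizeof(struct event_header);

	if (sample_type & SAMPLE_IP)
		size += sizeof(uint64_t);	/* instruction pointer */
	if (sample_type & SAMPLE_TID)
		size += 2 * sizeof(uint32_t);	/* pid + tid */
	if (sample_type & SAMPLE_TIME)
		size += sizeof(uint64_t);	/* timestamp */
	return size;
}

int main(void)
{
	printf("%u\n", sample_size(SAMPLE_IP | SAMPLE_TID));	/* 24 */
	return 0;
}

Keeping every field a u64 multiple (note the explicit padding of the callchain and raw-data cases to 8 bytes) lets a consumer step through the buffer using nothing but header.size.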
- */ - if (sample_type & PERF_SAMPLE_GROUP) { - struct perf_counter *leader, *sub; - u64 nr = counter->nr_siblings; - - perf_output_put(&handle, nr); - - leader = counter->group_leader; - list_for_each_entry(sub, &leader->sibling_list, list_entry) { - if (sub != counter) - sub->pmu->read(sub); + if (sample_type & PERF_SAMPLE_READ) + perf_output_read(&handle, counter); - group_entry.id = sub->id; - group_entry.counter = atomic64_read(&sub->count); + if (sample_type & PERF_SAMPLE_CALLCHAIN) { + if (callchain) + perf_output_copy(&handle, callchain, callchain_size); + else { + u64 nr = 0; + perf_output_put(&handle, nr); + } + } - perf_output_put(&handle, group_entry); + if (sample_type & PERF_SAMPLE_RAW) { + if (data->raw) { + perf_output_put(&handle, data->raw->size); + perf_output_copy(&handle, data->raw->data, data->raw->size); + } else { + struct { + u32 size; + u32 data; + } raw = { + .size = sizeof(u32), + .data = 0, + }; + perf_output_put(&handle, raw); } } - if (callchain) - perf_output_copy(&handle, callchain, callchain_size); + perf_output_end(&handle); +} + +/* + * read event + */ + +struct perf_read_event { + struct perf_event_header header; + + u32 pid; + u32 tid; +}; + +static void +perf_counter_read_event(struct perf_counter *counter, + struct task_struct *task) +{ + struct perf_output_handle handle; + struct perf_read_event event = { + .header = { + .type = PERF_EVENT_READ, + .misc = 0, + .size = sizeof(event) + perf_counter_read_size(counter), + }, + .pid = perf_counter_pid(counter, task), + .tid = perf_counter_tid(counter, task), + }; + int ret; + + ret = perf_output_begin(&handle, counter, event.header.size, 0, 0); + if (ret) + return; + + perf_output_put(&handle, event); + perf_output_read(&handle, counter); perf_output_end(&handle); } /* - * fork tracking + * task tracking -- fork/exit + * + * enabled by: attr.comm | attr.mmap | attr.task */ -struct perf_fork_event { - struct task_struct *task; +struct perf_task_event { + struct task_struct *task; + struct perf_counter_context *task_ctx; struct { struct perf_event_header header; u32 pid; u32 ppid; + u32 tid; + u32 ptid; } event; }; -static void perf_counter_fork_output(struct perf_counter *counter, - struct perf_fork_event *fork_event) +static void perf_counter_task_output(struct perf_counter *counter, + struct perf_task_event *task_event) { struct perf_output_handle handle; - int size = fork_event->event.header.size; - struct task_struct *task = fork_event->task; + int size = task_event->event.header.size; + struct task_struct *task = task_event->task; int ret = perf_output_begin(&handle, counter, size, 0, 0); if (ret) return; - fork_event->event.pid = perf_counter_pid(counter, task); - fork_event->event.ppid = perf_counter_pid(counter, task->real_parent); + task_event->event.pid = perf_counter_pid(counter, task); + task_event->event.ppid = perf_counter_pid(counter, current); + + task_event->event.tid = perf_counter_tid(counter, task); + task_event->event.ptid = perf_counter_tid(counter, current); - perf_output_put(&handle, fork_event->event); + perf_output_put(&handle, task_event->event); perf_output_end(&handle); } -static int perf_counter_fork_match(struct perf_counter *counter) +static int perf_counter_task_match(struct perf_counter *counter) { - if (counter->attr.comm || counter->attr.mmap) + if (counter->attr.comm || counter->attr.mmap || counter->attr.task) return 1; return 0; } -static void perf_counter_fork_ctx(struct perf_counter_context *ctx, - struct perf_fork_event *fork_event) +static void 
perf_counter_task_ctx(struct perf_counter_context *ctx, + struct perf_task_event *task_event) { struct perf_counter *counter; @@ -2596,51 +3070,62 @@ static void perf_counter_fork_ctx(struct perf_counter_context *ctx, rcu_read_lock(); list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { - if (perf_counter_fork_match(counter)) - perf_counter_fork_output(counter, fork_event); + if (perf_counter_task_match(counter)) + perf_counter_task_output(counter, task_event); } rcu_read_unlock(); } -static void perf_counter_fork_event(struct perf_fork_event *fork_event) +static void perf_counter_task_event(struct perf_task_event *task_event) { struct perf_cpu_context *cpuctx; - struct perf_counter_context *ctx; + struct perf_counter_context *ctx = task_event->task_ctx; cpuctx = &get_cpu_var(perf_cpu_context); - perf_counter_fork_ctx(&cpuctx->ctx, fork_event); + perf_counter_task_ctx(&cpuctx->ctx, task_event); put_cpu_var(perf_cpu_context); rcu_read_lock(); - /* - * doesn't really matter which of the child contexts the - * events ends up in. - */ - ctx = rcu_dereference(current->perf_counter_ctxp); + if (!ctx) + ctx = rcu_dereference(task_event->task->perf_counter_ctxp); if (ctx) - perf_counter_fork_ctx(ctx, fork_event); + perf_counter_task_ctx(ctx, task_event); rcu_read_unlock(); } -void perf_counter_fork(struct task_struct *task) +static void perf_counter_task(struct task_struct *task, + struct perf_counter_context *task_ctx, + int new) { - struct perf_fork_event fork_event; + struct perf_task_event task_event; if (!atomic_read(&nr_comm_counters) && - !atomic_read(&nr_mmap_counters)) + !atomic_read(&nr_mmap_counters) && + !atomic_read(&nr_task_counters)) return; - fork_event = (struct perf_fork_event){ - .task = task, - .event = { + task_event = (struct perf_task_event){ + .task = task, + .task_ctx = task_ctx, + .event = { .header = { - .type = PERF_EVENT_FORK, - .size = sizeof(fork_event.event), + .type = new ? PERF_EVENT_FORK : PERF_EVENT_EXIT, + .misc = 0, + .size = sizeof(task_event.event), }, + /* .pid */ + /* .ppid */ + /* .tid */ + /* .ptid */ }, }; - perf_counter_fork_event(&fork_event); + perf_counter_task_event(&task_event); +} + +void perf_counter_fork(struct task_struct *task) +{ + perf_counter_task(task, NULL, 1); } /* @@ -2708,8 +3193,10 @@ static void perf_counter_comm_event(struct perf_comm_event *comm_event) struct perf_cpu_context *cpuctx; struct perf_counter_context *ctx; unsigned int size; - char *comm = comm_event->task->comm; + char comm[TASK_COMM_LEN]; + memset(comm, 0, sizeof(comm)); + strncpy(comm, comm_event->task->comm, sizeof(comm)); size = ALIGN(strlen(comm)+1, sizeof(u64)); comm_event->comm = comm; @@ -2736,13 +3223,24 @@ void perf_counter_comm(struct task_struct *task) { struct perf_comm_event comm_event; + if (task->perf_counter_ctxp) + perf_counter_enable_on_exec(task); + if (!atomic_read(&nr_comm_counters)) return; comm_event = (struct perf_comm_event){ .task = task, + /* .comm */ + /* .comm_size */ .event = { - .header = { .type = PERF_EVENT_COMM, }, + .header = { + .type = PERF_EVENT_COMM, + .misc = 0, + /* .size */ + }, + /* .pid */ + /* .tid */ }, }; @@ -2825,8 +3323,15 @@ static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event) char *buf = NULL; const char *name; + memset(tmp, 0, sizeof(tmp)); + if (file) { - buf = kzalloc(PATH_MAX, GFP_KERNEL); + /* + * d_path works from the end of the buffer backwards, so we + * need to add enough zero bytes after the string to handle + * the 64bit alignment we do later. 
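The over-allocation that follows exists because the emitted name length is rounded up: the record copies ALIGN(strlen(name) + 1, sizeof(u64)) bytes, so up to seven bytes past the terminating NUL must be mapped, zeroed memory, and d_path() writing at the tail end of the buffer would otherwise leave no room for them. A worked example of the rounding:

#include <stdio.h>
#include <string.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	const char *name = "/lib/libc.so";	/* 12 characters */
	size_t copied = ALIGN(strlen(name) + 1, sizeof(unsigned long long));

	printf("strlen+1=%zu copied=%zu pad=%zu\n", strlen(name) + 1,
	       copied, copied - (strlen(name) + 1));
	/* strlen+1=13 copied=16 pad=3 */
	return 0;
}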
+ */ + buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL); if (!buf) { name = strncpy(tmp, "//enomem", sizeof(tmp)); goto got_name; @@ -2837,9 +3342,11 @@ static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event) goto got_name; } } else { - name = arch_vma_name(mmap_event->vma); - if (name) + if (arch_vma_name(mmap_event->vma)) { + name = strncpy(tmp, arch_vma_name(mmap_event->vma), + sizeof(tmp)); goto got_name; + } if (!vma->vm_mm) { name = strncpy(tmp, "[vdso]", sizeof(tmp)); @@ -2884,8 +3391,16 @@ void __perf_counter_mmap(struct vm_area_struct *vma) mmap_event = (struct perf_mmap_event){ .vma = vma, + /* .file_name */ + /* .file_size */ .event = { - .header = { .type = PERF_EVENT_MMAP, }, + .header = { + .type = PERF_EVENT_MMAP, + .misc = 0, + /* .size */ + }, + /* .pid */ + /* .tid */ .start = vma->vm_start, .len = vma->vm_end - vma->vm_start, .pgoff = vma->vm_pgoff, @@ -2896,49 +3411,6 @@ void __perf_counter_mmap(struct vm_area_struct *vma) } /* - * Log sample_period changes so that analyzing tools can re-normalize the - * event flow. - */ - -struct freq_event { - struct perf_event_header header; - u64 time; - u64 id; - u64 period; -}; - -static void perf_log_period(struct perf_counter *counter, u64 period) -{ - struct perf_output_handle handle; - struct freq_event event; - int ret; - - if (counter->hw.sample_period == period) - return; - - if (counter->attr.sample_type & PERF_SAMPLE_PERIOD) - return; - - event = (struct freq_event) { - .header = { - .type = PERF_EVENT_PERIOD, - .misc = 0, - .size = sizeof(event), - }, - .time = sched_clock(), - .id = counter->id, - .period = period, - }; - - ret = perf_output_begin(&handle, counter, sizeof(event), 1, 0); - if (ret) - return; - - perf_output_put(&handle, event); - perf_output_end(&handle); -} - -/* * IRQ throttle logging */ @@ -2951,16 +3423,21 @@ static void perf_log_throttle(struct perf_counter *counter, int enable) struct perf_event_header header; u64 time; u64 id; + u64 stream_id; } throttle_event = { .header = { - .type = PERF_EVENT_THROTTLE + 1, + .type = PERF_EVENT_THROTTLE, .misc = 0, .size = sizeof(throttle_event), }, - .time = sched_clock(), - .id = counter->id, + .time = sched_clock(), + .id = primary_counter_id(counter), + .stream_id = counter->id, }; + if (enable) + throttle_event.header.type = PERF_EVENT_UNTHROTTLE; + ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0); if (ret) return; @@ -2970,7 +3447,7 @@ static void perf_log_throttle(struct perf_counter *counter, int enable) } /* - * Generic counter overflow handling. + * Generic counter overflow handling, sampling. */ int perf_counter_overflow(struct perf_counter *counter, int nmi, @@ -3037,130 +3514,111 @@ int perf_counter_overflow(struct perf_counter *counter, int nmi, * Generic software counter infrastructure */ -static void perf_swcounter_update(struct perf_counter *counter) +/* + * We directly increment counter->count and keep a second value in + * counter->hw.period_left to count intervals. This period counter + * is kept in the range [-sample_period, 0] so that we can use the + * sign as trigger. 
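That invariant is the whole mechanism: period_left only becomes non-negative once at least one full period has elapsed, and folding it back below zero tells you how many. A plain-integer rendering (the kernel function that follows does the same thing under an atomic64 cmpxchg retry loop):

#include <stdint.h>
#include <stdio.h>

/* Single-threaded sketch of perf_swcounter_set_period(): returns how
 * many whole periods elapsed and re-arms period_left in [-period, 0]. */
static uint64_t set_period(int64_t *period_left, uint64_t period)
{
	int64_t val = *period_left;
	uint64_t nr;

	if (val < 0)
		return 0;	/* still inside the current period */

	nr = (period + (uint64_t)val) / period;	/* periods elapsed */
	*period_left = val - (int64_t)(nr * period);
	return nr;
}

int main(void)
{
	int64_t left = -10;	/* 10 events to go, period of 100 */

	left += 25;		/* 25 events arrive */
	printf("overflows=%llu left=%lld\n",
	       (unsigned long long)set_period(&left, 100), (long long)left);
	/* overflows=1 left=-85 */
	return 0;
}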
+ */ + +static u64 perf_swcounter_set_period(struct perf_counter *counter) { struct hw_perf_counter *hwc = &counter->hw; - u64 prev, now; - s64 delta; + u64 period = hwc->last_period; + u64 nr, offset; + s64 old, val; + + hwc->last_period = hwc->sample_period; again: - prev = atomic64_read(&hwc->prev_count); - now = atomic64_read(&hwc->count); - if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev) - goto again; + old = val = atomic64_read(&hwc->period_left); + if (val < 0) + return 0; - delta = now - prev; + nr = div64_u64(period + val, period); + offset = nr * period; + val -= offset; + if (atomic64_cmpxchg(&hwc->period_left, old, val) != old) + goto again; - atomic64_add(delta, &counter->count); - atomic64_sub(delta, &hwc->period_left); + return nr; } -static void perf_swcounter_set_period(struct perf_counter *counter) +static void perf_swcounter_overflow(struct perf_counter *counter, + int nmi, struct perf_sample_data *data) { struct hw_perf_counter *hwc = &counter->hw; - s64 left = atomic64_read(&hwc->period_left); - s64 period = hwc->sample_period; + u64 overflow; - if (unlikely(left <= -period)) { - left = period; - atomic64_set(&hwc->period_left, left); - hwc->last_period = period; - } + data->period = counter->hw.last_period; + overflow = perf_swcounter_set_period(counter); - if (unlikely(left <= 0)) { - left += period; - atomic64_add(period, &hwc->period_left); - hwc->last_period = period; - } + if (hwc->interrupts == MAX_INTERRUPTS) + return; - atomic64_set(&hwc->prev_count, -left); - atomic64_set(&hwc->count, -left); + for (; overflow; overflow--) { + if (perf_counter_overflow(counter, nmi, data)) { + /* + * We inhibit the overflow from happening when + * hwc->interrupts == MAX_INTERRUPTS. + */ + break; + } + } } -static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) +static void perf_swcounter_unthrottle(struct perf_counter *counter) { - enum hrtimer_restart ret = HRTIMER_RESTART; - struct perf_sample_data data; - struct perf_counter *counter; - u64 period; - - counter = container_of(hrtimer, struct perf_counter, hw.hrtimer); - counter->pmu->read(counter); - - data.addr = 0; - data.regs = get_irq_regs(); /* - * In case we exclude kernel IPs or are somehow not in interrupt - * context, provide the next best thing, the user IP. + * Nothing to do, we already reset hwc->interrupts. 
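The arming side is matched by a cheap trigger in perf_swcounter_add(), shown just below: event counts are batch-added to period_left, and only when the atomic add drives it non-negative does the slow overflow path run. The trigger in C11 atomics (a stand-in for the kernel's atomic64_add_negative()):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* True if v + nr is negative; the overflow path runs when it is not. */
static bool add_negative(_Atomic int64_t *v, int64_t nr)
{
	return atomic_fetch_add(v, nr) + nr < 0;
}

int main(void)
{
	_Atomic int64_t period_left = -100;	/* 100 events to overflow */

	printf("%s\n", add_negative(&period_left, 40)
	       ? "still counting" : "overflow");	/* still counting */
	printf("%s\n", add_negative(&period_left, 80)
	       ? "still counting" : "overflow");	/* overflow */
	return 0;
}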
*/ - if ((counter->attr.exclude_kernel || !data.regs) && - !counter->attr.exclude_user) - data.regs = task_pt_regs(current); - - if (data.regs) { - if (perf_counter_overflow(counter, 0, &data)) - ret = HRTIMER_NORESTART; - } - - period = max_t(u64, 10000, counter->hw.sample_period); - hrtimer_forward_now(hrtimer, ns_to_ktime(period)); - - return ret; } -static void perf_swcounter_overflow(struct perf_counter *counter, - int nmi, struct pt_regs *regs, u64 addr) +static void perf_swcounter_add(struct perf_counter *counter, u64 nr, + int nmi, struct perf_sample_data *data) { - struct perf_sample_data data = { - .regs = regs, - .addr = addr, - .period = counter->hw.last_period, - }; + struct hw_perf_counter *hwc = &counter->hw; + + atomic64_add(nr, &counter->count); - perf_swcounter_update(counter); - perf_swcounter_set_period(counter); - if (perf_counter_overflow(counter, nmi, &data)) - /* soft-disable the counter */ - ; + if (!hwc->sample_period) + return; + + if (!data->regs) + return; + if (!atomic64_add_negative(nr, &hwc->period_left)) + perf_swcounter_overflow(counter, nmi, data); } static int perf_swcounter_is_counting(struct perf_counter *counter) { - struct perf_counter_context *ctx; - unsigned long flags; - int count; - + /* + * The counter is active, we're good! + */ if (counter->state == PERF_COUNTER_STATE_ACTIVE) return 1; + /* + * The counter is off/error, not counting. + */ if (counter->state != PERF_COUNTER_STATE_INACTIVE) return 0; /* - * If the counter is inactive, it could be just because - * its task is scheduled out, or because it's in a group - * which could not go on the PMU. We want to count in - * the first case but not the second. If the context is - * currently active then an inactive software counter must - * be the second case. If it's not currently active then - * we need to know whether the counter was active when the - * context was last active, which we can determine by - * comparing counter->tstamp_stopped with ctx->time. - * - * We are within an RCU read-side critical section, - * which protects the existence of *ctx. + * The counter is inactive, if the context is active + * we're part of a group that didn't make it on the 'pmu', + * not counting. */ - ctx = counter->ctx; - spin_lock_irqsave(&ctx->lock, flags); - count = 1; - /* Re-check state now we have the lock */ - if (counter->state < PERF_COUNTER_STATE_INACTIVE || - counter->ctx->is_active || - counter->tstamp_stopped < ctx->time) - count = 0; - spin_unlock_irqrestore(&ctx->lock, flags); - return count; + if (counter->ctx->is_active) + return 0; + + /* + * We're inactive and the context is too, this means the + * task is scheduled out, we're counting events that happen + * to us, like migration events. 
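The rewritten perf_swcounter_is_counting() replaces the old lock-and-compare-timestamps scheme with three direct observations, which reduce to a small decision table (illustrative enum, same reasoning):

#include <stdbool.h>
#include <stdio.h>

enum state { STATE_OFF = -1, STATE_INACTIVE = 0, STATE_ACTIVE = 1 };

static bool is_counting(enum state state, bool ctx_is_active)
{
	if (state == STATE_ACTIVE)
		return true;		/* on the PMU right now */
	if (state != STATE_INACTIVE)
		return false;		/* off or in error */
	/* Inactive with an active context: the group lost the PMU race,
	 * don't count.  Inactive with an inactive context: the task is
	 * scheduled out, and events targeting the task itself (e.g.
	 * migrations) still belong to it, so count. */
	return !ctx_is_active;
}

int main(void)
{
	printf("%d %d %d\n",
	       is_counting(STATE_ACTIVE, true),		/* 1 */
	       is_counting(STATE_INACTIVE, true),	/* 0 */
	       is_counting(STATE_INACTIVE, false));	/* 1 */
	return 0;
}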
+ */ + return 1; } static int perf_swcounter_match(struct perf_counter *counter, @@ -3186,19 +3644,10 @@ static int perf_swcounter_match(struct perf_counter *counter, return 1; } -static void perf_swcounter_add(struct perf_counter *counter, u64 nr, - int nmi, struct pt_regs *regs, u64 addr) -{ - int neg = atomic64_add_negative(nr, &counter->hw.count); - - if (counter->hw.sample_period && !neg && regs) - perf_swcounter_overflow(counter, nmi, regs, addr); -} - static void perf_swcounter_ctx_event(struct perf_counter_context *ctx, - enum perf_type_id type, u32 event, - u64 nr, int nmi, struct pt_regs *regs, - u64 addr) + enum perf_type_id type, + u32 event, u64 nr, int nmi, + struct perf_sample_data *data) { struct perf_counter *counter; @@ -3207,8 +3656,8 @@ static void perf_swcounter_ctx_event(struct perf_counter_context *ctx, rcu_read_lock(); list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { - if (perf_swcounter_match(counter, type, event, regs)) - perf_swcounter_add(counter, nr, nmi, regs, addr); + if (perf_swcounter_match(counter, type, event, data->regs)) + perf_swcounter_add(counter, nr, nmi, data); } rcu_read_unlock(); } @@ -3227,9 +3676,9 @@ static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx) return &cpuctx->recursion[0]; } -static void __perf_swcounter_event(enum perf_type_id type, u32 event, - u64 nr, int nmi, struct pt_regs *regs, - u64 addr) +static void do_perf_swcounter_event(enum perf_type_id type, u32 event, + u64 nr, int nmi, + struct perf_sample_data *data) { struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context); int *recursion = perf_swcounter_recursion_context(cpuctx); @@ -3242,7 +3691,7 @@ static void __perf_swcounter_event(enum perf_type_id type, u32 event, barrier(); perf_swcounter_ctx_event(&cpuctx->ctx, type, event, - nr, nmi, regs, addr); + nr, nmi, data); rcu_read_lock(); /* * doesn't really matter which of the child contexts the @@ -3250,7 +3699,7 @@ static void __perf_swcounter_event(enum perf_type_id type, u32 event, */ ctx = rcu_dereference(current->perf_counter_ctxp); if (ctx) - perf_swcounter_ctx_event(ctx, type, event, nr, nmi, regs, addr); + perf_swcounter_ctx_event(ctx, type, event, nr, nmi, data); rcu_read_unlock(); barrier(); @@ -3260,35 +3709,79 @@ out: put_cpu_var(perf_cpu_context); } -void -perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr) +void __perf_swcounter_event(u32 event, u64 nr, int nmi, + struct pt_regs *regs, u64 addr) { - __perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs, addr); + struct perf_sample_data data = { + .regs = regs, + .addr = addr, + }; + + do_perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, &data); } static void perf_swcounter_read(struct perf_counter *counter) { - perf_swcounter_update(counter); } static int perf_swcounter_enable(struct perf_counter *counter) { - perf_swcounter_set_period(counter); + struct hw_perf_counter *hwc = &counter->hw; + + if (hwc->sample_period) { + hwc->last_period = hwc->sample_period; + perf_swcounter_set_period(counter); + } return 0; } static void perf_swcounter_disable(struct perf_counter *counter) { - perf_swcounter_update(counter); } static const struct pmu perf_ops_generic = { .enable = perf_swcounter_enable, .disable = perf_swcounter_disable, .read = perf_swcounter_read, + .unthrottle = perf_swcounter_unthrottle, }; /* + * hrtimer based swcounter callback + */ + +static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) +{ + enum hrtimer_restart ret = HRTIMER_RESTART; 
+ struct perf_sample_data data; + struct perf_counter *counter; + u64 period; + + counter = container_of(hrtimer, struct perf_counter, hw.hrtimer); + counter->pmu->read(counter); + + data.addr = 0; + data.regs = get_irq_regs(); + /* + * In case we exclude kernel IPs or are somehow not in interrupt + * context, provide the next best thing, the user IP. + */ + if ((counter->attr.exclude_kernel || !data.regs) && + !counter->attr.exclude_user) + data.regs = task_pt_regs(current); + + if (data.regs) { + if (perf_counter_overflow(counter, 0, &data)) + ret = HRTIMER_NORESTART; + } + + period = max_t(u64, 10000, counter->hw.sample_period); + hrtimer_forward_now(hrtimer, ns_to_ktime(period)); + + return ret; +} + +/* * Software counter: cpu wall time clock */ @@ -3404,36 +3897,25 @@ static const struct pmu perf_ops_task_clock = { .read = task_clock_perf_counter_read, }; -/* - * Software counter: cpu migrations - */ -void perf_counter_task_migration(struct task_struct *task, int cpu) -{ - struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); - struct perf_counter_context *ctx; - - perf_swcounter_ctx_event(&cpuctx->ctx, PERF_TYPE_SOFTWARE, - PERF_COUNT_SW_CPU_MIGRATIONS, - 1, 1, NULL, 0); - - ctx = perf_pin_task_context(task); - if (ctx) { - perf_swcounter_ctx_event(ctx, PERF_TYPE_SOFTWARE, - PERF_COUNT_SW_CPU_MIGRATIONS, - 1, 1, NULL, 0); - perf_unpin_context(ctx); - } -} - #ifdef CONFIG_EVENT_PROFILE -void perf_tpcounter_event(int event_id) +void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record, + int entry_size) { - struct pt_regs *regs = get_irq_regs(); + struct perf_raw_record raw = { + .size = entry_size, + .data = record, + }; + + struct perf_sample_data data = { + .regs = get_irq_regs(), + .addr = addr, + .raw = &raw, + }; - if (!regs) - regs = task_pt_regs(current); + if (!data.regs) + data.regs = task_pt_regs(current); - __perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs, 0); + do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, &data); } EXPORT_SYMBOL_GPL(perf_tpcounter_event); @@ -3442,16 +3924,20 @@ extern void ftrace_profile_disable(int); static void tp_perf_counter_destroy(struct perf_counter *counter) { - ftrace_profile_disable(perf_event_id(&counter->attr)); + ftrace_profile_disable(counter->attr.config); } static const struct pmu *tp_perf_counter_init(struct perf_counter *counter) { - int event_id = perf_event_id(&counter->attr); - int ret; + /* + * Raw tracepoint data is a severe data leak, only allow root to + * have these. + */ + if ((counter->attr.sample_type & PERF_SAMPLE_RAW) && + !capable(CAP_SYS_ADMIN)) + return ERR_PTR(-EPERM); - ret = ftrace_profile_enable(event_id); - if (ret) + if (ftrace_profile_enable(counter->attr.config)) return NULL; counter->destroy = tp_perf_counter_destroy; @@ -3465,9 +3951,21 @@ static const struct pmu *tp_perf_counter_init(struct perf_counter *counter) } #endif +atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX]; + +static void sw_perf_counter_destroy(struct perf_counter *counter) +{ + u64 event = counter->attr.config; + + WARN_ON(counter->parent); + + atomic_dec(&perf_swcounter_enabled[event]); +} + static const struct pmu *sw_perf_counter_init(struct perf_counter *counter) { const struct pmu *pmu = NULL; + u64 event = counter->attr.config; /* * Software counters (currently) can't in general distinguish @@ -3476,7 +3974,7 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter) * to be kernel events, and page faults are never hypervisor * events. 
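perf_swcounter_enabled[] is a per-event enable refcount: sw_perf_counter_init() increments it and sw_perf_counter_destroy() decrements it, and only for top-level counters (inherited children ride on the parent's reference, hence the !counter->parent checks). The shape of the pattern, with C11 atomics and an assumed consumer-side check added for the demo:

#include <stdatomic.h>
#include <stdio.h>

#define SW_EVENT_MAX	8
static _Atomic int sw_enabled[SW_EVENT_MAX];

static void counter_init(int event)	{ atomic_fetch_add(&sw_enabled[event], 1); }
static void counter_destroy(int event)	{ atomic_fetch_sub(&sw_enabled[event], 1); }

/* Hot path: bail out early when no counter listens for this event. */
static void sw_event(int event)
{
	if (!atomic_load(&sw_enabled[event]))
		return;
	printf("event %d delivered\n", event);
}

int main(void)
{
	sw_event(3);		/* dropped: refcount is 0 */
	counter_init(3);
	sw_event(3);		/* delivered */
	counter_destroy(3);
	return 0;
}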
*/ - switch (counter->attr.config) { + switch (event) { case PERF_COUNT_SW_CPU_CLOCK: pmu = &perf_ops_cpu_clock; @@ -3497,6 +3995,10 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter) case PERF_COUNT_SW_PAGE_FAULTS_MAJ: case PERF_COUNT_SW_CONTEXT_SWITCHES: case PERF_COUNT_SW_CPU_MIGRATIONS: + if (!counter->parent) { + atomic_inc(&perf_swcounter_enabled[event]); + counter->destroy = sw_perf_counter_destroy; + } pmu = &perf_ops_generic; break; } @@ -3512,6 +4014,7 @@ perf_counter_alloc(struct perf_counter_attr *attr, int cpu, struct perf_counter_context *ctx, struct perf_counter *group_leader, + struct perf_counter *parent_counter, gfp_t gfpflags) { const struct pmu *pmu; @@ -3547,6 +4050,8 @@ perf_counter_alloc(struct perf_counter_attr *attr, counter->ctx = ctx; counter->oncpu = -1; + counter->parent = parent_counter; + counter->ns = get_pid_ns(current->nsproxy->pid_ns); counter->id = atomic64_inc_return(&perf_counter_id); @@ -3561,13 +4066,14 @@ perf_counter_alloc(struct perf_counter_attr *attr, hwc->sample_period = attr->sample_period; if (attr->freq && attr->sample_freq) hwc->sample_period = 1; + hwc->last_period = hwc->sample_period; atomic64_set(&hwc->period_left, hwc->sample_period); /* - * we currently do not support PERF_SAMPLE_GROUP on inherited counters + * we currently do not support PERF_FORMAT_GROUP on inherited counters */ - if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP)) + if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) goto done; switch (attr->type) { @@ -3604,11 +4110,15 @@ done: counter->pmu = pmu; - atomic_inc(&nr_counters); - if (counter->attr.mmap) - atomic_inc(&nr_mmap_counters); - if (counter->attr.comm) - atomic_inc(&nr_comm_counters); + if (!counter->parent) { + atomic_inc(&nr_counters); + if (counter->attr.mmap) + atomic_inc(&nr_mmap_counters); + if (counter->attr.comm) + atomic_inc(&nr_comm_counters); + if (counter->attr.task) + atomic_inc(&nr_task_counters); + } return counter; } @@ -3771,7 +4281,7 @@ SYSCALL_DEFINE5(perf_counter_open, } counter = perf_counter_alloc(&attr, cpu, ctx, group_leader, - GFP_KERNEL); + NULL, GFP_KERNEL); ret = PTR_ERR(counter); if (IS_ERR(counter)) goto err_put_context; @@ -3837,7 +4347,8 @@ inherit_counter(struct perf_counter *parent_counter, child_counter = perf_counter_alloc(&parent_counter->attr, parent_counter->cpu, child_ctx, - group_leader, GFP_KERNEL); + group_leader, parent_counter, + GFP_KERNEL); if (IS_ERR(child_counter)) return child_counter; get_ctx(child_ctx); @@ -3860,12 +4371,6 @@ inherit_counter(struct perf_counter *parent_counter, */ add_counter_to_ctx(child_counter, child_ctx); - child_counter->parent = parent_counter; - /* - * inherit into child's child as well: - */ - child_counter->attr.inherit = 1; - /* * Get a reference to the parent filp - we will fput it * when the child counter exits. 
This is safe to do because @@ -3909,10 +4414,14 @@ static int inherit_group(struct perf_counter *parent_counter, } static void sync_child_counter(struct perf_counter *child_counter, - struct perf_counter *parent_counter) + struct task_struct *child) { + struct perf_counter *parent_counter = child_counter->parent; u64 child_val; + if (child_counter->attr.inherit_stat) + perf_counter_read_event(child_counter, child); + child_val = atomic64_read(&child_counter->count); /* @@ -3941,7 +4450,8 @@ static void sync_child_counter(struct perf_counter *child_counter, static void __perf_counter_exit_task(struct perf_counter *child_counter, - struct perf_counter_context *child_ctx) + struct perf_counter_context *child_ctx, + struct task_struct *child) { struct perf_counter *parent_counter; @@ -3955,7 +4465,7 @@ __perf_counter_exit_task(struct perf_counter *child_counter, * counters need to be zapped - but otherwise linger. */ if (parent_counter) { - sync_child_counter(child_counter, parent_counter); + sync_child_counter(child_counter, child); free_counter(child_counter); } } @@ -3969,8 +4479,10 @@ void perf_counter_exit_task(struct task_struct *child) struct perf_counter_context *child_ctx; unsigned long flags; - if (likely(!child->perf_counter_ctxp)) + if (likely(!child->perf_counter_ctxp)) { + perf_counter_task(child, NULL, 0); return; + } local_irq_save(flags); /* @@ -3989,17 +4501,20 @@ void perf_counter_exit_task(struct task_struct *child) */ spin_lock(&child_ctx->lock); child->perf_counter_ctxp = NULL; - if (child_ctx->parent_ctx) { - /* - * This context is a clone; unclone it so it can't get - * swapped to another process while we're removing all - * the counters from it. - */ - put_ctx(child_ctx->parent_ctx); - child_ctx->parent_ctx = NULL; - } - spin_unlock(&child_ctx->lock); - local_irq_restore(flags); + /* + * If this context is a clone; unclone it so it can't get + * swapped to another process while we're removing all + * the counters from it. + */ + unclone_ctx(child_ctx); + spin_unlock_irqrestore(&child_ctx->lock, flags); + + /* + * Report the task dead after unscheduling the counters so that we + * won't get any samples after PERF_EVENT_EXIT. We can however still + * get a few PERF_EVENT_READ events. + */ + perf_counter_task(child, child_ctx, 0); /* * We can recurse on the same lock type through: @@ -4017,7 +4532,7 @@ void perf_counter_exit_task(struct task_struct *child) again: list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list, list_entry) - __perf_counter_exit_task(child_counter, child_ctx); + __perf_counter_exit_task(child_counter, child_ctx, child); /* * If the last counter was a group counter, it will have appended all @@ -4220,6 +4735,11 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) perf_counter_init_cpu(cpu); break; + case CPU_ONLINE: + case CPU_ONLINE_FROZEN: + hw_perf_counter_setup_online(cpu); + break; + case CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE_FROZEN: perf_counter_exit_cpu(cpu); @@ -4244,6 +4764,8 @@ void __init perf_counter_init(void) { perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE, (void *)(long)smp_processor_id()); + perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE, + (void *)(long)smp_processor_id()); register_cpu_notifier(&perf_cpu_nb); } diff --git a/kernel/pid.c b/kernel/pid.c index b2e5f78fd28..31310b5d3f5 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -378,26 +378,15 @@ EXPORT_SYMBOL(pid_task); /* * Must be called under rcu_read_lock() or with tasklist_lock read-held. 
*/ -struct task_struct *find_task_by_pid_type_ns(int type, int nr, - struct pid_namespace *ns) +struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns) { - return pid_task(find_pid_ns(nr, ns), type); + return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID); } -EXPORT_SYMBOL(find_task_by_pid_type_ns); - struct task_struct *find_task_by_vpid(pid_t vnr) { - return find_task_by_pid_type_ns(PIDTYPE_PID, vnr, - current->nsproxy->pid_ns); -} -EXPORT_SYMBOL(find_task_by_vpid); - -struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns) -{ - return find_task_by_pid_type_ns(PIDTYPE_PID, nr, ns); + return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns); } -EXPORT_SYMBOL(find_task_by_pid_ns); struct pid *get_task_pid(struct task_struct *task, enum pid_type type) { diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c index 2d1001b4858..821722ae58a 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c @@ -67,9 +67,10 @@ err_alloc: return NULL; } -static struct pid_namespace *create_pid_namespace(unsigned int level) +static struct pid_namespace *create_pid_namespace(struct pid_namespace *parent_pid_ns) { struct pid_namespace *ns; + unsigned int level = parent_pid_ns->level + 1; int i; ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL); @@ -86,6 +87,7 @@ static struct pid_namespace *create_pid_namespace(unsigned int level) kref_init(&ns->kref); ns->level = level; + ns->parent = get_pid_ns(parent_pid_ns); set_bit(0, ns->pidmap[0].page); atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1); @@ -114,25 +116,11 @@ static void destroy_pid_namespace(struct pid_namespace *ns) struct pid_namespace *copy_pid_ns(unsigned long flags, struct pid_namespace *old_ns) { - struct pid_namespace *new_ns; - - BUG_ON(!old_ns); - new_ns = get_pid_ns(old_ns); if (!(flags & CLONE_NEWPID)) - goto out; - - new_ns = ERR_PTR(-EINVAL); + return get_pid_ns(old_ns); if (flags & CLONE_THREAD) - goto out_put; - - new_ns = create_pid_namespace(old_ns->level + 1); - if (!IS_ERR(new_ns)) - new_ns->parent = get_pid_ns(old_ns); - -out_put: - put_pid_ns(old_ns); -out: - return new_ns; + return ERR_PTR(-EINVAL); + return create_pid_namespace(old_ns); } void free_pid_ns(struct kref *kref) diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index bece7c0b67b..e33a21cb940 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -521,11 +521,12 @@ void posix_cpu_timers_exit(struct task_struct *tsk) } void posix_cpu_timers_exit_group(struct task_struct *tsk) { - struct task_cputime cputime; + struct signal_struct *const sig = tsk->signal; - thread_group_cputimer(tsk, &cputime); cleanup_timers(tsk->signal->cpu_timers, - cputime.utime, cputime.stime, cputime.sum_exec_runtime); + cputime_add(tsk->utime, sig->utime), + cputime_add(tsk->stime, sig->stime), + tsk->se.sum_exec_runtime + sig->sum_sched_runtime); } static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now) diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index 052ec4d195c..d089d052c4a 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c @@ -202,6 +202,12 @@ static int no_timer_create(struct k_itimer *new_timer) return -EOPNOTSUPP; } +static int no_nsleep(const clockid_t which_clock, int flags, + struct timespec *tsave, struct timespec __user *rmtp) +{ + return -EOPNOTSUPP; +} + /* * Return nonzero if we know a priori this clockid_t value is bogus. 
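The posix-timers hunk is a straight NULL-hook fix: the clock registered below supplied a timer_create stub but no nsleep one, so a clock_nanosleep() call on it would have dereferenced a NULL hook. The pattern, every optional hook pointing at an explicit -EOPNOTSUPP stub, in a minimal reconstruction (the struct layout is a stand-in for the kernel's k_clock):

#include <errno.h>
#include <stdio.h>

struct k_clock_sketch {
	int (*timer_create)(void);
	int (*nsleep)(int flags);
};

static int no_timer_create(void) { return -EOPNOTSUPP; }
static int no_nsleep(int flags)  { (void)flags; return -EOPNOTSUPP; }

static const struct k_clock_sketch clock_monotonic_raw = {
	.timer_create	= no_timer_create,
	.nsleep		= no_nsleep,	/* the hook this patch adds */
};

int main(void)
{
	/* -95 (-EOPNOTSUPP) on Linux instead of a NULL-pointer call. */
	printf("nsleep -> %d\n", clock_monotonic_raw.nsleep(0));
	return 0;
}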
*/ @@ -254,6 +260,7 @@ static __init int init_posix_timers(void) .clock_get = posix_get_monotonic_raw, .clock_set = do_posix_clock_nosettime, .timer_create = no_timer_create, + .nsleep = no_nsleep, }; register_posix_clock(CLOCK_REALTIME, &clock_realtime); diff --git a/kernel/power/user.c b/kernel/power/user.c index ed97375daae..bf0014d6a5f 100644 --- a/kernel/power/user.c +++ b/kernel/power/user.c @@ -23,7 +23,6 @@ #include <linux/console.h> #include <linux/cpu.h> #include <linux/freezer.h> -#include <linux/smp_lock.h> #include <scsi/scsi_scan.h> #include <asm/uaccess.h> diff --git a/kernel/profile.c b/kernel/profile.c index 69911b5745e..419250ebec4 100644 --- a/kernel/profile.c +++ b/kernel/profile.c @@ -117,11 +117,12 @@ int __ref profile_init(void) cpumask_copy(prof_cpu_mask, cpu_possible_mask); - prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL); + prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL|__GFP_NOWARN); if (prof_buffer) return 0; - prof_buffer = alloc_pages_exact(buffer_bytes, GFP_KERNEL|__GFP_ZERO); + prof_buffer = alloc_pages_exact(buffer_bytes, + GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN); if (prof_buffer) return 0; diff --git a/kernel/ptrace.c b/kernel/ptrace.c index f6d8b8cb5e3..307c285af59 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -152,7 +152,7 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode) if (!dumpable && !capable(CAP_SYS_PTRACE)) return -EPERM; - return security_ptrace_may_access(task, mode); + return security_ptrace_access_check(task, mode); } bool ptrace_may_access(struct task_struct *task, unsigned int mode) @@ -167,67 +167,82 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode) int ptrace_attach(struct task_struct *task) { int retval; - unsigned long flags; audit_ptrace(task); retval = -EPERM; + if (unlikely(task->flags & PF_KTHREAD)) + goto out; if (same_thread_group(task, current)) goto out; - /* Protect the target's credential calculations against our + /* + * Protect exec's credential calculations against our * interference; SUID, SGID and LSM creds get determined differently * under ptrace. */ - retval = mutex_lock_interruptible(&task->cred_guard_mutex); - if (retval < 0) + retval = -ERESTARTNOINTR; + if (mutex_lock_interruptible(&task->cred_guard_mutex)) goto out; - retval = -EPERM; -repeat: - /* - * Nasty, nasty. - * - * We want to hold both the task-lock and the - * tasklist_lock for writing at the same time. - * But that's against the rules (tasklist_lock - * is taken for reading by interrupts on other - * cpu's that may have task_lock).
- */ task_lock(task); - if (!write_trylock_irqsave(&tasklist_lock, flags)) { - task_unlock(task); - do { - cpu_relax(); - } while (!write_can_lock(&tasklist_lock)); - goto repeat; - } - - if (!task->mm) - goto bad; - /* the same process cannot be attached many times */ - if (task->ptrace & PT_PTRACED) - goto bad; retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH); + task_unlock(task); if (retval) - goto bad; + goto unlock_creds; - /* Go */ - task->ptrace |= PT_PTRACED; + write_lock_irq(&tasklist_lock); + retval = -EPERM; + if (unlikely(task->exit_state)) + goto unlock_tasklist; + if (task->ptrace) + goto unlock_tasklist; + + task->ptrace = PT_PTRACED; if (capable(CAP_SYS_PTRACE)) task->ptrace |= PT_PTRACE_CAP; __ptrace_link(task, current); - send_sig_info(SIGSTOP, SEND_SIG_FORCED, task); -bad: - write_unlock_irqrestore(&tasklist_lock, flags); - task_unlock(task); + + retval = 0; +unlock_tasklist: + write_unlock_irq(&tasklist_lock); +unlock_creds: mutex_unlock(&task->cred_guard_mutex); out: return retval; } +/** + * ptrace_traceme -- helper for PTRACE_TRACEME + * + * Performs checks and sets PT_PTRACED. + * Should be used by all ptrace implementations for PTRACE_TRACEME. + */ +int ptrace_traceme(void) +{ + int ret = -EPERM; + + write_lock_irq(&tasklist_lock); + /* Are we already being traced? */ + if (!current->ptrace) { + ret = security_ptrace_traceme(current->parent); + /* + * Check PF_EXITING to ensure ->real_parent has not passed + * exit_ptrace(). Otherwise we don't report the error but + * pretend ->real_parent untraces us right after return. + */ + if (!ret && !(current->real_parent->flags & PF_EXITING)) { + current->ptrace = PT_PTRACED; + __ptrace_link(current, current->real_parent); + } + } + write_unlock_irq(&tasklist_lock); + + return ret; +} + /* * Called with irqs disabled, returns true if childs should reap themselves. */ @@ -409,37 +424,33 @@ static int ptrace_setoptions(struct task_struct *child, long data) static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info) { + unsigned long flags; int error = -ESRCH; - read_lock(&tasklist_lock); - if (likely(child->sighand != NULL)) { + if (lock_task_sighand(child, &flags)) { error = -EINVAL; - spin_lock_irq(&child->sighand->siglock); if (likely(child->last_siginfo != NULL)) { *info = *child->last_siginfo; error = 0; } - spin_unlock_irq(&child->sighand->siglock); + unlock_task_sighand(child, &flags); } - read_unlock(&tasklist_lock); return error; } static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info) { + unsigned long flags; int error = -ESRCH; - read_lock(&tasklist_lock); - if (likely(child->sighand != NULL)) { + if (lock_task_sighand(child, &flags)) { error = -EINVAL; - spin_lock_irq(&child->sighand->siglock); if (likely(child->last_siginfo != NULL)) { *child->last_siginfo = *info; error = 0; } - spin_unlock_irq(&child->sighand->siglock); + unlock_task_sighand(child, &flags); } - read_unlock(&tasklist_lock); return error; } @@ -566,72 +577,16 @@ int ptrace_request(struct task_struct *child, long request, return ret; } -/** - * ptrace_traceme -- helper for PTRACE_TRACEME - * - * Performs checks and sets PT_PTRACED. - * Should be used by all ptrace implementations for PTRACE_TRACEME. - */ -int ptrace_traceme(void) -{ - int ret = -EPERM; - - /* - * Are we already being traced? - */ -repeat: - task_lock(current); - if (!(current->ptrace & PT_PTRACED)) { - /* - * See ptrace_attach() comments about the locking here. 
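One of the -EPERM rejections visible near the top of the reworked ptrace_attach(), the same_thread_group() check, can be exercised directly from userspace; a hypothetical probe:

#include <stdio.h>
#include <sys/ptrace.h>
#include <unistd.h>

int main(void)
{
	/* same_thread_group(task, current) -> -EPERM */
	if (ptrace(PTRACE_ATTACH, getpid(), NULL, NULL) == -1)
		perror("PTRACE_ATTACH(self)");	/* expected: EPERM */
	return 0;
}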
- */ - unsigned long flags; - if (!write_trylock_irqsave(&tasklist_lock, flags)) { - task_unlock(current); - do { - cpu_relax(); - } while (!write_can_lock(&tasklist_lock)); - goto repeat; - } - - ret = security_ptrace_traceme(current->parent); - - /* - * Check PF_EXITING to ensure ->real_parent has not passed - * exit_ptrace(). Otherwise we don't report the error but - * pretend ->real_parent untraces us right after return. - */ - if (!ret && !(current->real_parent->flags & PF_EXITING)) { - current->ptrace |= PT_PTRACED; - __ptrace_link(current, current->real_parent); - } - - write_unlock_irqrestore(&tasklist_lock, flags); - } - task_unlock(current); - return ret; -} - -/** - * ptrace_get_task_struct -- grab a task struct reference for ptrace - * @pid: process id to grab a task_struct reference of - * - * This function is a helper for ptrace implementations. It checks - * permissions and then grabs a task struct for use of the actual - * ptrace implementation. - * - * Returns the task_struct for @pid or an ERR_PTR() on failure. - */ -struct task_struct *ptrace_get_task_struct(pid_t pid) +static struct task_struct *ptrace_get_task_struct(pid_t pid) { struct task_struct *child; - read_lock(&tasklist_lock); + rcu_read_lock(); child = find_task_by_vpid(pid); if (child) get_task_struct(child); + rcu_read_unlock(); - read_unlock(&tasklist_lock); if (!child) return ERR_PTR(-ESRCH); return child; diff --git a/kernel/rcutree.c b/kernel/rcutree.c index 0dccfbba6d2..7717b95c202 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -1533,7 +1533,7 @@ void __init __rcu_init(void) int j; struct rcu_node *rnp; - printk(KERN_WARNING "Experimental hierarchical RCU implementation.\n"); + printk(KERN_INFO "Hierarchical RCU implementation.\n"); #ifdef CONFIG_RCU_CPU_STALL_DETECTOR printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n"); #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ @@ -1546,7 +1546,6 @@ void __init __rcu_init(void) rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long)i); /* Register notifier for non-boot CPUs */ register_cpu_notifier(&rcu_nb); - printk(KERN_WARNING "Experimental hierarchical RCU init done.\n"); } module_param(blimit, int, 0); diff --git a/kernel/res_counter.c b/kernel/res_counter.c index bf8e7534c80..e1338f07431 100644 --- a/kernel/res_counter.c +++ b/kernel/res_counter.c @@ -18,7 +18,7 @@ void res_counter_init(struct res_counter *counter, struct res_counter *parent) { spin_lock_init(&counter->lock); - counter->limit = (unsigned long long)LLONG_MAX; + counter->limit = RESOURCE_MAX; counter->parent = parent; } @@ -133,6 +133,16 @@ int res_counter_memparse_write_strategy(const char *buf, unsigned long long *res) { char *end; + + /* return RESOURCE_MAX(unlimited) if "-1" is specified */ + if (*buf == '-') { + *res = simple_strtoull(buf + 1, &end, 10); + if (*res != 1 || *end != '\0') + return -EINVAL; + *res = RESOURCE_MAX; + return 0; + } + /* FIXME - make memparse() take const char* args */ *res = memparse((char *)buf, &end); if (*end != '\0') diff --git a/kernel/resource.c b/kernel/resource.c index ac5f3a36923..78b087221c1 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -787,7 +787,7 @@ static int __init reserve_setup(char *str) static struct resource reserve[MAXRESERVE]; for (;;) { - int io_start, io_num; + unsigned int io_start, io_num; int x = reserved; if (get_option (&str, &io_start) != 2) diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c index fcd107a78c5..29bd4baf9e7 100644 --- a/kernel/rtmutex.c +++ b/kernel/rtmutex.c @@ -1039,16 
+1039,14 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock, if (!rt_mutex_owner(lock) || try_to_steal_lock(lock, task)) { /* We got the lock for task. */ debug_rt_mutex_lock(lock); - rt_mutex_set_owner(lock, task, 0); - + spin_unlock(&lock->wait_lock); rt_mutex_deadlock_account_lock(lock, task); return 1; } ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock); - if (ret && !waiter->task) { /* * Reset the return value. We might have diff --git a/kernel/sched.c b/kernel/sched.c index 8fb88a906aa..1b59e265273 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -493,6 +493,7 @@ struct rt_rq { #endif #ifdef CONFIG_SMP unsigned long rt_nr_migratory; + unsigned long rt_nr_total; int overloaded; struct plist_head pushable_tasks; #endif @@ -1978,7 +1979,8 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) if (task_hot(p, old_rq->clock, NULL)) schedstat_inc(p, se.nr_forced2_migrations); #endif - perf_counter_task_migration(p, new_cpu); + perf_swcounter_event(PERF_COUNT_SW_CPU_MIGRATIONS, + 1, 1, NULL, 0); } p->se.vruntime -= old_cfsrq->min_vruntime - new_cfsrq->min_vruntime; @@ -2570,15 +2572,37 @@ static void __sched_fork(struct task_struct *p) p->se.avg_wakeup = sysctl_sched_wakeup_granularity; #ifdef CONFIG_SCHEDSTATS - p->se.wait_start = 0; - p->se.sum_sleep_runtime = 0; - p->se.sleep_start = 0; - p->se.block_start = 0; - p->se.sleep_max = 0; - p->se.block_max = 0; - p->se.exec_max = 0; - p->se.slice_max = 0; - p->se.wait_max = 0; + p->se.wait_start = 0; + p->se.wait_max = 0; + p->se.wait_count = 0; + p->se.wait_sum = 0; + + p->se.sleep_start = 0; + p->se.sleep_max = 0; + p->se.sum_sleep_runtime = 0; + + p->se.block_start = 0; + p->se.block_max = 0; + p->se.exec_max = 0; + p->se.slice_max = 0; + + p->se.nr_migrations_cold = 0; + p->se.nr_failed_migrations_affine = 0; + p->se.nr_failed_migrations_running = 0; + p->se.nr_failed_migrations_hot = 0; + p->se.nr_forced_migrations = 0; + p->se.nr_forced2_migrations = 0; + + p->se.nr_wakeups = 0; + p->se.nr_wakeups_sync = 0; + p->se.nr_wakeups_migrate = 0; + p->se.nr_wakeups_local = 0; + p->se.nr_wakeups_remote = 0; + p->se.nr_wakeups_affine = 0; + p->se.nr_wakeups_affine_attempts = 0; + p->se.nr_wakeups_passive = 0; + p->se.nr_wakeups_idle = 0; + #endif INIT_LIST_HEAD(&p->rt.run_list); @@ -6540,6 +6564,11 @@ SYSCALL_DEFINE0(sched_yield) return 0; } +static inline int should_resched(void) +{ + return need_resched() && !(preempt_count() & PREEMPT_ACTIVE); +} + static void __cond_resched(void) { #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP @@ -6559,8 +6588,7 @@ static void __cond_resched(void) int __sched _cond_resched(void) { - if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) && - system_state == SYSTEM_RUNNING) { + if (should_resched()) { __cond_resched(); return 1; } @@ -6578,12 +6606,12 @@ EXPORT_SYMBOL(_cond_resched); */ int cond_resched_lock(spinlock_t *lock) { - int resched = need_resched() && system_state == SYSTEM_RUNNING; + int resched = should_resched(); int ret = 0; if (spin_needbreak(lock) || resched) { spin_unlock(lock); - if (resched && need_resched()) + if (resched) __cond_resched(); else cpu_relax(); @@ -6598,7 +6626,7 @@ int __sched cond_resched_softirq(void) { BUG_ON(!in_softirq()); - if (need_resched() && system_state == SYSTEM_RUNNING) { + if (should_resched()) { local_bh_enable(); __cond_resched(); local_bh_disable(); @@ -7045,7 +7073,7 @@ static int migration_thread(void *data) if (cpu_is_offline(cpu)) { spin_unlock_irq(&rq->lock); - goto wait_to_die; + break; } if (rq->active_balance) { @@ -7071,16 
+7099,7 @@ static int migration_thread(void *data) complete(&req->done); } __set_current_state(TASK_RUNNING); - return 0; -wait_to_die: - /* Wait for kthread_stop */ - set_current_state(TASK_INTERRUPTIBLE); - while (!kthread_should_stop()) { - schedule(); - set_current_state(TASK_INTERRUPTIBLE); - } - __set_current_state(TASK_RUNNING); return 0; } @@ -7270,6 +7289,7 @@ static void migrate_dead_tasks(unsigned int dead_cpu) static void calc_global_load_remove(struct rq *rq) { atomic_long_sub(rq->calc_load_active, &calc_load_tasks); + rq->calc_load_active = 0; } #endif /* CONFIG_HOTPLUG_CPU */ @@ -7494,7 +7514,9 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) rq = task_rq_lock(p, &flags); __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1); task_rq_unlock(rq, &flags); + get_task_struct(p); cpu_rq(cpu)->migration_thread = p; + rq->calc_load_update = calc_load_update; break; case CPU_ONLINE: @@ -7505,8 +7527,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) /* Update our root-domain */ rq = cpu_rq(cpu); spin_lock_irqsave(&rq->lock, flags); - rq->calc_load_update = calc_load_update; - rq->calc_load_active = 0; if (rq->rd) { BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); @@ -7524,6 +7544,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) kthread_bind(cpu_rq(cpu)->migration_thread, cpumask_any(cpu_online_mask)); kthread_stop(cpu_rq(cpu)->migration_thread); + put_task_struct(cpu_rq(cpu)->migration_thread); cpu_rq(cpu)->migration_thread = NULL; break; @@ -7533,6 +7554,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) migrate_live_tasks(cpu); rq = cpu_rq(cpu); kthread_stop(rq->migration_thread); + put_task_struct(rq->migration_thread); rq->migration_thread = NULL; /* Idle task back to normal (off runqueue, low prio) */ spin_lock_irq(&rq->lock); @@ -7828,7 +7850,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd) free_rootdomain(old_rd); } -static int __init_refok init_rootdomain(struct root_domain *rd, bool bootmem) +static int init_rootdomain(struct root_domain *rd, bool bootmem) { gfp_t gfp = GFP_KERNEL; @@ -9075,7 +9097,7 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq) #ifdef CONFIG_SMP rt_rq->rt_nr_migratory = 0; rt_rq->overloaded = 0; - plist_head_init(&rq->rt.pushable_tasks, &rq->lock); + plist_head_init(&rt_rq->pushable_tasks, &rq->lock); #endif rt_rq->rt_time = 0; diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c index 7deffc9f0e5..d014efbf947 100644 --- a/kernel/sched_cpupri.c +++ b/kernel/sched_cpupri.c @@ -81,8 +81,21 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p, if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids) continue; - if (lowest_mask) + if (lowest_mask) { cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask); + + /* + * We have to ensure that we have at least one bit + * still set in the array, since the map could have + * been concurrently emptied between the first and + * second reads of vec->mask. If we hit this + * condition, simply act as though we never hit this + * priority level and continue on. + */ + if (cpumask_any(lowest_mask) >= nr_cpu_ids) + continue; + } + return 1; } @@ -152,7 +165,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri) * * Returns: -ENOMEM if memory fails. 
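The recheck added to cpupri_find() just above is a general guard against a value changing between two reads; a compressed userspace analogue (all names hypothetical, C11 atomics standing in for the cpumask operations):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long vec_mask = 0xf0;	/* can be emptied concurrently */

static int find_lowest(unsigned long allowed, unsigned long *lowest)
{
	if (!(allowed & atomic_load(&vec_mask)))	/* first read */
		return 0;

	*lowest = allowed & atomic_load(&vec_mask);	/* second read */
	if (!*lowest)	/* emptied in between: act as if we never matched */
		return 0;

	return 1;
}

int main(void)
{
	unsigned long lowest;

	if (find_lowest(0x30, &lowest))
		printf("lowest mask: %#lx\n", lowest);
	return 0;
}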
*/ -int __init_refok cpupri_init(struct cpupri *cp, bool bootmem) +int cpupri_init(struct cpupri *cp, bool bootmem) { gfp_t gfp = GFP_KERNEL; int i; diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c index 467ca72f165..70c7e0b7994 100644 --- a/kernel/sched_debug.c +++ b/kernel/sched_debug.c @@ -162,7 +162,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) { s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1, spread, rq0_min_vruntime, spread0; - struct rq *rq = &per_cpu(runqueues, cpu); + struct rq *rq = cpu_rq(cpu); struct sched_entity *last; unsigned long flags; @@ -191,7 +191,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) if (last) max_vruntime = last->vruntime; min_vruntime = cfs_rq->min_vruntime; - rq0_min_vruntime = per_cpu(runqueues, 0).cfs.min_vruntime; + rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime; spin_unlock_irqrestore(&rq->lock, flags); SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime", SPLIT_NS(MIN_vruntime)); @@ -248,7 +248,7 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq) static void print_cpu(struct seq_file *m, int cpu) { - struct rq *rq = &per_cpu(runqueues, cpu); + struct rq *rq = cpu_rq(cpu); #ifdef CONFIG_X86 { diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 5f9650e8fe7..652e8bdef9a 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -266,6 +266,12 @@ static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime) return min_vruntime; } +static inline int entity_before(struct sched_entity *a, + struct sched_entity *b) +{ + return (s64)(a->vruntime - b->vruntime) < 0; +} + static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se) { return se->vruntime - cfs_rq->min_vruntime; @@ -430,12 +436,13 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) for_each_sched_entity(se) { struct load_weight *load; + struct load_weight lw; cfs_rq = cfs_rq_of(se); load = &cfs_rq->load; if (unlikely(!se->on_rq)) { - struct load_weight lw = cfs_rq->load; + lw = cfs_rq->load; update_load_add(&lw, se->load.weight); load = &lw; @@ -604,9 +611,13 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) { #ifdef CONFIG_SCHEDSTATS + struct task_struct *tsk = NULL; + + if (entity_is_task(se)) + tsk = task_of(se); + if (se->sleep_start) { u64 delta = rq_of(cfs_rq)->clock - se->sleep_start; - struct task_struct *tsk = task_of(se); if ((s64)delta < 0) delta = 0; @@ -617,11 +628,11 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) se->sleep_start = 0; se->sum_sleep_runtime += delta; - account_scheduler_latency(tsk, delta >> 10, 1); + if (tsk) + account_scheduler_latency(tsk, delta >> 10, 1); } if (se->block_start) { u64 delta = rq_of(cfs_rq)->clock - se->block_start; - struct task_struct *tsk = task_of(se); if ((s64)delta < 0) delta = 0; @@ -632,17 +643,19 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) se->block_start = 0; se->sum_sleep_runtime += delta; - /* - * Blocking time is in units of nanosecs, so shift by 20 to - * get a milliseconds-range estimation of the amount of - * time that the task spent sleeping: - */ - if (unlikely(prof_on == SLEEP_PROFILING)) { - - profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk), - delta >> 20); + if (tsk) { + /* + * Blocking time is in units of nanosecs, so shift by + * 20 to get a milliseconds-range estimation of the + * amount of time that the task spent 
sleeping: + */ + if (unlikely(prof_on == SLEEP_PROFILING)) { + profile_hits(SLEEP_PROFILING, + (void *)get_wchan(tsk), + delta >> 20); + } + account_scheduler_latency(tsk, delta >> 10, 0); } - account_scheduler_latency(tsk, delta >> 10, 0); } #endif } @@ -686,7 +699,8 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) * all of which have the same weight. */ if (sched_feat(NORMALIZED_SLEEPER) && - task_of(se)->policy != SCHED_IDLE) + (!entity_is_task(se) || + task_of(se)->policy != SCHED_IDLE)) thresh = calc_delta_fair(thresh, se); vruntime -= thresh; @@ -1015,7 +1029,7 @@ static void yield_task_fair(struct rq *rq) /* * Already in the rightmost position? */ - if (unlikely(!rightmost || rightmost->vruntime < se->vruntime)) + if (unlikely(!rightmost || entity_before(rightmost, se))) return; /* @@ -1711,7 +1725,7 @@ static void task_new_fair(struct rq *rq, struct task_struct *p) /* 'curr' will be NULL if the child belongs to a different group */ if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) && - curr && curr->vruntime < se->vruntime) { + curr && entity_before(curr, se)) { /* * Upon rescheduling, sched_class::put_prev_task() will place * 'current' within the tree based on its new key value. diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 9bf0d2a7304..3918e01994e 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -10,6 +10,8 @@ static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se) #ifdef CONFIG_RT_GROUP_SCHED +#define rt_entity_is_task(rt_se) (!(rt_se)->my_q) + static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) { return rt_rq->rq; @@ -22,6 +24,8 @@ static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se) #else /* CONFIG_RT_GROUP_SCHED */ +#define rt_entity_is_task(rt_se) (1) + static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) { return container_of(rt_rq, struct rq, rt); @@ -73,7 +77,7 @@ static inline void rt_clear_overload(struct rq *rq) static void update_rt_migration(struct rt_rq *rt_rq) { - if (rt_rq->rt_nr_migratory && (rt_rq->rt_nr_running > 1)) { + if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) { if (!rt_rq->overloaded) { rt_set_overload(rq_of_rt_rq(rt_rq)); rt_rq->overloaded = 1; @@ -86,6 +90,12 @@ static void update_rt_migration(struct rt_rq *rt_rq) static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) { + if (!rt_entity_is_task(rt_se)) + return; + + rt_rq = &rq_of_rt_rq(rt_rq)->rt; + + rt_rq->rt_nr_total++; if (rt_se->nr_cpus_allowed > 1) rt_rq->rt_nr_migratory++; @@ -94,6 +104,12 @@ static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) { + if (!rt_entity_is_task(rt_se)) + return; + + rt_rq = &rq_of_rt_rq(rt_rq)->rt; + + rt_rq->rt_nr_total--; if (rt_se->nr_cpus_allowed > 1) rt_rq->rt_nr_migratory--; diff --git a/kernel/signal.c b/kernel/signal.c index d81f4952eeb..64c5deeaca5 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1410,7 +1410,7 @@ int do_notify_parent(struct task_struct *tsk, int sig) /* do_notify_parent_cldstop should have been called instead. 
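Stepping back to entity_before() in sched_fair.c: the signed-difference form it standardizes on stays correct when vruntime wraps, where the plain less-than comparisons it replaces do not. A standalone illustration (test values chosen to straddle the wrap):

#include <stdint.h>
#include <stdio.h>

static int before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;	/* wrap-safe "a runs before b" */
}

int main(void)
{
	uint64_t a = UINT64_MAX - 5;	/* just before the wrap */
	uint64_t b = a + 10;		/* logically later, numerically tiny */

	printf("a < b       : %d\n", a < b);		/* 0, wrong */
	printf("before(a, b): %d\n", before(a, b));	/* 1, right */
	return 0;
}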
*/ BUG_ON(task_is_stopped_or_traced(tsk)); - BUG_ON(!tsk->ptrace && + BUG_ON(!task_ptrace(tsk) && (tsk->group_leader != tsk || !thread_group_empty(tsk))); info.si_signo = sig; @@ -1449,7 +1449,7 @@ int do_notify_parent(struct task_struct *tsk, int sig) psig = tsk->parent->sighand; spin_lock_irqsave(&psig->siglock, flags); - if (!tsk->ptrace && sig == SIGCHLD && + if (!task_ptrace(tsk) && sig == SIGCHLD && (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN || (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) { /* @@ -1486,7 +1486,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why) struct task_struct *parent; struct sighand_struct *sighand; - if (tsk->ptrace & PT_PTRACED) + if (task_ptrace(tsk)) parent = tsk->parent; else { tsk = tsk->group_leader; @@ -1499,7 +1499,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why) * see comment in do_notify_parent() abot the following 3 lines */ rcu_read_lock(); - info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns); + info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns); info.si_uid = __task_cred(tsk)->uid; rcu_read_unlock(); @@ -1535,7 +1535,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why) static inline int may_ptrace_stop(void) { - if (!likely(current->ptrace & PT_PTRACED)) + if (!likely(task_ptrace(current))) return 0; /* * Are we in the middle of do_coredump? @@ -1753,7 +1753,7 @@ static int do_signal_stop(int signr) static int ptrace_signal(int signr, siginfo_t *info, struct pt_regs *regs, void *cookie) { - if (!(current->ptrace & PT_PTRACED)) + if (!task_ptrace(current)) return signr; ptrace_signal_deliver(regs, cookie); @@ -2454,11 +2454,9 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s stack_t oss; int error; - if (uoss) { - oss.ss_sp = (void __user *) current->sas_ss_sp; - oss.ss_size = current->sas_ss_size; - oss.ss_flags = sas_ss_flags(sp); - } + oss.ss_sp = (void __user *) current->sas_ss_sp; + oss.ss_size = current->sas_ss_size; + oss.ss_flags = sas_ss_flags(sp); if (uss) { void __user *ss_sp; @@ -2466,10 +2464,12 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s int ss_flags; error = -EFAULT; - if (!access_ok(VERIFY_READ, uss, sizeof(*uss)) - || __get_user(ss_sp, &uss->ss_sp) - || __get_user(ss_flags, &uss->ss_flags) - || __get_user(ss_size, &uss->ss_size)) + if (!access_ok(VERIFY_READ, uss, sizeof(*uss))) + goto out; + error = __get_user(ss_sp, &uss->ss_sp) | + __get_user(ss_flags, &uss->ss_flags) | + __get_user(ss_size, &uss->ss_size); + if (error) goto out; error = -EPERM; @@ -2501,13 +2501,16 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s current->sas_ss_size = ss_size; } + error = 0; if (uoss) { error = -EFAULT; - if (copy_to_user(uoss, &oss, sizeof(oss))) + if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss))) goto out; + error = __put_user(oss.ss_sp, &uoss->ss_sp) | + __put_user(oss.ss_size, &uoss->ss_size) | + __put_user(oss.ss_flags, &uoss->ss_flags); } - error = 0; out: return error; } diff --git a/kernel/smp.c b/kernel/smp.c index ad63d850120..94188b8ecc3 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -57,7 +57,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu) return NOTIFY_BAD; break; -#ifdef CONFIG_CPU_HOTPLUG +#ifdef CONFIG_HOTPLUG_CPU case CPU_UP_CANCELED: case CPU_UP_CANCELED_FROZEN: diff --git a/kernel/softirq.c b/kernel/softirq.c index b41fb710e11..eb5e131a048 100644 --- a/kernel/softirq.c +++ 
b/kernel/softirq.c @@ -213,6 +213,7 @@ restart: do { if (pending & 1) { int prev_count = preempt_count(); + kstat_incr_softirqs_this_cpu(h - softirq_vec); trace_softirq_entry(h, softirq_vec); h->action(h); @@ -344,7 +345,9 @@ void open_softirq(int nr, void (*action)(struct softirq_action *)) softirq_vec[nr].action = action; } -/* Tasklets */ +/* + * Tasklets + */ struct tasklet_head { struct tasklet_struct *head; @@ -492,6 +495,66 @@ void tasklet_kill(struct tasklet_struct *t) EXPORT_SYMBOL(tasklet_kill); +/* + * tasklet_hrtimer + */ + +/* + * The trampoline is called when the hrtimer expires. If this is + * called from the hrtimer interrupt then we schedule the tasklet as + * the timer callback function expects to run in softirq context. If + * it's called in softirq context anyway (i.e. high resolution timers + * disabled) then the hrtimer callback is called right away. + */ +static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer) +{ + struct tasklet_hrtimer *ttimer = + container_of(timer, struct tasklet_hrtimer, timer); + + if (hrtimer_is_hres_active(timer)) { + tasklet_hi_schedule(&ttimer->tasklet); + return HRTIMER_NORESTART; + } + return ttimer->function(timer); +} + +/* + * Helper function which calls the hrtimer callback from + * tasklet/softirq context + */ +static void __tasklet_hrtimer_trampoline(unsigned long data) +{ + struct tasklet_hrtimer *ttimer = (void *)data; + enum hrtimer_restart restart; + + restart = ttimer->function(&ttimer->timer); + if (restart != HRTIMER_NORESTART) + hrtimer_restart(&ttimer->timer); +} + +/** + * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks + * @ttimer: tasklet_hrtimer which is initialized + * @function: hrtimer callback function which gets called from softirq context + * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME) + * @mode: hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL) + */ +void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer, + enum hrtimer_restart (*function)(struct hrtimer *), + clockid_t which_clock, enum hrtimer_mode mode) +{ + hrtimer_init(&ttimer->timer, which_clock, mode); + ttimer->timer.function = __hrtimer_tasklet_trampoline; + tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline, + (unsigned long)ttimer); + ttimer->function = function; +} +EXPORT_SYMBOL_GPL(tasklet_hrtimer_init); + +/* + * Remote softirq bits + */ + DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list); EXPORT_PER_CPU_SYMBOL(softirq_work_list); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index ab462b9968d..71d8dc7f992 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -335,7 +335,10 @@ static struct ctl_table kern_table[] = { .data = &sysctl_timer_migration, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = &proc_dointvec_minmax, + .strategy = &sysctl_intvec, + .extra1 = &zero, + .extra2 = &one, }, #endif { @@ -744,6 +747,14 @@ static struct ctl_table kern_table[] = { .proc_handler = &proc_dointvec, }, { + .ctl_name = CTL_UNNUMBERED, + .procname = "panic_on_io_nmi", + .data = &panic_on_io_nmi, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, + { .ctl_name = KERN_BOOTLOADER_TYPE, .procname = "bootloader_type", .data = &bootloader_type, @@ -1295,10 +1306,10 @@ static struct ctl_table vm_table[] = { { .ctl_name = CTL_UNNUMBERED, .procname = "mmap_min_addr", - .data = &mmap_min_addr, - .maxlen = sizeof(unsigned long), + .data = &dac_mmap_min_addr, + .maxlen = sizeof(unsigned long), .mode = 0644, -
.proc_handler = &proc_doulongvec_minmax, + .proc_handler = &mmap_min_addr_handler, }, #ifdef CONFIG_NUMA { @@ -2283,7 +2294,7 @@ static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table, void *data) { #define TMPBUFLEN 21 - int *i, vleft, first=1, neg, val; + int *i, vleft, first = 1, neg; unsigned long lval; size_t left, len; @@ -2336,8 +2347,6 @@ static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table, len = p-buf; if ((len < left) && *p && !isspace(*p)) break; - if (neg) - val = -val; s += len; left -= len; diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 1ad6dd46111..620b58abdc3 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c @@ -137,11 +137,12 @@ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires, */ int clockevents_register_notifier(struct notifier_block *nb) { + unsigned long flags; int ret; - spin_lock(&clockevents_lock); + spin_lock_irqsave(&clockevents_lock, flags); ret = raw_notifier_chain_register(&clockevents_chain, nb); - spin_unlock(&clockevents_lock); + spin_unlock_irqrestore(&clockevents_lock, flags); return ret; } @@ -178,16 +179,18 @@ static void clockevents_notify_released(void) */ void clockevents_register_device(struct clock_event_device *dev) { + unsigned long flags; + BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); BUG_ON(!dev->cpumask); - spin_lock(&clockevents_lock); + spin_lock_irqsave(&clockevents_lock, flags); list_add(&dev->list, &clockevent_devices); clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev); clockevents_notify_released(); - spin_unlock(&clockevents_lock); + spin_unlock_irqrestore(&clockevents_lock, flags); } EXPORT_SYMBOL_GPL(clockevents_register_device); @@ -235,8 +238,9 @@ void clockevents_exchange_device(struct clock_event_device *old, void clockevents_notify(unsigned long reason, void *arg) { struct list_head *node, *tmp; + unsigned long flags; - spin_lock(&clockevents_lock); + spin_lock_irqsave(&clockevents_lock, flags); clockevents_do_notify(reason, arg); switch (reason) { @@ -251,18 +255,7 @@ void clockevents_notify(unsigned long reason, void *arg) default: break; } - spin_unlock(&clockevents_lock); + spin_unlock_irqrestore(&clockevents_lock, flags); } EXPORT_SYMBOL_GPL(clockevents_notify); - -ktime_t clockevents_get_next_event(int cpu) -{ - struct tick_device *td; - struct clock_event_device *dev; - - td = &per_cpu(tick_cpu_device, cpu); - dev = td->evtdev; - - return dev->next_event; -} #endif diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 592bf584d1d..7466cb81125 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -513,7 +513,7 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev, * Check to make sure we don't switch to a non-highres capable * clocksource if the tick code is in oneshot mode (highres or nohz) */ - if (tick_oneshot_mode_active() && + if (tick_oneshot_mode_active() && ovr && !(ovr->flags & CLOCK_SOURCE_VALID_FOR_HRES)) { printk(KERN_WARNING "%s clocksource is not HRT compatible. 
" "Cannot switch while in HRT/NOHZ mode\n", ovr->name); diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 877dbedc311..c2ec25087a3 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -205,11 +205,11 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev) * Powerstate information: The system enters/leaves a state, where * affected devices might stop */ -static void tick_do_broadcast_on_off(void *why) +static void tick_do_broadcast_on_off(unsigned long *reason) { struct clock_event_device *bc, *dev; struct tick_device *td; - unsigned long flags, *reason = why; + unsigned long flags; int cpu, bc_stopped; spin_lock_irqsave(&tick_broadcast_lock, flags); @@ -276,8 +276,7 @@ void tick_broadcast_on_off(unsigned long reason, int *oncpu) printk(KERN_ERR "tick-broadcast: ignoring broadcast for " "offline CPU #%d\n", *oncpu); else - smp_call_function_single(*oncpu, tick_do_broadcast_on_off, - &reason, 1); + tick_do_broadcast_on_off(&reason); } /* diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 2aff39c6f10..e0f59a21c06 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -222,6 +222,15 @@ void tick_nohz_stop_sched_tick(int inidle) cpu = smp_processor_id(); ts = &per_cpu(tick_cpu_sched, cpu); + + /* + * Call to tick_nohz_start_idle stops the last_update_time from being + * updated. Thus, it must not be called in the event we are called from + * irq_exit() with the prior state different than idle. + */ + if (!inidle && !ts->inidle) + goto end; + now = tick_nohz_start_idle(ts); /* @@ -239,9 +248,6 @@ void tick_nohz_stop_sched_tick(int inidle) if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) goto end; - if (!inidle && !ts->inidle) - goto end; - ts->inidle = 1; if (need_resched()) diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c index a999b92a127..fddd69d16e0 100644 --- a/kernel/time/timer_list.c +++ b/kernel/time/timer_list.c @@ -286,7 +286,7 @@ static int __init init_timer_list_procfs(void) { struct proc_dir_entry *pe; - pe = proc_create("timer_list", 0644, NULL, &timer_list_fops); + pe = proc_create("timer_list", 0444, NULL, &timer_list_fops); if (!pe) return -ENOMEM; return 0; diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c index c994530d166..4cde8b9c716 100644 --- a/kernel/time/timer_stats.c +++ b/kernel/time/timer_stats.c @@ -96,7 +96,7 @@ static DEFINE_MUTEX(show_mutex); /* * Collection status, active/inactive: */ -static int __read_mostly active; +int __read_mostly timer_stats_active; /* * Beginning/end timestamps of measurement: @@ -242,7 +242,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf, struct entry *entry, input; unsigned long flags; - if (likely(!active)) + if (likely(!timer_stats_active)) return; lock = &per_cpu(lookup_lock, raw_smp_processor_id()); @@ -254,7 +254,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf, input.timer_flag = timer_flag; spin_lock_irqsave(lock, flags); - if (!active) + if (!timer_stats_active) goto out_unlock; entry = tstat_lookup(&input, comm); @@ -290,7 +290,7 @@ static int tstats_show(struct seq_file *m, void *v) /* * If still active then calculate up to now: */ - if (active) + if (timer_stats_active) time_stop = ktime_get(); time = ktime_sub(time_stop, time_start); @@ -368,18 +368,18 @@ static ssize_t tstats_write(struct file *file, const char __user *buf, mutex_lock(&show_mutex); switch (ctl[0]) { case '0': - if (active) { - active = 0; + if (timer_stats_active) 
{ + timer_stats_active = 0; time_stop = ktime_get(); sync_access(); } break; case '1': - if (!active) { + if (!timer_stats_active) { reset_entries(); time_start = ktime_get(); smp_mb(); - active = 1; + timer_stats_active = 1; } break; default: diff --git a/kernel/timer.c b/kernel/timer.c index 54d3912f8ca..a7f07d5a624 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -380,6 +380,8 @@ static void timer_stats_account_timer(struct timer_list *timer) { unsigned int flag = 0; + if (likely(!timer->start_site)) + return; if (unlikely(tbase_get_deferrable(timer->base))) flag |= TIMER_STATS_FLAG_DEFERRABLE; @@ -712,7 +714,7 @@ int mod_timer(struct timer_list *timer, unsigned long expires) * networking code - if the timer is re-modified * to be the same thing then just return: */ - if (timer->expires == expires && timer_pending(timer)) + if (timer_pending(timer) && timer->expires == expires) return 1; return __mod_timer(timer, expires, false, TIMER_NOT_PINNED); diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 61071fecc82..019f380fd76 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -18,6 +18,13 @@ config HAVE_FUNCTION_TRACER config HAVE_FUNCTION_GRAPH_TRACER bool +config HAVE_FUNCTION_GRAPH_FP_TEST + bool + help + An arch may pass in a unique value (frame pointer) to both the + entering and exiting of a function. On exit, the value is compared + and if it does not match, then it will panic the kernel. + config HAVE_FUNCTION_TRACE_MCOUNT_TEST bool help @@ -121,6 +128,7 @@ config FUNCTION_GRAPH_TRACER bool "Kernel Function Graph Tracer" depends on HAVE_FUNCTION_GRAPH_TRACER depends on FUNCTION_TRACER + depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE default y help Enable the kernel to trace a function at both its return @@ -218,13 +226,13 @@ config BOOT_TRACER the timings of the initcalls and traces key events and the identity of tasks that can cause boot delays, such as context-switches. - Its aim is to be parsed by the /scripts/bootgraph.pl tool to + Its aim is to be parsed by the scripts/bootgraph.pl tool to produce pretty graphics about boot inefficiencies, giving a visual representation of the delays during initcalls - but the raw /debug/tracing/trace text output is readable too. - You must pass in ftrace=initcall to the kernel command line - to enable this on bootup. + You must pass in initcall_debug and ftrace=initcall to the kernel + command line to enable this on bootup. config TRACE_BRANCH_PROFILING bool diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 39af8af6fc3..7a34cb563fe 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -22,6 +22,7 @@ #include <linux/init.h> #include <linux/mutex.h> #include <linux/debugfs.h> +#include <linux/smp_lock.h> #include <linux/time.h> #include <linux/uaccess.h> @@ -266,8 +267,8 @@ static void blk_trace_free(struct blk_trace *bt) { debugfs_remove(bt->msg_file); debugfs_remove(bt->dropped_file); - debugfs_remove(bt->dir); relay_close(bt->rchan); + debugfs_remove(bt->dir); free_percpu(bt->sequence); free_percpu(bt->msg_data); kfree(bt); @@ -377,18 +378,8 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf, static int blk_remove_buf_file_callback(struct dentry *dentry) { - struct dentry *parent = dentry->d_parent; debugfs_remove(dentry); - /* - * this will fail for all but the last file, but that is ok. what we - * care about is the top level buts->name directory going away, when - * the last trace file is gone. 
Then we don't have to rmdir() that - manually on trace stop, so it nicely solves the issue with - * force killing of running traces. - */ - - debugfs_remove(parent); return 0; } diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index bb60732ade0..25edd5cc593 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -291,7 +291,9 @@ function_stat_next(void *v, int idx) pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK); again: - rec++; + if (idx != 0) + rec++; + if ((void *)rec >= (void *)&pg->records[pg->index]) { pg = pg->next; if (!pg) @@ -766,7 +768,7 @@ static struct tracer_stat function_stats __initdata = { .stat_show = function_stat_show }; -static void ftrace_profile_debugfs(struct dentry *d_tracer) +static __init void ftrace_profile_debugfs(struct dentry *d_tracer) { struct ftrace_profile_stat *stat; struct dentry *entry; @@ -784,7 +786,6 @@ static void ftrace_profile_debugfs(struct dentry *d_tracer) * The files created are permanent, if something happens * we still do not free memory. */ - kfree(stat); WARN(1, "Could not allocate stat file for cpu %d\n", cpu); @@ -811,7 +812,7 @@ static void ftrace_profile_debugfs(struct dentry *d_tracer) } #else /* CONFIG_FUNCTION_PROFILER */ -static void ftrace_profile_debugfs(struct dentry *d_tracer) +static __init void ftrace_profile_debugfs(struct dentry *d_tracer) { } #endif /* CONFIG_FUNCTION_PROFILER */ @@ -1224,6 +1225,13 @@ static void ftrace_shutdown(int command) return; ftrace_start_up--; + /* + * Just warn in case of imbalance, no need to kill ftrace, it's not + * critical but the ftrace_call callers may never be nopped again after + * further ftrace uses. + */ + WARN_ON_ONCE(ftrace_start_up < 0); + if (!ftrace_start_up) command |= FTRACE_DISABLE_CALLS; @@ -1410,10 +1418,20 @@ static void *t_hash_start(struct seq_file *m, loff_t *pos) { struct ftrace_iterator *iter = m->private; void *p = NULL; + loff_t l; + + if (!(iter->flags & FTRACE_ITER_HASH)) + *pos = 0; iter->flags |= FTRACE_ITER_HASH; - return t_hash_next(m, p, pos); + iter->hidx = 0; + for (l = 0; l <= *pos; ) { + p = t_hash_next(m, p, &l); + if (!p) + break; + } + return p; } static int t_hash_show(struct seq_file *m, void *v) @@ -1460,8 +1478,6 @@ t_next(struct seq_file *m, void *v, loff_t *pos) iter->pg = iter->pg->next; iter->idx = 0; goto retry; - } else { - iter->idx = -1; } } else { rec = &iter->pg->records[iter->idx++]; @@ -1490,6 +1506,7 @@ static void *t_start(struct seq_file *m, loff_t *pos) { struct ftrace_iterator *iter = m->private; void *p = NULL; + loff_t l; mutex_lock(&ftrace_lock); /* @@ -1501,23 +1518,21 @@ static void *t_start(struct seq_file *m, loff_t *pos) if (*pos > 0) return t_hash_start(m, pos); iter->flags |= FTRACE_ITER_PRINTALL; - (*pos)++; return iter; } if (iter->flags & FTRACE_ITER_HASH) return t_hash_start(m, pos); - if (*pos > 0) { - if (iter->idx < 0) - return p; - (*pos)--; - iter->idx--; + iter->pg = ftrace_pages_start; + iter->idx = 0; + for (l = 0; l <= *pos; ) { + p = t_next(m, p, &l); + if (!p) + break; } - p = t_next(m, p, pos); - - if (!p) + if (!p && iter->flags & FTRACE_ITER_FILTER) return t_hash_start(m, pos); return p; @@ -1647,7 +1662,7 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable) mutex_lock(&ftrace_regex_lock); if ((file->f_mode & FMODE_WRITE) && - !(file->f_flags & O_APPEND)) + (file->f_flags & O_TRUNC)) ftrace_filter_reset(enable); if (file->f_mode & FMODE_READ) { @@ -2263,7 +2278,11 @@ ftrace_regex_write(struct file *file, const char __user *ubuf, read++; cnt--;
- if (!(iter->flags & ~FTRACE_ITER_CONT)) { + /* + * If the parser hasn't finished with the last write, + * continue reading the user input without skipping spaces. + */ + if (!(iter->flags & FTRACE_ITER_CONT)) { /* skip white space */ while (cnt && isspace(ch)) { ret = get_user(ch, ubuf++); if (ret) goto out; read++; cnt--; } + /* only spaces were written */ if (isspace(ch)) { - file->f_pos += read; + *ppos += read; ret = read; goto out; } @@ -2304,12 +2324,12 @@ ftrace_regex_write(struct file *file, const char __user *ubuf, if (ret) goto out; iter->buffer_idx = 0; - } else + } else { iter->flags |= FTRACE_ITER_CONT; + iter->buffer[iter->buffer_idx++] = ch; + } - - file->f_pos += read; - + *ppos += read; ret = read; out: mutex_unlock(&ftrace_regex_lock); @@ -2493,32 +2513,31 @@ int ftrace_graph_count; unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly; static void * -g_next(struct seq_file *m, void *v, loff_t *pos) +__g_next(struct seq_file *m, loff_t *pos) { unsigned long *array = m->private; - int index = *pos; - - (*pos)++; - if (index >= ftrace_graph_count) + if (*pos >= ftrace_graph_count) return NULL; + return &array[*pos]; +} - return &array[index]; +static void * +g_next(struct seq_file *m, void *v, loff_t *pos) +{ + (*pos)++; + return __g_next(m, pos); } static void *g_start(struct seq_file *m, loff_t *pos) { - void *p = NULL; - mutex_lock(&graph_lock); /* Nothing, tell g_show to print all functions are enabled */ if (!ftrace_graph_count && !*pos) return (void *)1; - p = g_next(m, p, pos); - - return p; + return __g_next(m, pos); } static void g_stop(struct seq_file *m, void *p) @@ -2563,7 +2582,7 @@ ftrace_graph_open(struct inode *inode, struct file *file) mutex_lock(&graph_lock); if ((file->f_mode & FMODE_WRITE) && - !(file->f_flags & O_APPEND)) { + (file->f_flags & O_TRUNC)) { ftrace_graph_count = 0; memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs)); } @@ -2582,6 +2601,14 @@ ftrace_graph_open(struct inode *inode, struct file *file) } static int +ftrace_graph_release(struct inode *inode, struct file *file) +{ + if (file->f_mode & FMODE_READ) + seq_release(inode, file); + return 0; +} + +static int ftrace_set_func(unsigned long *array, int *idx, char *buffer) { struct dyn_ftrace *rec; @@ -2710,9 +2737,10 @@ ftrace_graph_write(struct file *file, const char __user *ubuf, } static const struct file_operations ftrace_graph_fops = { - .open = ftrace_graph_open, - .read = seq_read, - .write = ftrace_graph_write, + .open = ftrace_graph_open, + .read = seq_read, + .write = ftrace_graph_write, + .release = ftrace_graph_release, }; #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ @@ -3145,10 +3173,10 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, ret = proc_dointvec(table, write, file, buffer, lenp, ppos); - if (ret || !write || (last_ftrace_enabled == ftrace_enabled)) + if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled)) goto out; - last_ftrace_enabled = ftrace_enabled; + last_ftrace_enabled = !!ftrace_enabled; if (ftrace_enabled) { diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c index 86cdf671d7e..1edaa9516e8 100644 --- a/kernel/trace/kmemtrace.c +++ b/kernel/trace/kmemtrace.c @@ -186,7 +186,7 @@ static int kmem_trace_init(struct trace_array *tr) int cpu; kmemtrace_array = tr; - for_each_cpu_mask(cpu, cpu_possible_map) + for_each_cpu(cpu, cpu_possible_mask) tracing_reset(tr, cpu); kmemtrace_start_probes(); diff --git a/kernel/trace/ring_buffer.c
b/kernel/trace/ring_buffer.c index dc4dc70171c..a330513d96c 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -206,6 +206,7 @@ EXPORT_SYMBOL_GPL(tracing_is_on); #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array)) #define RB_ALIGNMENT 4U #define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX) +#define RB_EVNT_MIN_SIZE 8U /* two 32bit words */ /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */ #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX @@ -415,6 +416,8 @@ struct ring_buffer_per_cpu { unsigned long overrun; unsigned long read; local_t entries; + local_t committing; + local_t commits; u64 write_stamp; u64 read_stamp; atomic_t record_disabled; @@ -618,12 +621,6 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) kfree(cpu_buffer); } -/* - * Causes compile errors if the struct buffer_page gets bigger - * than the struct page. - */ -extern int ring_buffer_page_too_big(void); - #ifdef CONFIG_HOTPLUG_CPU static int rb_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu); @@ -646,11 +643,6 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags, int bsize; int cpu; - /* Paranoid! Optimizes out when all is well */ - if (sizeof(struct buffer_page) > sizeof(struct page)) - ring_buffer_page_too_big(); - - /* keep it in its own cache line */ buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()), GFP_KERNEL); @@ -666,8 +658,8 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags, buffer->reader_lock_key = key; /* need at least two pages */ - if (buffer->pages == 1) - buffer->pages++; + if (buffer->pages < 2) + buffer->pages = 2; /* * In case of non-hotplug cpu, if the ring-buffer is allocated @@ -743,6 +735,7 @@ ring_buffer_free(struct ring_buffer *buffer) put_online_cpus(); + kfree(buffer->buffers); free_cpumask_var(buffer->cpumask); kfree(buffer); @@ -1011,12 +1004,12 @@ rb_event_index(struct ring_buffer_event *event) { unsigned long addr = (unsigned long)event; - return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE); + return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE; } static inline int -rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer, - struct ring_buffer_event *event) +rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer, + struct ring_buffer_event *event) { unsigned long addr = (unsigned long)event; unsigned long index; @@ -1029,31 +1022,6 @@ rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer, } static void -rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer, - struct ring_buffer_event *event) -{ - unsigned long addr = (unsigned long)event; - unsigned long index; - - index = rb_event_index(event); - addr &= PAGE_MASK; - - while (cpu_buffer->commit_page->page != (void *)addr) { - if (RB_WARN_ON(cpu_buffer, - cpu_buffer->commit_page == cpu_buffer->tail_page)) - return; - cpu_buffer->commit_page->page->commit = - cpu_buffer->commit_page->write; - rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); - cpu_buffer->write_stamp = - cpu_buffer->commit_page->page->time_stamp; - } - - /* Now set the commit to the event's index */ - local_set(&cpu_buffer->commit_page->page->commit, index); -} - -static void rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) { /* @@ -1171,6 +1139,60 @@ static unsigned rb_calculate_event_length(unsigned length) return length; } +static inline void +rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, + struct buffer_page *tail_page, + unsigned long tail, unsigned 
long length) +{ + struct ring_buffer_event *event; + + /* + * Only the event that crossed the page boundary + * must fill the old tail_page with padding. + */ + if (tail >= BUF_PAGE_SIZE) { + local_sub(length, &tail_page->write); + return; + } + + event = __rb_page_index(tail_page, tail); + kmemcheck_annotate_bitfield(event, bitfield); + + /* + * If this event is bigger than the minimum size, then + * we need to be careful that we don't subtract the + * write counter enough to allow another writer to slip + * in on this page. + * We put in a discarded commit instead, to make sure + * that this space is not used again. + * + * If we are less than the minimum size, we don't need to + * worry about it. + */ + if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) { + /* No room for any events */ + + /* Mark the rest of the page with padding */ + rb_event_set_padding(event); + + /* Set the write back to the previous setting */ + local_sub(length, &tail_page->write); + return; + } + + /* Put in a discarded event */ + event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE; + event->type_len = RINGBUF_TYPE_PADDING; + /* time delta must be non zero */ + event->time_delta = 1; + /* Account for this as an entry */ + local_inc(&tail_page->entries); + local_inc(&cpu_buffer->entries); + + /* Set write to end of buffer */ + length = (tail + length) - BUF_PAGE_SIZE; + local_sub(length, &tail_page->write); +} static struct ring_buffer_event * rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, @@ -1180,7 +1202,6 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, { struct buffer_page *next_page, *head_page, *reader_page; struct ring_buffer *buffer = cpu_buffer->buffer; - struct ring_buffer_event *event; bool lock_taken = false; unsigned long flags; @@ -1265,27 +1286,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, cpu_buffer->tail_page->page->time_stamp = *ts; } - /* - * The actual tail page has moved forward. - */ - if (tail < BUF_PAGE_SIZE) { - /* Mark the rest of the page with padding */ - event = __rb_page_index(tail_page, tail); - kmemcheck_annotate_bitfield(event, bitfield); - rb_event_set_padding(event); - } - - /* Set the write back to the previous setting */ - local_sub(length, &tail_page->write); - - /* - * If this was a commit entry that failed, - * increment that too - */ - if (tail_page == cpu_buffer->commit_page && - tail == rb_commit_index(cpu_buffer)) { - rb_set_commit_to_write(cpu_buffer); - } + rb_reset_tail(cpu_buffer, tail_page, tail, length); __raw_spin_unlock(&cpu_buffer->lock); local_irq_restore(flags); @@ -1295,7 +1296,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, out_reset: /* reset write */ - local_sub(length, &tail_page->write); + rb_reset_tail(cpu_buffer, tail_page, tail, length); if (likely(lock_taken)) __raw_spin_unlock(&cpu_buffer->lock); @@ -1325,9 +1326,6 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, /* We reserved something on the buffer */ - if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE)) - return NULL; - event = __rb_page_index(tail_page, tail); kmemcheck_annotate_bitfield(event, bitfield); rb_update_event(event, type, length); @@ -1337,11 +1335,11 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, local_inc(&tail_page->entries); /* - * If this is a commit and the tail is zero, then update - * this page's time stamp. + * If this is the first commit on the page, then update + * its timestamp. 
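The discarded-padding cases in rb_reset_tail() above are easier to follow with concrete numbers; a self-contained check of the arithmetic (the page and header sizes below are illustrative assumptions, only RB_EVNT_MIN_SIZE comes from the patch):

#include <stdio.h>

#define BUF_PAGE_SIZE	 4080	/* assumed: 4 KiB page minus a 16-byte header */
#define RB_EVNT_HDR_SIZE    4	/* assumed event header size */
#define RB_EVNT_MIN_SIZE    8	/* two 32-bit words, as defined in the patch */

int main(void)
{
	unsigned long tail = 4000;	/* offset of the write that overflowed */

	if (tail >= BUF_PAGE_SIZE)
		printf("event started on the next page: nothing to pad\n");
	else if (tail > BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)
		printf("no room for a payload: pad the rest outright\n");
	else	/* array[0] of the discarded padding event */
		printf("padding payload: %lu bytes\n",
		       (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE);
	return 0;
}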
*/ - if (!tail && rb_is_commit(cpu_buffer, event)) - cpu_buffer->commit_page->page->time_stamp = *ts; + if (!tail) + tail_page->page->time_stamp = *ts; return event; } @@ -1410,16 +1408,16 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, return -EAGAIN; /* Only a commited time event can update the write stamp */ - if (rb_is_commit(cpu_buffer, event)) { + if (rb_event_is_commit(cpu_buffer, event)) { /* - * If this is the first on the page, then we need to - * update the page itself, and just put in a zero. + * If this is the first on the page, then it was + * updated with the page itself. Try to discard it + * and if we can't just make it zero. */ if (rb_event_index(event)) { event->time_delta = *delta & TS_MASK; event->array[0] = *delta >> TS_SHIFT; } else { - cpu_buffer->commit_page->page->time_stamp = *ts; /* try to discard, since we do not need this */ if (!rb_try_to_discard(cpu_buffer, event)) { /* nope, just zero it */ @@ -1445,6 +1443,44 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, return ret; } +static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) +{ + local_inc(&cpu_buffer->committing); + local_inc(&cpu_buffer->commits); +} + +static void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) +{ + unsigned long commits; + + if (RB_WARN_ON(cpu_buffer, + !local_read(&cpu_buffer->committing))) + return; + + again: + commits = local_read(&cpu_buffer->commits); + /* synchronize with interrupts */ + barrier(); + if (local_read(&cpu_buffer->committing) == 1) + rb_set_commit_to_write(cpu_buffer); + + local_dec(&cpu_buffer->committing); + + /* synchronize with interrupts */ + barrier(); + + /* + * Need to account for interrupts coming in between the + * updating of the commit page and the clearing of the + * committing counter. + */ + if (unlikely(local_read(&cpu_buffer->commits) != commits) && + !local_read(&cpu_buffer->committing)) { + local_inc(&cpu_buffer->committing); + goto again; + } +} + static struct ring_buffer_event * rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer, unsigned long length) @@ -1454,6 +1490,8 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer, int commit = 0; int nr_loops = 0; + rb_start_commit(cpu_buffer); + length = rb_calculate_event_length(length); again: /* @@ -1466,7 +1504,7 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer, * Bail! */ if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) - return NULL; + goto out_fail; ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu); @@ -1497,7 +1535,7 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer, commit = rb_add_time_stamp(cpu_buffer, &ts, &delta); if (commit == -EBUSY) - return NULL; + goto out_fail; if (commit == -EAGAIN) goto again; @@ -1511,30 +1549,23 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer, if (unlikely(PTR_ERR(event) == -EAGAIN)) goto again; - if (!event) { - if (unlikely(commit)) - /* - * Ouch! We needed a timestamp and it was commited. But - * we didn't get our event reserved. - */ - rb_set_commit_to_write(cpu_buffer); - return NULL; - } + if (!event) + goto out_fail; - /* - * If the timestamp was commited, make the commit our entry - * now so that we will update it when needed. 
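rb_start_commit()/rb_end_commit() above replace the single-commit-owner scheme with nesting counters: every writer bumps both counters, only the outermost writer (committing == 1) advances the commit page, and the second counter catches a writer that nested in between the finalize and the decrement. A userspace miniature of the same shape (hypothetical names, C11 atomics standing in for local_t):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int committing;	/* current nesting depth */
static _Atomic int commits;	/* total commits started */

static void begin_commit(void)
{
	atomic_fetch_add(&committing, 1);
	atomic_fetch_add(&commits, 1);
}

static void end_commit(void)
{
	int seen;
again:
	seen = atomic_load(&commits);
	if (atomic_load(&committing) == 1)
		printf("outermost writer: advance the commit page here\n");

	atomic_fetch_sub(&committing, 1);

	/* a writer may have nested in after the outermost check above */
	if (atomic_load(&commits) != seen && !atomic_load(&committing)) {
		atomic_fetch_add(&committing, 1);
		goto again;
	}
}

int main(void)
{
	begin_commit();
	begin_commit();	/* stand-in for an interrupt nesting here */
	end_commit();	/* nested: must not finalize */
	end_commit();	/* outermost: finalizes */
	return 0;
}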
+#ifdef CONFIG_TRACING
+
 #define TRACE_RECURSIVE_DEPTH 16

 static int trace_recursive_lock(void)
@@ -1565,6 +1596,13 @@ static void trace_recursive_unlock(void)
 	current->trace_recursion--;
 }

+#else
+
+#define trace_recursive_lock()		(0)
+#define trace_recursive_unlock()	do { } while (0)
+
+#endif
+
 static DEFINE_PER_CPU(int, rb_need_resched);

 /**
@@ -1642,13 +1680,14 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
 {
 	local_inc(&cpu_buffer->entries);

-	/* Only process further if we own the commit */
-	if (!rb_is_commit(cpu_buffer, event))
-		return;
-
-	cpu_buffer->write_stamp += event->time_delta;
+	/*
+	 * The first event in the commit queue updates the
+	 * time stamp.
+	 */
+	if (rb_event_is_commit(cpu_buffer, event))
+		cpu_buffer->write_stamp += event->time_delta;

-	rb_set_commit_to_write(cpu_buffer);
+	rb_end_commit(cpu_buffer);
 }

 /**
@@ -1737,17 +1776,17 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
 	/* The event is discarded regardless */
 	rb_event_discard(event);

+	cpu = smp_processor_id();
+	cpu_buffer = buffer->buffers[cpu];
+
 	/*
 	 * This must only be called if the event has not been
 	 * committed yet. Thus we can assume that preemption
 	 * is still disabled.
 	 */
-	RB_WARN_ON(buffer, preemptible());
+	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));

-	cpu = smp_processor_id();
-	cpu_buffer = buffer->buffers[cpu];
-
-	if (!rb_try_to_discard(cpu_buffer, event))
+	if (rb_try_to_discard(cpu_buffer, event))
 		goto out;

 	/*
@@ -1756,13 +1795,7 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
 	 */
 	local_inc(&cpu_buffer->entries);
 out:
-	/*
-	 * If a write came in and pushed the tail page
-	 * we still need to update the commit pointer
-	 * if we were the commit.
-	 */
-	if (rb_is_commit(cpu_buffer, event))
-		rb_set_commit_to_write(cpu_buffer);
+	rb_end_commit(cpu_buffer);

 	trace_recursive_unlock();
@@ -2351,7 +2384,6 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 		 * the box. Return the padding, and we will release
 		 * the current locks, and try again.
 		 */
-		rb_advance_reader(cpu_buffer);
 		return event;

 	case RINGBUF_TYPE_TIME_EXTEND:
@@ -2446,6 +2478,21 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);

+static inline int rb_ok_to_lock(void)
+{
+	/*
+	 * If an NMI die dumps out the content of the ring buffer,
+	 * do not grab locks; we also permanently disable the ring
+	 * buffer. A one-time deal is all you get from reading
+	 * the ring buffer from an NMI.
+	 */
+	if (likely(!in_nmi()))
+		return 1;
+
+	tracing_off_permanent();
+	return 0;
+}
+
 /**
  * ring_buffer_peek - peek at the next event to be read
  * @buffer: The ring buffer to read
@@ -2461,14 +2508,22 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 	struct ring_buffer_event *event;
 	unsigned long flags;
+	int dolock;

 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;

+	dolock = rb_ok_to_lock();
 again:
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	local_irq_save(flags);
+	if (dolock)
+		spin_lock(&cpu_buffer->reader_lock);
 	event = rb_buffer_peek(buffer, cpu, ts);
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	if (event && event->type_len == RINGBUF_TYPE_PADDING)
+		rb_advance_reader(cpu_buffer);
+	if (dolock)
+		spin_unlock(&cpu_buffer->reader_lock);
+	local_irq_restore(flags);

 	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
 		cpu_relax();
@@ -2520,6 +2575,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event = NULL;
 	unsigned long flags;
+	int dolock;
+
+	dolock = rb_ok_to_lock();

 again:
 	/* might be called in atomic */
@@ -2529,16 +2587,17 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 		goto out;

 	cpu_buffer = buffer->buffers[cpu];
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	local_irq_save(flags);
+	if (dolock)
+		spin_lock(&cpu_buffer->reader_lock);

 	event = rb_buffer_peek(buffer, cpu, ts);
-	if (!event)
-		goto out_unlock;
-
-	rb_advance_reader(cpu_buffer);
+	if (event)
+		rb_advance_reader(cpu_buffer);

- out_unlock:
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	if (dolock)
+		spin_unlock(&cpu_buffer->reader_lock);
+	local_irq_restore(flags);

 out:
 	preempt_enable();
@@ -2680,6 +2739,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 	cpu_buffer->overrun = 0;
 	cpu_buffer->read = 0;
 	local_set(&cpu_buffer->entries, 0);
+	local_set(&cpu_buffer->committing, 0);
+	local_set(&cpu_buffer->commits, 0);

 	cpu_buffer->write_stamp = 0;
 	cpu_buffer->read_stamp = 0;
@@ -2734,12 +2795,25 @@ EXPORT_SYMBOL_GPL(ring_buffer_reset);
 int ring_buffer_empty(struct ring_buffer *buffer)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
+	unsigned long flags;
+	int dolock;
 	int cpu;
+	int ret;
+
+	dolock = rb_ok_to_lock();

 	/* yes this is racy, but if you don't like the race, lock the buffer */
 	for_each_buffer_cpu(buffer, cpu) {
 		cpu_buffer = buffer->buffers[cpu];
-		if (!rb_per_cpu_empty(cpu_buffer))
+		local_irq_save(flags);
+		if (dolock)
+			spin_lock(&cpu_buffer->reader_lock);
+		ret = rb_per_cpu_empty(cpu_buffer);
+		if (dolock)
+			spin_unlock(&cpu_buffer->reader_lock);
+		local_irq_restore(flags);
+
+		if (!ret)
 			return 0;
 	}
@@ -2755,14 +2829,23 @@ EXPORT_SYMBOL_GPL(ring_buffer_empty);
 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
+	unsigned long flags;
+	int dolock;
 	int ret;

 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 1;

+	dolock = rb_ok_to_lock();
+
 	cpu_buffer = buffer->buffers[cpu];
+	local_irq_save(flags);
+	if (dolock)
+		spin_lock(&cpu_buffer->reader_lock);
 	ret = rb_per_cpu_empty(cpu_buffer);
-
+	if (dolock)
+		spin_unlock(&cpu_buffer->reader_lock);
+	local_irq_restore(flags);

 	return ret;
 }
@@ -3029,6 +3112,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 }
 EXPORT_SYMBOL_GPL(ring_buffer_read_page);

+#ifdef CONFIG_TRACING
 static ssize_t
 rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
@@ -3096,6 +3180,7 @@ static __init int rb_init_debugfs(void)
 }

 fs_initcall(rb_init_debugfs);
+#endif

 #ifdef CONFIG_HOTPLUG_CPU
 static int rb_cpu_notify(struct notifier_block *self,
@@ -3108,7 +3193,7 @@ static int rb_cpu_notify(struct notifier_block *self,
 	switch (action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
-		if (cpu_isset(cpu, *buffer->cpumask))
+		if (cpumask_test_cpu(cpu, buffer->cpumask))
 			return NOTIFY_OK;

 		buffer->buffers[cpu] =
@@ -3119,7 +3204,7 @@ static int rb_cpu_notify(struct notifier_block *self,
 			return NOTIFY_OK;
 		}
 		smp_wmb();
-		cpu_set(cpu, *buffer->cpumask);
+		cpumask_set_cpu(cpu, buffer->cpumask);
 		break;
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index 8d68e149a8b..573d3cc762c 100644
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -102,8 +102,10 @@ static enum event_status read_page(int cpu)
 		event = (void *)&rpage->data[i];
 		switch (event->type_len) {
 		case RINGBUF_TYPE_PADDING:
-			/* We don't expect any padding */
-			KILL_TEST();
+			/* failed writes may be discarded events */
+			if (!event->time_delta)
+				KILL_TEST();
+			inc = event->array[0] + 4;
 			break;
 		case RINGBUF_TYPE_TIME_EXTEND:
 			inc = 8;
@@ -119,7 +121,7 @@ static enum event_status read_page(int cpu)
 				KILL_TEST();
 				break;
 			}
-			inc = event->array[0];
+			inc = event->array[0] + 4;
 			break;
 		default:
 			entry = ring_buffer_event_data(event);
@@ -201,7 +203,7 @@ static void ring_buffer_producer(void)
 	 * Hammer the buffer for 10 secs (this may
 	 * make the system stall)
 	 */
-	pr_info("Starting ring buffer hammer\n");
+	trace_printk("Starting ring buffer hammer\n");
 	do_gettimeofday(&start_tv);
 	do {
 		struct ring_buffer_event *event;
@@ -237,7 +239,7 @@ static void ring_buffer_producer(void)
 #endif

 	} while (end_tv.tv_sec < (start_tv.tv_sec + RUN_TIME) && !kill_test);
-	pr_info("End ring buffer hammer\n");
+	trace_printk("End ring buffer hammer\n");

 	if (consumer) {
 		/* Init both completions here to avoid races */
@@ -260,49 +262,50 @@ static void ring_buffer_producer(void)
 	overruns = ring_buffer_overruns(buffer);

 	if (kill_test)
-		pr_info("ERROR!\n");
-	pr_info("Time: %lld (usecs)\n", time);
-	pr_info("Overruns: %lld\n", overruns);
+		trace_printk("ERROR!\n");
+	trace_printk("Time: %lld (usecs)\n", time);
+	trace_printk("Overruns: %lld\n", overruns);
 	if (disable_reader)
-		pr_info("Read: (reader disabled)\n");
+		trace_printk("Read: (reader disabled)\n");
 	else
-		pr_info("Read: %ld (by %s)\n", read,
+		trace_printk("Read: %ld (by %s)\n", read,
"events" : "pages"); - pr_info("Entries: %lld\n", entries); - pr_info("Total: %lld\n", entries + overruns + read); - pr_info("Missed: %ld\n", missed); - pr_info("Hit: %ld\n", hit); + trace_printk("Entries: %lld\n", entries); + trace_printk("Total: %lld\n", entries + overruns + read); + trace_printk("Missed: %ld\n", missed); + trace_printk("Hit: %ld\n", hit); /* Convert time from usecs to millisecs */ do_div(time, USEC_PER_MSEC); if (time) hit /= (long)time; else - pr_info("TIME IS ZERO??\n"); + trace_printk("TIME IS ZERO??\n"); - pr_info("Entries per millisec: %ld\n", hit); + trace_printk("Entries per millisec: %ld\n", hit); if (hit) { /* Calculate the average time in nanosecs */ avg = NSEC_PER_MSEC / hit; - pr_info("%ld ns per entry\n", avg); + trace_printk("%ld ns per entry\n", avg); } if (missed) { if (time) missed /= (long)time; - pr_info("Total iterations per millisec: %ld\n", hit + missed); + trace_printk("Total iterations per millisec: %ld\n", + hit + missed); /* it is possible that hit + missed will overflow and be zero */ if (!(hit + missed)) { - pr_info("hit + missed overflowed and totalled zero!\n"); + trace_printk("hit + missed overflowed and totalled zero!\n"); hit--; /* make it non zero */ } /* Caculate the average time in nanosecs */ avg = NSEC_PER_MSEC / (hit + missed); - pr_info("%ld ns per entry\n", avg); + trace_printk("%ld ns per entry\n", avg); } } @@ -353,7 +356,7 @@ static int ring_buffer_producer_thread(void *arg) ring_buffer_producer(); - pr_info("Sleeping for 10 secs\n"); + trace_printk("Sleeping for 10 secs\n"); set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(HZ * SLEEP_TIME); __set_current_state(TASK_RUNNING); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index c1878bfb2e1..8c358395d33 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -17,6 +17,7 @@ #include <linux/writeback.h> #include <linux/kallsyms.h> #include <linux/seq_file.h> +#include <linux/smp_lock.h> #include <linux/notifier.h> #include <linux/irqflags.h> #include <linux/debugfs.h> @@ -284,13 +285,12 @@ void trace_wake_up(void) static int __init set_buf_size(char *str) { unsigned long buf_size; - int ret; if (!str) return 0; - ret = strict_strtoul(str, 0, &buf_size); + buf_size = memparse(str, &str); /* nr_entries can not be zero */ - if (ret < 0 || buf_size == 0) + if (buf_size == 0) return 0; trace_buf_size = buf_size; return 1; @@ -848,6 +848,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | (need_resched() ? 
 struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
 						    int type,
@@ -2031,7 +2032,7 @@ static int tracing_open(struct inode *inode, struct file *file)

 	/* If this file was open for write, then erase contents */
 	if ((file->f_mode & FMODE_WRITE) &&
-	    !(file->f_flags & O_APPEND)) {
+	    (file->f_flags & O_TRUNC)) {
 		long cpu = (long) inode->i_private;

 		if (cpu == TRACE_PIPE_ALL_CPU)
@@ -2053,25 +2054,23 @@ static int tracing_open(struct inode *inode, struct file *file)
 static void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	struct tracer *t = m->private;
+	struct tracer *t = v;

 	(*pos)++;

 	if (t)
 		t = t->next;

-	m->private = t;
-
 	return t;
 }

 static void *t_start(struct seq_file *m, loff_t *pos)
 {
-	struct tracer *t = m->private;
+	struct tracer *t;
 	loff_t l = 0;

 	mutex_lock(&trace_types_lock);
-	for (; t && l < *pos; t = t_next(m, t, &l))
+	for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
 		;

 	return t;
@@ -2107,18 +2106,10 @@ static struct seq_operations show_traces_seq_ops = {

 static int show_traces_open(struct inode *inode, struct file *file)
 {
-	int ret;
-
 	if (tracing_disabled)
 		return -ENODEV;

-	ret = seq_open(file, &show_traces_seq_ops);
-	if (!ret) {
-		struct seq_file *m = file->private_data;
-		m->private = trace_types;
-	}
-
-	return ret;
+	return seq_open(file, &show_traces_seq_ops);
 }

 static ssize_t
@@ -2191,11 +2182,12 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
 		return -ENOMEM;

-	mutex_lock(&tracing_cpumask_update_lock);
 	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
 	if (err)
 		goto err_unlock;

+	mutex_lock(&tracing_cpumask_update_lock);
+
 	local_irq_disable();
 	__raw_spin_lock(&ftrace_max_lock);
 	for_each_tracing_cpu(cpu) {
@@ -2223,8 +2215,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 	return count;

 err_unlock:
-	mutex_unlock(&tracing_cpumask_update_lock);
-	free_cpumask_var(tracing_cpumask);
+	free_cpumask_var(tracing_cpumask_new);

 	return err;
 }
@@ -3095,7 +3086,8 @@ tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
 			break;
 		}

-		trace_consume(iter);
+		if (ret != TRACE_TYPE_NO_CONSUME)
+			trace_consume(iter);
 		rem -= count;
 		if (!find_next_entry_inc(iter)) {
 			rem = 0;
@@ -3626,7 +3618,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 	struct trace_seq *s;
 	unsigned long cnt;

-	s = kmalloc(sizeof(*s), GFP_ATOMIC);
+	s = kmalloc(sizeof(*s), GFP_KERNEL);
 	if (!s)
 		return ENOMEM;
@@ -3904,17 +3896,9 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	if (ret < 0)
 		return ret;

-	switch (val) {
-	case 0:
-		trace_flags &= ~(1 << index);
-		break;
-	case 1:
-		trace_flags |= 1 << index;
-		break;
-
-	default:
+	if (val != 0 && val != 1)
 		return -EINVAL;
-	}
+	set_tracer_flags(1 << index, val);

 	*ppos += cnt;
@@ -4243,8 +4227,11 @@ static void __ftrace_dump(bool disable_tracing)
 		iter.pos = -1;

 		if (find_next_entry_inc(&iter) != NULL) {
-			print_trace_line(&iter);
-			trace_consume(&iter);
+			int ret;
+
+			ret = print_trace_line(&iter);
+			if (ret != TRACE_TYPE_NO_CONSUME)
+				trace_consume(&iter);
 		}

 		trace_printk_seq(&iter.seq);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 6e735d4771f..8b9f4f6e955 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -438,10 +438,6 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 					  int *ent_cpu, u64 *ent_ts);

-void tracing_generic_entry_update(struct trace_entry *entry,
-				  unsigned long flags,
-				  int pc);
-
 void default_wait_pipe(struct trace_iterator *iter);
 void poll_wait_pipe(struct trace_iterator *iter);
@@ -597,6 +593,7 @@ print_graph_function(struct trace_iterator *iter)

 extern struct pid *ftrace_pid_trace;

+#ifdef CONFIG_FUNCTION_TRACER
 static inline int ftrace_trace_task(struct task_struct *task)
 {
 	if (!ftrace_pid_trace)
@@ -604,6 +601,12 @@ static inline int ftrace_trace_task(struct task_struct *task)

 	return test_tsk_trace_trace(task);
 }
+#else
+static inline int ftrace_trace_task(struct task_struct *task)
+{
+	return 1;
+}
+#endif

 /*
  * trace_iterator_flags is an enumeration that defines bit
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index 5b5895afecf..11ba5bb4ed0 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -14,7 +14,7 @@ int ftrace_profile_enable(int event_id)

 	mutex_lock(&event_mutex);
 	list_for_each_entry(event, &ftrace_events, list) {
-		if (event->id == event_id) {
+		if (event->id == event_id && event->profile_enable) {
 			ret = event->profile_enable(event);
 			break;
 		}
diff --git a/kernel/trace/trace_event_types.h b/kernel/trace/trace_event_types.h
index 5e32e375134..6db005e1248 100644
--- a/kernel/trace/trace_event_types.h
+++ b/kernel/trace/trace_event_types.h
@@ -26,6 +26,9 @@ TRACE_EVENT_FORMAT(funcgraph_exit, TRACE_GRAPH_RET,
 	ftrace_graph_ret_entry, ignore,
 	TRACE_STRUCT(
 		TRACE_FIELD(unsigned long, ret.func, func)
+		TRACE_FIELD(unsigned long long, ret.calltime, calltime)
+		TRACE_FIELD(unsigned long long, ret.rettime, rettime)
+		TRACE_FIELD(unsigned long, ret.overrun, overrun)
 		TRACE_FIELD(int, ret.depth, depth)
 	),
 	TP_RAW_FMT("<-- %lx (%d)")
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index aa08be69a1b..e75276a49cf 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -300,10 +300,18 @@ t_next(struct seq_file *m, void *v, loff_t *pos)

 static void *t_start(struct seq_file *m, loff_t *pos)
 {
+	struct ftrace_event_call *call = NULL;
+	loff_t l;
+
 	mutex_lock(&event_mutex);
-	if (*pos == 0)
-		m->private = ftrace_events.next;
-	return t_next(m, NULL, pos);
+
+	m->private = ftrace_events.next;
+	for (l = 0; l <= *pos; ) {
+		call = t_next(m, NULL, &l);
+		if (!call)
+			break;
+	}
+	return call;
 }

 static void *
@@ -332,10 +340,18 @@ s_next(struct seq_file *m, void *v, loff_t *pos)

 static void *s_start(struct seq_file *m, loff_t *pos)
 {
+	struct ftrace_event_call *call = NULL;
+	loff_t l;
+
 	mutex_lock(&event_mutex);
-	if (*pos == 0)
-		m->private = ftrace_events.next;
-	return s_next(m, NULL, pos);
+
+	m->private = ftrace_events.next;
+	for (l = 0; l <= *pos; ) {
+		call = s_next(m, NULL, &l);
+		if (!call)
+			break;
+	}
+	return call;
 }

 static int t_show(struct seq_file *m, void *v)
@@ -360,7 +376,7 @@ ftrace_event_seq_open(struct inode *inode, struct file *file)
 	const struct seq_operations *seq_ops;

 	if ((file->f_mode & FMODE_WRITE) &&
-	    !(file->f_flags & O_APPEND))
+	    (file->f_flags & O_TRUNC))
 		ftrace_clear_events();

 	seq_ops = inode->i_private;
@@ -924,7 +940,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
 	entry = trace_create_file("enable", 0644, call->dir, call,
 				  enable);

-	if (call->id)
+	if (call->id && call->profile_enable)
 		entry = trace_create_file("id", 0444, call->dir, call,
 					  id);
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index db6e54bdb59..f32dc9d1ea7 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -27,8 +27,6 @@
 #include "trace.h"
 #include "trace_output.h"

-static DEFINE_MUTEX(filter_mutex);
-
 enum filter_op_ids
 {
 	OP_OR,
@@ -178,7 +176,7 @@ static int filter_pred_string(struct filter_pred *pred, void *event,
 static int filter_pred_strloc(struct filter_pred *pred, void *event,
 			      int val1, int val2)
 {
-	int str_loc = *(int *)(event + pred->offset);
+	unsigned short str_loc = *(unsigned short *)(event + pred->offset);
 	char *addr = (char *)(event + str_loc);
 	int cmp, match;
@@ -294,12 +292,12 @@ void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
 {
 	struct event_filter *filter = call->filter;

-	mutex_lock(&filter_mutex);
+	mutex_lock(&event_mutex);
 	if (filter->filter_string)
 		trace_seq_printf(s, "%s\n", filter->filter_string);
 	else
 		trace_seq_printf(s, "none\n");
-	mutex_unlock(&filter_mutex);
+	mutex_unlock(&event_mutex);
 }

 void print_subsystem_event_filter(struct event_subsystem *system,
@@ -307,12 +305,12 @@ void print_subsystem_event_filter(struct event_subsystem *system,
 {
 	struct event_filter *filter = system->filter;

-	mutex_lock(&filter_mutex);
+	mutex_lock(&event_mutex);
 	if (filter->filter_string)
 		trace_seq_printf(s, "%s\n", filter->filter_string);
 	else
 		trace_seq_printf(s, "none\n");
-	mutex_unlock(&filter_mutex);
+	mutex_unlock(&event_mutex);
 }

 static struct ftrace_event_field *
@@ -381,6 +379,7 @@ void destroy_preds(struct ftrace_event_call *call)
 			filter_free_pred(filter->preds[i]);
 	}
 	kfree(filter->preds);
+	kfree(filter->filter_string);
 	kfree(filter);
 	call->filter = NULL;
 }
@@ -433,7 +432,6 @@ static void filter_free_subsystem_preds(struct event_subsystem *system)
 		filter->n_preds = 0;
 	}

-	mutex_lock(&event_mutex);
 	list_for_each_entry(call, &ftrace_events, list) {
 		if (!call->define_fields)
 			continue;
@@ -443,7 +441,6 @@ static void filter_free_subsystem_preds(struct event_subsystem *system)
 			remove_filter_string(call->filter);
 		}
 	}
-	mutex_unlock(&event_mutex);
 }

 static int filter_add_pred_fn(struct filter_parse_state *ps,
@@ -546,6 +543,7 @@ static int filter_add_pred(struct filter_parse_state *ps,
 	filter_pred_fn_t fn;
 	unsigned long long val;
 	int string_type;
+	int ret;

 	pred->fn = filter_pred_none;
@@ -581,7 +579,11 @@ static int filter_add_pred(struct filter_parse_state *ps,
 			pred->not = 1;
 		return filter_add_pred_fn(ps, call, pred, fn);
 	} else {
-		if (strict_strtoull(pred->str_val, 0, &val)) {
+		if (field->is_signed)
+			ret = strict_strtoll(pred->str_val, 0, &val);
+		else
+			ret = strict_strtoull(pred->str_val, 0, &val);
+		if (ret) {
 			parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
 			return -EINVAL;
 		}
@@ -622,10 +624,6 @@ static int filter_add_subsystem_pred(struct filter_parse_state *ps,
 		return -ENOSPC;
 	}

-	filter->preds[filter->n_preds] = pred;
-	filter->n_preds++;
-
-	mutex_lock(&event_mutex);
 	list_for_each_entry(call, &ftrace_events, list) {

 		if (!call->define_fields)
@@ -636,14 +634,15 @@ static int filter_add_subsystem_pred(struct filter_parse_state *ps,
 		err = filter_add_pred(ps, call, pred);
 		if (err) {
-			mutex_unlock(&event_mutex);
 			filter_free_subsystem_preds(system);
 			parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
 			goto out;
 		}
 		replace_filter_string(call->filter, filter_string);
 	}
-	mutex_unlock(&event_mutex);
+
+	filter->preds[filter->n_preds] = pred;
+	filter->n_preds++;
 out:
 	return err;
 }
@@ -1030,12 +1029,17 @@ static int replace_preds(struct event_subsystem *system,
 		if (elt->op == OP_AND || elt->op == OP_OR) {
 			pred = create_logical_pred(elt->op);
+			if (!pred)
+				return -ENOMEM;
 			if (call) {
 				err = filter_add_pred(ps, call, pred);
 				filter_free_pred(pred);
-			} else
+			} else {
 				err = filter_add_subsystem_pred(ps, system,
 							pred, filter_string);
+				if (err)
+					filter_free_pred(pred);
+			}
 			if (err)
 				return err;
@@ -1049,12 +1053,17 @@ static int replace_preds(struct event_subsystem *system,
 		}

 		pred = create_pred(elt->op, operand1, operand2);
+		if (!pred)
+			return -ENOMEM;
 		if (call) {
 			err = filter_add_pred(ps, call, pred);
 			filter_free_pred(pred);
-		} else
+		} else {
 			err = filter_add_subsystem_pred(ps, system, pred,
 							filter_string);
+			if (err)
+				filter_free_pred(pred);
+		}
 		if (err)
 			return err;
@@ -1070,12 +1079,12 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)

 	struct filter_parse_state *ps;

-	mutex_lock(&filter_mutex);
+	mutex_lock(&event_mutex);

 	if (!strcmp(strstrip(filter_string), "0")) {
 		filter_disable_preds(call);
 		remove_filter_string(call->filter);
-		mutex_unlock(&filter_mutex);
+		mutex_unlock(&event_mutex);
 		return 0;
 	}
@@ -1103,7 +1112,7 @@ out:
 	postfix_clear(ps);
 	kfree(ps);
 out_unlock:
-	mutex_unlock(&filter_mutex);
+	mutex_unlock(&event_mutex);

 	return err;
 }
@@ -1115,12 +1124,12 @@ int apply_subsystem_event_filter(struct event_subsystem *system,

 	struct filter_parse_state *ps;

-	mutex_lock(&filter_mutex);
+	mutex_lock(&event_mutex);

 	if (!strcmp(strstrip(filter_string), "0")) {
 		filter_free_subsystem_preds(system);
 		remove_filter_string(system->filter);
-		mutex_unlock(&filter_mutex);
+		mutex_unlock(&event_mutex);
 		return 0;
 	}
@@ -1148,7 +1157,7 @@ out:
 	postfix_clear(ps);
 	kfree(ps);
 out_unlock:
-	mutex_unlock(&filter_mutex);
+	mutex_unlock(&event_mutex);

 	return err;
 }
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index c9a0b7df44f..75ef000613c 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -193,9 +193,11 @@ static void tracing_start_function_trace(void)
 static void tracing_stop_function_trace(void)
 {
 	ftrace_function_enabled = 0;
-	/* OK if they are not registered */
-	unregister_ftrace_function(&trace_stack_ops);
-	unregister_ftrace_function(&trace_ops);
+
+	if (func_flags.val & TRACE_FUNC_OPT_STACK)
+		unregister_ftrace_function(&trace_stack_ops);
+	else
+		unregister_ftrace_function(&trace_ops);
 }

 static int func_set_flag(u32 old_flags, u32 bit, int set)
@@ -300,8 +302,7 @@ ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
 	if (count == -1)
 		seq_printf(m, ":unlimited\n");
 	else
-		seq_printf(m, ":count=%ld", count);
-	seq_putc(m, '\n');
+		seq_printf(m, ":count=%ld\n", count);

 	return 0;
 }
@@ -362,7 +363,7 @@ ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable)
 out_reg:
 	ret = register_ftrace_function_probe(glob, ops, count);

-	return ret;
+	return ret < 0 ? ret : 0;
 }

 static struct ftrace_func_command ftrace_traceon_cmd = {
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 8b592418d8b..420ec348757 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -57,7 +57,8 @@ static struct tracer_flags tracer_flags = {

 /* Add a function return address to the trace stack on thread info. */
 int
-ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
+ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
+			 unsigned long frame_pointer)
 {
 	unsigned long long calltime;
 	int index;
@@ -85,6 +86,7 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
 	current->ret_stack[index].func = func;
 	current->ret_stack[index].calltime = calltime;
 	current->ret_stack[index].subtime = 0;
+	current->ret_stack[index].fp = frame_pointer;
 	*depth = index;

 	return 0;
@@ -92,7 +94,8 @@

 /* Retrieve a function return address to the trace stack on thread info. */
 static void
-ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
+ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
+			unsigned long frame_pointer)
 {
 	int index;
@@ -106,6 +109,31 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
 		return;
 	}

+#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
+	/*
+	 * The arch may choose to record the frame pointer used
+	 * and check it here to make sure that it is what we expect it
+	 * to be. If gcc does not set the placeholder of the return
+	 * address in the frame pointer, and does a copy instead, then
+	 * the function graph trace will fail. This test detects this
+	 * case.
+	 *
+	 * Currently, x86_32 compiled with -Os (optimize for size) makes
+	 * the latest gcc do the above.
+	 */
+	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
+		ftrace_graph_stop();
+		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
+		     "  from func %pF return to %lx\n",
+		     current->ret_stack[index].fp,
+		     frame_pointer,
+		     (void *)current->ret_stack[index].func,
+		     current->ret_stack[index].ret);
+		*ret = (unsigned long)panic;
+		return;
+	}
+#endif
+
 	*ret = current->ret_stack[index].ret;
 	trace->func = current->ret_stack[index].func;
 	trace->calltime = current->ret_stack[index].calltime;
@@ -117,12 +145,12 @@
  * Send the trace to the ring-buffer.
  * @return the original return address.
  */
-unsigned long ftrace_return_to_handler(void)
+unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
 {
 	struct ftrace_graph_ret trace;
 	unsigned long ret;

-	ftrace_pop_return_trace(&trace, &ret);
+	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
 	trace.rettime = trace_clock_local();
 	ftrace_graph_return(&trace);
 	barrier();
@@ -815,9 +843,16 @@ print_graph_function(struct trace_iterator *iter)
 	switch (entry->type) {
 	case TRACE_GRAPH_ENT: {
-		struct ftrace_graph_ent_entry *field;
+		/*
+		 * print_graph_entry() may consume the current event,
+		 * thus @field may become invalid, so we need to save it.
+		 * sizeof(struct ftrace_graph_ent_entry) is very small,
+		 * so it can safely be saved on the stack.
+		 */
+		struct ftrace_graph_ent_entry *field, saved;
 		trace_assign_type(field, entry);
-		return print_graph_entry(field, s, iter);
+		saved = *field;
+		return print_graph_entry(&saved, s, iter);
 	}
 	case TRACE_GRAPH_RET: {
 		struct ftrace_graph_ret_entry *field;
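
The frame-pointer test above pairs a value recorded at function entry with the one observed at exit, and stops the tracer on mismatch. A userspace analogy using a toy shadow stack; it assumes GCC/clang's __builtin_frame_address(), and every other name is invented for the sketch:

#include <stdio.h>

/*
 * Toy shadow stack pairing an entry record with an exit check,
 * analogous to the new ret_stack[].fp comparison. Illustrative only;
 * the kernel hooks real function entry and exit.
 */
struct ret_entry {
	void *ret;	/* where the traced function should return to */
	void *fp;	/* frame pointer recorded at entry */
};

static struct ret_entry stack[16];
static int depth = -1;

static void push_entry(void *ret, void *fp)
{
	stack[++depth] = (struct ret_entry){ .ret = ret, .fp = fp };
}

static void *pop_exit(void *fp)
{
	struct ret_entry *e = &stack[depth--];

	if (e->fp != fp) {
		/* kernel: ftrace_graph_stop() + WARN(), return panic */
		fprintf(stderr, "bad frame pointer: expected %p, got %p\n",
			e->fp, fp);
		return NULL;
	}
	return e->ret;
}

int main(void)
{
	void *fp = __builtin_frame_address(0);	/* GCC/clang builtin */

	push_entry((void *)0x1234, fp);
	printf("matched: return to %p\n", pop_exit(fp));

	push_entry((void *)0x5678, fp);
	if (!pop_exit((char *)fp + 8))	/* simulated copied/moved frame */
		printf("mismatch detected, tracing would stop\n");
	return 0;
}
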
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 7938f3ae93e..e0c2545622e 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -27,8 +27,7 @@ void trace_print_seq(struct seq_file *m, struct trace_seq *s)
 {
 	int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;

-	s->buffer[len] = 0;
-	seq_puts(m, s->buffer);
+	seq_write(m, s->buffer, len);

 	trace_seq_init(s);
 }
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index 9bece9687b6..687699d365a 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -155,25 +155,19 @@ int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap)
 EXPORT_SYMBOL_GPL(__ftrace_vprintk);

 static void *
-t_next(struct seq_file *m, void *v, loff_t *pos)
+t_start(struct seq_file *m, loff_t *pos)
 {
-	const char **fmt = m->private;
-	const char **next = fmt;
-
-	(*pos)++;
+	const char **fmt = __start___trace_bprintk_fmt + *pos;

 	if ((unsigned long)fmt >= (unsigned long)__stop___trace_bprintk_fmt)
 		return NULL;
-
-	next = fmt;
-	m->private = ++next;
-
 	return fmt;
 }

-static void *t_start(struct seq_file *m, loff_t *pos)
+static void *t_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	return t_next(m, NULL, pos);
+	(*pos)++;
+	return t_start(m, pos);
 }

 static int t_show(struct seq_file *m, void *v)
@@ -182,7 +176,7 @@ static int t_show(struct seq_file *m, void *v)
 	const char *str = *fmt;
 	int i;

-	seq_printf(m, "0x%lx : \"", (unsigned long)fmt);
+	seq_printf(m, "0x%lx : \"", *(unsigned long *)fmt);

 	/*
 	 * Tabs and new lines need to be converted.
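
This t_start()/t_next() rework, like the matching changes in trace.c and trace_events.c earlier in the series, makes the iterator derive its element from *pos instead of caching a cursor in m->private, so a replayed position cannot go stale. A standalone model of that start/next contract (seq_file itself is kernel API; long long stands in for loff_t, and the format table is invented):

#include <stdio.h>
#include <stddef.h>

static const char *formats[] = { "fmt-a", "fmt-b", "fmt-c" };
#define N_FORMATS (sizeof(formats) / sizeof(formats[0]))

/* ->start: derive the element purely from *pos (stateless, replayable) */
static const char **t_start(long long *pos)
{
	if ((size_t)*pos >= N_FORMATS)
		return NULL;
	return &formats[*pos];
}

/* ->next: bump *pos and re-derive, instead of advancing a cached cursor */
static const char **t_next(long long *pos)
{
	(*pos)++;
	return t_start(pos);
}

int main(void)
{
	long long pos = 0;
	const char **fmt;

	/* a seq_file-style read over the whole table */
	for (fmt = t_start(&pos); fmt; fmt = t_next(&pos))
		printf("%lld: %s\n", pos, *fmt);

	pos = 1;	/* replay: start() alone must reproduce the element */
	fmt = t_start(&pos);
	printf("replayed %lld: %s\n", pos, fmt ? *fmt : "(null)");
	return 0;
}
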
@@ -224,15 +218,7 @@ static const struct seq_operations show_format_seq_ops = {

 static int ftrace_formats_open(struct inode *inode, struct file *file)
 {
-	int ret;
-
-	ret = seq_open(file, &show_format_seq_ops);
-	if (!ret) {
-		struct seq_file *m = file->private_data;
-
-		m->private = __start___trace_bprintk_fmt;
-	}
-	return ret;
+	return seq_open(file, &show_format_seq_ops);
 }

 static const struct file_operations ftrace_formats_fops = {
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 2d7aebd71db..6a2a9d484cd 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -301,17 +301,14 @@ static const struct seq_operations stack_trace_seq_ops = {

 static int stack_trace_open(struct inode *inode, struct file *file)
 {
-	int ret;
-
-	ret = seq_open(file, &stack_trace_seq_ops);
-
-	return ret;
+	return seq_open(file, &stack_trace_seq_ops);
 }

 static const struct file_operations stack_trace_fops = {
 	.open = stack_trace_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
+	.release = seq_release,
 };

 int
@@ -326,10 +323,10 @@ stack_trace_sysctl(struct ctl_table *table, int write,
 	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

 	if (ret || !write ||
-	    (last_stack_tracer_enabled == stack_tracer_enabled))
+	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
 		goto out;

-	last_stack_tracer_enabled = stack_tracer_enabled;
+	last_stack_tracer_enabled = !!stack_tracer_enabled;

 	if (stack_tracer_enabled)
 		register_ftrace_function(&trace_ops);
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index c00643733f4..aea321c82fa 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -73,7 +73,7 @@ static struct rb_node *release_next(struct rb_node *node)
 	}
 }

-static void reset_stat_session(struct stat_session *session)
+static void __reset_stat_session(struct stat_session *session)
 {
 	struct rb_node *node = session->stat_root.rb_node;
@@ -83,10 +83,17 @@
 	session->stat_root = RB_ROOT;
 }

+static void reset_stat_session(struct stat_session *session)
+{
+	mutex_lock(&session->stat_mutex);
+	__reset_stat_session(session);
+	mutex_unlock(&session->stat_mutex);
+}
+
 static void destroy_session(struct stat_session *session)
 {
 	debugfs_remove(session->file);
-	reset_stat_session(session);
+	__reset_stat_session(session);
 	mutex_destroy(&session->stat_mutex);
 	kfree(session);
 }
@@ -150,7 +157,7 @@ static int stat_seq_init(struct stat_session *session)
 	int i;

 	mutex_lock(&session->stat_mutex);
-	reset_stat_session(session);
+	__reset_stat_session(session);

 	if (!ts->stat_cmp)
 		ts->stat_cmp = dummy_cmp;
@@ -183,7 +190,7 @@ exit:
 	return ret;

 exit_free_rbtree:
-	reset_stat_session(session);
+	__reset_stat_session(session);
 	mutex_unlock(&session->stat_mutex);
 	return ret;
 }
@@ -199,17 +206,13 @@ static void *stat_seq_start(struct seq_file *s, loff_t *pos)
 	mutex_lock(&session->stat_mutex);

 	/* If we are in the beginning of the file, print the headers */
-	if (!*pos && session->ts->stat_headers) {
-		(*pos)++;
+	if (!*pos && session->ts->stat_headers)
 		return SEQ_START_TOKEN;
-	}

 	node = rb_first(&session->stat_root);
 	for (i = 0; node && i < *pos; i++)
 		node = rb_next(node);

-	(*pos)++;
-
 	return node;
 }
@@ -254,16 +257,21 @@ static const struct seq_operations trace_stat_seq_ops = {
 static int tracing_stat_open(struct inode *inode, struct file *file)
 {
 	int ret;
-
+	struct seq_file *m;
 	struct stat_session *session = inode->i_private;

+	ret = stat_seq_init(session);
+	if (ret)
+		return ret;
+
 	ret = seq_open(file, &trace_stat_seq_ops);
-	if (!ret) {
-		struct seq_file *m = file->private_data;
-		m->private = session;
-		ret = stat_seq_init(session);
+	if (ret) {
+		reset_stat_session(session);
+		return ret;
 	}
+	m = file->private_data;
+	m->private = session;
 	return ret;
 }
@@ -274,11 +282,9 @@ static int tracing_stat_release(struct inode *i, struct file *f)
 {
 	struct stat_session *session = i->i_private;

-	mutex_lock(&session->stat_mutex);
 	reset_stat_session(session);
-	mutex_unlock(&session->stat_mutex);
-
-	return 0;
+	return seq_release(i, f);
 }

 static const struct file_operations tracing_stat_fops = {
diff --git a/kernel/utsname.c b/kernel/utsname.c
index 815237a55af..8a82b4b8ea5 100644
--- a/kernel/utsname.c
+++ b/kernel/utsname.c
@@ -15,6 +15,16 @@
 #include <linux/err.h>
 #include <linux/slab.h>

+static struct uts_namespace *create_uts_ns(void)
+{
+	struct uts_namespace *uts_ns;
+
+	uts_ns = kmalloc(sizeof(struct uts_namespace), GFP_KERNEL);
+	if (uts_ns)
+		kref_init(&uts_ns->kref);
+	return uts_ns;
+}
+
 /*
  * Clone a new ns copying an original utsname, setting refcount to 1
  * @old_ns: namespace to clone
@@ -24,14 +34,13 @@ static struct uts_namespace *clone_uts_ns(struct uts_namespace *old_ns)
 {
 	struct uts_namespace *ns;

-	ns = kmalloc(sizeof(struct uts_namespace), GFP_KERNEL);
+	ns = create_uts_ns();
 	if (!ns)
 		return ERR_PTR(-ENOMEM);

 	down_read(&uts_sem);
 	memcpy(&ns->name, &old_ns->name, sizeof(ns->name));
 	up_read(&uts_sem);
-	kref_init(&ns->kref);
 	return ns;
 }
diff --git a/kernel/wait.c b/kernel/wait.c
index ea7c3b4275c..c4bd3d825f3 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -10,13 +10,14 @@
 #include <linux/wait.h>
 #include <linux/hash.h>

-void init_waitqueue_head(wait_queue_head_t *q)
+void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *key)
 {
 	spin_lock_init(&q->lock);
+	lockdep_set_class(&q->lock, key);
 	INIT_LIST_HEAD(&q->task_list);
 }
-EXPORT_SYMBOL(init_waitqueue_head);
+EXPORT_SYMBOL(__init_waitqueue_head);

 void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
 {
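
With __init_waitqueue_head() taking a lock_class_key, each wait queue head can carry its own lockdep class. The wait.h side of the change is not part of this hunk; presumably init_waitqueue_head() becomes a macro declaring one static key per call site, roughly like this compilable userspace model (both struct definitions here are stand-ins for the kernel's lockdep types):

#include <stdio.h>

/* Userspace stand-ins; the real types live in the kernel. */
struct lock_class_key { char dummy; };
struct wait_queue_head { struct lock_class_key *key; };

static void __init_waitqueue_head(struct wait_queue_head *q,
				  struct lock_class_key *key)
{
	q->key = key;	/* kernel: lockdep_set_class(&q->lock, key) */
}

/*
 * One static key per macro expansion: every call site, and therefore
 * every logically distinct wait queue head, gets its own class.
 */
#define init_waitqueue_head(q)					\
	do {							\
		static struct lock_class_key __key;		\
		__init_waitqueue_head((q), &__key);		\
	} while (0)

int main(void)
{
	struct wait_queue_head a, b;

	init_waitqueue_head(&a);	/* distinct expansion, distinct key */
	init_waitqueue_head(&b);
	printf("same class? %s\n", a.key == b.key ? "yes" : "no");
	return 0;
}
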