author     Ingo Molnar <mingo@kernel.org>  2012-08-21 11:27:00 +0200
committer  Ingo Molnar <mingo@kernel.org>  2012-08-21 11:27:00 +0200
commit     bcada3d4b8c96b8792c2306f363992ca5ab9da42 (patch)
tree       e420679a5db6ea4e1694eef57f9abb6acac8d4d3 /net/unix/diag.c
parent     26198c21d1b286a084fe5d514a30bc7e6c712a34 (diff)
parent     000078bc3ee69efb1124b8478c7527389a826074 (diff)
Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core
Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:
* Fix include order for bison/flex-generated C files, from Ben Hutchings
* Build fixes and documentation corrections from David Ahern
* Group parsing support, from Jiri Olsa
* UI/gtk refactorings and improvements from Namhyung Kim
* NULL deref fix for perf script, from Namhyung Kim
* Assorted cleanups from Robert Richter
* Let 'O=' make invocations handle relative paths, from Steven Rostedt
* perf script python fixes, from Feng Tang
* Improve the 'perf lock' error message when the needed tracepoints
  are not present, from David Ahern
* Initial bash completion support, from Frederic Weisbecker
* Allow building without libelf, from Namhyung Kim
* Support DWARF CFI based unwinding to get callchains when %bp-based
  unwinding is not possible, from Jiri Olsa
* Symbol resolution fixes from Cody Schafer: while fixing support for
  PPC64 files with an .opd ELF section was the end goal, several fixes
  and cleanups for code that handles all architectures are included
* Add a description for the JIT interface, from Andi Kleen
* Assorted fixes for the Documentation and for 32-bit builds, from Robert Richter
* Add support for non-tracepoint events in perf script python, from Feng Tang
* Cache the libtraceevent event_format associated with each evsel early, so
  that we avoid re-lookups, i.e. calling pevent_find_event() repeatedly when
  processing tracepoint events (a minimal sketch of this caching pattern
  follows below).
  [ This is to reduce the surface contact with libtraceevent and make clear
  what it is that the perf tools need from that lib: so far, parsing the
  common and per-event fields. ]
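The caching item above is easy to illustrate in isolation. The following is a
hedged sketch only, not code from this merge: struct example_evsel, its
tp_format member and example_evsel__format() are made-up names, while
pevent_find_event() and struct event_format are the libtraceevent names the
item refers to.

/*
 * Minimal sketch of the "look up once, reuse afterwards" caching idea
 * described above -- NOT the actual perf tools code.
 */
struct pevent;                          /* opaque libtraceevent handle */
struct event_format;                    /* parsed tracepoint format    */

struct event_format *pevent_find_event(struct pevent *pevent, int id);

struct example_evsel {
        int id;                         /* tracepoint id of this event   */
        struct event_format *tp_format; /* cached format, NULL until set */
};

/* Resolve the format on first use and reuse it for every later sample. */
static struct event_format *
example_evsel__format(struct example_evsel *evsel, struct pevent *pevent)
{
        if (!evsel->tp_format)
                evsel->tp_format = pevent_find_event(pevent, evsel->id);
        return evsel->tp_format;
}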
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'net/unix/diag.c')
-rw-r--r--  net/unix/diag.c  115
1 file changed, 55 insertions(+), 60 deletions(-)
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 47d3002737f..750b1340844 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -8,40 +8,31 @@
 #include <net/af_unix.h>
 #include <net/tcp_states.h>
 
-#define UNIX_DIAG_PUT(skb, attrtype, attrlen) \
-        RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))
-
 static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
 {
         struct unix_address *addr = unix_sk(sk)->addr;
-        char *s;
-
-        if (addr) {
-                s = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short));
-                memcpy(s, addr->name->sun_path, addr->len - sizeof(short));
-        }
 
-        return 0;
+        if (!addr)
+                return 0;
 
-rtattr_failure:
-        return -EMSGSIZE;
+        return nla_put(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short),
+                       addr->name->sun_path);
 }
 
 static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
 {
         struct dentry *dentry = unix_sk(sk)->path.dentry;
-        struct unix_diag_vfs *uv;
 
         if (dentry) {
-                uv = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_VFS, sizeof(*uv));
-                uv->udiag_vfs_ino = dentry->d_inode->i_ino;
-                uv->udiag_vfs_dev = dentry->d_sb->s_dev;
+                struct unix_diag_vfs uv = {
+                        .udiag_vfs_ino = dentry->d_inode->i_ino,
+                        .udiag_vfs_dev = dentry->d_sb->s_dev,
+                };
+
+                return nla_put(nlskb, UNIX_DIAG_VFS, sizeof(uv), &uv);
         }
 
         return 0;
-
-rtattr_failure:
-        return -EMSGSIZE;
 }
 
 static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
@@ -56,24 +47,28 @@ static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
                 unix_state_unlock(peer);
                 sock_put(peer);
 
-                RTA_PUT_U32(nlskb, UNIX_DIAG_PEER, ino);
+                return nla_put_u32(nlskb, UNIX_DIAG_PEER, ino);
         }
 
         return 0;
-rtattr_failure:
-        return -EMSGSIZE;
 }
 
 static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
 {
         struct sk_buff *skb;
+        struct nlattr *attr;
         u32 *buf;
         int i;
 
         if (sk->sk_state == TCP_LISTEN) {
                 spin_lock(&sk->sk_receive_queue.lock);
-                buf = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_ICONS,
-                                sk->sk_receive_queue.qlen * sizeof(u32));
+
+                attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
+                                   sk->sk_receive_queue.qlen * sizeof(u32));
+                if (!attr)
+                        goto errout;
+
+                buf = nla_data(attr);
                 i = 0;
                 skb_queue_walk(&sk->sk_receive_queue, skb) {
                         struct sock *req, *peer;
@@ -94,43 +89,38 @@ static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
 
         return 0;
 
-rtattr_failure:
+errout:
         spin_unlock(&sk->sk_receive_queue.lock);
         return -EMSGSIZE;
 }
 
 static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
 {
-        struct unix_diag_rqlen *rql;
-
-        rql = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_RQLEN, sizeof(*rql));
+        struct unix_diag_rqlen rql;
 
         if (sk->sk_state == TCP_LISTEN) {
-                rql->udiag_rqueue = sk->sk_receive_queue.qlen;
-                rql->udiag_wqueue = sk->sk_max_ack_backlog;
+                rql.udiag_rqueue = sk->sk_receive_queue.qlen;
+                rql.udiag_wqueue = sk->sk_max_ack_backlog;
         } else {
-                rql->udiag_rqueue = (__u32)unix_inq_len(sk);
-                rql->udiag_wqueue = (__u32)unix_outq_len(sk);
+                rql.udiag_rqueue = (u32) unix_inq_len(sk);
+                rql.udiag_wqueue = (u32) unix_outq_len(sk);
         }
 
-        return 0;
-
-rtattr_failure:
-        return -EMSGSIZE;
+        return nla_put(nlskb, UNIX_DIAG_RQLEN, sizeof(rql), &rql);
 }
 
 static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
                 u32 pid, u32 seq, u32 flags, int sk_ino)
 {
-        unsigned char *b = skb_tail_pointer(skb);
         struct nlmsghdr *nlh;
         struct unix_diag_msg *rep;
 
-        nlh = NLMSG_PUT(skb, pid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep));
-        nlh->nlmsg_flags = flags;
-
-        rep = NLMSG_DATA(nlh);
+        nlh = nlmsg_put(skb, pid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
+                        flags);
+        if (!nlh)
+                return -EMSGSIZE;
+        rep = nlmsg_data(nlh);
 
         rep->udiag_family = AF_UNIX;
         rep->udiag_type = sk->sk_type;
         rep->udiag_state = sk->sk_state;
@@ -139,33 +129,32 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
 
         if ((req->udiag_show & UDIAG_SHOW_NAME) &&
             sk_diag_dump_name(sk, skb))
-                goto nlmsg_failure;
+                goto out_nlmsg_trim;
 
         if ((req->udiag_show & UDIAG_SHOW_VFS) &&
             sk_diag_dump_vfs(sk, skb))
-                goto nlmsg_failure;
+                goto out_nlmsg_trim;
 
         if ((req->udiag_show & UDIAG_SHOW_PEER) &&
             sk_diag_dump_peer(sk, skb))
-                goto nlmsg_failure;
+                goto out_nlmsg_trim;
 
         if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
             sk_diag_dump_icons(sk, skb))
-                goto nlmsg_failure;
+                goto out_nlmsg_trim;
 
         if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
             sk_diag_show_rqlen(sk, skb))
-                goto nlmsg_failure;
+                goto out_nlmsg_trim;
 
         if ((req->udiag_show & UDIAG_SHOW_MEMINFO) &&
             sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
-                goto nlmsg_failure;
+                goto out_nlmsg_trim;
 
-        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
-        return skb->len;
+        return nlmsg_end(skb, nlh);
 
-nlmsg_failure:
-        nlmsg_trim(skb, b);
+out_nlmsg_trim:
+        nlmsg_cancel(skb, nlh);
         return -EMSGSIZE;
 }
 
@@ -188,19 +177,24 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
         struct unix_diag_req *req;
         int num, s_num, slot, s_slot;
+        struct net *net = sock_net(skb->sk);
 
-        req = NLMSG_DATA(cb->nlh);
+        req = nlmsg_data(cb->nlh);
 
         s_slot = cb->args[0];
         num = s_num = cb->args[1];
 
         spin_lock(&unix_table_lock);
-        for (slot = s_slot; slot <= UNIX_HASH_SIZE; s_num = 0, slot++) {
+        for (slot = s_slot;
+             slot < ARRAY_SIZE(unix_socket_table);
+             s_num = 0, slot++) {
                 struct sock *sk;
                 struct hlist_node *node;
 
                 num = 0;
                 sk_for_each(sk, node, &unix_socket_table[slot]) {
+                        if (!net_eq(sock_net(sk), net))
+                                continue;
                         if (num < s_num)
                                 goto next;
                         if (!(req->udiag_states & (1 << sk->sk_state)))
@@ -228,7 +222,7 @@ static struct sock *unix_lookup_by_ino(int ino)
         struct sock *sk;
 
         spin_lock(&unix_table_lock);
-        for (i = 0; i <= UNIX_HASH_SIZE; i++) {
+        for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) {
                 struct hlist_node *node;
 
                 sk_for_each(sk, node, &unix_socket_table[i])
@@ -252,6 +246,7 @@ static int unix_diag_get_exact(struct sk_buff *in_skb,
         struct sock *sk;
         struct sk_buff *rep;
         unsigned int extra_len;
+        struct net *net = sock_net(in_skb->sk);
 
         if (req->udiag_ino == 0)
                 goto out_nosk;
@@ -268,22 +263,21 @@ static int unix_diag_get_exact(struct sk_buff *in_skb,
         extra_len = 256;
 again:
         err = -ENOMEM;
-        rep = alloc_skb(NLMSG_SPACE((sizeof(struct unix_diag_msg) + extra_len)),
-                        GFP_KERNEL);
+        rep = nlmsg_new(sizeof(struct unix_diag_msg) + extra_len, GFP_KERNEL);
         if (!rep)
                 goto out;
 
         err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).pid,
                            nlh->nlmsg_seq, 0, req->udiag_ino);
         if (err < 0) {
-                kfree_skb(rep);
+                nlmsg_free(rep);
                 extra_len += 256;
                 if (extra_len >= PAGE_SIZE)
                         goto out;
 
                 goto again;
         }
-        err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
+        err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).pid,
                               MSG_DONTWAIT);
         if (err > 0)
                 err = 0;
@@ -297,6 +291,7 @@ out_nosk:
 static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
 {
         int hdrlen = sizeof(struct unix_diag_req);
+        struct net *net = sock_net(skb->sk);
 
         if (nlmsg_len(h) < hdrlen)
                 return -EINVAL;
@@ -305,9 +300,9 @@ static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
                 struct netlink_dump_control c = {
                         .dump = unix_diag_dump,
                 };
-                return netlink_dump_start(sock_diag_nlsk, skb, h, &c);
+                return netlink_dump_start(net->diag_nlsk, skb, h, &c);
         } else
-                return unix_diag_get_exact(skb, h, (struct unix_diag_req *)NLMSG_DATA(h));
+                return unix_diag_get_exact(skb, h, nlmsg_data(h));
 }
 
 static const struct sock_diag_handler unix_diag_handler = {
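For readers unfamiliar with the helpers this diff converts net/unix/diag.c to,
here is a hedged, self-contained sketch of the fill pattern (nlmsg_put(),
nla_put_u32(), nlmsg_end(), nlmsg_cancel()) as used in kernels of this era. It
is not code from this commit; EXAMPLE_MSG_TYPE and EXAMPLE_ATTR_VALUE are
made-up identifiers.

/*
 * Hedged sketch of the netlink fill pattern adopted by the conversion
 * above -- not code from this commit.
 */
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>

#define EXAMPLE_MSG_TYPE        0x10    /* made-up message type   */
#define EXAMPLE_ATTR_VALUE      1       /* made-up attribute type */

static int example_fill(struct sk_buff *skb, u32 pid, u32 seq, u32 flags,
                        u32 value)
{
        struct nlmsghdr *nlh;

        /* Reserve the netlink header; NULL means the skb is full. */
        nlh = nlmsg_put(skb, pid, seq, EXAMPLE_MSG_TYPE, 0, flags);
        if (!nlh)
                return -EMSGSIZE;

        /* Append one u32 attribute; non-zero means it did not fit. */
        if (nla_put_u32(skb, EXAMPLE_ATTR_VALUE, value))
                goto out_nlmsg_trim;

        /* Patch up nlmsg_len and return the message length. */
        return nlmsg_end(skb, nlh);

out_nlmsg_trim:
        /* Roll back the partially built message, as sk_diag_fill() does. */
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}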