Diffstat (limited to 'net/tipc')

-rw-r--r--  net/tipc/Makefile       |   4
-rw-r--r--  net/tipc/bcast.c        | 109
-rw-r--r--  net/tipc/bcast.h        |   2
-rw-r--r--  net/tipc/bearer.h       |   2
-rw-r--r--  net/tipc/core.h         |   1
-rw-r--r--  net/tipc/link.c         | 514
-rw-r--r--  net/tipc/link.h         |  48
-rw-r--r--  net/tipc/msg.c          | 125
-rw-r--r--  net/tipc/msg.h          |  16
-rw-r--r--  net/tipc/name_distr.c   |  98
-rw-r--r--  net/tipc/name_distr.h   |   1
-rw-r--r--  net/tipc/name_table.c   |   2
-rw-r--r--  net/tipc/name_table.h   |   6
-rw-r--r--  net/tipc/node.c         |  10
-rw-r--r--  net/tipc/node.h         |  12
-rw-r--r--  net/tipc/node_subscr.c  |  96
-rw-r--r--  net/tipc/node_subscr.h  |  63
-rw-r--r--  net/tipc/socket.c       | 127

18 files changed, 496 insertions, 740 deletions
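
The change that repeats through the whole diff below is mechanical: open-coded skb chains (first_out/last_out, oldest_deferred_in/newest_deferred_in, plus hand-maintained counters such as out_queue_size) are replaced by the kernel's struct sk_buff_head and the skb_queue_*() helpers, which track queue length and unlink in O(1) on their own. What follows is a minimal sketch of that pattern, not TIPC's exact code: the sequence-number helpers are copied from the link.h hunk below, while get_seqno() is a hypothetical stand-in for TIPC's buf_seqno().

#include <linux/skbuff.h>

/* 16-bit circular sequence-number compare, as defined in net/tipc/link.h */
static inline u32 mod(u32 x) { return x & 0xffffu; }
static inline int less_eq(u32 left, u32 right) { return mod(right - left) < 32768u; }
static inline int more(u32 left, u32 right) { return !less_eq(left, right); }

/* Hypothetical stand-in for TIPC's buf_seqno(); assume the sender stashed
 * the sequence number in the skb control block.
 */
static inline u32 get_seqno(struct sk_buff *skb) { return *(u32 *)skb->cb; }

/* Release every queued packet the peer has acknowledged -- the shape of the
 * new loops in tipc_bclink_acknowledge() and tipc_rcv(). The _safe walker
 * is required because we unlink while iterating, and sk_buff_head keeps the
 * queue length itself, so there is no out_queue_size counter to maintain.
 */
static void release_acked(struct sk_buff_head *outqueue, u32 acked)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(outqueue, skb, tmp) {
		if (more(get_seqno(skb), acked))
			break;			/* queue is kept in seqno order */
		__skb_unlink(skb, outqueue);	/* O(1); updates outqueue->qlen */
		kfree_skb(skb);
	}
}
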
diff --git a/net/tipc/Makefile b/net/tipc/Makefile index b8a13caad59..333e4592772 100644 --- a/net/tipc/Makefile +++ b/net/tipc/Makefile @@ -7,8 +7,8 @@ obj-$(CONFIG_TIPC) := tipc.o tipc-y += addr.o bcast.o bearer.o config.o \ core.o link.o discover.o msg.o \ name_distr.o subscr.o name_table.o net.o \ - netlink.o node.o node_subscr.o \ - socket.o log.o eth_media.o server.o + netlink.o node.o socket.o log.o eth_media.o \ + server.o tipc-$(CONFIG_TIPC_MEDIA_IB) += ib_media.o tipc-$(CONFIG_SYSCTL) += sysctl.o diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index 556b26ad4b1..f0761c77173 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c @@ -217,12 +217,13 @@ struct tipc_node *tipc_bclink_retransmit_to(void) */ static void bclink_retransmit_pkt(u32 after, u32 to) { - struct sk_buff *buf; + struct sk_buff *skb; - buf = bcl->first_out; - while (buf && less_eq(buf_seqno(buf), after)) - buf = buf->next; - tipc_link_retransmit(bcl, buf, mod(to - after)); + skb_queue_walk(&bcl->outqueue, skb) { + if (more(buf_seqno(skb), after)) + break; + } + tipc_link_retransmit(bcl, skb, mod(to - after)); } /** @@ -245,14 +246,14 @@ void tipc_bclink_wakeup_users(void) */ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked) { - struct sk_buff *crs; + struct sk_buff *skb, *tmp; struct sk_buff *next; unsigned int released = 0; tipc_bclink_lock(); /* Bail out if tx queue is empty (no clean up is required) */ - crs = bcl->first_out; - if (!crs) + skb = skb_peek(&bcl->outqueue); + if (!skb) goto exit; /* Determine which messages need to be acknowledged */ @@ -271,43 +272,43 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked) * Bail out if specified sequence number does not correspond * to a message that has been sent and not yet acknowledged */ - if (less(acked, buf_seqno(crs)) || + if (less(acked, buf_seqno(skb)) || less(bcl->fsm_msg_cnt, acked) || less_eq(acked, n_ptr->bclink.acked)) goto exit; } /* Skip over packets that node has previously acknowledged */ - while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked)) - crs = crs->next; + skb_queue_walk(&bcl->outqueue, skb) { + if (more(buf_seqno(skb), n_ptr->bclink.acked)) + break; + } /* Update packets that node is now acknowledging */ + skb_queue_walk_from_safe(&bcl->outqueue, skb, tmp) { + if (more(buf_seqno(skb), acked)) + break; - while (crs && less_eq(buf_seqno(crs), acked)) { - next = crs->next; - - if (crs != bcl->next_out) - bcbuf_decr_acks(crs); - else { - bcbuf_set_acks(crs, 0); + next = tipc_skb_queue_next(&bcl->outqueue, skb); + if (skb != bcl->next_out) { + bcbuf_decr_acks(skb); + } else { + bcbuf_set_acks(skb, 0); bcl->next_out = next; bclink_set_last_sent(); } - if (bcbuf_acks(crs) == 0) { - bcl->first_out = next; - bcl->out_queue_size--; - kfree_skb(crs); + if (bcbuf_acks(skb) == 0) { + __skb_unlink(skb, &bcl->outqueue); + kfree_skb(skb); released = 1; } - crs = next; } n_ptr->bclink.acked = acked; /* Try resolving broadcast link congestion, if necessary */ - if (unlikely(bcl->next_out)) { - tipc_link_push_queue(bcl); + tipc_link_push_packets(bcl); bclink_set_last_sent(); } if (unlikely(released && !skb_queue_empty(&bcl->waiting_sks))) @@ -327,19 +328,16 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent) struct sk_buff *buf; /* Ignore "stale" link state info */ - if (less_eq(last_sent, n_ptr->bclink.last_in)) return; /* Update link synchronization state; quit if in sync */ - bclink_update_last_sent(n_ptr, last_sent); if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in) return; /* Update 
out-of-sync state; quit if loss is still unconfirmed */ - if ((++n_ptr->bclink.oos_state) == 1) { if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2)) return; @@ -347,15 +345,15 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent) } /* Don't NACK if one has been recently sent (or seen) */ - if (n_ptr->bclink.oos_state & 0x1) return; /* Send NACK */ - buf = tipc_buf_acquire(INT_H_SIZE); if (buf) { struct tipc_msg *msg = buf_msg(buf); + struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferred_queue); + u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent; tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, n_ptr->addr); @@ -363,9 +361,7 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent) msg_set_mc_netid(msg, tipc_net_id); msg_set_bcast_ack(msg, n_ptr->bclink.last_in); msg_set_bcgap_after(msg, n_ptr->bclink.last_in); - msg_set_bcgap_to(msg, n_ptr->bclink.deferred_head - ? buf_seqno(n_ptr->bclink.deferred_head) - 1 - : n_ptr->bclink.last_sent); + msg_set_bcgap_to(msg, to); tipc_bclink_lock(); tipc_bearer_send(MAX_BEARERS, buf, NULL); @@ -402,20 +398,20 @@ static void bclink_peek_nack(struct tipc_msg *msg) /* tipc_bclink_xmit - broadcast buffer chain to all nodes in cluster * and to identified node local sockets - * @buf: chain of buffers containing message + * @list: chain of buffers containing message * Consumes the buffer chain, except when returning -ELINKCONG * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE */ -int tipc_bclink_xmit(struct sk_buff *buf) +int tipc_bclink_xmit(struct sk_buff_head *list) { int rc = 0; int bc = 0; - struct sk_buff *clbuf; + struct sk_buff *skb; /* Prepare clone of message for local node */ - clbuf = tipc_msg_reassemble(buf); - if (unlikely(!clbuf)) { - kfree_skb_list(buf); + skb = tipc_msg_reassemble(list); + if (unlikely(!skb)) { + __skb_queue_purge(list); return -EHOSTUNREACH; } @@ -423,11 +419,13 @@ int tipc_bclink_xmit(struct sk_buff *buf) if (likely(bclink)) { tipc_bclink_lock(); if (likely(bclink->bcast_nodes.count)) { - rc = __tipc_link_xmit(bcl, buf); + rc = __tipc_link_xmit(bcl, list); if (likely(!rc)) { + u32 len = skb_queue_len(&bcl->outqueue); + bclink_set_last_sent(); bcl->stats.queue_sz_counts++; - bcl->stats.accu_queue_sz += bcl->out_queue_size; + bcl->stats.accu_queue_sz += len; } bc = 1; } @@ -435,13 +433,13 @@ int tipc_bclink_xmit(struct sk_buff *buf) } if (unlikely(!bc)) - kfree_skb_list(buf); + __skb_queue_purge(list); /* Deliver message clone */ if (likely(!rc)) - tipc_sk_mcast_rcv(clbuf); + tipc_sk_mcast_rcv(skb); else - kfree_skb(clbuf); + kfree_skb(skb); return rc; } @@ -462,7 +460,6 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno) * Unicast an ACK periodically, ensuring that * all nodes in the cluster don't ACK at the same time */ - if (((seqno - tipc_own_addr) % TIPC_MIN_LINK_WIN) == 0) { tipc_link_proto_xmit(node->active_links[node->addr & 1], STATE_MSG, 0, 0, 0, 0, 0); @@ -484,7 +481,6 @@ void tipc_bclink_rcv(struct sk_buff *buf) int deferred = 0; /* Screen out unwanted broadcast messages */ - if (msg_mc_netid(msg) != tipc_net_id) goto exit; @@ -497,7 +493,6 @@ void tipc_bclink_rcv(struct sk_buff *buf) goto unlock; /* Handle broadcast protocol message */ - if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) { if (msg_type(msg) != STATE_MSG) goto unlock; @@ -518,14 +513,12 @@ void tipc_bclink_rcv(struct sk_buff *buf) } /* Handle in-sequence broadcast message */ - seqno = msg_seqno(msg); next_in = mod(node->bclink.last_in + 1); 
if (likely(seqno == next_in)) { receive: /* Deliver message to destination */ - if (likely(msg_isdata(msg))) { tipc_bclink_lock(); bclink_accept_pkt(node, seqno); @@ -574,7 +567,6 @@ receive: buf = NULL; /* Determine new synchronization state */ - tipc_node_lock(node); if (unlikely(!tipc_node_is_up(node))) goto unlock; @@ -582,33 +574,26 @@ receive: if (node->bclink.last_in == node->bclink.last_sent) goto unlock; - if (!node->bclink.deferred_head) { + if (skb_queue_empty(&node->bclink.deferred_queue)) { node->bclink.oos_state = 1; goto unlock; } - msg = buf_msg(node->bclink.deferred_head); + msg = buf_msg(skb_peek(&node->bclink.deferred_queue)); seqno = msg_seqno(msg); next_in = mod(next_in + 1); if (seqno != next_in) goto unlock; /* Take in-sequence message from deferred queue & deliver it */ - - buf = node->bclink.deferred_head; - node->bclink.deferred_head = buf->next; - buf->next = NULL; - node->bclink.deferred_size--; + buf = __skb_dequeue(&node->bclink.deferred_queue); goto receive; } /* Handle out-of-sequence broadcast message */ - if (less(next_in, seqno)) { - deferred = tipc_link_defer_pkt(&node->bclink.deferred_head, - &node->bclink.deferred_tail, + deferred = tipc_link_defer_pkt(&node->bclink.deferred_queue, buf); - node->bclink.deferred_size += deferred; bclink_update_last_sent(node, seqno); buf = NULL; } @@ -963,6 +948,8 @@ int tipc_bclink_init(void) sprintf(bcbearer->media.name, "tipc-broadcast"); spin_lock_init(&bclink->lock); + __skb_queue_head_init(&bcl->outqueue); + __skb_queue_head_init(&bcl->deferred_queue); __skb_queue_head_init(&bcl->waiting_sks); bcl->next_out_no = 1; spin_lock_init(&bclink->node.lock); diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h index 443de084d3e..644d79129fb 100644 --- a/net/tipc/bcast.h +++ b/net/tipc/bcast.h @@ -100,7 +100,7 @@ int tipc_bclink_reset_stats(void); int tipc_bclink_set_queue_limits(u32 limit); void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action); uint tipc_bclink_get_mtu(void); -int tipc_bclink_xmit(struct sk_buff *buf); +int tipc_bclink_xmit(struct sk_buff_head *list); void tipc_bclink_wakeup_users(void); int tipc_nl_add_bc_link(struct tipc_nl_msg *msg); diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h index b1d905209e8..2c1230ac5df 100644 --- a/net/tipc/bearer.h +++ b/net/tipc/bearer.h @@ -165,7 +165,7 @@ extern struct tipc_bearer __rcu *bearer_list[]; * TIPC routines available to supported media types */ -void tipc_rcv(struct sk_buff *buf, struct tipc_bearer *tb_ptr); +void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *tb_ptr); int tipc_enable_bearer(const char *bearer_name, u32 disc_domain, u32 priority); int tipc_disable_bearer(const char *name); diff --git a/net/tipc/core.h b/net/tipc/core.h index b578b10feef..84602137ce2 100644 --- a/net/tipc/core.h +++ b/net/tipc/core.h @@ -192,6 +192,7 @@ struct tipc_skb_cb { struct sk_buff *tail; bool deferred; bool wakeup_pending; + bool bundling; u16 chain_sz; u16 chain_imp; }; diff --git a/net/tipc/link.c b/net/tipc/link.c index 4738cb1bf7c..34bf15c90c7 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -149,18 +149,6 @@ static void link_init_max_pkt(struct tipc_link *l_ptr) l_ptr->max_pkt_probes = 0; } -static u32 link_next_sent(struct tipc_link *l_ptr) -{ - if (l_ptr->next_out) - return buf_seqno(l_ptr->next_out); - return mod(l_ptr->next_out_no); -} - -static u32 link_last_sent(struct tipc_link *l_ptr) -{ - return mod(link_next_sent(l_ptr) - 1); -} - /* * Simple non-static link routines (i.e. 
referenced outside this file) */ @@ -183,14 +171,17 @@ int tipc_link_is_active(struct tipc_link *l_ptr) */ static void link_timeout(struct tipc_link *l_ptr) { + struct sk_buff *skb; + tipc_node_lock(l_ptr->owner); /* update counters used in statistical profiling of send traffic */ - l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size; + l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->outqueue); l_ptr->stats.queue_sz_counts++; - if (l_ptr->first_out) { - struct tipc_msg *msg = buf_msg(l_ptr->first_out); + skb = skb_peek(&l_ptr->outqueue); + if (skb) { + struct tipc_msg *msg = buf_msg(skb); u32 length = msg_size(msg); if ((msg_user(msg) == MSG_FRAGMENTER) && @@ -218,11 +209,10 @@ static void link_timeout(struct tipc_link *l_ptr) } /* do all other link processing performed on a periodic basis */ - link_state_event(l_ptr, TIMEOUT_EVT); if (l_ptr->next_out) - tipc_link_push_queue(l_ptr); + tipc_link_push_packets(l_ptr); tipc_node_unlock(l_ptr->owner); } @@ -301,6 +291,8 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr, link_init_max_pkt(l_ptr); l_ptr->next_out_no = 1; + __skb_queue_head_init(&l_ptr->outqueue); + __skb_queue_head_init(&l_ptr->deferred_queue); __skb_queue_head_init(&l_ptr->waiting_sks); link_reset_statistics(l_ptr); @@ -379,30 +371,19 @@ static bool link_schedule_user(struct tipc_link *link, u32 oport, */ static void link_prepare_wakeup(struct tipc_link *link) { - struct sk_buff_head *wq = &link->waiting_sks; - struct sk_buff *buf; - uint pend_qsz = link->out_queue_size; + uint pend_qsz = skb_queue_len(&link->outqueue); + struct sk_buff *skb, *tmp; - for (buf = skb_peek(wq); buf; buf = skb_peek(wq)) { - if (pend_qsz >= link->queue_limit[TIPC_SKB_CB(buf)->chain_imp]) + skb_queue_walk_safe(&link->waiting_sks, skb, tmp) { + if (pend_qsz >= link->queue_limit[TIPC_SKB_CB(skb)->chain_imp]) break; - pend_qsz += TIPC_SKB_CB(buf)->chain_sz; - __skb_queue_tail(&link->owner->waiting_sks, __skb_dequeue(wq)); + pend_qsz += TIPC_SKB_CB(skb)->chain_sz; + __skb_unlink(skb, &link->waiting_sks); + __skb_queue_tail(&link->owner->waiting_sks, skb); } } /** - * link_release_outqueue - purge link's outbound message queue - * @l_ptr: pointer to link - */ -static void link_release_outqueue(struct tipc_link *l_ptr) -{ - kfree_skb_list(l_ptr->first_out); - l_ptr->first_out = NULL; - l_ptr->out_queue_size = 0; -} - -/** * tipc_link_reset_fragments - purge link's inbound message fragments queue * @l_ptr: pointer to link */ @@ -418,11 +399,9 @@ void tipc_link_reset_fragments(struct tipc_link *l_ptr) */ void tipc_link_purge_queues(struct tipc_link *l_ptr) { - kfree_skb_list(l_ptr->oldest_deferred_in); - kfree_skb_list(l_ptr->first_out); + __skb_queue_purge(&l_ptr->deferred_queue); + __skb_queue_purge(&l_ptr->outqueue); tipc_link_reset_fragments(l_ptr); - kfree_skb(l_ptr->proto_msg_queue); - l_ptr->proto_msg_queue = NULL; } void tipc_link_reset(struct tipc_link *l_ptr) @@ -454,25 +433,16 @@ void tipc_link_reset(struct tipc_link *l_ptr) } /* Clean up all queues: */ - link_release_outqueue(l_ptr); - kfree_skb(l_ptr->proto_msg_queue); - l_ptr->proto_msg_queue = NULL; - kfree_skb_list(l_ptr->oldest_deferred_in); + __skb_queue_purge(&l_ptr->outqueue); + __skb_queue_purge(&l_ptr->deferred_queue); if (!skb_queue_empty(&l_ptr->waiting_sks)) { skb_queue_splice_init(&l_ptr->waiting_sks, &owner->waiting_sks); owner->action_flags |= TIPC_WAKEUP_USERS; } - l_ptr->retransm_queue_head = 0; - l_ptr->retransm_queue_size = 0; - l_ptr->last_out = NULL; - l_ptr->first_out = NULL; l_ptr->next_out = NULL; 
l_ptr->unacked_window = 0; l_ptr->checkpoint = 1; l_ptr->next_out_no = 1; - l_ptr->deferred_inqueue_sz = 0; - l_ptr->oldest_deferred_in = NULL; - l_ptr->newest_deferred_in = NULL; l_ptr->fsm_msg_cnt = 0; l_ptr->stale_count = 0; link_reset_statistics(l_ptr); @@ -694,9 +664,10 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) * - For all other messages we discard the buffer and return -EHOSTUNREACH * - For TIPC internal messages we also reset the link */ -static int tipc_link_cong(struct tipc_link *link, struct sk_buff *buf) +static int tipc_link_cong(struct tipc_link *link, struct sk_buff_head *list) { - struct tipc_msg *msg = buf_msg(buf); + struct sk_buff *skb = skb_peek(list); + struct tipc_msg *msg = buf_msg(skb); uint imp = tipc_msg_tot_importance(msg); u32 oport = msg_tot_origport(msg); @@ -709,30 +680,30 @@ static int tipc_link_cong(struct tipc_link *link, struct sk_buff *buf) goto drop; if (unlikely(msg_reroute_cnt(msg))) goto drop; - if (TIPC_SKB_CB(buf)->wakeup_pending) + if (TIPC_SKB_CB(skb)->wakeup_pending) return -ELINKCONG; - if (link_schedule_user(link, oport, TIPC_SKB_CB(buf)->chain_sz, imp)) + if (link_schedule_user(link, oport, skb_queue_len(list), imp)) return -ELINKCONG; drop: - kfree_skb_list(buf); + __skb_queue_purge(list); return -EHOSTUNREACH; } /** * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked * @link: link to use - * @buf: chain of buffers containing message + * @list: chain of buffers containing message + * * Consumes the buffer chain, except when returning -ELINKCONG * Returns 0 if success, otherwise errno: -ELINKCONG, -EMSGSIZE (plain socket * user data messages) or -EHOSTUNREACH (all other messages/senders) * Only the socket functions tipc_send_stream() and tipc_send_packet() need * to act on the return value, since they may need to do more send attempts. */ -int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *buf) +int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list) { - struct tipc_msg *msg = buf_msg(buf); + struct tipc_msg *msg = buf_msg(skb_peek(list)); uint psz = msg_size(msg); - uint qsz = link->out_queue_size; uint sndlim = link->queue_limit[0]; uint imp = tipc_msg_tot_importance(msg); uint mtu = link->max_pkt; @@ -740,71 +711,83 @@ int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *buf) uint seqno = link->next_out_no; uint bc_last_in = link->owner->bclink.last_in; struct tipc_media_addr *addr = &link->media_addr; - struct sk_buff *next = buf->next; + struct sk_buff_head *outqueue = &link->outqueue; + struct sk_buff *skb, *tmp; /* Match queue limits against msg importance: */ - if (unlikely(qsz >= link->queue_limit[imp])) - return tipc_link_cong(link, buf); + if (unlikely(skb_queue_len(outqueue) >= link->queue_limit[imp])) + return tipc_link_cong(link, list); /* Has valid packet limit been used ? 
*/ if (unlikely(psz > mtu)) { - kfree_skb_list(buf); + __skb_queue_purge(list); return -EMSGSIZE; } /* Prepare each packet for sending, and add to outqueue: */ - while (buf) { - next = buf->next; - msg = buf_msg(buf); + skb_queue_walk_safe(list, skb, tmp) { + __skb_unlink(skb, list); + msg = buf_msg(skb); msg_set_word(msg, 2, ((ack << 16) | mod(seqno))); msg_set_bcast_ack(msg, bc_last_in); - if (!link->first_out) { - link->first_out = buf; - } else if (qsz < sndlim) { - link->last_out->next = buf; - } else if (tipc_msg_bundle(link->last_out, buf, mtu)) { + if (skb_queue_len(outqueue) < sndlim) { + __skb_queue_tail(outqueue, skb); + tipc_bearer_send(link->bearer_id, skb, addr); + link->next_out = NULL; + link->unacked_window = 0; + } else if (tipc_msg_bundle(outqueue, skb, mtu)) { link->stats.sent_bundled++; - buf = next; - next = buf->next; continue; - } else if (tipc_msg_make_bundle(&buf, mtu, link->addr)) { + } else if (tipc_msg_make_bundle(outqueue, skb, mtu, + link->addr)) { link->stats.sent_bundled++; link->stats.sent_bundles++; - link->last_out->next = buf; if (!link->next_out) - link->next_out = buf; + link->next_out = skb_peek_tail(outqueue); } else { - link->last_out->next = buf; + __skb_queue_tail(outqueue, skb); if (!link->next_out) - link->next_out = buf; - } - - /* Send packet if possible: */ - if (likely(++qsz <= sndlim)) { - tipc_bearer_send(link->bearer_id, buf, addr); - link->next_out = next; - link->unacked_window = 0; + link->next_out = skb; } seqno++; - link->last_out = buf; - buf = next; } link->next_out_no = seqno; - link->out_queue_size = qsz; return 0; } +static void skb2list(struct sk_buff *skb, struct sk_buff_head *list) +{ + __skb_queue_head_init(list); + __skb_queue_tail(list, skb); +} + +static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb) +{ + struct sk_buff_head head; + + skb2list(skb, &head); + return __tipc_link_xmit(link, &head); +} + +int tipc_link_xmit_skb(struct sk_buff *skb, u32 dnode, u32 selector) +{ + struct sk_buff_head head; + + skb2list(skb, &head); + return tipc_link_xmit(&head, dnode, selector); +} + /** * tipc_link_xmit() is the general link level function for message sending - * @buf: chain of buffers containing message + * @list: chain of buffers containing message * @dsz: amount of user data to be sent * @dnode: address of destination node * @selector: a number used for deterministic link selection * Consumes the buffer chain, except when returning -ELINKCONG * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE */ -int tipc_link_xmit(struct sk_buff *buf, u32 dnode, u32 selector) +int tipc_link_xmit(struct sk_buff_head *list, u32 dnode, u32 selector) { struct tipc_link *link = NULL; struct tipc_node *node; @@ -815,17 +798,22 @@ int tipc_link_xmit(struct sk_buff *buf, u32 dnode, u32 selector) tipc_node_lock(node); link = node->active_links[selector & 1]; if (link) - rc = __tipc_link_xmit(link, buf); + rc = __tipc_link_xmit(link, list); tipc_node_unlock(node); } if (link) return rc; - if (likely(in_own_node(dnode))) - return tipc_sk_rcv(buf); + if (likely(in_own_node(dnode))) { + /* As a node local message chain never contains more than one + * buffer, we just need to dequeue one SKB buffer from the + * head list. 
+ */ + return tipc_sk_rcv(__skb_dequeue(list)); + } + __skb_queue_purge(list); - kfree_skb_list(buf); return rc; } @@ -839,17 +827,17 @@ int tipc_link_xmit(struct sk_buff *buf, u32 dnode, u32 selector) */ static void tipc_link_sync_xmit(struct tipc_link *link) { - struct sk_buff *buf; + struct sk_buff *skb; struct tipc_msg *msg; - buf = tipc_buf_acquire(INT_H_SIZE); - if (!buf) + skb = tipc_buf_acquire(INT_H_SIZE); + if (!skb) return; - msg = buf_msg(buf); + msg = buf_msg(skb); tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, link->addr); msg_set_last_bcast(msg, link->owner->bclink.acked); - __tipc_link_xmit(link, buf); + __tipc_link_xmit_skb(link, skb); } /* @@ -869,85 +857,46 @@ static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf) kfree_skb(buf); } +struct sk_buff *tipc_skb_queue_next(const struct sk_buff_head *list, + const struct sk_buff *skb) +{ + if (skb_queue_is_last(list, skb)) + return NULL; + return skb->next; +} + /* - * tipc_link_push_packet: Push one unsent packet to the media + * tipc_link_push_packets - push unsent packets to bearer + * + * Push out the unsent messages of a link where congestion + * has abated. Node is locked. + * + * Called with node locked */ -static u32 tipc_link_push_packet(struct tipc_link *l_ptr) -{ - struct sk_buff *buf = l_ptr->first_out; - u32 r_q_size = l_ptr->retransm_queue_size; - u32 r_q_head = l_ptr->retransm_queue_head; - - /* Step to position where retransmission failed, if any, */ - /* consider that buffers may have been released in meantime */ - if (r_q_size && buf) { - u32 last = lesser(mod(r_q_head + r_q_size), - link_last_sent(l_ptr)); - u32 first = buf_seqno(buf); - - while (buf && less(first, r_q_head)) { - first = mod(first + 1); - buf = buf->next; - } - l_ptr->retransm_queue_head = r_q_head = first; - l_ptr->retransm_queue_size = r_q_size = mod(last - first); - } - - /* Continue retransmission now, if there is anything: */ - if (r_q_size && buf) { - msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); - msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); - tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr); - l_ptr->retransm_queue_head = mod(++r_q_head); - l_ptr->retransm_queue_size = --r_q_size; - l_ptr->stats.retransmitted++; - return 0; - } - - /* Send deferred protocol message, if any: */ - buf = l_ptr->proto_msg_queue; - if (buf) { - msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); - msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); - tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr); - l_ptr->unacked_window = 0; - kfree_skb(buf); - l_ptr->proto_msg_queue = NULL; - return 0; - } +void tipc_link_push_packets(struct tipc_link *l_ptr) +{ + struct sk_buff_head *outqueue = &l_ptr->outqueue; + struct sk_buff *skb = l_ptr->next_out; + struct tipc_msg *msg; + u32 next, first; - /* Send one deferred data message, if send window not full: */ - buf = l_ptr->next_out; - if (buf) { - struct tipc_msg *msg = buf_msg(buf); - u32 next = msg_seqno(msg); - u32 first = buf_seqno(l_ptr->first_out); + skb_queue_walk_from(outqueue, skb) { + msg = buf_msg(skb); + next = msg_seqno(msg); + first = buf_seqno(skb_peek(outqueue)); if (mod(next - first) < l_ptr->queue_limit[0]) { msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); - tipc_bearer_send(l_ptr->bearer_id, buf, - &l_ptr->media_addr); if (msg_user(msg) == MSG_BUNDLER) - msg_set_type(msg, BUNDLE_CLOSED); - l_ptr->next_out = buf->next; - return 0; + 
TIPC_SKB_CB(skb)->bundling = false; + tipc_bearer_send(l_ptr->bearer_id, skb, + &l_ptr->media_addr); + l_ptr->next_out = tipc_skb_queue_next(outqueue, skb); + } else { + break; } } - return 1; -} - -/* - * push_queue(): push out the unsent messages of a link where - * congestion has abated. Node is locked - */ -void tipc_link_push_queue(struct tipc_link *l_ptr) -{ - u32 res; - - do { - res = tipc_link_push_packet(l_ptr); - } while (!res); } void tipc_link_reset_all(struct tipc_node *node) @@ -1011,20 +960,20 @@ static void link_retransmit_failure(struct tipc_link *l_ptr, } } -void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf, +void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb, u32 retransmits) { struct tipc_msg *msg; - if (!buf) + if (!skb) return; - msg = buf_msg(buf); + msg = buf_msg(skb); /* Detect repeated retransmit failures */ if (l_ptr->last_retransmitted == msg_seqno(msg)) { if (++l_ptr->stale_count > 100) { - link_retransmit_failure(l_ptr, buf); + link_retransmit_failure(l_ptr, skb); return; } } else { @@ -1032,38 +981,29 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf, l_ptr->stale_count = 1; } - while (retransmits && (buf != l_ptr->next_out) && buf) { - msg = buf_msg(buf); + skb_queue_walk_from(&l_ptr->outqueue, skb) { + if (!retransmits || skb == l_ptr->next_out) + break; + msg = buf_msg(skb); msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); - tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr); - buf = buf->next; + tipc_bearer_send(l_ptr->bearer_id, skb, &l_ptr->media_addr); retransmits--; l_ptr->stats.retransmitted++; } - - l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0; } -/** - * link_insert_deferred_queue - insert deferred messages back into receive chain - */ -static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr, - struct sk_buff *buf) +static void link_retrieve_defq(struct tipc_link *link, + struct sk_buff_head *list) { u32 seq_no; - if (l_ptr->oldest_deferred_in == NULL) - return buf; + if (skb_queue_empty(&link->deferred_queue)) + return; - seq_no = buf_seqno(l_ptr->oldest_deferred_in); - if (seq_no == mod(l_ptr->next_in_no)) { - l_ptr->newest_deferred_in->next = buf; - buf = l_ptr->oldest_deferred_in; - l_ptr->oldest_deferred_in = NULL; - l_ptr->deferred_inqueue_sz = 0; - } - return buf; + seq_no = buf_seqno(skb_peek(&link->deferred_queue)); + if (seq_no == mod(link->next_in_no)) + skb_queue_splice_tail_init(&link->deferred_queue, list); } /** @@ -1123,43 +1063,42 @@ static int link_recv_buf_validate(struct sk_buff *buf) /** * tipc_rcv - process TIPC packets/messages arriving from off-node - * @head: pointer to message buffer chain + * @skb: TIPC packet * @b_ptr: pointer to bearer message arrived on * * Invoked with no locks held. Bearer pointer must point to a valid bearer * structure (i.e. cannot be NULL), but bearer can be inactive. 
*/ -void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr) +void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr) { - while (head) { - struct tipc_node *n_ptr; - struct tipc_link *l_ptr; - struct sk_buff *crs; - struct sk_buff *buf = head; - struct tipc_msg *msg; - u32 seq_no; - u32 ackd; - u32 released = 0; + struct sk_buff_head head; + struct tipc_node *n_ptr; + struct tipc_link *l_ptr; + struct sk_buff *skb1, *tmp; + struct tipc_msg *msg; + u32 seq_no; + u32 ackd; + u32 released; - head = head->next; - buf->next = NULL; + skb2list(skb, &head); + while ((skb = __skb_dequeue(&head))) { /* Ensure message is well-formed */ - if (unlikely(!link_recv_buf_validate(buf))) + if (unlikely(!link_recv_buf_validate(skb))) goto discard; /* Ensure message data is a single contiguous unit */ - if (unlikely(skb_linearize(buf))) + if (unlikely(skb_linearize(skb))) goto discard; /* Handle arrival of a non-unicast link message */ - msg = buf_msg(buf); + msg = buf_msg(skb); if (unlikely(msg_non_seq(msg))) { if (msg_user(msg) == LINK_CONFIG) - tipc_disc_rcv(buf, b_ptr); + tipc_disc_rcv(skb, b_ptr); else - tipc_bclink_rcv(buf); + tipc_bclink_rcv(skb); continue; } @@ -1198,22 +1137,19 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr) if (n_ptr->bclink.recv_permitted) tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg)); - crs = l_ptr->first_out; - while ((crs != l_ptr->next_out) && - less_eq(buf_seqno(crs), ackd)) { - struct sk_buff *next = crs->next; - kfree_skb(crs); - crs = next; - released++; - } - if (released) { - l_ptr->first_out = crs; - l_ptr->out_queue_size -= released; + released = 0; + skb_queue_walk_safe(&l_ptr->outqueue, skb1, tmp) { + if (skb1 == l_ptr->next_out || + more(buf_seqno(skb1), ackd)) + break; + __skb_unlink(skb1, &l_ptr->outqueue); + kfree_skb(skb1); + released = 1; } /* Try sending any messages link endpoint has pending */ if (unlikely(l_ptr->next_out)) - tipc_link_push_queue(l_ptr); + tipc_link_push_packets(l_ptr); if (released && !skb_queue_empty(&l_ptr->waiting_sks)) { link_prepare_wakeup(l_ptr); @@ -1223,8 +1159,8 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr) /* Process the incoming packet */ if (unlikely(!link_working_working(l_ptr))) { if (msg_user(msg) == LINK_PROTOCOL) { - tipc_link_proto_rcv(l_ptr, buf); - head = link_insert_deferred_queue(l_ptr, head); + tipc_link_proto_rcv(l_ptr, skb); + link_retrieve_defq(l_ptr, &head); tipc_node_unlock(n_ptr); continue; } @@ -1234,8 +1170,7 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr) if (link_working_working(l_ptr)) { /* Re-insert buffer in front of queue */ - buf->next = head; - head = buf; + __skb_queue_head(&head, skb); tipc_node_unlock(n_ptr); continue; } @@ -1244,33 +1179,33 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr) /* Link is now in state WORKING_WORKING */ if (unlikely(seq_no != mod(l_ptr->next_in_no))) { - link_handle_out_of_seq_msg(l_ptr, buf); - head = link_insert_deferred_queue(l_ptr, head); + link_handle_out_of_seq_msg(l_ptr, skb); + link_retrieve_defq(l_ptr, &head); tipc_node_unlock(n_ptr); continue; } l_ptr->next_in_no++; - if (unlikely(l_ptr->oldest_deferred_in)) - head = link_insert_deferred_queue(l_ptr, head); + if (unlikely(!skb_queue_empty(&l_ptr->deferred_queue))) + link_retrieve_defq(l_ptr, &head); if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) { l_ptr->stats.sent_acks++; tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); } - if (tipc_link_prepare_input(l_ptr, &buf)) { + if (tipc_link_prepare_input(l_ptr, 
&skb)) { tipc_node_unlock(n_ptr); continue; } tipc_node_unlock(n_ptr); - msg = buf_msg(buf); - if (tipc_link_input(l_ptr, buf) != 0) + + if (tipc_link_input(l_ptr, skb) != 0) goto discard; continue; unlock_discard: tipc_node_unlock(n_ptr); discard: - kfree_skb(buf); + kfree_skb(skb); } } @@ -1353,48 +1288,37 @@ static int tipc_link_input(struct tipc_link *l, struct sk_buff *buf) * * Returns increase in queue length (i.e. 0 or 1) */ -u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail, - struct sk_buff *buf) +u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb) { - struct sk_buff *queue_buf; - struct sk_buff **prev; - u32 seq_no = buf_seqno(buf); - - buf->next = NULL; + struct sk_buff *skb1; + u32 seq_no = buf_seqno(skb); /* Empty queue ? */ - if (*head == NULL) { - *head = *tail = buf; + if (skb_queue_empty(list)) { + __skb_queue_tail(list, skb); return 1; } /* Last ? */ - if (less(buf_seqno(*tail), seq_no)) { - (*tail)->next = buf; - *tail = buf; + if (less(buf_seqno(skb_peek_tail(list)), seq_no)) { + __skb_queue_tail(list, skb); return 1; } /* Locate insertion point in queue, then insert; discard if duplicate */ - prev = head; - queue_buf = *head; - for (;;) { - u32 curr_seqno = buf_seqno(queue_buf); + skb_queue_walk(list, skb1) { + u32 curr_seqno = buf_seqno(skb1); if (seq_no == curr_seqno) { - kfree_skb(buf); + kfree_skb(skb); return 0; } if (less(seq_no, curr_seqno)) break; - - prev = &queue_buf->next; - queue_buf = queue_buf->next; } - buf->next = queue_buf; - *prev = buf; + __skb_queue_before(list, skb1, skb); return 1; } @@ -1424,15 +1348,14 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr, return; } - if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in, - &l_ptr->newest_deferred_in, buf)) { - l_ptr->deferred_inqueue_sz++; + if (tipc_link_defer_pkt(&l_ptr->deferred_queue, buf)) { l_ptr->stats.deferred_recv++; TIPC_SKB_CB(buf)->deferred = true; - if ((l_ptr->deferred_inqueue_sz % 16) == 1) + if ((skb_queue_len(&l_ptr->deferred_queue) % 16) == 1) tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); - } else + } else { l_ptr->stats.duplicates++; + } } /* @@ -1446,12 +1369,6 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg, u32 msg_size = sizeof(l_ptr->proto_msg); int r_flag; - /* Discard any previous message that was deferred due to congestion */ - if (l_ptr->proto_msg_queue) { - kfree_skb(l_ptr->proto_msg_queue); - l_ptr->proto_msg_queue = NULL; - } - /* Don't send protocol message during link changeover */ if (l_ptr->exp_msg_count) return; @@ -1474,8 +1391,8 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg, if (l_ptr->next_out) next_sent = buf_seqno(l_ptr->next_out); msg_set_next_sent(msg, next_sent); - if (l_ptr->oldest_deferred_in) { - u32 rec = buf_seqno(l_ptr->oldest_deferred_in); + if (!skb_queue_empty(&l_ptr->deferred_queue)) { + u32 rec = buf_seqno(skb_peek(&l_ptr->deferred_queue)); gap = mod(rec - mod(l_ptr->next_in_no)); } msg_set_seq_gap(msg, gap); @@ -1663,7 +1580,7 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf) } if (msg_seq_gap(msg)) { l_ptr->stats.recv_nacks++; - tipc_link_retransmit(l_ptr, l_ptr->first_out, + tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->outqueue), msg_seq_gap(msg)); } break; @@ -1682,7 +1599,7 @@ static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr, u32 selector) { struct tipc_link *tunnel; - struct sk_buff *buf; + struct sk_buff *skb; u32 length = msg_size(msg); tunnel = 
l_ptr->owner->active_links[selector & 1]; @@ -1691,14 +1608,14 @@ static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr, return; } msg_set_size(tunnel_hdr, length + INT_H_SIZE); - buf = tipc_buf_acquire(length + INT_H_SIZE); - if (!buf) { + skb = tipc_buf_acquire(length + INT_H_SIZE); + if (!skb) { pr_warn("%sunable to send tunnel msg\n", link_co_err); return; } - skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE); - skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length); - __tipc_link_xmit(tunnel, buf); + skb_copy_to_linear_data(skb, tunnel_hdr, INT_H_SIZE); + skb_copy_to_linear_data_offset(skb, INT_H_SIZE, msg, length); + __tipc_link_xmit_skb(tunnel, skb); } @@ -1710,10 +1627,10 @@ static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr, */ void tipc_link_failover_send_queue(struct tipc_link *l_ptr) { - u32 msgcount = l_ptr->out_queue_size; - struct sk_buff *crs = l_ptr->first_out; + u32 msgcount = skb_queue_len(&l_ptr->outqueue); struct tipc_link *tunnel = l_ptr->owner->active_links[0]; struct tipc_msg tunnel_hdr; + struct sk_buff *skb; int split_bundles; if (!tunnel) @@ -1724,14 +1641,12 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr) msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); msg_set_msgcnt(&tunnel_hdr, msgcount); - if (!l_ptr->first_out) { - struct sk_buff *buf; - - buf = tipc_buf_acquire(INT_H_SIZE); - if (buf) { - skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE); + if (skb_queue_empty(&l_ptr->outqueue)) { + skb = tipc_buf_acquire(INT_H_SIZE); + if (skb) { + skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE); msg_set_size(&tunnel_hdr, INT_H_SIZE); - __tipc_link_xmit(tunnel, buf); + __tipc_link_xmit_skb(tunnel, skb); } else { pr_warn("%sunable to send changeover msg\n", link_co_err); @@ -1742,8 +1657,8 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr) split_bundles = (l_ptr->owner->active_links[0] != l_ptr->owner->active_links[1]); - while (crs) { - struct tipc_msg *msg = buf_msg(crs); + skb_queue_walk(&l_ptr->outqueue, skb) { + struct tipc_msg *msg = buf_msg(skb); if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) { struct tipc_msg *m = msg_get_wrapped(msg); @@ -1761,7 +1676,6 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr) tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg, msg_link_selector(msg)); } - crs = crs->next; } } @@ -1777,17 +1691,16 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr) void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, struct tipc_link *tunnel) { - struct sk_buff *iter; + struct sk_buff *skb; struct tipc_msg tunnel_hdr; tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL, DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr); - msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size); + msg_set_msgcnt(&tunnel_hdr, skb_queue_len(&l_ptr->outqueue)); msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); - iter = l_ptr->first_out; - while (iter) { - struct sk_buff *outbuf; - struct tipc_msg *msg = buf_msg(iter); + skb_queue_walk(&l_ptr->outqueue, skb) { + struct sk_buff *outskb; + struct tipc_msg *msg = buf_msg(skb); u32 length = msg_size(msg); if (msg_user(msg) == MSG_BUNDLER) @@ -1795,19 +1708,18 @@ void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); /* Update */ msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); msg_set_size(&tunnel_hdr, length + INT_H_SIZE); - outbuf = tipc_buf_acquire(length + INT_H_SIZE); - if (outbuf == NULL) { + outskb = tipc_buf_acquire(length + INT_H_SIZE); + if (outskb == NULL) { pr_warn("%sunable to send duplicate 
msg\n", link_co_err); return; } - skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE); - skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data, + skb_copy_to_linear_data(outskb, &tunnel_hdr, INT_H_SIZE); + skb_copy_to_linear_data_offset(outskb, INT_H_SIZE, skb->data, length); - __tipc_link_xmit(tunnel, outbuf); + __tipc_link_xmit_skb(tunnel, outskb); if (!tipc_link_is_up(l_ptr)) return; - iter = iter->next; } } diff --git a/net/tipc/link.h b/net/tipc/link.h index f463e7be801..55812e87ca1 100644 --- a/net/tipc/link.h +++ b/net/tipc/link.h @@ -119,20 +119,13 @@ struct tipc_stats { * @max_pkt: current maximum packet size for this link * @max_pkt_target: desired maximum packet size for this link * @max_pkt_probes: # of probes based on current (max_pkt, max_pkt_target) - * @out_queue_size: # of messages in outbound message queue - * @first_out: ptr to first outbound message in queue - * @last_out: ptr to last outbound message in queue + * @outqueue: outbound message queue * @next_out_no: next sequence number to use for outbound messages * @last_retransmitted: sequence number of most recently retransmitted message * @stale_count: # of identical retransmit requests made by peer * @next_in_no: next sequence number to expect for inbound messages - * @deferred_inqueue_sz: # of messages in inbound message queue - * @oldest_deferred_in: ptr to first inbound message in queue - * @newest_deferred_in: ptr to last inbound message in queue + * @deferred_queue: deferred queue saved OOS b'cast message received from node * @unacked_window: # of inbound messages rx'd without ack'ing back to peer - * @proto_msg_queue: ptr to (single) outbound control message - * @retransm_queue_size: number of messages to retransmit - * @retransm_queue_head: sequence number of first message to retransmit * @next_out: ptr to first unsent outbound message in queue * @waiting_sks: linked list of sockets waiting for link congestion to abate * @long_msg_seq_no: next identifier to use for outbound fragmented messages @@ -176,24 +169,17 @@ struct tipc_link { u32 max_pkt_probes; /* Sending */ - u32 out_queue_size; - struct sk_buff *first_out; - struct sk_buff *last_out; + struct sk_buff_head outqueue; u32 next_out_no; u32 last_retransmitted; u32 stale_count; /* Reception */ u32 next_in_no; - u32 deferred_inqueue_sz; - struct sk_buff *oldest_deferred_in; - struct sk_buff *newest_deferred_in; + struct sk_buff_head deferred_queue; u32 unacked_window; /* Congestion handling */ - struct sk_buff *proto_msg_queue; - u32 retransm_queue_size; - u32 retransm_queue_head; struct sk_buff *next_out; struct sk_buff_head waiting_sks; @@ -227,18 +213,20 @@ struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, void tipc_link_reset_all(struct tipc_node *node); void tipc_link_reset(struct tipc_link *l_ptr); void tipc_link_reset_list(unsigned int bearer_id); -int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector); -int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *buf); +int tipc_link_xmit_skb(struct sk_buff *skb, u32 dest, u32 selector); +int tipc_link_xmit(struct sk_buff_head *list, u32 dest, u32 selector); +int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list); u32 tipc_link_get_max_pkt(u32 dest, u32 selector); void tipc_link_bundle_rcv(struct sk_buff *buf); void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob, u32 gap, u32 tolerance, u32 priority, u32 acked_mtu); -void tipc_link_push_queue(struct tipc_link *l_ptr); -u32 tipc_link_defer_pkt(struct sk_buff **head, struct 
sk_buff **tail, - struct sk_buff *buf); +void tipc_link_push_packets(struct tipc_link *l_ptr); +u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *buf); void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window); void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *start, u32 retransmits); +struct sk_buff *tipc_skb_queue_next(const struct sk_buff_head *list, + const struct sk_buff *skb); int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb); int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info); @@ -259,18 +247,14 @@ static inline u32 mod(u32 x) return x & 0xffffu; } -static inline int between(u32 lower, u32 upper, u32 n) +static inline int less_eq(u32 left, u32 right) { - if ((lower < n) && (n < upper)) - return 1; - if ((upper < lower) && ((n > lower) || (n < upper))) - return 1; - return 0; + return mod(right - left) < 32768u; } -static inline int less_eq(u32 left, u32 right) +static inline int more(u32 left, u32 right) { - return mod(right - left) < 32768u; + return !less_eq(left, right); } static inline int less(u32 left, u32 right) @@ -309,7 +293,7 @@ static inline int link_reset_reset(struct tipc_link *l_ptr) static inline int link_congested(struct tipc_link *l_ptr) { - return l_ptr->out_queue_size >= l_ptr->queue_limit[0]; + return skb_queue_len(&l_ptr->outqueue) >= l_ptr->queue_limit[0]; } #endif diff --git a/net/tipc/msg.c b/net/tipc/msg.c index 9155496b8a8..5b0659791c0 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c @@ -166,11 +166,12 @@ err: * @offset: Posision in iov to start copying from * @dsz: Total length of user data * @pktmax: Max packet size that can be used - * @chain: Buffer or chain of buffers to be returned to caller + * @list: Buffer or chain of buffers to be returned to caller + * * Returns message data size or errno: -ENOMEM, -EFAULT */ -int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, - int offset, int dsz, int pktmax , struct sk_buff **chain) +int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset, + int dsz, int pktmax, struct sk_buff_head *list) { int mhsz = msg_hdr_sz(mhdr); int msz = mhsz + dsz; @@ -179,22 +180,22 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int pktrem = pktmax; int drem = dsz; struct tipc_msg pkthdr; - struct sk_buff *buf, *prev; + struct sk_buff *skb; char *pktpos; int rc; - uint chain_sz = 0; + msg_set_size(mhdr, msz); /* No fragmentation needed? 
*/ if (likely(msz <= pktmax)) { - buf = tipc_buf_acquire(msz); - *chain = buf; - if (unlikely(!buf)) + skb = tipc_buf_acquire(msz); + if (unlikely(!skb)) return -ENOMEM; - skb_copy_to_linear_data(buf, mhdr, mhsz); - pktpos = buf->data + mhsz; - TIPC_SKB_CB(buf)->chain_sz = 1; - if (!dsz || !memcpy_fromiovecend(pktpos, m->msg_iov, offset, dsz)) + __skb_queue_tail(list, skb); + skb_copy_to_linear_data(skb, mhdr, mhsz); + pktpos = skb->data + mhsz; + if (!dsz || !memcpy_fromiovecend(pktpos, m->msg_iov, offset, + dsz)) return dsz; rc = -EFAULT; goto error; @@ -207,15 +208,15 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, msg_set_fragm_no(&pkthdr, pktno); /* Prepare first fragment */ - *chain = buf = tipc_buf_acquire(pktmax); - if (!buf) + skb = tipc_buf_acquire(pktmax); + if (!skb) return -ENOMEM; - chain_sz = 1; - pktpos = buf->data; - skb_copy_to_linear_data(buf, &pkthdr, INT_H_SIZE); + __skb_queue_tail(list, skb); + pktpos = skb->data; + skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE); pktpos += INT_H_SIZE; pktrem -= INT_H_SIZE; - skb_copy_to_linear_data_offset(buf, INT_H_SIZE, mhdr, mhsz); + skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz); pktpos += mhsz; pktrem -= mhsz; @@ -238,43 +239,41 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, pktsz = drem + INT_H_SIZE; else pktsz = pktmax; - prev = buf; - buf = tipc_buf_acquire(pktsz); - if (!buf) { + skb = tipc_buf_acquire(pktsz); + if (!skb) { rc = -ENOMEM; goto error; } - chain_sz++; - prev->next = buf; + __skb_queue_tail(list, skb); msg_set_type(&pkthdr, FRAGMENT); msg_set_size(&pkthdr, pktsz); msg_set_fragm_no(&pkthdr, ++pktno); - skb_copy_to_linear_data(buf, &pkthdr, INT_H_SIZE); - pktpos = buf->data + INT_H_SIZE; + skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE); + pktpos = skb->data + INT_H_SIZE; pktrem = pktsz - INT_H_SIZE; } while (1); - TIPC_SKB_CB(*chain)->chain_sz = chain_sz; - msg_set_type(buf_msg(buf), LAST_FRAGMENT); + msg_set_type(buf_msg(skb), LAST_FRAGMENT); return dsz; error: - kfree_skb_list(*chain); - *chain = NULL; + __skb_queue_purge(list); + __skb_queue_head_init(list); return rc; } /** * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one - * @bbuf: the existing buffer ("bundle") - * @buf: buffer to be appended + * @list: the buffer chain of the existing buffer ("bundle") + * @skb: buffer to be appended * @mtu: max allowable size for the bundle buffer * Consumes buffer if successful * Returns true if bundling could be performed, otherwise false */ -bool tipc_msg_bundle(struct sk_buff *bbuf, struct sk_buff *buf, u32 mtu) +bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu) { - struct tipc_msg *bmsg = buf_msg(bbuf); - struct tipc_msg *msg = buf_msg(buf); + struct sk_buff *bskb = skb_peek_tail(list); + struct tipc_msg *bmsg = buf_msg(bskb); + struct tipc_msg *msg = buf_msg(skb); unsigned int bsz = msg_size(bmsg); unsigned int msz = msg_size(msg); u32 start = align(bsz); @@ -289,35 +288,36 @@ bool tipc_msg_bundle(struct sk_buff *bbuf, struct sk_buff *buf, u32 mtu) return false; if (likely(msg_user(bmsg) != MSG_BUNDLER)) return false; - if (likely(msg_type(bmsg) != BUNDLE_OPEN)) + if (likely(!TIPC_SKB_CB(bskb)->bundling)) return false; - if (unlikely(skb_tailroom(bbuf) < (pad + msz))) + if (unlikely(skb_tailroom(bskb) < (pad + msz))) return false; if (unlikely(max < (start + msz))) return false; - skb_put(bbuf, pad + msz); - skb_copy_to_linear_data_offset(bbuf, start, buf->data, msz); + skb_put(bskb, pad + msz); + 
skb_copy_to_linear_data_offset(bskb, start, skb->data, msz); msg_set_size(bmsg, start + msz); msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1); - bbuf->next = buf->next; - kfree_skb(buf); + kfree_skb(skb); return true; } /** * tipc_msg_make_bundle(): Create bundle buf and append message to its tail - * @buf: buffer to be appended and replaced - * @mtu: max allowable size for the bundle buffer, inclusive header + * @list: the buffer chain + * @skb: buffer to be appended and replaced + * @mtu: max allowable size for the bundle buffer, inclusive header * @dnode: destination node for message. (Not always present in header) * Replaces buffer if successful * Returns true if success, otherwise false */ -bool tipc_msg_make_bundle(struct sk_buff **buf, u32 mtu, u32 dnode) +bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb, + u32 mtu, u32 dnode) { - struct sk_buff *bbuf; + struct sk_buff *bskb; struct tipc_msg *bmsg; - struct tipc_msg *msg = buf_msg(*buf); + struct tipc_msg *msg = buf_msg(skb); u32 msz = msg_size(msg); u32 max = mtu - INT_H_SIZE; @@ -330,20 +330,19 @@ bool tipc_msg_make_bundle(struct sk_buff **buf, u32 mtu, u32 dnode) if (msz > (max / 2)) return false; - bbuf = tipc_buf_acquire(max); - if (!bbuf) + bskb = tipc_buf_acquire(max); + if (!bskb) return false; - skb_trim(bbuf, INT_H_SIZE); - bmsg = buf_msg(bbuf); - tipc_msg_init(bmsg, MSG_BUNDLER, BUNDLE_OPEN, INT_H_SIZE, dnode); + skb_trim(bskb, INT_H_SIZE); + bmsg = buf_msg(bskb); + tipc_msg_init(bmsg, MSG_BUNDLER, 0, INT_H_SIZE, dnode); msg_set_seqno(bmsg, msg_seqno(msg)); msg_set_ack(bmsg, msg_ack(msg)); msg_set_bcast_ack(bmsg, msg_bcast_ack(msg)); - bbuf->next = (*buf)->next; - tipc_msg_bundle(bbuf, *buf, mtu); - *buf = bbuf; - return true; + TIPC_SKB_CB(bskb)->bundling = true; + __skb_queue_tail(list, bskb); + return tipc_msg_bundle(list, skb, mtu); } /** @@ -429,22 +428,23 @@ int tipc_msg_eval(struct sk_buff *buf, u32 *dnode) /* tipc_msg_reassemble() - clone a buffer chain of fragments and * reassemble the clones into one message */ -struct sk_buff *tipc_msg_reassemble(struct sk_buff *chain) +struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list) { - struct sk_buff *buf = chain; - struct sk_buff *frag = buf; + struct sk_buff *skb; + struct sk_buff *frag = NULL; struct sk_buff *head = NULL; int hdr_sz; /* Copy header if single buffer */ - if (!buf->next) { - hdr_sz = skb_headroom(buf) + msg_hdr_sz(buf_msg(buf)); - return __pskb_copy(buf, hdr_sz, GFP_ATOMIC); + if (skb_queue_len(list) == 1) { + skb = skb_peek(list); + hdr_sz = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb)); + return __pskb_copy(skb, hdr_sz, GFP_ATOMIC); } /* Clone all fragments and reassemble */ - while (buf) { - frag = skb_clone(buf, GFP_ATOMIC); + skb_queue_walk(list, skb) { + frag = skb_clone(skb, GFP_ATOMIC); if (!frag) goto error; frag->next = NULL; @@ -452,7 +452,6 @@ struct sk_buff *tipc_msg_reassemble(struct sk_buff *chain) break; if (!head) goto error; - buf = buf->next; } return frag; error: diff --git a/net/tipc/msg.h b/net/tipc/msg.h index d7d2ba2afe6..d5c83d7ecb4 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h @@ -464,11 +464,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m) #define FRAGMENT 1 #define LAST_FRAGMENT 2 -/* Bundling protocol message types - */ -#define BUNDLE_OPEN 0 -#define BUNDLE_CLOSED 1 - /* * Link management protocol message types */ @@ -739,13 +734,14 @@ struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz, int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf); 
-bool tipc_msg_bundle(struct sk_buff *bbuf, struct sk_buff *buf, u32 mtu); +bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu); -bool tipc_msg_make_bundle(struct sk_buff **buf, u32 mtu, u32 dnode); +bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb, + u32 mtu, u32 dnode); -int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, - int offset, int dsz, int mtu , struct sk_buff **chain); +int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset, + int dsz, int mtu, struct sk_buff_head *list); -struct sk_buff *tipc_msg_reassemble(struct sk_buff *chain); +struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list); #endif diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index 376d2bb51d8..56248db7527 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c @@ -114,9 +114,9 @@ static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest) return buf; } -void named_cluster_distribute(struct sk_buff *buf) +void named_cluster_distribute(struct sk_buff *skb) { - struct sk_buff *obuf; + struct sk_buff *oskb; struct tipc_node *node; u32 dnode; @@ -127,15 +127,15 @@ void named_cluster_distribute(struct sk_buff *buf) continue; if (!tipc_node_active_links(node)) continue; - obuf = skb_copy(buf, GFP_ATOMIC); - if (!obuf) + oskb = skb_copy(skb, GFP_ATOMIC); + if (!oskb) break; - msg_set_destnode(buf_msg(obuf), dnode); - tipc_link_xmit(obuf, dnode, dnode); + msg_set_destnode(buf_msg(oskb), dnode); + tipc_link_xmit_skb(oskb, dnode, dnode); } rcu_read_unlock(); - kfree_skb(buf); + kfree_skb(skb); } /** @@ -190,15 +190,15 @@ struct sk_buff *tipc_named_withdraw(struct publication *publ) /** * named_distribute - prepare name info for bulk distribution to another node - * @msg_list: list of messages (buffers) to be returned from this function + * @list: list of messages (buffers) to be returned from this function * @dnode: node to be updated * @pls: linked list of publication items to be packed into buffer chain */ -static void named_distribute(struct list_head *msg_list, u32 dnode, +static void named_distribute(struct sk_buff_head *list, u32 dnode, struct publ_list *pls) { struct publication *publ; - struct sk_buff *buf = NULL; + struct sk_buff *skb = NULL; struct distr_item *item = NULL; uint dsz = pls->size * ITEM_SIZE; uint msg_dsz = (tipc_node_get_mtu(dnode, 0) / ITEM_SIZE) * ITEM_SIZE; @@ -207,15 +207,15 @@ static void named_distribute(struct list_head *msg_list, u32 dnode, list_for_each_entry(publ, &pls->list, local_list) { /* Prepare next buffer: */ - if (!buf) { + if (!skb) { msg_rem = min_t(uint, rem, msg_dsz); rem -= msg_rem; - buf = named_prepare_buf(PUBLICATION, msg_rem, dnode); - if (!buf) { + skb = named_prepare_buf(PUBLICATION, msg_rem, dnode); + if (!skb) { pr_warn("Bulk publication failure\n"); return; } - item = (struct distr_item *)msg_data(buf_msg(buf)); + item = (struct distr_item *)msg_data(buf_msg(skb)); } /* Pack publication into message: */ @@ -225,8 +225,8 @@ static void named_distribute(struct list_head *msg_list, u32 dnode, /* Append full buffer to list: */ if (!msg_rem) { - list_add_tail((struct list_head *)buf, msg_list); - buf = NULL; + __skb_queue_tail(list, skb); + skb = NULL; } } } @@ -236,27 +236,57 @@ static void named_distribute(struct list_head *msg_list, u32 dnode, */ void tipc_named_node_up(u32 dnode) { - LIST_HEAD(msg_list); - struct sk_buff *buf_chain; + struct sk_buff_head head; + + __skb_queue_head_init(&head); read_lock_bh(&tipc_nametbl_lock); - named_distribute(&msg_list, 
dnode, &publ_cluster); - named_distribute(&msg_list, dnode, &publ_zone); + named_distribute(&head, dnode, &publ_cluster); + named_distribute(&head, dnode, &publ_zone); read_unlock_bh(&tipc_nametbl_lock); - /* Convert circular list to linear list and send: */ - buf_chain = (struct sk_buff *)msg_list.next; - ((struct sk_buff *)msg_list.prev)->next = NULL; - tipc_link_xmit(buf_chain, dnode, dnode); + tipc_link_xmit(&head, dnode, dnode); +} + +static void tipc_publ_subscribe(struct publication *publ, u32 addr) +{ + struct tipc_node *node; + + if (in_own_node(addr)) + return; + + node = tipc_node_find(addr); + if (!node) { + pr_warn("Node subscription rejected, unknown node 0x%x\n", + addr); + return; + } + + tipc_node_lock(node); + list_add_tail(&publ->nodesub_list, &node->publ_list); + tipc_node_unlock(node); +} + +static void tipc_publ_unsubscribe(struct publication *publ, u32 addr) +{ + struct tipc_node *node; + + node = tipc_node_find(addr); + if (!node) + return; + + tipc_node_lock(node); + list_del_init(&publ->nodesub_list); + tipc_node_unlock(node); } /** - * named_purge_publ - remove publication associated with a failed node + * tipc_publ_purge - remove publication associated with a failed node * * Invoked for each publication issued by a newly failed node. * Removes publication structure from name table & deletes it. */ -static void named_purge_publ(struct publication *publ) +static void tipc_publ_purge(struct publication *publ, u32 addr) { struct publication *p; @@ -264,7 +294,7 @@ static void named_purge_publ(struct publication *publ) p = tipc_nametbl_remove_publ(publ->type, publ->lower, publ->node, publ->ref, publ->key); if (p) - tipc_nodesub_unsubscribe(&p->subscr); + tipc_publ_unsubscribe(p, addr); write_unlock_bh(&tipc_nametbl_lock); if (p != publ) { @@ -277,6 +307,14 @@ static void named_purge_publ(struct publication *publ) kfree(p); } +void tipc_publ_notify(struct list_head *nsub_list, u32 addr) +{ + struct publication *publ, *tmp; + + list_for_each_entry_safe(publ, tmp, nsub_list, nodesub_list) + tipc_publ_purge(publ, addr); +} + /** * tipc_update_nametbl - try to process a nametable update and notify * subscribers @@ -294,9 +332,7 @@ static bool tipc_update_nametbl(struct distr_item *i, u32 node, u32 dtype) TIPC_CLUSTER_SCOPE, node, ntohl(i->ref), ntohl(i->key)); if (publ) { - tipc_nodesub_subscribe(&publ->subscr, node, publ, - (net_ev_handler) - named_purge_publ); + tipc_publ_subscribe(publ, node); return true; } } else if (dtype == WITHDRAWAL) { @@ -304,7 +340,7 @@ static bool tipc_update_nametbl(struct distr_item *i, u32 node, u32 dtype) node, ntohl(i->ref), ntohl(i->key)); if (publ) { - tipc_nodesub_unsubscribe(&publ->subscr); + tipc_publ_unsubscribe(publ, node); kfree(publ); return true; } diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h index b9e75feb343..cef55cedcfb 100644 --- a/net/tipc/name_distr.h +++ b/net/tipc/name_distr.h @@ -74,5 +74,6 @@ void tipc_named_node_up(u32 dnode); void tipc_named_rcv(struct sk_buff *buf); void tipc_named_reinit(void); void tipc_named_process_backlog(void); +void tipc_publ_notify(struct list_head *nsub_list, u32 addr); #endif diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 7cfb7a4aa58..772be1cd8bf 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -144,7 +144,7 @@ static struct publication *publ_create(u32 type, u32 lower, u32 upper, publ->key = key; INIT_LIST_HEAD(&publ->local_list); INIT_LIST_HEAD(&publ->pport_list); - INIT_LIST_HEAD(&publ->subscr.nodesub_list); + 
INIT_LIST_HEAD(&publ->nodesub_list); return publ; } diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h index b38ebecac76..c6287782665 100644 --- a/net/tipc/name_table.h +++ b/net/tipc/name_table.h @@ -37,8 +37,6 @@ #ifndef _TIPC_NAME_TABLE_H #define _TIPC_NAME_TABLE_H -#include "node_subscr.h" - struct tipc_subscription; struct tipc_port_list; @@ -56,7 +54,7 @@ struct tipc_port_list; * @node: network address of publishing port's node * @ref: publishing port * @key: publication key - * @subscr: subscription to "node down" event (for off-node publications only) + * @nodesub_list: subscription to "node down" event (off-node publication only) * @local_list: adjacent entries in list of publications made by this node * @pport_list: adjacent entries in list of publications made by this port * @node_list: adjacent matching name seq publications with >= node scope @@ -73,7 +71,7 @@ struct publication { u32 node; u32 ref; u32 key; - struct tipc_node_subscr subscr; + struct list_head nodesub_list; struct list_head local_list; struct list_head pport_list; struct list_head node_list; diff --git a/net/tipc/node.c b/net/tipc/node.c index 82e5edddc37..69b96be09a8 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -113,9 +113,10 @@ struct tipc_node *tipc_node_create(u32 addr) spin_lock_init(&n_ptr->lock); INIT_HLIST_NODE(&n_ptr->hash); INIT_LIST_HEAD(&n_ptr->list); - INIT_LIST_HEAD(&n_ptr->nsub); + INIT_LIST_HEAD(&n_ptr->publ_list); INIT_LIST_HEAD(&n_ptr->conn_sks); __skb_queue_head_init(&n_ptr->waiting_sks); + __skb_queue_head_init(&n_ptr->bclink.deferred_queue); hlist_add_head_rcu(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]); @@ -381,8 +382,7 @@ static void node_lost_contact(struct tipc_node *n_ptr) /* Flush broadcast link info associated with lost node */ if (n_ptr->bclink.recv_permitted) { - kfree_skb_list(n_ptr->bclink.deferred_head); - n_ptr->bclink.deferred_size = 0; + __skb_queue_purge(&n_ptr->bclink.deferred_queue); if (n_ptr->bclink.reasm_buf) { kfree_skb(n_ptr->bclink.reasm_buf); @@ -574,7 +574,7 @@ void tipc_node_unlock(struct tipc_node *node) skb_queue_splice_init(&node->waiting_sks, &waiting_sks); if (flags & TIPC_NOTIFY_NODE_DOWN) { - list_replace_init(&node->nsub, &nsub_list); + list_replace_init(&node->publ_list, &nsub_list); list_replace_init(&node->conn_sks, &conn_sks); } node->action_flags &= ~(TIPC_WAKEUP_USERS | TIPC_NOTIFY_NODE_DOWN | @@ -591,7 +591,7 @@ void tipc_node_unlock(struct tipc_node *node) tipc_node_abort_sock_conns(&conn_sks); if (!list_empty(&nsub_list)) - tipc_nodesub_notify(&nsub_list); + tipc_publ_notify(&nsub_list, addr); if (flags & TIPC_WAKEUP_BCAST_USERS) tipc_bclink_wakeup_users(); diff --git a/net/tipc/node.h b/net/tipc/node.h index 005fbcef321..cbe0e950f1c 100644 --- a/net/tipc/node.h +++ b/net/tipc/node.h @@ -37,7 +37,6 @@ #ifndef _TIPC_NODE_H #define _TIPC_NODE_H -#include "node_subscr.h" #include "addr.h" #include "net.h" #include "bearer.h" @@ -72,9 +71,7 @@ enum { * @last_in: sequence # of last in-sequence b'cast message received from node * @last_sent: sequence # of last b'cast message sent by node * @oos_state: state tracker for handling OOS b'cast messages - * @deferred_size: number of OOS b'cast messages in deferred queue - * @deferred_head: oldest OOS b'cast message received from node - * @deferred_tail: newest OOS b'cast message received from node + * @deferred_queue: deferred queue saved OOS b'cast message received from node * @reasm_buf: broadcast reassembly queue head from node * @recv_permitted: true if node is allowed to receive 
b'cast messages */ @@ -84,8 +81,7 @@ struct tipc_node_bclink { u32 last_sent; u32 oos_state; u32 deferred_size; - struct sk_buff *deferred_head; - struct sk_buff *deferred_tail; + struct sk_buff_head deferred_queue; struct sk_buff *reasm_buf; bool recv_permitted; }; @@ -104,7 +100,7 @@ struct tipc_node_bclink { * @link_cnt: number of links to node * @signature: node instance identifier * @link_id: local and remote bearer ids of changing link, if any - * @nsub: list of "node down" subscriptions monitoring node + * @publ_list: list of publications * @rcu: rcu struct for tipc_node */ struct tipc_node { @@ -121,7 +117,7 @@ struct tipc_node { int working_links; u32 signature; u32 link_id; - struct list_head nsub; + struct list_head publ_list; struct sk_buff_head waiting_sks; struct list_head conn_sks; struct rcu_head rcu; diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c deleted file mode 100644 index 2d13eea8574..00000000000 --- a/net/tipc/node_subscr.c +++ /dev/null @@ -1,96 +0,0 @@ -/* - * net/tipc/node_subscr.c: TIPC "node down" subscription handling - * - * Copyright (c) 1995-2006, Ericsson AB - * Copyright (c) 2005, 2010-2011, Wind River Systems - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include "core.h" -#include "node_subscr.h" -#include "node.h" - -/** - * tipc_nodesub_subscribe - create "node down" subscription for specified node - */ -void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr, - void *usr_handle, net_ev_handler handle_down) -{ - if (in_own_node(addr)) { - node_sub->node = NULL; - return; - } - - node_sub->node = tipc_node_find(addr); - if (!node_sub->node) { - pr_warn("Node subscription rejected, unknown node 0x%x\n", - addr); - return; - } - node_sub->handle_node_down = handle_down; - node_sub->usr_handle = usr_handle; - - tipc_node_lock(node_sub->node); - list_add_tail(&node_sub->nodesub_list, &node_sub->node->nsub); - tipc_node_unlock(node_sub->node); -} - -/** - * tipc_nodesub_unsubscribe - cancel "node down" subscription (if any) - */ -void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub) -{ - if (!node_sub->node) - return; - - tipc_node_lock(node_sub->node); - list_del_init(&node_sub->nodesub_list); - tipc_node_unlock(node_sub->node); -} - -/** - * tipc_nodesub_notify - notify subscribers that a node is unreachable - * - * Note: node is locked by caller - */ -void tipc_nodesub_notify(struct list_head *nsub_list) -{ - struct tipc_node_subscr *ns, *safe; - net_ev_handler handle_node_down; - - list_for_each_entry_safe(ns, safe, nsub_list, nodesub_list) { - handle_node_down = ns->handle_node_down; - if (handle_node_down) { - ns->handle_node_down = NULL; - handle_node_down(ns->usr_handle); - } - } -} diff --git a/net/tipc/node_subscr.h b/net/tipc/node_subscr.h deleted file mode 100644 index d91b8cc81e3..00000000000 --- a/net/tipc/node_subscr.h +++ /dev/null @@ -1,63 +0,0 @@ -/* - * net/tipc/node_subscr.h: Include file for TIPC "node down" subscription handling - * - * Copyright (c) 1995-2006, Ericsson AB - * Copyright (c) 2005, 2010-2011, Wind River Systems - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _TIPC_NODE_SUBSCR_H -#define _TIPC_NODE_SUBSCR_H - -#include "addr.h" - -typedef void (*net_ev_handler) (void *usr_handle); - -/** - * struct tipc_node_subscr - "node down" subscription entry - * @node: ptr to node structure of interest (or NULL, if none) - * @handle_node_down: routine to invoke when node fails - * @usr_handle: argument to pass to routine when node fails - * @nodesub_list: adjacent entries in list of subscriptions for the node - */ -struct tipc_node_subscr { - struct tipc_node *node; - net_ev_handler handle_node_down; - void *usr_handle; - struct list_head nodesub_list; -}; - -void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr, - void *usr_handle, net_ev_handler handle_down); -void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub); -void tipc_nodesub_notify(struct list_head *nsub_list); - -#endif diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 341fbd1b5f7..9658d9b6387 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -244,12 +244,12 @@ static void tsk_advance_rx_queue(struct sock *sk) */ static void tsk_rej_rx_queue(struct sock *sk) { - struct sk_buff *buf; + struct sk_buff *skb; u32 dnode; - while ((buf = __skb_dequeue(&sk->sk_receive_queue))) { - if (tipc_msg_reverse(buf, &dnode, TIPC_ERR_NO_PORT)) - tipc_link_xmit(buf, dnode, 0); + while ((skb = __skb_dequeue(&sk->sk_receive_queue))) { + if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT)) + tipc_link_xmit_skb(skb, dnode, 0); } } @@ -462,7 +462,7 @@ static int tipc_release(struct socket *sock) { struct sock *sk = sock->sk; struct tipc_sock *tsk; - struct sk_buff *buf; + struct sk_buff *skb; u32 dnode; /* @@ -481,11 +481,11 @@ static int tipc_release(struct socket *sock) */ dnode = tsk_peer_node(tsk); while (sock->state != SS_DISCONNECTING) { - buf = __skb_dequeue(&sk->sk_receive_queue); - if (buf == NULL) + skb = __skb_dequeue(&sk->sk_receive_queue); + if (skb == NULL) break; - if (TIPC_SKB_CB(buf)->handle != NULL) - kfree_skb(buf); + if (TIPC_SKB_CB(skb)->handle != NULL) + kfree_skb(skb); else { if ((sock->state == SS_CONNECTING) || (sock->state == SS_CONNECTED)) { @@ -493,8 +493,8 @@ static int tipc_release(struct socket *sock) tsk->connected = 0; tipc_node_remove_conn(dnode, tsk->ref); } - if (tipc_msg_reverse(buf, &dnode, TIPC_ERR_NO_PORT)) - tipc_link_xmit(buf, dnode, 0); + if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT)) + tipc_link_xmit_skb(skb, dnode, 0); } } @@ -502,12 +502,12 @@ static int tipc_release(struct socket *sock) tipc_sk_ref_discard(tsk->ref); k_cancel_timer(&tsk->timer); if (tsk->connected) { - buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG, + skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode, tipc_own_addr, tsk_peer_port(tsk), tsk->ref, TIPC_ERR_NO_PORT); - if (buf) - tipc_link_xmit(buf, dnode, tsk->ref); + if (skb) + tipc_link_xmit_skb(skb, dnode, tsk->ref); tipc_node_remove_conn(dnode, tsk->ref); } k_term_timer(&tsk->timer); @@ -712,7 +712,7 @@ 
static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq, { struct sock *sk = sock->sk; struct tipc_msg *mhdr = &tipc_sk(sk)->phdr; - struct sk_buff *buf; + struct sk_buff_head head; uint mtu; int rc; @@ -727,12 +727,13 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq, new_mtu: mtu = tipc_bclink_get_mtu(); - rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &buf); + __skb_queue_head_init(&head); + rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &head); if (unlikely(rc < 0)) return rc; do { - rc = tipc_bclink_xmit(buf); + rc = tipc_bclink_xmit(&head); if (likely(rc >= 0)) { rc = dsz; break; @@ -744,7 +745,7 @@ new_mtu: tipc_sk(sk)->link_cong = 1; rc = tipc_wait_for_sndmsg(sock, &timeo); if (rc) - kfree_skb_list(buf); + __skb_queue_purge(&head); } while (!rc); return rc; } @@ -906,7 +907,8 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock, struct tipc_sock *tsk = tipc_sk(sk); struct tipc_msg *mhdr = &tsk->phdr; u32 dnode, dport; - struct sk_buff *buf; + struct sk_buff_head head; + struct sk_buff *skb; struct tipc_name_seq *seq = &dest->addr.nameseq; u32 mtu; long timeo; @@ -981,13 +983,15 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock, new_mtu: mtu = tipc_node_get_mtu(dnode, tsk->ref); - rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &buf); + __skb_queue_head_init(&head); + rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &head); if (rc < 0) goto exit; do { - TIPC_SKB_CB(buf)->wakeup_pending = tsk->link_cong; - rc = tipc_link_xmit(buf, dnode, tsk->ref); + skb = skb_peek(&head); + TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong; + rc = tipc_link_xmit(&head, dnode, tsk->ref); if (likely(rc >= 0)) { if (sock->state != SS_READY) sock->state = SS_CONNECTING; @@ -1001,7 +1005,7 @@ new_mtu: tsk->link_cong = 1; rc = tipc_wait_for_sndmsg(sock, &timeo); if (rc) - kfree_skb_list(buf); + __skb_queue_purge(&head); } while (!rc); exit: if (iocb) @@ -1058,7 +1062,7 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock, struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); struct tipc_msg *mhdr = &tsk->phdr; - struct sk_buff *buf; + struct sk_buff_head head; DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); u32 ref = tsk->ref; int rc = -EINVAL; @@ -1093,12 +1097,13 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock, next: mtu = tsk->max_pkt; send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE); - rc = tipc_msg_build(mhdr, m, sent, send, mtu, &buf); + __skb_queue_head_init(&head); + rc = tipc_msg_build(mhdr, m, sent, send, mtu, &head); if (unlikely(rc < 0)) goto exit; do { if (likely(!tsk_conn_cong(tsk))) { - rc = tipc_link_xmit(buf, dnode, ref); + rc = tipc_link_xmit(&head, dnode, ref); if (likely(!rc)) { tsk->sent_unacked++; sent += send; @@ -1116,7 +1121,7 @@ next: } rc = tipc_wait_for_sndpkt(sock, &timeo); if (rc) - kfree_skb_list(buf); + __skb_queue_purge(&head); } while (!rc); exit: if (iocb) @@ -1261,20 +1266,20 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg, static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack) { - struct sk_buff *buf = NULL; + struct sk_buff *skb = NULL; struct tipc_msg *msg; u32 peer_port = tsk_peer_port(tsk); u32 dnode = tsk_peer_node(tsk); if (!tsk->connected) return; - buf = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, dnode, + skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, dnode, tipc_own_addr, peer_port, tsk->ref, TIPC_OK); - if (!buf) + if (!skb) return; - msg = buf_msg(buf); + msg = buf_msg(skb); 
msg_set_msgcnt(msg, ack); - tipc_link_xmit(buf, dnode, msg_link_selector(msg)); + tipc_link_xmit_skb(skb, dnode, msg_link_selector(msg)); } static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) @@ -1729,20 +1734,20 @@ static int filter_rcv(struct sock *sk, struct sk_buff *buf) /** * tipc_backlog_rcv - handle incoming message from backlog queue * @sk: socket - * @buf: message + * @skb: message * * Caller must hold socket lock, but not port lock. * * Returns 0 */ -static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *buf) +static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb) { int rc; u32 onode; struct tipc_sock *tsk = tipc_sk(sk); - uint truesize = buf->truesize; + uint truesize = skb->truesize; - rc = filter_rcv(sk, buf); + rc = filter_rcv(sk, skb); if (likely(!rc)) { if (atomic_read(&tsk->dupl_rcvcnt) < TIPC_CONN_OVERLOAD_LIMIT) @@ -1750,25 +1755,25 @@ static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *buf) return 0; } - if ((rc < 0) && !tipc_msg_reverse(buf, &onode, -rc)) + if ((rc < 0) && !tipc_msg_reverse(skb, &onode, -rc)) return 0; - tipc_link_xmit(buf, onode, 0); + tipc_link_xmit_skb(skb, onode, 0); return 0; } /** * tipc_sk_rcv - handle incoming message - * @buf: buffer containing arriving message + * @skb: buffer containing arriving message * Consumes buffer * Returns 0 if success, or errno: -EHOSTUNREACH */ -int tipc_sk_rcv(struct sk_buff *buf) +int tipc_sk_rcv(struct sk_buff *skb) { struct tipc_sock *tsk; struct sock *sk; - u32 dport = msg_destport(buf_msg(buf)); + u32 dport = msg_destport(buf_msg(skb)); int rc = TIPC_OK; uint limit; u32 dnode; @@ -1776,7 +1781,7 @@ int tipc_sk_rcv(struct sk_buff *buf) /* Validate destination and message */ tsk = tipc_sk_get(dport); if (unlikely(!tsk)) { - rc = tipc_msg_eval(buf, &dnode); + rc = tipc_msg_eval(skb, &dnode); goto exit; } sk = &tsk->sk; @@ -1785,12 +1790,12 @@ int tipc_sk_rcv(struct sk_buff *buf) spin_lock_bh(&sk->sk_lock.slock); if (!sock_owned_by_user(sk)) { - rc = filter_rcv(sk, buf); + rc = filter_rcv(sk, skb); } else { if (sk->sk_backlog.len == 0) atomic_set(&tsk->dupl_rcvcnt, 0); - limit = rcvbuf_limit(sk, buf) + atomic_read(&tsk->dupl_rcvcnt); - if (sk_add_backlog(sk, buf, limit)) + limit = rcvbuf_limit(sk, skb) + atomic_read(&tsk->dupl_rcvcnt); + if (sk_add_backlog(sk, skb, limit)) rc = -TIPC_ERR_OVERLOAD; } spin_unlock_bh(&sk->sk_lock.slock); @@ -1798,10 +1803,10 @@ int tipc_sk_rcv(struct sk_buff *buf) if (likely(!rc)) return 0; exit: - if ((rc < 0) && !tipc_msg_reverse(buf, &dnode, -rc)) + if ((rc < 0) && !tipc_msg_reverse(skb, &dnode, -rc)) return -EHOSTUNREACH; - tipc_link_xmit(buf, dnode, 0); + tipc_link_xmit_skb(skb, dnode, 0); return (rc < 0) ? 
-EHOSTUNREACH : 0; } @@ -2059,7 +2064,7 @@ static int tipc_shutdown(struct socket *sock, int how) { struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); - struct sk_buff *buf; + struct sk_buff *skb; u32 dnode; int res; @@ -2074,23 +2079,23 @@ static int tipc_shutdown(struct socket *sock, int how) restart: /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */ - buf = __skb_dequeue(&sk->sk_receive_queue); - if (buf) { - if (TIPC_SKB_CB(buf)->handle != NULL) { - kfree_skb(buf); + skb = __skb_dequeue(&sk->sk_receive_queue); + if (skb) { + if (TIPC_SKB_CB(skb)->handle != NULL) { + kfree_skb(skb); goto restart; } - if (tipc_msg_reverse(buf, &dnode, TIPC_CONN_SHUTDOWN)) - tipc_link_xmit(buf, dnode, tsk->ref); + if (tipc_msg_reverse(skb, &dnode, TIPC_CONN_SHUTDOWN)) + tipc_link_xmit_skb(skb, dnode, tsk->ref); tipc_node_remove_conn(dnode, tsk->ref); } else { dnode = tsk_peer_node(tsk); - buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, + skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode, tipc_own_addr, tsk_peer_port(tsk), tsk->ref, TIPC_CONN_SHUTDOWN); - tipc_link_xmit(buf, dnode, tsk->ref); + tipc_link_xmit_skb(skb, dnode, tsk->ref); } tsk->connected = 0; sock->state = SS_DISCONNECTING; @@ -2119,7 +2124,7 @@ static void tipc_sk_timeout(unsigned long ref) { struct tipc_sock *tsk; struct sock *sk; - struct sk_buff *buf = NULL; + struct sk_buff *skb = NULL; u32 peer_port, peer_node; tsk = tipc_sk_get(ref); @@ -2137,20 +2142,20 @@ static void tipc_sk_timeout(unsigned long ref) if (tsk->probing_state == TIPC_CONN_PROBING) { /* Previous probe not answered -> self abort */ - buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG, + skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG, SHORT_H_SIZE, 0, tipc_own_addr, peer_node, ref, peer_port, TIPC_ERR_NO_PORT); } else { - buf = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, + skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0, peer_node, tipc_own_addr, peer_port, ref, TIPC_OK); tsk->probing_state = TIPC_CONN_PROBING; k_start_timer(&tsk->timer, tsk->probing_interval); } bh_unlock_sock(sk); - if (buf) - tipc_link_xmit(buf, peer_node, ref); + if (skb) + tipc_link_xmit_skb(skb, peer_node, ref); exit: tipc_sk_put(tsk); }
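
A recurring conversion in this patch is from open-coded sk_buff chains (a bare ->next pointer plus hand-maintained head/tail/size fields, as in the removed bclink.deferred_head/deferred_tail pair) to struct sk_buff_head, whose sentinel-based circular list gives O(1) tail insertion and one-call purging. The following userspace sketch models only the shape of that API: struct buf stands in for struct sk_buff, and the helpers mirror __skb_queue_head_init(), __skb_queue_tail(), skb_queue_walk() and __skb_queue_purge() in behavior, but carry none of the kernel's locking, reference counting or truesize accounting.

#include <stdio.h>
#include <stdlib.h>

/* Userspace stand-in for struct sk_buff: only the queue links matter here. */
struct buf {
	struct buf *next, *prev;
	int seqno;
};

/* Stand-in for struct sk_buff_head: the head is a sentinel whose first two
 * fields must line up with struct buf's link fields, so it can be cast. */
struct buf_head {
	struct buf *next, *prev;
};

static void queue_head_init(struct buf_head *h)	/* ~__skb_queue_head_init() */
{
	h->next = h->prev = (struct buf *)h;
}

static void queue_tail(struct buf_head *h, struct buf *b) /* ~__skb_queue_tail() */
{
	b->prev = h->prev;
	b->next = (struct buf *)h;
	h->prev->next = b;
	h->prev = b;
}

static void queue_purge(struct buf_head *h)	/* ~__skb_queue_purge() */
{
	struct buf *b = h->next;

	while (b != (struct buf *)h) {
		struct buf *tmp = b->next;
		free(b);
		b = tmp;
	}
	queue_head_init(h);
}

int main(void)
{
	struct buf_head head;
	struct buf *b;

	queue_head_init(&head);
	for (int i = 1; i <= 3; i++) {	/* build a bulk "message chain" */
		b = calloc(1, sizeof(*b));
		if (!b)
			return 1;
		b->seqno = i;
		queue_tail(&head, b);
	}
	/* Walk in order, the way skb_queue_walk() would */
	for (b = head.next; b != (struct buf *)&head; b = b->next)
		printf("seqno %d\n", b->seqno);
	queue_purge(&head);		/* e.g. on a failed or aborted send */
	return 0;
}

The cast between head and node works because the head's two link fields share the layout of a node's links; struct sk_buff_head relies on the same trick, which is why a queue can be purged or spliced without per-element bookkeeping.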
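
named_distribute() above packs fixed-size distr_item records into buffers no larger than the link MTU, rounding the per-buffer payload down to a whole number of items so that no record is ever split across buffers. The arithmetic in isolation, with ITEM_SIZE assumed to be 20 bytes (five 32-bit fields) purely for illustration:

#include <stdio.h>

#define ITEM_SIZE 20u	/* assumption: wire size of one distr_item */

int main(void)
{
	unsigned int mtu = 1500, total_items = 300;
	unsigned int dsz = total_items * ITEM_SIZE;		/* bytes to send  */
	unsigned int msg_dsz = (mtu / ITEM_SIZE) * ITEM_SIZE;	/* per-buffer max */
	unsigned int rem = dsz, msg_rem, bufs = 0;

	while (rem) {
		msg_rem = rem < msg_dsz ? rem : msg_dsz;	/* min_t(uint, ...) */
		rem -= msg_rem;
		bufs++;
		printf("buffer %u carries %u items (%u bytes)\n",
		       bufs, msg_rem / ITEM_SIZE, msg_rem);
	}
	return 0;
}

With mtu = 1500 this emits four full 1500-byte buffers for 300 items; a smaller remainder would simply yield a shorter final buffer, matching the msg_rem = min_t(uint, rem, msg_dsz) step in the patch.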
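
The deletion of node_subscr.[ch] replaces a callback-per-subscription scheme (struct tipc_node_subscr carrying a handle_node_down function pointer) with plain intrusive list membership: an off-node publication links its nodesub_list field onto the owning node's publ_list, and tipc_publ_notify() walks that list when contact with the node is lost. A minimal userspace model of the pattern follows, with hand-rolled stand-ins for list_head and container_of() (names borrowed from the kernel, implementations simplified and unlocked):

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct publication {		/* trimmed stand-in for the real struct */
	unsigned int type, lower, upper;
	struct list_head nodesub_list;	/* links onto node->publ_list */
};

/* Expanded form of the list_for_each_entry_safe() walk used by
 * tipc_publ_notify(): save the next link before unlinking and freeing. */
static void publ_notify(struct list_head *publ_list)
{
	struct list_head *pos = publ_list->next, *tmp;

	while (pos != publ_list) {
		tmp = pos->next;
		struct publication *p =
			container_of(pos, struct publication, nodesub_list);
		printf("purging publication {%u,%u,%u}\n",
		       p->type, p->lower, p->upper);
		list_del_init(pos);
		free(p);
		pos = tmp;
	}
}

int main(void)
{
	struct list_head publ_list;

	INIT_LIST_HEAD(&publ_list);
	for (unsigned int i = 0; i < 3; i++) {
		struct publication *p = malloc(sizeof(*p));
		if (!p)
			return 1;
		p->type = 1000 + i;
		p->lower = p->upper = i;
		list_add_tail(&p->nodesub_list, &publ_list);
	}
	publ_notify(&publ_list);	/* as if the node had just failed */
	return 0;
}

Saving pos->next before unlinking and freeing the current entry is the essence of list_for_each_entry_safe(), and is what allows tipc_publ_purge() to delete publications during the traversal itself.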
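
On the socket send path the new pattern is: build the whole fragment chain into a local sk_buff_head, then loop on the link-level xmit, sleeping while the link is congested and purging the queue only if the wait itself fails (timeout or signal), as in tipc_sendmcast() and tipc_sendmsg() above. The sketch below reproduces just that control flow; fake_link_xmit() and fake_wait_for_sndmsg() are invented stand-ins, an integer counter stands in for the queue, and EAGAIN substitutes for TIPC's internal link-congestion code.

#include <stdio.h>
#include <errno.h>

#define ELINKCONG EAGAIN	/* stand-in: the real -ELINKCONG is TIPC-internal */

static int congestion_left = 2;	/* pretend the link pushes back twice */

/* Fake link layer: on success it takes ownership of the queued fragments. */
static int fake_link_xmit(int *nr_queued)
{
	if (congestion_left-- > 0)
		return -ELINKCONG;
	*nr_queued = 0;
	return 0;
}

/* Fake wait: returns 0 to keep retrying, nonzero on timeout/interruption. */
static int fake_wait_for_sndmsg(void)
{
	printf("link congested, waiting...\n");
	return 0;
}

static int send_with_retry(void)
{
	int nr_queued = 3;	/* stands in for the freshly built sk_buff_head */
	int rc;

	do {
		rc = fake_link_xmit(&nr_queued);
		if (rc != -ELINKCONG)
			break;		/* delivered, or a hard error */
		rc = fake_wait_for_sndmsg();
		if (rc)
			nr_queued = 0;	/* wait failed: purge the unsent queue */
	} while (!rc);

	printf("rc=%d, fragments still queued: %d\n", rc, nr_queued);
	return rc;
}

int main(void)
{
	return send_with_retry();
}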