author | Allan Stephens <allan.stephens@windriver.com> | 2006-06-25 23:40:01 -0700
---|---|---
committer | David S. Miller <davem@davemloft.net> | 2006-06-25 23:40:01 -0700
commit | d356eeba8e34786621d85468e5176052813a3059 (patch) |
tree | 0f802091559dfafdc62fa6c3059fcebff60b2bc7 /net/tipc/bcast.c |
parent | 260082471ed3f6d751e9767e5a278d4e495d83f7 (diff) |
[TIPC]: Multicast link failure now resets all links to "nacking" node.
This fix prevents the node from crashing.
Signed-off-by: Allan Stephens <allan.stephens@windriver.com>
Signed-off-by: Per Liden <per.liden@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
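
The core of the change below is a locking and bookkeeping shift: bc_lock is no longer taken inside bclink_retransmit_pkt() itself but by its caller in tipc_bclink_recv_pkt(), which also records the nacking node in bcl->owner->next around the retransmit (presumably so the failure path knows which node's links to reset, per the subject line). A minimal userspace sketch of that caller-holds-the-lock pattern follows; the names are hypothetical and a pthread mutex stands in for the kernel spinlock, so this is an illustration, not the actual TIPC code:

```c
#include <pthread.h>
#include <stdio.h>

/* Userspace model only: a pthread mutex stands in for the kernel's bc_lock. */
static pthread_mutex_t bc_lock = PTHREAD_MUTEX_INITIALIZER;
static int retransmit_target = -1;	/* models the bcl->owner->next bookkeeping */

/* Like the patched bclink_retransmit_pkt(): assumes the caller holds bc_lock. */
static void retransmit_range(int after, int to)
{
	for (int seq = after + 1; seq <= to; seq++)
		printf("retransmitting packet %d for node %d\n", seq, retransmit_target);
}

/* Caller takes the lock, remembers the requestor, retransmits, then cleans up. */
static void handle_nack(int node, int gap_after, int gap_to)
{
	pthread_mutex_lock(&bc_lock);
	retransmit_target = node;	/* remember requestor */
	retransmit_range(gap_after, gap_to);
	retransmit_target = -1;
	pthread_mutex_unlock(&bc_lock);
}

int main(void)
{
	handle_nack(3, 10, 12);		/* node 3 NACKed packets 11 and 12 */
	return 0;
}
```

Keeping the lock in the caller lets one critical section cover the stats update, the owner->next bookkeeping, and the retransmit call that bracket each other in the diff below.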
Diffstat (limited to 'net/tipc/bcast.c')
-rw-r--r-- | net/tipc/bcast.c | 32 |
1 file changed, 25 insertions, 7 deletions
```diff
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 2c4ecbe5008..00691b7c35f 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -49,13 +49,19 @@
 #include "name_table.h"
 #include "bcast.h"
 
-
 #define MAX_PKT_DEFAULT_MCAST 1500	/* bcast link max packet size (fixed) */
 
 #define BCLINK_WIN_DEFAULT 20		/* bcast link window size (default) */
 
 #define BCLINK_LOG_BUF_SIZE 0
 
+/*
+ * Loss rate for incoming broadcast frames; used to test retransmission code.
+ * Set to N to cause every N'th frame to be discarded; 0 => don't discard any.
+ */
+
+#define TIPC_BCAST_LOSS_RATE 0
+
 /**
  * struct bcbearer_pair - a pair of bearers used by broadcast link
  * @primary: pointer to primary bearer
@@ -165,21 +171,18 @@ static int bclink_ack_allowed(u32 n)
  * @after: sequence number of last packet to *not* retransmit
  * @to: sequence number of last packet to retransmit
  * 
- * Called with 'node' locked, bc_lock unlocked
+ * Called with bc_lock locked
  */
 
 static void bclink_retransmit_pkt(u32 after, u32 to)
 {
 	struct sk_buff *buf;
 
-	spin_lock_bh(&bc_lock);
 	buf = bcl->first_out;
 	while (buf && less_eq(buf_seqno(buf), after)) {
 		buf = buf->next;
 	}
-	if (buf != NULL)
-		tipc_link_retransmit(bcl, buf, mod(to - after));
-	spin_unlock_bh(&bc_lock);
+	tipc_link_retransmit(bcl, buf, mod(to - after));
 }
 
 /**
@@ -399,7 +402,10 @@ int tipc_bclink_send_msg(struct sk_buff *buf)
  */
 
 void tipc_bclink_recv_pkt(struct sk_buff *buf)
-{
+{
+#if (TIPC_BCAST_LOSS_RATE)
+	static int rx_count = 0;
+#endif
 	struct tipc_msg *msg = buf_msg(buf);
 	struct node* node = tipc_node_find(msg_prevnode(msg));
 	u32 next_in;
@@ -420,9 +426,13 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
 		tipc_node_lock(node);
 		tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
 		tipc_node_unlock(node);
+		spin_lock_bh(&bc_lock);
 		bcl->stats.recv_nacks++;
+		bcl->owner->next = node;   /* remember requestor */
 		bclink_retransmit_pkt(msg_bcgap_after(msg),
 				      msg_bcgap_to(msg));
+		bcl->owner->next = NULL;
+		spin_unlock_bh(&bc_lock);
 	} else {
 		tipc_bclink_peek_nack(msg_destnode(msg),
 				      msg_bcast_tag(msg),
@@ -433,6 +443,14 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
 		return;
 	}
 
+#if (TIPC_BCAST_LOSS_RATE)
+	if (++rx_count == TIPC_BCAST_LOSS_RATE) {
+		rx_count = 0;
+		buf_discard(buf);
+		return;
+	}
+#endif
+
 	tipc_node_lock(node);
 receive:
 	deferred = node->bclink.deferred_head;
```
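
The TIPC_BCAST_LOSS_RATE hook added above is a compile-time test aid: set it to N and every N'th incoming broadcast frame is silently discarded, forcing the NACK and retransmission path to run. Below is a small standalone C sketch of the same counter-based drop logic; LOSS_RATE and drop_frame() are made-up illustrative names, not part of TIPC:

```c
#include <stdbool.h>
#include <stdio.h>

#define LOSS_RATE 4	/* drop every 4th frame; 0 disables dropping, as in the patch */

/* Mirrors the "++rx_count == TIPC_BCAST_LOSS_RATE" check added to
 * tipc_bclink_recv_pkt(): returns true when this frame should be discarded. */
static bool drop_frame(void)
{
#if (LOSS_RATE)
	static int rx_count;

	if (++rx_count == LOSS_RATE) {
		rx_count = 0;
		return true;
	}
#endif
	return false;
}

int main(void)
{
	for (int seq = 1; seq <= 10; seq++)
		printf("frame %2d: %s\n", seq,
		       drop_frame() ? "discarded (simulated loss)" : "delivered");
	return 0;
}
```

In the committed code the rate defaults to 0, so no frames are dropped unless a developer rebuilds the module with a nonzero value.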