Diffstat (limited to 'net/tipc/bcast.c')
-rw-r--r--  net/tipc/bcast.c | 499
1 file changed, 211 insertions(+), 288 deletions(-)
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index a9e174fc0f9..3e41704832d 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -1,7 +1,7 @@
/*
* net/tipc/bcast.c: TIPC broadcast code
*
- * Copyright (c) 2004-2006, 2014, Ericsson AB
+ * Copyright (c) 2004-2006, 2014-2015, Ericsson AB
* Copyright (c) 2004, Intel Corporation.
* Copyright (c) 2005, 2010-2011, Wind River Systems
* All rights reserved.
@@ -35,77 +35,14 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include "core.h"
-#include "link.h"
#include "socket.h"
#include "msg.h"
#include "bcast.h"
#include "name_distr.h"
+#include "core.h"
#define MAX_PKT_DEFAULT_MCAST 1500 /* bcast link max packet size (fixed) */
#define BCLINK_WIN_DEFAULT 20 /* bcast link window size (default) */
-#define BCBEARER MAX_BEARERS
-
-/**
- * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
- * @primary: pointer to primary bearer
- * @secondary: pointer to secondary bearer
- *
- * Bearers must have same priority and same set of reachable destinations
- * to be paired.
- */
-
-struct tipc_bcbearer_pair {
- struct tipc_bearer *primary;
- struct tipc_bearer *secondary;
-};
-
-/**
- * struct tipc_bcbearer - bearer used by broadcast link
- * @bearer: (non-standard) broadcast bearer structure
- * @media: (non-standard) broadcast media structure
- * @bpairs: array of bearer pairs
- * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
- * @remains: temporary node map used by tipc_bcbearer_send()
- * @remains_new: temporary node map used by tipc_bcbearer_send()
- *
- * Note: The fields labelled "temporary" are incorporated into the bearer
- * to avoid consuming potentially limited stack space through the use of
- * large local variables within multicast routines. Concurrent access is
- * prevented through use of the spinlock "bclink_lock".
- */
-struct tipc_bcbearer {
- struct tipc_bearer bearer;
- struct tipc_media media;
- struct tipc_bcbearer_pair bpairs[MAX_BEARERS];
- struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
- struct tipc_node_map remains;
- struct tipc_node_map remains_new;
-};
-
-/**
- * struct tipc_bclink - link used for broadcast messages
- * @lock: spinlock governing access to structure
- * @link: (non-standard) broadcast link structure
- * @node: (non-standard) node structure representing b'cast link's peer node
- * @flags: represent bclink states
- * @bcast_nodes: map of broadcast-capable nodes
- * @retransmit_to: node that most recently requested a retransmit
- *
- * Handles sequence numbering, fragmentation, bundling, etc.
- */
-struct tipc_bclink {
- spinlock_t lock;
- struct tipc_link link;
- struct tipc_node node;
- unsigned int flags;
- struct tipc_node_map bcast_nodes;
- struct tipc_node *retransmit_to;
-};
-
-static struct tipc_bcbearer *bcbearer;
-static struct tipc_bclink *bclink;
-static struct tipc_link *bcl;
const char tipc_bclink_name[] = "broadcast-link";
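Note: the file-scope singletons removed above (bcbearer, bclink, bcl) are not deleted outright; the rest of the diff reaches them as tn->bcbearer, tn->bclink and tn->bcl, so their definitions evidently move into the per-namespace struct tipc_net in core.h (which is also why core.h drops to the end of the include list). A rough sketch of the assumed layout, inferred only from the tn-> accesses in this patch:

        /* sketch, inferred from this diff -- not the literal core.h definition */
        struct tipc_net {
                int net_id;                     /* was the global tipc_net_id value */
                u32 own_addr;                   /* was the global tipc_own_addr */
                struct tipc_bearer __rcu *bearer_list[MAX_BEARERS + 1];
                struct tipc_bcbearer *bcbearer; /* was static bcbearer */
                struct tipc_bclink *bclink;     /* was static bclink */
                struct tipc_link *bcl;          /* was static bcl */
                /* other per-namespace state elided */
        };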
@@ -115,38 +52,50 @@ static void tipc_nmap_diff(struct tipc_node_map *nm_a,
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
-static void tipc_bclink_lock(void)
+static void tipc_bclink_lock(struct net *net)
{
- spin_lock_bh(&bclink->lock);
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+ spin_lock_bh(&tn->bclink->lock);
}
-static void tipc_bclink_unlock(void)
+static void tipc_bclink_unlock(struct net *net)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_node *node = NULL;
- if (likely(!bclink->flags)) {
- spin_unlock_bh(&bclink->lock);
+ if (likely(!tn->bclink->flags)) {
+ spin_unlock_bh(&tn->bclink->lock);
return;
}
- if (bclink->flags & TIPC_BCLINK_RESET) {
- bclink->flags &= ~TIPC_BCLINK_RESET;
- node = tipc_bclink_retransmit_to();
+ if (tn->bclink->flags & TIPC_BCLINK_RESET) {
+ tn->bclink->flags &= ~TIPC_BCLINK_RESET;
+ node = tipc_bclink_retransmit_to(net);
}
- spin_unlock_bh(&bclink->lock);
+ spin_unlock_bh(&tn->bclink->lock);
if (node)
tipc_link_reset_all(node);
}
+void tipc_bclink_input(struct net *net)
+{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+ tipc_sk_mcast_rcv(net, &tn->bclink->arrvq, &tn->bclink->inputq);
+}
+
uint tipc_bclink_get_mtu(void)
{
return MAX_PKT_DEFAULT_MCAST;
}
-void tipc_bclink_set_flags(unsigned int flags)
+void tipc_bclink_set_flags(struct net *net, unsigned int flags)
{
- bclink->flags |= flags;
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+ tn->bclink->flags |= flags;
}
static u32 bcbuf_acks(struct sk_buff *buf)
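Note: every former void-argument entry point now takes a struct net * and resolves its per-namespace state with net_generic(). The identifier tipc_net_id, which used to hold the TIPC network identity (compare the msg_set_mc_netid() changes below), now serves as the pernet-subsystem id, while the network identity itself moves to tn->net_id. A hypothetical caller to show the pattern; example_touch_bclink is not in the patch:

        /* hypothetical: all bclink state is now reached via the namespace */
        static void example_touch_bclink(struct net *net)
        {
                struct tipc_net *tn = net_generic(net, tipc_net_id);

                tipc_bclink_lock(net);
                tn->bcl->stats.sent_info++;     /* any field access goes via tn */
                tipc_bclink_unlock(net);
        }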
@@ -164,31 +113,40 @@ static void bcbuf_decr_acks(struct sk_buff *buf)
bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}
-void tipc_bclink_add_node(u32 addr)
+void tipc_bclink_add_node(struct net *net, u32 addr)
{
- tipc_bclink_lock();
- tipc_nmap_add(&bclink->bcast_nodes, addr);
- tipc_bclink_unlock();
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+ tipc_bclink_lock(net);
+ tipc_nmap_add(&tn->bclink->bcast_nodes, addr);
+ tipc_bclink_unlock(net);
}
-void tipc_bclink_remove_node(u32 addr)
+void tipc_bclink_remove_node(struct net *net, u32 addr)
{
- tipc_bclink_lock();
- tipc_nmap_remove(&bclink->bcast_nodes, addr);
- tipc_bclink_unlock();
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+ tipc_bclink_lock(net);
+ tipc_nmap_remove(&tn->bclink->bcast_nodes, addr);
+ tipc_bclink_unlock(net);
}
-static void bclink_set_last_sent(void)
+static void bclink_set_last_sent(struct net *net)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_link *bcl = tn->bcl;
+
if (bcl->next_out)
bcl->fsm_msg_cnt = mod(buf_seqno(bcl->next_out) - 1);
else
bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
}
-u32 tipc_bclink_get_last_sent(void)
+u32 tipc_bclink_get_last_sent(struct net *net)
{
- return bcl->fsm_msg_cnt;
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+ return tn->bcl->fsm_msg_cnt;
}
static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
@@ -203,9 +161,11 @@ static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
*
* Called with bclink_lock locked
*/
-struct tipc_node *tipc_bclink_retransmit_to(void)
+struct tipc_node *tipc_bclink_retransmit_to(struct net *net)
{
- return bclink->retransmit_to;
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+ return tn->bclink->retransmit_to;
}
/**
@@ -215,9 +175,10 @@ struct tipc_node *tipc_bclink_retransmit_to(void)
*
* Called with bclink_lock locked
*/
-static void bclink_retransmit_pkt(u32 after, u32 to)
+static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
{
struct sk_buff *skb;
+ struct tipc_link *bcl = tn->bcl;
skb_queue_walk(&bcl->outqueue, skb) {
if (more(buf_seqno(skb), after)) {
@@ -232,13 +193,11 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
*
* Called with no locks taken
*/
-void tipc_bclink_wakeup_users(void)
+void tipc_bclink_wakeup_users(struct net *net)
{
- struct sk_buff *skb;
-
- while ((skb = skb_dequeue(&bclink->link.waiting_sks)))
- tipc_sk_rcv(skb);
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ tipc_sk_rcv(net, &tn->bclink->link.wakeupq);
}
/**
@@ -253,10 +212,12 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
struct sk_buff *skb, *tmp;
struct sk_buff *next;
unsigned int released = 0;
+ struct net *net = n_ptr->net;
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
- tipc_bclink_lock();
+ tipc_bclink_lock(net);
/* Bail out if tx queue is empty (no clean up is required) */
- skb = skb_peek(&bcl->outqueue);
+ skb = skb_peek(&tn->bcl->outqueue);
if (!skb)
goto exit;
@@ -267,43 +228,43 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
* acknowledge sent messages only (if other nodes still exist)
* or both sent and unsent messages (otherwise)
*/
- if (bclink->bcast_nodes.count)
- acked = bcl->fsm_msg_cnt;
+ if (tn->bclink->bcast_nodes.count)
+ acked = tn->bcl->fsm_msg_cnt;
else
- acked = bcl->next_out_no;
+ acked = tn->bcl->next_out_no;
} else {
/*
* Bail out if specified sequence number does not correspond
* to a message that has been sent and not yet acknowledged
*/
if (less(acked, buf_seqno(skb)) ||
- less(bcl->fsm_msg_cnt, acked) ||
+ less(tn->bcl->fsm_msg_cnt, acked) ||
less_eq(acked, n_ptr->bclink.acked))
goto exit;
}
/* Skip over packets that node has previously acknowledged */
- skb_queue_walk(&bcl->outqueue, skb) {
+ skb_queue_walk(&tn->bcl->outqueue, skb) {
if (more(buf_seqno(skb), n_ptr->bclink.acked))
break;
}
/* Update packets that node is now acknowledging */
- skb_queue_walk_from_safe(&bcl->outqueue, skb, tmp) {
+ skb_queue_walk_from_safe(&tn->bcl->outqueue, skb, tmp) {
if (more(buf_seqno(skb), acked))
break;
- next = tipc_skb_queue_next(&bcl->outqueue, skb);
- if (skb != bcl->next_out) {
+ next = tipc_skb_queue_next(&tn->bcl->outqueue, skb);
+ if (skb != tn->bcl->next_out) {
bcbuf_decr_acks(skb);
} else {
bcbuf_set_acks(skb, 0);
- bcl->next_out = next;
- bclink_set_last_sent();
+ tn->bcl->next_out = next;
+ bclink_set_last_sent(net);
}
if (bcbuf_acks(skb) == 0) {
- __skb_unlink(skb, &bcl->outqueue);
+ __skb_unlink(skb, &tn->bcl->outqueue);
kfree_skb(skb);
released = 1;
}
@@ -311,15 +272,14 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
n_ptr->bclink.acked = acked;
/* Try resolving broadcast link congestion, if necessary */
- if (unlikely(bcl->next_out)) {
- tipc_link_push_packets(bcl);
- bclink_set_last_sent();
+ if (unlikely(tn->bcl->next_out)) {
+ tipc_link_push_packets(tn->bcl);
+ bclink_set_last_sent(net);
}
- if (unlikely(released && !skb_queue_empty(&bcl->waiting_sks)))
+ if (unlikely(released && !skb_queue_empty(&tn->bcl->wakeupq)))
n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS;
-
exit:
- tipc_bclink_unlock();
+ tipc_bclink_unlock(net);
}
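Note: the acknowledge path above leans on TIPC's wrap-safe sequence arithmetic (mod(), less(), less_eq(), more()). Sequence numbers are 16-bit values carried in u32s, and comparisons are taken modulo 2^16 within half the number space, so a recently wrapped seqno still orders after one near 0xffff. The helpers look roughly like this (a sketch matching TIPC's header definitions):

        /* sketch of TIPC's wrap-safe seqno helpers */
        static inline u32 mod(u32 x)
        {
                return x & 0xffffu;
        }

        static inline int less_eq(u32 left, u32 right)
        {
                return mod(right - left) < 32768u;  /* within half the space */
        }

        static inline int less(u32 left, u32 right)
        {
                return less_eq(left, right) && (mod(right) != mod(left));
        }

        static inline int more(u32 left, u32 right)
        {
                return !less_eq(left, right);
        }

        /* e.g. less(0xfffe, 0x0003) is true: 3 orders after 0xfffe post-wrap */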
/**
@@ -327,9 +287,12 @@ exit:
*
* RCU and node lock set
*/
-void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
+void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
+ u32 last_sent)
{
struct sk_buff *buf;
+ struct net *net = n_ptr->net;
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
/* Ignore "stale" link state info */
if (less_eq(last_sent, n_ptr->bclink.last_in))
@@ -359,18 +322,18 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferred_queue);
u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;
- tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
+ tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG,
INT_H_SIZE, n_ptr->addr);
msg_set_non_seq(msg, 1);
- msg_set_mc_netid(msg, tipc_net_id);
+ msg_set_mc_netid(msg, tn->net_id);
msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
msg_set_bcgap_to(msg, to);
- tipc_bclink_lock();
- tipc_bearer_send(MAX_BEARERS, buf, NULL);
- bcl->stats.sent_nacks++;
- tipc_bclink_unlock();
+ tipc_bclink_lock(net);
+ tipc_bearer_send(net, MAX_BEARERS, buf, NULL);
+ tn->bcl->stats.sent_nacks++;
+ tipc_bclink_unlock(net);
kfree_skb(buf);
n_ptr->bclink.oos_state++;
@@ -383,9 +346,9 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
* Delay any upcoming NACK by this node if another node has already
* requested the first message this node is going to ask for.
*/
-static void bclink_peek_nack(struct tipc_msg *msg)
+static void bclink_peek_nack(struct net *net, struct tipc_msg *msg)
{
- struct tipc_node *n_ptr = tipc_node_find(msg_destnode(msg));
+ struct tipc_node *n_ptr = tipc_node_find(net, msg_destnode(msg));
if (unlikely(!n_ptr))
return;
@@ -400,17 +363,23 @@ static void bclink_peek_nack(struct tipc_msg *msg)
tipc_node_unlock(n_ptr);
}
-/* tipc_bclink_xmit - broadcast buffer chain to all nodes in cluster
+/* tipc_bclink_xmit - deliver buffer chain to all nodes in cluster
* and to identified node local sockets
+ * @net: the applicable net namespace
* @list: chain of buffers containing message
* Consumes the buffer chain, except when returning -ELINKCONG
* Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
*/
-int tipc_bclink_xmit(struct sk_buff_head *list)
+int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_link *bcl = tn->bcl;
+ struct tipc_bclink *bclink = tn->bclink;
int rc = 0;
int bc = 0;
struct sk_buff *skb;
+ struct sk_buff_head arrvq;
+ struct sk_buff_head inputq;
/* Prepare clone of message for local node */
skb = tipc_msg_reassemble(list);
@@ -419,32 +388,35 @@ int tipc_bclink_xmit(struct sk_buff_head *list)
return -EHOSTUNREACH;
}
- /* Broadcast to all other nodes */
+ /* Broadcast to all nodes */
if (likely(bclink)) {
- tipc_bclink_lock();
+ tipc_bclink_lock(net);
if (likely(bclink->bcast_nodes.count)) {
- rc = __tipc_link_xmit(bcl, list);
+ rc = __tipc_link_xmit(net, bcl, list);
if (likely(!rc)) {
u32 len = skb_queue_len(&bcl->outqueue);
- bclink_set_last_sent();
+ bclink_set_last_sent(net);
bcl->stats.queue_sz_counts++;
bcl->stats.accu_queue_sz += len;
}
bc = 1;
}
- tipc_bclink_unlock();
+ tipc_bclink_unlock(net);
}
if (unlikely(!bc))
__skb_queue_purge(list);
- /* Deliver message clone */
- if (likely(!rc))
- tipc_sk_mcast_rcv(skb);
- else
+ if (unlikely(rc)) {
kfree_skb(skb);
-
+ return rc;
+ }
+ /* Deliver message clone */
+ __skb_queue_head_init(&arrvq);
+ skb_queue_head_init(&inputq);
+ __skb_queue_tail(&arrvq, skb);
+ tipc_sk_mcast_rcv(net, &arrvq, &inputq);
return rc;
}
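Note: error handling in tipc_bclink_xmit() changes shape here: the old code delivered the local clone on success or freed it on failure at the tail, while the new code frees and returns early on error, then hands the clone to local sockets through a caller-private queue pair. Note the two initializers: arrvq gets __skb_queue_head_init() (lockless, the queue never leaves this call), while inputq gets skb_queue_head_init(), presumably because tipc_sk_mcast_rcv() takes inputq->lock while filling it. A condensed sketch of that handoff:

        /* sketch: local delivery of the reassembled clone */
        struct sk_buff_head arrvq, inputq;

        __skb_queue_head_init(&arrvq);  /* private, lockless init */
        skb_queue_head_init(&inputq);   /* consumer locks this one */
        __skb_queue_tail(&arrvq, skb);
        tipc_sk_mcast_rcv(net, &arrvq, &inputq);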
@@ -455,19 +427,21 @@ int tipc_bclink_xmit(struct sk_buff_head *list)
*/
static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
{
+ struct tipc_net *tn = net_generic(node->net, tipc_net_id);
+
bclink_update_last_sent(node, seqno);
node->bclink.last_in = seqno;
node->bclink.oos_state = 0;
- bcl->stats.recv_info++;
+ tn->bcl->stats.recv_info++;
/*
* Unicast an ACK periodically, ensuring that
* all nodes in the cluster don't ACK at the same time
*/
- if (((seqno - tipc_own_addr) % TIPC_MIN_LINK_WIN) == 0) {
+ if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
tipc_link_proto_xmit(node->active_links[node->addr & 1],
STATE_MSG, 0, 0, 0, 0, 0);
- bcl->stats.sent_acks++;
+ tn->bcl->stats.sent_acks++;
}
}
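Note: the periodic-ACK condition above staggers acknowledgements across the cluster: a node ACKs only when (seqno - own_addr) is a multiple of TIPC_MIN_LINK_WIN, so nodes whose addresses differ modulo the window size acknowledge different packets instead of all replying to the same one. A worked illustration, assuming TIPC_MIN_LINK_WIN is 50 as in link.h:

        /* illustration: with TIPC_MIN_LINK_WIN == 50, a node whose address
         * is congruent to 1 (mod 50) ACKs seqnos 1, 51, 101, ...;
         * one congruent to 2 (mod 50) ACKs seqnos 2, 52, 102, ...;
         * so each packet is ACKed by roughly 1/50th of the nodes at a time
         */
        static bool should_unicast_ack(u32 seqno, u32 own_addr)
        {
                return ((seqno - own_addr) % TIPC_MIN_LINK_WIN) == 0;
        }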
@@ -476,19 +450,24 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
*
* RCU is locked, no other locks set
*/
-void tipc_bclink_rcv(struct sk_buff *buf)
+void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_link *bcl = tn->bcl;
struct tipc_msg *msg = buf_msg(buf);
struct tipc_node *node;
u32 next_in;
u32 seqno;
int deferred = 0;
+ int pos = 0;
+ struct sk_buff *iskb;
+ struct sk_buff_head *arrvq, *inputq;
/* Screen out unwanted broadcast messages */
- if (msg_mc_netid(msg) != tipc_net_id)
+ if (msg_mc_netid(msg) != tn->net_id)
goto exit;
- node = tipc_node_find(msg_prevnode(msg));
+ node = tipc_node_find(net, msg_prevnode(msg));
if (unlikely(!node))
goto exit;
@@ -500,18 +479,18 @@ void tipc_bclink_rcv(struct sk_buff *buf)
if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
if (msg_type(msg) != STATE_MSG)
goto unlock;
- if (msg_destnode(msg) == tipc_own_addr) {
+ if (msg_destnode(msg) == tn->own_addr) {
tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
tipc_node_unlock(node);
- tipc_bclink_lock();
+ tipc_bclink_lock(net);
bcl->stats.recv_nacks++;
- bclink->retransmit_to = node;
- bclink_retransmit_pkt(msg_bcgap_after(msg),
+ tn->bclink->retransmit_to = node;
+ bclink_retransmit_pkt(tn, msg_bcgap_after(msg),
msg_bcgap_to(msg));
- tipc_bclink_unlock();
+ tipc_bclink_unlock(net);
} else {
tipc_node_unlock(node);
- bclink_peek_nack(msg);
+ bclink_peek_nack(net, msg);
}
goto exit;
}
@@ -519,52 +498,54 @@ void tipc_bclink_rcv(struct sk_buff *buf)
/* Handle in-sequence broadcast message */
seqno = msg_seqno(msg);
next_in = mod(node->bclink.last_in + 1);
+ arrvq = &tn->bclink->arrvq;
+ inputq = &tn->bclink->inputq;
if (likely(seqno == next_in)) {
receive:
/* Deliver message to destination */
if (likely(msg_isdata(msg))) {
- tipc_bclink_lock();
+ tipc_bclink_lock(net);
bclink_accept_pkt(node, seqno);
- tipc_bclink_unlock();
+ spin_lock_bh(&inputq->lock);
+ __skb_queue_tail(arrvq, buf);
+ spin_unlock_bh(&inputq->lock);
+ node->action_flags |= TIPC_BCAST_MSG_EVT;
+ tipc_bclink_unlock(net);
tipc_node_unlock(node);
- if (likely(msg_mcast(msg)))
- tipc_sk_mcast_rcv(buf);
- else
- kfree_skb(buf);
} else if (msg_user(msg) == MSG_BUNDLER) {
- tipc_bclink_lock();
+ tipc_bclink_lock(net);
bclink_accept_pkt(node, seqno);
bcl->stats.recv_bundles++;
bcl->stats.recv_bundled += msg_msgcnt(msg);
- tipc_bclink_unlock();
+ pos = 0;
+ while (tipc_msg_extract(buf, &iskb, &pos)) {
+ spin_lock_bh(&inputq->lock);
+ __skb_queue_tail(arrvq, iskb);
+ spin_unlock_bh(&inputq->lock);
+ }
+ node->action_flags |= TIPC_BCAST_MSG_EVT;
+ tipc_bclink_unlock(net);
tipc_node_unlock(node);
- tipc_link_bundle_rcv(buf);
} else if (msg_user(msg) == MSG_FRAGMENTER) {
tipc_buf_append(&node->bclink.reasm_buf, &buf);
if (unlikely(!buf && !node->bclink.reasm_buf))
goto unlock;
- tipc_bclink_lock();
+ tipc_bclink_lock(net);
bclink_accept_pkt(node, seqno);
bcl->stats.recv_fragments++;
if (buf) {
bcl->stats.recv_fragmented++;
msg = buf_msg(buf);
- tipc_bclink_unlock();
+ tipc_bclink_unlock(net);
goto receive;
}
- tipc_bclink_unlock();
- tipc_node_unlock(node);
- } else if (msg_user(msg) == NAME_DISTRIBUTOR) {
- tipc_bclink_lock();
- bclink_accept_pkt(node, seqno);
- tipc_bclink_unlock();
+ tipc_bclink_unlock(net);
tipc_node_unlock(node);
- tipc_named_rcv(buf);
} else {
- tipc_bclink_lock();
+ tipc_bclink_lock(net);
bclink_accept_pkt(node, seqno);
- tipc_bclink_unlock();
+ tipc_bclink_unlock(net);
tipc_node_unlock(node);
kfree_skb(buf);
}
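Note: in the data and bundle branches above, packets are appended to the shared arrival queue arrvq while holding the input queue's lock (spin_lock_bh(&inputq->lock)); arrvq itself is initialized locklessly (__skb_queue_head_init() in tipc_bclink_init() below). The single inputq lock evidently serializes both queues, with TIPC_BCAST_MSG_EVT prompting the node to call tipc_bclink_input(), which drains arrvq into inputq via tipc_sk_mcast_rcv(). A minimal sketch of that producer/consumer split, under the same assumption:

        /* producer (this receive path): publish under the consumer's lock */
        spin_lock_bh(&inputq->lock);
        __skb_queue_tail(arrvq, skb);
        spin_unlock_bh(&inputq->lock);

        /* consumer (via tipc_bclink_input -> tipc_sk_mcast_rcv, sketched):
         * move arrivals to the input queue under the same lock
         */
        spin_lock_bh(&inputq->lock);
        while ((skb = __skb_dequeue(arrvq)) != NULL)
                __skb_queue_tail(inputq, skb);
        spin_unlock_bh(&inputq->lock);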
@@ -602,14 +583,14 @@ receive:
buf = NULL;
}
- tipc_bclink_lock();
+ tipc_bclink_lock(net);
if (deferred)
bcl->stats.deferred_recv++;
else
bcl->stats.duplicates++;
- tipc_bclink_unlock();
+ tipc_bclink_unlock(net);
unlock:
tipc_node_unlock(node);
@@ -620,7 +601,7 @@ exit:
u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
{
return (n_ptr->bclink.recv_permitted &&
- (tipc_bclink_get_last_sent() != n_ptr->bclink.acked));
+ (tipc_bclink_get_last_sent(n_ptr->net) != n_ptr->bclink.acked));
}
@@ -633,11 +614,15 @@ u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
* Returns 0 (packet sent successfully) under all circumstances,
* since the broadcast link's pseudo-bearer never blocks
*/
-static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
+static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
+ struct tipc_bearer *unused1,
struct tipc_media_addr *unused2)
{
int bp_index;
struct tipc_msg *msg = buf_msg(buf);
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_bcbearer *bcbearer = tn->bcbearer;
+ struct tipc_bclink *bclink = tn->bclink;
/* Prepare broadcast link message for reliable transmission,
* if first time trying to send it;
@@ -647,8 +632,8 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
if (likely(!msg_non_seq(buf_msg(buf)))) {
bcbuf_set_acks(buf, bclink->bcast_nodes.count);
msg_set_non_seq(msg, 1);
- msg_set_mc_netid(msg, tipc_net_id);
- bcl->stats.sent_info++;
+ msg_set_mc_netid(msg, tn->net_id);
+ tn->bcl->stats.sent_info++;
if (WARN_ON(!bclink->bcast_nodes.count)) {
dump_stack();
@@ -677,13 +662,14 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
if (bp_index == 0) {
/* Use original buffer for first bearer */
- tipc_bearer_send(b->identity, buf, &b->bcast_addr);
+ tipc_bearer_send(net, b->identity, buf, &b->bcast_addr);
} else {
/* Avoid concurrent buffer access */
tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
if (!tbuf)
break;
- tipc_bearer_send(b->identity, tbuf, &b->bcast_addr);
+ tipc_bearer_send(net, b->identity, tbuf,
+ &b->bcast_addr);
kfree_skb(tbuf); /* Bearer keeps a clone */
}
if (bcbearer->remains_new.count == 0)
@@ -698,15 +684,18 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
/**
* tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
*/
-void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action)
+void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
+ u32 node, bool action)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_bcbearer *bcbearer = tn->bcbearer;
struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
struct tipc_bcbearer_pair *bp_curr;
struct tipc_bearer *b;
int b_index;
int pri;
- tipc_bclink_lock();
+ tipc_bclink_lock(net);
if (action)
tipc_nmap_add(nm_ptr, node);
@@ -718,7 +707,7 @@ void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action)
rcu_read_lock();
for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
- b = rcu_dereference_rtnl(bearer_list[b_index]);
+ b = rcu_dereference_rtnl(tn->bearer_list[b_index]);
if (!b || !b->nodes.count)
continue;
@@ -753,7 +742,7 @@ void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action)
bp_curr++;
}
- tipc_bclink_unlock();
+ tipc_bclink_unlock(net);
}
static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
@@ -807,19 +796,21 @@ msg_full:
return -EMSGSIZE;
}
-int tipc_nl_add_bc_link(struct tipc_nl_msg *msg)
+int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
{
int err;
void *hdr;
struct nlattr *attrs;
struct nlattr *prop;
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_link *bcl = tn->bcl;
if (!bcl)
return 0;
- tipc_bclink_lock();
+ tipc_bclink_lock(net);
- hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family,
+ hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
NLM_F_MULTI, TIPC_NL_LINK_GET);
if (!hdr)
return -EMSGSIZE;
@@ -852,7 +843,7 @@ int tipc_nl_add_bc_link(struct tipc_nl_msg *msg)
if (err)
goto attr_msg_full;
- tipc_bclink_unlock();
+ tipc_bclink_unlock(net);
nla_nest_end(msg->skb, attrs);
genlmsg_end(msg->skb, hdr);
@@ -863,79 +854,49 @@ prop_msg_full:
attr_msg_full:
nla_nest_cancel(msg->skb, attrs);
msg_full:
- tipc_bclink_unlock();
+ tipc_bclink_unlock(net);
genlmsg_cancel(msg->skb, hdr);
return -EMSGSIZE;
}
-int tipc_bclink_stats(char *buf, const u32 buf_size)
+int tipc_bclink_reset_stats(struct net *net)
{
- int ret;
- struct tipc_stats *s;
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_link *bcl = tn->bcl;
if (!bcl)
- return 0;
-
- tipc_bclink_lock();
-
- s = &bcl->stats;
-
- ret = tipc_snprintf(buf, buf_size, "Link <%s>\n"
- " Window:%u packets\n",
- bcl->name, bcl->queue_limit[0]);
- ret += tipc_snprintf(buf + ret, buf_size - ret,
- " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
- s->recv_info, s->recv_fragments,
- s->recv_fragmented, s->recv_bundles,
- s->recv_bundled);
- ret += tipc_snprintf(buf + ret, buf_size - ret,
- " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
- s->sent_info, s->sent_fragments,
- s->sent_fragmented, s->sent_bundles,
- s->sent_bundled);
- ret += tipc_snprintf(buf + ret, buf_size - ret,
- " RX naks:%u defs:%u dups:%u\n",
- s->recv_nacks, s->deferred_recv, s->duplicates);
- ret += tipc_snprintf(buf + ret, buf_size - ret,
- " TX naks:%u acks:%u dups:%u\n",
- s->sent_nacks, s->sent_acks, s->retransmitted);
- ret += tipc_snprintf(buf + ret, buf_size - ret,
- " Congestion link:%u Send queue max:%u avg:%u\n",
- s->link_congs, s->max_queue_sz,
- s->queue_sz_counts ?
- (s->accu_queue_sz / s->queue_sz_counts) : 0);
-
- tipc_bclink_unlock();
- return ret;
-}
-
-int tipc_bclink_reset_stats(void)
-{
- if (!bcl)
return -ENOPROTOOPT;
- tipc_bclink_lock();
+ tipc_bclink_lock(net);
memset(&bcl->stats, 0, sizeof(bcl->stats));
- tipc_bclink_unlock();
+ tipc_bclink_unlock(net);
return 0;
}
-int tipc_bclink_set_queue_limits(u32 limit)
+int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_link *bcl = tn->bcl;
+
if (!bcl)
return -ENOPROTOOPT;
if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
return -EINVAL;
- tipc_bclink_lock();
+ tipc_bclink_lock(net);
tipc_link_set_queue_limits(bcl, limit);
- tipc_bclink_unlock();
+ tipc_bclink_unlock(net);
return 0;
}
-int tipc_bclink_init(void)
+int tipc_bclink_init(struct net *net)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_bcbearer *bcbearer;
+ struct tipc_bclink *bclink;
+ struct tipc_link *bcl;
+
bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
if (!bcbearer)
return -ENOMEM;
@@ -954,30 +915,39 @@ int tipc_bclink_init(void)
spin_lock_init(&bclink->lock);
__skb_queue_head_init(&bcl->outqueue);
__skb_queue_head_init(&bcl->deferred_queue);
- skb_queue_head_init(&bcl->waiting_sks);
+ skb_queue_head_init(&bcl->wakeupq);
bcl->next_out_no = 1;
spin_lock_init(&bclink->node.lock);
- __skb_queue_head_init(&bclink->node.waiting_sks);
+ __skb_queue_head_init(&bclink->arrvq);
+ skb_queue_head_init(&bclink->inputq);
bcl->owner = &bclink->node;
+ bcl->owner->net = net;
bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
bcl->bearer_id = MAX_BEARERS;
- rcu_assign_pointer(bearer_list[MAX_BEARERS], &bcbearer->bearer);
+ rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
bcl->state = WORKING_WORKING;
+ bcl->pmsg = (struct tipc_msg *)&bcl->proto_msg;
+ msg_set_prevnode(bcl->pmsg, tn->own_addr);
strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
+ tn->bcbearer = bcbearer;
+ tn->bclink = bclink;
+ tn->bcl = bcl;
return 0;
}
-void tipc_bclink_stop(void)
+void tipc_bclink_stop(struct net *net)
{
- tipc_bclink_lock();
- tipc_link_purge_queues(bcl);
- tipc_bclink_unlock();
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
- RCU_INIT_POINTER(bearer_list[BCBEARER], NULL);
+ tipc_bclink_lock(net);
+ tipc_link_purge_queues(tn->bcl);
+ tipc_bclink_unlock(net);
+
+ RCU_INIT_POINTER(tn->bearer_list[BCBEARER], NULL);
synchronize_net();
- kfree(bcbearer);
- kfree(bclink);
+ kfree(tn->bcbearer);
+ kfree(tn->bclink);
}
/**
@@ -1037,50 +1007,3 @@ static void tipc_nmap_diff(struct tipc_node_map *nm_a,
}
}
}
-
-/**
- * tipc_port_list_add - add a port to a port list, ensuring no duplicates
- */
-void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port)
-{
- struct tipc_port_list *item = pl_ptr;
- int i;
- int item_sz = PLSIZE;
- int cnt = pl_ptr->count;
-
- for (; ; cnt -= item_sz, item = item->next) {
- if (cnt < PLSIZE)
- item_sz = cnt;
- for (i = 0; i < item_sz; i++)
- if (item->ports[i] == port)
- return;
- if (i < PLSIZE) {
- item->ports[i] = port;
- pl_ptr->count++;
- return;
- }
- if (!item->next) {
- item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
- if (!item->next) {
- pr_warn("Incomplete multicast delivery, no memory\n");
- return;
- }
- item->next->next = NULL;
- }
- }
-}
-
-/**
- * tipc_port_list_free - free dynamically created entries in port_list chain
- *
- */
-void tipc_port_list_free(struct tipc_port_list *pl_ptr)
-{
- struct tipc_port_list *item;
- struct tipc_port_list *next;
-
- for (item = pl_ptr->next; item; item = next) {
- next = item->next;
- kfree(item);
- }
-}
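Note: these two helpers managed the chained destination-port list (PLSIZE ports per block) that the old local-delivery path walked; with multicast delivery now funnelled through sk_buff queues and tipc_sk_mcast_rcv() above, the list type and its helpers go away. For reference, the removed API was used roughly like this (hypothetical caller, not from the patch):

        /* hypothetical pre-patch usage of the removed helpers */
        struct tipc_port_list dports = {0, NULL, {0}};

        tipc_port_list_add(&dports, portid);    /* duplicate-free append */
        /* ... deliver to each port in dports ... */
        tipc_port_list_free(&dports);           /* frees chained blocks only */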