author		Marek Lindner <lindner_marek@yahoo.de>	2011-02-10 14:33:53 +0000
committer	Marek Lindner <lindner_marek@yahoo.de>	2011-03-05 12:50:03 +0100
commit		44524fcdf6ca19b58c24f7622c4af1d8d8fe59f8 (patch)
tree		297c76f80d68d56e3c65a23c70de645a1c93df47 /net/batman-adv/unicast.c
parent		a4c135c561106c397bae33455acfca4aa8065a30 (diff)
batman-adv: Correct rcu refcounting for neigh_node
It might be possible that 2 threads access the same data in the same
rcu grace period. The first thread calls call_rcu() to decrement the
refcount and free the data while the second thread increases the
refcount to use the data. To avoid this race condition all refcount
operations have to be atomic.

Reported-by: Sven Eckelmann <sven@narfation.org>
Signed-off-by: Marek Lindner <lindner_marek@yahoo.de>
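The fix can be pictured with a minimal sketch of the refcounting pattern the
message describes (kernel context assumed; the struct layout and the helper
names neigh_node_free_rcu and neigh_node_get below are illustrative, not the
exact batman-adv symbols): the release path decrements atomically and defers
the kfree() through call_rcu(), while a reader running under rcu_read_lock()
may only take a new reference via atomic_inc_not_zero(), so it can never
resurrect an object whose refcount has already dropped to zero.

/*
 * Minimal sketch of the atomic refcount + RCU pattern; names are
 * illustrative, not the exact batman-adv symbols.
 */
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct neigh_node {
        atomic_t refcount;              /* atomic so get/put cannot race */
        struct rcu_head rcu;
        /* ... routing data protected by RCU ... */
};

static void neigh_node_free_rcu(struct rcu_head *rcu)
{
        kfree(container_of(rcu, struct neigh_node, rcu));
}

/* release side: drop one reference, free only after the grace period */
static void neigh_node_free_ref(struct neigh_node *neigh_node)
{
        if (atomic_dec_and_test(&neigh_node->refcount))
                call_rcu(&neigh_node->rcu, neigh_node_free_rcu);
}

/* reader side: take a reference only while the object is still alive */
static struct neigh_node *neigh_node_get(struct neigh_node *neigh_node)
{
        /* refuses to resurrect an object whose refcount already hit zero */
        if (!atomic_inc_not_zero(&neigh_node->refcount))
                return NULL;
        return neigh_node;
}

With a plain, non-atomic counter the reader could still bump the count inside
the same grace period in which the writer decided to free the object, which is
exactly the race reported above.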
Diffstat (limited to 'net/batman-adv/unicast.c')
-rw-r--r--	net/batman-adv/unicast.c	57
1 files changed, 34 insertions, 23 deletions
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 00bfeaf9ece..7ca994ccac1 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -285,38 +285,42 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
         struct unicast_packet *unicast_packet;
         struct orig_node *orig_node = NULL;
         struct batman_if *batman_if;
-        struct neigh_node *router;
+        struct neigh_node *neigh_node;
         int data_len = skb->len;
         uint8_t dstaddr[6];
+        int ret = 1;
 
         spin_lock_bh(&bat_priv->orig_hash_lock);
 
         /* get routing information */
         if (is_multicast_ether_addr(ethhdr->h_dest))
                 orig_node = (struct orig_node *)gw_get_selected(bat_priv);
+        if (orig_node) {
+                kref_get(&orig_node->refcount);
+                goto find_router;
+        }
 
-        /* check for hna host */
-        if (!orig_node)
-                orig_node = transtable_search(bat_priv, ethhdr->h_dest);
+        /* check for hna host - increases orig_node refcount */
+        orig_node = transtable_search(bat_priv, ethhdr->h_dest);
 
+find_router:
         /* find_router() increases neigh_nodes refcount if found. */
-        router = find_router(bat_priv, orig_node, NULL);
+        neigh_node = find_router(bat_priv, orig_node, NULL);
 
-        if (!router)
+        if (!neigh_node)
                 goto unlock;
 
-        /* don't lock while sending the packets ... we therefore
-         * copy the required data before sending */
-        batman_if = router->if_incoming;
-        memcpy(dstaddr, router->addr, ETH_ALEN);
-
-        spin_unlock_bh(&bat_priv->orig_hash_lock);
-
-        if (batman_if->if_status != IF_ACTIVE)
-                goto dropped;
+        if (neigh_node->if_incoming->if_status != IF_ACTIVE)
+                goto unlock;
 
         if (my_skb_head_push(skb, sizeof(struct unicast_packet)) < 0)
-                goto dropped;
+                goto unlock;
+
+        /* don't lock while sending the packets ... we therefore
+         * copy the required data before sending */
+        batman_if = neigh_node->if_incoming;
+        memcpy(dstaddr, neigh_node->addr, ETH_ALEN);
+        spin_unlock_bh(&bat_priv->orig_hash_lock);
 
         unicast_packet = (struct unicast_packet *)skb->data;
@@ -330,18 +334,25 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
         if (atomic_read(&bat_priv->fragmentation) &&
             data_len + sizeof(struct unicast_packet) >
-                        batman_if->net_dev->mtu) {
+            batman_if->net_dev->mtu) {
                 /* send frag skb decreases ttl */
                 unicast_packet->ttl++;
-                return frag_send_skb(skb, bat_priv, batman_if,
-                                     dstaddr);
+                ret = frag_send_skb(skb, bat_priv, batman_if, dstaddr);
+                goto out;
         }
+
         send_skb_packet(skb, batman_if, dstaddr);
-        return 0;
+        ret = 0;
+        goto out;
 
 unlock:
         spin_unlock_bh(&bat_priv->orig_hash_lock);
-dropped:
-        kfree_skb(skb);
-        return 1;
+out:
+        if (neigh_node)
+                neigh_node_free_ref(neigh_node);
+        if (orig_node)
+                kref_put(&orig_node->refcount, orig_node_free_ref);
+        if (ret == 1)
+                kfree_skb(skb);
+        return ret;
 }