Diffstat (limited to 'net')
-rw-r--r--  net/bluetooth/6lowpan.c | 860
-rw-r--r--  net/bluetooth/6lowpan.h | 26
-rw-r--r--  net/bluetooth/Makefile | 6
-rw-r--r--  net/bluetooth/hci_core.c | 52
-rw-r--r--  net/bluetooth/hci_event.c | 3
-rw-r--r--  net/bluetooth/l2cap_core.c | 12
-rw-r--r--  net/bluetooth/l2cap_sock.c | 3
-rw-r--r--  net/bluetooth/rfcomm/tty.c | 103
-rw-r--r--  net/bridge/netfilter/nf_tables_bridge.c | 44
-rw-r--r--  net/ieee802154/6lowpan.c | 796
-rw-r--r--  net/ieee802154/6lowpan.h | 72
-rw-r--r--  net/ieee802154/6lowpan_iphc.c | 799
-rw-r--r--  net/ieee802154/Makefile | 2
-rw-r--r--  net/ipv4/ip_tunnel.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_tables_arp.c | 44
-rw-r--r--  net/ipv4/netfilter/nf_tables_ipv4.c | 60
-rw-r--r--  net/ipv4/netfilter/nft_chain_nat_ipv4.c | 10
-rw-r--r--  net/ipv4/netfilter/nft_chain_route_ipv4.c | 10
-rw-r--r--  net/ipv4/tcp_metrics.c | 151
-rw-r--r--  net/ipv6/addrconf.c | 4
-rw-r--r--  net/ipv6/netfilter/nf_tables_ipv6.c | 65
-rw-r--r--  net/ipv6/netfilter/nft_chain_nat_ipv6.c | 10
-rw-r--r--  net/ipv6/netfilter/nft_chain_route_ipv6.c | 10
-rw-r--r--  net/mac80211/aes_cmac.c | 2
-rw-r--r--  net/mac80211/aes_cmac.h | 2
-rw-r--r--  net/mac80211/cfg.c | 54
-rw-r--r--  net/mac80211/chan.c | 67
-rw-r--r--  net/mac80211/ibss.c | 7
-rw-r--r--  net/mac80211/ieee80211_i.h | 11
-rw-r--r--  net/mac80211/iface.c | 8
-rw-r--r--  net/mac80211/mlme.c | 25
-rw-r--r--  net/mac80211/rc80211_minstrel.c | 8
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.c | 2
-rw-r--r--  net/mac80211/tkip.c | 2
-rw-r--r--  net/mac80211/trace.h | 2
-rw-r--r--  net/mac80211/tx.c | 4
-rw-r--r--  net/mac80211/util.c | 150
-rw-r--r--  net/mac80211/wme.c | 7
-rw-r--r--  net/netfilter/Kconfig | 18
-rw-r--r--  net/netfilter/Makefile | 2
-rw-r--r--  net/netfilter/nf_conntrack_proto_dccp.c | 10
-rw-r--r--  net/netfilter/nf_tables_api.c | 223
-rw-r--r--  net/netfilter/nf_tables_core.c | 6
-rw-r--r--  net/netfilter/nf_tables_inet.c | 104
-rw-r--r--  net/netfilter/nfnetlink_log.c | 8
-rw-r--r--  net/netfilter/nft_compat.c | 8
-rw-r--r--  net/netfilter/nft_ct.c | 199
-rw-r--r--  net/netfilter/nft_hash.c | 2
-rw-r--r--  net/netfilter/nft_log.c | 2
-rw-r--r--  net/netfilter/nft_meta.c | 11
-rw-r--r--  net/netfilter/nft_reject.c | 9
-rw-r--r--  net/netfilter/xt_RATEEST.c | 2
-rw-r--r--  net/netfilter/xt_connlimit.c | 2
-rw-r--r--  net/netfilter/xt_hashlimit.c | 2
-rw-r--r--  net/netfilter/xt_l2tp.c | 354
-rw-r--r--  net/netfilter/xt_recent.c | 2
-rw-r--r--  net/openvswitch/flow_table.c | 2
-rw-r--r--  net/wireless/ap.c | 1
-rw-r--r--  net/wireless/ibss.c | 2
-rw-r--r--  net/wireless/mesh.c | 1
-rw-r--r--  net/wireless/nl80211.c | 160
-rw-r--r--  net/wireless/rdev-ops.h | 15
-rw-r--r--  net/wireless/sme.c | 2
-rw-r--r--  net/wireless/trace.h | 40
-rw-r--r--  net/wireless/util.c | 19
65 files changed, 3484 insertions, 1217 deletions
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
new file mode 100644
index 00000000000..adb3ea04ada
--- /dev/null
+++ b/net/bluetooth/6lowpan.c
@@ -0,0 +1,860 @@
+/*
+ Copyright (c) 2013 Intel Corp.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 and
+ only version 2 as published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+*/
+
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/addrconf.h>
+
+#include <net/af_ieee802154.h> /* to get the address type */
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/l2cap.h>
+
+#include "6lowpan.h"
+
+#include "../ieee802154/6lowpan.h" /* for the compression support */
+
+#define IFACE_NAME_TEMPLATE "bt%d"
+#define EUI64_ADDR_LEN 8
+
+struct skb_cb {
+ struct in6_addr addr;
+ struct l2cap_conn *conn;
+};
+#define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb))
+
+/* The devices list contains the devices for which we are acting
+ * as a proxy. The BT 6LoWPAN device is a virtual device that
+ * connects to the Bluetooth LE device. The real connection to
+ * the BT device is done via the l2cap layer. There is one
+ * virtual device per BT 6LoWPAN network (=hciX device).
+ * The list contains struct lowpan_dev elements.
+ */
+static LIST_HEAD(bt_6lowpan_devices);
+static DEFINE_RWLOCK(devices_lock);
+
+struct lowpan_peer {
+ struct list_head list;
+ struct l2cap_conn *conn;
+
+ /* peer addresses in various formats */
+ unsigned char eui64_addr[EUI64_ADDR_LEN];
+ struct in6_addr peer_addr;
+};
+
+struct lowpan_dev {
+ struct list_head list;
+
+ struct hci_dev *hdev;
+ struct net_device *netdev;
+ struct list_head peers;
+ atomic_t peer_count; /* number of items in peers list */
+
+ struct work_struct delete_netdev;
+ struct delayed_work notify_peers;
+};
+
+static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev)
+{
+ return netdev_priv(netdev);
+}
+
+static inline void peer_add(struct lowpan_dev *dev, struct lowpan_peer *peer)
+{
+ list_add(&peer->list, &dev->peers);
+ atomic_inc(&dev->peer_count);
+}
+
+static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer)
+{
+ list_del(&peer->list);
+
+ if (atomic_dec_and_test(&dev->peer_count)) {
+ BT_DBG("last peer");
+ return true;
+ }
+
+ return false;
+}
+
+static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_dev *dev,
+ bdaddr_t *ba, __u8 type)
+{
+ struct lowpan_peer *peer, *tmp;
+
+ BT_DBG("peers %d addr %pMR type %d", atomic_read(&dev->peer_count),
+ ba, type);
+
+ list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
+ BT_DBG("addr %pMR type %d",
+ &peer->conn->hcon->dst, peer->conn->hcon->dst_type);
+
+ if (bacmp(&peer->conn->hcon->dst, ba))
+ continue;
+
+ if (type == peer->conn->hcon->dst_type)
+ return peer;
+ }
+
+ return NULL;
+}
+
+static inline struct lowpan_peer *peer_lookup_conn(struct lowpan_dev *dev,
+ struct l2cap_conn *conn)
+{
+ struct lowpan_peer *peer, *tmp;
+
+ list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
+ if (peer->conn == conn)
+ return peer;
+ }
+
+ return NULL;
+}
+
+static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn)
+{
+ struct lowpan_dev *entry, *tmp;
+ struct lowpan_peer *peer = NULL;
+ unsigned long flags;
+
+ read_lock_irqsave(&devices_lock, flags);
+
+ list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
+ peer = peer_lookup_conn(entry, conn);
+ if (peer)
+ break;
+ }
+
+ read_unlock_irqrestore(&devices_lock, flags);
+
+ return peer;
+}
+
+static struct lowpan_dev *lookup_dev(struct l2cap_conn *conn)
+{
+ struct lowpan_dev *entry, *tmp;
+ struct lowpan_dev *dev = NULL;
+ unsigned long flags;
+
+ read_lock_irqsave(&devices_lock, flags);
+
+ list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
+ if (conn->hcon->hdev == entry->hdev) {
+ dev = entry;
+ break;
+ }
+ }
+
+ read_unlock_irqrestore(&devices_lock, flags);
+
+ return dev;
+}
+
+static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev)
+{
+ struct sk_buff *skb_cp;
+ int ret;
+
+ skb_cp = skb_copy(skb, GFP_ATOMIC);
+ if (!skb_cp)
+ return -ENOMEM;
+
+ ret = netif_rx(skb_cp);
+
+ BT_DBG("receive skb %d", ret);
+ if (ret < 0)
+ return NET_RX_DROP;
+
+ return ret;
+}
+
+static int process_data(struct sk_buff *skb, struct net_device *netdev,
+ struct l2cap_conn *conn)
+{
+ const u8 *saddr, *daddr;
+ u8 iphc0, iphc1;
+ struct lowpan_dev *dev;
+ struct lowpan_peer *peer;
+ unsigned long flags;
+
+ dev = lowpan_dev(netdev);
+
+ read_lock_irqsave(&devices_lock, flags);
+ peer = peer_lookup_conn(dev, conn);
+ read_unlock_irqrestore(&devices_lock, flags);
+ if (!peer)
+ goto drop;
+
+ saddr = peer->eui64_addr;
+ daddr = dev->netdev->dev_addr;
+
+ /* at least two bytes will be used for the encoding */
+ if (skb->len < 2)
+ goto drop;
+
+ if (lowpan_fetch_skb_u8(skb, &iphc0))
+ goto drop;
+
+ if (lowpan_fetch_skb_u8(skb, &iphc1))
+ goto drop;
+
+ return lowpan_process_data(skb, netdev,
+ saddr, IEEE802154_ADDR_LONG, EUI64_ADDR_LEN,
+ daddr, IEEE802154_ADDR_LONG, EUI64_ADDR_LEN,
+ iphc0, iphc1, give_skb_to_upper);
+
+drop:
+ kfree_skb(skb);
+ return -EINVAL;
+}
+
+static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
+ struct l2cap_conn *conn)
+{
+ struct sk_buff *local_skb;
+ int ret;
+
+ if (!netif_running(dev))
+ goto drop;
+
+ if (dev->type != ARPHRD_6LOWPAN)
+ goto drop;
+
+ /* check that it's our buffer */
+ if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
+ /* Copy the packet so that the IPv6 header is
+ * properly aligned.
+ */
+ local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
+ skb_tailroom(skb), GFP_ATOMIC);
+ if (!local_skb)
+ goto drop;
+
+ local_skb->protocol = htons(ETH_P_IPV6);
+ local_skb->pkt_type = PACKET_HOST;
+
+ skb_reset_network_header(local_skb);
+ skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));
+
+ if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) {
+ kfree_skb(local_skb);
+ goto drop;
+ }
+
+ dev->stats.rx_bytes += skb->len;
+ dev->stats.rx_packets++;
+
+ kfree_skb(local_skb);
+ kfree_skb(skb);
+ } else {
+ switch (skb->data[0] & 0xe0) {
+ case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */
+ local_skb = skb_clone(skb, GFP_ATOMIC);
+ if (!local_skb)
+ goto drop;
+
+ ret = process_data(local_skb, dev, conn);
+ if (ret != NET_RX_SUCCESS)
+ goto drop;
+
+ dev->stats.rx_bytes += skb->len;
+ dev->stats.rx_packets++;
+
+ kfree_skb(skb);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return NET_RX_SUCCESS;
+
+drop:
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
+
+/* Packet from BT LE device */
+int bt_6lowpan_recv(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+ struct lowpan_dev *dev;
+ struct lowpan_peer *peer;
+ int err;
+
+ peer = lookup_peer(conn);
+ if (!peer)
+ return -ENOENT;
+
+ dev = lookup_dev(conn);
+ if (!dev || !dev->netdev)
+ return -ENOENT;
+
+ err = recv_pkt(skb, dev->netdev, conn);
+ BT_DBG("recv pkt %d", err);
+
+ return err;
+}
+
+static inline int skbuff_copy(void *msg, int len, int count, int mtu,
+ struct sk_buff *skb, struct net_device *dev)
+{
+ struct sk_buff **frag;
+ int sent = 0;
+
+ memcpy(skb_put(skb, count), msg, count);
+
+ sent += count;
+ msg += count;
+ len -= count;
+
+ dev->stats.tx_bytes += count;
+ dev->stats.tx_packets++;
+
+ raw_dump_table(__func__, "Sending", skb->data, skb->len);
+
+ /* Continuation fragments (no L2CAP header) */
+ frag = &skb_shinfo(skb)->frag_list;
+ while (len > 0) {
+ struct sk_buff *tmp;
+
+ count = min_t(unsigned int, mtu, len);
+
+ tmp = bt_skb_alloc(count, GFP_ATOMIC);
+ if (!tmp)
+ return -ENOMEM;
+
+ *frag = tmp;
+
+ memcpy(skb_put(*frag, count), msg, count);
+
+ raw_dump_table(__func__, "Sending fragment",
+ (*frag)->data, count);
+
+ (*frag)->priority = skb->priority;
+
+ sent += count;
+ msg += count;
+ len -= count;
+
+ skb->len += (*frag)->len;
+ skb->data_len += (*frag)->len;
+
+ frag = &(*frag)->next;
+
+ dev->stats.tx_bytes += count;
+ dev->stats.tx_packets++;
+ }
+
+ return sent;
+}
+
+static struct sk_buff *create_pdu(struct l2cap_conn *conn, void *msg,
+ size_t len, u32 priority,
+ struct net_device *dev)
+{
+ struct sk_buff *skb;
+ int err, count;
+ struct l2cap_hdr *lh;
+
+ /* FIXME: This mtu check should not be needed and at the moment is
+ * only used for testing purposes
+ */
+ if (conn->mtu > (L2CAP_LE_MIN_MTU + L2CAP_HDR_SIZE))
+ conn->mtu = L2CAP_LE_MIN_MTU + L2CAP_HDR_SIZE;
+
+ count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
+
+ BT_DBG("conn %p len %zu mtu %d count %d", conn, len, conn->mtu, count);
+
+ skb = bt_skb_alloc(count + L2CAP_HDR_SIZE, GFP_ATOMIC);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ skb->priority = priority;
+
+ lh = (struct l2cap_hdr *)skb_put(skb, L2CAP_HDR_SIZE);
+ lh->cid = cpu_to_le16(L2CAP_FC_6LOWPAN);
+ lh->len = cpu_to_le16(len);
+
+ err = skbuff_copy(msg, len, count, conn->mtu, skb, dev);
+ if (unlikely(err < 0)) {
+ kfree_skb(skb);
+ BT_DBG("skbuff copy %d failed", err);
+ return ERR_PTR(err);
+ }
+
+ return skb;
+}
+
+static int conn_send(struct l2cap_conn *conn,
+ void *msg, size_t len, u32 priority,
+ struct net_device *dev)
+{
+ struct sk_buff *skb;
+
+ skb = create_pdu(conn, msg, len, priority, dev);
+ if (IS_ERR(skb))
+ return -EINVAL;
+
+ BT_DBG("conn %p skb %p len %d priority %u", conn, skb, skb->len,
+ skb->priority);
+
+ hci_send_acl(conn->hchan, skb, ACL_START);
+
+ return 0;
+}
+
+static void get_dest_bdaddr(struct in6_addr *ip6_daddr,
+ bdaddr_t *addr, u8 *addr_type)
+{
+ u8 *eui64;
+
+ eui64 = ip6_daddr->s6_addr + 8;
+
+ addr->b[0] = eui64[7];
+ addr->b[1] = eui64[6];
+ addr->b[2] = eui64[5];
+ addr->b[3] = eui64[2];
+ addr->b[4] = eui64[1];
+ addr->b[5] = eui64[0];
+
+ addr->b[5] ^= 2;
+
+ /* Set universal/local bit to 0 */
+ if (addr->b[5] & 1) {
+ addr->b[5] &= ~1;
+ *addr_type = ADDR_LE_DEV_PUBLIC;
+ } else {
+ *addr_type = ADDR_LE_DEV_RANDOM;
+ }
+}
+
+static int header_create(struct sk_buff *skb, struct net_device *netdev,
+ unsigned short type, const void *_daddr,
+ const void *_saddr, unsigned int len)
+{
+ struct ipv6hdr *hdr;
+ struct lowpan_dev *dev;
+ struct lowpan_peer *peer;
+ bdaddr_t addr, *any = BDADDR_ANY;
+ u8 *saddr, *daddr = any->b;
+ u8 addr_type;
+
+ if (type != ETH_P_IPV6)
+ return -EINVAL;
+
+ hdr = ipv6_hdr(skb);
+
+ dev = lowpan_dev(netdev);
+
+ if (ipv6_addr_is_multicast(&hdr->daddr)) {
+ memcpy(&lowpan_cb(skb)->addr, &hdr->daddr,
+ sizeof(struct in6_addr));
+ lowpan_cb(skb)->conn = NULL;
+ } else {
+ unsigned long flags;
+
+ /* Get destination BT device from skb.
+ * If there is no such peer then discard the packet.
+ */
+ get_dest_bdaddr(&hdr->daddr, &addr, &addr_type);
+
+ BT_DBG("dest addr %pMR type %d", &addr, addr_type);
+
+ read_lock_irqsave(&devices_lock, flags);
+ peer = peer_lookup_ba(dev, &addr, addr_type);
+ read_unlock_irqrestore(&devices_lock, flags);
+
+ if (!peer) {
+ BT_DBG("no such peer %pMR found", &addr);
+ return -ENOENT;
+ }
+
+ daddr = peer->eui64_addr;
+
+ memcpy(&lowpan_cb(skb)->addr, &hdr->daddr,
+ sizeof(struct in6_addr));
+ lowpan_cb(skb)->conn = peer->conn;
+ }
+
+ saddr = dev->netdev->dev_addr;
+
+ return lowpan_header_compress(skb, netdev, type, daddr, saddr, len);
+}
+
+/* Packet to BT LE device */
+static int send_pkt(struct l2cap_conn *conn, const void *saddr,
+ const void *daddr, struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ raw_dump_table(__func__, "raw skb data dump before fragmentation",
+ skb->data, skb->len);
+
+ return conn_send(conn, skb->data, skb->len, 0, netdev);
+}
+
+static void send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct sk_buff *local_skb;
+ struct lowpan_dev *entry, *tmp;
+ unsigned long flags;
+
+ read_lock_irqsave(&devices_lock, flags);
+
+ list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
+ struct lowpan_peer *pentry, *ptmp;
+ struct lowpan_dev *dev;
+
+ if (entry->netdev != netdev)
+ continue;
+
+ dev = lowpan_dev(entry->netdev);
+
+ list_for_each_entry_safe(pentry, ptmp, &dev->peers, list) {
+ local_skb = skb_clone(skb, GFP_ATOMIC);
+
+ send_pkt(pentry->conn, netdev->dev_addr,
+ pentry->eui64_addr, local_skb, netdev);
+
+ kfree_skb(local_skb);
+ }
+ }
+
+ read_unlock_irqrestore(&devices_lock, flags);
+}
+
+static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ int err = 0;
+ unsigned char *eui64_addr;
+ struct lowpan_dev *dev;
+ struct lowpan_peer *peer;
+ bdaddr_t addr;
+ u8 addr_type;
+
+ if (ipv6_addr_is_multicast(&lowpan_cb(skb)->addr)) {
+ /* We need to send the packet to every device
+ * behind this interface.
+ */
+ send_mcast_pkt(skb, netdev);
+ } else {
+ unsigned long flags;
+
+ get_dest_bdaddr(&lowpan_cb(skb)->addr, &addr, &addr_type);
+ eui64_addr = lowpan_cb(skb)->addr.s6_addr + 8;
+ dev = lowpan_dev(netdev);
+
+ read_lock_irqsave(&devices_lock, flags);
+ peer = peer_lookup_ba(dev, &addr, addr_type);
+ read_unlock_irqrestore(&devices_lock, flags);
+
+ BT_DBG("xmit from %s to %pMR (%pI6c) peer %p", netdev->name,
+ &addr, &lowpan_cb(skb)->addr, peer);
+
+ if (peer && peer->conn)
+ err = send_pkt(peer->conn, netdev->dev_addr,
+ eui64_addr, skb, netdev);
+ }
+ dev_kfree_skb(skb);
+
+ if (err)
+ BT_DBG("ERROR: xmit failed (%d)", err);
+
+ return (err < 0) ? NET_XMIT_DROP : err;
+}
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_start_xmit = bt_xmit,
+};
+
+static struct header_ops header_ops = {
+ .create = header_create,
+};
+
+static void netdev_setup(struct net_device *dev)
+{
+ dev->addr_len = EUI64_ADDR_LEN;
+ dev->type = ARPHRD_6LOWPAN;
+
+ dev->hard_header_len = 0;
+ dev->needed_tailroom = 0;
+ dev->mtu = IPV6_MIN_MTU;
+ dev->tx_queue_len = 0;
+ dev->flags = IFF_RUNNING | IFF_POINTOPOINT;
+ dev->watchdog_timeo = 0;
+
+ dev->netdev_ops = &netdev_ops;
+ dev->header_ops = &header_ops;
+ dev->destructor = free_netdev;
+}
+
+static struct device_type bt_type = {
+ .name = "bluetooth",
+};
+
+static void set_addr(u8 *eui, u8 *addr, u8 addr_type)
+{
+ /* addr is the BT address in little-endian format */
+ eui[0] = addr[5];
+ eui[1] = addr[4];
+ eui[2] = addr[3];
+ eui[3] = 0xFF;
+ eui[4] = 0xFE;
+ eui[5] = addr[2];
+ eui[6] = addr[1];
+ eui[7] = addr[0];
+
+ eui[0] ^= 2;
+
+ /* Universal/local bit set, RFC 4291 */
+ if (addr_type == ADDR_LE_DEV_PUBLIC)
+ eui[0] |= 1;
+ else
+ eui[0] &= ~1;
+}
+
+static void set_dev_addr(struct net_device *netdev, bdaddr_t *addr,
+ u8 addr_type)
+{
+ netdev->addr_assign_type = NET_ADDR_PERM;
+ set_addr(netdev->dev_addr, addr->b, addr_type);
+ netdev->dev_addr[0] ^= 2;
+}
+
+static void ifup(struct net_device *netdev)
+{
+ int err;
+
+ rtnl_lock();
+ err = dev_open(netdev);
+ if (err < 0)
+ BT_INFO("iface %s cannot be opened (%d)", netdev->name, err);
+ rtnl_unlock();
+}
+
+static void do_notify_peers(struct work_struct *work)
+{
+ struct lowpan_dev *dev = container_of(work, struct lowpan_dev,
+ notify_peers.work);
+
+ netdev_notify_peers(dev->netdev); /* send neighbour adv at startup */
+}
+
+static bool is_bt_6lowpan(struct hci_conn *hcon)
+{
+ if (hcon->type != LE_LINK)
+ return false;
+
+ return test_bit(HCI_CONN_6LOWPAN, &hcon->flags);
+}
+
+static int add_peer_conn(struct l2cap_conn *conn, struct lowpan_dev *dev)
+{
+ struct lowpan_peer *peer;
+ unsigned long flags;
+
+ peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
+ if (!peer)
+ return -ENOMEM;
+
+ peer->conn = conn;
+ memset(&peer->peer_addr, 0, sizeof(struct in6_addr));
+
+ /* RFC 2464 ch. 5 */
+ peer->peer_addr.s6_addr[0] = 0xFE;
+ peer->peer_addr.s6_addr[1] = 0x80;
+ set_addr((u8 *)&peer->peer_addr.s6_addr + 8, conn->hcon->dst.b,
+ conn->hcon->dst_type);
+
+ memcpy(&peer->eui64_addr, (u8 *)&peer->peer_addr.s6_addr + 8,
+ EUI64_ADDR_LEN);
+ peer->eui64_addr[0] ^= 2; /* second bit-flip (Universal/Local)
+ * is done according to RFC 2464
+ */
+
+ raw_dump_inline(__func__, "peer IPv6 address",
+ (unsigned char *)&peer->peer_addr, 16);
+ raw_dump_inline(__func__, "peer EUI64 address", peer->eui64_addr, 8);
+
+ write_lock_irqsave(&devices_lock, flags);
+ INIT_LIST_HEAD(&peer->list);
+ peer_add(dev, peer);
+ write_unlock_irqrestore(&devices_lock, flags);
+
+ /* Notifying peers about us needs to be done without locks held */
+ INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers);
+ schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100));
+
+ return 0;
+}
+
+/* This gets called when a BT LE 6LoWPAN device is connected. We then
+ * create a network device that acts as a proxy between the BT LE device
+ * and the kernel network stack.
+ */
+int bt_6lowpan_add_conn(struct l2cap_conn *conn)
+{
+ struct lowpan_peer *peer = NULL;
+ struct lowpan_dev *dev;
+ struct net_device *netdev;
+ int err = 0;
+ unsigned long flags;
+
+ if (!is_bt_6lowpan(conn->hcon))
+ return 0;
+
+ peer = lookup_peer(conn);
+ if (peer)
+ return -EEXIST;
+
+ dev = lookup_dev(conn);
+ if (dev)
+ return add_peer_conn(conn, dev);
+
+ netdev = alloc_netdev(sizeof(*dev), IFACE_NAME_TEMPLATE, netdev_setup);
+ if (!netdev)
+ return -ENOMEM;
+
+ set_dev_addr(netdev, &conn->hcon->src, conn->hcon->src_type);
+
+ netdev->netdev_ops = &netdev_ops;
+ SET_NETDEV_DEV(netdev, &conn->hcon->dev);
+ SET_NETDEV_DEVTYPE(netdev, &bt_type);
+
+ err = register_netdev(netdev);
+ if (err < 0) {
+ BT_INFO("register_netdev failed %d", err);
+ free_netdev(netdev);
+ goto out;
+ }
+
+ BT_DBG("ifindex %d peer bdaddr %pMR my addr %pMR",
+ netdev->ifindex, &conn->hcon->dst, &conn->hcon->src);
+ set_bit(__LINK_STATE_PRESENT, &netdev->state);
+
+ dev = netdev_priv(netdev);
+ dev->netdev = netdev;
+ dev->hdev = conn->hcon->hdev;
+ INIT_LIST_HEAD(&dev->peers);
+
+ write_lock_irqsave(&devices_lock, flags);
+ INIT_LIST_HEAD(&dev->list);
+ list_add(&dev->list, &bt_6lowpan_devices);
+ write_unlock_irqrestore(&devices_lock, flags);
+
+ ifup(netdev);
+
+ return add_peer_conn(conn, dev);
+
+out:
+ return err;
+}
+
+static void delete_netdev(struct work_struct *work)
+{
+ struct lowpan_dev *entry = container_of(work, struct lowpan_dev,
+ delete_netdev);
+
+ unregister_netdev(entry->netdev);
+
+ /* The entry pointer is deleted in device_event() */
+}
+
+int bt_6lowpan_del_conn(struct l2cap_conn *conn)
+{
+ struct lowpan_dev *entry, *tmp;
+ struct lowpan_dev *dev = NULL;
+ struct lowpan_peer *peer;
+ int err = -ENOENT;
+ unsigned long flags;
+ bool last = false;
+
+ if (!conn || !is_bt_6lowpan(conn->hcon))
+ return 0;
+
+ write_lock_irqsave(&devices_lock, flags);
+
+ list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
+ dev = lowpan_dev(entry->netdev);
+ peer = peer_lookup_conn(dev, conn);
+ if (peer) {
+ last = peer_del(dev, peer);
+ err = 0;
+ break;
+ }
+ }
+
+ if (!err && last && dev && !atomic_read(&dev->peer_count)) {
+ write_unlock_irqrestore(&devices_lock, flags);
+
+ cancel_delayed_work_sync(&dev->notify_peers);
+
+ /* bt_6lowpan_del_conn() is called with the hci dev lock held, which
+ * means that we must delete the netdevice in a worker thread.
+ */
+ INIT_WORK(&entry->delete_netdev, delete_netdev);
+ schedule_work(&entry->delete_netdev);
+ } else {
+ write_unlock_irqrestore(&devices_lock, flags);
+ }
+
+ return err;
+}
+
+static int device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+ struct lowpan_dev *entry, *tmp;
+ unsigned long flags;
+
+ if (netdev->type != ARPHRD_6LOWPAN)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_UNREGISTER:
+ write_lock_irqsave(&devices_lock, flags);
+ list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices,
+ list) {
+ if (entry->netdev == netdev) {
+ list_del(&entry->list);
+ kfree(entry);
+ break;
+ }
+ }
+ write_unlock_irqrestore(&devices_lock, flags);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block bt_6lowpan_dev_notifier = {
+ .notifier_call = device_event,
+};
+
+int bt_6lowpan_init(void)
+{
+ return register_netdevice_notifier(&bt_6lowpan_dev_notifier);
+}
+
+void bt_6lowpan_cleanup(void)
+{
+ unregister_netdevice_notifier(&bt_6lowpan_dev_notifier);
+}
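The two helpers above, set_addr() and get_dest_bdaddr(), carry the core address translation of the new transport: a 6-byte Bluetooth LE address is expanded into the 8-byte EUI-64 that becomes the interface identifier of the peer's fe80:: link-local address, and the transmit path reverses the mapping to recover the destination bdaddr. Below is a minimal userspace sketch of the forward mapping, mirroring the byte order and bit handling used in the patch; LE_PUBLIC/LE_RANDOM are illustrative stand-ins for the kernel's ADDR_LE_DEV_PUBLIC/ADDR_LE_DEV_RANDOM.

#include <stdint.h>
#include <stdio.h>

#define LE_PUBLIC 0	/* stand-in for ADDR_LE_DEV_PUBLIC */
#define LE_RANDOM 1	/* stand-in for ADDR_LE_DEV_RANDOM */

/* bdaddr -> EUI-64, as set_addr() does it */
static void bdaddr_to_eui64(uint8_t eui[8], const uint8_t addr[6], int type)
{
	/* addr[] is the BT address in little-endian (wire) order */
	eui[0] = addr[5];
	eui[1] = addr[4];
	eui[2] = addr[3];
	eui[3] = 0xFF;		/* EUI-48 -> EUI-64 expansion bytes */
	eui[4] = 0xFE;
	eui[5] = addr[2];
	eui[6] = addr[1];
	eui[7] = addr[0];

	eui[0] ^= 0x02;		/* universal/local bit flip, RFC 2464 style */

	if (type == LE_PUBLIC)	/* the patch marks public addresses this way */
		eui[0] |= 1;
	else
		eui[0] &= ~1;
}

int main(void)
{
	/* 11:22:33:44:55:66 stored LSB first, as a bdaddr_t would be */
	const uint8_t bdaddr[6] = { 0x66, 0x55, 0x44, 0x33, 0x22, 0x11 };
	uint8_t eui[8];
	int i;

	bdaddr_to_eui64(eui, bdaddr, LE_PUBLIC);
	for (i = 0; i < 8; i++)
		printf("%02x%c", eui[i], i == 7 ? '\n' : ':');
	return 0;
}

Fed the public address 11:22:33:44:55:66, the sketch prints 13:22:33:ff:fe:44:55:66, which is the interface identifier the peer carries in its link-local IPv6 address.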
diff --git a/net/bluetooth/6lowpan.h b/net/bluetooth/6lowpan.h
new file mode 100644
index 00000000000..680eac808d7
--- /dev/null
+++ b/net/bluetooth/6lowpan.h
@@ -0,0 +1,26 @@
+/*
+ Copyright (c) 2013 Intel Corp.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 and
+ only version 2 as published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+*/
+
+#ifndef __6LOWPAN_H
+#define __6LOWPAN_H
+
+#include <linux/skbuff.h>
+#include <net/bluetooth/l2cap.h>
+
+int bt_6lowpan_recv(struct l2cap_conn *conn, struct sk_buff *skb);
+int bt_6lowpan_add_conn(struct l2cap_conn *conn);
+int bt_6lowpan_del_conn(struct l2cap_conn *conn);
+int bt_6lowpan_init(void);
+void bt_6lowpan_cleanup(void);
+
+#endif /* __6LOWPAN_H */
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 6a791e73e39..cc6827e2ce6 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -10,6 +10,10 @@ obj-$(CONFIG_BT_HIDP) += hidp/
bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \
- a2mp.o amp.o
+ a2mp.o amp.o 6lowpan.o
+
+ifeq ($(CONFIG_IEEE802154_6LOWPAN),)
+ bluetooth-y += ../ieee802154/6lowpan_iphc.o
+endif
subdir-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 8b8b5f80dd8..5e8663c194c 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -636,6 +636,49 @@ static int conn_max_interval_get(void *data, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
conn_max_interval_set, "%llu\n");
+static ssize_t lowpan_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hci_dev *hdev = file->private_data;
+ char buf[3];
+
+ buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
+ size_t count, loff_t *position)
+{
+ struct hci_dev *hdev = fp->private_data;
+ bool enable;
+ char buf[32];
+ size_t buf_size = min(count, (sizeof(buf)-1));
+
+ if (copy_from_user(buf, user_buffer, buf_size))
+ return -EFAULT;
+
+ buf[buf_size] = '\0';
+
+ if (strtobool(buf, &enable) < 0)
+ return -EINVAL;
+
+ if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
+ return -EALREADY;
+
+ change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);
+
+ return count;
+}
+
+static const struct file_operations lowpan_debugfs_fops = {
+ .open = simple_open,
+ .read = lowpan_read,
+ .write = lowpan_write,
+ .llseek = default_llseek,
+};
+
/* ---- HCI requests ---- */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
@@ -1261,8 +1304,13 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
* as supported send it. If not supported assume that the controller
* does not have actual support for stored link keys which makes this
* command redundant anyway.
+ *
+ * Some controllers indicate that they support deleting stored
+ * link keys, but in fact they do not. The quirk lets a driver
+ * simply disable this command.
*/
- if (hdev->commands[6] & 0x80) {
+ if (hdev->commands[6] & 0x80 &&
+ !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
struct hci_cp_delete_stored_link_key cp;
bacpy(&cp.bdaddr, BDADDR_ANY);
@@ -1406,6 +1454,8 @@ static int __hci_init(struct hci_dev *hdev)
hdev, &conn_min_interval_fops);
debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
hdev, &conn_max_interval_fops);
+ debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
+ &lowpan_debugfs_fops);
}
return 0;
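The new debugfs entry makes 6LoWPAN support an opt-in, per-controller switch. Below is a small sketch of flipping it from userspace, under the assumption that debugfs is mounted at /sys/kernel/debug and the controller is hci0; the path mirrors the debugfs_create_file() call above, which places the file in the per-hdev debugfs directory.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* assumed path: <debugfs mount>/bluetooth/<hci name>/6lowpan */
	const char *path = "/sys/kernel/debug/bluetooth/hci0/6lowpan";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return 1;
	}

	/* lowpan_write() runs the buffer through strtobool(), so "Y"/"1" work;
	 * writing the already-set value returns -EALREADY.
	 */
	if (write(fd, "Y", 1) < 0)
		perror("write");

	close(fd);
	return 0;
}

Reading the file back returns "Y" or "N" as produced by lowpan_read(); once the flag is set, newly completed LE connections get HCI_CONN_6LOWPAN in the hci_event.c hunk that follows.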
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 5fb3df66c2c..5f812455a45 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -3533,6 +3533,9 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
conn->handle = __le16_to_cpu(ev->handle);
conn->state = BT_CONNECTED;
+ if (test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
+ set_bit(HCI_CONN_6LOWPAN, &conn->flags);
+
hci_conn_add_sysfs(conn);
hci_proto_connect_cfm(conn, ev->status);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index b6bca64b320..b0ad2c752d7 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -40,6 +40,7 @@
#include "smp.h"
#include "a2mp.h"
#include "amp.h"
+#include "6lowpan.h"
bool disable_ertm;
@@ -1468,6 +1469,8 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
BT_DBG("");
+ bt_6lowpan_add_conn(conn);
+
/* Check if we have socket listening on cid */
pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
&hcon->src, &hcon->dst);
@@ -7119,6 +7122,10 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
l2cap_conn_del(conn->hcon, EACCES);
break;
+ case L2CAP_FC_6LOWPAN:
+ bt_6lowpan_recv(conn, skb);
+ break;
+
default:
l2cap_data_channel(conn, cid, skb);
break;
@@ -7186,6 +7193,8 @@ void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
BT_DBG("hcon %p reason %d", hcon, reason);
+ bt_6lowpan_del_conn(hcon->l2cap_data);
+
l2cap_conn_del(hcon, bt_to_errno(reason));
}
@@ -7467,11 +7476,14 @@ int __init l2cap_init(void)
debugfs_create_u16("l2cap_le_default_mps", 0466, bt_debugfs,
&le_default_mps);
+ bt_6lowpan_init();
+
return 0;
}
void l2cap_exit(void)
{
+ bt_6lowpan_cleanup();
debugfs_remove(l2cap_debugfs);
l2cap_cleanup_sockets();
}
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index e7806e6d282..20ef748b290 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -147,6 +147,9 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
__le16_to_cpu(la.l2_psm) == L2CAP_PSM_RFCOMM)
chan->sec_level = BT_SECURITY_SDP;
break;
+ case L2CAP_CHAN_RAW:
+ chan->sec_level = BT_SECURITY_SDP;
+ break;
}
bacpy(&chan->src, &la.l2_bdaddr);
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 84fcf9fff3e..f9c0980abee 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -58,6 +58,7 @@ struct rfcomm_dev {
uint modem_status;
struct rfcomm_dlc *dlc;
+ wait_queue_head_t conn_wait;
struct device *tty_dev;
@@ -103,20 +104,60 @@ static void rfcomm_dev_destruct(struct tty_port *port)
module_put(THIS_MODULE);
}
-/* device-specific initialization: open the dlc */
-static int rfcomm_dev_activate(struct tty_port *port, struct tty_struct *tty)
+static struct device *rfcomm_get_device(struct rfcomm_dev *dev)
{
- struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port);
+ struct hci_dev *hdev;
+ struct hci_conn *conn;
- return rfcomm_dlc_open(dev->dlc, &dev->src, &dev->dst, dev->channel);
+ hdev = hci_get_route(&dev->dst, &dev->src);
+ if (!hdev)
+ return NULL;
+
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &dev->dst);
+
+ hci_dev_put(hdev);
+
+ return conn ? &conn->dev : NULL;
}
-/* we block the open until the dlc->state becomes BT_CONNECTED */
-static int rfcomm_dev_carrier_raised(struct tty_port *port)
+/* device-specific initialization: open the dlc */
+static int rfcomm_dev_activate(struct tty_port *port, struct tty_struct *tty)
{
struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port);
+ DEFINE_WAIT(wait);
+ int err;
+
+ err = rfcomm_dlc_open(dev->dlc, &dev->src, &dev->dst, dev->channel);
+ if (err)
+ return err;
+
+ while (1) {
+ prepare_to_wait(&dev->conn_wait, &wait, TASK_INTERRUPTIBLE);
+
+ if (dev->dlc->state == BT_CLOSED) {
+ err = -dev->err;
+ break;
+ }
+
+ if (dev->dlc->state == BT_CONNECTED)
+ break;
+
+ if (signal_pending(current)) {
+ err = -ERESTARTSYS;
+ break;
+ }
+
+ tty_unlock(tty);
+ schedule();
+ tty_lock(tty);
+ }
+ finish_wait(&dev->conn_wait, &wait);
+
+ if (!err)
+ device_move(dev->tty_dev, rfcomm_get_device(dev),
+ DPM_ORDER_DEV_AFTER_PARENT);
- return (dev->dlc->state == BT_CONNECTED);
+ return err;
}
/* device-specific cleanup: close the dlc */
@@ -135,7 +176,6 @@ static const struct tty_port_operations rfcomm_port_ops = {
.destruct = rfcomm_dev_destruct,
.activate = rfcomm_dev_activate,
.shutdown = rfcomm_dev_shutdown,
- .carrier_raised = rfcomm_dev_carrier_raised,
};
static struct rfcomm_dev *__rfcomm_dev_get(int id)
@@ -169,22 +209,6 @@ static struct rfcomm_dev *rfcomm_dev_get(int id)
return dev;
}
-static struct device *rfcomm_get_device(struct rfcomm_dev *dev)
-{
- struct hci_dev *hdev;
- struct hci_conn *conn;
-
- hdev = hci_get_route(&dev->dst, &dev->src);
- if (!hdev)
- return NULL;
-
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &dev->dst);
-
- hci_dev_put(hdev);
-
- return conn ? &conn->dev : NULL;
-}
-
static ssize_t show_address(struct device *tty_dev, struct device_attribute *attr, char *buf)
{
struct rfcomm_dev *dev = dev_get_drvdata(tty_dev);
@@ -258,6 +282,7 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
tty_port_init(&dev->port);
dev->port.ops = &rfcomm_port_ops;
+ init_waitqueue_head(&dev->conn_wait);
skb_queue_head_init(&dev->pending);
@@ -437,7 +462,8 @@ static int rfcomm_release_dev(void __user *arg)
tty_kref_put(tty);
}
- if (!test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags))
+ if (!test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags) &&
+ !test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags))
tty_port_put(&dev->port);
tty_port_put(&dev->port);
@@ -575,12 +601,9 @@ static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err)
BT_DBG("dlc %p dev %p err %d", dlc, dev, err);
dev->err = err;
- if (dlc->state == BT_CONNECTED) {
- device_move(dev->tty_dev, rfcomm_get_device(dev),
- DPM_ORDER_DEV_AFTER_PARENT);
+ wake_up_interruptible(&dev->conn_wait);
- wake_up_interruptible(&dev->port.open_wait);
- } else if (dlc->state == BT_CLOSED)
+ if (dlc->state == BT_CLOSED)
tty_port_tty_hangup(&dev->port, false);
}
@@ -670,10 +693,20 @@ static int rfcomm_tty_install(struct tty_driver *driver, struct tty_struct *tty)
/* install the tty_port */
err = tty_port_install(&dev->port, driver, tty);
- if (err)
+ if (err) {
rfcomm_tty_cleanup(tty);
+ return err;
+ }
- return err;
+ /* take over the tty_port reference if the port was created with the
+ * flag RFCOMM_RELEASE_ONHUP. This will force the release of the port
+ * when the last process closes the tty. The behaviour is expected by
+ * userspace.
+ */
+ if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags))
+ tty_port_put(&dev->port);
+
+ return 0;
}
static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
@@ -1010,10 +1043,6 @@ static void rfcomm_tty_hangup(struct tty_struct *tty)
BT_DBG("tty %p dev %p", tty, dev);
tty_port_hangup(&dev->port);
-
- if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags) &&
- !test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags))
- tty_port_put(&dev->port);
}
static int rfcomm_tty_tiocmget(struct tty_struct *tty)
@@ -1096,7 +1125,7 @@ int __init rfcomm_init_ttys(void)
rfcomm_tty_driver->subtype = SERIAL_TYPE_NORMAL;
rfcomm_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
rfcomm_tty_driver->init_termios = tty_std_termios;
- rfcomm_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL;
+ rfcomm_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
rfcomm_tty_driver->init_termios.c_lflag &= ~ICANON;
tty_set_operations(rfcomm_tty_driver, &rfcomm_ops);
diff --git a/net/bridge/netfilter/nf_tables_bridge.c b/net/bridge/netfilter/nf_tables_bridge.c
index cf54b22818c..5bcc0d8b31f 100644
--- a/net/bridge/netfilter/nf_tables_bridge.c
+++ b/net/bridge/netfilter/nf_tables_bridge.c
@@ -14,10 +14,30 @@
#include <linux/netfilter_bridge.h>
#include <net/netfilter/nf_tables.h>
+static unsigned int
+nft_do_chain_bridge(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ struct nft_pktinfo pkt;
+
+ nft_set_pktinfo(&pkt, ops, skb, in, out);
+
+ return nft_do_chain(&pkt, ops);
+}
+
static struct nft_af_info nft_af_bridge __read_mostly = {
.family = NFPROTO_BRIDGE,
.nhooks = NF_BR_NUMHOOKS,
.owner = THIS_MODULE,
+ .nops = 1,
+ .hooks = {
+ [NF_BR_LOCAL_IN] = nft_do_chain_bridge,
+ [NF_BR_FORWARD] = nft_do_chain_bridge,
+ [NF_BR_LOCAL_OUT] = nft_do_chain_bridge,
+ },
};
static int nf_tables_bridge_init_net(struct net *net)
@@ -48,32 +68,14 @@ static struct pernet_operations nf_tables_bridge_net_ops = {
.exit = nf_tables_bridge_exit_net,
};
-static unsigned int
-nft_do_chain_bridge(const struct nf_hook_ops *ops,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
-{
- struct nft_pktinfo pkt;
-
- nft_set_pktinfo(&pkt, ops, skb, in, out);
-
- return nft_do_chain_pktinfo(&pkt, ops);
-}
-
-static struct nf_chain_type filter_bridge = {
- .family = NFPROTO_BRIDGE,
+static const struct nf_chain_type filter_bridge = {
.name = "filter",
.type = NFT_CHAIN_T_DEFAULT,
+ .family = NFPROTO_BRIDGE,
+ .owner = THIS_MODULE,
.hook_mask = (1 << NF_BR_LOCAL_IN) |
(1 << NF_BR_FORWARD) |
(1 << NF_BR_LOCAL_OUT),
- .fn = {
- [NF_BR_LOCAL_IN] = nft_do_chain_bridge,
- [NF_BR_FORWARD] = nft_do_chain_bridge,
- [NF_BR_LOCAL_OUT] = nft_do_chain_bridge,
- },
};
static int __init nf_tables_bridge_init(void)
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index a2d2456a557..48b25c0af4d 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -62,9 +62,6 @@
#include "6lowpan.h"
-/* TTL uncompression values */
-static const u8 lowpan_ttl_values[] = {0, 1, 64, 255};
-
static LIST_HEAD(lowpan_devices);
/* private device info */
@@ -104,378 +101,14 @@ static inline void lowpan_address_flip(u8 *src, u8 *dest)
(dest)[IEEE802154_ADDR_LEN - i - 1] = (src)[i];
}
-/* list of all 6lowpan devices, uses for package delivering */
-/* print data in line */
-static inline void lowpan_raw_dump_inline(const char *caller, char *msg,
- unsigned char *buf, int len)
-{
-#ifdef DEBUG
- if (msg)
- pr_debug("(%s) %s: ", caller, msg);
- print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE,
- 16, 1, buf, len, false);
-#endif /* DEBUG */
-}
-
-/*
- * print data in a table format:
- *
- * addr: xx xx xx xx xx xx
- * addr: xx xx xx xx xx xx
- * ...
- */
-static inline void lowpan_raw_dump_table(const char *caller, char *msg,
- unsigned char *buf, int len)
-{
-#ifdef DEBUG
- if (msg)
- pr_debug("(%s) %s:\n", caller, msg);
- print_hex_dump(KERN_DEBUG, "\t", DUMP_PREFIX_OFFSET,
- 16, 1, buf, len, false);
-#endif /* DEBUG */
-}
-
-static u8
-lowpan_compress_addr_64(u8 **hc06_ptr, u8 shift, const struct in6_addr *ipaddr,
- const unsigned char *lladdr)
-{
- u8 val = 0;
-
- if (is_addr_mac_addr_based(ipaddr, lladdr))
- val = 3; /* 0-bits */
- else if (lowpan_is_iid_16_bit_compressable(ipaddr)) {
- /* compress IID to 16 bits xxxx::XXXX */
- memcpy(*hc06_ptr, &ipaddr->s6_addr16[7], 2);
- *hc06_ptr += 2;
- val = 2; /* 16-bits */
- } else {
- /* do not compress IID => xxxx::IID */
- memcpy(*hc06_ptr, &ipaddr->s6_addr16[4], 8);
- *hc06_ptr += 8;
- val = 1; /* 64-bits */
- }
-
- return rol8(val, shift);
-}
-
-/*
- * Uncompress address function for source and
- * destination address(non-multicast).
- *
- * address_mode is sam value or dam value.
- */
-static int
-lowpan_uncompress_addr(struct sk_buff *skb,
- struct in6_addr *ipaddr,
- const u8 address_mode,
- const struct ieee802154_addr *lladdr)
-{
- bool fail;
-
- switch (address_mode) {
- case LOWPAN_IPHC_ADDR_00:
- /* for global link addresses */
- fail = lowpan_fetch_skb(skb, ipaddr->s6_addr, 16);
- break;
- case LOWPAN_IPHC_ADDR_01:
- /* fe:80::XXXX:XXXX:XXXX:XXXX */
- ipaddr->s6_addr[0] = 0xFE;
- ipaddr->s6_addr[1] = 0x80;
- fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[8], 8);
- break;
- case LOWPAN_IPHC_ADDR_02:
- /* fe:80::ff:fe00:XXXX */
- ipaddr->s6_addr[0] = 0xFE;
- ipaddr->s6_addr[1] = 0x80;
- ipaddr->s6_addr[11] = 0xFF;
- ipaddr->s6_addr[12] = 0xFE;
- fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[14], 2);
- break;
- case LOWPAN_IPHC_ADDR_03:
- fail = false;
- switch (lladdr->addr_type) {
- case IEEE802154_ADDR_LONG:
- /* fe:80::XXXX:XXXX:XXXX:XXXX
- * \_________________/
- * hwaddr
- */
- ipaddr->s6_addr[0] = 0xFE;
- ipaddr->s6_addr[1] = 0x80;
- memcpy(&ipaddr->s6_addr[8], lladdr->hwaddr,
- IEEE802154_ADDR_LEN);
- /* second bit-flip (Universe/Local)
- * is done according RFC2464
- */
- ipaddr->s6_addr[8] ^= 0x02;
- break;
- case IEEE802154_ADDR_SHORT:
- /* fe:80::ff:fe00:XXXX
- * \__/
- * short_addr
- *
- * Universe/Local bit is zero.
- */
- ipaddr->s6_addr[0] = 0xFE;
- ipaddr->s6_addr[1] = 0x80;
- ipaddr->s6_addr[11] = 0xFF;
- ipaddr->s6_addr[12] = 0xFE;
- ipaddr->s6_addr16[7] = htons(lladdr->short_addr);
- break;
- default:
- pr_debug("Invalid addr_type set\n");
- return -EINVAL;
- }
- break;
- default:
- pr_debug("Invalid address mode value: 0x%x\n", address_mode);
- return -EINVAL;
- }
-
- if (fail) {
- pr_debug("Failed to fetch skb data\n");
- return -EIO;
- }
-
- lowpan_raw_dump_inline(NULL, "Reconstructed ipv6 addr is:\n",
- ipaddr->s6_addr, 16);
-
- return 0;
-}
-
-/* Uncompress address function for source context
- * based address(non-multicast).
- */
-static int
-lowpan_uncompress_context_based_src_addr(struct sk_buff *skb,
- struct in6_addr *ipaddr,
- const u8 sam)
-{
- switch (sam) {
- case LOWPAN_IPHC_ADDR_00:
- /* unspec address ::
- * Do nothing, address is already ::
- */
- break;
- case LOWPAN_IPHC_ADDR_01:
- /* TODO */
- case LOWPAN_IPHC_ADDR_02:
- /* TODO */
- case LOWPAN_IPHC_ADDR_03:
- /* TODO */
- netdev_warn(skb->dev, "SAM value 0x%x not supported\n", sam);
- return -EINVAL;
- default:
- pr_debug("Invalid sam value: 0x%x\n", sam);
- return -EINVAL;
- }
-
- lowpan_raw_dump_inline(NULL,
- "Reconstructed context based ipv6 src addr is:\n",
- ipaddr->s6_addr, 16);
-
- return 0;
-}
-
-/* Uncompress function for multicast destination address,
- * when M bit is set.
- */
-static int
-lowpan_uncompress_multicast_daddr(struct sk_buff *skb,
- struct in6_addr *ipaddr,
- const u8 dam)
-{
- bool fail;
-
- switch (dam) {
- case LOWPAN_IPHC_DAM_00:
- /* 00: 128 bits. The full address
- * is carried in-line.
- */
- fail = lowpan_fetch_skb(skb, ipaddr->s6_addr, 16);
- break;
- case LOWPAN_IPHC_DAM_01:
- /* 01: 48 bits. The address takes
- * the form ffXX::00XX:XXXX:XXXX.
- */
- ipaddr->s6_addr[0] = 0xFF;
- fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[1], 1);
- fail |= lowpan_fetch_skb(skb, &ipaddr->s6_addr[11], 5);
- break;
- case LOWPAN_IPHC_DAM_10:
- /* 10: 32 bits. The address takes
- * the form ffXX::00XX:XXXX.
- */
- ipaddr->s6_addr[0] = 0xFF;
- fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[1], 1);
- fail |= lowpan_fetch_skb(skb, &ipaddr->s6_addr[13], 3);
- break;
- case LOWPAN_IPHC_DAM_11:
- /* 11: 8 bits. The address takes
- * the form ff02::00XX.
- */
- ipaddr->s6_addr[0] = 0xFF;
- ipaddr->s6_addr[1] = 0x02;
- fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[15], 1);
- break;
- default:
- pr_debug("DAM value has a wrong value: 0x%x\n", dam);
- return -EINVAL;
- }
-
- if (fail) {
- pr_debug("Failed to fetch skb data\n");
- return -EIO;
- }
-
- lowpan_raw_dump_inline(NULL, "Reconstructed ipv6 multicast addr is:\n",
- ipaddr->s6_addr, 16);
-
- return 0;
-}
-
-static void
-lowpan_compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb)
-{
- struct udphdr *uh = udp_hdr(skb);
-
- if (((uh->source & LOWPAN_NHC_UDP_4BIT_MASK) ==
- LOWPAN_NHC_UDP_4BIT_PORT) &&
- ((uh->dest & LOWPAN_NHC_UDP_4BIT_MASK) ==
- LOWPAN_NHC_UDP_4BIT_PORT)) {
- pr_debug("UDP header: both ports compression to 4 bits\n");
- **hc06_ptr = LOWPAN_NHC_UDP_CS_P_11;
- **(hc06_ptr + 1) = /* subtraction is faster */
- (u8)((uh->dest - LOWPAN_NHC_UDP_4BIT_PORT) +
- ((uh->source & LOWPAN_NHC_UDP_4BIT_PORT) << 4));
- *hc06_ptr += 2;
- } else if ((uh->dest & LOWPAN_NHC_UDP_8BIT_MASK) ==
- LOWPAN_NHC_UDP_8BIT_PORT) {
- pr_debug("UDP header: remove 8 bits of dest\n");
- **hc06_ptr = LOWPAN_NHC_UDP_CS_P_01;
- memcpy(*hc06_ptr + 1, &uh->source, 2);
- **(hc06_ptr + 3) = (u8)(uh->dest - LOWPAN_NHC_UDP_8BIT_PORT);
- *hc06_ptr += 4;
- } else if ((uh->source & LOWPAN_NHC_UDP_8BIT_MASK) ==
- LOWPAN_NHC_UDP_8BIT_PORT) {
- pr_debug("UDP header: remove 8 bits of source\n");
- **hc06_ptr = LOWPAN_NHC_UDP_CS_P_10;
- memcpy(*hc06_ptr + 1, &uh->dest, 2);
- **(hc06_ptr + 3) = (u8)(uh->source - LOWPAN_NHC_UDP_8BIT_PORT);
- *hc06_ptr += 4;
- } else {
- pr_debug("UDP header: can't compress\n");
- **hc06_ptr = LOWPAN_NHC_UDP_CS_P_00;
- memcpy(*hc06_ptr + 1, &uh->source, 2);
- memcpy(*hc06_ptr + 3, &uh->dest, 2);
- *hc06_ptr += 5;
- }
-
- /* checksum is always inline */
- memcpy(*hc06_ptr, &uh->check, 2);
- *hc06_ptr += 2;
-
- /* skip the UDP header */
- skb_pull(skb, sizeof(struct udphdr));
-}
-
-static inline int lowpan_fetch_skb_u8(struct sk_buff *skb, u8 *val)
-{
- if (unlikely(!pskb_may_pull(skb, 1)))
- return -EINVAL;
-
- *val = skb->data[0];
- skb_pull(skb, 1);
-
- return 0;
-}
-
-static inline int lowpan_fetch_skb_u16(struct sk_buff *skb, u16 *val)
-{
- if (unlikely(!pskb_may_pull(skb, 2)))
- return -EINVAL;
-
- *val = (skb->data[0] << 8) | skb->data[1];
- skb_pull(skb, 2);
-
- return 0;
-}
-
-static int
-lowpan_uncompress_udp_header(struct sk_buff *skb, struct udphdr *uh)
-{
- u8 tmp;
-
- if (!uh)
- goto err;
-
- if (lowpan_fetch_skb_u8(skb, &tmp))
- goto err;
-
- if ((tmp & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) {
- pr_debug("UDP header uncompression\n");
- switch (tmp & LOWPAN_NHC_UDP_CS_P_11) {
- case LOWPAN_NHC_UDP_CS_P_00:
- memcpy(&uh->source, &skb->data[0], 2);
- memcpy(&uh->dest, &skb->data[2], 2);
- skb_pull(skb, 4);
- break;
- case LOWPAN_NHC_UDP_CS_P_01:
- memcpy(&uh->source, &skb->data[0], 2);
- uh->dest =
- skb->data[2] + LOWPAN_NHC_UDP_8BIT_PORT;
- skb_pull(skb, 3);
- break;
- case LOWPAN_NHC_UDP_CS_P_10:
- uh->source = skb->data[0] + LOWPAN_NHC_UDP_8BIT_PORT;
- memcpy(&uh->dest, &skb->data[1], 2);
- skb_pull(skb, 3);
- break;
- case LOWPAN_NHC_UDP_CS_P_11:
- uh->source =
- LOWPAN_NHC_UDP_4BIT_PORT + (skb->data[0] >> 4);
- uh->dest =
- LOWPAN_NHC_UDP_4BIT_PORT + (skb->data[0] & 0x0f);
- skb_pull(skb, 1);
- break;
- default:
- pr_debug("ERROR: unknown UDP format\n");
- goto err;
- }
-
- pr_debug("uncompressed UDP ports: src = %d, dst = %d\n",
- uh->source, uh->dest);
-
- /* copy checksum */
- memcpy(&uh->check, &skb->data[0], 2);
- skb_pull(skb, 2);
-
- /*
- * UDP lenght needs to be infered from the lower layers
- * here, we obtain the hint from the remaining size of the
- * frame
- */
- uh->len = htons(skb->len + sizeof(struct udphdr));
- pr_debug("uncompressed UDP length: src = %d", uh->len);
- } else {
- pr_debug("ERROR: unsupported NH format\n");
- goto err;
- }
-
- return 0;
-err:
- return -EINVAL;
-}
-
static int lowpan_header_create(struct sk_buff *skb,
struct net_device *dev,
unsigned short type, const void *_daddr,
const void *_saddr, unsigned int len)
{
- u8 tmp, iphc0, iphc1, *hc06_ptr;
struct ipv6hdr *hdr;
const u8 *saddr = _saddr;
const u8 *daddr = _daddr;
- u8 head[100];
struct ieee802154_addr sa, da;
/* TODO:
@@ -485,181 +118,14 @@ static int lowpan_header_create(struct sk_buff *skb,
return 0;
hdr = ipv6_hdr(skb);
- hc06_ptr = head + 2;
-
- pr_debug("IPv6 header dump:\n\tversion = %d\n\tlength = %d\n"
- "\tnexthdr = 0x%02x\n\thop_lim = %d\n", hdr->version,
- ntohs(hdr->payload_len), hdr->nexthdr, hdr->hop_limit);
-
- lowpan_raw_dump_table(__func__, "raw skb network header dump",
- skb_network_header(skb), sizeof(struct ipv6hdr));
if (!saddr)
saddr = dev->dev_addr;
- lowpan_raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8);
-
- /*
- * As we copy some bit-length fields, in the IPHC encoding bytes,
- * we sometimes use |=
- * If the field is 0, and the current bit value in memory is 1,
- * this does not work. We therefore reset the IPHC encoding here
- */
- iphc0 = LOWPAN_DISPATCH_IPHC;
- iphc1 = 0;
-
- /* TODO: context lookup */
+ raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8);
+ raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8);
- lowpan_raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8);
-
- /*
- * Traffic class, flow label
- * If flow label is 0, compress it. If traffic class is 0, compress it
- * We have to process both in the same time as the offset of traffic
- * class depends on the presence of version and flow label
- */
-
- /* hc06 format of TC is ECN | DSCP , original one is DSCP | ECN */
- tmp = (hdr->priority << 4) | (hdr->flow_lbl[0] >> 4);
- tmp = ((tmp & 0x03) << 6) | (tmp >> 2);
-
- if (((hdr->flow_lbl[0] & 0x0F) == 0) &&
- (hdr->flow_lbl[1] == 0) && (hdr->flow_lbl[2] == 0)) {
- /* flow label can be compressed */
- iphc0 |= LOWPAN_IPHC_FL_C;
- if ((hdr->priority == 0) &&
- ((hdr->flow_lbl[0] & 0xF0) == 0)) {
- /* compress (elide) all */
- iphc0 |= LOWPAN_IPHC_TC_C;
- } else {
- /* compress only the flow label */
- *hc06_ptr = tmp;
- hc06_ptr += 1;
- }
- } else {
- /* Flow label cannot be compressed */
- if ((hdr->priority == 0) &&
- ((hdr->flow_lbl[0] & 0xF0) == 0)) {
- /* compress only traffic class */
- iphc0 |= LOWPAN_IPHC_TC_C;
- *hc06_ptr = (tmp & 0xc0) | (hdr->flow_lbl[0] & 0x0F);
- memcpy(hc06_ptr + 1, &hdr->flow_lbl[1], 2);
- hc06_ptr += 3;
- } else {
- /* compress nothing */
- memcpy(hc06_ptr, hdr, 4);
- /* replace the top byte with new ECN | DSCP format */
- *hc06_ptr = tmp;
- hc06_ptr += 4;
- }
- }
-
- /* NOTE: payload length is always compressed */
-
- /* Next Header is compress if UDP */
- if (hdr->nexthdr == UIP_PROTO_UDP)
- iphc0 |= LOWPAN_IPHC_NH_C;
-
- if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
- *hc06_ptr = hdr->nexthdr;
- hc06_ptr += 1;
- }
-
- /*
- * Hop limit
- * if 1: compress, encoding is 01
- * if 64: compress, encoding is 10
- * if 255: compress, encoding is 11
- * else do not compress
- */
- switch (hdr->hop_limit) {
- case 1:
- iphc0 |= LOWPAN_IPHC_TTL_1;
- break;
- case 64:
- iphc0 |= LOWPAN_IPHC_TTL_64;
- break;
- case 255:
- iphc0 |= LOWPAN_IPHC_TTL_255;
- break;
- default:
- *hc06_ptr = hdr->hop_limit;
- hc06_ptr += 1;
- break;
- }
-
- /* source address compression */
- if (is_addr_unspecified(&hdr->saddr)) {
- pr_debug("source address is unspecified, setting SAC\n");
- iphc1 |= LOWPAN_IPHC_SAC;
- /* TODO: context lookup */
- } else if (is_addr_link_local(&hdr->saddr)) {
- pr_debug("source address is link-local\n");
- iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
- LOWPAN_IPHC_SAM_BIT, &hdr->saddr, saddr);
- } else {
- pr_debug("send the full source address\n");
- memcpy(hc06_ptr, &hdr->saddr.s6_addr16[0], 16);
- hc06_ptr += 16;
- }
-
- /* destination address compression */
- if (is_addr_mcast(&hdr->daddr)) {
- pr_debug("destination address is multicast: ");
- iphc1 |= LOWPAN_IPHC_M;
- if (lowpan_is_mcast_addr_compressable8(&hdr->daddr)) {
- pr_debug("compressed to 1 octet\n");
- iphc1 |= LOWPAN_IPHC_DAM_11;
- /* use last byte */
- *hc06_ptr = hdr->daddr.s6_addr[15];
- hc06_ptr += 1;
- } else if (lowpan_is_mcast_addr_compressable32(&hdr->daddr)) {
- pr_debug("compressed to 4 octets\n");
- iphc1 |= LOWPAN_IPHC_DAM_10;
- /* second byte + the last three */
- *hc06_ptr = hdr->daddr.s6_addr[1];
- memcpy(hc06_ptr + 1, &hdr->daddr.s6_addr[13], 3);
- hc06_ptr += 4;
- } else if (lowpan_is_mcast_addr_compressable48(&hdr->daddr)) {
- pr_debug("compressed to 6 octets\n");
- iphc1 |= LOWPAN_IPHC_DAM_01;
- /* second byte + the last five */
- *hc06_ptr = hdr->daddr.s6_addr[1];
- memcpy(hc06_ptr + 1, &hdr->daddr.s6_addr[11], 5);
- hc06_ptr += 6;
- } else {
- pr_debug("using full address\n");
- iphc1 |= LOWPAN_IPHC_DAM_00;
- memcpy(hc06_ptr, &hdr->daddr.s6_addr[0], 16);
- hc06_ptr += 16;
- }
- } else {
- /* TODO: context lookup */
- if (is_addr_link_local(&hdr->daddr)) {
- pr_debug("dest address is unicast and link-local\n");
- iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
- LOWPAN_IPHC_DAM_BIT, &hdr->daddr, daddr);
- } else {
- pr_debug("dest address is unicast: using full one\n");
- memcpy(hc06_ptr, &hdr->daddr.s6_addr16[0], 16);
- hc06_ptr += 16;
- }
- }
-
- /* UDP header compression */
- if (hdr->nexthdr == UIP_PROTO_UDP)
- lowpan_compress_udp_header(&hc06_ptr, skb);
-
- head[0] = iphc0;
- head[1] = iphc1;
-
- skb_pull(skb, sizeof(struct ipv6hdr));
- skb_reset_transport_header(skb);
- memcpy(skb_push(skb, hc06_ptr - head), head, hc06_ptr - head);
- skb_reset_network_header(skb);
-
- lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data,
- skb->len);
+ lowpan_header_compress(skb, dev, type, daddr, saddr, len);
/*
* NOTE1: I'm still unsure about the fact that compression and WPAN
@@ -671,39 +137,38 @@ static int lowpan_header_create(struct sk_buff *skb,
* from MAC subif of the 'dev' and 'real_dev' network devices, but
* this isn't implemented in mainline yet, so currently we assign 0xff
*/
- {
- mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;
- mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev);
+ mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;
+ mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev);
- /* prepare wpan address data */
- sa.addr_type = IEEE802154_ADDR_LONG;
- sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
+ /* prepare wpan address data */
+ sa.addr_type = IEEE802154_ADDR_LONG;
+ sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
- memcpy(&(sa.hwaddr), saddr, 8);
- /* intra-PAN communications */
- da.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
+ memcpy(&(sa.hwaddr), saddr, 8);
+ /* intra-PAN communications */
+ da.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
- /*
- * if the destination address is the broadcast address, use the
- * corresponding short address
- */
- if (lowpan_is_addr_broadcast(daddr)) {
- da.addr_type = IEEE802154_ADDR_SHORT;
- da.short_addr = IEEE802154_ADDR_BROADCAST;
- } else {
- da.addr_type = IEEE802154_ADDR_LONG;
- memcpy(&(da.hwaddr), daddr, IEEE802154_ADDR_LEN);
-
- /* request acknowledgment */
- mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ;
- }
+ /*
+ * if the destination address is the broadcast address, use the
+ * corresponding short address
+ */
+ if (lowpan_is_addr_broadcast(daddr)) {
+ da.addr_type = IEEE802154_ADDR_SHORT;
+ da.short_addr = IEEE802154_ADDR_BROADCAST;
+ } else {
+ da.addr_type = IEEE802154_ADDR_LONG;
+ memcpy(&(da.hwaddr), daddr, IEEE802154_ADDR_LEN);
- return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
- type, (void *)&da, (void *)&sa, skb->len);
+ /* request acknowledgment */
+ mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ;
}
+
+ return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
+ type, (void *)&da, (void *)&sa, skb->len);
}
-static int lowpan_give_skb_to_devices(struct sk_buff *skb)
+static int lowpan_give_skb_to_devices(struct sk_buff *skb,
+ struct net_device *dev)
{
struct lowpan_dev_record *entry;
struct sk_buff *skb_cp;
@@ -726,31 +191,6 @@ static int lowpan_give_skb_to_devices(struct sk_buff *skb)
return stat;
}
-static int lowpan_skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr)
-{
- struct sk_buff *new;
- int stat = NET_RX_SUCCESS;
-
- new = skb_copy_expand(skb, sizeof(struct ipv6hdr), skb_tailroom(skb),
- GFP_ATOMIC);
- kfree_skb(skb);
-
- if (!new)
- return -ENOMEM;
-
- skb_push(new, sizeof(struct ipv6hdr));
- skb_copy_to_linear_data(new, hdr, sizeof(struct ipv6hdr));
-
- new->protocol = htons(ETH_P_IPV6);
- new->pkt_type = PACKET_HOST;
-
- stat = lowpan_give_skb_to_devices(new);
-
- kfree_skb(new);
-
- return stat;
-}
-
static void lowpan_fragment_timer_expired(unsigned long entry_addr)
{
struct lowpan_fragment *entry = (struct lowpan_fragment *)entry_addr;
@@ -814,16 +254,12 @@ frame_err:
return NULL;
}
-static int
-lowpan_process_data(struct sk_buff *skb)
+static int process_data(struct sk_buff *skb)
{
- struct ipv6hdr hdr = {};
- u8 tmp, iphc0, iphc1, num_context = 0;
+ u8 iphc0, iphc1;
const struct ieee802154_addr *_saddr, *_daddr;
- int err;
- lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data,
- skb->len);
+ raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len);
/* at least two bytes will be used for the encoding */
if (skb->len < 2)
goto drop;
@@ -925,162 +361,11 @@ lowpan_process_data(struct sk_buff *skb)
_saddr = &mac_cb(skb)->sa;
_daddr = &mac_cb(skb)->da;
- pr_debug("iphc0 = %02x, iphc1 = %02x\n", iphc0, iphc1);
-
- /* another if the CID flag is set */
- if (iphc1 & LOWPAN_IPHC_CID) {
- pr_debug("CID flag is set, increase header with one\n");
- if (lowpan_fetch_skb_u8(skb, &num_context))
- goto drop;
- }
-
- hdr.version = 6;
-
- /* Traffic Class and Flow Label */
- switch ((iphc0 & LOWPAN_IPHC_TF) >> 3) {
- /*
- * Traffic Class and FLow Label carried in-line
- * ECN + DSCP + 4-bit Pad + Flow Label (4 bytes)
- */
- case 0: /* 00b */
- if (lowpan_fetch_skb_u8(skb, &tmp))
- goto drop;
-
- memcpy(&hdr.flow_lbl, &skb->data[0], 3);
- skb_pull(skb, 3);
- hdr.priority = ((tmp >> 2) & 0x0f);
- hdr.flow_lbl[0] = ((tmp >> 2) & 0x30) | (tmp << 6) |
- (hdr.flow_lbl[0] & 0x0f);
- break;
- /*
- * Traffic class carried in-line
- * ECN + DSCP (1 byte), Flow Label is elided
- */
- case 2: /* 10b */
- if (lowpan_fetch_skb_u8(skb, &tmp))
- goto drop;
-
- hdr.priority = ((tmp >> 2) & 0x0f);
- hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30);
- break;
- /*
- * Flow Label carried in-line
- * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided
- */
- case 1: /* 01b */
- if (lowpan_fetch_skb_u8(skb, &tmp))
- goto drop;
-
- hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30);
- memcpy(&hdr.flow_lbl[1], &skb->data[0], 2);
- skb_pull(skb, 2);
- break;
- /* Traffic Class and Flow Label are elided */
- case 3: /* 11b */
- break;
- default:
- break;
- }
-
- /* Next Header */
- if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
- /* Next header is carried inline */
- if (lowpan_fetch_skb_u8(skb, &(hdr.nexthdr)))
- goto drop;
-
- pr_debug("NH flag is set, next header carried inline: %02x\n",
- hdr.nexthdr);
- }
-
- /* Hop Limit */
- if ((iphc0 & 0x03) != LOWPAN_IPHC_TTL_I)
- hdr.hop_limit = lowpan_ttl_values[iphc0 & 0x03];
- else {
- if (lowpan_fetch_skb_u8(skb, &(hdr.hop_limit)))
- goto drop;
- }
-
- /* Extract SAM to the tmp variable */
- tmp = ((iphc1 & LOWPAN_IPHC_SAM) >> LOWPAN_IPHC_SAM_BIT) & 0x03;
-
- if (iphc1 & LOWPAN_IPHC_SAC) {
- /* Source address context based uncompression */
- pr_debug("SAC bit is set. Handle context based source address.\n");
- err = lowpan_uncompress_context_based_src_addr(
- skb, &hdr.saddr, tmp);
- } else {
- /* Source address uncompression */
- pr_debug("source address stateless compression\n");
- err = lowpan_uncompress_addr(skb, &hdr.saddr, tmp, _saddr);
- }
-
- /* Check on error of previous branch */
- if (err)
- goto drop;
-
- /* Extract DAM to the tmp variable */
- tmp = ((iphc1 & LOWPAN_IPHC_DAM_11) >> LOWPAN_IPHC_DAM_BIT) & 0x03;
-
- /* check for Multicast Compression */
- if (iphc1 & LOWPAN_IPHC_M) {
- if (iphc1 & LOWPAN_IPHC_DAC) {
- pr_debug("dest: context-based mcast compression\n");
- /* TODO: implement this */
- } else {
- err = lowpan_uncompress_multicast_daddr(
- skb, &hdr.daddr, tmp);
- if (err)
- goto drop;
- }
- } else {
- pr_debug("dest: stateless compression\n");
- err = lowpan_uncompress_addr(skb, &hdr.daddr, tmp, _daddr);
- if (err)
- goto drop;
- }
-
- /* UDP data uncompression */
- if (iphc0 & LOWPAN_IPHC_NH_C) {
- struct udphdr uh;
- struct sk_buff *new;
- if (lowpan_uncompress_udp_header(skb, &uh))
- goto drop;
-
- /*
- * replace the compressed UDP head by the uncompressed UDP
- * header
- */
- new = skb_copy_expand(skb, sizeof(struct udphdr),
- skb_tailroom(skb), GFP_ATOMIC);
- kfree_skb(skb);
-
- if (!new)
- return -ENOMEM;
-
- skb = new;
-
- skb_push(skb, sizeof(struct udphdr));
- skb_copy_to_linear_data(skb, &uh, sizeof(struct udphdr));
-
- lowpan_raw_dump_table(__func__, "raw UDP header dump",
- (u8 *)&uh, sizeof(uh));
-
- hdr.nexthdr = UIP_PROTO_UDP;
- }
-
- /* Not fragmented package */
- hdr.payload_len = htons(skb->len);
-
- pr_debug("skb headroom size = %d, data length = %d\n",
- skb_headroom(skb), skb->len);
-
- pr_debug("IPv6 header dump:\n\tversion = %d\n\tlength = %d\n\t"
- "nexthdr = 0x%02x\n\thop_lim = %d\n", hdr.version,
- ntohs(hdr.payload_len), hdr.nexthdr, hdr.hop_limit);
-
- lowpan_raw_dump_table(__func__, "raw header dump", (u8 *)&hdr,
- sizeof(hdr));
- return lowpan_skb_deliver(skb, &hdr);
+ return lowpan_process_data(skb, skb->dev, (u8 *)_saddr->hwaddr,
+ _saddr->addr_type, IEEE802154_ADDR_LEN,
+ (u8 *)_daddr->hwaddr, _daddr->addr_type,
+ IEEE802154_ADDR_LEN, iphc0, iphc1,
+ lowpan_give_skb_to_devices);
unlock_and_drop:
spin_unlock_bh(&flist_lock);
@@ -1112,7 +397,7 @@ lowpan_fragment_xmit(struct sk_buff *skb, u8 *head,
hlen = (type == LOWPAN_DISPATCH_FRAG1) ?
LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE;
- lowpan_raw_dump_inline(__func__, "6lowpan fragment header", head, hlen);
+ raw_dump_inline(__func__, "6lowpan fragment header", head, hlen);
frag = netdev_alloc_skb(skb->dev,
hlen + mlen + plen + IEEE802154_MFR_SIZE);
@@ -1132,8 +417,7 @@ lowpan_fragment_xmit(struct sk_buff *skb, u8 *head,
skb_copy_to_linear_data_offset(frag, mlen + hlen,
skb_network_header(skb) + offset, plen);
- lowpan_raw_dump_table(__func__, " raw fragment dump", frag->data,
- frag->len);
+ raw_dump_table(__func__, " raw fragment dump", frag->data, frag->len);
return dev_queue_xmit(frag);
}
@@ -1316,7 +600,7 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
/* Pull off the 1-byte of 6lowpan header. */
skb_pull(local_skb, 1);
- lowpan_give_skb_to_devices(local_skb);
+ lowpan_give_skb_to_devices(local_skb, NULL);
kfree_skb(local_skb);
kfree_skb(skb);
@@ -1328,7 +612,7 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
local_skb = skb_clone(skb, GFP_ATOMIC);
if (!local_skb)
goto drop;
- lowpan_process_data(local_skb);
+ process_data(local_skb);
kfree_skb(skb);
break;
diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h
index 2869c0526da..2b835db3bda 100644
--- a/net/ieee802154/6lowpan.h
+++ b/net/ieee802154/6lowpan.h
@@ -231,6 +231,61 @@
#define LOWPAN_NHC_UDP_CS_P_10 0xF2 /* source = 0xF0 + 8bit inline,
dest = 16 bit inline */
#define LOWPAN_NHC_UDP_CS_P_11 0xF3 /* source & dest = 0xF0B + 4bit inline */
+#define LOWPAN_NHC_UDP_CS_C 0x04 /* checksum elided */
+
+#ifdef DEBUG
+/* print data in line */
+static inline void raw_dump_inline(const char *caller, char *msg,
+ unsigned char *buf, int len)
+{
+ if (msg)
+ pr_debug("%s():%s: ", caller, msg);
+
+ print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1, buf, len, false);
+}
+
+/* print data in a table format:
+ *
+ * addr: xx xx xx xx xx xx
+ * addr: xx xx xx xx xx xx
+ * ...
+ */
+static inline void raw_dump_table(const char *caller, char *msg,
+ unsigned char *buf, int len)
+{
+ if (msg)
+ pr_debug("%s():%s:\n", caller, msg);
+
+ print_hex_dump_debug("\t", DUMP_PREFIX_OFFSET, 16, 1, buf, len, false);
+}
+#else
+static inline void raw_dump_table(const char *caller, char *msg,
+ unsigned char *buf, int len) { }
+static inline void raw_dump_inline(const char *caller, char *msg,
+ unsigned char *buf, int len) { }
+#endif
+
+static inline int lowpan_fetch_skb_u8(struct sk_buff *skb, u8 *val)
+{
+ if (unlikely(!pskb_may_pull(skb, 1)))
+ return -EINVAL;
+
+ *val = skb->data[0];
+ skb_pull(skb, 1);
+
+ return 0;
+}
+
+static inline int lowpan_fetch_skb_u16(struct sk_buff *skb, u16 *val)
+{
+ if (unlikely(!pskb_may_pull(skb, 2)))
+ return -EINVAL;
+
+ *val = (skb->data[0] << 8) | skb->data[1];
+ skb_pull(skb, 2);
+
+ return 0;
+}
static inline bool lowpan_fetch_skb(struct sk_buff *skb,
void *data, const unsigned int len)
@@ -244,4 +299,21 @@ static inline bool lowpan_fetch_skb(struct sk_buff *skb,
return false;
}
+static inline void lowpan_push_hc_data(u8 **hc_ptr, const void *data,
+ const size_t len)
+{
+ memcpy(*hc_ptr, data, len);
+ *hc_ptr += len;
+}
+
+typedef int (*skb_delivery_cb)(struct sk_buff *skb, struct net_device *dev);
+
+int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
+ const u8 *saddr, const u8 saddr_type, const u8 saddr_len,
+ const u8 *daddr, const u8 daddr_type, const u8 daddr_len,
+ u8 iphc0, u8 iphc1, skb_delivery_cb skb_deliver);
+int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, const void *_daddr,
+ const void *_saddr, unsigned int len);
+
#endif /* __6LOWPAN_H__ */
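
The new lowpan_push_hc_data() helper captures the pattern the compressor uses throughout: append raw bytes to a scratch header buffer and advance the write cursor. A minimal user-space sketch of that pattern follows (not part of the patch; the dispatch value and the fields pushed are illustrative only):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Sketch of the push-and-advance pattern behind lowpan_push_hc_data():
 * append raw bytes to a scratch header buffer and move the write cursor.
 */
static void push_hc_data(uint8_t **hc_ptr, const void *data, size_t len)
{
	memcpy(*hc_ptr, data, len);
	*hc_ptr += len;
}

int main(void)
{
	uint8_t head[16];
	uint8_t *hc = head;
	uint8_t dispatch = 0x60;	/* example dispatch byte, value illustrative */
	uint16_t port = 0xF0B0;

	push_hc_data(&hc, &dispatch, sizeof(dispatch));
	push_hc_data(&hc, &port, sizeof(port));
	printf("built %zu header bytes\n", (size_t)(hc - head));
	return 0;
}
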
diff --git a/net/ieee802154/6lowpan_iphc.c b/net/ieee802154/6lowpan_iphc.c
new file mode 100644
index 00000000000..11840f9e46d
--- /dev/null
+++ b/net/ieee802154/6lowpan_iphc.c
@@ -0,0 +1,799 @@
+/*
+ * Copyright 2011, Siemens AG
+ * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+/*
+ * Based on patches from Jon Smirl <jonsmirl@gmail.com>
+ * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/* Jon's code is based on 6lowpan implementation for Contiki which is:
+ * Copyright (c) 2008, Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the Institute nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <linux/bitops.h>
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+#include <net/ipv6.h>
+#include <net/af_ieee802154.h>
+
+#include "6lowpan.h"
+
+/*
+ * Uncompress address function for the source and
+ * destination address (non-multicast).
+ *
+ * address_mode is the SAM or DAM value.
+ */
+static int uncompress_addr(struct sk_buff *skb,
+ struct in6_addr *ipaddr, const u8 address_mode,
+ const u8 *lladdr, const u8 addr_type,
+ const u8 addr_len)
+{
+ bool fail;
+
+ switch (address_mode) {
+ case LOWPAN_IPHC_ADDR_00:
+ /* for global link addresses */
+ fail = lowpan_fetch_skb(skb, ipaddr->s6_addr, 16);
+ break;
+ case LOWPAN_IPHC_ADDR_01:
+ /* fe:80::XXXX:XXXX:XXXX:XXXX */
+ ipaddr->s6_addr[0] = 0xFE;
+ ipaddr->s6_addr[1] = 0x80;
+ fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[8], 8);
+ break;
+ case LOWPAN_IPHC_ADDR_02:
+ /* fe:80::ff:fe00:XXXX */
+ ipaddr->s6_addr[0] = 0xFE;
+ ipaddr->s6_addr[1] = 0x80;
+ ipaddr->s6_addr[11] = 0xFF;
+ ipaddr->s6_addr[12] = 0xFE;
+ fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[14], 2);
+ break;
+ case LOWPAN_IPHC_ADDR_03:
+ fail = false;
+ switch (addr_type) {
+ case IEEE802154_ADDR_LONG:
+ /* fe:80::XXXX:XXXX:XXXX:XXXX
+ * \_________________/
+ * hwaddr
+ */
+ ipaddr->s6_addr[0] = 0xFE;
+ ipaddr->s6_addr[1] = 0x80;
+ memcpy(&ipaddr->s6_addr[8], lladdr, addr_len);
+			/* second bit-flip (Universal/Local)
+			 * is done according to RFC 2464
+			 */
+ ipaddr->s6_addr[8] ^= 0x02;
+ break;
+ case IEEE802154_ADDR_SHORT:
+ /* fe:80::ff:fe00:XXXX
+ * \__/
+ * short_addr
+ *
+			 * The Universal/Local bit is zero.
+ */
+ ipaddr->s6_addr[0] = 0xFE;
+ ipaddr->s6_addr[1] = 0x80;
+ ipaddr->s6_addr[11] = 0xFF;
+ ipaddr->s6_addr[12] = 0xFE;
+ ipaddr->s6_addr16[7] = htons(*((u16 *)lladdr));
+ break;
+ default:
+ pr_debug("Invalid addr_type set\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ pr_debug("Invalid address mode value: 0x%x\n", address_mode);
+ return -EINVAL;
+ }
+
+ if (fail) {
+ pr_debug("Failed to fetch skb data\n");
+ return -EIO;
+ }
+
+ raw_dump_inline(NULL, "Reconstructed ipv6 addr is",
+ ipaddr->s6_addr, 16);
+
+ return 0;
+}
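
For reference only (not part of this patch), here is a user-space sketch of the stateless reconstruction performed above for a fully elided long address (LOWPAN_IPHC_ADDR_03 with IEEE802154_ADDR_LONG): the EUI-64 becomes the interface identifier of a link-local address and the Universal/Local bit is inverted.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative sketch: derive a link-local IPv6 address from an 8-byte
 * EUI-64, flipping the Universal/Local bit as uncompress_addr() does
 * for a fully elided long hardware address.
 */
static void ll_from_eui64(uint8_t ip[16], const uint8_t eui64[8])
{
	memset(ip, 0, 16);
	ip[0] = 0xfe;
	ip[1] = 0x80;
	memcpy(&ip[8], eui64, 8);
	ip[8] ^= 0x02;		/* invert the U/L bit */
}

int main(void)
{
	static const uint8_t eui64[8] = {
		0x00, 0x11, 0x22, 0xff, 0xfe, 0x33, 0x44, 0x55
	};
	uint8_t ip[16];
	int i;

	ll_from_eui64(ip, eui64);
	for (i = 0; i < 16; i += 2)
		printf("%02x%02x%c", ip[i], ip[i + 1], i == 14 ? '\n' : ':');
	return 0;
}
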
+
+/*
+ * Uncompress address function for a context-based
+ * source address (non-multicast).
+ */
+static int uncompress_context_based_src_addr(struct sk_buff *skb,
+ struct in6_addr *ipaddr,
+ const u8 sam)
+{
+ switch (sam) {
+ case LOWPAN_IPHC_ADDR_00:
+ /* unspec address ::
+ * Do nothing, address is already ::
+ */
+ break;
+ case LOWPAN_IPHC_ADDR_01:
+ /* TODO */
+ case LOWPAN_IPHC_ADDR_02:
+ /* TODO */
+ case LOWPAN_IPHC_ADDR_03:
+ /* TODO */
+ netdev_warn(skb->dev, "SAM value 0x%x not supported\n", sam);
+ return -EINVAL;
+ default:
+ pr_debug("Invalid sam value: 0x%x\n", sam);
+ return -EINVAL;
+ }
+
+ raw_dump_inline(NULL,
+ "Reconstructed context based ipv6 src addr is",
+ ipaddr->s6_addr, 16);
+
+ return 0;
+}
+
+static int skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr,
+ struct net_device *dev, skb_delivery_cb deliver_skb)
+{
+ struct sk_buff *new;
+ int stat;
+
+ new = skb_copy_expand(skb, sizeof(struct ipv6hdr), skb_tailroom(skb),
+ GFP_ATOMIC);
+ kfree_skb(skb);
+
+ if (!new)
+ return -ENOMEM;
+
+ skb_push(new, sizeof(struct ipv6hdr));
+ skb_reset_network_header(new);
+ skb_copy_to_linear_data(new, hdr, sizeof(struct ipv6hdr));
+
+ new->protocol = htons(ETH_P_IPV6);
+ new->pkt_type = PACKET_HOST;
+ new->dev = dev;
+
+ raw_dump_table(__func__, "raw skb data dump before receiving",
+ new->data, new->len);
+
+ stat = deliver_skb(new, dev);
+
+ kfree_skb(new);
+
+ return stat;
+}
+
+/* Uncompress function for multicast destination address,
+ * when M bit is set.
+ */
+static int
+lowpan_uncompress_multicast_daddr(struct sk_buff *skb,
+ struct in6_addr *ipaddr,
+ const u8 dam)
+{
+ bool fail;
+
+ switch (dam) {
+ case LOWPAN_IPHC_DAM_00:
+ /* 00: 128 bits. The full address
+ * is carried in-line.
+ */
+ fail = lowpan_fetch_skb(skb, ipaddr->s6_addr, 16);
+ break;
+ case LOWPAN_IPHC_DAM_01:
+ /* 01: 48 bits. The address takes
+ * the form ffXX::00XX:XXXX:XXXX.
+ */
+ ipaddr->s6_addr[0] = 0xFF;
+ fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[1], 1);
+ fail |= lowpan_fetch_skb(skb, &ipaddr->s6_addr[11], 5);
+ break;
+ case LOWPAN_IPHC_DAM_10:
+ /* 10: 32 bits. The address takes
+ * the form ffXX::00XX:XXXX.
+ */
+ ipaddr->s6_addr[0] = 0xFF;
+ fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[1], 1);
+ fail |= lowpan_fetch_skb(skb, &ipaddr->s6_addr[13], 3);
+ break;
+ case LOWPAN_IPHC_DAM_11:
+ /* 11: 8 bits. The address takes
+ * the form ff02::00XX.
+ */
+ ipaddr->s6_addr[0] = 0xFF;
+ ipaddr->s6_addr[1] = 0x02;
+ fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[15], 1);
+ break;
+ default:
+		pr_debug("Invalid DAM value: 0x%x\n", dam);
+ return -EINVAL;
+ }
+
+ if (fail) {
+ pr_debug("Failed to fetch skb data\n");
+ return -EIO;
+ }
+
+ raw_dump_inline(NULL, "Reconstructed ipv6 multicast addr is",
+ ipaddr->s6_addr, 16);
+
+ return 0;
+}
+
+static int
+uncompress_udp_header(struct sk_buff *skb, struct udphdr *uh)
+{
+ bool fail;
+ u8 tmp = 0, val = 0;
+
+ if (!uh)
+ goto err;
+
+ fail = lowpan_fetch_skb(skb, &tmp, 1);
+
+ if ((tmp & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) {
+ pr_debug("UDP header uncompression\n");
+ switch (tmp & LOWPAN_NHC_UDP_CS_P_11) {
+ case LOWPAN_NHC_UDP_CS_P_00:
+ fail |= lowpan_fetch_skb(skb, &uh->source, 2);
+ fail |= lowpan_fetch_skb(skb, &uh->dest, 2);
+ break;
+ case LOWPAN_NHC_UDP_CS_P_01:
+ fail |= lowpan_fetch_skb(skb, &uh->source, 2);
+ fail |= lowpan_fetch_skb(skb, &val, 1);
+ uh->dest = htons(val + LOWPAN_NHC_UDP_8BIT_PORT);
+ break;
+ case LOWPAN_NHC_UDP_CS_P_10:
+ fail |= lowpan_fetch_skb(skb, &val, 1);
+ uh->source = htons(val + LOWPAN_NHC_UDP_8BIT_PORT);
+ fail |= lowpan_fetch_skb(skb, &uh->dest, 2);
+ break;
+ case LOWPAN_NHC_UDP_CS_P_11:
+ fail |= lowpan_fetch_skb(skb, &val, 1);
+ uh->source = htons(LOWPAN_NHC_UDP_4BIT_PORT +
+ (val >> 4));
+ uh->dest = htons(LOWPAN_NHC_UDP_4BIT_PORT +
+ (val & 0x0f));
+ break;
+ default:
+ pr_debug("ERROR: unknown UDP format\n");
+ goto err;
+ }
+
+ pr_debug("uncompressed UDP ports: src = %d, dst = %d\n",
+ ntohs(uh->source), ntohs(uh->dest));
+
+ /* checksum */
+ if (tmp & LOWPAN_NHC_UDP_CS_C) {
+ pr_debug_ratelimited("checksum elided currently not supported\n");
+ goto err;
+ } else {
+ fail |= lowpan_fetch_skb(skb, &uh->check, 2);
+ }
+
+		/*
+		 * The UDP length needs to be inferred from the lower layers;
+		 * here, we obtain the hint from the remaining size of the
+		 * frame.
+		 */
+ uh->len = htons(skb->len + sizeof(struct udphdr));
+		pr_debug("uncompressed UDP length: %d\n", ntohs(uh->len));
+ } else {
+ pr_debug("ERROR: unsupported NH format\n");
+ goto err;
+ }
+
+ if (fail)
+ goto err;
+
+ return 0;
+err:
+ return -EINVAL;
+}
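
As an illustration of the LOWPAN_NHC_UDP_CS_P_11 branch above, both ports fit into a single byte because they fall in the 4-bit-compressible range starting at LOWPAN_NHC_UDP_4BIT_PORT (0xF0B0 per RFC 6282). A standalone sketch of the round trip, with the constant repeated locally for illustration:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NHC_UDP_4BIT_PORT 0xF0B0	/* per RFC 6282 */

/* Sketch: pack two ports from the 0xF0B0..0xF0BF range into one byte
 * and expand them again, mirroring the CS_P_11 handling above.
 */
static uint8_t pack_ports_4bit(uint16_t src, uint16_t dst)
{
	return (uint8_t)(((src - NHC_UDP_4BIT_PORT) << 4) |
			 (dst - NHC_UDP_4BIT_PORT));
}

static void unpack_ports_4bit(uint8_t val, uint16_t *src, uint16_t *dst)
{
	*src = NHC_UDP_4BIT_PORT + (val >> 4);
	*dst = NHC_UDP_4BIT_PORT + (val & 0x0f);
}

int main(void)
{
	uint16_t src, dst;
	uint8_t packed = pack_ports_4bit(0xF0B3, 0xF0BC);

	unpack_ports_4bit(packed, &src, &dst);
	assert(src == 0xF0B3 && dst == 0xF0BC);
	printf("packed byte 0x%02x -> src 0x%04x dst 0x%04x\n", packed, src, dst);
	return 0;
}
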
+
+/* TTL uncompression values */
+static const u8 lowpan_ttl_values[] = { 0, 1, 64, 255 };
+
+int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
+ const u8 *saddr, const u8 saddr_type, const u8 saddr_len,
+ const u8 *daddr, const u8 daddr_type, const u8 daddr_len,
+ u8 iphc0, u8 iphc1, skb_delivery_cb deliver_skb)
+{
+ struct ipv6hdr hdr = {};
+ u8 tmp, num_context = 0;
+ int err;
+
+ raw_dump_table(__func__, "raw skb data dump uncompressed",
+ skb->data, skb->len);
+
+	/* an extra context byte follows if the CID flag is set */
+	if (iphc1 & LOWPAN_IPHC_CID) {
+		pr_debug("CID flag is set, header has one extra byte\n");
+ if (lowpan_fetch_skb_u8(skb, &num_context))
+ goto drop;
+ }
+
+ hdr.version = 6;
+
+ /* Traffic Class and Flow Label */
+ switch ((iphc0 & LOWPAN_IPHC_TF) >> 3) {
+ /*
+	 * Traffic Class and Flow Label carried in-line
+ * ECN + DSCP + 4-bit Pad + Flow Label (4 bytes)
+ */
+ case 0: /* 00b */
+ if (lowpan_fetch_skb_u8(skb, &tmp))
+ goto drop;
+
+ memcpy(&hdr.flow_lbl, &skb->data[0], 3);
+ skb_pull(skb, 3);
+ hdr.priority = ((tmp >> 2) & 0x0f);
+ hdr.flow_lbl[0] = ((tmp >> 2) & 0x30) | (tmp << 6) |
+ (hdr.flow_lbl[0] & 0x0f);
+ break;
+ /*
+ * Traffic class carried in-line
+ * ECN + DSCP (1 byte), Flow Label is elided
+ */
+ case 2: /* 10b */
+ if (lowpan_fetch_skb_u8(skb, &tmp))
+ goto drop;
+
+ hdr.priority = ((tmp >> 2) & 0x0f);
+ hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30);
+ break;
+ /*
+ * Flow Label carried in-line
+ * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided
+ */
+ case 1: /* 01b */
+ if (lowpan_fetch_skb_u8(skb, &tmp))
+ goto drop;
+
+ hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30);
+ memcpy(&hdr.flow_lbl[1], &skb->data[0], 2);
+ skb_pull(skb, 2);
+ break;
+ /* Traffic Class and Flow Label are elided */
+ case 3: /* 11b */
+ break;
+ default:
+ break;
+ }
+
+ /* Next Header */
+ if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
+ /* Next header is carried inline */
+ if (lowpan_fetch_skb_u8(skb, &(hdr.nexthdr)))
+ goto drop;
+
+ pr_debug("NH flag is set, next header carried inline: %02x\n",
+ hdr.nexthdr);
+ }
+
+ /* Hop Limit */
+ if ((iphc0 & 0x03) != LOWPAN_IPHC_TTL_I)
+ hdr.hop_limit = lowpan_ttl_values[iphc0 & 0x03];
+ else {
+ if (lowpan_fetch_skb_u8(skb, &(hdr.hop_limit)))
+ goto drop;
+ }
+
+ /* Extract SAM to the tmp variable */
+ tmp = ((iphc1 & LOWPAN_IPHC_SAM) >> LOWPAN_IPHC_SAM_BIT) & 0x03;
+
+ if (iphc1 & LOWPAN_IPHC_SAC) {
+ /* Source address context based uncompression */
+ pr_debug("SAC bit is set. Handle context based source address.\n");
+ err = uncompress_context_based_src_addr(
+ skb, &hdr.saddr, tmp);
+ } else {
+ /* Source address uncompression */
+ pr_debug("source address stateless compression\n");
+ err = uncompress_addr(skb, &hdr.saddr, tmp, saddr,
+ saddr_type, saddr_len);
+ }
+
+ /* Check on error of previous branch */
+ if (err)
+ goto drop;
+
+ /* Extract DAM to the tmp variable */
+ tmp = ((iphc1 & LOWPAN_IPHC_DAM_11) >> LOWPAN_IPHC_DAM_BIT) & 0x03;
+
+ /* check for Multicast Compression */
+ if (iphc1 & LOWPAN_IPHC_M) {
+ if (iphc1 & LOWPAN_IPHC_DAC) {
+ pr_debug("dest: context-based mcast compression\n");
+ /* TODO: implement this */
+ } else {
+ err = lowpan_uncompress_multicast_daddr(
+ skb, &hdr.daddr, tmp);
+ if (err)
+ goto drop;
+ }
+ } else {
+ err = uncompress_addr(skb, &hdr.daddr, tmp, daddr,
+ daddr_type, daddr_len);
+ pr_debug("dest: stateless compression mode %d dest %pI6c\n",
+ tmp, &hdr.daddr);
+ if (err)
+ goto drop;
+ }
+
+ /* UDP data uncompression */
+ if (iphc0 & LOWPAN_IPHC_NH_C) {
+ struct udphdr uh;
+ struct sk_buff *new;
+ if (uncompress_udp_header(skb, &uh))
+ goto drop;
+
+ /*
+ * replace the compressed UDP head by the uncompressed UDP
+ * header
+ */
+ new = skb_copy_expand(skb, sizeof(struct udphdr),
+ skb_tailroom(skb), GFP_ATOMIC);
+ kfree_skb(skb);
+
+ if (!new)
+ return -ENOMEM;
+
+ skb = new;
+
+ skb_push(skb, sizeof(struct udphdr));
+ skb_reset_transport_header(skb);
+ skb_copy_to_linear_data(skb, &uh, sizeof(struct udphdr));
+
+ raw_dump_table(__func__, "raw UDP header dump",
+ (u8 *)&uh, sizeof(uh));
+
+ hdr.nexthdr = UIP_PROTO_UDP;
+ }
+
+ hdr.payload_len = htons(skb->len);
+
+ pr_debug("skb headroom size = %d, data length = %d\n",
+ skb_headroom(skb), skb->len);
+
+ pr_debug("IPv6 header dump:\n\tversion = %d\n\tlength = %d\n\t"
+ "nexthdr = 0x%02x\n\thop_lim = %d\n\tdest = %pI6c\n",
+ hdr.version, ntohs(hdr.payload_len), hdr.nexthdr,
+ hdr.hop_limit, &hdr.daddr);
+
+ raw_dump_table(__func__, "raw header dump", (u8 *)&hdr,
+ sizeof(hdr));
+
+ return skb_deliver(skb, &hdr, dev, deliver_skb);
+
+drop:
+ kfree_skb(skb);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(lowpan_process_data);
+
+static u8 lowpan_compress_addr_64(u8 **hc06_ptr, u8 shift,
+ const struct in6_addr *ipaddr,
+ const unsigned char *lladdr)
+{
+ u8 val = 0;
+
+ if (is_addr_mac_addr_based(ipaddr, lladdr)) {
+ val = 3; /* 0-bits */
+ pr_debug("address compression 0 bits\n");
+ } else if (lowpan_is_iid_16_bit_compressable(ipaddr)) {
+ /* compress IID to 16 bits xxxx::XXXX */
+ memcpy(*hc06_ptr, &ipaddr->s6_addr16[7], 2);
+ *hc06_ptr += 2;
+ val = 2; /* 16-bits */
+ raw_dump_inline(NULL, "Compressed ipv6 addr is (16 bits)",
+ *hc06_ptr - 2, 2);
+ } else {
+ /* do not compress IID => xxxx::IID */
+ memcpy(*hc06_ptr, &ipaddr->s6_addr16[4], 8);
+ *hc06_ptr += 8;
+ val = 1; /* 64-bits */
+ raw_dump_inline(NULL, "Compressed ipv6 addr is (64 bits)",
+ *hc06_ptr - 8, 8);
+ }
+
+ return rol8(val, shift);
+}
+
+static void compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb)
+{
+ struct udphdr *uh = udp_hdr(skb);
+ u8 tmp;
+
+ if (((ntohs(uh->source) & LOWPAN_NHC_UDP_4BIT_MASK) ==
+ LOWPAN_NHC_UDP_4BIT_PORT) &&
+ ((ntohs(uh->dest) & LOWPAN_NHC_UDP_4BIT_MASK) ==
+ LOWPAN_NHC_UDP_4BIT_PORT)) {
+ pr_debug("UDP header: both ports compression to 4 bits\n");
+ /* compression value */
+ tmp = LOWPAN_NHC_UDP_CS_P_11;
+ lowpan_push_hc_data(hc06_ptr, &tmp, sizeof(tmp));
+ /* source and destination port */
+ tmp = ntohs(uh->dest) - LOWPAN_NHC_UDP_4BIT_PORT +
+ ((ntohs(uh->source) - LOWPAN_NHC_UDP_4BIT_PORT) << 4);
+ lowpan_push_hc_data(hc06_ptr, &tmp, sizeof(tmp));
+ } else if ((ntohs(uh->dest) & LOWPAN_NHC_UDP_8BIT_MASK) ==
+ LOWPAN_NHC_UDP_8BIT_PORT) {
+ pr_debug("UDP header: remove 8 bits of dest\n");
+ /* compression value */
+ tmp = LOWPAN_NHC_UDP_CS_P_01;
+ lowpan_push_hc_data(hc06_ptr, &tmp, sizeof(tmp));
+ /* source port */
+ lowpan_push_hc_data(hc06_ptr, &uh->source, sizeof(uh->source));
+ /* destination port */
+ tmp = ntohs(uh->dest) - LOWPAN_NHC_UDP_8BIT_PORT;
+ lowpan_push_hc_data(hc06_ptr, &tmp, sizeof(tmp));
+ } else if ((ntohs(uh->source) & LOWPAN_NHC_UDP_8BIT_MASK) ==
+ LOWPAN_NHC_UDP_8BIT_PORT) {
+ pr_debug("UDP header: remove 8 bits of source\n");
+ /* compression value */
+ tmp = LOWPAN_NHC_UDP_CS_P_10;
+ lowpan_push_hc_data(hc06_ptr, &tmp, sizeof(tmp));
+ /* source port */
+ tmp = ntohs(uh->source) - LOWPAN_NHC_UDP_8BIT_PORT;
+ lowpan_push_hc_data(hc06_ptr, &tmp, sizeof(tmp));
+ /* destination port */
+ lowpan_push_hc_data(hc06_ptr, &uh->dest, sizeof(uh->dest));
+ } else {
+ pr_debug("UDP header: can't compress\n");
+ /* compression value */
+ tmp = LOWPAN_NHC_UDP_CS_P_00;
+ lowpan_push_hc_data(hc06_ptr, &tmp, sizeof(tmp));
+ /* source port */
+ lowpan_push_hc_data(hc06_ptr, &uh->source, sizeof(uh->source));
+ /* destination port */
+ lowpan_push_hc_data(hc06_ptr, &uh->dest, sizeof(uh->dest));
+ }
+
+ /* checksum is always inline */
+ lowpan_push_hc_data(hc06_ptr, &uh->check, sizeof(uh->check));
+
+ /* skip the UDP header */
+ skb_pull(skb, sizeof(struct udphdr));
+}
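
The branch order in compress_udp_header() encodes a preference: 4-bit compression of both ports first, then 8-bit compression of the destination or source port, and finally no compression. A user-space sketch of the same selection logic (the constants mirror the LOWPAN_NHC_UDP_* values from RFC 6282; this is illustrative, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Sketch of the UDP port compression mode selection, following the
 * branch order of compress_udp_header(). Constants per RFC 6282.
 */
#define UDP_4BIT_PORT 0xF0B0
#define UDP_4BIT_MASK 0xFFF0
#define UDP_8BIT_PORT 0xF000
#define UDP_8BIT_MASK 0xFF00

static uint8_t udp_cs_mode(uint16_t src, uint16_t dst)
{
	if ((src & UDP_4BIT_MASK) == UDP_4BIT_PORT &&
	    (dst & UDP_4BIT_MASK) == UDP_4BIT_PORT)
		return 0xF3;	/* CS_P_11: both ports in one byte */
	if ((dst & UDP_8BIT_MASK) == UDP_8BIT_PORT)
		return 0xF1;	/* CS_P_01: dest compressed to 8 bits */
	if ((src & UDP_8BIT_MASK) == UDP_8BIT_PORT)
		return 0xF2;	/* CS_P_10: source compressed to 8 bits */
	return 0xF0;		/* CS_P_00: both ports carried in-line */
}

int main(void)
{
	printf("0x%02x 0x%02x 0x%02x 0x%02x\n",
	       udp_cs_mode(0xF0B1, 0xF0B2),	/* -> 0xF3 */
	       udp_cs_mode(0x1234, 0xF012),	/* -> 0xF1 */
	       udp_cs_mode(0xF034, 0x1234),	/* -> 0xF2 */
	       udp_cs_mode(0x1234, 0x5678));	/* -> 0xF0 */
	return 0;
}

The checksum is still pushed in full after whichever mode is chosen, matching the function above.
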
+
+int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, const void *_daddr,
+ const void *_saddr, unsigned int len)
+{
+ u8 tmp, iphc0, iphc1, *hc06_ptr;
+ struct ipv6hdr *hdr;
+ u8 head[100] = {};
+
+ if (type != ETH_P_IPV6)
+ return -EINVAL;
+
+ hdr = ipv6_hdr(skb);
+ hc06_ptr = head + 2;
+
+ pr_debug("IPv6 header dump:\n\tversion = %d\n\tlength = %d\n"
+ "\tnexthdr = 0x%02x\n\thop_lim = %d\n\tdest = %pI6c\n",
+ hdr->version, ntohs(hdr->payload_len), hdr->nexthdr,
+ hdr->hop_limit, &hdr->daddr);
+
+ raw_dump_table(__func__, "raw skb network header dump",
+ skb_network_header(skb), sizeof(struct ipv6hdr));
+
+	/*
+	 * As we copy some bit-length fields into the IPHC encoding bytes,
+	 * we sometimes use |=.
+	 * If the field is 0 and the current bit value in memory is 1,
+	 * this does not work, so we reset the IPHC encoding here.
+	 */
+ iphc0 = LOWPAN_DISPATCH_IPHC;
+ iphc1 = 0;
+
+ /* TODO: context lookup */
+
+ raw_dump_inline(__func__, "saddr",
+ (unsigned char *)_saddr, IEEE802154_ADDR_LEN);
+ raw_dump_inline(__func__, "daddr",
+ (unsigned char *)_daddr, IEEE802154_ADDR_LEN);
+
+ raw_dump_table(__func__,
+ "sending raw skb network uncompressed packet",
+ skb->data, skb->len);
+
+	/*
+	 * Traffic class, flow label
+	 * If the flow label is 0, compress it. If the traffic class is 0,
+	 * compress it. We have to process both at the same time, as the
+	 * offset of the traffic class depends on the presence of the
+	 * version and flow label fields.
+	 */
+
+	/* hc06 format of TC is ECN | DSCP, original one is DSCP | ECN */
+ tmp = (hdr->priority << 4) | (hdr->flow_lbl[0] >> 4);
+ tmp = ((tmp & 0x03) << 6) | (tmp >> 2);
+
+ if (((hdr->flow_lbl[0] & 0x0F) == 0) &&
+ (hdr->flow_lbl[1] == 0) && (hdr->flow_lbl[2] == 0)) {
+ /* flow label can be compressed */
+ iphc0 |= LOWPAN_IPHC_FL_C;
+ if ((hdr->priority == 0) &&
+ ((hdr->flow_lbl[0] & 0xF0) == 0)) {
+ /* compress (elide) all */
+ iphc0 |= LOWPAN_IPHC_TC_C;
+ } else {
+ /* compress only the flow label */
+ *hc06_ptr = tmp;
+ hc06_ptr += 1;
+ }
+ } else {
+ /* Flow label cannot be compressed */
+ if ((hdr->priority == 0) &&
+ ((hdr->flow_lbl[0] & 0xF0) == 0)) {
+ /* compress only traffic class */
+ iphc0 |= LOWPAN_IPHC_TC_C;
+ *hc06_ptr = (tmp & 0xc0) | (hdr->flow_lbl[0] & 0x0F);
+ memcpy(hc06_ptr + 1, &hdr->flow_lbl[1], 2);
+ hc06_ptr += 3;
+ } else {
+ /* compress nothing */
+			memcpy(hc06_ptr, hdr, 4);
+ /* replace the top byte with new ECN | DSCP format */
+ *hc06_ptr = tmp;
+ hc06_ptr += 4;
+ }
+ }
+
+ /* NOTE: payload length is always compressed */
+
+	/* Next Header is compressed if it is UDP */
+ if (hdr->nexthdr == UIP_PROTO_UDP)
+ iphc0 |= LOWPAN_IPHC_NH_C;
+
+ if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
+ *hc06_ptr = hdr->nexthdr;
+ hc06_ptr += 1;
+ }
+
+ /*
+ * Hop limit
+ * if 1: compress, encoding is 01
+ * if 64: compress, encoding is 10
+ * if 255: compress, encoding is 11
+ * else do not compress
+ */
+ switch (hdr->hop_limit) {
+ case 1:
+ iphc0 |= LOWPAN_IPHC_TTL_1;
+ break;
+ case 64:
+ iphc0 |= LOWPAN_IPHC_TTL_64;
+ break;
+ case 255:
+ iphc0 |= LOWPAN_IPHC_TTL_255;
+ break;
+ default:
+ *hc06_ptr = hdr->hop_limit;
+ hc06_ptr += 1;
+ break;
+ }
+
+ /* source address compression */
+ if (is_addr_unspecified(&hdr->saddr)) {
+ pr_debug("source address is unspecified, setting SAC\n");
+ iphc1 |= LOWPAN_IPHC_SAC;
+ /* TODO: context lookup */
+ } else if (is_addr_link_local(&hdr->saddr)) {
+ iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
+ LOWPAN_IPHC_SAM_BIT, &hdr->saddr, _saddr);
+ pr_debug("source address unicast link-local %pI6c "
+ "iphc1 0x%02x\n", &hdr->saddr, iphc1);
+ } else {
+ pr_debug("send the full source address\n");
+ memcpy(hc06_ptr, &hdr->saddr.s6_addr16[0], 16);
+ hc06_ptr += 16;
+ }
+
+ /* destination address compression */
+ if (is_addr_mcast(&hdr->daddr)) {
+ pr_debug("destination address is multicast: ");
+ iphc1 |= LOWPAN_IPHC_M;
+ if (lowpan_is_mcast_addr_compressable8(&hdr->daddr)) {
+ pr_debug("compressed to 1 octet\n");
+ iphc1 |= LOWPAN_IPHC_DAM_11;
+ /* use last byte */
+ *hc06_ptr = hdr->daddr.s6_addr[15];
+ hc06_ptr += 1;
+ } else if (lowpan_is_mcast_addr_compressable32(&hdr->daddr)) {
+ pr_debug("compressed to 4 octets\n");
+ iphc1 |= LOWPAN_IPHC_DAM_10;
+ /* second byte + the last three */
+ *hc06_ptr = hdr->daddr.s6_addr[1];
+ memcpy(hc06_ptr + 1, &hdr->daddr.s6_addr[13], 3);
+ hc06_ptr += 4;
+ } else if (lowpan_is_mcast_addr_compressable48(&hdr->daddr)) {
+ pr_debug("compressed to 6 octets\n");
+ iphc1 |= LOWPAN_IPHC_DAM_01;
+ /* second byte + the last five */
+ *hc06_ptr = hdr->daddr.s6_addr[1];
+ memcpy(hc06_ptr + 1, &hdr->daddr.s6_addr[11], 5);
+ hc06_ptr += 6;
+ } else {
+ pr_debug("using full address\n");
+ iphc1 |= LOWPAN_IPHC_DAM_00;
+ memcpy(hc06_ptr, &hdr->daddr.s6_addr[0], 16);
+ hc06_ptr += 16;
+ }
+ } else {
+ /* TODO: context lookup */
+ if (is_addr_link_local(&hdr->daddr)) {
+ iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
+ LOWPAN_IPHC_DAM_BIT, &hdr->daddr, _daddr);
+ pr_debug("dest address unicast link-local %pI6c "
+ "iphc1 0x%02x\n", &hdr->daddr, iphc1);
+ } else {
+ pr_debug("dest address unicast %pI6c\n", &hdr->daddr);
+ memcpy(hc06_ptr, &hdr->daddr.s6_addr16[0], 16);
+ hc06_ptr += 16;
+ }
+ }
+
+ /* UDP header compression */
+ if (hdr->nexthdr == UIP_PROTO_UDP)
+ compress_udp_header(&hc06_ptr, skb);
+
+ head[0] = iphc0;
+ head[1] = iphc1;
+
+ skb_pull(skb, sizeof(struct ipv6hdr));
+ skb_reset_transport_header(skb);
+ memcpy(skb_push(skb, hc06_ptr - head), head, hc06_ptr - head);
+ skb_reset_network_header(skb);
+
+ pr_debug("header len %d skb %u\n", (int)(hc06_ptr - head), skb->len);
+
+ raw_dump_table(__func__, "raw skb data dump compressed",
+ skb->data, skb->len);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(lowpan_header_compress);
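
One detail of lowpan_header_compress() worth spelling out: the traffic class byte is rebuilt from hdr->priority and the top nibble of flow_lbl[0], then rotated so that the hc06 layout (ECN | DSCP) replaces the IPv6 wire layout (DSCP | ECN). A small standalone sketch that checks the rotation (values chosen for illustration):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch: convert an IPv6 traffic class byte (DSCP in the top 6 bits,
 * ECN in the low 2 bits) into the IPHC/hc06 layout (ECN in the top
 * 2 bits, DSCP in the low 6 bits), as lowpan_header_compress() does.
 */
static uint8_t tc_to_hc06(uint8_t tc)
{
	return (uint8_t)(((tc & 0x03) << 6) | (tc >> 2));
}

int main(void)
{
	uint8_t dscp = 0x2e;	/* EF PHB, for example */
	uint8_t ecn = 0x01;
	uint8_t tc = (uint8_t)((dscp << 2) | ecn);
	uint8_t hc06 = tc_to_hc06(tc);

	assert((hc06 >> 6) == ecn);
	assert((hc06 & 0x3f) == dscp);
	printf("tc 0x%02x -> hc06 0x%02x\n", tc, hc06);
	return 0;
}
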
diff --git a/net/ieee802154/Makefile b/net/ieee802154/Makefile
index d7716d64c6b..951a83ee8af 100644
--- a/net/ieee802154/Makefile
+++ b/net/ieee802154/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o
-obj-$(CONFIG_IEEE802154_6LOWPAN) += 6lowpan.o
+obj-$(CONFIG_IEEE802154_6LOWPAN) += 6lowpan.o 6lowpan_iphc.o
ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o
af_802154-y := af_ieee802154.o raw.o dgram.o
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 07a5ed37426..d3929a69f00 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -113,7 +113,7 @@ static inline struct dst_entry *tunnel_dst_get(struct ip_tunnel *t)
return dst;
}
-struct dst_entry *tunnel_dst_check(struct ip_tunnel *t, u32 cookie)
+static struct dst_entry *tunnel_dst_check(struct ip_tunnel *t, u32 cookie)
{
struct dst_entry *dst = tunnel_dst_get(t);
diff --git a/net/ipv4/netfilter/nf_tables_arp.c b/net/ipv4/netfilter/nf_tables_arp.c
index 3e67ef1c676..19412a4063f 100644
--- a/net/ipv4/netfilter/nf_tables_arp.c
+++ b/net/ipv4/netfilter/nf_tables_arp.c
@@ -14,10 +14,30 @@
#include <linux/netfilter_arp.h>
#include <net/netfilter/nf_tables.h>
+static unsigned int
+nft_do_chain_arp(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ struct nft_pktinfo pkt;
+
+ nft_set_pktinfo(&pkt, ops, skb, in, out);
+
+ return nft_do_chain(&pkt, ops);
+}
+
static struct nft_af_info nft_af_arp __read_mostly = {
.family = NFPROTO_ARP,
.nhooks = NF_ARP_NUMHOOKS,
.owner = THIS_MODULE,
+ .nops = 1,
+ .hooks = {
+ [NF_ARP_IN] = nft_do_chain_arp,
+ [NF_ARP_OUT] = nft_do_chain_arp,
+ [NF_ARP_FORWARD] = nft_do_chain_arp,
+ },
};
static int nf_tables_arp_init_net(struct net *net)
@@ -48,32 +68,14 @@ static struct pernet_operations nf_tables_arp_net_ops = {
.exit = nf_tables_arp_exit_net,
};
-static unsigned int
-nft_do_chain_arp(const struct nf_hook_ops *ops,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
-{
- struct nft_pktinfo pkt;
-
- nft_set_pktinfo(&pkt, ops, skb, in, out);
-
- return nft_do_chain_pktinfo(&pkt, ops);
-}
-
-static struct nf_chain_type filter_arp = {
- .family = NFPROTO_ARP,
+static const struct nf_chain_type filter_arp = {
.name = "filter",
.type = NFT_CHAIN_T_DEFAULT,
+ .family = NFPROTO_ARP,
+ .owner = THIS_MODULE,
.hook_mask = (1 << NF_ARP_IN) |
(1 << NF_ARP_OUT) |
(1 << NF_ARP_FORWARD),
- .fn = {
- [NF_ARP_IN] = nft_do_chain_arp,
- [NF_ARP_OUT] = nft_do_chain_arp,
- [NF_ARP_FORWARD] = nft_do_chain_arp,
- },
};
static int __init nf_tables_arp_init(void)
diff --git a/net/ipv4/netfilter/nf_tables_ipv4.c b/net/ipv4/netfilter/nf_tables_ipv4.c
index 0f4cbfeb19b..6820c8c4084 100644
--- a/net/ipv4/netfilter/nf_tables_ipv4.c
+++ b/net/ipv4/netfilter/nf_tables_ipv4.c
@@ -18,14 +18,25 @@
#include <net/ip.h>
#include <net/netfilter/nf_tables_ipv4.h>
+static unsigned int nft_do_chain_ipv4(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ struct nft_pktinfo pkt;
+
+ nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
+
+ return nft_do_chain(&pkt, ops);
+}
+
static unsigned int nft_ipv4_output(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
- struct nft_pktinfo pkt;
-
if (unlikely(skb->len < sizeof(struct iphdr) ||
ip_hdr(skb)->ihl < sizeof(struct iphdr) / 4)) {
if (net_ratelimit())
@@ -33,19 +44,24 @@ static unsigned int nft_ipv4_output(const struct nf_hook_ops *ops,
"packet\n");
return NF_ACCEPT;
}
- nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
- return nft_do_chain_pktinfo(&pkt, ops);
+ return nft_do_chain_ipv4(ops, skb, in, out, okfn);
}
-static struct nft_af_info nft_af_ipv4 __read_mostly = {
+struct nft_af_info nft_af_ipv4 __read_mostly = {
.family = NFPROTO_IPV4,
.nhooks = NF_INET_NUMHOOKS,
.owner = THIS_MODULE,
+ .nops = 1,
.hooks = {
+ [NF_INET_LOCAL_IN] = nft_do_chain_ipv4,
[NF_INET_LOCAL_OUT] = nft_ipv4_output,
+ [NF_INET_FORWARD] = nft_do_chain_ipv4,
+ [NF_INET_PRE_ROUTING] = nft_do_chain_ipv4,
+ [NF_INET_POST_ROUTING] = nft_do_chain_ipv4,
},
};
+EXPORT_SYMBOL_GPL(nft_af_ipv4);
static int nf_tables_ipv4_init_net(struct net *net)
{
@@ -75,42 +91,28 @@ static struct pernet_operations nf_tables_ipv4_net_ops = {
.exit = nf_tables_ipv4_exit_net,
};
-static unsigned int
-nft_do_chain_ipv4(const struct nf_hook_ops *ops,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
-{
- struct nft_pktinfo pkt;
-
- nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
-
- return nft_do_chain_pktinfo(&pkt, ops);
-}
-
-static struct nf_chain_type filter_ipv4 = {
- .family = NFPROTO_IPV4,
+static const struct nf_chain_type filter_ipv4 = {
.name = "filter",
.type = NFT_CHAIN_T_DEFAULT,
+ .family = NFPROTO_IPV4,
+ .owner = THIS_MODULE,
.hook_mask = (1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_FORWARD) |
(1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_POST_ROUTING),
- .fn = {
- [NF_INET_LOCAL_IN] = nft_do_chain_ipv4,
- [NF_INET_LOCAL_OUT] = nft_ipv4_output,
- [NF_INET_FORWARD] = nft_do_chain_ipv4,
- [NF_INET_PRE_ROUTING] = nft_do_chain_ipv4,
- [NF_INET_POST_ROUTING] = nft_do_chain_ipv4,
- },
};
static int __init nf_tables_ipv4_init(void)
{
+ int ret;
+
nft_register_chain_type(&filter_ipv4);
- return register_pernet_subsys(&nf_tables_ipv4_net_ops);
+ ret = register_pernet_subsys(&nf_tables_ipv4_net_ops);
+ if (ret < 0)
+ nft_unregister_chain_type(&filter_ipv4);
+
+ return ret;
}
static void __exit nf_tables_ipv4_exit(void)
diff --git a/net/ipv4/netfilter/nft_chain_nat_ipv4.c b/net/ipv4/netfilter/nft_chain_nat_ipv4.c
index cf2c792cd97..b5b256d45e6 100644
--- a/net/ipv4/netfilter/nft_chain_nat_ipv4.c
+++ b/net/ipv4/netfilter/nft_chain_nat_ipv4.c
@@ -75,7 +75,7 @@ static unsigned int nf_nat_fn(const struct nf_hook_ops *ops,
nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
- ret = nft_do_chain_pktinfo(&pkt, ops);
+ ret = nft_do_chain(&pkt, ops);
if (ret != NF_ACCEPT)
return ret;
if (!nf_nat_initialized(ct, maniptype)) {
@@ -164,21 +164,21 @@ static unsigned int nf_nat_output(const struct nf_hook_ops *ops,
return ret;
}
-static struct nf_chain_type nft_chain_nat_ipv4 = {
- .family = NFPROTO_IPV4,
+static const struct nf_chain_type nft_chain_nat_ipv4 = {
.name = "nat",
.type = NFT_CHAIN_T_NAT,
+ .family = NFPROTO_IPV4,
+ .owner = THIS_MODULE,
.hook_mask = (1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_POST_ROUTING) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_LOCAL_IN),
- .fn = {
+ .hooks = {
[NF_INET_PRE_ROUTING] = nf_nat_prerouting,
[NF_INET_POST_ROUTING] = nf_nat_postrouting,
[NF_INET_LOCAL_OUT] = nf_nat_output,
[NF_INET_LOCAL_IN] = nf_nat_fn,
},
- .me = THIS_MODULE,
};
static int __init nft_chain_nat_init(void)
diff --git a/net/ipv4/netfilter/nft_chain_route_ipv4.c b/net/ipv4/netfilter/nft_chain_route_ipv4.c
index 4e6bf9a3d7a..125b66766c0 100644
--- a/net/ipv4/netfilter/nft_chain_route_ipv4.c
+++ b/net/ipv4/netfilter/nft_chain_route_ipv4.c
@@ -47,7 +47,7 @@ static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops,
daddr = iph->daddr;
tos = iph->tos;
- ret = nft_do_chain_pktinfo(&pkt, ops);
+ ret = nft_do_chain(&pkt, ops);
if (ret != NF_DROP && ret != NF_QUEUE) {
iph = ip_hdr(skb);
@@ -61,15 +61,15 @@ static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops,
return ret;
}
-static struct nf_chain_type nft_chain_route_ipv4 = {
- .family = NFPROTO_IPV4,
+static const struct nf_chain_type nft_chain_route_ipv4 = {
.name = "route",
.type = NFT_CHAIN_T_ROUTE,
+ .family = NFPROTO_IPV4,
+ .owner = THIS_MODULE,
.hook_mask = (1 << NF_INET_LOCAL_OUT),
- .fn = {
+ .hooks = {
[NF_INET_LOCAL_OUT] = nf_route_table_hook,
},
- .me = THIS_MODULE,
};
static int __init nft_chain_route_init(void)
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 06493736fbc..699a42faab9 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -31,7 +31,8 @@ struct tcp_fastopen_metrics {
struct tcp_metrics_block {
struct tcp_metrics_block __rcu *tcpm_next;
- struct inetpeer_addr tcpm_addr;
+ struct inetpeer_addr tcpm_saddr;
+ struct inetpeer_addr tcpm_daddr;
unsigned long tcpm_stamp;
u32 tcpm_ts;
u32 tcpm_ts_stamp;
@@ -131,7 +132,8 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst,
}
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
- struct inetpeer_addr *addr,
+ struct inetpeer_addr *saddr,
+ struct inetpeer_addr *daddr,
unsigned int hash,
bool reclaim)
{
@@ -155,7 +157,8 @@ static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
if (!tm)
goto out_unlock;
}
- tm->tcpm_addr = *addr;
+ tm->tcpm_saddr = *saddr;
+ tm->tcpm_daddr = *daddr;
tcpm_suck_dst(tm, dst, true);
@@ -189,7 +192,8 @@ static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, in
return NULL;
}
-static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *addr,
+static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
+ const struct inetpeer_addr *daddr,
struct net *net, unsigned int hash)
{
struct tcp_metrics_block *tm;
@@ -197,7 +201,8 @@ static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *a
for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
tm = rcu_dereference(tm->tcpm_next)) {
- if (addr_same(&tm->tcpm_addr, addr))
+ if (addr_same(&tm->tcpm_saddr, saddr) &&
+ addr_same(&tm->tcpm_daddr, daddr))
break;
depth++;
}
@@ -208,19 +213,22 @@ static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
struct dst_entry *dst)
{
struct tcp_metrics_block *tm;
- struct inetpeer_addr addr;
+ struct inetpeer_addr saddr, daddr;
unsigned int hash;
struct net *net;
- addr.family = req->rsk_ops->family;
- switch (addr.family) {
+ saddr.family = req->rsk_ops->family;
+ daddr.family = req->rsk_ops->family;
+ switch (daddr.family) {
case AF_INET:
- addr.addr.a4 = inet_rsk(req)->ir_rmt_addr;
- hash = (__force unsigned int) addr.addr.a4;
+ saddr.addr.a4 = inet_rsk(req)->ir_loc_addr;
+ daddr.addr.a4 = inet_rsk(req)->ir_rmt_addr;
+ hash = (__force unsigned int) daddr.addr.a4;
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
- *(struct in6_addr *)addr.addr.a6 = inet_rsk(req)->ir_v6_rmt_addr;
+ *(struct in6_addr *)saddr.addr.a6 = inet_rsk(req)->ir_v6_loc_addr;
+ *(struct in6_addr *)daddr.addr.a6 = inet_rsk(req)->ir_v6_rmt_addr;
hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
break;
#endif
@@ -233,7 +241,8 @@ static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
tm = rcu_dereference(tm->tcpm_next)) {
- if (addr_same(&tm->tcpm_addr, &addr))
+ if (addr_same(&tm->tcpm_saddr, &saddr) &&
+ addr_same(&tm->tcpm_daddr, &daddr))
break;
}
tcpm_check_stamp(tm, dst);
@@ -243,19 +252,22 @@ static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
{
struct tcp_metrics_block *tm;
- struct inetpeer_addr addr;
+ struct inetpeer_addr saddr, daddr;
unsigned int hash;
struct net *net;
- addr.family = tw->tw_family;
- switch (addr.family) {
+ saddr.family = tw->tw_family;
+ daddr.family = tw->tw_family;
+ switch (daddr.family) {
case AF_INET:
- addr.addr.a4 = tw->tw_daddr;
- hash = (__force unsigned int) addr.addr.a4;
+ saddr.addr.a4 = tw->tw_rcv_saddr;
+ daddr.addr.a4 = tw->tw_daddr;
+ hash = (__force unsigned int) daddr.addr.a4;
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
- *(struct in6_addr *)addr.addr.a6 = tw->tw_v6_daddr;
+ *(struct in6_addr *)saddr.addr.a6 = tw->tw_v6_rcv_saddr;
+ *(struct in6_addr *)daddr.addr.a6 = tw->tw_v6_daddr;
hash = ipv6_addr_hash(&tw->tw_v6_daddr);
break;
#endif
@@ -268,7 +280,8 @@ static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock
for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
tm = rcu_dereference(tm->tcpm_next)) {
- if (addr_same(&tm->tcpm_addr, &addr))
+ if (addr_same(&tm->tcpm_saddr, &saddr) &&
+ addr_same(&tm->tcpm_daddr, &daddr))
break;
}
return tm;
@@ -279,20 +292,23 @@ static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
bool create)
{
struct tcp_metrics_block *tm;
- struct inetpeer_addr addr;
+ struct inetpeer_addr saddr, daddr;
unsigned int hash;
struct net *net;
bool reclaim;
- addr.family = sk->sk_family;
- switch (addr.family) {
+ saddr.family = sk->sk_family;
+ daddr.family = sk->sk_family;
+ switch (daddr.family) {
case AF_INET:
- addr.addr.a4 = inet_sk(sk)->inet_daddr;
- hash = (__force unsigned int) addr.addr.a4;
+ saddr.addr.a4 = inet_sk(sk)->inet_saddr;
+ daddr.addr.a4 = inet_sk(sk)->inet_daddr;
+ hash = (__force unsigned int) daddr.addr.a4;
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
- *(struct in6_addr *)addr.addr.a6 = sk->sk_v6_daddr;
+ *(struct in6_addr *)saddr.addr.a6 = sk->sk_v6_rcv_saddr;
+ *(struct in6_addr *)daddr.addr.a6 = sk->sk_v6_daddr;
hash = ipv6_addr_hash(&sk->sk_v6_daddr);
break;
#endif
@@ -303,14 +319,14 @@ static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
net = dev_net(dst->dev);
hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
- tm = __tcp_get_metrics(&addr, net, hash);
+ tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
reclaim = false;
if (tm == TCP_METRICS_RECLAIM_PTR) {
reclaim = true;
tm = NULL;
}
if (!tm && create)
- tm = tcpm_new(dst, &addr, hash, reclaim);
+ tm = tcpm_new(dst, &saddr, &daddr, hash, reclaim);
else
tcpm_check_stamp(tm, dst);
@@ -724,15 +740,21 @@ static int tcp_metrics_fill_info(struct sk_buff *msg,
struct nlattr *nest;
int i;
- switch (tm->tcpm_addr.family) {
+ switch (tm->tcpm_daddr.family) {
case AF_INET:
if (nla_put_be32(msg, TCP_METRICS_ATTR_ADDR_IPV4,
- tm->tcpm_addr.addr.a4) < 0)
+ tm->tcpm_daddr.addr.a4) < 0)
+ goto nla_put_failure;
+ if (nla_put_be32(msg, TCP_METRICS_ATTR_SADDR_IPV4,
+ tm->tcpm_saddr.addr.a4) < 0)
goto nla_put_failure;
break;
case AF_INET6:
if (nla_put(msg, TCP_METRICS_ATTR_ADDR_IPV6, 16,
- tm->tcpm_addr.addr.a6) < 0)
+ tm->tcpm_daddr.addr.a6) < 0)
+ goto nla_put_failure;
+ if (nla_put(msg, TCP_METRICS_ATTR_SADDR_IPV6, 16,
+ tm->tcpm_saddr.addr.a6) < 0)
goto nla_put_failure;
break;
default:
@@ -855,44 +877,66 @@ done:
return skb->len;
}
-static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
- unsigned int *hash, int optional)
+static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
+ unsigned int *hash, int optional, int v4, int v6)
{
struct nlattr *a;
- a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV4];
+ a = info->attrs[v4];
if (a) {
addr->family = AF_INET;
addr->addr.a4 = nla_get_be32(a);
- *hash = (__force unsigned int) addr->addr.a4;
+ if (hash)
+ *hash = (__force unsigned int) addr->addr.a4;
return 0;
}
- a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV6];
+ a = info->attrs[v6];
if (a) {
if (nla_len(a) != sizeof(struct in6_addr))
return -EINVAL;
addr->family = AF_INET6;
memcpy(addr->addr.a6, nla_data(a), sizeof(addr->addr.a6));
- *hash = ipv6_addr_hash((struct in6_addr *) addr->addr.a6);
+ if (hash)
+ *hash = ipv6_addr_hash((struct in6_addr *) addr->addr.a6);
return 0;
}
return optional ? 1 : -EAFNOSUPPORT;
}
+static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
+ unsigned int *hash, int optional)
+{
+ return __parse_nl_addr(info, addr, hash, optional,
+ TCP_METRICS_ATTR_ADDR_IPV4,
+ TCP_METRICS_ATTR_ADDR_IPV6);
+}
+
+static int parse_nl_saddr(struct genl_info *info, struct inetpeer_addr *addr)
+{
+ return __parse_nl_addr(info, addr, NULL, 0,
+ TCP_METRICS_ATTR_SADDR_IPV4,
+ TCP_METRICS_ATTR_SADDR_IPV6);
+}
+
static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
struct tcp_metrics_block *tm;
- struct inetpeer_addr addr;
+ struct inetpeer_addr saddr, daddr;
unsigned int hash;
struct sk_buff *msg;
struct net *net = genl_info_net(info);
void *reply;
int ret;
+ bool src = true;
- ret = parse_nl_addr(info, &addr, &hash, 0);
+ ret = parse_nl_addr(info, &daddr, &hash, 0);
if (ret < 0)
return ret;
+ ret = parse_nl_saddr(info, &saddr);
+ if (ret < 0)
+ src = false;
+
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
@@ -907,7 +951,8 @@ static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
rcu_read_lock();
for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
tm = rcu_dereference(tm->tcpm_next)) {
- if (addr_same(&tm->tcpm_addr, &addr)) {
+ if (addr_same(&tm->tcpm_daddr, &daddr) &&
+ (!src || addr_same(&tm->tcpm_saddr, &saddr))) {
ret = tcp_metrics_fill_info(msg, tm);
break;
}
@@ -960,34 +1005,44 @@ static int tcp_metrics_flush_all(struct net *net)
static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
struct tcpm_hash_bucket *hb;
- struct tcp_metrics_block *tm;
+ struct tcp_metrics_block *tm, *tmlist = NULL;
struct tcp_metrics_block __rcu **pp;
- struct inetpeer_addr addr;
+ struct inetpeer_addr saddr, daddr;
unsigned int hash;
struct net *net = genl_info_net(info);
int ret;
+ bool src = true;
- ret = parse_nl_addr(info, &addr, &hash, 1);
+ ret = parse_nl_addr(info, &daddr, &hash, 1);
if (ret < 0)
return ret;
if (ret > 0)
return tcp_metrics_flush_all(net);
+ ret = parse_nl_saddr(info, &saddr);
+ if (ret < 0)
+ src = false;
hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
hb = net->ipv4.tcp_metrics_hash + hash;
pp = &hb->chain;
spin_lock_bh(&tcp_metrics_lock);
- for (tm = deref_locked_genl(*pp); tm;
- pp = &tm->tcpm_next, tm = deref_locked_genl(*pp)) {
- if (addr_same(&tm->tcpm_addr, &addr)) {
+ for (tm = deref_locked_genl(*pp); tm; tm = deref_locked_genl(*pp)) {
+ if (addr_same(&tm->tcpm_daddr, &daddr) &&
+ (!src || addr_same(&tm->tcpm_saddr, &saddr))) {
*pp = tm->tcpm_next;
- break;
+ tm->tcpm_next = tmlist;
+ tmlist = tm;
+ } else {
+ pp = &tm->tcpm_next;
}
}
spin_unlock_bh(&tcp_metrics_lock);
- if (!tm)
+ if (!tmlist)
return -ESRCH;
- kfree_rcu(tm, rcu_head);
+ for (tm = tmlist; tm; tm = tmlist) {
+ tmlist = tm->tcpm_next;
+ kfree_rcu(tm, rcu_head);
+ }
return 0;
}
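
The rewritten deletion loop in tcp_metrics_nl_cmd_del() walks the hash chain through a pointer-to-pointer, detaches every block matching the requested addresses onto a private list, and only then disposes of the blocks with kfree_rcu() outside the lock. A generic user-space sketch of that unlink pattern (names are illustrative; the nodes here are stack-allocated, so nothing is actually freed):

#include <stdio.h>

/* Sketch of the unlink pattern used in tcp_metrics_nl_cmd_del(): walk a
 * singly linked list through a pointer-to-pointer, detach matching nodes
 * onto a private list, and dispose of them afterwards (the kernel defers
 * the free with kfree_rcu(); this sketch only relinks stack nodes).
 */
struct node {
	struct node *next;
	int key;
};

static struct node *detach_matching(struct node **pp, int key)
{
	struct node *n, *matched = NULL;

	while ((n = *pp) != NULL) {
		if (n->key == key) {
			*pp = n->next;		/* unlink from the chain */
			n->next = matched;	/* push onto private list */
			matched = n;
		} else {
			pp = &n->next;
		}
	}
	return matched;
}

int main(void)
{
	struct node c = { NULL, 1 }, b = { &c, 2 }, a = { &b, 1 };
	struct node *head = &a;
	struct node *matched = detach_matching(&head, 1);
	int remaining = 0, removed = 0;

	for (struct node *n = head; n; n = n->next)
		remaining++;
	for (struct node *n = matched; n; n = n->next)
		removed++;
	printf("removed %d, remaining %d\n", removed, remaining);	/* 2, 1 */
	return 0;
}
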
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 31f75ea9cb6..a9fa6c1feed 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1822,6 +1822,7 @@ static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
return addrconf_ifid_sit(eui, dev);
case ARPHRD_IPGRE:
return addrconf_ifid_gre(eui, dev);
+ case ARPHRD_6LOWPAN:
case ARPHRD_IEEE802154:
return addrconf_ifid_eui64(eui, dev);
case ARPHRD_IEEE1394:
@@ -2677,7 +2678,8 @@ static void addrconf_dev_config(struct net_device *dev)
(dev->type != ARPHRD_INFINIBAND) &&
(dev->type != ARPHRD_IEEE802154) &&
(dev->type != ARPHRD_IEEE1394) &&
- (dev->type != ARPHRD_TUNNEL6)) {
+ (dev->type != ARPHRD_TUNNEL6) &&
+ (dev->type != ARPHRD_6LOWPAN)) {
/* Alas, we support only Ethernet autoconfiguration. */
return;
}
diff --git a/net/ipv6/netfilter/nf_tables_ipv6.c b/net/ipv6/netfilter/nf_tables_ipv6.c
index d77db8a1350..0d812b31277 100644
--- a/net/ipv6/netfilter/nf_tables_ipv6.c
+++ b/net/ipv6/netfilter/nf_tables_ipv6.c
@@ -16,34 +16,51 @@
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_ipv6.h>
+static unsigned int nft_do_chain_ipv6(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ struct nft_pktinfo pkt;
+
+ /* malformed packet, drop it */
+ if (nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out) < 0)
+ return NF_DROP;
+
+ return nft_do_chain(&pkt, ops);
+}
+
static unsigned int nft_ipv6_output(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
- struct nft_pktinfo pkt;
-
if (unlikely(skb->len < sizeof(struct ipv6hdr))) {
if (net_ratelimit())
pr_info("nf_tables_ipv6: ignoring short SOCK_RAW "
"packet\n");
return NF_ACCEPT;
}
- if (nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out) < 0)
- return NF_DROP;
- return nft_do_chain_pktinfo(&pkt, ops);
+ return nft_do_chain_ipv6(ops, skb, in, out, okfn);
}
-static struct nft_af_info nft_af_ipv6 __read_mostly = {
+struct nft_af_info nft_af_ipv6 __read_mostly = {
.family = NFPROTO_IPV6,
.nhooks = NF_INET_NUMHOOKS,
.owner = THIS_MODULE,
+ .nops = 1,
.hooks = {
+ [NF_INET_LOCAL_IN] = nft_do_chain_ipv6,
[NF_INET_LOCAL_OUT] = nft_ipv6_output,
+ [NF_INET_FORWARD] = nft_do_chain_ipv6,
+ [NF_INET_PRE_ROUTING] = nft_do_chain_ipv6,
+ [NF_INET_POST_ROUTING] = nft_do_chain_ipv6,
},
};
+EXPORT_SYMBOL_GPL(nft_af_ipv6);
static int nf_tables_ipv6_init_net(struct net *net)
{
@@ -73,44 +90,28 @@ static struct pernet_operations nf_tables_ipv6_net_ops = {
.exit = nf_tables_ipv6_exit_net,
};
-static unsigned int
-nft_do_chain_ipv6(const struct nf_hook_ops *ops,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
-{
- struct nft_pktinfo pkt;
-
- /* malformed packet, drop it */
- if (nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out) < 0)
- return NF_DROP;
-
- return nft_do_chain_pktinfo(&pkt, ops);
-}
-
-static struct nf_chain_type filter_ipv6 = {
- .family = NFPROTO_IPV6,
+static const struct nf_chain_type filter_ipv6 = {
.name = "filter",
.type = NFT_CHAIN_T_DEFAULT,
+ .family = NFPROTO_IPV6,
+ .owner = THIS_MODULE,
.hook_mask = (1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_FORWARD) |
(1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_POST_ROUTING),
- .fn = {
- [NF_INET_LOCAL_IN] = nft_do_chain_ipv6,
- [NF_INET_LOCAL_OUT] = nft_ipv6_output,
- [NF_INET_FORWARD] = nft_do_chain_ipv6,
- [NF_INET_PRE_ROUTING] = nft_do_chain_ipv6,
- [NF_INET_POST_ROUTING] = nft_do_chain_ipv6,
- },
};
static int __init nf_tables_ipv6_init(void)
{
+ int ret;
+
nft_register_chain_type(&filter_ipv6);
- return register_pernet_subsys(&nf_tables_ipv6_net_ops);
+ ret = register_pernet_subsys(&nf_tables_ipv6_net_ops);
+ if (ret < 0)
+ nft_unregister_chain_type(&filter_ipv6);
+
+ return ret;
}
static void __exit nf_tables_ipv6_exit(void)
diff --git a/net/ipv6/netfilter/nft_chain_nat_ipv6.c b/net/ipv6/netfilter/nft_chain_nat_ipv6.c
index e86dcd70dc7..9c3297a768f 100644
--- a/net/ipv6/netfilter/nft_chain_nat_ipv6.c
+++ b/net/ipv6/netfilter/nft_chain_nat_ipv6.c
@@ -79,7 +79,7 @@ static unsigned int nf_nat_ipv6_fn(const struct nf_hook_ops *ops,
nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out);
- ret = nft_do_chain_pktinfo(&pkt, ops);
+ ret = nft_do_chain(&pkt, ops);
if (ret != NF_ACCEPT)
return ret;
if (!nf_nat_initialized(ct, maniptype)) {
@@ -170,21 +170,21 @@ static unsigned int nf_nat_ipv6_output(const struct nf_hook_ops *ops,
return ret;
}
-static struct nf_chain_type nft_chain_nat_ipv6 = {
- .family = NFPROTO_IPV6,
+static const struct nf_chain_type nft_chain_nat_ipv6 = {
.name = "nat",
.type = NFT_CHAIN_T_NAT,
+ .family = NFPROTO_IPV6,
+ .owner = THIS_MODULE,
.hook_mask = (1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_POST_ROUTING) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_LOCAL_IN),
- .fn = {
+ .hooks = {
[NF_INET_PRE_ROUTING] = nf_nat_ipv6_prerouting,
[NF_INET_POST_ROUTING] = nf_nat_ipv6_postrouting,
[NF_INET_LOCAL_OUT] = nf_nat_ipv6_output,
[NF_INET_LOCAL_IN] = nf_nat_ipv6_fn,
},
- .me = THIS_MODULE,
};
static int __init nft_chain_nat_ipv6_init(void)
diff --git a/net/ipv6/netfilter/nft_chain_route_ipv6.c b/net/ipv6/netfilter/nft_chain_route_ipv6.c
index 3fe40f0456a..42031299585 100644
--- a/net/ipv6/netfilter/nft_chain_route_ipv6.c
+++ b/net/ipv6/netfilter/nft_chain_route_ipv6.c
@@ -47,7 +47,7 @@ static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops,
/* flowlabel and prio (includes version, which shouldn't change either */
flowlabel = *((u32 *)ipv6_hdr(skb));
- ret = nft_do_chain_pktinfo(&pkt, ops);
+ ret = nft_do_chain(&pkt, ops);
if (ret != NF_DROP && ret != NF_QUEUE &&
(memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) ||
memcmp(&ipv6_hdr(skb)->daddr, &daddr, sizeof(daddr)) ||
@@ -59,15 +59,15 @@ static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops,
return ret;
}
-static struct nf_chain_type nft_chain_route_ipv6 = {
- .family = NFPROTO_IPV6,
+static const struct nf_chain_type nft_chain_route_ipv6 = {
.name = "route",
.type = NFT_CHAIN_T_ROUTE,
+ .family = NFPROTO_IPV6,
+ .owner = THIS_MODULE,
.hook_mask = (1 << NF_INET_LOCAL_OUT),
- .fn = {
+ .hooks = {
[NF_INET_LOCAL_OUT] = nf_route_table_hook,
},
- .me = THIS_MODULE,
};
static int __init nft_chain_route_init(void)
diff --git a/net/mac80211/aes_cmac.c b/net/mac80211/aes_cmac.c
index 537488cbf94..9b9009f9955 100644
--- a/net/mac80211/aes_cmac.c
+++ b/net/mac80211/aes_cmac.c
@@ -111,7 +111,7 @@ void ieee80211_aes_cmac(struct crypto_cipher *tfm, const u8 *aad,
}
-struct crypto_cipher * ieee80211_aes_cmac_key_setup(const u8 key[])
+struct crypto_cipher *ieee80211_aes_cmac_key_setup(const u8 key[])
{
struct crypto_cipher *tfm;
diff --git a/net/mac80211/aes_cmac.h b/net/mac80211/aes_cmac.h
index 20785a64725..0ce6487af79 100644
--- a/net/mac80211/aes_cmac.h
+++ b/net/mac80211/aes_cmac.h
@@ -11,7 +11,7 @@
#include <linux/crypto.h>
-struct crypto_cipher * ieee80211_aes_cmac_key_setup(const u8 key[]);
+struct crypto_cipher *ieee80211_aes_cmac_key_setup(const u8 key[]);
void ieee80211_aes_cmac(struct crypto_cipher *tfm, const u8 *aad,
const u8 *data, size_t data_len, u8 *mic);
void ieee80211_aes_cmac_key_free(struct crypto_cipher *tfm);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index ac185286842..09d2e58a2ba 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -828,6 +828,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
if (cfg80211_chandef_identical(&local->monitor_chandef, chandef))
return 0;
+ mutex_lock(&local->mtx);
mutex_lock(&local->iflist_mtx);
if (local->use_chanctx) {
sdata = rcu_dereference_protected(
@@ -846,6 +847,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
if (ret == 0)
local->monitor_chandef = *chandef;
mutex_unlock(&local->iflist_mtx);
+ mutex_unlock(&local->mtx);
return ret;
}
@@ -951,6 +953,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_ap_settings *params)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = sdata->local;
struct beacon_data *old;
struct ieee80211_sub_if_data *vlan;
u32 changed = BSS_CHANGED_BEACON_INT |
@@ -969,8 +972,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
sdata->needed_rx_chains = sdata->local->rx_chains;
sdata->radar_required = params->radar_required;
+ mutex_lock(&local->mtx);
err = ieee80211_vif_use_channel(sdata, &params->chandef,
IEEE80211_CHANCTX_SHARED);
+ mutex_unlock(&local->mtx);
if (err)
return err;
ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
@@ -1121,7 +1126,9 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
skb_queue_purge(&sdata->u.ap.ps.bc_buf);
ieee80211_vif_copy_chanctx_to_vlans(sdata, true);
+ mutex_lock(&local->mtx);
ieee80211_vif_release_channel(sdata);
+ mutex_unlock(&local->mtx);
return 0;
}
@@ -1944,8 +1951,10 @@ static int ieee80211_join_mesh(struct wiphy *wiphy, struct net_device *dev,
sdata->smps_mode = IEEE80211_SMPS_OFF;
sdata->needed_rx_chains = sdata->local->rx_chains;
+ mutex_lock(&sdata->local->mtx);
err = ieee80211_vif_use_channel(sdata, &setup->chandef,
IEEE80211_CHANCTX_SHARED);
+ mutex_unlock(&sdata->local->mtx);
if (err)
return err;
@@ -1957,7 +1966,9 @@ static int ieee80211_leave_mesh(struct wiphy *wiphy, struct net_device *dev)
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
ieee80211_stop_mesh(sdata);
+ mutex_lock(&sdata->local->mtx);
ieee80211_vif_release_channel(sdata);
+ mutex_unlock(&sdata->local->mtx);
return 0;
}
@@ -2895,26 +2906,29 @@ static int ieee80211_start_radar_detection(struct wiphy *wiphy,
unsigned long timeout;
int err;
- if (!list_empty(&local->roc_list) || local->scanning)
- return -EBUSY;
+ mutex_lock(&local->mtx);
+ if (!list_empty(&local->roc_list) || local->scanning) {
+ err = -EBUSY;
+ goto out_unlock;
+ }
/* whatever, but channel contexts should not complain about that one */
sdata->smps_mode = IEEE80211_SMPS_OFF;
sdata->needed_rx_chains = local->rx_chains;
sdata->radar_required = true;
- mutex_lock(&local->iflist_mtx);
err = ieee80211_vif_use_channel(sdata, chandef,
IEEE80211_CHANCTX_SHARED);
- mutex_unlock(&local->iflist_mtx);
if (err)
- return err;
+ goto out_unlock;
timeout = msecs_to_jiffies(IEEE80211_DFS_MIN_CAC_TIME_MS);
ieee80211_queue_delayed_work(&sdata->local->hw,
&sdata->dfs_cac_timer_work, timeout);
- return 0;
+ out_unlock:
+ mutex_unlock(&local->mtx);
+ return err;
}
static struct cfg80211_beacon_data *
@@ -2990,7 +3004,9 @@ void ieee80211_csa_finalize_work(struct work_struct *work)
goto unlock;
sdata->radar_required = sdata->csa_radar_required;
+ mutex_lock(&local->mtx);
err = ieee80211_vif_change_channel(sdata, &changed);
+ mutex_unlock(&local->mtx);
if (WARN_ON(err < 0))
goto unlock;
@@ -3821,6 +3837,31 @@ static void ieee80211_set_wakeup(struct wiphy *wiphy, bool enabled)
}
#endif
+static int ieee80211_set_qos_map(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_qos_map *qos_map)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct mac80211_qos_map *new_qos_map, *old_qos_map;
+
+ if (qos_map) {
+ new_qos_map = kzalloc(sizeof(*new_qos_map), GFP_KERNEL);
+ if (!new_qos_map)
+ return -ENOMEM;
+ memcpy(&new_qos_map->qos_map, qos_map, sizeof(*qos_map));
+ } else {
+ /* A NULL qos_map was passed to disable QoS mapping */
+ new_qos_map = NULL;
+ }
+
+ old_qos_map = rtnl_dereference(sdata->qos_map);
+ rcu_assign_pointer(sdata->qos_map, new_qos_map);
+ if (old_qos_map)
+ kfree_rcu(old_qos_map, rcu_head);
+
+ return 0;
+}
+
struct cfg80211_ops mac80211_config_ops = {
.add_virtual_intf = ieee80211_add_iface,
.del_virtual_intf = ieee80211_del_iface,
@@ -3900,4 +3941,5 @@ struct cfg80211_ops mac80211_config_ops = {
.get_channel = ieee80211_cfg_get_channel,
.start_radar_detection = ieee80211_start_radar_detection,
.channel_switch = ieee80211_channel_switch,
+ .set_qos_map = ieee80211_set_qos_map,
};
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index a57d5d9466b..f43613a97dd 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -232,8 +232,8 @@ ieee80211_new_chanctx(struct ieee80211_local *local,
if (!local->use_chanctx)
local->hw.conf.radar_enabled = ctx->conf.radar_enabled;
- /* acquire mutex to prevent idle from changing */
- mutex_lock(&local->mtx);
+ /* we hold the mutex to prevent idle from changing */
+ lockdep_assert_held(&local->mtx);
/* turn idle off *before* setting channel -- some drivers need that */
changed = ieee80211_idle_off(local);
if (changed)
@@ -246,19 +246,14 @@ ieee80211_new_chanctx(struct ieee80211_local *local,
err = drv_add_chanctx(local, ctx);
if (err) {
kfree(ctx);
- ctx = ERR_PTR(err);
-
ieee80211_recalc_idle(local);
- goto out;
+ return ERR_PTR(err);
}
}
/* and keep the mutex held until the new chanctx is on the list */
list_add_rcu(&ctx->list, &local->chanctx_list);
- out:
- mutex_unlock(&local->mtx);
-
return ctx;
}
@@ -294,9 +289,7 @@ static void ieee80211_free_chanctx(struct ieee80211_local *local,
/* throw a warning if this wasn't the only channel context. */
WARN_ON(check_single_channel && !list_empty(&local->chanctx_list));
- mutex_lock(&local->mtx);
ieee80211_recalc_idle(local);
- mutex_unlock(&local->mtx);
}
static int ieee80211_assign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
@@ -358,6 +351,31 @@ static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
ieee80211_change_chanctx(local, ctx, compat);
}
+static void ieee80211_recalc_radar_chanctx(struct ieee80211_local *local,
+ struct ieee80211_chanctx *chanctx)
+{
+ bool radar_enabled;
+
+ lockdep_assert_held(&local->chanctx_mtx);
+ /* for setting local->radar_detect_enabled */
+ lockdep_assert_held(&local->mtx);
+
+ radar_enabled = ieee80211_is_radar_required(local);
+
+ if (radar_enabled == chanctx->conf.radar_enabled)
+ return;
+
+ chanctx->conf.radar_enabled = radar_enabled;
+ local->radar_detect_enabled = chanctx->conf.radar_enabled;
+
+ if (!local->use_chanctx) {
+ local->hw.conf.radar_enabled = chanctx->conf.radar_enabled;
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+ }
+
+ drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RADAR);
+}
+
static void ieee80211_unassign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
struct ieee80211_chanctx *ctx)
{
@@ -404,29 +422,6 @@ static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
ieee80211_free_chanctx(local, ctx);
}
-void ieee80211_recalc_radar_chanctx(struct ieee80211_local *local,
- struct ieee80211_chanctx *chanctx)
-{
- bool radar_enabled;
-
- lockdep_assert_held(&local->chanctx_mtx);
-
- radar_enabled = ieee80211_is_radar_required(local);
-
- if (radar_enabled == chanctx->conf.radar_enabled)
- return;
-
- chanctx->conf.radar_enabled = radar_enabled;
- local->radar_detect_enabled = chanctx->conf.radar_enabled;
-
- if (!local->use_chanctx) {
- local->hw.conf.radar_enabled = chanctx->conf.radar_enabled;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
- }
-
- drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RADAR);
-}
-
void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
struct ieee80211_chanctx *chanctx)
{
@@ -518,6 +513,8 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
struct ieee80211_chanctx *ctx;
int ret;
+ lockdep_assert_held(&local->mtx);
+
WARN_ON(sdata->dev && netif_carrier_ok(sdata->dev));
mutex_lock(&local->chanctx_mtx);
@@ -558,6 +555,8 @@ int ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata,
int ret;
u32 chanctx_changed = 0;
+ lockdep_assert_held(&local->mtx);
+
/* should never be called if not performing a channel switch. */
if (WARN_ON(!sdata->vif.csa_active))
return -EINVAL;
@@ -655,6 +654,8 @@ void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
{
WARN_ON(sdata->dev && netif_carrier_ok(sdata->dev));
+ lockdep_assert_held(&sdata->local->mtx);
+
mutex_lock(&sdata->local->chanctx_mtx);
__ieee80211_vif_release_channel(sdata);
mutex_unlock(&sdata->local->chanctx_mtx);
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index d6ba841437b..771080ec721 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -293,14 +293,17 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
radar_required = true;
}
+ mutex_lock(&local->mtx);
ieee80211_vif_release_channel(sdata);
if (ieee80211_vif_use_channel(sdata, &chandef,
ifibss->fixed_channel ?
IEEE80211_CHANCTX_SHARED :
IEEE80211_CHANCTX_EXCLUSIVE)) {
sdata_info(sdata, "Failed to join IBSS, no channel context\n");
+ mutex_unlock(&local->mtx);
return;
}
+ mutex_unlock(&local->mtx);
memcpy(ifibss->bssid, bssid, ETH_ALEN);
@@ -363,7 +366,9 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
sdata->vif.bss_conf.ssid_len = 0;
RCU_INIT_POINTER(ifibss->presp, NULL);
kfree_rcu(presp, rcu_head);
+ mutex_lock(&local->mtx);
ieee80211_vif_release_channel(sdata);
+ mutex_unlock(&local->mtx);
sdata_info(sdata, "Failed to join IBSS, driver failure: %d\n",
err);
return;
@@ -747,7 +752,9 @@ static void ieee80211_ibss_disconnect(struct ieee80211_sub_if_data *sdata)
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
BSS_CHANGED_IBSS);
drv_leave_ibss(local, sdata);
+ mutex_lock(&local->mtx);
ieee80211_vif_release_channel(sdata);
+ mutex_unlock(&local->mtx);
}
static void ieee80211_csa_connection_drop_work(struct work_struct *work)
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index fb5dbcb79a1..953b9e29454 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -246,7 +246,8 @@ struct ps_data {
/* yes, this looks ugly, but guarantees that we can later use
* bitmap_empty :)
* NB: don't touch this bitmap, use sta_info_{set,clear}_tim_bit */
- u8 tim[sizeof(unsigned long) * BITS_TO_LONGS(IEEE80211_MAX_AID + 1)];
+ u8 tim[sizeof(unsigned long) * BITS_TO_LONGS(IEEE80211_MAX_AID + 1)]
+ __aligned(__alignof__(unsigned long));
struct sk_buff_head bc_buf;
atomic_t num_sta_ps; /* number of stations in PS mode */
int dtim_count;
@@ -693,6 +694,11 @@ struct ieee80211_chanctx {
struct ieee80211_chanctx_conf conf;
};
+struct mac80211_qos_map {
+ struct cfg80211_qos_map qos_map;
+ struct rcu_head rcu_head;
+};
+
struct ieee80211_sub_if_data {
struct list_head list;
@@ -738,6 +744,7 @@ struct ieee80211_sub_if_data {
int encrypt_headroom;
struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS];
+ struct mac80211_qos_map __rcu *qos_map;
struct work_struct csa_finalize_work;
int csa_counter_offset_beacon;
@@ -1775,8 +1782,6 @@ void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
struct ieee80211_chanctx *chanctx);
-void ieee80211_recalc_radar_chanctx(struct ieee80211_local *local,
- struct ieee80211_chanctx *chanctx);
void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index d624ed49a7d..b2c83c0f06d 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -418,8 +418,10 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
return ret;
}
+ mutex_lock(&local->mtx);
ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef,
IEEE80211_CHANCTX_EXCLUSIVE);
+ mutex_unlock(&local->mtx);
if (ret) {
drv_remove_interface(local, sdata);
kfree(sdata);
@@ -456,7 +458,9 @@ void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
synchronize_net();
+ mutex_lock(&local->mtx);
ieee80211_vif_release_channel(sdata);
+ mutex_unlock(&local->mtx);
drv_remove_interface(local, sdata);
@@ -826,9 +830,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
if (sdata->wdev.cac_started) {
chandef = sdata->vif.bss_conf.chandef;
WARN_ON(local->suspended);
- mutex_lock(&local->iflist_mtx);
+ mutex_lock(&local->mtx);
ieee80211_vif_release_channel(sdata);
- mutex_unlock(&local->iflist_mtx);
+ mutex_unlock(&local->mtx);
cfg80211_cac_event(sdata->dev, &chandef,
NL80211_RADAR_CAC_ABORTED,
GFP_KERNEL);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 9c2c7ee2cc3..fc1d82465b3 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -888,7 +888,9 @@ static void ieee80211_chswitch_work(struct work_struct *work)
if (!ifmgd->associated)
goto out;
+ mutex_lock(&local->mtx);
ret = ieee80211_vif_change_channel(sdata, &changed);
+ mutex_unlock(&local->mtx);
if (ret) {
sdata_info(sdata,
"vif channel switch failed, disconnecting\n");
@@ -1401,10 +1403,14 @@ void ieee80211_dfs_cac_timer_work(struct work_struct *work)
dfs_cac_timer_work);
struct cfg80211_chan_def chandef = sdata->vif.bss_conf.chandef;
- ieee80211_vif_release_channel(sdata);
- cfg80211_cac_event(sdata->dev, &chandef,
- NL80211_RADAR_CAC_FINISHED,
- GFP_KERNEL);
+ mutex_lock(&sdata->local->mtx);
+ if (sdata->wdev.cac_started) {
+ ieee80211_vif_release_channel(sdata);
+ cfg80211_cac_event(sdata->dev, &chandef,
+ NL80211_RADAR_CAC_FINISHED,
+ GFP_KERNEL);
+ }
+ mutex_unlock(&sdata->local->mtx);
}
/* MLME */
@@ -1747,7 +1753,9 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
ifmgd->have_beacon = false;
ifmgd->flags = 0;
+ mutex_lock(&local->mtx);
ieee80211_vif_release_channel(sdata);
+ mutex_unlock(&local->mtx);
sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM;
}
@@ -2070,7 +2078,9 @@ static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata,
memset(sdata->u.mgd.bssid, 0, ETH_ALEN);
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
sdata->u.mgd.flags = 0;
+ mutex_lock(&sdata->local->mtx);
ieee80211_vif_release_channel(sdata);
+ mutex_unlock(&sdata->local->mtx);
}
cfg80211_put_bss(sdata->local->hw.wiphy, auth_data->bss);
@@ -2319,7 +2329,9 @@ static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
memset(sdata->u.mgd.bssid, 0, ETH_ALEN);
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
sdata->u.mgd.flags = 0;
+ mutex_lock(&sdata->local->mtx);
ieee80211_vif_release_channel(sdata);
+ mutex_unlock(&sdata->local->mtx);
}
kfree(assoc_data);
@@ -3670,6 +3682,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
/* will change later if needed */
sdata->smps_mode = IEEE80211_SMPS_OFF;
+ mutex_lock(&local->mtx);
/*
* If this fails (possibly due to channel context sharing
* on incompatible channels, e.g. 80+80 and 160 sharing the
@@ -3681,13 +3694,15 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
/* don't downgrade for 5 and 10 MHz channels, though. */
if (chandef.width == NL80211_CHAN_WIDTH_5 ||
chandef.width == NL80211_CHAN_WIDTH_10)
- return ret;
+ goto out;
while (ret && chandef.width != NL80211_CHAN_WIDTH_20_NOHT) {
ifmgd->flags |= ieee80211_chandef_downgrade(&chandef);
ret = ieee80211_vif_use_channel(sdata, &chandef,
IEEE80211_CHANCTX_SHARED);
}
+ out:
+ mutex_unlock(&local->mtx);
return ret;
}
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index d2f19f7e709..f3d88b0c054 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -135,7 +135,7 @@ minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
u32 usecs;
int i;
- for (i=0; i < MAX_THR_RATES; i++)
+ for (i = 0; i < MAX_THR_RATES; i++)
tmp_tp_rate[i] = 0;
for (i = 0; i < mi->n_rates; i++) {
@@ -190,7 +190,7 @@ minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
* choose the maximum throughput rate as max_prob_rate
* (2) if all success probabilities < 95%, the rate with
 * highest success probability is chosen as max_prob_rate */
- if (mr->probability >= MINSTREL_FRAC(95,100)) {
+ if (mr->probability >= MINSTREL_FRAC(95, 100)) {
if (mr->cur_tp >= mi->r[tmp_prob_rate].cur_tp)
tmp_prob_rate = i;
} else {
@@ -220,7 +220,7 @@ minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
static void
minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta,
+ struct ieee80211_sta *sta, void *priv_sta,
struct sk_buff *skb)
{
struct minstrel_priv *mp = priv;
@@ -260,7 +260,7 @@ minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband,
static inline unsigned int
minstrel_get_retry_count(struct minstrel_rate *mr,
- struct ieee80211_tx_info *info)
+ struct ieee80211_tx_info *info)
{
unsigned int retry = mr->adjusted_retry_count;
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index d2ed18d82fe..c1b5b73c5b9 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -63,7 +63,7 @@
#define CCK_DURATION(_bitrate, _short, _len) \
(1000 * (10 /* SIFS */ + \
- (_short ? 72 + 24 : 144 + 48 ) + \
+ (_short ? 72 + 24 : 144 + 48) + \
(8 * (_len + 4) * 10) / (_bitrate)))
#define CCK_ACK_DURATION(_bitrate, _short) \
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index 124b1fdc20d..0ae207771a5 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -186,7 +186,7 @@ void ieee80211_get_tkip_p1k_iv(struct ieee80211_key_conf *keyconf,
EXPORT_SYMBOL(ieee80211_get_tkip_p1k_iv);
void ieee80211_get_tkip_rx_p1k(struct ieee80211_key_conf *keyconf,
- const u8 *ta, u32 iv32, u16 *p1k)
+ const u8 *ta, u32 iv32, u16 *p1k)
{
const u8 *tk = &keyconf->key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
struct tkip_ctx ctx;
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 3a669d7ec7a..da9366632f3 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -553,7 +553,7 @@ TRACE_EVENT(drv_update_tkip_key,
TP_printk(
LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " iv32:%#x",
- LOCAL_PR_ARG,VIF_PR_ARG,STA_PR_ARG, __entry->iv32
+ LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->iv32
)
);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 2f0e176e798..377cf974d97 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -2161,7 +2161,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
if (ieee80211_is_data_qos(fc)) {
__le16 *qos_control;
- qos_control = (__le16*) skb_push(skb, 2);
+ qos_control = (__le16 *) skb_push(skb, 2);
memcpy(skb_push(skb, hdrlen - 2), &hdr, hdrlen - 2);
/*
* Maybe we could actually set some fields here, for now just
@@ -2323,7 +2323,7 @@ static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
if (atomic_read(&ps->num_sta_ps) > 0)
/* in the hope that this is faster than
* checking byte-for-byte */
- have_bits = !bitmap_empty((unsigned long*)ps->tim,
+ have_bits = !bitmap_empty((unsigned long *)ps->tim,
IEEE80211_MAX_AID+1);
if (ps->dtim_count == 0)
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 591b46b7246..df00f1978a7 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -76,7 +76,7 @@ u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
}
if (ieee80211_is_ctl(fc)) {
- if(ieee80211_is_pspoll(fc))
+ if (ieee80211_is_pspoll(fc))
return hdr->addr1;
if (ieee80211_is_back_req(fc)) {
@@ -2315,9 +2315,14 @@ void ieee80211_dfs_cac_cancel(struct ieee80211_local *local)
struct ieee80211_sub_if_data *sdata;
struct cfg80211_chan_def chandef;
+ mutex_lock(&local->mtx);
mutex_lock(&local->iflist_mtx);
list_for_each_entry(sdata, &local->interfaces, list) {
- cancel_delayed_work_sync(&sdata->dfs_cac_timer_work);
+ /* it might be waiting for the local->mtx, but then
+ * by the time it gets it, sdata->wdev.cac_started
+ * will no longer be true
+ */
+ cancel_delayed_work(&sdata->dfs_cac_timer_work);
if (sdata->wdev.cac_started) {
chandef = sdata->vif.bss_conf.chandef;
@@ -2329,6 +2334,7 @@ void ieee80211_dfs_cac_cancel(struct ieee80211_local *local)
}
}
mutex_unlock(&local->iflist_mtx);
+ mutex_unlock(&local->mtx);
}
void ieee80211_dfs_radar_detected_work(struct work_struct *work)
@@ -2588,3 +2594,143 @@ int ieee80211_cs_headroom(struct ieee80211_local *local,
return headroom;
}
+
+static bool
+ieee80211_extend_noa_desc(struct ieee80211_noa_data *data, u32 tsf, int i)
+{
+ s32 end = data->desc[i].start + data->desc[i].duration - (tsf + 1);
+ int skip;
+
+ if (end > 0)
+ return false;
+
+ /* End time is in the past, check for repetitions */
+ skip = DIV_ROUND_UP(-end, data->desc[i].interval);
+ if (data->count[i] < 255) {
+ if (data->count[i] <= skip) {
+ data->count[i] = 0;
+ return false;
+ }
+
+ data->count[i] -= skip;
+ }
+
+ data->desc[i].start += skip * data->desc[i].interval;
+
+ return true;
+}
+
+static bool
+ieee80211_extend_absent_time(struct ieee80211_noa_data *data, u32 tsf,
+ s32 *offset)
+{
+ bool ret = false;
+ int i;
+
+ for (i = 0; i < IEEE80211_P2P_NOA_DESC_MAX; i++) {
+ s32 cur;
+
+ if (!data->count[i])
+ continue;
+
+ if (ieee80211_extend_noa_desc(data, tsf + *offset, i))
+ ret = true;
+
+ cur = data->desc[i].start - tsf;
+ if (cur > *offset)
+ continue;
+
+ cur = data->desc[i].start + data->desc[i].duration - tsf;
+ if (cur > *offset)
+ *offset = cur;
+ }
+
+ return ret;
+}
+
+static u32
+ieee80211_get_noa_absent_time(struct ieee80211_noa_data *data, u32 tsf)
+{
+ s32 offset = 0;
+ int tries = 0;
+ /*
+ * arbitrary limit, used to avoid infinite loops when combined NoA
+ * descriptors cover the full time period.
+ */
+ int max_tries = 5;
+
+ ieee80211_extend_absent_time(data, tsf, &offset);
+ do {
+ if (!ieee80211_extend_absent_time(data, tsf, &offset))
+ break;
+
+ tries++;
+ } while (tries < max_tries);
+
+ return offset;
+}
+
+void ieee80211_update_p2p_noa(struct ieee80211_noa_data *data, u32 tsf)
+{
+ u32 next_offset = BIT(31) - 1;
+ int i;
+
+ data->absent = 0;
+ data->has_next_tsf = false;
+ for (i = 0; i < IEEE80211_P2P_NOA_DESC_MAX; i++) {
+ s32 start;
+
+ if (!data->count[i])
+ continue;
+
+ ieee80211_extend_noa_desc(data, tsf, i);
+ start = data->desc[i].start - tsf;
+ if (start <= 0)
+ data->absent |= BIT(i);
+
+ if (next_offset > start)
+ next_offset = start;
+
+ data->has_next_tsf = true;
+ }
+
+ if (data->absent)
+ next_offset = ieee80211_get_noa_absent_time(data, tsf);
+
+ data->next_tsf = tsf + next_offset;
+}
+EXPORT_SYMBOL(ieee80211_update_p2p_noa);
+
+int ieee80211_parse_p2p_noa(const struct ieee80211_p2p_noa_attr *attr,
+ struct ieee80211_noa_data *data, u32 tsf)
+{
+ int ret = 0;
+ int i;
+
+ memset(data, 0, sizeof(*data));
+
+ for (i = 0; i < IEEE80211_P2P_NOA_DESC_MAX; i++) {
+ const struct ieee80211_p2p_noa_desc *desc = &attr->desc[i];
+
+ if (!desc->count || !desc->duration)
+ continue;
+
+ data->count[i] = desc->count;
+ data->desc[i].start = le32_to_cpu(desc->start_time);
+ data->desc[i].duration = le32_to_cpu(desc->duration);
+ data->desc[i].interval = le32_to_cpu(desc->interval);
+
+ if (data->count[i] > 1 &&
+ data->desc[i].interval < data->desc[i].duration)
+ continue;
+
+ ieee80211_extend_noa_desc(data, tsf, i);
+ ret++;
+ }
+
+ if (ret)
+ ieee80211_update_p2p_noa(data, tsf);
+
+ return ret;
+}
+EXPORT_SYMBOL(ieee80211_parse_p2p_noa);
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index afba19cb6f8..21211c60ca9 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -106,6 +106,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta = NULL;
const u8 *ra = NULL;
bool qos = false;
+ struct mac80211_qos_map *qos_map;
if (local->hw.queues < IEEE80211_NUM_ACS || skb->len < 6) {
skb->priority = 0; /* required for correct WPA/11i MIC */
@@ -155,7 +156,11 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
/* use the data classifier to determine what 802.1d tag the
* data frame has */
- skb->priority = cfg80211_classify8021d(skb);
+ rcu_read_lock();
+ qos_map = rcu_dereference(sdata->qos_map);
+ skb->priority = cfg80211_classify8021d(skb, qos_map ?
+ &qos_map->qos_map : NULL);
+ rcu_read_unlock();
return ieee80211_downgrade_queue(sdata, skb);
}
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index c3b3b26c4c4..afe50c0f526 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -428,6 +428,14 @@ config NF_TABLES
To compile it as a module, choose M here.
+config NF_TABLES_INET
+ depends on NF_TABLES
+ select NF_TABLES_IPV4
+ select NF_TABLES_IPV6
+ tristate "Netfilter nf_tables mixed IPv4/IPv6 tables support"
+ help
+ This option enables support for a mixed IPv4/IPv6 "inet" table.
+
config NFT_EXTHDR
depends on NF_TABLES
tristate "Netfilter nf_tables IPv6 exthdr module"
@@ -1131,6 +1139,16 @@ config NETFILTER_XT_MATCH_IPVS
If unsure, say N.
+config NETFILTER_XT_MATCH_L2TP
+ tristate '"l2tp" match support'
+ depends on NETFILTER_ADVANCED
+ default L2TP
+ ---help---
+ This option adds an "L2TP" match, which allows you to match against
+ L2TP protocol header fields.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config NETFILTER_XT_MATCH_LENGTH
tristate '"length" match support'
depends on NETFILTER_ADVANCED
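
A minimal sketch of the const chain type layout this series converts to (the .family, .owner, .hook_mask and .hooks[] members replacing the old .fn/.me fields), written against the NFPROTO_INET family that NF_TABLES_INET above enables. The names sketch_hook and sketch_filter_inet are illustrative only and the nf_hookfn signature is assumed from this kernel series; the registration the patch actually adds is filter_inet in net/netfilter/nf_tables_inet.c further down.

#include <linux/module.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_tables.h>

/* illustrative hook; a real base chain hook dispatches to nft_do_chain() */
static unsigned int sketch_hook(const struct nf_hook_ops *ops,
				struct sk_buff *skb,
				const struct net_device *in,
				const struct net_device *out,
				int (*okfn)(struct sk_buff *))
{
	return NF_ACCEPT;
}

static const struct nf_chain_type sketch_filter_inet = {
	.name		= "filter",
	.type		= NFT_CHAIN_T_DEFAULT,
	.family		= NFPROTO_INET,		/* mixed IPv4/IPv6 family */
	.owner		= THIS_MODULE,
	.hook_mask	= (1 << NF_INET_LOCAL_IN) |
			  (1 << NF_INET_LOCAL_OUT),
	.hooks		= {
		/* optional: if left NULL, the family default hooks apply */
		[NF_INET_LOCAL_IN]	= sketch_hook,
		[NF_INET_LOCAL_OUT]	= sketch_hook,
	},
};

/* registered with nft_register_chain_type(&sketch_filter_inet) from module
 * init and unregistered on exit, as filter_inet does below */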
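
To make the new l2tp match option above concrete, this is roughly what a rule selecting L2TPv3 control messages of one tunnel hands to the kernel side of the match. Only field and flag names used by xt_l2tp.c below are referenced; the numeric values are made up, and the struct layout is assumed to follow linux/netfilter/xt_l2tp.h as included by the new module.

#include <linux/netfilter/xt_l2tp.h>

/* kernel-side match parameters for a hypothetical rule: match L2TPv3
 * control packets belonging to tunnel id 42; fields not flagged in
 * .flags are ignored by l2tp_match() */
static const struct xt_l2tp_info example_l2tp_info = {
	.tid	 = 42,
	.version = 3,
	.type	 = XT_L2TP_TYPE_CONTROL,
	.flags	 = XT_L2TP_TID | XT_L2TP_VERSION | XT_L2TP_TYPE,
};

As l2tp_match() below implements it, the tunnel id is only compared for L2TPv3 control messages or any L2TPv2 packet, and the session id only for data packets.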
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 78b4e1c9c59..ee9c4de5f8e 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -70,6 +70,7 @@ nf_tables-objs += nft_immediate.o nft_cmp.o nft_lookup.o
nf_tables-objs += nft_bitwise.o nft_byteorder.o nft_payload.o
obj-$(CONFIG_NF_TABLES) += nf_tables.o
+obj-$(CONFIG_NF_TABLES_INET) += nf_tables_inet.o
obj-$(CONFIG_NFT_COMPAT) += nft_compat.o
obj-$(CONFIG_NFT_EXTHDR) += nft_exthdr.o
obj-$(CONFIG_NFT_META) += nft_meta.o
@@ -137,6 +138,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
obj-$(CONFIG_NETFILTER_XT_MATCH_IPCOMP) += xt_ipcomp.o
obj-$(CONFIG_NETFILTER_XT_MATCH_IPRANGE) += xt_iprange.o
obj-$(CONFIG_NETFILTER_XT_MATCH_IPVS) += xt_ipvs.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_L2TP) += xt_l2tp.o
obj-$(CONFIG_NETFILTER_XT_MATCH_LENGTH) += xt_length.o
obj-$(CONFIG_NETFILTER_XT_MATCH_LIMIT) += xt_limit.o
obj-$(CONFIG_NETFILTER_XT_MATCH_MAC) += xt_mac.o
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index a99b6c3427b..cb372f96f10 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -428,7 +428,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
const char *msg;
u_int8_t state;
- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
+ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
BUG_ON(dh == NULL);
state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
@@ -457,7 +457,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
out_invalid:
if (LOG_INVALID(net, IPPROTO_DCCP))
nf_log_packet(net, nf_ct_l3num(ct), 0, skb, NULL, NULL,
- NULL, msg);
+ NULL, "%s", msg);
return false;
}
@@ -486,7 +486,7 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
u_int8_t type, old_state, new_state;
enum ct_dccp_roles role;
- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
+ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
BUG_ON(dh == NULL);
type = dh->dccph_type;
@@ -577,7 +577,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
unsigned int cscov;
const char *msg;
- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
+ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
if (dh == NULL) {
msg = "nf_ct_dccp: short packet ";
goto out_invalid;
@@ -614,7 +614,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
out_invalid:
if (LOG_INVALID(net, IPPROTO_DCCP))
- nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, msg);
+ nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "%s", msg);
return -NF_ACCEPT;
}
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 1fcef1ec1dc..36add31e08e 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -124,37 +124,43 @@ static inline u64 nf_tables_alloc_handle(struct nft_table *table)
return ++table->hgenerator;
}
-static struct nf_chain_type *chain_type[AF_MAX][NFT_CHAIN_T_MAX];
+static const struct nf_chain_type *chain_type[AF_MAX][NFT_CHAIN_T_MAX];
-static int __nf_tables_chain_type_lookup(int family, const struct nlattr *nla)
+static const struct nf_chain_type *
+__nf_tables_chain_type_lookup(int family, const struct nlattr *nla)
{
int i;
- for (i=0; i<NFT_CHAIN_T_MAX; i++) {
+ for (i = 0; i < NFT_CHAIN_T_MAX; i++) {
if (chain_type[family][i] != NULL &&
!nla_strcmp(nla, chain_type[family][i]->name))
- return i;
+ return chain_type[family][i];
}
- return -1;
+ return NULL;
}
-static int nf_tables_chain_type_lookup(const struct nft_af_info *afi,
- const struct nlattr *nla,
- bool autoload)
+static const struct nf_chain_type *
+nf_tables_chain_type_lookup(const struct nft_af_info *afi,
+ const struct nlattr *nla,
+ bool autoload)
{
- int type;
+ const struct nf_chain_type *type;
type = __nf_tables_chain_type_lookup(afi->family, nla);
+ if (type != NULL)
+ return type;
#ifdef CONFIG_MODULES
- if (type < 0 && autoload) {
+ if (autoload) {
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
request_module("nft-chain-%u-%*.s", afi->family,
nla_len(nla)-1, (const char *)nla_data(nla));
nfnl_lock(NFNL_SUBSYS_NFTABLES);
type = __nf_tables_chain_type_lookup(afi->family, nla);
+ if (type != NULL)
+ return ERR_PTR(-EAGAIN);
}
#endif
- return type;
+ return ERR_PTR(-ENOENT);
}
static const struct nla_policy nft_table_policy[NFTA_TABLE_MAX + 1] = {
@@ -307,7 +313,8 @@ err:
return err;
}
-static int nf_tables_table_enable(struct nft_table *table)
+static int nf_tables_table_enable(const struct nft_af_info *afi,
+ struct nft_table *table)
{
struct nft_chain *chain;
int err, i = 0;
@@ -316,7 +323,7 @@ static int nf_tables_table_enable(struct nft_table *table)
if (!(chain->flags & NFT_BASE_CHAIN))
continue;
- err = nf_register_hook(&nft_base_chain(chain)->ops);
+ err = nf_register_hooks(nft_base_chain(chain)->ops, afi->nops);
if (err < 0)
goto err;
@@ -331,18 +338,20 @@ err:
if (i-- <= 0)
break;
- nf_unregister_hook(&nft_base_chain(chain)->ops);
+ nf_unregister_hooks(nft_base_chain(chain)->ops, afi->nops);
}
return err;
}
-static int nf_tables_table_disable(struct nft_table *table)
+static int nf_tables_table_disable(const struct nft_af_info *afi,
+ struct nft_table *table)
{
struct nft_chain *chain;
list_for_each_entry(chain, &table->chains, list) {
if (chain->flags & NFT_BASE_CHAIN)
- nf_unregister_hook(&nft_base_chain(chain)->ops);
+ nf_unregister_hooks(nft_base_chain(chain)->ops,
+ afi->nops);
}
return 0;
@@ -357,7 +366,7 @@ static int nf_tables_updtable(struct sock *nlsk, struct sk_buff *skb,
int family = nfmsg->nfgen_family, ret = 0;
if (nla[NFTA_TABLE_FLAGS]) {
- __be32 flags;
+ u32 flags;
flags = ntohl(nla_get_be32(nla[NFTA_TABLE_FLAGS]));
if (flags & ~NFT_TABLE_F_DORMANT)
@@ -365,12 +374,12 @@ static int nf_tables_updtable(struct sock *nlsk, struct sk_buff *skb,
if ((flags & NFT_TABLE_F_DORMANT) &&
!(table->flags & NFT_TABLE_F_DORMANT)) {
- ret = nf_tables_table_disable(table);
+ ret = nf_tables_table_disable(afi, table);
if (ret >= 0)
table->flags |= NFT_TABLE_F_DORMANT;
} else if (!(flags & NFT_TABLE_F_DORMANT) &&
table->flags & NFT_TABLE_F_DORMANT) {
- ret = nf_tables_table_enable(table);
+ ret = nf_tables_table_enable(afi, table);
if (ret >= 0)
table->flags &= ~NFT_TABLE_F_DORMANT;
}
@@ -393,6 +402,7 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
struct nft_table *table;
struct net *net = sock_net(skb->sk);
int family = nfmsg->nfgen_family;
+ u32 flags = 0;
afi = nf_tables_afinfo_lookup(net, family, true);
if (IS_ERR(afi))
@@ -414,25 +424,25 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
return nf_tables_updtable(nlsk, skb, nlh, nla, afi, table);
}
+ if (nla[NFTA_TABLE_FLAGS]) {
+ flags = ntohl(nla_get_be32(nla[NFTA_TABLE_FLAGS]));
+ if (flags & ~NFT_TABLE_F_DORMANT)
+ return -EINVAL;
+ }
+
+ if (!try_module_get(afi->owner))
+ return -EAFNOSUPPORT;
+
table = kzalloc(sizeof(*table) + nla_len(name), GFP_KERNEL);
- if (table == NULL)
+ if (table == NULL) {
+ module_put(afi->owner);
return -ENOMEM;
+ }
nla_strlcpy(table->name, name, nla_len(name));
INIT_LIST_HEAD(&table->chains);
INIT_LIST_HEAD(&table->sets);
-
- if (nla[NFTA_TABLE_FLAGS]) {
- __be32 flags;
-
- flags = ntohl(nla_get_be32(nla[NFTA_TABLE_FLAGS]));
- if (flags & ~NFT_TABLE_F_DORMANT) {
- kfree(table);
- return -EINVAL;
- }
-
- table->flags |= flags;
- }
+ table->flags = flags;
list_add_tail(&table->list, &afi->tables);
nf_tables_table_notify(skb, nlh, table, NFT_MSG_NEWTABLE, family);
@@ -457,16 +467,17 @@ static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb,
if (IS_ERR(table))
return PTR_ERR(table);
- if (table->use)
+ if (!list_empty(&table->chains) || !list_empty(&table->sets))
return -EBUSY;
list_del(&table->list);
nf_tables_table_notify(skb, nlh, table, NFT_MSG_DELTABLE, family);
kfree(table);
+ module_put(afi->owner);
return 0;
}
-int nft_register_chain_type(struct nf_chain_type *ctype)
+int nft_register_chain_type(const struct nf_chain_type *ctype)
{
int err = 0;
@@ -475,10 +486,6 @@ int nft_register_chain_type(struct nf_chain_type *ctype)
err = -EBUSY;
goto out;
}
-
- if (!try_module_get(ctype->me))
- goto out;
-
chain_type[ctype->family][ctype->type] = ctype;
out:
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
@@ -486,11 +493,10 @@ out:
}
EXPORT_SYMBOL_GPL(nft_register_chain_type);
-void nft_unregister_chain_type(struct nf_chain_type *ctype)
+void nft_unregister_chain_type(const struct nf_chain_type *ctype)
{
nfnl_lock(NFNL_SUBSYS_NFTABLES);
chain_type[ctype->family][ctype->type] = NULL;
- module_put(ctype->me);
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
}
EXPORT_SYMBOL_GPL(nft_unregister_chain_type);
@@ -598,7 +604,7 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, u32 portid, u32 seq,
if (chain->flags & NFT_BASE_CHAIN) {
const struct nft_base_chain *basechain = nft_base_chain(chain);
- const struct nf_hook_ops *ops = &basechain->ops;
+ const struct nf_hook_ops *ops = &basechain->ops[0];
struct nlattr *nest;
nest = nla_nest_start(skb, NFTA_CHAIN_HOOK);
@@ -614,9 +620,8 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, u32 portid, u32 seq,
htonl(basechain->policy)))
goto nla_put_failure;
- if (nla_put_string(skb, NFTA_CHAIN_TYPE,
- chain_type[ops->pf][nft_base_chain(chain)->type]->name))
- goto nla_put_failure;
+ if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name))
+ goto nla_put_failure;
if (nft_dump_stats(skb, nft_base_chain(chain)->stats))
goto nla_put_failure;
@@ -757,22 +762,6 @@ err:
return err;
}
-static int
-nf_tables_chain_policy(struct nft_base_chain *chain, const struct nlattr *attr)
-{
- switch (ntohl(nla_get_be32(attr))) {
- case NF_DROP:
- chain->policy = NF_DROP;
- break;
- case NF_ACCEPT:
- chain->policy = NF_ACCEPT;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
static const struct nla_policy nft_counter_policy[NFTA_COUNTER_MAX + 1] = {
[NFTA_COUNTER_PACKETS] = { .type = NLA_U64 },
[NFTA_COUNTER_BYTES] = { .type = NLA_U64 },
@@ -831,7 +820,9 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
struct nlattr *ha[NFTA_HOOK_MAX + 1];
struct net *net = sock_net(skb->sk);
int family = nfmsg->nfgen_family;
+ u8 policy = NF_ACCEPT;
u64 handle = 0;
+ unsigned int i;
int err;
bool create;
@@ -845,9 +836,6 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
if (IS_ERR(table))
return PTR_ERR(table);
- if (table->use == UINT_MAX)
- return -EOVERFLOW;
-
chain = NULL;
name = nla[NFTA_CHAIN_NAME];
@@ -865,6 +853,22 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
}
}
+ if (nla[NFTA_CHAIN_POLICY]) {
+ if ((chain != NULL &&
+ !(chain->flags & NFT_BASE_CHAIN)) ||
+ nla[NFTA_CHAIN_HOOK] == NULL)
+ return -EOPNOTSUPP;
+
+ policy = nla_get_be32(nla[NFTA_CHAIN_POLICY]);
+ switch (policy) {
+ case NF_DROP:
+ case NF_ACCEPT:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
if (chain != NULL) {
if (nlh->nlmsg_flags & NLM_F_EXCL)
return -EEXIST;
@@ -875,16 +879,6 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
!IS_ERR(nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME])))
return -EEXIST;
- if (nla[NFTA_CHAIN_POLICY]) {
- if (!(chain->flags & NFT_BASE_CHAIN))
- return -EOPNOTSUPP;
-
- err = nf_tables_chain_policy(nft_base_chain(chain),
- nla[NFTA_CHAIN_POLICY]);
- if (err < 0)
- return err;
- }
-
if (nla[NFTA_CHAIN_COUNTERS]) {
if (!(chain->flags & NFT_BASE_CHAIN))
return -EOPNOTSUPP;
@@ -895,24 +889,31 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
return err;
}
+ if (nla[NFTA_CHAIN_POLICY])
+ nft_base_chain(chain)->policy = policy;
+
if (nla[NFTA_CHAIN_HANDLE] && name)
nla_strlcpy(chain->name, name, NFT_CHAIN_MAXNAMELEN);
goto notify;
}
+ if (table->use == UINT_MAX)
+ return -EOVERFLOW;
+
if (nla[NFTA_CHAIN_HOOK]) {
+ const struct nf_chain_type *type;
struct nf_hook_ops *ops;
nf_hookfn *hookfn;
- u32 hooknum;
- int type = NFT_CHAIN_T_DEFAULT;
+ u32 hooknum, priority;
+ type = chain_type[family][NFT_CHAIN_T_DEFAULT];
if (nla[NFTA_CHAIN_TYPE]) {
type = nf_tables_chain_type_lookup(afi,
nla[NFTA_CHAIN_TYPE],
create);
- if (type < 0)
- return -ENOENT;
+ if (IS_ERR(type))
+ return PTR_ERR(type);
}
err = nla_parse_nested(ha, NFTA_HOOK_MAX, nla[NFTA_CHAIN_HOOK],
@@ -926,46 +927,23 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
hooknum = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM]));
if (hooknum >= afi->nhooks)
return -EINVAL;
+ priority = ntohl(nla_get_be32(ha[NFTA_HOOK_PRIORITY]));
- hookfn = chain_type[family][type]->fn[hooknum];
- if (hookfn == NULL)
+ if (!(type->hook_mask & (1 << hooknum)))
return -EOPNOTSUPP;
+ if (!try_module_get(type->owner))
+ return -ENOENT;
+ hookfn = type->hooks[hooknum];
basechain = kzalloc(sizeof(*basechain), GFP_KERNEL);
if (basechain == NULL)
return -ENOMEM;
- basechain->type = type;
- chain = &basechain->chain;
-
- ops = &basechain->ops;
- ops->pf = family;
- ops->owner = afi->owner;
- ops->hooknum = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM]));
- ops->priority = ntohl(nla_get_be32(ha[NFTA_HOOK_PRIORITY]));
- ops->priv = chain;
- ops->hook = hookfn;
- if (afi->hooks[ops->hooknum])
- ops->hook = afi->hooks[ops->hooknum];
-
- chain->flags |= NFT_BASE_CHAIN;
-
- if (nla[NFTA_CHAIN_POLICY]) {
- err = nf_tables_chain_policy(basechain,
- nla[NFTA_CHAIN_POLICY]);
- if (err < 0) {
- free_percpu(basechain->stats);
- kfree(basechain);
- return err;
- }
- } else
- basechain->policy = NF_ACCEPT;
-
if (nla[NFTA_CHAIN_COUNTERS]) {
err = nf_tables_counters(basechain,
nla[NFTA_CHAIN_COUNTERS]);
if (err < 0) {
- free_percpu(basechain->stats);
+ module_put(type->owner);
kfree(basechain);
return err;
}
@@ -973,12 +951,33 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
struct nft_stats __percpu *newstats;
newstats = alloc_percpu(struct nft_stats);
- if (newstats == NULL)
+ if (newstats == NULL) {
+ module_put(type->owner);
+ kfree(basechain);
return -ENOMEM;
+ }
+ rcu_assign_pointer(basechain->stats, newstats);
+ }
- rcu_assign_pointer(nft_base_chain(chain)->stats,
- newstats);
+ basechain->type = type;
+ chain = &basechain->chain;
+
+ for (i = 0; i < afi->nops; i++) {
+ ops = &basechain->ops[i];
+ ops->pf = family;
+ ops->owner = afi->owner;
+ ops->hooknum = hooknum;
+ ops->priority = priority;
+ ops->priv = chain;
+ ops->hook = afi->hooks[ops->hooknum];
+ if (hookfn)
+ ops->hook = hookfn;
+ if (afi->hook_ops_init)
+ afi->hook_ops_init(ops, i);
}
+
+ chain->flags |= NFT_BASE_CHAIN;
+ basechain->policy = policy;
} else {
chain = kzalloc(sizeof(*chain), GFP_KERNEL);
if (chain == NULL)
@@ -993,8 +992,9 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
if (!(table->flags & NFT_TABLE_F_DORMANT) &&
chain->flags & NFT_BASE_CHAIN) {
- err = nf_register_hook(&nft_base_chain(chain)->ops);
+ err = nf_register_hooks(nft_base_chain(chain)->ops, afi->nops);
if (err < 0) {
+ module_put(basechain->type->owner);
free_percpu(basechain->stats);
kfree(basechain);
return err;
@@ -1015,6 +1015,7 @@ static void nf_tables_rcu_chain_destroy(struct rcu_head *head)
BUG_ON(chain->use > 0);
if (chain->flags & NFT_BASE_CHAIN) {
+ module_put(nft_base_chain(chain)->type->owner);
free_percpu(nft_base_chain(chain)->stats);
kfree(nft_base_chain(chain));
} else
@@ -1052,7 +1053,7 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
if (!(table->flags & NFT_TABLE_F_DORMANT) &&
chain->flags & NFT_BASE_CHAIN)
- nf_unregister_hook(&nft_base_chain(chain)->ops);
+ nf_unregister_hooks(nft_base_chain(chain)->ops, afi->nops);
nf_tables_chain_notify(skb, nlh, table, chain, NFT_MSG_DELCHAIN,
family);
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index e8fcc343c2b..0d879fcb876 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -109,14 +109,14 @@ static inline void nft_trace_packet(const struct nft_pktinfo *pkt,
{
struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
- nf_log_packet(net, pkt->xt.family, pkt->hooknum, pkt->skb, pkt->in,
+ nf_log_packet(net, pkt->xt.family, pkt->ops->hooknum, pkt->skb, pkt->in,
pkt->out, &trace_loginfo, "TRACE: %s:%s:%s:%u ",
chain->table->name, chain->name, comments[type],
rulenum);
}
unsigned int
-nft_do_chain_pktinfo(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
+nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
{
const struct nft_chain *chain = ops->priv;
const struct nft_rule *rule;
@@ -216,7 +216,7 @@ next_rule:
return nft_base_chain(chain)->policy;
}
-EXPORT_SYMBOL_GPL(nft_do_chain_pktinfo);
+EXPORT_SYMBOL_GPL(nft_do_chain);
int __init nf_tables_core_module_init(void)
{
diff --git a/net/netfilter/nf_tables_inet.c b/net/netfilter/nf_tables_inet.c
new file mode 100644
index 00000000000..9dd2d216cfc
--- /dev/null
+++ b/net/netfilter/nf_tables_inet.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2012-2014 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_ipv4.h>
+#include <net/netfilter/nf_tables_ipv6.h>
+#include <net/ip.h>
+
+static void nft_inet_hook_ops_init(struct nf_hook_ops *ops, unsigned int n)
+{
+ struct nft_af_info *afi;
+
+ if (n == 1)
+ afi = &nft_af_ipv4;
+ else
+ afi = &nft_af_ipv6;
+
+ ops->pf = afi->family;
+ if (afi->hooks[ops->hooknum])
+ ops->hook = afi->hooks[ops->hooknum];
+}
+
+static struct nft_af_info nft_af_inet __read_mostly = {
+ .family = NFPROTO_INET,
+ .nhooks = NF_INET_NUMHOOKS,
+ .owner = THIS_MODULE,
+ .nops = 2,
+ .hook_ops_init = nft_inet_hook_ops_init,
+};
+
+static int __net_init nf_tables_inet_init_net(struct net *net)
+{
+ net->nft.inet = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
+ if (net->nft.inet == NULL)
+ return -ENOMEM;
+ memcpy(net->nft.inet, &nft_af_inet, sizeof(nft_af_inet));
+
+ if (nft_register_afinfo(net, net->nft.inet) < 0)
+ goto err;
+
+ return 0;
+
+err:
+ kfree(net->nft.inet);
+ return -ENOMEM;
+}
+
+static void __net_exit nf_tables_inet_exit_net(struct net *net)
+{
+ nft_unregister_afinfo(net->nft.inet);
+ kfree(net->nft.inet);
+}
+
+static struct pernet_operations nf_tables_inet_net_ops = {
+ .init = nf_tables_inet_init_net,
+ .exit = nf_tables_inet_exit_net,
+};
+
+static const struct nf_chain_type filter_inet = {
+ .name = "filter",
+ .type = NFT_CHAIN_T_DEFAULT,
+ .family = NFPROTO_INET,
+ .owner = THIS_MODULE,
+ .hook_mask = (1 << NF_INET_LOCAL_IN) |
+ (1 << NF_INET_LOCAL_OUT) |
+ (1 << NF_INET_FORWARD) |
+ (1 << NF_INET_PRE_ROUTING) |
+ (1 << NF_INET_POST_ROUTING),
+};
+
+static int __init nf_tables_inet_init(void)
+{
+ int ret;
+
+ nft_register_chain_type(&filter_inet);
+ ret = register_pernet_subsys(&nf_tables_inet_net_ops);
+ if (ret < 0)
+ nft_unregister_chain_type(&filter_inet);
+
+ return ret;
+}
+
+static void __exit nf_tables_inet_exit(void)
+{
+ unregister_pernet_subsys(&nf_tables_inet_net_ops);
+ nft_unregister_chain_type(&filter_inet);
+}
+
+module_init(nf_tables_inet_init);
+module_exit(nf_tables_inet_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_FAMILY(1);
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index d292c8d286e..a155d19a225 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -28,6 +28,8 @@
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/list.h>
+#include <linux/jhash.h>
+#include <linux/random.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/netfilter/nf_log.h>
@@ -73,6 +75,7 @@ struct nfulnl_instance {
};
#define INSTANCE_BUCKETS 16
+static unsigned int hash_init;
static int nfnl_log_net_id __read_mostly;
@@ -1064,6 +1067,11 @@ static int __init nfnetlink_log_init(void)
{
int status = -ENOMEM;
+ /* it's not really all that important to have a random value, so
+ * we can do this from the init function, even if there hasn't
+ * been that much entropy yet */
+ get_random_bytes(&hash_init, sizeof(hash_init));
+
netlink_register_notifier(&nfulnl_rtnl_notifier);
status = nfnetlink_subsys_register(&nfulnl_subsys);
if (status < 0) {
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index da0c1f4ada1..82cb8236f8a 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -92,7 +92,7 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par,
if (ctx->chain->flags & NFT_BASE_CHAIN) {
const struct nft_base_chain *basechain =
nft_base_chain(ctx->chain);
- const struct nf_hook_ops *ops = &basechain->ops;
+ const struct nf_hook_ops *ops = &basechain->ops[0];
par->hook_mask = 1 << ops->hooknum;
}
@@ -253,7 +253,7 @@ static int nft_target_validate(const struct nft_ctx *ctx,
if (ctx->chain->flags & NFT_BASE_CHAIN) {
const struct nft_base_chain *basechain =
nft_base_chain(ctx->chain);
- const struct nf_hook_ops *ops = &basechain->ops;
+ const struct nf_hook_ops *ops = &basechain->ops[0];
hook_mask = 1 << ops->hooknum;
if (hook_mask & target->hooks)
@@ -323,7 +323,7 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
if (ctx->chain->flags & NFT_BASE_CHAIN) {
const struct nft_base_chain *basechain =
nft_base_chain(ctx->chain);
- const struct nf_hook_ops *ops = &basechain->ops;
+ const struct nf_hook_ops *ops = &basechain->ops[0];
par->hook_mask = 1 << ops->hooknum;
}
@@ -449,7 +449,7 @@ static int nft_match_validate(const struct nft_ctx *ctx,
if (ctx->chain->flags & NFT_BASE_CHAIN) {
const struct nft_base_chain *basechain =
nft_base_chain(ctx->chain);
- const struct nf_hook_ops *ops = &basechain->ops;
+ const struct nf_hook_ops *ops = &basechain->ops[0];
hook_mask = 1 << ops->hooknum;
if (hook_mask & match->hooks)
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index 955f4e6e708..c7c12858e11 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -18,17 +18,21 @@
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
struct nft_ct {
enum nft_ct_keys key:8;
enum ip_conntrack_dir dir:8;
- enum nft_registers dreg:8;
+ union{
+ enum nft_registers dreg:8;
+ enum nft_registers sreg:8;
+ };
uint8_t family;
};
-static void nft_ct_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
- const struct nft_pktinfo *pkt)
+static void nft_ct_get_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
{
const struct nft_ct *priv = nft_expr_priv(expr);
struct nft_data *dest = &data[priv->dreg];
@@ -123,24 +127,77 @@ err:
data[NFT_REG_VERDICT].verdict = NFT_BREAK;
}
+static void nft_ct_set_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
+{
+ const struct nft_ct *priv = nft_expr_priv(expr);
+ struct sk_buff *skb = pkt->skb;
+ u32 value = data[priv->sreg].data[0];
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ if (ct == NULL)
+ return;
+
+ switch (priv->key) {
+#ifdef CONFIG_NF_CONNTRACK_MARK
+ case NFT_CT_MARK:
+ if (ct->mark != value) {
+ ct->mark = value;
+ nf_conntrack_event_cache(IPCT_MARK, ct);
+ }
+ break;
+#endif
+ }
+}
+
static const struct nla_policy nft_ct_policy[NFTA_CT_MAX + 1] = {
[NFTA_CT_DREG] = { .type = NLA_U32 },
[NFTA_CT_KEY] = { .type = NLA_U32 },
[NFTA_CT_DIRECTION] = { .type = NLA_U8 },
+ [NFTA_CT_SREG] = { .type = NLA_U32 },
};
-static int nft_ct_init(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nlattr * const tb[])
+static int nft_ct_l3proto_try_module_get(uint8_t family)
{
- struct nft_ct *priv = nft_expr_priv(expr);
int err;
- if (tb[NFTA_CT_DREG] == NULL ||
- tb[NFTA_CT_KEY] == NULL)
- return -EINVAL;
+ if (family == NFPROTO_INET) {
+ err = nf_ct_l3proto_try_module_get(NFPROTO_IPV4);
+ if (err < 0)
+ goto err1;
+ err = nf_ct_l3proto_try_module_get(NFPROTO_IPV6);
+ if (err < 0)
+ goto err2;
+ } else {
+ err = nf_ct_l3proto_try_module_get(family);
+ if (err < 0)
+ goto err1;
+ }
+ return 0;
+
+err2:
+ nf_ct_l3proto_module_put(NFPROTO_IPV4);
+err1:
+ return err;
+}
+
+static void nft_ct_l3proto_module_put(uint8_t family)
+{
+ if (family == NFPROTO_INET) {
+ nf_ct_l3proto_module_put(NFPROTO_IPV4);
+ nf_ct_l3proto_module_put(NFPROTO_IPV6);
+ } else
+ nf_ct_l3proto_module_put(family);
+}
+
+static int nft_ct_init_validate_get(const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_ct *priv = nft_expr_priv(expr);
- priv->key = ntohl(nla_get_be32(tb[NFTA_CT_KEY]));
if (tb[NFTA_CT_DIRECTION] != NULL) {
priv->dir = nla_get_u8(tb[NFTA_CT_DIRECTION]);
switch (priv->dir) {
@@ -179,34 +236,72 @@ static int nft_ct_init(const struct nft_ctx *ctx,
return -EOPNOTSUPP;
}
- err = nf_ct_l3proto_try_module_get(ctx->afi->family);
+ return 0;
+}
+
+static int nft_ct_init_validate_set(uint32_t key)
+{
+ switch (key) {
+ case NFT_CT_MARK:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int nft_ct_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_ct *priv = nft_expr_priv(expr);
+ int err;
+
+ priv->key = ntohl(nla_get_be32(tb[NFTA_CT_KEY]));
+
+ if (tb[NFTA_CT_DREG]) {
+ err = nft_ct_init_validate_get(expr, tb);
+ if (err < 0)
+ return err;
+
+ priv->dreg = ntohl(nla_get_be32(tb[NFTA_CT_DREG]));
+ err = nft_validate_output_register(priv->dreg);
+ if (err < 0)
+ return err;
+
+ err = nft_validate_data_load(ctx, priv->dreg, NULL,
+ NFT_DATA_VALUE);
+ if (err < 0)
+ return err;
+ } else {
+ err = nft_ct_init_validate_set(priv->key);
+ if (err < 0)
+ return err;
+
+ priv->sreg = ntohl(nla_get_be32(tb[NFTA_CT_SREG]));
+ err = nft_validate_input_register(priv->sreg);
+ if (err < 0)
+ return err;
+ }
+
+ err = nft_ct_l3proto_try_module_get(ctx->afi->family);
if (err < 0)
return err;
- priv->family = ctx->afi->family;
- priv->dreg = ntohl(nla_get_be32(tb[NFTA_CT_DREG]));
- err = nft_validate_output_register(priv->dreg);
- if (err < 0)
- goto err1;
+ priv->family = ctx->afi->family;
- err = nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
- if (err < 0)
- goto err1;
return 0;
-
-err1:
- nf_ct_l3proto_module_put(ctx->afi->family);
- return err;
}
static void nft_ct_destroy(const struct nft_expr *expr)
{
struct nft_ct *priv = nft_expr_priv(expr);
- nf_ct_l3proto_module_put(priv->family);
+ nft_ct_l3proto_module_put(priv->family);
}
-static int nft_ct_dump(struct sk_buff *skb, const struct nft_expr *expr)
+static int nft_ct_get_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
const struct nft_ct *priv = nft_expr_priv(expr);
@@ -222,19 +317,61 @@ nla_put_failure:
return -1;
}
+static int nft_ct_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ const struct nft_ct *priv = nft_expr_priv(expr);
+
+ if (nla_put_be32(skb, NFTA_CT_SREG, htonl(priv->sreg)))
+ goto nla_put_failure;
+ if (nla_put_be32(skb, NFTA_CT_KEY, htonl(priv->key)))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
static struct nft_expr_type nft_ct_type;
-static const struct nft_expr_ops nft_ct_ops = {
+static const struct nft_expr_ops nft_ct_get_ops = {
.type = &nft_ct_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_ct)),
- .eval = nft_ct_eval,
+ .eval = nft_ct_get_eval,
.init = nft_ct_init,
.destroy = nft_ct_destroy,
- .dump = nft_ct_dump,
+ .dump = nft_ct_get_dump,
};
+static const struct nft_expr_ops nft_ct_set_ops = {
+ .type = &nft_ct_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_ct)),
+ .eval = nft_ct_set_eval,
+ .init = nft_ct_init,
+ .destroy = nft_ct_destroy,
+ .dump = nft_ct_set_dump,
+};
+
+static const struct nft_expr_ops *
+nft_ct_select_ops(const struct nft_ctx *ctx,
+ const struct nlattr * const tb[])
+{
+ if (tb[NFTA_CT_KEY] == NULL)
+ return ERR_PTR(-EINVAL);
+
+ if (tb[NFTA_CT_DREG] && tb[NFTA_CT_SREG])
+ return ERR_PTR(-EINVAL);
+
+ if (tb[NFTA_CT_DREG])
+ return &nft_ct_get_ops;
+
+ if (tb[NFTA_CT_SREG])
+ return &nft_ct_set_ops;
+
+ return ERR_PTR(-EINVAL);
+}
+
static struct nft_expr_type nft_ct_type __read_mostly = {
.name = "ct",
- .ops = &nft_ct_ops,
+ .select_ops = &nft_ct_select_ops,
.policy = nft_ct_policy,
.maxattr = NFTA_CT_MAX,
.owner = THIS_MODULE,
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index 6aae699aeb4..3d3f8fce10a 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -164,7 +164,7 @@ static int nft_hash_init(const struct nft_set *set,
unsigned int cnt, i;
if (unlikely(!nft_hash_rnd_initted)) {
- nft_hash_rnd = prandom_u32();
+ get_random_bytes(&nft_hash_rnd, 4);
nft_hash_rnd_initted = true;
}
diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c
index 57cad072a13..5af790123ad 100644
--- a/net/netfilter/nft_log.c
+++ b/net/netfilter/nft_log.c
@@ -33,7 +33,7 @@ static void nft_log_eval(const struct nft_expr *expr,
const struct nft_log *priv = nft_expr_priv(expr);
struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
- nf_log_packet(net, priv->family, pkt->hooknum, pkt->skb, pkt->in,
+ nf_log_packet(net, priv->family, pkt->ops->hooknum, pkt->skb, pkt->in,
pkt->out, &priv->loginfo, "%s", priv->prefix);
}
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 1ceaaa6dfe7..e8254ad2e5a 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -43,6 +43,12 @@ static void nft_meta_get_eval(const struct nft_expr *expr,
case NFT_META_PROTOCOL:
*(__be16 *)dest->data = skb->protocol;
break;
+ case NFT_META_NFPROTO:
+ dest->data[0] = pkt->ops->pf;
+ break;
+ case NFT_META_L4PROTO:
+ dest->data[0] = pkt->tprot;
+ break;
case NFT_META_PRIORITY:
dest->data[0] = skb->priority;
break;
@@ -181,6 +187,8 @@ static int nft_meta_init_validate_get(uint32_t key)
switch (key) {
case NFT_META_LEN:
case NFT_META_PROTOCOL:
+ case NFT_META_NFPROTO:
+ case NFT_META_L4PROTO:
case NFT_META_PRIORITY:
case NFT_META_MARK:
case NFT_META_IIF:
@@ -231,6 +239,9 @@ static int nft_meta_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
return err;
priv->sreg = ntohl(nla_get_be32(tb[NFTA_META_SREG]));
+ err = nft_validate_input_register(priv->sreg);
+ if (err < 0)
+ return err;
return 0;
}
diff --git a/net/netfilter/nft_reject.c b/net/netfilter/nft_reject.c
index 0d690d4101d..5e204711d70 100644
--- a/net/netfilter/nft_reject.c
+++ b/net/netfilter/nft_reject.c
@@ -34,8 +34,9 @@ static void nft_reject_eval(const struct nft_expr *expr,
const struct nft_pktinfo *pkt)
{
struct nft_reject *priv = nft_expr_priv(expr);
+#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out);
-
+#endif
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
if (priv->family == NFPROTO_IPV4)
@@ -43,15 +44,15 @@ static void nft_reject_eval(const struct nft_expr *expr,
#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
else if (priv->family == NFPROTO_IPV6)
nf_send_unreach6(net, pkt->skb, priv->icmp_code,
- pkt->hooknum);
+ pkt->ops->hooknum);
#endif
break;
case NFT_REJECT_TCP_RST:
if (priv->family == NFPROTO_IPV4)
- nf_send_reset(pkt->skb, pkt->hooknum);
+ nf_send_reset(pkt->skb, pkt->ops->hooknum);
#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
else if (priv->family == NFPROTO_IPV6)
- nf_send_reset6(net, pkt->skb, pkt->hooknum);
+ nf_send_reset6(net, pkt->skb, pkt->ops->hooknum);
#endif
break;
}
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index 190854be762..370adf622ce 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -100,7 +100,7 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
int ret;
if (unlikely(!rnd_inited)) {
- jhash_rnd = prandom_u32();
+ get_random_bytes(&jhash_rnd, sizeof(jhash_rnd));
rnd_inited = true;
}
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 7671e821491..c40b2695633 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -229,7 +229,7 @@ static int connlimit_mt_check(const struct xt_mtchk_param *par)
u_int32_t rand;
do {
- rand = prandom_u32();
+ get_random_bytes(&rand, sizeof(rand));
} while (!rand);
cmpxchg(&connlimit_rnd, 0, rand);
}
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index d819f62b3b7..a3910fc2122 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -177,7 +177,7 @@ dsthash_alloc_init(struct xt_hashlimit_htable *ht,
/* initialize hash with random val at the time we allocate
* the first hashtable entry */
if (unlikely(!ht->rnd_initialized)) {
- ht->rnd = prandom_u32();
+ get_random_bytes(&ht->rnd, sizeof(ht->rnd));
ht->rnd_initialized = true;
}
diff --git a/net/netfilter/xt_l2tp.c b/net/netfilter/xt_l2tp.c
new file mode 100644
index 00000000000..8aee572771f
--- /dev/null
+++ b/net/netfilter/xt_l2tp.c
@@ -0,0 +1,354 @@
+/* Kernel module to match L2TP header parameters. */
+
+/* (C) 2013 James Chapman <jchapman@katalix.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/if_ether.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include <net/udp.h>
+#include <linux/l2tp.h>
+
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_tcpudp.h>
+#include <linux/netfilter/xt_l2tp.h>
+
+/* L2TP header masks */
+#define L2TP_HDR_T_BIT 0x8000
+#define L2TP_HDR_L_BIT 0x4000
+#define L2TP_HDR_VER 0x000f
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
+MODULE_DESCRIPTION("Xtables: L2TP header match");
+MODULE_ALIAS("ipt_l2tp");
+MODULE_ALIAS("ip6t_l2tp");
+
+/* The L2TP fields that can be matched */
+struct l2tp_data {
+ u32 tid;
+ u32 sid;
+ u8 type;
+ u8 version;
+};
+
+union l2tp_val {
+ __be16 val16[2];
+ __be32 val32;
+};
+
+static bool l2tp_match(const struct xt_l2tp_info *info, struct l2tp_data *data)
+{
+ if ((info->flags & XT_L2TP_TYPE) && (info->type != data->type))
+ return false;
+
+ if ((info->flags & XT_L2TP_VERSION) && (info->version != data->version))
+ return false;
+
+ /* Check tid only for L2TPv3 control or any L2TPv2 packets */
+ if ((info->flags & XT_L2TP_TID) &&
+ ((data->type == XT_L2TP_TYPE_CONTROL) || (data->version == 2)) &&
+ (info->tid != data->tid))
+ return false;
+
+ /* Check sid only for L2TP data packets */
+ if ((info->flags & XT_L2TP_SID) && (data->type == XT_L2TP_TYPE_DATA) &&
+ (info->sid != data->sid))
+ return false;
+
+ return true;
+}
+
+/* Parse L2TP header fields when UDP encapsulation is used. Handles
+ * L2TPv2 and L2TPv3. Note the L2TPv3 control and data packets have a
+ * different format. See
+ * RFC2661, Section 3.1, L2TPv2 Header Format
+ * RFC3931, Section 3.2.1, L2TPv3 Control Message Header
+ * RFC3931, Section 3.2.2, L2TPv3 Data Message Header
+ * RFC3931, Section 4.1.2.1, L2TPv3 Session Header over UDP
+ */
+static bool l2tp_udp_mt(const struct sk_buff *skb, struct xt_action_param *par, u16 thoff)
+{
+ const struct xt_l2tp_info *info = par->matchinfo;
+ int uhlen = sizeof(struct udphdr);
+ int offs = thoff + uhlen;
+ union l2tp_val *lh;
+ union l2tp_val lhbuf;
+ u16 flags;
+ struct l2tp_data data = { 0, };
+
+ if (par->fragoff != 0)
+ return false;
+
+ /* Extract L2TP header fields. The flags in the first 16 bits
+ * tell us where the other fields are.
+ */
+ lh = skb_header_pointer(skb, offs, 2, &lhbuf);
+ if (lh == NULL)
+ return false;
+
+ flags = ntohs(lh->val16[0]);
+ if (flags & L2TP_HDR_T_BIT)
+ data.type = XT_L2TP_TYPE_CONTROL;
+ else
+ data.type = XT_L2TP_TYPE_DATA;
+ data.version = (u8) flags & L2TP_HDR_VER;
+
+ /* Now extract the L2TP tid/sid. These are in different places
+ * for L2TPv2 (rfc2661) and L2TPv3 (rfc3931). For L2TPv2, we
+ * must also check to see if the length field is present,
+ * since this affects the offsets into the packet of the
+ * tid/sid fields.
+ */
+ if (data.version == 3) {
+ lh = skb_header_pointer(skb, offs + 4, 4, &lhbuf);
+ if (lh == NULL)
+ return false;
+ if (data.type == XT_L2TP_TYPE_CONTROL)
+ data.tid = ntohl(lh->val32);
+ else
+ data.sid = ntohl(lh->val32);
+ } else if (data.version == 2) {
+ if (flags & L2TP_HDR_L_BIT)
+ offs += 2;
+ lh = skb_header_pointer(skb, offs + 2, 4, &lhbuf);
+ if (lh == NULL)
+ return false;
+ data.tid = (u32) ntohs(lh->val16[0]);
+ data.sid = (u32) ntohs(lh->val16[1]);
+ } else
+ return false;
+
+ return l2tp_match(info, &data);
+}
+
+/* Parse L2TP header fields for IP encapsulation (no UDP header).
+ * L2TPv3 data packets have a different form with IP encap. See
+ * RFC3931, Section 4.1.1.1, L2TPv3 Session Header over IP.
+ * RFC3931, Section 4.1.1.2, L2TPv3 Control and Data Traffic over IP.
+ */
+static bool l2tp_ip_mt(const struct sk_buff *skb, struct xt_action_param *par, u16 thoff)
+{
+ const struct xt_l2tp_info *info = par->matchinfo;
+ union l2tp_val *lh;
+ union l2tp_val lhbuf;
+ struct l2tp_data data = { 0, };
+
+ /* For IP encap, the L2TP sid is the first 32-bits. */
+ lh = skb_header_pointer(skb, thoff, sizeof(lhbuf), &lhbuf);
+ if (lh == NULL)
+ return false;
+ if (lh->val32 == 0) {
+ /* Must be a control packet. The L2TP tid is further
+ * into the packet.
+ */
+ data.type = XT_L2TP_TYPE_CONTROL;
+ lh = skb_header_pointer(skb, thoff + 8, sizeof(lhbuf),
+ &lhbuf);
+ if (lh == NULL)
+ return false;
+ data.tid = ntohl(lh->val32);
+ } else {
+ data.sid = ntohl(lh->val32);
+ data.type = XT_L2TP_TYPE_DATA;
+ }
+
+ data.version = 3;
+
+ return l2tp_match(info, &data);
+}
+
+static bool l2tp_mt4(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ struct iphdr *iph = ip_hdr(skb);
+ u8 ipproto = iph->protocol;
+
+ /* l2tp_mt_check4 already restricts the transport protocol */
+ switch (ipproto) {
+ case IPPROTO_UDP:
+ return l2tp_udp_mt(skb, par, par->thoff);
+ case IPPROTO_L2TP:
+ return l2tp_ip_mt(skb, par, par->thoff);
+ }
+
+ return false;
+}
+
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+static bool l2tp_mt6(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ unsigned int thoff = 0;
+ unsigned short fragoff = 0;
+ int ipproto;
+
+ ipproto = ipv6_find_hdr(skb, &thoff, -1, &fragoff, NULL);
+ if (fragoff != 0)
+ return false;
+
+ /* l2tp_mt_check6 already restricts the transport protocol */
+ switch (ipproto) {
+ case IPPROTO_UDP:
+ return l2tp_udp_mt(skb, par, thoff);
+ case IPPROTO_L2TP:
+ return l2tp_ip_mt(skb, par, thoff);
+ }
+
+ return false;
+}
+#endif
+
+static int l2tp_mt_check(const struct xt_mtchk_param *par)
+{
+ const struct xt_l2tp_info *info = par->matchinfo;
+
+ /* Check for invalid flags */
+ if (info->flags & ~(XT_L2TP_TID | XT_L2TP_SID | XT_L2TP_VERSION |
+ XT_L2TP_TYPE)) {
+ pr_info("unknown flags: %x\n", info->flags);
+ return -EINVAL;
+ }
+
+ /* At least one of tid, sid or type=control must be specified */
+ if ((!(info->flags & XT_L2TP_TID)) &&
+ (!(info->flags & XT_L2TP_SID)) &&
+ ((!(info->flags & XT_L2TP_TYPE)) ||
+ (info->type != XT_L2TP_TYPE_CONTROL))) {
+ pr_info("invalid flags combination: %x\n", info->flags);
+ return -EINVAL;
+ }
+
+ /* If version 2 is specified, check that incompatible params
+ * are not supplied
+ */
+ if (info->flags & XT_L2TP_VERSION) {
+ if ((info->version < 2) || (info->version > 3)) {
+ pr_info("wrong L2TP version: %u\n", info->version);
+ return -EINVAL;
+ }
+
+ if (info->version == 2) {
+ if ((info->flags & XT_L2TP_TID) &&
+ (info->tid > 0xffff)) {
+ pr_info("v2 tid > 0xffff: %u\n", info->tid);
+ return -EINVAL;
+ }
+ if ((info->flags & XT_L2TP_SID) &&
+ (info->sid > 0xffff)) {
+ pr_info("v2 sid > 0xffff: %u\n", info->sid);
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int l2tp_mt_check4(const struct xt_mtchk_param *par)
+{
+ const struct xt_l2tp_info *info = par->matchinfo;
+ const struct ipt_entry *e = par->entryinfo;
+ const struct ipt_ip *ip = &e->ip;
+ int ret;
+
+ ret = l2tp_mt_check(par);
+ if (ret != 0)
+ return ret;
+
+ if ((ip->proto != IPPROTO_UDP) &&
+ (ip->proto != IPPROTO_L2TP)) {
+ pr_info("missing protocol rule (udp|l2tpip)\n");
+ return -EINVAL;
+ }
+
+ if ((ip->proto == IPPROTO_L2TP) &&
+ (info->version == 2)) {
+ pr_info("v2 doesn't support IP mode\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+static int l2tp_mt_check6(const struct xt_mtchk_param *par)
+{
+ const struct xt_l2tp_info *info = par->matchinfo;
+ const struct ip6t_entry *e = par->entryinfo;
+ const struct ip6t_ip6 *ip = &e->ipv6;
+ int ret;
+
+ ret = l2tp_mt_check(par);
+ if (ret != 0)
+ return ret;
+
+ if ((ip->proto != IPPROTO_UDP) &&
+ (ip->proto != IPPROTO_L2TP)) {
+ pr_info("missing protocol rule (udp|l2tpip)\n");
+ return -EINVAL;
+ }
+
+ if ((ip->proto == IPPROTO_L2TP) &&
+ (info->version == 2)) {
+ pr_info("v2 doesn't support IP mode\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+#endif
+
+static struct xt_match l2tp_mt_reg[] __read_mostly = {
+ {
+ .name = "l2tp",
+ .revision = 0,
+ .family = NFPROTO_IPV4,
+ .match = l2tp_mt4,
+ .matchsize = XT_ALIGN(sizeof(struct xt_l2tp_info)),
+ .checkentry = l2tp_mt_check4,
+ .hooks = ((1 << NF_INET_PRE_ROUTING) |
+ (1 << NF_INET_LOCAL_IN) |
+ (1 << NF_INET_LOCAL_OUT) |
+ (1 << NF_INET_FORWARD)),
+ .me = THIS_MODULE,
+ },
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+ {
+ .name = "l2tp",
+ .revision = 0,
+ .family = NFPROTO_IPV6,
+ .match = l2tp_mt6,
+ .matchsize = XT_ALIGN(sizeof(struct xt_l2tp_info)),
+ .checkentry = l2tp_mt_check6,
+ .hooks = ((1 << NF_INET_PRE_ROUTING) |
+ (1 << NF_INET_LOCAL_IN) |
+ (1 << NF_INET_LOCAL_OUT) |
+ (1 << NF_INET_FORWARD)),
+ .me = THIS_MODULE,
+ },
+#endif
+};
+
+static int __init l2tp_mt_init(void)
+{
+ return xt_register_matches(&l2tp_mt_reg[0], ARRAY_SIZE(l2tp_mt_reg));
+}
+
+static void __exit l2tp_mt_exit(void)
+{
+ xt_unregister_matches(&l2tp_mt_reg[0], ARRAY_SIZE(l2tp_mt_reg));
+}
+
+module_init(l2tp_mt_init);
+module_exit(l2tp_mt_exit);
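
To make the header walk in l2tp_udp_mt() concrete, a standalone C sketch that extracts the same L2TPv2 fields (T bit, version, tid, sid) from a byte buffer using the masks defined above; the sample packet bytes are invented for the example and follow RFC2661, Section 3.1:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define L2TP_HDR_T_BIT 0x8000
#define L2TP_HDR_L_BIT 0x4000
#define L2TP_HDR_VER   0x000f

int main(void)
{
	/* L2TPv2 data message: flags=0x4002 (data, L bit set, version 2),
	 * length=8, tunnel id=5, session id=7. */
	uint8_t pkt[] = { 0x40, 0x02, 0x00, 0x08, 0x00, 0x05, 0x00, 0x07 };
	uint16_t flags, tid, sid;
	size_t offs = 0;

	memcpy(&flags, pkt, 2);
	flags = ntohs(flags);

	if (flags & L2TP_HDR_L_BIT)	/* optional length field present */
		offs += 2;

	/* tid/sid follow the flags word (and the length field, if any) */
	memcpy(&tid, pkt + offs + 2, 2);
	memcpy(&sid, pkt + offs + 4, 2);

	printf("type=%s version=%u tid=%u sid=%u\n",
	       (flags & L2TP_HDR_T_BIT) ? "control" : "data",
	       (unsigned)(flags & L2TP_HDR_VER),
	       (unsigned)ntohs(tid), (unsigned)ntohs(sid));
	return 0;
}
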
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index bfdc29f1a04..1e657cf715c 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -334,7 +334,7 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
size_t sz;
if (unlikely(!hash_rnd_inited)) {
- hash_rnd = prandom_u32();
+ get_random_bytes(&hash_rnd, sizeof(hash_rnd));
hash_rnd_inited = true;
}
if (info->check_set & ~XT_RECENT_VALID_FLAGS) {
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index b430d42b2d0..c58a0fe3c88 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -104,7 +104,7 @@ struct sw_flow *ovs_flow_alloc(bool percpu_stats)
}
return flow;
err:
- kfree(flow);
+ kmem_cache_free(flow_cache, flow);
return ERR_PTR(-ENOMEM);
}
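
The one-line fix above matters because ovs_flow_alloc() takes the object from a dedicated slab cache, so the error path must return it to that cache rather than kfree() it. A kernel-style sketch of the pairing rule, with a hypothetical example_cache and example_setup() helper standing in for the real code:

#include <linux/err.h>
#include <linux/slab.h>

struct example_flow {
	int placeholder;
};

static struct kmem_cache *example_cache;	/* created with kmem_cache_create() */

static int example_setup(struct example_flow *flow)
{
	flow->placeholder = 0;			/* stand-in for per-object init */
	return 0;
}

static struct example_flow *example_alloc(void)
{
	struct example_flow *flow;

	flow = kmem_cache_alloc(example_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	if (example_setup(flow)) {
		/* came from the slab cache, so it must go back to it */
		kmem_cache_free(example_cache, flow);
		return ERR_PTR(-ENOMEM);
	}
	return flow;
}
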
diff --git a/net/wireless/ap.c b/net/wireless/ap.c
index 324e8d851dc..11ee4ed04f7 100644
--- a/net/wireless/ap.c
+++ b/net/wireless/ap.c
@@ -29,6 +29,7 @@ static int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
wdev->beacon_interval = 0;
wdev->channel = NULL;
wdev->ssid_len = 0;
+ rdev_set_qos_map(rdev, dev, NULL);
}
return err;
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 730147ed8e6..f911c5f9f90 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -183,6 +183,8 @@ static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext)
kfree(wdev->connect_keys);
wdev->connect_keys = NULL;
+ rdev_set_qos_map(rdev, dev, NULL);
+
/*
* Delete all the keys ... pairwise keys can't really
* exist any more anyway, but default keys might.
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index 9c7a11ae793..885862447b6 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -277,6 +277,7 @@ static int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
if (!err) {
wdev->mesh_id_len = 0;
wdev->channel = NULL;
+ rdev_set_qos_map(rdev, dev, NULL);
}
return err;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 04681a46eda..b4f40fe84a0 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -53,6 +53,7 @@ enum nl80211_multicast_groups {
NL80211_MCGRP_SCAN,
NL80211_MCGRP_REGULATORY,
NL80211_MCGRP_MLME,
+ NL80211_MCGRP_VENDOR,
NL80211_MCGRP_TESTMODE /* keep last - ifdef! */
};
@@ -61,6 +62,7 @@ static const struct genl_multicast_group nl80211_mcgrps[] = {
[NL80211_MCGRP_SCAN] = { .name = "scan", },
[NL80211_MCGRP_REGULATORY] = { .name = "regulatory", },
[NL80211_MCGRP_MLME] = { .name = "mlme", },
+ [NL80211_MCGRP_VENDOR] = { .name = "vendor", },
#ifdef CONFIG_NL80211_TESTMODE
[NL80211_MCGRP_TESTMODE] = { .name = "testmode", }
#endif
@@ -380,6 +382,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
[NL80211_ATTR_VENDOR_ID] = { .type = NLA_U32 },
[NL80211_ATTR_VENDOR_SUBCMD] = { .type = NLA_U32 },
[NL80211_ATTR_VENDOR_DATA] = { .type = NLA_BINARY },
+ [NL80211_ATTR_QOS_MAP] = { .type = NLA_BINARY,
+ .len = IEEE80211_QOS_MAP_LEN_MAX },
};
/* policy for the key attributes */
@@ -1188,7 +1192,6 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
struct nlattr *nl_bands, *nl_band;
struct nlattr *nl_freqs, *nl_freq;
struct nlattr *nl_cmds;
- struct nlattr *nl_vendor_cmds;
enum ieee80211_band band;
struct ieee80211_channel *chan;
int i;
@@ -1455,6 +1458,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
if (dev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)
CMD(channel_switch, CHANNEL_SWITCH);
}
+ CMD(set_qos_map, SET_QOS_MAP);
#ifdef CONFIG_NL80211_TESTMODE
CMD(testmode_cmd, TESTMODE);
@@ -1587,16 +1591,38 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
state->split_start++;
break;
case 11:
- nl_vendor_cmds = nla_nest_start(msg, NL80211_ATTR_VENDOR_DATA);
- if (!nl_vendor_cmds)
- goto nla_put_failure;
+ if (dev->wiphy.n_vendor_commands) {
+ const struct nl80211_vendor_cmd_info *info;
+ struct nlattr *nested;
+
+ nested = nla_nest_start(msg, NL80211_ATTR_VENDOR_DATA);
+ if (!nested)
+ goto nla_put_failure;
+
+ for (i = 0; i < dev->wiphy.n_vendor_commands; i++) {
+ info = &dev->wiphy.vendor_commands[i].info;
+ if (nla_put(msg, i + 1, sizeof(*info), info))
+ goto nla_put_failure;
+ }
+ nla_nest_end(msg, nested);
+ }
+
+ if (dev->wiphy.n_vendor_events) {
+ const struct nl80211_vendor_cmd_info *info;
+ struct nlattr *nested;
- for (i = 0; i < dev->wiphy.n_vendor_commands; i++)
- if (nla_put(msg, i + 1,
- sizeof(struct nl80211_vendor_cmd_info),
- &dev->wiphy.vendor_commands[i].info))
+ nested = nla_nest_start(msg,
+ NL80211_ATTR_VENDOR_EVENTS);
+ if (!nested)
goto nla_put_failure;
- nla_nest_end(msg, nl_vendor_cmds);
+
+ for (i = 0; i < dev->wiphy.n_vendor_events; i++) {
+ info = &dev->wiphy.vendor_events[i];
+ if (nla_put(msg, i + 1, sizeof(*info), info))
+ goto nla_put_failure;
+ }
+ nla_nest_end(msg, nested);
+ }
/* done */
state->split_start = 0;
@@ -6726,7 +6752,9 @@ static struct sk_buff *
__cfg80211_alloc_vendor_skb(struct cfg80211_registered_device *rdev,
int approxlen, u32 portid, u32 seq,
enum nl80211_commands cmd,
- enum nl80211_attrs attr, gfp_t gfp)
+ enum nl80211_attrs attr,
+ const struct nl80211_vendor_cmd_info *info,
+ gfp_t gfp)
{
struct sk_buff *skb;
void *hdr;
@@ -6744,6 +6772,16 @@ __cfg80211_alloc_vendor_skb(struct cfg80211_registered_device *rdev,
if (nla_put_u32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx))
goto nla_put_failure;
+
+ if (info) {
+ if (nla_put_u32(skb, NL80211_ATTR_VENDOR_ID,
+ info->vendor_id))
+ goto nla_put_failure;
+ if (nla_put_u32(skb, NL80211_ATTR_VENDOR_SUBCMD,
+ info->subcmd))
+ goto nla_put_failure;
+ }
+
data = nla_nest_start(skb, attr);
((void **)skb->cb)[0] = rdev;
@@ -6884,29 +6922,54 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
return err;
}
-struct sk_buff *cfg80211_testmode_alloc_event_skb(struct wiphy *wiphy,
- int approxlen, gfp_t gfp)
+struct sk_buff *__cfg80211_alloc_event_skb(struct wiphy *wiphy,
+ enum nl80211_commands cmd,
+ enum nl80211_attrs attr,
+ int vendor_event_idx,
+ int approxlen, gfp_t gfp)
{
struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+ const struct nl80211_vendor_cmd_info *info;
+
+ switch (cmd) {
+ case NL80211_CMD_TESTMODE:
+ if (WARN_ON(vendor_event_idx != -1))
+ return NULL;
+ info = NULL;
+ break;
+ case NL80211_CMD_VENDOR:
+ if (WARN_ON(vendor_event_idx < 0 ||
+ vendor_event_idx >= wiphy->n_vendor_events))
+ return NULL;
+ info = &wiphy->vendor_events[vendor_event_idx];
+ break;
+ default:
+ WARN_ON(1);
+ return NULL;
+ }
return __cfg80211_alloc_vendor_skb(rdev, approxlen, 0, 0,
- NL80211_CMD_TESTMODE,
- NL80211_ATTR_TESTDATA, gfp);
+ cmd, attr, info, gfp);
}
-EXPORT_SYMBOL(cfg80211_testmode_alloc_event_skb);
+EXPORT_SYMBOL(__cfg80211_alloc_event_skb);
-void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
+void __cfg80211_send_event_skb(struct sk_buff *skb, gfp_t gfp)
{
struct cfg80211_registered_device *rdev = ((void **)skb->cb)[0];
void *hdr = ((void **)skb->cb)[1];
struct nlattr *data = ((void **)skb->cb)[2];
+ enum nl80211_multicast_groups mcgrp = NL80211_MCGRP_TESTMODE;
nla_nest_end(skb, data);
genlmsg_end(skb, hdr);
+
+ if (data->nla_type == NL80211_ATTR_VENDOR_DATA)
+ mcgrp = NL80211_MCGRP_VENDOR;
+
genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), skb, 0,
- NL80211_MCGRP_TESTMODE, gfp);
+ mcgrp, gfp);
}
-EXPORT_SYMBOL(cfg80211_testmode_event);
+EXPORT_SYMBOL(__cfg80211_send_event_skb);
#endif
static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
@@ -9039,7 +9102,7 @@ struct sk_buff *__cfg80211_alloc_reply_skb(struct wiphy *wiphy,
return __cfg80211_alloc_vendor_skb(rdev, approxlen,
rdev->cur_cmd_info->snd_portid,
rdev->cur_cmd_info->snd_seq,
- cmd, attr, GFP_KERNEL);
+ cmd, attr, NULL, GFP_KERNEL);
}
EXPORT_SYMBOL(__cfg80211_alloc_reply_skb);
@@ -9061,6 +9124,57 @@ int cfg80211_vendor_cmd_reply(struct sk_buff *skb)
EXPORT_SYMBOL_GPL(cfg80211_vendor_cmd_reply);
+static int nl80211_set_qos_map(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct cfg80211_qos_map *qos_map = NULL;
+ struct net_device *dev = info->user_ptr[1];
+ u8 *pos, len, num_des, des_len, des;
+ int ret;
+
+ if (!rdev->ops->set_qos_map)
+ return -EOPNOTSUPP;
+
+ if (info->attrs[NL80211_ATTR_QOS_MAP]) {
+ pos = nla_data(info->attrs[NL80211_ATTR_QOS_MAP]);
+ len = nla_len(info->attrs[NL80211_ATTR_QOS_MAP]);
+
+ if (len % 2 || len < IEEE80211_QOS_MAP_LEN_MIN ||
+ len > IEEE80211_QOS_MAP_LEN_MAX)
+ return -EINVAL;
+
+ qos_map = kzalloc(sizeof(struct cfg80211_qos_map), GFP_KERNEL);
+ if (!qos_map)
+ return -ENOMEM;
+
+ num_des = (len - IEEE80211_QOS_MAP_LEN_MIN) >> 1;
+ if (num_des) {
+ des_len = num_des *
+ sizeof(struct cfg80211_dscp_exception);
+ memcpy(qos_map->dscp_exception, pos, des_len);
+ qos_map->num_des = num_des;
+ for (des = 0; des < num_des; des++) {
+ if (qos_map->dscp_exception[des].up > 7) {
+ kfree(qos_map);
+ return -EINVAL;
+ }
+ }
+ pos += des_len;
+ }
+ memcpy(qos_map->up, pos, IEEE80211_QOS_MAP_LEN_MIN);
+ }
+
+ wdev_lock(dev->ieee80211_ptr);
+ ret = nl80211_key_allowed(dev->ieee80211_ptr);
+ if (!ret)
+ ret = rdev_set_qos_map(rdev, dev, qos_map);
+ wdev_unlock(dev->ieee80211_ptr);
+
+ kfree(qos_map);
+ return ret;
+}
+
#define NL80211_FLAG_NEED_WIPHY 0x01
#define NL80211_FLAG_NEED_NETDEV 0x02
#define NL80211_FLAG_NEED_RTNL 0x04
@@ -9793,6 +9907,14 @@ static const struct genl_ops nl80211_ops[] = {
.internal_flags = NL80211_FLAG_NEED_WIPHY |
NL80211_FLAG_NEED_RTNL,
},
+ {
+ .cmd = NL80211_CMD_SET_QOS_MAP,
+ .doit = nl80211_set_qos_map,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
};
/* notification functions */
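
For reference, a userspace sketch of the attribute layout nl80211_set_qos_map() parses: a run of (dscp, up) exception byte pairs followed by eight (low, high) DSCP range pairs, so the payload is 16 + 2*N bytes and must be even. The length constants mirror what the kernel appears to use (16-byte minimum, up to 21 exceptions); the buffer contents are made up for the example:

#include <stdint.h>
#include <stdio.h>

#define QOS_MAP_LEN_MIN 16	/* mirrors IEEE80211_QOS_MAP_LEN_MIN */
#define QOS_MAP_MAX_EX  21	/* mirrors IEEE80211_QOS_MAP_MAX_EX */
#define QOS_MAP_LEN_MAX (QOS_MAP_LEN_MIN + 2 * QOS_MAP_MAX_EX)

static int qos_map_len_ok(size_t len)
{
	return !(len % 2) && len >= QOS_MAP_LEN_MIN && len <= QOS_MAP_LEN_MAX;
}

int main(void)
{
	/* one exception (DSCP 46 -> UP 6) followed by 8 default ranges */
	uint8_t buf[] = {
		46, 6,					/* dscp_exception[0] */
		0, 7,   8, 15,  16, 23, 24, 31,		/* up[0..3] low/high */
		32, 39, 40, 47, 48, 55, 56, 63,		/* up[4..7] low/high */
	};
	size_t len = sizeof(buf);
	size_t num_des = (len - QOS_MAP_LEN_MIN) >> 1;	/* same math as the kernel */

	printf("len=%zu valid=%d exceptions=%zu\n",
	       len, qos_map_len_ok(len), num_des);
	return 0;
}
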
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index a6c03ab14a0..c8e225947ad 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -932,4 +932,19 @@ static inline int rdev_channel_switch(struct cfg80211_registered_device *rdev,
return ret;
}
+static inline int rdev_set_qos_map(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct cfg80211_qos_map *qos_map)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (rdev->ops->set_qos_map) {
+ trace_rdev_set_qos_map(&rdev->wiphy, dev, qos_map);
+ ret = rdev->ops->set_qos_map(&rdev->wiphy, dev, qos_map);
+ trace_rdev_return_int(&rdev->wiphy, ret);
+ }
+
+ return ret;
+}
+
#endif /* __CFG80211_RDEV_OPS */
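
A driver-side sketch, inferred only from how the rdev_set_qos_map() wrapper above invokes the op (wiphy, netdev, qos_map, with NULL meaning "clear the map"); the example_vif structure and its fields are hypothetical, not part of the patch:

#include <net/cfg80211.h>

struct example_vif {				/* hypothetical per-netdev state */
	struct cfg80211_qos_map qos_map;
	bool qos_map_set;
};

static int example_set_qos_map(struct wiphy *wiphy, struct net_device *dev,
			       struct cfg80211_qos_map *qos_map)
{
	struct example_vif *vif = netdev_priv(dev);

	if (!qos_map) {				/* cfg80211 clears the map with NULL */
		vif->qos_map_set = false;
		return 0;
	}

	vif->qos_map = *qos_map;
	vif->qos_map_set = true;
	return 0;
}
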
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index d3c5bd7c6b5..5d6e7bb2fc8 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -872,6 +872,8 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
for (i = 0; i < 6; i++)
rdev_del_key(rdev, dev, i, false, NULL);
+ rdev_set_qos_map(rdev, dev, NULL);
+
#ifdef CONFIG_CFG80211_WEXT
memset(&wrqu, 0, sizeof(wrqu));
wrqu.ap_addr.sa_family = ARPHRD_ETHER;
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index f7aa7a72d9b..fbcc23edee5 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -186,6 +186,28 @@
#define BOOL_TO_STR(bo) (bo) ? "true" : "false"
+#define QOS_MAP_ENTRY __field(u8, num_des) \
+ __array(u8, dscp_exception, \
+ 2 * IEEE80211_QOS_MAP_MAX_EX) \
+ __array(u8, up, IEEE80211_QOS_MAP_LEN_MIN)
+#define QOS_MAP_ASSIGN(qos_map) \
+ do { \
+ if ((qos_map)) { \
+ __entry->num_des = (qos_map)->num_des; \
+ memcpy(__entry->dscp_exception, \
+ &(qos_map)->dscp_exception, \
+ 2 * IEEE80211_QOS_MAP_MAX_EX); \
+ memcpy(__entry->up, &(qos_map)->up, \
+ IEEE80211_QOS_MAP_LEN_MIN); \
+ } else { \
+ __entry->num_des = 0; \
+ memset(__entry->dscp_exception, 0, \
+ 2 * IEEE80211_QOS_MAP_MAX_EX); \
+ memset(__entry->up, 0, \
+ IEEE80211_QOS_MAP_LEN_MIN); \
+ } \
+ } while (0)
+
/*************************************************************
* rdev->ops traces *
*************************************************************/
@@ -1875,6 +1897,24 @@ TRACE_EVENT(rdev_channel_switch,
__entry->counter_offset_presp)
);
+TRACE_EVENT(rdev_set_qos_map,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_qos_map *qos_map),
+ TP_ARGS(wiphy, netdev, qos_map),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ QOS_MAP_ENTRY
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ QOS_MAP_ASSIGN(qos_map);
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", num_des: %u",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->num_des)
+);
+
/*************************************************************
* cfg80211 exported functions traces *
*************************************************************/
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 935dea9485d..5618888853b 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -689,7 +689,8 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
EXPORT_SYMBOL(ieee80211_amsdu_to_8023s);
/* Given a data frame determine the 802.1p/1d tag to use. */
-unsigned int cfg80211_classify8021d(struct sk_buff *skb)
+unsigned int cfg80211_classify8021d(struct sk_buff *skb,
+ struct cfg80211_qos_map *qos_map)
{
unsigned int dscp;
unsigned char vlan_priority;
@@ -720,6 +721,21 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb)
return 0;
}
+ if (qos_map) {
+ unsigned int i, tmp_dscp = dscp >> 2;
+
+ for (i = 0; i < qos_map->num_des; i++) {
+ if (tmp_dscp == qos_map->dscp_exception[i].dscp)
+ return qos_map->dscp_exception[i].up;
+ }
+
+ for (i = 0; i < 8; i++) {
+ if (tmp_dscp >= qos_map->up[i].low &&
+ tmp_dscp <= qos_map->up[i].high)
+ return i;
+ }
+ }
+
return dscp >> 5;
}
EXPORT_SYMBOL(cfg80211_classify8021d);
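
To make the new lookup order concrete: with a QoS map installed, the 6-bit DSCP is matched against the exception list first, then against the eight (low, high) ranges, where the matching range index is the 802.1d user priority. A standalone C sketch of that order; the structures mirror cfg80211_qos_map but the names are local stand-ins:

#include <stdint.h>
#include <stdio.h>

struct dscp_exception { uint8_t dscp, up; };
struct dscp_range { uint8_t low, high; };

struct qos_map_example {
	unsigned int num_des;
	struct dscp_exception dscp_exception[21];
	struct dscp_range up[8];
};

static unsigned int classify_dscp(const struct qos_map_example *map, uint8_t dscp)
{
	unsigned int i;

	for (i = 0; i < map->num_des; i++)	/* exceptions take precedence */
		if (dscp == map->dscp_exception[i].dscp)
			return map->dscp_exception[i].up;

	for (i = 0; i < 8; i++)			/* range index == user priority */
		if (dscp >= map->up[i].low && dscp <= map->up[i].high)
			return i;

	return 0;				/* no match: best effort */
}

int main(void)
{
	struct qos_map_example map = {
		.num_des = 1,
		.dscp_exception = { { .dscp = 46, .up = 6 } },	/* EF -> voice */
		.up = { { 0, 7 }, { 8, 15 }, { 16, 23 }, { 24, 31 },
			{ 32, 39 }, { 40, 47 }, { 48, 55 }, { 56, 63 } },
	};

	printf("dscp 46 -> up %u\n", classify_dscp(&map, 46));	/* exception: 6 */
	printf("dscp 10 -> up %u\n", classify_dscp(&map, 10));	/* range: 1 */
	return 0;
}
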
@@ -863,6 +879,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
dev->ieee80211_ptr->use_4addr = false;
dev->ieee80211_ptr->mesh_id_up_len = 0;
+ rdev_set_qos_map(rdev, dev, NULL);
switch (otype) {
case NL80211_IFTYPE_AP: